git.karo-electronics.de Git - karo-tx-linux.git/commitdiff
Merge remote-tracking branch 'spi/for-next'
author Stephen Rothwell <sfr@canb.auug.org.au>
Thu, 22 Aug 2013 04:15:25 +0000 (14:15 +1000)
committer Stephen Rothwell <sfr@canb.auug.org.au>
Thu, 22 Aug 2013 04:15:25 +0000 (14:15 +1000)
3053 files changed:
CREDITS
Documentation/ABI/testing/sysfs-class-mtd
Documentation/ABI/testing/sysfs-fs-f2fs [new file with mode: 0644]
Documentation/DocBook/80211.tmpl
Documentation/DocBook/drm.tmpl
Documentation/DocBook/media/v4l/lirc_device_interface.xml
Documentation/DocBook/media/v4l/vidioc-g-jpegcomp.xml
Documentation/DocBook/mtdnand.tmpl
Documentation/IRQ-affinity.txt
Documentation/SubmittingPatches
Documentation/acpi/enumeration.txt
Documentation/arm/OMAP/omap_pm
Documentation/arm64/booting.txt
Documentation/block/cfq-iosched.txt
Documentation/cachetlb.txt
Documentation/cpu-freq/cpu-drivers.txt
Documentation/development-process/2.Process
Documentation/device-mapper/cache.txt
Documentation/device-mapper/statistics.txt [new file with mode: 0644]
Documentation/device-mapper/thin-provisioning.txt
Documentation/devicetree/bindings/arm/l2cc.txt
Documentation/devicetree/bindings/arm/ste-u300.txt
Documentation/devicetree/bindings/arm/vexpress-sysreg.txt
Documentation/devicetree/bindings/ata/ahci-platform.txt
Documentation/devicetree/bindings/ata/sata_highbank.txt [new file with mode: 0644]
Documentation/devicetree/bindings/c6x/dscr.txt
Documentation/devicetree/bindings/clock/clk-exynos-audss.txt
Documentation/devicetree/bindings/clock/st,nomadik.txt
Documentation/devicetree/bindings/dma/atmel-dma.txt
Documentation/devicetree/bindings/dma/fsl-imx-dma.txt
Documentation/devicetree/bindings/dma/ste-dma40.txt
Documentation/devicetree/bindings/hid/hid-over-i2c.txt [new file with mode: 0644]
Documentation/devicetree/bindings/input/input-reset.txt [new file with mode: 0644]
Documentation/devicetree/bindings/media/i2c/adv7343.txt [new file with mode: 0644]
Documentation/devicetree/bindings/media/i2c/ths8200.txt [new file with mode: 0644]
Documentation/devicetree/bindings/metag/pdc-intc.txt [new file with mode: 0644]
Documentation/devicetree/bindings/mfd/cros-ec.txt
Documentation/devicetree/bindings/mfd/palmas.txt
Documentation/devicetree/bindings/mfd/s2mps11.txt [new file with mode: 0644]
Documentation/devicetree/bindings/misc/atmel-ssc.txt
Documentation/devicetree/bindings/mtd/atmel-nand.txt
Documentation/devicetree/bindings/mtd/fsmc-nand.txt
Documentation/devicetree/bindings/mtd/partition.txt
Documentation/devicetree/bindings/net/can/atmel-can.txt
Documentation/devicetree/bindings/net/can/sja1000.txt
Documentation/devicetree/bindings/net/micrel-ksz9021.txt [new file with mode: 0644]
Documentation/devicetree/bindings/net/moxa,moxart-mac.txt [new file with mode: 0644]
Documentation/devicetree/bindings/pci/designware-pcie.txt
Documentation/devicetree/bindings/pinctrl/atmel,at91-pinctrl.txt
Documentation/devicetree/bindings/power_supply/msm-poweroff.txt [new file with mode: 0644]
Documentation/devicetree/bindings/powerpc/fsl/msi-pic.txt
Documentation/devicetree/bindings/regulator/88pm800.txt [new file with mode: 0644]
Documentation/devicetree/bindings/regulator/max8660.txt [new file with mode: 0644]
Documentation/devicetree/bindings/regulator/palmas-pmic.txt
Documentation/devicetree/bindings/regulator/pfuze100.txt [new file with mode: 0644]
Documentation/devicetree/bindings/regulator/regulator.txt
Documentation/devicetree/bindings/serial/mrvl,pxa-ssp.txt [new file with mode: 0644]
Documentation/devicetree/bindings/serial/rs485.txt
Documentation/devicetree/bindings/sound/ak4554.c [new file with mode: 0644]
Documentation/devicetree/bindings/sound/alc5632.txt
Documentation/devicetree/bindings/sound/atmel-sam9x5-wm8731-audio.txt [new file with mode: 0644]
Documentation/devicetree/bindings/sound/atmel-wm8904.txt [new file with mode: 0644]
Documentation/devicetree/bindings/sound/fsl,ssi.txt [moved from Documentation/devicetree/bindings/powerpc/fsl/ssi.txt with 90% similarity]
Documentation/devicetree/bindings/sound/imx-audmux.txt
Documentation/devicetree/bindings/sound/mrvl,pxa-ssp.txt [new file with mode: 0644]
Documentation/devicetree/bindings/sound/mrvl,pxa2xx-pcm.txt [new file with mode: 0644]
Documentation/devicetree/bindings/sound/nvidia,tegra-audio-alc5632.txt
Documentation/devicetree/bindings/sound/nvidia,tegra-audio-rt5640.txt
Documentation/devicetree/bindings/sound/nvidia,tegra-audio-wm8753.txt
Documentation/devicetree/bindings/sound/nvidia,tegra-audio-wm8903.txt
Documentation/devicetree/bindings/sound/pcm1792a.txt [new file with mode: 0644]
Documentation/devicetree/bindings/sound/rt5640.txt
Documentation/devicetree/bindings/sound/samsung-i2s.txt
Documentation/devicetree/bindings/sound/soc-ac97link.txt [new file with mode: 0644]
Documentation/devicetree/bindings/sound/ti,pcm1681.txt [new file with mode: 0644]
Documentation/devicetree/bindings/sound/tlv320aic3x.txt
Documentation/devicetree/bindings/sound/wm8731.txt
Documentation/devicetree/bindings/sound/wm8753.txt
Documentation/devicetree/bindings/sound/wm8903.txt
Documentation/devicetree/bindings/sound/wm8994.txt
Documentation/devicetree/bindings/thermal/exynos-thermal.txt [new file with mode: 0644]
Documentation/devicetree/bindings/thermal/imx-thermal.txt [new file with mode: 0644]
Documentation/devicetree/bindings/tty/serial/renesas,sci-serial.txt [new file with mode: 0644]
Documentation/devicetree/bindings/video/atmel,lcdc.txt [new file with mode: 0644]
Documentation/devicetree/bindings/video/simple-framebuffer.txt
Documentation/dma-buf-sharing.txt
Documentation/driver-model/devres.txt
Documentation/early-userspace/README
Documentation/fb/fbcon.txt
Documentation/fb/viafb.modes
Documentation/fb/viafb.txt
Documentation/filesystems/Locking
Documentation/filesystems/btrfs.txt
Documentation/filesystems/ext3.txt
Documentation/filesystems/ext4.txt
Documentation/filesystems/f2fs.txt
Documentation/filesystems/nfs/Exporting
Documentation/filesystems/nfs/pnfs.txt
Documentation/filesystems/qnx6.txt
Documentation/filesystems/relay.txt
Documentation/filesystems/sysfs-tagging.txt
Documentation/filesystems/vfs.txt
Documentation/filesystems/xfs.txt
Documentation/hid/uhid.txt
Documentation/hwmon/abituguru-datasheet
Documentation/hwmon/ads1015
Documentation/hwmon/w83791d
Documentation/hwmon/w83792d
Documentation/i2c/busses/i2c-piix4
Documentation/i2c/instantiating-devices
Documentation/i2c/upgrading-clients
Documentation/input/gamepad.txt [new file with mode: 0644]
Documentation/kbuild/kconfig.txt
Documentation/kernel-parameters.txt
Documentation/laptops/asus-laptop.txt
Documentation/laptops/sony-laptop.txt
Documentation/laptops/thinkpad-acpi.txt
Documentation/leds/leds-lm3556.txt
Documentation/leds/leds-lp3944.txt
Documentation/networking/00-INDEX
Documentation/networking/ip-sysctl.txt
Documentation/networking/netdev-FAQ.txt [new file with mode: 0644]
Documentation/networking/sctp.txt
Documentation/networking/tproxy.txt
Documentation/pinctrl.txt
Documentation/printk-formats.txt
Documentation/rapidio/rapidio.txt
Documentation/scsi/hptiop.txt
Documentation/sound/alsa/HD-Audio-Models.txt
Documentation/sound/alsa/HD-Audio.txt
Documentation/sound/alsa/compress_offload.txt
Documentation/sysfs-rules.txt
Documentation/target/tcm_mod_builder.py
Documentation/thermal/exynos_thermal
Documentation/trace/ftrace.txt
Documentation/virtual/kvm/api.txt
Documentation/x86/boot.txt
Documentation/zh_CN/SubmittingPatches
MAINTAINERS
arch/arc/boot/.gitignore [new file with mode: 0644]
arch/arc/include/asm/entry.h
arch/arc/include/asm/irqflags.h
arch/arc/include/asm/mmu.h
arch/arc/include/asm/mmu_context.h
arch/arc/include/asm/pgtable.h
arch/arc/include/asm/ptrace.h
arch/arc/include/asm/tlbflush.h
arch/arc/kernel/.gitignore [new file with mode: 0644]
arch/arc/kernel/entry.S
arch/arc/kernel/smp.c
arch/arc/mm/Makefile
arch/arc/mm/init.c
arch/arc/mm/tlb.c
arch/arc/mm/tlbex.S
arch/arc/mm/tlbflush.c [new file with mode: 0644]
arch/arm/Kconfig
arch/arm/Kconfig.debug
arch/arm/boot/dts/exynos5250.dtsi
arch/arm/boot/dts/exynos5440.dtsi
arch/arm/include/asm/hardware/debug-8250.S [deleted file]
arch/arm/include/asm/mach/arch.h
arch/arm/include/asm/memblock.h
arch/arm/include/asm/neon.h [new file with mode: 0644]
arch/arm/include/asm/pgtable.h
arch/arm/include/asm/prom.h
arch/arm/include/asm/xor.h
arch/arm/include/debug/8250.S [new file with mode: 0644]
arch/arm/include/debug/8250_32.S [deleted file]
arch/arm/include/debug/bcm2835.S [deleted file]
arch/arm/include/debug/cns3xxx.S [deleted file]
arch/arm/include/debug/highbank.S [deleted file]
arch/arm/include/debug/keystone.S [deleted file]
arch/arm/include/debug/mvebu.S [deleted file]
arch/arm/include/debug/mxs.S [deleted file]
arch/arm/include/debug/nomadik.S [deleted file]
arch/arm/include/debug/nspire.S [deleted file]
arch/arm/include/debug/picoxcell.S [deleted file]
arch/arm/include/debug/pl01x.S [moved from arch/arm/include/asm/hardware/debug-pl01x.S with 78% similarity]
arch/arm/include/debug/pxa.S [deleted file]
arch/arm/include/debug/rockchip.S [deleted file]
arch/arm/include/debug/socfpga.S [deleted file]
arch/arm/include/debug/sunxi.S [deleted file]
arch/arm/include/debug/u300.S [deleted file]
arch/arm/include/debug/ux500.S
arch/arm/include/debug/vexpress.S
arch/arm/kernel/atags.h
arch/arm/kernel/atags_parse.c
arch/arm/kernel/devtree.c
arch/arm/kernel/fiq.c
arch/arm/kernel/machine_kexec.c
arch/arm/kernel/perf_event_cpu.c
arch/arm/kernel/setup.c
arch/arm/kvm/mmu.c
arch/arm/lib/Makefile
arch/arm/lib/xor-neon.c [new file with mode: 0644]
arch/arm/mach-at91/at91sam9261_devices.c
arch/arm/mach-at91/at91sam9263_devices.c
arch/arm/mach-at91/at91sam9g45_devices.c
arch/arm/mach-at91/at91sam9rl_devices.c
arch/arm/mach-at91/board-sam9261ek.c
arch/arm/mach-at91/board-sam9263ek.c
arch/arm/mach-at91/board-sam9m10g45ek.c
arch/arm/mach-at91/board-sam9rlek.c
arch/arm/mach-at91/board.h
arch/arm/mach-davinci/board-da850-evm.c
arch/arm/mach-davinci/cpuidle.c
arch/arm/mach-davinci/include/mach/debug-macro.S [deleted file]
arch/arm/mach-dove/common.c
arch/arm/mach-dove/include/mach/debug-macro.S [deleted file]
arch/arm/mach-ebsa110/include/mach/debug-macro.S [deleted file]
arch/arm/mach-ep93xx/Kconfig
arch/arm/mach-ep93xx/include/mach/debug-macro.S [deleted file]
arch/arm/mach-ep93xx/include/mach/uncompress.h
arch/arm/mach-footbridge/include/mach/debug-macro.S
arch/arm/mach-gemini/include/mach/debug-macro.S [deleted file]
arch/arm/mach-integrator/include/mach/debug-macro.S [deleted file]
arch/arm/mach-iop13xx/include/mach/debug-macro.S [deleted file]
arch/arm/mach-iop32x/include/mach/debug-macro.S [deleted file]
arch/arm/mach-iop33x/include/mach/debug-macro.S [deleted file]
arch/arm/mach-ixp4xx/include/mach/debug-macro.S [deleted file]
arch/arm/mach-kirkwood/common.c
arch/arm/mach-kirkwood/include/mach/debug-macro.S [deleted file]
arch/arm/mach-lpc32xx/include/mach/debug-macro.S [deleted file]
arch/arm/mach-mv78xx0/include/mach/debug-macro.S [deleted file]
arch/arm/mach-omap2/devices.c
arch/arm/mach-orion5x/include/mach/debug-macro.S [deleted file]
arch/arm/mach-realview/include/mach/debug-macro.S [deleted file]
arch/arm/mach-rpc/include/mach/debug-macro.S [deleted file]
arch/arm/mach-shmobile/board-armadillo800eva.c
arch/arm/mach-shmobile/board-bockw.c
arch/arm/mach-spear/include/mach/debug-macro.S [deleted file]
arch/arm/mach-spear/include/mach/spear.h
arch/arm/mach-ux500/Makefile
arch/arm/mach-versatile/include/mach/debug-macro.S [deleted file]
arch/arm/mm/Kconfig
arch/arm/mm/cache-l2x0.c
arch/arm/mm/hugetlbpage.c
arch/arm/mm/init.c
arch/arm/mm/mmu.c
arch/arm/mm/nommu.c
arch/arm/mm/proc-feroceon.S
arch/arm/plat-pxa/ssp.c
arch/arm/vfp/vfphw.S
arch/arm/vfp/vfpmodule.c
arch/arm64/Kconfig
arch/arm64/include/asm/neon.h [new file with mode: 0644]
arch/arm64/include/asm/pgtable-2level-types.h
arch/arm64/include/asm/pgtable-3level-types.h
arch/arm64/kernel/fpsimd.c
arch/arm64/kernel/head.S
arch/arm64/kernel/perf_event.c
arch/arm64/mm/init.c
arch/avr32/boards/atngw100/evklcd10x.c
arch/avr32/boards/atngw100/mrmt.c
arch/avr32/boards/atstk1000/atstk1000.h
arch/avr32/boards/atstk1000/setup.c
arch/avr32/boards/favr-32/setup.c
arch/avr32/boards/hammerhead/setup.c
arch/avr32/boards/merisc/display.c
arch/avr32/boards/mimc200/setup.c
arch/avr32/mach-at32ap/at32ap700x.c
arch/avr32/mach-at32ap/include/mach/board.h
arch/c6x/kernel/devicetree.c
arch/cris/Kconfig
arch/cris/arch-v10/drivers/Kconfig
arch/cris/arch-v10/drivers/Makefile
arch/cris/arch-v32/drivers/Kconfig
arch/cris/arch-v32/mach-a3/Kconfig
arch/cris/include/asm/processor.h
arch/cris/include/uapi/asm/kvm_para.h [new file with mode: 0644]
arch/frv/mb93090-mb00/pci-vdk.c
arch/ia64/include/asm/dmi.h
arch/ia64/kernel/elfcore.c
arch/m68k/Kconfig
arch/m68k/Kconfig.machine
arch/m68k/include/asm/io_no.h
arch/m68k/include/asm/page.h
arch/m68k/include/asm/page_mm.h
arch/m68k/kernel/setup_no.c
arch/m68k/kernel/signal.c
arch/m68k/platform/68000/m68328.c
arch/m68k/platform/68000/m68EZ328.c
arch/m68k/platform/68000/m68VZ328.c
arch/m68k/platform/68360/commproc.c
arch/m68k/platform/68360/config.c
arch/metag/Kconfig.soc
arch/metag/boot/dts/tz1090.dtsi
arch/metag/mm/init.c
arch/microblaze/kernel/prom.c
arch/microblaze/pci/pci-common.c
arch/mips/bcm63xx/nvram.c
arch/mips/cavium-octeon/setup.c
arch/mips/include/asm/Kbuild
arch/mips/include/asm/bmips.h
arch/mips/include/asm/cpu.h
arch/mips/include/asm/cputime.h [deleted file]
arch/mips/include/asm/current.h [deleted file]
arch/mips/include/asm/emergency-restart.h [deleted file]
arch/mips/include/asm/local64.h [deleted file]
arch/mips/include/asm/mach-bcm63xx/bcm63xx_nvram.h
arch/mips/include/asm/mutex.h [deleted file]
arch/mips/include/asm/parport.h [deleted file]
arch/mips/include/asm/percpu.h [deleted file]
arch/mips/include/asm/scatterlist.h [deleted file]
arch/mips/include/asm/sections.h [deleted file]
arch/mips/include/asm/segment.h [deleted file]
arch/mips/include/asm/serial.h [deleted file]
arch/mips/include/asm/ucontext.h [deleted file]
arch/mips/include/asm/xor.h [deleted file]
arch/mips/include/uapi/asm/Kbuild
arch/mips/include/uapi/asm/auxvec.h [deleted file]
arch/mips/include/uapi/asm/ipcbuf.h [deleted file]
arch/mips/kernel/cpu-probe.c
arch/mips/kernel/idle.c
arch/mips/kernel/prom.c
arch/mips/math-emu/cp1emu.c
arch/mips/mm/c-octeon.c
arch/mips/mm/tlb-funcs.S
arch/mips/mm/tlbex.c
arch/mips/netlogic/xlp/usb-init.c
arch/mips/pci/pci-octeon.c
arch/openrisc/kernel/prom.c
arch/parisc/configs/generic-32bit_defconfig [new file with mode: 0644]
arch/parisc/configs/generic-64bit_defconfig [new file with mode: 0644]
arch/powerpc/Kconfig
arch/powerpc/boot/.gitignore
arch/powerpc/boot/dts/b4420qds.dts
arch/powerpc/boot/dts/b4860qds.dts
arch/powerpc/boot/dts/b4qds.dtsi [moved from arch/powerpc/boot/dts/b4qds.dts with 100% similarity]
arch/powerpc/boot/dts/fsl/b4si-post.dtsi
arch/powerpc/boot/dts/fsl/qoriq-mpic4.3.dtsi [new file with mode: 0644]
arch/powerpc/boot/dts/fsl/t4240si-post.dtsi
arch/powerpc/boot/dts/p1020rdb-pd.dts [new file with mode: 0644]
arch/powerpc/configs/85xx/p1023rds_defconfig
arch/powerpc/configs/corenet32_smp_defconfig
arch/powerpc/configs/corenet64_smp_defconfig
arch/powerpc/configs/mpc83xx_defconfig
arch/powerpc/configs/mpc85xx_defconfig
arch/powerpc/configs/mpc85xx_smp_defconfig
arch/powerpc/include/asm/asm-compat.h
arch/powerpc/include/asm/btext.h
arch/powerpc/include/asm/cacheflush.h
arch/powerpc/include/asm/device.h
arch/powerpc/include/asm/emulated_ops.h
arch/powerpc/include/asm/epapr_hcalls.h
arch/powerpc/include/asm/exception-64s.h
arch/powerpc/include/asm/fsl_pamu_stash.h [new file with mode: 0644]
arch/powerpc/include/asm/io.h
arch/powerpc/include/asm/irqflags.h
arch/powerpc/include/asm/lppaca.h
arch/powerpc/include/asm/mpic.h
arch/powerpc/include/asm/opal.h
arch/powerpc/include/asm/paca.h
arch/powerpc/include/asm/pci-bridge.h
arch/powerpc/include/asm/perf_event_fsl_emb.h
arch/powerpc/include/asm/ppc-opcode.h
arch/powerpc/include/asm/ppc_asm.h
arch/powerpc/include/asm/prom.h
arch/powerpc/include/asm/reg.h
arch/powerpc/include/asm/reg_booke.h
arch/powerpc/include/asm/reg_fsl_emb.h
arch/powerpc/include/asm/rtas.h
arch/powerpc/include/asm/smp.h
arch/powerpc/include/asm/spinlock.h
arch/powerpc/include/asm/spu.h
arch/powerpc/include/asm/switch_to.h
arch/powerpc/include/asm/topology.h
arch/powerpc/include/asm/udbg.h
arch/powerpc/include/uapi/asm/elf.h
arch/powerpc/kernel/Makefile
arch/powerpc/kernel/align.c
arch/powerpc/kernel/btext.c
arch/powerpc/kernel/cacheinfo.c
arch/powerpc/kernel/cpu_setup_fsl_booke.S
arch/powerpc/kernel/cputable.c
arch/powerpc/kernel/entry_64.S
arch/powerpc/kernel/epapr_paravirt.c
arch/powerpc/kernel/exceptions-64e.S
arch/powerpc/kernel/exceptions-64s.S
arch/powerpc/kernel/head_40x.S
arch/powerpc/kernel/head_44x.S
arch/powerpc/kernel/head_64.S
arch/powerpc/kernel/head_8xx.S
arch/powerpc/kernel/head_fsl_booke.S
arch/powerpc/kernel/io-workarounds.c
arch/powerpc/kernel/io.c
arch/powerpc/kernel/legacy_serial.c
arch/powerpc/kernel/lparcfg.c
arch/powerpc/kernel/misc_32.S
arch/powerpc/kernel/misc_64.S
arch/powerpc/kernel/paca.c
arch/powerpc/kernel/pci-common.c
arch/powerpc/kernel/pci_64.c
arch/powerpc/kernel/pci_dn.c
arch/powerpc/kernel/pci_of_scan.c
arch/powerpc/kernel/ppc_ksyms.c
arch/powerpc/kernel/process.c
arch/powerpc/kernel/prom.c
arch/powerpc/kernel/prom_init.c
arch/powerpc/kernel/prom_init_check.sh
arch/powerpc/kernel/prom_parse.c
arch/powerpc/kernel/rtas.c
arch/powerpc/kernel/setup-common.c
arch/powerpc/kernel/setup_32.c
arch/powerpc/kernel/setup_64.c
arch/powerpc/kernel/signal_32.c
arch/powerpc/kernel/signal_64.c
arch/powerpc/kernel/smp.c
arch/powerpc/kernel/softemu8xx.c [deleted file]
arch/powerpc/kernel/swsusp_booke.S
arch/powerpc/kernel/time.c
arch/powerpc/kernel/tm.S
arch/powerpc/kernel/traps.c
arch/powerpc/kernel/udbg_16550.c
arch/powerpc/kernel/vio.c
arch/powerpc/kvm/book3s_64_slb.S
arch/powerpc/kvm/book3s_hv.c
arch/powerpc/kvm/book3s_hv_rm_mmu.c
arch/powerpc/kvm/book3s_hv_rmhandlers.S
arch/powerpc/kvm/emulate.c
arch/powerpc/lib/locks.c
arch/powerpc/math-emu/Makefile
arch/powerpc/math-emu/math.c
arch/powerpc/mm/fault.c
arch/powerpc/mm/gup.c
arch/powerpc/mm/hash_utils_64.c
arch/powerpc/mm/init_32.c
arch/powerpc/mm/numa.c
arch/powerpc/mm/slb.c
arch/powerpc/mm/subpage-prot.c
arch/powerpc/oprofile/op_model_fsl_emb.c
arch/powerpc/perf/Makefile
arch/powerpc/perf/core-book3s.c
arch/powerpc/perf/core-fsl-emb.c
arch/powerpc/perf/e6500-pmu.c [new file with mode: 0644]
arch/powerpc/platforms/52xx/mpc52xx_pic.c
arch/powerpc/platforms/85xx/corenet_ds.c
arch/powerpc/platforms/85xx/mpc85xx_rdb.c
arch/powerpc/platforms/85xx/smp.c
arch/powerpc/platforms/Kconfig
arch/powerpc/platforms/cell/iommu.c
arch/powerpc/platforms/cell/smp.c
arch/powerpc/platforms/cell/spu_syscalls.c
arch/powerpc/platforms/cell/spufs/coredump.c
arch/powerpc/platforms/cell/spufs/inode.c
arch/powerpc/platforms/cell/spufs/spufs.h
arch/powerpc/platforms/powernv/Kconfig
arch/powerpc/platforms/powernv/Makefile
arch/powerpc/platforms/powernv/eeh-ioda.c
arch/powerpc/platforms/powernv/opal-lpc.c [new file with mode: 0644]
arch/powerpc/platforms/powernv/opal-wrappers.S
arch/powerpc/platforms/powernv/opal.c
arch/powerpc/platforms/powernv/pci-ioda.c
arch/powerpc/platforms/powernv/powernv.h
arch/powerpc/platforms/powernv/setup.c
arch/powerpc/platforms/powernv/smp.c
arch/powerpc/platforms/pseries/dtl.c
arch/powerpc/platforms/pseries/hotplug-cpu.c
arch/powerpc/platforms/pseries/hvconsole.c
arch/powerpc/platforms/pseries/iommu.c
arch/powerpc/platforms/pseries/lpar.c
arch/powerpc/platforms/pseries/nvram.c
arch/powerpc/platforms/pseries/plpar_wrappers.h
arch/powerpc/platforms/pseries/processor_idle.c
arch/powerpc/platforms/pseries/pseries_energy.c
arch/powerpc/platforms/pseries/setup.c
arch/powerpc/platforms/pseries/smp.c
arch/powerpc/platforms/wsp/wsp.h
arch/powerpc/sysdev/fsl_msi.c
arch/powerpc/sysdev/fsl_msi.h
arch/powerpc/sysdev/fsl_pci.c
arch/powerpc/sysdev/fsl_pci.h
arch/powerpc/sysdev/xics/icp-native.c
arch/powerpc/sysdev/xics/xics-common.c
arch/s390/Kconfig
arch/s390/include/asm/airq.h
arch/s390/include/asm/bitops.h
arch/s390/include/asm/hardirq.h
arch/s390/include/asm/hugetlb.h
arch/s390/include/asm/hw_irq.h
arch/s390/include/asm/irq.h
arch/s390/include/asm/mmu_context.h
arch/s390/include/asm/page.h
arch/s390/include/asm/pci.h
arch/s390/include/asm/pci_insn.h
arch/s390/include/asm/pci_io.h
arch/s390/include/asm/pgtable.h
arch/s390/include/asm/serial.h [new file with mode: 0644]
arch/s390/include/asm/switch_to.h
arch/s390/include/asm/tlb.h
arch/s390/include/asm/tlbflush.h
arch/s390/kernel/entry.S
arch/s390/kernel/entry64.S
arch/s390/kernel/irq.c
arch/s390/kernel/nmi.c
arch/s390/kernel/ptrace.c
arch/s390/kernel/vdso.c
arch/s390/lib/uaccess_pt.c
arch/s390/mm/dump_pagetables.c
arch/s390/mm/gup.c
arch/s390/mm/hugetlbpage.c
arch/s390/mm/pageattr.c
arch/s390/mm/pgtable.c
arch/s390/mm/vmem.c
arch/s390/pci/Makefile
arch/s390/pci/pci.c
arch/s390/pci/pci_dma.c
arch/s390/pci/pci_insn.c
arch/s390/pci/pci_msi.c [deleted file]
arch/sh/boards/board-espt.c
arch/sh/boards/board-sh7757lcr.c
arch/sh/boards/mach-ecovec24/setup.c
arch/sh/boards/mach-se/7724/setup.c
arch/sh/boards/mach-sh7763rdp/setup.c
arch/sh/include/asm/hw_breakpoint.h
arch/sh/include/cpu-common/cpu/ubc.h [new file with mode: 0644]
arch/sh/include/cpu-sh2a/cpu/ubc.h [new file with mode: 0644]
arch/sh/kernel/cpu/sh2/setup-sh7619.c
arch/sh/kernel/cpu/sh2a/Makefile
arch/sh/kernel/cpu/sh2a/ubc.c [new file with mode: 0644]
arch/sh/kernel/cpu/shmobile/cpuidle.c
arch/sh/kernel/hw_breakpoint.c
arch/sparc/include/asm/switch_to_64.h
arch/sparc/kernel/cpumap.c
arch/sparc/kernel/entry.S
arch/sparc/kernel/kgdb_64.c
arch/sparc/kernel/ktlb.S
arch/sparc/kernel/ptrace_64.c
arch/sparc/kernel/setup_64.c
arch/sparc/kernel/syscalls.S
arch/tile/Kconfig
arch/tile/Kconfig.debug
arch/tile/Makefile
arch/tile/gxio/Kconfig
arch/tile/gxio/Makefile
arch/tile/gxio/iorpc_mpipe.c
arch/tile/gxio/iorpc_mpipe_info.c
arch/tile/gxio/iorpc_trio.c
arch/tile/gxio/iorpc_uart.c [new file with mode: 0644]
arch/tile/gxio/mpipe.c
arch/tile/gxio/uart.c [new file with mode: 0644]
arch/tile/include/arch/trio.h
arch/tile/include/arch/uart.h [new file with mode: 0644]
arch/tile/include/arch/uart_def.h [new file with mode: 0644]
arch/tile/include/asm/Kbuild
arch/tile/include/asm/atomic_32.h
arch/tile/include/asm/barrier.h
arch/tile/include/asm/bitops.h
arch/tile/include/asm/cache.h
arch/tile/include/asm/cacheflush.h
arch/tile/include/asm/cmpxchg.h
arch/tile/include/asm/dma-mapping.h
arch/tile/include/asm/elf.h
arch/tile/include/asm/fixmap.h
arch/tile/include/asm/ftrace.h
arch/tile/include/asm/futex.h
arch/tile/include/asm/homecache.h
arch/tile/include/asm/io.h
arch/tile/include/asm/irqflags.h
arch/tile/include/asm/kdebug.h [new file with mode: 0644]
arch/tile/include/asm/kgdb.h [new file with mode: 0644]
arch/tile/include/asm/kprobes.h [new file with mode: 0644]
arch/tile/include/asm/kvm.h [new file with mode: 0644]
arch/tile/include/asm/kvm_host.h [new file with mode: 0644]
arch/tile/include/asm/kvm_para.h [moved from arch/tile/include/asm/hw_irq.h with 67% similarity]
arch/tile/include/asm/kvm_virtio.h [new file with mode: 0644]
arch/tile/include/asm/mmu.h
arch/tile/include/asm/mmu_context.h
arch/tile/include/asm/mmzone.h
arch/tile/include/asm/module.h
arch/tile/include/asm/page.h
arch/tile/include/asm/pci.h
arch/tile/include/asm/pgtable_32.h
arch/tile/include/asm/pgtable_64.h
arch/tile/include/asm/processor.h
arch/tile/include/asm/ptrace.h
arch/tile/include/asm/sections.h
arch/tile/include/asm/setup.h
arch/tile/include/asm/smp.h
arch/tile/include/asm/spinlock_64.h
arch/tile/include/asm/string.h
arch/tile/include/asm/switch_to.h
arch/tile/include/asm/thread_info.h
arch/tile/include/asm/timex.h
arch/tile/include/asm/traps.h
arch/tile/include/asm/uaccess.h
arch/tile/include/asm/unaligned.h
arch/tile/include/asm/vdso.h [new file with mode: 0644]
arch/tile/include/gxio/iorpc_mpipe.h
arch/tile/include/gxio/iorpc_mpipe_info.h
arch/tile/include/gxio/iorpc_trio.h
arch/tile/include/gxio/iorpc_uart.h [new file with mode: 0644]
arch/tile/include/gxio/mpipe.h
arch/tile/include/gxio/uart.h [new file with mode: 0644]
arch/tile/include/hv/drv_mpipe_intf.h
arch/tile/include/hv/drv_trio_intf.h
arch/tile/include/hv/drv_uart_intf.h [new file with mode: 0644]
arch/tile/include/hv/hypervisor.h
arch/tile/include/uapi/arch/Kbuild
arch/tile/include/uapi/arch/chip.h
arch/tile/include/uapi/arch/chip_tile64.h [deleted file]
arch/tile/include/uapi/arch/opcode_tilegx.h
arch/tile/include/uapi/arch/opcode_tilepro.h
arch/tile/include/uapi/arch/sim.h
arch/tile/include/uapi/arch/sim_def.h
arch/tile/include/uapi/arch/spr_def_32.h
arch/tile/include/uapi/arch/spr_def_64.h
arch/tile/include/uapi/asm/Kbuild
arch/tile/include/uapi/asm/auxvec.h
arch/tile/include/uapi/asm/cachectl.h
arch/tile/include/uapi/asm/kvm.h [new file with mode: 0644]
arch/tile/include/uapi/asm/kvm_virtio.h [new file with mode: 0644]
arch/tile/kernel/Makefile
arch/tile/kernel/asm-offsets.c
arch/tile/kernel/compat_signal.c
arch/tile/kernel/early_printk.c
arch/tile/kernel/entry.S
arch/tile/kernel/ftrace.c [new file with mode: 0644]
arch/tile/kernel/hardwall.c
arch/tile/kernel/head_32.S
arch/tile/kernel/head_64.S
arch/tile/kernel/hvglue.S [new file with mode: 0644]
arch/tile/kernel/hvglue.lds [deleted file]
arch/tile/kernel/hvglue_trace.c [new file with mode: 0644]
arch/tile/kernel/intvec_32.S
arch/tile/kernel/intvec_64.S
arch/tile/kernel/irq.c
arch/tile/kernel/kgdb.c [new file with mode: 0644]
arch/tile/kernel/kprobes.c [new file with mode: 0644]
arch/tile/kernel/kvm_virtio.c [new file with mode: 0644]
arch/tile/kernel/mcount_64.S [new file with mode: 0644]
arch/tile/kernel/pci-dma.c
arch/tile/kernel/pci.c
arch/tile/kernel/pci_gx.c
arch/tile/kernel/proc.c
arch/tile/kernel/process.c
arch/tile/kernel/ptrace.c
arch/tile/kernel/reboot.c
arch/tile/kernel/regs_32.S
arch/tile/kernel/regs_64.S
arch/tile/kernel/relocate_kernel_32.S
arch/tile/kernel/relocate_kernel_64.S
arch/tile/kernel/setup.c
arch/tile/kernel/signal.c
arch/tile/kernel/single_step.c
arch/tile/kernel/smp.c
arch/tile/kernel/smpboot.c
arch/tile/kernel/stack.c
arch/tile/kernel/sys.c
arch/tile/kernel/sysfs.c
arch/tile/kernel/time.c
arch/tile/kernel/tlb.c
arch/tile/kernel/traps.c
arch/tile/kernel/unaligned.c [new file with mode: 0644]
arch/tile/kernel/vdso.c [new file with mode: 0644]
arch/tile/kernel/vdso/Makefile [new file with mode: 0644]
arch/tile/kernel/vdso/vdso.S [new file with mode: 0644]
arch/tile/kernel/vdso/vdso.lds.S [new file with mode: 0644]
arch/tile/kernel/vdso/vdso32.S [new file with mode: 0644]
arch/tile/kernel/vdso/vgettimeofday.c [new file with mode: 0644]
arch/tile/kernel/vdso/vrt_sigreturn.S [new file with mode: 0644]
arch/tile/kernel/vmlinux.lds.S
arch/tile/kvm/Kconfig
arch/tile/kvm/Makefile [new file with mode: 0644]
arch/tile/kvm/entry.S [new file with mode: 0644]
arch/tile/kvm/kvm-tile.c [new file with mode: 0644]
arch/tile/lib/Makefile
arch/tile/lib/atomic_32.c
arch/tile/lib/atomic_asm_32.S
arch/tile/lib/cacheflush.c
arch/tile/lib/exports.c
arch/tile/lib/memchr_64.c
arch/tile/lib/memcpy_32.S
arch/tile/lib/memcpy_64.c
arch/tile/lib/memcpy_tile64.c [deleted file]
arch/tile/lib/memcpy_user_64.c
arch/tile/lib/memset_32.c
arch/tile/lib/memset_64.c
arch/tile/lib/strchr_32.c
arch/tile/lib/strchr_64.c
arch/tile/lib/string-endian.h
arch/tile/lib/strlen_32.c
arch/tile/lib/strnlen_32.c [new file with mode: 0644]
arch/tile/lib/strnlen_64.c [new file with mode: 0644]
arch/tile/lib/usercopy_32.S
arch/tile/lib/usercopy_64.S
arch/tile/mm/elf.c
arch/tile/mm/fault.c
arch/tile/mm/highmem.c
arch/tile/mm/homecache.c
arch/tile/mm/hugetlbpage.c
arch/tile/mm/init.c
arch/tile/mm/migrate_32.S
arch/tile/mm/migrate_64.S
arch/tile/mm/mmap.c
arch/tile/mm/pgtable.c
arch/x86/crypto/camellia_glue.c
arch/x86/ia32/ia32_aout.c
arch/x86/include/asm/bootparam_utils.h
arch/x86/include/asm/microcode_amd.h
arch/x86/include/asm/processor.h
arch/x86/include/asm/xor_avx.h
arch/x86/kernel/cpu/amd.c
arch/x86/kernel/devicetree.c
arch/x86/kernel/microcode_amd.c
arch/x86/kernel/microcode_amd_early.c
arch/x86/kernel/tboot.c
arch/x86/lguest/boot.c
arch/x86/mm/srat.c
arch/x86/pci/i386.c
arch/x86/pci/mmconfig-shared.c
arch/x86/pci/mrst.c
arch/x86/um/elfcore.c
arch/xtensa/kernel/setup.c
block/blk-cgroup.c
block/blk-cgroup.h
block/blk-throttle.c
block/cfq-iosched.c
crypto/aes_generic.c
crypto/api.c
crypto/camellia_generic.c
crypto/cast_common.c
crypto/scatterwalk.c
crypto/testmgr.c
drivers/acpi/Kconfig
drivers/acpi/ac.c
drivers/acpi/acpi_pad.c
drivers/acpi/acpi_platform.c
drivers/acpi/acpi_processor.c
drivers/acpi/acpica/acglobal.h
drivers/acpi/acpica/aclocal.h
drivers/acpi/acpica/acnamesp.h
drivers/acpi/acpica/actables.h
drivers/acpi/acpica/acutils.h
drivers/acpi/acpica/evgpeinit.c
drivers/acpi/acpica/exdump.c
drivers/acpi/acpica/exoparg1.c
drivers/acpi/acpica/hwesleep.c
drivers/acpi/acpica/hwtimer.c
drivers/acpi/acpica/nspredef.c
drivers/acpi/acpica/nswalk.c
drivers/acpi/acpica/nsxfeval.c
drivers/acpi/acpica/nsxfname.c
drivers/acpi/acpica/tbfadt.c
drivers/acpi/acpica/tbxfroot.c
drivers/acpi/acpica/uteval.c
drivers/acpi/acpica/utglobal.c
drivers/acpi/acpica/utosi.c
drivers/acpi/acpica/utstring.c
drivers/acpi/acpica/utxface.c
drivers/acpi/apei/erst.c
drivers/acpi/battery.c
drivers/acpi/blacklist.c
drivers/acpi/bus.c
drivers/acpi/button.c
drivers/acpi/device_pm.c
drivers/acpi/dock.c
drivers/acpi/ec.c
drivers/acpi/event.c
drivers/acpi/fan.c
drivers/acpi/glue.c
drivers/acpi/internal.h
drivers/acpi/numa.c
drivers/acpi/osl.c
drivers/acpi/pci_slot.c
drivers/acpi/power.c
drivers/acpi/processor_core.c
drivers/acpi/processor_driver.c
drivers/acpi/processor_perflib.c
drivers/acpi/processor_thermal.c
drivers/acpi/resource.c
drivers/acpi/sbs.c
drivers/acpi/scan.c
drivers/acpi/sleep.c
drivers/acpi/thermal.c
drivers/acpi/utils.c
drivers/acpi/video.c
drivers/acpi/video_detect.c
drivers/ata/libata-acpi.c
drivers/ata/libata-pmp.c
drivers/ata/libata.h
drivers/ata/pata_arasan_cf.c
drivers/ata/pata_at32.c
drivers/ata/pata_at91.c
drivers/ata/pata_ixp4xx_cf.c
drivers/ata/pata_octeon_cf.c
drivers/ata/pata_platform.c
drivers/ata/pata_pxa.c
drivers/ata/pata_samsung_cf.c
drivers/ata/sata_fsl.c
drivers/ata/sata_highbank.c
drivers/ata/sata_mv.c
drivers/ata/sata_rcar.c
drivers/base/power/main.c
drivers/base/power/opp.c
drivers/base/regmap/regmap-irq.c
drivers/bcma/Kconfig
drivers/bcma/main.c
drivers/bcma/scan.c
drivers/block/drbd/drbd_bitmap.c
drivers/block/loop.c
drivers/block/rbd.c
drivers/bluetooth/btmrvl_debugfs.c
drivers/bluetooth/btmrvl_sdio.c
drivers/char/hw_random/Kconfig
drivers/char/hw_random/mxc-rnga.c
drivers/char/hw_random/omap-rng.c
drivers/char/hw_random/picoxcell-rng.c
drivers/char/hw_random/tx4939-rng.c
drivers/char/raw.c
drivers/char/sonypi.c
drivers/char/virtio_console.c
drivers/cpufreq/Kconfig.arm
drivers/cpufreq/Makefile
drivers/cpufreq/acpi-cpufreq.c
drivers/cpufreq/at32ap-cpufreq.c
drivers/cpufreq/blackfin-cpufreq.c
drivers/cpufreq/cpufreq-cpu0.c
drivers/cpufreq/cpufreq-nforce2.c
drivers/cpufreq/cpufreq.c
drivers/cpufreq/cpufreq_conservative.c
drivers/cpufreq/cpufreq_governor.c
drivers/cpufreq/cpufreq_governor.h
drivers/cpufreq/cpufreq_ondemand.c
drivers/cpufreq/cpufreq_performance.c
drivers/cpufreq/cpufreq_powersave.c
drivers/cpufreq/cpufreq_stats.c
drivers/cpufreq/cris-artpec3-cpufreq.c
drivers/cpufreq/cris-etraxfs-cpufreq.c
drivers/cpufreq/e_powersaver.c
drivers/cpufreq/elanfreq.c
drivers/cpufreq/exynos-cpufreq.c
drivers/cpufreq/exynos-cpufreq.h
drivers/cpufreq/exynos5440-cpufreq.c
drivers/cpufreq/freq_table.c
drivers/cpufreq/gx-suspmod.c
drivers/cpufreq/ia64-acpi-cpufreq.c
drivers/cpufreq/intel_pstate.c
drivers/cpufreq/kirkwood-cpufreq.c
drivers/cpufreq/longhaul.c
drivers/cpufreq/longrun.c
drivers/cpufreq/loongson2_cpufreq.c
drivers/cpufreq/maple-cpufreq.c
drivers/cpufreq/mperf.c [deleted file]
drivers/cpufreq/mperf.h [deleted file]
drivers/cpufreq/p4-clockmod.c
drivers/cpufreq/pasemi-cpufreq.c
drivers/cpufreq/pcc-cpufreq.c
drivers/cpufreq/pmac32-cpufreq.c
drivers/cpufreq/pmac64-cpufreq.c
drivers/cpufreq/powernow-k6.c
drivers/cpufreq/powernow-k7.c
drivers/cpufreq/powernow-k8.c
drivers/cpufreq/ppc-corenet-cpufreq.c
drivers/cpufreq/ppc_cbe_cpufreq.c
drivers/cpufreq/pxa2xx-cpufreq.c
drivers/cpufreq/pxa3xx-cpufreq.c
drivers/cpufreq/s3c2416-cpufreq.c
drivers/cpufreq/s3c24xx-cpufreq.c
drivers/cpufreq/s3c64xx-cpufreq.c
drivers/cpufreq/sc520_freq.c
drivers/cpufreq/sh-cpufreq.c
drivers/cpufreq/sparc-us2e-cpufreq.c
drivers/cpufreq/sparc-us3-cpufreq.c
drivers/cpufreq/speedstep-centrino.c
drivers/cpufreq/speedstep-ich.c
drivers/cpufreq/speedstep-smi.c
drivers/cpufreq/tilegx-cpufreq.c [new file with mode: 0644]
drivers/cpufreq/unicore2-cpufreq.c
drivers/cpuidle/Kconfig
drivers/cpuidle/Kconfig.arm [new file with mode: 0644]
drivers/cpuidle/Makefile
drivers/cpuidle/cpuidle-calxeda.c
drivers/cpuidle/cpuidle-kirkwood.c
drivers/cpuidle/cpuidle-ux500.c [moved from arch/arm/mach-ux500/cpuidle.c with 90% similarity]
drivers/cpuidle/cpuidle.c
drivers/cpuidle/governors/ladder.c
drivers/cpuidle/governors/menu.c
drivers/cpuidle/sysfs.c
drivers/crypto/Kconfig
drivers/crypto/amcc/crypto4xx_alg.c
drivers/crypto/caam/Kconfig
drivers/crypto/caam/Makefile
drivers/crypto/caam/caamalg.c
drivers/crypto/caam/caamhash.c
drivers/crypto/caam/ctrl.c
drivers/crypto/caam/desc_constr.h
drivers/crypto/caam/intern.h
drivers/crypto/caam/jr.c
drivers/crypto/caam/jr.h
drivers/crypto/caam/key_gen.c
drivers/crypto/caam/regs.h
drivers/crypto/nx/nx-aes-cbc.c
drivers/crypto/nx/nx-aes-ccm.c
drivers/crypto/nx/nx-aes-ctr.c
drivers/crypto/nx/nx-aes-ecb.c
drivers/crypto/nx/nx-aes-gcm.c
drivers/crypto/nx/nx-aes-xcbc.c
drivers/crypto/nx/nx-sha256.c
drivers/crypto/nx/nx-sha512.c
drivers/crypto/nx/nx.c
drivers/crypto/nx/nx.h
drivers/crypto/omap-aes.c
drivers/crypto/omap-sham.c
drivers/crypto/sahara.c
drivers/crypto/tegra-aes.c
drivers/crypto/ux500/hash/hash_core.c
drivers/dma/acpi-dma.c
drivers/dma/dmaengine.c
drivers/dma/dw/core.c
drivers/dma/dw/platform.c
drivers/dma/edma.c
drivers/dma/ep93xx_dma.c
drivers/dma/fsldma.c
drivers/dma/imx-sdma.c
drivers/dma/iop-adma.c
drivers/dma/ipu/ipu_idmac.c
drivers/dma/mmp_pdma.c
drivers/dma/mmp_tdma.c
drivers/dma/mpc512x_dma.c
drivers/dma/mv_xor.c
drivers/dma/mxs-dma.c
drivers/dma/of-dma.c
drivers/dma/pch_dma.c
drivers/dma/pl330.c
drivers/dma/sh/shdma-of.c
drivers/dma/sh/shdma.c
drivers/dma/sh/sudmac.c
drivers/dma/sirf-dma.c
drivers/dma/ste_dma40.c
drivers/dma/tegra20-apb-dma.c
drivers/dma/timb_dma.c
drivers/dma/txx9dmac.c
drivers/edac/tile_edac.c
drivers/firewire/core-cdev.c
drivers/firewire/core-transaction.c
drivers/firewire/ohci.c
drivers/firmware/efi/efi-pstore.c
drivers/gpu/drm/Kconfig
drivers/gpu/drm/Makefile
drivers/gpu/drm/ast/ast_drv.c
drivers/gpu/drm/ast/ast_drv.h
drivers/gpu/drm/ast/ast_main.c
drivers/gpu/drm/ast/ast_ttm.c
drivers/gpu/drm/cirrus/cirrus_drv.c
drivers/gpu/drm/cirrus/cirrus_drv.h
drivers/gpu/drm/cirrus/cirrus_main.c
drivers/gpu/drm/cirrus/cirrus_ttm.c
drivers/gpu/drm/drm_agpsupport.c
drivers/gpu/drm/drm_bufs.c
drivers/gpu/drm/drm_context.c
drivers/gpu/drm/drm_crtc.c
drivers/gpu/drm/drm_dma.c
drivers/gpu/drm/drm_drv.c
drivers/gpu/drm/drm_edid.c
drivers/gpu/drm/drm_fb_cma_helper.c
drivers/gpu/drm/drm_flip_work.c [new file with mode: 0644]
drivers/gpu/drm/drm_fops.c
drivers/gpu/drm/drm_gem.c
drivers/gpu/drm/drm_gem_cma_helper.c
drivers/gpu/drm/drm_info.c
drivers/gpu/drm/drm_ioctl.c
drivers/gpu/drm/drm_memory.c
drivers/gpu/drm/drm_mm.c
drivers/gpu/drm/drm_modes.c
drivers/gpu/drm/drm_pci.c
drivers/gpu/drm/drm_platform.c
drivers/gpu/drm/drm_prime.c
drivers/gpu/drm/drm_proc.c [deleted file]
drivers/gpu/drm/drm_scatter.c
drivers/gpu/drm/drm_stub.c
drivers/gpu/drm/drm_vm.c
drivers/gpu/drm/drm_vma_manager.c [new file with mode: 0644]
drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
drivers/gpu/drm/exynos/exynos_drm_drv.c
drivers/gpu/drm/exynos/exynos_drm_gem.c
drivers/gpu/drm/exynos/exynos_drm_gem.h
drivers/gpu/drm/gma500/framebuffer.c
drivers/gpu/drm/gma500/gem.c
drivers/gpu/drm/gma500/gtt.c
drivers/gpu/drm/gma500/psb_drv.c
drivers/gpu/drm/gma500/psb_drv.h
drivers/gpu/drm/i2c/tda998x_drv.c
drivers/gpu/drm/i810/i810_dma.c
drivers/gpu/drm/i810/i810_drv.c
drivers/gpu/drm/i810/i810_drv.h
drivers/gpu/drm/i915/Makefile
drivers/gpu/drm/i915/dvo_ch7xxx.c
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_dma.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_context.c
drivers/gpu/drm/i915/i915_gem_debug.c
drivers/gpu/drm/i915/i915_gem_dmabuf.c
drivers/gpu/drm/i915/i915_gem_evict.c
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/i915_gem_gtt.c
drivers/gpu/drm/i915/i915_gem_stolen.c
drivers/gpu/drm/i915/i915_gem_tiling.c
drivers/gpu/drm/i915/i915_gpu_error.c [new file with mode: 0644]
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/i915_sysfs.c
drivers/gpu/drm/i915/i915_trace.h
drivers/gpu/drm/i915/intel_acpi.c
drivers/gpu/drm/i915/intel_crt.c
drivers/gpu/drm/i915/intel_ddi.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_dvo.c
drivers/gpu/drm/i915/intel_fb.c
drivers/gpu/drm/i915/intel_hdmi.c
drivers/gpu/drm/i915/intel_lvds.c
drivers/gpu/drm/i915/intel_overlay.c
drivers/gpu/drm/i915/intel_panel.c
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/i915/intel_ringbuffer.h
drivers/gpu/drm/i915/intel_sdvo.c
drivers/gpu/drm/i915/intel_sprite.c
drivers/gpu/drm/i915/intel_tv.c
drivers/gpu/drm/i915/intel_uncore.c [new file with mode: 0644]
drivers/gpu/drm/mga/mga_drv.c
drivers/gpu/drm/mga/mga_drv.h
drivers/gpu/drm/mga/mga_state.c
drivers/gpu/drm/mgag200/mgag200_drv.c
drivers/gpu/drm/mgag200/mgag200_drv.h
drivers/gpu/drm/mgag200/mgag200_main.c
drivers/gpu/drm/mgag200/mgag200_ttm.c
drivers/gpu/drm/nouveau/core/engine/disp/dport.c
drivers/gpu/drm/nouveau/nouveau_display.c
drivers/gpu/drm/nouveau/nouveau_display.h
drivers/gpu/drm/nouveau/nouveau_drm.c
drivers/gpu/drm/nouveau/nouveau_gem.c
drivers/gpu/drm/omapdrm/Makefile
drivers/gpu/drm/omapdrm/omap_drv.c
drivers/gpu/drm/omapdrm/omap_drv.h
drivers/gpu/drm/omapdrm/omap_fb.c
drivers/gpu/drm/omapdrm/omap_gem.c
drivers/gpu/drm/omapdrm/omap_gem_helpers.c [deleted file]
drivers/gpu/drm/omapdrm/omap_plane.c
drivers/gpu/drm/qxl/qxl_drv.c
drivers/gpu/drm/qxl/qxl_drv.h
drivers/gpu/drm/qxl/qxl_dumb.c
drivers/gpu/drm/qxl/qxl_gem.c
drivers/gpu/drm/qxl/qxl_ioctl.c
drivers/gpu/drm/qxl/qxl_object.c
drivers/gpu/drm/qxl/qxl_object.h
drivers/gpu/drm/qxl/qxl_release.c
drivers/gpu/drm/r128/r128_cce.c
drivers/gpu/drm/r128/r128_drv.c
drivers/gpu/drm/r128/r128_drv.h
drivers/gpu/drm/r128/r128_state.c
drivers/gpu/drm/radeon/atombios_dp.c
drivers/gpu/drm/radeon/cik.c
drivers/gpu/drm/radeon/evergreen.c
drivers/gpu/drm/radeon/ni.c
drivers/gpu/drm/radeon/r100.c
drivers/gpu/drm/radeon/r300.c
drivers/gpu/drm/radeon/r600.c
drivers/gpu/drm/radeon/r600_cp.c
drivers/gpu/drm/radeon/radeon.h
drivers/gpu/drm/radeon/radeon_cp.c
drivers/gpu/drm/radeon/radeon_drv.c
drivers/gpu/drm/radeon/radeon_gem.c
drivers/gpu/drm/radeon/radeon_kms.c
drivers/gpu/drm/radeon/radeon_object.c
drivers/gpu/drm/radeon/radeon_object.h
drivers/gpu/drm/radeon/radeon_prime.c
drivers/gpu/drm/radeon/radeon_uvd.c
drivers/gpu/drm/radeon/rv770.c
drivers/gpu/drm/radeon/si.c
drivers/gpu/drm/rcar-du/Kconfig
drivers/gpu/drm/rcar-du/Makefile
drivers/gpu/drm/rcar-du/rcar_du_crtc.c
drivers/gpu/drm/rcar-du/rcar_du_crtc.h
drivers/gpu/drm/rcar-du/rcar_du_drv.c
drivers/gpu/drm/rcar-du/rcar_du_drv.h
drivers/gpu/drm/rcar-du/rcar_du_encoder.c [new file with mode: 0644]
drivers/gpu/drm/rcar-du/rcar_du_encoder.h [new file with mode: 0644]
drivers/gpu/drm/rcar-du/rcar_du_group.c [new file with mode: 0644]
drivers/gpu/drm/rcar-du/rcar_du_group.h [new file with mode: 0644]
drivers/gpu/drm/rcar-du/rcar_du_kms.c
drivers/gpu/drm/rcar-du/rcar_du_kms.h
drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c [moved from drivers/gpu/drm/rcar-du/rcar_du_lvds.c with 57% similarity]
drivers/gpu/drm/rcar-du/rcar_du_lvdscon.h [moved from drivers/gpu/drm/rcar-du/rcar_du_lvds.h with 53% similarity]
drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c [new file with mode: 0644]
drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.h [new file with mode: 0644]
drivers/gpu/drm/rcar-du/rcar_du_plane.c
drivers/gpu/drm/rcar-du/rcar_du_plane.h
drivers/gpu/drm/rcar-du/rcar_du_regs.h
drivers/gpu/drm/rcar-du/rcar_du_vgacon.c [moved from drivers/gpu/drm/rcar-du/rcar_du_vga.c with 59% similarity]
drivers/gpu/drm/rcar-du/rcar_du_vgacon.h [moved from drivers/gpu/drm/rcar-du/rcar_du_vga.h with 56% similarity]
drivers/gpu/drm/rcar-du/rcar_lvds_regs.h [new file with mode: 0644]
drivers/gpu/drm/savage/savage_bci.c
drivers/gpu/drm/savage/savage_drv.c
drivers/gpu/drm/savage/savage_drv.h
drivers/gpu/drm/shmobile/shmob_drm_drv.c
drivers/gpu/drm/sis/sis_drv.c
drivers/gpu/drm/sis/sis_drv.h
drivers/gpu/drm/sis/sis_mm.c
drivers/gpu/drm/tdfx/tdfx_drv.c
drivers/gpu/drm/tilcdc/tilcdc_crtc.c
drivers/gpu/drm/tilcdc/tilcdc_drv.c
drivers/gpu/drm/tilcdc/tilcdc_slave.c
drivers/gpu/drm/ttm/ttm_bo.c
drivers/gpu/drm/ttm/ttm_bo_manager.c
drivers/gpu/drm/ttm/ttm_bo_util.c
drivers/gpu/drm/ttm/ttm_bo_vm.c
drivers/gpu/drm/udl/udl_drv.c
drivers/gpu/drm/udl/udl_drv.h
drivers/gpu/drm/udl/udl_gem.c
drivers/gpu/drm/udl/udl_main.c
drivers/gpu/drm/via/via_dma.c
drivers/gpu/drm/via/via_drv.c
drivers/gpu/drm/via/via_drv.h
drivers/gpu/drm/via/via_mm.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
drivers/gpu/host1x/drm/drm.c
drivers/gpu/host1x/drm/gem.c
drivers/gpu/host1x/drm/gem.h
drivers/hid/Kconfig
drivers/hid/Makefile
drivers/hid/hid-a4tech.c
drivers/hid/hid-apple.c
drivers/hid/hid-core.c
drivers/hid/hid-holtekff.c
drivers/hid/hid-ids.h
drivers/hid/hid-input.c
drivers/hid/hid-kye.c
drivers/hid/hid-logitech-dj.c
drivers/hid/hid-magicmouse.c
drivers/hid/hid-multitouch.c
drivers/hid/hid-ntrig.c
drivers/hid/hid-picolcd_debugfs.c
drivers/hid/hid-roccat-arvo.c
drivers/hid/hid-roccat-isku.c
drivers/hid/hid-roccat-kone.c
drivers/hid/hid-roccat-koneplus.c
drivers/hid/hid-roccat-kovaplus.c
drivers/hid/hid-sony.c
drivers/hid/hid-wiimote-core.c
drivers/hid/hid-xinmo.c [new file with mode: 0644]
drivers/hid/hid-zydacron.c
drivers/hid/hidraw.c
drivers/hid/i2c-hid/i2c-hid.c
drivers/hid/uhid.c
drivers/hid/usbhid/hid-core.c
drivers/hid/usbhid/usbhid.h
drivers/hwmon/Kconfig
drivers/hwmon/acpi_power_meter.c
drivers/hwmon/ads1015.c
drivers/hwmon/ads7828.c
drivers/hwmon/adt7462.c
drivers/hwmon/asus_atk0110.c
drivers/hwmon/coretemp.c
drivers/hwmon/ds620.c
drivers/hwmon/emc6w201.c
drivers/hwmon/f71805f.c
drivers/hwmon/f71882fg.c
drivers/hwmon/f75375s.c
drivers/hwmon/g762.c
drivers/hwmon/gpio-fan.c
drivers/hwmon/ina2xx.c
drivers/hwmon/it87.c
drivers/hwmon/lm87.c
drivers/hwmon/max197.c
drivers/hwmon/max6639.c
drivers/hwmon/mcp3021.c
drivers/hwmon/nct6775.c
drivers/hwmon/ntc_thermistor.c
drivers/hwmon/pc87427.c
drivers/hwmon/pmbus/pmbus_core.c
drivers/hwmon/s3c-hwmon.c
drivers/hwmon/sht15.c
drivers/hwmon/smsc47m1.c
drivers/hwmon/w83627ehf.c
drivers/hwmon/w83627hf.c
drivers/hwmon/w83792d.c
drivers/i2c/busses/Kconfig
drivers/i2c/busses/i2c-bfin-twi.c
drivers/i2c/busses/i2c-cbus-gpio.c
drivers/i2c/busses/i2c-davinci.c
drivers/i2c/busses/i2c-designware-core.c
drivers/i2c/busses/i2c-designware-platdrv.c
drivers/i2c/busses/i2c-gpio.c
drivers/i2c/busses/i2c-imx.c
drivers/i2c/busses/i2c-mpc.c
drivers/i2c/busses/i2c-mv64xxx.c
drivers/i2c/busses/i2c-mxs.c
drivers/i2c/busses/i2c-nomadik.c
drivers/i2c/busses/i2c-nuc900.c
drivers/i2c/busses/i2c-ocores.c
drivers/i2c/busses/i2c-omap.c
drivers/i2c/busses/i2c-pca-platform.c
drivers/i2c/busses/i2c-piix4.c
drivers/i2c/busses/i2c-pnx.c
drivers/i2c/busses/i2c-powermac.c
drivers/i2c/busses/i2c-puv3.c
drivers/i2c/busses/i2c-pxa.c
drivers/i2c/busses/i2c-rcar.c
drivers/i2c/busses/i2c-s3c2410.c
drivers/i2c/busses/i2c-s6000.c
drivers/i2c/busses/i2c-sh7760.c
drivers/i2c/busses/i2c-sh_mobile.c
drivers/i2c/busses/i2c-stu300.c
drivers/i2c/busses/i2c-tiny-usb.c
drivers/i2c/busses/i2c-xiic.c
drivers/i2c/i2c-core.c
drivers/i2c/i2c-smbus.c
drivers/i2c/muxes/i2c-arb-gpio-challenge.c
drivers/i2c/muxes/i2c-mux-gpio.c
drivers/i2c/muxes/i2c-mux-pca9541.c
drivers/i2c/muxes/i2c-mux-pca954x.c
drivers/i2c/muxes/i2c-mux-pinctrl.c
drivers/ide/ide-acpi.c
drivers/iio/light/adjd_s311.c
drivers/infiniband/core/cma.c
drivers/infiniband/core/uverbs.h
drivers/infiniband/core/uverbs_cmd.c
drivers/infiniband/core/verbs.c
drivers/infiniband/hw/amso1100/c2_ae.c
drivers/infiniband/hw/amso1100/c2_cm.c
drivers/infiniband/hw/cxgb3/iwch_cm.c
drivers/infiniband/hw/cxgb4/Kconfig
drivers/infiniband/hw/cxgb4/cm.c
drivers/infiniband/hw/cxgb4/cq.c
drivers/infiniband/hw/cxgb4/device.c
drivers/infiniband/hw/cxgb4/ev.c
drivers/infiniband/hw/cxgb4/iw_cxgb4.h
drivers/infiniband/hw/cxgb4/qp.c
drivers/infiniband/hw/cxgb4/t4.h
drivers/infiniband/hw/ehca/ipz_pt_fn.c
drivers/infiniband/hw/nes/nes_cm.c
drivers/infiniband/hw/ocrdma/ocrdma.h
drivers/infiniband/hw/ocrdma/ocrdma_abi.h
drivers/infiniband/hw/ocrdma/ocrdma_ah.c
drivers/infiniband/hw/ocrdma/ocrdma_hw.c
drivers/infiniband/hw/ocrdma/ocrdma_hw.h
drivers/infiniband/hw/ocrdma/ocrdma_sli.h
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
drivers/infiniband/hw/qib/qib.h
drivers/infiniband/hw/qib/qib_common.h
drivers/infiniband/hw/qib/qib_file_ops.c
drivers/infiniband/hw/qib/qib_init.c
drivers/infiniband/hw/qib/qib_pcie.c
drivers/infiniband/hw/qib/qib_sdma.c
drivers/infiniband/hw/qib/qib_user_sdma.c
drivers/infiniband/ulp/ipoib/ipoib_cm.c
drivers/infiniband/ulp/ipoib/ipoib_main.c
drivers/infiniband/ulp/iser/iscsi_iser.c
drivers/infiniband/ulp/iser/iscsi_iser.h
drivers/infiniband/ulp/iser/iser_initiator.c
drivers/infiniband/ulp/iser/iser_memory.c
drivers/infiniband/ulp/iser/iser_verbs.c
drivers/input/joystick/as5011.c
drivers/input/joystick/maplecontrol.c
drivers/input/keyboard/imx_keypad.c
drivers/input/keyboard/max7359_keypad.c
drivers/input/keyboard/nspire-keypad.c
drivers/input/keyboard/qt1070.c
drivers/input/keyboard/spear-keyboard.c
drivers/input/keyboard/tegra-kbc.c
drivers/input/misc/pwm-beeper.c
drivers/input/misc/twl6040-vibra.c
drivers/input/misc/wistron_btns.c
drivers/input/mouse/elantech.c
drivers/input/mouse/elantech.h
drivers/input/mouse/lifebook.c
drivers/input/mouse/synaptics.c
drivers/input/serio/arc_ps2.c
drivers/input/serio/olpc_apsp.c
drivers/input/tablet/wacom_sys.c
drivers/input/tablet/wacom_wac.c
drivers/input/touchscreen/cy8ctmg110_ts.c
drivers/input/touchscreen/eeti_ts.c
drivers/input/touchscreen/htcpen.c
drivers/iommu/Kconfig
drivers/iommu/Makefile
drivers/iommu/amd_iommu.c
drivers/iommu/amd_iommu_init.c
drivers/iommu/arm-smmu.c
drivers/iommu/exynos-iommu.c
drivers/iommu/fsl_pamu.c [new file with mode: 0644]
drivers/iommu/fsl_pamu.h [new file with mode: 0644]
drivers/iommu/fsl_pamu_domain.c [new file with mode: 0644]
drivers/iommu/fsl_pamu_domain.h [new file with mode: 0644]
drivers/iommu/intel-iommu.c
drivers/irqchip/Kconfig
drivers/irqchip/Makefile
drivers/irqchip/irq-imgpdc.c [new file with mode: 0644]
drivers/isdn/hardware/mISDN/hfcpci.c
drivers/macintosh/ams/ams-input.c
drivers/md/Makefile
drivers/md/dm-cache-policy-mq.c
drivers/md/dm-cache-target.c
drivers/md/dm-crypt.c
drivers/md/dm-ioctl.c
drivers/md/dm-kcopyd.c
drivers/md/dm-raid1.c
drivers/md/dm-stats.c [new file with mode: 0644]
drivers/md/dm-stats.h [new file with mode: 0644]
drivers/md/dm-thin.c
drivers/md/dm.c
drivers/md/dm.h
drivers/md/md.c
drivers/md/md.h
drivers/md/persistent-data/dm-block-manager.c
drivers/md/persistent-data/dm-block-manager.h
drivers/md/persistent-data/dm-btree.c
drivers/md/persistent-data/dm-space-map-common.c
drivers/md/raid5.c
drivers/md/raid5.h
drivers/media/dvb-core/dvb-usb-ids.h
drivers/media/i2c/adv7343.c
drivers/media/i2c/ml86v7667.c
drivers/media/i2c/ths8200.c
drivers/media/i2c/tvp514x.c
drivers/media/i2c/tvp7002.c
drivers/media/pci/bt8xx/bttv-cards.c
drivers/media/pci/bt8xx/bttvp.h
drivers/media/pci/cx23885/cx23885-av.c
drivers/media/pci/cx23885/cx23885-dvb.c
drivers/media/pci/cx23885/cx23885-video.c
drivers/media/pci/cx23885/cx23885-video.h [new file with mode: 0644]
drivers/media/platform/coda.c
drivers/media/platform/coda.h
drivers/media/platform/davinci/vpbe_display.c
drivers/media/platform/davinci/vpbe_osd.c
drivers/media/platform/davinci/vpbe_venc.c
drivers/media/platform/davinci/vpif_capture.c
drivers/media/platform/davinci/vpif_capture.h
drivers/media/platform/davinci/vpif_display.c
drivers/media/platform/davinci/vpif_display.h
drivers/media/platform/davinci/vpss.c
drivers/media/platform/marvell-ccic/cafe-driver.c
drivers/media/platform/marvell-ccic/mcam-core.c
drivers/media/platform/marvell-ccic/mcam-core.h
drivers/media/platform/marvell-ccic/mmp-driver.c
drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
drivers/media/platform/soc_camera/soc_camera.c
drivers/media/radio/radio-aztech.c
drivers/media/radio/radio-maxiradio.c
drivers/media/rc/ene_ir.c
drivers/media/rc/ene_ir.h
drivers/media/rc/iguanair.c
drivers/media/rc/ir-lirc-codec.c
drivers/media/rc/lirc_dev.c
drivers/media/rc/rc-main.c
drivers/media/rc/redrat3.c
drivers/media/usb/dvb-usb-v2/Kconfig
drivers/media/usb/dvb-usb/dib0700_devices.c
drivers/media/usb/dvb-usb/m920x.c
drivers/media/usb/em28xx/em28xx-video.c
drivers/media/usb/gspca/vicam.c
drivers/media/usb/s2255/s2255drv.c
drivers/media/usb/stk1160/Kconfig
drivers/media/usb/stk1160/stk1160-v4l.c
drivers/media/usb/tlg2300/pd-main.c
drivers/media/usb/usbtv/usbtv.c
drivers/media/v4l2-core/v4l2-async.c
drivers/media/v4l2-core/v4l2-mem2mem.c
drivers/mfd/88pm800.c
drivers/mfd/88pm805.c
drivers/mfd/88pm860x-core.c
drivers/mfd/Kconfig
drivers/mfd/Makefile
drivers/mfd/aat2870-core.c
drivers/mfd/ab3100-core.c
drivers/mfd/ab8500-gpadc.c
drivers/mfd/adp5520.c
drivers/mfd/arizona-core.c
drivers/mfd/as3711.c
drivers/mfd/asic3.c
drivers/mfd/da903x.c
drivers/mfd/da9052-core.c
drivers/mfd/da9055-core.c
drivers/mfd/da9055-i2c.c
drivers/mfd/da9063-core.c [new file with mode: 0644]
drivers/mfd/da9063-i2c.c [new file with mode: 0644]
drivers/mfd/da9063-irq.c [new file with mode: 0644]
drivers/mfd/db8500-prcmu.c
drivers/mfd/dm355evm_msp.c
drivers/mfd/ezx-pcap.c
drivers/mfd/htc-egpio.c
drivers/mfd/htc-i2cpld.c
drivers/mfd/htc-pasic3.c
drivers/mfd/intel_msic.c
drivers/mfd/kempld-core.c
drivers/mfd/lm3533-core.c
drivers/mfd/lp8788.c
drivers/mfd/lpc_ich.c
drivers/mfd/max77686.c
drivers/mfd/max77693.c
drivers/mfd/max8925-i2c.c
drivers/mfd/max8997.c
drivers/mfd/max8998.c
drivers/mfd/mcp-sa11x0.c
drivers/mfd/menelaus.c
drivers/mfd/mfd-core.c
drivers/mfd/omap-usb-host.c
drivers/mfd/palmas.c
drivers/mfd/pcf50633-adc.c
drivers/mfd/pcf50633-core.c
drivers/mfd/pm8921-core.c
drivers/mfd/rc5t583.c
drivers/mfd/rtl8411.c
drivers/mfd/rts5209.c
drivers/mfd/rts5227.c
drivers/mfd/rts5229.c
drivers/mfd/rts5249.c
drivers/mfd/rtsx_pcr.c
drivers/mfd/rtsx_pcr.h
drivers/mfd/sec-core.c
drivers/mfd/si476x-i2c.c
drivers/mfd/sm501.c
drivers/mfd/sta2x11-mfd.c
drivers/mfd/stmpe.c
drivers/mfd/syscon.c
drivers/mfd/t7l66xb.c
drivers/mfd/tc3589x.c
drivers/mfd/tc6387xb.c
drivers/mfd/tc6393xb.c
drivers/mfd/ti-ssp.c
drivers/mfd/ti_am335x_tscadc.c
drivers/mfd/tps6105x.c
drivers/mfd/tps65010.c
drivers/mfd/tps65090.c
drivers/mfd/tps6586x.c
drivers/mfd/tps65912-core.c
drivers/mfd/tps80031.c
drivers/mfd/twl-core.c
drivers/mfd/twl4030-audio.c
drivers/mfd/twl4030-madc.c
drivers/mfd/twl4030-power.c
drivers/mfd/twl6030-irq.c
drivers/mfd/twl6040.c
drivers/mfd/ucb1400_core.c
drivers/mfd/ucb1x00-core.c
drivers/mfd/wl1273-core.c
drivers/mfd/wm5110-tables.c
drivers/mfd/wm831x-core.c
drivers/mfd/wm831x-irq.c
drivers/mfd/wm831x-spi.c
drivers/mfd/wm8350-i2c.c
drivers/mfd/wm8400-core.c
drivers/mfd/wm8994-core.c
drivers/mfd/wm8994-irq.c
drivers/mmc/core/core.c
drivers/mmc/host/dw_mmc.c
drivers/mmc/host/pxamci.c
drivers/mmc/host/sdhci.c
drivers/mmc/host/tmio_mmc_pio.c
drivers/mtd/bcm63xxpart.c
drivers/mtd/chips/cfi_cmdset_0002.c
drivers/mtd/chips/gen_probe.c
drivers/mtd/chips/jedec_probe.c
drivers/mtd/devices/Kconfig
drivers/mtd/devices/bcm47xxsflash.c
drivers/mtd/devices/bcm47xxsflash.h
drivers/mtd/devices/block2mtd.c
drivers/mtd/devices/elm.c
drivers/mtd/devices/m25p80.c
drivers/mtd/devices/mtd_dataflash.c
drivers/mtd/devices/spear_smi.c
drivers/mtd/devices/sst25l.c
drivers/mtd/maps/Kconfig
drivers/mtd/maps/Makefile
drivers/mtd/maps/bfin-async-flash.c
drivers/mtd/maps/cfi_flagadm.c
drivers/mtd/maps/gpio-addr-flash.c
drivers/mtd/maps/impa7.c
drivers/mtd/maps/ixp4xx.c
drivers/mtd/maps/latch-addr-flash.c
drivers/mtd/maps/octagon-5066.c [deleted file]
drivers/mtd/maps/physmap.c
drivers/mtd/maps/plat-ram.c
drivers/mtd/maps/pxa2xx-flash.c
drivers/mtd/maps/rbtx4939-flash.c
drivers/mtd/maps/sa1100-flash.c
drivers/mtd/maps/vmax301.c [deleted file]
drivers/mtd/mtdcore.c
drivers/mtd/mtdpart.c
drivers/mtd/nand/Kconfig
drivers/mtd/nand/Makefile
drivers/mtd/nand/alauda.c [deleted file]
drivers/mtd/nand/ams-delta.c
drivers/mtd/nand/atmel_nand.c
drivers/mtd/nand/atmel_nand_nfc.h [new file with mode: 0644]
drivers/mtd/nand/au1550nd.c
drivers/mtd/nand/bf5xx_nand.c
drivers/mtd/nand/davinci_nand.c
drivers/mtd/nand/denali.c
drivers/mtd/nand/diskonchip.c
drivers/mtd/nand/docg4.c
drivers/mtd/nand/fsl_ifc_nand.c
drivers/mtd/nand/fsmc_nand.c
drivers/mtd/nand/gpio.c
drivers/mtd/nand/gpmi-nand/gpmi-nand.c
drivers/mtd/nand/jz4740_nand.c
drivers/mtd/nand/lpc32xx_mlc.c
drivers/mtd/nand/lpc32xx_slc.c
drivers/mtd/nand/mxc_nand.c
drivers/mtd/nand/nand_base.c
drivers/mtd/nand/nand_bbt.c
drivers/mtd/nand/nand_ids.c
drivers/mtd/nand/nandsim.c
drivers/mtd/nand/nuc900_nand.c
drivers/mtd/nand/omap2.c
drivers/mtd/nand/orion_nand.c
drivers/mtd/nand/plat_nand.c
drivers/mtd/nand/pxa3xx_nand.c
drivers/mtd/nand/r852.c
drivers/mtd/nand/s3c2410.c
drivers/mtd/nand/sh_flctl.c
drivers/mtd/nand/sharpsl.c
drivers/mtd/nand/sm_common.c
drivers/mtd/nand/tmio_nand.c
drivers/mtd/nand/txx9ndfmc.c
drivers/mtd/ofpart.c
drivers/mtd/onenand/generic.c
drivers/mtd/onenand/omap2.c
drivers/mtd/onenand/onenand_bbt.c
drivers/mtd/onenand/samsung.c
drivers/mtd/sm_ftl.c
drivers/mtd/tests/Makefile
drivers/mtd/tests/mtd_test.c [new file with mode: 0644]
drivers/mtd/tests/mtd_test.h [new file with mode: 0644]
drivers/mtd/tests/nandbiterrs.c [moved from drivers/mtd/tests/mtd_nandbiterrs.c with 93% similarity]
drivers/mtd/tests/oobtest.c [moved from drivers/mtd/tests/mtd_oobtest.c with 90% similarity]
drivers/mtd/tests/pagetest.c [moved from drivers/mtd/tests/mtd_pagetest.c with 63% similarity]
drivers/mtd/tests/readtest.c [moved from drivers/mtd/tests/mtd_readtest.c with 83% similarity]
drivers/mtd/tests/speedtest.c [moved from drivers/mtd/tests/mtd_speedtest.c with 69% similarity]
drivers/mtd/tests/stresstest.c [moved from drivers/mtd/tests/mtd_stresstest.c with 74% similarity]
drivers/mtd/tests/subpagetest.c [moved from drivers/mtd/tests/mtd_subpagetest.c with 86% similarity]
drivers/mtd/tests/torturetest.c [moved from drivers/mtd/tests/mtd_torturetest.c with 90% similarity]
drivers/mtd/ubi/fastmap.c
drivers/mtd/ubi/wl.c
drivers/net/bonding/bond_3ad.c
drivers/net/bonding/bond_alb.c
drivers/net/bonding/bond_main.c
drivers/net/bonding/bond_procfs.c
drivers/net/bonding/bond_sysfs.c
drivers/net/bonding/bonding.h
drivers/net/can/flexcan.c
drivers/net/ethernet/8390/ax88796.c
drivers/net/ethernet/Kconfig
drivers/net/ethernet/Makefile
drivers/net/ethernet/amd/pcnet32.c
drivers/net/ethernet/broadcom/Kconfig
drivers/net/ethernet/broadcom/bcm63xx_enet.c
drivers/net/ethernet/broadcom/bnx2.c
drivers/net/ethernet/broadcom/bnx2.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
drivers/net/ethernet/broadcom/cnic.c
drivers/net/ethernet/broadcom/cnic.h
drivers/net/ethernet/broadcom/cnic_defs.h
drivers/net/ethernet/broadcom/cnic_if.h
drivers/net/ethernet/broadcom/tg3.c
drivers/net/ethernet/broadcom/tg3.h
drivers/net/ethernet/brocade/bna/bna_tx_rx.c
drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
drivers/net/ethernet/cisco/enic/Makefile
drivers/net/ethernet/cisco/enic/enic.h
drivers/net/ethernet/cisco/enic/enic_api.c [new file with mode: 0644]
drivers/net/ethernet/cisco/enic/enic_api.h [new file with mode: 0644]
drivers/net/ethernet/cisco/enic/enic_dev.h
drivers/net/ethernet/cisco/enic/enic_ethtool.c [new file with mode: 0644]
drivers/net/ethernet/cisco/enic/enic_main.c
drivers/net/ethernet/cisco/enic/enic_res.h
drivers/net/ethernet/cisco/enic/vnic_devcmd.h
drivers/net/ethernet/cisco/enic/vnic_rq.c
drivers/net/ethernet/cisco/enic/vnic_rq.h
drivers/net/ethernet/cisco/enic/vnic_wq.c
drivers/net/ethernet/cisco/enic/vnic_wq.h
drivers/net/ethernet/dec/tulip/tulip_core.c
drivers/net/ethernet/dlink/sundance.c
drivers/net/ethernet/emulex/benet/be.h
drivers/net/ethernet/emulex/benet/be_cmds.c
drivers/net/ethernet/emulex/benet/be_cmds.h
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/emulex/benet/be_roce.c
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/freescale/fec_mpc52xx_phy.c
drivers/net/ethernet/freescale/fsl_pq_mdio.c
drivers/net/ethernet/freescale/gianfar.c
drivers/net/ethernet/freescale/gianfar.h
drivers/net/ethernet/freescale/gianfar_ethtool.c
drivers/net/ethernet/freescale/ucc_geth.c
drivers/net/ethernet/i825xx/sun3_82586.h
drivers/net/ethernet/ibm/emac/core.c
drivers/net/ethernet/intel/e100.c
drivers/net/ethernet/intel/e1000e/82571.c
drivers/net/ethernet/intel/e1000e/e1000.h
drivers/net/ethernet/intel/e1000e/ethtool.c
drivers/net/ethernet/intel/e1000e/hw.h
drivers/net/ethernet/intel/e1000e/ich8lan.c
drivers/net/ethernet/intel/e1000e/ich8lan.h
drivers/net/ethernet/intel/e1000e/netdev.c
drivers/net/ethernet/intel/ixgbe/ixgbe.h
drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
drivers/net/ethernet/marvell/mvneta.c
drivers/net/ethernet/mellanox/mlx4/cmd.c
drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
drivers/net/ethernet/mellanox/mlx4/en_tx.c
drivers/net/ethernet/mellanox/mlx4/eq.c
drivers/net/ethernet/mellanox/mlx4/fw.c
drivers/net/ethernet/mellanox/mlx4/fw.h
drivers/net/ethernet/mellanox/mlx4/main.c
drivers/net/ethernet/mellanox/mlx4/mcg.c
drivers/net/ethernet/mellanox/mlx4/mlx4.h
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
drivers/net/ethernet/micrel/ks8842.c
drivers/net/ethernet/micrel/ks8851_mll.c
drivers/net/ethernet/moxa/Kconfig [new file with mode: 0644]
drivers/net/ethernet/moxa/Makefile [new file with mode: 0644]
drivers/net/ethernet/moxa/moxart_ether.c [new file with mode: 0644]
drivers/net/ethernet/moxa/moxart_ether.h [new file with mode: 0644]
drivers/net/ethernet/myricom/myri10ge/myri10ge.c
drivers/net/ethernet/nuvoton/w90p910_ether.c
drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c
drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h
drivers/net/ethernet/pasemi/pasemi_mac.c
drivers/net/ethernet/pasemi/pasemi_mac.h
drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
drivers/net/ethernet/qlogic/qlge/qlge.h
drivers/net/ethernet/realtek/r8169.c
drivers/net/ethernet/renesas/Kconfig
drivers/net/ethernet/renesas/sh_eth.c
drivers/net/ethernet/renesas/sh_eth.h
drivers/net/ethernet/sis/sis190.c
drivers/net/ethernet/sis/sis900.c
drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
drivers/net/ethernet/sun/sunbmac.c
drivers/net/ethernet/sun/sunhme.c
drivers/net/ethernet/ti/cpsw.c
drivers/net/ethernet/ti/cpsw.h [moved from include/linux/platform_data/cpsw.h with 86% similarity]
drivers/net/ethernet/ti/davinci_mdio.c
drivers/net/ethernet/tile/Kconfig
drivers/net/ethernet/tile/tilegx.c
drivers/net/ethernet/tile/tilepro.c
drivers/net/ethernet/via/via-rhine.c
drivers/net/ethernet/via/via-velocity.c
drivers/net/ethernet/xilinx/xilinx_emaclite.c
drivers/net/irda/via-ircc.c
drivers/net/macvlan.c
drivers/net/macvtap.c
drivers/net/phy/mdio-mux-mmioreg.c
drivers/net/phy/micrel.c
drivers/net/phy/realtek.c
drivers/net/ppp/pptp.c
drivers/net/team/team.c
drivers/net/tun.c
drivers/net/usb/asix.h
drivers/net/usb/asix_devices.c
drivers/net/usb/ax88172a.c
drivers/net/usb/ax88179_178a.c
drivers/net/usb/r8152.c
drivers/net/usb/usbnet.c
drivers/net/virtio_net.c
drivers/net/vxlan.c
drivers/net/wan/sbni.c
drivers/net/wireless/ath/ath.h
drivers/net/wireless/ath/ath10k/bmi.c
drivers/net/wireless/ath/ath10k/bmi.h
drivers/net/wireless/ath/ath10k/ce.c
drivers/net/wireless/ath/ath10k/core.c
drivers/net/wireless/ath/ath10k/core.h
drivers/net/wireless/ath/ath10k/debug.c
drivers/net/wireless/ath/ath10k/hif.h
drivers/net/wireless/ath/ath10k/htc.c
drivers/net/wireless/ath/ath10k/htc.h
drivers/net/wireless/ath/ath10k/htt.c
drivers/net/wireless/ath/ath10k/htt.h
drivers/net/wireless/ath/ath10k/htt_rx.c
drivers/net/wireless/ath/ath10k/htt_tx.c
drivers/net/wireless/ath/ath10k/mac.c
drivers/net/wireless/ath/ath10k/mac.h
drivers/net/wireless/ath/ath10k/pci.c
drivers/net/wireless/ath/ath10k/pci.h
drivers/net/wireless/ath/ath10k/wmi.c
drivers/net/wireless/ath/ath10k/wmi.h
drivers/net/wireless/ath/ath5k/ath5k.h
drivers/net/wireless/ath/ath5k/base.c
drivers/net/wireless/ath/ath5k/base.h
drivers/net/wireless/ath/ath5k/mac80211-ops.c
drivers/net/wireless/ath/ath5k/pcu.c
drivers/net/wireless/ath/ath5k/qcu.c
drivers/net/wireless/ath/ath6kl/init.c
drivers/net/wireless/ath/ath6kl/main.c
drivers/net/wireless/ath/ath6kl/testmode.c
drivers/net/wireless/ath/ath6kl/testmode.h
drivers/net/wireless/ath/ath9k/Kconfig
drivers/net/wireless/ath/ath9k/antenna.c
drivers/net/wireless/ath/ath9k/ar9002_phy.c
drivers/net/wireless/ath/ath9k/ar9002_phy.h
drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
drivers/net/wireless/ath/ath9k/ar9003_mac.c
drivers/net/wireless/ath/ath9k/ar9003_phy.c
drivers/net/wireless/ath/ath9k/ar9003_phy.h
drivers/net/wireless/ath/ath9k/ath9k.h
drivers/net/wireless/ath/ath9k/beacon.c
drivers/net/wireless/ath/ath9k/common.c
drivers/net/wireless/ath/ath9k/common.h
drivers/net/wireless/ath/ath9k/debug.c
drivers/net/wireless/ath/ath9k/debug.h
drivers/net/wireless/ath/ath9k/eeprom_4k.c
drivers/net/wireless/ath/ath9k/hif_usb.c
drivers/net/wireless/ath/ath9k/htc_drv_main.c
drivers/net/wireless/ath/ath9k/hw-ops.h
drivers/net/wireless/ath/ath9k/hw.c
drivers/net/wireless/ath/ath9k/hw.h
drivers/net/wireless/ath/ath9k/init.c
drivers/net/wireless/ath/ath9k/mac.c
drivers/net/wireless/ath/ath9k/mac.h
drivers/net/wireless/ath/ath9k/main.c
drivers/net/wireless/ath/ath9k/pci.c
drivers/net/wireless/ath/ath9k/phy.h
drivers/net/wireless/ath/ath9k/rc.c
drivers/net/wireless/ath/ath9k/recv.c
drivers/net/wireless/ath/ath9k/xmit.c
drivers/net/wireless/ath/wil6210/Makefile
drivers/net/wireless/ath/wil6210/debugfs.c
drivers/net/wireless/ath/wil6210/netdev.c
drivers/net/wireless/ath/wil6210/trace.h
drivers/net/wireless/ath/wil6210/txrx.c
drivers/net/wireless/ath/wil6210/txrx.h
drivers/net/wireless/ath/wil6210/wil6210.h
drivers/net/wireless/ath/wil6210/wmi.c
drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
drivers/net/wireless/brcm80211/brcmfmac/dhd.h
drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
drivers/net/wireless/brcm80211/brcmfmac/fweh.c
drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h
drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c
drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
drivers/net/wireless/brcm80211/brcmfmac/usb.c
drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
drivers/net/wireless/brcm80211/brcmsmac/ampdu.c
drivers/net/wireless/brcm80211/brcmsmac/dma.c
drivers/net/wireless/brcm80211/brcmsmac/main.c
drivers/net/wireless/cw1200/wsm.c
drivers/net/wireless/cw1200/wsm.h
drivers/net/wireless/hostap/hostap_ioctl.c
drivers/net/wireless/hostap/hostap_main.c
drivers/net/wireless/iwlegacy/3945-rs.c
drivers/net/wireless/iwlegacy/3945.c
drivers/net/wireless/iwlegacy/4965-mac.c
drivers/net/wireless/iwlegacy/4965-rs.c
drivers/net/wireless/iwlwifi/Kconfig
drivers/net/wireless/iwlwifi/dvm/agn.h
drivers/net/wireless/iwlwifi/dvm/debugfs.c
drivers/net/wireless/iwlwifi/dvm/dev.h
drivers/net/wireless/iwlwifi/dvm/mac80211.c
drivers/net/wireless/iwlwifi/dvm/main.c
drivers/net/wireless/iwlwifi/dvm/rs.c
drivers/net/wireless/iwlwifi/dvm/rxon.c
drivers/net/wireless/iwlwifi/dvm/scan.c
drivers/net/wireless/iwlwifi/dvm/tx.c
drivers/net/wireless/iwlwifi/iwl-7000.c
drivers/net/wireless/iwlwifi/iwl-config.h
drivers/net/wireless/iwlwifi/iwl-debug.h
drivers/net/wireless/iwlwifi/iwl-devtrace.h
drivers/net/wireless/iwlwifi/iwl-drv.c
drivers/net/wireless/iwlwifi/iwl-fw.h
drivers/net/wireless/iwlwifi/iwl-io.c
drivers/net/wireless/iwlwifi/iwl-io.h
drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
drivers/net/wireless/iwlwifi/iwl-op-mode.h
drivers/net/wireless/iwlwifi/iwl-prph.h
drivers/net/wireless/iwlwifi/iwl-trans.h
drivers/net/wireless/iwlwifi/mvm/Makefile
drivers/net/wireless/iwlwifi/mvm/bt-coex.c
drivers/net/wireless/iwlwifi/mvm/constants.h [new file with mode: 0644]
drivers/net/wireless/iwlwifi/mvm/d3.c
drivers/net/wireless/iwlwifi/mvm/debugfs.c
drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h
drivers/net/wireless/iwlwifi/mvm/fw-api.h
drivers/net/wireless/iwlwifi/mvm/fw.c
drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
drivers/net/wireless/iwlwifi/mvm/mac80211.c
drivers/net/wireless/iwlwifi/mvm/mvm.h
drivers/net/wireless/iwlwifi/mvm/ops.c
drivers/net/wireless/iwlwifi/mvm/power.c
drivers/net/wireless/iwlwifi/mvm/power_legacy.c [new file with mode: 0644]
drivers/net/wireless/iwlwifi/mvm/quota.c
drivers/net/wireless/iwlwifi/mvm/rs.c
drivers/net/wireless/iwlwifi/mvm/rs.h
drivers/net/wireless/iwlwifi/mvm/rx.c
drivers/net/wireless/iwlwifi/mvm/scan.c
drivers/net/wireless/iwlwifi/mvm/sta.c
drivers/net/wireless/iwlwifi/mvm/time-event.c
drivers/net/wireless/iwlwifi/mvm/tt.c
drivers/net/wireless/iwlwifi/mvm/tx.c
drivers/net/wireless/iwlwifi/mvm/utils.c
drivers/net/wireless/iwlwifi/pcie/drv.c
drivers/net/wireless/iwlwifi/pcie/internal.h
drivers/net/wireless/iwlwifi/pcie/rx.c
drivers/net/wireless/iwlwifi/pcie/trans.c
drivers/net/wireless/iwlwifi/pcie/tx.c
drivers/net/wireless/mac80211_hwsim.c
drivers/net/wireless/mwifiex/11n_aggr.c
drivers/net/wireless/mwifiex/cfg80211.c
drivers/net/wireless/mwifiex/cfp.c
drivers/net/wireless/mwifiex/decl.h
drivers/net/wireless/mwifiex/fw.h
drivers/net/wireless/mwifiex/ie.c
drivers/net/wireless/mwifiex/init.c
drivers/net/wireless/mwifiex/ioctl.h
drivers/net/wireless/mwifiex/join.c
drivers/net/wireless/mwifiex/main.c
drivers/net/wireless/mwifiex/main.h
drivers/net/wireless/mwifiex/pcie.c
drivers/net/wireless/mwifiex/scan.c
drivers/net/wireless/mwifiex/sdio.c
drivers/net/wireless/mwifiex/sta_cmd.c
drivers/net/wireless/mwifiex/sta_cmdresp.c
drivers/net/wireless/mwifiex/sta_event.c
drivers/net/wireless/mwifiex/sta_ioctl.c
drivers/net/wireless/mwifiex/uap_cmd.c
drivers/net/wireless/mwifiex/uap_txrx.c
drivers/net/wireless/mwifiex/usb.c
drivers/net/wireless/mwifiex/wmm.c
drivers/net/wireless/rt2x00/Kconfig
drivers/net/wireless/rt2x00/rt2800.h
drivers/net/wireless/rt2x00/rt2800lib.c
drivers/net/wireless/rt2x00/rt2800lib.h
drivers/net/wireless/rt2x00/rt2800pci.c
drivers/net/wireless/rt2x00/rt2800usb.c
drivers/net/wireless/rt2x00/rt2x00.h
drivers/net/wireless/rt2x00/rt2x00queue.c
drivers/net/wireless/rtlwifi/ps.c
drivers/net/wireless/rtlwifi/rc.c
drivers/net/wireless/rtlwifi/rtl8188ee/fw.c
drivers/net/wireless/rtlwifi/rtl8192cu/hw.h
drivers/net/wireless/rtlwifi/rtl8192cu/sw.h
drivers/net/wireless/rtlwifi/rtl8723ae/fw.c
drivers/net/wireless/ti/wlcore/main.c
drivers/net/wireless/ti/wlcore/testmode.c
drivers/net/wireless/ti/wlcore/testmode.h
drivers/net/wireless/zd1201.c
drivers/nfc/nfcsim.c
drivers/nfc/pn533.c
drivers/nfc/pn544/i2c.c
drivers/nfc/pn544/mei.c
drivers/nfc/pn544/pn544.c
drivers/nfc/pn544/pn544.h
drivers/of/fdt.c
drivers/of/platform.c
drivers/pci/host/Kconfig
drivers/pci/host/Makefile
drivers/pci/host/pci-exynos.c [new file with mode: 0644]
drivers/pci/host/pci-mvebu.c
drivers/pci/host/pcie-designware.c
drivers/pci/host/pcie-designware.h [new file with mode: 0644]
drivers/pci/hotplug/acpiphp.h
drivers/pci/hotplug/acpiphp_core.c
drivers/pci/hotplug/acpiphp_glue.c
drivers/pci/hotplug/acpiphp_ibm.c
drivers/pci/hotplug/pciehp.h
drivers/pci/hotplug/pciehp_core.c
drivers/pci/hotplug/pciehp_hpc.c
drivers/pci/iov.c
drivers/pci/pci-acpi.c
drivers/pci/pci-sysfs.c
drivers/pci/pci.c
drivers/pci/pci.h
drivers/pci/pcie/Kconfig
drivers/pci/pcie/aer/aerdrv.c
drivers/pci/pcie/aer/aerdrv.h
drivers/pci/pcie/aer/aerdrv_core.c
drivers/pci/probe.c
drivers/pci/quirks.c
drivers/pci/setup-bus.c
drivers/platform/x86/asus-laptop.c
drivers/platform/x86/eeepc-laptop.c
drivers/platform/x86/fujitsu-laptop.c
drivers/platform/x86/panasonic-laptop.c
drivers/platform/x86/sony-laptop.c
drivers/platform/x86/thinkpad_acpi.c
drivers/pnp/driver.c
drivers/pnp/pnpacpi/core.c
drivers/power/88pm860x_charger.c
drivers/power/Kconfig
drivers/power/collie_battery.c
drivers/power/pm2301_charger.c
drivers/power/power_supply_sysfs.c
drivers/power/reset/Kconfig
drivers/power/reset/Makefile
drivers/power/reset/msm-poweroff.c [new file with mode: 0644]
drivers/power/reset/xgene-reboot.c [new file with mode: 0644]
drivers/power/tosa_battery.c
drivers/regulator/88pm800.c [new file with mode: 0644]
drivers/regulator/88pm8607.c
drivers/regulator/Kconfig
drivers/regulator/Makefile
drivers/regulator/aat2870-regulator.c
drivers/regulator/ab3100.c
drivers/regulator/ad5398.c
drivers/regulator/as3711-regulator.c
drivers/regulator/core.c
drivers/regulator/da903x.c
drivers/regulator/da9052-regulator.c
drivers/regulator/da9055-regulator.c
drivers/regulator/da9210-regulator.c [new file with mode: 0644]
drivers/regulator/da9210-regulator.h [new file with mode: 0644]
drivers/regulator/fan53555.c
drivers/regulator/fixed.c
drivers/regulator/gpio-regulator.c
drivers/regulator/helpers.c [new file with mode: 0644]
drivers/regulator/isl6271a-regulator.c
drivers/regulator/lp3971.c
drivers/regulator/lp3972.c
drivers/regulator/lp872x.c
drivers/regulator/lp8755.c
drivers/regulator/max1586.c
drivers/regulator/max8649.c
drivers/regulator/max8660.c
drivers/regulator/max8925-regulator.c
drivers/regulator/max8952.c
drivers/regulator/max8973-regulator.c
drivers/regulator/of_regulator.c
drivers/regulator/palmas-regulator.c
drivers/regulator/pcap-regulator.c
drivers/regulator/pcf50633-regulator.c
drivers/regulator/pfuze100-regulator.c [new file with mode: 0644]
drivers/regulator/s2mps11.c
drivers/regulator/tps51632-regulator.c
drivers/regulator/tps62360-regulator.c
drivers/regulator/tps65023-regulator.c
drivers/regulator/tps6524x-regulator.c
drivers/regulator/tps65912-regulator.c
drivers/regulator/twl-regulator.c
drivers/regulator/userspace-consumer.c
drivers/regulator/virtual.c
drivers/regulator/wm831x-dcdc.c
drivers/regulator/wm831x-isink.c
drivers/regulator/wm831x-ldo.c
drivers/regulator/wm8350-regulator.c
drivers/regulator/wm8400-regulator.c
drivers/regulator/wm8994-regulator.c
drivers/s390/block/dasd_devmap.c
drivers/s390/block/dasd_eckd.c
drivers/s390/block/dasd_erp.c
drivers/s390/char/sclp_config.c
drivers/s390/cio/airq.c
drivers/s390/cio/ccwgroup.c
drivers/s390/cio/cio.c
drivers/s390/cio/cio.h
drivers/s390/cio/cmf.c
drivers/s390/cio/css.c
drivers/s390/cio/device.c
drivers/s390/net/qeth_l3_sys.c
drivers/scsi/bnx2i/57xx_iscsi_hsi.h
drivers/scsi/bnx2i/bnx2i_init.c
drivers/scsi/cxgbi/libcxgbi.h
drivers/scsi/esp_scsi.c
drivers/scsi/esp_scsi.h
drivers/scsi/lpfc/lpfc_attr.c
drivers/scsi/megaraid/megaraid_mbox.c
drivers/scsi/megaraid/megaraid_mm.c
drivers/scsi/megaraid/megaraid_sas_fusion.h
drivers/scsi/qla2xxx/qla_mr.c
drivers/spi/spi-bitbang.c
drivers/ssb/Kconfig
drivers/ssb/driver_chipcommon_sflash.c
drivers/staging/imx-drm/imx-drm-core.c
drivers/staging/media/lirc/lirc_igorplugusb.c
drivers/thermal/Kconfig
drivers/thermal/Makefile
drivers/thermal/cpu_cooling.c
drivers/thermal/exynos_thermal.c [deleted file]
drivers/thermal/imx_thermal.c [new file with mode: 0644]
drivers/thermal/samsung/Kconfig [new file with mode: 0644]
drivers/thermal/samsung/Makefile [new file with mode: 0644]
drivers/thermal/samsung/exynos_thermal_common.c [new file with mode: 0644]
drivers/thermal/samsung/exynos_thermal_common.h [new file with mode: 0644]
drivers/thermal/samsung/exynos_tmu.c [new file with mode: 0644]
drivers/thermal/samsung/exynos_tmu.h [new file with mode: 0644]
drivers/thermal/samsung/exynos_tmu_data.c [new file with mode: 0644]
drivers/thermal/samsung/exynos_tmu_data.h [new file with mode: 0644]
drivers/thermal/step_wise.c
drivers/tty/hvc/hvc_tile.c
drivers/tty/hvc/hvc_vio.c
drivers/tty/serial/Kconfig
drivers/tty/serial/Makefile
drivers/tty/serial/mpc52xx_uart.c
drivers/tty/serial/sh-sci.c
drivers/tty/serial/tilegx.c [new file with mode: 0644]
drivers/tty/sysrq.c
drivers/usb/gadget/storage_common.c
drivers/usb/host/fsl-mph-dr-of.c
drivers/vfio/vfio.c
drivers/vhost/vhost.c
drivers/video/Kconfig
drivers/video/atmel_lcdfb.c
drivers/video/backlight/hx8357.c
drivers/video/backlight/lp855x_bl.c
drivers/video/hdmi.c
drivers/video/matrox/matroxfb_base.c
drivers/video/mxsfb.c
drivers/video/omap2/dss/ti_hdmi_4xxx_ip.c
drivers/video/output.c
drivers/video/simplefb.c
drivers/watchdog/hpwdt.c
drivers/xen/acpi.c
fs/9p/vfs_addr.c
fs/9p/vfs_file.c
fs/9p/vfs_inode.c
fs/Makefile
fs/adfs/file.c
fs/affs/file.c
fs/afs/file.c
fs/afs/internal.h
fs/afs/write.c
fs/aio.c
fs/bad_inode.c
fs/bfs/file.c
fs/binfmt_aout.c
fs/binfmt_elf.c
fs/binfmt_elf_fdpic.c
fs/bio.c
fs/block_dev.c
fs/btrfs/file.c
fs/btrfs/inode.c
fs/btrfs/send.c
fs/ceph/addr.c
fs/ceph/caps.c
fs/ceph/dir.c
fs/ceph/file.c
fs/ceph/inode.c
fs/ceph/ioctl.c
fs/ceph/mds_client.c
fs/ceph/super.h
fs/cifs/AUTHORS
fs/cifs/cifsfs.c
fs/cifs/cifsglob.h
fs/cifs/cifspdu.h
fs/cifs/cifsproto.h
fs/cifs/cifssmb.c
fs/cifs/connect.c
fs/cifs/file.c
fs/cifs/inode.c
fs/cifs/link.c
fs/cifs/readdir.c
fs/cifs/sess.c
fs/cifs/smb1ops.c
fs/cifs/smb2file.c
fs/cifs/smb2inode.c
fs/cifs/smb2misc.c
fs/cifs/smb2ops.c
fs/cifs/smb2pdu.c
fs/cifs/smb2pdu.h
fs/cifs/smb2proto.h
fs/coredump.c
fs/direct-io.c
fs/dlm/ast.c
fs/dlm/user.c
fs/ecryptfs/file.c
fs/exofs/file.c
fs/ext2/file.c
fs/ext2/inode.c
fs/ext3/file.c
fs/ext3/inode.c
fs/ext3/super.c
fs/ext4/ext4.h
fs/ext4/extents.c
fs/ext4/extents_status.c
fs/ext4/extents_status.h
fs/ext4/file.c
fs/ext4/ialloc.c
fs/ext4/indirect.c
fs/ext4/inode.c
fs/ext4/ioctl.c
fs/ext4/mballoc.c
fs/ext4/migrate.c
fs/ext4/move_extent.c
fs/ext4/namei.c
fs/ext4/super.c
fs/f2fs/checkpoint.c
fs/f2fs/data.c
fs/f2fs/debug.c
fs/f2fs/dir.c
fs/f2fs/f2fs.h
fs/f2fs/file.c
fs/f2fs/gc.c
fs/f2fs/gc.h
fs/f2fs/inode.c
fs/f2fs/namei.c
fs/f2fs/node.c
fs/f2fs/node.h
fs/f2fs/recovery.c
fs/f2fs/segment.c
fs/f2fs/super.c
fs/f2fs/xattr.c
fs/fat/file.c
fs/fat/inode.c
fs/fuse/cuse.c
fs/fuse/file.c
fs/fuse/fuse_i.h
fs/gfs2/aops.c
fs/gfs2/dir.c
fs/gfs2/file.c
fs/gfs2/glock.c
fs/gfs2/glops.c
fs/gfs2/inode.c
fs/gfs2/lops.c
fs/gfs2/main.c
fs/gfs2/meta_io.c
fs/gfs2/meta_io.h
fs/hfs/inode.c
fs/hfsplus/inode.c
fs/hostfs/hostfs_kern.c
fs/hpfs/file.c
fs/internal.h
fs/iov-iter.c [new file with mode: 0644]
fs/isofs/inode.c
fs/jbd/commit.c
fs/jbd/journal.c
fs/jffs2/file.c
fs/jfs/file.c
fs/jfs/inode.c
fs/jfs/jfs_dtree.c
fs/logfs/dev_mtd.c
fs/logfs/file.c
fs/logfs/super.c
fs/minix/file.c
fs/nfs/client.c
fs/nfs/dir.c
fs/nfs/direct.c
fs/nfs/file.c
fs/nfs/internal.h
fs/nfs/nfs4_fs.h
fs/nfs/nfs4client.c
fs/nfs/nfs4file.c
fs/nfs/nfs4proc.c
fs/nfs/nfs4session.c
fs/nfs/nfs4session.h
fs/nfs/nfs4state.c
fs/nfs/nfs4xdr.c
fs/nfs/super.c
fs/nfsd/nfs4state.c
fs/nilfs2/file.c
fs/nilfs2/inode.c
fs/ocfs2/aops.c
fs/ocfs2/aops.h
fs/ocfs2/cluster/heartbeat.c
fs/ocfs2/cluster/masklog.h
fs/ocfs2/dlm/dlmmaster.c
fs/ocfs2/dlm/dlmrecovery.c
fs/ocfs2/file.c
fs/ocfs2/ocfs2_trace.h
fs/ocfs2/xattr.h
fs/omfs/file.c
fs/proc/generic.c
fs/proc/root.c
fs/pstore/Kconfig
fs/pstore/inode.c
fs/pstore/internal.h
fs/pstore/platform.c
fs/pstore/ram.c
fs/quota/dquot.c
fs/quota/quota.c
fs/ramfs/file-mmu.c
fs/ramfs/file-nommu.c
fs/read_write.c
fs/reiserfs/bitmap.c
fs/reiserfs/dir.c
fs/reiserfs/file.c
fs/reiserfs/fix_node.c
fs/reiserfs/inode.c
fs/reiserfs/ioctl.c
fs/reiserfs/journal.c
fs/reiserfs/lock.c
fs/reiserfs/namei.c
fs/reiserfs/prints.c
fs/reiserfs/reiserfs.h
fs/reiserfs/resize.c
fs/reiserfs/stree.c
fs/reiserfs/super.c
fs/reiserfs/xattr.c
fs/reiserfs/xattr_acl.c
fs/romfs/mmap-nommu.c
fs/stat.c
fs/sysv/file.c
fs/ubifs/debug.c
fs/ubifs/file.c
fs/udf/file.c
fs/udf/inode.c
fs/udf/super.c
fs/ufs/file.c
fs/xfs/Makefile
fs/xfs/xfs_acl.c
fs/xfs/xfs_ag.h
fs/xfs/xfs_alloc.c
fs/xfs/xfs_aops.c
fs/xfs/xfs_attr.c
fs/xfs/xfs_attr.h
fs/xfs/xfs_attr_inactive.c [new file with mode: 0644]
fs/xfs/xfs_attr_leaf.c
fs/xfs/xfs_attr_leaf.h
fs/xfs/xfs_attr_list.c [new file with mode: 0644]
fs/xfs/xfs_attr_remote.c
fs/xfs/xfs_bmap.c
fs/xfs/xfs_bmap.h
fs/xfs/xfs_bmap_btree.c
fs/xfs/xfs_bmap_util.c [new file with mode: 0644]
fs/xfs/xfs_bmap_util.h [new file with mode: 0644]
fs/xfs/xfs_btree.c
fs/xfs/xfs_btree.h
fs/xfs/xfs_buf.c
fs/xfs/xfs_buf_item.c
fs/xfs/xfs_buf_item.h
fs/xfs/xfs_da_btree.c
fs/xfs/xfs_dfrag.c [deleted file]
fs/xfs/xfs_dfrag.h [deleted file]
fs/xfs/xfs_dir2.c
fs/xfs/xfs_dir2.h
fs/xfs/xfs_dir2_block.c
fs/xfs/xfs_dir2_data.c
fs/xfs/xfs_dir2_format.h
fs/xfs/xfs_dir2_leaf.c
fs/xfs/xfs_dir2_node.c
fs/xfs/xfs_dir2_priv.h
fs/xfs/xfs_dir2_readdir.c [new file with mode: 0644]
fs/xfs/xfs_dir2_sf.c
fs/xfs/xfs_discard.c
fs/xfs/xfs_dquot.c
fs/xfs/xfs_dquot_item.c
fs/xfs/xfs_error.c
fs/xfs/xfs_export.c
fs/xfs/xfs_extent_busy.c
fs/xfs/xfs_extfree_item.c
fs/xfs/xfs_extfree_item.h
fs/xfs/xfs_file.c
fs/xfs/xfs_filestream.c
fs/xfs/xfs_filestream.h
fs/xfs/xfs_format.h [new file with mode: 0644]
fs/xfs/xfs_fs.h
fs/xfs/xfs_fsops.c
fs/xfs/xfs_ialloc.c
fs/xfs/xfs_icache.c
fs/xfs/xfs_icache.h
fs/xfs/xfs_icreate_item.c
fs/xfs/xfs_icreate_item.h
fs/xfs/xfs_inode.c
fs/xfs/xfs_inode.h
fs/xfs/xfs_inode_buf.c [new file with mode: 0644]
fs/xfs/xfs_inode_buf.h [new file with mode: 0644]
fs/xfs/xfs_inode_fork.c [new file with mode: 0644]
fs/xfs/xfs_inode_fork.h [new file with mode: 0644]
fs/xfs/xfs_inode_item.c
fs/xfs/xfs_inode_item.h
fs/xfs/xfs_ioctl.c
fs/xfs/xfs_ioctl.h
fs/xfs/xfs_ioctl32.c
fs/xfs/xfs_iomap.c
fs/xfs/xfs_iops.c
fs/xfs/xfs_iops.h
fs/xfs/xfs_linux.h
fs/xfs/xfs_log.c
fs/xfs/xfs_log.h
fs/xfs/xfs_log_cil.c
fs/xfs/xfs_log_format.h [new file with mode: 0644]
fs/xfs/xfs_log_priv.h
fs/xfs/xfs_log_recover.c
fs/xfs/xfs_log_rlimit.c [new file with mode: 0644]
fs/xfs/xfs_mount.c
fs/xfs/xfs_mount.h
fs/xfs/xfs_qm.c
fs/xfs/xfs_qm.h
fs/xfs/xfs_qm_bhv.c
fs/xfs/xfs_qm_syscalls.c
fs/xfs/xfs_quota.h
fs/xfs/xfs_quota_defs.h [new file with mode: 0644]
fs/xfs/xfs_quotaops.c
fs/xfs/xfs_rename.c [deleted file]
fs/xfs/xfs_rtalloc.c
fs/xfs/xfs_rtalloc.h
fs/xfs/xfs_sb.c [new file with mode: 0644]
fs/xfs/xfs_sb.h
fs/xfs/xfs_super.c
fs/xfs/xfs_symlink.c
fs/xfs/xfs_symlink.h
fs/xfs/xfs_symlink_remote.c [new file with mode: 0644]
fs/xfs/xfs_trace.c
fs/xfs/xfs_trans.c
fs/xfs/xfs_trans.h
fs/xfs/xfs_trans_dquot.c
fs/xfs/xfs_trans_priv.h
fs/xfs/xfs_trans_resv.c [new file with mode: 0644]
fs/xfs/xfs_trans_resv.h [new file with mode: 0644]
fs/xfs/xfs_types.h
fs/xfs/xfs_utils.c [deleted file]
fs/xfs/xfs_utils.h [deleted file]
fs/xfs/xfs_vnodeops.c [deleted file]
fs/xfs/xfs_vnodeops.h [deleted file]
fs/xfs/xfs_xattr.c
include/acpi/acpi_bus.h
include/acpi/acpi_drivers.h
include/acpi/acpixf.h
include/acpi/actypes.h
include/crypto/scatterwalk.h
include/drm/drmP.h
include/drm/drm_agpsupport.h [new file with mode: 0644]
include/drm/drm_crtc.h
include/drm/drm_dp_helper.h
include/drm/drm_fb_cma_helper.h
include/drm/drm_flip_work.h [new file with mode: 0644]
include/drm/drm_gem_cma_helper.h
include/drm/drm_mm.h
include/drm/drm_pciids.h
include/drm/drm_vma_manager.h [new file with mode: 0644]
include/drm/i2c/tda998x.h [new file with mode: 0644]
include/drm/ttm/ttm_bo_api.h
include/drm/ttm/ttm_bo_driver.h
include/dt-bindings/input/input.h [new file with mode: 0644]
include/dt-bindings/sound/fsl-imx-audmux.h [new file with mode: 0644]
include/linux/acpi.h
include/linux/aio.h
include/linux/ata.h
include/linux/atmel-ssc.h
include/linux/bcma/bcma.h
include/linux/binfmts.h
include/linux/bio.h
include/linux/blk_types.h
include/linux/cgroup.h
include/linux/clocksource.h
include/linux/coda.h
include/linux/coredump.h
include/linux/cpu.h
include/linux/cpufreq.h
include/linux/cpuidle.h
include/linux/device-mapper.h
include/linux/dm9000.h
include/linux/dma/mmp-pdma.h [new file with mode: 0644]
include/linux/dmaengine.h
include/linux/elf.h
include/linux/elfcore.h
include/linux/fs.h
include/linux/fs_enet_pd.h
include/linux/fsl/mxs-dma.h [deleted file]
include/linux/hdmi.h
include/linux/hid.h
include/linux/i2c.h
include/linux/i2c/i2c-hid.h
include/linux/i2c/pxa-i2c.h
include/linux/ieee80211.h
include/linux/if_team.h
include/linux/igmp.h
include/linux/inetdevice.h
include/linux/iommu.h
include/linux/ipv6.h
include/linux/jbd.h
include/linux/ktime.h
include/linux/libata.h
include/linux/math64.h
include/linux/memcontrol.h
include/linux/mfd/arizona/gpio.h [new file with mode: 0644]
include/linux/mfd/da9063/core.h [new file with mode: 0644]
include/linux/mfd/da9063/pdata.h [new file with mode: 0644]
include/linux/mfd/da9063/registers.h [new file with mode: 0644]
include/linux/mfd/mcp.h
include/linux/mfd/palmas.h
include/linux/mfd/rtsx_common.h
include/linux/mfd/rtsx_pci.h
include/linux/mfd/samsung/s2mps11.h
include/linux/mfd/ti_am335x_tscadc.h
include/linux/mfd/twl6040.h
include/linux/mfd/ucb1x00.h
include/linux/mlx4/cmd.h
include/linux/mlx4/device.h
include/linux/mlx4/qp.h
include/linux/module.h
include/linux/moduleparam.h
include/linux/mtd/bbm.h
include/linux/mtd/fsmc.h
include/linux/mtd/mtd.h
include/linux/mtd/nand.h
include/linux/mv643xx_eth.h
include/linux/netdevice.h
include/linux/netfilter.h
include/linux/nfs_fs.h
include/linux/nfs_xdr.h
include/linux/of.h
include/linux/of_fdt.h
include/linux/pci-acpi.h
include/linux/pci.h
include/linux/pci_hotplug.h
include/linux/platform_data/asoc-s3c.h
include/linux/platform_data/atmel.h
include/linux/platform_data/brcmfmac-sdio.h
include/linux/platform_data/exynos_thermal.h [deleted file]
include/linux/platform_data/mtd-nand-pxa3xx.h
include/linux/platform_data/omap-abe-twl6040.h [deleted file]
include/linux/platform_data/rcar-du.h
include/linux/pstore.h
include/linux/pxa2xx_ssp.h
include/linux/quota.h
include/linux/quotaops.h
include/linux/raid/pq.h
include/linux/regmap.h
include/linux/regulator/consumer.h
include/linux/regulator/driver.h
include/linux/regulator/fan53555.h
include/linux/regulator/machine.h
include/linux/regulator/max8660.h
include/linux/regulator/pfuze100.h [new file with mode: 0644]
include/linux/security.h
include/linux/serial_sci.h
include/linux/sh_eth.h
include/linux/skbuff.h
include/linux/smsc911x.h
include/linux/socket.h
include/linux/sunrpc/cache.h
include/linux/tcp.h
include/linux/usb/usbnet.h
include/linux/uwb/spec.h
include/linux/vfio.h
include/linux/vmpressure.h
include/linux/xattr.h
include/media/adv7343.h
include/media/davinci/vpif_types.h
include/media/lirc_dev.h
include/media/rc-core.h
include/media/tveeprom.h
include/media/v4l2-async.h
include/media/v4l2-mem2mem.h
include/media/v4l2-subdev.h
include/net/9p/transport.h
include/net/act_api.h
include/net/addrconf.h
include/net/af_rxrpc.h
include/net/af_unix.h
include/net/af_vsock.h [moved from net/vmw_vsock/af_vsock.h with 100% similarity]
include/net/arp.h
include/net/ax25.h
include/net/bluetooth/bluetooth.h
include/net/bluetooth/hci.h
include/net/bluetooth/hci_core.h
include/net/bluetooth/sco.h
include/net/cfg80211.h
include/net/checksum.h
include/net/cls_cgroup.h
include/net/fib_rules.h
include/net/ieee80211_radiotap.h
include/net/ip6_tunnel.h
include/net/ip_tunnels.h
include/net/irda/irlan_common.h
include/net/mac80211.h
include/net/neighbour.h
include/net/net_namespace.h
include/net/netfilter/nf_conntrack.h
include/net/netfilter/nf_conntrack_l4proto.h
include/net/netfilter/nf_nat.h
include/net/netfilter/nf_nat_helper.h
include/net/netfilter/nf_tproxy_core.h [deleted file]
include/net/netfilter/nfnetlink_queue.h
include/net/netns/ipv4.h
include/net/netns/ipv6.h
include/net/netprio_cgroup.h
include/net/nfc/nfc.h
include/net/pkt_cls.h
include/net/pkt_sched.h
include/net/route.h
include/net/sch_generic.h
include/net/sctp/auth.h
include/net/sctp/checksum.h
include/net/sctp/command.h
include/net/sctp/constants.h
include/net/sctp/sctp.h
include/net/sctp/sm.h
include/net/sctp/structs.h
include/net/sctp/tsnmap.h
include/net/sctp/ulpevent.h
include/net/sctp/ulpqueue.h
include/net/sock.h
include/net/tcp.h
include/net/udp.h
include/net/vsock_addr.h [moved from net/vmw_vsock/vsock_addr.h with 100% similarity]
include/net/vxlan.h [new file with mode: 0644]
include/net/xfrm.h
include/rdma/ib_verbs.h
include/rdma/iw_cm.h
include/sound/core.h
include/sound/pxa2xx-lib.h
include/sound/rcar_snd.h [new file with mode: 0644]
include/sound/soc-dapm.h
include/sound/soc-dpcm.h
include/sound/soc.h
include/sound/tea575x-tuner.h
include/trace/events/ext4.h
include/trace/events/power.h
include/uapi/drm/drm.h
include/uapi/drm/i915_drm.h
include/uapi/linux/aio_abi.h
include/uapi/linux/cifs/cifs_mount.h [new file with mode: 0644]
include/uapi/linux/dm-ioctl.h
include/uapi/linux/dn.h
include/uapi/linux/dqblk_xfs.h
include/uapi/linux/fib_rules.h
include/uapi/linux/fiemap.h
include/uapi/linux/if_bridge.h
include/uapi/linux/if_link.h
include/uapi/linux/if_pppox.h
include/uapi/linux/if_tun.h
include/uapi/linux/ipv6.h
include/uapi/linux/kvm.h
include/uapi/linux/loop.h
include/uapi/linux/netfilter/Kbuild
include/uapi/linux/netfilter/nfnetlink_queue.h
include/uapi/linux/netfilter/xt_HMARK.h [moved from include/linux/netfilter/xt_HMARK.h with 100% similarity]
include/uapi/linux/netfilter/xt_rpfilter.h [moved from include/linux/netfilter/xt_rpfilter.h with 100% similarity]
include/uapi/linux/netfilter_bridge/ebt_802_3.h
include/uapi/linux/netfilter_ipv4/ipt_CLUSTERIP.h
include/uapi/linux/nfc.h
include/uapi/linux/nl80211.h
include/uapi/linux/openvswitch.h
include/uapi/linux/reiserfs_xattr.h
include/uapi/linux/sctp.h
include/uapi/linux/serial_core.h
include/uapi/linux/snmp.h
include/uapi/linux/tcp.h
include/uapi/linux/uhid.h
include/uapi/linux/virtio_net.h
include/uapi/linux/wimax/i2400m.h
include/uapi/sound/hdspm.h
include/video/atmel_lcdc.h
include/xen/acpi.h
include/xen/interface/platform.h
init/Kconfig
kernel/capability.c
kernel/cgroup.c
kernel/cgroup_freezer.c
kernel/cpu.c
kernel/cpuset.c
kernel/elfcore.c
kernel/events/core.c
kernel/module.c
kernel/params.c
kernel/power/suspend.c
kernel/sched/core.c
kernel/sched/cpuacct.c
kernel/sched/sched.h
kernel/time/sched_clock.c
kernel/time/tick-sched.c
kernel/time/timekeeping.c
kernel/wait.c
kernel/workqueue.c
lib/div64.c
lib/earlycpio.c
lib/raid6/.gitignore
lib/raid6/Makefile
lib/raid6/algos.c
lib/raid6/neon.c [new file with mode: 0644]
lib/raid6/neon.uc [new file with mode: 0644]
lib/raid6/test/Makefile
mm/filemap.c
mm/hugetlb_cgroup.c
mm/memcontrol.c
mm/page_io.c
mm/page_isolation.c
mm/shmem.c
mm/slab_common.c
mm/slub.c
mm/vmpressure.c
net/8021q/vlan.c
net/8021q/vlan_dev.c
net/9p/client.c
net/9p/trans_rdma.c
net/Kconfig
net/appletalk/atalk_proc.c
net/batman-adv/unicast.c
net/bluetooth/hci_conn.c
net/bluetooth/hci_core.c
net/bluetooth/hci_event.c
net/bluetooth/hidp/core.c
net/bluetooth/l2cap_core.c
net/bluetooth/rfcomm/tty.c
net/bluetooth/sco.c
net/bridge/br_device.c
net/bridge/br_fdb.c
net/bridge/br_if.c
net/bridge/br_mdb.c
net/bridge/br_netlink.c
net/bridge/br_notify.c
net/bridge/br_private.h
net/bridge/br_vlan.c
net/ceph/messenger.c
net/ceph/osd_client.c
net/core/datagram.c
net/core/dev.c
net/core/fib_rules.c
net/core/flow_dissector.c
net/core/iovec.c
net/core/net-sysfs.c
net/core/netprio_cgroup.c
net/core/pktgen.c
net/core/rtnetlink.c
net/core/sock.c
net/core/stream.c
net/dccp/proto.c
net/ieee802154/6lowpan.c
net/ieee802154/6lowpan.h
net/ipv4/devinet.c
net/ipv4/fib_rules.c
net/ipv4/igmp.c
net/ipv4/ip_gre.c
net/ipv4/ip_input.c
net/ipv4/ip_output.c
net/ipv4/ip_tunnel.c
net/ipv4/ip_vti.c
net/ipv4/ipip.c
net/ipv4/ipmr.c
net/ipv4/netfilter/ipt_MASQUERADE.c
net/ipv4/ping.c
net/ipv4/proc.c
net/ipv4/raw.c
net/ipv4/route.c
net/ipv4/sysctl_net_ipv4.c
net/ipv4/tcp.c
net/ipv4/tcp_fastopen.c
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_memcontrol.c
net/ipv4/tcp_minisocks.c
net/ipv4/tcp_output.c
net/ipv4/udp.c
net/ipv4/xfrm4_output.c
net/ipv4/xfrm4_state.c
net/ipv6/addrconf.c
net/ipv6/af_inet6.c
net/ipv6/datagram.c
net/ipv6/fib6_rules.c
net/ipv6/ip6_fib.c
net/ipv6/ip6_gre.c
net/ipv6/ip6_input.c
net/ipv6/ip6_tunnel.c
net/ipv6/ip6mr.c
net/ipv6/mcast.c
net/ipv6/netfilter/ip6t_MASQUERADE.c
net/ipv6/proc.c
net/ipv6/raw.c
net/ipv6/reassembly.c
net/ipv6/route.c
net/ipv6/sit.c
net/ipv6/tcp_ipv6.c
net/ipv6/xfrm6_output.c
net/ipv6/xfrm6_state.c
net/ipx/ipx_proc.c
net/irda/irttp.c
net/key/af_key.c
net/llc/llc_proc.c
net/mac80211/cfg.c
net/mac80211/chan.c
net/mac80211/debugfs_sta.c
net/mac80211/driver-ops.h
net/mac80211/ht.c
net/mac80211/ibss.c
net/mac80211/ieee80211_i.h
net/mac80211/iface.c
net/mac80211/key.c
net/mac80211/led.c
net/mac80211/led.h
net/mac80211/main.c
net/mac80211/mesh.c
net/mac80211/mesh_plink.c
net/mac80211/mlme.c
net/mac80211/rate.c
net/mac80211/rate.h
net/mac80211/rc80211_minstrel.c
net/mac80211/rc80211_minstrel_ht.c
net/mac80211/rc80211_pid_algo.c
net/mac80211/rx.c
net/mac80211/scan.c
net/mac80211/status.c
net/mac80211/trace.h
net/mac80211/tx.c
net/mac80211/util.c
net/netfilter/Kconfig
net/netfilter/Makefile
net/netfilter/core.c
net/netfilter/ipvs/ip_vs_lblcr.c
net/netfilter/ipvs/ip_vs_proto_sctp.c
net/netfilter/ipvs/ip_vs_sh.c
net/netfilter/nf_conntrack_core.c
net/netfilter/nf_conntrack_labels.c
net/netfilter/nf_conntrack_netlink.c
net/netfilter/nf_conntrack_proto.c
net/netfilter/nf_conntrack_proto_tcp.c
net/netfilter/nf_nat_core.c
net/netfilter/nf_nat_helper.c
net/netfilter/nf_nat_proto_sctp.c
net/netfilter/nf_tproxy_core.c [deleted file]
net/netfilter/nfnetlink_queue_core.c
net/netfilter/nfnetlink_queue_ct.c
net/netfilter/xt_TPROXY.c
net/netfilter/xt_addrtype.c
net/netfilter/xt_socket.c
net/netlink/af_netlink.c
net/netlink/af_netlink.h
net/nfc/core.c
net/nfc/hci/core.c
net/nfc/netlink.c
net/nfc/nfc.h
net/openvswitch/Kconfig
net/openvswitch/Makefile
net/openvswitch/vport-vxlan.c [new file with mode: 0644]
net/openvswitch/vport.c
net/openvswitch/vport.h
net/packet/af_packet.c
net/phonet/socket.c
net/rfkill/rfkill-regulator.c
net/sched/cls_cgroup.c
net/sched/sch_choke.c
net/sched/sch_netem.c
net/sctp/associola.c
net/sctp/auth.c
net/sctp/bind_addr.c
net/sctp/chunk.c
net/sctp/command.c
net/sctp/debug.c
net/sctp/endpointola.c
net/sctp/input.c
net/sctp/inqueue.c
net/sctp/ipv6.c
net/sctp/objcnt.c
net/sctp/output.c
net/sctp/outqueue.c
net/sctp/primitive.c
net/sctp/proc.c
net/sctp/protocol.c
net/sctp/sm_make_chunk.c
net/sctp/sm_sideeffect.c
net/sctp/sm_statefuns.c
net/sctp/sm_statetable.c
net/sctp/socket.c
net/sctp/ssnmap.c
net/sctp/sysctl.c
net/sctp/transport.c
net/sctp/tsnmap.c
net/sctp/ulpevent.c
net/sctp/ulpqueue.c
net/sunrpc/rpc_pipe.c
net/sunrpc/svcsock.c
net/sunrpc/xprtsock.c
net/unix/af_unix.c
net/vmw_vsock/af_vsock.c
net/vmw_vsock/vmci_transport.c
net/vmw_vsock/vmci_transport.h
net/vmw_vsock/vsock_addr.c
net/wireless/core.c
net/wireless/core.h
net/wireless/mesh.c
net/wireless/nl80211.c
net/wireless/nl80211.h
net/wireless/rdev-ops.h
net/wireless/scan.c
net/wireless/sme.c
net/wireless/trace.h
net/wireless/util.c
net/xfrm/xfrm_output.c
net/xfrm/xfrm_policy.c
net/xfrm/xfrm_state.c
samples/hidraw/.gitignore [new file with mode: 0644]
samples/kprobes/kprobe_example.c
scripts/coccinelle/misc/boolreturn.cocci [new file with mode: 0644]
scripts/config
scripts/diffconfig
scripts/kconfig/confdata.c
scripts/kconfig/mconf.c
scripts/kconfig/nconf.c
scripts/kconfig/symbol.c
scripts/mod/modpost.c
scripts/package/builddeb
scripts/package/buildtar
scripts/package/mkspec
scripts/recordmcount.pl
scripts/sortextable.c
security/apparmor/Kconfig
security/apparmor/Makefile
security/apparmor/apparmorfs.c
security/apparmor/capability.c
security/apparmor/context.c
security/apparmor/crypto.c [new file with mode: 0644]
security/apparmor/domain.c
security/apparmor/include/apparmor.h
security/apparmor/include/apparmorfs.h
security/apparmor/include/audit.h
security/apparmor/include/capability.h
security/apparmor/include/context.h
security/apparmor/include/crypto.h [new file with mode: 0644]
security/apparmor/include/policy.h
security/apparmor/include/policy_unpack.h
security/apparmor/lib.c
security/apparmor/lsm.c
security/apparmor/policy.c
security/apparmor/policy_unpack.c
security/apparmor/procattr.c
security/capability.c
security/device_cgroup.c
security/integrity/evm/evm_main.c
security/security.c
security/selinux/hooks.c
security/selinux/include/objsec.h
security/selinux/include/security.h
security/selinux/include/xfrm.h
security/selinux/netnode.c
security/selinux/selinuxfs.c
security/selinux/ss/ebitmap.c
security/selinux/ss/ebitmap.h
security/selinux/ss/mls.c
security/selinux/ss/mls_types.h
security/selinux/ss/policydb.c
security/selinux/ss/services.c
security/selinux/xfrm.c
security/smack/smack_lsm.c
sound/arm/pxa2xx-ac97.c
sound/arm/pxa2xx-pcm-lib.c
sound/arm/pxa2xx-pcm.c
sound/arm/pxa2xx-pcm.h
sound/core/Kconfig
sound/core/Makefile
sound/core/pcm_dmaengine.c [moved from sound/soc/soc-dmaengine-pcm.c with 100% similarity]
sound/core/pcm_lib.c
sound/drivers/dummy.c
sound/firewire/speakers.c
sound/i2c/other/tea575x-tuner.c
sound/isa/gus/interwave.c
sound/oss/dmabuf.c
sound/pci/hda/hda_codec.c
sound/pci/hda/hda_generic.c
sound/pci/hda/hda_generic.h
sound/pci/hda/hda_hwdep.c
sound/pci/hda/hda_intel.c
sound/pci/hda/hda_jack.c
sound/pci/hda/hda_jack.h
sound/pci/hda/patch_analog.c
sound/pci/hda/patch_conexant.c
sound/pci/hda/patch_realtek.c
sound/pci/hda/patch_sigmatel.c
sound/pci/hda/patch_via.c
sound/pci/rme96.c
sound/pci/rme9652/hdspm.c
sound/soc/Kconfig
sound/soc/Makefile
sound/soc/atmel/Kconfig
sound/soc/atmel/Makefile
sound/soc/atmel/atmel-pcm-dma.c
sound/soc/atmel/atmel_ssc_dai.c
sound/soc/atmel/atmel_wm8904.c [new file with mode: 0644]
sound/soc/atmel/sam9x5_wm8731.c [new file with mode: 0644]
sound/soc/au1x/db1200.c
sound/soc/au1x/psc-ac97.c
sound/soc/blackfin/bf5xx-ac97.h
sound/soc/cirrus/ep93xx-ac97.c
sound/soc/cirrus/ep93xx-i2s.c
sound/soc/codecs/Kconfig
sound/soc/codecs/Makefile
sound/soc/codecs/ac97.c
sound/soc/codecs/ad1980.c
sound/soc/codecs/ad73311.c
sound/soc/codecs/adau1701.c
sound/soc/codecs/adav80x.c
sound/soc/codecs/ads117x.c
sound/soc/codecs/ak4104.c
sound/soc/codecs/ak4554.c [new file with mode: 0644]
sound/soc/codecs/ak5386.c
sound/soc/codecs/arizona.c
sound/soc/codecs/arizona.h
sound/soc/codecs/bt-sco.c
sound/soc/codecs/cs4270.c
sound/soc/codecs/cs4271.c
sound/soc/codecs/hdmi.c
sound/soc/codecs/lm4857.c
sound/soc/codecs/max9768.c
sound/soc/codecs/max98090.c
sound/soc/codecs/max9877.c
sound/soc/codecs/mc13783.c
sound/soc/codecs/pcm1681.c [new file with mode: 0644]
sound/soc/codecs/pcm1792a.c [new file with mode: 0644]
sound/soc/codecs/pcm1792a.h [new file with mode: 0644]
sound/soc/codecs/pcm3008.c
sound/soc/codecs/rt5640.c
sound/soc/codecs/sgtl5000.c
sound/soc/codecs/si476x.c
sound/soc/codecs/spdif_receiver.c
sound/soc/codecs/spdif_transmitter.c
sound/soc/codecs/sta32x.c
sound/soc/codecs/tlv320aic26.c
sound/soc/codecs/tlv320aic3x.c
sound/soc/codecs/twl4030.c
sound/soc/codecs/twl6040.c
sound/soc/codecs/uda134x.c
sound/soc/codecs/wl1273.c
sound/soc/codecs/wm0010.c
sound/soc/codecs/wm5102.c
sound/soc/codecs/wm5110.c
sound/soc/codecs/wm8350.c
sound/soc/codecs/wm8727.c
sound/soc/codecs/wm8731.c
sound/soc/codecs/wm8753.c
sound/soc/codecs/wm8782.c
sound/soc/codecs/wm8903.c
sound/soc/codecs/wm8904.c
sound/soc/codecs/wm8960.c
sound/soc/codecs/wm8962.c
sound/soc/codecs/wm8994.c
sound/soc/codecs/wm8995.c
sound/soc/codecs/wm8997.c [new file with mode: 0644]
sound/soc/codecs/wm8997.h [new file with mode: 0644]
sound/soc/codecs/wm_adsp.c
sound/soc/codecs/wm_adsp.h
sound/soc/codecs/wm_hubs.c
sound/soc/fsl/Kconfig
sound/soc/fsl/fsl_ssi.c
sound/soc/fsl/imx-audmux.c
sound/soc/fsl/imx-audmux.h
sound/soc/fsl/imx-mc13783.c
sound/soc/fsl/imx-pcm-dma.c
sound/soc/fsl/imx-pcm-fiq.c
sound/soc/fsl/imx-pcm.h
sound/soc/fsl/imx-sgtl5000.c
sound/soc/fsl/imx-ssi.c
sound/soc/fsl/imx-ssi.h
sound/soc/fsl/imx-wm8962.c
sound/soc/kirkwood/Kconfig
sound/soc/kirkwood/Makefile
sound/soc/kirkwood/kirkwood-dma.c
sound/soc/kirkwood/kirkwood-i2s.c
sound/soc/kirkwood/kirkwood-openrd.c
sound/soc/kirkwood/kirkwood-t5325.c
sound/soc/kirkwood/kirkwood.h
sound/soc/mxs/Kconfig
sound/soc/mxs/mxs-saif.c
sound/soc/mxs/mxs-sgtl5000.c
sound/soc/nuc900/nuc900-ac97.c
sound/soc/omap/Kconfig
sound/soc/omap/mcbsp.c
sound/soc/omap/omap-abe-twl6040.c
sound/soc/omap/omap-mcbsp.c
sound/soc/pxa/Kconfig
sound/soc/pxa/brownstone.c
sound/soc/pxa/mioa701_wm9713.c
sound/soc/pxa/mmp-pcm.c
sound/soc/pxa/mmp-sspa.c
sound/soc/pxa/pxa-ssp.c
sound/soc/pxa/pxa2xx-ac97.c
sound/soc/pxa/pxa2xx-i2s.c
sound/soc/pxa/pxa2xx-pcm.c
sound/soc/pxa/ttc-dkb.c
sound/soc/s6000/s6105-ipcam.c
sound/soc/samsung/ac97.c
sound/soc/samsung/dma.c
sound/soc/samsung/dma.h
sound/soc/samsung/i2s-regs.h
sound/soc/samsung/i2s.c
sound/soc/samsung/smdk_wm8994.c
sound/soc/samsung/spdif.c
sound/soc/sh/Kconfig
sound/soc/sh/Makefile
sound/soc/sh/rcar/Makefile [new file with mode: 0644]
sound/soc/sh/rcar/adg.c [new file with mode: 0644]
sound/soc/sh/rcar/core.c [new file with mode: 0644]
sound/soc/sh/rcar/gen.c [new file with mode: 0644]
sound/soc/sh/rcar/rsnd.h [new file with mode: 0644]
sound/soc/sh/rcar/scu.c [new file with mode: 0644]
sound/soc/sh/rcar/ssi.c [new file with mode: 0644]
sound/soc/soc-compress.c
sound/soc/soc-core.c
sound/soc/soc-dapm.c
sound/soc/soc-jack.c
sound/soc/soc-pcm.c
sound/soc/spear/Kconfig
sound/soc/tegra/Kconfig
sound/soc/tegra/tegra20_ac97.c
sound/soc/tegra/tegra_alc5632.c
sound/soc/tegra/tegra_rt5640.c
sound/soc/tegra/tegra_wm8753.c
sound/soc/tegra/trimslice.c
sound/soc/txx9/txx9aclc-ac97.c
sound/soc/ux500/mop500.c
sound/usb/6fire/firmware.c
sound/usb/endpoint.c
sound/usb/pcm.c
sound/usb/usx2y/usbusx2y.c
tools/lguest/lguest.c
tools/power/fspin/Makefile [new file with mode: 0644]
tools/power/fspin/fspin.1 [new file with mode: 0644]
tools/power/fspin/fspin.c [new file with mode: 0644]
tools/testing/selftests/Makefile
tools/testing/selftests/powerpc/Makefile [new file with mode: 0644]
tools/testing/selftests/powerpc/harness.c [new file with mode: 0644]
tools/testing/selftests/powerpc/pmu/Makefile [new file with mode: 0644]
tools/testing/selftests/powerpc/pmu/count_instructions.c [new file with mode: 0644]
tools/testing/selftests/powerpc/pmu/event.c [new file with mode: 0644]
tools/testing/selftests/powerpc/pmu/event.h [new file with mode: 0644]
tools/testing/selftests/powerpc/pmu/loop.S [new file with mode: 0644]
tools/testing/selftests/powerpc/subunit.h [new file with mode: 0644]
tools/testing/selftests/powerpc/utils.h [new file with mode: 0644]
tools/virtio/.gitignore [new file with mode: 0644]
virt/kvm/kvm_main.c

diff --git a/CREDITS b/CREDITS
index 206d0fcf07a5a22170670f93c43481094916fe36..33a2f2d8300959dee1f3549156c5ef8ac068fb48 100644 (file)
--- a/CREDITS
+++ b/CREDITS
@@ -637,14 +637,13 @@ S: 14509 NE 39th Street #1096
 S: Bellevue, Washington 98007
 S: USA
 
-N: Christopher L. Cheney
-E: ccheney@debian.org
-E: ccheney@cheney.cx
-W: http://www.cheney.cx
+N: Chris Cheney
+E: chris.cheney@gmail.com
+E: ccheney@redhat.com
 P: 1024D/8E384AF2 2D31 1927 87D7 1F24 9FF9  1BC5 D106 5AB3 8E38 4AF2
 D: Vista Imaging usb webcam driver
-S: 314 Prince of Wales
-S: Conroe, TX 77304
+S: 2308 Therrell Way
+S: McKinney, TX 75070
 S: USA
 
 N: Stuart Cheshire
index 3105644b3bfc45f27371765246f6d1deda46549b..bfd119ace6ad00c2ee56c4c16b25a78ec6b5d7f0 100644 (file)
@@ -128,9 +128,8 @@ KernelVersion:      3.4
 Contact:       linux-mtd@lists.infradead.org
 Description:
                Maximum number of bit errors that the device is capable of
-               correcting within each region covering an ecc step.  This will
-               always be a non-negative integer.  Note that some devices will
-               have multiple ecc steps within each writesize region.
+               correcting within each region covering an ECC step (see
+               ecc_step_size).  This will always be a non-negative integer.
 
                In the case of devices lacking any ECC capability, it is 0.
 
@@ -173,3 +172,15 @@ Description:
                This is generally applicable only to NAND flash devices with ECC
                capability.  It is ignored on devices lacking ECC capability;
                i.e., devices for which ecc_strength is zero.
+
+What:          /sys/class/mtd/mtdX/ecc_step_size
+Date:          May 2013
+KernelVersion: 3.10
+Contact:       linux-mtd@lists.infradead.org
+Description:
+               The size of a single region covered by ECC, known as the ECC
+               step.  Devices may have several equally sized ECC steps within
+               each writesize region.
+
+               It will always be a non-negative integer.  In the case of
+               devices lacking any ECC capability, it is 0.
diff --git a/Documentation/ABI/testing/sysfs-fs-f2fs b/Documentation/ABI/testing/sysfs-fs-f2fs
new file mode 100644 (file)
index 0000000..31942ef
--- /dev/null
@@ -0,0 +1,26 @@
+What:          /sys/fs/f2fs/<disk>/gc_max_sleep_time
+Date:          July 2013
+Contact:       "Namjae Jeon" <namjae.jeon@samsung.com>
+Description:
+                Controls the maximun sleep time for gc_thread. Time
+                is in milliseconds.
+
+What:          /sys/fs/f2fs/<disk>/gc_min_sleep_time
+Date:          July 2013
+Contact:       "Namjae Jeon" <namjae.jeon@samsung.com>
+Description:
+                Controls the minimum sleep time for gc_thread. Time
+                is in milliseconds.
+
+What:          /sys/fs/f2fs/<disk>/gc_no_gc_sleep_time
+Date:          July 2013
+Contact:       "Namjae Jeon" <namjae.jeon@samsung.com>
+Description:
+                Controls the default sleep time for gc_thread. Time
+                is in milliseconds.
+
+What:          /sys/fs/f2fs/<disk>/gc_idle
+Date:          July 2013
+Contact:       "Namjae Jeon" <namjae.jeon@samsung.com>
+Description:
+                Controls the victim selection policy for garbage collection.
index 49267ea975684391c1f1e7144fb0c88700557d66..f403ec3c5c9a4a17cf6c534f2da8590144faf766 100644 (file)
           <title>functions/definitions</title>
 !Finclude/net/mac80211.h ieee80211_rx_status
 !Finclude/net/mac80211.h mac80211_rx_flags
+!Finclude/net/mac80211.h mac80211_tx_info_flags
 !Finclude/net/mac80211.h mac80211_tx_control_flags
 !Finclude/net/mac80211.h mac80211_rate_control_flags
 !Finclude/net/mac80211.h ieee80211_tx_rate
index 7d1278e7a4341603a4b8c8c84953ca95f8cc30e2..9fc8ed4ac0f49b1759ed00d4f5ce731bd35e7a55 100644 (file)
               will become a fatal error.
             </para></listitem>
           </varlistentry>
-          <varlistentry>
-            <term>DRIVER_USE_MTRR</term>
-            <listitem><para>
-              Driver uses MTRR interface for mapping memory, the DRM core will
-              manage MTRR resources. Deprecated.
-            </para></listitem>
-          </varlistentry>
           <varlistentry>
             <term>DRIVER_PCI_DMA</term>
             <listitem><para>
               support shared IRQs (note that this is required of PCI  drivers).
             </para></listitem>
           </varlistentry>
-          <varlistentry>
-            <term>DRIVER_IRQ_VBL</term>
-            <listitem><para>Unused. Deprecated.</para></listitem>
-          </varlistentry>
-          <varlistentry>
-            <term>DRIVER_DMA_QUEUE</term>
-            <listitem><para>
-              Should be set if the driver queues DMA requests and completes them
-              asynchronously.  Deprecated.
-            </para></listitem>
-          </varlistentry>
-          <varlistentry>
-            <term>DRIVER_FB_DMA</term>
-            <listitem><para>
-              Driver supports DMA to/from the framebuffer, mapping of frambuffer
-              DMA buffers to userspace will be supported. Deprecated.
-            </para></listitem>
-          </varlistentry>
-          <varlistentry>
-            <term>DRIVER_IRQ_VBL2</term>
-            <listitem><para>Unused. Deprecated.</para></listitem>
-          </varlistentry>
           <varlistentry>
             <term>DRIVER_GEM</term>
             <listitem><para>
@@ -2212,6 +2183,18 @@ void intel_crt_init(struct drm_device *dev)
 !Iinclude/drm/drm_rect.h
 !Edrivers/gpu/drm/drm_rect.c
     </sect2>
+    <sect2>
+      <title>Flip-work Helper Reference</title>
+!Pinclude/drm/drm_flip_work.h flip utils
+!Iinclude/drm/drm_flip_work.h
+!Edrivers/gpu/drm/drm_flip_work.c
+    </sect2>
+    <sect2>
+      <title>VMA Offset Manager</title>
+!Pdrivers/gpu/drm/drm_vma_manager.c vma offset manager
+!Edrivers/gpu/drm/drm_vma_manager.c
+!Iinclude/drm/drm_vma_manager.h
+    </sect2>
   </sect1>
 
   <!-- Internals: kms properties -->
@@ -2422,18 +2405,18 @@ void (*postclose) (struct drm_device *, struct drm_file *);</synopsis>
       </abstract>
       <para>
         The <methodname>firstopen</methodname> method is called by the DRM core
-       when an application opens a device that has no other opened file handle.
-       Similarly the <methodname>lastclose</methodname> method is called when
-       the last application holding a file handle opened on the device closes
-       it. Both methods are mostly used for UMS (User Mode Setting) drivers to
-       acquire and release device resources which should be done in the
-       <methodname>load</methodname> and <methodname>unload</methodname>
-       methods for KMS drivers.
+       for legacy UMS (User Mode Setting) drivers only when an application
+       opens a device that has no other opened file handle. UMS drivers can
+       implement it to acquire device resources. KMS drivers can't use the
+       method and must acquire resources in the <methodname>load</methodname>
+       method instead.
       </para>
       <para>
-        Note that the <methodname>lastclose</methodname> method is also called
-       at module unload time or, for hot-pluggable devices, when the device is
-       unplugged. The <methodname>firstopen</methodname> and
+       Similarly the <methodname>lastclose</methodname> method is called when
+       the last application holding a file handle opened on the device closes
+       it, for both UMS and KMS drivers. Additionally, the method is also
+       called at module unload time or, for hot-pluggable devices, when the
+       device is unplugged. The <methodname>firstopen</methodname> and
        <methodname>lastclose</methodname> calls can thus be unbalanced.
       </para>
       <para>
@@ -2462,7 +2445,12 @@ void (*postclose) (struct drm_device *, struct drm_file *);</synopsis>
       <para>
         The <methodname>lastclose</methodname> method should restore CRTC and
        plane properties to default value, so that a subsequent open of the
-       device will not inherit state from the previous user.
+       device will not inherit state from the previous user. It can also be
+       used to execute delayed power switching state changes, e.g. in
+       conjunction with the vga-switcheroo infrastructure. Beyond that KMS
+       drivers should not do any further cleanup. Only legacy UMS drivers might
+       need to clean up device state so that the vga console or an independent
+       fbdev driver could take over.
       </para>
     </sect2>
     <sect2>
@@ -2498,7 +2486,6 @@ void (*postclose) (struct drm_device *, struct drm_file *);</synopsis>
        <programlisting>
        .poll = drm_poll,
        .read = drm_read,
-       .fasync = drm_fasync,
        .llseek = no_llseek,
        </programlisting>
       </para>
index 8d7eb6bf6312f4934c613912b7930d127c12956a..34cada2ca71038694f11faa3f1060fe641b12bd1 100644 (file)
@@ -46,7 +46,9 @@ describing an IR signal are read from the chardev.</para>
 values. Pulses and spaces are only marked implicitly by their position. The
 data must start and end with a pulse, therefore, the data must always include
 an uneven number of samples. The write function must block until the data has
-been transmitted by the hardware.</para>
+been transmitted by the hardware. If more data is provided than the hardware
+can send, the driver returns EINVAL.</para>
+
 </section>
 
 <section id="lirc_ioctl">
index 48748499c097516e9ac39b2e8584eb5a8e92c0f2..098ff483802e6094b09b62f29c85545ab0df5cc3 100644 (file)
@@ -92,8 +92,8 @@ to add them.</para>
            <entry>int</entry>
            <entry><structfield>quality</structfield></entry>
            <entry>Deprecated. If <link linkend="jpeg-quality-control"><constant>
-           V4L2_CID_JPEG_IMAGE_QUALITY</constant></link> control is exposed by
-           a driver applications should use it instead and ignore this field.
+           V4L2_CID_JPEG_COMPRESSION_QUALITY</constant></link> control is exposed
+           by a driver applications should use it instead and ignore this field.
            </entry>
          </row>
          <row>
index fe122d6e686f50e873d637d3f08929d042df4335..a248f42a121ef2a037fbab7a74ad684a431d1b52 100644 (file)
@@ -1224,8 +1224,6 @@ in this page</entry>
 #define NAND_BBT_CREATE                0x00000200
 /* Search good / bad pattern through all pages of a block */
 #define NAND_BBT_SCANALLPAGES  0x00000400
-/* Scan block empty during good / bad block scan */
-#define NAND_BBT_SCANEMPTY     0x00000800
 /* Write bbt if neccecary */
 #define NAND_BBT_WRITE         0x00001000
 /* Read and write back block contents when writing bbt */
index 7890fae18529e3473700528a10f65d6ffd6f0533..01a675175a3674ef88a08ebb4f430dca3a4e4ec2 100644 (file)
@@ -57,8 +57,8 @@ i.e counters for the CPU0-3 did not change.
 
 Here is an example of limiting that same irq (44) to cpus 1024 to 1031:
 
-[root@moon 44]# echo 1024-1031 > smp_affinity
-[root@moon 44]# cat smp_affinity
+[root@moon 44]# echo 1024-1031 > smp_affinity_list
+[root@moon 44]# cat smp_affinity_list
 1024-1031
 
 Note that to do this with a bitmask would require 32 bitmasks of zero
index 6e97e73d87b507a996767c82bbf0fb6237bf7617..26b1e31d5a13e63a95fc68c5a0cb66daf744ab8f 100644 (file)
@@ -109,6 +109,16 @@ probably didn't even receive earlier versions of the patch.
 If the patch fixes a logged bug entry, refer to that bug entry by
 number and URL.
 
+If you want to refer to a specific commit, don't just refer to the
+SHA-1 ID of the commit. Please also include the oneline summary of
+the commit, to make it easier for reviewers to know what it is about.
+Example:
+
+       Commit e21d2170f36602ae2708 ("video: remove unnecessary
+       platform_set_drvdata()") removed the unnecessary
+       platform_set_drvdata(), but left the variable "dev" unused,
+       delete it.
+
 
 3) Separate your changes.
 
index d9be7a97dff35e7b1521e709e8a29c278d3fb434..64139a189a4c4f79bd1a43f3d9a5671bd40b0681 100644 (file)
@@ -207,7 +207,7 @@ passing those. One idea is to return this in _DSM method like:
                        Return (Local0)
                }
 
-Then the at25 SPI driver can get this configation by calling _DSM on its
+Then the at25 SPI driver can get this configuration by calling _DSM on its
 ACPI handle like:
 
        struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
index 9012bb03909443e4978d9d3a9fd6bbf0f095d907..4ae915a9f899cb7f34cb2bf13e6b1b79994d06e2 100644 (file)
@@ -78,7 +78,7 @@ to NULL.  Drivers should use the following idiom:
 The most common usage of these functions will probably be to specify
 the maximum time from when an interrupt occurs, to when the device
 becomes accessible.  To accomplish this, driver writers should use the
-set_max_mpu_wakeup_lat() function to to constrain the MPU wakeup
+set_max_mpu_wakeup_lat() function to constrain the MPU wakeup
 latency, and the set_max_dev_wakeup_lat() function to constrain the
 device wakeup latency (from clk_enable() to accessibility).  For
 example,
index 9c4d388daddc2f32411cdf341a8c7649079cbba6..5273c4d60e656ade8e4bbc8852c105ed8cdd2b24 100644 (file)
@@ -68,13 +68,23 @@ Image target is available instead.
 
 Requirement: MANDATORY
 
-The decompressed kernel image contains a 32-byte header as follows:
+The decompressed kernel image contains a 64-byte header as follows:
 
-  u32 magic    = 0x14000008;   /* branch to stext, little-endian */
-  u32 res0     = 0;            /* reserved */
+  u32 code0;                   /* Executable code */
+  u32 code1;                   /* Executable code */
   u64 text_offset;             /* Image load offset */
+  u64 res0     = 0;            /* reserved */
   u64 res1     = 0;            /* reserved */
   u64 res2     = 0;            /* reserved */
+  u64 res3     = 0;            /* reserved */
+  u64 res4     = 0;            /* reserved */
+  u32 magic    = 0x644d5241;   /* Magic number, little endian, "ARM\x64" */
+  u32 res5 = 0;                /* reserved */
+
+
+Header notes:
+
+- code0/code1 are responsible for branching to stext.
 
 The image must be placed at the specified offset (currently 0x80000)
 from the start of the system RAM and called there. The start of the
index 9887f0414c16642d204296d9c0b8abdc8096a5da..f3bc72945cbd3827872e4e25163830e3fef72571 100644 (file)
@@ -69,7 +69,7 @@ one, this value should be decreased relative to fifo_expire_async.
 group_idle
 -----------
 This parameter forces idling at the CFQ group level instead of CFQ
-queue level. This was introduced after after a bottleneck was observed
+queue level. This was introduced after a bottleneck was observed
 in higher end storage due to idle on sequential queue and allow dispatch
 from a single queue. The idea with this parameter is that it can be run with
 slice_idle=0 and group_idle=8, so that idling does not happen on individual
index 9b728dc17535f6f8c1630fe2e8f3a96e997feac9..d79b008e4a32896d86a8375d163757fcb8f5aa6f 100644 (file)
@@ -57,7 +57,7 @@ changes occur:
        interface must make sure that any previous page table
        modifications for the address space 'vma->vm_mm' in the range
        'start' to 'end-1' will be visible to the cpu.  That is, after
-       running, here will be no entries in the TLB for 'mm' for
+       running, there will be no entries in the TLB for 'mm' for
        virtual addresses in the range 'start' to 'end-1'.
 
        The "vma" is the backing store being used for the region.
@@ -375,8 +375,8 @@ maps this page at its virtual address.
 
   void flush_icache_page(struct vm_area_struct *vma, struct page *page)
        All the functionality of flush_icache_page can be implemented in
-       flush_dcache_page and update_mmu_cache. In 2.7 the hope is to
-       remove this interface completely.
+       flush_dcache_page and update_mmu_cache. In the future, the hope
+       is to remove this interface completely.
 
 The final category of APIs is for I/O to deliberately aliased address
 ranges inside the kernel.  Such aliases are set up by use of the
index 19fa98e07bf7fe79f403042d14858677f4474839..40282e6179135abc353bc8b04a86137820794468 100644 (file)
@@ -50,8 +50,6 @@ What shall this struct cpufreq_driver contain?
 
 cpufreq_driver.name -          The name of this driver.
 
-cpufreq_driver.owner -         THIS_MODULE;
-
 cpufreq_driver.init -          A pointer to the per-CPU initialization 
                                function.
 
index 4823577c65092f967ef3d46eb024fa8a1366ab7e..2e0617936e8f7f7624d0920be7ff3ae3d6217f68 100644 (file)
@@ -276,7 +276,7 @@ mainline get there via -mm.
 The current -mm patch is available in the "mmotm" (-mm of the moment)
 directory at:
 
-       http://userweb.kernel.org/~akpm/mmotm/
+       http://www.ozlabs.org/~akpm/mmotm/
 
 Use of the MMOTM tree is likely to be a frustrating experience, though;
 there is a definite chance that it will not even compile.
@@ -287,7 +287,7 @@ the mainline is expected to look like after the next merge window closes.
 Linux-next trees are announced on the linux-kernel and linux-next mailing
 lists when they are assembled; they can be downloaded from:
 
-       http://www.kernel.org/pub/linux/kernel/people/sfr/linux-next/
+       http://www.kernel.org/pub/linux/kernel/next/
 
 Some information about linux-next has been gathered at:
 
index e8cdf7241b66b3b413b0253042b4d4a54700dc4e..33d45ee0b737fade096136d50425caf46e344a47 100644 (file)
@@ -50,14 +50,16 @@ other parameters detailed later):
    which are dirty, and extra hints for use by the policy object.
    This information could be put on the cache device, but having it
    separate allows the volume manager to configure it differently,
-   e.g. as a mirror for extra robustness.
+   e.g. as a mirror for extra robustness.  This metadata device may only
+   be used by a single cache device.
 
 Fixed block size
 ----------------
 
 The origin is divided up into blocks of a fixed size.  This block size
 is configurable when you first create the cache.  Typically we've been
-using block sizes of 256k - 1024k.
+using block sizes of 256KB - 1024KB.  The block size must be between 64
+(32KB) and 2097152 (1GB) and a multiple of 64 (32KB).
 
 Having a fixed block size simplifies the target a lot.  But it is
 something of a compromise.  For instance, a small part of a block may be
diff --git a/Documentation/device-mapper/statistics.txt b/Documentation/device-mapper/statistics.txt
new file mode 100644 (file)
index 0000000..2a1673a
--- /dev/null
@@ -0,0 +1,186 @@
+DM statistics
+=============
+
+Device Mapper supports the collection of I/O statistics on user-defined
+regions of a DM device.         If no regions are defined no statistics are
+collected so there isn't any performance impact.  Only bio-based DM
+devices are currently supported.
+
+Each user-defined region specifies a starting sector, length and step.
+Individual statistics will be collected for each step-sized area within
+the range specified.
+
+The I/O statistics counters for each step-sized area of a region are
+in the same format as /sys/block/*/stat or /proc/diskstats (see:
+Documentation/iostats.txt).  But two extra counters (12 and 13) are
+provided: total time spent reading and writing in milliseconds.         All
+these counters may be accessed by sending the @stats_print message to
+the appropriate DM device via dmsetup.
+
+Each region has a corresponding unique identifier, which we call a
+region_id, that is assigned when the region is created.         The region_id
+must be supplied when querying statistics about the region, deleting the
+region, etc.  Unique region_ids enable multiple userspace programs to
+request and process statistics for the same DM device without stepping
+on each other's data.
+
+The creation of DM statistics will allocate memory via kmalloc or
+fallback to using vmalloc space.  At most, 1/4 of the overall system
+memory may be allocated by DM statistics.  The admin can see how much
+memory is used by reading
+/sys/module/dm_mod/parameters/stats_current_allocated_bytes
+
+Messages
+========
+
+    @stats_create <range> <step> [<program_id> [<aux_data>]]
+
+       Create a new region and return the region_id.
+
+       <range>
+         "-" - whole device
+         "<start_sector>+<length>" - a range of <length> 512-byte sectors
+                                     starting with <start_sector>.
+
+       <step>
+         "<area_size>" - the range is subdivided into areas each containing
+                         <area_size> sectors.
+         "/<number_of_areas>" - the range is subdivided into the specified
+                                number of areas.
+
+       <program_id>
+         An optional parameter.  A name that uniquely identifies
+         the userspace owner of the range.  This groups ranges together
+         so that userspace programs can identify the ranges they
+         created and ignore those created by others.
+         The kernel returns this string back in the output of
+         @stats_list message, but it doesn't use it for anything else.
+
+       <aux_data>
+         An optional parameter.  A word that provides auxiliary data
+         that is useful to the client program that created the range.
+         The kernel returns this string back in the output of
+         @stats_list message, but it doesn't use this value for anything.
+
+    @stats_delete <region_id>
+
+       Delete the region with the specified id.
+
+       <region_id>
+         region_id returned from @stats_create
+
+    @stats_clear <region_id>
+
+       Clear all the counters except the in-flight i/o counters.
+
+       <region_id>
+         region_id returned from @stats_create
+
+    @stats_list [<program_id>]
+
+       List all regions registered with @stats_create.
+
+       <program_id>
+         An optional parameter.
+         If this parameter is specified, only matching regions
+         are returned.
+         If it is not specified, all regions are returned.
+
+       Output format:
+         <region_id>: <start_sector>+<length> <step> <program_id> <aux_data>
+
+    @stats_print <region_id> [<starting_line> <number_of_lines>]
+
+       Print counters for each step-sized area of a region.
+
+       <region_id>
+         region_id returned from @stats_create
+
+       <starting_line>
+         The index of the starting line in the output.
+         If omitted, all lines are returned.
+
+       <number_of_lines>
+         The number of lines to include in the output.
+         If omitted, all lines are returned.
+
+       Output format for each step-sized area of a region:
+
+         <start_sector>+<length> counters
+
+         The first 11 counters have the same meaning as
+         /sys/block/*/stat or /proc/diskstats.
+
+         Please refer to Documentation/iostats.txt for details.
+
+         1. the number of reads completed
+         2. the number of reads merged
+         3. the number of sectors read
+         4. the number of milliseconds spent reading
+         5. the number of writes completed
+         6. the number of writes merged
+         7. the number of sectors written
+         8. the number of milliseconds spent writing
+         9. the number of I/Os currently in progress
+         10. the number of milliseconds spent doing I/Os
+         11. the weighted number of milliseconds spent doing I/Os
+
+         Additional counters:
+         12. the total time spent reading in milliseconds
+         13. the total time spent writing in milliseconds
+
+    @stats_print_clear <region_id> [<starting_line> <number_of_lines>]
+
+       Atomically print and then clear all the counters except the
+       in-flight i/o counters.  Useful when the client consuming the
+       statistics does not want to lose any statistics (those updated
+       between printing and clearing).
+
+       <region_id>
+         region_id returned from @stats_create
+
+       <starting_line>
+         The index of the starting line in the output.
+         If omitted, all lines are printed and then cleared.
+
+       <number_of_lines>
+         The number of lines to process.
+         If omitted, all lines are printed and then cleared.
+
+    @stats_set_aux <region_id> <aux_data>
+
+       Store auxiliary data aux_data for the specified region.
+
+       <region_id>
+         region_id returned from @stats_create
+
+       <aux_data>
+         The string that identifies data which is useful to the client
+         program that created the range.  The kernel returns this
+         string back in the output of @stats_list message, but it
+         doesn't use this value for anything.
+
+Examples
+========
+
+Subdivide the DM device 'vol' into 100 pieces and start collecting
+statistics on them:
+
+  dmsetup message vol 0 @stats_create - /100
+
+Set the auxillary data string to "foo bar baz" (the escape for each
+space must also be escaped, otherwise the shell will consume them):
+
+  dmsetup message vol 0 @stats_set_aux 0 foo\\ bar\\ baz
+
+List the statistics:
+
+  dmsetup message vol 0 @stats_list
+
+Print the statistics:
+
+  dmsetup message vol 0 @stats_print 0
+
+Delete the statistics:
+
+  dmsetup message vol 0 @stats_delete 0
index 30b8b83bd333401a2cc1138d664d6086b4d47aef..50c44cf79b0e5f4467fc0af58e3af40234cef94e 100644 (file)
@@ -99,13 +99,14 @@ Using an existing pool device
                 $data_block_size $low_water_mark"
 
 $data_block_size gives the smallest unit of disk space that can be
-allocated at a time expressed in units of 512-byte sectors.  People
-primarily interested in thin provisioning may want to use a value such
-as 1024 (512KB).  People doing lots of snapshotting may want a smaller value
-such as 128 (64KB).  If you are not zeroing newly-allocated data,
-a larger $data_block_size in the region of 256000 (128MB) is suggested.
-$data_block_size must be the same for the lifetime of the
-metadata device.
+allocated at a time expressed in units of 512-byte sectors.
+$data_block_size must be between 128 (64KB) and 2097152 (1GB) and a
+multiple of 128 (64KB).  $data_block_size cannot be changed after the
+thin-pool is created.  People primarily interested in thin provisioning
+may want to use a value such as 1024 (512KB).  People doing lots of
+snapshotting may want a smaller value such as 128 (64KB).  If you are
+not zeroing newly-allocated data, a larger $data_block_size in the
+region of 256000 (128MB) is suggested.
 
 $low_water_mark is expressed in blocks of size $data_block_size.  If
 free space on the data device drops below this level then a dm event
index 69ddf9fad2dcccc36bbaa330b96170d2eb9b213d..c0c7626fd0ff760075c2c97c866c9f83cdb21819 100644 (file)
@@ -16,9 +16,11 @@ Required properties:
      performs the same operation).
        "marvell,"aurora-outer-cache: Marvell Controller designed to be
         compatible with the ARM one with outer cache mode.
-       "bcm,bcm11351-a2-pl310-cache": For Broadcom bcm11351 chipset where an
+       "brcm,bcm11351-a2-pl310-cache": For Broadcom bcm11351 chipset where an
        offset needs to be added to the address before passing down to the L2
        cache controller
+       "bcm,bcm11351-a2-pl310-cache": DEPRECATED by
+                                      "brcm,bcm11351-a2-pl310-cache"
 - cache-unified : Specifies the cache is a unified cache.
 - cache-level : Should be set to 2 for a level 2 cache.
 - reg : Physical base address and size of cache controller's memory mapped
index 69b5ab0b5f4b4eb4f330b820900d2eebf2dbf898..d11d80006a19037b50d0922054185ffe6371373c 100644 (file)
@@ -22,7 +22,7 @@ This contains the board-specific information.
 - compatible: must be "stericsson,s365".
 - vana15-supply: the regulator supplying the 1.5V to drive the
   board.
-- syscon: a pointer to the syscon node so we can acccess the
+- syscon: a pointer to the syscon node so we can access the
   syscon registers to set the board as self-powered.
 
 Example:
index 9cf3f25544c794607e07e331f35752d8f59d578c..5580e9c4bd8584034a9d9812c491962f08fa253e 100644 (file)
@@ -32,8 +32,8 @@ numbers - see motherboard's TRM for more details.
 The node describing a config device must refer to the sysreg node via
 "arm,vexpress,config-bridge" phandle (can be also defined in the node's
 parent) and relies on the board topology properties - see main vexpress
-node documentation for more details. It must must also define the
-following property:
+node documentation for more details. It must also define the following
+property:
 - arm,vexpress-sysreg,func : must contain two cells:
   - first cell defines function number (eg. 1 for clock generator,
     2 for voltage regulators etc.)
index 3ec0c5c4f0e91da8b520de71996fa6637de05705..89de1564950ce64cf2bdb1e087da04e2043a0db6 100644 (file)
@@ -4,27 +4,17 @@ SATA nodes are defined to describe on-chip Serial ATA controllers.
 Each SATA controller should have its own node.
 
 Required properties:
-- compatible        : compatible list, contains "calxeda,hb-ahci" or "snps,spear-ahci"
+- compatible        : compatible list, contains "snps,spear-ahci"
 - interrupts        : <interrupt mapping for SATA IRQ>
 - reg               : <registers mapping>
 
 Optional properties:
-- calxeda,port-phys: phandle-combophy and lane assignment, which maps each
-                       SATA port to a combophy and a lane within that
-                       combophy
-- calxeda,sgpio-gpio: phandle-gpio bank, bit offset, and default on or off,
-                       which indicates that the driver supports SGPIO
-                       indicator lights using the indicated GPIOs
-- calxeda,led-order : a u32 array that map port numbers to offsets within the
-                       SGPIO bitstream.
 - dma-coherent      : Present if dma operations are coherent
 
 Example:
         sata@ffe08000 {
-               compatible = "calxeda,hb-ahci";
-                reg = <0xffe08000 0x1000>;
-                interrupts = <115>;
-               calxeda,port-phys = <&combophy5 0 &combophy0 0 &combophy0 1
-                                       &combophy0 2 &combophy0 3>;
+               compatible = "snps,spear-ahci";
+               reg = <0xffe08000 0x1000>;
+               interrupts = <115>;
 
         };
diff --git a/Documentation/devicetree/bindings/ata/sata_highbank.txt b/Documentation/devicetree/bindings/ata/sata_highbank.txt
new file mode 100644 (file)
index 0000000..aa83407
--- /dev/null
@@ -0,0 +1,44 @@
+* Calxeda AHCI SATA Controller
+
+SATA nodes are defined to describe on-chip Serial ATA controllers.
+The Calxeda SATA controller mostly conforms to the AHCI interface
+with some special extensions to add functionality.
+Each SATA controller should have its own node.
+
+Required properties:
+- compatible        : compatible list, contains "calxeda,hb-ahci"
+- interrupts        : <interrupt mapping for SATA IRQ>
+- reg               : <registers mapping>
+
+Optional properties:
+- dma-coherent      : Present if dma operations are coherent
+- calxeda,port-phys : phandle-combophy and lane assignment, which maps each
+                       SATA port to a combophy and a lane within that
+                       combophy
+- calxeda,sgpio-gpio: phandle-gpio bank, bit offset, and default on or off,
+                       which indicates that the driver supports SGPIO
+                       indicator lights using the indicated GPIOs
+- calxeda,led-order : a u32 array that map port numbers to offsets within the
+                       SGPIO bitstream.
+- calxeda,tx-atten  : a u32 array that contains TX attenuation override
+                       codes, one per port. The upper 3 bytes are always
+                       0 and thus ignored.
+- calxeda,pre-clocks : a u32 that indicates the number of additional clock
+                       cycles to transmit before sending an SGPIO pattern
+- calxeda,post-clocks: a u32 that indicates the number of additional clock
+                       cycles to transmit after sending an SGPIO pattern
+
+Example:
+        sata@ffe08000 {
+               compatible = "calxeda,hb-ahci";
+               reg = <0xffe08000 0x1000>;
+               interrupts = <115>;
+               dma-coherent;
+               calxeda,port-phys = <&combophy5 0 &combophy0 0 &combophy0 1
+                                       &combophy0 2 &combophy0 3>;
+               calxeda,sgpio-gpio = <&gpioh 5 1 &gpioh 6 1 &gpioh 7 1>;
+               calxeda,led-order = <4 0 1 2 3>;
+               calxeda,tx-atten = <0xff 22 0xff 0xff 23>;
+               calxeda,pre-clocks = <10>;
+               calxeda,post-clocks = <0>;
+        };
index d847758f2b20c8b553d0cf8ab287219cfa858450..b0e97144cfb1c1b00ed41abd237cdcc54c0dc4c9 100644 (file)
@@ -5,7 +5,7 @@ TI C6X SoCs contain a region of miscellaneous registers which provide various
 function for SoC control or status. Details vary considerably among from SoC
 to SoC with no two being alike.
 
-In general, the Device State Configuraion Registers (DSCR) will provide one or
+In general, the Device State Configuration Registers (DSCR) will provide one or
 more configuration registers often protected by a lock register where one or
 more key values must be written to a lock register in order to unlock the
 configuration register for writes. These configuration register may be used to
index a1201802f90d0d8fdcfd552c7d359c3cc7b79d31..75e2e1999f87dabdb7eccc94ba01d472533a69fd 100644 (file)
@@ -2,7 +2,7 @@
 
 The Samsung Audio Subsystem clock controller generates and supplies clocks
 to Audio Subsystem block available in the S5PV210 and Exynos SoCs. The clock
-binding described here is applicable to all SoC's in Exynos family.
+binding described here is applicable to all SoCs in Exynos family.
 
 Required Properties:
 
index 7fc09773de4688b56c081ab2e0a2738dadf2202a..40e0cf1f7b9911b5cb9450da49cab031d1dccada 100644 (file)
@@ -17,7 +17,7 @@ Optional properties for the SRC node:
 - disable-mxtal: if present this will disable the MXTALO,
   i.e. the driver output for the main (~19.2 MHz) chrystal,
   if the board has its own circuitry for providing this
-  osciallator
+  oscillator
 
 
 PLL nodes: these nodes represent the two PLLs on the system,
index c280a0e6f42dcb9bf4ccbf1549e7458856f4299f..e1f343c7a34b7b10ea462a39b5e9a320ef463ba6 100644 (file)
@@ -18,14 +18,14 @@ dma0: dma@ffffec00 {
 
 DMA clients connected to the Atmel DMA controller must use the format
 described in the dma.txt file, using a three-cell specifier for each channel:
-a phandle plus two interger cells.
+a phandle plus two integer cells.
 The three cells in order are:
 
 1. A phandle pointing to the DMA controller.
 2. The memory interface (16 most significant bits), the peripheral interface
 (16 less significant bits).
 3. Parameters for the at91 DMA configuration register which are device
-dependant:
+dependent:
   - bit 7-0: peripheral identifier for the hardware handshaking interface. The
   identifier can be different for tx and rx.
   - bit 11-8: FIFO configuration. 0 for half FIFO, 1 for ALAP, 1 for ASAP.
index 2717ecb47db9ac4a30448d4f6af6a361a9d8ec67..7bd8847d6394e6bbfb7df8b43eea2af4375cc883 100644 (file)
@@ -34,7 +34,7 @@ Clients have to specify the DMA requests with phandles in a list.
 Required properties:
 - dmas: List of one or more DMA request specifiers. One DMA request specifier
     consists of a phandle to the DMA controller followed by the integer
-    specifiying the request line.
+    specifying the request line.
 - dma-names: List of string identifiers for the DMA requests. For the correct
     names, have a look at the specific client driver.
 
index bea5b73a739009634c579d20e892c1c4a17ebc92..a8c21c256baa5afbcb4b1a43d150ca47a5c4e3ea 100644 (file)
@@ -37,14 +37,14 @@ Each dmas request consists of 4 cells:
   1. A phandle pointing to the DMA controller
   2. Device Type
   3. The DMA request line number (only when 'use fixed channel' is set)
-  4. A 32bit mask specifying; mode, direction and endianess [NB: This list will grow]
+  4. A 32bit mask specifying; mode, direction and endianness [NB: This list will grow]
         0x00000001: Mode:
                 Logical channel when unset
                 Physical channel when set
         0x00000002: Direction:
                 Memory to Device when unset
                 Device to Memory when set
-        0x00000004: Endianess:
+        0x00000004: Endianness:
                 Little endian when unset
                 Big endian when set
         0x00000008: Use fixed channel:
diff --git a/Documentation/devicetree/bindings/hid/hid-over-i2c.txt b/Documentation/devicetree/bindings/hid/hid-over-i2c.txt
new file mode 100644 (file)
index 0000000..488edcb
--- /dev/null
@@ -0,0 +1,28 @@
+* HID over I2C Device-Tree bindings
+
+HID over I2C provides support for various Human Interface Devices over the
+I2C bus. These devices can be for example touchpads, keyboards, touch screens
+or sensors.
+
+The specification has been written by Microsoft and is currently available here:
+http://msdn.microsoft.com/en-us/library/windows/hardware/hh852380.aspx
+
+If this binding is used, the kernel module i2c-hid will handle the communication
+with the device and the generic hid core layer will handle the protocol.
+
+Required properties:
+- compatible: must be "hid-over-i2c"
+- reg: i2c slave address
+- hid-descr-addr: HID descriptor address
+- interrupt-parent: the phandle for the interrupt controller
+- interrupts: interrupt line
+
+Example:
+
+       i2c-hid-dev@2c {
+               compatible = "hid-over-i2c";
+               reg = <0x2c>;
+               hid-descr-addr = <0x0020>;
+               interrupt-parent = <&gpx3>;
+               interrupts = <3 2>;
+       };
diff --git a/Documentation/devicetree/bindings/input/input-reset.txt b/Documentation/devicetree/bindings/input/input-reset.txt
new file mode 100644 (file)
index 0000000..2bb2626
--- /dev/null
@@ -0,0 +1,33 @@
+Input: sysrq reset sequence
+
+A simple binding to represent a set of keys as described in
+include/uapi/linux/input.h. This is to communicate a sequence of keys to the
+sysrq driver. Upon holding the keys for a specified amount of time (if
+specified) the system is sync'ed and reset.
+
+Key sequences are global to the system but all the keys in a set must be coming
+from the same input device.
+
+The /chosen node should contain a 'linux,sysrq-reset-seq' child node to define
+a set of keys.
+
+Required property:
+sysrq-reset-seq: array of Linux keycodes, one keycode per cell.
+
+Optional property:
+timeout-ms: duration keys must be pressed together in milliseconds before
+generating a sysrq. If omitted the system is rebooted immediately when a valid
+sequence has been recognized.
+
+Example:
+
+ chosen {
+                linux,sysrq-reset-seq {
+                        keyset = <0x03
+                                  0x04
+                                  0x0a>;
+                        timeout-ms = <3000>;
+                };
+         };
+
+Would represent KEY_2, KEY_3 and KEY_9.
diff --git a/Documentation/devicetree/bindings/media/i2c/adv7343.txt b/Documentation/devicetree/bindings/media/i2c/adv7343.txt
new file mode 100644 (file)
index 0000000..5653bc2
--- /dev/null
@@ -0,0 +1,48 @@
+* Analog Devices adv7343 video encoder
+
+The ADV7343 are high speed, digital-to-analog video encoders in a 64-lead LQFP
+package. Six high speed, 3.3 V, 11-bit video DACs provide support for composite
+(CVBS), S-Video (Y-C), and component (YPrPb/RGB) analog outputs in standard
+definition (SD), enhanced definition (ED), or high definition (HD) video
+formats.
+
+Required Properties :
+- compatible: Must be "adi,adv7343"
+
+Optional Properties :
+- adi,power-mode-sleep-mode: on enable the current consumption is reduced to
+                             micro ampere level. All DACs and the internal PLL
+                             circuit are disabled.
+- adi,power-mode-pll-ctrl: PLL and oversampling control. This control allows
+                          internal PLL 1 circuit to be powered down and the
+                          oversampling to be switched off.
+- ad,adv7343-power-mode-dac: array configuring the power on/off DAC's 1..6,
+                             0 = OFF and 1 = ON, Default value when this
+                             property is not specified is <0 0 0 0 0 0>.
+- ad,adv7343-sd-config-dac-out: array configure SD DAC Output's 1 and 2, 0 = OFF
+                                and 1 = ON, Default value when this property is
+                                not specified is <0 0>.
+
+Example:
+
+i2c0@1c22000 {
+       ...
+       ...
+
+       adv7343@2a {
+               compatible = "adi,adv7343";
+               reg = <0x2a>;
+
+               port {
+                       adv7343_1: endpoint {
+                                       adi,power-mode-sleep-mode;
+                                       adi,power-mode-pll-ctrl;
+                                       /* Use DAC1..3, DAC6 */
+                                       adi,dac-enable = <1 1 1 0 0 1>;
+                                       /* Use SD DAC output 1 */
+                                       adi,sd-dac-enable = <1 0>;
+                       };
+               };
+       };
+       ...
+};
diff --git a/Documentation/devicetree/bindings/media/i2c/ths8200.txt b/Documentation/devicetree/bindings/media/i2c/ths8200.txt
new file mode 100644 (file)
index 0000000..285f6ae
--- /dev/null
@@ -0,0 +1,19 @@
+* Texas Instruments THS8200 video encoder
+
+The ths8200 device is a digital to analog converter used in DVD players, video
+recorders, set-top boxes.
+
+Required Properties :
+- compatible : value must be "ti,ths8200"
+
+Example:
+
+       i2c0@1c22000 {
+               ...
+               ...
+               ths8200@5c {
+                       compatible = "ti,ths8200";
+                       reg = <0x5c>;
+               };
+               ...
+       };
diff --git a/Documentation/devicetree/bindings/metag/pdc-intc.txt b/Documentation/devicetree/bindings/metag/pdc-intc.txt
new file mode 100644 (file)
index 0000000..a691185
--- /dev/null
@@ -0,0 +1,105 @@
+* ImgTec Powerdown Controller (PDC) Interrupt Controller Binding
+
+This binding specifies what properties must be available in the device tree
+representation of a PDC IRQ controller. This has a number of input interrupt
+lines which can wake the system, and are passed on through output interrupt
+lines.
+
+Required properties:
+
+    - compatible: Specifies the compatibility list for the interrupt controller.
+      The type shall be <string> and the value shall include "img,pdc-intc".
+
+    - reg: Specifies the base PDC physical address(s) and size(s) of the
+      addressable register space. The type shall be <prop-encoded-array>.
+
+    - interrupt-controller: The presence of this property identifies the node
+      as an interrupt controller. No property value shall be defined.
+
+    - #interrupt-cells: Specifies the number of cells needed to encode an
+      interrupt source. The type shall be a <u32> and the value shall be 2.
+
+    - num-perips: Number of waking peripherals.
+
+    - num-syswakes: Number of SysWake inputs.
+
+    - interrupts: List of interrupt specifiers. The first specifier shall be the
+      shared SysWake interrupt, and remaining specifiers shall be PDC peripheral
+      interrupts in order.
+
+* Interrupt Specifier Definition
+
+  Interrupt specifiers consists of 2 cells encoded as follows:
+
+    - <1st-cell>: The interrupt-number that identifies the interrupt source.
+                    0-7:  Peripheral interrupts
+                    8-15: SysWake interrupts
+
+    - <2nd-cell>: The level-sense information, encoded using the Linux interrupt
+                  flags as follows (only 4 valid for peripheral interrupts):
+                    0 = none (decided by software)
+                    1 = low-to-high edge triggered
+                    2 = high-to-low edge triggered
+                    3 = both edge triggered
+                    4 = active-high level-sensitive (required for perip irqs)
+                    8 = active-low level-sensitive
+
+* Examples
+
+Example 1:
+
+       /*
+        * TZ1090 PDC block
+        */
+       pdc: pdc@0x02006000 {
+               // This is an interrupt controller node.
+               interrupt-controller;
+
+               // Two cells to encode interrupt sources.
+               #interrupt-cells = <2>;
+
+               // Offset address of 0x02006000 and size of 0x1000.
+               reg = <0x02006000 0x1000>;
+
+               // Compatible with Meta hardware trigger block.
+               compatible = "img,pdc-intc";
+
+               // Three peripherals are connected.
+               num-perips = <3>;
+
+               // Four SysWakes are connected.
+               num-syswakes = <4>;
+
+               interrupts = <18 4 /* level */>, /* Syswakes */
+                            <30 4 /* level */>, /* Peripheral 0 (RTC) */
+                            <29 4 /* level */>, /* Peripheral 1 (IR) */
+                            <31 4 /* level */>; /* Peripheral 2 (WDT) */
+       };
+
+Example 2:
+
+       /*
+        * An SoC peripheral that is wired through the PDC.
+        */
+       rtc0 {
+               // The interrupt controller that this device is wired to.
+               interrupt-parent = <&pdc>;
+
+               // Interrupt source Peripheral 0
+               interrupts = <0   /* Peripheral 0 (RTC) */
+                             4>; /* IRQ_TYPE_LEVEL_HIGH */
+       };
+
+Example 3:
+
+       /*
+        * An interrupt generating device that is wired to a SysWake pin.
+        */
+       touchscreen0 {
+               // The interrupt controller that this device is wired to.
+               interrupt-parent = <&pdc>;
+
+               // Interrupt source SysWake 0 that is active-low level-sensitive
+               interrupts = <8 /* SysWake0 */
+                             8 /* IRQ_TYPE_LEVEL_LOW */>;
+       };
index e0e59c58a1f92120864af95355655abb2805c34b..5f229c5f6da96c09d011fe5bec75eda2ab2ba56d 100644 (file)
@@ -4,7 +4,7 @@ Google's ChromeOS EC is a Cortex-M device which talks to the AP and
 implements various function such as keyboard and battery charging.
 
 The EC can be connect through various means (I2C, SPI, LPC) and the
-compatible string used depends on the inteface. Each connection method has
+compatible string used depends on the interface. Each connection method has
 its own driver which connects to the top level interface-agnostic EC driver.
 Other Linux driver (such as cros-ec-keyb for the matrix keyboard) connect to
 the top-level driver.
index 892537d1a48f8fc394aa6c1330fa338c7a27bce7..e5f0f830346167e1a91f3430ecdbb0ca92ab1444 100644 (file)
@@ -5,6 +5,7 @@ twl6035 (palmas)
 twl6037 (palmas)
 tps65913 (palmas)
 tps65914 (palmas)
+tps659038
 
 Required properties:
 - compatible : Should be from the list
@@ -14,6 +15,7 @@ Required properties:
   ti,tps65913
   ti,tps65914
   ti,tps80036
+  ti,tps659038
 and also the generic series names
   ti,palmas
 - interrupt-controller : palmas has its own internal IRQs
diff --git a/Documentation/devicetree/bindings/mfd/s2mps11.txt b/Documentation/devicetree/bindings/mfd/s2mps11.txt
new file mode 100644 (file)
index 0000000..c9332c6
--- /dev/null
@@ -0,0 +1,109 @@
+
+* Samsung S2MPS11 Voltage and Current Regulator
+
+The Samsung S2MPS11 is a multi-function device which includes voltage and
+current regulators, RTC, charger controller and other sub-blocks. It is
+interfaced to the host controller using a I2C interface. Each sub-block is
+addressed by the host system using different I2C slave address.
+
+Required properties:
+- compatible: Should be "samsung,s2mps11-pmic".
+- reg: Specifies the I2C slave address of the pmic block. It should be 0x66.
+
+Optional properties:
+- interrupt-parent: Specifies the phandle of the interrupt controller to which
+  the interrupts from s2mps11 are delivered to.
+- interrupts: Interrupt specifiers for interrupt sources.
+
+Optional nodes:
+- clocks: s2mps11 provides three(AP/CP/BT) buffered 32.768 KHz outputs, so to
+  register these as clocks with common clock framework instantiate a sub-node
+  named "clocks". It uses the common clock binding documented in :
+  [Documentation/devicetree/bindings/clock/clock-bindings.txt]
+  - #clock-cells: should be 1.
+
+  - The following is the list of clocks generated by the controller. Each clock
+    is assigned an identifier and client nodes use this identifier to specify
+    the clock which they consume.
+    Clock               ID
+    ----------------------
+    32KhzAP            0
+    32KhzCP            1
+    32KhzBT            2
+
+- regulators: The regulators of s2mps11 that have to be instantiated should be
+included in a sub-node named 'regulators'. Regulator nodes included in this
+sub-node should be of the format as listed below.
+
+       regulator_name {
+               [standard regulator constraints....];
+       };
+
+ regulator-ramp-delay for BUCKs = [6250/12500/25000(default)/50000] uV/us
+
+ BUCK[2/3/4/6] supports disabling ramp delay on hardware, so explicitly
+ regulator-ramp-delay = <0> can be used for them to disable ramp delay.
+ In absence of regulator-ramp-delay property, default ramp delay will be used.
+
+NOTE: Some BUCKs share the ramp rate setting i.e. same ramp value will be set
+for a particular group of BUCKs. So provide same regulator-ramp-delay<value>.
+Grouping of BUCKs sharing ramp rate setting is as follow : BUCK[1, 6],
+BUCK[3, 4], and BUCK[7, 8, 10]
+
+The regulator constraints inside the regulator nodes use the standard regulator
+bindings which are documented elsewhere.
+
+The following are the names of the regulators that the s2mps11 pmic block
+supports. Note: The 'n' in LDOn and BUCKn represents the LDO or BUCK number
+as per the datasheet of s2mps11.
+
+       - LDOn
+                 - valid values for n are 1 to 28
+                 - Example: LDO1, LDO2, LDO28
+       - BUCKn
+                 - valid values for n are 1 to 9.
+                 - Example: BUCK1, BUCK2, BUCK9
+
+Example:
+
+       s2mps11_pmic@66 {
+               compatible = "samsung,s2mps11-pmic";
+               reg = <0x66>;
+
+               s2m_osc: clocks{
+                       #clock-cells = <1>;
+                       clock-output-names = "xx", "yy", "zz";
+               };
+
+               regulators {
+                       ldo1_reg: LDO1 {
+                               regulator-name = "VDD_ABB_3.3V";
+                               regulator-min-microvolt = <3300000>;
+                               regulator-max-microvolt = <3300000>;
+                       };
+
+                       ldo2_reg: LDO2 {
+                               regulator-name = "VDD_ALIVE_1.1V";
+                               regulator-min-microvolt = <1100000>;
+                               regulator-max-microvolt = <1100000>;
+                               regulator-always-on;
+                       };
+
+                       buck1_reg: BUCK1 {
+                               regulator-name = "vdd_mif";
+                               regulator-min-microvolt = <950000>;
+                               regulator-max-microvolt = <1350000>;
+                               regulator-always-on;
+                               regulator-boot-on;
+                       };
+
+                       buck2_reg: BUCK2 {
+                               regulator-name = "vdd_arm";
+                               regulator-min-microvolt = <950000>;
+                               regulator-max-microvolt = <1350000>;
+                               regulator-always-on;
+                               regulator-boot-on;
+                               regulator-ramp-delay = <50000>;
+                       };
+               };
+       };
index 38e51ad2e07e6b5665d6b35850734e5073d62bc0..a45ae08c8ed1d3fef886c34c2c07a501a0719520 100644 (file)
@@ -7,9 +7,30 @@ Required properties:
 - reg: Should contain SSC registers location and length
 - interrupts: Should contain SSC interrupt
 
-Example:
+
+Required properties for devices compatible with "atmel,at91sam9g45-ssc":
+- dmas: DMA specifier, consisting of a phandle to DMA controller node,
+  the memory interface and SSC DMA channel ID (for tx and rx).
+  See Documentation/devicetree/bindings/dma/atmel-dma.txt for details.
+- dma-names: Must be "tx", "rx".
+
+Examples:
+- PDC transfer:
 ssc0: ssc@fffbc000 {
        compatible = "atmel,at91rm9200-ssc";
        reg = <0xfffbc000 0x4000>;
        interrupts = <14 4 5>;
 };
+
+- DMA transfer:
+ssc0: ssc@f0010000 {
+      compatible = "atmel,at91sam9g45-ssc";
+      reg = <0xf0010000 0x4000>;
+      interrupts = <28 4 5>;
+      dmas = <&dma0 1 13>,
+            <&dma0 1 14>;
+      dma-names = "tx", "rx";
+      pinctrl-names = "default";
+      pinctrl-0 = <&pinctrl_ssc0_tx &pinctrl_ssc0_rx>;
+      status = "disabled";
+};
index d555421ea49f8237d5a8a0c1822facd379b60f1b..c4728839d0c1333098b38fe4e7aa96e3e0ab7f9e 100644 (file)
@@ -15,6 +15,7 @@ Required properties:
   optional gpio and may be set to 0 if not present.
 
 Optional properties:
+- atmel,nand-has-dma : boolean to support dma transfer for nand read/write.
 - nand-ecc-mode : String, operation mode of the NAND ecc mode, soft by default.
   Supported values are: "none", "soft", "hw", "hw_syndrome", "hw_oob_first",
   "soft_bch".
@@ -29,6 +30,14 @@ Optional properties:
   sector size 1024.
 - nand-bus-width : 8 or 16 bus width if not present 8
 - nand-on-flash-bbt: boolean to enable on flash bbt option if not present false
+- Nand Flash Controller(NFC) is a slave driver under Atmel nand flash
+  - Required properties:
+    - compatible : "atmel,sama5d3-nfc".
+    - reg : should specify the address and size used for NFC command registers,
+            NFC registers and NFC Sram. NFC Sram address and size can be absent
+            if don't want to use it.
+  - Optional properties:
+    - atmel,write-by-sram: boolean to enable NFC write by sram.
 
 Examples:
 nand0: nand@40000000,0 {
@@ -77,3 +86,22 @@ nand0: nand@40000000 {
                ...
        };
 };
+
+/* for NFC supported chips */
+nand0: nand@40000000 {
+       compatible = "atmel,at91rm9200-nand";
+       #address-cells = <1>;
+       #size-cells = <1>;
+       ranges;
+        ...
+        nfc@70000000 {
+               compatible = "atmel,sama5d3-nfc";
+               #address-cells = <1>;
+               #size-cells = <1>;
+               reg = <
+                       0x70000000 0x10000000   /* NFC Command Registers */
+                       0xffffc000 0x00000070   /* NFC HSMC regs */
+                       0x00200000 0x00100000   /* NFC SRAM banks */
+               >;
+       };
+};
index 2240ac09f6ba05cf1bea9aaa6788d01b9c3583e4..ec42935f390861810c1e7f6dd8e925220d46e6f1 100644 (file)
@@ -1,4 +1,5 @@
-* FSMC NAND
+ST Microelectronics Flexible Static Memory Controller (FSMC)
+NAND Interface
 
 Required properties:
 - compatible : "st,spear600-fsmc-nand", "stericsson,fsmc-nand"
@@ -9,6 +10,26 @@ Optional properties:
 - bank-width : Width (in bytes) of the device.  If not present, the width
   defaults to 1 byte
- nand-skip-bbtscan: Indicates that the BBT scanning should be skipped
+- timings: array of 6 bytes for NAND timings. The meanings of these bytes
+  are:
+  byte 0 TCLR  : CLE to RE delay in number of AHB clock cycles, only 4 bits
+                 are valid. Zero means one clockcycle, 15 means 16 clock
+                 cycles.
+  byte 1 TAR   : ALE to RE delay, 4 bits are valid. Same format as TCLR.
+  byte 2 THIZ  : number of HCLK clock cycles during which the data bus is
+                 kept in Hi-Z (tristate) after the start of a write access.
+                 Only valid for write transactions. Zero means zero cycles,
+                 255 means 255 cycles.
+  byte 3 THOLD : number of HCLK clock cycles to hold the address (and data
+                 when writing) after the command deassertation. Zero means
+                 one cycle, 255 means 256 cycles.
+  byte 4 TWAIT : number of HCLK clock cycles to assert the command to the
+                 NAND flash in response to SMWAITn. Zero means 1 cycle,
+                 255 means 256 cycles.
+  byte 5 TSET  : number of HCLK clock cycles to assert the address before the
+                 command is asserted. Zero means one cycle, 255 means 256
+                 cycles.
+- bank: default NAND bank to use (0-3 are valid, 0 is the default).
 
 Example:
 
@@ -24,6 +45,8 @@ Example:
 
                bank-width = <1>;
                nand-skip-bbtscan;
+               timings = /bits/ 8 <0 0 0 2 3 0>;
+               bank = <1>;
 
                partition@0 {
                        ...
index 9315ac96b49b224665b674f1d0109805aaf0c9a8..8e5557da1955472b3b2faf8b6c638e9ef5e61293 100644 (file)
@@ -4,6 +4,7 @@ Partitions can be represented by sub-nodes of an mtd device. This can be used
 on platforms which have strong conventions about which portions of a flash are
 used for what purposes, but which don't use an on-flash partition table such
 as RedBoot.
+NOTE: if the sub-node has a compatible string, then it is not a partition.
 
 #address-cells & #size-cells must both be present in the mtd device. There are
 two valid values for both:
index 72cf0c5daff43323b1ca43995a2c6817e972dbe2..14e52a0d86ec2c96df5da66bea9ea148fe91fdd3 100644 (file)
@@ -8,7 +8,7 @@ Required properties:
 Example:
 
        can0: can@f000c000 {
-               compatbile = "atmel,at91sam9x5-can";
+               compatible = "atmel,at91sam9x5-can";
                reg = <0xf000c000 0x300>;
                interrupts = <40 4 5>
        };
index c2dbcec0ee31d33482858367e097140a766516d4..f2105a47ec87c547fcd88d383d136ec4d11eb65b 100644 (file)
@@ -37,7 +37,7 @@ Optional properties:
        If not specified or if the specified value is 0, the CLKOUT pin
        will be disabled.
 
-- nxp,no-comparator-bypass : Allows to disable the CAN input comperator.
+- nxp,no-comparator-bypass : Allows to disable the CAN input comparator.
 
 For further information, please have a look to the SJA1000 data sheet.
 
diff --git a/Documentation/devicetree/bindings/net/micrel-ksz9021.txt b/Documentation/devicetree/bindings/net/micrel-ksz9021.txt
new file mode 100644 (file)
index 0000000..997a63f
--- /dev/null
@@ -0,0 +1,49 @@
+Micrel KSZ9021 Gigabit Ethernet PHY
+
+Some boards require special tuning values, particularly when it comes to
+clock delays.  You can specify clock delay values by adding
+micrel-specific properties to an Ethernet OF device node.
+
+All skew control options are specified in picoseconds.  The minimum
+value is 0, and the maximum value is 3000.
+
+Optional properties:
+ - rxc-skew-ps : Skew control of RXC pad
+ - rxdv-skew-ps : Skew control of RX CTL pad
+ - txc-skew-ps : Skew control of TXC pad
+ - txen-skew-ps : Skew control of TX_CTL pad
+ - rxd0-skew-ps : Skew control of RX data 0 pad
+ - rxd1-skew-ps : Skew control of RX data 1 pad
+ - rxd2-skew-ps : Skew control of RX data 2 pad
+ - rxd3-skew-ps : Skew control of RX data 3 pad
+ - txd0-skew-ps : Skew control of TX data 0 pad
+ - txd1-skew-ps : Skew control of TX data 1 pad
+ - txd2-skew-ps : Skew control of TX data 2 pad
+ - txd3-skew-ps : Skew control of TX data 3 pad
+
+Examples:
+
+       /* Attach to an Ethernet device with autodetected PHY */
+       &enet {
+               rxc-skew-ps = <3000>;
+               rxdv-skew-ps = <0>;
+               txc-skew-ps = <3000>;
+               txen-skew-ps = <0>;
+               status = "okay";
+       };
+
+       /* Attach to an explicitly-specified PHY */
+       mdio {
+               phy0: ethernet-phy@0 {
+                       rxc-skew-ps = <3000>;
+                       rxdv-skew-ps = <0>;
+                       txc-skew-ps = <3000>;
+                       txen-skew-ps = <0>;
+                       reg = <0>;
+               };
+       };
+       ethernet@70000 {
+               status = "okay";
+               phy = <&phy0>;
+               phy-mode = "rgmii-id";
+       };
diff --git a/Documentation/devicetree/bindings/net/moxa,moxart-mac.txt b/Documentation/devicetree/bindings/net/moxa,moxart-mac.txt
new file mode 100644 (file)
index 0000000..583418b
--- /dev/null
@@ -0,0 +1,21 @@
+MOXA ART Ethernet Controller
+
+Required properties:
+
+- compatible : Must be "moxa,moxart-mac"
+- reg : Should contain register location and length
+- interrupts : Should contain the mac interrupt number
+
+Example:
+
+       mac0: mac@90900000 {
+               compatible = "moxa,moxart-mac";
+               reg =   <0x90900000 0x100>;
+               interrupts = <25 0>;
+       };
+
+       mac1: mac@92000000 {
+               compatible = "moxa,moxart-mac";
+               reg =   <0x92000000 0x100>;
+               interrupts = <27 0>;
+       };
index e2371f5cdebe594aa29138b3684e828bd2def7df..eabcb4b5db6e6711b244ea9a35e7b4ff711c12ef 100644 (file)
@@ -18,6 +18,7 @@ Required properties:
 - interrupt-map-mask and interrupt-map: standard PCI properties
        to define the mapping of the PCIe interface to interrupt
        numbers.
+- num-lanes: number of lanes to use
 - reset-gpio: gpio pin number of power good signal
 
 Example:
@@ -41,6 +42,7 @@ SoC specific DT Entry:
                #interrupt-cells = <1>;
                interrupt-map-mask = <0 0 0 0>;
                interrupt-map = <0x0 0 &gic 53>;
+               num-lanes = <4>;
        };
 
        pcie@2a0000 {
@@ -60,6 +62,7 @@ SoC specific DT Entry:
                #interrupt-cells = <1>;
                interrupt-map-mask = <0 0 0 0>;
                interrupt-map = <0x0 0 &gic 56>;
+               num-lanes = <4>;
        };
 
 Board specific DT Entry:
index 648d60eb9fd8f505be140b6d58a6cb771ed04220..7ccae490ff6dcc6936f7c9e5791435685d3ded30 100644 (file)
@@ -37,7 +37,7 @@ Bank: 3 (A, B and C)
   0xffffffff 0x7fff3ccf  /* pioB */
   0xffffffff 0x007fffff  /* pioC */
 
-For each peripheral/bank we will descibe in a u32 if a pin can can be
+For each peripheral/bank we will describe in a u32 if a pin can be
 configured in it by putting 1 to the pin bit (1 << pin)
 
 Let's take the pioA on peripheral B
diff --git a/Documentation/devicetree/bindings/power_supply/msm-poweroff.txt b/Documentation/devicetree/bindings/power_supply/msm-poweroff.txt
new file mode 100644 (file)
index 0000000..ce44ad3
--- /dev/null
@@ -0,0 +1,17 @@
+MSM Restart Driver
+
+A power supply hold (ps-hold) bit is set to power the msm chipsets.
+Clearing that bit allows us to restart/poweroff. The difference
+between poweroff and restart is determined by unique power manager IC
+settings.
+
+Required Properties:
+-compatible: "qcom,pshold"
+-reg: Specifies the physical address of the ps-hold register
+
+Example:
+
+       restart@fc4ab000 {
+               compatible = "qcom,pshold";
+               reg = <0xfc4ab000 0x4>;
+       };
index 5693877ab377d6007cdb9e7f4ae7705faec581d7..82dd5b65cf485b43e02775b4329d073dfcf7ebd6 100644 (file)
@@ -1,21 +1,20 @@
 * Freescale MSI interrupt controller
 
 Required properties:
-- compatible : compatible list, contains 2 entries,
-  first is "fsl,CHIP-msi", where CHIP is the processor(mpc8610, mpc8572,
-  etc.) and the second is "fsl,mpic-msi" or "fsl,ipic-msi" depending on
-  the parent type.
+- compatible : compatible list, may contain one or two entries.
+  The first is "fsl,CHIP-msi", where CHIP is the processor(mpc8610, mpc8572,
+  etc.) and the second is "fsl,mpic-msi" or "fsl,ipic-msi" or
+  "fsl,mpic-msi-v4.3" depending on the parent type and version. If mpic
+  version is 4.3, the number of MSI registers is increased to 16, MSIIR1 is
+  provided to access these 16 registers, and compatible "fsl,mpic-msi-v4.3"
+  should be used. The first entry is optional; the second entry is
+  required.
 
 - reg : It may contain one or two regions. The first region should contain
   the address and the length of the shared message interrupt register set.
-  The second region should contain the address of aliased MSIIR register for
-  platforms that have such an alias.
-
-- msi-available-ranges: use <start count> style section to define which
-  msi interrupt can be used in the 256 msi interrupts. This property is
-  optional, without this, all the 256 MSI interrupts can be used.
-  Each available range must begin and end on a multiple of 32 (i.e.
-  no splitting an individual MSI register or the associated PIC interrupt).
+  The second region should contain the address of the aliased MSIIR or MSIIR1
+  register for platforms that have such an alias. If using MSIIR1, the second
+  region must be added because different MSI groups have different MSIIR1 offsets.
 
 - interrupts : each one of the interrupts here is one entry per 32 MSIs,
   and routed to the host interrupt controller. the interrupts should
@@ -28,6 +27,14 @@ Required properties:
   to MPIC.
 
 Optional properties:
+- msi-available-ranges: use <start count> style section to define which
+  msi interrupt can be used in the 256 msi interrupts. This property is
+  optional, without this, all the MSI interrupts can be used.
+  Each available range must begin and end on a multiple of 32 (i.e.
+  no splitting an individual MSI register or the associated PIC interrupt).
+  MPIC v4.3 does not support this property because the 32 interrupts of an
+  individual register are not continuous when using MSIIR1.
+
 - msi-address-64: 64-bit PCI address of the MSIIR register. The MSIIR register
   is used for MSI messaging.  The address of MSIIR in PCI address space is
   the MSI message address.
@@ -54,6 +61,28 @@ Example:
                interrupt-parent = <&mpic>;
        };
 
+       msi@41600 {
+               compatible = "fsl,mpic-msi-v4.3";
+               reg = <0x41600 0x200 0x44148 4>;
+               interrupts = <
+                       0xe0 0 0 0
+                       0xe1 0 0 0
+                       0xe2 0 0 0
+                       0xe3 0 0 0
+                       0xe4 0 0 0
+                       0xe5 0 0 0
+                       0xe6 0 0 0
+                       0xe7 0 0 0
+                       0x100 0 0 0
+                       0x101 0 0 0
+                       0x102 0 0 0
+                       0x103 0 0 0
+                       0x104 0 0 0
+                       0x105 0 0 0
+                       0x106 0 0 0
+                       0x107 0 0 0>;
+       };
+
 The Freescale hypervisor and msi-address-64
 -------------------------------------------
 Normally, PCI devices have access to all of CCSR via an ATMU mapping.  The
diff --git a/Documentation/devicetree/bindings/regulator/88pm800.txt b/Documentation/devicetree/bindings/regulator/88pm800.txt
new file mode 100644 (file)
index 0000000..e8a54c2
--- /dev/null
@@ -0,0 +1,38 @@
+Marvell 88PM800 regulator
+
+Required properties:
+- compatible: "marvell,88pm800"
+- reg: I2C slave address
+- regulators: A node that houses a sub-node for each regulator within the
+  device. Each sub-node is identified using the node's name (or the deprecated
+  regulator-compatible property if present), with valid values listed below.
+  The content of each sub-node is defined by the standard binding for
+  regulators; see regulator.txt.
+
+The valid names for regulators are:
+
+  buck1, buck2, buck3, buck4, buck5, ldo1, ldo2, ldo3, ldo4, ldo5, ldo6, ldo7,
+  ldo8, ldo9, ldo10, ldo11, ldo12, ldo13, ldo14, ldo15, ldo16, ldo17, ldo18, ldo19
+
+Example:
+
+       pmic: 88pm800@31 {
+               compatible = "marvell,88pm800";
+               reg = <0x31>;
+
+               regulators {
+                       buck1 {
+                               regulator-min-microvolt = <600000>;
+                               regulator-max-microvolt = <3950000>;
+                               regulator-boot-on;
+                               regulator-always-on;
+                       };
+                       ldo1 {
+                               regulator-min-microvolt = <600000>;
+                               regulator-max-microvolt = <15000000>;
+                               regulator-boot-on;
+                               regulator-always-on;
+                       };
+...
+               };
+       };
diff --git a/Documentation/devicetree/bindings/regulator/max8660.txt b/Documentation/devicetree/bindings/regulator/max8660.txt
new file mode 100644 (file)
index 0000000..8ba994d
--- /dev/null
@@ -0,0 +1,47 @@
+Maxim MAX8660 voltage regulator
+
+Required properties:
+- compatible: must be one of "maxim,max8660", "maxim,max8661"
+- reg: I2C slave address, usually 0x34
+- any required generic properties defined in regulator.txt
+
+Example:
+
+       i2c_master {
+               max8660@34 {
+                       compatible = "maxim,max8660";
+                       reg = <0x34>;
+
+                       regulators {
+                               regulator@0 {
+                                       regulator-compatible= "V3(DCDC)";
+                                       regulator-min-microvolt = <725000>;
+                                       regulator-max-microvolt = <1800000>;
+                               };
+
+                               regulator@1 {
+                                       regulator-compatible= "V4(DCDC)";
+                                       regulator-min-microvolt = <725000>;
+                                       regulator-max-microvolt = <1800000>;
+                               };
+
+                               regulator@2 {
+                                       regulator-compatible= "V5(LDO)";
+                                       regulator-min-microvolt = <1700000>;
+                                       regulator-max-microvolt = <2000000>;
+                               };
+
+                               regulator@3 {
+                                       regulator-compatible= "V6(LDO)";
+                                       regulator-min-microvolt = <1800000>;
+                                       regulator-max-microvolt = <3300000>;
+                               };
+
+                               regulator@4 {
+                                       regulator-compatible= "V7(LDO)";
+                                       regulator-min-microvolt = <1800000>;
+                                       regulator-max-microvolt = <3300000>;
+                               };
+                       };
+               };
+       };
index 30b0581bb1ce63cbea673888e9fec22a5df72aa5..875639ae0606e1e6637cacd9ef35dfde9ec0399a 100644 (file)
@@ -25,8 +25,8 @@ Optional nodes:
               Additional custom properties  are listed below.
 
               For ti,palmas-pmic - smps12, smps123, smps3 depending on OTP,
-              smps45, smps457, smps7 depending on variant, smps6, smps[8-10],
-              ldo[1-9], ldoln, ldousb.
+              smps45, smps457, smps7 depending on variant, smps6, smps[8-9],
+              smps10_out2, smps10_out1, ldo[1-9], ldoln, ldousb.
 
               Optional sub-node properties:
               ti,warm-reset - maintain voltage during warm reset(boolean)
@@ -36,6 +36,9 @@ Optional nodes:
               ti,smps-range - OTP has the wrong range set for the hardware so override
               0 - low range, 1 - high range.
 
+- ti,system-power-controller: Indicates whether or not this PMIC controls
+                             the system power.
+
 Example:
 
 #include <dt-bindings/interrupt-controller/irq.h>
@@ -48,6 +51,8 @@ pmic {
 
        ti,ldo6-vibrator;
 
+       ti,system-power-controller;
+
        regulators {
                smps12_reg : smps12 {
                        regulator-name = "smps12";
diff --git a/Documentation/devicetree/bindings/regulator/pfuze100.txt b/Documentation/devicetree/bindings/regulator/pfuze100.txt
new file mode 100644 (file)
index 0000000..fc989b2
--- /dev/null
@@ -0,0 +1,115 @@
+PFUZE100 family of regulators
+
+Required properties:
+- compatible: "fsl,pfuze100"
+- reg: I2C slave address
+
+Required child node:
+- regulators: This is the list of child nodes that specify the regulator
+  initialization data for defined regulators. Please refer to below doc
+  Documentation/devicetree/bindings/regulator/regulator.txt.
+
+  The valid names for regulators are:
+  sw1ab,sw1c,sw2,sw3a,sw3b,sw4,swbst,vsnvs,vrefddr,vgen1~vgen6
+
+Each regulator is defined using the standard binding for regulators.
+
+Example:
+
+       pmic: pfuze100@08 {
+               compatible = "fsl,pfuze100";
+               reg = <0x08>;
+
+               regulators {
+                       sw1a_reg: sw1ab {
+                               regulator-min-microvolt = <300000>;
+                               regulator-max-microvolt = <1875000>;
+                               regulator-boot-on;
+                               regulator-always-on;
+                               regulator-ramp-delay = <6250>;
+                       };
+
+                       sw1c_reg: sw1c {
+                               regulator-min-microvolt = <300000>;
+                               regulator-max-microvolt = <1875000>;
+                               regulator-boot-on;
+                               regulator-always-on;
+                       };
+
+                       sw2_reg: sw2 {
+                               regulator-min-microvolt = <800000>;
+                               regulator-max-microvolt = <3300000>;
+                               regulator-boot-on;
+                               regulator-always-on;
+                       };
+
+                       sw3a_reg: sw3a {
+                               regulator-min-microvolt = <400000>;
+                               regulator-max-microvolt = <1975000>;
+                               regulator-boot-on;
+                               regulator-always-on;
+                       };
+
+                       sw3b_reg: sw3b {
+                               regulator-min-microvolt = <400000>;
+                               regulator-max-microvolt = <1975000>;
+                               regulator-boot-on;
+                               regulator-always-on;
+                       };
+
+                       sw4_reg: sw4 {
+                               regulator-min-microvolt = <800000>;
+                               regulator-max-microvolt = <3300000>;
+                       };
+
+                       swbst_reg: swbst {
+                               regulator-min-microvolt = <5000000>;
+                               regulator-max-microvolt = <5150000>;
+                       };
+
+                       snvs_reg: vsnvs {
+                               regulator-min-microvolt = <1000000>;
+                               regulator-max-microvolt = <3000000>;
+                               regulator-boot-on;
+                               regulator-always-on;
+                       };
+
+                       vref_reg: vrefddr {
+                               regulator-boot-on;
+                               regulator-always-on;
+                       };
+
+                       vgen1_reg: vgen1 {
+                               regulator-min-microvolt = <800000>;
+                               regulator-max-microvolt = <1550000>;
+                       };
+
+                       vgen2_reg: vgen2 {
+                               regulator-min-microvolt = <800000>;
+                               regulator-max-microvolt = <1550000>;
+                       };
+
+                       vgen3_reg: vgen3 {
+                               regulator-min-microvolt = <1800000>;
+                               regulator-max-microvolt = <3300000>;
+                       };
+
+                       vgen4_reg: vgen4 {
+                               regulator-min-microvolt = <1800000>;
+                               regulator-max-microvolt = <3300000>;
+                               regulator-always-on;
+                       };
+
+                       vgen5_reg: vgen5 {
+                               regulator-min-microvolt = <1800000>;
+                               regulator-max-microvolt = <3300000>;
+                               regulator-always-on;
+                       };
+
+                       vgen6_reg: vgen6 {
+                               regulator-min-microvolt = <1800000>;
+                               regulator-max-microvolt = <3300000>;
+                               regulator-always-on;
+                       };
+               };
+       };
index 48a3b8e5d6bde80fce883c7db83f99dc4bcc7da7..2bd8f09787659269bb03847b3517cff2b52cfca9 100644 (file)
@@ -12,6 +12,8 @@ Optional properties:
 - regulator-allow-bypass: allow the regulator to go into bypass mode
 - <name>-supply: phandle to the parent supply/regulator node
 - regulator-ramp-delay: ramp delay for regulator(in uV/uS)
+  For hardware which supports disabling the ramp rate, it should be explicitly
+  initialised to zero (regulator-ramp-delay = <0>) to disable the ramp delay.
 
 Deprecated properties:
 - regulator-compatible: If a regulator chip contains multiple
diff --git a/Documentation/devicetree/bindings/serial/mrvl,pxa-ssp.txt b/Documentation/devicetree/bindings/serial/mrvl,pxa-ssp.txt
new file mode 100644 (file)
index 0000000..669b814
--- /dev/null
@@ -0,0 +1,65 @@
+Device tree bindings for Marvell PXA SSP ports
+
+Required properties:
+
+       - compatible:   Must be one of
+                               mrvl,pxa25x-ssp
+                               mrvl,pxa25x-nssp
+                               mrvl,pxa27x-ssp
+                               mrvl,pxa3xx-ssp
+                               mrvl,pxa168-ssp
+                               mrvl,pxa910-ssp
+                               mrvl,ce4100-ssp
+                               mrvl,lpss-ssp
+
+       - reg:          The memory base
+       - dmas:         Two dma phandles, one for rx, one for tx
+       - dma-names:    Must be "rx", "tx"
+
+
+Example for PXA3xx:
+
+       ssp0: ssp@41000000 {
+               compatible = "mrvl,pxa3xx-ssp";
+               reg = <0x41000000 0x40>;
+               ssp-id = <1>;
+               interrupts = <24>;
+               clock-names = "pxa27x-ssp.0";
+               dmas = <&dma 13
+                       &dma 14>;
+               dma-names = "rx", "tx";
+       };
+
+       ssp1: ssp@41700000 {
+               compatible = "mrvl,pxa3xx-ssp";
+               reg = <0x41700000 0x40>;
+               ssp-id = <2>;
+               interrupts = <16>;
+               clock-names = "pxa27x-ssp.1";
+               dmas = <&dma 15
+                       &dma 16>;
+               dma-names = "rx", "tx";
+       };
+
+       ssp2: ssp@41900000 {
+               compatible = "mrvl,pxa3xx-ssp";
+               reg = <0x41900000 0x40>;
+               ssp-id = <3>;
+               interrupts = <0>;
+               clock-names = "pxa27x-ssp.2";
+               dmas = <&dma 66
+                       &dma 67>;
+               dma-names = "rx", "tx";
+       };
+
+       ssp3: ssp@41a00000 {
+               compatible = "mrvl,pxa3xx-ssp";
+               reg = <0x41a00000 0x40>;
+               ssp-id = <4>;
+               interrupts = <13>;
+               clock-names = "pxa27x-ssp.3";
+               dmas = <&dma 2
+                       &dma 3>;
+               dma-names = "rx", "tx";
+       };
+
index 1e753c69fc83298249434ecd32543da7b0d929de..32b1fa1f2a5b8dec1eca195f1fea93528155728b 100644 (file)
@@ -7,7 +7,7 @@ UART node.
 
 Required properties:
 - rs485-rts-delay: prop-encoded-array <a b> where:
-  * a is the delay beteween rts signal and beginning of data sent in milliseconds.
+  * a is the delay between rts signal and beginning of data sent in milliseconds.
       it corresponds to the delay before sending data.
   * b is the delay between end of data sent and rts signal in milliseconds
       it corresponds to the delay after sending data and actual release of the line.
diff --git a/Documentation/devicetree/bindings/sound/ak4554.c b/Documentation/devicetree/bindings/sound/ak4554.c
new file mode 100644 (file)
index 0000000..934fa02
--- /dev/null
@@ -0,0 +1,11 @@
+AK4554 ADC/DAC
+
+Required properties:
+
+  - compatible : "asahi-kasei,ak4554"
+
+Example:
+
+ak4554-adc-dac {
+       compatible = "asahi-kasei,ak4554";
+};
index 8608f747dcfe1869ae5061007a2fd583000ef545..ffd886d110bdcbc6aa79d2372149ae14867b2449 100644 (file)
@@ -13,6 +13,25 @@ Required properties:
   - #gpio-cells : Should be two. The first cell is the pin number and the
     second cell is used to specify optional parameters (currently unused).
 
+Pins on the device (for linking into audio routes):
+
+  * SPK_OUTP
+  * SPK_OUTN
+  * HP_OUT_L
+  * HP_OUT_R
+  * AUX_OUT_P
+  * AUX_OUT_N
+  * LINE_IN_L
+  * LINE_IN_R
+  * PHONE_P
+  * PHONE_N
+  * MIC1_P
+  * MIC1_N
+  * MIC2_P
+  * MIC2_N
+  * MICBIAS1
+  * DMICDAT
+
 Example:
 
 alc5632: alc5632@1e {
diff --git a/Documentation/devicetree/bindings/sound/atmel-sam9x5-wm8731-audio.txt b/Documentation/devicetree/bindings/sound/atmel-sam9x5-wm8731-audio.txt
new file mode 100644 (file)
index 0000000..0720857
--- /dev/null
@@ -0,0 +1,35 @@
+* Atmel at91sam9x5ek wm8731 audio complex
+
+Required properties:
+  - compatible: "atmel,sam9x5-wm8731-audio"
+  - atmel,model: The user-visible name of this sound complex.
+  - atmel,ssc-controller: The phandle of the SSC controller
+  - atmel,audio-codec: The phandle of the WM8731 audio codec
+  - atmel,audio-routing: A list of the connections between audio components.
+    Each entry is a pair of strings, the first being the connection's sink,
+    the second being the connection's source.
+
+Available audio endpoints for the audio-routing table:
+
+Board connectors:
+ * Headphone Jack
+ * Line In Jack
+
+wm8731 pins:
+cf Documentation/devicetree/bindings/sound/wm8731.txt
+
+Example:
+sound {
+       compatible = "atmel,sam9x5-wm8731-audio";
+
+       atmel,model = "wm8731 @ AT91SAM9X5EK";
+
+       atmel,audio-routing =
+               "Headphone Jack", "RHPOUT",
+               "Headphone Jack", "LHPOUT",
+               "LLINEIN", "Line In Jack",
+               "RLINEIN", "Line In Jack";
+
+       atmel,ssc-controller = <&ssc0>;
+       atmel,audio-codec = <&wm8731>;
+};
diff --git a/Documentation/devicetree/bindings/sound/atmel-wm8904.txt b/Documentation/devicetree/bindings/sound/atmel-wm8904.txt
new file mode 100644 (file)
index 0000000..8bbe50c
--- /dev/null
@@ -0,0 +1,55 @@
+Atmel ASoC driver with wm8904 audio codec complex
+
+Required properties:
+  - compatible: "atmel,asoc-wm8904"
+  - atmel,model: The user-visible name of this sound complex.
+  - atmel,audio-routing: A list of the connections between audio components.
+    Each entry is a pair of strings, the first being the connection's sink,
+    the second being the connection's source. Valid names for sources and
+    sinks are the WM8904's pins, and the jacks on the board:
+
+    WM8904 pins:
+
+    * IN1L
+    * IN1R
+    * IN2L
+    * IN2R
+    * IN3L
+    * IN3R
+    * HPOUTL
+    * HPOUTR
+    * LINEOUTL
+    * LINEOUTR
+    * MICBIAS
+
+    Board connectors:
+
+    * Headphone Jack
+    * Line In Jack
+    * Mic
+
+  - atmel,ssc-controller: The phandle of the SSC controller
+  - atmel,audio-codec: The phandle of the WM8904 audio codec
+
+Optional properties:
+  - pinctrl-names, pinctrl-0: Please refer to pinctrl-bindings.txt
+
+Example:
+sound {
+       compatible = "atmel,asoc-wm8904";
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_pck0_as_mck>;
+
+       atmel,model = "wm8904 @ AT91SAM9N12EK";
+
+       atmel,audio-routing =
+               "Headphone Jack", "HPOUTL",
+               "Headphone Jack", "HPOUTR",
+               "IN2L", "Line In Jack",
+               "IN2R", "Line In Jack",
+               "Mic", "MICBIAS",
+               "IN1L", "Mic";
+
+       atmel,ssc-controller = <&ssc0>;
+       atmel,audio-codec = <&wm8904>;
+};
similarity index 90%
rename from Documentation/devicetree/bindings/powerpc/fsl/ssi.txt
rename to Documentation/devicetree/bindings/sound/fsl,ssi.txt
index 5ff76c9c57d27212d88723b85442e431cfa1aed4..088a2c038f0112d21b487471a63b8760de86ca62 100644 (file)
@@ -47,6 +47,14 @@ Optional properties:
 - codec-handle:     Phandle to a 'codec' node that defines an audio
                     codec connected to this SSI.  This node is typically
                     a child of an I2C or other control node.
+- fsl,fiq-stream-filter: Bool property. Disable DMA and use FIQ instead to
+                   filter the codec stream. This is necessary for some boards
+                   where an incompatible codec is connected to this SSI, e.g.
+                   on pca100 and pcm043.
+- dmas:                    Generic dma devicetree binding as described in
+                   Documentation/devicetree/bindings/dma/dma.txt.
+- dma-names:       Two dmas have to be defined, "tx" and "rx", if
+                   fsl,fiq-stream-filter is not defined.
 
 Child 'codec' node required properties:
 - compatible:       Compatible list, contains the name of the codec
index 215aa9817213e9ea8e469a94f19fcc251aa87352..f88a00e54c6351cca3972943482d14b620ba3671 100644 (file)
@@ -5,6 +5,15 @@ Required properties:
   or "fsl,imx31-audmux" for the version firstly used on i.MX31.
 - reg : Should contain AUDMUX registers location and length
 
+An initial configuration can be setup using child nodes.
+
+Required properties of optional child nodes:
+- fsl,audmux-port : Integer of the audmux port that is configured by this
+  child node.
+- fsl,port-config : List of configuration options for the specific port. For
+  imx31-audmux and above, it is a list of tuples <ptcr pdcr>. For
+  imx21-audmux it is a list of pcr values.
+
 Example:
 
 audmux@021d8000 {
diff --git a/Documentation/devicetree/bindings/sound/mrvl,pxa-ssp.txt b/Documentation/devicetree/bindings/sound/mrvl,pxa-ssp.txt
new file mode 100644 (file)
index 0000000..74c9ba6
--- /dev/null
@@ -0,0 +1,28 @@
+Marvell PXA SSP CPU DAI bindings
+
+Required properties:
+
+       compatible      Must be "mrvl,pxa-ssp-dai"
+       port            A phandle reference to a PXA ssp upstream device
+
+Example:
+
+       /* upstream device */
+
+       ssp0: ssp@41000000 {
+               compatible = "mrvl,pxa3xx-ssp";
+               reg = <0x41000000 0x40>;
+               interrupts = <24>;
+               clock-names = "pxa27x-ssp.0";
+               dmas = <&dma 13
+                       &dma 14>;
+               dma-names = "rx", "tx";
+       };
+
+       /* DAI as user */
+
+       ssp_dai0: ssp_dai@0 {
+               compatible = "mrvl,pxa-ssp-dai";
+               port = <&ssp0>;
+       };
+
diff --git a/Documentation/devicetree/bindings/sound/mrvl,pxa2xx-pcm.txt b/Documentation/devicetree/bindings/sound/mrvl,pxa2xx-pcm.txt
new file mode 100644 (file)
index 0000000..551fbb8
--- /dev/null
@@ -0,0 +1,15 @@
+DT bindings for ARM PXA2xx PCM platform driver
+
+This is just a dummy driver that registers the PXA ASoC platform driver.
+It does not have any resources assigned.
+
+Required properties:
+
+       - compatible            'mrvl,pxa-pcm-audio'
+
+Example:
+
+       pxa_pcm_audio: snd_soc_pxa_audio {
+               compatible = "mrvl,pxa-pcm-audio";
+       };
+
index 05ffecb571037cb562778cb243a84555384345d1..8b8903ef0800069afdad90f10e5964f3bd2e20c9 100644 (file)
@@ -11,28 +11,8 @@ Required properties:
 - nvidia,audio-routing : A list of the connections between audio components.
   Each entry is a pair of strings, the first being the connection's sink,
   the second being the connection's source. Valid names for sources and
-  sinks are the ALC5632's pins:
-
-  ALC5632 pins:
-
-  * SPK_OUTP
-  * SPK_OUTN
-  * HP_OUT_L
-  * HP_OUT_R
-  * AUX_OUT_P
-  * AUX_OUT_N
-  * LINE_IN_L
-  * LINE_IN_R
-  * PHONE_P
-  * PHONE_N
-  * MIC1_P
-  * MIC1_N
-  * MIC2_P
-  * MIC2_N
-  * MICBIAS1
-  * DMICDAT
-
-  Board connectors:
+  sinks are the ALC5632's pins as documented in the binding for the device
+  and:
 
   * Headset Stereophone
   * Int Spk
index d130818700b228a200e4c81c35e7406f6e792e3a..dc6224994d69d97f31614a308d71ba6cf2c9784c 100644 (file)
@@ -11,32 +11,12 @@ Required properties:
 - nvidia,audio-routing : A list of the connections between audio components.
   Each entry is a pair of strings, the first being the connection's sink,
   the second being the connection's source. Valid names for sources and
-  sinks are the RT5640's pins, and the jacks on the board:
-
-  RT5640 pins:
-
-  * DMIC1
-  * DMIC2
-  * MICBIAS1
-  * IN1P
-  * IN1R
-  * IN2P
-  * IN2R
-  * HPOL
-  * HPOR
-  * LOUTL
-  * LOUTR
-  * MONOP
-  * MONON
-  * SPOLP
-  * SPOLN
-  * SPORP
-  * SPORN
-
-  Board connectors:
+  sinks are the RT5640's pins (as documented in its binding), and the jacks
+  on the board:
 
   * Headphones
   * Speakers
+  * Mic Jack
 
 - nvidia,i2s-controller : The phandle of the Tegra I2S controller that's
   connected to the CODEC.
index d14510613a7fdf1ac076320a04468976aa88811a..aab6ce0ad2fc5930054a1f97c73ea5781aa57e10 100644 (file)
@@ -11,31 +11,8 @@ Required properties:
 - nvidia,audio-routing : A list of the connections between audio components.
   Each entry is a pair of strings, the first being the connection's sink,
   the second being the connection's source. Valid names for sources and
-  sinks are the WM8753's pins, and the jacks on the board:
-
-  WM8753 pins:
-
-  * LOUT1
-  * LOUT2
-  * ROUT1
-  * ROUT2
-  * MONO1
-  * MONO2
-  * OUT3
-  * OUT4
-  * LINE1
-  * LINE2
-  * RXP
-  * RXN
-  * ACIN
-  * ACOP
-  * MIC1N
-  * MIC1
-  * MIC2N
-  * MIC2
-  * Mic Bias
-
-  Board connectors:
+  sinks are the WM8753's pins as documented in the binding for the WM8753,
+  and the jacks on the board:
 
   * Headphone Jack
   * Mic Jack
index 3bf722deb722265422283b4b897c37ffc74d31ae..4b44dfb6ca0dcb08e5ad4468df6aeee853d3d56e 100644 (file)
@@ -11,28 +11,8 @@ Required properties:
 - nvidia,audio-routing : A list of the connections between audio components.
   Each entry is a pair of strings, the first being the connection's sink,
   the second being the connection's source. Valid names for sources and
-  sinks are the WM8903's pins, and the jacks on the board:
-
-  WM8903 pins:
-
-  * IN1L
-  * IN1R
-  * IN2L
-  * IN2R
-  * IN3L
-  * IN3R
-  * DMICDAT
-  * HPOUTL
-  * HPOUTR
-  * LINEOUTL
-  * LINEOUTR
-  * LOP
-  * LON
-  * ROP
-  * RON
-  * MICBIAS
-
-  Board connectors:
+  sinks are the WM8903's pins (documented in the WM8903 binding document),
+  and the jacks on the board:
 
   * Headphone Jack
   * Int Spk
diff --git a/Documentation/devicetree/bindings/sound/pcm1792a.txt b/Documentation/devicetree/bindings/sound/pcm1792a.txt
new file mode 100644 (file)
index 0000000..970ba1e
--- /dev/null
@@ -0,0 +1,18 @@
+Texas Instruments pcm1792a DT bindings
+
+This driver supports the SPI bus.
+
+Required properties:
+
+ - compatible: "ti,pcm1792a"
+
+For required properties on SPI, please consult
+Documentation/devicetree/bindings/spi/spi-bus.txt
+
+Examples:
+
+       codec_spi: 1792a@0 {
+               compatible = "ti,pcm1792a";
+               spi-max-frequency = <600000>;
+       };
+
index 005bcb24d72dc1e5e359aeadf9842eda277d23e1..068a1141b06f19de0f5206e97de40f0af6793812 100644 (file)
@@ -18,6 +18,26 @@ Optional properties:
 
 - realtek,ldo1-en-gpios : The GPIO that controls the CODEC's LDO1_EN pin.
 
+Pins on the device (for linking into audio routes):
+
+  * DMIC1
+  * DMIC2
+  * MICBIAS1
+  * IN1P
+  * IN1R
+  * IN2P
+  * IN2R
+  * HPOL
+  * HPOR
+  * LOUTL
+  * LOUTR
+  * MONOP
+  * MONON
+  * SPOLP
+  * SPOLN
+  * SPORP
+  * SPORN
+
 Example:
 
 rt5640 {
index 025e66b85a43905b3cb64b2e5590872d0a000b3e..7386d444ada1b4a7bcb7f29b29a5ef45b70ace48 100644 (file)
@@ -2,7 +2,15 @@
 
 Required SoC Specific Properties:
 
-- compatible : "samsung,i2s-v5"
+- compatible : should be one of the following.
+   - samsung,s3c6410-i2s: for 8/16/24bit stereo I2S.
+   - samsung,s5pv210-i2s: for 8/16/24bit multichannel(5.1) I2S with
+     secondary fifo, s/w reset control and internal mux for root clk src.
+   - samsung,exynos5420-i2s: for 8/16/24bit multichannel(7.1) I2S with
+     secondary fifo, s/w reset control, internal mux for root clk src and
+     TDM support. TDM (Time division multiplexing) is to allow transfer of
+     multiple channel audio data on single data line.
+
 - reg: physical base address of the controller and length of memory mapped
   region.
 - dmas: list of DMA controller phandle and DMA request line ordered pairs.
@@ -21,13 +29,6 @@ Required SoC Specific Properties:
 
 Optional SoC Specific Properties:
 
-- samsung,supports-6ch: If the I2S Primary sound source has 5.1 Channel
-  support, this flag is enabled.
-- samsung,supports-rstclr: This flag should be set if I2S software reset bit
-  control is required. When this flag is set I2S software reset bit will be
-  enabled or disabled based on need.
-- samsung,supports-secdai:If I2S block has a secondary FIFO and internal DMA,
-  then this flag is enabled.
 - samsung,idma-addr: Internal DMA register base address of the audio
   sub system(used in secondary sound source).
 - pinctrl-0: Should specify pin control groups used for this controller.
@@ -36,7 +37,7 @@ Optional SoC Specific Properties:
 Example:
 
 i2s0: i2s@03830000 {
-       compatible = "samsung,i2s-v5";
+       compatible = "samsung,s5pv210-i2s";
        reg = <0x03830000 0x100>;
        dmas = <&pdma0 10
                &pdma0 9
@@ -46,9 +47,6 @@ i2s0: i2s@03830000 {
                <&clock_audss EXYNOS_I2S_BUS>,
                <&clock_audss EXYNOS_SCLK_I2S>;
        clock-names = "iis", "i2s_opclk0", "i2s_opclk1";
-       samsung,supports-6ch;
-       samsung,supports-rstclr;
-       samsung,supports-secdai;
        samsung,idma-addr = <0x03000000>;
        pinctrl-names = "default";
        pinctrl-0 = <&i2s0_bus>;
diff --git a/Documentation/devicetree/bindings/sound/soc-ac97link.txt b/Documentation/devicetree/bindings/sound/soc-ac97link.txt
new file mode 100644 (file)
index 0000000..80152a8
--- /dev/null
@@ -0,0 +1,28 @@
+AC97 link bindings
+
+These bindings can be included within any other device node.
+
+Required properties:
+ - pinctrl-names: Has to contain following states to setup the correct
+   pinmuxing for the used gpios:
+       "ac97-running": AC97-link is active
+       "ac97-reset": AC97-link reset state
+       "ac97-warm-reset": AC97-link warm reset state
+ - ac97-gpios: List of gpio phandles with args in the order ac97-sync,
+   ac97-sdata, ac97-reset
+
+
+Example:
+
+ssi {
+       ...
+
+       pinctrl-names = "default", "ac97-running", "ac97-reset", "ac97-warm-reset";
+       pinctrl-0 = <&ac97link_running>;
+       pinctrl-1 = <&ac97link_running>;
+       pinctrl-2 = <&ac97link_reset>;
+       pinctrl-3 = <&ac97link_warm_reset>;
+       ac97-gpios = <&gpio3 20 0 &gpio3 22 0 &gpio3 28 0>;
+
+       ...
+};
diff --git a/Documentation/devicetree/bindings/sound/ti,pcm1681.txt b/Documentation/devicetree/bindings/sound/ti,pcm1681.txt
new file mode 100644 (file)
index 0000000..4df1718
--- /dev/null
@@ -0,0 +1,15 @@
+Texas Instruments PCM1681 8-channel PWM Processor
+
+Required properties:
+
+ - compatible:         Should contain "ti,pcm1681".
+ - reg:                        The i2c address. Should contain <0x4c>.
+
+Examples:
+
+       i2c_bus {
+               pcm1681@4c {
+                       compatible = "ti,pcm1681";
+                       reg = <0x4c>;
+               };
+       };
index f47c3f589fd03a8deb2485292116e33d17a0f7da..705a6b156c6c4fc00713443f5fa631996b259401 100644 (file)
@@ -3,7 +3,14 @@ Texas Instruments - tlv320aic3x Codec module
 The tlv320aic3x serial control bus communicates through I2C protocols
 
 Required properties:
-- compatible - "string" -  "ti,tlv320aic3x"
+
+- compatible - "string" - One of:
+    "ti,tlv320aic3x" - Generic TLV320AIC3x device
+    "ti,tlv320aic33" - TLV320AIC33
+    "ti,tlv320aic3007" - TLV320AIC3007
+    "ti,tlv320aic3106" - TLV320AIC3106
+
+
 - reg - <int> -  I2C slave address
 
 
index 15f70048469bdde098a857c6f2566d94ce320d2a..236690e99b87fbc81c0cb5b1823981cf1e87ff4d 100644 (file)
@@ -16,3 +16,12 @@ codec: wm8731@1a {
        compatible = "wlf,wm8731";
        reg = <0x1a>;
 };
+
+Available audio endpoints for an audio-routing table:
+ * LOUT: Left Channel Line Output
+ * ROUT: Right Channel Line Output
+ * LHPOUT: Left Channel Headphone Output
+ * RHPOUT: Right Channel Headphone Output
+ * LLINEIN: Left Channel Line Input
+ * RLINEIN: Right Channel Line Input
+ * MICIN: Microphone Input
index e65277a0fb60dee8b720ef3246cb2ae581f640c7..8eee6128210552e0d8decfedeffbc0b216a9ec21 100644 (file)
@@ -10,9 +10,31 @@ Required properties:
   - reg : the I2C address of the device for I2C, the chip select
           number for SPI.
 
+Pins on the device (for linking into audio routes):
+
+  * LOUT1
+  * LOUT2
+  * ROUT1
+  * ROUT2
+  * MONO1
+  * MONO2
+  * OUT3
+  * OUT4
+  * LINE1
+  * LINE2
+  * RXP
+  * RXN
+  * ACIN
+  * ACOP
+  * MIC1N
+  * MIC1
+  * MIC2N
+  * MIC2
+  * Mic Bias
+
 Example:
 
-codec: wm8737@1a {
+codec: wm8753@1a {
        compatible = "wlf,wm8753";
        reg = <0x1a>;
 };
index f102cbc426943370dad18b96f311827a86e39dad..94ec32c194bb1fc8f3dd4b6bbe10069008e4d673 100644 (file)
@@ -28,6 +28,25 @@ Optional properties:
     performed. If any entry has the value 0xffffffff, that GPIO's
     configuration will not be modified.
 
+Pins on the device (for linking into audio routes):
+
+  * IN1L
+  * IN1R
+  * IN2L
+  * IN2R
+  * IN3L
+  * IN3R
+  * DMICDAT
+  * HPOUTL
+  * HPOUTR
+  * LINEOUTL
+  * LINEOUTR
+  * LOP
+  * LON
+  * ROP
+  * RON
+  * MICBIAS
+
 Example:
 
 codec: wm8903@1a {
index f2f3e80934d227f123e4ecef3f8562c1530ed70a..e045e90a0924bc6e4d8a1861cd1a43b1a7a8d178 100644 (file)
@@ -32,6 +32,10 @@ Optional properties:
     The second cell is the flags, encoded as the trigger masks from
     Documentation/devicetree/bindings/interrupts.txt
 
+  - clocks : A list of up to two phandle and clock specifier pairs
+  - clock-names : A list of clock names sorted in the same order as clocks.
+                  Valid clock names are "MCLK1" and "MCLK2".
+
   - wlf,gpio-cfg : A list of GPIO configuration register values. If absent,
     no configuration of these registers is performed. If any value is
     over 0xffff then the register will be left as default. If present 11
diff --git a/Documentation/devicetree/bindings/thermal/exynos-thermal.txt b/Documentation/devicetree/bindings/thermal/exynos-thermal.txt
new file mode 100644 (file)
index 0000000..284f530
--- /dev/null
@@ -0,0 +1,55 @@
+* Exynos Thermal Management Unit (TMU)
+
+** Required properties:
+
+- compatible : One of the following:
+              "samsung,exynos4412-tmu"
+              "samsung,exynos4210-tmu"
+              "samsung,exynos5250-tmu"
+              "samsung,exynos5440-tmu"
+- interrupt-parent : The phandle for the interrupt controller
+- reg : Address range of the thermal registers. For SoCs which have multiple
+       instances of TMU and where some registers (e.g. interrupt-related ones)
+       are shared across all TMUs, two sets of registers have to be supplied.
+       The first set belongs to each instance of TMU and the second set belongs
+       to the common TMU registers.
+- interrupts : Should contain interrupt for thermal system
+- clocks : The main clock for TMU device
+- clock-names : Thermal system clock name
+- vtmu-supply: This entry is optional and provides the regulator node supplying
+               voltage to TMU. If needed this entry can be placed inside
+               board/platform specific dts file.
+
+Example 1):
+
+       tmu@100C0000 {
+               compatible = "samsung,exynos4412-tmu";
+               interrupt-parent = <&combiner>;
+               reg = <0x100C0000 0x100>;
+               interrupts = <2 4>;
+               clocks = <&clock 383>;
+               clock-names = "tmu_apbif";
+               status = "disabled";
+               vtmu-supply = <&tmu_regulator_node>;
+       };
+
+Example 2):
+
+       tmuctrl_0: tmuctrl@160118 {
+               compatible = "samsung,exynos5440-tmu";
+               reg = <0x160118 0x230>, <0x160368 0x10>;
+               interrupts = <0 58 0>;
+               clocks = <&clock 21>;
+               clock-names = "tmu_apbif";
+       };
+
+Note: For multi-instance tmu each instance should have an alias correctly
+numbered in "aliases" node.
+
+Example:
+
+aliases {
+       tmuctrl0 = &tmuctrl_0;
+       tmuctrl1 = &tmuctrl_1;
+       tmuctrl2 = &tmuctrl_2;
+};
diff --git a/Documentation/devicetree/bindings/thermal/imx-thermal.txt b/Documentation/devicetree/bindings/thermal/imx-thermal.txt
new file mode 100644 (file)
index 0000000..541c25e
--- /dev/null
@@ -0,0 +1,17 @@
+* Temperature Monitor (TEMPMON) on Freescale i.MX SoCs
+
+Required properties:
+- compatible : "fsl,imx6q-thermal"
+- fsl,tempmon : phandle pointer to system controller that contains TEMPMON
+  control registers, e.g. ANATOP on imx6q.
+- fsl,tempmon-data : phandle pointer to fuse controller that contains TEMPMON
+  calibration data, e.g. OCOTP on imx6q.  The details about calibration data
+  can be found in SoC Reference Manual.
+
+Example:
+
+tempmon {
+       compatible = "fsl,imx6q-tempmon";
+       fsl,tempmon = <&anatop>;
+       fsl,tempmon-data = <&ocotp>;
+};
diff --git a/Documentation/devicetree/bindings/tty/serial/renesas,sci-serial.txt b/Documentation/devicetree/bindings/tty/serial/renesas,sci-serial.txt
new file mode 100644 (file)
index 0000000..6ad1adf
--- /dev/null
@@ -0,0 +1,53 @@
+* Renesas SH-Mobile Serial Communication Interface
+
+Required properties:
+- compatible : Should be "renesas,sci-<port type>-uart", where <port type> may be
+  SCI, SCIF, IRDA, SCIFA or SCIFB.
+- reg : Address and length of the register set for the device
+- interrupts : Should contain the following IRQs: ERI, RXI, TXI and BRI.
+- cell-index : The device id.
+- renesas,scscr : Should contain a bitfield used by the Serial Control Register.
+  b7 = SCSCR_TIE
+  b6 = SCSCR_RIE
+  b5 = SCSCR_TE
+  b4 = SCSCR_RE
+  b3 = SCSCR_REIE
+  b2 = SCSCR_TOIE
+  b1 = SCSCR_CKE1
+  b0 = SCSCR_CKE0
+- renesas,scbrr-algo-id : Algorithm ID for the Bit Rate Register
+  1 = SCBRR_ALGO_1 ((clk + 16 * bps) / (16 * bps) - 1)
+  2 = SCBRR_ALGO_2 ((clk + 16 * bps) / (32 * bps) - 1)
+  3 = SCBRR_ALGO_3 (((clk * 2) + 16 * bps) / (16 * bps) - 1)
+  4 = SCBRR_ALGO_4 (((clk * 2) + 16 * bps) / (32 * bps) - 1)
+  5 = SCBRR_ALGO_5 (((clk * 1000 / 32) / bps) - 1)
+
+Optional properties:
+- renesas,autoconf : Set if device is capable of auto configuration
+- renesas,regtype : Overwrite the register layout. In most cases you can rely
+  on auto-probing (omit this property or set to 0) but some legacy devices
+  use a non-default register layout. Possible layouts are
+  0 = SCIx_PROBE_REGTYPE (default)
+  1 = SCIx_SCI_REGTYPE
+  2 = SCIx_IRDA_REGTYPE
+  3 = SCIx_SCIFA_REGTYPE
+  4 = SCIx_SCIFB_REGTYPE
+  5 = SCIx_SH2_SCIF_FIFODATA_REGTYPE
+  6 = SCIx_SH3_SCIF_REGTYPE
+  7 = SCIx_SH4_SCIF_REGTYPE
+  8 = SCIx_SH4_SCIF_NO_SCSPTR_REGTYPE
+  9 = SCIx_SH4_SCIF_FIFODATA_REGTYPE
+ 10 = SCIx_SH7705_SCIF_REGTYPE
+
+
+Example:
+       sci@0xe6c50000 {
+               compatible = "renesas,sci-SCIFA-uart";
+               interrupt-parent = <&intca>;
+               reg = <0xe6c50000 0x100>;
+               interrupts = <0x0c20>, <0x0c20>, <0x0c20>, <0x0c20>;
+               cell-index = <1>;
+               renesas,scscr = <0x30>;
+               renesas,scbrr-algo-id = <4>;
+               renesas,autoconf;
+       };
diff --git a/Documentation/devicetree/bindings/video/atmel,lcdc.txt b/Documentation/devicetree/bindings/video/atmel,lcdc.txt
new file mode 100644 (file)
index 0000000..1ec175e
--- /dev/null
@@ -0,0 +1,75 @@
+Atmel LCDC Framebuffer
+-----------------------------------------------------
+
+Required properties:
+- compatible :
+       "atmel,at91sam9261-lcdc" , 
+       "atmel,at91sam9263-lcdc" ,
+       "atmel,at91sam9g10-lcdc" ,
+       "atmel,at91sam9g45-lcdc" ,
+       "atmel,at91sam9g45es-lcdc" ,
+       "atmel,at91sam9rl-lcdc" ,
+       "atmel,at32ap-lcdc"
+- reg : Should contain 1 register range (address and length)
+- interrupts : framebuffer controller interrupt
+- display: a phandle pointing to the display node
+
+Required nodes:
+- display: a display node is required to initialize the lcd panel
+       This should be in the board dts.
+- default-mode: a videomode within the display with timing parameters
+       as specified below.
+
+Example:
+
+       fb0: fb@0x00500000 {
+               compatible = "atmel,at91sam9g45-lcdc";
+               reg = <0x00500000 0x1000>;
+               interrupts = <23 3 0>;
+               pinctrl-names = "default";
+               pinctrl-0 = <&pinctrl_fb>;
+               display = <&display0>;
+               status = "okay";
+               #address-cells = <1>;
+               #size-cells = <1>;
+
+       };
+
+Atmel LCDC Display
+-----------------------------------------------------
+Required properties (as per of_videomode_helper):
+
+ - atmel,dmacon: dma controller configuration
+ - atmel,lcdcon2: lcd controller configuration
+ - atmel,guard-time: lcd guard time (Delay in frame periods)
+ - bits-per-pixel: lcd panel bit-depth.
+
+Optional properties (as per of_videomode_helper):
+ - atmel,lcdcon-backlight: enable backlight
+ - atmel,lcd-wiring-mode: lcd wiring mode "RGB" or "BRG"
+ - atmel,power-control-gpio: gpio to power on or off the LCD (as many as needed)
+
+Example:
+       display0: display {
+               bits-per-pixel = <32>;
+               atmel,lcdcon-backlight;
+               atmel,dmacon = <0x1>;
+               atmel,lcdcon2 = <0x80008002>;
+               atmel,guard-time = <9>;
+               atmel,lcd-wiring-mode = <1>;
+
+               display-timings {
+                       native-mode = <&timing0>;
+                       timing0: timing0 {
+                               clock-frequency = <9000000>;
+                               hactive = <480>;
+                               vactive = <272>;
+                               hback-porch = <1>;
+                               hfront-porch = <1>;
+                               vback-porch = <40>;
+                               vfront-porch = <1>;
+                               hsync-len = <45>;
+                               vsync-len = <1>;
+                       };
+               };
+       };
index 3ea4605831115f9ec1780f9797f57145ff5d76cd..70c26f3a5b9ae26eb6e64e2d230728dca3a48488 100644 (file)
@@ -12,6 +12,7 @@ Required properties:
 - stride: The number of bytes in each line of the framebuffer.
 - format: The format of the framebuffer surface. Valid values are:
   - r5g6b5 (16-bit pixels, d[15:11]=r, d[10:5]=g, d[4:0]=b).
+  - a8b8g8r8 (32-bit pixels, d[31:24]=a, d[23:16]=b, d[15:8]=g, d[7:0]=r).
 
 Example:
 
index 0b23261561d28818fdbd630d420211836b6aad6a..e31a2a9d2b075e9dda835218b63fd103cb91d6fa 100644 (file)
@@ -321,7 +321,7 @@ Access to a dma_buf from the kernel context involves three steps:
 
    When the importer is done accessing the range specified in begin_cpu_access,
    it needs to announce this to the exporter (to facilitate cache flushing and
-   unpinning of any pinned resources). The result of of any dma_buf kmap calls
+   unpinning of any pinned resources). The result of any dma_buf kmap calls
    after end_cpu_access is undefined.
 
    Interface:
index b4671459857f825038c9130a1c24336f23b94bdc..1d3233292989682a62252b206a7ff76e6f9d75cc 100644 (file)
@@ -237,6 +237,10 @@ MEM
   devm_kzalloc()
   devm_kfree()
 
+IIO
+  devm_iio_device_alloc()
+  devm_iio_device_free()
+
 IO region
   devm_request_region()
   devm_request_mem_region()
index 661a73fad399b39bedb584a8825f923be1a2a7ae..93e63a9af30b7b0ec63f1cdf3904d92e5731596c 100644 (file)
@@ -83,8 +83,7 @@ Where's this all leading?
 
 The klibc distribution contains some of the necessary software to make
 early userspace useful.  The klibc distribution is currently
-maintained separately from the kernel, but this may change early in
-the 2.7 era (it missed the boat for 2.5).
+maintained separately from the kernel.
 
 You can obtain somewhat infrequent snapshots of klibc from
 ftp://ftp.kernel.org/pub/linux/libs/klibc/
index 99ea58e65eff66ff326590b8657032f234fd7f9f..4a9739abc860651825f62e6ffb71f9b592d57015 100644 (file)
@@ -150,7 +150,7 @@ C. Boot options
 
 C. Attaching, Detaching and Unloading
 
-Before going on on how to attach, detach and unload the framebuffer console, an
+Before going on how to attach, detach and unload the framebuffer console, an
 illustration of the dependencies may help.
 
 The console layer, as with most subsystems, needs a driver that interfaces with
index 02e5b487f00e13eaba81532d9b34674865fbd6d5..2a547da2e5ccf1b965ee03db339e16c257dbc89a 100644 (file)
@@ -571,7 +571,7 @@ mode "640x480-60"
 #                   160 chars   800 lines
 #   Blank Time      4.798 us    0.564 ms
 #                   50 chars    28 lines
-#   Polarity        negtive    positive
+#   Polarity        negative    positive
 #
     mode "1280x800-60"
 # D: 83.500 MHz, H: 49.702 kHz, V: 60.00 Hz
index 444e34b52ae164019c77c74ea83384a5ac006f63..1cb2462a71ce6d41a0dc5cfb771aa367adf2d033 100644 (file)
@@ -32,7 +32,7 @@
     Start viafb with default settings:
         #modprobe viafb
 
-    Start viafb with with user options:
+    Start viafb with user options:
         #modprobe viafb viafb_mode=800x600 viafb_bpp=16 viafb_refresh=60
                   viafb_active_dev=CRT+DVI viafb_dvi_port=DVP1
                   viafb_mode1=1024x768 viafb_bpp=16 viafb_refresh1=60
index fe7afe22538149706eab5727c989d91a8530c387..21ef48f0778f25f9415fd856408aeb78747f260c 100644 (file)
@@ -192,8 +192,8 @@ prototypes:
        void (*invalidatepage) (struct page *, unsigned int, unsigned int);
        int (*releasepage) (struct page *, int);
        void (*freepage)(struct page *);
-       int (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
-                       loff_t offset, unsigned long nr_segs);
+       int (*direct_IO)(int, struct kiocb *, struct iov_iter *iter,
+                       loff_t offset);
        int (*get_xip_mem)(struct address_space *, pgoff_t, int, void **,
                                unsigned long *);
        int (*migratepage)(struct address_space *, struct page *, struct page *);
@@ -426,7 +426,9 @@ prototypes:
        ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
        ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
        ssize_t (*aio_read) (struct kiocb *, const struct iovec *, unsigned long, loff_t);
+       ssize_t (*read_iter) (struct kiocb *, struct iov_iter *, loff_t);
        ssize_t (*aio_write) (struct kiocb *, const struct iovec *, unsigned long, loff_t);
+       ssize_t (*write_iter) (struct kiocb *, struct iov_iter *, loff_t);
        int (*iterate) (struct file *, struct dir_context *);
        unsigned int (*poll) (struct file *, struct poll_table_struct *);
        long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
index b349d57b76ea213b5b3c77bf39bf83c88201108f..9dae59407437916759a73e00166b6ba04b2c4e52 100644 (file)
@@ -87,7 +87,7 @@ Unless otherwise specified, all options default to off.
 
   device=<devicepath>
        Specify a device during mount so that ioctls on the control device
-       can be avoided.  Especialy useful when trying to mount a multi-device
+       can be avoided.  Especially useful when trying to mount a multi-device
        setup as root.  May be specified multiple times for multiple devices.
 
   discard
index 293855e950000ce53223d137b094d266d7911ad3..7ed0d17d67218597c33b46e391a095ca0e3e5aa2 100644 (file)
@@ -26,11 +26,12 @@ journal=inum                When a journal already exists, this option is ignored.
                        Otherwise, it specifies the number of the inode which
                        will represent the ext3 file system's journal file.
 
+journal_path=path
 journal_dev=devnum     When the external journal device's major/minor numbers
-                       have changed, this option allows the user to specify
+                       have changed, these options allow the user to specify
                        the new journal location.  The journal device is
-                       identified through its new major/minor numbers encoded
-                       in devnum.
+                       identified through either its new major/minor numbers
+                       encoded in devnum, or via a path to the device.
 
 norecovery             Don't load the journal on mounting. Note that this forces
 noload                 mount of inconsistent filesystem, which can lead to
index f7cbf574a875271296d93bee53b290ca36c44a66..a92c5aa8ce22ef11e48d1192d2f063f674fc65a4 100644 (file)
@@ -2,7 +2,7 @@
 Ext4 Filesystem
 ===============
 
-Ext4 is an an advanced level of the ext3 filesystem which incorporates
+Ext4 is an advanced level of the ext3 filesystem which incorporates
 scalability and reliability enhancements for supporting large filesystems
 (64 bit) in keeping with increasing disk capacities and state-of-the-art
 feature requirements.
index b91e2f26b672451e687b73fb42f18bb2ebd21821..3cd27bed6349ebeb051419b1de18a63e84288649 100644 (file)
@@ -18,8 +18,8 @@ according to its internal geometry or flash memory management scheme, namely FTL
 F2FS and its tools support various parameters not only for configuring on-disk
 layout, but also for selecting allocation and cleaning algorithms.
 
-The file system formatting tool, "mkfs.f2fs", is available from the following
-git tree:
+The following git tree provides the file system formatting tool (mkfs.f2fs),
+a consistency checking tool (fsck.f2fs), and a debugging tool (dump.f2fs).
 >> git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs-tools.git
 
 For reporting bugs and sending patches, please use the following mailing list:
@@ -132,6 +132,38 @@ f2fs. Each file shows the whole f2fs information.
  - average SIT information about whole segments
  - current memory footprint consumed by f2fs.
 
+================================================================================
+SYSFS ENTRIES
+================================================================================
+
+Information about mounted f2fs file systems can be found in
+/sys/fs/f2fs.  Each mounted filesystem will have a directory in
+/sys/fs/f2fs based on its device name (i.e., /sys/fs/f2fs/sda).
+The files in each per-device directory are shown in table below.
+
+Files in /sys/fs/f2fs/<devname>
+(see also Documentation/ABI/testing/sysfs-fs-f2fs)
+..............................................................................
+ File                         Content
+
+ gc_max_sleep_time            This tuning parameter controls the maximum sleep
+                              time for the garbage collection thread. Time is
+                              in milliseconds.
+
+ gc_min_sleep_time            This tuning parameter controls the minimum sleep
+                              time for the garbage collection thread. Time is
+                              in milliseconds.
+
+ gc_no_gc_sleep_time          This tuning parameter controls the default sleep
+                              time for the garbage collection thread. Time is
+                              in milliseconds.
+
+ gc_idle                      This parameter controls the selection of victim
+                              policy for garbage collection. Setting gc_idle = 0
+                              (default) will disable this option. Setting
+                              gc_idle = 1 will select the Cost Benefit approach
+                              & setting gc_idle = 2 will select the greedy approach.
+
 ================================================================================
 USAGE
 ================================================================================
@@ -149,8 +181,12 @@ USAGE
  # mkfs.f2fs -l label /dev/block_device
  # mount -t f2fs /dev/block_device /mnt/f2fs
 
-Format options
---------------
+mkfs.f2fs
+---------
+The mkfs.f2fs tool is used to format a partition as the f2fs filesystem,
+which builds a basic on-disk layout.
+
+The options consist of:
 -l [label]   : Give a volume label, up to 512 unicode name.
 -a [0 or 1]  : Split start location of each area for heap-based allocation.
                1 is set by default, which performs this.
@@ -164,6 +200,37 @@ Format options
 -t [0 or 1]  : Disable discard command or not.
                1 is set by default, which conducts discard.
 
+fsck.f2fs
+---------
+The fsck.f2fs is a tool to check the consistency of an f2fs-formatted
+partition, which examines whether the filesystem metadata and user-made data
+are cross-referenced correctly or not.
+Note that the initial version of the tool does not fix any inconsistency.
+
+The options consist of:
+  -d debug level [default:0]
+
+dump.f2fs
+---------
+The dump.f2fs shows the information of specific inode and dumps SSA and SIT to
+file. Each file is dump_ssa and dump_sit.
+
+The dump.f2fs is used to debug on-disk data structures of the f2fs filesystem.
+It shows on-disk inode information recognized by a given inode number, and is
+able to dump all the SSA and SIT entries into predefined files, ./dump_ssa and
+./dump_sit respectively.
+
+The options consist of:
+  -d debug level [default:0]
+  -i inode no (hex)
+  -s [SIT dump segno from #1~#2 (decimal), for all 0~-1]
+  -a [SSA dump segno from #1~#2 (decimal), for all 0~-1]
+
+Examples:
+# dump.f2fs -i [ino] /dev/sdx
+# dump.f2fs -s 0~-1 /dev/sdx (SIT dump)
+# dump.f2fs -a 0~-1 /dev/sdx (SSA dump)
+
 ================================================================================
 DESIGN
 ================================================================================
index 09994c247289944248eb98c9340b1f5e337ed328..e543b1a619cc95b022f97d3c516687fc98f43735 100644 (file)
@@ -93,7 +93,7 @@ For a filesystem to be exportable it must:
    2/ make sure that d_splice_alias is used rather than d_add
       when ->lookup finds an inode for a given parent and name.
 
-      If inode is NULL, d_splice_alias(inode, dentry) is eqivalent to
+      If inode is NULL, d_splice_alias(inode, dentry) is equivalent to
 
                d_add(dentry, inode), NULL
 
index 52ae07f5f578bc44af9bf02b1bc3530510750fdc..adc81a35fe2d98026bf8cf0f778d315bcb78f407 100644 (file)
@@ -12,7 +12,7 @@ struct pnfs_layout_hdr
 ----------------------
 The on-the-wire command LAYOUTGET corresponds to struct
 pnfs_layout_segment, usually referred to by the variable name lseg.
-Each nfs_inode may hold a pointer to a cache of of these layout
+Each nfs_inode may hold a pointer to a cache of these layout
 segments in nfsi->layout, of type struct pnfs_layout_hdr.
 
 We reference the header for the inode pointing to it, across each
index 99e90184a72fad70453c54cc0974641435cffcbc..408679789136c196df02e0461f9576134e8bab8b 100644 (file)
@@ -149,7 +149,7 @@ Bitmap system area
 ------------------
 
 The bitmap itself is divided into three parts.
-First the system area, that is split into two halfs.
+First the system area, that is split into two halves.
 Then userspace.
 
 The requirement for a static, fixed preallocated system area comes from how
index 510b722667ac885cf7abed3732961941a5ad1c14..33e2f369473375b691f15583c836903728be1c39 100644 (file)
@@ -31,7 +31,7 @@ Semantics
 
 Each relay channel has one buffer per CPU, each buffer has one or more
 sub-buffers.  Messages are written to the first sub-buffer until it is
-too full to contain a new message, in which case it it is written to
+too full to contain a new message, in which case it is written to
 the next (if available).  Messages are never split across sub-buffers.
 At this point, userspace can be notified so it empties the first
 sub-buffer, while the kernel continues writing to the next.
index caaaf1266d8f33d37be423f7556ad69bbb1c535a..eb843e49c5a39d558d3bb91b043f47512b2edc59 100644 (file)
@@ -24,7 +24,7 @@ flag between KOBJ_NS_TYPE_NONE and KOBJ_NS_TYPES, and s_ns will
 point to the namespace to which it belongs.
 
 Each sysfs superblock's sysfs_super_info contains an array void
-*ns[KOBJ_NS_TYPES].  When a task in a tagging namespace
+*ns[KOBJ_NS_TYPES].  When a task in a tagging namespace
 kobj_nstype first mounts sysfs, a new superblock is created.  It
 will be differentiated from other sysfs mounts by having its
 s_fs_info->ns[kobj_nstype] set to the new namespace.  Note that
index f93a88250a448a99293d0a6d776af50b1a98453b..f8749f794ab97815de5479fef3749765b4cfcf5e 100644 (file)
@@ -573,8 +573,8 @@ struct address_space_operations {
        void (*invalidatepage) (struct page *, unsigned int, unsigned int);
        int (*releasepage) (struct page *, int);
        void (*freepage)(struct page *);
-       ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
-                       loff_t offset, unsigned long nr_segs);
+       ssize_t (*direct_IO)(int, struct kiocb *, struct iov_iter *iter,
+                       loff_t offset);
        struct page* (*get_xip_page)(struct address_space *, sector_t,
                        int);
        /* migrate the contents of a page to the specified target */
@@ -790,7 +790,9 @@ struct file_operations {
        ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
        ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
        ssize_t (*aio_read) (struct kiocb *, const struct iovec *, unsigned long, loff_t);
+       ssize_t (*read_iter) (struct kiocb *, struct iov_iter *, loff_t);
        ssize_t (*aio_write) (struct kiocb *, const struct iovec *, unsigned long, loff_t);
+       ssize_t (*write_iter) (struct kiocb *, struct iov_iter *, loff_t);
        int (*iterate) (struct file *, struct dir_context *);
        unsigned int (*poll) (struct file *, struct poll_table_struct *);
        long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
@@ -825,10 +827,16 @@ otherwise noted.
 
   aio_read: called by io_submit(2) and other asynchronous I/O operations
 
+  read_iter: aio_read replacement, called by io_submit(2) and other
+       asynchronous I/O operations
+
   write: called by write(2) and related system calls
 
   aio_write: called by io_submit(2) and other asynchronous I/O operations
 
+  write_iter: aio_write replacement, called by io_submit(2) and other
+       asynchronous I/O operations
+
   iterate: called when the VFS needs to read the directory contents
 
   poll: called by the VFS when a process wants to check if there is
index 12525b17d9ed545343bd27e952cf366c5d228f51..5be51fd888bd9bccc02f47caca8c0a5b61a9ec76 100644 (file)
@@ -135,7 +135,7 @@ default behaviour.
        If the memory cost of 8 log buffers is too high on small
        systems, then it may be reduced at some cost to performance
        on metadata intensive workloads. The logbsize option below
-       controls the size of each buffer and so is also relevent to
+       controls the size of each buffer and so is also relevant to
        this case.
 
   logbsize=value
index 3c741214dfbbb028044be22a88c5359f7ba49646..dc35a2b75eeec08743b5e55614508a881589d3c0 100644 (file)
@@ -149,11 +149,13 @@ needs. Only UHID_OUTPUT and UHID_OUTPUT_EV have payloads.
   is of type "struct uhid_data_req".
   This may be received even though you haven't received UHID_OPEN, yet.
 
-  UHID_OUTPUT_EV:
+  UHID_OUTPUT_EV (obsolete):
   Same as UHID_OUTPUT but this contains a "struct input_event" as payload. This
   is called for force-feedback, LED or similar events which are received through
   an input device by the HID subsystem. You should convert this into raw reports
   and send them to your device similar to events of type UHID_OUTPUT.
+  This is no longer sent by newer kernels. Instead, HID core converts it into a
+  raw output report and sends it via UHID_OUTPUT.
 
   UHID_FEATURE:
   This event is sent if the kernel driver wants to perform a feature request as
index 8d2be8a0b1e38c146a80130d8d96d94ce6025516..86c0b1251c81b27f8bb9020edebad31754116e32 100644 (file)
@@ -299,7 +299,7 @@ Byte 1:
 min threshold (scale as bank 0x26)
 
 
-Warning for the adventerous
+Warning for the adventurous
 ===========================
 
 A word of caution to those who want to experiment and see if they can figure
index f6fe9c203733a9508cdfdc0c6b27e1d13921c3e1..063b80d857b1f86e54b2b31c89c77ffdff980847 100644 (file)
@@ -6,6 +6,10 @@ Supported chips:
     Prefix: 'ads1015'
     Datasheet: Publicly available at the Texas Instruments website :
                http://focus.ti.com/lit/ds/symlink/ads1015.pdf
+  * Texas Instruments ADS1115
+    Prefix: 'ads1115'
+    Datasheet: Publicly available at the Texas Instruments website :
+               http://focus.ti.com/lit/ds/symlink/ads1115.pdf
 
 Authors:
         Dirk Eibach, Guntermann & Drunck GmbH <eibach@gdsys.de>
@@ -13,9 +17,9 @@ Authors:
 Description
 -----------
 
-This driver implements support for the Texas Instruments ADS1015.
+This driver implements support for the Texas Instruments ADS1015/ADS1115.
 
-This device is a 12-bit A-D converter with 4 inputs.
+This device is a 12/16-bit A-D converter with 4 inputs.
 
 The inputs can be used single ended or in certain differential combinations.
 
index 90387c3540f70e4cb99e2401b2f608672f51ffc3..f4021a285460353ca0e4a2eda7b934d7839b57ff 100644 (file)
@@ -17,7 +17,7 @@ Credits:
     Philip Edelbrock <phil@netroedge.com>,
     and Mark Studebaker <mdsxyz123@yahoo.com>
   w83792d.c:
-    Chunhao Huang <DZShen@Winbond.com.tw>,
+    Shane Huang (Winbond),
     Rudolf Marek <r.marek@assembler.cz>
 
 Additional contributors:
index 8a023ce0b72e7726bf8888b88d304f0be5ce51a3..53f7b6866fec4d51c4049ba6c700d58e1950a046 100644 (file)
@@ -7,8 +7,7 @@ Supported chips:
     Addresses scanned: I2C 0x2c - 0x2f
     Datasheet: http://www.winbond.com.tw
 
-Author: Chunhao Huang
-Contact: DZShen <DZShen@Winbond.com.tw>
+Author: Shane Huang (Winbond)
 
 
 Module Parameters
index a370b2047cf3025b5a0c318af15c670a76bc41e3..c097e0f020fe1d786bed67fd3ed9e06fd99f7b63 100644 (file)
@@ -73,9 +73,10 @@ this driver on those mainboards.
 The ServerWorks Southbridges, the Intel 440MX, and the Victory66 are
 identical to the PIIX4 in I2C/SMBus support.
 
-The AMD SB700 and SP5100 chipsets implement two PIIX4-compatible SMBus
-controllers. If your BIOS initializes the secondary controller, it will
-be detected by this driver as an "Auxiliary SMBus Host Controller".
+The AMD SB700, SB800, SP5100 and Hudson-2 chipsets implement two
+PIIX4-compatible SMBus controllers. If your BIOS initializes the
+secondary controller, it will be detected by this driver as
+an "Auxiliary SMBus Host Controller".
 
 If you own Force CPCI735 motherboard or other OSB4 based systems you may need
 to change the SMBus Interrupt Select register so the SMBus controller uses
index 22182660dda762816ad4663588f9e50cb7076622..c70e7a7638d1e6e66cbddd3cf7800325560cad28 100644 (file)
@@ -19,7 +19,7 @@ i2c_board_info which is registered by calling i2c_register_board_info().
 
 Example (from omap2 h4):
 
-static struct i2c_board_info __initdata h4_i2c_board_info[] = {
+static struct i2c_board_info h4_i2c_board_info[] __initdata = {
        {
                I2C_BOARD_INFO("isp1301_omap", 0x2d),
                .irq            = OMAP_GPIO_IRQ(125),
index d6991625c407d41102f25a008c4f4801396ec1b7..8e5fbd88c7d1472af8f6d1da1ed5798340cfc345 100644 (file)
@@ -196,8 +196,8 @@ static int example_probe(struct i2c_client *i2c_client,
 
 Update the detach method, by changing the name to _remove and
 to delete the i2c_detach_client call. It is possible that you
-can also remove the ret variable as it is not not needed for
-any of the core functions.
+can also remove the ret variable as it is not needed for any
+of the core functions.
 
 - static int example_detach(struct i2c_client *client)
 + static int example_remove(struct i2c_client *client)
diff --git a/Documentation/input/gamepad.txt b/Documentation/input/gamepad.txt
new file mode 100644 (file)
index 0000000..8002c89
--- /dev/null
@@ -0,0 +1,156 @@
+                            Linux Gamepad API
+----------------------------------------------------------------------------
+
+1. Intro
+~~~~~~~~
+Linux provides many different input drivers for gamepad hardware. To avoid
+having user-space deal with different button-mappings for each gamepad, this
+document defines how gamepads are supposed to report their data.
+
+2. Geometry
+~~~~~~~~~~~
+As "gamepad" we define devices which roughly look like this:
+
+            ____________________________              __
+           / [__ZL__]          [__ZR__] \               |
+          / [__ TL __]        [__ TR __] \              | Front Triggers
+       __/________________________________\__         __|
+      /                                  _   \          |
+     /      /\           __             (N)   \         |
+    /       ||      __  |MO|  __     _       _ \        | Main Pad
+   |    <===DP===> |SE|      |ST|   (W) -|- (E) |       |
+    \       ||    ___          ___       _     /        |
+    /\      \/   /   \        /   \     (S)   /\      __|
+   /  \________ | LS  | ____ |  RS | ________/  \       |
+  |         /  \ \___/ /    \ \___/ /  \         |      | Control Sticks
+  |        /    \_____/      \_____/    \        |    __|
+  |       /                              \       |
+   \_____/                                \_____/
+
+       |________|______|    |______|___________|
+         D-Pad    Left       Right   Action Pad
+                 Stick       Stick
+
+                   |_____________|
+                      Menu Pad
+
+Most gamepads have the following features:
+  - Action-Pad
+    4 buttons in diamonds-shape (on the right side). The buttons are
+    differently labeled on most devices so we define them as NORTH,
+    SOUTH, WEST and EAST.
+  - D-Pad (Direction-pad)
+    4 buttons (on the left side) that point up, down, left and right.
+  - Menu-Pad
+    Different constellations, but most-times 2 buttons: SELECT - START
+    Furthermore, many gamepads have a fancy branded button that is used as
+    special system-button. It often looks different to the other buttons and
+    is used to pop up system-menus or system-settings.
+  - Analog-Sticks
+    Analog-sticks provide freely moveable sticks to control directions. Not
+    all devices have both or any, but they are present at most times.
+    Analog-sticks may also provide a digital button if you press them.
+  - Triggers
+    Triggers are located on the upper-side of the pad in vertical direction.
+    Not all devices provide them, but the upper buttons are normally named
+    Left- and Right-Triggers, the lower buttons Z-Left and Z-Right.
+  - Rumble
+    Many devices provide force-feedback features. But are mostly just
+    simple rumble motors.
+
+3. Detection
+~~~~~~~~~~~~
+All gamepads that follow the protocol described here map BTN_GAMEPAD. This is
+an alias for BTN_SOUTH/BTN_A. It can be used to identify a gamepad as such.
+However, not all gamepads provide all features, so you need to test for all
+features that you need, first. How each feature is mapped is described below.
+
+Legacy drivers often don't comply to these rules. As we cannot change them
+for backwards-compatibility reasons, you need to provide fixup mappings in
+user-space yourself. Some of them might also provide module-options that
+change the mappings so you can advise users to set these.
+
+All new gamepads are supposed to comply with this mapping. Please report any
+bugs, if they don't.
+
+There are a lot of less-featured/less-powerful devices out there, which re-use
+the buttons from this protocol. However, they try to do this in a compatible
+fashion. For example, the "Nintendo Wii Nunchuk" provides two trigger buttons
+and one analog stick. It reports them as if it were a gamepad with only one
+analog stick and two trigger buttons on the right side.
+But that means, that if you only support "real" gamepads, you must test
+devices for _all_ reported events that you need. Otherwise, you will also get
+devices that report a small subset of the events.
+
+No other devices, that do not look/feel like a gamepad, shall report these
+events.
+
+4. Events
+~~~~~~~~~
+Gamepads report the following events:
+
+Action-Pad:
+  Every gamepad device has at least 2 action buttons. This means, that every
+  device reports BTN_SOUTH (which BTN_GAMEPAD is an alias for). Regardless
+  of the labels on the buttons, the codes are sent according to the
+  physical position of the buttons.
+  Please note that 2- and 3-button pads are fairly rare and old. You might
+  want to filter gamepads that do not report all four.
+    2-Button Pad:
+      If only 2 action-buttons are present, they are reported as BTN_SOUTH and
+      BTN_EAST. For vertical layouts, the upper button is BTN_EAST. For
+      horizontal layouts, the button more on the right is BTN_EAST.
+    3-Button Pad:
+      If only 3 action-buttons are present, they are reported as (from left
+      to right): BTN_WEST, BTN_SOUTH, BTN_EAST
+      If the buttons are aligned perfectly vertically, they are reported as
+      (from top down): BTN_WEST, BTN_SOUTH, BTN_EAST
+    4-Button Pad:
+      If all 4 action-buttons are present, they can be aligned in two
+      different formations. If diamond-shaped, they are reported as BTN_NORTH,
+      BTN_WEST, BTN_SOUTH, BTN_EAST according to their physical location.
+      If rectangular-shaped, the upper-left button is BTN_NORTH, lower-left
+      is BTN_WEST, lower-right is BTN_SOUTH and upper-right is BTN_EAST.
+
+D-Pad:
+  Every gamepad provides a D-Pad with four directions: Up, Down, Left, Right
+  Some of these are available as digital buttons, some as analog buttons. Some
+  may even report both. The kernel does not convert between these so
+  applications should support both and choose what is more appropriate if
+  both are reported.
+    Digital buttons are reported as:
+      BTN_DPAD_*
+    Analog buttons are reported as:
+      ABS_HAT0X and ABS_HAT0Y
+
+Analog-Sticks:
+  The left analog-stick is reported as ABS_X, ABS_Y. The right analog stick is
+  reported as ABS_RX, ABS_RY. Zero, one or two sticks may be present.
+  If analog-sticks provide digital buttons, they are mapped accordingly as
+  BTN_THUMBL (first/left) and BTN_THUMBR (second/right).
+
+Triggers:
+  Trigger buttons can be available as digital or analog buttons or both. User-
+  space must correctly deal with any situation and choose the most appropriate
+  mode.
+  Upper trigger buttons are reported as BTN_TR or ABS_HAT1X (right) and BTN_TL
+  or ABS_HAT1Y (left). Lower trigger buttons are reported as BTN_TR2 or
+  ABS_HAT2X (right/ZR) and BTN_TL2 or ABS_HAT2Y (left/ZL).
+  If only one trigger-button combination is present (upper+lower), they are
+  reported as "right" triggers (BTN_TR/ABS_HAT1X).
+
+Menu-Pad:
+  Menu buttons are always digital and are mapped according to their location
+  instead of their labels. That is:
+    1-button Pad: Mapped as BTN_START
+    2-button Pad: Left button mapped as BTN_SELECT, right button mapped as
+                  BTN_START
+  Many pads also have a third button which is branded or has a special symbol
+  and meaning. Such buttons are mapped as BTN_MODE. Examples are the Nintendo
+  "HOME" button, the XBox "X"-button or Sony "P" button.
+
+Rumble:
+  Rumble is advertised as FF_RUMBLE.
+
+----------------------------------------------------------------------------
+  Written 2013 by David Herrmann <dh.herrmann@gmail.com>
index e349f293cc9829dc5cad185a41f95cb8627e90ea..8ef6dbb6a462d707401fe08289dfa51702125789 100644 (file)
@@ -175,11 +175,9 @@ Searching in menuconfig:
                /^hotplug
 
        When searching, symbols are sorted thus:
-         - exact match first: an exact match is when the search matches
-           the complete symbol name;
-         - alphabetical order: when two symbols do not match exactly,
-           they are sorted in alphabetical order (in the user's current
-           locale).
+         - first, exact matches, sorted alphabetically (an exact match
+           is when the search matches the complete symbol name);
+         - then, other matches, sorted alphabetically.
        For example: ^ATH.K matches:
            ATH5K ATH9K ATH5K_AHB ATH5K_DEBUG [...] ATH6KL ATH6KL_DEBUG
            [...] ATH9K_AHB ATH9K_BTCOEX_SUPPORT ATH9K_COMMON [...]
index 15356aca938cd9a7bb2cdef09d8e7a19da36db90..5ec77aad1773ce5f55894cff118edfe9229830cd 100644 (file)
@@ -235,10 +235,61 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                        Format: To spoof as Windows 98: ="Microsoft Windows"
 
        acpi_osi=       [HW,ACPI] Modify list of supported OS interface strings
-                       acpi_osi="string1"      # add string1 -- only one string
-                       acpi_osi="!string2"     # remove built-in string2
+                       acpi_osi="string1"      # add string1
+                       acpi_osi="!string2"     # remove string2
+                       acpi_osi=!*             # remove all strings
+                       acpi_osi=!              # disable all built-in OS vendor
+                                                 strings
                        acpi_osi=               # disable all strings
 
+                       'acpi_osi=!' can be used in combination with single or
+                       multiple 'acpi_osi="string1"' to support specific OS
+                       vendor string(s).  Note that such command can only
+                       affect the default state of the OS vendor strings, thus
+                       it cannot affect the default state of the feature group
+                       strings and the current state of the OS vendor strings,
+                       specifying it multiple times through kernel command line
+                       is meaningless.  This command is useful when one does not
+                       care about the state of the feature group strings which
+                       should be controlled by the OSPM.
+                       Examples:
+                         1. 'acpi_osi=! acpi_osi="Windows 2000"' is equivalent
+                            to 'acpi_osi="Windows 2000" acpi_osi=!', they all
+                            can make '_OSI("Windows 2000")' TRUE.
+
+                       'acpi_osi=' cannot be used in combination with other
+                       'acpi_osi=' command lines, the _OSI method will not
+                       exist in the ACPI namespace.  NOTE that such command can
+                       only affect the _OSI support state, thus specifying it
+                       multiple times through kernel command line is also
+                       meaningless.
+                       Examples:
+                         1. 'acpi_osi=' can make 'CondRefOf(_OSI, Local1)'
+                            FALSE.
+
+                       'acpi_osi=!*' can be used in combination with single or
+                       multiple 'acpi_osi="string1"' to support specific
+                       string(s).  Note that such command can affect the
+                       current state of both the OS vendor strings and the
+                       feature group strings, thus specifying it multiple times
+                       through kernel command line is meaningful.  But it may
+                       still not be able to affect the final state of a string if
+                       there are quirks related to this string.  This command
+                       is useful when one wants to control the state of the
+                       feature group strings to debug BIOS issues related to
+                       the OSPM features.
+                       Examples:
+                         1. 'acpi_osi="Module Device" acpi_osi=!*' can make
+                            '_OSI("Module Device")' FALSE.
+                         2. 'acpi_osi=!* acpi_osi="Module Device"' can make
+                            '_OSI("Module Device")' TRUE.
+                         3. 'acpi_osi=! acpi_osi=!* acpi_osi="Windows 2000"' is
+                            equivalent to
+                            'acpi_osi=!* acpi_osi=! acpi_osi="Windows 2000"'
+                            and
+                            'acpi_osi=!* acpi_osi="Windows 2000" acpi_osi=!',
+                            they all will make '_OSI("Windows 2000")' TRUE.
+
        acpi_pm_good    [X86]
                        Override the pmtimer bug detection: force the kernel
                        to assume that this machine's pmtimer latches its value
index 69f9fb3701e07804961f931ef76ea5716b4f8312..79a1bc675a8dd468bf2b9153690459230196408a 100644 (file)
@@ -8,8 +8,8 @@ http://acpi4asus.sf.net/
 
  This driver provides support for extra features of ACPI-compatible ASUS laptops.
  It may also support some MEDION, JVC or VICTOR laptops (such as MEDION 9675 or
- VICTOR XP7210 for example). It makes all the extra buttons generate standard
ACPI events that go through /proc/acpi/events and input events (like keyboards).
+ VICTOR XP7210 for example). It makes all the extra buttons generate input
+ events (like keyboards).
  On some models adds support for changing the display brightness and output,
  switching the LCD backlight on and off, and most importantly, allows you to
  blink those fancy LEDs intended for reporting mail and wireless status.
@@ -55,8 +55,8 @@ Usage
   DSDT) to me.
 
   That's all, now, all the events generated by the hotkeys of your laptop
-  should be reported in your /proc/acpi/event entry. You can check with
-  "acpi_listen".
+  should be reported via netlink events. You can check with
+  "acpi_genl monitor" (part of the acpica project).
 
   Hotkeys are also reported as input keys (like keyboards) you can check
   which key are supported using "xev" under X11.
index 0d5ac7f5287e611d4aa171937a1d83b31d0f6038..978b1e615155da1e4ddd24429c63708795965dd2 100644 (file)
@@ -12,10 +12,10 @@ Fn keys (hotkeys):
 ------------------
 Some models report hotkeys through the SNC or SPIC devices, such events are
 reported both through the ACPI subsystem as acpi events and through the INPUT
-subsystem. See the logs of acpid or /proc/acpi/event and
-/proc/bus/input/devices to find out what those events are and which input
-devices are created by the driver. Additionally, loading the driver with the
-debug option will report all events in the kernel log.
+subsystem. See the logs of /proc/bus/input/devices to find out what those
+events are and which input devices are created by the driver.
+Additionally, loading the driver with the debug option will report all events
+in the kernel log.
 
 The "scancodes" passed to the input system (that can be remapped with udev)
 are indexes to the table "sony_laptop_input_keycode_map" in the sony-laptop.c
index cf7bc6cb9719adca0bf96f2f65e7640f8866ceb2..86c52360ffe7326cce53541897d4aa98b86f0e8a 100644 (file)
@@ -329,20 +329,6 @@ sysfs notes:
 
                This attribute has poll()/select() support.
 
-       hotkey_report_mode:
-               Returns the state of the procfs ACPI event report mode
-               filter for hot keys.  If it is set to 1 (the default),
-               all hot key presses are reported both through the input
-               layer and also as ACPI events through procfs (but not
-               through netlink).  If it is set to 2, hot key presses
-               are reported only through the input layer.
-
-               This attribute is read-only in kernels 2.6.23 or later,
-               and read-write on earlier kernels.
-
-               May return -EPERM (write access locked out by module
-               parameter) or -EACCES (read-only).
-
        wakeup_reason:
                Set to 1 if the system is waking up because the user
                requested a bay ejection.  Set to 2 if the system is
@@ -518,24 +504,21 @@ SW_TABLET_MODE    Tablet ThinkPads HKEY events 0x5009 and 0x500A
 Non hotkey ACPI HKEY event map:
 -------------------------------
 
-Events that are not propagated by the driver, except for legacy
-compatibility purposes when hotkey_report_mode is set to 1:
-
-0x5001         Lid closed
-0x5002         Lid opened
-0x5009         Tablet swivel: switched to tablet mode
-0x500A         Tablet swivel: switched to normal mode
-0x7000         Radio Switch may have changed state
-
 Events that are never propagated by the driver:
 
 0x2304         System is waking up from suspend to undock
 0x2305         System is waking up from suspend to eject bay
 0x2404         System is waking up from hibernation to undock
 0x2405         System is waking up from hibernation to eject bay
+0x5001         Lid closed
+0x5002         Lid opened
+0x5009         Tablet swivel: switched to tablet mode
+0x500A         Tablet swivel: switched to normal mode
 0x5010         Brightness level changed/control event
 0x6000         KEYBOARD: Numlock key pressed
 0x6005         KEYBOARD: Fn key pressed (TO BE VERIFIED)
+0x7000         Radio Switch may have changed state
+
 
 Events that are propagated by the driver to userspace:
 
@@ -574,50 +557,6 @@ operating system is to force either an immediate suspend or hibernate
 cycle, or a system shutdown.  Obviously, something is very wrong if this
 happens.
 
-Compatibility notes:
-
-ibm-acpi and thinkpad-acpi 0.15 (mainline kernels before 2.6.23) never
-supported the input layer, and sent events over the procfs ACPI event
-interface.
-
-To avoid sending duplicate events over the input layer and the ACPI
-event interface, thinkpad-acpi 0.16 implements a module parameter
-(hotkey_report_mode), and also a sysfs device attribute with the same
-name.
-
-Make no mistake here: userspace is expected to switch to using the input
-layer interface of thinkpad-acpi, together with the ACPI netlink event
-interface in kernels 2.6.23 and later, or with the ACPI procfs event
-interface in kernels 2.6.22 and earlier.
-
-If no hotkey_report_mode module parameter is specified (or it is set to
-zero), the driver defaults to mode 1 (see below), and on kernels 2.6.22
-and earlier, also allows one to change the hotkey_report_mode through
-sysfs.  In kernels 2.6.23 and later, where the netlink ACPI event
-interface is available, hotkey_report_mode cannot be changed through
-sysfs (it is read-only).
-
-If the hotkey_report_mode module parameter is set to 1 or 2, it cannot
-be changed later through sysfs (any writes will return -EPERM to signal
-that hotkey_report_mode was locked.  On 2.6.23 and later, where
-hotkey_report_mode cannot be changed at all, writes will return -EACCES).
-
-hotkey_report_mode set to 1 makes the driver export through the procfs
-ACPI event interface all hot key presses (which are *also* sent to the
-input layer).  This is a legacy compatibility behaviour, and it is also
-the default mode of operation for the driver.
-
-hotkey_report_mode set to 2 makes the driver filter out the hot key
-presses from the procfs ACPI event interface, so these events will only
-be sent through the input layer.  Userspace that has been updated to use
-the thinkpad-acpi input layer interface should set hotkey_report_mode to
-2.
-
-Hot key press events are never sent to the ACPI netlink event interface.
-Really up-to-date userspace under kernel 2.6.23 and later is to use the
-netlink interface and the input layer interface, and don't bother at all
-with hotkey_report_mode.
-
 
 Brightness hotkey notes:
 
index d9eb91b5191353b68c84924ef0a62fe8716de41a..62278e871b503cfdc9ba31e99a92a82cd0d42d12 100644 (file)
@@ -71,7 +71,7 @@ To register the chip at address 0x63 on specific adapter, set the platform data
 according to include/linux/platform_data/leds-lm3556.h, set the i2c board info
 
 Example:
-       static struct i2c_board_info __initdata board_i2c_ch4[] = {
+       static struct i2c_board_info board_i2c_ch4[] __initdata = {
                {
                         I2C_BOARD_INFO(LM3556_NAME, 0x63),
                         .platform_data = &lm3556_pdata,
index c6eda18b15efbb0ceec0b2a25e8f6e06ada488cc..e88ac3b60c0813936f1a33a3cb932c1991c72aa3 100644 (file)
@@ -37,7 +37,7 @@ registered using the i2c_board_info mechanism.
 To register the chip at address 0x60 on adapter 0, set the platform data
 according to include/linux/leds-lp3944.h, set the i2c board info:
 
-       static struct i2c_board_info __initdata a910_i2c_board_info[] = {
+       static struct i2c_board_info a910_i2c_board_info[] __initdata = {
                {
                        I2C_BOARD_INFO("lp3944", 0x60),
                        .platform_data = &a910_lp3944_leds,
index 32dfbd924121f13747f220d692b0ad61a3a507d2..18b64b2b8a682552b4c3fd08f607a817218615c5 100644 (file)
@@ -124,6 +124,8 @@ multiqueue.txt
        - HOWTO for multiqueue network device support.
 netconsole.txt
        - The network console module netconsole.ko: configuration and notes.
+netdev-FAQ.txt
+       - FAQ describing how to submit net changes to netdev mailing list.
 netdev-features.txt
        - Network interface features API description.
 netdevices.txt
index 10742902146fc1da3ae1c84a94929b42a79e29d0..debfe857d8f9c8694f2483c9e41826973d24dac0 100644 (file)
@@ -440,6 +440,10 @@ tcp_syncookies - BOOLEAN
        SYN flood warnings in logs not being really flooded, your server
        is seriously misconfigured.
 
+       If you want to test which effects syncookies have on your
+       network connections you can set this knob to 2 to enable
+       unconditional generation of syncookies.
+
 tcp_fastopen - INTEGER
        Enable TCP Fast Open feature (draft-ietf-tcpm-fastopen) to send data
        in the opening SYN packet. To use this feature, the client application
@@ -516,6 +520,19 @@ tcp_wmem - vector of 3 INTEGERs: min, default, max
        this value is ignored.
        Default: between 64K and 4MB, depending on RAM size.
 
+tcp_notsent_lowat - UNSIGNED INTEGER
+       A TCP socket can control the amount of unsent bytes in its write queue,
+       thanks to TCP_NOTSENT_LOWAT socket option. poll()/select()/epoll()
+       reports POLLOUT events if the amount of unsent bytes is below a per
+       socket value, and if the write queue is not full. sendmsg() will
+       also not add new buffers if the limit is hit.
+
+       This global variable controls the amount of unsent data for
+       sockets not using TCP_NOTSENT_LOWAT. For these sockets, a change
+       to the global variable has immediate effect.
+
+       Default: UINT_MAX (0xFFFFFFFF)
+
 tcp_workaround_signed_windows - BOOLEAN
        If set, assume no receipt of a window scaling option means the
        remote TCP is broken and treats the window as a signed quantity.
@@ -1022,7 +1039,15 @@ disable_policy - BOOLEAN
 disable_xfrm - BOOLEAN
        Disable IPSEC encryption on this interface, whatever the policy
 
+igmpv2_unsolicited_report_interval - INTEGER
+       The interval in milliseconds in which the next unsolicited
+       IGMPv1 or IGMPv2 report retransmit will take place.
+       Default: 10000 (10 seconds)
 
+igmpv3_unsolicited_report_interval - INTEGER
+       The interval in milliseconds in which the next unsolicited
+       IGMPv3 report retransmit will take place.
+       Default: 1000 (1 second)
 
 tag - INTEGER
        Allows you to write a number, which can be used as required.
@@ -1314,6 +1339,16 @@ ndisc_notify - BOOLEAN
        1 - Generate unsolicited neighbour advertisements when device is brought
            up or hardware address changes.
 
+mldv1_unsolicited_report_interval - INTEGER
+       The interval in milliseconds in which the next unsolicited
+       MLDv1 report retransmit will take place.
+       Default: 10000 (10 seconds)
+
+mldv2_unsolicited_report_interval - INTEGER
+       The interval in milliseconds in which the next unsolicited
+       MLDv2 report retransmit will take place.
+       Default: 1000 (1 second)
+
 icmp/*:
 ratelimit - INTEGER
        Limit the maximal rates for sending ICMPv6 packets.
diff --git a/Documentation/networking/netdev-FAQ.txt b/Documentation/networking/netdev-FAQ.txt
new file mode 100644 (file)
index 0000000..d9112f0
--- /dev/null
@@ -0,0 +1,224 @@
+
+Information you need to know about netdev
+-----------------------------------------
+
+Q: What is netdev?
+
+A: It is a mailing list for all network related linux stuff.  This includes
+   anything found under net/  (i.e. core code like IPv6) and drivers/net
+   (i.e. hardware specific drivers) in the linux source tree.
+
+   Note that some subsystems (e.g. wireless drivers) which have a high volume
+   of traffic have their own specific mailing lists.
+
+   The netdev list is managed (like many other linux mailing lists) through
+   VGER ( http://vger.kernel.org/ ) and archives can be found below:
+
+       http://marc.info/?l=linux-netdev
+       http://www.spinics.net/lists/netdev/
+
+   Aside from subsystems like that mentioned above, all network related linux
+   development (i.e. RFC, review, comments, etc) takes place on netdev.
+
+Q: How do the changes posted to netdev make their way into linux?
+
+A: There are always two trees (git repositories) in play.  Both are driven
+   by David Miller, the main network maintainer.  There is the "net" tree,
+   and the "net-next" tree.  As you can probably guess from the names, the
+   net tree is for fixes to existing code already in the mainline tree from
+   Linus, and net-next is where the new code goes for the future release.
+   You can find the trees here:
+
+       http://git.kernel.org/?p=linux/kernel/git/davem/net.git
+       http://git.kernel.org/?p=linux/kernel/git/davem/net-next.git
+
+Q: How often do changes from these trees make it to the mainline Linus tree?
+
+A: To understand this, you need to know a bit of background information
+   on the cadence of linux development.  Each new release starts off with
+   a two week "merge window" where the main maintainers feed their new
+   stuff to Linus for merging into the mainline tree.  After the two weeks,
+   the merge window is closed, and it is called/tagged "-rc1".  No new
+   features get mainlined after this -- only fixes to the rc1 content
+   are expected.  After roughly a week of collecting fixes to the rc1
+   content, rc2 is released.  This repeats on a roughly weekly basis
+   until rc7 (typically; sometimes rc6 if things are quiet, or rc8 if
+   things are in a state of churn), and a week after the last vX.Y-rcN
+   was done, the official "vX.Y" is released.
+
+   Relating that to netdev:  At the beginning of the 2 week merge window,
+   the net-next tree will be closed - no new changes/features.  The
+   accumulated new content of the past ~10 weeks will be passed onto
+   mainline/Linus via a pull request for vX.Y -- at the same time,
+   the "net" tree will start accumulating fixes for this pulled content
+   relating to vX.Y
+
+   An announcement indicating when net-next has been closed is usually
+   sent to netdev, but knowing the above, you can predict that in advance.
+
+   IMPORTANT:  Do not send new net-next content to netdev during the
+   period during which net-next tree is closed.
+
+   Shortly after the two weeks have passed, (and vX.Y-rc1 is released) the
+   tree for net-next reopens to collect content for the next (vX.Y+1) release.
+
+   If you aren't subscribed to netdev and/or are simply unsure if net-next
+   has re-opened yet, simply check the net-next git repository link above for
+   any new networking related commits.
+
+   The "net" tree continues to collect fixes for the vX.Y content, and
+   is fed back to Linus at regular (~weekly) intervals.  Meaning that the
+   focus for "net" is on stabilization and bugfixes.
+
+   Finally, the vX.Y gets released, and the whole cycle starts over.
+
+Q: So where are we now in this cycle?
+
+A: Load the mainline (Linus) page here:
+
+       http://git.kernel.org/?p=linux/kernel/git/torvalds/linux.git
+
+   and note the top of the "tags" section.  If it is rc1, it is early
+   in the dev cycle.  If it was tagged rc7 a week ago, then a release
+   is probably imminent.
+
+Q: How do I indicate which tree (net vs. net-next) my patch should be in?
+
+A: Firstly, think whether you have a bug fix or new "next-like" content.
+   Then once decided, assuming that you use git, use the prefix flag, i.e.
+
+       git format-patch --subject-prefix='PATCH net-next' start..finish
+
+   Use "net" instead of "net-next" (always lower case) in the above for
+   bug-fix net content.  If you don't use git, then note the only magic in
+   the above is just the subject text of the outgoing e-mail, and you can
+   manually change it yourself with whatever MUA you are comfortable with.
+
+Q: I sent a patch and I'm wondering what happened to it.  How can I tell
+   whether it got merged?
+
+A: Start by looking at the main patchworks queue for netdev:
+
+       http://patchwork.ozlabs.org/project/netdev/list/
+
+   The "State" field will tell you exactly where things are at with
+   your patch.
+
+Q: The above only says "Under Review".  How can I find out more?
+
+A: Generally speaking, the patches get triaged quickly (in less than 48h).
+   So be patient.  Asking the maintainer for status updates on your
+   patch is a good way to ensure your patch is ignored or pushed to
+   the bottom of the priority list.
+
+Q: How can I tell what patches are queued up for backporting to the
+   various stable releases?
+
+A: Normally Greg Kroah-Hartman collects stable commits himself, but
+   for networking, Dave collects up patches he deems critical for the
+   networking subsystem, and then hands them off to Greg.
+
+   There is a patchworks queue that you can see here:
+       http://patchwork.ozlabs.org/bundle/davem/stable/?state=*
+
+   It contains the patches which Dave has selected, but not yet handed
+   off to Greg.  If Greg already has the patch, then it will be here:
+       http://git.kernel.org/cgit/linux/kernel/git/stable/stable-queue.git
+
+   A quick way to find whether the patch is in this stable-queue is
+   to simply clone the repo, and then git grep the mainline commit ID, e.g.
+
+       stable-queue$ git grep -l 284041ef21fdf2e
+       releases/3.0.84/ipv6-fix-possible-crashes-in-ip6_cork_release.patch
+       releases/3.4.51/ipv6-fix-possible-crashes-in-ip6_cork_release.patch
+       releases/3.9.8/ipv6-fix-possible-crashes-in-ip6_cork_release.patch
+       stable/stable-queue$
+
+Q: I see a network patch and I think it should be backported to stable.
+   Should I request it via "stable@vger.kernel.org" like the references in
+   the kernel's Documentation/stable_kernel_rules.txt file say?
+
+A: No, not for networking.  Check the stable queues as per above 1st to see
+   if it is already queued.  If not, then send a mail to netdev, listing
+   the upstream commit ID and why you think it should be a stable candidate.
+
+   Before you jump to go do the above, do note that the normal stable rules
+   in Documentation/stable_kernel_rules.txt still apply.  So you need to
+   explicitly indicate why it is a critical fix and exactly what users are
+   impacted.  In addition, you need to convince yourself that you _really_
+   think it has been overlooked, vs. having been considered and rejected.
+
+   Generally speaking, the longer it has had a chance to "soak" in mainline,
+   the better the odds that it is an OK candidate for stable.  So scrambling
+   to request a commit be added the day after it appears should be avoided.
+
+Q: I have created a network patch and I think it should be backported to
+   stable.  Should I add a "Cc: stable@vger.kernel.org" like the references
+   in the kernel's Documentation/ directory say?
+
+A: No.  See above answer.  In short, if you think it really belongs in
+   stable, then ensure you write a decent commit log that describes who
+   gets impacted by the bugfix and how it manifests itself, and when the
+   bug was introduced.  If you do that properly, then the commit will
+   get handled appropriately and most likely get put in the patchworks
+   stable queue if it really warrants it.
+
+   If you think there is some valid information relating to it being in
+   stable that does _not_ belong in the commit log, then use the three
+   dash marker line as described in Documentation/SubmittingPatches to
+   temporarily embed that information into the patch that you send.
+
+Q: Someone said that the comment style and coding convention is different
+   for the networking content.  Is this true?
+
+A: Yes, in a largely trivial way.  Instead of this:
+
+       /*
+        * foobar blah blah blah
+        * another line of text
+        */
+
+   it is requested that you make it look like this:
+
+       /* foobar blah blah blah
+        * another line of text
+        */
+
+Q: I am working in existing code that has the former comment style and not the
+   latter.  Should I submit new code in the former style or the latter?
+
+A: Make it the latter style, so that eventually all code in the domain of
+   netdev is of this format.
+
+Q: I found a bug that might have possible security implications or similar.
+   Should I mail the main netdev maintainer off-list?
+
+A: No. The current netdev maintainer has consistently requested that people
+   use the mailing lists and not reach out directly.  If you aren't OK with
+   that, then perhaps consider mailing "security@kernel.org" or reading about
+   http://oss-security.openwall.org/wiki/mailing-lists/distros
+   as possible alternative mechanisms.
+
+Q: What level of testing is expected before I submit my change?
+
+A: If your changes are against net-next, the expectation is that you
+   have tested by layering your changes on top of net-next.  Ideally you
+   will have done run-time testing specific to your change, but at a
+   minimum, your changes should survive an "allyesconfig" and an
+   "allmodconfig" build without new warnings or failures.
+
+Q: Any other tips to help ensure my net/net-next patch gets OK'd?
+
+A: Attention to detail.  Re-read your own work as if you were the
+   reviewer.  You can start with using checkpatch.pl, perhaps even
+   with the "--strict" flag.  But do not be mindlessly robotic in
+   doing so.  If your change is a bug-fix, make sure your commit log
+   indicates the end-user visible symptom, the underlying reason as
+   to why it happens, and then if necessary, explain why the fix proposed
+   is the best way to get things done.   Don't mangle whitespace, and as
+   is common, don't mis-indent function arguments that span multiple lines.
+   If it is your 1st patch, mail it to yourself so you can test apply
+   it to an unpatched tree to confirm infrastructure didn't mangle it.
+
+   Finally, go back and read Documentation/SubmittingPatches to be
+   sure you are not repeating some common mistake documented there.
index 0c790a76910ef81052faeff278d57b5b4fe2499b..97b810ca9082dbad569c57b8b9bdd6f0a8dda1c1 100644 (file)
@@ -19,7 +19,6 @@ of SCTP that is RFC 2960 compliant and provides an programming interface
 referred to as the  UDP-style API of the Sockets Extensions for SCTP, as 
 proposed in IETF Internet-Drafts.    
 
-
 Caveats:  
 
 -lksctp can be built as statically or as a module.  However, be aware that 
@@ -33,6 +32,4 @@ For more information, please visit the lksctp project website:
    http://www.sf.net/projects/lksctp
 
 Or contact the lksctp developers through the mailing list:
-   <lksctp-developers@lists.sourceforge.net> 
-
-
+   <linux-sctp@vger.kernel.org>
index 7b5996d9357e017b8f05052522255a7405bf9248..ec11429e1d42a5bbbea2d21354a8b8dff1028fe0 100644 (file)
@@ -2,9 +2,8 @@ Transparent proxy support
 =========================
 
 This feature adds Linux 2.2-like transparent proxy support to current kernels.
-To use it, enable NETFILTER_TPROXY, the socket match and the TPROXY target in
-your kernel config. You will need policy routing too, so be sure to enable that
-as well.
+To use it, enable the socket match and the TPROXY target in your kernel config.
+You will need policy routing too, so be sure to enable that as well.
 
 
 1. Making non-local sockets work
index 052e13af2d38d682e975e33e2dc3723c4307f327..e3f322a4b358f94a6248542f46e5a12d28629e5d 100644 (file)
@@ -856,7 +856,7 @@ static unsigned long uart_sleep_mode[] = {
     PIN_CONF_PACKED(PIN_CONFIG_OUTPUT, 0),
 };
 
-static struct pinctrl_map __initdata pinmap[] = {
+static struct pinctrl_map pinmap[] __initdata = {
     PIN_MAP_MUX_GROUP("uart", PINCTRL_STATE_DEFAULT, "pinctrl-foo",
                       "u0_group", "u0"),
     PIN_MAP_CONFIGS_PIN("uart", PINCTRL_STATE_DEFAULT, "pinctrl-foo",
@@ -951,7 +951,7 @@ Since the above construct is pretty common there is a helper macro to make
 it even more compact which assumes you want to use pinctrl-foo and position
 0 for mapping, for example:
 
-static struct pinctrl_map __initdata mapping[] = {
+static struct pinctrl_map mapping[] __initdata = {
        PIN_MAP_MUX_GROUP("foo-i2c.o", PINCTRL_STATE_DEFAULT, "pinctrl-foo", NULL, "i2c0"),
 };
 
@@ -970,7 +970,7 @@ static unsigned long i2c_pin_configs[] = {
        FOO_SLEW_RATE_SLOW,
 };
 
-static struct pinctrl_map __initdata mapping[] = {
+static struct pinctrl_map mapping[] __initdata = {
        PIN_MAP_MUX_GROUP("foo-i2c.0", PINCTRL_STATE_DEFAULT, "pinctrl-foo", "i2c0", "i2c0"),
        PIN_MAP_CONFIGS_GROUP("foo-i2c.0", PINCTRL_STATE_DEFAULT, "pinctrl-foo", "i2c0", i2c_grp_configs),
        PIN_MAP_CONFIGS_PIN("foo-i2c.0", PINCTRL_STATE_DEFAULT, "pinctrl-foo", "i2c0scl", i2c_pin_configs),
@@ -984,7 +984,7 @@ order to explicitly indicate that the states were provided and intended to
 be empty. Table entry macro PIN_MAP_DUMMY_STATE serves the purpose of defining
 a named state without causing any pin controller to be programmed:
 
-static struct pinctrl_map __initdata mapping[] = {
+static struct pinctrl_map mapping[] __initdata = {
        PIN_MAP_DUMMY_STATE("foo-i2c.0", PINCTRL_STATE_DEFAULT),
 };
 
index 3e8cb73ac43c2be41b7537c156f7d87a1efbba90..22b4bc51fb4f18a08d06e93d0a8bce8d4b9bf0e4 100644 (file)
@@ -97,7 +97,7 @@ IPv4 addresses:
 
        %pI4    1.2.3.4
        %pi4    001.002.003.004
-       %p[Ii][hnbl]
+       %p[Ii]4[hnbl]
 
        For printing IPv4 dot-separated decimal addresses. The 'I4' and 'i4'
        specifiers result in a printed address with ('i4') or without ('I4')
index 717f5aa388b171bcdffa8115191491aac64f4bd2..28fbd877f85a7971ce446e75278d5cd0f2750318 100644 (file)
@@ -300,7 +300,7 @@ initialization.
 -------------------------------------------
 
 RapidIO subsystem code organization allows addition of new enumeration/discovery
-methods as new configuration options without significant impact to to the core
+methods as new configuration options without significant impact to the core
 RapidIO code.
 
 A new enumeration/discovery method has to be attached to one or more mport
index 4a4f47e759cde1b852032d5bfb488023322df2b5..12ecfd308e557a4ccbf2878bf36baced83acae22 100644 (file)
@@ -151,7 +151,7 @@ To send a request to the controller:
       generated.
 
     - The host read the outbound list copy pointer shadow register and compare
-      with previous saved read ponter N. If they are different, the host will
+      with previous saved read pointer N. If they are different, the host will
       read the (N+1)th outbound list unit.
 
       The host get the index of the request from the (N+1)th outbound list
index 809d72b8eff1a8217dc5e7666f3e895019421f9c..a46ddb85e83a0dcdf0f2a54fd9b745eadb31cb6f 100644 (file)
@@ -244,6 +244,7 @@ STAC9227/9228/9229/927x
   5stack-no-fp D965 5stack without front panel
   dell-3stack  Dell Dimension E520
   dell-bios    Fixes with Dell BIOS setup
+  dell-bios-amic Fixes with Dell BIOS setup including analog mic
   volknob      Fixes with volume-knob widget 0x24
   auto         BIOS setup (default)
 
index c3c912d023cc5c184c6f0496344cb1a1a82265eb..42a0a39b77e6a5da339e0f71717a6d3809986c24 100644 (file)
@@ -454,6 +454,8 @@ The generic parser supports the following hints:
 - need_dac_fix (bool): limits the DACs depending on the channel count
 - primary_hp (bool): probe headphone jacks as the primary outputs;
   default true
+- multi_io (bool): try probing multi-I/O config (e.g. shared
+  line-in/surround, mic/clfe jacks)
 - multi_cap_vol (bool): provide multiple capture volumes
 - inv_dmic_split (bool): provide split internal mic volume/switch for
   phase-inverted digital mics
index 0bcc55155911083345120cb15cc1afb78094e75d..fd74ff26376e9a2c9a12bb99f33deb1613230630 100644 (file)
@@ -73,7 +73,7 @@ The main requirements are:
 
 Design
 
-The new API shares a number of concepts with with the PCM API for flow
+The new API shares a number of concepts with the PCM API for flow
 control. Start, pause, resume, drain and stop commands have the same
 semantics no matter what the content is.
 
@@ -130,7 +130,7 @@ the settings should remain the exception.
 The timestamp becomes a multiple field structure. It lists the number
 of bytes transferred, the number of samples processed and the number
 of samples rendered/grabbed. All these values can be used to determine
-the avarage bitrate, figure out if the ring buffer needs to be
+the average bitrate, figure out if the ring buffer needs to be
 refilled or the delay due to decoding/encoding/io on the DSP.
 
 Note that the list of codecs/profiles/modes was derived from the
index c1a1fd636bf9e0c5ce6152d8cbe26f7c251bf770..a5f985ee1822fe37497d09c08a75acf599cba887 100644 (file)
@@ -47,7 +47,7 @@ versions of the sysfs interface.
         at device creation and removal
       - the unique key to the device at that point in time
       - the kernel's path to the device directory without the leading
-        /sys, and always starting with with a slash
+        /sys, and always starting with a slash
       - all elements of a devpath must be real directories. Symlinks
         pointing to /sys/devices must always be resolved to their real
         target and the target path must be used to access the device.
index 3fe0d812dcecf28262e38e2cfed9d2ff11b41e82..54d29c1320ed665daa5b73c652bf4b50af63b488 100755 (executable)
@@ -300,7 +300,7 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
        buf += "        int ret;\n\n"
        buf += "        if (strstr(name, \"tpgt_\") != name)\n"
        buf += "                return ERR_PTR(-EINVAL);\n"
-       buf += "        if (strict_strtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)\n"
+       buf += "        if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)\n"
        buf += "                return ERR_PTR(-EINVAL);\n\n"
        buf += "        tpg = kzalloc(sizeof(struct " + fabric_mod_name + "_tpg), GFP_KERNEL);\n"
        buf += "        if (!tpg) {\n"
index 2b46f67b1ccbfbc8ba0c8ec813e7c00f71d5582a..9010c441696743ec87bec451a71c2b1403c0fa37 100644 (file)
@@ -1,17 +1,17 @@
-Kernel driver exynos4_tmu
+Kernel driver exynos_tmu
 =================
 
 Supported chips:
-* ARM SAMSUNG EXYNOS4 series of SoC
-  Prefix: 'exynos4-tmu'
+* ARM SAMSUNG EXYNOS4, EXYNOS5 series of SoC
   Datasheet: Not publicly available
 
 Authors: Donggeun Kim <dg77.kim@samsung.com>
+Authors: Amit Daniel <amit.daniel@samsung.com>
 
-Description
------------
+TMU controller Description:
+---------------------------
 
-This driver allows to read temperature inside SAMSUNG EXYNOS4 series of SoC.
+This driver allows to read temperature inside SAMSUNG EXYNOS4/5 series of SoC.
 
 The chip only exposes the measured 8-bit temperature code value
 through a register.
@@ -34,9 +34,9 @@ The three equations are:
   TI2: Trimming info for 85 degree Celsius (stored at TRIMINFO register)
        Temperature code measured at 85 degree Celsius which is unchanged
 
-TMU(Thermal Management Unit) in EXYNOS4 generates interrupt
+TMU(Thermal Management Unit) in EXYNOS4/5 generates interrupt
 when temperature exceeds pre-defined levels.
-The maximum number of configurable threshold is four.
+The maximum number of configurable threshold is five.
 The threshold levels are defined as follows:
   Level_0: current temperature > trigger_level_0 + threshold
   Level_1: current temperature > trigger_level_1 + threshold
@@ -47,6 +47,31 @@ The threshold levels are defined as follows:
   through the corresponding registers.
 
 When an interrupt occurs, this driver notify kernel thermal framework
-with the function exynos4_report_trigger.
+with the function exynos_report_trigger.
 Although an interrupt condition for level_0 can be set,
 it can be used to synchronize the cooling action.
+
+TMU driver description:
+-----------------------
+
+The exynos thermal driver is structured as,
+
+                                       Kernel Core thermal framework
+                               (thermal_core.c, step_wise.c, cpu_cooling.c)
+                                                               ^
+                                                               |
+                                                               |
+TMU configuration data -------> TMU Driver  <------> Exynos Core thermal wrapper
+(exynos_tmu_data.c)          (exynos_tmu.c)       (exynos_thermal_common.c)
+(exynos_tmu_data.h)          (exynos_tmu.h)       (exynos_thermal_common.h)
+
+a) TMU configuration data: This consist of TMU register offsets/bitfields
+               described through structure exynos_tmu_registers. Also several
+               other platform data (struct exynos_tmu_platform_data) members
+               are used to configure the TMU.
+b) TMU driver: This component initialises the TMU controller and sets different
+               thresholds. It invokes core thermal implementation with the call
+               exynos_report_trigger.
+c) Exynos Core thermal wrapper: This provides 3 wrapper function to use the
+               Kernel core thermal framework. They are exynos_unregister_thermal,
+               exynos_register_thermal and exynos_report_trigger.
index b937c6e2163c0997b6ffbcf4159503f1eb1bedc9..ea2d35d64d26f50226ddceeec795effaf6bbf6c9 100644 (file)
@@ -735,7 +735,7 @@ Here are the available options:
                 function as well as the function being traced.
 
   print-parent:
-   bash-4000  [01]  1477.606694: simple_strtoul <-strict_strtoul
+   bash-4000  [01]  1477.606694: simple_strtoul <-kstrtoul
 
   noprint-parent:
    bash-4000  [01]  1477.606694: simple_strtoul
@@ -759,7 +759,7 @@ Here are the available options:
             latency-format option is enabled.
 
     bash  4000 1 0 00000000 00010a95 [58127d26] 1720.415ms \
-    (+0.000ms): simple_strtoul (strict_strtoul)
+    (+0.000ms): simple_strtoul (kstrtoul)
 
   raw - This will display raw numbers. This option is best for
        use with user applications that can translate the raw
index ef925eaa14608fc806c9a58fd032d9aeaeea6305..858aecf21db2c9d449d984af406a838ed5143ed4 100644 (file)
@@ -53,7 +53,7 @@ incompatible change are allowed.  However, there is an extension
 facility that allows backward-compatible extensions to the API to be
 queried and used.
 
-The extension mechanism is not based on on the Linux version number.
+The extension mechanism is not based on the Linux version number.
 Instead, kvm defines extension identifiers and a facility to query
 whether a particular extension identifier is available.  If it is, a
 set of ioctls is available for application use.
index fc66d42422eef8ba30d52b0a49fae515fd46625b..f4f268c2b826de7f03b1c2bd2478a85eacd58f2e 100644 (file)
@@ -58,7 +58,7 @@ Protocol 2.11:        (Kernel 3.6) Added a field for offset of EFI handover
                protocol entry point.
 
 Protocol 2.12: (Kernel 3.8) Added the xloadflags field and extension fields
-               to struct boot_params for for loading bzImage and ramdisk
+               to struct boot_params for loading bzImage and ramdisk
                above 4G in 64bit.
 
 **** MEMORY LAYOUT
index 0f4385a62a496c7157eb7359bf5494aed3c53301..be0bd47250629761870945f32e5f4bcee5ccf9c1 100644 (file)
@@ -146,7 +146,7 @@ Majordomo lists of VGER.KERNEL.ORG at:
         <http://vger.kernel.org/vger-lists.html>
 
 如果改动影响了用户空间和内核之间的接口,请给 MAN-PAGES 的维护者(列在
-MAITAINERS 文件里的)发送一个手册页(man-pages)补丁,或者至少通知一下改
+MAINTAINERS 文件里的)发送一个手册页(man-pages)补丁,或者至少通知一下改
 变,让一些信息有途径进入手册页。
 
 即使在第四步的时候,维护者没有作出回应,也要确认在修改他们的代码的时候
index 229c66b12cc21b9ebe6479932491ecd5e8784118..b0ba0433e4fabef38831352b7ef1aa62c24edbfd 100644 (file)
@@ -595,6 +595,7 @@ S:  Supported
 F:     sound/soc/codecs/adau*
 F:     sound/soc/codecs/adav*
 F:     sound/soc/codecs/ad1*
+F:     sound/soc/codecs/ad7*
 F:     sound/soc/codecs/ssm*
 F:     sound/soc/codecs/sigmadsp.*
 
@@ -4365,7 +4366,7 @@ F:        drivers/net/wireless/iwlegacy/
 
 INTEL WIRELESS WIFI LINK (iwlwifi)
 M:     Johannes Berg <johannes.berg@intel.com>
-M:     Wey-Yi Guy <wey-yi.w.guy@intel.com>
+M:     Emmanuel Grumbach <emmanuel.grumbach@intel.com>
 M:     Intel Linux Wireless <ilw@linux.intel.com>
 L:     linux-wireless@vger.kernel.org
 W:     http://intellinuxwireless.org
@@ -5396,6 +5397,7 @@ F:        drivers/watchdog/mena21_wdt.c
 
 METAG ARCHITECTURE
 M:     James Hogan <james.hogan@imgtec.com>
+L:     linux-metag@vger.kernel.org
 S:     Supported
 F:     arch/metag/
 F:     Documentation/metag/
@@ -5792,7 +5794,7 @@ M:        Aloisio Almeida Jr <aloisio.almeida@openbossa.org>
 M:     Samuel Ortiz <sameo@linux.intel.com>
 L:     linux-wireless@vger.kernel.org
 L:     linux-nfc@lists.01.org (moderated for non-subscribers)
-S:     Maintained
+S:     Supported
 F:     net/nfc/
 F:     include/net/nfc/
 F:     include/uapi/linux/nfc.h
@@ -7131,6 +7133,7 @@ F:        drivers/tty/serial
 
 SYNOPSYS DESIGNWARE DMAC DRIVER
 M:     Viresh Kumar <viresh.linux@gmail.com>
+M:     Andy Shevchenko <andriy.shevchenko@linux.intel.com>
 S:     Maintained
 F:     include/linux/dw_dmac.h
 F:     drivers/dma/dw/
@@ -7236,6 +7239,7 @@ W:        http://lksctp.sourceforge.net
 S:     Maintained
 F:     Documentation/networking/sctp.txt
 F:     include/linux/sctp.h
+F:     include/uapi/linux/sctp.h
 F:     include/net/sctp/
 F:     net/sctp/
 
@@ -7550,6 +7554,14 @@ S:       Maintained
 F:     Documentation/security/Smack.txt
 F:     security/smack/
 
+SMARTREFLEX DRIVERS FOR ADAPTIVE VOLTAGE SCALING (AVS)
+M:     Kevin Hilman <khilman@kernel.org>
+M:     Nishanth Menon <nm@ti.com>
+S:     Maintained
+F:     drivers/power/avs/smartreflex.c
+F:     include/linux/power/smartreflex.h
+L:     linux-pm@vger.kernel.org
+
 SMC91x ETHERNET DRIVER
 M:     Nicolas Pitre <nico@fluxnic.net>
 S:     Odd Fixes
@@ -7675,6 +7687,17 @@ F:       include/sound/
 F:     include/uapi/sound/
 F:     sound/
 
+SOUND - COMPRESSED AUDIO
+M:     Vinod Koul <vinod.koul@intel.com>
+L:     alsa-devel@alsa-project.org (moderated for non-subscribers)
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/sound.git
+S:     Supported
+F:     Documentation/sound/alsa/compress_offload.txt
+F:     include/sound/compress_driver.h
+F:     include/uapi/sound/compress_*
+F:     sound/core/compress_offload.c
+F:     sound/soc/soc-compress.c
+
 SOUND - SOC LAYER / DYNAMIC AUDIO POWER MANAGEMENT (ASoC)
 M:     Liam Girdwood <lgirdwood@gmail.com>
 M:     Mark Brown <broonie@kernel.org>
@@ -7682,6 +7705,7 @@ T:        git git://git.kernel.org/pub/scm/linux/kernel/git/broonie/sound.git
 L:     alsa-devel@alsa-project.org (moderated for non-subscribers)
 W:     http://alsa-project.org/main/index.php/ASoC
 S:     Supported
+F:     Documentation/sound/alsa/soc/
 F:     sound/soc/
 F:     include/sound/soc*
 
@@ -7972,6 +7996,12 @@ F:       arch/m68k/sun3*/
 F:     arch/m68k/include/asm/sun3*
 F:     drivers/net/ethernet/i825xx/sun3*
 
+SUNDANCE NETWORK DRIVER
+M:     Denis Kirjanov <kda@linux-powerpc.org>
+L:     netdev@vger.kernel.org
+S:     Maintained
+F:     drivers/net/ethernet/dlink/sundance.c
+
 SUPERH
 M:     Paul Mundt <lethal@linux-sh.org>
 L:     linux-sh@vger.kernel.org
@@ -8307,9 +8337,15 @@ M:       Chris Metcalf <cmetcalf@tilera.com>
 W:     http://www.tilera.com/scm/
 S:     Supported
 F:     arch/tile/
-F:     drivers/tty/hvc/hvc_tile.c
-F:     drivers/net/ethernet/tile/
+F:     drivers/char/tile-srom.c
+F:     drivers/cpufreq/tilegx-cpufreq.c
 F:     drivers/edac/tile_edac.c
+F:     drivers/net/ethernet/tile/
+F:     drivers/rtc/rtc-tile.c
+F:     drivers/tty/hvc/hvc_tile.c
+F:     drivers/tty/serial/tilegx.c
+F:     drivers/usb/host/*-tilegx.c
+F:     include/linux/usb/tilegx.h
 
 TLAN NETWORK DRIVER
 M:     Samuel Chessman <chessman@tux.org>
diff --git a/arch/arc/boot/.gitignore b/arch/arc/boot/.gitignore
new file mode 100644 (file)
index 0000000..5d65b54
--- /dev/null
@@ -0,0 +1 @@
+*.dtb*
index df57611652e50b73322a6eaf7005598684208d4a..884081099f800fd6b4ba133bc1f1746084845c49 100644 (file)
  * it to memory (non-SMP case) or SCRATCH0 Aux Reg (SMP).
  *
  * Before saving the full regfile - this reg is restored back, only
- * to be saved again on kernel mode stack, as part of ptregs.
+ * to be saved again on kernel mode stack, as part of pt_regs.
  *-------------------------------------------------------------*/
 .macro EXCPN_PROLOG_FREEUP_REG reg
 #ifdef CONFIG_SMP
 #endif
 .endm
 
+/*--------------------------------------------------------------
+ * Exception Entry prologue
+ * -Switches stack to K mode (if not already)
+ * -Saves the register file
+ *
+ * After this it is safe to call the "C" handlers
+ *-------------------------------------------------------------*/
+.macro EXCEPTION_PROLOGUE
+
+       /* Need at least 1 reg to code the early exception prologue */
+       EXCPN_PROLOG_FREEUP_REG r9
+
+       /* U/K mode at time of exception (stack not switched if already K) */
+       lr  r9, [erstatus]
+
+       /* ARC700 doesn't provide auto-stack switching */
+       SWITCH_TO_KERNEL_STK
+
+       /* save the regfile */
+       SAVE_ALL_SYS
+.endm
+
 /*--------------------------------------------------------------
  * Save all registers used by Exceptions (TLB Miss, Prot-V, Mem err etc)
  * Requires SP to be already switched to kernel mode Stack
index d99f79bcf865a248ddb1c1dfa1eb35a58ae5f948..b68b53f458d1bc07f89b5b64c8f9fd9dc84ba323 100644 (file)
@@ -157,13 +157,6 @@ static inline void arch_unmask_irq(unsigned int irq)
        flag    \scratch
 .endm
 
-.macro IRQ_DISABLE_SAVE  scratch, save
-       lr      \scratch, [status32]
-       mov     \save, \scratch         /* Make a copy */
-       bic     \scratch, \scratch, (STATUS_E1_MASK | STATUS_E2_MASK)
-       flag    \scratch
-.endm
-
 .macro IRQ_ENABLE  scratch
        lr      \scratch, [status32]
        or      \scratch, \scratch, (STATUS_E1_MASK | STATUS_E2_MASK)
index 7c03fe61759c2262d30550cfc6ad73dd363b2903..baf923f689c113b66a375737f66c39b78e9de270 100644 (file)
@@ -32,6 +32,8 @@
 /* Error code if probe fails */
 #define TLB_LKUP_ERR           0x80000000
 
+#define TLB_DUP_ERR    (TLB_LKUP_ERR | 0x00000001)
+
 /* TLB Commands */
 #define TLBWrite    0x1
 #define TLBRead     0x2
 #ifndef __ASSEMBLY__
 
 typedef struct {
-       unsigned long asid;     /* Pvt Addr-Space ID for mm */
-#ifdef CONFIG_ARC_TLB_DBG
-       struct task_struct *tsk;
-#endif
+       unsigned long asid[NR_CPUS];    /* Hw PID + Generation cycle */
 } mm_context_t;
 
 #ifdef CONFIG_ARC_DBG_TLB_PARANOIA
index 0d71fb11b57c753c5b1cb4dd13bd7b731b196e18..97949e70d690402ec5c70074a6d99511a28e96cc 100644 (file)
  * "Fast Context Switch" i.e. no TLB flush on ctxt-switch
  *
  * Linux assigns each task a unique ASID. A simple round-robin allocation
- * of H/w ASID is done using software tracker @asid_cache.
+ * of H/w ASID is done using software tracker @asid_cpu.
  * When it reaches max 255, the allocation cycle starts afresh by flushing
  * the entire TLB and wrapping ASID back to zero.
  *
- * For book-keeping, Linux uses a couple of data-structures:
- *  -mm_struct has an @asid field to keep a note of task's ASID (needed at the
- *   time of say switch_mm( )
- *  -An array of mm structs @asid_mm_map[] for asid->mm the reverse mapping,
- *  given an ASID, finding the mm struct associated.
- *
- * The round-robin allocation algorithm allows for ASID stealing.
- * If asid tracker is at "x-1", a new req will allocate "x", even if "x" was
- * already assigned to another (switched-out) task. Obviously the prev owner
- * is marked with an invalid ASID to make it request for a new ASID when it
- * gets scheduled next time. However its TLB entries (with ASID "x") could
- * exist, which must be cleared before the same ASID is used by the new owner.
- * Flushing them would be plausible but costly solution. Instead we force a
- * allocation policy quirk, which ensures that a stolen ASID won't have any
- * TLB entries associates, alleviating the need to flush.
- * The quirk essentially is not allowing ASID allocated in prev cycle
- * to be used past a roll-over in the next cycle.
- * When this happens (i.e. task ASID > asid tracker), task needs to refresh
- * its ASID, aligning it to current value of tracker. If the task doesn't get
- * scheduled past a roll-over, hence its ASID is not yet realigned with
- * tracker, such ASID is anyways safely reusable because it is
- * gauranteed that TLB entries with that ASID wont exist.
+ * A new allocation cycle, post rollover, could potentially reassign an ASID
+ * to a different task. Thus the rule is to refresh the ASID in a new cycle.
+ * The 32 bit @asid_cpu (and mm->asid) have 8 bits MMU PID and rest 24 bits
+ * serve as cycle/generation indicator and natural 32 bit unsigned math
+ * automagically increments the generation when lower 8 bits rollover.
  */
 
-#define FIRST_ASID  0
-#define MAX_ASID    255                        /* 8 bit PID field in PID Aux reg */
-#define NO_ASID     (MAX_ASID + 1)     /* ASID Not alloc to mmu ctxt */
-#define NUM_ASID    ((MAX_ASID - FIRST_ASID) + 1)
+#define MM_CTXT_ASID_MASK      0x000000ff /* MMU PID reg :8 bit PID */
+#define MM_CTXT_CYCLE_MASK     (~MM_CTXT_ASID_MASK)
+
+#define MM_CTXT_FIRST_CYCLE    (MM_CTXT_ASID_MASK + 1)
+#define MM_CTXT_NO_ASID                0UL
 
-/* ASID to mm struct mapping */
-extern struct mm_struct *asid_mm_map[NUM_ASID + 1];
+#define asid_mm(mm, cpu)       mm->context.asid[cpu]
+#define hw_pid(mm, cpu)                (asid_mm(mm, cpu) & MM_CTXT_ASID_MASK)
 
-extern int asid_cache;
+DECLARE_PER_CPU(unsigned int, asid_cache);
+#define asid_cpu(cpu)          per_cpu(asid_cache, cpu)
 
 /*
- * Assign a new ASID to task. If the task already has an ASID, it is
- * relinquished.
+ * Get a new ASID if task doesn't have a valid one (unalloc or from prev cycle)
+ * Also set the MMU PID register to existing/updated ASID
  */
 static inline void get_new_mmu_context(struct mm_struct *mm)
 {
-       struct mm_struct *prev_owner;
+       const unsigned int cpu = smp_processor_id();
        unsigned long flags;
 
        local_irq_save(flags);
 
        /*
-        * Relinquish the currently owned ASID (if any).
-        * Doing unconditionally saves a cmp-n-branch; for already unused
-        * ASID slot, the value was/remains NULL
+        * Move to new ASID if it was not from current alloc cycle/generation.
+        * Callers needing new ASID unconditionally, independent of alloc-cycle
+        * (local_flush_tlb_mm() for forking  parent) first need to destroy the
+        * context, setting it to invalid value, which the check below would
+        * catch too
         */
-       asid_mm_map[mm->context.asid] = (struct mm_struct *)NULL;
+       if (!((asid_mm(mm, cpu) ^ asid_cpu(cpu)) & MM_CTXT_CYCLE_MASK))
+               goto set_hw;
 
        /* move to new ASID */
-       if (++asid_cache > MAX_ASID) {  /* ASID roll-over */
-               asid_cache = FIRST_ASID;
-               flush_tlb_all();
+       if (!(++asid_cpu(cpu) & MM_CTXT_ASID_MASK)) {   /* ASID roll-over */
+               local_flush_tlb_all();
        }
 
-       /*
-        * Is next ASID already owned by some-one else (we are stealing it).
-        * If so, let the orig owner be aware of this, so when it runs, it
-        * asks for a brand new ASID. This would only happen for a long-lived
-        * task with ASID from prev allocation cycle (before ASID roll-over).
-        *
-        * This might look wrong - if we are re-using some other task's ASID,
-        * won't we use it's stale TLB entries too. Actually switch_mm( ) takes
-        * care of such a case: it ensures that task with ASID from prev alloc
-        * cycle, when scheduled will refresh it's ASID: see switch_mm( ) below
-        * The stealing scenario described here will only happen if that task
-        * didn't get a chance to refresh it's ASID - implying stale entries
-        * won't exist.
+       /* Above was rollover of 8 bit ASID in 32 bit container.
+        * If the container itself wrapped around, set it to a non zero
+        * "generation" to distinguish from no context
         */
-       prev_owner = asid_mm_map[asid_cache];
-       if (prev_owner)
-               prev_owner->context.asid = NO_ASID;
+       if (!asid_cpu(cpu))
+               asid_cpu(cpu) = MM_CTXT_FIRST_CYCLE;
 
        /* Assign new ASID to tsk */
-       asid_mm_map[asid_cache] = mm;
-       mm->context.asid = asid_cache;
-
-#ifdef CONFIG_ARC_TLB_DBG
-       pr_info("ARC_TLB_DBG: NewMM=0x%x OldMM=0x%x task_struct=0x%x Task: %s,"
-              " pid:%u, assigned asid:%lu\n",
-              (unsigned int)mm, (unsigned int)prev_owner,
-              (unsigned int)(mm->context.tsk), (mm->context.tsk)->comm,
-              (mm->context.tsk)->pid, mm->context.asid);
-#endif
+       asid_mm(mm, cpu) = asid_cpu(cpu);
 
-       write_aux_reg(ARC_REG_PID, asid_cache | MMU_ENABLE);
+set_hw:
+       write_aux_reg(ARC_REG_PID, hw_pid(mm, cpu) | MMU_ENABLE);
 
        local_irq_restore(flags);
 }
@@ -134,60 +102,66 @@ static inline void get_new_mmu_context(struct mm_struct *mm)
 static inline int
 init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 {
-       mm->context.asid = NO_ASID;
-#ifdef CONFIG_ARC_TLB_DBG
-       mm->context.tsk = tsk;
-#endif
+       int i;
+
+       for (i = 0; i < NR_CPUS; i++)
+               asid_mm(mm, i) = MM_CTXT_NO_ASID;
+
        return 0;
 }
 
+static inline void destroy_context(struct mm_struct *mm)
+{
+       asid_mm(mm, smp_processor_id()) = MM_CTXT_NO_ASID;
+}
+
 /* Prepare the MMU for task: setup PID reg with allocated ASID
     If task doesn't have an ASID (never alloc or stolen, get a new ASID)
 */
 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                             struct task_struct *tsk)
 {
-#ifndef CONFIG_SMP
-       /* PGD cached in MMU reg to avoid 3 mem lookups: task->mm->pgd */
-       write_aux_reg(ARC_REG_SCRATCH_DATA0, next->pgd);
-#endif
+       int migrating = 0;
+
+#ifdef CONFIG_SMP
+       const int cpu = smp_processor_id();
 
        /*
-        * Get a new ASID if task doesn't have a valid one. Possible when
-        *  -task never had an ASID (fresh after fork)
-        *  -it's ASID was stolen - past an ASID roll-over.
-        *  -There's a third obscure scenario (if this task is running for the
-        *   first time afer an ASID rollover), where despite having a valid
-        *   ASID, we force a get for new ASID - see comments at top.
-        *
-        * Both the non-alloc scenario and first-use-after-rollover can be
-        * detected using the single condition below:  NO_ASID = 256
-        * while asid_cache is always a valid ASID value (0-255).
+        * If @next is migrating to a different CPU, force an ASID refresh (by
+        *      relinquishing current value as required by new implementation
+        *      of get_new_mmu_context()
+        * Use Case:
+        *      Task t1 migrates to a different core, forks, migrates back to
+        *      orig core. COW semantics requires it to have a new ASID now
+        *      so that pre-fork TLB entries can't be used.
         */
-       if (next->context.asid > asid_cache) {
-               get_new_mmu_context(next);
-       } else {
-               /*
-                * XXX: This will never happen given the chks above
-                * BUG_ON(next->context.asid > MAX_ASID);
-                */
-               write_aux_reg(ARC_REG_PID, next->context.asid | MMU_ENABLE);
-       }
-
-}
-
-static inline void destroy_context(struct mm_struct *mm)
-{
-       unsigned long flags;
+       cpumask_clear_cpu(cpu, mm_cpumask(prev));
+       migrating = !cpumask_test_and_set_cpu(cpu, mm_cpumask(next));
+       if (migrating)
+               destroy_context(next);
+#endif
 
-       local_irq_save(flags);
+       /* threads of same process (and not migrating cores in SMP) */
+       if ((prev == next) && !migrating)
+               return;
 
-       asid_mm_map[mm->context.asid] = NULL;
-       mm->context.asid = NO_ASID;
+#ifndef CONFIG_SMP
+       /* PGD cached in MMU reg to avoid 3 mem lookups: task->mm->pgd */
+       write_aux_reg(ARC_REG_SCRATCH_DATA0, next->pgd);
+#endif
 
-       local_irq_restore(flags);
+       get_new_mmu_context(next);
 }
 
+/*
+ * Called at the time of execve() to get a new ASID
+ * Note the subtlety here: get_new_mmu_context() behaves differently here
+ * vs. in switch_mm(). Here it always returns a new ASID, because mm has
+ * an unallocated "initial" value, while in latter, it moves to a new ASID,
+ * only if it was unallocated
+ */
+#define activate_mm(prev, next)                switch_mm(prev, next, NULL)
+
 /* it seemed that deactivate_mm( ) is a reasonable place to do book-keeping
  * for retiring-mm. However destroy_context( ) still needs to do that because
  * between mm_release( ) = >deactive_mm( ) and
@@ -197,17 +171,6 @@ static inline void destroy_context(struct mm_struct *mm)
  */
 #define deactivate_mm(tsk, mm)   do { } while (0)
 
-static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
-{
-#ifndef CONFIG_SMP
-       write_aux_reg(ARC_REG_SCRATCH_DATA0, next->pgd);
-#endif
-
-       /* Unconditionally get a new ASID */
-       get_new_mmu_context(next);
-
-}
-
 #define enter_lazy_tlb(mm, tsk)
 
 #endif /* __ASM_ARC_MMU_CONTEXT_H */
index 4749a0eee1cffcf3a06c916353af6db9facc19a1..6b0b7f7ef783cec0ecbd0bbd53e084be2011ae82 100644 (file)
 
 #define _PAGE_ACCESSED      (1<<1)     /* Page is accessed (S) */
 #define _PAGE_CACHEABLE     (1<<2)     /* Page is cached (H) */
-#define _PAGE_U_EXECUTE     (1<<3)     /* Page has user execute perm (H) */
-#define _PAGE_U_WRITE       (1<<4)     /* Page has user write perm (H) */
-#define _PAGE_U_READ        (1<<5)     /* Page has user read perm (H) */
-#define _PAGE_K_EXECUTE     (1<<6)     /* Page has kernel execute perm (H) */
-#define _PAGE_K_WRITE       (1<<7)     /* Page has kernel write perm (H) */
-#define _PAGE_K_READ        (1<<8)     /* Page has kernel perm (H) */
-#define _PAGE_GLOBAL        (1<<9)     /* Page is global (H) */
-#define _PAGE_MODIFIED      (1<<10)    /* Page modified (dirty) (S) */
-#define _PAGE_FILE          (1<<10)    /* page cache/ swap (S) */
-#define _PAGE_PRESENT       (1<<11)    /* TLB entry is valid (H) */
+#define _PAGE_EXECUTE       (1<<3)     /* Page has user execute perm (H) */
+#define _PAGE_WRITE         (1<<4)     /* Page has user write perm (H) */
+#define _PAGE_READ          (1<<5)     /* Page has user read perm (H) */
+#define _PAGE_MODIFIED      (1<<6)     /* Page modified (dirty) (S) */
+#define _PAGE_FILE          (1<<7)     /* page cache/ swap (S) */
+#define _PAGE_GLOBAL        (1<<8)     /* Page is global (H) */
+#define _PAGE_PRESENT       (1<<10)    /* TLB entry is valid (H) */
 
-#else
+#else  /* MMU v3 onwards */
 
-/* PD1 */
 #define _PAGE_CACHEABLE     (1<<0)     /* Page is cached (H) */
-#define _PAGE_U_EXECUTE     (1<<1)     /* Page has user execute perm (H) */
-#define _PAGE_U_WRITE       (1<<2)     /* Page has user write perm (H) */
-#define _PAGE_U_READ        (1<<3)     /* Page has user read perm (H) */
-#define _PAGE_K_EXECUTE     (1<<4)     /* Page has kernel execute perm (H) */
-#define _PAGE_K_WRITE       (1<<5)     /* Page has kernel write perm (H) */
-#define _PAGE_K_READ        (1<<6)     /* Page has kernel perm (H) */
-#define _PAGE_ACCESSED      (1<<7)     /* Page is accessed (S) */
-
-/* PD0 */
+#define _PAGE_EXECUTE       (1<<1)     /* Page has user execute perm (H) */
+#define _PAGE_WRITE         (1<<2)     /* Page has user write perm (H) */
+#define _PAGE_READ          (1<<3)     /* Page has user read perm (H) */
+#define _PAGE_ACCESSED      (1<<4)     /* Page is accessed (S) */
+#define _PAGE_MODIFIED      (1<<5)     /* Page modified (dirty) (S) */
+#define _PAGE_FILE          (1<<6)     /* page cache/ swap (S) */
 #define _PAGE_GLOBAL        (1<<8)     /* Page is global (H) */
 #define _PAGE_PRESENT       (1<<9)     /* TLB entry is valid (H) */
-#define _PAGE_SHARED_CODE   (1<<10)    /* Shared Code page with cmn vaddr
+#define _PAGE_SHARED_CODE   (1<<11)    /* Shared Code page with cmn vaddr
                                           usable for shared TLB entries (H) */
-
-#define _PAGE_MODIFIED      (1<<11)    /* Page modified (dirty) (S) */
-#define _PAGE_FILE          (1<<12)    /* page cache/ swap (S) */
-
-#define _PAGE_SHARED_CODE_H (1<<31)    /* Hardware counterpart of above */
 #endif
 
-/* Kernel allowed all permissions for all pages */
-#define _K_PAGE_PERMS  (_PAGE_K_EXECUTE | _PAGE_K_WRITE | _PAGE_K_READ | \
+/* vmalloc permissions */
+#define _K_PAGE_PERMS  (_PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ | \
                        _PAGE_GLOBAL | _PAGE_PRESENT)
 
 #ifdef CONFIG_ARC_CACHE_PAGES
  */
 #define ___DEF (_PAGE_PRESENT | _PAGE_DEF_CACHEABLE)
 
-#define _PAGE_READ     (_PAGE_U_READ    | _PAGE_K_READ)
-#define _PAGE_WRITE    (_PAGE_U_WRITE   | _PAGE_K_WRITE)
-#define _PAGE_EXECUTE  (_PAGE_U_EXECUTE | _PAGE_K_EXECUTE)
-
 /* Set of bits not changed in pte_modify */
 #define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_MODIFIED)
 
 
 #define PAGE_SHARED    PAGE_U_W_R
 
-/* While kernel runs out of unstrslated space, vmalloc/modules use a chunk of
- * kernel vaddr space - visible in all addr spaces, but kernel mode only
+/* While kernel runs out of unstranslated space, vmalloc/modules use a chunk of
+ * user vaddr space - visible in all addr spaces, but kernel mode only
  * Thus Global, all-kernel-access, no-user-access, cached
  */
 #define PAGE_KERNEL          __pgprot(_K_PAGE_PERMS | _PAGE_DEF_CACHEABLE)
 #define PAGE_KERNEL_NO_CACHE __pgprot(_K_PAGE_PERMS)
 
 /* Masks for actual TLB "PD"s */
-#define PTE_BITS_IN_PD0        (_PAGE_GLOBAL | _PAGE_PRESENT)
-#define PTE_BITS_IN_PD1        (PAGE_MASK | _PAGE_CACHEABLE | \
-                        _PAGE_U_EXECUTE | _PAGE_U_WRITE | _PAGE_U_READ | \
-                        _PAGE_K_EXECUTE | _PAGE_K_WRITE | _PAGE_K_READ)
+#define PTE_BITS_IN_PD0                (_PAGE_GLOBAL | _PAGE_PRESENT)
+#define PTE_BITS_RWX           (_PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ)
+#define PTE_BITS_NON_RWX_IN_PD1        (PAGE_MASK | _PAGE_CACHEABLE)
 
 /**************************************************************************
  * Mapping of vm_flags (Generic VM) to PTE flags (arch specific)
index c9938e7a7dbd3b596fc3c0021d8ff57548719402..1bfeec2c0558c2f6f91142105bee0c6ff70c7a75 100644 (file)
@@ -20,27 +20,17 @@ struct pt_regs {
 
        /* Real registers */
        long bta;       /* bta_l1, bta_l2, erbta */
-       long lp_start;
-       long lp_end;
-       long lp_count;
+
+       long lp_start, lp_end, lp_count;
+
        long status32;  /* status32_l1, status32_l2, erstatus */
        long ret;       /* ilink1, ilink2 or eret */
        long blink;
        long fp;
        long r26;       /* gp */
-       long r12;
-       long r11;
-       long r10;
-       long r9;
-       long r8;
-       long r7;
-       long r6;
-       long r5;
-       long r4;
-       long r3;
-       long r2;
-       long r1;
-       long r0;
+
+       long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0;
+
        long sp;        /* user/kernel sp depending on where we came from  */
        long orig_r0;
 
@@ -70,19 +60,7 @@ struct pt_regs {
 /* Callee saved registers - need to be saved only when you are scheduled out */
 
 struct callee_regs {
-       long r25;
-       long r24;
-       long r23;
-       long r22;
-       long r21;
-       long r20;
-       long r19;
-       long r18;
-       long r17;
-       long r16;
-       long r15;
-       long r14;
-       long r13;
+       long r25, r24, r23, r22, r21, r20, r19, r18, r17, r16, r15, r14, r13;
 };
 
 #define instruction_pointer(regs)      ((regs)->ret)
index b2f9bc7f68c8cb13ba73a368a321ffc2ece31c05..71c7b2e4b8745002083e71fd19ae28305d62972e 100644 (file)
@@ -18,11 +18,18 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end);
 void local_flush_tlb_range(struct vm_area_struct *vma,
                           unsigned long start, unsigned long end);
 
-/* XXX: Revisit for SMP */
+#ifndef CONFIG_SMP
 #define flush_tlb_range(vma, s, e)     local_flush_tlb_range(vma, s, e)
 #define flush_tlb_page(vma, page)      local_flush_tlb_page(vma, page)
 #define flush_tlb_kernel_range(s, e)   local_flush_tlb_kernel_range(s, e)
 #define flush_tlb_all()                        local_flush_tlb_all()
 #define flush_tlb_mm(mm)               local_flush_tlb_mm(mm)
-
+#else
+extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+                                                        unsigned long end);
+extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
+extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
+extern void flush_tlb_all(void);
+extern void flush_tlb_mm(struct mm_struct *mm);
+#endif /* CONFIG_SMP */
 #endif
diff --git a/arch/arc/kernel/.gitignore b/arch/arc/kernel/.gitignore
new file mode 100644 (file)
index 0000000..c5f676c
--- /dev/null
@@ -0,0 +1 @@
+vmlinux.lds
index 1d7165156e1708ee6a24391ed9218de330603ccf..b908dde8a331c26a00a4912309af78cf643fc05b 100644 (file)
@@ -267,12 +267,7 @@ ARC_EXIT handle_interrupt_level1
 
 ARC_ENTRY instr_service
 
-       EXCPN_PROLOG_FREEUP_REG r9
-
-       lr  r9, [erstatus]
-
-       SWITCH_TO_KERNEL_STK
-       SAVE_ALL_SYS
+       EXCEPTION_PROLOGUE
 
        lr  r0, [efa]
        mov r1, sp
@@ -289,15 +284,13 @@ ARC_EXIT instr_service
 
 ARC_ENTRY mem_service
 
-       EXCPN_PROLOG_FREEUP_REG r9
-
-       lr  r9, [erstatus]
-
-       SWITCH_TO_KERNEL_STK
-       SAVE_ALL_SYS
+       EXCEPTION_PROLOGUE
 
        lr  r0, [efa]
        mov r1, sp
+
+       FAKE_RET_FROM_EXCPN r9
+
        bl  do_memory_error
        b   ret_from_exception
 ARC_EXIT mem_service
@@ -308,11 +301,7 @@ ARC_EXIT mem_service
 
 ARC_ENTRY EV_MachineCheck
 
-       EXCPN_PROLOG_FREEUP_REG r9
-       lr  r9, [erstatus]
-
-       SWITCH_TO_KERNEL_STK
-       SAVE_ALL_SYS
+       EXCEPTION_PROLOGUE
 
        lr  r2, [ecr]
        lr  r0, [efa]
@@ -342,13 +331,7 @@ ARC_EXIT EV_MachineCheck
 
 ARC_ENTRY EV_TLBProtV
 
-       EXCPN_PROLOG_FREEUP_REG r9
-
-       ;Which mode (user/kernel) was the system in when Exception occured
-       lr  r9, [erstatus]
-
-       SWITCH_TO_KERNEL_STK
-       SAVE_ALL_SYS
+       EXCEPTION_PROLOGUE
 
        ;---------(3) Save some more regs-----------------
        ;  vineetg: Mar 6th: Random Seg Fault issue #1
@@ -406,12 +389,7 @@ ARC_EXIT EV_TLBProtV
 ; ---------------------------------------------
 ARC_ENTRY EV_PrivilegeV
 
-       EXCPN_PROLOG_FREEUP_REG r9
-
-       lr  r9, [erstatus]
-
-       SWITCH_TO_KERNEL_STK
-       SAVE_ALL_SYS
+       EXCEPTION_PROLOGUE
 
        lr  r0, [efa]
        mov r1, sp
@@ -427,14 +405,13 @@ ARC_EXIT EV_PrivilegeV
 ; ---------------------------------------------
 ARC_ENTRY EV_Extension
 
-       EXCPN_PROLOG_FREEUP_REG r9
-       lr  r9, [erstatus]
-
-       SWITCH_TO_KERNEL_STK
-       SAVE_ALL_SYS
+       EXCEPTION_PROLOGUE
 
        lr  r0, [efa]
        mov r1, sp
+
+       FAKE_RET_FROM_EXCPN r9
+
        bl  do_extension_fault
        b   ret_from_exception
 ARC_EXIT EV_Extension
@@ -526,14 +503,7 @@ trap_with_param:
 
 ARC_ENTRY EV_Trap
 
-       ; Need at least 1 reg to code the early exception prolog
-       EXCPN_PROLOG_FREEUP_REG r9
-
-       ;Which mode (user/kernel) was the system in when intr occured
-       lr  r9, [erstatus]
-
-       SWITCH_TO_KERNEL_STK
-       SAVE_ALL_SYS
+       EXCEPTION_PROLOGUE
 
        ;------- (4) What caused the Trap --------------
        lr     r12, [ecr]
@@ -642,6 +612,9 @@ resume_kernel_mode:
 
 #ifdef CONFIG_PREEMPT
 
+       ; This is a must for preempt_schedule_irq()
+       IRQ_DISABLE     r9
+
        ; Can't preempt if preemption disabled
        GET_CURR_THR_INFO_FROM_SP   r10
        ld  r8, [r10, THREAD_INFO_PREEMPT_COUNT]
@@ -651,8 +624,6 @@ resume_kernel_mode:
        ld  r9, [r10, THREAD_INFO_FLAGS]
        bbit0  r9, TIF_NEED_RESCHED, restore_regs
 
-       IRQ_DISABLE     r9
-
        ; Invoke PREEMPTION
        bl      preempt_schedule_irq
 
@@ -665,12 +636,11 @@ resume_kernel_mode:
 ;
 ; Restore the saved sys context (common exit-path for EXCPN/IRQ/Trap)
 ; IRQ shd definitely not happen between now and rtie
+; All 2 entry points to here already disable interrupts
 
 restore_regs :
 
-       ; Disable Interrupts while restoring reg-file back
-       ; XXX can this be optimised out
-       IRQ_DISABLE_SAVE    r9, r10     ;@r10 has prisitine (pre-disable) copy
+       lr      r10, [status32]
 
        ; Restore REG File. In case multiple Events outstanding,
        ; use the same priorty as rtie: EXCPN, L2 IRQ, L1 IRQ, None
index bca3052c956dab7edc2c5ccffba613f7a1d5cd39..482a42bdc051344c6b46170b6a9f59458540c742 100644 (file)
@@ -128,6 +128,7 @@ void start_kernel_secondary(void)
        atomic_inc(&mm->mm_users);
        atomic_inc(&mm->mm_count);
        current->active_mm = mm;
+       cpumask_set_cpu(cpu, mm_cpumask(mm));
 
        notify_cpu_starting(cpu);
        set_cpu_online(cpu, true);
index ac95cc239c1e47d3d2f5a133b1d66936c6af0b91..618561270518002eaf4b88339f42012980892f1a 100644 (file)
@@ -8,3 +8,4 @@
 
 obj-y  := extable.o ioremap.o dma.o fault.o init.o
 obj-y  += tlb.o tlbex.o cache_arc700.o mmap.o
+obj-$(CONFIG_SMP)              += tlbflush.o
index a08ce71854233e05510d45fc1183c19ecd35cb7c..81279ec73a6a7873b4a10fc8ec4c3546413bfd05 100644 (file)
@@ -127,9 +127,8 @@ void __init free_initrd_mem(unsigned long start, unsigned long end)
 #endif
 
 #ifdef CONFIG_OF_FLATTREE
-void __init early_init_dt_setup_initrd_arch(unsigned long start,
-                                           unsigned long end)
+void __init early_init_dt_setup_initrd_arch(u64 start, u64 end)
 {
-       pr_err("%s(%lx, %lx)\n", __func__, start, end);
+       pr_err("%s(%llx, %llx)\n", __func__, start, end);
 }
 #endif /* CONFIG_OF_FLATTREE */
index 7957dc4e4d4a4c8acee3fe521d0ecf8bb939c39c..8fed015a711617a4b20e368e3ba6ea7ae703e64f 100644 (file)
@@ -52,6 +52,7 @@
  */
 
 #include <linux/module.h>
+#include <linux/bug.h>
 #include <asm/arcregs.h>
 #include <asm/setup.h>
 #include <asm/mmu_context.h>
 
 
 /* A copy of the ASID from the PID reg is kept in asid_cache */
-int asid_cache = FIRST_ASID;
-
-/* ASID to mm struct mapping. We have one extra entry corresponding to
- * NO_ASID to save us a compare when clearing the mm entry for old asid
- * see get_new_mmu_context (asm-arc/mmu_context.h)
- */
-struct mm_struct *asid_mm_map[NUM_ASID + 1];
+DEFINE_PER_CPU(unsigned int, asid_cache) = MM_CTXT_FIRST_CYCLE;
 
 /*
  * Utility Routine to erase a J-TLB entry
- * The procedure is to look it up in the MMU. If found, ERASE it by
- *  issuing a TlbWrite CMD with PD0 = PD1 = 0
+ * Caller needs to setup Index Reg (manually or via getIndex)
  */
-
-static void __tlb_entry_erase(void)
+static inline void __tlb_entry_erase(void)
 {
        write_aux_reg(ARC_REG_TLBPD1, 0);
        write_aux_reg(ARC_REG_TLBPD0, 0);
        write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
 }
 
-static void tlb_entry_erase(unsigned int vaddr_n_asid)
+static inline unsigned int tlb_entry_lkup(unsigned long vaddr_n_asid)
 {
        unsigned int idx;
 
-       /* Locate the TLB entry for this vaddr + ASID */
        write_aux_reg(ARC_REG_TLBPD0, vaddr_n_asid);
+
        write_aux_reg(ARC_REG_TLBCOMMAND, TLBProbe);
        idx = read_aux_reg(ARC_REG_TLBINDEX);
 
+       return idx;
+}
+
+static void tlb_entry_erase(unsigned int vaddr_n_asid)
+{
+       unsigned int idx;
+
+       /* Locate the TLB entry for this vaddr + ASID */
+       idx = tlb_entry_lkup(vaddr_n_asid);
+
        /* No error means entry found, zero it out */
        if (likely(!(idx & TLB_LKUP_ERR))) {
                __tlb_entry_erase();
-       } else {                /* Some sort of Error */
-
+       } else {
                /* Duplicate entry error */
-               if (idx & 0x1) {
-                       /* TODO we need to handle this case too */
-                       pr_emerg("unhandled Duplicate flush for %x\n",
-                              vaddr_n_asid);
-               }
-               /* else entry not found so nothing to do */
+               WARN(idx == TLB_DUP_ERR, "Probe returned Dup PD for %x\n",
+                                          vaddr_n_asid);
        }
 }
 
@@ -159,7 +157,7 @@ static void utlb_invalidate(void)
 {
 #if (CONFIG_ARC_MMU_VER >= 2)
 
-#if (CONFIG_ARC_MMU_VER < 3)
+#if (CONFIG_ARC_MMU_VER == 2)
        /* MMU v2 introduced the uTLB Flush command.
         * There was however an obscure hardware bug, where uTLB flush would
         * fail when a prior probe for J-TLB (both totally unrelated) would
@@ -182,6 +180,37 @@ static void utlb_invalidate(void)
 
 }
 
+static void tlb_entry_insert(unsigned int pd0, unsigned int pd1)
+{
+       unsigned int idx;
+
+       /*
+        * First verify if entry for this vaddr+ASID already exists
+        * This also sets up PD0 (vaddr, ASID..) for final commit
+        */
+       idx = tlb_entry_lkup(pd0);
+
+       /*
+        * If Not already present get a free slot from MMU.
+        * Otherwise, Probe would have located the entry and set INDEX Reg
+        * with existing location. This will cause Write CMD to over-write
+        * existing entry with new PD0 and PD1
+        */
+       if (likely(idx & TLB_LKUP_ERR))
+               write_aux_reg(ARC_REG_TLBCOMMAND, TLBGetIndex);
+
+       /* setup the other half of TLB entry (pfn, rwx..) */
+       write_aux_reg(ARC_REG_TLBPD1, pd1);
+
+       /*
+        * Commit the Entry to MMU
+        * It doesnt sound safe to use the TLBWriteNI cmd here
+        * which doesn't flush uTLBs. I'd rather be safe than sorry.
+        */
+       write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
+}
+
+
 /*
  * Un-conditionally (without lookup) erase the entire MMU contents
  */
@@ -224,13 +253,14 @@ noinline void local_flush_tlb_mm(struct mm_struct *mm)
                return;
 
        /*
-        * Workaround for Android weirdism:
-        * A binder VMA could end up in a task such that vma->mm != tsk->mm
-        * old code would cause h/w - s/w ASID to get out of sync
+        * - Move to a new ASID, but only if the mm is still wired in
+        *   (Android Binders ended up calling this for vma->mm != tsk->mm,
+        *    causing h/w - s/w ASID to get out of sync)
+        * - Also get_new_mmu_context() new implementation allocates a new
+        *   ASID only if it is not allocated already - so unallocate first
         */
-       if (current->mm != mm)
-               destroy_context(mm);
-       else
+       destroy_context(mm);
+       if (current->mm == mm)
                get_new_mmu_context(mm);
 }
 
@@ -245,8 +275,8 @@ noinline void local_flush_tlb_mm(struct mm_struct *mm)
 void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                           unsigned long end)
 {
+       const unsigned int cpu = smp_processor_id();
        unsigned long flags;
-       unsigned int asid;
 
        /* If range @start to @end is more than 32 TLB entries deep,
         * its better to move to a new ASID rather than searching for
@@ -268,11 +298,10 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
        start &= PAGE_MASK;
 
        local_irq_save(flags);
-       asid = vma->vm_mm->context.asid;
 
-       if (asid != NO_ASID) {
+       if (asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID) {
                while (start < end) {
-                       tlb_entry_erase(start | (asid & 0xff));
+                       tlb_entry_erase(start | hw_pid(vma->vm_mm, cpu));
                        start += PAGE_SIZE;
                }
        }
@@ -319,6 +348,7 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
 
 void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 {
+       const unsigned int cpu = smp_processor_id();
        unsigned long flags;
 
        /* Note that it is critical that interrupts are DISABLED between
@@ -326,9 +356,8 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
         */
        local_irq_save(flags);
 
-       if (vma->vm_mm->context.asid != NO_ASID) {
-               tlb_entry_erase((page & PAGE_MASK) |
-                               (vma->vm_mm->context.asid & 0xff));
+       if (asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID) {
+               tlb_entry_erase((page & PAGE_MASK) | hw_pid(vma->vm_mm, cpu));
                utlb_invalidate();
        }
 
@@ -341,8 +370,8 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 void create_tlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
 {
        unsigned long flags;
-       unsigned int idx, asid_or_sasid;
-       unsigned long pd0_flags;
+       unsigned int asid_or_sasid, rwx;
+       unsigned long pd0, pd1;
 
        /*
         * create_tlb() assumes that current->mm == vma->mm, since
@@ -374,47 +403,30 @@ void create_tlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
 
        local_irq_save(flags);
 
-       tlb_paranoid_check(vma->vm_mm->context.asid, address);
+       tlb_paranoid_check(hw_pid(vma->vm_mm), address);
 
        address &= PAGE_MASK;
 
        /* update this PTE credentials */
        pte_val(*ptep) |= (_PAGE_PRESENT | _PAGE_ACCESSED);
 
-       /* Create HW TLB entry Flags (in PD0) from PTE Flags */
-#if (CONFIG_ARC_MMU_VER <= 2)
-       pd0_flags = ((pte_val(*ptep) & PTE_BITS_IN_PD0) >> 1);
-#else
-       pd0_flags = ((pte_val(*ptep) & PTE_BITS_IN_PD0));
-#endif
+       /* Create HW TLB(PD0,PD1) from PTE  */
 
        /* ASID for this task */
        asid_or_sasid = read_aux_reg(ARC_REG_PID) & 0xff;
 
-       write_aux_reg(ARC_REG_TLBPD0, address | pd0_flags | asid_or_sasid);
+       pd0 = address | asid_or_sasid | (pte_val(*ptep) & PTE_BITS_IN_PD0);
 
-       /* Load remaining info in PD1 (Page Frame Addr and Kx/Kw/Kr Flags) */
-       write_aux_reg(ARC_REG_TLBPD1, (pte_val(*ptep) & PTE_BITS_IN_PD1));
+       rwx = pte_val(*ptep) & PTE_BITS_RWX;
 
-       /* First verify if entry for this vaddr+ASID already exists */
-       write_aux_reg(ARC_REG_TLBCOMMAND, TLBProbe);
-       idx = read_aux_reg(ARC_REG_TLBINDEX);
+       if (pte_val(*ptep) & _PAGE_GLOBAL)
+               rwx <<= 3;              /* r w x => Kr Kw Kx 0 0 0 */
+       else
+               rwx |= (rwx << 3);      /* r w x => Kr Kw Kx Ur Uw Ux */
 
-       /*
-        * If Not already present get a free slot from MMU.
-        * Otherwise, Probe would have located the entry and set INDEX Reg
-        * with existing location. This will cause Write CMD to over-write
-        * existing entry with new PD0 and PD1
-        */
-       if (likely(idx & TLB_LKUP_ERR))
-               write_aux_reg(ARC_REG_TLBCOMMAND, TLBGetIndex);
+       pd1 = rwx | (pte_val(*ptep) & PTE_BITS_NON_RWX_IN_PD1);
 
-       /*
-        * Commit the Entry to MMU
-        * It doesnt sound safe to use the TLBWriteNI cmd here
-        * which doesn't flush uTLBs. I'd rather be safe than sorry.
-        */
-       write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
+       tlb_entry_insert(pd0, pd1);
 
        local_irq_restore(flags);
 }
@@ -553,11 +565,6 @@ void arc_mmu_init(void)
        if (mmu->pg_sz != PAGE_SIZE)
                panic("MMU pg size != PAGE_SIZE (%luk)\n", TO_KB(PAGE_SIZE));
 
-       /*
-        * ASID mgmt data structures are compile time init
-        *  asid_cache = FIRST_ASID and asid_mm_map[] all zeroes
-        */
-
        local_flush_tlb_all();
 
        /* Enable the MMU */
@@ -674,7 +681,7 @@ void do_tlb_overlap_fault(unsigned long cause, unsigned long address,
 void print_asid_mismatch(int is_fast_path)
 {
        int pid_sw, pid_hw;
-       pid_sw = current->active_mm->context.asid;
+       pid_sw = hw_pid(current->active_mm, smp_processor_id());
        pid_hw = read_aux_reg(ARC_REG_PID) & 0xff;
 
        pr_emerg("ASID Mismatch in %s Path Handler: sw-pid=0x%x hw-pid=0x%x\n",
@@ -689,7 +696,8 @@ void tlb_paranoid_check(unsigned int pid_sw, unsigned long addr)
 
        pid_hw = read_aux_reg(ARC_REG_PID) & 0xff;
 
-       if (addr < 0x70000000 && ((pid_hw != pid_sw) || (pid_sw == NO_ASID)))
+       if (addr < 0x70000000 && ((pid_hw != pid_sw) ||
+           (pid_sw == MM_CTXT_NO_ASID)))
                print_asid_mismatch(0);
 }
 #endif
index 5c5bb23001b071b02a0ea33dba44d72211656d28..50e83ca96b96a6f0f1ef3f068b21e33a9bcb4b0c 100644 (file)
 #include <asm/arcregs.h>
 #include <asm/cache.h>
 #include <asm/processor.h>
-#if (CONFIG_ARC_MMU_VER == 1)
 #include <asm/tlb-mmu1.h>
-#endif
 
-;--------------------------------------------------------------------------
-; scratch memory to save the registers (r0-r3) used to code TLB refill Handler
-; For details refer to comments before TLBMISS_FREEUP_REGS below
+;-----------------------------------------------------------------
+; ARC700 Exception Handling doesn't auto-switch stack and it only provides
+; ONE scratch AUX reg "ARC_REG_SCRATCH_DATA0"
+;
+; For Non-SMP, the scratch AUX reg is repurposed to cache task PGD, so a
+; "global" is used to free-up FIRST core reg to be able to code the rest of
+; exception prologue (IRQ auto-disabled on Exceptions, so it's IRQ-safe).
+; Since the Fast Path TLB Miss handler is coded with 4 regs, the remaining 3
+; need to be saved as well by extending the "global" to be 4 words. Hence
+;      ".size   ex_saved_reg1, 16"
+; [All of this dance is to avoid stack switching for each TLB Miss, since we
+; only need to save only a handful of regs, as opposed to complete reg file]
+;
+; For ARC700 SMP, the "global" obviously can't be used to free up the FIRST
+; core reg as it will not be SMP safe.
+; Thus scratch AUX reg is used (and no longer used to cache task PGD).
+; To save the rest of 3 regs - per cpu, the global is made "per-cpu".
+; Epilogue thus has to locate the "per-cpu" storage for regs.
+; To avoid cache line bouncing the per-cpu global is aligned/sized per
+; L1_CACHE_SHIFT, despite fundamentally needing to be 12 bytes only. Hence
+;      ".size   ex_saved_reg1, (CONFIG_NR_CPUS << L1_CACHE_SHIFT)"
+
+; As simple as that....
 ;--------------------------------------------------------------------------
 
+; scratch memory to save [r0-r3] used to code TLB refill Handler
 ARCFP_DATA ex_saved_reg1
-       .align 1 << L1_CACHE_SHIFT      ; IMP: Must be Cache Line aligned
+       .align 1 << L1_CACHE_SHIFT
        .type   ex_saved_reg1, @object
 #ifdef CONFIG_SMP
        .size   ex_saved_reg1, (CONFIG_NR_CPUS << L1_CACHE_SHIFT)
@@ -66,6 +85,44 @@ ex_saved_reg1:
        .zero 16
 #endif
 
+.macro TLBMISS_FREEUP_REGS
+#ifdef CONFIG_SMP
+       sr  r0, [ARC_REG_SCRATCH_DATA0] ; freeup r0 to code with
+       GET_CPU_ID  r0                  ; get to per cpu scratch mem,
+       lsl r0, r0, L1_CACHE_SHIFT      ; cache line wide per cpu
+       add r0, @ex_saved_reg1, r0
+#else
+       st    r0, [@ex_saved_reg1]
+       mov_s r0, @ex_saved_reg1
+#endif
+       st_s  r1, [r0, 4]
+       st_s  r2, [r0, 8]
+       st_s  r3, [r0, 12]
+
+       ; VERIFY if the ASID in MMU-PID Reg is the same as
+       ; the one in Linux data structures
+
+       DBG_ASID_MISMATCH
+.endm
+
+.macro TLBMISS_RESTORE_REGS
+#ifdef CONFIG_SMP
+       GET_CPU_ID  r0                  ; get to per cpu scratch mem
+       lsl r0, r0, L1_CACHE_SHIFT      ; each is cache line wide
+       add r0, @ex_saved_reg1, r0
+       ld_s  r3, [r0,12]
+       ld_s  r2, [r0, 8]
+       ld_s  r1, [r0, 4]
+       lr    r0, [ARC_REG_SCRATCH_DATA0]
+#else
+       mov_s r0, @ex_saved_reg1
+       ld_s  r3, [r0,12]
+       ld_s  r2, [r0, 8]
+       ld_s  r1, [r0, 4]
+       ld_s  r0, [r0]
+#endif
+.endm
+
 ;============================================================================
 ;  Troubleshooting Stuff
 ;============================================================================
@@ -161,13 +218,17 @@ ex_saved_reg1:
 ; IN: r0 = PTE, r1 = ptr to PTE
 
 .macro CONV_PTE_TO_TLB
-       and r3, r0, PTE_BITS_IN_PD1 ; Extract permission flags+PFN from PTE
-       sr  r3, [ARC_REG_TLBPD1]    ; these go in PD1
+       and    r3, r0, PTE_BITS_RWX     ;       r w x
+       lsl    r2, r3, 3                ; r w x 0 0 0
+       and.f  0,  r0, _PAGE_GLOBAL
+       or.z   r2, r2, r3               ; r w x r w x
+
+       and r3, r0, PTE_BITS_NON_RWX_IN_PD1 ; Extract PFN+cache bits from PTE
+       or  r3, r3, r2
+
+       sr  r3, [ARC_REG_TLBPD1]        ; these go in PD1
 
        and r2, r0, PTE_BITS_IN_PD0 ; Extract other PTE flags: (V)alid, (G)lb
-#if (CONFIG_ARC_MMU_VER <= 2)   /* Neednot be done with v3 onwards */
-       lsr r2, r2                  ; shift PTE flags to match layout in PD0
-#endif
 
        lr  r3,[ARC_REG_TLBPD0]     ; MMU prepares PD0 with vaddr and asid
 
@@ -191,68 +252,6 @@ ex_saved_reg1:
 #endif
 .endm
 
-;-----------------------------------------------------------------
-; ARC700 Exception Handling doesn't auto-switch stack and it only provides
-; ONE scratch AUX reg "ARC_REG_SCRATCH_DATA0"
-;
-; For Non-SMP, the scratch AUX reg is repurposed to cache task PGD, so a
-; "global" is used to free-up FIRST core reg to be able to code the rest of
-; exception prologue (IRQ auto-disabled on Exceptions, so it's IRQ-safe).
-; Since the Fast Path TLB Miss handler is coded with 4 regs, the remaining 3
-; need to be saved as well by extending the "global" to be 4 words. Hence
-;      ".size   ex_saved_reg1, 16"
-; [All of this dance is to avoid stack switching for each TLB Miss, since we
-; only need to save only a handful of regs, as opposed to complete reg file]
-;
-; For ARC700 SMP, the "global" obviously can't be used for free up the FIRST
-; core reg as it will not be SMP safe.
-; Thus scratch AUX reg is used (and no longer used to cache task PGD).
-; To save the rest of 3 regs - per cpu, the global is made "per-cpu".
-; Epilogue thus has to locate the "per-cpu" storage for regs.
-; To avoid cache line bouncing the per-cpu global is aligned/sized per
-; L1_CACHE_SHIFT, despite fundamentally needing to be 12 bytes only. Hence
-;      ".size   ex_saved_reg1, (CONFIG_NR_CPUS << L1_CACHE_SHIFT)"
-
-; As simple as that....
-
-.macro TLBMISS_FREEUP_REGS
-#ifdef CONFIG_SMP
-       sr  r0, [ARC_REG_SCRATCH_DATA0] ; freeup r0 to code with
-       GET_CPU_ID  r0                  ; get to per cpu scratch mem,
-       lsl r0, r0, L1_CACHE_SHIFT      ; cache line wide per cpu
-       add r0, @ex_saved_reg1, r0
-#else
-       st    r0, [@ex_saved_reg1]
-       mov_s r0, @ex_saved_reg1
-#endif
-       st_s  r1, [r0, 4]
-       st_s  r2, [r0, 8]
-       st_s  r3, [r0, 12]
-
-       ; VERIFY if the ASID in MMU-PID Reg is same as
-       ; one in Linux data structures
-
-       DBG_ASID_MISMATCH
-.endm
-
-;-----------------------------------------------------------------
-.macro TLBMISS_RESTORE_REGS
-#ifdef CONFIG_SMP
-       GET_CPU_ID  r0                  ; get to per cpu scratch mem
-       lsl r0, r0, L1_CACHE_SHIFT      ; each is cache line wide
-       add r0, @ex_saved_reg1, r0
-       ld_s  r3, [r0,12]
-       ld_s  r2, [r0, 8]
-       ld_s  r1, [r0, 4]
-       lr    r0, [ARC_REG_SCRATCH_DATA0]
-#else
-       mov_s r0, @ex_saved_reg1
-       ld_s  r3, [r0,12]
-       ld_s  r2, [r0, 8]
-       ld_s  r1, [r0, 4]
-       ld_s  r0, [r0]
-#endif
-.endm
 
 ARCFP_CODE     ;Fast Path Code, candidate for ICCM
 
@@ -277,8 +276,8 @@ ARC_ENTRY EV_TLBMissI
        ;----------------------------------------------------------------
        ; VERIFY_PTE: Check if PTE permissions approp for executing code
        cmp_s   r2, VMALLOC_START
-       mov.lo  r2, (_PAGE_PRESENT | _PAGE_U_EXECUTE)
-       mov.hs  r2, (_PAGE_PRESENT | _PAGE_K_EXECUTE)
+       mov_s   r2, (_PAGE_PRESENT | _PAGE_EXECUTE)
+       or.hs   r2, r2, _PAGE_GLOBAL
 
        and     r3, r0, r2  ; Mask out NON Flag bits from PTE
        xor.f   r3, r3, r2  ; check ( ( pte & flags_test ) == flags_test )
@@ -317,26 +316,21 @@ ARC_ENTRY EV_TLBMissD
        ;----------------------------------------------------------------
        ; VERIFY_PTE: Chk if PTE permissions approp for data access (R/W/R+W)
 
-       mov_s   r2, 0
+       cmp_s   r2, VMALLOC_START
+       mov_s   r2, _PAGE_PRESENT       ; common bit for K/U PTE
+       or.hs   r2, r2, _PAGE_GLOBAL    ; kernel PTE only
+
+       ; Linux PTE [RWX] bits are semantically overloaded:
+       ; -If PAGE_GLOBAL set, they refer to kernel-only flags (vmalloc)
+       ; -Otherwise they are user-mode permissions, and those are exactly
+       ;  same for kernel mode as well (e.g. copy_(to|from)_user)
+
        lr      r3, [ecr]
        btst_s  r3, ECR_C_BIT_DTLB_LD_MISS      ; Read Access
-       or.nz   r2, r2, _PAGE_U_READ            ; chk for Read flag in PTE
+       or.nz   r2, r2, _PAGE_READ              ; chk for Read flag in PTE
        btst_s  r3, ECR_C_BIT_DTLB_ST_MISS      ; Write Access
-       or.nz   r2, r2, _PAGE_U_WRITE           ; chk for Write flag in PTE
-       ; Above laddering takes care of XCHG access
-       ;   which is both Read and Write
-
-       ; If kernel mode access, ; make _PAGE_xx flags as _PAGE_K_xx
-       ; For copy_(to|from)_user, despite exception taken in kernel mode,
-       ; this code is not hit, because EFA would still be the user mode
-       ; address (EFA < 0x6000_0000).
-       ; This code is for legit kernel mode faults, vmalloc specifically
-       ; (EFA: 0x7000_0000 to 0x7FFF_FFFF)
-
-       lr      r3, [efa]
-       cmp     r3, VMALLOC_START - 1   ; If kernel mode access
-       asl.hi  r2, r2, 3               ; make _PAGE_xx flags as _PAGE_K_xx
-       or      r2, r2, _PAGE_PRESENT   ; Common flag for K/U mode
+       or.nz   r2, r2, _PAGE_WRITE             ; chk for Write flag in PTE
+       ; Above laddering takes care of XCHG access (both R and W)
 
        ; By now, r2 setup with all the Flags we need to check in PTE
        and     r3, r0, r2              ; Mask out NON Flag bits from PTE
@@ -371,13 +365,7 @@ do_slow_path_pf:
 
        ; Slow path TLB Miss handled as a regular ARC Exception
        ; (stack switching / save the complete reg-file).
-       ; That requires freeing up r9
-       EXCPN_PROLOG_FREEUP_REG r9
-
-       lr  r9, [erstatus]
-
-       SWITCH_TO_KERNEL_STK
-       SAVE_ALL_SYS
+       EXCEPTION_PROLOGUE
 
        ; ------- setup args for Linux Page fault Hanlder ---------
        mov_s r0, sp
diff --git a/arch/arc/mm/tlbflush.c b/arch/arc/mm/tlbflush.c
new file mode 100644 (file)
index 0000000..a0abe48
--- /dev/null
@@ -0,0 +1,85 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/sched.h>
+#include <asm/tlbflush.h>
+
+struct tlb_args {
+       struct vm_area_struct *ta_vma;
+       unsigned long ta_start;
+       unsigned long ta_end;
+};
+
+static inline void ipi_flush_tlb_all(void *ignored)
+{
+       local_flush_tlb_all();
+}
+
+static inline void ipi_flush_tlb_mm(void *arg)
+{
+       struct mm_struct *mm = (struct mm_struct *)arg;
+
+       local_flush_tlb_mm(mm);
+}
+
+static inline void ipi_flush_tlb_page(void *arg)
+{
+       struct tlb_args *ta = (struct tlb_args *)arg;
+
+       local_flush_tlb_page(ta->ta_vma, ta->ta_start);
+}
+
+static inline void ipi_flush_tlb_range(void *arg)
+{
+       struct tlb_args *ta = (struct tlb_args *)arg;
+
+       local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
+}
+
+static inline void ipi_flush_tlb_kernel_range(void *arg)
+{
+       struct tlb_args *ta = (struct tlb_args *)arg;
+
+       local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end);
+}
+
+void flush_tlb_all(void)
+{
+       on_each_cpu(ipi_flush_tlb_all, NULL, 1);
+}
+
+void flush_tlb_mm(struct mm_struct *mm)
+{
+       on_each_cpu_mask(mm_cpumask(mm), ipi_flush_tlb_mm, mm, 1);
+}
+
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
+{
+       struct tlb_args ta;
+       ta.ta_vma = vma;
+       ta.ta_start = uaddr;
+       on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_page, &ta, 1);
+}
+
+void flush_tlb_range(struct vm_area_struct *vma,
+                     unsigned long start, unsigned long end)
+{
+       struct tlb_args ta;
+       ta.ta_vma = vma;
+       ta.ta_start = start;
+       ta.ta_end = end;
+       on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_range, &ta, 1);
+}
+
+void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+       struct tlb_args ta;
+       ta.ta_start = start;
+       ta.ta_end = end;
+       on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
+}
index 43594d5116efce810798b763da8f3e7ea6891c93..f33dbb387afe49a876e830f49921fa11225ac61e 100644 (file)
@@ -52,6 +52,7 @@ config ARM
        select HAVE_REGS_AND_STACK_ACCESS_API
        select HAVE_SYSCALL_TRACEPOINTS
        select HAVE_UID16
+       select IRQ_FORCED_THREADING
        select KTIME_SCALAR
        select PERF_USE_VMALLOC
        select RTC_LIB
@@ -1613,13 +1614,49 @@ config ARCH_NR_GPIO
 
 source kernel/Kconfig.preempt
 
-config HZ
+config HZ_FIXED
        int
        default 200 if ARCH_EBSA110 || ARCH_S3C24XX || ARCH_S5P64X0 || \
                ARCH_S5PV210 || ARCH_EXYNOS4
        default AT91_TIMER_HZ if ARCH_AT91
        default SHMOBILE_TIMER_HZ if ARCH_SHMOBILE
-       default 100
+
+choice
+       depends on !HZ_FIXED
+       prompt "Timer frequency"
+
+config HZ_100
+       bool "100 Hz"
+
+config HZ_200
+       bool "200 Hz"
+
+config HZ_250
+       bool "250 Hz"
+
+config HZ_300
+       bool "300 Hz"
+
+config HZ_500
+       bool "500 Hz"
+
+config HZ_1000
+       bool "1000 Hz"
+
+endchoice
+
+config HZ
+       int
+       default HZ_FIXED if HZ_FIXED
+       default 100 if HZ_100
+       default 200 if HZ_200
+       default 250 if HZ_250
+       default 300 if HZ_300
+       default 500 if HZ_500
+       default 1000
+
+config SCHED_HRTICK
+       def_bool HIGH_RES_TIMERS
 
 config SCHED_HRTICK
        def_bool HIGH_RES_TIMERS
@@ -1756,6 +1793,9 @@ config HAVE_ARCH_TRANSPARENT_HUGEPAGE
        def_bool y
        depends on ARM_LPAE
 
+config ARCH_WANT_GENERAL_HUGETLB
+       def_bool y
+
 source "mm/Kconfig"
 
 config FORCE_MAX_ZONEORDER
@@ -2175,6 +2215,13 @@ config NEON
          Say Y to include support code for NEON, the ARMv7 Advanced SIMD
          Extension.
 
+config KERNEL_MODE_NEON
+       bool "Support for NEON in kernel mode"
+       default n
+       depends on NEON
+       help
+         Say Y to include support for NEON in kernel mode.
+
 endmenu
 
 menu "Userspace binary formats"
@@ -2199,7 +2246,7 @@ source "kernel/power/Kconfig"
 
 config ARCH_SUSPEND_POSSIBLE
        depends on !ARCH_S5PC100
-       depends on CPU_ARM920T || CPU_ARM926T || CPU_SA1100 || \
+       depends on CPU_ARM920T || CPU_ARM926T || CPU_FEROCEON || CPU_SA1100 || \
                CPU_V6 || CPU_V6K || CPU_V7 || CPU_XSC3 || CPU_XSCALE || CPU_MOHAWK
        def_bool y
 
index 583f4a00ec3210bf6cb3cf58dac4ae7ac8a2807c..2d57da32456278118bc9a52390036a87dc8d3651 100644 (file)
@@ -92,6 +92,7 @@ choice
        config DEBUG_BCM2835
                bool "Kernel low-level debugging on BCM2835 PL011 UART"
                depends on ARCH_BCM2835
+               select DEBUG_UART_PL01X
 
        config DEBUG_CLPS711X_UART1
                bool "Kernel low-level debugging messages via UART1"
@@ -110,6 +111,7 @@ choice
        config DEBUG_CNS3XXX
                bool "Kernel Kernel low-level debugging on Cavium Networks CNS3xxx"
                depends on ARCH_CNS3XXX
+               select DEBUG_UART_PL01X
                help
                  Say Y here if you want the debug print routines to direct
                   their output to the CNS3xxx UART0.
@@ -117,6 +119,7 @@ choice
        config DEBUG_DAVINCI_DA8XX_UART1
                bool "Kernel low-level debugging on DaVinci DA8XX using UART1"
                depends on ARCH_DAVINCI_DA8XX
+               select DEBUG_UART_8250
                help
                  Say Y here if you want the debug print routines to direct
                  their output to UART1 serial port on DaVinci DA8XX devices.
@@ -124,6 +127,7 @@ choice
        config DEBUG_DAVINCI_DA8XX_UART2
                bool "Kernel low-level debugging on DaVinci DA8XX using UART2"
                depends on ARCH_DAVINCI_DA8XX
+               select DEBUG_UART_8250
                help
                  Say Y here if you want the debug print routines to direct
                  their output to UART2 serial port on DaVinci DA8XX devices.
@@ -131,6 +135,7 @@ choice
        config DEBUG_DAVINCI_DMx_UART0
                bool "Kernel low-level debugging on DaVinci DMx using UART0"
                depends on ARCH_DAVINCI_DMx
+               select DEBUG_UART_8250
                help
                  Say Y here if you want the debug print routines to direct
                  their output to UART0 serial port on DaVinci DMx devices.
@@ -138,6 +143,7 @@ choice
        config DEBUG_DAVINCI_TNETV107X_UART1
                bool "Kernel low-level debugging on DaVinci TNETV107x using UART1"
                depends on ARCH_DAVINCI_TNETV107X
+               select DEBUG_UART_8250
                help
                  Say Y here if you want the debug print routines to direct
                  their output to UART1 serial port on DaVinci TNETV107X
@@ -177,6 +183,7 @@ choice
        config DEBUG_HIGHBANK_UART
                bool "Kernel low-level debugging messages via Highbank UART"
                depends on ARCH_HIGHBANK
+               select DEBUG_UART_PL01X
                help
                  Say Y here if you want the debug print routines to direct
                  their output to the UART on Highbank based devices.
@@ -191,6 +198,7 @@ choice
        config DEBUG_IMX23_UART
                bool "i.MX23 Debug UART"
                depends on SOC_IMX23
+               select DEBUG_UART_PL01X
                help
                  Say Y here if you want kernel low-level debugging support
                  on i.MX23.
@@ -212,6 +220,7 @@ choice
        config DEBUG_IMX28_UART
                bool "i.MX28 Debug UART"
                depends on SOC_IMX28
+               select DEBUG_UART_PL01X
                help
                  Say Y here if you want kernel low-level debugging support
                  on i.MX28.
@@ -261,6 +270,7 @@ choice
        config DEBUG_KEYSTONE_UART0
                bool "Kernel low-level debugging on KEYSTONE2 using UART0"
                depends on ARCH_KEYSTONE
+               select DEBUG_UART_8250
                help
                  Say Y here if you want the debug print routines to direct
                  their output to UART0 serial port on KEYSTONE2 devices.
@@ -268,6 +278,7 @@ choice
        config DEBUG_KEYSTONE_UART1
                bool "Kernel low-level debugging on KEYSTONE2 using UART1"
                depends on ARCH_KEYSTONE
+               select DEBUG_UART_8250
                help
                  Say Y here if you want the debug print routines to direct
                  their output to UART1 serial port on KEYSTONE2 devices.
@@ -275,6 +286,7 @@ choice
        config DEBUG_MMP_UART2
                bool "Kernel low-level debugging message via MMP UART2"
                depends on ARCH_MMP
+               select DEBUG_UART_8250
                help
                  Say Y here if you want kernel low-level debugging support
                  on MMP UART2.
@@ -282,6 +294,7 @@ choice
        config DEBUG_MMP_UART3
                bool "Kernel low-level debugging message via MMP UART3"
                depends on ARCH_MMP
+               select DEBUG_UART_8250
                help
                  Say Y here if you want kernel low-level debugging support
                  on MMP UART3.
@@ -326,6 +339,7 @@ choice
        config DEBUG_MVEBU_UART
                bool "Kernel low-level debugging messages via MVEBU UART (old bootloaders)"
                depends on ARCH_MVEBU
+               select DEBUG_UART_8250
                help
                  Say Y here if you want kernel low-level debugging support
                  on MVEBU based platforms.
@@ -344,6 +358,7 @@ choice
        config DEBUG_MVEBU_UART_ALTERNATE
                bool "Kernel low-level debugging messages via MVEBU UART (new bootloaders)"
                depends on ARCH_MVEBU
+               select DEBUG_UART_8250
                help
                  Say Y here if you want kernel low-level debugging support
                  on MVEBU based platforms.
@@ -358,6 +373,7 @@ choice
        config DEBUG_NOMADIK_UART
                bool "Kernel low-level debugging messages via NOMADIK UART"
                depends on ARCH_NOMADIK
+               select DEBUG_UART_PL01X
                help
                  Say Y here if you want kernel low-level debugging support
                  on NOMADIK based platforms.
@@ -365,6 +381,7 @@ choice
        config DEBUG_NSPIRE_CLASSIC_UART
                bool "Kernel low-level debugging via TI-NSPIRE 8250 UART"
                depends on ARCH_NSPIRE
+               select DEBUG_UART_8250
                help
                  Say Y here if you want kernel low-level debugging support
                  on TI-NSPIRE classic models.
@@ -372,20 +389,82 @@ choice
        config DEBUG_NSPIRE_CX_UART
                bool "Kernel low-level debugging via TI-NSPIRE PL011 UART"
                depends on ARCH_NSPIRE
+               select DEBUG_UART_PL01X
                help
                  Say Y here if you want kernel low-level debugging support
                  on TI-NSPIRE CX models.
 
-       config DEBUG_OMAP2PLUS_UART
-               bool "Kernel low-level debugging messages via OMAP2PLUS UART"
+       config DEBUG_OMAP2UART1
+               bool "OMAP2/3/4 UART1 (omap2/3 sdp boards and some omap3 boards)"
                depends on ARCH_OMAP2PLUS
+               select DEBUG_OMAP2PLUS_UART
                help
-                 Say Y here if you want kernel low-level debugging support
-                 on OMAP2PLUS based platforms.
+                 This covers at least h4, 2430sdp, 3430sdp, 3630sdp,
+                 omap3 torpedo and 3530 lv som.
+
+       config DEBUG_OMAP2UART2
+               bool "Kernel low-level debugging messages via OMAP2/3/4 UART2"
+               depends on ARCH_OMAP2PLUS
+               select DEBUG_OMAP2PLUS_UART
+
+       config DEBUG_OMAP2UART3
+               bool "Kernel low-level debugging messages via OMAP2 UART3 (n8x0)"
+               depends on ARCH_OMAP2PLUS
+               select DEBUG_OMAP2PLUS_UART
+
+       config DEBUG_OMAP3UART3
+               bool "Kernel low-level debugging messages via OMAP3 UART3 (most omap3 boards)"
+               depends on ARCH_OMAP2PLUS
+               select DEBUG_OMAP2PLUS_UART
+               help
+                 This covers at least cm_t3x, beagle, crane, devkit8000,
+                 igep00x0, ldp, n900, n9(50), pandora, overo, touchbook,
+                 and 3517evm.
+
+       config DEBUG_OMAP4UART3
+               bool "Kernel low-level debugging messages via OMAP4/5 UART3 (omap4 blaze, panda, omap5 sevm)"
+               depends on ARCH_OMAP2PLUS
+               select DEBUG_OMAP2PLUS_UART
+
+       config DEBUG_OMAP3UART4
+               bool "Kernel low-level debugging messages via OMAP36XX UART4"
+               depends on ARCH_OMAP2PLUS
+               select DEBUG_OMAP2PLUS_UART
+
+       config DEBUG_OMAP4UART4
+               bool "Kernel low-level debugging messages via OMAP4/5 UART4"
+               depends on ARCH_OMAP2PLUS
+               select DEBUG_OMAP2PLUS_UART
+
+       config DEBUG_TI81XXUART1
+               bool "Kernel low-level debugging messages via TI81XX UART1 (ti8148evm)"
+               depends on ARCH_OMAP2PLUS
+               select DEBUG_OMAP2PLUS_UART
+
+       config DEBUG_TI81XXUART2
+               bool "Kernel low-level debugging messages via TI81XX UART2"
+               depends on ARCH_OMAP2PLUS
+               select DEBUG_OMAP2PLUS_UART
+
+       config DEBUG_TI81XXUART3
+               bool "Kernel low-level debugging messages via TI81XX UART3 (ti8168evm)"
+               depends on ARCH_OMAP2PLUS
+               select DEBUG_OMAP2PLUS_UART
+
+       config DEBUG_AM33XXUART1
+               bool "Kernel low-level debugging messages via AM33XX UART1"
+               depends on ARCH_OMAP2PLUS
+               select DEBUG_OMAP2PLUS_UART
+
+       config DEBUG_ZOOM_UART
+               bool "Kernel low-level debugging messages via Zoom2/3 UART"
+               depends on ARCH_OMAP2PLUS
+               select DEBUG_OMAP2PLUS_UART
 
        config DEBUG_PICOXCELL_UART
                depends on ARCH_PICOXCELL
                bool "Use PicoXcell UART for low-level debug"
+               select DEBUG_UART_8250
                help
                  Say Y here if you want kernel low-level debugging support
                  on PicoXcell based platforms.
@@ -393,6 +472,7 @@ choice
        config DEBUG_PXA_UART1
                depends on ARCH_PXA
                bool "Use PXA UART1 for low-level debug"
+               select DEBUG_UART_8250
                help
                  Say Y here if you want kernel low-level debugging support
                  on PXA UART1.
@@ -400,6 +480,7 @@ choice
        config DEBUG_REALVIEW_STD_PORT
                bool "RealView Default UART"
                depends on ARCH_REALVIEW
+               select DEBUG_UART_PL01X
                help
                  Say Y here if you want the debug print routines to direct
                  their output to the serial port on RealView EB, PB11MP, PBA8
@@ -408,14 +489,64 @@ choice
        config DEBUG_REALVIEW_PB1176_PORT
                bool "RealView PB1176 UART"
                depends on MACH_REALVIEW_PB1176
+               select DEBUG_UART_PL01X
                help
                  Say Y here if you want the debug print routines to direct
                  their output to the standard serial port on the RealView
                  PB1176 platform.
 
-       config DEBUG_ROCKCHIP_UART
-               bool "Kernel low-level debugging messages via Rockchip UART"
+       config DEBUG_RK29_UART0
+               bool "Kernel low-level debugging messages via Rockchip RK29 UART0"
+               depends on ARCH_ROCKCHIP
+               select DEBUG_UART_8250
+               help
+                 Say Y here if you want kernel low-level debugging support
+                 on Rockchip based platforms.
+
+       config DEBUG_RK29_UART1
+               bool "Kernel low-level debugging messages via Rockchip RK29 UART1"
+               depends on ARCH_ROCKCHIP
+               select DEBUG_UART_8250
+               help
+                 Say Y here if you want kernel low-level debugging support
+                 on Rockchip based platforms.
+
+       config DEBUG_RK29_UART2
+               bool "Kernel low-level debugging messages via Rockchip RK29 UART2"
                depends on ARCH_ROCKCHIP
+               select DEBUG_UART_8250
+               help
+                 Say Y here if you want kernel low-level debugging support
+                 on Rockchip based platforms.
+
+       config DEBUG_RK3X_UART0
+               bool "Kernel low-level debugging messages via Rockchip RK3X UART0"
+               depends on ARCH_ROCKCHIP
+               select DEBUG_UART_8250
+               help
+                 Say Y here if you want kernel low-level debugging support
+                 on Rockchip based platforms.
+
+       config DEBUG_RK3X_UART1
+               bool "Kernel low-level debugging messages via Rockchip RK3X UART1"
+               depends on ARCH_ROCKCHIP
+               select DEBUG_UART_8250
+               help
+                 Say Y here if you want kernel low-level debugging support
+                 on Rockchip based platforms.
+
+       config DEBUG_RK3X_UART2
+               bool "Kernel low-level debugging messages via Rockchip RK3X UART2"
+               depends on ARCH_ROCKCHIP
+               select DEBUG_UART_8250
+               help
+                 Say Y here if you want kernel low-level debugging support
+                 on Rockchip based platforms.
+
+       config DEBUG_RK3X_UART3
+               bool "Kernel low-level debugging messages via Rockchip RK3X UART3"
+               depends on ARCH_ROCKCHIP
+               select DEBUG_UART_8250
                help
                  Say Y here if you want kernel low-level debugging support
                  on Rockchip based platforms.
@@ -471,6 +602,7 @@ choice
        config DEBUG_SOCFPGA_UART
                depends on ARCH_SOCFPGA
                bool "Use SOCFPGA UART for low-level debug"
+               select DEBUG_UART_8250
                help
                  Say Y here if you want kernel low-level debugging support
                  on SOCFPGA based platforms.
@@ -478,6 +610,7 @@ choice
        config DEBUG_SUNXI_UART0
                bool "Kernel low-level debugging messages via sunXi UART0"
                depends on ARCH_SUNXI
+               select DEBUG_UART_8250
                help
                  Say Y here if you want kernel low-level debugging support
                  on Allwinner A1X based platforms on the UART0.
@@ -485,13 +618,59 @@ choice
        config DEBUG_SUNXI_UART1
                bool "Kernel low-level debugging messages via sunXi UART1"
                depends on ARCH_SUNXI
+               select DEBUG_UART_8250
                help
                  Say Y here if you want kernel low-level debugging support
                  on Allwinner A1X based platforms on the UART1.
 
-       config DEBUG_TEGRA_UART
+       config TEGRA_DEBUG_UART_AUTO_ODMDATA
+               bool "Kernel low-level debugging messages via Tegra UART via ODMDATA"
+               depends on ARCH_TEGRA
+               select DEBUG_TEGRA_UART
+               help
+                 Automatically determines which UART to use for low-level
+                 debug based on the ODMDATA value. This value is part of
+                 the BCT, and is written to the boot memory device using
+                 nvflash, or other flashing tool.  When bits 19:18 are 3,
+                 then bits 17:15 indicate which UART to use; 0/1/2/3/4
+                 are UART A/B/C/D/E.
+
+       config TEGRA_DEBUG_UARTA
+               bool "Kernel low-level debugging messages via Tegra UART A"
+               depends on ARCH_TEGRA
+               select DEBUG_TEGRA_UART
+               help
+                 Say Y here if you want kernel low-level debugging support
+                 on Tegra based platforms.
+
+       config TEGRA_DEBUG_UARTB
+               bool "Kernel low-level debugging messages via Tegra UART B"
+               depends on ARCH_TEGRA
+               select DEBUG_TEGRA_UART
+               help
+                 Say Y here if you want kernel low-level debugging support
+                 on Tegra based platforms.
+
+       config TEGRA_DEBUG_UARTC
+               bool "Kernel low-level debugging messages via Tegra UART C"
                depends on ARCH_TEGRA
-               bool "Use Tegra UART for low-level debug"
+               select DEBUG_TEGRA_UART
+               help
+                 Say Y here if you want kernel low-level debugging support
+                 on Tegra based platforms.
+
+       config TEGRA_DEBUG_UARTD
+               bool "Kernel low-level debugging messages via Tegra UART D"
+               depends on ARCH_TEGRA
+               select DEBUG_TEGRA_UART
+               help
+                 Say Y here if you want kernel low-level debugging support
+                 on Tegra based platforms.
+
+       config TEGRA_DEBUG_UARTE
+               bool "Kernel low-level debugging messages via Tegra UART E"
+               depends on ARCH_TEGRA
+               select DEBUG_TEGRA_UART
                help
                  Say Y here if you want kernel low-level debugging support
                  on Tegra based platforms.
@@ -510,19 +689,32 @@ choice
                  Say Y here if you want the debug print routines to direct
                  their output to the uart1 port on SiRFmarco devices.
 
-       config DEBUG_STI_UART
+       config STIH41X_DEBUG_ASC2
+               bool "Use StiH415/416 ASC2 UART for low-level debug"
+               depends on ARCH_STI
+               select DEBUG_STI_UART
+               help
+                 Say Y here if you want kernel low-level debugging support
+                 on STiH415/416 based platforms like b2000, which has
+                 default UART wired up to ASC2.
+
+                 If unsure, say N.
+
+       config STIH41X_DEBUG_SBC_ASC1
+               bool "Use StiH415/416 SBC ASC1 UART for low-level debug"
                depends on ARCH_STI
-               bool "Use StiH415/416 ASC for low-level debug"
+               select DEBUG_STI_UART
                help
                  Say Y here if you want kernel low-level debugging support
-                 on StiH415/416 based platforms like B2000, B2020.
-                 It support UART2 and SBC_UART1.
+                 on STiH415/416 based platforms like b2020, which has
+                 default UART wired up to SBC ASC1.
 
                  If unsure, say N.
 
        config DEBUG_U300_UART
                bool "Kernel low-level debugging messages via U300 UART0"
                depends on ARCH_U300
+               select DEBUG_UART_PL01X
                help
                  Say Y here if you want the debug print routines to direct
                  their output to the uart port on U300 devices.
@@ -548,6 +740,7 @@ choice
        config DEBUG_VEXPRESS_UART0_CA9
                bool "Use PL011 UART0 at 0x10009000 (V2P-CA9 core tile)"
                depends on ARCH_VEXPRESS
+               select DEBUG_UART_PL01X
                help
                  This option selects UART0 at 0x10009000. Except for custom models,
                  this applies only to the V2P-CA9 tile.
@@ -555,6 +748,7 @@ choice
        config DEBUG_VEXPRESS_UART0_RS1
                bool "Use PL011 UART0 at 0x1c090000 (RS1 complaint tiles)"
                depends on ARCH_VEXPRESS
+               select DEBUG_UART_PL01X
                help
                  This option selects UART0 at 0x1c090000. This applies to most
                  of the tiles using the RS1 memory map, including all new A-class
@@ -563,6 +757,7 @@ choice
        config DEBUG_VEXPRESS_UART0_CRX
                bool "Use PL011 UART0 at 0xb0090000 (Cortex-R compliant tiles)"
                depends on ARCH_VEXPRESS && !MMU
+               select DEBUG_UART_PL01X
                help
                  This option selects UART0 at 0xb0090000. This is appropriate for
                  Cortex-R series tiles and SMMs, such as Cortex-R5 and Cortex-R7
@@ -579,7 +774,7 @@ choice
                depends on !ARCH_MULTIPLATFORM
                help
                  Say Y here if your platform doesn't provide a UART option
-                 below. This relies on your platform choosing the right UART
+                 above. This relies on your platform choosing the right UART
                  definition internally in order for low-level debugging to
                  work.
 
@@ -610,11 +805,41 @@ choice
                  For more details about semihosting, please see
                  chapter 8 of DUI0203I_rvct_developer_guide.pdf from ARM Ltd.
 
+       config DEBUG_LL_UART_8250
+               bool "Kernel low-level debugging via 8250 UART"
+               help
+                 Say Y here if you wish the debug print routines to direct
+                 their output to an 8250 UART.  You can use this option
+                 to provide the parameters for the 8250 UART rather than
+                 selecting one of the platform specific options above if
+                 you know the parameters for the port.
+
+                 This option is preferred over the platform specific
+                 options; the platform specific options are deprecated
+                 and will be soon removed.
+
+       config DEBUG_LL_UART_PL01X
+               bool "Kernel low-level debugging via ARM Ltd PL01x Primecell UART"
+               help
+                 Say Y here if you wish the debug print routines to direct
+                 their output to a PL01x Primecell UART.  You can use
+                 this option to provide the parameters for the UART
+                 rather than selecting one of the platform specific
+                 options above if you know the parameters for the port.
+
+                 This option is preferred over the platform specific
+                 options; the platform specific options are deprecated
+                 and will be soon removed.
+
 endchoice
 
 config DEBUG_EXYNOS_UART
        bool
 
+config DEBUG_OMAP2PLUS_UART
+       bool
+       depends on ARCH_OMAP2PLUS
+
 config DEBUG_IMX_UART_PORT
        int "i.MX Debug UART Port Selection" if DEBUG_IMX1_UART || \
                                                DEBUG_IMX25_UART || \
@@ -631,140 +856,19 @@ config DEBUG_IMX_UART_PORT
          Choose UART port on which kernel low-level debug messages
          should be output.
 
-choice
-       prompt "Low-level debug console UART"
-       depends on DEBUG_OMAP2PLUS_UART
-
-       config DEBUG_OMAP2UART1
-               bool "OMAP2/3/4 UART1 (omap2/3 sdp boards and some omap3 boards)"
-               help
-                 This covers at least h4, 2430sdp, 3430sdp, 3630sdp,
-                 omap3 torpedo and 3530 lv som.
-
-       config DEBUG_OMAP2UART2
-               bool "OMAP2/3/4 UART2"
-
-       config DEBUG_OMAP2UART3
-               bool "OMAP2 UART3 (n8x0)"
-
-       config DEBUG_OMAP3UART3
-               bool "OMAP3 UART3 (most omap3 boards)"
-               help
-                 This covers at least cm_t3x, beagle, crane, devkit8000,
-                 igep00x0, ldp, n900, n9(50), pandora, overo, touchbook,
-                 and 3517evm.
-
-       config DEBUG_OMAP4UART3
-               bool "OMAP4/5 UART3 (omap4 blaze, panda, omap5 sevm)"
-
-       config DEBUG_OMAP3UART4
-               bool "OMAP36XX UART4"
-
-       config DEBUG_OMAP4UART4
-               bool "OMAP4/5 UART4"
-
-       config DEBUG_TI81XXUART1
-               bool "TI81XX UART1 (ti8148evm)"
-
-       config DEBUG_TI81XXUART2
-               bool "TI81XX UART2"
-
-       config DEBUG_TI81XXUART3
-               bool "TI81XX UART3 (ti8168evm)"
-
-       config DEBUG_AM33XXUART1
-               bool "AM33XX UART1"
-
-       config DEBUG_ZOOM_UART
-               bool "Zoom2/3 UART"
-endchoice
-
-choice
-       prompt "Low-level debug console UART"
-       depends on DEBUG_ROCKCHIP_UART
-
-       config DEBUG_RK29_UART0
-               bool "RK29 UART0"
-
-       config DEBUG_RK29_UART1
-               bool "RK29 UART1"
-
-       config DEBUG_RK29_UART2
-               bool "RK29 UART2"
-
-       config DEBUG_RK3X_UART0
-               bool "RK3X UART0"
-
-       config DEBUG_RK3X_UART1
-               bool "RK3X UART1"
-
-       config DEBUG_RK3X_UART2
-               bool "RK3X UART2"
-
-       config DEBUG_RK3X_UART3
-               bool "RK3X UART3"
-endchoice
-
-choice
-       prompt "Low-level debug console UART"
-       depends on DEBUG_LL && DEBUG_TEGRA_UART
-
-       config TEGRA_DEBUG_UART_AUTO_ODMDATA
-       bool "Via ODMDATA"
-       help
-         Automatically determines which UART to use for low-level debug based
-         on the ODMDATA value. This value is part of the BCT, and is written
-         to the boot memory device using nvflash, or other flashing tool.
-         When bits 19:18 are 3, then bits 17:15 indicate which UART to use;
-         0/1/2/3/4 are UART A/B/C/D/E.
-
-       config TEGRA_DEBUG_UARTA
-               bool "UART A"
-
-       config TEGRA_DEBUG_UARTB
-               bool "UART B"
-
-       config TEGRA_DEBUG_UARTC
-               bool "UART C"
-
-       config TEGRA_DEBUG_UARTD
-               bool "UART D"
-
-       config TEGRA_DEBUG_UARTE
-               bool "UART E"
-
-endchoice
-
-choice
-       prompt "Low-level debug console UART"
-       depends on DEBUG_LL && DEBUG_STI_UART
-
-       config STIH41X_DEBUG_ASC2
-               bool "ASC2 UART"
-               help
-                 Say Y here if you want kernel low-level debugging support
-                 on STiH415/416 based platforms like b2000, which has
-                 default UART wired up to ASC2.
-
-                 If unsure, say N.
-
-       config STIH41X_DEBUG_SBC_ASC1
-               bool "SBC ASC1 UART"
-               help
-                 Say Y here if you want kernel low-level debugging support
-                 on STiH415/416 based platforms like b2020. which has
-                 default UART wired up to SBC ASC1.
-
-                 If unsure, say N.
+config DEBUG_TEGRA_UART
+       bool
+       depends on ARCH_TEGRA
 
-endchoice
+config DEBUG_STI_UART
+       bool
+       depends on ARCH_STI
 
 config DEBUG_LL_INCLUDE
        string
-       default "debug/bcm2835.S" if DEBUG_BCM2835
-       default "debug/cns3xxx.S" if DEBUG_CNS3XXX
+       default "debug/8250.S" if DEBUG_LL_UART_8250 || DEBUG_UART_8250
+       default "debug/pl01x.S" if DEBUG_LL_UART_PL01X || DEBUG_UART_PL01X
        default "debug/exynos.S" if DEBUG_EXYNOS_UART
-       default "debug/highbank.S" if DEBUG_HIGHBANK_UART
        default "debug/icedcc.S" if DEBUG_ICEDCC
        default "debug/imx.S" if DEBUG_IMX1_UART || \
                                 DEBUG_IMX25_UART || \
@@ -775,33 +879,162 @@ config DEBUG_LL_INCLUDE
                                 DEBUG_IMX53_UART ||\
                                 DEBUG_IMX6Q_UART || \
                                 DEBUG_IMX6SL_UART
-       default "debug/keystone.S" if DEBUG_KEYSTONE_UART0 || \
-                                     DEBUG_KEYSTONE_UART1
-       default "debug/mvebu.S" if DEBUG_MVEBU_UART || \
-                                  DEBUG_MVEBU_UART_ALTERNATE
-       default "debug/mxs.S" if DEBUG_IMX23_UART || DEBUG_IMX28_UART
-       default "debug/nomadik.S" if DEBUG_NOMADIK_UART
-       default "debug/nspire.S" if     DEBUG_NSPIRE_CX_UART || \
-                                       DEBUG_NSPIRE_CLASSIC_UART
        default "debug/omap2plus.S" if DEBUG_OMAP2PLUS_UART
-       default "debug/picoxcell.S" if DEBUG_PICOXCELL_UART
-       default "debug/pxa.S" if DEBUG_PXA_UART1 || DEBUG_MMP_UART2 || \
-                                DEBUG_MMP_UART3
-       default "debug/rockchip.S" if DEBUG_ROCKCHIP_UART
        default "debug/sirf.S" if DEBUG_SIRFPRIMA2_UART1 || DEBUG_SIRFMARCO_UART1
-       default "debug/socfpga.S" if DEBUG_SOCFPGA_UART
        default "debug/sti.S" if DEBUG_STI_UART
-       default "debug/sunxi.S" if DEBUG_SUNXI_UART0 || DEBUG_SUNXI_UART1
        default "debug/tegra.S" if DEBUG_TEGRA_UART
-       default "debug/u300.S" if DEBUG_U300_UART
        default "debug/ux500.S" if DEBUG_UX500_UART
-       default "debug/vexpress.S" if DEBUG_VEXPRESS_UART0_DETECT || \
-               DEBUG_VEXPRESS_UART0_CA9 || DEBUG_VEXPRESS_UART0_RS1 || \
-               DEBUG_VEXPRESS_UART0_CRX
+       default "debug/vexpress.S" if DEBUG_VEXPRESS_UART0_DETECT
        default "debug/vt8500.S" if DEBUG_VT8500_UART0
        default "debug/zynq.S" if DEBUG_ZYNQ_UART0 || DEBUG_ZYNQ_UART1
        default "mach/debug-macro.S"
 
+# Compatibility options for PL01x
+config DEBUG_UART_PL01X
+       def_bool ARCH_EP93XX || \
+               ARCH_INTEGRATOR || \
+               ARCH_SPEAR3XX || \
+               ARCH_SPEAR6XX || \
+               ARCH_SPEAR13XX || \
+               ARCH_VERSATILE
+
+# Compatibility options for 8250
+config DEBUG_UART_8250
+       def_bool ARCH_DOVE || ARCH_EBSA110 || \
+               (FOOTBRIDGE && !DEBUG_DC21285_PORT) || \
+               ARCH_GEMINI || ARCH_IOP13XX || ARCH_IOP32X || \
+               ARCH_IOP33X || ARCH_IXP4XX || ARCH_KIRKWOOD || \
+               ARCH_LPC32XX || ARCH_MV78XX0 || ARCH_ORION5X || ARCH_RPC
+
+config DEBUG_UART_PHYS
+       hex "Physical base address of debug UART"
+       default 0x01c20000 if DEBUG_DAVINCI_DMx_UART0
+       default 0x01c28000 if DEBUG_SUNXI_UART0
+       default 0x01c28400 if DEBUG_SUNXI_UART1
+       default 0x01d0c000 if DEBUG_DAVINCI_DA8XX_UART1
+       default 0x01d0d000 if DEBUG_DAVINCI_DA8XX_UART2
+       default 0x02530c00 if DEBUG_KEYSTONE_UART0
+       default 0x02531000 if DEBUG_KEYSTONE_UART1
+       default 0x03010fe0 if ARCH_RPC
+       default 0x08108300 if DEBUG_DAVINCI_TNETV107X_UART1
+       default 0x10009000 if DEBUG_REALVIEW_STD_PORT || DEBUG_CNS3XXX || \
+                               DEBUG_VEXPRESS_UART0_CA9
+       default 0x1010c000 if DEBUG_REALVIEW_PB1176_PORT
+       default 0x10124000 if DEBUG_RK3X_UART0
+       default 0x10126000 if DEBUG_RK3X_UART1
+       default 0x101f1000 if ARCH_VERSATILE
+       default 0x101fb000 if DEBUG_NOMADIK_UART
+       default 0x16000000 if ARCH_INTEGRATOR
+       default 0x1c090000 if DEBUG_VEXPRESS_UART0_RS1
+       default 0x20060000 if DEBUG_RK29_UART0
+       default 0x20064000 if DEBUG_RK29_UART1 || DEBUG_RK3X_UART2
+       default 0x20068000 if DEBUG_RK29_UART2 || DEBUG_RK3X_UART3
+       default 0x20201000 if DEBUG_BCM2835
+       default 0x40090000 if ARCH_LPC32XX
+       default 0x40100000 if DEBUG_PXA_UART1
+       default 0x42000000 if ARCH_GEMINI
+       default 0x7c0003f8 if FOOTBRIDGE
+       default 0x80230000 if DEBUG_PICOXCELL_UART
+       default 0x80070000 if DEBUG_IMX23_UART
+       default 0x80074000 if DEBUG_IMX28_UART
+       default 0x808c0000 if ARCH_EP93XX
+       default 0x90020000 if DEBUG_NSPIRE_CLASSIC_UART || DEBUG_NSPIRE_CX_UART
+       default 0xb0090000 if DEBUG_VEXPRESS_UART0_CRX
+       default 0xc0013000 if DEBUG_U300_UART
+       default 0xc8000000 if ARCH_IXP4XX && !CPU_BIG_ENDIAN
+       default 0xc8000003 if ARCH_IXP4XX && CPU_BIG_ENDIAN
+       default 0xd0000000 if ARCH_SPEAR3XX || ARCH_SPEAR6XX
+       default 0xd0012000 if DEBUG_MVEBU_UART
+       default 0xd4017000 if DEBUG_MMP_UART2
+       default 0xd4018000 if DEBUG_MMP_UART3
+       default 0xe0000000 if ARCH_SPEAR13XX
+       default 0xf0000be0 if ARCH_EBSA110
+       default 0xf1012000 if DEBUG_MVEBU_UART_ALTERNATE
+       default 0xf1012000 if ARCH_DOVE || ARCH_KIRKWOOD || ARCH_MV78XX0 || \
+                               ARCH_ORION5X
+       default 0xfe800000 if ARCH_IOP32X
+       default 0xffc02000 if DEBUG_SOCFPGA_UART
+       default 0xffd82340 if ARCH_IOP13XX
+       default 0xfff36000 if DEBUG_HIGHBANK_UART
+       default 0xfffff700 if ARCH_IOP33X
+       depends on DEBUG_LL_UART_8250 || DEBUG_LL_UART_PL01X || \
+               DEBUG_UART_8250 || DEBUG_UART_PL01X
+
+config DEBUG_UART_VIRT
+       hex "Virtual base address of debug UART"
+       default 0xe0010fe0 if ARCH_RPC
+       default 0xf0000be0 if ARCH_EBSA110
+       default 0xf0009000 if DEBUG_CNS3XXX
+       default 0xf01fb000 if DEBUG_NOMADIK_UART
+       default 0xf0201000 if DEBUG_BCM2835
+       default 0xf11f1000 if ARCH_VERSATILE
+       default 0xf1600000 if ARCH_INTEGRATOR
+       default 0xf1c28000 if DEBUG_SUNXI_UART0
+       default 0xf1c28400 if DEBUG_SUNXI_UART1
+       default 0xf2100000 if DEBUG_PXA_UART1
+       default 0xf4090000 if ARCH_LPC32XX
+       default 0xf4200000 if ARCH_GEMINI
+       default 0xf8009000 if DEBUG_VEXPRESS_UART0_CA9
+       default 0xf8090000 if DEBUG_VEXPRESS_UART0_RS1
+       default 0xfb009000 if DEBUG_REALVIEW_STD_PORT
+       default 0xfb10c000 if DEBUG_REALVIEW_PB1176_PORT
+       default 0xfd000000 if ARCH_SPEAR3XX || ARCH_SPEAR6XX
+       default 0xfd000000 if ARCH_SPEAR13XX
+       default 0xfd012000 if ARCH_MV78XX0
+       default 0xfde12000 if ARCH_DOVE
+       default 0xfe012000 if ARCH_ORION5X
+       default 0xfe017000 if DEBUG_MMP_UART2
+       default 0xfe018000 if DEBUG_MMP_UART3
+       default 0xfe100000 if DEBUG_IMX23_UART || DEBUG_IMX28_UART
+       default 0xfe230000 if DEBUG_PICOXCELL_UART
+       default 0xfe800000 if ARCH_IOP32X
+       default 0xfeb24000 if DEBUG_RK3X_UART0
+       default 0xfeb26000 if DEBUG_RK3X_UART1
+       default 0xfeb30c00 if DEBUG_KEYSTONE_UART0
+       default 0xfeb31000 if DEBUG_KEYSTONE_UART1
+       default 0xfec12000 if DEBUG_MVEBU_UART || DEBUG_MVEBU_UART_ALTERNATE
+       default 0xfed60000 if DEBUG_RK29_UART0
+       default 0xfed64000 if DEBUG_RK29_UART1 || DEBUG_RK3X_UART2
+       default 0xfed68000 if DEBUG_RK29_UART2 || DEBUG_RK3X_UART3
+       default 0xfec02000 if DEBUG_SOCFPGA_UART
+       default 0xfec20000 if DEBUG_DAVINCI_DMx_UART0
+       default 0xfed0c000 if DEBUG_DAVINCI_DA8XX_UART1
+       default 0xfed0d000 if DEBUG_DAVINCI_DA8XX_UART2
+       default 0xfed12000 if ARCH_KIRKWOOD
+       default 0xfedc0000 if ARCH_EP93XX
+       default 0xfee003f8 if FOOTBRIDGE
+       default 0xfee08300 if DEBUG_DAVINCI_TNETV107X_UART1
+       default 0xfee20000 if DEBUG_NSPIRE_CLASSIC_UART || DEBUG_NSPIRE_CX_UART
+       default 0xfee36000 if DEBUG_HIGHBANK_UART
+       default 0xfee82340 if ARCH_IOP13XX
+       default 0xfef00000 if ARCH_IXP4XX && !CPU_BIG_ENDIAN
+       default 0xfef00003 if ARCH_IXP4XX && CPU_BIG_ENDIAN
+       default 0xfefff700 if ARCH_IOP33X
+       default 0xff003000 if DEBUG_U300_UART
+       default DEBUG_UART_PHYS if !MMU
+       depends on DEBUG_LL_UART_8250 || DEBUG_LL_UART_PL01X || \
+               DEBUG_UART_8250 || DEBUG_UART_PL01X
+
+config DEBUG_UART_8250_SHIFT
+       int "Register offset shift for the 8250 debug UART"
+       depends on DEBUG_LL_UART_8250 || DEBUG_UART_8250
+       default 0 if FOOTBRIDGE || ARCH_IOP32X
+       default 2
+
+config DEBUG_UART_8250_WORD
+       bool "Use 32-bit accesses for 8250 UART"
+       depends on DEBUG_LL_UART_8250 || DEBUG_UART_8250
+       depends on DEBUG_UART_8250_SHIFT >= 2
+       default y if DEBUG_PICOXCELL_UART || DEBUG_SOCFPGA_UART || \
+               ARCH_KEYSTONE || \
+               DEBUG_DAVINCI_DMx_UART0 || DEBUG_DAVINCI_DA8XX_UART1 || \
+               DEBUG_DAVINCI_DA8XX_UART2 || DEBUG_DAVINCI_TNETV107X_UART1
+
+config DEBUG_UART_8250_FLOW_CONTROL
+       bool "Enable flow control for 8250 UART"
+       depends on DEBUG_LL_UART_8250 || DEBUG_UART_8250
+       default y if ARCH_EBSA110 || FOOTBRIDGE || ARCH_GEMINI || ARCH_RPC
+
 config DEBUG_UNCOMPRESS
        bool
        depends on ARCH_MULTIPLATFORM
index ef57277fc38fa4b79262dd056a896fe6c92c77cb..376090f07231919eb1348a5b58ede1ea7f0853cd 100644 (file)
        };
 
        i2s0: i2s@03830000 {
-               compatible = "samsung,i2s-v5";
+               compatible = "samsung,s5pv210-i2s";
                reg = <0x03830000 0x100>;
                dmas = <&pdma0 10
                        &pdma0 9
                        <&clock_audss EXYNOS_I2S_BUS>,
                        <&clock_audss EXYNOS_SCLK_I2S>;
                clock-names = "iis", "i2s_opclk0", "i2s_opclk1";
-               samsung,supports-6ch;
-               samsung,supports-rstclr;
-               samsung,supports-secdai;
                samsung,idma-addr = <0x03000000>;
                pinctrl-names = "default";
                pinctrl-0 = <&i2s0_bus>;
        };
 
        i2s1: i2s@12D60000 {
-               compatible = "samsung,i2s-v5";
+               compatible = "samsung,s3c6410-i2s";
                reg = <0x12D60000 0x100>;
                dmas = <&pdma1 12
                        &pdma1 11>;
        };
 
        i2s2: i2s@12D70000 {
-               compatible = "samsung,i2s-v5";
+               compatible = "samsung,s3c6410-i2s";
                reg = <0x12D70000 0x100>;
                dmas = <&pdma0 12
                        &pdma0 11>;
index ff7f5d8558453f7722e241bc8ba2dcef5120751c..586134e2a382250883787c0fbafd9411afda67b2 100644 (file)
                #interrupt-cells = <1>;
                interrupt-map-mask = <0 0 0 0>;
                interrupt-map = <0x0 0 &gic 53>;
+               num-lanes = <4>;
        };
 
        pcie@2a0000 {
                #interrupt-cells = <1>;
                interrupt-map-mask = <0 0 0 0>;
                interrupt-map = <0x0 0 &gic 56>;
+               num-lanes = <4>;
        };
 };
diff --git a/arch/arm/include/asm/hardware/debug-8250.S b/arch/arm/include/asm/hardware/debug-8250.S
deleted file mode 100644 (file)
index 22c6892..0000000
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * arch/arm/include/asm/hardware/debug-8250.S
- *
- *  Copyright (C) 1994-1999 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/serial_reg.h>
-
-               .macro  senduart,rd,rx
-               strb    \rd, [\rx, #UART_TX << UART_SHIFT]
-               .endm
-
-               .macro  busyuart,rd,rx
-1002:          ldrb    \rd, [\rx, #UART_LSR << UART_SHIFT]
-               and     \rd, \rd, #UART_LSR_TEMT | UART_LSR_THRE
-               teq     \rd, #UART_LSR_TEMT | UART_LSR_THRE
-               bne     1002b
-               .endm
-
-               .macro  waituart,rd,rx
-#ifdef FLOW_CONTROL
-1001:          ldrb    \rd, [\rx, #UART_MSR << UART_SHIFT]
-               tst     \rd, #UART_MSR_CTS
-               beq     1001b
-#endif
-               .endm
index 441efc491b50aa0f402bf60fbe6ecd6020456875..69b879ac0289fde3a1ce8776b7d3602078286be7 100644 (file)
@@ -65,12 +65,12 @@ struct machine_desc {
 /*
  * Current machine - only accessible during boot.
  */
-extern struct machine_desc *machine_desc;
+extern const struct machine_desc *machine_desc;
 
 /*
  * Machine type table - also only accessible during boot
  */
-extern struct machine_desc __arch_info_begin[], __arch_info_end[];
+extern const struct machine_desc __arch_info_begin[], __arch_info_end[];
 #define for_each_machine_desc(p)                       \
        for (p = __arch_info_begin; p < __arch_info_end; p++)
 
index 00ca5f92648ea56616afe8bfbd4a25ff8f0b0e27..c2f5102ae6595b19b9e3d6494395d052ec74687d 100644 (file)
@@ -4,8 +4,7 @@
 struct meminfo;
 struct machine_desc;
 
-extern void arm_memblock_init(struct meminfo *, struct machine_desc *);
-
+void arm_memblock_init(struct meminfo *, const struct machine_desc *);
 phys_addr_t arm_memblock_steal(phys_addr_t size, phys_addr_t align);
 
 #endif
diff --git a/arch/arm/include/asm/neon.h b/arch/arm/include/asm/neon.h
new file mode 100644 (file)
index 0000000..8f730fe
--- /dev/null
@@ -0,0 +1,36 @@
+/*
+ * linux/arch/arm/include/asm/neon.h
+ *
+ * Copyright (C) 2013 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <asm/hwcap.h>
+
+#define cpu_has_neon()         (!!(elf_hwcap & HWCAP_NEON))
+
+#ifdef __ARM_NEON__
+
+/*
+ * If you are affected by the BUILD_BUG below, it probably means that you are
+ * using NEON code /and/ calling the kernel_neon_begin() function from the same
+ * compilation unit. To prevent issues that may arise from GCC reordering or
+ * generating(1) NEON instructions outside of these begin/end functions, the
+ * only supported way of using NEON code in the kernel is by isolating it in a
+ * separate compilation unit, and calling it from another unit from inside a
+ * kernel_neon_begin/kernel_neon_end pair.
+ *
+ * (1) Current GCC (4.7) might generate NEON instructions at O3 level if
+ *     -mfpu=neon is set.
+ */
+
+#define kernel_neon_begin() \
+       BUILD_BUG_ON_MSG(1, "kernel_neon_begin() called from NEON code")
+
+#else
+void kernel_neon_begin(void);
+#endif
+void kernel_neon_end(void);
index 04aeb02d2e116f904e1871e416f433b249ec4086..be956dbf6baea3a7481baa11c58233d0741702d1 100644 (file)
@@ -100,7 +100,7 @@ extern pgprot_t             pgprot_s2_device;
 #define PAGE_HYP               _MOD_PROT(pgprot_kernel, L_PTE_HYP)
 #define PAGE_HYP_DEVICE                _MOD_PROT(pgprot_hyp_device, L_PTE_HYP)
 #define PAGE_S2                        _MOD_PROT(pgprot_s2, L_PTE_S2_RDONLY)
-#define PAGE_S2_DEVICE         _MOD_PROT(pgprot_s2_device, L_PTE_USER | L_PTE_S2_RDONLY)
+#define PAGE_S2_DEVICE         _MOD_PROT(pgprot_s2_device, L_PTE_S2_RDWR)
 
 #define __PAGE_NONE            __pgprot(_L_PTE_DEFAULT | L_PTE_RDONLY | L_PTE_XN | L_PTE_NONE)
 #define __PAGE_SHARED          __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_XN)
index a219227c3e43815417887e2425d2c900d856333e..4a2985e21969ab283889935c20ad44a486ed1dcd 100644 (file)
 
 #ifdef CONFIG_OF
 
-extern struct machine_desc *setup_machine_fdt(unsigned int dt_phys);
+extern const struct machine_desc *setup_machine_fdt(unsigned int dt_phys);
 extern void arm_dt_memblock_reserve(void);
 extern void __init arm_dt_init_cpu_maps(void);
 
 #else /* CONFIG_OF */
 
-static inline struct machine_desc *setup_machine_fdt(unsigned int dt_phys)
+static inline const struct machine_desc *setup_machine_fdt(unsigned int dt_phys)
 {
        return NULL;
 }
index 7604673dc4278609b4f7ba68985d8b996b49ec47..4ffb26d4cad8d9f5225b83e15b9c20c1d3ef7c00 100644 (file)
@@ -7,7 +7,10 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
+#include <linux/hardirq.h>
 #include <asm-generic/xor.h>
+#include <asm/hwcap.h>
+#include <asm/neon.h>
 
 #define __XOR(a1, a2) a1 ^= a2
 
@@ -138,4 +141,74 @@ static struct xor_block_template xor_block_arm4regs = {
                xor_speed(&xor_block_arm4regs); \
                xor_speed(&xor_block_8regs);    \
                xor_speed(&xor_block_32regs);   \
+               NEON_TEMPLATES;                 \
        } while (0)
+
+#ifdef CONFIG_KERNEL_MODE_NEON
+
+extern struct xor_block_template const xor_block_neon_inner;
+
+static void
+xor_neon_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
+{
+       if (in_interrupt()) {
+               xor_arm4regs_2(bytes, p1, p2);
+       } else {
+               kernel_neon_begin();
+               xor_block_neon_inner.do_2(bytes, p1, p2);
+               kernel_neon_end();
+       }
+}
+
+static void
+xor_neon_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+               unsigned long *p3)
+{
+       if (in_interrupt()) {
+               xor_arm4regs_3(bytes, p1, p2, p3);
+       } else {
+               kernel_neon_begin();
+               xor_block_neon_inner.do_3(bytes, p1, p2, p3);
+               kernel_neon_end();
+       }
+}
+
+static void
+xor_neon_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+               unsigned long *p3, unsigned long *p4)
+{
+       if (in_interrupt()) {
+               xor_arm4regs_4(bytes, p1, p2, p3, p4);
+       } else {
+               kernel_neon_begin();
+               xor_block_neon_inner.do_4(bytes, p1, p2, p3, p4);
+               kernel_neon_end();
+       }
+}
+
+static void
+xor_neon_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+               unsigned long *p3, unsigned long *p4, unsigned long *p5)
+{
+       if (in_interrupt()) {
+               xor_arm4regs_5(bytes, p1, p2, p3, p4, p5);
+       } else {
+               kernel_neon_begin();
+               xor_block_neon_inner.do_5(bytes, p1, p2, p3, p4, p5);
+               kernel_neon_end();
+       }
+}
+
+static struct xor_block_template xor_block_neon = {
+       .name   = "neon",
+       .do_2   = xor_neon_2,
+       .do_3   = xor_neon_3,
+       .do_4   = xor_neon_4,
+       .do_5   = xor_neon_5
+};
+
+#define NEON_TEMPLATES \
+       do { if (cpu_has_neon()) xor_speed(&xor_block_neon); } while (0)
+#else
+#define NEON_TEMPLATES
+#endif
diff --git a/arch/arm/include/debug/8250.S b/arch/arm/include/debug/8250.S
new file mode 100644 (file)
index 0000000..7a2baf9
--- /dev/null
@@ -0,0 +1,54 @@
+/*
+ * arch/arm/include/debug/8250.S
+ *
+ *  Copyright (C) 1994-2013 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/serial_reg.h>
+
+               .macro  addruart, rp, rv, tmp
+               ldr     \rp, =CONFIG_DEBUG_UART_PHYS
+               ldr     \rv, =CONFIG_DEBUG_UART_VIRT
+               .endm
+
+#ifdef CONFIG_DEBUG_UART_8250_WORD
+               .macro  store, rd, rx:vararg
+               str     \rd, \rx
+               .endm
+
+               .macro  load, rd, rx:vararg
+               ldr     \rd, \rx
+               .endm
+#else
+               .macro  store, rd, rx:vararg
+               strb    \rd, \rx
+               .endm
+
+               .macro  load, rd, rx:vararg
+               ldrb    \rd, \rx
+               .endm
+#endif
+
+#define UART_SHIFT CONFIG_DEBUG_UART_8250_SHIFT
+
+               .macro  senduart,rd,rx
+               store   \rd, [\rx, #UART_TX << UART_SHIFT]
+               .endm
+
+               .macro  busyuart,rd,rx
+1002:          load    \rd, [\rx, #UART_LSR << UART_SHIFT]
+               and     \rd, \rd, #UART_LSR_TEMT | UART_LSR_THRE
+               teq     \rd, #UART_LSR_TEMT | UART_LSR_THRE
+               bne     1002b
+               .endm
+
+               .macro  waituart,rd,rx
+#ifdef CONFIG_DEBUG_UART_8250_FLOW_CONTROL
+1001:          load    \rd, [\rx, #UART_MSR << UART_SHIFT]
+               tst     \rd, #UART_MSR_CTS
+               beq     1001b
+#endif
+               .endm
diff --git a/arch/arm/include/debug/8250_32.S b/arch/arm/include/debug/8250_32.S
deleted file mode 100644 (file)
index 8db01ee..0000000
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * Copyright (c) 2011 Picochip Ltd., Jamie Iles
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Derived from arch/arm/mach-davinci/include/mach/debug-macro.S to use 32-bit
- * accesses to the 8250.
- */
-
-#include <linux/serial_reg.h>
-
-               .macro  senduart,rd,rx
-               str     \rd, [\rx, #UART_TX << UART_SHIFT]
-               .endm
-
-               .macro  busyuart,rd,rx
-1002:          ldr     \rd, [\rx, #UART_LSR << UART_SHIFT]
-               and     \rd, \rd, #UART_LSR_TEMT | UART_LSR_THRE
-               teq     \rd, #UART_LSR_TEMT | UART_LSR_THRE
-               bne     1002b
-               .endm
-
-               /* The UART's don't have any flow control IO's wired up. */
-               .macro  waituart,rd,rx
-               .endm
diff --git a/arch/arm/include/debug/bcm2835.S b/arch/arm/include/debug/bcm2835.S
deleted file mode 100644 (file)
index aed9199..0000000
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Debugging macro include header
- *
- * Copyright (C) 2010 Broadcom
- * Copyright (C) 1994-1999 Russell King
- * Moved from linux/arch/arm/kernel/debug.S by Ben Dooks
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-#define BCM2835_DEBUG_PHYS 0x20201000
-#define BCM2835_DEBUG_VIRT 0xf0201000
-
-       .macro  addruart, rp, rv, tmp
-       ldr     \rp, =BCM2835_DEBUG_PHYS
-       ldr     \rv, =BCM2835_DEBUG_VIRT
-       .endm
-
-#include <asm/hardware/debug-pl01x.S>
diff --git a/arch/arm/include/debug/cns3xxx.S b/arch/arm/include/debug/cns3xxx.S
deleted file mode 100644 (file)
index d04c150..0000000
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- * Debugging macro include header
- *
- * Copyright 1994-1999 Russell King
- * Copyright 2008 Cavium Networks
- * Moved from linux/arch/arm/kernel/debug.S by Ben Dooks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- */
-
-               .macro  addruart,rp,rv,tmp
-               mov     \rp, #0x00009000
-               orr     \rv, \rp, #0xf0000000   @ virtual base
-               orr     \rp, \rp, #0x10000000
-               .endm
-
-#include <asm/hardware/debug-pl01x.S>
diff --git a/arch/arm/include/debug/highbank.S b/arch/arm/include/debug/highbank.S
deleted file mode 100644 (file)
index 8cad432..0000000
+++ /dev/null
@@ -1,17 +0,0 @@
-/*
- * Debugging macro include header
- *
- *  Copyright (C) 1994-1999 Russell King
- *  Moved from linux/arch/arm/kernel/debug.S by Ben Dooks
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-               .macro  addruart,rp,rv,tmp
-               ldr     \rv, =0xfee36000
-               ldr     \rp, =0xfff36000
-               .endm
-
-#include <asm/hardware/debug-pl01x.S>
diff --git a/arch/arm/include/debug/keystone.S b/arch/arm/include/debug/keystone.S
deleted file mode 100644 (file)
index 9aef9ba..0000000
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Early serial debug output macro for Keystone SOCs
- *
- * Copyright 2013 Texas Instruments, Inc.
- *     Santosh Shilimkar <santosh.shilimkar@ti.com>
- *
- * Based on RMKs low level debug code.
- *  Copyright (C) 1994-1999 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/serial_reg.h>
-
-#define UART_SHIFT 2
-#if defined(CONFIG_DEBUG_KEYSTONE_UART0)
-#define UART_PHYS              0x02530c00
-#define UART_VIRT              0xfeb30c00
-#elif defined(CONFIG_DEBUG_KEYSTONE_UART1)
-#define UART_PHYS              0x02531000
-#define UART_VIRT              0xfeb31000
-#endif
-
-       .macro  addruart, rp, rv, tmp
-       ldr     \rv, =UART_VIRT                 @ physical base address
-       ldr     \rp, =UART_PHYS                 @ virtual base address
-       .endm
-
-       .macro  senduart,rd,rx
-       str     \rd, [\rx, #UART_TX << UART_SHIFT]
-       .endm
-
-       .macro  busyuart,rd,rx
-1002:  ldr     \rd, [\rx, #UART_LSR << UART_SHIFT]
-       and     \rd, \rd, #UART_LSR_TEMT | UART_LSR_THRE
-       teq     \rd, #UART_LSR_TEMT | UART_LSR_THRE
-       bne     1002b
-       .endm
-
-       .macro  waituart,rd,rx
-       .endm
diff --git a/arch/arm/include/debug/mvebu.S b/arch/arm/include/debug/mvebu.S
deleted file mode 100644 (file)
index 6517311..0000000
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Early serial output macro for Marvell  SoC
- *
- * Copyright (C) 2012 Marvell
- *
- * Lior Amsalem <alior@marvell.com>
- * Gregory Clement <gregory.clement@free-electrons.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifdef CONFIG_DEBUG_MVEBU_UART_ALTERNATE
-#define ARMADA_370_XP_REGS_PHYS_BASE   0xf1000000
-#else
-#define ARMADA_370_XP_REGS_PHYS_BASE   0xd0000000
-#endif
-
-#define ARMADA_370_XP_REGS_VIRT_BASE   0xfec00000
-
-       .macro  addruart, rp, rv, tmp
-       ldr     \rp, =ARMADA_370_XP_REGS_PHYS_BASE
-       ldr     \rv, =ARMADA_370_XP_REGS_VIRT_BASE
-       orr     \rp, \rp, #0x00012000
-       orr     \rv, \rv, #0x00012000
-       .endm
-
-#define UART_SHIFT     2
-#include <asm/hardware/debug-8250.S>
diff --git a/arch/arm/include/debug/mxs.S b/arch/arm/include/debug/mxs.S
deleted file mode 100644 (file)
index d869515..0000000
+++ /dev/null
@@ -1,27 +0,0 @@
-/* arch/arm/mach-mxs/include/mach/debug-macro.S
- *
- * Debugging macro include header
- *
- *  Copyright (C) 1994-1999 Russell King
- *  Moved from linux/arch/arm/kernel/debug.S by Ben Dooks
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-#ifdef CONFIG_DEBUG_IMX23_UART
-#define UART_PADDR     0x80070000
-#elif defined (CONFIG_DEBUG_IMX28_UART)
-#define UART_PADDR     0x80074000
-#endif
-
-#define UART_VADDR     0xfe100000
-
-               .macro  addruart, rp, rv, tmp
-               ldr     \rp, =UART_PADDR        @ physical
-               ldr     \rv, =UART_VADDR        @ virtual
-               .endm
-
-#include <asm/hardware/debug-pl01x.S>
diff --git a/arch/arm/include/debug/nomadik.S b/arch/arm/include/debug/nomadik.S
deleted file mode 100644 (file)
index 7354179..0000000
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Debugging macro include header
- *
- *  Copyright (C) 1994-1999 Russell King
- *  Moved from linux/arch/arm/kernel/debug.S by Ben Dooks
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
-*/
-
-               .macro  addruart, rp, rv, tmp
-               mov     \rp, #0x00100000
-               add     \rp, \rp, #0x000fb000
-               add     \rv, \rp, #0xf0000000   @ virtual base
-               add     \rp, \rp, #0x10000000   @ physical base address
-               .endm
-
-#include <asm/hardware/debug-pl01x.S>
diff --git a/arch/arm/include/debug/nspire.S b/arch/arm/include/debug/nspire.S
deleted file mode 100644 (file)
index 886fd27..0000000
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- *     linux/arch/arm/include/debug/nspire.S
- *
- *     Copyright (C) 2013 Daniel Tang <tangrs@tangrs.id.au>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2, as
- * published by the Free Software Foundation.
- *
- */
-
-#define NSPIRE_EARLY_UART_PHYS_BASE       0x90020000
-#define NSPIRE_EARLY_UART_VIRT_BASE       0xfee20000
-
-.macro addruart, rp, rv, tmp
-       ldr \rp, =(NSPIRE_EARLY_UART_PHYS_BASE)         @ physical base address
-       ldr \rv, =(NSPIRE_EARLY_UART_VIRT_BASE)         @ virtual base address
-.endm
-
-
-#ifdef CONFIG_DEBUG_NSPIRE_CX_UART
-#include <asm/hardware/debug-pl01x.S>
-#endif
-
-#ifdef CONFIG_DEBUG_NSPIRE_CLASSIC_UART
-#define UART_SHIFT 2
-#include <asm/hardware/debug-8250.S>
-#endif
diff --git a/arch/arm/include/debug/picoxcell.S b/arch/arm/include/debug/picoxcell.S
deleted file mode 100644 (file)
index bc1f07c..0000000
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- * Copyright (c) 2011 Picochip Ltd., Jamie Iles
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-#define UART_SHIFT 2
-#define PICOXCELL_UART1_BASE           0x80230000
-#define PHYS_TO_IO(x)                  (((x) & 0x00ffffff) | 0xfe000000)
-
-               .macro  addruart, rp, rv, tmp
-               ldr     \rv, =PHYS_TO_IO(PICOXCELL_UART1_BASE)
-               ldr     \rp, =PICOXCELL_UART1_BASE
-               .endm
-
-#include "8250_32.S"
similarity index 78%
rename from arch/arm/include/asm/hardware/debug-pl01x.S
rename to arch/arm/include/debug/pl01x.S
index f9fd083eff630dd29b0710d603c11447919b9d54..37c6895b87e6d72ce5837e57a4bae0a367339cb2 100644 (file)
@@ -1,4 +1,4 @@
-/* arch/arm/include/asm/hardware/debug-pl01x.S
+/* arch/arm/include/debug/pl01x.S
  *
  * Debugging macro include header
  *
 */
 #include <linux/amba/serial.h>
 
+#ifdef CONFIG_DEBUG_UART_PHYS
+               .macro  addruart, rp, rv, tmp
+               ldr     \rp, =CONFIG_DEBUG_UART_PHYS
+               ldr     \rv, =CONFIG_DEBUG_UART_VIRT
+               .endm
+#endif
+
                .macro  senduart,rd,rx
                strb    \rd, [\rx, #UART01x_DR]
                .endm
diff --git a/arch/arm/include/debug/pxa.S b/arch/arm/include/debug/pxa.S
deleted file mode 100644 (file)
index e1e795a..0000000
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Early serial output macro for Marvell PXA/MMP SoC
- *
- * Copyright (C) 1994-1999 Russell King
- * Moved from linux/arch/arm/kernel/debug.S by Ben Dooks
- *
- * Copyright (C) 2013 Haojian Zhuang
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#if defined(CONFIG_DEBUG_PXA_UART1)
-#define PXA_UART_REG_PHYS_BASE 0x40100000
-#define PXA_UART_REG_VIRT_BASE 0xf2100000
-#elif defined(CONFIG_DEBUG_MMP_UART2)
-#define PXA_UART_REG_PHYS_BASE 0xd4017000
-#define PXA_UART_REG_VIRT_BASE 0xfe017000
-#elif defined(CONFIG_DEBUG_MMP_UART3)
-#define PXA_UART_REG_PHYS_BASE 0xd4018000
-#define PXA_UART_REG_VIRT_BASE 0xfe018000
-#else
-#error "Select uart for DEBUG_LL"
-#endif
-
-       .macro  addruart, rp, rv, tmp
-       ldr     \rp, =PXA_UART_REG_PHYS_BASE
-       ldr     \rv, =PXA_UART_REG_VIRT_BASE
-       .endm
-
-#define UART_SHIFT     2
-#include <asm/hardware/debug-8250.S>
diff --git a/arch/arm/include/debug/rockchip.S b/arch/arm/include/debug/rockchip.S
deleted file mode 100644 (file)
index cfd883e..0000000
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Early serial output macro for Rockchip SoCs
- *
- * Copyright (C) 2012 Maxime Ripard
- *
- * Maxime Ripard <maxime.ripard@free-electrons.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#if defined(CONFIG_DEBUG_RK29_UART0)
-#define ROCKCHIP_UART_DEBUG_PHYS_BASE 0x20060000
-#define ROCKCHIP_UART_DEBUG_VIRT_BASE 0xfed60000
-#elif defined(CONFIG_DEBUG_RK29_UART1)
-#define ROCKCHIP_UART_DEBUG_PHYS_BASE 0x20064000
-#define ROCKCHIP_UART_DEBUG_VIRT_BASE 0xfed64000
-#elif defined(CONFIG_DEBUG_RK29_UART2)
-#define ROCKCHIP_UART_DEBUG_PHYS_BASE 0x20068000
-#define ROCKCHIP_UART_DEBUG_VIRT_BASE 0xfed68000
-#elif defined(CONFIG_DEBUG_RK3X_UART0)
-#define ROCKCHIP_UART_DEBUG_PHYS_BASE 0x10124000
-#define ROCKCHIP_UART_DEBUG_VIRT_BASE 0xfeb24000
-#elif defined(CONFIG_DEBUG_RK3X_UART1)
-#define ROCKCHIP_UART_DEBUG_PHYS_BASE 0x10126000
-#define ROCKCHIP_UART_DEBUG_VIRT_BASE 0xfeb26000
-#elif defined(CONFIG_DEBUG_RK3X_UART2)
-#define ROCKCHIP_UART_DEBUG_PHYS_BASE 0x20064000
-#define ROCKCHIP_UART_DEBUG_VIRT_BASE 0xfed64000
-#elif defined(CONFIG_DEBUG_RK3X_UART3)
-#define ROCKCHIP_UART_DEBUG_PHYS_BASE 0x20068000
-#define ROCKCHIP_UART_DEBUG_VIRT_BASE 0xfed68000
-#endif
-
-       .macro  addruart, rp, rv, tmp
-       ldr     \rp, =ROCKCHIP_UART_DEBUG_PHYS_BASE
-       ldr     \rv, =ROCKCHIP_UART_DEBUG_VIRT_BASE
-       .endm
-
-#define UART_SHIFT     2
-#include <asm/hardware/debug-8250.S>
diff --git a/arch/arm/include/debug/socfpga.S b/arch/arm/include/debug/socfpga.S
deleted file mode 100644 (file)
index 966b2f9..0000000
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- *  Copyright (C) 1994-1999 Russell King
- *  Moved from linux/arch/arm/kernel/debug.S by Ben Dooks
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#define UART_SHIFT 2
-#define DEBUG_LL_UART_OFFSET   0x00002000
-
-               .macro  addruart, rp, rv, tmp
-               mov     \rp, #DEBUG_LL_UART_OFFSET
-               orr     \rp, \rp, #0x00c00000
-               orr     \rv, \rp, #0xfe000000   @ virtual base
-               orr     \rp, \rp, #0xff000000   @ physical base
-               .endm
-
-#include "8250_32.S"
-
diff --git a/arch/arm/include/debug/sunxi.S b/arch/arm/include/debug/sunxi.S
deleted file mode 100644 (file)
index 04eb56d..0000000
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * Early serial output macro for Allwinner A1X SoCs
- *
- * Copyright (C) 2012 Maxime Ripard
- *
- * Maxime Ripard <maxime.ripard@free-electrons.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#if defined(CONFIG_DEBUG_SUNXI_UART0)
-#define SUNXI_UART_DEBUG_PHYS_BASE 0x01c28000
-#define SUNXI_UART_DEBUG_VIRT_BASE 0xf1c28000
-#elif defined(CONFIG_DEBUG_SUNXI_UART1)
-#define SUNXI_UART_DEBUG_PHYS_BASE 0x01c28400
-#define SUNXI_UART_DEBUG_VIRT_BASE 0xf1c28400
-#endif
-
-       .macro  addruart, rp, rv, tmp
-       ldr     \rp, =SUNXI_UART_DEBUG_PHYS_BASE
-       ldr     \rv, =SUNXI_UART_DEBUG_VIRT_BASE
-       .endm
-
-#define UART_SHIFT     2
-#include <asm/hardware/debug-8250.S>
diff --git a/arch/arm/include/debug/u300.S b/arch/arm/include/debug/u300.S
deleted file mode 100644 (file)
index 6f04f08..0000000
+++ /dev/null
@@ -1,18 +0,0 @@
-/*
- * Copyright (C) 2006-2013 ST-Ericsson AB
- * License terms: GNU General Public License (GPL) version 2
- * Debugging macro include header.
- * Author: Linus Walleij <linus.walleij@stericsson.com>
- */
-#define U300_SLOW_PER_PHYS_BASE                0xc0010000
-#define U300_SLOW_PER_VIRT_BASE                0xff000000
-
-       .macro  addruart, rp, rv, tmp
-       /* If we move the address using MMU, use this. */
-       ldr     \rp,      = U300_SLOW_PER_PHYS_BASE @ MMU off, physical address
-       ldr     \rv,      = U300_SLOW_PER_VIRT_BASE @ MMU on, virtual address
-       orr     \rp, \rp, #0x00003000
-       orr     \rv, \rv, #0x00003000
-       .endm
-
-#include <asm/hardware/debug-pl01x.S>
index fbd24beeb1fad70886ea044387c873a65ef455e2..aa7f63a8b5e03a2ed550044e459f51e34fc611cc 100644 (file)
@@ -45,4 +45,4 @@
        ldr     \rv, =UART_VIRT_BASE            @ yes, virtual address
        .endm
 
-#include <asm/hardware/debug-pl01x.S>
+#include <debug/pl01x.S>
index acafb229e2b691d4a4a467a3f0a17aba37c21a5d..524acd5a223e618940f0e04785a70b5bf64084e8 100644 (file)
 
                .endm
 
-#include <asm/hardware/debug-pl01x.S>
-
-#elif defined(CONFIG_DEBUG_VEXPRESS_UART0_CA9)
-
-               .macro  addruart,rp,rv,tmp
-               mov     \rp, #DEBUG_LL_UART_OFFSET
-               orr     \rv, \rp, #DEBUG_LL_VIRT_BASE
-               orr     \rp, \rp, #DEBUG_LL_PHYS_BASE
-               .endm
-
-#include <asm/hardware/debug-pl01x.S>
-
-#elif defined(CONFIG_DEBUG_VEXPRESS_UART0_RS1)
-
-               .macro  addruart,rp,rv,tmp
-               mov     \rp, #DEBUG_LL_UART_OFFSET_RS1
-               orr     \rv, \rp, #DEBUG_LL_VIRT_BASE
-               orr     \rp, \rp, #DEBUG_LL_PHYS_BASE_RS1
-               .endm
-
-#include <asm/hardware/debug-pl01x.S>
-
-#elif defined(CONFIG_DEBUG_VEXPRESS_UART0_CRX)
-
-               .macro  addruart,rp,tmp,tmp2
-               ldr     \rp, =DEBUG_LL_UART_PHYS_CRX
-               .endm
-
-#include <asm/hardware/debug-pl01x.S>
-
-#else /* CONFIG_DEBUG_LL_UART_NONE */
-
-               .macro  addruart, rp, rv, tmp
-               /* Safe dummy values */
-               mov     \rp, #0
-               mov     \rv, #DEBUG_LL_VIRT_BASE
-               .endm
-
-               .macro  senduart,rd,rx
-               .endm
-
-               .macro  waituart,rd,rx
-               .endm
-
-               .macro  busyuart,rd,rx
-               .endm
-
+#include <debug/pl01x.S>
 #endif
index 9edc9692332d1a368a293f38e0a9543c2102a751..ec4164da6e3018737c42022bef16b3ed6bcea350 100644 (file)
@@ -7,9 +7,10 @@ static inline void save_atags(struct tag *tags) { }
 void convert_to_tag_list(struct tag *tags);
 
 #ifdef CONFIG_ATAGS
-struct machine_desc *setup_machine_tags(phys_addr_t __atags_pointer, unsigned int machine_nr);
+const struct machine_desc *setup_machine_tags(phys_addr_t __atags_pointer,
+       unsigned int machine_nr);
 #else
-static inline struct machine_desc *
+static inline const struct machine_desc *
 setup_machine_tags(phys_addr_t __atags_pointer, unsigned int machine_nr)
 {
        early_print("no ATAGS support: can't continue\n");
index 14512e6931d87957b718445dd25b31283315a222..8c14de8180c0af46fea1531261a7a276177a91a8 100644 (file)
@@ -178,11 +178,11 @@ static void __init squash_mem_tags(struct tag *tag)
                        tag->hdr.tag = ATAG_NONE;
 }
 
-struct machine_desc * __init setup_machine_tags(phys_addr_t __atags_pointer,
-                                               unsigned int machine_nr)
+const struct machine_desc * __init
+setup_machine_tags(phys_addr_t __atags_pointer, unsigned int machine_nr)
 {
        struct tag *tags = (struct tag *)&default_tags;
-       struct machine_desc *mdesc = NULL, *p;
+       const struct machine_desc *mdesc = NULL, *p;
        char *from = default_command_line;
 
        default_tags.mem.start = PHYS_OFFSET;
index 5859c8bc727c4254bc7e8fa254a4271d6b214242..eae1976f859dd6ffee3da4385163c4c5ddb21cc2 100644 (file)
@@ -176,10 +176,10 @@ void __init arm_dt_init_cpu_maps(void)
  * If a dtb was passed to the kernel in r2, then use it to choose the
  * correct machine_desc and to setup the system.
  */
-struct machine_desc * __init setup_machine_fdt(unsigned int dt_phys)
+const struct machine_desc * __init setup_machine_fdt(unsigned int dt_phys)
 {
        struct boot_param_header *devtree;
-       struct machine_desc *mdesc, *mdesc_best = NULL;
+       const struct machine_desc *mdesc, *mdesc_best = NULL;
        unsigned int score, mdesc_score = ~1;
        unsigned long dt_root;
        const char *model;
@@ -188,7 +188,7 @@ struct machine_desc * __init setup_machine_fdt(unsigned int dt_phys)
        DT_MACHINE_START(GENERIC_DT, "Generic DT based system")
        MACHINE_END
 
-       mdesc_best = (struct machine_desc *)&__mach_desc_GENERIC_DT;
+       mdesc_best = &__mach_desc_GENERIC_DT;
 #endif
 
        if (!dt_phys)
index fc7920288a3d90a3f9c3ca38be03ff845f84515a..918875d96d5dc598985c7dce050e0a1785637b49 100644 (file)
@@ -89,7 +89,8 @@ void set_fiq_handler(void *start, unsigned int length)
 
        memcpy(base + offset, start, length);
        if (!cache_is_vipt_nonaliasing())
-               flush_icache_range(base + offset, offset + length);
+               flush_icache_range((unsigned long)base + offset, offset +
+                                  length);
        flush_icache_range(0xffff0000 + offset, 0xffff0000 + offset + length);
 }
 
index d7c82df692436df0248fa1a00502cf74b7fca23c..57221e349a7ce0eec03445cd56de2845e5d23358 100644 (file)
@@ -82,6 +82,7 @@ void machine_crash_nonpanic_core(void *unused)
        crash_save_cpu(&regs, smp_processor_id());
        flush_cache_all();
 
+       set_cpu_online(smp_processor_id(), false);
        atomic_dec(&waiting_for_crash_ipi);
        while (1)
                cpu_relax();
index aebe0e99c153eaa6889efc7b36459ebdfaeb65cc..8d6147b2001f82eca02c738265f9477dc2dbaefa 100644 (file)
@@ -118,7 +118,8 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
                        continue;
                }
 
-               err = request_irq(irq, handler, IRQF_NOBALANCING, "arm-pmu",
+               err = request_irq(irq, handler,
+                                 IRQF_NOBALANCING | IRQF_NO_THREAD, "arm-pmu",
                                  cpu_pmu);
                if (err) {
                        pr_err("unable to request IRQ%d for ARM PMU counters\n",
index afc2489ee13bc098523f92549baa4a69728c1ced..7b31e63da26d7782f9fb3b84cada0f89386c36a5 100644 (file)
@@ -72,10 +72,10 @@ static int __init fpe_setup(char *line)
 __setup("fpe=", fpe_setup);
 #endif
 
-extern void paging_init(struct machine_desc *desc);
+extern void paging_init(const struct machine_desc *desc);
 extern void sanity_check_meminfo(void);
 extern enum reboot_mode reboot_mode;
-extern void setup_dma_zone(struct machine_desc *desc);
+extern void setup_dma_zone(const struct machine_desc *desc);
 
 unsigned int processor_id;
 EXPORT_SYMBOL(processor_id);
@@ -139,7 +139,7 @@ EXPORT_SYMBOL(elf_platform);
 static const char *cpu_name;
 static const char *machine_name;
 static char __initdata cmd_line[COMMAND_LINE_SIZE];
-struct machine_desc *machine_desc __initdata;
+const struct machine_desc *machine_desc __initdata;
 
 static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
 #define ENDIANNESS ((char)endian_test.l)
@@ -607,7 +607,7 @@ static void __init setup_processor(void)
 
 void __init dump_machine_table(void)
 {
-       struct machine_desc *p;
+       const struct machine_desc *p;
 
        early_print("Available machine support:\n\nID (hex)\tNAME\n");
        for_each_machine_desc(p)
@@ -694,7 +694,7 @@ static int __init early_mem(char *p)
 }
 early_param("mem", early_mem);
 
-static void __init request_standard_resources(struct machine_desc *mdesc)
+static void __init request_standard_resources(const struct machine_desc *mdesc)
 {
        struct memblock_region *region;
        struct resource *res;
@@ -852,7 +852,7 @@ void __init hyp_mode_check(void)
 
 void __init setup_arch(char **cmdline_p)
 {
-       struct machine_desc *mdesc;
+       const struct machine_desc *mdesc;
 
        setup_processor();
        mdesc = setup_machine_fdt(__atags_pointer);
index ca6bea4859b48c35e9c34d970fb02580840e23b4..9583c95adbb771fbf85d0f46d08e64aaf9cc8e42 100644 (file)
@@ -495,7 +495,6 @@ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
 
        for (addr = guest_ipa; addr < end; addr += PAGE_SIZE) {
                pte_t pte = pfn_pte(pfn, PAGE_S2_DEVICE);
-               kvm_set_s2pte_writable(&pte);
 
                ret = mmu_topup_memory_cache(&cache, 2, 2);
                if (ret)
index af72969820b4951448c9d95135383ae9d8387cde..aaf3a87311360d5c9db1531db9c3436af26237cd 100644 (file)
@@ -45,3 +45,9 @@ lib-$(CONFIG_ARCH_SHARK)      += io-shark.o
 
 $(obj)/csumpartialcopy.o:      $(obj)/csumpartialcopygeneric.S
 $(obj)/csumpartialcopyuser.o:  $(obj)/csumpartialcopygeneric.S
+
+ifeq ($(CONFIG_KERNEL_MODE_NEON),y)
+  NEON_FLAGS                   := -mfloat-abi=softfp -mfpu=neon
+  CFLAGS_xor-neon.o            += $(NEON_FLAGS)
+  lib-$(CONFIG_XOR_BLOCKS)     += xor-neon.o
+endif
diff --git a/arch/arm/lib/xor-neon.c b/arch/arm/lib/xor-neon.c
new file mode 100644 (file)
index 0000000..f485e5a
--- /dev/null
@@ -0,0 +1,42 @@
+/*
+ * linux/arch/arm/lib/xor-neon.c
+ *
+ * Copyright (C) 2013 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/raid/xor.h>
+
+#ifndef __ARM_NEON__
+#error You should compile this file with '-mfloat-abi=softfp -mfpu=neon'
+#endif
+
+/*
+ * Pull in the reference implementations while instructing GCC (through
+ * -ftree-vectorize) to attempt to exploit implicit parallelism and emit
+ * NEON instructions.
+ */
+#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)
+#pragma GCC optimize "tree-vectorize"
+#else
+/*
+ * While older versions of GCC do not generate incorrect code, they fail to
+ * recognize the parallel nature of these functions, and emit plain ARM code,
+ * which is known to be slower than the optimized ARM code in asm-arm/xor.h.
+ */
+#warning This code requires at least version 4.6 of GCC
+#endif
+
+#pragma GCC diagnostic ignored "-Wunused-variable"
+#include <asm-generic/xor.h>
+
+struct xor_block_template const xor_block_neon_inner = {
+       .name   = "__inner_neon__",
+       .do_2   = xor_8regs_2,
+       .do_3   = xor_8regs_3,
+       .do_4   = xor_8regs_4,
+       .do_5   = xor_8regs_5,
+};
index 629ea5fc95cf74e2cf72a145d3ea80c4d98aa4b3..b2a34740146aaab4d3af99b741979c670be6402e 100644 (file)
@@ -465,7 +465,7 @@ void __init at91_add_device_spi(struct spi_board_info *devices, int nr_devices)
 
 #if defined(CONFIG_FB_ATMEL) || defined(CONFIG_FB_ATMEL_MODULE)
 static u64 lcdc_dmamask = DMA_BIT_MASK(32);
-static struct atmel_lcdfb_info lcdc_data;
+static struct atmel_lcdfb_pdata lcdc_data;
 
 static struct resource lcdc_resources[] = {
        [0] = {
@@ -498,7 +498,7 @@ static struct platform_device at91_lcdc_device = {
        .num_resources  = ARRAY_SIZE(lcdc_resources),
 };
 
-void __init at91_add_device_lcdc(struct atmel_lcdfb_info *data)
+void __init at91_add_device_lcdc(struct atmel_lcdfb_pdata *data)
 {
        if (!data) {
                return;
@@ -559,7 +559,7 @@ void __init at91_add_device_lcdc(struct atmel_lcdfb_info *data)
        platform_device_register(&at91_lcdc_device);
 }
 #else
-void __init at91_add_device_lcdc(struct atmel_lcdfb_info *data) {}
+void __init at91_add_device_lcdc(struct atmel_lcdfb_pdata *data) {}
 #endif
 
 
index 858c8aac2daf06328ed99de3b56987647f81b33d..4aeadddbc18108918b883150bef49b3a770eeb42 100644 (file)
@@ -832,7 +832,7 @@ void __init at91_add_device_can(struct at91_can_data *data) {}
 
 #if defined(CONFIG_FB_ATMEL) || defined(CONFIG_FB_ATMEL_MODULE)
 static u64 lcdc_dmamask = DMA_BIT_MASK(32);
-static struct atmel_lcdfb_info lcdc_data;
+static struct atmel_lcdfb_pdata lcdc_data;
 
 static struct resource lcdc_resources[] = {
        [0] = {
@@ -859,7 +859,7 @@ static struct platform_device at91_lcdc_device = {
        .num_resources  = ARRAY_SIZE(lcdc_resources),
 };
 
-void __init at91_add_device_lcdc(struct atmel_lcdfb_info *data)
+void __init at91_add_device_lcdc(struct atmel_lcdfb_pdata *data)
 {
        if (!data)
                return;
@@ -891,7 +891,7 @@ void __init at91_add_device_lcdc(struct atmel_lcdfb_info *data)
        platform_device_register(&at91_lcdc_device);
 }
 #else
-void __init at91_add_device_lcdc(struct atmel_lcdfb_info *data) {}
+void __init at91_add_device_lcdc(struct atmel_lcdfb_pdata *data) {}
 #endif
 
 
index acb703e13331e2a9931bb321d7c1f9f71d550f11..cb36fa872d305d6f22b9133d48d3bdaaa678789d 100644 (file)
@@ -965,7 +965,7 @@ void __init at91_add_device_isi(struct isi_platform_data *data,
 
 #if defined(CONFIG_FB_ATMEL) || defined(CONFIG_FB_ATMEL_MODULE)
 static u64 lcdc_dmamask = DMA_BIT_MASK(32);
-static struct atmel_lcdfb_info lcdc_data;
+static struct atmel_lcdfb_pdata lcdc_data;
 
 static struct resource lcdc_resources[] = {
        [0] = {
@@ -991,7 +991,7 @@ static struct platform_device at91_lcdc_device = {
        .num_resources  = ARRAY_SIZE(lcdc_resources),
 };
 
-void __init at91_add_device_lcdc(struct atmel_lcdfb_info *data)
+void __init at91_add_device_lcdc(struct atmel_lcdfb_pdata *data)
 {
        if (!data)
                return;
@@ -1037,7 +1037,7 @@ void __init at91_add_device_lcdc(struct atmel_lcdfb_info *data)
        platform_device_register(&at91_lcdc_device);
 }
 #else
-void __init at91_add_device_lcdc(struct atmel_lcdfb_info *data) {}
+void __init at91_add_device_lcdc(struct atmel_lcdfb_pdata *data) {}
 #endif
 
 
index 352468f265a9616bb411fecc503a13a933e926b2..a698bdab2cce682fee2983e0942d5bc2126139e4 100644 (file)
@@ -498,7 +498,7 @@ void __init at91_add_device_ac97(struct ac97c_platform_data *data) {}
 
 #if defined(CONFIG_FB_ATMEL) || defined(CONFIG_FB_ATMEL_MODULE)
 static u64 lcdc_dmamask = DMA_BIT_MASK(32);
-static struct atmel_lcdfb_info lcdc_data;
+static struct atmel_lcdfb_pdata lcdc_data;
 
 static struct resource lcdc_resources[] = {
        [0] = {
@@ -525,7 +525,7 @@ static struct platform_device at91_lcdc_device = {
        .num_resources  = ARRAY_SIZE(lcdc_resources),
 };
 
-void __init at91_add_device_lcdc(struct atmel_lcdfb_info *data)
+void __init at91_add_device_lcdc(struct atmel_lcdfb_pdata *data)
 {
        if (!data) {
                return;
@@ -557,7 +557,7 @@ void __init at91_add_device_lcdc(struct atmel_lcdfb_info *data)
        platform_device_register(&at91_lcdc_device);
 }
 #else
-void __init at91_add_device_lcdc(struct atmel_lcdfb_info *data) {}
+void __init at91_add_device_lcdc(struct atmel_lcdfb_pdata *data) {}
 #endif
 
 
index d3437624ca4e213092132bd6a16195e8668a9d7e..473546b9408bf087a6b1a38950a9cf2157b3497f 100644 (file)
@@ -389,7 +389,7 @@ static struct fb_monspecs at91fb_default_stn_monspecs = {
                                        | ATMEL_LCDC_IFWIDTH_4 \
                                        | ATMEL_LCDC_SCANMOD_SINGLE)
 
-static void at91_lcdc_stn_power_control(int on)
+static void at91_lcdc_stn_power_control(struct atmel_lcdfb_pdata *pdata, int on)
 {
        /* backlight */
        if (on) {       /* power up */
@@ -401,7 +401,7 @@ static void at91_lcdc_stn_power_control(int on)
        }
 }
 
-static struct atmel_lcdfb_info __initdata ek_lcdc_data = {
+static struct atmel_lcdfb_pdata __initdata ek_lcdc_data = {
        .default_bpp                    = 1,
        .default_dmacon                 = ATMEL_LCDC_DMAEN,
        .default_lcdcon2                = AT91SAM9261_DEFAULT_STN_LCDCON2,
@@ -445,7 +445,7 @@ static struct fb_monspecs at91fb_default_tft_monspecs = {
                                        | ATMEL_LCDC_DISTYPE_TFT    \
                                        | ATMEL_LCDC_CLKMOD_ALWAYSACTIVE)
 
-static void at91_lcdc_tft_power_control(int on)
+static void at91_lcdc_tft_power_control(struct atmel_lcdfb_pdata *pdata, int on)
 {
        if (on)
                at91_set_gpio_value(AT91_PIN_PA12, 0);  /* power up */
@@ -453,7 +453,7 @@ static void at91_lcdc_tft_power_control(int on)
                at91_set_gpio_value(AT91_PIN_PA12, 1);  /* power down */
 }
 
-static struct atmel_lcdfb_info __initdata ek_lcdc_data = {
+static struct atmel_lcdfb_pdata __initdata ek_lcdc_data = {
        .lcdcon_is_backlight            = true,
        .default_bpp                    = 16,
        .default_dmacon                 = ATMEL_LCDC_DMAEN,
@@ -465,7 +465,7 @@ static struct atmel_lcdfb_info __initdata ek_lcdc_data = {
 #endif
 
 #else
-static struct atmel_lcdfb_info __initdata ek_lcdc_data;
+static struct atmel_lcdfb_pdata __initdata ek_lcdc_data;
 #endif
 
 
index 3284df05df14be82b8bf7acce98ef090ccf09b56..8b4942cbb6d9a62ffdb1f88e9676670817b7a107 100644 (file)
@@ -275,13 +275,13 @@ static struct fb_monspecs at91fb_default_monspecs = {
                                        | ATMEL_LCDC_DISTYPE_TFT \
                                        | ATMEL_LCDC_CLKMOD_ALWAYSACTIVE)
 
-static void at91_lcdc_power_control(int on)
+static void at91_lcdc_power_control(struct atmel_lcdfb_pdata *pdata, int on)
 {
        at91_set_gpio_value(AT91_PIN_PA30, on);
 }
 
 /* Driver datas */
-static struct atmel_lcdfb_info __initdata ek_lcdc_data = {
+static struct atmel_lcdfb_pdata __initdata ek_lcdc_data = {
        .lcdcon_is_backlight            = true,
        .default_bpp                    = 16,
        .default_dmacon                 = ATMEL_LCDC_DMAEN,
@@ -292,7 +292,7 @@ static struct atmel_lcdfb_info __initdata ek_lcdc_data = {
 };
 
 #else
-static struct atmel_lcdfb_info __initdata ek_lcdc_data;
+static struct atmel_lcdfb_pdata __initdata ek_lcdc_data;
 #endif
 
 
index 2a94896a1375029b8eb34e030981480ad35acd10..ef39078c8ce214973352a22055421d7404cb4952 100644 (file)
@@ -284,7 +284,7 @@ static struct fb_monspecs at91fb_default_monspecs = {
                                        | ATMEL_LCDC_CLKMOD_ALWAYSACTIVE)
 
 /* Driver datas */
-static struct atmel_lcdfb_info __initdata ek_lcdc_data = {
+static struct atmel_lcdfb_pdata __initdata ek_lcdc_data = {
        .lcdcon_is_backlight            = true,
        .default_bpp                    = 32,
        .default_dmacon                 = ATMEL_LCDC_DMAEN,
@@ -295,7 +295,7 @@ static struct atmel_lcdfb_info __initdata ek_lcdc_data = {
 };
 
 #else
-static struct atmel_lcdfb_info __initdata ek_lcdc_data;
+static struct atmel_lcdfb_pdata __initdata ek_lcdc_data;
 #endif
 
 
index aa265dcf212875651da778e9213177b7e41803de..604eecf6cd70d03b57c34e6a2736f9feceb5be98 100644 (file)
@@ -170,7 +170,7 @@ static struct fb_monspecs at91fb_default_monspecs = {
                                        | ATMEL_LCDC_DISTYPE_TFT \
                                        | ATMEL_LCDC_CLKMOD_ALWAYSACTIVE)
 
-static void at91_lcdc_power_control(int on)
+static void at91_lcdc_power_control(struct atmel_lcdfb_pdata *pdata, int on)
 {
        if (on)
                at91_set_gpio_value(AT91_PIN_PC1, 0);   /* power up */
@@ -179,7 +179,7 @@ static void at91_lcdc_power_control(int on)
 }
 
 /* Driver datas */
-static struct atmel_lcdfb_info __initdata ek_lcdc_data = {
+static struct atmel_lcdfb_pdata __initdata ek_lcdc_data = {
        .lcdcon_is_backlight            = true,
        .default_bpp                    = 16,
        .default_dmacon                 = ATMEL_LCDC_DMAEN,
@@ -191,7 +191,7 @@ static struct atmel_lcdfb_info __initdata ek_lcdc_data = {
 };
 
 #else
-static struct atmel_lcdfb_info __initdata ek_lcdc_data;
+static struct atmel_lcdfb_pdata __initdata ek_lcdc_data;
 #endif
 
 
index 4a234fb2ab3b80d73dbdfa6bafc1cf7a08253287..6c08b341167d308df456eb1a0e79b4f10b5a9e00 100644 (file)
@@ -107,8 +107,8 @@ extern void __init at91_add_device_pwm(u32 mask);
 extern void __init at91_add_device_ssc(unsigned id, unsigned pins);
 
  /* LCD Controller */
-struct atmel_lcdfb_info;
-extern void __init at91_add_device_lcdc(struct atmel_lcdfb_info *data);
+struct atmel_lcdfb_pdata;
+extern void __init at91_add_device_lcdc(struct atmel_lcdfb_pdata *data);
 
  /* AC97 */
 extern void __init at91_add_device_ac97(struct ac97c_platform_data *data);
index bea6793a7ede41cb04822303956d28347037460b..9f09f45835f822fe1a730d5992edda88fd7ce9d6 100644 (file)
@@ -1249,12 +1249,10 @@ static struct vpif_capture_config da850_vpif_capture_config = {
 
 static struct adv7343_platform_data adv7343_pdata = {
        .mode_config = {
-               .dac_3 = 1,
-               .dac_2 = 1,
-               .dac_1 = 1,
+               .dac = { 1, 1, 1 },
        },
        .sd_config = {
-               .sd_dac_out1 = 1,
+               .sd_dac_out = { 1 },
        },
 };
 
index 36aef3a7dedb074c5e036ee5f7af8ccec5053bcf..f1ac1c94ac0f363123842f4e9077c2bd58e645df 100644 (file)
@@ -65,7 +65,7 @@ static struct cpuidle_driver davinci_idle_driver = {
        .states[1]              = {
                .enter                  = davinci_enter_idle,
                .exit_latency           = 10,
-               .target_residency       = 100000,
+               .target_residency       = 10000,
                .flags                  = CPUIDLE_FLAG_TIME_VALID,
                .name                   = "DDR SR",
                .desc                   = "WFI and DDR Self Refresh",
diff --git a/arch/arm/mach-davinci/include/mach/debug-macro.S b/arch/arm/mach-davinci/include/mach/debug-macro.S
deleted file mode 100644 (file)
index b18b8eb..0000000
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * Debugging macro for DaVinci
- *
- * Author: Kevin Hilman, MontaVista Software, Inc. <source@mvista.com>
- *
- * 2007 (c) MontaVista Software, Inc. This file is licensed under
- * the terms of the GNU General Public License version 2. This program
- * is licensed "as is" without any warranty of any kind, whether express
- * or implied.
- */
-
-/* Modifications
- * Jan 2009    Chaithrika U S  Added senduart, busyuart, waituart
- *                             macros, based on debug-8250.S file
- *                             but using 32-bit accesses required for
- *                              some davinci devices.
- */
-
-#include <linux/serial_reg.h>
-
-#include <mach/serial.h>
-
-#define UART_SHIFT     2
-
-#if defined(CONFIG_DEBUG_DAVINCI_DMx_UART0)
-#define UART_BASE      DAVINCI_UART0_BASE
-#elif defined(CONFIG_DEBUG_DAVINCI_DA8XX_UART1)
-#define UART_BASE      DA8XX_UART1_BASE
-#elif defined(CONFIG_DEBUG_DAVINCI_DA8XX_UART2)
-#define UART_BASE      DA8XX_UART2_BASE
-#elif defined(CONFIG_DEBUG_DAVINCI_TNETV107X_UART1)
-#define UART_BASE      TNETV107X_UART2_BASE
-#define UART_VIRTBASE  TNETV107X_UART2_VIRT
-#else
-#error "Select a specifc port for DEBUG_LL"
-#endif
-
-#ifndef UART_VIRTBASE
-#define UART_VIRTBASE  IO_ADDRESS(UART_BASE)
-#endif
-
-               .macro addruart, rp, rv, tmp
-               ldr     \rp, =UART_BASE
-               ldr     \rv, =UART_VIRTBASE
-               .endm
-
-               .macro  senduart,rd,rx
-               str     \rd, [\rx, #UART_TX << UART_SHIFT]
-               .endm
-
-               .macro  busyuart,rd,rx
-1002:          ldr     \rd, [\rx, #UART_LSR << UART_SHIFT]
-               and     \rd, \rd, #UART_LSR_TEMT | UART_LSR_THRE
-               teq     \rd, #UART_LSR_TEMT | UART_LSR_THRE
-               bne     1002b
-               .endm
-
-               .macro  waituart,rd,rx
-#ifdef FLOW_CONTROL
-1001:          ldr     \rd, [\rx, #UART_MSR << UART_SHIFT]
-               tst     \rd, #UART_MSR_CTS
-               beq     1001b
-#endif
-               .endm
-
index 00247c7713135a1f403eeaa9a0ea196895efaea3..304f069ebf5001dd31a791a6a7377fb437c97cd7 100644 (file)
@@ -108,8 +108,8 @@ static void __init dove_clk_init(void)
        orion_clkdev_add(NULL, "sdhci-dove.1", sdio1);
        orion_clkdev_add(NULL, "orion_nand", nand);
        orion_clkdev_add(NULL, "cafe1000-ccic.0", camera);
-       orion_clkdev_add(NULL, "kirkwood-i2s.0", i2s0);
-       orion_clkdev_add(NULL, "kirkwood-i2s.1", i2s1);
+       orion_clkdev_add(NULL, "mvebu-audio.0", i2s0);
+       orion_clkdev_add(NULL, "mvebu-audio.1", i2s1);
        orion_clkdev_add(NULL, "mv_crypto", crypto);
        orion_clkdev_add(NULL, "dove-ac97", ac97);
        orion_clkdev_add(NULL, "dove-pdma", pdma);
diff --git a/arch/arm/mach-dove/include/mach/debug-macro.S b/arch/arm/mach-dove/include/mach/debug-macro.S
deleted file mode 100644 (file)
index 5929cbc..0000000
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- * arch/arm/mach-dove/include/mach/debug-macro.S
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <mach/bridge-regs.h>
-
-       .macro  addruart, rp, rv, tmp
-       ldr     \rp, =DOVE_SB_REGS_PHYS_BASE
-       ldr     \rv, =DOVE_SB_REGS_VIRT_BASE
-       orr     \rp, \rp, #0x00012000
-       orr     \rv, \rv, #0x00012000
-       .endm
-
-#define UART_SHIFT     2
-#include <asm/hardware/debug-8250.S>
diff --git a/arch/arm/mach-ebsa110/include/mach/debug-macro.S b/arch/arm/mach-ebsa110/include/mach/debug-macro.S
deleted file mode 100644 (file)
index bb02c05..0000000
+++ /dev/null
@@ -1,22 +0,0 @@
-/* arch/arm/mach-ebsa110/include/mach/debug-macro.S
- *
- * Debugging macro include header
- *
- *  Copyright (C) 1994-1999 Russell King
- *  Moved from linux/arch/arm/kernel/debug.S by Ben Dooks
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
-**/
-
-               .macro  addruart, rp, rv, tmp
-               mov     \rp, #0xf0000000
-               orr     \rp, \rp, #0x00000be0
-               mov     \rp, \rv
-               .endm
-
-#define UART_SHIFT     2
-#define FLOW_CONTROL
-#include <asm/hardware/debug-8250.S>
index fe3c1fa5462b72d0d44c5700ade01a816a696542..93e54fd4e3d55900192f62e573d6d97a2919aa4b 100644 (file)
@@ -194,20 +194,6 @@ config MACH_VISION_EP9307
          Say 'Y' here if you want your kernel to support the
          Vision Engraving Systems EP9307 SoM.
 
-choice
-       prompt "Select a UART for early kernel messages"
-
-config EP93XX_EARLY_UART1
-       bool "UART1"
-
-config EP93XX_EARLY_UART2
-       bool "UART2"
-
-config EP93XX_EARLY_UART3
-       bool "UART3"
-
-endchoice
-
 endmenu
 
 endif
diff --git a/arch/arm/mach-ep93xx/include/mach/debug-macro.S b/arch/arm/mach-ep93xx/include/mach/debug-macro.S
deleted file mode 100644 (file)
index af54e43..0000000
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * arch/arm/mach-ep93xx/include/mach/debug-macro.S
- * Debugging macro include header
- *
- * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or (at
- * your option) any later version.
- */
-#include <mach/ep93xx-regs.h>
-
-               .macro  addruart, rp, rv, tmp
-               ldr     \rp, =EP93XX_APB_PHYS_BASE      @ Physical base
-               ldr     \rv, =EP93XX_APB_VIRT_BASE      @ virtual base
-               orr     \rp, \rp, #0x000c0000
-               orr     \rv, \rv, #0x000c0000
-               .endm
-
-#include <asm/hardware/debug-pl01x.S>
index b5cc77d2380bd59c24c2da80a9995d45c2277f85..03c42e5400d2e70e75b7986b04d4a2eb8f387cab 100644 (file)
@@ -31,18 +31,8 @@ static void __raw_writel(unsigned int value, unsigned int ptr)
        *((volatile unsigned int *)ptr) = value;
 }
 
-#if defined(CONFIG_EP93XX_EARLY_UART1)
-#define UART_BASE              EP93XX_UART1_PHYS_BASE
-#elif defined(CONFIG_EP93XX_EARLY_UART2)
-#define UART_BASE              EP93XX_UART2_PHYS_BASE
-#elif defined(CONFIG_EP93XX_EARLY_UART3)
-#define UART_BASE              EP93XX_UART3_PHYS_BASE
-#else
-#define UART_BASE              EP93XX_UART1_PHYS_BASE
-#endif
-
-#define PHYS_UART_DATA         (UART_BASE + 0x00)
-#define PHYS_UART_FLAG         (UART_BASE + 0x18)
+#define PHYS_UART_DATA         (CONFIG_DEBUG_UART_PHYS + 0x00)
+#define PHYS_UART_FLAG         (CONFIG_DEBUG_UART_PHYS + 0x18)
 #define UART_FLAG_TXFF         0x20
 
 static inline void putc(int c)
index c169f0c99b2a323ad2f88ff0f9d20fac4579a064..02247f313e944237151e9fe656d68b4f8114ea7e 100644 (file)
 
 #include <asm/hardware/dec21285.h>
 
-#ifndef CONFIG_DEBUG_DC21285_PORT
-       /* For NetWinder debugging */
-               .macro  addruart, rp, rv, tmp
-               mov     \rp, #0x000003f8
-               orr     \rv, \rp, #0xfe000000   @ virtual
-               orr     \rv, \rv, #0x00e00000   @ virtual
-               orr     \rp, \rp, #0x7c000000   @ physical
-               .endm
-
-#define UART_SHIFT     0
-#define FLOW_CONTROL
-#include <asm/hardware/debug-8250.S>
-
-#else
 #include <mach/hardware.h>
        /* For EBSA285 debugging */
                .equ    dc21285_high, ARMCSR_BASE & 0xff000000
@@ -54,4 +40,3 @@
 
                .macro  waituart,rd,rx
                .endm
-#endif
diff --git a/arch/arm/mach-gemini/include/mach/debug-macro.S b/arch/arm/mach-gemini/include/mach/debug-macro.S
deleted file mode 100644 (file)
index 8376707..0000000
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * Debugging macro include header
- *
- *  Copyright (C) 1994-1999 Russell King
- *  Copyright (C) 2001-2006 Storlink, Corp.
- *  Copyright (C) 2008-2009 Paulius Zaleckas <paulius.zaleckas@teltonika.lt>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <mach/hardware.h>
-
-       .macro  addruart, rp, rv, tmp
-       ldr     \rp, =GEMINI_UART_BASE                  @ physical
-       ldr     \rv, =IO_ADDRESS(GEMINI_UART_BASE)      @ virtual
-       .endm
-
-#define UART_SHIFT     2
-#define FLOW_CONTROL
-#include <asm/hardware/debug-8250.S>
diff --git a/arch/arm/mach-integrator/include/mach/debug-macro.S b/arch/arm/mach-integrator/include/mach/debug-macro.S
deleted file mode 100644 (file)
index 411b116..0000000
+++ /dev/null
@@ -1,20 +0,0 @@
-/* arch/arm/mach-integrator/include/mach/debug-macro.S
- *
- * Debugging macro include header
- *
- *  Copyright (C) 1994-1999 Russell King
- *  Moved from linux/arch/arm/kernel/debug.S by Ben Dooks
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
-*/
-
-               .macro  addruart, rp, rv, tmp
-               mov     \rp, #0x16000000        @ physical base address
-               mov     \rv, #0xf0000000        @ virtual base
-               add     \rv, \rv, #0x16000000 >> 4
-               .endm
-
-#include <asm/hardware/debug-pl01x.S>
diff --git a/arch/arm/mach-iop13xx/include/mach/debug-macro.S b/arch/arm/mach-iop13xx/include/mach/debug-macro.S
deleted file mode 100644 (file)
index d869a6f..0000000
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * arch/arm/mach-iop13xx/include/mach/debug-macro.S
- *
- * Debugging macro include header
- *
- * Copyright (C) 1994-1999 Russell King
- * Moved from linux/arch/arm/kernel/debug.S by Ben Dooks
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-       .macro  addruart, rp, rv, tmp
-       mov     \rp, #0x00002300
-       orr     \rp, \rp, #0x00000040
-       orr     \rv, \rp, #0xfe000000   @ virtual
-       orr     \rv, \rv, #0x00e80000
-       orr     \rp, \rp, #0xff000000   @ physical
-       orr     \rp, \rp, #0x00d80000
-       .endm
-
-#define UART_SHIFT     2
-#include <asm/hardware/debug-8250.S>
diff --git a/arch/arm/mach-iop32x/include/mach/debug-macro.S b/arch/arm/mach-iop32x/include/mach/debug-macro.S
deleted file mode 100644 (file)
index 363bdf9..0000000
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * arch/arm/mach-iop32x/include/mach/debug-macro.S
- *
- * Debugging macro include header
- *
- * Copyright (C) 1994-1999 Russell King
- * Moved from linux/arch/arm/kernel/debug.S by Ben Dooks
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-               .macro  addruart, rp, rv, tmp
-               mov     \rp, #0xfe000000        @ physical as well as virtual
-               orr     \rp, \rp, #0x00800000   @ location of the UART
-               mov     \rv, \rp
-               .endm
-
-#define UART_SHIFT     0
-#include <asm/hardware/debug-8250.S>
diff --git a/arch/arm/mach-iop33x/include/mach/debug-macro.S b/arch/arm/mach-iop33x/include/mach/debug-macro.S
deleted file mode 100644 (file)
index 361be1f..0000000
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * arch/arm/mach-iop33x/include/mach/debug-macro.S
- *
- * Debugging macro include header
- *
- * Copyright (C) 1994-1999 Russell King
- * Moved from linux/arch/arm/kernel/debug.S by Ben Dooks
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-               .macro  addruart, rp, rv, tmp
-               mov     \rp, #0x00ff0000
-               orr     \rp, \rp, #0x0000f700
-               orr     \rv, #0xfe000000        @ virtual
-               orr     \rp, #0xff000000        @ physical
-               .endm
-
-#define UART_SHIFT     2
-#include <asm/hardware/debug-8250.S>
diff --git a/arch/arm/mach-ixp4xx/include/mach/debug-macro.S b/arch/arm/mach-ixp4xx/include/mach/debug-macro.S
deleted file mode 100644 (file)
index ff686cb..0000000
+++ /dev/null
@@ -1,26 +0,0 @@
-/* arch/arm/mach-ixp4xx/include/mach/debug-macro.S
- *
- * Debugging macro include header
- *
- *  Copyright (C) 1994-1999 Russell King
- *  Moved from linux/arch/arm/kernel/debug.S by Ben Dooks
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-                .macro  addruart, rp, rv, tmp
-#ifdef __ARMEB__
-                mov     \rp, #3         @ Uart regs are at off set of 3 if
-                                       @ byte writes used - Big Endian.
-#else
-               mov     \rp, #0
-#endif
-               orr     \rv, \rp, #0xfe000000   @ virtual
-               orr     \rv, \rv, #0x00f00000
-                orr     \rp, \rp, #0xc8000000  @ physical
-                .endm
-
-#define UART_SHIFT     2
-#include <asm/hardware/debug-8250.S>
index e9238b5567eeba8e1ac2df9402d53859b27b7879..1663de090984535dfac2f6dcae4647644b563704 100644 (file)
@@ -264,7 +264,7 @@ void __init kirkwood_clk_init(void)
        orion_clkdev_add(NULL, MV_XOR_NAME ".1", xor1);
        orion_clkdev_add("0", "pcie", pex0);
        orion_clkdev_add("1", "pcie", pex1);
-       orion_clkdev_add(NULL, "kirkwood-i2s", audio);
+       orion_clkdev_add(NULL, "mvebu-audio", audio);
        orion_clkdev_add(NULL, MV64XXX_I2C_CTLR_NAME ".0", runit);
        orion_clkdev_add(NULL, MV64XXX_I2C_CTLR_NAME ".1", runit);
 
@@ -560,7 +560,7 @@ void __init kirkwood_timer_init(void)
 /*****************************************************************************
  * Audio
  ****************************************************************************/
-static struct resource kirkwood_i2s_resources[] = {
+static struct resource kirkwood_audio_resources[] = {
        [0] = {
                .start  = AUDIO_PHYS_BASE,
                .end    = AUDIO_PHYS_BASE + SZ_16K - 1,
@@ -573,29 +573,23 @@ static struct resource kirkwood_i2s_resources[] = {
        },
 };
 
-static struct kirkwood_asoc_platform_data kirkwood_i2s_data = {
+static struct kirkwood_asoc_platform_data kirkwood_audio_data = {
        .burst       = 128,
 };
 
-static struct platform_device kirkwood_i2s_device = {
-       .name           = "kirkwood-i2s",
+static struct platform_device kirkwood_audio_device = {
+       .name           = "mvebu-audio",
        .id             = -1,
-       .num_resources  = ARRAY_SIZE(kirkwood_i2s_resources),
-       .resource       = kirkwood_i2s_resources,
+       .num_resources  = ARRAY_SIZE(kirkwood_audio_resources),
+       .resource       = kirkwood_audio_resources,
        .dev            = {
-               .platform_data  = &kirkwood_i2s_data,
+               .platform_data  = &kirkwood_audio_data,
        },
 };
 
-static struct platform_device kirkwood_pcm_device = {
-       .name           = "kirkwood-pcm-audio",
-       .id             = -1,
-};
-
 void __init kirkwood_audio_init(void)
 {
-       platform_device_register(&kirkwood_i2s_device);
-       platform_device_register(&kirkwood_pcm_device);
+       platform_device_register(&kirkwood_audio_device);
 }
 
 /*****************************************************************************
diff --git a/arch/arm/mach-kirkwood/include/mach/debug-macro.S b/arch/arm/mach-kirkwood/include/mach/debug-macro.S
deleted file mode 100644 (file)
index f785d40..0000000
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- * arch/arm/mach-kirkwood/include/mach/debug-macro.S
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <mach/bridge-regs.h>
-
-       .macro  addruart, rp, rv, tmp
-       ldr     \rp, =KIRKWOOD_REGS_PHYS_BASE
-       ldr     \rv, =KIRKWOOD_REGS_VIRT_BASE
-       orr     \rp, \rp, #0x00012000
-       orr     \rv, \rv, #0x00012000
-       .endm
-
-#define UART_SHIFT     2
-#include <asm/hardware/debug-8250.S>
diff --git a/arch/arm/mach-lpc32xx/include/mach/debug-macro.S b/arch/arm/mach-lpc32xx/include/mach/debug-macro.S
deleted file mode 100644 (file)
index 351bd6c..0000000
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * arch/arm/mach-lpc32xx/include/mach/debug-macro.S
- *
- * Author: Kevin Wells <kevin.wells@nxp.com>
- *
- * Copyright (C) 2010 NXP Semiconductors
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-/*
- * Debug output is hardcoded to standard UART 5
-*/
-
-       .macro  addruart, rp, rv, tmp
-       ldreq   \rp, =0x40090000
-       ldrne   \rv, =0xF4090000
-       .endm
-
-#define UART_SHIFT     2
-#include <asm/hardware/debug-8250.S>
diff --git a/arch/arm/mach-mv78xx0/include/mach/debug-macro.S b/arch/arm/mach-mv78xx0/include/mach/debug-macro.S
deleted file mode 100644 (file)
index a7df02b..0000000
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- * arch/arm/mach-mv78xx0/include/mach/debug-macro.S
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <mach/mv78xx0.h>
-
-       .macro  addruart, rp, rv, tmp
-       ldr     \rp, =MV78XX0_REGS_PHYS_BASE
-       ldr     \rv, =MV78XX0_REGS_VIRT_BASE
-       orr     \rp, \rp, #0x00012000
-       orr     \rv, \rv, #0x00012000
-       .endm
-
-#define UART_SHIFT     2
-#include <asm/hardware/debug-8250.S>
index 3c1279f27d1fe4436595e94fb4977707a5f5fadb..afc201713131a5941e4d61b85488aac178a2eab7 100644 (file)
@@ -570,12 +570,12 @@ static int __init omap2_init_devices(void)
                omap_init_mcspi();
                omap_init_sham();
                omap_init_aes();
+               omap_init_rng();
        } else {
                /* These can be removed when bindings are done */
                omap_init_wl12xx_of();
        }
        omap_init_sti();
-       omap_init_rng();
        omap_init_vout();
 
        return 0;
diff --git a/arch/arm/mach-orion5x/include/mach/debug-macro.S b/arch/arm/mach-orion5x/include/mach/debug-macro.S
deleted file mode 100644 (file)
index f340ed8..0000000
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * arch/arm/mach-orion5x/include/mach/debug-macro.S
- *
- * Debugging macro include header
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <mach/orion5x.h>
-
-       .macro  addruart, rp, rv, tmp
-       ldr     \rp, =ORION5X_REGS_PHYS_BASE
-       ldr     \rv, =ORION5X_REGS_VIRT_BASE
-       orr     \rp, \rp, #0x00012000
-       orr     \rv, \rv, #0x00012000
-       .endm
-
-#define UART_SHIFT     2
-#include <asm/hardware/debug-8250.S>
diff --git a/arch/arm/mach-realview/include/mach/debug-macro.S b/arch/arm/mach-realview/include/mach/debug-macro.S
deleted file mode 100644 (file)
index 8cc372d..0000000
+++ /dev/null
@@ -1,29 +0,0 @@
-/* arch/arm/mach-realview/include/mach/debug-macro.S
- *
- * Debugging macro include header
- *
- *  Copyright (C) 1994-1999 Russell King
- *  Moved from linux/arch/arm/kernel/debug.S by Ben Dooks
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifdef CONFIG_DEBUG_REALVIEW_STD_PORT
-#define DEBUG_LL_UART_OFFSET   0x00009000
-#elif defined(CONFIG_DEBUG_REALVIEW_PB1176_PORT)
-#define DEBUG_LL_UART_OFFSET   0x0010c000
-#endif
-
-#ifndef DEBUG_LL_UART_OFFSET
-#error "Unknown RealView platform"
-#endif
-
-               .macro  addruart, rp, rv, tmp
-               mov     \rp, #DEBUG_LL_UART_OFFSET
-               orr     \rv, \rp, #0xfb000000   @ virtual base
-               orr     \rp, \rp, #0x10000000   @ physical base
-               .endm
-
-#include <asm/hardware/debug-pl01x.S>
diff --git a/arch/arm/mach-rpc/include/mach/debug-macro.S b/arch/arm/mach-rpc/include/mach/debug-macro.S
deleted file mode 100644 (file)
index 6d28cc9..0000000
+++ /dev/null
@@ -1,23 +0,0 @@
-/* arch/arm/mach-rpc/include/mach/debug-macro.S
- *
- * Debugging macro include header
- *
- *  Copyright (C) 1994-1999 Russell King
- *  Moved from linux/arch/arm/kernel/debug.S by Ben Dooks
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
-*/
-
-               .macro  addruart, rp, rv, tmp
-               mov     \rp, #0x00010000
-               orr     \rp, \rp, #0x00000fe0
-               orr     \rv, \rp, #0xe0000000   @ virtual
-               orr     \rp, \rp, #0x03000000   @ physical
-               .endm
-
-#define UART_SHIFT     2
-#define FLOW_CONTROL
-#include <asm/hardware/debug-8250.S>
index c5be60d85e4b95fa1339f64492d28097ac3763dc..3a6ffa250fb19c7221b127e231c3b2091720270d 100644 (file)
@@ -358,7 +358,6 @@ static struct platform_device usbhsf_device = {
 static struct sh_eth_plat_data sh_eth_platdata = {
        .phy                    = 0x00, /* LAN8710A */
        .edmac_endian           = EDMAC_LITTLE_ENDIAN,
-       .register_type          = SH_ETH_REG_GIGABIT,
        .phy_interface          = PHY_INTERFACE_MODE_MII,
 };
 
index 3354a85c90f7091c776ab248df641d9dddb3990d..fa8885b2d5a5fda2c345a23cfd75973c87a22c50 100644 (file)
@@ -89,7 +89,6 @@ static struct sh_mobile_sdhi_info sdhi0_info = {
 static struct sh_eth_plat_data ether_platform_data __initdata = {
        .phy            = 0x01,
        .edmac_endian   = EDMAC_LITTLE_ENDIAN,
-       .register_type  = SH_ETH_REG_FAST_RCAR,
        .phy_interface  = PHY_INTERFACE_MODE_RMII,
        /*
         * Although the LINK signal is available on the board, it's connected to
diff --git a/arch/arm/mach-spear/include/mach/debug-macro.S b/arch/arm/mach-spear/include/mach/debug-macro.S
deleted file mode 100644 (file)
index 75b05ad..0000000
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * arch/arm/plat-spear/include/plat/debug-macro.S
- *
- * Debugging macro include header for spear platform
- *
- * Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#include <linux/amba/serial.h>
-#include <mach/spear.h>
-
-               .macro  addruart, rp, rv, tmp
-               mov     \rp, #SPEAR_DBG_UART_BASE               @ Physical base
-               mov     \rv, #VA_SPEAR_DBG_UART_BASE            @ Virtual base
-               .endm
-
-               .macro  senduart, rd, rx
-               strb    \rd, [\rx, #UART01x_DR]                 @ ASC_TX_BUFFER
-               .endm
-
-               .macro  waituart, rd, rx
-1001:          ldr     \rd, [\rx, #UART01x_FR]                 @ FLAG REGISTER
-               tst     \rd, #UART01x_FR_TXFF                   @ TX_FULL
-               bne     1001b
-               .endm
-
-               .macro  busyuart, rd, rx
-1002:          ldr     \rd, [\rx, #UART01x_FR]                 @ FLAG REGISTER
-               tst     \rd, #UART011_FR_TXFE                   @ TX_EMPTY
-               beq     1002b
-               .endm
index cf3a5369eeca0c79f52a4f52fd9aa783db8da21b..5cdc53d9b6533cc914471b2c09588e0d96210be5 100644 (file)
@@ -39,7 +39,6 @@
 
 /* Debug uart for linux, will be used for debug and uncompress messages */
 #define SPEAR_DBG_UART_BASE            SPEAR_ICM1_UART_BASE
-#define VA_SPEAR_DBG_UART_BASE         VA_SPEAR_ICM1_UART_BASE
 
 /* Sysctl base for spear platform */
 #define SPEAR_SYS_CTRL_BASE            SPEAR_ICM3_SYS_CTRL_BASE
@@ -86,7 +85,6 @@
 
 /* Debug uart for linux, will be used for debug and uncompress messages */
 #define SPEAR_DBG_UART_BASE                    UART_BASE
-#define VA_SPEAR_DBG_UART_BASE                 VA_UART_BASE
 
 #endif /* SPEAR13XX */
 
index bf9b6be5b18091bc3b94464f74502939b596ee40..fe1f3e26b88b114b2c47ecb082b358a6a220bea7 100644 (file)
@@ -4,7 +4,6 @@
 
 obj-y                          := cpu.o devices.o devices-common.o \
                                   id.o usb.o timer.o pm.o
-obj-$(CONFIG_CPU_IDLE)          += cpuidle.o
 obj-$(CONFIG_CACHE_L2X0)       += cache-l2x0.o
 obj-$(CONFIG_UX500_SOC_DB8500) += cpu-db8500.o devices-db8500.o
 obj-$(CONFIG_MACH_MOP500)      += board-mop500.o board-mop500-sdi.o \
diff --git a/arch/arm/mach-versatile/include/mach/debug-macro.S b/arch/arm/mach-versatile/include/mach/debug-macro.S
deleted file mode 100644 (file)
index d0fbd7f..0000000
+++ /dev/null
@@ -1,21 +0,0 @@
-/* arch/arm/mach-versatile/include/mach/debug-macro.S
- *
- * Debugging macro include header
- *
- *  Copyright (C) 1994-1999 Russell King
- *  Moved from linux/arch/arm/kernel/debug.S by Ben Dooks
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
-*/
-
-               .macro  addruart, rp, rv, tmp
-               mov     \rp,      #0x001F0000
-               orr     \rp, \rp, #0x00001000
-               orr     \rv, \rp, #0xf1000000   @ virtual base
-               orr     \rp, \rp,  #0x10000000  @ physical base
-               .endm
-
-#include <asm/hardware/debug-pl01x.S>
index db5c2cab8fda4251bb636aa5567afafe5cfdb406..cd2c88e7a8f7557bfe299a7a6b364c395a969428 100644 (file)
@@ -809,15 +809,18 @@ config KUSER_HELPERS
          the CPU type fitted to the system.  This permits binaries to be
          run on ARMv4 through to ARMv7 without modification.
 
+         See Documentation/arm/kernel_user_helpers.txt for details.
+
          However, the fixed address nature of these helpers can be used
          by ROP (return orientated programming) authors when creating
          exploits.
 
          If all of the binaries and libraries which run on your platform
          are built specifically for your platform, and make no use of
-         these helpers, then you can turn this option off.  However,
-         when such an binary or library is run, it will receive a SIGILL
-         signal, which will terminate the program.
+         these helpers, then you can turn this option off to hinder
+         such exploits. However, in that case, if a binary or library
+         relying on those helpers is run, it will receive a SIGILL signal,
+         which will terminate the program.
 
          Say N here only if you are absolutely certain that you do not
          need these helpers; otherwise, the safe option is to say Y.
index d70e0aba0c9d9b4a0d10a78c627e159b1bf90d9b..f6a4bb2d25510a4bea13bd820d4f73540f0874c4 100644 (file)
@@ -417,9 +417,9 @@ void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
                outer_cache.disable = l2x0_disable;
        }
 
-       printk(KERN_INFO "%s cache controller enabled\n", type);
-       printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d B\n",
-                       ways, cache_id, aux, l2x0_size);
+       pr_info("%s cache controller enabled\n", type);
+       pr_info("l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d kB\n",
+               ways, cache_id, aux, l2x0_size >> 10);
 }
 
 #ifdef CONFIG_OF
@@ -929,7 +929,9 @@ static const struct of_device_id l2x0_ids[] __initconst = {
          .data = (void *)&aurora_no_outer_data},
        { .compatible = "marvell,aurora-outer-cache",
          .data = (void *)&aurora_with_outer_data},
-       { .compatible = "bcm,bcm11351-a2-pl310-cache",
+       { .compatible = "brcm,bcm11351-a2-pl310-cache",
+         .data = (void *)&bcm_l2x0_data},
+       { .compatible = "bcm,bcm11351-a2-pl310-cache", /* deprecated name */
          .data = (void *)&bcm_l2x0_data},
        {}
 };
index 3d1e4a205b0b9325f0b8bc7bc231e735f06871e7..66781bf34077cb540a67f845eeeb919c2decfc4e 100644 (file)
  * of type casting from pmd_t * to pte_t *.
  */
 
-pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
-{
-       pgd_t *pgd;
-       pud_t *pud;
-       pmd_t *pmd = NULL;
-
-       pgd = pgd_offset(mm, addr);
-       if (pgd_present(*pgd)) {
-               pud = pud_offset(pgd, addr);
-               if (pud_present(*pud))
-                       pmd = pmd_offset(pud, addr);
-       }
-
-       return (pte_t *)pmd;
-}
-
 struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
                              int write)
 {
@@ -68,33 +52,6 @@ int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
        return 0;
 }
 
-pte_t *huge_pte_alloc(struct mm_struct *mm,
-                       unsigned long addr, unsigned long sz)
-{
-       pgd_t *pgd;
-       pud_t *pud;
-       pte_t *pte = NULL;
-
-       pgd = pgd_offset(mm, addr);
-       pud = pud_alloc(mm, pgd, addr);
-       if (pud)
-               pte = (pte_t *)pmd_alloc(mm, pud, addr);
-
-       return pte;
-}
-
-struct page *
-follow_huge_pmd(struct mm_struct *mm, unsigned long address,
-               pmd_t *pmd, int write)
-{
-       struct page *page;
-
-       page = pte_page(*(pte_t *)pmd);
-       if (page)
-               page += ((address & ~PMD_MASK) >> PAGE_SHIFT);
-       return page;
-}
-
 int pmd_huge(pmd_t pmd)
 {
        return pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT);
index 15225d829d7173b6169076ad063a388ceefe1248..0d3f0d860df39ca4f20dab0d5488b6c9e9fa414f 100644 (file)
@@ -77,7 +77,7 @@ static int __init parse_tag_initrd2(const struct tag *tag)
 __tagtable(ATAG_INITRD2, parse_tag_initrd2);
 
 #ifdef CONFIG_OF_FLATTREE
-void __init early_init_dt_setup_initrd_arch(unsigned long start, unsigned long end)
+void __init early_init_dt_setup_initrd_arch(u64 start, u64 end)
 {
        phys_initrd_start = start;
        phys_initrd_size = end - start;
@@ -231,7 +231,7 @@ static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole,
 }
 #endif
 
-void __init setup_dma_zone(struct machine_desc *mdesc)
+void __init setup_dma_zone(const struct machine_desc *mdesc)
 {
 #ifdef CONFIG_ZONE_DMA
        if (mdesc->dma_zone_size) {
@@ -335,7 +335,8 @@ phys_addr_t __init arm_memblock_steal(phys_addr_t size, phys_addr_t align)
        return phys;
 }
 
-void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc)
+void __init arm_memblock_init(struct meminfo *mi,
+       const struct machine_desc *mdesc)
 {
        int i;
 
index 53cdbd39ec8e23c1facab667f56fa9441aa870c6..b1d17eeb59b895cd429e762d082d6c5c56c3ff57 100644 (file)
@@ -1186,7 +1186,7 @@ void __init arm_mm_memblock_reserve(void)
  * called function.  This means you can't use any function or debugging
  * method which may touch any device, otherwise the kernel _will_ crash.
  */
-static void __init devicemaps_init(struct machine_desc *mdesc)
+static void __init devicemaps_init(const struct machine_desc *mdesc)
 {
        struct map_desc map;
        unsigned long addr;
@@ -1319,7 +1319,7 @@ static void __init map_lowmem(void)
  * paging_init() sets up the page tables, initialises the zone memory
  * maps, and sets up the zero page, bad page and bad page tables.
  */
-void __init paging_init(struct machine_desc *mdesc)
+void __init paging_init(const struct machine_desc *mdesc)
 {
        void *zero_page;
 
index 1fa50100ab6af1f57067af8ff06c4a05d1fcb3ff..34d4ab217babb39b563e493cb4d4643963d33a64 100644 (file)
@@ -299,7 +299,7 @@ void __init sanity_check_meminfo(void)
  * paging_init() sets up the page tables, initialises the zone memory
  * maps, and sets up the zero page, bad page and bad page tables.
  */
-void __init paging_init(struct machine_desc *mdesc)
+void __init paging_init(const struct machine_desc *mdesc)
 {
        early_trap_init((void *)CONFIG_VECTORS_BASE);
        mpu_setup();
index d5146b98c8d12bd59037017244bc0bf5ab9c4099..db79b62c92fb1c26ef777b54c4c8f49fe94047a6 100644 (file)
@@ -514,6 +514,32 @@ ENTRY(cpu_feroceon_set_pte_ext)
 #endif
        mov     pc, lr
 
+/* Suspend/resume support: taken from arch/arm/mm/proc-arm926.S */
+.globl cpu_feroceon_suspend_size
+.equ   cpu_feroceon_suspend_size, 4 * 3
+#ifdef CONFIG_ARM_CPU_SUSPEND
+ENTRY(cpu_feroceon_do_suspend)
+       stmfd   sp!, {r4 - r6, lr}
+       mrc     p15, 0, r4, c13, c0, 0  @ PID
+       mrc     p15, 0, r5, c3, c0, 0   @ Domain ID
+       mrc     p15, 0, r6, c1, c0, 0   @ Control register
+       stmia   r0, {r4 - r6}
+       ldmfd   sp!, {r4 - r6, pc}
+ENDPROC(cpu_feroceon_do_suspend)
+
+ENTRY(cpu_feroceon_do_resume)
+       mov     ip, #0
+       mcr     p15, 0, ip, c8, c7, 0   @ invalidate I+D TLBs
+       mcr     p15, 0, ip, c7, c7, 0   @ invalidate I+D caches
+       ldmia   r0, {r4 - r6}
+       mcr     p15, 0, r4, c13, c0, 0  @ PID
+       mcr     p15, 0, r5, c3, c0, 0   @ Domain ID
+       mcr     p15, 0, r1, c2, c0, 0   @ TTB address
+       mov     r0, r6                  @ control register
+       b       cpu_resume_mmu
+ENDPROC(cpu_feroceon_do_resume)
+#endif
+
        .type   __feroceon_setup, #function
 __feroceon_setup:
        mov     r0, #0
index 8e11e96eab5ef6dbb07f79c3dd747252401d5a52..c83f27b6bdda0eac2c566d40b4d99e5428be650b 100644 (file)
@@ -30,6 +30,8 @@
 #include <linux/platform_device.h>
 #include <linux/spi/pxa2xx_spi.h>
 #include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
 
 #include <asm/irq.h>
 #include <mach/hardware.h>
@@ -60,6 +62,30 @@ struct ssp_device *pxa_ssp_request(int port, const char *label)
 }
 EXPORT_SYMBOL(pxa_ssp_request);
 
+struct ssp_device *pxa_ssp_request_of(const struct device_node *of_node,
+                                     const char *label)
+{
+       struct ssp_device *ssp = NULL;
+
+       mutex_lock(&ssp_lock);
+
+       list_for_each_entry(ssp, &ssp_list, node) {
+               if (ssp->of_node == of_node && ssp->use_count == 0) {
+                       ssp->use_count++;
+                       ssp->label = label;
+                       break;
+               }
+       }
+
+       mutex_unlock(&ssp_lock);
+
+       if (&ssp->node == &ssp_list)
+               return NULL;
+
+       return ssp;
+}
+EXPORT_SYMBOL(pxa_ssp_request_of);
+
 void pxa_ssp_free(struct ssp_device *ssp)
 {
        mutex_lock(&ssp_lock);
@@ -72,96 +98,126 @@ void pxa_ssp_free(struct ssp_device *ssp)
 }
 EXPORT_SYMBOL(pxa_ssp_free);
 
+#ifdef CONFIG_OF
+static const struct of_device_id pxa_ssp_of_ids[] = {
+       { .compatible = "mrvl,pxa25x-ssp",      .data = (void *) PXA25x_SSP },
+       { .compatible = "mvrl,pxa25x-nssp",     .data = (void *) PXA25x_NSSP },
+       { .compatible = "mrvl,pxa27x-ssp",      .data = (void *) PXA27x_SSP },
+       { .compatible = "mrvl,pxa3xx-ssp",      .data = (void *) PXA3xx_SSP },
+       { .compatible = "mvrl,pxa168-ssp",      .data = (void *) PXA168_SSP },
+       { .compatible = "mrvl,pxa910-ssp",      .data = (void *) PXA910_SSP },
+       { .compatible = "mrvl,ce4100-ssp",      .data = (void *) CE4100_SSP },
+       { .compatible = "mrvl,lpss-ssp",        .data = (void *) LPSS_SSP },
+       { },
+};
+MODULE_DEVICE_TABLE(of, pxa_ssp_of_ids);
+#endif
+
 static int pxa_ssp_probe(struct platform_device *pdev)
 {
-       const struct platform_device_id *id = platform_get_device_id(pdev);
        struct resource *res;
        struct ssp_device *ssp;
-       int ret = 0;
+       struct device *dev = &pdev->dev;
 
-       ssp = kzalloc(sizeof(struct ssp_device), GFP_KERNEL);
-       if (ssp == NULL) {
-               dev_err(&pdev->dev, "failed to allocate memory");
+       ssp = devm_kzalloc(dev, sizeof(struct ssp_device), GFP_KERNEL);
+       if (ssp == NULL)
                return -ENOMEM;
-       }
-       ssp->pdev = pdev;
 
-       ssp->clk = clk_get(&pdev->dev, NULL);
-       if (IS_ERR(ssp->clk)) {
-               ret = PTR_ERR(ssp->clk);
-               goto err_free;
-       }
+       ssp->pdev = pdev;
 
-       res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
-       if (res == NULL) {
-               dev_err(&pdev->dev, "no SSP RX DRCMR defined\n");
-               ret = -ENODEV;
-               goto err_free_clk;
-       }
-       ssp->drcmr_rx = res->start;
+       ssp->clk = devm_clk_get(dev, NULL);
+       if (IS_ERR(ssp->clk))
+               return PTR_ERR(ssp->clk);
+
+       if (dev->of_node) {
+               struct of_phandle_args dma_spec;
+               struct device_node *np = dev->of_node;
+
+               /*
+                * FIXME: we should allocate the DMA channel from this
+                * context and pass the channel down to the ssp users.
+                * For now, we lookup the rx and tx indices manually
+                */
+
+               /* rx */
+               of_parse_phandle_with_args(np, "dmas", "#dma-cells",
+                                          0, &dma_spec);
+               ssp->drcmr_rx = dma_spec.args[0];
+               of_node_put(dma_spec.np);
+
+               /* tx */
+               of_parse_phandle_with_args(np, "dmas", "#dma-cells",
+                                          1, &dma_spec);
+               ssp->drcmr_tx = dma_spec.args[0];
+               of_node_put(dma_spec.np);
+       } else {
+               res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+               if (res == NULL) {
+                       dev_err(dev, "no SSP RX DRCMR defined\n");
+                       return -ENODEV;
+               }
+               ssp->drcmr_rx = res->start;
 
-       res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
-       if (res == NULL) {
-               dev_err(&pdev->dev, "no SSP TX DRCMR defined\n");
-               ret = -ENODEV;
-               goto err_free_clk;
+               res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
+               if (res == NULL) {
+                       dev_err(dev, "no SSP TX DRCMR defined\n");
+                       return -ENODEV;
+               }
+               ssp->drcmr_tx = res->start;
        }
-       ssp->drcmr_tx = res->start;
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (res == NULL) {
-               dev_err(&pdev->dev, "no memory resource defined\n");
-               ret = -ENODEV;
-               goto err_free_clk;
+               dev_err(dev, "no memory resource defined\n");
+               return -ENODEV;
        }
 
-       res = request_mem_region(res->start, resource_size(res),
-                       pdev->name);
+       res = devm_request_mem_region(dev, res->start, resource_size(res),
+                                     pdev->name);
        if (res == NULL) {
-               dev_err(&pdev->dev, "failed to request memory resource\n");
-               ret = -EBUSY;
-               goto err_free_clk;
+               dev_err(dev, "failed to request memory resource\n");
+               return -EBUSY;
        }
 
        ssp->phys_base = res->start;
 
-       ssp->mmio_base = ioremap(res->start, resource_size(res));
+       ssp->mmio_base = devm_ioremap(dev, res->start, resource_size(res));
        if (ssp->mmio_base == NULL) {
-               dev_err(&pdev->dev, "failed to ioremap() registers\n");
-               ret = -ENODEV;
-               goto err_free_mem;
+               dev_err(dev, "failed to ioremap() registers\n");
+               return -ENODEV;
        }
 
        ssp->irq = platform_get_irq(pdev, 0);
        if (ssp->irq < 0) {
-               dev_err(&pdev->dev, "no IRQ resource defined\n");
-               ret = -ENODEV;
-               goto err_free_io;
+               dev_err(dev, "no IRQ resource defined\n");
+               return -ENODEV;
+       }
+
+       if (dev->of_node) {
+               const struct of_device_id *id =
+                       of_match_device(of_match_ptr(pxa_ssp_of_ids), dev);
+               ssp->type = (int) id->data;
+       } else {
+               const struct platform_device_id *id =
+                       platform_get_device_id(pdev);
+               ssp->type = (int) id->driver_data;
+
+               /* PXA2xx/3xx SSP ports starts from 1 and the internal pdev->id
+                * starts from 0, do a translation here
+                */
+               ssp->port_id = pdev->id + 1;
        }
 
-       /* PXA2xx/3xx SSP ports starts from 1 and the internal pdev->id
-        * starts from 0, do a translation here
-        */
-       ssp->port_id = pdev->id + 1;
        ssp->use_count = 0;
-       ssp->type = (int)id->driver_data;
+       ssp->of_node = dev->of_node;
 
        mutex_lock(&ssp_lock);
        list_add(&ssp->node, &ssp_list);
        mutex_unlock(&ssp_lock);
 
        platform_set_drvdata(pdev, ssp);
-       return 0;
 
-err_free_io:
-       iounmap(ssp->mmio_base);
-err_free_mem:
-       release_mem_region(res->start, resource_size(res));
-err_free_clk:
-       clk_put(ssp->clk);
-err_free:
-       kfree(ssp);
-       return ret;
+       return 0;
 }
 
 static int pxa_ssp_remove(struct platform_device *pdev)
@@ -201,8 +257,9 @@ static struct platform_driver pxa_ssp_driver = {
        .probe          = pxa_ssp_probe,
        .remove         = pxa_ssp_remove,
        .driver         = {
-               .owner  = THIS_MODULE,
-               .name   = "pxa2xx-ssp",
+               .owner          = THIS_MODULE,
+               .name           = "pxa2xx-ssp",
+               .of_match_table = of_match_ptr(pxa_ssp_of_ids),
        },
        .id_table       = ssp_id_table,
 };
index 8d10dc8a1e17b34776a366f4b71fcca68c21bf8a..3e5d3115a2a6847ee41fc3d6d23ffde8fa0a7917 100644 (file)
 ENTRY(vfp_support_entry)
        DBGSTR3 "instr %08x pc %08x state %p", r0, r2, r10
 
+       ldr     r3, [sp, #S_PSR]        @ Neither lazy restore nor FP exceptions
+       and     r3, r3, #MODE_MASK      @ are supported in kernel mode
+       teq     r3, #USR_MODE
+       bne     vfp_kmode_exception     @ Returns through lr
+
        VFPFMRX r1, FPEXC               @ Is the VFP enabled?
        DBGSTR1 "fpexc %08x", r1
        tst     r1, #FPEXC_EN
index 5dfbb0b8e7f4484ddeb97974080a51bfaec0dc3e..52b8f40b1c73d48d206d497a6fadc381b5ddd888 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/init.h>
 #include <linux/uaccess.h>
 #include <linux/user.h>
+#include <linux/export.h>
 
 #include <asm/cp15.h>
 #include <asm/cputype.h>
@@ -648,6 +649,72 @@ static int vfp_hotplug(struct notifier_block *b, unsigned long action,
        return NOTIFY_OK;
 }
 
+void vfp_kmode_exception(void)
+{
+       /*
+        * If we reach this point, a floating point exception has been raised
+        * while running in kernel mode. If the NEON/VFP unit was enabled at the
+        * time, it means a VFP instruction has been issued that requires
+        * software assistance to complete, something which is not currently
+        * supported in kernel mode.
+        * If the NEON/VFP unit was disabled, and the location pointed to below
+        * is properly preceded by a call to kernel_neon_begin(), something has
+        * caused the task to be scheduled out and back in again. In this case,
+        * rebuilding and running with CONFIG_DEBUG_ATOMIC_SLEEP enabled should
+        * be helpful in localizing the problem.
+        */
+       if (fmrx(FPEXC) & FPEXC_EN)
+               pr_crit("BUG: unsupported FP instruction in kernel mode\n");
+       else
+               pr_crit("BUG: FP instruction issued in kernel mode with FP unit disabled\n");
+}
+
+#ifdef CONFIG_KERNEL_MODE_NEON
+
+/*
+ * Kernel-side NEON support functions
+ */
+void kernel_neon_begin(void)
+{
+       struct thread_info *thread = current_thread_info();
+       unsigned int cpu;
+       u32 fpexc;
+
+       /*
+        * Kernel mode NEON is only allowed outside of interrupt context
+        * with preemption disabled. This will make sure that the kernel
+        * mode NEON register contents never need to be preserved.
+        */
+       BUG_ON(in_interrupt());
+       cpu = get_cpu();
+
+       fpexc = fmrx(FPEXC) | FPEXC_EN;
+       fmxr(FPEXC, fpexc);
+
+       /*
+        * Save the userland NEON/VFP state. Under UP,
+        * the owner could be a task other than 'current'
+        */
+       if (vfp_state_in_hw(cpu, thread))
+               vfp_save_state(&thread->vfpstate, fpexc);
+#ifndef CONFIG_SMP
+       else if (vfp_current_hw_state[cpu] != NULL)
+               vfp_save_state(vfp_current_hw_state[cpu], fpexc);
+#endif
+       vfp_current_hw_state[cpu] = NULL;
+}
+EXPORT_SYMBOL(kernel_neon_begin);
+
+void kernel_neon_end(void)
+{
+       /* Disable the NEON/VFP unit. */
+       fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
+       put_cpu();
+}
+EXPORT_SYMBOL(kernel_neon_end);
+
+#endif /* CONFIG_KERNEL_MODE_NEON */
+
 /*
  * VFP support code initialisation.
  */
@@ -731,4 +798,4 @@ static int __init vfp_init(void)
        return 0;
 }
 
-late_initcall(vfp_init);
+core_initcall(vfp_init);
index 9737e97f9f382740422f5bd431adc35933f917c0..ae323a45c28c2dac8c18a71c2c1f7a526132536e 100644 (file)
@@ -96,6 +96,9 @@ config SWIOTLB
 config IOMMU_HELPER
        def_bool SWIOTLB
 
+config KERNEL_MODE_NEON
+       def_bool y
+
 source "init/Kconfig"
 
 source "kernel/Kconfig.freezer"
diff --git a/arch/arm64/include/asm/neon.h b/arch/arm64/include/asm/neon.h
new file mode 100644 (file)
index 0000000..b0cc58a
--- /dev/null
@@ -0,0 +1,14 @@
+/*
+ * linux/arch/arm64/include/asm/neon.h
+ *
+ * Copyright (C) 2013 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define cpu_has_neon()         (1)
+
+void kernel_neon_begin(void);
+void kernel_neon_end(void);
index 3c3ca7d361e4282d0dd533aeb5844bb7c202f724..5f101e63dfc16c0c88f0761d3693db5e6745d385 100644 (file)
@@ -16,6 +16,8 @@
 #ifndef __ASM_PGTABLE_2LEVEL_TYPES_H
 #define __ASM_PGTABLE_2LEVEL_TYPES_H
 
+#include <asm/types.h>
+
 typedef u64 pteval_t;
 typedef u64 pgdval_t;
 typedef pgdval_t pmdval_t;
index 4489615f14a90629872a33f48ede410094d4a9c9..4e94424938a4c3717304b6a86247101f5abd5f90 100644 (file)
@@ -16,6 +16,8 @@
 #ifndef __ASM_PGTABLE_3LEVEL_TYPES_H
 #define __ASM_PGTABLE_3LEVEL_TYPES_H
 
+#include <asm/types.h>
+
 typedef u64 pteval_t;
 typedef u64 pmdval_t;
 typedef u64 pgdval_t;
index e8b8357aedb42e1fb23d6c736a7fcd2ea5003429..1f2e4d5a5c0fd65ec86d5f487256dc7de9c4584e 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/init.h>
 #include <linux/sched.h>
 #include <linux/signal.h>
+#include <linux/hardirq.h>
 
 #include <asm/fpsimd.h>
 #include <asm/cputype.h>
@@ -83,6 +84,33 @@ void fpsimd_flush_thread(void)
        fpsimd_load_state(&current->thread.fpsimd_state);
 }
 
+#ifdef CONFIG_KERNEL_MODE_NEON
+
+/*
+ * Kernel-side NEON support functions
+ */
+void kernel_neon_begin(void)
+{
+       /* Avoid using the NEON in interrupt context */
+       BUG_ON(in_interrupt());
+       preempt_disable();
+
+       if (current->mm)
+               fpsimd_save_state(&current->thread.fpsimd_state);
+}
+EXPORT_SYMBOL(kernel_neon_begin);
+
+void kernel_neon_end(void)
+{
+       if (current->mm)
+               fpsimd_load_state(&current->thread.fpsimd_state);
+
+       preempt_enable();
+}
+EXPORT_SYMBOL(kernel_neon_end);
+
+#endif /* CONFIG_KERNEL_MODE_NEON */
+
 /*
  * FP/SIMD support code initialisation.
  */
index 53dcae49e72965cc663e6676802b5c174b41a4cf..7090c126797cb27e3db102e830d5de72c14c3310 100644 (file)
        .quad   TEXT_OFFSET                     // Image load offset from start of RAM
        .quad   0                               // reserved
        .quad   0                               // reserved
+       .quad   0                               // reserved
+       .quad   0                               // reserved
+       .quad   0                               // reserved
+       .byte   0x41                            // Magic number, "ARM\x64"
+       .byte   0x52
+       .byte   0x4d
+       .byte   0x64
+       .word   0                               // reserved
 
 ENTRY(stext)
        mov     x21, x0                         // x21=FDT
index 9ba33c40cdf8f841e974f68e599f0f97e87138ff..cea1594ff933e92e304a6996a4642f18f322487a 100644 (file)
@@ -107,7 +107,12 @@ armpmu_map_cache_event(const unsigned (*cache_map)
 static int
 armpmu_map_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
 {
-       int mapping = (*event_map)[config];
+       int mapping;
+
+       if (config >= PERF_COUNT_HW_MAX)
+               return -EINVAL;
+
+       mapping = (*event_map)[config];
        return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
 }
 
@@ -317,7 +322,13 @@ validate_event(struct pmu_hw_events *hw_events,
        struct hw_perf_event fake_event = event->hw;
        struct pmu *leader_pmu = event->group_leader->pmu;
 
-       if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF)
+       if (is_software_event(event))
+               return 1;
+
+       if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF)
+               return 1;
+
+       if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
                return 1;
 
        return armpmu->get_event_idx(hw_events, &fake_event) >= 0;
@@ -773,7 +784,7 @@ static const unsigned armv8_pmuv3_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 /*
  * PMXEVTYPER: Event selection reg
  */
-#define        ARMV8_EVTYPE_MASK       0xc00000ff      /* Mask for writable bits */
+#define        ARMV8_EVTYPE_MASK       0xc80000ff      /* Mask for writable bits */
 #define        ARMV8_EVTYPE_EVENT      0xff            /* Mask for EVENT bits */
 
 /*
index 67e8d7ce3fe7a68ac1c974058de83b8d6226086e..de2de5db628de2382e555b4256b9ad1f0f92ada5 100644 (file)
@@ -44,8 +44,7 @@ static unsigned long phys_initrd_size __initdata = 0;
 
 phys_addr_t memstart_addr __read_mostly = 0;
 
-void __init early_init_dt_setup_initrd_arch(unsigned long start,
-                                           unsigned long end)
+void __init early_init_dt_setup_initrd_arch(u64 start, u64 end)
 {
        phys_initrd_start = start;
        phys_initrd_size = end - start;
index 20388750d56447ee92324ee50cadfc0dacb5c89b..64919b0da7aa53a5464a4dff92452c5aa94b46b1 100644 (file)
@@ -58,7 +58,7 @@ static struct fb_monspecs __initdata atevklcd10x_default_monspecs = {
        .dclkmax                = 28330000,
 };
 
-static struct atmel_lcdfb_info __initdata atevklcd10x_lcdc_data = {
+static struct atmel_lcdfb_pdata __initdata atevklcd10x_lcdc_data = {
        .default_bpp            = 16,
        .default_dmacon         = ATMEL_LCDC_DMAEN | ATMEL_LCDC_DMA2DEN,
        .default_lcdcon2        = (ATMEL_LCDC_DISTYPE_TFT
@@ -96,7 +96,7 @@ static struct fb_monspecs __initdata atevklcd10x_default_monspecs = {
        .dclkmax                = 7000000,
 };
 
-static struct atmel_lcdfb_info __initdata atevklcd10x_lcdc_data = {
+static struct atmel_lcdfb_pdata __initdata atevklcd10x_lcdc_data = {
        .default_bpp            = 16,
        .default_dmacon         = ATMEL_LCDC_DMAEN | ATMEL_LCDC_DMA2DEN,
        .default_lcdcon2        = (ATMEL_LCDC_DISTYPE_TFT
@@ -134,7 +134,7 @@ static struct fb_monspecs __initdata atevklcd10x_default_monspecs = {
        .dclkmax                = 6400000,
 };
 
-static struct atmel_lcdfb_info __initdata atevklcd10x_lcdc_data = {
+static struct atmel_lcdfb_pdata __initdata atevklcd10x_lcdc_data = {
        .default_bpp            = 16,
        .default_dmacon         = ATMEL_LCDC_DMAEN | ATMEL_LCDC_DMA2DEN,
        .default_lcdcon2        = (ATMEL_LCDC_DISTYPE_TFT
@@ -145,7 +145,7 @@ static struct atmel_lcdfb_info __initdata atevklcd10x_lcdc_data = {
 };
 #endif
 
-static void atevklcd10x_lcdc_power_control(int on)
+static void atevklcd10x_lcdc_power_control(struct atmel_lcdfb_pdata *pdata, int on)
 {
        gpio_set_value(GPIO_PIN_PB(15), on);
 }
index 7de083d19b7ee0b5cf9f63a14f953b7311a1564e..1ba09e4c02b14d08c1bc0211866e43c1aec5735f 100644 (file)
@@ -83,7 +83,7 @@ static struct fb_monspecs __initdata lcd_fb_default_monspecs = {
        .dclkmax                = 9260000,
 };
 
-static struct atmel_lcdfb_info __initdata rmt_lcdc_data = {
+static struct atmel_lcdfb_pdata __initdata rmt_lcdc_data = {
        .default_bpp            = 24,
        .default_dmacon         = ATMEL_LCDC_DMAEN | ATMEL_LCDC_DMA2DEN,
        .default_lcdcon2        = (ATMEL_LCDC_DISTYPE_TFT
@@ -126,7 +126,7 @@ static struct fb_monspecs __initdata lcd_fb_default_monspecs = {
        .dclkmax                = 9260000,
 };
 
-static struct atmel_lcdfb_info __initdata rmt_lcdc_data = {
+static struct atmel_lcdfb_pdata __initdata rmt_lcdc_data = {
        .default_bpp            = 24,
        .default_dmacon         = ATMEL_LCDC_DMAEN | ATMEL_LCDC_DMA2DEN,
        .default_lcdcon2        = (ATMEL_LCDC_DISTYPE_TFT
index 9392d3252865a6886025167fe151e7356acf8f64..653cc09e536c7555272b2d2988f6db67ffa61ce9 100644 (file)
@@ -10,7 +10,7 @@
 #ifndef __ARCH_AVR32_BOARDS_ATSTK1000_ATSTK1000_H
 #define __ARCH_AVR32_BOARDS_ATSTK1000_ATSTK1000_H
 
-extern struct atmel_lcdfb_info atstk1000_lcdc_data;
+extern struct atmel_lcdfb_pdata atstk1000_lcdc_data;
 
 void atstk1000_setup_j2_leds(void);
 
index 2d6b560115d9fa19c250f52a0a6e7c27bea27e81..b6b88f5e0b43a6d1e8c330e512021ec27935e7d5 100644 (file)
@@ -55,7 +55,7 @@ static struct fb_monspecs __initdata atstk1000_default_monspecs = {
        .dclkmax                = 30000000,
 };
 
-struct atmel_lcdfb_info __initdata atstk1000_lcdc_data = {
+struct atmel_lcdfb_pdata __initdata atstk1000_lcdc_data = {
        .default_bpp            = 24,
        .default_dmacon         = ATMEL_LCDC_DMAEN | ATMEL_LCDC_DMA2DEN,
        .default_lcdcon2        = (ATMEL_LCDC_DISTYPE_TFT
index 27bd6fbe21cb5f76253ad88f50f4a12c13f9778e..7b1f2cd854008c16117cf5c39c06f4bd2115a9fa 100644 (file)
@@ -125,7 +125,7 @@ static struct fb_monspecs __initdata favr32_default_monspecs = {
        .dclkmax                = 28000000,
 };
 
-struct atmel_lcdfb_info __initdata favr32_lcdc_data = {
+struct atmel_lcdfb_pdata __initdata favr32_lcdc_data = {
        .default_bpp            = 16,
        .default_dmacon         = ATMEL_LCDC_DMAEN | ATMEL_LCDC_DMA2DEN,
        .default_lcdcon2        = (ATMEL_LCDC_DISTYPE_TFT
index 9d1efd1cd42534076307d59c1444a663c3b8764d..dc0e317f2ecdddc62c23939dd0863749c4491de8 100644 (file)
@@ -77,7 +77,7 @@ static struct fb_monspecs __initdata hammerhead_hda350t_monspecs = {
        .dclkmax                = 10000000,
 };
 
-struct atmel_lcdfb_info __initdata hammerhead_lcdc_data = {
+struct atmel_lcdfb_pdata __initdata hammerhead_lcdc_data = {
        .default_bpp            = 24,
        .default_dmacon         = ATMEL_LCDC_DMAEN | ATMEL_LCDC_DMA2DEN,
        .default_lcdcon2        = (ATMEL_LCDC_DISTYPE_TFT
index 85a543cd4abcf26c3e517975029b8b694f197999..e7683ee7ed408d8ebe46930bf9ee4cb6a0885e91 100644 (file)
@@ -45,7 +45,7 @@ static struct fb_monspecs merisc_fb_monspecs = {
        .dclkmax        = 30000000,
 };
 
-struct atmel_lcdfb_info merisc_lcdc_data = {
+struct atmel_lcdfb_pdata merisc_lcdc_data = {
        .default_bpp            = 24,
        .default_dmacon         = ATMEL_LCDC_DMAEN | ATMEL_LCDC_DMA2DEN,
        .default_lcdcon2        = (ATMEL_LCDC_DISTYPE_TFT
index 05358aa5ef7d210ed42d2bb6f68be48b3fa4c690..1cb8e9cc5cfaedb6d00ed8e589393531c37bf4ed 100644 (file)
@@ -8,7 +8,7 @@
  * published by the Free Software Foundation.
  */
 
-extern struct atmel_lcdfb_info mimc200_lcdc_data;
+extern struct atmel_lcdfb_pdata mimc200_lcdc_data;
 
 #include <linux/clk.h>
 #include <linux/etherdevice.h>
@@ -71,7 +71,7 @@ static struct fb_monspecs __initdata mimc200_default_monspecs = {
        .dclkmax                = 25200000,
 };
 
-struct atmel_lcdfb_info __initdata mimc200_lcdc_data = {
+struct atmel_lcdfb_pdata __initdata mimc200_lcdc_data = {
        .default_bpp            = 16,
        .default_dmacon         = ATMEL_LCDC_DMAEN | ATMEL_LCDC_DMA2DEN,
        .default_lcdcon2        = (ATMEL_LCDC_DISTYPE_TFT
index 7f8759a8a92a08b8ed8e772d9f08c9d3404d8697..a1f4d1e91b522e03f486b41759a1b25a2d44e36f 100644 (file)
@@ -1439,7 +1439,7 @@ fail:
  *  LCDC
  * -------------------------------------------------------------------- */
 #if defined(CONFIG_CPU_AT32AP7000) || defined(CONFIG_CPU_AT32AP7002)
-static struct atmel_lcdfb_info atmel_lcdfb0_data;
+static struct atmel_lcdfb_pdata atmel_lcdfb0_data;
 static struct resource atmel_lcdfb0_resource[] = {
        {
                .start          = 0xff000000,
@@ -1467,12 +1467,12 @@ static struct clk atmel_lcdfb0_pixclk = {
 };
 
 struct platform_device *__init
-at32_add_device_lcdc(unsigned int id, struct atmel_lcdfb_info *data,
+at32_add_device_lcdc(unsigned int id, struct atmel_lcdfb_pdata *data,
                     unsigned long fbmem_start, unsigned long fbmem_len,
                     u64 pin_mask)
 {
        struct platform_device *pdev;
-       struct atmel_lcdfb_info *info;
+       struct atmel_lcdfb_pdata *info;
        struct fb_monspecs *monspecs;
        struct fb_videomode *modedb;
        unsigned int modedb_size;
@@ -1529,7 +1529,7 @@ at32_add_device_lcdc(unsigned int id, struct atmel_lcdfb_info *data,
        }
 
        info = pdev->dev.platform_data;
-       memcpy(info, data, sizeof(struct atmel_lcdfb_info));
+       memcpy(info, data, sizeof(struct atmel_lcdfb_pdata));
        info->default_monspecs = monspecs;
 
        pdev->name = "at32ap-lcdfb";
@@ -1983,6 +1983,9 @@ at32_add_device_nand(unsigned int id, struct atmel_nand_data *data)
                                ARRAY_SIZE(smc_cs3_resource)))
                goto fail;
 
+       /* For at32ap7000, we use the reset workaround for nand driver */
+       data->need_reset_workaround = true;
+
        if (platform_device_add_data(pdev, data,
                                sizeof(struct atmel_nand_data)))
                goto fail;
index d485b0391357cab467faefab4e07ba20e0aad2ac..f1a316d52c738b0bac6653c876c3c7c817f28a66 100644 (file)
@@ -44,9 +44,9 @@ struct platform_device *
 at32_add_device_spi(unsigned int id, struct spi_board_info *b, unsigned int n);
 void at32_spi_setup_slaves(unsigned int bus_num, struct spi_board_info *b, unsigned int n);
 
-struct atmel_lcdfb_info;
+struct atmel_lcdfb_pdata;
 struct platform_device *
-at32_add_device_lcdc(unsigned int id, struct atmel_lcdfb_info *data,
+at32_add_device_lcdc(unsigned int id, struct atmel_lcdfb_pdata *data,
                     unsigned long fbmem_start, unsigned long fbmem_len,
                     u64 pin_mask);
 
index bdb56f09d0acc42eacf87398e48e08645ac727c8..287d0e64dfba58b955c10b4b8d4268140271c9c8 100644 (file)
@@ -33,8 +33,7 @@ void __init early_init_devtree(void *params)
 
 
 #ifdef CONFIG_BLK_DEV_INITRD
-void __init early_init_dt_setup_initrd_arch(unsigned long start,
-               unsigned long end)
+void __init early_init_dt_setup_initrd_arch(u64 start, u64 end)
 {
        initrd_start = (unsigned long)__va(start);
        initrd_end = (unsigned long)__va(end);
index 3201ddb8da6a039d3fd56b3fa13969648a004d08..c699d32598728552d81159815d5ec356f40b76b1 100644 (file)
@@ -99,9 +99,6 @@ config ETRAX_KMALLOCED_MODULES
        help
          Enable module allocation with kmalloc instead of vmalloc.
 
-config OOM_REBOOT
-       bool "Enable reboot at out of memory"
-
 source "kernel/Kconfig.preempt"
 
 source mm/Kconfig
@@ -175,12 +172,6 @@ config ETRAX_FLASH_BUSWIDTH
        help
          Width in bytes of the NOR Flash bus (1, 2 or 4). Is usually 2.
 
-config ETRAX_NANDFLASH_BUSWIDTH
-       int "Buswidth of NAND flash in bytes"
-       default "1"
-       help
-         Width in bytes of the NAND flash (1 or 2).
-
 config ETRAX_FLASH1_SIZE
        int "FLASH1 size (dec, in MB. 0 = Unknown)"
        default "0"
@@ -272,38 +263,6 @@ config ETRAX_AXISFLASHMAP
          This option enables MTD mapping of flash devices.  Needed to use
          flash memories.  If unsure, say Y.
 
-config ETRAX_RTC
-       bool "Real Time Clock support"
-       depends on ETRAX_I2C
-       help
-         Enables drivers for the Real-Time Clock battery-backed chips on
-         some products. The kernel reads the time when booting, and
-         the date can be set using ioctl(fd, RTC_SET_TIME, &rt) with rt a
-         rtc_time struct (see <file:arch/cris/include/asm/rtc.h>) on the
-         /dev/rtc device.  You can check the time with cat /proc/rtc, but
-         normal time reading should be done using libc function time and
-         friends.
-
-choice
-       prompt "RTC chip"
-       depends on ETRAX_RTC
-       default ETRAX_DS1302
-
-config ETRAX_DS1302
-       depends on ETRAX_ARCH_V10
-       bool "DS1302"
-       help
-         Enables the driver for the DS1302 Real-Time Clock battery-backed
-         chip on some products.
-
-config ETRAX_PCF8563
-       bool "PCF8563"
-       help
-         Enables the driver for the PCF8563 Real-Time Clock battery-backed
-         chip on some products.
-
-endchoice
-
 config ETRAX_SYNCHRONOUS_SERIAL
        bool "Synchronous serial-port support"
        help
@@ -578,26 +537,6 @@ config ETRAX_SERIAL_PORT3_DMA5_IN
        depends on ETRAX_ARCH_V10
        bool "DMA 5"
 
-config ETRAX_SERIAL_PORT3_DMA9_IN
-       bool "Ser3 uses DMA9 for input"
-       depends on ETRAXFS
-       help
-         Enables the DMA9 input channel for ser3 (ttyS3).
-         If you do not enable DMA, an interrupt for each character will be
-         used when receiving data.
-         Normally you want to use DMA, unless you use the DMA channel for
-         something else.
-
-config ETRAX_SERIAL_PORT3_DMA3_IN
-       bool "Ser3 uses DMA3 for input"
-       depends on CRIS_MACH_ARTPEC3
-       help
-         Enables the DMA3 input channel for ser3 (ttyS3).
-         If you do not enable DMA, an interrupt for each character will be
-         used when receiving data.
-         Normally you want to use DMA, unless you use the DMA channel for
-         something else.
-
 endchoice
 
 choice
@@ -615,26 +554,6 @@ config ETRAX_SERIAL_PORT3_DMA4_OUT
        depends on ETRAX_ARCH_V10
        bool "DMA 4"
 
-config ETRAX_SERIAL_PORT3_DMA8_OUT
-       bool "Ser3 uses DMA8 for output"
-       depends on ETRAXFS
-       help
-         Enables the DMA8 output channel for ser3 (ttyS3).
-         If you do not enable DMA, an interrupt for each character will be
-         used when transmitting data.
-         Normally you want to use DMA, unless you use the DMA channel for
-         something else.
-
-config ETRAX_SERIAL_PORT3_DMA2_OUT
-       bool "Ser3 uses DMA2 for output"
-       depends on CRIS_MACH_ARTPEC3
-       help
-         Enables the DMA2 output channel for ser3 (ttyS3).
-         If you do not enable DMA, an interrupt for each character will be
-         used when transmitting data.
-         Normally you want to use DMA, unless you use the DMA channel for
-         something else.
-
 endchoice
 
 endmenu
index daf5f19b61a12bd54e23acd09a0db76132228993..239dab0b95c131034a8aaba771bcb53c833be3e9 100644 (file)
@@ -417,16 +417,6 @@ config ETRAX_USB_HOST
           for CTRL and BULK traffic only, INTR traffic may work as well
           however (depending on the requirements of timeliness).
 
-config ETRAX_USB_HOST_PORT1
-       bool "USB port 1 enabled"
-       depends on ETRAX_USB_HOST
-       default n
-
-config ETRAX_USB_HOST_PORT2
-       bool "USB port 2 enabled"
-       depends on ETRAX_USB_HOST
-       default n
-
 config ETRAX_PTABLE_SECTOR
        int "Byte-offset of partition table sector"
        depends on ETRAX_AXISFLASHMAP
@@ -527,19 +517,6 @@ config ETRAX_GPIO
          Remember that you need to setup the port directions appropriately in
          the General configuration.
 
-config ETRAX_PA_BUTTON_BITMASK
-       hex "PA-buttons bitmask"
-       depends on ETRAX_GPIO
-       default "02"
-       help
-         This is a bitmask with information about what bits on PA that
-         are used for buttons.
-         Most products has a so called TEST button on PA1, if that's true
-         use 02 here.
-         Use 00 if there are no buttons on PA.
-         If the bitmask is <> 00 a button driver will be included in the gpio
-         driver. ETRAX general I/O support must be enabled.
-
 config ETRAX_PA_CHANGEABLE_DIR
        hex "PA user changeable dir mask"
        depends on ETRAX_GPIO
@@ -580,51 +557,4 @@ config ETRAX_PB_CHANGEABLE_BITS
          Bit set = changeable.
          You probably want 00 here.
 
-config ETRAX_DS1302_RST_ON_GENERIC_PORT
-       bool "DS1302 RST on Generic Port"
-       depends on ETRAX_DS1302
-       help
-         If your product has the RST signal line for the DS1302 RTC on the
-         Generic Port then say Y here, otherwise leave it as N in which
-         case the RST signal line is assumed to be connected to Port PB
-         (just like the SCL and SDA lines).
-
-config ETRAX_DS1302_RSTBIT
-       int "DS1302 RST bit number"
-       depends on ETRAX_DS1302
-       default "2"
-       help
-         This is the bit number for the RST signal line of the DS1302 RTC on
-         the selected port. If you have selected the generic port then it
-         should be bit 27, otherwise your best bet is bit 5.
-
-config ETRAX_DS1302_SCLBIT
-       int "DS1302 SCL bit number"
-       depends on ETRAX_DS1302
-       default "1"
-       help
-         This is the bit number for the SCL signal line of the DS1302 RTC on
-         Port PB. This is probably best left at 3.
-
-config ETRAX_DS1302_SDABIT
-       int "DS1302 SDA bit number"
-       depends on ETRAX_DS1302
-       default "0"
-       help
-         This is the bit number for the SDA signal line of the DS1302 RTC on
-         Port PB. This is probably best left at 2.
-
-config ETRAX_DS1302_TRICKLE_CHARGE
-       int "DS1302 Trickle charger value"
-       depends on ETRAX_DS1302
-       default "0"
-       help
-         This controls the initial value of the trickle charge register.
-         0 = disabled (use this if you are unsure or have a non rechargeable battery)
-         Otherwise the following values can be OR:ed together to control the
-         charge current:
-         1 = 2kohm, 2 = 4kohm, 3 = 4kohm
-         4 = 1 diode, 8 = 2 diodes
-         Allowed values are (increasing current): 0, 11, 10, 9, 7, 6, 5
-
 endif
index 44bf2e88c26e49a7308bcec4df4da97297bfa6c7..e5c13183b97c99fd52ad454cf2dfd7593bcb6c7d 100644 (file)
@@ -6,7 +6,5 @@ obj-$(CONFIG_ETRAX_AXISFLASHMAP)        += axisflashmap.o
 obj-$(CONFIG_ETRAX_I2C)                        += i2c.o
 obj-$(CONFIG_ETRAX_I2C_EEPROM)         += eeprom.o
 obj-$(CONFIG_ETRAX_GPIO)               += gpio.o
-obj-$(CONFIG_ETRAX_DS1302)             += ds1302.o
-obj-$(CONFIG_ETRAX_PCF8563)            += pcf8563.o
 obj-$(CONFIG_ETRAX_SYNCHRONOUS_SERIAL) += sync_serial.o
 
index 1d866d3ee2f85329bfbd4b94130c6ccaf1035804..6792503aaf79effd5adfcf50591d0978de6c729c 100644 (file)
@@ -19,64 +19,6 @@ config ETRAX_NO_PHY
          switch. This option should normally be disabled. If enabled,
          speed and duplex will be locked to 100 Mbit and full duplex.
 
-config ETRAX_ETHERNET_IFACE0
-       depends on ETRAX_ETHERNET
-       bool "Enable network interface 0"
-
-config ETRAX_ETHERNET_IFACE1
-       depends on (ETRAX_ETHERNET && ETRAXFS)
-       bool "Enable network interface 1 (uses DMA6 and DMA7)"
-
-config ETRAX_ETHERNET_GBIT
-       depends on (ETRAX_ETHERNET && CRIS_MACH_ARTPEC3)
-       bool "Enable gigabit Ethernet support"
-
-choice
-       prompt "Eth0 led group"
-       depends on ETRAX_ETHERNET_IFACE0
-       default ETRAX_ETH0_USE_LEDGRP0
-
-config ETRAX_ETH0_USE_LEDGRP0
-       bool "Use LED grp 0"
-       depends on ETRAX_NBR_LED_GRP_ONE || ETRAX_NBR_LED_GRP_TWO
-       help
-         Use LED grp 0 for eth0
-
-config ETRAX_ETH0_USE_LEDGRP1
-       bool "Use LED grp 1"
-       depends on ETRAX_NBR_LED_GRP_TWO
-       help
-         Use LED grp 1 for eth0
-
-config ETRAX_ETH0_USE_LEDGRPNULL
-       bool "Use no LEDs for eth0"
-       help
-         Use no LEDs for eth0
-endchoice
-
-choice
-       prompt "Eth1 led group"
-       depends on ETRAX_ETHERNET_IFACE1
-       default ETRAX_ETH1_USE_LEDGRP1
-
-config ETRAX_ETH1_USE_LEDGRP0
-       bool "Use LED grp 0"
-       depends on ETRAX_NBR_LED_GRP_ONE || ETRAX_NBR_LED_GRP_TWO
-       help
-         Use LED grp 0 for eth1
-
-config ETRAX_ETH1_USE_LEDGRP1
-       bool "Use LED grp 1"
-       depends on ETRAX_NBR_LED_GRP_TWO
-       help
-         Use LED grp 1 for eth1
-
-config ETRAX_ETH1_USE_LEDGRPNULL
-       bool "Use no LEDs for eth1"
-       help
-         Use no LEDs for eth1
-endchoice
-
 config ETRAXFS_SERIAL
        bool "Serial-port support"
        depends on ETRAX_ARCH_V32
@@ -108,261 +50,24 @@ config ETRAX_SERIAL_PORT0
          if you do not need DMA to something else.
          ser0 can use dma4 or dma6 for output and dma5 or dma7 for input.
 
-choice
-       prompt "Ser0 default port type "
-       depends on ETRAX_SERIAL_PORT0
-       default ETRAX_SERIAL_PORT0_TYPE_232
-       help
-         Type of serial port.
-
-config ETRAX_SERIAL_PORT0_TYPE_232
-       bool "Ser0 is a RS-232 port"
-       help
-         Configure serial port 0 to be a RS-232 port.
-
-config ETRAX_SERIAL_PORT0_TYPE_485HD
-       bool "Ser0 is a half duplex RS-485 port"
-       depends on ETRAX_RS485
-       help
-         Configure serial port 0 to be a half duplex (two wires) RS-485 port.
-
-config ETRAX_SERIAL_PORT0_TYPE_485FD
-       bool "Ser0 is a full duplex RS-485 port"
-       depends on ETRAX_RS485
-       help
-         Configure serial port 0 to be a full duplex (four wires) RS-485 port.
-endchoice
-
-config ETRAX_SER0_DTR_BIT
-       string "Ser 0 DTR bit (empty = not used)"
-       depends on ETRAX_SERIAL_PORT0
-
-config ETRAX_SER0_RI_BIT
-       string "Ser 0 RI bit (empty = not used)"
-       depends on ETRAX_SERIAL_PORT0
-
-config ETRAX_SER0_DSR_BIT
-       string "Ser 0 DSR bit (empty = not used)"
-       depends on ETRAX_SERIAL_PORT0
-
-config ETRAX_SER0_CD_BIT
-       string "Ser 0 CD bit (empty = not used)"
-       depends on ETRAX_SERIAL_PORT0
-
 config ETRAX_SERIAL_PORT1
        bool "Serial port 1 enabled"
        depends on ETRAXFS_SERIAL
        help
          Enables the ETRAX FS serial driver for ser1 (ttyS1).
 
-choice
-       prompt "Ser1 default port type"
-       depends on ETRAX_SERIAL_PORT1
-       default ETRAX_SERIAL_PORT1_TYPE_232
-       help
-         Type of serial port.
-
-config ETRAX_SERIAL_PORT1_TYPE_232
-       bool "Ser1 is a RS-232 port"
-       help
-         Configure serial port 1 to be a RS-232 port.
-
-config ETRAX_SERIAL_PORT1_TYPE_485HD
-       bool "Ser1 is a half duplex RS-485 port"
-       depends on ETRAX_RS485
-       help
-         Configure serial port 1 to be a half duplex (two wires) RS-485 port.
-
-config ETRAX_SERIAL_PORT1_TYPE_485FD
-       bool "Ser1 is a full duplex RS-485 port"
-       depends on ETRAX_RS485
-       help
-         Configure serial port 1 to be a full duplex (four wires) RS-485 port.
-endchoice
-
-config ETRAX_SER1_DTR_BIT
-       string "Ser 1 DTR bit (empty = not used)"
-       depends on ETRAX_SERIAL_PORT1
-
-config ETRAX_SER1_RI_BIT
-       string "Ser 1 RI bit (empty = not used)"
-       depends on ETRAX_SERIAL_PORT1
-
-config ETRAX_SER1_DSR_BIT
-       string "Ser 1 DSR bit (empty = not used)"
-       depends on ETRAX_SERIAL_PORT1
-
-config ETRAX_SER1_CD_BIT
-       string "Ser 1 CD bit (empty = not used)"
-       depends on ETRAX_SERIAL_PORT1
-
 config ETRAX_SERIAL_PORT2
        bool "Serial port 2 enabled"
        depends on ETRAXFS_SERIAL
        help
          Enables the ETRAX FS serial driver for ser2 (ttyS2).
 
-choice
-       prompt "Ser2 default port type"
-       depends on ETRAX_SERIAL_PORT2
-       default ETRAX_SERIAL_PORT2_TYPE_232
-       help
-         What DMA channel to use for ser2
-
-config ETRAX_SERIAL_PORT2_TYPE_232
-       bool "Ser2 is a RS-232 port"
-       help
-         Configure serial port 2 to be a RS-232 port.
-
-config ETRAX_SERIAL_PORT2_TYPE_485HD
-       bool "Ser2 is a half duplex RS-485 port"
-       depends on ETRAX_RS485
-       help
-         Configure serial port 2 to be a half duplex (two wires) RS-485 port.
-
-config ETRAX_SERIAL_PORT2_TYPE_485FD
-       bool "Ser2 is a full duplex RS-485 port"
-       depends on ETRAX_RS485
-       help
-         Configure serial port 2 to be a full duplex (four wires) RS-485 port.
-endchoice
-
-
-config ETRAX_SER2_DTR_BIT
-       string "Ser 2 DTR bit (empty = not used)"
-       depends on ETRAX_SERIAL_PORT2
-
-config ETRAX_SER2_RI_BIT
-       string "Ser 2 RI bit (empty = not used)"
-       depends on ETRAX_SERIAL_PORT2
-
-config ETRAX_SER2_DSR_BIT
-       string "Ser 2 DSR bit (empty = not used)"
-       depends on ETRAX_SERIAL_PORT2
-
-config ETRAX_SER2_CD_BIT
-       string "Ser 2 CD bit (empty = not used)"
-       depends on ETRAX_SERIAL_PORT2
-
 config ETRAX_SERIAL_PORT3
        bool "Serial port 3 enabled"
        depends on ETRAXFS_SERIAL
        help
          Enables the ETRAX FS serial driver for ser3 (ttyS3).
 
-choice
-       prompt "Ser3 default port type"
-       depends on ETRAX_SERIAL_PORT3
-       default ETRAX_SERIAL_PORT3_TYPE_232
-       help
-         What DMA channel to use for ser3.
-
-config ETRAX_SERIAL_PORT3_TYPE_232
-       bool "Ser3 is a RS-232 port"
-       help
-         Configure serial port 3 to be a RS-232 port.
-
-config ETRAX_SERIAL_PORT3_TYPE_485HD
-       bool "Ser3 is a half duplex RS-485 port"
-       depends on ETRAX_RS485
-       help
-         Configure serial port 3 to be a half duplex (two wires) RS-485 port.
-
-config ETRAX_SERIAL_PORT3_TYPE_485FD
-       bool "Ser3 is a full duplex RS-485 port"
-       depends on ETRAX_RS485
-       help
-         Configure serial port 3 to be a full duplex (four wires) RS-485 port.
-endchoice
-
-config ETRAX_SER3_DTR_BIT
-       string "Ser 3 DTR bit (empty = not used)"
-       depends on ETRAX_SERIAL_PORT3
-
-config ETRAX_SER3_RI_BIT
-       string "Ser 3 RI bit (empty = not used)"
-       depends on ETRAX_SERIAL_PORT3
-
-config ETRAX_SER3_DSR_BIT
-       string "Ser 3 DSR bit (empty = not used)"
-       depends on ETRAX_SERIAL_PORT3
-
-config ETRAX_SER3_CD_BIT
-       string "Ser 3 CD bit (empty = not used)"
-       depends on ETRAX_SERIAL_PORT3
-
-config ETRAX_SERIAL_PORT4
-       bool "Serial port 4 enabled"
-       depends on ETRAXFS_SERIAL && CRIS_MACH_ARTPEC3
-       help
-         Enables the ETRAX FS serial driver for ser4 (ttyS4).
-
-choice
-       prompt "Ser4 default port type"
-       depends on ETRAX_SERIAL_PORT4
-       default ETRAX_SERIAL_PORT4_TYPE_232
-       help
-         What DMA channel to use for ser4.
-
-config ETRAX_SERIAL_PORT4_TYPE_232
-       bool "Ser4 is a RS-232 port"
-       help
-         Configure serial port 4 to be a RS-232 port.
-
-config ETRAX_SERIAL_PORT4_TYPE_485HD
-       bool "Ser4 is a half duplex RS-485 port"
-       depends on ETRAX_RS485
-       help
-         Configure serial port 4 to be a half duplex (two wires) RS-485 port.
-
-config ETRAX_SERIAL_PORT4_TYPE_485FD
-       bool "Ser4 is a full duplex RS-485 port"
-       depends on ETRAX_RS485
-       help
-         Configure serial port 4 to be a full duplex (four wires) RS-485 port.
-endchoice
-
-choice
-       prompt "Ser4 DMA in channel "
-       depends on ETRAX_SERIAL_PORT4
-       default ETRAX_SERIAL_PORT4_NO_DMA_IN
-       help
-         What DMA channel to use for ser4.
-
-
-config ETRAX_SERIAL_PORT4_NO_DMA_IN
-       bool "Ser4 uses no DMA for input"
-       help
-         Do not use DMA for ser4 input.
-
-config ETRAX_SERIAL_PORT4_DMA9_IN
-       bool "Ser4 uses DMA9 for input"
-       depends on ETRAX_SERIAL_PORT4
-       help
-         Enables the DMA9 input channel for ser4 (ttyS4).
-         If you do not enable DMA, an interrupt for each character will be
-         used when receiving data.
-         Normally you want to use DMA, unless you use the DMA channel for
-         something else.
-
-endchoice
-
-config ETRAX_SER4_DTR_BIT
-       string "Ser 4 DTR bit (empty = not used)"
-       depends on ETRAX_SERIAL_PORT4
-
-config ETRAX_SER4_RI_BIT
-       string "Ser 4 RI bit (empty = not used)"
-       depends on ETRAX_SERIAL_PORT4
-
-config ETRAX_SER4_DSR_BIT
-       string "Ser 4 DSR bit (empty = not used)"
-       depends on ETRAX_SERIAL_PORT4
-
-config ETRAX_SER4_CD_BIT
-       string "Ser 4 CD bit (empty = not used)"
-       depends on ETRAX_SERIAL_PORT4
-
 config ETRAX_SYNCHRONOUS_SERIAL
        bool "Synchronous serial-port support"
        depends on ETRAX_ARCH_V32
@@ -703,32 +408,6 @@ config ETRAX_SPI_SSER0
          want to build it as a module, which will be named spi_crisv32_sser.
          (You need to select MMC separately.)
 
-config ETRAX_SPI_SSER0_DMA
-       bool "DMA for SPI on sser0 enabled"
-       depends on ETRAX_SPI_SSER0
-       depends on !ETRAX_SERIAL_PORT1_DMA4_OUT && !ETRAX_SERIAL_PORT1_DMA5_IN
-       default y
-       help
-         Say Y if using DMA (dma4/dma5) for SPI on synchronous serial port 0.
-
-config ETRAX_SPI_MMC_CD_SSER0_PIN
-       string "MMC/SD card detect pin for SPI on sser0"
-       depends on ETRAX_SPI_SSER0 && MMC_SPI
-       default "pd11"
-       help
-         The pin to use for SD/MMC card detect.  This pin should be pulled up
-         and grounded when a card is present.  If defined as " " (space), no
-         pin is selected.  A card must then always be inserted for proper
-         action.
-
-config ETRAX_SPI_MMC_WP_SSER0_PIN
-       string "MMC/SD card write-protect pin for SPI on sser0"
-       depends on ETRAX_SPI_SSER0 && MMC_SPI
-       default "pd10"
-       help
-         The pin to use for the SD/MMC write-protect signal for a memory
-         card.  If defined as " " (space), the card is considered writable.
-
 config ETRAX_SPI_SSER1
        tristate "SPI using synchronous serial port 1 (sser1)"
        depends on ETRAX_SPI_MMC
@@ -742,32 +421,6 @@ config ETRAX_SPI_SSER1
          want to build it as a module, which will be named spi_crisv32_sser.
          (You need to select MMC separately.)
 
-config ETRAX_SPI_SSER1_DMA
-       bool "DMA for SPI on sser1 enabled"
-       depends on ETRAX_SPI_SSER1 && !ETRAX_ETHERNET_IFACE1
-       depends on !ETRAX_SERIAL_PORT0_DMA6_OUT && !ETRAX_SERIAL_PORT0_DMA7_IN
-       default y
-       help
-         Say Y if using DMA (dma6/dma7) for SPI on synchronous serial port 1.
-
-config ETRAX_SPI_MMC_CD_SSER1_PIN
-       string "MMC/SD card detect pin for SPI on sser1"
-       depends on ETRAX_SPI_SSER1 && MMC_SPI
-       default "pd12"
-       help
-         The pin to use for SD/MMC card detect.  This pin should be pulled up
-         and grounded when a card is present.  If defined as " " (space), no
-         pin is selected.  A card must then always be inserted for proper
-         action.
-
-config ETRAX_SPI_MMC_WP_SSER1_PIN
-       string "MMC/SD card write-protect pin for SPI on sser1"
-       depends on ETRAX_SPI_SSER1 && MMC_SPI
-       default "pd9"
-       help
-         The pin to use for the SD/MMC write-protect signal for a memory
-         card.  If defined as " " (space), the card is considered writable.
-
 config ETRAX_SPI_GPIO
        tristate "Bitbanged SPI using gpio pins"
        depends on ETRAX_SPI_MMC
@@ -782,51 +435,4 @@ config ETRAX_SPI_GPIO
          Say m to build it as a module, which will be called spi_crisv32_gpio.
          (You need to select MMC separately.)
 
-# The default match that of sser0, only because that's how it was tested.
-config ETRAX_SPI_CS_PIN
-       string "SPI chip select pin"
-       depends on ETRAX_SPI_GPIO
-       default "pc3"
-       help
-         The pin to use for SPI chip select.
-
-config ETRAX_SPI_CLK_PIN
-       string "SPI clock pin"
-       depends on ETRAX_SPI_GPIO
-       default "pc1"
-       help
-         The pin to use for the SPI clock.
-
-config ETRAX_SPI_DATAIN_PIN
-       string "SPI MISO (data in) pin"
-       depends on ETRAX_SPI_GPIO
-       default "pc16"
-       help
-         The pin to use for SPI data in from the device.
-
-config ETRAX_SPI_DATAOUT_PIN
-       string "SPI MOSI (data out) pin"
-       depends on ETRAX_SPI_GPIO
-       default "pc0"
-       help
-         The pin to use for SPI data out to the device.
-
-config ETRAX_SPI_MMC_CD_GPIO_PIN
-       string "MMC/SD card detect pin for SPI using gpio (space for none)"
-       depends on ETRAX_SPI_GPIO && MMC_SPI
-       default "pd11"
-       help
-         The pin to use for SD/MMC card detect.  This pin should be pulled up
-         and grounded when a card is present.  If defined as " " (space), no
-         pin is selected.  A card must then always be inserted for proper
-         action.
-
-config ETRAX_SPI_MMC_WP_GPIO_PIN
-       string "MMC/SD card write-protect pin for SPI using gpio (space for none)"
-       depends on ETRAX_SPI_GPIO && MMC_SPI
-       default "pd10"
-       help
-         The pin to use for the SD/MMC write-protect signal for a memory
-         card.  If defined as " " (space), the card is considered writable.
-
 endif
index 7796aafc711e6582f006089c86d42ef6592dbcf4..87547271a595fca051e42e998a19f853a50a1a8f 100644 (file)
@@ -15,10 +15,6 @@ config ETRAX_SERIAL_PORTS
        int
        default 5
 
-config ETRAX_DDR
-       bool
-       default y
-
 config ETRAX_DDR2_MRS
        hex "DDR2 MRS"
        default "0"
index c0a29b96b92b032fdcdddb47702e3383724781e8..15b815df29c165809c4e6e229ede6a077d9e8e71 100644 (file)
@@ -47,7 +47,6 @@ struct task_struct;
  */
 
 #define task_pt_regs(task) user_regs(task_thread_info(task))
-#define current_regs() task_pt_regs(current)
 
 unsigned long get_wchan(struct task_struct *p);
 
diff --git a/arch/cris/include/uapi/asm/kvm_para.h b/arch/cris/include/uapi/asm/kvm_para.h
new file mode 100644 (file)
index 0000000..14fab8f
--- /dev/null
@@ -0,0 +1 @@
+#include <asm-generic/kvm_para.h>
index 0aa35f0eb0dbfa0cc5fc34c443645ae484c68dc3..deb67843693cddbf94ce2c2044999730a9425845 100644 (file)
@@ -320,7 +320,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pci_fixup_ide_bases);
  *  are examined.
  */
 
-void __init pcibios_fixup_bus(struct pci_bus *bus)
+void pcibios_fixup_bus(struct pci_bus *bus)
 {
 #if 0
        printk("### PCIBIOS_FIXUP_BUS(%d)\n",bus->number);
index 1ed4c8fedb8370e7b8403e36244b2fce043e0aae..185d3d18d0ec08d8319a317432074de0a8abca54 100644 (file)
@@ -7,6 +7,6 @@
 /* Use normal IO mappings for DMI */
 #define dmi_ioremap ioremap
 #define dmi_iounmap(x,l) iounmap(x)
-#define dmi_alloc(l) kmalloc(l, GFP_ATOMIC)
+#define dmi_alloc(l) kzalloc(l, GFP_ATOMIC)
 
 #endif
index bac1639bc3207acf97192e3300e6a961eca10fa7..314458b9933a1f895975a468328e6a1e92f234c0 100644 (file)
@@ -11,8 +11,7 @@ Elf64_Half elf_core_extra_phdrs(void)
        return GATE_EHDR->e_phnum;
 }
 
-int elf_core_write_extra_phdrs(struct file *file, loff_t offset, size_t *size,
-                              unsigned long limit)
+int elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset)
 {
        const struct elf_phdr *const gate_phdrs =
                (const struct elf_phdr *) (GATE_ADDR + GATE_EHDR->e_phoff);
@@ -35,15 +34,13 @@ int elf_core_write_extra_phdrs(struct file *file, loff_t offset, size_t *size,
                        phdr.p_offset += ofs;
                }
                phdr.p_paddr = 0; /* match other core phdrs */
-               *size += sizeof(phdr);
-               if (*size > limit || !dump_write(file, &phdr, sizeof(phdr)))
+               if (!dump_emit(cprm, &phdr, sizeof(phdr)))
                        return 0;
        }
        return 1;
 }
 
-int elf_core_write_extra_data(struct file *file, size_t *size,
-                             unsigned long limit)
+int elf_core_write_extra_data(struct coredump_params *cprm)
 {
        const struct elf_phdr *const gate_phdrs =
                (const struct elf_phdr *) (GATE_ADDR + GATE_EHDR->e_phoff);
@@ -53,9 +50,7 @@ int elf_core_write_extra_data(struct file *file, size_t *size,
                if (gate_phdrs[i].p_type == PT_LOAD) {
                        void *addr = (void *)gate_phdrs[i].p_vaddr;
                        size_t memsz = PAGE_ALIGN(gate_phdrs[i].p_memsz);
-
-                       *size += memsz;
-                       if (*size > limit || !dump_write(file, addr, memsz))
+                       if (!dump_emit(cprm, addr, memsz))
                                return 0;
                        break;
                }
index 821170e5f6ed4e29e2135af409258d60d7fb4b4f..c3cda41af801487e1637160ce8e5c4854f3cc713 100644 (file)
@@ -11,6 +11,7 @@ config M68K
        select VIRT_TO_BUS
        select ARCH_HAVE_NMI_SAFE_CMPXCHG if RMW_INSNS
        select GENERIC_CPU_DEVICES
+       select GENERIC_IOMAP
        select GENERIC_STRNCPY_FROM_USER if MMU
        select GENERIC_STRNLEN_USER if MMU
        select FPU if MMU
@@ -72,7 +73,6 @@ source "kernel/Kconfig.freezer"
 config MMU
        bool "MMU-based Paged Memory Management Support"
        default y
-       select GENERIC_IOMAP
        help
          Select if you want MMU-based virtualised addressing space
          support by paged memory management. If unsure, say 'Y'.
index b9ab0a69561cac3de87657e06700f8c0cf456413..61dc643c0b05ca458d3a6370756ec9d4cd72771e 100644 (file)
@@ -150,18 +150,6 @@ config XCOPILOT_BUGS
        help
          Support the bugs of Xcopilot.
 
-config UC5272
-       bool "Arcturus Networks uC5272 dimm board support"
-       depends on M5272
-       help
-         Support for the Arcturus Networks uC5272 dimm board.
-
-config UC5282
-       bool "Arcturus Networks uC5282 board support"
-       depends on M528x
-       help
-         Support for the Arcturus Networks uC5282 dimm board.
-
 config UCSIMM
        bool "uCsimm module support"
        depends on M68EZ328
@@ -205,23 +193,15 @@ config UCQUICC
        help
          Support for the Lineo uCquicc board.
 
-config ARNEWSH
-       bool
-
 config ARN5206
        bool "Arnewsh 5206 board support"
        depends on M5206
-       select ARNEWSH
        help
          Support for the Arnewsh 5206 board.
 
-config FREESCALE
-       bool
-
 config M5206eC3
        bool "Motorola M5206eC3 board support"
        depends on M5206e
-       select FREESCALE
        help
          Support for the Motorola M5206eC3 board.
 
@@ -231,88 +211,24 @@ config ELITE
        help
          Support for the Motorola M5206eLITE board.
 
-config M5208EVB
-       bool "Freescale M5208EVB board support"
-       depends on M520x
-       select FREESCALE
-       help
-         Support for the Freescale Coldfire M5208EVB.
-
 config M5235EVB
        bool "Freescale M5235EVB support"
        depends on M523x
-       select FREESCALE
        help
          Support for the Freescale M5235EVB board.
 
 config M5249C3
        bool "Motorola M5249C3 board support"
        depends on M5249
-       select FREESCALE
        help
          Support for the Motorola M5249C3 board.
 
-config M5271EVB
-       bool "Freescale (Motorola) M5271EVB board support"
-       depends on M5271
-       select FREESCALE
-       help
-         Support for the Freescale (Motorola) M5271EVB board.
-
-config M5275EVB
-       bool "Freescale (Motorola) M5275EVB board support"
-       depends on M5275
-       select FREESCALE
-       help
-         Support for the Freescale (Motorola) M5275EVB board.
-
 config M5272C3
        bool "Motorola M5272C3 board support"
        depends on M5272
-       select FREESCALE
        help
          Support for the Motorola M5272C3 board.
 
-config senTec
-       bool
-
-config COBRA5272
-       bool "senTec COBRA5272 board support"
-       depends on M5272
-       select senTec
-       help
-         Support for the senTec COBRA5272 board.
-
-config AVNET
-       bool
-
-config AVNET5282
-       bool "Avnet 5282 board support"
-       depends on M528x
-       select AVNET
-       help
-         Support for the Avnet 5282 board.
-
-config M5282EVB
-       bool "Motorola M5282EVB board support"
-       depends on M528x
-       select FREESCALE
-       help
-         Support for the Motorola M5282EVB board.
-
-config COBRA5282
-       bool "senTec COBRA5282 board support"
-       depends on M528x
-       select senTec
-       help
-         Support for the senTec COBRA5282 board.
-
-config SOM5282EM
-       bool "EMAC.Inc SOM5282EM board support"
-       depends on M528x
-       help
-         Support for the EMAC.Inc SOM5282EM module.
-
 config WILDFIRE
        bool "Intec Automation Inc. WildFire board support"
        depends on M528x
@@ -328,14 +244,12 @@ config WILDFIREMOD
 config ARN5307
        bool "Arnewsh 5307 board support"
        depends on M5307
-       select ARNEWSH
        help
          Support for the Arnewsh 5307 board.
 
 config M5307C3
        bool "Motorola M5307C3 board support"
        depends on M5307
-       select FREESCALE
        help
          Support for the Motorola M5307C3 board.
 
@@ -345,30 +259,9 @@ config SECUREEDGEMP3
        help
          Support for the SnapGear SecureEdge/MP3 platform.
 
-config M5329EVB
-       bool "Freescale (Motorola) M5329EVB board support"
-       depends on M532x
-       select FREESCALE
-       help
-         Support for the Freescale (Motorola) M5329EVB board.
-
-config COBRA5329
-       bool "senTec COBRA5329 board support"
-       depends on M532x
-       help
-         Support for the senTec COBRA5329 board.
-
-config M5373EVB
-       bool "Freescale M5373EVB board support"
-       depends on M537x
-       select FREESCALE
-       help
-         Support for the Freescale M5373EVB board.
-
 config M5407C3
        bool "Motorola M5407C3 board support"
        depends on M5407
-       select FREESCALE
        help
          Support for the Motorola M5407C3 board.
 
@@ -402,39 +295,12 @@ config NETtel
        help
          Support for the SnapGear NETtel/SecureEdge/SnapGear boards.
 
-config SNAPGEAR
-       bool "SnapGear router board support"
-       depends on NETtel
-       help
-         Special additional support for SnapGear router boards.
-
-config SNEHA
-       bool
-
-config CPU16B
-       bool "Sneha Technologies S.L. Sarasvati board support"
-       depends on M5272
-       select SNEHA
-       help
-         Support for the SNEHA CPU16B board.
-
 config MOD5272
        bool "Netburner MOD-5272 board support"
        depends on M5272
        help
          Support for the Netburner MOD-5272 board.
 
-config SAVANT
-       bool
-
-config SAVANTrosie1
-       bool "Savant Rosie1 board support"
-       depends on M523x
-       select SAVANT
-       help
-         Support for the Savant Rosie1 board.
-
-
 if !MMU || COLDFIRE
 
 comment "Machine Options"
index 353bf754a9725a490e12eb9528555d256fd797fd..e1534783e94e5f1c7d47e7fb713bcace07bf080f 100644 (file)
@@ -4,6 +4,7 @@
 #ifdef __KERNEL__
 
 #include <asm/virtconvert.h>
+#include <asm-generic/iomap.h>
 
 /*
  * These are for ISA/PCI shared memory _only_ and should never be used
index 7c360dac00b7e39bf7b3336e16810962374575b2..38b024a0b0451ba7a934ea5d4f5563411d406b6d 100644 (file)
@@ -48,6 +48,9 @@ extern unsigned long _ramend;
 #include <asm/page_no.h>
 #endif
 
+#define VM_DATA_DEFAULT_FLAGS  (VM_READ | VM_WRITE | VM_EXEC | \
+                                VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
 #include <asm-generic/getorder.h>
 
 #endif /* _M68K_PAGE_H */
index 89f201434b5aa2575a5837bc6730346e06cc7e30..5029f73e6294763239b7f1766ad073e29b273520 100644 (file)
@@ -173,7 +173,4 @@ static inline __attribute_const__ int __virt_to_node_shift(void)
 
 #endif /* __ASSEMBLY__ */
 
-#define VM_DATA_DEFAULT_FLAGS  (VM_READ | VM_WRITE | VM_EXEC | \
-                                VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-
 #endif /* _M68K_PAGE_MM_H */
index 911ba472e6c4abbd83dac862a561d98eb22526aa..5b16f5d61b44cb4f2cbcb7372d4f98061bef6da0 100644 (file)
@@ -118,7 +118,7 @@ void (*mach_power_off)(void);
  *
  * Returns:
  */
-void parse_uboot_commandline(char *commandp, int size)
+static void __init parse_uboot_commandline(char *commandp, int size)
 {
        extern unsigned long _init_sp;
        unsigned long *sp;
index 2a16df3d931283f0f55ac4c95571dbe3629678c3..57fd286e4b0b410fe93cb0596c93488afa8b4b4e 100644 (file)
@@ -50,6 +50,7 @@
 #include <asm/pgtable.h>
 #include <asm/traps.h>
 #include <asm/ucontext.h>
+#include <asm/cacheflush.h>
 
 #ifdef CONFIG_MMU
 
@@ -181,6 +182,13 @@ static inline void push_cache (unsigned long vaddr)
                asm volatile ("movec %0,%%caar\n\t"
                              "movec %1,%%cacr"
                              : : "r" (vaddr + 4), "r" (temp));
+       } else {
+               /* CPU_IS_COLDFIRE */
+#if defined(CONFIG_CACHE_COPYBACK)
+               flush_cf_dcache(0, DCACHE_MAX_ADDR);
+#endif
+               /* Invalidate instruction cache for the pushed bytes */
+               clear_cf_icache(vaddr, vaddr + 8);
        }
 }
 
index a86eb66835aaaf11b7125960987a44ea9c84941d..e53caf4c3bfbf9d141e1f49857ce08f8da785cc2 100644 (file)
@@ -15,6 +15,7 @@
 
 /***************************************************************************/
 
+#include <linux/init.h>
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/rtc.h>
@@ -42,7 +43,7 @@ void m68328_reset (void)
 
 /***************************************************************************/
 
-void config_BSP(char *command, int len)
+void __init config_BSP(char *command, int len)
 {
   printk(KERN_INFO "\n68328 support D. Jeff Dionne <jeff@uclinux.org>\n");
   printk(KERN_INFO "68328 support Kenneth Albanowski <kjahds@kjshds.com>\n");
index a6eb72d750084f9a19dae6a6d43a80724a8a9d7f..332b5e8605fcdf2d81772babc7b54bde55dd802d 100644 (file)
@@ -13,6 +13,7 @@
 
 /***************************************************************************/
 
+#include <linux/init.h>
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/rtc.h>
@@ -52,7 +53,7 @@ _bsc1(unsigned char *, gethwaddr, int, a)
 _bsc1(char *, getbenv, char *, a)
 #endif
 
-void config_BSP(char *command, int len)
+void __init config_BSP(char *command, int len)
 {
   unsigned char *p;
 
index eb6964fbec09a7f703702b2f8219d5c27f85e564..fd6658358af1aa00c95848bf62f7aed8cf8d15c6 100644 (file)
@@ -14,6 +14,7 @@
 
 /***************************************************************************/
 
+#include <linux/init.h>
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/kd.h>
@@ -59,7 +60,7 @@ static void m68vz328_reset(void)
        );
 }
 
-static void init_hardware(char *command, int size)
+static void __init init_hardware(char *command, int size)
 {
 #ifdef CONFIG_DIRECT_IO_ACCESS
        SCR = 0x10;                                     /* allow user access to internal registers */
@@ -145,7 +146,7 @@ _bsc0(char *, getserialnum)
 _bsc1(unsigned char *, gethwaddr, int, a)
 _bsc1(char *, getbenv, char *, a)
 
-static void init_hardware(char *command, int size)
+static void __init init_hardware(char *command, int size)
 {
        char *p;
 
@@ -167,7 +168,7 @@ static void m68vz328_reset(void)
 {
 }
 
-static void init_hardware(char *command, int size)
+static void __init init_hardware(char *command, int size)
 {
 }
 
@@ -175,7 +176,7 @@ static void init_hardware(char *command, int size)
 #endif
 /***************************************************************************/
 
-void config_BSP(char *command, int size)
+void __init config_BSP(char *command, int size)
 {
        printk(KERN_INFO "68VZ328 DragonBallVZ support (c) 2001 Lineo, Inc.\n");
 
index 8e4e10cc00803387b8386afdb0fe8c655f37a356..315727b7ff40f4170cc05e5d432bd4542dd1102f 100644 (file)
@@ -31,6 +31,7 @@
  */
 
 #include <linux/errno.h>
+#include <linux/init.h>
 #include <linux/sched.h>
 #include <linux/kernel.h>
 #include <linux/param.h>
@@ -77,7 +78,7 @@ void m360_cpm_reset(void);
 
 
 
-void m360_cpm_reset()
+void __init m360_cpm_reset()
 {
 /*     pte_t              *pte; */
 
index 9877cefad1e7640dd27622410c04f882acdb37e3..0570741e5500b221003e3295f4596673cfa53b4b 100644 (file)
@@ -11,6 +11,7 @@
  */
 
 #include <stdarg.h>
+#include <linux/init.h>
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/mm.h>
@@ -140,7 +141,7 @@ _bsc1(char *, getbenv, char *, a)
 #endif
 
 
-void config_BSP(char *command, int len)
+void __init config_BSP(char *command, int len)
 {
   unsigned char *p;
 
index 2a3c860c75250c15bc5bcf8c4c9532aa2772e2de..973640f46752f0247c75de8f5940c3005b23841b 100644 (file)
@@ -16,6 +16,8 @@ config META21_FPGA
 
 config SOC_TZ1090
        bool "Toumaz Xenif TZ1090 SoC (Comet)"
+       select ARCH_WANT_OPTIONAL_GPIOLIB
+       select IMGPDC_IRQ
        select METAG_LNKGET_AROUND_CACHE
        select METAG_META21
        select METAG_SMP_WRITE_REORDERING
index 853744652b93460875cc9a36caf2fad3fc3953b5..24ea7d2e9138032c713088e70f5175cfa26155a4 100644 (file)
@@ -8,6 +8,8 @@
 
 #include "skeleton.dtsi"
 
+#include <dt-bindings/interrupt-controller/irq.h>
+
 / {
        compatible = "toumaz,tz1090", "img,meta";
 
                #size-cells = <1>;
                ranges;
 
+               pdc: pdc@0x02006000 {
+                       interrupt-controller;
+                       #interrupt-cells = <2>;
+
+                       reg = <0x02006000 0x1000>;
+                       compatible = "img,pdc-intc";
+
+                       num-perips = <3>;
+                       num-syswakes = <3>;
+
+                       interrupts = <18 IRQ_TYPE_LEVEL_HIGH>, /* Syswakes */
+                                    <30 IRQ_TYPE_LEVEL_HIGH>, /* Perip 0 (RTC) */
+                                    <29 IRQ_TYPE_LEVEL_HIGH>, /* Perip 1 (IR) */
+                                    <31 IRQ_TYPE_LEVEL_HIGH>; /* Perip 2 (WDT) */
+               };
+
                pinctrl: pinctrl@02005800 {
                        #gpio-range-cells = <3>;
                        compatible = "img,tz1090-pinctrl";
                        compatible = "img,tz1090-pdc-pinctrl";
                        reg = <0x02006500 0x100>;
                };
+
+               gpios: gpios@02005800 {
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+                       compatible = "img,tz1090-gpio";
+                       reg = <0x02005800 0x90>;
+
+                       gpios0: bank@0 {
+                               gpio-controller;
+                               interrupt-controller;
+                               #gpio-cells = <2>;
+                               #interrupt-cells = <2>;
+                               reg = <0>;
+                               interrupts = <13 IRQ_TYPE_LEVEL_HIGH>;
+                               gpio-ranges = <&pinctrl 0 0 30>;
+                       };
+                       gpios1: bank@1 {
+                               gpio-controller;
+                               interrupt-controller;
+                               #gpio-cells = <2>;
+                               #interrupt-cells = <2>;
+                               reg = <1>;
+                               interrupts = <14 IRQ_TYPE_LEVEL_HIGH>;
+                               gpio-ranges = <&pinctrl 0 30 30>;
+                       };
+                       gpios2: bank@2 {
+                               gpio-controller;
+                               interrupt-controller;
+                               #gpio-cells = <2>;
+                               #interrupt-cells = <2>;
+                               reg = <2>;
+                               interrupts = <15 IRQ_TYPE_LEVEL_HIGH>;
+                               gpio-ranges = <&pinctrl 0 60 30>;
+                       };
+               };
+
+               pdc_gpios: gpios@02006500 {
+                       gpio-controller;
+                       #gpio-cells = <2>;
+
+                       compatible = "img,tz1090-pdc-gpio";
+                       reg = <0x02006500 0x100>;
+
+                       interrupt-parent = <&pdc>;
+                       interrupts =    <8  IRQ_TYPE_NONE>,
+                                       <9  IRQ_TYPE_NONE>,
+                                       <10 IRQ_TYPE_NONE>;
+                       gpio-ranges = <&pdc_pinctrl 0 0 7>;
+               };
        };
 };
index 28813f164730f7110ccf470d6ef610932f166580..123919534b80fe3724612b61697441305d5b10a6 100644 (file)
@@ -407,10 +407,9 @@ void free_initrd_mem(unsigned long start, unsigned long end)
 #endif
 
 #ifdef CONFIG_OF_FLATTREE
-void __init early_init_dt_setup_initrd_arch(unsigned long start,
-                                           unsigned long end)
+void __init early_init_dt_setup_initrd_arch(u64 start, u64 end)
 {
-       pr_err("%s(%lx, %lx)\n",
+       pr_err("%s(%llx, %llx)\n",
               __func__, start, end);
 }
 #endif /* CONFIG_OF_FLATTREE */
index 0a2c68f9f9b0d61cf14e3f28de7ef23d87adcced..62e2e8f2c5d617a2a1c3271166441630b63b85cf 100644 (file)
@@ -136,8 +136,7 @@ void __init early_init_devtree(void *params)
 }
 
 #ifdef CONFIG_BLK_DEV_INITRD
-void __init early_init_dt_setup_initrd_arch(unsigned long start,
-               unsigned long end)
+void __init early_init_dt_setup_initrd_arch(u64 start, u64 end)
 {
        initrd_start = (unsigned long)__va(start);
        initrd_end = (unsigned long)__va(end);
index bdb8ea100e73ee8637c8ea587378000292b0b37b..1b93bf0892a0d5550418bed47f99f1dfa07e4305 100644 (file)
@@ -657,67 +657,42 @@ void pci_resource_to_user(const struct pci_dev *dev, int bar,
 void pci_process_bridge_OF_ranges(struct pci_controller *hose,
                                  struct device_node *dev, int primary)
 {
-       const u32 *ranges;
-       int rlen;
-       int pna = of_n_addr_cells(dev);
-       int np = pna + 5;
        int memno = 0, isa_hole = -1;
-       u32 pci_space;
-       unsigned long long pci_addr, cpu_addr, pci_next, cpu_next, size;
        unsigned long long isa_mb = 0;
        struct resource *res;
+       struct of_pci_range range;
+       struct of_pci_range_parser parser;
 
        pr_info("PCI host bridge %s %s ranges:\n",
               dev->full_name, primary ? "(primary)" : "");
 
-       /* Get ranges property */
-       ranges = of_get_property(dev, "ranges", &rlen);
-       if (ranges == NULL)
+       /* Check for ranges property */
+       if (of_pci_range_parser_init(&parser, dev))
                return;
 
-       /* Parse it */
        pr_debug("Parsing ranges property...\n");
-       while ((rlen -= np * 4) >= 0) {
+       for_each_of_pci_range(&parser, &range) {
                /* Read next ranges element */
-               pci_space = ranges[0];
-               pci_addr = of_read_number(ranges + 1, 2);
-               cpu_addr = of_translate_address(dev, ranges + 3);
-               size = of_read_number(ranges + pna + 3, 2);
-
                pr_debug("pci_space: 0x%08x pci_addr:0x%016llx ",
-                               pci_space, pci_addr);
+                               range.pci_space, range.pci_addr);
                pr_debug("cpu_addr:0x%016llx size:0x%016llx\n",
-                                       cpu_addr, size);
-
-               ranges += np;
+                                       range.cpu_addr, range.size);
 
                /* If we failed translation or got a zero-sized region
                 * (some FW try to feed us with non sensical zero sized regions
                 * such as power3 which look like some kind of attempt
                 * at exposing the VGA memory hole)
                 */
-               if (cpu_addr == OF_BAD_ADDR || size == 0)
+               if (range.cpu_addr == OF_BAD_ADDR || range.size == 0)
                        continue;
 
-               /* Now consume following elements while they are contiguous */
-               for (; rlen >= np * sizeof(u32);
-                    ranges += np, rlen -= np * 4) {
-                       if (ranges[0] != pci_space)
-                               break;
-                       pci_next = of_read_number(ranges + 1, 2);
-                       cpu_next = of_translate_address(dev, ranges + 3);
-                       if (pci_next != pci_addr + size ||
-                           cpu_next != cpu_addr + size)
-                               break;
-                       size += of_read_number(ranges + pna + 3, 2);
-               }
-
                /* Act based on address space type */
                res = NULL;
-               switch ((pci_space >> 24) & 0x3) {
-               case 1:         /* PCI IO space */
+               switch (range.flags & IORESOURCE_TYPE_BITS) {
+               case IORESOURCE_IO:
                        pr_info("  IO 0x%016llx..0x%016llx -> 0x%016llx\n",
-                              cpu_addr, cpu_addr + size - 1, pci_addr);
+                               range.cpu_addr, range.cpu_addr + range.size - 1,
+                               range.pci_addr);
 
                        /* We support only one IO range */
                        if (hose->pci_io_size) {
@@ -725,11 +700,12 @@ void pci_process_bridge_OF_ranges(struct pci_controller *hose,
                                continue;
                        }
                        /* On 32 bits, limit I/O space to 16MB */
-                       if (size > 0x01000000)
-                               size = 0x01000000;
+                       if (range.size > 0x01000000)
+                               range.size = 0x01000000;
 
                        /* 32 bits needs to map IOs here */
-                       hose->io_base_virt = ioremap(cpu_addr, size);
+                       hose->io_base_virt = ioremap(range.cpu_addr,
+                                               range.size);
 
                        /* Expect trouble if pci_addr is not 0 */
                        if (primary)
@@ -738,19 +714,20 @@ void pci_process_bridge_OF_ranges(struct pci_controller *hose,
                        /* pci_io_size and io_base_phys always represent IO
                         * space starting at 0 so we factor in pci_addr
                         */
-                       hose->pci_io_size = pci_addr + size;
-                       hose->io_base_phys = cpu_addr - pci_addr;
+                       hose->pci_io_size = range.pci_addr + range.size;
+                       hose->io_base_phys = range.cpu_addr - range.pci_addr;
 
                        /* Build resource */
                        res = &hose->io_resource;
-                       res->flags = IORESOURCE_IO;
-                       res->start = pci_addr;
+                       range.cpu_addr = range.pci_addr;
+
                        break;
-               case 2:         /* PCI Memory space */
-               case 3:         /* PCI 64 bits Memory space */
+               case IORESOURCE_MEM:
                        pr_info(" MEM 0x%016llx..0x%016llx -> 0x%016llx %s\n",
-                              cpu_addr, cpu_addr + size - 1, pci_addr,
-                              (pci_space & 0x40000000) ? "Prefetch" : "");
+                               range.cpu_addr, range.cpu_addr + range.size - 1,
+                               range.pci_addr,
+                               (range.pci_space & 0x40000000) ?
+                               "Prefetch" : "");
 
                        /* We support only 3 memory ranges */
                        if (memno >= 3) {
@@ -758,13 +735,13 @@ void pci_process_bridge_OF_ranges(struct pci_controller *hose,
                                continue;
                        }
                        /* Handles ISA memory hole space here */
-                       if (pci_addr == 0) {
-                               isa_mb = cpu_addr;
+                       if (range.pci_addr == 0) {
+                               isa_mb = range.cpu_addr;
                                isa_hole = memno;
                                if (primary || isa_mem_base == 0)
-                                       isa_mem_base = cpu_addr;
-                               hose->isa_mem_phys = cpu_addr;
-                               hose->isa_mem_size = size;
+                                       isa_mem_base = range.cpu_addr;
+                               hose->isa_mem_phys = range.cpu_addr;
+                               hose->isa_mem_size = range.size;
                        }
 
                        /* We get the PCI/Mem offset from the first range or
@@ -772,30 +749,23 @@ void pci_process_bridge_OF_ranges(struct pci_controller *hose,
                         * hole. If they don't match, bugger.
                         */
                        if (memno == 0 ||
-                           (isa_hole >= 0 && pci_addr != 0 &&
+                           (isa_hole >= 0 && range.pci_addr != 0 &&
                             hose->pci_mem_offset == isa_mb))
-                               hose->pci_mem_offset = cpu_addr - pci_addr;
-                       else if (pci_addr != 0 &&
-                                hose->pci_mem_offset != cpu_addr - pci_addr) {
+                               hose->pci_mem_offset = range.cpu_addr -
+                                                       range.pci_addr;
+                       else if (range.pci_addr != 0 &&
+                                hose->pci_mem_offset != range.cpu_addr -
+                                                       range.pci_addr) {
                                pr_info(" \\--> Skipped (offset mismatch) !\n");
                                continue;
                        }
 
                        /* Build resource */
                        res = &hose->mem_resources[memno++];
-                       res->flags = IORESOURCE_MEM;
-                       if (pci_space & 0x40000000)
-                               res->flags |= IORESOURCE_PREFETCH;
-                       res->start = cpu_addr;
                        break;
                }
-               if (res != NULL) {
-                       res->name = dev->full_name;
-                       res->end = res->start + size - 1;
-                       res->parent = NULL;
-                       res->sibling = NULL;
-                       res->child = NULL;
-               }
+               if (res != NULL)
+                       of_pci_range_to_resource(&range, dev, res);
        }
 
        /* If there's an ISA hole and the pci_mem_offset is -not- matching
index e652e578a679aa3d1c5c41e336121bedb298ad72..4b50d40f7451ad86eba859b8a02ea792ff060b35 100644 (file)
@@ -35,6 +35,8 @@ struct bcm963xx_nvram {
        u32     checksum_high;
 };
 
+#define BCM63XX_DEFAULT_PSI_SIZE       64
+
 static struct bcm963xx_nvram nvram;
 static int mac_addr_used;
 
@@ -114,3 +116,12 @@ int bcm63xx_nvram_get_mac_address(u8 *mac)
        return 0;
 }
 EXPORT_SYMBOL(bcm63xx_nvram_get_mac_address);
+
+int bcm63xx_nvram_get_psi_size(void)
+{
+       if (nvram.psi_size > 0)
+               return nvram.psi_size;
+
+       return BCM63XX_DEFAULT_PSI_SIZE;
+}
+EXPORT_SYMBOL(bcm63xx_nvram_get_psi_size);
index 48b08eb9d9e4bd29dc97f5ec97c3f763852ff4aa..b212ae12e5ac7dc8ca35dfb24324ba638353f85e 100644 (file)
@@ -8,6 +8,7 @@
  *   written by Ralf Baechle <ralf@linux-mips.org>
  */
 #include <linux/compiler.h>
+#include <linux/vmalloc.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/console.h>
@@ -1139,3 +1140,30 @@ static int __init edac_devinit(void)
        return err;
 }
 device_initcall(edac_devinit);
+
+static void __initdata *octeon_dummy_iospace;
+
+static int __init octeon_no_pci_init(void)
+{
+       /*
+        * Initially assume there is no PCI. The PCI/PCIe platform code will
+        * later re-initialize these to correct values if they are present.
+        */
+       octeon_dummy_iospace = vzalloc(IO_SPACE_LIMIT);
+       set_io_port_base((unsigned long)octeon_dummy_iospace);
+       ioport_resource.start = MAX_RESOURCE;
+       ioport_resource.end = 0;
+       return 0;
+}
+core_initcall(octeon_no_pci_init);
+
+static int __init octeon_no_pci_release(void)
+{
+       /*
+        * Release the allocated memory if a real IO space is there.
+        */
+       if ((unsigned long)octeon_dummy_iospace != mips_io_port_base)
+               vfree(octeon_dummy_iospace);
+       return 0;
+}
+late_initcall(octeon_no_pci_release);
index 9b54b7a403d446b59073fe39fec03e0db7a432e8..454ddf9bb76f8a5fc5660bc6ab02c4d5380f7593 100644 (file)
@@ -1,2 +1,15 @@
 # MIPS headers
+generic-y += cputime.h
+generic-y += current.h
+generic-y += emergency-restart.h
+generic-y += local64.h
+generic-y += mutex.h
+generic-y += parport.h
+generic-y += percpu.h
+generic-y += scatterlist.h
+generic-y += sections.h
+generic-y += segment.h
+generic-y += serial.h
 generic-y += trace_clock.h
+generic-y += ucontext.h
+generic-y += xor.h
index 552a65a0cf2b5438778d6b3d975a8e518a522373..27bd060d716e334e65be1e547a8c478d45273fa7 100644 (file)
@@ -65,44 +65,33 @@ static inline unsigned long bmips_read_zscm_reg(unsigned int offset)
 {
        unsigned long ret;
 
-       __asm__ __volatile__(
-               ".set push\n"
-               ".set noreorder\n"
-               "cache %1, 0(%2)\n"
-               "sync\n"
-               "_ssnop\n"
-               "_ssnop\n"
-               "_ssnop\n"
-               "_ssnop\n"
-               "_ssnop\n"
-               "_ssnop\n"
-               "_ssnop\n"
-               "mfc0 %0, $28, 3\n"
-               "_ssnop\n"
-               ".set pop\n"
-               : "=&r" (ret)
-               : "i" (Index_Load_Tag_S), "r" (ZSCM_REG_BASE + offset)
-               : "memory");
+       barrier();
+       cache_op(Index_Load_Tag_S, ZSCM_REG_BASE + offset);
+       __sync();
+       _ssnop();
+       _ssnop();
+       _ssnop();
+       _ssnop();
+       _ssnop();
+       _ssnop();
+       _ssnop();
+       ret = read_c0_ddatalo();
+       _ssnop();
+
        return ret;
 }
 
 static inline void bmips_write_zscm_reg(unsigned int offset, unsigned long data)
 {
-       __asm__ __volatile__(
-               ".set push\n"
-               ".set noreorder\n"
-               "mtc0 %0, $28, 3\n"
-               "_ssnop\n"
-               "_ssnop\n"
-               "_ssnop\n"
-               "cache %1, 0(%2)\n"
-               "_ssnop\n"
-               "_ssnop\n"
-               "_ssnop\n"
-               : /* no outputs */
-               : "r" (data),
-                 "i" (Index_Store_Tag_S), "r" (ZSCM_REG_BASE + offset)
-               : "memory");
+       write_c0_ddatalo(data);
+       _ssnop();
+       _ssnop();
+       _ssnop();
+       cache_op(Index_Store_Tag_S, ZSCM_REG_BASE + offset);
+       _ssnop();
+       _ssnop();
+       _ssnop();
+       barrier();
 }
 
 #endif /* !defined(__ASSEMBLY__) */
index 632bbe5a79ea5bfe53769939964ce3a4d3bfd8e9..c19861518c322aa218eb4dadd49e4bae46c7008a 100644 (file)
 #define PRID_IMP_CAVIUM_CN68XX 0x9100
 #define PRID_IMP_CAVIUM_CN66XX 0x9200
 #define PRID_IMP_CAVIUM_CN61XX 0x9300
+#define PRID_IMP_CAVIUM_CNF71XX 0x9400
+#define PRID_IMP_CAVIUM_CN78XX 0x9500
+#define PRID_IMP_CAVIUM_CN70XX 0x9600
 
 /*
  * These are the PRID's for when 23:16 == PRID_COMP_INGENIC
@@ -272,7 +275,7 @@ enum cpu_type_enum {
         */
        CPU_5KC, CPU_5KE, CPU_20KC, CPU_25KF, CPU_SB1, CPU_SB1A, CPU_LOONGSON2,
        CPU_CAVIUM_OCTEON, CPU_CAVIUM_OCTEON_PLUS, CPU_CAVIUM_OCTEON2,
-       CPU_XLR, CPU_XLP,
+       CPU_CAVIUM_OCTEON3, CPU_XLR, CPU_XLP,
 
        CPU_LAST
 };
diff --git a/arch/mips/include/asm/cputime.h b/arch/mips/include/asm/cputime.h
deleted file mode 100644 (file)
index c00eacb..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __MIPS_CPUTIME_H
-#define __MIPS_CPUTIME_H
-
-#include <asm-generic/cputime.h>
-
-#endif /* __MIPS_CPUTIME_H */
diff --git a/arch/mips/include/asm/current.h b/arch/mips/include/asm/current.h
deleted file mode 100644 (file)
index 4c51401..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/current.h>
diff --git a/arch/mips/include/asm/emergency-restart.h b/arch/mips/include/asm/emergency-restart.h
deleted file mode 100644 (file)
index 108d8c4..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _ASM_EMERGENCY_RESTART_H
-#define _ASM_EMERGENCY_RESTART_H
-
-#include <asm-generic/emergency-restart.h>
-
-#endif /* _ASM_EMERGENCY_RESTART_H */
diff --git a/arch/mips/include/asm/local64.h b/arch/mips/include/asm/local64.h
deleted file mode 100644 (file)
index 36c93b5..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/local64.h>
index 4e0b6bc1165edcbae2f44663390daa2c63c017d9..348df49dcc9f35ad7ce4129bde4ab66d1a103b00 100644 (file)
@@ -30,4 +30,6 @@ u8 *bcm63xx_nvram_get_name(void);
  */
 int bcm63xx_nvram_get_mac_address(u8 *mac);
 
+int bcm63xx_nvram_get_psi_size(void);
+
 #endif /* BCM63XX_NVRAM_H */
diff --git a/arch/mips/include/asm/mutex.h b/arch/mips/include/asm/mutex.h
deleted file mode 100644 (file)
index 458c1f7..0000000
+++ /dev/null
@@ -1,9 +0,0 @@
-/*
- * Pull in the generic implementation for the mutex fastpath.
- *
- * TODO: implement optimized primitives instead, or leave the generic
- * implementation in place, or pick the atomic_xchg() based generic
- * implementation. (see asm-generic/mutex-xchg.h for details)
- */
-
-#include <asm-generic/mutex-dec.h>
diff --git a/arch/mips/include/asm/parport.h b/arch/mips/include/asm/parport.h
deleted file mode 100644 (file)
index cf252af..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/parport.h>
diff --git a/arch/mips/include/asm/percpu.h b/arch/mips/include/asm/percpu.h
deleted file mode 100644 (file)
index 844e763..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ASM_PERCPU_H
-#define __ASM_PERCPU_H
-
-#include <asm-generic/percpu.h>
-
-#endif /* __ASM_PERCPU_H */
diff --git a/arch/mips/include/asm/scatterlist.h b/arch/mips/include/asm/scatterlist.h
deleted file mode 100644 (file)
index 7ee0e64..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ASM_SCATTERLIST_H
-#define __ASM_SCATTERLIST_H
-
-#include <asm-generic/scatterlist.h>
-
-#endif /* __ASM_SCATTERLIST_H */
diff --git a/arch/mips/include/asm/sections.h b/arch/mips/include/asm/sections.h
deleted file mode 100644 (file)
index b7e3726..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _ASM_SECTIONS_H
-#define _ASM_SECTIONS_H
-
-#include <asm-generic/sections.h>
-
-#endif /* _ASM_SECTIONS_H */
diff --git a/arch/mips/include/asm/segment.h b/arch/mips/include/asm/segment.h
deleted file mode 100644 (file)
index 92ac001..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _ASM_SEGMENT_H
-#define _ASM_SEGMENT_H
-
-/* Only here because we have some old header files that expect it.. */
-
-#endif /* _ASM_SEGMENT_H */
diff --git a/arch/mips/include/asm/serial.h b/arch/mips/include/asm/serial.h
deleted file mode 100644 (file)
index a0cb0ca..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/serial.h>
diff --git a/arch/mips/include/asm/ucontext.h b/arch/mips/include/asm/ucontext.h
deleted file mode 100644 (file)
index 9bc07b9..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/ucontext.h>
diff --git a/arch/mips/include/asm/xor.h b/arch/mips/include/asm/xor.h
deleted file mode 100644 (file)
index c82eb12..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/xor.h>
index 350ccccadcb99e3696540d77a208989b93ac01a4..be7196eacb8890a1875123a6473ee620d65f5514 100644 (file)
@@ -1,7 +1,9 @@
 # UAPI Header export list
 include include/uapi/asm-generic/Kbuild.asm
 
-header-y += auxvec.h
+generic-y += auxvec.h
+generic-y += ipcbuf.h
+
 header-y += bitsperlong.h
 header-y += break.h
 header-y += byteorder.h
@@ -11,7 +13,6 @@ header-y += fcntl.h
 header-y += inst.h
 header-y += ioctl.h
 header-y += ioctls.h
-header-y += ipcbuf.h
 header-y += kvm_para.h
 header-y += mman.h
 header-y += msgbuf.h
diff --git a/arch/mips/include/uapi/asm/auxvec.h b/arch/mips/include/uapi/asm/auxvec.h
deleted file mode 100644 (file)
index 7cf7f2d..0000000
+++ /dev/null
@@ -1,4 +0,0 @@
-#ifndef _ASM_AUXVEC_H
-#define _ASM_AUXVEC_H
-
-#endif /* _ASM_AUXVEC_H */
diff --git a/arch/mips/include/uapi/asm/ipcbuf.h b/arch/mips/include/uapi/asm/ipcbuf.h
deleted file mode 100644 (file)
index 84c7e51..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/ipcbuf.h>
index 4c6167a178754ecab7b0b456129c6d979626b399..8e8feb851f6b999e0de558517262f9c7e1788ae7 100644 (file)
@@ -852,10 +852,17 @@ platform:
        case PRID_IMP_CAVIUM_CN63XX:
        case PRID_IMP_CAVIUM_CN66XX:
        case PRID_IMP_CAVIUM_CN68XX:
+       case PRID_IMP_CAVIUM_CNF71XX:
                c->cputype = CPU_CAVIUM_OCTEON2;
                __cpu_name[cpu] = "Cavium Octeon II";
                set_elf_platform(cpu, "octeon2");
                break;
+       case PRID_IMP_CAVIUM_CN70XX:
+       case PRID_IMP_CAVIUM_CN78XX:
+               c->cputype = CPU_CAVIUM_OCTEON3;
+               __cpu_name[cpu] = "Cavium Octeon III";
+               set_elf_platform(cpu, "octeon3");
+               break;
        default:
                printk(KERN_INFO "Unknown Octeon chip!\n");
                c->cputype = CPU_UNKNOWN;
index 0c655deeea4adff8fd575cd5d09b372cd3f820e5..42f8875d244444fb65f79592df7a6e4514a84e9d 100644 (file)
@@ -166,6 +166,7 @@ void __init check_wait(void)
        case CPU_CAVIUM_OCTEON:
        case CPU_CAVIUM_OCTEON_PLUS:
        case CPU_CAVIUM_OCTEON2:
+       case CPU_CAVIUM_OCTEON3:
        case CPU_JZRISC:
        case CPU_LOONGSON1:
        case CPU_XLR:
index 7e954042f2526e66f21579f73d609452a5b1d726..0fa0b69cdd53bcc7e7d7d0bf836d3e40c767b4a4 100644 (file)
@@ -58,8 +58,7 @@ void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
 }
 
 #ifdef CONFIG_BLK_DEV_INITRD
-void __init early_init_dt_setup_initrd_arch(unsigned long start,
-                                           unsigned long end)
+void __init early_init_dt_setup_initrd_arch(u64 start, u64 end)
 {
        initrd_start = (unsigned long)__va(start);
        initrd_end = (unsigned long)__va(end);
index e773659ccf9f8f607db709109e39b0cacb6f7989..46048d24328c759b0bf4189c612929015f139f69 100644 (file)
@@ -803,6 +803,32 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
                                dec_insn.next_pc_inc;
                return 1;
                break;
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+       case lwc2_op: /* This is bbit0 on Octeon */
+               if ((regs->regs[insn.i_format.rs] & (1ull<<insn.i_format.rt)) == 0)
+                       *contpc = regs->cp0_epc + 4 + (insn.i_format.simmediate << 2);
+               else
+                       *contpc = regs->cp0_epc + 8;
+               return 1;
+       case ldc2_op: /* This is bbit032 on Octeon */
+               if ((regs->regs[insn.i_format.rs] & (1ull<<(insn.i_format.rt + 32))) == 0)
+                       *contpc = regs->cp0_epc + 4 + (insn.i_format.simmediate << 2);
+               else
+                       *contpc = regs->cp0_epc + 8;
+               return 1;
+       case swc2_op: /* This is bbit1 on Octeon */
+               if (regs->regs[insn.i_format.rs] & (1ull<<insn.i_format.rt))
+                       *contpc = regs->cp0_epc + 4 + (insn.i_format.simmediate << 2);
+               else
+                       *contpc = regs->cp0_epc + 8;
+               return 1;
+       case sdc2_op: /* This is bbit132 on Octeon */
+               if (regs->regs[insn.i_format.rs] & (1ull<<(insn.i_format.rt + 32)))
+                       *contpc = regs->cp0_epc + 4 + (insn.i_format.simmediate << 2);
+               else
+                       *contpc = regs->cp0_epc + 8;
+               return 1;
+#endif
        case cop0_op:
        case cop1_op:
        case cop2_op:
index a0bcdbb81d410c31fd41507dd4f0d200e3eb865b..729e7702b1de86212653cf56b49662aa0637c1d9 100644 (file)
@@ -224,6 +224,20 @@ static void probe_octeon(void)
                c->options |= MIPS_CPU_PREFETCH;
                break;
 
+       case CPU_CAVIUM_OCTEON3:
+               c->icache.linesz = 128;
+               c->icache.sets = 16;
+               c->icache.ways = 39;
+               c->icache.flags |= MIPS_CACHE_VTAG;
+               icache_size = c->icache.sets * c->icache.ways * c->icache.linesz;
+
+               c->dcache.linesz = 128;
+               c->dcache.ways = 32;
+               c->dcache.sets = 8;
+               dcache_size = c->dcache.sets * c->dcache.ways * c->dcache.linesz;
+               c->options |= MIPS_CPU_PREFETCH;
+               break;
+
        default:
                panic("Unsupported Cavium Networks CPU type");
                break;
index 30a494db99c2a0eb4d51aa64ca410de956801837..79bca3130bd15f51bccae23012fa9a821cadb653 100644 (file)
 
 #define FASTPATH_SIZE  128
 
+#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
 LEAF(tlbmiss_handler_setup_pgd)
        .space          16 * 4
 END(tlbmiss_handler_setup_pgd)
 EXPORT(tlbmiss_handler_setup_pgd_end)
+#endif
 
 LEAF(handle_tlbm)
        .space          FASTPATH_SIZE * 4
index 556cb48157704d8c576da3fc0dda6fb10c375c3e..821b45175dc1a961c3f86a799228ab5a3eb512cf 100644 (file)
@@ -85,6 +85,7 @@ static int use_bbit_insns(void)
        case CPU_CAVIUM_OCTEON:
        case CPU_CAVIUM_OCTEON_PLUS:
        case CPU_CAVIUM_OCTEON2:
+       case CPU_CAVIUM_OCTEON3:
                return 1;
        default:
                return 0;
@@ -95,6 +96,7 @@ static int use_lwx_insns(void)
 {
        switch (current_cpu_type()) {
        case CPU_CAVIUM_OCTEON2:
+       case CPU_CAVIUM_OCTEON3:
                return 1;
        default:
                return 0;
index ef3897ef0dc711d05ea06383b2080f17d2eaa643..d5378ef3c0f7652f413b5de8247b323779d5af91 100644 (file)
@@ -75,8 +75,7 @@ static void nlm_usb_intr_en(int node, int port)
        port_addr = nlm_get_usb_regbase(node, port);
        val = nlm_read_usb_reg(port_addr, USB_INT_EN);
        val = USB_CTRL_INTERRUPT_EN  | USB_OHCI_INTERRUPT_EN |
-               USB_OHCI_INTERRUPT1_EN | USB_CTRL_INTERRUPT_EN  |
-               USB_OHCI_INTERRUPT_EN | USB_OHCI_INTERRUPT2_EN;
+               USB_OHCI_INTERRUPT1_EN | USB_OHCI_INTERRUPT2_EN;
        nlm_write_usb_reg(port_addr, USB_INT_EN, val);
 }
 
index 95c2ea815cacc4c6962025086fd4de4bcc0ef4a9..59cccd95688b500fa809b90edc71f24bf17a9859 100644 (file)
@@ -586,15 +586,16 @@ static int __init octeon_pci_setup(void)
        else
                octeon_dma_bar_type = OCTEON_DMA_BAR_TYPE_BIG;
 
-       /* PCI I/O and PCI MEM values */
-       set_io_port_base(OCTEON_PCI_IOSPACE_BASE);
-       ioport_resource.start = 0;
-       ioport_resource.end = OCTEON_PCI_IOSPACE_SIZE - 1;
        if (!octeon_is_pci_host()) {
                pr_notice("Not in host mode, PCI Controller not initialized\n");
                return 0;
        }
 
+       /* PCI I/O and PCI MEM values */
+       set_io_port_base(OCTEON_PCI_IOSPACE_BASE);
+       ioport_resource.start = 0;
+       ioport_resource.end = OCTEON_PCI_IOSPACE_SIZE - 1;
+
        pr_notice("%s Octeon big bar support\n",
                  (octeon_dma_bar_type ==
                  OCTEON_DMA_BAR_TYPE_BIG) ? "Enabling" : "Disabling");
index 5869e3fa5dd3ac4f9b31cd63140ac01592593ddd..150215a9171145655a5973cd376916699810f3aa 100644 (file)
@@ -96,8 +96,7 @@ void __init early_init_devtree(void *params)
 }
 
 #ifdef CONFIG_BLK_DEV_INITRD
-void __init early_init_dt_setup_initrd_arch(unsigned long start,
-               unsigned long end)
+void __init early_init_dt_setup_initrd_arch(u64 start, u64 end)
 {
        initrd_start = (unsigned long)__va(start);
        initrd_end = (unsigned long)__va(end);
diff --git a/arch/parisc/configs/generic-32bit_defconfig b/arch/parisc/configs/generic-32bit_defconfig
new file mode 100644 (file)
index 0000000..7f03614
--- /dev/null
@@ -0,0 +1,340 @@
+CONFIG_LOCALVERSION="-32bit"
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=16
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_RD_BZIP2=y
+CONFIG_RD_LZMA=y
+CONFIG_RD_LZO=y
+CONFIG_EXPERT=y
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_PERF_EVENTS=y
+CONFIG_SLAB=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+# CONFIG_LBDAF is not set
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PA7100LC=y
+CONFIG_SMP=y
+CONFIG_HZ_100=y
+CONFIG_IOMMU_CCIO=y
+CONFIG_GSC_LASI=y
+CONFIG_GSC_WAX=y
+CONFIG_EISA=y
+CONFIG_PCI=y
+CONFIG_GSC_DINO=y
+CONFIG_PCI_LBA=y
+CONFIG_PCCARD=m
+CONFIG_YENTA=m
+CONFIG_PD6729=m
+CONFIG_I82092=m
+# CONFIG_PDC_CHASSIS is not set
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_BINFMT_MISC=m
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=m
+CONFIG_NET_KEY=m
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+CONFIG_INET_DIAG=m
+CONFIG_LLC2=m
+# CONFIG_WIRELESS is not set
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_STANDALONE is not set
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+CONFIG_PARPORT=y
+CONFIG_PARPORT_PC=m
+CONFIG_PARPORT_PC_PCMCIA=m
+CONFIG_PARPORT_1284=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_CRYPTOLOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=6144
+CONFIG_IDE=y
+CONFIG_BLK_DEV_IDECS=m
+CONFIG_BLK_DEV_IDECD=y
+CONFIG_BLK_DEV_GENERIC=y
+CONFIG_BLK_DEV_NS87415=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_ST=y
+CONFIG_BLK_DEV_SR=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_SCSI_LASI700=y
+CONFIG_SCSI_SYM53C8XX_2=y
+CONFIG_SCSI_ZALON=y
+CONFIG_SCSI_DH=y
+CONFIG_ATA=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=m
+CONFIG_MD_LINEAR=m
+CONFIG_MD_RAID0=m
+CONFIG_MD_RAID1=m
+CONFIG_MD_RAID10=m
+CONFIG_MD_RAID456=m
+CONFIG_BLK_DEV_DM=y
+CONFIG_DM_UEVENT=y
+CONFIG_NETDEVICES=y
+CONFIG_BONDING=m
+CONFIG_DUMMY=m
+CONFIG_TUN=m
+# CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NET_VENDOR_ADAPTEC is not set
+# CONFIG_NET_VENDOR_ALTEON is not set
+# CONFIG_NET_VENDOR_AMD is not set
+# CONFIG_NET_VENDOR_ATHEROS is not set
+# CONFIG_NET_CADENCE is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_BROCADE is not set
+# CONFIG_NET_VENDOR_CHELSIO is not set
+# CONFIG_NET_VENDOR_CIRRUS is not set
+# CONFIG_NET_VENDOR_CISCO is not set
+CONFIG_NET_TULIP=y
+CONFIG_TULIP=y
+# CONFIG_NET_VENDOR_DLINK is not set
+# CONFIG_NET_VENDOR_EMULEX is not set
+# CONFIG_NET_VENDOR_EXAR is not set
+# CONFIG_NET_VENDOR_FUJITSU is not set
+# CONFIG_NET_VENDOR_HP is not set
+CONFIG_LASI_82596=y
+# CONFIG_NET_VENDOR_MELLANOX is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MYRI is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NVIDIA is not set
+# CONFIG_NET_VENDOR_OKI is not set
+# CONFIG_NET_PACKET_ENGINE is not set
+# CONFIG_NET_VENDOR_QLOGIC is not set
+# CONFIG_NET_VENDOR_REALTEK is not set
+# CONFIG_NET_VENDOR_RDC is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SILAN is not set
+# CONFIG_NET_VENDOR_SIS is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SUN is not set
+# CONFIG_NET_VENDOR_TEHUTI is not set
+# CONFIG_NET_VENDOR_TI is not set
+# CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_NET_VENDOR_XIRCOM is not set
+CONFIG_PPP=m
+CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPPOE=m
+# CONFIG_WLAN is not set
+CONFIG_INPUT_POLLDEV=y
+CONFIG_KEYBOARD_HIL_OLD=m
+CONFIG_KEYBOARD_HIL=m
+CONFIG_MOUSE_SERIAL=y
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_UINPUT=m
+CONFIG_VT_HW_CONSOLE_BINDING=y
+CONFIG_LEGACY_PTY_COUNT=64
+CONFIG_SERIAL_8250=y
+# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_CS=m
+CONFIG_SERIAL_8250_NR_UARTS=8
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_MANY_PORTS=y
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+CONFIG_PRINTER=m
+CONFIG_PPDEV=m
+# CONFIG_HW_RANDOM is not set
+CONFIG_I2C=y
+CONFIG_POWER_SUPPLY=y
+# CONFIG_HWMON is not set
+CONFIG_AGP=y
+CONFIG_VIDEO_OUTPUT_CONTROL=y
+CONFIG_FB=y
+CONFIG_FB_FOREIGN_ENDIAN=y
+CONFIG_FB_MODE_HELPERS=y
+CONFIG_FB_MATROX=m
+CONFIG_FB_MATROX_G=y
+CONFIG_FB_VOODOO1=m
+CONFIG_DUMMY_CONSOLE_COLUMNS=128
+CONFIG_DUMMY_CONSOLE_ROWS=48
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+# CONFIG_LOGO_LINUX_CLUT224 is not set
+CONFIG_SOUND=m
+CONFIG_SND=m
+CONFIG_SND_SEQUENCER=m
+CONFIG_SND_MIXER_OSS=m
+CONFIG_SND_PCM_OSS=m
+CONFIG_SND_SEQUENCER_OSS=y
+CONFIG_SND_DYNAMIC_MINORS=y
+CONFIG_SND_AD1889=m
+# CONFIG_SND_PCMCIA is not set
+CONFIG_SND_HARMONY=m
+CONFIG_HIDRAW=y
+CONFIG_HID_A4TECH=y
+CONFIG_HID_APPLE=y
+CONFIG_HID_BELKIN=y
+CONFIG_HID_CHERRY=y
+CONFIG_HID_CHICONY=y
+CONFIG_HID_CYPRESS=y
+CONFIG_HID_DRAGONRISE=y
+CONFIG_HID_EZKEY=y
+CONFIG_HID_KYE=y
+CONFIG_HID_GYRATION=y
+CONFIG_HID_TWINHAN=y
+CONFIG_HID_KENSINGTON=y
+CONFIG_HID_LOGITECH=y
+CONFIG_HID_LOGITECH_DJ=m
+CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MONTEREY=y
+CONFIG_HID_NTRIG=y
+CONFIG_HID_ORTEK=y
+CONFIG_HID_PANTHERLORD=y
+CONFIG_HID_PETALYNX=y
+CONFIG_HID_SAMSUNG=y
+CONFIG_HID_SONY=y
+CONFIG_HID_SUNPLUS=y
+CONFIG_HID_GREENASIA=y
+CONFIG_HID_SMARTJOYPLUS=y
+CONFIG_HID_TOPSEED=y
+CONFIG_HID_THRUSTMASTER=y
+CONFIG_HID_ZEROPLUS=y
+CONFIG_USB=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_MON=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_UHCI_HCD=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_IDE_DISK=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+CONFIG_DMADEVICES=y
+CONFIG_AUXDISPLAY=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT2_FS_POSIX_ACL=y
+CONFIG_EXT2_FS_SECURITY=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT3_FS_POSIX_ACL=y
+CONFIG_EXT3_FS_SECURITY=y
+CONFIG_XFS_FS=m
+CONFIG_XFS_QUOTA=y
+CONFIG_XFS_POSIX_ACL=y
+CONFIG_XFS_RT=y
+CONFIG_QUOTA=y
+CONFIG_QUOTA_NETLINK_INTERFACE=y
+CONFIG_QFMT_V2=y
+CONFIG_AUTOFS4_FS=y
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+CONFIG_VFAT_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_NFS_FS=m
+# CONFIG_NFS_V2 is not set
+CONFIG_NFSD=m
+CONFIG_NFSD_V3=y
+CONFIG_NFSD_V3_ACL=y
+CONFIG_CIFS=m
+CONFIG_CIFS_WEAK_PW_HASH=y
+CONFIG_CIFS_XATTR=y
+CONFIG_CIFS_POSIX=y
+# CONFIG_CIFS_DEBUG is not set
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_CODEPAGE_737=m
+CONFIG_NLS_CODEPAGE_775=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_CODEPAGE_852=m
+CONFIG_NLS_CODEPAGE_855=m
+CONFIG_NLS_CODEPAGE_857=m
+CONFIG_NLS_CODEPAGE_860=m
+CONFIG_NLS_CODEPAGE_861=m
+CONFIG_NLS_CODEPAGE_862=m
+CONFIG_NLS_CODEPAGE_863=m
+CONFIG_NLS_CODEPAGE_864=m
+CONFIG_NLS_CODEPAGE_865=m
+CONFIG_NLS_CODEPAGE_866=m
+CONFIG_NLS_CODEPAGE_869=m
+CONFIG_NLS_CODEPAGE_936=m
+CONFIG_NLS_CODEPAGE_950=m
+CONFIG_NLS_CODEPAGE_932=m
+CONFIG_NLS_CODEPAGE_949=m
+CONFIG_NLS_CODEPAGE_874=m
+CONFIG_NLS_ISO8859_8=m
+CONFIG_NLS_CODEPAGE_1250=y
+CONFIG_NLS_CODEPAGE_1251=m
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=y
+CONFIG_NLS_ISO8859_2=m
+CONFIG_NLS_ISO8859_3=m
+CONFIG_NLS_ISO8859_4=m
+CONFIG_NLS_ISO8859_5=m
+CONFIG_NLS_ISO8859_6=m
+CONFIG_NLS_ISO8859_7=m
+CONFIG_NLS_ISO8859_9=m
+CONFIG_NLS_ISO8859_13=m
+CONFIG_NLS_ISO8859_14=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_KOI8_R=m
+CONFIG_NLS_KOI8_U=m
+CONFIG_NLS_UTF8=y
+CONFIG_UNUSED_SYMBOLS=y
+CONFIG_DEBUG_FS=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_MEMORY_INIT=y
+CONFIG_DEBUG_STACKOVERFLOW=y
+CONFIG_DEBUG_SHIRQ=y
+CONFIG_DETECT_HUNG_TASK=y
+CONFIG_TIMER_STATS=y
+CONFIG_DEBUG_RT_MUTEXES=y
+CONFIG_RT_MUTEX_TESTER=y
+CONFIG_DEBUG_SPINLOCK=y
+CONFIG_DEBUG_MUTEXES=y
+CONFIG_RCU_CPU_STALL_INFO=y
+CONFIG_LATENCYTOP=y
+CONFIG_LKDTM=m
+CONFIG_KEYS=y
+CONFIG_KEYS_DEBUG_PROC_KEYS=y
+CONFIG_CRYPTO_NULL=m
+CONFIG_CRYPTO_TEST=m
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_MD5=y
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_SHA1=y
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_DES=y
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_DEFLATE=y
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRC_CCITT=m
+CONFIG_CRC_T10DIF=y
+CONFIG_FONTS=y
diff --git a/arch/parisc/configs/generic-64bit_defconfig b/arch/parisc/configs/generic-64bit_defconfig
new file mode 100644 (file)
index 0000000..6952cb5
--- /dev/null
@@ -0,0 +1,350 @@
+CONFIG_LOCALVERSION="-64bit"
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+# CONFIG_UTS_NS is not set
+# CONFIG_IPC_NS is not set
+# CONFIG_PID_NS is not set
+# CONFIG_NET_NS is not set
+CONFIG_RELAY=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_MODULES=y
+CONFIG_MODULE_FORCE_LOAD=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_BLK_DEV_INTEGRITY=y
+# CONFIG_IOSCHED_DEADLINE is not set
+CONFIG_PA8X00=y
+CONFIG_MLONGCALLS=y
+CONFIG_64BIT=y
+CONFIG_SMP=y
+# CONFIG_COMPACTION is not set
+CONFIG_HPPB=y
+CONFIG_IOMMU_CCIO=y
+CONFIG_GSC_LASI=y
+CONFIG_GSC_WAX=y
+CONFIG_PCI=y
+CONFIG_PCI_STUB=m
+CONFIG_PCI_IOV=y
+CONFIG_GSC_DINO=y
+CONFIG_PCI_LBA=y
+CONFIG_BINFMT_MISC=m
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=m
+CONFIG_XFRM_SUB_POLICY=y
+CONFIG_XFRM_MIGRATE=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_XFRM_MODE_TRANSPORT=m
+CONFIG_INET_XFRM_MODE_TUNNEL=m
+CONFIG_INET_XFRM_MODE_BEET=m
+CONFIG_INET_LRO=m
+CONFIG_INET_DIAG=m
+CONFIG_NETFILTER=y
+# CONFIG_NETFILTER_ADVANCED is not set
+CONFIG_NETFILTER_NETLINK_LOG=y
+CONFIG_DCB=y
+CONFIG_AF_RXRPC=y
+# CONFIG_WIRELESS is not set
+CONFIG_DEVTMPFS=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_IDE=y
+CONFIG_IDE_GD=m
+CONFIG_IDE_GD_ATAPI=y
+CONFIG_BLK_DEV_IDECD=m
+CONFIG_BLK_DEV_NS87415=y
+CONFIG_BLK_DEV_SIIMAGE=y
+# CONFIG_SCSI_PROC_FS is not set
+CONFIG_BLK_DEV_SD=y
+CONFIG_BLK_DEV_SR=y
+CONFIG_SCSI_ISCSI_ATTRS=y
+CONFIG_SCSI_SRP_ATTRS=y
+CONFIG_ISCSI_BOOT_SYSFS=y
+CONFIG_SCSI_MPT2SAS=y
+CONFIG_SCSI_LASI700=m
+CONFIG_SCSI_SYM53C8XX_2=y
+CONFIG_SCSI_ZALON=y
+CONFIG_SCSI_QLA_ISCSI=m
+CONFIG_SCSI_DH=y
+CONFIG_ATA=y
+CONFIG_ATA_GENERIC=y
+CONFIG_MD=y
+CONFIG_MD_LINEAR=m
+CONFIG_MD_RAID0=m
+CONFIG_BLK_DEV_DM=m
+CONFIG_DM_RAID=m
+CONFIG_DM_UEVENT=y
+CONFIG_FUSION=y
+CONFIG_FUSION_SPI=y
+CONFIG_FUSION_SAS=y
+CONFIG_NETDEVICES=y
+CONFIG_DUMMY=m
+CONFIG_MACVLAN=m
+CONFIG_MACVTAP=m
+CONFIG_NETCONSOLE=m
+CONFIG_NETCONSOLE_DYNAMIC=y
+CONFIG_TUN=y
+# CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NET_VENDOR_ADAPTEC is not set
+# CONFIG_NET_VENDOR_ALTEON is not set
+# CONFIG_NET_VENDOR_AMD is not set
+# CONFIG_NET_VENDOR_ARC is not set
+# CONFIG_NET_VENDOR_ATHEROS is not set
+# CONFIG_NET_CADENCE is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_BROCADE is not set
+# CONFIG_NET_VENDOR_CHELSIO is not set
+# CONFIG_NET_VENDOR_CISCO is not set
+CONFIG_NET_TULIP=y
+CONFIG_TULIP=y
+# CONFIG_NET_VENDOR_DLINK is not set
+# CONFIG_NET_VENDOR_EMULEX is not set
+# CONFIG_NET_VENDOR_EXAR is not set
+CONFIG_HP100=m
+CONFIG_E1000=y
+CONFIG_LASI_82596=y
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MELLANOX is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MYRI is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NVIDIA is not set
+# CONFIG_NET_VENDOR_OKI is not set
+CONFIG_QLA3XXX=m
+CONFIG_QLCNIC=m
+CONFIG_QLGE=m
+# CONFIG_NET_VENDOR_REALTEK is not set
+# CONFIG_NET_VENDOR_RDC is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SILAN is not set
+# CONFIG_NET_VENDOR_SIS is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SUN is not set
+# CONFIG_NET_VENDOR_TEHUTI is not set
+# CONFIG_NET_VENDOR_TI is not set
+# CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
+CONFIG_PHYLIB=y
+CONFIG_MARVELL_PHY=m
+CONFIG_DAVICOM_PHY=m
+CONFIG_QSEMI_PHY=m
+CONFIG_LXT_PHY=m
+CONFIG_CICADA_PHY=m
+CONFIG_VITESSE_PHY=m
+CONFIG_SMSC_PHY=m
+CONFIG_BROADCOM_PHY=m
+CONFIG_ICPLUS_PHY=m
+CONFIG_REALTEK_PHY=m
+CONFIG_NATIONAL_PHY=m
+CONFIG_STE10XP=m
+CONFIG_LSI_ET1011C_PHY=m
+CONFIG_MDIO_BITBANG=m
+CONFIG_SLIP=m
+CONFIG_SLIP_COMPRESSED=y
+CONFIG_SLIP_SMART=y
+CONFIG_SLIP_MODE_SLIP6=y
+# CONFIG_WLAN is not set
+CONFIG_INPUT_EVDEV=y
+# CONFIG_KEYBOARD_HIL_OLD is not set
+# CONFIG_KEYBOARD_HIL is not set
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_MISC=y
+CONFIG_HP_SDC_RTC=m
+CONFIG_SERIO_SERPORT=m
+CONFIG_HP_SDC=m
+CONFIG_HIL_MLC=m
+CONFIG_SERIO_RAW=m
+CONFIG_VT_HW_CONSOLE_BINDING=y
+CONFIG_DEVPTS_MULTIPLE_INSTANCES=y
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_NOZOMI=m
+# CONFIG_DEVKMEM is not set
+CONFIG_SERIAL_8250=y
+# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=8
+CONFIG_SERIAL_8250_RUNTIME_UARTS=8
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_MANY_PORTS=y
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+CONFIG_SERIAL_JSM=m
+CONFIG_HW_RANDOM_TIMERIOMEM=m
+CONFIG_TCG_TPM=m
+CONFIG_TCG_ATMEL=m
+CONFIG_PTP_1588_CLOCK=m
+CONFIG_SENSORS_I5K_AMB=m
+CONFIG_SENSORS_F71882FG=m
+CONFIG_SENSORS_PC87427=m
+CONFIG_SENSORS_VT1211=m
+CONFIG_SENSORS_VT8231=m
+CONFIG_SENSORS_W83627EHF=m
+CONFIG_WATCHDOG=y
+CONFIG_SOFT_WATCHDOG=m
+CONFIG_SSB=m
+CONFIG_SSB_DRIVER_PCICORE=y
+CONFIG_HTC_PASIC3=m
+CONFIG_LPC_SCH=m
+CONFIG_MFD_SM501=m
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=m
+CONFIG_REGULATOR_USERSPACE_CONSUMER=m
+CONFIG_MEDIA_SUPPORT=m
+CONFIG_AGP=y
+CONFIG_AGP_PARISC=y
+CONFIG_DRM=y
+CONFIG_DRM_RADEON=y
+CONFIG_DRM_RADEON_UMS=y
+CONFIG_FIRMWARE_EDID=y
+CONFIG_FB_MODE_HELPERS=y
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+# CONFIG_BACKLIGHT_GENERIC is not set
+CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+CONFIG_HID=m
+CONFIG_HIDRAW=y
+CONFIG_HID_DRAGONRISE=m
+CONFIG_DRAGONRISE_FF=y
+CONFIG_HID_KYE=m
+CONFIG_HID_GYRATION=m
+CONFIG_HID_TWINHAN=m
+CONFIG_LOGITECH_FF=y
+CONFIG_LOGIRUMBLEPAD2_FF=y
+CONFIG_HID_NTRIG=m
+CONFIG_HID_PANTHERLORD=m
+CONFIG_PANTHERLORD_FF=y
+CONFIG_HID_PETALYNX=m
+CONFIG_HID_SAMSUNG=m
+CONFIG_HID_SONY=m
+CONFIG_HID_SUNPLUS=m
+CONFIG_HID_GREENASIA=m
+CONFIG_GREENASIA_FF=y
+CONFIG_HID_SMARTJOYPLUS=m
+CONFIG_SMARTJOYPLUS_FF=y
+CONFIG_HID_TOPSEED=m
+CONFIG_HID_THRUSTMASTER=m
+CONFIG_THRUSTMASTER_FF=y
+CONFIG_HID_ZEROPLUS=m
+CONFIG_ZEROPLUS_FF=y
+CONFIG_USB_HID=m
+CONFIG_HID_PID=y
+CONFIG_USB_HIDDEV=y
+CONFIG_USB=y
+CONFIG_USB_DEBUG=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_DYNAMIC_MINORS=y
+CONFIG_USB_MON=m
+CONFIG_USB_WUSB_CBAF=m
+CONFIG_USB_XHCI_HCD=m
+CONFIG_USB_EHCI_HCD=m
+CONFIG_USB_OHCI_HCD=m
+CONFIG_USB_R8A66597_HCD=m
+CONFIG_USB_ACM=m
+CONFIG_USB_PRINTER=m
+CONFIG_USB_WDM=m
+CONFIG_USB_TMC=m
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_ONESHOT=y
+CONFIG_LEDS_TRIGGER_IDE_DISK=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=m
+CONFIG_LEDS_TRIGGER_BACKLIGHT=m
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=m
+CONFIG_UIO=y
+CONFIG_UIO_PDRV_GENIRQ=m
+CONFIG_UIO_AEC=m
+CONFIG_UIO_SERCOS3=m
+CONFIG_UIO_PCI_GENERIC=m
+CONFIG_STAGING=y
+# CONFIG_NET_VENDOR_SILICOM is not set
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT2_FS_POSIX_ACL=y
+CONFIG_EXT2_FS_SECURITY=y
+CONFIG_EXT3_FS=y
+CONFIG_EXT3_FS_POSIX_ACL=y
+CONFIG_EXT3_FS_SECURITY=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_XFS_FS=m
+CONFIG_BTRFS_FS=m
+CONFIG_QUOTA=y
+CONFIG_QUOTA_NETLINK_INTERFACE=y
+CONFIG_QFMT_V2=y
+CONFIG_AUTOFS4_FS=y
+CONFIG_FUSE_FS=y
+CONFIG_CUSE=y
+CONFIG_ISO9660_FS=y
+CONFIG_UDF_FS=y
+CONFIG_VFAT_FS=m
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_CONFIGFS_FS=y
+CONFIG_SYSV_FS=y
+CONFIG_NFS_FS=m
+CONFIG_NFS_V4=m
+CONFIG_NFS_V4_1=y
+CONFIG_NFSD=m
+CONFIG_NFSD_V4=y
+CONFIG_NLS_DEFAULT="utf8"
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_CODEPAGE_852=m
+CONFIG_NLS_CODEPAGE_1250=m
+CONFIG_NLS_CODEPAGE_1251=m
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=m
+CONFIG_NLS_ISO8859_2=m
+CONFIG_NLS_UTF8=m
+CONFIG_PRINTK_TIME=y
+CONFIG_STRIP_ASM_SYMS=y
+CONFIG_UNUSED_SYMBOLS=y
+CONFIG_DEBUG_FS=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_DEBUG_STACKOVERFLOW=y
+CONFIG_LOCKUP_DETECTOR=y
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y
+CONFIG_BOOTPARAM_HUNG_TASK_PANIC=y
+# CONFIG_SCHED_DEBUG is not set
+CONFIG_TIMER_STATS=y
+CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_ECB=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_MD5=y
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_SHA256=m
+CONFIG_CRYPTO_ARC4=m
+CONFIG_CRYPTO_FCRYPT=m
+CONFIG_CRYPTO_DEFLATE=m
+# CONFIG_CRYPTO_HW is not set
+CONFIG_CRC_CCITT=m
+CONFIG_LIBCRC32C=y
+CONFIG_XZ_DEC_X86=y
+CONFIG_XZ_DEC_POWERPC=y
+CONFIG_XZ_DEC_IA64=y
+CONFIG_XZ_DEC_ARM=y
+CONFIG_XZ_DEC_ARMTHUMB=y
+CONFIG_XZ_DEC_SPARC=y
index dbd9d3c991e86ca61e16fff87047bd6956b21420..174c6a12269aed65b22a18d89a8af1ad82013a57 100644 (file)
@@ -312,6 +312,26 @@ config MATH_EMULATION
          such as fsqrt on cores that do have an FPU but do not implement
          them (such as Freescale BookE).
 
+choice
+       prompt "Math emulation options"
+       default MATH_EMULATION_FULL
+       depends on MATH_EMULATION
+
+config MATH_EMULATION_FULL
+       bool "Emulate all the floating point instructions"
+       ---help---
+         Select this option will enable the kernel to support to emulate
+         all the floating point instructions. If your SoC doesn't have
+         a FPU, you should select this.
+
+config MATH_EMULATION_HW_UNIMPLEMENTED
+       bool "Just emulate the FPU unimplemented instructions"
+       ---help---
+         Select this if you know there does have a hardware FPU on your
+         SoC, but some floating point instructions are not implemented by that.
+
+endchoice
+
 config PPC_TRANSACTIONAL_MEM
        bool "Transactional Memory support for POWERPC"
        depends on PPC_BOOK3S_64
index c32ae5ce9fffca3b81dbaa72f6ed3ab576a29a28..554734ff302e89234c0d3ae51d1f11a840119c57 100644 (file)
@@ -22,6 +22,7 @@ zImage.initrd
 zImage.bin.*
 zImage.chrp
 zImage.coff
+zImage.epapr
 zImage.holly
 zImage.*lds
 zImage.miboot
index 923156d03b30dc1a16ac4a6aad1c66a60ceaffe9..508dbdf33c8132eeb6ce6c6bad2514c63d4c00aa 100644 (file)
@@ -33,7 +33,7 @@
  */
 
 /include/ "fsl/b4420si-pre.dtsi"
-/include/ "b4qds.dts"
+/include/ "b4qds.dtsi"
 
 / {
        model = "fsl,B4420QDS";
index 78907f38bb77d1a1ad0d7711971611711fd7a216..6bb3707ffe3d994cc56030883a9e9be5be2456bc 100644 (file)
@@ -33,7 +33,7 @@
  */
 
 /include/ "fsl/b4860si-pre.dtsi"
-/include/ "b4qds.dts"
+/include/ "b4qds.dtsi"
 
 / {
        model = "fsl,B4860QDS";
index 73991547c69b9481759351737d4c4deb27e0bf7b..4c617bf8cdb24af8f3a202d40c4cc98b776768fd 100644 (file)
                };
        };
 
-/include/ "qoriq-mpic.dtsi"
+/include/ "qoriq-mpic4.3.dtsi"
 
        guts: global-utilities@e0000 {
                compatible = "fsl,b4-device-config";
diff --git a/arch/powerpc/boot/dts/fsl/qoriq-mpic4.3.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-mpic4.3.dtsi
new file mode 100644 (file)
index 0000000..64f713c
--- /dev/null
@@ -0,0 +1,149 @@
+/*
+ * QorIQ MPIC device tree stub [ controller @ offset 0x40000 ]
+ *
+ * Copyright 2013 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+mpic: pic@40000 {
+       interrupt-controller;
+       #address-cells = <0>;
+       #interrupt-cells = <4>;
+       reg = <0x40000 0x40000>;
+       compatible = "fsl,mpic";
+       device_type = "open-pic";
+       clock-frequency = <0x0>;
+};
+
+timer@41100 {
+       compatible = "fsl,mpic-global-timer";
+       reg = <0x41100 0x100 0x41300 4>;
+       interrupts = <0 0 3 0
+                     1 0 3 0
+                     2 0 3 0
+                     3 0 3 0>;
+};
+
+msi0: msi@41600 {
+       compatible = "fsl,mpic-msi-v4.3";
+       reg = <0x41600 0x200 0x44148 4>;
+       interrupts = <
+               0xe0 0 0 0
+               0xe1 0 0 0
+               0xe2 0 0 0
+               0xe3 0 0 0
+               0xe4 0 0 0
+               0xe5 0 0 0
+               0xe6 0 0 0
+               0xe7 0 0 0
+               0x100 0 0 0
+               0x101 0 0 0
+               0x102 0 0 0
+               0x103 0 0 0
+               0x104 0 0 0
+               0x105 0 0 0
+               0x106 0 0 0
+               0x107 0 0 0>;
+};
+
+msi1: msi@41800 {
+       compatible = "fsl,mpic-msi-v4.3";
+       reg = <0x41800 0x200 0x45148 4>;
+       interrupts = <
+               0xe8 0 0 0
+               0xe9 0 0 0
+               0xea 0 0 0
+               0xeb 0 0 0
+               0xec 0 0 0
+               0xed 0 0 0
+               0xee 0 0 0
+               0xef 0 0 0
+               0x108 0 0 0
+               0x109 0 0 0
+               0x10a 0 0 0
+               0x10b 0 0 0
+               0x10c 0 0 0
+               0x10d 0 0 0
+               0x10e 0 0 0
+               0x10f 0 0 0>;
+};
+
+msi2: msi@41a00 {
+       compatible = "fsl,mpic-msi-v4.3";
+       reg = <0x41a00 0x200 0x46148 4>;
+       interrupts = <
+               0xf0 0 0 0
+               0xf1 0 0 0
+               0xf2 0 0 0
+               0xf3 0 0 0
+               0xf4 0 0 0
+               0xf5 0 0 0
+               0xf6 0 0 0
+               0xf7 0 0 0
+               0x110 0 0 0
+               0x111 0 0 0
+               0x112 0 0 0
+               0x113 0 0 0
+               0x114 0 0 0
+               0x115 0 0 0
+               0x116 0 0 0
+               0x117 0 0 0>;
+};
+
+msi3: msi@41c00 {
+       compatible = "fsl,mpic-msi-v4.3";
+       reg = <0x41c00 0x200 0x47148 4>;
+       interrupts = <
+               0xf8 0 0 0
+               0xf9 0 0 0
+               0xfa 0 0 0
+               0xfb 0 0 0
+               0xfc 0 0 0
+               0xfd 0 0 0
+               0xfe 0 0 0
+               0xff 0 0 0
+               0x118 0 0 0
+               0x119 0 0 0
+               0x11a 0 0 0
+               0x11b 0 0 0
+               0x11c 0 0 0
+               0x11d 0 0 0
+               0x11e 0 0 0
+               0x11f 0 0 0>;
+};
+
+timer@42100 {
+       compatible = "fsl,mpic-global-timer";
+       reg = <0x42100 0x100 0x42300 4>;
+       interrupts = <4 0 3 0
+                     5 0 3 0
+                     6 0 3 0
+                     7 0 3 0>;
+};
index bd611a9cad32c9ccaac631c9e015380e971c9593..510afa362de141465b50aada0cf0e7c9ceb83536 100644 (file)
                        16 2 1 30>;
        };
 
-/include/ "qoriq-mpic.dtsi"
+/include/ "qoriq-mpic4.3.dtsi"
 
        guts: global-utilities@e0000 {
                compatible = "fsl,t4240-device-config", "fsl,qoriq-device-config-2.0";
diff --git a/arch/powerpc/boot/dts/p1020rdb-pd.dts b/arch/powerpc/boot/dts/p1020rdb-pd.dts
new file mode 100644 (file)
index 0000000..987017e
--- /dev/null
@@ -0,0 +1,280 @@
+/*
+ * P1020 RDB-PD Device Tree Source (32-bit address map)
+ *
+ * Copyright 2013 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/include/ "fsl/p1020si-pre.dtsi"
+/ {
+       model = "fsl,P1020RDB-PD";
+       compatible = "fsl,P1020RDB-PD";
+
+       memory {
+               device_type = "memory";
+       };
+
+       lbc: localbus@ffe05000 {
+               reg = <0x0 0xffe05000 0x0 0x1000>;
+
+               /* NOR, NAND flash, L2 switch and CPLD */
+               ranges = <0x0 0x0 0x0 0xec000000 0x04000000
+                         0x1 0x0 0x0 0xff800000 0x00040000
+                         0x2 0x0 0x0 0xffa00000 0x00020000
+                         0x3 0x0 0x0 0xffb00000 0x00020000>;
+
+               nor@0,0 {
+                       #address-cells = <1>;
+                       #size-cells = <1>;
+                       compatible = "cfi-flash";
+                       reg = <0x0 0x0 0x4000000>;
+                       bank-width = <2>;
+                       device-width = <1>;
+
+                       partition@0 {
+                               /* 128KB for DTB Image */
+                               reg = <0x0 0x00020000>;
+                               label = "NOR DTB Image";
+                       };
+
+                       partition@20000 {
+                               /* 3.875 MB for Linux Kernel Image */
+                               reg = <0x00020000 0x003e0000>;
+                               label = "NOR Linux Kernel Image";
+                       };
+
+                       partition@400000 {
+                               /* 58MB for Root file System */
+                               reg = <0x00400000 0x03a00000>;
+                               label = "NOR Root File System";
+                       };
+
+                       partition@3e00000 {
+                               /* This location must not be altered  */
+                               /* 1M for Vitesse 7385 Switch firmware */
+                               reg = <0x3e00000 0x00100000>;
+                               label = "NOR Vitesse-7385 Firmware";
+                               read-only;
+                       };
+
+                       partition@3f00000 {
+                               /* This location must not be altered  */
+                               /* 512KB for u-boot Bootloader Image */
+                               /* 512KB for u-boot Environment Variables */
+                               reg = <0x03f00000 0x00100000>;
+                               label = "NOR U-Boot Image";
+                               read-only;
+                       };
+               };
+
+               nand@1,0 {
+                       #address-cells = <1>;
+                       #size-cells = <1>;
+                       compatible = "fsl,p1020-fcm-nand",
+                                    "fsl,elbc-fcm-nand";
+                       reg = <0x1 0x0 0x40000>;
+
+                       partition@0 {
+                               /* This location must not be altered  */
+                               /* 1MB for u-boot Bootloader Image */
+                               reg = <0x0 0x00100000>;
+                               label = "NAND U-Boot Image";
+                               read-only;
+                       };
+
+                       partition@100000 {
+                               /* 1MB for DTB Image */
+                               reg = <0x00100000 0x00100000>;
+                               label = "NAND DTB Image";
+                       };
+
+                       partition@200000 {
+                               /* 4MB for Linux Kernel Image */
+                               reg = <0x00200000 0x00400000>;
+                               label = "NAND Linux Kernel Image";
+                       };
+
+                       partition@600000 {
+                               /* 122MB for File System Image */
+                               reg = <0x00600000 0x07a00000>;
+                               label = "NAND File System Image";
+                       };
+               };
+
+               cpld@2,0 {
+                       compatible = "fsl,p1020rdb-pd-cpld";
+                       reg = <0x2 0x0 0x20000>;
+               };
+
+               L2switch@3,0 {
+                       #address-cells = <1>;
+                       #size-cells = <1>;
+                       compatible = "vitesse-7385";
+                       reg = <0x3 0x0 0x20000>;
+               };
+       };
+
+       soc: soc@ffe00000 {
+               ranges = <0x0 0x0 0xffe00000 0x100000>;
+
+               i2c@3000 {
+                       rtc@68 {
+                               compatible = "dallas,ds1339";
+                               reg = <0x68>;
+                       };
+               };
+
+               spi@7000 {
+                       flash@0 {
+                               #address-cells = <1>;
+                               #size-cells = <1>;
+                               compatible = "spansion,s25sl12801";
+                               reg = <0>;
+                               /* input clock */
+                               spi-max-frequency = <40000000>;
+
+                               partition@0 {
+                                       /* 512KB for u-boot Bootloader Image */
+                                       reg = <0x0 0x00080000>;
+                                       label = "SPI U-Boot Image";
+                                       read-only;
+                               };
+
+                               partition@80000 {
+                                       /* 512KB for DTB Image*/
+                                       reg = <0x00080000 0x00080000>;
+                                       label = "SPI DTB Image";
+                               };
+
+                               partition@100000 {
+                                       /* 4MB for Linux Kernel Image */
+                                       reg = <0x00100000 0x00400000>;
+                                       label = "SPI Linux Kernel Image";
+                               };
+
+                               partition@500000 {
+                                       /* 11MB for FS System Image */
+                                       reg = <0x00500000 0x00b00000>;
+                                       label = "SPI File System Image";
+                               };
+                       };
+
+                       slic@0 {
+                               compatible = "zarlink,le88266";
+                               reg = <1>;
+                               spi-max-frequency = <8000000>;
+                       };
+
+                       slic@1 {
+                               compatible = "zarlink,le88266";
+                               reg = <2>;
+                               spi-max-frequency = <8000000>;
+                       };
+               };
+
+               mdio@24000 {
+                       phy0: ethernet-phy@0 {
+                               interrupts = <3 1 0 0>;
+                               reg = <0x0>;
+                       };
+
+                       phy1: ethernet-phy@1 {
+                               interrupts = <2 1 0 0>;
+                               reg = <0x1>;
+                       };
+               };
+
+               mdio@25000 {
+                       tbi1: tbi-phy@11 {
+                               reg = <0x11>;
+                               device_type = "tbi-phy";
+                       };
+               };
+
+               mdio@26000 {
+                       tbi2: tbi-phy@11 {
+                               reg = <0x11>;
+                               device_type = "tbi-phy";
+                       };
+               };
+
+               enet0: ethernet@b0000 {
+                       fixed-link = <1 1 1000 0 0>;
+                       phy-connection-type = "rgmii-id";
+               };
+
+               enet1: ethernet@b1000 {
+                       phy-handle = <&phy0>;
+                       tbi-handle = <&tbi1>;
+                       phy-connection-type = "sgmii";
+               };
+
+               enet2: ethernet@b2000 {
+                       phy-handle = <&phy1>;
+                       phy-connection-type = "rgmii-id";
+               };
+
+               usb@22000 {
+                       phy_type = "ulpi";
+               };
+       };
+
+       pci0: pcie@ffe09000 {
+               reg = <0x0 0xffe09000 0x0 0x1000>;
+               ranges = <0x2000000 0x0 0xa0000000 0x0 0xa0000000 0x0 0x20000000
+                         0x1000000 0x0 0x00000000 0x0 0xffc10000 0x0 0x10000>;
+               pcie@0 {
+                       ranges = <0x2000000 0x0 0xa0000000
+                                 0x2000000 0x0 0xa0000000
+                                 0x0 0x20000000
+
+                                 0x1000000 0x0 0x0
+                                 0x1000000 0x0 0x0
+                                 0x0 0x100000>;
+               };
+       };
+
+       pci1: pcie@ffe0a000 {
+               reg = <0x0 0xffe0a000 0x0 0x1000>;
+               ranges = <0x2000000 0x0 0x80000000 0x0 0x80000000 0x0 0x20000000
+                         0x1000000 0x0 0x00000000 0x0 0xffc00000 0x0 0x10000>;
+               pcie@0 {
+                       ranges = <0x2000000 0x0 0x80000000
+                                 0x2000000 0x0 0x80000000
+                                 0x0 0x20000000
+
+                                 0x1000000 0x0 0x0
+                                 0x1000000 0x0 0x0
+                                 0x0 0x100000>;
+               };
+       };
+};
+
+/include/ "fsl/p1020si-post.dtsi"
index b80bcc69d1f79963c9e4595388e8af6bffe35a16..18badca726c69762e9e4353c54cb064a12a4f94f 100644 (file)
@@ -63,6 +63,7 @@ CONFIG_IPV6=y
 CONFIG_IP_SCTP=m
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
 CONFIG_PROC_DEVICETREE=y
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_RAM=y
index 60027c2a7034b8bb7f88a8cfa498cc120a0e91c4..3dfab4c40c76117ae6664d2af8d4a1e9a0eb8e8a 100644 (file)
@@ -70,6 +70,7 @@ CONFIG_IPV6=y
 CONFIG_IP_SCTP=m
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
 CONFIG_MTD=y
 CONFIG_MTD_CMDLINE_PARTS=y
 CONFIG_MTD_CHAR=y
index 6c8b020806ff6da89c5a7b86d5bf4879444e888d..fa94fb3bb44db843e41a99c3f97799b99cac811f 100644 (file)
@@ -27,6 +27,8 @@ CONFIG_P5040_DS=y
 CONFIG_T4240_QDS=y
 # CONFIG_PPC_OF_BOOT_TRAMPOLINE is not set
 CONFIG_BINFMT_MISC=m
+CONFIG_MATH_EMULATION=y
+CONFIG_MATH_EMULATION_HW_UNIMPLEMENTED=y
 CONFIG_FSL_IFC=y
 CONFIG_PCIEPORTBUS=y
 CONFIG_PCI_MSI=y
@@ -59,6 +61,7 @@ CONFIG_IPV6=y
 CONFIG_IP_SCTP=m
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
 CONFIG_MTD=y
 CONFIG_MTD_PARTITIONS=y
 CONFIG_MTD_OF_PARTS=y
index 09116c6a6719df021771e8f489ab3c67ee43cf6b..23fec79964cf8a030416a93ef8c95741aad138b8 100644 (file)
@@ -42,6 +42,7 @@ CONFIG_INET_ESP=y
 # CONFIG_IPV6 is not set
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
 # CONFIG_FW_LOADER is not set
 CONFIG_MTD=y
 CONFIG_MTD_CHAR=y
index 5a58882e351e73e6246f9b417a4f82ef934b7139..b90c7af2ca1eec64ba8111524b7adb9777d82d86 100644 (file)
@@ -78,6 +78,7 @@ CONFIG_IPV6=y
 CONFIG_IP_SCTP=m
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
 CONFIG_MTD=y
 CONFIG_MTD_PARTITIONS=y
 CONFIG_MTD_OF_PARTS=y
index 152fa05b15e47ae1512015e924f9402252df52f1..9ced851885347bb0c9d92b1b139edade9274fcbe 100644 (file)
@@ -81,6 +81,7 @@ CONFIG_IPV6=y
 CONFIG_IP_SCTP=m
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
 CONFIG_MTD=y
 CONFIG_MTD_PARTITIONS=y
 CONFIG_MTD_OF_PARTS=y
index 6e82f5f9a6fd232357eda6737725fb5ed5d4703a..4b237aa35660ffae9a8b5506fdba78cd58281a2e 100644 (file)
 #define PPC_MTOCRF(FXM, RS) MTOCRF((FXM), RS)
 #define PPC_LR_STKOFF  16
 #define PPC_MIN_STKFRM 112
+
+#ifdef __BIG_ENDIAN__
+#define LDX_BE stringify_in_c(ldx)
+#define STDX_BE        stringify_in_c(stdx)
+#else
+#define LDX_BE stringify_in_c(ldbrx)
+#define STDX_BE        stringify_in_c(stdbrx)
+#endif
+
 #else /* 32-bit */
 
 /* operations for longs and pointers */
index 906f46e3100642455fef5ae957b68e7db9be0275..89fc382648bc8c9f20b673df66377614150f7578 100644 (file)
@@ -13,6 +13,7 @@ extern void btext_update_display(unsigned long phys, int width, int height,
 extern void btext_setup_display(int width, int height, int depth, int pitch,
                                unsigned long address);
 extern void btext_prepare_BAT(void);
+extern void btext_map(void);
 extern void btext_unmap(void);
 
 extern void btext_drawchar(char c);
index b843e35122e8934d7ea902e71ef0cae36f670611..5b9312220e849e40dad8d7516b303b619431d08b 100644 (file)
@@ -32,13 +32,7 @@ extern void flush_dcache_page(struct page *page);
 
 extern void __flush_disable_L1(void);
 
-extern void __flush_icache_range(unsigned long, unsigned long);
-static inline void flush_icache_range(unsigned long start, unsigned long stop)
-{
-       if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
-               __flush_icache_range(start, stop);
-}
-
+extern void flush_icache_range(unsigned long, unsigned long);
 extern void flush_icache_user_range(struct vm_area_struct *vma,
                                    struct page *page, unsigned long addr,
                                    int len);
index 77e97dd0c15d8b495ee245a3fbcc14aa38a2f785..38faeded7d595d506de55188ca8180c6e31ba67f 100644 (file)
@@ -28,6 +28,9 @@ struct dev_archdata {
                void            *iommu_table_base;
        } dma_data;
 
+#ifdef CONFIG_IOMMU_API
+       void                    *iommu_domain;
+#endif
 #ifdef CONFIG_SWIOTLB
        dma_addr_t              max_direct_dma_addr;
 #endif
index 63f2a22e9954a10f6996308de941de6c0fbcad83..5a8b82aa7241c1d6fae279ebf80864a94db32558 100644 (file)
@@ -46,8 +46,6 @@ extern struct ppc_emulated {
        struct ppc_emulated_entry unaligned;
 #ifdef CONFIG_MATH_EMULATION
        struct ppc_emulated_entry math;
-#elif defined(CONFIG_8XX_MINIMAL_FPEMU)
-       struct ppc_emulated_entry 8xx;
 #endif
 #ifdef CONFIG_VSX
        struct ppc_emulated_entry vsx;
index d3d634274d2c4de6fe63610f872d3eca1eb7738e..86b0ac79990ca4ebda1ade6d67524d588bb148e9 100644 (file)
 extern bool epapr_paravirt_enabled;
 extern u32 epapr_hypercall_start[];
 
+#ifdef CONFIG_EPAPR_PARAVIRT
+int __init epapr_paravirt_early_init(void);
+#else
+static inline int epapr_paravirt_early_init(void) { return 0; }
+#endif
+
 /*
  * We use "uintptr_t" to define a register because it's guaranteed to be a
  * 32-bit integer on a 32-bit platform, and a 64-bit integer on a 64-bit
index 07ca627e52c0b25ed45fb61128b3dc2210a4f541..cca12f08484201aaf86a16920a2edcf7921a6fd5 100644 (file)
 #define EX_LR          72
 #define EX_CFAR                80
 #define EX_PPR         88      /* SMT thread status register (priority) */
+#define EX_CTR         96
 
 #ifdef CONFIG_RELOCATABLE
 #define __EXCEPTION_RELON_PROLOG_PSERIES_1(label, h)                   \
        ld      r12,PACAKBASE(r13);     /* get high part of &label */   \
        mfspr   r11,SPRN_##h##SRR0;     /* save SRR0 */                 \
        LOAD_HANDLER(r12,label);                                        \
-       mtlr    r12;                                                    \
+       mtctr   r12;                                                    \
        mfspr   r12,SPRN_##h##SRR1;     /* and SRR1 */                  \
        li      r10,MSR_RI;                                             \
        mtmsrd  r10,1;                  /* Set RI (EE=0) */             \
-       blr;
+       bctr;
 #else
 /* If not relocatable, we can jump directly -- and save messing with LR */
 #define __EXCEPTION_RELON_PROLOG_PSERIES_1(label, h)                   \
 
 #if defined(CONFIG_RELOCATABLE)
 /*
- * If we support interrupts with relocation on AND we're a relocatable
- * kernel, we need to use LR to get to the 2nd level handler.  So, save/restore
- * it when required.
+ * If we support interrupts with relocation on AND we're a relocatable kernel,
+ * we need to use CTR to get to the 2nd level handler.  So, save/restore it
+ * when required.
  */
-#define SAVE_LR(reg, area)     mflr    reg ;   std     reg,area+EX_LR(r13)
-#define GET_LR(reg, area)                      ld      reg,area+EX_LR(r13)
-#define RESTORE_LR(reg, area)  ld      reg,area+EX_LR(r13) ; mtlr reg
+#define SAVE_CTR(reg, area)    mfctr   reg ;   std     reg,area+EX_CTR(r13)
+#define GET_CTR(reg, area)                     ld      reg,area+EX_CTR(r13)
+#define RESTORE_CTR(reg, area) ld      reg,area+EX_CTR(r13) ; mtctr reg
 #else
-/* ...else LR is unused and in register. */
-#define SAVE_LR(reg, area)
-#define GET_LR(reg, area)      mflr    reg
-#define RESTORE_LR(reg, area)
+/* ...else CTR is unused and in register. */
+#define SAVE_CTR(reg, area)
+#define GET_CTR(reg, area)     mfctr   reg
+#define RESTORE_CTR(reg, area)
 #endif
 
 /*
@@ -164,7 +165,7 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
 #define __EXCEPTION_PROLOG_1(area, extra, vec)                         \
        OPT_SAVE_REG_TO_PACA(area+EX_PPR, r9, CPU_FTR_HAS_PPR);         \
        OPT_SAVE_REG_TO_PACA(area+EX_CFAR, r10, CPU_FTR_CFAR);          \
-       SAVE_LR(r10, area);                                             \
+       SAVE_CTR(r10, area);                                            \
        mfcr    r9;                                                     \
        extra(vec);                                                     \
        std     r11,area+EX_R11(r13);                                   \
@@ -270,7 +271,7 @@ do_kvm_##n:                                                         \
        sth     r1,PACA_TRAP_SAVE(r13);                                    \
        std     r3,area+EX_R3(r13);                                        \
        addi    r3,r13,area;            /* r3 -> where regs are saved*/    \
-       RESTORE_LR(r1, area);                                              \
+       RESTORE_CTR(r1, area);                                             \
        b       bad_stack;                                                 \
 3:     std     r9,_CCR(r1);            /* save CR in stackframe        */ \
        std     r11,_NIP(r1);           /* save SRR0 in stackframe      */ \
@@ -298,10 +299,10 @@ do_kvm_##n:                                                               \
        ld      r10,area+EX_CFAR(r13);                                     \
        std     r10,ORIG_GPR3(r1);                                         \
        END_FTR_SECTION_NESTED(CPU_FTR_CFAR, CPU_FTR_CFAR, 66);            \
-       GET_LR(r9,area);                /* Get LR, later save to stack  */ \
+       mflr    r9;                     /* Get LR, later save to stack  */ \
        ld      r2,PACATOC(r13);        /* get kernel TOC into r2       */ \
        std     r9,_LINK(r1);                                              \
-       mfctr   r10;                    /* save CTR in stackframe       */ \
+       GET_CTR(r10, area);                                                \
        std     r10,_CTR(r1);                                              \
        lbz     r10,PACASOFTIRQEN(r13);                            \
        mfspr   r11,SPRN_XER;           /* save XER in stackframe       */ \
@@ -479,7 +480,7 @@ label##_relon_hv:                                                   \
  */
 
 /* Exception addition: Hard disable interrupts */
-#define DISABLE_INTS   SOFT_DISABLE_INTS(r10,r11)
+#define DISABLE_INTS   RECONCILE_IRQ_STATE(r10,r11)
 
 #define ADD_NVGPRS                             \
        bl      .save_nvgprs
diff --git a/arch/powerpc/include/asm/fsl_pamu_stash.h b/arch/powerpc/include/asm/fsl_pamu_stash.h
new file mode 100644 (file)
index 0000000..caa1b21
--- /dev/null
@@ -0,0 +1,39 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ *
+ * Copyright (C) 2013 Freescale Semiconductor, Inc.
+ *
+ */
+
+#ifndef __FSL_PAMU_STASH_H
+#define __FSL_PAMU_STASH_H
+
+/* cache stash targets */
+enum pamu_stash_target {
+       PAMU_ATTR_CACHE_L1 = 1,
+       PAMU_ATTR_CACHE_L2,
+       PAMU_ATTR_CACHE_L3,
+};
+
+/*
+ * This attribute allows configuring stashig specific parameters
+ * in the PAMU hardware.
+ */
+
+struct pamu_stash_attribute {
+       u32     cpu;    /* cpu number */
+       u32     cache;  /* cache to stash to: L1,L2,L3 */
+};
+
+#endif  /* __FSL_PAMU_STASH_H */
index dd15e5e37d6d47b2862516e15c130116471ac633..5a64757dc0d1eb0206f2499a7e26e39fec7cda21 100644 (file)
@@ -69,8 +69,18 @@ extern unsigned long pci_dram_offset;
 
 extern resource_size_t isa_mem_base;
 
-#if defined(CONFIG_PPC32) && defined(CONFIG_PPC_INDIRECT_IO)
-#error CONFIG_PPC_INDIRECT_IO is not yet supported on 32 bits
+/* Boolean set by platform if PIO accesses are suppored while _IO_BASE
+ * is not set or addresses cannot be translated to MMIO. This is typically
+ * set when the platform supports "special" PIO accesses via a non memory
+ * mapped mechanism, and allows things like the early udbg UART code to
+ * function.
+ */
+extern bool isa_io_special;
+
+#ifdef CONFIG_PPC32
+#if defined(CONFIG_PPC_INDIRECT_PIO) || defined(CONFIG_PPC_INDIRECT_MMIO)
+#error CONFIG_PPC_INDIRECT_{PIO,MMIO} are not yet supported on 32 bits
+#endif
 #endif
 
 /*
@@ -222,9 +232,9 @@ extern void _memcpy_toio(volatile void __iomem *dest, const void *src,
  * for PowerPC is as close as possible to the x86 version of these, and thus
  * provides fairly heavy weight barriers for the non-raw versions
  *
- * In addition, they support a hook mechanism when CONFIG_PPC_INDIRECT_IO
- * allowing the platform to provide its own implementation of some or all
- * of the accessors.
+ * In addition, they support a hook mechanism when CONFIG_PPC_INDIRECT_MMIO
+ * or CONFIG_PPC_INDIRECT_PIO are set allowing the platform to provide its
+ * own implementation of some or all of the accessors.
  */
 
 /*
@@ -240,8 +250,8 @@ extern void _memcpy_toio(volatile void __iomem *dest, const void *src,
 
 /* Indirect IO address tokens:
  *
- * When CONFIG_PPC_INDIRECT_IO is set, the platform can provide hooks
- * on all IOs. (Note that this is all 64 bits only for now)
+ * When CONFIG_PPC_INDIRECT_MMIO is set, the platform can provide hooks
+ * on all MMIOs. (Note that this is all 64 bits only for now)
  *
  * To help platforms who may need to differenciate MMIO addresses in
  * their hooks, a bitfield is reserved for use by the platform near the
@@ -263,11 +273,14 @@ extern void _memcpy_toio(volatile void __iomem *dest, const void *src,
  *
  * The direct IO mapping operations will then mask off those bits
  * before doing the actual access, though that only happen when
- * CONFIG_PPC_INDIRECT_IO is set, thus be careful when you use that
+ * CONFIG_PPC_INDIRECT_MMIO is set, thus be careful when you use that
  * mechanism
+ *
+ * For PIO, there is a separate CONFIG_PPC_INDIRECT_PIO which makes
+ * all PIO functions call through a hook.
  */
 
-#ifdef CONFIG_PPC_INDIRECT_IO
+#ifdef CONFIG_PPC_INDIRECT_MMIO
 #define PCI_IO_IND_TOKEN_MASK  0x0fff000000000000ul
 #define PCI_IO_IND_TOKEN_SHIFT 48
 #define PCI_FIX_ADDR(addr)                                             \
@@ -672,7 +685,7 @@ extern void __iomem * __ioremap_at(phys_addr_t pa, void *ea,
 extern void __iounmap_at(void *ea, unsigned long size);
 
 /*
- * When CONFIG_PPC_INDIRECT_IO is set, we use the generic iomap implementation
+ * When CONFIG_PPC_INDIRECT_PIO is set, we use the generic iomap implementation
  * which needs some additional definitions here. They basically allow PIO
  * space overall to be 1GB. This will work as long as we never try to use
  * iomap to map MMIO below 1GB which should be fine on ppc64
index 6f9b6e23dc5af74c8ee1423b052d2ada384c3d36..f51a5580bfd0b2dfbe1abb5edd68e5ab2d3541b7 100644 (file)
 #define TRACE_DISABLE_INTS     TRACE_WITH_FRAME_BUFFER(.trace_hardirqs_off)
 
 /*
- * This is used by assembly code to soft-disable interrupts
+ * This is used by assembly code to soft-disable interrupts first and
+ * reconcile irq state.
  */
-#define SOFT_DISABLE_INTS(__rA, __rB)          \
+#define RECONCILE_IRQ_STATE(__rA, __rB)                \
        lbz     __rA,PACASOFTIRQEN(r13);        \
        lbz     __rB,PACAIRQHAPPENED(r13);      \
        cmpwi   cr0,__rA,0;                     \
@@ -58,7 +59,7 @@
 #define TRACE_ENABLE_INTS
 #define TRACE_DISABLE_INTS
 
-#define SOFT_DISABLE_INTS(__rA, __rB)          \
+#define RECONCILE_IRQ_STATE(__rA, __rB)                \
        lbz     __rA,PACAIRQHAPPENED(r13);      \
        li      __rB,0;                         \
        ori     __rA,__rA,PACA_IRQ_HARD_DIS;    \
index 9b12f88d4adb4f0977c46bedf2f555b055f71955..4470d1e34d23174d8ec33cb1d9b32fd5ea8dd78c 100644 (file)
 struct lppaca {
        /* cacheline 1 contains read-only data */
 
-       u32     desc;                   /* Eye catcher 0xD397D781 */
-       u16     size;                   /* Size of this struct */
-       u16     reserved1;
-       u16     reserved2:14;
-       u8      shared_proc:1;          /* Shared processor indicator */
-       u8      secondary_thread:1;     /* Secondary thread indicator */
+       __be32  desc;                   /* Eye catcher 0xD397D781 */
+       __be16  size;                   /* Size of this struct */
+       u8      reserved1[3];
+       u8      __old_status;           /* Old status, including shared proc */
        u8      reserved3[14];
-       volatile u32 dyn_hw_node_id;    /* Dynamic hardware node id */
-       volatile u32 dyn_hw_proc_id;    /* Dynamic hardware proc id */
+       volatile __be32 dyn_hw_node_id; /* Dynamic hardware node id */
+       volatile __be32 dyn_hw_proc_id; /* Dynamic hardware proc id */
        u8      reserved4[56];
        volatile u8 vphn_assoc_counts[8]; /* Virtual processor home node */
                                          /* associativity change counters */
@@ -73,9 +71,9 @@ struct lppaca {
        u8      fpregs_in_use;
        u8      pmcregs_in_use;
        u8      reserved8[28];
-       u64     wait_state_cycles;      /* Wait cycles for this proc */
+       __be64  wait_state_cycles;      /* Wait cycles for this proc */
        u8      reserved9[28];
-       u16     slb_count;              /* # of SLBs to maintain */
+       __be16  slb_count;              /* # of SLBs to maintain */
        u8      idle;                   /* Indicate OS is idle */
        u8      vmxregs_in_use;
 
@@ -89,17 +87,17 @@ struct lppaca {
         * NOTE: This value will ALWAYS be zero for dedicated processors and
         * will NEVER be zero for shared processors (ie, initialized to a 1).
         */
-       volatile u32 yield_count;
-       volatile u32 dispersion_count;  /* dispatch changed physical cpu */
-       volatile u64 cmo_faults;        /* CMO page fault count */
-       volatile u64 cmo_fault_time;    /* CMO page fault time */
+       volatile __be32 yield_count;
+       volatile __be32 dispersion_count; /* dispatch changed physical cpu */
+       volatile __be64 cmo_faults;     /* CMO page fault count */
+       volatile __be64 cmo_fault_time; /* CMO page fault time */
        u8      reserved10[104];
 
        /* cacheline 4-5 */
 
-       u32     page_ins;               /* CMO Hint - # page ins by OS */
+       __be32  page_ins;               /* CMO Hint - # page ins by OS */
        u8      reserved11[148];
-       volatile u64 dtl_idx;           /* Dispatch Trace Log head index */
+       volatile __be64 dtl_idx;                /* Dispatch Trace Log head index */
        u8      reserved12[96];
 } __attribute__((__aligned__(0x400)));
 
@@ -107,18 +105,30 @@ extern struct lppaca lppaca[];
 
 #define lppaca_of(cpu) (*paca[cpu].lppaca_ptr)
 
+/*
+ * Old kernels used a reserved bit in the VPA to determine if it was running
+ * in shared processor mode. New kernels look for a non zero yield count
+ * but KVM still needs to set the bit to keep the old stuff happy.
+ */
+#define LPPACA_OLD_SHARED_PROC         2
+
+static inline bool lppaca_shared_proc(struct lppaca *l)
+{
+       return l->yield_count != 0;
+}
+
 /*
  * SLB shadow buffer structure as defined in the PAPR.  The save_area
  * contains adjacent ESID and VSID pairs for each shadowed SLB.  The
  * ESID is stored in the lower 64bits, then the VSID.
  */
 struct slb_shadow {
-       u32     persistent;             /* Number of persistent SLBs */
-       u32     buffer_length;          /* Total shadow buffer length */
-       u64     reserved;
+       __be32  persistent;             /* Number of persistent SLBs */
+       __be32  buffer_length;          /* Total shadow buffer length */
+       __be64  reserved;
        struct  {
-               u64     esid;
-               u64     vsid;
+               __be64     esid;
+               __be64  vsid;
        } save_area[SLB_NUM_BOLTED];
 } ____cacheline_aligned;
 
@@ -130,14 +140,14 @@ extern struct slb_shadow slb_shadow[];
 struct dtl_entry {
        u8      dispatch_reason;
        u8      preempt_reason;
-       u16     processor_id;
-       u32     enqueue_to_dispatch_time;
-       u32     ready_to_enqueue_time;
-       u32     waiting_to_ready_time;
-       u64     timebase;
-       u64     fault_addr;
-       u64     srr0;
-       u64     srr1;
+       __be16  processor_id;
+       __be32  enqueue_to_dispatch_time;
+       __be32  ready_to_enqueue_time;
+       __be32  waiting_to_ready_time;
+       __be64  timebase;
+       __be64  fault_addr;
+       __be64  srr0;
+       __be64  srr1;
 };
 
 #define DISPATCH_LOG_BYTES     4096    /* bytes per cpu */
index 4a1ac9fbf18697035d346b96b2ead190abf58cb9..754f93d208fa62b084e9887db006dc461973c68d 100644 (file)
@@ -396,7 +396,14 @@ extern struct bus_type mpic_subsys;
 #define        MPIC_REGSET_TSI108              MPIC_REGSET(1)  /* Tsi108/109 PIC */
 
 /* Get the version of primary MPIC */
+#ifdef CONFIG_MPIC
 extern u32 fsl_mpic_primary_get_version(void);
+#else
+static inline u32 fsl_mpic_primary_get_version(void)
+{
+       return 0;
+}
+#endif
 
 /* Allocate the controller structure and setup the linux irq descs
  * for the range if interrupts passed in. No HW initialization is
index 029fe85722aaac41a6c5ff0e0ed4c3ae4dd5cfc2..48ad6780c6d980d3fc5ac4921c0f3fdd12291e80 100644 (file)
@@ -124,6 +124,10 @@ extern int opal_enter_rtas(struct rtas_args *args,
 #define OPAL_PCI_POLL                          62
 #define OPAL_PCI_MSI_EOI                       63
 #define OPAL_PCI_GET_PHB_DIAG_DATA2            64
+#define OPAL_XSCOM_READ                                65
+#define OPAL_XSCOM_WRITE                       66
+#define OPAL_LPC_READ                          67
+#define OPAL_LPC_WRITE                         68
 
 #ifndef __ASSEMBLY__
 
@@ -337,6 +341,17 @@ enum OpalEpowStatus {
        OPAL_EPOW_OVER_INTERNAL_TEMP = 3
 };
 
+/*
+ * Address cycle types for LPC accesses. These also correspond
+ * to the content of the first cell of the "reg" property for
+ * device nodes on the LPC bus
+ */
+enum OpalLPCAddressType {
+       OPAL_LPC_MEM    = 0,
+       OPAL_LPC_IO     = 1,
+       OPAL_LPC_FW     = 2,
+};
+
 struct opal_machine_check_event {
        enum OpalMCE_Version    version:8;      /* 0x00 */
        uint8_t                 in_use;         /* 0x01 */
@@ -632,6 +647,14 @@ int64_t opal_pci_next_error(uint64_t phb_id, uint64_t *first_frozen_pe,
                            uint16_t *pci_error_type, uint16_t *severity);
 int64_t opal_pci_poll(uint64_t phb_id);
 
+int64_t opal_xscom_read(uint32_t gcid, uint32_t pcb_addr, uint64_t *val);
+int64_t opal_xscom_write(uint32_t gcid, uint32_t pcb_addr, uint64_t val);
+
+int64_t opal_lpc_write(uint32_t chip_id, enum OpalLPCAddressType addr_type,
+                      uint32_t addr, uint32_t data, uint32_t sz);
+int64_t opal_lpc_read(uint32_t chip_id, enum OpalLPCAddressType addr_type,
+                     uint32_t addr, uint32_t *data, uint32_t sz);
+
 /* Internal functions */
 extern int early_init_dt_scan_opal(unsigned long node, const char *uname, int depth, void *data);
 
@@ -664,6 +687,8 @@ extern int opal_machine_check(struct pt_regs *regs);
 
 extern void opal_shutdown(void);
 
+extern void opal_lpc_init(void);
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* __OPAL_H */
index 77c91e74b612675cd7312ea1ada7dfba7b997e9c..a5954cebbc5594228c655906da83714eb6c772cd 100644 (file)
@@ -68,8 +68,13 @@ struct paca_struct {
         * instruction.  They must travel together and be properly
         * aligned.
         */
+#ifdef __BIG_ENDIAN__
        u16 lock_token;                 /* Constant 0x8000, used in locks */
        u16 paca_index;                 /* Logical processor number */
+#else
+       u16 paca_index;                 /* Logical processor number */
+       u16 lock_token;                 /* Constant 0x8000, used in locks */
+#endif
 
        u64 kernel_toc;                 /* Kernel TOC address */
        u64 kernelbase;                 /* Base address of kernel */
@@ -93,9 +98,9 @@ struct paca_struct {
         * Now, starting in cacheline 2, the exception save areas
         */
        /* used for most interrupts/exceptions */
-       u64 exgen[12] __attribute__((aligned(0x80)));
-       u64 exmc[12];           /* used for machine checks */
-       u64 exslb[12];          /* used for SLB/segment table misses
+       u64 exgen[13] __attribute__((aligned(0x80)));
+       u64 exmc[13];           /* used for machine checks */
+       u64 exslb[13];          /* used for SLB/segment table misses
                                 * on the linear mapping */
        /* SLB related definitions */
        u16 vmalloc_sllp;
index 32d0d2018faf84afe15814f13cb7f1fe4933fa4e..4ca90a39d6d01af63da46c73d19b816b0979e538 100644 (file)
@@ -159,7 +159,7 @@ struct pci_dn {
 
        int     pci_ext_config_space;   /* for pci devices */
 
-       int     force_32bit_msi:1;
+       bool    force_32bit_msi;
 
        struct  pci_dev *pcidev;        /* back-pointer to the pci device */
 #ifdef CONFIG_EEH
index 718a9fa94e68eebafec29bcaa4f0214b55db9846..a58165450f6fa659cbd261624855c7791a4c3fc5 100644 (file)
@@ -13,7 +13,7 @@
 #include <linux/types.h>
 #include <asm/hw_irq.h>
 
-#define MAX_HWEVENTS 4
+#define MAX_HWEVENTS 6
 
 /* event flags */
 #define FSL_EMB_EVENT_VALID      1
index eccfc161e58e7275da6aebd1d21090de1d4e86ab..d7fe9f5b46d457cf0895c7dee542bc2e40fb3e49 100644 (file)
 #define        __REGA0_R30     30
 #define        __REGA0_R31     31
 
+/* opcode and xopcode for instructions */
+#define OP_TRAP 3
+#define OP_TRAP_64 2
+
+#define OP_31_XOP_TRAP      4
+#define OP_31_XOP_LWZX      23
+#define OP_31_XOP_DCBST     54
+#define OP_31_XOP_LWZUX     55
+#define OP_31_XOP_TRAP_64   68
+#define OP_31_XOP_DCBF      86
+#define OP_31_XOP_LBZX      87
+#define OP_31_XOP_STWX      151
+#define OP_31_XOP_STBX      215
+#define OP_31_XOP_LBZUX     119
+#define OP_31_XOP_STBUX     247
+#define OP_31_XOP_LHZX      279
+#define OP_31_XOP_LHZUX     311
+#define OP_31_XOP_MFSPR     339
+#define OP_31_XOP_LHAX      343
+#define OP_31_XOP_LHAUX     375
+#define OP_31_XOP_STHX      407
+#define OP_31_XOP_STHUX     439
+#define OP_31_XOP_MTSPR     467
+#define OP_31_XOP_DCBI      470
+#define OP_31_XOP_LWBRX     534
+#define OP_31_XOP_TLBSYNC   566
+#define OP_31_XOP_STWBRX    662
+#define OP_31_XOP_LHBRX     790
+#define OP_31_XOP_STHBRX    918
+
+#define OP_LWZ  32
+#define OP_LD   58
+#define OP_LWZU 33
+#define OP_LBZ  34
+#define OP_LBZU 35
+#define OP_STW  36
+#define OP_STWU 37
+#define OP_STD  62
+#define OP_STB  38
+#define OP_STBU 39
+#define OP_LHZ  40
+#define OP_LHZU 41
+#define OP_LHA  42
+#define OP_LHAU 43
+#define OP_STH  44
+#define OP_STHU 45
+
 /* sorted alphabetically */
 #define PPC_INST_BHRBE                 0x7c00025c
 #define PPC_INST_CLRBHRB               0x7c00035c
index 2f1b6c5f8174f4a4759086f21668dda5c8477b1d..4ebb4f8f41887d686c336e1e8b0ce02d6709f41d 100644 (file)
@@ -54,7 +54,8 @@ BEGIN_FW_FTR_SECTION;                                                 \
        /* from user - see if there are any DTL entries to process */   \
        ld      r10,PACALPPACAPTR(r13); /* get ptr to VPA */            \
        ld      r11,PACA_DTL_RIDX(r13); /* get log read index */        \
-       ld      r10,LPPACA_DTLIDX(r10); /* get log write index */       \
+       addi    r10,r10,LPPACA_DTLIDX;                                  \
+       LDX_BE  r10,0,r10;              /* get log write index */       \
        cmpd    cr1,r11,r10;                                            \
        beq+    cr1,33f;                                                \
        bl      .accumulate_stolen_time;                                \
@@ -219,19 +220,6 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
 #define REST_8VSRS(n,b,base)   REST_4VSRS(n,b,base); REST_4VSRS(n+4,b,base)
 #define REST_16VSRS(n,b,base)  REST_8VSRS(n,b,base); REST_8VSRS(n+8,b,base)
 #define REST_32VSRS(n,b,base)  REST_16VSRS(n,b,base); REST_16VSRS(n+16,b,base)
-/* Save the upper 32 VSRs (32-63) in the thread VSX region (0-31) */
-#define SAVE_VSRU(n,b,base)    li b,THREAD_VR0+(16*(n));  STXVD2X(n+32,R##base,R##b)
-#define SAVE_2VSRSU(n,b,base)  SAVE_VSRU(n,b,base); SAVE_VSRU(n+1,b,base)
-#define SAVE_4VSRSU(n,b,base)  SAVE_2VSRSU(n,b,base); SAVE_2VSRSU(n+2,b,base)
-#define SAVE_8VSRSU(n,b,base)  SAVE_4VSRSU(n,b,base); SAVE_4VSRSU(n+4,b,base)
-#define SAVE_16VSRSU(n,b,base) SAVE_8VSRSU(n,b,base); SAVE_8VSRSU(n+8,b,base)
-#define SAVE_32VSRSU(n,b,base) SAVE_16VSRSU(n,b,base); SAVE_16VSRSU(n+16,b,base)
-#define REST_VSRU(n,b,base)    li b,THREAD_VR0+(16*(n)); LXVD2X(n+32,R##base,R##b)
-#define REST_2VSRSU(n,b,base)  REST_VSRU(n,b,base); REST_VSRU(n+1,b,base)
-#define REST_4VSRSU(n,b,base)  REST_2VSRSU(n,b,base); REST_2VSRSU(n+2,b,base)
-#define REST_8VSRSU(n,b,base)  REST_4VSRSU(n,b,base); REST_4VSRSU(n+4,b,base)
-#define REST_16VSRSU(n,b,base) REST_8VSRSU(n,b,base); REST_8VSRSU(n+8,b,base)
-#define REST_32VSRSU(n,b,base) REST_16VSRSU(n,b,base); REST_16VSRSU(n+16,b,base)
 
 /*
  * b = base register for addressing, o = base offset from register of 1st EVR
index bc2da154f68be579d19e81e5789073e436ac838b..e6ec2cffba16a91aa10645d71d105d9aafd44a71 100644 (file)
@@ -38,8 +38,9 @@ extern unsigned long pci_address_to_pio(phys_addr_t address);
 /* Parse the ibm,dma-window property of an OF node into the busno, phys and
  * size parameters.
  */
-void of_parse_dma_window(struct device_node *dn, const void *dma_window_prop,
-               unsigned long *busno, unsigned long *phys, unsigned long *size);
+void of_parse_dma_window(struct device_node *dn, const __be32 *dma_window,
+                        unsigned long *busno, unsigned long *phys,
+                        unsigned long *size);
 
 extern void kdump_move_device_tree(void);
 
@@ -58,6 +59,8 @@ static inline int of_node_to_nid(struct device_node *device) { return 0; }
 
 extern void of_instantiate_rtc(void);
 
+extern int of_get_ibm_chip_id(struct device_node *np);
+
 /* The of_drconf_cell struct defines the layout of the LMB array
  * specified in the device tree property
  * ibm,dynamic-reconfiguration-memory/ibm,dynamic-memory
index 99222e27f1739c543faa916d3d766be5f99e5fbd..dcafcd6ffcdde7772b739d83f599b4f6f2ac1746 100644 (file)
 #define MSR_64BIT      MSR_SF
 
 /* Server variant */
-#define MSR_           MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_ISF |MSR_HV
-#define MSR_KERNEL     MSR_ | MSR_64BIT
-#define MSR_USER32     MSR_ | MSR_PR | MSR_EE
-#define MSR_USER64     MSR_USER32 | MSR_64BIT
+#define MSR_           (MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_ISF |MSR_HV)
+#define MSR_KERNEL     (MSR_ | MSR_64BIT)
+#define MSR_USER32     (MSR_ | MSR_PR | MSR_EE)
+#define MSR_USER64     (MSR_USER32 | MSR_64BIT)
 #elif defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_8xx)
 /* Default MSR for kernel mode. */
 #define MSR_KERNEL     (MSR_ME|MSR_RI|MSR_IR|MSR_DR)
index b417de3cc2c4c336ad491edbf8cbf6cc2b433365..ed8f836da094a428088c76cdb2e3def321774821 100644 (file)
 #if defined(CONFIG_PPC_BOOK3E_64)
 #define MSR_64BIT      MSR_CM
 
-#define MSR_           MSR_ME | MSR_CE
-#define MSR_KERNEL     MSR_ | MSR_64BIT
-#define MSR_USER32     MSR_ | MSR_PR | MSR_EE
-#define MSR_USER64     MSR_USER32 | MSR_64BIT
+#define MSR_           (MSR_ME | MSR_CE)
+#define MSR_KERNEL     (MSR_ | MSR_64BIT)
+#define MSR_USER32     (MSR_ | MSR_PR | MSR_EE)
+#define MSR_USER64     (MSR_USER32 | MSR_64BIT)
 #elif defined (CONFIG_40x)
 #define MSR_KERNEL     (MSR_ME|MSR_RI|MSR_IR|MSR_DR|MSR_CE)
 #define MSR_USER       (MSR_KERNEL|MSR_PR|MSR_EE)
index 77bb71cfd991cd7236cd4467c01828739d70eb6e..0e3ddf5177f655016a6aa6e27fa278c3b820cd5c 100644 (file)
 /* Freescale Book E Performance Monitor APU Registers */
 #define PMRN_PMC0      0x010   /* Performance Monitor Counter 0 */
 #define PMRN_PMC1      0x011   /* Performance Monitor Counter 1 */
-#define PMRN_PMC2      0x012   /* Performance Monitor Counter 1 */
-#define PMRN_PMC3      0x013   /* Performance Monitor Counter 1 */
+#define PMRN_PMC2      0x012   /* Performance Monitor Counter 2 */
+#define PMRN_PMC3      0x013   /* Performance Monitor Counter 3 */
+#define PMRN_PMC4      0x014   /* Performance Monitor Counter 4 */
+#define PMRN_PMC5      0x015   /* Performance Monitor Counter 5 */
 #define PMRN_PMLCA0    0x090   /* PM Local Control A0 */
 #define PMRN_PMLCA1    0x091   /* PM Local Control A1 */
 #define PMRN_PMLCA2    0x092   /* PM Local Control A2 */
 #define PMRN_PMLCA3    0x093   /* PM Local Control A3 */
+#define PMRN_PMLCA4    0x094   /* PM Local Control A4 */
+#define PMRN_PMLCA5    0x095   /* PM Local Control A5 */
 
 #define PMLCA_FC       0x80000000      /* Freeze Counter */
 #define PMLCA_FCS      0x40000000      /* Freeze in Supervisor */
 #define PMLCA_FCM1     0x10000000      /* Freeze when PMM==1 */
 #define PMLCA_FCM0     0x08000000      /* Freeze when PMM==0 */
 #define PMLCA_CE       0x04000000      /* Condition Enable */
+#define PMLCA_FGCS1    0x00000002      /* Freeze in guest state */
+#define PMLCA_FGCS0    0x00000001      /* Freeze in hypervisor state */
 
-#define PMLCA_EVENT_MASK 0x00ff0000    /* Event field */
+#define PMLCA_EVENT_MASK 0x01ff0000    /* Event field */
 #define PMLCA_EVENT_SHIFT      16
 
 #define PMRN_PMLCB0    0x110   /* PM Local Control B0 */
 #define PMRN_PMLCB1    0x111   /* PM Local Control B1 */
 #define PMRN_PMLCB2    0x112   /* PM Local Control B2 */
 #define PMRN_PMLCB3    0x113   /* PM Local Control B3 */
+#define PMRN_PMLCB4    0x114   /* PM Local Control B4 */
+#define PMRN_PMLCB5    0x115   /* PM Local Control B5 */
 
 #define PMLCB_THRESHMUL_MASK   0x0700  /* Threshold Multiple Field */
 #define PMLCB_THRESHMUL_SHIFT  8
 
 #define PMRN_UPMC0     0x000   /* User Performance Monitor Counter 0 */
 #define PMRN_UPMC1     0x001   /* User Performance Monitor Counter 1 */
-#define PMRN_UPMC2     0x002   /* User Performance Monitor Counter 1 */
-#define PMRN_UPMC3     0x003   /* User Performance Monitor Counter 1 */
+#define PMRN_UPMC2     0x002   /* User Performance Monitor Counter 2 */
+#define PMRN_UPMC3     0x003   /* User Performance Monitor Counter 3 */
+#define PMRN_UPMC4     0x004   /* User Performance Monitor Counter 4 */
+#define PMRN_UPMC5     0x005   /* User Performance Monitor Counter 5 */
 #define PMRN_UPMLCA0   0x080   /* User PM Local Control A0 */
 #define PMRN_UPMLCA1   0x081   /* User PM Local Control A1 */
 #define PMRN_UPMLCA2   0x082   /* User PM Local Control A2 */
 #define PMRN_UPMLCA3   0x083   /* User PM Local Control A3 */
+#define PMRN_UPMLCA4   0x084   /* User PM Local Control A4 */
+#define PMRN_UPMLCA5   0x085   /* User PM Local Control A5 */
 #define PMRN_UPMLCB0   0x100   /* User PM Local Control B0 */
 #define PMRN_UPMLCB1   0x101   /* User PM Local Control B1 */
 #define PMRN_UPMLCB2   0x102   /* User PM Local Control B2 */
 #define PMRN_UPMLCB3   0x103   /* User PM Local Control B3 */
+#define PMRN_UPMLCB4   0x104   /* User PM Local Control B4 */
+#define PMRN_UPMLCB5   0x105   /* User PM Local Control B5 */
 #define PMRN_UPMGC0    0x180   /* User PM Global Control 0 */
 
 
index c7a8bfc9f6f5e4c3551fef8dc5e3e69d045153e6..9bd52c65e66f9800734b70fdf9f3d6b649c992ba 100644 (file)
  *
  */
 
-typedef u32 rtas_arg_t;
+typedef __be32 rtas_arg_t;
 
 struct rtas_args {
-       u32 token;
-       u32 nargs;
-       u32 nret; 
+       __be32 token;
+       __be32 nargs;
+       __be32 nret; 
        rtas_arg_t args[16];
        rtas_arg_t *rets;     /* Pointer to return values in args[]. */
 };  
index 48cfc858abd6dfa70c4c95172a5e4003fa62c53d..98da78e0c2c0eeda19f1852faa725742aa43af76 100644 (file)
@@ -112,6 +112,7 @@ static inline struct cpumask *cpu_core_mask(int cpu)
 }
 
 extern int cpu_to_core_id(int cpu);
+extern int cpu_to_chip_id(int cpu);
 
 /* Since OpenPIC has only 4 IPIs, we use slightly different message numbers.
  *
@@ -186,6 +187,8 @@ extern int smt_enabled_at_boot;
 extern int smp_mpic_probe(void);
 extern void smp_mpic_setup_cpu(int cpu);
 extern int smp_generic_kick_cpu(int nr);
+extern int smp_generic_cpu_bootable(unsigned int nr);
+
 
 extern void smp_generic_give_timebase(void);
 extern void smp_generic_take_timebase(void);
index 5b23f910ee57ea06817815cf0f5688fc0c024b93..5f54a744dcc5e26921ddafe1d267985f71dd8540 100644 (file)
 
 #ifdef CONFIG_PPC64
 /* use 0x800000yy when locked, where yy == CPU number */
+#ifdef __BIG_ENDIAN__
 #define LOCK_TOKEN     (*(u32 *)(&get_paca()->lock_token))
 #else
+#define LOCK_TOKEN     (*(u32 *)(&get_paca()->paca_index))
+#endif
+#else
 #define LOCK_TOKEN     1
 #endif
 
@@ -96,7 +100,7 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
 
 #if defined(CONFIG_PPC_SPLPAR)
 /* We only yield to the hypervisor if we are in shared processor mode */
-#define SHARED_PROCESSOR (local_paca->lppaca_ptr->shared_proc)
+#define SHARED_PROCESSOR (lppaca_shared_proc(local_paca->lppaca_ptr))
 extern void __spin_yield(arch_spinlock_t *lock);
 extern void __rw_yield(arch_rwlock_t *lock);
 #else /* SPLPAR */
index 93f280e23279e77172cd56e373c4429e11be01b6..37b7ca39ec9f1b5ec2d3e574a8806de34ccff555 100644 (file)
@@ -235,6 +235,7 @@ extern long spu_sys_callback(struct spu_syscall_block *s);
 
 /* syscalls implemented in spufs */
 struct file;
+struct coredump_params;
 struct spufs_calls {
        long (*create_thread)(const char __user *name,
                                        unsigned int flags, umode_t mode,
@@ -242,7 +243,7 @@ struct spufs_calls {
        long (*spu_run)(struct file *filp, __u32 __user *unpc,
                                                __u32 __user *ustatus);
        int (*coredump_extra_notes_size)(void);
-       int (*coredump_extra_notes_write)(struct file *file, loff_t *foffset);
+       int (*coredump_extra_notes_write)(struct coredump_params *cprm);
        void (*notify_spus_active)(void);
        struct module *owner;
 };
index 294c2cedcf7a622b478b748a1c5c3ddb8f537674..2be5618cdec6874d79315de6f8f2ecf3e2e8d938 100644 (file)
@@ -25,11 +25,8 @@ static inline void save_tar(struct thread_struct *prev)
 static inline void save_tar(struct thread_struct *prev) {}
 #endif
 
-extern void giveup_fpu(struct task_struct *);
 extern void load_up_fpu(void);
-extern void disable_kernel_fp(void);
 extern void enable_kernel_fp(void);
-extern void flush_fp_to_thread(struct task_struct *);
 extern void enable_kernel_altivec(void);
 extern void load_up_altivec(struct task_struct *);
 extern int emulate_altivec(struct pt_regs *);
@@ -47,6 +44,14 @@ static inline void discard_lazy_cpu_state(void)
 }
 #endif
 
+#ifdef CONFIG_PPC_FPU
+extern void flush_fp_to_thread(struct task_struct *);
+extern void giveup_fpu(struct task_struct *);
+#else
+static inline void flush_fp_to_thread(struct task_struct *t) { }
+static inline void giveup_fpu(struct task_struct *t) { }
+#endif
+
 #ifdef CONFIG_ALTIVEC
 extern void flush_altivec_to_thread(struct task_struct *);
 extern void giveup_altivec(struct task_struct *);
index 161ab662843b0f84ffa0e2d0dbbebd8945545dc3..89e3ef2496ac72c4e03d17657833930de22f6b8e 100644 (file)
@@ -96,6 +96,7 @@ static inline int prrn_is_enabled(void)
 #ifdef CONFIG_PPC64
 #include <asm/smp.h>
 
+#define topology_physical_package_id(cpu)      (cpu_to_chip_id(cpu))
 #define topology_thread_cpumask(cpu)   (per_cpu(cpu_sibling_map, cpu))
 #define topology_core_cpumask(cpu)     (per_cpu(cpu_core_map, cpu))
 #define topology_core_id(cpu)          (cpu_to_core_id(cpu))
index dc590919f8eb370d1568d54f7c002d58f63b2dad..b51fba10e733d453ac29e0c535ff419d480f206d 100644 (file)
@@ -27,10 +27,11 @@ extern void udbg_printf(const char *fmt, ...)
        __attribute__ ((format (printf, 1, 2)));
 extern void udbg_progress(char *s, unsigned short hex);
 
-extern void udbg_init_uart(void __iomem *comport, unsigned int speed,
-                          unsigned int clock);
-extern unsigned int udbg_probe_uart_speed(void __iomem *comport,
-                                         unsigned int clock);
+extern void udbg_uart_init_mmio(void __iomem *addr, unsigned int stride);
+extern void udbg_uart_init_pio(unsigned long port, unsigned int stride);
+
+extern void udbg_uart_setup(unsigned int speed, unsigned int clock);
+extern unsigned int udbg_probe_uart_speed(unsigned int clock);
 
 struct device_node;
 extern void udbg_scc_init(int force_scc);
index 05b8d560cfbab318f394b56e35c6e2df78b2eebc..7e39c9146a71f63c1701118ec4a2e16a243341b5 100644 (file)
@@ -107,26 +107,25 @@ typedef elf_gregset_t32 compat_elf_gregset_t;
 # define ELF_NVRREG    34      /* includes vscr & vrsave in split vectors */
 # define ELF_NVSRHALFREG 32    /* Half the vsx registers */
 # define ELF_GREG_TYPE elf_greg_t64
+# define ELF_ARCH      EM_PPC64
+# define ELF_CLASS     ELFCLASS64
+typedef elf_greg_t64 elf_greg_t;
+typedef elf_gregset_t64 elf_gregset_t;
 #else
 # define ELF_NEVRREG   34      /* includes acc (as 2) */
 # define ELF_NVRREG    33      /* includes vscr */
 # define ELF_GREG_TYPE elf_greg_t32
 # define ELF_ARCH      EM_PPC
 # define ELF_CLASS     ELFCLASS32
-# define ELF_DATA      ELFDATA2MSB
+typedef elf_greg_t32 elf_greg_t;
+typedef elf_gregset_t32 elf_gregset_t;
 #endif /* __powerpc64__ */
 
-#ifndef ELF_ARCH
-# define ELF_ARCH      EM_PPC64
-# define ELF_CLASS     ELFCLASS64
-# define ELF_DATA      ELFDATA2MSB
-  typedef elf_greg_t64 elf_greg_t;
-  typedef elf_gregset_t64 elf_gregset_t;
+#ifdef __BIG_ENDIAN__
+#define ELF_DATA       ELFDATA2MSB
 #else
-  /* Assumption: ELF_ARCH == EM_PPC and ELF_CLASS == ELFCLASS32 */
-  typedef elf_greg_t32 elf_greg_t;
-  typedef elf_gregset_t32 elf_gregset_t;
-#endif /* ELF_ARCH */
+#define ELF_DATA       ELFDATA2LSB
+#endif
 
 /* Floating point registers */
 typedef double elf_fpreg_t;
index a8619bfe879e0168e5dbaca852ccfae3c176b907..5b280f59b5ca6d0e2abb0fac854191b6baf84e87 100644 (file)
@@ -117,9 +117,7 @@ obj-$(CONFIG_DYNAMIC_FTRACE)        += ftrace.o
 obj-$(CONFIG_FUNCTION_GRAPH_TRACER)    += ftrace.o
 obj-$(CONFIG_FTRACE_SYSCALLS)  += ftrace.o
 
-obj-$(CONFIG_8XX_MINIMAL_FPEMU) += softemu8xx.o
-
-ifneq ($(CONFIG_PPC_INDIRECT_IO),y)
+ifneq ($(CONFIG_PPC_INDIRECT_PIO),y)
 obj-y                          += iomap.o
 endif
 
index ee5b690a0bedff7f4e9e6f485e9dc3c31df31a8a..52e5758ea3689825923ff04d7dc6f602f98dda98 100644 (file)
@@ -764,6 +764,16 @@ int fix_alignment(struct pt_regs *regs)
        nb = aligninfo[instr].len;
        flags = aligninfo[instr].flags;
 
+       /* ldbrx/stdbrx overlap lfs/stfs in the DSISR unfortunately */
+       if (IS_XFORM(instruction) && ((instruction >> 1) & 0x3ff) == 532) {
+               nb = 8;
+               flags = LD+SW;
+       } else if (IS_XFORM(instruction) &&
+                  ((instruction >> 1) & 0x3ff) == 660) {
+               nb = 8;
+               flags = ST+SW;
+       }
+
        /* Byteswap little endian loads and stores */
        swiz = 0;
        if (regs->msr & MSR_LE) {
index ac8f52732fde2948675be0983a5a47dd8163584c..0428992fdb4bf9690b44daf19abff613d5d4fc6a 100644 (file)
 static void scrollscreen(void);
 #endif
 
-static void draw_byte(unsigned char c, long locX, long locY);
-static void draw_byte_32(unsigned char *bits, unsigned int *base, int rb);
-static void draw_byte_16(unsigned char *bits, unsigned int *base, int rb);
-static void draw_byte_8(unsigned char *bits, unsigned int *base, int rb);
-
 #define __force_data __attribute__((__section__(".data")))
 
 static int g_loc_X __force_data;
@@ -52,6 +47,26 @@ static unsigned char vga_font[cmapsz];
 int boot_text_mapped __force_data = 0;
 int force_printk_to_btext = 0;
 
+extern void rmci_on(void);
+extern void rmci_off(void);
+
+static inline void rmci_maybe_on(void)
+{
+#ifdef CONFIG_PPC_EARLY_DEBUG_BOOTX
+       if (!(mfmsr() & MSR_DR))
+               rmci_on();
+#endif
+}
+
+static inline void rmci_maybe_off(void)
+{
+#ifdef CONFIG_PPC_EARLY_DEBUG_BOOTX
+       if (!(mfmsr() & MSR_DR))
+               rmci_off();
+#endif
+}
+
+
 #ifdef CONFIG_PPC32
 /* Calc BAT values for mapping the display and store them
  * in disp_BAT.  Those values are then used from head.S to map
@@ -134,7 +149,7 @@ void __init btext_unmap(void)
  *    changes.
  */
 
-static void map_boot_text(void)
+void btext_map(void)
 {
        unsigned long base, offset, size;
        unsigned char *vbase;
@@ -209,7 +224,7 @@ int btext_initialize(struct device_node *np)
        dispDeviceRect[2] = width;
        dispDeviceRect[3] = height;
 
-       map_boot_text();
+       btext_map();
 
        return 0;
 }
@@ -283,7 +298,7 @@ void btext_update_display(unsigned long phys, int width, int height,
                iounmap(logicalDisplayBase);
                boot_text_mapped = 0;
        }
-       map_boot_text();
+       btext_map();
        g_loc_X = 0;
        g_loc_Y = 0;
        g_max_loc_X = width / 8;
@@ -298,6 +313,7 @@ void btext_clearscreen(void)
                                        (dispDeviceDepth >> 3)) >> 2;
        int i,j;
 
+       rmci_maybe_on();
        for (i=0; i<(dispDeviceRect[3] - dispDeviceRect[1]); i++)
        {
                unsigned int *ptr = base;
@@ -305,6 +321,7 @@ void btext_clearscreen(void)
                        *(ptr++) = 0;
                base += (dispDeviceRowBytes >> 2);
        }
+       rmci_maybe_off();
 }
 
 void btext_flushscreen(void)
@@ -355,6 +372,8 @@ static void scrollscreen(void)
                                   (dispDeviceDepth >> 3)) >> 2;
        int i,j;
 
+       rmci_maybe_on();
+
        for (i=0; i<(dispDeviceRect[3] - dispDeviceRect[1] - 16); i++)
        {
                unsigned int *src_ptr = src;
@@ -371,9 +390,116 @@ static void scrollscreen(void)
                        *(dst_ptr++) = 0;
                dst += (dispDeviceRowBytes >> 2);
        }
+
+       rmci_maybe_off();
 }
 #endif /* ndef NO_SCROLL */
 
+static unsigned int expand_bits_8[16] = {
+       0x00000000,
+       0x000000ff,
+       0x0000ff00,
+       0x0000ffff,
+       0x00ff0000,
+       0x00ff00ff,
+       0x00ffff00,
+       0x00ffffff,
+       0xff000000,
+       0xff0000ff,
+       0xff00ff00,
+       0xff00ffff,
+       0xffff0000,
+       0xffff00ff,
+       0xffffff00,
+       0xffffffff
+};
+
+static unsigned int expand_bits_16[4] = {
+       0x00000000,
+       0x0000ffff,
+       0xffff0000,
+       0xffffffff
+};
+
+
+static void draw_byte_32(unsigned char *font, unsigned int *base, int rb)
+{
+       int l, bits;
+       int fg = 0xFFFFFFFFUL;
+       int bg = 0x00000000UL;
+
+       for (l = 0; l < 16; ++l)
+       {
+               bits = *font++;
+               base[0] = (-(bits >> 7) & fg) ^ bg;
+               base[1] = (-((bits >> 6) & 1) & fg) ^ bg;
+               base[2] = (-((bits >> 5) & 1) & fg) ^ bg;
+               base[3] = (-((bits >> 4) & 1) & fg) ^ bg;
+               base[4] = (-((bits >> 3) & 1) & fg) ^ bg;
+               base[5] = (-((bits >> 2) & 1) & fg) ^ bg;
+               base[6] = (-((bits >> 1) & 1) & fg) ^ bg;
+               base[7] = (-(bits & 1) & fg) ^ bg;
+               base = (unsigned int *) ((char *)base + rb);
+       }
+}
+
+static inline void draw_byte_16(unsigned char *font, unsigned int *base, int rb)
+{
+       int l, bits;
+       int fg = 0xFFFFFFFFUL;
+       int bg = 0x00000000UL;
+       unsigned int *eb = (int *)expand_bits_16;
+
+       for (l = 0; l < 16; ++l)
+       {
+               bits = *font++;
+               base[0] = (eb[bits >> 6] & fg) ^ bg;
+               base[1] = (eb[(bits >> 4) & 3] & fg) ^ bg;
+               base[2] = (eb[(bits >> 2) & 3] & fg) ^ bg;
+               base[3] = (eb[bits & 3] & fg) ^ bg;
+               base = (unsigned int *) ((char *)base + rb);
+       }
+}
+
+static inline void draw_byte_8(unsigned char *font, unsigned int *base, int rb)
+{
+       int l, bits;
+       int fg = 0x0F0F0F0FUL;
+       int bg = 0x00000000UL;
+       unsigned int *eb = (int *)expand_bits_8;
+
+       for (l = 0; l < 16; ++l)
+       {
+               bits = *font++;
+               base[0] = (eb[bits >> 4] & fg) ^ bg;
+               base[1] = (eb[bits & 0xf] & fg) ^ bg;
+               base = (unsigned int *) ((char *)base + rb);
+       }
+}
+
+static noinline void draw_byte(unsigned char c, long locX, long locY)
+{
+       unsigned char *base     = calc_base(locX << 3, locY << 4);
+       unsigned char *font     = &vga_font[((unsigned int)c) * 16];
+       int rb                  = dispDeviceRowBytes;
+
+       rmci_maybe_on();
+       switch(dispDeviceDepth) {
+       case 24:
+       case 32:
+               draw_byte_32(font, (unsigned int *)base, rb);
+               break;
+       case 15:
+       case 16:
+               draw_byte_16(font, (unsigned int *)base, rb);
+               break;
+       case 8:
+               draw_byte_8(font, (unsigned int *)base, rb);
+               break;
+       }
+       rmci_maybe_off();
+}
+
 void btext_drawchar(char c)
 {
        int cline = 0;
@@ -465,107 +591,12 @@ void btext_drawhex(unsigned long v)
        btext_drawchar(' ');
 }
 
-static void draw_byte(unsigned char c, long locX, long locY)
-{
-       unsigned char *base     = calc_base(locX << 3, locY << 4);
-       unsigned char *font     = &vga_font[((unsigned int)c) * 16];
-       int rb                  = dispDeviceRowBytes;
-
-       switch(dispDeviceDepth) {
-       case 24:
-       case 32:
-               draw_byte_32(font, (unsigned int *)base, rb);
-               break;
-       case 15:
-       case 16:
-               draw_byte_16(font, (unsigned int *)base, rb);
-               break;
-       case 8:
-               draw_byte_8(font, (unsigned int *)base, rb);
-               break;
-       }
-}
-
-static unsigned int expand_bits_8[16] = {
-       0x00000000,
-       0x000000ff,
-       0x0000ff00,
-       0x0000ffff,
-       0x00ff0000,
-       0x00ff00ff,
-       0x00ffff00,
-       0x00ffffff,
-       0xff000000,
-       0xff0000ff,
-       0xff00ff00,
-       0xff00ffff,
-       0xffff0000,
-       0xffff00ff,
-       0xffffff00,
-       0xffffffff
-};
-
-static unsigned int expand_bits_16[4] = {
-       0x00000000,
-       0x0000ffff,
-       0xffff0000,
-       0xffffffff
-};
-
-
-static void draw_byte_32(unsigned char *font, unsigned int *base, int rb)
-{
-       int l, bits;
-       int fg = 0xFFFFFFFFUL;
-       int bg = 0x00000000UL;
-
-       for (l = 0; l < 16; ++l)
-       {
-               bits = *font++;
-               base[0] = (-(bits >> 7) & fg) ^ bg;
-               base[1] = (-((bits >> 6) & 1) & fg) ^ bg;
-               base[2] = (-((bits >> 5) & 1) & fg) ^ bg;
-               base[3] = (-((bits >> 4) & 1) & fg) ^ bg;
-               base[4] = (-((bits >> 3) & 1) & fg) ^ bg;
-               base[5] = (-((bits >> 2) & 1) & fg) ^ bg;
-               base[6] = (-((bits >> 1) & 1) & fg) ^ bg;
-               base[7] = (-(bits & 1) & fg) ^ bg;
-               base = (unsigned int *) ((char *)base + rb);
-       }
-}
-
-static void draw_byte_16(unsigned char *font, unsigned int *base, int rb)
-{
-       int l, bits;
-       int fg = 0xFFFFFFFFUL;
-       int bg = 0x00000000UL;
-       unsigned int *eb = (int *)expand_bits_16;
-
-       for (l = 0; l < 16; ++l)
-       {
-               bits = *font++;
-               base[0] = (eb[bits >> 6] & fg) ^ bg;
-               base[1] = (eb[(bits >> 4) & 3] & fg) ^ bg;
-               base[2] = (eb[(bits >> 2) & 3] & fg) ^ bg;
-               base[3] = (eb[bits & 3] & fg) ^ bg;
-               base = (unsigned int *) ((char *)base + rb);
-       }
-}
-
-static void draw_byte_8(unsigned char *font, unsigned int *base, int rb)
+void __init udbg_init_btext(void)
 {
-       int l, bits;
-       int fg = 0x0F0F0F0FUL;
-       int bg = 0x00000000UL;
-       unsigned int *eb = (int *)expand_bits_8;
-
-       for (l = 0; l < 16; ++l)
-       {
-               bits = *font++;
-               base[0] = (eb[bits >> 4] & fg) ^ bg;
-               base[1] = (eb[bits & 0xf] & fg) ^ bg;
-               base = (unsigned int *) ((char *)base + rb);
-       }
+       /* If btext is enabled, we might have a BAT setup for early display,
+        * thus we do enable some very basic udbg output
+        */
+       udbg_putc = btext_drawchar;
 }
 
 static unsigned char vga_font[cmapsz] = {
@@ -913,10 +944,3 @@ static unsigned char vga_font[cmapsz] = {
 0x00, 0x00, 0x00, 0x00,
 };
 
-void __init udbg_init_btext(void)
-{
-       /* If btext is enabled, we might have a BAT setup for early display,
-        * thus we do enable some very basic udbg output
-        */
-       udbg_putc = btext_drawchar;
-}
index 9262cf2bec4bd6e1e2edd7d91d476e4b900daeb3..654932727873da2ed0073d70d77efb271713a28a 100644 (file)
@@ -196,7 +196,7 @@ static void cache_cpu_set(struct cache *cache, int cpu)
 static int cache_size(const struct cache *cache, unsigned int *ret)
 {
        const char *propname;
-       const u32 *cache_size;
+       const __be32 *cache_size;
 
        propname = cache_type_info[cache->type].size_prop;
 
@@ -204,7 +204,7 @@ static int cache_size(const struct cache *cache, unsigned int *ret)
        if (!cache_size)
                return -ENODEV;
 
-       *ret = *cache_size;
+       *ret = of_read_number(cache_size, 1);
        return 0;
 }
 
@@ -222,7 +222,7 @@ static int cache_size_kb(const struct cache *cache, unsigned int *ret)
 /* not cache_line_size() because that's a macro in include/linux/cache.h */
 static int cache_get_line_size(const struct cache *cache, unsigned int *ret)
 {
-       const u32 *line_size;
+       const __be32 *line_size;
        int i, lim;
 
        lim = ARRAY_SIZE(cache_type_info[cache->type].line_size_props);
@@ -239,14 +239,14 @@ static int cache_get_line_size(const struct cache *cache, unsigned int *ret)
        if (!line_size)
                return -ENODEV;
 
-       *ret = *line_size;
+       *ret = of_read_number(line_size, 1);
        return 0;
 }
 
 static int cache_nr_sets(const struct cache *cache, unsigned int *ret)
 {
        const char *propname;
-       const u32 *nr_sets;
+       const __be32 *nr_sets;
 
        propname = cache_type_info[cache->type].nr_sets_prop;
 
@@ -254,7 +254,7 @@ static int cache_nr_sets(const struct cache *cache, unsigned int *ret)
        if (!nr_sets)
                return -ENODEV;
 
-       *ret = *nr_sets;
+       *ret = of_read_number(nr_sets, 1);
        return 0;
 }
 
index 0b9af015bedcb8382350efe5066779930607044b..bfb18c7290b7a13fccd520f064228b876430d807 100644 (file)
@@ -75,7 +75,7 @@ _GLOBAL(__setup_cpu_e500v2)
        bl      __e500_icache_setup
        bl      __e500_dcache_setup
        bl      __setup_e500_ivors
-#ifdef CONFIG_FSL_RIO
+#if defined(CONFIG_FSL_RIO) || defined(CONFIG_FSL_PCI)
        /* Ensure that RFXE is set */
        mfspr   r3,SPRN_HID1
        oris    r3,r3,HID1_RFXE@h
index 22973a74df7342b1146a3dd2881432c3cd8ad2b8..597d954e58601b36f376953898faf9178b818239 100644 (file)
@@ -2105,7 +2105,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
                        MMU_FTR_USE_TLBILX,
                .icache_bsize           = 64,
                .dcache_bsize           = 64,
-               .num_pmcs               = 4,
+               .num_pmcs               = 6,
                .oprofile_cpu_type      = "ppc/e6500",
                .oprofile_type          = PPC_OPROFILE_FSL_EMB,
                .cpu_setup              = __setup_cpu_e6500,
index 2bd0b885b0fe901328d9ae44f7703cca0a159676..4524500f30d66dd31a17ea69e8d22f54dcb74ae9 100644 (file)
@@ -102,7 +102,8 @@ BEGIN_FW_FTR_SECTION
        /* if from user, see if there are any DTL entries to process */
        ld      r10,PACALPPACAPTR(r13)  /* get ptr to VPA */
        ld      r11,PACA_DTL_RIDX(r13)  /* get log read index */
-       ld      r10,LPPACA_DTLIDX(r10)  /* get log write index */
+       addi    r10,r10,LPPACA_DTLIDX
+       LDX_BE  r10,0,r10               /* get log write index */
        cmpd    cr1,r11,r10
        beq+    cr1,33f
        bl      .accumulate_stolen_time
@@ -522,9 +523,11 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
         */
        ld      r9,PACA_SLBSHADOWPTR(r13)
        li      r12,0
-       std     r12,SLBSHADOW_STACKESID(r9) /* Clear ESID */
-       std     r7,SLBSHADOW_STACKVSID(r9)  /* Save VSID */
-       std     r0,SLBSHADOW_STACKESID(r9)  /* Save ESID */
+       std     r12,SLBSHADOW_STACKESID(r9)     /* Clear ESID */
+       li      r12,SLBSHADOW_STACKVSID
+       STDX_BE r7,r12,r9                       /* Save VSID */
+       li      r12,SLBSHADOW_STACKESID
+       STDX_BE r0,r12,r9                       /* Save ESID */
 
        /* No need to check for MMU_FTR_NO_SLBIE_B here, since when
         * we have 1TB segments, the only CPUs known to have the errata
@@ -737,9 +740,9 @@ resume_kernel:
 
        /*
         * Here we are preempting the current task. We want to make
-        * sure we are soft-disabled first
+        * sure we are soft-disabled first and reconcile irq state.
         */
-       SOFT_DISABLE_INTS(r3,r4)
+       RECONCILE_IRQ_STATE(r3,r4)
 1:     bl      .preempt_schedule_irq
 
        /* Re-test flags and eventually loop */
index d44a571e45a79dae9b7976c7e281c93021b2847e..6300c13bbde46da37cf9637f95711e1a544c94ba 100644 (file)
@@ -30,22 +30,20 @@ extern u32 epapr_ev_idle_start[];
 
 bool epapr_paravirt_enabled;
 
-static int __init epapr_paravirt_init(void)
+static int __init early_init_dt_scan_epapr(unsigned long node,
+                                          const char *uname,
+                                          int depth, void *data)
 {
-       struct device_node *hyper_node;
        const u32 *insts;
-       int len, i;
+       unsigned long len;
+       int i;
 
-       hyper_node = of_find_node_by_path("/hypervisor");
-       if (!hyper_node)
-               return -ENODEV;
-
-       insts = of_get_property(hyper_node, "hcall-instructions", &len);
+       insts = of_get_flat_dt_prop(node, "hcall-instructions", &len);
        if (!insts)
-               return -ENODEV;
+               return 0;
 
        if (len % 4 || len > (4 * 4))
-               return -ENODEV;
+               return -1;
 
        for (i = 0; i < (len / 4); i++) {
                patch_instruction(epapr_hypercall_start + i, insts[i]);
@@ -55,13 +53,19 @@ static int __init epapr_paravirt_init(void)
        }
 
 #if !defined(CONFIG_64BIT) || defined(CONFIG_PPC_BOOK3E_64)
-       if (of_get_property(hyper_node, "has-idle", NULL))
+       if (of_get_flat_dt_prop(node, "has-idle", NULL))
                ppc_md.power_save = epapr_ev_idle;
 #endif
 
        epapr_paravirt_enabled = true;
 
+       return 1;
+}
+
+int __init epapr_paravirt_early_init(void)
+{
+       of_scan_flat_dt(early_init_dt_scan_epapr, NULL);
+
        return 0;
 }
 
-early_initcall(epapr_paravirt_init);
index 645170a07ada1da7abf4fddc814fcbaad9c071c9..2d067049db27f1e56ca421c0a223e958125f60ad 100644 (file)
@@ -198,9 +198,9 @@ exc_##n##_common:                                                       \
 /* This second version is meant for exceptions that don't immediately
  * hard-enable. We set a bit in paca->irq_happened to ensure that
  * a subsequent call to arch_local_irq_restore() will properly
- * hard-enable and avoid the fast-path
+ * hard-enable and avoid the fast-path, and then reconcile irq state.
  */
-#define INTS_DISABLE   SOFT_DISABLE_INTS(r3,r4)
+#define INTS_DISABLE   RECONCILE_IRQ_STATE(r3,r4)
 
 /* This is called by exceptions that used INTS_KEEP (that did not touch
  * irq indicators in the PACA). This will restore MSR:EE to it's previous
index 902ca3c6b4b6496d8e606623187c9f1da1500f2a..a44ce5dedbacde3b66c93a836568f0fef8de1238 100644 (file)
@@ -367,11 +367,7 @@ denorm_exception_hv:
        HMT_MEDIUM_PPR_DISCARD
        mtspr   SPRN_SPRG_HSCRATCH0,r13
        EXCEPTION_PROLOG_0(PACA_EXGEN)
-       std     r11,PACA_EXGEN+EX_R11(r13)
-       std     r12,PACA_EXGEN+EX_R12(r13)
-       mfspr   r9,SPRN_SPRG_HSCRATCH0
-       std     r9,PACA_EXGEN+EX_R13(r13)
-       mfcr    r9
+       EXCEPTION_PROLOG_1(PACA_EXGEN, NOTEST, 0x1500)
 
 #ifdef CONFIG_PPC_DENORMALISATION
        mfspr   r10,SPRN_HSRR1
@@ -381,6 +377,7 @@ denorm_exception_hv:
        bne+    denorm_assist
 #endif
 
+       KVMTEST(0x1500)
        EXCEPTION_PROLOG_PSERIES_1(denorm_common, EXC_HV)
        KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x1500)
 
@@ -501,6 +498,10 @@ denorm_done:
        mtcrf   0x80,r9
        ld      r9,PACA_EXGEN+EX_R9(r13)
        RESTORE_PPR_PACA(PACA_EXGEN, r10)
+BEGIN_FTR_SECTION
+       ld      r10,PACA_EXGEN+EX_CFAR(r13)
+       mtspr   SPRN_CFAR,r10
+END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
        ld      r10,PACA_EXGEN+EX_R10(r13)
        ld      r11,PACA_EXGEN+EX_R11(r13)
        ld      r12,PACA_EXGEN+EX_R12(r13)
index 8a9b6f59822d666edf25ebc77326f2a5acbc8482..67ee0d6c1070b3f02702dbc8442a905f3b39b8d6 100644 (file)
@@ -822,14 +822,6 @@ finish_tlb_load:
        rfi                     /* Should sync shadow TLBs */
        b       .               /* prevent prefetch past rfi */
 
-/* extern void giveup_fpu(struct task_struct *prev)
- *
- * The PowerPC 4xx family of processors do not have an FPU, so this just
- * returns.
- */
-_ENTRY(giveup_fpu)
-       blr
-
 /* This is where the main kernel code starts.
  */
 start_here:
index 97e2671cde7f4ae662264d0bbf377f95c0d6e58e..c334f53453f708ea64c7d9c1322cc92418a5b687 100644 (file)
@@ -784,16 +784,6 @@ _GLOBAL(__fixup_440A_mcheck)
        sync
        blr
 
-/*
- * extern void giveup_fpu(struct task_struct *prev)
- *
- * The 44x core does not have an FPU.
- */
-#ifndef CONFIG_PPC_FPU
-_GLOBAL(giveup_fpu)
-       blr
-#endif
-
 _GLOBAL(set_context)
 
 #ifdef CONFIG_BDI_SWITCH
index b61363d557b571abee2d1092d8153390ea8c5a6b..3d11d8038deec122ad9735202e98f4567998a070 100644 (file)
@@ -703,6 +703,7 @@ _GLOBAL(relative_toc)
        mtlr    r0
        blr
 
+.balign 8
 p_toc: .llong  __toc_start + 0x8000 - 0b
 
 /*
index b2a5860accfb9c3fa607e2cf4210802e574d6fd2..1b92a97b1b046d3721c9aed9912775de27dccd6d 100644 (file)
@@ -691,10 +691,6 @@ modified_instr:
        b       151b
 #endif
 
-       .globl  giveup_fpu
-giveup_fpu:
-       blr
-
 /*
  * This is where the main kernel code starts.
  */
index d10a7cacccd203b7af0814a19f3ea9f6ed1f000a..289afaffbbb5df2ecba595e01646a5b5d4bfd964 100644 (file)
@@ -947,16 +947,6 @@ _GLOBAL(giveup_spe)
        blr
 #endif /* CONFIG_SPE */
 
-/*
- * extern void giveup_fpu(struct task_struct *prev)
- *
- * Not all FSL Book-E cores have an FPU
- */
-#ifndef CONFIG_PPC_FPU
-_GLOBAL(giveup_fpu)
-       blr
-#endif
-
 /*
  * extern void abort(void)
  *
index fa0b54b2a362b737ac6d013326c7eee8071c617f..24b968f8e4d83fdb61349ec1067d18583953613f 100644 (file)
@@ -53,6 +53,7 @@ static struct iowa_bus *iowa_pci_find(unsigned long vaddr, unsigned long paddr)
        return NULL;
 }
 
+#ifdef CONFIG_PPC_INDIRECT_MMIO
 struct iowa_bus *iowa_mem_find_bus(const PCI_IO_ADDR addr)
 {
        unsigned hugepage_shift;
@@ -90,13 +91,25 @@ struct iowa_bus *iowa_mem_find_bus(const PCI_IO_ADDR addr)
 
        return bus;
 }
+#else /* CONFIG_PPC_INDIRECT_MMIO */
+struct iowa_bus *iowa_mem_find_bus(const PCI_IO_ADDR addr)
+{
+       return NULL;
+}
+#endif /* !CONFIG_PPC_INDIRECT_MMIO */
 
+#ifdef CONFIG_PPC_INDIRECT_PIO
 struct iowa_bus *iowa_pio_find_bus(unsigned long port)
 {
        unsigned long vaddr = (unsigned long)pci_io_base + port;
        return iowa_pci_find(vaddr, 0);
 }
-
+#else
+struct iowa_bus *iowa_pio_find_bus(unsigned long port)
+{
+       return NULL;
+}
+#endif
 
 #define DEF_PCI_AC_RET(name, ret, at, al, space, aa)           \
 static ret iowa_##name at                                      \
@@ -137,6 +150,7 @@ static const struct ppc_pci_io iowa_pci_io = {
 
 };
 
+#ifdef CONFIG_PPC_INDIRECT_MMIO
 static void __iomem *iowa_ioremap(phys_addr_t addr, unsigned long size,
                                  unsigned long flags, void *caller)
 {
@@ -151,6 +165,9 @@ static void __iomem *iowa_ioremap(phys_addr_t addr, unsigned long size,
        }
        return res;
 }
+#else /* CONFIG_PPC_INDIRECT_MMIO */
+#define iowa_ioremap NULL
+#endif /* !CONFIG_PPC_INDIRECT_MMIO */
 
 /* Enable IO workaround */
 static void io_workaround_init(void)
index 886381f32c3d45e9988a614fd3ba58b04249a52f..2a2b4aeab80fd83ca276bcccfef6387f3324d565 100644 (file)
@@ -25,6 +25,9 @@
 #include <asm/firmware.h>
 #include <asm/bug.h>
 
+/* See definition in io.h */
+bool isa_io_special;
+
 void _insb(const volatile u8 __iomem *port, void *buf, long count)
 {
        u8 *tbuf = buf;
index 0733b05eb856b05476621296b45fbde7661ef6a8..22e88dd2f34ad7b43c27fe1361f8ed9213f9f960 100644 (file)
@@ -99,7 +99,7 @@ static int __init add_legacy_port(struct device_node *np, int want_index,
                legacy_serial_count = index + 1;
 
        /* Check if there is a port who already claimed our slot */
-       if (legacy_serial_infos[index].np != 0) {
+       if (legacy_serial_infos[index].np != NULL) {
                /* if we still have some room, move it, else override */
                if (legacy_serial_count < MAX_LEGACY_SERIAL_PORTS) {
                        printk(KERN_DEBUG "Moved legacy port %d -> %d\n",
@@ -152,7 +152,7 @@ static int __init add_legacy_soc_port(struct device_node *np,
                                      struct device_node *soc_dev)
 {
        u64 addr;
-       const u32 *addrp;
+       const __be32 *addrp;
        upf_t flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_SHARE_IRQ
                | UPF_FIXED_PORT;
        struct device_node *tsi = of_get_parent(np);
@@ -221,14 +221,19 @@ static int __init add_legacy_isa_port(struct device_node *np,
        /* Translate ISA address. If it fails, we still register the port
         * with no translated address so that it can be picked up as an IO
         * port later by the serial driver
+        *
+        * Note: Don't even try on P8 lpc, we know it's not directly mapped
         */
-       taddr = of_translate_address(np, reg);
-       if (taddr == OF_BAD_ADDR)
+       if (!of_device_is_compatible(isa_brg, "ibm,power8-lpc")) {
+               taddr = of_translate_address(np, reg);
+               if (taddr == OF_BAD_ADDR)
+                       taddr = 0;
+       } else
                taddr = 0;
 
        /* Add port, irq will be dealt with later */
-       return add_legacy_port(np, index, UPIO_PORT, be32_to_cpu(reg[1]), taddr,
-                              NO_IRQ, UPF_BOOT_AUTOCONF, 0);
+       return add_legacy_port(np, index, UPIO_PORT, be32_to_cpu(reg[1]),
+                              taddr, NO_IRQ, UPF_BOOT_AUTOCONF, 0);
 
 }
 
@@ -237,7 +242,7 @@ static int __init add_legacy_pci_port(struct device_node *np,
                                      struct device_node *pci_dev)
 {
        u64 addr, base;
-       const u32 *addrp;
+       const __be32 *addrp;
        unsigned int flags;
        int iotype, index = -1, lindex = 0;
 
@@ -270,7 +275,7 @@ static int __init add_legacy_pci_port(struct device_node *np,
        if (iotype == UPIO_MEM)
                base = addr;
        else
-               base = addrp[2];
+               base = of_read_number(&addrp[2], 1);
 
        /* Try to guess an index... If we have subdevices of the pci dev,
         * we get to their "reg" property
@@ -307,19 +312,31 @@ static int __init add_legacy_pci_port(struct device_node *np,
 
 static void __init setup_legacy_serial_console(int console)
 {
-       struct legacy_serial_info *info =
-               &legacy_serial_infos[console];
+       struct legacy_serial_info *info = &legacy_serial_infos[console];
+       struct plat_serial8250_port *port = &legacy_serial_ports[console];
        void __iomem *addr;
 
-       if (info->taddr == 0)
-               return;
-       addr = ioremap(info->taddr, 0x1000);
-       if (addr == NULL)
-               return;
+       /* Check if a translated MMIO address has been found */
+       if (info->taddr) {
+               addr = ioremap(info->taddr, 0x1000);
+               if (addr == NULL)
+                       return;
+               udbg_uart_init_mmio(addr, 1);
+       } else {
+               /* Check if it's PIO and we support untranslated PIO */
+               if (port->iotype == UPIO_PORT && isa_io_special)
+                       udbg_uart_init_pio(port->iobase, 1);
+               else
+                       return;
+       }
+
+       /* Try to query the current speed */
        if (info->speed == 0)
-               info->speed = udbg_probe_uart_speed(addr, info->clock);
+               info->speed = udbg_probe_uart_speed(info->clock);
+
+       /* Set it up */
        DBG("default console speed = %d\n", info->speed);
-       udbg_init_uart(addr, info->speed, info->clock);
+       udbg_uart_setup(info->speed, info->clock);
 }
 
 /*
@@ -367,10 +384,13 @@ void __init find_legacy_serial_ports(void)
        /* Next, fill our array with ISA ports */
        for_each_node_by_type(np, "serial") {
                struct device_node *isa = of_get_parent(np);
-               if (isa && !strcmp(isa->name, "isa")) {
-                       index = add_legacy_isa_port(np, isa);
-                       if (index >= 0 && np == stdout)
-                               legacy_serial_console = index;
+               if (isa && (!strcmp(isa->name, "isa") ||
+                           !strcmp(isa->name, "lpc"))) {
+                       if (of_device_is_available(np)) {
+                               index = add_legacy_isa_port(np, isa);
+                               if (index >= 0 && np == stdout)
+                                       legacy_serial_console = index;
+                       }
                }
                of_node_put(isa);
        }
index d92f3871e9cf959b583cf35b03de95929fc253ed..0204089ebdd46c1555d689207cd0b5eb617e8038 100644 (file)
@@ -165,7 +165,7 @@ static void parse_ppp_data(struct seq_file *m)
                   ppp_data.active_system_procs);
 
        /* pool related entries are appropriate for shared configs */
-       if (lppaca_of(0).shared_proc) {
+       if (lppaca_shared_proc(get_lppaca())) {
                unsigned long pool_idle_time, pool_procs;
 
                seq_printf(m, "pool=%d\n", ppp_data.pool_num);
@@ -387,8 +387,8 @@ static void pseries_cmo_data(struct seq_file *m)
                return;
 
        for_each_possible_cpu(cpu) {
-               cmo_faults += lppaca_of(cpu).cmo_faults;
-               cmo_fault_time += lppaca_of(cpu).cmo_fault_time;
+               cmo_faults += be64_to_cpu(lppaca_of(cpu).cmo_faults);
+               cmo_fault_time += be64_to_cpu(lppaca_of(cpu).cmo_fault_time);
        }
 
        seq_printf(m, "cmo_faults=%lu\n", cmo_faults);
@@ -406,8 +406,9 @@ static void splpar_dispatch_data(struct seq_file *m)
        unsigned long dispatch_dispersions = 0;
 
        for_each_possible_cpu(cpu) {
-               dispatches += lppaca_of(cpu).yield_count;
-               dispatch_dispersions += lppaca_of(cpu).dispersion_count;
+               dispatches += be32_to_cpu(lppaca_of(cpu).yield_count);
+               dispatch_dispersions +=
+                       be32_to_cpu(lppaca_of(cpu).dispersion_count);
        }
 
        seq_printf(m, "dispatches=%lu\n", dispatches);
@@ -473,7 +474,8 @@ static int pseries_lparcfg_data(struct seq_file *m, void *v)
        seq_printf(m, "partition_potential_processors=%d\n",
                   partition_potential_processors);
 
-       seq_printf(m, "shared_processor_mode=%d\n", lppaca_of(0).shared_proc);
+       seq_printf(m, "shared_processor_mode=%d\n",
+                  lppaca_shared_proc(get_lppaca()));
 
        seq_printf(m, "slb_size=%d\n", mmu_slb_size);
 
index e469f30e6eeb88b4668e11a86d287e99b67aeae2..777d999f563bb377bff3217358aacdc7667f1fd4 100644 (file)
@@ -327,8 +327,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_UNIFIED_ID_CACHE)
  *
  * flush_icache_range(unsigned long start, unsigned long stop)
  */
-_KPROBE(__flush_icache_range)
+_KPROBE(flush_icache_range)
 BEGIN_FTR_SECTION
+       isync
        blr                             /* for 601, do nothing */
 END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
        li      r5,L1_CACHE_BYTES-1
index 6820e45f557b73b848fda82c512332752b760dc2..971d7e78aff20e1ca801dd923dfc77337e834ad5 100644 (file)
@@ -67,8 +67,10 @@ PPC64_CACHES:
  *   flush all bytes from start through stop-1 inclusive
  */
 
-_KPROBE(__flush_icache_range)
-
+_KPROBE(flush_icache_range)
+BEGIN_FTR_SECTION
+       blr
+END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
 /*
  * Flush the data cache to memory 
  * 
@@ -247,6 +249,37 @@ _GLOBAL(__bswapdi2)
        blr
 
 #if defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE)
+
+_GLOBAL(rmci_on)
+       sync
+       isync
+       li      r3,0x100
+       rldicl  r3,r3,32,0
+       mfspr   r5,SPRN_HID4
+       or      r5,r5,r3
+       sync
+       mtspr   SPRN_HID4,r5
+       isync
+       slbia
+       isync
+       sync
+       blr
+
+_GLOBAL(rmci_off)
+       sync
+       isync
+       li      r3,0x100
+       rldicl  r3,r3,32,0
+       mfspr   r5,SPRN_HID4
+       andc    r5,r5,r3
+       sync
+       mtspr   SPRN_HID4,r5
+       isync
+       slbia
+       isync
+       sync
+       blr
+
 /*
  * Do an IO access in real mode
  */
@@ -416,19 +449,6 @@ _GLOBAL(scom970_write)
        blr
 #endif /* CONFIG_CPU_FREQ_PMAC64 || CONFIG_CPU_FREQ_MAPLE */
 
-
-/*
- * disable_kernel_fp()
- * Disable the FPU.
- */
-_GLOBAL(disable_kernel_fp)
-       mfmsr   r3
-       rldicl  r0,r3,(63-MSR_FP_LG),1
-       rldicl  r3,r0,(MSR_FP_LG+1),0
-       mtmsrd  r3                      /* disable use of fpu now */
-       isync
-       blr
-
 /* kexec_wait(phys_cpu)
  *
  * wait for the flag to change, indicating this kernel is going away but
index f8f24685f10ac210cab7077e14fb74b20f5288f9..3fc16e3beb9f8dca2ea5f260bb59d2dc20bef3f3 100644 (file)
@@ -34,10 +34,10 @@ extern unsigned long __toc_start;
  */
 struct lppaca lppaca[] = {
        [0 ... (NR_LPPACAS-1)] = {
-               .desc = 0xd397d781,     /* "LpPa" */
-               .size = sizeof(struct lppaca),
+               .desc = cpu_to_be32(0xd397d781),        /* "LpPa" */
+               .size = cpu_to_be16(sizeof(struct lppaca)),
                .fpregs_in_use = 1,
-               .slb_count = 64,
+               .slb_count = cpu_to_be16(64),
                .vmxregs_in_use = 0,
                .page_ins = 0,
        },
@@ -101,8 +101,8 @@ static inline void free_lppacas(void) { }
  */
 struct slb_shadow slb_shadow[] __cacheline_aligned = {
        [0 ... (NR_CPUS-1)] = {
-               .persistent = SLB_NUM_BOLTED,
-               .buffer_length = sizeof(struct slb_shadow),
+               .persistent = cpu_to_be32(SLB_NUM_BOLTED),
+               .buffer_length = cpu_to_be32(sizeof(struct slb_shadow)),
        },
 };
 
index 7d22a675fe1a3338d5f1a9d64eb44ce70d8f8f7b..eae0ee00ca25352fbdc555ffc1c10b8312bb8906 100644 (file)
@@ -306,7 +306,7 @@ static struct resource *__pci_mmap_make_offset(struct pci_dev *dev,
        unsigned long io_offset = 0;
        int i, res_bit;
 
-       if (hose == 0)
+       if (hose == NULL)
                return NULL;            /* should never happen */
 
        /* If memory, add on the PCI bridge address offset */
@@ -667,7 +667,7 @@ void pci_resource_to_user(const struct pci_dev *dev, int bar,
 void pci_process_bridge_OF_ranges(struct pci_controller *hose,
                                  struct device_node *dev, int primary)
 {
-       const u32 *ranges;
+       const __be32 *ranges;
        int rlen;
        int pna = of_n_addr_cells(dev);
        int np = pna + 5;
@@ -687,7 +687,7 @@ void pci_process_bridge_OF_ranges(struct pci_controller *hose,
        /* Parse it */
        while ((rlen -= np * 4) >= 0) {
                /* Read next ranges element */
-               pci_space = ranges[0];
+               pci_space = of_read_number(ranges, 1);
                pci_addr = of_read_number(ranges + 1, 2);
                cpu_addr = of_translate_address(dev, ranges + 3);
                size = of_read_number(ranges + pna + 3, 2);
@@ -704,7 +704,7 @@ void pci_process_bridge_OF_ranges(struct pci_controller *hose,
                /* Now consume following elements while they are contiguous */
                for (; rlen >= np * sizeof(u32);
                     ranges += np, rlen -= np * 4) {
-                       if (ranges[0] != pci_space)
+                       if (of_read_number(ranges, 1) != pci_space)
                                break;
                        pci_next = of_read_number(ranges + 1, 2);
                        cpu_next = of_translate_address(dev, ranges + 3);
@@ -1055,8 +1055,7 @@ void pcibios_fixup_bus(struct pci_bus *bus)
         * bases. This is -not- called when generating the PCI tree from
         * the OF device-tree.
         */
-       if (bus->self != NULL)
-               pci_read_bridge_bases(bus);
+       pci_read_bridge_bases(bus);
 
        /* Now fixup the bus bus */
        pcibios_setup_bus_self(bus);
@@ -1578,7 +1577,7 @@ fake_pci_bus(struct pci_controller *hose, int busnr)
 {
        static struct pci_bus bus;
 
-       if (hose == 0) {
+       if (hose == NULL) {
                printk(KERN_ERR "Can't find hose for PCI bus %d!\n", busnr);
        }
        bus.number = busnr;
index 2e8629654ca872443e89e30455b05f54c1850eb0..a9e311f7a9dd58026e1fbd108ae1239415aa6153 100644 (file)
@@ -109,7 +109,7 @@ int pcibios_unmap_io_space(struct pci_bus *bus)
        hose = pci_bus_to_host(bus);
 
        /* Check if we have IOs allocated */
-       if (hose->io_base_alloc == 0)
+       if (hose->io_base_alloc == NULL)
                return 0;
 
        pr_debug("IO unmapping for PHB %s\n", hose->dn->full_name);
@@ -272,7 +272,7 @@ static void quirk_radeon_32bit_msi(struct pci_dev *dev)
        struct pci_dn *pdn = pci_get_pdn(dev);
 
        if (pdn)
-               pdn->force_32bit_msi = 1;
+               pdn->force_32bit_msi = true;
 }
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x68f2, quirk_radeon_32bit_msi);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0xaa68, quirk_radeon_32bit_msi);
index df038442548a1397fb3cf5c9458fc39379a381a2..1f61fab59d9b282aedaa5dad969e760a90acb10e 100644 (file)
@@ -47,9 +47,8 @@ struct pci_dn *pci_get_pdn(struct pci_dev *pdev)
 void *update_dn_pci_info(struct device_node *dn, void *data)
 {
        struct pci_controller *phb = data;
-       const int *type =
-               of_get_property(dn, "ibm,pci-config-space-type", NULL);
-       const u32 *regs;
+       const __be32 *type = of_get_property(dn, "ibm,pci-config-space-type", NULL);
+       const __be32 *regs;
        struct pci_dn *pdn;
 
        pdn = zalloc_maybe_bootmem(sizeof(*pdn), GFP_KERNEL);
@@ -63,12 +62,14 @@ void *update_dn_pci_info(struct device_node *dn, void *data)
 #endif
        regs = of_get_property(dn, "reg", NULL);
        if (regs) {
+               u32 addr = of_read_number(regs, 1);
+
                /* First register entry is addr (00BBSS00)  */
-               pdn->busno = (regs[0] >> 16) & 0xff;
-               pdn->devfn = (regs[0] >> 8) & 0xff;
+               pdn->busno = (addr >> 16) & 0xff;
+               pdn->devfn = (addr >> 8) & 0xff;
        }
 
-       pdn->pci_ext_config_space = (type && *type == 1);
+       pdn->pci_ext_config_space = (type && of_read_number(type, 1) == 1);
        return NULL;
 }
 
@@ -98,12 +99,13 @@ void *traverse_pci_devices(struct device_node *start, traverse_func pre,
 
        /* We started with a phb, iterate all childs */
        for (dn = start->child; dn; dn = nextdn) {
-               const u32 *classp;
-               u32 class;
+               const __be32 *classp;
+               u32 class = 0;
 
                nextdn = NULL;
                classp = of_get_property(dn, "class-code", NULL);
-               class = classp ? *classp : 0;
+               if (classp)
+                       class = of_read_number(classp, 1);
 
                if (pre && ((ret = pre(dn, data)) != NULL))
                        return ret;
index 15d9105323bf5048009428c4bfc83d06edffb38f..4368ec6fdc8c4084c149150a45e6ee2f3150fdcf 100644 (file)
  */
 static u32 get_int_prop(struct device_node *np, const char *name, u32 def)
 {
-       const u32 *prop;
+       const __be32 *prop;
        int len;
 
        prop = of_get_property(np, name, &len);
        if (prop && len >= 4)
-               return *prop;
+               return of_read_number(prop, 1);
        return def;
 }
 
@@ -77,7 +77,7 @@ static void of_pci_parse_addrs(struct device_node *node, struct pci_dev *dev)
        unsigned int flags;
        struct pci_bus_region region;
        struct resource *res;
-       const u32 *addrs;
+       const __be32 *addrs;
        u32 i;
        int proplen;
 
@@ -86,14 +86,14 @@ static void of_pci_parse_addrs(struct device_node *node, struct pci_dev *dev)
                return;
        pr_debug("    parse addresses (%d bytes) @ %p\n", proplen, addrs);
        for (; proplen >= 20; proplen -= 20, addrs += 5) {
-               flags = pci_parse_of_flags(addrs[0], 0);
+               flags = pci_parse_of_flags(of_read_number(addrs, 1), 0);
                if (!flags)
                        continue;
                base = of_read_number(&addrs[1], 2);
                size = of_read_number(&addrs[3], 2);
                if (!size)
                        continue;
-               i = addrs[0] & 0xff;
+               i = of_read_number(addrs, 1) & 0xff;
                pr_debug("  base: %llx, size: %llx, i: %x\n",
                         (unsigned long long)base,
                         (unsigned long long)size, i);
@@ -207,7 +207,7 @@ void of_scan_pci_bridge(struct pci_dev *dev)
 {
        struct device_node *node = dev->dev.of_node;
        struct pci_bus *bus;
-       const u32 *busrange, *ranges;
+       const __be32 *busrange, *ranges;
        int len, i, mode;
        struct pci_bus_region region;
        struct resource *res;
@@ -230,9 +230,11 @@ void of_scan_pci_bridge(struct pci_dev *dev)
                return;
        }
 
-       bus = pci_find_bus(pci_domain_nr(dev->bus), busrange[0]);
+       bus = pci_find_bus(pci_domain_nr(dev->bus),
+                          of_read_number(busrange, 1));
        if (!bus) {
-               bus = pci_add_new_bus(dev->bus, dev, busrange[0]);
+               bus = pci_add_new_bus(dev->bus, dev,
+                                     of_read_number(busrange, 1));
                if (!bus) {
                        printk(KERN_ERR "Failed to create pci bus for %s\n",
                               node->full_name);
@@ -241,7 +243,8 @@ void of_scan_pci_bridge(struct pci_dev *dev)
        }
 
        bus->primary = dev->bus->number;
-       pci_bus_insert_busn_res(bus, busrange[0], busrange[1]);
+       pci_bus_insert_busn_res(bus, of_read_number(busrange, 1),
+                               of_read_number(busrange+1, 1));
        bus->bridge_ctl = 0;
 
        /* parse ranges property */
@@ -254,7 +257,7 @@ void of_scan_pci_bridge(struct pci_dev *dev)
        }
        i = 1;
        for (; len >= 32; len -= 32, ranges += 8) {
-               flags = pci_parse_of_flags(ranges[0], 1);
+               flags = pci_parse_of_flags(of_read_number(ranges, 1), 1);
                size = of_read_number(&ranges[6], 2);
                if (flags == 0 || size == 0)
                        continue;
index c29666586998f27dd0f4d5aa8db721320a82cc2d..21646dbe1bb3c7a48df59ba14dea18431abe12be 100644 (file)
@@ -96,7 +96,9 @@ EXPORT_SYMBOL(pci_dram_offset);
 
 EXPORT_SYMBOL(start_thread);
 
+#ifdef CONFIG_PPC_FPU
 EXPORT_SYMBOL(giveup_fpu);
+#endif
 #ifdef CONFIG_ALTIVEC
 EXPORT_SYMBOL(giveup_altivec);
 #endif /* CONFIG_ALTIVEC */
@@ -111,7 +113,6 @@ EXPORT_SYMBOL(giveup_spe);
 #ifndef CONFIG_PPC64
 EXPORT_SYMBOL(flush_instruction_cache);
 #endif
-EXPORT_SYMBOL(__flush_icache_range);
 EXPORT_SYMBOL(flush_dcache_range);
 
 #ifdef CONFIG_SMP
index 8083be20fe5ec2c6608ab979f988cc0d31eb6862..6f428da53e2085b877334270286069e2ba54da37 100644 (file)
@@ -74,6 +74,7 @@ struct task_struct *last_task_used_vsx = NULL;
 struct task_struct *last_task_used_spe = NULL;
 #endif
 
+#ifdef CONFIG_PPC_FPU
 /*
  * Make sure the floating-point register state in the
  * the thread_struct is up to date for task tsk.
@@ -107,6 +108,7 @@ void flush_fp_to_thread(struct task_struct *tsk)
        }
 }
 EXPORT_SYMBOL_GPL(flush_fp_to_thread);
+#endif
 
 void enable_kernel_fp(void)
 {
index eb23ac92abb9089cea9ebc7fa19fd3a7e1404dbd..655459d6aff200c31a1ac6bf2374f6b27c6d9313 100644 (file)
@@ -215,16 +215,16 @@ static void __init check_cpu_pa_features(unsigned long node)
 #ifdef CONFIG_PPC_STD_MMU_64
 static void __init check_cpu_slb_size(unsigned long node)
 {
-       u32 *slb_size_ptr;
+       __be32 *slb_size_ptr;
 
        slb_size_ptr = of_get_flat_dt_prop(node, "slb-size", NULL);
        if (slb_size_ptr != NULL) {
-               mmu_slb_size = *slb_size_ptr;
+               mmu_slb_size = be32_to_cpup(slb_size_ptr);
                return;
        }
        slb_size_ptr = of_get_flat_dt_prop(node, "ibm,slb-size", NULL);
        if (slb_size_ptr != NULL) {
-               mmu_slb_size = *slb_size_ptr;
+               mmu_slb_size = be32_to_cpup(slb_size_ptr);
        }
 }
 #else
@@ -279,11 +279,11 @@ static void __init check_cpu_feature_properties(unsigned long node)
 {
        unsigned long i;
        struct feature_property *fp = feature_properties;
-       const u32 *prop;
+       const __be32 *prop;
 
        for (i = 0; i < ARRAY_SIZE(feature_properties); ++i, ++fp) {
                prop = of_get_flat_dt_prop(node, fp->name, NULL);
-               if (prop && *prop >= fp->min_value) {
+               if (prop && be32_to_cpup(prop) >= fp->min_value) {
                        cur_cpu_spec->cpu_features |= fp->cpu_feature;
                        cur_cpu_spec->cpu_user_features |= fp->cpu_user_ftr;
                }
@@ -295,8 +295,8 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
                                          void *data)
 {
        char *type = of_get_flat_dt_prop(node, "device_type", NULL);
-       const u32 *prop;
-       const u32 *intserv;
+       const __be32 *prop;
+       const __be32 *intserv;
        int i, nthreads;
        unsigned long len;
        int found = -1;
@@ -324,8 +324,9 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
                 * version 2 of the kexec param format adds the phys cpuid of
                 * booted proc.
                 */
-               if (initial_boot_params->version >= 2) {
-                       if (intserv[i] == initial_boot_params->boot_cpuid_phys) {
+               if (be32_to_cpu(initial_boot_params->version) >= 2) {
+                       if (be32_to_cpu(intserv[i]) ==
+                           be32_to_cpu(initial_boot_params->boot_cpuid_phys)) {
                                found = boot_cpu_count;
                                found_thread = i;
                        }
@@ -347,9 +348,10 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
 
        if (found >= 0) {
                DBG("boot cpu: logical %d physical %d\n", found,
-                       intserv[found_thread]);
+                       be32_to_cpu(intserv[found_thread]));
                boot_cpuid = found;
-               set_hard_smp_processor_id(found, intserv[found_thread]);
+               set_hard_smp_processor_id(found,
+                       be32_to_cpu(intserv[found_thread]));
 
                /*
                 * PAPR defines "logical" PVR values for cpus that
@@ -366,8 +368,8 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
                 * it uses 0x0f000001.
                 */
                prop = of_get_flat_dt_prop(node, "cpu-version", NULL);
-               if (prop && (*prop & 0xff000000) == 0x0f000000)
-                       identify_cpu(0, *prop);
+               if (prop && (be32_to_cpup(prop) & 0xff000000) == 0x0f000000)
+                       identify_cpu(0, be32_to_cpup(prop));
 
                identical_pvr_fixup(node);
        }
@@ -389,7 +391,7 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
 int __init early_init_dt_scan_chosen_ppc(unsigned long node, const char *uname,
                                         int depth, void *data)
 {
-       unsigned long *lprop;
+       unsigned long *lprop; /* All these set by kernel, so no need to convert endian */
 
        /* Use common scan routine to determine if this is the chosen node */
        if (early_init_dt_scan_chosen(node, uname, depth, data) == 0)
@@ -454,7 +456,7 @@ static int __init early_init_dt_scan_drconf_memory(unsigned long node)
        if (dm == NULL || l < sizeof(__be32))
                return 0;
 
-       n = *dm++;      /* number of entries */
+       n = of_read_number(dm++, 1);    /* number of entries */
        if (l < (n * (dt_root_addr_cells + 4) + 1) * sizeof(__be32))
                return 0;
 
@@ -466,7 +468,7 @@ static int __init early_init_dt_scan_drconf_memory(unsigned long node)
 
        for (; n != 0; --n) {
                base = dt_mem_next_cell(dt_root_addr_cells, &dm);
-               flags = dm[3];
+               flags = of_read_number(&dm[3], 1);
                /* skip DRC index, pad, assoc. list index, flags */
                dm += 4;
                /* skip this block if the reserved bit is set in flags (0x80)
@@ -550,8 +552,7 @@ void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
 }
 
 #ifdef CONFIG_BLK_DEV_INITRD
-void __init early_init_dt_setup_initrd_arch(unsigned long start,
-               unsigned long end)
+void __init early_init_dt_setup_initrd_arch(u64 start, u64 end)
 {
        initrd_start = (unsigned long)__va(start);
        initrd_end = (unsigned long)__va(end);
@@ -591,16 +592,16 @@ static void __init early_reserve_mem_dt(void)
 static void __init early_reserve_mem(void)
 {
        u64 base, size;
-       u64 *reserve_map;
+       __be64 *reserve_map;
        unsigned long self_base;
        unsigned long self_size;
 
-       reserve_map = (u64 *)(((unsigned long)initial_boot_params) +
-                                       initial_boot_params->off_mem_rsvmap);
+       reserve_map = (__be64 *)(((unsigned long)initial_boot_params) +
+                       be32_to_cpu(initial_boot_params->off_mem_rsvmap));
 
        /* before we do anything, lets reserve the dt blob */
        self_base = __pa((unsigned long)initial_boot_params);
-       self_size = initial_boot_params->totalsize;
+       self_size = be32_to_cpu(initial_boot_params->totalsize);
        memblock_reserve(self_base, self_size);
 
        /* Look for the new "reserved-regions" property in the DT */
@@ -620,15 +621,15 @@ static void __init early_reserve_mem(void)
         * Handle the case where we might be booting from an old kexec
         * image that setup the mem_rsvmap as pairs of 32-bit values
         */
-       if (*reserve_map > 0xffffffffull) {
+       if (be64_to_cpup(reserve_map) > 0xffffffffull) {
                u32 base_32, size_32;
-               u32 *reserve_map_32 = (u32 *)reserve_map;
+               __be32 *reserve_map_32 = (__be32 *)reserve_map;
 
                DBG("Found old 32-bit reserve map\n");
 
                while (1) {
-                       base_32 = *(reserve_map_32++);
-                       size_32 = *(reserve_map_32++);
+                       base_32 = be32_to_cpup(reserve_map_32++);
+                       size_32 = be32_to_cpup(reserve_map_32++);
                        if (size_32 == 0)
                                break;
                        /* skip if the reservation is for the blob */
@@ -644,8 +645,8 @@ static void __init early_reserve_mem(void)
 
        /* Handle the reserve map in the fdt blob if it exists */
        while (1) {
-               base = *(reserve_map++);
-               size = *(reserve_map++);
+               base = be64_to_cpup(reserve_map++);
+               size = be64_to_cpup(reserve_map++);
                if (size == 0)
                        break;
                DBG("reserving: %llx -> %llx\n", base, size);
@@ -795,6 +796,32 @@ struct device_node *of_find_next_cache_node(struct device_node *np)
        return NULL;
 }
 
+/**
+ * of_get_ibm_chip_id - Returns the IBM "chip-id" of a device
+ * @np: device node of the device
+ *
+ * This looks for a property "ibm,chip-id" in the node or any
+ * of its parents and returns its content, or -1 if it cannot
+ * be found.
+ */
+int of_get_ibm_chip_id(struct device_node *np)
+{
+       of_node_get(np);
+       while(np) {
+               struct device_node *old = np;
+               const __be32 *prop;
+
+               prop = of_get_property(np, "ibm,chip-id", NULL);
+               if (prop) {
+                       of_node_put(np);
+                       return be32_to_cpup(prop);
+               }
+               np = of_get_parent(np);
+               of_node_put(old);
+       }
+       return -1;
+}
+
 #ifdef CONFIG_PPC_PSERIES
 /*
  * Fix up the uninitialized fields in a new device node:
@@ -877,7 +904,7 @@ struct device_node *of_get_cpu_node(int cpu, unsigned int *thread)
        hardid = get_hard_smp_processor_id(cpu);
 
        for_each_node_by_type(np, "cpu") {
-               const u32 *intserv;
+               const __be32 *intserv;
                unsigned int plen, t;
 
                /* Check for ibm,ppc-interrupt-server#s. If it doesn't exist
@@ -886,10 +913,10 @@ struct device_node *of_get_cpu_node(int cpu, unsigned int *thread)
                intserv = of_get_property(np, "ibm,ppc-interrupt-server#s",
                                &plen);
                if (intserv == NULL) {
-                       const u32 *reg = of_get_property(np, "reg", NULL);
+                       const __be32 *reg = of_get_property(np, "reg", NULL);
                        if (reg == NULL)
                                continue;
-                       if (*reg == hardid) {
+                       if (be32_to_cpup(reg) == hardid) {
                                if (thread)
                                        *thread = 0;
                                return np;
@@ -897,7 +924,7 @@ struct device_node *of_get_cpu_node(int cpu, unsigned int *thread)
                } else {
                        plen /= sizeof(u32);
                        for (t = 0; t < plen; t++) {
-                               if (hardid == intserv[t]) {
+                               if (hardid == be32_to_cpu(intserv[t])) {
                                        if (thread)
                                                *thread = t;
                                        return np;
@@ -917,7 +944,7 @@ static int __init export_flat_device_tree(void)
        struct dentry *d;
 
        flat_dt_blob.data = initial_boot_params;
-       flat_dt_blob.size = initial_boot_params->totalsize;
+       flat_dt_blob.size = be32_to_cpu(initial_boot_params->totalsize);
 
        d = debugfs_create_blob("flat-device-tree", S_IFREG | S_IRUSR,
                                powerpc_debugfs_root, &flat_dt_blob);
index 607902424e7377046943a14f7a8aeb56471327e4..7b6391b68fb882b20abdf59703e6addd64ac27fc 100644 (file)
@@ -107,10 +107,10 @@ int of_workarounds;
 typedef u32 prom_arg_t;
 
 struct prom_args {
-        u32 service;
-        u32 nargs;
-        u32 nret;
-        prom_arg_t args[10];
+        __be32 service;
+        __be32 nargs;
+        __be32 nret;
+        __be32 args[10];
 };
 
 struct prom_t {
@@ -123,11 +123,11 @@ struct prom_t {
 };
 
 struct mem_map_entry {
-       u64     base;
-       u64     size;
+       __be64  base;
+       __be64  size;
 };
 
-typedef u32 cell_t;
+typedef __be32 cell_t;
 
 extern void __start(unsigned long r3, unsigned long r4, unsigned long r5,
                    unsigned long r6, unsigned long r7, unsigned long r8,
@@ -219,13 +219,13 @@ static int __init call_prom(const char *service, int nargs, int nret, ...)
        struct prom_args args;
        va_list list;
 
-       args.service = ADDR(service);
-       args.nargs = nargs;
-       args.nret = nret;
+       args.service = cpu_to_be32(ADDR(service));
+       args.nargs = cpu_to_be32(nargs);
+       args.nret = cpu_to_be32(nret);
 
        va_start(list, nret);
        for (i = 0; i < nargs; i++)
-               args.args[i] = va_arg(list, prom_arg_t);
+               args.args[i] = cpu_to_be32(va_arg(list, prom_arg_t));
        va_end(list);
 
        for (i = 0; i < nret; i++)
@@ -234,7 +234,7 @@ static int __init call_prom(const char *service, int nargs, int nret, ...)
        if (enter_prom(&args, prom_entry) < 0)
                return PROM_ERROR;
 
-       return (nret > 0) ? args.args[nargs] : 0;
+       return (nret > 0) ? be32_to_cpu(args.args[nargs]) : 0;
 }
 
 static int __init call_prom_ret(const char *service, int nargs, int nret,
@@ -244,13 +244,13 @@ static int __init call_prom_ret(const char *service, int nargs, int nret,
        struct prom_args args;
        va_list list;
 
-       args.service = ADDR(service);
-       args.nargs = nargs;
-       args.nret = nret;
+       args.service = cpu_to_be32(ADDR(service));
+       args.nargs = cpu_to_be32(nargs);
+       args.nret = cpu_to_be32(nret);
 
        va_start(list, rets);
        for (i = 0; i < nargs; i++)
-               args.args[i] = va_arg(list, prom_arg_t);
+               args.args[i] = cpu_to_be32(va_arg(list, prom_arg_t));
        va_end(list);
 
        for (i = 0; i < nret; i++)
@@ -261,9 +261,9 @@ static int __init call_prom_ret(const char *service, int nargs, int nret,
 
        if (rets != NULL)
                for (i = 1; i < nret; ++i)
-                       rets[i-1] = args.args[nargs+i];
+                       rets[i-1] = be32_to_cpu(args.args[nargs+i]);
 
-       return (nret > 0) ? args.args[nargs] : 0;
+       return (nret > 0) ? be32_to_cpu(args.args[nargs]) : 0;
 }
 
 
@@ -527,7 +527,7 @@ static int __init prom_setprop(phandle node, const char *nodename,
 #define islower(c)     ('a' <= (c) && (c) <= 'z')
 #define toupper(c)     (islower(c) ? ((c) - 'a' + 'A') : (c))
 
-unsigned long prom_strtoul(const char *cp, const char **endp)
+static unsigned long prom_strtoul(const char *cp, const char **endp)
 {
        unsigned long result = 0, base = 10, value;
 
@@ -552,7 +552,7 @@ unsigned long prom_strtoul(const char *cp, const char **endp)
        return result;
 }
 
-unsigned long prom_memparse(const char *ptr, const char **retptr)
+static unsigned long prom_memparse(const char *ptr, const char **retptr)
 {
        unsigned long ret = prom_strtoul(ptr, retptr);
        int shift = 0;
@@ -724,7 +724,8 @@ unsigned char ibm_architecture_vec[] = {
 
 };
 
-/* Old method - ELF header with PT_NOTE sections */
+/* Old method - ELF header with PT_NOTE sections only works on BE */
+#ifdef __BIG_ENDIAN__
 static struct fake_elf {
        Elf32_Ehdr      elfhdr;
        Elf32_Phdr      phdr[2];
@@ -810,6 +811,7 @@ static struct fake_elf {
                }
        }
 };
+#endif /* __BIG_ENDIAN__ */
 
 static int __init prom_count_smt_threads(void)
 {
@@ -852,9 +854,9 @@ static int __init prom_count_smt_threads(void)
 
 static void __init prom_send_capabilities(void)
 {
-       ihandle elfloader, root;
+       ihandle root;
        prom_arg_t ret;
-       u32 *cores;
+       __be32 *cores;
 
        root = call_prom("open", 1, 1, ADDR("/"));
        if (root != 0) {
@@ -864,15 +866,15 @@ static void __init prom_send_capabilities(void)
                 * (we assume this is the same for all cores) and use it to
                 * divide NR_CPUS.
                 */
-               cores = (u32 *)&ibm_architecture_vec[IBM_ARCH_VEC_NRCORES_OFFSET];
-               if (*cores != NR_CPUS) {
+               cores = (__be32 *)&ibm_architecture_vec[IBM_ARCH_VEC_NRCORES_OFFSET];
+               if (be32_to_cpup(cores) != NR_CPUS) {
                        prom_printf("WARNING ! "
                                    "ibm_architecture_vec structure inconsistent: %lu!\n",
-                                   *cores);
+                                   be32_to_cpup(cores));
                } else {
-                       *cores = DIV_ROUND_UP(NR_CPUS, prom_count_smt_threads());
+                       *cores = cpu_to_be32(DIV_ROUND_UP(NR_CPUS, prom_count_smt_threads()));
                        prom_printf("Max number of cores passed to firmware: %lu (NR_CPUS = %lu)\n",
-                                   *cores, NR_CPUS);
+                                   be32_to_cpup(cores), NR_CPUS);
                }
 
                /* try calling the ibm,client-architecture-support method */
@@ -893,17 +895,24 @@ static void __init prom_send_capabilities(void)
                prom_printf(" not implemented\n");
        }
 
-       /* no ibm,client-architecture-support call, try the old way */
-       elfloader = call_prom("open", 1, 1, ADDR("/packages/elf-loader"));
-       if (elfloader == 0) {
-               prom_printf("couldn't open /packages/elf-loader\n");
-               return;
+#ifdef __BIG_ENDIAN__
+       {
+               ihandle elfloader;
+
+               /* no ibm,client-architecture-support call, try the old way */
+               elfloader = call_prom("open", 1, 1,
+                                     ADDR("/packages/elf-loader"));
+               if (elfloader == 0) {
+                       prom_printf("couldn't open /packages/elf-loader\n");
+                       return;
+               }
+               call_prom("call-method", 3, 1, ADDR("process-elf-header"),
+                         elfloader, ADDR(&fake_elf));
+               call_prom("close", 1, 0, elfloader);
        }
-       call_prom("call-method", 3, 1, ADDR("process-elf-header"),
-                       elfloader, ADDR(&fake_elf));
-       call_prom("close", 1, 0, elfloader);
+#endif /* __BIG_ENDIAN__ */
 }
-#endif
+#endif /* #if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) */
 
 /*
  * Memory allocation strategy... our layout is normally:
@@ -1050,11 +1059,11 @@ static unsigned long __init prom_next_cell(int s, cell_t **cellp)
                p++;
                s--;
        }
-       r = *p++;
+       r = be32_to_cpu(*p++);
 #ifdef CONFIG_PPC64
        if (s > 1) {
                r <<= 32;
-               r |= *(p++);
+               r |= be32_to_cpu(*(p++));
        }
 #endif
        *cellp = p;
@@ -1087,8 +1096,8 @@ static void __init reserve_mem(u64 base, u64 size)
 
        if (cnt >= (MEM_RESERVE_MAP_SIZE - 1))
                prom_panic("Memory reserve map exhausted !\n");
-       mem_reserve_map[cnt].base = base;
-       mem_reserve_map[cnt].size = size;
+       mem_reserve_map[cnt].base = cpu_to_be64(base);
+       mem_reserve_map[cnt].size = cpu_to_be64(size);
        mem_reserve_cnt = cnt + 1;
 }
 
@@ -1102,6 +1111,7 @@ static void __init prom_init_mem(void)
        char *path, type[64];
        unsigned int plen;
        cell_t *p, *endp;
+       __be32 val;
        u32 rac, rsc;
 
        /*
@@ -1109,12 +1119,14 @@ static void __init prom_init_mem(void)
         * 1) top of RMO (first node)
         * 2) top of memory
         */
-       rac = 2;
-       prom_getprop(prom.root, "#address-cells", &rac, sizeof(rac));
-       rsc = 1;
-       prom_getprop(prom.root, "#size-cells", &rsc, sizeof(rsc));
-       prom_debug("root_addr_cells: %x\n", (unsigned long) rac);
-       prom_debug("root_size_cells: %x\n", (unsigned long) rsc);
+       val = cpu_to_be32(2);
+       prom_getprop(prom.root, "#address-cells", &val, sizeof(val));
+       rac = be32_to_cpu(val);
+       val = cpu_to_be32(1);
+       prom_getprop(prom.root, "#size-cells", &val, sizeof(rsc));
+       rsc = be32_to_cpu(val);
+       prom_debug("root_addr_cells: %x\n", rac);
+       prom_debug("root_size_cells: %x\n", rsc);
 
        prom_debug("scanning memory:\n");
        path = prom_scratch;
@@ -1222,25 +1234,23 @@ static void __init prom_init_mem(void)
 
 static void __init prom_close_stdin(void)
 {
-       ihandle val;
+       __be32 val;
+       ihandle stdin;
 
-       if (prom_getprop(prom.chosen, "stdin", &val, sizeof(val)) > 0)
-               call_prom("close", 1, 0, val);
+       if (prom_getprop(prom.chosen, "stdin", &val, sizeof(val)) > 0) {
+               stdin = be32_to_cpu(val);
+               call_prom("close", 1, 0, stdin);
+       }
 }
 
 #ifdef CONFIG_PPC_POWERNV
 
-static u64 __initdata prom_opal_size;
-static u64 __initdata prom_opal_align;
-static int __initdata prom_rtas_start_cpu;
-static u64 __initdata prom_rtas_data;
-static u64 __initdata prom_rtas_entry;
-
 #ifdef CONFIG_PPC_EARLY_DEBUG_OPAL
 static u64 __initdata prom_opal_base;
 static u64 __initdata prom_opal_entry;
 #endif
 
+#ifdef __BIG_ENDIAN__
 /* XXX Don't change this structure without updating opal-takeover.S */
 static struct opal_secondary_data {
        s64                             ack;    /*  0 */
@@ -1248,6 +1258,12 @@ static struct opal_secondary_data {
        struct opal_takeover_args       args;   /* 16 */
 } opal_secondary_data;
 
+static u64 __initdata prom_opal_align;
+static u64 __initdata prom_opal_size;
+static int __initdata prom_rtas_start_cpu;
+static u64 __initdata prom_rtas_data;
+static u64 __initdata prom_rtas_entry;
+
 extern char opal_secondary_entry;
 
 static void __init prom_query_opal(void)
@@ -1265,6 +1281,7 @@ static void __init prom_query_opal(void)
        }
 
        prom_printf("Querying for OPAL presence... ");
+
        rc = opal_query_takeover(&prom_opal_size,
                                 &prom_opal_align);
        prom_debug("(rc = %ld) ", rc);
@@ -1425,6 +1442,7 @@ static void __init prom_opal_takeover(void)
        for (;;)
                opal_do_takeover(args);
 }
+#endif /* __BIG_ENDIAN__ */
 
 /*
  * Allocate room for and instantiate OPAL
@@ -1435,6 +1453,7 @@ static void __init prom_instantiate_opal(void)
        ihandle opal_inst;
        u64 base, entry;
        u64 size = 0, align = 0x10000;
+       __be64 val64;
        u32 rets[2];
 
        prom_debug("prom_instantiate_opal: start...\n");
@@ -1444,11 +1463,14 @@ static void __init prom_instantiate_opal(void)
        if (!PHANDLE_VALID(opal_node))
                return;
 
-       prom_getprop(opal_node, "opal-runtime-size", &size, sizeof(size));
+       val64 = 0;
+       prom_getprop(opal_node, "opal-runtime-size", &val64, sizeof(val64));
+       size = be64_to_cpu(val64);
        if (size == 0)
                return;
-       prom_getprop(opal_node, "opal-runtime-alignment", &align,
-                    sizeof(align));
+       val64 = 0;
+       prom_getprop(opal_node, "opal-runtime-alignment", &val64,sizeof(val64));
+       align = be64_to_cpu(val64);
 
        base = alloc_down(size, align, 0);
        if (base == 0) {
@@ -1505,6 +1527,7 @@ static void __init prom_instantiate_rtas(void)
        phandle rtas_node;
        ihandle rtas_inst;
        u32 base, entry = 0;
+       __be32 val;
        u32 size = 0;
 
        prom_debug("prom_instantiate_rtas: start...\n");
@@ -1514,7 +1537,9 @@ static void __init prom_instantiate_rtas(void)
        if (!PHANDLE_VALID(rtas_node))
                return;
 
-       prom_getprop(rtas_node, "rtas-size", &size, sizeof(size));
+       val = 0;
+       prom_getprop(rtas_node, "rtas-size", &val, sizeof(size));
+       size = be32_to_cpu(val);
        if (size == 0)
                return;
 
@@ -1541,12 +1566,14 @@ static void __init prom_instantiate_rtas(void)
 
        reserve_mem(base, size);
 
+       val = cpu_to_be32(base);
        prom_setprop(rtas_node, "/rtas", "linux,rtas-base",
-                    &base, sizeof(base));
+                    &val, sizeof(val));
+       val = cpu_to_be32(entry);
        prom_setprop(rtas_node, "/rtas", "linux,rtas-entry",
-                    &entry, sizeof(entry));
+                    &val, sizeof(val));
 
-#ifdef CONFIG_PPC_POWERNV
+#if defined(CONFIG_PPC_POWERNV) && defined(__BIG_ENDIAN__)
        /* PowerVN takeover hack */
        prom_rtas_data = base;
        prom_rtas_entry = entry;
@@ -1620,6 +1647,7 @@ static void __init prom_instantiate_sml(void)
 /*
  * Allocate room for and initialize TCE tables
  */
+#ifdef __BIG_ENDIAN__
 static void __init prom_initialize_tce_table(void)
 {
        phandle node;
@@ -1748,7 +1776,8 @@ static void __init prom_initialize_tce_table(void)
        /* Flag the first invalid entry */
        prom_debug("ending prom_initialize_tce_table\n");
 }
-#endif
+#endif /* __BIG_ENDIAN__ */
+#endif /* CONFIG_PPC64 */
 
 /*
  * With CHRP SMP we need to use the OF to start the other processors.
@@ -1777,7 +1806,6 @@ static void __init prom_initialize_tce_table(void)
 static void __init prom_hold_cpus(void)
 {
        unsigned long i;
-       unsigned int reg;
        phandle node;
        char type[64];
        unsigned long *spinloop
@@ -1803,6 +1831,9 @@ static void __init prom_hold_cpus(void)
 
        /* look for cpus */
        for (node = 0; prom_next_node(&node); ) {
+               unsigned int cpu_no;
+               __be32 reg;
+
                type[0] = 0;
                prom_getprop(node, "device_type", type, sizeof(type));
                if (strcmp(type, "cpu") != 0)
@@ -1813,10 +1844,11 @@ static void __init prom_hold_cpus(void)
                        if (strcmp(type, "okay") != 0)
                                continue;
 
-               reg = -1;
+               reg = cpu_to_be32(-1); /* make sparse happy */
                prom_getprop(node, "reg", &reg, sizeof(reg));
+               cpu_no = be32_to_cpu(reg);
 
-               prom_debug("cpu hw idx   = %lu\n", reg);
+               prom_debug("cpu hw idx   = %lu\n", cpu_no);
 
                /* Init the acknowledge var which will be reset by
                 * the secondary cpu when it awakens from its OF
@@ -1824,24 +1856,24 @@ static void __init prom_hold_cpus(void)
                 */
                *acknowledge = (unsigned long)-1;
 
-               if (reg != prom.cpu) {
+               if (cpu_no != prom.cpu) {
                        /* Primary Thread of non-boot cpu or any thread */
-                       prom_printf("starting cpu hw idx %lu... ", reg);
+                       prom_printf("starting cpu hw idx %lu... ", cpu_no);
                        call_prom("start-cpu", 3, 0, node,
-                                 secondary_hold, reg);
+                                 secondary_hold, cpu_no);
 
                        for (i = 0; (i < 100000000) && 
                             (*acknowledge == ((unsigned long)-1)); i++ )
                                mb();
 
-                       if (*acknowledge == reg)
+                       if (*acknowledge == cpu_no)
                                prom_printf("done\n");
                        else
                                prom_printf("failed: %x\n", *acknowledge);
                }
 #ifdef CONFIG_SMP
                else
-                       prom_printf("boot cpu hw idx %lu\n", reg);
+                       prom_printf("boot cpu hw idx %lu\n", cpu_no);
 #endif /* CONFIG_SMP */
        }
 
@@ -1895,6 +1927,7 @@ static void __init prom_find_mmu(void)
        prom.memory = call_prom("open", 1, 1, ADDR("/memory"));
        prom_getprop(prom.chosen, "mmu", &prom.mmumap,
                     sizeof(prom.mmumap));
+       prom.mmumap = be32_to_cpu(prom.mmumap);
        if (!IHANDLE_VALID(prom.memory) || !IHANDLE_VALID(prom.mmumap))
                of_workarounds &= ~OF_WA_CLAIM;         /* hmmm */
 }
@@ -1906,17 +1939,19 @@ static void __init prom_init_stdout(void)
 {
        char *path = of_stdout_device;
        char type[16];
-       u32 val;
+       phandle stdout_node;
+       __be32 val;
 
        if (prom_getprop(prom.chosen, "stdout", &val, sizeof(val)) <= 0)
                prom_panic("cannot find stdout");
 
-       prom.stdout = val;
+       prom.stdout = be32_to_cpu(val);
 
        /* Get the full OF pathname of the stdout device */
        memset(path, 0, 256);
        call_prom("instance-to-path", 3, 1, prom.stdout, path, 255);
-       val = call_prom("instance-to-package", 1, 1, prom.stdout);
+       stdout_node = call_prom("instance-to-package", 1, 1, prom.stdout);
+       val = cpu_to_be32(stdout_node);
        prom_setprop(prom.chosen, "/chosen", "linux,stdout-package",
                     &val, sizeof(val));
        prom_printf("OF stdout device is: %s\n", of_stdout_device);
@@ -1925,9 +1960,9 @@ static void __init prom_init_stdout(void)
 
        /* If it's a display, note it */
        memset(type, 0, sizeof(type));
-       prom_getprop(val, "device_type", type, sizeof(type));
+       prom_getprop(stdout_node, "device_type", type, sizeof(type));
        if (strcmp(type, "display") == 0)
-               prom_setprop(val, path, "linux,boot-display", NULL, 0);
+               prom_setprop(stdout_node, path, "linux,boot-display", NULL, 0);
 }
 
 static int __init prom_find_machine_type(void)
@@ -2082,6 +2117,22 @@ static void __init prom_check_displays(void)
                                           clut[2]) != 0)
                                break;
 #endif /* CONFIG_LOGO_LINUX_CLUT224 */
+
+#ifdef CONFIG_PPC_EARLY_DEBUG_BOOTX
+               if (prom_getprop(node, "linux,boot-display", NULL, 0) !=
+                   PROM_ERROR) {
+                       u32 width, height, pitch, addr;
+
+                       prom_printf("Setting btext !\n");
+                       prom_getprop(node, "width", &width, 4);
+                       prom_getprop(node, "height", &height, 4);
+                       prom_getprop(node, "linebytes", &pitch, 4);
+                       prom_getprop(node, "address", &addr, 4);
+                       prom_printf("W=%d H=%d LB=%d addr=0x%x\n",
+                                   width, height, pitch, addr);
+                       btext_setup_display(width, height, 8, pitch, addr);
+               }
+#endif /* CONFIG_PPC_EARLY_DEBUG_BOOTX */
        }
 }
 
@@ -2117,8 +2168,10 @@ static void __init *make_room(unsigned long *mem_start, unsigned long *mem_end,
        return ret;
 }
 
-#define dt_push_token(token, mem_start, mem_end) \
-       do { *((u32 *)make_room(mem_start, mem_end, 4, 4)) = token; } while(0)
+#define dt_push_token(token, mem_start, mem_end) do {                  \
+               void *room = make_room(mem_start, mem_end, 4, 4);       \
+               *(__be32 *)room = cpu_to_be32(token);                   \
+       } while(0)
 
 static unsigned long __init dt_find_string(char *str)
 {
@@ -2291,7 +2344,7 @@ static void __init scan_dt_build_struct(phandle node, unsigned long *mem_start,
                        dt_push_token(4, mem_start, mem_end);
                        dt_push_token(soff, mem_start, mem_end);
                        valp = make_room(mem_start, mem_end, 4, 4);
-                       *(u32 *)valp = node;
+                       *(__be32 *)valp = cpu_to_be32(node);
                }
        }
 
@@ -2364,16 +2417,16 @@ static void __init flatten_device_tree(void)
        dt_struct_end = PAGE_ALIGN(mem_start);
 
        /* Finish header */
-       hdr->boot_cpuid_phys = prom.cpu;
-       hdr->magic = OF_DT_HEADER;
-       hdr->totalsize = dt_struct_end - dt_header_start;
-       hdr->off_dt_struct = dt_struct_start - dt_header_start;
-       hdr->off_dt_strings = dt_string_start - dt_header_start;
-       hdr->dt_strings_size = dt_string_end - dt_string_start;
-       hdr->off_mem_rsvmap = ((unsigned long)rsvmap) - dt_header_start;
-       hdr->version = OF_DT_VERSION;
+       hdr->boot_cpuid_phys = cpu_to_be32(prom.cpu);
+       hdr->magic = cpu_to_be32(OF_DT_HEADER);
+       hdr->totalsize = cpu_to_be32(dt_struct_end - dt_header_start);
+       hdr->off_dt_struct = cpu_to_be32(dt_struct_start - dt_header_start);
+       hdr->off_dt_strings = cpu_to_be32(dt_string_start - dt_header_start);
+       hdr->dt_strings_size = cpu_to_be32(dt_string_end - dt_string_start);
+       hdr->off_mem_rsvmap = cpu_to_be32(((unsigned long)rsvmap) - dt_header_start);
+       hdr->version = cpu_to_be32(OF_DT_VERSION);
        /* Version 16 is not backward compatible */
-       hdr->last_comp_version = 0x10;
+       hdr->last_comp_version = cpu_to_be32(0x10);
 
        /* Copy the reserve map in */
        memcpy(rsvmap, mem_reserve_map, sizeof(mem_reserve_map));
@@ -2384,8 +2437,8 @@ static void __init flatten_device_tree(void)
                prom_printf("reserved memory map:\n");
                for (i = 0; i < mem_reserve_cnt; i++)
                        prom_printf("  %x - %x\n",
-                                   mem_reserve_map[i].base,
-                                   mem_reserve_map[i].size);
+                                   be64_to_cpu(mem_reserve_map[i].base),
+                                   be64_to_cpu(mem_reserve_map[i].size));
        }
 #endif
        /* Bump mem_reserve_cnt to cause further reservations to fail
@@ -2397,7 +2450,6 @@ static void __init flatten_device_tree(void)
                    dt_string_start, dt_string_end);
        prom_printf("Device tree struct  0x%x -> 0x%x\n",
                    dt_struct_start, dt_struct_end);
-
 }
 
 #ifdef CONFIG_PPC_MAPLE
@@ -2730,18 +2782,19 @@ static void __init fixup_device_tree(void)
 
 static void __init prom_find_boot_cpu(void)
 {
-       u32 getprop_rval;
+       __be32 rval;
        ihandle prom_cpu;
        phandle cpu_pkg;
 
-       prom.cpu = 0;
-       if (prom_getprop(prom.chosen, "cpu", &prom_cpu, sizeof(prom_cpu)) <= 0)
+       rval = 0;
+       if (prom_getprop(prom.chosen, "cpu", &rval, sizeof(rval)) <= 0)
                return;
+       prom_cpu = be32_to_cpu(rval);
 
        cpu_pkg = call_prom("instance-to-package", 1, 1, prom_cpu);
 
-       prom_getprop(cpu_pkg, "reg", &getprop_rval, sizeof(getprop_rval));
-       prom.cpu = getprop_rval;
+       prom_getprop(cpu_pkg, "reg", &rval, sizeof(rval));
+       prom.cpu = be32_to_cpu(rval);
 
        prom_debug("Booting CPU hw index = %lu\n", prom.cpu);
 }
@@ -2750,15 +2803,15 @@ static void __init prom_check_initrd(unsigned long r3, unsigned long r4)
 {
 #ifdef CONFIG_BLK_DEV_INITRD
        if (r3 && r4 && r4 != 0xdeadbeef) {
-               unsigned long val;
+               __be64 val;
 
                prom_initrd_start = is_kernel_addr(r3) ? __pa(r3) : r3;
                prom_initrd_end = prom_initrd_start + r4;
 
-               val = prom_initrd_start;
+               val = cpu_to_be64(prom_initrd_start);
                prom_setprop(prom.chosen, "/chosen", "linux,initrd-start",
                             &val, sizeof(val));
-               val = prom_initrd_end;
+               val = cpu_to_be64(prom_initrd_end);
                prom_setprop(prom.chosen, "/chosen", "linux,initrd-end",
                             &val, sizeof(val));
 
@@ -2915,7 +2968,7 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
         */
        prom_check_displays();
 
-#ifdef CONFIG_PPC64
+#if defined(CONFIG_PPC64) && defined(__BIG_ENDIAN__)
        /*
         * Initialize IOMMU (TCE tables) on pSeries. Do that before anything else
         * that uses the allocator, we need to make sure we get the top of memory
@@ -2934,6 +2987,7 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
                prom_instantiate_rtas();
 
 #ifdef CONFIG_PPC_POWERNV
+#ifdef __BIG_ENDIAN__
        /* Detect HAL and try instanciating it & doing takeover */
        if (of_platform == PLATFORM_PSERIES_LPAR) {
                prom_query_opal();
@@ -2941,9 +2995,11 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
                        prom_opal_hold_cpus();
                        prom_opal_takeover();
                }
-       } else if (of_platform == PLATFORM_OPAL)
+       } else
+#endif /* __BIG_ENDIAN__ */
+       if (of_platform == PLATFORM_OPAL)
                prom_instantiate_opal();
-#endif
+#endif /* CONFIG_PPC_POWERNV */
 
 #ifdef CONFIG_PPC64
        /* instantiate sml */
@@ -2962,10 +3018,11 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
        /*
         * Fill in some infos for use by the kernel later on
         */
-       if (prom_memory_limit)
+       if (prom_memory_limit) {
+               __be64 val = cpu_to_be64(prom_memory_limit);
                prom_setprop(prom.chosen, "/chosen", "linux,memory-limit",
-                            &prom_memory_limit,
-                            sizeof(prom_memory_limit));
+                            &val, sizeof(val));
+       }
 #ifdef CONFIG_PPC64
        if (prom_iommu_off)
                prom_setprop(prom.chosen, "/chosen", "linux,iommu-off",
index 3765da6be4f250d2a7aec7887460c2a0e89e6255..b0c263da219a2b4a0bcfc7ed09b69c26cb12432c 100644 (file)
@@ -22,7 +22,8 @@ __secondary_hold_acknowledge __secondary_hold_spinloop __start
 strcmp strcpy strlcpy strlen strncmp strstr logo_linux_clut224
 reloc_got2 kernstart_addr memstart_addr linux_banner _stext
 opal_query_takeover opal_do_takeover opal_enter_rtas opal_secondary_entry
-boot_command_line __prom_init_toc_start __prom_init_toc_end"
+boot_command_line __prom_init_toc_start __prom_init_toc_end
+btext_setup_display"
 
 NM="$1"
 OBJ="$2"
index 4e1331b8eb33e5c613394e312b2799d365fa07f8..6295e646f78cca6a204d4becdb2ec7d8ccdfde40 100644 (file)
@@ -7,28 +7,27 @@
 #include <linux/of_address.h>
 #include <asm/prom.h>
 
-void of_parse_dma_window(struct device_node *dn, const void *dma_window_prop,
-               unsigned long *busno, unsigned long *phys, unsigned long *size)
+void of_parse_dma_window(struct device_node *dn, const __be32 *dma_window,
+                        unsigned long *busno, unsigned long *phys,
+                        unsigned long *size)
 {
-       const u32 *dma_window;
        u32 cells;
-       const unsigned char *prop;
-
-       dma_window = dma_window_prop;
+       const __be32 *prop;
 
        /* busno is always one cell */
-       *busno = *(dma_window++);
+       *busno = of_read_number(dma_window, 1);
+       dma_window++;
 
        prop = of_get_property(dn, "ibm,#dma-address-cells", NULL);
        if (!prop)
                prop = of_get_property(dn, "#address-cells", NULL);
 
-       cells = prop ? *(u32 *)prop : of_n_addr_cells(dn);
+       cells = prop ? of_read_number(prop, 1) : of_n_addr_cells(dn);
        *phys = of_read_number(dma_window, cells);
 
        dma_window += cells;
 
        prop = of_get_property(dn, "ibm,#dma-size-cells", NULL);
-       cells = prop ? *(u32 *)prop : of_n_size_cells(dn);
+       cells = prop ? of_read_number(prop, 1) : of_n_size_cells(dn);
        *size = of_read_number(dma_window, cells);
 }
index 80b5ef403f685d177c0c974eaaf443b615cd8c75..4cf674d7d5ae184b14c0c4e08985dfe1ad4779a1 100644 (file)
@@ -91,7 +91,7 @@ static void unlock_rtas(unsigned long flags)
  * are designed only for very early low-level debugging, which
  * is why the token is hard-coded to 10.
  */
-static void call_rtas_display_status(char c)
+static void call_rtas_display_status(unsigned char c)
 {
        struct rtas_args *args = &rtas.args;
        unsigned long s;
@@ -100,11 +100,11 @@ static void call_rtas_display_status(char c)
                return;
        s = lock_rtas();
 
-       args->token = 10;
-       args->nargs = 1;
-       args->nret  = 1;
-       args->rets  = (rtas_arg_t *)&(args->args[1]);
-       args->args[0] = (unsigned char)c;
+       args->token = cpu_to_be32(10);
+       args->nargs = cpu_to_be32(1);
+       args->nret  = cpu_to_be32(1);
+       args->rets  = &(args->args[1]);
+       args->args[0] = cpu_to_be32(c);
 
        enter_rtas(__pa(args));
 
@@ -204,7 +204,7 @@ void rtas_progress(char *s, unsigned short hex)
 {
        struct device_node *root;
        int width;
-       const int *p;
+       const __be32 *p;
        char *os;
        static int display_character, set_indicator;
        static int display_width, display_lines, form_feed;
@@ -221,13 +221,13 @@ void rtas_progress(char *s, unsigned short hex)
                if ((root = of_find_node_by_path("/rtas"))) {
                        if ((p = of_get_property(root,
                                        "ibm,display-line-length", NULL)))
-                               display_width = *p;
+                               display_width = be32_to_cpu(*p);
                        if ((p = of_get_property(root,
                                        "ibm,form-feed", NULL)))
-                               form_feed = *p;
+                               form_feed = be32_to_cpu(*p);
                        if ((p = of_get_property(root,
                                        "ibm,display-number-of-lines", NULL)))
-                               display_lines = *p;
+                               display_lines = be32_to_cpu(*p);
                        row_width = of_get_property(root,
                                        "ibm,display-truncation-length", NULL);
                        of_node_put(root);
@@ -322,11 +322,11 @@ EXPORT_SYMBOL(rtas_progress);             /* needed by rtas_flash module */
 
 int rtas_token(const char *service)
 {
-       const int *tokp;
+       const __be32 *tokp;
        if (rtas.dev == NULL)
                return RTAS_UNKNOWN_SERVICE;
        tokp = of_get_property(rtas.dev, service, NULL);
-       return tokp ? *tokp : RTAS_UNKNOWN_SERVICE;
+       return tokp ? be32_to_cpu(*tokp) : RTAS_UNKNOWN_SERVICE;
 }
 EXPORT_SYMBOL(rtas_token);
 
@@ -380,11 +380,11 @@ static char *__fetch_rtas_last_error(char *altbuf)
 
        bufsz = rtas_get_error_log_max();
 
-       err_args.token = rtas_last_error_token;
-       err_args.nargs = 2;
-       err_args.nret = 1;
-       err_args.args[0] = (rtas_arg_t)__pa(rtas_err_buf);
-       err_args.args[1] = bufsz;
+       err_args.token = cpu_to_be32(rtas_last_error_token);
+       err_args.nargs = cpu_to_be32(2);
+       err_args.nret = cpu_to_be32(1);
+       err_args.args[0] = cpu_to_be32(__pa(rtas_err_buf));
+       err_args.args[1] = cpu_to_be32(bufsz);
        err_args.args[2] = 0;
 
        save_args = rtas.args;
@@ -433,13 +433,13 @@ int rtas_call(int token, int nargs, int nret, int *outputs, ...)
        s = lock_rtas();
        rtas_args = &rtas.args;
 
-       rtas_args->token = token;
-       rtas_args->nargs = nargs;
-       rtas_args->nret  = nret;
-       rtas_args->rets  = (rtas_arg_t *)&(rtas_args->args[nargs]);
+       rtas_args->token = cpu_to_be32(token);
+       rtas_args->nargs = cpu_to_be32(nargs);
+       rtas_args->nret  = cpu_to_be32(nret);
+       rtas_args->rets  = &(rtas_args->args[nargs]);
        va_start(list, outputs);
        for (i = 0; i < nargs; ++i)
-               rtas_args->args[i] = va_arg(list, rtas_arg_t);
+               rtas_args->args[i] = cpu_to_be32(va_arg(list, __u32));
        va_end(list);
 
        for (i = 0; i < nret; ++i)
@@ -449,13 +449,13 @@ int rtas_call(int token, int nargs, int nret, int *outputs, ...)
 
        /* A -1 return code indicates that the last command couldn't
           be completed due to a hardware error. */
-       if (rtas_args->rets[0] == -1)
+       if (be32_to_cpu(rtas_args->rets[0]) == -1)
                buff_copy = __fetch_rtas_last_error(NULL);
 
        if (nret > 1 && outputs != NULL)
                for (i = 0; i < nret-1; ++i)
-                       outputs[i] = rtas_args->rets[i+1];
-       ret = (nret > 0)? rtas_args->rets[0]: 0;
+                       outputs[i] = be32_to_cpu(rtas_args->rets[i+1]);
+       ret = (nret > 0)? be32_to_cpu(rtas_args->rets[0]): 0;
 
        unlock_rtas(s);
 
@@ -588,8 +588,8 @@ bool rtas_indicator_present(int token, int *maxindex)
 {
        int proplen, count, i;
        const struct indicator_elem {
-               u32 token;
-               u32 maxindex;
+               __be32 token;
+               __be32 maxindex;
        } *indicators;
 
        indicators = of_get_property(rtas.dev, "rtas-indicators", &proplen);
@@ -599,10 +599,10 @@ bool rtas_indicator_present(int token, int *maxindex)
        count = proplen / sizeof(struct indicator_elem);
 
        for (i = 0; i < count; i++) {
-               if (indicators[i].token != token)
+               if (__be32_to_cpu(indicators[i].token) != token)
                        continue;
                if (maxindex)
-                       *maxindex = indicators[i].maxindex;
+                       *maxindex = __be32_to_cpu(indicators[i].maxindex);
                return true;
        }
 
@@ -1097,19 +1097,19 @@ void __init rtas_initialize(void)
         */
        rtas.dev = of_find_node_by_name(NULL, "rtas");
        if (rtas.dev) {
-               const u32 *basep, *entryp, *sizep;
+               const __be32 *basep, *entryp, *sizep;
 
                basep = of_get_property(rtas.dev, "linux,rtas-base", NULL);
                sizep = of_get_property(rtas.dev, "rtas-size", NULL);
                if (basep != NULL && sizep != NULL) {
-                       rtas.base = *basep;
-                       rtas.size = *sizep;
+                       rtas.base = __be32_to_cpu(*basep);
+                       rtas.size = __be32_to_cpu(*sizep);
                        entryp = of_get_property(rtas.dev,
                                        "linux,rtas-entry", NULL);
                        if (entryp == NULL) /* Ugh */
                                rtas.entry = rtas.base;
                        else
-                               rtas.entry = *entryp;
+                               rtas.entry = __be32_to_cpu(*entryp);
                } else
                        rtas.dev = NULL;
        }
index 63d051f5b7a514b0ec7888b5eabd2dde1a665cbe..3d261c071fc88b9ed7a1ac7de43c7587d5a9b60e 100644 (file)
@@ -436,7 +436,8 @@ void __init smp_setup_cpu_maps(void)
        DBG("smp_setup_cpu_maps()\n");
 
        while ((dn = of_find_node_by_type(dn, "cpu")) && cpu < nr_cpu_ids) {
-               const int *intserv;
+               const __be32 *intserv;
+               __be32 cpu_be;
                int j, len;
 
                DBG("  * %s...\n", dn->full_name);
@@ -450,15 +451,17 @@ void __init smp_setup_cpu_maps(void)
                } else {
                        DBG("    no ibm,ppc-interrupt-server#s -> 1 thread\n");
                        intserv = of_get_property(dn, "reg", NULL);
-                       if (!intserv)
-                               intserv = &cpu; /* assume logical == phys */
+                       if (!intserv) {
+                               cpu_be = cpu_to_be32(cpu);
+                               intserv = &cpu_be;      /* assume logical == phys */
+                       }
                }
 
                for (j = 0; j < nthreads && cpu < nr_cpu_ids; j++) {
                        DBG("    thread %d -> cpu %d (hard id %d)\n",
-                           j, cpu, intserv[j]);
+                           j, cpu, be32_to_cpu(intserv[j]));
                        set_cpu_present(cpu, true);
-                       set_hard_smp_processor_id(cpu, intserv[j]);
+                       set_hard_smp_processor_id(cpu, be32_to_cpu(intserv[j]));
                        set_cpu_possible(cpu, true);
                        cpu++;
                }
index a8f54ecb091f866a7c93cb76d36d1a3702bf2184..a4bbcae72578617688f510d05aaed2883aef7c10 100644 (file)
@@ -38,6 +38,7 @@
 #include <asm/serial.h>
 #include <asm/udbg.h>
 #include <asm/mmu_context.h>
+#include <asm/epapr_hcalls.h>
 
 #include "setup.h"
 
@@ -128,6 +129,8 @@ notrace void __init machine_init(u64 dt_ptr)
        /* Do some early initialization based on the flat device tree */
        early_init_devtree(__va(dt_ptr));
 
+       epapr_paravirt_early_init();
+
        early_init_mmu();
 
        probe_machine();
@@ -326,5 +329,4 @@ void __init setup_arch(char **cmdline_p)
 
        /* Initialize the MMU context management stuff */
        mmu_context_init();
-
 }
index 389fb8077cc9cea25746b12497673dd573d35c56..45f2d1fac670ef64f8b87027c853ba4b7736686d 100644 (file)
@@ -10,7 +10,7 @@
  *      2 of the License, or (at your option) any later version.
  */
 
-#undef DEBUG
+#define DEBUG
 
 #include <linux/export.h>
 #include <linux/string.h>
@@ -66,6 +66,7 @@
 #include <asm/code-patching.h>
 #include <asm/kvm_ppc.h>
 #include <asm/hugetlb.h>
+#include <asm/epapr_hcalls.h>
 
 #include "setup.h"
 
@@ -215,6 +216,8 @@ void __init early_setup(unsigned long dt_ptr)
         */
        early_init_devtree(__va(dt_ptr));
 
+       epapr_paravirt_early_init();
+
        /* Now we know the logical id of our boot cpu, setup the paca. */
        setup_paca(&paca[boot_cpuid]);
        fixup_boot_paca();
@@ -237,6 +240,18 @@ void __init early_setup(unsigned long dt_ptr)
        reserve_hugetlb_gpages();
 
        DBG(" <- early_setup()\n");
+
+#ifdef CONFIG_PPC_EARLY_DEBUG_BOOTX
+       /*
+        * This needs to be done *last* (after the above DBG() even)
+        *
+        * Right after we return from this function, we turn on the MMU
+        * which means the real-mode access trick that btext does will
+        * no longer work, it needs to switch to using a real MMU
+        * mapping. This call will ensure that it does
+        */
+       btext_map();
+#endif /* CONFIG_PPC_EARLY_DEBUG_BOOTX */
 }
 
 #ifdef CONFIG_SMP
@@ -305,14 +320,14 @@ static void __init initialize_cache_info(void)
                 * d-cache and i-cache sizes... -Peter
                 */
                if (num_cpus == 1) {
-                       const u32 *sizep, *lsizep;
+                       const __be32 *sizep, *lsizep;
                        u32 size, lsize;
 
                        size = 0;
                        lsize = cur_cpu_spec->dcache_bsize;
                        sizep = of_get_property(np, "d-cache-size", NULL);
                        if (sizep != NULL)
-                               size = *sizep;
+                               size = be32_to_cpu(*sizep);
                        lsizep = of_get_property(np, "d-cache-block-size",
                                                 NULL);
                        /* fallback if block size missing */
@@ -321,8 +336,8 @@ static void __init initialize_cache_info(void)
                                                         "d-cache-line-size",
                                                         NULL);
                        if (lsizep != NULL)
-                               lsize = *lsizep;
-                       if (sizep == 0 || lsizep == 0)
+                               lsize = be32_to_cpu(*lsizep);
+                       if (sizep == NULL || lsizep == NULL)
                                DBG("Argh, can't find dcache properties ! "
                                    "sizep: %p, lsizep: %p\n", sizep, lsizep);
 
@@ -335,7 +350,7 @@ static void __init initialize_cache_info(void)
                        lsize = cur_cpu_spec->icache_bsize;
                        sizep = of_get_property(np, "i-cache-size", NULL);
                        if (sizep != NULL)
-                               size = *sizep;
+                               size = be32_to_cpu(*sizep);
                        lsizep = of_get_property(np, "i-cache-block-size",
                                                 NULL);
                        if (lsizep == NULL)
@@ -343,8 +358,8 @@ static void __init initialize_cache_info(void)
                                                         "i-cache-line-size",
                                                         NULL);
                        if (lsizep != NULL)
-                               lsize = *lsizep;
-                       if (sizep == 0 || lsizep == 0)
+                               lsize = be32_to_cpu(*lsizep);
+                       if (sizep == NULL || lsizep == NULL)
                                DBG("Argh, can't find icache properties ! "
                                    "sizep: %p, lsizep: %p\n", sizep, lsizep);
 
@@ -701,8 +716,7 @@ void __init setup_per_cpu_areas(void)
 #endif
 
 
-#ifdef CONFIG_PPC_INDIRECT_IO
+#if defined(CONFIG_PPC_INDIRECT_PIO) || defined(CONFIG_PPC_INDIRECT_MMIO)
 struct ppc_pci_io ppc_pci_io;
 EXPORT_SYMBOL(ppc_pci_io);
-#endif /* CONFIG_PPC_INDIRECT_IO */
-
+#endif
index 0f83122e6676cf8dd8692d3e64f72a2c961f34bb..bebdf1a1a5403df741ea389102f1b69b80daf60e 100644 (file)
@@ -436,7 +436,10 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
         * use altivec. Since VSCR only contains 32 bits saved in the least
         * significant bits of a vector, we "cheat" and stuff VRSAVE in the
         * most significant bits of that same vector. --BenH
+        * Note that the current VRSAVE value is in the SPR at this point.
         */
+       if (cpu_has_feature(CPU_FTR_ALTIVEC))
+               current->thread.vrsave = mfspr(SPRN_VRSAVE);
        if (__put_user(current->thread.vrsave, (u32 __user *)&frame->mc_vregs[32]))
                return 1;
 #endif /* CONFIG_ALTIVEC */
@@ -557,6 +560,8 @@ static int save_tm_user_regs(struct pt_regs *regs,
         * significant bits of a vector, we "cheat" and stuff VRSAVE in the
         * most significant bits of that same vector. --BenH
         */
+       if (cpu_has_feature(CPU_FTR_ALTIVEC))
+               current->thread.vrsave = mfspr(SPRN_VRSAVE);
        if (__put_user(current->thread.vrsave,
                       (u32 __user *)&frame->mc_vregs[32]))
                return 1;
@@ -696,6 +701,8 @@ static long restore_user_regs(struct pt_regs *regs,
        /* Always get VRSAVE back */
        if (__get_user(current->thread.vrsave, (u32 __user *)&sr->mc_vregs[32]))
                return 1;
+       if (cpu_has_feature(CPU_FTR_ALTIVEC))
+               mtspr(SPRN_VRSAVE, current->thread.vrsave);
 #endif /* CONFIG_ALTIVEC */
        if (copy_fpr_from_user(current, &sr->mc_fregs))
                return 1;
@@ -809,6 +816,8 @@ static long restore_tm_user_regs(struct pt_regs *regs,
            __get_user(current->thread.transact_vrsave,
                       (u32 __user *)&tm_sr->mc_vregs[32]))
                return 1;
+       if (cpu_has_feature(CPU_FTR_ALTIVEC))
+               mtspr(SPRN_VRSAVE, current->thread.vrsave);
 #endif /* CONFIG_ALTIVEC */
 
        regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);
index 887e99d85bc270eefeb949d30dcf4d45fd3d3647..f93ec2835a13f01294a9b3d5c225686a04666702 100644 (file)
@@ -96,8 +96,6 @@ static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
        unsigned long msr = regs->msr;
        long err = 0;
 
-       flush_fp_to_thread(current);
-
 #ifdef CONFIG_ALTIVEC
        err |= __put_user(v_regs, &sc->v_regs);
 
@@ -114,6 +112,8 @@ static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
        /* We always copy to/from vrsave, it's 0 if we don't have or don't
         * use altivec.
         */
+       if (cpu_has_feature(CPU_FTR_ALTIVEC))
+               current->thread.vrsave = mfspr(SPRN_VRSAVE);
        err |= __put_user(current->thread.vrsave, (u32 __user *)&v_regs[33]);
 #else /* CONFIG_ALTIVEC */
        err |= __put_user(0, &sc->v_regs);
@@ -217,6 +217,8 @@ static long setup_tm_sigcontexts(struct sigcontext __user *sc,
        /* We always copy to/from vrsave, it's 0 if we don't have or don't
         * use altivec.
         */
+       if (cpu_has_feature(CPU_FTR_ALTIVEC))
+               current->thread.vrsave = mfspr(SPRN_VRSAVE);
        err |= __put_user(current->thread.vrsave, (u32 __user *)&v_regs[33]);
        if (msr & MSR_VEC)
                err |= __put_user(current->thread.transact_vrsave,
@@ -346,16 +348,18 @@ static long restore_sigcontext(struct pt_regs *regs, sigset_t *set, int sig,
        if (v_regs && !access_ok(VERIFY_READ, v_regs, 34 * sizeof(vector128)))
                return -EFAULT;
        /* Copy 33 vec registers (vr0..31 and vscr) from the stack */
-       if (v_regs != 0 && (msr & MSR_VEC) != 0)
+       if (v_regs != NULL && (msr & MSR_VEC) != 0)
                err |= __copy_from_user(current->thread.vr, v_regs,
                                        33 * sizeof(vector128));
        else if (current->thread.used_vr)
                memset(current->thread.vr, 0, 33 * sizeof(vector128));
        /* Always get VRSAVE back */
-       if (v_regs != 0)
+       if (v_regs != NULL)
                err |= __get_user(current->thread.vrsave, (u32 __user *)&v_regs[33]);
        else
                current->thread.vrsave = 0;
+       if (cpu_has_feature(CPU_FTR_ALTIVEC))
+               mtspr(SPRN_VRSAVE, current->thread.vrsave);
 #endif /* CONFIG_ALTIVEC */
        /* restore floating point */
        err |= copy_fpr_from_user(current, &sc->fp_regs);
@@ -463,7 +467,7 @@ static long restore_tm_sigcontexts(struct pt_regs *regs,
                                    tm_v_regs, 34 * sizeof(vector128)))
                return -EFAULT;
        /* Copy 33 vec registers (vr0..31 and vscr) from the stack */
-       if (v_regs != 0 && tm_v_regs != 0 && (msr & MSR_VEC) != 0) {
+       if (v_regs != NULL && tm_v_regs != NULL && (msr & MSR_VEC) != 0) {
                err |= __copy_from_user(current->thread.vr, v_regs,
                                        33 * sizeof(vector128));
                err |= __copy_from_user(current->thread.transact_vr, tm_v_regs,
@@ -474,7 +478,7 @@ static long restore_tm_sigcontexts(struct pt_regs *regs,
                memset(current->thread.transact_vr, 0, 33 * sizeof(vector128));
        }
        /* Always get VRSAVE back */
-       if (v_regs != 0 && tm_v_regs != 0) {
+       if (v_regs != NULL && tm_v_regs != NULL) {
                err |= __get_user(current->thread.vrsave,
                                  (u32 __user *)&v_regs[33]);
                err |= __get_user(current->thread.transact_vrsave,
@@ -484,6 +488,8 @@ static long restore_tm_sigcontexts(struct pt_regs *regs,
                current->thread.vrsave = 0;
                current->thread.transact_vrsave = 0;
        }
+       if (cpu_has_feature(CPU_FTR_ALTIVEC))
+               mtspr(SPRN_VRSAVE, current->thread.vrsave);
 #endif /* CONFIG_ALTIVEC */
        /* restore floating point */
        err |= copy_fpr_from_user(current, &sc->fp_regs);
index 38b0ba65a73566238c3e4dbf92bc9f9d33bdf83d..442d8e23f8f4088368c6ebda73375a28de2ce66a 100644 (file)
@@ -81,6 +81,28 @@ int smt_enabled_at_boot = 1;
 
 static void (*crash_ipi_function_ptr)(struct pt_regs *) = NULL;
 
+/*
+ * Returns 1 if the specified cpu should be brought up during boot.
+ * Used to inhibit booting threads if they've been disabled or
+ * limited on the command line
+ */
+int smp_generic_cpu_bootable(unsigned int nr)
+{
+       /* Special case - we inhibit secondary thread startup
+        * during boot if the user requests it.
+        */
+       if (system_state == SYSTEM_BOOTING && cpu_has_feature(CPU_FTR_SMT)) {
+               if (!smt_enabled_at_boot && cpu_thread_in_core(nr) != 0)
+                       return 0;
+               if (smt_enabled_at_boot
+                   && cpu_thread_in_core(nr) >= smt_enabled_at_boot)
+                       return 0;
+       }
+
+       return 1;
+}
+
+
 #ifdef CONFIG_PPC64
 int smp_generic_kick_cpu(int nr)
 {
@@ -172,7 +194,7 @@ int smp_request_message_ipi(int virq, int msg)
 #endif
        err = request_irq(virq, smp_ipi_action[msg],
                          IRQF_PERCPU | IRQF_NO_THREAD | IRQF_NO_SUSPEND,
-                         smp_ipi_name[msg], 0);
+                         smp_ipi_name[msg], NULL);
        WARN(err < 0, "unable to request_irq %d for %s (rc %d)\n",
                virq, smp_ipi_name[msg], err);
 
@@ -210,6 +232,12 @@ void smp_muxed_ipi_message_pass(int cpu, int msg)
        smp_ops->cause_ipi(cpu, info->data);
 }
 
+#ifdef __BIG_ENDIAN__
+#define IPI_MESSAGE(A) (1 << (24 - 8 * (A)))
+#else
+#define IPI_MESSAGE(A) (1 << (8 * (A)))
+#endif
+
 irqreturn_t smp_ipi_demux(void)
 {
        struct cpu_messages *info = &__get_cpu_var(ipi_message);
@@ -219,19 +247,14 @@ irqreturn_t smp_ipi_demux(void)
 
        do {
                all = xchg(&info->messages, 0);
-
-#ifdef __BIG_ENDIAN
-               if (all & (1 << (24 - 8 * PPC_MSG_CALL_FUNCTION)))
+               if (all & IPI_MESSAGE(PPC_MSG_CALL_FUNCTION))
                        generic_smp_call_function_interrupt();
-               if (all & (1 << (24 - 8 * PPC_MSG_RESCHEDULE)))
+               if (all & IPI_MESSAGE(PPC_MSG_RESCHEDULE))
                        scheduler_ipi();
-               if (all & (1 << (24 - 8 * PPC_MSG_CALL_FUNC_SINGLE)))
+               if (all & IPI_MESSAGE(PPC_MSG_CALL_FUNC_SINGLE))
                        generic_smp_call_function_single_interrupt();
-               if (all & (1 << (24 - 8 * PPC_MSG_DEBUGGER_BREAK)))
+               if (all & IPI_MESSAGE(PPC_MSG_DEBUGGER_BREAK))
                        debug_ipi_action(0, NULL);
-#else
-#error Unsupported ENDIAN
-#endif
        } while (info->messages);
 
        return IRQ_HANDLED;
@@ -574,6 +597,21 @@ out:
        return id;
 }
 
+/* Return the value of the chip-id property corresponding
+ * to the given logical cpu.
+ */
+int cpu_to_chip_id(int cpu)
+{
+       struct device_node *np;
+
+       np = of_get_cpu_node(cpu, NULL);
+       if (!np)
+               return -1;
+
+       of_node_put(np);
+       return of_get_ibm_chip_id(np);
+}
+
 /* Helper routines for cpu to core mapping */
 int cpu_core_index_of_thread(int cpu)
 {
@@ -587,6 +625,33 @@ int cpu_first_thread_of_core(int core)
 }
 EXPORT_SYMBOL_GPL(cpu_first_thread_of_core);
 
+static void traverse_siblings_chip_id(int cpu, bool add, int chipid)
+{
+       const struct cpumask *mask;
+       struct device_node *np;
+       int i, plen;
+       const __be32 *prop;
+
+       mask = add ? cpu_online_mask : cpu_present_mask;
+       for_each_cpu(i, mask) {
+               np = of_get_cpu_node(i, NULL);
+               if (!np)
+                       continue;
+               prop = of_get_property(np, "ibm,chip-id", &plen);
+               if (prop && plen == sizeof(int) &&
+                   of_read_number(prop, 1) == chipid) {
+                       if (add) {
+                               cpumask_set_cpu(cpu, cpu_core_mask(i));
+                               cpumask_set_cpu(i, cpu_core_mask(cpu));
+                       } else {
+                               cpumask_clear_cpu(cpu, cpu_core_mask(i));
+                               cpumask_clear_cpu(i, cpu_core_mask(cpu));
+                       }
+               }
+               of_node_put(np);
+       }
+}
+
 /* Must be called when no change can occur to cpu_present_mask,
  * i.e. during cpu online or offline.
  */
@@ -609,11 +674,51 @@ static struct device_node *cpu_to_l2cache(int cpu)
        return cache;
 }
 
+static void traverse_core_siblings(int cpu, bool add)
+{
+       struct device_node *l2_cache, *np;
+       const struct cpumask *mask;
+       int i, chip, plen;
+       const __be32 *prop;
+
+       /* First see if we have ibm,chip-id properties in cpu nodes */
+       np = of_get_cpu_node(cpu, NULL);
+       if (np) {
+               chip = -1;
+               prop = of_get_property(np, "ibm,chip-id", &plen);
+               if (prop && plen == sizeof(int))
+                       chip = of_read_number(prop, 1);
+               of_node_put(np);
+               if (chip >= 0) {
+                       traverse_siblings_chip_id(cpu, add, chip);
+                       return;
+               }
+       }
+
+       l2_cache = cpu_to_l2cache(cpu);
+       mask = add ? cpu_online_mask : cpu_present_mask;
+       for_each_cpu(i, mask) {
+               np = cpu_to_l2cache(i);
+               if (!np)
+                       continue;
+               if (np == l2_cache) {
+                       if (add) {
+                               cpumask_set_cpu(cpu, cpu_core_mask(i));
+                               cpumask_set_cpu(i, cpu_core_mask(cpu));
+                       } else {
+                               cpumask_clear_cpu(cpu, cpu_core_mask(i));
+                               cpumask_clear_cpu(i, cpu_core_mask(cpu));
+                       }
+               }
+               of_node_put(np);
+       }
+       of_node_put(l2_cache);
+}
+
 /* Activate a secondary processor. */
 void start_secondary(void *unused)
 {
        unsigned int cpu = smp_processor_id();
-       struct device_node *l2_cache;
        int i, base;
 
        atomic_inc(&init_mm.mm_count);
@@ -652,18 +757,7 @@ void start_secondary(void *unused)
                cpumask_set_cpu(cpu, cpu_core_mask(base + i));
                cpumask_set_cpu(base + i, cpu_core_mask(cpu));
        }
-       l2_cache = cpu_to_l2cache(cpu);
-       for_each_online_cpu(i) {
-               struct device_node *np = cpu_to_l2cache(i);
-               if (!np)
-                       continue;
-               if (np == l2_cache) {
-                       cpumask_set_cpu(cpu, cpu_core_mask(i));
-                       cpumask_set_cpu(i, cpu_core_mask(cpu));
-               }
-               of_node_put(np);
-       }
-       of_node_put(l2_cache);
+       traverse_core_siblings(cpu, true);
 
        smp_wmb();
        notify_cpu_starting(cpu);
@@ -719,7 +813,6 @@ int arch_sd_sibling_asym_packing(void)
 #ifdef CONFIG_HOTPLUG_CPU
 int __cpu_disable(void)
 {
-       struct device_node *l2_cache;
        int cpu = smp_processor_id();
        int base, i;
        int err;
@@ -739,20 +832,7 @@ int __cpu_disable(void)
                cpumask_clear_cpu(cpu, cpu_core_mask(base + i));
                cpumask_clear_cpu(base + i, cpu_core_mask(cpu));
        }
-
-       l2_cache = cpu_to_l2cache(cpu);
-       for_each_present_cpu(i) {
-               struct device_node *np = cpu_to_l2cache(i);
-               if (!np)
-                       continue;
-               if (np == l2_cache) {
-                       cpumask_clear_cpu(cpu, cpu_core_mask(i));
-                       cpumask_clear_cpu(i, cpu_core_mask(cpu));
-               }
-               of_node_put(np);
-       }
-       of_node_put(l2_cache);
-
+       traverse_core_siblings(cpu, false);
 
        return 0;
 }
diff --git a/arch/powerpc/kernel/softemu8xx.c b/arch/powerpc/kernel/softemu8xx.c
deleted file mode 100644 (file)
index 29b2f81..0000000
+++ /dev/null
@@ -1,199 +0,0 @@
-/*
- * Software emulation of some PPC instructions for the 8xx core.
- *
- * Copyright (C) 1998 Dan Malek (dmalek@jlc.net)
- *
- * Software floating emuation for the MPC8xx processor.  I did this mostly
- * because it was easier than trying to get the libraries compiled for
- * software floating point.  The goal is still to get the libraries done,
- * but I lost patience and needed some hacks to at least get init and
- * shells running.  The first problem is the setjmp/longjmp that save
- * and restore the floating point registers.
- *
- * For this emulation, our working registers are found on the register
- * save area.
- */
-
-#include <linux/errno.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/stddef.h>
-#include <linux/unistd.h>
-#include <linux/ptrace.h>
-#include <linux/user.h>
-#include <linux/interrupt.h>
-
-#include <asm/pgtable.h>
-#include <asm/uaccess.h>
-#include <asm/io.h>
-
-/* Eventually we may need a look-up table, but this works for now.
-*/
-#define LFS    48
-#define LFD    50
-#define LFDU   51
-#define STFD   54
-#define STFDU  55
-#define FMR    63
-
-void print_8xx_pte(struct mm_struct *mm, unsigned long addr)
-{
-       pgd_t *pgd;
-       pmd_t *pmd;
-       pte_t *pte;
-
-       printk(" pte @ 0x%8lx: ", addr);
-       pgd = pgd_offset(mm, addr & PAGE_MASK);
-       if (pgd) {
-               pmd = pmd_offset(pud_offset(pgd, addr & PAGE_MASK),
-                                addr & PAGE_MASK);
-               if (pmd && pmd_present(*pmd)) {
-                       pte = pte_offset_kernel(pmd, addr & PAGE_MASK);
-                       if (pte) {
-                               printk(" (0x%08lx)->(0x%08lx)->0x%08lx\n",
-                                       (long)pgd, (long)pte, (long)pte_val(*pte));
-#define pp ((long)pte_val(*pte))
-                               printk(" RPN: %05lx PP: %lx SPS: %lx SH: %lx "
-                                      "CI: %lx v: %lx\n",
-                                      pp>>12,    /* rpn */
-                                      (pp>>10)&3, /* pp */
-                                      (pp>>3)&1, /* small */
-                                      (pp>>2)&1, /* shared */
-                                      (pp>>1)&1, /* cache inhibit */
-                                      pp&1       /* valid */
-                                      );
-#undef pp
-                       }
-                       else {
-                               printk("no pte\n");
-                       }
-               }
-               else {
-                       printk("no pmd\n");
-               }
-       }
-       else {
-               printk("no pgd\n");
-       }
-}
-
-int get_8xx_pte(struct mm_struct *mm, unsigned long addr)
-{
-       pgd_t *pgd;
-       pmd_t *pmd;
-       pte_t *pte;
-       int retval = 0;
-
-       pgd = pgd_offset(mm, addr & PAGE_MASK);
-       if (pgd) {
-               pmd = pmd_offset(pud_offset(pgd, addr & PAGE_MASK),
-                                addr & PAGE_MASK);
-               if (pmd && pmd_present(*pmd)) {
-                       pte = pte_offset_kernel(pmd, addr & PAGE_MASK);
-                       if (pte) {
-                               retval = (int)pte_val(*pte);
-                       }
-               }
-       }
-       return retval;
-}
-
-/*
- * We return 0 on success, 1 on unimplemented instruction, and EFAULT
- * if a load/store faulted.
- */
-int Soft_emulate_8xx(struct pt_regs *regs)
-{
-       u32 inst, instword;
-       u32 flreg, idxreg, disp;
-       int retval;
-       s16 sdisp;
-       u32 *ea, *ip;
-
-       retval = 0;
-
-       instword = *((u32 *)regs->nip);
-       inst = instword >> 26;
-
-       flreg = (instword >> 21) & 0x1f;
-       idxreg = (instword >> 16) & 0x1f;
-       disp = instword & 0xffff;
-
-       ea = (u32 *)(regs->gpr[idxreg] + disp);
-       ip = (u32 *)&current->thread.TS_FPR(flreg);
-
-       switch ( inst )
-       {
-       case LFD:
-               /* this is a 16 bit quantity that is sign extended
-                * so use a signed short here -- Cort
-                */
-               sdisp = (instword & 0xffff);
-               ea = (u32 *)(regs->gpr[idxreg] + sdisp);
-               if (copy_from_user(ip, ea, sizeof(double)))
-                       retval = -EFAULT;
-               break;
-
-       case LFDU:
-               if (copy_from_user(ip, ea, sizeof(double)))
-                       retval = -EFAULT;
-               else
-                       regs->gpr[idxreg] = (u32)ea;
-               break;
-       case LFS:
-               sdisp = (instword & 0xffff);
-               ea = (u32 *)(regs->gpr[idxreg] + sdisp);
-               if (copy_from_user(ip, ea, sizeof(float)))
-                       retval = -EFAULT;
-               break;
-       case STFD:
-               /* this is a 16 bit quantity that is sign extended
-                * so use a signed short here -- Cort
-                */
-               sdisp = (instword & 0xffff);
-               ea = (u32 *)(regs->gpr[idxreg] + sdisp);
-               if (copy_to_user(ea, ip, sizeof(double)))
-                       retval = -EFAULT;
-               break;
-
-       case STFDU:
-               if (copy_to_user(ea, ip, sizeof(double)))
-                       retval = -EFAULT;
-               else
-                       regs->gpr[idxreg] = (u32)ea;
-               break;
-       case FMR:
-               /* assume this is a fp move -- Cort */
-               memcpy(ip, &current->thread.TS_FPR((instword>>11)&0x1f),
-                      sizeof(double));
-               break;
-       default:
-               retval = 1;
-               printk("Bad emulation %s/%d\n"
-                      " NIP: %08lx instruction: %08x opcode: %x "
-                      "A: %x B: %x C: %x code: %x rc: %x\n",
-                      current->comm,current->pid,
-                      regs->nip,
-                      instword,inst,
-                      (instword>>16)&0x1f,
-                      (instword>>11)&0x1f,
-                      (instword>>6)&0x1f,
-                      (instword>>1)&0x3ff,
-                      instword&1);
-               {
-                       int pa;
-                       print_8xx_pte(current->mm,regs->nip);
-                       pa = get_8xx_pte(current->mm,regs->nip) & PAGE_MASK;
-                       pa |= (regs->nip & ~PAGE_MASK);
-                       pa = (unsigned long)__va(pa);
-                       printk("Kernel VA for NIP %x ", pa);
-                       print_8xx_pte(current->mm,pa);
-               }
-       }
-
-       if (retval == 0)
-               regs->nip += 4;
-
-       return retval;
-}
index 11a39307dd714b0602b9f7e3e4f540345057b71c..0f204053e5b5a49e29df9d2f4113d208cb65417e 100644 (file)
@@ -141,6 +141,14 @@ _GLOBAL(swsusp_arch_resume)
        lis     r11,swsusp_save_area@h
        ori     r11,r11,swsusp_save_area@l
 
+       /*
+        * Mappings from virtual addresses to physical addresses may be
+        * different than they were prior to restoring hibernation state.
+        * Invalidate the TLB so that the boot CPU is using the new
+        * mappings.
+        */
+       bl      _tlbil_all
+
        lwz     r4,SL_SPRG0(r11)
        mtsprg  0,r4
        lwz     r4,SL_SPRG1(r11)
index 65ab9e9093772e63d74e8c9b22ef00089718d6e8..b2bcd34f72d2b099b7ac621543f7226e13b26da5 100644 (file)
@@ -210,18 +210,18 @@ static u64 scan_dispatch_log(u64 stop_tb)
        if (!dtl)
                return 0;
 
-       if (i == vpa->dtl_idx)
+       if (i == be64_to_cpu(vpa->dtl_idx))
                return 0;
-       while (i < vpa->dtl_idx) {
+       while (i < be64_to_cpu(vpa->dtl_idx)) {
                if (dtl_consumer)
                        dtl_consumer(dtl, i);
-               dtb = dtl->timebase;
-               tb_delta = dtl->enqueue_to_dispatch_time +
-                       dtl->ready_to_enqueue_time;
+               dtb = be64_to_cpu(dtl->timebase);
+               tb_delta = be32_to_cpu(dtl->enqueue_to_dispatch_time) +
+                       be32_to_cpu(dtl->ready_to_enqueue_time);
                barrier();
-               if (i + N_DISPATCH_LOG < vpa->dtl_idx) {
+               if (i + N_DISPATCH_LOG < be64_to_cpu(vpa->dtl_idx)) {
                        /* buffer has overflowed */
-                       i = vpa->dtl_idx - N_DISPATCH_LOG;
+                       i = be64_to_cpu(vpa->dtl_idx) - N_DISPATCH_LOG;
                        dtl = local_paca->dispatch_log + (i % N_DISPATCH_LOG);
                        continue;
                }
@@ -269,7 +269,7 @@ static inline u64 calculate_stolen_time(u64 stop_tb)
 {
        u64 stolen = 0;
 
-       if (get_paca()->dtl_ridx != get_paca()->lppaca_ptr->dtl_idx) {
+       if (get_paca()->dtl_ridx != be64_to_cpu(get_lppaca()->dtl_idx)) {
                stolen = scan_dispatch_log(stop_tb);
                get_paca()->system_time -= stolen;
        }
@@ -612,7 +612,7 @@ unsigned long long sched_clock(void)
 static int __init get_freq(char *name, int cells, unsigned long *val)
 {
        struct device_node *cpu;
-       const unsigned int *fp;
+       const __be32 *fp;
        int found = 0;
 
        /* The cpu node should have timebase and clock frequency properties */
index 0554d1f6d70dae8eeb964806ccd869b9e2a5e89f..7b60b98514691ee554d1140e408c9802f83d21a4 100644 (file)
@@ -155,10 +155,10 @@ _GLOBAL(tm_reclaim)
        mfvscr  vr0
        li      r6, THREAD_TRANSACT_VSCR
        stvx    vr0, r3, r6
+dont_backup_vec:
        mfspr   r0, SPRN_VRSAVE
        std     r0, THREAD_TRANSACT_VRSAVE(r3)
 
-dont_backup_vec:
        andi.   r0, r4, MSR_FP
        beq     dont_backup_fp
 
@@ -341,11 +341,11 @@ _GLOBAL(tm_recheckpoint)
        lvx     vr0, r3, r5
        mtvscr  vr0
        REST_32VRS(0, r5, r3)                   /* r5 scratch, r3 THREAD ptr */
+dont_restore_vec:
        ld      r5, THREAD_VRSAVE(r3)
        mtspr   SPRN_VRSAVE, r5
 #endif
 
-dont_restore_vec:
        andi.   r0, r4, MSR_FP
        beq     dont_restore_fp
 
index e435bc089ea3ccccb98d0acbc490071e9d75bb18..01ac64321e9693e91f1326d0036d97136a536066 100644 (file)
@@ -60,6 +60,7 @@
 #include <asm/switch_to.h>
 #include <asm/tm.h>
 #include <asm/debug.h>
+#include <sysdev/fsl_pci.h>
 
 #if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
 int (*__debugger)(struct pt_regs *regs) __read_mostly;
@@ -565,6 +566,8 @@ int machine_check_e500(struct pt_regs *regs)
        if (reason & MCSR_BUS_RBERR) {
                if (fsl_rio_mcheck_exception(regs))
                        return 1;
+               if (fsl_pci_mcheck_exception(regs))
+                       return 1;
        }
 
        printk("Machine check in kernel mode.\n");
@@ -962,7 +965,7 @@ static int emulate_instruction(struct pt_regs *regs)
        u32 instword;
        u32 rd;
 
-       if (!user_mode(regs) || (regs->msr & MSR_LE))
+       if (!user_mode(regs))
                return -EINVAL;
        CHECK_FULL_REGS(regs);
 
@@ -1050,11 +1053,41 @@ int is_valid_bugaddr(unsigned long addr)
        return is_kernel_addr(addr);
 }
 
+#ifdef CONFIG_MATH_EMULATION
+static int emulate_math(struct pt_regs *regs)
+{
+       int ret;
+       extern int do_mathemu(struct pt_regs *regs);
+
+       ret = do_mathemu(regs);
+       if (ret >= 0)
+               PPC_WARN_EMULATED(math, regs);
+
+       switch (ret) {
+       case 0:
+               emulate_single_step(regs);
+               return 0;
+       case 1: {
+                       int code = 0;
+                       code = __parse_fpscr(current->thread.fpscr.val);
+                       _exception(SIGFPE, regs, code, regs->nip);
+                       return 0;
+               }
+       case -EFAULT:
+               _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
+               return 0;
+       }
+
+       return -1;
+}
+#else
+static inline int emulate_math(struct pt_regs *regs) { return -1; }
+#endif
+
 void __kprobes program_check_exception(struct pt_regs *regs)
 {
        enum ctx_state prev_state = exception_enter();
        unsigned int reason = get_reason(regs);
-       extern int do_mathemu(struct pt_regs *regs);
 
        /* We can now get here via a FP Unavailable exception if the core
         * has no FPU, in that case the reason flags will be 0 */
@@ -1120,7 +1153,6 @@ void __kprobes program_check_exception(struct pt_regs *regs)
        if (!arch_irq_disabled_regs(regs))
                local_irq_enable();
 
-#ifdef CONFIG_MATH_EMULATION
        /* (reason & REASON_ILLEGAL) would be the obvious thing here,
         * but there seems to be a hardware bug on the 405GP (RevD)
         * that means ESR is sometimes set incorrectly - either to
@@ -1129,31 +1161,8 @@ void __kprobes program_check_exception(struct pt_regs *regs)
         * instruction or only on FP instructions, whether there is a
         * pattern to occurrences etc. -dgibson 31/Mar/2003
         */
-
-       /*
-        * If we support a HW FPU, we need to ensure the FP state
-        * if flushed into the thread_struct before attempting
-        * emulation
-        */
-#ifdef CONFIG_PPC_FPU
-       flush_fp_to_thread(current);
-#endif
-       switch (do_mathemu(regs)) {
-       case 0:
-               emulate_single_step(regs);
+       if (!emulate_math(regs))
                goto bail;
-       case 1: {
-                       int code = 0;
-                       code = __parse_fpscr(current->thread.fpscr.val);
-                       _exception(SIGFPE, regs, code, regs->nip);
-                       goto bail;
-               }
-       case -EFAULT:
-               _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
-               goto bail;
-       }
-       /* fall through on any other errors */
-#endif /* CONFIG_MATH_EMULATION */
 
        /* Try to emulate it if we should. */
        if (reason & (REASON_ILLEGAL | REASON_PRIVILEGED)) {
@@ -1444,11 +1453,6 @@ void performance_monitor_exception(struct pt_regs *regs)
 #ifdef CONFIG_8xx
 void SoftwareEmulation(struct pt_regs *regs)
 {
-       extern int do_mathemu(struct pt_regs *);
-#if defined(CONFIG_MATH_EMULATION)
-       int errcode;
-#endif
-
        CHECK_FULL_REGS(regs);
 
        if (!user_mode(regs)) {
@@ -1456,31 +1460,10 @@ void SoftwareEmulation(struct pt_regs *regs)
                die("Kernel Mode Software FPU Emulation", regs, SIGFPE);
        }
 
-#ifdef CONFIG_MATH_EMULATION
-       errcode = do_mathemu(regs);
-       if (errcode >= 0)
-               PPC_WARN_EMULATED(math, regs);
-
-       switch (errcode) {
-       case 0:
-               emulate_single_step(regs);
+       if (!emulate_math(regs))
                return;
-       case 1: {
-                       int code = 0;
-                       code = __parse_fpscr(current->thread.fpscr.val);
-                       _exception(SIGFPE, regs, code, regs->nip);
-                       return;
-               }
-       case -EFAULT:
-               _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
-               return;
-       default:
-               _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
-               return;
-       }
-#else
+
        _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
-#endif
 }
 #endif /* CONFIG_8xx */
 
index 6837f839ab78714dd435c3c70fe06215fd77835b..75702e207b2986c4741cf243c2a49c955c30e05b 100644 (file)
@@ -18,23 +18,19 @@ extern void real_writeb(u8 data, volatile u8 __iomem *addr);
 extern u8 real_205_readb(volatile u8 __iomem  *addr);
 extern void real_205_writeb(u8 data, volatile u8 __iomem *addr);
 
-struct NS16550 {
-       /* this struct must be packed */
-       unsigned char rbr;  /* 0 */
-       unsigned char ier;  /* 1 */
-       unsigned char fcr;  /* 2 */
-       unsigned char lcr;  /* 3 */
-       unsigned char mcr;  /* 4 */
-       unsigned char lsr;  /* 5 */
-       unsigned char msr;  /* 6 */
-       unsigned char scr;  /* 7 */
-};
-
-#define thr rbr
-#define iir fcr
-#define dll rbr
-#define dlm ier
-#define dlab lcr
+#define UART_RBR       0
+#define UART_IER       1
+#define UART_FCR       2
+#define UART_LCR       3
+#define UART_MCR       4
+#define UART_LSR       5
+#define UART_MSR       6
+#define UART_SCR       7
+#define UART_THR       UART_RBR
+#define UART_IIR       UART_FCR
+#define UART_DLL       UART_RBR
+#define UART_DLM       UART_IER
+#define UART_DLAB      UART_LCR
 
 #define LSR_DR   0x01  /* Data ready */
 #define LSR_OE   0x02  /* Overrun */
@@ -47,52 +43,62 @@ struct NS16550 {
 
 #define LCR_DLAB 0x80
 
-static struct NS16550 __iomem *udbg_comport;
+static u8 (*udbg_uart_in)(unsigned int reg);
+static void (*udbg_uart_out)(unsigned int reg, u8 data);
 
-static void udbg_550_flush(void)
+static void udbg_uart_flush(void)
 {
-       if (udbg_comport) {
-               while ((in_8(&udbg_comport->lsr) & LSR_THRE) == 0)
-                       /* wait for idle */;
-       }
+       if (!udbg_uart_in)
+               return;
+
+       /* wait for idle */
+       while ((udbg_uart_in(UART_LSR) & LSR_THRE) == 0)
+               cpu_relax();
 }
 
-static void udbg_550_putc(char c)
+static void udbg_uart_putc(char c)
 {
-       if (udbg_comport) {
-               if (c == '\n')
-                       udbg_550_putc('\r');
-               udbg_550_flush();
-               out_8(&udbg_comport->thr, c);
-       }
+       if (!udbg_uart_out)
+               return;
+
+       if (c == '\n')
+               udbg_uart_putc('\r');
+       udbg_uart_flush();
+       udbg_uart_out(UART_THR, c);
 }
 
-static int udbg_550_getc_poll(void)
+static int udbg_uart_getc_poll(void)
 {
-       if (udbg_comport) {
-               if ((in_8(&udbg_comport->lsr) & LSR_DR) != 0)
-                       return in_8(&udbg_comport->rbr);
-               else
-                       return -1;
-       }
+       if (udbg_uart_in && (udbg_uart_in(UART_LSR) & LSR_DR))
+               return udbg_uart_in(UART_RBR);
        return -1;
 }
 
-static int udbg_550_getc(void)
+static int udbg_uart_getc(void)
 {
-       if (udbg_comport) {
-               while ((in_8(&udbg_comport->lsr) & LSR_DR) == 0)
-                       /* wait for char */;
-               return in_8(&udbg_comport->rbr);
-       }
-       return -1;
+       if (!udbg_uart_in)
+               return -1;
+       /* wait for char */
+       while (!(udbg_uart_in(UART_LSR) & LSR_DR))
+               cpu_relax();
+       return udbg_uart_in(UART_RBR);
+}
+
+static void udbg_use_uart(void)
+{
+       udbg_putc = udbg_uart_putc;
+       udbg_flush = udbg_uart_flush;
+       udbg_getc = udbg_uart_getc;
+       udbg_getc_poll = udbg_uart_getc_poll;
 }
 
-void udbg_init_uart(void __iomem *comport, unsigned int speed,
-                   unsigned int clock)
+void udbg_uart_setup(unsigned int speed, unsigned int clock)
 {
        unsigned int dll, base_bauds;
 
+       if (!udbg_uart_out)
+               return;
+
        if (clock == 0)
                clock = 1843200;
        if (speed == 0)
@@ -101,51 +107,43 @@ void udbg_init_uart(void __iomem *comport, unsigned int speed,
        base_bauds = clock / 16;
        dll = base_bauds / speed;
 
-       if (comport) {
-               udbg_comport = (struct NS16550 __iomem *)comport;
-               out_8(&udbg_comport->lcr, 0x00);
-               out_8(&udbg_comport->ier, 0xff);
-               out_8(&udbg_comport->ier, 0x00);
-               out_8(&udbg_comport->lcr, LCR_DLAB);
-               out_8(&udbg_comport->dll, dll & 0xff);
-               out_8(&udbg_comport->dlm, dll >> 8);
-               /* 8 data, 1 stop, no parity */
-               out_8(&udbg_comport->lcr, 0x03);
-               /* RTS/DTR */
-               out_8(&udbg_comport->mcr, 0x03);
-               /* Clear & enable FIFOs */
-               out_8(&udbg_comport->fcr ,0x07);
-               udbg_putc = udbg_550_putc;
-               udbg_flush = udbg_550_flush;
-               udbg_getc = udbg_550_getc;
-               udbg_getc_poll = udbg_550_getc_poll;
-       }
+       udbg_uart_out(UART_LCR, 0x00);
+       udbg_uart_out(UART_IER, 0xff);
+       udbg_uart_out(UART_IER, 0x00);
+       udbg_uart_out(UART_LCR, LCR_DLAB);
+       udbg_uart_out(UART_DLL, dll & 0xff);
+       udbg_uart_out(UART_DLM, dll >> 8);
+       /* 8 data, 1 stop, no parity */
+       udbg_uart_out(UART_LCR, 0x3);
+       /* RTS/DTR */
+       udbg_uart_out(UART_MCR, 0x3);
+       /* Clear & enable FIFOs */
+       udbg_uart_out(UART_FCR, 0x7);
 }
 
-unsigned int udbg_probe_uart_speed(void __iomem *comport, unsigned int clock)
+unsigned int udbg_probe_uart_speed(unsigned int clock)
 {
        unsigned int dll, dlm, divisor, prescaler, speed;
        u8 old_lcr;
-       struct NS16550 __iomem *port = comport;
 
-       old_lcr = in_8(&port->lcr);
+       old_lcr = udbg_uart_in(UART_LCR);
 
        /* select divisor latch registers.  */
-       out_8(&port->lcr, LCR_DLAB);
+       udbg_uart_out(UART_LCR, old_lcr | LCR_DLAB);
 
        /* now, read the divisor */
-       dll = in_8(&port->dll);
-       dlm = in_8(&port->dlm);
+       dll = udbg_uart_in(UART_DLL);
+       dlm = udbg_uart_in(UART_DLM);
        divisor = dlm << 8 | dll;
 
        /* check prescaling */
-       if (in_8(&port->mcr) & 0x80)
+       if (udbg_uart_in(UART_MCR) & 0x80)
                prescaler = 4;
        else
                prescaler = 1;
 
        /* restore the LCR */
-       out_8(&port->lcr, old_lcr);
+       udbg_uart_out(UART_LCR, old_lcr);
 
        /* calculate speed */
        speed = (clock / prescaler) / (divisor * 16);
@@ -157,195 +155,155 @@ unsigned int udbg_probe_uart_speed(void __iomem *comport, unsigned int clock)
        return speed;
 }
 
-#ifdef CONFIG_PPC_MAPLE
-void udbg_maple_real_flush(void)
+static union {
+       unsigned char __iomem *mmio_base;
+       unsigned long pio_base;
+} udbg_uart;
+
+static unsigned int udbg_uart_stride = 1;
+
+static u8 udbg_uart_in_pio(unsigned int reg)
 {
-       if (udbg_comport) {
-               while ((real_readb(&udbg_comport->lsr) & LSR_THRE) == 0)
-                       /* wait for idle */;
-       }
+       return inb(udbg_uart.pio_base + (reg * udbg_uart_stride));
 }
 
-void udbg_maple_real_putc(char c)
+static void udbg_uart_out_pio(unsigned int reg, u8 data)
 {
-       if (udbg_comport) {
-               if (c == '\n')
-                       udbg_maple_real_putc('\r');
-               udbg_maple_real_flush();
-               real_writeb(c, &udbg_comport->thr); eieio();
-       }
+       outb(data, udbg_uart.pio_base + (reg * udbg_uart_stride));
 }
 
-void __init udbg_init_maple_realmode(void)
+void udbg_uart_init_pio(unsigned long port, unsigned int stride)
 {
-       udbg_comport = (struct NS16550 __iomem *)0xf40003f8;
-
-       udbg_putc = udbg_maple_real_putc;
-       udbg_flush = udbg_maple_real_flush;
-       udbg_getc = NULL;
-       udbg_getc_poll = NULL;
+       if (!port)
+               return;
+       udbg_uart.pio_base = port;
+       udbg_uart_stride = stride;
+       udbg_uart_in = udbg_uart_in_pio;
+       udbg_uart_out = udbg_uart_out_pio;
+       udbg_use_uart();
 }
-#endif /* CONFIG_PPC_MAPLE */
 
-#ifdef CONFIG_PPC_PASEMI
-void udbg_pas_real_flush(void)
+static u8 udbg_uart_in_mmio(unsigned int reg)
 {
-       if (udbg_comport) {
-               while ((real_205_readb(&udbg_comport->lsr) & LSR_THRE) == 0)
-                       /* wait for idle */;
-       }
+       return in_8(udbg_uart.mmio_base + (reg * udbg_uart_stride));
 }
 
-void udbg_pas_real_putc(char c)
+static void udbg_uart_out_mmio(unsigned int reg, u8 data)
 {
-       if (udbg_comport) {
-               if (c == '\n')
-                       udbg_pas_real_putc('\r');
-               udbg_pas_real_flush();
-               real_205_writeb(c, &udbg_comport->thr); eieio();
-       }
+       out_8(udbg_uart.mmio_base + (reg * udbg_uart_stride), data);
 }
 
-void udbg_init_pas_realmode(void)
-{
-       udbg_comport = (struct NS16550 __iomem *)0xfcff03f8UL;
 
-       udbg_putc = udbg_pas_real_putc;
-       udbg_flush = udbg_pas_real_flush;
-       udbg_getc = NULL;
-       udbg_getc_poll = NULL;
+void udbg_uart_init_mmio(void __iomem *addr, unsigned int stride)
+{
+       if (!addr)
+               return;
+       udbg_uart.mmio_base = addr;
+       udbg_uart_stride = stride;
+       udbg_uart_in = udbg_uart_in_mmio;
+       udbg_uart_out = udbg_uart_out_mmio;
+       udbg_use_uart();
 }
-#endif /* CONFIG_PPC_MAPLE */
 
-#ifdef CONFIG_PPC_EARLY_DEBUG_44x
-#include <platforms/44x/44x.h>
+#ifdef CONFIG_PPC_MAPLE
+
+#define UDBG_UART_MAPLE_ADDR   ((void __iomem *)0xf40003f8)
 
-static void udbg_44x_as1_flush(void)
+static u8 udbg_uart_in_maple(unsigned int reg)
 {
-       if (udbg_comport) {
-               while ((as1_readb(&udbg_comport->lsr) & LSR_THRE) == 0)
-                       /* wait for idle */;
-       }
+       return real_readb(UDBG_UART_MAPLE_ADDR + reg);
 }
 
-static void udbg_44x_as1_putc(char c)
+static void udbg_uart_out_maple(unsigned int reg, u8 val)
 {
-       if (udbg_comport) {
-               if (c == '\n')
-                       udbg_44x_as1_putc('\r');
-               udbg_44x_as1_flush();
-               as1_writeb(c, &udbg_comport->thr); eieio();
-       }
+       real_writeb(val, UDBG_UART_MAPLE_ADDR + reg);
 }
 
-static int udbg_44x_as1_getc(void)
+void __init udbg_init_maple_realmode(void)
 {
-       if (udbg_comport) {
-               while ((as1_readb(&udbg_comport->lsr) & LSR_DR) == 0)
-                       ; /* wait for char */
-               return as1_readb(&udbg_comport->rbr);
-       }
-       return -1;
+       udbg_uart_in = udbg_uart_in_maple;
+       udbg_uart_out = udbg_uart_out_maple;
+       udbg_use_uart();
 }
 
-void __init udbg_init_44x_as1(void)
-{
-       udbg_comport =
-               (struct NS16550 __iomem *)PPC44x_EARLY_DEBUG_VIRTADDR;
+#endif /* CONFIG_PPC_MAPLE */
 
-       udbg_putc = udbg_44x_as1_putc;
-       udbg_flush = udbg_44x_as1_flush;
-       udbg_getc = udbg_44x_as1_getc;
-}
-#endif /* CONFIG_PPC_EARLY_DEBUG_44x */
+#ifdef CONFIG_PPC_PASEMI
 
-#ifdef CONFIG_PPC_EARLY_DEBUG_40x
-static void udbg_40x_real_flush(void)
+#define UDBG_UART_PAS_ADDR     ((void __iomem *)0xfcff03f8UL)
+
+static u8 udbg_uart_in_pas(unsigned int reg)
 {
-       if (udbg_comport) {
-               while ((real_readb(&udbg_comport->lsr) & LSR_THRE) == 0)
-                       /* wait for idle */;
-       }
+       return real_205_readb(UDBG_UART_PAS_ADDR + reg);
 }
 
-static void udbg_40x_real_putc(char c)
+static void udbg_uart_out_pas(unsigned int reg, u8 val)
 {
-       if (udbg_comport) {
-               if (c == '\n')
-                       udbg_40x_real_putc('\r');
-               udbg_40x_real_flush();
-               real_writeb(c, &udbg_comport->thr); eieio();
-       }
+       real_205_writeb(val, UDBG_UART_PAS_ADDR + reg);
 }
 
-static int udbg_40x_real_getc(void)
+void __init udbg_init_pas_realmode(void)
 {
-       if (udbg_comport) {
-               while ((real_readb(&udbg_comport->lsr) & LSR_DR) == 0)
-                       ; /* wait for char */
-               return real_readb(&udbg_comport->rbr);
-       }
-       return -1;
+       udbg_uart_in = udbg_uart_in_pas;
+       udbg_uart_out = udbg_uart_out_pas;
+       udbg_use_uart();
 }
 
-void __init udbg_init_40x_realmode(void)
-{
-       udbg_comport = (struct NS16550 __iomem *)
-               CONFIG_PPC_EARLY_DEBUG_40x_PHYSADDR;
+#endif /* CONFIG_PPC_PASEMI */
+
+#ifdef CONFIG_PPC_EARLY_DEBUG_44x
 
-       udbg_putc = udbg_40x_real_putc;
-       udbg_flush = udbg_40x_real_flush;
-       udbg_getc = udbg_40x_real_getc;
-       udbg_getc_poll = NULL;
+#include <platforms/44x/44x.h>
+
+static u8 udbg_uart_in_44x_as1(unsigned int reg)
+{
+       return as1_readb((void __iomem *)PPC44x_EARLY_DEBUG_VIRTADDR + reg);
 }
-#endif /* CONFIG_PPC_EARLY_DEBUG_40x */
 
-#ifdef CONFIG_PPC_EARLY_DEBUG_WSP
-static void udbg_wsp_flush(void)
+static void udbg_uart_out_44x_as1(unsigned int reg, u8 val)
 {
-       if (udbg_comport) {
-               while ((readb(&udbg_comport->lsr) & LSR_THRE) == 0)
-                       /* wait for idle */;
-       }
+       as1_writeb(val, (void __iomem *)PPC44x_EARLY_DEBUG_VIRTADDR + reg);
 }
 
-static void udbg_wsp_putc(char c)
+void __init udbg_init_44x_as1(void)
 {
-       if (udbg_comport) {
-               if (c == '\n')
-                       udbg_wsp_putc('\r');
-               udbg_wsp_flush();
-               writeb(c, &udbg_comport->thr); eieio();
-       }
+       udbg_uart_in = udbg_uart_in_44x_as1;
+       udbg_uart_out = udbg_uart_out_44x_as1;
+       udbg_use_uart();
 }
 
-static int udbg_wsp_getc(void)
+#endif /* CONFIG_PPC_EARLY_DEBUG_44x */
+
+#ifdef CONFIG_PPC_EARLY_DEBUG_40x
+
+static u8 udbg_uart_in_40x(unsigned int reg)
 {
-       if (udbg_comport) {
-               while ((readb(&udbg_comport->lsr) & LSR_DR) == 0)
-                       ; /* wait for char */
-               return readb(&udbg_comport->rbr);
-       }
-       return -1;
+       return real_readb((void __iomem *)CONFIG_PPC_EARLY_DEBUG_40x_PHYSADDR
+                         + reg);
 }
 
-static int udbg_wsp_getc_poll(void)
+static void udbg_uart_out_40x(unsigned int reg, u8 val)
 {
-       if (udbg_comport)
-               if (readb(&udbg_comport->lsr) & LSR_DR)
-                       return readb(&udbg_comport->rbr);
-       return -1;
+       real_writeb(val, (void __iomem *)CONFIG_PPC_EARLY_DEBUG_40x_PHYSADDR
+                   + reg);
 }
 
-void __init udbg_init_wsp(void)
+void __init udbg_init_40x_realmode(void)
 {
-       udbg_comport = (struct NS16550 __iomem *)WSP_UART_VIRT;
+       udbg_uart_in = udbg_uart_in_40x;
+       udbg_uart_out = udbg_uart_out_40x;
+       udbg_use_uart();
+}
 
-       udbg_init_uart(udbg_comport, 57600, 50000000);
+#endif /* CONFIG_PPC_EARLY_DEBUG_40x */
+
+
+#ifdef CONFIG_PPC_EARLY_DEBUG_WSP
 
-       udbg_putc = udbg_wsp_putc;
-       udbg_flush = udbg_wsp_flush;
-       udbg_getc = udbg_wsp_getc;
-       udbg_getc_poll = udbg_wsp_getc_poll;
+void __init udbg_init_wsp(void)
+{
+       udbg_uart_init_mmio((void *)WSP_UART_VIRT, 1);
+       udbg_uart_setup(57600, 50000000);
 }
+
 #endif /* CONFIG_PPC_EARLY_DEBUG_WSP */
index 536016d792baf8a8a7208c4cd2f300c1672d49f2..78a350670de32b4ba7013ea8fefe2ab5c0d5d626 100644 (file)
@@ -1153,7 +1153,7 @@ EXPORT_SYMBOL(vio_h_cop_sync);
 
 static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev)
 {
-       const unsigned char *dma_window;
+       const __be32 *dma_window;
        struct iommu_table *tbl;
        unsigned long offset, size;
 
@@ -1312,8 +1312,7 @@ struct vio_dev *vio_register_device_node(struct device_node *of_node)
 {
        struct vio_dev *viodev;
        struct device_node *parent_node;
-       const unsigned int *unit_address;
-       const unsigned int *pfo_resid = NULL;
+       const __be32 *prop;
        enum vio_dev_family family;
        const char *of_node_name = of_node->name ? of_node->name : "<unknown>";
 
@@ -1360,6 +1359,8 @@ struct vio_dev *vio_register_device_node(struct device_node *of_node)
        /* we need the 'device_type' property, in order to match with drivers */
        viodev->family = family;
        if (viodev->family == VDEVICE) {
+               unsigned int unit_address;
+
                if (of_node->type != NULL)
                        viodev->type = of_node->type;
                else {
@@ -1368,24 +1369,24 @@ struct vio_dev *vio_register_device_node(struct device_node *of_node)
                        goto out;
                }
 
-               unit_address = of_get_property(of_node, "reg", NULL);
-               if (unit_address == NULL) {
+               prop = of_get_property(of_node, "reg", NULL);
+               if (prop == NULL) {
                        pr_warn("%s: node %s missing 'reg'\n",
                                        __func__, of_node_name);
                        goto out;
                }
-               dev_set_name(&viodev->dev, "%x", *unit_address);
+               unit_address = of_read_number(prop, 1);
+               dev_set_name(&viodev->dev, "%x", unit_address);
                viodev->irq = irq_of_parse_and_map(of_node, 0);
-               viodev->unit_address = *unit_address;
+               viodev->unit_address = unit_address;
        } else {
                /* PFO devices need their resource_id for submitting COP_OPs
                 * This is an optional field for devices, but is required when
                 * performing synchronous ops */
-               pfo_resid = of_get_property(of_node, "ibm,resource-id", NULL);
-               if (pfo_resid != NULL)
-                       viodev->resource_id = *pfo_resid;
+               prop = of_get_property(of_node, "ibm,resource-id", NULL);
+               if (prop != NULL)
+                       viodev->resource_id = of_read_number(prop, 1);
 
-               unit_address = NULL;
                dev_set_name(&viodev->dev, "%s", of_node_name);
                viodev->type = of_node_name;
                viodev->irq = 0;
@@ -1622,7 +1623,6 @@ static struct vio_dev *vio_find_name(const char *name)
  */
 struct vio_dev *vio_find_node(struct device_node *vnode)
 {
-       const uint32_t *unit_address;
        char kobj_name[20];
        struct device_node *vnode_parent;
        const char *dev_type;
@@ -1638,10 +1638,13 @@ struct vio_dev *vio_find_node(struct device_node *vnode)
 
        /* construct the kobject name from the device node */
        if (!strcmp(dev_type, "vdevice")) {
-               unit_address = of_get_property(vnode, "reg", NULL);
-               if (!unit_address)
+               const __be32 *prop;
+               
+               prop = of_get_property(vnode, "reg", NULL);
+               if (!prop)
                        return NULL;
-               snprintf(kobj_name, sizeof(kobj_name), "%x", *unit_address);
+               snprintf(kobj_name, sizeof(kobj_name), "%x",
+                        (uint32_t)of_read_number(prop, 1));
        } else if (!strcmp(dev_type, "ibm,platform-facilities"))
                snprintf(kobj_name, sizeof(kobj_name), "%s", vnode->name);
        else
index 4f0caecc0f9dea4a72d3c548a0e1ac58b7ae754b..4f12e8f0c7187b3bf2887e4db6af39da36ce44d2 100644 (file)
  * Authors: Alexander Graf <agraf@suse.de>
  */
 
+#ifdef __LITTLE_ENDIAN__
+#error Need to fix SLB shadow accesses in little endian mode
+#endif
+
 #define SHADOW_SLB_ESID(num)   (SLBSHADOW_SAVEAREA + (num * 0x10))
 #define SHADOW_SLB_VSID(num)   (SLBSHADOW_SAVEAREA + (num * 0x10) + 0x8)
 #define UNBOLT_SLB_ENTRY(num) \
index 7629cd3eb91ad69e9460204a7f7c2e8d081a1084..e8d51cb767522d5b2ec435363b3c88c1646f52d2 100644 (file)
@@ -217,7 +217,7 @@ struct kvm_vcpu *kvmppc_find_vcpu(struct kvm *kvm, int id)
 
 static void init_vpa(struct kvm_vcpu *vcpu, struct lppaca *vpa)
 {
-       vpa->shared_proc = 1;
+       vpa->__old_status |= LPPACA_OLD_SHARED_PROC;
        vpa->yield_count = 1;
 }
 
index fc25689a9f35076e61d83ca024a08e2bdf7564c1..c3785d4aeed766c8de371ca9334db8b386a42a23 100644 (file)
@@ -363,7 +363,11 @@ long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
                                 vcpu->arch.pgdir, true, &vcpu->arch.gpr[4]);
 }
 
+#ifdef __BIG_ENDIAN__
 #define LOCK_TOKEN     (*(u32 *)(&get_paca()->lock_token))
+#else
+#define LOCK_TOKEN     (*(u32 *)(&get_paca()->paca_index))
+#endif
 
 static inline int try_lock_tlbie(unsigned int *lock)
 {
index b02f91e4c70dc5341bc450fa68f22b102ccd462e..b93e3cd8bf2b746f20bc68b21bc79f626e9cc364 100644 (file)
 #include <asm/kvm_book3s_asm.h>
 #include <asm/mmu-hash64.h>
 
+#ifdef __LITTLE_ENDIAN__
+#error Need to fix lppaca and SLB shadow accesses in little endian mode
+#endif
+
 /*****************************************************************************
  *                                                                           *
  *        Real Mode handlers that need to be in the linear mapping           *
@@ -389,7 +393,11 @@ toc_tlbie_lock:
        .tc     native_tlbie_lock[TC],native_tlbie_lock
        .previous
        ld      r3,toc_tlbie_lock@toc(2)
+#ifdef __BIG_ENDIAN__
        lwz     r8,PACA_LOCK_TOKEN(r13)
+#else
+       lwz     r8,PACAPACAINDEX(r13)
+#endif
 24:    lwarx   r0,0,r3
        cmpwi   r0,0
        bne     24b
@@ -964,7 +972,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
 32:    ld      r4,VCPU_KVM(r9)         /* pointer to struct kvm */
 
        /* Take the guest's tlbie_lock */
+#ifdef __BIG_ENDIAN__
        lwz     r8,PACA_LOCK_TOKEN(r13)
+#else
+       lwz     r8,PACAPACAINDEX(r13)
+#endif
        addi    r3,r4,KVM_TLBIE_LOCK
 24:    lwarx   r0,0,r3
        cmpwi   r0,0
index 2c52ada3077536c04e145b63d243859f8d32cab0..751cd45f65a0de5fcd23864e5dba7b82b1f8999a 100644 (file)
 #include <asm/byteorder.h>
 #include <asm/kvm_ppc.h>
 #include <asm/disassemble.h>
+#include <asm/ppc-opcode.h>
 #include "timing.h"
 #include "trace.h"
 
-#define OP_TRAP 3
-#define OP_TRAP_64 2
-
-#define OP_31_XOP_TRAP      4
-#define OP_31_XOP_LWZX      23
-#define OP_31_XOP_DCBST     54
-#define OP_31_XOP_TRAP_64   68
-#define OP_31_XOP_DCBF      86
-#define OP_31_XOP_LBZX      87
-#define OP_31_XOP_STWX      151
-#define OP_31_XOP_STBX      215
-#define OP_31_XOP_LBZUX     119
-#define OP_31_XOP_STBUX     247
-#define OP_31_XOP_LHZX      279
-#define OP_31_XOP_LHZUX     311
-#define OP_31_XOP_MFSPR     339
-#define OP_31_XOP_LHAX      343
-#define OP_31_XOP_STHX      407
-#define OP_31_XOP_STHUX     439
-#define OP_31_XOP_MTSPR     467
-#define OP_31_XOP_DCBI      470
-#define OP_31_XOP_LWBRX     534
-#define OP_31_XOP_TLBSYNC   566
-#define OP_31_XOP_STWBRX    662
-#define OP_31_XOP_LHBRX     790
-#define OP_31_XOP_STHBRX    918
-
-#define OP_LWZ  32
-#define OP_LD   58
-#define OP_LWZU 33
-#define OP_LBZ  34
-#define OP_LBZU 35
-#define OP_STW  36
-#define OP_STWU 37
-#define OP_STD  62
-#define OP_STB  38
-#define OP_STBU 39
-#define OP_LHZ  40
-#define OP_LHZU 41
-#define OP_LHA  42
-#define OP_LHAU 43
-#define OP_STH  44
-#define OP_STHU 45
-
 void kvmppc_emulate_dec(struct kvm_vcpu *vcpu)
 {
        unsigned long dec_nsec;
index bb7cfecf2788ffcdd37542ec9333d69e08cbf427..0c9c8d7d07345cd2f5def8c2801b466c8330a48b 100644 (file)
@@ -32,7 +32,7 @@ void __spin_yield(arch_spinlock_t *lock)
                return;
        holder_cpu = lock_value & 0xffff;
        BUG_ON(holder_cpu >= NR_CPUS);
-       yield_count = lppaca_of(holder_cpu).yield_count;
+       yield_count = be32_to_cpu(lppaca_of(holder_cpu).yield_count);
        if ((yield_count & 1) == 0)
                return;         /* virtual cpu is currently running */
        rmb();
@@ -57,7 +57,7 @@ void __rw_yield(arch_rwlock_t *rw)
                return;         /* no write lock at present */
        holder_cpu = lock_value & 0xffff;
        BUG_ON(holder_cpu >= NR_CPUS);
-       yield_count = lppaca_of(holder_cpu).yield_count;
+       yield_count = be32_to_cpu(lppaca_of(holder_cpu).yield_count);
        if ((yield_count & 1) == 0)
                return;         /* virtual cpu is currently running */
        rmb();
index 8d035d2d42a62579bab50b19f605613d81ba56d5..1b46ab4f64178786a6c68024b18c04eb385858e0 100644 (file)
@@ -1,15 +1,15 @@
-
-obj-$(CONFIG_MATH_EMULATION)   += fabs.o fadd.o fadds.o fcmpo.o fcmpu.o \
-                                       fctiw.o fctiwz.o fdiv.o fdivs.o \
-                                       fmadd.o fmadds.o fmsub.o fmsubs.o \
-                                       fmul.o fmuls.o fnabs.o fneg.o \
-                                       fnmadd.o fnmadds.o fnmsub.o fnmsubs.o \
-                                       fres.o fre.o frsp.o fsel.o lfs.o \
-                                       frsqrte.o frsqrtes.o \
-                                       fsqrt.o fsqrts.o fsub.o fsubs.o \
-                                       mcrfs.o mffs.o mtfsb0.o mtfsb1.o \
-                                       mtfsf.o mtfsfi.o stfiwx.o stfs.o \
-                                       math.o fmr.o lfd.o stfd.o
+math-emu-common-objs = math.o fre.o fsqrt.o fsqrts.o frsqrtes.o mtfsf.o mtfsfi.o
+obj-$(CONFIG_MATH_EMULATION_HW_UNIMPLEMENTED) += $(math-emu-common-objs)
+obj-$(CONFIG_MATH_EMULATION_FULL) += $(math-emu-common-objs) fabs.o fadd.o \
+                                       fadds.o fcmpo.o fcmpu.o fctiw.o \
+                                       fctiwz.o fdiv.o fdivs.o  fmadd.o \
+                                       fmadds.o fmsub.o fmsubs.o fmul.o \
+                                       fmuls.o fnabs.o fneg.o fnmadd.o \
+                                       fnmadds.o fnmsub.o fnmsubs.o fres.o \
+                                       frsp.o fsel.o lfs.o frsqrte.o fsub.o \
+                                       fsubs.o  mcrfs.o mffs.o mtfsb0.o \
+                                       mtfsb1.o stfiwx.o stfs.o math.o \
+                                       fmr.o lfd.o stfd.o
 
 obj-$(CONFIG_SPE)              += math_efp.o
 
index 0328e66e0799e6d7a5999430833df30a1c7296ab..ab151f04050264fdc4c4a37af8b815b86c6e60de 100644 (file)
@@ -7,12 +7,27 @@
 
 #include <asm/uaccess.h>
 #include <asm/reg.h>
+#include <asm/switch_to.h>
 
 #include <asm/sfp-machine.h>
 #include <math-emu/double.h>
 
 #define FLOATFUNC(x)   extern int x(void *, void *, void *, void *)
 
+/* The instructions list which may be not implemented by a hardware FPU */
+FLOATFUNC(fre);
+FLOATFUNC(frsqrtes);
+FLOATFUNC(fsqrt);
+FLOATFUNC(fsqrts);
+FLOATFUNC(mtfsf);
+FLOATFUNC(mtfsfi);
+
+#ifdef CONFIG_MATH_EMULATION_HW_UNIMPLEMENTED
+#undef FLOATFUNC(x)
+#define FLOATFUNC(x)   static inline int x(void *op1, void *op2, void *op3, \
+                                                void *op4) { }
+#endif
+
 FLOATFUNC(fadd);
 FLOATFUNC(fadds);
 FLOATFUNC(fdiv);
@@ -42,8 +57,6 @@ FLOATFUNC(mcrfs);
 FLOATFUNC(mffs);
 FLOATFUNC(mtfsb0);
 FLOATFUNC(mtfsb1);
-FLOATFUNC(mtfsf);
-FLOATFUNC(mtfsfi);
 
 FLOATFUNC(lfd);
 FLOATFUNC(lfs);
@@ -58,13 +71,9 @@ FLOATFUNC(fnabs);
 FLOATFUNC(fneg);
 
 /* Optional */
-FLOATFUNC(fre);
 FLOATFUNC(fres);
 FLOATFUNC(frsqrte);
-FLOATFUNC(frsqrtes);
 FLOATFUNC(fsel);
-FLOATFUNC(fsqrt);
-FLOATFUNC(fsqrts);
 
 
 #define OP31           0x1f            /*   31 */
@@ -154,7 +163,6 @@ FLOATFUNC(fsqrts);
 #define XEU    15
 #define XFLB   10
 
-#ifdef CONFIG_MATH_EMULATION
 static int
 record_exception(struct pt_regs *regs, int eflag)
 {
@@ -212,7 +220,6 @@ record_exception(struct pt_regs *regs, int eflag)
 
        return (fpscr & FPSCR_FEX) ? 1 : 0;
 }
-#endif /* CONFIG_MATH_EMULATION */
 
 int
 do_mathemu(struct pt_regs *regs)
@@ -222,56 +229,13 @@ do_mathemu(struct pt_regs *regs)
        signed short sdisp;
        u32 insn = 0;
        int idx = 0;
-#ifdef CONFIG_MATH_EMULATION
        int (*func)(void *, void *, void *, void *);
        int type = 0;
        int eflag, trap;
-#endif
 
        if (get_user(insn, (u32 *)pc))
                return -EFAULT;
 
-#ifndef CONFIG_MATH_EMULATION
-       switch (insn >> 26) {
-       case LFD:
-               idx = (insn >> 16) & 0x1f;
-               sdisp = (insn & 0xffff);
-               op0 = (void *)&current->thread.TS_FPR((insn >> 21) & 0x1f);
-               op1 = (void *)((idx ? regs->gpr[idx] : 0) + sdisp);
-               lfd(op0, op1, op2, op3);
-               break;
-       case LFDU:
-               idx = (insn >> 16) & 0x1f;
-               sdisp = (insn & 0xffff);
-               op0 = (void *)&current->thread.TS_FPR((insn >> 21) & 0x1f);
-               op1 = (void *)((idx ? regs->gpr[idx] : 0) + sdisp);
-               lfd(op0, op1, op2, op3);
-               regs->gpr[idx] = (unsigned long)op1;
-               break;
-       case STFD:
-               idx = (insn >> 16) & 0x1f;
-               sdisp = (insn & 0xffff);
-               op0 = (void *)&current->thread.TS_FPR((insn >> 21) & 0x1f);
-               op1 = (void *)((idx ? regs->gpr[idx] : 0) + sdisp);
-               stfd(op0, op1, op2, op3);
-               break;
-       case STFDU:
-               idx = (insn >> 16) & 0x1f;
-               sdisp = (insn & 0xffff);
-               op0 = (void *)&current->thread.TS_FPR((insn >> 21) & 0x1f);
-               op1 = (void *)((idx ? regs->gpr[idx] : 0) + sdisp);
-               stfd(op0, op1, op2, op3);
-               regs->gpr[idx] = (unsigned long)op1;
-               break;
-       case OP63:
-               op0 = (void *)&current->thread.TS_FPR((insn >> 21) & 0x1f);
-               op1 = (void *)&current->thread.TS_FPR((insn >> 11) & 0x1f);
-               fmr(op0, op1, op2, op3);
-               break;
-       default:
-               goto illegal;
-       }
-#else /* CONFIG_MATH_EMULATION */
        switch (insn >> 26) {
        case LFS:       func = lfs;     type = D;       break;
        case LFSU:      func = lfs;     type = DU;      break;
@@ -416,21 +380,16 @@ do_mathemu(struct pt_regs *regs)
        case XE:
                idx = (insn >> 16) & 0x1f;
                op0 = (void *)&current->thread.TS_FPR((insn >> 21) & 0x1f);
-               if (!idx) {
-                       if (((insn >> 1) & 0x3ff) == STFIWX)
-                               op1 = (void *)(regs->gpr[(insn >> 11) & 0x1f]);
-                       else
-                               goto illegal;
-               } else {
-                       op1 = (void *)(regs->gpr[idx] + regs->gpr[(insn >> 11) & 0x1f]);
-               }
-
+               op1 = (void *)((idx ? regs->gpr[idx] : 0)
+                               + regs->gpr[(insn >> 11) & 0x1f]);
                break;
 
        case XEU:
                idx = (insn >> 16) & 0x1f;
+               if (!idx)
+                       goto illegal;
                op0 = (void *)&current->thread.TS_FPR((insn >> 21) & 0x1f);
-               op1 = (void *)((idx ? regs->gpr[idx] : 0)
+               op1 = (void *)(regs->gpr[idx]
                                + regs->gpr[(insn >> 11) & 0x1f]);
                break;
 
@@ -465,6 +424,13 @@ do_mathemu(struct pt_regs *regs)
                goto illegal;
        }
 
+       /*
+        * If we support a HW FPU, we need to ensure the FP state
+        * is flushed into the thread_struct before attempting
+        * emulation
+        */
+       flush_fp_to_thread(current);
+
        eflag = func(op0, op1, op2, op3);
 
        if (insn & 1) {
@@ -485,7 +451,6 @@ do_mathemu(struct pt_regs *regs)
        default:
                break;
        }
-#endif /* CONFIG_MATH_EMULATION */
 
        regs->nip += 4;
        return 0;
index 8726779e1409b5da36c1e1ea852276cf47d4252c..76d8e7cc7805348098f622e0043a4364c9a7f1a1 100644 (file)
@@ -443,8 +443,12 @@ good_area:
                                      regs, address);
 #ifdef CONFIG_PPC_SMLPAR
                        if (firmware_has_feature(FW_FEATURE_CMO)) {
+                               u32 page_ins;
+
                                preempt_disable();
-                               get_lppaca()->page_ins += (1 << PAGE_FACTOR);
+                               page_ins = be32_to_cpu(get_lppaca()->page_ins);
+                               page_ins += 1 << PAGE_FACTOR;
+                               get_lppaca()->page_ins = cpu_to_be32(page_ins);
                                preempt_enable();
                        }
 #endif /* CONFIG_PPC_SMLPAR */
index 49822d90ea965ff6703db24fca3180b479c39101..6936547018b89e21bbe63b2371c9607917fd66dd 100644 (file)
@@ -117,8 +117,8 @@ static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
        return 1;
 }
 
-int get_user_pages_fast(unsigned long start, int nr_pages, int write,
-                       struct page **pages)
+int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
+                         struct page **pages)
 {
        struct mm_struct *mm = current->mm;
        unsigned long addr, len, end;
@@ -135,7 +135,7 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
 
        if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
                                        start, len)))
-               goto slow_irqon;
+               return 0;
 
        pr_devel("  aligned: %lx .. %lx\n", start, end);
 
@@ -166,30 +166,35 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
                         (void *)pgd_val(pgd));
                next = pgd_addr_end(addr, end);
                if (pgd_none(pgd))
-                       goto slow;
+                       break;
                if (pgd_huge(pgd)) {
                        if (!gup_hugepte((pte_t *)pgdp, PGDIR_SIZE, addr, next,
                                         write, pages, &nr))
-                               goto slow;
+                               break;
                } else if (is_hugepd(pgdp)) {
                        if (!gup_hugepd((hugepd_t *)pgdp, PGDIR_SHIFT,
                                        addr, next, write, pages, &nr))
-                               goto slow;
+                               break;
                } else if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
-                       goto slow;
+                       break;
        } while (pgdp++, addr = next, addr != end);
 
        local_irq_enable();
 
-       VM_BUG_ON(nr != (end - start) >> PAGE_SHIFT);
        return nr;
+}
 
-       {
-               int ret;
+int get_user_pages_fast(unsigned long start, int nr_pages, int write,
+                       struct page **pages)
+{
+       struct mm_struct *mm = current->mm;
+       int nr, ret;
+
+       start &= PAGE_MASK;
+       nr = __get_user_pages_fast(start, nr_pages, write, pages);
+       ret = nr;
 
-slow:
-               local_irq_enable();
-slow_irqon:
+       if (nr < nr_pages) {
                pr_devel("  slow path ! nr = %d\n", nr);
 
                /* Try to get the remaining pages with get_user_pages */
@@ -198,7 +203,7 @@ slow_irqon:
 
                down_read(&mm->mmap_sem);
                ret = get_user_pages(current, mm, start,
-                       (end - start) >> PAGE_SHIFT, write, 0, pages, NULL);
+                                    nr_pages - nr, write, 0, pages, NULL);
                up_read(&mm->mmap_sem);
 
                /* Have to be a bit careful with return values */
@@ -208,9 +213,9 @@ slow_irqon:
                        else
                                ret += nr;
                }
-
-               return ret;
        }
+
+       return ret;
 }
 
 #endif /* __HAVE_ARCH_PTE_SPECIAL */
index 6ecc38bd5b2429eb08b89e3eb09cc6264c602e5c..bde8b55897551a60b15ad6017c7910a6cd3278b8 100644 (file)
@@ -907,7 +907,7 @@ static int subpage_protection(struct mm_struct *mm, unsigned long ea)
 
        if (ea >= spt->maxaddr)
                return 0;
-       if (ea < 0x100000000) {
+       if (ea < 0x100000000UL) {
                /* addresses below 4GB use spt->low_prot */
                sbpm = spt->low_prot;
        } else {
index 01e2db97a210019df0e8b3988585baa670ab1edf..d47d3dab4870988c77540de11030f747b085fc22 100644 (file)
@@ -52,7 +52,7 @@
 #if defined(CONFIG_KERNEL_START_BOOL) || defined(CONFIG_LOWMEM_SIZE_BOOL)
 /* The amount of lowmem must be within 0xF0000000 - KERNELBASE. */
 #if (CONFIG_LOWMEM_SIZE > (0xF0000000 - PAGE_OFFSET))
-#error "You must adjust CONFIG_LOWMEM_SIZE or CONFIG_START_KERNEL"
+#error "You must adjust CONFIG_LOWMEM_SIZE or CONFIG_KERNEL_START"
 #endif
 #endif
 #define MAX_LOW_MEM    CONFIG_LOWMEM_SIZE
index 5850798826cde0be9570368cb7968a9ea279b1e1..c916127f10c363e68731ec30eb5f98a799a24313 100644 (file)
@@ -58,7 +58,7 @@ static int form1_affinity;
 
 #define MAX_DISTANCE_REF_POINTS 4
 static int distance_ref_points_depth;
-static const unsigned int *distance_ref_points;
+static const __be32 *distance_ref_points;
 static int distance_lookup_table[MAX_NUMNODES][MAX_DISTANCE_REF_POINTS];
 
 /*
@@ -179,7 +179,7 @@ static void unmap_cpu_from_node(unsigned long cpu)
 #endif /* CONFIG_HOTPLUG_CPU || CONFIG_PPC_SPLPAR */
 
 /* must hold reference to node during call */
-static const int *of_get_associativity(struct device_node *dev)
+static const __be32 *of_get_associativity(struct device_node *dev)
 {
        return of_get_property(dev, "ibm,associativity", NULL);
 }
@@ -189,9 +189,9 @@ static const int *of_get_associativity(struct device_node *dev)
  * it exists (the property exists only in kexec/kdump kernels,
  * added by kexec-tools)
  */
-static const u32 *of_get_usable_memory(struct device_node *memory)
+static const __be32 *of_get_usable_memory(struct device_node *memory)
 {
-       const u32 *prop;
+       const __be32 *prop;
        u32 len;
        prop = of_get_property(memory, "linux,drconf-usable-memory", &len);
        if (!prop || len < sizeof(unsigned int))
@@ -219,7 +219,7 @@ int __node_distance(int a, int b)
 }
 
 static void initialize_distance_lookup_table(int nid,
-               const unsigned int *associativity)
+               const __be32 *associativity)
 {
        int i;
 
@@ -227,29 +227,32 @@ static void initialize_distance_lookup_table(int nid,
                return;
 
        for (i = 0; i < distance_ref_points_depth; i++) {
-               distance_lookup_table[nid][i] =
-                       associativity[distance_ref_points[i]];
+               const __be32 *entry;
+
+               entry = &associativity[be32_to_cpu(distance_ref_points[i])];
+               distance_lookup_table[nid][i] = of_read_number(entry, 1);
        }
 }
 
 /* Returns nid in the range [0..MAX_NUMNODES-1], or -1 if no useful numa
  * info is found.
  */
-static int associativity_to_nid(const unsigned int *associativity)
+static int associativity_to_nid(const __be32 *associativity)
 {
        int nid = -1;
 
        if (min_common_depth == -1)
                goto out;
 
-       if (associativity[0] >= min_common_depth)
-               nid = associativity[min_common_depth];
+       if (of_read_number(associativity, 1) >= min_common_depth)
+               nid = of_read_number(&associativity[min_common_depth], 1);
 
        /* POWER4 LPAR uses 0xffff as invalid node */
        if (nid == 0xffff || nid >= MAX_NUMNODES)
                nid = -1;
 
-       if (nid > 0 && associativity[0] >= distance_ref_points_depth)
+       if (nid > 0 &&
+           of_read_number(associativity, 1) >= distance_ref_points_depth)
                initialize_distance_lookup_table(nid, associativity);
 
 out:
@@ -262,7 +265,7 @@ out:
 static int of_node_to_nid_single(struct device_node *device)
 {
        int nid = -1;
-       const unsigned int *tmp;
+       const __be32 *tmp;
 
        tmp = of_get_associativity(device);
        if (tmp)
@@ -334,7 +337,7 @@ static int __init find_min_common_depth(void)
        }
 
        if (form1_affinity) {
-               depth = distance_ref_points[0];
+               depth = of_read_number(distance_ref_points, 1);
        } else {
                if (distance_ref_points_depth < 2) {
                        printk(KERN_WARNING "NUMA: "
@@ -342,7 +345,7 @@ static int __init find_min_common_depth(void)
                        goto err;
                }
 
-               depth = distance_ref_points[1];
+               depth = of_read_number(&distance_ref_points[1], 1);
        }
 
        /*
@@ -376,12 +379,12 @@ static void __init get_n_mem_cells(int *n_addr_cells, int *n_size_cells)
        of_node_put(memory);
 }
 
-static unsigned long read_n_cells(int n, const unsigned int **buf)
+static unsigned long read_n_cells(int n, const __be32 **buf)
 {
        unsigned long result = 0;
 
        while (n--) {
-               result = (result << 32) | **buf;
+               result = (result << 32) | of_read_number(*buf, 1);
                (*buf)++;
        }
        return result;
@@ -391,17 +394,17 @@ static unsigned long read_n_cells(int n, const unsigned int **buf)
  * Read the next memblock list entry from the ibm,dynamic-memory property
  * and return the information in the provided of_drconf_cell structure.
  */
-static void read_drconf_cell(struct of_drconf_cell *drmem, const u32 **cellp)
+static void read_drconf_cell(struct of_drconf_cell *drmem, const __be32 **cellp)
 {
-       const u32 *cp;
+       const __be32 *cp;
 
        drmem->base_addr = read_n_cells(n_mem_addr_cells, cellp);
 
        cp = *cellp;
-       drmem->drc_index = cp[0];
-       drmem->reserved = cp[1];
-       drmem->aa_index = cp[2];
-       drmem->flags = cp[3];
+       drmem->drc_index = of_read_number(cp, 1);
+       drmem->reserved = of_read_number(&cp[1], 1);
+       drmem->aa_index = of_read_number(&cp[2], 1);
+       drmem->flags = of_read_number(&cp[3], 1);
 
        *cellp = cp + 4;
 }
@@ -413,16 +416,16 @@ static void read_drconf_cell(struct of_drconf_cell *drmem, const u32 **cellp)
  * list entries followed by N memblock list entries.  Each memblock list entry
  * contains information as laid out in the of_drconf_cell struct above.
  */
-static int of_get_drconf_memory(struct device_node *memory, const u32 **dm)
+static int of_get_drconf_memory(struct device_node *memory, const __be32 **dm)
 {
-       const u32 *prop;
+       const __be32 *prop;
        u32 len, entries;
 
        prop = of_get_property(memory, "ibm,dynamic-memory", &len);
        if (!prop || len < sizeof(unsigned int))
                return 0;
 
-       entries = *prop++;
+       entries = of_read_number(prop++, 1);
 
        /* Now that we know the number of entries, revalidate the size
         * of the property read in to ensure we have everything
@@ -440,7 +443,7 @@ static int of_get_drconf_memory(struct device_node *memory, const u32 **dm)
  */
 static u64 of_get_lmb_size(struct device_node *memory)
 {
-       const u32 *prop;
+       const __be32 *prop;
        u32 len;
 
        prop = of_get_property(memory, "ibm,lmb-size", &len);
@@ -453,7 +456,7 @@ static u64 of_get_lmb_size(struct device_node *memory)
 struct assoc_arrays {
        u32     n_arrays;
        u32     array_sz;
-       const u32 *arrays;
+       const __be32 *arrays;
 };
 
 /*
@@ -469,15 +472,15 @@ struct assoc_arrays {
 static int of_get_assoc_arrays(struct device_node *memory,
                               struct assoc_arrays *aa)
 {
-       const u32 *prop;
+       const __be32 *prop;
        u32 len;
 
        prop = of_get_property(memory, "ibm,associativity-lookup-arrays", &len);
        if (!prop || len < 2 * sizeof(unsigned int))
                return -1;
 
-       aa->n_arrays = *prop++;
-       aa->array_sz = *prop++;
+       aa->n_arrays = of_read_number(prop++, 1);
+       aa->array_sz = of_read_number(prop++, 1);
 
        /* Now that we know the number of arrays and size of each array,
         * revalidate the size of the property read in.
@@ -504,7 +507,7 @@ static int of_drconf_to_nid_single(struct of_drconf_cell *drmem,
            !(drmem->flags & DRCONF_MEM_AI_INVALID) &&
            drmem->aa_index < aa->n_arrays) {
                index = drmem->aa_index * aa->array_sz + min_common_depth - 1;
-               nid = aa->arrays[index];
+               nid = of_read_number(&aa->arrays[index], 1);
 
                if (nid == 0xffff || nid >= MAX_NUMNODES)
                        nid = default_nid;
@@ -595,7 +598,7 @@ static unsigned long __init numa_enforce_memory_limit(unsigned long start,
  * Reads the counter for a given entry in
  * linux,drconf-usable-memory property
  */
-static inline int __init read_usm_ranges(const u32 **usm)
+static inline int __init read_usm_ranges(const __be32 **usm)
 {
        /*
         * For each lmb in ibm,dynamic-memory a corresponding
@@ -612,7 +615,7 @@ static inline int __init read_usm_ranges(const u32 **usm)
  */
 static void __init parse_drconf_memory(struct device_node *memory)
 {
-       const u32 *uninitialized_var(dm), *usm;
+       const __be32 *uninitialized_var(dm), *usm;
        unsigned int n, rc, ranges, is_kexec_kdump = 0;
        unsigned long lmb_size, base, size, sz;
        int nid;
@@ -721,7 +724,7 @@ static int __init parse_numa_properties(void)
                unsigned long size;
                int nid;
                int ranges;
-               const unsigned int *memcell_buf;
+               const __be32 *memcell_buf;
                unsigned int len;
 
                memcell_buf = of_get_property(memory,
@@ -1106,7 +1109,7 @@ early_param("numa", early_numa);
 static int hot_add_drconf_scn_to_nid(struct device_node *memory,
                                     unsigned long scn_addr)
 {
-       const u32 *dm;
+       const __be32 *dm;
        unsigned int drconf_cell_cnt, rc;
        unsigned long lmb_size;
        struct assoc_arrays aa;
@@ -1159,7 +1162,7 @@ int hot_add_node_scn_to_nid(unsigned long scn_addr)
        for_each_node_by_type(memory, "memory") {
                unsigned long start, size;
                int ranges;
-               const unsigned int *memcell_buf;
+               const __be32 *memcell_buf;
                unsigned int len;
 
                memcell_buf = of_get_property(memory, "reg", &len);
@@ -1232,7 +1235,7 @@ static u64 hot_add_drconf_memory_max(void)
         struct device_node *memory = NULL;
         unsigned int drconf_cell_cnt = 0;
         u64 lmb_size = 0;
-        const u32 *dm = 0;
+       const __be32 *dm = 0;
 
         memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
         if (memory) {
@@ -1337,40 +1340,41 @@ static int update_cpu_associativity_changes_mask(void)
  * Convert the associativity domain numbers returned from the hypervisor
  * to the sequence they would appear in the ibm,associativity property.
  */
-static int vphn_unpack_associativity(const long *packed, unsigned int *unpacked)
+static int vphn_unpack_associativity(const long *packed, __be32 *unpacked)
 {
        int i, nr_assoc_doms = 0;
-       const u16 *field = (const u16*) packed;
+       const __be16 *field = (const __be16 *) packed;
 
 #define VPHN_FIELD_UNUSED      (0xffff)
 #define VPHN_FIELD_MSB         (0x8000)
 #define VPHN_FIELD_MASK                (~VPHN_FIELD_MSB)
 
        for (i = 1; i < VPHN_ASSOC_BUFSIZE; i++) {
-               if (*field == VPHN_FIELD_UNUSED) {
+               if (be16_to_cpup(field) == VPHN_FIELD_UNUSED) {
                        /* All significant fields processed, and remaining
                         * fields contain the reserved value of all 1's.
                         * Just store them.
                         */
-                       unpacked[i] = *((u32*)field);
+                       unpacked[i] = *((__be32 *)field);
                        field += 2;
-               } else if (*field & VPHN_FIELD_MSB) {
+               } else if (be16_to_cpup(field) & VPHN_FIELD_MSB) {
                        /* Data is in the lower 15 bits of this field */
-                       unpacked[i] = *field & VPHN_FIELD_MASK;
+                       unpacked[i] = cpu_to_be32(
+                               be16_to_cpup(field) & VPHN_FIELD_MASK);
                        field++;
                        nr_assoc_doms++;
                } else {
                        /* Data is in the lower 15 bits of this field
                         * concatenated with the next 16 bit field
                         */
-                       unpacked[i] = *((u32*)field);
+                       unpacked[i] = *((__be32 *)field);
                        field += 2;
                        nr_assoc_doms++;
                }
        }
 
        /* The first cell contains the length of the property */
-       unpacked[0] = nr_assoc_doms;
+       unpacked[0] = cpu_to_be32(nr_assoc_doms);
 
        return nr_assoc_doms;
 }
@@ -1379,7 +1383,7 @@ static int vphn_unpack_associativity(const long *packed, unsigned int *unpacked)
  * Retrieve the new associativity information for a virtual processor's
  * home node.
  */
-static long hcall_vphn(unsigned long cpu, unsigned int *associativity)
+static long hcall_vphn(unsigned long cpu, __be32 *associativity)
 {
        long rc;
        long retbuf[PLPAR_HCALL9_BUFSIZE] = {0};
@@ -1393,7 +1397,7 @@ static long hcall_vphn(unsigned long cpu, unsigned int *associativity)
 }
 
 static long vphn_get_associativity(unsigned long cpu,
-                                       unsigned int *associativity)
+                                       __be32 *associativity)
 {
        long rc;
 
@@ -1450,7 +1454,7 @@ int arch_update_cpu_topology(void)
 {
        unsigned int cpu, sibling, changed = 0;
        struct topology_update_data *updates, *ud;
-       unsigned int associativity[VPHN_ASSOC_BUFSIZE] = {0};
+       __be32 associativity[VPHN_ASSOC_BUFSIZE] = {0};
        cpumask_t updated_cpus;
        struct device *dev;
        int weight, new_nid, i = 0;
@@ -1609,7 +1613,7 @@ int start_topology_update(void)
 #endif
                }
        } else if (firmware_has_feature(FW_FEATURE_VPHN) &&
-                  get_lppaca()->shared_proc) {
+                  lppaca_shared_proc(get_lppaca())) {
                if (!vphn_enabled) {
                        prrn_enabled = 0;
                        vphn_enabled = 1;
index a538c80db2df068da5dac56aa5076f484db434d8..9d1d33cd2be528598eb9a3e0c436a0c041094656 100644 (file)
@@ -66,8 +66,10 @@ static inline void slb_shadow_update(unsigned long ea, int ssize,
         * we only update the current CPU's SLB shadow buffer.
         */
        get_slb_shadow()->save_area[entry].esid = 0;
-       get_slb_shadow()->save_area[entry].vsid = mk_vsid_data(ea, ssize, flags);
-       get_slb_shadow()->save_area[entry].esid = mk_esid_data(ea, ssize, entry);
+       get_slb_shadow()->save_area[entry].vsid =
+                               cpu_to_be64(mk_vsid_data(ea, ssize, flags));
+       get_slb_shadow()->save_area[entry].esid =
+                               cpu_to_be64(mk_esid_data(ea, ssize, entry));
 }
 
 static inline void slb_shadow_clear(unsigned long entry)
@@ -112,7 +114,8 @@ static void __slb_flush_and_rebolt(void)
        } else {
                /* Update stack entry; others don't change */
                slb_shadow_update(get_paca()->kstack, mmu_kernel_ssize, lflags, 2);
-               ksp_vsid_data = get_slb_shadow()->save_area[2].vsid;
+               ksp_vsid_data =
+                       be64_to_cpu(get_slb_shadow()->save_area[2].vsid);
        }
 
        /* We need to do this all in asm, so we're sure we don't touch
index aa74acb0fdfcb441c60fb3499da96b90ca88e72a..a770df2dae7050bafd28daf1c354165113e378f9 100644 (file)
@@ -105,7 +105,7 @@ static void subpage_prot_clear(unsigned long addr, unsigned long len)
                limit = spt->maxaddr;
        for (; addr < limit; addr = next) {
                next = pmd_addr_end(addr, limit);
-               if (addr < 0x100000000) {
+               if (addr < 0x100000000UL) {
                        spm = spt->low_prot;
                } else {
                        spm = spt->protptrs[addr >> SBP_L3_SHIFT];
@@ -219,7 +219,7 @@ long sys_subpage_prot(unsigned long addr, unsigned long len, u32 __user *map)
        for (limit = addr + len; addr < limit; addr = next) {
                next = pmd_addr_end(addr, limit);
                err = -ENOMEM;
-               if (addr < 0x100000000) {
+               if (addr < 0x100000000UL) {
                        spm = spt->low_prot;
                } else {
                        spm = spt->protptrs[addr >> SBP_L3_SHIFT];
index ccc1daa33aed9c9d5f3b32a82e369da970769e73..2a82d3ed464d09123db06c59c429014176611272 100644 (file)
@@ -46,6 +46,12 @@ static inline u32 get_pmlca(int ctr)
                case 3:
                        pmlca = mfpmr(PMRN_PMLCA3);
                        break;
+               case 4:
+                       pmlca = mfpmr(PMRN_PMLCA4);
+                       break;
+               case 5:
+                       pmlca = mfpmr(PMRN_PMLCA5);
+                       break;
                default:
                        panic("Bad ctr number\n");
        }
@@ -68,6 +74,12 @@ static inline void set_pmlca(int ctr, u32 pmlca)
                case 3:
                        mtpmr(PMRN_PMLCA3, pmlca);
                        break;
+               case 4:
+                       mtpmr(PMRN_PMLCA4, pmlca);
+                       break;
+               case 5:
+                       mtpmr(PMRN_PMLCA5, pmlca);
+                       break;
                default:
                        panic("Bad ctr number\n");
        }
@@ -84,6 +96,10 @@ static inline unsigned int ctr_read(unsigned int i)
                        return mfpmr(PMRN_PMC2);
                case 3:
                        return mfpmr(PMRN_PMC3);
+               case 4:
+                       return mfpmr(PMRN_PMC4);
+               case 5:
+                       return mfpmr(PMRN_PMC5);
                default:
                        return 0;
        }
@@ -104,6 +120,12 @@ static inline void ctr_write(unsigned int i, unsigned int val)
                case 3:
                        mtpmr(PMRN_PMC3, val);
                        break;
+               case 4:
+                       mtpmr(PMRN_PMC4, val);
+                       break;
+               case 5:
+                       mtpmr(PMRN_PMC5, val);
+                       break;
                default:
                        break;
        }
@@ -133,6 +155,14 @@ static void init_pmc_stop(int ctr)
                        mtpmr(PMRN_PMLCA3, pmlca);
                        mtpmr(PMRN_PMLCB3, pmlcb);
                        break;
+               case 4:
+                       mtpmr(PMRN_PMLCA4, pmlca);
+                       mtpmr(PMRN_PMLCB4, pmlcb);
+                       break;
+               case 5:
+                       mtpmr(PMRN_PMLCA5, pmlca);
+                       mtpmr(PMRN_PMLCB5, pmlcb);
+                       break;
                default:
                        panic("Bad ctr number!\n");
        }
index 510fae10513d11eaa49abfe3f395370fba830473..60d71eea919c7f2e64b8d74e6732e4883fa3f61b 100644 (file)
@@ -9,7 +9,7 @@ obj64-$(CONFIG_PPC_PERF_CTRS)   += power4-pmu.o ppc970-pmu.o power5-pmu.o \
 obj32-$(CONFIG_PPC_PERF_CTRS)  += mpc7450-pmu.o
 
 obj-$(CONFIG_FSL_EMB_PERF_EVENT) += core-fsl-emb.o
-obj-$(CONFIG_FSL_EMB_PERF_EVENT_E500) += e500-pmu.o
+obj-$(CONFIG_FSL_EMB_PERF_EVENT_E500) += e500-pmu.o e6500-pmu.o
 
 obj-$(CONFIG_PPC64)            += $(obj64-y)
 obj-$(CONFIG_PPC32)            += $(obj32-y)
index eeae308cf98253888a1af8792dd150e4410b08ef..29b89e863d7cc11328cb2d93e08f4b4598ec47a2 100644 (file)
@@ -24,7 +24,7 @@
 #define BHRB_MAX_ENTRIES       32
 #define BHRB_TARGET            0x0000000000000002
 #define BHRB_PREDICTION                0x0000000000000001
-#define BHRB_EA                        0xFFFFFFFFFFFFFFFC
+#define BHRB_EA                        0xFFFFFFFFFFFFFFFCUL
 
 struct cpu_hw_events {
        int n_events;
index 106c533546758280ae59da38ce45a2c88a7f49b5..d35ae52c69dca3a20fb96e5dcb9fccc05bd5a54d 100644 (file)
@@ -70,6 +70,12 @@ static unsigned long read_pmc(int idx)
        case 3:
                val = mfpmr(PMRN_PMC3);
                break;
+       case 4:
+               val = mfpmr(PMRN_PMC4);
+               break;
+       case 5:
+               val = mfpmr(PMRN_PMC5);
+               break;
        default:
                printk(KERN_ERR "oops trying to read PMC%d\n", idx);
                val = 0;
@@ -95,6 +101,12 @@ static void write_pmc(int idx, unsigned long val)
        case 3:
                mtpmr(PMRN_PMC3, val);
                break;
+       case 4:
+               mtpmr(PMRN_PMC4, val);
+               break;
+       case 5:
+               mtpmr(PMRN_PMC5, val);
+               break;
        default:
                printk(KERN_ERR "oops trying to write PMC%d\n", idx);
        }
@@ -120,6 +132,12 @@ static void write_pmlca(int idx, unsigned long val)
        case 3:
                mtpmr(PMRN_PMLCA3, val);
                break;
+       case 4:
+               mtpmr(PMRN_PMLCA4, val);
+               break;
+       case 5:
+               mtpmr(PMRN_PMLCA5, val);
+               break;
        default:
                printk(KERN_ERR "oops trying to write PMLCA%d\n", idx);
        }
@@ -145,6 +163,12 @@ static void write_pmlcb(int idx, unsigned long val)
        case 3:
                mtpmr(PMRN_PMLCB3, val);
                break;
+       case 4:
+               mtpmr(PMRN_PMLCB4, val);
+               break;
+       case 5:
+               mtpmr(PMRN_PMLCB5, val);
+               break;
        default:
                printk(KERN_ERR "oops trying to write PMLCB%d\n", idx);
        }
@@ -462,6 +486,12 @@ static int fsl_emb_pmu_event_init(struct perf_event *event)
        int num_restricted;
        int i;
 
+       if (ppmu->n_counter > MAX_HWEVENTS) {
+               WARN(1, "No. of perf counters (%d) is higher than max array size(%d)\n",
+                       ppmu->n_counter, MAX_HWEVENTS);
+               ppmu->n_counter = MAX_HWEVENTS;
+       }
+
        switch (event->attr.type) {
        case PERF_TYPE_HARDWARE:
                ev = event->attr.config;
diff --git a/arch/powerpc/perf/e6500-pmu.c b/arch/powerpc/perf/e6500-pmu.c
new file mode 100644 (file)
index 0000000..3d877aa
--- /dev/null
@@ -0,0 +1,121 @@
+/*
+ * Performance counter support for e6500 family processors.
+ *
+ * Author: Priyanka Jain, Priyanka.Jain@freescale.com
+ * Based on e500-pmu.c
+ * Copyright 2013 Freescale Semiconductor, Inc.
+ * Copyright 2008-2009 Paul Mackerras, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/string.h>
+#include <linux/perf_event.h>
+#include <asm/reg.h>
+#include <asm/cputable.h>
+
+/*
+ * Map of generic hardware event types to hardware events
+ * Zero if unsupported
+ */
+static int e6500_generic_events[] = {
+       [PERF_COUNT_HW_CPU_CYCLES] = 1,
+       [PERF_COUNT_HW_INSTRUCTIONS] = 2,
+       [PERF_COUNT_HW_CACHE_MISSES] = 221,
+       [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 12,
+       [PERF_COUNT_HW_BRANCH_MISSES] = 15,
+};
+
+#define C(x)   PERF_COUNT_HW_CACHE_##x
+
+/*
+ * Table of generalized cache-related events.
+ * 0 means not supported, -1 means nonsensical, other values
+ * are event codes.
+ */
+static int e6500_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
+       [C(L1D)] = {
+                               /*RESULT_ACCESS         RESULT_MISS */
+               [C(OP_READ)] = {        27,             222     },
+               [C(OP_WRITE)] = {       28,             223     },
+               [C(OP_PREFETCH)] = {    29,             0       },
+       },
+       [C(L1I)] = {
+                               /*RESULT_ACCESS         RESULT_MISS */
+               [C(OP_READ)] = {        2,              254     },
+               [C(OP_WRITE)] = {       -1,             -1      },
+               [C(OP_PREFETCH)] = {    37,             0       },
+       },
+       /*
+        * Assuming LL means L2, it's not a good match for this model.
+        * It does not have separate read/write events (but it does have
+        * separate instruction/data events).
+        */
+       [C(LL)] = {
+                               /*RESULT_ACCESS         RESULT_MISS */
+               [C(OP_READ)] = {        0,              0       },
+               [C(OP_WRITE)] = {       0,              0       },
+               [C(OP_PREFETCH)] = {    0,              0       },
+       },
+       /*
+        * There are data/instruction MMU misses, but that's a miss on
+        * the chip's internal level-one TLB which is probably not
+        * what the user wants.  Instead, unified level-two TLB misses
+        * are reported here.
+        */
+       [C(DTLB)] = {
+                               /*RESULT_ACCESS         RESULT_MISS */
+               [C(OP_READ)] = {        26,             66      },
+               [C(OP_WRITE)] = {       -1,             -1      },
+               [C(OP_PREFETCH)] = {    -1,             -1      },
+       },
+       [C(BPU)] = {
+                               /*RESULT_ACCESS         RESULT_MISS */
+               [C(OP_READ)] = {        12,             15      },
+               [C(OP_WRITE)] = {       -1,             -1      },
+               [C(OP_PREFETCH)] = {    -1,             -1      },
+       },
+       [C(NODE)] = {
+                               /* RESULT_ACCESS        RESULT_MISS */
+               [C(OP_READ)] = {        -1,             -1      },
+               [C(OP_WRITE)] = {       -1,             -1      },
+               [C(OP_PREFETCH)] = {    -1,             -1      },
+       },
+};
+
+static int num_events = 512;
+
+/* Upper half of event id is PMLCb, for threshold events */
+static u64 e6500_xlate_event(u64 event_id)
+{
+       u32 event_low = (u32)event_id;
+       if (event_low >= num_events ||
+               (event_id & (FSL_EMB_EVENT_THRESHMUL | FSL_EMB_EVENT_THRESH)))
+               return 0;
+
+       return FSL_EMB_EVENT_VALID;
+}
+
+static struct fsl_emb_pmu e6500_pmu = {
+       .name                   = "e6500 family",
+       .n_counter              = 6,
+       .n_restricted           = 0,
+       .xlate_event            = e6500_xlate_event,
+       .n_generic              = ARRAY_SIZE(e6500_generic_events),
+       .generic_events         = e6500_generic_events,
+       .cache_events           = &e6500_cache_events,
+};
+
+static int init_e6500_pmu(void)
+{
+       if (!cur_cpu_spec->oprofile_cpu_type ||
+               strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc/e6500"))
+               return -ENODEV;
+
+       return register_fsl_emb_pmu(&e6500_pmu);
+}
+
+early_initcall(init_e6500_pmu);
index b89ef65392dc229b4ec026e23ddd9c5228ed05f1..b69221ba07fd21868dc7c1892384969fb6c03d53 100644 (file)
@@ -373,8 +373,9 @@ static int mpc52xx_irqhost_map(struct irq_domain *h, unsigned int virq,
        case MPC52xx_IRQ_L1_PERP: irqchip = &mpc52xx_periph_irqchip; break;
        case MPC52xx_IRQ_L1_SDMA: irqchip = &mpc52xx_sdma_irqchip; break;
        case MPC52xx_IRQ_L1_CRIT:
+       default:
                pr_warn("%s: Critical IRQ #%d is unsupported! Nopping it.\n",
-                       __func__, l2irq);
+                       __func__, l1irq);
                irq_set_chip(virq, &no_irq_chip);
                return 0;
        }
index c59c617eee9336803d683466ab50611ee9527808..aa3690bae415f8156d7a7b16dce3ae8c8de12d2a 100644 (file)
@@ -53,12 +53,6 @@ void __init corenet_ds_setup_arch(void)
 {
        mpc85xx_smp_init();
 
-#if defined(CONFIG_PCI) && defined(CONFIG_PPC64)
-       pci_devs_phb_init();
-#endif
-
-       fsl_pci_assign_primary();
-
        swiotlb_detect_4g();
 
        pr_info("%s board from Freescale Semiconductor\n", ppc_md.name);
index ede8771d6f02d259a68a20dbb93469bcce87e981..53b6fb0a3d560a5be2e2aa0dd186075d34ff794b 100644 (file)
@@ -160,6 +160,7 @@ machine_arch_initcall(p2020_rdb_pc, mpc85xx_common_publish_devices);
 machine_arch_initcall(p1020_mbg_pc, mpc85xx_common_publish_devices);
 machine_arch_initcall(p1020_rdb, mpc85xx_common_publish_devices);
 machine_arch_initcall(p1020_rdb_pc, mpc85xx_common_publish_devices);
+machine_arch_initcall(p1020_rdb_pd, mpc85xx_common_publish_devices);
 machine_arch_initcall(p1020_utm_pc, mpc85xx_common_publish_devices);
 machine_arch_initcall(p1021_rdb_pc, mpc85xx_common_publish_devices);
 machine_arch_initcall(p1025_rdb, mpc85xx_common_publish_devices);
@@ -193,6 +194,13 @@ static int __init p1020_rdb_pc_probe(void)
        return of_flat_dt_is_compatible(root, "fsl,P1020RDB-PC");
 }
 
+static int __init p1020_rdb_pd_probe(void)
+{
+       unsigned long root = of_get_flat_dt_root();
+
+       return of_flat_dt_is_compatible(root, "fsl,P1020RDB-PD");
+}
+
 static int __init p1021_rdb_pc_probe(void)
 {
        unsigned long root = of_get_flat_dt_root();
@@ -351,6 +359,20 @@ define_machine(p1020_rdb_pc) {
        .progress               = udbg_progress,
 };
 
+define_machine(p1020_rdb_pd) {
+       .name                   = "P1020RDB-PD",
+       .probe                  = p1020_rdb_pd_probe,
+       .setup_arch             = mpc85xx_rdb_setup_arch,
+       .init_IRQ               = mpc85xx_rdb_pic_init,
+#ifdef CONFIG_PCI
+       .pcibios_fixup_bus      = fsl_pcibios_fixup_bus,
+#endif
+       .get_irq                = mpic_get_irq,
+       .restart                = fsl_rstcr_restart,
+       .calibrate_decr         = generic_calibrate_decr,
+       .progress               = udbg_progress,
+};
+
 define_machine(p1024_rdb) {
        .name                   = "P1024 RDB",
        .probe                  = p1024_rdb_probe,
index 5ced4f5bb2b2e70dbc4a2a90b1a4679b5ae802a2..ea9c6269ead06e6d8e7fcfecaf2c82be34adbf31 100644 (file)
@@ -255,6 +255,7 @@ out:
 
 struct smp_ops_t smp_85xx_ops = {
        .kick_cpu = smp_85xx_kick_cpu,
+       .cpu_bootable = smp_generic_cpu_bootable,
 #ifdef CONFIG_HOTPLUG_CPU
        .cpu_disable    = generic_cpu_disable,
        .cpu_die        = generic_cpu_die,
index d703775bda309db6a1b5161eeffd4003351cb1d3..bf9c6d4cd26c34770c53aedcb12fd1a2ed6f4e5c 100644 (file)
@@ -202,17 +202,12 @@ config PPC_P7_NAP
        bool
        default n
 
-config PPC_INDIRECT_IO
-       bool
-       select GENERIC_IOMAP
-
 config PPC_INDIRECT_PIO
        bool
-       select PPC_INDIRECT_IO
+       select GENERIC_IOMAP
 
 config PPC_INDIRECT_MMIO
        bool
-       select PPC_INDIRECT_IO
 
 config PPC_IO_WORKAROUNDS
        bool
index 946306b1bb4ed5e12c24f8df55b97577924af756..b53560660b72e72030a6235d295be8ab1fd9c722 100644 (file)
@@ -697,7 +697,7 @@ static int __init cell_iommu_get_window(struct device_node *np,
                                         unsigned long *base,
                                         unsigned long *size)
 {
-       const void *dma_window;
+       const __be32 *dma_window;
        unsigned long index;
 
        /* Use ibm,dma-window if available, else, hard code ! */
index f75f6fcac7296267d44b8a28d3d2b16446878acc..90745eaa45fe9551b71c2479fc1ce8060f8abd0c 100644 (file)
@@ -136,25 +136,12 @@ static int smp_cell_kick_cpu(int nr)
        return 0;
 }
 
-static int smp_cell_cpu_bootable(unsigned int nr)
-{
-       /* Special case - we inhibit secondary thread startup
-        * during boot if the user requests it.  Odd-numbered
-        * cpus are assumed to be secondary threads.
-        */
-       if (system_state == SYSTEM_BOOTING &&
-           cpu_has_feature(CPU_FTR_SMT) &&
-           !smt_enabled_at_boot && cpu_thread_in_core(nr) != 0)
-               return 0;
-
-       return 1;
-}
 static struct smp_ops_t bpa_iic_smp_ops = {
        .message_pass   = iic_message_pass,
        .probe          = smp_iic_probe,
        .kick_cpu       = smp_cell_kick_cpu,
        .setup_cpu      = smp_cell_setup_cpu,
-       .cpu_bootable   = smp_cell_cpu_bootable,
+       .cpu_bootable   = smp_generic_cpu_bootable,
 };
 
 /* This is called very early */
index db4e638cf4081cd813eeed56d7203548eda19134..a238bc3f3a07e4e1e31e42e142701cbd40b52a1c 100644 (file)
@@ -126,7 +126,7 @@ int elf_coredump_extra_notes_size(void)
        return ret;
 }
 
-int elf_coredump_extra_notes_write(struct file *file, loff_t *foffset)
+int elf_coredump_extra_notes_write(struct coredump_params *cprm)
 {
        struct spufs_calls *calls;
        int ret;
@@ -135,7 +135,7 @@ int elf_coredump_extra_notes_write(struct file *file, loff_t *foffset)
        if (!calls)
                return 0;
 
-       ret = calls->coredump_extra_notes_write(file, foffset);
+       ret = calls->coredump_extra_notes_write(cprm);
 
        spufs_calls_put(calls);
 
index c9500ea7be2ff8dbf596eb4e19af87ffe6178ecc..8011c61dfc3ce0f86f953aa1966636c63096fee4 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/gfp.h>
 #include <linux/list.h>
 #include <linux/syscalls.h>
+#include <linux/coredump.h>
 
 #include <asm/uaccess.h>
 
@@ -48,44 +49,6 @@ static ssize_t do_coredump_read(int num, struct spu_context *ctx, void *buffer,
        return ++ret; /* count trailing NULL */
 }
 
-/*
- * These are the only things you should do on a core-file: use only these
- * functions to write out all the necessary info.
- */
-static int spufs_dump_write(struct file *file, const void *addr, int nr, loff_t *foffset)
-{
-       unsigned long limit = rlimit(RLIMIT_CORE);
-       ssize_t written;
-
-       if (*foffset + nr > limit)
-               return -EIO;
-
-       written = file->f_op->write(file, addr, nr, &file->f_pos);
-       *foffset += written;
-
-       if (written != nr)
-               return -EIO;
-
-       return 0;
-}
-
-static int spufs_dump_align(struct file *file, char *buf, loff_t new_off,
-                           loff_t *foffset)
-{
-       int rc, size;
-
-       size = min((loff_t)PAGE_SIZE, new_off - *foffset);
-       memset(buf, 0, size);
-
-       rc = 0;
-       while (rc == 0 && new_off > *foffset) {
-               size = min((loff_t)PAGE_SIZE, new_off - *foffset);
-               rc = spufs_dump_write(file, buf, size, foffset);
-       }
-
-       return rc;
-}
-
 static int spufs_ctx_note_size(struct spu_context *ctx, int dfd)
 {
        int i, sz, total = 0;
@@ -165,7 +128,7 @@ int spufs_coredump_extra_notes_size(void)
 }
 
 static int spufs_arch_write_note(struct spu_context *ctx, int i,
-                                 struct file *file, int dfd, loff_t *foffset)
+                                 struct coredump_params *cprm, int dfd)
 {
        loff_t pos = 0;
        int sz, rc, nread, total = 0;
@@ -186,23 +149,20 @@ static int spufs_arch_write_note(struct spu_context *ctx, int i,
        en.n_descsz = sz;
        en.n_type = NT_SPU;
 
-       rc = spufs_dump_write(file, &en, sizeof(en), foffset);
-       if (rc)
+       rc = -EIO;
+       if (!dump_emit(cprm, &en, sizeof(en)))
                goto out;
 
-       rc = spufs_dump_write(file, fullname, en.n_namesz, foffset);
-       if (rc)
+       if (!dump_emit(cprm, fullname, en.n_namesz))
                goto out;
 
-       rc = spufs_dump_align(file, buf, roundup(*foffset, 4), foffset);
-       if (rc)
+       if (!dump_align(cprm, 4))
                goto out;
 
        do {
                nread = do_coredump_read(i, ctx, buf, bufsz, &pos);
                if (nread > 0) {
-                       rc = spufs_dump_write(file, buf, nread, foffset);
-                       if (rc)
+                       if (!dump_emit(cprm, buf, nread))
                                goto out;
                        total += nread;
                }
@@ -213,15 +173,15 @@ static int spufs_arch_write_note(struct spu_context *ctx, int i,
                goto out;
        }
 
-       rc = spufs_dump_align(file, buf, roundup(*foffset - total + sz, 4),
-                             foffset);
-
+       if (!dump_align(cprm, 4))
+               goto out;
+       rc = 0;
 out:
        free_page((unsigned long)buf);
        return rc;
 }
 
-int spufs_coredump_extra_notes_write(struct file *file, loff_t *foffset)
+int spufs_coredump_extra_notes_write(struct coredump_params *cprm)
 {
        struct spu_context *ctx;
        int fd, j, rc;
@@ -233,7 +193,7 @@ int spufs_coredump_extra_notes_write(struct file *file, loff_t *foffset)
                        return rc;
 
                for (j = 0; spufs_coredump_read[j].name != NULL; j++) {
-                       rc = spufs_arch_write_note(ctx, j, file, fd, foffset);
+                       rc = spufs_arch_write_note(ctx, j, cprm, fd);
                        if (rc) {
                                spu_release_saved(ctx);
                                return rc;
index f3900427ffab5173ee05041a7375b9633312e835..87ba7cf99cd754590ffaf2f038926ae9098f4ca9 100644 (file)
@@ -620,12 +620,16 @@ spufs_parse_options(struct super_block *sb, char *options, struct inode *root)
                case Opt_uid:
                        if (match_int(&args[0], &option))
                                return 0;
-                       root->i_uid = option;
+                       root->i_uid = make_kuid(current_user_ns(), option);
+                       if (!uid_valid(root->i_uid))
+                               return 0;
                        break;
                case Opt_gid:
                        if (match_int(&args[0], &option))
                                return 0;
-                       root->i_gid = option;
+                       root->i_gid = make_kgid(current_user_ns(), option);
+                       if (!gid_valid(root->i_gid))
+                               return 0;
                        break;
                case Opt_mode:
                        if (match_octal(&args[0], &option))
index 67852ade4c013d813e49694d7e61e87173821846..28b89730ebfbaf96e828b672b74389f391f28af2 100644 (file)
@@ -252,7 +252,7 @@ long spufs_create(struct path *nd, struct dentry *dentry, unsigned int flags,
                        umode_t mode, struct file *filp);
 /* ELF coredump callbacks for writing SPU ELF notes */
 extern int spufs_coredump_extra_notes_size(void);
-extern int spufs_coredump_extra_notes_write(struct file *file, loff_t *foffset);
+extern int spufs_coredump_extra_notes_write(struct coredump_params *);
 
 extern const struct file_operations spufs_context_fops;
 
index c24684c818ab015cc9140d9870f26dc97bfb162a..6fae5eb99ea6febff2fdd465fcf19c64d9114643 100644 (file)
@@ -7,6 +7,8 @@ config PPC_POWERNV
        select PPC_P7_NAP
        select PPC_PCI_CHOICE if EMBEDDED
        select EPAPR_BOOT
+       select PPC_INDIRECT_PIO
+       select PPC_UDBG_16550
        default y
 
 config POWERNV_MSI
index 7fe595152478a08a756d89277a993cf2650d42ef..300c437d713cf1a6b4c73d2b2bb58830483219fe 100644 (file)
@@ -1,5 +1,5 @@
 obj-y                  += setup.o opal-takeover.o opal-wrappers.o opal.o
-obj-y                  += opal-rtc.o opal-nvram.o
+obj-y                  += opal-rtc.o opal-nvram.o opal-lpc.o
 
 obj-$(CONFIG_SMP)      += smp.o
 obj-$(CONFIG_PCI)      += pci.o pci-p5ioc2.o pci-ioda.o
index 0cd1c4a717550d150ade10ec180c405cec48aa76..cf42e74514fa192e3a8d64ed0d41ada445a974e3 100644 (file)
 #include "powernv.h"
 #include "pci.h"
 
-/* Debugging option */
-#ifdef IODA_EEH_DBG_ON
-#define IODA_EEH_DBG(args...)  pr_info(args)
-#else
-#define IODA_EEH_DBG(args...)
-#endif
-
 static char *hub_diag = NULL;
 static int ioda_eeh_nb_init = 0;
 
@@ -823,17 +816,17 @@ static int ioda_eeh_next_error(struct eeh_pe **pe)
 
                /* If OPAL API returns error, we needn't proceed */
                if (rc != OPAL_SUCCESS) {
-                       IODA_EEH_DBG("%s: Invalid return value on "
-                                    "PHB#%x (0x%lx) from opal_pci_next_error",
-                                    __func__, hose->global_number, rc);
+                       pr_devel("%s: Invalid return value on "
+                                "PHB#%x (0x%lx) from opal_pci_next_error",
+                                __func__, hose->global_number, rc);
                        continue;
                }
 
                /* If the PHB doesn't have error, stop processing */
                if (err_type == OPAL_EEH_NO_ERROR ||
                    severity == OPAL_EEH_SEV_NO_ERROR) {
-                       IODA_EEH_DBG("%s: No error found on PHB#%x\n",
-                                    __func__, hose->global_number);
+                       pr_devel("%s: No error found on PHB#%x\n",
+                                __func__, hose->global_number);
                        continue;
                }
 
@@ -842,8 +835,9 @@ static int ioda_eeh_next_error(struct eeh_pe **pe)
                 * highest priority reported upon multiple errors on the
                 * specific PHB.
                 */
-               IODA_EEH_DBG("%s: Error (%d, %d, %d) on PHB#%x\n",
-                       err_type, severity, pe_no, hose->global_number);
+               pr_devel("%s: Error (%d, %d, %llu) on PHB#%x\n",
+                        __func__, err_type, severity,
+                        frozen_pe_no, hose->global_number);
                switch (err_type) {
                case OPAL_EEH_IOC_ERROR:
                        if (severity == OPAL_EEH_SEV_IOC_DEAD) {
diff --git a/arch/powerpc/platforms/powernv/opal-lpc.c b/arch/powerpc/platforms/powernv/opal-lpc.c
new file mode 100644 (file)
index 0000000..a7614bb
--- /dev/null
@@ -0,0 +1,203 @@
+/*
+ * PowerNV LPC bus handling.
+ *
+ * Copyright 2013 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/bug.h>
+
+#include <asm/machdep.h>
+#include <asm/firmware.h>
+#include <asm/xics.h>
+#include <asm/opal.h>
+
+/* Chip id of the primary LPC bus found in the device tree; -1 = none */
+static int opal_lpc_chip_id = -1;
+
+/*
+ * Read one byte from LPC IO space via the opal_lpc_read firmware call.
+ * Returns 0xff (floating-bus value) when no LPC bus was found, the port
+ * is out of the 16-bit IO range, or the OPAL call fails.
+ */
+static u8 opal_lpc_inb(unsigned long port)
+{
+       int64_t rc;
+       uint32_t data;
+
+       if (opal_lpc_chip_id < 0 || port > 0xffff)
+               return 0xff;
+       rc = opal_lpc_read(opal_lpc_chip_id, OPAL_LPC_IO, port, &data, 1);
+       return rc ? 0xff : data;
+}
+
+/*
+ * 16-bit LPC IO read returning the raw (little-endian wire order) value.
+ * An odd port cannot be read as one 2-byte OPAL transfer, so it is split
+ * into two byte reads, the first byte placed in the high bits.  Returns
+ * 0xffff on missing bus, out-of-range port, or OPAL failure.
+ */
+static __le16 __opal_lpc_inw(unsigned long port)
+{
+       int64_t rc;
+       uint32_t data;
+
+       if (opal_lpc_chip_id < 0 || port > 0xfffe)
+               return 0xffff;
+       if (port & 1)
+               return (__le16)opal_lpc_inb(port) << 8 | opal_lpc_inb(port + 1);
+       rc = opal_lpc_read(opal_lpc_chip_id, OPAL_LPC_IO, port, &data, 2);
+       return rc ? 0xffff : data;
+}
+/* CPU-endian wrapper used as the ppc_pci_io .inw hook */
+static u16 opal_lpc_inw(unsigned long port)
+{
+       return le16_to_cpu(__opal_lpc_inw(port));
+}
+
+/*
+ * 32-bit LPC IO read returning the raw (little-endian wire order) value.
+ * A non-4-byte-aligned port is decomposed into four byte reads (first
+ * byte in the highest bits).  Returns 0xffffffff on missing bus,
+ * out-of-range port, or OPAL failure.
+ */
+static __le32 __opal_lpc_inl(unsigned long port)
+{
+       int64_t rc;
+       uint32_t data;
+
+       if (opal_lpc_chip_id < 0 || port > 0xfffc)
+               return 0xffffffff;
+       if (port & 3)
+               return (__le32)opal_lpc_inb(port    ) << 24 |
+                      (__le32)opal_lpc_inb(port + 1) << 16 |
+                      (__le32)opal_lpc_inb(port + 2) <<  8 |
+                              opal_lpc_inb(port + 3);
+       rc = opal_lpc_read(opal_lpc_chip_id, OPAL_LPC_IO, port, &data, 4);
+       return rc ? 0xffffffff : data;
+}
+
+/* CPU-endian wrapper used as the ppc_pci_io .inl hook */
+static u32 opal_lpc_inl(unsigned long port)
+{
+       return le32_to_cpu(__opal_lpc_inl(port));
+}
+
+/*
+ * Write one byte to LPC IO space.  Silently does nothing when no LPC
+ * bus was found or the port is out of range; the OPAL return code is
+ * deliberately ignored (port IO has no error reporting path).
+ */
+static void opal_lpc_outb(u8 val, unsigned long port)
+{
+       if (opal_lpc_chip_id < 0 || port > 0xffff)
+               return;
+       opal_lpc_write(opal_lpc_chip_id, OPAL_LPC_IO, port, val, 1);
+}
+
+/*
+ * 16-bit LPC IO write taking a raw (little-endian wire order) value.
+ * An odd port is split into two byte writes: high bits to 'port', low
+ * byte to 'port + 1' — the mirror of __opal_lpc_inw().
+ */
+static void __opal_lpc_outw(__le16 val, unsigned long port)
+{
+       if (opal_lpc_chip_id < 0 || port > 0xfffe)
+               return;
+       if (port & 1) {
+               opal_lpc_outb(val >> 8, port);
+               opal_lpc_outb(val     , port + 1);
+               return;
+       }
+       opal_lpc_write(opal_lpc_chip_id, OPAL_LPC_IO, port, val, 2);
+}
+
+/* CPU-endian wrapper used as the ppc_pci_io .outw hook */
+static void opal_lpc_outw(u16 val, unsigned long port)
+{
+       __opal_lpc_outw(cpu_to_le16(val), port);
+}
+
+/*
+ * 32-bit LPC IO write taking a raw (little-endian wire order) value.
+ * A non-4-byte-aligned port is decomposed into four byte writes,
+ * highest bits first — the mirror of __opal_lpc_inl().
+ */
+static void __opal_lpc_outl(__le32 val, unsigned long port)
+{
+       if (opal_lpc_chip_id < 0 || port > 0xfffc)
+               return;
+       if (port & 3) {
+               opal_lpc_outb(val >> 24, port);
+               opal_lpc_outb(val >> 16, port + 1);
+               opal_lpc_outb(val >>  8, port + 2);
+               opal_lpc_outb(val      , port + 3);
+               return;
+       }
+       opal_lpc_write(opal_lpc_chip_id, OPAL_LPC_IO, port, val, 4);
+}
+
+/* CPU-endian wrapper used as the ppc_pci_io .outl hook */
+static void opal_lpc_outl(u32 val, unsigned long port)
+{
+       __opal_lpc_outl(cpu_to_le32(val), port);
+}
+
+/*
+ * String-read variants (insb/insw/insl): read 'c' items repeatedly from
+ * the SAME port 'p' into buffer 'b'.  The 16/32-bit forms store the raw
+ * little-endian wire value without byte-swapping, matching the usual
+ * ins{w,l} contract of moving data unmodified.
+ */
+static void opal_lpc_insb(unsigned long p, void *b, unsigned long c)
+{
+       u8 *ptr = b;
+
+       while(c--)
+               *(ptr++) = opal_lpc_inb(p);
+}
+
+static void opal_lpc_insw(unsigned long p, void *b, unsigned long c)
+{
+       __le16 *ptr = b;
+
+       while(c--)
+               *(ptr++) = __opal_lpc_inw(p);
+}
+
+static void opal_lpc_insl(unsigned long p, void *b, unsigned long c)
+{
+       __le32 *ptr = b;
+
+       while(c--)
+               *(ptr++) = __opal_lpc_inl(p);
+}
+
+/*
+ * String-write variants (outsb/outsw/outsl): write 'c' items from
+ * buffer 'b' to the SAME port 'p'.  The 16/32-bit forms pass the
+ * buffer contents through as raw little-endian wire values, mirroring
+ * the ins* helpers above.
+ */
+static void opal_lpc_outsb(unsigned long p, const void *b, unsigned long c)
+{
+       const u8 *ptr = b;
+
+       while(c--)
+               opal_lpc_outb(*(ptr++), p);
+}
+
+static void opal_lpc_outsw(unsigned long p, const void *b, unsigned long c)
+{
+       const __le16 *ptr = b;
+
+       while(c--)
+               __opal_lpc_outw(*(ptr++), p);
+}
+
+static void opal_lpc_outsl(unsigned long p, const void *b, unsigned long c)
+{
+       const __le32 *ptr = b;
+
+       while(c--)
+               __opal_lpc_outl(*(ptr++), p);
+}
+
+/*
+ * IO accessor table installed into the global ppc_pci_io by
+ * opal_lpc_init(), routing all port IO through the OPAL LPC calls.
+ */
+static const struct ppc_pci_io opal_lpc_io = {
+       .inb    = opal_lpc_inb,
+       .inw    = opal_lpc_inw,
+       .inl    = opal_lpc_inl,
+       .outb   = opal_lpc_outb,
+       .outw   = opal_lpc_outw,
+       .outl   = opal_lpc_outl,
+       .insb   = opal_lpc_insb,
+       .insw   = opal_lpc_insw,
+       .insl   = opal_lpc_insl,
+       .outsb  = opal_lpc_outsb,
+       .outsw  = opal_lpc_outsw,
+       .outsl  = opal_lpc_outsl,
+};
+
+/*
+ * Locate the primary Power8 LPC bus in the device tree and, if found,
+ * install the OPAL-backed port IO accessors.  Safe no-op when no
+ * available "ibm,power8-lpc" node with a "primary" property exists.
+ *
+ * NOTE(review): powernv.h in this same series declares pnv_lpc_init()
+ * while setup.c calls opal_lpc_init() — verify the extern declaration
+ * for this function matches its actual name.
+ */
+void opal_lpc_init(void)
+{
+       struct device_node *np;
+
+       /*
+        * Look for a Power8 LPC bus tagged as "primary",
+        * we currently support only one though the OPAL APIs
+        * support any number.
+        */
+       for_each_compatible_node(np, NULL, "ibm,power8-lpc") {
+               if (!of_device_is_available(np))
+                       continue;
+               if (!of_get_property(np, "primary", NULL))
+                       continue;
+               opal_lpc_chip_id = of_get_ibm_chip_id(np);
+               break;
+       }
+       if (opal_lpc_chip_id < 0)
+               return;
+
+       /* Setup special IO ops */
+       ppc_pci_io = opal_lpc_io;
+       isa_io_special = true;
+
+       pr_info("OPAL: Power8 LPC bus found, chip ID %d\n", opal_lpc_chip_id);
+}
index e88863ffb13543d15d6812942a5d9560c69f5c0b..42c06fba3994ad817984d58369832db032451026 100644 (file)
@@ -111,3 +111,7 @@ OPAL_CALL(opal_pci_next_error,                      OPAL_PCI_NEXT_ERROR);
 OPAL_CALL(opal_pci_poll,                       OPAL_PCI_POLL);
 OPAL_CALL(opal_pci_msi_eoi,                    OPAL_PCI_MSI_EOI);
 OPAL_CALL(opal_pci_get_phb_diag_data2,         OPAL_PCI_GET_PHB_DIAG_DATA2);
+OPAL_CALL(opal_xscom_read,                     OPAL_XSCOM_READ);
+OPAL_CALL(opal_xscom_write,                    OPAL_XSCOM_WRITE);
+OPAL_CALL(opal_lpc_read,                       OPAL_LPC_READ);
+OPAL_CALL(opal_lpc_write,                      OPAL_LPC_WRITE);
index 106301fd2fa590a48b8cce46d0ba5b20013a9412..2911abe550f1d9182ce793f0ec4777b0c85295b7 100644 (file)
@@ -380,18 +380,20 @@ static int __init opal_init(void)
                pr_warn("opal: Node not found\n");
                return -ENODEV;
        }
+
+       /* Register OPAL consoles if any ports */
        if (firmware_has_feature(FW_FEATURE_OPALv2))
                consoles = of_find_node_by_path("/ibm,opal/consoles");
        else
                consoles = of_node_get(opal_node);
-
-       /* Register serial ports */
-       for_each_child_of_node(consoles, np) {
-               if (strcmp(np->name, "serial"))
-                       continue;
-               of_platform_device_create(np, NULL, NULL);
+       if (consoles) {
+               for_each_child_of_node(consoles, np) {
+                       if (strcmp(np->name, "serial"))
+                               continue;
+                       of_platform_device_create(np, NULL, NULL);
+               }
+               of_node_put(consoles);
        }
-       of_node_put(consoles);
 
        /* Find all OPAL interrupts and request them */
        irqs = of_get_property(opal_node, "opal-interrupts", &irqlen);
@@ -422,7 +424,7 @@ void opal_shutdown(void)
 
        for (i = 0; i < opal_irq_count; i++) {
                if (opal_irqs[i])
-                       free_irq(opal_irqs[i], 0);
+                       free_irq(opal_irqs[i], NULL);
                opal_irqs[i] = 0;
        }
 }
index d8140b125e62508720388d683a5028199d62ff28..74a5a5773b1fbce0c31f567ef6536e75038fff98 100644 (file)
@@ -1104,16 +1104,16 @@ void __init pnv_pci_init_ioda_phb(struct device_node *np,
                                  u64 hub_id, int ioda_type)
 {
        struct pci_controller *hose;
-       static int primary = 1;
        struct pnv_phb *phb;
        unsigned long size, m32map_off, iomap_off, pemap_off;
        const u64 *prop64;
        const u32 *prop32;
+       int len;
        u64 phb_id;
        void *aux;
        long rc;
 
-       pr_info(" Initializing IODA%d OPAL PHB %s\n", ioda_type, np->full_name);
+       pr_info("Initializing IODA%d OPAL PHB %s\n", ioda_type, np->full_name);
 
        prop64 = of_get_property(np, "ibm,opal-phbid", NULL);
        if (!prop64) {
@@ -1124,20 +1124,31 @@ void __init pnv_pci_init_ioda_phb(struct device_node *np,
        pr_debug("  PHB-ID  : 0x%016llx\n", phb_id);
 
        phb = alloc_bootmem(sizeof(struct pnv_phb));
-       if (phb) {
-               memset(phb, 0, sizeof(struct pnv_phb));
-               phb->hose = hose = pcibios_alloc_controller(np);
+       if (!phb) {
+               pr_err("  Out of memory !\n");
+               return;
        }
-       if (!phb || !phb->hose) {
-               pr_err("PCI: Failed to allocate PCI controller for %s\n",
+
+       /* Allocate PCI controller */
+       memset(phb, 0, sizeof(struct pnv_phb));
+       phb->hose = hose = pcibios_alloc_controller(np);
+       if (!phb->hose) {
+               pr_err("  Can't allocate PCI controller for %s\n",
                       np->full_name);
+               free_bootmem((unsigned long)phb, sizeof(struct pnv_phb));
                return;
        }
 
        spin_lock_init(&phb->lock);
-       /* XXX Use device-tree */
-       hose->first_busno = 0;
-       hose->last_busno = 0xff;
+       prop32 = of_get_property(np, "bus-range", &len);
+       if (prop32 && len == 8) {
+               hose->first_busno = prop32[0];
+               hose->last_busno = prop32[1];
+       } else {
+               pr_warn("  Broken <bus-range> on %s\n", np->full_name);
+               hose->first_busno = 0;
+               hose->last_busno = 0xff;
+       }
        hose->private_data = phb;
        phb->hub_id = hub_id;
        phb->opal_id = phb_id;
@@ -1152,8 +1163,7 @@ void __init pnv_pci_init_ioda_phb(struct device_node *np,
                phb->model = PNV_PHB_MODEL_UNKNOWN;
 
        /* Parse 32-bit and IO ranges (if any) */
-       pci_process_bridge_OF_ranges(phb->hose, np, primary);
-       primary = 0;
+       pci_process_bridge_OF_ranges(hose, np, !hose->global_number);
 
        /* Get registers */
        phb->regs = of_iomap(np, 0);
@@ -1177,22 +1187,23 @@ void __init pnv_pci_init_ioda_phb(struct device_node *np,
        phb->ioda.io_segsize = phb->ioda.io_size / phb->ioda.total_pe;
        phb->ioda.io_pci_base = 0; /* XXX calculate this ? */
 
-       /* Allocate aux data & arrays
-        *
-        * XXX TODO: Don't allocate io segmap on PHB3
-        */
+       /* Allocate aux data & arrays. We don't have IO ports on PHB3 */
        size = _ALIGN_UP(phb->ioda.total_pe / 8, sizeof(unsigned long));
        m32map_off = size;
        size += phb->ioda.total_pe * sizeof(phb->ioda.m32_segmap[0]);
        iomap_off = size;
-       size += phb->ioda.total_pe * sizeof(phb->ioda.io_segmap[0]);
+       if (phb->type == PNV_PHB_IODA1) {
+               iomap_off = size;
+               size += phb->ioda.total_pe * sizeof(phb->ioda.io_segmap[0]);
+       }
        pemap_off = size;
        size += phb->ioda.total_pe * sizeof(struct pnv_ioda_pe);
        aux = alloc_bootmem(size);
        memset(aux, 0, size);
        phb->ioda.pe_alloc = aux;
        phb->ioda.m32_segmap = aux + m32map_off;
-       phb->ioda.io_segmap = aux + iomap_off;
+       if (phb->type == PNV_PHB_IODA1)
+               phb->ioda.io_segmap = aux + iomap_off;
        phb->ioda.pe_array = aux + pemap_off;
        set_bit(0, phb->ioda.pe_alloc);
 
index a1c6f83fc3916efab1a092e1c7b102ff4a3c788a..de6819be1f95ea11911bbadd5080fb05db66e884 100644 (file)
@@ -15,4 +15,6 @@ static inline void pnv_pci_init(void) { }
 static inline void pnv_pci_shutdown(void) { }
 #endif
 
+extern void pnv_lpc_init(void);
+
 #endif /* _POWERNV_H */
index 84438af96c052b7e465d6b6a03d333ba9e7b37ba..4ddb339700b9453b1c02b793a0d03b302984c570 100644 (file)
@@ -54,6 +54,12 @@ static void __init pnv_setup_arch(void)
 
 static void __init pnv_init_early(void)
 {
+       /*
+        * Initialize the LPC bus now so that legacy serial
+        * ports can be found on it
+        */
+       opal_lpc_init();
+
 #ifdef CONFIG_HVC_OPAL
        if (firmware_has_feature(FW_FEATURE_OPAL))
                hvc_opal_init_early();
index 89e3857af4e0913679448c5cc78c6549d17b29e9..908672bdcea6b2c77d75763a05d10f476e2b2d75 100644 (file)
@@ -46,22 +46,6 @@ static void pnv_smp_setup_cpu(int cpu)
                xics_setup_cpu();
 }
 
-static int pnv_smp_cpu_bootable(unsigned int nr)
-{
-       /* Special case - we inhibit secondary thread startup
-        * during boot if the user requests it.
-        */
-       if (system_state == SYSTEM_BOOTING && cpu_has_feature(CPU_FTR_SMT)) {
-               if (!smt_enabled_at_boot && cpu_thread_in_core(nr) != 0)
-                       return 0;
-               if (smt_enabled_at_boot
-                   && cpu_thread_in_core(nr) >= smt_enabled_at_boot)
-                       return 0;
-       }
-
-       return 1;
-}
-
 int pnv_smp_kick_cpu(int nr)
 {
        unsigned int pcpu = get_hard_smp_processor_id(nr);
@@ -195,7 +179,7 @@ static struct smp_ops_t pnv_smp_ops = {
        .probe          = xics_smp_probe,
        .kick_cpu       = pnv_smp_kick_cpu,
        .setup_cpu      = pnv_smp_setup_cpu,
-       .cpu_bootable   = pnv_smp_cpu_bootable,
+       .cpu_bootable   = smp_generic_cpu_bootable,
 #ifdef CONFIG_HOTPLUG_CPU
        .cpu_disable    = pnv_smp_cpu_disable,
        .cpu_die        = generic_cpu_die,
index 0cc0ac07a55dc661e0e0ad3acceaa55d33818800..238240e02ef80c8cbcb1cdbb07061953b844e1ca 100644 (file)
@@ -87,7 +87,7 @@ static void consume_dtle(struct dtl_entry *dtle, u64 index)
        barrier();
 
        /* check for hypervisor ring buffer overflow, ignore this entry if so */
-       if (index + N_DISPATCH_LOG < vpa->dtl_idx)
+       if (index + N_DISPATCH_LOG < be64_to_cpu(vpa->dtl_idx))
                return;
 
        ++wp;
index 217ca5c75b2007f32615266fb20f36fce359a2cd..1e490cf63a0d9669d85846b000b44ff9cb9a09c6 100644 (file)
@@ -123,7 +123,7 @@ static void pseries_mach_cpu_die(void)
                cede_latency_hint = 2;
 
                get_lppaca()->idle = 1;
-               if (!get_lppaca()->shared_proc)
+               if (!lppaca_shared_proc(get_lppaca()))
                        get_lppaca()->donate_dedicated_cpu = 1;
 
                while (get_preferred_offline_state(cpu) == CPU_STATE_INACTIVE) {
@@ -137,7 +137,7 @@ static void pseries_mach_cpu_die(void)
 
                local_irq_disable();
 
-               if (!get_lppaca()->shared_proc)
+               if (!lppaca_shared_proc(get_lppaca()))
                        get_lppaca()->donate_dedicated_cpu = 0;
                get_lppaca()->idle = 0;
 
index b344f94b040091b60eac57a80c5218a2e5071ebc..ef6d59aea29e64a0d4cb43243003b5a2ff9069b8 100644 (file)
  */
 int hvc_get_chars(uint32_t vtermno, char *buf, int count)
 {
-       unsigned long got;
+       long ret;
+       unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+       unsigned long *lbuf = (unsigned long *)buf;
+
+       ret = plpar_hcall(H_GET_TERM_CHAR, retbuf, vtermno);
+       lbuf[0] = be64_to_cpu(retbuf[1]);
+       lbuf[1] = be64_to_cpu(retbuf[2]);
 
-       if (plpar_get_term_char(vtermno, &got, buf) == H_SUCCESS)
-               return got;
+       if (ret == H_SUCCESS)
+               return retbuf[0];
 
        return 0;
 }
@@ -69,8 +75,9 @@ int hvc_put_chars(uint32_t vtermno, const char *buf, int count)
        if (count > MAX_VIO_PUT_CHARS)
                count = MAX_VIO_PUT_CHARS;
 
-       ret = plpar_hcall_norets(H_PUT_TERM_CHAR, vtermno, count, lbuf[0],
-                                lbuf[1]);
+       ret = plpar_hcall_norets(H_PUT_TERM_CHAR, vtermno, count,
+                                cpu_to_be64(lbuf[0]),
+                                cpu_to_be64(lbuf[1]));
        if (ret == H_SUCCESS)
                return count;
        if (ret == H_BUSY)
index 23fc1dcf44344543a9e3336b61e5f0d9f2bf6201..9087f97687814feb7e3235bcf7b270e25f96049f 100644 (file)
@@ -530,7 +530,7 @@ static void iommu_table_setparms(struct pci_controller *phb,
 static void iommu_table_setparms_lpar(struct pci_controller *phb,
                                      struct device_node *dn,
                                      struct iommu_table *tbl,
-                                     const void *dma_window)
+                                     const __be32 *dma_window)
 {
        unsigned long offset, size;
 
@@ -630,7 +630,7 @@ static void pci_dma_bus_setup_pSeriesLP(struct pci_bus *bus)
        struct iommu_table *tbl;
        struct device_node *dn, *pdn;
        struct pci_dn *ppci;
-       const void *dma_window = NULL;
+       const __be32 *dma_window = NULL;
 
        dn = pci_bus_to_OF_node(bus);
 
@@ -1152,7 +1152,7 @@ static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev)
 {
        struct device_node *pdn, *dn;
        struct iommu_table *tbl;
-       const void *dma_window = NULL;
+       const __be32 *dma_window = NULL;
        struct pci_dn *pci;
 
        pr_debug("pci_dma_dev_setup_pSeriesLP: %s\n", pci_name(dev));
@@ -1201,7 +1201,7 @@ static int dma_set_mask_pSeriesLP(struct device *dev, u64 dma_mask)
        bool ddw_enabled = false;
        struct device_node *pdn, *dn;
        struct pci_dev *pdev;
-       const void *dma_window = NULL;
+       const __be32 *dma_window = NULL;
        u64 dma_offset;
 
        if (!dev->dma_mask)
index 8bad880bd177a2efaf9887b0509593401112aee4..0b7c86e3d75d66631271d8638d71a66267645a6a 100644 (file)
@@ -106,7 +106,7 @@ void vpa_init(int cpu)
                lppaca_of(cpu).dtl_idx = 0;
 
                /* hypervisor reads buffer length from this field */
-               dtl->enqueue_to_dispatch_time = DISPATCH_LOG_BYTES;
+               dtl->enqueue_to_dispatch_time = cpu_to_be32(DISPATCH_LOG_BYTES);
                ret = register_dtl(hwcpu, __pa(dtl));
                if (ret)
                        pr_err("WARNING: DTL registration of cpu %d (hw %d) "
@@ -724,7 +724,7 @@ int h_get_mpp(struct hvcall_mpp_data *mpp_data)
 
        mpp_data->mem_weight = (retbuf[3] >> 7 * 8) & 0xff;
        mpp_data->unallocated_mem_weight = (retbuf[3] >> 6 * 8) & 0xff;
-       mpp_data->unallocated_entitlement = retbuf[3] & 0xffffffffffff;
+       mpp_data->unallocated_entitlement = retbuf[3] & 0xffffffffffffUL;
 
        mpp_data->pool_size = retbuf[4];
        mpp_data->loan_request = retbuf[5];
index 6a5f2b1f32ca167d3ec6b39b0dfd69067d6b37d2..d276cd3edd8f51641db54dfb459cb2efe4fa7f91 100644 (file)
@@ -539,36 +539,6 @@ static int zip_oops(size_t text_len)
 }
 
 #ifdef CONFIG_PSTORE
-/* Derived from logfs_uncompress */
-int nvram_decompress(void *in, void *out, size_t inlen, size_t outlen)
-{
-       int err, ret;
-
-       ret = -EIO;
-       err = zlib_inflateInit(&stream);
-       if (err != Z_OK)
-               goto error;
-
-       stream.next_in = in;
-       stream.avail_in = inlen;
-       stream.total_in = 0;
-       stream.next_out = out;
-       stream.avail_out = outlen;
-       stream.total_out = 0;
-
-       err = zlib_inflate(&stream, Z_FINISH);
-       if (err != Z_STREAM_END)
-               goto error;
-
-       err = zlib_inflateEnd(&stream);
-       if (err != Z_OK)
-               goto error;
-
-       ret = stream.total_out;
-error:
-       return ret;
-}
-
 static int nvram_pstore_open(struct pstore_info *psi)
 {
        /* Reset the iterator to start reading partitions again */
@@ -584,7 +554,7 @@ static int nvram_pstore_open(struct pstore_info *psi)
  * @part:               pstore writes data to registered buffer in parts,
  *                      part number will indicate the same.
  * @count:              Indicates oops count
- * @hsize:              Size of header added by pstore
+ * @compressed:         Flag to indicate the log is compressed
  * @size:               number of bytes written to the registered buffer
  * @psi:                registered pstore_info structure
  *
@@ -595,7 +565,7 @@ static int nvram_pstore_open(struct pstore_info *psi)
 static int nvram_pstore_write(enum pstore_type_id type,
                                enum kmsg_dump_reason reason,
                                u64 *id, unsigned int part, int count,
-                               size_t hsize, size_t size,
+                               bool compressed, size_t size,
                                struct pstore_info *psi)
 {
        int rc;
@@ -611,30 +581,11 @@ static int nvram_pstore_write(enum pstore_type_id type,
        oops_hdr->report_length = (u16) size;
        oops_hdr->timestamp = get_seconds();
 
-       if (big_oops_buf) {
-               rc = zip_oops(size);
-               /*
-                * If compression fails copy recent log messages from
-                * big_oops_buf to oops_data.
-                */
-               if (rc != 0) {
-                       size_t diff = size - oops_data_sz + hsize;
-
-                       if (size > oops_data_sz) {
-                               memcpy(oops_data, big_oops_buf, hsize);
-                               memcpy(oops_data + hsize, big_oops_buf + diff,
-                                       oops_data_sz - hsize);
-
-                               oops_hdr->report_length = (u16) oops_data_sz;
-                       } else
-                               memcpy(oops_data, big_oops_buf, size);
-               } else
-                       err_type = ERR_TYPE_KERNEL_PANIC_GZ;
-       }
+       if (compressed)
+               err_type = ERR_TYPE_KERNEL_PANIC_GZ;
 
        rc = nvram_write_os_partition(&oops_log_partition, oops_buf,
-               (int) (sizeof(*oops_hdr) + oops_hdr->report_length), err_type,
-               count);
+               (int) (sizeof(*oops_hdr) + size), err_type, count);
 
        if (rc != 0)
                return rc;
@@ -650,12 +601,12 @@ static int nvram_pstore_write(enum pstore_type_id type,
  */
 static ssize_t nvram_pstore_read(u64 *id, enum pstore_type_id *type,
                                int *count, struct timespec *time, char **buf,
-                               struct pstore_info *psi)
+                               bool *compressed, struct pstore_info *psi)
 {
        struct oops_log_info *oops_hdr;
        unsigned int err_type, id_no, size = 0;
        struct nvram_os_partition *part = NULL;
-       char *buff = NULL, *big_buff = NULL;
+       char *buff = NULL;
        int sig = 0;
        loff_t p;
 
@@ -719,8 +670,7 @@ static ssize_t nvram_pstore_read(u64 *id, enum pstore_type_id *type,
                *id = id_no;
 
        if (nvram_type_ids[read_type] == PSTORE_TYPE_DMESG) {
-               int length, unzipped_len;
-               size_t hdr_size;
+               size_t length, hdr_size;
 
                oops_hdr = (struct oops_log_info *)buff;
                if (oops_hdr->version < OOPS_HDR_VERSION) {
@@ -741,23 +691,10 @@ static ssize_t nvram_pstore_read(u64 *id, enum pstore_type_id *type,
                memcpy(*buf, buff + hdr_size, length);
                kfree(buff);
 
-               if (err_type == ERR_TYPE_KERNEL_PANIC_GZ) {
-                       big_buff = kmalloc(big_oops_buf_sz, GFP_KERNEL);
-                       if (!big_buff)
-                               return -ENOMEM;
-
-                       unzipped_len = nvram_decompress(*buf, big_buff,
-                                               length, big_oops_buf_sz);
-
-                       if (unzipped_len < 0) {
-                               pr_err("nvram: decompression failed, returned "
-                                       "rc %d\n", unzipped_len);
-                               kfree(big_buff);
-                       } else {
-                               *buf = big_buff;
-                               length = unzipped_len;
-                       }
-               }
+               if (err_type == ERR_TYPE_KERNEL_PANIC_GZ)
+                       *compressed = true;
+               else
+                       *compressed = false;
                return length;
        }
 
@@ -777,13 +714,8 @@ static int nvram_pstore_init(void)
 {
        int rc = 0;
 
-       if (big_oops_buf) {
-               nvram_pstore_info.buf = big_oops_buf;
-               nvram_pstore_info.bufsize = big_oops_buf_sz;
-       } else {
-               nvram_pstore_info.buf = oops_data;
-               nvram_pstore_info.bufsize = oops_data_sz;
-       }
+       nvram_pstore_info.buf = oops_data;
+       nvram_pstore_info.bufsize = oops_data_sz;
 
        rc = pstore_register(&nvram_pstore_info);
        if (rc != 0)
@@ -802,7 +734,6 @@ static int nvram_pstore_init(void)
 static void __init nvram_init_oops_partition(int rtas_partition_exists)
 {
        int rc;
-       size_t size;
 
        rc = pseries_nvram_init_os_partition(&oops_log_partition);
        if (rc != 0) {
@@ -823,6 +754,11 @@ static void __init nvram_init_oops_partition(int rtas_partition_exists)
        oops_data = oops_buf + sizeof(struct oops_log_info);
        oops_data_sz = oops_log_partition.size - sizeof(struct oops_log_info);
 
+       rc = nvram_pstore_init();
+
+       if (!rc)
+               return;
+
        /*
         * Figure compression (preceded by elimination of each line's <n>
         * severity prefix) will reduce the oops/panic report to at most
@@ -831,9 +767,8 @@ static void __init nvram_init_oops_partition(int rtas_partition_exists)
        big_oops_buf_sz = (oops_data_sz * 100) / 45;
        big_oops_buf = kmalloc(big_oops_buf_sz, GFP_KERNEL);
        if (big_oops_buf) {
-               size = max(zlib_deflate_workspacesize(WINDOW_BITS, MEM_LEVEL),
-                       zlib_inflate_workspacesize());
-               stream.workspace = kmalloc(size, GFP_KERNEL);
+               stream.workspace =  kmalloc(zlib_deflate_workspacesize(
+                                       WINDOW_BITS, MEM_LEVEL), GFP_KERNEL);
                if (!stream.workspace) {
                        pr_err("nvram: No memory for compression workspace; "
                                "skipping compression of %s partition data\n",
@@ -847,11 +782,6 @@ static void __init nvram_init_oops_partition(int rtas_partition_exists)
                stream.workspace = NULL;
        }
 
-       rc = nvram_pstore_init();
-
-       if (!rc)
-               return;
-
        rc = kmsg_dump_register(&nvram_kmsg_dumper);
        if (rc != 0) {
                pr_err("nvram: kmsg_dump_register() failed; returned %d\n", rc);
index f35787b6a5e051d45275b5e211daffa1fc3c9cde..417d0bfa451052256c333f9b5021c89332244377 100644 (file)
@@ -256,30 +256,6 @@ static inline long plpar_tce_stuff(unsigned long liobn, unsigned long ioba,
        return plpar_hcall_norets(H_STUFF_TCE, liobn, ioba, tceval, count);
 }
 
-static inline long plpar_get_term_char(unsigned long termno,
-               unsigned long *len_ret, char *buf_ret)
-{
-       long rc;
-       unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
-       unsigned long *lbuf = (unsigned long *)buf_ret; /* TODO: alignment? */
-
-       rc = plpar_hcall(H_GET_TERM_CHAR, retbuf, termno);
-
-       *len_ret = retbuf[0];
-       lbuf[0] = retbuf[1];
-       lbuf[1] = retbuf[2];
-
-       return rc;
-}
-
-static inline long plpar_put_term_char(unsigned long termno, unsigned long len,
-               const char *buffer)
-{
-       unsigned long *lbuf = (unsigned long *)buffer;  /* TODO: alignment? */
-       return plpar_hcall_norets(H_PUT_TERM_CHAR, termno, len, lbuf[0],
-                       lbuf[1]);
-}
-
 /* Set various resource mode parameters */
 static inline long plpar_set_mode(unsigned long mflags, unsigned long resource,
                unsigned long value1, unsigned long value2)
index 4644efa069411e8f797ff53f1a853620278ca3a4..14899b1db1e92a09cbc3df78dcfd37b567d6e535 100644 (file)
@@ -45,7 +45,11 @@ static inline void idle_loop_prolog(unsigned long *in_purr)
 
 static inline void idle_loop_epilog(unsigned long in_purr)
 {
-       get_lppaca()->wait_state_cycles += mfspr(SPRN_PURR) - in_purr;
+       u64 wait_cycles;
+
+       wait_cycles = be64_to_cpu(get_lppaca()->wait_state_cycles);
+       wait_cycles += mfspr(SPRN_PURR) - in_purr;
+       get_lppaca()->wait_state_cycles = cpu_to_be64(wait_cycles);
        get_lppaca()->idle = 0;
 }
 
@@ -308,7 +312,7 @@ static int pseries_idle_probe(void)
                return -EPERM;
        }
 
-       if (get_lppaca()->shared_proc)
+       if (lppaca_shared_proc(get_lppaca()))
                cpuidle_state_table = shared_states;
        else
                cpuidle_state_table = dedicated_states;
index a91e6dadda2cb120bb81b46695cbce4735e67973..92767791f93b647e11cac5ec5156a0ab7f34bd49 100644 (file)
@@ -108,8 +108,8 @@ err:
  * energy consumption.
  */
 
-#define FLAGS_MODE1    0x004E200000080E01
-#define FLAGS_MODE2    0x004E200000080401
+#define FLAGS_MODE1    0x004E200000080E01UL
+#define FLAGS_MODE2    0x004E200000080401UL
 #define FLAGS_ACTIVATE  0x100
 
 static ssize_t get_best_energy_list(char *page, int activate)
index c11c8238797c30574c9f77f88c561af049a26ab9..33d619665cb727022b2865cd5567759118531b23 100644 (file)
@@ -183,7 +183,7 @@ static void __init pseries_mpic_init_IRQ(void)
        np = of_find_node_by_path("/");
        naddr = of_n_addr_cells(np);
        opprop = of_get_property(np, "platform-open-pic", &opplen);
-       if (opprop != 0) {
+       if (opprop != NULL) {
                openpic_addr = of_read_number(opprop, naddr);
                printk(KERN_DEBUG "OpenPIC addr: %lx\n", openpic_addr);
        }
@@ -323,7 +323,7 @@ static int alloc_dispatch_logs(void)
        get_paca()->lppaca_ptr->dtl_idx = 0;
 
        /* hypervisor reads buffer length from this field */
-       dtl->enqueue_to_dispatch_time = DISPATCH_LOG_BYTES;
+       dtl->enqueue_to_dispatch_time = cpu_to_be32(DISPATCH_LOG_BYTES);
        ret = register_dtl(hard_smp_processor_id(), __pa(dtl));
        if (ret)
                pr_err("WARNING: DTL registration of cpu %d (hw %d) failed "
index 306643cc9dbcc6cf8d1a317a84fc98c2c4af0c10..ca2d1f6647f4047096649bd750e254debb740d26 100644 (file)
@@ -187,22 +187,6 @@ static int smp_pSeries_kick_cpu(int nr)
        return 0;
 }
 
-static int smp_pSeries_cpu_bootable(unsigned int nr)
-{
-       /* Special case - we inhibit secondary thread startup
-        * during boot if the user requests it.
-        */
-       if (system_state == SYSTEM_BOOTING && cpu_has_feature(CPU_FTR_SMT)) {
-               if (!smt_enabled_at_boot && cpu_thread_in_core(nr) != 0)
-                       return 0;
-               if (smt_enabled_at_boot
-                   && cpu_thread_in_core(nr) >= smt_enabled_at_boot)
-                       return 0;
-       }
-
-       return 1;
-}
-
 /* Only used on systems that support multiple IPI mechanisms */
 static void pSeries_cause_ipi_mux(int cpu, unsigned long data)
 {
@@ -237,7 +221,7 @@ static struct smp_ops_t pSeries_xics_smp_ops = {
        .probe          = pSeries_smp_probe,
        .kick_cpu       = smp_pSeries_kick_cpu,
        .setup_cpu      = smp_xics_setup_cpu,
-       .cpu_bootable   = smp_pSeries_cpu_bootable,
+       .cpu_bootable   = smp_generic_cpu_bootable,
 };
 
 /* This is called very early */
index 62ef21afb89a8a56189aa6638ee7a0483f6f64b0..a563a8aaf8122381682b419b1306f1a7f64db692 100644 (file)
@@ -17,7 +17,6 @@ extern void scom_init_wsp(void);
 extern void a2_setup_smp(void);
 extern int a2_scom_startup_cpu(unsigned int lcpu, int thr_idx,
                               struct device_node *np);
-extern int smp_a2_cpu_bootable(unsigned int nr);
 extern int smp_a2_kick_cpu(int nr);
 
 extern void opb_pic_init(void);
index ab02db3d02d8f89cd684b0f370db403c5a24d1e3..77efbaec7b9cb84c4cdd0bac06f49d22d297a66b 100644 (file)
 #include "fsl_msi.h"
 #include "fsl_pci.h"
 
+#define MSIIR_OFFSET_MASK      0xfffff
+#define MSIIR_IBS_SHIFT                0
+#define MSIIR_SRS_SHIFT                5
+#define MSIIR1_IBS_SHIFT       4
+#define MSIIR1_SRS_SHIFT       0
+#define MSI_SRS_MASK           0xf
+#define MSI_IBS_MASK           0x1f
+
+#define msi_hwirq(msi, msir_index, intr_index) \
+               ((msir_index) << (msi)->srs_shift | \
+                ((intr_index) << (msi)->ibs_shift))
+
 static LIST_HEAD(msi_head);
 
 struct fsl_msi_feature {
@@ -80,18 +92,19 @@ static const struct irq_domain_ops fsl_msi_host_ops = {
 
 static int fsl_msi_init_allocator(struct fsl_msi *msi_data)
 {
-       int rc;
+       int rc, hwirq;
 
-       rc = msi_bitmap_alloc(&msi_data->bitmap, NR_MSI_IRQS,
+       rc = msi_bitmap_alloc(&msi_data->bitmap, NR_MSI_IRQS_MAX,
                              msi_data->irqhost->of_node);
        if (rc)
                return rc;
 
-       rc = msi_bitmap_reserve_dt_hwirqs(&msi_data->bitmap);
-       if (rc < 0) {
-               msi_bitmap_free(&msi_data->bitmap);
-               return rc;
-       }
+       /*
+        * Reserve all the hwirqs
+        * The available hwirqs will be released in fsl_msi_setup_hwirq()
+        */
+       for (hwirq = 0; hwirq < NR_MSI_IRQS_MAX; hwirq++)
+               msi_bitmap_reserve_hwirq(&msi_data->bitmap, hwirq);
 
        return 0;
 }
@@ -144,8 +157,9 @@ static void fsl_compose_msi_msg(struct pci_dev *pdev, int hwirq,
 
        msg->data = hwirq;
 
-       pr_debug("%s: allocated srs: %d, ibs: %d\n",
-               __func__, hwirq / IRQS_PER_MSI_REG, hwirq % IRQS_PER_MSI_REG);
+       pr_debug("%s: allocated srs: %d, ibs: %d\n", __func__,
+                (hwirq >> msi_data->srs_shift) & MSI_SRS_MASK,
+                (hwirq >> msi_data->ibs_shift) & MSI_IBS_MASK);
 }
 
 static int fsl_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
@@ -255,7 +269,7 @@ static void fsl_msi_cascade(unsigned int irq, struct irq_desc *desc)
 
        msir_index = cascade_data->index;
 
-       if (msir_index >= NR_MSI_REG)
+       if (msir_index >= NR_MSI_REG_MAX)
                cascade_irq = NO_IRQ;
 
        irqd_set_chained_irq_inprogress(idata);
@@ -285,8 +299,8 @@ static void fsl_msi_cascade(unsigned int irq, struct irq_desc *desc)
                intr_index = ffs(msir_value) - 1;
 
                cascade_irq = irq_linear_revmap(msi_data->irqhost,
-                               msir_index * IRQS_PER_MSI_REG +
-                                       intr_index + have_shift);
+                               msi_hwirq(msi_data, msir_index,
+                                         intr_index + have_shift));
                if (cascade_irq != NO_IRQ)
                        generic_handle_irq(cascade_irq);
                have_shift += intr_index + 1;
@@ -316,7 +330,7 @@ static int fsl_of_msi_remove(struct platform_device *ofdev)
 
        if (msi->list.prev != NULL)
                list_del(&msi->list);
-       for (i = 0; i < NR_MSI_REG; i++) {
+       for (i = 0; i < NR_MSI_REG_MAX; i++) {
                virq = msi->msi_virqs[i];
                if (virq != NO_IRQ) {
                        cascade_data = irq_get_handler_data(virq);
@@ -339,7 +353,7 @@ static int fsl_msi_setup_hwirq(struct fsl_msi *msi, struct platform_device *dev,
                               int offset, int irq_index)
 {
        struct fsl_msi_cascade_data *cascade_data = NULL;
-       int virt_msir;
+       int virt_msir, i;
 
        virt_msir = irq_of_parse_and_map(dev->dev.of_node, irq_index);
        if (virt_msir == NO_IRQ) {
@@ -360,6 +374,11 @@ static int fsl_msi_setup_hwirq(struct fsl_msi *msi, struct platform_device *dev,
        irq_set_handler_data(virt_msir, cascade_data);
        irq_set_chained_handler(virt_msir, fsl_msi_cascade);
 
+       /* Release the hwirqs corresponding to this MSI register */
+       for (i = 0; i < IRQS_PER_MSI_REG; i++)
+               msi_bitmap_free_hwirqs(&msi->bitmap,
+                                      msi_hwirq(msi, offset, i), 1);
+
        return 0;
 }
 
@@ -368,14 +387,12 @@ static int fsl_of_msi_probe(struct platform_device *dev)
 {
        const struct of_device_id *match;
        struct fsl_msi *msi;
-       struct resource res;
+       struct resource res, msiir;
        int err, i, j, irq_index, count;
-       int rc;
        const u32 *p;
        const struct fsl_msi_feature *features;
        int len;
        u32 offset;
-       static const u32 all_avail[] = { 0, NR_MSI_IRQS };
 
        match = of_match_device(fsl_of_msi_ids, &dev->dev);
        if (!match)
@@ -392,7 +409,7 @@ static int fsl_of_msi_probe(struct platform_device *dev)
        platform_set_drvdata(dev, msi);
 
        msi->irqhost = irq_domain_add_linear(dev->dev.of_node,
-                                     NR_MSI_IRQS, &fsl_msi_host_ops, msi);
+                                     NR_MSI_IRQS_MAX, &fsl_msi_host_ops, msi);
 
        if (msi->irqhost == NULL) {
                dev_err(&dev->dev, "No memory for MSI irqhost\n");
@@ -421,6 +438,16 @@ static int fsl_of_msi_probe(struct platform_device *dev)
                }
                msi->msiir_offset =
                        features->msiir_offset + (res.start & 0xfffff);
+
+               /*
+                * First read the MSIIR/MSIIR1 offset from dts
+                * On failure use the hardcode MSIIR offset
+                */
+               if (of_address_to_resource(dev->dev.of_node, 1, &msiir))
+                       msi->msiir_offset = features->msiir_offset +
+                                           (res.start & MSIIR_OFFSET_MASK);
+               else
+                       msi->msiir_offset = msiir.start & MSIIR_OFFSET_MASK;
        }
 
        msi->feature = features->fsl_pic_ip;
@@ -431,42 +458,66 @@ static int fsl_of_msi_probe(struct platform_device *dev)
         */
        msi->phandle = dev->dev.of_node->phandle;
 
-       rc = fsl_msi_init_allocator(msi);
-       if (rc) {
+       err = fsl_msi_init_allocator(msi);
+       if (err) {
                dev_err(&dev->dev, "Error allocating MSI bitmap\n");
                goto error_out;
        }
 
        p = of_get_property(dev->dev.of_node, "msi-available-ranges", &len);
-       if (p && len % (2 * sizeof(u32)) != 0) {
-               dev_err(&dev->dev, "%s: Malformed msi-available-ranges property\n",
-                       __func__);
-               err = -EINVAL;
-               goto error_out;
-       }
 
-       if (!p) {
-               p = all_avail;
-               len = sizeof(all_avail);
-       }
+       if (of_device_is_compatible(dev->dev.of_node, "fsl,mpic-msi-v4.3")) {
+               msi->srs_shift = MSIIR1_SRS_SHIFT;
+               msi->ibs_shift = MSIIR1_IBS_SHIFT;
+               if (p)
+                       dev_warn(&dev->dev, "%s: dose not support msi-available-ranges property\n",
+                               __func__);
+
+               for (irq_index = 0; irq_index < NR_MSI_REG_MSIIR1;
+                    irq_index++) {
+                       err = fsl_msi_setup_hwirq(msi, dev,
+                                                 irq_index, irq_index);
+                       if (err)
+                               goto error_out;
+               }
+       } else {
+               static const u32 all_avail[] =
+                       { 0, NR_MSI_REG_MSIIR * IRQS_PER_MSI_REG };
 
-       for (irq_index = 0, i = 0; i < len / (2 * sizeof(u32)); i++) {
-               if (p[i * 2] % IRQS_PER_MSI_REG ||
-                   p[i * 2 + 1] % IRQS_PER_MSI_REG) {
-                       printk(KERN_WARNING "%s: %s: msi available range of %u at %u is not IRQ-aligned\n",
-                              __func__, dev->dev.of_node->full_name,
-                              p[i * 2 + 1], p[i * 2]);
+               msi->srs_shift = MSIIR_SRS_SHIFT;
+               msi->ibs_shift = MSIIR_IBS_SHIFT;
+
+               if (p && len % (2 * sizeof(u32)) != 0) {
+                       dev_err(&dev->dev, "%s: Malformed msi-available-ranges property\n",
+                               __func__);
                        err = -EINVAL;
                        goto error_out;
                }
 
-               offset = p[i * 2] / IRQS_PER_MSI_REG;
-               count = p[i * 2 + 1] / IRQS_PER_MSI_REG;
+               if (!p) {
+                       p = all_avail;
+                       len = sizeof(all_avail);
+               }
 
-               for (j = 0; j < count; j++, irq_index++) {
-                       err = fsl_msi_setup_hwirq(msi, dev, offset + j, irq_index);
-                       if (err)
+               for (irq_index = 0, i = 0; i < len / (2 * sizeof(u32)); i++) {
+                       if (p[i * 2] % IRQS_PER_MSI_REG ||
+                           p[i * 2 + 1] % IRQS_PER_MSI_REG) {
+                               pr_warn("%s: %s: msi available range of %u at %u is not IRQ-aligned\n",
+                                      __func__, dev->dev.of_node->full_name,
+                                      p[i * 2 + 1], p[i * 2]);
+                               err = -EINVAL;
                                goto error_out;
+                       }
+
+                       offset = p[i * 2] / IRQS_PER_MSI_REG;
+                       count = p[i * 2 + 1] / IRQS_PER_MSI_REG;
+
+                       for (j = 0; j < count; j++, irq_index++) {
+                               err = fsl_msi_setup_hwirq(msi, dev, offset + j,
+                                                         irq_index);
+                               if (err)
+                                       goto error_out;
+                       }
                }
        }
 
@@ -508,6 +559,10 @@ static const struct of_device_id fsl_of_msi_ids[] = {
                .compatible = "fsl,mpic-msi",
                .data = &mpic_msi_feature,
        },
+       {
+               .compatible = "fsl,mpic-msi-v4.3",
+               .data = &mpic_msi_feature,
+       },
        {
                .compatible = "fsl,ipic-msi",
                .data = &ipic_msi_feature,
index 8225f8653f787185d0a24dea20b11fc0e2966208..df9aa9fe0933b1f1f08ca8dcb270218b955e5b5b 100644 (file)
 #include <linux/of.h>
 #include <asm/msi_bitmap.h>
 
-#define NR_MSI_REG             8
+#define NR_MSI_REG_MSIIR       8  /* MSIIR can index 8 MSI registers */
+#define NR_MSI_REG_MSIIR1      16 /* MSIIR1 can index 16 MSI registers */
+#define NR_MSI_REG_MAX         NR_MSI_REG_MSIIR1
 #define IRQS_PER_MSI_REG       32
-#define NR_MSI_IRQS    (NR_MSI_REG * IRQS_PER_MSI_REG)
+#define NR_MSI_IRQS_MAX        (NR_MSI_REG_MAX * IRQS_PER_MSI_REG)
 
 #define FSL_PIC_IP_MASK   0x0000000F
 #define FSL_PIC_IP_MPIC   0x00000001
@@ -31,9 +33,11 @@ struct fsl_msi {
        unsigned long cascade_irq;
 
        u32 msiir_offset; /* Offset of MSIIR, relative to start of CCSR */
+       u32 ibs_shift; /* Shift of interrupt bit select */
+       u32 srs_shift; /* Shift of the shared interrupt register select */
        void __iomem *msi_regs;
        u32 feature;
-       int msi_virqs[NR_MSI_REG];
+       int msi_virqs[NR_MSI_REG_MAX];
 
        struct msi_bitmap bitmap;
 
index 46ac1ddea6832107b045f0391ca3c4f1d86be579..ccfb50ddfe38f7242b071c6519aaa1eb43daf6a2 100644 (file)
 #include <linux/memblock.h>
 #include <linux/log2.h>
 #include <linux/slab.h>
+#include <linux/uaccess.h>
 
 #include <asm/io.h>
 #include <asm/prom.h>
 #include <asm/pci-bridge.h>
+#include <asm/ppc-pci.h>
 #include <asm/machdep.h>
+#include <asm/disassemble.h>
+#include <asm/ppc-opcode.h>
 #include <sysdev/fsl_soc.h>
 #include <sysdev/fsl_pci.h>
 
@@ -64,7 +68,7 @@ static int fsl_pcie_check_link(struct pci_controller *hose)
        if (hose->indirect_type & PPC_INDIRECT_TYPE_FSL_CFG_REG_LINK) {
                if (hose->ops->read == fsl_indirect_read_config) {
                        struct pci_bus bus;
-                       bus.number = 0;
+                       bus.number = hose->first_busno;
                        bus.sysdata = hose;
                        bus.ops = hose->ops;
                        indirect_read_config(&bus, 0, PCIE_LTSSM, 4, &val);
@@ -297,10 +301,10 @@ static void setup_pci_atmu(struct pci_controller *hose)
        if (early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP)) {
                /* Size window to exact size if power-of-two or one size up */
                if ((1ull << mem_log) != mem) {
+                       mem_log++;
                        if ((1ull << mem_log) > mem)
                                pr_info("%s: Setting PCI inbound window "
                                        "greater than memory size\n", name);
-                       mem_log++;
                }
 
                piwar |= ((mem_log - 1) & PIWAR_SZ_MASK);
@@ -373,7 +377,9 @@ static void setup_pci_atmu(struct pci_controller *hose)
        }
 
        if (hose->dma_window_size < mem) {
-#ifndef CONFIG_SWIOTLB
+#ifdef CONFIG_SWIOTLB
+               ppc_swiotlb_enable = 1;
+#else
                pr_err("%s: ERROR: Memory size exceeds PCI ATMU ability to "
                        "map - enable CONFIG_SWIOTLB to avoid dma errors.\n",
                         name);
@@ -868,6 +874,160 @@ u64 fsl_pci_immrbar_base(struct pci_controller *hose)
        return 0;
 }
 
+#ifdef CONFIG_E500
+static int mcheck_handle_load(struct pt_regs *regs, u32 inst)
+{
+       unsigned int rd, ra, rb, d;
+
+       rd = get_rt(inst);
+       ra = get_ra(inst);
+       rb = get_rb(inst);
+       d = get_d(inst);
+
+       switch (get_op(inst)) {
+       case 31:
+               switch (get_xop(inst)) {
+               case OP_31_XOP_LWZX:
+               case OP_31_XOP_LWBRX:
+                       regs->gpr[rd] = 0xffffffff;
+                       break;
+
+               case OP_31_XOP_LWZUX:
+                       regs->gpr[rd] = 0xffffffff;
+                       regs->gpr[ra] += regs->gpr[rb];
+                       break;
+
+               case OP_31_XOP_LBZX:
+                       regs->gpr[rd] = 0xff;
+                       break;
+
+               case OP_31_XOP_LBZUX:
+                       regs->gpr[rd] = 0xff;
+                       regs->gpr[ra] += regs->gpr[rb];
+                       break;
+
+               case OP_31_XOP_LHZX:
+               case OP_31_XOP_LHBRX:
+                       regs->gpr[rd] = 0xffff;
+                       break;
+
+               case OP_31_XOP_LHZUX:
+                       regs->gpr[rd] = 0xffff;
+                       regs->gpr[ra] += regs->gpr[rb];
+                       break;
+
+               case OP_31_XOP_LHAX:
+                       regs->gpr[rd] = ~0UL;
+                       break;
+
+               case OP_31_XOP_LHAUX:
+                       regs->gpr[rd] = ~0UL;
+                       regs->gpr[ra] += regs->gpr[rb];
+                       break;
+
+               default:
+                       return 0;
+               }
+               break;
+
+       case OP_LWZ:
+               regs->gpr[rd] = 0xffffffff;
+               break;
+
+       case OP_LWZU:
+               regs->gpr[rd] = 0xffffffff;
+               regs->gpr[ra] += (s16)d;
+               break;
+
+       case OP_LBZ:
+               regs->gpr[rd] = 0xff;
+               break;
+
+       case OP_LBZU:
+               regs->gpr[rd] = 0xff;
+               regs->gpr[ra] += (s16)d;
+               break;
+
+       case OP_LHZ:
+               regs->gpr[rd] = 0xffff;
+               break;
+
+       case OP_LHZU:
+               regs->gpr[rd] = 0xffff;
+               regs->gpr[ra] += (s16)d;
+               break;
+
+       case OP_LHA:
+               regs->gpr[rd] = ~0UL;
+               break;
+
+       case OP_LHAU:
+               regs->gpr[rd] = ~0UL;
+               regs->gpr[ra] += (s16)d;
+               break;
+
+       default:
+               return 0;
+       }
+
+       return 1;
+}
+
+static int is_in_pci_mem_space(phys_addr_t addr)
+{
+       struct pci_controller *hose;
+       struct resource *res;
+       int i;
+
+       list_for_each_entry(hose, &hose_list, list_node) {
+               if (!(hose->indirect_type & PPC_INDIRECT_TYPE_EXT_REG))
+                       continue;
+
+               for (i = 0; i < 3; i++) {
+                       res = &hose->mem_resources[i];
+                       if ((res->flags & IORESOURCE_MEM) &&
+                               addr >= res->start && addr <= res->end)
+                               return 1;
+               }
+       }
+       return 0;
+}
+
+int fsl_pci_mcheck_exception(struct pt_regs *regs)
+{
+       u32 inst;
+       int ret;
+       phys_addr_t addr = 0;
+
+       /* Let KVM/QEMU deal with the exception */
+       if (regs->msr & MSR_GS)
+               return 0;
+
+#ifdef CONFIG_PHYS_64BIT
+       addr = mfspr(SPRN_MCARU);
+       addr <<= 32;
+#endif
+       addr += mfspr(SPRN_MCAR);
+
+       if (is_in_pci_mem_space(addr)) {
+               if (user_mode(regs)) {
+                       pagefault_disable();
+                       ret = get_user(regs->nip, &inst);
+                       pagefault_enable();
+               } else {
+                       ret = probe_kernel_address(regs->nip, inst);
+               }
+
+               if (mcheck_handle_load(regs, inst)) {
+                       regs->nip += 4;
+                       return 1;
+               }
+       }
+
+       return 0;
+}
+#endif
+
 #if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx)
 static const struct of_device_id pci_ids[] = {
        { .compatible = "fsl,mpc8540-pci", },
@@ -928,28 +1088,10 @@ static int fsl_pci_probe(struct platform_device *pdev)
 {
        int ret;
        struct device_node *node;
-#ifdef CONFIG_SWIOTLB
-       struct pci_controller *hose;
-#endif
 
        node = pdev->dev.of_node;
        ret = fsl_add_bridge(pdev, fsl_pci_primary == node);
 
-#ifdef CONFIG_SWIOTLB
-       if (ret == 0) {
-               hose = pci_find_hose_for_OF_device(pdev->dev.of_node);
-
-               /*
-                * if we couldn't map all of DRAM via the dma windows
-                * we need SWIOTLB to handle buffers located outside of
-                * dma capable memory region
-                */
-               if (memblock_end_of_DRAM() - 1 > hose->dma_window_base_cur +
-                               hose->dma_window_size)
-                       ppc_swiotlb_enable = 1;
-       }
-#endif
-
        mpc85xx_pci_err_probe(pdev);
 
        return 0;
index 72b5625330e27be63b3d1d3084aac15e189c3b7f..8d455df584711c84c004468be1656fd4b55e36eb 100644 (file)
 
 struct platform_device;
 
+
+/* FSL PCI controller BRR1 register */
+#define PCI_FSL_BRR1      0xbf8
+#define PCI_FSL_BRR1_VER 0xffff
+
 #define PCIE_LTSSM     0x0404          /* PCIE Link Training and Status */
 #define PCIE_LTSSM_L0  0x16            /* L0 state */
 #define PCIE_IP_REV_2_2                0x02080202 /* PCIE IP block version Rev2.2 */
@@ -126,5 +131,11 @@ static inline int mpc85xx_pci_err_probe(struct platform_device *op)
 }
 #endif
 
+#ifdef CONFIG_FSL_PCI
+extern int fsl_pci_mcheck_exception(struct pt_regs *);
+#else
+static inline int fsl_pci_mcheck_exception(struct pt_regs *regs) {return 0; }
+#endif
+
 #endif /* __POWERPC_FSL_PCI_H */
 #endif /* __KERNEL__ */
index 7cd728b3b5e44129a7b49c0438454ed23e4f8aee..9dee47071af8a74bc61cf433da5366d8faef1d11 100644 (file)
@@ -216,7 +216,7 @@ static int __init icp_native_init_one_node(struct device_node *np,
                                           unsigned int *indx)
 {
        unsigned int ilen;
-       const u32 *ireg;
+       const __be32 *ireg;
        int i;
        int reg_tuple_size;
        int num_servers = 0;
index 9049d9f444857fea1c0d50a87164fa63e785097b..fe0cca4771648f22690dd8e216967798d8cf4843 100644 (file)
@@ -49,7 +49,7 @@ void xics_update_irq_servers(void)
        int i, j;
        struct device_node *np;
        u32 ilen;
-       const u32 *ireg;
+       const __be32 *ireg;
        u32 hcpuid;
 
        /* Find the server numbers for the boot cpu. */
@@ -75,8 +75,8 @@ void xics_update_irq_servers(void)
         * default distribution server
         */
        for (j = 0; j < i; j += 2) {
-               if (ireg[j] == hcpuid) {
-                       xics_default_distrib_server = ireg[j+1];
+               if (be32_to_cpu(ireg[j]) == hcpuid) {
+                       xics_default_distrib_server = be32_to_cpu(ireg[j+1]);
                        break;
                }
        }
@@ -383,7 +383,7 @@ void __init xics_register_ics(struct ics *ics)
 static void __init xics_get_server_size(void)
 {
        struct device_node *np;
-       const u32 *isize;
+       const __be32 *isize;
 
        /* We fetch the interrupt server size from the first ICS node
         * we find if any
@@ -394,7 +394,7 @@ static void __init xics_get_server_size(void)
        isize = of_get_property(np, "ibm,interrupt-server#-size", NULL);
        if (!isize)
                return;
-       xics_interrupt_server_size = *isize;
+       xics_interrupt_server_size = be32_to_cpu(*isize);
        of_node_put(np);
 }
 
index 8a4cae78f03c91e510f307d25e8f5467211725c3..8b7892bf6d8b0640f13e852330f0a2e7f83b1b15 100644 (file)
@@ -116,6 +116,7 @@ config S390
        select HAVE_FUNCTION_GRAPH_TRACER
        select HAVE_FUNCTION_TRACER
        select HAVE_FUNCTION_TRACE_MCOUNT_TEST
+       select HAVE_GENERIC_HARDIRQS
        select HAVE_KERNEL_BZIP2
        select HAVE_KERNEL_GZIP
        select HAVE_KERNEL_LZ4
@@ -445,6 +446,16 @@ config PCI_NR_FUNCTIONS
          This allows you to specify the maximum number of PCI functions which
          this kernel will support.
 
+config PCI_NR_MSI
+       int "Maximum number of MSI interrupts (64-32768)"
+       range 64 32768
+       default "256"
+       help
+         This defines the number of virtual interrupts the kernel will
+         provide for MSI interrupts. If you configure your system to have
+         too few drivers will fail to allocate MSI interrupts for all
+         PCI devices.
+
 source "drivers/pci/Kconfig"
 source "drivers/pci/pcie/Kconfig"
 source "drivers/pci/hotplug/Kconfig"
index 4066cee0c2d2635e32a59fecb0eaf2e05c565776..4bbb5957ed1b6db504cec7328100af49d70607e3 100644 (file)
@@ -9,6 +9,8 @@
 #ifndef _ASM_S390_AIRQ_H
 #define _ASM_S390_AIRQ_H
 
+#include <linux/bit_spinlock.h>
+
 struct airq_struct {
        struct hlist_node list;         /* Handler queueing. */
        void (*handler)(struct airq_struct *);  /* Thin-interrupt handler */
@@ -23,4 +25,69 @@ struct airq_struct {
 int register_adapter_interrupt(struct airq_struct *airq);
 void unregister_adapter_interrupt(struct airq_struct *airq);
 
+/* Adapter interrupt bit vector */
+struct airq_iv {
+       unsigned long *vector;  /* Adapter interrupt bit vector */
+       unsigned long *avail;   /* Allocation bit mask for the bit vector */
+       unsigned long *bitlock; /* Lock bit mask for the bit vector */
+       unsigned long *ptr;     /* Pointer associated with each bit */
+       unsigned int *data;     /* 32 bit value associated with each bit */
+       unsigned long bits;     /* Number of bits in the vector */
+       unsigned long end;      /* Number of highest allocated bit + 1 */
+       spinlock_t lock;        /* Lock to protect alloc & free */
+};
+
+#define AIRQ_IV_ALLOC  1       /* Use an allocation bit mask */
+#define AIRQ_IV_BITLOCK        2       /* Allocate the lock bit mask */
+#define AIRQ_IV_PTR    4       /* Allocate the ptr array */
+#define AIRQ_IV_DATA   8       /* Allocate the data array */
+
+struct airq_iv *airq_iv_create(unsigned long bits, unsigned long flags);
+void airq_iv_release(struct airq_iv *iv);
+unsigned long airq_iv_alloc_bit(struct airq_iv *iv);
+void airq_iv_free_bit(struct airq_iv *iv, unsigned long bit);
+unsigned long airq_iv_scan(struct airq_iv *iv, unsigned long start,
+                          unsigned long end);
+
+static inline unsigned long airq_iv_end(struct airq_iv *iv)
+{
+       return iv->end;
+}
+
+static inline void airq_iv_lock(struct airq_iv *iv, unsigned long bit)
+{
+       const unsigned long be_to_le = BITS_PER_LONG - 1;
+       bit_spin_lock(bit ^ be_to_le, iv->bitlock);
+}
+
+static inline void airq_iv_unlock(struct airq_iv *iv, unsigned long bit)
+{
+       const unsigned long be_to_le = BITS_PER_LONG - 1;
+       bit_spin_unlock(bit ^ be_to_le, iv->bitlock);
+}
+
+static inline void airq_iv_set_data(struct airq_iv *iv, unsigned long bit,
+                                   unsigned int data)
+{
+       iv->data[bit] = data;
+}
+
+static inline unsigned int airq_iv_get_data(struct airq_iv *iv,
+                                           unsigned long bit)
+{
+       return iv->data[bit];
+}
+
+static inline void airq_iv_set_ptr(struct airq_iv *iv, unsigned long bit,
+                                  unsigned long ptr)
+{
+       iv->ptr[bit] = ptr;
+}
+
+static inline unsigned long airq_iv_get_ptr(struct airq_iv *iv,
+                                           unsigned long bit)
+{
+       return iv->ptr[bit];
+}
+
 #endif /* _ASM_S390_AIRQ_H */
index 7d46767587337c3874bd61c43e01a0a72f965303..10135a38673c04894c69e36ee244a756ec28d30e 100644 (file)
@@ -216,7 +216,7 @@ static inline void __set_bit(unsigned long nr, volatile unsigned long *ptr)
        addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
        asm volatile(
                "       oc      %O0(1,%R0),%1"
-               : "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc" );
+               : "+Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc");
 }
 
 static inline void 
@@ -244,7 +244,7 @@ __clear_bit(unsigned long nr, volatile unsigned long *ptr)
        addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
        asm volatile(
                "       nc      %O0(1,%R0),%1"
-               : "=Q" (*(char *) addr) : "Q" (_ni_bitmap[nr & 7]) : "cc" );
+               : "+Q" (*(char *) addr) : "Q" (_ni_bitmap[nr & 7]) : "cc");
 }
 
 static inline void 
@@ -271,7 +271,7 @@ static inline void __change_bit(unsigned long nr, volatile unsigned long *ptr)
        addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
        asm volatile(
                "       xc      %O0(1,%R0),%1"
-               : "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc" );
+               : "+Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc");
 }
 
 static inline void 
@@ -301,7 +301,7 @@ test_and_set_bit_simple(unsigned long nr, volatile unsigned long *ptr)
        ch = *(unsigned char *) addr;
        asm volatile(
                "       oc      %O0(1,%R0),%1"
-               : "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7])
+               : "+Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7])
                : "cc", "memory");
        return (ch >> (nr & 7)) & 1;
 }
@@ -320,7 +320,7 @@ test_and_clear_bit_simple(unsigned long nr, volatile unsigned long *ptr)
        ch = *(unsigned char *) addr;
        asm volatile(
                "       nc      %O0(1,%R0),%1"
-               : "=Q" (*(char *) addr) : "Q" (_ni_bitmap[nr & 7])
+               : "+Q" (*(char *) addr) : "Q" (_ni_bitmap[nr & 7])
                : "cc", "memory");
        return (ch >> (nr & 7)) & 1;
 }
@@ -339,7 +339,7 @@ test_and_change_bit_simple(unsigned long nr, volatile unsigned long *ptr)
        ch = *(unsigned char *) addr;
        asm volatile(
                "       xc      %O0(1,%R0),%1"
-               : "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7])
+               : "+Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7])
                : "cc", "memory");
        return (ch >> (nr & 7)) & 1;
 }
index 0c82ba86e997d5d088b019daa677968fccdaf7e6..a908d2941c5d90ab305f8e7c490585e20113579a 100644 (file)
@@ -20,4 +20,9 @@
 
 #define HARDIRQ_BITS   8
 
+static inline void ack_bad_irq(unsigned int irq)
+{
+       printk(KERN_CRIT "unexpected IRQ trap at vector %02x\n", irq);
+}
+
 #endif /* __ASM_HARDIRQ_H */
index bd90359d6d22e723a8de5d3ed09450ef066a617e..11eae5f55b709d1e37e69f2b44eb762d48475e6e 100644 (file)
@@ -17,6 +17,9 @@
 
 void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
                     pte_t *ptep, pte_t pte);
+pte_t huge_ptep_get(pte_t *ptep);
+pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
+                             unsigned long addr, pte_t *ptep);
 
 /*
  * If the arch doesn't supply something else, assume that hugepage
@@ -38,147 +41,75 @@ static inline int prepare_hugepage_range(struct file *file,
 int arch_prepare_hugepage(struct page *page);
 void arch_release_hugepage(struct page *page);
 
-static inline pte_t huge_pte_wrprotect(pte_t pte)
+static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
+                                 pte_t *ptep)
 {
-       pte_val(pte) |= _PAGE_RO;
-       return pte;
+       pte_val(*ptep) = _SEGMENT_ENTRY_EMPTY;
 }
 
-static inline int huge_pte_none(pte_t pte)
+static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
+                                        unsigned long address, pte_t *ptep)
 {
-       return (pte_val(pte) & _SEGMENT_ENTRY_INV) &&
-               !(pte_val(pte) & _SEGMENT_ENTRY_RO);
+       huge_ptep_get_and_clear(vma->vm_mm, address, ptep);
 }
 
-static inline pte_t huge_ptep_get(pte_t *ptep)
+static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
+                                            unsigned long addr, pte_t *ptep,
+                                            pte_t pte, int dirty)
 {
-       pte_t pte = *ptep;
-       unsigned long mask;
-
-       if (!MACHINE_HAS_HPAGE) {
-               ptep = (pte_t *) (pte_val(pte) & _SEGMENT_ENTRY_ORIGIN);
-               if (ptep) {
-                       mask = pte_val(pte) &
-                               (_SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO);
-                       pte = pte_mkhuge(*ptep);
-                       pte_val(pte) |= mask;
-               }
+       int changed = !pte_same(huge_ptep_get(ptep), pte);
+       if (changed) {
+               huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
+               set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
        }
-       return pte;
+       return changed;
 }
 
-static inline void __pmd_csp(pmd_t *pmdp)
+static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
+                                          unsigned long addr, pte_t *ptep)
 {
-       register unsigned long reg2 asm("2") = pmd_val(*pmdp);
-       register unsigned long reg3 asm("3") = pmd_val(*pmdp) |
-                                              _SEGMENT_ENTRY_INV;
-       register unsigned long reg4 asm("4") = ((unsigned long) pmdp) + 5;
-
-       asm volatile(
-               "       csp %1,%3"
-               : "=m" (*pmdp)
-               : "d" (reg2), "d" (reg3), "d" (reg4), "m" (*pmdp) : "cc");
+       pte_t pte = huge_ptep_get_and_clear(mm, addr, ptep);
+       set_huge_pte_at(mm, addr, ptep, pte_wrprotect(pte));
 }
 
-static inline void huge_ptep_invalidate(struct mm_struct *mm,
-                                       unsigned long address, pte_t *ptep)
-{
-       pmd_t *pmdp = (pmd_t *) ptep;
-
-       if (MACHINE_HAS_IDTE)
-               __pmd_idte(address, pmdp);
-       else
-               __pmd_csp(pmdp);
-       pmd_val(*pmdp) = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY;
-}
-
-static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
-                                           unsigned long addr, pte_t *ptep)
-{
-       pte_t pte = huge_ptep_get(ptep);
-
-       huge_ptep_invalidate(mm, addr, ptep);
-       return pte;
-}
-
-#define huge_ptep_set_access_flags(__vma, __addr, __ptep, __entry, __dirty) \
-({                                                                         \
-       int __changed = !pte_same(huge_ptep_get(__ptep), __entry);          \
-       if (__changed) {                                                    \
-               huge_ptep_invalidate((__vma)->vm_mm, __addr, __ptep);       \
-               set_huge_pte_at((__vma)->vm_mm, __addr, __ptep, __entry);   \
-       }                                                                   \
-       __changed;                                                          \
-})
-
-#define huge_ptep_set_wrprotect(__mm, __addr, __ptep)                  \
-({                                                                     \
-       pte_t __pte = huge_ptep_get(__ptep);                            \
-       if (huge_pte_write(__pte)) {                                    \
-               huge_ptep_invalidate(__mm, __addr, __ptep);             \
-               set_huge_pte_at(__mm, __addr, __ptep,                   \
-                               huge_pte_wrprotect(__pte));             \
-       }                                                               \
-})
-
-static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
-                                        unsigned long address, pte_t *ptep)
+static inline pte_t mk_huge_pte(struct page *page, pgprot_t pgprot)
 {
-       huge_ptep_invalidate(vma->vm_mm, address, ptep);
+       return mk_pte(page, pgprot);
 }
 
-static inline pte_t mk_huge_pte(struct page *page, pgprot_t pgprot)
+static inline int huge_pte_none(pte_t pte)
 {
-       pte_t pte;
-       pmd_t pmd;
-
-       pmd = mk_pmd_phys(page_to_phys(page), pgprot);
-       pte_val(pte) = pmd_val(pmd);
-       return pte;
+       return pte_none(pte);
 }
 
 static inline int huge_pte_write(pte_t pte)
 {
-       pmd_t pmd;
-
-       pmd_val(pmd) = pte_val(pte);
-       return pmd_write(pmd);
+       return pte_write(pte);
 }
 
 static inline int huge_pte_dirty(pte_t pte)
 {
-       /* No dirty bit in the segment table entry. */
-       return 0;
+       return pte_dirty(pte);
 }
 
 static inline pte_t huge_pte_mkwrite(pte_t pte)
 {
-       pmd_t pmd;
-
-       pmd_val(pmd) = pte_val(pte);
-       pte_val(pte) = pmd_val(pmd_mkwrite(pmd));
-       return pte;
+       return pte_mkwrite(pte);
 }
 
 static inline pte_t huge_pte_mkdirty(pte_t pte)
 {
-       /* No dirty bit in the segment table entry. */
-       return pte;
+       return pte_mkdirty(pte);
 }
 
-static inline pte_t huge_pte_modify(pte_t pte, pgprot_t newprot)
+static inline pte_t huge_pte_wrprotect(pte_t pte)
 {
-       pmd_t pmd;
-
-       pmd_val(pmd) = pte_val(pte);
-       pte_val(pte) = pmd_val(pmd_modify(pmd, newprot));
-       return pte;
+       return pte_wrprotect(pte);
 }
 
-static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
-                                 pte_t *ptep)
+static inline pte_t huge_pte_modify(pte_t pte, pgprot_t newprot)
 {
-       pmd_clear((pmd_t *) ptep);
+       return pte_modify(pte, newprot);
 }
 
 #endif /* _ASM_S390_HUGETLB_H */
index 7e3d2586c1ffaa84adea001e6b40afc6fa65aa73..ee96a8b697f9479ce45e46f02b166efa6cfe54f8 100644 (file)
@@ -4,19 +4,8 @@
 #include <linux/msi.h>
 #include <linux/pci.h>
 
-static inline struct msi_desc *irq_get_msi_desc(unsigned int irq)
-{
-       return __irq_get_msi_desc(irq);
-}
-
-/* Must be called with msi map lock held */
-static inline int irq_set_msi_desc(unsigned int irq, struct msi_desc *msi)
-{
-       if (!msi)
-               return -EINVAL;
-
-       msi->irq = irq;
-       return 0;
-}
+void __init init_airq_interrupts(void);
+void __init init_cio_interrupts(void);
+void __init init_ext_interrupts(void);
 
 #endif
index 87c17bfb2968e8423fed70784417b36384754f91..1eaa3625803c1d30f41ee4a98847c82619a93857 100644 (file)
@@ -1,17 +1,28 @@
 #ifndef _ASM_IRQ_H
 #define _ASM_IRQ_H
 
+#define EXT_INTERRUPT  1
+#define IO_INTERRUPT   2
+#define THIN_INTERRUPT 3
+
+#define NR_IRQS_BASE   4
+
+#ifdef CONFIG_PCI_NR_MSI
+# define NR_IRQS       (NR_IRQS_BASE + CONFIG_PCI_NR_MSI)
+#else
+# define NR_IRQS       NR_IRQS_BASE
+#endif
+
+/* This number is used when no interrupt has been assigned */
+#define NO_IRQ         0
+
+#ifndef __ASSEMBLY__
+
 #include <linux/hardirq.h>
 #include <linux/percpu.h>
 #include <linux/cache.h>
 #include <linux/types.h>
 
-enum interruption_main_class {
-       EXTERNAL_INTERRUPT,
-       IO_INTERRUPT,
-       NR_IRQS
-};
-
 enum interruption_class {
        IRQEXT_CLK,
        IRQEXT_EXC,
@@ -72,14 +83,8 @@ void service_subclass_irq_unregister(void);
 void measurement_alert_subclass_register(void);
 void measurement_alert_subclass_unregister(void);
 
-#ifdef CONFIG_LOCKDEP
-#  define disable_irq_nosync_lockdep(irq)      disable_irq_nosync(irq)
-#  define disable_irq_nosync_lockdep_irqsave(irq, flags) \
-                                               disable_irq_nosync(irq)
-#  define disable_irq_lockdep(irq)             disable_irq(irq)
-#  define enable_irq_lockdep(irq)              enable_irq(irq)
-#  define enable_irq_lockdep_irqrestore(irq, flags) \
-                                               enable_irq(irq)
-#endif
+#define irq_canonicalize(irq)  (irq)
+
+#endif /* __ASSEMBLY__ */
 
 #endif /* _ASM_IRQ_H */
index 084e7755ed9b7958f3f9cb9db9c02e8a4a8004b2..7b7fce4e846941832282adb57e760e49c506ac0a 100644 (file)
@@ -77,8 +77,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
        WARN_ON(atomic_read(&prev->context.attach_count) < 0);
        atomic_inc(&next->context.attach_count);
        /* Check for TLBs not flushed yet */
-       if (next->context.flush_mm)
-               __tlb_flush_mm(next);
+       __tlb_flush_mm_lazy(next);
 }
 
 #define enter_lazy_tlb(mm,tsk) do { } while (0)
index 5d64fb7619ccfc41047c3b1bb6ad45ade2704d1d..27f04801ec8bc2178d6286f69a037190b64dbae5 100644 (file)
 
 void storage_key_init_range(unsigned long start, unsigned long end);
 
-static inline unsigned long pfmf(unsigned long function, unsigned long address)
-{
-       asm volatile(
-               "       .insn   rre,0xb9af0000,%[function],%[address]"
-               : [address] "+a" (address)
-               : [function] "d" (function)
-               : "memory");
-       return address;
-}
-
 static inline void clear_page(void *page)
 {
        register unsigned long reg1 asm ("1") = 0;
index 6e577ba0e5daa12256590018dfeddb4071a1e892..d0872769d44edb5e1438e02058efef6550fe40fc 100644 (file)
@@ -53,14 +53,9 @@ struct zpci_fmb {
        atomic64_t unmapped_pages;
 } __packed __aligned(16);
 
-struct msi_map {
-       unsigned long irq;
-       struct msi_desc *msi;
-       struct hlist_node msi_chain;
-};
-
-#define ZPCI_NR_MSI_VECS       64
-#define ZPCI_MSI_MASK          (ZPCI_NR_MSI_VECS - 1)
+#define ZPCI_MSI_VEC_BITS      11
+#define ZPCI_MSI_VEC_MAX       (1 << ZPCI_MSI_VEC_BITS)
+#define ZPCI_MSI_VEC_MASK      (ZPCI_MSI_VEC_MAX - 1)
 
 enum zpci_state {
        ZPCI_FN_STATE_RESERVED,
@@ -91,8 +86,7 @@ struct zpci_dev {
 
        /* IRQ stuff */
        u64             msi_addr;       /* MSI address */
-       struct zdev_irq_map *irq_map;
-       struct msi_map *msi_map[ZPCI_NR_MSI_VECS];
+       struct airq_iv *aibv;           /* adapter interrupt bit vector */
        unsigned int    aisb;           /* number of the summary bit */
 
        /* DMA stuff */
@@ -151,14 +145,6 @@ int clp_add_pci_device(u32, u32, int);
 int clp_enable_fh(struct zpci_dev *, u8);
 int clp_disable_fh(struct zpci_dev *);
 
-/* MSI */
-struct msi_desc *__irq_get_msi_desc(unsigned int);
-int zpci_msi_set_mask_bits(struct msi_desc *, u32, u32);
-int zpci_setup_msi_irq(struct zpci_dev *, struct msi_desc *, unsigned int, int);
-void zpci_teardown_msi_irq(struct zpci_dev *, struct msi_desc *);
-int zpci_msihash_init(void);
-void zpci_msihash_exit(void);
-
 #ifdef CONFIG_PCI
 /* Error handling and recovery */
 void zpci_event_error(void *);
index e6a2bdd4d7059e4e5bfa9e86c77a08554f6cfb1a..df6eac9f0cb4e324069e3bae65246a7a99f02888 100644 (file)
@@ -79,11 +79,11 @@ struct zpci_fib {
 } __packed;
 
 
-int s390pci_mod_fc(u64 req, struct zpci_fib *fib);
-int s390pci_refresh_trans(u64 fn, u64 addr, u64 range);
-int s390pci_load(u64 *data, u64 req, u64 offset);
-int s390pci_store(u64 data, u64 req, u64 offset);
-int s390pci_store_block(const u64 *data, u64 req, u64 offset);
-void set_irq_ctrl(u16 ctl, char *unused, u8 isc);
+int zpci_mod_fc(u64 req, struct zpci_fib *fib);
+int zpci_refresh_trans(u64 fn, u64 addr, u64 range);
+int zpci_load(u64 *data, u64 req, u64 offset);
+int zpci_store(u64 data, u64 req, u64 offset);
+int zpci_store_block(const u64 *data, u64 req, u64 offset);
+void zpci_set_irq_ctrl(u16 ctl, char *unused, u8 isc);
 
 #endif
index 83a9caa6ae530fb1f500d86c3bf207f32f2a8b21..d194d544d6943df1f39d31087cbe9acdf0fc449c 100644 (file)
@@ -36,7 +36,7 @@ static inline RETTYPE zpci_read_##RETTYPE(const volatile void __iomem *addr)  \
        u64 data;                                                               \
        int rc;                                                                 \
                                                                                \
-       rc = s390pci_load(&data, req, ZPCI_OFFSET(addr));                       \
+       rc = zpci_load(&data, req, ZPCI_OFFSET(addr));                          \
        if (rc)                                                                 \
                data = -1ULL;                                                   \
        return (RETTYPE) data;                                                  \
@@ -50,7 +50,7 @@ static inline void zpci_write_##VALTYPE(VALTYPE val,                          \
        u64 req = ZPCI_CREATE_REQ(entry->fh, entry->bar, LENGTH);               \
        u64 data = (VALTYPE) val;                                               \
                                                                                \
-       s390pci_store(data, req, ZPCI_OFFSET(addr));                            \
+       zpci_store(data, req, ZPCI_OFFSET(addr));                               \
 }
 
 zpci_read(8, u64)
@@ -83,7 +83,7 @@ static inline int zpci_write_single(u64 req, const u64 *data, u64 offset, u8 len
                val = 0;                /* let FW report error */
                break;
        }
-       return s390pci_store(val, req, offset);
+       return zpci_store(val, req, offset);
 }
 
 static inline int zpci_read_single(u64 req, u64 *dst, u64 offset, u8 len)
@@ -91,7 +91,7 @@ static inline int zpci_read_single(u64 req, u64 *dst, u64 offset, u8 len)
        u64 data;
        int cc;
 
-       cc = s390pci_load(&data, req, offset);
+       cc = zpci_load(&data, req, offset);
        if (cc)
                goto out;
 
@@ -115,7 +115,7 @@ out:
 
 static inline int zpci_write_block(u64 req, const u64 *data, u64 offset)
 {
-       return s390pci_store_block(data, req, offset);
+       return zpci_store_block(data, req, offset);
 }
 
 static inline u8 zpci_get_max_write_size(u64 src, u64 dst, int len, int max)
index 75fb726de91f802a68293d67c3b8541281044d3c..a38d78cd595b1c6071cdf0c9774297f0ea85131d 100644 (file)
@@ -217,63 +217,50 @@ extern unsigned long MODULES_END;
 
 /* Hardware bits in the page table entry */
 #define _PAGE_CO       0x100           /* HW Change-bit override */
-#define _PAGE_RO       0x200           /* HW read-only bit  */
+#define _PAGE_PROTECT  0x200           /* HW read-only bit  */
 #define _PAGE_INVALID  0x400           /* HW invalid bit    */
+#define _PAGE_LARGE    0x800           /* Bit to mark a large pte */
 
 /* Software bits in the page table entry */
-#define _PAGE_SWT      0x001           /* SW pte type bit t */
-#define _PAGE_SWX      0x002           /* SW pte type bit x */
-#define _PAGE_SWC      0x004           /* SW pte changed bit */
-#define _PAGE_SWR      0x008           /* SW pte referenced bit */
-#define _PAGE_SWW      0x010           /* SW pte write bit */
+#define _PAGE_PRESENT  0x001           /* SW pte present bit */
+#define _PAGE_TYPE     0x002           /* SW pte type bit */
+#define _PAGE_DIRTY    0x004           /* SW pte dirty bit */
+#define _PAGE_YOUNG    0x008           /* SW pte young bit */
+#define _PAGE_WRITE    0x010           /* SW pte write bit */
 #define _PAGE_SPECIAL  0x020           /* SW associated with special page */
 #define __HAVE_ARCH_PTE_SPECIAL
 
 /* Set of bits not changed in pte_modify */
 #define _PAGE_CHG_MASK         (PAGE_MASK | _PAGE_SPECIAL | _PAGE_CO | \
-                                _PAGE_SWC | _PAGE_SWR)
-
-/* Six different types of pages. */
-#define _PAGE_TYPE_EMPTY       0x400
-#define _PAGE_TYPE_NONE                0x401
-#define _PAGE_TYPE_SWAP                0x403
-#define _PAGE_TYPE_FILE                0x601   /* bit 0x002 is used for offset !! */
-#define _PAGE_TYPE_RO          0x200
-#define _PAGE_TYPE_RW          0x000
+                                _PAGE_DIRTY | _PAGE_YOUNG)
 
 /*
- * Only four types for huge pages, using the invalid bit and protection bit
- * of a segment table entry.
- */
-#define _HPAGE_TYPE_EMPTY      0x020   /* _SEGMENT_ENTRY_INV */
-#define _HPAGE_TYPE_NONE       0x220
-#define _HPAGE_TYPE_RO         0x200   /* _SEGMENT_ENTRY_RO  */
-#define _HPAGE_TYPE_RW         0x000
-
-/*
- * PTE type bits are rather complicated. handle_pte_fault uses pte_present,
- * pte_none and pte_file to find out the pte type WITHOUT holding the page
- * table lock. ptep_clear_flush on the other hand uses ptep_clear_flush to
- * invalidate a given pte. ipte sets the hw invalid bit and clears all tlbs
- * for the page. The page table entry is set to _PAGE_TYPE_EMPTY afterwards.
- * This change is done while holding the lock, but the intermediate step
- * of a previously valid pte with the hw invalid bit set can be observed by
- * handle_pte_fault. That makes it necessary that all valid pte types with
- * the hw invalid bit set must be distinguishable from the four pte types
- * empty, none, swap and file.
+ * handle_pte_fault uses pte_present, pte_none and pte_file to find out the
+ * pte type WITHOUT holding the page table lock. The _PAGE_PRESENT bit
+ * is used to distinguish present from not-present ptes. It is changed only
+ * with the page table lock held.
+ *
+ * The following table gives the different possible bit combinations for
+ * the pte hardware and software bits in the last 12 bits of a pte:
  *
- *                     irxt  ipte  irxt
- * _PAGE_TYPE_EMPTY    1000   ->   1000
- * _PAGE_TYPE_NONE     1001   ->   1001
- * _PAGE_TYPE_SWAP     1011   ->   1011
- * _PAGE_TYPE_FILE     11?1   ->   11?1
- * _PAGE_TYPE_RO       0100   ->   1100
- * _PAGE_TYPE_RW       0000   ->   1000
+ *                     842100000000
+ *                     000084210000
+ *                     000000008421
+ *                     .IR....wdytp
+ * empty               .10....00000
+ * swap                        .10....xxx10
+ * file                        .11....xxxx0
+ * prot-none, clean    .10....00x01
+ * prot-none, dirty    .10....01x01
+ * read-only, clean    .01....00x01
+ * read-only, dirty    .01....01x01
+ * read-write, clean   .01....10x01
+ * read-write, dirty   .00....11x01
  *
- * pte_none is true for bits combinations 1000, 1010, 1100, 1110
- * pte_present is true for bits combinations 0000, 0010, 0100, 0110, 1001
- * pte_file is true for bits combinations 1101, 1111
- * swap pte is 1011 and 0001, 0011, 0101, 0111 are invalid.
+ * pte_present is true for the bit pattern .xx...xxxxx1, (pte & 0x001) == 0x001
+ * pte_none    is true for the bit pattern .10...xxxx00, (pte & 0x603) == 0x400
+ * pte_file    is true for the bit pattern .11...xxxxx0, (pte & 0x601) == 0x600
+ * pte_swap    is true for the bit pattern .10...xxxx10, (pte & 0x603) == 0x402
  */
 
 #ifndef CONFIG_64BIT
@@ -287,13 +274,13 @@ extern unsigned long MODULES_END;
 
 /* Bits in the segment table entry */
 #define _SEGMENT_ENTRY_ORIGIN  0x7fffffc0UL    /* page table origin        */
-#define _SEGMENT_ENTRY_RO      0x200   /* page protection bit              */
-#define _SEGMENT_ENTRY_INV     0x20    /* invalid segment table entry      */
+#define _SEGMENT_ENTRY_PROTECT 0x200   /* page protection bit              */
+#define _SEGMENT_ENTRY_INVALID 0x20    /* invalid segment table entry      */
 #define _SEGMENT_ENTRY_COMMON  0x10    /* common segment bit               */
 #define _SEGMENT_ENTRY_PTL     0x0f    /* page table length                */
 
 #define _SEGMENT_ENTRY         (_SEGMENT_ENTRY_PTL)
-#define _SEGMENT_ENTRY_EMPTY   (_SEGMENT_ENTRY_INV)
+#define _SEGMENT_ENTRY_EMPTY   (_SEGMENT_ENTRY_INVALID)
 
 /* Page status table bits for virtualization */
 #define PGSTE_ACC_BITS 0xf0000000UL
@@ -324,8 +311,8 @@ extern unsigned long MODULES_END;
 
 /* Bits in the region table entry */
 #define _REGION_ENTRY_ORIGIN   ~0xfffUL/* region/segment table origin      */
-#define _REGION_ENTRY_RO       0x200   /* region protection bit            */
-#define _REGION_ENTRY_INV      0x20    /* invalid region table entry       */
+#define _REGION_ENTRY_PROTECT  0x200   /* region protection bit            */
+#define _REGION_ENTRY_INVALID  0x20    /* invalid region table entry       */
 #define _REGION_ENTRY_TYPE_MASK        0x0c    /* region/segment table type mask   */
 #define _REGION_ENTRY_TYPE_R1  0x0c    /* region first table type          */
 #define _REGION_ENTRY_TYPE_R2  0x08    /* region second table type         */
@@ -333,11 +320,11 @@ extern unsigned long MODULES_END;
 #define _REGION_ENTRY_LENGTH   0x03    /* region third length              */
 
 #define _REGION1_ENTRY         (_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
-#define _REGION1_ENTRY_EMPTY   (_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INV)
+#define _REGION1_ENTRY_EMPTY   (_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID)
 #define _REGION2_ENTRY         (_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
-#define _REGION2_ENTRY_EMPTY   (_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INV)
+#define _REGION2_ENTRY_EMPTY   (_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID)
 #define _REGION3_ENTRY         (_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
-#define _REGION3_ENTRY_EMPTY   (_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INV)
+#define _REGION3_ENTRY_EMPTY   (_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID)
 
 #define _REGION3_ENTRY_LARGE   0x400   /* RTTE-format control, large page  */
 #define _REGION3_ENTRY_RO      0x200   /* page protection bit              */
@@ -346,16 +333,17 @@ extern unsigned long MODULES_END;
 /* Bits in the segment table entry */
 #define _SEGMENT_ENTRY_ORIGIN_LARGE ~0xfffffUL /* large page address       */
 #define _SEGMENT_ENTRY_ORIGIN  ~0x7ffUL/* segment table origin             */
-#define _SEGMENT_ENTRY_RO      0x200   /* page protection bit              */
-#define _SEGMENT_ENTRY_INV     0x20    /* invalid segment table entry      */
+#define _SEGMENT_ENTRY_PROTECT 0x200   /* page protection bit              */
+#define _SEGMENT_ENTRY_INVALID 0x20    /* invalid segment table entry      */
 
 #define _SEGMENT_ENTRY         (0)
-#define _SEGMENT_ENTRY_EMPTY   (_SEGMENT_ENTRY_INV)
+#define _SEGMENT_ENTRY_EMPTY   (_SEGMENT_ENTRY_INVALID)
 
 #define _SEGMENT_ENTRY_LARGE   0x400   /* STE-format control, large page   */
 #define _SEGMENT_ENTRY_CO      0x100   /* change-recording override   */
+#define _SEGMENT_ENTRY_SPLIT   0x001   /* THP splitting bit */
+
 #define _SEGMENT_ENTRY_SPLIT_BIT 0     /* THP splitting bit number */
-#define _SEGMENT_ENTRY_SPLIT   (1UL << _SEGMENT_ENTRY_SPLIT_BIT)
 
 /* Set of bits not changed in pmd_modify */
 #define _SEGMENT_CHG_MASK      (_SEGMENT_ENTRY_ORIGIN | _SEGMENT_ENTRY_LARGE \
@@ -386,14 +374,13 @@ extern unsigned long MODULES_END;
 /*
  * Page protection definitions.
  */
-#define PAGE_NONE      __pgprot(_PAGE_TYPE_NONE)
-#define PAGE_RO                __pgprot(_PAGE_TYPE_RO)
-#define PAGE_RW                __pgprot(_PAGE_TYPE_RO | _PAGE_SWW)
-#define PAGE_RWC       __pgprot(_PAGE_TYPE_RW | _PAGE_SWW | _PAGE_SWC)
+#define PAGE_NONE      __pgprot(_PAGE_PRESENT | _PAGE_INVALID)
+#define PAGE_READ      __pgprot(_PAGE_PRESENT | _PAGE_PROTECT)
+#define PAGE_WRITE     __pgprot(_PAGE_PRESENT | _PAGE_WRITE | _PAGE_PROTECT)
 
-#define PAGE_KERNEL    PAGE_RWC
-#define PAGE_SHARED    PAGE_KERNEL
-#define PAGE_COPY      PAGE_RO
+#define PAGE_SHARED    __pgprot(_PAGE_PRESENT | _PAGE_WRITE | _PAGE_DIRTY)
+#define PAGE_KERNEL    __pgprot(_PAGE_PRESENT | _PAGE_WRITE | _PAGE_DIRTY)
+#define PAGE_KERNEL_RO __pgprot(_PAGE_PRESENT | _PAGE_PROTECT)
 
 /*
  * On s390 the page table entry has an invalid bit and a read-only bit.
@@ -402,35 +389,30 @@ extern unsigned long MODULES_END;
  */
          /*xwr*/
 #define __P000 PAGE_NONE
-#define __P001 PAGE_RO
-#define __P010 PAGE_RO
-#define __P011 PAGE_RO
-#define __P100 PAGE_RO
-#define __P101 PAGE_RO
-#define __P110 PAGE_RO
-#define __P111 PAGE_RO
+#define __P001 PAGE_READ
+#define __P010 PAGE_READ
+#define __P011 PAGE_READ
+#define __P100 PAGE_READ
+#define __P101 PAGE_READ
+#define __P110 PAGE_READ
+#define __P111 PAGE_READ
 
 #define __S000 PAGE_NONE
-#define __S001 PAGE_RO
-#define __S010 PAGE_RW
-#define __S011 PAGE_RW
-#define __S100 PAGE_RO
-#define __S101 PAGE_RO
-#define __S110 PAGE_RW
-#define __S111 PAGE_RW
+#define __S001 PAGE_READ
+#define __S010 PAGE_WRITE
+#define __S011 PAGE_WRITE
+#define __S100 PAGE_READ
+#define __S101 PAGE_READ
+#define __S110 PAGE_WRITE
+#define __S111 PAGE_WRITE
 
 /*
  * Segment entry (large page) protection definitions.
  */
-#define SEGMENT_NONE   __pgprot(_HPAGE_TYPE_NONE)
-#define SEGMENT_RO     __pgprot(_HPAGE_TYPE_RO)
-#define SEGMENT_RW     __pgprot(_HPAGE_TYPE_RW)
-
-static inline int mm_exclusive(struct mm_struct *mm)
-{
-       return likely(mm == current->active_mm &&
-                     atomic_read(&mm->context.attach_count) <= 1);
-}
+#define SEGMENT_NONE   __pgprot(_SEGMENT_ENTRY_INVALID | \
+                                _SEGMENT_ENTRY_PROTECT)
+#define SEGMENT_READ   __pgprot(_SEGMENT_ENTRY_PROTECT)
+#define SEGMENT_WRITE  __pgprot(0)
 
 static inline int mm_has_pgste(struct mm_struct *mm)
 {
@@ -467,7 +449,7 @@ static inline int pgd_none(pgd_t pgd)
 {
        if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
                return 0;
-       return (pgd_val(pgd) & _REGION_ENTRY_INV) != 0UL;
+       return (pgd_val(pgd) & _REGION_ENTRY_INVALID) != 0UL;
 }
 
 static inline int pgd_bad(pgd_t pgd)
@@ -478,7 +460,7 @@ static inline int pgd_bad(pgd_t pgd)
         * invalid for either table entry.
         */
        unsigned long mask =
-               ~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INV &
+               ~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INVALID &
                ~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
        return (pgd_val(pgd) & mask) != 0;
 }
@@ -494,7 +476,7 @@ static inline int pud_none(pud_t pud)
 {
        if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
                return 0;
-       return (pud_val(pud) & _REGION_ENTRY_INV) != 0UL;
+       return (pud_val(pud) & _REGION_ENTRY_INVALID) != 0UL;
 }
 
 static inline int pud_large(pud_t pud)
@@ -512,7 +494,7 @@ static inline int pud_bad(pud_t pud)
         * invalid for either table entry.
         */
        unsigned long mask =
-               ~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INV &
+               ~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INVALID &
                ~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
        return (pud_val(pud) & mask) != 0;
 }
@@ -521,21 +503,18 @@ static inline int pud_bad(pud_t pud)
 
 static inline int pmd_present(pmd_t pmd)
 {
-       unsigned long mask = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO;
-       return (pmd_val(pmd) & mask) == _HPAGE_TYPE_NONE ||
-              !(pmd_val(pmd) & _SEGMENT_ENTRY_INV);
+       return pmd_val(pmd) != _SEGMENT_ENTRY_INVALID;
 }
 
 static inline int pmd_none(pmd_t pmd)
 {
-       return (pmd_val(pmd) & _SEGMENT_ENTRY_INV) &&
-              !(pmd_val(pmd) & _SEGMENT_ENTRY_RO);
+       return pmd_val(pmd) == _SEGMENT_ENTRY_INVALID;
 }
 
 static inline int pmd_large(pmd_t pmd)
 {
 #ifdef CONFIG_64BIT
-       return !!(pmd_val(pmd) & _SEGMENT_ENTRY_LARGE);
+       return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0;
 #else
        return 0;
 #endif
@@ -543,7 +522,7 @@ static inline int pmd_large(pmd_t pmd)
 
 static inline int pmd_bad(pmd_t pmd)
 {
-       unsigned long mask = ~_SEGMENT_ENTRY_ORIGIN & ~_SEGMENT_ENTRY_INV;
+       unsigned long mask = ~_SEGMENT_ENTRY_ORIGIN & ~_SEGMENT_ENTRY_INVALID;
        return (pmd_val(pmd) & mask) != _SEGMENT_ENTRY;
 }
 
@@ -563,7 +542,7 @@ extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
 #define __HAVE_ARCH_PMD_WRITE
 static inline int pmd_write(pmd_t pmd)
 {
-       return (pmd_val(pmd) & _SEGMENT_ENTRY_RO) == 0;
+       return (pmd_val(pmd) & _SEGMENT_ENTRY_PROTECT) == 0;
 }
 
 static inline int pmd_young(pmd_t pmd)
@@ -571,23 +550,23 @@ static inline int pmd_young(pmd_t pmd)
        return 0;
 }
 
-static inline int pte_none(pte_t pte)
+static inline int pte_present(pte_t pte)
 {
-       return (pte_val(pte) & _PAGE_INVALID) && !(pte_val(pte) & _PAGE_SWT);
+       /* Bit pattern: (pte & 0x001) == 0x001 */
+       return (pte_val(pte) & _PAGE_PRESENT) != 0;
 }
 
-static inline int pte_present(pte_t pte)
+static inline int pte_none(pte_t pte)
 {
-       unsigned long mask = _PAGE_RO | _PAGE_INVALID | _PAGE_SWT | _PAGE_SWX;
-       return (pte_val(pte) & mask) == _PAGE_TYPE_NONE ||
-               (!(pte_val(pte) & _PAGE_INVALID) &&
-                !(pte_val(pte) & _PAGE_SWT));
+       /* Bit pattern: pte == 0x400 */
+       return pte_val(pte) == _PAGE_INVALID;
 }
 
 static inline int pte_file(pte_t pte)
 {
-       unsigned long mask = _PAGE_RO | _PAGE_INVALID | _PAGE_SWT;
-       return (pte_val(pte) & mask) == _PAGE_TYPE_FILE;
+       /* Bit pattern: (pte & 0x601) == 0x600 */
+       return (pte_val(pte) & (_PAGE_INVALID | _PAGE_PROTECT | _PAGE_PRESENT))
+               == (_PAGE_INVALID | _PAGE_PROTECT);
 }
 
 static inline int pte_special(pte_t pte)
@@ -634,6 +613,15 @@ static inline void pgste_set_unlock(pte_t *ptep, pgste_t pgste)
 #endif
 }
 
+static inline pgste_t pgste_get(pte_t *ptep)
+{
+       unsigned long pgste = 0;
+#ifdef CONFIG_PGSTE
+       pgste = *(unsigned long *)(ptep + PTRS_PER_PTE);
+#endif
+       return __pgste(pgste);
+}
+
 static inline void pgste_set(pte_t *ptep, pgste_t pgste)
 {
 #ifdef CONFIG_PGSTE
@@ -695,7 +683,7 @@ static inline pgste_t pgste_update_young(pte_t *ptep, pgste_t pgste)
        /* Transfer referenced bit to kvm user bits and pte */
        if (young) {
                pgste_val(pgste) |= PGSTE_UR_BIT;
-               pte_val(*ptep) |= _PAGE_SWR;
+               pte_val(*ptep) |= _PAGE_YOUNG;
        }
 #endif
        return pgste;
@@ -723,13 +711,13 @@ static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry)
 
 static inline void pgste_set_pte(pte_t *ptep, pte_t entry)
 {
-       if (!MACHINE_HAS_ESOP && (pte_val(entry) & _PAGE_SWW)) {
+       if (!MACHINE_HAS_ESOP && (pte_val(entry) & _PAGE_WRITE)) {
                /*
                 * Without enhanced suppression-on-protection force
                 * the dirty bit on for all writable ptes.
                 */
-               pte_val(entry) |= _PAGE_SWC;
-               pte_val(entry) &= ~_PAGE_RO;
+               pte_val(entry) |= _PAGE_DIRTY;
+               pte_val(entry) &= ~_PAGE_PROTECT;
        }
        *ptep = entry;
 }
@@ -841,18 +829,18 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
  */
 static inline int pte_write(pte_t pte)
 {
-       return (pte_val(pte) & _PAGE_SWW) != 0;
+       return (pte_val(pte) & _PAGE_WRITE) != 0;
 }
 
 static inline int pte_dirty(pte_t pte)
 {
-       return (pte_val(pte) & _PAGE_SWC) != 0;
+       return (pte_val(pte) & _PAGE_DIRTY) != 0;
 }
 
 static inline int pte_young(pte_t pte)
 {
 #ifdef CONFIG_PGSTE
-       if (pte_val(pte) & _PAGE_SWR)
+       if (pte_val(pte) & _PAGE_YOUNG)
                return 1;
 #endif
        return 0;
@@ -880,12 +868,12 @@ static inline void pud_clear(pud_t *pud)
 
 static inline void pmd_clear(pmd_t *pmdp)
 {
-       pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
+       pmd_val(*pmdp) = _SEGMENT_ENTRY_INVALID;
 }
 
 static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
-       pte_val(*ptep) = _PAGE_TYPE_EMPTY;
+       pte_val(*ptep) = _PAGE_INVALID;
 }
 
 /*
@@ -896,49 +884,49 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
        pte_val(pte) &= _PAGE_CHG_MASK;
        pte_val(pte) |= pgprot_val(newprot);
-       if ((pte_val(pte) & _PAGE_SWC) && (pte_val(pte) & _PAGE_SWW))
-               pte_val(pte) &= ~_PAGE_RO;
+       if ((pte_val(pte) & _PAGE_DIRTY) && (pte_val(pte) & _PAGE_WRITE))
+               pte_val(pte) &= ~_PAGE_PROTECT;
        return pte;
 }
 
 static inline pte_t pte_wrprotect(pte_t pte)
 {
-       pte_val(pte) &= ~_PAGE_SWW;
-       /* Do not clobber _PAGE_TYPE_NONE pages!  */
+       pte_val(pte) &= ~_PAGE_WRITE;
+       /* Do not clobber PROT_NONE pages!  */
        if (!(pte_val(pte) & _PAGE_INVALID))
-               pte_val(pte) |= _PAGE_RO;
+               pte_val(pte) |= _PAGE_PROTECT;
        return pte;
 }
 
 static inline pte_t pte_mkwrite(pte_t pte)
 {
-       pte_val(pte) |= _PAGE_SWW;
-       if (pte_val(pte) & _PAGE_SWC)
-               pte_val(pte) &= ~_PAGE_RO;
+       pte_val(pte) |= _PAGE_WRITE;
+       if (pte_val(pte) & _PAGE_DIRTY)
+               pte_val(pte) &= ~_PAGE_PROTECT;
        return pte;
 }
 
 static inline pte_t pte_mkclean(pte_t pte)
 {
-       pte_val(pte) &= ~_PAGE_SWC;
-       /* Do not clobber _PAGE_TYPE_NONE pages!  */
+       pte_val(pte) &= ~_PAGE_DIRTY;
+       /* Do not clobber PROT_NONE pages!  */
        if (!(pte_val(pte) & _PAGE_INVALID))
-               pte_val(pte) |= _PAGE_RO;
+               pte_val(pte) |= _PAGE_PROTECT;
        return pte;
 }
 
 static inline pte_t pte_mkdirty(pte_t pte)
 {
-       pte_val(pte) |= _PAGE_SWC;
-       if (pte_val(pte) & _PAGE_SWW)
-               pte_val(pte) &= ~_PAGE_RO;
+       pte_val(pte) |= _PAGE_DIRTY;
+       if (pte_val(pte) & _PAGE_WRITE)
+               pte_val(pte) &= ~_PAGE_PROTECT;
        return pte;
 }
 
 static inline pte_t pte_mkold(pte_t pte)
 {
 #ifdef CONFIG_PGSTE
-       pte_val(pte) &= ~_PAGE_SWR;
+       pte_val(pte) &= ~_PAGE_YOUNG;
 #endif
        return pte;
 }
@@ -957,7 +945,7 @@ static inline pte_t pte_mkspecial(pte_t pte)
 #ifdef CONFIG_HUGETLB_PAGE
 static inline pte_t pte_mkhuge(pte_t pte)
 {
-       pte_val(pte) |= (_SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_CO);
+       pte_val(pte) |= _PAGE_LARGE;
        return pte;
 }
 #endif
@@ -1047,6 +1035,17 @@ static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
        }
 }
 
+static inline void ptep_flush_lazy(struct mm_struct *mm,
+                                  unsigned long address, pte_t *ptep)
+{
+       int active = (mm == current->active_mm) ? 1 : 0;
+
+       if (atomic_read(&mm->context.attach_count) > active)
+               __ptep_ipte(address, ptep);
+       else
+               mm->context.flush_mm = 1;
+}
+
 /*
  * This is hard to understand. ptep_get_and_clear and ptep_clear_flush
  * both clear the TLB for the unmapped pte. The reason is that
@@ -1067,16 +1066,14 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
        pgste_t pgste;
        pte_t pte;
 
-       mm->context.flush_mm = 1;
        if (mm_has_pgste(mm)) {
                pgste = pgste_get_lock(ptep);
                pgste = pgste_ipte_notify(mm, address, ptep, pgste);
        }
 
        pte = *ptep;
-       if (!mm_exclusive(mm))
-               __ptep_ipte(address, ptep);
-       pte_val(*ptep) = _PAGE_TYPE_EMPTY;
+       ptep_flush_lazy(mm, address, ptep);
+       pte_val(*ptep) = _PAGE_INVALID;
 
        if (mm_has_pgste(mm)) {
                pgste = pgste_update_all(&pte, pgste);
@@ -1093,15 +1090,13 @@ static inline pte_t ptep_modify_prot_start(struct mm_struct *mm,
        pgste_t pgste;
        pte_t pte;
 
-       mm->context.flush_mm = 1;
        if (mm_has_pgste(mm)) {
                pgste = pgste_get_lock(ptep);
                pgste_ipte_notify(mm, address, ptep, pgste);
        }
 
        pte = *ptep;
-       if (!mm_exclusive(mm))
-               __ptep_ipte(address, ptep);
+       ptep_flush_lazy(mm, address, ptep);
 
        if (mm_has_pgste(mm)) {
                pgste = pgste_update_all(&pte, pgste);
@@ -1117,7 +1112,7 @@ static inline void ptep_modify_prot_commit(struct mm_struct *mm,
        pgste_t pgste;
 
        if (mm_has_pgste(mm)) {
-               pgste = *(pgste_t *)(ptep + PTRS_PER_PTE);
+               pgste = pgste_get(ptep);
                pgste_set_key(ptep, pgste, pte);
                pgste_set_pte(ptep, pte);
                pgste_set_unlock(ptep, pgste);
@@ -1139,7 +1134,7 @@ static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
 
        pte = *ptep;
        __ptep_ipte(address, ptep);
-       pte_val(*ptep) = _PAGE_TYPE_EMPTY;
+       pte_val(*ptep) = _PAGE_INVALID;
 
        if (mm_has_pgste(vma->vm_mm)) {
                pgste = pgste_update_all(&pte, pgste);
@@ -1163,18 +1158,17 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
        pgste_t pgste;
        pte_t pte;
 
-       if (mm_has_pgste(mm)) {
+       if (!full && mm_has_pgste(mm)) {
                pgste = pgste_get_lock(ptep);
-               if (!full)
-                       pgste = pgste_ipte_notify(mm, address, ptep, pgste);
+               pgste = pgste_ipte_notify(mm, address, ptep, pgste);
        }
 
        pte = *ptep;
        if (!full)
-               __ptep_ipte(address, ptep);
-       pte_val(*ptep) = _PAGE_TYPE_EMPTY;
+               ptep_flush_lazy(mm, address, ptep);
+       pte_val(*ptep) = _PAGE_INVALID;
 
-       if (mm_has_pgste(mm)) {
+       if (!full && mm_has_pgste(mm)) {
                pgste = pgste_update_all(&pte, pgste);
                pgste_set_unlock(ptep, pgste);
        }
@@ -1189,14 +1183,12 @@ static inline pte_t ptep_set_wrprotect(struct mm_struct *mm,
        pte_t pte = *ptep;
 
        if (pte_write(pte)) {
-               mm->context.flush_mm = 1;
                if (mm_has_pgste(mm)) {
                        pgste = pgste_get_lock(ptep);
                        pgste = pgste_ipte_notify(mm, address, ptep, pgste);
                }
 
-               if (!mm_exclusive(mm))
-                       __ptep_ipte(address, ptep);
+               ptep_flush_lazy(mm, address, ptep);
                pte = pte_wrprotect(pte);
 
                if (mm_has_pgste(mm)) {
@@ -1248,10 +1240,8 @@ static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
        unsigned long physpage = page_to_phys(page);
        pte_t __pte = mk_pte_phys(physpage, pgprot);
 
-       if ((pte_val(__pte) & _PAGE_SWW) && PageDirty(page)) {
-               pte_val(__pte) |= _PAGE_SWC;
-               pte_val(__pte) &= ~_PAGE_RO;
-       }
+       if (pte_write(__pte) && PageDirty(page))
+               __pte = pte_mkdirty(__pte);
        return __pte;
 }
 
@@ -1313,7 +1303,7 @@ static inline void __pmd_idte(unsigned long address, pmd_t *pmdp)
        unsigned long sto = (unsigned long) pmdp -
                            pmd_index(address) * sizeof(pmd_t);
 
-       if (!(pmd_val(*pmdp) & _SEGMENT_ENTRY_INV)) {
+       if (!(pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)) {
                asm volatile(
                        "       .insn   rrf,0xb98e0000,%2,%3,0,0"
                        : "=m" (*pmdp)
@@ -1324,18 +1314,31 @@ static inline void __pmd_idte(unsigned long address, pmd_t *pmdp)
        }
 }
 
+static inline void __pmd_csp(pmd_t *pmdp)
+{
+       register unsigned long reg2 asm("2") = pmd_val(*pmdp);
+       register unsigned long reg3 asm("3") = pmd_val(*pmdp) |
+                                              _SEGMENT_ENTRY_INVALID;
+       register unsigned long reg4 asm("4") = ((unsigned long) pmdp) + 5;
+
+       asm volatile(
+               "       csp %1,%3"
+               : "=m" (*pmdp)
+               : "d" (reg2), "d" (reg3), "d" (reg4), "m" (*pmdp) : "cc");
+}
+
 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
 static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
 {
        /*
-        * pgprot is PAGE_NONE, PAGE_RO, or PAGE_RW (see __Pxxx / __Sxxx)
+        * pgprot is PAGE_NONE, PAGE_READ, or PAGE_WRITE (see __Pxxx / __Sxxx)
         * Convert to segment table entry format.
         */
        if (pgprot_val(pgprot) == pgprot_val(PAGE_NONE))
                return pgprot_val(SEGMENT_NONE);
-       if (pgprot_val(pgprot) == pgprot_val(PAGE_RO))
-               return pgprot_val(SEGMENT_RO);
-       return pgprot_val(SEGMENT_RW);
+       if (pgprot_val(pgprot) == pgprot_val(PAGE_READ))
+               return pgprot_val(SEGMENT_READ);
+       return pgprot_val(SEGMENT_WRITE);
 }
 
 static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
@@ -1354,9 +1357,9 @@ static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot)
 
 static inline pmd_t pmd_mkwrite(pmd_t pmd)
 {
-       /* Do not clobber _HPAGE_TYPE_NONE pages! */
-       if (!(pmd_val(pmd) & _SEGMENT_ENTRY_INV))
-               pmd_val(pmd) &= ~_SEGMENT_ENTRY_RO;
+       /* Do not clobber PROT_NONE pages! */
+       if (!(pmd_val(pmd) & _SEGMENT_ENTRY_INVALID))
+               pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
        return pmd;
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */
@@ -1378,7 +1381,7 @@ static inline int pmd_trans_splitting(pmd_t pmd)
 static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
                              pmd_t *pmdp, pmd_t entry)
 {
-       if (!(pmd_val(entry) & _SEGMENT_ENTRY_INV) && MACHINE_HAS_EDAT1)
+       if (!(pmd_val(entry) & _SEGMENT_ENTRY_INVALID) && MACHINE_HAS_EDAT1)
                pmd_val(entry) |= _SEGMENT_ENTRY_CO;
        *pmdp = entry;
 }
@@ -1391,7 +1394,7 @@ static inline pmd_t pmd_mkhuge(pmd_t pmd)
 
 static inline pmd_t pmd_wrprotect(pmd_t pmd)
 {
-       pmd_val(pmd) |= _SEGMENT_ENTRY_RO;
+       pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
        return pmd;
 }
 
@@ -1547,7 +1550,7 @@ static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
 {
        pte_t pte;
        offset &= __SWP_OFFSET_MASK;
-       pte_val(pte) = _PAGE_TYPE_SWAP | ((type & 0x1f) << 2) |
+       pte_val(pte) = _PAGE_INVALID | _PAGE_TYPE | ((type & 0x1f) << 2) |
                ((offset & 1UL) << 7) | ((offset & ~1UL) << 11);
        return pte;
 }
@@ -1570,7 +1573,7 @@ static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
 
 #define pgoff_to_pte(__off) \
        ((pte_t) { ((((__off) & 0x7f) << 1) + (((__off) >> 7) << 12)) \
-                  | _PAGE_TYPE_FILE })
+                  | _PAGE_INVALID | _PAGE_PROTECT })
 
 #endif /* !__ASSEMBLY__ */
 
diff --git a/arch/s390/include/asm/serial.h b/arch/s390/include/asm/serial.h
new file mode 100644 (file)
index 0000000..5b3e48e
--- /dev/null
@@ -0,0 +1,6 @@
+#ifndef _ASM_S390_SERIAL_H
+#define _ASM_S390_SERIAL_H
+
+#define BASE_BAUD 0
+
+#endif /* _ASM_S390_SERIAL_H */
index 80b6f11263c456233a6defaac28fd502bc7ad9f4..6dbd559763c9996c099f14e4ce6926e15930b7f8 100644 (file)
@@ -8,6 +8,7 @@
 #define __ASM_SWITCH_TO_H
 
 #include <linux/thread_info.h>
+#include <asm/ptrace.h>
 
 extern struct task_struct *__switch_to(void *, void *);
 extern void update_cr_regs(struct task_struct *task);
@@ -68,12 +69,16 @@ static inline void restore_fp_regs(s390_fp_regs *fpregs)
 
 static inline void save_access_regs(unsigned int *acrs)
 {
-       asm volatile("stam 0,15,%0" : "=Q" (*acrs));
+       typedef struct { int _[NUM_ACRS]; } acrstype;
+
+       asm volatile("stam 0,15,%0" : "=Q" (*(acrstype *)acrs));
 }
 
 static inline void restore_access_regs(unsigned int *acrs)
 {
-       asm volatile("lam 0,15,%0" : : "Q" (*acrs));
+       typedef struct { int _[NUM_ACRS]; } acrstype;
+
+       asm volatile("lam 0,15,%0" : : "Q" (*(acrstype *)acrs));
 }
 
 #define switch_to(prev,next,last) do {                                 \
index 6d6d92b4ea113fbc692cf4dda3cac36d803128fc..2cb846c4b37f1561ac77f2ef687239987c9e6b86 100644 (file)
@@ -63,13 +63,14 @@ static inline void tlb_gather_mmu(struct mmu_gather *tlb,
 
 static inline void tlb_flush_mmu(struct mmu_gather *tlb)
 {
+       __tlb_flush_mm_lazy(tlb->mm);
        tlb_table_flush(tlb);
 }
 
 static inline void tlb_finish_mmu(struct mmu_gather *tlb,
                                  unsigned long start, unsigned long end)
 {
-       tlb_table_flush(tlb);
+       tlb_flush_mmu(tlb);
 }
 
 /*
index 6b32af30878cc6276c57079aeefcda38ea6ea685..f9fef0425feecdd808e33bcbe4a457b8ece374ac 100644 (file)
@@ -86,7 +86,7 @@ static inline void __tlb_flush_mm(struct mm_struct * mm)
                __tlb_flush_full(mm);
 }
 
-static inline void __tlb_flush_mm_cond(struct mm_struct * mm)
+static inline void __tlb_flush_mm_lazy(struct mm_struct * mm)
 {
        if (mm->context.flush_mm) {
                __tlb_flush_mm(mm);
@@ -118,13 +118,13 @@ static inline void __tlb_flush_mm_cond(struct mm_struct * mm)
 
 static inline void flush_tlb_mm(struct mm_struct *mm)
 {
-       __tlb_flush_mm_cond(mm);
+       __tlb_flush_mm_lazy(mm);
 }
 
 static inline void flush_tlb_range(struct vm_area_struct *vma,
                                   unsigned long start, unsigned long end)
 {
-       __tlb_flush_mm_cond(vma->vm_mm);
+       __tlb_flush_mm_lazy(vma->vm_mm);
 }
 
 static inline void flush_tlb_kernel_range(unsigned long start,
index be7a408be7a16bafde657665edb36fcde01c8866..5ca70b4b72cb63d3585f68d97fb5d845db706ef6 100644 (file)
@@ -18,6 +18,7 @@
 #include <asm/unistd.h>
 #include <asm/page.h>
 #include <asm/sigp.h>
+#include <asm/irq.h>
 
 __PT_R0      = __PT_GPRS
 __PT_R1      = __PT_GPRS + 4
@@ -435,6 +436,11 @@ io_skip:
 io_loop:
        l       %r1,BASED(.Ldo_IRQ)
        lr      %r2,%r11                # pass pointer to pt_regs
+       lhi     %r3,IO_INTERRUPT
+       tm      __PT_INT_CODE+8(%r11),0x80      # adapter interrupt ?
+       jz      io_call
+       lhi     %r3,THIN_INTERRUPT
+io_call:
        basr    %r14,%r1                # call do_IRQ
        tm      __LC_MACHINE_FLAGS+2,0x10       # MACHINE_FLAG_LPAR
        jz      io_return
@@ -584,9 +590,10 @@ ext_skip:
        mvc     __PT_INT_CODE(4,%r11),__LC_EXT_CPU_ADDR
        mvc     __PT_INT_PARM(4,%r11),__LC_EXT_PARAMS
        TRACE_IRQS_OFF
+       l       %r1,BASED(.Ldo_IRQ)
        lr      %r2,%r11                # pass pointer to pt_regs
-       l       %r1,BASED(.Ldo_extint)
-       basr    %r14,%r1                # call do_extint
+       lhi     %r3,EXT_INTERRUPT
+       basr    %r14,%r1                # call do_IRQ
        j       io_return
 
 /*
@@ -902,7 +909,6 @@ cleanup_idle_wait:
 .Ldo_machine_check:    .long   s390_do_machine_check
 .Lhandle_mcck:         .long   s390_handle_mcck
 .Ldo_IRQ:              .long   do_IRQ
-.Ldo_extint:           .long   do_extint
 .Ldo_signal:           .long   do_signal
 .Ldo_notify_resume:    .long   do_notify_resume
 .Ldo_per_trap:         .long   do_per_trap
index 1c039d0c24c7e8b6b65e1aec20307adcabca581b..980c7aa1cc5ca2bee34c9af1f977f9e39783c31a 100644 (file)
@@ -19,6 +19,7 @@
 #include <asm/unistd.h>
 #include <asm/page.h>
 #include <asm/sigp.h>
+#include <asm/irq.h>
 
 __PT_R0      = __PT_GPRS
 __PT_R1      = __PT_GPRS + 8
@@ -468,6 +469,11 @@ io_skip:
        xc      __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
 io_loop:
        lgr     %r2,%r11                # pass pointer to pt_regs
+       lghi    %r3,IO_INTERRUPT
+       tm      __PT_INT_CODE+8(%r11),0x80      # adapter interrupt ?
+       jz      io_call
+       lghi    %r3,THIN_INTERRUPT
+io_call:
        brasl   %r14,do_IRQ
        tm      __LC_MACHINE_FLAGS+6,0x10       # MACHINE_FLAG_LPAR
        jz      io_return
@@ -623,7 +629,8 @@ ext_skip:
        TRACE_IRQS_OFF
        xc      __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
        lgr     %r2,%r11                # pass pointer to pt_regs
-       brasl   %r14,do_extint
+       lghi    %r3,EXT_INTERRUPT
+       brasl   %r14,do_IRQ
        j       io_return
 
 /*
index 54b0995514e8721508d9c98cb801d3bf36195c0a..b34ba0ea96a9e86e4f391ba6088a827b75b84b5b 100644 (file)
@@ -22,6 +22,7 @@
 #include <asm/cputime.h>
 #include <asm/lowcore.h>
 #include <asm/irq.h>
+#include <asm/hw_irq.h>
 #include "entry.h"
 
 DEFINE_PER_CPU_SHARED_ALIGNED(struct irq_stat, irq_stat);
@@ -42,9 +43,10 @@ struct irq_class {
  * Since the external and I/O interrupt fields are already sums we would end
  * up with having a sum which accounts each interrupt twice.
  */
-static const struct irq_class irqclass_main_desc[NR_IRQS] = {
-       [EXTERNAL_INTERRUPT] = {.name = "EXT"},
-       [IO_INTERRUPT]       = {.name = "I/O"}
+static const struct irq_class irqclass_main_desc[NR_IRQS_BASE] = {
+       [EXT_INTERRUPT]  = {.name = "EXT"},
+       [IO_INTERRUPT]   = {.name = "I/O"},
+       [THIN_INTERRUPT] = {.name = "AIO"},
 };
 
 /*
@@ -86,6 +88,28 @@ static const struct irq_class irqclass_sub_desc[NR_ARCH_IRQS] = {
        [CPU_RST]    = {.name = "RST", .desc = "[CPU] CPU Restart"},
 };
 
+void __init init_IRQ(void)
+{
+       irq_reserve_irqs(0, THIN_INTERRUPT);
+       init_cio_interrupts();
+       init_airq_interrupts();
+       init_ext_interrupts();
+}
+
+void do_IRQ(struct pt_regs *regs, int irq)
+{
+       struct pt_regs *old_regs;
+
+       old_regs = set_irq_regs(regs);
+       irq_enter();
+       if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator)
+               /* Serve timer interrupts first. */
+               clock_comparator_work();
+       generic_handle_irq(irq);
+       irq_exit();
+       set_irq_regs(old_regs);
+}
+
 /*
  * show_interrupts is needed by /proc/interrupts.
  */
@@ -100,27 +124,36 @@ int show_interrupts(struct seq_file *p, void *v)
                for_each_online_cpu(cpu)
                        seq_printf(p, "CPU%d       ", cpu);
                seq_putc(p, '\n');
+               goto out;
        }
        if (irq < NR_IRQS) {
+               if (irq >= NR_IRQS_BASE)
+                       goto out;
                seq_printf(p, "%s: ", irqclass_main_desc[irq].name);
                for_each_online_cpu(cpu)
-                       seq_printf(p, "%10u ", kstat_cpu(cpu).irqs[irq]);
+                       seq_printf(p, "%10u ", kstat_irqs_cpu(irq, cpu));
                seq_putc(p, '\n');
-               goto skip_arch_irqs;
+               goto out;
        }
        for (irq = 0; irq < NR_ARCH_IRQS; irq++) {
                seq_printf(p, "%s: ", irqclass_sub_desc[irq].name);
                for_each_online_cpu(cpu)
-                       seq_printf(p, "%10u ", per_cpu(irq_stat, cpu).irqs[irq]);
+                       seq_printf(p, "%10u ",
+                                  per_cpu(irq_stat, cpu).irqs[irq]);
                if (irqclass_sub_desc[irq].desc)
                        seq_printf(p, "  %s", irqclass_sub_desc[irq].desc);
                seq_putc(p, '\n');
        }
-skip_arch_irqs:
+out:
        put_online_cpus();
        return 0;
 }
 
+int arch_show_interrupts(struct seq_file *p, int prec)
+{
+       return 0;
+}
+
 /*
  * Switch to the asynchronous interrupt stack for softirq execution.
  */
@@ -159,14 +192,6 @@ asmlinkage void do_softirq(void)
        local_irq_restore(flags);
 }
 
-#ifdef CONFIG_PROC_FS
-void init_irq_proc(void)
-{
-       if (proc_mkdir("irq", NULL))
-               create_prof_cpu_mask();
-}
-#endif
-
 /*
  * ext_int_hash[index] is the list head for all external interrupts that hash
  * to this index.
@@ -183,14 +208,6 @@ struct ext_int_info {
 /* ext_int_hash_lock protects the handler lists for external interrupts */
 DEFINE_SPINLOCK(ext_int_hash_lock);
 
-static void __init init_external_interrupts(void)
-{
-       int idx;
-
-       for (idx = 0; idx < ARRAY_SIZE(ext_int_hash); idx++)
-               INIT_LIST_HEAD(&ext_int_hash[idx]);
-}
-
 static inline int ext_hash(u16 code)
 {
        return (code + (code >> 9)) & 0xff;
@@ -234,20 +251,13 @@ int unregister_external_interrupt(u16 code, ext_int_handler_t handler)
 }
 EXPORT_SYMBOL(unregister_external_interrupt);
 
-void __irq_entry do_extint(struct pt_regs *regs)
+static irqreturn_t do_ext_interrupt(int irq, void *dummy)
 {
+       struct pt_regs *regs = get_irq_regs();
        struct ext_code ext_code;
-       struct pt_regs *old_regs;
        struct ext_int_info *p;
        int index;
 
-       old_regs = set_irq_regs(regs);
-       irq_enter();
-       if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator) {
-               /* Serve timer interrupts first. */
-               clock_comparator_work();
-       }
-       kstat_incr_irqs_this_cpu(EXTERNAL_INTERRUPT, NULL);
        ext_code = *(struct ext_code *) &regs->int_code;
        if (ext_code.code != 0x1004)
                __get_cpu_var(s390_idle).nohz_delay = 1;
@@ -259,13 +269,25 @@ void __irq_entry do_extint(struct pt_regs *regs)
                        p->handler(ext_code, regs->int_parm,
                                   regs->int_parm_long);
        rcu_read_unlock();
-       irq_exit();
-       set_irq_regs(old_regs);
+
+       return IRQ_HANDLED;
 }
 
-void __init init_IRQ(void)
+static struct irqaction external_interrupt = {
+       .name    = "EXT",
+       .handler = do_ext_interrupt,
+};
+
+void __init init_ext_interrupts(void)
 {
-       init_external_interrupts();
+       int idx;
+
+       for (idx = 0; idx < ARRAY_SIZE(ext_int_hash); idx++)
+               INIT_LIST_HEAD(&ext_int_hash[idx]);
+
+       irq_set_chip_and_handler(EXT_INTERRUPT,
+                                &dummy_irq_chip, handle_percpu_irq);
+       setup_irq(EXT_INTERRUPT, &external_interrupt);
 }
 
 static DEFINE_SPINLOCK(sc_irq_lock);
@@ -313,69 +335,3 @@ void measurement_alert_subclass_unregister(void)
        spin_unlock(&ma_subclass_lock);
 }
 EXPORT_SYMBOL(measurement_alert_subclass_unregister);
-
-#ifdef CONFIG_SMP
-void synchronize_irq(unsigned int irq)
-{
-       /*
-        * Not needed, the handler is protected by a lock and IRQs that occur
-        * after the handler is deleted are just NOPs.
-        */
-}
-EXPORT_SYMBOL_GPL(synchronize_irq);
-#endif
-
-#ifndef CONFIG_PCI
-
-/* Only PCI devices have dynamically-defined IRQ handlers */
-
-int request_irq(unsigned int irq, irq_handler_t handler,
-               unsigned long irqflags, const char *devname, void *dev_id)
-{
-       return -EINVAL;
-}
-EXPORT_SYMBOL_GPL(request_irq);
-
-void free_irq(unsigned int irq, void *dev_id)
-{
-       WARN_ON(1);
-}
-EXPORT_SYMBOL_GPL(free_irq);
-
-void enable_irq(unsigned int irq)
-{
-       WARN_ON(1);
-}
-EXPORT_SYMBOL_GPL(enable_irq);
-
-void disable_irq(unsigned int irq)
-{
-       WARN_ON(1);
-}
-EXPORT_SYMBOL_GPL(disable_irq);
-
-#endif /* !CONFIG_PCI */
-
-void disable_irq_nosync(unsigned int irq)
-{
-       disable_irq(irq);
-}
-EXPORT_SYMBOL_GPL(disable_irq_nosync);
-
-unsigned long probe_irq_on(void)
-{
-       return 0;
-}
-EXPORT_SYMBOL_GPL(probe_irq_on);
-
-int probe_irq_off(unsigned long val)
-{
-       return 0;
-}
-EXPORT_SYMBOL_GPL(probe_irq_off);
-
-unsigned int probe_irq_mask(unsigned long val)
-{
-       return val;
-}
-EXPORT_SYMBOL_GPL(probe_irq_mask);
index 504175ebf8b0fe301fcb974214a8f18b04439d0f..c4c0338198791d4a187c73f8490c0fc39595078d 100644 (file)
@@ -214,10 +214,7 @@ static int notrace s390_revalidate_registers(struct mci *mci)
                        : "0", "cc");
 #endif
        /* Revalidate clock comparator register */
-       if (S390_lowcore.clock_comparator == -1)
-               set_clock_comparator(S390_lowcore.mcck_clock);
-       else
-               set_clock_comparator(S390_lowcore.clock_comparator);
+       set_clock_comparator(S390_lowcore.clock_comparator);
        /* Check if old PSW is valid */
        if (!mci->wp)
                /*
index e9fadb04e3c61e0b71b6eb238a12941359edba0d..2bc08039140f651cc596e2ac10fb52686adb6460 100644 (file)
@@ -1299,7 +1299,7 @@ int regs_query_register_offset(const char *name)
 
        if (!name || *name != 'r')
                return -EINVAL;
-       if (strict_strtoul(name + 1, 10, &offset))
+       if (kstrtoul(name + 1, 10, &offset))
                return -EINVAL;
        if (offset >= NUM_GPRS)
                return -EINVAL;
index d7776281cb60ff0cd2738ab9c949d68c6f805d1f..05d75c413137879a30fded476638b0b9c4a001f5 100644 (file)
@@ -63,7 +63,7 @@ static int __init vdso_setup(char *s)
        else if (strncmp(s, "off", 4) == 0)
                vdso_enabled = 0;
        else {
-               rc = strict_strtoul(s, 0, &val);
+               rc = kstrtoul(s, 0, &val);
                vdso_enabled = rc ? 0 : !!val;
        }
        return !rc;
@@ -113,11 +113,11 @@ int vdso_alloc_per_cpu(struct _lowcore *lowcore)
 
        clear_table((unsigned long *) segment_table, _SEGMENT_ENTRY_EMPTY,
                    PAGE_SIZE << SEGMENT_ORDER);
-       clear_table((unsigned long *) page_table, _PAGE_TYPE_EMPTY,
+       clear_table((unsigned long *) page_table, _PAGE_INVALID,
                    256*sizeof(unsigned long));
 
        *(unsigned long *) segment_table = _SEGMENT_ENTRY + page_table;
-       *(unsigned long *) page_table = _PAGE_RO + page_frame;
+       *(unsigned long *) page_table = _PAGE_PROTECT + page_frame;
 
        psal = (u32 *) (page_table + 256*sizeof(unsigned long));
        aste = psal + 32;
index 50ea137a2d3c296859b600c0ef861c07fcce5209..1694d738b17527aad71850c8fc772e755d26ca54 100644 (file)
@@ -86,28 +86,28 @@ static unsigned long follow_table(struct mm_struct *mm,
        switch (mm->context.asce_bits & _ASCE_TYPE_MASK) {
        case _ASCE_TYPE_REGION1:
                table = table + ((address >> 53) & 0x7ff);
-               if (unlikely(*table & _REGION_ENTRY_INV))
+               if (unlikely(*table & _REGION_ENTRY_INVALID))
                        return -0x39UL;
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
                /* fallthrough */
        case _ASCE_TYPE_REGION2:
                table = table + ((address >> 42) & 0x7ff);
-               if (unlikely(*table & _REGION_ENTRY_INV))
+               if (unlikely(*table & _REGION_ENTRY_INVALID))
                        return -0x3aUL;
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
                /* fallthrough */
        case _ASCE_TYPE_REGION3:
                table = table + ((address >> 31) & 0x7ff);
-               if (unlikely(*table & _REGION_ENTRY_INV))
+               if (unlikely(*table & _REGION_ENTRY_INVALID))
                        return -0x3bUL;
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
                /* fallthrough */
        case _ASCE_TYPE_SEGMENT:
                table = table + ((address >> 20) & 0x7ff);
-               if (unlikely(*table & _SEGMENT_ENTRY_INV))
+               if (unlikely(*table & _SEGMENT_ENTRY_INVALID))
                        return -0x10UL;
                if (unlikely(*table & _SEGMENT_ENTRY_LARGE)) {
-                       if (write && (*table & _SEGMENT_ENTRY_RO))
+                       if (write && (*table & _SEGMENT_ENTRY_PROTECT))
                                return -0x04UL;
                        return (*table & _SEGMENT_ENTRY_ORIGIN_LARGE) +
                                (address & ~_SEGMENT_ENTRY_ORIGIN_LARGE);
@@ -117,7 +117,7 @@ static unsigned long follow_table(struct mm_struct *mm,
        table = table + ((address >> 12) & 0xff);
        if (unlikely(*table & _PAGE_INVALID))
                return -0x11UL;
-       if (write && (*table & _PAGE_RO))
+       if (write && (*table & _PAGE_PROTECT))
                return -0x04UL;
        return (*table & PAGE_MASK) + (address & ~PAGE_MASK);
 }
@@ -130,13 +130,13 @@ static unsigned long follow_table(struct mm_struct *mm,
        unsigned long *table = (unsigned long *)__pa(mm->pgd);
 
        table = table + ((address >> 20) & 0x7ff);
-       if (unlikely(*table & _SEGMENT_ENTRY_INV))
+       if (unlikely(*table & _SEGMENT_ENTRY_INVALID))
                return -0x10UL;
        table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
        table = table + ((address >> 12) & 0xff);
        if (unlikely(*table & _PAGE_INVALID))
                return -0x11UL;
-       if (write && (*table & _PAGE_RO))
+       if (write && (*table & _PAGE_PROTECT))
                return -0x04UL;
        return (*table & PAGE_MASK) + (address & ~PAGE_MASK);
 }
index 3ad65b04ac1508a62290e57e7a7013d4edbe994d..46d517c3c76366c7459b7f539888a29064a43d78 100644 (file)
@@ -53,7 +53,7 @@ static void print_prot(struct seq_file *m, unsigned int pr, int level)
                seq_printf(m, "I\n");
                return;
        }
-       seq_printf(m, "%s", pr & _PAGE_RO ? "RO " : "RW ");
+       seq_printf(m, "%s", pr & _PAGE_PROTECT ? "RO " : "RW ");
        seq_printf(m, "%s", pr & _PAGE_CO ? "CO " : "   ");
        seq_putc(m, '\n');
 }
@@ -105,12 +105,12 @@ static void note_page(struct seq_file *m, struct pg_state *st,
 }
 
 /*
- * The actual page table walker functions. In order to keep the implementation
- * of print_prot() short, we only check and pass _PAGE_INVALID and _PAGE_RO
- * flags to note_page() if a region, segment or page table entry is invalid or
- * read-only.
- * After all it's just a hint that the current level being walked contains an
- * invalid or read-only entry.
+ * The actual page table walker functions. In order to keep the
+ * implementation of print_prot() short, we only check and pass
+ * _PAGE_INVALID and _PAGE_PROTECT flags to note_page() if a region,
+ * segment or page table entry is invalid or read-only.
+ * After all it's just a hint that the current level being walked
+ * contains an invalid or read-only entry.
  */
 static void walk_pte_level(struct seq_file *m, struct pg_state *st,
                           pmd_t *pmd, unsigned long addr)
@@ -122,14 +122,14 @@ static void walk_pte_level(struct seq_file *m, struct pg_state *st,
        for (i = 0; i < PTRS_PER_PTE && addr < max_addr; i++) {
                st->current_address = addr;
                pte = pte_offset_kernel(pmd, addr);
-               prot = pte_val(*pte) & (_PAGE_RO | _PAGE_INVALID);
+               prot = pte_val(*pte) & (_PAGE_PROTECT | _PAGE_INVALID);
                note_page(m, st, prot, 4);
                addr += PAGE_SIZE;
        }
 }
 
 #ifdef CONFIG_64BIT
-#define _PMD_PROT_MASK (_SEGMENT_ENTRY_RO | _SEGMENT_ENTRY_CO)
+#define _PMD_PROT_MASK (_SEGMENT_ENTRY_PROTECT | _SEGMENT_ENTRY_CO)
 #else
 #define _PMD_PROT_MASK 0
 #endif
index 1f5315d1215c2640f5691555801e9ff7885c7fcf..5d758db27bdced58d929d736363bcafc09c199ab 100644 (file)
@@ -24,7 +24,7 @@ static inline int gup_pte_range(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
        pte_t *ptep, pte;
        struct page *page;
 
-       mask = (write ? _PAGE_RO : 0) | _PAGE_INVALID | _PAGE_SPECIAL;
+       mask = (write ? _PAGE_PROTECT : 0) | _PAGE_INVALID | _PAGE_SPECIAL;
 
        ptep = ((pte_t *) pmd_deref(pmd)) + pte_index(addr);
        do {
@@ -55,8 +55,8 @@ static inline int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
        struct page *head, *page, *tail;
        int refs;
 
-       result = write ? 0 : _SEGMENT_ENTRY_RO;
-       mask = result | _SEGMENT_ENTRY_INV;
+       result = write ? 0 : _SEGMENT_ENTRY_PROTECT;
+       mask = result | _SEGMENT_ENTRY_INVALID;
        if ((pmd_val(pmd) & mask) != result)
                return 0;
        VM_BUG_ON(!pfn_valid(pmd_val(pmd) >> PAGE_SHIFT));
index 121089d578029191c61c71843b7e311c6be317df..bcdb99e001ac596bca486435fa12b7515f0b24f0 100644 (file)
 #include <linux/mm.h>
 #include <linux/hugetlb.h>
 
+static inline pmd_t __pte_to_pmd(pte_t pte)
+{
+       int none, prot;
+       pmd_t pmd;
+
+       /*
+        * Convert encoding       pte bits        pmd bits
+        *                      .IR.....wdtp    ..R...I.....
+        * empty                .10.....0000 -> ..0...1.....
+        * prot-none, clean     .10.....0001 -> ..1...1.....
+        * prot-none, dirty     .10.....0101 -> ..1...1.....
+        * read-only, clean     .01.....0001 -> ..1...0.....
+        * read-only, dirty     .01.....0101 -> ..1...0.....
+        * read-write, clean    .01.....1001 -> ..0...0.....
+        * read-write, dirty    .00.....1101 -> ..0...0.....
+        * Huge ptes are dirty by definition, a clean pte is made dirty
+        * by the conversion.
+        */
+       if (pte_present(pte)) {
+               pmd_val(pmd) = pte_val(pte) & PAGE_MASK;
+               if (pte_val(pte) & _PAGE_INVALID)
+                       pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
+               none = (pte_val(pte) & _PAGE_PRESENT) &&
+                       (pte_val(pte) & _PAGE_INVALID);
+               prot = (pte_val(pte) & _PAGE_PROTECT);
+               if (prot || none)
+                       pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
+       } else
+               pmd_val(pmd) = _SEGMENT_ENTRY_INVALID;
+       return pmd;
+}
+
+static inline pte_t __pmd_to_pte(pmd_t pmd)
+{
+       pte_t pte;
+
+       /*
+        * Convert encoding       pmd bits        pte bits
+        *                      ..R...I.....    .IR.....wdtp
+        * empty                ..0...1..... -> .10.....0000
+        * prot-none, young     ..1...1..... -> .10.....0101
+        * read-only, young     ..1...0..... -> .01.....0101
+        * read-write, young    ..0...0..... -> .00.....1101
+        * Huge ptes are dirty by definition
+        */
+       if (pmd_present(pmd)) {
+               pte_val(pte) = _PAGE_PRESENT | _PAGE_LARGE | _PAGE_DIRTY |
+                       (pmd_val(pmd) & PAGE_MASK);
+               if (pmd_val(pmd) & _SEGMENT_ENTRY_INVALID)
+                       pte_val(pte) |= _PAGE_INVALID;
+               else {
+                       if (pmd_val(pmd) & _SEGMENT_ENTRY_PROTECT)
+                               pte_val(pte) |= _PAGE_PROTECT;
+                       else
+                               pte_val(pte) |= _PAGE_WRITE;
+               }
+       } else
+               pte_val(pte) = _PAGE_INVALID;
+       return pte;
+}
 
 void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
-                                  pte_t *pteptr, pte_t pteval)
+                    pte_t *ptep, pte_t pte)
 {
-       pmd_t *pmdp = (pmd_t *) pteptr;
-       unsigned long mask;
+       pmd_t pmd;
 
+       pmd = __pte_to_pmd(pte);
        if (!MACHINE_HAS_HPAGE) {
-               pteptr = (pte_t *) pte_page(pteval)[1].index;
-               mask = pte_val(pteval) &
-                               (_SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO);
-               pte_val(pteval) = (_SEGMENT_ENTRY + __pa(pteptr)) | mask;
+               pmd_val(pmd) &= ~_SEGMENT_ENTRY_ORIGIN;
+               pmd_val(pmd) |= pte_page(pte)[1].index;
+       } else
+               pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_CO;
+       *(pmd_t *) ptep = pmd;
+}
+
+pte_t huge_ptep_get(pte_t *ptep)
+{
+       unsigned long origin;
+       pmd_t pmd;
+
+       pmd = *(pmd_t *) ptep;
+       if (!MACHINE_HAS_HPAGE && pmd_present(pmd)) {
+               origin = pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN;
+               pmd_val(pmd) &= ~_SEGMENT_ENTRY_ORIGIN;
+               pmd_val(pmd) |= *(unsigned long *) origin;
        }
+       return __pmd_to_pte(pmd);
+}
 
-       pmd_val(*pmdp) = pte_val(pteval);
+pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
+                             unsigned long addr, pte_t *ptep)
+{
+       pmd_t *pmdp = (pmd_t *) ptep;
+       pte_t pte = huge_ptep_get(ptep);
+
+       if (MACHINE_HAS_IDTE)
+               __pmd_idte(addr, pmdp);
+       else
+               __pmd_csp(pmdp);
+       pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
+       return pte;
 }
 
 int arch_prepare_hugepage(struct page *page)
@@ -58,7 +144,7 @@ void arch_release_hugepage(struct page *page)
        ptep = (pte_t *) page[1].index;
        if (!ptep)
                return;
-       clear_table((unsigned long *) ptep, _PAGE_TYPE_EMPTY,
+       clear_table((unsigned long *) ptep, _PAGE_INVALID,
                    PTRS_PER_PTE * sizeof(pte_t));
        page_table_free(&init_mm, (unsigned long *) ptep);
        page[1].index = 0;
index 80adfbf75065d487a572226eafd21450bc84962d..990397420e6bcf8262b92806b5f5b57bff273373 100644 (file)
@@ -118,7 +118,7 @@ void kernel_map_pages(struct page *page, int numpages, int enable)
                pte = pte_offset_kernel(pmd, address);
                if (!enable) {
                        __ptep_ipte(address, pte);
-                       pte_val(*pte) = _PAGE_TYPE_EMPTY;
+                       pte_val(*pte) = _PAGE_INVALID;
                        continue;
                }
                pte_val(*pte) = __pa(address);
index a8154a1a2c942ee0eb1a93d77c8dd872e26b3dd0..befaea7003f745a80502a9576331c65db150295f 100644 (file)
@@ -161,7 +161,7 @@ static int gmap_unlink_segment(struct gmap *gmap, unsigned long *table)
        struct gmap_rmap *rmap;
        struct page *page;
 
-       if (*table & _SEGMENT_ENTRY_INV)
+       if (*table & _SEGMENT_ENTRY_INVALID)
                return 0;
        page = pfn_to_page(*table >> PAGE_SHIFT);
        mp = (struct gmap_pgtable *) page->index;
@@ -172,7 +172,7 @@ static int gmap_unlink_segment(struct gmap *gmap, unsigned long *table)
                kfree(rmap);
                break;
        }
-       *table = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr;
+       *table = mp->vmaddr | _SEGMENT_ENTRY_INVALID | _SEGMENT_ENTRY_PROTECT;
        return 1;
 }
 
@@ -258,7 +258,7 @@ static int gmap_alloc_table(struct gmap *gmap,
                return -ENOMEM;
        new = (unsigned long *) page_to_phys(page);
        crst_table_init(new, init);
-       if (*table & _REGION_ENTRY_INV) {
+       if (*table & _REGION_ENTRY_INVALID) {
                list_add(&page->lru, &gmap->crst_list);
                *table = (unsigned long) new | _REGION_ENTRY_LENGTH |
                        (*table & _REGION_ENTRY_TYPE_MASK);
@@ -292,22 +292,22 @@ int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
        for (off = 0; off < len; off += PMD_SIZE) {
                /* Walk the guest addr space page table */
                table = gmap->table + (((to + off) >> 53) & 0x7ff);
-               if (*table & _REGION_ENTRY_INV)
+               if (*table & _REGION_ENTRY_INVALID)
                        goto out;
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
                table = table + (((to + off) >> 42) & 0x7ff);
-               if (*table & _REGION_ENTRY_INV)
+               if (*table & _REGION_ENTRY_INVALID)
                        goto out;
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
                table = table + (((to + off) >> 31) & 0x7ff);
-               if (*table & _REGION_ENTRY_INV)
+               if (*table & _REGION_ENTRY_INVALID)
                        goto out;
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
                table = table + (((to + off) >> 20) & 0x7ff);
 
                /* Clear segment table entry in guest address space. */
                flush |= gmap_unlink_segment(gmap, table);
-               *table = _SEGMENT_ENTRY_INV;
+               *table = _SEGMENT_ENTRY_INVALID;
        }
 out:
        spin_unlock(&gmap->mm->page_table_lock);
@@ -345,17 +345,17 @@ int gmap_map_segment(struct gmap *gmap, unsigned long from,
        for (off = 0; off < len; off += PMD_SIZE) {
                /* Walk the gmap address space page table */
                table = gmap->table + (((to + off) >> 53) & 0x7ff);
-               if ((*table & _REGION_ENTRY_INV) &&
+               if ((*table & _REGION_ENTRY_INVALID) &&
                    gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY))
                        goto out_unmap;
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
                table = table + (((to + off) >> 42) & 0x7ff);
-               if ((*table & _REGION_ENTRY_INV) &&
+               if ((*table & _REGION_ENTRY_INVALID) &&
                    gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY))
                        goto out_unmap;
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
                table = table + (((to + off) >> 31) & 0x7ff);
-               if ((*table & _REGION_ENTRY_INV) &&
+               if ((*table & _REGION_ENTRY_INVALID) &&
                    gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY))
                        goto out_unmap;
                table = (unsigned long *) (*table & _REGION_ENTRY_ORIGIN);
@@ -363,7 +363,8 @@ int gmap_map_segment(struct gmap *gmap, unsigned long from,
 
                /* Store 'from' address in an invalid segment table entry. */
                flush |= gmap_unlink_segment(gmap, table);
-               *table = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | (from + off);
+               *table =  (from + off) | (_SEGMENT_ENTRY_INVALID |
+                                         _SEGMENT_ENTRY_PROTECT);
        }
        spin_unlock(&gmap->mm->page_table_lock);
        up_read(&gmap->mm->mmap_sem);
@@ -384,15 +385,15 @@ static unsigned long *gmap_table_walk(unsigned long address, struct gmap *gmap)
        unsigned long *table;
 
        table = gmap->table + ((address >> 53) & 0x7ff);
-       if (unlikely(*table & _REGION_ENTRY_INV))
+       if (unlikely(*table & _REGION_ENTRY_INVALID))
                return ERR_PTR(-EFAULT);
        table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
        table = table + ((address >> 42) & 0x7ff);
-       if (unlikely(*table & _REGION_ENTRY_INV))
+       if (unlikely(*table & _REGION_ENTRY_INVALID))
                return ERR_PTR(-EFAULT);
        table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
        table = table + ((address >> 31) & 0x7ff);
-       if (unlikely(*table & _REGION_ENTRY_INV))
+       if (unlikely(*table & _REGION_ENTRY_INVALID))
                return ERR_PTR(-EFAULT);
        table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
        table = table + ((address >> 20) & 0x7ff);
@@ -422,11 +423,11 @@ unsigned long __gmap_translate(unsigned long address, struct gmap *gmap)
                return PTR_ERR(segment_ptr);
        /* Convert the gmap address to an mm address. */
        segment = *segment_ptr;
-       if (!(segment & _SEGMENT_ENTRY_INV)) {
+       if (!(segment & _SEGMENT_ENTRY_INVALID)) {
                page = pfn_to_page(segment >> PAGE_SHIFT);
                mp = (struct gmap_pgtable *) page->index;
                return mp->vmaddr | (address & ~PMD_MASK);
-       } else if (segment & _SEGMENT_ENTRY_RO) {
+       } else if (segment & _SEGMENT_ENTRY_PROTECT) {
                vmaddr = segment & _SEGMENT_ENTRY_ORIGIN;
                return vmaddr | (address & ~PMD_MASK);
        }
@@ -517,8 +518,8 @@ static void gmap_disconnect_pgtable(struct mm_struct *mm, unsigned long *table)
        page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
        mp = (struct gmap_pgtable *) page->index;
        list_for_each_entry_safe(rmap, next, &mp->mapper, list) {
-               *rmap->entry =
-                       _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr;
+               *rmap->entry = mp->vmaddr | (_SEGMENT_ENTRY_INVALID |
+                                            _SEGMENT_ENTRY_PROTECT);
                list_del(&rmap->list);
                kfree(rmap);
                flush = 1;
@@ -545,13 +546,13 @@ unsigned long __gmap_fault(unsigned long address, struct gmap *gmap)
        /* Convert the gmap address to an mm address. */
        while (1) {
                segment = *segment_ptr;
-               if (!(segment & _SEGMENT_ENTRY_INV)) {
+               if (!(segment & _SEGMENT_ENTRY_INVALID)) {
                        /* Page table is present */
                        page = pfn_to_page(segment >> PAGE_SHIFT);
                        mp = (struct gmap_pgtable *) page->index;
                        return mp->vmaddr | (address & ~PMD_MASK);
                }
-               if (!(segment & _SEGMENT_ENTRY_RO))
+               if (!(segment & _SEGMENT_ENTRY_PROTECT))
                        /* Nothing mapped in the gmap address space. */
                        break;
                rc = gmap_connect_pgtable(address, segment, segment_ptr, gmap);
@@ -586,25 +587,25 @@ void gmap_discard(unsigned long from, unsigned long to, struct gmap *gmap)
        while (address < to) {
                /* Walk the gmap address space page table */
                table = gmap->table + ((address >> 53) & 0x7ff);
-               if (unlikely(*table & _REGION_ENTRY_INV)) {
+               if (unlikely(*table & _REGION_ENTRY_INVALID)) {
                        address = (address + PMD_SIZE) & PMD_MASK;
                        continue;
                }
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
                table = table + ((address >> 42) & 0x7ff);
-               if (unlikely(*table & _REGION_ENTRY_INV)) {
+               if (unlikely(*table & _REGION_ENTRY_INVALID)) {
                        address = (address + PMD_SIZE) & PMD_MASK;
                        continue;
                }
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
                table = table + ((address >> 31) & 0x7ff);
-               if (unlikely(*table & _REGION_ENTRY_INV)) {
+               if (unlikely(*table & _REGION_ENTRY_INVALID)) {
                        address = (address + PMD_SIZE) & PMD_MASK;
                        continue;
                }
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
                table = table + ((address >> 20) & 0x7ff);
-               if (unlikely(*table & _SEGMENT_ENTRY_INV)) {
+               if (unlikely(*table & _SEGMENT_ENTRY_INVALID)) {
                        address = (address + PMD_SIZE) & PMD_MASK;
                        continue;
                }
@@ -687,7 +688,7 @@ int gmap_ipte_notify(struct gmap *gmap, unsigned long start, unsigned long len)
                        continue;
                /* Set notification bit in the pgste of the pte */
                entry = *ptep;
-               if ((pte_val(entry) & (_PAGE_INVALID | _PAGE_RO)) == 0) {
+               if ((pte_val(entry) & (_PAGE_INVALID | _PAGE_PROTECT)) == 0) {
                        pgste = pgste_get_lock(ptep);
                        pgste_val(pgste) |= PGSTE_IN_BIT;
                        pgste_set_unlock(ptep, pgste);
@@ -752,7 +753,7 @@ static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
        page->index = (unsigned long) mp;
        atomic_set(&page->_mapcount, 3);
        table = (unsigned long *) page_to_phys(page);
-       clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/2);
+       clear_table(table, _PAGE_INVALID, PAGE_SIZE/2);
        clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2);
        return table;
 }
@@ -878,7 +879,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm, unsigned long vmaddr)
                pgtable_page_ctor(page);
                atomic_set(&page->_mapcount, 1);
                table = (unsigned long *) page_to_phys(page);
-               clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
+               clear_table(table, _PAGE_INVALID, PAGE_SIZE);
                spin_lock_bh(&mm->context.list_lock);
                list_add(&page->lru, &mm->context.pgtable_list);
        } else {
@@ -1007,7 +1008,6 @@ void tlb_table_flush(struct mmu_gather *tlb)
        struct mmu_table_batch **batch = &tlb->batch;
 
        if (*batch) {
-               __tlb_flush_mm(tlb->mm);
                call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
                *batch = NULL;
        }
@@ -1017,11 +1017,12 @@ void tlb_remove_table(struct mmu_gather *tlb, void *table)
 {
        struct mmu_table_batch **batch = &tlb->batch;
 
+       tlb->mm->context.flush_mm = 1;
        if (*batch == NULL) {
                *batch = (struct mmu_table_batch *)
                        __get_free_page(GFP_NOWAIT | __GFP_NOWARN);
                if (*batch == NULL) {
-                       __tlb_flush_mm(tlb->mm);
+                       __tlb_flush_mm_lazy(tlb->mm);
                        tlb_remove_table_one(table);
                        return;
                }
@@ -1029,7 +1030,7 @@ void tlb_remove_table(struct mmu_gather *tlb, void *table)
        }
        (*batch)->tables[(*batch)->nr++] = table;
        if ((*batch)->nr == MAX_TABLE_BATCH)
-               tlb_table_flush(tlb);
+               tlb_flush_mmu(tlb);
 }
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -1198,9 +1199,9 @@ pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
                list_del(lh);
        }
        ptep = (pte_t *) pgtable;
-       pte_val(*ptep) = _PAGE_TYPE_EMPTY;
+       pte_val(*ptep) = _PAGE_INVALID;
        ptep++;
-       pte_val(*ptep) = _PAGE_TYPE_EMPTY;
+       pte_val(*ptep) = _PAGE_INVALID;
        return pgtable;
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
index 8b268fcc4612e92a1f9ab70eed81a4c664adaa8b..e1299d40818ddcb04967fc913a4893a6c5939b9a 100644 (file)
@@ -69,7 +69,7 @@ static pte_t __ref *vmem_pte_alloc(unsigned long address)
                pte = alloc_bootmem(PTRS_PER_PTE * sizeof(pte_t));
        if (!pte)
                return NULL;
-       clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY,
+       clear_table((unsigned long *) pte, _PAGE_INVALID,
                    PTRS_PER_PTE * sizeof(pte_t));
        return pte;
 }
@@ -101,7 +101,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
                    !(address & ~PUD_MASK) && (address + PUD_SIZE <= end)) {
                        pud_val(*pu_dir) = __pa(address) |
                                _REGION_ENTRY_TYPE_R3 | _REGION3_ENTRY_LARGE |
-                               (ro ? _REGION_ENTRY_RO : 0);
+                               (ro ? _REGION_ENTRY_PROTECT : 0);
                        address += PUD_SIZE;
                        continue;
                }
@@ -118,7 +118,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
                    !(address & ~PMD_MASK) && (address + PMD_SIZE <= end)) {
                        pmd_val(*pm_dir) = __pa(address) |
                                _SEGMENT_ENTRY | _SEGMENT_ENTRY_LARGE |
-                               (ro ? _SEGMENT_ENTRY_RO : 0);
+                               (ro ? _SEGMENT_ENTRY_PROTECT : 0);
                        address += PMD_SIZE;
                        continue;
                }
@@ -131,7 +131,8 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
                }
 
                pt_dir = pte_offset_kernel(pm_dir, address);
-               pte_val(*pt_dir) = __pa(address) | (ro ? _PAGE_RO : 0);
+               pte_val(*pt_dir) = __pa(address) |
+                       pgprot_val(ro ? PAGE_KERNEL_RO : PAGE_KERNEL);
                address += PAGE_SIZE;
        }
        ret = 0;
@@ -154,7 +155,7 @@ static void vmem_remove_range(unsigned long start, unsigned long size)
        pte_t *pt_dir;
        pte_t  pte;
 
-       pte_val(pte) = _PAGE_TYPE_EMPTY;
+       pte_val(pte) = _PAGE_INVALID;
        while (address < end) {
                pg_dir = pgd_offset_k(address);
                if (pgd_none(*pg_dir)) {
@@ -255,7 +256,8 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
                        new_page =__pa(vmem_alloc_pages(0));
                        if (!new_page)
                                goto out;
-                       pte_val(*pt_dir) = __pa(new_page);
+                       pte_val(*pt_dir) =
+                               __pa(new_page) | pgprot_val(PAGE_KERNEL);
                }
                address += PAGE_SIZE;
        }
index 086a2e37935d22cd48d9cb879d30651d89c03e2f..a9e1dc4ae442bacc688d392509de7e0607c7147d 100644 (file)
@@ -2,5 +2,5 @@
 # Makefile for the s390 PCI subsystem.
 #
 
-obj-$(CONFIG_PCI)      += pci.o pci_dma.o pci_clp.o pci_msi.o pci_sysfs.o \
+obj-$(CONFIG_PCI)      += pci.o pci_dma.o pci_clp.o pci_sysfs.o \
                           pci_event.o pci_debug.o pci_insn.o
index e2956ad39a4f59ac2e8ffbe2a8626b2d9c84d363..d65dc4f50e2a7ed67be41db1ad711593650b3794 100644 (file)
@@ -42,7 +42,6 @@
 #define        SIC_IRQ_MODE_SINGLE             1
 
 #define ZPCI_NR_DMA_SPACES             1
-#define ZPCI_MSI_VEC_BITS              6
 #define ZPCI_NR_DEVICES                        CONFIG_PCI_NR_FUNCTIONS
 
 /* list of all detected zpci devices */
@@ -51,36 +50,23 @@ EXPORT_SYMBOL_GPL(zpci_list);
 DEFINE_MUTEX(zpci_list_lock);
 EXPORT_SYMBOL_GPL(zpci_list_lock);
 
-static struct pci_hp_callback_ops *hotplug_ops;
 
-static DECLARE_BITMAP(zpci_domain, ZPCI_NR_DEVICES);
-static DEFINE_SPINLOCK(zpci_domain_lock);
+static void zpci_enable_irq(struct irq_data *data);
+static void zpci_disable_irq(struct irq_data *data);
 
-struct callback {
-       irq_handler_t   handler;
-       void            *data;
+static struct irq_chip zpci_irq_chip = {
+       .name = "zPCI",
+       .irq_unmask = zpci_enable_irq,
+       .irq_mask = zpci_disable_irq,
 };
 
-struct zdev_irq_map {
-       unsigned long   aibv;           /* AI bit vector */
-       int             msi_vecs;       /* consecutive MSI-vectors used */
-       int             __unused;
-       struct callback cb[ZPCI_NR_MSI_VECS]; /* callback handler array */
-       spinlock_t      lock;           /* protect callbacks against de-reg */
-};
+static struct pci_hp_callback_ops *hotplug_ops;
 
-struct intr_bucket {
-       /* amap of adapters, one bit per dev, corresponds to one irq nr */
-       unsigned long   *alloc;
-       /* AI summary bit, global page for all devices */
-       unsigned long   *aisb;
-       /* pointer to aibv and callback data in zdev */
-       struct zdev_irq_map *imap[ZPCI_NR_DEVICES];
-       /* protects the whole bucket struct */
-       spinlock_t      lock;
-};
+static DECLARE_BITMAP(zpci_domain, ZPCI_NR_DEVICES);
+static DEFINE_SPINLOCK(zpci_domain_lock);
 
-static struct intr_bucket *bucket;
+static struct airq_iv *zpci_aisb_iv;
+static struct airq_iv *zpci_aibv[ZPCI_NR_DEVICES];
 
 /* Adapter interrupt definitions */
 static void zpci_irq_handler(struct airq_struct *airq);
@@ -96,27 +82,8 @@ static DECLARE_BITMAP(zpci_iomap, ZPCI_IOMAP_MAX_ENTRIES);
 struct zpci_iomap_entry *zpci_iomap_start;
 EXPORT_SYMBOL_GPL(zpci_iomap_start);
 
-/* highest irq summary bit */
-static int __read_mostly aisb_max;
-
-static struct kmem_cache *zdev_irq_cache;
 static struct kmem_cache *zdev_fmb_cache;
 
-static inline int irq_to_msi_nr(unsigned int irq)
-{
-       return irq & ZPCI_MSI_MASK;
-}
-
-static inline int irq_to_dev_nr(unsigned int irq)
-{
-       return irq >> ZPCI_MSI_VEC_BITS;
-}
-
-static inline struct zdev_irq_map *get_imap(unsigned int irq)
-{
-       return bucket->imap[irq_to_dev_nr(irq)];
-}
-
 struct zpci_dev *get_zdev(struct pci_dev *pdev)
 {
        return (struct zpci_dev *) pdev->sysdata;
@@ -160,8 +127,7 @@ int pci_proc_domain(struct pci_bus *bus)
 EXPORT_SYMBOL_GPL(pci_proc_domain);
 
 /* Modify PCI: Register adapter interruptions */
-static int zpci_register_airq(struct zpci_dev *zdev, unsigned int aisb,
-                             u64 aibv)
+static int zpci_set_airq(struct zpci_dev *zdev)
 {
        u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_REG_INT);
        struct zpci_fib *fib;
@@ -172,14 +138,14 @@ static int zpci_register_airq(struct zpci_dev *zdev, unsigned int aisb,
                return -ENOMEM;
 
        fib->isc = PCI_ISC;
-       fib->noi = zdev->irq_map->msi_vecs;
        fib->sum = 1;           /* enable summary notifications */
-       fib->aibv = aibv;
-       fib->aibvo = 0;         /* every function has its own page */
-       fib->aisb = (u64) bucket->aisb + aisb / 8;
-       fib->aisbo = aisb & ZPCI_MSI_MASK;
+       fib->noi = airq_iv_end(zdev->aibv);
+       fib->aibv = (unsigned long) zdev->aibv->vector;
+       fib->aibvo = 0;         /* each zdev has its own interrupt vector */
+       fib->aisb = (unsigned long) zpci_aisb_iv->vector + (zdev->aisb/64)*8;
+       fib->aisbo = zdev->aisb & 63;
 
-       rc = s390pci_mod_fc(req, fib);
+       rc = zpci_mod_fc(req, fib);
        pr_debug("%s mpcifc returned noi: %d\n", __func__, fib->noi);
 
        free_page((unsigned long) fib);
@@ -209,7 +175,7 @@ static int mod_pci(struct zpci_dev *zdev, int fn, u8 dmaas, struct mod_pci_args
        fib->iota = args->iota;
        fib->fmb_addr = args->fmb_addr;
 
-       rc = s390pci_mod_fc(req, fib);
+       rc = zpci_mod_fc(req, fib);
        free_page((unsigned long) fib);
        return rc;
 }
@@ -234,7 +200,7 @@ int zpci_unregister_ioat(struct zpci_dev *zdev, u8 dmaas)
 }
 
 /* Modify PCI: Unregister adapter interruptions */
-static int zpci_unregister_airq(struct zpci_dev *zdev)
+static int zpci_clear_airq(struct zpci_dev *zdev)
 {
        struct mod_pci_args args = { 0, 0, 0, 0 };
 
@@ -283,7 +249,7 @@ static int zpci_cfg_load(struct zpci_dev *zdev, int offset, u32 *val, u8 len)
        u64 data;
        int rc;
 
-       rc = s390pci_load(&data, req, offset);
+       rc = zpci_load(&data, req, offset);
        if (!rc) {
                data = data << ((8 - len) * 8);
                data = le64_to_cpu(data);
@@ -301,25 +267,46 @@ static int zpci_cfg_store(struct zpci_dev *zdev, int offset, u32 val, u8 len)
 
        data = cpu_to_le64(data);
        data = data >> ((8 - len) * 8);
-       rc = s390pci_store(data, req, offset);
+       rc = zpci_store(data, req, offset);
        return rc;
 }
 
-void enable_irq(unsigned int irq)
+static int zpci_msi_set_mask_bits(struct msi_desc *msi, u32 mask, u32 flag)
 {
-       struct msi_desc *msi = irq_get_msi_desc(irq);
+       int offset, pos;
+       u32 mask_bits;
+
+       if (msi->msi_attrib.is_msix) {
+               offset = msi->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
+                       PCI_MSIX_ENTRY_VECTOR_CTRL;
+               msi->masked = readl(msi->mask_base + offset);
+               writel(flag, msi->mask_base + offset);
+       } else if (msi->msi_attrib.maskbit) {
+               pos = (long) msi->mask_base;
+               pci_read_config_dword(msi->dev, pos, &mask_bits);
+               mask_bits &= ~(mask);
+               mask_bits |= flag & mask;
+               pci_write_config_dword(msi->dev, pos, mask_bits);
+       } else
+               return 0;
+
+       msi->msi_attrib.maskbit = !!flag;
+       return 1;
+}
+
+static void zpci_enable_irq(struct irq_data *data)
+{
+       struct msi_desc *msi = irq_get_msi_desc(data->irq);
 
        zpci_msi_set_mask_bits(msi, 1, 0);
 }
-EXPORT_SYMBOL_GPL(enable_irq);
 
-void disable_irq(unsigned int irq)
+static void zpci_disable_irq(struct irq_data *data)
 {
-       struct msi_desc *msi = irq_get_msi_desc(irq);
+       struct msi_desc *msi = irq_get_msi_desc(data->irq);
 
        zpci_msi_set_mask_bits(msi, 1, 1);
 }
-EXPORT_SYMBOL_GPL(disable_irq);
 
 void pcibios_fixup_bus(struct pci_bus *bus)
 {
@@ -404,152 +391,147 @@ static struct pci_ops pci_root_ops = {
        .write = pci_write,
 };
 
-/* store the last handled bit to implement fair scheduling of devices */
-static DEFINE_PER_CPU(unsigned long, next_sbit);
-
 static void zpci_irq_handler(struct airq_struct *airq)
 {
-       unsigned long sbit, mbit, last = 0, start = __get_cpu_var(next_sbit);
-       int rescan = 0, max = aisb_max;
-       struct zdev_irq_map *imap;
+       unsigned long si, ai;
+       struct airq_iv *aibv;
+       int irqs_on = 0;
 
        inc_irq_stat(IRQIO_PCI);
-       sbit = start;
-
-scan:
-       /* find summary_bit */
-       for_each_set_bit_left_cont(sbit, bucket->aisb, max) {
-               clear_bit(63 - (sbit & 63), bucket->aisb + (sbit >> 6));
-               last = sbit;
+       for (si = 0;;) {
+               /* Scan adapter summary indicator bit vector */
+               si = airq_iv_scan(zpci_aisb_iv, si, airq_iv_end(zpci_aisb_iv));
+               if (si == -1UL) {
+                       if (irqs_on++)
+                               /* End of second scan with interrupts on. */
+                               break;
+                       /* First scan complete, reenable interrupts. */
+                       zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC);
+                       si = 0;
+                       continue;
+               }
 
-               /* find vector bit */
-               imap = bucket->imap[sbit];
-               for_each_set_bit_left(mbit, &imap->aibv, imap->msi_vecs) {
+               /* Scan the adapter interrupt vector for this device. */
+               aibv = zpci_aibv[si];
+               for (ai = 0;;) {
+                       ai = airq_iv_scan(aibv, ai, airq_iv_end(aibv));
+                       if (ai == -1UL)
+                               break;
                        inc_irq_stat(IRQIO_MSI);
-                       clear_bit(63 - mbit, &imap->aibv);
-
-                       spin_lock(&imap->lock);
-                       if (imap->cb[mbit].handler)
-                               imap->cb[mbit].handler(mbit,
-                                       imap->cb[mbit].data);
-                       spin_unlock(&imap->lock);
+                       airq_iv_lock(aibv, ai);
+                       generic_handle_irq(airq_iv_get_data(aibv, ai));
+                       airq_iv_unlock(aibv, ai);
                }
        }
-
-       if (rescan)
-               goto out;
-
-       /* scan the skipped bits */
-       if (start > 0) {
-               sbit = 0;
-               max = start;
-               start = 0;
-               goto scan;
-       }
-
-       /* enable interrupts again */
-       set_irq_ctrl(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC);
-
-       /* check again to not lose initiative */
-       rmb();
-       max = aisb_max;
-       sbit = find_first_bit_left(bucket->aisb, max);
-       if (sbit != max) {
-               rescan++;
-               goto scan;
-       }
-out:
-       /* store next device bit to scan */
-       __get_cpu_var(next_sbit) = (++last >= aisb_max) ? 0 : last;
 }
 
-/* msi_vecs - number of requested interrupts, 0 place function to error state */
-static int zpci_setup_msi(struct pci_dev *pdev, int msi_vecs)
+int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
 {
        struct zpci_dev *zdev = get_zdev(pdev);
-       unsigned int aisb, msi_nr;
+       unsigned int hwirq, irq, msi_vecs;
+       unsigned long aisb;
        struct msi_desc *msi;
+       struct msi_msg msg;
        int rc;
 
-       /* store the number of used MSI vectors */
-       zdev->irq_map->msi_vecs = min(msi_vecs, ZPCI_NR_MSI_VECS);
-
-       spin_lock(&bucket->lock);
-       aisb = find_first_zero_bit(bucket->alloc, PAGE_SIZE);
-       /* alloc map exhausted? */
-       if (aisb == PAGE_SIZE) {
-               spin_unlock(&bucket->lock);
-               return -EIO;
-       }
-       set_bit(aisb, bucket->alloc);
-       spin_unlock(&bucket->lock);
+       pr_debug("%s: requesting %d MSI-X interrupts...", __func__, nvec);
+       if (type != PCI_CAP_ID_MSIX && type != PCI_CAP_ID_MSI)
+               return -EINVAL;
+       msi_vecs = min(nvec, ZPCI_MSI_VEC_MAX);
+       msi_vecs = min_t(unsigned int, msi_vecs, CONFIG_PCI_NR_MSI);
 
+       /* Allocate adapter summary indicator bit */
+       rc = -EIO;
+       aisb = airq_iv_alloc_bit(zpci_aisb_iv);
+       if (aisb == -1UL)
+               goto out;
        zdev->aisb = aisb;
-       if (aisb + 1 > aisb_max)
-               aisb_max = aisb + 1;
 
-       /* wire up IRQ shortcut pointer */
-       bucket->imap[zdev->aisb] = zdev->irq_map;
-       pr_debug("%s: imap[%u] linked to %p\n", __func__, zdev->aisb, zdev->irq_map);
+       /* Create adapter interrupt vector */
+       rc = -ENOMEM;
+       zdev->aibv = airq_iv_create(msi_vecs, AIRQ_IV_DATA | AIRQ_IV_BITLOCK);
+       if (!zdev->aibv)
+               goto out_si;
 
-       /* TODO: irq number 0 wont be found if we return less than requested MSIs.
-        * ignore it for now and fix in common code.
-        */
-       msi_nr = aisb << ZPCI_MSI_VEC_BITS;
+       /* Wire up shortcut pointer */
+       zpci_aibv[aisb] = zdev->aibv;
 
+       /* Request MSI interrupts */
+       hwirq = 0;
        list_for_each_entry(msi, &pdev->msi_list, list) {
-               rc = zpci_setup_msi_irq(zdev, msi, msi_nr,
-                                         aisb << ZPCI_MSI_VEC_BITS);
+               rc = -EIO;
+               irq = irq_alloc_desc(0);        /* Alloc irq on node 0 */
+               if (irq == NO_IRQ)
+                       goto out_msi;
+               rc = irq_set_msi_desc(irq, msi);
                if (rc)
-                       return rc;
-               msi_nr++;
+                       goto out_msi;
+               irq_set_chip_and_handler(irq, &zpci_irq_chip,
+                                        handle_simple_irq);
+               msg.data = hwirq;
+               msg.address_lo = zdev->msi_addr & 0xffffffff;
+               msg.address_hi = zdev->msi_addr >> 32;
+               write_msi_msg(irq, &msg);
+               airq_iv_set_data(zdev->aibv, hwirq, irq);
+               hwirq++;
        }
 
-       rc = zpci_register_airq(zdev, aisb, (u64) &zdev->irq_map->aibv);
-       if (rc) {
-               clear_bit(aisb, bucket->alloc);
-               dev_err(&pdev->dev, "register MSI failed with: %d\n", rc);
-               return rc;
+       /* Enable adapter interrupts */
+       rc = zpci_set_airq(zdev);
+       if (rc)
+               goto out_msi;
+
+       return (msi_vecs == nvec) ? 0 : msi_vecs;
+
+out_msi:
+       list_for_each_entry(msi, &pdev->msi_list, list) {
+               if (hwirq-- == 0)
+                       break;
+               irq_set_msi_desc(msi->irq, NULL);
+               irq_free_desc(msi->irq);
+               msi->msg.address_lo = 0;
+               msi->msg.address_hi = 0;
+               msi->msg.data = 0;
+               msi->irq = 0;
        }
-       return (zdev->irq_map->msi_vecs == msi_vecs) ?
-               0 : zdev->irq_map->msi_vecs;
+       zpci_aibv[aisb] = NULL;
+       airq_iv_release(zdev->aibv);
+out_si:
+       airq_iv_free_bit(zpci_aisb_iv, aisb);
+out:
+       dev_err(&pdev->dev, "register MSI failed with: %d\n", rc);
+       return rc;
 }
 
-static void zpci_teardown_msi(struct pci_dev *pdev)
+void arch_teardown_msi_irqs(struct pci_dev *pdev)
 {
        struct zpci_dev *zdev = get_zdev(pdev);
        struct msi_desc *msi;
-       int aisb, rc;
+       int rc;
 
-       rc = zpci_unregister_airq(zdev);
+       pr_info("%s: on pdev: %p\n", __func__, pdev);
+
+       /* Disable adapter interrupts */
+       rc = zpci_clear_airq(zdev);
        if (rc) {
                dev_err(&pdev->dev, "deregister MSI failed with: %d\n", rc);
                return;
        }
 
-       msi = list_first_entry(&pdev->msi_list, struct msi_desc, list);
-       aisb = irq_to_dev_nr(msi->irq);
-
-       list_for_each_entry(msi, &pdev->msi_list, list)
-               zpci_teardown_msi_irq(zdev, msi);
-
-       clear_bit(aisb, bucket->alloc);
-       if (aisb + 1 == aisb_max)
-               aisb_max--;
-}
-
-int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
-{
-       pr_debug("%s: requesting %d MSI-X interrupts...", __func__, nvec);
-       if (type != PCI_CAP_ID_MSIX && type != PCI_CAP_ID_MSI)
-               return -EINVAL;
-       return zpci_setup_msi(pdev, nvec);
-}
+       /* Release MSI interrupts */
+       list_for_each_entry(msi, &pdev->msi_list, list) {
+               zpci_msi_set_mask_bits(msi, 1, 1);
+               irq_set_msi_desc(msi->irq, NULL);
+               irq_free_desc(msi->irq);
+               msi->msg.address_lo = 0;
+               msi->msg.address_hi = 0;
+               msi->msg.data = 0;
+               msi->irq = 0;
+       }
 
-void arch_teardown_msi_irqs(struct pci_dev *pdev)
-{
-       pr_info("%s: on pdev: %p\n", __func__, pdev);
-       zpci_teardown_msi(pdev);
+       zpci_aibv[zdev->aisb] = NULL;
+       airq_iv_release(zdev->aibv);
+       airq_iv_free_bit(zpci_aisb_iv, zdev->aisb);
 }
 
 static void zpci_map_resources(struct zpci_dev *zdev)
@@ -589,24 +571,11 @@ struct zpci_dev *zpci_alloc_device(void)
 
        /* Alloc memory for our private pci device data */
        zdev = kzalloc(sizeof(*zdev), GFP_KERNEL);
-       if (!zdev)
-               return ERR_PTR(-ENOMEM);
-
-       /* Alloc aibv & callback space */
-       zdev->irq_map = kmem_cache_zalloc(zdev_irq_cache, GFP_KERNEL);
-       if (!zdev->irq_map)
-               goto error;
-       WARN_ON((u64) zdev->irq_map & 0xff);
-       return zdev;
-
-error:
-       kfree(zdev);
-       return ERR_PTR(-ENOMEM);
+       return zdev ? : ERR_PTR(-ENOMEM);
 }
 
 void zpci_free_device(struct zpci_dev *zdev)
 {
-       kmem_cache_free(zdev_irq_cache, zdev->irq_map);
        kfree(zdev);
 }
 
@@ -641,110 +610,34 @@ int pcibios_add_platform_entries(struct pci_dev *pdev)
        return zpci_sysfs_add_device(&pdev->dev);
 }
 
-int zpci_request_irq(unsigned int irq, irq_handler_t handler, void *data)
-{
-       int msi_nr = irq_to_msi_nr(irq);
-       struct zdev_irq_map *imap;
-       struct msi_desc *msi;
-
-       msi = irq_get_msi_desc(irq);
-       if (!msi)
-               return -EIO;
-
-       imap = get_imap(irq);
-       spin_lock_init(&imap->lock);
-
-       pr_debug("%s: register handler for IRQ:MSI %d:%d\n", __func__, irq >> 6, msi_nr);
-       imap->cb[msi_nr].handler = handler;
-       imap->cb[msi_nr].data = data;
-
-       /*
-        * The generic MSI code returns with the interrupt disabled on the
-        * card, using the MSI mask bits. Firmware doesn't appear to unmask
-        * at that level, so we do it here by hand.
-        */
-       zpci_msi_set_mask_bits(msi, 1, 0);
-       return 0;
-}
-
-void zpci_free_irq(unsigned int irq)
-{
-       struct zdev_irq_map *imap = get_imap(irq);
-       int msi_nr = irq_to_msi_nr(irq);
-       unsigned long flags;
-
-       pr_debug("%s: for irq: %d\n", __func__, irq);
-
-       spin_lock_irqsave(&imap->lock, flags);
-       imap->cb[msi_nr].handler = NULL;
-       imap->cb[msi_nr].data = NULL;
-       spin_unlock_irqrestore(&imap->lock, flags);
-}
-
-int request_irq(unsigned int irq, irq_handler_t handler,
-               unsigned long irqflags, const char *devname, void *dev_id)
-{
-       pr_debug("%s: irq: %d  handler: %p  flags: %lx  dev: %s\n",
-               __func__, irq, handler, irqflags, devname);
-
-       return zpci_request_irq(irq, handler, dev_id);
-}
-EXPORT_SYMBOL_GPL(request_irq);
-
-void free_irq(unsigned int irq, void *dev_id)
-{
-       zpci_free_irq(irq);
-}
-EXPORT_SYMBOL_GPL(free_irq);
-
 static int __init zpci_irq_init(void)
 {
-       int cpu, rc;
-
-       bucket = kzalloc(sizeof(*bucket), GFP_KERNEL);
-       if (!bucket)
-               return -ENOMEM;
-
-       bucket->aisb = (unsigned long *) get_zeroed_page(GFP_KERNEL);
-       if (!bucket->aisb) {
-               rc = -ENOMEM;
-               goto out_aisb;
-       }
-
-       bucket->alloc = (unsigned long *) get_zeroed_page(GFP_KERNEL);
-       if (!bucket->alloc) {
-               rc = -ENOMEM;
-               goto out_alloc;
-       }
+       int rc;
 
        rc = register_adapter_interrupt(&zpci_airq);
        if (rc)
-               goto out_ai;
+               goto out;
        /* Set summary to 1 to be called every time for the ISC. */
        *zpci_airq.lsi_ptr = 1;
 
-       for_each_online_cpu(cpu)
-               per_cpu(next_sbit, cpu) = 0;
+       rc = -ENOMEM;
+       zpci_aisb_iv = airq_iv_create(ZPCI_NR_DEVICES, AIRQ_IV_ALLOC);
+       if (!zpci_aisb_iv)
+               goto out_airq;
 
-       spin_lock_init(&bucket->lock);
-       set_irq_ctrl(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC);
+       zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC);
        return 0;
 
-out_ai:
-       free_page((unsigned long) bucket->alloc);
-out_alloc:
-       free_page((unsigned long) bucket->aisb);
-out_aisb:
-       kfree(bucket);
+out_airq:
+       unregister_adapter_interrupt(&zpci_airq);
+out:
        return rc;
 }
 
 static void zpci_irq_exit(void)
 {
-       free_page((unsigned long) bucket->alloc);
-       free_page((unsigned long) bucket->aisb);
+       airq_iv_release(zpci_aisb_iv);
        unregister_adapter_interrupt(&zpci_airq);
-       kfree(bucket);
 }
 
 static struct resource *zpci_alloc_bus_resource(unsigned long start, unsigned long size,
@@ -967,15 +860,10 @@ static inline int barsize(u8 size)
 
 static int zpci_mem_init(void)
 {
-       zdev_irq_cache = kmem_cache_create("PCI_IRQ_cache", sizeof(struct zdev_irq_map),
-                               L1_CACHE_BYTES, SLAB_HWCACHE_ALIGN, NULL);
-       if (!zdev_irq_cache)
-               goto error_zdev;
-
        zdev_fmb_cache = kmem_cache_create("PCI_FMB_cache", sizeof(struct zpci_fmb),
                                16, 0, NULL);
        if (!zdev_fmb_cache)
-               goto error_fmb;
+               goto error_zdev;
 
        /* TODO: use realloc */
        zpci_iomap_start = kzalloc(ZPCI_IOMAP_MAX_ENTRIES * sizeof(*zpci_iomap_start),
@@ -986,8 +874,6 @@ static int zpci_mem_init(void)
 
 error_iomap:
        kmem_cache_destroy(zdev_fmb_cache);
-error_fmb:
-       kmem_cache_destroy(zdev_irq_cache);
 error_zdev:
        return -ENOMEM;
 }
@@ -995,7 +881,6 @@ error_zdev:
 static void zpci_mem_exit(void)
 {
        kfree(zpci_iomap_start);
-       kmem_cache_destroy(zdev_irq_cache);
        kmem_cache_destroy(zdev_fmb_cache);
 }
 
@@ -1044,16 +929,12 @@ static int __init pci_base_init(void)
 
        rc = zpci_debug_init();
        if (rc)
-               return rc;
+               goto out;
 
        rc = zpci_mem_init();
        if (rc)
                goto out_mem;
 
-       rc = zpci_msihash_init();
-       if (rc)
-               goto out_hash;
-
        rc = zpci_irq_init();
        if (rc)
                goto out_irq;
@@ -1073,11 +954,10 @@ out_find:
 out_dma:
        zpci_irq_exit();
 out_irq:
-       zpci_msihash_exit();
-out_hash:
        zpci_mem_exit();
 out_mem:
        zpci_debug_exit();
+out:
        return rc;
 }
 subsys_initcall(pci_base_init);
index a2343c1f6e0494e0904871a10a69c312fb89fa47..2125310aa891b7beb4f6c9d15b5fe44a18a58ca0 100644 (file)
@@ -170,8 +170,8 @@ static int dma_update_trans(struct zpci_dev *zdev, unsigned long pa,
                 */
                goto no_refresh;
 
-       rc = s390pci_refresh_trans((u64) zdev->fh << 32, start_dma_addr,
-                                  nr_pages * PAGE_SIZE);
+       rc = zpci_refresh_trans((u64) zdev->fh << 32, start_dma_addr,
+                               nr_pages * PAGE_SIZE);
 
 no_refresh:
        spin_unlock_irqrestore(&zdev->dma_table_lock, irq_flags);
index 22eeb9d7ffebd3147d96b8b899af3fb92156b7b5..85267c058af8067d1b6527f05ec483ab0a15c8b0 100644 (file)
@@ -27,7 +27,7 @@ static inline u8 __mpcifc(u64 req, struct zpci_fib *fib, u8 *status)
        return cc;
 }
 
-int s390pci_mod_fc(u64 req, struct zpci_fib *fib)
+int zpci_mod_fc(u64 req, struct zpci_fib *fib)
 {
        u8 cc, status;
 
@@ -61,7 +61,7 @@ static inline u8 __rpcit(u64 fn, u64 addr, u64 range, u8 *status)
        return cc;
 }
 
-int s390pci_refresh_trans(u64 fn, u64 addr, u64 range)
+int zpci_refresh_trans(u64 fn, u64 addr, u64 range)
 {
        u8 cc, status;
 
@@ -78,7 +78,7 @@ int s390pci_refresh_trans(u64 fn, u64 addr, u64 range)
 }
 
 /* Set Interruption Controls */
-void set_irq_ctrl(u16 ctl, char *unused, u8 isc)
+void zpci_set_irq_ctrl(u16 ctl, char *unused, u8 isc)
 {
        asm volatile (
                "       .insn   rsy,0xeb00000000d1,%[ctl],%[isc],%[u]\n"
@@ -109,7 +109,7 @@ static inline int __pcilg(u64 *data, u64 req, u64 offset, u8 *status)
        return cc;
 }
 
-int s390pci_load(u64 *data, u64 req, u64 offset)
+int zpci_load(u64 *data, u64 req, u64 offset)
 {
        u8 status;
        int cc;
@@ -125,7 +125,7 @@ int s390pci_load(u64 *data, u64 req, u64 offset)
                            __func__, cc, status, req, offset);
        return (cc > 0) ? -EIO : cc;
 }
-EXPORT_SYMBOL_GPL(s390pci_load);
+EXPORT_SYMBOL_GPL(zpci_load);
 
 /* PCI Store */
 static inline int __pcistg(u64 data, u64 req, u64 offset, u8 *status)
@@ -147,7 +147,7 @@ static inline int __pcistg(u64 data, u64 req, u64 offset, u8 *status)
        return cc;
 }
 
-int s390pci_store(u64 data, u64 req, u64 offset)
+int zpci_store(u64 data, u64 req, u64 offset)
 {
        u8 status;
        int cc;
@@ -163,7 +163,7 @@ int s390pci_store(u64 data, u64 req, u64 offset)
                        __func__, cc, status, req, offset);
        return (cc > 0) ? -EIO : cc;
 }
-EXPORT_SYMBOL_GPL(s390pci_store);
+EXPORT_SYMBOL_GPL(zpci_store);
 
 /* PCI Store Block */
 static inline int __pcistb(const u64 *data, u64 req, u64 offset, u8 *status)
@@ -183,7 +183,7 @@ static inline int __pcistb(const u64 *data, u64 req, u64 offset, u8 *status)
        return cc;
 }
 
-int s390pci_store_block(const u64 *data, u64 req, u64 offset)
+int zpci_store_block(const u64 *data, u64 req, u64 offset)
 {
        u8 status;
        int cc;
@@ -199,4 +199,4 @@ int s390pci_store_block(const u64 *data, u64 req, u64 offset)
                            __func__, cc, status, req, offset);
        return (cc > 0) ? -EIO : cc;
 }
-EXPORT_SYMBOL_GPL(s390pci_store_block);
+EXPORT_SYMBOL_GPL(zpci_store_block);
diff --git a/arch/s390/pci/pci_msi.c b/arch/s390/pci/pci_msi.c
deleted file mode 100644 (file)
index b097aed..0000000
+++ /dev/null
@@ -1,142 +0,0 @@
-/*
- * Copyright IBM Corp. 2012
- *
- * Author(s):
- *   Jan Glauber <jang@linux.vnet.ibm.com>
- */
-
-#define COMPONENT "zPCI"
-#define pr_fmt(fmt) COMPONENT ": " fmt
-
-#include <linux/kernel.h>
-#include <linux/err.h>
-#include <linux/rculist.h>
-#include <linux/hash.h>
-#include <linux/pci.h>
-#include <linux/msi.h>
-#include <asm/hw_irq.h>
-
-/* mapping of irq numbers to msi_desc */
-static struct hlist_head *msi_hash;
-static const unsigned int msi_hash_bits = 8;
-#define MSI_HASH_BUCKETS (1U << msi_hash_bits)
-#define msi_hashfn(nr) hash_long(nr, msi_hash_bits)
-
-static DEFINE_SPINLOCK(msi_map_lock);
-
-struct msi_desc *__irq_get_msi_desc(unsigned int irq)
-{
-       struct msi_map *map;
-
-       hlist_for_each_entry_rcu(map,
-                       &msi_hash[msi_hashfn(irq)], msi_chain)
-               if (map->irq == irq)
-                       return map->msi;
-       return NULL;
-}
-
-int zpci_msi_set_mask_bits(struct msi_desc *msi, u32 mask, u32 flag)
-{
-       if (msi->msi_attrib.is_msix) {
-               int offset = msi->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
-                       PCI_MSIX_ENTRY_VECTOR_CTRL;
-               msi->masked = readl(msi->mask_base + offset);
-               writel(flag, msi->mask_base + offset);
-       } else {
-               if (msi->msi_attrib.maskbit) {
-                       int pos;
-                       u32 mask_bits;
-
-                       pos = (long) msi->mask_base;
-                       pci_read_config_dword(msi->dev, pos, &mask_bits);
-                       mask_bits &= ~(mask);
-                       mask_bits |= flag & mask;
-                       pci_write_config_dword(msi->dev, pos, mask_bits);
-               } else {
-                       return 0;
-               }
-       }
-
-       msi->msi_attrib.maskbit = !!flag;
-       return 1;
-}
-
-int zpci_setup_msi_irq(struct zpci_dev *zdev, struct msi_desc *msi,
-                       unsigned int nr, int offset)
-{
-       struct msi_map *map;
-       struct msi_msg msg;
-       int rc;
-
-       map = kmalloc(sizeof(*map), GFP_KERNEL);
-       if (map == NULL)
-               return -ENOMEM;
-
-       map->irq = nr;
-       map->msi = msi;
-       zdev->msi_map[nr & ZPCI_MSI_MASK] = map;
-       INIT_HLIST_NODE(&map->msi_chain);
-
-       pr_debug("%s hashing irq: %u  to bucket nr: %llu\n",
-               __func__, nr, msi_hashfn(nr));
-       hlist_add_head_rcu(&map->msi_chain, &msi_hash[msi_hashfn(nr)]);
-
-       spin_lock(&msi_map_lock);
-       rc = irq_set_msi_desc(nr, msi);
-       if (rc) {
-               spin_unlock(&msi_map_lock);
-               hlist_del_rcu(&map->msi_chain);
-               kfree(map);
-               zdev->msi_map[nr & ZPCI_MSI_MASK] = NULL;
-               return rc;
-       }
-       spin_unlock(&msi_map_lock);
-
-       msg.data = nr - offset;
-       msg.address_lo = zdev->msi_addr & 0xffffffff;
-       msg.address_hi = zdev->msi_addr >> 32;
-       write_msi_msg(nr, &msg);
-       return 0;
-}
-
-void zpci_teardown_msi_irq(struct zpci_dev *zdev, struct msi_desc *msi)
-{
-       int irq = msi->irq & ZPCI_MSI_MASK;
-       struct msi_map *map;
-
-       msi->msg.address_lo = 0;
-       msi->msg.address_hi = 0;
-       msi->msg.data = 0;
-       msi->irq = 0;
-       zpci_msi_set_mask_bits(msi, 1, 1);
-
-       spin_lock(&msi_map_lock);
-       map = zdev->msi_map[irq];
-       hlist_del_rcu(&map->msi_chain);
-       kfree(map);
-       zdev->msi_map[irq] = NULL;
-       spin_unlock(&msi_map_lock);
-}
-
-/*
- * The msi hash table has 256 entries which is good for 4..20
- * devices (a typical device allocates 10 + CPUs MSI's). Maybe make
- * the hash table size adjustable later.
- */
-int __init zpci_msihash_init(void)
-{
-       unsigned int i;
-
-       msi_hash = kmalloc(MSI_HASH_BUCKETS * sizeof(*msi_hash), GFP_KERNEL);
-       if (!msi_hash)
-               return -ENOMEM;
-
-       for (i = 0; i < MSI_HASH_BUCKETS; i++)
-               INIT_HLIST_HEAD(&msi_hash[i]);
-       return 0;
-}
-
-void __init zpci_msihash_exit(void)
-{
-       kfree(msi_hash);
-}
index 4d94dff9015c97fe27a58affaab8c8a4c37b132b..7291e2f11a4748e64f22475b6b1be2efc8fba304 100644 (file)
@@ -80,7 +80,6 @@ static struct resource sh_eth_resources[] = {
 static struct sh_eth_plat_data sh7763_eth_pdata = {
        .phy = 0,
        .edmac_endian = EDMAC_LITTLE_ENDIAN,
-       .register_type = SH_ETH_REG_GIGABIT,
        .phy_interface = PHY_INTERFACE_MODE_MII,
 };
 
index 4f114d1cd0198ea78160adf44450c8420ac8cfa1..25c5a932f9fed68f104283d9d64e71de2c158a2d 100644 (file)
@@ -77,7 +77,6 @@ static struct resource sh_eth0_resources[] = {
 static struct sh_eth_plat_data sh7757_eth0_pdata = {
        .phy = 1,
        .edmac_endian = EDMAC_LITTLE_ENDIAN,
-       .register_type = SH_ETH_REG_FAST_SH4,
        .set_mdio_gate = sh7757_eth_set_mdio_gate,
 };
 
@@ -106,7 +105,6 @@ static struct resource sh_eth1_resources[] = {
 static struct sh_eth_plat_data sh7757_eth1_pdata = {
        .phy = 1,
        .edmac_endian = EDMAC_LITTLE_ENDIAN,
-       .register_type = SH_ETH_REG_FAST_SH4,
        .set_mdio_gate = sh7757_eth_set_mdio_gate,
 };
 
@@ -151,7 +149,6 @@ static struct resource sh_eth_giga0_resources[] = {
 static struct sh_eth_plat_data sh7757_eth_giga0_pdata = {
        .phy = 18,
        .edmac_endian = EDMAC_LITTLE_ENDIAN,
-       .register_type = SH_ETH_REG_GIGABIT,
        .set_mdio_gate = sh7757_eth_giga_set_mdio_gate,
        .phy_interface = PHY_INTERFACE_MODE_RGMII_ID,
 };
@@ -186,7 +183,6 @@ static struct resource sh_eth_giga1_resources[] = {
 static struct sh_eth_plat_data sh7757_eth_giga1_pdata = {
        .phy = 19,
        .edmac_endian = EDMAC_LITTLE_ENDIAN,
-       .register_type = SH_ETH_REG_GIGABIT,
        .set_mdio_gate = sh7757_eth_giga_set_mdio_gate,
        .phy_interface = PHY_INTERFACE_MODE_RGMII_ID,
 };
index 61fade0ffa965736427b2b2091ad50d097b59d39..a4f630f04ea3a0e73740fadfed834806fdc78ef8 100644 (file)
@@ -159,7 +159,6 @@ static struct resource sh_eth_resources[] = {
 static struct sh_eth_plat_data sh_eth_plat = {
        .phy = 0x1f, /* SMSC LAN8700 */
        .edmac_endian = EDMAC_LITTLE_ENDIAN,
-       .register_type = SH_ETH_REG_FAST_SH4,
        .phy_interface = PHY_INTERFACE_MODE_MII,
        .ether_link_active_low = 1
 };
index b70180ef3e2978832f33f2aec85322a183978b22..e96e053f260935d383d26d14420f0f4ade724eff 100644 (file)
@@ -365,7 +365,7 @@ static struct platform_device keysc_device = {
 static struct resource sh_eth_resources[] = {
        [0] = {
                .start = SH_ETH_ADDR,
-               .end   = SH_ETH_ADDR + 0x1FC,
+               .end   = SH_ETH_ADDR + 0x1FC - 1,
                .flags = IORESOURCE_MEM,
        },
        [1] = {
@@ -377,6 +377,7 @@ static struct resource sh_eth_resources[] = {
 static struct sh_eth_plat_data sh_eth_plat = {
        .phy = 0x1f, /* SMSC LAN8187 */
        .edmac_endian = EDMAC_LITTLE_ENDIAN,
+       .phy_interface = PHY_INTERFACE_MODE_MII,
 };
 
 static struct platform_device sh_eth_device = {
index 50ba481fa240c155c0764572d11c5ef74cedaac8..2c8fb04685d4c05cf2aa2954c615290505f200b8 100644 (file)
@@ -88,7 +88,6 @@ static struct resource sh_eth_resources[] = {
 static struct sh_eth_plat_data sh7763_eth_pdata = {
        .phy = 1,
        .edmac_endian = EDMAC_LITTLE_ENDIAN,
-       .register_type = SH_ETH_REG_GIGABIT,
        .phy_interface = PHY_INTERFACE_MODE_MII,
 };
 
index ec9ad593c3da743bacbd5875a577709a94e3333b..01a38696137e9952c9e07f91669685b3b349b50d 100644 (file)
@@ -7,6 +7,7 @@
 
 #include <linux/kdebug.h>
 #include <linux/types.h>
+#include <cpu/ubc.h>
 
 struct arch_hw_breakpoint {
        char            *name; /* Contains name of the symbol to set bkpt */
@@ -15,17 +16,6 @@ struct arch_hw_breakpoint {
        u16             type;
 };
 
-enum {
-       SH_BREAKPOINT_READ      = (1 << 1),
-       SH_BREAKPOINT_WRITE     = (1 << 2),
-       SH_BREAKPOINT_RW        = SH_BREAKPOINT_READ | SH_BREAKPOINT_WRITE,
-
-       SH_BREAKPOINT_LEN_1     = (1 << 12),
-       SH_BREAKPOINT_LEN_2     = (1 << 13),
-       SH_BREAKPOINT_LEN_4     = SH_BREAKPOINT_LEN_1 | SH_BREAKPOINT_LEN_2,
-       SH_BREAKPOINT_LEN_8     = (1 << 14),
-};
-
 struct sh_ubc {
        const char      *name;
        unsigned int    num_events;
diff --git a/arch/sh/include/cpu-common/cpu/ubc.h b/arch/sh/include/cpu-common/cpu/ubc.h
new file mode 100644 (file)
index 0000000..b604619
--- /dev/null
@@ -0,0 +1,17 @@
+#ifndef __ARCH_SH_CPU_UBC_H__
+#define __ARCH_SH_CPU_UBC_H__
+
+enum {
+       SH_BREAKPOINT_READ      = (1 << 1),
+       SH_BREAKPOINT_WRITE     = (1 << 2),
+       SH_BREAKPOINT_RW        = SH_BREAKPOINT_READ | SH_BREAKPOINT_WRITE,
+
+       SH_BREAKPOINT_LEN_1     = (1 << 12),
+       SH_BREAKPOINT_LEN_2     = (1 << 13),
+       SH_BREAKPOINT_LEN_4     = SH_BREAKPOINT_LEN_1 | SH_BREAKPOINT_LEN_2,
+       SH_BREAKPOINT_LEN_8     = (1 << 14),
+};
+
+#define UBC_64BIT      1
+
+#endif /* __ARCH_SH_CPU_UBC_H__ */
diff --git a/arch/sh/include/cpu-sh2a/cpu/ubc.h b/arch/sh/include/cpu-sh2a/cpu/ubc.h
new file mode 100644 (file)
index 0000000..3371f90
--- /dev/null
@@ -0,0 +1,14 @@
+#ifndef __ARCH_SH_CPU_UBC_H__
+#define __ARCH_SH_CPU_UBC_H__
+
+enum {
+       SH_BREAKPOINT_READ      = (1 << 2),
+       SH_BREAKPOINT_WRITE     = (1 << 3),
+       SH_BREAKPOINT_RW        = SH_BREAKPOINT_READ | SH_BREAKPOINT_WRITE,
+
+       SH_BREAKPOINT_LEN_1     = (1 << 0),
+       SH_BREAKPOINT_LEN_2     = (1 << 1),
+       SH_BREAKPOINT_LEN_4     = SH_BREAKPOINT_LEN_1 | SH_BREAKPOINT_LEN_2,
+};
+
+#endif /* __ARCH_SH_CPU_UBC_H__ */
index bb11e19251784289f50ed7ce089af68ff23674eb..19472817e27444c0e3fd3e5d65793af4d7d02981 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/init.h>
 #include <linux/serial.h>
 #include <linux/serial_sci.h>
+#include <linux/sh_eth.h>
 #include <linux/sh_timer.h>
 #include <linux/io.h>
 
@@ -110,10 +111,16 @@ static struct platform_device scif2_device = {
        },
 };
 
+static struct sh_eth_plat_data eth_platform_data = {
+       .phy            = 1,
+       .edmac_endian   = EDMAC_LITTLE_ENDIAN,
+       .phy_interface  = PHY_INTERFACE_MODE_MII,
+};
+
 static struct resource eth_resources[] = {
        [0] = {
                .start = 0xfb000000,
-               .end =   0xfb0001c8,
+               .end = 0xfb0001c7,
                .flags = IORESOURCE_MEM,
        },
        [1] = {
@@ -127,7 +134,7 @@ static struct platform_device eth_device = {
        .name = "sh7619-ether",
        .id = -1,
        .dev = {
-               .platform_data = (void *)1,
+               .platform_data = &eth_platform_data,
        },
        .num_resources = ARRAY_SIZE(eth_resources),
        .resource = eth_resources,
index 990195d9845607bfcca4a8b3cfcd05c41df582bc..92f0da4c86a7533e18269e6f5d439130e1a71815 100644 (file)
@@ -22,3 +22,4 @@ pinmux-$(CONFIG_CPU_SUBTYPE_SH7264)   := pinmux-sh7264.o
 pinmux-$(CONFIG_CPU_SUBTYPE_SH7269)    := pinmux-sh7269.o
 
 obj-$(CONFIG_GPIOLIB)                  += $(pinmux-y)
+obj-$(CONFIG_HAVE_HW_BREAKPOINT)       += ubc.o
diff --git a/arch/sh/kernel/cpu/sh2a/ubc.c b/arch/sh/kernel/cpu/sh2a/ubc.c
new file mode 100644 (file)
index 0000000..ef95a9b
--- /dev/null
@@ -0,0 +1,154 @@
+/*
+ * arch/sh/kernel/cpu/sh2a/ubc.c
+ *
+ * On-chip UBC support for SH-2A CPUs.
+ *
+ * Copyright (C) 2009 - 2010  Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <asm/hw_breakpoint.h>
+
+#define UBC_BAR(idx)   (0xfffc0400 + (0x10 * idx))
+#define UBC_BAMR(idx)  (0xfffc0404 + (0x10 * idx))
+#define UBC_BBR(idx)   (0xfffc04A0 + (0x10 * idx))
+#define UBC_BDR(idx)   (0xfffc0408 + (0x10 * idx))
+#define UBC_BDMR(idx)  (0xfffc040C + (0x10 * idx))
+
+#define UBC_BRCR       0xfffc04C0
+
+/* BBR */
+#define UBC_BBR_UBID   (1 << 13)     /* User Break Interrupt Disable */
+#define UBC_BBR_DBE    (1 << 12)     /* Data Break Enable */
+#define UBC_BBR_CD_C   (1 << 6)      /* C Bus Cycle */
+#define UBC_BBR_CD_I   (2 << 6)      /* I Bus Cycle */
+#define UBC_BBR_ID_I   (1 << 4)      /* Break Condition is instruction fetch cycle */
+#define UBC_BBR_ID_D   (2 << 4)      /* Break Condition is data access cycle */
+#define UBC_BBR_ID_ID  (3 << 4)      /* Break Condition is instruction fetch or data access cycle */
+
+#define UBC_CRR_BIE    (1 << 0)
+
+/* CBR */
+#define UBC_CBR_CE     (1 << 0)
+
+static struct sh_ubc sh2a_ubc;
+
+static void sh2a_ubc_enable(struct arch_hw_breakpoint *info, int idx)
+{
+       __raw_writel(UBC_BBR_DBE | UBC_BBR_CD_C | UBC_BBR_ID_ID |
+                    info->len | info->type, UBC_BBR(idx));
+       __raw_writel(info->address, UBC_BAR(idx));
+}
+
+static void sh2a_ubc_disable(struct arch_hw_breakpoint *info, int idx)
+{
+       __raw_writel(UBC_BBR_UBID, UBC_BBR(idx));
+       __raw_writel(0, UBC_BAR(idx));
+}
+
+static void sh2a_ubc_enable_all(unsigned long mask)
+{
+       int i;
+
+       for (i = 0; i < sh2a_ubc.num_events; i++)
+               if (mask & (1 << i))
+                       __raw_writel(__raw_readl(UBC_BBR(i)) & ~UBC_BBR_UBID,
+                                    UBC_BBR(i));
+}
+
+static void sh2a_ubc_disable_all(void)
+{
+       int i;
+       
+       for (i = 0; i < sh2a_ubc.num_events; i++)
+               __raw_writel(__raw_readl(UBC_BBR(i)) | UBC_BBR_UBID,
+                            UBC_BBR(i));
+}
+
+static unsigned long sh2a_ubc_active_mask(void)
+{
+       unsigned long active = 0;
+       int i;
+
+       for (i = 0; i < sh2a_ubc.num_events; i++)
+               if (!(__raw_readl(UBC_BBR(i)) & UBC_BBR_UBID))
+                       active |= (1 << i);
+
+       return active;
+}
+
+static unsigned long sh2a_ubc_triggered_mask(void)
+{
+       unsigned int ret, mask;
+       
+       mask = 0;
+       ret = __raw_readl(UBC_BRCR);
+       if ((ret & (1 << 15)) || (ret & (1 << 13))) {
+               mask |= (1 << 0); /* Match condition for channel 0 */
+       } else 
+               mask &= ~(1 << 0);
+       
+       if ((ret & (1 << 14)) || (ret & (1 << 12))) {
+               mask |= (1 << 1); /* Match condition for channel 1 */
+       } else 
+               mask &= ~(1 << 1);
+
+       return mask;
+}
+
+static void sh2a_ubc_clear_triggered_mask(unsigned long mask)
+{
+       if (mask & (1 << 0)) /* Channel 0 statisfied break condition */
+               __raw_writel(__raw_readl(UBC_BRCR) &
+                            ~((1 << 15) | (1 << 13)), UBC_BRCR);
+       
+       if (mask & (1 << 1)) /* Channel 1 statisfied break condition */
+               __raw_writel(__raw_readl(UBC_BRCR) &
+                            ~((1 << 14) | (1 << 12)), UBC_BRCR);
+}
+
+static struct sh_ubc sh2a_ubc = {
+       .name                   = "SH-2A",
+       .num_events             = 2,
+       .trap_nr                = 0x1e0,
+       .enable                 = sh2a_ubc_enable,
+       .disable                = sh2a_ubc_disable,
+       .enable_all             = sh2a_ubc_enable_all,
+       .disable_all            = sh2a_ubc_disable_all,
+       .active_mask            = sh2a_ubc_active_mask,
+       .triggered_mask         = sh2a_ubc_triggered_mask,
+       .clear_triggered_mask   = sh2a_ubc_clear_triggered_mask,
+};
+
+static int __init sh2a_ubc_init(void)
+{
+       struct clk *ubc_iclk = clk_get(NULL, "ubc0");
+       int i;
+
+       /*
+        * The UBC MSTP bit is optional, as not all platforms will have
+        * it. Just ignore it if we can't find it.
+        */
+       if (IS_ERR(ubc_iclk))
+               ubc_iclk = NULL;
+
+       clk_enable(ubc_iclk);
+
+       for (i = 0; i < sh2a_ubc.num_events; i++) {
+               __raw_writel(0, UBC_BAMR(i));
+               __raw_writel(0, UBC_BBR(i));
+       }
+
+       clk_disable(ubc_iclk);
+
+       sh2a_ubc.clk = ubc_iclk;
+
+       return register_sh_ubc(&sh2a_ubc);
+}
+arch_initcall(sh2a_ubc_init);
index d3062259211679218c288b7cebd88517737c1482..e3abfd4277e22502f632f206a99425ed540fdb2b 100644 (file)
@@ -91,13 +91,11 @@ static struct cpuidle_driver cpuidle_driver = {
 
 int __init sh_mobile_setup_cpuidle(void)
 {
-       int ret;
-
        if (sh_mobile_sleep_supported & SUSP_SH_SF)
                cpuidle_driver.states[1].disabled = false;
 
        if (sh_mobile_sleep_supported & SUSP_SH_STANDBY)
                cpuidle_driver.states[2].disabled = false;
 
-       return cpuidle_register(&cpuidle_driver);
+       return cpuidle_register(&cpuidle_driver, NULL);
 }
index f9173766ec4be2393e4207efe7092741f35fc271..ac4922ad3c148d3fc0883afe9ccb8a50d078fc76 100644 (file)
@@ -113,9 +113,11 @@ static int get_hbp_len(u16 hbp_len)
        case SH_BREAKPOINT_LEN_4:
                len_in_bytes = 4;
                break;
+#ifdef UBC_64BIT
        case SH_BREAKPOINT_LEN_8:
                len_in_bytes = 8;
                break;
+#endif
        }
        return len_in_bytes;
 }
@@ -149,9 +151,11 @@ int arch_bp_generic_fields(int sh_len, int sh_type,
        case SH_BREAKPOINT_LEN_4:
                *gen_len = HW_BREAKPOINT_LEN_4;
                break;
+#ifdef UBC_64BIT
        case SH_BREAKPOINT_LEN_8:
                *gen_len = HW_BREAKPOINT_LEN_8;
                break;
+#endif
        default:
                return -EINVAL;
        }
@@ -190,9 +194,11 @@ static int arch_build_bp_info(struct perf_event *bp)
        case HW_BREAKPOINT_LEN_4:
                info->len = SH_BREAKPOINT_LEN_4;
                break;
+#ifdef UBC_64BIT
        case HW_BREAKPOINT_LEN_8:
                info->len = SH_BREAKPOINT_LEN_8;
                break;
+#endif
        default:
                return -EINVAL;
        }
@@ -240,9 +246,11 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
        case SH_BREAKPOINT_LEN_4:
                align = 3;
                break;
+#ifdef UBC_64BIT
        case SH_BREAKPOINT_LEN_8:
                align = 7;
                break;
+#endif
        default:
                return ret;
        }
index c7de3323819c5389a8c1855f09aa542cd67f4d41..8d284801f2322ec26f35bdd7b4e4953ccf993bc8 100644 (file)
@@ -48,8 +48,8 @@ do {  save_and_clear_fpu();                                           \
        "wrpr   %%g0, 14, %%pil\n\t"                                    \
        "brz,pt %%o7, switch_to_pc\n\t"                                 \
        " mov   %%g7, %0\n\t"                                           \
-       "sethi  %%hi(ret_from_syscall), %%g1\n\t"                       \
-       "jmpl   %%g1 + %%lo(ret_from_syscall), %%g0\n\t"                \
+       "sethi  %%hi(ret_from_fork), %%g1\n\t"                          \
+       "jmpl   %%g1 + %%lo(ret_from_fork), %%g0\n\t"                   \
        " nop\n\t"                                                      \
        ".globl switch_to_pc\n\t"                                       \
        "switch_to_pc:\n\t"                                             \
index e4de74c2c9b0d8082c80f57d757ef7577fbb8b0d..cb5d272d658acce52fc85a77b1e730a40c5d885a 100644 (file)
@@ -327,6 +327,7 @@ static int iterate_cpu(struct cpuinfo_tree *t, unsigned int root_index)
        case SUN4V_CHIP_NIAGARA3:
        case SUN4V_CHIP_NIAGARA4:
        case SUN4V_CHIP_NIAGARA5:
+       case SUN4V_CHIP_SPARC64X:
                rover_inc_table = niagara_iterate_method;
                break;
        default:
index e2a030045089f8c06acc55167e332e4ab13a1da5..33c02b15f47859a8262677d08635fcfdbb8872cb 100644 (file)
@@ -839,7 +839,7 @@ sys_sigreturn:
         nop
 
        call    syscall_trace
-        nop
+        mov    1, %o1
 
 1:
        /* We don't want to muck with user registers like a
index c8759550799f0beb13b9108d60de83b19c2fab04..53c0a82e60308d541271707681b046824ad79456 100644 (file)
@@ -42,7 +42,7 @@ void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
 {
        struct thread_info *t = task_thread_info(p);
        extern unsigned int switch_to_pc;
-       extern unsigned int ret_from_syscall;
+       extern unsigned int ret_from_fork;
        struct reg_window *win;
        unsigned long pc, cwp;
        int i;
@@ -66,7 +66,7 @@ void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
                gdb_regs[i] = 0;
 
        if (t->new_child)
-               pc = (unsigned long) &ret_from_syscall;
+               pc = (unsigned long) &ret_from_fork;
        else
                pc = (unsigned long) &switch_to_pc;
 
index 0746e5e32b372ab3a81eb64c2aae79d957b6e88a..fde5a419cf27e0fdd4173aa2993312928bc120ec 100644 (file)
@@ -25,11 +25,10 @@ kvmap_itlb:
         */
 kvmap_itlb_4v:
 
-kvmap_itlb_nonlinear:
        /* Catch kernel NULL pointer calls.  */
        sethi           %hi(PAGE_SIZE), %g5
        cmp             %g4, %g5
-       bleu,pn         %xcc, kvmap_dtlb_longpath
+       blu,pn          %xcc, kvmap_itlb_longpath
         nop
 
        KERN_TSB_LOOKUP_TL1(%g4, %g6, %g5, %g1, %g2, %g3, kvmap_itlb_load)
index 7ff45e4ba6815080a29e02a64ad79bfdf9c1ed12..a34833099addecd65609155702538f11bbab981f 100644 (file)
@@ -1087,7 +1087,7 @@ asmlinkage void syscall_trace_leave(struct pt_regs *regs)
        audit_syscall_exit(regs);
 
        if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
-               trace_sys_exit(regs, regs->u_regs[UREG_G1]);
+               trace_sys_exit(regs, regs->u_regs[UREG_I0]);
 
        if (test_thread_flag(TIF_SYSCALL_TRACE))
                tracehook_report_syscall_exit(regs, 0);
index 13785547e435274bb6fa079968a9d516dc25800f..3fdb455e3318fab7626763ba6fb9b907e02203d8 100644 (file)
@@ -499,12 +499,14 @@ static void __init init_sparc64_elf_hwcap(void)
                    sun4v_chip_type == SUN4V_CHIP_NIAGARA2 ||
                    sun4v_chip_type == SUN4V_CHIP_NIAGARA3 ||
                    sun4v_chip_type == SUN4V_CHIP_NIAGARA4 ||
-                   sun4v_chip_type == SUN4V_CHIP_NIAGARA5)
+                   sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
+                   sun4v_chip_type == SUN4V_CHIP_SPARC64X)
                        cap |= HWCAP_SPARC_BLKINIT;
                if (sun4v_chip_type == SUN4V_CHIP_NIAGARA2 ||
                    sun4v_chip_type == SUN4V_CHIP_NIAGARA3 ||
                    sun4v_chip_type == SUN4V_CHIP_NIAGARA4 ||
-                   sun4v_chip_type == SUN4V_CHIP_NIAGARA5)
+                   sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
+                   sun4v_chip_type == SUN4V_CHIP_SPARC64X)
                        cap |= HWCAP_SPARC_N2;
        }
 
@@ -530,13 +532,15 @@ static void __init init_sparc64_elf_hwcap(void)
                        if (sun4v_chip_type == SUN4V_CHIP_NIAGARA2 ||
                            sun4v_chip_type == SUN4V_CHIP_NIAGARA3 ||
                            sun4v_chip_type == SUN4V_CHIP_NIAGARA4 ||
-                           sun4v_chip_type == SUN4V_CHIP_NIAGARA5)
+                           sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
+                           sun4v_chip_type == SUN4V_CHIP_SPARC64X)
                                cap |= (AV_SPARC_VIS | AV_SPARC_VIS2 |
                                        AV_SPARC_ASI_BLK_INIT |
                                        AV_SPARC_POPC);
                        if (sun4v_chip_type == SUN4V_CHIP_NIAGARA3 ||
                            sun4v_chip_type == SUN4V_CHIP_NIAGARA4 ||
-                           sun4v_chip_type == SUN4V_CHIP_NIAGARA5)
+                           sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
+                           sun4v_chip_type == SUN4V_CHIP_SPARC64X)
                                cap |= (AV_SPARC_VIS3 | AV_SPARC_HPC |
                                        AV_SPARC_FMAF);
                }
index 22a1098961f54a5f15c9ed87ec7d2c3ff3920e85..d950197a17e16229d0b62147b5956924d656baf4 100644 (file)
@@ -98,8 +98,8 @@ sys_clone:
        ba,pt   %xcc, sparc_do_fork
         add    %sp, PTREGS_OFF, %o2
 
-       .globl  ret_from_syscall
-ret_from_syscall:
+       .globl  ret_from_fork
+ret_from_fork:
        /* Clear current_thread_info()->new_child. */
        stb     %g0, [%g6 + TI_NEW_CHILD]
        call    schedule_tail
@@ -152,7 +152,7 @@ linux_syscall_trace32:
        srl     %i4, 0, %o4
        srl     %i1, 0, %o1
        srl     %i2, 0, %o2
-       ba,pt   %xcc, 2f
+       ba,pt   %xcc, 5f
         srl    %i3, 0, %o3
 
 linux_syscall_trace:
@@ -182,13 +182,13 @@ linux_sparc_syscall32:
        srl     %i1, 0, %o1                             ! IEU0  Group
        ldx     [%g6 + TI_FLAGS], %l0           ! Load
 
-       srl     %i5, 0, %o5                             ! IEU1
+       srl     %i3, 0, %o3                             ! IEU0
        srl     %i2, 0, %o2                             ! IEU0  Group
        andcc   %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
        bne,pn  %icc, linux_syscall_trace32             ! CTI
         mov    %i0, %l5                                ! IEU1
-       call    %l7                                     ! CTI   Group brk forced
-        srl    %i3, 0, %o3                             ! IEU0
+5:     call    %l7                                     ! CTI   Group brk forced
+        srl    %i5, 0, %o5                             ! IEU1
        ba,a,pt %xcc, 3f
 
        /* Linux native system calls enter here... */
index 24565a7ffe6d2fa1de2e9bc164b926e66fe6019f..5c872e7e224238caf157888aba2fd8a342663782 100644 (file)
@@ -5,7 +5,6 @@ config TILE
        def_bool y
        select HAVE_DMA_ATTRS
        select HAVE_DMA_API_DEBUG
-       select HAVE_KVM if !TILEGX
        select GENERIC_FIND_FIRST_BIT
        select SYSCTL_EXCEPTION_TRACE
        select USE_GENERIC_SMP_HELPERS
@@ -26,6 +25,7 @@ config TILE
        select HAVE_SYSCALL_TRACEPOINTS
        select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
        select HAVE_DEBUG_STACKOVERFLOW
+       select ARCH_WANT_FRAME_POINTERS
 
 # FIXME: investigate whether we need/want these options.
 #      select HAVE_IOREMAP_PROT
@@ -64,6 +64,9 @@ config HUGETLB_SUPER_PAGES
        depends on HUGETLB_PAGE && TILEGX
        def_bool y
 
+config GENERIC_TIME_VSYSCALL
+       def_bool y
+
 # FIXME: tilegx can implement a more efficient rwsem.
 config RWSEM_GENERIC_SPINLOCK
        def_bool y
@@ -111,11 +114,22 @@ config SMP
 
 config HVC_TILE
        depends on TTY
+       depends on !KVM_GUEST
        select HVC_DRIVER
+       select HVC_IRQ if TILEGX
        def_bool y
 
 config TILEGX
-       bool "Building with TILE-Gx (64-bit) compiler and toolchain"
+       bool "Building for TILE-Gx (64-bit) processor"
+       select HAVE_FUNCTION_TRACER
+       select HAVE_FUNCTION_TRACE_MCOUNT_TEST
+       select HAVE_FUNCTION_GRAPH_TRACER
+       select HAVE_DYNAMIC_FTRACE
+       select HAVE_FTRACE_MCOUNT_RECORD
+       select HAVE_KPROBES
+       select HAVE_KRETPROBES
+       select HAVE_KVM if !KVM_GUEST
+       select HAVE_ARCH_KGDB
 
 config TILEPRO
        def_bool !TILEGX
@@ -194,7 +208,7 @@ config SYSVIPC_COMPAT
        def_bool y
        depends on COMPAT && SYSVIPC
 
-# We do not currently support disabling HIGHMEM on tile64 and tilepro.
+# We do not currently support disabling HIGHMEM on tilepro.
 config HIGHMEM
        bool # "Support for more than 512 MB of RAM"
        default !TILEGX
@@ -300,6 +314,8 @@ config PAGE_OFFSET
 
 source "mm/Kconfig"
 
+source "kernel/Kconfig.preempt"
+
 config CMDLINE_BOOL
        bool "Built-in kernel command line"
        default n
@@ -353,11 +369,22 @@ config HARDWALL
        bool "Hardwall support to allow access to user dynamic network"
        default y
 
+config KVM_GUEST
+       bool "Build kernel as guest for KVM"
+       default n
+       depends on TILEGX
+       select VIRTIO
+       select VIRTIO_RING
+       select VIRTIO_CONSOLE
+       ---help---
+         This will build a kernel that runs at a lower protection level
+         than the default kernel and is suitable to run under KVM.
+
 config KERNEL_PL
        int "Processor protection level for kernel"
        range 1 2
-       default 2 if TILEGX
-       default 1 if !TILEGX
+       default 2 if TILEGX && !KVM_GUEST
+       default 1 if !TILEGX || KVM_GUEST
        ---help---
          Since MDE 4.2, the Tilera hypervisor runs the kernel
          at PL2 by default.  If running under an older hypervisor,
@@ -396,8 +423,20 @@ config NO_IOMEM
 config NO_IOPORT
        def_bool !PCI
 
+config TILE_PCI_IO
+       bool "PCI I/O space support"
+       default n
+       depends on PCI
+       depends on TILEGX
+       ---help---
+         Enable PCI I/O space support on TILEGx. Since the PCI I/O space
+         is used by few modern PCIe endpoint devices, its support is disabled
+         by default to save the TRIO PIO Region resource for other purposes.
+
 source "drivers/pci/Kconfig"
 
+source "drivers/pci/pcie/Kconfig"
+
 config TILE_USB
        tristate "Tilera USB host adapter support"
        default y
@@ -433,3 +472,20 @@ source "crypto/Kconfig"
 source "lib/Kconfig"
 
 source "arch/tile/kvm/Kconfig"
+
+menu "CPU Frequency scaling"
+       depends on TILEGX
+
+source "drivers/cpufreq/Kconfig"
+
+config CPU_FREQ_TILEGX
+       tristate "CPUfreq driver for TILE-Gx"
+       depends on CPU_FREQ && TILEGX
+       default n
+       select STOP_MACHINE
+       ---help---
+         This enables the CPUfreq driver for TILE-Gx CPUs.  Say Y here
+         if you want to be able to dynamically adjust CPU frequency
+         while the system is running.
+
+endmenu
index 9165ea979e85dcf2aa057ae23a780ec70c7eda97..19734d3ab1e877e14df5b353399e6191d88016ec 100644 (file)
@@ -14,14 +14,12 @@ config EARLY_PRINTK
          with klogd/syslogd. You should normally N here,
          unless you want to debug such a crash.
 
-config DEBUG_EXTRA_FLAGS
-       string "Additional compiler arguments when building with '-g'"
-       depends on DEBUG_INFO
-       default ""
+config TILE_HVGLUE_TRACE
+       bool "Provide wrapper functions for hypervisor ABI calls"
+       default n
        help
-         Debug info can be large, and flags like
-         `-femit-struct-debug-baseonly' can reduce the kernel file
-         size and build time noticeably.  Such flags are often
-         helpful if the main use of debug info is line number info.
+         Provide wrapper functions for the hypervisor ABI calls
+         defined in arch/tile/kernel/hvglue.S.  This allows tracing
+         mechanisms, etc., to have visibility into those calls.
 
 endmenu
index 3d15364c60714bf661da140c8f6e67cde20001b3..3107e83b5eb183fe177c42955ef8c00c12fbea3d 100644 (file)
@@ -30,10 +30,6 @@ endif
 # In kernel modules, this causes load failures due to unsupported relocations.
 KBUILD_CFLAGS   += -fno-asynchronous-unwind-tables
 
-ifneq ($(CONFIG_DEBUG_EXTRA_FLAGS),"")
-KBUILD_CFLAGS   += $(CONFIG_DEBUG_EXTRA_FLAGS)
-endif
-
 LIBGCC_PATH     := \
   $(shell $(CC) $(KBUILD_CFLAGS) $(KCFLAGS) -print-libgcc-file-name)
 
@@ -62,6 +58,7 @@ libs-y                += $(LIBGCC_PATH)
 
 # See arch/tile/Kbuild for content of core part of the kernel
 core-y         += arch/tile/
+core-$(CONFIG_KVM) += arch/tile/kvm/
 
 core-$(CONFIG_TILE_GXIO) += arch/tile/gxio/
 
index d221f8d6de8b5ea4d0346c440f8e1f48d7d82e1c..d4e10d58071b8fbf021cf2d45e93f3ff95b05496 100644 (file)
@@ -26,3 +26,8 @@ config TILE_GXIO_TRIO
 config TILE_GXIO_USB_HOST
        bool
        select TILE_GXIO
+
+# Support direct access to the TILE-Gx UART hardware from kernel space.
+config TILE_GXIO_UART
+       bool
+       select TILE_GXIO
index 8684bcaa74ea8233dccd9fff2e67085aa693a15a..26ae2c72746737746d3d0ea524236e940ffe044c 100644 (file)
@@ -6,4 +6,5 @@ obj-$(CONFIG_TILE_GXIO) += iorpc_globals.o kiorpc.o
 obj-$(CONFIG_TILE_GXIO_DMA) += dma_queue.o
 obj-$(CONFIG_TILE_GXIO_MPIPE) += mpipe.o iorpc_mpipe.o iorpc_mpipe_info.o
 obj-$(CONFIG_TILE_GXIO_TRIO) += trio.o iorpc_trio.o
+obj-$(CONFIG_TILE_GXIO_UART) += uart.o iorpc_uart.o
 obj-$(CONFIG_TILE_GXIO_USB_HOST) += usb_host.o iorpc_usb_host.o
index 31b87bf8c027e201fbc853e46bd5b99cbac864d6..4f8f3d619c4a652b428029f0bc2c0362775d79dd 100644 (file)
@@ -387,6 +387,27 @@ int gxio_mpipe_link_close_aux(gxio_mpipe_context_t * context, int mac)
 
 EXPORT_SYMBOL(gxio_mpipe_link_close_aux);
 
+struct link_set_attr_aux_param {
+       int mac;
+       uint32_t attr;
+       int64_t val;
+};
+
+int gxio_mpipe_link_set_attr_aux(gxio_mpipe_context_t * context, int mac,
+                                uint32_t attr, int64_t val)
+{
+       struct link_set_attr_aux_param temp;
+       struct link_set_attr_aux_param *params = &temp;
+
+       params->mac = mac;
+       params->attr = attr;
+       params->val = val;
+
+       return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
+                            sizeof(*params), GXIO_MPIPE_OP_LINK_SET_ATTR_AUX);
+}
+
+EXPORT_SYMBOL(gxio_mpipe_link_set_attr_aux);
 
 struct get_timestamp_aux_param {
        uint64_t sec;
@@ -454,6 +475,51 @@ int gxio_mpipe_adjust_timestamp_aux(gxio_mpipe_context_t * context,
 
 EXPORT_SYMBOL(gxio_mpipe_adjust_timestamp_aux);
 
+struct adjust_timestamp_freq_param {
+       int32_t ppb;
+};
+
+int gxio_mpipe_adjust_timestamp_freq(gxio_mpipe_context_t * context,
+                                    int32_t ppb)
+{
+       struct adjust_timestamp_freq_param temp;
+       struct adjust_timestamp_freq_param *params = &temp;
+
+       params->ppb = ppb;
+
+       return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
+                            sizeof(*params),
+                            GXIO_MPIPE_OP_ADJUST_TIMESTAMP_FREQ);
+}
+
+EXPORT_SYMBOL(gxio_mpipe_adjust_timestamp_freq);
+
+struct config_edma_ring_blks_param {
+       unsigned int ering;
+       unsigned int max_blks;
+       unsigned int min_snf_blks;
+       unsigned int db;
+};
+
+int gxio_mpipe_config_edma_ring_blks(gxio_mpipe_context_t * context,
+                                    unsigned int ering, unsigned int max_blks,
+                                    unsigned int min_snf_blks, unsigned int db)
+{
+       struct config_edma_ring_blks_param temp;
+       struct config_edma_ring_blks_param *params = &temp;
+
+       params->ering = ering;
+       params->max_blks = max_blks;
+       params->min_snf_blks = min_snf_blks;
+       params->db = db;
+
+       return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
+                            sizeof(*params),
+                            GXIO_MPIPE_OP_CONFIG_EDMA_RING_BLKS);
+}
+
+EXPORT_SYMBOL(gxio_mpipe_config_edma_ring_blks);
+
 struct arm_pollfd_param {
        union iorpc_pollfd pollfd;
 };
index d0254aa60cbac3966e0f5c6a3ce9d6bb2995c907..64883aabeb9c19d2ce3d5e72f4c4040813c33e83 100644 (file)
 #include "gxio/iorpc_mpipe_info.h"
 
 
+struct instance_aux_param {
+       _gxio_mpipe_link_name_t name;
+};
+
+int gxio_mpipe_info_instance_aux(gxio_mpipe_info_context_t * context,
+                                _gxio_mpipe_link_name_t name)
+{
+       struct instance_aux_param temp;
+       struct instance_aux_param *params = &temp;
+
+       params->name = name;
+
+       return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
+                            sizeof(*params), GXIO_MPIPE_INFO_OP_INSTANCE_AUX);
+}
+
+EXPORT_SYMBOL(gxio_mpipe_info_instance_aux);
+
 struct enumerate_aux_param {
        _gxio_mpipe_link_name_t name;
        _gxio_mpipe_link_mac_t mac;
index cef4b2209cda7351b84e893909fc7346e59ea479..da6e18e049c35a5093d90522993cfdcf75dd2b76 100644 (file)
@@ -61,6 +61,29 @@ int gxio_trio_alloc_memory_maps(gxio_trio_context_t * context,
 
 EXPORT_SYMBOL(gxio_trio_alloc_memory_maps);
 
+struct alloc_scatter_queues_param {
+       unsigned int count;
+       unsigned int first;
+       unsigned int flags;
+};
+
+int gxio_trio_alloc_scatter_queues(gxio_trio_context_t * context,
+                                  unsigned int count, unsigned int first,
+                                  unsigned int flags)
+{
+       struct alloc_scatter_queues_param temp;
+       struct alloc_scatter_queues_param *params = &temp;
+
+       params->count = count;
+       params->first = first;
+       params->flags = flags;
+
+       return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
+                            sizeof(*params),
+                            GXIO_TRIO_OP_ALLOC_SCATTER_QUEUES);
+}
+
+EXPORT_SYMBOL(gxio_trio_alloc_scatter_queues);
 
 struct alloc_pio_regions_param {
        unsigned int count;
diff --git a/arch/tile/gxio/iorpc_uart.c b/arch/tile/gxio/iorpc_uart.c
new file mode 100644 (file)
index 0000000..b9a6d61
--- /dev/null
@@ -0,0 +1,77 @@
+/*
+ * Copyright 2013 Tilera Corporation. All Rights Reserved.
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful, but
+ *   WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ *   NON INFRINGEMENT.  See the GNU General Public License for
+ *   more details.
+ */
+
+/* This file is machine-generated; DO NOT EDIT! */
+#include "gxio/iorpc_uart.h"
+
+struct cfg_interrupt_param {
+       union iorpc_interrupt interrupt;
+};
+
+int gxio_uart_cfg_interrupt(gxio_uart_context_t *context, int inter_x,
+                           int inter_y, int inter_ipi, int inter_event)
+{
+       struct cfg_interrupt_param temp;
+       struct cfg_interrupt_param *params = &temp;
+
+       params->interrupt.kernel.x = inter_x;
+       params->interrupt.kernel.y = inter_y;
+       params->interrupt.kernel.ipi = inter_ipi;
+       params->interrupt.kernel.event = inter_event;
+
+       return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
+                            sizeof(*params), GXIO_UART_OP_CFG_INTERRUPT);
+}
+
+EXPORT_SYMBOL(gxio_uart_cfg_interrupt);
+
+struct get_mmio_base_param {
+       HV_PTE base;
+};
+
+int gxio_uart_get_mmio_base(gxio_uart_context_t *context, HV_PTE *base)
+{
+       int __result;
+       struct get_mmio_base_param temp;
+       struct get_mmio_base_param *params = &temp;
+
+       __result =
+           hv_dev_pread(context->fd, 0, (HV_VirtAddr) params, sizeof(*params),
+                        GXIO_UART_OP_GET_MMIO_BASE);
+       *base = params->base;
+
+       return __result;
+}
+
+EXPORT_SYMBOL(gxio_uart_get_mmio_base);
+
+struct check_mmio_offset_param {
+       unsigned long offset;
+       unsigned long size;
+};
+
+int gxio_uart_check_mmio_offset(gxio_uart_context_t *context,
+                               unsigned long offset, unsigned long size)
+{
+       struct check_mmio_offset_param temp;
+       struct check_mmio_offset_param *params = &temp;
+
+       params->offset = offset;
+       params->size = size;
+
+       return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
+                            sizeof(*params), GXIO_UART_OP_CHECK_MMIO_OFFSET);
+}
+
+EXPORT_SYMBOL(gxio_uart_check_mmio_offset);
index e71c63390acc03b35ed8678fa70d5c959aefee56..5301a9ffbae10917d1a7bd38ffd0ea136d8def6c 100644 (file)
@@ -36,8 +36,14 @@ int gxio_mpipe_init(gxio_mpipe_context_t *context, unsigned int mpipe_index)
        int fd;
        int i;
 
+       if (mpipe_index >= GXIO_MPIPE_INSTANCE_MAX)
+               return -EINVAL;
+
        snprintf(file, sizeof(file), "mpipe/%d/iorpc", mpipe_index);
        fd = hv_dev_open((HV_VirtAddr) file, 0);
+
+       context->fd = fd;
+
        if (fd < 0) {
                if (fd >= GXIO_ERR_MIN && fd <= GXIO_ERR_MAX)
                        return fd;
@@ -45,8 +51,6 @@ int gxio_mpipe_init(gxio_mpipe_context_t *context, unsigned int mpipe_index)
                        return -ENODEV;
        }
 
-       context->fd = fd;
-
        /* Map in the MMIO space. */
        context->mmio_cfg_base = (void __force *)
                iorpc_ioremap(fd, HV_MPIPE_CONFIG_MMIO_OFFSET,
@@ -64,12 +68,15 @@ int gxio_mpipe_init(gxio_mpipe_context_t *context, unsigned int mpipe_index)
        for (i = 0; i < 8; i++)
                context->__stacks.stacks[i] = 255;
 
+       context->instance = mpipe_index;
+
        return 0;
 
       fast_failed:
        iounmap((void __force __iomem *)(context->mmio_cfg_base));
       cfg_failed:
        hv_dev_close(context->fd);
+       context->fd = -1;
        return -ENODEV;
 }
 
@@ -383,7 +390,7 @@ EXPORT_SYMBOL_GPL(gxio_mpipe_iqueue_init);
 
 int gxio_mpipe_equeue_init(gxio_mpipe_equeue_t *equeue,
                           gxio_mpipe_context_t *context,
-                          unsigned int edma_ring_id,
+                          unsigned int ering,
                           unsigned int channel,
                           void *mem, unsigned int mem_size,
                           unsigned int mem_flags)
@@ -394,7 +401,7 @@ int gxio_mpipe_equeue_init(gxio_mpipe_equeue_t *equeue,
        /* Offset used to read number of completed commands. */
        MPIPE_EDMA_POST_REGION_ADDR_t offset;
 
-       int result = gxio_mpipe_init_edma_ring(context, edma_ring_id, channel,
+       int result = gxio_mpipe_init_edma_ring(context, ering, channel,
                                               mem, mem_size, mem_flags);
        if (result < 0)
                return result;
@@ -405,7 +412,7 @@ int gxio_mpipe_equeue_init(gxio_mpipe_equeue_t *equeue,
        offset.region =
                MPIPE_MMIO_ADDR__REGION_VAL_EDMA -
                MPIPE_MMIO_ADDR__REGION_VAL_IDMA;
-       offset.ring = edma_ring_id;
+       offset.ring = ering;
 
        __gxio_dma_queue_init(&equeue->dma_queue,
                              context->mmio_fast_base + offset.word,
@@ -413,6 +420,9 @@ int gxio_mpipe_equeue_init(gxio_mpipe_equeue_t *equeue,
        equeue->edescs = mem;
        equeue->mask_num_entries = num_entries - 1;
        equeue->log2_num_entries = __builtin_ctz(num_entries);
+       equeue->context = context;
+       equeue->ering = ering;
+       equeue->channel = channel;
 
        return 0;
 }
@@ -493,6 +503,20 @@ static gxio_mpipe_context_t *_gxio_get_link_context(void)
        return contextp;
 }
 
+int gxio_mpipe_link_instance(const char *link_name)
+{
+       _gxio_mpipe_link_name_t name;
+       gxio_mpipe_context_t *context = _gxio_get_link_context();
+
+       if (!context)
+               return GXIO_ERR_NO_DEVICE;
+
+       strncpy(name.name, link_name, sizeof(name.name));
+       name.name[GXIO_MPIPE_LINK_NAME_LEN - 1] = '\0';
+
+       return gxio_mpipe_info_instance_aux(context, name);
+}
+
 int gxio_mpipe_link_enumerate_mac(int idx, char *link_name, uint8_t *link_mac)
 {
        int rv;
@@ -543,3 +567,12 @@ int gxio_mpipe_link_close(gxio_mpipe_link_t *link)
 }
 
 EXPORT_SYMBOL_GPL(gxio_mpipe_link_close);
+
+int gxio_mpipe_link_set_attr(gxio_mpipe_link_t *link, uint32_t attr,
+                            int64_t val)
+{
+       return gxio_mpipe_link_set_attr_aux(link->context, link->mac, attr,
+                                           val);
+}
+
+EXPORT_SYMBOL_GPL(gxio_mpipe_link_set_attr);
diff --git a/arch/tile/gxio/uart.c b/arch/tile/gxio/uart.c
new file mode 100644 (file)
index 0000000..ba58517
--- /dev/null
@@ -0,0 +1,87 @@
+/*
+ * Copyright 2013 Tilera Corporation. All Rights Reserved.
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful, but
+ *   WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ *   NON INFRINGEMENT.  See the GNU General Public License for
+ *   more details.
+ */
+
+/*
+ * Implementation of UART gxio calls.
+ */
+
+#include <linux/io.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+
+#include <gxio/uart.h>
+#include <gxio/iorpc_globals.h>
+#include <gxio/iorpc_uart.h>
+#include <gxio/kiorpc.h>
+
+int gxio_uart_init(gxio_uart_context_t *context, int uart_index)
+{
+       char file[32];
+       int fd;
+
+       snprintf(file, sizeof(file), "uart/%d/iorpc", uart_index);
+       fd = hv_dev_open((HV_VirtAddr) file, 0);
+       if (fd < 0) {
+               if (fd >= GXIO_ERR_MIN && fd <= GXIO_ERR_MAX)
+                       return fd;
+               else
+                       return -ENODEV;
+       }
+
+       context->fd = fd;
+
+       /* Map in the MMIO space. */
+       context->mmio_base = (void __force *)
+               iorpc_ioremap(fd, HV_UART_MMIO_OFFSET, HV_UART_MMIO_SIZE);
+
+       if (context->mmio_base == NULL) {
+               hv_dev_close(context->fd);
+               context->fd = -1;
+               return -ENODEV;
+       }
+
+       return 0;
+}
+
+EXPORT_SYMBOL_GPL(gxio_uart_init);
+
+int gxio_uart_destroy(gxio_uart_context_t *context)
+{
+       iounmap((void __force __iomem *)(context->mmio_base));
+       hv_dev_close(context->fd);
+
+       context->mmio_base = NULL;
+       context->fd = -1;
+
+       return 0;
+}
+
+EXPORT_SYMBOL_GPL(gxio_uart_destroy);
+
+/* UART register write wrapper. */
+void gxio_uart_write(gxio_uart_context_t *context, uint64_t offset,
+                    uint64_t word)
+{
+       __gxio_mmio_write(context->mmio_base + offset, word);
+}
+
+EXPORT_SYMBOL_GPL(gxio_uart_write);
+
+/* UART register read wrapper. */
+uint64_t gxio_uart_read(gxio_uart_context_t *context, uint64_t offset)
+{
+       return __gxio_mmio_read(context->mmio_base + offset);
+}
+
+EXPORT_SYMBOL_GPL(gxio_uart_read);
index d3000a871a21ee63787c531ab6fc1ae3295267e4..c0ddedcae0857a20eef0930624aae479e1fde3af 100644 (file)
 
 #ifndef __ASSEMBLER__
 
+/*
+ * Map SQ Doorbell Format.
+ * This describes the format of the write-only doorbell register that exists
+ * in the last 8-bytes of the MAP_SQ_BASE/LIM range.  This register is only
+ * writable from PCIe space.  Writes to this register will not be written to
+ * Tile memory space and thus no IO VA translation is required if the last
+ * page of the BASE/LIM range is not otherwise written.
+ */
+
+__extension__
+typedef union
+{
+  struct
+  {
+#ifndef __BIG_ENDIAN__
+    /*
+     * When written with a 1, the associated MAP_SQ region's doorbell
+     * interrupt will be triggered once all previous writes are visible to
+     * Tile software.
+     */
+    uint_reg_t doorbell   : 1;
+    /*
+     * When written with a 1, the descriptor at the head of the associated
+     * MAP_SQ's FIFO will be dequeued.
+     */
+    uint_reg_t pop        : 1;
+    /* Reserved. */
+    uint_reg_t __reserved : 62;
+#else   /* __BIG_ENDIAN__ */
+    uint_reg_t __reserved : 62;
+    uint_reg_t pop        : 1;
+    uint_reg_t doorbell   : 1;
+#endif
+  };
+
+  uint_reg_t word;
+} TRIO_MAP_SQ_DOORBELL_FMT_t;
+
+
 /*
  * Tile PIO Region Configuration - CFG Address Format.
  * This register describes the address format for PIO accesses when the
diff --git a/arch/tile/include/arch/uart.h b/arch/tile/include/arch/uart.h
new file mode 100644 (file)
index 0000000..0796697
--- /dev/null
@@ -0,0 +1,300 @@
+/*
+ * Copyright 2013 Tilera Corporation. All Rights Reserved.
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful, but
+ *   WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ *   NON INFRINGEMENT.  See the GNU General Public License for
+ *   more details.
+ */
+
+/* Machine-generated file; do not edit. */
+
+#ifndef __ARCH_UART_H__
+#define __ARCH_UART_H__
+
+#include <arch/abi.h>
+#include <arch/uart_def.h>
+
+#ifndef __ASSEMBLER__
+
+/* Divisor. */
+
+__extension__
+typedef union
+{
+  struct
+  {
+#ifndef __BIG_ENDIAN__
+    /*
+     * Baud Rate Divisor.  Desired_baud_rate = REF_CLK frequency / (baud *
+     * 16).
+     *                       Note: REF_CLK is always 125 MHz, the default
+     * divisor = 68, baud rate = 125M/(68*16) = 115200 baud.
+     */
+    uint_reg_t divisor    : 12;
+    /* Reserved. */
+    uint_reg_t __reserved : 52;
+#else   /* __BIG_ENDIAN__ */
+    uint_reg_t __reserved : 52;
+    uint_reg_t divisor    : 12;
+#endif
+  };
+
+  uint_reg_t word;
+} UART_DIVISOR_t;
+
+/* FIFO Count. */
+
+__extension__
+typedef union
+{
+  struct
+  {
+#ifndef __BIG_ENDIAN__
+    /*
+     * n: n active entries in the receive FIFO (max is 2**8). Each entry has
+     * 8 bits.
+     * 0: no active entry in the receive FIFO (that is empty).
+     */
+    uint_reg_t rfifo_count  : 9;
+    /* Reserved. */
+    uint_reg_t __reserved_0 : 7;
+    /*
+     * n: n active entries in the transmit FIFO (max is 2**8). Each entry has
+     * 8 bits.
+     * 0: no active entry in the transmit FIFO (that is empty).
+     */
+    uint_reg_t tfifo_count  : 9;
+    /* Reserved. */
+    uint_reg_t __reserved_1 : 7;
+    /*
+     * n: n active entries in the write FIFO (max is 2**2). Each entry has 8
+     * bits.
+     * 0: no active entry in the write FIFO (that is empty).
+     */
+    uint_reg_t wfifo_count  : 3;
+    /* Reserved. */
+    uint_reg_t __reserved_2 : 29;
+#else   /* __BIG_ENDIAN__ */
+    uint_reg_t __reserved_2 : 29;
+    uint_reg_t wfifo_count  : 3;
+    uint_reg_t __reserved_1 : 7;
+    uint_reg_t tfifo_count  : 9;
+    uint_reg_t __reserved_0 : 7;
+    uint_reg_t rfifo_count  : 9;
+#endif
+  };
+
+  uint_reg_t word;
+} UART_FIFO_COUNT_t;
+
+/* FLAG. */
+
+__extension__
+typedef union
+{
+  struct
+  {
+#ifndef __BIG_ENDIAN__
+    /* Reserved. */
+    uint_reg_t __reserved_0 : 1;
+    /* 1: receive FIFO is empty */
+    uint_reg_t rfifo_empty  : 1;
+    /* 1: write FIFO is empty. */
+    uint_reg_t wfifo_empty  : 1;
+    /* 1: transmit FIFO is empty. */
+    uint_reg_t tfifo_empty  : 1;
+    /* 1: receive FIFO is full. */
+    uint_reg_t rfifo_full   : 1;
+    /* 1: write FIFO is full. */
+    uint_reg_t wfifo_full   : 1;
+    /* 1: transmit FIFO is full. */
+    uint_reg_t tfifo_full   : 1;
+    /* Reserved. */
+    uint_reg_t __reserved_1 : 57;
+#else   /* __BIG_ENDIAN__ */
+    uint_reg_t __reserved_1 : 57;
+    uint_reg_t tfifo_full   : 1;
+    uint_reg_t wfifo_full   : 1;
+    uint_reg_t rfifo_full   : 1;
+    uint_reg_t tfifo_empty  : 1;
+    uint_reg_t wfifo_empty  : 1;
+    uint_reg_t rfifo_empty  : 1;
+    uint_reg_t __reserved_0 : 1;
+#endif
+  };
+
+  uint_reg_t word;
+} UART_FLAG_t;
+
+/*
+ * Interrupt Vector Mask.
+ * Each bit in this register corresponds to a specific interrupt. When set,
+ * the associated interrupt will not be dispatched.
+ */
+
+__extension__
+typedef union
+{
+  struct
+  {
+#ifndef __BIG_ENDIAN__
+    /* Read data FIFO read and no data available */
+    uint_reg_t rdat_err       : 1;
+    /* Write FIFO was written but it was full */
+    uint_reg_t wdat_err       : 1;
+    /* Stop bit not found when current data was received */
+    uint_reg_t frame_err      : 1;
+    /* Parity error was detected when current data was received */
+    uint_reg_t parity_err     : 1;
+    /* Data was received but the receive FIFO was full */
+    uint_reg_t rfifo_overflow : 1;
+    /*
+     * An almost full event is reached when data is to be written to the
+     * receive FIFO, and the receive FIFO has more than or equal to
+     * BUFFER_THRESHOLD.RFIFO_AFULL bytes.
+     */
+    uint_reg_t rfifo_afull    : 1;
+    /* Reserved. */
+    uint_reg_t __reserved_0   : 1;
+    /* An entry in the transmit FIFO was popped */
+    uint_reg_t tfifo_re       : 1;
+    /* An entry has been pushed into the receive FIFO */
+    uint_reg_t rfifo_we       : 1;
+    /* An entry of the write FIFO has been popped */
+    uint_reg_t wfifo_re       : 1;
+    /* Rshim read receive FIFO in protocol mode */
+    uint_reg_t rfifo_err      : 1;
+    /*
+     * An almost empty event is reached when data is to be read from the
+     * transmit FIFO, and the transmit FIFO has less than or equal to
+     * BUFFER_THRESHOLD.TFIFO_AEMPTY bytes.
+     */
+    uint_reg_t tfifo_aempty   : 1;
+    /* Reserved. */
+    uint_reg_t __reserved_1   : 52;
+#else   /* __BIG_ENDIAN__ */
+    uint_reg_t __reserved_1   : 52;
+    uint_reg_t tfifo_aempty   : 1;
+    uint_reg_t rfifo_err      : 1;
+    uint_reg_t wfifo_re       : 1;
+    uint_reg_t rfifo_we       : 1;
+    uint_reg_t tfifo_re       : 1;
+    uint_reg_t __reserved_0   : 1;
+    uint_reg_t rfifo_afull    : 1;
+    uint_reg_t rfifo_overflow : 1;
+    uint_reg_t parity_err     : 1;
+    uint_reg_t frame_err      : 1;
+    uint_reg_t wdat_err       : 1;
+    uint_reg_t rdat_err       : 1;
+#endif
+  };
+
+  uint_reg_t word;
+} UART_INTERRUPT_MASK_t;
+
+/*
+ * Interrupt vector, write-one-to-clear.
+ * Each bit in this register corresponds to a specific interrupt. Hardware
+ * sets the bit when the associated condition has occurred. Writing a 1
+ * clears the status bit.
+ */
+
+__extension__
+typedef union
+{
+  struct
+  {
+#ifndef __BIG_ENDIAN__
+    /* Read data FIFO read and no data available */
+    uint_reg_t rdat_err       : 1;
+    /* Write FIFO was written but it was full */
+    uint_reg_t wdat_err       : 1;
+    /* Stop bit not found when current data was received */
+    uint_reg_t frame_err      : 1;
+    /* Parity error was detected when current data was received */
+    uint_reg_t parity_err     : 1;
+    /* Data was received but the receive FIFO was full */
+    uint_reg_t rfifo_overflow : 1;
+    /*
+     * Data was received and the receive FIFO is now almost full (more than
+     * BUFFER_THRESHOLD.RFIFO_AFULL bytes in it)
+     */
+    uint_reg_t rfifo_afull    : 1;
+    /* Reserved. */
+    uint_reg_t __reserved_0   : 1;
+    /* An entry in the transmit FIFO was popped */
+    uint_reg_t tfifo_re       : 1;
+    /* An entry has been pushed into the receive FIFO */
+    uint_reg_t rfifo_we       : 1;
+    /* An entry of the write FIFO has been popped */
+    uint_reg_t wfifo_re       : 1;
+    /* Rshim read receive FIFO in protocol mode */
+    uint_reg_t rfifo_err      : 1;
+    /*
+     * Data was read from the transmit FIFO and now it is almost empty (less
+     * than or equal to BUFFER_THRESHOLD.TFIFO_AEMPTY bytes in it).
+     */
+    uint_reg_t tfifo_aempty   : 1;
+    /* Reserved. */
+    uint_reg_t __reserved_1   : 52;
+#else   /* __BIG_ENDIAN__ */
+    uint_reg_t __reserved_1   : 52;
+    uint_reg_t tfifo_aempty   : 1;
+    uint_reg_t rfifo_err      : 1;
+    uint_reg_t wfifo_re       : 1;
+    uint_reg_t rfifo_we       : 1;
+    uint_reg_t tfifo_re       : 1;
+    uint_reg_t __reserved_0   : 1;
+    uint_reg_t rfifo_afull    : 1;
+    uint_reg_t rfifo_overflow : 1;
+    uint_reg_t parity_err     : 1;
+    uint_reg_t frame_err      : 1;
+    uint_reg_t wdat_err       : 1;
+    uint_reg_t rdat_err       : 1;
+#endif
+  };
+
+  uint_reg_t word;
+} UART_INTERRUPT_STATUS_t;
+
+/* Type. */
+
+__extension__
+typedef union
+{
+  struct
+  {
+#ifndef __BIG_ENDIAN__
+    /* Number of stop bits, rx and tx */
+    uint_reg_t sbits        : 1;
+    /* Reserved. */
+    uint_reg_t __reserved_0 : 1;
+    /* Data word size, rx and tx */
+    uint_reg_t dbits        : 1;
+    /* Reserved. */
+    uint_reg_t __reserved_1 : 1;
+    /* Parity selection, rx and tx */
+    uint_reg_t ptype        : 3;
+    /* Reserved. */
+    uint_reg_t __reserved_2 : 57;
+#else   /* __BIG_ENDIAN__ */
+    uint_reg_t __reserved_2 : 57;
+    uint_reg_t ptype        : 3;
+    uint_reg_t __reserved_1 : 1;
+    uint_reg_t dbits        : 1;
+    uint_reg_t __reserved_0 : 1;
+    uint_reg_t sbits        : 1;
+#endif
+  };
+
+  uint_reg_t word;
+} UART_TYPE_t;
+#endif /* !defined(__ASSEMBLER__) */
+
+#endif /* !defined(__ARCH_UART_H__) */
diff --git a/arch/tile/include/arch/uart_def.h b/arch/tile/include/arch/uart_def.h
new file mode 100644 (file)
index 0000000..42bcaf5
--- /dev/null
@@ -0,0 +1,120 @@
+/*
+ * Copyright 2013 Tilera Corporation. All Rights Reserved.
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful, but
+ *   WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ *   NON INFRINGEMENT.  See the GNU General Public License for
+ *   more details.
+ */
+
+/* Machine-generated file; do not edit. */
+
+#ifndef __ARCH_UART_DEF_H__
+#define __ARCH_UART_DEF_H__
+#define UART_DIVISOR 0x0158
+#define UART_FIFO_COUNT 0x0110
+#define UART_FLAG 0x0108
+#define UART_INTERRUPT_MASK 0x0208
+#define UART_INTERRUPT_MASK__RDAT_ERR_SHIFT 0
+#define UART_INTERRUPT_MASK__RDAT_ERR_WIDTH 1
+#define UART_INTERRUPT_MASK__RDAT_ERR_RESET_VAL 1
+#define UART_INTERRUPT_MASK__RDAT_ERR_RMASK 0x1
+#define UART_INTERRUPT_MASK__RDAT_ERR_MASK  0x1
+#define UART_INTERRUPT_MASK__RDAT_ERR_FIELD 0,0
+#define UART_INTERRUPT_MASK__WDAT_ERR_SHIFT 1
+#define UART_INTERRUPT_MASK__WDAT_ERR_WIDTH 1
+#define UART_INTERRUPT_MASK__WDAT_ERR_RESET_VAL 1
+#define UART_INTERRUPT_MASK__WDAT_ERR_RMASK 0x1
+#define UART_INTERRUPT_MASK__WDAT_ERR_MASK  0x2
+#define UART_INTERRUPT_MASK__WDAT_ERR_FIELD 1,1
+#define UART_INTERRUPT_MASK__FRAME_ERR_SHIFT 2
+#define UART_INTERRUPT_MASK__FRAME_ERR_WIDTH 1
+#define UART_INTERRUPT_MASK__FRAME_ERR_RESET_VAL 1
+#define UART_INTERRUPT_MASK__FRAME_ERR_RMASK 0x1
+#define UART_INTERRUPT_MASK__FRAME_ERR_MASK  0x4
+#define UART_INTERRUPT_MASK__FRAME_ERR_FIELD 2,2
+#define UART_INTERRUPT_MASK__PARITY_ERR_SHIFT 3
+#define UART_INTERRUPT_MASK__PARITY_ERR_WIDTH 1
+#define UART_INTERRUPT_MASK__PARITY_ERR_RESET_VAL 1
+#define UART_INTERRUPT_MASK__PARITY_ERR_RMASK 0x1
+#define UART_INTERRUPT_MASK__PARITY_ERR_MASK  0x8
+#define UART_INTERRUPT_MASK__PARITY_ERR_FIELD 3,3
+#define UART_INTERRUPT_MASK__RFIFO_OVERFLOW_SHIFT 4
+#define UART_INTERRUPT_MASK__RFIFO_OVERFLOW_WIDTH 1
+#define UART_INTERRUPT_MASK__RFIFO_OVERFLOW_RESET_VAL 1
+#define UART_INTERRUPT_MASK__RFIFO_OVERFLOW_RMASK 0x1
+#define UART_INTERRUPT_MASK__RFIFO_OVERFLOW_MASK  0x10
+#define UART_INTERRUPT_MASK__RFIFO_OVERFLOW_FIELD 4,4
+#define UART_INTERRUPT_MASK__RFIFO_AFULL_SHIFT 5
+#define UART_INTERRUPT_MASK__RFIFO_AFULL_WIDTH 1
+#define UART_INTERRUPT_MASK__RFIFO_AFULL_RESET_VAL 1
+#define UART_INTERRUPT_MASK__RFIFO_AFULL_RMASK 0x1
+#define UART_INTERRUPT_MASK__RFIFO_AFULL_MASK  0x20
+#define UART_INTERRUPT_MASK__RFIFO_AFULL_FIELD 5,5
+#define UART_INTERRUPT_MASK__TFIFO_RE_SHIFT 7
+#define UART_INTERRUPT_MASK__TFIFO_RE_WIDTH 1
+#define UART_INTERRUPT_MASK__TFIFO_RE_RESET_VAL 1
+#define UART_INTERRUPT_MASK__TFIFO_RE_RMASK 0x1
+#define UART_INTERRUPT_MASK__TFIFO_RE_MASK  0x80
+#define UART_INTERRUPT_MASK__TFIFO_RE_FIELD 7,7
+#define UART_INTERRUPT_MASK__RFIFO_WE_SHIFT 8
+#define UART_INTERRUPT_MASK__RFIFO_WE_WIDTH 1
+#define UART_INTERRUPT_MASK__RFIFO_WE_RESET_VAL 1
+#define UART_INTERRUPT_MASK__RFIFO_WE_RMASK 0x1
+#define UART_INTERRUPT_MASK__RFIFO_WE_MASK  0x100
+#define UART_INTERRUPT_MASK__RFIFO_WE_FIELD 8,8
+#define UART_INTERRUPT_MASK__WFIFO_RE_SHIFT 9
+#define UART_INTERRUPT_MASK__WFIFO_RE_WIDTH 1
+#define UART_INTERRUPT_MASK__WFIFO_RE_RESET_VAL 1
+#define UART_INTERRUPT_MASK__WFIFO_RE_RMASK 0x1
+#define UART_INTERRUPT_MASK__WFIFO_RE_MASK  0x200
+#define UART_INTERRUPT_MASK__WFIFO_RE_FIELD 9,9
+#define UART_INTERRUPT_MASK__RFIFO_ERR_SHIFT 10
+#define UART_INTERRUPT_MASK__RFIFO_ERR_WIDTH 1
+#define UART_INTERRUPT_MASK__RFIFO_ERR_RESET_VAL 1
+#define UART_INTERRUPT_MASK__RFIFO_ERR_RMASK 0x1
+#define UART_INTERRUPT_MASK__RFIFO_ERR_MASK  0x400
+#define UART_INTERRUPT_MASK__RFIFO_ERR_FIELD 10,10
+#define UART_INTERRUPT_MASK__TFIFO_AEMPTY_SHIFT 11
+#define UART_INTERRUPT_MASK__TFIFO_AEMPTY_WIDTH 1
+#define UART_INTERRUPT_MASK__TFIFO_AEMPTY_RESET_VAL 1
+#define UART_INTERRUPT_MASK__TFIFO_AEMPTY_RMASK 0x1
+#define UART_INTERRUPT_MASK__TFIFO_AEMPTY_MASK  0x800
+#define UART_INTERRUPT_MASK__TFIFO_AEMPTY_FIELD 11,11
+#define UART_INTERRUPT_STATUS 0x0200
+#define UART_RECEIVE_DATA 0x0148
+#define UART_TRANSMIT_DATA 0x0140
+#define UART_TYPE 0x0160
+#define UART_TYPE__SBITS_SHIFT 0
+#define UART_TYPE__SBITS_WIDTH 1
+#define UART_TYPE__SBITS_RESET_VAL 1
+#define UART_TYPE__SBITS_RMASK 0x1
+#define UART_TYPE__SBITS_MASK  0x1
+#define UART_TYPE__SBITS_FIELD 0,0
+#define UART_TYPE__SBITS_VAL_ONE_SBITS 0x0
+#define UART_TYPE__SBITS_VAL_TWO_SBITS 0x1
+#define UART_TYPE__DBITS_SHIFT 2
+#define UART_TYPE__DBITS_WIDTH 1
+#define UART_TYPE__DBITS_RESET_VAL 0
+#define UART_TYPE__DBITS_RMASK 0x1
+#define UART_TYPE__DBITS_MASK  0x4
+#define UART_TYPE__DBITS_FIELD 2,2
+#define UART_TYPE__DBITS_VAL_EIGHT_DBITS 0x0
+#define UART_TYPE__DBITS_VAL_SEVEN_DBITS 0x1
+#define UART_TYPE__PTYPE_SHIFT 4
+#define UART_TYPE__PTYPE_WIDTH 3
+#define UART_TYPE__PTYPE_RESET_VAL 3
+#define UART_TYPE__PTYPE_RMASK 0x7
+#define UART_TYPE__PTYPE_MASK  0x70
+#define UART_TYPE__PTYPE_FIELD 4,6
+#define UART_TYPE__PTYPE_VAL_NONE 0x0
+#define UART_TYPE__PTYPE_VAL_MARK 0x1
+#define UART_TYPE__PTYPE_VAL_SPACE 0x2
+#define UART_TYPE__PTYPE_VAL_EVEN 0x3
+#define UART_TYPE__PTYPE_VAL_ODD 0x4
+#endif /* !defined(__ARCH_UART_DEF_H__) */
index b17b9b8e53cdcab7a9eb4f7a95e86402c27f2812..664d6ad23f80626a62e17dbfae3c3b82cd0f6bb0 100644 (file)
@@ -11,12 +11,13 @@ generic-y += errno.h
 generic-y += exec.h
 generic-y += fb.h
 generic-y += fcntl.h
+generic-y += hw_irq.h
 generic-y += ioctl.h
 generic-y += ioctls.h
 generic-y += ipcbuf.h
 generic-y += irq_regs.h
-generic-y += kdebug.h
 generic-y += local.h
+generic-y += local64.h
 generic-y += msgbuf.h
 generic-y += mutex.h
 generic-y += param.h
index e7fb5cfb9597be376b6d35e3c7acabea07e0806d..96156f5ba640e9e9dd1e915643ed01ebde0feaec 100644 (file)
@@ -252,21 +252,6 @@ static inline void atomic64_set(atomic64_t *v, u64 n)
  * Internal definitions only beyond this point.
  */
 
-#define ATOMIC_LOCKS_FOUND_VIA_TABLE() \
-  (!CHIP_HAS_CBOX_HOME_MAP() && defined(CONFIG_SMP))
-
-#if ATOMIC_LOCKS_FOUND_VIA_TABLE()
-
-/* Number of entries in atomic_lock_ptr[]. */
-#define ATOMIC_HASH_L1_SHIFT 6
-#define ATOMIC_HASH_L1_SIZE (1 << ATOMIC_HASH_L1_SHIFT)
-
-/* Number of locks in each struct pointed to by atomic_lock_ptr[]. */
-#define ATOMIC_HASH_L2_SHIFT (CHIP_L2_LOG_LINE_SIZE() - 2)
-#define ATOMIC_HASH_L2_SIZE (1 << ATOMIC_HASH_L2_SHIFT)
-
-#else /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */
-
 /*
  * Number of atomic locks in atomic_locks[]. Must be a power of two.
  * There is no reason for more than PAGE_SIZE / 8 entries, since that
@@ -281,8 +266,6 @@ static inline void atomic64_set(atomic64_t *v, u64 n)
 extern int atomic_locks[];
 #endif
 
-#endif /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */
-
 /*
  * All the code that may fault while holding an atomic lock must
  * place the pointer to the lock in ATOMIC_LOCK_REG so the fault code
index 990a217a0b728d2eb7be81cd7e8e3f76cb2830eb..a9a73da5865d4cae6f8803723c9e9488df095ced 100644 (file)
@@ -77,7 +77,6 @@
 
 #define __sync()       __insn_mf()
 
-#if !CHIP_HAS_MF_WAITS_FOR_VICTIMS()
 #include <hv/syscall_public.h>
 /*
  * Issue an uncacheable load to each memory controller, then
@@ -96,7 +95,6 @@ static inline void __mb_incoherent(void)
                       "r20", "r21", "r22", "r23", "r24",
                       "r25", "r26", "r27", "r28", "r29");
 }
-#endif
 
 /* Fence to guarantee visibility of stores to incoherent memory. */
 static inline void
@@ -104,7 +102,6 @@ mb_incoherent(void)
 {
        __insn_mf();
 
-#if !CHIP_HAS_MF_WAITS_FOR_VICTIMS()
        {
 #if CHIP_HAS_TILE_WRITE_PENDING()
                const unsigned long WRITE_TIMEOUT_CYCLES = 400;
@@ -116,7 +113,6 @@ mb_incoherent(void)
 #endif /* CHIP_HAS_TILE_WRITE_PENDING() */
                (void) __mb_incoherent();
        }
-#endif /* CHIP_HAS_MF_WAITS_FOR_VICTIMS() */
 }
 
 #define fast_wmb()     __sync()
index bd186c4eaa505947299f0a1d98de1dbe9b9102f6..d5a206865036a6c3c95f61c478187442a52b4567 100644 (file)
 #include <asm/bitops_32.h>
 #endif
 
-/**
- * __ffs - find first set bit in word
- * @word: The word to search
- *
- * Undefined if no set bit exists, so code should check against 0 first.
- */
-static inline unsigned long __ffs(unsigned long word)
-{
-       return __builtin_ctzl(word);
-}
-
 /**
  * ffz - find first zero bit in word
  * @word: The word to search
@@ -50,33 +39,6 @@ static inline unsigned long ffz(unsigned long word)
        return __builtin_ctzl(~word);
 }
 
-/**
- * __fls - find last set bit in word
- * @word: The word to search
- *
- * Undefined if no set bit exists, so code should check against 0 first.
- */
-static inline unsigned long __fls(unsigned long word)
-{
-       return (sizeof(word) * 8) - 1 - __builtin_clzl(word);
-}
-
-/**
- * ffs - find first set bit in word
- * @x: the word to search
- *
- * This is defined the same way as the libc and compiler builtin ffs
- * routines, therefore differs in spirit from the other bitops.
- *
- * ffs(value) returns 0 if value is 0 or the position of the first
- * set bit if value is nonzero. The first (least significant) bit
- * is at position 1.
- */
-static inline int ffs(int x)
-{
-       return __builtin_ffs(x);
-}
-
 static inline int fls64(__u64 w)
 {
        return (sizeof(__u64) * 8) - __builtin_clzll(w);
@@ -118,6 +80,9 @@ static inline unsigned long __arch_hweight64(__u64 w)
        return __builtin_popcountll(w);
 }
 
+#include <asm-generic/bitops/builtin-__ffs.h>
+#include <asm-generic/bitops/builtin-__fls.h>
+#include <asm-generic/bitops/builtin-ffs.h>
 #include <asm-generic/bitops/const_hweight.h>
 #include <asm-generic/bitops/lock.h>
 #include <asm-generic/bitops/find.h>
index a9a529964e07d379b5c5ef1c9019e34498f71bae..6160761d5f611319ecd9f838d3f407c934e61ae6 100644 (file)
 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
 
 /*
- * Attribute for data that is kept read/write coherent until the end of
- * initialization, then bumped to read/only incoherent for performance.
+ * Originally we used small TLB pages for kernel data and grouped some
+ * things together as "write once", enforcing the property at the end
+ * of initialization by making those pages read-only and non-coherent.
+ * This allowed better cache utilization since cache inclusion did not
+ * need to be maintained.  However, to do this requires an extra TLB
+ * entry, which on balance is more of a performance hit than the
+ * non-coherence is a performance gain, so we now just make "read
+ * mostly" and "write once" be synonyms.  We keep the attribute
+ * separate in case we change our minds at a future date.
  */
-#define __write_once __attribute__((__section__(".w1data")))
+#define __write_once __read_mostly
 
 #endif /* _ASM_TILE_CACHE_H */
index 0fc63c488edf28f923d82fafcbe9bc662b6884f0..92ee4c8a4f7675318c948613429cd51a23ad6da0 100644 (file)
@@ -75,23 +75,6 @@ static inline void copy_to_user_page(struct vm_area_struct *vma,
 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
        memcpy((dst), (src), (len))
 
-/*
- * Invalidate a VA range; pads to L2 cacheline boundaries.
- *
- * Note that on TILE64, __inv_buffer() actually flushes modified
- * cache lines in addition to invalidating them, i.e., it's the
- * same as __finv_buffer().
- */
-static inline void __inv_buffer(void *buffer, size_t size)
-{
-       char *next = (char *)((long)buffer & -L2_CACHE_BYTES);
-       char *finish = (char *)L2_CACHE_ALIGN((long)buffer + size);
-       while (next < finish) {
-               __insn_inv(next);
-               next += CHIP_INV_STRIDE();
-       }
-}
-
 /* Flush a VA range; pads to L2 cacheline boundaries. */
 static inline void __flush_buffer(void *buffer, size_t size)
 {
@@ -115,13 +98,6 @@ static inline void __finv_buffer(void *buffer, size_t size)
 }
 
 
-/* Invalidate a VA range and wait for it to be complete. */
-static inline void inv_buffer(void *buffer, size_t size)
-{
-       __inv_buffer(buffer, size);
-       mb();
-}
-
 /*
  * Flush a locally-homecached VA range and wait for the evicted
  * cachelines to hit memory.
@@ -142,6 +118,26 @@ static inline void finv_buffer_local(void *buffer, size_t size)
        mb_incoherent();
 }
 
+#ifdef __tilepro__
+/* Invalidate a VA range; pads to L2 cacheline boundaries. */
+static inline void __inv_buffer(void *buffer, size_t size)
+{
+       char *next = (char *)((long)buffer & -L2_CACHE_BYTES);
+       char *finish = (char *)L2_CACHE_ALIGN((long)buffer + size);
+       while (next < finish) {
+               __insn_inv(next);
+               next += CHIP_INV_STRIDE();
+       }
+}
+
+/* Invalidate a VA range and wait for it to be complete. */
+static inline void inv_buffer(void *buffer, size_t size)
+{
+       __inv_buffer(buffer, size);
+       mb();
+}
+#endif
+
 /*
  * Flush and invalidate a VA range that is homed remotely, waiting
  * until the memory controller holds the flushed values.  If "hfh" is
index 276f067e36406e52657f7f0c8f95f9e71cdf28df..1da5bfbd8c61d135edda020c17724de4ab315ebe 100644 (file)
@@ -68,6 +68,12 @@ extern unsigned long __cmpxchg_called_with_bad_pointer(void);
 
 #define tas(ptr) (xchg((ptr), 1))
 
+#define cmpxchg64(ptr, o, n)                                           \
+({                                                                     \
+       BUILD_BUG_ON(sizeof(*(ptr)) != 8);                              \
+       cmpxchg((ptr), (o), (n));                                       \
+})
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* _ASM_TILE_CMPXCHG_H */
index f2ff191376b4c0b1956836560e5cf9f9299d9e25..6f522d5691323c49f95cff44be3d62e3724bce4c 100644 (file)
 #include <linux/cache.h>
 #include <linux/io.h>
 
+#ifdef __tilegx__
+#define ARCH_HAS_DMA_GET_REQUIRED_MASK
+#endif
+
 extern struct dma_map_ops *tile_dma_map_ops;
 extern struct dma_map_ops *gx_pci_dma_map_ops;
 extern struct dma_map_ops *gx_legacy_pci_dma_map_ops;
+extern struct dma_map_ops *gx_hybrid_pci_dma_map_ops;
 
 static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 {
@@ -44,12 +49,12 @@ static inline void set_dma_offset(struct device *dev, dma_addr_t off)
 
 static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
 {
-       return paddr + get_dma_offset(dev);
+       return paddr;
 }
 
 static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
 {
-       return daddr - get_dma_offset(dev);
+       return daddr;
 }
 
 static inline void dma_mark_clean(void *addr, size_t size) {}
@@ -88,7 +93,10 @@ dma_set_mask(struct device *dev, u64 mask)
        struct dma_map_ops *dma_ops = get_dma_ops(dev);
 
        /* Handle legacy PCI devices with limited memory addressability. */
-       if ((dma_ops == gx_pci_dma_map_ops) && (mask <= DMA_BIT_MASK(32))) {
+       if ((dma_ops == gx_pci_dma_map_ops ||
+            dma_ops == gx_hybrid_pci_dma_map_ops ||
+            dma_ops == gx_legacy_pci_dma_map_ops) &&
+           (mask <= DMA_BIT_MASK(32))) {
                set_dma_ops(dev, gx_legacy_pci_dma_map_ops);
                set_dma_offset(dev, 0);
                if (mask > dev->archdata.max_direct_dma_addr)
index ff8a93408823206254a370e2aae4e5ea9c5d7de1..41d9878a968688559bcca8304c7c18351c3a0751 100644 (file)
@@ -30,7 +30,6 @@ typedef unsigned long elf_greg_t;
 #define ELF_NGREG (sizeof(struct pt_regs) / sizeof(elf_greg_t))
 typedef elf_greg_t elf_gregset_t[ELF_NGREG];
 
-#define EM_TILE64  187
 #define EM_TILEPRO 188
 #define EM_TILEGX  191
 
@@ -132,6 +131,15 @@ extern int dump_task_regs(struct task_struct *, elf_gregset_t *);
 struct linux_binprm;
 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
                                       int executable_stack);
+#define ARCH_DLINFO \
+do { \
+       NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_BASE); \
+} while (0)
+
+struct mm_struct;
+extern unsigned long arch_randomize_brk(struct mm_struct *mm);
+#define arch_randomize_brk arch_randomize_brk
+
 #ifdef CONFIG_COMPAT
 
 #define COMPAT_ELF_PLATFORM "tilegx-m32"
index e16dbf929cb549c0cc344415cf0f561f57f2ba8f..c6b9c1b38fd1f0e195f1bf55340ee45eacd79305 100644 (file)
@@ -78,14 +78,6 @@ enum fixed_addresses {
 #endif
 };
 
-extern void __set_fixmap(enum fixed_addresses idx,
-                        unsigned long phys, pgprot_t flags);
-
-#define set_fixmap(idx, phys) \
-               __set_fixmap(idx, phys, PAGE_KERNEL)
-#define clear_fixmap(idx) \
-               __set_fixmap(idx, 0, __pgprot(0))
-
 #define __FIXADDR_SIZE (__end_of_permanent_fixed_addresses << PAGE_SHIFT)
 #define __FIXADDR_BOOT_SIZE    (__end_of_fixed_addresses << PAGE_SHIFT)
 #define FIXADDR_START          (FIXADDR_TOP + PAGE_SIZE - __FIXADDR_SIZE)
index 461459b06d9882e476d748c8984a0ab73f8dac4c..13a9bb81a8ab4754ff93da4c8f17da8a134ce4ec 100644 (file)
 #ifndef _ASM_TILE_FTRACE_H
 #define _ASM_TILE_FTRACE_H
 
-/* empty */
+#ifdef CONFIG_FUNCTION_TRACER
+
+#define MCOUNT_ADDR ((unsigned long)(__mcount))
+#define MCOUNT_INSN_SIZE 8             /* sizeof mcount call */
+
+#ifndef __ASSEMBLY__
+extern void __mcount(void);
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+static inline unsigned long ftrace_call_adjust(unsigned long addr)
+{
+       return addr;
+}
+
+struct dyn_arch_ftrace {
+};
+#endif /*  CONFIG_DYNAMIC_FTRACE */
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* CONFIG_FUNCTION_TRACER */
 
 #endif /* _ASM_TILE_FTRACE_H */
index 5909ac3d7218348c1c7d6f043cb6c56e0236b2db..1a6ef1b69cb13cd4a8560c007a16a77cbbde36cf 100644 (file)
@@ -43,6 +43,7 @@
            ".pushsection .fixup,\"ax\"\n"                      \
            "0: { movei %0, %5; j 9f }\n"                       \
            ".section __ex_table,\"a\"\n"                       \
+           ".align 8\n"                                        \
            ".quad 1b, 0b\n"                                    \
            ".popsection\n"                                     \
            "9:"                                                \
index 7b777132864293aac11e15bf81b7141f1d63b498..7ddd1b8d6910a882bc1e8639367978c5adc95816 100644 (file)
@@ -33,8 +33,7 @@ struct zone;
 
 /*
  * Is this page immutable (unwritable) and thus able to be cached more
- * widely than would otherwise be possible?  On tile64 this means we
- * mark the PTE to cache locally; on tilepro it means we have "nc" set.
+ * widely than would otherwise be possible?  This means we have "nc" set.
  */
 #define PAGE_HOME_IMMUTABLE -2
 
@@ -44,16 +43,8 @@ struct zone;
  */
 #define PAGE_HOME_INCOHERENT -3
 
-#if CHIP_HAS_CBOX_HOME_MAP()
 /* Home for the page is distributed via hash-for-home. */
 #define PAGE_HOME_HASH -4
-#endif
-
-/* Homing is unknown or unspecified.  Not valid for page_home(). */
-#define PAGE_HOME_UNKNOWN -5
-
-/* Home on the current cpu.  Not valid for page_home(). */
-#define PAGE_HOME_HERE -6
 
 /* Support wrapper to use instead of explicit hv_flush_remote(). */
 extern void flush_remote(unsigned long cache_pfn, unsigned long cache_length,
index 31672918064cf781d9cf0197d812237125c4d8d0..023659b7288a552545ebf875d983ff1dd4e4cfc8 100644 (file)
@@ -19,7 +19,8 @@
 #include <linux/bug.h>
 #include <asm/page.h>
 
-#define IO_SPACE_LIMIT 0xfffffffful
+/* Maximum PCI I/O space address supported. */
+#define IO_SPACE_LIMIT 0xffffffff
 
 /*
  * Convert a physical pointer to a virtual kernel pointer for /dev/mem
@@ -42,6 +43,8 @@
  * long before casting it to a pointer to avoid compiler warnings.
  */
 #if CHIP_HAS_MMIO()
+extern void *generic_remap_prot(resource_size_t phys_addr, unsigned long size,
+       unsigned long flags, pgprot_t prot);
 extern void __iomem *ioremap(resource_size_t offset, unsigned long size);
 extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size,
        pgprot_t pgprot);
@@ -254,7 +257,7 @@ static inline void writeq(u64 val, unsigned long addr)
 
 static inline void memset_io(volatile void *dst, int val, size_t len)
 {
-       int x;
+       size_t x;
        BUG_ON((unsigned long)dst & 0x3);
        val = (val & 0xff) * 0x01010101;
        for (x = 0; x < len; x += 4)
@@ -264,7 +267,7 @@ static inline void memset_io(volatile void *dst, int val, size_t len)
 static inline void memcpy_fromio(void *dst, const volatile void __iomem *src,
                                 size_t len)
 {
-       int x;
+       size_t x;
        BUG_ON((unsigned long)src & 0x3);
        for (x = 0; x < len; x += 4)
                *(u32 *)(dst + x) = readl(src + x);
@@ -273,7 +276,7 @@ static inline void memcpy_fromio(void *dst, const volatile void __iomem *src,
 static inline void memcpy_toio(volatile void __iomem *dst, const void *src,
                                size_t len)
 {
-       int x;
+       size_t x;
        BUG_ON((unsigned long)dst & 0x3);
        for (x = 0; x < len; x += 4)
                writel(*(u32 *)(src + x), dst + x);
@@ -281,8 +284,108 @@ static inline void memcpy_toio(volatile void __iomem *dst, const void *src,
 
 #endif
 
+#if CHIP_HAS_MMIO() && defined(CONFIG_TILE_PCI_IO)
+
+static inline u8 inb(unsigned long addr)
+{
+       return readb((volatile void __iomem *) addr);
+}
+
+static inline u16 inw(unsigned long addr)
+{
+       return readw((volatile void __iomem *) addr);
+}
+
+static inline u32 inl(unsigned long addr)
+{
+       return readl((volatile void __iomem *) addr);
+}
+
+static inline void outb(u8 b, unsigned long addr)
+{
+       writeb(b, (volatile void __iomem *) addr);
+}
+
+static inline void outw(u16 b, unsigned long addr)
+{
+       writew(b, (volatile void __iomem *) addr);
+}
+
+static inline void outl(u32 b, unsigned long addr)
+{
+       writel(b, (volatile void __iomem *) addr);
+}
+
+static inline void insb(unsigned long addr, void *buffer, int count)
+{
+       if (count) {
+               u8 *buf = buffer;
+               do {
+                       u8 x = inb(addr);
+                       *buf++ = x;
+               } while (--count);
+       }
+}
+
+static inline void insw(unsigned long addr, void *buffer, int count)
+{
+       if (count) {
+               u16 *buf = buffer;
+               do {
+                       u16 x = inw(addr);
+                       *buf++ = x;
+               } while (--count);
+       }
+}
+
+static inline void insl(unsigned long addr, void *buffer, int count)
+{
+       if (count) {
+               u32 *buf = buffer;
+               do {
+                       u32 x = inl(addr);
+                       *buf++ = x;
+               } while (--count);
+       }
+}
+
+static inline void outsb(unsigned long addr, const void *buffer, int count)
+{
+       if (count) {
+               const u8 *buf = buffer;
+               do {
+                       outb(*buf++, addr);
+               } while (--count);
+       }
+}
+
+static inline void outsw(unsigned long addr, const void *buffer, int count)
+{
+       if (count) {
+               const u16 *buf = buffer;
+               do {
+                       outw(*buf++, addr);
+               } while (--count);
+       }
+}
+
+static inline void outsl(unsigned long addr, const void *buffer, int count)
+{
+       if (count) {
+               const u32 *buf = buffer;
+               do {
+                       outl(*buf++, addr);
+               } while (--count);
+       }
+}
+
+extern void __iomem *ioport_map(unsigned long port, unsigned int len);
+extern void ioport_unmap(void __iomem *addr);
+
+#else
+
 /*
- * The Tile architecture does not support IOPORT, even with PCI.
+ * The TilePro architecture does not support IOPORT, even with PCI.
  * Unfortunately we can't yet simply not declare these methods,
  * since some generic code that compiles into the kernel, but
  * we never run, uses them unconditionally.
@@ -290,7 +393,12 @@ static inline void memcpy_toio(volatile void __iomem *dst, const void *src,
 
 static inline long ioport_panic(void)
 {
+#ifdef __tilegx__
+       panic("PCI IO space support is disabled. Configure the kernel with"
+             " CONFIG_TILE_PCI_IO to enable it");
+#else
        panic("inb/outb and friends do not exist on tile");
+#endif
        return 0;
 }
 
@@ -335,13 +443,6 @@ static inline void outl(u32 b, unsigned long addr)
        ioport_panic();
 }
 
-#define inb_p(addr)    inb(addr)
-#define inw_p(addr)    inw(addr)
-#define inl_p(addr)    inl(addr)
-#define outb_p(x, addr)        outb((x), (addr))
-#define outw_p(x, addr)        outw((x), (addr))
-#define outl_p(x, addr)        outl((x), (addr))
-
 static inline void insb(unsigned long addr, void *buffer, int count)
 {
        ioport_panic();
@@ -372,6 +473,15 @@ static inline void outsl(unsigned long addr, const void *buffer, int count)
        ioport_panic();
 }
 
+#endif /* CHIP_HAS_MMIO() && defined(CONFIG_TILE_PCI_IO) */
+
+#define inb_p(addr)    inb(addr)
+#define inw_p(addr)    inw(addr)
+#define inl_p(addr)    inl(addr)
+#define outb_p(x, addr)        outb((x), (addr))
+#define outw_p(x, addr)        outw((x), (addr))
+#define outl_p(x, addr)        outl((x), (addr))
+
 #define ioread16be(addr)       be16_to_cpu(ioread16(addr))
 #define ioread32be(addr)       be32_to_cpu(ioread32(addr))
 #define iowrite16be(v, addr)   iowrite16(be16_to_cpu(v), (addr))
index c96f9bbb760d54d79b2c0cc7208bf4e37964de9c..71af5747874d6a47b90534bd54a4d56bbcd138ad 100644 (file)
 DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
 #define INITIAL_INTERRUPTS_ENABLED (1ULL << INT_MEM_ERROR)
 
+#ifdef CONFIG_DEBUG_PREEMPT
+/* Due to inclusion issues, we can't rely on <linux/smp.h> here. */
+extern unsigned int debug_smp_processor_id(void);
+# define smp_processor_id() debug_smp_processor_id()
+#endif
+
 /* Disable interrupts. */
 #define arch_local_irq_disable() \
        interrupt_mask_set_mask(LINUX_MASKABLE_INTERRUPTS)
@@ -132,9 +138,18 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
 #define arch_local_irq_disable_all() \
        interrupt_mask_set_mask(-1ULL)
 
+/*
+ * Read the set of maskable interrupts.
+ * We avoid the preemption warning here via __this_cpu_ptr since even
+ * if irqs are already enabled, it's harmless to read the wrong cpu's
+ * enabled mask.
+ */
+#define arch_local_irqs_enabled() \
+       (*__this_cpu_ptr(&interrupts_enabled_mask))
+
 /* Re-enable all maskable interrupts. */
 #define arch_local_irq_enable() \
-       interrupt_mask_reset_mask(__get_cpu_var(interrupts_enabled_mask))
+       interrupt_mask_reset_mask(arch_local_irqs_enabled())
 
 /* Disable or enable interrupts based on flag argument. */
 #define arch_local_irq_restore(disabled) do { \
@@ -161,7 +176,7 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
 
 /* Prevent the given interrupt from being enabled next time we enable irqs. */
 #define arch_local_irq_mask(interrupt) \
-       (__get_cpu_var(interrupts_enabled_mask) &= ~(1ULL << (interrupt)))
+       this_cpu_and(interrupts_enabled_mask, ~(1ULL << (interrupt)))
 
 /* Prevent the given interrupt from being enabled immediately. */
 #define arch_local_irq_mask_now(interrupt) do { \
@@ -171,7 +186,7 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
 
 /* Allow the given interrupt to be enabled next time we enable irqs. */
 #define arch_local_irq_unmask(interrupt) \
-       (__get_cpu_var(interrupts_enabled_mask) |= (1ULL << (interrupt)))
+       this_cpu_or(interrupts_enabled_mask, (1ULL << (interrupt)))
 
 /* Allow the given interrupt to be enabled immediately, if !irqs_disabled. */
 #define arch_local_irq_unmask_now(interrupt) do { \
diff --git a/arch/tile/include/asm/kdebug.h b/arch/tile/include/asm/kdebug.h
new file mode 100644 (file)
index 0000000..5bbbfa9
--- /dev/null
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2012 Tilera Corporation. All Rights Reserved.
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful, but
+ *   WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ *   NON INFRINGEMENT.  See the GNU General Public License for
+ *   more details.
+ */
+
+#ifndef _ASM_TILE_KDEBUG_H
+#define _ASM_TILE_KDEBUG_H
+
+#include <linux/notifier.h>
+
+enum die_val {
+       DIE_OOPS = 1,
+       DIE_BREAK,
+       DIE_SSTEPBP,
+       DIE_PAGE_FAULT,
+       DIE_COMPILED_BPT
+};
+
+#endif /* _ASM_TILE_KDEBUG_H */
diff --git a/arch/tile/include/asm/kgdb.h b/arch/tile/include/asm/kgdb.h
new file mode 100644 (file)
index 0000000..280c181
--- /dev/null
@@ -0,0 +1,71 @@
+/*
+ * Copyright 2013 Tilera Corporation. All Rights Reserved.
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful, but
+ *   WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ *   NON INFRINGEMENT.  See the GNU General Public License for
+ *   more details.
+ *
+ * TILE-Gx KGDB support.
+ */
+
+#ifndef __TILE_KGDB_H__
+#define __TILE_KGDB_H__
+
+#include <linux/kdebug.h>
+#include <arch/opcode.h>
+
+#define GDB_SIZEOF_REG         sizeof(unsigned long)
+
+/*
+ * TILE-Gx gdb is expecting the following register layout:
+ * 56 GPRs(R0 - R52, TP, SP, LR), 8 special GPRs(networks and ZERO),
+ * plus the PC and the faultnum.
+ *
+ * Even though kernel not use the 8 special GPRs, they need to be present
+ * in the registers sent for correct processing in the host-side gdb.
+ *
+ */
+#define DBG_MAX_REG_NUM                (56+8+2)
+#define NUMREGBYTES            (DBG_MAX_REG_NUM * GDB_SIZEOF_REG)
+
+/*
+ * BUFMAX defines the maximum number of characters in inbound/outbound
+ * buffers at least NUMREGBYTES*2 are needed for register packets,
+ * Longer buffer is needed to list all threads.
+ */
+#define BUFMAX                 2048
+
+#define BREAK_INSTR_SIZE       TILEGX_BUNDLE_SIZE_IN_BYTES
+
+/*
+ * Require cache flush for set/clear a software breakpoint or write memory.
+ */
+#define CACHE_FLUSH_IS_SAFE    1
+
+/*
+ * The compiled-in breakpoint instruction can be used to "break" into
+ * the debugger via magic system request key (sysrq-G).
+ */
+static tile_bundle_bits compiled_bpt = TILEGX_BPT_BUNDLE | DIE_COMPILED_BPT;
+
+enum tilegx_regnum {
+       TILEGX_PC_REGNUM = TREG_LAST_GPR + 9,
+       TILEGX_FAULTNUM_REGNUM,
+};
+
+/*
+ * Generate a breakpoint exception to "break" into the debugger.
+ */
+static inline void arch_kgdb_breakpoint(void)
+{
+       asm volatile (".quad %0\n\t"
+                     ::""(compiled_bpt));
+}
+
+#endif /* __TILE_KGDB_H__ */
diff --git a/arch/tile/include/asm/kprobes.h b/arch/tile/include/asm/kprobes.h
new file mode 100644 (file)
index 0000000..d8f9a83
--- /dev/null
@@ -0,0 +1,79 @@
+/*
+ * arch/tile/include/asm/kprobes.h
+ *
+ * Copyright 2012 Tilera Corporation. All Rights Reserved.
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful, but
+ *   WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ *   NON INFRINGEMENT.  See the GNU General Public License for
+ *   more details.
+ */
+
+#ifndef _ASM_TILE_KPROBES_H
+#define _ASM_TILE_KPROBES_H
+
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/percpu.h>
+
+#include <arch/opcode.h>
+
+#define __ARCH_WANT_KPROBES_INSN_SLOT
+#define MAX_INSN_SIZE                  2
+
+#define kretprobe_blacklist_size 0
+
+typedef tile_bundle_bits kprobe_opcode_t;
+
+#define flush_insn_slot(p)                                             \
+       flush_icache_range((unsigned long)p->addr,                      \
+                          (unsigned long)p->addr +                     \
+                          (MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))
+
+struct kprobe;
+
+/* Architecture specific copy of original instruction. */
+struct arch_specific_insn {
+       kprobe_opcode_t *insn;
+};
+
+struct prev_kprobe {
+       struct kprobe *kp;
+       unsigned long status;
+       unsigned long saved_pc;
+};
+
+#define MAX_JPROBES_STACK_SIZE 128
+#define MAX_JPROBES_STACK_ADDR \
+       (((unsigned long)current_thread_info()) + THREAD_SIZE - 32 \
+               - sizeof(struct pt_regs))
+
+#define MIN_JPROBES_STACK_SIZE(ADDR)                                   \
+       ((((ADDR) + MAX_JPROBES_STACK_SIZE) > MAX_JPROBES_STACK_ADDR)   \
+               ? MAX_JPROBES_STACK_ADDR - (ADDR)                       \
+               : MAX_JPROBES_STACK_SIZE)
+
+/* per-cpu kprobe control block. */
+struct kprobe_ctlblk {
+       unsigned long kprobe_status;
+       unsigned long kprobe_saved_pc;
+       unsigned long jprobe_saved_sp;
+       struct prev_kprobe prev_kprobe;
+       struct pt_regs jprobe_saved_regs;
+       char jprobes_stack[MAX_JPROBES_STACK_SIZE];
+};
+
+extern tile_bundle_bits breakpoint2_insn;
+extern tile_bundle_bits breakpoint_insn;
+
+void arch_remove_kprobe(struct kprobe *);
+
+extern int kprobe_exceptions_notify(struct notifier_block *self,
+                            unsigned long val, void *data);
+
+#endif /* _ASM_TILE_KPROBES_H */
diff --git a/arch/tile/include/asm/kvm.h b/arch/tile/include/asm/kvm.h
new file mode 100644 (file)
index 0000000..2ea6c41
--- /dev/null
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2013 Tilera Corporation. All Rights Reserved.
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful, but
+ *   WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ *   NON INFRINGEMENT.  See the GNU General Public License for
+ *   more details.
+ */
+#ifndef _ASM_TILE_KVM_H
+#define _ASM_TILE_KVM_H
+
+#include <hv/hypervisor.h>
+#include <uapi/asm/kvm.h>
+
+#ifndef __ASSEMBLER__
+/* For hv_*() */
+#define KVM_EMULATE(name) [HV_SYS_##name] = kvm_emulate_hv_##name,
+#define USER_EMULATE(name) [HV_SYS_##name] = kvm_deliver_to_user,
+#define NO_EMULATE(name) [HV_SYS_##name] = kvm_emulate_illegal,
+#define BOTH_EMULATE(name) [HV_SYS_##name] = kvm_emulate_hv_##name,
+/* For others */
+#define USER_HCALL(name) [KVM_HCALL_##name] = kvm_deliver_to_user,
+#endif
+#endif /* _ASM_TILE_KVM_H */
diff --git a/arch/tile/include/asm/kvm_host.h b/arch/tile/include/asm/kvm_host.h
new file mode 100644 (file)
index 0000000..58b6bf3
--- /dev/null
@@ -0,0 +1,101 @@
+/*
+ * Copyright 2013 Tilera Corporation. All Rights Reserved.
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful, but
+ *   WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ *   NON INFRINGEMENT.  See the GNU General Public License for
+ *   more details.
+ */
+
+#ifndef _ASM_TILE_KVM_HOST_H
+#define _ASM_TILE_KVM_HOST_H
+
+#define KVM_MAX_VCPUS 64
+#define KVM_USER_MEM_SLOTS 32
+#define KVM_PRIVATE_MEM_SLOTS 4
+
+/* For now, claim we have no huge pages. */
+#define KVM_HPAGE_GFN_SHIFT(x)  0
+#define KVM_NR_PAGE_SIZES       1
+#define KVM_PAGES_PER_HPAGE(x)  1
+
+/* Max number of message tags for hv_send/receive_message() */
+#define MAX_MSG_TAG    (sizeof(unsigned long) * 8)
+
+/* Bits in pending_downcalls */
+#define DOWNCALL_MESSAGE_RCV     0x01  /**< Message receive */
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+#include <linux/ptrace.h>
+
+struct kvm_vcpu_stat {
+       /* None yet. */
+};
+
+struct kvm_vcpu_arch {
+       struct pt_regs regs;
+       struct kvm_sregs sregs;
+       unsigned long host_sp; /* Host "real" sp during vmresume. */
+       HV_Context guest_context;
+       unsigned long pending_msgs; /* Pending guest messages */
+       unsigned long ipi_events; /* Pending guest ipi events. */
+       unsigned long ipi_gpa; /* pa for hv_get_ipi_pte() */
+       pte_t ipi_gpte; /* pte for hv_get_ipi_pte() */
+       unsigned long fault_addr;  /* addr for VPGTABLE_MISS faults */
+       int suspended;  /* true for cores not yet started by host */
+       unsigned long timer_control;  /* AUX_TILE_TIMER_CONTROL value */
+       unsigned long vmexit_cycles;  /* cycle count of last vmexit */
+};
+
+struct kvm_vm_stat {
+       /*
+        * FIXME - does this make sense for us?  It's used in common KVM
+        * code.
+        */
+       u32 remote_tlb_flush;
+};
+
+struct kvm_arch_memory_slot {
+};
+
+struct kvm_arch {
+       pgd_t *vpgd;
+       unsigned long resv_gpa_start; /* For special purpose. */
+       struct completion smp_start;
+};
+
+struct kvm_vcpu;
+
+extern void kvm_vmresume(struct pt_regs *guest,
+                        unsigned long *host_sp_ptr);
+extern void kvm_vmexit(unsigned long host_sp);
+extern void kvm_trigger_vmexit(struct pt_regs *regs, int exit_reason);
+extern void kvm_do_hypervisor_call(struct pt_regs *regs, int fault_num);
+extern void kvm_do_vpgtable_miss(struct pt_regs *regs, int fault_num,
+                                unsigned long, unsigned long);
+extern void kvm_do_vguest_fatal(struct pt_regs *regs, int fault_num);
+
+extern void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
+
+#define gpud_offset(kvm, pgd, address) pud_offset(pgd, address)
+
+#define gpud_page_vaddr(kvm, pud) gfn_to_hva(kvm, pud_pfn(pud))
+
+#define gpmd_offset(kvm, pud, address) \
+       ((pmd_t *)gpud_page_vaddr(kvm, *(pud)) + pmd_index(address))
+
+#define gpmd_page_vaddr(kvm, pmd) gfn_to_hva(kvm, pmd_pfn(pmd))
+
+#define gpte_offset_kernel(kvm, pmd, address) \
+       ((pte_t *) gpmd_page_vaddr(kvm, *(pmd)) + pte_index(address))
+
+#endif /* __ASSEMBLY__*/
+
+#endif /* _ASM_TILE_KVM_HOST_H */
similarity index 67%
rename from arch/tile/include/asm/hw_irq.h
rename to arch/tile/include/asm/kvm_para.h
index 4fac5fbf333eaa1251a222adf29d6c006c3cd6a7..c8c31d53cbe530a6f7db582116e29adaeff62417 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
+ * Copyright 2013 Tilera Corporation. All Rights Reserved.
  *
  *   This program is free software; you can redistribute it and/or
  *   modify it under the terms of the GNU General Public License
  *   NON INFRINGEMENT.  See the GNU General Public License for
  *   more details.
  */
+#ifndef _ASM_TILE_KVM_PARA_H
+#define _ASM_TILE_KVM_PARA_H
 
-#ifndef _ASM_TILE_HW_IRQ_H
-#define _ASM_TILE_HW_IRQ_H
+#include <uapi/asm/kvm_para.h>
 
-#endif /* _ASM_TILE_HW_IRQ_H */
+int hcall_virtio(unsigned long instrument, unsigned long mem);
+#endif /* _ASM_TILE_KVM_PARA_H */
diff --git a/arch/tile/include/asm/kvm_virtio.h b/arch/tile/include/asm/kvm_virtio.h
new file mode 100644 (file)
index 0000000..8faa959
--- /dev/null
@@ -0,0 +1,26 @@
+/*
+ * Copyright 2013 Tilera Corporation. All Rights Reserved.
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful, but
+ *   WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ *   NON INFRINGEMENT.  See the GNU General Public License for
+ *   more details.
+ */
+#ifndef _ASM_TILE_KVM_VIRTIO_H
+#define _ASM_TILE_KVM_VIRTIO_H
+
+#include <uapi/asm/kvm_virtio.h>
+
+
+struct kvm_device {
+       struct virtio_device vdev;
+       struct kvm_device_desc *desc;
+       unsigned long desc_pa;
+};
+
+#endif /* _ASM_TILE_KVM_VIRTIO_H */
index e2c789096795222970d1906f708c828b1e3b5d1a..0cab1182bde1f04b2287b51100ce36efd69a68a2 100644 (file)
@@ -22,6 +22,7 @@ struct mm_context {
         * semaphore but atomically, but it is conservatively set.
         */
        unsigned long priority_cached;
+       unsigned long vdso_base;
 };
 
 typedef struct mm_context mm_context_t;
index 37f0b741dee796f2b37b5b3ce1771c55c1037fd2..4734215e2ad451ecb5b9d3e655ccc72f31ac80f0 100644 (file)
@@ -45,7 +45,7 @@ static inline void __install_page_table(pgd_t *pgdir, int asid, pgprot_t prot)
 
 static inline void install_page_table(pgd_t *pgdir, int asid)
 {
-       pte_t *ptep = virt_to_pte(NULL, (unsigned long)pgdir);
+       pte_t *ptep = virt_to_kpte((unsigned long)pgdir);
        __install_page_table(pgdir, asid, *ptep);
 }
 
index 9d3dbce8f953167aca40e12eb56dc376ee0acc73..804f1098b6cdc4730a1d9cf852f08ecc711234d8 100644 (file)
@@ -42,7 +42,7 @@ static inline int pfn_to_nid(unsigned long pfn)
 
 #define kern_addr_valid(kaddr) virt_addr_valid((void *)kaddr)
 
-static inline int pfn_valid(int pfn)
+static inline int pfn_valid(unsigned long pfn)
 {
        int nid = pfn_to_nid(pfn);
 
index 44ed07ccd3d2153b0d89a06c54fa89d2302f1393..a8b546b0abb47d6a453aafa90341773ea1e99c97 100644 (file)
@@ -16,7 +16,6 @@
 #define _ASM_TILE_MODULE_H
 
 #include <arch/chip.h>
-
 #include <asm-generic/module.h>
 
 /* We can't use modules built with different page sizes. */
 # define MODULE_PGSZ ""
 #endif
 
+/* Tag guest Linux, since it uses different SPRs, etc. */
+#if CONFIG_KERNEL_PL == 2
+#define MODULE_PL ""
+#else
+#define MODULE_PL " guest"
+#endif
+
 /* We don't really support no-SMP so tag if someone tries. */
 #ifdef CONFIG_SMP
 #define MODULE_NOSMP ""
@@ -35,6 +41,6 @@
 #define MODULE_NOSMP " nosmp"
 #endif
 
-#define MODULE_ARCH_VERMAGIC CHIP_ARCH_NAME MODULE_PGSZ MODULE_NOSMP
+#define MODULE_ARCH_VERMAGIC CHIP_ARCH_NAME MODULE_PGSZ MODULE_PL MODULE_NOSMP
 
 #endif /* _ASM_TILE_MODULE_H */
index dd033a4fd627ce5d08b52477b7f3d9ca2723a513..bf6b35ce80369702c773515995423e9dd4e51725 100644 (file)
 #define PAGE_MASK      (~(PAGE_SIZE - 1))
 #define HPAGE_MASK     (~(HPAGE_SIZE - 1))
 
+/*
+ * We do define AT_SYSINFO_EHDR to support vDSO,
+ * but don't use the gate mechanism.
+ */
+#define __HAVE_ARCH_GATE_AREA          1
+
 /*
  * If the Kconfig doesn't specify, set a maximum zone order that
  * is enough so that we can create huge pages from small pages given
@@ -142,8 +148,17 @@ static inline __attribute_const__ int get_order(unsigned long size)
 #define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
 #endif
 
+#ifdef CONFIG_KVM_GUEST
+/* Paravirtualized guests get half the VA, and thus half the PA. */
+#define MAX_PA_WIDTH (CHIP_PA_WIDTH() - 1)
+#define MAX_VA_WIDTH (CHIP_VA_WIDTH() - 1)
+#else
+#define MAX_PA_WIDTH CHIP_PA_WIDTH()
+#define MAX_VA_WIDTH CHIP_VA_WIDTH()
+#endif
+
 /* Each memory controller has PAs distinct in their high bits. */
-#define NR_PA_HIGHBIT_SHIFT (CHIP_PA_WIDTH() - CHIP_LOG_NUM_MSHIMS())
+#define NR_PA_HIGHBIT_SHIFT (MAX_PA_WIDTH - CHIP_LOG_NUM_MSHIMS())
 #define NR_PA_HIGHBIT_VALUES (1 << CHIP_LOG_NUM_MSHIMS())
 #define __pa_to_highbits(pa) ((phys_addr_t)(pa) >> NR_PA_HIGHBIT_SHIFT)
 #define __pfn_to_highbits(pfn) ((pfn) >> (NR_PA_HIGHBIT_SHIFT - PAGE_SHIFT))
@@ -154,7 +169,7 @@ static inline __attribute_const__ int get_order(unsigned long size)
  * We reserve the lower half of memory for user-space programs, and the
  * upper half for system code.  We re-map all of physical memory in the
  * upper half, which takes a quarter of our VA space.  Then we have
- * the vmalloc regions.  The supervisor code lives at 0xfffffff700000000,
+ * the vmalloc regions.  The supervisor code lives at the highest address,
  * with the hypervisor above that.
  *
  * Loadable kernel modules are placed immediately after the static
@@ -166,26 +181,25 @@ static inline __attribute_const__ int get_order(unsigned long size)
  * Similarly, for now we don't play any struct page mapping games.
  */
 
-#if CHIP_PA_WIDTH() + 2 > CHIP_VA_WIDTH()
+#if MAX_PA_WIDTH + 2 > MAX_VA_WIDTH
 # error Too much PA to map with the VA available!
 #endif
-#define HALF_VA_SPACE           (_AC(1, UL) << (CHIP_VA_WIDTH() - 1))
 
-#define MEM_LOW_END            (HALF_VA_SPACE - 1)         /* low half */
-#define MEM_HIGH_START         (-HALF_VA_SPACE)            /* high half */
-#define PAGE_OFFSET            MEM_HIGH_START
-#define FIXADDR_BASE           _AC(0xfffffff400000000, UL) /* 4 GB */
-#define FIXADDR_TOP            _AC(0xfffffff500000000, UL) /* 4 GB */
+#ifdef CONFIG_KVM_GUEST
+#define PAGE_OFFSET            (_AC(1, UL) << (MAX_VA_WIDTH - 1))
+#define KERNEL_HIGH_VADDR      (_AC(1, UL) << MAX_VA_WIDTH)
+#else
+#define PAGE_OFFSET            (-(_AC(1, UL) << (MAX_VA_WIDTH - 1)))
+#define KERNEL_HIGH_VADDR      _AC(0xfffffff800000000, UL)  /* high 32GB */
+#endif
+
+#define FIXADDR_BASE           (KERNEL_HIGH_VADDR - 0x400000000) /* 4 GB */
+#define FIXADDR_TOP            (KERNEL_HIGH_VADDR - 0x300000000) /* 4 GB */
 #define _VMALLOC_START         FIXADDR_TOP
-#define HUGE_VMAP_BASE         _AC(0xfffffff600000000, UL) /* 4 GB */
-#define MEM_SV_START           _AC(0xfffffff700000000, UL) /* 256 MB */
-#define MEM_SV_INTRPT          MEM_SV_START
-#define MEM_MODULE_START       _AC(0xfffffff710000000, UL) /* 256 MB */
+#define HUGE_VMAP_BASE         (KERNEL_HIGH_VADDR - 0x200000000) /* 4 GB */
+#define MEM_SV_START           (KERNEL_HIGH_VADDR - 0x100000000) /* 256 MB */
+#define MEM_MODULE_START       (MEM_SV_START + (256*1024*1024)) /* 256 MB */
 #define MEM_MODULE_END         (MEM_MODULE_START + (256*1024*1024))
-#define MEM_HV_START           _AC(0xfffffff800000000, UL) /* 32 GB */
-
-/* Highest DTLB address we will use */
-#define KERNEL_HIGH_VADDR      MEM_SV_START
 
 #else /* !__tilegx__ */
 
@@ -207,25 +221,18 @@ static inline __attribute_const__ int get_order(unsigned long size)
  * values, and after that, we show "typical" values, since the actual
  * addresses depend on kernel #defines.
  *
- * MEM_HV_INTRPT                   0xfe000000
- * MEM_SV_INTRPT (kernel code)     0xfd000000
+ * MEM_HV_START                    0xfe000000
+ * MEM_SV_START  (kernel code)     0xfd000000
  * MEM_USER_INTRPT (user vector)   0xfc000000
- * FIX_KMAP_xxx                    0xf8000000 (via NR_CPUS * KM_TYPE_NR)
- * PKMAP_BASE                      0xf7000000 (via LAST_PKMAP)
- * HUGE_VMAP                       0xf3000000 (via CONFIG_NR_HUGE_VMAPS)
- * VMALLOC_START                   0xf0000000 (via __VMALLOC_RESERVE)
+ * FIX_KMAP_xxx                    0xfa000000 (via NR_CPUS * KM_TYPE_NR)
+ * PKMAP_BASE                      0xf9000000 (via LAST_PKMAP)
+ * VMALLOC_START                   0xf7000000 (via VMALLOC_RESERVE)
  * mapped LOWMEM                   0xc0000000
  */
 
 #define MEM_USER_INTRPT                _AC(0xfc000000, UL)
-#if CONFIG_KERNEL_PL == 1
-#define MEM_SV_INTRPT          _AC(0xfd000000, UL)
-#define MEM_HV_INTRPT          _AC(0xfe000000, UL)
-#else
-#define MEM_GUEST_INTRPT       _AC(0xfd000000, UL)
-#define MEM_SV_INTRPT          _AC(0xfe000000, UL)
-#define MEM_HV_INTRPT          _AC(0xff000000, UL)
-#endif
+#define MEM_SV_START           _AC(0xfd000000, UL)
+#define MEM_HV_START           _AC(0xfe000000, UL)
 
 #define INTRPT_SIZE            0x4000
 
@@ -246,7 +253,7 @@ static inline __attribute_const__ int get_order(unsigned long size)
 
 #endif /* __tilegx__ */
 
-#ifndef __ASSEMBLY__
+#if !defined(__ASSEMBLY__) && !defined(VDSO_BUILD)
 
 #ifdef CONFIG_HIGHMEM
 
@@ -332,6 +339,7 @@ static inline int pfn_valid(unsigned long pfn)
 
 struct mm_struct;
 extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr);
+extern pte_t *virt_to_kpte(unsigned long kaddr);
 
 #endif /* !__ASSEMBLY__ */
 
index 54a924208d3ce5e16ec27615975fcd2e6335c054..dfedd7ac7298dc93424a7862ef20b2c8ce73da1f 100644 (file)
@@ -17,7 +17,6 @@
 
 #include <linux/dma-mapping.h>
 #include <linux/pci.h>
-#include <linux/numa.h>
 #include <asm-generic/pci_iomap.h>
 
 #ifndef __tilegx__
@@ -29,7 +28,6 @@ struct pci_controller {
        int index;              /* PCI domain number */
        struct pci_bus *root_bus;
 
-       int first_busno;
        int last_busno;
 
        int hv_cfg_fd[2];       /* config{0,1} fds for this PCIe controller */
@@ -124,6 +122,11 @@ static inline void pci_iounmap(struct pci_dev *dev, void __iomem *addr) {}
  * the CPA plus TILE_PCI_MEM_MAP_BASE_OFFSET. To support 32-bit
  * devices, we create a separate map region that handles the low
  * 4GB.
+ *
+ * This design lets us avoid the "PCI hole" problem where the host bridge
+ * won't pass DMA traffic with target addresses that happen to fall within the
+ * BAR space. This enables us to use all the physical memory for DMA, instead
+ * of wasting the same amount of physical memory as the BAR window size.
  */
 #define        TILE_PCI_MEM_MAP_BASE_OFFSET    (1ULL << CHIP_PA_WIDTH())
 
@@ -145,6 +148,10 @@ struct pci_controller {
 
        int pio_mem_index;      /* PIO region index for memory access */
 
+#ifdef CONFIG_TILE_PCI_IO
+       int pio_io_index;       /* PIO region index for I/O space access */
+#endif
+
        /*
         * Mem-Map regions for all the memory controllers so that Linux can
         * map all of its physical memory space to the PCI bus.
@@ -154,6 +161,10 @@ struct pci_controller {
        int index;              /* PCI domain number */
        struct pci_bus *root_bus;
 
+       /* PCI I/O space resource for this controller. */
+       struct resource io_space;
+       char io_space_name[32];
+
        /* PCI memory space resource for this controller. */
        struct resource mem_space;
        char mem_space_name[32];
@@ -166,13 +177,11 @@ struct pci_controller {
 
        /* Table that maps the INTx numbers to Linux irq numbers. */
        int irq_intx_table[4];
-
-       /* Address ranges that are routed to this controller/bridge. */
-       struct resource mem_resources[3];
 };
 
 extern struct pci_controller pci_controllers[TILEGX_NUM_TRIO * TILEGX_TRIO_PCIES];
 extern gxio_trio_context_t trio_contexts[TILEGX_NUM_TRIO];
+extern int num_trio_shims;
 
 extern void pci_iounmap(struct pci_dev *dev, void __iomem *);
 
@@ -211,7 +220,8 @@ static inline int pcibios_assign_all_busses(void)
 }
 
 #define PCIBIOS_MIN_MEM                0
-#define PCIBIOS_MIN_IO         0
+/* Minimum PCI I/O address, starting at the page boundary. */
+#define PCIBIOS_MIN_IO         PAGE_SIZE
 
 /* Use any cpu for PCI. */
 #define cpumask_of_pcibus(bus) cpu_online_mask
index 4ce4a7a99c244c5546f37465f6ebceafbb55173e..63142ab3b3dd8337bf1b6cc2c165af65fc85181a 100644 (file)
@@ -84,10 +84,12 @@ extern unsigned long VMALLOC_RESERVE /* = CONFIG_VMALLOC_RESERVE */;
 /* We have no pmd or pud since we are strictly a two-level page table */
 #include <asm-generic/pgtable-nopmd.h>
 
+static inline int pud_huge_page(pud_t pud)     { return 0; }
+
 /* We don't define any pgds for these addresses. */
 static inline int pgd_addr_invalid(unsigned long addr)
 {
-       return addr >= MEM_HV_INTRPT;
+       return addr >= MEM_HV_START;
 }
 
 /*
index 2492fa5478e74077d7dfe60c588a01b6c8083867..3421177f737002ee8c8addf515e30615729d08b5 100644 (file)
 /* We have no pud since we are a three-level page table. */
 #include <asm-generic/pgtable-nopud.h>
 
+/*
+ * pmds are the same as pgds and ptes, so converting is a no-op.
+ */
+#define pmd_pte(pmd) (pmd)
+#define pmdp_ptep(pmdp) (pmdp)
+#define pte_pmd(pte) (pte)
+
+#define pud_pte(pud) ((pud).pgd)
+
 static inline int pud_none(pud_t pud)
 {
        return pud_val(pud) == 0;
@@ -73,6 +82,11 @@ static inline int pud_present(pud_t pud)
        return pud_val(pud) & _PAGE_PRESENT;
 }
 
+static inline int pud_huge_page(pud_t pud)
+{
+       return pud_val(pud) & _PAGE_HUGE_PAGE;
+}
+
 #define pmd_ERROR(e) \
        pr_err("%s:%d: bad pmd 0x%016llx.\n", __FILE__, __LINE__, pmd_val(e))
 
@@ -89,6 +103,9 @@ static inline int pud_bad(pud_t pud)
 /* Return the page-table frame number (ptfn) that a pud_t points at. */
 #define pud_ptfn(pud) hv_pte_get_ptfn((pud).pgd)
 
+/* Return the page frame number (pfn) that a pud_t points at. */
+#define pud_pfn(pud) pte_pfn(pud_pte(pud))
+
 /*
  * A given kernel pud_t maps to a kernel pmd_t table at a specific
  * virtual address.  Since kernel pmd_t tables can be aligned at
@@ -123,8 +140,7 @@ static inline unsigned long pgd_addr_normalize(unsigned long addr)
 /* We don't define any pgds for these addresses. */
 static inline int pgd_addr_invalid(unsigned long addr)
 {
-       return addr >= MEM_HV_START ||
-               (addr > MEM_LOW_END && addr < MEM_HIGH_START);
+       return addr >= KERNEL_HIGH_VADDR || addr != pgd_addr_normalize(addr);
 }
 
 /*
@@ -152,13 +168,6 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
        return hv_pte(__insn_exch(&ptep->val, 0UL));
 }
 
-/*
- * pmds are the same as pgds and ptes, so converting is a no-op.
- */
-#define pmd_pte(pmd) (pmd)
-#define pmdp_ptep(pmdp) (pmdp)
-#define pte_pmd(pte) (pte)
-
 #endif /* __ASSEMBLY__ */
 
 #endif /* _ASM_TILE_PGTABLE_64_H */
index b3f104953da2a67db4684cbad564a5547ad7aee4..42323636c459f3577e6053bad26c36c4adb1535b 100644 (file)
@@ -15,6 +15,8 @@
 #ifndef _ASM_TILE_PROCESSOR_H
 #define _ASM_TILE_PROCESSOR_H
 
+#include <arch/chip.h>
+
 #ifndef __ASSEMBLY__
 
 /*
@@ -25,7 +27,6 @@
 #include <asm/ptrace.h>
 #include <asm/percpu.h>
 
-#include <arch/chip.h>
 #include <arch/spr_def.h>
 
 struct task_struct;
@@ -110,18 +111,16 @@ struct thread_struct {
        unsigned long long interrupt_mask;
        /* User interrupt-control 0 state */
        unsigned long intctrl_0;
-#if CHIP_HAS_PROC_STATUS_SPR()
+       /* Is this task currently doing a backtrace? */
+       bool in_backtrace;
        /* Any other miscellaneous processor state bits */
        unsigned long proc_status;
-#endif
 #if !CHIP_HAS_FIXED_INTVEC_BASE()
        /* Interrupt base for PL0 interrupts */
        unsigned long interrupt_vector_base;
 #endif
-#if CHIP_HAS_TILE_RTF_HWM()
        /* Tile cache retry fifo high-water mark */
        unsigned long tile_rtf_hwm;
-#endif
 #if CHIP_HAS_DSTREAM_PF()
        /* Data stream prefetch control */
        unsigned long dstream_pf;
@@ -134,21 +133,16 @@ struct thread_struct {
        /* Async DMA TLB fault information */
        struct async_tlb dma_async_tlb;
 #endif
-#if CHIP_HAS_SN_PROC()
-       /* Was static network processor when we were switched out? */
-       int sn_proc_running;
-       /* Async SNI TLB fault information */
-       struct async_tlb sn_async_tlb;
-#endif
 };
 
 #endif /* !__ASSEMBLY__ */
 
 /*
  * Start with "sp" this many bytes below the top of the kernel stack.
- * This preserves the invariant that a called function may write to *sp.
+ * This allows us to be cache-aware when handling the initial save
+ * of the pt_regs value to the stack.
  */
-#define STACK_TOP_DELTA 8
+#define STACK_TOP_DELTA 64
 
 /*
  * When entering the kernel via a fault, start with the top of the
@@ -164,7 +158,7 @@ struct thread_struct {
 #ifndef __ASSEMBLY__
 
 #ifdef __tilegx__
-#define TASK_SIZE_MAX          (MEM_LOW_END + 1)
+#define TASK_SIZE_MAX          (_AC(1, UL) << (MAX_VA_WIDTH - 1))
 #else
 #define TASK_SIZE_MAX          PAGE_OFFSET
 #endif
@@ -178,10 +172,10 @@ struct thread_struct {
 #define TASK_SIZE              TASK_SIZE_MAX
 #endif
 
-/* We provide a minimal "vdso" a la x86; just the sigreturn code for now. */
-#define VDSO_BASE              (TASK_SIZE - PAGE_SIZE)
+#define VDSO_BASE      ((unsigned long)current->active_mm->context.vdso_base)
+#define VDSO_SYM(x)    (VDSO_BASE + (unsigned long)(x))
 
-#define STACK_TOP              VDSO_BASE
+#define STACK_TOP              TASK_SIZE
 
 /* STACK_TOP_MAX is used temporarily in execve and should not check COMPAT. */
 #define STACK_TOP_MAX          TASK_SIZE_MAX
@@ -232,21 +226,28 @@ extern int do_work_pending(struct pt_regs *regs, u32 flags);
 unsigned long get_wchan(struct task_struct *p);
 
 /* Return initial ksp value for given task. */
-#define task_ksp0(task) ((unsigned long)(task)->stack + THREAD_SIZE)
+#define task_ksp0(task) \
+       ((unsigned long)(task)->stack + THREAD_SIZE - STACK_TOP_DELTA)
 
 /* Return some info about the user process TASK. */
-#define KSTK_TOP(task) (task_ksp0(task) - STACK_TOP_DELTA)
 #define task_pt_regs(task) \
-  ((struct pt_regs *)(task_ksp0(task) - KSTK_PTREGS_GAP) - 1)
+       ((struct pt_regs *)(task_ksp0(task) - KSTK_PTREGS_GAP) - 1)
 #define current_pt_regs()                                   \
-  ((struct pt_regs *)((stack_pointer | (THREAD_SIZE - 1)) - \
-                      (KSTK_PTREGS_GAP - 1)) - 1)
+       ((struct pt_regs *)((stack_pointer | (THREAD_SIZE - 1)) - \
+                           STACK_TOP_DELTA - (KSTK_PTREGS_GAP - 1)) - 1)
 #define task_sp(task)  (task_pt_regs(task)->sp)
 #define task_pc(task)  (task_pt_regs(task)->pc)
 /* Aliases for pc and sp (used in fs/proc/array.c) */
 #define KSTK_EIP(task) task_pc(task)
 #define KSTK_ESP(task) task_sp(task)
 
+/* Fine-grained unaligned JIT support */
+#define GET_UNALIGN_CTL(tsk, adr)      get_unalign_ctl((tsk), (adr))
+#define SET_UNALIGN_CTL(tsk, val)      set_unalign_ctl((tsk), (val))
+
+extern int get_unalign_ctl(struct task_struct *tsk, unsigned long adr);
+extern int set_unalign_ctl(struct task_struct *tsk, unsigned int val);
+
 /* Standard format for printing registers and other word-size data. */
 #ifdef __tilegx__
 # define REGFMT "0x%016lx"
@@ -275,7 +276,6 @@ extern char chip_model[64];
 /* Data on which physical memory controller corresponds to which NUMA node. */
 extern int node_controller[];
 
-#if CHIP_HAS_CBOX_HOME_MAP()
 /* Does the heap allocator return hash-for-home pages by default? */
 extern int hash_default;
 
@@ -285,11 +285,6 @@ extern int kstack_hash;
 /* Does MAP_ANONYMOUS return hash-for-home pages by default? */
 #define uheap_hash hash_default
 
-#else
-#define hash_default 0
-#define kstack_hash 0
-#define uheap_hash 0
-#endif
 
 /* Are we using huge pages in the TLB for kernel data? */
 extern int kdata_huge;
@@ -337,7 +332,6 @@ extern int kdata_huge;
 
 /*
  * Provide symbolic constants for PLs.
- * Note that assembly code assumes that USER_PL is zero.
  */
 #define USER_PL 0
 #if CONFIG_KERNEL_PL == 2
@@ -346,20 +340,38 @@ extern int kdata_huge;
 #define KERNEL_PL CONFIG_KERNEL_PL
 
 /* SYSTEM_SAVE_K_0 holds the current cpu number ORed with ksp0. */
-#define CPU_LOG_MASK_VALUE 12
-#define CPU_MASK_VALUE ((1 << CPU_LOG_MASK_VALUE) - 1)
-#if CONFIG_NR_CPUS > CPU_MASK_VALUE
-# error Too many cpus!
+#ifdef __tilegx__
+#define CPU_SHIFT 48
+#if CHIP_VA_WIDTH() > CPU_SHIFT
+# error Too many VA bits!
 #endif
+#define MAX_CPU_ID ((1 << (64 - CPU_SHIFT)) - 1)
+#define raw_smp_processor_id() \
+       ((int)(__insn_mfspr(SPR_SYSTEM_SAVE_K_0) >> CPU_SHIFT))
+#define get_current_ksp0() \
+       ((unsigned long)(((long)__insn_mfspr(SPR_SYSTEM_SAVE_K_0) << \
+                         (64 - CPU_SHIFT)) >> (64 - CPU_SHIFT)))
+#define next_current_ksp0(task) ({ \
+       unsigned long __ksp0 = task_ksp0(task) & ((1UL << CPU_SHIFT) - 1); \
+       unsigned long __cpu = (long)raw_smp_processor_id() << CPU_SHIFT; \
+       __ksp0 | __cpu; \
+})
+#else
+#define LOG2_NR_CPU_IDS 6
+#define MAX_CPU_ID ((1 << LOG2_NR_CPU_IDS) - 1)
 #define raw_smp_processor_id() \
-       ((int)__insn_mfspr(SPR_SYSTEM_SAVE_K_0) & CPU_MASK_VALUE)
+       ((int)__insn_mfspr(SPR_SYSTEM_SAVE_K_0) & MAX_CPU_ID)
 #define get_current_ksp0() \
-       (__insn_mfspr(SPR_SYSTEM_SAVE_K_0) & ~CPU_MASK_VALUE)
+       (__insn_mfspr(SPR_SYSTEM_SAVE_K_0) & ~MAX_CPU_ID)
 #define next_current_ksp0(task) ({ \
        unsigned long __ksp0 = task_ksp0(task); \
        int __cpu = raw_smp_processor_id(); \
-       BUG_ON(__ksp0 & CPU_MASK_VALUE); \
+       BUG_ON(__ksp0 & MAX_CPU_ID); \
        __ksp0 | __cpu; \
 })
+#endif
+#if CONFIG_NR_CPUS > (MAX_CPU_ID + 1)
+# error Too many cpus!
+#endif
 
 #endif /* _ASM_TILE_PROCESSOR_H */
index fd412260aff75d5d8505e1d629be607c4eb999ac..b9620c077abc9acc7b4927f9bc5d417175bb5a94 100644 (file)
@@ -33,12 +33,13 @@ typedef unsigned long pt_reg_t;
 
 #ifndef __ASSEMBLY__
 
+#define regs_return_value(regs) ((regs)->regs[0])
 #define instruction_pointer(regs) ((regs)->pc)
 #define profile_pc(regs) instruction_pointer(regs)
 #define user_stack_pointer(regs) ((regs)->sp)
 
 /* Does the process account for user or for system time? */
-#define user_mode(regs) (EX1_PL((regs)->ex1) == USER_PL)
+#define user_mode(regs) (EX1_PL((regs)->ex1) < KERNEL_PL)
 
 /* Fill in a struct pt_regs with the current kernel registers. */
 struct pt_regs *get_pt_regs(struct pt_regs *);
@@ -79,8 +80,7 @@ extern void single_step_execve(void);
 
 struct task_struct;
 
-extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
-                        int error_code);
+extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs);
 
 #ifdef __tilegx__
 /* We need this since sigval_t has a user pointer in it, for GETSIGINFO etc. */
index 7d8a935a9238a0191b0692a68ec85bcc57a9e3c7..5d5d3b739a6b2964669012d46d5e144a077b8c62 100644 (file)
@@ -25,10 +25,16 @@ extern char _sinitdata[], _einitdata[];
 /* Write-once data is writable only till the end of initialization. */
 extern char __w1data_begin[], __w1data_end[];
 
+extern char vdso_start[], vdso_end[];
+#ifdef CONFIG_COMPAT
+extern char vdso32_start[], vdso32_end[];
+#endif
 
 /* Not exactly sections, but PC comparison points in the code. */
 extern char __rt_sigreturn[], __rt_sigreturn_end[];
-#ifndef __tilegx__
+#ifdef __tilegx__
+extern char __start_unalign_asm_code[], __end_unalign_asm_code[];
+#else
 extern char sys_cmpxchg[], __sys_cmpxchg_end[];
 extern char __sys_cmpxchg_grab_lock[];
 extern char __start_atomic_asm_code[], __end_atomic_asm_code[];
index d048888c5d9aa8427fdee84ba324c7f662c7b6e9..e98909033e5b9e9b646d12024385c6f3ef9b46cc 100644 (file)
@@ -24,9 +24,8 @@
  */
 #define MAXMEM_PFN     PFN_DOWN(MAXMEM)
 
+int tile_console_write(const char *buf, int count);
 void early_panic(const char *fmt, ...);
-void warn_early_printk(void);
-void __init disable_early_printk(void);
 
 /* Init-time routine to do tile-specific per-cpu setup. */
 void setup_cpu(int boot);
index 1aa759aeb5b3f0db2842a22c99b2f493dca10cf7..9a326b64f7aeae059ad7de57d2744a3270f6e18b 100644 (file)
@@ -101,10 +101,8 @@ void print_disabled_cpus(void);
 extern struct cpumask cpu_lotar_map;
 #define cpu_is_valid_lotar(cpu) cpumask_test_cpu((cpu), &cpu_lotar_map)
 
-#if CHIP_HAS_CBOX_HOME_MAP()
 /* Which processors are used for hash-for-home mapping */
 extern struct cpumask hash_for_home_map;
-#endif
 
 /* Which cpus can have their cache flushed by hv_flush_remote(). */
 extern struct cpumask cpu_cacheable_map;
index 5f8b6a095fd84a0748ea645c96a4b3908ba6d6fb..9a12b9c7e5d36d13ba94fd8bdd5f3d824996f233 100644 (file)
@@ -27,7 +27,7 @@
  * Return the "current" portion of a ticket lock value,
  * i.e. the number that currently owns the lock.
  */
-static inline int arch_spin_current(u32 val)
+static inline u32 arch_spin_current(u32 val)
 {
        return val >> __ARCH_SPIN_CURRENT_SHIFT;
 }
@@ -36,7 +36,7 @@ static inline int arch_spin_current(u32 val)
  * Return the "next" portion of a ticket lock value,
  * i.e. the number that the next task to try to acquire the lock will get.
  */
-static inline int arch_spin_next(u32 val)
+static inline u32 arch_spin_next(u32 val)
 {
        return val & __ARCH_SPIN_NEXT_MASK;
 }
index 7535cf1a30e487a22ee88a4c762d411a507793b6..92b271bd9ebd7cb7527a12385c9f12cf10029d8e 100644 (file)
 #define __HAVE_ARCH_MEMMOVE
 #define __HAVE_ARCH_STRCHR
 #define __HAVE_ARCH_STRLEN
+#define __HAVE_ARCH_STRNLEN
 
 extern __kernel_size_t strlen(const char *);
+extern __kernel_size_t strnlen(const char *, __kernel_size_t);
 extern char *strchr(const char *s, int c);
 extern void *memchr(const void *s, int c, size_t n);
 extern void *memset(void *, int, __kernel_size_t);
index b8f888cbe6b030c46bf7d54e58353de476565eaa..8e9150f93c56abf537bb0a9fefcbe729a8c7361d 100644 (file)
@@ -49,17 +49,32 @@ extern struct task_struct *__switch_to(struct task_struct *prev,
 /* Address that switched-away from tasks are at. */
 extern unsigned long get_switch_to_pc(void);
 
+/*
+ * Normally we notify the simulator whenever we change from one pid
+ * to another, so it can track symbol files appropriately on the fly.
+ * For now, we don't do this for the guest Linux, since we don't
+ * have a way to tell the simulator that we are entering a separate
+ * pid space when we are in the guest.
+ */
+#ifdef CONFIG_KVM_GUEST
+#define notify_sim_task_change(prev) do { } while (0)
+#else
+#define notify_sim_task_change(prev) do {                              \
+       if (unlikely((prev)->state == TASK_DEAD))                       \
+               __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_EXIT |     \
+                            ((prev)->pid << _SIM_CONTROL_OPERATOR_BITS)); \
+       __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_SWITCH |           \
+                    (current->pid << _SIM_CONTROL_OPERATOR_BITS));     \
+} while (0)
+#endif
+
 /*
  * Kernel threads can check to see if they need to migrate their
  * stack whenever they return from a context switch; for user
  * threads, we defer until they are returning to user-space.
  */
 #define finish_arch_switch(prev) do {                                     \
-       if (unlikely((prev)->state == TASK_DEAD))                         \
-               __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_EXIT |       \
-                       ((prev)->pid << _SIM_CONTROL_OPERATOR_BITS));     \
-       __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_SWITCH |             \
-               (current->pid << _SIM_CONTROL_OPERATOR_BITS));            \
+       notify_sim_task_change(prev);                                     \
        if (current->mm == NULL && !kstack_hash &&                        \
            current_thread_info()->homecache_cpu != smp_processor_id())   \
                homecache_migrate_kthread();                              \
index d1733dee98a2d448f93dd6d84dd60adee0b84ed8..1c26cdf69828c9b04824712fec6abc0cd931a09e 100644 (file)
@@ -18,7 +18,9 @@
 
 #include <asm/processor.h>
 #include <asm/page.h>
+
 #ifndef __ASSEMBLY__
+struct kvm_vcpu;
 
 /*
  * Low level task data that assembly code needs immediate access to.
@@ -39,6 +41,14 @@ struct thread_info {
        struct restart_block    restart_block;
        struct single_step_state *step_state;   /* single step state
                                                   (if non-zero) */
+       int                     align_ctl;      /* controls unaligned access */
+#ifdef __tilegx__
+       unsigned long           unalign_jit_tmp[4]; /* temp r0..r3 storage */
+       void __user             *unalign_jit_base; /* unalign fixup JIT base */
+#endif
+#ifdef CONFIG_KVM
+       struct kvm_vcpu         *vcpu;          /* vcpu during vmresume */
+#endif
 };
 
 /*
@@ -56,6 +66,7 @@ struct thread_info {
                .fn = do_no_restart_syscall,    \
        },                                      \
        .step_state     = NULL,                 \
+       .align_ctl      = 0,                    \
 }
 
 #define init_thread_info       (init_thread_union.thread_info)
@@ -111,8 +122,8 @@ extern void _cpu_idle(void);
 
 /*
  * Thread information flags that various assembly files may need to access.
- * Keep flags accessed frequently in low bits, particular since it makes
- * it easier to build constants in assembly.
+ * Keep flags accessed frequently in low bits, since it makes it
+ * easier to build constants in assembly.
  */
 #define TIF_SIGPENDING         0       /* signal pending */
 #define TIF_NEED_RESCHED       1       /* rescheduling necessary */
@@ -125,6 +136,7 @@ extern void _cpu_idle(void);
 #define TIF_MEMDIE             7       /* OOM killer at work */
 #define TIF_NOTIFY_RESUME      8       /* callback before returning to user */
 #define TIF_SYSCALL_TRACEPOINT 9       /* syscall tracepoint instrumentation */
+#define TIF_VIRT_EXIT          10      /* force exit of task in vmresume */
 
 #define _TIF_SIGPENDING                (1<<TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED      (1<<TIF_NEED_RESCHED)
@@ -136,11 +148,12 @@ extern void _cpu_idle(void);
 #define _TIF_MEMDIE            (1<<TIF_MEMDIE)
 #define _TIF_NOTIFY_RESUME     (1<<TIF_NOTIFY_RESUME)
 #define _TIF_SYSCALL_TRACEPOINT        (1<<TIF_SYSCALL_TRACEPOINT)
+#define _TIF_VIRT_EXIT         (1<<TIF_VIRT_EXIT)
 
 /* Work to do on any return to user space. */
-#define _TIF_ALLWORK_MASK \
-  (_TIF_SIGPENDING|_TIF_NEED_RESCHED|_TIF_SINGLESTEP|\
-   _TIF_ASYNC_TLB|_TIF_NOTIFY_RESUME)
+#define _TIF_ALLWORK_MASK                                      \
+       (_TIF_SIGPENDING|_TIF_NEED_RESCHED|_TIF_SINGLESTEP|     \
+        _TIF_ASYNC_TLB|_TIF_NOTIFY_RESUME|_TIF_VIRT_EXIT)
 
 /* Work to do at syscall entry. */
 #define _TIF_SYSCALL_ENTRY_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_TRACEPOINT)
index dc987d53e2a92fffce3eda254ad42b169233f564..0417617c77fb6a103df0c1eea4307e4871f7b502 100644 (file)
 
 typedef unsigned long long cycles_t;
 
+#ifdef CONFIG_KVM_GUEST
+#define INT_LINUX_TIMER INT_AUX_TILE_TIMER
+#define SPR_LINUX_TIMER_CONTROL SPR_AUX_TILE_TIMER_CONTROL
+#else
+#define INT_LINUX_TIMER INT_TILE_TIMER
+#define SPR_LINUX_TIMER_CONTROL SPR_TILE_TIMER_CONTROL
+#endif
+
 #if CHIP_HAS_SPLIT_CYCLE()
 cycles_t get_cycles(void);
 #define get_cycles_low() __insn_mfspr(SPR_CYCLE_LOW)
@@ -40,6 +48,10 @@ static inline cycles_t get_cycles(void)
 
 cycles_t get_clock_rate(void);
 
+#ifdef __tilegx__
+unsigned int set_clock_rate(unsigned int new_rate);
+#endif
+
 /* Convert nanoseconds to core clock cycles. */
 cycles_t ns2cycles(unsigned long nsecs);
 
index e28c3df4176a918c09fc56dfa702c17e0f48e401..4b99a1c3aab2533c3bafe930cb6f92b5fc17117c 100644 (file)
 #ifndef _ASM_TILE_TRAPS_H
 #define _ASM_TILE_TRAPS_H
 
+#ifndef __ASSEMBLY__
 #include <arch/chip.h>
 
 /* mm/fault.c */
 void do_page_fault(struct pt_regs *, int fault_num,
                   unsigned long address, unsigned long write);
-#if CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC()
+#if CHIP_HAS_TILE_DMA()
 void do_async_page_fault(struct pt_regs *);
 #endif
 
@@ -69,6 +70,16 @@ void gx_singlestep_handle(struct pt_regs *, int fault_num);
 
 /* kernel/intvec_64.S */
 void fill_ra_stack(void);
+
+/* Handle unalign data fixup. */
+extern void do_unaligned(struct pt_regs *regs, int vecnum);
+#endif
+
+#endif /* __ASSEMBLY__ */
+
+#ifdef __tilegx__
+/* 128 byte JIT per unalign fixup. */
+#define UNALIGN_JIT_SHIFT    7
 #endif
 
 #endif /* _ASM_TILE_TRAPS_H */
index e4d44bd7df271f6b5445f5e7d5e593fe69cc58c7..b6cde3209b963d76ba35815d45b003ef929691f2 100644 (file)
@@ -127,8 +127,10 @@ extern int fixup_exception(struct pt_regs *regs);
 
 #ifdef __LP64__
 #define _ASM_PTR       ".quad"
+#define _ASM_ALIGN     ".align 8"
 #else
 #define _ASM_PTR       ".long"
+#define _ASM_ALIGN     ".align 4"
 #endif
 
 #define __get_user_asm(OP, x, ptr, ret)                                        \
@@ -137,6 +139,7 @@ extern int fixup_exception(struct pt_regs *regs);
                     "0: { movei %1, 0; movei %0, %3 }\n"               \
                     "j 9f\n"                                           \
                     ".section __ex_table,\"a\"\n"                      \
+                    _ASM_ALIGN "\n"                                    \
                     _ASM_PTR " 1b, 0b\n"                               \
                     ".popsection\n"                                    \
                     "9:"                                               \
@@ -168,6 +171,7 @@ extern int fixup_exception(struct pt_regs *regs);
                             "0: { movei %1, 0; movei %2, 0 }\n"        \
                             "{ movei %0, %4; j 9f }\n"                 \
                             ".section __ex_table,\"a\"\n"              \
+                            ".align 4\n"                               \
                             ".word 1b, 0b\n"                           \
                             ".word 2b, 0b\n"                           \
                             ".popsection\n"                            \
@@ -224,6 +228,7 @@ extern int __get_user_bad(void)
                     ".pushsection .fixup,\"ax\"\n"                     \
                     "0: { movei %0, %3; j 9f }\n"                      \
                     ".section __ex_table,\"a\"\n"                      \
+                    _ASM_ALIGN "\n"                                    \
                     _ASM_PTR " 1b, 0b\n"                               \
                     ".popsection\n"                                    \
                     "9:"                                               \
@@ -248,6 +253,7 @@ extern int __get_user_bad(void)
                             ".pushsection .fixup,\"ax\"\n"             \
                             "0: { movei %0, %4; j 9f }\n"              \
                             ".section __ex_table,\"a\"\n"              \
+                            ".align 4\n"                               \
                             ".word 1b, 0b\n"                           \
                             ".word 2b, 0b\n"                           \
                             ".popsection\n"                            \
@@ -566,37 +572,6 @@ static inline unsigned long __must_check flush_user(
        return len;
 }
 
-/**
- * inv_user: - Invalidate a block of memory in user space from cache.
- * @mem:   Destination address, in user space.
- * @len:   Number of bytes to invalidate.
- *
- * Returns number of bytes that could not be invalidated.
- * On success, this will be zero.
- *
- * Note that on Tile64, the "inv" operation is in fact a
- * "flush and invalidate", so cache write-backs will occur prior
- * to the cache being marked invalid.
- */
-extern unsigned long inv_user_asm(void __user *mem, unsigned long len);
-static inline unsigned long __must_check __inv_user(
-       void __user *mem, unsigned long len)
-{
-       int retval;
-
-       might_fault();
-       retval = inv_user_asm(mem, len);
-       mb_incoherent();
-       return retval;
-}
-static inline unsigned long __must_check inv_user(
-       void __user *mem, unsigned long len)
-{
-       if (access_ok(VERIFY_WRITE, mem, len))
-               return __inv_user(mem, len);
-       return len;
-}
-
 /**
  * finv_user: - Flush-inval a block of memory in user space from cache.
  * @mem:   Destination address, in user space.
index 37dfbe5988722d4810c20693400bec5480b7d6c6..5a58a0d11449522c705a446a79f1f0b99fd06c1c 100644 (file)
 #ifndef _ASM_TILE_UNALIGNED_H
 #define _ASM_TILE_UNALIGNED_H
 
-#include <linux/unaligned/le_struct.h>
-#include <linux/unaligned/be_byteshift.h>
-#include <linux/unaligned/generic.h>
-#define get_unaligned  __get_unaligned_le
-#define put_unaligned  __put_unaligned_le
+/*
+ * We could implement faster get_unaligned_[be/le]64 using the ldna
+ * instruction on tilegx; however, we need to either copy all of the
+ * other generic functions to here (which is pretty ugly) or else
+ * modify both the generic code and other arch code to allow arch
+ * specific unaligned data access functions.  Given these functions
+ * are not often called, we'll stick with the generic version.
+ */
+#include <asm-generic/unaligned.h>
 
 /*
  * Is the kernel doing fixups of unaligned accesses?  If <0, no kernel
diff --git a/arch/tile/include/asm/vdso.h b/arch/tile/include/asm/vdso.h
new file mode 100644 (file)
index 0000000..9f6a78d
--- /dev/null
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2012 Tilera Corporation. All Rights Reserved.
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful, but
+ *   WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ *   NON INFRINGEMENT.  See the GNU General Public License for
+ *   more details.
+ */
+
+#ifndef __TILE_VDSO_H__
+#define __TILE_VDSO_H__
+
+#include <linux/types.h>
+
+/*
+ * Note about the vdso_data structure:
+ *
+ * NEVER USE THEM IN USERSPACE CODE DIRECTLY. The layout of the
+ * structure is supposed to be known only to the function in the vdso
+ * itself and may change without notice.
+ */
+
+struct vdso_data {
+       __u64 tz_update_count;  /* Timezone atomicity ctr             */
+       __u64 tb_update_count;  /* Timebase atomicity ctr             */
+       __u64 xtime_tod_stamp;  /* TOD clock for xtime                */
+       __u64 xtime_clock_sec;  /* Kernel time second                 */
+       __u64 xtime_clock_nsec; /* Kernel time nanosecond             */
+       __u64 wtom_clock_sec;   /* Wall to monotonic clock second     */
+       __u64 wtom_clock_nsec;  /* Wall to monotonic clock nanosecond */
+       __u32 mult;             /* Cycle to nanosecond multiplier     */
+       __u32 shift;            /* Cycle to nanosecond divisor (power of two) */
+       __u32 tz_minuteswest;   /* Minutes west of Greenwich          */
+       __u32 tz_dsttime;       /* Type of dst correction             */
+};
+
+extern struct vdso_data *vdso_data;
+
+/* __vdso_rt_sigreturn is defined with the addresses in the vdso page. */
+extern void __vdso_rt_sigreturn(void);
+
+extern int setup_vdso_pages(void);
+
+#endif /* __TILE_VDSO_H__ */
index 9d50fce1b1a7961aa15b6409ecdc587fce0959fa..fdd07f88cfd7124458a4d31fb1a098764e15bb9a 100644 (file)
 #define GXIO_MPIPE_OP_REGISTER_CLIENT_MEMORY IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x1210)
 #define GXIO_MPIPE_OP_LINK_OPEN_AUX    IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1211)
 #define GXIO_MPIPE_OP_LINK_CLOSE_AUX   IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1212)
+#define GXIO_MPIPE_OP_LINK_SET_ATTR_AUX IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1213)
 
-#define GXIO_MPIPE_OP_GET_TIMESTAMP_AUX IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x121e)
-#define GXIO_MPIPE_OP_SET_TIMESTAMP_AUX IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x121f)
-#define GXIO_MPIPE_OP_ADJUST_TIMESTAMP_AUX IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x1220)
+#define GXIO_MPIPE_OP_GET_TIMESTAMP_AUX IORPC_OPCODE(IORPC_FORMAT_NONE, 0x121e)
+#define GXIO_MPIPE_OP_SET_TIMESTAMP_AUX IORPC_OPCODE(IORPC_FORMAT_NONE, 0x121f)
+#define GXIO_MPIPE_OP_ADJUST_TIMESTAMP_AUX IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1220)
+#define GXIO_MPIPE_OP_CONFIG_EDMA_RING_BLKS IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1221)
+#define GXIO_MPIPE_OP_ADJUST_TIMESTAMP_FREQ IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1222)
 #define GXIO_MPIPE_OP_ARM_POLLFD       IORPC_OPCODE(IORPC_FORMAT_KERNEL_POLLFD, 0x9000)
 #define GXIO_MPIPE_OP_CLOSE_POLLFD     IORPC_OPCODE(IORPC_FORMAT_KERNEL_POLLFD, 0x9001)
 #define GXIO_MPIPE_OP_GET_MMIO_BASE    IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8000)
@@ -114,6 +117,8 @@ int gxio_mpipe_link_open_aux(gxio_mpipe_context_t * context,
 
 int gxio_mpipe_link_close_aux(gxio_mpipe_context_t * context, int mac);
 
+int gxio_mpipe_link_set_attr_aux(gxio_mpipe_context_t * context, int mac,
+                                uint32_t attr, int64_t val);
 
 int gxio_mpipe_get_timestamp_aux(gxio_mpipe_context_t * context, uint64_t * sec,
                                 uint64_t * nsec, uint64_t * cycles);
@@ -124,6 +129,9 @@ int gxio_mpipe_set_timestamp_aux(gxio_mpipe_context_t * context, uint64_t sec,
 int gxio_mpipe_adjust_timestamp_aux(gxio_mpipe_context_t * context,
                                    int64_t nsec);
 
+int gxio_mpipe_adjust_timestamp_freq(gxio_mpipe_context_t * context,
+                                    int32_t ppb);
+
 int gxio_mpipe_arm_pollfd(gxio_mpipe_context_t * context, int pollfd_cookie);
 
 int gxio_mpipe_close_pollfd(gxio_mpipe_context_t * context, int pollfd_cookie);
index 0bcf3f71ce8be0150237efde998b1ac8998b9512..476c5e5ca22cfe53714e699b667abf2995a192de 100644 (file)
 #include <asm/pgtable.h>
 
 
+#define GXIO_MPIPE_INFO_OP_INSTANCE_AUX IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1250)
 #define GXIO_MPIPE_INFO_OP_ENUMERATE_AUX IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1251)
 #define GXIO_MPIPE_INFO_OP_GET_MMIO_BASE IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8000)
 #define GXIO_MPIPE_INFO_OP_CHECK_MMIO_OFFSET IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8001)
 
 
+int gxio_mpipe_info_instance_aux(gxio_mpipe_info_context_t * context,
+                                _gxio_mpipe_link_name_t name);
+
 int gxio_mpipe_info_enumerate_aux(gxio_mpipe_info_context_t * context,
                                  unsigned int idx,
                                  _gxio_mpipe_link_name_t * name,
index 58105c31228b3b6643783e926b2db7886739ba01..d95b96fd6c934d284f3b0669db7e960cc1c37b7a 100644 (file)
@@ -30,6 +30,7 @@
 
 #define GXIO_TRIO_OP_ALLOC_MEMORY_MAPS IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1404)
 
+#define GXIO_TRIO_OP_ALLOC_SCATTER_QUEUES IORPC_OPCODE(IORPC_FORMAT_NONE, 0x140e)
 #define GXIO_TRIO_OP_ALLOC_PIO_REGIONS IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1412)
 
 #define GXIO_TRIO_OP_INIT_PIO_REGION_AUX IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1414)
@@ -54,6 +55,10 @@ int gxio_trio_alloc_memory_maps(gxio_trio_context_t * context,
                                unsigned int flags);
 
 
+int gxio_trio_alloc_scatter_queues(gxio_trio_context_t * context,
+                                  unsigned int count, unsigned int first,
+                                  unsigned int flags);
+
 int gxio_trio_alloc_pio_regions(gxio_trio_context_t * context,
                                unsigned int count, unsigned int first,
                                unsigned int flags);
diff --git a/arch/tile/include/gxio/iorpc_uart.h b/arch/tile/include/gxio/iorpc_uart.h
new file mode 100644 (file)
index 0000000..55429d4
--- /dev/null
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2013 Tilera Corporation. All Rights Reserved.
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful, but
+ *   WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ *   NON INFRINGEMENT.  See the GNU General Public License for
+ *   more details.
+ */
+
+/* This file is machine-generated; DO NOT EDIT! */
+#ifndef __GXIO_UART_LINUX_RPC_H__
+#define __GXIO_UART_LINUX_RPC_H__
+
+#include <hv/iorpc.h>
+
+#include <hv/drv_uart_intf.h>
+#include <gxio/uart.h>
+#include <gxio/kiorpc.h>
+#include <linux/string.h>
+#include <linux/module.h>
+#include <asm/pgtable.h>
+
+#define GXIO_UART_OP_CFG_INTERRUPT     IORPC_OPCODE(IORPC_FORMAT_KERNEL_INTERRUPT, 0x1900)
+#define GXIO_UART_OP_GET_MMIO_BASE     IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8000)
+#define GXIO_UART_OP_CHECK_MMIO_OFFSET IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8001)
+
+int gxio_uart_cfg_interrupt(gxio_uart_context_t *context, int inter_x,
+                           int inter_y, int inter_ipi, int inter_event);
+
+int gxio_uart_get_mmio_base(gxio_uart_context_t *context, HV_PTE *base);
+
+int gxio_uart_check_mmio_offset(gxio_uart_context_t *context,
+                               unsigned long offset, unsigned long size);
+
+#endif /* !__GXIO_UART_LINUX_RPC_H__ */
index b74f470ed11e58c9f8515ee50347db59ff9027e5..e37cf4f0cffd42a14f3d392d9701ef8a4da746f5 100644 (file)
@@ -220,6 +220,13 @@ typedef MPIPE_PDESC_t gxio_mpipe_idesc_t;
  */
 typedef MPIPE_EDMA_DESC_t gxio_mpipe_edesc_t;
 
+/*
+ * Max # of mpipe instances. 2 currently.
+ */
+#define GXIO_MPIPE_INSTANCE_MAX  HV_MPIPE_INSTANCE_MAX
+
+#define NR_MPIPE_MAX   GXIO_MPIPE_INSTANCE_MAX
+
 /* Get the "va" field from an "idesc".
  *
  * This is the address at which the ingress hardware copied the first
@@ -311,6 +318,9 @@ typedef struct {
        /* File descriptor for calling up to Linux (and thus the HV). */
        int fd;
 
+       /* Corresponding mpipe instance #. */
+       int instance;
+
        /* The VA at which configuration registers are mapped. */
        char *mmio_cfg_base;
 
@@ -810,7 +820,7 @@ extern int gxio_mpipe_alloc_edma_rings(gxio_mpipe_context_t *context,
 /* Initialize an eDMA ring, using the given memory and size.
  *
  * @param context An initialized mPIPE context.
- * @param ring The eDMA ring index.
+ * @param ering The eDMA ring index.
  * @param channel The channel to use.  This must be one of the channels
  * associated with the context's set of open links.
  * @param mem A physically contiguous region of memory to be filled
@@ -823,10 +833,37 @@ extern int gxio_mpipe_alloc_edma_rings(gxio_mpipe_context_t *context,
  * ::GXIO_ERR_INVAL_MEMORY_SIZE on failure.
  */
 extern int gxio_mpipe_init_edma_ring(gxio_mpipe_context_t *context,
-                                    unsigned int ring, unsigned int channel,
+                                    unsigned int ering, unsigned int channel,
                                     void *mem, size_t mem_size,
                                     unsigned int mem_flags);
 
+/* Set the "max_blks", "min_snf_blks", and "db" fields of
+ * ::MPIPE_EDMA_RG_INIT_DAT_THRESH_t for a given edma ring.
+ *
+ * The global pool of dynamic blocks will be automatically adjusted.
+ *
+ * This function should not be called after any egress has been done
+ * on the edma ring.
+ *
+ * Most applications should just use gxio_mpipe_equeue_set_snf_size().
+ *
+ * @param context An initialized mPIPE context.
+ * @param ering The eDMA ring index.
+ * @param max_blks The number of blocks to dedicate to the ring
+ * (normally min_snf_blks + 1).  Must be greater than min_snf_blocks.
+ * @param min_snf_blks The number of blocks which must be stored
+ * prior to starting to send the packet (normally 12).
+ * @param db Whether to allow use of dynamic blocks by the ring
+ * (normally 1).
+ *
+ * @return 0 on success, negative on error.
+ */
+extern int gxio_mpipe_config_edma_ring_blks(gxio_mpipe_context_t *context,
+                                           unsigned int ering,
+                                           unsigned int max_blks,
+                                           unsigned int min_snf_blks,
+                                           unsigned int db);
+
 /*****************************************************************
  *                      Classifier Program                        *
  ******************************************************************/
@@ -1288,15 +1325,39 @@ typedef struct {
        /* The log2() of the number of entries. */
        unsigned long log2_num_entries;
 
+       /* The context. */
+       gxio_mpipe_context_t *context;
+
+       /* The ering. */
+       unsigned int ering;
+
+       /* The channel. */
+       unsigned int channel;
+
 } gxio_mpipe_equeue_t;
 
 /* Initialize an "equeue".
  *
- * Takes the equeue plus the same args as gxio_mpipe_init_edma_ring().
+ * This function uses gxio_mpipe_init_edma_ring() to initialize the
+ * underlying edma_ring using the provided arguments.
+ *
+ * @param equeue An egress queue to be initialized.
+ * @param context An initialized mPIPE context.
+ * @param ering The eDMA ring index.
+ * @param channel The channel to use.  This must be one of the channels
+ * associated with the context's set of open links.
+ * @param mem A physically contiguous region of memory to be filled
+ * with a ring of ::gxio_mpipe_edesc_t structures.
+ * @param mem_size Number of bytes in the ring.  Must be 512, 2048,
+ * 8192 or 65536, times 16 (i.e. sizeof(gxio_mpipe_edesc_t)).
+ * @param mem_flags ::gxio_mpipe_mem_flags_e memory flags.
+ *
+ * @return 0 on success, ::GXIO_MPIPE_ERR_BAD_EDMA_RING or
+ * ::GXIO_ERR_INVAL_MEMORY_SIZE on failure.
  */
 extern int gxio_mpipe_equeue_init(gxio_mpipe_equeue_t *equeue,
                                  gxio_mpipe_context_t *context,
-                                 unsigned int edma_ring_id,
+                                 unsigned int ering,
                                  unsigned int channel,
                                  void *mem, unsigned int mem_size,
                                  unsigned int mem_flags);
@@ -1494,6 +1555,37 @@ static inline int gxio_mpipe_equeue_is_complete(gxio_mpipe_equeue_t *equeue,
                                            completion_slot, update);
 }
 
+/* Set the snf (store and forward) size for an equeue.
+ *
+ * The snf size for an equeue defaults to 1536, and encodes the size
+ * of the largest packet for which egress is guaranteed to avoid
+ * transmission underruns and/or corrupt checksums under heavy load.
+ *
+ * The snf size affects a global resource pool which cannot support,
+ * for example, all 24 equeues each requesting an snf size of 8K.
+ *
+ * To ensure that jumbo packets can be egressed properly, the snf size
+ * should be set to the size of the largest possible packet, which
+ * will usually be limited by the size of the app's largest buffer.
+ *
+ * This is a convenience wrapper around
+ * gxio_mpipe_config_edma_ring_blks().
+ *
+ * This function should not be called after any egress has been done
+ * on the equeue.
+ *
+ * @param equeue An egress queue initialized via gxio_mpipe_equeue_init().
+ * @param size The snf size, in bytes.
+ * @return Zero on success, negative error otherwise.
+ */
+static inline int gxio_mpipe_equeue_set_snf_size(gxio_mpipe_equeue_t *equeue,
+                                                size_t size)
+{
+       int blks = (size + 127) / 128;
+       return gxio_mpipe_config_edma_ring_blks(equeue->context, equeue->ering,
+                                               blks + 1, blks, 1);
+}
+
 /*****************************************************************
  *                        Link Management                         *
  ******************************************************************/
@@ -1634,6 +1726,24 @@ typedef struct {
        uint8_t mac;
 } gxio_mpipe_link_t;
 
+/* Translate a link name to the instance number of the mPIPE shim which is
+ *  connected to that link.  This call does not verify whether the link is
+ *  currently available, and does not reserve any link resources;
+ *  gxio_mpipe_link_open() must be called to perform those functions.
+ *
+ *  Typically applications will call this function to translate a link name
+ *  to an mPIPE instance number; call gxio_mpipe_init(), passing it that
+ *  instance number, to initialize the mPIPE shim; and then call
+ *  gxio_mpipe_link_open(), passing it the same link name plus the mPIPE
+ *  context, to configure the link.
+ *
+ * @param link_name Name of the link; see @ref gxio_mpipe_link_names.
+ * @return The mPIPE instance number which is associated with the named
+ *  link, or a negative error code (::GXIO_ERR_NO_DEVICE) if the link does
+ *  not exist.
+ */
+extern int gxio_mpipe_link_instance(const char *link_name);
+
 /* Retrieve one of this system's legal link names, and its MAC address.
  *
  * @param index Link name index.  If a system supports N legal link names,
@@ -1697,6 +1807,17 @@ static inline int gxio_mpipe_link_channel(gxio_mpipe_link_t *link)
        return link->channel;
 }
 
+/* Set a link attribute.
+ *
+ * @param link A properly initialized link state object.
+ * @param attr An attribute from the set of @ref gxio_mpipe_link_attrs.
+ * @param val New value of the attribute.
+ * @return 0 if the attribute was successfully set, or a negative error
+ *  code.
+ */
+extern int gxio_mpipe_link_set_attr(gxio_mpipe_link_t *link, uint32_t attr,
+                                   int64_t val);
+
 ///////////////////////////////////////////////////////////////////
 //                             Timestamp                         //
 ///////////////////////////////////////////////////////////////////
@@ -1733,4 +1854,18 @@ extern int gxio_mpipe_set_timestamp(gxio_mpipe_context_t *context,
 extern int gxio_mpipe_adjust_timestamp(gxio_mpipe_context_t *context,
                                       int64_t delta);
 
+/** Adjust the mPIPE timestamp clock frequency.
+ *
+ * @param context An initialized mPIPE context.
+ * @param ppb A 32-bit signed PPB (Parts Per Billion) value to adjust.
+ * The absolute value of ppb must be less than or equal to 1000000000.
+ * Values less than about 30000 will generally cause a GXIO_ERR_INVAL
+ * return due to the granularity of the hardware that converts reference
+ * clock cycles into seconds and nanoseconds.
+ * @return If the call was successful, zero; otherwise, a negative error
+ *  code.
+ */
+extern int gxio_mpipe_adjust_timestamp_freq(gxio_mpipe_context_t* context,
+                                            int32_t ppb);
+
 #endif /* !_GXIO_MPIPE_H_ */
diff --git a/arch/tile/include/gxio/uart.h b/arch/tile/include/gxio/uart.h
new file mode 100644 (file)
index 0000000..438ee7e
--- /dev/null
@@ -0,0 +1,105 @@
+/*
+ * Copyright 2013 Tilera Corporation. All Rights Reserved.
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful, but
+ *   WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ *   NON INFRINGEMENT.  See the GNU General Public License for
+ *   more details.
+ */
+
+#ifndef _GXIO_UART_H_
+#define _GXIO_UART_H_
+
+#include "common.h"
+
+#include <hv/drv_uart_intf.h>
+#include <hv/iorpc.h>
+
+/*
+ *
+ * An API for manipulating UART interface.
+ */
+
+/*
+ *
+ * The Rshim allows access to the processor's UART interface.
+ */
+
+/* A context object used to manage UART resources. */
+typedef struct {
+
+       /* File descriptor for calling up to the hypervisor. */
+       int fd;
+
+       /* The VA at which our MMIO registers are mapped. */
+       char *mmio_base;
+
+} gxio_uart_context_t;
+
+/* Request UART interrupts.
+ *
+ *  Request that interrupts be delivered to a tile when the UART's
+ *  Receive FIFO is written, or the Write FIFO is read.
+ *
+ * @param context Pointer to a properly initialized gxio_uart_context_t.
+ * @param bind_cpu_x X coordinate of CPU to which interrupt will be delivered.
+ * @param bind_cpu_y Y coordinate of CPU to which interrupt will be delivered.
+ * @param bind_interrupt IPI interrupt number.
+ * @param bind_event Sub-interrupt event bit number; a negative value can
+ *  disable the interrupt.
+ * @return Zero if all of the requested UART events were successfully
+ *  configured to interrupt.
+ */
+extern int gxio_uart_cfg_interrupt(gxio_uart_context_t *context,
+                                  int bind_cpu_x,
+                                  int bind_cpu_y,
+                                  int bind_interrupt, int bind_event);
+
+/* Initialize a UART context.
+ *
+ *  A properly initialized context must be obtained before any of the other
+ *  gxio_uart routines may be used.
+ *
+ * @param context Pointer to a gxio_uart_context_t, which will be initialized
+ *  by this routine, if it succeeds.
+ * @param uart_index Index of the UART to use.
+ * @return Zero if the context was successfully initialized, else a
+ *  GXIO_ERR_xxx error code.
+ */
+extern int gxio_uart_init(gxio_uart_context_t *context, int uart_index);
+
+/* Destroy a UART context.
+ *
+ *  Once destroyed, a context may not be used with any gxio_uart routines
+ *  other than gxio_uart_init().  After this routine returns, no further
+ *  interrupts requested on this context will be delivered.  The state and
+ *  configuration of the pins which had been attached to this context are
+ *  unchanged by this operation.
+ *
+ * @param context Pointer to a gxio_uart_context_t.
+ * @return Zero if the context was successfully destroyed, else a
+ *  GXIO_ERR_xxx error code.
+ */
+extern int gxio_uart_destroy(gxio_uart_context_t *context);
+
+/* Write UART register.
+ * @param context Pointer to a gxio_uart_context_t.
+ * @param offset UART register offset.
+ * @param word Data to be written to the UART register.
+ */
+extern void gxio_uart_write(gxio_uart_context_t *context, uint64_t offset,
+                           uint64_t word);
+
+/* Read UART register.
+ * @param context Pointer to a gxio_uart_context_t.
+ * @param offset UART register offset.
+ * @return Data read from UART register.
+ */
+extern uint64_t gxio_uart_read(gxio_uart_context_t *context, uint64_t offset);
+
+#endif /* _GXIO_UART_H_ */
index 6cdae3bf046efb8e8cfd9e815ef6a2856b848593..c97e416dd963b585c8907097408fa53aebf55bcb 100644 (file)
@@ -23,6 +23,9 @@
 #include <arch/mpipe_constants.h>
 
 
+/** Number of mPIPE instances supported */
+#define HV_MPIPE_INSTANCE_MAX   (2)
+
 /** Number of buffer stacks (32). */
 #define HV_MPIPE_NUM_BUFFER_STACKS \
   (MPIPE_MMIO_INIT_DAT_GX36_1__BUFFER_STACK_MASK_WIDTH)
index ef9f3f52ee27dd9975f540274be54aaa1d90655a..237e04dee66c7b3759782cd0632c4507c9007028 100644 (file)
@@ -64,8 +64,9 @@ struct pcie_port_property
    *  will not consider it an error if the link comes up as a x8 link. */
   uint8_t allow_x8: 1;
 
-  /** Reserved. */
-  uint8_t reserved: 1;
+  /** If true, this link is connected to a device which may or may not
+   *  be present. */
+  uint8_t removable: 1;
 
 };
 
@@ -167,6 +168,9 @@ pcie_stream_intr_config_sel_t;
 struct pcie_trio_ports_property
 {
   struct pcie_port_property ports[TILEGX_TRIO_PCIES];
+
+  /** Set if this TRIO belongs to a Gx72 device. */
+  uint8_t is_gx72;
 };
 
 /* Flags indicating traffic class. */
diff --git a/arch/tile/include/hv/drv_uart_intf.h b/arch/tile/include/hv/drv_uart_intf.h
new file mode 100644 (file)
index 0000000..f5379e2
--- /dev/null
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2013 Tilera Corporation. All Rights Reserved.
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful, but
+ *   WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ *   NON INFRINGEMENT.  See the GNU General Public License for
+ *   more details.
+ */
+
+/**
+ * Interface definitions for the UART driver.
+ */
+
+#ifndef _SYS_HV_DRV_UART_INTF_H
+#define _SYS_HV_DRV_UART_INTF_H
+
+#include <arch/uart.h>
+
+/** Number of UART ports supported. */
+#define TILEGX_UART_NR        2
+
+/** The mmap file offset (PA) of the UART MMIO region. */
+#define HV_UART_MMIO_OFFSET   0
+
+/** The maximum size of the UARTs MMIO region (64K Bytes). */
+#define HV_UART_MMIO_SIZE     (1UL << 16)
+
+#endif /* _SYS_HV_DRV_UART_INTF_H */
index 837dca5328c296c3ebe3fee2a76f076d9857186b..e94497468c500d167e1a9cdecc323f74f28f59f7 100644 (file)
 /** hv_set_pte_super_shift */
 #define HV_DISPATCH_SET_PTE_SUPER_SHIFT           57
 
+/** hv_set_speed */
+#define HV_DISPATCH_SET_SPEED                     58
+
+/** hv_install_virt_context */
+#define HV_DISPATCH_INSTALL_VIRT_CONTEXT          59
+
+/** hv_inquire_virt_context */
+#define HV_DISPATCH_INQUIRE_VIRT_CONTEXT          60
+
+/** hv_install_guest_context */
+#define HV_DISPATCH_INSTALL_GUEST_CONTEXT         61
+
+/** hv_inquire_guest_context */
+#define HV_DISPATCH_INQUIRE_GUEST_CONTEXT         62
+
+/** hv_console_set_ipi */
+#define HV_DISPATCH_CONSOLE_SET_IPI               63
+
 /** One more than the largest dispatch value */
-#define _HV_DISPATCH_END                          58
+#define _HV_DISPATCH_END                          64
 
 
 #ifndef __ASSEMBLER__
@@ -541,14 +559,24 @@ typedef enum {
   HV_CONFSTR_CPUMOD_REV      = 18,
 
   /** Human-readable CPU module description. */
-  HV_CONFSTR_CPUMOD_DESC     = 19
+  HV_CONFSTR_CPUMOD_DESC     = 19,
+
+  /** Per-tile hypervisor statistics.  When this identifier is specified,
+   *  the hv_confstr call takes two extra arguments.  The first is the
+   *  HV_XY_TO_LOTAR of the target tile's coordinates.  The second is
+   *  a flag word.  The only current flag is the lowest bit, which means
+   *  "zero out the stats instead of retrieving them"; in this case the
+   *  buffer and buffer length are ignored. */
+  HV_CONFSTR_HV_STATS        = 20
 
 } HV_ConfstrQuery;
 
 /** Query a configuration string from the hypervisor.
  *
  * @param query Identifier for the specific string to be retrieved
- *        (HV_CONFSTR_xxx).
+ *        (HV_CONFSTR_xxx).  Some strings may require or permit extra
+ *        arguments to be appended which select specific objects to be
+ *        described; see the string descriptions above.
  * @param buf Buffer in which to place the string.
  * @param len Length of the buffer.
  * @return If query is valid, then the length of the corresponding string,
@@ -556,21 +584,16 @@ typedef enum {
  *        was truncated.  If query is invalid, HV_EINVAL.  If the specified
  *        buffer is not writable by the client, HV_EFAULT.
  */
-int hv_confstr(HV_ConfstrQuery query, HV_VirtAddr buf, int len);
+int hv_confstr(HV_ConfstrQuery query, HV_VirtAddr buf, int len, ...);
 
 /** Tile coordinate */
 typedef struct
 {
-#ifndef __BIG_ENDIAN__
   /** X coordinate, relative to supervisor's top-left coordinate */
   int x;
 
   /** Y coordinate, relative to supervisor's top-left coordinate */
   int y;
-#else
-  int y;
-  int x;
-#endif
 } HV_Coord;
 
 
@@ -585,6 +608,30 @@ typedef struct
  */
 int hv_get_ipi_pte(HV_Coord tile, int pl, HV_PTE* pte);
 
+/** Configure the console interrupt.
+ *
+ * When the console client interrupt is enabled, the hypervisor will
+ * deliver the specified IPI to the client in the following situations:
+ *
+ * - The console has at least one character available for input.
+ *
+ * - The console can accept new characters for output, and the last call
+ *   to hv_console_write() did not write all of the characters requested
+ *   by the client.
+ *
+ * Note that in some system configurations, console interrupt will not
+ * be available; clients should be prepared for this routine to fail and
+ * to fall back to periodic console polling in that case.
+ *
+ * @param ipi Index of the IPI register which will receive the interrupt.
+ * @param event IPI event number for console interrupt. If less than 0,
+ *        disable the console IPI interrupt.
+ * @param coord Tile to be targeted for console interrupt.
+ * @return 0 on success, otherwise, HV_EINVAL if illegal parameter,
+ *         HV_ENOTSUP if console interrupts are not available.
+ */
+int hv_console_set_ipi(int ipi, int event, HV_Coord coord);
+
 #else /* !CHIP_HAS_IPI() */
 
 /** A set of interrupts. */
@@ -689,6 +736,43 @@ HV_RTCTime hv_get_rtc(void);
  */
 void hv_set_rtc(HV_RTCTime time);
 
+
+/** Value returned from hv_set_speed(). */
+typedef struct {
+  /** The new speed achieved, in Hertz, or a negative error code. */
+  long new_speed;
+
+  /** A cycle counter value, in the post-speed-change time domain. */
+  __hv64 end_cycle;
+
+  /** Time elapsed in nanoseconds between start_cycle (passed to
+   *  hv_set_speed(), in the pre-speed-change time domain) and end_cycle
+   *  (returned in this structure). */
+  __hv64 delta_ns;
+} HV_SetSpeed;
+
+
+/** Set the processor clock speed.
+ * @param speed Clock speed in hertz.
+ * @param start_cycle Initial cycle counter value; see the definition of
+ *  HV_SetSpeed for how this is used.
+ * @param flags Flags (HV_SET_SPEED_xxx).
+ * @return A HV_SetSpeed structure.
+ */
+HV_SetSpeed hv_set_speed(unsigned long speed, __hv64 start_cycle,
+                         unsigned long flags);
+
+/** Don't set the speed, just check the value and return the speed we would
+ *  have set if this flag had not been specified.  When this flag is
+ *  specified, the start_cycle parameter is ignored, and the end_cycle and
+ *  delta_ns values in the HV_SetSpeed structure are undefined. */
+#define HV_SET_SPEED_DRYRUN   0x1
+
+/** If the precise speed specified is not supported by the hardware, round
+ *  it up to the next higher supported frequency if necessary; without this
+ *  flag, we round down. */
+#define HV_SET_SPEED_ROUNDUP  0x2
+
 /** Installs a context, comprising a page table and other attributes.
  *
  *  Once this service completes, page_table will be used to translate
@@ -721,12 +805,15 @@ void hv_set_rtc(HV_RTCTime time);
  *  new page table does not need to contain any mapping for the
  *  hv_install_context address itself.
  *
- *  At most one HV_CTX_PG_SM_* flag may be specified in "flags";
+ *  At most one HV_CTX_PG_SM_* flag may be specified in the flags argument;
  *  if multiple flags are specified, HV_EINVAL is returned.
  *  Specifying none of the flags results in using the default page size.
  *  All cores participating in a given client must request the same
  *  page size, or the results are undefined.
  *
+ *  To disable an installed page table, install HV_CTX_NONE.  The access
+ *  and asid fields are ignored.
+ *
  * @param page_table Root of the page table.
  * @param access PTE providing info on how to read the page table.  This
  *   value must be consistent between multiple tiles sharing a page table,
@@ -742,16 +829,101 @@ int hv_install_context(HV_PhysAddr page_table, HV_PTE access, HV_ASID asid,
 
 #endif /* !__ASSEMBLER__ */
 
+#define HV_CTX_NONE         ((HV_PhysAddr)-1)  /**< Disable page table. */
+
 #define HV_CTX_DIRECTIO     0x1   /**< Direct I/O requests are accepted from
                                        PL0. */
 
+#define HV_CTX_GUEST_CACHE  0x4   /**< Let guest control caching flags (only
+                                       usable with hv_install_virt_context.) */
+
 #define HV_CTX_PG_SM_4K     0x10  /**< Use 4K small pages, if available. */
 #define HV_CTX_PG_SM_16K    0x20  /**< Use 16K small pages, if available. */
 #define HV_CTX_PG_SM_64K    0x40  /**< Use 64K small pages, if available. */
 #define HV_CTX_PG_SM_MASK   0xf0  /**< Mask of all possible small pages. */
 
+
 #ifndef __ASSEMBLER__
 
+/** Install a virtualization context.
+ *
+ * When a virtualization context is installed, all faults from PL0 or
+ * PL1 are handled via a "guest context" and then post-processed by
+ * the "virtualization context"; faults at PL2 are still handled by
+ * the normal context.  For guest faults, the "guest PAs" produced by
+ * the guest page table are passed through the virtualization page
+ * table as pseudo-VAs, generating the true CPA as a result.  See the
+ * individual HV_PTE_xxx bits for the effect the bits have when
+ * present in the virtualization page table.  The ASID is currently
+ * ignored in this syscall, but it might be used later, so the API
+ * includes it.  The HV_CTX_GUEST_CACHE flag indicates that all
+ * cache-related flags should be taken from the primary page table,
+ * not the virtualization page table.
+ *
+ * Once the virtualization context is installed, a guest context
+ * should also be installed; otherwise a VA-equals-PA context will be
+ * used for accesses at PL 0 or 1, i.e. VAs will be passed directly to
+ * the virtualization context to generate CPAs.
+ *
+ * When entering client PL after being at guest or user PL, the
+ * client is expected to call hv_flush_all() to clear any TLB mappings
+ * that might otherwise conflict.  Similarly, hv_flush_all() should
+ * be called before returning to guest or user PL with a virtualization
+ * context installed, so that any TLB mappings are cleared.  Future
+ * work may include adding a "vpid" or similar namespace so that
+ * the TLBs may be managed independently.
+ *
+ * Subsequent guest page table installations will have their root PA
+ * and PTE cached after translating through the virtualization
+ * context, so if entries in the virtualization page table are
+ * modified or removed, the guest context should be re-installed.
+ * This, in conjunction with flushing the TLB on return to the guest,
+ * will ensure that the new virtualization entries are honored.
+ *
+ * @param page_table Root of the page table.
+ * @param access PTE providing info on how to read the page table.  This
+ *   value must be consistent between multiple tiles sharing a page table,
+ *   and must also be consistent with any virtual mappings the client
+ *   may be using to access the page table.
+ * @param asid HV_ASID the page table is to be used for (currently ignored).
+ * @param flags Context flags, denoting attributes or privileges of the
+ *   current virtualization context (see below).
+ * @return Zero on success, or a hypervisor error code on failure.
+ */
+
+int hv_install_virt_context(HV_PhysAddr page_table, HV_PTE access,
+                            HV_ASID asid, __hv32 flags);
+
+
+
+/** Install a guest context.
+ *
+ * The guest context is only consulted when a virtualization context
+ * is also installed, and for faults that occur below the client's PL.
+ * If no guest context is installed, in such a case, a VA=PA context
+ * is used instead.
+ *
+ * The access PTE will only be honored if the virtualization table was
+ * installed with HV_CTX_GUEST_CACHE.
+ *
+ * A virtualization context must already be installed prior to
+ * installing the guest context.
+ *
+ * @param page_table Root of the page table; the value is the guest's
+ *   physical address (GPA), not a CPA.
+ * @param access PTE providing info on how to read the page table.  This
+ *   value must be consistent between multiple tiles sharing a page table,
+ *   and must also be consistent with any virtual mappings the client
+ *   may be using to access the page table.
+ * @param asid HV_ASID the page table is to be used for.
+ * @param flags Context flags, denoting attributes or privileges of the
+ *   current context (HV_CTX_xxx).
+ * @return Zero on success, or a hypervisor error code on failure.
+ */
+
+int hv_install_guest_context(HV_PhysAddr page_table, HV_PTE access,
+                             HV_ASID asid, __hv32 flags);
+
 
 /** Set the number of pages ganged together by HV_PTE_SUPER at a
  * particular level of the page table.
@@ -761,7 +933,7 @@ int hv_install_context(HV_PhysAddr page_table, HV_PTE access, HV_ASID asid,
  * "super" page size must be less than the span of the next level in
  * the page table.  The largest size that can be requested is 64GB.
  *
- * The shift value is initially "0" for all page table levels,
+ * The shift value is initially 0 for all page table levels,
  * indicating that the HV_PTE_SUPER bit is effectively ignored.
  *
  * If you change the count from one non-zero value to another, the
@@ -792,11 +964,26 @@ typedef struct
 } HV_Context;
 
 /** Retrieve information about the currently installed context.
- * @return The data passed to the last successful hv_install_context call.
+ * @return The data passed to the last successful call to
+ * hv_install_context().
  */
 HV_Context hv_inquire_context(void);
 
 
+/** Retrieve information about the currently installed virtualization context.
+ * @return The data passed to the last successful call to
+ * hv_install_virt_context().
+ */
+HV_Context hv_inquire_virt_context(void);
+
+
+/** Retrieve information about the currently installed guest context.
+ * @return The data passed to the last successful call to
+ * hv_install_guest_context().
+ */
+HV_Context hv_inquire_guest_context(void);
+
+
 /** Flushes all translations associated with the named address space
  *  identifier from the TLB and any other hypervisor data structures.
  *  Translations installed with the "global" bit are not flushed.
@@ -855,7 +1042,7 @@ int hv_flush_pages(HV_VirtAddr start, HV_PageSize page_size,
 /** Flushes all non-global translations (if preserve_global is true),
  *  or absolutely all translations (if preserve_global is false).
  *
- * @param preserve_global Non-zero if we want to preserve "global" mappings.
+ * @param preserve_global Non-zero if we want to preserve global mappings.
  * @return Zero on success, or a hypervisor error code on failure.
 */
 int hv_flush_all(int preserve_global);
@@ -929,7 +1116,11 @@ typedef enum {
   HV_INQ_TILES_HFH_CACHE       = 2,
 
   /** The set of tiles that can be legally used as a LOTAR for a PTE. */
-  HV_INQ_TILES_LOTAR           = 3
+  HV_INQ_TILES_LOTAR           = 3,
+
+  /** The set of "shared" driver tiles that the hypervisor may
+   *  periodically interrupt. */
+  HV_INQ_TILES_SHARED          = 4
 } HV_InqTileSet;
 
 /** Returns specific information about various sets of tiles within the
@@ -1092,13 +1283,8 @@ HV_VirtAddrRange hv_inquire_virtual(int idx);
 /** A range of ASID values. */
 typedef struct
 {
-#ifndef __BIG_ENDIAN__
   HV_ASID start;        /**< First ASID in the range. */
   unsigned int size;    /**< Number of ASIDs. Zero for an invalid range. */
-#else
-  unsigned int size;    /**< Number of ASIDs. Zero for an invalid range. */
-  HV_ASID start;        /**< First ASID in the range. */
-#endif
 } HV_ASIDRange;
 
 /** Returns information about a range of ASIDs.
@@ -1214,14 +1400,21 @@ void hv_downcall_dispatch(void);
  */
 /** Message receive downcall interrupt vector */
 #define INT_MESSAGE_RCV_DWNCL    INT_BOOT_ACCESS
+/** Device interrupt downcall interrupt vector */
+#define INT_DEV_INTR_DWNCL       INT_WORLD_ACCESS
+#ifdef __tilegx__
+/** Virtualization page table miss downcall interrupt vector */
+#define INT_VPGTABLE_MISS_DWNCL  INT_I_ASID
+/** Virtualization guest illegal page table */
+#define INT_VGUEST_FATAL_DWNCL   INT_D_ASID
+#else
 /** DMA TLB miss downcall interrupt vector */
 #define INT_DMATLB_MISS_DWNCL    INT_DMA_ASID
-/** Static nework processor instruction TLB miss interrupt vector */
-#define INT_SNITLB_MISS_DWNCL    INT_SNI_ASID
 /** DMA TLB access violation downcall interrupt vector */
 #define INT_DMATLB_ACCESS_DWNCL  INT_DMA_CPL
-/** Device interrupt downcall interrupt vector */
-#define INT_DEV_INTR_DWNCL       INT_WORLD_ACCESS
+/** Static network processor instruction TLB miss interrupt vector */
+#define INT_SNITLB_MISS_DWNCL    INT_SNI_ASID
+#endif
 
 #ifndef __ASSEMBLER__
 
@@ -1422,7 +1615,6 @@ typedef enum
 /** Message recipient. */
 typedef struct
 {
-#ifndef __BIG_ENDIAN__
   /** X coordinate, relative to supervisor's top-left coordinate */
   unsigned int x:11;
 
@@ -1431,11 +1623,6 @@ typedef struct
 
   /** Status of this recipient */
   HV_Recip_State state:10;
-#else //__BIG_ENDIAN__
-  HV_Recip_State state:10;
-  unsigned int y:11;
-  unsigned int x:11;
-#endif
 } HV_Recipient;
 
 /** Send a message to a set of recipients.
@@ -1990,8 +2177,16 @@ int hv_flush_remote(HV_PhysAddr cache_pa, unsigned long cache_control,
 #define HV_PTE_PTFN_BITS             29  /**< Number of bits in a PTFN */
 
 /*
- * Legal values for the PTE's mode field
+ * Legal values for the PTE's mode field.
+ *
+ * If a virtualization page table is installed, this field is only honored
+ * in the primary page table if HV_CTX_GUEST_CACHE was set when the page
+ * table was installed, otherwise only in the virtualization page table.
+ * Note that if HV_CTX_GUEST_CACHE is not set, guests will only be able
+ * to access MMIO resources via pseudo PAs that map to MMIO in the
+ * virtualization page table.
  */
+
 /** Data is not resident in any caches; loads and stores access memory
  *  directly.
  */
@@ -2110,6 +2305,8 @@ int hv_flush_remote(HV_PhysAddr cache_pa, unsigned long cache_control,
  * doing so may race with the hypervisor's update of ACCESSED and DIRTY bits.
  *
  * This bit is ignored in level-1 PTEs unless the Page bit is set.
+ * This bit is ignored in the primary page table if a virtualization
+ * page table is installed.
  */
 #define HV_PTE_GLOBAL                (__HV_PTE_ONE << HV_PTE_INDEX_GLOBAL)
 
@@ -2123,6 +2320,7 @@ int hv_flush_remote(HV_PhysAddr cache_pa, unsigned long cache_control,
  * doing so may race with the hypervisor's update of ACCESSED and DIRTY bits.
  *
  * This bit is ignored in level-1 PTEs unless the Page bit is set.
+ * This bit is ignored in the virtualization page table.
  */
 #define HV_PTE_USER                  (__HV_PTE_ONE << HV_PTE_INDEX_USER)
 
@@ -2134,7 +2332,7 @@ int hv_flush_remote(HV_PhysAddr cache_pa, unsigned long cache_control,
  * has been cleared, subsequent references are not guaranteed to set
  * it again until the translation has been flushed from the TLB.
  *
- * This bit is ignored in level-1 PTEs unless the Page bit is set.
+ * This bit is ignored in level-0 or level-1 PTEs unless the Page bit is set.
  */
 #define HV_PTE_ACCESSED              (__HV_PTE_ONE << HV_PTE_INDEX_ACCESSED)
 
@@ -2146,7 +2344,7 @@ int hv_flush_remote(HV_PhysAddr cache_pa, unsigned long cache_control,
  * has been cleared, subsequent references are not guaranteed to set
  * it again until the translation has been flushed from the TLB.
  *
- * This bit is ignored in level-1 PTEs unless the Page bit is set.
+ * This bit is ignored in level-0 or level-1 PTEs unless the Page bit is set.
  */
 #define HV_PTE_DIRTY                 (__HV_PTE_ONE << HV_PTE_INDEX_DIRTY)
 
@@ -2188,6 +2386,10 @@ int hv_flush_remote(HV_PhysAddr cache_pa, unsigned long cache_control,
  *
  * In level-1 PTEs, if the Page bit is clear, this bit determines how the
  * level-2 page table is accessed.
+ *
+ * If a virtualization page table is installed, this field is only honored
+ * in the primary page table if HV_CTX_GUEST_CACHE was set when the page
+ * table was installed, otherwise only in the virtualization page table.
  */
 #define HV_PTE_NC                    (__HV_PTE_ONE << HV_PTE_INDEX_NC)
 
@@ -2201,6 +2403,10 @@ int hv_flush_remote(HV_PhysAddr cache_pa, unsigned long cache_control,
  *
  * In level-1 PTEs, if the Page bit is clear, this bit
  * determines how the level-2 page table is accessed.
+ *
+ * If a virtualization page table is installed, this field is only honored
+ * in the primary page table if HV_CTX_GUEST_CACHE was set when the page
+ * table was installed, otherwise only in the virtualization page table.
  */
 #define HV_PTE_NO_ALLOC_L1           (__HV_PTE_ONE << HV_PTE_INDEX_NO_ALLOC_L1)
 
@@ -2214,6 +2420,10 @@ int hv_flush_remote(HV_PhysAddr cache_pa, unsigned long cache_control,
  *
  * In level-1 PTEs, if the Page bit is clear, this bit determines how the
  * level-2 page table is accessed.
+ *
+ * If a virtualization page table is installed, this field is only honored
+ * in the primary page table if HV_CTX_GUEST_CACHE was set when the page
+ * table was installed, otherwise only in the virtualization page table.
  */
 #define HV_PTE_NO_ALLOC_L2           (__HV_PTE_ONE << HV_PTE_INDEX_NO_ALLOC_L2)
 
@@ -2233,6 +2443,10 @@ int hv_flush_remote(HV_PhysAddr cache_pa, unsigned long cache_control,
  * the page map directly to memory.
  *
  * This bit is ignored in level-1 PTEs unless the Page bit is set.
+ *
+ * If a virtualization page table is installed, this field is only honored
+ * in the primary page table if HV_CTX_GUEST_CACHE was set when the page
+ * table was installed, otherwise only in the virtualization page table.
  */
 #define HV_PTE_CACHED_PRIORITY       (__HV_PTE_ONE << \
                                       HV_PTE_INDEX_CACHED_PRIORITY)
@@ -2246,6 +2460,8 @@ int hv_flush_remote(HV_PhysAddr cache_pa, unsigned long cache_control,
  * It is illegal for this bit to be clear if the Writable bit is set.
  *
  * This bit is ignored in level-1 PTEs unless the Page bit is set.
+ * If a virtualization page table is present, the final Readable status
+ * is the logical "and" of this bit in both page tables.
  */
 #define HV_PTE_READABLE              (__HV_PTE_ONE << HV_PTE_INDEX_READABLE)
 
@@ -2256,6 +2472,8 @@ int hv_flush_remote(HV_PhysAddr cache_pa, unsigned long cache_control,
  * PTE.
  *
  * This bit is ignored in level-1 PTEs unless the Page bit is set.
+ * If a virtualization page table is present, the final Writable status
+ * is the logical "and" of this bit in both page tables.
  */
 #define HV_PTE_WRITABLE              (__HV_PTE_ONE << HV_PTE_INDEX_WRITABLE)
 
@@ -2268,6 +2486,8 @@ int hv_flush_remote(HV_PhysAddr cache_pa, unsigned long cache_control,
  * than one.
  *
  * This bit is ignored in level-1 PTEs unless the Page bit is set.
+ * If a virtualization page table is present, the final Executable status
+ * is the logical "and" of this bit in both page tables.
  */
 #define HV_PTE_EXECUTABLE            (__HV_PTE_ONE << HV_PTE_INDEX_EXECUTABLE)
 
index 4ebc34f4768dda009d5ed4213880174dd78231da..97dfbecec6b6bb36e24cca16d7a1eee4995d8ae4 100644 (file)
@@ -1,7 +1,6 @@
 # UAPI Header export list
 header-y += abi.h
 header-y += chip.h
-header-y += chip_tile64.h
 header-y += chip_tilegx.h
 header-y += chip_tilepro.h
 header-y += icache.h
index 926d3db0e91e9c0485f3bcfb80faac8eec7e7a37..4c91f90b93698d6b776ea341c92e3bdd0ecaa118 100644 (file)
@@ -12,9 +12,7 @@
  *   more details.
  */
 
-#if __tile_chip__ == 0
-#include <arch/chip_tile64.h>
-#elif __tile_chip__ == 1
+#if __tile_chip__ == 1
 #include <arch/chip_tilepro.h>
 #elif defined(__tilegx__)
 #include <arch/chip_tilegx.h>
diff --git a/arch/tile/include/uapi/arch/chip_tile64.h b/arch/tile/include/uapi/arch/chip_tile64.h
deleted file mode 100644 (file)
index 261aaba..0000000
+++ /dev/null
@@ -1,258 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- *   This program is free software; you can redistribute it and/or
- *   modify it under the terms of the GNU General Public License
- *   as published by the Free Software Foundation, version 2.
- *
- *   This program is distributed in the hope that it will be useful, but
- *   WITHOUT ANY WARRANTY; without even the implied warranty of
- *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- *   NON INFRINGEMENT.  See the GNU General Public License for
- *   more details.
- */
-
-/*
- * @file
- * Global header file.
- * This header file specifies defines for TILE64.
- */
-
-#ifndef __ARCH_CHIP_H__
-#define __ARCH_CHIP_H__
-
-/** Specify chip version.
- * When possible, prefer the CHIP_xxx symbols below for future-proofing.
- * This is intended for cross-compiling; native compilation should
- * use the predefined __tile_chip__ symbol.
- */
-#define TILE_CHIP 0
-
-/** Specify chip revision.
- * This provides for the case of a respin of a particular chip type;
- * the normal value for this symbol is "0".
- * This is intended for cross-compiling; native compilation should
- * use the predefined __tile_chip_rev__ symbol.
- */
-#define TILE_CHIP_REV 0
-
-/** The name of this architecture. */
-#define CHIP_ARCH_NAME "tile64"
-
-/** The ELF e_machine type for binaries for this chip. */
-#define CHIP_ELF_TYPE() EM_TILE64
-
-/** The alternate ELF e_machine type for binaries for this chip. */
-#define CHIP_COMPAT_ELF_TYPE() 0x2506
-
-/** What is the native word size of the machine? */
-#define CHIP_WORD_SIZE() 32
-
-/** How many bits of a virtual address are used. Extra bits must be
- * the sign extension of the low bits.
- */
-#define CHIP_VA_WIDTH() 32
-
-/** How many bits are in a physical address? */
-#define CHIP_PA_WIDTH() 36
-
-/** Size of the L2 cache, in bytes. */
-#define CHIP_L2_CACHE_SIZE() 65536
-
-/** Log size of an L2 cache line in bytes. */
-#define CHIP_L2_LOG_LINE_SIZE() 6
-
-/** Size of an L2 cache line, in bytes. */
-#define CHIP_L2_LINE_SIZE() (1 << CHIP_L2_LOG_LINE_SIZE())
-
-/** Associativity of the L2 cache. */
-#define CHIP_L2_ASSOC() 2
-
-/** Size of the L1 data cache, in bytes. */
-#define CHIP_L1D_CACHE_SIZE() 8192
-
-/** Log size of an L1 data cache line in bytes. */
-#define CHIP_L1D_LOG_LINE_SIZE() 4
-
-/** Size of an L1 data cache line, in bytes. */
-#define CHIP_L1D_LINE_SIZE() (1 << CHIP_L1D_LOG_LINE_SIZE())
-
-/** Associativity of the L1 data cache. */
-#define CHIP_L1D_ASSOC() 2
-
-/** Size of the L1 instruction cache, in bytes. */
-#define CHIP_L1I_CACHE_SIZE() 8192
-
-/** Log size of an L1 instruction cache line in bytes. */
-#define CHIP_L1I_LOG_LINE_SIZE() 6
-
-/** Size of an L1 instruction cache line, in bytes. */
-#define CHIP_L1I_LINE_SIZE() (1 << CHIP_L1I_LOG_LINE_SIZE())
-
-/** Associativity of the L1 instruction cache. */
-#define CHIP_L1I_ASSOC() 1
-
-/** Stride with which flush instructions must be issued. */
-#define CHIP_FLUSH_STRIDE() CHIP_L2_LINE_SIZE()
-
-/** Stride with which inv instructions must be issued. */
-#define CHIP_INV_STRIDE() CHIP_L1D_LINE_SIZE()
-
-/** Stride with which finv instructions must be issued. */
-#define CHIP_FINV_STRIDE() CHIP_L1D_LINE_SIZE()
-
-/** Can the local cache coherently cache data that is homed elsewhere? */
-#define CHIP_HAS_COHERENT_LOCAL_CACHE() 0
-
-/** How many simultaneous outstanding victims can the L2 cache have? */
-#define CHIP_MAX_OUTSTANDING_VICTIMS() 2
-
-/** Does the TLB support the NC and NOALLOC bits? */
-#define CHIP_HAS_NC_AND_NOALLOC_BITS() 0
-
-/** Does the chip support hash-for-home caching? */
-#define CHIP_HAS_CBOX_HOME_MAP() 0
-
-/** Number of entries in the chip's home map tables. */
-/* #define CHIP_CBOX_HOME_MAP_SIZE() -- does not apply to chip 0 */
-
-/** Do uncacheable requests miss in the cache regardless of whether
- * there is matching data? */
-#define CHIP_HAS_ENFORCED_UNCACHEABLE_REQUESTS() 0
-
-/** Does the mf instruction wait for victims? */
-#define CHIP_HAS_MF_WAITS_FOR_VICTIMS() 1
-
-/** Does the chip have an "inv" instruction that doesn't also flush? */
-#define CHIP_HAS_INV() 0
-
-/** Does the chip have a "wh64" instruction? */
-#define CHIP_HAS_WH64() 0
-
-/** Does this chip have a 'dword_align' instruction? */
-#define CHIP_HAS_DWORD_ALIGN() 0
-
-/** Number of performance counters. */
-#define CHIP_PERFORMANCE_COUNTERS() 2
-
-/** Does this chip have auxiliary performance counters? */
-#define CHIP_HAS_AUX_PERF_COUNTERS() 0
-
-/** Is the CBOX_MSR1 SPR supported? */
-#define CHIP_HAS_CBOX_MSR1() 0
-
-/** Is the TILE_RTF_HWM SPR supported? */
-#define CHIP_HAS_TILE_RTF_HWM() 0
-
-/** Is the TILE_WRITE_PENDING SPR supported? */
-#define CHIP_HAS_TILE_WRITE_PENDING() 0
-
-/** Is the PROC_STATUS SPR supported? */
-#define CHIP_HAS_PROC_STATUS_SPR() 0
-
-/** Is the DSTREAM_PF SPR supported? */
-#define CHIP_HAS_DSTREAM_PF() 0
-
-/** Log of the number of mshims we have. */
-#define CHIP_LOG_NUM_MSHIMS() 2
-
-/** Are the bases of the interrupt vector areas fixed? */
-#define CHIP_HAS_FIXED_INTVEC_BASE() 1
-
-/** Are the interrupt masks split up into 2 SPRs? */
-#define CHIP_HAS_SPLIT_INTR_MASK() 1
-
-/** Is the cycle count split up into 2 SPRs? */
-#define CHIP_HAS_SPLIT_CYCLE() 1
-
-/** Does the chip have a static network? */
-#define CHIP_HAS_SN() 1
-
-/** Does the chip have a static network processor? */
-#define CHIP_HAS_SN_PROC() 1
-
-/** Size of the L1 static network processor instruction cache, in bytes. */
-#define CHIP_L1SNI_CACHE_SIZE() 2048
-
-/** Does the chip have DMA support in each tile? */
-#define CHIP_HAS_TILE_DMA() 1
-
-/** Does the chip have the second revision of the directly accessible
- *  dynamic networks?  This encapsulates a number of characteristics,
- *  including the absence of the catch-all, the absence of inline message
- *  tags, the absence of support for network context-switching, and so on.
- */
-#define CHIP_HAS_REV1_XDN() 0
-
-/** Does the chip have cmpexch and similar (fetchadd, exch, etc.)? */
-#define CHIP_HAS_CMPEXCH() 0
-
-/** Does the chip have memory-mapped I/O support? */
-#define CHIP_HAS_MMIO() 0
-
-/** Does the chip have post-completion interrupts? */
-#define CHIP_HAS_POST_COMPLETION_INTERRUPTS() 0
-
-/** Does the chip have native single step support? */
-#define CHIP_HAS_SINGLE_STEP() 0
-
-#ifndef __OPEN_SOURCE__  /* features only relevant to hypervisor-level code */
-
-/** How many entries are present in the instruction TLB? */
-#define CHIP_ITLB_ENTRIES() 8
-
-/** How many entries are present in the data TLB? */
-#define CHIP_DTLB_ENTRIES() 16
-
-/** How many MAF entries does the XAUI shim have? */
-#define CHIP_XAUI_MAF_ENTRIES() 16
-
-/** Does the memory shim have a source-id table? */
-#define CHIP_HAS_MSHIM_SRCID_TABLE() 1
-
-/** Does the L1 instruction cache clear on reset? */
-#define CHIP_HAS_L1I_CLEAR_ON_RESET() 0
-
-/** Does the chip come out of reset with valid coordinates on all tiles?
- * Note that if defined, this also implies that the upper left is 1,1.
- */
-#define CHIP_HAS_VALID_TILE_COORD_RESET() 0
-
-/** Does the chip have unified packet formats? */
-#define CHIP_HAS_UNIFIED_PACKET_FORMATS() 0
-
-/** Does the chip support write reordering? */
-#define CHIP_HAS_WRITE_REORDERING() 0
-
-/** Does the chip support Y-X routing as well as X-Y? */
-#define CHIP_HAS_Y_X_ROUTING() 0
-
-/** Is INTCTRL_3 managed with the correct MPL? */
-#define CHIP_HAS_INTCTRL_3_STATUS_FIX() 0
-
-/** Is it possible to configure the chip to be big-endian? */
-#define CHIP_HAS_BIG_ENDIAN_CONFIG() 0
-
-/** Is the CACHE_RED_WAY_OVERRIDDEN SPR supported? */
-#define CHIP_HAS_CACHE_RED_WAY_OVERRIDDEN() 0
-
-/** Is the DIAG_TRACE_WAY SPR supported? */
-#define CHIP_HAS_DIAG_TRACE_WAY() 0
-
-/** Is the MEM_STRIPE_CONFIG SPR supported? */
-#define CHIP_HAS_MEM_STRIPE_CONFIG() 0
-
-/** Are the TLB_PERF SPRs supported? */
-#define CHIP_HAS_TLB_PERF() 0
-
-/** Is the VDN_SNOOP_SHIM_CTL SPR supported? */
-#define CHIP_HAS_VDN_SNOOP_SHIM_CTL() 0
-
-/** Does the chip support rev1 DMA packets? */
-#define CHIP_HAS_REV1_DMA_PACKETS() 0
-
-/** Does the chip have an IPI shim? */
-#define CHIP_HAS_IPI() 0
-
-#endif /* !__OPEN_SOURCE__ */
-#endif /* __ARCH_CHIP_H__ */
index c14d02c816005c64cba946074b6a329ea8eb6cc4..d76ff2db745e2ea39a157d7bdd2a3f77b14b7b97 100644 (file)
@@ -61,6 +61,7 @@ typedef tilegx_bundle_bits tile_bundle_bits;
 #define TILE_BUNDLE_ALIGNMENT_IN_BYTES TILEGX_BUNDLE_ALIGNMENT_IN_BYTES
 #define TILE_LOG2_BUNDLE_ALIGNMENT_IN_BYTES \
   TILEGX_LOG2_BUNDLE_ALIGNMENT_IN_BYTES
+#define TILE_BPT_BUNDLE TILEGX_BPT_BUNDLE
 
 /* 64-bit pattern for a { bpt ; nop } bundle. */
 #define TILEGX_BPT_BUNDLE 0x286a44ae51485000ULL
index 71b763b8ce839a84715c1719bad89a3d9a7b96e1..4451cff1a861ffacdabb192dfe91aa53cfe6abca 100644 (file)
@@ -71,6 +71,7 @@ typedef tilepro_bundle_bits tile_bundle_bits;
 #define TILE_BUNDLE_ALIGNMENT_IN_BYTES TILEPRO_BUNDLE_ALIGNMENT_IN_BYTES
 #define TILE_LOG2_BUNDLE_ALIGNMENT_IN_BYTES \
   TILEPRO_LOG2_BUNDLE_ALIGNMENT_IN_BYTES
+#define TILE_BPT_BUNDLE TILEPRO_BPT_BUNDLE
 
 /* 64-bit pattern for a { bpt ; nop } bundle. */
 #define TILEPRO_BPT_BUNDLE 0x400b3cae70166000ULL
index e54b7b0527f368523b64e957eddaa28e4549b29e..36fb24ce60ea9ebc96fd469488c44c4e693519e9 100644 (file)
@@ -611,6 +611,25 @@ sim_profiler_chip_clear(unsigned int mask)
   __insn_mtspr(SPR_SIM_CONTROL, SIM_PROFILER_CHIP_CLEAR_SPR_ARG(mask));
 }
 
+/**
+ * Set vCPU number for a given task.
+ * @param vcpu Virtual cpu to set.
+ */
+static __inline void
+sim_set_vcpu(int vcpu)
+{
+  __insn_mtspr(SPR_SIM_CONTROL,
+               SIM_CONTROL_VCPU | (vcpu << _SIM_CONTROL_OPERATOR_BITS));
+}
+
+/** Clear vCPU status for a given task. */
+static __inline void
+sim_clear_vcpu(void)
+{
+  __insn_mtspr(SPR_SIM_CONTROL,
+               SIM_CONTROL_VCPU | (-1 << _SIM_CONTROL_OPERATOR_BITS));
+}
+
 
 /*
  * Event support.
index 4b44a2b6a09ae84c5bdf02615929ce0dc6621460..b9aad66d7ccf4f50ee3c64e8cd775f66656a1c49 100644 (file)
  */
 #define SIM_CONTROL_ENABLE_MPIPE_LINK_MAGIC_BYTE 36
 
+/**
+ * If written to SPR_SIM_CONTROL, combined with a signed virtual cpu
+ * number shifted by 8, will tag any identification of the cpu that
+ * task is running on with the given virtual cpu number.  If the
+ * virtual cpu number is -1, the tag is removed.
+ */
+#define SIM_CONTROL_VCPU 37
+
 
 /*
  * Syscall numbers for use with "sim_syscall()".
index c689446e62844e9e7e0b15d6f3d683f2b5719d08..f5ae4dfc310922ffdc1541f5073941ec2f2b9d78 100644 (file)
 #define SPR_MPL_DMA_NOTIFY_SET_0 0x3800
 #define SPR_MPL_DMA_NOTIFY_SET_1 0x3801
 #define SPR_MPL_DMA_NOTIFY_SET_2 0x3802
+#define SPR_MPL_GPV_SET_0 0x0600
+#define SPR_MPL_GPV_SET_1 0x0601
+#define SPR_MPL_GPV_SET_2 0x0602
 #define SPR_MPL_IDN_ACCESS_SET_0 0x0a00
 #define SPR_MPL_IDN_ACCESS_SET_1 0x0a01
 #define SPR_MPL_IDN_ACCESS_SET_2 0x0a02
 #define SPR_MPL_IDN_TIMER_SET_0 0x3400
 #define SPR_MPL_IDN_TIMER_SET_1 0x3401
 #define SPR_MPL_IDN_TIMER_SET_2 0x3402
+#define SPR_MPL_ILL_SET_0 0x0400
+#define SPR_MPL_ILL_SET_1 0x0401
+#define SPR_MPL_ILL_SET_2 0x0402
 #define SPR_MPL_INTCTRL_0_SET_0 0x4a00
 #define SPR_MPL_INTCTRL_0_SET_1 0x4a01
 #define SPR_MPL_INTCTRL_0_SET_2 0x4a02
 #define SPR_MPL_SN_NOTIFY_SET_0 0x2a00
 #define SPR_MPL_SN_NOTIFY_SET_1 0x2a01
 #define SPR_MPL_SN_NOTIFY_SET_2 0x2a02
+#define SPR_MPL_SWINT_0_SET_0 0x1c00
+#define SPR_MPL_SWINT_0_SET_1 0x1c01
+#define SPR_MPL_SWINT_0_SET_2 0x1c02
+#define SPR_MPL_SWINT_1_SET_0 0x1a00
+#define SPR_MPL_SWINT_1_SET_1 0x1a01
+#define SPR_MPL_SWINT_1_SET_2 0x1a02
 #define SPR_MPL_UDN_ACCESS_SET_0 0x0c00
 #define SPR_MPL_UDN_ACCESS_SET_1 0x0c01
 #define SPR_MPL_UDN_ACCESS_SET_2 0x0c02
 #define SPR_MPL_UDN_TIMER_SET_0 0x3600
 #define SPR_MPL_UDN_TIMER_SET_1 0x3601
 #define SPR_MPL_UDN_TIMER_SET_2 0x3602
+#define SPR_MPL_UNALIGN_DATA_SET_0 0x1e00
+#define SPR_MPL_UNALIGN_DATA_SET_1 0x1e01
+#define SPR_MPL_UNALIGN_DATA_SET_2 0x1e02
 #define SPR_MPL_WORLD_ACCESS_SET_0 0x4e00
 #define SPR_MPL_WORLD_ACCESS_SET_1 0x4e01
 #define SPR_MPL_WORLD_ACCESS_SET_2 0x4e02
 #define SPR_SIM_CONTROL 0x4e0c
 #define SPR_SNCTL 0x0805
 #define SPR_SNCTL__FRZFABRIC_MASK  0x1
-#define SPR_SNCTL__FRZPROC_MASK  0x2
-#define SPR_SNPC 0x080b
 #define SPR_SNSTATIC 0x080c
 #define SPR_SYSTEM_SAVE_0_0 0x4b00
 #define SPR_SYSTEM_SAVE_0_1 0x4b01
index 67a6c1751e3b68f8b0c5cff6352cdd99dfb13291..727cda706fc563b0de89767a81a3581d8125ffa8 100644 (file)
 #define SPR_AUX_PERF_COUNT_1 0x2106
 #define SPR_AUX_PERF_COUNT_CTL 0x2107
 #define SPR_AUX_PERF_COUNT_STS 0x2108
+#define SPR_AUX_TILE_TIMER_CONTROL 0x1705
+#define SPR_AUX_TILE_TIMER_CONTROL__COUNT_MASK  0xffffffff
+#define SPR_AUX_TILE_TIMER_CONTROL__DISABLE_SHIFT 62
+#define SPR_AUX_TILE_TIMER_CONTROL__UNDERFLOW_SHIFT 63
 #define SPR_CMPEXCH_VALUE 0x2780
 #define SPR_CYCLE 0x2781
 #define SPR_DONE 0x2705
 #define SPR_MPL_AUX_TILE_TIMER_SET_0 0x1700
 #define SPR_MPL_AUX_TILE_TIMER_SET_1 0x1701
 #define SPR_MPL_AUX_TILE_TIMER_SET_2 0x1702
+#define SPR_MPL_GPV_SET_0 0x0900
+#define SPR_MPL_GPV_SET_1 0x0901
+#define SPR_MPL_GPV_SET_2 0x0902
 #define SPR_MPL_IDN_ACCESS_SET_0 0x0a00
 #define SPR_MPL_IDN_ACCESS_SET_1 0x0a01
 #define SPR_MPL_IDN_ACCESS_SET_2 0x0a02
 #define SPR_MPL_IDN_TIMER_SET_0 0x1800
 #define SPR_MPL_IDN_TIMER_SET_1 0x1801
 #define SPR_MPL_IDN_TIMER_SET_2 0x1802
+#define SPR_MPL_ILL_SET_0 0x0800
+#define SPR_MPL_ILL_SET_1 0x0801
+#define SPR_MPL_ILL_SET_2 0x0802
+#define SPR_MPL_ILL_TRANS_SET_0 0x1000
+#define SPR_MPL_ILL_TRANS_SET_1 0x1001
+#define SPR_MPL_ILL_TRANS_SET_2 0x1002
 #define SPR_MPL_INTCTRL_0_SET_0 0x2500
 #define SPR_MPL_INTCTRL_0_SET_1 0x2501
 #define SPR_MPL_INTCTRL_0_SET_2 0x2502
 #define SPR_MPL_PERF_COUNT_SET_0 0x2000
 #define SPR_MPL_PERF_COUNT_SET_1 0x2001
 #define SPR_MPL_PERF_COUNT_SET_2 0x2002
+#define SPR_MPL_SINGLE_STEP_1_SET_0 0x0300
+#define SPR_MPL_SINGLE_STEP_1_SET_1 0x0301
+#define SPR_MPL_SINGLE_STEP_1_SET_2 0x0302
+#define SPR_MPL_SWINT_0_SET_0 0x0f00
+#define SPR_MPL_SWINT_0_SET_1 0x0f01
+#define SPR_MPL_SWINT_0_SET_2 0x0f02
+#define SPR_MPL_SWINT_1_SET_0 0x0e00
+#define SPR_MPL_SWINT_1_SET_1 0x0e01
+#define SPR_MPL_SWINT_1_SET_2 0x0e02
 #define SPR_MPL_UDN_ACCESS_SET_0 0x0b00
 #define SPR_MPL_UDN_ACCESS_SET_1 0x0b01
 #define SPR_MPL_UDN_ACCESS_SET_2 0x0b02
 #define SPR_MPL_UDN_TIMER_SET_0 0x1900
 #define SPR_MPL_UDN_TIMER_SET_1 0x1901
 #define SPR_MPL_UDN_TIMER_SET_2 0x1902
+#define SPR_MPL_UNALIGN_DATA_SET_0 0x1100
+#define SPR_MPL_UNALIGN_DATA_SET_1 0x1101
+#define SPR_MPL_UNALIGN_DATA_SET_2 0x1102
 #define SPR_MPL_WORLD_ACCESS_SET_0 0x2700
 #define SPR_MPL_WORLD_ACCESS_SET_1 0x2701
 #define SPR_MPL_WORLD_ACCESS_SET_2 0x2702
index c20db8e428bf698cca387f3c71a29e44f1df48ce..f07cc245ec41b629952251cb02f871a5fbdd05b6 100644 (file)
@@ -6,7 +6,9 @@ header-y += bitsperlong.h
 header-y += byteorder.h
 header-y += cachectl.h
 header-y += hardwall.h
+header-y += kvm.h
 header-y += kvm_para.h
+header-y += kvm_virtio.h
 header-y += mman.h
 header-y += ptrace.h
 header-y += setup.h
index 1d393edb0641d5f15fd5401c69122f4f8596717d..c93e92709f14326d278afe9cabc4de6f5d7b9381 100644 (file)
@@ -15,6 +15,7 @@
 #ifndef _ASM_TILE_AUXVEC_H
 #define _ASM_TILE_AUXVEC_H
 
-/* No extensions to auxvec */
+/* The vDSO location. */
+#define AT_SYSINFO_EHDR         33
 
 #endif /* _ASM_TILE_AUXVEC_H */
index af4c9f9154d18d7fee24a78dda9b02d691527846..572ddcad2090643fc68c2eeb13f0e8b1b1504a31 100644 (file)
@@ -29,8 +29,8 @@
  * to honor the arguments at some point.)
  *
  * Flush and invalidation of memory can normally be performed with the
- * __insn_flush(), __insn_inv(), and __insn_finv() instructions from
- * userspace.  The DCACHE option to the system call allows userspace
+ * __insn_flush() and __insn_finv() instructions from userspace.
+ * The DCACHE option to the system call allows userspace
  * to flush the entire L1+L2 data cache from the core.  In this case,
  * the address and length arguments are not used.  The DCACHE flush is
  * restricted to the current core, not all cores in the address space.
diff --git a/arch/tile/include/uapi/asm/kvm.h b/arch/tile/include/uapi/asm/kvm.h
new file mode 100644 (file)
index 0000000..4346520
--- /dev/null
@@ -0,0 +1,267 @@
+/*
+ * Copyright 2013 Tilera Corporation. All Rights Reserved.
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful, but
+ *   WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ *   NON INFRINGEMENT.  See the GNU General Public License for
+ *   more details.
+ */
+
+#ifndef _UAPI_ASM_TILE_KVM_H
+#define _UAPI_ASM_TILE_KVM_H
+
+#ifndef __ASSEMBLER__
+#include <linux/ptrace.h>
+#endif
+
+#include <arch/abi.h>
+
+/*
+ * For Hypervisor syscalls. Note this comes from the hv: syscall.h,
+ * with small modifications: Remove HV_SYS_fence_incoherent.
+ */
+/* Syscall allowed from guest PL bit mask. */
+#define HV_SYS_GUEST_SHIFT                12
+#define HV_SYS_GUEST_MASK                 (1 << HV_SYS_GUEST_SHIFT)
+/* downcall_dispatch; this syscall number must be zero */
+#define HV_SYS_downcall_dispatch          0
+/* install_context */
+#define HV_SYS_install_context            1
+/* sysconf */
+#define HV_SYS_sysconf                    2
+/* get_rtc */
+#define HV_SYS_get_rtc                    3
+/* set_rtc */
+#define HV_SYS_set_rtc                    4
+/* flush_asid */
+#define HV_SYS_flush_asid                 5
+/* flush_page */
+#define HV_SYS_flush_page                 6
+/* flush_pages */
+#define HV_SYS_flush_pages                7
+/* restart */
+#define HV_SYS_restart                    8
+/* halt */
+#define HV_SYS_halt                       9
+/* power_off */
+#define HV_SYS_power_off                 10
+/* inquire_physical */
+#define HV_SYS_inquire_physical          11
+/* inquire_memory_controller */
+#define HV_SYS_inquire_memory_controller 12
+/* inquire_virtual */
+#define HV_SYS_inquire_virtual           13
+/* inquire_asid */
+#define HV_SYS_inquire_asid              14
+/* console_read_if_ready */
+#define HV_SYS_console_read_if_ready     15
+/* console_write */
+#define HV_SYS_console_write             16
+/* init */
+#define HV_SYS_init                      17
+/* inquire_topology */
+#define HV_SYS_inquire_topology          18
+/* fs_findfile */
+#define HV_SYS_fs_findfile               19
+/* fs_fstat */
+#define HV_SYS_fs_fstat                  20
+/* fs_pread */
+#define HV_SYS_fs_pread                  21
+/* physaddr_read64 */
+#define HV_SYS_physaddr_read64           22
+/* physaddr_write64 */
+#define HV_SYS_physaddr_write64          23
+/* get_command_line */
+#define HV_SYS_get_command_line          24
+/* set_caching */
+#define HV_SYS_set_caching               25
+/* bzero_page */
+#define HV_SYS_bzero_page                26
+/* register_message_state */
+#define HV_SYS_register_message_state    27
+/* send_message */
+#define HV_SYS_send_message              28
+/* receive_message */
+#define HV_SYS_receive_message           29
+/* inquire_context */
+#define HV_SYS_inquire_context           30
+/* start_all_tiles */
+#define HV_SYS_start_all_tiles           31
+/* dev_open */
+#define HV_SYS_dev_open                  32
+/* dev_close */
+#define HV_SYS_dev_close                 33
+/* dev_pread */
+#define HV_SYS_dev_pread                 34
+/* dev_pwrite */
+#define HV_SYS_dev_pwrite                35
+/* dev_poll */
+#define HV_SYS_dev_poll                  36
+/* dev_poll_cancel */
+#define HV_SYS_dev_poll_cancel           37
+/* dev_preada */
+#define HV_SYS_dev_preada                38
+/* dev_pwritea */
+#define HV_SYS_dev_pwritea               39
+/* flush_remote */
+#define HV_SYS_flush_remote              40
+/* console_putc */
+#define HV_SYS_console_putc              41
+/* inquire_tiles */
+#define HV_SYS_inquire_tiles             42
+/* confstr */
+#define HV_SYS_confstr                   43
+/* reexec */
+#define HV_SYS_reexec                    44
+/* set_command_line */
+#define HV_SYS_set_command_line          45
+
+/* store_mapping */
+#define HV_SYS_store_mapping             52
+/* inquire_realpa */
+#define HV_SYS_inquire_realpa            53
+/* flush_all */
+#define HV_SYS_flush_all                 54
+/* get_ipi_pte */
+#define HV_SYS_get_ipi_pte               55
+/* set_pte_super_shift */
+#define HV_SYS_set_pte_super_shift       56
+/* set_speed */
+#define HV_SYS_set_speed                 57
+/* install_virt_context */
+#define HV_SYS_install_virt_context      58
+/* inquire_virt_context */
+#define HV_SYS_inquire_virt_context      59
+/* inquire_guest_context */
+#define HV_SYS_install_guest_context     60
+/* inquire_guest_context */
+#define HV_SYS_inquire_guest_context     61
+
+/*
+ * Number of hypercall (from guest os to host os) other than hv_*().
+ * We leave the previous 128 entries to the usual hv_*() calls
+ * as defined in hypervisor.h.
+ */
+#define KVM_OTHER_HCALL                  128
+
+/* Hypercall index for virtio. */
+#define KVM_HCALL_virtio                 128
+
+/* One greater than the maximum hypercall number. */
+#define KVM_NUM_HCALLS                   256
+
+#ifndef __ASSEMBLER__
+
+struct kvm_regs {
+       struct pt_regs regs;
+};
+
+#define FOR_EACH_GUEST_SPR(f)                  \
+       f(INTERRUPT_MASK_1);                    \
+       f(INTERRUPT_VECTOR_BASE_1);             \
+       f(EX_CONTEXT_1_0);                      \
+       f(EX_CONTEXT_1_1);                      \
+       f(SYSTEM_SAVE_1_0);                     \
+       f(SYSTEM_SAVE_1_1);                     \
+       f(SYSTEM_SAVE_1_2);                     \
+       f(SYSTEM_SAVE_1_3);                     \
+       f(INTCTRL_1_STATUS);                    \
+       f(IPI_MASK_1);                          \
+       f(IPI_EVENT_1);                         \
+       f(SINGLE_STEP_CONTROL_1);               \
+       f(SINGLE_STEP_EN_1_1);                  \
+
+struct kvm_sregs {
+#define DECLARE_SPR(f) unsigned long f
+       FOR_EACH_GUEST_SPR(DECLARE_SPR)
+#undef DECLARE_SPR
+};
+
+struct kvm_fpu {
+};
+
+struct kvm_debug_exit_arch {
+};
+
+struct kvm_guest_debug_arch {
+};
+
+/* definition of registers in kvm_run */
+struct kvm_sync_regs {
+};
+
+#ifndef __KERNEL__
+/* For hv_*() */
+#define KVM_EMULATE(name) [HV_SYS_##name] = qemu_emulate_illegal,
+#define USER_EMULATE(name) [HV_SYS_##name] = qemu_emulate_hv_##name,
+#define NO_EMULATE(name) [HV_SYS_##name] = qemu_emulate_illegal,
+#define BOTH_EMULATE(name) [HV_SYS_##name] = qemu_emulate_hv_##name,
+/* For others */
+#define USER_HCALL(name) [KVM_HCALL_##name] = qemu_handle_##name,
+#endif
+
+#define HCALL_DEFS \
+       /* For hv_*() */ \
+       KVM_EMULATE(init) \
+       NO_EMULATE(install_context) \
+       KVM_EMULATE(sysconf) \
+       KVM_EMULATE(get_rtc) \
+       KVM_EMULATE(set_rtc) \
+       NO_EMULATE(flush_asid) \
+       NO_EMULATE(flush_page) \
+       NO_EMULATE(flush_pages) \
+       USER_EMULATE(restart) \
+       USER_EMULATE(halt) \
+       USER_EMULATE(power_off) \
+       USER_EMULATE(inquire_physical) \
+       USER_EMULATE(inquire_memory_controller) \
+       KVM_EMULATE(inquire_virtual) \
+       KVM_EMULATE(inquire_asid) \
+       NO_EMULATE(console_read_if_ready) \
+       NO_EMULATE(console_write) \
+       NO_EMULATE(downcall_dispatch) \
+       KVM_EMULATE(inquire_topology) \
+       USER_EMULATE(fs_findfile) \
+       USER_EMULATE(fs_fstat) \
+       USER_EMULATE(fs_pread) \
+       KVM_EMULATE(physaddr_read64) \
+       KVM_EMULATE(physaddr_write64) \
+       USER_EMULATE(get_command_line) \
+       USER_EMULATE(set_caching) \
+       NO_EMULATE(bzero_page) \
+       KVM_EMULATE(register_message_state) \
+       KVM_EMULATE(send_message) \
+       KVM_EMULATE(receive_message) \
+       KVM_EMULATE(inquire_context) \
+       KVM_EMULATE(start_all_tiles) \
+       USER_EMULATE(dev_open) \
+       USER_EMULATE(dev_close) \
+       USER_EMULATE(dev_pread) \
+       USER_EMULATE(dev_pwrite) \
+       USER_EMULATE(dev_poll) \
+       USER_EMULATE(dev_poll_cancel) \
+       USER_EMULATE(dev_preada) \
+       USER_EMULATE(dev_pwritea) \
+       USER_EMULATE(flush_remote) \
+       NO_EMULATE(console_putc) \
+       KVM_EMULATE(inquire_tiles) \
+       KVM_EMULATE(confstr) \
+       USER_EMULATE(reexec) \
+       USER_EMULATE(set_command_line) \
+       USER_EMULATE(store_mapping) \
+       NO_EMULATE(inquire_realpa) \
+       NO_EMULATE(flush_all) \
+       KVM_EMULATE(get_ipi_pte) \
+       KVM_EMULATE(set_pte_super_shift) \
+       KVM_EMULATE(set_speed) \
+       /* For others */ \
+       USER_HCALL(virtio)
+
+#endif
+
+#endif /* _UAPI_ASM_TILE_KVM_H */
diff --git a/arch/tile/include/uapi/asm/kvm_virtio.h b/arch/tile/include/uapi/asm/kvm_virtio.h
new file mode 100644 (file)
index 0000000..d94f535
--- /dev/null
@@ -0,0 +1,60 @@
+/*
+ * Copyright 2013 Tilera Corporation. All Rights Reserved.
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful, but
+ *   WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ *   NON INFRINGEMENT.  See the GNU General Public License for
+ *   more details.
+ */
+
+#ifndef _UAPI_ASM_TILE_KVM_VIRTIO_H
+#define _UAPI_ASM_TILE_KVM_VIRTIO_H
+
+#include <linux/types.h>
+
+#define KVM_VIRTIO_UNKNOWN     0
+#define KVM_VIRTIO_NOTIFY      1
+#define KVM_VIRTIO_RESET       2
+#define KVM_VIRTIO_SET_STATUS  3
+
+struct kvm_device_desc {
+       /* The device type: console, network, disk etc.  Type 0 terminates. */
+       __u8 type;
+       /* The number of virtqueues (first in config array) */
+       __u8 num_vq;
+       /*
+        * The number of bytes of feature bits.  Multiply by 2: one for host
+        * features and one for Guest acknowledgements.
+        */
+       __u8 feature_len;
+       /* The number of bytes of the config array after virtqueues. */
+       __u8 config_len;
+       /* A status byte, written by the Guest. */
+       __u8 status;
+       __u64 config[0];
+};
+
+struct kvm_vqinfo {
+       /* Pointer to the information contained in the device config. */
+       struct kvm_vqconfig *config;
+       /* The address where we mapped the virtio ring, so we can unmap it. */
+       void *pages;
+};
+
+struct kvm_vqconfig {
+       /* The physical address of the virtio ring */
+       __u64 pa;
+       /* The number of entries in the virtio_ring */
+       __u64 num;
+       /* The interrupt we get when something happens. Set by the guest. */
+       __u32 irq;
+
+};
+
+
+#endif /* _UAPI_ASM_TILE_KVM_VIRTIO_H */
index 5334be8e253822c6c1b14741dc39d9ac49c4fdb7..4fb7098ff7456425a064f0c58beac0b3a23b8b60 100644 (file)
@@ -3,11 +3,17 @@
 #
 
 extra-y := vmlinux.lds head_$(BITS).o
-obj-y := backtrace.o entry.o irq.o messaging.o \
+obj-y := backtrace.o entry.o hvglue.o irq.o messaging.o \
        pci-dma.o proc.o process.o ptrace.o reboot.o \
-       setup.o signal.o single_step.o stack.o sys.o sysfs.o time.o traps.o \
+       setup.o signal.o single_step.o stack.o sys.o \
+       sysfs.o time.o traps.o unaligned.o vdso.o \
        intvec_$(BITS).o regs_$(BITS).o tile-desc_$(BITS).o
 
+ifdef CONFIG_FUNCTION_TRACER
+CFLAGS_REMOVE_ftrace.o = -pg
+CFLAGS_REMOVE_early_printk.o = -pg
+endif
+
 obj-$(CONFIG_HARDWALL)         += hardwall.o
 obj-$(CONFIG_COMPAT)           += compat.o compat_signal.o
 obj-$(CONFIG_SMP)              += smpboot.o smp.o tlb.o
@@ -20,3 +26,10 @@ else
 obj-$(CONFIG_PCI)              += pci.o
 endif
 obj-$(CONFIG_TILE_USB)         += usb.o
+obj-$(CONFIG_TILE_HVGLUE_TRACE)        += hvglue_trace.o
+obj-$(CONFIG_FUNCTION_TRACER)  += ftrace.o mcount_64.o
+obj-$(CONFIG_KPROBES)          += kprobes.o
+obj-$(CONFIG_KVM_GUEST)                += kvm_virtio.o
+obj-$(CONFIG_KGDB)             += kgdb.o
+
+obj-y                          += vdso/
index 01ddf19cc36dc7ac354eb361b997aea0553ff08a..80877ff3233140428f8479e48831616434bc58f0 100644 (file)
  * Generates definitions from c-type structures used by assembly sources.
  */
 
-#include <linux/kbuild.h>
-#include <linux/thread_info.h>
-#include <linux/sched.h>
-#include <linux/hardirq.h>
-#include <linux/ptrace.h>
-#include <hv/hypervisor.h>
-
 /* Check for compatible compiler early in the build. */
 #ifdef CONFIG_TILEGX
 # ifndef __tilegx__
 # endif
 #else
 # ifdef __tilegx__
-#  error Can not build TILEPro/TILE64 configurations with tilegx compiler
+#  error Can not build TILEPro configurations with tilegx compiler
 # endif
 #endif
 
+#include <linux/kbuild.h>
+#include <linux/thread_info.h>
+#include <linux/sched.h>
+#include <linux/hardirq.h>
+#include <linux/ptrace.h>
+#include <hv/hypervisor.h>
+#ifdef CONFIG_KVM
+#include <linux/kvm_host.h>
+#endif
+
 void foo(void)
 {
-       DEFINE(SINGLESTEP_STATE_BUFFER_OFFSET, \
+       DEFINE(SINGLESTEP_STATE_BUFFER_OFFSET,
               offsetof(struct single_step_state, buffer));
-       DEFINE(SINGLESTEP_STATE_FLAGS_OFFSET, \
+       DEFINE(SINGLESTEP_STATE_FLAGS_OFFSET,
               offsetof(struct single_step_state, flags));
-       DEFINE(SINGLESTEP_STATE_ORIG_PC_OFFSET, \
+       DEFINE(SINGLESTEP_STATE_ORIG_PC_OFFSET,
               offsetof(struct single_step_state, orig_pc));
-       DEFINE(SINGLESTEP_STATE_NEXT_PC_OFFSET, \
+       DEFINE(SINGLESTEP_STATE_NEXT_PC_OFFSET,
               offsetof(struct single_step_state, next_pc));
-       DEFINE(SINGLESTEP_STATE_BRANCH_NEXT_PC_OFFSET, \
+       DEFINE(SINGLESTEP_STATE_BRANCH_NEXT_PC_OFFSET,
               offsetof(struct single_step_state, branch_next_pc));
-       DEFINE(SINGLESTEP_STATE_UPDATE_VALUE_OFFSET, \
+       DEFINE(SINGLESTEP_STATE_UPDATE_VALUE_OFFSET,
               offsetof(struct single_step_state, update_value));
 
-       DEFINE(THREAD_INFO_TASK_OFFSET, \
+       DEFINE(THREAD_INFO_TASK_OFFSET,
               offsetof(struct thread_info, task));
-       DEFINE(THREAD_INFO_FLAGS_OFFSET, \
+       DEFINE(THREAD_INFO_FLAGS_OFFSET,
               offsetof(struct thread_info, flags));
-       DEFINE(THREAD_INFO_STATUS_OFFSET, \
+       DEFINE(THREAD_INFO_STATUS_OFFSET,
               offsetof(struct thread_info, status));
-       DEFINE(THREAD_INFO_HOMECACHE_CPU_OFFSET, \
+       DEFINE(THREAD_INFO_HOMECACHE_CPU_OFFSET,
               offsetof(struct thread_info, homecache_cpu));
-       DEFINE(THREAD_INFO_STEP_STATE_OFFSET, \
+       DEFINE(THREAD_INFO_PREEMPT_COUNT_OFFSET,
+              offsetof(struct thread_info, preempt_count));
+       DEFINE(THREAD_INFO_STEP_STATE_OFFSET,
               offsetof(struct thread_info, step_state));
+#ifdef __tilegx__
+       DEFINE(THREAD_INFO_UNALIGN_JIT_BASE_OFFSET,
+              offsetof(struct thread_info, unalign_jit_base));
+       DEFINE(THREAD_INFO_UNALIGN_JIT_TMP_OFFSET,
+              offsetof(struct thread_info, unalign_jit_tmp));
+#endif
+#ifdef CONFIG_KVM
+       DEFINE(THREAD_INFO_VCPU_OFFSET,
+              offsetof(struct thread_info, vcpu));
+#endif
 
        DEFINE(TASK_STRUCT_THREAD_KSP_OFFSET,
               offsetof(struct task_struct, thread.ksp));
        DEFINE(TASK_STRUCT_THREAD_PC_OFFSET,
               offsetof(struct task_struct, thread.pc));
 
-       DEFINE(HV_TOPOLOGY_WIDTH_OFFSET, \
+       DEFINE(HV_TOPOLOGY_WIDTH_OFFSET,
               offsetof(HV_Topology, width));
-       DEFINE(HV_TOPOLOGY_HEIGHT_OFFSET, \
+       DEFINE(HV_TOPOLOGY_HEIGHT_OFFSET,
               offsetof(HV_Topology, height));
 
-       DEFINE(IRQ_CPUSTAT_SYSCALL_COUNT_OFFSET, \
+       DEFINE(IRQ_CPUSTAT_SYSCALL_COUNT_OFFSET,
               offsetof(irq_cpustat_t, irq_syscall_count));
 }
index d0a052e725befa3dba32721b34bbf9867be5d10d..85e00b2f39bfe98d58c416daebd00b301958e2eb 100644 (file)
@@ -32,6 +32,7 @@
 #include <asm/ucontext.h>
 #include <asm/sigframe.h>
 #include <asm/syscalls.h>
+#include <asm/vdso.h>
 #include <arch/interrupts.h>
 
 struct compat_ucontext {
@@ -227,7 +228,7 @@ int compat_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
        if (err)
                goto give_sigsegv;
 
-       restorer = VDSO_BASE;
+       restorer = VDSO_SYM(&__vdso_rt_sigreturn);
        if (ka->sa.sa_flags & SA_RESTORER)
                restorer = ptr_to_compat_reg(ka->sa.sa_restorer);
 
index 34d72a151bf396cdd732e08c1819c9faafe22da7..53f2be453aed39426ca78e55cdb77bba497e3673 100644 (file)
 #include <linux/string.h>
 #include <linux/irqflags.h>
 #include <linux/printk.h>
+#ifdef CONFIG_KVM_GUEST
+#include <linux/virtio_console.h>
+#include <linux/kvm_para.h>
+#include <asm/kvm_virtio.h>
+#endif
 #include <asm/setup.h>
 #include <hv/hypervisor.h>
 
 static void early_hv_write(struct console *con, const char *s, unsigned n)
 {
-       hv_console_write((HV_VirtAddr) s, n);
+#ifdef CONFIG_KVM_GUEST
+       char buf[512];
+
+       if (n > sizeof(buf) - 1)
+               n = sizeof(buf) - 1;
+       memcpy(buf, s, n);
+       buf[n] = '\0';
+
+       hcall_virtio(KVM_VIRTIO_NOTIFY, __pa(buf));
+#else
+       tile_console_write(s, n);
+
+       /*
+        * Convert NL to NLCR (close enough to CRNL) during early boot.
+        * We assume newlines are at the ends of strings, which turns out
+        * to be good enough for early boot console output.
+        */
+       if (n && s[n-1] == '\n')
+               tile_console_write("\r", 1);
+#endif
 }
 
 static struct console early_hv_console = {
        .name =         "earlyhv",
        .write =        early_hv_write,
-       .flags =        CON_PRINTBUFFER,
+       .flags =        CON_PRINTBUFFER | CON_BOOT,
        .index =        -1,
 };
 
-/* Direct interface for emergencies */
-static int early_console_complete;
-
 void early_panic(const char *fmt, ...)
 {
        va_list ap;
@@ -43,51 +64,21 @@ void early_panic(const char *fmt, ...)
        va_start(ap, fmt);
        early_printk("Kernel panic - not syncing: ");
        early_vprintk(fmt, ap);
-       early_console->write(early_console, "\n", 1);
+       early_printk("\n");
        va_end(ap);
        dump_stack();
        hv_halt();
 }
 
-static int __initdata keep_early;
-
 static int __init setup_early_printk(char *str)
 {
        if (early_console)
                return 1;
 
-       if (str != NULL && strncmp(str, "keep", 4) == 0)
-               keep_early = 1;
-
        early_console = &early_hv_console;
        register_console(early_console);
 
        return 0;
 }
 
-void __init disable_early_printk(void)
-{
-       early_console_complete = 1;
-       if (!early_console)
-               return;
-       if (!keep_early) {
-               early_printk("disabling early console\n");
-               unregister_console(early_console);
-               early_console = NULL;
-       } else {
-               early_printk("keeping early console\n");
-       }
-}
-
-void warn_early_printk(void)
-{
-       if (early_console_complete || early_console)
-               return;
-       early_printk("\
-Machine shutting down before console output is fully initialized.\n\
-You may wish to reboot and add the option 'earlyprintk' to your\n\
-boot command line to see any diagnostic early console output.\n\
-");
-}
-
 early_param("earlyprintk", setup_early_printk);
index f116cb0bce20b9cfa395f8876c6ddfa4f858e98b..3d9175992a203391bd2918b4c59532257207b702 100644 (file)
@@ -27,22 +27,6 @@ STD_ENTRY(current_text_addr)
        { move r0, lr; jrp lr }
        STD_ENDPROC(current_text_addr)
 
-/*
- * We don't run this function directly, but instead copy it to a page
- * we map into every user process.  See vdso_setup().
- *
- * Note that libc has a copy of this function that it uses to compare
- * against the PC when a stack backtrace ends, so if this code is
- * changed, the libc implementation(s) should also be updated.
- */
-       .pushsection .data
-ENTRY(__rt_sigreturn)
-       moveli TREG_SYSCALL_NR_NAME,__NR_rt_sigreturn
-       swint1
-       ENDPROC(__rt_sigreturn)
-       ENTRY(__rt_sigreturn_end)
-       .popsection
-
 STD_ENTRY(dump_stack)
        { move r2, lr; lnk r1 }
        { move r4, r52; addli r1, r1, dump_stack - . }
diff --git a/arch/tile/kernel/ftrace.c b/arch/tile/kernel/ftrace.c
new file mode 100644 (file)
index 0000000..f1c4520
--- /dev/null
@@ -0,0 +1,246 @@
+/*
+ * Copyright 2012 Tilera Corporation. All Rights Reserved.
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful, but
+ *   WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ *   NON INFRINGEMENT.  See the GNU General Public License for
+ *   more details.
+ *
+ * TILE-Gx specific ftrace support
+ */
+
+#include <linux/ftrace.h>
+#include <linux/uaccess.h>
+
+#include <asm/cacheflush.h>
+#include <asm/ftrace.h>
+#include <asm/sections.h>
+
+#include <arch/opcode.h>
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+
+static inline tilegx_bundle_bits NOP(void)
+{
+       return create_UnaryOpcodeExtension_X0(FNOP_UNARY_OPCODE_X0) |
+               create_RRROpcodeExtension_X0(UNARY_RRR_0_OPCODE_X0) |
+               create_Opcode_X0(RRR_0_OPCODE_X0) |
+               create_UnaryOpcodeExtension_X1(NOP_UNARY_OPCODE_X1) |
+               create_RRROpcodeExtension_X1(UNARY_RRR_0_OPCODE_X1) |
+               create_Opcode_X1(RRR_0_OPCODE_X1);
+}
+
+static int machine_stopped __read_mostly;
+
+int ftrace_arch_code_modify_prepare(void)
+{
+       machine_stopped = 1;
+       return 0;
+}
+
+int ftrace_arch_code_modify_post_process(void)
+{
+       flush_icache_range(0, CHIP_L1I_CACHE_SIZE());
+       machine_stopped = 0;
+       return 0;
+}
+
+/*
+ * Put { move r10, lr; jal ftrace_caller } in a bundle, this lets dynamic
+ * tracer just add one cycle overhead to every kernel function when disabled.
+ */
+static unsigned long ftrace_gen_branch(unsigned long pc, unsigned long addr,
+                                      bool link)
+{
+       tilegx_bundle_bits opcode_x0, opcode_x1;
+       long pcrel_by_instr = (addr - pc) >> TILEGX_LOG2_BUNDLE_SIZE_IN_BYTES;
+
+       if (link) {
+               /* opcode: jal addr */
+               opcode_x1 =
+                       create_Opcode_X1(JUMP_OPCODE_X1) |
+                       create_JumpOpcodeExtension_X1(JAL_JUMP_OPCODE_X1) |
+                       create_JumpOff_X1(pcrel_by_instr);
+       } else {
+               /* opcode: j addr */
+               opcode_x1 =
+                       create_Opcode_X1(JUMP_OPCODE_X1) |
+                       create_JumpOpcodeExtension_X1(J_JUMP_OPCODE_X1) |
+                       create_JumpOff_X1(pcrel_by_instr);
+       }
+
+       if (addr == FTRACE_ADDR) {
+               /* opcode: or r10, lr, zero */
+               opcode_x0 =
+                       create_Dest_X0(10) |
+                       create_SrcA_X0(TREG_LR) |
+                       create_SrcB_X0(TREG_ZERO) |
+                       create_RRROpcodeExtension_X0(OR_RRR_0_OPCODE_X0) |
+                       create_Opcode_X0(RRR_0_OPCODE_X0);
+       } else {
+               /* opcode: fnop */
+               opcode_x0 =
+                       create_UnaryOpcodeExtension_X0(FNOP_UNARY_OPCODE_X0) |
+                       create_RRROpcodeExtension_X0(UNARY_RRR_0_OPCODE_X0) |
+                       create_Opcode_X0(RRR_0_OPCODE_X0);
+       }
+
+       return opcode_x1 | opcode_x0;
+}
+
+static unsigned long ftrace_nop_replace(struct dyn_ftrace *rec)
+{
+       return NOP();
+}
+
+static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr)
+{
+       return ftrace_gen_branch(pc, addr, true);
+}
+
+static int ftrace_modify_code(unsigned long pc, unsigned long old,
+                             unsigned long new)
+{
+       unsigned long pc_wr;
+
+       /* Check if the address is in kernel text space and module space. */
+       if (!kernel_text_address(pc))
+               return -EINVAL;
+
+       /* Operate on writable kernel text mapping. */
+       pc_wr = pc - MEM_SV_START + PAGE_OFFSET;
+
+       if (probe_kernel_write((void *)pc_wr, &new, MCOUNT_INSN_SIZE))
+               return -EPERM;
+
+       smp_wmb();
+
+       if (!machine_stopped && num_online_cpus() > 1)
+               flush_icache_range(pc, pc + MCOUNT_INSN_SIZE);
+
+       return 0;
+}
+
+int ftrace_update_ftrace_func(ftrace_func_t func)
+{
+       unsigned long pc, old;
+       unsigned long new;
+       int ret;
+
+       pc = (unsigned long)&ftrace_call;
+       memcpy(&old, &ftrace_call, MCOUNT_INSN_SIZE);
+       new = ftrace_call_replace(pc, (unsigned long)func);
+
+       ret = ftrace_modify_code(pc, old, new);
+
+       return ret;
+}
+
+int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
+{
+       unsigned long new, old;
+       unsigned long ip = rec->ip;
+
+       old = ftrace_nop_replace(rec);
+       new = ftrace_call_replace(ip, addr);
+
+       return ftrace_modify_code(rec->ip, old, new);
+}
+
+int ftrace_make_nop(struct module *mod,
+                   struct dyn_ftrace *rec, unsigned long addr)
+{
+       unsigned long ip = rec->ip;
+       unsigned long old;
+       unsigned long new;
+       int ret;
+
+       old = ftrace_call_replace(ip, addr);
+       new = ftrace_nop_replace(rec);
+       ret = ftrace_modify_code(ip, old, new);
+
+       return ret;
+}
+
+int __init ftrace_dyn_arch_init(void *data)
+{
+       *(unsigned long *)data = 0;
+
+       return 0;
+}
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
+                          unsigned long frame_pointer)
+{
+       unsigned long return_hooker = (unsigned long) &return_to_handler;
+       struct ftrace_graph_ent trace;
+       unsigned long old;
+       int err;
+
+       if (unlikely(atomic_read(&current->tracing_graph_pause)))
+               return;
+
+       old = *parent;
+       *parent = return_hooker;
+
+       err = ftrace_push_return_trace(old, self_addr, &trace.depth,
+                                      frame_pointer);
+       if (err == -EBUSY) {
+               *parent = old;
+               return;
+       }
+
+       trace.func = self_addr;
+
+       /* Only trace if the calling function expects to */
+       if (!ftrace_graph_entry(&trace)) {
+               current->curr_ret_stack--;
+               *parent = old;
+       }
+}
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+extern unsigned long ftrace_graph_call;
+
+static int __ftrace_modify_caller(unsigned long *callsite,
+                                 void (*func) (void), bool enable)
+{
+       unsigned long caller_fn = (unsigned long) func;
+       unsigned long pc = (unsigned long) callsite;
+       unsigned long branch = ftrace_gen_branch(pc, caller_fn, false);
+       unsigned long nop = NOP();
+       unsigned long old = enable ? nop : branch;
+       unsigned long new = enable ? branch : nop;
+
+       return ftrace_modify_code(pc, old, new);
+}
+
+static int ftrace_modify_graph_caller(bool enable)
+{
+       int ret;
+
+       ret = __ftrace_modify_caller(&ftrace_graph_call,
+                                    ftrace_graph_caller,
+                                    enable);
+
+       return ret;
+}
+
+int ftrace_enable_ftrace_graph_caller(void)
+{
+       return ftrace_modify_graph_caller(true);
+}
+
+int ftrace_disable_ftrace_graph_caller(void)
+{
+       return ftrace_modify_graph_caller(false);
+}
+#endif /* CONFIG_DYNAMIC_FTRACE */
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
index 38ac189d95751a1a7d7a0fe074ad76179037f8a0..df27a1fd94a310a759612a5c6706b1011a94f787 100644 (file)
@@ -272,9 +272,9 @@ static void hardwall_setup_func(void *info)
        struct hardwall_info *r = info;
        struct hardwall_type *hwt = r->type;
 
-       int cpu = smp_processor_id();
-       int x = cpu % smp_width;
-       int y = cpu / smp_width;
+       int cpu = smp_processor_id();  /* on_each_cpu disables preemption */
+       int x = cpu_x(cpu);
+       int y = cpu_y(cpu);
        int bits = 0;
        if (x == r->ulhc_x)
                bits |= W_PROTECT;
@@ -317,6 +317,7 @@ static void hardwall_protect_rectangle(struct hardwall_info *r)
        on_each_cpu_mask(&rect_cpus, hardwall_setup_func, r, 1);
 }
 
+/* Entered from INT_xDN_FIREWALL interrupt vector with irqs disabled. */
 void __kprobes do_hardwall_trap(struct pt_regs* regs, int fault_num)
 {
        struct hardwall_info *rect;
@@ -325,7 +326,6 @@ void __kprobes do_hardwall_trap(struct pt_regs* regs, int fault_num)
        struct siginfo info;
        int cpu = smp_processor_id();
        int found_processes;
-       unsigned long flags;
        struct pt_regs *old_regs = set_irq_regs(regs);
 
        irq_enter();
@@ -346,7 +346,7 @@ void __kprobes do_hardwall_trap(struct pt_regs* regs, int fault_num)
        BUG_ON(hwt->disabled);
 
        /* This tile trapped a network access; find the rectangle. */
-       spin_lock_irqsave(&hwt->lock, flags);
+       spin_lock(&hwt->lock);
        list_for_each_entry(rect, &hwt->list, list) {
                if (cpumask_test_cpu(cpu, &rect->cpumask))
                        break;
@@ -401,7 +401,7 @@ void __kprobes do_hardwall_trap(struct pt_regs* regs, int fault_num)
                pr_notice("hardwall: no associated processes!\n");
 
  done:
-       spin_unlock_irqrestore(&hwt->lock, flags);
+       spin_unlock(&hwt->lock);
 
        /*
         * We have to disable firewall interrupts now, or else when we
@@ -540,6 +540,14 @@ static struct hardwall_info *hardwall_create(struct hardwall_type *hwt,
                }
        }
 
+       /*
+        * Eliminate cpus that are not part of this Linux client.
+        * Note that this allows for configurations that we might not want to
+        * support, such as one client on every even cpu, another client on
+        * every odd cpu.
+        */
+       cpumask_and(&info->cpumask, &info->cpumask, cpu_online_mask);
+
        /* Confirm it doesn't overlap and add it to the list. */
        spin_lock_irqsave(&hwt->lock, flags);
        list_for_each_entry(iter, &hwt->list, list) {
@@ -612,7 +620,7 @@ static int hardwall_activate(struct hardwall_info *info)
 
 /*
  * Deactivate a task's hardwall.  Must hold lock for hardwall_type.
- * This method may be called from free_task(), so we don't want to
+ * This method may be called from exit_thread(), so we don't want to
  * rely on too many fields of struct task_struct still being valid.
  * We assume the cpus_allowed, pid, and comm fields are still valid.
  */
@@ -653,7 +661,7 @@ static int hardwall_deactivate(struct hardwall_type *hwt,
                return -EINVAL;
 
        printk(KERN_DEBUG "Pid %d (%s) deactivated for %s hardwall: cpu %d\n",
-              task->pid, task->comm, hwt->name, smp_processor_id());
+              task->pid, task->comm, hwt->name, raw_smp_processor_id());
        return 0;
 }
 
@@ -795,8 +803,8 @@ static void reset_xdn_network_state(struct hardwall_type *hwt)
        /* Reset UDN coordinates to their standard value */
        {
                unsigned int cpu = smp_processor_id();
-               unsigned int x = cpu % smp_width;
-               unsigned int y = cpu / smp_width;
+               unsigned int x = cpu_x(cpu);
+               unsigned int y = cpu_y(cpu);
                __insn_mtspr(SPR_UDN_TILE_COORD, (x << 18) | (y << 7));
        }
 
index ac115307e5e4e3189d7e89b3c644ecd65c10c384..8d5b40ff29222edee807dd18615450565221f64e 100644 (file)
@@ -39,12 +39,12 @@ ENTRY(_start)
        }
        {
          moveli r0, _HV_VERSION_OLD_HV_INIT
-         jal hv_init
+         jal _hv_init
        }
        /* Get a reasonable default ASID in r0 */
        {
          move r0, zero
-         jal hv_inquire_asid
+         jal _hv_inquire_asid
        }
        /* Install the default page table */
        {
@@ -64,7 +64,7 @@ ENTRY(_start)
          auli r0, r0, ha16(swapper_pg_dir - PAGE_OFFSET)
        }
        {
-         inv r6
+         finv r6
          move r1, zero   /* high 32 bits of CPA is zero */
        }
        {
@@ -73,12 +73,12 @@ ENTRY(_start)
        }
        {
          auli lr, lr, ha16(1f)
-         j hv_install_context
+         j _hv_install_context
        }
 1:
 
        /* Get our processor number and save it away in SAVE_K_0. */
-       jal hv_inquire_topology
+       jal _hv_inquire_topology
        mulll_uu r4, r1, r2        /* r1 == y, r2 == width */
        add r4, r4, r0             /* r0 == x, so r4 == cpu == y*width + x */
 
@@ -86,7 +86,7 @@ ENTRY(_start)
        /*
         * Load up our per-cpu offset.  When the first (master) tile
         * boots, this value is still zero, so we will load boot_pc
-        * with start_kernel, and boot_sp with init_stack + THREAD_SIZE.
+        * with start_kernel, and boot_sp at the top of init_stack.
         * The master tile initializes the per-cpu offset array, so that
         * when subsequent (secondary) tiles boot, they will instead load
         * from their per-cpu versions of boot_sp and boot_pc.
@@ -126,7 +126,6 @@ ENTRY(_start)
        lw sp, r1
        or r4, sp, r4
        mtspr SPR_SYSTEM_SAVE_K_0, r4  /* save ksp0 + cpu */
-       addi sp, sp, -STACK_TOP_DELTA
        {
          move lr, zero   /* stop backtraces in the called function */
          jr r0
@@ -163,8 +162,8 @@ ENTRY(swapper_pg_dir)
        .set addr, addr + PGDIR_SIZE
        .endr
 
-       /* The true text VAs are mapped as VA = PA + MEM_SV_INTRPT */
-       PTE MEM_SV_INTRPT, 0, (1 << (HV_PTE_INDEX_READABLE - 32)) | \
+       /* The true text VAs are mapped as VA = PA + MEM_SV_START */
+       PTE MEM_SV_START, 0, (1 << (HV_PTE_INDEX_READABLE - 32)) | \
                              (1 << (HV_PTE_INDEX_EXECUTABLE - 32))
        .org swapper_pg_dir + PGDIR_SIZE
        END(swapper_pg_dir)
index 6093964fa5c72a891c16d59e5c8d855cceb2ccb2..bd0e12f283f3e456e20d84a90f7a35027caf835a 100644 (file)
 #include <arch/chip.h>
 #include <arch/spr_def.h>
 
+/* Extract two 32-bit bit values that were read into one register. */
+#ifdef __BIG_ENDIAN__
+#define GET_FIRST_INT(rd, rs) shrsi rd, rs, 32
+#define GET_SECOND_INT(rd, rs) addxi rd, rs, 0
+#else
+#define GET_FIRST_INT(rd, rs) addxi rd, rs, 0
+#define GET_SECOND_INT(rd, rs) shrsi rd, rs, 32
+#endif
+
 /*
  * This module contains the entry code for kernel images. It performs the
  * minimal setup needed to call the generic C routines.
@@ -46,11 +55,11 @@ ENTRY(_start)
          movei r2, TILE_CHIP_REV
          movei r3, KERNEL_PL
        }
-       jal hv_init
+       jal _hv_init
        /* Get a reasonable default ASID in r0 */
        {
          move r0, zero
-         jal hv_inquire_asid
+         jal _hv_inquire_asid
        }
 
        /*
@@ -61,7 +70,7 @@ ENTRY(_start)
         * other CPUs should see a properly-constructed page table.
         */
        {
-         v4int_l r2, zero, r0    /* ASID for hv_install_context */
+         GET_FIRST_INT(r2, r0)    /* ASID for hv_install_context */
          moveli r4, hw1_last(swapper_pgprot - PAGE_OFFSET)
        }
        {
@@ -77,7 +86,7 @@ ENTRY(_start)
        {
          /* After initializing swapper_pgprot, HV_PTE_GLOBAL is set. */
          bfextu r7, r1, HV_PTE_INDEX_GLOBAL, HV_PTE_INDEX_GLOBAL
-         inv r4
+         finv r4
        }
        bnez r7, .Lno_write
        {
@@ -121,29 +130,24 @@ ENTRY(_start)
        }
        {
          moveli r3, CTX_PAGE_FLAG
-         j hv_install_context
+         j _hv_install_context
        }
 1:
 
        /* Install the interrupt base. */
-       moveli r0, hw2_last(MEM_SV_START)
-       shl16insli r0, r0, hw1(MEM_SV_START)
-       shl16insli r0, r0, hw0(MEM_SV_START)
+       moveli r0, hw2_last(intrpt_start)
+       shl16insli r0, r0, hw1(intrpt_start)
+       shl16insli r0, r0, hw0(intrpt_start)
        mtspr SPR_INTERRUPT_VECTOR_BASE_K, r0
 
-       /*
-        * Get our processor number and save it away in SAVE_K_0.
-        * Extract stuff from the topology structure: r4 = y, r6 = x,
-        * r5 = width.  FIXME: consider whether we want to just make these
-        * 64-bit values (and if so fix smp_topology write below, too).
-        */
-       jal hv_inquire_topology
+       /* Get our processor number and save it away in SAVE_K_0. */
+       jal _hv_inquire_topology
        {
-         v4int_l r5, zero, r1    /* r5 = width */
-         shrui r4, r0, 32        /* r4 = y */
+         GET_FIRST_INT(r5, r1)   /* r5 = width */
+         GET_SECOND_INT(r4, r0)  /* r4 = y */
        }
        {
-         v4int_l r6, zero, r0    /* r6 = x */
+         GET_FIRST_INT(r6, r0)   /* r6 = x */
          mul_lu_lu r4, r4, r5
        }
        {
@@ -154,7 +158,7 @@ ENTRY(_start)
        /*
         * Load up our per-cpu offset.  When the first (master) tile
         * boots, this value is still zero, so we will load boot_pc
-        * with start_kernel, and boot_sp with init_stack + THREAD_SIZE.
+        * with start_kernel, and boot_sp with at the top of init_stack.
         * The master tile initializes the per-cpu offset array, so that
         * when subsequent (secondary) tiles boot, they will instead load
         * from their per-cpu versions of boot_sp and boot_pc.
@@ -198,9 +202,9 @@ ENTRY(_start)
        }
        ld r0, r0
        ld sp, r1
-       or r4, sp, r4
+       shli r4, r4, CPU_SHIFT
+       bfins r4, sp, 0, CPU_SHIFT-1
        mtspr SPR_SYSTEM_SAVE_K_0, r4  /* save ksp0 + cpu */
-       addi sp, sp, -STACK_TOP_DELTA
        {
          move lr, zero   /* stop backtraces in the called function */
          jr r0
diff --git a/arch/tile/kernel/hvglue.S b/arch/tile/kernel/hvglue.S
new file mode 100644 (file)
index 0000000..2914a9e
--- /dev/null
@@ -0,0 +1,81 @@
+/* Hypervisor call vector addresses; see <hv/hypervisor.h> */
+.macro gensym sym, val, size
+.org \val
+.global _\sym
+.type _\sym,function
+_\sym:
+.size _\sym,\size
+#ifndef CONFIG_TILE_HVGLUE_TRACE
+.globl \sym
+.set \sym,_\sym
+#endif
+.endm
+
+.section .hvglue,"x",@nobits
+.align 8
+gensym hv_init, 0x20, 32
+gensym hv_install_context, 0x40, 32
+gensym hv_sysconf, 0x60, 32
+gensym hv_get_rtc, 0x80, 32
+gensym hv_set_rtc, 0xa0, 32
+gensym hv_flush_asid, 0xc0, 32
+gensym hv_flush_page, 0xe0, 32
+gensym hv_flush_pages, 0x100, 32
+gensym hv_restart, 0x120, 32
+gensym hv_halt, 0x140, 32
+gensym hv_power_off, 0x160, 32
+gensym hv_inquire_physical, 0x180, 32
+gensym hv_inquire_memory_controller, 0x1a0, 32
+gensym hv_inquire_virtual, 0x1c0, 32
+gensym hv_inquire_asid, 0x1e0, 32
+gensym hv_nanosleep, 0x200, 32
+gensym hv_console_read_if_ready, 0x220, 32
+gensym hv_console_write, 0x240, 32
+gensym hv_downcall_dispatch, 0x260, 32
+gensym hv_inquire_topology, 0x280, 32
+gensym hv_fs_findfile, 0x2a0, 32
+gensym hv_fs_fstat, 0x2c0, 32
+gensym hv_fs_pread, 0x2e0, 32
+gensym hv_physaddr_read64, 0x300, 32
+gensym hv_physaddr_write64, 0x320, 32
+gensym hv_get_command_line, 0x340, 32
+gensym hv_set_caching, 0x360, 32
+gensym hv_bzero_page, 0x380, 32
+gensym hv_register_message_state, 0x3a0, 32
+gensym hv_send_message, 0x3c0, 32
+gensym hv_receive_message, 0x3e0, 32
+gensym hv_inquire_context, 0x400, 32
+gensym hv_start_all_tiles, 0x420, 32
+gensym hv_dev_open, 0x440, 32
+gensym hv_dev_close, 0x460, 32
+gensym hv_dev_pread, 0x480, 32
+gensym hv_dev_pwrite, 0x4a0, 32
+gensym hv_dev_poll, 0x4c0, 32
+gensym hv_dev_poll_cancel, 0x4e0, 32
+gensym hv_dev_preada, 0x500, 32
+gensym hv_dev_pwritea, 0x520, 32
+gensym hv_flush_remote, 0x540, 32
+gensym hv_console_putc, 0x560, 32
+gensym hv_inquire_tiles, 0x580, 32
+gensym hv_confstr, 0x5a0, 32
+gensym hv_reexec, 0x5c0, 32
+gensym hv_set_command_line, 0x5e0, 32
+gensym hv_clear_intr, 0x600, 32
+gensym hv_enable_intr, 0x620, 32
+gensym hv_disable_intr, 0x640, 32
+gensym hv_raise_intr, 0x660, 32
+gensym hv_trigger_ipi, 0x680, 32
+gensym hv_store_mapping, 0x6a0, 32
+gensym hv_inquire_realpa, 0x6c0, 32
+gensym hv_flush_all, 0x6e0, 32
+gensym hv_get_ipi_pte, 0x700, 32
+gensym hv_set_pte_super_shift, 0x720, 32
+gensym hv_set_speed, 0x740, 32
+gensym hv_install_virt_context, 0x760, 32
+gensym hv_inquire_virt_context, 0x780, 32
+gensym hv_install_guest_context, 0x7a0, 32
+gensym hv_inquire_guest_context, 0x7c0, 32
+gensym hv_console_set_ipi, 0x7e0, 32
+gensym hv_glue_internals, 0x800, 2048
+gensym hcall_virtio, 0x1000, 32
+gensym hv_hcall_internals, 0x1020, 28640
diff --git a/arch/tile/kernel/hvglue.lds b/arch/tile/kernel/hvglue.lds
deleted file mode 100644 (file)
index d44c5a6..0000000
+++ /dev/null
@@ -1,59 +0,0 @@
-/* Hypervisor call vector addresses; see <hv/hypervisor.h> */
-hv_init = TEXT_OFFSET + 0x10020;
-hv_install_context = TEXT_OFFSET + 0x10040;
-hv_sysconf = TEXT_OFFSET + 0x10060;
-hv_get_rtc = TEXT_OFFSET + 0x10080;
-hv_set_rtc = TEXT_OFFSET + 0x100a0;
-hv_flush_asid = TEXT_OFFSET + 0x100c0;
-hv_flush_page = TEXT_OFFSET + 0x100e0;
-hv_flush_pages = TEXT_OFFSET + 0x10100;
-hv_restart = TEXT_OFFSET + 0x10120;
-hv_halt = TEXT_OFFSET + 0x10140;
-hv_power_off = TEXT_OFFSET + 0x10160;
-hv_inquire_physical = TEXT_OFFSET + 0x10180;
-hv_inquire_memory_controller = TEXT_OFFSET + 0x101a0;
-hv_inquire_virtual = TEXT_OFFSET + 0x101c0;
-hv_inquire_asid = TEXT_OFFSET + 0x101e0;
-hv_nanosleep = TEXT_OFFSET + 0x10200;
-hv_console_read_if_ready = TEXT_OFFSET + 0x10220;
-hv_console_write = TEXT_OFFSET + 0x10240;
-hv_downcall_dispatch = TEXT_OFFSET + 0x10260;
-hv_inquire_topology = TEXT_OFFSET + 0x10280;
-hv_fs_findfile = TEXT_OFFSET + 0x102a0;
-hv_fs_fstat = TEXT_OFFSET + 0x102c0;
-hv_fs_pread = TEXT_OFFSET + 0x102e0;
-hv_physaddr_read64 = TEXT_OFFSET + 0x10300;
-hv_physaddr_write64 = TEXT_OFFSET + 0x10320;
-hv_get_command_line = TEXT_OFFSET + 0x10340;
-hv_set_caching = TEXT_OFFSET + 0x10360;
-hv_bzero_page = TEXT_OFFSET + 0x10380;
-hv_register_message_state = TEXT_OFFSET + 0x103a0;
-hv_send_message = TEXT_OFFSET + 0x103c0;
-hv_receive_message = TEXT_OFFSET + 0x103e0;
-hv_inquire_context = TEXT_OFFSET + 0x10400;
-hv_start_all_tiles = TEXT_OFFSET + 0x10420;
-hv_dev_open = TEXT_OFFSET + 0x10440;
-hv_dev_close = TEXT_OFFSET + 0x10460;
-hv_dev_pread = TEXT_OFFSET + 0x10480;
-hv_dev_pwrite = TEXT_OFFSET + 0x104a0;
-hv_dev_poll = TEXT_OFFSET + 0x104c0;
-hv_dev_poll_cancel = TEXT_OFFSET + 0x104e0;
-hv_dev_preada = TEXT_OFFSET + 0x10500;
-hv_dev_pwritea = TEXT_OFFSET + 0x10520;
-hv_flush_remote = TEXT_OFFSET + 0x10540;
-hv_console_putc = TEXT_OFFSET + 0x10560;
-hv_inquire_tiles = TEXT_OFFSET + 0x10580;
-hv_confstr = TEXT_OFFSET + 0x105a0;
-hv_reexec = TEXT_OFFSET + 0x105c0;
-hv_set_command_line = TEXT_OFFSET + 0x105e0;
-hv_clear_intr = TEXT_OFFSET + 0x10600;
-hv_enable_intr = TEXT_OFFSET + 0x10620;
-hv_disable_intr = TEXT_OFFSET + 0x10640;
-hv_raise_intr = TEXT_OFFSET + 0x10660;
-hv_trigger_ipi = TEXT_OFFSET + 0x10680;
-hv_store_mapping = TEXT_OFFSET + 0x106a0;
-hv_inquire_realpa = TEXT_OFFSET + 0x106c0;
-hv_flush_all = TEXT_OFFSET + 0x106e0;
-hv_get_ipi_pte = TEXT_OFFSET + 0x10700;
-hv_set_pte_super_shift = TEXT_OFFSET + 0x10720;
-hv_glue_internals = TEXT_OFFSET + 0x10740;
diff --git a/arch/tile/kernel/hvglue_trace.c b/arch/tile/kernel/hvglue_trace.c
new file mode 100644 (file)
index 0000000..3b15c76
--- /dev/null
@@ -0,0 +1,284 @@
+/*
+ * Copyright 2013 Tilera Corporation. All Rights Reserved.
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful, but
+ *   WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ *   NON INFRINGEMENT.  See the GNU General Public License for
+ *   more details.
+ */
+
+/*
+ * Pull in the hypervisor header so we declare all the ABI functions
+ * with the underscore versions, then undef the names so that we can
+ * provide our own wrapper versions.
+ */
+#define hv_init _hv_init
+#define hv_install_context _hv_install_context
+#define hv_sysconf _hv_sysconf
+#define hv_get_rtc _hv_get_rtc
+#define hv_set_rtc _hv_set_rtc
+#define hv_flush_asid _hv_flush_asid
+#define hv_flush_page _hv_flush_page
+#define hv_flush_pages _hv_flush_pages
+#define hv_restart _hv_restart
+#define hv_halt _hv_halt
+#define hv_power_off _hv_power_off
+#define hv_inquire_physical _hv_inquire_physical
+#define hv_inquire_memory_controller _hv_inquire_memory_controller
+#define hv_inquire_virtual _hv_inquire_virtual
+#define hv_inquire_asid _hv_inquire_asid
+#define hv_nanosleep _hv_nanosleep
+#define hv_console_read_if_ready _hv_console_read_if_ready
+#define hv_console_write _hv_console_write
+#define hv_downcall_dispatch _hv_downcall_dispatch
+#define hv_inquire_topology _hv_inquire_topology
+#define hv_fs_findfile _hv_fs_findfile
+#define hv_fs_fstat _hv_fs_fstat
+#define hv_fs_pread _hv_fs_pread
+#define hv_physaddr_read64 _hv_physaddr_read64
+#define hv_physaddr_write64 _hv_physaddr_write64
+#define hv_get_command_line _hv_get_command_line
+#define hv_set_caching _hv_set_caching
+#define hv_bzero_page _hv_bzero_page
+#define hv_register_message_state _hv_register_message_state
+#define hv_send_message _hv_send_message
+#define hv_receive_message _hv_receive_message
+#define hv_inquire_context _hv_inquire_context
+#define hv_start_all_tiles _hv_start_all_tiles
+#define hv_dev_open _hv_dev_open
+#define hv_dev_close _hv_dev_close
+#define hv_dev_pread _hv_dev_pread
+#define hv_dev_pwrite _hv_dev_pwrite
+#define hv_dev_poll _hv_dev_poll
+#define hv_dev_poll_cancel _hv_dev_poll_cancel
+#define hv_dev_preada _hv_dev_preada
+#define hv_dev_pwritea _hv_dev_pwritea
+#define hv_flush_remote _hv_flush_remote
+#define hv_console_putc _hv_console_putc
+#define hv_inquire_tiles _hv_inquire_tiles
+#define hv_confstr _hv_confstr
+#define hv_reexec _hv_reexec
+#define hv_set_command_line _hv_set_command_line
+#define hv_clear_intr _hv_clear_intr
+#define hv_enable_intr _hv_enable_intr
+#define hv_disable_intr _hv_disable_intr
+#define hv_raise_intr _hv_raise_intr
+#define hv_trigger_ipi _hv_trigger_ipi
+#define hv_store_mapping _hv_store_mapping
+#define hv_inquire_realpa _hv_inquire_realpa
+#define hv_flush_all _hv_flush_all
+#define hv_get_ipi_pte _hv_get_ipi_pte
+#define hv_set_pte_super_shift _hv_set_pte_super_shift
+#define hv_set_speed _hv_set_speed
+#define hv_install_virt_context _hv_install_virt_context
+#define hv_inquire_virt_context _hv_inquire_virt_context
+#define hv_install_guest_context _hv_install_guest_context
+#define hv_inquire_guest_context _hv_inquire_guest_context
+#define hv_console_set_ipi _hv_console_set_ipi
+#include <hv/hypervisor.h>
+#undef hv_init
+#undef hv_install_context
+#undef hv_sysconf
+#undef hv_get_rtc
+#undef hv_set_rtc
+#undef hv_flush_asid
+#undef hv_flush_page
+#undef hv_flush_pages
+#undef hv_restart
+#undef hv_halt
+#undef hv_power_off
+#undef hv_inquire_physical
+#undef hv_inquire_memory_controller
+#undef hv_inquire_virtual
+#undef hv_inquire_asid
+#undef hv_nanosleep
+#undef hv_console_read_if_ready
+#undef hv_console_write
+#undef hv_downcall_dispatch
+#undef hv_inquire_topology
+#undef hv_fs_findfile
+#undef hv_fs_fstat
+#undef hv_fs_pread
+#undef hv_physaddr_read64
+#undef hv_physaddr_write64
+#undef hv_get_command_line
+#undef hv_set_caching
+#undef hv_bzero_page
+#undef hv_register_message_state
+#undef hv_send_message
+#undef hv_receive_message
+#undef hv_inquire_context
+#undef hv_start_all_tiles
+#undef hv_dev_open
+#undef hv_dev_close
+#undef hv_dev_pread
+#undef hv_dev_pwrite
+#undef hv_dev_poll
+#undef hv_dev_poll_cancel
+#undef hv_dev_preada
+#undef hv_dev_pwritea
+#undef hv_flush_remote
+#undef hv_console_putc
+#undef hv_inquire_tiles
+#undef hv_confstr
+#undef hv_reexec
+#undef hv_set_command_line
+#undef hv_clear_intr
+#undef hv_enable_intr
+#undef hv_disable_intr
+#undef hv_raise_intr
+#undef hv_trigger_ipi
+#undef hv_store_mapping
+#undef hv_inquire_realpa
+#undef hv_flush_all
+#undef hv_get_ipi_pte
+#undef hv_set_pte_super_shift
+#undef hv_set_speed
+#undef hv_install_virt_context
+#undef hv_inquire_virt_context
+#undef hv_install_guest_context
+#undef hv_inquire_guest_context
+#undef hv_console_set_ipi
+
+/*
+ * Provide macros based on <linux/syscalls.h> to provide a wrapper
+ * function that invokes the same function with an underscore prefix.
+ * We can't use the existing __SC_xxx macros because we need to
+ * support up to nine arguments rather than up to six, and also this
+ * way the file stands alone from possible changes in the
+ * implementation of <linux/syscalls.h>.
+ */
+#define HV_WRAP0(type, name)                                   \
+       type name(void);                                        \
+       type name(void)                                         \
+       {                                                       \
+               return _##name();                               \
+       }
+#define __HV_DECL1(t1, a1)     t1 a1
+#define __HV_DECL2(t2, a2, ...) t2 a2, __HV_DECL1(__VA_ARGS__)
+#define __HV_DECL3(t3, a3, ...) t3 a3, __HV_DECL2(__VA_ARGS__)
+#define __HV_DECL4(t4, a4, ...) t4 a4, __HV_DECL3(__VA_ARGS__)
+#define __HV_DECL5(t5, a5, ...) t5 a5, __HV_DECL4(__VA_ARGS__)
+#define __HV_DECL6(t6, a6, ...) t6 a6, __HV_DECL5(__VA_ARGS__)
+#define __HV_DECL7(t7, a7, ...) t7 a7, __HV_DECL6(__VA_ARGS__)
+#define __HV_DECL8(t8, a8, ...) t8 a8, __HV_DECL7(__VA_ARGS__)
+#define __HV_DECL9(t9, a9, ...) t9 a9, __HV_DECL8(__VA_ARGS__)
+#define __HV_PASS1(t1, a1)     a1
+#define __HV_PASS2(t2, a2, ...) a2, __HV_PASS1(__VA_ARGS__)
+#define __HV_PASS3(t3, a3, ...) a3, __HV_PASS2(__VA_ARGS__)
+#define __HV_PASS4(t4, a4, ...) a4, __HV_PASS3(__VA_ARGS__)
+#define __HV_PASS5(t5, a5, ...) a5, __HV_PASS4(__VA_ARGS__)
+#define __HV_PASS6(t6, a6, ...) a6, __HV_PASS5(__VA_ARGS__)
+#define __HV_PASS7(t7, a7, ...) a7, __HV_PASS6(__VA_ARGS__)
+#define __HV_PASS8(t8, a8, ...) a8, __HV_PASS7(__VA_ARGS__)
+#define __HV_PASS9(t9, a9, ...) a9, __HV_PASS8(__VA_ARGS__)
+#define HV_WRAPx(x, type, name, ...)                           \
+       type name(__HV_DECL##x(__VA_ARGS__));                   \
+       type name(__HV_DECL##x(__VA_ARGS__))                    \
+       {                                                       \
+               return _##name(__HV_PASS##x(__VA_ARGS__));      \
+       }
+#define HV_WRAP1(type, name, ...) HV_WRAPx(1, type, name, __VA_ARGS__)
+#define HV_WRAP2(type, name, ...) HV_WRAPx(2, type, name, __VA_ARGS__)
+#define HV_WRAP3(type, name, ...) HV_WRAPx(3, type, name, __VA_ARGS__)
+#define HV_WRAP4(type, name, ...) HV_WRAPx(4, type, name, __VA_ARGS__)
+#define HV_WRAP5(type, name, ...) HV_WRAPx(5, type, name, __VA_ARGS__)
+#define HV_WRAP6(type, name, ...) HV_WRAPx(6, type, name, __VA_ARGS__)
+#define HV_WRAP7(type, name, ...) HV_WRAPx(7, type, name, __VA_ARGS__)
+#define HV_WRAP8(type, name, ...) HV_WRAPx(8, type, name, __VA_ARGS__)
+#define HV_WRAP9(type, name, ...) HV_WRAPx(9, type, name, __VA_ARGS__)
+
+/* List all the hypervisor API functions. */
+HV_WRAP4(void, hv_init, HV_VersionNumber, interface_version_number,
+        int, chip_num, int, chip_rev_num, int, client_pl)
+HV_WRAP1(long, hv_sysconf, HV_SysconfQuery, query)
+HV_WRAP3(int, hv_confstr, HV_ConfstrQuery, query, HV_VirtAddr, buf, int, len)
+#if CHIP_HAS_IPI()
+HV_WRAP3(int, hv_get_ipi_pte, HV_Coord, tile, int, pl, HV_PTE*, pte)
+HV_WRAP3(int, hv_console_set_ipi, int, ipi, int, event, HV_Coord, coord);
+#else
+HV_WRAP1(void, hv_enable_intr, HV_IntrMask, enab_mask)
+HV_WRAP1(void, hv_disable_intr, HV_IntrMask, disab_mask)
+HV_WRAP1(void, hv_clear_intr, HV_IntrMask, clear_mask)
+HV_WRAP1(void, hv_raise_intr, HV_IntrMask, raise_mask)
+HV_WRAP2(HV_Errno, hv_trigger_ipi, HV_Coord, tile, int, interrupt)
+#endif /* !CHIP_HAS_IPI() */
+HV_WRAP3(int, hv_store_mapping, HV_VirtAddr, va, unsigned int, len,
+        HV_PhysAddr, pa)
+HV_WRAP2(HV_PhysAddr, hv_inquire_realpa, HV_PhysAddr, cpa, unsigned int, len)
+HV_WRAP0(HV_RTCTime, hv_get_rtc)
+HV_WRAP1(void, hv_set_rtc, HV_RTCTime, time)
+HV_WRAP3(HV_SetSpeed, hv_set_speed, unsigned long, speed, __hv64, start_cycle,
+        unsigned long, flags)
+HV_WRAP4(int, hv_install_context, HV_PhysAddr, page_table, HV_PTE, access,
+        HV_ASID, asid, __hv32, flags)
+HV_WRAP4(int, hv_install_virt_context, HV_PhysAddr, page_table, HV_PTE, access,
+        HV_ASID, asid, __hv32, flags)
+HV_WRAP4(int, hv_install_guest_context, HV_PhysAddr, page_table, HV_PTE, access,
+        HV_ASID, asid, __hv32, flags)
+HV_WRAP2(int, hv_set_pte_super_shift, int, level, int, log2_count)
+HV_WRAP0(HV_Context, hv_inquire_context)
+HV_WRAP0(HV_Context, hv_inquire_virt_context)
+HV_WRAP0(HV_Context, hv_inquire_guest_context)
+HV_WRAP1(int, hv_flush_asid, HV_ASID, asid)
+HV_WRAP2(int, hv_flush_page, HV_VirtAddr, address, HV_PageSize, page_size)
+HV_WRAP3(int, hv_flush_pages, HV_VirtAddr, start, HV_PageSize, page_size,
+        unsigned long, size)
+HV_WRAP1(int, hv_flush_all, int, preserve_global)
+HV_WRAP2(void, hv_restart, HV_VirtAddr, cmd, HV_VirtAddr, args)
+HV_WRAP0(void, hv_halt)
+HV_WRAP0(void, hv_power_off)
+HV_WRAP1(int, hv_reexec, HV_PhysAddr, entry)
+HV_WRAP0(HV_Topology, hv_inquire_topology)
+HV_WRAP3(HV_Errno, hv_inquire_tiles, HV_InqTileSet, set, HV_VirtAddr, cpumask,
+        int, length)
+HV_WRAP1(HV_PhysAddrRange, hv_inquire_physical, int, idx)
+HV_WRAP2(HV_MemoryControllerInfo, hv_inquire_memory_controller, HV_Coord, coord,
+        int, controller)
+HV_WRAP1(HV_VirtAddrRange, hv_inquire_virtual, int, idx)
+HV_WRAP1(HV_ASIDRange, hv_inquire_asid, int, idx)
+HV_WRAP1(void, hv_nanosleep, int, nanosecs)
+HV_WRAP0(int, hv_console_read_if_ready)
+HV_WRAP1(void, hv_console_putc, int, byte)
+HV_WRAP2(int, hv_console_write, HV_VirtAddr, bytes, int, len)
+HV_WRAP0(void, hv_downcall_dispatch)
+HV_WRAP1(int, hv_fs_findfile, HV_VirtAddr, filename)
+HV_WRAP1(HV_FS_StatInfo, hv_fs_fstat, int, inode)
+HV_WRAP4(int, hv_fs_pread, int, inode, HV_VirtAddr, buf,
+        int, length, int, offset)
+HV_WRAP2(unsigned long long, hv_physaddr_read64, HV_PhysAddr, addr,
+        HV_PTE, access)
+HV_WRAP3(void, hv_physaddr_write64, HV_PhysAddr, addr, HV_PTE, access,
+        unsigned long long, val)
+HV_WRAP2(int, hv_get_command_line, HV_VirtAddr, buf, int, length)
+HV_WRAP2(HV_Errno, hv_set_command_line, HV_VirtAddr, buf, int, length)
+HV_WRAP1(void, hv_set_caching, unsigned long, bitmask)
+HV_WRAP2(void, hv_bzero_page, HV_VirtAddr, va, unsigned int, size)
+HV_WRAP1(HV_Errno, hv_register_message_state, HV_MsgState*, msgstate)
+HV_WRAP4(int, hv_send_message, HV_Recipient *, recips, int, nrecip,
+        HV_VirtAddr, buf, int, buflen)
+HV_WRAP3(HV_RcvMsgInfo, hv_receive_message, HV_MsgState, msgstate,
+        HV_VirtAddr, buf, int, buflen)
+HV_WRAP0(void, hv_start_all_tiles)
+HV_WRAP2(int, hv_dev_open, HV_VirtAddr, name, __hv32, flags)
+HV_WRAP1(int, hv_dev_close, int, devhdl)
+HV_WRAP5(int, hv_dev_pread, int, devhdl, __hv32, flags, HV_VirtAddr, va,
+        __hv32, len, __hv64, offset)
+HV_WRAP5(int, hv_dev_pwrite, int, devhdl, __hv32, flags, HV_VirtAddr, va,
+        __hv32, len, __hv64, offset)
+HV_WRAP3(int, hv_dev_poll, int, devhdl, __hv32, events, HV_IntArg, intarg)
+HV_WRAP1(int, hv_dev_poll_cancel, int, devhdl)
+HV_WRAP6(int, hv_dev_preada, int, devhdl, __hv32, flags, __hv32, sgl_len,
+        HV_SGL *, sglp, __hv64, offset, HV_IntArg, intarg)
+HV_WRAP6(int, hv_dev_pwritea, int, devhdl, __hv32, flags, __hv32, sgl_len,
+        HV_SGL *, sglp, __hv64, offset, HV_IntArg, intarg)
+HV_WRAP9(int, hv_flush_remote, HV_PhysAddr, cache_pa,
+        unsigned long, cache_control, unsigned long*, cache_cpumask,
+        HV_VirtAddr, tlb_va, unsigned long, tlb_length,
+        unsigned long, tlb_pgsize, unsigned long*, tlb_cpumask,
+        HV_Remote_ASID*, asids, int, asidcount)
index cb52d66343ed7aa72d3cc5e22535df5b3b6a0e2a..00f06448b0c92df6bd9093faa352c071680bcbf6 100644 (file)
 #include <arch/interrupts.h>
 #include <arch/spr_def.h>
 
-#ifdef CONFIG_PREEMPT
-# error "No support for kernel preemption currently"
-#endif
-
 #define PTREGS_PTR(reg, ptreg) addli reg, sp, C_ABI_SAVE_AREA_SIZE + (ptreg)
 
 #define PTREGS_OFFSET_SYSCALL PTREGS_OFFSET_REG(TREG_SYSCALL_NR)
 
-#if !CHIP_HAS_WH64()
-       /* By making this an empty macro, we can use wh64 in the code. */
-       .macro  wh64 reg
-       .endm
-#endif
-
        .macro  push_reg reg, ptr=sp, delta=-4
        {
         sw     \ptr, \reg
@@ -189,7 +179,7 @@ intvec_\vecname:
         * point sp at the top aligned address on the actual stack page.
         */
        mfspr   r0, SPR_SYSTEM_SAVE_K_0
-       mm      r0, r0, zero, LOG2_THREAD_SIZE, 31
+       mm      r0, r0, zero, LOG2_NR_CPU_IDS, 31
 
 0:
        /*
@@ -207,6 +197,9 @@ intvec_\vecname:
         *    cache line 1: r14...r29
         *    cache line 0: 2 x frame, r0..r13
         */
+#if STACK_TOP_DELTA != 64
+#error STACK_TOP_DELTA must be 64 for assumptions here and in task_pt_regs()
+#endif
        andi    r0, r0, -64
 
        /*
@@ -326,18 +319,14 @@ intvec_\vecname:
         movei  r3, -1   /* not used, but set for consistency */
        }
        .else
-#if CHIP_HAS_AUX_PERF_COUNTERS()
        .ifc \c_routine, op_handle_aux_perf_interrupt
        {
         mfspr  r2, AUX_PERF_COUNT_STS
         movei  r3, -1   /* not used, but set for consistency */
        }
        .else
-#endif
        movei   r3, 0
-#if CHIP_HAS_AUX_PERF_COUNTERS()
        .endif
-#endif
        .endif
        .endif
        .endif
@@ -354,7 +343,7 @@ intvec_\vecname:
 #ifdef __COLLECT_LINKER_FEEDBACK__
        .pushsection .text.intvec_feedback,"ax"
        .org    (\vecnum << 5)
-       FEEDBACK_ENTER_EXPLICIT(intvec_\vecname, .intrpt1, 1 << 8)
+       FEEDBACK_ENTER_EXPLICIT(intvec_\vecname, .intrpt, 1 << 8)
        jrp     lr
        .popsection
 #endif
@@ -468,7 +457,7 @@ intvec_\vecname:
        }
        {
         auli   r21, r21, ha16(__per_cpu_offset)
-        mm     r20, r20, zero, 0, LOG2_THREAD_SIZE-1
+        mm     r20, r20, zero, 0, LOG2_NR_CPU_IDS-1
        }
        s2a     r20, r20, r21
        lw      tp, r20
@@ -562,7 +551,6 @@ intvec_\vecname:
        .endif
        mtspr   INTERRUPT_CRITICAL_SECTION, zero
 
-#if CHIP_HAS_WH64()
        /*
         * Prepare the first 256 stack bytes to be rapidly accessible
         * without having to fetch the background data.  We don't really
@@ -583,7 +571,6 @@ intvec_\vecname:
         addi   r52, r52, -64
        }
        wh64    r52
-#endif
 
 #ifdef CONFIG_TRACE_IRQFLAGS
        .ifnc \function,handle_nmi
@@ -762,7 +749,7 @@ intvec_\vecname:
        .macro  dc_dispatch vecnum, vecname
        .org    (\vecnum << 8)
 intvec_\vecname:
-       j       hv_downcall_dispatch
+       j       _hv_downcall_dispatch
        ENDPROC(intvec_\vecname)
        .endm
 
@@ -807,33 +794,50 @@ handle_interrupt:
 STD_ENTRY(interrupt_return)
        /* If we're resuming to kernel space, don't check thread flags. */
        {
-        bnz    r30, .Lrestore_all  /* NMIs don't special-case user-space */
+        bnz    r30, restore_all  /* NMIs don't special-case user-space */
         PTREGS_PTR(r29, PTREGS_OFFSET_EX1)
        }
        lw      r29, r29
        andi    r29, r29, SPR_EX_CONTEXT_1_1__PL_MASK  /* mask off ICS */
+       bzt     r29, .Lresume_userspace
+
+#ifdef CONFIG_PREEMPT
+       /* Returning to kernel space. Check if we need preemption. */
+       GET_THREAD_INFO(r29)
+       addli   r28, r29, THREAD_INFO_FLAGS_OFFSET
        {
-        bzt    r29, .Lresume_userspace
-        PTREGS_PTR(r29, PTREGS_OFFSET_PC)
+        lw     r28, r28
+        addli  r29, r29, THREAD_INFO_PREEMPT_COUNT_OFFSET
        }
+       {
+        andi   r28, r28, _TIF_NEED_RESCHED
+        lw     r29, r29
+       }
+       bzt     r28, 1f
+       bnz     r29, 1f
+       jal     preempt_schedule_irq
+       FEEDBACK_REENTER(interrupt_return)
+1:
+#endif
 
        /* If we're resuming to _cpu_idle_nap, bump PC forward by 8. */
        {
-        lw     r28, r29
+        PTREGS_PTR(r29, PTREGS_OFFSET_PC)
         moveli r27, lo16(_cpu_idle_nap)
        }
        {
+        lw     r28, r29
         auli   r27, r27, ha16(_cpu_idle_nap)
        }
        {
         seq    r27, r27, r28
        }
        {
-        bbns   r27, .Lrestore_all
+        bbns   r27, restore_all
         addi   r28, r28, 8
        }
        sw      r29, r28
-       j       .Lrestore_all
+       j       restore_all
 
 .Lresume_userspace:
        FEEDBACK_REENTER(interrupt_return)
@@ -871,7 +875,7 @@ STD_ENTRY(interrupt_return)
         auli   r1, r1, ha16(_TIF_ALLWORK_MASK)
        }
        and     r1, r29, r1
-       bzt     r1, .Lrestore_all
+       bzt     r1, restore_all
 
        /*
         * Make sure we have all the registers saved for signal
@@ -910,7 +914,9 @@ STD_ENTRY(interrupt_return)
         * profile interrupt will actually disable interrupts in both SPRs
         * before returning, which is OK.)
         */
-.Lrestore_all:
+       .global restore_all
+       .type restore_all, @function
+restore_all:
        PTREGS_PTR(r0, PTREGS_OFFSET_EX1)
        {
         lw     r0, r0
@@ -1420,7 +1426,6 @@ handle_ill:
        {
         lw     r0, r0          /* indirect thru thread_info to get task_info*/
         addi   r1, sp, C_ABI_SAVE_AREA_SIZE  /* put ptregs pointer into r1 */
-        move   r2, zero        /* load error code into r2 */
        }
 
        jal     send_sigtrap    /* issue a SIGTRAP */
@@ -1518,12 +1523,10 @@ STD_ENTRY(_sys_clone)
        __HEAD
        .align 64
        /* Align much later jump on the start of a cache line. */
-#if !ATOMIC_LOCKS_FOUND_VIA_TABLE()
        nop
 #if PAGE_SIZE >= 0x10000
        nop
 #endif
-#endif
 ENTRY(sys_cmpxchg)
 
        /*
@@ -1557,45 +1560,6 @@ ENTRY(sys_cmpxchg)
 # error Code here assumes PAGE_OFFSET can be loaded with just hi16()
 #endif
 
-#if ATOMIC_LOCKS_FOUND_VIA_TABLE()
-       {
-        /* Check for unaligned input. */
-        bnz    sp, .Lcmpxchg_badaddr
-        mm     r25, r0, zero, 3, PAGE_SHIFT-1
-       }
-       {
-        crc32_32 r25, zero, r25
-        moveli r21, lo16(atomic_lock_ptr)
-       }
-       {
-        auli   r21, r21, ha16(atomic_lock_ptr)
-        auli   r23, zero, hi16(PAGE_OFFSET)  /* hugepage-aligned */
-       }
-       {
-        shri   r20, r25, 32 - ATOMIC_HASH_L1_SHIFT
-        slt_u  r23, r0, r23
-        lw     r26, r0  /* see comment in the "#else" for the "lw r26". */
-       }
-       {
-        s2a    r21, r20, r21
-        bbns   r23, .Lcmpxchg_badaddr
-       }
-       {
-        lw     r21, r21
-        seqi   r23, TREG_SYSCALL_NR_NAME, __NR_FAST_cmpxchg64
-        andi   r25, r25, ATOMIC_HASH_L2_SIZE - 1
-       }
-       {
-        /* Branch away at this point if we're doing a 64-bit cmpxchg. */
-        bbs    r23, .Lcmpxchg64
-        andi   r23, r0, 7       /* Precompute alignment for cmpxchg64. */
-       }
-       {
-        s2a    ATOMIC_LOCK_REG_NAME, r25, r21
-        j      .Lcmpxchg32_tns   /* see comment in the #else for the jump. */
-       }
-
-#else /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */
        {
         /* Check for unaligned input. */
         bnz    sp, .Lcmpxchg_badaddr
@@ -1609,7 +1573,7 @@ ENTRY(sys_cmpxchg)
          * Because of C pointer arithmetic, we want to compute this:
          *
          * ((char*)atomic_locks +
-         *  (((r0 >> 3) & (1 << (ATOMIC_HASH_SIZE - 1))) << 2))
+         *  (((r0 >> 3) & ((1 << ATOMIC_HASH_SHIFT) - 1)) << 2))
          *
          * Instead of two shifts we just ">> 1", and use 'mm'
          * to ignore the low and high bits we don't want.
@@ -1620,12 +1584,9 @@ ENTRY(sys_cmpxchg)
 
         /*
          * Ensure that the TLB is loaded before we take out the lock.
-         * On tilepro, this will start fetching the value all the way
-         * into our L1 as well (and if it gets modified before we
-         * grab the lock, it will be invalidated from our cache
-         * before we reload it).  On tile64, we'll start fetching it
-         * into our L1 if we're the home, and if we're not, we'll
-         * still at least start fetching it into the home's L2.
+         * This will start fetching the value all the way into our L1
+         * as well (and if it gets modified before we grab the lock,
+         * it will be invalidated from our cache before we reload it).
          */
         lw     r26, r0
        }
@@ -1668,8 +1629,6 @@ ENTRY(sys_cmpxchg)
         j      .Lcmpxchg32_tns
        }
 
-#endif /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */
-
 /* Symbol for do_page_fault_ics() to use to compare against the PC. */
 .global __sys_cmpxchg_grab_lock
 __sys_cmpxchg_grab_lock:
@@ -1807,9 +1766,6 @@ __sys_cmpxchg_grab_lock:
        .align 64
 .Lcmpxchg64:
        {
-#if ATOMIC_LOCKS_FOUND_VIA_TABLE()
-        s2a    ATOMIC_LOCK_REG_NAME, r25, r21
-#endif
         bzt     r23, .Lcmpxchg64_tns
        }
        j       .Lcmpxchg_badaddr
@@ -1875,8 +1831,8 @@ int_unalign:
        push_extra_callee_saves r0
        j       do_trap
 
-/* Include .intrpt1 array of interrupt vectors */
-       .section ".intrpt1", "ax"
+/* Include .intrpt array of interrupt vectors */
+       .section ".intrpt", "ax"
 
 #define op_handle_perf_interrupt bad_intr
 #define op_handle_aux_perf_interrupt bad_intr
@@ -1944,10 +1900,8 @@ int_unalign:
                     do_page_fault
        int_hand     INT_SN_CPL, SN_CPL, bad_intr
        int_hand     INT_DOUBLE_FAULT, DOUBLE_FAULT, do_trap
-#if CHIP_HAS_AUX_PERF_COUNTERS()
        int_hand     INT_AUX_PERF_COUNT, AUX_PERF_COUNT, \
                     op_handle_aux_perf_interrupt, handle_nmi
-#endif
 
        /* Synthetic interrupt delivered only by the simulator */
        int_hand     INT_BREAKPOINT, BREAKPOINT, do_breakpoint
index 85d483957027e243c15065dbdc5a0e4b3af17ead..8299f295b394d29b93604b3fa5b0f1561664260b 100644 (file)
 #include <linux/linkage.h>
 #include <linux/errno.h>
 #include <linux/unistd.h>
+#include <linux/init.h>
 #include <asm/ptrace.h>
 #include <asm/thread_info.h>
 #include <asm/irqflags.h>
 #include <asm/asm-offsets.h>
 #include <asm/types.h>
+#include <asm/traps.h>
 #include <asm/signal.h>
 #include <hv/hypervisor.h>
 #include <arch/abi.h>
 #include <arch/interrupts.h>
 #include <arch/spr_def.h>
-
-#ifdef CONFIG_PREEMPT
-# error "No support for kernel preemption currently"
+#include <arch/opcode.h>
+#ifdef CONFIG_KVM
+#include <asm/kvm_host.h>
 #endif
 
 #define PTREGS_PTR(reg, ptreg) addli reg, sp, C_ABI_SAVE_AREA_SIZE + (ptreg)
 
 #define PTREGS_OFFSET_SYSCALL PTREGS_OFFSET_REG(TREG_SYSCALL_NR)
 
+#if CONFIG_KERNEL_PL == 1 || CONFIG_KERNEL_PL == 2
+/*
+ * Set "result" non-zero if ex1 holds the PL of the kernel
+ * (with or without ICS being set).  Note this works only
+ * because we never find the PL at level 3.
+ */
+# define IS_KERNEL_EX1(result, ex1) andi result, ex1, CONFIG_KERNEL_PL
+#else
+# error Recode IS_KERNEL_EX1 for CONFIG_KERNEL_PL
+#endif
 
        .macro  push_reg reg, ptr=sp, delta=-8
        {
        }
        .endm
 
+       /*
+        * Unalign data exception fast handling: In order to handle
+        * unaligned data access, a fast JIT version is generated and stored
+        * in a specific area in user space. We first need to do a quick poke
+        * to see if the JIT is available. We use certain bits in the fault
+        * PC (3 to 9 is used for 16KB page size) as index to address the JIT
+        * code area. The first 64bit word is the fault PC, and the 2nd one is
+        * the fault bundle itself. If these 2 words both match, then we
+        * directly "iret" to JIT code. If not, a slow path is invoked to
+        * generate new JIT code. Note: the current JIT code WILL be
+        * overwritten if it existed. So, ideally we can handle 128 unalign
+        * fixups via JIT. For lookup efficiency and to effectively support
+        * tight loops with multiple unaligned reference, a simple
+        * direct-mapped cache is used.
+        *
+        * SPR_EX_CONTEXT_K_0 is modified to return to JIT code.
+        * SPR_EX_CONTEXT_K_1 has ICS set.
+        * SPR_EX_CONTEXT_0_0 is setup to user program's next PC.
+        * SPR_EX_CONTEXT_0_1 = 0.
+        */
+       .macro int_hand_unalign_fast  vecnum, vecname
+       .org  (\vecnum << 8)
+intvec_\vecname:
+       /* Put r3 in SPR_SYSTEM_SAVE_K_1.  */
+       mtspr   SPR_SYSTEM_SAVE_K_1, r3
+
+       mfspr   r3, SPR_EX_CONTEXT_K_1
+       /*
+        * Examine if exception comes from user without ICS set.
+        * If not, just go directly to the slow path.
+        */
+       bnez    r3, hand_unalign_slow_nonuser
+
+       mfspr   r3, SPR_SYSTEM_SAVE_K_0
+
+       /* Get &thread_info->unalign_jit_tmp[0] in r3. */
+       bfexts  r3, r3, 0, CPU_SHIFT-1
+       mm      r3, zero, LOG2_THREAD_SIZE, 63
+       addli   r3, r3, THREAD_INFO_UNALIGN_JIT_TMP_OFFSET
+
+       /*
+        * Save r0, r1, r2 into thread_info array r3 points to
+        * from low to high memory in order.
+        */
+       st_add  r3, r0, 8
+       st_add  r3, r1, 8
+       {
+        st_add r3, r2, 8
+        andi   r2, sp, 7
+       }
+
+       /* Save stored r3 value so we can revert it on a page fault. */
+       mfspr   r1, SPR_SYSTEM_SAVE_K_1
+       st      r3, r1
+
+       {
+        /* Generate a SIGBUS if sp is not 8-byte aligned. */
+        bnez   r2, hand_unalign_slow_badsp
+       }
+
+       /*
+        * Get the thread_info in r0; load r1 with pc. Set the low bit of sp
+        * as an indicator to the page fault code in case we fault.
+        */
+       {
+        ori    sp, sp, 1
+        mfspr  r1, SPR_EX_CONTEXT_K_0
+       }
+
+       /* Add the jit_info offset in thread_info; extract r1 [3:9] into r2. */
+       {
+        addli  r0, r3, THREAD_INFO_UNALIGN_JIT_BASE_OFFSET - \
+         (THREAD_INFO_UNALIGN_JIT_TMP_OFFSET + (3 * 8))
+        bfextu r2, r1, 3, (2 + PAGE_SHIFT - UNALIGN_JIT_SHIFT)
+       }
+
+       /* Load the jit_info; multiply r2 by 128. */
+       {
+        ld     r0, r0
+        shli   r2, r2, UNALIGN_JIT_SHIFT
+       }
+
+       /*
+        * If r0 is NULL, the JIT page is not mapped, so go to slow path;
+        * add offset r2 to r0 at the same time.
+        */
+       {
+        beqz   r0, hand_unalign_slow
+        add    r2, r0, r2
+       }
+
+        /*
+        * We are loading from userspace (both the JIT info PC and
+        * instruction word, and the instruction word we executed)
+        * and since either could fault while holding the interrupt
+        * critical section, we must tag this region and check it in
+        * do_page_fault() to handle it properly.
+        */
+ENTRY(__start_unalign_asm_code)
+
+       /* Load first word of JIT in r0 and increment r2 by 8. */
+       ld_add  r0, r2, 8
+
+       /*
+        * Compare the PC with the 1st word in JIT; load the fault bundle
+        * into r1.
+        */
+       {
+        cmpeq  r0, r0, r1
+        ld     r1, r1
+       }
+
+       /* Go to slow path if PC doesn't match. */
+       beqz    r0, hand_unalign_slow
+
+       /*
+        * Load the 2nd word of JIT, which is supposed to be the fault
+        * bundle for a cache hit. Increment r2; after this bundle r2 will
+        * point to the potential start of the JIT code we want to run.
+        */
+       ld_add  r0, r2, 8
+
+       /* No further accesses to userspace are done after this point. */
+ENTRY(__end_unalign_asm_code)
+
+       /* Compare the real bundle with what is saved in the JIT area. */
+       {
+        cmpeq  r0, r1, r0
+        mtspr  SPR_EX_CONTEXT_0_1, zero
+       }
+
+       /* Go to slow path if the fault bundle does not match. */
+       beqz    r0, hand_unalign_slow
+
+       /*
+        * A cache hit is found.
+        * r2 points to start of JIT code (3rd word).
+        * r0 is the fault pc.
+        * r1 is the fault bundle.
+        * Reset the low bit of sp.
+        */
+       {
+        mfspr  r0, SPR_EX_CONTEXT_K_0
+        andi   sp, sp, ~1
+       }
+
+       /* Write r2 into EX_CONTEXT_K_0 and increment PC. */
+       {
+        mtspr  SPR_EX_CONTEXT_K_0, r2
+        addi   r0, r0, 8
+       }
+
+       /*
+        * Set ICS on kernel EX_CONTEXT_K_1 in order to "iret" to
+        * user with ICS set. This way, if the JIT fixup causes another
+        * unalign exception (which shouldn't be possible) the user
+        * process will be terminated with SIGBUS. Also, our fixup will
+        * run without interleaving with external interrupts.
+        * Each fixup is at most 14 bundles, so it won't hold ICS for long.
+        */
+       {
+        movei  r1, PL_ICS_EX1(USER_PL, 1)
+        mtspr  SPR_EX_CONTEXT_0_0, r0
+       }
+
+       {
+        mtspr  SPR_EX_CONTEXT_K_1, r1
+        addi   r3, r3, -(3 * 8)
+       }
+
+       /* Restore r0..r3. */
+       ld_add  r0, r3, 8
+       ld_add  r1, r3, 8
+       ld_add  r2, r3, 8
+       ld      r3, r3
+
+       iret
+       ENDPROC(intvec_\vecname)
+       .endm
 
 #ifdef __COLLECT_LINKER_FEEDBACK__
        .pushsection .text.intvec_feedback,"ax"
@@ -118,15 +309,21 @@ intvec_feedback:
         * The "processing" argument specifies the code for processing
         * the interrupt. Defaults to "handle_interrupt".
         */
-       .macro  int_hand vecnum, vecname, c_routine, processing=handle_interrupt
-       .org    (\vecnum << 8)
+       .macro __int_hand vecnum, vecname, c_routine,processing=handle_interrupt
 intvec_\vecname:
        /* Temporarily save a register so we have somewhere to work. */
 
        mtspr   SPR_SYSTEM_SAVE_K_1, r0
        mfspr   r0, SPR_EX_CONTEXT_K_1
 
-       andi    r0, r0, SPR_EX_CONTEXT_1_1__PL_MASK  /* mask off ICS */
+       /*
+        * The unalign data fastpath code sets the low bit in sp to
+        * force us to reset it here on fault.
+        */
+       {
+        blbs   sp, 2f
+        IS_KERNEL_EX1(r0, r0)
+       }
 
        .ifc    \vecnum, INT_DOUBLE_FAULT
        /*
@@ -164,10 +361,6 @@ intvec_\vecname:
         *
         * Note that the hypervisor *always* sets SYSTEM_SAVE_K_2 for
         * any path that turns into a downcall to one of our TLB handlers.
-        *
-        * FIXME: if we end up never using this path, perhaps we should
-        * prevent the hypervisor from generating downcalls in this case.
-        * The advantage of getting a downcall is we can panic in Linux.
         */
        mfspr   r0, SPR_SYSTEM_SAVE_K_2
        {
@@ -176,15 +369,15 @@ intvec_\vecname:
        }
        .endif
 
-
+2:
        /*
-        * SYSTEM_SAVE_K_0 holds the cpu number in the low bits, and
-        * the current stack top in the higher bits.  So we recover
-        * our stack top by just masking off the low bits, then
+        * SYSTEM_SAVE_K_0 holds the cpu number in the high bits, and
+        * the current stack top in the lower bits.  So we recover
+        * our starting stack value by sign-extending the low bits, then
         * point sp at the top aligned address on the actual stack page.
         */
        mfspr   r0, SPR_SYSTEM_SAVE_K_0
-       mm      r0, zero, LOG2_THREAD_SIZE, 63
+       bfexts  r0, r0, 0, CPU_SHIFT-1
 
 0:
        /*
@@ -206,6 +399,9 @@ intvec_\vecname:
         *    cache line 1: r6...r13
         *    cache line 0: 2 x frame, r0..r5
         */
+#if STACK_TOP_DELTA != 64
+#error STACK_TOP_DELTA must be 64 for assumptions here and in task_pt_regs()
+#endif
        andi    r0, r0, -64
 
        /*
@@ -304,8 +500,12 @@ intvec_\vecname:
        mfspr   r2, SPR_SYSTEM_SAVE_K_3   /* address of page fault */
        mfspr   r3, SPR_SYSTEM_SAVE_K_2   /* info about page fault */
        .else
+       .ifc \c_routine, kvm_vpgtable_miss
+       mfspr   r2, SPR_SYSTEM_SAVE_K_3   /* address of page fault */
+       mfspr   r3, SPR_SYSTEM_SAVE_K_2   /* info about page fault */
+       .else
        .ifc \vecnum, INT_ILL_TRANS
-       mfspr   r2, ILL_TRANS_REASON
+       mfspr   r2, ILL_VA_PC
        .else
        .ifc \vecnum, INT_DOUBLE_FAULT
        mfspr   r2, SPR_SYSTEM_SAVE_K_2   /* double fault info from HV */
@@ -315,12 +515,11 @@ intvec_\vecname:
        .else
        .ifc \c_routine, op_handle_perf_interrupt
        mfspr   r2, PERF_COUNT_STS
-#if CHIP_HAS_AUX_PERF_COUNTERS()
        .else
        .ifc \c_routine, op_handle_aux_perf_interrupt
        mfspr   r2, AUX_PERF_COUNT_STS
        .endif
-#endif
+       .endif
        .endif
        .endif
        .endif
@@ -339,7 +538,7 @@ intvec_\vecname:
 #ifdef __COLLECT_LINKER_FEEDBACK__
        .pushsection .text.intvec_feedback,"ax"
        .org    (\vecnum << 5)
-       FEEDBACK_ENTER_EXPLICIT(intvec_\vecname, .intrpt1, 1 << 8)
+       FEEDBACK_ENTER_EXPLICIT(intvec_\vecname, .intrpt, 1 << 8)
        jrp     lr
        .popsection
 #endif
@@ -455,24 +654,25 @@ intvec_\vecname:
        /*
         * If we will be returning to the kernel, we will need to
         * reset the interrupt masks to the state they had before.
-        * Set DISABLE_IRQ in flags iff we came from PL1 with irqs disabled.
+        * Set DISABLE_IRQ in flags iff we came from kernel pl with
+        * irqs disabled.
         */
-       mfspr   r32, SPR_EX_CONTEXT_K_1
+       mfspr   r22, SPR_EX_CONTEXT_K_1
        {
-        andi   r32, r32, SPR_EX_CONTEXT_1_1__PL_MASK  /* mask off ICS */
+        IS_KERNEL_EX1(r22, r22)
         PTREGS_PTR(r21, PTREGS_OFFSET_FLAGS)
        }
-       beqzt   r32, 1f       /* zero if from user space */
-       IRQS_DISABLED(r32)    /* zero if irqs enabled */
+       beqzt  r22, 1f        /* zero if from user space */
+       IRQS_DISABLED(r22)    /* zero if irqs enabled */
 #if PT_FLAGS_DISABLE_IRQ != 1
 # error Value of IRQS_DISABLED used to set PT_FLAGS_DISABLE_IRQ; fix
 #endif
 1:
        .ifnc \function,handle_syscall
        /* Record the fact that we saved the caller-save registers above. */
-       ori     r32, r32, PT_FLAGS_CALLER_SAVES
+       ori     r22, r22, PT_FLAGS_CALLER_SAVES
        .endif
-       st      r21, r32
+       st      r21, r22
 
        /*
         * we've captured enough state to the stack (including in
@@ -503,7 +703,7 @@ intvec_\vecname:
        }
        {
         shl16insli r21, r21, hw1(__per_cpu_offset)
-        bfextu r20, r20, 0, LOG2_THREAD_SIZE-1
+        bfextu r20, r20, CPU_SHIFT, 63
        }
        shl16insli r21, r21, hw0(__per_cpu_offset)
        shl3add r20, r20, r21
@@ -512,12 +712,29 @@ intvec_\vecname:
        move    tp, zero
 #endif
 
+       /*
+        * Prepare the first 256 stack bytes to be rapidly accessible
+        * without having to fetch the background data.
+        */
+       addi    r52, sp, -64
+       {
+        wh64   r52
+        addi   r52, r52, -64
+       }
+       {
+        wh64   r52
+        addi   r52, r52, -64
+       }
+       {
+        wh64   r52
+        addi   r52, r52, -64
+       }
+       wh64    r52
+
 #ifdef __COLLECT_LINKER_FEEDBACK__
        /*
         * Notify the feedback routines that we were in the
-        * appropriate fixed interrupt vector area.  Note that we
-        * still have ICS set at this point, so we can't invoke any
-        * atomic operations or we will panic.  The feedback
+        * appropriate fixed interrupt vector area.  The feedback
         * routines internally preserve r0..r10 and r30 up.
         */
        .ifnc \function,handle_syscall
@@ -536,23 +753,15 @@ intvec_\vecname:
 #endif
 
        /*
-        * Prepare the first 256 stack bytes to be rapidly accessible
-        * without having to fetch the background data.
+        * Stash any interrupt state in r30..r33 for now.
+        * This makes it easier to call C code in the code that follows.
+        * We don't need to on the syscall path since we reload
+        * them from the stack instead.
         */
-       addi    r52, sp, -64
-       {
-        wh64   r52
-        addi   r52, r52, -64
-       }
-       {
-        wh64   r52
-        addi   r52, r52, -64
-       }
-       {
-        wh64   r52
-        addi   r52, r52, -64
-       }
-       wh64    r52
+       .ifnc \function,handle_syscall
+       { move r30, r0; move r31, r1 }
+       { move r32, r2; move r33, r3 }
+       .endif
 
 #ifdef CONFIG_TRACE_IRQFLAGS
        .ifnc \function,handle_nmi
@@ -563,17 +772,8 @@ intvec_\vecname:
         * For syscalls, we already have the register state saved away
         * on the stack, so we don't bother to do any register saves here,
         * and later we pop the registers back off the kernel stack.
-        * For interrupt handlers, save r0-r3 in callee-saved registers.
         */
-       .ifnc \function,handle_syscall
-       { move r30, r0; move r31, r1 }
-       { move r32, r2; move r33, r3 }
-       .endif
        TRACE_IRQS_OFF
-       .ifnc \function,handle_syscall
-       { move r0, r30; move r1, r31 }
-       { move r2, r32; move r3, r33 }
-       .endif
        .endif
 #endif
 
@@ -585,7 +785,7 @@ intvec_\vecname:
        .macro  dc_dispatch vecnum, vecname
        .org    (\vecnum << 8)
 intvec_\vecname:
-       j       hv_downcall_dispatch
+       j       _hv_downcall_dispatch
        ENDPROC(intvec_\vecname)
        .endm
 
@@ -622,18 +822,51 @@ handle_interrupt:
 STD_ENTRY(interrupt_return)
        /* If we're resuming to kernel space, don't check thread flags. */
        {
-        bnez   r30, .Lrestore_all  /* NMIs don't special-case user-space */
+        bnez   r30, restore_all  /* NMIs don't special-case user-space */
         PTREGS_PTR(r29, PTREGS_OFFSET_EX1)
        }
        ld      r29, r29
-       andi    r29, r29, SPR_EX_CONTEXT_1_1__PL_MASK  /* mask off ICS */
+       IS_KERNEL_EX1(r29, r29)
        {
         beqzt  r29, .Lresume_userspace
-        PTREGS_PTR(r29, PTREGS_OFFSET_PC)
+        move   r29, sp
+       }
+
+#ifdef CONFIG_PREEMPT
+       /* Returning to kernel space. Check if we need preemption. */
+       EXTRACT_THREAD_INFO(r29)
+       addli   r28, r29, THREAD_INFO_FLAGS_OFFSET
+       {
+        ld     r28, r28
+        addli  r26, r29, THREAD_INFO_PREEMPT_COUNT_OFFSET
+       }
+       {
+        andi   r27, r28, _TIF_NEED_RESCHED
+        ld4s   r26, r26
+       }
+       beqzt   r27, 1f
+       bnez    r26, 1f
+#ifdef CONFIG_KVM
+       addli   r27, r29, THREAD_INFO_VCPU_OFFSET
+       ld      r27, r27
+       {
+        beqzt  r27, 0f
+        movei  r1, KVM_EXIT_AGAIN
        }
+       push_extra_callee_saves r0
+       j       kvm_trigger_vmexit
+0:
+#endif
+       jal     preempt_schedule_irq
+       FEEDBACK_REENTER(interrupt_return)
+1:
+#endif
 
        /* If we're resuming to _cpu_idle_nap, bump PC forward by 8. */
-       moveli  r27, hw2_last(_cpu_idle_nap)
+       {
+        moveli r27, hw2_last(_cpu_idle_nap)
+        PTREGS_PTR(r29, PTREGS_OFFSET_PC)
+       }
        {
         ld     r28, r29
         shl16insli r27, r27, hw1(_cpu_idle_nap)
@@ -645,11 +878,11 @@ STD_ENTRY(interrupt_return)
         cmpeq  r27, r27, r28
        }
        {
-        blbc   r27, .Lrestore_all
+        blbc   r27, restore_all
         addi   r28, r28, 8
        }
        st      r29, r28
-       j       .Lrestore_all
+       j       restore_all
 
 .Lresume_userspace:
        FEEDBACK_REENTER(interrupt_return)
@@ -689,7 +922,7 @@ STD_ENTRY(interrupt_return)
         shl16insli r1, r1, hw0(_TIF_ALLWORK_MASK)
        }
        and     r1, r29, r1
-       beqzt   r1, .Lrestore_all
+       beqzt   r1, restore_all
 
        /*
         * Make sure we have all the registers saved for signal
@@ -721,14 +954,16 @@ STD_ENTRY(interrupt_return)
         * ICS can only be used in very tight chunks of code to avoid
         * tripping over various assertions that it is off.
         */
-.Lrestore_all:
+       .global restore_all
+       .type restore_all, @function
+restore_all:
        PTREGS_PTR(r0, PTREGS_OFFSET_EX1)
        {
         ld      r0, r0
         PTREGS_PTR(r32, PTREGS_OFFSET_FLAGS)
        }
        {
-        andi   r0, r0, SPR_EX_CONTEXT_1_1__PL_MASK
+        IS_KERNEL_EX1(r0, r0)
         ld     r32, r32
        }
        bnez    r0, 1f
@@ -799,7 +1034,7 @@ STD_ENTRY(interrupt_return)
        pop_reg r21, sp, PTREGS_OFFSET_REG(31) - PTREGS_OFFSET_PC
        {
         mtspr  SPR_EX_CONTEXT_K_1, lr
-        andi   lr, lr, SPR_EX_CONTEXT_1_1__PL_MASK  /* mask off ICS */
+        IS_KERNEL_EX1(lr, lr)
        }
        {
         mtspr  SPR_EX_CONTEXT_K_0, r21
@@ -1223,10 +1458,51 @@ STD_ENTRY(_sys_clone)
        j       sys_clone
        STD_ENDPROC(_sys_clone)
 
-/* The single-step support may need to read all the registers. */
+       /*
+        * Recover r3, r2, r1 and r0 here saved by unalign fast vector.
+        * The vector area limit is 32 bundles, so we handle the reload here.
+        * r0, r1, r2 are in thread_info from low to high memory in order.
+        * r3 points to location the original r3 was saved.
+        * We put this code in the __HEAD section so it can be reached
+        * via a conditional branch from the fast path.
+        */
+       __HEAD
+hand_unalign_slow:
+       andi    sp, sp, ~1
+hand_unalign_slow_badsp:
+       addi    r3, r3, -(3 * 8)
+       ld_add  r0, r3, 8
+       ld_add  r1, r3, 8
+       ld      r2, r3
+hand_unalign_slow_nonuser:
+       mfspr   r3, SPR_SYSTEM_SAVE_K_1
+       __int_hand     INT_UNALIGN_DATA, UNALIGN_DATA_SLOW, int_unalign
+
+/* The unaligned data support needs to read all the registers. */
 int_unalign:
        push_extra_callee_saves r0
-       j       do_trap
+       j       do_unaligned
+ENDPROC(hand_unalign_slow)
+
+#ifdef CONFIG_KVM
+/*
+ * Any call path that may lead to a vmexit needs to save the full
+ * callee-save register state, since if we vmexit we don't unwind
+ * the callee-saves from the C function stack frames, and instead
+ * just save away the register state from the interrupt handler as-is
+ * and later reload it directly and call back into the guest.
+ */
+       .macro  save_callee_saves_and_tailcall func
+kvm_\func:
+       push_extra_callee_saves r0
+       j       kvm_do_\func
+       ENDPROC(\func)
+       .endm
+
+       save_callee_saves_and_tailcall hypervisor_call
+       save_callee_saves_and_tailcall vpgtable_miss
+       save_callee_saves_and_tailcall vguest_fatal
+#endif
 
 /* Fill the return address stack with nonzero entries. */
 STD_ENTRY(fill_ra_stack)
@@ -1240,14 +1516,68 @@ STD_ENTRY(fill_ra_stack)
 4:     jrp     r0
        STD_ENDPROC(fill_ra_stack)
 
-/* Include .intrpt1 array of interrupt vectors */
-       .section ".intrpt1", "ax"
+#ifdef CONFIG_KVM
+/*
+ * Handle the downcall dispatch service.  On entry, the client's
+ * system save register 3 holds the original contents of
+ * REG_SYSCALL_NR_NAME, which we need to restore before we iret to
+ * the correct interrupt vector.
+ * Note that we only support the INT_MESSAGE_RCV_DWNCL interrupt
+ * here, since this is the only interrupt handled this way on GX.
+ */
+handle_downcall_dispatch:
+       /*
+        * If we were called from PL0, jump back to slow path.
+        * We check just the low bit to make sure it's set, since we
+        * can only be called from PL0 or PL1.
+        */
+       mfspr   TREG_SYSCALL_NR_NAME, SPR_EX_CONTEXT_K_1
+       blbc    TREG_SYSCALL_NR_NAME, intvec_SWINT_0
+
+       /* Set the PC to the downcall interrupt vector, and PL to guest. */
+       mfspr   TREG_SYSCALL_NR_NAME, SPR_INTERRUPT_VECTOR_BASE_1
+       addli   TREG_SYSCALL_NR_NAME, TREG_SYSCALL_NR_NAME, \
+               INT_MESSAGE_RCV_DWNCL << 8
+       {
+        mtspr  SPR_EX_CONTEXT_K_0, TREG_SYSCALL_NR_NAME
+        movei  TREG_SYSCALL_NR_NAME, GUEST_PL | SPR_EX_CONTEXT_1_1__ICS_MASK
+       }
+       mtspr   SPR_EX_CONTEXT_K_1, TREG_SYSCALL_NR_NAME
+
+       /* Restore REG_SYSCALL_NR_NAME and return to the new vector. */
+       mfspr   TREG_SYSCALL_NR_NAME, SPR_SYSTEM_SAVE_1_3
+       iret
+
+       .macro int_hand_kvm_hcall  vecnum, vecname, c_routine, \
+              processing=handle_interrupt
+       .org   (\vecnum << 8)
+               /* Need special code for downcall dispatch syscall. */
+               beqz TREG_SYSCALL_NR_NAME, handle_downcall_dispatch
+               __int_hand   \vecnum, \vecname, \c_routine, \processing
+       .endm
+
+#endif /* CONFIG_KVM */
+
+       .macro int_hand  vecnum, vecname, c_routine, processing=handle_interrupt
+       .org   (\vecnum << 8)
+               __int_hand   \vecnum, \vecname, \c_routine, \processing
+       .endm
+
+/* Include .intrpt array of interrupt vectors */
+       .section ".intrpt", "ax"
+       .global intrpt_start
+intrpt_start:
 
 #define op_handle_perf_interrupt bad_intr
 #define op_handle_aux_perf_interrupt bad_intr
 
 #ifndef CONFIG_HARDWALL
 #define do_hardwall_trap bad_intr
+#endif
+
+#ifndef CONFIG_KVM
+#define kvm_vpgtable_miss bad_intr
+#define kvm_vguest_fatal bad_intr
 #endif
 
        int_hand     INT_MEM_ERROR, MEM_ERROR, do_trap
@@ -1270,14 +1600,24 @@ STD_ENTRY(fill_ra_stack)
        int_hand     INT_SWINT_3, SWINT_3, do_trap
        int_hand     INT_SWINT_2, SWINT_2, do_trap
        int_hand     INT_SWINT_1, SWINT_1, SYSCALL, handle_syscall
+#ifdef CONFIG_KVM
+       int_hand_kvm_hcall INT_SWINT_0, SWINT_0, kvm_hypervisor_call
+#else
        int_hand     INT_SWINT_0, SWINT_0, do_trap
+#endif
        int_hand     INT_ILL_TRANS, ILL_TRANS, do_trap
-       int_hand     INT_UNALIGN_DATA, UNALIGN_DATA, int_unalign
+       int_hand_unalign_fast INT_UNALIGN_DATA, UNALIGN_DATA
        int_hand     INT_DTLB_MISS, DTLB_MISS, do_page_fault
        int_hand     INT_DTLB_ACCESS, DTLB_ACCESS, do_page_fault
        int_hand     INT_IDN_FIREWALL, IDN_FIREWALL, do_hardwall_trap
        int_hand     INT_UDN_FIREWALL, UDN_FIREWALL, do_hardwall_trap
+#ifndef CONFIG_KVM_GUEST
        int_hand     INT_TILE_TIMER, TILE_TIMER, do_timer_interrupt
+       int_hand     INT_AUX_TILE_TIMER, AUX_TILE_TIMER, bad_intr
+#else
+       int_hand     INT_TILE_TIMER, TILE_TIMER, bad_intr
+       int_hand     INT_AUX_TILE_TIMER, AUX_TILE_TIMER, do_timer_interrupt
+#endif
        int_hand     INT_IDN_TIMER, IDN_TIMER, bad_intr
        int_hand     INT_UDN_TIMER, UDN_TIMER, bad_intr
        int_hand     INT_IDN_AVAIL, IDN_AVAIL, bad_intr
@@ -1307,8 +1647,10 @@ STD_ENTRY(fill_ra_stack)
        int_hand     INT_MESSAGE_RCV_DWNCL, MESSAGE_RCV_DWNCL, \
                     hv_message_intr
        int_hand     INT_DEV_INTR_DWNCL, DEV_INTR_DWNCL, bad_intr
-       int_hand     INT_I_ASID, I_ASID, bad_intr
-       int_hand     INT_D_ASID, D_ASID, bad_intr
+       int_hand     INT_VPGTABLE_MISS_DWNCL, VPGTABLE_MISS_DWNCL, \
+                    kvm_vpgtable_miss
+       int_hand     INT_VGUEST_FATAL_DWNCL, VGUEST_FATAL_DWNCL, \
+                    kvm_vguest_fatal
        int_hand     INT_DOUBLE_FAULT, DOUBLE_FAULT, do_trap
 
        /* Synthetic interrupt delivered only by the simulator */
index 3ccf2cd7182eb73b8789489f30a6786cb4ec25d7..0586fdb9352d2b2ca88cf48af4125216f135b3ae 100644 (file)
@@ -55,7 +55,8 @@ static DEFINE_PER_CPU(int, irq_depth);
 
 /* State for allocating IRQs on Gx. */
 #if CHIP_HAS_IPI()
-static unsigned long available_irqs = ~(1UL << IRQ_RESCHEDULE);
+static unsigned long available_irqs = ((1UL << NR_IRQS) - 1) &
+                                     (~(1UL << IRQ_RESCHEDULE));
 static DEFINE_SPINLOCK(available_irqs_lock);
 #endif
 
@@ -73,7 +74,8 @@ static DEFINE_SPINLOCK(available_irqs_lock);
 
 /*
  * The interrupt handling path, implemented in terms of HV interrupt
- * emulation on TILE64 and TILEPro, and IPI hardware on TILE-Gx.
+ * emulation on TILEPro, and IPI hardware on TILE-Gx.
+ * Entered with interrupts disabled.
  */
 void tile_dev_intr(struct pt_regs *regs, int intnum)
 {
@@ -233,7 +235,7 @@ void tile_irq_activate(unsigned int irq, int tile_irq_type)
 {
        /*
         * We use handle_level_irq() by default because the pending
-        * interrupt vector (whether modeled by the HV on TILE64 and
+        * interrupt vector (whether modeled by the HV on
         * TILEPro or implemented in hardware on TILE-Gx) has
         * level-style semantics for each bit.  An interrupt fires
         * whenever a bit is high, not just at edges.
diff --git a/arch/tile/kernel/kgdb.c b/arch/tile/kernel/kgdb.c
new file mode 100644 (file)
index 0000000..4cd8838
--- /dev/null
@@ -0,0 +1,499 @@
+/*
+ * Copyright 2013 Tilera Corporation. All Rights Reserved.
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful, but
+ *   WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ *   NON INFRINGEMENT.  See the GNU General Public License for
+ *   more details.
+ *
+ * TILE-Gx KGDB support.
+ */
+
+#include <linux/ptrace.h>
+#include <linux/kgdb.h>
+#include <linux/kdebug.h>
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <asm/cacheflush.h>
+
+static tile_bundle_bits singlestep_insn = TILEGX_BPT_BUNDLE | DIE_SSTEPBP;
+static unsigned long stepped_addr;
+static tile_bundle_bits stepped_instr;
+
+struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = {
+       { "r0", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[0])},
+       { "r1", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[1])},
+       { "r2", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[2])},
+       { "r3", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[3])},
+       { "r4", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[4])},
+       { "r5", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[5])},
+       { "r6", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[6])},
+       { "r7", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[7])},
+       { "r8", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[8])},
+       { "r9", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[9])},
+       { "r10", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[10])},
+       { "r11", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[11])},
+       { "r12", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[12])},
+       { "r13", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[13])},
+       { "r14", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[14])},
+       { "r15", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[15])},
+       { "r16", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[16])},
+       { "r17", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[17])},
+       { "r18", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[18])},
+       { "r19", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[19])},
+       { "r20", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[20])},
+       { "r21", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[21])},
+       { "r22", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[22])},
+       { "r23", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[23])},
+       { "r24", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[24])},
+       { "r25", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[25])},
+       { "r26", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[26])},
+       { "r27", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[27])},
+       { "r28", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[28])},
+       { "r29", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[29])},
+       { "r30", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[30])},
+       { "r31", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[31])},
+       { "r32", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[32])},
+       { "r33", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[33])},
+       { "r34", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[34])},
+       { "r35", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[35])},
+       { "r36", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[36])},
+       { "r37", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[37])},
+       { "r38", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[38])},
+       { "r39", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[39])},
+       { "r40", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[40])},
+       { "r41", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[41])},
+       { "r42", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[42])},
+       { "r43", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[43])},
+       { "r44", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[44])},
+       { "r45", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[45])},
+       { "r46", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[46])},
+       { "r47", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[47])},
+       { "r48", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[48])},
+       { "r49", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[49])},
+       { "r50", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[50])},
+       { "r51", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[51])},
+       { "r52", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[52])},
+       { "tp", GDB_SIZEOF_REG, offsetof(struct pt_regs, tp)},
+       { "sp", GDB_SIZEOF_REG, offsetof(struct pt_regs, sp)},
+       { "lr", GDB_SIZEOF_REG, offsetof(struct pt_regs, lr)},
+       { "sn", GDB_SIZEOF_REG, -1},
+       { "idn0", GDB_SIZEOF_REG, -1},
+       { "idn1", GDB_SIZEOF_REG, -1},
+       { "udn0", GDB_SIZEOF_REG, -1},
+       { "udn1", GDB_SIZEOF_REG, -1},
+       { "udn2", GDB_SIZEOF_REG, -1},
+       { "udn3", GDB_SIZEOF_REG, -1},
+       { "zero", GDB_SIZEOF_REG, -1},
+       { "pc", GDB_SIZEOF_REG, offsetof(struct pt_regs, pc)},
+       { "faultnum", GDB_SIZEOF_REG, offsetof(struct pt_regs, faultnum)},
+};
+
+char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
+{
+       if (regno >= DBG_MAX_REG_NUM || regno < 0)
+               return NULL;
+
+       if (dbg_reg_def[regno].offset != -1)
+               memcpy(mem, (void *)regs + dbg_reg_def[regno].offset,
+                      dbg_reg_def[regno].size);
+       else
+               memset(mem, 0, dbg_reg_def[regno].size);
+       return dbg_reg_def[regno].name;
+}
+
+int dbg_set_reg(int regno, void *mem, struct pt_regs *regs)
+{
+       if (regno >= DBG_MAX_REG_NUM || regno < 0)
+               return -EINVAL;
+
+       if (dbg_reg_def[regno].offset != -1)
+               memcpy((void *)regs + dbg_reg_def[regno].offset, mem,
+                      dbg_reg_def[regno].size);
+       return 0;
+}
+
+/*
+ * Similar to pt_regs_to_gdb_regs() except that process is sleeping and so
+ * we may not be able to get all the info.
+ */
+void
+sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task)
+{
+       int reg;
+       struct pt_regs *thread_regs;
+       unsigned long *ptr = gdb_regs;
+
+       if (task == NULL)
+               return;
+
+       /* Initialize to zero. */
+       memset(gdb_regs, 0, NUMREGBYTES);
+
+       thread_regs = task_pt_regs(task);
+       for (reg = 0; reg <= TREG_LAST_GPR; reg++)
+               *(ptr++) = thread_regs->regs[reg];
+
+       gdb_regs[TILEGX_PC_REGNUM] = thread_regs->pc;
+       gdb_regs[TILEGX_FAULTNUM_REGNUM] = thread_regs->faultnum;
+}
+
+void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
+{
+       regs->pc = pc;
+}
+
+static void kgdb_call_nmi_hook(void *ignored)
+{
+       kgdb_nmicallback(raw_smp_processor_id(), NULL);
+}
+
+void kgdb_roundup_cpus(unsigned long flags)
+{
+       local_irq_enable();
+       smp_call_function(kgdb_call_nmi_hook, NULL, 0);
+       local_irq_disable();
+}
+
+/*
+ * Convert a kernel address to the writable kernel text mapping.
+ */
+static unsigned long writable_address(unsigned long addr)
+{
+       unsigned long ret = 0;
+
+       if (core_kernel_text(addr))
+               ret = addr - MEM_SV_START + PAGE_OFFSET;
+       else if (is_module_text_address(addr))
+               ret = addr;
+       else
+               pr_err("Unknown virtual address 0x%lx\n", addr);
+
+       return ret;
+}
+
+/*
+ * Calculate the new address for after a step.
+ */
+static unsigned long get_step_address(struct pt_regs *regs)
+{
+       int src_reg;
+       int jump_off;
+       int br_off;
+       unsigned long addr;
+       unsigned int opcode;
+       tile_bundle_bits bundle;
+
+       /* Move to the next instruction by default. */
+       addr = regs->pc + TILEGX_BUNDLE_SIZE_IN_BYTES;
+       bundle = *(unsigned long *)instruction_pointer(regs);
+
+       /* 0: X mode, Otherwise: Y mode. */
+       if (bundle & TILEGX_BUNDLE_MODE_MASK) {
+               if (get_Opcode_Y1(bundle) == RRR_1_OPCODE_Y1 &&
+                   get_RRROpcodeExtension_Y1(bundle) ==
+                   UNARY_RRR_1_OPCODE_Y1) {
+                       opcode = get_UnaryOpcodeExtension_Y1(bundle);
+
+                       switch (opcode) {
+                       case JALR_UNARY_OPCODE_Y1:
+                       case JALRP_UNARY_OPCODE_Y1:
+                       case JR_UNARY_OPCODE_Y1:
+                       case JRP_UNARY_OPCODE_Y1:
+                               src_reg = get_SrcA_Y1(bundle);
+                               dbg_get_reg(src_reg, &addr, regs);
+                               break;
+                       }
+               }
+       } else if (get_Opcode_X1(bundle) == RRR_0_OPCODE_X1) {
+               if (get_RRROpcodeExtension_X1(bundle) ==
+                   UNARY_RRR_0_OPCODE_X1) {
+                       opcode = get_UnaryOpcodeExtension_X1(bundle);
+
+                       switch (opcode) {
+                       case JALR_UNARY_OPCODE_X1:
+                       case JALRP_UNARY_OPCODE_X1:
+                       case JR_UNARY_OPCODE_X1:
+                       case JRP_UNARY_OPCODE_X1:
+                               src_reg = get_SrcA_X1(bundle);
+                               dbg_get_reg(src_reg, &addr, regs);
+                               break;
+                       }
+               }
+       } else if (get_Opcode_X1(bundle) == JUMP_OPCODE_X1) {
+               opcode = get_JumpOpcodeExtension_X1(bundle);
+
+               switch (opcode) {
+               case JAL_JUMP_OPCODE_X1:
+               case J_JUMP_OPCODE_X1:
+                       jump_off = sign_extend(get_JumpOff_X1(bundle), 27);
+                       addr = regs->pc +
+                               (jump_off << TILEGX_LOG2_BUNDLE_SIZE_IN_BYTES);
+                       break;
+               }
+       } else if (get_Opcode_X1(bundle) == BRANCH_OPCODE_X1) {
+               br_off = 0;
+               opcode = get_BrType_X1(bundle);
+
+               switch (opcode) {
+               case BEQZT_BRANCH_OPCODE_X1:
+               case BEQZ_BRANCH_OPCODE_X1:
+                       if (get_SrcA_X1(bundle) == 0)
+                               br_off = get_BrOff_X1(bundle);
+                       break;
+               case BGEZT_BRANCH_OPCODE_X1:
+               case BGEZ_BRANCH_OPCODE_X1:
+                       if (get_SrcA_X1(bundle) >= 0)
+                               br_off = get_BrOff_X1(bundle);
+                       break;
+               case BGTZT_BRANCH_OPCODE_X1:
+               case BGTZ_BRANCH_OPCODE_X1:
+                       if (get_SrcA_X1(bundle) > 0)
+                               br_off = get_BrOff_X1(bundle);
+                       break;
+               case BLBCT_BRANCH_OPCODE_X1:
+               case BLBC_BRANCH_OPCODE_X1:
+                       if (!(get_SrcA_X1(bundle) & 1))
+                               br_off = get_BrOff_X1(bundle);
+                       break;
+               case BLBST_BRANCH_OPCODE_X1:
+               case BLBS_BRANCH_OPCODE_X1:
+                       if (get_SrcA_X1(bundle) & 1)
+                               br_off = get_BrOff_X1(bundle);
+                       break;
+               case BLEZT_BRANCH_OPCODE_X1:
+               case BLEZ_BRANCH_OPCODE_X1:
+                       if (get_SrcA_X1(bundle) <= 0)
+                               br_off = get_BrOff_X1(bundle);
+                       break;
+               case BLTZT_BRANCH_OPCODE_X1:
+               case BLTZ_BRANCH_OPCODE_X1:
+                       if (get_SrcA_X1(bundle) < 0)
+                               br_off = get_BrOff_X1(bundle);
+                       break;
+               case BNEZT_BRANCH_OPCODE_X1:
+               case BNEZ_BRANCH_OPCODE_X1:
+                       if (get_SrcA_X1(bundle) != 0)
+                               br_off = get_BrOff_X1(bundle);
+                       break;
+               }
+
+               if (br_off != 0) {
+                       br_off = sign_extend(br_off, 17);
+                       addr = regs->pc +
+                               (br_off << TILEGX_LOG2_BUNDLE_SIZE_IN_BYTES);
+               }
+       }
+
+       return addr;
+}
+
+/*
+ * Replace the next instruction after the current instruction with a
+ * breakpoint instruction.
+ */
+static void do_single_step(struct pt_regs *regs)
+{
+       unsigned long addr_wr;
+
+       /* Determine where the target instruction will send us to. */
+       stepped_addr = get_step_address(regs);
+       probe_kernel_read((char *)&stepped_instr, (char *)stepped_addr,
+                         BREAK_INSTR_SIZE);
+
+       addr_wr = writable_address(stepped_addr);
+       probe_kernel_write((char *)addr_wr, (char *)&singlestep_insn,
+                          BREAK_INSTR_SIZE);
+       smp_wmb();
+       flush_icache_range(stepped_addr, stepped_addr + BREAK_INSTR_SIZE);
+}
+
+static void undo_single_step(struct pt_regs *regs)
+{
+       unsigned long addr_wr;
+
+       if (stepped_instr == 0)
+               return;
+
+       addr_wr = writable_address(stepped_addr);
+       probe_kernel_write((char *)addr_wr, (char *)&stepped_instr,
+                          BREAK_INSTR_SIZE);
+       stepped_instr = 0;
+       smp_wmb();
+       flush_icache_range(stepped_addr, stepped_addr + BREAK_INSTR_SIZE);
+}
+
+/*
+ * Calls linux_debug_hook before the kernel dies. If KGDB is enabled,
+ * then try to fall into the debugger.
+ */
+static int
+kgdb_notify(struct notifier_block *self, unsigned long cmd, void *ptr)
+{
+       int ret;
+       unsigned long flags;
+       struct die_args *args = (struct die_args *)ptr;
+       struct pt_regs *regs = args->regs;
+
+#ifdef CONFIG_KPROBES
+       /*
+        * Return immediately if the kprobes fault notifier has set
+        * DIE_PAGE_FAULT.
+        */
+       if (cmd == DIE_PAGE_FAULT)
+               return NOTIFY_DONE;
+#endif /* CONFIG_KPROBES */
+
+       switch (cmd) {
+       case DIE_BREAK:
+       case DIE_COMPILED_BPT:
+               break;
+       case DIE_SSTEPBP:
+               local_irq_save(flags);
+               kgdb_handle_exception(0, SIGTRAP, 0, regs);
+               local_irq_restore(flags);
+               return NOTIFY_STOP;
+       default:
+               /* Userspace events, ignore. */
+               if (user_mode(regs))
+                       return NOTIFY_DONE;
+       }
+
+       local_irq_save(flags);
+       ret = kgdb_handle_exception(args->trapnr, args->signr, args->err, regs);
+       local_irq_restore(flags);
+       if (ret)
+               return NOTIFY_DONE;
+
+       return NOTIFY_STOP;
+}
+
+static struct notifier_block kgdb_notifier = {
+       .notifier_call = kgdb_notify,
+};
+
+/*
+ * kgdb_arch_handle_exception - Handle architecture specific GDB packets.
+ * @vector: The error vector of the exception that happened.
+ * @signo: The signal number of the exception that happened.
+ * @err_code: The error code of the exception that happened.
+ * @remcom_in_buffer: The buffer of the packet we have read.
+ * @remcom_out_buffer: The buffer of %BUFMAX bytes to write a packet into.
+ * @regs: The &struct pt_regs of the current process.
+ *
+ * This function MUST handle the 'c' and 's' command packets,
+ * as well packets to set / remove a hardware breakpoint, if used.
+ * If there are additional packets which the hardware needs to handle,
+ * they are handled here. The code should return -1 if it wants to
+ * process more packets, and a %0 or %1 if it wants to exit from the
+ * kgdb callback.
+ */
+int kgdb_arch_handle_exception(int vector, int signo, int err_code,
+                              char *remcom_in_buffer, char *remcom_out_buffer,
+                              struct pt_regs *regs)
+{
+       char *ptr;
+       unsigned long address;
+
+       /* Undo any stepping we may have done. */
+       undo_single_step(regs);
+
+       switch (remcom_in_buffer[0]) {
+       case 'c':
+       case 's':
+       case 'D':
+       case 'k':
+               /*
+                * Try to read optional parameter, pc unchanged if no parm.
+                * If this was a compiled-in breakpoint, we need to move
+                * to the next instruction or we will just breakpoint
+                * over and over again.
+                */
+               ptr = &remcom_in_buffer[1];
+               if (kgdb_hex2long(&ptr, &address))
+                       regs->pc = address;
+               else if (*(unsigned long *)regs->pc == compiled_bpt)
+                       regs->pc += BREAK_INSTR_SIZE;
+
+               if (remcom_in_buffer[0] == 's') {
+                       /* 's': plant a step breakpoint, record stepping cpu. */
+                       do_single_step(regs);
+                       kgdb_single_step = 1;
+                       atomic_set(&kgdb_cpu_doing_single_step,
+                                  raw_smp_processor_id());
+               } else
+                       atomic_set(&kgdb_cpu_doing_single_step, -1);
+
+               return 0;
+       }
+
+       return -1; /* this means that we do not want to exit from the handler */
+}
+
+/* gdb_bpt_instr is filled in by kgdb_arch_init() with the bpt bundle. */
+struct kgdb_arch arch_kgdb_ops;
+
+/*
+ * kgdb_arch_init - Perform any architecture specific initalization.
+ *
+ * This function will handle the initalization of any architecture
+ * specific callbacks.
+ */
+int kgdb_arch_init(void)
+{
+       tile_bundle_bits bundle = TILEGX_BPT_BUNDLE;
+
+       /* Publish the arch breakpoint bundle, then hook the die chain. */
+       memcpy(arch_kgdb_ops.gdb_bpt_instr, &bundle, BREAK_INSTR_SIZE);
+       return register_die_notifier(&kgdb_notifier);
+}
+
+/*
+ * kgdb_arch_exit - Perform any architecture specific uninitalization.
+ *
+ * This function will handle the uninitalization of any architecture
+ * specific callbacks, for dynamic registration and unregistration.
+ */
+void kgdb_arch_exit(void)
+{
+       /* Detach from the die notifier chain registered in kgdb_arch_init. */
+       unregister_die_notifier(&kgdb_notifier);
+}
+
+int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
+{
+       int err;
+       unsigned long addr_wr = writable_address(bpt->bpt_addr);
+
+       /* No writable alias for this text address: cannot plant a bpt. */
+       if (addr_wr == 0)
+               return -1;
+
+       /* Save the original instruction so the bpt can be removed later. */
+       err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
+                               BREAK_INSTR_SIZE);
+       if (err)
+               return err;
+
+       err = probe_kernel_write((char *)addr_wr, arch_kgdb_ops.gdb_bpt_instr,
+                                BREAK_INSTR_SIZE);
+       /* Order the text write before the icache flush. */
+       smp_wmb();
+       flush_icache_range((unsigned long)bpt->bpt_addr,
+                          (unsigned long)bpt->bpt_addr + BREAK_INSTR_SIZE);
+       return err;
+}
+
+int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
+{
+       int err;
+       unsigned long addr_wr = writable_address(bpt->bpt_addr);
+
+       /* No writable alias for this text address: cannot restore. */
+       if (addr_wr == 0)
+               return -1;
+
+       /* Put back the instruction saved by kgdb_arch_set_breakpoint(). */
+       err = probe_kernel_write((char *)addr_wr, (char *)bpt->saved_instr,
+                                BREAK_INSTR_SIZE);
+       /* Order the text write before the icache flush. */
+       smp_wmb();
+       flush_icache_range((unsigned long)bpt->bpt_addr,
+                          (unsigned long)bpt->bpt_addr + BREAK_INSTR_SIZE);
+       return err;
+}
diff --git a/arch/tile/kernel/kprobes.c b/arch/tile/kernel/kprobes.c
new file mode 100644 (file)
index 0000000..1129f52
--- /dev/null
@@ -0,0 +1,528 @@
+/*
+ * arch/tile/kernel/kprobes.c
+ * Kprobes on TILE-Gx
+ *
+ * Some portions copied from the MIPS version.
+ *
+ * Copyright (C) IBM Corporation, 2002, 2004
+ * Copyright 2006 Sony Corp.
+ * Copyright 2010 Cavium Networks
+ *
+ * Copyright 2012 Tilera Corporation. All Rights Reserved.
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful, but
+ *   WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ *   NON INFRINGEMENT.  See the GNU General Public License for
+ *   more details.
+ */
+
+#include <linux/kprobes.h>
+#include <linux/kdebug.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <asm/cacheflush.h>
+
+#include <arch/opcode.h>
+
+/* Per-cpu kprobe state consumed by the generic kprobes core. */
+DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
+DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
+
+/*
+ * Breakpoint bundles: the plain bpt, and a second variant with
+ * DIE_SSTEPBP or'ed in -- presumably so the trap after single-stepping
+ * the copied instruction (ainsn.insn[1]) is distinguishable from a
+ * normal probe hit; TODO confirm against the trap handler.
+ */
+tile_bundle_bits breakpoint_insn = TILEGX_BPT_BUNDLE;
+tile_bundle_bits breakpoint2_insn = TILEGX_BPT_BUNDLE | DIE_SSTEPBP;
+
+/*
+ * Check whether instruction is branch or jump, or if executing it
+ * has different results depending on where it is executed (e.g. lnk).
+ */
+static int __kprobes insn_has_control(kprobe_opcode_t insn)
+{
+       if (get_Mode(insn) != 0) {   /* Y-format bundle */
+               /* Only unary RRR_1 ops in the Y1 slot can transfer control. */
+               if (get_Opcode_Y1(insn) != RRR_1_OPCODE_Y1 ||
+                   get_RRROpcodeExtension_Y1(insn) != UNARY_RRR_1_OPCODE_Y1)
+                       return 0;
+
+               switch (get_UnaryOpcodeExtension_Y1(insn)) {
+               case JALRP_UNARY_OPCODE_Y1:
+               case JALR_UNARY_OPCODE_Y1:
+               case JRP_UNARY_OPCODE_Y1:
+               case JR_UNARY_OPCODE_Y1:
+               case LNK_UNARY_OPCODE_Y1:
+                       return 1;
+               default:
+                       return 0;
+               }
+       }
+
+       /* X-format bundle: inspect the X1 slot. */
+       switch (get_Opcode_X1(insn)) {
+       case BRANCH_OPCODE_X1:  /* branch instructions */
+       case JUMP_OPCODE_X1:    /* jump instructions: j and jal */
+               return 1;
+
+       case RRR_0_OPCODE_X1:   /* other jump instructions */
+               if (get_RRROpcodeExtension_X1(insn) != UNARY_RRR_0_OPCODE_X1)
+                       return 0;
+               switch (get_UnaryOpcodeExtension_X1(insn)) {
+               case JALRP_UNARY_OPCODE_X1:
+               case JALR_UNARY_OPCODE_X1:
+               case JRP_UNARY_OPCODE_X1:
+               case JR_UNARY_OPCODE_X1:
+               case LNK_UNARY_OPCODE_X1:
+                       return 1;
+               default:
+                       return 0;
+               }
+       default:
+               return 0;
+       }
+}
+
+int __kprobes arch_prepare_kprobe(struct kprobe *p)
+{
+       unsigned long addr = (unsigned long)p->addr;
+
+       /* Probe addresses must be bundle-aligned. */
+       if (addr & (sizeof(kprobe_opcode_t) - 1))
+               return -EINVAL;
+
+       /* Control-transfer bundles cannot be single-stepped out of line. */
+       if (insn_has_control(*p->addr)) {
+               pr_notice("Kprobes for control instructions are not "
+                         "supported\n");
+               return -EINVAL;
+       }
+
+       /* insn: must be on special executable page on tile. */
+       p->ainsn.insn = get_insn_slot();
+       if (!p->ainsn.insn)
+               return -ENOMEM;
+
+       /*
+        * In the kprobe->ainsn.insn[] array we store the original
+        * instruction at index zero and a break trap instruction at
+        * index one.
+        */
+       memcpy(&p->ainsn.insn[0], p->addr, sizeof(kprobe_opcode_t));
+       p->ainsn.insn[1] = breakpoint2_insn;
+       p->opcode = *p->addr;
+
+       return 0;
+}
+
+void __kprobes arch_arm_kprobe(struct kprobe *p)
+{
+       unsigned long addr_wr;
+
+       /* Operate on writable kernel text mapping. */
+       addr_wr = (unsigned long)p->addr - MEM_SV_START + PAGE_OFFSET;
+
+       /* Replace the probed bundle with the breakpoint bundle. */
+       if (probe_kernel_write((void *)addr_wr, &breakpoint_insn,
+               sizeof(breakpoint_insn)))
+               pr_err("%s: failed to enable kprobe\n", __func__);
+
+       /* Order the text write before the icache flush. */
+       smp_wmb();
+       flush_insn_slot(p);
+}
+
+void __kprobes arch_disarm_kprobe(struct kprobe *kp)
+{
+       unsigned long addr_wr;
+
+       /* Operate on writable kernel text mapping. */
+       addr_wr = (unsigned long)kp->addr - MEM_SV_START + PAGE_OFFSET;
+
+       /*
+        * Restore the original bundle saved in kp->opcode.  The error
+        * message used to say "enable" -- a copy-paste from
+        * arch_arm_kprobe(); this is the disable path.
+        */
+       if (probe_kernel_write((void *)addr_wr, &kp->opcode,
+               sizeof(kp->opcode)))
+               pr_err("%s: failed to disable kprobe\n", __func__);
+
+       /* Order the text write before the icache flush. */
+       smp_wmb();
+       flush_insn_slot(kp);
+}
+
+void __kprobes arch_remove_kprobe(struct kprobe *p)
+{
+       /* Release the out-of-line slot allocated by arch_prepare_kprobe(). */
+       if (p->ainsn.insn) {
+               free_insn_slot(p->ainsn.insn, 0);
+               p->ainsn.insn = NULL;
+       }
+}
+
+/* Stash the active kprobe so a reentrant hit can be handled. */
+static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
+{
+       kcb->prev_kprobe.kp = kprobe_running();
+       kcb->prev_kprobe.status = kcb->kprobe_status;
+       kcb->prev_kprobe.saved_pc = kcb->kprobe_saved_pc;
+}
+
+/* Undo save_previous_kprobe() after the nested probe completes. */
+static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
+{
+       __get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
+       kcb->kprobe_status = kcb->prev_kprobe.status;
+       kcb->kprobe_saved_pc = kcb->prev_kprobe.saved_pc;
+}
+
+/* Make p the active kprobe on this cpu and remember the trap pc. */
+static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
+                       struct kprobe_ctlblk *kcb)
+{
+       __get_cpu_var(current_kprobe) = p;
+       kcb->kprobe_saved_pc = regs->pc;
+}
+
+static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
+{
+       /* Single step inline if the instruction is a break. */
+       if (p->opcode == breakpoint_insn ||
+           p->opcode == breakpoint2_insn)
+               regs->pc = (unsigned long)p->addr;
+       else
+               /* Otherwise step the out-of-line copy in ainsn.insn[]. */
+               regs->pc = (unsigned long)&p->ainsn.insn[0];
+}
+
+/*
+ * Main breakpoint dispatch: decide whether the trap at regs->pc belongs
+ * to a kprobe, handle reentrancy, run the pre-handler, and set up the
+ * out-of-line single step.  Returns 1 if the trap was consumed.
+ */
+static int __kprobes kprobe_handler(struct pt_regs *regs)
+{
+       struct kprobe *p;
+       int ret = 0;
+       kprobe_opcode_t *addr;
+       struct kprobe_ctlblk *kcb;
+
+       addr = (kprobe_opcode_t *)regs->pc;
+
+       /*
+        * We don't want to be preempted for the entire
+        * duration of kprobe processing.
+        */
+       preempt_disable();
+       kcb = get_kprobe_ctlblk();
+
+       /* Check we're not actually recursing. */
+       if (kprobe_running()) {
+               p = get_kprobe(addr);
+               if (p) {
+                       if (kcb->kprobe_status == KPROBE_HIT_SS &&
+                           p->ainsn.insn[0] == breakpoint_insn) {
+                               goto no_kprobe;
+                       }
+                       /*
+                        * We have reentered the kprobe_handler(), since
+                        * another probe was hit while within the handler.
+                        * We here save the original kprobes variables and
+                        * just single step on the instruction of the new probe
+                        * without calling any user handlers.
+                        */
+                       save_previous_kprobe(kcb);
+                       set_current_kprobe(p, regs, kcb);
+                       kprobes_inc_nmissed_count(p);
+                       prepare_singlestep(p, regs);
+                       kcb->kprobe_status = KPROBE_REENTER;
+                       return 1;
+               } else {
+                       if (*addr != breakpoint_insn) {
+                               /*
+                                * The breakpoint instruction was removed by
+                                * another cpu right after we hit, no further
+                                * handling of this interrupt is appropriate.
+                                */
+                               ret = 1;
+                               goto no_kprobe;
+                       }
+                       /* Give the active probe's break_handler a chance. */
+                       p = __get_cpu_var(current_kprobe);
+                       if (p->break_handler && p->break_handler(p, regs))
+                               goto ss_probe;
+               }
+               goto no_kprobe;
+       }
+
+       p = get_kprobe(addr);
+       if (!p) {
+               if (*addr != breakpoint_insn) {
+                       /*
+                        * The breakpoint instruction was removed right
+                        * after we hit it.  Another cpu has removed
+                        * either a probepoint or a debugger breakpoint
+                        * at this address.  In either case, no further
+                        * handling of this interrupt is appropriate.
+                        */
+                       ret = 1;
+               }
+               /* Not one of ours: let kernel handle it. */
+               goto no_kprobe;
+       }
+
+       set_current_kprobe(p, regs, kcb);
+       kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+
+       if (p->pre_handler && p->pre_handler(p, regs)) {
+               /* Handler has already set things up, so skip ss setup. */
+               return 1;
+       }
+
+ss_probe:
+       prepare_singlestep(p, regs);
+       kcb->kprobe_status = KPROBE_HIT_SS;
+       return 1;
+
+no_kprobe:
+       preempt_enable_no_resched();
+       return ret;
+}
+
+/*
+ * Called after single-stepping.  p->addr is the address of the
+ * instruction that has been replaced by the breakpoint. To avoid the
+ * SMP problems that can occur when we temporarily put back the
+ * original opcode to single-step, we single-stepped a copy of the
+ * instruction.  The address of this copy is p->ainsn.insn.
+ *
+ * This function prepares to return from the post-single-step
+ * breakpoint trap.
+ */
+static void __kprobes resume_execution(struct kprobe *p,
+                                      struct pt_regs *regs,
+                                      struct pt_regs *regs is resumed */
+                                      struct kprobe_ctlblk *kcb)
+{
+       unsigned long orig_pc = kcb->kprobe_saved_pc;
+       /* Resume one bundle (8 bytes on TILE-Gx) past the probed address. */
+       regs->pc = orig_pc + 8;
+}
+
+/*
+ * Runs on the DIE_SSTEPBP trap after the out-of-line step: fire the
+ * post_handler, fix up pc, and pop any saved (reentered) probe state.
+ * Returns 1 if the trap was consumed, 0 if no probe was active.
+ */
+static inline int post_kprobe_handler(struct pt_regs *regs)
+{
+       struct kprobe *cur = kprobe_running();
+       struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+       if (!cur)
+               return 0;
+
+       /* No post_handler for a reentered probe (see kprobe_handler). */
+       if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
+               kcb->kprobe_status = KPROBE_HIT_SSDONE;
+               cur->post_handler(cur, regs, 0);
+       }
+
+       resume_execution(cur, regs, kcb);
+
+       /* Restore back the original saved kprobes variables and continue. */
+       if (kcb->kprobe_status == KPROBE_REENTER) {
+               restore_previous_kprobe(kcb);
+               goto out;
+       }
+       reset_current_kprobe();
+out:
+       preempt_enable_no_resched();
+
+       return 1;
+}
+
+/*
+ * Called from kprobe_exceptions_notify() on DIE_PAGE_FAULT while a
+ * kprobe is active.  Returns 1 if the probe's own fault_handler dealt
+ * with the fault, 0 to let normal page-fault handling proceed.
+ */
+static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
+{
+       struct kprobe *cur = kprobe_running();
+       struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+       if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
+               return 1;
+
+       /* KPROBE_HIT_SS is a bit flag in kprobe_status, hence the '&'. */
+       if (kcb->kprobe_status & KPROBE_HIT_SS) {
+               /*
+                * We are here because the instruction being single
+                * stepped caused a page fault. We reset the current
+                * kprobe and the ip points back to the probe address
+                * and allow the page fault handler to continue as a
+                * normal page fault.
+                */
+               resume_execution(cur, regs, kcb);
+               reset_current_kprobe();
+               preempt_enable_no_resched();
+       }
+       return 0;
+}
+
+/*
+ * Wrapper routine for handling exceptions.
+ */
+int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
+                                      unsigned long val, void *data)
+{
+       struct die_args *args = (struct die_args *)data;
+       int ret = NOTIFY_DONE;
+
+       switch (val) {
+       case DIE_BREAK:
+               /* Probe hit: dispatch to the main breakpoint handler. */
+               if (kprobe_handler(args->regs))
+                       ret = NOTIFY_STOP;
+               break;
+       case DIE_SSTEPBP:
+               /* Out-of-line single step finished. */
+               if (post_kprobe_handler(args->regs))
+                       ret = NOTIFY_STOP;
+               break;
+       case DIE_PAGE_FAULT:
+               /* kprobe_running() needs smp_processor_id(). */
+               preempt_disable();
+
+               if (kprobe_running()
+                   && kprobe_fault_handler(args->regs, args->trapnr))
+                       ret = NOTIFY_STOP;
+               preempt_enable();
+               break;
+       default:
+               break;
+       }
+       return ret;
+}
+
+int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
+{
+       struct jprobe *jp = container_of(p, struct jprobe, kp);
+       struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+       /* Save registers and the live stack so longjmp can restore them. */
+       kcb->jprobe_saved_regs = *regs;
+       kcb->jprobe_saved_sp = regs->sp;
+
+       memcpy(kcb->jprobes_stack, (void *)kcb->jprobe_saved_sp,
+              MIN_JPROBES_STACK_SIZE(kcb->jprobe_saved_sp));
+
+       /* Divert execution into the jprobe's entry function. */
+       regs->pc = (unsigned long)(jp->entry);
+
+       return 1;
+}
+
+/* Defined in the inline asm below. */
+void jprobe_return_end(void);
+
+void __kprobes jprobe_return(void)
+{
+       /* Trap back into the kprobes core; caught by longjmp_break_handler. */
+       asm volatile(
+               "bpt\n\t"
+               ".globl jprobe_return_end\n"
+               "jprobe_return_end:\n");
+}
+
+int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
+{
+       struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+       /* Only handle the bpt planted inside jprobe_return(). */
+       if (regs->pc >= (unsigned long)jprobe_return &&
+           regs->pc <= (unsigned long)jprobe_return_end) {
+               /* Restore the registers and stack saved in setjmp_pre_handler. */
+               *regs = kcb->jprobe_saved_regs;
+               memcpy((void *)kcb->jprobe_saved_sp, kcb->jprobes_stack,
+                      MIN_JPROBES_STACK_SIZE(kcb->jprobe_saved_sp));
+               preempt_enable_no_resched();
+
+               return 1;
+       }
+       return 0;
+}
+
+/*
+ * Function return probe trampoline:
+ * - init_kprobes() establishes a probepoint here
+ * - When the probed function returns, this probe causes the
+ *   handlers to fire
+ */
+static void __used kretprobe_trampoline_holder(void)
+{
+       /* Emit the trampoline label; a kprobe is registered on it below. */
+       asm volatile(
+               "nop\n\t"
+               ".global kretprobe_trampoline\n"
+               "kretprobe_trampoline:\n\t"
+               "nop\n\t"
+               : : : "memory");
+}
+
+void kretprobe_trampoline(void);
+
+void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
+                                     struct pt_regs *regs)
+{
+       /* Remember where the probed function would really return to. */
+       ri->ret_addr = (kprobe_opcode_t *) regs->lr;
+
+       /* Replace the return addr with trampoline addr */
+       regs->lr = (unsigned long)kretprobe_trampoline;
+}
+
+/*
+ * Called when the probe at kretprobe trampoline is hit.
+ */
+static int __kprobes trampoline_probe_handler(struct kprobe *p,
+                                               struct pt_regs *regs)
+{
+       struct kretprobe_instance *ri = NULL;
+       struct hlist_head *head, empty_rp;
+       struct hlist_node *tmp;
+       unsigned long flags, orig_ret_address = 0;
+       unsigned long trampoline_address = (unsigned long)kretprobe_trampoline;
+
+       INIT_HLIST_HEAD(&empty_rp);
+       kretprobe_hash_lock(current, &head, &flags);
+
+       /*
+        * It is possible to have multiple instances associated with a given
+        * task either because multiple functions in the call path have
+        * a return probe installed on them, and/or more than one return
+        * return probe was registered for a target function.
+        *
+        * We can handle this because:
+        *     - instances are always inserted at the head of the list
+        *     - when multiple return probes are registered for the same
+        *       function, the first instance's ret_addr will point to the
+        *       real return address, and all the rest will point to
+        *       kretprobe_trampoline
+        */
+       hlist_for_each_entry_safe(ri, tmp, head, hlist) {
+               if (ri->task != current)
+                       /* another task is sharing our hash bucket */
+                       continue;
+
+               if (ri->rp && ri->rp->handler)
+                       ri->rp->handler(ri, regs);
+
+               orig_ret_address = (unsigned long)ri->ret_addr;
+               recycle_rp_inst(ri, &empty_rp);
+
+               if (orig_ret_address != trampoline_address) {
+                       /*
+                        * This is the real return address. Any other
+                        * instances associated with this task are for
+                        * other calls deeper on the call stack
+                        */
+                       break;
+               }
+       }
+
+       kretprobe_assert(ri, orig_ret_address, trampoline_address);
+       /* Redirect execution to the real return address. */
+       instruction_pointer(regs) = orig_ret_address;
+
+       reset_current_kprobe();
+       kretprobe_hash_unlock(current, &flags);
+       preempt_enable_no_resched();
+
+       /* Free instances recycled above, outside the hash lock. */
+       hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
+               hlist_del(&ri->hlist);
+               kfree(ri);
+       }
+       /*
+        * By returning a non-zero value, we are telling
+        * kprobe_handler() that we don't want the post_handler
+        * to run (and have re-enabled preemption)
+        */
+       return 1;
+}
+
+/* Report whether @p is the probe planted on our kretprobe trampoline. */
+int __kprobes arch_trampoline_kprobe(struct kprobe *p)
+{
+       return p->addr == (kprobe_opcode_t *)kretprobe_trampoline;
+}
+
+/* The probepoint on kretprobe_trampoline, registered at arch init. */
+static struct kprobe trampoline_p = {
+       .addr = (kprobe_opcode_t *)kretprobe_trampoline,
+       .pre_handler = trampoline_probe_handler
+};
+
+int __init arch_init_kprobes(void)
+{
+       /*
+        * Propagate the result of registering the trampoline probe
+        * instead of discarding it: a failure here would leave
+        * kretprobes silently broken.
+        */
+       return register_kprobe(&trampoline_p);
+}
diff --git a/arch/tile/kernel/kvm_virtio.c b/arch/tile/kernel/kvm_virtio.c
new file mode 100644 (file)
index 0000000..c6b6c6a
--- /dev/null
@@ -0,0 +1,430 @@
+/*
+ * Copyright 2013 Tilera Corporation. All Rights Reserved.
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful, but
+ *   WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ *   NON INFRINGEMENT.  See the GNU General Public License for
+ *   more details.
+ */
+
+/* Referred lguest & s390 implemenation */
+/*
+ * kvm_virtio.c - virtio for kvm on s390
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ *
+ *    Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
+ */
+
+#include <linux/bootmem.h>
+#include <linux/io.h>
+#include <linux/vmalloc.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/export.h>
+#include <linux/virtio.h>
+#include <linux/virtio_config.h>
+#include <linux/virtio_console.h>
+#include <linux/virtio_ring.h>
+#include <linux/virtio_pci.h>
+
+#include <linux/kvm_para.h>
+#include <asm/kvm_virtio.h>
+
+static void *kvm_devices;
+
+/*
+ * TODO: We actually does not use PCI virtio here. We use this
+ * because qemu: virtqueue_init() uses VIRTIO_PCI_VRING_ALIGN.
+ * Maybe we should change them to generic definitions in both qemu & Linux.
+ * Besides, Let's check whether the alignment value (4096, i.e. default
+ * x86 page size) affects performance later.
+ */
+#define KVM_TILE_VIRTIO_RING_ALIGN     VIRTIO_PCI_VRING_ALIGN
+#define to_kvmdev(vd)  container_of(vd, struct kvm_device, vdev)
+
+/*
+ * memory layout: (Total: PAGE_SIZE)
+ * <device 0>
+ * - kvm device descriptor
+ *        struct kvm_device_desc
+ * - vqueue configuration (totally desc->num_vq)
+ *        struct kvm_vqconfig
+ *        ......
+ *        struct kvm_vqconfig
+ * - feature bits (size: desc->feature_len * 2)
+ * - config space (size: desc->config_len)
+ * <device 1>
+ * ......
+ */
+/* Vqueue configs follow the descriptor immediately (see layout above). */
+static struct kvm_vqconfig *kvm_vq_config(const struct kvm_device_desc *desc)
+{
+       return (struct kvm_vqconfig *)(desc + 1);
+}
+
+/* Feature bitmaps follow the desc->num_vq vqueue configs. */
+static u8 *kvm_vq_features(const struct kvm_device_desc *desc)
+{
+       return (u8 *)(kvm_vq_config(desc) + desc->num_vq);
+}
+
+/* Config space follows both feature bitmaps (host's and guest's). */
+static u8 *kvm_vq_configspace(const struct kvm_device_desc *desc)
+{
+       return kvm_vq_features(desc) + desc->feature_len * 2;
+}
+
+/*
+ * The total size of the config page used by this device (incl. desc)
+ */
+static unsigned desc_size(const struct kvm_device_desc *desc)
+{
+       /* Header + vq configs + two feature bitmaps + config space. */
+       return sizeof(*desc)
+               + desc->num_vq * sizeof(struct kvm_vqconfig)
+               + desc->feature_len * 2
+               + desc->config_len;
+}
+
+/* This gets the device's feature bits. */
+static u32 kvm_get_features(struct virtio_device *vdev)
+{
+       unsigned int i;
+       u32 features = 0;
+       struct kvm_device_desc *desc = to_kvmdev(vdev)->desc;
+       u8 *in_features = kvm_vq_features(desc);
+
+       /* Fold the host's byte-wise bitmap into a u32 (32 bits max). */
+       for (i = 0; i < min(desc->feature_len * 8, 32); i++)
+               if (in_features[i / 8] & (1 << (i % 8)))
+                       features |= (1 << i);
+       return features;
+}
+
+/* Write back the feature bits the guest accepted. */
+static void kvm_finalize_features(struct virtio_device *vdev)
+{
+       unsigned int i, bits;
+       struct kvm_device_desc *desc = to_kvmdev(vdev)->desc;
+       /* Second half of bitmap is features we accept. */
+       u8 *out_features = kvm_vq_features(desc) + desc->feature_len;
+
+       /* Give virtio_ring a chance to accept features. */
+       vring_transport_features(vdev);
+
+       memset(out_features, 0, desc->feature_len);
+       bits = min_t(unsigned, desc->feature_len, sizeof(vdev->features)) * 8;
+       for (i = 0; i < bits; i++) {
+               if (test_bit(i, vdev->features))
+                       out_features[i / 8] |= (1 << (i % 8));
+       }
+}
+
+/*
+ * Reading and writing elements in config space
+ */
+/* Copy @len bytes at @offset of the device's config space into @buf. */
+static void kvm_get(struct virtio_device *vdev, unsigned int offset,
+                  void *buf, unsigned len)
+{
+       struct kvm_device_desc *desc = to_kvmdev(vdev)->desc;
+
+       BUG_ON(offset + len > desc->config_len);
+       memcpy(buf, kvm_vq_configspace(desc) + offset, len);
+}
+
+/* Write @len bytes from @buf into the config space at @offset. */
+static void kvm_set(struct virtio_device *vdev, unsigned int offset,
+                  const void *buf, unsigned len)
+{
+       struct kvm_device_desc *desc = to_kvmdev(vdev)->desc;
+
+       BUG_ON(offset + len > desc->config_len);
+       memcpy(kvm_vq_configspace(desc) + offset, buf, len);
+}
+
+/*
+ * The operations to get and set the status word just access
+ * the status field of the device descriptor. set_status will also
+ * make a hypercall to the host, to tell about status changes
+ */
+static u8 kvm_get_status(struct virtio_device *vdev)
+{
+       return to_kvmdev(vdev)->desc->status;
+}
+
+/* Update the status byte and notify the host via hypercall. */
+static void kvm_set_status(struct virtio_device *vdev, u8 status)
+{
+       /* Status 0 means reset; callers must use kvm_reset() for that. */
+       BUG_ON(!status);
+       to_kvmdev(vdev)->desc->status = status;
+       hcall_virtio(KVM_VIRTIO_SET_STATUS, to_kvmdev(vdev)->desc_pa);
+}
+
+/*
+ * To reset the device, we use the KVM_VIRTIO_RESET hypercall, using the
+ * descriptor address. The Host will zero the status and all the
+ * features.
+ */
+static void kvm_reset(struct virtio_device *vdev)
+{
+       /* Host zeroes status and features for the addressed descriptor. */
+       hcall_virtio(KVM_VIRTIO_RESET, to_kvmdev(vdev)->desc_pa);
+}
+
+/*
+ * When the virtio_ring code wants to notify the Host, it calls us here and we
+ * make a hypercall.  We hand the address  of the virtqueue so the Host
+ * knows which virtqueue we're talking about.
+ */
+static void kvm_notify(struct virtqueue *vq)
+{
+       struct kvm_vqinfo *vqi = vq->priv;
+
+       /* Kick the host, identifying the queue by its physical address. */
+       hcall_virtio(KVM_VIRTIO_NOTIFY, vqi->config->pa);
+}
+
+/*
+ * Must set some caching mode to keep set_pte() happy.
+ * It doesn't matter what we choose, because the PFN
+ * is illegal, so we're going to take a page fault anyway.
+ */
+static inline pgprot_t io_prot(void)
+{
+       /* Uncached mapping; see the rationale in the comment above. */
+       return hv_pte_set_mode(PAGE_KERNEL, HV_PTE_MODE_UNCACHED);
+}
+
+/*
+ * This routine finds the first virtqueue described in the configuration of
+ * this device and sets it up.
+ */
+static struct virtqueue *kvm_find_vq(struct virtio_device *vdev,
+                                    unsigned index,
+                                    void (*callback)(struct virtqueue *vq),
+                                    const char *name)
+{
+       struct kvm_device *kdev = to_kvmdev(vdev);
+       struct kvm_vqinfo *vqi;
+       struct kvm_vqconfig *config;
+       struct virtqueue *vq;
+       long irq;
+       int err = -EINVAL;
+
+       if (index >= kdev->desc->num_vq)
+               return ERR_PTR(-ENOENT);
+
+       vqi = kzalloc(sizeof(*vqi), GFP_KERNEL);
+       if (!vqi)
+               return ERR_PTR(-ENOMEM);
+
+       config = kvm_vq_config(kdev->desc)+index;
+
+       vqi->config = config;
+       /* Map the host-provided ring memory uncached into our space. */
+       vqi->pages = generic_remap_prot(config->pa,
+                               vring_size(config->num,
+                                       KVM_TILE_VIRTIO_RING_ALIGN),
+                                       0, io_prot());
+       if (!vqi->pages) {
+               err = -ENOMEM;
+               goto out;
+       }
+
+       vq = vring_new_virtqueue(index, config->num, KVM_TILE_VIRTIO_RING_ALIGN,
+                                vdev, 0, vqi->pages,
+                                kvm_notify, callback, name);
+       if (!vq) {
+               err = -ENOMEM;
+               goto unmap;
+       }
+
+       /*
+        * Trigger the IPI interrupt in SW way.
+        * TODO: We do not need to create one irq for each vq. A bit wasteful.
+        */
+       irq = create_irq();
+       if (irq < 0) {
+               err = -ENXIO;
+               goto del_virtqueue;
+       }
+
+       tile_irq_activate(irq, TILE_IRQ_SW_CLEAR);
+
+       if (request_irq(irq, vring_interrupt, 0, dev_name(&vdev->dev), vq)) {
+               err = -ENXIO;
+               destroy_irq(irq);
+               goto del_virtqueue;
+       }
+
+       /* Publish the irq so the host knows how to signal this queue. */
+       config->irq = irq;
+
+       vq->priv = vqi;
+       return vq;
+
+       /* Error unwinding, in reverse order of acquisition. */
+del_virtqueue:
+       vring_del_virtqueue(vq);
+unmap:
+       vunmap(vqi->pages);
+out:
+       return ERR_PTR(err);
+}
+
+/* Tear down one virtqueue: ring, mapping, and bookkeeping. */
+static void kvm_del_vq(struct virtqueue *vq)
+{
+       struct kvm_vqinfo *vqi = vq->priv;
+
+       vring_del_virtqueue(vq);
+       vunmap(vqi->pages);
+       kfree(vqi);
+}
+
+/* Tear down every virtqueue registered on @vdev. */
+static void kvm_del_vqs(struct virtio_device *vdev)
+{
+       struct virtqueue *vq, *n;
+
+       list_for_each_entry_safe(vq, n, &vdev->vqs, list)
+               kvm_del_vq(vq);
+}
+
+static int kvm_find_vqs(struct virtio_device *vdev, unsigned nvqs,
+                       struct virtqueue *vqs[],
+                       vq_callback_t *callbacks[],
+                       const char *names[])
+{
+       struct kvm_device *kdev = to_kvmdev(vdev);
+       int i;
+
+       /* The device describes desc->num_vq queues; never hand out more. */
+       if (nvqs > kdev->desc->num_vq)
+               return -ENOENT;
+
+       for (i = 0; i < nvqs; ++i) {
+               vqs[i] = kvm_find_vq(vdev, i, callbacks[i], names[i]);
+               if (IS_ERR(vqs[i]))
+                       goto error;
+       }
+       return 0;
+
+error:
+       /* Undo the queues created before the failure. */
+       kvm_del_vqs(vdev);
+       return PTR_ERR(vqs[i]);
+}
+
+/*
+ * The config ops structure as defined by virtio config
+ */
+/* virtio_config_ops backed by the shared descriptor page + hypercalls. */
+static struct virtio_config_ops kvm_vq_config_ops = {
+       .get_features = kvm_get_features,
+       .finalize_features = kvm_finalize_features,
+       .get = kvm_get,
+       .set = kvm_set,
+       .get_status = kvm_get_status,
+       .set_status = kvm_set_status,
+       .reset = kvm_reset,
+       .find_vqs = kvm_find_vqs,
+       .del_vqs = kvm_del_vqs,
+};
+
+/*
+ * The root device for the kvm virtio devices.
+ * This makes them appear as /sys/devices/kvm_tile/0,1,2 not /sys/devices/0,1,2.
+ */
+static struct device *kvm_root;
+
+/*
+ * adds a new device and register it with virtio
+ * appropriate drivers are loaded by the device model
+ */
+static void add_kvm_device(struct kvm_device_desc *d, unsigned int offset)
+{
+       struct kvm_device *kdev;
+
+       kdev = kzalloc(sizeof(*kdev), GFP_KERNEL);
+       if (!kdev) {
+               pr_emerg("Cannot allocate kvm dev %u type %u\n",
+                        offset, d->type);
+               return;
+       }
+
+       kdev->vdev.dev.parent = kvm_root;
+       kdev->vdev.id.device = d->type;
+       kdev->vdev.config = &kvm_vq_config_ops;
+       kdev->desc = d;
+       /* Physical address of this descriptor within the device page. */
+       kdev->desc_pa = PFN_PHYS(max_pfn) + offset;
+
+       if (register_virtio_device(&kdev->vdev) != 0) {
+               pr_err("Failed to register kvm device %u type %u\n",
+                      offset, d->type);
+               kfree(kdev);
+       }
+}
+
+/*
+ * scan_devices() simply iterates through the device page.
+ * The type 0 is reserved to mean "end of devices".
+ */
+static void scan_devices(void)
+{
+       unsigned int i;
+       struct kvm_device_desc *d;
+
+       /* Walk variable-sized descriptors until type 0 or end of page. */
+       for (i = 0; i < PAGE_SIZE; i += desc_size(d)) {
+               d = kvm_devices + i;
+
+               if (d->type == 0)
+                       break;
+
+               add_kvm_device(d, i);
+       }
+}
+
+/*
+ * Init function for virtio.
+ * devices are in a single page above the top of "normal" mem.
+ */
+static int __init kvm_devices_init(void)
+{
+       int rc = -ENOMEM;
+
+       kvm_root = root_device_register("kvm_tile");
+       if (IS_ERR(kvm_root)) {
+               rc = PTR_ERR(kvm_root);
+               /* Message previously lacked its trailing newline. */
+               pr_err("Could not register kvm_tile root device\n");
+               return rc;
+       }
+
+       /* Map the descriptor page just above the top of "normal" memory. */
+       kvm_devices = generic_remap_prot(PFN_PHYS(max_pfn), PAGE_SIZE,
+                                        0, io_prot());
+       if (!kvm_devices) {
+               /* kvm_devices is already NULL here; no need to reassign. */
+               root_device_unregister(kvm_root);
+               return rc;
+       }
+
+       scan_devices();
+       return 0;
+}
+
+/* code for early console output with virtio_console */
+/* code for early console output with virtio_console */
+static __init int early_put_chars(u32 vtermno, const char *buf, int len)
+{
+       char scratch[512];
+
+       /* Clamp so the terminator below stays in bounds. */
+       if (len > sizeof(scratch) - 1)
+               len = sizeof(scratch) - 1;
+       scratch[len] = '\0';
+       memcpy(scratch, buf, len);
+       /* Hand the host the physical address of the NUL-terminated copy. */
+       hcall_virtio(KVM_VIRTIO_NOTIFY, __pa(scratch));
+
+       return len;
+}
+
+/* Register early_put_chars as the early virtio console backend. */
+static int __init tile_virtio_console_init(void)
+{
+       return virtio_cons_early_init(early_put_chars);
+}
+console_initcall(tile_virtio_console_init);
+
+/*
+ * We do this after core stuff, but before the drivers.
+ */
+postcore_initcall(kvm_devices_init);
diff --git a/arch/tile/kernel/mcount_64.S b/arch/tile/kernel/mcount_64.S
new file mode 100644 (file)
index 0000000..70d7bb0
--- /dev/null
@@ -0,0 +1,224 @@
+/*
+ * Copyright 2012 Tilera Corporation. All Rights Reserved.
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful, but
+ *   WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ *   NON INFRINGEMENT.  See the GNU General Public License for
+ *   more details.
+ *
+ * TILE-Gx specific __mcount support
+ */
+
+#include <linux/linkage.h>
+#include <asm/ftrace.h>
+
+#define REGSIZE 8
+
+       .text
+       .global __mcount
+
+       .macro  MCOUNT_SAVE_REGS
+       addli   sp, sp, -REGSIZE
+       {
+        st     sp, lr
+        addli  r29, sp, - (12 * REGSIZE)
+       }
+       {
+        addli  sp, sp, - (13 * REGSIZE)
+        st     r29, sp
+       }
+       addli   r29, r29, REGSIZE
+       { st    r29, r0; addli  r29, r29, REGSIZE }
+       { st    r29, r1; addli  r29, r29, REGSIZE }
+       { st    r29, r2; addli  r29, r29, REGSIZE }
+       { st    r29, r3; addli  r29, r29, REGSIZE }
+       { st    r29, r4; addli  r29, r29, REGSIZE }
+       { st    r29, r5; addli  r29, r29, REGSIZE }
+       { st    r29, r6; addli  r29, r29, REGSIZE }
+       { st    r29, r7; addli  r29, r29, REGSIZE }
+       { st    r29, r8; addli  r29, r29, REGSIZE }
+       { st    r29, r9; addli  r29, r29, REGSIZE }
+       { st    r29, r10; addli r29, r29, REGSIZE }
+       .endm
+
+       .macro  MCOUNT_RESTORE_REGS
+       addli   r29, sp, (2 * REGSIZE)
+       { ld    r0, r29; addli  r29, r29, REGSIZE }
+       { ld    r1, r29; addli  r29, r29, REGSIZE }
+       { ld    r2, r29; addli  r29, r29, REGSIZE }
+       { ld    r3, r29; addli  r29, r29, REGSIZE }
+       { ld    r4, r29; addli  r29, r29, REGSIZE }
+       { ld    r5, r29; addli  r29, r29, REGSIZE }
+       { ld    r6, r29; addli  r29, r29, REGSIZE }
+       { ld    r7, r29; addli  r29, r29, REGSIZE }
+       { ld    r8, r29; addli  r29, r29, REGSIZE }
+       { ld    r9, r29; addli  r29, r29, REGSIZE }
+       { ld    r10, r29; addli lr, sp, (13 * REGSIZE) }
+       { ld    lr, lr;  addli  sp, sp, (14 * REGSIZE) }
+       .endm
+
+       .macro  RETURN_BACK
+       { move  r12, lr; move   lr, r10 }
+       jrp     r12
+       .endm
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+
+       .align  64
+STD_ENTRY(__mcount)
+__mcount:
+       j       ftrace_stub
+STD_ENDPROC(__mcount)
+
+       .align  64
+STD_ENTRY(ftrace_caller)
+       moveli  r11, hw2_last(function_trace_stop)
+       { shl16insli    r11, r11, hw1(function_trace_stop); move r12, lr }
+       { shl16insli    r11, r11, hw0(function_trace_stop); move lr, r10 }
+       ld      r11, r11
+       beqz    r11, 1f
+       jrp     r12
+
+1:
+       { move  r10, lr; move   lr, r12 }
+       MCOUNT_SAVE_REGS
+
+       /* arg1: self return address */
+       /* arg2: parent's return address */
+       { move  r0, lr; move    r1, r10 }
+
+       .global ftrace_call
+ftrace_call:
+       /*
+        * a placeholder for the call to a real tracing function, i.e.
+        * ftrace_trace_function()
+        */
+       nop
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+       .global ftrace_graph_call
+ftrace_graph_call:
+       /*
+        * a placeholder for the call to a real tracing function, i.e.
+        * ftrace_graph_caller()
+        */
+       nop
+#endif
+       MCOUNT_RESTORE_REGS
+       .global ftrace_stub
+ftrace_stub:
+       RETURN_BACK
+STD_ENDPROC(ftrace_caller)
+
+#else /* ! CONFIG_DYNAMIC_FTRACE */
+
+       .align  64
+STD_ENTRY(__mcount)
+       moveli  r11, hw2_last(function_trace_stop)
+       { shl16insli    r11, r11, hw1(function_trace_stop); move r12, lr }
+       { shl16insli    r11, r11, hw0(function_trace_stop); move lr, r10 }
+       ld      r11, r11
+       beqz    r11, 1f
+       jrp     r12
+
+1:
+       { move  r10, lr; move   lr, r12 }
+       {
+        moveli r11, hw2_last(ftrace_trace_function)
+        moveli r13, hw2_last(ftrace_stub)
+       }
+       {
+        shl16insli     r11, r11, hw1(ftrace_trace_function)
+        shl16insli     r13, r13, hw1(ftrace_stub)
+       }
+       {
+        shl16insli     r11, r11, hw0(ftrace_trace_function)
+        shl16insli     r13, r13, hw0(ftrace_stub)
+       }
+
+       ld      r11, r11
+       sub     r14, r13, r11
+       bnez    r14, static_trace
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+       moveli  r15, hw2_last(ftrace_graph_return)
+       shl16insli      r15, r15, hw1(ftrace_graph_return)
+       shl16insli      r15, r15, hw0(ftrace_graph_return)
+       ld      r15, r15
+       sub     r15, r15, r13
+       bnez    r15, ftrace_graph_caller
+
+       {
+        moveli r16, hw2_last(ftrace_graph_entry)
+        moveli r17, hw2_last(ftrace_graph_entry_stub)
+       }
+       {
+        shl16insli     r16, r16, hw1(ftrace_graph_entry)
+        shl16insli     r17, r17, hw1(ftrace_graph_entry_stub)
+       }
+       {
+        shl16insli     r16, r16, hw0(ftrace_graph_entry)
+        shl16insli     r17, r17, hw0(ftrace_graph_entry_stub)
+       }
+       ld      r16, r16
+       sub     r17, r16, r17
+       bnez    r17, ftrace_graph_caller
+
+#endif
+       RETURN_BACK
+
+static_trace:
+       MCOUNT_SAVE_REGS
+
+       /* arg1: self return address */
+       /* arg2: parent's return address */
+       { move  r0, lr; move    r1, r10 }
+
+       /* call ftrace_trace_function() */
+       jalr    r11
+
+       MCOUNT_RESTORE_REGS
+
+       .global ftrace_stub
+ftrace_stub:
+       RETURN_BACK
+STD_ENDPROC(__mcount)
+
+#endif /* ! CONFIG_DYNAMIC_FTRACE */
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+
+STD_ENTRY(ftrace_graph_caller)
+ftrace_graph_caller:
+#ifndef CONFIG_DYNAMIC_FTRACE
+       MCOUNT_SAVE_REGS
+#endif
+
+       /* arg1: Get the location of the parent's return address */
+       addi    r0, sp, 12 * REGSIZE
+       /* arg2: Get self return address */
+       move    r1, lr
+
+       jal prepare_ftrace_return
+
+       MCOUNT_RESTORE_REGS
+       RETURN_BACK
+STD_ENDPROC(ftrace_graph_caller)
+
+       .global return_to_handler
+return_to_handler:
+       MCOUNT_SAVE_REGS
+
+       jal     ftrace_return_to_handler
+       /* restore the real parent address */
+       move    r11, r0
+
+       MCOUNT_RESTORE_REGS
+       jr      r11
+
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
index b9fe80ec108989fa341060542816d9760fd62d63..d94f4872e94f36c3989768ee4731d74609f7e044 100644 (file)
@@ -36,8 +36,9 @@ static void *tile_dma_alloc_coherent(struct device *dev, size_t size,
                                     dma_addr_t *dma_handle, gfp_t gfp,
                                     struct dma_attrs *attrs)
 {
-       u64 dma_mask = dev->coherent_dma_mask ?: DMA_BIT_MASK(32);
-       int node = dev_to_node(dev);
+       u64 dma_mask = (dev && dev->coherent_dma_mask) ?
+               dev->coherent_dma_mask : DMA_BIT_MASK(32);
+       int node = dev ? dev_to_node(dev) : 0;
        int order = get_order(size);
        struct page *pg;
        dma_addr_t addr;
@@ -256,7 +257,7 @@ static void tile_dma_unmap_page(struct device *dev, dma_addr_t dma_address,
        BUG_ON(!valid_dma_direction(direction));
 
        __dma_complete_page(pfn_to_page(PFN_DOWN(dma_address)),
-                           dma_address & PAGE_OFFSET, size, direction);
+                           dma_address & (PAGE_SIZE - 1), size, direction);
 }
 
 static void tile_dma_sync_single_for_cpu(struct device *dev,
@@ -357,7 +358,7 @@ static void *tile_pci_dma_alloc_coherent(struct device *dev, size_t size,
 
        addr = page_to_phys(pg);
 
-       *dma_handle = phys_to_dma(dev, addr);
+       *dma_handle = addr + get_dma_offset(dev);
 
        return page_address(pg);
 }
@@ -387,7 +388,7 @@ static int tile_pci_dma_map_sg(struct device *dev, struct scatterlist *sglist,
                sg->dma_address = sg_phys(sg);
                __dma_prep_pa_range(sg->dma_address, sg->length, direction);
 
-               sg->dma_address = phys_to_dma(dev, sg->dma_address);
+               sg->dma_address = sg->dma_address + get_dma_offset(dev);
 #ifdef CONFIG_NEED_SG_DMA_LENGTH
                sg->dma_length = sg->length;
 #endif
@@ -422,7 +423,7 @@ static dma_addr_t tile_pci_dma_map_page(struct device *dev, struct page *page,
        BUG_ON(offset + size > PAGE_SIZE);
        __dma_prep_page(page, offset, size, direction);
 
-       return phys_to_dma(dev, page_to_pa(page) + offset);
+       return page_to_pa(page) + offset + get_dma_offset(dev);
 }
 
 static void tile_pci_dma_unmap_page(struct device *dev, dma_addr_t dma_address,
@@ -432,10 +433,10 @@ static void tile_pci_dma_unmap_page(struct device *dev, dma_addr_t dma_address,
 {
        BUG_ON(!valid_dma_direction(direction));
 
-       dma_address = dma_to_phys(dev, dma_address);
+       dma_address -= get_dma_offset(dev);
 
        __dma_complete_page(pfn_to_page(PFN_DOWN(dma_address)),
-                           dma_address & PAGE_OFFSET, size, direction);
+                           dma_address & (PAGE_SIZE - 1), size, direction);
 }
 
 static void tile_pci_dma_sync_single_for_cpu(struct device *dev,
@@ -445,7 +446,7 @@ static void tile_pci_dma_sync_single_for_cpu(struct device *dev,
 {
        BUG_ON(!valid_dma_direction(direction));
 
-       dma_handle = dma_to_phys(dev, dma_handle);
+       dma_handle -= get_dma_offset(dev);
 
        __dma_complete_pa_range(dma_handle, size, direction);
 }
@@ -456,7 +457,7 @@ static void tile_pci_dma_sync_single_for_device(struct device *dev,
                                                enum dma_data_direction
                                                direction)
 {
-       dma_handle = dma_to_phys(dev, dma_handle);
+       dma_handle -= get_dma_offset(dev);
 
        __dma_prep_pa_range(dma_handle, size, direction);
 }
@@ -558,21 +559,43 @@ static struct dma_map_ops pci_swiotlb_dma_ops = {
        .mapping_error = swiotlb_dma_mapping_error,
 };
 
+static struct dma_map_ops pci_hybrid_dma_ops = {
+       .alloc = tile_swiotlb_alloc_coherent,
+       .free = tile_swiotlb_free_coherent,
+       .map_page = tile_pci_dma_map_page,
+       .unmap_page = tile_pci_dma_unmap_page,
+       .map_sg = tile_pci_dma_map_sg,
+       .unmap_sg = tile_pci_dma_unmap_sg,
+       .sync_single_for_cpu = tile_pci_dma_sync_single_for_cpu,
+       .sync_single_for_device = tile_pci_dma_sync_single_for_device,
+       .sync_sg_for_cpu = tile_pci_dma_sync_sg_for_cpu,
+       .sync_sg_for_device = tile_pci_dma_sync_sg_for_device,
+       .mapping_error = tile_pci_dma_mapping_error,
+       .dma_supported = tile_pci_dma_supported
+};
+
 struct dma_map_ops *gx_legacy_pci_dma_map_ops = &pci_swiotlb_dma_ops;
+struct dma_map_ops *gx_hybrid_pci_dma_map_ops = &pci_hybrid_dma_ops;
 #else
 struct dma_map_ops *gx_legacy_pci_dma_map_ops;
+struct dma_map_ops *gx_hybrid_pci_dma_map_ops;
 #endif
 EXPORT_SYMBOL(gx_legacy_pci_dma_map_ops);
+EXPORT_SYMBOL(gx_hybrid_pci_dma_map_ops);
 
 #ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
 int dma_set_coherent_mask(struct device *dev, u64 mask)
 {
        struct dma_map_ops *dma_ops = get_dma_ops(dev);
 
-       /* Handle legacy PCI devices with limited memory addressability. */
-       if (((dma_ops == gx_pci_dma_map_ops) ||
-           (dma_ops == gx_legacy_pci_dma_map_ops)) &&
+       /* Handle hybrid PCI devices with limited memory addressability. */
+       if ((dma_ops == gx_pci_dma_map_ops ||
+            dma_ops == gx_hybrid_pci_dma_map_ops ||
+            dma_ops == gx_legacy_pci_dma_map_ops) &&
            (mask <= DMA_BIT_MASK(32))) {
+               if (dma_ops == gx_pci_dma_map_ops)
+                       set_dma_ops(dev, gx_hybrid_pci_dma_map_ops);
+
                if (mask > dev->archdata.max_direct_dma_addr)
                        mask = dev->archdata.max_direct_dma_addr;
        }
@@ -584,3 +607,21 @@ int dma_set_coherent_mask(struct device *dev, u64 mask)
 }
 EXPORT_SYMBOL(dma_set_coherent_mask);
 #endif
+
+#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
+/*
+ * The generic dma_get_required_mask() uses the highest physical address
+ * (max_pfn) to provide the hint to the PCI drivers regarding 32-bit or
+ * 64-bit DMA configuration. Since TILEGx has I/O TLB/MMU, allowing the
+ * DMAs to use the full 64-bit PCI address space and not limited by
+ * the physical memory space, we always let the PCI devices use
+ * 64-bit DMA if they have that capability, by returning the 64-bit
+ * DMA mask here. The device driver has the option to use 32-bit DMA if
+ * the device is not capable of 64-bit DMA.
+ */
+u64 dma_get_required_mask(struct device *dev)
+{
+       return DMA_BIT_MASK(64);
+}
+EXPORT_SYMBOL_GPL(dma_get_required_mask);
+#endif
index 67237d34c2e2ad7f8b3b1c43f3f28bfe4d6c1acc..b7180e6e900da624e14758138037fd103009881f 100644 (file)
@@ -20,7 +20,6 @@
 #include <linux/capability.h>
 #include <linux/sched.h>
 #include <linux/errno.h>
-#include <linux/bootmem.h>
 #include <linux/irq.h>
 #include <linux/io.h>
 #include <linux/uaccess.h>
@@ -52,6 +51,8 @@
  *
  */
 
+static int pci_probe = 1;
+
 /*
  * This flag tells if the platform is TILEmpower that needs
  * special configuration for the PLX switch chip.
@@ -144,6 +145,11 @@ int __init tile_pci_init(void)
 {
        int i;
 
+       if (!pci_probe) {
+               pr_info("PCI: disabled by boot argument\n");
+               return 0;
+       }
+
        pr_info("PCI: Searching for controllers...\n");
 
        /* Re-init number of PCIe controllers to support hot-plug feature. */
@@ -192,7 +198,6 @@ int __init tile_pci_init(void)
                        controller->hv_cfg_fd[0] = hv_cfg_fd0;
                        controller->hv_cfg_fd[1] = hv_cfg_fd1;
                        controller->hv_mem_fd = hv_mem_fd;
-                       controller->first_busno = 0;
                        controller->last_busno = 0xff;
                        controller->ops = &tile_cfg_ops;
 
@@ -283,7 +288,7 @@ int __init pcibios_init(void)
         * known to require at least 20ms here, but we use a more
         * conservative value.
         */
-       mdelay(250);
+       msleep(250);
 
        /* Scan all of the recorded PCI controllers.  */
        for (i = 0; i < TILE_NUM_PCIE; i++) {
@@ -304,18 +309,10 @@ int __init pcibios_init(void)
 
                        pr_info("PCI: initializing controller #%d\n", i);
 
-                       /*
-                        * This comes from the generic Linux PCI driver.
-                        *
-                        * It reads the PCI tree for this bus into the Linux
-                        * data structures.
-                        *
-                        * This is inlined in linux/pci.h and calls into
-                        * pci_scan_bus_parented() in probe.c.
-                        */
                        pci_add_resource(&resources, &ioport_resource);
                        pci_add_resource(&resources, &iomem_resource);
-                       bus = pci_scan_root_bus(NULL, 0, controller->ops, controller, &resources);
+                       bus = pci_scan_root_bus(NULL, 0, controller->ops,
+                                               controller, &resources);
                        controller->root_bus = bus;
                        controller->last_busno = bus->busn_res.end;
                }
@@ -388,6 +385,16 @@ void pcibios_set_master(struct pci_dev *dev)
        /* No special bus mastering setup handling. */
 }
 
+/* Process any "pci=" kernel boot arguments. */
+char *__init pcibios_setup(char *str)
+{
+       if (!strcmp(str, "off")) {
+               pci_probe = 0;
+               return NULL;
+       }
+       return str;
+}
+
 /*
  * Enable memory and/or address decoding, as appropriate, for the
  * device described by the 'dev' struct.
index 11425633b2d7a03e2b04cadf144fbde7efe585cf..66ef9db5c2fbefbabdddf1ff7cf94d3283e89c90 100644 (file)
@@ -69,19 +69,32 @@ static int pcie_rc[TILEGX_NUM_TRIO][TILEGX_TRIO_PCIES];
  * a HW PCIe link-training bug. The exact delay is specified with
  * a kernel boot argument in the form of "pcie_rc_delay=T,P,S",
  * where T is the TRIO instance number, P is the port number and S is
- * the delay in seconds. If the delay is not provided, the value
- * will be DEFAULT_RC_DELAY.
+ * the delay in seconds. If the argument is specified, but the delay is
+ * not provided, the value will be DEFAULT_RC_DELAY.
  */
 static int rc_delay[TILEGX_NUM_TRIO][TILEGX_TRIO_PCIES];
 
 /* Default number of seconds that the PCIe RC port probe can be delayed. */
 #define DEFAULT_RC_DELAY       10
 
-/* Max number of seconds that the PCIe RC port probe can be delayed. */
-#define MAX_RC_DELAY           20
+/* The PCI I/O space size in each PCI domain. */
+#define IO_SPACE_SIZE          0x10000
+
+/* Provide shorter versions of some very long constant names. */
+#define AUTO_CONFIG_RC \
+       TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_AUTO_CONFIG_RC
+#define AUTO_CONFIG_RC_G1      \
+       TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_AUTO_CONFIG_RC_G1
+#define AUTO_CONFIG_EP \
+       TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_AUTO_CONFIG_ENDPOINT
+#define AUTO_CONFIG_EP_G1      \
+       TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_AUTO_CONFIG_ENDPOINT_G1
 
 /* Array of the PCIe ports configuration info obtained from the BIB. */
-struct pcie_port_property pcie_ports[TILEGX_NUM_TRIO][TILEGX_TRIO_PCIES];
+struct pcie_trio_ports_property pcie_ports[TILEGX_NUM_TRIO];
+
+/* Number of configured TRIO instances. */
+int num_trio_shims;
 
 /* All drivers share the TRIO contexts defined here. */
 gxio_trio_context_t trio_contexts[TILEGX_NUM_TRIO];
@@ -89,24 +102,21 @@ gxio_trio_context_t trio_contexts[TILEGX_NUM_TRIO];
 /* Pointer to an array of PCIe RC controllers. */
 struct pci_controller pci_controllers[TILEGX_NUM_TRIO * TILEGX_TRIO_PCIES];
 int num_rc_controllers;
-static int num_ep_controllers;
 
 static struct pci_ops tile_cfg_ops;
 
 /* Mask of CPUs that should receive PCIe interrupts. */
 static struct cpumask intr_cpus_map;
 
-/*
- * We don't need to worry about the alignment of resources.
- */
+/* We don't need to worry about the alignment of resources. */
 resource_size_t pcibios_align_resource(void *data, const struct resource *res,
-                               resource_size_t size, resource_size_t align)
+                                      resource_size_t size,
+                                      resource_size_t align)
 {
        return res->start;
 }
 EXPORT_SYMBOL(pcibios_align_resource);
 
-
 /*
  * Pick a CPU to receive and handle the PCIe interrupts, based on the IRQ #.
  * For now, we simply send interrupts to non-dataplane CPUs.
@@ -134,24 +144,19 @@ static int tile_irq_cpu(int irq)
        return cpu;
 }
 
-/*
- * Open a file descriptor to the TRIO shim.
- */
+/* Open a file descriptor to the TRIO shim. */
 static int tile_pcie_open(int trio_index)
 {
        gxio_trio_context_t *context = &trio_contexts[trio_index];
        int ret;
+       int mac;
 
-       /*
-        * This opens a file descriptor to the TRIO shim.
-        */
+       /* This opens a file descriptor to the TRIO shim. */
        ret = gxio_trio_init(context, trio_index);
        if (ret < 0)
-               return ret;
+               goto gxio_trio_init_failure;
 
-       /*
-        * Allocate an ASID for the kernel.
-        */
+       /* Allocate an ASID for the kernel. */
        ret = gxio_trio_alloc_asids(context, 1, 0, 0);
        if (ret < 0) {
                pr_err("PCI: ASID alloc failure on TRIO %d, give up\n",
@@ -189,31 +194,97 @@ static int tile_pcie_open(int trio_index)
        }
 #endif
 
+       /* Get the properties of the PCIe ports on this TRIO instance. */
+       ret = gxio_trio_get_port_property(context, &pcie_ports[trio_index]);
+       if (ret < 0) {
+               pr_err("PCI: PCIE_GET_PORT_PROPERTY failure, error %d,"
+                      " on TRIO %d\n", ret, trio_index);
+               goto get_port_property_failure;
+       }
+
+       context->mmio_base_mac =
+               iorpc_ioremap(context->fd, 0, HV_TRIO_CONFIG_IOREMAP_SIZE);
+       if (context->mmio_base_mac == NULL) {
+               pr_err("PCI: TRIO config space mapping failure, error %d,"
+                      " on TRIO %d\n", ret, trio_index);
+               ret = -ENOMEM;
+
+               goto trio_mmio_mapping_failure;
+       }
+
+       /* Check the port strap state which will override the BIB setting. */
+       for (mac = 0; mac < TILEGX_TRIO_PCIES; mac++) {
+               TRIO_PCIE_INTFC_PORT_CONFIG_t port_config;
+               unsigned int reg_offset;
+
+               /* Ignore ports that are not specified in the BIB. */
+               if (!pcie_ports[trio_index].ports[mac].allow_rc &&
+                   !pcie_ports[trio_index].ports[mac].allow_ep)
+                       continue;
+
+               reg_offset =
+                       (TRIO_PCIE_INTFC_PORT_CONFIG <<
+                               TRIO_CFG_REGION_ADDR__REG_SHIFT) |
+                       (TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_INTERFACE <<
+                               TRIO_CFG_REGION_ADDR__INTFC_SHIFT) |
+                       (mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);
+
+               port_config.word =
+                       __gxio_mmio_read(context->mmio_base_mac + reg_offset);
+
+               if (port_config.strap_state != AUTO_CONFIG_RC &&
+                   port_config.strap_state != AUTO_CONFIG_RC_G1) {
+                       /*
+                        * If this is really intended to be an EP port, record
+                        * it so that the endpoint driver will know about it.
+                        */
+                       if (port_config.strap_state == AUTO_CONFIG_EP ||
+                           port_config.strap_state == AUTO_CONFIG_EP_G1)
+                               pcie_ports[trio_index].ports[mac].allow_ep = 1;
+               }
+       }
+
        return ret;
 
+trio_mmio_mapping_failure:
+get_port_property_failure:
 asid_alloc_failure:
 #ifdef USE_SHARED_PCIE_CONFIG_REGION
 pio_alloc_failure:
 #endif
        hv_dev_close(context->fd);
+gxio_trio_init_failure:
+       context->fd = -1;
 
        return ret;
 }
 
-static void
-tilegx_legacy_irq_ack(struct irq_data *d)
+static int __init tile_trio_init(void)
+{
+       int i;
+
+       /* We loop over all the TRIO shims. */
+       for (i = 0; i < TILEGX_NUM_TRIO; i++) {
+               if (tile_pcie_open(i) < 0)
+                       continue;
+               num_trio_shims++;
+       }
+
+       return 0;
+}
+postcore_initcall(tile_trio_init);
+
+static void tilegx_legacy_irq_ack(struct irq_data *d)
 {
        __insn_mtspr(SPR_IPI_EVENT_RESET_K, 1UL << d->irq);
 }
 
-static void
-tilegx_legacy_irq_mask(struct irq_data *d)
+static void tilegx_legacy_irq_mask(struct irq_data *d)
 {
        __insn_mtspr(SPR_IPI_MASK_SET_K, 1UL << d->irq);
 }
 
-static void
-tilegx_legacy_irq_unmask(struct irq_data *d)
+static void tilegx_legacy_irq_unmask(struct irq_data *d)
 {
        __insn_mtspr(SPR_IPI_MASK_RESET_K, 1UL << d->irq);
 }
@@ -234,8 +305,7 @@ static struct irq_chip tilegx_legacy_irq_chip = {
  * to Linux which just calls handle_level_irq() after clearing the
  * MAC INTx Assert status bit associated with this interrupt.
  */
-static void
-trio_handle_level_irq(unsigned int irq, struct irq_desc *desc)
+static void trio_handle_level_irq(unsigned int irq, struct irq_desc *desc)
 {
        struct pci_controller *controller = irq_desc_get_handler_data(desc);
        gxio_trio_context_t *trio_context = controller->trio;
@@ -301,9 +371,7 @@ static int tile_init_irqs(struct pci_controller *controller)
                        goto free_irqs;
                }
 
-               /*
-                * Register the IRQ handler with the kernel.
-                */
+               /* Register the IRQ handler with the kernel. */
                irq_set_chip_and_handler(irq, &tilegx_legacy_irq_chip,
                                        trio_handle_level_irq);
                irq_set_chip_data(irq, (void *)(uint64_t)i);
@@ -319,15 +387,40 @@ free_irqs:
        return -1;
 }
 
+/*
+ * Return 1 if the port is strapped to operate in RC mode.
+ */
+static int
+strapped_for_rc(gxio_trio_context_t *trio_context, int mac)
+{
+       TRIO_PCIE_INTFC_PORT_CONFIG_t port_config;
+       unsigned int reg_offset;
+
+       /* Check the port configuration. */
+       reg_offset =
+               (TRIO_PCIE_INTFC_PORT_CONFIG <<
+                       TRIO_CFG_REGION_ADDR__REG_SHIFT) |
+               (TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_INTERFACE <<
+                       TRIO_CFG_REGION_ADDR__INTFC_SHIFT) |
+               (mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);
+       port_config.word =
+               __gxio_mmio_read(trio_context->mmio_base_mac + reg_offset);
+
+       if (port_config.strap_state == AUTO_CONFIG_RC ||
+           port_config.strap_state == AUTO_CONFIG_RC_G1)
+               return 1;
+       else
+               return 0;
+}
+
 /*
  * Find valid controllers and fill in pci_controller structs for each
  * of them.
  *
- * Returns the number of controllers discovered.
+ * Return the number of controllers discovered.
  */
 int __init tile_pci_init(void)
 {
-       int num_trio_shims = 0;
        int ctl_index = 0;
        int i, j;
 
@@ -338,64 +431,62 @@ int __init tile_pci_init(void)
 
        pr_info("PCI: Searching for controllers...\n");
 
-       /*
-        * We loop over all the TRIO shims.
-        */
-       for (i = 0; i < TILEGX_NUM_TRIO; i++) {
-               int ret;
-
-               ret = tile_pcie_open(i);
-               if (ret < 0)
-                       continue;
-
-               num_trio_shims++;
-       }
-
        if (num_trio_shims == 0 || sim_is_simulator())
                return 0;
 
        /*
-        * Now determine which PCIe ports are configured to operate in RC mode.
-        * We look at the Board Information Block first and then see if there
-        * are any overriding configuration by the HW strapping pin.
+        * Now determine which PCIe ports are configured to operate in RC
+        * mode. There is a difference in the port configuration capability
+        * between the Gx36 and Gx72 devices.
+        *
+        * The Gx36 has configuration capability for each of the 3 PCIe
+        * interfaces (disable, auto endpoint, auto RC, etc.).
+        * On the Gx72, you can only select one of the 3 PCIe interfaces per
+        * TRIO to train automatically. Further, the allowable training modes
+        * are reduced to four options (auto endpoint, auto RC, stream x1,
+        * stream x4).
+        *
+        * For Gx36 ports, it must be allowed to be in RC mode by the
+        * Board Information Block, and the hardware strapping pins must be
+        * set to RC mode.
+        *
+        * For Gx72 ports, the port will operate in RC mode if either of the
+        * following is true:
+        * 1. It is allowed to be in RC mode by the Board Information Block,
+        *    and the BIB doesn't allow the EP mode.
+        * 2. It is allowed to be in either the RC or the EP mode by the BIB,
+        *    and the hardware strapping pin is set to RC mode.
         */
        for (i = 0; i < TILEGX_NUM_TRIO; i++) {
                gxio_trio_context_t *context = &trio_contexts[i];
-               int ret;
 
                if (context->fd < 0)
                        continue;
 
-               ret = hv_dev_pread(context->fd, 0,
-                       (HV_VirtAddr)&pcie_ports[i][0],
-                       sizeof(struct pcie_port_property) * TILEGX_TRIO_PCIES,
-                       GXIO_TRIO_OP_GET_PORT_PROPERTY);
-               if (ret < 0) {
-                       pr_err("PCI: PCIE_GET_PORT_PROPERTY failure, error %d,"
-                               " on TRIO %d\n", ret, i);
-                       continue;
-               }
-
                for (j = 0; j < TILEGX_TRIO_PCIES; j++) {
-                       if (pcie_ports[i][j].allow_rc) {
+                       int is_rc = 0;
+
+                       if (pcie_ports[i].is_gx72 &&
+                           pcie_ports[i].ports[j].allow_rc) {
+                               if (!pcie_ports[i].ports[j].allow_ep ||
+                                   strapped_for_rc(context, j))
+                                       is_rc = 1;
+                       } else if (pcie_ports[i].ports[j].allow_rc &&
+                                  strapped_for_rc(context, j)) {
+                               is_rc = 1;
+                       }
+                       if (is_rc) {
                                pcie_rc[i][j] = 1;
                                num_rc_controllers++;
                        }
-                       else if (pcie_ports[i][j].allow_ep) {
-                               num_ep_controllers++;
-                       }
                }
        }
 
-       /*
-        * Return if no PCIe ports are configured to operate in RC mode.
-        */
+       /* Return if no PCIe ports are configured to operate in RC mode. */
        if (num_rc_controllers == 0)
                return 0;
 
-       /*
-        * Set the TRIO pointer and MAC index for each PCIe RC port.
-        */
+       /* Set the TRIO pointer and MAC index for each PCIe RC port. */
        for (i = 0; i < TILEGX_NUM_TRIO; i++) {
                for (j = 0; j < TILEGX_TRIO_PCIES; j++) {
                        if (pcie_rc[i][j]) {
@@ -411,26 +502,32 @@ int __init tile_pci_init(void)
        }
 
 out:
-       /*
-        * Configure each PCIe RC port.
-        */
+       /* Configure each PCIe RC port. */
        for (i = 0; i < num_rc_controllers; i++) {
-               /*
-                * Configure the PCIe MAC to run in RC mode.
-                */
 
+               /* Configure the PCIe MAC to run in RC mode. */
                struct pci_controller *controller = &pci_controllers[i];
 
                controller->index = i;
                controller->ops = &tile_cfg_ops;
 
+               controller->io_space.start = PCIBIOS_MIN_IO +
+                       (i * IO_SPACE_SIZE);
+               controller->io_space.end = controller->io_space.start +
+                       IO_SPACE_SIZE - 1;
+               BUG_ON(controller->io_space.end > IO_SPACE_LIMIT);
+               controller->io_space.flags = IORESOURCE_IO;
+               snprintf(controller->io_space_name,
+                        sizeof(controller->io_space_name),
+                        "PCI I/O domain %d", i);
+               controller->io_space.name = controller->io_space_name;
+
                /*
                 * The PCI memory resource is located above the PA space.
                 * For every host bridge, the BAR window or the MMIO aperture
                 * is in range [3GB, 4GB - 1] of a 4GB space beyond the
                 * PA space.
                 */
-
                controller->mem_offset = TILE_PCI_MEM_START +
                        (i * TILE_PCI_BAR_WINDOW_TOP);
                controller->mem_space.start = controller->mem_offset +
@@ -458,7 +555,6 @@ static int tile_map_irq(const struct pci_dev *dev, u8 device, u8 pin)
        return controller->irq_intx_table[pin - 1];
 }
 
-
 static void fixup_read_and_payload_sizes(struct pci_controller *controller)
 {
        gxio_trio_context_t *trio_context = controller->trio;
@@ -472,9 +568,7 @@ static void fixup_read_and_payload_sizes(struct pci_controller *controller)
 
        mac = controller->mac;
 
-       /*
-        * Set our max read request size to be 4KB.
-        */
+       /* Set our max read request size to be 4KB. */
        reg_offset =
                (TRIO_PCIE_RC_DEVICE_CONTROL <<
                        TRIO_CFG_REGION_ADDR__REG_SHIFT) |
@@ -483,10 +577,10 @@ static void fixup_read_and_payload_sizes(struct pci_controller *controller)
                (mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);
 
        dev_control.word = __gxio_mmio_read32(trio_context->mmio_base_mac +
-                                               reg_offset);
+                                             reg_offset);
        dev_control.max_read_req_sz = 5;
        __gxio_mmio_write32(trio_context->mmio_base_mac + reg_offset,
-                                               dev_control.word);
+                           dev_control.word);
 
        /*
         * Set the max payload size supported by this Gx PCIe MAC.
@@ -502,10 +596,10 @@ static void fixup_read_and_payload_sizes(struct pci_controller *controller)
                (mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);
 
        rc_dev_cap.word = __gxio_mmio_read32(trio_context->mmio_base_mac +
-                                               reg_offset);
+                                            reg_offset);
        rc_dev_cap.mps_sup = 1;
        __gxio_mmio_write32(trio_context->mmio_base_mac + reg_offset,
-                                               rc_dev_cap.word);
+                           rc_dev_cap.word);
 
        /* Configure PCI Express MPS setting. */
        list_for_each_entry(child, &root_bus->children, node) {
@@ -533,7 +627,7 @@ static void fixup_read_and_payload_sizes(struct pci_controller *controller)
                                    dev_control.max_payload_size,
                                    dev_control.max_read_req_sz,
                                    mac);
-        if (err < 0) {
+       if (err < 0) {
                pr_err("PCI: PCIE_CONFIGURE_MAC_MPS_MRS failure, "
                        "MAC %d on TRIO %d\n",
                        mac, controller->trio_index);
@@ -570,21 +664,14 @@ static int setup_pcie_rc_delay(char *str)
                if (!isdigit(*str))
                        return -EINVAL;
                delay = simple_strtoul(str, (char **)&str, 10);
-               if (delay > MAX_RC_DELAY)
-                       return -EINVAL;
        }
 
        rc_delay[trio_index][mac] = delay ? : DEFAULT_RC_DELAY;
-       pr_info("Delaying PCIe RC link training for %u sec"
-               " on MAC %lu on TRIO %lu\n", rc_delay[trio_index][mac],
-               mac, trio_index);
        return 0;
 }
 early_param("pcie_rc_delay", setup_pcie_rc_delay);
 
-/*
- * PCI initialization entry point, called by subsys_initcall.
- */
+/* PCI initialization entry point, called by subsys_initcall. */
 int __init pcibios_init(void)
 {
        resource_size_t offset;
@@ -594,34 +681,9 @@ int __init pcibios_init(void)
 
        tile_pci_init();
 
-       if (num_rc_controllers == 0 && num_ep_controllers == 0)
+       if (num_rc_controllers == 0)
                return 0;
 
-       /*
-        * We loop over all the TRIO shims and set up the MMIO mappings.
-        */
-       for (i = 0; i < TILEGX_NUM_TRIO; i++) {
-               gxio_trio_context_t *context = &trio_contexts[i];
-
-               if (context->fd < 0)
-                       continue;
-
-               /*
-                * Map in the MMIO space for the MAC.
-                */
-               offset = 0;
-               context->mmio_base_mac =
-                       iorpc_ioremap(context->fd, offset,
-                                     HV_TRIO_CONFIG_IOREMAP_SIZE);
-               if (context->mmio_base_mac == NULL) {
-                       pr_err("PCI: MAC map failure on TRIO %d\n", i);
-
-                       hv_dev_close(context->fd);
-                       context->fd = -1;
-                       continue;
-               }
-       }
-
        /*
         * Delay a bit in case devices aren't ready.  Some devices are
         * known to require at least 20ms here, but we use a more
@@ -633,7 +695,6 @@ int __init pcibios_init(void)
        for (next_busno = 0, i = 0; i < num_rc_controllers; i++) {
                struct pci_controller *controller = &pci_controllers[i];
                gxio_trio_context_t *trio_context = controller->trio;
-               TRIO_PCIE_INTFC_PORT_CONFIG_t port_config;
                TRIO_PCIE_INTFC_PORT_STATUS_t port_status;
                TRIO_PCIE_INTFC_TX_FIFO_CTL_t tx_fifo_ctl;
                struct pci_bus *bus;
@@ -650,75 +711,64 @@ int __init pcibios_init(void)
                mac = controller->mac;
 
                /*
-                * Check the port strap state which will override the BIB
-                * setting.
+                * Check for PCIe link-up status to decide if we need
+                * to force the link to come up.
                 */
-
                reg_offset =
-                       (TRIO_PCIE_INTFC_PORT_CONFIG <<
+                       (TRIO_PCIE_INTFC_PORT_STATUS <<
                                TRIO_CFG_REGION_ADDR__REG_SHIFT) |
                        (TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_INTERFACE <<
-                               TRIO_CFG_REGION_ADDR__INTFC_SHIFT ) |
+                               TRIO_CFG_REGION_ADDR__INTFC_SHIFT) |
                        (mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);
 
-               port_config.word =
+               port_status.word =
                        __gxio_mmio_read(trio_context->mmio_base_mac +
                                         reg_offset);
-
-               if ((port_config.strap_state !=
-                       TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_AUTO_CONFIG_RC) &&
-                       (port_config.strap_state !=
-                       TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_AUTO_CONFIG_RC_G1)) {
-                       /*
-                        * If this is really intended to be an EP port,
-                        * record it so that the endpoint driver will know about it.
-                        */
-                       if (port_config.strap_state ==
-                       TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_AUTO_CONFIG_ENDPOINT ||
-                       port_config.strap_state ==
-                       TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_AUTO_CONFIG_ENDPOINT_G1)
-                               pcie_ports[trio_index][mac].allow_ep = 1;
-
-                       continue;
+               if (!port_status.dl_up) {
+                       if (rc_delay[trio_index][mac]) {
+                               pr_info("Delaying PCIe RC TRIO init %d sec"
+                                       " on MAC %d on TRIO %d\n",
+                                       rc_delay[trio_index][mac], mac,
+                                       trio_index);
+                               msleep(rc_delay[trio_index][mac] * 1000);
+                       }
+                       ret = gxio_trio_force_rc_link_up(trio_context, mac);
+                       if (ret < 0)
+                               pr_err("PCI: PCIE_FORCE_LINK_UP failure, "
+                                       "MAC %d on TRIO %d\n", mac, trio_index);
                }
 
-               /*
-                * Delay the RC link training if needed.
-                */
-               if (rc_delay[trio_index][mac])
-                       msleep(rc_delay[trio_index][mac] * 1000);
-
-               ret = gxio_trio_force_rc_link_up(trio_context, mac);
-               if (ret < 0)
-                       pr_err("PCI: PCIE_FORCE_LINK_UP failure, "
-                               "MAC %d on TRIO %d\n", mac, trio_index);
-
                pr_info("PCI: Found PCI controller #%d on TRIO %d MAC %d\n", i,
                        trio_index, controller->mac);
 
-               /*
-                * Wait a bit here because some EP devices take longer
-                * to come up.
-                */
-               msleep(1000);
-
-               /*
-                * Check for PCIe link-up status.
-                */
-
-               reg_offset =
-                       (TRIO_PCIE_INTFC_PORT_STATUS <<
-                               TRIO_CFG_REGION_ADDR__REG_SHIFT) |
-                       (TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_INTERFACE <<
-                               TRIO_CFG_REGION_ADDR__INTFC_SHIFT ) |
-                       (mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);
+               /* Delay the bus probe if needed. */
+               if (rc_delay[trio_index][mac]) {
+                       pr_info("Delaying PCIe RC bus enumerating %d sec"
+                               " on MAC %d on TRIO %d\n",
+                               rc_delay[trio_index][mac], mac,
+                               trio_index);
+                       msleep(rc_delay[trio_index][mac] * 1000);
+               } else {
+                       /*
+                        * Wait a bit here because some EP devices
+                        * take longer to come up.
+                        */
+                       msleep(1000);
+               }
 
+               /* Check for PCIe link-up status again. */
                port_status.word =
                        __gxio_mmio_read(trio_context->mmio_base_mac +
                                         reg_offset);
                if (!port_status.dl_up) {
-                       pr_err("PCI: link is down, MAC %d on TRIO %d\n",
-                               mac, trio_index);
+                       if (pcie_ports[trio_index].ports[mac].removable) {
+                               pr_info("PCI: link is down, MAC %d on TRIO %d\n",
+                                       mac, trio_index);
+                               pr_info("This is expected if no PCIe card"
+                                       " is connected to this link\n");
+                       } else
+                               pr_err("PCI: link is down, MAC %d on TRIO %d\n",
+                                       mac, trio_index);
                        continue;
                }
 
@@ -744,7 +794,6 @@ int __init pcibios_init(void)
                 * Change the device ID so that Linux bus crawl doesn't confuse
                 * the internal bridge with any Tilera endpoints.
                 */
-
                reg_offset =
                        (TRIO_PCIE_RC_DEVICE_ID_VEN_ID <<
                                TRIO_CFG_REGION_ADDR__REG_SHIFT) |
@@ -757,10 +806,7 @@ int __init pcibios_init(void)
                                    TRIO_PCIE_RC_DEVICE_ID_VEN_ID__DEV_ID_SHIFT) |
                                    TILERA_VENDOR_ID);
 
-               /*
-                * Set the internal P2P bridge class code.
-                */
-
+               /* Set the internal P2P bridge class code. */
                reg_offset =
                        (TRIO_PCIE_RC_REVISION_ID <<
                                TRIO_CFG_REGION_ADDR__REG_SHIFT) |
@@ -771,26 +817,22 @@ int __init pcibios_init(void)
                class_code_revision =
                        __gxio_mmio_read32(trio_context->mmio_base_mac +
                                           reg_offset);
-               class_code_revision = (class_code_revision & 0xff ) |
-                                       (PCI_CLASS_BRIDGE_PCI << 16);
+               class_code_revision = (class_code_revision & 0xff) |
+                       (PCI_CLASS_BRIDGE_PCI << 16);
 
                __gxio_mmio_write32(trio_context->mmio_base_mac +
                                    reg_offset, class_code_revision);
 
 #ifdef USE_SHARED_PCIE_CONFIG_REGION
 
-               /*
-                * Map in the MMIO space for the PIO region.
-                */
+               /* Map in the MMIO space for the PIO region. */
                offset = HV_TRIO_PIO_OFFSET(trio_context->pio_cfg_index) |
                        (((unsigned long long)mac) <<
                        TRIO_TILE_PIO_REGION_SETUP_CFG_ADDR__MAC_SHIFT);
 
 #else
 
-               /*
-                * Alloc a PIO region for PCI config access per MAC.
-                */
+               /* Alloc a PIO region for PCI config access per MAC. */
                ret = gxio_trio_alloc_pio_regions(trio_context, 1, 0, 0);
                if (ret < 0) {
                        pr_err("PCI: PCI CFG PIO alloc failure for mac %d "
@@ -801,9 +843,7 @@ int __init pcibios_init(void)
 
                trio_context->pio_cfg_index[mac] = ret;
 
-               /*
-                * For PIO CFG, the bus_address_hi parameter is 0.
-                */
+               /* For PIO CFG, the bus_address_hi parameter is 0. */
                ret = gxio_trio_init_pio_region_aux(trio_context,
                        trio_context->pio_cfg_index[mac],
                        mac, 0, HV_TRIO_PIO_FLAG_CONFIG_SPACE);
@@ -820,9 +860,15 @@ int __init pcibios_init(void)
 
 #endif
 
+               /*
+                * To save VMALLOC space, we take advantage of the fact that
+                * bit 29 in the PIO CFG address format is reserved 0. With
+                * TRIO_TILE_PIO_REGION_SETUP_CFG_ADDR__MAC_SHIFT being 30,
+                * this cuts VMALLOC space usage from 1GB to 512MB per mac.
+                */
                trio_context->mmio_base_pio_cfg[mac] =
-                       iorpc_ioremap(trio_context->fd, offset,
-                       (1 << TRIO_TILE_PIO_REGION_SETUP_CFG_ADDR__MAC_SHIFT));
+                       iorpc_ioremap(trio_context->fd, offset, (1UL <<
+                       (TRIO_TILE_PIO_REGION_SETUP_CFG_ADDR__MAC_SHIFT - 1)));
                if (trio_context->mmio_base_pio_cfg[mac] == NULL) {
                        pr_err("PCI: PIO map failure for mac %d on TRIO %d\n",
                                mac, trio_index);
@@ -830,9 +876,7 @@ int __init pcibios_init(void)
                        continue;
                }
 
-               /*
-                * Initialize the PCIe interrupts.
-                */
+               /* Initialize the PCIe interrupts. */
                if (tile_init_irqs(controller)) {
                        pr_err("PCI: IRQs init failure for mac %d on TRIO %d\n",
                                mac, trio_index);
@@ -843,17 +887,16 @@ int __init pcibios_init(void)
                /*
                 * The PCI memory resource is located above the PA space.
                 * The memory range for the PCI root bus should not overlap
-                * with the physical RAM
+                * with the physical RAM.
                 */
                pci_add_resource_offset(&resources, &controller->mem_space,
                                        controller->mem_offset);
-
+               pci_add_resource(&resources, &controller->io_space);
                controller->first_busno = next_busno;
                bus = pci_scan_root_bus(NULL, next_busno, controller->ops,
                                        controller, &resources);
                controller->root_bus = bus;
                next_busno = bus->busn_res.end + 1;
-
        }
 
        /* Do machine dependent PCI interrupt routing */
@@ -865,7 +908,6 @@ int __init pcibios_init(void)
         * It allocates all of the resources (I/O memory, etc)
         * associated with the devices read in above.
         */
-
        pci_assign_unassigned_resources();
 
        /* Record the I/O resources in the PCI controller structure. */
@@ -873,9 +915,6 @@ int __init pcibios_init(void)
                struct pci_controller *controller = &pci_controllers[i];
                gxio_trio_context_t *trio_context = controller->trio;
                struct pci_bus *root_bus = pci_controllers[i].root_bus;
-               struct pci_bus *next_bus;
-               uint32_t bus_address_hi;
-               struct pci_dev *dev;
                int ret;
                int j;
 
@@ -889,43 +928,12 @@ int __init pcibios_init(void)
                /* Configure the max_payload_size values for this domain. */
                fixup_read_and_payload_sizes(controller);
 
-               list_for_each_entry(dev, &root_bus->devices, bus_list) {
-                       /* Find the PCI host controller, ie. the 1st bridge. */
-                       if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI &&
-                               (PCI_SLOT(dev->devfn) == 0)) {
-                               next_bus = dev->subordinate;
-                               pci_controllers[i].mem_resources[0] =
-                                       *next_bus->resource[0];
-                               pci_controllers[i].mem_resources[1] =
-                                        *next_bus->resource[1];
-                               pci_controllers[i].mem_resources[2] =
-                                        *next_bus->resource[2];
-
-                               break;
-                       }
-               }
-
-               if (pci_controllers[i].mem_resources[1].flags & IORESOURCE_MEM)
-                       bus_address_hi =
-                               pci_controllers[i].mem_resources[1].start >> 32;
-               else if (pci_controllers[i].mem_resources[2].flags & IORESOURCE_PREFETCH)
-                       bus_address_hi =
-                               pci_controllers[i].mem_resources[2].start >> 32;
-               else {
-                       /* This is unlikely. */
-                       pr_err("PCI: no memory resources on TRIO %d mac %d\n",
-                               controller->trio_index, controller->mac);
-                       continue;
-               }
-
-               /*
-                * Alloc a PIO region for PCI memory access for each RC port.
-                */
+               /* Alloc a PIO region for PCI memory access for each RC port. */
                ret = gxio_trio_alloc_pio_regions(trio_context, 1, 0, 0);
                if (ret < 0) {
                        pr_err("PCI: MEM PIO alloc failure on TRIO %d mac %d, "
-                               "give up\n", controller->trio_index,
-                               controller->mac);
+                              "give up\n", controller->trio_index,
+                              controller->mac);
 
                        continue;
                }
@@ -943,12 +951,45 @@ int __init pcibios_init(void)
                                                    0);
                if (ret < 0) {
                        pr_err("PCI: MEM PIO init failure on TRIO %d mac %d, "
-                               "give up\n", controller->trio_index,
-                               controller->mac);
+                              "give up\n", controller->trio_index,
+                              controller->mac);
 
                        continue;
                }
 
+#ifdef CONFIG_TILE_PCI_IO
+               /*
+                * Alloc a PIO region for PCI I/O space access for each RC port.
+                */
+               ret = gxio_trio_alloc_pio_regions(trio_context, 1, 0, 0);
+               if (ret < 0) {
+                       pr_err("PCI: I/O PIO alloc failure on TRIO %d mac %d, "
+                              "give up\n", controller->trio_index,
+                              controller->mac);
+
+                       continue;
+               }
+
+               controller->pio_io_index = ret;
+
+               /*
+                * For PIO IO, the bus_address_hi parameter is hard-coded 0
+                * because PCI I/O address space is 32-bit.
+                */
+               ret = gxio_trio_init_pio_region_aux(trio_context,
+                                                   controller->pio_io_index,
+                                                   controller->mac,
+                                                   0,
+                                                   HV_TRIO_PIO_FLAG_IO_SPACE);
+               if (ret < 0) {
+                       pr_err("PCI: I/O PIO init failure on TRIO %d mac %d, "
+                              "give up\n", controller->trio_index,
+                              controller->mac);
+
+                       continue;
+               }
+#endif
+
                /*
                 * Configure a Mem-Map region for each memory controller so
                 * that Linux can map all of its PA space to the PCI bus.
@@ -963,9 +1004,9 @@ int __init pcibios_init(void)
                                                          0);
                        if (ret < 0) {
                                pr_err("PCI: Mem-Map alloc failure on TRIO %d "
-                                       "mac %d for MC %d, give up\n",
-                                       controller->trio_index,
-                                       controller->mac, j);
+                                      "mac %d for MC %d, give up\n",
+                                      controller->trio_index,
+                                      controller->mac, j);
 
                                goto alloc_mem_map_failed;
                        }
@@ -996,9 +1037,9 @@ int __init pcibios_init(void)
                                GXIO_TRIO_ORDER_MODE_UNORDERED);
                        if (ret < 0) {
                                pr_err("PCI: Mem-Map init failure on TRIO %d "
-                                       "mac %d for MC %d, give up\n",
-                                       controller->trio_index,
-                                       controller->mac, j);
+                                      "mac %d for MC %d, give up\n",
+                                      controller->trio_index,
+                                      controller->mac, j);
 
                                goto alloc_mem_map_failed;
                        }
@@ -1007,23 +1048,19 @@ int __init pcibios_init(void)
 alloc_mem_map_failed:
                        break;
                }
-
        }
 
        return 0;
 }
 subsys_initcall(pcibios_init);
 
-/* Note: to be deleted after Linux 3.6 merge. */
+/* No bus fixups needed. */
 void pcibios_fixup_bus(struct pci_bus *bus)
 {
 }
 
-/*
- * This can be called from the generic PCI layer, but doesn't need to
- * do anything.
- */
-char *pcibios_setup(char *str)
+/* Process any "pci=" kernel boot arguments. */
+char *__init pcibios_setup(char *str)
 {
        if (!strcmp(str, "off")) {
                pci_probe = 0;
@@ -1034,8 +1071,7 @@ char *pcibios_setup(char *str)
 
 /*
  * Enable memory address decoding, as appropriate, for the
- * device described by the 'dev' struct. The I/O decoding
- * is disabled, though the TILE-Gx supports I/O addressing.
+ * device described by the 'dev' struct.
  *
  * This is called from the generic PCI layer, and can be called
  * for bridges or endpoints.
@@ -1065,19 +1101,15 @@ void __iomem *ioremap(resource_size_t phys_addr, unsigned long size)
        resource_size_t start;
        resource_size_t end;
        int trio_fd;
-       int i, j;
+       int i;
 
        start = phys_addr;
        end = phys_addr + size - 1;
 
        /*
-        * In the following, each PCI controller's mem_resources[1]
-        * represents its (non-prefetchable) PCI memory resource and
-        * mem_resources[2] refers to its prefetchable PCI memory resource.
-        * By searching phys_addr in each controller's mem_resources[], we can
+        * By searching phys_addr in each controller's mem_space, we can
         * determine the controller that should accept the PCI memory access.
         */
-
        for (i = 0; i < num_rc_controllers; i++) {
                /*
                 * Skip controllers that are not properly initialized or
@@ -1086,25 +1118,18 @@ void __iomem *ioremap(resource_size_t phys_addr, unsigned long size)
                if (pci_controllers[i].root_bus == NULL)
                        continue;
 
-               for (j = 1; j < 3; j++) {
-                       bar_start =
-                               pci_controllers[i].mem_resources[j].start;
-                       bar_end =
-                               pci_controllers[i].mem_resources[j].end;
-
-                       if ((start >= bar_start) && (end <= bar_end)) {
-
-                               controller = &pci_controllers[i];
+               bar_start = pci_controllers[i].mem_space.start;
+               bar_end = pci_controllers[i].mem_space.end;
 
-                               goto got_it;
-                       }
+               if ((start >= bar_start) && (end <= bar_end)) {
+                       controller = &pci_controllers[i];
+                       break;
                }
        }
 
        if (controller == NULL)
                return NULL;
 
-got_it:
        trio_fd = controller->trio->fd;
 
        /* Convert the resource start to the bus address offset. */
@@ -1112,14 +1137,71 @@ got_it:
 
        offset = HV_TRIO_PIO_OFFSET(controller->pio_mem_index) + start;
 
-       /*
-        * We need to keep the PCI bus address's in-page offset in the VA.
-        */
+       /* We need to keep the PCI bus address's in-page offset in the VA. */
        return iorpc_ioremap(trio_fd, offset, size) +
-               (phys_addr & (PAGE_SIZE - 1));
+               (start & (PAGE_SIZE - 1));
 }
 EXPORT_SYMBOL(ioremap);
 
+#ifdef CONFIG_TILE_PCI_IO
+/* Map a PCI I/O address into VA space. */
+void __iomem *ioport_map(unsigned long port, unsigned int size)
+{
+       struct pci_controller *controller = NULL;
+       resource_size_t bar_start;
+       resource_size_t bar_end;
+       resource_size_t offset;
+       resource_size_t start;
+       resource_size_t end;
+       int trio_fd;
+       int i;
+
+       start = port;
+       end = port + size - 1;
+
+       /*
+        * By searching the port in each controller's io_space, we can
+        * determine the controller that should accept the PCI I/O access.
+        */
+       for (i = 0; i < num_rc_controllers; i++) {
+               /*
+                * Skip controllers that are not properly initialized or
+                * have down links.
+                */
+               if (pci_controllers[i].root_bus == NULL)
+                       continue;
+
+               bar_start = pci_controllers[i].io_space.start;
+               bar_end = pci_controllers[i].io_space.end;
+
+               if ((start >= bar_start) && (end <= bar_end)) {
+                       controller = &pci_controllers[i];
+                       break;
+               }
+       }
+
+       if (controller == NULL)
+               return NULL;
+
+       trio_fd = controller->trio->fd;
+
+       /* Convert the resource start to the bus address offset. */
+       port -= controller->io_space.start;
+
+       offset = HV_TRIO_PIO_OFFSET(controller->pio_io_index) + port;
+
+       /* We need to keep the PCI bus address's in-page offset in the VA. */
+       return iorpc_ioremap(trio_fd, offset, size) + (port & (PAGE_SIZE - 1));
+}
+EXPORT_SYMBOL(ioport_map);
+
+void ioport_unmap(void __iomem *addr)
+{
+       iounmap(addr);
+}
+EXPORT_SYMBOL(ioport_unmap);
+#endif
+
 void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
 {
        iounmap(addr);
@@ -1141,7 +1223,6 @@ EXPORT_SYMBOL(pci_iounmap);
  * offset is in bytes, from the start of config space for the
  * specified bus & device.
  */
-
 static int tile_cfg_read(struct pci_bus *bus, unsigned int devfn, int offset,
                         int size, u32 *val)
 {
@@ -1191,7 +1272,6 @@ static int tile_cfg_read(struct pci_bus *bus, unsigned int devfn, int offset,
         * Accesses to the directly attached device have to be
         * sent as type-0 configs.
         */
-
        if (busnum == (controller->first_busno + 1)) {
                /*
                 * There is only one device off of our built-in P2P bridge.
@@ -1213,9 +1293,8 @@ static int tile_cfg_read(struct pci_bus *bus, unsigned int devfn, int offset,
         * Note that we don't set the mac field in cfg_addr because the
         * mapping is per port.
         */
-
        mmio_addr = trio_context->mmio_base_pio_cfg[controller->mac] +
-                       cfg_addr.word;
+               cfg_addr.word;
 
 valid_device:
 
@@ -1319,7 +1398,6 @@ static int tile_cfg_write(struct pci_bus *bus, unsigned int devfn, int offset,
         * Accesses to the directly attached device have to be
         * sent as type-0 configs.
         */
-
        if (busnum == (controller->first_busno + 1)) {
                /*
                 * There is only one device off of our built-in P2P bridge.
@@ -1341,7 +1419,6 @@ static int tile_cfg_write(struct pci_bus *bus, unsigned int devfn, int offset,
         * Note that we don't set the mac field in cfg_addr because the
         * mapping is per port.
         */
-
        mmio_addr = trio_context->mmio_base_pio_cfg[controller->mac] +
                        cfg_addr.word;
 
@@ -1379,11 +1456,8 @@ static struct pci_ops tile_cfg_ops = {
 };
 
 
-/*
- * MSI support starts here.
- */
-static unsigned int
-tilegx_msi_startup(struct irq_data *d)
+/* MSI support starts here. */
+static unsigned int tilegx_msi_startup(struct irq_data *d)
 {
        if (d->msi_desc)
                unmask_msi_irq(d);
@@ -1391,21 +1465,18 @@ tilegx_msi_startup(struct irq_data *d)
        return 0;
 }
 
-static void
-tilegx_msi_ack(struct irq_data *d)
+static void tilegx_msi_ack(struct irq_data *d)
 {
        __insn_mtspr(SPR_IPI_EVENT_RESET_K, 1UL << d->irq);
 }
 
-static void
-tilegx_msi_mask(struct irq_data *d)
+static void tilegx_msi_mask(struct irq_data *d)
 {
        mask_msi_irq(d);
        __insn_mtspr(SPR_IPI_MASK_SET_K, 1UL << d->irq);
 }
 
-static void
-tilegx_msi_unmask(struct irq_data *d)
+static void tilegx_msi_unmask(struct irq_data *d)
 {
        __insn_mtspr(SPR_IPI_MASK_RESET_K, 1UL << d->irq);
        unmask_msi_irq(d);
@@ -1462,32 +1533,55 @@ int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
        trio_context = controller->trio;
 
        /*
-        * Allocate the Mem-Map that will accept the MSI write and
-        * trigger the TILE-side interrupts.
+        * Allocate a scatter-queue that will accept the MSI write and
+        * trigger the TILE-side interrupts. We use the scatter-queue regions
+        * before the mem map regions, because the latter are needed by more
+        * applications.
         */
-       mem_map = gxio_trio_alloc_memory_maps(trio_context, 1, 0, 0);
-       if (mem_map < 0) {
-               dev_printk(KERN_INFO, &pdev->dev,
-                       "%s Mem-Map alloc failure. "
-                       "Failed to initialize MSI interrupts. "
-                       "Falling back to legacy interrupts.\n",
-                       desc->msi_attrib.is_msix ? "MSI-X" : "MSI");
+       mem_map = gxio_trio_alloc_scatter_queues(trio_context, 1, 0, 0);
+       if (mem_map >= 0) {
+               TRIO_MAP_SQ_DOORBELL_FMT_t doorbell_template = {{
+                       .pop = 0,
+                       .doorbell = 1,
+               }};
+
+               mem_map += TRIO_NUM_MAP_MEM_REGIONS;
+               mem_map_base = MEM_MAP_INTR_REGIONS_BASE +
+                       mem_map * MEM_MAP_INTR_REGION_SIZE;
+               mem_map_limit = mem_map_base + MEM_MAP_INTR_REGION_SIZE - 1;
+
+               msi_addr = mem_map_base + MEM_MAP_INTR_REGION_SIZE - 8;
+               msg.data = (unsigned int)doorbell_template.word;
+       } else {
+               /* SQ regions are out, allocate from map mem regions. */
+               mem_map = gxio_trio_alloc_memory_maps(trio_context, 1, 0, 0);
+               if (mem_map < 0) {
+                       dev_printk(KERN_INFO, &pdev->dev,
+                               "%s Mem-Map alloc failure. "
+                               "Failed to initialize MSI interrupts. "
+                               "Falling back to legacy interrupts.\n",
+                               desc->msi_attrib.is_msix ? "MSI-X" : "MSI");
+                       ret = -ENOMEM;
+                       goto msi_mem_map_alloc_failure;
+               }
 
-               ret = -ENOMEM;
-               goto msi_mem_map_alloc_failure;
+               mem_map_base = MEM_MAP_INTR_REGIONS_BASE +
+                       mem_map * MEM_MAP_INTR_REGION_SIZE;
+               mem_map_limit = mem_map_base + MEM_MAP_INTR_REGION_SIZE - 1;
+
+               msi_addr = mem_map_base + TRIO_MAP_MEM_REG_INT3 -
+                       TRIO_MAP_MEM_REG_INT0;
+
+               msg.data = mem_map;
        }
 
        /* We try to distribute different IRQs to different tiles. */
        cpu = tile_irq_cpu(irq);
 
        /*
-        * Now call up to the HV to configure the Mem-Map interrupt and
+        * Now call up to the HV to configure the MSI interrupt and
         * set up the IPI binding.
         */
-       mem_map_base = MEM_MAP_INTR_REGIONS_BASE +
-               mem_map * MEM_MAP_INTR_REGION_SIZE;
-       mem_map_limit = mem_map_base + MEM_MAP_INTR_REGION_SIZE - 1;
-
        ret = gxio_trio_config_msi_intr(trio_context, cpu_x(cpu), cpu_y(cpu),
                                        KERNEL_PL, irq, controller->mac,
                                        mem_map, mem_map_base, mem_map_limit,
@@ -1500,13 +1594,9 @@ int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
 
        irq_set_msi_desc(irq, desc);
 
-       msi_addr = mem_map_base + TRIO_MAP_MEM_REG_INT3 - TRIO_MAP_MEM_REG_INT0;
-
        msg.address_hi = msi_addr >> 32;
        msg.address_lo = msi_addr & 0xffffffff;
 
-       msg.data = mem_map;
-
        write_msi_msg(irq, &msg);
        irq_set_chip_and_handler(irq, &tilegx_msi_chip, handle_level_irq);
        irq_set_handler_data(irq, controller);
index dafc447b5125ad13bea8a37a62e637b8994e93cc..681100c59fda1bfc0ae3f31d5957194634ee5faf 100644 (file)
@@ -113,7 +113,6 @@ arch_initcall(proc_tile_init);
  * Support /proc/sys/tile directory
  */
 
-#ifndef __tilegx__  /* FIXME: GX: no support for unaligned access yet */
 static ctl_table unaligned_subtable[] = {
        {
                .procname       = "enabled",
@@ -160,4 +159,3 @@ static int __init proc_sys_tile_init(void)
 }
 
 arch_initcall(proc_sys_tile_init);
-#endif
index 8ac304484f988a685118d4d4475b48cee1156060..781ac3368da5a5d30227d5f2fb28907f54d44d4f 100644 (file)
 #include <linux/kernel.h>
 #include <linux/tracehook.h>
 #include <linux/signal.h>
+#include <linux/kvm_host.h>
 #include <asm/stack.h>
 #include <asm/switch_to.h>
 #include <asm/homecache.h>
 #include <asm/syscalls.h>
 #include <asm/traps.h>
 #include <asm/setup.h>
+#include <asm/uaccess.h>
 #ifdef CONFIG_HARDWALL
 #include <asm/hardwall.h>
 #endif
@@ -74,19 +76,6 @@ void arch_release_thread_info(struct thread_info *info)
 {
        struct single_step_state *step_state = info->step_state;
 
-#ifdef CONFIG_HARDWALL
-       /*
-        * We free a thread_info from the context of the task that has
-        * been scheduled next, so the original task is already dead.
-        * Calling deactivate here just frees up the data structures.
-        * If the task we're freeing held the last reference to a
-        * hardwall fd, it would have been released prior to this point
-        * anyway via exit_files(), and the hardwall_task.info pointers
-        * would be NULL by now.
-        */
-       hardwall_deactivate_all(info->task);
-#endif
-
        if (step_state) {
 
                /*
@@ -160,6 +149,14 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
         */
        task_thread_info(p)->step_state = NULL;
 
+#ifdef __tilegx__
+       /*
+        * Do not clone unalign jit fixup from the parent; each thread
+        * must allocate its own on demand.
+        */
+       task_thread_info(p)->unalign_jit_base = NULL;
+#endif
+
        /*
         * Copy the registers onto the kernel stack so the
         * return-from-interrupt code will reload it into registers.
@@ -191,16 +188,8 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
        memset(&p->thread.dma_async_tlb, 0, sizeof(struct async_tlb));
 #endif
 
-#if CHIP_HAS_SN_PROC()
-       /* Likewise, the new thread is not running static processor code. */
-       p->thread.sn_proc_running = 0;
-       memset(&p->thread.sn_async_tlb, 0, sizeof(struct async_tlb));
-#endif
-
-#if CHIP_HAS_PROC_STATUS_SPR()
        /* New thread has its miscellaneous processor state bits clear. */
        p->thread.proc_status = 0;
-#endif
 
 #ifdef CONFIG_HARDWALL
        /* New thread does not own any networks. */
@@ -218,19 +207,32 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
        return 0;
 }
 
+int set_unalign_ctl(struct task_struct *tsk, unsigned int val)
+{
+       task_thread_info(tsk)->align_ctl = val;
+       return 0;
+}
+
+int get_unalign_ctl(struct task_struct *tsk, unsigned long adr)
+{
+       return put_user(task_thread_info(tsk)->align_ctl,
+                       (unsigned int __user *)adr);
+}
+
+static struct task_struct corrupt_current = { .comm = "<corrupt>" };
+
 /*
  * Return "current" if it looks plausible, or else a pointer to a dummy.
  * This can be helpful if we are just trying to emit a clean panic.
  */
 struct task_struct *validate_current(void)
 {
-       static struct task_struct corrupt = { .comm = "<corrupt>" };
        struct task_struct *tsk = current;
        if (unlikely((unsigned long)tsk < PAGE_OFFSET ||
                     (high_memory && (void *)tsk > high_memory) ||
                     ((unsigned long)tsk & (__alignof__(*tsk) - 1)) != 0)) {
                pr_err("Corrupt 'current' %p (sp %#lx)\n", tsk, stack_pointer);
-               tsk = &corrupt;
+               tsk = &corrupt_current;
        }
        return tsk;
 }
@@ -238,11 +240,13 @@ struct task_struct *validate_current(void)
 /* Take and return the pointer to the previous task, for schedule_tail(). */
 struct task_struct *sim_notify_fork(struct task_struct *prev)
 {
+#ifndef CONFIG_KVM_GUEST   /* see notify_sim_task_change() */
        struct task_struct *tsk = current;
        __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_FORK_PARENT |
                     (tsk->thread.creator_pid << _SIM_CONTROL_OPERATOR_BITS));
        __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_FORK |
                     (tsk->pid << _SIM_CONTROL_OPERATOR_BITS));
+#endif
        return prev;
 }
 
@@ -369,15 +373,11 @@ static void save_arch_state(struct thread_struct *t)
        t->system_save[2] = __insn_mfspr(SPR_SYSTEM_SAVE_0_2);
        t->system_save[3] = __insn_mfspr(SPR_SYSTEM_SAVE_0_3);
        t->intctrl_0 = __insn_mfspr(SPR_INTCTRL_0_STATUS);
-#if CHIP_HAS_PROC_STATUS_SPR()
        t->proc_status = __insn_mfspr(SPR_PROC_STATUS);
-#endif
 #if !CHIP_HAS_FIXED_INTVEC_BASE()
        t->interrupt_vector_base = __insn_mfspr(SPR_INTERRUPT_VECTOR_BASE_0);
 #endif
-#if CHIP_HAS_TILE_RTF_HWM()
        t->tile_rtf_hwm = __insn_mfspr(SPR_TILE_RTF_HWM);
-#endif
 #if CHIP_HAS_DSTREAM_PF()
        t->dstream_pf = __insn_mfspr(SPR_DSTREAM_PF);
 #endif
@@ -398,15 +398,11 @@ static void restore_arch_state(const struct thread_struct *t)
        __insn_mtspr(SPR_SYSTEM_SAVE_0_2, t->system_save[2]);
        __insn_mtspr(SPR_SYSTEM_SAVE_0_3, t->system_save[3]);
        __insn_mtspr(SPR_INTCTRL_0_STATUS, t->intctrl_0);
-#if CHIP_HAS_PROC_STATUS_SPR()
        __insn_mtspr(SPR_PROC_STATUS, t->proc_status);
-#endif
 #if !CHIP_HAS_FIXED_INTVEC_BASE()
        __insn_mtspr(SPR_INTERRUPT_VECTOR_BASE_0, t->interrupt_vector_base);
 #endif
-#if CHIP_HAS_TILE_RTF_HWM()
        __insn_mtspr(SPR_TILE_RTF_HWM, t->tile_rtf_hwm);
-#endif
 #if CHIP_HAS_DSTREAM_PF()
        __insn_mtspr(SPR_DSTREAM_PF, t->dstream_pf);
 #endif
@@ -415,32 +411,22 @@ static void restore_arch_state(const struct thread_struct *t)
 
 void _prepare_arch_switch(struct task_struct *next)
 {
-#if CHIP_HAS_SN_PROC()
-       int snctl;
-#endif
 #if CHIP_HAS_TILE_DMA()
        struct tile_dma_state *dma = &current->thread.tile_dma_state;
        if (dma->enabled)
                save_tile_dma_state(dma);
 #endif
-#if CHIP_HAS_SN_PROC()
-       /*
-        * Suspend the static network processor if it was running.
-        * We do not suspend the fabric itself, just like we don't
-        * try to suspend the UDN.
-        */
-       snctl = __insn_mfspr(SPR_SNCTL);
-       current->thread.sn_proc_running =
-               (snctl & SPR_SNCTL__FRZPROC_MASK) == 0;
-       if (current->thread.sn_proc_running)
-               __insn_mtspr(SPR_SNCTL, snctl | SPR_SNCTL__FRZPROC_MASK);
-#endif
 }
 
 
 struct task_struct *__sched _switch_to(struct task_struct *prev,
                                       struct task_struct *next)
 {
+#ifdef CONFIG_KVM
+       /* vmexit is needed before context switch. */
+       BUG_ON(task_thread_info(prev)->vcpu);
+#endif
+
        /* DMA state is already saved; save off other arch state. */
        save_arch_state(&prev->thread);
 
@@ -462,17 +448,6 @@ struct task_struct *__sched _switch_to(struct task_struct *prev,
        /* Restore other arch state. */
        restore_arch_state(&next->thread);
 
-#if CHIP_HAS_SN_PROC()
-       /*
-        * Restart static network processor in the new process
-        * if it was running before.
-        */
-       if (next->thread.sn_proc_running) {
-               int snctl = __insn_mfspr(SPR_SNCTL);
-               __insn_mtspr(SPR_SNCTL, snctl & ~SPR_SNCTL__FRZPROC_MASK);
-       }
-#endif
-
 #ifdef CONFIG_HARDWALL
        /* Enable or disable access to the network registers appropriately. */
        hardwall_switch_tasks(prev, next);
@@ -510,11 +485,37 @@ int do_work_pending(struct pt_regs *regs, u32 thread_info_flags)
        /* Enable interrupts; they are disabled again on return to caller. */
        local_irq_enable();
 
+#ifdef CONFIG_KVM
+       /*
+        * Some work requires us to exit the VM first.  Typically this
+        * allows the process running the VM to respond to the work
+        * (e.g. a signal), or allows the VM mechanism to latch
+        * modified host state (e.g. a "hypervisor" message sent to a
+        * different vcpu).  It also means that if we are considering
+        * calling schedule(), we exit the VM first, so we never have
+        * to worry about context-switching into a VM.
+        */
+       if (current_thread_info()->vcpu) {
+               u32 do_exit = thread_info_flags &
+                       (_TIF_NEED_RESCHED|_TIF_SIGPENDING|_TIF_VIRT_EXIT);
+
+               if (thread_info_flags & _TIF_VIRT_EXIT)
+                       clear_thread_flag(TIF_VIRT_EXIT);
+               if (do_exit) {
+                       kvm_trigger_vmexit(regs, KVM_EXIT_AGAIN);
+                       /*NORETURN*/
+               }
+       }
+#endif
+
+       /* Enable interrupts; they are disabled again on return to caller. */
+       local_irq_enable();
+
        if (thread_info_flags & _TIF_NEED_RESCHED) {
                schedule();
                return 1;
        }
-#if CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC()
+#if CHIP_HAS_TILE_DMA()
        if (thread_info_flags & _TIF_ASYNC_TLB) {
                do_async_page_fault(regs);
                return 1;
@@ -529,11 +530,12 @@ int do_work_pending(struct pt_regs *regs, u32 thread_info_flags)
                tracehook_notify_resume(regs);
                return 1;
        }
-       if (thread_info_flags & _TIF_SINGLESTEP) {
+
+       /* Handle a few flags here that stay set. */
+       if (thread_info_flags & _TIF_SINGLESTEP)
                single_step_once(regs);
-               return 0;
-       }
-       panic("work_pending: bad flags %#x\n", thread_info_flags);
+
+       return 0;
 }
 
 unsigned long get_wchan(struct task_struct *p)
@@ -564,7 +566,15 @@ void flush_thread(void)
  */
 void exit_thread(void)
 {
-       /* Nothing */
+#ifdef CONFIG_HARDWALL
+       /*
+        * Remove the task from the list of tasks that are associated
+        * with any live hardwalls.  (If the task that is exiting held
+        * the last reference to a hardwall fd, it would already have
+        * been released and deactivated at this point.)
+        */
+       hardwall_deactivate_all(current);
+#endif
 }
 
 void show_regs(struct pt_regs *regs)
@@ -573,23 +583,24 @@ void show_regs(struct pt_regs *regs)
        int i;
 
        pr_err("\n");
-       show_regs_print_info(KERN_ERR);
+       if (tsk != &corrupt_current)
+               show_regs_print_info(KERN_ERR);
 #ifdef __tilegx__
-       for (i = 0; i < 51; i += 3)
+       for (i = 0; i < 17; i++)
                pr_err(" r%-2d: "REGFMT" r%-2d: "REGFMT" r%-2d: "REGFMT"\n",
-                      i, regs->regs[i], i+1, regs->regs[i+1],
-                      i+2, regs->regs[i+2]);
-       pr_err(" r51: "REGFMT" r52: "REGFMT" tp : "REGFMT"\n",
-              regs->regs[51], regs->regs[52], regs->tp);
+                      i, regs->regs[i], i+18, regs->regs[i+18],
+                      i+36, regs->regs[i+36]);
+       pr_err(" r17: "REGFMT" r35: "REGFMT" tp : "REGFMT"\n",
+              regs->regs[17], regs->regs[35], regs->tp);
        pr_err(" sp : "REGFMT" lr : "REGFMT"\n", regs->sp, regs->lr);
 #else
-       for (i = 0; i < 52; i += 4)
+       for (i = 0; i < 13; i++)
                pr_err(" r%-2d: "REGFMT" r%-2d: "REGFMT
                       " r%-2d: "REGFMT" r%-2d: "REGFMT"\n",
-                      i, regs->regs[i], i+1, regs->regs[i+1],
-                      i+2, regs->regs[i+2], i+3, regs->regs[i+3]);
-       pr_err(" r52: "REGFMT" tp : "REGFMT" sp : "REGFMT" lr : "REGFMT"\n",
-              regs->regs[52], regs->tp, regs->sp, regs->lr);
+                      i, regs->regs[i], i+14, regs->regs[i+14],
+                      i+27, regs->regs[i+27], i+40, regs->regs[i+40]);
+       pr_err(" r13: "REGFMT" tp : "REGFMT" sp : "REGFMT" lr : "REGFMT"\n",
+              regs->regs[13], regs->tp, regs->sp, regs->lr);
 #endif
        pr_err(" pc : "REGFMT" ex1: %ld     faultnum: %ld\n",
               regs->pc, regs->ex1, regs->faultnum);
index 0f83ed4602b2fb878bd8b110632b8b92bf49259c..de98c6ddf136dbeebee00b7d8b1c553cc8bf56c1 100644 (file)
@@ -265,6 +265,21 @@ int do_syscall_trace_enter(struct pt_regs *regs)
 
 void do_syscall_trace_exit(struct pt_regs *regs)
 {
+       long errno;
+
+       /*
+        * The standard tile calling convention returns the value (or negative
+        * errno) in r0, and zero (or positive errno) in r1.
+        * It saves a couple of cycles on the hot path to do this work in
+        * registers only as we return, rather than updating the in-memory
+        * struct ptregs.
+        */
+       errno = (long) regs->regs[0];
+       if (errno < 0 && errno > -4096)
+               regs->regs[1] = -errno;
+       else
+               regs->regs[1] = 0;
+
        if (test_thread_flag(TIF_SYSCALL_TRACE))
                tracehook_report_syscall_exit(regs, 0);
 
@@ -272,7 +287,7 @@ void do_syscall_trace_exit(struct pt_regs *regs)
                trace_sys_exit(regs, regs->regs[0]);
 }
 
-void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code)
+void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs)
 {
        struct siginfo info;
 
@@ -288,5 +303,5 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code)
 /* Handle synthetic interrupt delivered only by the simulator. */
 void __kprobes do_breakpoint(struct pt_regs* regs, int fault_num)
 {
-       send_sigtrap(current, regs, fault_num);
+       send_sigtrap(current, regs);
 }
index d1b5c913ae724d4a4f794d0590e263d305a4a58a..6c5d2c070a12af145950a857fcd698b9b2bcc224 100644 (file)
@@ -27,7 +27,6 @@
 
 void machine_halt(void)
 {
-       warn_early_printk();
        arch_local_irq_disable_all();
        smp_send_stop();
        hv_halt();
@@ -35,7 +34,6 @@ void machine_halt(void)
 
 void machine_power_off(void)
 {
-       warn_early_printk();
        arch_local_irq_disable_all();
        smp_send_stop();
        hv_power_off();
index c12280c2d9048a5c28ae4608a60391f853bdee1d..542cae17a93aebc316b184b1e8bfb36da292ed88 100644 (file)
@@ -20,7 +20,7 @@
 #include <asm/switch_to.h>
 
 /*
- * See <asm/system.h>; called with prev and next task_struct pointers.
+ * See <asm/switch_to.h>; called with prev and next task_struct pointers.
  * "prev" is returned in r0 for _switch_to and also for ret_from_fork.
  *
  * We want to save pc/sp in "prev", and get the new pc/sp from "next".
@@ -39,7 +39,7 @@
  */
 
 #if CALLEE_SAVED_REGS_COUNT != 24
-# error Mismatch between <asm/system.h> and kernel/entry.S
+# error Mismatch between <asm/switch_to.h> and kernel/entry.S
 #endif
 #define FRAME_SIZE ((2 + CALLEE_SAVED_REGS_COUNT) * 4)
 
index 0829fd01fa30be16324309ea7e52b421724ff75e..bbffcc6f340f9a431b7ba0a29a273b10b3ee04ef 100644 (file)
@@ -20,7 +20,7 @@
 #include <asm/switch_to.h>
 
 /*
- * See <asm/system.h>; called with prev and next task_struct pointers.
+ * See <asm/switch_to.h>; called with prev and next task_struct pointers.
  * "prev" is returned in r0 for _switch_to and also for ret_from_fork.
  *
  * We want to save pc/sp in "prev", and get the new pc/sp from "next".
@@ -39,7 +39,7 @@
  */
 
 #if CALLEE_SAVED_REGS_COUNT != 24
-# error Mismatch between <asm/system.h> and kernel/entry.S
+# error Mismatch between <asm/switch_to.h> and kernel/entry.S
 #endif
 #define FRAME_SIZE ((2 + CALLEE_SAVED_REGS_COUNT) * 8)
 
index 010b418515f8529e65f90eb65a65d3afcb5b35cb..e44fbcf8cbd5d22748791efe0b6606d32426d387 100644 (file)
 #include <asm/page.h>
 #include <hv/hypervisor.h>
 
-#define ___hvb MEM_SV_INTRPT + HV_GLUE_START_CPA
-
-#define ___hv_dispatch(f) (___hvb + (HV_DISPATCH_ENTRY_SIZE * f))
-
-#define ___hv_console_putc ___hv_dispatch(HV_DISPATCH_CONSOLE_PUTC)
-#define ___hv_halt         ___hv_dispatch(HV_DISPATCH_HALT)
-#define ___hv_reexec       ___hv_dispatch(HV_DISPATCH_REEXEC)
-#define ___hv_flush_remote ___hv_dispatch(HV_DISPATCH_FLUSH_REMOTE)
-
 #undef RELOCATE_NEW_KERNEL_VERBOSE
 
 STD_ENTRY(relocate_new_kernel)
@@ -43,8 +34,8 @@ STD_ENTRY(relocate_new_kernel)
        addi    sp, sp, -8
        /* we now have a stack (whether we need one or not) */
 
-       moveli  r40, lo16(___hv_console_putc)
-       auli    r40, r40, ha16(___hv_console_putc)
+       moveli  r40, lo16(hv_console_putc)
+       auli    r40, r40, ha16(hv_console_putc)
 
 #ifdef RELOCATE_NEW_KERNEL_VERBOSE
        moveli  r0, 'r'
@@ -86,7 +77,6 @@ STD_ENTRY(relocate_new_kernel)
        move    r30, sp
        addi    sp, sp, -8
 
-#if CHIP_HAS_CBOX_HOME_MAP()
        /*
         * On TILEPro, we need to flush all tiles' caches, since we may
         * have been doing hash-for-home caching there.  Note that we
@@ -114,15 +104,14 @@ STD_ENTRY(relocate_new_kernel)
        }
        {
         move   r8, zero         /* asids */
-        moveli r20, lo16(___hv_flush_remote)
+        moveli r20, lo16(hv_flush_remote)
        }
        {
         move   r9, zero         /* asidcount */
-        auli   r20, r20, ha16(___hv_flush_remote)
+        auli   r20, r20, ha16(hv_flush_remote)
        }
 
        jalr    r20
-#endif
 
        /* r33 is destination pointer, default to zero */
 
@@ -175,8 +164,8 @@ STD_ENTRY(relocate_new_kernel)
        move    r0, r32
        moveli  r1, 0           /* arg to hv_reexec is 64 bits */
 
-       moveli  r41, lo16(___hv_reexec)
-       auli    r41, r41, ha16(___hv_reexec)
+       moveli  r41, lo16(hv_reexec)
+       auli    r41, r41, ha16(hv_reexec)
 
        jalr    r41
 
@@ -267,8 +256,8 @@ STD_ENTRY(relocate_new_kernel)
        moveli  r0, '\n'
        jalr    r40
 .Lhalt:
-       moveli  r41, lo16(___hv_halt)
-       auli    r41, r41, ha16(___hv_halt)
+       moveli  r41, lo16(hv_halt)
+       auli    r41, r41, ha16(hv_halt)
 
        jalr    r41
        STD_ENDPROC(relocate_new_kernel)
index 1c09a4f5a4ea186708d35b455107259a634e869f..d9d8cf6176e887af1bb3ef283e0727a997608c59 100644 (file)
@@ -34,11 +34,11 @@ STD_ENTRY(relocate_new_kernel)
        addi    sp, sp, -8
        /* we now have a stack (whether we need one or not) */
 
+#ifdef RELOCATE_NEW_KERNEL_VERBOSE
        moveli  r40, hw2_last(hv_console_putc)
        shl16insli r40, r40, hw1(hv_console_putc)
        shl16insli r40, r40, hw0(hv_console_putc)
 
-#ifdef RELOCATE_NEW_KERNEL_VERBOSE
        moveli  r0, 'r'
        jalr    r40
 
@@ -78,7 +78,6 @@ STD_ENTRY(relocate_new_kernel)
        move    r30, sp
        addi    sp, sp, -16
 
-#if CHIP_HAS_CBOX_HOME_MAP()
        /*
         * On TILE-GX, we need to flush all tiles' caches, since we may
         * have been doing hash-for-home caching there.  Note that we
@@ -116,7 +115,6 @@ STD_ENTRY(relocate_new_kernel)
        shl16insli      r20, r20, hw0(hv_flush_remote)
 
        jalr    r20
-#endif
 
        /* r33 is destination pointer, default to zero */
 
@@ -176,10 +174,12 @@ STD_ENTRY(relocate_new_kernel)
 
        /* we should not get here */
 
+#ifdef RELOCATE_NEW_KERNEL_VERBOSE
        moveli  r0, '?'
        jalr    r40
        moveli  r0, '\n'
        jalr    r40
+#endif
 
        j       .Lhalt
 
@@ -237,7 +237,9 @@ STD_ENTRY(relocate_new_kernel)
        j       .Lloop
 
 
-.Lerr: moveli  r0, 'e'
+.Lerr:
+#ifdef RELOCATE_NEW_KERNEL_VERBOSE
+       moveli  r0, 'e'
        jalr    r40
        moveli  r0, 'r'
        jalr    r40
@@ -245,6 +247,7 @@ STD_ENTRY(relocate_new_kernel)
        jalr    r40
        moveli  r0, '\n'
        jalr    r40
+#endif
 .Lhalt:
        moveli r41, hw2_last(hv_halt)
        shl16insli r41, r41, hw1(hv_halt)
index eceb8344280f27ee1906dbfb52d150d836ff4e9c..b69e43ce93910e79bb4da1089d67a10698bd770e 100644 (file)
@@ -154,6 +154,65 @@ static int __init setup_maxnodemem(char *str)
 }
 early_param("maxnodemem", setup_maxnodemem);
 
+struct memmap_entry {
+       u64 addr;       /* start of memory segment */
+       u64 size;       /* size of memory segment */
+};
+static struct memmap_entry memmap_map[64];
+static int memmap_nr;
+
+static void add_memmap_region(u64 addr, u64 size)
+{
+       if (memmap_nr >= ARRAY_SIZE(memmap_map)) {
+               pr_err("Ooops! Too many entries in the memory map!\n");
+               return;
+       }
+       memmap_map[memmap_nr].addr = addr;
+       memmap_map[memmap_nr].size = size;
+       memmap_nr++;
+}
+
+static int __init setup_memmap(char *p)
+{
+       char *oldp;
+       u64 start_at, mem_size;
+
+       if (!p)
+               return -EINVAL;
+
+       if (!strncmp(p, "exactmap", 8)) {
+               pr_err("\"memmap=exactmap\" not valid on tile\n");
+               return 0;
+       }
+
+       oldp = p;
+       mem_size = memparse(p, &p);
+       if (p == oldp)
+               return -EINVAL;
+
+       if (*p == '@') {
+               pr_err("\"memmap=nn@ss\" (force RAM) invalid on tile\n");
+       } else if (*p == '#') {
+               pr_err("\"memmap=nn#ss\" (force ACPI data) invalid on tile\n");
+       } else if (*p == '$') {
+               start_at = memparse(p+1, &p);
+               add_memmap_region(start_at, mem_size);
+       } else {
+               if (mem_size == 0)
+                       return -EINVAL;
+               maxmem_pfn = (mem_size >> HPAGE_SHIFT) <<
+                       (HPAGE_SHIFT - PAGE_SHIFT);
+       }
+       return *p == '\0' ? 0 : -EINVAL;
+}
+early_param("memmap", setup_memmap);
+
+static int __init setup_mem(char *str)
+{
+       return setup_maxmem(str);
+}
+early_param("mem", setup_mem);  /* compatibility with x86 */
+
 static int __init setup_isolnodes(char *str)
 {
        char buf[MAX_NUMNODES * 5];
@@ -209,7 +268,7 @@ early_param("vmalloc", parse_vmalloc);
 /*
  * Determine for each controller where its lowmem is mapped and how much of
  * it is mapped there.  On controller zero, the first few megabytes are
- * already mapped in as code at MEM_SV_INTRPT, so in principle we could
+ * already mapped in as code at MEM_SV_START, so in principle we could
  * start our data mappings higher up, but for now we don't bother, to avoid
  * additional confusion.
  *
@@ -614,11 +673,12 @@ static void __init setup_bootmem_allocator_node(int i)
        /*
         * Throw away any memory aliased by the PCI region.
         */
-       if (pci_reserve_start_pfn < end && pci_reserve_end_pfn > start)
-               reserve_bootmem(PFN_PHYS(pci_reserve_start_pfn),
-                               PFN_PHYS(pci_reserve_end_pfn -
-                                        pci_reserve_start_pfn),
+       if (pci_reserve_start_pfn < end && pci_reserve_end_pfn > start) {
+               start = max(pci_reserve_start_pfn, start);
+               end = min(pci_reserve_end_pfn, end);
+               reserve_bootmem(PFN_PHYS(start), PFN_PHYS(end - start),
                                BOOTMEM_EXCLUSIVE);
+       }
 #endif
 }
 
@@ -628,6 +688,31 @@ static void __init setup_bootmem_allocator(void)
        for (i = 0; i < MAX_NUMNODES; ++i)
                setup_bootmem_allocator_node(i);
 
+       /* Reserve any memory excluded by "memmap" arguments. */
+       for (i = 0; i < memmap_nr; ++i) {
+               struct memmap_entry *m = &memmap_map[i];
+               reserve_bootmem(m->addr, m->size, 0);
+       }
+
+#ifdef CONFIG_BLK_DEV_INITRD
+       if (initrd_start) {
+               /* Make sure the initrd memory region is not modified. */
+               if (reserve_bootmem(initrd_start, initrd_end - initrd_start,
+                                   BOOTMEM_EXCLUSIVE)) {
+                       pr_crit("The initrd memory region has been polluted. Disabling it.\n");
+                       initrd_start = 0;
+                       initrd_end = 0;
+               } else {
+                       /*
+                        * Translate initrd_start & initrd_end from PA to VA for
+                        * future access.
+                        */
+                       initrd_start += PAGE_OFFSET;
+                       initrd_end += PAGE_OFFSET;
+               }
+       }
+#endif
+
 #ifdef CONFIG_KEXEC
        if (crashk_res.start != crashk_res.end)
                reserve_bootmem(crashk_res.start, resource_size(&crashk_res), 0);
@@ -961,9 +1046,6 @@ void setup_cpu(int boot)
        arch_local_irq_unmask(INT_DMATLB_MISS);
        arch_local_irq_unmask(INT_DMATLB_ACCESS);
 #endif
-#if CHIP_HAS_SN_PROC()
-       arch_local_irq_unmask(INT_SNITLB_MISS);
-#endif
 #ifdef __tilegx__
        arch_local_irq_unmask(INT_SINGLE_STEP_K);
 #endif
@@ -978,10 +1060,6 @@ void setup_cpu(int boot)
        /* Static network is not restricted. */
        __insn_mtspr(SPR_MPL_SN_ACCESS_SET_0, 1);
 #endif
-#if CHIP_HAS_SN_PROC()
-       __insn_mtspr(SPR_MPL_SN_NOTIFY_SET_0, 1);
-       __insn_mtspr(SPR_MPL_SN_CPL_SET_0, 1);
-#endif
 
        /*
         * Set the MPL for interrupt control 0 & 1 to the corresponding
@@ -989,7 +1067,20 @@ void setup_cpu(int boot)
         * SPRs, as well as the interrupt mask.
         */
        __insn_mtspr(SPR_MPL_INTCTRL_0_SET_0, 1);
+
+#ifdef CONFIG_KVM
+       /*
+        * If we launch a guest kernel, it will need some interrupts
+        * that otherwise are not used by the host or by userspace.
+        * Set them to MPL 1 now and leave them alone going forward;
+        * they are masked in the host so will never fire there anyway,
+        * and we mask them at PL1 as we exit the guest.
+        */
        __insn_mtspr(SPR_MPL_INTCTRL_1_SET_1, 1);
+       __insn_mtspr(SPR_MPL_SINGLE_STEP_1_SET_1, 1);
+       __insn_mtspr(SPR_MPL_AUX_TILE_TIMER_SET_1, 1);
+       __insn_mtspr(SPR_MPL_IPI_1_SET_1, 1);
+#endif
 
        /* Initialize IRQ support for this cpu. */
        setup_irq_regs();
@@ -1029,6 +1120,10 @@ static void __init load_hv_initrd(void)
        int fd, rc;
        void *initrd;
 
+       /* If initrd has already been set, skip initramfs file in hvfs. */
+       if (initrd_start)
+               return;
+
        fd = hv_fs_findfile((HV_VirtAddr) initramfs_file);
        if (fd == HV_ENOENT) {
                if (set_initramfs_file) {
@@ -1067,6 +1162,25 @@ void __init free_initrd_mem(unsigned long begin, unsigned long end)
        free_bootmem(__pa(begin), end - begin);
 }
 
+static int __init setup_initrd(char *str)
+{
+       char *endp;
+       unsigned long initrd_size;
+
+       initrd_size = str ? simple_strtoul(str, &endp, 0) : 0;
+       if (initrd_size == 0 || *endp != '@')
+               return -EINVAL;
+
+       initrd_start = simple_strtoul(endp+1, &endp, 0);
+       if (initrd_start == 0)
+               return -EINVAL;
+
+       initrd_end = initrd_start + initrd_size;
+
+       return 0;
+}
+early_param("initrd", setup_initrd);
+
 #else
 static inline void load_hv_initrd(void) {}
 #endif /* CONFIG_BLK_DEV_INITRD */
@@ -1134,7 +1248,7 @@ static void __init validate_va(void)
 #ifndef __tilegx__   /* FIXME: GX: probably some validation relevant here */
        /*
         * Similarly, make sure we're only using allowed VAs.
-        * We assume we can contiguously use MEM_USER_INTRPT .. MEM_HV_INTRPT,
+        * We assume we can contiguously use MEM_USER_INTRPT .. MEM_HV_START,
         * and 0 .. KERNEL_HIGH_VADDR.
         * In addition, make sure we CAN'T use the end of memory, since
         * we use the last chunk of each pgd for the pgd_list.
@@ -1149,7 +1263,7 @@ static void __init validate_va(void)
                if (range.size == 0)
                        break;
                if (range.start <= MEM_USER_INTRPT &&
-                   range.start + range.size >= MEM_HV_INTRPT)
+                   range.start + range.size >= MEM_HV_START)
                        user_kernel_ok = 1;
                if (range.start == 0)
                        max_va = range.size;
@@ -1183,7 +1297,6 @@ static void __init validate_va(void)
 struct cpumask __write_once cpu_lotar_map;
 EXPORT_SYMBOL(cpu_lotar_map);
 
-#if CHIP_HAS_CBOX_HOME_MAP()
 /*
  * hash_for_home_map lists all the tiles that hash-for-home data
  * will be cached on.  Note that this may includes tiles that are not
@@ -1193,7 +1306,6 @@ EXPORT_SYMBOL(cpu_lotar_map);
  */
 struct cpumask hash_for_home_map;
 EXPORT_SYMBOL(hash_for_home_map);
-#endif
 
 /*
  * cpu_cacheable_map lists all the cpus whose caches the hypervisor can
@@ -1286,7 +1398,6 @@ static void __init setup_cpu_maps(void)
                cpu_lotar_map = *cpu_possible_mask;
        }
 
-#if CHIP_HAS_CBOX_HOME_MAP()
        /* Retrieve set of CPUs used for hash-for-home caching */
        rc = hv_inquire_tiles(HV_INQ_TILES_HFH_CACHE,
                              (HV_VirtAddr) hash_for_home_map.bits,
@@ -1294,9 +1405,6 @@ static void __init setup_cpu_maps(void)
        if (rc < 0)
                early_panic("hv_inquire_tiles(HFH_CACHE) failed: rc %d\n", rc);
        cpumask_or(&cpu_cacheable_map, cpu_possible_mask, &hash_for_home_map);
-#else
-       cpu_cacheable_map = *cpu_possible_mask;
-#endif
 }
 
 
@@ -1492,7 +1600,7 @@ void __init setup_per_cpu_areas(void)
 
                        /* Update the vmalloc mapping and page home. */
                        unsigned long addr = (unsigned long)ptr + i;
-                       pte_t *ptep = virt_to_pte(NULL, addr);
+                       pte_t *ptep = virt_to_kpte(addr);
                        pte_t pte = *ptep;
                        BUG_ON(pfn != pte_pfn(pte));
                        pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_TILE_L3);
@@ -1501,12 +1609,12 @@ void __init setup_per_cpu_areas(void)
 
                        /* Update the lowmem mapping for consistency. */
                        lowmem_va = (unsigned long)pfn_to_kaddr(pfn);
-                       ptep = virt_to_pte(NULL, lowmem_va);
+                       ptep = virt_to_kpte(lowmem_va);
                        if (pte_huge(*ptep)) {
                                printk(KERN_DEBUG "early shatter of huge page"
                                       " at %#lx\n", lowmem_va);
                                shatter_pmd((pmd_t *)ptep);
-                               ptep = virt_to_pte(NULL, lowmem_va);
+                               ptep = virt_to_kpte(lowmem_va);
                                BUG_ON(pte_huge(*ptep));
                        }
                        BUG_ON(pfn != pte_pfn(*ptep));
@@ -1561,11 +1669,11 @@ insert_non_bus_resource(void)
 #endif
 
 static struct resource* __init
-insert_ram_resource(u64 start_pfn, u64 end_pfn)
+insert_ram_resource(u64 start_pfn, u64 end_pfn, bool reserved)
 {
        struct resource *res =
                kzalloc(sizeof(struct resource), GFP_ATOMIC);
-       res->name = "System RAM";
+       res->name = reserved ? "Reserved" : "System RAM";
        res->start = start_pfn << PAGE_SHIFT;
        res->end = (end_pfn << PAGE_SHIFT) - 1;
        res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
@@ -1585,7 +1693,7 @@ insert_ram_resource(u64 start_pfn, u64 end_pfn)
 static int __init request_standard_resources(void)
 {
        int i;
-       enum { CODE_DELTA = MEM_SV_INTRPT - PAGE_OFFSET };
+       enum { CODE_DELTA = MEM_SV_START - PAGE_OFFSET };
 
 #if defined(CONFIG_PCI) && !defined(__tilegx__)
        insert_non_bus_resource();
@@ -1600,11 +1708,11 @@ static int __init request_standard_resources(void)
                    end_pfn > pci_reserve_start_pfn) {
                        if (end_pfn > pci_reserve_end_pfn)
                                insert_ram_resource(pci_reserve_end_pfn,
-                                                    end_pfn);
+                                                   end_pfn, 0);
                        end_pfn = pci_reserve_start_pfn;
                }
 #endif
-               insert_ram_resource(start_pfn, end_pfn);
+               insert_ram_resource(start_pfn, end_pfn, 0);
        }
 
        code_resource.start = __pa(_text - CODE_DELTA);
@@ -1615,6 +1723,13 @@ static int __init request_standard_resources(void)
        insert_resource(&iomem_resource, &code_resource);
        insert_resource(&iomem_resource, &data_resource);
 
+       /* Mark any "memmap" regions busy for the resource manager. */
+       for (i = 0; i < memmap_nr; ++i) {
+               struct memmap_entry *m = &memmap_map[i];
+               insert_ram_resource(PFN_DOWN(m->addr),
+                                   PFN_UP(m->addr + m->size - 1), 1);
+       }
+
 #ifdef CONFIG_KEXEC
        insert_resource(&iomem_resource, &crashk_res);
 #endif
index 9531845bf6611e9a42bb78328b080168e482bd99..2d1dbf38a9abe8ff9bfd8fcc18d8e18819889c23 100644 (file)
@@ -33,6 +33,7 @@
 #include <asm/ucontext.h>
 #include <asm/sigframe.h>
 #include <asm/syscalls.h>
+#include <asm/vdso.h>
 #include <arch/interrupts.h>
 
 #define DEBUG_SIG 0
@@ -190,7 +191,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
        if (err)
                goto give_sigsegv;
 
-       restorer = VDSO_BASE;
+       restorer = VDSO_SYM(&__vdso_rt_sigreturn);
        if (ka->sa.sa_flags & SA_RESTORER)
                restorer = (unsigned long) ka->sa.sa_restorer;
 
index 27742e87e25596842c8e0d2030834ad0e5e89b5d..de07fa7d1315e34c93bfa82d995c1393cadcf096 100644 (file)
  *   more details.
  *
  * A code-rewriter that enables instruction single-stepping.
- * Derived from iLib's single-stepping code.
  */
 
-#ifndef __tilegx__   /* Hardware support for single step unavailable. */
-
-/* These functions are only used on the TILE platform */
+#include <linux/smp.h>
+#include <linux/ptrace.h>
 #include <linux/slab.h>
 #include <linux/thread_info.h>
 #include <linux/uaccess.h>
 #include <linux/mman.h>
 #include <linux/types.h>
 #include <linux/err.h>
+#include <linux/prctl.h>
 #include <asm/cacheflush.h>
+#include <asm/traps.h>
+#include <asm/uaccess.h>
 #include <asm/unaligned.h>
 #include <arch/abi.h>
+#include <arch/spr_def.h>
 #include <arch/opcode.h>
 
-#define signExtend17(val) sign_extend((val), 17)
-#define TILE_X1_MASK (0xffffffffULL << 31)
-
-int unaligned_printk;
 
-static int __init setup_unaligned_printk(char *str)
-{
-       long val;
-       if (strict_strtol(str, 0, &val) != 0)
-               return 0;
-       unaligned_printk = val;
-       pr_info("Printk for each unaligned data accesses is %s\n",
-               unaligned_printk ? "enabled" : "disabled");
-       return 1;
-}
-__setup("unaligned_printk=", setup_unaligned_printk);
+#ifndef __tilegx__   /* Hardware support for single step unavailable. */
 
-unsigned int unaligned_fixup_count;
+#define signExtend17(val) sign_extend((val), 17)
+#define TILE_X1_MASK (0xffffffffULL << 31)
 
 enum mem_op {
        MEMOP_NONE,
@@ -56,12 +45,13 @@ enum mem_op {
        MEMOP_STORE_POSTINCR
 };
 
-static inline tile_bundle_bits set_BrOff_X1(tile_bundle_bits n, s32 offset)
+static inline tilepro_bundle_bits set_BrOff_X1(tilepro_bundle_bits n,
+       s32 offset)
 {
-       tile_bundle_bits result;
+       tilepro_bundle_bits result;
 
        /* mask out the old offset */
-       tile_bundle_bits mask = create_BrOff_X1(-1);
+       tilepro_bundle_bits mask = create_BrOff_X1(-1);
        result = n & (~mask);
 
        /* or in the new offset */
@@ -70,10 +60,11 @@ static inline tile_bundle_bits set_BrOff_X1(tile_bundle_bits n, s32 offset)
        return result;
 }
 
-static inline tile_bundle_bits move_X1(tile_bundle_bits n, int dest, int src)
+static inline tilepro_bundle_bits move_X1(tilepro_bundle_bits n, int dest,
+       int src)
 {
-       tile_bundle_bits result;
-       tile_bundle_bits op;
+       tilepro_bundle_bits result;
+       tilepro_bundle_bits op;
 
        result = n & (~TILE_X1_MASK);
 
@@ -87,13 +78,13 @@ static inline tile_bundle_bits move_X1(tile_bundle_bits n, int dest, int src)
        return result;
 }
 
-static inline tile_bundle_bits nop_X1(tile_bundle_bits n)
+static inline tilepro_bundle_bits nop_X1(tilepro_bundle_bits n)
 {
        return move_X1(n, TREG_ZERO, TREG_ZERO);
 }
 
-static inline tile_bundle_bits addi_X1(
-       tile_bundle_bits n, int dest, int src, int imm)
+static inline tilepro_bundle_bits addi_X1(
+       tilepro_bundle_bits n, int dest, int src, int imm)
 {
        n &= ~TILE_X1_MASK;
 
@@ -107,15 +98,26 @@ static inline tile_bundle_bits addi_X1(
        return n;
 }
 
-static tile_bundle_bits rewrite_load_store_unaligned(
+static tilepro_bundle_bits rewrite_load_store_unaligned(
        struct single_step_state *state,
-       tile_bundle_bits bundle,
+       tilepro_bundle_bits bundle,
        struct pt_regs *regs,
        enum mem_op mem_op,
        int size, int sign_ext)
 {
        unsigned char __user *addr;
        int val_reg, addr_reg, err, val;
+       int align_ctl;
+
+       align_ctl = unaligned_fixup;
+       switch (task_thread_info(current)->align_ctl) {
+       case PR_UNALIGN_NOPRINT:
+               align_ctl = 1;
+               break;
+       case PR_UNALIGN_SIGBUS:
+               align_ctl = 0;
+               break;
+       }
 
        /* Get address and value registers */
        if (bundle & TILEPRO_BUNDLE_Y_ENCODING_MASK) {
@@ -160,7 +162,7 @@ static tile_bundle_bits rewrite_load_store_unaligned(
         * tilepro hardware would be doing, if it could provide us with the
         * actual bad address in an SPR, which it doesn't.
         */
-       if (unaligned_fixup == 0) {
+       if (align_ctl == 0) {
                siginfo_t info = {
                        .si_signo = SIGBUS,
                        .si_code = BUS_ADRALN,
@@ -209,14 +211,14 @@ static tile_bundle_bits rewrite_load_store_unaligned(
 
        if (err) {
                siginfo_t info = {
-                       .si_signo = SIGSEGV,
-                       .si_code = SEGV_MAPERR,
+                       .si_signo = SIGBUS,
+                       .si_code = BUS_ADRALN,
                        .si_addr = addr
                };
-               trace_unhandled_signal("segfault", regs,
-                                      (unsigned long)addr, SIGSEGV);
+               trace_unhandled_signal("bad address for unaligned fixup", regs,
+                                      (unsigned long)addr, SIGBUS);
                force_sig_info(info.si_signo, &info, current);
-               return (tile_bundle_bits) 0;
+               return (tilepro_bundle_bits) 0;
        }
 
        if (unaligned_printk || unaligned_fixup_count == 0) {
@@ -285,7 +287,7 @@ void single_step_execve(void)
        ti->step_state = NULL;
 }
 
-/**
+/*
  * single_step_once() - entry point when single stepping has been triggered.
  * @regs: The machine register state
  *
@@ -304,20 +306,31 @@ void single_step_execve(void)
  */
 void single_step_once(struct pt_regs *regs)
 {
-       extern tile_bundle_bits __single_step_ill_insn;
-       extern tile_bundle_bits __single_step_j_insn;
-       extern tile_bundle_bits __single_step_addli_insn;
-       extern tile_bundle_bits __single_step_auli_insn;
+       extern tilepro_bundle_bits __single_step_ill_insn;
+       extern tilepro_bundle_bits __single_step_j_insn;
+       extern tilepro_bundle_bits __single_step_addli_insn;
+       extern tilepro_bundle_bits __single_step_auli_insn;
        struct thread_info *info = (void *)current_thread_info();
        struct single_step_state *state = info->step_state;
        int is_single_step = test_ti_thread_flag(info, TIF_SINGLESTEP);
-       tile_bundle_bits __user *buffer, *pc;
-       tile_bundle_bits bundle;
+       tilepro_bundle_bits __user *buffer, *pc;
+       tilepro_bundle_bits bundle;
        int temp_reg;
        int target_reg = TREG_LR;
        int err;
        enum mem_op mem_op = MEMOP_NONE;
        int size = 0, sign_ext = 0;  /* happy compiler */
+       int align_ctl;
+
+       align_ctl = unaligned_fixup;
+       switch (task_thread_info(current)->align_ctl) {
+       case PR_UNALIGN_NOPRINT:
+               align_ctl = 1;
+               break;
+       case PR_UNALIGN_SIGBUS:
+               align_ctl = 0;
+               break;
+       }
 
        asm(
 "    .pushsection .rodata.single_step\n"
@@ -390,7 +403,7 @@ void single_step_once(struct pt_regs *regs)
        if (regs->faultnum == INT_SWINT_1)
                regs->pc -= 8;
 
-       pc = (tile_bundle_bits __user *)(regs->pc);
+       pc = (tilepro_bundle_bits __user *)(regs->pc);
        if (get_user(bundle, pc) != 0) {
                pr_err("Couldn't read instruction at %p trying to step\n", pc);
                return;
@@ -533,7 +546,6 @@ void single_step_once(struct pt_regs *regs)
                        }
                        break;
 
-#if CHIP_HAS_WH64()
                /* postincrement operations */
                case IMM_0_OPCODE_X1:
                        switch (get_ImmOpcodeExtension_X1(bundle)) {
@@ -568,7 +580,6 @@ void single_step_once(struct pt_regs *regs)
                                break;
                        }
                        break;
-#endif /* CHIP_HAS_WH64() */
                }
 
                if (state->update) {
@@ -627,9 +638,9 @@ void single_step_once(struct pt_regs *regs)
 
        /*
         * Check if we need to rewrite an unaligned load/store.
-        * Returning zero is a special value meaning we need to SIGSEGV.
+        * Returning zero is a special value meaning we generated a signal.
         */
-       if (mem_op != MEMOP_NONE && unaligned_fixup >= 0) {
+       if (mem_op != MEMOP_NONE && align_ctl >= 0) {
                bundle = rewrite_load_store_unaligned(state, bundle, regs,
                                                      mem_op, size, sign_ext);
                if (bundle == 0)
@@ -668,9 +679,9 @@ void single_step_once(struct pt_regs *regs)
                }
 
                /* End with a jump back to the next instruction */
-               delta = ((regs->pc + TILE_BUNDLE_SIZE_IN_BYTES) -
+               delta = ((regs->pc + TILEPRO_BUNDLE_SIZE_IN_BYTES) -
                        (unsigned long)buffer) >>
-                       TILE_LOG2_BUNDLE_ALIGNMENT_IN_BYTES;
+                       TILEPRO_LOG2_BUNDLE_ALIGNMENT_IN_BYTES;
                bundle = __single_step_j_insn;
                bundle |= create_JOffLong_X1(delta);
                err |= __put_user(bundle, buffer++);
@@ -698,9 +709,6 @@ void single_step_once(struct pt_regs *regs)
 }
 
 #else
-#include <linux/smp.h>
-#include <linux/ptrace.h>
-#include <arch/spr_def.h>
 
 static DEFINE_PER_CPU(unsigned long, ss_saved_pc);
 
@@ -743,10 +751,10 @@ void gx_singlestep_handle(struct pt_regs *regs, int fault_num)
        } else if ((*ss_pc != regs->pc) ||
                   (!(control & SPR_SINGLE_STEP_CONTROL_1__CANCELED_MASK))) {
 
-               ptrace_notify(SIGTRAP);
                control |= SPR_SINGLE_STEP_CONTROL_1__CANCELED_MASK;
                control |= SPR_SINGLE_STEP_CONTROL_1__INHIBIT_MASK;
                __insn_mtspr(SPR_SINGLE_STEP_CONTROL_K, control);
+               send_sigtrap(current, regs);
        }
 }
 
index cbc73a8b8fe1e23fd58ce86ae759f525fd3d8ad6..6da740e7b8927ebf5c8dd2d17497b6feb8b05916 100644 (file)
 #include <linux/irq.h>
 #include <linux/module.h>
 #include <asm/cacheflush.h>
+#include <asm/homecache.h>
 
-HV_Topology smp_topology __write_once;
+/*
+ * We write to width and height with a single store in head_NN.S,
+ * so make the variable aligned to "long".
+ */
+HV_Topology smp_topology __write_once __aligned(sizeof(long));
 EXPORT_SYMBOL(smp_topology);
 
 #if CHIP_HAS_IPI()
@@ -100,8 +105,8 @@ static void smp_start_cpu_interrupt(void)
 /* Handler to stop the current cpu. */
 static void smp_stop_cpu_interrupt(void)
 {
-       set_cpu_online(smp_processor_id(), 0);
        arch_local_irq_disable_all();
+       set_cpu_online(smp_processor_id(), 0);
        for (;;)
                asm("nap; nop");
 }
@@ -167,9 +172,16 @@ static void ipi_flush_icache_range(void *info)
 void flush_icache_range(unsigned long start, unsigned long end)
 {
        struct ipi_flush flush = { start, end };
-       preempt_disable();
-       on_each_cpu(ipi_flush_icache_range, &flush, 1);
-       preempt_enable();
+
+       /* If invoked with irqs disabled, we can not issue IPIs. */
+       if (irqs_disabled())
+               flush_remote(0, HV_FLUSH_EVICT_L1I, NULL, 0, 0, 0,
+                       NULL, NULL, 0);
+       else {
+               preempt_disable();
+               on_each_cpu(ipi_flush_icache_range, &flush, 1);
+               preempt_enable();
+       }
 }
 
 
@@ -215,30 +227,34 @@ void __init ipi_init(void)
 
 #if CHIP_HAS_IPI()
 
-void smp_send_reschedule(int cpu)
+static void __smp_send_reschedule(int cpu)
 {
-       WARN_ON(cpu_is_offline(cpu));
-
        /*
         * We just want to do an MMIO store.  The traditional writeq()
         * functions aren't really correct here, since they're always
         * directed at the PCI shim.  For now, just do a raw store,
-        * casting away the __iomem attribute.
+        * casting away the __iomem attribute.  We do the store as a
+        * single asm() instruction to ensure that we can force a step
+        * over it in the KVM case, if we are not binding vcpus to cpus,
+        * rather than require it to be possible to issue validly.
         */
-       ((unsigned long __force *)ipi_mappings[cpu])[IRQ_RESCHEDULE] = 0;
+       unsigned long *addr =
+               &((unsigned long __force *)ipi_mappings[cpu])[IRQ_RESCHEDULE];
+       asm volatile("st %0, zero" :: "r" (addr));
 }
 
 #else
 
-void smp_send_reschedule(int cpu)
+static void __smp_send_reschedule(int cpu)
 {
-       HV_Coord coord;
-
-       WARN_ON(cpu_is_offline(cpu));
-
-       coord.y = cpu_y(cpu);
-       coord.x = cpu_x(cpu);
+       HV_Coord coord = { .y = cpu_y(cpu), .x = cpu_x(cpu) };
        hv_trigger_ipi(coord, IRQ_RESCHEDULE);
 }
 
 #endif /* CHIP_HAS_IPI() */
+
+void smp_send_reschedule(int cpu)
+{
+       WARN_ON(cpu_is_offline(cpu));
+       __smp_send_reschedule(cpu);
+}
index a535655b7089676756ed847bc93ad9aed7a07cea..732e9d1386618066ad8cb294e0155a6fcd70417d 100644 (file)
@@ -142,13 +142,15 @@ static struct cpumask cpu_started;
  */
 static void start_secondary(void)
 {
-       int cpuid = smp_processor_id();
+       int cpuid;
+
+       preempt_disable();
+
+       cpuid = smp_processor_id();
 
        /* Set our thread pointer appropriately. */
        set_my_cpu_offset(__per_cpu_offset[cpuid]);
 
-       preempt_disable();
-
        /*
         * In large machines even this will slow us down, since we
         * will be contending for for the printk spinlock.
index af8dfc9665f673982d27b65efd8ee52e495baa03..362284af3afd31ab39081447b6f1295a866c0ab9 100644 (file)
@@ -29,6 +29,7 @@
 #include <asm/switch_to.h>
 #include <asm/sigframe.h>
 #include <asm/stack.h>
+#include <asm/vdso.h>
 #include <arch/abi.h>
 #include <arch/interrupts.h>
 
@@ -102,9 +103,8 @@ static struct pt_regs *valid_fault_handler(struct KBacktraceIterator* kbt)
            p->sp >= sp) {
                if (kbt->verbose)
                        pr_err("  <%s while in kernel mode>\n", fault);
-       } else if (EX1_PL(p->ex1) == USER_PL &&
-           p->pc < PAGE_OFFSET &&
-           p->sp < PAGE_OFFSET) {
+       } else if (user_mode(p) &&
+                  p->sp < PAGE_OFFSET && p->sp != 0) {
                if (kbt->verbose)
                        pr_err("  <%s while in user mode>\n", fault);
        } else if (kbt->verbose) {
@@ -120,7 +120,7 @@ static struct pt_regs *valid_fault_handler(struct KBacktraceIterator* kbt)
 /* Is the pc pointing to a sigreturn trampoline? */
 static int is_sigreturn(unsigned long pc)
 {
-       return (pc == VDSO_BASE);
+       return current->mm && (pc == VDSO_SYM(&__vdso_rt_sigreturn));
 }
 
 /* Return a pt_regs pointer for a valid signal handler frame */
@@ -129,7 +129,7 @@ static struct pt_regs *valid_sigframe(struct KBacktraceIterator* kbt,
 {
        BacktraceIterator *b = &kbt->it;
 
-       if (b->pc == VDSO_BASE && b->sp < PAGE_OFFSET &&
+       if (is_sigreturn(b->pc) && b->sp < PAGE_OFFSET &&
            b->sp % sizeof(long) == 0) {
                int retval;
                pagefault_disable();
@@ -195,21 +195,21 @@ static int KBacktraceIterator_next_item_inclusive(
  */
 static void validate_stack(struct pt_regs *regs)
 {
-       int cpu = smp_processor_id();
+       int cpu = raw_smp_processor_id();
        unsigned long ksp0 = get_current_ksp0();
-       unsigned long ksp0_base = ksp0 THREAD_SIZE;
+       unsigned long ksp0_base = ksp0 & -THREAD_SIZE;
        unsigned long sp = stack_pointer;
 
        if (EX1_PL(regs->ex1) == KERNEL_PL && regs->sp >= ksp0) {
-               pr_err("WARNING: cpu %d: kernel stack page %#lx underrun!\n"
+               pr_err("WARNING: cpu %d: kernel stack %#lx..%#lx underrun!\n"
                       "  sp %#lx (%#lx in caller), caller pc %#lx, lr %#lx\n",
-                      cpu, ksp0_base, sp, regs->sp, regs->pc, regs->lr);
+                      cpu, ksp0_base, ksp0, sp, regs->sp, regs->pc, regs->lr);
        }
 
        else if (sp < ksp0_base + sizeof(struct thread_info)) {
-               pr_err("WARNING: cpu %d: kernel stack page %#lx overrun!\n"
+               pr_err("WARNING: cpu %d: kernel stack %#lx..%#lx overrun!\n"
                       "  sp %#lx (%#lx in caller), caller pc %#lx, lr %#lx\n",
-                      cpu, ksp0_base, sp, regs->sp, regs->pc, regs->lr);
+                      cpu, ksp0_base, ksp0, sp, regs->sp, regs->pc, regs->lr);
        }
 }
 
@@ -351,6 +351,26 @@ static void describe_addr(struct KBacktraceIterator *kbt,
                 vma->vm_start, vma->vm_end - vma->vm_start);
 }
 
+/*
+ * Avoid possible crash recursion during backtrace.  If it happens, it
+ * makes it easy to lose the actual root cause of the failure, so we
+ * put a simple guard on all the backtrace loops.
+ */
+static bool start_backtrace(void)
+{
+       if (current->thread.in_backtrace) {
+               pr_err("Backtrace requested while in backtrace!\n");
+               return false;
+       }
+       current->thread.in_backtrace = true;
+       return true;
+}
+
+static void end_backtrace(void)
+{
+       current->thread.in_backtrace = false;
+}
+
 /*
  * This method wraps the backtracer's more generic support.
  * It is only invoked from the architecture-specific code; show_stack()
@@ -361,6 +381,8 @@ void tile_show_stack(struct KBacktraceIterator *kbt, int headers)
        int i;
        int have_mmap_sem = 0;
 
+       if (!start_backtrace())
+               return;
        if (headers) {
                /*
                 * Add a blank line since if we are called from panic(),
@@ -371,7 +393,7 @@ void tile_show_stack(struct KBacktraceIterator *kbt, int headers)
                pr_err("Starting stack dump of tid %d, pid %d (%s)"
                       " on cpu %d at cycle %lld\n",
                       kbt->task->pid, kbt->task->tgid, kbt->task->comm,
-                      smp_processor_id(), get_cycles());
+                      raw_smp_processor_id(), get_cycles());
        }
        kbt->verbose = 1;
        i = 0;
@@ -402,6 +424,7 @@ void tile_show_stack(struct KBacktraceIterator *kbt, int headers)
                pr_err("Stack dump complete\n");
        if (have_mmap_sem)
                up_read(&kbt->task->mm->mmap_sem);
+       end_backtrace();
 }
 EXPORT_SYMBOL(tile_show_stack);
 
@@ -463,6 +486,8 @@ void save_stack_trace_tsk(struct task_struct *task, struct stack_trace *trace)
        int skip = trace->skip;
        int i = 0;
 
+       if (!start_backtrace())
+               goto done;
        if (task == NULL || task == current)
                KBacktraceIterator_init_current(&kbt);
        else
@@ -476,6 +501,8 @@ void save_stack_trace_tsk(struct task_struct *task, struct stack_trace *trace)
                        break;
                trace->entries[i++] = kbt.it.pc;
        }
+       end_backtrace();
+done:
        trace->nr_entries = i;
 }
 EXPORT_SYMBOL(save_stack_trace_tsk);
index b881a7be24bdff566a89adacb8e2760d63ea120b..38debe7060618939cb38693972b2aa8ef2ac047a 100644 (file)
 SYSCALL_DEFINE3(cacheflush, unsigned long, addr, unsigned long, len,
                unsigned long, flags)
 {
+       /* DCACHE is not particularly effective if not bound to one cpu. */
        if (flags & DCACHE)
-               homecache_evict(cpumask_of(smp_processor_id()));
+               homecache_evict(cpumask_of(raw_smp_processor_id()));
+
        if (flags & ICACHE)
                flush_remote(0, HV_FLUSH_EVICT_L1I, mm_cpumask(current->mm),
                             0, 0, 0, NULL, NULL, 0);
index e25b0a89c18f8e7c63cc3391477f8cadb255f468..575bda86f36f806370e4bcf240edae4e87f79388 100644 (file)
@@ -69,7 +69,11 @@ static ssize_t type_show(struct device *dev,
                            struct device_attribute *attr,
                            char *page)
 {
+#ifdef CONFIG_KVM_GUEST
+       return sprintf(page, "KVM\n");
+#else
        return sprintf(page, "tilera\n");
+#endif
 }
 static DEVICE_ATTR(type, 0444, type_show, NULL);
 
@@ -157,6 +161,67 @@ hvconfig_bin_read(struct file *filp, struct kobject *kobj,
        return count;
 }
 
+static ssize_t hv_stats_show(struct device *dev,
+                            struct device_attribute *attr,
+                            char *page)
+{
+       int cpu = dev->id;
+       long lotar = HV_XY_TO_LOTAR(cpu_x(cpu), cpu_y(cpu));
+
+       ssize_t n = hv_confstr(HV_CONFSTR_HV_STATS,
+                              (unsigned long)page, PAGE_SIZE - 1,
+                              lotar, 0);
+       n = n < 0 ? 0 : min(n, (ssize_t)PAGE_SIZE - 1);
+       page[n] = '\0';
+       return n;
+}
+
+static ssize_t hv_stats_store(struct device *dev,
+                             struct device_attribute *attr,
+                             const char *page,
+                             size_t count)
+{
+       int cpu = dev->id;
+       long lotar = HV_XY_TO_LOTAR(cpu_x(cpu), cpu_y(cpu));
+
+       ssize_t n = hv_confstr(HV_CONFSTR_HV_STATS, 0, 0, lotar, 1);
+       return n < 0 ? n : count;
+}
+
+static DEVICE_ATTR(hv_stats, 0644, hv_stats_show, hv_stats_store);
+
+static int hv_stats_device_add(struct device *dev, struct subsys_interface *sif)
+{
+       int err, cpu = dev->id;
+
+       if (!cpu_online(cpu))
+               return 0;
+
+       err = sysfs_create_file(&dev->kobj, &dev_attr_hv_stats.attr);
+
+       return err;
+}
+
+static int hv_stats_device_remove(struct device *dev,
+                                 struct subsys_interface *sif)
+{
+       int cpu = dev->id;
+
+       if (!cpu_online(cpu))
+               return 0;
+
+       sysfs_remove_file(&dev->kobj, &dev_attr_hv_stats.attr);
+       return 0;
+}
+
+
+static struct subsys_interface hv_stats_interface = {
+       .name                   = "hv_stats",
+       .subsys                 = &cpu_subsys,
+       .add_dev                = hv_stats_device_add,
+       .remove_dev             = hv_stats_device_remove,
+};
+
 static int __init create_sysfs_entries(void)
 {
        int err = 0;
@@ -188,6 +253,21 @@ static int __init create_sysfs_entries(void)
                err = sysfs_create_bin_file(hypervisor_kobj, &hvconfig_bin);
        }
 
+       if (!err) {
+               /*
+                * Don't bother adding the hv_stats files on each CPU if
+                * our hypervisor doesn't supply statistics.
+                */
+               int cpu = raw_smp_processor_id();
+               long lotar = HV_XY_TO_LOTAR(cpu_x(cpu), cpu_y(cpu));
+               char dummy;
+               ssize_t n = hv_confstr(HV_CONFSTR_HV_STATS,
+                                      (unsigned long) &dummy, 1,
+                                      lotar, 0);
+               if (n >= 0)
+                       err = subsys_interface_register(&hv_stats_interface);
+       }
+
        return err;
 }
 subsys_initcall(create_sysfs_entries);
index 7c353d8c2da926c9b057616db17f269fd6981f66..8a494fac1e319d2eca65ff889f437e7c824897b8 100644 (file)
 #include <linux/smp.h>
 #include <linux/delay.h>
 #include <linux/module.h>
+#include <linux/stop_machine.h>
+#include <linux/timekeeper_internal.h>
 #include <asm/irq_regs.h>
 #include <asm/traps.h>
+#include <asm/vdso.h>
 #include <hv/hypervisor.h>
 #include <arch/interrupts.h>
 #include <arch/spr_def.h>
@@ -35,7 +38,7 @@
  */
 
 /* How many cycles per second we are running at. */
-static cycles_t cycles_per_sec __write_once;
+static cycles_t cycles_per_sec;
 
 cycles_t get_clock_rate(void)
 {
@@ -66,7 +69,8 @@ EXPORT_SYMBOL(get_cycles);
  */
 #define SCHED_CLOCK_SHIFT 10
 
-static unsigned long sched_clock_mult __write_once;
+static unsigned long sched_clock_mult;
+static long long sched_clock_offset;
 
 static cycles_t clocksource_get_cycles(struct clocksource *cs)
 {
@@ -90,6 +94,7 @@ void __init setup_clock(void)
        cycles_per_sec = hv_sysconf(HV_SYSCONF_CPU_SPEED);
        sched_clock_mult =
                clocksource_hz2mult(cycles_per_sec, SCHED_CLOCK_SHIFT);
+       sched_clock_offset = 0;
 }
 
 void __init calibrate_delay(void)
@@ -110,28 +115,22 @@ void __init time_init(void)
        setup_tile_timer();
 }
 
-
 /*
  * Define the tile timer clock event device.  The timer is driven by
- * the TILE_TIMER_CONTROL register, which consists of a 31-bit down
+ * the TILE_[AUX_]TIMER_CONTROL register, which consists of a 31-bit down
  * counter, plus bit 31, which signifies that the counter has wrapped
- * from zero to (2**31) - 1.  The INT_TILE_TIMER interrupt will be
+ * from zero to (2**31) - 1.  The INT_[AUX_]TILE_TIMER interrupt will be
  * raised as long as bit 31 is set.
- *
- * The TILE_MINSEC value represents the largest range of real-time
- * we can possibly cover with the timer, based on MAX_TICK combined
- * with the slowest reasonable clock rate we might run at.
  */
 
 #define MAX_TICK 0x7fffffff   /* we have 31 bits of countdown timer */
-#define TILE_MINSEC 5         /* timer covers no more than 5 seconds */
 
 static int tile_timer_set_next_event(unsigned long ticks,
                                     struct clock_event_device *evt)
 {
        BUG_ON(ticks > MAX_TICK);
-       __insn_mtspr(SPR_TILE_TIMER_CONTROL, ticks);
-       arch_local_irq_unmask_now(INT_TILE_TIMER);
+       __insn_mtspr(SPR_LINUX_TIMER_CONTROL, ticks);
+       arch_local_irq_unmask_now(INT_LINUX_TIMER);
        return 0;
 }
 
@@ -142,17 +141,12 @@ static int tile_timer_set_next_event(unsigned long ticks,
 static void tile_timer_set_mode(enum clock_event_mode mode,
                                struct clock_event_device *evt)
 {
-       arch_local_irq_mask_now(INT_TILE_TIMER);
+       arch_local_irq_mask_now(INT_LINUX_TIMER);
 }
 
-/*
- * Set min_delta_ns to 1 microsecond, since it takes about
- * that long to fire the interrupt.
- */
 static DEFINE_PER_CPU(struct clock_event_device, tile_timer) = {
        .name = "tile timer",
        .features = CLOCK_EVT_FEAT_ONESHOT,
-       .min_delta_ns = 1000,
        .rating = 100,
        .irq = -1,
        .set_next_event = tile_timer_set_next_event,
@@ -163,18 +157,18 @@ void setup_tile_timer(void)
 {
        struct clock_event_device *evt = &__get_cpu_var(tile_timer);
 
-       /* Fill in fields that are speed-specific. */
-       clockevents_calc_mult_shift(evt, cycles_per_sec, TILE_MINSEC);
-       evt->max_delta_ns = clockevent_delta2ns(MAX_TICK, evt);
-
        /* Mark as being for this cpu only. */
        evt->cpumask = cpumask_of(smp_processor_id());
 
        /* Start out with timer not firing. */
-       arch_local_irq_mask_now(INT_TILE_TIMER);
+       arch_local_irq_mask_now(INT_LINUX_TIMER);
 
-       /* Register tile timer. */
-       clockevents_register_device(evt);
+       /*
+        * Register tile timer.  Set min_delta to 1 microsecond, since
+        * it takes about that long to fire the interrupt.
+        */
+       clockevents_config_and_register(evt, cycles_per_sec,
+                                       cycles_per_sec / 1000000, MAX_TICK);
 }
 
 /* Called from the interrupt vector. */
@@ -187,7 +181,7 @@ void do_timer_interrupt(struct pt_regs *regs, int fault_num)
         * Mask the timer interrupt here, since we are a oneshot timer
         * and there are now by definition no events pending.
         */
-       arch_local_irq_mask(INT_TILE_TIMER);
+       arch_local_irq_mask(INT_LINUX_TIMER);
 
        /* Track time spent here in an interrupt context */
        irq_enter();
@@ -215,8 +209,8 @@ void do_timer_interrupt(struct pt_regs *regs, int fault_num)
  */
 unsigned long long sched_clock(void)
 {
-       return clocksource_cyc2ns(get_cycles(),
-                                 sched_clock_mult, SCHED_CLOCK_SHIFT);
+       return clocksource_cyc2ns(get_cycles(), sched_clock_mult,
+                                 SCHED_CLOCK_SHIFT) + sched_clock_offset;
 }
 
 int setup_profiling_timer(unsigned int multiplier)
@@ -237,3 +231,174 @@ cycles_t ns2cycles(unsigned long nsecs)
        struct clock_event_device *dev = &__raw_get_cpu_var(tile_timer);
        return ((u64)nsecs * dev->mult) >> dev->shift;
 }
+
+void update_vsyscall_tz(void)
+{
+       /* Userspace gettimeofday will spin while this value is odd. */
+       ++vdso_data->tz_update_count;
+       smp_wmb();
+       vdso_data->tz_minuteswest = sys_tz.tz_minuteswest;
+       vdso_data->tz_dsttime = sys_tz.tz_dsttime;
+       smp_wmb();
+       ++vdso_data->tz_update_count;
+}
+
+void update_vsyscall(struct timekeeper *tk)
+{
+       struct timespec wall_time = tk_xtime(tk);
+       struct timespec *wtm = &tk->wall_to_monotonic;
+       struct clocksource *clock = tk->clock;
+
+       if (clock != &cycle_counter_cs)
+               return;
+
+       /* Userspace gettimeofday will spin while this value is odd. */
+       ++vdso_data->tb_update_count;
+       smp_wmb();
+       vdso_data->xtime_tod_stamp = clock->cycle_last;
+       vdso_data->xtime_clock_sec = wall_time.tv_sec;
+       vdso_data->xtime_clock_nsec = wall_time.tv_nsec;
+       vdso_data->wtom_clock_sec = wtm->tv_sec;
+       vdso_data->wtom_clock_nsec = wtm->tv_nsec;
+       vdso_data->mult = clock->mult;
+       vdso_data->shift = clock->shift;
+       smp_wmb();
+       ++vdso_data->tb_update_count;
+}
+
+
+#ifdef __tilegx__
+
+/* Arguments to the _set_clock_rate stop_machine() handler. */
+struct _set_clock_rate_args {
+       unsigned int new_rate;
+       int master_cpu;
+};
+
+/*
+ * Flag used to tell other CPUs to proceed once the master CPU has changed
+ * the actual CPU clock rate (barrier is positive), or failed to do so
+ * (barrier is negative).
+ */
+static int _set_clock_rate_barrier;
+
+/* Routine to actually do the clock rate change, called via stop_machine(). */
+static int _set_clock_rate(void *arg)
+{
+       struct _set_clock_rate_args *args = arg;
+       struct clock_event_device *evt = &__get_cpu_var(tile_timer);
+
+       /*
+        * Only one CPU needs to change the timekeeping parameters and
+        * change the clock rate.
+        */
+       if (args->master_cpu == smp_processor_id()) {
+               unsigned long long old_sched_clock;
+               long new_speed;
+               cycle_t start_cycle;
+               HV_SetSpeed hvss;
+
+               /*
+                * Sync up the time before changing the clock rate.  If we
+                * aren't using the clocksource we think we are, bail.
+                */
+               if (timekeeping_chfreq_prep(&cycle_counter_cs, &start_cycle)) {
+                       smp_wmb();
+                       _set_clock_rate_barrier = -1;
+                       return -ESRCH;
+               }
+
+               /*
+                * We'll adjust the offset below so that the new scheduler
+                * clock matches the value we read here, plus the time
+                * spent updating the CPU speed.  This causes us to lose a
+                * tiny bit of time, but it stays monotonic.
+                */
+               old_sched_clock = sched_clock();
+
+               /* Change the speed.  If that fails, bail. */
+               hvss = hv_set_speed(args->new_rate, start_cycle, 0);
+               new_speed = hvss.new_speed;
+               if (new_speed < 0) {
+                       smp_wmb();
+                       _set_clock_rate_barrier = -1;
+                       return -ENOSYS;
+               }
+
+               /*
+                * Change the clocksource frequency, and update the
+                * timekeeping state to account for the time we spent
+                * changing the speed, then update our internal state.
+                */
+               timekeeping_chfreq(new_speed, hvss.end_cycle, hvss.delta_ns);
+
+               cycles_per_sec = new_speed;
+
+               sched_clock_mult =
+                       clocksource_hz2mult(cycles_per_sec,
+                                           SCHED_CLOCK_SHIFT);
+
+               sched_clock_offset = old_sched_clock -
+                                    (sched_clock() - sched_clock_offset) +
+                                    hvss.delta_ns;
+
+               loops_per_jiffy = cycles_per_sec / HZ;
+
+               smp_wmb();
+               _set_clock_rate_barrier = 1;
+       } else {
+               /* Wait until the master CPU changes the speed, or fails. */
+               while (!_set_clock_rate_barrier)
+                       udelay(10);
+       }
+
+       /*
+        * All CPUs need to change the event timer configuration, but we
+        * don't want to do anything if the master CPU failed to
+        * reconfigure the clocksource and change the speed.
+        */
+       if (_set_clock_rate_barrier < 0)
+               return 0;
+
+       if (clockevents_update_freq(evt, cycles_per_sec) == -ETIME) {
+               /*
+                * The event that we'd previously been set for is
+                * in the past.  Instead of just losing it, which
+                * causes havoc, make it happen right now.
+                */
+               tile_timer_set_next_event(0, evt);
+       }
+
+       return 0;
+}
+
+/*
+ * Change the clock speed, and return the speed we ended up with; both are
+ * in hertz.
+ */
+unsigned int set_clock_rate(unsigned int new_rate)
+{
+       int stop_status;
+       struct _set_clock_rate_args args = {
+               .new_rate = new_rate,
+               /*
+                * We just need a valid CPU here; we don't care if we get
+                * rescheduled somewhere else after we set this.
+                */
+               .master_cpu = raw_smp_processor_id(),
+       };
+
+       _set_clock_rate_barrier = 0;
+       smp_wmb();
+
+       stop_status = stop_machine(_set_clock_rate, &args,
+                                  cpu_online_mask);
+
+       if (stop_status)
+               pr_err("Got unexpected status %d from stop_machine when "
+                      "changing clock speed\n", stop_status);
+
+       return cycles_per_sec;
+};
+
+#endif /* __tilegx__ */
index 3fd54d5bbd4c53d61f1b2ff0b6d08f7df7340b1a..f23b53515671bb13b0e21b93815e665f6fd6eebc 100644 (file)
@@ -91,8 +91,14 @@ void flush_tlb_all(void)
        }
 }
 
+/*
+ * Callers need to flush the L1I themselves if necessary, e.g. for
+ * kernel module unload.  Otherwise we assume callers are not using
+ * executable pgprot_t's.  Using EVICT_L1I means that dataplane cpus
+ * will get an unnecessary interrupt otherwise.
+ */
 void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 {
-       flush_remote(0, HV_FLUSH_EVICT_L1I, cpu_online_mask,
+       flush_remote(0, 0, NULL,
                     start, end - start, PAGE_SIZE, cpu_online_mask, NULL, 0);
 }
index 5b19a23c890801ef4f577228e083518d09302a58..8f41de22a67dd810301c6b6315c4c7eeb985df67 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/sched.h>
 #include <linux/kernel.h>
 #include <linux/kprobes.h>
+#include <linux/kdebug.h>
 #include <linux/module.h>
 #include <linux/reboot.h>
 #include <linux/uaccess.h>
@@ -29,7 +30,7 @@
 
 void __init trap_init(void)
 {
-       /* Nothing needed here since we link code at .intrpt1 */
+       /* Nothing needed here since we link code at .intrpt */
 }
 
 int unaligned_fixup = 1;
@@ -100,13 +101,7 @@ static int retry_gpv(unsigned int gpv_reason)
 
 #endif /* CHIP_HAS_TILE_DMA() */
 
-#ifdef __tilegx__
-#define bundle_bits tilegx_bundle_bits
-#else
-#define bundle_bits tile_bundle_bits
-#endif
-
-extern bundle_bits bpt_code;
+extern tile_bundle_bits bpt_code;
 
 asm(".pushsection .rodata.bpt_code,\"a\";"
     ".align 8;"
@@ -114,7 +109,7 @@ asm(".pushsection .rodata.bpt_code,\"a\";"
     ".size bpt_code,.-bpt_code;"
     ".popsection");
 
-static int special_ill(bundle_bits bundle, int *sigp, int *codep)
+static int special_ill(tile_bundle_bits bundle, int *sigp, int *codep)
 {
        int sig, code, maxcode;
 
@@ -214,16 +209,59 @@ static const char *const int_name[] = {
 #endif
 };
 
+static int do_bpt(struct pt_regs *regs)
+{
+       unsigned long bundle, bcode, bpt;
+
+       bundle = *(unsigned long *)instruction_pointer(regs);
+
+       /*
+        * bpt should be { bpt; nop }, which is 0x286a44ae51485000ULL.
+        * We encode the unused least significant bits for other purposes.
+        */
+       bpt = bundle & ~((1ULL << 12) - 1);
+       if (bpt != TILE_BPT_BUNDLE)
+               return 0;
+
+       bcode = bundle & ((1ULL << 12) - 1);
+       /*
+        * notify the kprobe handlers, if instruction is likely to
+        * pertain to them.
+        */
+       switch (bcode) {
+       /* breakpoint_insn */
+       case 0:
+               notify_die(DIE_BREAK, "debug", regs, bundle,
+                       INT_ILL, SIGTRAP);
+               break;
+       /* compiled_bpt */
+       case DIE_COMPILED_BPT:
+               notify_die(DIE_COMPILED_BPT, "debug", regs, bundle,
+                       INT_ILL, SIGTRAP);
+               break;
+       /* breakpoint2_insn */
+       case DIE_SSTEPBP:
+               notify_die(DIE_SSTEPBP, "single_step", regs, bundle,
+                       INT_ILL, SIGTRAP);
+               break;
+       default:
+               return 0;
+       }
+
+       return 1;
+}
+
 void __kprobes do_trap(struct pt_regs *regs, int fault_num,
                       unsigned long reason)
 {
        siginfo_t info = { 0 };
        int signo, code;
        unsigned long address = 0;
-       bundle_bits instr;
+       tile_bundle_bits instr;
 
-       /* Re-enable interrupts. */
-       local_irq_enable();
+       /* Re-enable interrupts, if they were previously enabled. */
+       if (!(regs->flags & PT_FLAGS_DISABLE_IRQ))
+               local_irq_enable();
 
        /*
         * If it hits in kernel mode and we can't fix it up, just exit the
@@ -231,7 +269,12 @@ void __kprobes do_trap(struct pt_regs *regs, int fault_num,
         */
        if (!user_mode(regs)) {
                const char *name;
-               if (fixup_exception(regs))  /* only UNALIGN_DATA in practice */
+               char buf[100];
+               if (fault_num == INT_ILL && do_bpt(regs)) {
+                       /* breakpoint */
+                       return;
+               }
+               if (fixup_exception(regs))  /* ILL_TRANS or UNALIGN_DATA */
                        return;
                if (fault_num >= 0 &&
                    fault_num < sizeof(int_name)/sizeof(int_name[0]) &&
@@ -239,10 +282,16 @@ void __kprobes do_trap(struct pt_regs *regs, int fault_num,
                        name = int_name[fault_num];
                else
                        name = "Unknown interrupt";
-               pr_alert("Kernel took bad trap %d (%s) at PC %#lx\n",
-                        fault_num, name, regs->pc);
                if (fault_num == INT_GPV)
-                       pr_alert("GPV_REASON is %#lx\n", reason);
+                       snprintf(buf, sizeof(buf), "; GPV_REASON %#lx", reason);
+#ifdef __tilegx__
+               else if (fault_num == INT_ILL_TRANS)
+                       snprintf(buf, sizeof(buf), "; address %#lx", reason);
+#endif
+               else
+                       buf[0] = '\0';
+               pr_alert("Kernel took bad trap %d (%s) at PC %#lx%s\n",
+                        fault_num, name, regs->pc, buf);
                show_regs(regs);
                do_exit(SIGKILL);  /* FIXME: implement i386 die() */
                return;
@@ -324,11 +373,8 @@ void __kprobes do_trap(struct pt_regs *regs, int fault_num,
                fill_ra_stack();
 
                signo = SIGSEGV;
+               address = reason;
                code = SEGV_MAPERR;
-               if (reason & SPR_ILL_TRANS_REASON__I_STREAM_VA_RMASK)
-                       address = regs->pc;
-               else
-                       address = 0;  /* FIXME: GX: single-step for address */
                break;
        }
 #endif
diff --git a/arch/tile/kernel/unaligned.c b/arch/tile/kernel/unaligned.c
new file mode 100644 (file)
index 0000000..b425fb6
--- /dev/null
@@ -0,0 +1,1609 @@
+/*
+ * Copyright 2013 Tilera Corporation. All Rights Reserved.
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful, but
+ *   WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ *   NON INFRINGEMENT.  See the GNU General Public License for
+ *   more details.
+ *
+ * A code-rewriter that handles unaligned exception.
+ */
+
+#include <linux/smp.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/thread_info.h>
+#include <linux/uaccess.h>
+#include <linux/mman.h>
+#include <linux/types.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/compat.h>
+#include <linux/prctl.h>
+#include <asm/cacheflush.h>
+#include <asm/traps.h>
+#include <asm/uaccess.h>
+#include <asm/unaligned.h>
+#include <arch/abi.h>
+#include <arch/spr_def.h>
+#include <arch/opcode.h>
+
+
+/*
+ * This file handles unaligned exception for tile-Gx. The tilepro's unaligned
+ * exception is supported out of single_step.c
+ */
+
+int unaligned_printk;
+
+static int __init setup_unaligned_printk(char *str)
+{
+       long val;
+       if (kstrtol(str, 0, &val) != 0)
+               return 0;
+       unaligned_printk = val;
+       pr_info("Printk for each unaligned data accesses is %s\n",
+               unaligned_printk ? "enabled" : "disabled");
+       return 1;
+}
+__setup("unaligned_printk=", setup_unaligned_printk);
+
+unsigned int unaligned_fixup_count;
+
+#ifdef __tilegx__
+
+/*
+ * Unalign data jit fixup code fragment. Reserved space is 128 bytes.
+ * The 1st 64-bit word saves fault PC address, 2nd word is the fault
+ * instruction bundle followed by 14 JIT bundles.
+ */
+
+struct unaligned_jit_fragment {
+       unsigned long       pc;
+       tilegx_bundle_bits  bundle;
+       tilegx_bundle_bits  insn[14];
+};
+
+/*
+ * Check if a nop or fnop at bundle's pipeline X0.
+ */
+
+static bool is_bundle_x0_nop(tilegx_bundle_bits bundle)
+{
+       return (((get_UnaryOpcodeExtension_X0(bundle) ==
+                 NOP_UNARY_OPCODE_X0) &&
+                (get_RRROpcodeExtension_X0(bundle) ==
+                 UNARY_RRR_0_OPCODE_X0) &&
+                (get_Opcode_X0(bundle) ==
+                 RRR_0_OPCODE_X0)) ||
+               ((get_UnaryOpcodeExtension_X0(bundle) ==
+                 FNOP_UNARY_OPCODE_X0) &&
+                (get_RRROpcodeExtension_X0(bundle) ==
+                 UNARY_RRR_0_OPCODE_X0) &&
+                (get_Opcode_X0(bundle) ==
+                 RRR_0_OPCODE_X0)));
+}
+
+/*
+ * Check if nop or fnop at bundle's pipeline X1.
+ */
+
+static bool is_bundle_x1_nop(tilegx_bundle_bits bundle)
+{
+       return (((get_UnaryOpcodeExtension_X1(bundle) ==
+                 NOP_UNARY_OPCODE_X1) &&
+                (get_RRROpcodeExtension_X1(bundle) ==
+                 UNARY_RRR_0_OPCODE_X1) &&
+                (get_Opcode_X1(bundle) ==
+                 RRR_0_OPCODE_X1)) ||
+               ((get_UnaryOpcodeExtension_X1(bundle) ==
+                 FNOP_UNARY_OPCODE_X1) &&
+                (get_RRROpcodeExtension_X1(bundle) ==
+                 UNARY_RRR_0_OPCODE_X1) &&
+                (get_Opcode_X1(bundle) ==
+                 RRR_0_OPCODE_X1)));
+}
+
+/*
+ * Check if nop or fnop at bundle's Y0 pipeline.
+ */
+
+static bool is_bundle_y0_nop(tilegx_bundle_bits bundle)
+{
+       return (((get_UnaryOpcodeExtension_Y0(bundle) ==
+                 NOP_UNARY_OPCODE_Y0) &&
+                (get_RRROpcodeExtension_Y0(bundle) ==
+                 UNARY_RRR_1_OPCODE_Y0) &&
+                (get_Opcode_Y0(bundle) ==
+                 RRR_1_OPCODE_Y0)) ||
+               ((get_UnaryOpcodeExtension_Y0(bundle) ==
+                 FNOP_UNARY_OPCODE_Y0) &&
+                (get_RRROpcodeExtension_Y0(bundle) ==
+                 UNARY_RRR_1_OPCODE_Y0) &&
+                (get_Opcode_Y0(bundle) ==
+                 RRR_1_OPCODE_Y0)));
+}
+
+/*
+ * Check if nop or fnop at bundle's pipeline Y1.
+ */
+
+static bool is_bundle_y1_nop(tilegx_bundle_bits bundle)
+{
+       return (((get_UnaryOpcodeExtension_Y1(bundle) ==
+                 NOP_UNARY_OPCODE_Y1) &&
+                (get_RRROpcodeExtension_Y1(bundle) ==
+                 UNARY_RRR_1_OPCODE_Y1) &&
+                (get_Opcode_Y1(bundle) ==
+                 RRR_1_OPCODE_Y1)) ||
+               ((get_UnaryOpcodeExtension_Y1(bundle) ==
+                 FNOP_UNARY_OPCODE_Y1) &&
+                (get_RRROpcodeExtension_Y1(bundle) ==
+                 UNARY_RRR_1_OPCODE_Y1) &&
+                (get_Opcode_Y1(bundle) ==
+                 RRR_1_OPCODE_Y1)));
+}
+
+/*
+ * Test if a bundle's y0 and y1 pipelines are both nop or fnop.
+ */
+
+static bool is_y0_y1_nop(tilegx_bundle_bits bundle)
+{
+       return is_bundle_y0_nop(bundle) && is_bundle_y1_nop(bundle);
+}
+
+/*
+ * Test if a bundle's x0 and x1 pipelines are both nop or fnop.
+ */
+
+static bool is_x0_x1_nop(tilegx_bundle_bits bundle)
+{
+       return is_bundle_x0_nop(bundle) && is_bundle_x1_nop(bundle);
+}
+
+/*
+ * Find the destination, source registers of fault unalign access instruction
+ * at X1 or Y2. Also, allocate up to 3 scratch registers clob1, clob2 and
+ * clob3, which are guaranteed different from any register used in the fault
+ * bundle. r_alias is used to return if the other instructions other than the
+ * unalign load/store shares same register with ra, rb and rd.
+ */
+
+static void find_regs(tilegx_bundle_bits bundle, uint64_t *rd, uint64_t *ra,
+                     uint64_t *rb, uint64_t *clob1, uint64_t *clob2,
+                     uint64_t *clob3, bool *r_alias)
+{
+       int i;
+       uint64_t reg;
+       uint64_t reg_map = 0, alias_reg_map = 0, map;
+       bool alias;
+
+       *ra = -1;
+       *rb = -1;
+
+       if (rd)
+               *rd = -1;
+
+       *clob1 = -1;
+       *clob2 = -1;
+       *clob3 = -1;
+       alias = false;
+
+       /*
+        * Parse fault bundle, find potentially used registers and mark
+        * corresponding bits in reg_map and alias_map. These 2 bit maps
+        * are used to find the scratch registers and determine if there
+        * is a register alias.
+        */
+       if (bundle & TILEGX_BUNDLE_MODE_MASK) {  /* Y Mode Bundle. */
+
+               reg = get_SrcA_Y2(bundle);
+               reg_map |= 1ULL << reg;
+               *ra = reg;
+               reg = get_SrcBDest_Y2(bundle);
+               reg_map |= 1ULL << reg;
+
+               if (rd) {
+                       /* Load. */
+                       *rd = reg;
+                       alias_reg_map = (1ULL << *rd) | (1ULL << *ra);
+               } else {
+                       /* Store. */
+                       *rb = reg;
+                       alias_reg_map = (1ULL << *ra) | (1ULL << *rb);
+               }
+
+               if (!is_bundle_y1_nop(bundle)) {
+                       reg = get_SrcA_Y1(bundle);
+                       reg_map |= (1ULL << reg);
+                       map = (1ULL << reg);
+
+                       reg = get_SrcB_Y1(bundle);
+                       reg_map |= (1ULL << reg);
+                       map |= (1ULL << reg);
+
+                       reg = get_Dest_Y1(bundle);
+                       reg_map |= (1ULL << reg);
+                       map |= (1ULL << reg);
+
+                       if (map & alias_reg_map)
+                               alias = true;
+               }
+
+               if (!is_bundle_y0_nop(bundle)) {
+                       reg = get_SrcA_Y0(bundle);
+                       reg_map |= (1ULL << reg);
+                       map = (1ULL << reg);
+
+                       reg = get_SrcB_Y0(bundle);
+                       reg_map |= (1ULL << reg);
+                       map |= (1ULL << reg);
+
+                       reg = get_Dest_Y0(bundle);
+                       reg_map |= (1ULL << reg);
+                       map |= (1ULL << reg);
+
+                       if (map & alias_reg_map)
+                               alias = true;
+               }
+       } else  { /* X Mode Bundle. */
+
+               reg = get_SrcA_X1(bundle);
+               reg_map |= (1ULL << reg);
+               *ra = reg;
+               if (rd) {
+                       /* Load. */
+                       reg = get_Dest_X1(bundle);
+                       reg_map |= (1ULL << reg);
+                       *rd = reg;
+                       alias_reg_map = (1ULL << *rd) | (1ULL << *ra);
+               } else {
+                       /* Store. */
+                       reg = get_SrcB_X1(bundle);
+                       reg_map |= (1ULL << reg);
+                       *rb = reg;
+                       alias_reg_map = (1ULL << *ra) | (1ULL << *rb);
+               }
+
+               if (!is_bundle_x0_nop(bundle)) {
+                       reg = get_SrcA_X0(bundle);
+                       reg_map |= (1ULL << reg);
+                       map = (1ULL << reg);
+
+                       reg = get_SrcB_X0(bundle);
+                       reg_map |= (1ULL << reg);
+                       map |= (1ULL << reg);
+
+                       reg = get_Dest_X0(bundle);
+                       reg_map |= (1ULL << reg);
+                       map |= (1ULL << reg);
+
+                       if (map & alias_reg_map)
+                               alias = true;
+               }
+       }
+
+       /*
+        * "alias" indicates if the unalign access registers have collision
+        * with others in the same bundle. We just test the all-register-
+        * operands case (RRR), ignoring cases with immediates. If a bundle
+        * has no register alias, we may do fixup in a simple or fast manner.
+        * So if an immediate field happens to collide with a register, we may
+        * end up falling back to the generic handling.
+        */
+
+       *r_alias = alias;
+
+       /* Flip bits on reg_map. */
+       reg_map ^= -1ULL;
+
+       /* Scan reg_map lower 54(TREG_SP) bits to find 3 set bits. */
+       for (i = 0; i < TREG_SP; i++) {
+               if (reg_map & (0x1ULL << i)) {
+                       if (*clob1 == -1) {
+                               *clob1 = i;
+                       } else if (*clob2 == -1) {
+                               *clob2 = i;
+                       } else if (*clob3 == -1) {
+                               *clob3 = i;
+                               return;
+                       }
+               }
+       }
+}
+
+/*
+ * Sanity check for register ra, rb, rd, clob1/2/3. Return true if any of them
+ * is unexpected.
+ */
+
+static bool check_regs(uint64_t rd, uint64_t ra, uint64_t rb,
+                      uint64_t clob1, uint64_t clob2,  uint64_t clob3)
+{
+       bool unexpected = false;
+       if ((ra >= 56) && (ra != TREG_ZERO))
+               unexpected = true;
+
+       if ((clob1 >= 56) || (clob2 >= 56) || (clob3 >= 56))
+               unexpected = true;
+
+       if (rd != -1) {
+               if ((rd >= 56) && (rd != TREG_ZERO))
+                       unexpected = true;
+       } else {
+               if ((rb >= 56) && (rb != TREG_ZERO))
+                       unexpected = true;
+       }
+       return unexpected;
+}
+
+
+#define  GX_INSN_X0_MASK   ((1ULL << 31) - 1)
+#define  GX_INSN_X1_MASK   (((1ULL << 31) - 1) << 31)
+#define  GX_INSN_Y0_MASK   ((0xFULL << 27) | (0xFFFFFULL))
+#define  GX_INSN_Y1_MASK   (GX_INSN_Y0_MASK << 31)
+#define  GX_INSN_Y2_MASK   ((0x7FULL << 51) | (0x7FULL << 20))
+
+#ifdef __LITTLE_ENDIAN
+#define  GX_INSN_BSWAP(_bundle_)    (_bundle_)
+#else
+#define  GX_INSN_BSWAP(_bundle_)    swab64(_bundle_)
+#endif /* __LITTLE_ENDIAN */
+
+/*
+ * __JIT_CODE(.) creates template bundles in .rodata.unalign_data section.
+ * The corresponding static function jix_x#_###(.) generates partial or
+ * whole bundle based on the template and given arguments.
+ */
+
+#define __JIT_CODE(_X_)                                                \
+       asm (".pushsection .rodata.unalign_data, \"a\"\n"       \
+            _X_"\n"                                            \
+            ".popsection\n")
+
+__JIT_CODE("__unalign_jit_x1_mtspr:   {mtspr 0,  r0}");
+static tilegx_bundle_bits jit_x1_mtspr(int spr, int reg)
+{
+       extern  tilegx_bundle_bits __unalign_jit_x1_mtspr;
+       return (GX_INSN_BSWAP(__unalign_jit_x1_mtspr) & GX_INSN_X1_MASK) |
+               create_MT_Imm14_X1(spr) | create_SrcA_X1(reg);
+}
+
+__JIT_CODE("__unalign_jit_x1_mfspr:   {mfspr r0, 0}");
+static tilegx_bundle_bits  jit_x1_mfspr(int reg, int spr)
+{
+       extern  tilegx_bundle_bits __unalign_jit_x1_mfspr;
+       return (GX_INSN_BSWAP(__unalign_jit_x1_mfspr) & GX_INSN_X1_MASK) |
+               create_MF_Imm14_X1(spr) | create_Dest_X1(reg);
+}
+
+__JIT_CODE("__unalign_jit_x0_addi:   {addi  r0, r0, 0; iret}");
+static tilegx_bundle_bits  jit_x0_addi(int rd, int ra, int imm8)
+{
+       extern  tilegx_bundle_bits __unalign_jit_x0_addi;
+       return (GX_INSN_BSWAP(__unalign_jit_x0_addi) & GX_INSN_X0_MASK) |
+               create_Dest_X0(rd) | create_SrcA_X0(ra) |
+               create_Imm8_X0(imm8);
+}
+
+__JIT_CODE("__unalign_jit_x1_ldna:   {ldna  r0, r0}");
+static tilegx_bundle_bits  jit_x1_ldna(int rd, int ra)
+{
+       extern  tilegx_bundle_bits __unalign_jit_x1_ldna;
+       return (GX_INSN_BSWAP(__unalign_jit_x1_ldna) &  GX_INSN_X1_MASK) |
+               create_Dest_X1(rd) | create_SrcA_X1(ra);
+}
+
+__JIT_CODE("__unalign_jit_x0_dblalign:   {dblalign r0, r0 ,r0}");
+static tilegx_bundle_bits  jit_x0_dblalign(int rd, int ra, int rb)
+{
+       extern  tilegx_bundle_bits __unalign_jit_x0_dblalign;
+       return (GX_INSN_BSWAP(__unalign_jit_x0_dblalign) & GX_INSN_X0_MASK) |
+               create_Dest_X0(rd) | create_SrcA_X0(ra) |
+               create_SrcB_X0(rb);
+}
+
+__JIT_CODE("__unalign_jit_x1_iret:   {iret}");
+static tilegx_bundle_bits  jit_x1_iret(void)
+{
+       extern  tilegx_bundle_bits __unalign_jit_x1_iret;
+       return GX_INSN_BSWAP(__unalign_jit_x1_iret) & GX_INSN_X1_MASK;
+}
+
+__JIT_CODE("__unalign_jit_x01_fnop:   {fnop;fnop}");
+static tilegx_bundle_bits  jit_x0_fnop(void)
+{
+       extern  tilegx_bundle_bits __unalign_jit_x01_fnop;
+       return GX_INSN_BSWAP(__unalign_jit_x01_fnop) & GX_INSN_X0_MASK;
+}
+
+static tilegx_bundle_bits  jit_x1_fnop(void)
+{
+       extern  tilegx_bundle_bits __unalign_jit_x01_fnop;
+       return GX_INSN_BSWAP(__unalign_jit_x01_fnop) & GX_INSN_X1_MASK;
+}
+
+__JIT_CODE("__unalign_jit_y2_dummy:   {fnop; fnop; ld zero, sp}");
+static tilegx_bundle_bits  jit_y2_dummy(void)
+{
+       extern  tilegx_bundle_bits __unalign_jit_y2_dummy;
+       return GX_INSN_BSWAP(__unalign_jit_y2_dummy) & GX_INSN_Y2_MASK;
+}
+
+static tilegx_bundle_bits  jit_y1_fnop(void)
+{
+       extern  tilegx_bundle_bits __unalign_jit_y2_dummy;
+       return GX_INSN_BSWAP(__unalign_jit_y2_dummy) & GX_INSN_Y1_MASK;
+}
+
+__JIT_CODE("__unalign_jit_x1_st1_add:  {st1_add r1, r0, 0}");
+static tilegx_bundle_bits  jit_x1_st1_add(int ra, int rb, int imm8)
+{
+       extern  tilegx_bundle_bits __unalign_jit_x1_st1_add;
+       return (GX_INSN_BSWAP(__unalign_jit_x1_st1_add) &
+               (~create_SrcA_X1(-1)) &
+               GX_INSN_X1_MASK) | create_SrcA_X1(ra) |
+               create_SrcB_X1(rb) | create_Dest_Imm8_X1(imm8);
+}
+
+__JIT_CODE("__unalign_jit_x1_st:  {crc32_8 r1, r0, r0; st  r0, r0}");
+static tilegx_bundle_bits  jit_x1_st(int ra, int rb)
+{
+       extern  tilegx_bundle_bits __unalign_jit_x1_st;
+       return (GX_INSN_BSWAP(__unalign_jit_x1_st) & GX_INSN_X1_MASK) |
+               create_SrcA_X1(ra) | create_SrcB_X1(rb);
+}
+
+__JIT_CODE("__unalign_jit_x1_st_add:  {st_add  r1, r0, 0}");
+static tilegx_bundle_bits  jit_x1_st_add(int ra, int rb, int imm8)
+{
+       extern  tilegx_bundle_bits __unalign_jit_x1_st_add;
+       return (GX_INSN_BSWAP(__unalign_jit_x1_st_add) &
+               (~create_SrcA_X1(-1)) &
+               GX_INSN_X1_MASK) | create_SrcA_X1(ra) |
+               create_SrcB_X1(rb) | create_Dest_Imm8_X1(imm8);
+}
+
+__JIT_CODE("__unalign_jit_x1_ld:  {crc32_8 r1, r0, r0; ld  r0, r0}");
+static tilegx_bundle_bits  jit_x1_ld(int rd, int ra)
+{
+       extern  tilegx_bundle_bits __unalign_jit_x1_ld;
+       return (GX_INSN_BSWAP(__unalign_jit_x1_ld) & GX_INSN_X1_MASK) |
+               create_Dest_X1(rd) | create_SrcA_X1(ra);
+}
+
+__JIT_CODE("__unalign_jit_x1_ld_add:  {ld_add  r1, r0, 0}");
+static tilegx_bundle_bits  jit_x1_ld_add(int rd, int ra, int imm8)
+{
+       extern  tilegx_bundle_bits __unalign_jit_x1_ld_add;
+       return (GX_INSN_BSWAP(__unalign_jit_x1_ld_add) &
+               (~create_Dest_X1(-1)) &
+               GX_INSN_X1_MASK) | create_Dest_X1(rd) |
+               create_SrcA_X1(ra) | create_Imm8_X1(imm8);
+}
+
+__JIT_CODE("__unalign_jit_x0_bfexts:  {bfexts r0, r0, 0, 0}");
+static tilegx_bundle_bits  jit_x0_bfexts(int rd, int ra, int bfs, int bfe)
+{
+       extern  tilegx_bundle_bits __unalign_jit_x0_bfexts;
+       return (GX_INSN_BSWAP(__unalign_jit_x0_bfexts) &
+               GX_INSN_X0_MASK) |
+               create_Dest_X0(rd) | create_SrcA_X0(ra) |
+               create_BFStart_X0(bfs) | create_BFEnd_X0(bfe);
+}
+
+__JIT_CODE("__unalign_jit_x0_bfextu:  {bfextu r0, r0, 0, 0}");
+static tilegx_bundle_bits  jit_x0_bfextu(int rd, int ra, int bfs, int bfe)
+{
+       extern  tilegx_bundle_bits __unalign_jit_x0_bfextu;
+       return (GX_INSN_BSWAP(__unalign_jit_x0_bfextu) &
+               GX_INSN_X0_MASK) |
+               create_Dest_X0(rd) | create_SrcA_X0(ra) |
+               create_BFStart_X0(bfs) | create_BFEnd_X0(bfe);
+}
+
+__JIT_CODE("__unalign_jit_x1_addi:  {bfextu r1, r1, 0, 0; addi r0, r0, 0}");
+static tilegx_bundle_bits  jit_x1_addi(int rd, int ra, int imm8)
+{
+       extern  tilegx_bundle_bits __unalign_jit_x1_addi;
+       return (GX_INSN_BSWAP(__unalign_jit_x1_addi) & GX_INSN_X1_MASK) |
+               create_Dest_X1(rd) | create_SrcA_X1(ra) |
+               create_Imm8_X1(imm8);
+}
+
+__JIT_CODE("__unalign_jit_x0_shrui:  {shrui r0, r0, 0; iret}");
+static tilegx_bundle_bits  jit_x0_shrui(int rd, int ra, int imm6)
+{
+       extern  tilegx_bundle_bits __unalign_jit_x0_shrui;
+       return (GX_INSN_BSWAP(__unalign_jit_x0_shrui) &
+               GX_INSN_X0_MASK) |
+               create_Dest_X0(rd) | create_SrcA_X0(ra) |
+               create_ShAmt_X0(imm6);
+}
+
+__JIT_CODE("__unalign_jit_x0_rotli:  {rotli r0, r0, 0; iret}");
+static tilegx_bundle_bits  jit_x0_rotli(int rd, int ra, int imm6)
+{
+       extern  tilegx_bundle_bits __unalign_jit_x0_rotli;
+       return (GX_INSN_BSWAP(__unalign_jit_x0_rotli) &
+               GX_INSN_X0_MASK) |
+               create_Dest_X0(rd) | create_SrcA_X0(ra) |
+               create_ShAmt_X0(imm6);
+}
+
+__JIT_CODE("__unalign_jit_x1_bnezt:  {bnezt r0, __unalign_jit_x1_bnezt}");
+static tilegx_bundle_bits  jit_x1_bnezt(int ra, int broff)
+{
+       extern  tilegx_bundle_bits __unalign_jit_x1_bnezt;
+       return (GX_INSN_BSWAP(__unalign_jit_x1_bnezt) &
+               GX_INSN_X1_MASK) |
+               create_SrcA_X1(ra) | create_BrOff_X1(broff);
+}
+
+#undef __JIT_CODE
+
+/*
+ * This function generates unalign fixup JIT.
+ *
+ * We first find the unaligned load/store instruction's destination and
+ * source registers: ra, rb and rd, plus 3 scratch registers, by calling
+ * find_regs(...). 3 scratch clobbers should not alias with any register
+ * used in the fault bundle. Then analyze the fault bundle to determine
+ * if it's a load or store, operand width, branch or address increment etc.
+ * At last generated JIT is copied into JIT code area in user space.
+ */
+
+static
+void jit_bundle_gen(struct pt_regs *regs, tilegx_bundle_bits bundle,
+                   int align_ctl)
+{
+       struct thread_info *info = current_thread_info();
+       struct unaligned_jit_fragment frag;
+       struct unaligned_jit_fragment *jit_code_area;
+       tilegx_bundle_bits bundle_2 = 0;
+       /* If bundle_2_enable = false, bundle_2 is fnop/nop operation. */
+       bool     bundle_2_enable = true;
+       uint64_t ra, rb, rd = -1, clob1, clob2, clob3;
+       /*
+        * Indicate if the unalign access
+        * instruction's registers hit with
+        * others in the same bundle.
+        */
+       bool     alias = false;
+       bool     load_n_store = true;
+       bool     load_store_signed = false;
+       unsigned int  load_store_size = 8;
+       bool     y1_br = false;  /* True, for a branch in same bundle at Y1.*/
+       int      y1_br_reg = 0;
+       /* True for link operation. i.e. jalr or lnk at Y1 */
+       bool     y1_lr = false;
+       int      y1_lr_reg = 0;
+       bool     x1_add = false;/* True, for load/store ADD instruction at X1*/
+       int      x1_add_imm8 = 0;
+       bool     unexpected = false;
+       int      n = 0, k;
+
+       jit_code_area =
+               (struct unaligned_jit_fragment *)(info->unalign_jit_base);
+
+       memset((void *)&frag, 0, sizeof(frag));
+
+       /* 0: X mode, Otherwise: Y mode. */
+       if (bundle & TILEGX_BUNDLE_MODE_MASK) {
+               unsigned int mod, opcode;
+
+               if (get_Opcode_Y1(bundle) == RRR_1_OPCODE_Y1 &&
+                   get_RRROpcodeExtension_Y1(bundle) ==
+                   UNARY_RRR_1_OPCODE_Y1) {
+
+                       opcode = get_UnaryOpcodeExtension_Y1(bundle);
+
+                       /*
+                        * Test "jalr", "jalrp", "jr", "jrp" instruction at Y1
+                        * pipeline.
+                        */
+                       switch (opcode) {
+                       case JALR_UNARY_OPCODE_Y1:
+                       case JALRP_UNARY_OPCODE_Y1:
+                               y1_lr = true;
+                               y1_lr_reg = 55; /* Link register. */
+                               /* FALLTHROUGH */
+                       case JR_UNARY_OPCODE_Y1:
+                       case JRP_UNARY_OPCODE_Y1:
+                               y1_br = true;
+                               y1_br_reg = get_SrcA_Y1(bundle);
+                               break;
+                       case LNK_UNARY_OPCODE_Y1:
+                               /* "lnk" at Y1 pipeline. */
+                               y1_lr = true;
+                               y1_lr_reg = get_Dest_Y1(bundle);
+                               break;
+                       }
+               }
+
+               opcode = get_Opcode_Y2(bundle);
+               mod = get_Mode(bundle);
+
+               /*
+                *  bundle_2 is bundle after making Y2 as a dummy operation
+                *  - ld zero, sp
+                */
+               bundle_2 = (bundle & (~GX_INSN_Y2_MASK)) | jit_y2_dummy();
+
+               /* Make Y1 as fnop if Y1 is a branch or lnk operation. */
+               if (y1_br || y1_lr) {
+                       bundle_2 &= ~(GX_INSN_Y1_MASK);
+                       bundle_2 |= jit_y1_fnop();
+               }
+
+               if (is_y0_y1_nop(bundle_2))
+                       bundle_2_enable = false;
+
+               if (mod == MODE_OPCODE_YC2) {
+                       /* Store. */
+                       load_n_store = false;
+                       load_store_size = 1 << opcode;
+                       load_store_signed = false;
+                       find_regs(bundle, 0, &ra, &rb, &clob1, &clob2,
+                                 &clob3, &alias);
+                       if (load_store_size > 8)
+                               unexpected = true;
+               } else {
+                       /* Load. */
+                       load_n_store = true;
+                       if (mod == MODE_OPCODE_YB2) {
+                               switch (opcode) {
+                               case LD_OPCODE_Y2:
+                                       load_store_signed = false;
+                                       load_store_size = 8;
+                                       break;
+                               case LD4S_OPCODE_Y2:
+                                       load_store_signed = true;
+                                       load_store_size = 4;
+                                       break;
+                               case LD4U_OPCODE_Y2:
+                                       load_store_signed = false;
+                                       load_store_size = 4;
+                                       break;
+                               default:
+                                       unexpected = true;
+                               }
+                       } else if (mod == MODE_OPCODE_YA2) {
+                               if (opcode == LD2S_OPCODE_Y2) {
+                                       load_store_signed = true;
+                                       load_store_size = 2;
+                               } else if (opcode == LD2U_OPCODE_Y2) {
+                                       load_store_signed = false;
+                                       load_store_size = 2;
+                               } else
+                                       unexpected = true;
+                       } else
+                               unexpected = true;
+                       find_regs(bundle, &rd, &ra, &rb, &clob1, &clob2,
+                                 &clob3, &alias);
+               }
+       } else {
+               unsigned int opcode;
+
+               /* bundle_2 is bundle after making X1 as "fnop". */
+               bundle_2 = (bundle & (~GX_INSN_X1_MASK)) | jit_x1_fnop();
+
+               if (is_x0_x1_nop(bundle_2))
+                       bundle_2_enable = false;
+
+               if (get_Opcode_X1(bundle) == RRR_0_OPCODE_X1) {
+                       opcode = get_UnaryOpcodeExtension_X1(bundle);
+
+                       if (get_RRROpcodeExtension_X1(bundle) ==
+                           UNARY_RRR_0_OPCODE_X1) {
+                               load_n_store = true;
+                               find_regs(bundle, &rd, &ra, &rb, &clob1,
+                                         &clob2, &clob3, &alias);
+
+                               switch (opcode) {
+                               case LD_UNARY_OPCODE_X1:
+                                       load_store_signed = false;
+                                       load_store_size = 8;
+                                       break;
+                               case LD4S_UNARY_OPCODE_X1:
+                                       load_store_signed = true;
+                                       /* FALLTHROUGH */
+                               case LD4U_UNARY_OPCODE_X1:
+                                       load_store_size = 4;
+                                       break;
+
+                               case LD2S_UNARY_OPCODE_X1:
+                                       load_store_signed = true;
+                                       /* FALLTHROUGH */
+                               case LD2U_UNARY_OPCODE_X1:
+                                       load_store_size = 2;
+                                       break;
+                               default:
+                                       unexpected = true;
+                               }
+                       } else {
+                               load_n_store = false;
+                               load_store_signed = false;
+                               find_regs(bundle, 0, &ra, &rb,
+                                         &clob1, &clob2, &clob3,
+                                         &alias);
+
+                               opcode = get_RRROpcodeExtension_X1(bundle);
+                               switch (opcode) {
+                               case ST_RRR_0_OPCODE_X1:
+                                       load_store_size = 8;
+                                       break;
+                               case ST4_RRR_0_OPCODE_X1:
+                                       load_store_size = 4;
+                                       break;
+                               case ST2_RRR_0_OPCODE_X1:
+                                       load_store_size = 2;
+                                       break;
+                               default:
+                                       unexpected = true;
+                               }
+                       }
+               } else if (get_Opcode_X1(bundle) == IMM8_OPCODE_X1) {
+                       load_n_store = true;
+                       opcode = get_Imm8OpcodeExtension_X1(bundle);
+                       switch (opcode) {
+                       case LD_ADD_IMM8_OPCODE_X1:
+                               load_store_size = 8;
+                               break;
+
+                       case LD4S_ADD_IMM8_OPCODE_X1:
+                               load_store_signed = true;
+                               /* FALLTHROUGH */
+                       case LD4U_ADD_IMM8_OPCODE_X1:
+                               load_store_size = 4;
+                               break;
+
+                       case LD2S_ADD_IMM8_OPCODE_X1:
+                               load_store_signed = true;
+                               /* FALLTHROUGH */
+                       case LD2U_ADD_IMM8_OPCODE_X1:
+                               load_store_size = 2;
+                               break;
+
+                       case ST_ADD_IMM8_OPCODE_X1:
+                               load_n_store = false;
+                               load_store_size = 8;
+                               break;
+                       case ST4_ADD_IMM8_OPCODE_X1:
+                               load_n_store = false;
+                               load_store_size = 4;
+                               break;
+                       case ST2_ADD_IMM8_OPCODE_X1:
+                               load_n_store = false;
+                               load_store_size = 2;
+                               break;
+                       default:
+                               unexpected = true;
+                       }
+
+                       if (!unexpected) {
+                               x1_add = true;
+                               if (load_n_store)
+                                       x1_add_imm8 = get_Imm8_X1(bundle);
+                               else
+                                       x1_add_imm8 = get_Dest_Imm8_X1(bundle);
+                       }
+
+                       find_regs(bundle, load_n_store ? (&rd) : NULL,
+                                 &ra, &rb, &clob1, &clob2, &clob3, &alias);
+               } else
+                       unexpected = true;
+       }
+
+       /*
+        * Some sanity check for register numbers extracted from fault bundle.
+        */
+       if (check_regs(rd, ra, rb, clob1, clob2, clob3) == true)
+               unexpected = true;
+
+       /* Give warning if register ra has an aligned address. */
+       if (!unexpected)
+               WARN_ON(!((load_store_size - 1) & (regs->regs[ra])));
+
+
+       /*
+        * Fault came from kernel space, here we only need take care of
+        * unaligned "get_user/put_user" macros defined in "uaccess.h".
+        * Basically, we will handle bundle like this:
+        * {ld/2u/4s rd, ra; movei rx, 0} or {st/2/4 ra, rb; movei rx, 0}
+        * (Refer to file "arch/tile/include/asm/uaccess.h" for details).
+        * For either load or store, byte-wise operation is performed by calling
+        * get_user() or put_user(). If the macro returns non-zero value,
+        * set the value to rx, otherwise set zero to rx. Finally make pc point
+        * to next bundle and return.
+        */
+
+       if (EX1_PL(regs->ex1) != USER_PL) {
+
+               unsigned long rx = 0;
+               unsigned long x = 0, ret = 0;
+
+               if (y1_br || y1_lr || x1_add ||
+                   (load_store_signed !=
+                    (load_n_store && load_store_size == 4))) {
+                       /* No branch, link, wrong sign-ext or load/store add. */
+                       unexpected = true;
+               } else if (!unexpected) {
+                       if (bundle & TILEGX_BUNDLE_MODE_MASK) {
+                               /*
+                                * Fault bundle is Y mode.
+                                * Check if the Y1 and Y0 is the form of
+                                * { movei rx, 0; nop/fnop }, if yes,
+                                * find the rx.
+                                */
+
+                               if ((get_Opcode_Y1(bundle) == ADDI_OPCODE_Y1)
+                                   && (get_SrcA_Y1(bundle) == TREG_ZERO) &&
+                                   (get_Imm8_Y1(bundle) == 0) &&
+                                   is_bundle_y0_nop(bundle)) {
+                                       rx = get_Dest_Y1(bundle);
+                               } else if ((get_Opcode_Y0(bundle) ==
+                                           ADDI_OPCODE_Y0) &&
+                                          (get_SrcA_Y0(bundle) == TREG_ZERO) &&
+                                          (get_Imm8_Y0(bundle) == 0) &&
+                                          is_bundle_y1_nop(bundle)) {
+                                       rx = get_Dest_Y0(bundle);
+                               } else {
+                                       unexpected = true;
+                               }
+                       } else {
+                               /*
+                                * Fault bundle is X mode.
+                                * Check if the X0 is 'movei rx, 0',
+                                * if yes, find the rx.
+                                */
+
+                               if ((get_Opcode_X0(bundle) == IMM8_OPCODE_X0)
+                                   && (get_Imm8OpcodeExtension_X0(bundle) ==
+                                       ADDI_IMM8_OPCODE_X0) &&
+                                   (get_SrcA_X0(bundle) == TREG_ZERO) &&
+                                   (get_Imm8_X0(bundle) == 0)) {
+                                       rx = get_Dest_X0(bundle);
+                               } else {
+                                       unexpected = true;
+                               }
+                       }
+
+                       /* rx should be less than 56. */
+                       if (!unexpected && (rx >= 56))
+                               unexpected = true;
+               }
+
+               if (!search_exception_tables(regs->pc)) {
+                       /* No fixup in the exception tables for the pc. */
+                       unexpected = true;
+               }
+
+               if (unexpected) {
+                       /* Unexpected unalign kernel fault. */
+                       struct task_struct *tsk = validate_current();
+
+                       bust_spinlocks(1);
+
+                       show_regs(regs);
+
+                       if (unlikely(tsk->pid < 2)) {
+                               panic("Kernel unalign fault running %s!",
+                                     tsk->pid ? "init" : "the idle task");
+                       }
+#ifdef SUPPORT_DIE
+                       die("Oops", regs);
+#endif
+                       bust_spinlocks(1);
+
+                       do_group_exit(SIGKILL);
+
+               } else {
+                       unsigned long i, b = 0;
+                       unsigned char *ptr =
+                               (unsigned char *)regs->regs[ra];
+                       if (load_n_store) {
+                               /* handle get_user(x, ptr) */
+                               for (i = 0; i < load_store_size; i++) {
+                                       ret = get_user(b, ptr++);
+                                       if (!ret) {
+                                               /* Success! update x. */
+#ifdef __LITTLE_ENDIAN
+                                               x |= (b << (8 * i));
+#else
+                                               x <<= 8;
+                                               x |= b;
+#endif /* __LITTLE_ENDIAN */
+                                       } else {
+                                               x = 0;
+                                               break;
+                                       }
+                               }
+
+                               /* Sign-extend 4-byte loads. */
+                               if (load_store_size == 4)
+                                       x = (long)(int)x;
+
+                               /* Set register rd. */
+                               regs->regs[rd] = x;
+
+                               /* Set register rx. */
+                               regs->regs[rx] = ret;
+
+                               /* Bump pc. */
+                               regs->pc += 8;
+
+                       } else {
+                               /* Handle put_user(x, ptr) */
+                               x = regs->regs[rb];
+#ifdef __LITTLE_ENDIAN
+                               b = x;
+#else
+                               /*
+                                * Swap x in order to store x from low
+                                * to high memory same as the
+                                * little-endian case.
+                                */
+                               switch (load_store_size) {
+                               case 8:
+                                       b = swab64(x);
+                                       break;
+                               case 4:
+                                       b = swab32(x);
+                                       break;
+                               case 2:
+                                       b = swab16(x);
+                                       break;
+                               }
+#endif /* __LITTLE_ENDIAN */
+                               for (i = 0; i < load_store_size; i++) {
+                                       ret = put_user(b, ptr++);
+                                       if (ret)
+                                               break;
+                                       /* Success! shift 1 byte. */
+                                       b >>= 8;
+                               }
+                               /* Set register rx. */
+                               regs->regs[rx] = ret;
+
+                               /* Bump pc. */
+                               regs->pc += 8;
+                       }
+               }
+
+               unaligned_fixup_count++;
+
+               if (unaligned_printk) {
+                       pr_info("%s/%d. Unalign fixup for kernel access "
+                               "to userspace %lx.",
+                               current->comm, current->pid, regs->regs[ra]);
+               }
+
+               /* Done! Return to the exception handler. */
+               return;
+       }
+
+       if ((align_ctl == 0) || unexpected) {
+               siginfo_t info = {
+                       .si_signo = SIGBUS,
+                       .si_code = BUS_ADRALN,
+                       .si_addr = (unsigned char __user *)0
+               };
+               if (unaligned_printk)
+                       pr_info("Unalign bundle: unexp @%llx, %llx",
+                               (unsigned long long)regs->pc,
+                               (unsigned long long)bundle);
+
+               if (ra < 56) {
+                       unsigned long uaa = (unsigned long)regs->regs[ra];
+                       /* Set bus Address. */
+                       info.si_addr = (unsigned char __user *)uaa;
+               }
+
+               unaligned_fixup_count++;
+
+               trace_unhandled_signal("unaligned fixup trap", regs,
+                                      (unsigned long)info.si_addr, SIGBUS);
+               force_sig_info(info.si_signo, &info, current);
+               return;
+       }
+
+#ifdef __LITTLE_ENDIAN
+#define UA_FIXUP_ADDR_DELTA          1
+#define UA_FIXUP_BFEXT_START(_B_)    0
+#define UA_FIXUP_BFEXT_END(_B_)     (8 * (_B_) - 1)
+#else /* __BIG_ENDIAN */
+#define UA_FIXUP_ADDR_DELTA          -1
+#define UA_FIXUP_BFEXT_START(_B_)   (64 - 8 * (_B_))
+#define UA_FIXUP_BFEXT_END(_B_)      63
+#endif /* __LITTLE_ENDIAN */
+
+
+
+       if ((ra != rb) && (rd != TREG_SP) && !alias &&
+           !y1_br && !y1_lr && !x1_add) {
+               /*
+                * Simple case: ra != rb and no register alias found,
+                * and no branch or link. This will be the majority.
+                * We can do a little better for simplae case than the
+                * generic scheme below.
+                */
+               if (!load_n_store) {
+                       /*
+                        * Simple store: ra != rb, no need for scratch register.
+                        * Just store and rotate to right bytewise.
+                        */
+#ifdef __BIG_ENDIAN
+                       frag.insn[n++] =
+                               jit_x0_addi(ra, ra, load_store_size - 1) |
+                               jit_x1_fnop();
+#endif /* __BIG_ENDIAN */
+                       for (k = 0; k < load_store_size; k++) {
+                               /* Store a byte. */
+                               frag.insn[n++] =
+                                       jit_x0_rotli(rb, rb, 56) |
+                                       jit_x1_st1_add(ra, rb,
+                                                      UA_FIXUP_ADDR_DELTA);
+                       }
+#ifdef __BIG_ENDIAN
+                       frag.insn[n] = jit_x1_addi(ra, ra, 1);
+#else
+                       frag.insn[n] = jit_x1_addi(ra, ra,
+                                                  -1 * load_store_size);
+#endif /* __LITTLE_ENDIAN */
+
+                       if (load_store_size == 8) {
+                               frag.insn[n] |= jit_x0_fnop();
+                       } else if (load_store_size == 4) {
+                               frag.insn[n] |= jit_x0_rotli(rb, rb, 32);
+                       } else { /* = 2 */
+                               frag.insn[n] |= jit_x0_rotli(rb, rb, 16);
+                       }
+                       n++;
+                       if (bundle_2_enable)
+                               frag.insn[n++] = bundle_2;
+                       frag.insn[n++] = jit_x0_fnop() | jit_x1_iret();
+               } else {
+                       if (rd == ra) {
+                               /* Use two clobber registers: clob1/2. */
+                               frag.insn[n++] =
+                                       jit_x0_addi(TREG_SP, TREG_SP, -16) |
+                                       jit_x1_fnop();
+                               frag.insn[n++] =
+                                       jit_x0_addi(clob1, ra, 7) |
+                                       jit_x1_st_add(TREG_SP, clob1, -8);
+                               frag.insn[n++] =
+                                       jit_x0_addi(clob2, ra, 0) |
+                                       jit_x1_st(TREG_SP, clob2);
+                               frag.insn[n++] =
+                                       jit_x0_fnop() |
+                                       jit_x1_ldna(rd, ra);
+                               frag.insn[n++] =
+                                       jit_x0_fnop() |
+                                       jit_x1_ldna(clob1, clob1);
+                               /*
+                                * Note: we must make sure that rd must not
+                                * be sp. Recover clob1/2 from stack.
+                                */
+                               frag.insn[n++] =
+                                       jit_x0_dblalign(rd, clob1, clob2) |
+                                       jit_x1_ld_add(clob2, TREG_SP, 8);
+                               frag.insn[n++] =
+                                       jit_x0_fnop() |
+                                       jit_x1_ld_add(clob1, TREG_SP, 16);
+                       } else {
+                               /* Use one clobber register: clob1 only. */
+                               frag.insn[n++] =
+                                       jit_x0_addi(TREG_SP, TREG_SP, -16) |
+                                       jit_x1_fnop();
+                               frag.insn[n++] =
+                                       jit_x0_addi(clob1, ra, 7) |
+                                       jit_x1_st(TREG_SP, clob1);
+                               frag.insn[n++] =
+                                       jit_x0_fnop() |
+                                       jit_x1_ldna(rd, ra);
+                               frag.insn[n++] =
+                                       jit_x0_fnop() |
+                                       jit_x1_ldna(clob1, clob1);
+                               /*
+                                * Note: we must make sure that rd must not
+                                * be sp. Recover clob1 from stack.
+                                */
+                               frag.insn[n++] =
+                                       jit_x0_dblalign(rd, clob1, ra) |
+                                       jit_x1_ld_add(clob1, TREG_SP, 16);
+                       }
+
+                       if (bundle_2_enable)
+                               frag.insn[n++] = bundle_2;
+                       /*
+                        * For non 8-byte load, extract corresponding bytes and
+                        * signed extension.
+                        */
+                       if (load_store_size == 4) {
+                               if (load_store_signed)
+                                       frag.insn[n++] =
+                                               jit_x0_bfexts(
+                                                       rd, rd,
+                                                       UA_FIXUP_BFEXT_START(4),
+                                                       UA_FIXUP_BFEXT_END(4)) |
+                                               jit_x1_fnop();
+                               else
+                                       frag.insn[n++] =
+                                               jit_x0_bfextu(
+                                                       rd, rd,
+                                                       UA_FIXUP_BFEXT_START(4),
+                                                       UA_FIXUP_BFEXT_END(4)) |
+                                               jit_x1_fnop();
+                       } else if (load_store_size == 2) {
+                               if (load_store_signed)
+                                       frag.insn[n++] =
+                                               jit_x0_bfexts(
+                                                       rd, rd,
+                                                       UA_FIXUP_BFEXT_START(2),
+                                                       UA_FIXUP_BFEXT_END(2)) |
+                                               jit_x1_fnop();
+                               else
+                                       frag.insn[n++] =
+                                               jit_x0_bfextu(
+                                                       rd, rd,
+                                                       UA_FIXUP_BFEXT_START(2),
+                                                       UA_FIXUP_BFEXT_END(2)) |
+                                               jit_x1_fnop();
+                       }
+
+                       frag.insn[n++] =
+                               jit_x0_fnop()  |
+                               jit_x1_iret();
+               }
+       } else if (!load_n_store) {
+
+               /*
+                * Generic memory store cases: use 3 clobber registers.
+                *
+                * Alloc space for saveing clob2,1,3 on user's stack.
+                * register clob3 points to where clob2 saved, followed by
+                * clob1 and 3 from high to low memory.
+                */
+               frag.insn[n++] =
+                       jit_x0_addi(TREG_SP, TREG_SP, -32)    |
+                       jit_x1_fnop();
+               frag.insn[n++] =
+                       jit_x0_addi(clob3, TREG_SP, 16)  |
+                       jit_x1_st_add(TREG_SP, clob3, 8);
+#ifdef __LITTLE_ENDIAN
+               frag.insn[n++] =
+                       jit_x0_addi(clob1, ra, 0)   |
+                       jit_x1_st_add(TREG_SP, clob1, 8);
+#else
+               frag.insn[n++] =
+                       jit_x0_addi(clob1, ra, load_store_size - 1)   |
+                       jit_x1_st_add(TREG_SP, clob1, 8);
+#endif
+               if (load_store_size == 8) {
+                       /*
+                        * We save one byte a time, not for fast, but compact
+                        * code. After each store, data source register shift
+                        * right one byte. unchanged after 8 stores.
+                        */
+                       frag.insn[n++] =
+                               jit_x0_addi(clob2, TREG_ZERO, 7)     |
+                               jit_x1_st_add(TREG_SP, clob2, 16);
+                       frag.insn[n++] =
+                               jit_x0_rotli(rb, rb, 56)      |
+                               jit_x1_st1_add(clob1, rb, UA_FIXUP_ADDR_DELTA);
+                       frag.insn[n++] =
+                               jit_x0_addi(clob2, clob2, -1) |
+                               jit_x1_bnezt(clob2, -1);
+                       frag.insn[n++] =
+                               jit_x0_fnop()                 |
+                               jit_x1_addi(clob2, y1_br_reg, 0);
+               } else if (load_store_size == 4) {
+                       frag.insn[n++] =
+                               jit_x0_addi(clob2, TREG_ZERO, 3)     |
+                               jit_x1_st_add(TREG_SP, clob2, 16);
+                       frag.insn[n++] =
+                               jit_x0_rotli(rb, rb, 56)      |
+                               jit_x1_st1_add(clob1, rb, UA_FIXUP_ADDR_DELTA);
+                       frag.insn[n++] =
+                               jit_x0_addi(clob2, clob2, -1) |
+                               jit_x1_bnezt(clob2, -1);
+                       /*
+                        * same as 8-byte case, but need shift another 4
+                        * byte to recover rb for 4-byte store.
+                        */
+                       frag.insn[n++] = jit_x0_rotli(rb, rb, 32)      |
+                               jit_x1_addi(clob2, y1_br_reg, 0);
+               } else { /* =2 */
+                       frag.insn[n++] =
+                               jit_x0_addi(clob2, rb, 0)     |
+                               jit_x1_st_add(TREG_SP, clob2, 16);
+                       for (k = 0; k < 2; k++) {
+                               frag.insn[n++] =
+                                       jit_x0_shrui(rb, rb, 8)  |
+                                       jit_x1_st1_add(clob1, rb,
+                                                      UA_FIXUP_ADDR_DELTA);
+                       }
+                       frag.insn[n++] =
+                               jit_x0_addi(rb, clob2, 0)       |
+                               jit_x1_addi(clob2, y1_br_reg, 0);
+               }
+
+               if (bundle_2_enable)
+                       frag.insn[n++] = bundle_2;
+
+               if (y1_lr) {
+                       frag.insn[n++] =
+                               jit_x0_fnop()                    |
+                               jit_x1_mfspr(y1_lr_reg,
+                                            SPR_EX_CONTEXT_0_0);
+               }
+               if (y1_br) {
+                       frag.insn[n++] =
+                               jit_x0_fnop()                    |
+                               jit_x1_mtspr(SPR_EX_CONTEXT_0_0,
+                                            clob2);
+               }
+               if (x1_add) {
+                       frag.insn[n++] =
+                               jit_x0_addi(ra, ra, x1_add_imm8) |
+                               jit_x1_ld_add(clob2, clob3, -8);
+               } else {
+                       frag.insn[n++] =
+                               jit_x0_fnop()                    |
+                               jit_x1_ld_add(clob2, clob3, -8);
+               }
+               frag.insn[n++] =
+                       jit_x0_fnop()   |
+                       jit_x1_ld_add(clob1, clob3, -8);
+               frag.insn[n++] = jit_x0_fnop()   | jit_x1_ld(clob3, clob3);
+               frag.insn[n++] = jit_x0_fnop()   | jit_x1_iret();
+
+       } else {
+               /*
+                * Generic memory load cases.
+                *
+                * Alloc space for saveing clob1,2,3 on user's stack.
+                * register clob3 points to where clob1 saved, followed
+                * by clob2 and 3 from high to low memory.
+                */
+
+               frag.insn[n++] =
+                       jit_x0_addi(TREG_SP, TREG_SP, -32) |
+                       jit_x1_fnop();
+               frag.insn[n++] =
+                       jit_x0_addi(clob3, TREG_SP, 16) |
+                       jit_x1_st_add(TREG_SP, clob3, 8);
+               frag.insn[n++] =
+                       jit_x0_addi(clob2, ra, 0) |
+                       jit_x1_st_add(TREG_SP, clob2, 8);
+
+               if (y1_br) {
+                       frag.insn[n++] =
+                               jit_x0_addi(clob1, y1_br_reg, 0) |
+                               jit_x1_st_add(TREG_SP, clob1, 16);
+               } else {
+                       frag.insn[n++] =
+                               jit_x0_fnop() |
+                               jit_x1_st_add(TREG_SP, clob1, 16);
+               }
+
+               if (bundle_2_enable)
+                       frag.insn[n++] = bundle_2;
+
+               if (y1_lr) {
+                       frag.insn[n++] =
+                               jit_x0_fnop()  |
+                               jit_x1_mfspr(y1_lr_reg,
+                                            SPR_EX_CONTEXT_0_0);
+               }
+
+               if (y1_br) {
+                       frag.insn[n++] =
+                               jit_x0_fnop() |
+                               jit_x1_mtspr(SPR_EX_CONTEXT_0_0,
+                                            clob1);
+               }
+
+               frag.insn[n++] =
+                       jit_x0_addi(clob1, clob2, 7)      |
+                       jit_x1_ldna(rd, clob2);
+               frag.insn[n++] =
+                       jit_x0_fnop()                     |
+                       jit_x1_ldna(clob1, clob1);
+               frag.insn[n++] =
+                       jit_x0_dblalign(rd, clob1, clob2) |
+                       jit_x1_ld_add(clob1, clob3, -8);
+               if (x1_add) {
+                       frag.insn[n++] =
+                               jit_x0_addi(ra, ra, x1_add_imm8) |
+                               jit_x1_ld_add(clob2, clob3, -8);
+               } else {
+                       frag.insn[n++] =
+                               jit_x0_fnop()  |
+                               jit_x1_ld_add(clob2, clob3, -8);
+               }
+
+               frag.insn[n++] =
+                       jit_x0_fnop() |
+                       jit_x1_ld(clob3, clob3);
+
+               if (load_store_size == 4) {
+                       if (load_store_signed)
+                               frag.insn[n++] =
+                                       jit_x0_bfexts(
+                                               rd, rd,
+                                               UA_FIXUP_BFEXT_START(4),
+                                               UA_FIXUP_BFEXT_END(4)) |
+                                       jit_x1_fnop();
+                       else
+                               frag.insn[n++] =
+                                       jit_x0_bfextu(
+                                               rd, rd,
+                                               UA_FIXUP_BFEXT_START(4),
+                                               UA_FIXUP_BFEXT_END(4)) |
+                                       jit_x1_fnop();
+               } else if (load_store_size == 2) {
+                       if (load_store_signed)
+                               frag.insn[n++] =
+                                       jit_x0_bfexts(
+                                               rd, rd,
+                                               UA_FIXUP_BFEXT_START(2),
+                                               UA_FIXUP_BFEXT_END(2)) |
+                                       jit_x1_fnop();
+                       else
+                               frag.insn[n++] =
+                                       jit_x0_bfextu(
+                                               rd, rd,
+                                               UA_FIXUP_BFEXT_START(2),
+                                               UA_FIXUP_BFEXT_END(2)) |
+                                       jit_x1_fnop();
+               }
+
+               frag.insn[n++] = jit_x0_fnop() | jit_x1_iret();
+       }
+
+       /* Max JIT bundle count is 14. */
+       WARN_ON(n > 14);
+
+       if (!unexpected) {
+               int status = 0;
+               int idx = (regs->pc >> 3) &
+                       ((1ULL << (PAGE_SHIFT - UNALIGN_JIT_SHIFT)) - 1);
+
+               frag.pc = regs->pc;
+               frag.bundle = bundle;
+
+               if (unaligned_printk) {
+                       pr_info("%s/%d, Unalign fixup: pc=%lx "
+                               "bundle=%lx %d %d %d %d %d %d %d %d.",
+                               current->comm, current->pid,
+                               (unsigned long)frag.pc,
+                               (unsigned long)frag.bundle,
+                               (int)alias, (int)rd, (int)ra,
+                               (int)rb, (int)bundle_2_enable,
+                               (int)y1_lr, (int)y1_br, (int)x1_add);
+
+                       for (k = 0; k < n; k += 2)
+                               pr_info("[%d] %016llx %016llx", k,
+                                       (unsigned long long)frag.insn[k],
+                                       (unsigned long long)frag.insn[k+1]);
+               }
+
+               /* Swap bundle byte order for big endian sys. */
+#ifdef __BIG_ENDIAN
+               frag.bundle = GX_INSN_BSWAP(frag.bundle);
+               for (k = 0; k < n; k++)
+                       frag.insn[k] = GX_INSN_BSWAP(frag.insn[k]);
+#endif /* __BIG_ENDIAN */
+
+               status = copy_to_user((void __user *)&jit_code_area[idx],
+                                     &frag, sizeof(frag));
+               if (status) {
+                       /* Fail to copy JIT into user land. send SIGSEGV. */
+                       siginfo_t info = {
+                               .si_signo = SIGSEGV,
+                               .si_code = SEGV_MAPERR,
+                               .si_addr = (void __user *)&jit_code_area[idx]
+                       };
+
+                       pr_warn("Unalign fixup: pid=%d %s jit_code_area=%llx",
+                               current->pid, current->comm,
+                               (unsigned long long)&jit_code_area[idx]);
+
+                       trace_unhandled_signal("segfault in unalign fixup",
+                                              regs,
+                                              (unsigned long)info.si_addr,
+                                              SIGSEGV);
+                       force_sig_info(info.si_signo, &info, current);
+                       return;
+               }
+
+
+               /* Do a cheaper increment, not accurate. */
+               unaligned_fixup_count++;
+               __flush_icache_range((unsigned long)&jit_code_area[idx],
+                                    (unsigned long)&jit_code_area[idx] +
+                                    sizeof(frag));
+
+               /* Setup SPR_EX_CONTEXT_0_0/1 for returning to user program.*/
+               __insn_mtspr(SPR_EX_CONTEXT_0_0, regs->pc + 8);
+               __insn_mtspr(SPR_EX_CONTEXT_0_1, PL_ICS_EX1(USER_PL, 0));
+
+               /* Modify pc at the start of new JIT. */
+               regs->pc = (unsigned long)&jit_code_area[idx].insn[0];
+               /* Set ICS in SPR_EX_CONTEXT_K_1. */
+               regs->ex1 = PL_ICS_EX1(USER_PL, 1);
+       }
+}
+
+
+/*
+ * C function to generate unalign data JIT. Called from unalign data
+ * interrupt handler.
+ *
+ * First check if unalign fixup is disabled, or the exception did not come
+ * from user space, or the sp register points to an unaligned address; if
+ * true, generate a SIGBUS. Then map a page into user space as the JIT area
+ * if it is not mapped yet. Generate JIT code by calling jit_bundle_gen(),
+ * then return back to the exception handler.
+ *
+ * The exception handler will "iret" to new generated JIT code after
+ * restoring caller saved registers. In theory, the JIT code will perform
+ * another "iret" to resume user's program.
+ */
+
+void do_unaligned(struct pt_regs *regs, int vecnum)
+{
+       tilegx_bundle_bits __user  *pc;
+       tilegx_bundle_bits bundle;
+       struct thread_info *info = current_thread_info();
+       int align_ctl;
+
+       /*
+        * Check the per-process unaligned-access control (set via prctl),
+        * which overrides the global unaligned_fixup setting:
+        * NOPRINT => fix up silently; SIGBUS => deliver SIGBUS instead.
+        */
+       align_ctl = unaligned_fixup;
+       switch (task_thread_info(current)->align_ctl) {
+       case PR_UNALIGN_NOPRINT:
+               align_ctl = 1;
+               break;
+       case PR_UNALIGN_SIGBUS:
+               align_ctl = 0;
+               break;
+       }
+
+       /* Enable interrupts in order to access user land. */
+       local_irq_enable();
+
+       /*
+        * The fault came from kernel space. Two choices:
+        * (a) unaligned_fixup < 1, we will first call get/put_user fixup
+        *     to return -EFAULT. If no fixup, simply panic the kernel.
+        * (b) unaligned_fixup >=1, we will try to fix the unaligned access
+        *     if it was triggered by get_user/put_user() macros. Panic the
+        *     kernel if it is not fixable.
+        */
+
+       if (EX1_PL(regs->ex1) != USER_PL) {
+
+               if (align_ctl < 1) {
+                       unaligned_fixup_count++;
+                       /* If exception came from kernel, try fix it up. */
+                       if (fixup_exception(regs)) {
+                               if (unaligned_printk)
+                                       pr_info("Unalign fixup: %d %llx @%llx",
+                                               (int)unaligned_fixup,
+                                               (unsigned long long)regs->ex1,
+                                               (unsigned long long)regs->pc);
+                               return;
+                       }
+                       /* Not fixable. Go panic. */
+                       panic("Unalign exception in Kernel. pc=%lx",
+                             regs->pc);
+                       return;
+               } else {
+                       /*
+                        * Try to fix the exception. If we can't, panic the
+                        * kernel.
+                        */
+                       bundle = GX_INSN_BSWAP(
+                               *((tilegx_bundle_bits *)(regs->pc)));
+                       jit_bundle_gen(regs, bundle, align_ctl);
+                       return;
+               }
+       }
+
+       /*
+        * Fault came from user with ICS or stack is not aligned.
+        * If so, we will trigger SIGBUS.
+        * NOTE(review): a nonzero regs->ex1 here presumably means ICS was
+        * set in the user context -- confirm against the SPR definitions.
+        */
+       if ((regs->sp & 0x7) || (regs->ex1) || (align_ctl < 0)) {
+               siginfo_t info = {
+                       .si_signo = SIGBUS,
+                       .si_code = BUS_ADRALN,
+                       .si_addr = (unsigned char __user *)0
+               };
+
+               if (unaligned_printk)
+                       pr_info("Unalign fixup: %d %llx @%llx",
+                               (int)unaligned_fixup,
+                               (unsigned long long)regs->ex1,
+                               (unsigned long long)regs->pc);
+
+               unaligned_fixup_count++;
+
+               trace_unhandled_signal("unaligned fixup trap", regs, 0, SIGBUS);
+               force_sig_info(info.si_signo, &info, current);
+               return;
+       }
+
+
+       /* Read the bundle that caused the exception. */
+       pc = (tilegx_bundle_bits __user *)(regs->pc);
+       if (get_user(bundle, pc) != 0) {
+               /* Probably never get here, since pc is a valid user address. */
+               siginfo_t info = {
+                       .si_signo = SIGSEGV,
+                       .si_code = SEGV_MAPERR,
+                       .si_addr = (void __user *)pc
+               };
+               pr_err("Couldn't read instruction at %p trying to step\n", pc);
+               trace_unhandled_signal("segfault in unalign fixup", regs,
+                                      (unsigned long)info.si_addr, SIGSEGV);
+               force_sig_info(info.si_signo, &info, current);
+               return;
+       }
+
+       /* Lazily map the per-process JIT page on the first unaligned fault. */
+       if (!info->unalign_jit_base) {
+               void __user *user_page;
+
+               /*
+                * Allocate a page in userland.
+                * For 64-bit processes we try to place the mapping far
+                * from anything else that might be going on (specifically
+                * 64 GB below the top of the user address space).  If it
+                * happens not to be possible to put it there, it's OK;
+                * the kernel will choose another location and we'll
+                * remember it for later.
+                */
+               if (is_compat_task())
+                       user_page = NULL;
+               else
+                       user_page = (void __user *)(TASK_SIZE - (1UL << 36)) +
+                               (current->pid << PAGE_SHIFT);
+
+               /* The hint address is only a preference; vm_mmap may move it. */
+               user_page = (void __user *) vm_mmap(NULL,
+                                                   (unsigned long)user_page,
+                                                   PAGE_SIZE,
+                                                   PROT_EXEC | PROT_READ |
+                                                   PROT_WRITE,
+#ifdef CONFIG_HOMECACHE
+                                                   MAP_CACHE_HOME_TASK |
+#endif
+                                                   MAP_PRIVATE |
+                                                   MAP_ANONYMOUS,
+                                                   0);
+
+               if (IS_ERR((void __force *)user_page)) {
+                       pr_err("Out of kernel pages trying do_mmap.\n");
+                       return;
+               }
+
+               /* Save the address in the thread_info struct */
+               info->unalign_jit_base = user_page;
+               if (unaligned_printk)
+                       pr_info("Unalign bundle: %d:%d, allocate page @%llx",
+                               raw_smp_processor_id(), current->pid,
+                               (unsigned long long)user_page);
+       }
+
+       /* Generate unalign JIT */
+       jit_bundle_gen(regs, GX_INSN_BSWAP(bundle), align_ctl);
+}
+
+#endif /* __tilegx__ */
diff --git a/arch/tile/kernel/vdso.c b/arch/tile/kernel/vdso.c
new file mode 100644 (file)
index 0000000..1533af2
--- /dev/null
@@ -0,0 +1,212 @@
+/*
+ * Copyright 2012 Tilera Corporation. All Rights Reserved.
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful, but
+ *   WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ *   NON INFRINGEMENT.  See the GNU General Public License for
+ *   more details.
+ */
+
+#include <linux/binfmts.h>
+#include <linux/compat.h>
+#include <linux/elf.h>
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+
+#include <asm/vdso.h>
+#include <asm/mman.h>
+#include <asm/sections.h>
+
+#include <arch/sim.h>
+
+/* The alignment of the vDSO. */
+#define VDSO_ALIGNMENT  PAGE_SIZE
+
+
+static unsigned int vdso_pages;
+static struct page **vdso_pagelist;
+
+#ifdef CONFIG_COMPAT
+static unsigned int vdso32_pages;
+static struct page **vdso32_pagelist;
+#endif
+static int vdso_ready;
+
+/*
+ * The vdso data page.  The union pads struct vdso_data out to exactly
+ * one page so it can be mapped into user space as the final page of
+ * the vDSO (see vdso_setup()).
+ */
+static union {
+       struct vdso_data        data;
+       u8                      page[PAGE_SIZE];
+} vdso_data_store __page_aligned_data;
+
+struct vdso_data *vdso_data = &vdso_data_store.data;
+
+/* Set via the "vdso=" boot argument (see vdso_func() below). */
+static unsigned int __read_mostly vdso_enabled = 1;
+
+/*
+ * Build a NULL-terminated page list describing a vDSO mapping: code
+ * pages taken from the image at vdso_kbase, with the shared vdso_data
+ * page as the last real entry.
+ * NOTE(review): the loop fills entries [0, pages-2] from the image and
+ * entry pages-1 with the data page, so "pages" must already include the
+ * data page(s) added by the caller -- confirm against vdso_init().
+ */
+static struct page **vdso_setup(void *vdso_kbase, unsigned int pages)
+{
+       int i;
+       struct page **pagelist;
+
+       /* One extra slot for the NULL terminator. */
+       pagelist = kzalloc(sizeof(struct page *) * (pages + 1), GFP_KERNEL);
+       BUG_ON(pagelist == NULL);
+       for (i = 0; i < pages - 1; i++) {
+               struct page *pg = virt_to_page(vdso_kbase + i*PAGE_SIZE);
+               ClearPageReserved(pg);
+               pagelist[i] = pg;
+       }
+       pagelist[pages - 1] = virt_to_page(vdso_data);
+       pagelist[pages] = NULL;
+
+       return pagelist;
+}
+
+/*
+ * Boot-time setup: build the page lists for the 64-bit (and, under
+ * CONFIG_COMPAT, 32-bit) vDSO images.  If vDSO was disabled on the
+ * command line, keep a single page holding only the rt_sigreturn
+ * trampoline.
+ */
+static int __init vdso_init(void)
+{
+       int data_pages = sizeof(vdso_data_store) >> PAGE_SHIFT;
+
+       /*
+        * We can disable vDSO support generally, but we need to retain
+        * one page to support the two-bundle (16-byte) rt_sigreturn path.
+        */
+       if (!vdso_enabled) {
+               size_t offset = (unsigned long)&__vdso_rt_sigreturn;
+               static struct page *sigret_page;
+               sigret_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+               BUG_ON(sigret_page == NULL);
+               vdso_pagelist = &sigret_page;
+               vdso_pages = 1;
+               BUG_ON(offset >= PAGE_SIZE);
+               /* Copy just the 16-byte sigreturn sequence at its offset. */
+               memcpy(page_address(sigret_page) + offset,
+                      vdso_start + offset, 16);
+#ifdef CONFIG_COMPAT
+               vdso32_pages = vdso_pages;
+               vdso32_pagelist = vdso_pagelist;
+#endif
+               vdso_ready = 1;
+               return 0;
+       }
+
+       vdso_pages = (vdso_end - vdso_start) >> PAGE_SHIFT;
+       vdso_pages += data_pages;
+       vdso_pagelist = vdso_setup(vdso_start, vdso_pages);
+
+#ifdef CONFIG_COMPAT
+       vdso32_pages = (vdso32_end - vdso32_start) >> PAGE_SHIFT;
+       vdso32_pages += data_pages;
+       vdso32_pagelist = vdso_setup(vdso32_start, vdso32_pages);
+#endif
+
+       /* Publish the page lists before marking the vDSO usable. */
+       smp_wmb();
+       vdso_ready = 1;
+
+       return 0;
+}
+arch_initcall(vdso_init);
+
+/* Give names to the architecture's special mappings (e.g. "[vdso]"). */
+const char *arch_vma_name(struct vm_area_struct *vma)
+{
+       if (vma->vm_mm && vma->vm_start == VDSO_BASE)
+               return "[vdso]";
+#ifndef __tilegx__
+       if (vma->vm_start == MEM_USER_INTRPT)
+               return "[intrpt]";
+#endif
+       return NULL;
+}
+
+/* No gate VMA on tile. */
+struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
+{
+       return NULL;
+}
+
+/* No gate area on tile, so no address is ever inside one. */
+int in_gate_area(struct mm_struct *mm, unsigned long address)
+{
+       return 0;
+}
+
+/* mm-less variant of the above; likewise always false on tile. */
+int in_gate_area_no_mm(unsigned long address)
+{
+       return 0;
+}
+
+/*
+ * Map the vDSO into the current process's address space and record the
+ * base in mm->context.vdso_base.  Returns 0 on success (including the
+ * "vDSO not ready / empty" cases) or a negative errno from
+ * get_unmapped_area()/install_special_mapping().
+ */
+int setup_vdso_pages(void)
+{
+       struct page **pagelist;
+       unsigned long pages;
+       struct mm_struct *mm = current->mm;
+       unsigned long vdso_base = 0;
+       int retval = 0;
+
+       if (!vdso_ready)
+               return 0;
+
+       mm->context.vdso_base = 0;
+
+       pagelist = vdso_pagelist;
+       pages = vdso_pages;
+#ifdef CONFIG_COMPAT
+       /* Compat (-m32) tasks get the 32-bit image instead. */
+       if (is_compat_task()) {
+               pagelist = vdso32_pagelist;
+               pages = vdso32_pages;
+       }
+#endif
+
+       /*
+        * vDSO has a problem and was disabled, just don't "enable" it for the
+        * process.
+        */
+       if (pages == 0)
+               return 0;
+
+       /* Over-allocate so the result can be rounded up to VDSO_ALIGNMENT. */
+       vdso_base = get_unmapped_area(NULL, vdso_base,
+                                     (pages << PAGE_SHIFT) +
+                                     ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
+                                     0, 0);
+       if (IS_ERR_VALUE(vdso_base)) {
+               retval = vdso_base;
+               return retval;
+       }
+
+       /* Add required alignment. */
+       vdso_base = ALIGN(vdso_base, VDSO_ALIGNMENT);
+
+       /*
+        * Put vDSO base into mm struct. We need to do this before calling
+        * install_special_mapping or the perf counter mmap tracking code
+        * will fail to recognise it as a vDSO (since arch_vma_name fails).
+        */
+       mm->context.vdso_base = vdso_base;
+
+       /*
+        * our vma flags don't have VM_WRITE so by default, the process isn't
+        * allowed to write those pages.
+        * gdb can break that with ptrace interface, and thus trigger COW on
+        * those pages but it's then your responsibility to never do that on
+        * the "data" page of the vDSO or you'll stop getting kernel updates
+        * and your nice userland gettimeofday will be totally dead.
+        * It's fine to use that for setting breakpoints in the vDSO code
+        * pages though.
+        */
+       retval = install_special_mapping(mm, vdso_base,
+                                        pages << PAGE_SHIFT,
+                                        VM_READ|VM_EXEC |
+                                        VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
+                                        pagelist);
+       if (retval)
+               mm->context.vdso_base = 0;
+
+       return retval;
+}
+
+/*
+ * Parse the "vdso=" boot argument into vdso_enabled.
+ * NOTE(review): kstrtouint() returns 0 on success, but __setup()
+ * handlers conventionally return 1 when the argument is consumed --
+ * confirm this does not cause the option to be treated as unhandled.
+ */
+static __init int vdso_func(char *s)
+{
+       return kstrtouint(s, 0, &vdso_enabled);
+}
+__setup("vdso=", vdso_func);
diff --git a/arch/tile/kernel/vdso/Makefile b/arch/tile/kernel/vdso/Makefile
new file mode 100644 (file)
index 0000000..e2b7a2f
--- /dev/null
@@ -0,0 +1,118 @@
+# Symbols present in the vdso
+vdso-syms = rt_sigreturn gettimeofday
+
+# Files to link into the vdso
+obj-vdso = $(patsubst %, v%.o, $(vdso-syms))
+
+# Build rules
+targets := $(obj-vdso) vdso.so vdso.so.dbg vdso.lds
+obj-vdso := $(addprefix $(obj)/, $(obj-vdso))
+
+# vdso32 is only for tilegx -m32 compat task.
+VDSO32-$(CONFIG_COMPAT) := y
+
+obj-y += vdso.o
+obj-$(VDSO32-y) += vdso32.o
+extra-y += vdso.lds
+CPPFLAGS_vdso.lds += -P -C -U$(ARCH)
+
+# vDSO code runs in userspace and -pg doesn't help with profiling anyway.
+CFLAGS_REMOVE_vdso.o = -pg
+CFLAGS_REMOVE_vdso32.o = -pg
+CFLAGS_REMOVE_vrt_sigreturn.o = -pg
+CFLAGS_REMOVE_vrt_sigreturn32.o = -pg
+CFLAGS_REMOVE_vgettimeofday.o = -pg
+CFLAGS_REMOVE_vgettimeofday32.o = -pg
+
+ifdef CONFIG_FEEDBACK_COLLECT
+# vDSO code runs in userspace, not collecting feedback data.
+CFLAGS_REMOVE_vdso.o = -ffeedback-generate
+CFLAGS_REMOVE_vdso32.o = -ffeedback-generate
+CFLAGS_REMOVE_vrt_sigreturn.o = -ffeedback-generate
+CFLAGS_REMOVE_vrt_sigreturn32.o = -ffeedback-generate
+CFLAGS_REMOVE_vgettimeofday.o = -ffeedback-generate
+CFLAGS_REMOVE_vgettimeofday32.o = -ffeedback-generate
+endif
+
+# Disable gcov profiling for VDSO code
+GCOV_PROFILE := n
+
+# Force dependency
+$(obj)/vdso.o: $(obj)/vdso.so
+
+# link rule for the .so file, .lds has to be first
+SYSCFLAGS_vdso.so.dbg = $(c_flags)
+$(obj)/vdso.so.dbg: $(src)/vdso.lds $(obj-vdso)
+       $(call if_changed,vdsold)
+
+
+# We also create a special relocatable object that should mirror the symbol
+# table and layout of the linked DSO.  With ld -R we can then refer to
+# these symbols in the kernel code rather than hand-coded addresses.
+extra-y += vdso-syms.o
+$(obj)/built-in.o: $(obj)/vdso-syms.o
+$(obj)/built-in.o: ld_flags += -R $(obj)/vdso-syms.o
+
+SYSCFLAGS_vdso.so.dbg = -shared -s -Wl,-soname=linux-vdso.so.1 \
+                            $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
+# NOTE(review): this variable name uses an underscore ("vdso_syms.o") but
+# the target built below is "vdso-syms.o"; cmd_vdsold expands
+# $(SYSCFLAGS_$(@F)), so the -r flag appears never to be applied --
+# confirm whether this should read SYSCFLAGS_vdso-syms.o.
+SYSCFLAGS_vdso_syms.o = -r
+$(obj)/vdso-syms.o: $(src)/vdso.lds $(obj)/vrt_sigreturn.o FORCE
+       $(call if_changed,vdsold)
+
+
+# strip rule for the .so file
+$(obj)/%.so: OBJCOPYFLAGS := -S
+$(obj)/%.so: $(obj)/%.so.dbg FORCE
+       $(call if_changed,objcopy)
+
+# actual build commands
+# The DSO images are built using a special linker script
+# Add -lgcc so tilepro gets static muldi3 and lshrdi3 definitions.
+# Make sure only to export the intended __vdso_xxx symbol offsets.
+quiet_cmd_vdsold = VDSOLD  $@
+      cmd_vdsold = $(CC) $(KCFLAGS) -nostdlib $(SYSCFLAGS_$(@F)) \
+                           -Wl,-T,$(filter-out FORCE,$^) -o $@.tmp -lgcc && \
+                   $(CROSS_COMPILE)objcopy \
+                           $(patsubst %, -G __vdso_%, $(vdso-syms)) $@.tmp $@
+
+# install commands for the unstripped file
+quiet_cmd_vdso_install = INSTALL $@
+      cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/$@
+
+vdso.so: $(obj)/vdso.so.dbg
+       @mkdir -p $(MODLIB)/vdso
+       $(call cmd,vdso_install)
+
+vdso32.so: $(obj)/vdso32.so.dbg
+       $(call cmd,vdso_install)
+
+vdso_install: vdso.so
+vdso32_install: vdso32.so
+
+
+KBUILD_AFLAGS_32 := $(filter-out -m64,$(KBUILD_AFLAGS))
+KBUILD_AFLAGS_32 += -m32 -s
+KBUILD_CFLAGS_32 := $(filter-out -m64,$(KBUILD_CFLAGS))
+KBUILD_CFLAGS_32 += -m32 -fPIC -shared
+
+obj-vdso32 = $(patsubst %, v%32.o, $(vdso-syms))
+obj-vdso32 := $(addprefix $(obj)/, $(obj-vdso32))
+
+targets += $(obj-vdso32) vdso32.so vdso32.so.dbg
+
+$(obj-vdso32:%=%): KBUILD_AFLAGS = $(KBUILD_AFLAGS_32)
+$(obj-vdso32:%=%): KBUILD_CFLAGS = $(KBUILD_CFLAGS_32)
+
+$(obj)/vgettimeofday32.o: $(obj)/vgettimeofday.c
+       $(call if_changed,cc_o_c)
+
+$(obj)/vrt_sigreturn32.o: $(obj)/vrt_sigreturn.S
+       $(call if_changed,as_o_S)
+
+# Force dependency
+$(obj)/vdso32.o: $(obj)/vdso32.so
+
+SYSCFLAGS_vdso32.so.dbg = -m32 -shared -s -Wl,-soname=linux-vdso32.so.1 \
+                           $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
+$(obj)/vdso32.so.dbg: $(src)/vdso.lds $(obj-vdso32)
+       $(call if_changed,vdsold)
diff --git a/arch/tile/kernel/vdso/vdso.S b/arch/tile/kernel/vdso/vdso.S
new file mode 100644 (file)
index 0000000..3467adb
--- /dev/null
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2012 Tilera Corporation. All Rights Reserved.
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful, but
+ *   WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ *   NON INFRINGEMENT.  See the GNU General Public License for
+ *   more details.
+ */
+
+#include <linux/init.h>
+#include <linux/linkage.h>
+#include <asm/page.h>
+
+       /* Page-aligned data section carrying the prebuilt vDSO image. */
+       __PAGE_ALIGNED_DATA
+
+       /* Bounds of the embedded DSO; consumed by vdso_init()/vdso_setup(). */
+       .global vdso_start, vdso_end
+       .align PAGE_SIZE
+vdso_start:
+       .incbin "arch/tile/kernel/vdso/vdso.so"
+       .align PAGE_SIZE
+vdso_end:
+
+       .previous
diff --git a/arch/tile/kernel/vdso/vdso.lds.S b/arch/tile/kernel/vdso/vdso.lds.S
new file mode 100644 (file)
index 0000000..041cd6c
--- /dev/null
@@ -0,0 +1,87 @@
+/*
+ * Copyright 2012 Tilera Corporation. All Rights Reserved.
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful, but
+ *   WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ *   NON INFRINGEMENT.  See the GNU General Public License for
+ *   more details.
+ */
+
+#define VDSO_VERSION_STRING    LINUX_2.6
+
+
+OUTPUT_ARCH(tile)
+
+/* The ELF entry point can be used to set the AT_SYSINFO value. */
+ENTRY(__vdso_rt_sigreturn);
+
+
+SECTIONS
+{
+       . = SIZEOF_HEADERS;
+
+       .hash           : { *(.hash) }                  :text
+       .gnu.hash       : { *(.gnu.hash) }
+       .dynsym         : { *(.dynsym) }
+       .dynstr         : { *(.dynstr) }
+       .gnu.version    : { *(.gnu.version) }
+       .gnu.version_d  : { *(.gnu.version_d) }
+       .gnu.version_r  : { *(.gnu.version_r) }
+
+       .note           : { *(.note.*) }                :text   :note
+       .dynamic        : { *(.dynamic) }               :text   :dynamic
+
+       .eh_frame_hdr   : { *(.eh_frame_hdr) }          :text   :eh_frame_hdr
+       .eh_frame       : { KEEP (*(.eh_frame)) }       :text
+
+       .rodata  : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
+
+       /*
+        * This linker script is used both with -r and with -shared.
+        * For the layouts to match, we need to skip more than enough
+        * space for the dynamic symbol table et al. If this amount
+        * is insufficient, ld -shared will barf. Just increase it here.
+        */
+       . = 0x1000;
+       .text           : { *(.text .text.*) }          :text
+
+       .data           : {
+               *(.got.plt) *(.got)
+               *(.data .data.* .gnu.linkonce.d.*)
+               *(.dynbss)
+               *(.bss .bss.* .gnu.linkonce.b.*)
+       }
+}
+
+
+/*
+ * We must supply the ELF program headers explicitly to get just one
+ * PT_LOAD segment, and set the flags explicitly to make segments read-only.
+ */
+PHDRS
+{
+       text            PT_LOAD         FLAGS(5) FILEHDR PHDRS; /* PF_R|PF_X */
+       dynamic         PT_DYNAMIC      FLAGS(4);               /* PF_R */
+       note            PT_NOTE         FLAGS(4);               /* PF_R */
+       eh_frame_hdr    PT_GNU_EH_FRAME;
+}
+
+
+/*
+ * This controls what userland symbols we export from the vDSO.
+ */
+VERSION
+{
+       VDSO_VERSION_STRING {
+       global:
+               __vdso_rt_sigreturn;
+               __vdso_gettimeofday;
+               gettimeofday;
+       local:*;
+       };
+}
diff --git a/arch/tile/kernel/vdso/vdso32.S b/arch/tile/kernel/vdso/vdso32.S
new file mode 100644 (file)
index 0000000..1d1ac32
--- /dev/null
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2013 Tilera Corporation. All Rights Reserved.
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful, but
+ *   WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ *   NON INFRINGEMENT.  See the GNU General Public License for
+ *   more details.
+ */
+
+#include <linux/init.h>
+#include <linux/linkage.h>
+#include <asm/page.h>
+
+       /* Page-aligned data section carrying the prebuilt 32-bit vDSO image. */
+       __PAGE_ALIGNED_DATA
+
+       /* Bounds of the embedded compat DSO; consumed by vdso_init(). */
+       .global vdso32_start, vdso32_end
+       .align PAGE_SIZE
+vdso32_start:
+       .incbin "arch/tile/kernel/vdso/vdso32.so"
+       .align PAGE_SIZE
+vdso32_end:
+
+       .previous
diff --git a/arch/tile/kernel/vdso/vgettimeofday.c b/arch/tile/kernel/vdso/vgettimeofday.c
new file mode 100644 (file)
index 0000000..51ec8e4
--- /dev/null
@@ -0,0 +1,107 @@
+/*
+ * Copyright 2012 Tilera Corporation. All Rights Reserved.
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful, but
+ *   WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ *   NON INFRINGEMENT.  See the GNU General Public License for
+ *   more details.
+ */
+
+#define VDSO_BUILD  /* avoid some shift warnings for -m32 in <asm/page.h> */
+#include <linux/time.h>
+#include <asm/timex.h>
+#include <asm/vdso.h>
+
+#if CHIP_HAS_SPLIT_CYCLE()
+static inline cycles_t get_cycles_inline(void)
+{
+       unsigned int high = __insn_mfspr(SPR_CYCLE_HIGH);
+       unsigned int low = __insn_mfspr(SPR_CYCLE_LOW);
+       unsigned int high2 = __insn_mfspr(SPR_CYCLE_HIGH);
+
+       while (unlikely(high != high2)) {
+               low = __insn_mfspr(SPR_CYCLE_LOW);
+               high = high2;
+               high2 = __insn_mfspr(SPR_CYCLE_HIGH);
+       }
+
+       return (((cycles_t)high) << 32) | low;
+}
+#define get_cycles get_cycles_inline
+#endif
+
+/*
+ * Find out the vDSO data page address in the process address space.
+ */
+inline unsigned long get_datapage(void)
+{
+       unsigned long ret;
+
+       /* vdso data page located in the 2nd vDSO page. */
+       asm volatile ("lnk %0" : "=r"(ret));
+       ret &= ~(PAGE_SIZE - 1);
+       ret += PAGE_SIZE;
+
+       return ret;
+}
+
+int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
+{
+       cycles_t cycles;
+       unsigned long count, sec, ns;
+       volatile struct vdso_data *vdso_data;
+
+       vdso_data = (struct vdso_data *)get_datapage();
+       /* The use of the timezone is obsolete, normally tz is NULL. */
+       if (unlikely(tz != NULL)) {
+               while (1) {
+                       /* Spin until the update finishes. */
+                       count = vdso_data->tz_update_count;
+                       if (count & 1)
+                               continue;
+
+                       tz->tz_minuteswest = vdso_data->tz_minuteswest;
+                       tz->tz_dsttime = vdso_data->tz_dsttime;
+
+                       /* Check whether updated, read again if so. */
+                       if (count == vdso_data->tz_update_count)
+                               break;
+               }
+       }
+
+       if (unlikely(tv == NULL))
+               return 0;
+
+       while (1) {
+               /* Spin until the update finishes. */
+               count = vdso_data->tb_update_count;
+               if (count & 1)
+                       continue;
+
+               cycles = (get_cycles() - vdso_data->xtime_tod_stamp);
+               ns = (cycles * vdso_data->mult) >> vdso_data->shift;
+               sec = vdso_data->xtime_clock_sec;
+               ns += vdso_data->xtime_clock_nsec;
+               if (ns >= NSEC_PER_SEC) {
+                       ns -= NSEC_PER_SEC;
+                       sec += 1;
+               }
+
+               /* Check whether updated, read again if so. */
+               if (count == vdso_data->tb_update_count)
+                       break;
+       }
+
+       tv->tv_sec = sec;
+       tv->tv_usec = ns / 1000;
+
+       return 0;
+}
+
+int gettimeofday(struct timeval *tv, struct timezone *tz)
+       __attribute__((weak, alias("__vdso_gettimeofday")));
diff --git a/arch/tile/kernel/vdso/vrt_sigreturn.S b/arch/tile/kernel/vdso/vrt_sigreturn.S
new file mode 100644 (file)
index 0000000..6326caf
--- /dev/null
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2012 Tilera Corporation. All Rights Reserved.
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful, but
+ *   WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ *   NON INFRINGEMENT.  See the GNU General Public License for
+ *   more details.
+ */
+
+#include <linux/linkage.h>
+#include <arch/abi.h>
+#include <asm/unistd.h>
+
+/*
+ * Note that libc has a copy of this function that it uses to compare
+ * against the PC when a stack backtrace ends, so if this code is
+ * changed, the libc implementation(s) should also be updated.
+ */
+ENTRY(__vdso_rt_sigreturn)
+       moveli TREG_SYSCALL_NR_NAME, __NR_rt_sigreturn
+       swint1
+       /* We don't use ENDPROC to avoid tagging this symbol as FUNC,
+        * which confuses the perf tool.
+        */
+       END(__vdso_rt_sigreturn)
index a13ed902afbbd645c521a7751f5930d76b5c8704..f1819423ffc9f7b006098729605506d85f617acb 100644 (file)
@@ -5,7 +5,7 @@
 #include <hv/hypervisor.h>
 
 /* Text loads starting from the supervisor interrupt vector address. */
-#define TEXT_OFFSET MEM_SV_INTRPT
+#define TEXT_OFFSET MEM_SV_START
 
 OUTPUT_ARCH(tile)
 ENTRY(_start)
@@ -13,7 +13,7 @@ jiffies = jiffies_64;
 
 PHDRS
 {
-  intrpt1 PT_LOAD ;
+  intrpt PT_LOAD ;
   text PT_LOAD ;
   data PT_LOAD ;
 }
@@ -24,14 +24,17 @@ SECTIONS
   #define LOAD_OFFSET TEXT_OFFSET
 
   /* Interrupt vectors */
-  .intrpt1 (LOAD_OFFSET) : AT ( 0 )   /* put at the start of physical memory */
+  .intrpt (LOAD_OFFSET) : AT ( 0 )   /* put at the start of physical memory */
   {
     _text = .;
-    *(.intrpt1)
-  } :intrpt1 =0
+    *(.intrpt)
+  } :intrpt =0
 
   /* Hypervisor call vectors */
-  #include "hvglue.lds"
+  . = ALIGN(0x10000);
+  .hvglue : AT (ADDR(.hvglue) - LOAD_OFFSET) {
+    *(.hvglue)
+  } :NONE
 
   /* Now the real code */
   . = ALIGN(0x20000);
@@ -40,7 +43,11 @@ SECTIONS
     HEAD_TEXT
     SCHED_TEXT
     LOCK_TEXT
+    KPROBES_TEXT
+    IRQENTRY_TEXT
     __fix_text_end = .;   /* tile-cpack won't rearrange before this */
+    ALIGN_FUNCTION();
+    *(.hottext*)
     TEXT_TEXT
     *(.text.*)
     *(.coldtext*)
@@ -67,20 +74,8 @@ SECTIONS
   __init_end = .;
 
   _sdata = .;                   /* Start of data section */
-
   RO_DATA_SECTION(PAGE_SIZE)
-
-  /* initially writeable, then read-only */
-  . = ALIGN(PAGE_SIZE);
-  __w1data_begin = .;
-  .w1data : AT(ADDR(.w1data) - LOAD_OFFSET) {
-    VMLINUX_SYMBOL(__w1data_begin) = .;
-    *(.w1data)
-    VMLINUX_SYMBOL(__w1data_end) = .;
-  }
-
   RW_DATA_SECTION(L2_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
-
   _edata = .;
 
   EXCEPTION_TABLE(L2_CACHE_BYTES)
index 2298cb1daff74e411ac0a616252e84988bfdd06f..65f7f9db0c9c4a6ab54e910da3fcab81487c739a 100644 (file)
@@ -27,9 +27,6 @@ config KVM
          This module provides access to the hardware capabilities through
          a character device node named /dev/kvm.
 
-         To compile this as a module, choose M here: the module
-         will be called kvm.
-
          If unsure, say N.
 
 source drivers/vhost/Kconfig
diff --git a/arch/tile/kvm/Makefile b/arch/tile/kvm/Makefile
new file mode 100644 (file)
index 0000000..2c3d206
--- /dev/null
@@ -0,0 +1,12 @@
+#
+# Makefile for Kernel-based Virtual Machine module
+#
+
+ccflags-y := -Ivirt/kvm -Iarch/tile/kvm
+
+kvm-y += $(addprefix ../../../virt/kvm/, kvm_main.o)
+
+kvm-y += kvm-tile.o
+kvm-y += entry.o
+
+obj-$(CONFIG_KVM) += kvm.o
diff --git a/arch/tile/kvm/entry.S b/arch/tile/kvm/entry.S
new file mode 100644 (file)
index 0000000..07aa3a6
--- /dev/null
@@ -0,0 +1,91 @@
+/*
+ * Copyright 2013 Tilera Corporation. All Rights Reserved.
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful, but
+ *   WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ *   NON INFRINGEMENT.  See the GNU General Public License for
+ *   more details.
+ */
+
+#include <linux/linkage.h>
+#include <asm/switch_to.h>
+#include <asm/processor.h>
+#include <arch/spr_def.h>
+#include <arch/abi.h>
+
+#define FRAME_SIZE ((4 + CALLEE_SAVED_REGS_COUNT) * 8)
+#define SAVE_REG(r) { st r12, r; addi r12, r12, 8 }
+#define LOAD_REG(r) { ld r, r12; addi r12, r12, 8 }
+#define FOR_EACH_CALLEE_SAVED_REG(f)                                   \
+                                                       f(r30); f(r31); \
+       f(r32); f(r33); f(r34); f(r35); f(r36); f(r37); f(r38); f(r39); \
+       f(r40); f(r41); f(r42); f(r43); f(r44); f(r45); f(r46); f(r47); \
+       f(r48); f(r49); f(r50); f(r51); f(r52);
+
+/*
+ * Called with interrupts disabled from kvm_tile_run() and is responsible
+ * just for saving the callee-save registers and the stack pointer, then
+ * resetting ksp0 so subsequent interrupts don't wipe the kernel stack.
+ * It uses restore_all in intvec_64.S to jump back into the guest.
+ * The kvm_vmexit function below undoes the stack manipulation.
+ */
+STD_ENTRY(kvm_vmresume)
+       /* Do function prolog and save callee-saves on stack. */
+       {
+         move r10, sp
+         st sp, lr
+       }
+       {
+         addli r11, sp, -FRAME_SIZE + 8
+         addli sp, sp, -FRAME_SIZE
+       }
+       {
+         st r11, r10
+         addi r12, sp, 16
+       }
+       FOR_EACH_CALLEE_SAVED_REG(SAVE_REG)
+       SAVE_REG(tp)
+       SAVE_REG(lr)
+
+       /* Save frame pointer in thread_info so we can get it back later. */
+       st r1, sp
+
+       /* Set the ksp0 for this core to be below this frame. */
+       mfspr r10, SPR_SYSTEM_SAVE_K_0
+       bfins r10, sp, 0, CPU_SHIFT-1
+       mtspr SPR_SYSTEM_SAVE_K_0, r10
+
+       /* sp points to ABI save area below pt_regs for restore_all. */
+       addli sp, r0, -C_ABI_SAVE_AREA_SIZE
+
+       /* Execute an "interrupt return" to the guest. */
+       {
+        movei r30, 0
+        j restore_all
+       }
+       STD_ENDPROC(kvm_vmresume)
+
+/*
+ * Called with interrupts disabled from kvm_trigger_vmexit(); returns with
+ * interrupts still disabled to kvm_vmresume()'s caller, discarding all the
+ * stack contents below the kvm_vmresume() frame.  kvm_vmresume()'s caller
+ * is responsible for resetting SPR_SYSTEM_SAVE_K_0 to its previous value.
+ */
+STD_ENTRY(kvm_vmexit)
+       {
+        move sp, r0
+        addi r12, r0, 16
+       }
+       FOR_EACH_CALLEE_SAVED_REG(LOAD_REG)
+       LOAD_REG(tp)
+       LOAD_REG(lr)
+       {
+         addli sp, sp, FRAME_SIZE
+         jrp lr
+       }
+       STD_ENDPROC(kvm_vmexit)
diff --git a/arch/tile/kvm/kvm-tile.c b/arch/tile/kvm/kvm-tile.c
new file mode 100644 (file)
index 0000000..4c33991
--- /dev/null
@@ -0,0 +1,1581 @@
+/*
+ * Copyright 2013 Tilera Corporation. All Rights Reserved.
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful, but
+ *   WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ *   NON INFRINGEMENT.  See the GNU General Public License for
+ *   more details.
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/kvm.h>
+#include <linux/kvm_host.h>
+#include <linux/kvm_types.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/ptrace.h>
+#include <asm/traps.h>
+#include <asm/pgalloc.h>
+#include <hv/hypervisor.h>
+#include <linux/rtc.h>
+#include <asm/atomic.h>
+#include <asm/tlbflush.h>
+#include <arch/spr_def.h>
+#include <arch/sim.h>
+#include <generated/utsrelease.h>
+
+
+struct kvm_stats_debugfs_item debugfs_entries[] = {
+       { NULL }
+};
+
+static pte_t *get_vpgd_pte(struct kvm *kvm, unsigned long address)
+{
+       struct mm_struct *mm = kvm->mm;
+       pgd_t *pgd;
+       pud_t *pud;
+       pmd_t *pmd;
+
+       if (kvm->arch.vpgd == NULL)
+               kvm->arch.vpgd = pgd_alloc(kvm->mm);
+       pgd = kvm->arch.vpgd + pgd_index(address);
+       pud = pud_alloc(mm, pgd, address);
+       if (!pud)
+               return NULL;
+       pmd = pmd_alloc(mm, pud, address);
+       if (!pmd)
+               return NULL;
+       return pte_alloc_kernel(pmd, address);
+}
+
+int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
+{
+       return VM_FAULT_SIGBUS;
+}
+
+void kvm_arch_free_memslot(struct kvm_memory_slot *free,
+                          struct kvm_memory_slot *dont)
+{
+}
+
+int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
+{
+       return 0;
+}
+
+/* FIXME: support huge pages. */
+int kvm_arch_prepare_memory_region(struct kvm *kvm,
+                                  struct kvm_memory_slot *memslot,
+                                  struct kvm_userspace_memory_region *mem,
+                                  enum kvm_mr_change change)
+{
+       unsigned long gpa, i;
+
+       gpa = mem->guest_phys_addr;
+       for (i = 0; i < mem->memory_size; i += PAGE_SIZE, gpa += PAGE_SIZE)
+               if (get_vpgd_pte(kvm, gpa) == NULL)
+                       return -ENOMEM;
+
+       return 0;
+}
+
+void kvm_arch_commit_memory_region(struct kvm *kvm,
+                                  struct kvm_userspace_memory_region *mem,
+                                  const struct kvm_memory_slot *old,
+                                  enum kvm_mr_change change)
+{
+       unsigned long gpa, address, pfn, i;
+       struct page *page[1];
+       pte_t *ptep, *vptep;
+
+       gpa = mem->guest_phys_addr;
+       address = mem->userspace_addr;
+       for (i = 0; i < mem->memory_size;
+            i += PAGE_SIZE, gpa += PAGE_SIZE, address += PAGE_SIZE) {
+               vptep = get_vpgd_pte(kvm, gpa);
+               BUG_ON(vptep == NULL);
+               get_user_pages_fast(address, 1, 1, page);
+               pfn = page_to_pfn(page[0]);
+               ptep = virt_to_pte(NULL, (unsigned long)__va(PFN_PHYS(pfn)));
+               *vptep = *ptep;
+       }
+}
+
+void kvm_arch_flush_shadow_all(struct kvm *kvm)
+{
+}
+
+void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
+                                  struct kvm_memory_slot *slot)
+{
+       kvm_arch_flush_shadow_all(kvm);
+}
+
+gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
+{
+       return 0;
+}
+
+long kvm_arch_dev_ioctl(struct file *filp,
+                       unsigned int ioctl, unsigned long arg)
+{
+       return 0;
+}
+
+static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, unsigned long irq)
+{
+       if (irq >= BITS_PER_LONG)
+               return -EINVAL;
+
+       set_bit(irq, &vcpu->arch.ipi_events);
+       kvm_vcpu_kick(vcpu);
+
+       return 0;
+}
+
+long kvm_arch_vcpu_ioctl(struct file *filp,
+                        unsigned int ioctl, unsigned long arg)
+{
+       struct kvm_vcpu *vcpu = filp->private_data;
+       void __user *argp = (void __user *)arg;
+       int r = 0;
+
+       switch (ioctl) {
+       case KVM_INTERRUPT: {
+               struct kvm_interrupt irq;
+
+               r = -EFAULT;
+               if (copy_from_user(&irq, argp, sizeof(irq)))
+                       goto out;
+               r = kvm_vcpu_ioctl_interrupt(vcpu, irq.irq);
+               if (r)
+                       goto out;
+               r = 0;
+               break;
+       }
+       default:
+               r = -EINVAL;
+       }
+
+out:
+       return r;
+}
+
+int kvm_dev_ioctl_check_extension(long ext)
+{
+       return 0;
+}
+
+int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
+                              struct kvm_dirty_log *log)
+{
+       return 0;
+}
+
+long kvm_arch_vm_ioctl(struct file *filp,
+                      unsigned int ioctl, unsigned long arg)
+{
+       long r = -EINVAL;
+
+       return r;
+}
+
+int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
+{
+       return 0;
+}
+
+int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
+{
+       return 0;
+}
+
+int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
+                                 struct kvm_translation *tr)
+{
+       struct kvm *kvm = vcpu->kvm;
+       unsigned long page_size;
+       unsigned long gva = tr->linear_address;
+       unsigned long gpgd_gpa, gpmd_gpa, gpte_gpa;
+       pud_t gpud;
+       pmd_t gpmd;
+       pte_t gpte;
+
+       /* Get guest pgd (aka pud for three-level tables). */
+       gpgd_gpa = vcpu->arch.guest_context.page_table +
+               (sizeof(pgd_t) * pgd_index(gva));
+       if (kvm_read_guest(kvm, gpgd_gpa, &gpud, sizeof(pgd_t)) < 0)
+               goto fail;
+       if (!pud_present(gpud))
+               goto fail;
+
+       /* Get guest pmd. */
+       if (pud_huge_page(gpud)) {
+               /* FIXME: no super huge page support yet. */
+               if (pte_super(*(pte_t *)&gpud))
+                       goto fail;
+               gpte = *(pte_t *)&gpud;
+               page_size = PGDIR_SIZE;
+               goto ok;
+       }
+       gpmd_gpa = (pud_ptfn(gpud) << HV_LOG2_PAGE_TABLE_ALIGN) +
+               (sizeof(pmd_t) * pmd_index(gva));
+       if (kvm_read_guest(kvm, gpmd_gpa, &gpmd, sizeof(pmd_t)) < 0)
+               goto fail;
+       if (!pmd_present(gpmd))
+               goto fail;
+
+       /* Get guest pte. */
+       if (pmd_huge_page(gpmd)) {
+               /* FIXME: no super huge page support yet. */
+               if (pte_super(*(pte_t *)&gpmd))
+                       goto fail;
+               gpte = *(pte_t *)&gpmd;
+               page_size = PMD_SIZE;
+               goto ok;
+       }
+       gpte_gpa = (pmd_ptfn(gpmd) << HV_LOG2_PAGE_TABLE_ALIGN) +
+               (sizeof(pte_t) * pte_index(gva));
+       if (kvm_read_guest(kvm, gpte_gpa, &gpte, sizeof(pte_t)) < 0)
+               goto fail;
+       if (!pte_present(gpte))
+               goto fail;
+
+       page_size = PAGE_SIZE;
+
+ok:
+       tr->physical_address =
+               PFN_PHYS(pte_pfn(gpte)) + (gva & (page_size - 1));
+       tr->valid = 1;
+       tr->writeable = pte_write(gpte);
+       tr->usermode = pte_user(gpte);
+
+       return 0;
+
+fail:
+       tr->valid = 0;
+       return 0;
+}
+
+int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
+{
+       regs->regs = vcpu->arch.regs;
+       return 0;
+}
+
+int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
+{
+       vcpu->arch.regs = regs->regs;
+       vcpu->arch.regs.flags = PT_FLAGS_CALLER_SAVES | PT_FLAGS_RESTORE_REGS;
+       return 0;
+}
+
+int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
+                                 struct kvm_sregs *sregs)
+{
+       *sregs = vcpu->arch.sregs;
+       return 0;
+}
+
+int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
+                                 struct kvm_sregs *sregs)
+{
+       vcpu->arch.sregs = *sregs;
+       return 0;
+}
+
+int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
+                                   struct kvm_mp_state *mp_state)
+{
+       return 0;
+}
+
+int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
+                                   struct kvm_mp_state *mp_state)
+{
+       return 0;
+}
+
+int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
+                                       struct kvm_guest_debug *dbg)
+{
+       return 0;
+}
+
+/*
+ * panic_hv() will dump stack info of both guest os and host os, and set
+ * proper exit reason so that qemu can terminate the guest process.
+ *
+ * FIXME: Probably KVM_EXIT_EXCEPTION?  If using KVM_EXIT_EXCEPTION,
+ * current qemu process will "hang" (killable but Ctrl+C not working),
+ * so use KVM_EXIT_SHUTDOWN here temporarily.
+ */
+static int panic_hv(struct kvm_vcpu *vcpu, const char *fmt, ...)
+{
+       char panic_buf[256];
+       struct pt_regs *regs;
+       va_list ap;
+       int i;
+
+       va_start(ap, fmt);
+       vsnprintf(panic_buf, sizeof(panic_buf), fmt, ap);
+       va_end(ap);
+       pr_err("KVM guest panic (vcpu %d) - %s\n", vcpu->vcpu_id, panic_buf);
+
+       /* Show guest os info */
+       regs = &vcpu->arch.regs;
+       for (i = 0; i < 17; i++)
+               pr_err(" r%-2d: "REGFMT" r%-2d: "REGFMT" r%-2d: "REGFMT"\n",
+                      i, regs->regs[i], i+18, regs->regs[i+18],
+                      i+36, regs->regs[i+36]);
+       pr_err(" r18: "REGFMT" r35: "REGFMT" tp : "REGFMT"\n",
+              regs->regs[18], regs->regs[35], regs->tp);
+       pr_err(" sp : "REGFMT" lr : "REGFMT"\n", regs->sp, regs->lr);
+       pr_err(" pc : "REGFMT" ex1: %ld     faultnum: %ld\n",
+              regs->pc, regs->ex1, regs->faultnum);
+
+       /* Show host os info */
+       pr_err("\nKVM stack in the host:\n");
+       dump_stack();
+
+       /* Shut down the guest os */
+       pr_err("Shutting down guest.\n");
+       vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
+       return 0;
+}
+
+/* Copied from virt/kvm/kvm_main.c */
+static int next_segment(unsigned long len, int offset)
+{
+       if (len > PAGE_SIZE - offset)
+               return PAGE_SIZE - offset;
+       else
+               return len;
+}
+
+static int kvm_read_guest_va(struct kvm_vcpu *vcpu, unsigned long gva,
+                            void *data, unsigned long len)
+{
+       struct kvm *kvm = vcpu->kvm;
+       int seg;
+       int offset = offset_in_page(gva);
+       int ret;
+
+       while ((seg = next_segment(len, offset)) != 0) {
+               struct kvm_translation tr;
+               tr.linear_address = gva;
+               kvm_arch_vcpu_ioctl_translate(vcpu, &tr);
+               if (!tr.valid)
+                       return -EFAULT;
+               ret = kvm_read_guest_page(kvm, PFN_DOWN(tr.physical_address),
+                                         data, offset, seg);
+               if (ret < 0)
+                       return ret;
+               offset = 0;
+               len -= seg;
+               data += seg;
+               gva += seg;
+       }
+       return 0;
+}
+
+static int kvm_write_guest_va(struct kvm_vcpu *vcpu, unsigned long gva,
+                             const void *data, unsigned long len)
+{
+       struct kvm *kvm = vcpu->kvm;
+       int seg;
+       int offset = offset_in_page(gva);
+       int ret;
+
+       while ((seg = next_segment(len, offset)) != 0) {
+               struct kvm_translation tr;
+               tr.linear_address = gva;
+               kvm_arch_vcpu_ioctl_translate(vcpu, &tr);
+               if (!tr.valid)
+                       return -EFAULT;
+               ret = kvm_write_guest_page(kvm, PFN_DOWN(tr.physical_address),
+                                          data, offset, seg);
+               if (ret < 0)
+                       return ret;
+               offset = 0;
+               len -= seg;
+               data += seg;
+               gva += seg;
+       }
+       return 0;
+}
+
+static int kvm_clear_guest_va(struct kvm_vcpu *vcpu, unsigned long gva,
+                             unsigned long len)
+{
+       struct kvm *kvm = vcpu->kvm;
+       int seg;
+       int offset = offset_in_page(gva);
+       int ret;
+
+       while ((seg = next_segment(len, offset)) != 0) {
+               struct kvm_translation tr;
+               tr.linear_address = gva;
+               kvm_arch_vcpu_ioctl_translate(vcpu, &tr);
+               if (!tr.valid)
+                       return -EFAULT;
+               ret = kvm_clear_guest_page(kvm, PFN_DOWN(tr.physical_address),
+                                          offset, seg);
+               if (ret < 0)
+                       return ret;
+               offset = 0;
+               len -= seg;
+               gva += seg;
+       }
+       return 0;
+}
+
+/*
+ * The following functions are emulation functions for various
+ * hypervisor system calls (i.e. hv_*()). Return value:
+ *   1 if the host os can emulate it completely.
+ *   < 0 if errors occur and then qemu will handle them.
+ *   0 if qemu emulation is needed.
+ * In both the < 0 and the == 0 cases, exit reason should
+ * be set for qemu handling.
+ */
+
+/* generic handler for hypercall which needs user (QEMU) to handle. */
+static int kvm_deliver_to_user(struct kvm_vcpu *vcpu)
+{
+       vcpu->run->exit_reason = KVM_EXIT_HYPERCALL;
+       return 0;
+}
+
+/* handler for illegal hypercall */
+static int kvm_emulate_illegal(struct kvm_vcpu *vcpu)
+{
+       return panic_hv(vcpu, "Illegal kvm hypercall: %ld",
+                       (unsigned long)vcpu->arch.regs.regs[10]);
+}
+
+static int kvm_emulate_hv_init(struct kvm_vcpu *vcpu)
+{
+       int version = vcpu->arch.regs.regs[0];
+       int chip_num = vcpu->arch.regs.regs[1];
+       int chip_rev_num = vcpu->arch.regs.regs[2];
+       int client_pl = vcpu->arch.regs.regs[3];
+
+       if (client_pl != 1)
+               return panic_hv(vcpu, "Guest is requesting PL %d, but KVM"
+                               " guests must request PL 1.\n"
+                               "Reconfigure your guest with KVM_GUEST set.\n",
+                               client_pl);
+
+       if (version != HV_VERSION)
+               return panic_hv(vcpu, "Client built for hv version %d, but"
+                               " this hv is version %d\n",
+                               version, HV_VERSION);
+
+       if (chip_num != TILE_CHIP)
+               return panic_hv(vcpu, "Client built for chip %d, but this"
+                               " hardware is chip %d\n",
+                               chip_num, TILE_CHIP);
+
+       if (chip_rev_num != TILE_CHIP_REV)
+               return panic_hv(vcpu, "Client built for chip rev %d, but this"
+                               " hardware is chip rev %d\n",
+                               chip_rev_num, TILE_CHIP_REV);
+
+       return 1;
+}
+
+static int kvm_emulate_hv_sysconf(struct kvm_vcpu *vcpu)
+{
+       HV_SysconfQuery query = (HV_SysconfQuery)vcpu->arch.regs.regs[0];
+       long rc;
+
+       switch (query) {
+       case HV_SYSCONF_PAGE_SIZE_SMALL:
+               rc = PAGE_SIZE;
+               break;
+
+       case HV_SYSCONF_PAGE_SIZE_LARGE:
+               rc = HPAGE_SIZE;
+               break;
+
+       case HV_SYSCONF_VALID_PAGE_SIZES:
+#if PAGE_SHIFT == 16
+               rc = HV_CTX_PG_SM_64K;
+#elif PAGE_SHIFT == 14
+               rc = HV_CTX_PG_SM_16K;
+#else
+# error Fix hv_sysconf emulation for new page size
+#endif
+               break;
+
+       case HV_SYSCONF_PAGE_SIZE_JUMBO:
+               rc = 0;  /* FIXME add super page support */
+               break;
+
+       case HV_SYSCONF_CPU_SPEED:
+       case HV_SYSCONF_CPU_TEMP:
+       case HV_SYSCONF_BOARD_TEMP:
+               rc = hv_sysconf(query);
+               break;
+
+       default:
+               rc = -EINVAL;
+               break;
+       }
+
+       vcpu->arch.regs.regs[0] = rc;
+       return 1;
+}
+
+static int kvm_emulate_hv_confstr(struct kvm_vcpu *vcpu)
+{
+       HV_SysconfQuery query = (HV_SysconfQuery)vcpu->arch.regs.regs[0];
+       long buflen = vcpu->arch.regs.regs[2];
+       char hvbuf[256];
+       const char *p;
+       long rc;
+
+       switch (query) {
+
+       /* For hardware attributes, just pass to the hypervisor. */
+       case HV_CONFSTR_BOARD_PART_NUM:
+       case HV_CONFSTR_BOARD_SERIAL_NUM:
+       case HV_CONFSTR_CHIP_SERIAL_NUM:
+       case HV_CONFSTR_BOARD_REV:
+       case HV_CONFSTR_CHIP_MODEL:
+       case HV_CONFSTR_BOARD_DESC:
+       case HV_CONFSTR_MEZZ_PART_NUM:
+       case HV_CONFSTR_MEZZ_SERIAL_NUM:
+       case HV_CONFSTR_MEZZ_REV:
+       case HV_CONFSTR_MEZZ_DESC:
+       case HV_CONFSTR_SWITCH_CONTROL:
+       case HV_CONFSTR_CHIP_REV:
+       case HV_CONFSTR_CPUMOD_PART_NUM:
+       case HV_CONFSTR_CPUMOD_SERIAL_NUM:
+       case HV_CONFSTR_CPUMOD_REV:
+       case HV_CONFSTR_CPUMOD_DESC:
+               rc = hv_confstr(query, (HV_VirtAddr)hvbuf, sizeof(hvbuf));
+               if (rc > sizeof(hvbuf)) {
+                       /* Not the best answer, but very unlikely anyway. */
+                       rc = sizeof(hvbuf);
+                       hvbuf[sizeof(hvbuf)-1] = '\0';
+               }
+               p = hvbuf;
+               break;
+
+       /* For hypervisor version info, just report the kernel version. */
+       case HV_CONFSTR_HV_SW_VER:
+               p = UTS_RELEASE;
+               break;
+       case HV_CONFSTR_HV_CONFIG:
+       case HV_CONFSTR_HV_CONFIG_VER:
+               p = "";
+               break;
+
+       default:
+               rc = HV_EINVAL;
+               goto done;
+       }
+
+       rc = strlen(p) + 1;  /* include NUL */
+       if (kvm_write_guest_va(vcpu, vcpu->arch.regs.regs[1],
+                              p, min(rc, buflen)))
+               rc = HV_EFAULT;
+
+done:
+       vcpu->arch.regs.regs[0] = rc;
+       return 1;
+}
+
+static int kvm_emulate_hv_get_rtc(struct kvm_vcpu *vcpu)
+{
+       HV_RTCTime *hvtm = (HV_RTCTime *) &vcpu->arch.regs.regs[0];
+       struct rtc_time tm;
+       struct timeval tv;
+
+       do_gettimeofday(&tv);
+       rtc_time_to_tm(tv.tv_sec, &tm);
+       hvtm->tm_sec = tm.tm_sec;
+       hvtm->tm_min = tm.tm_min;
+       hvtm->tm_hour = tm.tm_hour;
+       hvtm->tm_mday = tm.tm_mday;
+       hvtm->tm_mon = tm.tm_mon;
+       hvtm->tm_year = tm.tm_year;
+       hvtm->flags = 0;
+
+       return 1;
+}
+
+static int kvm_emulate_hv_set_rtc(struct kvm_vcpu *vcpu)
+{
+       /* Do nothing here. */
+       pr_warn("hv_set_rtc() will not work in kvm guest\n");
+       return 1;
+}
+
+static int kvm_emulate_hv_inquire_virtual(struct kvm_vcpu *vcpu)
+{
+       int idx = vcpu->arch.regs.regs[0];
+       HV_VirtAddrRange *var = (HV_VirtAddrRange *)&vcpu->arch.regs.regs[0];
+
+       switch (idx) {
+       case 0:
+               var->start =                  0UL;
+               var->size  =       0x20000000000UL;
+               break;
+       case 1:
+               var->start = 0xFFFFFFFF80000000UL;
+               var->size  =         0x80000000UL;
+               break;
+       default:
+               var->start =                  0UL;
+               var->size  =                  0UL;
+               break;
+       }
+
+       return 1;
+}
+
+/* Give all the ASIDs to the guest; we flush the whole TLB anyway. */
+static int kvm_emulate_hv_inquire_asid(struct kvm_vcpu *vcpu)
+{
+       int idx = vcpu->arch.regs.regs[0];
+       HV_ASIDRange *var = (HV_ASIDRange *)&vcpu->arch.regs.regs[0];
+
+       if (idx == 0) {
+               var->start = min_asid;
+               var->size = max_asid - min_asid + 1;
+       } else {
+               var->start = 0;
+               var->size = 0;
+       }
+
+       return 1;
+}
+
+static int kvm_emulate_hv_inquire_topology(struct kvm_vcpu *vcpu)
+{
+       HV_Topology *tp;
+       int cpus;
+
+       /* Depends on the definition of struct HV_Topology */
+       tp = (HV_Topology *)&vcpu->arch.regs.regs[0];
+
+       cpus = atomic_read(&vcpu->kvm->online_vcpus);
+       tp->coord.x = vcpu->vcpu_id;
+       tp->coord.y = 0;
+       tp->width = cpus;
+       tp->height = 1;
+
+       return 1;
+}
+
+static int xy_to_vcpu(struct kvm *kvm, int x, int y)
+{
+       if (y != 0 || x < 0 || x >= atomic_read(&kvm->online_vcpus))
+               return -1;
+       return x;
+}
+
+/*
+ * The primary vcpu is the one that initially runs while the others
+ * all block.  It is the only one allowed to call hv_start_all_tiles().
+ * The other cpus are secondary.
+ */
+static bool is_secondary_vcpu(struct kvm_vcpu *vcpu)
+{
+       return vcpu->vcpu_id != 0;
+}
+
+/*
+ * Release the secondary vcpus blocked in kvm_arch_vcpu_ioctl_run().
+ * Only the primary vcpu may call this, and only once; a repeat call
+ * (or a call from a secondary) panics the guest.
+ */
+static int kvm_emulate_hv_start_all_tiles(struct kvm_vcpu *vcpu)
+{
+       struct completion *c = &vcpu->kvm->arch.smp_start;
+       if (is_secondary_vcpu(vcpu) || completion_done(c))
+               return panic_hv(vcpu, "start_all_tiles() called again");
+       complete_all(c);
+       return 1;
+}
+
+/*
+ * Emulate hv_physaddr_read64(): translate the guest physical address
+ * in r0 to a host physical address and perform the 64-bit read with
+ * the HV_PTE access attributes passed by value in r1.  The result is
+ * returned to the guest in r0.
+ */
+static int kvm_emulate_hv_physaddr_read64(struct kvm_vcpu *vcpu)
+{
+       gpa_t gpa = vcpu->arch.regs.regs[0];
+       HV_PTE *access = (HV_PTE *) &vcpu->arch.regs.regs[1];
+       gfn_t gfn;
+       pfn_t pfn;
+       hpa_t hpa;
+
+       gfn = gpa_to_gfn(gpa);
+       pfn = gfn_to_pfn(vcpu->kvm, gfn);
+       if (is_error_pfn(pfn))
+               return panic_hv(vcpu, "bogus PA %llx in physaddr_read64()",
+                        gpa);
+       hpa = pfn_to_hpa(pfn) | (gpa & ~PAGE_MASK);
+
+       vcpu->arch.regs.regs[0] = hv_physaddr_read64(hpa, *access);
+
+       return 1;
+}
+
+/*
+ * Emulate hv_physaddr_write64(): translate the guest physical address
+ * in r0 and perform the 64-bit write of the value in r2, using the
+ * HV_PTE access attributes passed by value in r1.
+ *
+ * Note the HV_PTE is read from the saved register itself (as in the
+ * read64 emulation above); the previous code cast the register
+ * *value* to a pointer and dereferenced it, which would have treated
+ * a guest-supplied value as a host kernel pointer.
+ */
+static int kvm_emulate_hv_physaddr_write64(struct kvm_vcpu *vcpu)
+{
+       gpa_t gpa = vcpu->arch.regs.regs[0];
+       HV_PTE *access = (HV_PTE *)&vcpu->arch.regs.regs[1];
+       uint64_t val = vcpu->arch.regs.regs[2];
+       gfn_t gfn;
+       pfn_t pfn;
+       hpa_t hpa;
+
+       gfn = gpa_to_gfn(gpa);
+       pfn = gfn_to_pfn(vcpu->kvm, gfn);
+       if (is_error_pfn(pfn))
+               return panic_hv(vcpu, "bogus PA %llx in physaddr_write64()",
+                        gpa);
+       hpa = pfn_to_hpa(pfn) | (gpa & ~PAGE_MASK);
+
+       hv_physaddr_write64(hpa, *access, val);
+
+       return 1;
+}
+
+/*
+ * Accept the guest's message-state registration unconditionally; the
+ * host keeps pending messages in vcpu->arch.pending_msgs instead of
+ * the guest-supplied buffer, so the argument is ignored.
+ */
+static int kvm_emulate_hv_register_message_state(struct kvm_vcpu *vcpu)
+{
+       /* Do we care about the argument msgstate? */
+       vcpu->arch.regs.regs[0] = HV_OK;
+
+       return 1;
+}
+
+/*
+ * NOTE: we may coalesce multiple messages with the same tag to the
+ * same recipient.  Currently the only messages used by Linux are
+ * start/stop cpu (where coalescing is OK), and the smp_call_function()
+ * IPI message tag.  In the latter case we rely on the generic
+ * smp_call_function code to properly handle this, and since it only
+ * uses the IPI as a way to wake up the generic list-walking code,
+ * it's OK if we coalesce several IPI deliveries before the recipient
+ * core takes action.
+ */
+static int kvm_emulate_hv_send_message(struct kvm_vcpu *vcpu)
+{
+       struct kvm *kvm = vcpu->kvm;
+       struct kvm_vcpu *vcpui;
+       HV_Recipient recip[NR_CPUS];
+       HV_Recipient *recips = (HV_Recipient *)vcpu->arch.regs.regs[0];
+       int nrecip = vcpu->arch.regs.regs[1];
+       int buflen = vcpu->arch.regs.regs[3];
+       int sent, vcpu_id, tag;
+
+       /* NOTE: we only support the Linux usage of buflen == sizeof(int). */
+       /*
+        * The nrecip bound also keeps the on-stack recip[] array safe,
+        * since online_vcpus <= NR_CPUS.
+        * NOTE(review): a negative nrecip passes this check and then
+        * feeds a negative count to kvm_read_guest_va below -- confirm
+        * that the copy helper rejects it.
+        */
+       if (unlikely(buflen != sizeof(int) ||
+                    nrecip >= atomic_read(&kvm->online_vcpus))) {
+               vcpu->arch.regs.regs[0] = HV_EINVAL;
+               return 1;
+       }
+
+       /* Get the buf info */
+       if (kvm_read_guest_va(vcpu, vcpu->arch.regs.regs[2],
+                             &tag, sizeof(tag))) {
+               vcpu->arch.regs.regs[0] = HV_EFAULT;
+               return 1;
+       }
+
+       /* Range-check the tag value. */
+       if (tag < 0 || tag >= MAX_MSG_TAG) {
+               vcpu->arch.regs.regs[0] = HV_EFAULT;
+               return 1;
+       }
+
+       /* Get all the recipients */
+       if (kvm_read_guest_va(vcpu, (unsigned long)recips, &recip,
+                             nrecip * sizeof(HV_Recipient))) {
+               vcpu->arch.regs.regs[0] = HV_EFAULT;
+               return 1;
+       }
+
+       /* Deliver by setting the tag bit on each target and kicking it. */
+       for (sent = 0; sent < nrecip; sent++) {
+               if (recip[sent].state != HV_TO_BE_SENT)
+                       continue;
+               /* Reject bad coordinates and self-sends. */
+               vcpu_id = xy_to_vcpu(kvm, recip[sent].x, recip[sent].y);
+               if (unlikely(vcpu_id < 0 || vcpu_id == vcpu->vcpu_id)) {
+                       recip[sent].state = HV_BAD_RECIP;
+                       continue;
+               }
+               vcpui = kvm_get_vcpu(kvm, vcpu_id);
+               set_bit(tag, &vcpui->arch.pending_msgs);
+               kvm_vcpu_kick(vcpui);
+               recip[sent].state = HV_SENT;
+       }
+
+       /* Write the per-recipient status back to the guest. */
+       if (kvm_write_guest_va(vcpu, (unsigned long)recips, &recip,
+                              nrecip * sizeof(HV_Recipient))) {
+               vcpu->arch.regs.regs[0] = HV_EFAULT;
+               return 1;
+       }
+
+       /*
+        * NOTE(review): "sent" is the loop index, so this always equals
+        * nrecip (recipients processed), not the number actually marked
+        * HV_SENT -- confirm this matches the hv_send_message() contract.
+        */
+       vcpu->arch.regs.regs[0] = sent;
+
+       return 1;
+}
+
+/*
+ * Emulate hv_receive_message(): pop the lowest pending tag from
+ * vcpu->arch.pending_msgs and copy it (a single int) into the guest
+ * buffer named by r2.  The HV_RcvMsgInfo result overlays r0/r1.
+ */
+static int kvm_emulate_hv_receive_message(struct kvm_vcpu *vcpu)
+{
+       HV_RcvMsgInfo *rmi = (HV_RcvMsgInfo *)&vcpu->arch.regs.regs[0];
+       int buflen = vcpu->arch.regs.regs[3];
+       int tag;
+
+       /* Currently we only support messages from other tiles. */
+       rmi->source = HV_MSG_TILE;
+
+       /*
+        * NOTE(review): this rejects buflen == sizeof(int) even though
+        * the message written below is exactly sizeof(int) bytes;
+        * confirm whether "<" was intended here.
+        */
+       if (buflen <= sizeof(int)) {
+               rmi->msglen = HV_E2BIG;
+               return 1;
+       }
+
+       tag = find_first_bit(&vcpu->arch.pending_msgs, MAX_MSG_TAG);
+       if (tag >= MAX_MSG_TAG) {
+               /* No more messages */
+               rmi->msglen = 0;
+               return 1;
+       }
+
+       if (kvm_write_guest_va(vcpu, vcpu->arch.regs.regs[2],
+                              &tag, sizeof(int))) {
+               rmi->msglen = HV_EFAULT;
+               return 1;
+       }
+
+       /*
+        * This clear_bit could race with a set_bit as another core
+        * delivers a new smp_function_call to this core.  However,
+        * the smp_function_call code will have set up the additional
+        * smp_function_call data on the kernel's list prior to
+        * raising the interrupt, so even if we lose the new
+        * interrupt due to the race, we still haven't dispatched
+        * to the original interrupt handler, and when we do, it
+        * will find both smp_function_calls waiting for it, so the
+        * race is harmless.  This is consistent with the fact that
+        * the generic code is trying to support pretty much
+        * arbitrary architecture-dependent IPI semantics, so it
+        * is very conservative about what it assumes.
+        *
+        * Also note that we only clear_bit on the core that owns
+        * the mask, so there's no race condition caused by the
+        * find_first_bit above and the clear_bit here, since once
+        * a bit is found it will stay set until this point.
+        */
+       clear_bit(tag, &vcpu->arch.pending_msgs);
+       rmi->msglen = sizeof(int);
+       return 1;
+}
+
+/*
+ * Return the live guest context straight from the hypervisor; the
+ * HV_Context result overlays the saved registers starting at r0.
+ */
+static int kvm_emulate_hv_inquire_context(struct kvm_vcpu *vcpu)
+{
+       HV_Context *ctx = (HV_Context *) &vcpu->arch.regs.regs[0];
+
+       *ctx = hv_inquire_guest_context();
+
+       return 1;
+}
+
+/*
+ * Emulate hv_inquire_tiles(): copy a cpumask describing the requested
+ * tile set (r0) into the guest buffer at gva r1 of length r2, zeroing
+ * any tail beyond sizeof(cpumask).  For the sets we support, every
+ * online vcpu is in the mask; HV_INQ_TILES_SHARED is always empty.
+ */
+static int kvm_emulate_hv_inquire_tiles(struct kvm_vcpu *vcpu)
+{
+       struct kvm *kvm = vcpu->kvm;
+       HV_InqTileSet set = vcpu->arch.regs.regs[0];
+       unsigned long gva = vcpu->arch.regs.regs[1];
+       int length = vcpu->arch.regs.regs[2];
+       struct cpumask mask = CPU_MASK_NONE;
+       int cpus, i, retval, bytes2copy, bytes2zero;
+
+       switch (set) {
+       case HV_INQ_TILES_AVAIL:
+       case HV_INQ_TILES_HFH_CACHE:
+       case HV_INQ_TILES_LOTAR:
+               cpus = atomic_read(&kvm->online_vcpus);
+               for (i = 0; i < cpus; ++i)
+                       cpumask_set_cpu(i, &mask);
+               break;
+       case HV_INQ_TILES_SHARED:
+               break;
+       default:
+               retval = HV_EINVAL;
+               goto done;
+       }
+
+       /*
+        * NOTE(review): "length" is signed but compared against a
+        * size_t, so a negative length is treated as huge here and
+        * then makes bytes2zero negative -- confirm callers can't
+        * pass a negative length.
+        */
+       bytes2copy = (length > sizeof(mask)) ? sizeof(mask) : length;
+       bytes2zero = length - bytes2copy;
+
+       if (kvm_write_guest_va(vcpu, gva, &mask, bytes2copy)) {
+               retval = HV_EFAULT;
+               goto done;
+       }
+
+       if (kvm_clear_guest_va(vcpu, gva + bytes2copy, bytes2zero)) {
+               retval = HV_EFAULT;
+               goto done;
+       }
+
+       retval = HV_OK;
+done:
+       vcpu->arch.regs.regs[0] = retval;
+       return 1;
+}
+
+/*
+ * Emulate hv_get_ipi_pte(): hand the guest the PTE that maps the
+ * target vcpu's IPI page, so a store through it can raise an IPI.
+ * Only PL1 (GUEST_PL) targets in the 1-D topology are valid.
+ */
+static int kvm_emulate_hv_get_ipi_pte(struct kvm_vcpu *vcpu)
+{
+       HV_Coord vtarget = *(HV_Coord *)&vcpu->arch.regs.regs[0];
+       int pl = (int) vcpu->arch.regs.regs[1];
+       struct kvm_vcpu *target_vcpu;
+       int vcpu_id;
+
+       vcpu_id = vtarget.x;
+       if (pl != GUEST_PL || vtarget.y != 0 || vcpu_id < 0 ||
+           vcpu_id >= atomic_read(&vcpu->kvm->online_vcpus)) {
+               vcpu->arch.regs.regs[0] = HV_EINVAL;
+               return 1;
+       }
+
+       /* Copy the precomputed gpte (see kvm_arch_vcpu_init) out to r2. */
+       target_vcpu = kvm_get_vcpu(vcpu->kvm, vcpu_id);
+       if (kvm_write_guest_va(vcpu, vcpu->arch.regs.regs[2],
+                           &target_vcpu->arch.ipi_gpte, sizeof(pte_t))) {
+               vcpu->arch.regs.regs[0] = HV_EFAULT;
+               return 1;
+       }
+
+       vcpu->arch.regs.regs[0] = HV_OK;
+
+       return 1;
+}
+
+/*
+ * Find the vcpu whose reserved IPI page has guest physical address
+ * "gpa", or NULL if none matches.
+ *
+ * NOTE(review): only used by handle_mmio() below; could likely be
+ * static unless another file references it.
+ */
+struct kvm_vcpu *ipi_vcpu_lookup(struct kvm *kvm, unsigned long gpa)
+{
+       struct kvm_vcpu *vcpui;
+       unsigned long idx;
+
+       kvm_for_each_vcpu(idx, vcpui, kvm)
+               if (vcpui->arch.ipi_gpa == gpa)
+                       return vcpui;
+
+       return NULL;
+}
+
+/*
+ * Most page faults will be downcall-ed from hv to and be handled directly
+ * by either guest os or host os. This function is used to handle the
+ * remaining cases.  Returns 1 if the fault was a store to some vcpu's
+ * IPI page (delivering a reschedule IPI), 0 otherwise.
+ */
+static int handle_mmio(struct kvm_vcpu *vcpu)
+{
+       struct kvm *kvm = vcpu->kvm;
+       struct kvm_translation tr;
+       struct kvm_vcpu *ipi_vcpu;
+
+       tr.linear_address = (__u64) vcpu->arch.fault_addr;
+       kvm_arch_vcpu_ioctl_translate(vcpu, &tr);
+       if (!tr.valid)
+               return 0;
+
+       /* ipi PTE for rescheduling interrupt? */
+       ipi_vcpu = ipi_vcpu_lookup(kvm, tr.physical_address);
+       if (!ipi_vcpu)
+               return 0;
+
+       set_bit(IRQ_RESCHEDULE, &ipi_vcpu->arch.ipi_events);
+       kvm_vcpu_kick(ipi_vcpu);
+
+       /* Juke the PC past the store instruction. */
+       vcpu->arch.regs.pc += 8;
+       return 1;
+}
+
+/* Reject hv_set_pte_super_shift(); the guest must follow the host. */
+static int kvm_emulate_hv_set_pte_super_shift(struct kvm_vcpu *vcpu)
+{
+       /*
+        * We do not expect this call in guest so far. At least guest os
+        * should just follow host os instead of *set*. Besides,
+        * hv_set_pte_super_shift() will not be called in guest os with
+        * current guest os setting.
+        */
+       vcpu->arch.regs.regs[0] = HV_EINVAL;
+
+       return 1;
+}
+
+/* Deny hv_set_speed(); guests may not change the clock speed. */
+static int kvm_emulate_hv_set_speed(struct kvm_vcpu *vcpu)
+{
+       /* HV_SetSpeed result overlays the saved registers at r0. */
+       HV_SetSpeed *hvss = (HV_SetSpeed *) &vcpu->arch.regs.regs[0];
+
+       hvss->new_speed = HV_EPERM;
+       hvss->end_cycle = 0;
+       hvss->delta_ns = 0;
+
+       return 1;
+}
+
+/* Hypercall dispatch table, indexed by hcall number (see HCALL_DEFS). */
+static int (*hcall_handlers[KVM_NUM_HCALLS])(struct kvm_vcpu *vcpu) = {
+       HCALL_DEFS
+};
+
+/*
+ * Dispatch a vmexit.  Returns >0 to keep running the guest, 0 to
+ * bounce the exit to userspace (qemu), <0 on error (via handlers).
+ */
+static int kvm_handle_exit(struct kvm_vcpu *vcpu)
+{
+       unsigned long hcall_idx;
+
+       switch (vcpu->run->exit_reason) {
+       case KVM_EXIT_HYPERCALL:
+               /* Hypercall number is passed in r10 by the hv glue. */
+               hcall_idx = vcpu->arch.regs.regs[10];
+               if (unlikely(hcall_idx >= KVM_NUM_HCALLS ||
+                            hcall_handlers[hcall_idx] == NULL))
+                       return kvm_emulate_illegal(vcpu);
+
+               /* Juke us past the swint0 when we return. */
+               vcpu->arch.regs.pc += 8;
+
+               return hcall_handlers[hcall_idx](vcpu);
+
+       case KVM_EXIT_MMIO:
+               if (handle_mmio(vcpu))
+                       return 1;
+               return panic_hv(vcpu, "Out-of-bounds client memory access");
+
+       case KVM_EXIT_AGAIN:
+               return 1;
+
+       default:
+               return 0;
+       }
+}
+
+/*
+ * smp_call_function_single() callback run on the target vcpu's cpu:
+ * force the vcpu thread to vmexit the next time it would vmresume.
+ */
+static void kvm_kick_func(void *info)
+{
+       struct kvm_vcpu *vcpu = info;
+
+       /* If this is not the thread that we expect, just return. */
+       if (unlikely(vcpu->pid != get_task_pid(current, PIDTYPE_PID)))
+               return;
+
+       /* Setting this flag will cause a vmexit instead of a vmresume. */
+       set_thread_flag(TIF_VIRT_EXIT);
+}
+
+/* Note this function has been a standard kvm interface in latest Linux. */
+void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
+{
+       int me, cpu;
+
+       /* If it is waiting in kvm_vcpu_block(), wake it up. */
+       if (waitqueue_active(&vcpu->wq))
+               wake_up_interruptible(&vcpu->wq);
+
+       /* If we are kicking our own vcpu, make sure we vmexit. */
+       if (vcpu == current_thread_info()->vcpu) {
+               set_thread_flag(TIF_VIRT_EXIT);
+               return;
+       }
+
+       /*
+        * If the vcpu is running the guest, interrupt its cpu,
+        * causing it to vmexit by setting TIF_VIRT_EXIT.  Note we can
+        * race with a guest already doing a vmexit, but that is benign.
+        * KVM_REQ_KICK is set while the vcpu is outside the guest (see
+        * kvm_tile_run), so we only IPI cpus actually running a guest.
+        */
+       cpu = vcpu->cpu;
+       me = get_cpu();
+       if (cpu != me && (unsigned) cpu < nr_cpu_ids && cpu_online(cpu))
+               if (!test_and_set_bit(KVM_REQ_KICK, &vcpu->requests))
+                       smp_call_function_single(cpu, kvm_kick_func, vcpu, 0);
+       put_cpu();
+}
+EXPORT_SYMBOL_GPL(kvm_vcpu_kick);
+
+/*
+ * Any interrupt that would normally be handled by the host at PL2
+ * needs to be reassigned to the guest at PL1 as we enter.
+ *
+ * The TLB interrupts remain handled by the hypervisor and are downcalled
+ * to the appropriate host or guest as necessary.
+ *
+ * FIXME: We don't give the UDN interrupts for now; at some point we
+ * plan to allow an option to pin the vcpus and report the true
+ * geometry to the guest, at which point passing the UDN access would
+ * make sense.
+ *
+ * FIXME: For now we don't pass the profiling interrupts to the guest,
+ * and instead require profiling be run in the host; we should be able
+ * to support guest-level profiling pretty easily, but we need to
+ * think about whether there are vcpu migration issues there.
+ */
+static void kvm_grant_mpls(void)
+{
+       /* Route these interrupt levels to PL1 (the guest). */
+       __insn_mtspr(SPR_MPL_SWINT_1_SET_1, 1);
+       __insn_mtspr(SPR_MPL_ILL_SET_1, 1);
+       __insn_mtspr(SPR_MPL_GPV_SET_1, 1);
+       __insn_mtspr(SPR_MPL_ILL_TRANS_SET_1, 1);
+       __insn_mtspr(SPR_MPL_UNALIGN_DATA_SET_1, 1);
+}
+
+/* Undo kvm_grant_mpls(): route the same interrupt levels back to PL2. */
+static void kvm_ungrant_mpls(void)
+{
+       __insn_mtspr(SPR_MPL_SWINT_1_SET_2, 1);
+       __insn_mtspr(SPR_MPL_ILL_SET_2, 1);
+       __insn_mtspr(SPR_MPL_GPV_SET_2, 1);
+       __insn_mtspr(SPR_MPL_ILL_TRANS_SET_2, 1);
+       __insn_mtspr(SPR_MPL_UNALIGN_DATA_SET_2, 1);
+}
+
+/*
+ * There is lots of state that is (for the non-virtualized case) held
+ * permanently in SPRs, or that is in any case not context-switched.
+ * The next two routines switch in and out all the SPR state.
+ *
+ * We try to fix the timer so that when we restart, we fix up the
+ * timer value so that will fire at the correct wall-clock time even
+ * if we have been scheduled out for a little bit.  This may also
+ * mean we end up firing it immediately on return, and suffer a
+ * timer delay in the guest.
+ */
+static void kvm_save_sprs(struct kvm_vcpu *vcpu)
+{
+       /* Snapshot the timer and the vmexit time for the fixup above. */
+       vcpu->arch.timer_control = __insn_mfspr(SPR_AUX_TILE_TIMER_CONTROL);
+       vcpu->arch.vmexit_cycles = get_cycles();
+
+#define SAVE_SPR(x) vcpu->arch.sregs.x = __insn_mfspr(SPR_ ## x)
+       FOR_EACH_GUEST_SPR(SAVE_SPR);
+#undef SAVE_SPR
+}
+
+/*
+ * Restore the guest SPRs saved by kvm_save_sprs(), first advancing
+ * the down-counting tile timer by the cycles we spent descheduled so
+ * it still fires at the right wall-clock time (setting the underflow
+ * bit if the deadline has already passed).
+ */
+static void kvm_restore_sprs(struct kvm_vcpu *vcpu)
+{
+       unsigned long count = vcpu->arch.timer_control;
+       unsigned long underflow =
+               (count >> SPR_AUX_TILE_TIMER_CONTROL__UNDERFLOW_SHIFT) & 1;
+       unsigned long disabled =
+               (count >> SPR_AUX_TILE_TIMER_CONTROL__DISABLE_SHIFT) & 1;
+
+       if (!disabled) {
+               unsigned long delta = get_cycles() - vcpu->arch.vmexit_cycles;
+               count &= SPR_AUX_TILE_TIMER_CONTROL__COUNT_MASK;
+               underflow |= delta > count;
+               count -= delta;
+               count &= SPR_AUX_TILE_TIMER_CONTROL__COUNT_MASK;
+               count |= (underflow << SPR_AUX_TILE_TIMER_CONTROL__UNDERFLOW_SHIFT);
+       }
+       __insn_mtspr(SPR_AUX_TILE_TIMER_CONTROL, count);
+
+#define RESTORE_SPR(x) __insn_mtspr(SPR_ ## x, vcpu->arch.sregs.x)
+       FOR_EACH_GUEST_SPR(RESTORE_SPR);
+#undef RESTORE_SPR
+}
+
+/*
+ * When entering the guest, we need to eliminate any PL0 translations
+ * that were in use by qemu, since the guest's PL0 translations will
+ * be different.  We also flush PL1 translations in case there have
+ * been changes to the virtualization page table, etc.
+ *
+ * FIXME: Add a way to just flush PL0/PL1, or just flush below
+ * the host PAGE_OFFSET, or add vpid support, etc.
+ */
+static void kvm_guest_context_enter(struct kvm_vcpu *vcpu)
+{
+       HV_Context *ctx;
+       pgd_t *vpgdir;
+       pte_t *ptep;
+       int rc;
+
+       /* Install virtualization context */
+       vpgdir = vcpu->kvm->arch.vpgd;
+       BUG_ON(vpgdir == NULL);
+       ptep = virt_to_pte(NULL, (unsigned long)vpgdir);
+       rc = hv_install_virt_context(__pa(vpgdir), *ptep, 0, 0);
+       WARN_ON_ONCE(rc < 0);
+
+       /* Install guest context (saved at last vmexit, see below). */
+       ctx = &vcpu->arch.guest_context;
+       rc = hv_install_guest_context(ctx->page_table, ctx->access,
+                                     ctx->asid, ctx->flags);
+       WARN_ONCE(rc < 0, "install_guest_context(%#llx,%#llx,%#x,%#x): %d\n",
+                 ctx->page_table, ctx->access.val,
+                 ctx->asid, ctx->flags, rc);
+
+       hv_flush_all(0);
+}
+
+/*
+ * De-install the virtualization context so we take faults below the
+ * host Linux PL in the normal manner going forward.
+ *
+ * We flush all the TLB mappings as we exit the guest, since the
+ * guest has been using the ASIDs as it pleases, and may have installed
+ * incompatible mappings for qemu's process as well.  Note that we don't
+ * worry about host-PL interrupts that occur while the guest is running,
+ * on the assumption that such interrupts can't touch userspace
+ * addresses legally anyway.
+ *
+ * NOTE: we may want to add a hypervisor call to just flush mappings
+ * below PL2 and use that here instead.
+ */
+static void kvm_guest_context_exit(struct kvm_vcpu *vcpu)
+{
+       int rc;
+
+       /* Remember guest context for the next kvm_guest_context_enter(). */
+       vcpu->arch.guest_context = hv_inquire_guest_context();
+
+       /* Disable virtualization context */
+       rc = hv_install_virt_context(HV_CTX_NONE, hv_pte(0), 0, 0);
+       WARN_ON_ONCE(rc < 0);
+
+       /* Flush everything in the TLB. */
+       hv_flush_all(0);
+}
+
+/*
+ * Fold pending host-side events into the guest's shadow SPRs just
+ * before vmresume: pending IPIs go into IPI_EVENT_1, and pending
+ * messages raise INTCTRL_1 so the guest handler will poll for them.
+ */
+static void kvm_inject_interrupts(struct kvm_vcpu *vcpu)
+{
+       /*
+        * Capture current set of ipi_events.  We might race with
+        * another thread adding an event, but if so we'll just miss
+        * it on this go-around and see it next time.
+        */
+       vcpu->arch.sregs.IPI_EVENT_1 |= __insn_exch(&vcpu->arch.ipi_events, 0);
+
+       /*
+        * Note: We could set PC and EX1 for the guest os to jump
+        * directly to the INT_MESSAGE_RCV_DWNCL handler if the interrupt
+        * is unmasked and the guest is not at PL1 with ICS set.
+        * But in fact it's about as fast to just set INTCTRL_1_STATUS
+        * here and then run the short INTCTRL_1 handler in the guest.
+        */
+       vcpu->arch.sregs.INTCTRL_1_STATUS = (vcpu->arch.pending_msgs != 0);
+}
+
+/*
+ * Run the guest until the next vmexit: install contexts and SPRs,
+ * iret into the guest via kvm_vmresume(), then unwind everything in
+ * reverse order when kvm_vmexit brings us back.  The exit reason is
+ * left in vcpu->run->exit_reason for kvm_handle_exit().
+ */
+static void kvm_tile_run(struct kvm_vcpu *vcpu)
+{
+       struct thread_info *ti = current_thread_info();
+       unsigned long prev_k_0 = __insn_mfspr(SPR_SYSTEM_SAVE_K_0);
+
+       /*
+        * Disable interrupts while we set up the guest state.
+        * This way, if we race with another core trying to tell us
+        * to fix up our guest state, we will take the kick only as
+        * we actually try to enter the guest, and instead we will
+        * vmexit and end up retrying.
+        */
+       local_irq_disable();
+       kvm_guest_context_enter(vcpu);
+       /* Clearing KVM_REQ_KICK tells kickers we are (about to be) in guest. */
+       clear_bit(KVM_REQ_KICK, &vcpu->requests);
+       ti->vcpu = vcpu;
+       vcpu->cpu = get_cpu();
+       kvm_inject_interrupts(vcpu);
+       kvm_grant_mpls();
+       kvm_restore_sprs(vcpu);
+
+       /* Calling this function irets into the guest. */
+       kvm_vmresume(&vcpu->arch.regs, &vcpu->arch.host_sp);
+
+       /* We resume here due to a call to kvm_vmexit. */
+       __insn_mtspr(SPR_SYSTEM_SAVE_K_0, prev_k_0);
+
+       vcpu->cpu = -1;
+       put_cpu();
+       ti->vcpu = NULL;
+       set_bit(KVM_REQ_KICK, &vcpu->requests);
+       vcpu->run->ready_for_interrupt_injection = 1;
+       kvm_ungrant_mpls();
+       kvm_save_sprs(vcpu);
+       __insn_mtspr(SPR_INTERRUPT_MASK_1, -1UL);
+       kvm_guest_context_exit(vcpu);
+       local_irq_enable();
+}
+
+/*
+ * Main vcpu run loop: keep entering the guest and handling its exits
+ * until an exit must be serviced by qemu (0), an error occurs (<0),
+ * or a signal / homecache migration interrupts us (-EINTR).
+ */
+static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+{
+       int r = 1;
+
+       while (r > 0) {
+               kvm_guest_enter();
+               kvm_tile_run(vcpu);
+               kvm_guest_exit();
+
+               r = kvm_handle_exit(vcpu);
+               /*
+                * <0: error for userspace.
+                * =0: QEMU to handle.
+                * >0: host os can handle it fully.
+                */
+               if (r <= 0)
+                       break;
+
+               if (signal_pending(current)) {
+                       vcpu->run->exit_reason = KVM_EXIT_INTR;
+                       r = -EINTR;
+                       break;
+               }
+
+#ifdef CONFIG_HOMECACHE
+               if (current_thread_info()->homecache_cpu !=
+                   smp_processor_id()) {
+                       /* Do homecache migration when returning to qemu. */
+                       vcpu->run->exit_reason = KVM_EXIT_INTR;
+                       r = -EINTR;
+                       break;
+               }
+#endif
+
+               kvm_resched(vcpu);
+       }
+
+       return r;
+}
+
+/*
+ * KVM_RUN ioctl entry point.  Secondary vcpus park here until the
+ * primary issues hv_start_all_tiles(); then we run the vcpu loop with
+ * the caller's signal mask installed.
+ */
+int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+{
+       int r;
+       sigset_t sigsaved;
+
+       /* Secondary cpus must wait until they are told they can start. */
+       if (vcpu->arch.suspended) {
+               struct completion *c = &vcpu->kvm->arch.smp_start;
+               if (wait_for_completion_interruptible(c))
+                       return -EINTR;
+               vcpu->arch.suspended = 0;
+       }
+
+       if (vcpu->sigset_active)
+               sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
+
+       r = __vcpu_run(vcpu, kvm_run);
+
+       if (vcpu->sigset_active)
+               sigprocmask(SIG_SETMASK, &sigsaved, NULL);
+
+       return r;
+}
+
+/* No arch-wide init is needed on tile. */
+int kvm_arch_init(void *opaque)
+{
+       return 0;
+}
+
+/* No arch-wide teardown is needed on tile. */
+void kvm_arch_exit(void)
+{
+}
+
+/*
+ * Per-vcpu init: lazily compute the VM's reserved GPA region (just
+ * above the highest memslot) on the first vcpu, then carve out one
+ * page per vcpu in it for IPI delivery and precompute its gpte.
+ */
+int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
+{
+       int i;
+       unsigned long resv_gfn_start;
+       struct kvm_memory_slot *s;
+       struct kvm *kvm = vcpu->kvm;
+
+       if (!kvm->arch.resv_gpa_start) {
+               resv_gfn_start = 0;
+
+               for (i = 0; i < KVM_USER_MEM_SLOTS; i++) {
+                       s = &kvm->memslots->memslots[i];
+
+                       if (!s->npages)
+                               continue;
+
+                       if ((s->base_gfn + s->npages) > resv_gfn_start)
+                               resv_gfn_start = s->base_gfn + s->npages;
+               }
+
+               kvm->arch.resv_gpa_start = PFN_PHYS(resv_gfn_start);
+       }
+
+       /* Initialize to enter fake PA=VA mode in hypervisor. */
+       vcpu->arch.guest_context.page_table = HV_CTX_NONE;
+
+       vcpu->arch.ipi_gpa =
+               kvm->arch.resv_gpa_start + (vcpu->vcpu_id * PAGE_SIZE);
+       vcpu->arch.ipi_gpte =
+               pfn_pte(PFN_DOWN(vcpu->arch.ipi_gpa), PAGE_KERNEL);
+
+       /* Mark the core suspended if it is not the boot cpu. */
+       vcpu->arch.suspended = is_secondary_vcpu(vcpu);
+
+       return 0;
+}
+
+/* Nothing allocated in kvm_arch_vcpu_init() needs freeing. */
+void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
+{
+}
+
+void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+{
+       /* Notify simulator that this task handles this vcpu. */
+       sim_set_vcpu(vcpu->vcpu_id);
+}
+
+/* Tell the simulator this task no longer handles a vcpu. */
+void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
+{
+       sim_clear_vcpu();
+}
+
+/*
+ * Allocate and initialize a vcpu.  Returns an ERR_PTR on failure.
+ */
+struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
+{
+       /* FIXME: some archs set up a cache for these structs? */
+       struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
+       int rc;
+
+       if (!vcpu)
+               return ERR_PTR(-ENOMEM);
+
+       rc = kvm_vcpu_init(vcpu, kvm, id);
+       if (rc) {
+               kfree(vcpu);
+               return ERR_PTR(rc);
+       }
+
+       return vcpu;
+}
+
+/*
+ * Reset a vcpu's register state: all GPRs and shadow SPRs zero, all
+ * IPIs and interrupts masked, and the interrupt vector base set to
+ * the guest kernel's fixed address.
+ *
+ * Size the memsets by the objects themselves rather than by
+ * sizeof(struct pt_regs); sregs is the SPR-shadow structure, not a
+ * pt_regs, so the old sizeof(struct pt_regs) could under- or
+ * over-clear it.
+ */
+int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
+{
+       memset(&vcpu->arch.regs, 0, sizeof(vcpu->arch.regs));
+       memset(&vcpu->arch.sregs, 0, sizeof(vcpu->arch.sregs));
+       vcpu->arch.sregs.IPI_MASK_1 = -1UL;
+       vcpu->arch.sregs.INTERRUPT_MASK_1 = -1UL;
+       vcpu->arch.sregs.INTERRUPT_VECTOR_BASE_1 = 0xfd000000;
+       return 0;
+}
+
+/* No post-creation fixups are needed on tile. */
+int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
+{
+       return 0;
+}
+
+void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
+{
+       kvm_vcpu_uninit(vcpu);
+       kfree(vcpu);
+}
+
+/* Alias for destroy; tile keeps no extra per-vcpu allocations. */
+void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
+{
+       return kvm_arch_vcpu_destroy(vcpu);
+}
+
+/* Tile needs no per-cpu hardware virtualization enable/disable. */
+int kvm_arch_hardware_enable(void *garbage)
+{
+       return 0;
+}
+
+void kvm_arch_hardware_disable(void *garbage)
+{
+}
+
+int kvm_arch_hardware_setup(void)
+{
+       return 0;
+}
+
+void kvm_arch_hardware_unsetup(void)
+{
+}
+
+void kvm_arch_check_processor_compat(void *rtn)
+{
+}
+
+/* The generic halt path is not used; vcpus never report runnable here. */
+int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
+{
+       return 0;
+}
+
+/*
+ * Per-VM init: only the default VM type (0) is supported.  The
+ * smp_start completion gates the secondary vcpus (see ioctl_run).
+ */
+int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
+{
+       if (type)
+               return -EINVAL;
+
+       init_completion(&kvm->arch.smp_start);
+       return 0;
+}
+
+/* Tear down a VM: free all vcpus and the virtualization page table. */
+void kvm_arch_destroy_vm(struct kvm *kvm)
+{
+       struct kvm_vcpu *vcpu;
+       int i;
+
+       kvm_for_each_vcpu(i, vcpu, kvm)
+               kvm_arch_vcpu_free(vcpu);
+
+       /* Seems to be unnecessary? */
+       mutex_lock(&kvm->lock);
+       for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
+               kvm->vcpus[i] = NULL;
+
+       atomic_set(&kvm->online_vcpus, 0);
+       mutex_unlock(&kvm->lock);
+
+       /* FIXME: release all the pmds and ptes as well! */
+       if (kvm->arch.vpgd)
+               pgd_free(kvm->mm, kvm->arch.vpgd);
+}
+
+/* No async arch events to drain before VM destruction. */
+void kvm_arch_sync_events(struct kvm *kvm)
+{
+}
+
+/* Guest timers are handled via SPR save/restore, never pending here. */
+int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
+{
+       return 0;
+}
+
+/* Called from guest hv glue via swint0 traps. */
+void kvm_do_hypervisor_call(struct pt_regs *regs, int fault_num)
+{
+       /*
+        * Hypercalls are only valid from the guest (non-zero PL);
+        * a swint0 from PL0 is handled as an ordinary trap.
+        */
+       if (EX1_PL(regs->ex1) != 0) {
+               kvm_trigger_vmexit(regs, KVM_EXIT_HYPERCALL);
+               /*NORETURN*/
+       }
+       do_trap(regs, fault_num, 0);
+}
+
+/*
+ * Virtualization page-table miss: record the faulting address and
+ * exit to the host as an MMIO access (see handle_mmio()).
+ */
+void kvm_do_vpgtable_miss(struct pt_regs *regs, int fault_num,
+                         unsigned long fault_addr, unsigned long write)
+{
+       struct kvm_vcpu *vcpu = current_thread_info()->vcpu;
+       BUG_ON(vcpu == NULL);
+       vcpu->arch.fault_addr = fault_addr;
+       kvm_trigger_vmexit(regs, KVM_EXIT_MMIO);
+       /*NORETURN*/
+}
+
+/* Unrecoverable guest fault: shut the VM down. */
+void kvm_do_vguest_fatal(struct pt_regs *regs, int fault_num)
+{
+       kvm_trigger_vmexit(regs, KVM_EXIT_SHUTDOWN);
+       /*NORETURN*/
+}
+
+/*
+ * Force a vmexit from interrupt context: save the trapped register
+ * state into the vcpu and longjmp back to the host stack saved by
+ * kvm_vmresume().  Does not return.
+ */
+void kvm_trigger_vmexit(struct pt_regs *regs, int exit_reason)
+{
+       struct kvm_vcpu *vcpu = current_thread_info()->vcpu;
+       vcpu->run->exit_reason = exit_reason;
+       vcpu->arch.regs = *regs;
+       vcpu->arch.regs.flags = PT_FLAGS_CALLER_SAVES | PT_FLAGS_RESTORE_REGS;
+       kvm_vmexit(vcpu->arch.host_sp);
+       /*NORETURN*/
+}
+
+/* Module entry point: register with the generic KVM core. */
+static int __init kvm_tile_init(void)
+{
+       return kvm_init(NULL, sizeof(struct kvm_vcpu),
+                       __alignof__(struct kvm_vcpu), THIS_MODULE);
+}
+
+/* Module exit point: unregister from the generic KVM core. */
+static void __exit kvm_tile_exit(void)
+{
+       kvm_exit();
+}
+
+module_init(kvm_tile_init);
+module_exit(kvm_tile_exit);
index 985f59858234d98cf7a29df45d91edc0e6deb973..c4211cbb2021553627534f6071a47cea39f27605 100644 (file)
@@ -4,15 +4,15 @@
 
 lib-y = cacheflush.o checksum.o cpumask.o delay.o uaccess.o \
        memmove.o memcpy_$(BITS).o memchr_$(BITS).o memset_$(BITS).o \
-       strchr_$(BITS).o strlen_$(BITS).o
-
-ifeq ($(CONFIG_TILEGX),y)
-CFLAGS_REMOVE_memcpy_user_64.o = -fno-omit-frame-pointer
-lib-y += memcpy_user_64.o
-else
-lib-y += atomic_32.o atomic_asm_32.o memcpy_tile64.o
-endif
+       strchr_$(BITS).o strlen_$(BITS).o strnlen_$(BITS).o
 
+lib-$(CONFIG_TILEGX) += memcpy_user_64.o
+lib-$(CONFIG_TILEPRO) += atomic_32.o atomic_asm_32.o
 lib-$(CONFIG_SMP) += spinlock_$(BITS).o usercopy_$(BITS).o
 
 obj-$(CONFIG_MODULES) += exports.o
+
+# The finv_buffer_remote() and copy_{to,from}_user() routines can't
+# have -pg added, since they both rely on being leaf functions.
+CFLAGS_REMOVE_cacheflush.o = -pg
+CFLAGS_REMOVE_memcpy_user_64.o = -pg
index f5cada70c3c85c8b77fab9fd66a39ad32939d1de..5d91d1860640f55814d884c1d8e25ece9202671b 100644 (file)
 #include <linux/atomic.h>
 #include <arch/chip.h>
 
-/* See <asm/atomic_32.h> */
-#if ATOMIC_LOCKS_FOUND_VIA_TABLE()
-
-/*
- * A block of memory containing locks for atomic ops. Each instance of this
- * struct will be homed on a different CPU.
- */
-struct atomic_locks_on_cpu {
-       int lock[ATOMIC_HASH_L2_SIZE];
-} __attribute__((aligned(ATOMIC_HASH_L2_SIZE * 4)));
-
-static DEFINE_PER_CPU(struct atomic_locks_on_cpu, atomic_lock_pool);
-
-/* The locks we'll use until __init_atomic_per_cpu is called. */
-static struct atomic_locks_on_cpu __initdata initial_atomic_locks;
-
-/* Hash into this vector to get a pointer to lock for the given atomic. */
-struct atomic_locks_on_cpu *atomic_lock_ptr[ATOMIC_HASH_L1_SIZE]
-       __write_once = {
-       [0 ... ATOMIC_HASH_L1_SIZE-1] (&initial_atomic_locks)
-};
-
-#else /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */
-
 /* This page is remapped on startup to be hash-for-home. */
 int atomic_locks[PAGE_SIZE / sizeof(int)] __page_aligned_bss;
 
-#endif /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */
-
 int *__atomic_hashed_lock(volatile void *v)
 {
        /* NOTE: this code must match "sys_cmpxchg" in kernel/intvec_32.S */
-#if ATOMIC_LOCKS_FOUND_VIA_TABLE()
-       unsigned long i =
-               (unsigned long) v & ((PAGE_SIZE-1) & -sizeof(long long));
-       unsigned long n = __insn_crc32_32(0, i);
-
-       /* Grab high bits for L1 index. */
-       unsigned long l1_index = n >> ((sizeof(n) * 8) - ATOMIC_HASH_L1_SHIFT);
-       /* Grab low bits for L2 index. */
-       unsigned long l2_index = n & (ATOMIC_HASH_L2_SIZE - 1);
-
-       return &atomic_lock_ptr[l1_index]->lock[l2_index];
-#else
        /*
         * Use bits [3, 3 + ATOMIC_HASH_SHIFT) as the lock index.
         * Using mm works here because atomic_locks is page aligned.
@@ -72,26 +34,13 @@ int *__atomic_hashed_lock(volatile void *v)
                                      (unsigned long)atomic_locks,
                                      2, (ATOMIC_HASH_SHIFT + 2) - 1);
        return (int *)ptr;
-#endif
 }
 
 #ifdef CONFIG_SMP
 /* Return whether the passed pointer is a valid atomic lock pointer. */
 static int is_atomic_lock(int *p)
 {
-#if ATOMIC_LOCKS_FOUND_VIA_TABLE()
-       int i;
-       for (i = 0; i < ATOMIC_HASH_L1_SIZE; ++i) {
-
-               if (p >= &atomic_lock_ptr[i]->lock[0] &&
-                   p < &atomic_lock_ptr[i]->lock[ATOMIC_HASH_L2_SIZE]) {
-                       return 1;
-               }
-       }
-       return 0;
-#else
        return p >= &atomic_locks[0] && p < &atomic_locks[ATOMIC_HASH_SIZE];
-#endif
 }
 
 void __atomic_fault_unlock(int *irqlock_word)
@@ -208,54 +157,8 @@ struct __get_user __atomic_bad_address(int __user *addr)
 }
 
 
-#if CHIP_HAS_CBOX_HOME_MAP()
-static int __init noatomichash(char *str)
-{
-       pr_warning("noatomichash is deprecated.\n");
-       return 1;
-}
-__setup("noatomichash", noatomichash);
-#endif
-
 void __init __init_atomic_per_cpu(void)
 {
-#if ATOMIC_LOCKS_FOUND_VIA_TABLE()
-
-       unsigned int i;
-       int actual_cpu;
-
-       /*
-        * Before this is called from setup, we just have one lock for
-        * all atomic objects/operations.  Here we replace the
-        * elements of atomic_lock_ptr so that they point at per_cpu
-        * integers.  This seemingly over-complex approach stems from
-        * the fact that DEFINE_PER_CPU defines an entry for each cpu
-        * in the grid, not each cpu from 0..ATOMIC_HASH_SIZE-1.  But
-        * for efficient hashing of atomics to their locks we want a
-        * compile time constant power of 2 for the size of this
-        * table, so we use ATOMIC_HASH_SIZE.
-        *
-        * Here we populate atomic_lock_ptr from the per cpu
-        * atomic_lock_pool, interspersing by actual cpu so that
-        * subsequent elements are homed on consecutive cpus.
-        */
-
-       actual_cpu = cpumask_first(cpu_possible_mask);
-
-       for (i = 0; i < ATOMIC_HASH_L1_SIZE; ++i) {
-               /*
-                * Preincrement to slightly bias against using cpu 0,
-                * which has plenty of stuff homed on it already.
-                */
-               actual_cpu = cpumask_next(actual_cpu, cpu_possible_mask);
-               if (actual_cpu >= nr_cpu_ids)
-                       actual_cpu = cpumask_first(cpu_possible_mask);
-
-               atomic_lock_ptr[i] = &per_cpu(atomic_lock_pool, actual_cpu);
-       }
-
-#else /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */
-
        /* Validate power-of-two and "bigger than cpus" assumption */
        BUILD_BUG_ON(ATOMIC_HASH_SIZE & (ATOMIC_HASH_SIZE-1));
        BUG_ON(ATOMIC_HASH_SIZE < nr_cpu_ids);
@@ -279,6 +182,4 @@ void __init __init_atomic_per_cpu(void)
         * That should not produce more indices than ATOMIC_HASH_SIZE.
         */
        BUILD_BUG_ON((PAGE_SIZE >> 3) > ATOMIC_HASH_SIZE);
-
-#endif /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */
 }
index 30638042691ddba9ed1fee52d8c086da2502dbd5..6bda3132cd619e0e019e5b840d28bbc72fa9e9bb 100644 (file)
@@ -164,6 +164,7 @@ STD_ENTRY_SECTION(__atomic\name, .text.atomic)
        STD_ENDPROC(__atomic\name)
        .ifc \bitwidth,32
        .pushsection __ex_table,"a"
+       .align  4
        .word   1b, __atomic\name
        .word   2b, __atomic\name
        .word   __atomic\name, __atomic_bad_address
index 8f8ad814b1398619314b5a26d62695279d4f63f1..9c0ec22009a5ed833fab76a6e8ee94a9c1a1685f 100644 (file)
@@ -36,7 +36,8 @@ static inline void force_load(char *p)
  * core (if "!hfh") or homed via hash-for-home (if "hfh"), waiting
  * until the memory controller holds the flushed values.
  */
-void finv_buffer_remote(void *buffer, size_t size, int hfh)
+void __attribute__((optimize("omit-frame-pointer")))
+finv_buffer_remote(void *buffer, size_t size, int hfh)
 {
        char *p, *base;
        size_t step_size, load_count;
@@ -147,18 +148,21 @@ void finv_buffer_remote(void *buffer, size_t size, int hfh)
                force_load(p);
 
        /*
-        * Repeat, but with inv's instead of loads, to get rid of the
+        * Repeat, but with finv's instead of loads, to get rid of the
         * data we just loaded into our own cache and the old home L3.
-        * No need to unroll since inv's don't target a register.
+        * No need to unroll since finv's don't target a register.
+        * The finv's are guaranteed not to actually flush the data in
+        * the buffer back to their home, since we just read it, so the
+        * lines are clean in cache; we will only invalidate those lines.
         */
        p = (char *)buffer + size - 1;
-       __insn_inv(p);
+       __insn_finv(p);
        p -= step_size;
        p = (char *)((unsigned long)p | (step_size - 1));
        for (; p >= base; p -= step_size)
-               __insn_inv(p);
+               __insn_finv(p);
 
-       /* Wait for the load+inv's (and thus finvs) to have completed. */
+       /* Wait for these finv's (and thus the first finvs) to be done. */
        __insn_mf();
 
 #ifdef __tilegx__
index a93b02a252227c3b8bd40929e663e79f3f757862..1590282b54b70dcc2fc8acedcc40894127d92fdd 100644 (file)
@@ -22,7 +22,6 @@ EXPORT_SYMBOL(strnlen_user_asm);
 EXPORT_SYMBOL(strncpy_from_user_asm);
 EXPORT_SYMBOL(clear_user_asm);
 EXPORT_SYMBOL(flush_user_asm);
-EXPORT_SYMBOL(inv_user_asm);
 EXPORT_SYMBOL(finv_user_asm);
 
 /* arch/tile/kernel/entry.S */
@@ -34,6 +33,12 @@ EXPORT_SYMBOL(dump_stack);
 /* arch/tile/kernel/head.S */
 EXPORT_SYMBOL(empty_zero_page);
 
+#ifdef CONFIG_FUNCTION_TRACER
+/* arch/tile/kernel/mcount_64.S */
+#include <asm/ftrace.h>
+EXPORT_SYMBOL(__mcount);
+#endif /* CONFIG_FUNCTION_TRACER */
+
 /* arch/tile/lib/, various memcpy files */
 EXPORT_SYMBOL(memcpy);
 EXPORT_SYMBOL(__copy_to_user_inatomic);
@@ -45,18 +50,26 @@ EXPORT_SYMBOL(__copy_in_user_inatomic);
 
 /* hypervisor glue */
 #include <hv/hypervisor.h>
+EXPORT_SYMBOL(hv_confstr);
+EXPORT_SYMBOL(hv_dev_close);
 EXPORT_SYMBOL(hv_dev_open);
+EXPORT_SYMBOL(hv_dev_poll);
+EXPORT_SYMBOL(hv_dev_poll_cancel);
 EXPORT_SYMBOL(hv_dev_pread);
-EXPORT_SYMBOL(hv_dev_pwrite);
 EXPORT_SYMBOL(hv_dev_preada);
+EXPORT_SYMBOL(hv_dev_pwrite);
 EXPORT_SYMBOL(hv_dev_pwritea);
-EXPORT_SYMBOL(hv_dev_poll);
-EXPORT_SYMBOL(hv_dev_poll_cancel);
-EXPORT_SYMBOL(hv_dev_close);
-EXPORT_SYMBOL(hv_sysconf);
-EXPORT_SYMBOL(hv_confstr);
+EXPORT_SYMBOL(hv_flush_all);
 EXPORT_SYMBOL(hv_get_rtc);
+#ifdef __tilegx__
+EXPORT_SYMBOL(hv_inquire_guest_context);
+EXPORT_SYMBOL(hv_install_guest_context);
+EXPORT_SYMBOL(hv_install_virt_context);
+#endif
+EXPORT_SYMBOL(hv_physaddr_read64);
+EXPORT_SYMBOL(hv_physaddr_write64);
 EXPORT_SYMBOL(hv_set_rtc);
+EXPORT_SYMBOL(hv_sysconf);
 
 /* libgcc.a */
 uint32_t __udivsi3(uint32_t dividend, uint32_t divisor);
index 6f867dbf7c56df79a25ebcc091dfb9b5fa5321b7..f8196b3a950ef4f6fc8b97e615b7d4cd1e352e55 100644 (file)
@@ -36,7 +36,7 @@ void *memchr(const void *s, int c, size_t n)
        p = (const uint64_t *)(s_int & -8);
 
        /* Create eight copies of the byte for which we are looking. */
-       goal = 0x0101010101010101ULL * (uint8_t) c;
+       goal = copy_byte(c);
 
        /* Read the first word, but munge it so that bytes before the array
         * will not match goal.
index 2a419a6122db78e4b45c532de3b5ef6d24cc1cad..a2771ae5da53072188b1b367423af5678579dced 100644 (file)
 
 #include <linux/linkage.h>
 
-/* On TILE64, we wrap these functions via arch/tile/lib/memcpy_tile64.c */
-#if !CHIP_HAS_COHERENT_LOCAL_CACHE()
-#define memcpy __memcpy_asm
-#define __copy_to_user_inatomic __copy_to_user_inatomic_asm
-#define __copy_from_user_inatomic __copy_from_user_inatomic_asm
-#define __copy_from_user_zeroing __copy_from_user_zeroing_asm
-#endif
-
 #define IS_MEMCPY        0
 #define IS_COPY_FROM_USER  1
 #define IS_COPY_FROM_USER_ZEROING  2
@@ -44,6 +36,7 @@
  */
 #define EX \
        .pushsection __ex_table, "a"; \
+       .align 4; \
        .word 9f, memcpy_common_fixup; \
        .popsection; \
        9
@@ -158,12 +151,9 @@ EX:        { sw r0, r3; addi r0, r0, 4; addi r2, r2, -4 }
 
        { addi r3, r1, 60; andi r9, r9, -64 }
 
-#if CHIP_HAS_WH64()
        /* No need to prefetch dst, we'll just do the wh64
         * right before we copy a line.
         */
-#endif
-
 EX:    { lw r5, r3; addi r3, r3, 64; movei r4, 1 }
        /* Intentionally stall for a few cycles to leave L2 cache alone. */
        { bnzt zero, .; move r27, lr }
@@ -171,21 +161,6 @@ EX:        { lw r6, r3; addi r3, r3, 64 }
        /* Intentionally stall for a few cycles to leave L2 cache alone. */
        { bnzt zero, . }
 EX:    { lw r7, r3; addi r3, r3, 64 }
-#if !CHIP_HAS_WH64()
-       /* Prefetch the dest */
-       /* Intentionally stall for a few cycles to leave L2 cache alone. */
-       { bnzt zero, . }
-       /* Use a real load to cause a TLB miss if necessary.  We aren't using
-        * r28, so this should be fine.
-        */
-EX:    { lw r28, r9; addi r9, r9, 64 }
-       /* Intentionally stall for a few cycles to leave L2 cache alone. */
-       { bnzt zero, . }
-       { prefetch r9; addi r9, r9, 64 }
-       /* Intentionally stall for a few cycles to leave L2 cache alone. */
-       { bnzt zero, . }
-       { prefetch r9; addi r9, r9, 64 }
-#endif
        /* Intentionally stall for a few cycles to leave L2 cache alone. */
        { bz zero, .Lbig_loop2 }
 
@@ -286,13 +261,8 @@ EX:        { lw r7, r3; addi r3, r3, 64 }
        /* Fill second L1D line. */
 EX:    { lw r17, r17; addi r1, r1, 48; mvz r3, r13, r1 } /* r17 = WORD_4 */
 
-#if CHIP_HAS_WH64()
        /* Prepare destination line for writing. */
 EX:    { wh64 r9; addi r9, r9, 64 }
-#else
-       /* Prefetch dest line */
-       { prefetch r9; addi r9, r9, 64 }
-#endif
        /* Load seven words that are L1D hits to cover wh64 L2 usage. */
 
        /* Load the three remaining words from the last L1D line, which
@@ -330,16 +300,7 @@ EX:        { lw r18, r1; addi r1, r1, 4 }                  /* r18 = WORD_8 */
 EX:    { sw r0, r16; addi r0, r0, 4; add r16, r0, r2 } /* store(WORD_0) */
 EX:    { sw r0, r13; addi r0, r0, 4; andi r16, r16, -64 } /* store(WORD_1) */
 EX:    { sw r0, r14; addi r0, r0, 4; slt_u r16, r9, r16 } /* store(WORD_2) */
-#if CHIP_HAS_WH64()
 EX:    { sw r0, r15; addi r0, r0, 4; addi r13, sp, -64 } /* store(WORD_3) */
-#else
-       /* Back up the r9 to a cache line we are already storing to
-        * if it gets past the end of the dest vector.  Strictly speaking,
-        * we don't need to back up to the start of a cache line, but it's free
-        * and tidy, so why not?
-        */
-EX:    { sw r0, r15; addi r0, r0, 4; andi r13, r0, -64 } /* store(WORD_3) */
-#endif
        /* Store second L1D line. */
 EX:    { sw r0, r17; addi r0, r0, 4; mvz r9, r16, r13 }/* store(WORD_4) */
 EX:    { sw r0, r19; addi r0, r0, 4 }                  /* store(WORD_5) */
@@ -403,7 +364,6 @@ EX: { sb r0, r3;   addi r0, r0, 1; addi r2, r2, -1 }
 
 .Ldest_is_word_aligned:
 
-#if CHIP_HAS_DWORD_ALIGN()
 EX:    { andi r8, r0, 63; lwadd_na r6, r1, 4}
        { slti_u r9, r2, 64; bz r8, .Ldest_is_L2_line_aligned }
 
@@ -511,26 +471,6 @@ EX:        { swadd r0, r13, 4; addi r2, r2, -32 }
        /* Move r1 back to the point where it corresponds to r0. */
        { addi r1, r1, -4 }
 
-#else /* !CHIP_HAS_DWORD_ALIGN() */
-
-       /* Compute right/left shift counts and load initial source words. */
-       { andi r5, r1, -4; andi r3, r1, 3 }
-EX:    { lw r6, r5; addi r5, r5, 4; shli r3, r3, 3 }
-EX:    { lw r7, r5; addi r5, r5, 4; sub r4, zero, r3 }
-
-       /* Load and store one word at a time, using shifts and ORs
-        * to correct for the misaligned src.
-        */
-.Lcopy_unaligned_src_loop:
-       { shr r6, r6, r3; shl r8, r7, r4 }
-EX:    { lw r7, r5; or r8, r8, r6; move r6, r7 }
-EX:    { sw r0, r8; addi r0, r0, 4; addi r2, r2, -4 }
-       { addi r5, r5, 4; slti_u r8, r2, 8 }
-       { bzt r8, .Lcopy_unaligned_src_loop; addi r1, r1, 4 }
-
-       { bz r2, .Lcopy_unaligned_done }
-#endif /* !CHIP_HAS_DWORD_ALIGN() */
-
        /* Fall through */
 
 /*
@@ -614,5 +554,6 @@ memcpy_fixup_loop:
        .size memcpy_common_fixup, . - memcpy_common_fixup
 
        .section __ex_table,"a"
+       .align 4
        .word .Lcfu, .Lcopy_from_user_fixup_zero_remainder
        .word .Lctu, .Lcopy_to_user_fixup_done
index c79b8e7c6828bd384e0c354dccf72553ef1c9536..4815354b8cd2c0bc50192f43b568b48bc92ba47e 100644 (file)
 /* EXPORT_SYMBOL() is in arch/tile/lib/exports.c since this should be asm. */
 
 /* Must be 8 bytes in size. */
-#define word_t uint64_t
+#define op_t uint64_t
 
-#if CHIP_L2_LINE_SIZE() != 64 && CHIP_L2_LINE_SIZE() != 128
-#error "Assumes 64 or 128 byte line size"
+/* Threshold value for when to enter the unrolled loops. */
+#define        OP_T_THRES      16
+
+#if CHIP_L2_LINE_SIZE() != 64
+#error "Assumes 64 byte line size"
 #endif
 
 /* How many cache lines ahead should we prefetch? */
-#define PREFETCH_LINES_AHEAD 3
+#define PREFETCH_LINES_AHEAD 4
 
 /*
  * Provide "base versions" of load and store for the normal code path.
@@ -51,15 +54,16 @@ void *memcpy(void *__restrict dstv, const void *__restrict srcv, size_t n)
  * macros to return a count of uncopied bytes due to mm fault.
  */
 #define RETVAL 0
-int USERCOPY_FUNC(void *__restrict dstv, const void *__restrict srcv, size_t n)
+int __attribute__((optimize("omit-frame-pointer")))
+USERCOPY_FUNC(void *__restrict dstv, const void *__restrict srcv, size_t n)
 #endif
 {
        char *__restrict dst1 = (char *)dstv;
        const char *__restrict src1 = (const char *)srcv;
        const char *__restrict src1_end;
        const char *__restrict prefetch;
-       word_t *__restrict dst8;    /* 8-byte pointer to destination memory. */
-       word_t final; /* Final bytes to write to trailing word, if any */
+       op_t *__restrict dst8;    /* 8-byte pointer to destination memory. */
+       op_t final; /* Final bytes to write to trailing word, if any */
        long i;
 
        if (n < 16) {
@@ -79,104 +83,228 @@ int USERCOPY_FUNC(void *__restrict dstv, const void *__restrict srcv, size_t n)
        for (i = 0; i < PREFETCH_LINES_AHEAD; i++) {
                __insn_prefetch(prefetch);
                prefetch += CHIP_L2_LINE_SIZE();
-               prefetch = (prefetch > src1_end) ? prefetch : src1;
+               prefetch = (prefetch < src1_end) ? prefetch : src1;
        }
 
        /* Copy bytes until dst is word-aligned. */
-       for (; (uintptr_t)dst1 & (sizeof(word_t) - 1); n--)
+       for (; (uintptr_t)dst1 & (sizeof(op_t) - 1); n--)
                ST1(dst1++, LD1(src1++));
 
        /* 8-byte pointer to destination memory. */
-       dst8 = (word_t *)dst1;
-
-       if (__builtin_expect((uintptr_t)src1 & (sizeof(word_t) - 1), 0)) {
-               /*
-                * Misaligned copy.  Copy 8 bytes at a time, but don't
-                * bother with other fanciness.
-                *
-                * TODO: Consider prefetching and using wh64 as well.
-                */
-
-               /* Create an aligned src8. */
-               const word_t *__restrict src8 =
-                       (const word_t *)((uintptr_t)src1 & -sizeof(word_t));
-               word_t b;
-
-               word_t a = LD8(src8++);
-               for (; n >= sizeof(word_t); n -= sizeof(word_t)) {
-                       b = LD8(src8++);
-                       a = __insn_dblalign(a, b, src1);
-                       ST8(dst8++, a);
-                       a = b;
+       dst8 = (op_t *)dst1;
+
+       if (__builtin_expect((uintptr_t)src1 & (sizeof(op_t) - 1), 0)) {
+               /* Unaligned copy. */
+
+               op_t  tmp0 = 0, tmp1 = 0, tmp2, tmp3;
+               const op_t *src8 = (const op_t *) ((uintptr_t)src1 &
+                                                  -sizeof(op_t));
+               const void *srci = (void *)src1;
+               int m;
+
+               m = (CHIP_L2_LINE_SIZE() << 2) -
+                       (((uintptr_t)dst8) & ((CHIP_L2_LINE_SIZE() << 2) - 1));
+               m = (n < m) ? n : m;
+               m /= sizeof(op_t);
+
+               /* Copy until 'dst' is cache-line-aligned. */
+               n -= (sizeof(op_t) * m);
+
+               switch (m % 4) {
+               case 0:
+                       if (__builtin_expect(!m, 0))
+                               goto _M0;
+                       tmp1 = LD8(src8++);
+                       tmp2 = LD8(src8++);
+                       goto _8B3;
+               case 2:
+                       m += 2;
+                       tmp3 = LD8(src8++);
+                       tmp0 = LD8(src8++);
+                       goto _8B1;
+               case 3:
+                       m += 1;
+                       tmp2 = LD8(src8++);
+                       tmp3 = LD8(src8++);
+                       goto _8B2;
+               case 1:
+                       m--;
+                       tmp0 = LD8(src8++);
+                       tmp1 = LD8(src8++);
+                       if (__builtin_expect(!m, 0))
+                               goto _8B0;
+               }
+
+               do {
+                       tmp2 = LD8(src8++);
+                       tmp0 =  __insn_dblalign(tmp0, tmp1, srci);
+                       ST8(dst8++, tmp0);
+_8B3:
+                       tmp3 = LD8(src8++);
+                       tmp1 = __insn_dblalign(tmp1, tmp2, srci);
+                       ST8(dst8++, tmp1);
+_8B2:
+                       tmp0 = LD8(src8++);
+                       tmp2 = __insn_dblalign(tmp2, tmp3, srci);
+                       ST8(dst8++, tmp2);
+_8B1:
+                       tmp1 = LD8(src8++);
+                       tmp3 = __insn_dblalign(tmp3, tmp0, srci);
+                       ST8(dst8++, tmp3);
+                       m -= 4;
+               } while (m);
+
+_8B0:
+               tmp0 = __insn_dblalign(tmp0, tmp1, srci);
+               ST8(dst8++, tmp0);
+               src8--;
+
+_M0:
+               if (__builtin_expect(n >= CHIP_L2_LINE_SIZE(), 0)) {
+                       op_t tmp4, tmp5, tmp6, tmp7, tmp8;
+
+                       prefetch = ((const char *)src8) +
+                               CHIP_L2_LINE_SIZE() * PREFETCH_LINES_AHEAD;
+
+                       for (tmp0 = LD8(src8++); n >= CHIP_L2_LINE_SIZE();
+                            n -= CHIP_L2_LINE_SIZE()) {
+                               /* Prefetch and advance to next line to
+                                  prefetch, but don't go past the end.  */
+                               __insn_prefetch(prefetch);
+
+                               /* Make sure prefetch got scheduled
+                                  earlier.  */
+                               __asm__ ("" : : : "memory");
+
+                               prefetch += CHIP_L2_LINE_SIZE();
+                               prefetch = (prefetch < src1_end) ? prefetch :
+                                       (const char *) src8;
+
+                               tmp1 = LD8(src8++);
+                               tmp2 = LD8(src8++);
+                               tmp3 = LD8(src8++);
+                               tmp4 = LD8(src8++);
+                               tmp5 = LD8(src8++);
+                               tmp6 = LD8(src8++);
+                               tmp7 = LD8(src8++);
+                               tmp8 = LD8(src8++);
+
+                               tmp0 = __insn_dblalign(tmp0, tmp1, srci);
+                               tmp1 = __insn_dblalign(tmp1, tmp2, srci);
+                               tmp2 = __insn_dblalign(tmp2, tmp3, srci);
+                               tmp3 = __insn_dblalign(tmp3, tmp4, srci);
+                               tmp4 = __insn_dblalign(tmp4, tmp5, srci);
+                               tmp5 = __insn_dblalign(tmp5, tmp6, srci);
+                               tmp6 = __insn_dblalign(tmp6, tmp7, srci);
+                               tmp7 = __insn_dblalign(tmp7, tmp8, srci);
+
+                               __insn_wh64(dst8);
+
+                               ST8(dst8++, tmp0);
+                               ST8(dst8++, tmp1);
+                               ST8(dst8++, tmp2);
+                               ST8(dst8++, tmp3);
+                               ST8(dst8++, tmp4);
+                               ST8(dst8++, tmp5);
+                               ST8(dst8++, tmp6);
+                               ST8(dst8++, tmp7);
+
+                               tmp0 = tmp8;
+                       }
+                       src8--;
+               }
+
+               /* Copy the rest 8-byte chunks. */
+               if (n >= sizeof(op_t)) {
+                       tmp0 = LD8(src8++);
+                       for (; n >= sizeof(op_t); n -= sizeof(op_t)) {
+                               tmp1 = LD8(src8++);
+                               tmp0 = __insn_dblalign(tmp0, tmp1, srci);
+                               ST8(dst8++, tmp0);
+                               tmp0 = tmp1;
+                       }
+                       src8--;
                }
 
                if (n == 0)
                        return RETVAL;
 
-               b = ((const char *)src8 <= src1_end) ? *src8 : 0;
+               tmp0 = LD8(src8++);
+               tmp1 = ((const char *)src8 <= src1_end)
+                       ? LD8((op_t *)src8) : 0;
+               final = __insn_dblalign(tmp0, tmp1, srci);
 
-               /*
-                * Final source bytes to write to trailing partial
-                * word, if any.
-                */
-               final = __insn_dblalign(a, b, src1);
        } else {
                /* Aligned copy. */
 
-               const word_t* __restrict src8 = (const word_t *)src1;
+               const op_t *__restrict src8 = (const op_t *)src1;
 
                /* src8 and dst8 are both word-aligned. */
                if (n >= CHIP_L2_LINE_SIZE()) {
                        /* Copy until 'dst' is cache-line-aligned. */
                        for (; (uintptr_t)dst8 & (CHIP_L2_LINE_SIZE() - 1);
-                            n -= sizeof(word_t))
+                            n -= sizeof(op_t))
                                ST8(dst8++, LD8(src8++));
 
                        for (; n >= CHIP_L2_LINE_SIZE(); ) {
-                               __insn_wh64(dst8);
+                               op_t tmp0, tmp1, tmp2, tmp3;
+                               op_t tmp4, tmp5, tmp6, tmp7;
 
                                /*
                                 * Prefetch and advance to next line
-                                * to prefetch, but don't go past the end
+                                * to prefetch, but don't go past the
+                                * end.
                                 */
                                __insn_prefetch(prefetch);
+
+                               /* Make sure prefetch got scheduled
+                                  earlier.  */
+                               __asm__ ("" : : : "memory");
+
                                prefetch += CHIP_L2_LINE_SIZE();
-                               prefetch = (prefetch > src1_end) ? prefetch :
+                               prefetch = (prefetch < src1_end) ? prefetch :
                                        (const char *)src8;
 
                                /*
-                                * Copy an entire cache line.  Manually
-                                * unrolled to avoid idiosyncracies of
-                                * compiler unrolling.
+                                * Do all the loads before wh64.  This
+                                * is necessary if [src8, src8+7] and
+                                * [dst8, dst8+7] share the same cache
+                                * line and dst8 <= src8, as can be
+                                * the case when called from memmove,
+                                * or with code tested on x86 whose
+                                * memcpy always works with forward
+                                * copies.
                                 */
-#define COPY_WORD(offset) ({ ST8(dst8+offset, LD8(src8+offset)); n -= 8; })
-                               COPY_WORD(0);
-                               COPY_WORD(1);
-                               COPY_WORD(2);
-                               COPY_WORD(3);
-                               COPY_WORD(4);
-                               COPY_WORD(5);
-                               COPY_WORD(6);
-                               COPY_WORD(7);
-#if CHIP_L2_LINE_SIZE() == 128
-                               COPY_WORD(8);
-                               COPY_WORD(9);
-                               COPY_WORD(10);
-                               COPY_WORD(11);
-                               COPY_WORD(12);
-                               COPY_WORD(13);
-                               COPY_WORD(14);
-                               COPY_WORD(15);
-#elif CHIP_L2_LINE_SIZE() != 64
-# error Fix code that assumes particular L2 cache line sizes
-#endif
+                               tmp0 = LD8(src8++);
+                               tmp1 = LD8(src8++);
+                               tmp2 = LD8(src8++);
+                               tmp3 = LD8(src8++);
+                               tmp4 = LD8(src8++);
+                               tmp5 = LD8(src8++);
+                               tmp6 = LD8(src8++);
+                               tmp7 = LD8(src8++);
+
+                               /* wh64 and wait for tmp7 load completion. */
+                               __asm__ ("move %0, %0; wh64 %1\n"
+                                        : : "r"(tmp7), "r"(dst8));
 
-                               dst8 += CHIP_L2_LINE_SIZE() / sizeof(word_t);
-                               src8 += CHIP_L2_LINE_SIZE() / sizeof(word_t);
+                               ST8(dst8++, tmp0);
+                               ST8(dst8++, tmp1);
+                               ST8(dst8++, tmp2);
+                               ST8(dst8++, tmp3);
+                               ST8(dst8++, tmp4);
+                               ST8(dst8++, tmp5);
+                               ST8(dst8++, tmp6);
+                               ST8(dst8++, tmp7);
+
+                               n -= CHIP_L2_LINE_SIZE();
                        }
+#if CHIP_L2_LINE_SIZE() != 64
+# error "Fix code that assumes particular L2 cache line size."
+#endif
                }
 
-               for (; n >= sizeof(word_t); n -= sizeof(word_t))
+               for (; n >= sizeof(op_t); n -= sizeof(op_t))
                        ST8(dst8++, LD8(src8++));
 
                if (__builtin_expect(n == 0, 1))
diff --git a/arch/tile/lib/memcpy_tile64.c b/arch/tile/lib/memcpy_tile64.c
deleted file mode 100644 (file)
index 3bc4b4e..0000000
+++ /dev/null
@@ -1,276 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- *   This program is free software; you can redistribute it and/or
- *   modify it under the terms of the GNU General Public License
- *   as published by the Free Software Foundation, version 2.
- *
- *   This program is distributed in the hope that it will be useful, but
- *   WITHOUT ANY WARRANTY; without even the implied warranty of
- *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- *   NON INFRINGEMENT.  See the GNU General Public License for
- *   more details.
- */
-
-#include <linux/string.h>
-#include <linux/smp.h>
-#include <linux/module.h>
-#include <linux/uaccess.h>
-#include <asm/fixmap.h>
-#include <asm/kmap_types.h>
-#include <asm/tlbflush.h>
-#include <hv/hypervisor.h>
-#include <arch/chip.h>
-
-
-#if !CHIP_HAS_COHERENT_LOCAL_CACHE()
-
-/* Defined in memcpy.S */
-extern unsigned long __memcpy_asm(void *to, const void *from, unsigned long n);
-extern unsigned long __copy_to_user_inatomic_asm(
-       void __user *to, const void *from, unsigned long n);
-extern unsigned long __copy_from_user_inatomic_asm(
-       void *to, const void __user *from, unsigned long n);
-extern unsigned long __copy_from_user_zeroing_asm(
-       void *to, const void __user *from, unsigned long n);
-
-typedef unsigned long (*memcpy_t)(void *, const void *, unsigned long);
-
-/* Size above which to consider TLB games for performance */
-#define LARGE_COPY_CUTOFF 2048
-
-/* Communicate to the simulator what we are trying to do. */
-#define sim_allow_multiple_caching(b) \
-  __insn_mtspr(SPR_SIM_CONTROL, \
-   SIM_CONTROL_ALLOW_MULTIPLE_CACHING | ((b) << _SIM_CONTROL_OPERATOR_BITS))
-
-/*
- * Copy memory by briefly enabling incoherent cacheline-at-a-time mode.
- *
- * We set up our own source and destination PTEs that we fully control.
- * This is the only way to guarantee that we don't race with another
- * thread that is modifying the PTE; we can't afford to try the
- * copy_{to,from}_user() technique of catching the interrupt, since
- * we must run with interrupts disabled to avoid the risk of some
- * other code seeing the incoherent data in our cache.  (Recall that
- * our cache is indexed by PA, so even if the other code doesn't use
- * our kmap_atomic virtual addresses, they'll still hit in cache using
- * the normal VAs that aren't supposed to hit in cache.)
- */
-static void memcpy_multicache(void *dest, const void *source,
-                             pte_t dst_pte, pte_t src_pte, int len)
-{
-       int idx;
-       unsigned long flags, newsrc, newdst;
-       pmd_t *pmdp;
-       pte_t *ptep;
-       int type0, type1;
-       int cpu = get_cpu();
-
-       /*
-        * Disable interrupts so that we don't recurse into memcpy()
-        * in an interrupt handler, nor accidentally reference
-        * the PA of the source from an interrupt routine.  Also
-        * notify the simulator that we're playing games so we don't
-        * generate spurious coherency warnings.
-        */
-       local_irq_save(flags);
-       sim_allow_multiple_caching(1);
-
-       /* Set up the new dest mapping */
-       type0 = kmap_atomic_idx_push();
-       idx = FIX_KMAP_BEGIN + (KM_TYPE_NR * cpu) + type0;
-       newdst = __fix_to_virt(idx) + ((unsigned long)dest & (PAGE_SIZE-1));
-       pmdp = pmd_offset(pud_offset(pgd_offset_k(newdst), newdst), newdst);
-       ptep = pte_offset_kernel(pmdp, newdst);
-       if (pte_val(*ptep) != pte_val(dst_pte)) {
-               set_pte(ptep, dst_pte);
-               local_flush_tlb_page(NULL, newdst, PAGE_SIZE);
-       }
-
-       /* Set up the new source mapping */
-       type1 = kmap_atomic_idx_push();
-       idx += (type0 - type1);
-       src_pte = hv_pte_set_nc(src_pte);
-       src_pte = hv_pte_clear_writable(src_pte);  /* be paranoid */
-       newsrc = __fix_to_virt(idx) + ((unsigned long)source & (PAGE_SIZE-1));
-       pmdp = pmd_offset(pud_offset(pgd_offset_k(newsrc), newsrc), newsrc);
-       ptep = pte_offset_kernel(pmdp, newsrc);
-       __set_pte(ptep, src_pte);   /* set_pte() would be confused by this */
-       local_flush_tlb_page(NULL, newsrc, PAGE_SIZE);
-
-       /* Actually move the data. */
-       __memcpy_asm((void *)newdst, (const void *)newsrc, len);
-
-       /*
-        * Remap the source as locally-cached and not OLOC'ed so that
-        * we can inval without also invaling the remote cpu's cache.
-        * This also avoids known errata with inv'ing cacheable oloc data.
-        */
-       src_pte = hv_pte_set_mode(src_pte, HV_PTE_MODE_CACHE_NO_L3);
-       src_pte = hv_pte_set_writable(src_pte); /* need write access for inv */
-       __set_pte(ptep, src_pte);   /* set_pte() would be confused by this */
-       local_flush_tlb_page(NULL, newsrc, PAGE_SIZE);
-
-       /*
-        * Do the actual invalidation, covering the full L2 cache line
-        * at the end since __memcpy_asm() is somewhat aggressive.
-        */
-       __inv_buffer((void *)newsrc, len);
-
-       /*
-        * We're done: notify the simulator that all is back to normal,
-        * and re-enable interrupts and pre-emption.
-        */
-       kmap_atomic_idx_pop();
-       kmap_atomic_idx_pop();
-       sim_allow_multiple_caching(0);
-       local_irq_restore(flags);
-       put_cpu();
-}
-
-/*
- * Identify large copies from remotely-cached memory, and copy them
- * via memcpy_multicache() if they look good, otherwise fall back
- * to the particular kind of copying passed as the memcpy_t function.
- */
-static unsigned long fast_copy(void *dest, const void *source, int len,
-                              memcpy_t func)
-{
-       /*
-        * Check if it's big enough to bother with.  We may end up doing a
-        * small copy via TLB manipulation if we're near a page boundary,
-        * but presumably we'll make it up when we hit the second page.
-        */
-       while (len >= LARGE_COPY_CUTOFF) {
-               int copy_size, bytes_left_on_page;
-               pte_t *src_ptep, *dst_ptep;
-               pte_t src_pte, dst_pte;
-               struct page *src_page, *dst_page;
-
-               /* Is the source page oloc'ed to a remote cpu? */
-retry_source:
-               src_ptep = virt_to_pte(current->mm, (unsigned long)source);
-               if (src_ptep == NULL)
-                       break;
-               src_pte = *src_ptep;
-               if (!hv_pte_get_present(src_pte) ||
-                   !hv_pte_get_readable(src_pte) ||
-                   hv_pte_get_mode(src_pte) != HV_PTE_MODE_CACHE_TILE_L3)
-                       break;
-               if (get_remote_cache_cpu(src_pte) == smp_processor_id())
-                       break;
-               src_page = pfn_to_page(pte_pfn(src_pte));
-               get_page(src_page);
-               if (pte_val(src_pte) != pte_val(*src_ptep)) {
-                       put_page(src_page);
-                       goto retry_source;
-               }
-               if (pte_huge(src_pte)) {
-                       /* Adjust the PTE to correspond to a small page */
-                       int pfn = pte_pfn(src_pte);
-                       pfn += (((unsigned long)source & (HPAGE_SIZE-1))
-                               >> PAGE_SHIFT);
-                       src_pte = pfn_pte(pfn, src_pte);
-                       src_pte = pte_mksmall(src_pte);
-               }
-
-               /* Is the destination page writable? */
-retry_dest:
-               dst_ptep = virt_to_pte(current->mm, (unsigned long)dest);
-               if (dst_ptep == NULL) {
-                       put_page(src_page);
-                       break;
-               }
-               dst_pte = *dst_ptep;
-               if (!hv_pte_get_present(dst_pte) ||
-                   !hv_pte_get_writable(dst_pte)) {
-                       put_page(src_page);
-                       break;
-               }
-               dst_page = pfn_to_page(pte_pfn(dst_pte));
-               if (dst_page == src_page) {
-                       /*
-                        * Source and dest are on the same page; this
-                        * potentially exposes us to incoherence if any
-                        * part of src and dest overlap on a cache line.
-                        * Just give up rather than trying to be precise.
-                        */
-                       put_page(src_page);
-                       break;
-               }
-               get_page(dst_page);
-               if (pte_val(dst_pte) != pte_val(*dst_ptep)) {
-                       put_page(dst_page);
-                       goto retry_dest;
-               }
-               if (pte_huge(dst_pte)) {
-                       /* Adjust the PTE to correspond to a small page */
-                       int pfn = pte_pfn(dst_pte);
-                       pfn += (((unsigned long)dest & (HPAGE_SIZE-1))
-                               >> PAGE_SHIFT);
-                       dst_pte = pfn_pte(pfn, dst_pte);
-                       dst_pte = pte_mksmall(dst_pte);
-               }
-
-               /* All looks good: create a cachable PTE and copy from it */
-               copy_size = len;
-               bytes_left_on_page =
-                       PAGE_SIZE - (((int)source) & (PAGE_SIZE-1));
-               if (copy_size > bytes_left_on_page)
-                       copy_size = bytes_left_on_page;
-               bytes_left_on_page =
-                       PAGE_SIZE - (((int)dest) & (PAGE_SIZE-1));
-               if (copy_size > bytes_left_on_page)
-                       copy_size = bytes_left_on_page;
-               memcpy_multicache(dest, source, dst_pte, src_pte, copy_size);
-
-               /* Release the pages */
-               put_page(dst_page);
-               put_page(src_page);
-
-               /* Continue on the next page */
-               dest += copy_size;
-               source += copy_size;
-               len -= copy_size;
-       }
-
-       return func(dest, source, len);
-}
-
-void *memcpy(void *to, const void *from, __kernel_size_t n)
-{
-       if (n < LARGE_COPY_CUTOFF)
-               return (void *)__memcpy_asm(to, from, n);
-       else
-               return (void *)fast_copy(to, from, n, __memcpy_asm);
-}
-
-unsigned long __copy_to_user_inatomic(void __user *to, const void *from,
-                                     unsigned long n)
-{
-       if (n < LARGE_COPY_CUTOFF)
-               return __copy_to_user_inatomic_asm(to, from, n);
-       else
-               return fast_copy(to, from, n, __copy_to_user_inatomic_asm);
-}
-
-unsigned long __copy_from_user_inatomic(void *to, const void __user *from,
-                                       unsigned long n)
-{
-       if (n < LARGE_COPY_CUTOFF)
-               return __copy_from_user_inatomic_asm(to, from, n);
-       else
-               return fast_copy(to, from, n, __copy_from_user_inatomic_asm);
-}
-
-unsigned long __copy_from_user_zeroing(void *to, const void __user *from,
-                                      unsigned long n)
-{
-       if (n < LARGE_COPY_CUTOFF)
-               return __copy_from_user_zeroing_asm(to, from, n);
-       else
-               return fast_copy(to, from, n, __copy_from_user_zeroing_asm);
-}
-
-#endif /* !CHIP_HAS_COHERENT_LOCAL_CACHE() */
index 37440caa7370fe3d4c052086c6143bb6ab8e583c..88c7016492c4dcbcddbc11e5851e86d9fe2608aa 100644 (file)
@@ -31,6 +31,7 @@
                    ".pushsection .coldtext.memcpy,\"ax\";"     \
                    "2: { move r0, %2; jrp lr };"               \
                    ".section __ex_table,\"a\";"                \
+                   ".align 8;"                                 \
                    ".quad 1b, 2b;"                             \
                    ".popsection"                               \
                    : "=m" (*(p)) : "r" (v), "r" (n));          \
@@ -43,6 +44,7 @@
                    ".pushsection .coldtext.memcpy,\"ax\";"     \
                    "2: { move r0, %2; jrp lr };"               \
                    ".section __ex_table,\"a\";"                \
+                   ".align 8;"                                 \
                    ".quad 1b, 2b;"                             \
                    ".popsection"                               \
                    : "=r" (__v) : "m" (*(p)), "r" (n));        \
index 57dbb3a5bff86a59e71054100c4ac1c885402a7b..2042bfe6595f6ca8dc98540cb18b9bccb7a1bc38 100644 (file)
  *   more details.
  */
 
-#include <arch/chip.h>
-
 #include <linux/types.h>
 #include <linux/string.h>
 #include <linux/module.h>
-
-#undef memset
+#include <arch/chip.h>
 
 void *memset(void *s, int c, size_t n)
 {
@@ -26,11 +23,7 @@ void *memset(void *s, int c, size_t n)
        int n32;
        uint32_t v16, v32;
        uint8_t *out8 = s;
-#if !CHIP_HAS_WH64()
-       int ahead32;
-#else
        int to_align32;
-#endif
 
        /* Experimentation shows that a trivial tight loop is a win up until
         * around a size of 20, where writing a word at a time starts to win.
@@ -61,21 +54,6 @@ void *memset(void *s, int c, size_t n)
                return s;
        }
 
-#if !CHIP_HAS_WH64()
-       /* Use a spare issue slot to start prefetching the first cache
-        * line early. This instruction is free as the store can be buried
-        * in otherwise idle issue slots doing ALU ops.
-        */
-       __insn_prefetch(out8);
-
-       /* We prefetch the end so that a short memset that spans two cache
-        * lines gets some prefetching benefit. Again we believe this is free
-        * to issue.
-        */
-       __insn_prefetch(&out8[n - 1]);
-#endif /* !CHIP_HAS_WH64() */
-
-
        /* Align 'out8'. We know n >= 3 so this won't write past the end. */
        while (((uintptr_t) out8 & 3) != 0) {
                *out8++ = c;
@@ -96,90 +74,6 @@ void *memset(void *s, int c, size_t n)
        /* This must be at least 8 or the following loop doesn't work. */
 #define CACHE_LINE_SIZE_IN_WORDS (CHIP_L2_LINE_SIZE() / 4)
 
-#if !CHIP_HAS_WH64()
-
-       ahead32 = CACHE_LINE_SIZE_IN_WORDS;
-
-       /* We already prefetched the first and last cache lines, so
-        * we only need to do more prefetching if we are storing
-        * to more than two cache lines.
-        */
-       if (n32 > CACHE_LINE_SIZE_IN_WORDS * 2) {
-               int i;
-
-               /* Prefetch the next several cache lines.
-                * This is the setup code for the software-pipelined
-                * loop below.
-                */
-#define MAX_PREFETCH 5
-               ahead32 = n32 & -CACHE_LINE_SIZE_IN_WORDS;
-               if (ahead32 > MAX_PREFETCH * CACHE_LINE_SIZE_IN_WORDS)
-                       ahead32 = MAX_PREFETCH * CACHE_LINE_SIZE_IN_WORDS;
-
-               for (i = CACHE_LINE_SIZE_IN_WORDS;
-                    i < ahead32; i += CACHE_LINE_SIZE_IN_WORDS)
-                       __insn_prefetch(&out32[i]);
-       }
-
-       if (n32 > ahead32) {
-               while (1) {
-                       int j;
-
-                       /* Prefetch by reading one word several cache lines
-                        * ahead.  Since loads are non-blocking this will
-                        * cause the full cache line to be read while we are
-                        * finishing earlier cache lines.  Using a store
-                        * here causes microarchitectural performance
-                        * problems where a victimizing store miss goes to
-                        * the head of the retry FIFO and locks the pipe for
-                        * a few cycles.  So a few subsequent stores in this
-                        * loop go into the retry FIFO, and then later
-                        * stores see other stores to the same cache line
-                        * are already in the retry FIFO and themselves go
-                        * into the retry FIFO, filling it up and grinding
-                        * to a halt waiting for the original miss to be
-                        * satisfied.
-                        */
-                       __insn_prefetch(&out32[ahead32]);
-
-#if CACHE_LINE_SIZE_IN_WORDS % 4 != 0
-#error "Unhandled CACHE_LINE_SIZE_IN_WORDS"
-#endif
-
-                       n32 -= CACHE_LINE_SIZE_IN_WORDS;
-
-                       /* Save icache space by only partially unrolling
-                        * this loop.
-                        */
-                       for (j = CACHE_LINE_SIZE_IN_WORDS / 4; j > 0; j--) {
-                               *out32++ = v32;
-                               *out32++ = v32;
-                               *out32++ = v32;
-                               *out32++ = v32;
-                       }
-
-                       /* To save compiled code size, reuse this loop even
-                        * when we run out of prefetching to do by dropping
-                        * ahead32 down.
-                        */
-                       if (n32 <= ahead32) {
-                               /* Not even a full cache line left,
-                                * so stop now.
-                                */
-                               if (n32 < CACHE_LINE_SIZE_IN_WORDS)
-                                       break;
-
-                               /* Choose a small enough value that we don't
-                                * prefetch past the end.  There's no sense
-                                * in touching cache lines we don't have to.
-                                */
-                               ahead32 = CACHE_LINE_SIZE_IN_WORDS - 1;
-                       }
-               }
-       }
-
-#else /* CHIP_HAS_WH64() */
-
        /* Determine how many words we need to emit before the 'out32'
         * pointer becomes aligned modulo the cache line size.
         */
@@ -236,8 +130,6 @@ void *memset(void *s, int c, size_t n)
                n32 &= CACHE_LINE_SIZE_IN_WORDS - 1;
        }
 
-#endif /* CHIP_HAS_WH64() */
-
        /* Now handle any leftover values. */
        if (n32 != 0) {
                do {
index 3873085711d58fce89b714ed20dd1a090ceaf486..03ef69cd73decac97b4e6e8c584c800ca0fc58e7 100644 (file)
  *   more details.
  */
 
-#include <arch/chip.h>
-
 #include <linux/types.h>
 #include <linux/string.h>
 #include <linux/module.h>
-
-#undef memset
+#include <arch/chip.h>
+#include "string-endian.h"
 
 void *memset(void *s, int c, size_t n)
 {
@@ -70,8 +68,7 @@ void *memset(void *s, int c, size_t n)
        n64 = n >> 3;
 
        /* Tile input byte out to 64 bits. */
-       /* KLUDGE */
-       v64 = 0x0101010101010101ULL * (uint8_t)c;
+       v64 = copy_byte(c);
 
        /* This must be at least 8 or the following loop doesn't work. */
 #define CACHE_LINE_SIZE_IN_DOUBLEWORDS (CHIP_L2_LINE_SIZE() / 8)
index c94e6f7ae7b53d2eabe658c3945c9f19bd2e9814..841fe69630190503a3135e65b6e637447242207d 100644 (file)
@@ -16,8 +16,6 @@
 #include <linux/string.h>
 #include <linux/module.h>
 
-#undef strchr
-
 char *strchr(const char *s, int c)
 {
        int z, g;
index f39f9dc422b02e44853ff54e14a0dc3cdd277380..fe6e31c06f8deb1bc2d96ddc8f17bdb070105720 100644 (file)
@@ -26,7 +26,7 @@ char *strchr(const char *s, int c)
        const uint64_t *p = (const uint64_t *)(s_int & -8);
 
        /* Create eight copies of the byte for which we are looking. */
-       const uint64_t goal = 0x0101010101010101ULL * (uint8_t) c;
+       const uint64_t goal = copy_byte(c);
 
        /* Read the first aligned word, but force bytes before the string to
         * match neither zero nor goal (we make sure the high bit of each
index c0eed7ce69c31b03c1574c37ffb60d95ea78f463..2e49cbfe937173637ec713eb1ad60e12f74b3896 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright 2011 Tilera Corporation. All Rights Reserved.
+ * Copyright 2013 Tilera Corporation. All Rights Reserved.
  *
  *   This program is free software; you can redistribute it and/or
  *   modify it under the terms of the GNU General Public License
 #define CFZ(x) __insn_clz(x)
 #define REVCZ(x) __insn_ctz(x)
 #endif
+
+/*
+ * Create eight copies of the byte in a uint64_t.  Byte Shuffle uses
+ * the bytes of srcB as the index into the dest vector to select a
+ * byte.  With all indices of zero, the first byte is copied into all
+ * the other bytes.
+ */
+static inline uint64_t copy_byte(uint8_t byte)
+{
+       return __insn_shufflebytes(byte, 0, 0);
+}
index 4974292a553498bc789dae5e4e938e95e311051c..f26f88e11e4a8d9885ba10eec773b4181a5739a8 100644 (file)
@@ -16,8 +16,6 @@
 #include <linux/string.h>
 #include <linux/module.h>
 
-#undef strlen
-
 size_t strlen(const char *s)
 {
        /* Get an aligned pointer. */
diff --git a/arch/tile/lib/strnlen_32.c b/arch/tile/lib/strnlen_32.c
new file mode 100644 (file)
index 0000000..1434141
--- /dev/null
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2013 Tilera Corporation. All Rights Reserved.
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful, but
+ *   WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ *   NON INFRINGEMENT.  See the GNU General Public License for
+ *   more details.
+ */
+
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/module.h>
+
+size_t strnlen(const char *s, size_t count)
+{
+       /* Get an aligned pointer. */
+       const uintptr_t s_int = (uintptr_t) s;
+       const uint32_t *p = (const uint32_t *)(s_int & -4);
+       size_t bytes_read = sizeof(*p) - (s_int & (sizeof(*p) - 1));
+       size_t len;
+       uint32_t v, bits;
+
+       /* Avoid page fault risk by not reading any bytes when count is 0. */
+       if (count == 0)
+               return 0;
+
+       /* Read first word, but force bytes before the string to be nonzero. */
+       v = *p | ((1 << ((s_int << 3) & 31)) - 1);
+
+       while ((bits = __insn_seqb(v, 0)) == 0) {
+               if (bytes_read >= count) {
+                       /* Read COUNT bytes and didn't find the terminator. */
+                       return count;
+               }
+               v = *++p;
+               bytes_read += sizeof(v);
+       }
+
+       len = ((const char *) p) + (__insn_ctz(bits) >> 3) - s;
+       return (len < count ? len : count);
+}
+EXPORT_SYMBOL(strnlen);
diff --git a/arch/tile/lib/strnlen_64.c b/arch/tile/lib/strnlen_64.c
new file mode 100644 (file)
index 0000000..2e8de6a
--- /dev/null
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2013 Tilera Corporation. All Rights Reserved.
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful, but
+ *   WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ *   NON INFRINGEMENT.  See the GNU General Public License for
+ *   more details.
+ */
+
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/module.h>
+#include "string-endian.h"
+
+size_t strnlen(const char *s, size_t count)
+{
+       /* Get an aligned pointer. */
+       const uintptr_t s_int = (uintptr_t) s;
+       const uint64_t *p = (const uint64_t *)(s_int & -8);
+       size_t bytes_read = sizeof(*p) - (s_int & (sizeof(*p) - 1));
+       size_t len;
+       uint64_t v, bits;
+
+       /* Avoid page fault risk by not reading any bytes when count is 0. */
+       if (count == 0)
+               return 0;
+
+       /* Read and MASK the first word. */
+       v = *p | MASK(s_int);
+
+       while ((bits = __insn_v1cmpeqi(v, 0)) == 0) {
+               if (bytes_read >= count) {
+                       /* Read COUNT bytes and didn't find the terminator. */
+                       return count;
+               }
+               v = *++p;
+               bytes_read += sizeof(v);
+       }
+
+       len = ((const char *) p) + (CFZ(bits) >> 3) - s;
+       return (len < count ? len : count);
+}
+EXPORT_SYMBOL(strnlen);
index b62d002af0096fe89793f5fb4eba6c357ef55922..1bc162224638b48f7ba16c27f0317749d399184e 100644 (file)
@@ -36,6 +36,7 @@ strnlen_user_fault:
        { move r0, zero; jrp lr }
        ENDPROC(strnlen_user_fault)
        .section __ex_table,"a"
+       .align 4
        .word 1b, strnlen_user_fault
        .popsection
 
@@ -47,18 +48,20 @@ strnlen_user_fault:
  */
 STD_ENTRY(strncpy_from_user_asm)
        { bz r2, 2f; move r3, r0 }
-1:      { lb_u r4, r1; addi r1, r1, 1; addi r2, r2, -1 }
+1:     { lb_u r4, r1; addi r1, r1, 1; addi r2, r2, -1 }
        { sb r0, r4; addi r0, r0, 1 }
-       bz r2, 2f
-       bnzt r4, 1b
-       addi r0, r0, -1   /* don't count the trailing NUL */
-2:      { sub r0, r0, r3; jrp lr }
+       bz r4, 2f
+       bnzt r2, 1b
+       { sub r0, r0, r3; jrp lr }
+2:     addi r0, r0, -1   /* don't count the trailing NUL */
+       { sub r0, r0, r3; jrp lr }
        STD_ENDPROC(strncpy_from_user_asm)
        .pushsection .fixup,"ax"
 strncpy_from_user_fault:
        { movei r0, -EFAULT; jrp lr }
        ENDPROC(strncpy_from_user_fault)
        .section __ex_table,"a"
+       .align 4
        .word 1b, strncpy_from_user_fault
        .popsection
 
@@ -77,6 +80,7 @@ STD_ENTRY(clear_user_asm)
        bnzt r1, 1b
 2:      { move r0, r1; jrp lr }
        .pushsection __ex_table,"a"
+       .align 4
        .word 1b, 2b
        .popsection
 
@@ -86,6 +90,7 @@ STD_ENTRY(clear_user_asm)
 2:      { move r0, r1; jrp lr }
        STD_ENDPROC(clear_user_asm)
        .pushsection __ex_table,"a"
+       .align 4
        .word 1b, 2b
        .popsection
 
@@ -105,25 +110,7 @@ STD_ENTRY(flush_user_asm)
 2:      { move r0, r1; jrp lr }
        STD_ENDPROC(flush_user_asm)
        .pushsection __ex_table,"a"
-       .word 1b, 2b
-       .popsection
-
-/*
- * inv_user_asm takes the user target address in r0 and the
- * number of bytes to invalidate in r1.
- * It returns the number of not inv'able bytes (hopefully zero) in r0.
- */
-STD_ENTRY(inv_user_asm)
-       bz r1, 2f
-       { movei r2, L2_CACHE_BYTES; add r1, r0, r1 }
-       { sub r2, zero, r2; addi r1, r1, L2_CACHE_BYTES-1 }
-       { and r0, r0, r2; and r1, r1, r2 }
-       { sub r1, r1, r0 }
-1:      { inv r0; addi r1, r1, -CHIP_INV_STRIDE() }
-       { addi r0, r0, CHIP_INV_STRIDE(); bnzt r1, 1b }
-2:      { move r0, r1; jrp lr }
-       STD_ENDPROC(inv_user_asm)
-       .pushsection __ex_table,"a"
+       .align 4
        .word 1b, 2b
        .popsection
 
@@ -143,5 +130,6 @@ STD_ENTRY(finv_user_asm)
 2:      { move r0, r1; jrp lr }
        STD_ENDPROC(finv_user_asm)
        .pushsection __ex_table,"a"
+       .align 4
        .word 1b, 2b
        .popsection
index adb2dbbc70cd037d5d30b0d5c448da17470db3ef..b3b31a3306f8099ab38dfec13c97885306a8fbc0 100644 (file)
@@ -36,6 +36,7 @@ strnlen_user_fault:
        { move r0, zero; jrp lr }
        ENDPROC(strnlen_user_fault)
        .section __ex_table,"a"
+       .align 8
        .quad 1b, strnlen_user_fault
        .popsection
 
@@ -47,18 +48,20 @@ strnlen_user_fault:
  */
 STD_ENTRY(strncpy_from_user_asm)
        { beqz r2, 2f; move r3, r0 }
-1:      { ld1u r4, r1; addi r1, r1, 1; addi r2, r2, -1 }
+1:     { ld1u r4, r1; addi r1, r1, 1; addi r2, r2, -1 }
        { st1 r0, r4; addi r0, r0, 1 }
-       beqz r2, 2f
-       bnezt r4, 1b
-       addi r0, r0, -1   /* don't count the trailing NUL */
-2:      { sub r0, r0, r3; jrp lr }
+       beqz r4, 2f
+       bnezt r2, 1b
+       { sub r0, r0, r3; jrp lr }
+2:     addi r0, r0, -1   /* don't count the trailing NUL */
+       { sub r0, r0, r3; jrp lr }
        STD_ENDPROC(strncpy_from_user_asm)
        .pushsection .fixup,"ax"
 strncpy_from_user_fault:
        { movei r0, -EFAULT; jrp lr }
        ENDPROC(strncpy_from_user_fault)
        .section __ex_table,"a"
+       .align 8
        .quad 1b, strncpy_from_user_fault
        .popsection
 
@@ -77,6 +80,7 @@ STD_ENTRY(clear_user_asm)
        bnezt r1, 1b
 2:      { move r0, r1; jrp lr }
        .pushsection __ex_table,"a"
+       .align 8
        .quad 1b, 2b
        .popsection
 
@@ -86,6 +90,7 @@ STD_ENTRY(clear_user_asm)
 2:      { move r0, r1; jrp lr }
        STD_ENDPROC(clear_user_asm)
        .pushsection __ex_table,"a"
+       .align 8
        .quad 1b, 2b
        .popsection
 
@@ -105,25 +110,7 @@ STD_ENTRY(flush_user_asm)
 2:      { move r0, r1; jrp lr }
        STD_ENDPROC(flush_user_asm)
        .pushsection __ex_table,"a"
-       .quad 1b, 2b
-       .popsection
-
-/*
- * inv_user_asm takes the user target address in r0 and the
- * number of bytes to invalidate in r1.
- * It returns the number of not inv'able bytes (hopefully zero) in r0.
- */
-STD_ENTRY(inv_user_asm)
-       beqz r1, 2f
-       { movei r2, L2_CACHE_BYTES; add r1, r0, r1 }
-       { sub r2, zero, r2; addi r1, r1, L2_CACHE_BYTES-1 }
-       { and r0, r0, r2; and r1, r1, r2 }
-       { sub r1, r1, r0 }
-1:      { inv r0; addi r1, r1, -CHIP_INV_STRIDE() }
-       { addi r0, r0, CHIP_INV_STRIDE(); bnezt r1, 1b }
-2:      { move r0, r1; jrp lr }
-       STD_ENDPROC(inv_user_asm)
-       .pushsection __ex_table,"a"
+       .align 8
        .quad 1b, 2b
        .popsection
 
@@ -143,5 +130,6 @@ STD_ENTRY(finv_user_asm)
 2:      { move r0, r1; jrp lr }
        STD_ENDPROC(finv_user_asm)
        .pushsection __ex_table,"a"
+       .align 8
        .quad 1b, 2b
        .popsection
index 743c951c61b061b516e1bb3ea06b397e42700d91..86cff48c42969302c04a2a80f16651c407503d7b 100644 (file)
@@ -21,7 +21,8 @@
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
 #include <asm/sections.h>
-#include <arch/sim_def.h>
+#include <asm/vdso.h>
+#include <arch/sim.h>
 
 /* Notify a running simulator, if any, that an exec just occurred. */
 static void sim_notify_exec(const char *binary_name)
@@ -38,21 +39,57 @@ static void sim_notify_exec(const char *binary_name)
 
 static int notify_exec(struct mm_struct *mm)
 {
-       int retval = 0;  /* failure */
-
-       if (mm->exe_file) {
-               char *buf = (char *) __get_free_page(GFP_KERNEL);
-               if (buf) {
-                       char *path = d_path(&mm->exe_file->f_path,
-                                           buf, PAGE_SIZE);
-                       if (!IS_ERR(path)) {
-                               sim_notify_exec(path);
-                               retval = 1;
-                       }
-                       free_page((unsigned long)buf);
+       char *buf, *path;
+       struct vm_area_struct *vma;
+
+#ifndef CONFIG_KVM_GUEST   /* see notify_sim_task_change() */
+       if (!sim_is_simulator())
+#endif
+               return 1;
+
+       if (mm->exe_file == NULL)
+               return 0;
+
+       for (vma = current->mm->mmap; ; vma = vma->vm_next) {
+               if (vma == NULL)
+                       return 0;
+               if (vma->vm_file == mm->exe_file)
+                       break;
+       }
+
+       buf = (char *) __get_free_page(GFP_KERNEL);
+       if (buf == NULL)
+               return 0;
+
+       path = d_path(&mm->exe_file->f_path, buf, PAGE_SIZE);
+       if (IS_ERR(path)) {
+               free_page((unsigned long)buf);
+               return 0;
+       }
+
+       /*
+        * Notify simulator of an ET_DYN object so we know the load address.
+        * The somewhat cryptic overuse of SIM_CONTROL_DLOPEN allows us
+        * to be backward-compatible with older simulator releases.
+        */
+       if (vma->vm_start == (ELF_ET_DYN_BASE & PAGE_MASK)) {
+               char buf[64];
+               int i;
+
+               snprintf(buf, sizeof(buf), "0x%lx:@", vma->vm_start);
+               for (i = 0; ; ++i) {
+                       char c = buf[i];
+                       __insn_mtspr(SPR_SIM_CONTROL,
+                                    (SIM_CONTROL_DLOPEN
+                                     | (c << _SIM_CONTROL_OPERATOR_BITS)));
+                       if (c == '\0')
+                               break;
                }
        }
-       return retval;
+
+       sim_notify_exec(path);
+       free_page((unsigned long)buf);
+       return 1;
 }
 
 /* Notify a running simulator, if any, that we loaded an interpreter. */
@@ -68,37 +105,10 @@ static void sim_notify_interp(unsigned long load_addr)
 }
 
 
-/* Kernel address of page used to map read-only kernel data into userspace. */
-static void *vdso_page;
-
-/* One-entry array used for install_special_mapping. */
-static struct page *vdso_pages[1];
-
-static int __init vdso_setup(void)
-{
-       vdso_page = (void *)get_zeroed_page(GFP_ATOMIC);
-       memcpy(vdso_page, __rt_sigreturn, __rt_sigreturn_end - __rt_sigreturn);
-       vdso_pages[0] = virt_to_page(vdso_page);
-       return 0;
-}
-device_initcall(vdso_setup);
-
-const char *arch_vma_name(struct vm_area_struct *vma)
-{
-       if (vma->vm_private_data == vdso_pages)
-               return "[vdso]";
-#ifndef __tilegx__
-       if (vma->vm_start == MEM_USER_INTRPT)
-               return "[intrpt]";
-#endif
-       return NULL;
-}
-
 int arch_setup_additional_pages(struct linux_binprm *bprm,
                                int executable_stack)
 {
        struct mm_struct *mm = current->mm;
-       unsigned long vdso_base;
        int retval = 0;
 
        down_write(&mm->mmap_sem);
@@ -111,14 +121,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
        if (!notify_exec(mm))
                sim_notify_exec(bprm->filename);
 
-       /*
-        * MAYWRITE to allow gdb to COW and set breakpoints
-        */
-       vdso_base = VDSO_BASE;
-       retval = install_special_mapping(mm, vdso_base, PAGE_SIZE,
-                                        VM_READ|VM_EXEC|
-                                        VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
-                                        vdso_pages);
+       retval = setup_vdso_pages();
 
 #ifndef __tilegx__
        /*
index f7f99f90cbe0ebf7630416814daf846490e525eb..111d5a9b76f146c541d4fb17d56aacead9e7782d 100644 (file)
@@ -34,6 +34,7 @@
 #include <linux/hugetlb.h>
 #include <linux/syscalls.h>
 #include <linux/uaccess.h>
+#include <linux/kdebug.h>
 
 #include <asm/pgalloc.h>
 #include <asm/sections.h>
@@ -122,10 +123,9 @@ static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
        pmd_k = pmd_offset(pud_k, address);
        if (!pmd_present(*pmd_k))
                return NULL;
-       if (!pmd_present(*pmd)) {
+       if (!pmd_present(*pmd))
                set_pmd(pmd, *pmd_k);
-               arch_flush_lazy_mmu_mode();
-       } else
+       else
                BUG_ON(pmd_ptfn(*pmd) != pmd_ptfn(*pmd_k));
        return pmd_k;
 }
@@ -283,7 +283,7 @@ static int handle_page_fault(struct pt_regs *regs,
        flags = (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
                 (write ? FAULT_FLAG_WRITE : 0));
 
-       is_kernel_mode = (EX1_PL(regs->ex1) != USER_PL);
+       is_kernel_mode = !user_mode(regs);
 
        tsk = validate_current();
 
@@ -466,28 +466,15 @@ good_area:
                }
        }
 
-#if CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC()
-       /*
-        * If this was an asynchronous fault,
-        * restart the appropriate engine.
-        */
-       switch (fault_num) {
 #if CHIP_HAS_TILE_DMA()
+       /* If this was a DMA TLB fault, restart the DMA engine. */
+       switch (fault_num) {
        case INT_DMATLB_MISS:
        case INT_DMATLB_MISS_DWNCL:
        case INT_DMATLB_ACCESS:
        case INT_DMATLB_ACCESS_DWNCL:
                __insn_mtspr(SPR_DMA_CTR, SPR_DMA_CTR__REQUEST_MASK);
                break;
-#endif
-#if CHIP_HAS_SN_PROC()
-       case INT_SNITLB_MISS:
-       case INT_SNITLB_MISS_DWNCL:
-               __insn_mtspr(SPR_SNCTL,
-                            __insn_mfspr(SPR_SNCTL) &
-                            ~SPR_SNCTL__FRZPROC_MASK);
-               break;
-#endif
        }
 #endif
 
@@ -722,8 +709,60 @@ void do_page_fault(struct pt_regs *regs, int fault_num,
 {
        int is_page_fault;
 
+#ifdef CONFIG_KPROBES
+       /*
+        * This is to notify the fault handler of the kprobes.  The
+        * exception code is redundant as it is also carried in REGS,
+        * but we pass it anyhow.
+        */
+       if (notify_die(DIE_PAGE_FAULT, "page fault", regs, -1,
+                      regs->faultnum, SIGSEGV) == NOTIFY_STOP)
+               return;
+#endif
+
+#ifdef __tilegx__
+       /*
+        * We don't need early do_page_fault_ics() support, since unlike
+        * Pro we don't need to worry about unlocking the atomic locks.
+        * There is only one current case in GX where we touch any memory
+        * under ICS other than our own kernel stack, and we handle that
+        * here.  (If we crash due to trying to touch our own stack,
+        * we're in too much trouble for C code to help out anyway.)
+        */
+       if (write & ~1) {
+               unsigned long pc = write & ~1;
+               if (pc >= (unsigned long) __start_unalign_asm_code &&
+                   pc < (unsigned long) __end_unalign_asm_code) {
+                       struct thread_info *ti = current_thread_info();
+                       /*
+                        * Our EX_CONTEXT is still what it was from the
+                        * initial unalign exception, but now we've faulted
+                        * on the JIT page.  We would like to complete the
+                        * page fault however is appropriate, and then retry
+                        * the instruction that caused the unalign exception.
+                        * Our state has been "corrupted" by setting the low
+                        * bit in "sp", and stashing r0..r3 in the
+                        * thread_info area, so we revert all of that, then
+                        * continue as if this were a normal page fault.
+                        */
+                       regs->sp &= ~1UL;
+                       regs->regs[0] = ti->unalign_jit_tmp[0];
+                       regs->regs[1] = ti->unalign_jit_tmp[1];
+                       regs->regs[2] = ti->unalign_jit_tmp[2];
+                       regs->regs[3] = ti->unalign_jit_tmp[3];
+                       write &= 1;
+               } else {
+                       pr_alert("%s/%d: ICS set at page fault at %#lx: %#lx\n",
+                                current->comm, current->pid, pc, address);
+                       show_regs(regs);
+                       do_group_exit(SIGKILL);
+                       return;
+               }
+       }
+#else
        /* This case should have been handled by do_page_fault_ics(). */
        BUG_ON(write & ~1);
+#endif
 
 #if CHIP_HAS_TILE_DMA()
        /*
@@ -751,10 +790,6 @@ void do_page_fault(struct pt_regs *regs, int fault_num,
 #if CHIP_HAS_TILE_DMA()
        case INT_DMATLB_MISS:
        case INT_DMATLB_MISS_DWNCL:
-#endif
-#if CHIP_HAS_SN_PROC()
-       case INT_SNITLB_MISS:
-       case INT_SNITLB_MISS_DWNCL:
 #endif
                is_page_fault = 1;
                break;
@@ -771,8 +806,8 @@ void do_page_fault(struct pt_regs *regs, int fault_num,
                panic("Bad fault number %d in do_page_fault", fault_num);
        }
 
-#if CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC()
-       if (EX1_PL(regs->ex1) != USER_PL) {
+#if CHIP_HAS_TILE_DMA()
+       if (!user_mode(regs)) {
                struct async_tlb *async;
                switch (fault_num) {
 #if CHIP_HAS_TILE_DMA()
@@ -782,12 +817,6 @@ void do_page_fault(struct pt_regs *regs, int fault_num,
                case INT_DMATLB_ACCESS_DWNCL:
                        async = &current->thread.dma_async_tlb;
                        break;
-#endif
-#if CHIP_HAS_SN_PROC()
-               case INT_SNITLB_MISS:
-               case INT_SNITLB_MISS_DWNCL:
-                       async = &current->thread.sn_async_tlb;
-                       break;
 #endif
                default:
                        async = NULL;
@@ -821,14 +850,22 @@ void do_page_fault(struct pt_regs *regs, int fault_num,
 }
 
 
-#if CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC()
+#if CHIP_HAS_TILE_DMA()
 /*
- * Check an async_tlb structure to see if a deferred fault is waiting,
- * and if so pass it to the page-fault code.
+ * This routine effectively re-issues asynchronous page faults
+ * when we are returning to user space.
  */
-static void handle_async_page_fault(struct pt_regs *regs,
-                                   struct async_tlb *async)
+void do_async_page_fault(struct pt_regs *regs)
 {
+       struct async_tlb *async = &current->thread.dma_async_tlb;
+
+       /*
+        * Clear thread flag early.  If we re-interrupt while processing
+        * code here, we will reset it and recall this routine before
+        * returning to user space.
+        */
+       clear_thread_flag(TIF_ASYNC_TLB);
+
        if (async->fault_num) {
                /*
                 * Clear async->fault_num before calling the page-fault
@@ -842,35 +879,15 @@ static void handle_async_page_fault(struct pt_regs *regs,
                                  async->address, async->is_write);
        }
 }
-
-/*
- * This routine effectively re-issues asynchronous page faults
- * when we are returning to user space.
- */
-void do_async_page_fault(struct pt_regs *regs)
-{
-       /*
-        * Clear thread flag early.  If we re-interrupt while processing
-        * code here, we will reset it and recall this routine before
-        * returning to user space.
-        */
-       clear_thread_flag(TIF_ASYNC_TLB);
-
-#if CHIP_HAS_TILE_DMA()
-       handle_async_page_fault(regs, &current->thread.dma_async_tlb);
-#endif
-#if CHIP_HAS_SN_PROC()
-       handle_async_page_fault(regs, &current->thread.sn_async_tlb);
-#endif
-}
-#endif /* CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC() */
+#endif /* CHIP_HAS_TILE_DMA() */
 
 
 void vmalloc_sync_all(void)
 {
 #ifdef __tilegx__
        /* Currently all L1 kernel pmd's are static and shared. */
-       BUG_ON(pgd_index(VMALLOC_END) != pgd_index(VMALLOC_START));
+       BUILD_BUG_ON(pgd_index(VMALLOC_END - PAGE_SIZE) !=
+                    pgd_index(VMALLOC_START));
 #else
        /*
         * Note that races in the updates of insync and start aren't
index 347d123b14be961785fb2031727371c18b95723a..0dc218294770c23658158ccccb5ca566f60a8939 100644 (file)
@@ -114,7 +114,6 @@ static void kmap_atomic_register(struct page *page, int type,
 
        list_add(&amp->list, &amp_list);
        set_pte(ptep, pteval);
-       arch_flush_lazy_mmu_mode();
 
        spin_unlock(&amp_lock);
        homecache_kpte_unlock(flags);
@@ -259,7 +258,6 @@ void __kunmap_atomic(void *kvaddr)
                BUG_ON(vaddr >= (unsigned long)high_memory);
        }
 
-       arch_flush_lazy_mmu_mode();
        pagefault_enable();
 }
 EXPORT_SYMBOL(__kunmap_atomic);
index 1ae911939a18bdd6050e9a928b099471efeb7599..004ba568d93f2bb4a2fa256c16da04720a4857f9 100644 (file)
 #include "migrate.h"
 
 
-#if CHIP_HAS_COHERENT_LOCAL_CACHE()
-
 /*
  * The noallocl2 option suppresses all use of the L2 cache to cache
- * locally from a remote home.  There's no point in using it if we
- * don't have coherent local caching, though.
+ * locally from a remote home.
  */
 static int __write_once noallocl2;
 static int __init set_noallocl2(char *str)
@@ -58,12 +55,6 @@ static int __init set_noallocl2(char *str)
 }
 early_param("noallocl2", set_noallocl2);
 
-#else
-
-#define noallocl2 0
-
-#endif
-
 
 /*
  * Update the irq_stat for cpus that we are going to interrupt
@@ -172,7 +163,8 @@ void flush_remote(unsigned long cache_pfn, unsigned long cache_control,
 
 static void homecache_finv_page_va(void* va, int home)
 {
-       if (home == smp_processor_id()) {
+       int cpu = get_cpu();
+       if (home == cpu) {
                finv_buffer_local(va, PAGE_SIZE);
        } else if (home == PAGE_HOME_HASH) {
                finv_buffer_remote(va, PAGE_SIZE, 1);
@@ -180,6 +172,7 @@ static void homecache_finv_page_va(void* va, int home)
                BUG_ON(home < 0 || home >= NR_CPUS);
                finv_buffer_remote(va, PAGE_SIZE, 0);
        }
+       put_cpu();
 }
 
 void homecache_finv_map_page(struct page *page, int home)
@@ -198,7 +191,7 @@ void homecache_finv_map_page(struct page *page, int home)
 #else
        va = __fix_to_virt(FIX_HOMECACHE_BEGIN + smp_processor_id());
 #endif
-       ptep = virt_to_pte(NULL, (unsigned long)va);
+       ptep = virt_to_kpte(va);
        pte = pfn_pte(page_to_pfn(page), PAGE_KERNEL);
        __set_pte(ptep, pte_set_home(pte, home));
        homecache_finv_page_va((void *)va, home);
@@ -263,10 +256,8 @@ static int pte_to_home(pte_t pte)
                return PAGE_HOME_INCOHERENT;
        case HV_PTE_MODE_UNCACHED:
                return PAGE_HOME_UNCACHED;
-#if CHIP_HAS_CBOX_HOME_MAP()
        case HV_PTE_MODE_CACHE_HASH_L3:
                return PAGE_HOME_HASH;
-#endif
        }
        panic("Bad PTE %#llx\n", pte.val);
 }
@@ -323,20 +314,16 @@ pte_t pte_set_home(pte_t pte, int home)
                                                      HV_PTE_MODE_CACHE_NO_L3);
                        }
                } else
-#if CHIP_HAS_CBOX_HOME_MAP()
                if (hash_default)
                        pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_HASH_L3);
                else
-#endif
                        pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_NO_L3);
                pte = hv_pte_set_nc(pte);
                break;
 
-#if CHIP_HAS_CBOX_HOME_MAP()
        case PAGE_HOME_HASH:
                pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_HASH_L3);
                break;
-#endif
 
        default:
                BUG_ON(home < 0 || home >= NR_CPUS ||
@@ -346,7 +333,6 @@ pte_t pte_set_home(pte_t pte, int home)
                break;
        }
 
-#if CHIP_HAS_NC_AND_NOALLOC_BITS()
        if (noallocl2)
                pte = hv_pte_set_no_alloc_l2(pte);
 
@@ -355,7 +341,6 @@ pte_t pte_set_home(pte_t pte, int home)
            hv_pte_get_mode(pte) == HV_PTE_MODE_CACHE_NO_L3) {
                pte = hv_pte_set_mode(pte, HV_PTE_MODE_UNCACHED);
        }
-#endif
 
        /* Checking this case here gives a better panic than from the hv. */
        BUG_ON(hv_pte_get_mode(pte) == 0);
@@ -371,19 +356,13 @@ EXPORT_SYMBOL(pte_set_home);
  * so they're not suitable for anything but infrequent use.
  */
 
-#if CHIP_HAS_CBOX_HOME_MAP()
-static inline int initial_page_home(void) { return PAGE_HOME_HASH; }
-#else
-static inline int initial_page_home(void) { return 0; }
-#endif
-
 int page_home(struct page *page)
 {
        if (PageHighMem(page)) {
-               return initial_page_home();
+               return PAGE_HOME_HASH;
        } else {
                unsigned long kva = (unsigned long)page_address(page);
-               return pte_to_home(*virt_to_pte(NULL, kva));
+               return pte_to_home(*virt_to_kpte(kva));
        }
 }
 EXPORT_SYMBOL(page_home);
@@ -402,7 +381,7 @@ void homecache_change_page_home(struct page *page, int order, int home)
                     NULL, 0);
 
        for (i = 0; i < pages; ++i, kva += PAGE_SIZE) {
-               pte_t *ptep = virt_to_pte(NULL, kva);
+               pte_t *ptep = virt_to_kpte(kva);
                pte_t pteval = *ptep;
                BUG_ON(!pte_present(pteval) || pte_huge(pteval));
                __set_pte(ptep, pte_set_home(pteval, home));
@@ -436,7 +415,7 @@ struct page *homecache_alloc_pages_node(int nid, gfp_t gfp_mask,
 void __homecache_free_pages(struct page *page, unsigned int order)
 {
        if (put_page_testzero(page)) {
-               homecache_change_page_home(page, order, initial_page_home());
+               homecache_change_page_home(page, order, PAGE_HOME_HASH);
                if (order == 0) {
                        free_hot_cold_page(page, 0);
                } else {
index 650ccff8378cd6a8ddd42c557ab3bea2667d0a7d..e514899e1100319dc83fe69530f1aad67b17ceea 100644 (file)
@@ -49,38 +49,6 @@ int huge_shift[HUGE_SHIFT_ENTRIES] = {
 #endif
 };
 
-/*
- * This routine is a hybrid of pte_alloc_map() and pte_alloc_kernel().
- * It assumes that L2 PTEs are never in HIGHMEM (we don't support that).
- * It locks the user pagetable, and bumps up the mm->nr_ptes field,
- * but otherwise allocate the page table using the kernel versions.
- */
-static pte_t *pte_alloc_hugetlb(struct mm_struct *mm, pmd_t *pmd,
-                               unsigned long address)
-{
-       pte_t *new;
-
-       if (pmd_none(*pmd)) {
-               new = pte_alloc_one_kernel(mm, address);
-               if (!new)
-                       return NULL;
-
-               smp_wmb(); /* See comment in __pte_alloc */
-
-               spin_lock(&mm->page_table_lock);
-               if (likely(pmd_none(*pmd))) {  /* Has another populated it ? */
-                       mm->nr_ptes++;
-                       pmd_populate_kernel(mm, pmd, new);
-                       new = NULL;
-               } else
-                       VM_BUG_ON(pmd_trans_splitting(*pmd));
-               spin_unlock(&mm->page_table_lock);
-               if (new)
-                       pte_free_kernel(mm, new);
-       }
-
-       return pte_offset_kernel(pmd, address);
-}
 #endif
 
 pte_t *huge_pte_alloc(struct mm_struct *mm,
@@ -109,7 +77,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
                else {
                        if (sz != PAGE_SIZE << huge_shift[HUGE_SHIFT_PAGE])
                                panic("Unexpected page size %#lx\n", sz);
-                       return pte_alloc_hugetlb(mm, pmd, addr);
+                       return pte_alloc_map(mm, NULL, pmd, addr);
                }
        }
 #else
@@ -144,14 +112,14 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
 
        /* Get the top-level page table entry. */
        pgd = (pgd_t *)get_pte((pte_t *)mm->pgd, pgd_index(addr), 0);
-       if (!pgd_present(*pgd))
-               return NULL;
 
        /* We don't have four levels. */
        pud = pud_offset(pgd, addr);
 #ifndef __PAGETABLE_PUD_FOLDED
 # error support fourth page table level
 #endif
+       if (!pud_present(*pud))
+               return NULL;
 
        /* Check for an L0 huge PTE, if we have three levels. */
 #ifndef __PAGETABLE_PMD_FOLDED
index e182958c707de30629c514819b2dd2ca386a9d6c..4e316deb92fd58f084e57d01a42104e65c68c29b 100644 (file)
@@ -106,10 +106,8 @@ pte_t *get_prealloc_pte(unsigned long pfn)
  */
 static int initial_heap_home(void)
 {
-#if CHIP_HAS_CBOX_HOME_MAP()
        if (hash_default)
                return PAGE_HOME_HASH;
-#endif
        return smp_processor_id();
 }
 
@@ -190,14 +188,11 @@ static void __init page_table_range_init(unsigned long start,
 }
 
 
-#if CHIP_HAS_CBOX_HOME_MAP()
-
 static int __initdata ktext_hash = 1;  /* .text pages */
 static int __initdata kdata_hash = 1;  /* .data and .bss pages */
 int __write_once hash_default = 1;     /* kernel allocator pages */
 EXPORT_SYMBOL(hash_default);
 int __write_once kstack_hash = 1;      /* if no homecaching, use h4h */
-#endif /* CHIP_HAS_CBOX_HOME_MAP */
 
 /*
  * CPUs to use to for striping the pages of kernel data.  If hash-for-home
@@ -215,14 +210,12 @@ int __write_once kdata_huge;       /* if no homecaching, small pages */
 static pgprot_t __init construct_pgprot(pgprot_t prot, int home)
 {
        prot = pte_set_home(prot, home);
-#if CHIP_HAS_CBOX_HOME_MAP()
        if (home == PAGE_HOME_IMMUTABLE) {
                if (ktext_hash)
                        prot = hv_pte_set_mode(prot, HV_PTE_MODE_CACHE_HASH_L3);
                else
                        prot = hv_pte_set_mode(prot, HV_PTE_MODE_CACHE_NO_L3);
        }
-#endif
        return prot;
 }
 
@@ -234,22 +227,17 @@ static pgprot_t __init init_pgprot(ulong address)
 {
        int cpu;
        unsigned long page;
-       enum { CODE_DELTA = MEM_SV_INTRPT - PAGE_OFFSET };
+       enum { CODE_DELTA = MEM_SV_START - PAGE_OFFSET };
 
-#if CHIP_HAS_CBOX_HOME_MAP()
        /* For kdata=huge, everything is just hash-for-home. */
        if (kdata_huge)
                return construct_pgprot(PAGE_KERNEL, PAGE_HOME_HASH);
-#endif
 
        /* We map the aliased pages of permanent text inaccessible. */
        if (address < (ulong) _sinittext - CODE_DELTA)
                return PAGE_NONE;
 
-       /*
-        * We map read-only data non-coherent for performance.  We could
-        * use neighborhood caching on TILE64, but it's not clear it's a win.
-        */
+       /* We map read-only data non-coherent for performance. */
        if ((address >= (ulong) __start_rodata &&
             address < (ulong) __end_rodata) ||
            address == (ulong) empty_zero_page) {
@@ -257,11 +245,9 @@ static pgprot_t __init init_pgprot(ulong address)
        }
 
 #ifndef __tilegx__
-#if !ATOMIC_LOCKS_FOUND_VIA_TABLE()
        /* Force the atomic_locks[] array page to be hash-for-home. */
        if (address == (ulong) atomic_locks)
                return construct_pgprot(PAGE_KERNEL, PAGE_HOME_HASH);
-#endif
 #endif
 
        /*
@@ -280,19 +266,9 @@ static pgprot_t __init init_pgprot(ulong address)
        if (address >= (ulong) _end || address < (ulong) _einitdata)
                return construct_pgprot(PAGE_KERNEL, initial_heap_home());
 
-#if CHIP_HAS_CBOX_HOME_MAP()
        /* Use hash-for-home if requested for data/bss. */
        if (kdata_hash)
                return construct_pgprot(PAGE_KERNEL, PAGE_HOME_HASH);
-#endif
-
-       /*
-        * Make the w1data homed like heap to start with, to avoid
-        * making it part of the page-striped data area when we're just
-        * going to convert it to read-only soon anyway.
-        */
-       if (address >= (ulong)__w1data_begin && address < (ulong)__w1data_end)
-               return construct_pgprot(PAGE_KERNEL, initial_heap_home());
 
        /*
         * Otherwise we just hand out consecutive cpus.  To avoid
@@ -301,7 +277,7 @@ static pgprot_t __init init_pgprot(ulong address)
         * the requested address, while walking cpu home around kdata_mask.
         * This is typically no more than a dozen or so iterations.
         */
-       page = (((ulong)__w1data_end) + PAGE_SIZE - 1) & PAGE_MASK;
+       page = (((ulong)__end_rodata) + PAGE_SIZE - 1) & PAGE_MASK;
        BUG_ON(address < page || address >= (ulong)_end);
        cpu = cpumask_first(&kdata_mask);
        for (; page < address; page += PAGE_SIZE) {
@@ -311,10 +287,8 @@ static pgprot_t __init init_pgprot(ulong address)
                if (page == (ulong)empty_zero_page)
                        continue;
 #ifndef __tilegx__
-#if !ATOMIC_LOCKS_FOUND_VIA_TABLE()
                if (page == (ulong)atomic_locks)
                        continue;
-#endif
 #endif
                cpu = cpumask_next(cpu, &kdata_mask);
                if (cpu == NR_CPUS)
@@ -358,7 +332,7 @@ static int __init setup_ktext(char *str)
 
        ktext_arg_seen = 1;
 
-       /* Default setting on Tile64: use a huge page */
+       /* Default setting: use a huge page */
        if (strcmp(str, "huge") == 0)
                pr_info("ktext: using one huge locally cached page\n");
 
@@ -404,10 +378,8 @@ static inline pgprot_t ktext_set_nocache(pgprot_t prot)
 {
        if (!ktext_nocache)
                prot = hv_pte_set_nc(prot);
-#if CHIP_HAS_NC_AND_NOALLOC_BITS()
        else
                prot = hv_pte_set_no_alloc_l2(prot);
-#endif
        return prot;
 }
 
@@ -440,7 +412,6 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
        struct cpumask kstripe_mask;
        int rc, i;
 
-#if CHIP_HAS_CBOX_HOME_MAP()
        if (ktext_arg_seen && ktext_hash) {
                pr_warning("warning: \"ktext\" boot argument ignored"
                           " if \"kcache_hash\" sets up text hash-for-home\n");
@@ -457,7 +428,6 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
                          " kcache_hash=all or =allbutstack\n");
                kdata_huge = 0;
        }
-#endif
 
        /*
         * Set up a mask for cpus to use for kernel striping.
@@ -538,7 +508,7 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
                }
        }
 
-       address = MEM_SV_INTRPT;
+       address = MEM_SV_START;
        pmd = get_pmd(pgtables, address);
        pfn = 0;  /* code starts at PA 0 */
        if (ktext_small) {
@@ -585,13 +555,11 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
        } else {
                pte_t pteval = pfn_pte(0, PAGE_KERNEL_EXEC);
                pteval = pte_mkhuge(pteval);
-#if CHIP_HAS_CBOX_HOME_MAP()
                if (ktext_hash) {
                        pteval = hv_pte_set_mode(pteval,
                                                 HV_PTE_MODE_CACHE_HASH_L3);
                        pteval = ktext_set_nocache(pteval);
                } else
-#endif /* CHIP_HAS_CBOX_HOME_MAP() */
                if (cpumask_weight(&ktext_mask) == 1) {
                        pteval = set_remote_cache_cpu(pteval,
                                              cpumask_first(&ktext_mask));
@@ -777,10 +745,7 @@ void __init paging_init(void)
 
        kernel_physical_mapping_init(pgd_base);
 
-       /*
-        * Fixed mappings, only the page table structure has to be
-        * created - mappings will be set by set_fixmap():
-        */
+       /* Fixed mappings, only the page table structure has to be created. */
        page_table_range_init(fix_to_virt(__end_of_fixed_addresses - 1),
                              FIXADDR_TOP, pgd_base);
 
@@ -941,26 +906,6 @@ void __init pgtable_cache_init(void)
                panic("pgtable_cache_init(): Cannot create pgd cache");
 }
 
-#if !CHIP_HAS_COHERENT_LOCAL_CACHE()
-/*
- * The __w1data area holds data that is only written during initialization,
- * and is read-only and thus freely cacheable thereafter.  Fix the page
- * table entries that cover that region accordingly.
- */
-static void mark_w1data_ro(void)
-{
-       /* Loop over page table entries */
-       unsigned long addr = (unsigned long)__w1data_begin;
-       BUG_ON((addr & (PAGE_SIZE-1)) != 0);
-       for (; addr <= (unsigned long)__w1data_end - 1; addr += PAGE_SIZE) {
-               unsigned long pfn = kaddr_to_pfn((void *)addr);
-               pte_t *ptep = virt_to_pte(NULL, addr);
-               BUG_ON(pte_huge(*ptep));   /* not relevant for kdata_huge */
-               set_pte_at(&init_mm, addr, ptep, pfn_pte(pfn, PAGE_KERNEL_RO));
-       }
-}
-#endif
-
 #ifdef CONFIG_DEBUG_PAGEALLOC
 static long __write_once initfree;
 #else
@@ -1000,7 +945,7 @@ static void free_init_pages(char *what, unsigned long begin, unsigned long end)
                 */
                int pfn = kaddr_to_pfn((void *)addr);
                struct page *page = pfn_to_page(pfn);
-               pte_t *ptep = virt_to_pte(NULL, addr);
+               pte_t *ptep = virt_to_kpte(addr);
                if (!initfree) {
                        /*
                         * If debugging page accesses then do not free
@@ -1024,15 +969,11 @@ static void free_init_pages(char *what, unsigned long begin, unsigned long end)
 
 void free_initmem(void)
 {
-       const unsigned long text_delta = MEM_SV_INTRPT - PAGE_OFFSET;
+       const unsigned long text_delta = MEM_SV_START - PAGE_OFFSET;
 
        /*
-        * Evict the dirty initdata on the boot cpu, evict the w1data
-        * wherever it's homed, and evict all the init code everywhere.
-        * We are guaranteed that no one will touch the init pages any
-        * more, and although other cpus may be touching the w1data,
-        * we only actually change the caching on tile64, which won't
-        * be keeping local copies in the other tiles' caches anyway.
+        * Evict the cache on all cores to avoid incoherence.
+        * We are guaranteed that no one will touch the init pages any more.
         */
        homecache_evict(&cpu_cacheable_map);
 
@@ -1043,26 +984,11 @@ void free_initmem(void)
 
        /*
         * Free the pages mapped from 0xc0000000 that correspond to code
-        * pages from MEM_SV_INTRPT that we won't use again after init.
+        * pages from MEM_SV_START that we won't use again after init.
         */
        free_init_pages("unused kernel text",
                        (unsigned long)_sinittext - text_delta,
                        (unsigned long)_einittext - text_delta);
-
-#if !CHIP_HAS_COHERENT_LOCAL_CACHE()
-       /*
-        * Upgrade the .w1data section to globally cached.
-        * We don't do this on tilepro, since the cache architecture
-        * pretty much makes it irrelevant, and in any case we end
-        * up having racing issues with other tiles that may touch
-        * the data after we flush the cache but before we update
-        * the PTEs and flush the TLBs, causing sharer shootdowns
-        * later.  Even though this is to clean data, it seems like
-        * an unnecessary complication.
-        */
-       mark_w1data_ro();
-#endif
-
        /* Do a global TLB flush so everyone sees the changes. */
        flush_tlb_all();
 }
index 5305814bf187f52aa89f6b84e5f886cf273f5814..772085491bf9dfa6d615aab7734841888a6ff15e 100644 (file)
@@ -136,7 +136,7 @@ STD_ENTRY(flush_and_install_context)
         move r8, zero  /* asids */
         move r9, zero  /* asidcount */
        }
-       jal hv_flush_remote
+       jal _hv_flush_remote
        bnz r0, .Ldone
 
        /* Now install the new page table. */
@@ -152,7 +152,7 @@ STD_ENTRY(flush_and_install_context)
         move r4, r_asid
         moveli r5, HV_CTX_DIRECTIO | CTX_PAGE_FLAG
        }
-       jal hv_install_context
+       jal _hv_install_context
        bnz r0, .Ldone
 
        /* Finally, flush the TLB. */
index 1d15b10833d113ee077f8ea032503819ea4c257e..a49eee38f8723a3e78ba8cf235fd746fc514d3e4 100644 (file)
@@ -123,7 +123,7 @@ STD_ENTRY(flush_and_install_context)
        }
        {
         move r8, zero  /* asidcount */
-        jal hv_flush_remote
+        jal _hv_flush_remote
        }
        bnez r0, 1f
 
@@ -136,7 +136,7 @@ STD_ENTRY(flush_and_install_context)
         move r2, r_asid
         moveli r3, HV_CTX_DIRECTIO | CTX_PAGE_FLAG
        }
-       jal hv_install_context
+       jal _hv_install_context
        bnez r0, 1f
 
        /* Finally, flush the TLB. */
index d67d91ebf63e693e1562f2b465de5e69fe490547..851a94e6ae58061824917c82ee2ae0d8fe0f4b41 100644 (file)
@@ -58,16 +58,36 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
 #else
        int is_32bit = 0;
 #endif
+       unsigned long random_factor = 0UL;
+
+       /*
+        *  8 bits of randomness in 32bit mmaps, 24 address space bits
+        * 12 bits of randomness in 64bit mmaps, 28 address space bits
+        */
+       if (current->flags & PF_RANDOMIZE) {
+               if (is_32bit)
+                       random_factor = get_random_int() % (1<<8);
+               else
+                       random_factor = get_random_int() % (1<<12);
+
+               random_factor <<= PAGE_SHIFT;
+       }
 
        /*
         * Use standard layout if the expected stack growth is unlimited
         * or we are running native 64 bits.
         */
-       if (!is_32bit || rlimit(RLIMIT_STACK) == RLIM_INFINITY) {
-               mm->mmap_base = TASK_UNMAPPED_BASE;
+       if (rlimit(RLIMIT_STACK) == RLIM_INFINITY) {
+               mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
                mm->get_unmapped_area = arch_get_unmapped_area;
        } else {
                mm->mmap_base = mmap_base(mm);
                mm->get_unmapped_area = arch_get_unmapped_area_topdown;
        }
 }
+
+unsigned long arch_randomize_brk(struct mm_struct *mm)
+{
+       unsigned long range_end = mm->brk + 0x02000000;
+       return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
+}
index dfd63ce873273b2d9055b416e58e4d81e3eeb920..33b006ab559c7121a73c0f12a66bf163c686c4cc 100644 (file)
@@ -83,55 +83,6 @@ void show_mem(unsigned int filter)
        }
 }
 
-/*
- * Associate a virtual page frame with a given physical page frame
- * and protection flags for that frame.
- */
-static void set_pte_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
-{
-       pgd_t *pgd;
-       pud_t *pud;
-       pmd_t *pmd;
-       pte_t *pte;
-
-       pgd = swapper_pg_dir + pgd_index(vaddr);
-       if (pgd_none(*pgd)) {
-               BUG();
-               return;
-       }
-       pud = pud_offset(pgd, vaddr);
-       if (pud_none(*pud)) {
-               BUG();
-               return;
-       }
-       pmd = pmd_offset(pud, vaddr);
-       if (pmd_none(*pmd)) {
-               BUG();
-               return;
-       }
-       pte = pte_offset_kernel(pmd, vaddr);
-       /* <pfn,flags> stored as-is, to permit clearing entries */
-       set_pte(pte, pfn_pte(pfn, flags));
-
-       /*
-        * It's enough to flush this one mapping.
-        * This appears conservative since it is only called
-        * from __set_fixmap.
-        */
-       local_flush_tlb_page(NULL, vaddr, PAGE_SIZE);
-}
-
-void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t flags)
-{
-       unsigned long address = __fix_to_virt(idx);
-
-       if (idx >= __end_of_fixed_addresses) {
-               BUG();
-               return;
-       }
-       set_pte_pfn(address, phys >> PAGE_SHIFT, flags);
-}
-
 /**
  * shatter_huge_page() - ensure a given address is mapped by a small page.
  *
@@ -374,6 +325,17 @@ void ptep_set_wrprotect(struct mm_struct *mm,
 
 #endif
 
+/*
+ * Return a pointer to the PTE that corresponds to the given
+ * address in the given page table.  A NULL page table just uses
+ * the standard kernel page table; the preferred API in this case
+ * is virt_to_kpte().
+ *
+ * The returned pointer can point to a huge page in other levels
+ * of the page table than the bottom, if the huge page is present
+ * in the page table.  For bottom-level PTEs, the returned pointer
+ * can point to a PTE that is either present or not.
+ */
 pte_t *virt_to_pte(struct mm_struct* mm, unsigned long addr)
 {
        pgd_t *pgd;
@@ -387,13 +349,23 @@ pte_t *virt_to_pte(struct mm_struct* mm, unsigned long addr)
        pud = pud_offset(pgd, addr);
        if (!pud_present(*pud))
                return NULL;
+       if (pud_huge_page(*pud))
+               return (pte_t *)pud;
        pmd = pmd_offset(pud, addr);
-       if (pmd_huge_page(*pmd))
-               return (pte_t *)pmd;
        if (!pmd_present(*pmd))
                return NULL;
+       if (pmd_huge_page(*pmd))
+               return (pte_t *)pmd;
        return pte_offset_kernel(pmd, addr);
 }
+EXPORT_SYMBOL(virt_to_pte);
+
+pte_t *virt_to_kpte(unsigned long kaddr)
+{
+       BUG_ON(kaddr < PAGE_OFFSET);
+       return virt_to_pte(NULL, kaddr);
+}
+EXPORT_SYMBOL(virt_to_kpte);
 
 pgprot_t set_remote_cache_cpu(pgprot_t prot, int cpu)
 {
@@ -532,25 +504,18 @@ void check_mm_caching(struct mm_struct *prev, struct mm_struct *next)
 
 #if CHIP_HAS_MMIO()
 
-/* Map an arbitrary MMIO address, homed according to pgprot, into VA space. */
-void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
-                          pgprot_t home)
+void *generic_remap_prot(resource_size_t phys_addr, unsigned long size,
+                   unsigned long flags, pgprot_t prot)
 {
        void *addr;
        struct vm_struct *area;
        unsigned long offset, last_addr;
-       pgprot_t pgprot;
 
        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr)
                return NULL;
 
-       /* Create a read/write, MMIO VA mapping homed at the requested shim. */
-       pgprot = PAGE_KERNEL;
-       pgprot = hv_pte_set_mode(pgprot, HV_PTE_MODE_MMIO);
-       pgprot = hv_pte_set_lotar(pgprot, hv_pte_get_lotar(home));
-
        /*
         * Mappings have to be page-aligned
         */
@@ -561,17 +526,35 @@ void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
        /*
         * Ok, go for it..
         */
-       area = get_vm_area(size, VM_IOREMAP /* | other flags? */);
+       area = get_vm_area(size, flags);
        if (!area)
                return NULL;
        area->phys_addr = phys_addr;
        addr = area->addr;
        if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size,
-                              phys_addr, pgprot)) {
-               remove_vm_area((void *)(PAGE_MASK & (unsigned long) addr));
+                              phys_addr, prot)) {
+               free_vm_area(area);
                return NULL;
        }
-       return (__force void __iomem *) (offset + (char *)addr);
+       return (void *) (offset + (char *)addr);
+}
+EXPORT_SYMBOL(generic_remap_prot);
+
+/* Map an arbitrary MMIO address, homed according to pgprot, into VA space. */
+void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
+                          pgprot_t home)
+{
+       pgprot_t pgprot;
+       unsigned long flags;
+
+       /* Create a read/write, MMIO VA mapping homed at the requested shim. */
+       pgprot = PAGE_KERNEL;
+       pgprot = hv_pte_set_mode(pgprot, HV_PTE_MODE_MMIO);
+       pgprot = hv_pte_set_lotar(pgprot, hv_pte_get_lotar(home));
+       flags = VM_IOREMAP; /* | other flags? */
+
+       return (__force void __iomem *) generic_remap_prot(phys_addr,
+                                                          size, flags, pgprot);
 }
 EXPORT_SYMBOL(ioremap_prot);
 
index 5cb86ccd4acb9e6e105098525cb8fd7592a0cbd6..c171dcbf192d9b2ae1aa31c5b91f02ffb0b9220d 100644 (file)
@@ -62,7 +62,7 @@ static void camellia_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 }
 
 /* camellia sboxes */
-const u64 camellia_sp10011110[256] = {
+__visible const u64 camellia_sp10011110[256] = {
        0x7000007070707000ULL, 0x8200008282828200ULL, 0x2c00002c2c2c2c00ULL,
        0xec0000ecececec00ULL, 0xb30000b3b3b3b300ULL, 0x2700002727272700ULL,
        0xc00000c0c0c0c000ULL, 0xe50000e5e5e5e500ULL, 0xe40000e4e4e4e400ULL,
@@ -151,7 +151,7 @@ const u64 camellia_sp10011110[256] = {
        0x9e00009e9e9e9e00ULL,
 };
 
-const u64 camellia_sp22000222[256] = {
+__visible const u64 camellia_sp22000222[256] = {
        0xe0e0000000e0e0e0ULL, 0x0505000000050505ULL, 0x5858000000585858ULL,
        0xd9d9000000d9d9d9ULL, 0x6767000000676767ULL, 0x4e4e0000004e4e4eULL,
        0x8181000000818181ULL, 0xcbcb000000cbcbcbULL, 0xc9c9000000c9c9c9ULL,
@@ -240,7 +240,7 @@ const u64 camellia_sp22000222[256] = {
        0x3d3d0000003d3d3dULL,
 };
 
-const u64 camellia_sp03303033[256] = {
+__visible const u64 camellia_sp03303033[256] = {
        0x0038380038003838ULL, 0x0041410041004141ULL, 0x0016160016001616ULL,
        0x0076760076007676ULL, 0x00d9d900d900d9d9ULL, 0x0093930093009393ULL,
        0x0060600060006060ULL, 0x00f2f200f200f2f2ULL, 0x0072720072007272ULL,
@@ -329,7 +329,7 @@ const u64 camellia_sp03303033[256] = {
        0x004f4f004f004f4fULL,
 };
 
-const u64 camellia_sp00444404[256] = {
+__visible const u64 camellia_sp00444404[256] = {
        0x0000707070700070ULL, 0x00002c2c2c2c002cULL, 0x0000b3b3b3b300b3ULL,
        0x0000c0c0c0c000c0ULL, 0x0000e4e4e4e400e4ULL, 0x0000575757570057ULL,
        0x0000eaeaeaea00eaULL, 0x0000aeaeaeae00aeULL, 0x0000232323230023ULL,
@@ -418,7 +418,7 @@ const u64 camellia_sp00444404[256] = {
        0x00009e9e9e9e009eULL,
 };
 
-const u64 camellia_sp02220222[256] = {
+__visible const u64 camellia_sp02220222[256] = {
        0x00e0e0e000e0e0e0ULL, 0x0005050500050505ULL, 0x0058585800585858ULL,
        0x00d9d9d900d9d9d9ULL, 0x0067676700676767ULL, 0x004e4e4e004e4e4eULL,
        0x0081818100818181ULL, 0x00cbcbcb00cbcbcbULL, 0x00c9c9c900c9c9c9ULL,
@@ -507,7 +507,7 @@ const u64 camellia_sp02220222[256] = {
        0x003d3d3d003d3d3dULL,
 };
 
-const u64 camellia_sp30333033[256] = {
+__visible const u64 camellia_sp30333033[256] = {
        0x3800383838003838ULL, 0x4100414141004141ULL, 0x1600161616001616ULL,
        0x7600767676007676ULL, 0xd900d9d9d900d9d9ULL, 0x9300939393009393ULL,
        0x6000606060006060ULL, 0xf200f2f2f200f2f2ULL, 0x7200727272007272ULL,
@@ -596,7 +596,7 @@ const u64 camellia_sp30333033[256] = {
        0x4f004f4f4f004f4fULL,
 };
 
-const u64 camellia_sp44044404[256] = {
+__visible const u64 camellia_sp44044404[256] = {
        0x7070007070700070ULL, 0x2c2c002c2c2c002cULL, 0xb3b300b3b3b300b3ULL,
        0xc0c000c0c0c000c0ULL, 0xe4e400e4e4e400e4ULL, 0x5757005757570057ULL,
        0xeaea00eaeaea00eaULL, 0xaeae00aeaeae00aeULL, 0x2323002323230023ULL,
@@ -685,7 +685,7 @@ const u64 camellia_sp44044404[256] = {
        0x9e9e009e9e9e009eULL,
 };
 
-const u64 camellia_sp11101110[256] = {
+__visible const u64 camellia_sp11101110[256] = {
        0x7070700070707000ULL, 0x8282820082828200ULL, 0x2c2c2c002c2c2c00ULL,
        0xececec00ececec00ULL, 0xb3b3b300b3b3b300ULL, 0x2727270027272700ULL,
        0xc0c0c000c0c0c000ULL, 0xe5e5e500e5e5e500ULL, 0xe4e4e400e4e4e400ULL,
@@ -828,8 +828,8 @@ static void camellia_setup_tail(u64 *subkey, u64 *subRL, int max)
 
        subRL[1] ^= (subRL[1] & ~subRL[9]) << 32;
        /* modified for FLinv(kl2) */
-       dw = (subRL[1] & subRL[9]) >> 32,
-               subRL[1] ^= rol32(dw, 1);
+       dw = (subRL[1] & subRL[9]) >> 32;
+       subRL[1] ^= rol32(dw, 1);
 
        /* round 8 */
        subRL[11] ^= subRL[1];
@@ -840,8 +840,8 @@ static void camellia_setup_tail(u64 *subkey, u64 *subRL, int max)
 
        subRL[1] ^= (subRL[1] & ~subRL[17]) << 32;
        /* modified for FLinv(kl4) */
-       dw = (subRL[1] & subRL[17]) >> 32,
-               subRL[1] ^= rol32(dw, 1);
+       dw = (subRL[1] & subRL[17]) >> 32;
+       subRL[1] ^= rol32(dw, 1);
 
        /* round 14 */
        subRL[19] ^= subRL[1];
@@ -859,8 +859,8 @@ static void camellia_setup_tail(u64 *subkey, u64 *subRL, int max)
        } else {
                subRL[1] ^= (subRL[1] & ~subRL[25]) << 32;
                /* modified for FLinv(kl6) */
-               dw = (subRL[1] & subRL[25]) >> 32,
-                       subRL[1] ^= rol32(dw, 1);
+               dw = (subRL[1] & subRL[25]) >> 32;
+               subRL[1] ^= rol32(dw, 1);
 
                /* round 20 */
                subRL[27] ^= subRL[1];
@@ -882,8 +882,8 @@ static void camellia_setup_tail(u64 *subkey, u64 *subRL, int max)
 
                kw4 ^= (kw4 & ~subRL[24]) << 32;
                /* modified for FL(kl5) */
-               dw = (kw4 & subRL[24]) >> 32,
-                       kw4 ^= rol32(dw, 1);
+               dw = (kw4 & subRL[24]) >> 32;
+               kw4 ^= rol32(dw, 1);
        }
 
        /* round 17 */
@@ -895,8 +895,8 @@ static void camellia_setup_tail(u64 *subkey, u64 *subRL, int max)
 
        kw4 ^= (kw4 & ~subRL[16]) << 32;
        /* modified for FL(kl3) */
-       dw = (kw4 & subRL[16]) >> 32,
-               kw4 ^= rol32(dw, 1);
+       dw = (kw4 & subRL[16]) >> 32;
+       kw4 ^= rol32(dw, 1);
 
        /* round 11 */
        subRL[14] ^= kw4;
@@ -907,8 +907,8 @@ static void camellia_setup_tail(u64 *subkey, u64 *subRL, int max)
 
        kw4 ^= (kw4 & ~subRL[8]) << 32;
        /* modified for FL(kl1) */
-       dw = (kw4 & subRL[8]) >> 32,
-               kw4 ^= rol32(dw, 1);
+       dw = (kw4 & subRL[8]) >> 32;
+       kw4 ^= rol32(dw, 1);
 
        /* round 5 */
        subRL[6] ^= kw4;
@@ -928,8 +928,8 @@ static void camellia_setup_tail(u64 *subkey, u64 *subRL, int max)
        SET_SUBKEY_LR(6, subRL[5] ^ subRL[7]);                  /* round 5 */
 
        tl = (subRL[10] >> 32) ^ (subRL[10] & ~subRL[8]);
-       dw = tl & (subRL[8] >> 32),                             /* FL(kl1) */
-               tr = subRL[10] ^ rol32(dw, 1);
+       dw = tl & (subRL[8] >> 32);                             /* FL(kl1) */
+       tr = subRL[10] ^ rol32(dw, 1);
        tt = (tr | ((u64)tl << 32));
 
        SET_SUBKEY_LR(7, subRL[6] ^ tt);                        /* round 6 */
@@ -937,8 +937,8 @@ static void camellia_setup_tail(u64 *subkey, u64 *subRL, int max)
        SET_SUBKEY_LR(9, subRL[9]);                             /* FLinv(kl2) */
 
        tl = (subRL[7] >> 32) ^ (subRL[7] & ~subRL[9]);
-       dw = tl & (subRL[9] >> 32),                             /* FLinv(kl2) */
-               tr = subRL[7] ^ rol32(dw, 1);
+       dw = tl & (subRL[9] >> 32);                             /* FLinv(kl2) */
+       tr = subRL[7] ^ rol32(dw, 1);
        tt = (tr | ((u64)tl << 32));
 
        SET_SUBKEY_LR(10, subRL[11] ^ tt);                      /* round 7 */
@@ -948,8 +948,8 @@ static void camellia_setup_tail(u64 *subkey, u64 *subRL, int max)
        SET_SUBKEY_LR(14, subRL[13] ^ subRL[15]);               /* round 11 */
 
        tl = (subRL[18] >> 32) ^ (subRL[18] & ~subRL[16]);
-       dw = tl & (subRL[16] >> 32),                            /* FL(kl3) */
-               tr = subRL[18] ^ rol32(dw, 1);
+       dw = tl & (subRL[16] >> 32);                            /* FL(kl3) */
+       tr = subRL[18] ^ rol32(dw, 1);
        tt = (tr | ((u64)tl << 32));
 
        SET_SUBKEY_LR(15, subRL[14] ^ tt);                      /* round 12 */
@@ -957,8 +957,8 @@ static void camellia_setup_tail(u64 *subkey, u64 *subRL, int max)
        SET_SUBKEY_LR(17, subRL[17]);                           /* FLinv(kl4) */
 
        tl = (subRL[15] >> 32) ^ (subRL[15] & ~subRL[17]);
-       dw = tl & (subRL[17] >> 32),                            /* FLinv(kl4) */
-               tr = subRL[15] ^ rol32(dw, 1);
+       dw = tl & (subRL[17] >> 32);                            /* FLinv(kl4) */
+       tr = subRL[15] ^ rol32(dw, 1);
        tt = (tr | ((u64)tl << 32));
 
        SET_SUBKEY_LR(18, subRL[19] ^ tt);                      /* round 13 */
@@ -972,8 +972,8 @@ static void camellia_setup_tail(u64 *subkey, u64 *subRL, int max)
                SET_SUBKEY_LR(24, subRL[24] ^ subRL[23]);       /* kw3 */
        } else {
                tl = (subRL[26] >> 32) ^ (subRL[26] & ~subRL[24]);
-               dw = tl & (subRL[24] >> 32),                    /* FL(kl5) */
-                       tr = subRL[26] ^ rol32(dw, 1);
+               dw = tl & (subRL[24] >> 32);                    /* FL(kl5) */
+               tr = subRL[26] ^ rol32(dw, 1);
                tt = (tr | ((u64)tl << 32));
 
                SET_SUBKEY_LR(23, subRL[22] ^ tt);              /* round 18 */
@@ -981,8 +981,8 @@ static void camellia_setup_tail(u64 *subkey, u64 *subRL, int max)
                SET_SUBKEY_LR(25, subRL[25]);                   /* FLinv(kl6) */
 
                tl = (subRL[23] >> 32) ^ (subRL[23] & ~subRL[25]);
-               dw = tl & (subRL[25] >> 32),                    /* FLinv(kl6) */
-                       tr = subRL[23] ^ rol32(dw, 1);
+               dw = tl & (subRL[25] >> 32);                    /* FLinv(kl6) */
+               tr = subRL[23] ^ rol32(dw, 1);
                tt = (tr | ((u64)tl << 32));
 
                SET_SUBKEY_LR(26, subRL[27] ^ tt);              /* round 19 */
index bae3aba95b15ab298562afe1b657d63b706578e0..b987cd54fa46a6505f3e44af3f8543870855db0f 100644 (file)
@@ -130,14 +130,6 @@ static void set_brk(unsigned long start, unsigned long end)
 
 #include <linux/coredump.h>
 
-#define DUMP_WRITE(addr, nr)                        \
-       if (!dump_write(file, (void *)(addr), (nr))) \
-               goto end_coredump;
-
-#define DUMP_SEEK(offset)              \
-       if (!dump_seek(file, offset))   \
-               goto end_coredump;
-
 #define START_DATA()   (u.u_tsize << PAGE_SHIFT)
 #define START_STACK(u) (u.start_stack)
 
@@ -190,22 +182,26 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
 
        set_fs(KERNEL_DS);
        /* struct user */
-       DUMP_WRITE(&dump, sizeof(dump));
+       if (!dump_emit(cprm, &dump, sizeof(dump)))
+               goto end_coredump;
        /* Now dump all of the user data.  Include malloced stuff as well */
-       DUMP_SEEK(PAGE_SIZE - sizeof(dump));
+       if (!dump_align(cprm, PAGE_SIZE))
+               goto end_coredump;
        /* now we start writing out the user space info */
        set_fs(USER_DS);
        /* Dump the data area */
        if (dump.u_dsize != 0) {
                dump_start = START_DATA(dump);
                dump_size = dump.u_dsize << PAGE_SHIFT;
-               DUMP_WRITE(dump_start, dump_size);
+               if (!dump_emit(cprm, dump_start, dump_size))
+                       goto end_coredump;
        }
        /* Now prepare to dump the stack area */
        if (dump.u_ssize != 0) {
                dump_start = START_STACK(dump);
                dump_size = dump.u_ssize << PAGE_SHIFT;
-               DUMP_WRITE(dump_start, dump_size);
+               if (!dump_emit(cprm, dump_start, dump_size))
+                       goto end_coredump;
        }
 end_coredump:
        set_fs(fs);
index 653668d140f994e543ad52e46d0c8402d5fe9259..4a8cb8d7cbd5d2b0febd4333931b459e75f1ea1d 100644 (file)
@@ -35,9 +35,9 @@ static void sanitize_boot_params(struct boot_params *boot_params)
         */
        if (boot_params->sentinel) {
                /* fields in boot_params are left uninitialized, clear them */
-               memset(&boot_params->olpc_ofw_header, 0,
+               memset(&boot_params->ext_ramdisk_image, 0,
                       (char *)&boot_params->efi_info -
-                       (char *)&boot_params->olpc_ofw_header);
+                       (char *)&boot_params->ext_ramdisk_image);
                memset(&boot_params->kbd_status, 0,
                       (char *)&boot_params->hdr -
                       (char *)&boot_params->kbd_status);
index 50e5c58ced23b2ec8537569a71ae4ac41566281f..4c019179a57dd97d6b48ae064ef1faea0dc2e7f7 100644 (file)
@@ -59,7 +59,7 @@ static inline u16 find_equiv_id(struct equiv_cpu_entry *equiv_cpu_table,
 
 extern int __apply_microcode_amd(struct microcode_amd *mc_amd);
 extern int apply_microcode_amd(int cpu);
-extern enum ucode_state load_microcode_amd(int cpu, const u8 *data, size_t size);
+extern enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size);
 
 #ifdef CONFIG_MICROCODE_AMD_EARLY
 #ifdef CONFIG_X86_32
index 24cf5aefb7048b496082fd271755e3b186e23540..4f4a3d98c170477caff77d71693f203ae9a6fb34 100644 (file)
@@ -942,35 +942,6 @@ extern int set_tsc_mode(unsigned int val);
 
 extern u16 amd_get_nb_id(int cpu);
 
-struct aperfmperf {
-       u64 aperf, mperf;
-};
-
-static inline void get_aperfmperf(struct aperfmperf *am)
-{
-       WARN_ON_ONCE(!boot_cpu_has(X86_FEATURE_APERFMPERF));
-
-       rdmsrl(MSR_IA32_APERF, am->aperf);
-       rdmsrl(MSR_IA32_MPERF, am->mperf);
-}
-
-#define APERFMPERF_SHIFT 10
-
-static inline
-unsigned long calc_aperfmperf_ratio(struct aperfmperf *old,
-                                   struct aperfmperf *new)
-{
-       u64 aperf = new->aperf - old->aperf;
-       u64 mperf = new->mperf - old->mperf;
-       unsigned long ratio = aperf;
-
-       mperf >>= APERFMPERF_SHIFT;
-       if (mperf)
-               ratio = div64_u64(aperf, mperf);
-
-       return ratio;
-}
-
 extern unsigned long arch_align_stack(unsigned long sp);
 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
 
index 7ea79c5fa1f217ff3c8aaf0afba935411fe61c6e..492b29802f571b0363a22fe686f708c8235c317a 100644 (file)
@@ -167,12 +167,12 @@ static struct xor_block_template xor_block_avx = {
 
 #define AVX_XOR_SPEED \
 do { \
-       if (cpu_has_avx) \
+       if (cpu_has_avx && cpu_has_osxsave) \
                xor_speed(&xor_block_avx); \
 } while (0)
 
 #define AVX_SELECT(FASTEST) \
-       (cpu_has_avx ? &xor_block_avx : FASTEST)
+       (cpu_has_avx && cpu_has_osxsave ? &xor_block_avx : FASTEST)
 
 #else
 
index f654ecefea5b6d5348df41195a529a4dce303261..08a089043ccfbb669c889ac034091a55aaa92b75 100644 (file)
@@ -512,7 +512,7 @@ static void early_init_amd(struct cpuinfo_x86 *c)
 
 static const int amd_erratum_383[];
 static const int amd_erratum_400[];
-static bool cpu_has_amd_erratum(const int *erratum);
+static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum);
 
 static void init_amd(struct cpuinfo_x86 *c)
 {
@@ -729,11 +729,11 @@ static void init_amd(struct cpuinfo_x86 *c)
                value &= ~(1ULL << 24);
                wrmsrl_safe(MSR_AMD64_BU_CFG2, value);
 
-               if (cpu_has_amd_erratum(amd_erratum_383))
+               if (cpu_has_amd_erratum(c, amd_erratum_383))
                        set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH);
        }
 
-       if (cpu_has_amd_erratum(amd_erratum_400))
+       if (cpu_has_amd_erratum(c, amd_erratum_400))
                set_cpu_bug(c, X86_BUG_AMD_APIC_C1E);
 
        rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);
@@ -878,23 +878,13 @@ static const int amd_erratum_400[] =
 static const int amd_erratum_383[] =
        AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf));
 
-static bool cpu_has_amd_erratum(const int *erratum)
+
+static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
 {
-       struct cpuinfo_x86 *cpu = __this_cpu_ptr(&cpu_info);
        int osvw_id = *erratum++;
        u32 range;
        u32 ms;
 
-       /*
-        * If called early enough that current_cpu_data hasn't been initialized
-        * yet, fall back to boot_cpu_data.
-        */
-       if (cpu->x86 == 0)
-               cpu = &boot_cpu_data;
-
-       if (cpu->x86_vendor != X86_VENDOR_AMD)
-               return false;
-
        if (osvw_id >= 0 && osvw_id < 65536 &&
            cpu_has(cpu, X86_FEATURE_OSVW)) {
                u64 osvw_len;
index 69eb2fa254942e23537266c4e8a4d0a77c7ee04f..376dc7873447c80e27b8b765e6ff41a8f737da8a 100644 (file)
@@ -52,8 +52,7 @@ void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
 }
 
 #ifdef CONFIG_BLK_DEV_INITRD
-void __init early_init_dt_setup_initrd_arch(unsigned long start,
-                                           unsigned long end)
+void __init early_init_dt_setup_initrd_arch(u64 start, u64 end)
 {
        initrd_start = (unsigned long)__va(start);
        initrd_end = (unsigned long)__va(end);
index 7a0adb7ee43397aa9a9fcbf733c3b1c9308f2b71..7123b5df479d872def8ff437fcd407c5c4d5ca50 100644 (file)
@@ -145,10 +145,9 @@ static int collect_cpu_info_amd(int cpu, struct cpu_signature *csig)
        return 0;
 }
 
-static unsigned int verify_patch_size(int cpu, u32 patch_size,
+static unsigned int verify_patch_size(u8 family, u32 patch_size,
                                      unsigned int size)
 {
-       struct cpuinfo_x86 *c = &cpu_data(cpu);
        u32 max_size;
 
 #define F1XH_MPB_MAX_SIZE 2048
@@ -156,7 +155,7 @@ static unsigned int verify_patch_size(int cpu, u32 patch_size,
 #define F15H_MPB_MAX_SIZE 4096
 #define F16H_MPB_MAX_SIZE 3458
 
-       switch (c->x86) {
+       switch (family) {
        case 0x14:
                max_size = F14H_MPB_MAX_SIZE;
                break;
@@ -277,9 +276,8 @@ static void cleanup(void)
  * driver cannot continue functioning normally. In such cases, we tear
  * down everything we've used up so far and exit.
  */
-static int verify_and_add_patch(unsigned int cpu, u8 *fw, unsigned int leftover)
+static int verify_and_add_patch(u8 family, u8 *fw, unsigned int leftover)
 {
-       struct cpuinfo_x86 *c = &cpu_data(cpu);
        struct microcode_header_amd *mc_hdr;
        struct ucode_patch *patch;
        unsigned int patch_size, crnt_size, ret;
@@ -299,7 +297,7 @@ static int verify_and_add_patch(unsigned int cpu, u8 *fw, unsigned int leftover)
 
        /* check if patch is for the current family */
        proc_fam = ((proc_fam >> 8) & 0xf) + ((proc_fam >> 20) & 0xff);
-       if (proc_fam != c->x86)
+       if (proc_fam != family)
                return crnt_size;
 
        if (mc_hdr->nb_dev_id || mc_hdr->sb_dev_id) {
@@ -308,7 +306,7 @@ static int verify_and_add_patch(unsigned int cpu, u8 *fw, unsigned int leftover)
                return crnt_size;
        }
 
-       ret = verify_patch_size(cpu, patch_size, leftover);
+       ret = verify_patch_size(family, patch_size, leftover);
        if (!ret) {
                pr_err("Patch-ID 0x%08x: size mismatch.\n", mc_hdr->patch_id);
                return crnt_size;
@@ -339,7 +337,8 @@ static int verify_and_add_patch(unsigned int cpu, u8 *fw, unsigned int leftover)
        return crnt_size;
 }
 
-static enum ucode_state __load_microcode_amd(int cpu, const u8 *data, size_t size)
+static enum ucode_state __load_microcode_amd(u8 family, const u8 *data,
+                                            size_t size)
 {
        enum ucode_state ret = UCODE_ERROR;
        unsigned int leftover;
@@ -362,7 +361,7 @@ static enum ucode_state __load_microcode_amd(int cpu, const u8 *data, size_t siz
        }
 
        while (leftover) {
-               crnt_size = verify_and_add_patch(cpu, fw, leftover);
+               crnt_size = verify_and_add_patch(family, fw, leftover);
                if (crnt_size < 0)
                        return ret;
 
@@ -373,22 +372,22 @@ static enum ucode_state __load_microcode_amd(int cpu, const u8 *data, size_t siz
        return UCODE_OK;
 }
 
-enum ucode_state load_microcode_amd(int cpu, const u8 *data, size_t size)
+enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size)
 {
        enum ucode_state ret;
 
        /* free old equiv table */
        free_equiv_cpu_table();
 
-       ret = __load_microcode_amd(cpu, data, size);
+       ret = __load_microcode_amd(family, data, size);
 
        if (ret != UCODE_OK)
                cleanup();
 
 #if defined(CONFIG_MICROCODE_AMD_EARLY) && defined(CONFIG_X86_32)
        /* save BSP's matching patch for early load */
-       if (cpu_data(cpu).cpu_index == boot_cpu_data.cpu_index) {
-               struct ucode_patch *p = find_patch(cpu);
+       if (cpu_data(smp_processor_id()).cpu_index == boot_cpu_data.cpu_index) {
+               struct ucode_patch *p = find_patch(smp_processor_id());
                if (p) {
                        memset(amd_bsp_mpb, 0, MPB_MAX_SIZE);
                        memcpy(amd_bsp_mpb, p->data, min_t(u32, ksize(p->data),
@@ -441,7 +440,7 @@ static enum ucode_state request_microcode_amd(int cpu, struct device *device,
                goto fw_release;
        }
 
-       ret = load_microcode_amd(cpu, fw->data, fw->size);
+       ret = load_microcode_amd(c->x86, fw->data, fw->size);
 
  fw_release:
        release_firmware(fw);
index 1d14ffee57495a9793d8f9f5f01073958da6ee3e..6073104ccaa36bca776290155e42a30bdd444a8d 100644 (file)
@@ -238,25 +238,17 @@ static void __init collect_cpu_sig_on_bsp(void *arg)
        uci->cpu_sig.sig = cpuid_eax(0x00000001);
 }
 #else
-static void collect_cpu_info_amd_early(struct cpuinfo_x86 *c,
-                                                struct ucode_cpu_info *uci)
+void load_ucode_amd_ap(void)
 {
+       unsigned int cpu = smp_processor_id();
+       struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
        u32 rev, eax;
 
        rdmsr(MSR_AMD64_PATCH_LEVEL, rev, eax);
        eax = cpuid_eax(0x00000001);
 
-       uci->cpu_sig.sig = eax;
        uci->cpu_sig.rev = rev;
-       c->microcode = rev;
-       c->x86 = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);
-}
-
-void load_ucode_amd_ap(void)
-{
-       unsigned int cpu = smp_processor_id();
-
-       collect_cpu_info_amd_early(&cpu_data(cpu), ucode_cpu_info + cpu);
+       uci->cpu_sig.sig = eax;
 
        if (cpu && !ucode_loaded) {
                void *ucode;
@@ -265,8 +257,10 @@ void load_ucode_amd_ap(void)
                        return;
 
                ucode = (void *)(initrd_start + ucode_offset);
-               if (load_microcode_amd(0, ucode, ucode_size) != UCODE_OK)
+               eax   = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);
+               if (load_microcode_amd(eax, ucode, ucode_size) != UCODE_OK)
                        return;
+
                ucode_loaded = true;
        }
 
@@ -278,6 +272,8 @@ int __init save_microcode_in_initrd_amd(void)
 {
        enum ucode_state ret;
        void *ucode;
+       u32 eax;
+
 #ifdef CONFIG_X86_32
        unsigned int bsp = boot_cpu_data.cpu_index;
        struct ucode_cpu_info *uci = ucode_cpu_info + bsp;
@@ -293,7 +289,10 @@ int __init save_microcode_in_initrd_amd(void)
                return 0;
 
        ucode = (void *)(initrd_start + ucode_offset);
-       ret = load_microcode_amd(0, ucode, ucode_size);
+       eax   = cpuid_eax(0x00000001);
+       eax   = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);
+
+       ret = load_microcode_amd(eax, ucode, ucode_size);
        if (ret != UCODE_OK)
                return -EINVAL;
 
index addf7b58f4e839f171cb8d3b68594e6a36e77344..91a4496db43429de294e481075782295b76ff0e6 100644 (file)
@@ -301,6 +301,15 @@ static int tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
        return 0;
 }
 
+static int tboot_extended_sleep(u8 sleep_state, u32 val_a, u32 val_b)
+{
+       if (!tboot_enabled())
+               return 0;
+
+       pr_warning("tboot is not able to suspend on platforms with reduced hardware sleep (ACPIv5)");
+       return -ENODEV;
+}
+
 static atomic_t ap_wfs_count;
 
 static int tboot_wait_for_aps(int num_aps)
@@ -422,6 +431,7 @@ static __init int tboot_late_init(void)
 #endif
 
        acpi_os_set_prepare_sleep(&tboot_sleep);
+       acpi_os_set_prepare_extended_sleep(&tboot_extended_sleep);
        return 0;
 }
 
index 6a22c19da6633601c73c1136964950d61f1c855b..7c1fde50ecb0f4efcc6d512d5074977921384592 100644 (file)
@@ -7,8 +7,7 @@
  * kernel and insert a module (lg.ko) which allows us to run other Linux
  * kernels the same way we'd run processes.  We call the first kernel the Host,
  * and the others the Guests.  The program which sets up and configures Guests
- * (such as the example in Documentation/virtual/lguest/lguest.c) is called the
- * Launcher.
+ * (such as the example in tools/lguest/lguest.c) is called the Launcher.
  *
  * Secondly, we only run specially modified Guests, not normal kernels: setting
  * CONFIG_LGUEST_GUEST to "y" compiles this file into the kernel so it knows
index cdd0da9dd530b643c5c8f8673f7ae76c5fb8602c..266ca912f62e00e96eab405ce6290f3d7466cec3 100644 (file)
@@ -146,6 +146,7 @@ int __init
 acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
 {
        u64 start, end;
+       u32 hotpluggable;
        int node, pxm;
 
        if (srat_disabled())
@@ -154,7 +155,8 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
                goto out_err_bad_srat;
        if ((ma->flags & ACPI_SRAT_MEM_ENABLED) == 0)
                goto out_err;
-       if ((ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) && !save_add_info())
+       hotpluggable = ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE;
+       if (hotpluggable && !save_add_info())
                goto out_err;
 
        start = ma->base_address;
@@ -174,9 +176,10 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
 
        node_set(node, numa_nodes_parsed);
 
-       printk(KERN_INFO "SRAT: Node %u PXM %u [mem %#010Lx-%#010Lx]\n",
-              node, pxm,
-              (unsigned long long) start, (unsigned long long) end - 1);
+       pr_info("SRAT: Node %u PXM %u [mem %#010Lx-%#010Lx]%s\n",
+               node, pxm,
+               (unsigned long long) start, (unsigned long long) end - 1,
+               hotpluggable ? " hotplug" : "");
 
        return 0;
 out_err_bad_srat:
index 94919e307f8e97c52d4cd69cee64cf7b0eb68584..db6b1ab43255f0179ae6d539197828ce6834c96f 100644 (file)
@@ -210,6 +210,8 @@ static void pcibios_allocate_bridge_resources(struct pci_dev *dev)
                r = &dev->resource[idx];
                if (!r->flags)
                        continue;
+               if (r->parent)  /* Already allocated */
+                       continue;
                if (!r->start || pci_claim_resource(dev, idx) < 0) {
                        /*
                         * Something is wrong with the region.
@@ -318,6 +320,8 @@ static void pcibios_allocate_dev_rom_resource(struct pci_dev *dev)
        r = &dev->resource[PCI_ROM_RESOURCE];
        if (!r->flags || !r->start)
                return;
+       if (r->parent) /* Already allocated */
+               return;
 
        if (pci_claim_resource(dev, PCI_ROM_RESOURCE) < 0) {
                r->end -= r->start;
index 082e88129712b4eb9e2027852c890a02ff31a1c7..5596c7bdd327b1af38138a3d32be36be3e21cb17 100644 (file)
@@ -700,7 +700,7 @@ int pci_mmconfig_insert(struct device *dev, u16 seg, u8 start, u8 end,
        if (!(pci_probe & PCI_PROBE_MMCONF) || pci_mmcfg_arch_init_failed)
                return -ENODEV;
 
-       if (start > end)
+       if (start > end || !addr)
                return -EINVAL;
 
        mutex_lock(&pci_mmcfg_lock);
@@ -716,11 +716,6 @@ int pci_mmconfig_insert(struct device *dev, u16 seg, u8 start, u8 end,
                return -EEXIST;
        }
 
-       if (!addr) {
-               mutex_unlock(&pci_mmcfg_lock);
-               return -EINVAL;
-       }
-
        rc = -EBUSY;
        cfg = pci_mmconfig_alloc(seg, start, end, addr);
        if (cfg == NULL) {
index 6eb18c42a28a3584546e87a57f9cccf5af3473e3..903fded507869b45cc7b304e9b9bb0169db7b00a 100644 (file)
 #include <linux/ioport.h>
 #include <linux/init.h>
 #include <linux/dmi.h>
+#include <linux/acpi.h>
+#include <linux/io.h>
+#include <linux/smp.h>
 
-#include <asm/acpi.h>
 #include <asm/segment.h>
-#include <asm/io.h>
-#include <asm/smp.h>
 #include <asm/pci_x86.h>
 #include <asm/hw_irq.h>
 #include <asm/io_apic.h>
@@ -43,7 +43,7 @@
 #define PCI_FIXED_BAR_4_SIZE   0x14
 #define PCI_FIXED_BAR_5_SIZE   0x1c
 
-static int pci_soc_mode = 0;
+static int pci_soc_mode;
 
 /**
  * fixed_bar_cap - return the offset of the fixed BAR cap if found
@@ -141,7 +141,8 @@ static int pci_device_update_fixed(struct pci_bus *bus, unsigned int devfn,
  */
 static bool type1_access_ok(unsigned int bus, unsigned int devfn, int reg)
 {
-       /* This is a workaround for A0 LNC bug where PCI status register does
+       /*
+        * This is a workaround for A0 LNC bug where PCI status register does
         * not have new CAP bit set. can not be written by SW either.
         *
         * PCI header type in real LNC indicates a single function device, this
@@ -154,7 +155,7 @@ static bool type1_access_ok(unsigned int bus, unsigned int devfn, int reg)
                                || devfn == PCI_DEVFN(0, 0)
                                || devfn == PCI_DEVFN(3, 0)))
                return 1;
-       return 0; /* langwell on others */
+       return 0; /* Langwell on others */
 }
 
 static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
@@ -172,7 +173,8 @@ static int pci_write(struct pci_bus *bus, unsigned int devfn, int where,
 {
        int offset;
 
-       /* On MRST, there is no PCI ROM BAR, this will cause a subsequent read
+       /*
+        * On MRST, there is no PCI ROM BAR, this will cause a subsequent read
         * to ROM BAR return 0 then being ignored.
         */
        if (where == PCI_ROM_ADDRESS)
@@ -210,7 +212,8 @@ static int mrst_pci_irq_enable(struct pci_dev *dev)
 
        pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
 
-       /* MRST only have IOAPIC, the PCI irq lines are 1:1 mapped to
+       /*
+        * MRST only have IOAPIC, the PCI irq lines are 1:1 mapped to
         * IOAPIC RTE entries, so we just enable RTE for the device.
         */
        irq_attr.ioapic = mp_find_ioapic(dev->irq);
@@ -235,7 +238,7 @@ struct pci_ops pci_mrst_ops = {
  */
 int __init pci_mrst_init(void)
 {
-       printk(KERN_INFO "Intel MID platform detected, using MID PCI ops\n");
+       pr_info("Intel MID platform detected, using MID PCI ops\n");
        pci_mmcfg_late_init();
        pcibios_enable_irq = mrst_pci_irq_enable;
        pci_root_ops = pci_mrst_ops;
@@ -244,17 +247,21 @@ int __init pci_mrst_init(void)
        return 1;
 }
 
-/* Langwell devices are not true pci devices, they are not subject to 10 ms
- * d3 to d0 delay required by pci spec.
+/*
+ * Langwell devices are not true PCI devices; they are not subject to 10 ms
+ * d3 to d0 delay required by PCI spec.
  */
 static void pci_d3delay_fixup(struct pci_dev *dev)
 {
-       /* PCI fixups are effectively decided compile time. If we have a dual
-          SoC/non-SoC kernel we don't want to mangle d3 on non SoC devices */
-        if (!pci_soc_mode)
-            return;
-       /* true pci devices in lincroft should allow type 1 access, the rest
-        * are langwell fake pci devices.
+       /*
+        * PCI fixups are effectively decided compile time. If we have a dual
+        * SoC/non-SoC kernel we don't want to mangle d3 on non-SoC devices.
+        */
+       if (!pci_soc_mode)
+               return;
+       /*
+        * True PCI devices in Lincroft should allow type 1 access, the rest
+        * are Langwell fake PCI devices.
         */
        if (type1_access_ok(dev->bus->number, dev->devfn, PCI_DEVICE_ID))
                return;
index 6bb49b687c9791a379317343d6597d996fa72402..a623ee6d9455d4d7c8e9f3b9b57452b39062f86c 100644 (file)
@@ -11,8 +11,7 @@ Elf32_Half elf_core_extra_phdrs(void)
        return vsyscall_ehdr ? (((struct elfhdr *)vsyscall_ehdr)->e_phnum) : 0;
 }
 
-int elf_core_write_extra_phdrs(struct file *file, loff_t offset, size_t *size,
-                              unsigned long limit)
+int elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset)
 {
        if ( vsyscall_ehdr ) {
                const struct elfhdr *const ehdrp =
@@ -32,19 +31,16 @@ int elf_core_write_extra_phdrs(struct file *file, loff_t offset, size_t *size,
                                phdr.p_offset += ofs;
                        }
                        phdr.p_paddr = 0; /* match other core phdrs */
-                       *size += sizeof(phdr);
-                       if (*size > limit
-                           || !dump_write(file, &phdr, sizeof(phdr)))
+                       if (!dump_emit(cprm, &phdr, sizeof(phdr)))
                                return 0;
                }
        }
        return 1;
 }
 
-int elf_core_write_extra_data(struct file *file, size_t *size,
-                             unsigned long limit)
+int elf_core_write_extra_data(struct coredump_params *cprm)
 {
-       if ( vsyscall_ehdr ) {
+       if (vsyscall_ehdr) {
                const struct elfhdr *const ehdrp =
                        (struct elfhdr *) vsyscall_ehdr;
                const struct elf_phdr *const phdrp =
@@ -56,9 +52,7 @@ int elf_core_write_extra_data(struct file *file, size_t *size,
                                void *addr = (void *) phdrp[i].p_vaddr;
                                size_t filesz = phdrp[i].p_filesz;
 
-                               *size += filesz;
-                               if (*size > limit
-                                   || !dump_write(file, addr, filesz))
+                               if (!dump_emit(cprm, addr, filesz))
                                        return 0;
                        }
                }
index 42a8bba0b0ead4235add5133b7e849187641da53..101012bc1ff6d4d99ce408ad8614d1f6f3e2322e 100644 (file)
@@ -170,8 +170,7 @@ static int __init parse_tag_fdt(const bp_tag_t *tag)
 
 __tagtable(BP_TAG_FDT, parse_tag_fdt);
 
-void __init early_init_dt_setup_initrd_arch(unsigned long start,
-               unsigned long end)
+void __init early_init_dt_setup_initrd_arch(u64 start, u64 end)
 {
        initrd_start = (void *)__va(start);
        initrd_end = (void *)__va(end);
index 290792a13e3cc184f0ea67176589050e6323b675..e90c7c164c83b8dc58f36393bb5748c0110e6184 100644 (file)
@@ -437,10 +437,10 @@ struct request_list *__blk_queue_next_rl(struct request_list *rl,
        return &blkg->rl;
 }
 
-static int blkcg_reset_stats(struct cgroup *cgroup, struct cftype *cftype,
-                            u64 val)
+static int blkcg_reset_stats(struct cgroup_subsys_state *css,
+                            struct cftype *cftype, u64 val)
 {
-       struct blkcg *blkcg = cgroup_to_blkcg(cgroup);
+       struct blkcg *blkcg = css_to_blkcg(css);
        struct blkcg_gq *blkg;
        int i;
 
@@ -614,15 +614,13 @@ u64 blkg_stat_recursive_sum(struct blkg_policy_data *pd, int off)
 {
        struct blkcg_policy *pol = blkcg_policy[pd->plid];
        struct blkcg_gq *pos_blkg;
-       struct cgroup *pos_cgrp;
-       u64 sum;
+       struct cgroup_subsys_state *pos_css;
+       u64 sum = 0;
 
        lockdep_assert_held(pd->blkg->q->queue_lock);
 
-       sum = blkg_stat_read((void *)pd + off);
-
        rcu_read_lock();
-       blkg_for_each_descendant_pre(pos_blkg, pos_cgrp, pd_to_blkg(pd)) {
+       blkg_for_each_descendant_pre(pos_blkg, pos_css, pd_to_blkg(pd)) {
                struct blkg_policy_data *pos_pd = blkg_to_pd(pos_blkg, pol);
                struct blkg_stat *stat = (void *)pos_pd + off;
 
@@ -649,16 +647,14 @@ struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkg_policy_data *pd,
 {
        struct blkcg_policy *pol = blkcg_policy[pd->plid];
        struct blkcg_gq *pos_blkg;
-       struct cgroup *pos_cgrp;
-       struct blkg_rwstat sum;
+       struct cgroup_subsys_state *pos_css;
+       struct blkg_rwstat sum = { };
        int i;
 
        lockdep_assert_held(pd->blkg->q->queue_lock);
 
-       sum = blkg_rwstat_read((void *)pd + off);
-
        rcu_read_lock();
-       blkg_for_each_descendant_pre(pos_blkg, pos_cgrp, pd_to_blkg(pd)) {
+       blkg_for_each_descendant_pre(pos_blkg, pos_css, pd_to_blkg(pd)) {
                struct blkg_policy_data *pos_pd = blkg_to_pd(pos_blkg, pol);
                struct blkg_rwstat *rwstat = (void *)pos_pd + off;
                struct blkg_rwstat tmp;
@@ -765,18 +761,18 @@ struct cftype blkcg_files[] = {
 
 /**
  * blkcg_css_offline - cgroup css_offline callback
- * @cgroup: cgroup of interest
+ * @css: css of interest
  *
- * This function is called when @cgroup is about to go away and responsible
- * for shooting down all blkgs associated with @cgroup.  blkgs should be
+ * This function is called when @css is about to go away and responsible
+ * for shooting down all blkgs associated with @css.  blkgs should be
  * removed while holding both q and blkcg locks.  As blkcg lock is nested
  * inside q lock, this function performs reverse double lock dancing.
  *
  * This is the blkcg counterpart of ioc_release_fn().
  */
-static void blkcg_css_offline(struct cgroup *cgroup)
+static void blkcg_css_offline(struct cgroup_subsys_state *css)
 {
-       struct blkcg *blkcg = cgroup_to_blkcg(cgroup);
+       struct blkcg *blkcg = css_to_blkcg(css);
 
        spin_lock_irq(&blkcg->lock);
 
@@ -798,21 +794,21 @@ static void blkcg_css_offline(struct cgroup *cgroup)
        spin_unlock_irq(&blkcg->lock);
 }
 
-static void blkcg_css_free(struct cgroup *cgroup)
+static void blkcg_css_free(struct cgroup_subsys_state *css)
 {
-       struct blkcg *blkcg = cgroup_to_blkcg(cgroup);
+       struct blkcg *blkcg = css_to_blkcg(css);
 
        if (blkcg != &blkcg_root)
                kfree(blkcg);
 }
 
-static struct cgroup_subsys_state *blkcg_css_alloc(struct cgroup *cgroup)
+static struct cgroup_subsys_state *
+blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
 {
        static atomic64_t id_seq = ATOMIC64_INIT(0);
        struct blkcg *blkcg;
-       struct cgroup *parent = cgroup->parent;
 
-       if (!parent) {
+       if (!parent_css) {
                blkcg = &blkcg_root;
                goto done;
        }
@@ -883,14 +879,15 @@ void blkcg_exit_queue(struct request_queue *q)
  * of the main cic data structures.  For now we allow a task to change
  * its cgroup only if it's the only owner of its ioc.
  */
-static int blkcg_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
+static int blkcg_can_attach(struct cgroup_subsys_state *css,
+                           struct cgroup_taskset *tset)
 {
        struct task_struct *task;
        struct io_context *ioc;
        int ret = 0;
 
        /* task_lock() is needed to avoid races with exit_io_context() */
-       cgroup_taskset_for_each(task, cgrp, tset) {
+       cgroup_taskset_for_each(task, css, tset) {
                task_lock(task);
                ioc = task->io_context;
                if (ioc && atomic_read(&ioc->nr_tasks) > 1)
@@ -1127,7 +1124,7 @@ void blkcg_policy_unregister(struct blkcg_policy *pol)
 
        /* kill the intf files first */
        if (pol->cftypes)
-               cgroup_rm_cftypes(&blkio_subsys, pol->cftypes);
+               cgroup_rm_cftypes(pol->cftypes);
 
        /* unregister and update blkgs */
        blkcg_policy[pol->plid] = NULL;
index 8056c03a3382f595212530f6bd1c4dd1a5282ac8..ae6969a7ffd4aa9bebd9373c13a73a503e3b4042 100644 (file)
@@ -179,22 +179,20 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
 void blkg_conf_finish(struct blkg_conf_ctx *ctx);
 
 
-static inline struct blkcg *cgroup_to_blkcg(struct cgroup *cgroup)
+static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
 {
-       return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
-                           struct blkcg, css);
+       return css ? container_of(css, struct blkcg, css) : NULL;
 }
 
 static inline struct blkcg *task_blkcg(struct task_struct *tsk)
 {
-       return container_of(task_subsys_state(tsk, blkio_subsys_id),
-                           struct blkcg, css);
+       return css_to_blkcg(task_css(tsk, blkio_subsys_id));
 }
 
 static inline struct blkcg *bio_blkcg(struct bio *bio)
 {
        if (bio && bio->bi_css)
-               return container_of(bio->bi_css, struct blkcg, css);
+               return css_to_blkcg(bio->bi_css);
        return task_blkcg(current);
 }
 
@@ -206,9 +204,7 @@ static inline struct blkcg *bio_blkcg(struct bio *bio)
  */
 static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
 {
-       struct cgroup *pcg = blkcg->css.cgroup->parent;
-
-       return pcg ? cgroup_to_blkcg(pcg) : NULL;
+       return css_to_blkcg(css_parent(&blkcg->css));
 }
 
 /**
@@ -288,32 +284,33 @@ struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg, struct request_queue *q,
 /**
  * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
  * @d_blkg: loop cursor pointing to the current descendant
- * @pos_cgrp: used for iteration
+ * @pos_css: used for iteration
  * @p_blkg: target blkg to walk descendants of
  *
  * Walk @c_blkg through the descendants of @p_blkg.  Must be used with RCU
  * read locked.  If called under either blkcg or queue lock, the iteration
  * is guaranteed to include all and only online blkgs.  The caller may
- * update @pos_cgrp by calling cgroup_rightmost_descendant() to skip
- * subtree.
+ * update @pos_css by calling css_rightmost_descendant() to skip subtree.
+ * @p_blkg is included in the iteration and the first node to be visited.
  */
-#define blkg_for_each_descendant_pre(d_blkg, pos_cgrp, p_blkg)         \
-       cgroup_for_each_descendant_pre((pos_cgrp), (p_blkg)->blkcg->css.cgroup) \
-               if (((d_blkg) = __blkg_lookup(cgroup_to_blkcg(pos_cgrp), \
+#define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg)          \
+       css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css)   \
+               if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),    \
                                              (p_blkg)->q, false)))
 
 /**
  * blkg_for_each_descendant_post - post-order walk of a blkg's descendants
  * @d_blkg: loop cursor pointing to the current descendant
- * @pos_cgrp: used for iteration
+ * @pos_css: used for iteration
  * @p_blkg: target blkg to walk descendants of
  *
  * Similar to blkg_for_each_descendant_pre() but performs post-order
- * traversal instead.  Synchronization rules are the same.
+ * traversal instead.  Synchronization rules are the same.  @p_blkg is
+ * included in the iteration and the last node to be visited.
  */
-#define blkg_for_each_descendant_post(d_blkg, pos_cgrp, p_blkg)                \
-       cgroup_for_each_descendant_post((pos_cgrp), (p_blkg)->blkcg->css.cgroup) \
-               if (((d_blkg) = __blkg_lookup(cgroup_to_blkcg(pos_cgrp), \
+#define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg)         \
+       css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css)  \
+               if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),    \
                                              (p_blkg)->q, false)))
 
 /**
@@ -576,7 +573,6 @@ static inline int blkcg_activate_policy(struct request_queue *q,
 static inline void blkcg_deactivate_policy(struct request_queue *q,
                                           const struct blkcg_policy *pol) { }
 
-static inline struct blkcg *cgroup_to_blkcg(struct cgroup *cgroup) { return NULL; }
 static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }
 
 static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
index 08a32dfd3844cfeeacae4d38164c96f05f1b0c83..8331aba9426f2c75ffb29ff65a3b89effa6b5262 100644 (file)
@@ -1293,10 +1293,10 @@ static u64 tg_prfill_cpu_rwstat(struct seq_file *sf,
        return __blkg_prfill_rwstat(sf, pd, &rwstat);
 }
 
-static int tg_print_cpu_rwstat(struct cgroup *cgrp, struct cftype *cft,
-                              struct seq_file *sf)
+static int tg_print_cpu_rwstat(struct cgroup_subsys_state *css,
+                              struct cftype *cft, struct seq_file *sf)
 {
-       struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
+       struct blkcg *blkcg = css_to_blkcg(css);
 
        blkcg_print_blkgs(sf, blkcg, tg_prfill_cpu_rwstat, &blkcg_policy_throtl,
                          cft->private, true);
@@ -1325,31 +1325,31 @@ static u64 tg_prfill_conf_uint(struct seq_file *sf, struct blkg_policy_data *pd,
        return __blkg_prfill_u64(sf, pd, v);
 }
 
-static int tg_print_conf_u64(struct cgroup *cgrp, struct cftype *cft,
-                            struct seq_file *sf)
+static int tg_print_conf_u64(struct cgroup_subsys_state *css,
+                            struct cftype *cft, struct seq_file *sf)
 {
-       blkcg_print_blkgs(sf, cgroup_to_blkcg(cgrp), tg_prfill_conf_u64,
+       blkcg_print_blkgs(sf, css_to_blkcg(css), tg_prfill_conf_u64,
                          &blkcg_policy_throtl, cft->private, false);
        return 0;
 }
 
-static int tg_print_conf_uint(struct cgroup *cgrp, struct cftype *cft,
-                             struct seq_file *sf)
+static int tg_print_conf_uint(struct cgroup_subsys_state *css,
+                             struct cftype *cft, struct seq_file *sf)
 {
-       blkcg_print_blkgs(sf, cgroup_to_blkcg(cgrp), tg_prfill_conf_uint,
+       blkcg_print_blkgs(sf, css_to_blkcg(css), tg_prfill_conf_uint,
                          &blkcg_policy_throtl, cft->private, false);
        return 0;
 }
 
-static int tg_set_conf(struct cgroup *cgrp, struct cftype *cft, const char *buf,
-                      bool is_u64)
+static int tg_set_conf(struct cgroup_subsys_state *css, struct cftype *cft,
+                      const char *buf, bool is_u64)
 {
-       struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
+       struct blkcg *blkcg = css_to_blkcg(css);
        struct blkg_conf_ctx ctx;
        struct throtl_grp *tg;
        struct throtl_service_queue *sq;
        struct blkcg_gq *blkg;
-       struct cgroup *pos_cgrp;
+       struct cgroup_subsys_state *pos_css;
        int ret;
 
        ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
@@ -1379,8 +1379,7 @@ static int tg_set_conf(struct cgroup *cgrp, struct cftype *cft, const char *buf,
         * restrictions in the whole hierarchy and allows them to bypass
         * blk-throttle.
         */
-       tg_update_has_rules(tg);
-       blkg_for_each_descendant_pre(blkg, pos_cgrp, ctx.blkg)
+       blkg_for_each_descendant_pre(blkg, pos_css, ctx.blkg)
                tg_update_has_rules(blkg_to_tg(blkg));
 
        /*
@@ -1403,16 +1402,16 @@ static int tg_set_conf(struct cgroup *cgrp, struct cftype *cft, const char *buf,
        return 0;
 }
 
-static int tg_set_conf_u64(struct cgroup *cgrp, struct cftype *cft,
+static int tg_set_conf_u64(struct cgroup_subsys_state *css, struct cftype *cft,
                           const char *buf)
 {
-       return tg_set_conf(cgrp, cft, buf, true);
+       return tg_set_conf(css, cft, buf, true);
 }
 
-static int tg_set_conf_uint(struct cgroup *cgrp, struct cftype *cft,
+static int tg_set_conf_uint(struct cgroup_subsys_state *css, struct cftype *cft,
                            const char *buf)
 {
-       return tg_set_conf(cgrp, cft, buf, false);
+       return tg_set_conf(css, cft, buf, false);
 }
 
 static struct cftype throtl_files[] = {
@@ -1623,7 +1622,7 @@ void blk_throtl_drain(struct request_queue *q)
 {
        struct throtl_data *td = q->td;
        struct blkcg_gq *blkg;
-       struct cgroup *pos_cgrp;
+       struct cgroup_subsys_state *pos_css;
        struct bio *bio;
        int rw;
 
@@ -1636,11 +1635,9 @@ void blk_throtl_drain(struct request_queue *q)
         * better to walk service_queue tree directly but blkg walk is
         * easier.
         */
-       blkg_for_each_descendant_post(blkg, pos_cgrp, td->queue->root_blkg)
+       blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg)
                tg_drain_bios(&blkg_to_tg(blkg)->service_queue);
 
-       tg_drain_bios(&td_root_tg(td)->service_queue);
-
        /* finally, transfer bios from top-level tg's into the td */
        tg_drain_bios(&td->service_queue);
 
index d5bbdcfd0dab5f56fa76f9ca08dc979fe16d1a35..dabb9d02cf9a509655f67ae169ac0c3a706d8f8e 100644 (file)
@@ -1607,12 +1607,11 @@ static u64 cfqg_prfill_weight_device(struct seq_file *sf,
        return __blkg_prfill_u64(sf, pd, cfqg->dev_weight);
 }
 
-static int cfqg_print_weight_device(struct cgroup *cgrp, struct cftype *cft,
-                                   struct seq_file *sf)
+static int cfqg_print_weight_device(struct cgroup_subsys_state *css,
+                                   struct cftype *cft, struct seq_file *sf)
 {
-       blkcg_print_blkgs(sf, cgroup_to_blkcg(cgrp),
-                         cfqg_prfill_weight_device, &blkcg_policy_cfq, 0,
-                         false);
+       blkcg_print_blkgs(sf, css_to_blkcg(css), cfqg_prfill_weight_device,
+                         &blkcg_policy_cfq, 0, false);
        return 0;
 }
 
@@ -1626,35 +1625,34 @@ static u64 cfqg_prfill_leaf_weight_device(struct seq_file *sf,
        return __blkg_prfill_u64(sf, pd, cfqg->dev_leaf_weight);
 }
 
-static int cfqg_print_leaf_weight_device(struct cgroup *cgrp,
+static int cfqg_print_leaf_weight_device(struct cgroup_subsys_state *css,
                                         struct cftype *cft,
                                         struct seq_file *sf)
 {
-       blkcg_print_blkgs(sf, cgroup_to_blkcg(cgrp),
-                         cfqg_prfill_leaf_weight_device, &blkcg_policy_cfq, 0,
-                         false);
+       blkcg_print_blkgs(sf, css_to_blkcg(css), cfqg_prfill_leaf_weight_device,
+                         &blkcg_policy_cfq, 0, false);
        return 0;
 }
 
-static int cfq_print_weight(struct cgroup *cgrp, struct cftype *cft,
+static int cfq_print_weight(struct cgroup_subsys_state *css, struct cftype *cft,
                            struct seq_file *sf)
 {
-       seq_printf(sf, "%u\n", cgroup_to_blkcg(cgrp)->cfq_weight);
+       seq_printf(sf, "%u\n", css_to_blkcg(css)->cfq_weight);
        return 0;
 }
 
-static int cfq_print_leaf_weight(struct cgroup *cgrp, struct cftype *cft,
-                                struct seq_file *sf)
+static int cfq_print_leaf_weight(struct cgroup_subsys_state *css,
+                                struct cftype *cft, struct seq_file *sf)
 {
-       seq_printf(sf, "%u\n",
-                  cgroup_to_blkcg(cgrp)->cfq_leaf_weight);
+       seq_printf(sf, "%u\n", css_to_blkcg(css)->cfq_leaf_weight);
        return 0;
 }
 
-static int __cfqg_set_weight_device(struct cgroup *cgrp, struct cftype *cft,
-                                   const char *buf, bool is_leaf_weight)
+static int __cfqg_set_weight_device(struct cgroup_subsys_state *css,
+                                   struct cftype *cft, const char *buf,
+                                   bool is_leaf_weight)
 {
-       struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
+       struct blkcg *blkcg = css_to_blkcg(css);
        struct blkg_conf_ctx ctx;
        struct cfq_group *cfqg;
        int ret;
@@ -1680,22 +1678,22 @@ static int __cfqg_set_weight_device(struct cgroup *cgrp, struct cftype *cft,
        return ret;
 }
 
-static int cfqg_set_weight_device(struct cgroup *cgrp, struct cftype *cft,
-                                 const char *buf)
+static int cfqg_set_weight_device(struct cgroup_subsys_state *css,
+                                 struct cftype *cft, const char *buf)
 {
-       return __cfqg_set_weight_device(cgrp, cft, buf, false);
+       return __cfqg_set_weight_device(css, cft, buf, false);
 }
 
-static int cfqg_set_leaf_weight_device(struct cgroup *cgrp, struct cftype *cft,
-                                      const char *buf)
+static int cfqg_set_leaf_weight_device(struct cgroup_subsys_state *css,
+                                      struct cftype *cft, const char *buf)
 {
-       return __cfqg_set_weight_device(cgrp, cft, buf, true);
+       return __cfqg_set_weight_device(css, cft, buf, true);
 }
 
-static int __cfq_set_weight(struct cgroup *cgrp, struct cftype *cft, u64 val,
-                           bool is_leaf_weight)
+static int __cfq_set_weight(struct cgroup_subsys_state *css, struct cftype *cft,
+                           u64 val, bool is_leaf_weight)
 {
-       struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
+       struct blkcg *blkcg = css_to_blkcg(css);
        struct blkcg_gq *blkg;
 
        if (val < CFQ_WEIGHT_MIN || val > CFQ_WEIGHT_MAX)
@@ -1727,30 +1725,32 @@ static int __cfq_set_weight(struct cgroup *cgrp, struct cftype *cft, u64 val,
        return 0;
 }
 
-static int cfq_set_weight(struct cgroup *cgrp, struct cftype *cft, u64 val)
+static int cfq_set_weight(struct cgroup_subsys_state *css, struct cftype *cft,
+                         u64 val)
 {
-       return __cfq_set_weight(cgrp, cft, val, false);
+       return __cfq_set_weight(css, cft, val, false);
 }
 
-static int cfq_set_leaf_weight(struct cgroup *cgrp, struct cftype *cft, u64 val)
+static int cfq_set_leaf_weight(struct cgroup_subsys_state *css,
+                              struct cftype *cft, u64 val)
 {
-       return __cfq_set_weight(cgrp, cft, val, true);
+       return __cfq_set_weight(css, cft, val, true);
 }
 
-static int cfqg_print_stat(struct cgroup *cgrp, struct cftype *cft,
+static int cfqg_print_stat(struct cgroup_subsys_state *css, struct cftype *cft,
                           struct seq_file *sf)
 {
-       struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
+       struct blkcg *blkcg = css_to_blkcg(css);
 
        blkcg_print_blkgs(sf, blkcg, blkg_prfill_stat, &blkcg_policy_cfq,
                          cft->private, false);
        return 0;
 }
 
-static int cfqg_print_rwstat(struct cgroup *cgrp, struct cftype *cft,
-                            struct seq_file *sf)
+static int cfqg_print_rwstat(struct cgroup_subsys_state *css,
+                            struct cftype *cft, struct seq_file *sf)
 {
-       struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
+       struct blkcg *blkcg = css_to_blkcg(css);
 
        blkcg_print_blkgs(sf, blkcg, blkg_prfill_rwstat, &blkcg_policy_cfq,
                          cft->private, true);
@@ -1773,20 +1773,20 @@ static u64 cfqg_prfill_rwstat_recursive(struct seq_file *sf,
        return __blkg_prfill_rwstat(sf, pd, &sum);
 }
 
-static int cfqg_print_stat_recursive(struct cgroup *cgrp, struct cftype *cft,
-                                    struct seq_file *sf)
+static int cfqg_print_stat_recursive(struct cgroup_subsys_state *css,
+                                    struct cftype *cft, struct seq_file *sf)
 {
-       struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
+       struct blkcg *blkcg = css_to_blkcg(css);
 
        blkcg_print_blkgs(sf, blkcg, cfqg_prfill_stat_recursive,
                          &blkcg_policy_cfq, cft->private, false);
        return 0;
 }
 
-static int cfqg_print_rwstat_recursive(struct cgroup *cgrp, struct cftype *cft,
-                                      struct seq_file *sf)
+static int cfqg_print_rwstat_recursive(struct cgroup_subsys_state *css,
+                                      struct cftype *cft, struct seq_file *sf)
 {
-       struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
+       struct blkcg *blkcg = css_to_blkcg(css);
 
        blkcg_print_blkgs(sf, blkcg, cfqg_prfill_rwstat_recursive,
                          &blkcg_policy_cfq, cft->private, true);
@@ -1810,10 +1810,10 @@ static u64 cfqg_prfill_avg_queue_size(struct seq_file *sf,
 }
 
 /* print avg_queue_size */
-static int cfqg_print_avg_queue_size(struct cgroup *cgrp, struct cftype *cft,
-                                    struct seq_file *sf)
+static int cfqg_print_avg_queue_size(struct cgroup_subsys_state *css,
+                                    struct cftype *cft, struct seq_file *sf)
 {
-       struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
+       struct blkcg *blkcg = css_to_blkcg(css);
 
        blkcg_print_blkgs(sf, blkcg, cfqg_prfill_avg_queue_size,
                          &blkcg_policy_cfq, 0, false);
index 47f2e5c717591847ed598db32b209bd6f1bbd254..fd0d6b454975c4dba37c5d4cbaff179110351431 100644 (file)
@@ -62,7 +62,7 @@ static inline u8 byte(const u32 x, const unsigned n)
 
 static const u32 rco_tab[10] = { 1, 2, 4, 8, 16, 32, 64, 128, 27, 54 };
 
-const u32 crypto_ft_tab[4][256] = {
+__visible const u32 crypto_ft_tab[4][256] = {
        {
                0xa56363c6, 0x847c7cf8, 0x997777ee, 0x8d7b7bf6,
                0x0df2f2ff, 0xbd6b6bd6, 0xb16f6fde, 0x54c5c591,
@@ -326,7 +326,7 @@ const u32 crypto_ft_tab[4][256] = {
        }
 };
 
-const u32 crypto_fl_tab[4][256] = {
+__visible const u32 crypto_fl_tab[4][256] = {
        {
                0x00000063, 0x0000007c, 0x00000077, 0x0000007b,
                0x000000f2, 0x0000006b, 0x0000006f, 0x000000c5,
@@ -590,7 +590,7 @@ const u32 crypto_fl_tab[4][256] = {
        }
 };
 
-const u32 crypto_it_tab[4][256] = {
+__visible const u32 crypto_it_tab[4][256] = {
        {
                0x50a7f451, 0x5365417e, 0xc3a4171a, 0x965e273a,
                0xcb6bab3b, 0xf1459d1f, 0xab58faac, 0x9303e34b,
@@ -854,7 +854,7 @@ const u32 crypto_it_tab[4][256] = {
        }
 };
 
-const u32 crypto_il_tab[4][256] = {
+__visible const u32 crypto_il_tab[4][256] = {
        {
                0x00000052, 0x00000009, 0x0000006a, 0x000000d5,
                0x00000030, 0x00000036, 0x000000a5, 0x00000038,
index 3b6180336d3d54b9011bb8f90d15a220cf06b642..320ea4d8a0f516422942273c7b36fa50d34bdbc7 100644 (file)
@@ -391,7 +391,7 @@ EXPORT_SYMBOL_GPL(__crypto_alloc_tfm);
  *     @mask: Mask for type comparison
  *
  *     This function should not be used by new algorithm types.
- *     Plesae use crypto_alloc_tfm instead.
+ *     Please use crypto_alloc_tfm instead.
  *
  *     crypto_alloc_base() will first attempt to locate an already loaded
  *     algorithm.  If that fails and the kernel supports dynamically loadable
index 75efa20523053661e35fe209feaab50b418dc364..26bcd7a2d6b473a2b2d1b12edd51b271705a46f0 100644 (file)
@@ -388,8 +388,8 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
        /* round 6 */
        subL[7] ^= subL[1]; subR[7] ^= subR[1];
        subL[1] ^= subR[1] & ~subR[9];
-       dw = subL[1] & subL[9],
-               subR[1] ^= rol32(dw, 1); /* modified for FLinv(kl2) */
+       dw = subL[1] & subL[9];
+       subR[1] ^= rol32(dw, 1); /* modified for FLinv(kl2) */
        /* round 8 */
        subL[11] ^= subL[1]; subR[11] ^= subR[1];
        /* round 10 */
@@ -397,8 +397,8 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
        /* round 12 */
        subL[15] ^= subL[1]; subR[15] ^= subR[1];
        subL[1] ^= subR[1] & ~subR[17];
-       dw = subL[1] & subL[17],
-               subR[1] ^= rol32(dw, 1); /* modified for FLinv(kl4) */
+       dw = subL[1] & subL[17];
+       subR[1] ^= rol32(dw, 1); /* modified for FLinv(kl4) */
        /* round 14 */
        subL[19] ^= subL[1]; subR[19] ^= subR[1];
        /* round 16 */
@@ -413,8 +413,8 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
                kw4l = subL[25]; kw4r = subR[25];
        } else {
                subL[1] ^= subR[1] & ~subR[25];
-               dw = subL[1] & subL[25],
-                       subR[1] ^= rol32(dw, 1); /* modified for FLinv(kl6) */
+               dw = subL[1] & subL[25];
+               subR[1] ^= rol32(dw, 1); /* modified for FLinv(kl6) */
                /* round 20 */
                subL[27] ^= subL[1]; subR[27] ^= subR[1];
                /* round 22 */
@@ -433,8 +433,8 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
                /* round 19 */
                subL[26] ^= kw4l; subR[26] ^= kw4r;
                kw4l ^= kw4r & ~subR[24];
-               dw = kw4l & subL[24],
-                       kw4r ^= rol32(dw, 1); /* modified for FL(kl5) */
+               dw = kw4l & subL[24];
+               kw4r ^= rol32(dw, 1); /* modified for FL(kl5) */
        }
        /* round 17 */
        subL[22] ^= kw4l; subR[22] ^= kw4r;
@@ -443,8 +443,8 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
        /* round 13 */
        subL[18] ^= kw4l; subR[18] ^= kw4r;
        kw4l ^= kw4r & ~subR[16];
-       dw = kw4l & subL[16],
-               kw4r ^= rol32(dw, 1); /* modified for FL(kl3) */
+       dw = kw4l & subL[16];
+       kw4r ^= rol32(dw, 1); /* modified for FL(kl3) */
        /* round 11 */
        subL[14] ^= kw4l; subR[14] ^= kw4r;
        /* round 9 */
@@ -452,8 +452,8 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
        /* round 7 */
        subL[10] ^= kw4l; subR[10] ^= kw4r;
        kw4l ^= kw4r & ~subR[8];
-       dw = kw4l & subL[8],
-               kw4r ^= rol32(dw, 1); /* modified for FL(kl1) */
+       dw = kw4l & subL[8];
+       kw4r ^= rol32(dw, 1); /* modified for FL(kl1) */
        /* round 5 */
        subL[6] ^= kw4l; subR[6] ^= kw4r;
        /* round 3 */
@@ -477,8 +477,8 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
        SUBKEY_L(6) = subL[5] ^ subL[7]; /* round 5 */
        SUBKEY_R(6) = subR[5] ^ subR[7];
        tl = subL[10] ^ (subR[10] & ~subR[8]);
-       dw = tl & subL[8],  /* FL(kl1) */
-               tr = subR[10] ^ rol32(dw, 1);
+       dw = tl & subL[8];  /* FL(kl1) */
+       tr = subR[10] ^ rol32(dw, 1);
        SUBKEY_L(7) = subL[6] ^ tl; /* round 6 */
        SUBKEY_R(7) = subR[6] ^ tr;
        SUBKEY_L(8) = subL[8];       /* FL(kl1) */
@@ -486,8 +486,8 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
        SUBKEY_L(9) = subL[9];       /* FLinv(kl2) */
        SUBKEY_R(9) = subR[9];
        tl = subL[7] ^ (subR[7] & ~subR[9]);
-       dw = tl & subL[9],  /* FLinv(kl2) */
-               tr = subR[7] ^ rol32(dw, 1);
+       dw = tl & subL[9];  /* FLinv(kl2) */
+       tr = subR[7] ^ rol32(dw, 1);
        SUBKEY_L(10) = tl ^ subL[11]; /* round 7 */
        SUBKEY_R(10) = tr ^ subR[11];
        SUBKEY_L(11) = subL[10] ^ subL[12]; /* round 8 */
@@ -499,8 +499,8 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
        SUBKEY_L(14) = subL[13] ^ subL[15]; /* round 11 */
        SUBKEY_R(14) = subR[13] ^ subR[15];
        tl = subL[18] ^ (subR[18] & ~subR[16]);
-       dw = tl & subL[16], /* FL(kl3) */
-               tr = subR[18] ^ rol32(dw, 1);
+       dw = tl & subL[16]; /* FL(kl3) */
+       tr = subR[18] ^ rol32(dw, 1);
        SUBKEY_L(15) = subL[14] ^ tl; /* round 12 */
        SUBKEY_R(15) = subR[14] ^ tr;
        SUBKEY_L(16) = subL[16];     /* FL(kl3) */
@@ -508,8 +508,8 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
        SUBKEY_L(17) = subL[17];     /* FLinv(kl4) */
        SUBKEY_R(17) = subR[17];
        tl = subL[15] ^ (subR[15] & ~subR[17]);
-       dw = tl & subL[17], /* FLinv(kl4) */
-               tr = subR[15] ^ rol32(dw, 1);
+       dw = tl & subL[17]; /* FLinv(kl4) */
+       tr = subR[15] ^ rol32(dw, 1);
        SUBKEY_L(18) = tl ^ subL[19]; /* round 13 */
        SUBKEY_R(18) = tr ^ subR[19];
        SUBKEY_L(19) = subL[18] ^ subL[20]; /* round 14 */
@@ -527,8 +527,8 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
                SUBKEY_R(24) = subR[24] ^ subR[23];
        } else {
                tl = subL[26] ^ (subR[26] & ~subR[24]);
-               dw = tl & subL[24], /* FL(kl5) */
-                       tr = subR[26] ^ rol32(dw, 1);
+               dw = tl & subL[24]; /* FL(kl5) */
+               tr = subR[26] ^ rol32(dw, 1);
                SUBKEY_L(23) = subL[22] ^ tl; /* round 18 */
                SUBKEY_R(23) = subR[22] ^ tr;
                SUBKEY_L(24) = subL[24];     /* FL(kl5) */
@@ -536,8 +536,8 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
                SUBKEY_L(25) = subL[25];     /* FLinv(kl6) */
                SUBKEY_R(25) = subR[25];
                tl = subL[23] ^ (subR[23] & ~subR[25]);
-               dw = tl & subL[25], /* FLinv(kl6) */
-                       tr = subR[23] ^ rol32(dw, 1);
+               dw = tl & subL[25]; /* FLinv(kl6) */
+               tr = subR[23] ^ rol32(dw, 1);
                SUBKEY_L(26) = tl ^ subL[27]; /* round 19 */
                SUBKEY_R(26) = tr ^ subR[27];
                SUBKEY_L(27) = subL[26] ^ subL[28]; /* round 20 */
index a15f523d5f56bc4195f4876164afd8e0d44ab58c..117dd8250f27b604d9cf1b4d6a95efa1e38dfedc 100644 (file)
@@ -15,7 +15,7 @@
 #include <linux/module.h>
 #include <crypto/cast_common.h>
 
-const u32 cast_s1[256] = {
+__visible const u32 cast_s1[256] = {
        0x30fb40d4, 0x9fa0ff0b, 0x6beccd2f, 0x3f258c7a, 0x1e213f2f,
        0x9c004dd3, 0x6003e540, 0xcf9fc949,
        0xbfd4af27, 0x88bbbdb5, 0xe2034090, 0x98d09675, 0x6e63a0e0,
@@ -83,7 +83,7 @@ const u32 cast_s1[256] = {
 };
 EXPORT_SYMBOL_GPL(cast_s1);
 
-const u32 cast_s2[256] = {
+__visible const u32 cast_s2[256] = {
        0x1f201094, 0xef0ba75b, 0x69e3cf7e, 0x393f4380, 0xfe61cf7a,
        0xeec5207a, 0x55889c94, 0x72fc0651,
        0xada7ef79, 0x4e1d7235, 0xd55a63ce, 0xde0436ba, 0x99c430ef,
@@ -151,7 +151,7 @@ const u32 cast_s2[256] = {
 };
 EXPORT_SYMBOL_GPL(cast_s2);
 
-const u32 cast_s3[256] = {
+__visible const u32 cast_s3[256] = {
        0x8defc240, 0x25fa5d9f, 0xeb903dbf, 0xe810c907, 0x47607fff,
        0x369fe44b, 0x8c1fc644, 0xaececa90,
        0xbeb1f9bf, 0xeefbcaea, 0xe8cf1950, 0x51df07ae, 0x920e8806,
@@ -219,7 +219,7 @@ const u32 cast_s3[256] = {
 };
 EXPORT_SYMBOL_GPL(cast_s3);
 
-const u32 cast_s4[256] = {
+__visible const u32 cast_s4[256] = {
        0x9db30420, 0x1fb6e9de, 0xa7be7bef, 0xd273a298, 0x4a4f7bdb,
        0x64ad8c57, 0x85510443, 0xfa020ed1,
        0x7e287aff, 0xe60fb663, 0x095f35a1, 0x79ebf120, 0xfd059d43,
index 7281b8a93ad3a4c486f3ca18721e73f79094659e..79ca2278c2a38a6c022d7d3ccbae30981434e238 100644 (file)
@@ -124,3 +124,25 @@ void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg,
        scatterwalk_done(&walk, out, 0);
 }
 EXPORT_SYMBOL_GPL(scatterwalk_map_and_copy);
+
+int scatterwalk_bytes_sglen(struct scatterlist *sg, int num_bytes)
+{
+       int offset = 0, n = 0;
+
+       /* num_bytes is too small */
+       if (num_bytes < sg->length)
+               return -1;
+
+       do {
+               offset += sg->length;
+               n++;
+               sg = scatterwalk_sg_next(sg);
+
+               /* num_bytes is too large */
+               if (unlikely(!sg && (num_bytes < offset)))
+                       return -1;
+       } while (sg && (num_bytes > offset));
+
+       return n;
+}
+EXPORT_SYMBOL_GPL(scatterwalk_bytes_sglen);
index ecddf921a9db401ee30bea1bf24b30f0ee8dd85d..b7bc2e70895a2ba3ca628e5854128092b6b20e38 100644 (file)
@@ -3224,7 +3224,7 @@ int alg_test(const char *driver, const char *alg, u32 type, u32 mask)
        if (i >= 0)
                rc |= alg_test_descs[i].test(alg_test_descs + i, driver,
                                             type, mask);
-       if (j >= 0)
+       if (j >= 0 && j != i)
                rc |= alg_test_descs[j].test(alg_test_descs + j, driver,
                                             type, mask);
 
index 100bd724f64828e3c42f4e664ced388f346f01e4..3278a210c435320df0e98a9dacbab2892b180176 100644 (file)
@@ -91,24 +91,6 @@ config ACPI_EC_DEBUGFS
          Thus this option is a debug option that helps to write ACPI drivers
          and can be used to identify ACPI code or EC firmware bugs.
 
-config ACPI_PROC_EVENT
-       bool "Deprecated /proc/acpi/event support"
-       depends on PROC_FS
-       default y
-       help
-         A user-space daemon, acpid, typically reads /proc/acpi/event
-         and handles all ACPI-generated events.
-
-         These events are now delivered to user-space either
-         via the input layer or as netlink events.
-
-         This build option enables the old code for legacy
-         user-space implementation.  After some time, this will
-         be moved under CONFIG_ACPI_PROCFS, and then deleted.
-
-         Say Y here to retain the old behaviour.  Say N if your
-         user-space is newer than kernel 2.6.23 (September 2007).
-
 config ACPI_AC
        tristate "AC Adapter"
        depends on X86
index 4f4e741d34b2c9616808177c31e7b488ee00ed8e..f37beaa32750106dffdf9fd7aeac764f1f0094f2 100644 (file)
@@ -267,7 +267,6 @@ static void acpi_ac_notify(struct acpi_device *device, u32 event)
                        msleep(ac_sleep_before_get_state_ms);
 
                acpi_ac_get_state(ac);
-               acpi_bus_generate_proc_event(device, event, (u32) ac->state);
                acpi_bus_generate_netlink_event(device->pnp.device_class,
                                                  dev_name(&device->dev), event,
                                                  (u32) ac->state);
index 27bb6a91de5f6cf257fd85562fb12bcc033baecc..6230637054c6ccd10c7fd95b999684c6b9752616 100644 (file)
@@ -452,7 +452,6 @@ static void acpi_pad_notify(acpi_handle handle, u32 event,
        switch (event) {
        case ACPI_PROCESSOR_AGGREGATOR_NOTIFY:
                acpi_pad_handle_notify(handle);
-               acpi_bus_generate_proc_event(device, event, 0);
                acpi_bus_generate_netlink_event(device->pnp.device_class,
                        dev_name(&device->dev), event, 0);
                break;
index fafec5ddf17f17c326181988cf3f7d3d625c442f..1bde12708f9e112c708bb71ae8d6162af5e367a9 100644 (file)
@@ -52,7 +52,7 @@ int acpi_create_platform_device(struct acpi_device *adev,
        struct platform_device_info pdevinfo;
        struct resource_list_entry *rentry;
        struct list_head resource_list;
-       struct resource *resources;
+       struct resource *resources = NULL;
        int count;
 
        /* If the ACPI node already has a physical device attached, skip it. */
@@ -61,20 +61,22 @@ int acpi_create_platform_device(struct acpi_device *adev,
 
        INIT_LIST_HEAD(&resource_list);
        count = acpi_dev_get_resources(adev, &resource_list, NULL, NULL);
-       if (count <= 0)
+       if (count < 0) {
                return 0;
+       } else if (count > 0) {
+               resources = kmalloc(count * sizeof(struct resource),
+                                   GFP_KERNEL);
+               if (!resources) {
+                       dev_err(&adev->dev, "No memory for resources\n");
+                       acpi_dev_free_resource_list(&resource_list);
+                       return -ENOMEM;
+               }
+               count = 0;
+               list_for_each_entry(rentry, &resource_list, node)
+                       resources[count++] = rentry->res;
 
-       resources = kmalloc(count * sizeof(struct resource), GFP_KERNEL);
-       if (!resources) {
-               dev_err(&adev->dev, "No memory for resources\n");
                acpi_dev_free_resource_list(&resource_list);
-               return -ENOMEM;
        }
-       count = 0;
-       list_for_each_entry(rentry, &resource_list, node)
-               resources[count++] = rentry->res;
-
-       acpi_dev_free_resource_list(&resource_list);
 
        memset(&pdevinfo, 0, sizeof(pdevinfo));
        /*
index 5a74a9c1e42c85a6999f9a4ef780728f8bdc5bcb..f29e06efa47976eba16e3b4e449a8b5a9235909f 100644 (file)
@@ -178,14 +178,17 @@ static int acpi_processor_hotadd_init(struct acpi_processor *pr)
        if (ACPI_FAILURE(status) || !(sta & ACPI_STA_DEVICE_PRESENT))
                return -ENODEV;
 
+       cpu_maps_update_begin();
+       cpu_hotplug_begin();
+
        ret = acpi_map_lsapic(pr->handle, &pr->id);
        if (ret)
-               return ret;
+               goto out;
 
        ret = arch_register_cpu(pr->id);
        if (ret) {
                acpi_unmap_lsapic(pr->id);
-               return ret;
+               goto out;
        }
 
        /*
@@ -195,7 +198,11 @@ static int acpi_processor_hotadd_init(struct acpi_processor *pr)
         */
        pr_info("CPU%d has been hot-added\n", pr->id);
        pr->flags.need_hotplug_init = 1;
-       return 0;
+
+out:
+       cpu_hotplug_done();
+       cpu_maps_update_done();
+       return ret;
 }
 #else
 static inline int acpi_processor_hotadd_init(struct acpi_processor *pr)
@@ -452,11 +459,15 @@ static void acpi_processor_remove(struct acpi_device *device)
        per_cpu(processor_device_array, pr->id) = NULL;
        per_cpu(processors, pr->id) = NULL;
 
+       cpu_maps_update_begin();
+       cpu_hotplug_begin();
+
        /* Remove the CPU. */
-       get_online_cpus();
        arch_unregister_cpu(pr->id);
        acpi_unmap_lsapic(pr->id);
-       put_online_cpus();
+
+       cpu_hotplug_done();
+       cpu_maps_update_done();
 
        try_offline_node(cpu_to_node(pr->id));
 
index b8d38117a20c9f5971b719bea11d90017e571d69..90e846f985fa30caa109c8de336ac8e677a29bee 100644 (file)
@@ -138,6 +138,12 @@ u8 ACPI_INIT_GLOBAL(acpi_gbl_disable_auto_repair, FALSE);
  */
 u8 ACPI_INIT_GLOBAL(acpi_gbl_disable_ssdt_table_load, FALSE);
 
+/*
+ * We keep track of the latest version of Windows that has been requested by
+ * the BIOS.
+ */
+u8 ACPI_INIT_GLOBAL(acpi_gbl_osi_data, 0);
+
 /* acpi_gbl_FADT is a local copy of the FADT, converted to a common format. */
 
 struct acpi_table_fadt acpi_gbl_FADT;
@@ -285,7 +291,6 @@ ACPI_EXTERN u8 acpi_gbl_debugger_configuration;
 ACPI_EXTERN u8 acpi_gbl_step_to_next_call;
 ACPI_EXTERN u8 acpi_gbl_acpi_hardware_present;
 ACPI_EXTERN u8 acpi_gbl_events_initialized;
-ACPI_EXTERN u8 acpi_gbl_osi_data;
 ACPI_EXTERN struct acpi_interface_info *acpi_gbl_supported_interfaces;
 ACPI_EXTERN struct acpi_address_range
     *acpi_gbl_address_range_list[ACPI_ADDRESS_RANGE_MAX];
index d4a4901637cd01f06e2646a145abdeaff16773ec..0ed00669cd217a0754087ed61bc89465e31804f3 100644 (file)
@@ -942,6 +942,9 @@ struct acpi_interface_info {
 
 #define ACPI_OSI_INVALID                0x01
 #define ACPI_OSI_DYNAMIC                0x02
+#define ACPI_OSI_FEATURE                0x04
+#define ACPI_OSI_DEFAULT_INVALID        0x08
+#define ACPI_OSI_OPTIONAL_FEATURE       (ACPI_OSI_FEATURE | ACPI_OSI_DEFAULT_INVALID | ACPI_OSI_INVALID)
 
 struct acpi_port_info {
        char *name;
@@ -1030,6 +1033,7 @@ struct acpi_external_list {
        u8 type;
        u8 flags;
        u8 resolved;
+       u8 emitted;
 };
 
 /* Values for Flags field above */
index b83dc32a5ae053eee5a935e30aaedaf0f7b6f3c3..40b04bd5579e36abec92165270c44f576e8fddc4 100644 (file)
@@ -104,8 +104,8 @@ acpi_ns_walk_namespace(acpi_object_type type,
                       acpi_handle start_object,
                       u32 max_depth,
                       u32 flags,
-                      acpi_walk_callback pre_order_visit,
-                      acpi_walk_callback post_order_visit,
+                      acpi_walk_callback descending_callback,
+                      acpi_walk_callback ascending_callback,
                       void *context, void **return_value);
 
 struct acpi_namespace_node *acpi_ns_get_next_node(struct acpi_namespace_node
index 7755e915a007a9aec4ecefce08f2bfdceb7b2a7f..c54f42c64fe2ca78ae7d9269b96c69ad2461c552 100644 (file)
 
 acpi_status acpi_allocate_root_table(u32 initial_table_count);
 
+/*
+ * tbxfroot - Root pointer utilities
+ */
+acpi_status acpi_tb_validate_rsdp(struct acpi_table_rsdp *rsdp);
+
+u8 *acpi_tb_scan_memory_for_rsdp(u8 *start_address, u32 length);
+
 /*
  * tbfadt - FADT parse/convert/validate
  */
index 3c76edea6803faf0cf8c7db1b76431b5abafbf04..d5a62a6182bb96826e5ee7144d8e7be228cd91ea 100644 (file)
@@ -470,6 +470,8 @@ acpi_status acpi_ut_install_interface(acpi_string interface_name);
 
 acpi_status acpi_ut_remove_interface(acpi_string interface_name);
 
+acpi_status acpi_ut_update_interfaces(u8 action);
+
 struct acpi_interface_info *acpi_ut_get_interface(acpi_string interface_name);
 
 acpi_status acpi_ut_osi_implementation(struct acpi_walk_state *walk_state);
@@ -616,7 +618,7 @@ int acpi_ut_stricmp(char *string1, char *string2);
 
 acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 *ret_integer);
 
-void acpi_ut_print_string(char *string, u8 max_length);
+void acpi_ut_print_string(char *string, u16 max_length);
 
 void ut_convert_backslashes(char *pathname);
 
index 9037f17c9608f70a6bc2df68eca5b55043ad9723..7842700346a48b0cfa542d46def472ba92643212 100644 (file)
@@ -125,7 +125,6 @@ acpi_status acpi_ev_gpe_initialize(void)
                /* GPE block 0 exists (has both length and address > 0) */
 
                register_count0 = (u16)(acpi_gbl_FADT.gpe0_block_length / 2);
-
                gpe_number_max =
                    (register_count0 * ACPI_GPE_REGISTER_WIDTH) - 1;
 
@@ -204,16 +203,6 @@ acpi_status acpi_ev_gpe_initialize(void)
                goto cleanup;
        }
 
-       /* Check for Max GPE number out-of-range */
-
-       if (gpe_number_max > ACPI_GPE_MAX) {
-               ACPI_ERROR((AE_INFO,
-                           "Maximum GPE number from FADT is too large: 0x%X",
-                           gpe_number_max));
-               status = AE_BAD_VALUE;
-               goto cleanup;
-       }
-
       cleanup:
        (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
        return_ACPI_STATUS(AE_OK);
index c740f24e3101069af922a5e5842a77fb60d34710..4d046faac48cbb09b2c0aa6c4a0c5e8550333ec5 100644 (file)
@@ -338,6 +338,7 @@ acpi_ex_dump_object(union acpi_operand_object *obj_desc,
 {
        u8 *target;
        char *name;
+       const char *reference_name;
        u8 count;
 
        if (!info) {
@@ -426,10 +427,9 @@ acpi_ex_dump_object(union acpi_operand_object *obj_desc,
 
                case ACPI_EXD_REFERENCE:
 
+                       reference_name = acpi_ut_get_reference_name(obj_desc);
                        acpi_ex_out_string("Class Name",
-                                          ACPI_CAST_PTR(char,
-                                                        acpi_ut_get_reference_name
-                                                        (obj_desc)));
+                                          ACPI_CAST_PTR(char, reference_name));
                        acpi_ex_dump_reference_obj(obj_desc);
                        break;
 
index 814b4a3d656ad1dc3d9acf438258735569017656..2cdd41d8ade6f6c3ebef1d7a629eba1792aaeedc 100644 (file)
@@ -962,10 +962,17 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state)
                                         */
                                        return_desc =
                                            *(operand[0]->reference.where);
-                                       if (return_desc) {
-                                               acpi_ut_add_reference
-                                                   (return_desc);
+                                       if (!return_desc) {
+                                               /*
+                                                * Element is NULL, do not allow the dereference.
+                                                * This provides compatibility with other ACPI
+                                                * implementations.
+                                                */
+                                               return_ACPI_STATUS
+                                                   (AE_AML_UNINITIALIZED_ELEMENT);
                                        }
+
+                                       acpi_ut_add_reference(return_desc);
                                        break;
 
                                default:
@@ -990,11 +997,40 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state)
                                                                         acpi_namespace_node
                                                                         *)
                                                                        return_desc);
-                               }
+                                       if (!return_desc) {
+                                               break;
+                                       }
 
-                               /* Add another reference to the object! */
+                                       /*
+                                        * June 2013:
+                                        * buffer_fields/field_units require additional resolution
+                                        */
+                                       switch (return_desc->common.type) {
+                                       case ACPI_TYPE_BUFFER_FIELD:
+                                       case ACPI_TYPE_LOCAL_REGION_FIELD:
+                                       case ACPI_TYPE_LOCAL_BANK_FIELD:
+                                       case ACPI_TYPE_LOCAL_INDEX_FIELD:
 
-                               acpi_ut_add_reference(return_desc);
+                                               status =
+                                                   acpi_ex_read_data_from_field
+                                                   (walk_state, return_desc,
+                                                    &temp_desc);
+                                               if (ACPI_FAILURE(status)) {
+                                                       goto cleanup;
+                                               }
+
+                                               return_desc = temp_desc;
+                                               break;
+
+                                       default:
+
+                                               /* Add another reference to the object */
+
+                                               acpi_ut_add_reference
+                                                   (return_desc);
+                                               break;
+                                       }
+                               }
                                break;
 
                        default:
index 5e5f76230f5eb673057f86c7d61c24539471a1eb..414076818d40ee234d05970e5675023d1bff5b20 100644 (file)
@@ -43,6 +43,7 @@
  */
 
 #include <acpi/acpi.h>
+#include <linux/acpi.h>
 #include "accommon.h"
 
 #define _COMPONENT          ACPI_HARDWARE
@@ -128,6 +129,14 @@ acpi_status acpi_hw_extended_sleep(u8 sleep_state)
 
        ACPI_FLUSH_CPU_CACHE();
 
+       status = acpi_os_prepare_extended_sleep(sleep_state,
+                                               acpi_gbl_sleep_type_a,
+                                               acpi_gbl_sleep_type_b);
+       if (ACPI_SKIP(status))
+               return_ACPI_STATUS(AE_OK);
+       if (ACPI_FAILURE(status))
+               return_ACPI_STATUS(status);
+
        /*
         * Set the SLP_TYP and SLP_EN bits.
         *
index 0c1a8bbd05d6662949671d30ee1074da584da908..2d7d22ebc782151189a9d76a828cf9948a267603 100644 (file)
@@ -100,8 +100,13 @@ acpi_status acpi_get_timer(u32 * ticks)
                return_ACPI_STATUS(AE_BAD_PARAMETER);
        }
 
-       status = acpi_hw_read(ticks, &acpi_gbl_FADT.xpm_timer_block);
+       /* ACPI 5.0A: PM Timer is optional */
+
+       if (!acpi_gbl_FADT.xpm_timer_block.address) {
+               return_ACPI_STATUS(AE_SUPPORT);
+       }
 
+       status = acpi_hw_read(ticks, &acpi_gbl_FADT.xpm_timer_block);
        return_ACPI_STATUS(status);
 }
 
@@ -148,6 +153,12 @@ acpi_get_timer_duration(u32 start_ticks, u32 end_ticks, u32 * time_elapsed)
                return_ACPI_STATUS(AE_BAD_PARAMETER);
        }
 
+       /* ACPI 5.0A: PM Timer is optional */
+
+       if (!acpi_gbl_FADT.xpm_timer_block.address) {
+               return_ACPI_STATUS(AE_SUPPORT);
+       }
+
        /*
         * Compute Tick Delta:
         * Handle (max one) timer rollovers on 24-bit versus 32-bit timers.
index 24b71a01bf9398340c6c01f68c990ac7caa8c51f..098e7666cbc9450ecae2a92fa605425b3346b9c3 100644 (file)
@@ -150,6 +150,15 @@ acpi_ns_check_return_value(struct acpi_namespace_node *node,
                goto exit;
        }
 
+       /*
+        *
+        * 4) If there is no return value and it is optional, just return
+        * AE_OK (_WAK).
+        */
+       if (!(*return_object_ptr)) {
+               goto exit;
+       }
+
        /*
         * For returned Package objects, check the type of all sub-objects.
         * Note: Package may have been newly created by call above.
@@ -268,7 +277,12 @@ acpi_ns_check_object_type(struct acpi_evaluate_info *info,
 
        acpi_ut_get_expected_return_types(type_buffer, expected_btypes);
 
-       if (package_index == ACPI_NOT_PACKAGE_ELEMENT) {
+       if (!return_object) {
+               ACPI_WARN_PREDEFINED((AE_INFO, info->full_pathname,
+                                     info->node_flags,
+                                     "Expected return object of type %s",
+                                     type_buffer));
+       } else if (package_index == ACPI_NOT_PACKAGE_ELEMENT) {
                ACPI_WARN_PREDEFINED((AE_INFO, info->full_pathname,
                                      info->node_flags,
                                      "Return type mismatch - found %s, expected %s",
index e70911a9e441d3ded8c21cfa4c17a45c00ed936e..e81f15ef659a5c85dbf19e020b20a7ae75523dcd 100644 (file)
@@ -156,9 +156,9 @@ struct acpi_namespace_node *acpi_ns_get_next_node_typed(acpi_object_type type,
  *              max_depth           - Depth to which search is to reach
  *              flags               - Whether to unlock the NS before invoking
  *                                    the callback routine
- *              pre_order_visit     - Called during tree pre-order visit
+ *              descending_callback - Called during tree descent
  *                                    when an object of "Type" is found
- *              post_order_visit    - Called during tree post-order visit
+ *              ascending_callback  - Called during tree ascent
  *                                    when an object of "Type" is found
  *              context             - Passed to user function(s) above
  *              return_value        - from the user_function if terminated
@@ -185,8 +185,8 @@ acpi_ns_walk_namespace(acpi_object_type type,
                       acpi_handle start_node,
                       u32 max_depth,
                       u32 flags,
-                      acpi_walk_callback pre_order_visit,
-                      acpi_walk_callback post_order_visit,
+                      acpi_walk_callback descending_callback,
+                      acpi_walk_callback ascending_callback,
                       void *context, void **return_value)
 {
        acpi_status status;
@@ -255,22 +255,22 @@ acpi_ns_walk_namespace(acpi_object_type type,
                        }
 
                        /*
-                        * Invoke the user function, either pre-order or post-order
+                        * Invoke the user function, either descending, ascending,
                         * or both.
                         */
                        if (!node_previously_visited) {
-                               if (pre_order_visit) {
+                               if (descending_callback) {
                                        status =
-                                           pre_order_visit(child_node, level,
-                                                           context,
-                                                           return_value);
+                                           descending_callback(child_node,
+                                                               level, context,
+                                                               return_value);
                                }
                        } else {
-                               if (post_order_visit) {
+                               if (ascending_callback) {
                                        status =
-                                           post_order_visit(child_node, level,
-                                                            context,
-                                                            return_value);
+                                           ascending_callback(child_node,
+                                                              level, context,
+                                                              return_value);
                                }
                        }
 
index f553cfdb71ddd6ec350b63077f6eda49f30daac9..b38b4b07f86e31d8920a7b1467ba4fb6604fc6eb 100644 (file)
@@ -533,9 +533,9 @@ static void acpi_ns_resolve_references(struct acpi_evaluate_info *info)
  * PARAMETERS:  type                - acpi_object_type to search for
  *              start_object        - Handle in namespace where search begins
  *              max_depth           - Depth to which search is to reach
- *              pre_order_visit     - Called during tree pre-order visit
+ *              descending_callback - Called during tree descent
  *                                    when an object of "Type" is found
- *              post_order_visit    - Called during tree post-order visit
+ *              ascending_callback  - Called during tree ascent
  *                                    when an object of "Type" is found
  *              context             - Passed to user function(s) above
  *              return_value        - Location where return value of
@@ -563,8 +563,8 @@ acpi_status
 acpi_walk_namespace(acpi_object_type type,
                    acpi_handle start_object,
                    u32 max_depth,
-                   acpi_walk_callback pre_order_visit,
-                   acpi_walk_callback post_order_visit,
+                   acpi_walk_callback descending_callback,
+                   acpi_walk_callback ascending_callback,
                    void *context, void **return_value)
 {
        acpi_status status;
@@ -574,7 +574,7 @@ acpi_walk_namespace(acpi_object_type type,
        /* Parameter validation */
 
        if ((type > ACPI_TYPE_LOCAL_MAX) ||
-           (!max_depth) || (!pre_order_visit && !post_order_visit)) {
+           (!max_depth) || (!descending_callback && !ascending_callback)) {
                return_ACPI_STATUS(AE_BAD_PARAMETER);
        }
 
@@ -606,9 +606,9 @@ acpi_walk_namespace(acpi_object_type type,
        }
 
        status = acpi_ns_walk_namespace(type, start_object, max_depth,
-                                       ACPI_NS_WALK_UNLOCK, pre_order_visit,
-                                       post_order_visit, context,
-                                       return_value);
+                                       ACPI_NS_WALK_UNLOCK,
+                                       descending_callback, ascending_callback,
+                                       context, return_value);
 
        (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
 
index f3a4d95899f71ea8578362fa35b5867423028dd2..83c164434580d3bd12229be0278d6e7e70e0e6b8 100644 (file)
@@ -158,6 +158,7 @@ acpi_get_name(acpi_handle handle, u32 name_type, struct acpi_buffer * buffer)
 {
        acpi_status status;
        struct acpi_namespace_node *node;
+       char *node_name;
 
        /* Parameter validation */
 
@@ -202,7 +203,8 @@ acpi_get_name(acpi_handle handle, u32 name_type, struct acpi_buffer * buffer)
 
        /* Just copy the ACPI name from the Node and zero terminate it */
 
-       ACPI_MOVE_NAME(buffer->pointer, acpi_ut_get_node_name(node));
+       node_name = acpi_ut_get_node_name(node);
+       ACPI_MOVE_NAME(buffer->pointer, node_name);
        ((char *)buffer->pointer)[ACPI_NAME_SIZE] = 0;
        status = AE_OK;
 
@@ -379,9 +381,14 @@ acpi_get_object_info(acpi_handle handle,
                 * Get extra info for ACPI Device/Processor objects only:
                 * Run the _STA, _ADR and, sx_w, and _sx_d methods.
                 *
-                * Note: none of these methods are required, so they may or may
+                * Notes: none of these methods are required, so they may or may
                 * not be present for this device. The Info->Valid bitfield is used
                 * to indicate which methods were found and run successfully.
+                *
+                * For _STA, if the method does not exist, then (as per the ACPI
+                * specification), the returned current_status flags will indicate
+                * that the device is present/functional/enabled. Otherwise, the
+                * current_status flags reflect the value returned from _STA.
                 */
 
                /* Execute the Device._STA method */
index 33b00d22300a80153e8c7d4c60f273f3364377c1..9d99f21896935873cd868f68a767f7f074370ec6 100644 (file)
@@ -117,7 +117,7 @@ static struct acpi_fadt_info fadt_info_table[] = {
         ACPI_FADT_OFFSET(pm_timer_block),
         ACPI_FADT_OFFSET(pm_timer_length),
         ACPI_PM_TIMER_WIDTH,
-        ACPI_FADT_REQUIRED},
+        ACPI_FADT_SEPARATE_LENGTH},    /* ACPI 5.0A: Timer is optional */
 
        {"Gpe0Block",
         ACPI_FADT_OFFSET(xgpe0_block),
@@ -574,7 +574,7 @@ static void acpi_tb_validate_fadt(void)
 
                if (fadt_info_table[i].type & ACPI_FADT_REQUIRED) {
                        /*
-                        * Field is required (Pm1a_event, Pm1a_control, pm_timer).
+                        * Field is required (Pm1a_event, Pm1a_control).
                         * Both the address and length must be non-zero.
                         */
                        if (!address64->address || !length) {
index 7c2ecfb7c2c37dbb448e7703d2689d994fe8931f..948c95e80d44765f41e82bb62c92bb4c3405bb01 100644 (file)
 #define _COMPONENT          ACPI_TABLES
 ACPI_MODULE_NAME("tbxfroot")
 
-/* Local prototypes */
-static u8 *acpi_tb_scan_memory_for_rsdp(u8 * start_address, u32 length);
-
-static acpi_status acpi_tb_validate_rsdp(struct acpi_table_rsdp *rsdp);
-
 /*******************************************************************************
  *
  * FUNCTION:    acpi_tb_validate_rsdp
@@ -64,8 +59,7 @@ static acpi_status acpi_tb_validate_rsdp(struct acpi_table_rsdp *rsdp);
  * DESCRIPTION: Validate the RSDP (ptr)
  *
  ******************************************************************************/
-
-static acpi_status acpi_tb_validate_rsdp(struct acpi_table_rsdp *rsdp)
+acpi_status acpi_tb_validate_rsdp(struct acpi_table_rsdp *rsdp)
 {
 
        /*
@@ -74,7 +68,7 @@ static acpi_status acpi_tb_validate_rsdp(struct acpi_table_rsdp *rsdp)
         * Note: Sometimes there exists more than one RSDP in memory; the valid
         * RSDP has a valid checksum, all others have an invalid checksum.
         */
-       if (ACPI_STRNCMP((char *)rsdp, ACPI_SIG_RSDP,
+       if (ACPI_STRNCMP((char *)rsdp->signature, ACPI_SIG_RSDP,
                         sizeof(ACPI_SIG_RSDP) - 1) != 0) {
 
                /* Nope, BAD Signature */
@@ -231,7 +225,7 @@ acpi_status acpi_find_root_pointer(acpi_size *table_address)
  * DESCRIPTION: Search a block of memory for the RSDP signature
  *
  ******************************************************************************/
-static u8 *acpi_tb_scan_memory_for_rsdp(u8 * start_address, u32 length)
+u8 *acpi_tb_scan_memory_for_rsdp(u8 *start_address, u32 length)
 {
        acpi_status status;
        u8 *mem_rover;
index ee83adb97b1e73827a6a7740de03edb3680e2a28..4fd68971019bdd69bcdf5ede57e26abaea3bf09f 100644 (file)
@@ -239,7 +239,8 @@ acpi_ut_evaluate_numeric_object(char *object_name,
  * RETURN:      Status
  *
  * DESCRIPTION: Executes _STA for selected device and stores results in
- *              *Flags.
+ *              *Flags. If _STA does not exist, then the device is assumed
+ *              to be present/functional/enabled (as per the ACPI spec).
  *
  *              NOTE: Internal function, no parameter validation
  *
@@ -257,6 +258,11 @@ acpi_ut_execute_STA(struct acpi_namespace_node *device_node, u32 * flags)
                                         ACPI_BTYPE_INTEGER, &obj_desc);
        if (ACPI_FAILURE(status)) {
                if (AE_NOT_FOUND == status) {
+                       /*
+                        * if _STA does not exist, then (as per the ACPI specification),
+                        * the returned flags will indicate that the device is present,
+                        * functional, and enabled.
+                        */
                        ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
                                          "_STA on %4.4s was not found, assuming device is present\n",
                                          acpi_ut_get_node_name(device_node)));
index f736448a8606755e1400e197a02bc0674fd597b3..d6f26bf8a0626d0d7497baeaf1ce931d3a76c9ba 100644 (file)
@@ -336,7 +336,6 @@ acpi_status acpi_ut_init_globals(void)
        acpi_gbl_trace_dbg_layer = 0;
        acpi_gbl_debugger_configuration = DEBUGGER_THREADING;
        acpi_gbl_db_output_flags = ACPI_DB_CONSOLE_OUTPUT;
-       acpi_gbl_osi_data = 0;
        acpi_gbl_osi_mutex = NULL;
        acpi_gbl_reg_methods_executed = FALSE;
 
index 7e807725c636896aeeb83c3b35022eb2ad88848f..8856bd37bc763f2a94c157acb513aefa82342875 100644 (file)
@@ -77,21 +77,20 @@ static struct acpi_interface_info acpi_default_supported_interfaces[] = {
 
        /* Feature Group Strings */
 
-       {"Extended Address Space Descriptor", NULL, 0, 0}
+       {"Extended Address Space Descriptor", NULL, ACPI_OSI_FEATURE, 0},
 
        /*
         * All "optional" feature group strings (features that are implemented
-        * by the host) should be dynamically added by the host via
-        * acpi_install_interface and should not be manually added here.
-        *
-        * Examples of optional feature group strings:
-        *
-        * "Module Device"
-        * "Processor Device"
-        * "3.0 Thermal Model"
-        * "3.0 _SCP Extensions"
-        * "Processor Aggregator Device"
+        * by the host) should be dynamically modified to VALID by the host via
+        * acpi_install_interface or acpi_update_interfaces. Such optional feature
+        * group strings are set as INVALID by default here.
         */
+
+       {"Module Device", NULL, ACPI_OSI_OPTIONAL_FEATURE, 0},
+       {"Processor Device", NULL, ACPI_OSI_OPTIONAL_FEATURE, 0},
+       {"3.0 Thermal Model", NULL, ACPI_OSI_OPTIONAL_FEATURE, 0},
+       {"3.0 _SCP Extensions", NULL, ACPI_OSI_OPTIONAL_FEATURE, 0},
+       {"Processor Aggregator Device", NULL, ACPI_OSI_OPTIONAL_FEATURE, 0}
 };
 
 /*******************************************************************************
@@ -158,11 +157,20 @@ acpi_status acpi_ut_interface_terminate(void)
        while (next_interface) {
                acpi_gbl_supported_interfaces = next_interface->next;
 
-               /* Only interfaces added at runtime can be freed */
-
                if (next_interface->flags & ACPI_OSI_DYNAMIC) {
+
+                       /* Only interfaces added at runtime can be freed */
+
                        ACPI_FREE(next_interface->name);
                        ACPI_FREE(next_interface);
+               } else {
+                       /* Interface is in static list. Reset it to invalid or valid. */
+
+                       if (next_interface->flags & ACPI_OSI_DEFAULT_INVALID) {
+                               next_interface->flags |= ACPI_OSI_INVALID;
+                       } else {
+                               next_interface->flags &= ~ACPI_OSI_INVALID;
+                       }
                }
 
                next_interface = acpi_gbl_supported_interfaces;
@@ -276,6 +284,49 @@ acpi_status acpi_ut_remove_interface(acpi_string interface_name)
        return (AE_NOT_EXIST);
 }
 
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_update_interfaces
+ *
+ * PARAMETERS:  action              - Actions to be performed during the
+ *                                    update
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Update _OSI interface strings, disabling or enabling OS vendor
+ *              strings or/and feature group strings.
+ *              Caller MUST hold acpi_gbl_osi_mutex
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ut_update_interfaces(u8 action)
+{
+       struct acpi_interface_info *next_interface;
+
+       next_interface = acpi_gbl_supported_interfaces;
+       while (next_interface) {
+               if (((next_interface->flags & ACPI_OSI_FEATURE) &&
+                    (action & ACPI_FEATURE_STRINGS)) ||
+                   (!(next_interface->flags & ACPI_OSI_FEATURE) &&
+                    (action & ACPI_VENDOR_STRINGS))) {
+                       if (action & ACPI_DISABLE_INTERFACES) {
+
+                               /* Mark the interfaces as invalid */
+
+                               next_interface->flags |= ACPI_OSI_INVALID;
+                       } else {
+                               /* Mark the interfaces as valid */
+
+                               next_interface->flags &= ~ACPI_OSI_INVALID;
+                       }
+               }
+
+               next_interface = next_interface->next;
+       }
+
+       return (AE_OK);
+}
+
 /*******************************************************************************
  *
  * FUNCTION:    acpi_ut_get_interface
index c53759b76a3f97d0fe5558f36ad9b656fbc905fa..cb1e9cc32d5f8d89e7d8ba954d5c0928a0ff5f66 100644 (file)
@@ -333,7 +333,8 @@ acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 *ret_integer)
  * FUNCTION:    acpi_ut_print_string
  *
  * PARAMETERS:  string          - Null terminated ASCII string
- *              max_length      - Maximum output length
+ *              max_length      - Maximum output length. Used to constrain the
+ *                                length of strings during debug output only.
  *
  * RETURN:      None
  *
@@ -342,7 +343,7 @@ acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 *ret_integer)
  *
  ******************************************************************************/
 
-void acpi_ut_print_string(char *string, u8 max_length)
+void acpi_ut_print_string(char *string, u16 max_length)
 {
        u32 i;
 
index 6505774f223e372239a0ee5fadfc00e4aaaf0446..03a211e6e26ab0a3789b7192fc6386ecd456bb1c 100644 (file)
@@ -387,6 +387,34 @@ acpi_status acpi_install_interface_handler(acpi_interface_handler handler)
 
 ACPI_EXPORT_SYMBOL(acpi_install_interface_handler)
 
+/*****************************************************************************
+ *
+ * FUNCTION:    acpi_update_interfaces
+ *
+ * PARAMETERS:  action              - Actions to be performed during the
+ *                                    update
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Update _OSI interface strings, disabling or enabling OS vendor
+ *              string or/and feature group strings.
+ *
+ ****************************************************************************/
+acpi_status acpi_update_interfaces(u8 action)
+{
+       acpi_status status;
+
+       status = acpi_os_acquire_mutex(acpi_gbl_osi_mutex, ACPI_WAIT_FOREVER);
+       if (ACPI_FAILURE(status)) {
+               return (status);
+       }
+
+       status = acpi_ut_update_interfaces(action);
+
+       acpi_os_release_mutex(acpi_gbl_osi_mutex);
+       return (status);
+}
+
 /*****************************************************************************
  *
  * FUNCTION:    acpi_check_address_range
@@ -402,6 +430,7 @@ ACPI_EXPORT_SYMBOL(acpi_install_interface_handler)
  *              ASL operation region address ranges.
  *
  ****************************************************************************/
+
 u32
 acpi_check_address_range(acpi_adr_space_type space_id,
                         acpi_physical_address address,
index 88d0b0f9f92b73706e330e62a4bcf70dd4b0befa..822b1ed3b00f6460ec07a3eb3ffa1301b46cbaeb 100644 (file)
@@ -284,8 +284,10 @@ static int erst_exec_move_data(struct apei_exec_context *ctx,
        if (!src)
                return -ENOMEM;
        dst = ioremap(ctx->dst_base + offset, ctx->var2);
-       if (!dst)
+       if (!dst) {
+               iounmap(src);
                return -ENOMEM;
+       }
 
        memmove(dst, src, ctx->var2);
 
@@ -933,9 +935,9 @@ static int erst_open_pstore(struct pstore_info *psi);
 static int erst_close_pstore(struct pstore_info *psi);
 static ssize_t erst_reader(u64 *id, enum pstore_type_id *type, int *count,
                           struct timespec *time, char **buf,
-                          struct pstore_info *psi);
+                          bool *compressed, struct pstore_info *psi);
 static int erst_writer(enum pstore_type_id type, enum kmsg_dump_reason reason,
-                      u64 *id, unsigned int part, int count, size_t hsize,
+                      u64 *id, unsigned int part, int count, bool compressed,
                       size_t size, struct pstore_info *psi);
 static int erst_clearer(enum pstore_type_id type, u64 id, int count,
                        struct timespec time, struct pstore_info *psi);
@@ -956,6 +958,9 @@ static struct pstore_info erst_info = {
 #define CPER_SECTION_TYPE_DMESG                                                \
        UUID_LE(0xc197e04e, 0xd545, 0x4a70, 0x9c, 0x17, 0xa5, 0x54,     \
                0x94, 0x19, 0xeb, 0x12)
+#define CPER_SECTION_TYPE_DMESG_Z                                      \
+       UUID_LE(0x4f118707, 0x04dd, 0x4055, 0xb5, 0xdd, 0x95, 0x6d,     \
+               0x34, 0xdd, 0xfa, 0xc6)
 #define CPER_SECTION_TYPE_MCE                                          \
        UUID_LE(0xfe08ffbe, 0x95e4, 0x4be7, 0xbc, 0x73, 0x40, 0x96,     \
                0x04, 0x4a, 0x38, 0xfc)
@@ -989,7 +994,7 @@ static int erst_close_pstore(struct pstore_info *psi)
 
 static ssize_t erst_reader(u64 *id, enum pstore_type_id *type, int *count,
                           struct timespec *time, char **buf,
-                          struct pstore_info *psi)
+                          bool *compressed, struct pstore_info *psi)
 {
        int rc;
        ssize_t len = 0;
@@ -1034,7 +1039,12 @@ skip:
        }
        memcpy(*buf, rcd->data, len - sizeof(*rcd));
        *id = record_id;
+       *compressed = false;
        if (uuid_le_cmp(rcd->sec_hdr.section_type,
+                       CPER_SECTION_TYPE_DMESG_Z) == 0) {
+               *type = PSTORE_TYPE_DMESG;
+               *compressed = true;
+       } else if (uuid_le_cmp(rcd->sec_hdr.section_type,
                        CPER_SECTION_TYPE_DMESG) == 0)
                *type = PSTORE_TYPE_DMESG;
        else if (uuid_le_cmp(rcd->sec_hdr.section_type,
@@ -1055,7 +1065,7 @@ out:
 }
 
 static int erst_writer(enum pstore_type_id type, enum kmsg_dump_reason reason,
-                      u64 *id, unsigned int part, int count, size_t hsize,
+                      u64 *id, unsigned int part, int count, bool compressed,
                       size_t size, struct pstore_info *psi)
 {
        struct cper_pstore_record *rcd = (struct cper_pstore_record *)
@@ -1085,7 +1095,10 @@ static int erst_writer(enum pstore_type_id type, enum kmsg_dump_reason reason,
        rcd->sec_hdr.flags = CPER_SEC_PRIMARY;
        switch (type) {
        case PSTORE_TYPE_DMESG:
-               rcd->sec_hdr.section_type = CPER_SECTION_TYPE_DMESG;
+               if (compressed)
+                       rcd->sec_hdr.section_type = CPER_SECTION_TYPE_DMESG_Z;
+               else
+                       rcd->sec_hdr.section_type = CPER_SECTION_TYPE_DMESG;
                break;
        case PSTORE_TYPE_MCE:
                rcd->sec_hdr.section_type = CPER_SECTION_TYPE_MCE;
index d405fbad406ac75cc9bb41c8734a457d60e14222..2c9958cd7a4350ae675b76ad338904999ae149c1 100644 (file)
@@ -527,18 +527,14 @@ static int acpi_battery_get_state(struct acpi_battery *battery)
 static int acpi_battery_set_alarm(struct acpi_battery *battery)
 {
        acpi_status status = 0;
-       union acpi_object arg0 = { .type = ACPI_TYPE_INTEGER };
-       struct acpi_object_list arg_list = { 1, &arg0 };
 
        if (!acpi_battery_present(battery) ||
            !test_bit(ACPI_BATTERY_ALARM_PRESENT, &battery->flags))
                return -ENODEV;
 
-       arg0.integer.value = battery->alarm;
-
        mutex_lock(&battery->lock);
-       status = acpi_evaluate_object(battery->device->handle, "_BTP",
-                                &arg_list, NULL);
+       status = acpi_execute_simple_method(battery->device->handle, "_BTP",
+                                           battery->alarm);
        mutex_unlock(&battery->lock);
 
        if (ACPI_FAILURE(status))
@@ -550,12 +546,8 @@ static int acpi_battery_set_alarm(struct acpi_battery *battery)
 
 static int acpi_battery_init_alarm(struct acpi_battery *battery)
 {
-       acpi_status status = AE_OK;
-       acpi_handle handle = NULL;
-
        /* See if alarms are supported, and if so, set default */
-       status = acpi_get_handle(battery->device->handle, "_BTP", &handle);
-       if (ACPI_FAILURE(status)) {
+       if (!acpi_has_method(battery->device->handle, "_BTP")) {
                clear_bit(ACPI_BATTERY_ALARM_PRESENT, &battery->flags);
                return 0;
        }
@@ -1036,8 +1028,6 @@ static void acpi_battery_notify(struct acpi_device *device, u32 event)
        if (event == ACPI_BATTERY_NOTIFY_INFO)
                acpi_battery_refresh(battery);
        acpi_battery_update(battery);
-       acpi_bus_generate_proc_event(device, event,
-                                    acpi_battery_present(battery));
        acpi_bus_generate_netlink_event(device->pnp.device_class,
                                        dev_name(&device->dev), event,
                                        acpi_battery_present(battery));
@@ -1068,7 +1058,7 @@ static int acpi_battery_add(struct acpi_device *device)
 {
        int result = 0;
        struct acpi_battery *battery = NULL;
-       acpi_handle handle;
+
        if (!device)
                return -EINVAL;
        battery = kzalloc(sizeof(struct acpi_battery), GFP_KERNEL);
@@ -1080,8 +1070,7 @@ static int acpi_battery_add(struct acpi_device *device)
        device->driver_data = battery;
        mutex_init(&battery->lock);
        mutex_init(&battery->sysfs_lock);
-       if (ACPI_SUCCESS(acpi_get_handle(battery->device->handle,
-                       "_BIX", &handle)))
+       if (acpi_has_method(battery->device->handle, "_BIX"))
                set_bit(ACPI_BATTERY_XINFO_PRESENT, &battery->flags);
        result = acpi_battery_update(battery);
        if (result)
index cb9629638def78fdd1432dc06102c2a5f5097a24..a404127768045f5b2fb1fe7a6035cf5ec2a0f93a 100644 (file)
@@ -192,6 +192,12 @@ static int __init dmi_disable_osi_win7(const struct dmi_system_id *d)
        acpi_osi_setup("!Windows 2009");
        return 0;
 }
+static int __init dmi_disable_osi_win8(const struct dmi_system_id *d)
+{
+       printk(KERN_NOTICE PREFIX "DMI detected: %s\n", d->ident);
+       acpi_osi_setup("!Windows 2012");
+       return 0;
+}
 
 static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
        {
@@ -267,6 +273,14 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
                     DMI_MATCH(DMI_PRODUCT_NAME, "Satellite P305D"),
                },
        },
+       {
+       .callback = dmi_disable_osi_win8,
+       .ident = "ASUS Zenbook Prime UX31A",
+       .matches = {
+                    DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+                    DMI_MATCH(DMI_PRODUCT_NAME, "UX31A"),
+               },
+       },
 
        /*
         * BIOS invocation of _OSI(Linux) is almost always a BIOS bug.
index a5bb33bab4485307f7969b19ff9d9ea9cb3f67e2..b587ec8257b2190758eca2b8b3306a933d4ae16d 100644 (file)
@@ -89,27 +89,6 @@ static struct dmi_system_id dsdt_dmi_table[] __initdata = {
                                 Device Management
    -------------------------------------------------------------------------- */
 
-int acpi_bus_get_device(acpi_handle handle, struct acpi_device **device)
-{
-       acpi_status status;
-
-       if (!device)
-               return -EINVAL;
-
-       /* TBD: Support fixed-feature devices */
-
-       status = acpi_get_data(handle, acpi_bus_data_handler, (void **)device);
-       if (ACPI_FAILURE(status) || !*device) {
-               ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No context for object [%p]\n",
-                                 handle));
-               return -ENODEV;
-       }
-
-       return 0;
-}
-
-EXPORT_SYMBOL(acpi_bus_get_device);
-
 acpi_status acpi_bus_get_status_handle(acpi_handle handle,
                                       unsigned long long *sta)
 {
@@ -345,104 +324,6 @@ static void acpi_bus_osc_support(void)
        /* do we need to check other returned cap? Sounds no */
 }
 
-/* --------------------------------------------------------------------------
-                                Event Management
-   -------------------------------------------------------------------------- */
-
-#ifdef CONFIG_ACPI_PROC_EVENT
-static DEFINE_SPINLOCK(acpi_bus_event_lock);
-
-LIST_HEAD(acpi_bus_event_list);
-DECLARE_WAIT_QUEUE_HEAD(acpi_bus_event_queue);
-
-extern int event_is_open;
-
-int acpi_bus_generate_proc_event4(const char *device_class, const char *bus_id, u8 type, int data)
-{
-       struct acpi_bus_event *event;
-       unsigned long flags;
-
-       /* drop event on the floor if no one's listening */
-       if (!event_is_open)
-               return 0;
-
-       event = kzalloc(sizeof(struct acpi_bus_event), GFP_ATOMIC);
-       if (!event)
-               return -ENOMEM;
-
-       strcpy(event->device_class, device_class);
-       strcpy(event->bus_id, bus_id);
-       event->type = type;
-       event->data = data;
-
-       spin_lock_irqsave(&acpi_bus_event_lock, flags);
-       list_add_tail(&event->node, &acpi_bus_event_list);
-       spin_unlock_irqrestore(&acpi_bus_event_lock, flags);
-
-       wake_up_interruptible(&acpi_bus_event_queue);
-
-       return 0;
-
-}
-
-EXPORT_SYMBOL_GPL(acpi_bus_generate_proc_event4);
-
-int acpi_bus_generate_proc_event(struct acpi_device *device, u8 type, int data)
-{
-       if (!device)
-               return -EINVAL;
-       return acpi_bus_generate_proc_event4(device->pnp.device_class,
-                                            device->pnp.bus_id, type, data);
-}
-
-EXPORT_SYMBOL(acpi_bus_generate_proc_event);
-
-int acpi_bus_receive_event(struct acpi_bus_event *event)
-{
-       unsigned long flags;
-       struct acpi_bus_event *entry = NULL;
-
-       DECLARE_WAITQUEUE(wait, current);
-
-
-       if (!event)
-               return -EINVAL;
-
-       if (list_empty(&acpi_bus_event_list)) {
-
-               set_current_state(TASK_INTERRUPTIBLE);
-               add_wait_queue(&acpi_bus_event_queue, &wait);
-
-               if (list_empty(&acpi_bus_event_list))
-                       schedule();
-
-               remove_wait_queue(&acpi_bus_event_queue, &wait);
-               set_current_state(TASK_RUNNING);
-
-               if (signal_pending(current))
-                       return -ERESTARTSYS;
-       }
-
-       spin_lock_irqsave(&acpi_bus_event_lock, flags);
-       if (!list_empty(&acpi_bus_event_list)) {
-               entry = list_entry(acpi_bus_event_list.next,
-                                  struct acpi_bus_event, node);
-               list_del(&entry->node);
-       }
-       spin_unlock_irqrestore(&acpi_bus_event_lock, flags);
-
-       if (!entry)
-               return -ENODEV;
-
-       memcpy(event, entry, sizeof(struct acpi_bus_event));
-
-       kfree(entry);
-
-       return 0;
-}
-
-#endif /* CONFIG_ACPI_PROC_EVENT */
-
 /* --------------------------------------------------------------------------
                              Notification Handling
    -------------------------------------------------------------------------- */
@@ -499,19 +380,6 @@ static void acpi_bus_check_scope(acpi_handle handle)
         */
 }
 
-static BLOCKING_NOTIFIER_HEAD(acpi_bus_notify_list);
-int register_acpi_bus_notifier(struct notifier_block *nb)
-{
-       return blocking_notifier_chain_register(&acpi_bus_notify_list, nb);
-}
-EXPORT_SYMBOL_GPL(register_acpi_bus_notifier);
-
-void unregister_acpi_bus_notifier(struct notifier_block *nb)
-{
-       blocking_notifier_chain_unregister(&acpi_bus_notify_list, nb);
-}
-EXPORT_SYMBOL_GPL(unregister_acpi_bus_notifier);
-
 /**
  * acpi_bus_notify
  * ---------------
@@ -525,9 +393,6 @@ static void acpi_bus_notify(acpi_handle handle, u32 type, void *data)
        ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Notification %#02x to handle %p\n",
                          type, handle));
 
-       blocking_notifier_call_chain(&acpi_bus_notify_list,
-               type, (void *)handle);
-
        switch (type) {
 
        case ACPI_NOTIFY_BUS_CHECK:
@@ -593,8 +458,6 @@ static void acpi_bus_notify(acpi_handle handle, u32 type, void *data)
 static int __init acpi_bus_init_irq(void)
 {
        acpi_status status;
-       union acpi_object arg = { ACPI_TYPE_INTEGER };
-       struct acpi_object_list arg_list = { 1, &arg };
        char *message = NULL;
 
 
@@ -623,9 +486,7 @@ static int __init acpi_bus_init_irq(void)
 
        printk(KERN_INFO PREFIX "Using %s for interrupt routing\n", message);
 
-       arg.integer.value = acpi_irq_model;
-
-       status = acpi_evaluate_object(NULL, "\\_PIC", &arg_list, NULL);
+       status = acpi_execute_simple_method(NULL, "\\_PIC", acpi_irq_model);
        if (ACPI_FAILURE(status) && (status != AE_NOT_FOUND)) {
                ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PIC"));
                return -ENODEV;
@@ -715,7 +576,6 @@ static int __init acpi_bus_init(void)
 {
        int result;
        acpi_status status;
-       extern acpi_status acpi_os_initialize1(void);
 
        acpi_os_initialize1();
 
index d2e617b5b3f661c8d0effd34628eb4225875d0f4..a55773801c5f1fb739df9597dbbde86d272b092c 100644 (file)
@@ -303,8 +303,6 @@ static void acpi_button_notify(struct acpi_device *device, u32 event)
 
                        pm_wakeup_event(&device->dev, 0);
                }
-
-               acpi_bus_generate_proc_event(device, event, ++button->pushed);
                break;
        default:
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
index 4ab807dc851812a8c4d64265298f38c610bf22ba..59d3202f6b36fc197afe259b343764cb2a9621aa 100644 (file)
@@ -159,26 +159,29 @@ int acpi_device_set_power(struct acpi_device *device, int state)
        int result = 0;
        bool cut_power = false;
 
-       if (!device || (state < ACPI_STATE_D0) || (state > ACPI_STATE_D3_COLD))
+       if (!device || !device->flags.power_manageable
+           || (state < ACPI_STATE_D0) || (state > ACPI_STATE_D3_COLD))
                return -EINVAL;
 
        /* Make sure this is a valid target state */
 
        if (state == device->power.state) {
-               ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device is already at %s\n",
+               ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] already in %s\n",
+                                 device->pnp.bus_id,
                                  acpi_power_state_string(state)));
                return 0;
        }
 
        if (!device->power.states[state].flags.valid) {
-               printk(KERN_WARNING PREFIX "Device does not support %s\n",
-                      acpi_power_state_string(state));
+               dev_warn(&device->dev, "Power state %s not supported\n",
+                        acpi_power_state_string(state));
                return -ENODEV;
        }
        if (device->parent && (state < device->parent->power.state)) {
-               printk(KERN_WARNING PREFIX
-                             "Cannot set device to a higher-powered"
-                             " state than parent\n");
+               dev_warn(&device->dev,
+                        "Cannot transition to power state %s for parent in %s\n",
+                        acpi_power_state_string(state),
+                        acpi_power_state_string(device->parent->power.state));
                return -ENODEV;
        }
 
@@ -191,8 +194,8 @@ int acpi_device_set_power(struct acpi_device *device, int state)
 
        if (state < device->power.state && state != ACPI_STATE_D0
            && device->power.state >= ACPI_STATE_D3_HOT) {
-               printk(KERN_WARNING PREFIX
-                       "Cannot transition to non-D0 state from D3\n");
+               dev_warn(&device->dev,
+                        "Cannot transition to non-D0 state from D3\n");
                return -ENODEV;
        }
 
@@ -219,10 +222,8 @@ int acpi_device_set_power(struct acpi_device *device, int state)
 
  end:
        if (result) {
-               printk(KERN_WARNING PREFIX
-                             "Device [%s] failed to transition to %s\n",
-                             device->pnp.bus_id,
-                             acpi_power_state_string(state));
+               dev_warn(&device->dev, "Failed to change power state to %s\n",
+                        acpi_power_state_string(state));
        } else {
                device->power.state = state;
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
@@ -244,13 +245,6 @@ int acpi_bus_set_power(acpi_handle handle, int state)
        if (result)
                return result;
 
-       if (!device->flags.power_manageable) {
-               ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-                               "Device [%s] is not power manageable\n",
-                               dev_name(&device->dev)));
-               return -ENODEV;
-       }
-
        return acpi_device_set_power(device, state);
 }
 EXPORT_SYMBOL(acpi_bus_set_power);
index 826560753389ca3f1d2dd1bc2670f15f059970ae..05ea4be01a832ccb3d8b38e4318c66fee0f83a89 100644 (file)
@@ -51,8 +51,6 @@ MODULE_PARM_DESC(immediate_undock, "1 (default) will cause the driver to "
        " the driver to wait for userspace to write the undock sysfs file "
        " before undocking");
 
-static struct atomic_notifier_head dock_notifier_list;
-
 static const struct acpi_device_id dock_device_ids[] = {
        {"LNXDOCK", 0},
        {"", 0},
@@ -63,8 +61,6 @@ struct dock_station {
        acpi_handle handle;
        unsigned long last_dock_time;
        u32 flags;
-       spinlock_t dd_lock;
-       struct mutex hp_lock;
        struct list_head dependent_devices;
 
        struct list_head sibling;
@@ -91,6 +87,12 @@ struct dock_dependent_device {
 #define DOCK_EVENT     3
 #define UNDOCK_EVENT   2
 
+enum dock_callback_type {
+       DOCK_CALL_HANDLER,
+       DOCK_CALL_FIXUP,
+       DOCK_CALL_UEVENT,
+};
+
 /*****************************************************************************
  *                         Dock Dependent device functions                   *
  *****************************************************************************/
@@ -101,7 +103,7 @@ struct dock_dependent_device {
  *
  * Add the dependent device to the dock's dependent device list.
  */
-static int
+static int __init
 add_dock_dependent_device(struct dock_station *ds, acpi_handle handle)
 {
        struct dock_dependent_device *dd;
@@ -112,14 +114,21 @@ add_dock_dependent_device(struct dock_station *ds, acpi_handle handle)
 
        dd->handle = handle;
        INIT_LIST_HEAD(&dd->list);
-
-       spin_lock(&ds->dd_lock);
        list_add_tail(&dd->list, &ds->dependent_devices);
-       spin_unlock(&ds->dd_lock);
 
        return 0;
 }
 
+static void remove_dock_dependent_devices(struct dock_station *ds)
+{
+       struct dock_dependent_device *dd, *aux;
+
+       list_for_each_entry_safe(dd, aux, &ds->dependent_devices, list) {
+               list_del(&dd->list);
+               kfree(dd);
+       }
+}
+
 /**
  * dock_init_hotplug - Initialize a hotplug device on a docking station.
  * @dd: Dock-dependent device.
@@ -135,19 +144,16 @@ static int dock_init_hotplug(struct dock_dependent_device *dd,
        int ret = 0;
 
        mutex_lock(&hotplug_lock);
-
-       if (dd->hp_context) {
+       if (WARN_ON(dd->hp_context)) {
                ret = -EEXIST;
        } else {
                dd->hp_refcount = 1;
                dd->hp_ops = ops;
                dd->hp_context = context;
                dd->hp_release = release;
+               if (init)
+                       init(context);
        }
-
-       if (!WARN_ON(ret) && init)
-               init(context);
-
        mutex_unlock(&hotplug_lock);
        return ret;
 }
@@ -162,27 +168,22 @@ static int dock_init_hotplug(struct dock_dependent_device *dd,
  */
 static void dock_release_hotplug(struct dock_dependent_device *dd)
 {
-       void (*release)(void *) = NULL;
-       void *context = NULL;
-
        mutex_lock(&hotplug_lock);
-
        if (dd->hp_context && !--dd->hp_refcount) {
+               void (*release)(void *) = dd->hp_release;
+               void *context = dd->hp_context;
+
                dd->hp_ops = NULL;
-               context = dd->hp_context;
                dd->hp_context = NULL;
-               release = dd->hp_release;
                dd->hp_release = NULL;
+               if (release)
+                       release(context);
        }
-
-       if (release && context)
-               release(context);
-
        mutex_unlock(&hotplug_lock);
 }
 
 static void dock_hotplug_event(struct dock_dependent_device *dd, u32 event,
-                              bool uevent)
+                              enum dock_callback_type cb_type)
 {
        acpi_notify_handler cb = NULL;
        bool run = false;
@@ -192,8 +193,18 @@ static void dock_hotplug_event(struct dock_dependent_device *dd, u32 event,
        if (dd->hp_context) {
                run = true;
                dd->hp_refcount++;
-               if (dd->hp_ops)
-                       cb = uevent ? dd->hp_ops->uevent : dd->hp_ops->handler;
+               if (dd->hp_ops) {
+                       switch (cb_type) {
+                       case DOCK_CALL_FIXUP:
+                               cb = dd->hp_ops->fixup;
+                               break;
+                       case DOCK_CALL_UEVENT:
+                               cb = dd->hp_ops->uevent;
+                               break;
+                       default:
+                               cb = dd->hp_ops->handler;
+                       }
+               }
        }
 
        mutex_unlock(&hotplug_lock);
@@ -220,63 +231,17 @@ find_dock_dependent_device(struct dock_station *ds, acpi_handle handle)
 {
        struct dock_dependent_device *dd;
 
-       spin_lock(&ds->dd_lock);
-       list_for_each_entry(dd, &ds->dependent_devices, list) {
-               if (handle == dd->handle) {
-                       spin_unlock(&ds->dd_lock);
+       list_for_each_entry(dd, &ds->dependent_devices, list)
+               if (handle == dd->handle)
                        return dd;
-               }
-       }
-       spin_unlock(&ds->dd_lock);
+
        return NULL;
 }
 
 /*****************************************************************************
  *                         Dock functions                                    *
  *****************************************************************************/
-/**
- * is_dock - see if a device is a dock station
- * @handle: acpi handle of the device
- *
- * If an acpi object has a _DCK method, then it is by definition a dock
- * station, so return true.
- */
-static int is_dock(acpi_handle handle)
-{
-       acpi_status status;
-       acpi_handle tmp;
-
-       status = acpi_get_handle(handle, "_DCK", &tmp);
-       if (ACPI_FAILURE(status))
-               return 0;
-       return 1;
-}
-
-static int is_ejectable(acpi_handle handle)
-{
-       acpi_status status;
-       acpi_handle tmp;
-
-       status = acpi_get_handle(handle, "_EJ0", &tmp);
-       if (ACPI_FAILURE(status))
-               return 0;
-       return 1;
-}
-
-static int is_ata(acpi_handle handle)
-{
-       acpi_handle tmp;
-
-       if ((ACPI_SUCCESS(acpi_get_handle(handle, "_GTF", &tmp))) ||
-          (ACPI_SUCCESS(acpi_get_handle(handle, "_GTM", &tmp))) ||
-          (ACPI_SUCCESS(acpi_get_handle(handle, "_STM", &tmp))) ||
-          (ACPI_SUCCESS(acpi_get_handle(handle, "_SDD", &tmp))))
-               return 1;
-
-       return 0;
-}
-
-static int is_battery(acpi_handle handle)
+static int __init is_battery(acpi_handle handle)
 {
        struct acpi_device_info *info;
        int ret = 1;
@@ -292,17 +257,13 @@ static int is_battery(acpi_handle handle)
        return ret;
 }
 
-static int is_ejectable_bay(acpi_handle handle)
+/* Check whether ACPI object is an ejectable battery or disk bay */
+static bool __init is_ejectable_bay(acpi_handle handle)
 {
-       acpi_handle phandle;
+       if (acpi_has_method(handle, "_EJ0") && is_battery(handle))
+               return true;
 
-       if (!is_ejectable(handle))
-               return 0;
-       if (is_battery(handle) || is_ata(handle))
-               return 1;
-       if (!acpi_get_parent(handle, &phandle) && is_ata(phandle))
-               return 1;
-       return 0;
+       return acpi_bay_match(handle);
 }
 
 /**
@@ -320,7 +281,7 @@ int is_dock_device(acpi_handle handle)
        if (!dock_station_count)
                return 0;
 
-       if (is_dock(handle))
+       if (acpi_dock_match(handle))
                return 1;
 
        list_for_each_entry(dock_station, &dock_stations, sibling)
@@ -359,10 +320,8 @@ static int dock_present(struct dock_station *ds)
  *  handle if one does not exist already.  This should cause
  *  acpi to scan for drivers for the given devices, and call
  *  matching driver's add routine.
- *
- *  Returns a pointer to the acpi_device corresponding to the handle.
  */
-static struct acpi_device * dock_create_acpi_device(acpi_handle handle)
+static void dock_create_acpi_device(acpi_handle handle)
 {
        struct acpi_device *device;
        int ret;
@@ -375,10 +334,7 @@ static struct acpi_device * dock_create_acpi_device(acpi_handle handle)
                ret = acpi_bus_scan(handle);
                if (ret)
                        pr_debug("error adding bus, %x\n", -ret);
-
-               acpi_bus_get_device(handle, &device);
        }
-       return device;
 }
 
 /**
@@ -397,9 +353,29 @@ static void dock_remove_acpi_device(acpi_handle handle)
 }
 
 /**
- * hotplug_dock_devices - insert or remove devices on the dock station
+ * hot_remove_dock_devices - Remove dock station devices.
+ * @ds: Dock station.
+ */
+static void hot_remove_dock_devices(struct dock_station *ds)
+{
+       struct dock_dependent_device *dd;
+
+       /*
+        * Walk the list in reverse order so that devices that have been added
+        * last are removed first (in case there are some indirect dependencies
+        * between them).
+        */
+       list_for_each_entry_reverse(dd, &ds->dependent_devices, list)
+               dock_hotplug_event(dd, ACPI_NOTIFY_EJECT_REQUEST, false);
+
+       list_for_each_entry_reverse(dd, &ds->dependent_devices, list)
+               dock_remove_acpi_device(dd->handle);
+}
+
+/**
+ * hotplug_dock_devices - Insert devices on a dock station.
  * @ds: the dock station
- * @event: either bus check or eject request
+ * @event: either bus check or device check request
  *
  * Some devices on the dock station need to have drivers called
  * to perform hotplug operations after a dock event has occurred.
@@ -410,27 +386,21 @@ static void hotplug_dock_devices(struct dock_station *ds, u32 event)
 {
        struct dock_dependent_device *dd;
 
-       mutex_lock(&ds->hp_lock);
+       /* Call driver specific post-dock fixups. */
+       list_for_each_entry(dd, &ds->dependent_devices, list)
+               dock_hotplug_event(dd, event, DOCK_CALL_FIXUP);
 
-       /*
-        * First call driver specific hotplug functions
-        */
+       /* Call driver specific hotplug functions. */
        list_for_each_entry(dd, &ds->dependent_devices, list)
-               dock_hotplug_event(dd, event, false);
+               dock_hotplug_event(dd, event, DOCK_CALL_HANDLER);
 
        /*
-        * Now make sure that an acpi_device is created for each
-        * dependent device, or removed if this is an eject request.
-        * This will cause acpi_drivers to be stopped/started if they
-        * exist
+        * Now make sure that an acpi_device is created for each dependent
+        * device.  That will cause scan handlers to be attached to device
+        * objects or acpi_drivers to be stopped/started if they are present.
         */
-       list_for_each_entry(dd, &ds->dependent_devices, list) {
-               if (event == ACPI_NOTIFY_EJECT_REQUEST)
-                       dock_remove_acpi_device(dd->handle);
-               else
-                       dock_create_acpi_device(dd->handle);
-       }
-       mutex_unlock(&ds->hp_lock);
+       list_for_each_entry(dd, &ds->dependent_devices, list)
+               dock_create_acpi_device(dd->handle);
 }
 
 static void dock_event(struct dock_station *ds, u32 event, int num)
@@ -453,43 +423,12 @@ static void dock_event(struct dock_station *ds, u32 event, int num)
                kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, envp);
 
        list_for_each_entry(dd, &ds->dependent_devices, list)
-               dock_hotplug_event(dd, event, true);
+               dock_hotplug_event(dd, event, DOCK_CALL_UEVENT);
 
        if (num != DOCK_EVENT)
                kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, envp);
 }
 
-/**
- * eject_dock - respond to a dock eject request
- * @ds: the dock station
- *
- * This is called after _DCK is called, to execute the dock station's
- * _EJ0 method.
- */
-static void eject_dock(struct dock_station *ds)
-{
-       struct acpi_object_list arg_list;
-       union acpi_object arg;
-       acpi_status status;
-       acpi_handle tmp;
-
-       /* all dock devices should have _EJ0, but check anyway */
-       status = acpi_get_handle(ds->handle, "_EJ0", &tmp);
-       if (ACPI_FAILURE(status)) {
-               pr_debug("No _EJ0 support for dock device\n");
-               return;
-       }
-
-       arg_list.count = 1;
-       arg_list.pointer = &arg;
-       arg.type = ACPI_TYPE_INTEGER;
-       arg.integer.value = 1;
-
-       status = acpi_evaluate_object(ds->handle, "_EJ0", &arg_list, NULL);
-       if (ACPI_FAILURE(status))
-               pr_debug("Failed to evaluate _EJ0!\n");
-}
-
 /**
  * handle_dock - handle a dock event
  * @ds: the dock station
@@ -550,27 +489,6 @@ static inline void complete_undock(struct dock_station *ds)
        ds->flags &= ~(DOCK_UNDOCKING);
 }
 
-static void dock_lock(struct dock_station *ds, int lock)
-{
-       struct acpi_object_list arg_list;
-       union acpi_object arg;
-       acpi_status status;
-
-       arg_list.count = 1;
-       arg_list.pointer = &arg;
-       arg.type = ACPI_TYPE_INTEGER;
-       arg.integer.value = !!lock;
-       status = acpi_evaluate_object(ds->handle, "_LCK", &arg_list, NULL);
-       if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
-               if (lock)
-                       acpi_handle_warn(ds->handle,
-                               "Locking device failed (0x%x)\n", status);
-               else
-                       acpi_handle_warn(ds->handle,
-                               "Unlocking device failed (0x%x)\n", status);
-       }
-}
-
 /**
  * dock_in_progress - see if we are in the middle of handling a dock event
  * @ds: the dock station
@@ -587,37 +505,6 @@ static int dock_in_progress(struct dock_station *ds)
        return 0;
 }
 
-/**
- * register_dock_notifier - add yourself to the dock notifier list
- * @nb: the callers notifier block
- *
- * If a driver wishes to be notified about dock events, they can
- * use this function to put a notifier block on the dock notifier list.
- * this notifier call chain will be called after a dock event, but
- * before hotplugging any new devices.
- */
-int register_dock_notifier(struct notifier_block *nb)
-{
-       if (!dock_station_count)
-               return -ENODEV;
-
-       return atomic_notifier_chain_register(&dock_notifier_list, nb);
-}
-EXPORT_SYMBOL_GPL(register_dock_notifier);
-
-/**
- * unregister_dock_notifier - remove yourself from the dock notifier list
- * @nb: the callers notifier block
- */
-void unregister_dock_notifier(struct notifier_block *nb)
-{
-       if (!dock_station_count)
-               return;
-
-       atomic_notifier_chain_unregister(&dock_notifier_list, nb);
-}
-EXPORT_SYMBOL_GPL(unregister_dock_notifier);
-
 /**
  * register_hotplug_dock_device - register a hotplug function
  * @handle: the handle of the device
@@ -703,10 +590,10 @@ static int handle_eject_request(struct dock_station *ds, u32 event)
         */
        dock_event(ds, event, UNDOCK_EVENT);
 
-       hotplug_dock_devices(ds, ACPI_NOTIFY_EJECT_REQUEST);
+       hot_remove_dock_devices(ds);
        undock(ds);
-       dock_lock(ds, 0);
-       eject_dock(ds);
+       acpi_evaluate_lck(ds->handle, 0);
+       acpi_evaluate_ej0(ds->handle);
        if (dock_present(ds)) {
                acpi_handle_err(ds->handle, "Unable to undock!\n");
                return -EBUSY;
@@ -717,18 +604,17 @@ static int handle_eject_request(struct dock_station *ds, u32 event)
 
 /**
  * dock_notify - act upon an acpi dock notification
- * @handle: the dock station handle
+ * @ds: dock station
  * @event: the acpi event
- * @data: our driver data struct
  *
  * If we are notified to dock, then check to see if the dock is
  * present and then dock.  Notify all drivers of the dock event,
  * and then hotplug and devices that may need hotplugging.
  */
-static void dock_notify(acpi_handle handle, u32 event, void *data)
+static void dock_notify(struct dock_station *ds, u32 event)
 {
-       struct dock_station *ds = data;
-       struct acpi_device *tmp;
+       acpi_handle handle = ds->handle;
+       struct acpi_device *ad;
        int surprise_removal = 0;
 
        /*
@@ -751,8 +637,7 @@ static void dock_notify(acpi_handle handle, u32 event, void *data)
        switch (event) {
        case ACPI_NOTIFY_BUS_CHECK:
        case ACPI_NOTIFY_DEVICE_CHECK:
-               if (!dock_in_progress(ds) && acpi_bus_get_device(ds->handle,
-                  &tmp)) {
+               if (!dock_in_progress(ds) && acpi_bus_get_device(handle, &ad)) {
                        begin_dock(ds);
                        dock(ds);
                        if (!dock_present(ds)) {
@@ -760,12 +645,10 @@ static void dock_notify(acpi_handle handle, u32 event, void *data)
                                complete_dock(ds);
                                break;
                        }
-                       atomic_notifier_call_chain(&dock_notifier_list,
-                                                  event, NULL);
                        hotplug_dock_devices(ds, event);
                        complete_dock(ds);
                        dock_event(ds, event, DOCK_EVENT);
-                       dock_lock(ds, 1);
+                       acpi_evaluate_lck(ds->handle, 1);
                        acpi_update_all_gpes();
                        break;
                }
@@ -789,9 +672,8 @@ static void dock_notify(acpi_handle handle, u32 event, void *data)
 }
 
 struct dock_data {
-       acpi_handle handle;
-       unsigned long event;
        struct dock_station *ds;
+       u32 event;
 };
 
 static void acpi_dock_deferred_cb(void *context)
@@ -799,52 +681,31 @@ static void acpi_dock_deferred_cb(void *context)
        struct dock_data *data = context;
 
        acpi_scan_lock_acquire();
-       dock_notify(data->handle, data->event, data->ds);
+       dock_notify(data->ds, data->event);
        acpi_scan_lock_release();
        kfree(data);
 }
 
-static int acpi_dock_notifier_call(struct notifier_block *this,
-       unsigned long event, void *data)
+static void dock_notify_handler(acpi_handle handle, u32 event, void *data)
 {
-       struct dock_station *dock_station;
-       acpi_handle handle = data;
+       struct dock_data *dd;
 
        if (event != ACPI_NOTIFY_BUS_CHECK && event != ACPI_NOTIFY_DEVICE_CHECK
           && event != ACPI_NOTIFY_EJECT_REQUEST)
-               return 0;
-
-       acpi_scan_lock_acquire();
-
-       list_for_each_entry(dock_station, &dock_stations, sibling) {
-               if (dock_station->handle == handle) {
-                       struct dock_data *dd;
-                       acpi_status status;
-
-                       dd = kmalloc(sizeof(*dd), GFP_KERNEL);
-                       if (!dd)
-                               break;
+               return;
 
-                       dd->handle = handle;
-                       dd->event = event;
-                       dd->ds = dock_station;
-                       status = acpi_os_hotplug_execute(acpi_dock_deferred_cb,
-                                                        dd);
-                       if (ACPI_FAILURE(status))
-                               kfree(dd);
+       dd = kmalloc(sizeof(*dd), GFP_KERNEL);
+       if (dd) {
+               acpi_status status;
 
-                       break;
-               }
+               dd->ds = data;
+               dd->event = event;
+               status = acpi_os_hotplug_execute(acpi_dock_deferred_cb, dd);
+               if (ACPI_FAILURE(status))
+                       kfree(dd);
        }
-
-       acpi_scan_lock_release();
-       return 0;
 }
 
-static struct notifier_block dock_acpi_notifier = {
-       .notifier_call = acpi_dock_notifier_call,
-};
-
 /**
  * find_dock_devices - find devices on the dock station
  * @handle: the handle of the device we are examining
@@ -856,29 +717,16 @@ static struct notifier_block dock_acpi_notifier = {
  * check to see if an object has an _EJD method.  If it does, then it
  * will see if it is dependent on the dock station.
  */
-static acpi_status
-find_dock_devices(acpi_handle handle, u32 lvl, void *context, void **rv)
+static acpi_status __init find_dock_devices(acpi_handle handle, u32 lvl,
+                                           void *context, void **rv)
 {
-       acpi_status status;
-       acpi_handle tmp, parent;
        struct dock_station *ds = context;
+       acpi_handle ejd = NULL;
 
-       status = acpi_bus_get_ejd(handle, &tmp);
-       if (ACPI_FAILURE(status)) {
-               /* try the parent device as well */
-               status = acpi_get_parent(handle, &parent);
-               if (ACPI_FAILURE(status))
-                       goto fdd_out;
-               /* see if parent is dependent on dock */
-               status = acpi_bus_get_ejd(parent, &tmp);
-               if (ACPI_FAILURE(status))
-                       goto fdd_out;
-       }
-
-       if (tmp == ds->handle)
+       acpi_bus_get_ejd(handle, &ejd);
+       if (ejd == ds->handle)
                add_dock_dependent_device(ds, handle);
 
-fdd_out:
        return AE_OK;
 }
 
@@ -988,13 +836,13 @@ static struct attribute_group dock_attribute_group = {
  */
 static int __init dock_add(acpi_handle handle)
 {
-       int ret, id;
-       struct dock_station ds, *dock_station;
+       struct dock_station *dock_station, ds = { NULL, };
        struct platform_device *dd;
+       acpi_status status;
+       int ret;
 
-       id = dock_station_count;
-       memset(&ds, 0, sizeof(ds));
-       dd = platform_device_register_data(NULL, "dock", id, &ds, sizeof(ds));
+       dd = platform_device_register_data(NULL, "dock", dock_station_count,
+                                          &ds, sizeof(ds));
        if (IS_ERR(dd))
                return PTR_ERR(dd);
 
@@ -1004,18 +852,15 @@ static int __init dock_add(acpi_handle handle)
        dock_station->dock_device = dd;
        dock_station->last_dock_time = jiffies - HZ;
 
-       mutex_init(&dock_station->hp_lock);
-       spin_lock_init(&dock_station->dd_lock);
        INIT_LIST_HEAD(&dock_station->sibling);
-       ATOMIC_INIT_NOTIFIER_HEAD(&dock_notifier_list);
        INIT_LIST_HEAD(&dock_station->dependent_devices);
 
        /* we want the dock device to send uevents */
        dev_set_uevent_suppress(&dd->dev, 0);
 
-       if (is_dock(handle))
+       if (acpi_dock_match(handle))
                dock_station->flags |= DOCK_IS_DOCK;
-       if (is_ata(handle))
+       if (acpi_ata_match(handle))
                dock_station->flags |= DOCK_IS_ATA;
        if (is_battery(handle))
                dock_station->flags |= DOCK_IS_BAT;
@@ -1034,11 +879,19 @@ static int __init dock_add(acpi_handle handle)
        if (ret)
                goto err_rmgroup;
 
+       status = acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
+                                            dock_notify_handler, dock_station);
+       if (ACPI_FAILURE(status)) {
+               ret = -ENODEV;
+               goto err_rmgroup;
+       }
+
        dock_station_count++;
        list_add(&dock_station->sibling, &dock_stations);
        return 0;
 
 err_rmgroup:
+       remove_dock_dependent_devices(dock_station);
        sysfs_remove_group(&dd->dev.kobj, &dock_attribute_group);
 err_unregister:
        platform_device_unregister(dd);
@@ -1055,10 +908,10 @@ err_unregister:
  *
  * This is called by acpi_walk_namespace to look for dock stations and bays.
  */
-static __init acpi_status
+static acpi_status __init
 find_dock_and_bay(acpi_handle handle, u32 lvl, void *context, void **rv)
 {
-       if (is_dock(handle) || is_ejectable_bay(handle))
+       if (acpi_dock_match(handle) || is_ejectable_bay(handle))
                dock_add(handle);
 
        return AE_OK;
@@ -1078,7 +931,6 @@ void __init acpi_dock_init(void)
                return;
        }
 
-       register_acpi_bus_notifier(&dock_acpi_notifier);
        pr_info(PREFIX "%s: %d docks/bays found\n",
                ACPI_DOCK_DRIVER_DESCRIPTION, dock_station_count);
 }
index 80403c1a89f8969919c868e0fb3238e056adb581..8dd2d4dce7c3231c608a876138eb9be76c74c273 100644 (file)
@@ -948,7 +948,7 @@ static int ec_enlarge_storm_threshold(const struct dmi_system_id *id)
        return 0;
 }
 
-static struct dmi_system_id __initdata ec_dmi_table[] = {
+static struct dmi_system_id ec_dmi_table[] __initdata = {
        {
        ec_skip_dsdt_scan, "Compal JFL92", {
        DMI_MATCH(DMI_BIOS_VENDOR, "COMPAL"),
@@ -1049,10 +1049,8 @@ int __init acpi_ec_ecdt_probe(void)
                * which needs it, has fake EC._INI method, so use it as flag.
                * Keep boot_ec struct as it will be needed soon.
                */
-               acpi_handle dummy;
                if (!dmi_name_in_vendors("ASUS") ||
-                   ACPI_FAILURE(acpi_get_handle(boot_ec->handle, "_INI",
-                                                       &dummy)))
+                   !acpi_has_method(boot_ec->handle, "_INI"))
                        return -ENODEV;
        }
 install:
index 1442737cedec784874fed04a74f251d543b1a61e..8247fcdde0795b37377bdec44d6271ca16ad225d 100644 (file)
 #define _COMPONENT             ACPI_SYSTEM_COMPONENT
 ACPI_MODULE_NAME("event");
 
-#ifdef CONFIG_ACPI_PROC_EVENT
-/* Global vars for handling event proc entry */
-static DEFINE_SPINLOCK(acpi_system_event_lock);
-int event_is_open = 0;
-extern struct list_head acpi_bus_event_list;
-extern wait_queue_head_t acpi_bus_event_queue;
-
-static int acpi_system_open_event(struct inode *inode, struct file *file)
-{
-       spin_lock_irq(&acpi_system_event_lock);
-
-       if (event_is_open)
-               goto out_busy;
-
-       event_is_open = 1;
-
-       spin_unlock_irq(&acpi_system_event_lock);
-       return 0;
-
-      out_busy:
-       spin_unlock_irq(&acpi_system_event_lock);
-       return -EBUSY;
-}
-
-static ssize_t
-acpi_system_read_event(struct file *file, char __user * buffer, size_t count,
-                      loff_t * ppos)
-{
-       int result = 0;
-       struct acpi_bus_event event;
-       static char str[ACPI_MAX_STRING];
-       static int chars_remaining = 0;
-       static char *ptr;
-
-       if (!chars_remaining) {
-               memset(&event, 0, sizeof(struct acpi_bus_event));
-
-               if ((file->f_flags & O_NONBLOCK)
-                   && (list_empty(&acpi_bus_event_list)))
-                       return -EAGAIN;
-
-               result = acpi_bus_receive_event(&event);
-               if (result)
-                       return result;
-
-               chars_remaining = sprintf(str, "%s %s %08x %08x\n",
-                                         event.device_class ? event.
-                                         device_class : "<unknown>",
-                                         event.bus_id ? event.
-                                         bus_id : "<unknown>", event.type,
-                                         event.data);
-               ptr = str;
-       }
-
-       if (chars_remaining < count) {
-               count = chars_remaining;
-       }
-
-       if (copy_to_user(buffer, ptr, count))
-               return -EFAULT;
-
-       *ppos += count;
-       chars_remaining -= count;
-       ptr += count;
-
-       return count;
-}
-
-static int acpi_system_close_event(struct inode *inode, struct file *file)
-{
-       spin_lock_irq(&acpi_system_event_lock);
-       event_is_open = 0;
-       spin_unlock_irq(&acpi_system_event_lock);
-       return 0;
-}
-
-static unsigned int acpi_system_poll_event(struct file *file, poll_table * wait)
-{
-       poll_wait(file, &acpi_bus_event_queue, wait);
-       if (!list_empty(&acpi_bus_event_list))
-               return POLLIN | POLLRDNORM;
-       return 0;
-}
-
-static const struct file_operations acpi_system_event_ops = {
-       .owner = THIS_MODULE,
-       .open = acpi_system_open_event,
-       .read = acpi_system_read_event,
-       .release = acpi_system_close_event,
-       .poll = acpi_system_poll_event,
-       .llseek = default_llseek,
-};
-#endif /* CONFIG_ACPI_PROC_EVENT */
-
 /* ACPI notifier chain */
 static BLOCKING_NOTIFIER_HEAD(acpi_chain_head);
 
@@ -280,9 +186,6 @@ static int acpi_event_genetlink_init(void)
 
 static int __init acpi_event_init(void)
 {
-#ifdef CONFIG_ACPI_PROC_EVENT
-       struct proc_dir_entry *entry;
-#endif
        int error = 0;
 
        if (acpi_disabled)
@@ -293,15 +196,6 @@ static int __init acpi_event_init(void)
        if (error)
                printk(KERN_WARNING PREFIX
                       "Failed to create genetlink family for ACPI event\n");
-
-#ifdef CONFIG_ACPI_PROC_EVENT
-       /* 'event' [R] */
-       entry = proc_create("event", S_IRUSR, acpi_root_dir,
-                           &acpi_system_event_ops);
-       if (!entry)
-               return -ENODEV;
-#endif
-
        return 0;
 }
 
index 5b02a0aa540cfa122ecfd8481d38162076cefbde..41ade6570bc07c22c3c40445d696aea26e4e4c41 100644 (file)
@@ -93,7 +93,7 @@ static int fan_get_cur_state(struct thermal_cooling_device *cdev, unsigned long
        if (result)
                return result;
 
-       *state = (acpi_state == ACPI_STATE_D3 ? 0 :
+       *state = (acpi_state == ACPI_STATE_D3_COLD ? 0 :
                 (acpi_state == ACPI_STATE_D0 ? 1 : -1));
        return 0;
 }
@@ -108,7 +108,7 @@ fan_set_cur_state(struct thermal_cooling_device *cdev, unsigned long state)
                return -EINVAL;
 
        result = acpi_bus_set_power(device->handle,
-                               state ? ACPI_STATE_D0 : ACPI_STATE_D3);
+                               state ? ACPI_STATE_D0 : ACPI_STATE_D3_COLD);
 
        return result;
 }
index 408f6b2a5fa8ee5c6578cd9e555a8386e112e370..94672297e1b1bc6f3b482b3e0985cda28786ee10 100644 (file)
@@ -173,6 +173,15 @@ acpi_handle acpi_find_child(acpi_handle parent, u64 addr, bool is_bridge)
 }
 EXPORT_SYMBOL_GPL(acpi_find_child);
 
+static void acpi_physnode_link_name(char *buf, unsigned int node_id)
+{
+       if (node_id > 0)
+               snprintf(buf, PHYSICAL_NODE_NAME_SIZE,
+                        PHYSICAL_NODE_STRING "%u", node_id);
+       else
+               strcpy(buf, PHYSICAL_NODE_STRING);
+}
+
 int acpi_bind_one(struct device *dev, acpi_handle handle)
 {
        struct acpi_device *acpi_dev;
@@ -216,8 +225,15 @@ int acpi_bind_one(struct device *dev, acpi_handle handle)
        list_for_each_entry(pn, &acpi_dev->physical_node_list, node) {
                /* Sanity check. */
                if (pn->dev == dev) {
+                       mutex_unlock(&acpi_dev->physical_node_lock);
+
                        dev_warn(dev, "Already associated with ACPI node\n");
-                       goto err_free;
+                       kfree(physical_node);
+                       if (ACPI_HANDLE(dev) != handle)
+                               goto err;
+
+                       put_device(dev);
+                       return 0;
                }
                if (pn->node_id == node_id) {
                        physnode_list = &pn->node;
@@ -230,20 +246,23 @@ int acpi_bind_one(struct device *dev, acpi_handle handle)
        list_add(&physical_node->node, physnode_list);
        acpi_dev->physical_node_count++;
 
-       mutex_unlock(&acpi_dev->physical_node_lock);
-
        if (!ACPI_HANDLE(dev))
                ACPI_HANDLE_SET(dev, acpi_dev->handle);
 
-       if (!physical_node->node_id)
-               strcpy(physical_node_name, PHYSICAL_NODE_STRING);
-       else
-               sprintf(physical_node_name,
-                       "physical_node%d", physical_node->node_id);
+       acpi_physnode_link_name(physical_node_name, node_id);
        retval = sysfs_create_link(&acpi_dev->dev.kobj, &dev->kobj,
-                       physical_node_name);
+                                  physical_node_name);
+       if (retval)
+               dev_err(&acpi_dev->dev, "Failed to create link %s (%d)\n",
+                       physical_node_name, retval);
+
        retval = sysfs_create_link(&dev->kobj, &acpi_dev->dev.kobj,
-               "firmware_node");
+                                  "firmware_node");
+       if (retval)
+               dev_err(dev, "Failed to create link firmware_node (%d)\n",
+                       retval);
+
+       mutex_unlock(&acpi_dev->physical_node_lock);
 
        if (acpi_dev->wakeup.flags.valid)
                device_set_wakeup_capable(dev, true);
@@ -254,11 +273,6 @@ int acpi_bind_one(struct device *dev, acpi_handle handle)
        ACPI_HANDLE_SET(dev, NULL);
        put_device(dev);
        return retval;
-
- err_free:
-       mutex_unlock(&acpi_dev->physical_node_lock);
-       kfree(physical_node);
-       goto err;
 }
 EXPORT_SYMBOL_GPL(acpi_bind_one);
 
@@ -267,48 +281,37 @@ int acpi_unbind_one(struct device *dev)
        struct acpi_device_physical_node *entry;
        struct acpi_device *acpi_dev;
        acpi_status status;
-       struct list_head *node, *next;
 
        if (!ACPI_HANDLE(dev))
                return 0;
 
        status = acpi_bus_get_device(ACPI_HANDLE(dev), &acpi_dev);
-       if (ACPI_FAILURE(status))
-               goto err;
+       if (ACPI_FAILURE(status)) {
+               dev_err(dev, "Oops, ACPI handle corrupt in %s()\n", __func__);
+               return -EINVAL;
+       }
 
        mutex_lock(&acpi_dev->physical_node_lock);
-       list_for_each_safe(node, next, &acpi_dev->physical_node_list) {
-               char physical_node_name[PHYSICAL_NODE_NAME_SIZE];
 
-               entry = list_entry(node, struct acpi_device_physical_node,
-                       node);
-               if (entry->dev != dev)
-                       continue;
+       list_for_each_entry(entry, &acpi_dev->physical_node_list, node)
+               if (entry->dev == dev) {
+                       char physnode_name[PHYSICAL_NODE_NAME_SIZE];
 
-               list_del(node);
+                       list_del(&entry->node);
+                       acpi_dev->physical_node_count--;
 
-               acpi_dev->physical_node_count--;
+                       acpi_physnode_link_name(physnode_name, entry->node_id);
+                       sysfs_remove_link(&acpi_dev->dev.kobj, physnode_name);
+                       sysfs_remove_link(&dev->kobj, "firmware_node");
+                       ACPI_HANDLE_SET(dev, NULL);
+                       /* acpi_bind_one() increase refcnt by one. */
+                       put_device(dev);
+                       kfree(entry);
+                       break;
+               }
 
-               if (!entry->node_id)
-                       strcpy(physical_node_name, PHYSICAL_NODE_STRING);
-               else
-                       sprintf(physical_node_name,
-                               "physical_node%d", entry->node_id);
-
-               sysfs_remove_link(&acpi_dev->dev.kobj, physical_node_name);
-               sysfs_remove_link(&dev->kobj, "firmware_node");
-               ACPI_HANDLE_SET(dev, NULL);
-               /* acpi_bind_one increase refcnt by one */
-               put_device(dev);
-               kfree(entry);
-       }
        mutex_unlock(&acpi_dev->physical_node_lock);
-
        return 0;
-
-err:
-       dev_err(dev, "Oops, 'acpi_handle' corrupt\n");
-       return -EINVAL;
 }
 EXPORT_SYMBOL_GPL(acpi_unbind_one);
 
index 5da44e81dd4d70f7ed42ae1f1587734c0b6a5a55..20f423337e1fb73c66379a3771954741860c6d7d 100644 (file)
@@ -23,6 +23,7 @@
 
 #define PREFIX "ACPI: "
 
+acpi_status acpi_os_initialize1(void);
 int init_acpi_device_notify(void);
 int acpi_scan_init(void);
 #ifdef CONFIG_ACPI_PCI_SLOT
index 33e609f6358580647412dd0d029f065fdfb73741..2e82e5d7693016676ad6afdf69ce3b553e968d44 100644 (file)
@@ -159,7 +159,7 @@ acpi_table_print_srat_entry(struct acpi_subtable_header *header)
  * distance than the others.
  * Do some quick checks here and only use the SLIT if it passes.
  */
-static __init int slit_valid(struct acpi_table_slit *slit)
+static int __init slit_valid(struct acpi_table_slit *slit)
 {
        int i, j;
        int d = slit->locality_count;
index 6ab2c350552061893ded7355d376406de5d98ed8..e5f416c7f66e9e92e1ed988c3a709bc2820d56f4 100644 (file)
@@ -52,6 +52,7 @@
 #include <acpi/acpi.h>
 #include <acpi/acpi_bus.h>
 #include <acpi/processor.h>
+#include "internal.h"
 
 #define _COMPONENT             ACPI_OS_SERVICES
 ACPI_MODULE_NAME("osl");
@@ -79,6 +80,8 @@ extern char line_buf[80];
 
 static int (*__acpi_os_prepare_sleep)(u8 sleep_state, u32 pm1a_ctrl,
                                      u32 pm1b_ctrl);
+static int (*__acpi_os_prepare_extended_sleep)(u8 sleep_state, u32 val_a,
+                                     u32 val_b);
 
 static acpi_osd_handler acpi_irq_handler;
 static void *acpi_irq_context;
@@ -140,7 +143,8 @@ static struct osi_linux {
        unsigned int    enable:1;
        unsigned int    dmi:1;
        unsigned int    cmdline:1;
-} osi_linux = {0, 0, 0};
+       unsigned int    default_disabling:1;
+} osi_linux = {0, 0, 0, 0};
 
 static u32 acpi_osi_handler(acpi_string interface, u32 supported)
 {
@@ -563,10 +567,6 @@ static const char * const table_sigs[] = {
        ACPI_SIG_WDRT, ACPI_SIG_DSDT, ACPI_SIG_FADT, ACPI_SIG_PSDT,
        ACPI_SIG_RSDT, ACPI_SIG_XSDT, ACPI_SIG_SSDT, NULL };
 
-/* Non-fatal errors: Affected tables/files are ignored */
-#define INVALID_TABLE(x, path, name)                                   \
-       { pr_err("ACPI OVERRIDE: " x " [%s%s]\n", path, name); continue; }
-
 #define ACPI_HEADER_SIZE sizeof(struct acpi_table_header)
 
 /* Must not increase 10 or needs code modification below */
@@ -593,9 +593,11 @@ void __init acpi_initrd_override(void *data, size_t size)
                data += offset;
                size -= offset;
 
-               if (file.size < sizeof(struct acpi_table_header))
-                       INVALID_TABLE("Table smaller than ACPI header",
-                                     cpio_path, file.name);
+               if (file.size < sizeof(struct acpi_table_header)) {
+                       pr_err("ACPI OVERRIDE: Table smaller than ACPI header [%s%s]\n",
+                               cpio_path, file.name);
+                       continue;
+               }
 
                table = file.data;
 
@@ -603,15 +605,21 @@ void __init acpi_initrd_override(void *data, size_t size)
                        if (!memcmp(table->signature, table_sigs[sig], 4))
                                break;
 
-               if (!table_sigs[sig])
-                       INVALID_TABLE("Unknown signature",
-                                     cpio_path, file.name);
-               if (file.size != table->length)
-                       INVALID_TABLE("File length does not match table length",
-                                     cpio_path, file.name);
-               if (acpi_table_checksum(file.data, table->length))
-                       INVALID_TABLE("Bad table checksum",
-                                     cpio_path, file.name);
+               if (!table_sigs[sig]) {
+                       pr_err("ACPI OVERRIDE: Unknown signature [%s%s]\n",
+                               cpio_path, file.name);
+                       continue;
+               }
+               if (file.size != table->length) {
+                       pr_err("ACPI OVERRIDE: File length does not match table length [%s%s]\n",
+                               cpio_path, file.name);
+                       continue;
+               }
+               if (acpi_table_checksum(file.data, table->length)) {
+                       pr_err("ACPI OVERRIDE: Bad table checksum [%s%s]\n",
+                               cpio_path, file.name);
+                       continue;
+               }
 
                pr_info("%4.4s ACPI table found in initrd [%s%s][0x%x]\n",
                        table->signature, cpio_path, file.name, table->length);
@@ -1351,8 +1359,8 @@ struct osi_setup_entry {
        bool enable;
 };
 
-static struct osi_setup_entry __initdata
-               osi_setup_entries[OSI_STRING_ENTRIES_MAX] = {
+static struct osi_setup_entry
+               osi_setup_entries[OSI_STRING_ENTRIES_MAX] __initdata = {
        {"Module Device", true},
        {"Processor Device", true},
        {"3.0 _SCP Extensions", true},
@@ -1376,6 +1384,17 @@ void __init acpi_osi_setup(char *str)
 
        if (*str == '!') {
                str++;
+               if (*str == '\0') {
+                       osi_linux.default_disabling = 1;
+                       return;
+               } else if (*str == '*') {
+                       acpi_update_interfaces(ACPI_DISABLE_ALL_STRINGS);
+                       for (i = 0; i < OSI_STRING_ENTRIES_MAX; i++) {
+                               osi = &osi_setup_entries[i];
+                               osi->enable = false;
+                       }
+                       return;
+               }
                enable = false;
        }
 
@@ -1441,6 +1460,13 @@ static void __init acpi_osi_setup_late(void)
        int i;
        acpi_status status;
 
+       if (osi_linux.default_disabling) {
+               status = acpi_update_interfaces(ACPI_DISABLE_ALL_VENDOR_STRINGS);
+
+               if (ACPI_SUCCESS(status))
+                       printk(KERN_INFO PREFIX "Disabled all _OSI OS vendors\n");
+       }
+
        for (i = 0; i < OSI_STRING_ENTRIES_MAX; i++) {
                osi = &osi_setup_entries[i];
                str = osi->string;
@@ -1779,6 +1805,28 @@ void acpi_os_set_prepare_sleep(int (*func)(u8 sleep_state,
        __acpi_os_prepare_sleep = func;
 }
 
+acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state, u32 val_a,
+                                 u32 val_b)
+{
+       int rc = 0;
+       if (__acpi_os_prepare_extended_sleep)
+               rc = __acpi_os_prepare_extended_sleep(sleep_state,
+                                            val_a, val_b);
+       if (rc < 0)
+               return AE_ERROR;
+       else if (rc > 0)
+               return AE_CTRL_SKIP;
+
+       return AE_OK;
+}
+
+void acpi_os_set_prepare_extended_sleep(int (*func)(u8 sleep_state,
+                              u32 val_a, u32 val_b))
+{
+       __acpi_os_prepare_extended_sleep = func;
+}
+
+
 void alloc_acpi_hp_work(acpi_handle handle, u32 type, void *context,
                        void (*func)(struct work_struct *work))
 {
index 033d1179bdb56bb0937e067cb907b9f4272f282d..d678a180ca2aa20cfac20d32460400dc0f05951f 100644 (file)
@@ -159,12 +159,16 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
        return AE_OK;
 }
 
-void acpi_pci_slot_enumerate(struct pci_bus *bus, acpi_handle handle)
+void acpi_pci_slot_enumerate(struct pci_bus *bus)
 {
-       mutex_lock(&slot_list_lock);
-       acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
-                           register_slot, NULL, bus, NULL);
-       mutex_unlock(&slot_list_lock);
+       acpi_handle handle = ACPI_HANDLE(bus->bridge);
+
+       if (handle) {
+               mutex_lock(&slot_list_lock);
+               acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
+                                   register_slot, NULL, bus, NULL);
+               mutex_unlock(&slot_list_lock);
+       }
 }
 
 void acpi_pci_slot_remove(struct pci_bus *bus)
index 5c28c894c0fc204925f489fdf988a01abb74bc88..0dbe5cdf3396e5f53b23c84bb30041329544774d 100644 (file)
@@ -637,9 +637,7 @@ int acpi_device_sleep_wake(struct acpi_device *dev,
        }
 
        /* Execute _PSW */
-       arg_list.count = 1;
-       in_arg[0].integer.value = enable;
-       status = acpi_evaluate_object(dev->handle, "_PSW", &arg_list, NULL);
+       status = acpi_execute_simple_method(dev->handle, "_PSW", enable);
        if (ACPI_FAILURE(status) && (status != AE_NOT_FOUND)) {
                printk(KERN_ERR PREFIX "_PSW execution failed\n");
                dev->wakeup.flags.valid = 0;
@@ -786,7 +784,7 @@ int acpi_power_get_inferred_state(struct acpi_device *device, int *state)
                }
        }
 
-       *state = ACPI_STATE_D3;
+       *state = ACPI_STATE_D3_COLD;
        return 0;
 }
 
index a5e9f4a5b2816a9473942b229bc4632ec640bf5c..cf34d903f4fb4a45b7975fc5fce60d141de2b7a9 100644 (file)
@@ -28,7 +28,7 @@ static int __init set_no_mwait(const struct dmi_system_id *id)
        return 0;
 }
 
-static struct dmi_system_id __initdata processor_idle_dmi_table[] = {
+static struct dmi_system_id processor_idle_dmi_table[] __initdata = {
        {
        set_no_mwait, "Extensa 5220", {
        DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
index 870eaf5fa547b935bdde1fe31a7bf31d2ab3d222..e534ba66d5b80861849ae705bf941ac570720607 100644 (file)
@@ -91,21 +91,17 @@ static void acpi_processor_notify(acpi_handle handle, u32 event, void *data)
                acpi_processor_ppc_has_changed(pr, 1);
                if (saved == pr->performance_platform_limit)
                        break;
-               acpi_bus_generate_proc_event(device, event,
-                                       pr->performance_platform_limit);
                acpi_bus_generate_netlink_event(device->pnp.device_class,
                                                  dev_name(&device->dev), event,
                                                  pr->performance_platform_limit);
                break;
        case ACPI_PROCESSOR_NOTIFY_POWER:
                acpi_processor_cst_has_changed(pr);
-               acpi_bus_generate_proc_event(device, event, 0);
                acpi_bus_generate_netlink_event(device->pnp.device_class,
                                                  dev_name(&device->dev), event, 0);
                break;
        case ACPI_PROCESSOR_NOTIFY_THROTTLING:
                acpi_processor_tstate_has_changed(pr);
-               acpi_bus_generate_proc_event(device, event, 0);
                acpi_bus_generate_netlink_event(device->pnp.device_class,
                                                  dev_name(&device->dev), event, 0);
                break;
@@ -179,7 +175,9 @@ static int __acpi_processor_start(struct acpi_device *device)
        acpi_processor_load_module(pr);
 #endif
        acpi_processor_get_throttling_info(pr);
-       acpi_processor_get_limit_info(pr);
+
+       if (pr->flags.throttling)
+               pr->flags.limit = 1;
 
        if (!cpuidle_get_driver() || cpuidle_get_driver() == &acpi_idle_driver)
                acpi_processor_power_init(pr);
index 1e9732d809bf67ad19eee32f83617428291025b7..51d7948611da0ece22a9aedb6ecd598ba4e4526e 100644 (file)
@@ -164,17 +164,12 @@ static void acpi_processor_ppc_ost(acpi_handle handle, int status)
                {.type = ACPI_TYPE_INTEGER,},
        };
        struct acpi_object_list arg_list = {2, params};
-       acpi_handle temp;
 
-       params[0].integer.value = ACPI_PROCESSOR_NOTIFY_PERFORMANCE;
-       params[1].integer.value =  status;
-
-       /* when there is no _OST , skip it */
-       if (ACPI_FAILURE(acpi_get_handle(handle, "_OST", &temp)))
-               return;
-
-       acpi_evaluate_object(handle, "_OST", &arg_list, NULL);
-       return;
+       if (acpi_has_method(handle, "_OST")) {
+               params[0].integer.value = ACPI_PROCESSOR_NOTIFY_PERFORMANCE;
+               params[1].integer.value =  status;
+               acpi_evaluate_object(handle, "_OST", &arg_list, NULL);
+       }
 }
 
 int acpi_processor_ppc_has_changed(struct acpi_processor *pr, int event_flag)
@@ -468,14 +463,11 @@ static int acpi_processor_get_performance_states(struct acpi_processor *pr)
 int acpi_processor_get_performance_info(struct acpi_processor *pr)
 {
        int result = 0;
-       acpi_status status = AE_OK;
-       acpi_handle handle = NULL;
 
        if (!pr || !pr->performance || !pr->handle)
                return -EINVAL;
 
-       status = acpi_get_handle(pr->handle, "_PCT", &handle);
-       if (ACPI_FAILURE(status)) {
+       if (!acpi_has_method(pr->handle, "_PCT")) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                  "ACPI-based processor performance control unavailable\n"));
                return -ENODEV;
@@ -501,7 +493,7 @@ int acpi_processor_get_performance_info(struct acpi_processor *pr)
         */
  update_bios:
 #ifdef CONFIG_X86
-       if (ACPI_SUCCESS(acpi_get_handle(pr->handle, "_PPC", &handle))){
+       if (acpi_has_method(pr->handle, "_PPC")) {
                if(boot_cpu_has(X86_FEATURE_EST))
                        printk(KERN_WARNING FW_BUG "BIOS needs update for CPU "
                               "frequency support\n");
index e8e652710e65fd58d3c533281cbf2e10a458cc2f..d1d2e7fb5b30778f26d46ce6e72eecb03350ddfc 100644 (file)
@@ -186,18 +186,6 @@ static int cpufreq_set_cur_state(unsigned int cpu, int state)
 
 #endif
 
-int acpi_processor_get_limit_info(struct acpi_processor *pr)
-{
-
-       if (!pr)
-               return -EINVAL;
-
-       if (pr->flags.throttling)
-               pr->flags.limit = 1;
-
-       return 0;
-}
-
 /* thermal coolign device callbacks */
 static int acpi_processor_max_state(struct acpi_processor *pr)
 {
index 3322b47ab7cae22dc08520b6cec9a3b1df981b84..b7201fc6f1e19c06c798889900c1e9c689494c8d 100644 (file)
@@ -505,14 +505,12 @@ int acpi_dev_get_resources(struct acpi_device *adev, struct list_head *list,
                           void *preproc_data)
 {
        struct res_proc_context c;
-       acpi_handle not_used;
        acpi_status status;
 
        if (!adev || !adev->handle || !list_empty(list))
                return -EINVAL;
 
-       status = acpi_get_handle(adev->handle, METHOD_NAME__CRS, &not_used);
-       if (ACPI_FAILURE(status))
+       if (!acpi_has_method(adev->handle, METHOD_NAME__CRS))
                return 0;
 
        c.list = list;
index b6241eeb1132a0b3afed934ba96f52545a4a1d13..aef7e1cd1e5d62f95512484935e4fea9b11af961 100644 (file)
@@ -873,14 +873,9 @@ static void acpi_sbs_callback(void *context)
        u8 saved_charger_state = sbs->charger_present;
        u8 saved_battery_state;
        acpi_ac_get_present(sbs);
-       if (sbs->charger_present != saved_charger_state) {
-#ifdef CONFIG_ACPI_PROC_EVENT
-               acpi_bus_generate_proc_event4(ACPI_AC_CLASS, ACPI_AC_DIR_NAME,
-                                             ACPI_SBS_NOTIFY_STATUS,
-                                             sbs->charger_present);
-#endif
+       if (sbs->charger_present != saved_charger_state)
                kobject_uevent(&sbs->charger.dev->kobj, KOBJ_CHANGE);
-       }
+
        if (sbs->manager_present) {
                for (id = 0; id < MAX_SBS_BAT; ++id) {
                        if (!(sbs->batteries_supported & (1 << id)))
@@ -890,12 +885,6 @@ static void acpi_sbs_callback(void *context)
                        acpi_battery_read(bat);
                        if (saved_battery_state == bat->present)
                                continue;
-#ifdef CONFIG_ACPI_PROC_EVENT
-                       acpi_bus_generate_proc_event4(ACPI_BATTERY_CLASS,
-                                                     bat->name,
-                                                     ACPI_SBS_NOTIFY_STATUS,
-                                                     bat->present);
-#endif
                        kobject_uevent(&bat->bat.dev->kobj, KOBJ_CHANGE);
                }
        }
index 8a46c924effd4e5bef09cb645429170e70ed380a..e76365136ba3500998ba9f7cd3a980fc78c92429 100644 (file)
@@ -193,9 +193,6 @@ static acpi_status acpi_bus_online_companions(acpi_handle handle, u32 lvl,
 static int acpi_scan_hot_remove(struct acpi_device *device)
 {
        acpi_handle handle = device->handle;
-       acpi_handle not_used;
-       struct acpi_object_list arg_list;
-       union acpi_object arg;
        struct device *errdev;
        acpi_status status;
        unsigned long long sta;
@@ -258,32 +255,15 @@ static int acpi_scan_hot_remove(struct acpi_device *device)
        put_device(&device->dev);
        device = NULL;
 
-       if (ACPI_SUCCESS(acpi_get_handle(handle, "_LCK", &not_used))) {
-               arg_list.count = 1;
-               arg_list.pointer = &arg;
-               arg.type = ACPI_TYPE_INTEGER;
-               arg.integer.value = 0;
-               acpi_evaluate_object(handle, "_LCK", &arg_list, NULL);
-       }
-
-       arg_list.count = 1;
-       arg_list.pointer = &arg;
-       arg.type = ACPI_TYPE_INTEGER;
-       arg.integer.value = 1;
-
+       acpi_evaluate_lck(handle, 0);
        /*
         * TBD: _EJD support.
         */
-       status = acpi_evaluate_object(handle, "_EJ0", &arg_list, NULL);
-       if (ACPI_FAILURE(status)) {
-               if (status == AE_NOT_FOUND) {
-                       return -ENODEV;
-               } else {
-                       acpi_handle_warn(handle, "Eject failed (0x%x)\n",
-                                                               status);
-                       return -EIO;
-               }
-       }
+       status = acpi_evaluate_ej0(handle);
+       if (status == AE_NOT_FOUND)
+               return -ENODEV;
+       else if (ACPI_FAILURE(status))
+               return -EIO;
 
        /*
         * Verify if eject was indeed successful.  If not, log an error
@@ -654,7 +634,6 @@ static int acpi_device_setup_files(struct acpi_device *dev)
 {
        struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
        acpi_status status;
-       acpi_handle temp;
        unsigned long long sun;
        int result = 0;
 
@@ -680,8 +659,7 @@ static int acpi_device_setup_files(struct acpi_device *dev)
        /*
         * If device has _STR, 'description' file is created
         */
-       status = acpi_get_handle(dev->handle, "_STR", &temp);
-       if (ACPI_SUCCESS(status)) {
+       if (acpi_has_method(dev->handle, "_STR")) {
                status = acpi_evaluate_object(dev->handle, "_STR",
                                        NULL, &buffer);
                if (ACPI_FAILURE(status))
@@ -711,8 +689,7 @@ static int acpi_device_setup_files(struct acpi_device *dev)
          * If device has _EJ0, 'eject' file is created that is used to trigger
          * hot-removal function from userland.
          */
-       status = acpi_get_handle(dev->handle, "_EJ0", &temp);
-       if (ACPI_SUCCESS(status)) {
+       if (acpi_has_method(dev->handle, "_EJ0")) {
                result = device_create_file(&dev->dev, &dev_attr_eject);
                if (result)
                        return result;
@@ -734,9 +711,6 @@ end:
 
 static void acpi_device_remove_files(struct acpi_device *dev)
 {
-       acpi_status status;
-       acpi_handle temp;
-
        if (dev->flags.power_manageable) {
                device_remove_file(&dev->dev, &dev_attr_power_state);
                if (dev->power.flags.power_resources)
@@ -747,20 +721,17 @@ static void acpi_device_remove_files(struct acpi_device *dev)
        /*
         * If device has _STR, remove 'description' file
         */
-       status = acpi_get_handle(dev->handle, "_STR", &temp);
-       if (ACPI_SUCCESS(status)) {
+       if (acpi_has_method(dev->handle, "_STR")) {
                kfree(dev->pnp.str_obj);
                device_remove_file(&dev->dev, &dev_attr_description);
        }
        /*
         * If device has _EJ0, remove 'eject' file.
         */
-       status = acpi_get_handle(dev->handle, "_EJ0", &temp);
-       if (ACPI_SUCCESS(status))
+       if (acpi_has_method(dev->handle, "_EJ0"))
                device_remove_file(&dev->dev, &dev_attr_eject);
 
-       status = acpi_get_handle(dev->handle, "_SUN", &temp);
-       if (ACPI_SUCCESS(status))
+       if (acpi_has_method(dev->handle, "_SUN"))
                device_remove_file(&dev->dev, &dev_attr_sun);
 
        if (dev->pnp.unique_id)
@@ -999,6 +970,28 @@ struct bus_type acpi_bus_type = {
        .uevent         = acpi_device_uevent,
 };
 
+static void acpi_bus_data_handler(acpi_handle handle, void *context)
+{
+       /* Intentionally empty. */
+}
+
+int acpi_bus_get_device(acpi_handle handle, struct acpi_device **device)
+{
+       acpi_status status;
+
+       if (!device)
+               return -EINVAL;
+
+       status = acpi_get_data(handle, acpi_bus_data_handler, (void **)device);
+       if (ACPI_FAILURE(status) || !*device) {
+               ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No context for object [%p]\n",
+                                 handle));
+               return -ENODEV;
+       }
+       return 0;
+}
+EXPORT_SYMBOL_GPL(acpi_bus_get_device);
+
 int acpi_device_add(struct acpi_device *device,
                    void (*release)(struct device *))
 {
@@ -1210,14 +1203,6 @@ acpi_bus_get_ejd(acpi_handle handle, acpi_handle *ejd)
 }
 EXPORT_SYMBOL_GPL(acpi_bus_get_ejd);
 
-void acpi_bus_data_handler(acpi_handle handle, void *context)
-{
-
-       /* TBD */
-
-       return;
-}
-
 static int acpi_bus_extract_wakeup_device_power_package(acpi_handle handle,
                                        struct acpi_device_wakeup *wakeup)
 {
@@ -1336,13 +1321,10 @@ static void acpi_bus_set_run_wake_flags(struct acpi_device *device)
 
 static void acpi_bus_get_wakeup_device_flags(struct acpi_device *device)
 {
-       acpi_handle temp;
-       acpi_status status = 0;
        int err;
 
        /* Presence of _PRW indicates wake capable */
-       status = acpi_get_handle(device->handle, "_PRW", &temp);
-       if (ACPI_FAILURE(status))
+       if (!acpi_has_method(device->handle, "_PRW"))
                return;
 
        err = acpi_bus_extract_wakeup_device_power_package(device->handle,
@@ -1372,7 +1354,6 @@ static void acpi_bus_init_power_state(struct acpi_device *device, int state)
        struct acpi_device_power_state *ps = &device->power.states[state];
        char pathname[5] = { '_', 'P', 'R', '0' + state, '\0' };
        struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
-       acpi_handle handle;
        acpi_status status;
 
        INIT_LIST_HEAD(&ps->resources);
@@ -1395,8 +1376,7 @@ static void acpi_bus_init_power_state(struct acpi_device *device, int state)
 
        /* Evaluate "_PSx" to see if we can do explicit sets */
        pathname[2] = 'S';
-       status = acpi_get_handle(device->handle, pathname, &handle);
-       if (ACPI_SUCCESS(status))
+       if (acpi_has_method(device->handle, pathname))
                ps->flags.explicit_set = 1;
 
        /*
@@ -1415,28 +1395,21 @@ static void acpi_bus_init_power_state(struct acpi_device *device, int state)
 
 static void acpi_bus_get_power_flags(struct acpi_device *device)
 {
-       acpi_status status;
-       acpi_handle handle;
        u32 i;
 
        /* Presence of _PS0|_PR0 indicates 'power manageable' */
-       status = acpi_get_handle(device->handle, "_PS0", &handle);
-       if (ACPI_FAILURE(status)) {
-               status = acpi_get_handle(device->handle, "_PR0", &handle);
-               if (ACPI_FAILURE(status))
-                       return;
-       }
+       if (!acpi_has_method(device->handle, "_PS0") &&
+           !acpi_has_method(device->handle, "_PR0"))
+               return;
 
        device->flags.power_manageable = 1;
 
        /*
         * Power Management Flags
         */
-       status = acpi_get_handle(device->handle, "_PSC", &handle);
-       if (ACPI_SUCCESS(status))
+       if (acpi_has_method(device->handle, "_PSC"))
                device->power.flags.explicit_get = 1;
-       status = acpi_get_handle(device->handle, "_IRC", &handle);
-       if (ACPI_SUCCESS(status))
+       if (acpi_has_method(device->handle, "_IRC"))
                device->power.flags.inrush_current = 1;
 
        /*
@@ -1450,8 +1423,8 @@ static void acpi_bus_get_power_flags(struct acpi_device *device)
        /* Set defaults for D0 and D3 states (always valid) */
        device->power.states[ACPI_STATE_D0].flags.valid = 1;
        device->power.states[ACPI_STATE_D0].power = 100;
-       device->power.states[ACPI_STATE_D3].flags.valid = 1;
-       device->power.states[ACPI_STATE_D3].power = 0;
+       device->power.states[ACPI_STATE_D3_COLD].flags.valid = 1;
+       device->power.states[ACPI_STATE_D3_COLD].power = 0;
 
        /* Set D3cold's explicit_set flag if _PS3 exists. */
        if (device->power.states[ACPI_STATE_D3_HOT].flags.explicit_set)
@@ -1470,28 +1443,18 @@ static void acpi_bus_get_power_flags(struct acpi_device *device)
 
 static void acpi_bus_get_flags(struct acpi_device *device)
 {
-       acpi_status status = AE_OK;
-       acpi_handle temp = NULL;
-
        /* Presence of _STA indicates 'dynamic_status' */
-       status = acpi_get_handle(device->handle, "_STA", &temp);
-       if (ACPI_SUCCESS(status))
+       if (acpi_has_method(device->handle, "_STA"))
                device->flags.dynamic_status = 1;
 
        /* Presence of _RMV indicates 'removable' */
-       status = acpi_get_handle(device->handle, "_RMV", &temp);
-       if (ACPI_SUCCESS(status))
+       if (acpi_has_method(device->handle, "_RMV"))
                device->flags.removable = 1;
 
        /* Presence of _EJD|_EJ0 indicates 'ejectable' */
-       status = acpi_get_handle(device->handle, "_EJD", &temp);
-       if (ACPI_SUCCESS(status))
+       if (acpi_has_method(device->handle, "_EJD") ||
+           acpi_has_method(device->handle, "_EJ0"))
                device->flags.ejectable = 1;
-       else {
-               status = acpi_get_handle(device->handle, "_EJ0", &temp);
-               if (ACPI_SUCCESS(status))
-                       device->flags.ejectable = 1;
-       }
 }
 
 static void acpi_device_get_busid(struct acpi_device *device)
@@ -1532,47 +1495,46 @@ static void acpi_device_get_busid(struct acpi_device *device)
        }
 }
 
+/*
+ * acpi_ata_match - see if an acpi object is an ATA device
+ *
+ * If an acpi object has one of the ACPI ATA methods defined,
+ * then we can safely call it an ATA device.
+ */
+bool acpi_ata_match(acpi_handle handle)
+{
+       return acpi_has_method(handle, "_GTF") ||
+              acpi_has_method(handle, "_GTM") ||
+              acpi_has_method(handle, "_STM") ||
+              acpi_has_method(handle, "_SDD");
+}
+
 /*
  * acpi_bay_match - see if an acpi object is an ejectable driver bay
  *
  * If an acpi object is ejectable and has one of the ACPI ATA methods defined,
  * then we can safely call it an ejectable drive bay
  */
-static int acpi_bay_match(acpi_handle handle)
+bool acpi_bay_match(acpi_handle handle)
 {
-       acpi_status status;
-       acpi_handle tmp;
        acpi_handle phandle;
 
-       status = acpi_get_handle(handle, "_EJ0", &tmp);
-       if (ACPI_FAILURE(status))
-               return -ENODEV;
-
-       if ((ACPI_SUCCESS(acpi_get_handle(handle, "_GTF", &tmp))) ||
-               (ACPI_SUCCESS(acpi_get_handle(handle, "_GTM", &tmp))) ||
-               (ACPI_SUCCESS(acpi_get_handle(handle, "_STM", &tmp))) ||
-               (ACPI_SUCCESS(acpi_get_handle(handle, "_SDD", &tmp))))
-               return 0;
+       if (!acpi_has_method(handle, "_EJ0"))
+               return false;
+       if (acpi_ata_match(handle))
+               return true;
+       if (ACPI_FAILURE(acpi_get_parent(handle, &phandle)))
+               return false;
 
-       if (acpi_get_parent(handle, &phandle))
-               return -ENODEV;
-
-        if ((ACPI_SUCCESS(acpi_get_handle(phandle, "_GTF", &tmp))) ||
-                (ACPI_SUCCESS(acpi_get_handle(phandle, "_GTM", &tmp))) ||
-                (ACPI_SUCCESS(acpi_get_handle(phandle, "_STM", &tmp))) ||
-                (ACPI_SUCCESS(acpi_get_handle(phandle, "_SDD", &tmp))))
-                return 0;
-
-       return -ENODEV;
+       return acpi_ata_match(phandle);
 }
 
 /*
  * acpi_dock_match - see if an acpi object has a _DCK method
  */
-static int acpi_dock_match(acpi_handle handle)
+bool acpi_dock_match(acpi_handle handle)
 {
-       acpi_handle tmp;
-       return acpi_get_handle(handle, "_DCK", &tmp);
+       return acpi_has_method(handle, "_DCK");
 }
 
 const char *acpi_device_hid(struct acpi_device *device)
@@ -1610,34 +1572,26 @@ static void acpi_add_id(struct acpi_device_pnp *pnp, const char *dev_id)
  * lacks the SMBUS01 HID and the methods do not have the necessary "_"
  * prefix.  Work around this.
  */
-static int acpi_ibm_smbus_match(acpi_handle handle)
+static bool acpi_ibm_smbus_match(acpi_handle handle)
 {
-       acpi_handle h_dummy;
-       struct acpi_buffer path = {ACPI_ALLOCATE_BUFFER, NULL};
-       int result;
+       char node_name[ACPI_PATH_SEGMENT_LENGTH];
+       struct acpi_buffer path = { sizeof(node_name), node_name };
 
        if (!dmi_name_in_vendors("IBM"))
-               return -ENODEV;
+               return false;
 
        /* Look for SMBS object */
-       result = acpi_get_name(handle, ACPI_SINGLE_NAME, &path);
-       if (result)
-               return result;
-
-       if (strcmp("SMBS", path.pointer)) {
-               result = -ENODEV;
-               goto out;
-       }
+       if (ACPI_FAILURE(acpi_get_name(handle, ACPI_SINGLE_NAME, &path)) ||
+           strcmp("SMBS", path.pointer))
+               return false;
 
        /* Does it have the necessary (but misnamed) methods? */
-       result = -ENODEV;
-       if (ACPI_SUCCESS(acpi_get_handle(handle, "SBI", &h_dummy)) &&
-           ACPI_SUCCESS(acpi_get_handle(handle, "SBR", &h_dummy)) &&
-           ACPI_SUCCESS(acpi_get_handle(handle, "SBW", &h_dummy)))
-               result = 0;
-out:
-       kfree(path.pointer);
-       return result;
+       if (acpi_has_method(handle, "SBI") &&
+           acpi_has_method(handle, "SBR") &&
+           acpi_has_method(handle, "SBW"))
+               return true;
+
+       return false;
 }
 
 static void acpi_set_pnp_ids(acpi_handle handle, struct acpi_device_pnp *pnp,
@@ -1685,11 +1639,11 @@ static void acpi_set_pnp_ids(acpi_handle handle, struct acpi_device_pnp *pnp,
                 */
                if (acpi_is_video_device(handle))
                        acpi_add_id(pnp, ACPI_VIDEO_HID);
-               else if (ACPI_SUCCESS(acpi_bay_match(handle)))
+               else if (acpi_bay_match(handle))
                        acpi_add_id(pnp, ACPI_BAY_HID);
-               else if (ACPI_SUCCESS(acpi_dock_match(handle)))
+               else if (acpi_dock_match(handle))
                        acpi_add_id(pnp, ACPI_DOCK_HID);
-               else if (!acpi_ibm_smbus_match(handle))
+               else if (acpi_ibm_smbus_match(handle))
                        acpi_add_id(pnp, ACPI_SMBUS_IBM_HID);
                else if (list_empty(&pnp->ids) && handle == ACPI_ROOT_OBJECT) {
                        acpi_add_id(pnp, ACPI_BUS_HID); /* \_SB, LNXSYBUS */
@@ -1900,7 +1854,6 @@ static acpi_status acpi_bus_check_add(acpi_handle handle, u32 lvl_not_used,
        struct acpi_device *device = NULL;
        int type;
        unsigned long long sta;
-       acpi_status status;
        int result;
 
        acpi_bus_get_device(handle, &device);
@@ -1921,10 +1874,8 @@ static acpi_status acpi_bus_check_add(acpi_handle handle, u32 lvl_not_used,
        if (!(sta & ACPI_STA_DEVICE_PRESENT) &&
            !(sta & ACPI_STA_DEVICE_FUNCTIONING)) {
                struct acpi_device_wakeup wakeup;
-               acpi_handle temp;
 
-               status = acpi_get_handle(handle, "_PRW", &temp);
-               if (ACPI_SUCCESS(status)) {
+               if (acpi_has_method(handle, "_PRW")) {
                        acpi_bus_extract_wakeup_device_power_package(handle,
                                                                     &wakeup);
                        acpi_power_resources_list_free(&wakeup.resources);
index 187ab61889e6da5fab9312e987a620a4cb212280..14df30580e154802aca352176ada8214f7ea6067 100644 (file)
@@ -31,12 +31,9 @@ static u8 sleep_states[ACPI_S_STATE_COUNT];
 
 static void acpi_sleep_tts_switch(u32 acpi_state)
 {
-       union acpi_object in_arg = { ACPI_TYPE_INTEGER };
-       struct acpi_object_list arg_list = { 1, &in_arg };
-       acpi_status status = AE_OK;
+       acpi_status status;
 
-       in_arg.integer.value = acpi_state;
-       status = acpi_evaluate_object(NULL, "\\_TTS", &arg_list, NULL);
+       status = acpi_execute_simple_method(NULL, "\\_TTS", acpi_state);
        if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
                /*
                 * OS can't evaluate the _TTS object correctly. Some warning
@@ -141,7 +138,7 @@ static int __init init_nvs_nosave(const struct dmi_system_id *d)
        return 0;
 }
 
-static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
+static struct dmi_system_id acpisleep_dmi_table[] __initdata = {
        {
        .callback = init_old_suspend_ordering,
        .ident = "Abit KN9 (nForce4 variant)",
@@ -423,10 +420,21 @@ static void acpi_pm_finish(void)
 }
 
 /**
- *     acpi_pm_end - Finish up suspend sequence.
+ * acpi_pm_start - Start system PM transition.
+ */
+static void acpi_pm_start(u32 acpi_state)
+{
+       acpi_target_sleep_state = acpi_state;
+       acpi_sleep_tts_switch(acpi_target_sleep_state);
+       acpi_scan_lock_acquire();
+}
+
+/**
+ * acpi_pm_end - Finish up system PM transition.
  */
 static void acpi_pm_end(void)
 {
+       acpi_scan_lock_release();
        /*
         * This is necessary in case acpi_pm_finish() is not called during a
         * failing transition to a sleep state.
@@ -454,21 +462,19 @@ static u32 acpi_suspend_states[] = {
 static int acpi_suspend_begin(suspend_state_t pm_state)
 {
        u32 acpi_state = acpi_suspend_states[pm_state];
-       int error = 0;
+       int error;
 
        error = (nvs_nosave || nvs_nosave_s3) ? 0 : suspend_nvs_alloc();
        if (error)
                return error;
 
-       if (sleep_states[acpi_state]) {
-               acpi_target_sleep_state = acpi_state;
-               acpi_sleep_tts_switch(acpi_target_sleep_state);
-       } else {
-               printk(KERN_ERR "ACPI does not support this state: %d\n",
-                       pm_state);
-               error = -ENOSYS;
+       if (!sleep_states[acpi_state]) {
+               pr_err("ACPI does not support sleep state S%u\n", acpi_state);
+               return -ENOSYS;
        }
-       return error;
+
+       acpi_pm_start(acpi_state);
+       return 0;
 }
 
 /**
@@ -634,10 +640,8 @@ static int acpi_hibernation_begin(void)
        int error;
 
        error = nvs_nosave ? 0 : suspend_nvs_alloc();
-       if (!error) {
-               acpi_target_sleep_state = ACPI_STATE_S4;
-               acpi_sleep_tts_switch(acpi_target_sleep_state);
-       }
+       if (!error)
+               acpi_pm_start(ACPI_STATE_S4);
 
        return error;
 }
@@ -716,8 +720,10 @@ static int acpi_hibernation_begin_old(void)
        if (!error) {
                if (!nvs_nosave)
                        error = suspend_nvs_alloc();
-               if (!error)
+               if (!error) {
                        acpi_target_sleep_state = ACPI_STATE_S4;
+                       acpi_scan_lock_acquire();
+               }
        }
        return error;
 }
index a33821ca3895e672a32d1d145aca7108751667e5..9063239e0b137070c54afef1c574ba701fbed3c3 100644 (file)
 
 #define ACPI_THERMAL_CLASS             "thermal_zone"
 #define ACPI_THERMAL_DEVICE_NAME       "Thermal Zone"
-#define ACPI_THERMAL_FILE_STATE                "state"
-#define ACPI_THERMAL_FILE_TEMPERATURE  "temperature"
-#define ACPI_THERMAL_FILE_TRIP_POINTS  "trip_points"
-#define ACPI_THERMAL_FILE_COOLING_MODE "cooling_mode"
-#define ACPI_THERMAL_FILE_POLLING_FREQ "polling_frequency"
 #define ACPI_THERMAL_NOTIFY_TEMPERATURE        0x80
 #define ACPI_THERMAL_NOTIFY_THRESHOLDS 0x81
 #define ACPI_THERMAL_NOTIFY_DEVICES    0x82
@@ -190,7 +185,6 @@ struct acpi_thermal {
        struct thermal_zone_device *thermal_zone;
        int tz_enabled;
        int kelvin_offset;
-       struct mutex lock;
 };
 
 /* --------------------------------------------------------------------------
@@ -239,26 +233,16 @@ static int acpi_thermal_get_polling_frequency(struct acpi_thermal *tz)
 
 static int acpi_thermal_set_cooling_mode(struct acpi_thermal *tz, int mode)
 {
-       acpi_status status = AE_OK;
-       union acpi_object arg0 = { ACPI_TYPE_INTEGER };
-       struct acpi_object_list arg_list = { 1, &arg0 };
-       acpi_handle handle = NULL;
-
-
        if (!tz)
                return -EINVAL;
 
-       status = acpi_get_handle(tz->device->handle, "_SCP", &handle);
-       if (ACPI_FAILURE(status)) {
+       if (!acpi_has_method(tz->device->handle, "_SCP")) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO, "_SCP not present\n"));
                return -ENODEV;
-       }
-
-       arg0.integer.value = mode;
-
-       status = acpi_evaluate_object(handle, NULL, &arg_list, NULL);
-       if (ACPI_FAILURE(status))
+       } else if (ACPI_FAILURE(acpi_execute_simple_method(tz->device->handle,
+                                                          "_SCP", mode))) {
                return -ENODEV;
+       }
 
        return 0;
 }
@@ -769,7 +753,6 @@ static int thermal_notify(struct thermal_zone_device *thermal, int trip,
        else
                return 0;
 
-       acpi_bus_generate_proc_event(tz->device, type, 1);
        acpi_bus_generate_netlink_event(tz->device->pnp.device_class,
                                        dev_name(&tz->device->dev), type, 1);
 
@@ -850,12 +833,13 @@ static int acpi_thermal_cooling_device_cb(struct thermal_zone_device *thermal,
                if (ACPI_SUCCESS(status) && (dev == device)) {
                        if (bind)
                                result = thermal_zone_bind_cooling_device
-                                               (thermal, -1, cdev,
-                                                THERMAL_NO_LIMIT,
+                                               (thermal, THERMAL_TRIPS_NONE,
+                                                cdev, THERMAL_NO_LIMIT,
                                                 THERMAL_NO_LIMIT);
                        else
                                result = thermal_zone_unbind_cooling_device
-                                               (thermal, -1, cdev);
+                                               (thermal, THERMAL_TRIPS_NONE,
+                                                cdev);
                        if (result)
                                goto failed;
                }
@@ -980,14 +964,12 @@ static void acpi_thermal_notify(struct acpi_device *device, u32 event)
        case ACPI_THERMAL_NOTIFY_THRESHOLDS:
                acpi_thermal_trips_update(tz, ACPI_TRIPS_REFRESH_THRESHOLDS);
                acpi_thermal_check(tz);
-               acpi_bus_generate_proc_event(device, event, 0);
                acpi_bus_generate_netlink_event(device->pnp.device_class,
                                                  dev_name(&device->dev), event, 0);
                break;
        case ACPI_THERMAL_NOTIFY_DEVICES:
                acpi_thermal_trips_update(tz, ACPI_TRIPS_REFRESH_DEVICES);
                acpi_thermal_check(tz);
-               acpi_bus_generate_proc_event(device, event, 0);
                acpi_bus_generate_netlink_event(device->pnp.device_class,
                                                  dev_name(&device->dev), event, 0);
                break;
@@ -1101,8 +1083,6 @@ static int acpi_thermal_add(struct acpi_device *device)
        strcpy(acpi_device_name(device), ACPI_THERMAL_DEVICE_NAME);
        strcpy(acpi_device_class(device), ACPI_THERMAL_CLASS);
        device->driver_data = tz;
-       mutex_init(&tz->lock);
-
 
        result = acpi_thermal_get_info(tz);
        if (result)
@@ -1135,7 +1115,6 @@ static int acpi_thermal_remove(struct acpi_device *device)
        tz = acpi_driver_data(device);
 
        acpi_thermal_unregister_thermal_zone(tz);
-       mutex_destroy(&tz->lock);
        kfree(tz);
        return 0;
 }
index 74437130431359005374a44529bab0c801918f3a..552248b0005b01a241ab511e52eef08a1c15d244 100644 (file)
@@ -495,3 +495,73 @@ acpi_handle_printk(const char *level, acpi_handle handle, const char *fmt, ...)
        kfree(buffer.pointer);
 }
 EXPORT_SYMBOL(acpi_handle_printk);
+
+/**
+ * acpi_has_method: Check whether @handle has a method named @name
+ * @handle: ACPI device handle
+ * @name: name of object or method
+ *
+ * Check whether @handle has a method named @name.
+ */
+bool acpi_has_method(acpi_handle handle, char *name)
+{
+       acpi_handle tmp;
+
+       return ACPI_SUCCESS(acpi_get_handle(handle, name, &tmp));
+}
+EXPORT_SYMBOL(acpi_has_method);
+
+acpi_status acpi_execute_simple_method(acpi_handle handle, char *method,
+                                      u64 arg)
+{
+       union acpi_object obj = { .type = ACPI_TYPE_INTEGER };
+       struct acpi_object_list arg_list = { .count = 1, .pointer = &obj, };
+
+       obj.integer.value = arg;
+
+       return acpi_evaluate_object(handle, method, &arg_list, NULL);
+}
+EXPORT_SYMBOL(acpi_execute_simple_method);
+
+/**
+ * acpi_evaluate_ej0: Evaluate _EJ0 method for hotplug operations
+ * @handle: ACPI device handle
+ *
+ * Evaluate device's _EJ0 method for hotplug operations.
+ */
+acpi_status acpi_evaluate_ej0(acpi_handle handle)
+{
+       acpi_status status;
+
+       status = acpi_execute_simple_method(handle, "_EJ0", 1);
+       if (status == AE_NOT_FOUND)
+               acpi_handle_warn(handle, "No _EJ0 support for device\n");
+       else if (ACPI_FAILURE(status))
+               acpi_handle_warn(handle, "Eject failed (0x%x)\n", status);
+
+       return status;
+}
+
+/**
+ * acpi_evaluate_lck: Evaluate _LCK method to lock/unlock device
+ * @handle: ACPI device handle
+ * @lock: lock device if non-zero, otherwise unlock device
+ *
+ * Evaluate device's _LCK method if present to lock/unlock device
+ */
+acpi_status acpi_evaluate_lck(acpi_handle handle, int lock)
+{
+       acpi_status status;
+
+       status = acpi_execute_simple_method(handle, "_LCK", !!lock);
+       if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
+               if (lock)
+                       acpi_handle_warn(handle,
+                               "Locking device failed (0x%x)\n", status);
+               else
+                       acpi_handle_warn(handle,
+                               "Unlocking device failed (0x%x)\n", status);
+       }
+
+       return status;
+}
index e1284b8dc6eef9b800f997ecbf6bc5e60214eacc..ae0d09c867eb7aa8e65e547e92d79867ceb78bea 100644 (file)
@@ -1,5 +1,5 @@
 /*
- *  video.c - ACPI Video Driver ($Revision:$)
+ *  video.c - ACPI Video Driver
  *
  *  Copyright (C) 2004 Luming Yu <luming.yu@intel.com>
  *  Copyright (C) 2004 Bruno Ducrot <ducrot@poupinou.org>
@@ -88,7 +88,7 @@ module_param(allow_duplicates, bool, 0644);
 static bool use_bios_initial_backlight = 1;
 module_param(use_bios_initial_backlight, bool, 0644);
 
-static int register_count = 0;
+static int register_count;
 static int acpi_video_bus_add(struct acpi_device *device);
 static int acpi_video_bus_remove(struct acpi_device *device);
 static void acpi_video_bus_notify(struct acpi_device *device, u32 event);
@@ -118,26 +118,26 @@ struct acpi_video_bus_flags {
 };
 
 struct acpi_video_bus_cap {
-       u8 _DOS:1;              /*Enable/Disable output switching */
-       u8 _DOD:1;              /*Enumerate all devices attached to display adapter */
-       u8 _ROM:1;              /*Get ROM Data */
-       u8 _GPD:1;              /*Get POST Device */
-       u8 _SPD:1;              /*Set POST Device */
-       u8 _VPO:1;              /*Video POST Options */
+       u8 _DOS:1;              /* Enable/Disable output switching */
+       u8 _DOD:1;              /* Enumerate all devices attached to display adapter */
+       u8 _ROM:1;              /* Get ROM Data */
+       u8 _GPD:1;              /* Get POST Device */
+       u8 _SPD:1;              /* Set POST Device */
+       u8 _VPO:1;              /* Video POST Options */
        u8 reserved:2;
 };
 
 struct acpi_video_device_attrib {
        u32 display_index:4;    /* A zero-based instance of the Display */
-       u32 display_port_attachment:4;  /*This field differentiates the display type */
-       u32 display_type:4;     /*Describe the specific type in use */
-       u32 vendor_specific:4;  /*Chipset Vendor Specific */
-       u32 bios_can_detect:1;  /*BIOS can detect the device */
-       u32 depend_on_vga:1;    /*Non-VGA output device whose power is related to 
+       u32 display_port_attachment:4;  /* This field differentiates the display type */
+       u32 display_type:4;     /* Describe the specific type in use */
+       u32 vendor_specific:4;  /* Chipset Vendor Specific */
+       u32 bios_can_detect:1;  /* BIOS can detect the device */
+       u32 depend_on_vga:1;    /* Non-VGA output device whose power is related to
                                   the VGA device. */
-       u32 pipe_id:3;          /*For VGA multiple-head devices. */
-       u32 reserved:10;        /*Must be 0 */
-       u32 device_id_scheme:1; /*Device ID Scheme */
+       u32 pipe_id:3;          /* For VGA multiple-head devices. */
+       u32 reserved:10;        /* Must be 0 */
+       u32 device_id_scheme:1; /* Device ID Scheme */
 };
 
 struct acpi_video_enumerated_device {
@@ -174,19 +174,17 @@ struct acpi_video_device_flags {
 };
 
 struct acpi_video_device_cap {
-       u8 _ADR:1;              /*Return the unique ID */
-       u8 _BCL:1;              /*Query list of brightness control levels supported */
-       u8 _BCM:1;              /*Set the brightness level */
+       u8 _ADR:1;              /* Return the unique ID */
+       u8 _BCL:1;              /* Query list of brightness control levels supported */
+       u8 _BCM:1;              /* Set the brightness level */
        u8 _BQC:1;              /* Get current brightness level */
        u8 _BCQ:1;              /* Some buggy BIOS uses _BCQ instead of _BQC */
-       u8 _DDC:1;              /*Return the EDID for this device */
+       u8 _DDC:1;              /* Return the EDID for this device */
 };
 
 struct acpi_video_brightness_flags {
        u8 _BCL_no_ac_battery_levels:1; /* no AC/Battery levels in _BCL */
-       u8 _BCL_reversed:1;             /* _BCL package is in a reversed order*/
-       u8 _BCL_use_index:1;            /* levels in _BCL are index values */
-       u8 _BCM_use_index:1;            /* input of _BCM is an index value */
+       u8 _BCL_reversed:1;             /* _BCL package is in a reversed order */
        u8 _BQC_use_index:1;            /* _BQC returns an index value */
 };
 
@@ -231,21 +229,22 @@ static int acpi_video_get_next_level(struct acpi_video_device *device,
 static int acpi_video_switch_brightness(struct acpi_video_device *device,
                                         int event);
 
-/*backlight device sysfs support*/
+/* backlight device sysfs support */
 static int acpi_video_get_brightness(struct backlight_device *bd)
 {
        unsigned long long cur_level;
        int i;
-       struct acpi_video_device *vd =
-               (struct acpi_video_device *)bl_get_data(bd);
+       struct acpi_video_device *vd = bl_get_data(bd);
 
        if (acpi_video_device_lcd_get_level_current(vd, &cur_level, false))
                return -EINVAL;
        for (i = 2; i < vd->brightness->count; i++) {
                if (vd->brightness->levels[i] == cur_level)
-                       /* The first two entries are special - see page 575
-                          of the ACPI spec 3.0 */
-                       return i-2;
+                       /*
+                        * The first two entries are special - see page 575
+                        * of the ACPI spec 3.0
+                        */
+                       return i - 2;
        }
        return 0;
 }
@@ -253,8 +252,7 @@ static int acpi_video_get_brightness(struct backlight_device *bd)
 static int acpi_video_set_brightness(struct backlight_device *bd)
 {
        int request_level = bd->props.brightness + 2;
-       struct acpi_video_device *vd =
-               (struct acpi_video_device *)bl_get_data(bd);
+       struct acpi_video_device *vd = bl_get_data(bd);
 
        return acpi_video_device_lcd_set_level(vd,
                                vd->brightness->levels[request_level]);
@@ -302,11 +300,11 @@ video_set_cur_state(struct thermal_cooling_device *cooling_dev, unsigned long st
        struct acpi_video_device *video = acpi_driver_data(device);
        int level;
 
-       if ( state >= video->brightness->count - 2)
+       if (state >= video->brightness->count - 2)
                return -EINVAL;
 
        state = video->brightness->count - state;
-       level = video->brightness->levels[state -1];
+       level = video->brightness->levels[state - 1];
        return acpi_video_device_lcd_set_level(video, level);
 }
 
@@ -316,9 +314,11 @@ static const struct thermal_cooling_device_ops video_cooling_ops = {
        .set_cur_state = video_set_cur_state,
 };
 
-/* --------------------------------------------------------------------------
-                               Video Management
-   -------------------------------------------------------------------------- */
+/*
+ * --------------------------------------------------------------------------
+ *                             Video Management
+ * --------------------------------------------------------------------------
+ */
 
 static int
 acpi_video_device_lcd_query_levels(struct acpi_video_device *device,
@@ -345,7 +345,7 @@ acpi_video_device_lcd_query_levels(struct acpi_video_device *device,
 
        return 0;
 
-      err:
+err:
        kfree(buffer.pointer);
 
        return status;
@@ -355,14 +355,10 @@ static int
 acpi_video_device_lcd_set_level(struct acpi_video_device *device, int level)
 {
        int status;
-       union acpi_object arg0 = { ACPI_TYPE_INTEGER };
-       struct acpi_object_list args = { 1, &arg0 };
        int state;
 
-       arg0.integer.value = level;
-
-       status = acpi_evaluate_object(device->dev->handle, "_BCM",
-                                     &args, NULL);
+       status = acpi_execute_simple_method(device->dev->handle,
+                                           "_BCM", level);
        if (ACPI_FAILURE(status)) {
                ACPI_ERROR((AE_INFO, "Evaluating _BCM failed"));
                return -EIO;
@@ -546,7 +542,7 @@ acpi_video_device_lcd_get_level_current(struct acpi_video_device *device,
                                if (device->brightness->levels[i] == *level) {
                                        device->brightness->curr = *level;
                                        return 0;
-                       }
+                               }
                        /*
                         * BQC returned an invalid level.
                         * Stop using it.
@@ -556,7 +552,8 @@ acpi_video_device_lcd_get_level_current(struct acpi_video_device *device,
                                      buf));
                        device->cap._BQC = device->cap._BCQ = 0;
                } else {
-                       /* Fixme:
+                       /*
+                        * Fixme:
                         * should we return an error or ignore this failure?
                         * dev->brightness->curr is a cached value which stores
                         * the correct current backlight level in most cases.
@@ -615,8 +612,8 @@ acpi_video_device_EDID(struct acpi_video_device *device,
 
 /*
  *  Arg:
- *     video           : video bus device pointer
- *     bios_flag       : 
+ *     video           : video bus device pointer
+ *     bios_flag       :
  *             0.      The system BIOS should NOT automatically switch(toggle)
  *                     the active display output.
  *             1.      The system BIOS should automatically switch (toggle) the
@@ -628,9 +625,9 @@ acpi_video_device_EDID(struct acpi_video_device *device,
  *     lcd_flag        :
  *             0.      The system BIOS should automatically control the brightness level
  *                     of the LCD when the power changes from AC to DC
- *             1.      The system BIOS should NOT automatically control the brightness 
+ *             1.      The system BIOS should NOT automatically control the brightness
  *                     level of the LCD when the power changes from AC to DC.
- * Return Value:
+ *  Return Value:
  *             -EINVAL wrong arg.
  */
 
@@ -638,18 +635,15 @@ static int
 acpi_video_bus_DOS(struct acpi_video_bus *video, int bios_flag, int lcd_flag)
 {
        acpi_status status;
-       union acpi_object arg0 = { ACPI_TYPE_INTEGER };
-       struct acpi_object_list args = { 1, &arg0 };
 
        if (!video->cap._DOS)
                return 0;
 
        if (bios_flag < 0 || bios_flag > 3 || lcd_flag < 0 || lcd_flag > 1)
                return -EINVAL;
-       arg0.integer.value = (lcd_flag << 2) | bios_flag;
-       video->dos_setting = arg0.integer.value;
-       status = acpi_evaluate_object(video->device->handle, "_DOS",
-               &args, NULL);
+       video->dos_setting = (lcd_flag << 2) | bios_flag;
+       status = acpi_execute_simple_method(video->device->handle, "_DOS",
+                                           (lcd_flag << 2) | bios_flag);
        if (ACPI_FAILURE(status))
                return -EIO;
 
@@ -717,8 +711,8 @@ static int acpi_video_bqc_quirk(struct acpi_video_device *device,
 
 
 /*
- *  Arg:       
- *     device  : video output device (LCD, CRT, ..)
+ *  Arg:
+ *     device  : video output device (LCD, CRT, ..)
  *
  *  Return Value:
  *     Maximum brightness level
@@ -806,16 +800,6 @@ acpi_video_init_brightness(struct acpi_video_device *device)
        br->count = count;
        device->brightness = br;
 
-       /* Check the input/output of _BQC/_BCL/_BCM */
-       if ((max_level < 100) && (max_level <= (count - 2)))
-               br->flags._BCL_use_index = 1;
-
-       /*
-        * _BCM is always consistent with _BCL,
-        * at least for all the laptops we have ever seen.
-        */
-       br->flags._BCM_use_index = br->flags._BCL_use_index;
-
        /* _BQC uses INDEX while _BCL uses VALUE in some laptops */
        br->curr = level = max_level;
 
@@ -877,7 +861,7 @@ out:
  *     device  : video output device (LCD, CRT, ..)
  *
  *  Return Value:
- *     None
+ *     None
  *
  *  Find out all required AML methods defined under the output
  *  device.
@@ -885,28 +869,21 @@ out:
 
 static void acpi_video_device_find_cap(struct acpi_video_device *device)
 {
-       acpi_handle h_dummy1;
-
-       if (ACPI_SUCCESS(acpi_get_handle(device->dev->handle, "_ADR", &h_dummy1))) {
+       if (acpi_has_method(device->dev->handle, "_ADR"))
                device->cap._ADR = 1;
-       }
-       if (ACPI_SUCCESS(acpi_get_handle(device->dev->handle, "_BCL", &h_dummy1))) {
+       if (acpi_has_method(device->dev->handle, "_BCL"))
                device->cap._BCL = 1;
-       }
-       if (ACPI_SUCCESS(acpi_get_handle(device->dev->handle, "_BCM", &h_dummy1))) {
+       if (acpi_has_method(device->dev->handle, "_BCM"))
                device->cap._BCM = 1;
-       }
-       if (ACPI_SUCCESS(acpi_get_handle(device->dev->handle,"_BQC",&h_dummy1)))
+       if (acpi_has_method(device->dev->handle, "_BQC")) {
                device->cap._BQC = 1;
-       else if (ACPI_SUCCESS(acpi_get_handle(device->dev->handle, "_BCQ",
-                               &h_dummy1))) {
+       } else if (acpi_has_method(device->dev->handle, "_BCQ")) {
                printk(KERN_WARNING FW_BUG "_BCQ is used instead of _BQC\n");
                device->cap._BCQ = 1;
        }
 
-       if (ACPI_SUCCESS(acpi_get_handle(device->dev->handle, "_DDC", &h_dummy1))) {
+       if (acpi_has_method(device->dev->handle, "_DDC"))
                device->cap._DDC = 1;
-       }
 
        if (acpi_video_init_brightness(device))
                return;
@@ -917,7 +894,7 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
                acpi_handle acpi_parent;
                struct device *parent = NULL;
                int result;
-               static int count = 0;
+               static int count;
                char *name;
 
                name = kasprintf(GFP_KERNEL, "acpi_video%d", count);
@@ -988,37 +965,29 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
 }
 
 /*
- *  Arg:       
- *     device  : video output device (VGA)
+ *  Arg:
+ *     device  : video output device (VGA)
  *
  *  Return Value:
- *     None
+ *     None
  *
  *  Find out all required AML methods defined under the video bus device.
  */
 
 static void acpi_video_bus_find_cap(struct acpi_video_bus *video)
 {
-       acpi_handle h_dummy1;
-
-       if (ACPI_SUCCESS(acpi_get_handle(video->device->handle, "_DOS", &h_dummy1))) {
+       if (acpi_has_method(video->device->handle, "_DOS"))
                video->cap._DOS = 1;
-       }
-       if (ACPI_SUCCESS(acpi_get_handle(video->device->handle, "_DOD", &h_dummy1))) {
+       if (acpi_has_method(video->device->handle, "_DOD"))
                video->cap._DOD = 1;
-       }
-       if (ACPI_SUCCESS(acpi_get_handle(video->device->handle, "_ROM", &h_dummy1))) {
+       if (acpi_has_method(video->device->handle, "_ROM"))
                video->cap._ROM = 1;
-       }
-       if (ACPI_SUCCESS(acpi_get_handle(video->device->handle, "_GPD", &h_dummy1))) {
+       if (acpi_has_method(video->device->handle, "_GPD"))
                video->cap._GPD = 1;
-       }
-       if (ACPI_SUCCESS(acpi_get_handle(video->device->handle, "_SPD", &h_dummy1))) {
+       if (acpi_has_method(video->device->handle, "_SPD"))
                video->cap._SPD = 1;
-       }
-       if (ACPI_SUCCESS(acpi_get_handle(video->device->handle, "_VPO", &h_dummy1))) {
+       if (acpi_has_method(video->device->handle, "_VPO"))
                video->cap._VPO = 1;
-       }
 }
 
 /*
@@ -1039,7 +1008,8 @@ static int acpi_video_bus_check(struct acpi_video_bus *video)
                return -ENODEV;
        pci_dev_put(dev);
 
-       /* Since there is no HID, CID and so on for VGA driver, we have
+       /*
+        * Since there is no HID, CID and so on for VGA driver, we have
         * to check well known required nodes.
         */
 
@@ -1069,12 +1039,14 @@ static int acpi_video_bus_check(struct acpi_video_bus *video)
        return status;
 }
 
-/* --------------------------------------------------------------------------
-                                 Driver Interface
-   -------------------------------------------------------------------------- */
+/*
+ * --------------------------------------------------------------------------
+ *                               Driver Interface
+ * --------------------------------------------------------------------------
+ */
 
 /* device interface */
-static struct acpi_video_device_attrib*
+static struct acpi_video_device_attrib *
 acpi_video_get_device_attr(struct acpi_video_bus *video, unsigned long device_id)
 {
        struct acpi_video_enumerated_device *ids;
@@ -1112,7 +1084,7 @@ acpi_video_bus_get_one_device(struct acpi_device *device,
        unsigned long long device_id;
        int status, device_type;
        struct acpi_video_device *data;
-       struct acpi_video_device_attrib*attribute;
+       struct acpi_video_device_attrib *attribute;
 
        status =
            acpi_evaluate_integer(device->handle, "_ADR", NULL, &device_id);
@@ -1134,7 +1106,7 @@ acpi_video_bus_get_one_device(struct acpi_device *device,
 
        attribute = acpi_video_get_device_attr(video, device_id);
 
-       if((attribute != NULL) && attribute->device_id_scheme) {
+       if (attribute && attribute->device_id_scheme) {
                switch (attribute->display_type) {
                case ACPI_VIDEO_DISPLAY_CRT:
                        data->flags.crt = 1;
@@ -1152,24 +1124,24 @@ acpi_video_bus_get_one_device(struct acpi_device *device,
                        data->flags.unknown = 1;
                        break;
                }
-               if(attribute->bios_can_detect)
+               if (attribute->bios_can_detect)
                        data->flags.bios = 1;
        } else {
                /* Check for legacy IDs */
                device_type = acpi_video_get_device_type(video, device_id);
                /* Ignore bits 16 and 18-20 */
                switch (device_type & 0xffe2ffff) {
-                       case ACPI_VIDEO_DISPLAY_LEGACY_MONITOR:
-                               data->flags.crt = 1;
-                               break;
-                       case ACPI_VIDEO_DISPLAY_LEGACY_PANEL:
-                               data->flags.lcd = 1;
-                               break;
-                       case ACPI_VIDEO_DISPLAY_LEGACY_TV:
-                               data->flags.tvout = 1;
-                               break;
-                       default:
-                               data->flags.unknown = 1;
+               case ACPI_VIDEO_DISPLAY_LEGACY_MONITOR:
+                       data->flags.crt = 1;
+                       break;
+               case ACPI_VIDEO_DISPLAY_LEGACY_PANEL:
+                       data->flags.lcd = 1;
+                       break;
+               case ACPI_VIDEO_DISPLAY_LEGACY_TV:
+                       data->flags.tvout = 1;
+                       break;
+               default:
+                       data->flags.unknown = 1;
                }
        }
 
@@ -1192,12 +1164,12 @@ acpi_video_bus_get_one_device(struct acpi_device *device,
 
 /*
  *  Arg:
- *     video   : video bus device 
+ *     video   : video bus device
  *
  *  Return:
- *     none
- *  
- *  Enumerate the video device list of the video bus, 
+ *     none
+ *
+ *  Enumerate the video device list of the video bus,
  *  bind the ids with the corresponding video devices
  *  under the video bus.
  */
@@ -1216,13 +1188,13 @@ static void acpi_video_device_rebind(struct acpi_video_bus *video)
 
 /*
  *  Arg:
- *     video   : video bus device 
- *     device  : video output device under the video 
- *             bus
+ *     video   : video bus device
+ *     device  : video output device under the video
+ *             bus
  *
  *  Return:
- *     none
- *  
+ *     none
+ *
  *  Bind the ids with the corresponding video devices
  *  under the video bus.
  */
@@ -1245,11 +1217,11 @@ acpi_video_device_bind(struct acpi_video_bus *video,
 
 /*
  *  Arg:
- *     video   : video bus device 
+ *     video   : video bus device
  *
  *  Return:
- *     < 0     : error
- *  
+ *     < 0     : error
+ *
  *  Call _DOD to enumerate all devices attached to display adapter
  *
  */
@@ -1310,7 +1282,7 @@ static int acpi_video_device_enumerate(struct acpi_video_bus *video)
        video->attached_array = active_list;
        video->attached_count = count;
 
- out:
+out:
        kfree(buffer.pointer);
        return status;
 }
@@ -1577,7 +1549,6 @@ static void acpi_video_bus_notify(struct acpi_device *device, u32 event)
        switch (event) {
        case ACPI_VIDEO_NOTIFY_SWITCH:  /* User requested a switch,
                                         * most likely via hotkey. */
-               acpi_bus_generate_proc_event(device, event, 0);
                keycode = KEY_SWITCHVIDEOMODE;
                break;
 
@@ -1585,20 +1556,16 @@ static void acpi_video_bus_notify(struct acpi_device *device, u32 event)
                                         * connector. */
                acpi_video_device_enumerate(video);
                acpi_video_device_rebind(video);
-               acpi_bus_generate_proc_event(device, event, 0);
                keycode = KEY_SWITCHVIDEOMODE;
                break;
 
        case ACPI_VIDEO_NOTIFY_CYCLE:   /* Cycle Display output hotkey pressed. */
-               acpi_bus_generate_proc_event(device, event, 0);
                keycode = KEY_SWITCHVIDEOMODE;
                break;
        case ACPI_VIDEO_NOTIFY_NEXT_OUTPUT:     /* Next Display output hotkey pressed. */
-               acpi_bus_generate_proc_event(device, event, 0);
                keycode = KEY_VIDEO_NEXT;
                break;
        case ACPI_VIDEO_NOTIFY_PREV_OUTPUT:     /* previous Display output hotkey pressed. */
-               acpi_bus_generate_proc_event(device, event, 0);
                keycode = KEY_VIDEO_PREV;
                break;
 
@@ -1641,31 +1608,26 @@ static void acpi_video_device_notify(acpi_handle handle, u32 event, void *data)
        case ACPI_VIDEO_NOTIFY_CYCLE_BRIGHTNESS:        /* Cycle brightness */
                if (brightness_switch_enabled)
                        acpi_video_switch_brightness(video_device, event);
-               acpi_bus_generate_proc_event(device, event, 0);
                keycode = KEY_BRIGHTNESS_CYCLE;
                break;
        case ACPI_VIDEO_NOTIFY_INC_BRIGHTNESS:  /* Increase brightness */
                if (brightness_switch_enabled)
                        acpi_video_switch_brightness(video_device, event);
-               acpi_bus_generate_proc_event(device, event, 0);
                keycode = KEY_BRIGHTNESSUP;
                break;
        case ACPI_VIDEO_NOTIFY_DEC_BRIGHTNESS:  /* Decrease brightness */
                if (brightness_switch_enabled)
                        acpi_video_switch_brightness(video_device, event);
-               acpi_bus_generate_proc_event(device, event, 0);
                keycode = KEY_BRIGHTNESSDOWN;
                break;
        case ACPI_VIDEO_NOTIFY_ZERO_BRIGHTNESS: /* zero brightness */
                if (brightness_switch_enabled)
                        acpi_video_switch_brightness(video_device, event);
-               acpi_bus_generate_proc_event(device, event, 0);
                keycode = KEY_BRIGHTNESS_ZERO;
                break;
        case ACPI_VIDEO_NOTIFY_DISPLAY_OFF:     /* display device off */
                if (brightness_switch_enabled)
                        acpi_video_switch_brightness(video_device, event);
-               acpi_bus_generate_proc_event(device, event, 0);
                keycode = KEY_DISPLAY_OFF;
                break;
        default:
@@ -1765,7 +1727,7 @@ static int acpi_video_bus_add(struct acpi_device *device)
        if (!strcmp(device->pnp.bus_id, "VID")) {
                if (instance)
                        device->pnp.bus_id[3] = '0' + instance;
-               instance ++;
+               instance++;
        }
        /* a hack to fix the duplicate name "VGA" problem on Pa 3553 */
        if (!strcmp(device->pnp.bus_id, "VGA")) {
index c3397748ba466ca13f41c664b4c374c7dea5d028..940edbf2fe8f4460b27fceb1e7fe5fdb7cbacfc3 100644 (file)
@@ -53,14 +53,13 @@ acpi_backlight_cap_match(acpi_handle handle, u32 level, void *context,
                          void **retyurn_value)
 {
        long *cap = context;
-       acpi_handle h_dummy;
 
-       if (ACPI_SUCCESS(acpi_get_handle(handle, "_BCM", &h_dummy)) &&
-           ACPI_SUCCESS(acpi_get_handle(handle, "_BCL", &h_dummy))) {
+       if (acpi_has_method(handle, "_BCM") &&
+           acpi_has_method(handle, "_BCL")) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found generic backlight "
                                  "support\n"));
                *cap |= ACPI_VIDEO_BACKLIGHT;
-               if (ACPI_FAILURE(acpi_get_handle(handle, "_BQC", &h_dummy)))
+               if (!acpi_has_method(handle, "_BQC"))
                        printk(KERN_WARNING FW_BUG PREFIX "No _BQC method, "
                                "cannot determine initial brightness\n");
                /* We have backlight support, no need to scan further */
@@ -79,22 +78,20 @@ acpi_backlight_cap_match(acpi_handle handle, u32 level, void *context,
  */
 long acpi_is_video_device(acpi_handle handle)
 {
-       acpi_handle h_dummy;
        long video_caps = 0;
 
        /* Is this device able to support video switching ? */
-       if (ACPI_SUCCESS(acpi_get_handle(handle, "_DOD", &h_dummy)) ||
-           ACPI_SUCCESS(acpi_get_handle(handle, "_DOS", &h_dummy)))
+       if (acpi_has_method(handle, "_DOD") || acpi_has_method(handle, "_DOS"))
                video_caps |= ACPI_VIDEO_OUTPUT_SWITCHING;
 
        /* Is this device able to retrieve a video ROM ? */
-       if (ACPI_SUCCESS(acpi_get_handle(handle, "_ROM", &h_dummy)))
+       if (acpi_has_method(handle, "_ROM"))
                video_caps |= ACPI_VIDEO_ROM_AVAILABLE;
 
        /* Is this device able to configure which video head to be POSTed ? */
-       if (ACPI_SUCCESS(acpi_get_handle(handle, "_VPO", &h_dummy)) &&
-           ACPI_SUCCESS(acpi_get_handle(handle, "_GPD", &h_dummy)) &&
-           ACPI_SUCCESS(acpi_get_handle(handle, "_SPD", &h_dummy)))
+       if (acpi_has_method(handle, "_VPO") &&
+           acpi_has_method(handle, "_GPD") &&
+           acpi_has_method(handle, "_SPD"))
                video_caps |= ACPI_VIDEO_DEVICE_POSTING;
 
        /* Only check for backlight functionality if one of the above hit. */
index cf4e7020adacde5e69881a21adb0c578d31d7a3e..da8170dfc90f142d8924de9c10be76444acbb250 100644 (file)
@@ -947,11 +947,11 @@ static void pata_acpi_set_state(struct ata_port *ap, pm_message_t state)
                        continue;
 
                acpi_bus_set_power(dev_handle, state.event & PM_EVENT_RESUME ?
-                                               ACPI_STATE_D0 : ACPI_STATE_D3);
+                                       ACPI_STATE_D0 : ACPI_STATE_D3_COLD);
        }
 
        if (!(state.event & PM_EVENT_RESUME))
-               acpi_bus_set_power(port_handle, ACPI_STATE_D3);
+               acpi_bus_set_power(port_handle, ACPI_STATE_D3_COLD);
 }
 
 /**
index 1c41722bb7e2d39016dd25f023dc37d6860b7584..20fd337a57314a2928c9208bf706ffabed0c4dd5 100644 (file)
@@ -289,24 +289,24 @@ static int sata_pmp_configure(struct ata_device *dev, int print_info)
 
        /* Disable sending Early R_OK.
         * With "cached read" HDD testing and multiple ports busy on a SATA
-        * host controller, 3726 PMP will very rarely drop a deferred
+        * host controller, 3x26 PMP will very rarely drop a deferred
         * R_OK that was intended for the host. Symptom will be all
         * 5 drives under test will timeout, get reset, and recover.
         */
-       if (vendor == 0x1095 && devid == 0x3726) {
+       if (vendor == 0x1095 && (devid == 0x3726 || devid == 0x3826)) {
                u32 reg;
 
                err_mask = sata_pmp_read(&ap->link, PMP_GSCR_SII_POL, &reg);
                if (err_mask) {
                        rc = -EIO;
-                       reason = "failed to read Sil3726 Private Register";
+                       reason = "failed to read Sil3x26 Private Register";
                        goto fail;
                }
                reg &= ~0x1;
                err_mask = sata_pmp_write(&ap->link, PMP_GSCR_SII_POL, reg);
                if (err_mask) {
                        rc = -EIO;
-                       reason = "failed to write Sil3726 Private Register";
+                       reason = "failed to write Sil3x26 Private Register";
                        goto fail;
                }
        }
@@ -383,8 +383,8 @@ static void sata_pmp_quirks(struct ata_port *ap)
        u16 devid = sata_pmp_gscr_devid(gscr);
        struct ata_link *link;
 
-       if (vendor == 0x1095 && devid == 0x3726) {
-               /* sil3726 quirks */
+       if (vendor == 0x1095 && (devid == 0x3726 || devid == 0x3826)) {
+               /* sil3x26 quirks */
                ata_for_each_link(link, ap, EDGE) {
                        /* link reports offline after LPM */
                        link->flags |= ATA_LFLAG_NO_LPM;
index 577d902bc4deaa12d69d1acdc8aa304732163227..cc8a6e879dfb7eeac203b294a38c97163ff90d23 100644 (file)
@@ -120,8 +120,6 @@ extern void ata_acpi_on_disable(struct ata_device *dev);
 extern void ata_acpi_set_state(struct ata_port *ap, pm_message_t state);
 extern int ata_acpi_register(void);
 extern void ata_acpi_unregister(void);
-extern void ata_acpi_bind(struct ata_device *dev);
-extern void ata_acpi_unbind(struct ata_device *dev);
 extern void ata_acpi_hotplug_init(struct ata_host *host);
 #else
 static inline void ata_acpi_dissociate(struct ata_host *host) { }
@@ -133,8 +131,6 @@ static inline void ata_acpi_set_state(struct ata_port *ap,
                                      pm_message_t state) { }
 static inline int ata_acpi_register(void) { return 0; }
 static inline void ata_acpi_unregister(void) { }
-static inline void ata_acpi_bind(struct ata_device *dev) { }
-static inline void ata_acpi_unbind(struct ata_device *dev) { }
 static inline void ata_acpi_hotplug_init(struct ata_host *host) {}
 #endif
 
index 848ed3254ddd6892f9f26cf3be0ab7efcb1a8f79..853f610af28fbc9dff0ee59d1fa1e6688cbfd732 100644 (file)
@@ -654,7 +654,7 @@ static void arasan_cf_freeze(struct ata_port *ap)
        ata_sff_freeze(ap);
 }
 
-void arasan_cf_error_handler(struct ata_port *ap)
+static void arasan_cf_error_handler(struct ata_port *ap)
 {
        struct arasan_cf_dev *acdev = ap->host->private_data;
 
@@ -683,7 +683,7 @@ static void arasan_cf_dma_start(struct arasan_cf_dev *acdev)
        ata_sff_queue_work(&acdev->work);
 }
 
-unsigned int arasan_cf_qc_issue(struct ata_queued_cmd *qc)
+static unsigned int arasan_cf_qc_issue(struct ata_queued_cmd *qc)
 {
        struct ata_port *ap = qc->ap;
        struct arasan_cf_dev *acdev = ap->host->private_data;
index 8d493b4a0961985347feb48f990075233578c893..d59d5239405f934c5bf9f8186719819f55c9dfc6 100644 (file)
@@ -271,7 +271,7 @@ static int __init pata_at32_probe(struct platform_device *pdev)
 
        struct device            *dev = &pdev->dev;
        struct at32_ide_info     *info;
-       struct ide_platform_data *board = pdev->dev.platform_data;
+       struct ide_platform_data *board = dev_get_platdata(&pdev->dev);
        struct resource          *res;
 
        int irq;
index 5364f97b42c6b9d6fcac4e130d3172913aa7c80a..d63ee8f41a4f29e9c0bc9209236c97ca47259ab5 100644 (file)
@@ -315,7 +315,7 @@ static struct ata_port_operations pata_at91_port_ops = {
 
 static int pata_at91_probe(struct platform_device *pdev)
 {
-       struct at91_cf_data *board = pdev->dev.platform_data;
+       struct at91_cf_data *board = dev_get_platdata(&pdev->dev);
        struct device *dev = &pdev->dev;
        struct at91_ide_info *info;
        struct resource *mem_res;
index dcc6b243e525572ba4ac92576c3cb5e7d28863b9..1ec53f8ca96fa682fc1492e61816c6b043876f7d 100644 (file)
@@ -48,7 +48,7 @@ static unsigned int ixp4xx_mmio_data_xfer(struct ata_device *dev,
        u16 *buf16 = (u16 *) buf;
        struct ata_port *ap = dev->link->ap;
        void __iomem *mmio = ap->ioaddr.data_addr;
-       struct ixp4xx_pata_data *data = ap->host->dev->platform_data;
+       struct ixp4xx_pata_data *data = dev_get_platdata(ap->host->dev);
 
        /* set the expansion bus in 16bit mode and restore
         * 8 bit mode after the transaction.
@@ -143,7 +143,7 @@ static int ixp4xx_pata_probe(struct platform_device *pdev)
        struct resource *cs0, *cs1;
        struct ata_host *host;
        struct ata_port *ap;
-       struct ixp4xx_pata_data *data = pdev->dev.platform_data;
+       struct ixp4xx_pata_data *data = dev_get_platdata(&pdev->dev);
 
        cs0 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        cs1 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
index e73bef3093d2be6c859c462b4f8fa50758e21b80..c51bbb9ea8e8a826e3d2fa68e693d1cf6a3a2a94 100644 (file)
@@ -1037,7 +1037,7 @@ static void octeon_cf_shutdown(struct device *dev)
        union cvmx_mio_boot_dma_cfgx dma_cfg;
        union cvmx_mio_boot_dma_intx dma_int;
 
-       struct octeon_cf_port *cf_port = dev->platform_data;
+       struct octeon_cf_port *cf_port = dev_get_platdata(dev);
 
        if (cf_port->dma_base) {
                /* Stop and clear the dma engine.  */
index 71e093767f4e400e66ea8defe5b7d0ff0c2d0ff7..02794885de10be8329003300d19c2f8084ff9032 100644 (file)
@@ -180,7 +180,7 @@ static int pata_platform_probe(struct platform_device *pdev)
        struct resource *io_res;
        struct resource *ctl_res;
        struct resource *irq_res;
-       struct pata_platform_info *pp_info = pdev->dev.platform_data;
+       struct pata_platform_info *pp_info = dev_get_platdata(&pdev->dev);
 
        /*
         * Simple resource validation ..
index 942ef94b29e6658a115935c64cc3e81bdf8c1e6b..a6f05acad61ed01d68b0ec201698a8abc406cf44 100644 (file)
@@ -238,7 +238,7 @@ static int pxa_ata_probe(struct platform_device *pdev)
        struct resource *ctl_res;
        struct resource *dma_res;
        struct resource *irq_res;
-       struct pata_pxa_pdata *pdata = pdev->dev.platform_data;
+       struct pata_pxa_pdata *pdata = dev_get_platdata(&pdev->dev);
        int ret = 0;
 
        /*
index 6ef27e98c50894190e74f396e19e13a44713157c..898e544a7ae8791cb52524bb6f175be61410cd63 100644 (file)
@@ -241,8 +241,8 @@ static u8 pata_s3c_check_altstatus(struct ata_port *ap)
 /*
  * pata_s3c_data_xfer - Transfer data by PIO
  */
-unsigned int pata_s3c_data_xfer(struct ata_device *dev, unsigned char *buf,
-                               unsigned int buflen, int rw)
+static unsigned int pata_s3c_data_xfer(struct ata_device *dev,
+                               unsigned char *buf, unsigned int buflen, int rw)
 {
        struct ata_port *ap = dev->link->ap;
        struct s3c_ide_info *info = ap->host->private_data;
@@ -418,7 +418,7 @@ static struct ata_port_operations pata_s5p_port_ops = {
        .set_piomode            = pata_s3c_set_piomode,
 };
 
-static void pata_s3c_enable(void *s3c_ide_regbase, bool state)
+static void pata_s3c_enable(void __iomem *s3c_ide_regbase, bool state)
 {
        u32 temp = readl(s3c_ide_regbase + S3C_ATA_CTRL);
        temp = state ? (temp | 1) : (temp & ~1);
@@ -475,7 +475,7 @@ static void pata_s3c_hwinit(struct s3c_ide_info *info,
 
 static int __init pata_s3c_probe(struct platform_device *pdev)
 {
-       struct s3c_ide_platdata *pdata = pdev->dev.platform_data;
+       struct s3c_ide_platdata *pdata = dev_get_platdata(&pdev->dev);
        struct device *dev = &pdev->dev;
        struct s3c_ide_info *info;
        struct resource *res;
@@ -617,7 +617,7 @@ static int pata_s3c_resume(struct device *dev)
 {
        struct platform_device *pdev = to_platform_device(dev);
        struct ata_host *host = platform_get_drvdata(pdev);
-       struct s3c_ide_platdata *pdata = pdev->dev.platform_data;
+       struct s3c_ide_platdata *pdata = dev_get_platdata(&pdev->dev);
        struct s3c_ide_info *info = host->private_data;
 
        pata_s3c_hwinit(info, pdata);
index 19720a0a4a65ff5c6198ab4e37df7e756ce85717..851bd3f43ac63fc0f3631193f58aa3d94c0af39a 100644 (file)
@@ -293,6 +293,7 @@ static void fsl_sata_set_irq_coalescing(struct ata_host *host,
 {
        struct sata_fsl_host_priv *host_priv = host->private_data;
        void __iomem *hcr_base = host_priv->hcr_base;
+       unsigned long flags;
 
        if (count > ICC_MAX_INT_COUNT_THRESHOLD)
                count = ICC_MAX_INT_COUNT_THRESHOLD;
@@ -305,12 +306,12 @@ static void fsl_sata_set_irq_coalescing(struct ata_host *host,
                        (count > ICC_MIN_INT_COUNT_THRESHOLD))
                ticks = ICC_SAFE_INT_TICKS;
 
-       spin_lock(&host->lock);
+       spin_lock_irqsave(&host->lock, flags);
        iowrite32((count << 24 | ticks), hcr_base + ICC);
 
        intr_coalescing_count = count;
        intr_coalescing_ticks = ticks;
-       spin_unlock(&host->lock);
+       spin_unlock_irqrestore(&host->lock, flags);
 
        DPRINTK("interrupt coalescing, count = 0x%x, ticks = %x\n",
                        intr_coalescing_count, intr_coalescing_ticks);
index d047d92a456fbab39c56046d74feb24c9163668f..7f5e5d96327fcbb3a08994ae01f910f824a0c623 100644 (file)
 #define CR_BUSY                                0x0001
 #define CR_START                       0x0001
 #define CR_WR_RDN                      0x0002
+#define CPHY_TX_INPUT_STS              0x2001
 #define CPHY_RX_INPUT_STS              0x2002
-#define CPHY_SATA_OVERRIDE             0x4000
-#define CPHY_OVERRIDE                  0x2005
+#define CPHY_SATA_TX_OVERRIDE          0x8000
+#define CPHY_SATA_RX_OVERRIDE          0x4000
+#define CPHY_TX_OVERRIDE               0x2004
+#define CPHY_RX_OVERRIDE               0x2005
 #define SPHY_LANE                      0x100
 #define SPHY_HALF_RATE                 0x0001
 #define CPHY_SATA_DPLL_MODE            0x0700
 #define CPHY_SATA_DPLL_SHIFT           8
 #define CPHY_SATA_DPLL_RESET           (1 << 11)
+#define CPHY_SATA_TX_ATTEN             0x1c00
+#define CPHY_SATA_TX_ATTEN_SHIFT       10
 #define CPHY_PHY_COUNT                 6
 #define CPHY_LANE_COUNT                        4
 #define CPHY_PORT_COUNT                        (CPHY_PHY_COUNT * CPHY_LANE_COUNT)
@@ -66,6 +71,7 @@ struct phy_lane_info {
        void __iomem *phy_base;
        u8 lane_mapping;
        u8 phy_devs;
+       u8 tx_atten;
 };
 static struct phy_lane_info port_data[CPHY_PORT_COUNT];
 
@@ -76,9 +82,11 @@ static DEFINE_SPINLOCK(sgpio_lock);
 #define SGPIO_PINS                     3
 #define SGPIO_PORTS                    8
 
-/* can be cast as an ahci_host_priv for compatibility with most functions */
 struct ecx_plat_data {
        u32             n_ports;
+       /* number of extra clocks that the SGPIO PIC controller expects */
+       u32             pre_clocks;
+       u32             post_clocks;
        unsigned        sgpio_gpio[SGPIO_PINS];
        u32             sgpio_pattern;
        u32             port_to_sgpio[SGPIO_PORTS];
@@ -86,11 +94,11 @@ struct ecx_plat_data {
 
 #define SGPIO_SIGNALS                  3
 #define ECX_ACTIVITY_BITS              0x300000
-#define ECX_ACTIVITY_SHIFT             2
+#define ECX_ACTIVITY_SHIFT             0
 #define ECX_LOCATE_BITS                        0x80000
 #define ECX_LOCATE_SHIFT               1
 #define ECX_FAULT_BITS                 0x400000
-#define ECX_FAULT_SHIFT                        0
+#define ECX_FAULT_SHIFT                        2
 static inline int sgpio_bit_shift(struct ecx_plat_data *pdata, u32 port,
                                u32 shift)
 {
@@ -155,6 +163,9 @@ static ssize_t ecx_transmit_led_message(struct ata_port *ap, u32 state,
        spin_lock_irqsave(&sgpio_lock, flags);
        ecx_parse_sgpio(pdata, ap->port_no, state);
        sgpio_out = pdata->sgpio_pattern;
+       for (i = 0; i < pdata->pre_clocks; i++)
+               ecx_led_cycle_clock(pdata);
+
        gpio_set_value(pdata->sgpio_gpio[SLOAD], 1);
        ecx_led_cycle_clock(pdata);
        gpio_set_value(pdata->sgpio_gpio[SLOAD], 0);
@@ -167,6 +178,8 @@ static ssize_t ecx_transmit_led_message(struct ata_port *ap, u32 state,
                sgpio_out >>= 1;
                ecx_led_cycle_clock(pdata);
        }
+       for (i = 0; i < pdata->post_clocks; i++)
+               ecx_led_cycle_clock(pdata);
 
        /* save off new led state for port/slot */
        emp->led_state = state;
@@ -201,6 +214,11 @@ static void highbank_set_em_messages(struct device *dev,
        of_property_read_u32_array(np, "calxeda,led-order",
                                                pdata->port_to_sgpio,
                                                pdata->n_ports);
+       if (of_property_read_u32(np, "calxeda,pre-clocks", &pdata->pre_clocks))
+               pdata->pre_clocks = 0;
+       if (of_property_read_u32(np, "calxeda,post-clocks",
+                               &pdata->post_clocks))
+               pdata->post_clocks = 0;
 
        /* store em_loc */
        hpriv->em_loc = 0;
@@ -259,8 +277,27 @@ static void highbank_cphy_disable_overrides(u8 sata_port)
        if (unlikely(port_data[sata_port].phy_base == NULL))
                return;
        tmp = combo_phy_read(sata_port, CPHY_RX_INPUT_STS + lane * SPHY_LANE);
-       tmp &= ~CPHY_SATA_OVERRIDE;
-       combo_phy_write(sata_port, CPHY_OVERRIDE + lane * SPHY_LANE, tmp);
+       tmp &= ~CPHY_SATA_RX_OVERRIDE;
+       combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);
+}
+
+static void cphy_override_tx_attenuation(u8 sata_port, u32 val)
+{
+       u8 lane = port_data[sata_port].lane_mapping;
+       u32 tmp;
+
+       if (val & 0x8)
+               return;
+
+       tmp = combo_phy_read(sata_port, CPHY_TX_INPUT_STS + lane * SPHY_LANE);
+       tmp &= ~CPHY_SATA_TX_OVERRIDE;
+       combo_phy_write(sata_port, CPHY_TX_OVERRIDE + lane * SPHY_LANE, tmp);
+
+       tmp |= CPHY_SATA_TX_OVERRIDE;
+       combo_phy_write(sata_port, CPHY_TX_OVERRIDE + lane * SPHY_LANE, tmp);
+
+       tmp |= (val << CPHY_SATA_TX_ATTEN_SHIFT) & CPHY_SATA_TX_ATTEN;
+       combo_phy_write(sata_port, CPHY_TX_OVERRIDE + lane * SPHY_LANE, tmp);
 }
 
 static void cphy_override_rx_mode(u8 sata_port, u32 val)
@@ -268,21 +305,21 @@ static void cphy_override_rx_mode(u8 sata_port, u32 val)
        u8 lane = port_data[sata_port].lane_mapping;
        u32 tmp;
        tmp = combo_phy_read(sata_port, CPHY_RX_INPUT_STS + lane * SPHY_LANE);
-       tmp &= ~CPHY_SATA_OVERRIDE;
-       combo_phy_write(sata_port, CPHY_OVERRIDE + lane * SPHY_LANE, tmp);
+       tmp &= ~CPHY_SATA_RX_OVERRIDE;
+       combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);
 
-       tmp |= CPHY_SATA_OVERRIDE;
-       combo_phy_write(sata_port, CPHY_OVERRIDE + lane * SPHY_LANE, tmp);
+       tmp |= CPHY_SATA_RX_OVERRIDE;
+       combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);
 
        tmp &= ~CPHY_SATA_DPLL_MODE;
        tmp |= val << CPHY_SATA_DPLL_SHIFT;
-       combo_phy_write(sata_port, CPHY_OVERRIDE + lane * SPHY_LANE, tmp);
+       combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);
 
        tmp |= CPHY_SATA_DPLL_RESET;
-       combo_phy_write(sata_port, CPHY_OVERRIDE + lane * SPHY_LANE, tmp);
+       combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);
 
        tmp &= ~CPHY_SATA_DPLL_RESET;
-       combo_phy_write(sata_port, CPHY_OVERRIDE + lane * SPHY_LANE, tmp);
+       combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);
 
        msleep(15);
 }
@@ -299,16 +336,20 @@ static void highbank_cphy_override_lane(u8 sata_port)
                                                lane * SPHY_LANE);
        } while ((tmp & SPHY_HALF_RATE) && (k++ < 1000));
        cphy_override_rx_mode(sata_port, 3);
+       cphy_override_tx_attenuation(sata_port, port_data[sata_port].tx_atten);
 }
 
 static int highbank_initialize_phys(struct device *dev, void __iomem *addr)
 {
        struct device_node *sata_node = dev->of_node;
-       int phy_count = 0, phy, port = 0;
+       int phy_count = 0, phy, port = 0, i;
        void __iomem *cphy_base[CPHY_PHY_COUNT];
        struct device_node *phy_nodes[CPHY_PHY_COUNT];
+       u32 tx_atten[CPHY_PORT_COUNT];
+
        memset(port_data, 0, sizeof(struct phy_lane_info) * CPHY_PORT_COUNT);
        memset(phy_nodes, 0, sizeof(struct device_node*) * CPHY_PHY_COUNT);
+       memset(tx_atten, 0xff, CPHY_PORT_COUNT);
 
        do {
                u32 tmp;
@@ -336,6 +377,10 @@ static int highbank_initialize_phys(struct device *dev, void __iomem *addr)
                of_node_put(phy_data.np);
                port += 1;
        } while (port < CPHY_PORT_COUNT);
+       of_property_read_u32_array(sata_node, "calxeda,tx-atten",
+                               tx_atten, port);
+       for (i = 0; i < port; i++)
+               port_data[i].tx_atten = (u8) tx_atten[i];
        return 0;
 }
 
@@ -479,6 +524,9 @@ static int ahci_highbank_probe(struct platform_device *pdev)
        if (hpriv->cap & HOST_CAP_PMP)
                pi.flags |= ATA_FLAG_PMP;
 
+       if (hpriv->cap & HOST_CAP_64)
+               dma_set_coherent_mask(dev, DMA_BIT_MASK(64));
+
        /* CAP.NP sometimes indicate the index of the last enabled
         * port, at other times, that of the last possible port, so
         * determining the maximum port number requires looking at
index 35c6b6d09c277526d93461388b6971069c644341..56be318198971f40ea9a57703530260ad51e4067 100644 (file)
@@ -553,10 +553,15 @@ struct mv_host_priv {
        u32                     irq_mask_offset;
        u32                     unmask_all_irqs;
 
-#if defined(CONFIG_HAVE_CLK)
+       /*
+        * Needed on some devices that require their clocks to be enabled.
+        * These are optional: if the platform device does not have any
+        * clocks, they won't be used.  Also, if the underlying hardware
+        * does not support the common clock framework (CONFIG_HAVE_CLK=n),
+        * all the clock operations become no-ops (see clk.h).
+        */
        struct clk              *clk;
        struct clk              **port_clks;
-#endif
        /*
         * These consistent DMA memory pools give us guaranteed
         * alignment for hardware-accessed data structures,
@@ -4032,9 +4037,7 @@ static int mv_platform_probe(struct platform_device *pdev)
        struct resource *res;
        int n_ports = 0, irq = 0;
        int rc;
-#if defined(CONFIG_HAVE_CLK)
        int port;
-#endif
 
        ata_print_version_once(&pdev->dev, DRV_VERSION);
 
@@ -4058,7 +4061,7 @@ static int mv_platform_probe(struct platform_device *pdev)
                of_property_read_u32(pdev->dev.of_node, "nr-ports", &n_ports);
                irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
        } else {
-               mv_platform_data = pdev->dev.platform_data;
+               mv_platform_data = dev_get_platdata(&pdev->dev);
                n_ports = mv_platform_data->n_ports;
                irq = platform_get_irq(pdev, 0);
        }
@@ -4068,13 +4071,11 @@ static int mv_platform_probe(struct platform_device *pdev)
 
        if (!host || !hpriv)
                return -ENOMEM;
-#if defined(CONFIG_HAVE_CLK)
        hpriv->port_clks = devm_kzalloc(&pdev->dev,
                                        sizeof(struct clk *) * n_ports,
                                        GFP_KERNEL);
        if (!hpriv->port_clks)
                return -ENOMEM;
-#endif
        host->private_data = hpriv;
        hpriv->n_ports = n_ports;
        hpriv->board_idx = chip_soc;
@@ -4084,7 +4085,6 @@ static int mv_platform_probe(struct platform_device *pdev)
                                   resource_size(res));
        hpriv->base -= SATAHC0_REG_BASE;
 
-#if defined(CONFIG_HAVE_CLK)
        hpriv->clk = clk_get(&pdev->dev, NULL);
        if (IS_ERR(hpriv->clk))
                dev_notice(&pdev->dev, "cannot get optional clkdev\n");
@@ -4098,7 +4098,6 @@ static int mv_platform_probe(struct platform_device *pdev)
                if (!IS_ERR(hpriv->port_clks[port]))
                        clk_prepare_enable(hpriv->port_clks[port]);
        }
-#endif
 
        /*
         * (Re-)program MBUS remapping windows if we are asked to.
@@ -4124,7 +4123,6 @@ static int mv_platform_probe(struct platform_device *pdev)
                return 0;
 
 err:
-#if defined(CONFIG_HAVE_CLK)
        if (!IS_ERR(hpriv->clk)) {
                clk_disable_unprepare(hpriv->clk);
                clk_put(hpriv->clk);
@@ -4135,7 +4133,6 @@ err:
                        clk_put(hpriv->port_clks[port]);
                }
        }
-#endif
 
        return rc;
 }
@@ -4151,13 +4148,10 @@ err:
 static int mv_platform_remove(struct platform_device *pdev)
 {
        struct ata_host *host = platform_get_drvdata(pdev);
-#if defined(CONFIG_HAVE_CLK)
        struct mv_host_priv *hpriv = host->private_data;
        int port;
-#endif
        ata_host_detach(host);
 
-#if defined(CONFIG_HAVE_CLK)
        if (!IS_ERR(hpriv->clk)) {
                clk_disable_unprepare(hpriv->clk);
                clk_put(hpriv->clk);
@@ -4168,7 +4162,6 @@ static int mv_platform_remove(struct platform_device *pdev)
                        clk_put(hpriv->port_clks[port]);
                }
        }
-#endif
        return 0;
 }
 
@@ -4428,9 +4421,6 @@ static int mv_pci_device_resume(struct pci_dev *pdev)
 #endif
 #endif
 
-static int mv_platform_probe(struct platform_device *pdev);
-static int mv_platform_remove(struct platform_device *pdev);
-
 static int __init mv_init(void)
 {
        int rc = -ENODEV;
index 8108eb0654448f8eba16da40e4aace400ff8087a..c2d95e9fb971e611135ee19f81b3531eb36a597b 100644 (file)
@@ -778,10 +778,6 @@ static int sata_rcar_probe(struct platform_device *pdev)
        int irq;
        int ret = 0;
 
-       mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (mem == NULL)
-               return -EINVAL;
-
        irq = platform_get_irq(pdev, 0);
        if (irq <= 0)
                return -EINVAL;
@@ -807,6 +803,7 @@ static int sata_rcar_probe(struct platform_device *pdev)
 
        host->private_data = priv;
 
+       mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        priv->base = devm_ioremap_resource(&pdev->dev, mem);
        if (IS_ERR(priv->base)) {
                ret = PTR_ERR(priv->base);
index 5a9b6569dd74f8cc97ba15fde613f208bd752c46..9f098a82cf04b061edb203fff718ef0ca70e6328 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/sched.h>
 #include <linux/async.h>
 #include <linux/suspend.h>
+#include <trace/events/power.h>
 #include <linux/cpuidle.h>
 #include "../base.h"
 #include "power.h"
@@ -56,6 +57,30 @@ static pm_message_t pm_transition;
 
 static int async_error;
 
+static char *pm_verb(int event)
+{
+       switch (event) {
+       case PM_EVENT_SUSPEND:
+               return "suspend";
+       case PM_EVENT_RESUME:
+               return "resume";
+       case PM_EVENT_FREEZE:
+               return "freeze";
+       case PM_EVENT_QUIESCE:
+               return "quiesce";
+       case PM_EVENT_HIBERNATE:
+               return "hibernate";
+       case PM_EVENT_THAW:
+               return "thaw";
+       case PM_EVENT_RESTORE:
+               return "restore";
+       case PM_EVENT_RECOVER:
+               return "recover";
+       default:
+               return "(unknown PM event)";
+       }
+}
+
 /**
  * device_pm_sleep_init - Initialize system suspend-related device fields.
  * @dev: Device object being initialized.
@@ -172,16 +197,21 @@ static ktime_t initcall_debug_start(struct device *dev)
 }
 
 static void initcall_debug_report(struct device *dev, ktime_t calltime,
-                                 int error)
+                                 int error, pm_message_t state, char *info)
 {
-       ktime_t delta, rettime;
+       ktime_t rettime;
+       s64 nsecs;
+
+       rettime = ktime_get();
+       nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime));
 
        if (pm_print_times_enabled) {
-               rettime = ktime_get();
-               delta = ktime_sub(rettime, calltime);
                pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
-                       error, (unsigned long long)ktime_to_ns(delta) >> 10);
+                       error, (unsigned long long)nsecs >> 10);
        }
+
+       trace_device_pm_report_time(dev, info, nsecs, pm_verb(state.event),
+                                   error);
 }
 
 /**
@@ -309,30 +339,6 @@ static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t stat
        return NULL;
 }
 
-static char *pm_verb(int event)
-{
-       switch (event) {
-       case PM_EVENT_SUSPEND:
-               return "suspend";
-       case PM_EVENT_RESUME:
-               return "resume";
-       case PM_EVENT_FREEZE:
-               return "freeze";
-       case PM_EVENT_QUIESCE:
-               return "quiesce";
-       case PM_EVENT_HIBERNATE:
-               return "hibernate";
-       case PM_EVENT_THAW:
-               return "thaw";
-       case PM_EVENT_RESTORE:
-               return "restore";
-       case PM_EVENT_RECOVER:
-               return "recover";
-       default:
-               return "(unknown PM event)";
-       }
-}
-
 static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
 {
        dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
@@ -379,7 +385,7 @@ static int dpm_run_callback(pm_callback_t cb, struct device *dev,
        error = cb(dev);
        suspend_report_result(cb, error);
 
-       initcall_debug_report(dev, calltime, error);
+       initcall_debug_report(dev, calltime, error, state, info);
 
        return error;
 }
@@ -1027,7 +1033,8 @@ EXPORT_SYMBOL_GPL(dpm_suspend_end);
  * @cb: Suspend callback to execute.
  */
 static int legacy_suspend(struct device *dev, pm_message_t state,
-                         int (*cb)(struct device *dev, pm_message_t state))
+                         int (*cb)(struct device *dev, pm_message_t state),
+                         char *info)
 {
        int error;
        ktime_t calltime;
@@ -1037,7 +1044,7 @@ static int legacy_suspend(struct device *dev, pm_message_t state,
        error = cb(dev, state);
        suspend_report_result(cb, error);
 
-       initcall_debug_report(dev, calltime, error);
+       initcall_debug_report(dev, calltime, error, state, info);
 
        return error;
 }
@@ -1097,7 +1104,8 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
                        goto Run;
                } else if (dev->class->suspend) {
                        pm_dev_dbg(dev, state, "legacy class ");
-                       error = legacy_suspend(dev, state, dev->class->suspend);
+                       error = legacy_suspend(dev, state, dev->class->suspend,
+                                               "legacy class ");
                        goto End;
                }
        }
@@ -1108,7 +1116,8 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
                        callback = pm_op(dev->bus->pm, state);
                } else if (dev->bus->suspend) {
                        pm_dev_dbg(dev, state, "legacy bus ");
-                       error = legacy_suspend(dev, state, dev->bus->suspend);
+                       error = legacy_suspend(dev, state, dev->bus->suspend,
+                                               "legacy bus ");
                        goto End;
                }
        }
index c8ec186303db29308f1091db8d5a2bcff01dc53c..ef89897c6043eec57ed2d8d1f4c6db464cfd92d1 100644 (file)
@@ -460,6 +460,7 @@ int opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
        srcu_notifier_call_chain(&dev_opp->head, OPP_EVENT_ADD, new_opp);
        return 0;
 }
+EXPORT_SYMBOL_GPL(opp_add);
 
 /**
  * opp_set_availability() - helper to set the availability of an opp
index 1643e889bafc7ad7ef3f7ce1d60f60e8be930d79..d10456ffd811f5acc7da0dcd23979b1408d736a9 100644 (file)
@@ -418,6 +418,31 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
                                reg, ret);
                        goto err_alloc;
                }
+
+               if (!chip->init_ack_masked)
+                       continue;
+
+               /* Ack masked but set interrupts */
+               reg = chip->status_base +
+                       (i * map->reg_stride * d->irq_reg_stride);
+               ret = regmap_read(map, reg, &d->status_buf[i]);
+               if (ret != 0) {
+                       dev_err(map->dev, "Failed to read IRQ status: %d\n",
+                               ret);
+                       goto err_alloc;
+               }
+
+               if (d->status_buf[i] && chip->ack_base) {
+                       reg = chip->ack_base +
+                               (i * map->reg_stride * d->irq_reg_stride);
+                       ret = regmap_write(map, reg,
+                                       d->status_buf[i] & d->mask_buf[i]);
+                       if (ret != 0) {
+                               dev_err(map->dev, "Failed to ack 0x%x: %d\n",
+                                       reg, ret);
+                               goto err_alloc;
+                       }
+               }
        }
 
        /* Wake is disabled by default */
index 380a2003231e4c56984207dd7f2658bd9bcc3166..7c081b38ef3e840ed37248e2110e012e7ebb2191 100644 (file)
@@ -35,8 +35,14 @@ config BCMA_DRIVER_PCI_HOSTMODE
          PCI core hostmode operation (external PCI bus).
 
 config BCMA_HOST_SOC
-       bool
-       depends on BCMA_DRIVER_MIPS
+       bool "Support for BCMA in a SoC"
+       depends on BCMA
+       help
+         Host interface for a Broadcom AIX bus directly mapped into
+         the memory. This only works with the Broadcom SoCs from the
+         BCM47XX line.
+
+         If unsure, say N
 
 config BCMA_DRIVER_MIPS
        bool "BCMA Broadcom MIPS core driver"
index 0067422ec17dacb681fd3d6cd4afd0c64894ae1c..90ee350442a99d243606c97b716af75d909308c9 100644 (file)
@@ -237,7 +237,7 @@ int bcma_bus_register(struct bcma_bus *bus)
        err = bcma_bus_scan(bus);
        if (err) {
                bcma_err(bus, "Failed to scan: %d\n", err);
-               return -1;
+               return err;
        }
 
        /* Early init CC core */
index 8bffa5c9818c8e598a7f1408bd56a511f5773f29..cd6b20fce680591a5253f15546cae34d21a4bb68 100644 (file)
@@ -32,6 +32,18 @@ static const struct bcma_device_id_name bcma_bcm_device_names[] = {
        { BCMA_CORE_4706_CHIPCOMMON, "BCM4706 ChipCommon" },
        { BCMA_CORE_4706_SOC_RAM, "BCM4706 SOC RAM" },
        { BCMA_CORE_4706_MAC_GBIT, "BCM4706 GBit MAC" },
+       { BCMA_CORE_PCIEG2, "PCIe Gen 2" },
+       { BCMA_CORE_DMA, "DMA" },
+       { BCMA_CORE_SDIO3, "SDIO3" },
+       { BCMA_CORE_USB20, "USB 2.0" },
+       { BCMA_CORE_USB30, "USB 3.0" },
+       { BCMA_CORE_A9JTAG, "ARM Cortex A9 JTAG" },
+       { BCMA_CORE_DDR23, "Denali DDR2/DDR3 memory controller" },
+       { BCMA_CORE_ROM, "ROM" },
+       { BCMA_CORE_NAND, "NAND flash controller" },
+       { BCMA_CORE_QSPI, "SPI flash controller" },
+       { BCMA_CORE_CHIPCOMMON_B, "Chipcommon B" },
+       { BCMA_CORE_ARMCA9, "ARM Cortex A9 core (ihost)" },
        { BCMA_CORE_AMEMC, "AMEMC (DDR)" },
        { BCMA_CORE_ALTA, "ALTA (I2S)" },
        { BCMA_CORE_INVALID, "Invalid" },
@@ -201,7 +213,7 @@ static s32 bcma_erom_get_mst_port(struct bcma_bus *bus, u32 __iomem **eromptr)
        return ent;
 }
 
-static s32 bcma_erom_get_addr_desc(struct bcma_bus *bus, u32 __iomem **eromptr,
+static u32 bcma_erom_get_addr_desc(struct bcma_bus *bus, u32 __iomem **eromptr,
                                  u32 type, u8 port)
 {
        u32 addrl, addrh, sizel, sizeh = 0;
@@ -213,7 +225,7 @@ static s32 bcma_erom_get_addr_desc(struct bcma_bus *bus, u32 __iomem **eromptr,
            ((ent & SCAN_ADDR_TYPE) != type) ||
            (((ent & SCAN_ADDR_PORT) >> SCAN_ADDR_PORT_SHIFT) != port)) {
                bcma_erom_push_ent(eromptr);
-               return -EINVAL;
+               return (u32)-EINVAL;
        }
 
        addrl = ent & SCAN_ADDR_ADDR;
@@ -261,7 +273,7 @@ static int bcma_get_next_core(struct bcma_bus *bus, u32 __iomem **eromptr,
                              struct bcma_device_id *match, int core_num,
                              struct bcma_device *core)
 {
-       s32 tmp;
+       u32 tmp;
        u8 i, j;
        s32 cia, cib;
        u8 ports[2], wrappers[2];
@@ -339,11 +351,11 @@ static int bcma_get_next_core(struct bcma_bus *bus, u32 __iomem **eromptr,
         * the main register space for the core
         */
        tmp = bcma_erom_get_addr_desc(bus, eromptr, SCAN_ADDR_TYPE_SLAVE, 0);
-       if (tmp <= 0) {
+       if (tmp == 0 || IS_ERR_VALUE(tmp)) {
                /* Try again to see if it is a bridge */
                tmp = bcma_erom_get_addr_desc(bus, eromptr,
                                              SCAN_ADDR_TYPE_BRIDGE, 0);
-               if (tmp <= 0) {
+               if (tmp == 0 || IS_ERR_VALUE(tmp)) {
                        return -EILSEQ;
                } else {
                        bcma_info(bus, "Bridge found\n");
@@ -357,7 +369,7 @@ static int bcma_get_next_core(struct bcma_bus *bus, u32 __iomem **eromptr,
                for (j = 0; ; j++) {
                        tmp = bcma_erom_get_addr_desc(bus, eromptr,
                                SCAN_ADDR_TYPE_SLAVE, i);
-                       if (tmp < 0) {
+                       if (IS_ERR_VALUE(tmp)) {
                                /* no more entries for port _i_ */
                                /* pr_debug("erom: slave port %d "
                                 * "has %d descriptors\n", i, j); */
@@ -374,7 +386,7 @@ static int bcma_get_next_core(struct bcma_bus *bus, u32 __iomem **eromptr,
                for (j = 0; ; j++) {
                        tmp = bcma_erom_get_addr_desc(bus, eromptr,
                                SCAN_ADDR_TYPE_MWRAP, i);
-                       if (tmp < 0) {
+                       if (IS_ERR_VALUE(tmp)) {
                                /* no more entries for port _i_ */
                                /* pr_debug("erom: master wrapper %d "
                                 * "has %d descriptors\n", i, j); */
@@ -392,7 +404,7 @@ static int bcma_get_next_core(struct bcma_bus *bus, u32 __iomem **eromptr,
                for (j = 0; ; j++) {
                        tmp = bcma_erom_get_addr_desc(bus, eromptr,
                                SCAN_ADDR_TYPE_SWRAP, i + hack);
-                       if (tmp < 0) {
+                       if (IS_ERR_VALUE(tmp)) {
                                /* no more entries for port _i_ */
                                /* pr_debug("erom: master wrapper %d "
                                 * has %d descriptors\n", i, j); */
index 64fbb8385cdc87575ce499d888919ac3cb52b81e..b12c11ec4bd21e405fe75276d656e3519c8ad873 100644 (file)
@@ -393,7 +393,7 @@ static struct page **bm_realloc_pages(struct drbd_bitmap *b, unsigned long want)
         * we must not block on IO to ourselves.
         * Context is receiver thread or dmsetup. */
        bytes = sizeof(struct page *)*want;
-       new_pages = kzalloc(bytes, GFP_NOIO);
+       new_pages = kzalloc(bytes, GFP_NOIO | __GFP_NOWARN);
        if (!new_pages) {
                new_pages = __vmalloc(bytes,
                                GFP_NOIO | __GFP_HIGHMEM | __GFP_ZERO,
index 40e715531aa65f0e63babd13757ad6dfb104d22a..cef47ce077a9c816a63bc2bcb10002d3410a33cb 100644 (file)
@@ -75,6 +75,7 @@
 #include <linux/sysfs.h>
 #include <linux/miscdevice.h>
 #include <linux/falloc.h>
+#include <linux/aio.h>
 #include "loop.h"
 
 #include <asm/uaccess.h>
@@ -218,6 +219,48 @@ lo_do_transfer(struct loop_device *lo, int cmd,
        return lo->transfer(lo, cmd, rpage, roffs, lpage, loffs, size, rblock);
 }
 
+#ifdef CONFIG_AIO
+static void lo_rw_aio_complete(u64 data, long res)
+{
+       struct bio *bio = (struct bio *)(uintptr_t)data;
+
+       if (res > 0)
+               res = 0;
+       else if (res < 0)
+               res = -EIO;
+
+       bio_endio(bio, res);
+}
+
+static int lo_rw_aio(struct loop_device *lo, struct bio *bio)
+{
+       struct file *file = lo->lo_backing_file;
+       struct kiocb *iocb;
+       unsigned short op;
+       struct iov_iter iter;
+       struct bio_vec *bvec;
+       size_t nr_segs;
+       loff_t pos = ((loff_t) bio->bi_sector << 9) + lo->lo_offset;
+
+       iocb = aio_kernel_alloc(GFP_NOIO);
+       if (!iocb)
+               return -ENOMEM;
+
+       if (bio_rw(bio) & WRITE)
+               op = IOCB_CMD_WRITE_ITER;
+       else
+               op = IOCB_CMD_READ_ITER;
+
+       bvec = bio_iovec_idx(bio, bio->bi_idx);
+       nr_segs = bio_segments(bio);
+       iov_iter_init_bvec(&iter, bvec, nr_segs, bvec_length(bvec, nr_segs), 0);
+       aio_kernel_init_rw(iocb, file, iov_iter_count(&iter), pos);
+       aio_kernel_init_callback(iocb, lo_rw_aio_complete, (u64)(uintptr_t)bio);
+
+       return aio_kernel_submit(iocb, op, &iter);
+}
+#endif /* CONFIG_AIO */
+
 /**
  * __do_lo_send_write - helper for writing data to a loop device
  *
@@ -418,50 +461,33 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio)
        pos = ((loff_t) bio->bi_sector << 9) + lo->lo_offset;
 
        if (bio_rw(bio) == WRITE) {
-               struct file *file = lo->lo_backing_file;
-
-               if (bio->bi_rw & REQ_FLUSH) {
-                       ret = vfs_fsync(file, 0);
-                       if (unlikely(ret && ret != -EINVAL)) {
-                               ret = -EIO;
-                               goto out;
-                       }
-               }
+               ret = lo_send(lo, bio, pos);
+       } else
+               ret = lo_receive(lo, bio, lo->lo_blocksize, pos);
 
-               /*
-                * We use punch hole to reclaim the free space used by the
-                * image a.k.a. discard. However we do not support discard if
-                * encryption is enabled, because it may give an attacker
-                * useful information.
-                */
-               if (bio->bi_rw & REQ_DISCARD) {
-                       struct file *file = lo->lo_backing_file;
-                       int mode = FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE;
+       return ret;
+}
 
-                       if ((!file->f_op->fallocate) ||
-                           lo->lo_encrypt_key_size) {
-                               ret = -EOPNOTSUPP;
-                               goto out;
-                       }
-                       ret = file->f_op->fallocate(file, mode, pos,
-                                                   bio->bi_size);
-                       if (unlikely(ret && ret != -EINVAL &&
-                                    ret != -EOPNOTSUPP))
-                               ret = -EIO;
-                       goto out;
-               }
+static int lo_discard(struct loop_device *lo, struct bio *bio)
+{
+       struct file *file = lo->lo_backing_file;
+       int mode = FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE;
+       loff_t pos = ((loff_t) bio->bi_sector << 9) + lo->lo_offset;
+       int ret;
 
-               ret = lo_send(lo, bio, pos);
+       /*
+        * We use punch hole to reclaim the free space used by the
+        * image a.k.a. discard. However we do not support discard if
+        * encryption is enabled, because it may give an attacker
+        * useful information.
+        */
 
-               if ((bio->bi_rw & REQ_FUA) && !ret) {
-                       ret = vfs_fsync(file, 0);
-                       if (unlikely(ret && ret != -EINVAL))
-                               ret = -EIO;
-               }
-       } else
-               ret = lo_receive(lo, bio, lo->lo_blocksize, pos);
+       if ((!file->f_op->fallocate) || lo->lo_encrypt_key_size)
+               return -EOPNOTSUPP;
 
-out:
+       ret = file->f_op->fallocate(file, mode, pos, bio->bi_size);
+       if (unlikely(ret && ret != -EINVAL && ret != -EOPNOTSUPP))
+               ret = -EIO;
        return ret;
 }
 
@@ -525,7 +551,35 @@ static inline void loop_handle_bio(struct loop_device *lo, struct bio *bio)
                do_loop_switch(lo, bio->bi_private);
                bio_put(bio);
        } else {
-               int ret = do_bio_filebacked(lo, bio);
+               int ret;
+
+               if (bio_rw(bio) == WRITE) {
+                       if (bio->bi_rw & REQ_FLUSH) {
+                               ret = vfs_fsync(lo->lo_backing_file, 1);
+                               if (unlikely(ret && ret != -EINVAL))
+                                       goto out;
+                       }
+                       if (bio->bi_rw & REQ_DISCARD) {
+                               ret = lo_discard(lo, bio);
+                               goto out;
+                       }
+               }
+#ifdef CONFIG_AIO
+               if (lo->lo_flags & LO_FLAGS_USE_AIO &&
+                   lo->transfer == transfer_none) {
+                       ret = lo_rw_aio(lo, bio);
+                       if (ret == 0)
+                               return;
+               } else
+#endif
+                       ret = do_bio_filebacked(lo, bio);
+
+               if ((bio_rw(bio) == WRITE) && bio->bi_rw & REQ_FUA && !ret) {
+                       ret = vfs_fsync(lo->lo_backing_file, 0);
+                       if (unlikely(ret && ret != -EINVAL))
+                               ret = -EIO;
+               }
+out:
                bio_endio(bio, ret);
        }
 }
@@ -547,6 +601,12 @@ static int loop_thread(void *data)
        struct loop_device *lo = data;
        struct bio *bio;
 
+       /*
+        * In cases where the underlying filesystem calls balance_dirty_pages()
+        * we want less throttling to avoid lock ups trying to write dirty
+        * pages through the loop device
+        */
+       current->flags |= PF_LESS_THROTTLE;
        set_user_nice(current, -20);
 
        while (!kthread_should_stop() || !bio_list_empty(&lo->lo_bio_list)) {
@@ -869,6 +929,14 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
            !file->f_op->write)
                lo_flags |= LO_FLAGS_READ_ONLY;
 
+#ifdef CONFIG_AIO
+       if (file->f_op->write_iter && file->f_op->read_iter &&
+           mapping->a_ops->direct_IO) {
+               file->f_flags |= O_DIRECT;
+               lo_flags |= LO_FLAGS_USE_AIO;
+       }
+#endif
+
        lo_blocksize = S_ISBLK(inode->i_mode) ?
                inode->i_bdev->bd_block_size : PAGE_SIZE;
 
index 4ad2ad9a5bb01448d6d2206a3387575718c47b87..0d669ae80d61d4e2a2b33733054bf13b047f22f6 100644 (file)
@@ -2163,9 +2163,9 @@ static int rbd_img_request_fill(struct rbd_img_request *img_request,
        struct rbd_obj_request *obj_request = NULL;
        struct rbd_obj_request *next_obj_request;
        bool write_request = img_request_write_test(img_request);
-       struct bio *bio_list = 0;
+       struct bio *bio_list = NULL;
        unsigned int bio_offset = 0;
-       struct page **pages = 0;
+       struct page **pages = NULL;
        u64 img_offset;
        u64 resid;
        u16 opcode;
index db2c3c305df8ea183d499861089e7e446d2beabe..023d35e3c7a75b573a52d99cbda5bd98c64e017f 100644 (file)
@@ -43,7 +43,7 @@ static ssize_t btmrvl_hscfgcmd_write(struct file *file,
        if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
                return -EFAULT;
 
-       ret = strict_strtol(buf, 10, &result);
+       ret = kstrtol(buf, 10, &result);
        if (ret)
                return ret;
 
@@ -89,7 +89,7 @@ static ssize_t btmrvl_pscmd_write(struct file *file, const char __user *ubuf,
        if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
                return -EFAULT;
 
-       ret = strict_strtol(buf, 10, &result);
+       ret = kstrtol(buf, 10, &result);
        if (ret)
                return ret;
 
@@ -135,7 +135,7 @@ static ssize_t btmrvl_hscmd_write(struct file *file, const char __user *ubuf,
        if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
                return -EFAULT;
 
-       ret = strict_strtol(buf, 10, &result);
+       ret = kstrtol(buf, 10, &result);
        if (ret)
                return ret;
 
index 75c262694632255e0932fa387c7e6b9d6e08aa25..00da6df9f71edfcd6f92acbe70129af5ec62e0bf 100644 (file)
@@ -486,7 +486,7 @@ static int btmrvl_sdio_download_fw_w_helper(struct btmrvl_sdio_card *card)
                        if (firmwarelen - offset < txlen)
                                txlen = firmwarelen - offset;
 
-                       tx_blocks = (txlen + blksz_dl - 1) / blksz_dl;
+                       tx_blocks = DIV_ROUND_UP(txlen, blksz_dl);
 
                        memcpy(fwbuf, &firmware[offset], txlen);
                }
@@ -873,7 +873,7 @@ static int btmrvl_sdio_host_to_card(struct btmrvl_private *priv,
        }
 
        blksz = SDIO_BLOCK_SIZE;
-       buf_block_len = (nb + blksz - 1) / blksz;
+       buf_block_len = DIV_ROUND_UP(nb, blksz);
 
        sdio_claim_host(card->func);
 
index 40a865449f35bf80a7b035ed02a6904c3767b674..0aa9d91daef500486e999c8ce4ccb4cb90d2755d 100644 (file)
@@ -153,12 +153,12 @@ config HW_RANDOM_IXP4XX
 
 config HW_RANDOM_OMAP
        tristate "OMAP Random Number Generator support"
-       depends on HW_RANDOM && (ARCH_OMAP16XX || ARCH_OMAP2)
+       depends on HW_RANDOM && (ARCH_OMAP16XX || ARCH_OMAP2PLUS)
        default HW_RANDOM
        ---help---
          This driver provides kernel-side support for the Random Number
-         Generator hardware found on OMAP16xx and OMAP24xx multimedia
-         processors.
+         Generator hardware found on OMAP16xx, OMAP2/3/4/5 and AM33xx/AM43xx
+         multimedia processors.
 
          To compile this driver as a module, choose M here: the
          module will be called omap-rng.
index 19a12ac64a9ec966149286a84a2f0db5e3e0ee60..6a86b6f56af22c08810acdeb861f2599549f2827 100644 (file)
@@ -164,7 +164,9 @@ static int __init mxc_rnga_probe(struct platform_device *pdev)
                goto out;
        }
 
-       clk_prepare_enable(mxc_rng->clk);
+       err = clk_prepare_enable(mxc_rng->clk);
+       if (err)
+               goto out;
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        mxc_rng->mem = devm_ioremap_resource(&pdev->dev, res);
index 6843ec87b98b9f87b3a6ebc3736335d6884826ff..9b89ff4881de7cdfe49e607e33c5ed0788f0be58 100644 (file)
 #include <linux/delay.h>
 #include <linux/slab.h>
 #include <linux/pm_runtime.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/interrupt.h>
 
 #include <asm/io.h>
 
-#define RNG_OUT_REG            0x00            /* Output register */
-#define RNG_STAT_REG           0x04            /* Status register
-                                                       [0] = STAT_BUSY */
-#define RNG_ALARM_REG          0x24            /* Alarm register
-                                                       [7:0] = ALARM_COUNTER */
-#define RNG_CONFIG_REG         0x28            /* Configuration register
-                                                       [11:6] = RESET_COUNT
-                                                       [5:3]  = RING2_DELAY
-                                                       [2:0]  = RING1_DELAY */
-#define RNG_REV_REG            0x3c            /* Revision register
-                                                       [7:0] = REV_NB */
-#define RNG_MASK_REG           0x40            /* Mask and reset register
-                                                       [2] = IT_EN
-                                                       [1] = SOFTRESET
-                                                       [0] = AUTOIDLE */
-#define RNG_SYSSTATUS          0x44            /* System status
-                                                       [0] = RESETDONE */
+#define RNG_REG_STATUS_RDY                     (1 << 0)
+
+#define RNG_REG_INTACK_RDY_MASK                        (1 << 0)
+#define RNG_REG_INTACK_SHUTDOWN_OFLO_MASK      (1 << 1)
+#define RNG_SHUTDOWN_OFLO_MASK                 (1 << 1)
+
+#define RNG_CONTROL_STARTUP_CYCLES_SHIFT       16
+#define RNG_CONTROL_STARTUP_CYCLES_MASK                (0xffff << 16)
+#define RNG_CONTROL_ENABLE_TRNG_SHIFT          10
+#define RNG_CONTROL_ENABLE_TRNG_MASK           (1 << 10)
+
+#define RNG_CONFIG_MAX_REFIL_CYCLES_SHIFT      16
+#define RNG_CONFIG_MAX_REFIL_CYCLES_MASK       (0xffff << 16)
+#define RNG_CONFIG_MIN_REFIL_CYCLES_SHIFT      0
+#define RNG_CONFIG_MIN_REFIL_CYCLES_MASK       (0xff << 0)
+
+#define RNG_CONTROL_STARTUP_CYCLES             0xff
+#define RNG_CONFIG_MIN_REFIL_CYCLES            0x21
+#define RNG_CONFIG_MAX_REFIL_CYCLES            0x22
+
+#define RNG_ALARMCNT_ALARM_TH_SHIFT            0x0
+#define RNG_ALARMCNT_ALARM_TH_MASK             (0xff << 0)
+#define RNG_ALARMCNT_SHUTDOWN_TH_SHIFT         16
+#define RNG_ALARMCNT_SHUTDOWN_TH_MASK          (0x1f << 16)
+#define RNG_ALARM_THRESHOLD                    0xff
+#define RNG_SHUTDOWN_THRESHOLD                 0x4
+
+#define RNG_REG_FROENABLE_MASK                 0xffffff
+#define RNG_REG_FRODETUNE_MASK                 0xffffff
+
+#define OMAP2_RNG_OUTPUT_SIZE                  0x4
+#define OMAP4_RNG_OUTPUT_SIZE                  0x8
+
+enum {
+       RNG_OUTPUT_L_REG = 0,
+       RNG_OUTPUT_H_REG,
+       RNG_STATUS_REG,
+       RNG_INTMASK_REG,
+       RNG_INTACK_REG,
+       RNG_CONTROL_REG,
+       RNG_CONFIG_REG,
+       RNG_ALARMCNT_REG,
+       RNG_FROENABLE_REG,
+       RNG_FRODETUNE_REG,
+       RNG_ALARMMASK_REG,
+       RNG_ALARMSTOP_REG,
+       RNG_REV_REG,
+       RNG_SYSCONFIG_REG,
+};
+
+static const u16 reg_map_omap2[] = {
+       [RNG_OUTPUT_L_REG]      = 0x0,
+       [RNG_STATUS_REG]        = 0x4,
+       [RNG_CONFIG_REG]        = 0x28,
+       [RNG_REV_REG]           = 0x3c,
+       [RNG_SYSCONFIG_REG]     = 0x40,
+};
 
+static const u16 reg_map_omap4[] = {
+       [RNG_OUTPUT_L_REG]      = 0x0,
+       [RNG_OUTPUT_H_REG]      = 0x4,
+       [RNG_STATUS_REG]        = 0x8,
+       [RNG_INTMASK_REG]       = 0xc,
+       [RNG_INTACK_REG]        = 0x10,
+       [RNG_CONTROL_REG]       = 0x14,
+       [RNG_CONFIG_REG]        = 0x18,
+       [RNG_ALARMCNT_REG]      = 0x1c,
+       [RNG_FROENABLE_REG]     = 0x20,
+       [RNG_FRODETUNE_REG]     = 0x24,
+       [RNG_ALARMMASK_REG]     = 0x28,
+       [RNG_ALARMSTOP_REG]     = 0x2c,
+       [RNG_REV_REG]           = 0x1FE0,
+       [RNG_SYSCONFIG_REG]     = 0x1FE4,
+};
+
+struct omap_rng_dev;
 /**
- * struct omap_rng_private_data - RNG IP block-specific data
- * @base: virtual address of the beginning of the RNG IP block registers
- * @mem_res: struct resource * for the IP block registers physical memory
+ * struct omap_rng_pdata - RNG IP block-specific data
+ * @regs: Pointer to the register offsets structure.
+ * @data_size: No. of bytes in RNG output.
+ * @data_present: Callback to determine if data is available.
+ * @init: Callback for IP specific initialization sequence.
+ * @cleanup: Callback for IP specific cleanup sequence.
  */
-struct omap_rng_private_data {
-       void __iomem *base;
-       struct resource *mem_res;
+struct omap_rng_pdata {
+       u16     *regs;
+       u32     data_size;
+       u32     (*data_present)(struct omap_rng_dev *priv);
+       int     (*init)(struct omap_rng_dev *priv);
+       void    (*cleanup)(struct omap_rng_dev *priv);
 };
 
-static inline u32 omap_rng_read_reg(struct omap_rng_private_data *priv, int reg)
+struct omap_rng_dev {
+       void __iomem                    *base;
+       struct device                   *dev;
+       const struct omap_rng_pdata     *pdata;
+};
+
+static inline u32 omap_rng_read(struct omap_rng_dev *priv, u16 reg)
 {
-       return __raw_readl(priv->base + reg);
+       return __raw_readl(priv->base + priv->pdata->regs[reg]);
 }
 
-static inline void omap_rng_write_reg(struct omap_rng_private_data *priv,
-                                     int reg, u32 val)
+static inline void omap_rng_write(struct omap_rng_dev *priv, u16 reg,
+                                     u32 val)
 {
-       __raw_writel(val, priv->base + reg);
+       __raw_writel(val, priv->base + priv->pdata->regs[reg]);
 }
 
 static int omap_rng_data_present(struct hwrng *rng, int wait)
 {
-       struct omap_rng_private_data *priv;
+       struct omap_rng_dev *priv;
        int data, i;
 
-       priv = (struct omap_rng_private_data *)rng->priv;
+       priv = (struct omap_rng_dev *)rng->priv;
 
        for (i = 0; i < 20; i++) {
-               data = omap_rng_read_reg(priv, RNG_STAT_REG) ? 0 : 1;
+               data = priv->pdata->data_present(priv);
                if (data || !wait)
                        break;
                /* RNG produces data fast enough (2+ MBit/sec, even
@@ -89,27 +163,212 @@ static int omap_rng_data_present(struct hwrng *rng, int wait)
 
 static int omap_rng_data_read(struct hwrng *rng, u32 *data)
 {
-       struct omap_rng_private_data *priv;
+       struct omap_rng_dev *priv;
+       u32 data_size, i;
+
+       priv = (struct omap_rng_dev *)rng->priv;
+       data_size = priv->pdata->data_size;
+
+       for (i = 0; i < data_size / sizeof(u32); i++)
+               data[i] = omap_rng_read(priv, RNG_OUTPUT_L_REG + i);
+
+       if (priv->pdata->regs[RNG_INTACK_REG])
+               omap_rng_write(priv, RNG_INTACK_REG, RNG_REG_INTACK_RDY_MASK);
+       return data_size;
+}
+
+static int omap_rng_init(struct hwrng *rng)
+{
+       struct omap_rng_dev *priv;
 
-       priv = (struct omap_rng_private_data *)rng->priv;
+       priv = (struct omap_rng_dev *)rng->priv;
+       return priv->pdata->init(priv);
+}
 
-       *data = omap_rng_read_reg(priv, RNG_OUT_REG);
+static void omap_rng_cleanup(struct hwrng *rng)
+{
+       struct omap_rng_dev *priv;
 
-       return sizeof(u32);
+       priv = (struct omap_rng_dev *)rng->priv;
+       priv->pdata->cleanup(priv);
 }
 
 static struct hwrng omap_rng_ops = {
        .name           = "omap",
        .data_present   = omap_rng_data_present,
        .data_read      = omap_rng_data_read,
+       .init           = omap_rng_init,
+       .cleanup        = omap_rng_cleanup,
+};
+
+static inline u32 omap2_rng_data_present(struct omap_rng_dev *priv)
+{
+       return omap_rng_read(priv, RNG_STATUS_REG) ? 0 : 1;
+}
+
+static int omap2_rng_init(struct omap_rng_dev *priv)
+{
+       omap_rng_write(priv, RNG_SYSCONFIG_REG, 0x1);
+       return 0;
+}
+
+static void omap2_rng_cleanup(struct omap_rng_dev *priv)
+{
+       omap_rng_write(priv, RNG_SYSCONFIG_REG, 0x0);
+}
+
+static struct omap_rng_pdata omap2_rng_pdata = {
+       .regs           = (u16 *)reg_map_omap2,
+       .data_size      = OMAP2_RNG_OUTPUT_SIZE,
+       .data_present   = omap2_rng_data_present,
+       .init           = omap2_rng_init,
+       .cleanup        = omap2_rng_cleanup,
 };
 
+#if defined(CONFIG_OF)
+static inline u32 omap4_rng_data_present(struct omap_rng_dev *priv)
+{
+       return omap_rng_read(priv, RNG_STATUS_REG) & RNG_REG_STATUS_RDY;
+}
+
+static int omap4_rng_init(struct omap_rng_dev *priv)
+{
+       u32 val;
+
+       /* Return if RNG is already running. */
+       if (omap_rng_read(priv, RNG_CONFIG_REG) & RNG_CONTROL_ENABLE_TRNG_MASK)
+               return 0;
+
+       val = RNG_CONFIG_MIN_REFIL_CYCLES << RNG_CONFIG_MIN_REFIL_CYCLES_SHIFT;
+       val |= RNG_CONFIG_MAX_REFIL_CYCLES << RNG_CONFIG_MAX_REFIL_CYCLES_SHIFT;
+       omap_rng_write(priv, RNG_CONFIG_REG, val);
+
+       omap_rng_write(priv, RNG_FRODETUNE_REG, 0x0);
+       omap_rng_write(priv, RNG_FROENABLE_REG, RNG_REG_FROENABLE_MASK);
+       val = RNG_ALARM_THRESHOLD << RNG_ALARMCNT_ALARM_TH_SHIFT;
+       val |= RNG_SHUTDOWN_THRESHOLD << RNG_ALARMCNT_SHUTDOWN_TH_SHIFT;
+       omap_rng_write(priv, RNG_ALARMCNT_REG, val);
+
+       val = RNG_CONTROL_STARTUP_CYCLES << RNG_CONTROL_STARTUP_CYCLES_SHIFT;
+       val |= RNG_CONTROL_ENABLE_TRNG_MASK;
+       omap_rng_write(priv, RNG_CONTROL_REG, val);
+
+       return 0;
+}
+
+static void omap4_rng_cleanup(struct omap_rng_dev *priv)
+{
+       int val;
+
+       val = omap_rng_read(priv, RNG_CONTROL_REG);
+       val &= ~RNG_CONTROL_ENABLE_TRNG_MASK;
+       omap_rng_write(priv, RNG_CONFIG_REG, val);
+}
+
+static irqreturn_t omap4_rng_irq(int irq, void *dev_id)
+{
+       struct omap_rng_dev *priv = dev_id;
+       u32 fro_detune, fro_enable;
+
+       /*
+        * Interrupt raised by a fro shutdown threshold, do the following:
+        * 1. Clear the alarm events.
+        * 2. De tune the FROs which are shutdown.
+        * 3. Re enable the shutdown FROs.
+        */
+       omap_rng_write(priv, RNG_ALARMMASK_REG, 0x0);
+       omap_rng_write(priv, RNG_ALARMSTOP_REG, 0x0);
+
+       fro_enable = omap_rng_read(priv, RNG_FROENABLE_REG);
+       fro_detune = ~fro_enable & RNG_REG_FRODETUNE_MASK;
+       fro_detune = fro_detune | omap_rng_read(priv, RNG_FRODETUNE_REG);
+       fro_enable = RNG_REG_FROENABLE_MASK;
+
+       omap_rng_write(priv, RNG_FRODETUNE_REG, fro_detune);
+       omap_rng_write(priv, RNG_FROENABLE_REG, fro_enable);
+
+       omap_rng_write(priv, RNG_INTACK_REG, RNG_REG_INTACK_SHUTDOWN_OFLO_MASK);
+
+       return IRQ_HANDLED;
+}
+
+static struct omap_rng_pdata omap4_rng_pdata = {
+       .regs           = (u16 *)reg_map_omap4,
+       .data_size      = OMAP4_RNG_OUTPUT_SIZE,
+       .data_present   = omap4_rng_data_present,
+       .init           = omap4_rng_init,
+       .cleanup        = omap4_rng_cleanup,
+};
+
+static const struct of_device_id omap_rng_of_match[] = {
+               {
+                       .compatible     = "ti,omap2-rng",
+                       .data           = &omap2_rng_pdata,
+               },
+               {
+                       .compatible     = "ti,omap4-rng",
+                       .data           = &omap4_rng_pdata,
+               },
+               {},
+};
+MODULE_DEVICE_TABLE(of, omap_rng_of_match);
+
+static int of_get_omap_rng_device_details(struct omap_rng_dev *priv,
+                                         struct platform_device *pdev)
+{
+       const struct of_device_id *match;
+       struct device *dev = &pdev->dev;
+       int irq, err;
+
+       match = of_match_device(of_match_ptr(omap_rng_of_match), dev);
+       if (!match) {
+               dev_err(dev, "no compatible OF match\n");
+               return -EINVAL;
+       }
+       priv->pdata = match->data;
+
+       if (of_device_is_compatible(dev->of_node, "ti,omap4-rng")) {
+               irq = platform_get_irq(pdev, 0);
+               if (irq < 0) {
+                       dev_err(dev, "%s: error getting IRQ resource - %d\n",
+                               __func__, irq);
+                       return irq;
+               }
+
+               err = devm_request_irq(dev, irq, omap4_rng_irq,
+                                      IRQF_TRIGGER_NONE, dev_name(dev), priv);
+               if (err) {
+                       dev_err(dev, "unable to request irq %d, err = %d\n",
+                               irq, err);
+                       return err;
+               }
+               omap_rng_write(priv, RNG_INTMASK_REG, RNG_SHUTDOWN_OFLO_MASK);
+       }
+       return 0;
+}
+#else
+static int of_get_omap_rng_device_details(struct omap_rng_dev *omap_rng,
+                                         struct platform_device *pdev)
+{
+       return -EINVAL;
+}
+#endif
+
+static int get_omap_rng_device_details(struct omap_rng_dev *omap_rng)
+{
+       /* Only OMAP2/3 can be non-DT */
+       omap_rng->pdata = &omap2_rng_pdata;
+       return 0;
+}
+
 static int omap_rng_probe(struct platform_device *pdev)
 {
-       struct omap_rng_private_data *priv;
+       struct omap_rng_dev *priv;
+       struct resource *res;
+       struct device *dev = &pdev->dev;
        int ret;
 
-       priv = kzalloc(sizeof(struct omap_rng_private_data), GFP_KERNEL);
+       priv = devm_kzalloc(dev, sizeof(struct omap_rng_dev), GFP_KERNEL);
        if (!priv) {
                dev_err(&pdev->dev, "could not allocate memory\n");
                return -ENOMEM;
@@ -117,26 +376,29 @@ static int omap_rng_probe(struct platform_device *pdev)
 
        omap_rng_ops.priv = (unsigned long)priv;
        platform_set_drvdata(pdev, priv);
+       priv->dev = dev;
 
-       priv->mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       priv->base = devm_ioremap_resource(&pdev->dev, priv->mem_res);
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       priv->base = devm_ioremap_resource(dev, res);
        if (IS_ERR(priv->base)) {
                ret = PTR_ERR(priv->base);
                goto err_ioremap;
        }
-       platform_set_drvdata(pdev, priv);
 
        pm_runtime_enable(&pdev->dev);
        pm_runtime_get_sync(&pdev->dev);
 
+       ret = (dev->of_node) ? of_get_omap_rng_device_details(priv, pdev) :
+                               get_omap_rng_device_details(priv);
+       if (ret)
+               goto err_ioremap;
+
        ret = hwrng_register(&omap_rng_ops);
        if (ret)
                goto err_register;
 
        dev_info(&pdev->dev, "OMAP Random Number Generator ver. %02x\n",
-                omap_rng_read_reg(priv, RNG_REV_REG));
-
-       omap_rng_write_reg(priv, RNG_MASK_REG, 0x1);
+                omap_rng_read(priv, RNG_REV_REG));
 
        return 0;
 
@@ -144,26 +406,21 @@ err_register:
        priv->base = NULL;
        pm_runtime_disable(&pdev->dev);
 err_ioremap:
-       kfree(priv);
-
+       dev_err(dev, "initialization failed.\n");
        return ret;
 }
 
 static int __exit omap_rng_remove(struct platform_device *pdev)
 {
-       struct omap_rng_private_data *priv = platform_get_drvdata(pdev);
+       struct omap_rng_dev *priv = platform_get_drvdata(pdev);
 
        hwrng_unregister(&omap_rng_ops);
 
-       omap_rng_write_reg(priv, RNG_MASK_REG, 0x0);
+       priv->pdata->cleanup(priv);
 
        pm_runtime_put_sync(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
 
-       release_mem_region(priv->mem_res->start, resource_size(priv->mem_res));
-
-       kfree(priv);
-
        return 0;
 }
 
@@ -171,9 +428,9 @@ static int __exit omap_rng_remove(struct platform_device *pdev)
 
 static int omap_rng_suspend(struct device *dev)
 {
-       struct omap_rng_private_data *priv = dev_get_drvdata(dev);
+       struct omap_rng_dev *priv = dev_get_drvdata(dev);
 
-       omap_rng_write_reg(priv, RNG_MASK_REG, 0x0);
+       priv->pdata->cleanup(priv);
        pm_runtime_put_sync(dev);
 
        return 0;
@@ -181,10 +438,10 @@ static int omap_rng_suspend(struct device *dev)
 
 static int omap_rng_resume(struct device *dev)
 {
-       struct omap_rng_private_data *priv = dev_get_drvdata(dev);
+       struct omap_rng_dev *priv = dev_get_drvdata(dev);
 
        pm_runtime_get_sync(dev);
-       omap_rng_write_reg(priv, RNG_MASK_REG, 0x1);
+       priv->pdata->init(priv);
 
        return 0;
 }
@@ -198,31 +455,18 @@ static SIMPLE_DEV_PM_OPS(omap_rng_pm, omap_rng_suspend, omap_rng_resume);
 
 #endif
 
-/* work with hotplug and coldplug */
-MODULE_ALIAS("platform:omap_rng");
-
 static struct platform_driver omap_rng_driver = {
        .driver = {
                .name           = "omap_rng",
                .owner          = THIS_MODULE,
                .pm             = OMAP_RNG_PM,
+               .of_match_table = of_match_ptr(omap_rng_of_match),
        },
        .probe          = omap_rng_probe,
        .remove         = __exit_p(omap_rng_remove),
 };
 
-static int __init omap_rng_init(void)
-{
-       return platform_driver_register(&omap_rng_driver);
-}
-
-static void __exit omap_rng_exit(void)
-{
-       platform_driver_unregister(&omap_rng_driver);
-}
-
-module_init(omap_rng_init);
-module_exit(omap_rng_exit);
-
+module_platform_driver(omap_rng_driver);
+MODULE_ALIAS("platform:omap_rng");
 MODULE_AUTHOR("Deepak Saxena (and others)");
 MODULE_LICENSE("GPL");
index 973b95113edf7580e09bcb078db0038c047dd7f6..3d4c2293c6f560d239691ba26ab3d7ee241e38f0 100644 (file)
@@ -33,7 +33,7 @@
 
 static void __iomem *rng_base;
 static struct clk *rng_clk;
-struct device *rng_dev;
+static struct device *rng_dev;
 
 static inline u32 picoxcell_trng_read_csr(void)
 {
index 00593c847cf0b1d63875d44a26e1159461bce885..09c5fbea2b9304e4282ef9365d5f372ff6b49e0a 100644 (file)
@@ -110,12 +110,10 @@ static int __init tx4939_rng_probe(struct platform_device *dev)
        struct resource *r;
        int i;
 
-       r = platform_get_resource(dev, IORESOURCE_MEM, 0);
-       if (!r)
-               return -EBUSY;
        rngdev = devm_kzalloc(&dev->dev, sizeof(*rngdev), GFP_KERNEL);
        if (!rngdev)
                return -ENOMEM;
+       r = platform_get_resource(dev, IORESOURCE_MEM, 0);
        rngdev->base = devm_ioremap_resource(&dev->dev, r);
        if (IS_ERR(rngdev->base))
                return PTR_ERR(rngdev->base);
index f3223aac4df11c41959a744bee67af2d2df232e5..db5fa4e9b9e50f3a88bce0993b01420b53e8735f 100644 (file)
@@ -285,9 +285,9 @@ static long raw_ctl_compat_ioctl(struct file *file, unsigned int cmd,
 
 static const struct file_operations raw_fops = {
        .read           = do_sync_read,
-       .aio_read       = generic_file_aio_read,
+       .read_iter      = generic_file_read_iter,
        .write          = do_sync_write,
-       .aio_write      = blkdev_aio_write,
+       .write_iter     = blkdev_write_iter,
        .fsync          = blkdev_fsync,
        .open           = raw_open,
        .release        = raw_release,
index bf2349dbbf7ffd301839bf24242b9fd2127b53db..7cc1fe2241fd622990c94c3755fe2995e716a8b8 100644 (file)
@@ -876,11 +876,6 @@ found:
        if (useinput)
                sonypi_report_input_event(event);
 
-#ifdef CONFIG_ACPI
-       if (sonypi_acpi_device)
-               acpi_bus_generate_proc_event(sonypi_acpi_device, 1, event);
-#endif
-
        kfifo_in_locked(&sonypi_device.fifo, (unsigned char *)&event,
                        sizeof(event), &sonypi_device.fifo_lock);
        kill_fasync(&sonypi_device.fifo_async, SIGIO, POLL_IN);
index fc45567ad3acef079db496c9a2f495e19e5b5bba..b79cf3e1b793dca8f652067718d4ece3c0b3335d 100644 (file)
@@ -1529,18 +1529,22 @@ static void remove_port_data(struct port *port)
 {
        struct port_buffer *buf;
 
+       spin_lock_irq(&port->inbuf_lock);
        /* Remove unused data this port might have received. */
        discard_port_data(port);
 
-       reclaim_consumed_buffers(port);
-
        /* Remove buffers we queued up for the Host to send us data in. */
        while ((buf = virtqueue_detach_unused_buf(port->in_vq)))
                free_buf(buf, true);
+       spin_unlock_irq(&port->inbuf_lock);
+
+       spin_lock_irq(&port->outvq_lock);
+       reclaim_consumed_buffers(port);
 
        /* Free pending buffers from the out-queue. */
        while ((buf = virtqueue_detach_unused_buf(port->out_vq)))
                free_buf(buf, true);
+       spin_unlock_irq(&port->outvq_lock);
 }
 
 /*
@@ -1554,6 +1558,7 @@ static void unplug_port(struct port *port)
        list_del(&port->list);
        spin_unlock_irq(&port->portdev->ports_lock);
 
+       spin_lock_irq(&port->inbuf_lock);
        if (port->guest_connected) {
                /* Let the app know the port is going down. */
                send_sigio_to_port(port);
@@ -1564,6 +1569,7 @@ static void unplug_port(struct port *port)
 
                wake_up_interruptible(&port->waitqueue);
        }
+       spin_unlock_irq(&port->inbuf_lock);
 
        if (is_console_port(port)) {
                spin_lock_irq(&pdrvdata_lock);
@@ -1585,9 +1591,8 @@ static void unplug_port(struct port *port)
        device_destroy(pdrvdata.class, port->dev->devt);
        cdev_del(port->cdev);
 
-       kfree(port->name);
-
        debugfs_remove(port->debugfs_file);
+       kfree(port->name);
 
        /*
         * Locks around here are not necessary - a port can't be
@@ -1681,7 +1686,9 @@ static void handle_control_message(struct ports_device *portdev,
                 * If the guest is connected, it'll be interested in
                 * knowing the host connection state changed.
                 */
+               spin_lock_irq(&port->inbuf_lock);
                send_sigio_to_port(port);
+               spin_unlock_irq(&port->inbuf_lock);
                break;
        case VIRTIO_CONSOLE_PORT_NAME:
                /*
@@ -1801,13 +1808,13 @@ static void in_intr(struct virtqueue *vq)
        if (!port->guest_connected && !is_rproc_serial(port->portdev->vdev))
                discard_port_data(port);
 
+       /* Send a SIGIO indicating new data in case the process asked for it */
+       send_sigio_to_port(port);
+
        spin_unlock_irqrestore(&port->inbuf_lock, flags);
 
        wake_up_interruptible(&port->waitqueue);
 
-       /* Send a SIGIO indicating new data in case the process asked for it */
-       send_sigio_to_port(port);
-
        if (is_console_port(port) && hvc_poll(port->cons.hvc))
                hvc_kick();
 }
@@ -2241,10 +2248,8 @@ static int __init init(void)
        }
 
        pdrvdata.debugfs_dir = debugfs_create_dir("virtio-ports", NULL);
-       if (!pdrvdata.debugfs_dir) {
-               pr_warning("Error %ld creating debugfs dir for virtio-ports\n",
-                          PTR_ERR(pdrvdata.debugfs_dir));
-       }
+       if (!pdrvdata.debugfs_dir)
+               pr_warning("Error creating debugfs dir for virtio-ports\n");
        INIT_LIST_HEAD(&pdrvdata.consoles);
        INIT_LIST_HEAD(&pdrvdata.portdevs);
 
index de4d5d93c3fdc826ae013c6f7e0652b13e550f2d..0fa204b244bd2df52974a50fc4290e9fa77d287c 100644 (file)
@@ -17,37 +17,47 @@ config ARM_DT_BL_CPUFREQ
          big.LITTLE platform. This gets frequency tables from DT.
 
 config ARM_EXYNOS_CPUFREQ
-       bool "SAMSUNG EXYNOS SoCs"
-       depends on ARCH_EXYNOS
+       bool
        select CPU_FREQ_TABLE
-       default y
-       help
-         This adds the CPUFreq driver common part for Samsung
-         EXYNOS SoCs.
-
-         If in doubt, say N.
 
 config ARM_EXYNOS4210_CPUFREQ
-       def_bool CPU_EXYNOS4210
+       bool "SAMSUNG EXYNOS4210"
+       depends on CPU_EXYNOS4210
+       default y
+       select ARM_EXYNOS_CPUFREQ
        help
          This adds the CPUFreq driver for Samsung EXYNOS4210
          SoC (S5PV310 or S5PC210).
 
+         If in doubt, say N.
+
 config ARM_EXYNOS4X12_CPUFREQ
-       def_bool (SOC_EXYNOS4212 || SOC_EXYNOS4412)
+       bool "SAMSUNG EXYNOS4x12"
+       depends on (SOC_EXYNOS4212 || SOC_EXYNOS4412)
+       default y
+       select ARM_EXYNOS_CPUFREQ
        help
          This adds the CPUFreq driver for Samsung EXYNOS4X12
          SoC (EXYNOS4212 or EXYNOS4412).
 
+         If in doubt, say N.
+
 config ARM_EXYNOS5250_CPUFREQ
-       def_bool SOC_EXYNOS5250
+       bool "SAMSUNG EXYNOS5250"
+       depends on SOC_EXYNOS5250
+       default y
+       select ARM_EXYNOS_CPUFREQ
        help
          This adds the CPUFreq driver for Samsung EXYNOS5250
          SoC.
 
+         If in doubt, say N.
+
 config ARM_EXYNOS5440_CPUFREQ
-       def_bool SOC_EXYNOS5440
+       bool "SAMSUNG EXYNOS5440"
+       depends on SOC_EXYNOS5440
        depends on HAVE_CLK && PM_OPP && OF
+       default y
        select CPU_FREQ_TABLE
        help
          This adds the CPUFreq driver for Samsung EXYNOS5440
@@ -55,6 +65,8 @@ config ARM_EXYNOS5440_CPUFREQ
          different than previous exynos controllers so not using
          the common exynos framework.
 
+         If in doubt, say N.
+
 config ARM_HIGHBANK_CPUFREQ
        tristate "Calxeda Highbank-based"
        depends on ARCH_HIGHBANK
index d345b5a7aa719e52fd660013afdf96b4d3da6335..ed9731398f2c5996b48ff4ef520382fc1e6b5856 100644 (file)
@@ -23,7 +23,7 @@ obj-$(CONFIG_GENERIC_CPUFREQ_CPU0)    += cpufreq-cpu0.o
 # powernow-k8 can load then. ACPI is preferred to all other hardware-specific drivers.
 # speedstep-* is preferred over p4-clockmod.
 
-obj-$(CONFIG_X86_ACPI_CPUFREQ)         += acpi-cpufreq.o mperf.o
+obj-$(CONFIG_X86_ACPI_CPUFREQ)         += acpi-cpufreq.o
 obj-$(CONFIG_X86_POWERNOW_K8)          += powernow-k8.o
 obj-$(CONFIG_X86_PCC_CPUFREQ)          += pcc-cpufreq.o
 obj-$(CONFIG_X86_POWERNOW_K6)          += powernow-k6.o
@@ -100,4 +100,5 @@ obj-$(CONFIG_LOONGSON2_CPUFREQ)             += loongson2_cpufreq.o
 obj-$(CONFIG_SH_CPU_FREQ)              += sh-cpufreq.o
 obj-$(CONFIG_SPARC_US2E_CPUFREQ)       += sparc-us2e-cpufreq.o
 obj-$(CONFIG_SPARC_US3_CPUFREQ)                += sparc-us3-cpufreq.o
+obj-$(CONFIG_CPU_FREQ_TILEGX)          += tilegx-cpufreq.o
 obj-$(CONFIG_UNICORE32)                        += unicore2-cpufreq.o
index 39264020b88a1b2e7d461a8d0a3f2f909a31dbb4..a1260b4549db647336192d24b3471b6b627897af 100644 (file)
@@ -45,7 +45,6 @@
 #include <asm/msr.h>
 #include <asm/processor.h>
 #include <asm/cpufeature.h>
-#include "mperf.h"
 
 MODULE_AUTHOR("Paul Diefenbaugh, Dominik Brodowski");
 MODULE_DESCRIPTION("ACPI Processor P-States Driver");
@@ -198,7 +197,7 @@ static ssize_t show_cpb(struct cpufreq_policy *policy, char *buf)
        return sprintf(buf, "%u\n", boost_enabled);
 }
 
-static struct freq_attr cpb = __ATTR(cpb, 0644, show_cpb, store_cpb);
+cpufreq_freq_attr_rw(cpb);
 #endif
 
 static int check_est_cpu(unsigned int cpuid)
@@ -710,7 +709,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
                return blacklisted;
 #endif
 
-       data = kzalloc(sizeof(struct acpi_cpufreq_data), GFP_KERNEL);
+       data = kzalloc(sizeof(*data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;
 
@@ -800,7 +799,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
                goto err_unreg;
        }
 
-       data->freq_table = kmalloc(sizeof(struct cpufreq_frequency_table) *
+       data->freq_table = kmalloc(sizeof(*data->freq_table) *
                    (perf->state_count+1), GFP_KERNEL);
        if (!data->freq_table) {
                result = -ENOMEM;
@@ -861,10 +860,6 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
        /* notify BIOS that we exist */
        acpi_processor_notify_smm(THIS_MODULE);
 
-       /* Check for APERF/MPERF support in hardware */
-       if (boot_cpu_has(X86_FEATURE_APERFMPERF))
-               acpi_cpufreq_driver.getavg = cpufreq_get_measured_perf;
-
        pr_debug("CPU%u - ACPI performance management activated.\n", cpu);
        for (i = 0; i < perf->state_count; i++)
                pr_debug("     %cP%d: %d MHz, %d mW, %d uS\n",
@@ -941,7 +936,6 @@ static struct cpufreq_driver acpi_cpufreq_driver = {
        .exit           = acpi_cpufreq_cpu_exit,
        .resume         = acpi_cpufreq_resume,
        .name           = "acpi-cpufreq",
-       .owner          = THIS_MODULE,
        .attr           = acpi_cpufreq_attr,
 };
 
index 654488723cb5d00746b5e08ebb5f88e9beac8cd0..e0c38d9389979b2f1b9a503d36c8e12364ecc188 100644 (file)
@@ -108,7 +108,6 @@ static int __init at32_cpufreq_driver_init(struct cpufreq_policy *policy)
 
 static struct cpufreq_driver at32_driver = {
        .name           = "at32ap",
-       .owner          = THIS_MODULE,
        .init           = at32_cpufreq_driver_init,
        .verify         = at32_verify_speed,
        .target         = at32_set_target,
index 9cdbbd278a800da1587eae1fdb6314d23d0a9b9d..ef05978a723702de29da757eda20edec7d21c3d6 100644 (file)
@@ -225,7 +225,6 @@ static struct cpufreq_driver bfin_driver = {
        .get = bfin_getfreq_khz,
        .init = __bfin_cpu_init,
        .name = "bfin cpufreq",
-       .owner = THIS_MODULE,
        .attr = bfin_freq_attr,
 };
 
index ad1fde277661e617fd4f19e01f34cd963f1349f4..b946ac73130f9b5c88f019ca22ad9ea544b05e58 100644 (file)
@@ -69,7 +69,7 @@ static int cpu0_set_target(struct cpufreq_policy *policy,
 
        cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
 
-       if (cpu_reg) {
+       if (!IS_ERR(cpu_reg)) {
                rcu_read_lock();
                opp = opp_find_freq_ceil(cpu_dev, &freq_Hz);
                if (IS_ERR(opp)) {
@@ -90,7 +90,7 @@ static int cpu0_set_target(struct cpufreq_policy *policy,
                 freqs.new / 1000, volt ? volt / 1000 : -1);
 
        /* scaling up?  scale voltage before frequency */
-       if (cpu_reg && freqs.new > freqs.old) {
+       if (!IS_ERR(cpu_reg) && freqs.new > freqs.old) {
                ret = regulator_set_voltage_tol(cpu_reg, volt, tol);
                if (ret) {
                        pr_err("failed to scale voltage up: %d\n", ret);
@@ -102,14 +102,14 @@ static int cpu0_set_target(struct cpufreq_policy *policy,
        ret = clk_set_rate(cpu_clk, freq_exact);
        if (ret) {
                pr_err("failed to set clock rate: %d\n", ret);
-               if (cpu_reg)
+               if (!IS_ERR(cpu_reg))
                        regulator_set_voltage_tol(cpu_reg, volt_old, tol);
                freqs.new = freqs.old;
                goto post_notify;
        }
 
        /* scaling down?  scale voltage after frequency */
-       if (cpu_reg && freqs.new < freqs.old) {
+       if (!IS_ERR(cpu_reg) && freqs.new < freqs.old) {
                ret = regulator_set_voltage_tol(cpu_reg, volt, tol);
                if (ret) {
                        pr_err("failed to scale voltage down: %d\n", ret);
@@ -197,7 +197,7 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
        cpu_dev = &pdev->dev;
        cpu_dev->of_node = np;
 
-       cpu_reg = devm_regulator_get(cpu_dev, "cpu0");
+       cpu_reg = devm_regulator_get_optional(cpu_dev, "cpu0");
        if (IS_ERR(cpu_reg)) {
                /*
                 * If cpu0 regulator supply node is present, but regulator is
@@ -210,7 +210,6 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
                }
                pr_warn("failed to get cpu0 regulator: %ld\n",
                        PTR_ERR(cpu_reg));
-               cpu_reg = NULL;
        }
 
        cpu_clk = devm_clk_get(cpu_dev, NULL);
index af1542d414408f0e8df9d561522479501ada341b..b83d45f6857495083163e13336027ef4449b5da2 100644 (file)
@@ -379,7 +379,6 @@ static struct cpufreq_driver nforce2_driver = {
        .get = nforce2_get,
        .init = nforce2_cpu_init,
        .exit = nforce2_cpu_exit,
-       .owner = THIS_MODULE,
 };
 
 #ifdef MODULE
index f0a5e2b0eb8a9584f1e1d55ffac92bfd2800fbba..81ceea6ed630af1836bc36b21cff6f50f1e6865a 100644 (file)
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
-#include <asm/cputime.h>
-#include <linux/kernel.h>
-#include <linux/kernel_stat.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/notifier.h>
+#include <linux/cpu.h>
 #include <linux/cpufreq.h>
 #include <linux/delay.h>
-#include <linux/interrupt.h>
-#include <linux/spinlock.h>
-#include <linux/tick.h>
 #include <linux/device.h>
-#include <linux/slab.h>
-#include <linux/cpu.h>
-#include <linux/completion.h>
+#include <linux/init.h>
+#include <linux/kernel_stat.h>
+#include <linux/module.h>
 #include <linux/mutex.h>
+#include <linux/slab.h>
 #include <linux/syscore_ops.h>
-
+#include <linux/tick.h>
 #include <trace/events/power.h>
 
 /**
  */
 static struct cpufreq_driver *cpufreq_driver;
 static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
+static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data_fallback);
 static DEFINE_RWLOCK(cpufreq_driver_lock);
 static DEFINE_MUTEX(cpufreq_governor_lock);
+static LIST_HEAD(cpufreq_policy_list);
 
 #ifdef CONFIG_HOTPLUG_CPU
 /* This one keeps track of the previously set governor of a removed CPU */
@@ -69,15 +64,14 @@ static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
  * - Lock should not be held across
  *     __cpufreq_governor(data, CPUFREQ_GOV_STOP);
  */
-static DEFINE_PER_CPU(int, cpufreq_policy_cpu);
 static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);
 
 #define lock_policy_rwsem(mode, cpu)                                   \
 static int lock_policy_rwsem_##mode(int cpu)                           \
 {                                                                      \
-       int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);              \
-       BUG_ON(policy_cpu == -1);                                       \
-       down_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu));            \
+       struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); \
+       BUG_ON(!policy);                                                \
+       down_##mode(&per_cpu(cpu_policy_rwsem, policy->cpu));           \
                                                                        \
        return 0;                                                       \
 }
@@ -88,14 +82,20 @@ lock_policy_rwsem(write, cpu);
 #define unlock_policy_rwsem(mode, cpu)                                 \
 static void unlock_policy_rwsem_##mode(int cpu)                                \
 {                                                                      \
-       int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);              \
-       BUG_ON(policy_cpu == -1);                                       \
-       up_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu));              \
+       struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); \
+       BUG_ON(!policy);                                                \
+       up_##mode(&per_cpu(cpu_policy_rwsem, policy->cpu));             \
 }
 
 unlock_policy_rwsem(read, cpu);
 unlock_policy_rwsem(write, cpu);
 
+/*
+ * rwsem to guarantee that cpufreq driver module doesn't unload during critical
+ * sections
+ */
+static DECLARE_RWSEM(cpufreq_rwsem);
+
 /* internal prototypes */
 static int __cpufreq_governor(struct cpufreq_policy *policy,
                unsigned int event);
@@ -183,78 +183,46 @@ u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
 }
 EXPORT_SYMBOL_GPL(get_cpu_idle_time);
 
-static struct cpufreq_policy *__cpufreq_cpu_get(unsigned int cpu, bool sysfs)
+struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
 {
-       struct cpufreq_policy *data;
+       struct cpufreq_policy *policy = NULL;
        unsigned long flags;
 
-       if (cpu >= nr_cpu_ids)
-               goto err_out;
+       if (cpufreq_disabled() || (cpu >= nr_cpu_ids))
+               return NULL;
+
+       if (!down_read_trylock(&cpufreq_rwsem))
+               return NULL;
 
        /* get the cpufreq driver */
        read_lock_irqsave(&cpufreq_driver_lock, flags);
 
-       if (!cpufreq_driver)
-               goto err_out_unlock;
-
-       if (!try_module_get(cpufreq_driver->owner))
-               goto err_out_unlock;
-
-       /* get the CPU */
-       data = per_cpu(cpufreq_cpu_data, cpu);
-
-       if (!data)
-               goto err_out_put_module;
-
-       if (!sysfs && !kobject_get(&data->kobj))
-               goto err_out_put_module;
+       if (cpufreq_driver) {
+               /* get the CPU */
+               policy = per_cpu(cpufreq_cpu_data, cpu);
+               if (policy)
+                       kobject_get(&policy->kobj);
+       }
 
        read_unlock_irqrestore(&cpufreq_driver_lock, flags);
-       return data;
 
-err_out_put_module:
-       module_put(cpufreq_driver->owner);
-err_out_unlock:
-       read_unlock_irqrestore(&cpufreq_driver_lock, flags);
-err_out:
-       return NULL;
-}
-
-struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
-{
-       if (cpufreq_disabled())
-               return NULL;
+       if (!policy)
+               up_read(&cpufreq_rwsem);
 
-       return __cpufreq_cpu_get(cpu, false);
+       return policy;
 }
 EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
 
-static struct cpufreq_policy *cpufreq_cpu_get_sysfs(unsigned int cpu)
-{
-       return __cpufreq_cpu_get(cpu, true);
-}
-
-static void __cpufreq_cpu_put(struct cpufreq_policy *data, bool sysfs)
-{
-       if (!sysfs)
-               kobject_put(&data->kobj);
-       module_put(cpufreq_driver->owner);
-}
-
-void cpufreq_cpu_put(struct cpufreq_policy *data)
+void cpufreq_cpu_put(struct cpufreq_policy *policy)
 {
        if (cpufreq_disabled())
                return;
 
-       __cpufreq_cpu_put(data, false);
+       kobject_put(&policy->kobj);
+       up_read(&cpufreq_rwsem);
 }
 EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
 
-static void cpufreq_cpu_put_sysfs(struct cpufreq_policy *data)
-{
-       __cpufreq_cpu_put(data, true);
-}
-
 /*********************************************************************
  *            EXTERNALLY AFFECTING FREQUENCY CHANGES                 *
  *********************************************************************/
@@ -459,8 +427,8 @@ show_one(scaling_min_freq, min);
 show_one(scaling_max_freq, max);
 show_one(scaling_cur_freq, cur);
 
-static int __cpufreq_set_policy(struct cpufreq_policy *data,
-                               struct cpufreq_policy *policy);
+static int __cpufreq_set_policy(struct cpufreq_policy *policy,
+                               struct cpufreq_policy *new_policy);
 
 /**
  * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
@@ -699,12 +667,12 @@ static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
        struct cpufreq_policy *policy = to_policy(kobj);
        struct freq_attr *fattr = to_attr(attr);
        ssize_t ret = -EINVAL;
-       policy = cpufreq_cpu_get_sysfs(policy->cpu);
-       if (!policy)
-               goto no_policy;
+
+       if (!down_read_trylock(&cpufreq_rwsem))
+               goto exit;
 
        if (lock_policy_rwsem_read(policy->cpu) < 0)
-               goto fail;
+               goto up_read;
 
        if (fattr->show)
                ret = fattr->show(policy, buf);
@@ -712,9 +680,10 @@ static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
                ret = -EIO;
 
        unlock_policy_rwsem_read(policy->cpu);
-fail:
-       cpufreq_cpu_put_sysfs(policy);
-no_policy:
+
+up_read:
+       up_read(&cpufreq_rwsem);
+exit:
        return ret;
 }
 
@@ -724,12 +693,12 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
        struct cpufreq_policy *policy = to_policy(kobj);
        struct freq_attr *fattr = to_attr(attr);
        ssize_t ret = -EINVAL;
-       policy = cpufreq_cpu_get_sysfs(policy->cpu);
-       if (!policy)
-               goto no_policy;
+
+       if (!down_read_trylock(&cpufreq_rwsem))
+               goto exit;
 
        if (lock_policy_rwsem_write(policy->cpu) < 0)
-               goto fail;
+               goto up_read;
 
        if (fattr->store)
                ret = fattr->store(policy, buf, count);
@@ -737,9 +706,10 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
                ret = -EIO;
 
        unlock_policy_rwsem_write(policy->cpu);
-fail:
-       cpufreq_cpu_put_sysfs(policy);
-no_policy:
+
+up_read:
+       up_read(&cpufreq_rwsem);
+exit:
        return ret;
 }
 
@@ -805,41 +775,32 @@ void cpufreq_sysfs_remove_file(const struct attribute *attr)
 EXPORT_SYMBOL(cpufreq_sysfs_remove_file);
 
 /* symlink affected CPUs */
-static int cpufreq_add_dev_symlink(unsigned int cpu,
-                                  struct cpufreq_policy *policy)
+static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy)
 {
        unsigned int j;
        int ret = 0;
 
        for_each_cpu(j, policy->cpus) {
-               struct cpufreq_policy *managed_policy;
                struct device *cpu_dev;
 
-               if (j == cpu)
+               if (j == policy->cpu)
                        continue;
 
-               pr_debug("CPU %u already managed, adding link\n", j);
-               managed_policy = cpufreq_cpu_get(cpu);
+               pr_debug("Adding link for CPU: %u\n", j);
                cpu_dev = get_cpu_device(j);
                ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
                                        "cpufreq");
-               if (ret) {
-                       cpufreq_cpu_put(managed_policy);
-                       return ret;
-               }
+               if (ret)
+                       break;
        }
        return ret;
 }
 
-static int cpufreq_add_dev_interface(unsigned int cpu,
-                                    struct cpufreq_policy *policy,
+static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
                                     struct device *dev)
 {
-       struct cpufreq_policy new_policy;
        struct freq_attr **drv_attr;
-       unsigned long flags;
        int ret = 0;
-       unsigned int j;
 
        /* prepare interface data */
        ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
@@ -871,18 +832,24 @@ static int cpufreq_add_dev_interface(unsigned int cpu,
                        goto err_out_kobj_put;
        }
 
-       write_lock_irqsave(&cpufreq_driver_lock, flags);
-       for_each_cpu(j, policy->cpus) {
-               per_cpu(cpufreq_cpu_data, j) = policy;
-               per_cpu(cpufreq_policy_cpu, j) = policy->cpu;
-       }
-       write_unlock_irqrestore(&cpufreq_driver_lock, flags);
-
-       ret = cpufreq_add_dev_symlink(cpu, policy);
+       ret = cpufreq_add_dev_symlink(policy);
        if (ret)
                goto err_out_kobj_put;
 
-       memcpy(&new_policy, policy, sizeof(struct cpufreq_policy));
+       return ret;
+
+err_out_kobj_put:
+       kobject_put(&policy->kobj);
+       wait_for_completion(&policy->kobj_unregister);
+       return ret;
+}
+
+static void cpufreq_init_policy(struct cpufreq_policy *policy)
+{
+       struct cpufreq_policy new_policy;
+       int ret = 0;
+
+       memcpy(&new_policy, policy, sizeof(*policy));
        /* assure that the starting sequence is run in __cpufreq_set_policy */
        policy->governor = NULL;
 
@@ -896,72 +863,106 @@ static int cpufreq_add_dev_interface(unsigned int cpu,
                if (cpufreq_driver->exit)
                        cpufreq_driver->exit(policy);
        }
-       return ret;
-
-err_out_kobj_put:
-       kobject_put(&policy->kobj);
-       wait_for_completion(&policy->kobj_unregister);
-       return ret;
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
-static int cpufreq_add_policy_cpu(unsigned int cpu, unsigned int sibling,
-                                 struct device *dev)
+static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
+                                 unsigned int cpu, struct device *dev,
+                                 bool frozen)
 {
-       struct cpufreq_policy *policy;
        int ret = 0, has_target = !!cpufreq_driver->target;
        unsigned long flags;
 
-       policy = cpufreq_cpu_get(sibling);
-       WARN_ON(!policy);
-
-       if (has_target)
-               __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
+       if (has_target) {
+               ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
+               if (ret) {
+                       pr_err("%s: Failed to stop governor\n", __func__);
+                       return ret;
+               }
+       }
 
-       lock_policy_rwsem_write(sibling);
+       lock_policy_rwsem_write(policy->cpu);
 
        write_lock_irqsave(&cpufreq_driver_lock, flags);
 
        cpumask_set_cpu(cpu, policy->cpus);
-       per_cpu(cpufreq_policy_cpu, cpu) = policy->cpu;
        per_cpu(cpufreq_cpu_data, cpu) = policy;
        write_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
-       unlock_policy_rwsem_write(sibling);
+       unlock_policy_rwsem_write(policy->cpu);
 
        if (has_target) {
-               __cpufreq_governor(policy, CPUFREQ_GOV_START);
-               __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
+               if ((ret = __cpufreq_governor(policy, CPUFREQ_GOV_START)) ||
+                       (ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))) {
+                       pr_err("%s: Failed to start governor\n", __func__);
+                       return ret;
+               }
        }
 
-       ret = sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
-       if (ret) {
-               cpufreq_cpu_put(policy);
-               return ret;
-       }
+       /* Don't touch sysfs links during light-weight init */
+       if (!frozen)
+               ret = sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
 
-       return 0;
+       return ret;
 }
 #endif
 
-/**
- * cpufreq_add_dev - add a CPU device
- *
- * Adds the cpufreq interface for a CPU device.
- *
- * The Oracle says: try running cpufreq registration/unregistration concurrently
- * with with cpu hotplugging and all hell will break loose. Tried to clean this
- * mess up, but more thorough testing is needed. - Mathieu
- */
-static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
+static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
+{
+       struct cpufreq_policy *policy;
+       unsigned long flags;
+
+       write_lock_irqsave(&cpufreq_driver_lock, flags);
+
+       policy = per_cpu(cpufreq_cpu_data_fallback, cpu);
+
+       write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+
+       return policy;
+}
+
+static struct cpufreq_policy *cpufreq_policy_alloc(void)
+{
+       struct cpufreq_policy *policy;
+
+       policy = kzalloc(sizeof(*policy), GFP_KERNEL);
+       if (!policy)
+               return NULL;
+
+       if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
+               goto err_free_policy;
+
+       if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
+               goto err_free_cpumask;
+
+       INIT_LIST_HEAD(&policy->policy_list);
+       return policy;
+
+err_free_cpumask:
+       free_cpumask_var(policy->cpus);
+err_free_policy:
+       kfree(policy);
+
+       return NULL;
+}
+
+static void cpufreq_policy_free(struct cpufreq_policy *policy)
+{
+       free_cpumask_var(policy->related_cpus);
+       free_cpumask_var(policy->cpus);
+       kfree(policy);
+}
+
+static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
+                            bool frozen)
 {
        unsigned int j, cpu = dev->id;
        int ret = -ENOMEM;
        struct cpufreq_policy *policy;
        unsigned long flags;
 #ifdef CONFIG_HOTPLUG_CPU
+       struct cpufreq_policy *tpolicy;
        struct cpufreq_governor *gov;
-       int sibling;
 #endif
 
        if (cpu_is_offline(cpu))
@@ -978,42 +979,37 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
                return 0;
        }
 
+       if (!down_read_trylock(&cpufreq_rwsem))
+               return 0;
+
 #ifdef CONFIG_HOTPLUG_CPU
        /* Check if this cpu was hot-unplugged earlier and has siblings */
        read_lock_irqsave(&cpufreq_driver_lock, flags);
-       for_each_online_cpu(sibling) {
-               struct cpufreq_policy *cp = per_cpu(cpufreq_cpu_data, sibling);
-               if (cp && cpumask_test_cpu(cpu, cp->related_cpus)) {
+       list_for_each_entry(tpolicy, &cpufreq_policy_list, policy_list) {
+               if (cpumask_test_cpu(cpu, tpolicy->related_cpus)) {
                        read_unlock_irqrestore(&cpufreq_driver_lock, flags);
-                       return cpufreq_add_policy_cpu(cpu, sibling, dev);
+                       ret = cpufreq_add_policy_cpu(tpolicy, cpu, dev, frozen);
+                       up_read(&cpufreq_rwsem);
+                       return ret;
                }
        }
        read_unlock_irqrestore(&cpufreq_driver_lock, flags);
 #endif
 #endif
 
-       if (!try_module_get(cpufreq_driver->owner)) {
-               ret = -EINVAL;
-               goto module_out;
-       }
+       if (frozen)
+               /* Restore the saved policy when doing light-weight init */
+               policy = cpufreq_policy_restore(cpu);
+       else
+               policy = cpufreq_policy_alloc();
 
-       policy = kzalloc(sizeof(struct cpufreq_policy), GFP_KERNEL);
        if (!policy)
                goto nomem_out;
 
-       if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
-               goto err_free_policy;
-
-       if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
-               goto err_free_cpumask;
-
        policy->cpu = cpu;
        policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
        cpumask_copy(policy->cpus, cpumask_of(cpu));
 
-       /* Initially set CPU itself as the policy_cpu */
-       per_cpu(cpufreq_policy_cpu, cpu) = cpu;
-
        init_completion(&policy->kobj_unregister);
        INIT_WORK(&policy->update, handle_update);
 
@@ -1050,12 +1046,26 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
        }
 #endif
 
-       ret = cpufreq_add_dev_interface(cpu, policy, dev);
-       if (ret)
-               goto err_out_unregister;
+       write_lock_irqsave(&cpufreq_driver_lock, flags);
+       for_each_cpu(j, policy->cpus)
+               per_cpu(cpufreq_cpu_data, j) = policy;
+       write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+
+       if (!frozen) {
+               ret = cpufreq_add_dev_interface(policy, dev);
+               if (ret)
+                       goto err_out_unregister;
+       }
+
+       write_lock_irqsave(&cpufreq_driver_lock, flags);
+       list_add(&policy->policy_list, &cpufreq_policy_list);
+       write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+
+       cpufreq_init_policy(policy);
 
        kobject_uevent(&policy->kobj, KOBJ_ADD);
-       module_put(cpufreq_driver->owner);
+       up_read(&cpufreq_rwsem);
+
        pr_debug("initialization complete\n");
 
        return 0;
@@ -1066,32 +1076,33 @@ err_out_unregister:
                per_cpu(cpufreq_cpu_data, j) = NULL;
        write_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
-       kobject_put(&policy->kobj);
-       wait_for_completion(&policy->kobj_unregister);
-
 err_set_policy_cpu:
-       per_cpu(cpufreq_policy_cpu, cpu) = -1;
-       free_cpumask_var(policy->related_cpus);
-err_free_cpumask:
-       free_cpumask_var(policy->cpus);
-err_free_policy:
-       kfree(policy);
+       cpufreq_policy_free(policy);
 nomem_out:
-       module_put(cpufreq_driver->owner);
-module_out:
+       up_read(&cpufreq_rwsem);
+
        return ret;
 }
 
-static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
+/**
+ * cpufreq_add_dev - add a CPU device
+ *
+ * Adds the cpufreq interface for a CPU device.
+ *
+ * The Oracle says: try running cpufreq registration/unregistration concurrently
+ * with with cpu hotplugging and all hell will break loose. Tried to clean this
+ * mess up, but more thorough testing is needed. - Mathieu
+ */
+static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
 {
-       int j;
+       return __cpufreq_add_dev(dev, sif, false);
+}
 
+static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
+{
        policy->last_cpu = policy->cpu;
        policy->cpu = cpu;
 
-       for_each_cpu(j, policy->cpus)
-               per_cpu(cpufreq_policy_cpu, j) = cpu;
-
 #ifdef CONFIG_CPU_FREQ_TABLE
        cpufreq_frequency_table_update_policy_cpu(policy);
 #endif
@@ -1099,6 +1110,37 @@ static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
                        CPUFREQ_UPDATE_POLICY_CPU, policy);
 }
 
+static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
+                                          unsigned int old_cpu, bool frozen)
+{
+       struct device *cpu_dev;
+       int ret;
+
+       /* first sibling now owns the new sysfs dir */
+       cpu_dev = get_cpu_device(cpumask_first(policy->cpus));
+
+       /* Don't touch sysfs files during light-weight tear-down */
+       if (frozen)
+               return cpu_dev->id;
+
+       sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
+       ret = kobject_move(&policy->kobj, &cpu_dev->kobj);
+       if (ret) {
+               pr_err("%s: Failed to move kobj: %d", __func__, ret);
+
+               WARN_ON(lock_policy_rwsem_write(old_cpu));
+               cpumask_set_cpu(old_cpu, policy->cpus);
+               unlock_policy_rwsem_write(old_cpu);
+
+               ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
+                                       "cpufreq");
+
+               return -EINVAL;
+       }
+
+       return cpu_dev->id;
+}
+
 /**
  * __cpufreq_remove_dev - remove a CPU device
  *
@@ -1107,111 +1149,126 @@ static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
  * This routine frees the rwsem before returning.
  */
 static int __cpufreq_remove_dev(struct device *dev,
-               struct subsys_interface *sif)
+                               struct subsys_interface *sif, bool frozen)
 {
-       unsigned int cpu = dev->id, ret, cpus;
+       unsigned int cpu = dev->id, cpus;
+       int new_cpu, ret;
        unsigned long flags;
-       struct cpufreq_policy *data;
+       struct cpufreq_policy *policy;
        struct kobject *kobj;
        struct completion *cmp;
-       struct device *cpu_dev;
 
        pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
 
        write_lock_irqsave(&cpufreq_driver_lock, flags);
 
-       data = per_cpu(cpufreq_cpu_data, cpu);
-       per_cpu(cpufreq_cpu_data, cpu) = NULL;
+       policy = per_cpu(cpufreq_cpu_data, cpu);
+
+       /* Save the policy somewhere when doing a light-weight tear-down */
+       if (frozen)
+               per_cpu(cpufreq_cpu_data_fallback, cpu) = policy;
 
        write_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
-       if (!data) {
+       if (!policy) {
                pr_debug("%s: No cpu_data found\n", __func__);
                return -EINVAL;
        }
 
-       if (cpufreq_driver->target)
-               __cpufreq_governor(data, CPUFREQ_GOV_STOP);
+       if (cpufreq_driver->target) {
+               ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
+               if (ret) {
+                       pr_err("%s: Failed to stop governor\n", __func__);
+                       return ret;
+               }
+       }
 
 #ifdef CONFIG_HOTPLUG_CPU
        if (!cpufreq_driver->setpolicy)
                strncpy(per_cpu(cpufreq_cpu_governor, cpu),
-                       data->governor->name, CPUFREQ_NAME_LEN);
+                       policy->governor->name, CPUFREQ_NAME_LEN);
 #endif
 
        WARN_ON(lock_policy_rwsem_write(cpu));
-       cpus = cpumask_weight(data->cpus);
+       cpus = cpumask_weight(policy->cpus);
 
        if (cpus > 1)
-               cpumask_clear_cpu(cpu, data->cpus);
+               cpumask_clear_cpu(cpu, policy->cpus);
        unlock_policy_rwsem_write(cpu);
 
-       if (cpu != data->cpu) {
+       if (cpu != policy->cpu && !frozen) {
                sysfs_remove_link(&dev->kobj, "cpufreq");
        } else if (cpus > 1) {
-               /* first sibling now owns the new sysfs dir */
-               cpu_dev = get_cpu_device(cpumask_first(data->cpus));
-               sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
-               ret = kobject_move(&data->kobj, &cpu_dev->kobj);
-               if (ret) {
-                       pr_err("%s: Failed to move kobj: %d", __func__, ret);
 
+               new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu, frozen);
+               if (new_cpu >= 0) {
                        WARN_ON(lock_policy_rwsem_write(cpu));
-                       cpumask_set_cpu(cpu, data->cpus);
-
-                       write_lock_irqsave(&cpufreq_driver_lock, flags);
-                       per_cpu(cpufreq_cpu_data, cpu) = data;
-                       write_unlock_irqrestore(&cpufreq_driver_lock, flags);
-
+                       update_policy_cpu(policy, new_cpu);
                        unlock_policy_rwsem_write(cpu);
 
-                       ret = sysfs_create_link(&cpu_dev->kobj, &data->kobj,
-                                       "cpufreq");
-                       return -EINVAL;
+                       if (!frozen) {
+                               pr_debug("%s: policy Kobject moved to cpu: %d "
+                                        "from: %d\n",__func__, new_cpu, cpu);
+                       }
                }
-
-               WARN_ON(lock_policy_rwsem_write(cpu));
-               update_policy_cpu(data, cpu_dev->id);
-               unlock_policy_rwsem_write(cpu);
-               pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
-                               __func__, cpu_dev->id, cpu);
        }
 
        /* If cpu is last user of policy, free policy */
        if (cpus == 1) {
-               if (cpufreq_driver->target)
-                       __cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT);
-
-               lock_policy_rwsem_read(cpu);
-               kobj = &data->kobj;
-               cmp = &data->kobj_unregister;
-               unlock_policy_rwsem_read(cpu);
-               kobject_put(kobj);
-
-               /* we need to make sure that the underlying kobj is actually
-                * not referenced anymore by anybody before we proceed with
-                * unloading.
-                */
-               pr_debug("waiting for dropping of refcount\n");
-               wait_for_completion(cmp);
-               pr_debug("wait complete\n");
+               if (cpufreq_driver->target) {
+                       ret = __cpufreq_governor(policy,
+                                       CPUFREQ_GOV_POLICY_EXIT);
+                       if (ret) {
+                               pr_err("%s: Failed to exit governor\n",
+                                               __func__);
+                               return ret;
+                       }
+               }
 
+               if (!frozen) {
+                       lock_policy_rwsem_read(cpu);
+                       kobj = &policy->kobj;
+                       cmp = &policy->kobj_unregister;
+                       unlock_policy_rwsem_read(cpu);
+                       kobject_put(kobj);
+
+                       /*
+                        * We need to make sure that the underlying kobj is
+                        * actually not referenced anymore by anybody before we
+                        * proceed with unloading.
+                        */
+                       pr_debug("waiting for dropping of refcount\n");
+                       wait_for_completion(cmp);
+                       pr_debug("wait complete\n");
+               }
+
+               /*
+                * Perform the ->exit() even during light-weight tear-down,
+                * since this is a core component, and is essential for the
+                * subsequent light-weight ->init() to succeed.
+                */
                if (cpufreq_driver->exit)
-                       cpufreq_driver->exit(data);
+                       cpufreq_driver->exit(policy);
+
+               /* Remove policy from list of active policies */
+               write_lock_irqsave(&cpufreq_driver_lock, flags);
+               list_del(&policy->policy_list);
+               write_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
-               free_cpumask_var(data->related_cpus);
-               free_cpumask_var(data->cpus);
-               kfree(data);
+               if (!frozen)
+                       cpufreq_policy_free(policy);
        } else {
-               pr_debug("%s: removing link, cpu: %d\n", __func__, cpu);
-               cpufreq_cpu_put(data);
                if (cpufreq_driver->target) {
-                       __cpufreq_governor(data, CPUFREQ_GOV_START);
-                       __cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
+                       if ((ret = __cpufreq_governor(policy, CPUFREQ_GOV_START)) ||
+                                       (ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))) {
+                               pr_err("%s: Failed to start governor\n",
+                                               __func__);
+                               return ret;
+                       }
                }
        }
 
-       per_cpu(cpufreq_policy_cpu, cpu) = -1;
+       per_cpu(cpufreq_cpu_data, cpu) = NULL;
        return 0;
 }
 
@@ -1223,7 +1280,7 @@ static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
        if (cpu_is_offline(cpu))
                return 0;
 
-       retval = __cpufreq_remove_dev(dev, sif);
+       retval = __cpufreq_remove_dev(dev, sif, false);
        return retval;
 }
 
@@ -1344,10 +1401,9 @@ static unsigned int __cpufreq_get(unsigned int cpu)
 unsigned int cpufreq_get(unsigned int cpu)
 {
        unsigned int ret_freq = 0;
-       struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
 
-       if (!policy)
-               goto out;
+       if (!down_read_trylock(&cpufreq_rwsem))
+               return 0;
 
        if (unlikely(lock_policy_rwsem_read(cpu)))
                goto out_policy;
@@ -1357,8 +1413,8 @@ unsigned int cpufreq_get(unsigned int cpu)
        unlock_policy_rwsem_read(cpu);
 
 out_policy:
-       cpufreq_cpu_put(policy);
-out:
+       up_read(&cpufreq_rwsem);
+
        return ret_freq;
 }
 EXPORT_SYMBOL(cpufreq_get);
@@ -1381,23 +1437,23 @@ static int cpufreq_bp_suspend(void)
        int ret = 0;
 
        int cpu = smp_processor_id();
-       struct cpufreq_policy *cpu_policy;
+       struct cpufreq_policy *policy;
 
        pr_debug("suspending cpu %u\n", cpu);
 
        /* If there's no policy for the boot CPU, we have nothing to do. */
-       cpu_policy = cpufreq_cpu_get(cpu);
-       if (!cpu_policy)
+       policy = cpufreq_cpu_get(cpu);
+       if (!policy)
                return 0;
 
        if (cpufreq_driver->suspend) {
-               ret = cpufreq_driver->suspend(cpu_policy);
+               ret = cpufreq_driver->suspend(policy);
                if (ret)
                        printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
-                                       "step on CPU %u\n", cpu_policy->cpu);
+                                       "step on CPU %u\n", policy->cpu);
        }
 
-       cpufreq_cpu_put(cpu_policy);
+       cpufreq_cpu_put(policy);
        return ret;
 }
 
@@ -1419,28 +1475,28 @@ static void cpufreq_bp_resume(void)
        int ret = 0;
 
        int cpu = smp_processor_id();
-       struct cpufreq_policy *cpu_policy;
+       struct cpufreq_policy *policy;
 
        pr_debug("resuming cpu %u\n", cpu);
 
        /* If there's no policy for the boot CPU, we have nothing to do. */
-       cpu_policy = cpufreq_cpu_get(cpu);
-       if (!cpu_policy)
+       policy = cpufreq_cpu_get(cpu);
+       if (!policy)
                return;
 
        if (cpufreq_driver->resume) {
-               ret = cpufreq_driver->resume(cpu_policy);
+               ret = cpufreq_driver->resume(policy);
                if (ret) {
                        printk(KERN_ERR "cpufreq: resume failed in ->resume "
-                                       "step on CPU %u\n", cpu_policy->cpu);
+                                       "step on CPU %u\n", policy->cpu);
                        goto fail;
                }
        }
 
-       schedule_work(&cpu_policy->update);
+       schedule_work(&policy->update);
 
 fail:
-       cpufreq_cpu_put(cpu_policy);
+       cpufreq_cpu_put(policy);
 }
 
 static struct syscore_ops cpufreq_syscore_ops = {
@@ -1594,18 +1650,6 @@ fail:
 }
 EXPORT_SYMBOL_GPL(cpufreq_driver_target);
 
-int __cpufreq_driver_getavg(struct cpufreq_policy *policy, unsigned int cpu)
-{
-       if (cpufreq_disabled())
-               return 0;
-
-       if (!cpufreq_driver->getavg)
-               return 0;
-
-       return cpufreq_driver->getavg(policy, cpu);
-}
-EXPORT_SYMBOL_GPL(__cpufreq_driver_getavg);
-
 /*
  * when "event" is CPUFREQ_GOV_LIMITS
  */
@@ -1640,8 +1684,9 @@ static int __cpufreq_governor(struct cpufreq_policy *policy,
                }
        }
 
-       if (!try_module_get(policy->governor->owner))
-               return -EINVAL;
+       if (event == CPUFREQ_GOV_POLICY_INIT)
+               if (!try_module_get(policy->governor->owner))
+                       return -EINVAL;
 
        pr_debug("__cpufreq_governor for CPU %u, event %u\n",
                                                policy->cpu, event);
@@ -1677,11 +1722,8 @@ static int __cpufreq_governor(struct cpufreq_policy *policy,
                mutex_unlock(&cpufreq_governor_lock);
        }
 
-       /* we keep one module reference alive for
-                       each CPU governed by this CPU */
-       if ((event != CPUFREQ_GOV_START) || ret)
-               module_put(policy->governor->owner);
-       if ((event == CPUFREQ_GOV_STOP) && !ret)
+       if (((event == CPUFREQ_GOV_POLICY_INIT) && ret) ||
+                       ((event == CPUFREQ_GOV_POLICY_EXIT) && !ret))
                module_put(policy->governor->owner);
 
        return ret;
@@ -1761,7 +1803,7 @@ int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
        if (!cpu_policy)
                return -EINVAL;
 
-       memcpy(policy, cpu_policy, sizeof(struct cpufreq_policy));
+       memcpy(policy, cpu_policy, sizeof(*policy));
 
        cpufreq_cpu_put(cpu_policy);
        return 0;
@@ -1772,95 +1814,94 @@ EXPORT_SYMBOL(cpufreq_get_policy);
  * data   : current policy.
  * policy : policy to be set.
  */
-static int __cpufreq_set_policy(struct cpufreq_policy *data,
-                               struct cpufreq_policy *policy)
+static int __cpufreq_set_policy(struct cpufreq_policy *policy,
+                               struct cpufreq_policy *new_policy)
 {
        int ret = 0, failed = 1;
 
-       pr_debug("setting new policy for CPU %u: %u - %u kHz\n", policy->cpu,
-               policy->min, policy->max);
+       pr_debug("setting new policy for CPU %u: %u - %u kHz\n", new_policy->cpu,
+               new_policy->min, new_policy->max);
 
-       memcpy(&policy->cpuinfo, &data->cpuinfo,
-                               sizeof(struct cpufreq_cpuinfo));
+       memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
 
-       if (policy->min > data->max || policy->max < data->min) {
+       if (new_policy->min > policy->max || new_policy->max < policy->min) {
                ret = -EINVAL;
                goto error_out;
        }
 
        /* verify the cpu speed can be set within this limit */
-       ret = cpufreq_driver->verify(policy);
+       ret = cpufreq_driver->verify(new_policy);
        if (ret)
                goto error_out;
 
        /* adjust if necessary - all reasons */
        blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
-                       CPUFREQ_ADJUST, policy);
+                       CPUFREQ_ADJUST, new_policy);
 
        /* adjust if necessary - hardware incompatibility*/
        blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
-                       CPUFREQ_INCOMPATIBLE, policy);
+                       CPUFREQ_INCOMPATIBLE, new_policy);
 
        /*
         * verify the cpu speed can be set within this limit, which might be
         * different to the first one
         */
-       ret = cpufreq_driver->verify(policy);
+       ret = cpufreq_driver->verify(new_policy);
        if (ret)
                goto error_out;
 
        /* notification of the new policy */
        blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
-                       CPUFREQ_NOTIFY, policy);
+                       CPUFREQ_NOTIFY, new_policy);
 
-       data->min = policy->min;
-       data->max = policy->max;
+       policy->min = new_policy->min;
+       policy->max = new_policy->max;
 
        pr_debug("new min and max freqs are %u - %u kHz\n",
-                                       data->min, data->max);
+                                       policy->min, policy->max);
 
        if (cpufreq_driver->setpolicy) {
-               data->policy = policy->policy;
+               policy->policy = new_policy->policy;
                pr_debug("setting range\n");
-               ret = cpufreq_driver->setpolicy(policy);
+               ret = cpufreq_driver->setpolicy(new_policy);
        } else {
-               if (policy->governor != data->governor) {
+               if (new_policy->governor != policy->governor) {
                        /* save old, working values */
-                       struct cpufreq_governor *old_gov = data->governor;
+                       struct cpufreq_governor *old_gov = policy->governor;
 
                        pr_debug("governor switch\n");
 
                        /* end old governor */
-                       if (data->governor) {
-                               __cpufreq_governor(data, CPUFREQ_GOV_STOP);
-                               unlock_policy_rwsem_write(policy->cpu);
-                               __cpufreq_governor(data,
+                       if (policy->governor) {
+                               __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
+                               unlock_policy_rwsem_write(new_policy->cpu);
+                               __cpufreq_governor(policy,
                                                CPUFREQ_GOV_POLICY_EXIT);
-                               lock_policy_rwsem_write(policy->cpu);
+                               lock_policy_rwsem_write(new_policy->cpu);
                        }
 
                        /* start new governor */
-                       data->governor = policy->governor;
-                       if (!__cpufreq_governor(data, CPUFREQ_GOV_POLICY_INIT)) {
-                               if (!__cpufreq_governor(data, CPUFREQ_GOV_START)) {
+                       policy->governor = new_policy->governor;
+                       if (!__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) {
+                               if (!__cpufreq_governor(policy, CPUFREQ_GOV_START)) {
                                        failed = 0;
                                } else {
-                                       unlock_policy_rwsem_write(policy->cpu);
-                                       __cpufreq_governor(data,
+                                       unlock_policy_rwsem_write(new_policy->cpu);
+                                       __cpufreq_governor(policy,
                                                        CPUFREQ_GOV_POLICY_EXIT);
-                                       lock_policy_rwsem_write(policy->cpu);
+                                       lock_policy_rwsem_write(new_policy->cpu);
                                }
                        }
 
                        if (failed) {
                                /* new governor failed, so re-start old one */
                                pr_debug("starting governor %s failed\n",
-                                                       data->governor->name);
+                                                       policy->governor->name);
                                if (old_gov) {
-                                       data->governor = old_gov;
-                                       __cpufreq_governor(data,
+                                       policy->governor = old_gov;
+                                       __cpufreq_governor(policy,
                                                        CPUFREQ_GOV_POLICY_INIT);
-                                       __cpufreq_governor(data,
+                                       __cpufreq_governor(policy,
                                                           CPUFREQ_GOV_START);
                                }
                                ret = -EINVAL;
@@ -1869,7 +1910,7 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data,
                        /* might be a policy change, too, so fall through */
                }
                pr_debug("governor: change or update limits\n");
-               __cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
+               ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
        }
 
 error_out:
@@ -1885,11 +1926,11 @@ error_out:
  */
 int cpufreq_update_policy(unsigned int cpu)
 {
-       struct cpufreq_policy *data = cpufreq_cpu_get(cpu);
-       struct cpufreq_policy policy;
+       struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+       struct cpufreq_policy new_policy;
        int ret;
 
-       if (!data) {
+       if (!policy) {
                ret = -ENODEV;
                goto no_policy;
        }
@@ -1900,34 +1941,34 @@ int cpufreq_update_policy(unsigned int cpu)
        }
 
        pr_debug("updating policy for CPU %u\n", cpu);
-       memcpy(&policy, data, sizeof(struct cpufreq_policy));
-       policy.min = data->user_policy.min;
-       policy.max = data->user_policy.max;
-       policy.policy = data->user_policy.policy;
-       policy.governor = data->user_policy.governor;
+       memcpy(&new_policy, policy, sizeof(*policy));
+       new_policy.min = policy->user_policy.min;
+       new_policy.max = policy->user_policy.max;
+       new_policy.policy = policy->user_policy.policy;
+       new_policy.governor = policy->user_policy.governor;
 
        /*
         * BIOS might change freq behind our back
         * -> ask driver for current freq and notify governors about a change
         */
        if (cpufreq_driver->get) {
-               policy.cur = cpufreq_driver->get(cpu);
-               if (!data->cur) {
+               new_policy.cur = cpufreq_driver->get(cpu);
+               if (!policy->cur) {
                        pr_debug("Driver did not initialize current freq");
-                       data->cur = policy.cur;
+                       policy->cur = new_policy.cur;
                } else {
-                       if (data->cur != policy.cur && cpufreq_driver->target)
-                               cpufreq_out_of_sync(cpu, data->cur,
-                                                               policy.cur);
+                       if (policy->cur != new_policy.cur && cpufreq_driver->target)
+                               cpufreq_out_of_sync(cpu, policy->cur,
+                                                               new_policy.cur);
                }
        }
 
-       ret = __cpufreq_set_policy(data, &policy);
+       ret = __cpufreq_set_policy(policy, &new_policy);
 
        unlock_policy_rwsem_write(cpu);
 
 fail:
-       cpufreq_cpu_put(data);
+       cpufreq_cpu_put(policy);
 no_policy:
        return ret;
 }
@@ -1938,21 +1979,26 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
 {
        unsigned int cpu = (unsigned long)hcpu;
        struct device *dev;
+       bool frozen = false;
 
        dev = get_cpu_device(cpu);
        if (dev) {
-               switch (action) {
+
+               if (action & CPU_TASKS_FROZEN)
+                       frozen = true;
+
+               switch (action & ~CPU_TASKS_FROZEN) {
                case CPU_ONLINE:
-               case CPU_ONLINE_FROZEN:
-                       cpufreq_add_dev(dev, NULL);
+                       __cpufreq_add_dev(dev, NULL, frozen);
+                       cpufreq_update_policy(cpu);
                        break;
+
                case CPU_DOWN_PREPARE:
-               case CPU_DOWN_PREPARE_FROZEN:
-                       __cpufreq_remove_dev(dev, NULL);
+                       __cpufreq_remove_dev(dev, NULL, frozen);
                        break;
+
                case CPU_DOWN_FAILED:
-               case CPU_DOWN_FAILED_FROZEN:
-                       cpufreq_add_dev(dev, NULL);
+                       __cpufreq_add_dev(dev, NULL, frozen);
                        break;
                }
        }
@@ -2059,9 +2105,13 @@ int cpufreq_unregister_driver(struct cpufreq_driver *driver)
        subsys_interface_unregister(&cpufreq_interface);
        unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
 
+       down_write(&cpufreq_rwsem);
        write_lock_irqsave(&cpufreq_driver_lock, flags);
+
        cpufreq_driver = NULL;
+
        write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+       up_write(&cpufreq_rwsem);
 
        return 0;
 }
@@ -2074,10 +2124,8 @@ static int __init cpufreq_core_init(void)
        if (cpufreq_disabled())
                return -ENODEV;
 
-       for_each_possible_cpu(cpu) {
-               per_cpu(cpufreq_policy_cpu, cpu) = -1;
+       for_each_possible_cpu(cpu)
                init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
-       }
 
        cpufreq_global_kobject = kobject_create();
        BUG_ON(!cpufreq_global_kobject);
index f97cb3d8c5a232a60f319e04da5c0616624925d4..7f67a75b3c3c635a8c10e952c3be3b5549809b01 100644 (file)
  * published by the Free Software Foundation.
  */
 
-#include <linux/cpufreq.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/kernel_stat.h>
-#include <linux/kobject.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/notifier.h>
-#include <linux/percpu-defs.h>
 #include <linux/slab.h>
-#include <linux/sysfs.h>
-#include <linux/types.h>
-
 #include "cpufreq_governor.h"
 
 /* Conservative governor macros */
@@ -329,7 +317,7 @@ static int cs_init(struct dbs_data *dbs_data)
 {
        struct cs_dbs_tuners *tuners;
 
-       tuners = kzalloc(sizeof(struct cs_dbs_tuners), GFP_KERNEL);
+       tuners = kzalloc(sizeof(*tuners), GFP_KERNEL);
        if (!tuners) {
                pr_err("%s: kzalloc failed\n", __func__);
                return -ENOMEM;
index e59afaa9da23af0dfd826751e817ab6b41e94a0b..87427360c77f95ad095d10da10568c923cb87a6c 100644 (file)
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
-#include <asm/cputime.h>
-#include <linux/cpufreq.h>
-#include <linux/cpumask.h>
 #include <linux/export.h>
 #include <linux/kernel_stat.h>
-#include <linux/mutex.h>
 #include <linux/slab.h>
-#include <linux/types.h>
-#include <linux/workqueue.h>
 
 #include "cpufreq_governor.h"
 
@@ -53,7 +47,7 @@ void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
 
        policy = cdbs->cur_policy;
 
-       /* Get Absolute Load (in terms of freq for ondemand gov) */
+       /* Get Absolute Load */
        for_each_cpu(j, policy->cpus) {
                struct cpu_dbs_common_info *j_cdbs;
                u64 cur_wall_time, cur_idle_time;
@@ -104,14 +98,6 @@ void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
 
                load = 100 * (wall_time - idle_time) / wall_time;
 
-               if (dbs_data->cdata->governor == GOV_ONDEMAND) {
-                       int freq_avg = __cpufreq_driver_getavg(policy, j);
-                       if (freq_avg <= 0)
-                               freq_avg = policy->cur;
-
-                       load *= freq_avg;
-               }
-
                if (load > max_load)
                        max_load = load;
        }
index d5f12b4b11b8af09c608bd6f74f01601e0a91f2d..a02d78b258982d36d13724879c6b719376c2e7ab 100644 (file)
 #define _CPUFREQ_GOVERNOR_H
 
 #include <linux/cpufreq.h>
-#include <linux/kobject.h>
+#include <linux/kernel_stat.h>
+#include <linux/module.h>
 #include <linux/mutex.h>
-#include <linux/workqueue.h>
-#include <linux/sysfs.h>
 
 /*
  * The polling frequency depends on the capability of the processor. Default
@@ -169,7 +168,6 @@ struct od_dbs_tuners {
        unsigned int sampling_rate;
        unsigned int sampling_down_factor;
        unsigned int up_threshold;
-       unsigned int adj_up_threshold;
        unsigned int powersave_bias;
        unsigned int io_is_busy;
 };
@@ -223,7 +221,7 @@ struct od_ops {
        void (*powersave_bias_init_cpu)(int cpu);
        unsigned int (*powersave_bias_target)(struct cpufreq_policy *policy,
                        unsigned int freq_next, unsigned int relation);
-       void (*freq_increase)(struct cpufreq_policy *p, unsigned int freq);
+       void (*freq_increase)(struct cpufreq_policy *policy, unsigned int freq);
 };
 
 struct cs_ops {
index c087347d66884f03a4f1b2a94bf4fcbaceeda7a0..87f3305e80a69fb02001dbaaf2b398c1c2e9dede 100644 (file)
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
-#include <linux/cpufreq.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/kernel_stat.h>
-#include <linux/kobject.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
+#include <linux/cpu.h>
 #include <linux/percpu-defs.h>
 #include <linux/slab.h>
-#include <linux/sysfs.h>
 #include <linux/tick.h>
-#include <linux/types.h>
-#include <linux/cpu.h>
-
 #include "cpufreq_governor.h"
 
 /* On-demand governor macros */
-#define DEF_FREQUENCY_DOWN_DIFFERENTIAL                (10)
 #define DEF_FREQUENCY_UP_THRESHOLD             (80)
 #define DEF_SAMPLING_DOWN_FACTOR               (1)
 #define MAX_SAMPLING_DOWN_FACTOR               (100000)
-#define MICRO_FREQUENCY_DOWN_DIFFERENTIAL      (3)
 #define MICRO_FREQUENCY_UP_THRESHOLD           (95)
 #define MICRO_FREQUENCY_MIN_SAMPLE_RATE                (10000)
 #define MIN_FREQUENCY_UP_THRESHOLD             (11)
@@ -144,31 +132,27 @@ static void ondemand_powersave_bias_init(void)
        }
 }
 
-static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq)
+static void dbs_freq_increase(struct cpufreq_policy *policy, unsigned int freq)
 {
-       struct dbs_data *dbs_data = p->governor_data;
+       struct dbs_data *dbs_data = policy->governor_data;
        struct od_dbs_tuners *od_tuners = dbs_data->tuners;
 
        if (od_tuners->powersave_bias)
-               freq = od_ops.powersave_bias_target(p, freq,
+               freq = od_ops.powersave_bias_target(policy, freq,
                                CPUFREQ_RELATION_H);
-       else if (p->cur == p->max)
+       else if (policy->cur == policy->max)
                return;
 
-       __cpufreq_driver_target(p, freq, od_tuners->powersave_bias ?
+       __cpufreq_driver_target(policy, freq, od_tuners->powersave_bias ?
                        CPUFREQ_RELATION_L : CPUFREQ_RELATION_H);
 }
 
 /*
  * Every sampling_rate, we check, if current idle time is less than 20%
- * (default), then we try to increase frequency. Every sampling_rate, we look
- * for the lowest frequency which can sustain the load while keeping idle time
- * over 30%. If such a frequency exist, we try to decrease to this frequency.
- *
- * Any frequency increase takes it to the maximum frequency. Frequency reduction
- * happens at minimum steps of 5% (default) of current frequency
+ * (default), then we try to increase frequency. Else, we adjust the frequency
+ * proportional to load.
  */
-static void od_check_cpu(int cpu, unsigned int load_freq)
+static void od_check_cpu(int cpu, unsigned int load)
 {
        struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
        struct cpufreq_policy *policy = dbs_info->cdbs.cur_policy;
@@ -178,29 +162,17 @@ static void od_check_cpu(int cpu, unsigned int load_freq)
        dbs_info->freq_lo = 0;
 
        /* Check for frequency increase */
-       if (load_freq > od_tuners->up_threshold * policy->cur) {
+       if (load > od_tuners->up_threshold) {
                /* If switching to max speed, apply sampling_down_factor */
                if (policy->cur < policy->max)
                        dbs_info->rate_mult =
                                od_tuners->sampling_down_factor;
                dbs_freq_increase(policy, policy->max);
                return;
-       }
-
-       /* Check for frequency decrease */
-       /* if we cannot reduce the frequency anymore, break out early */
-       if (policy->cur == policy->min)
-               return;
-
-       /*
-        * The optimal frequency is the frequency that is the lowest that can
-        * support the current CPU usage without triggering the up policy. To be
-        * safe, we focus 10 points under the threshold.
-        */
-       if (load_freq < od_tuners->adj_up_threshold
-                       * policy->cur) {
+       } else {
+               /* Calculate the next frequency proportional to load */
                unsigned int freq_next;
-               freq_next = load_freq / od_tuners->adj_up_threshold;
+               freq_next = load * policy->cpuinfo.max_freq / 100;
 
                /* No longer fully busy, reset rate_mult */
                dbs_info->rate_mult = 1;
@@ -374,9 +346,6 @@ static ssize_t store_up_threshold(struct dbs_data *dbs_data, const char *buf,
                        input < MIN_FREQUENCY_UP_THRESHOLD) {
                return -EINVAL;
        }
-       /* Calculate the new adj_up_threshold */
-       od_tuners->adj_up_threshold += input;
-       od_tuners->adj_up_threshold -= od_tuners->up_threshold;
 
        od_tuners->up_threshold = input;
        return count;
@@ -513,7 +482,7 @@ static int od_init(struct dbs_data *dbs_data)
        u64 idle_time;
        int cpu;
 
-       tuners = kzalloc(sizeof(struct od_dbs_tuners), GFP_KERNEL);
+       tuners = kzalloc(sizeof(*tuners), GFP_KERNEL);
        if (!tuners) {
                pr_err("%s: kzalloc failed\n", __func__);
                return -ENOMEM;
@@ -525,8 +494,6 @@ static int od_init(struct dbs_data *dbs_data)
        if (idle_time != -1ULL) {
                /* Idle micro accounting is supported. Use finer thresholds */
                tuners->up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
-               tuners->adj_up_threshold = MICRO_FREQUENCY_UP_THRESHOLD -
-                       MICRO_FREQUENCY_DOWN_DIFFERENTIAL;
                /*
                 * In nohz/micro accounting case we set the minimum frequency
                 * not depending on HZ, but fixed (very low). The deferred
@@ -535,8 +502,6 @@ static int od_init(struct dbs_data *dbs_data)
                dbs_data->min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE;
        } else {
                tuners->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;
-               tuners->adj_up_threshold = DEF_FREQUENCY_UP_THRESHOLD -
-                       DEF_FREQUENCY_DOWN_DIFFERENTIAL;
 
                /* For correct statistics, we need 10 ticks for each measure */
                dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
index 9fef7d6e4e6a6f05a96e7b8153ecb0043e7a4f6e..cf117deb39b1f45c53ade61086236eb888d24a71 100644 (file)
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
-#include <linux/kernel.h>
-#include <linux/module.h>
 #include <linux/cpufreq.h>
 #include <linux/init.h>
+#include <linux/module.h>
 
 static int cpufreq_governor_performance(struct cpufreq_policy *policy,
                                        unsigned int event)
index 32109a14f5dc04e0d99c89a8113428afa545bbb7..e3b874c235eada5d353f4b02831140c6a244af68 100644 (file)
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
-#include <linux/kernel.h>
-#include <linux/module.h>
 #include <linux/cpufreq.h>
 #include <linux/init.h>
+#include <linux/module.h>
 
 static int cpufreq_governor_powersave(struct cpufreq_policy *policy,
                                        unsigned int event)
index d37568c5ca9c36b32dc69a8361337450c0ca1ddc..04452f026ed085a7b61f87c623c8677a2e242864 100644 (file)
@@ -9,17 +9,10 @@
  * published by the Free Software Foundation.
  */
 
-#include <linux/kernel.h>
-#include <linux/slab.h>
 #include <linux/cpu.h>
-#include <linux/sysfs.h>
 #include <linux/cpufreq.h>
 #include <linux/module.h>
-#include <linux/jiffies.h>
-#include <linux/percpu.h>
-#include <linux/kobject.h>
-#include <linux/spinlock.h>
-#include <linux/notifier.h>
+#include <linux/slab.h>
 #include <asm/cputime.h>
 
 static spinlock_t cpufreq_stats_lock;
@@ -200,22 +193,22 @@ static int cpufreq_stats_create_table(struct cpufreq_policy *policy,
 {
        unsigned int i, j, count = 0, ret = 0;
        struct cpufreq_stats *stat;
-       struct cpufreq_policy *data;
+       struct cpufreq_policy *current_policy;
        unsigned int alloc_size;
        unsigned int cpu = policy->cpu;
        if (per_cpu(cpufreq_stats_table, cpu))
                return -EBUSY;
-       stat = kzalloc(sizeof(struct cpufreq_stats), GFP_KERNEL);
+       stat = kzalloc(sizeof(*stat), GFP_KERNEL);
        if ((stat) == NULL)
                return -ENOMEM;
 
-       data = cpufreq_cpu_get(cpu);
-       if (data == NULL) {
+       current_policy = cpufreq_cpu_get(cpu);
+       if (current_policy == NULL) {
                ret = -EINVAL;
                goto error_get_fail;
        }
 
-       ret = sysfs_create_group(&data->kobj, &stats_attr_group);
+       ret = sysfs_create_group(&current_policy->kobj, &stats_attr_group);
        if (ret)
                goto error_out;
 
@@ -258,10 +251,10 @@ static int cpufreq_stats_create_table(struct cpufreq_policy *policy,
        stat->last_time = get_jiffies_64();
        stat->last_index = freq_table_get_index(stat, policy->cur);
        spin_unlock(&cpufreq_stats_lock);
-       cpufreq_cpu_put(data);
+       cpufreq_cpu_put(current_policy);
        return 0;
 error_out:
-       cpufreq_cpu_put(data);
+       cpufreq_cpu_put(current_policy);
 error_get_fail:
        kfree(stat);
        per_cpu(cpufreq_stats_table, cpu) = NULL;
@@ -348,16 +341,10 @@ static int cpufreq_stat_cpu_callback(struct notifier_block *nfb,
        unsigned int cpu = (unsigned long)hcpu;
 
        switch (action) {
-       case CPU_ONLINE:
-       case CPU_ONLINE_FROZEN:
-               cpufreq_update_policy(cpu);
-               break;
        case CPU_DOWN_PREPARE:
-       case CPU_DOWN_PREPARE_FROZEN:
                cpufreq_stats_free_sysfs(cpu);
                break;
        case CPU_DEAD:
-       case CPU_DEAD_FROZEN:
                cpufreq_stats_free_table(cpu);
                break;
        }
@@ -390,8 +377,6 @@ static int __init cpufreq_stats_init(void)
                return ret;
 
        register_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
-       for_each_online_cpu(cpu)
-               cpufreq_update_policy(cpu);
 
        ret = cpufreq_register_notifier(&notifier_trans_block,
                                CPUFREQ_TRANSITION_NOTIFIER);
index ee142c4905756afe79e0c034acb2351f278594ac..cb8276dd19caee0a5e2756883148a2d70a7132f2 100644 (file)
@@ -111,7 +111,6 @@ static struct cpufreq_driver cris_freq_driver = {
        .init   = cris_freq_cpu_init,
        .exit   = cris_freq_cpu_exit,
        .name   = "cris_freq",
-       .owner  = THIS_MODULE,
        .attr   = cris_freq_attr,
 };
 
index 12952235d5dbc69bc6f83c0efb34bd7c8663f253..72328f77dc53bdef993173a1832d3159ac3fd03c 100644 (file)
@@ -108,7 +108,6 @@ static struct cpufreq_driver cris_freq_driver = {
        .init = cris_freq_cpu_init,
        .exit = cris_freq_cpu_exit,
        .name = "cris_freq",
-       .owner = THIS_MODULE,
        .attr = cris_freq_attr,
 };
 
index a60efaeb4cf8c17291ec97a0ba86bb383fe81d55..09f64cc830197fc1f80f605e41d73be6b0441435 100644 (file)
@@ -54,7 +54,7 @@ static struct acpi_processor_performance *eps_acpi_cpu_perf;
 /* Minimum necessary to get acpi_processor_get_bios_limit() working */
 static int eps_acpi_init(void)
 {
-       eps_acpi_cpu_perf = kzalloc(sizeof(struct acpi_processor_performance),
+       eps_acpi_cpu_perf = kzalloc(sizeof(*eps_acpi_cpu_perf),
                                      GFP_KERNEL);
        if (!eps_acpi_cpu_perf)
                return -ENOMEM;
@@ -366,7 +366,7 @@ static int eps_cpu_init(struct cpufreq_policy *policy)
                states = 2;
 
        /* Allocate private data and frequency table for current cpu */
-       centaur = kzalloc(sizeof(struct eps_cpu_data)
+       centaur = kzalloc(sizeof(*centaur)
                    + (states + 1) * sizeof(struct cpufreq_frequency_table),
                    GFP_KERNEL);
        if (!centaur)
@@ -436,7 +436,6 @@ static struct cpufreq_driver eps_driver = {
        .exit           = eps_cpu_exit,
        .get            = eps_get,
        .name           = "e_powersaver",
-       .owner          = THIS_MODULE,
        .attr           = eps_attr,
 };
 
index 658d860344b0df71bd8dd59751f3e5281ca34637..823a400d98fd6bc5f8c627db8151df572ffc68b9 100644 (file)
@@ -274,7 +274,6 @@ static struct cpufreq_driver elanfreq_driver = {
        .init           = elanfreq_cpu_init,
        .exit           = elanfreq_cpu_exit,
        .name           = "elanfreq",
-       .owner          = THIS_MODULE,
        .attr           = elanfreq_attr,
 };
 
index 0d32f02ef4d644e5ec915b9205fd5fdf929b30b0..0fac34439e3171dbc97b8f717a935b691657cba9 100644 (file)
@@ -289,7 +289,7 @@ static int __init exynos_cpufreq_init(void)
 {
        int ret = -EINVAL;
 
-       exynos_info = kzalloc(sizeof(struct exynos_dvfs_info), GFP_KERNEL);
+       exynos_info = kzalloc(sizeof(*exynos_info), GFP_KERNEL);
        if (!exynos_info)
                return -ENOMEM;
 
@@ -332,7 +332,6 @@ err_cpufreq:
        regulator_put(arm_regulator);
 err_vdd_arm:
        kfree(exynos_info);
-       pr_debug("%s: failed initialization\n", __func__);
        return -EINVAL;
 }
 late_initcall(exynos_cpufreq_init);
index 92b852ee5ddcca037bcbf9d2376ff3a4cb3a62c2..7f25cee8cec275692ee685a8b9bbccaf501b6a09 100644 (file)
@@ -43,6 +43,27 @@ struct exynos_dvfs_info {
        bool (*need_apll_change)(unsigned int, unsigned int);
 };
 
+#ifdef CONFIG_ARM_EXYNOS4210_CPUFREQ
 extern int exynos4210_cpufreq_init(struct exynos_dvfs_info *);
+#else
+static inline int exynos4210_cpufreq_init(struct exynos_dvfs_info *info)
+{
+       return -EOPNOTSUPP;
+}
+#endif
+#ifdef CONFIG_ARM_EXYNOS4X12_CPUFREQ
 extern int exynos4x12_cpufreq_init(struct exynos_dvfs_info *);
+#else
+static inline int exynos4x12_cpufreq_init(struct exynos_dvfs_info *info)
+{
+       return -EOPNOTSUPP;
+}
+#endif
+#ifdef CONFIG_ARM_EXYNOS5250_CPUFREQ
 extern int exynos5250_cpufreq_init(struct exynos_dvfs_info *);
+#else
+static inline int exynos5250_cpufreq_init(struct exynos_dvfs_info *info)
+{
+       return -EOPNOTSUPP;
+}
+#endif
index 0c74018eda47e1fd2d09ef0db1baaf8595a70547..d514c152fd1a43041e8ff570fca7f9cc2b28f58d 100644 (file)
@@ -238,6 +238,9 @@ static int exynos_target(struct cpufreq_policy *policy,
        freqs.old = dvfs_info->cur_frequency;
        freqs.new = freq_table[index].frequency;
 
+       if (freqs.old == freqs.new)
+               goto out;
+
        cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
 
        /* Set the target frequency in all C0_3_PSTATE register */
index f0d87412cc91742a73fa40a500b160387af9fb28..f111454a7aeace94454e3c8eff6e7e45360380d7 100644 (file)
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
 #include <linux/cpufreq.h>
+#include <linux/module.h>
 
 /*********************************************************************
  *                     FREQUENCY TABLE HELPERS                       *
index 3dfc99b9ca86f3228d0f0a8d8b600bd7ae11f260..70442c7b5e71aed7e57ce61b20893e42dc36a215 100644 (file)
@@ -183,7 +183,7 @@ static void gx_write_byte(int reg, int value)
  * gx_detect_chipset:
  *
  **/
-static __init struct pci_dev *gx_detect_chipset(void)
+static struct pci_dev * __init gx_detect_chipset(void)
 {
        struct pci_dev *gx_pci = NULL;
 
@@ -446,7 +446,6 @@ static struct cpufreq_driver gx_suspmod_driver = {
        .target         = cpufreq_gx_target,
        .init           = cpufreq_gx_cpu_init,
        .name           = "gx-suspmod",
-       .owner          = THIS_MODULE,
 };
 
 static int __init cpufreq_gx_init(void)
@@ -466,7 +465,7 @@ static int __init cpufreq_gx_init(void)
 
        pr_debug("geode suspend modulation available.\n");
 
-       params = kzalloc(sizeof(struct gxfreq_params), GFP_KERNEL);
+       params = kzalloc(sizeof(*params), GFP_KERNEL);
        if (params == NULL)
                return -ENOMEM;
 
index 573c14ea802df7690f0e57ff3d8764b46305e51c..3e14f03171759bcdbd8a0501bc1ff38fee86d4c0 100644 (file)
@@ -274,7 +274,7 @@ acpi_cpufreq_cpu_init (
 
        pr_debug("acpi_cpufreq_cpu_init\n");
 
-       data = kzalloc(sizeof(struct cpufreq_acpi_io), GFP_KERNEL);
+       data = kzalloc(sizeof(*data), GFP_KERNEL);
        if (!data)
                return (-ENOMEM);
 
@@ -304,7 +304,7 @@ acpi_cpufreq_cpu_init (
        }
 
        /* alloc freq_table */
-       data->freq_table = kmalloc(sizeof(struct cpufreq_frequency_table) *
+       data->freq_table = kmalloc(sizeof(*data->freq_table) *
                                   (data->acpi_data.state_count + 1),
                                   GFP_KERNEL);
        if (!data->freq_table) {
@@ -409,7 +409,6 @@ static struct cpufreq_driver acpi_cpufreq_driver = {
        .init           = acpi_cpufreq_cpu_init,
        .exit           = acpi_cpufreq_cpu_exit,
        .name           = "acpi-cpufreq",
-       .owner          = THIS_MODULE,
        .attr           = acpi_cpufreq_attr,
 };
 
index 7cde885011ed3d1b15809f6d25c1a7f3ddc3adf1..6efd96c196b2efff80e41b1851e4b445350ccf5b 100644 (file)
@@ -665,7 +665,6 @@ static struct cpufreq_driver intel_pstate_driver = {
        .init           = intel_pstate_cpu_init,
        .exit           = intel_pstate_cpu_exit,
        .name           = "intel_pstate",
-       .owner          = THIS_MODULE,
 };
 
 static int __initdata no_load;
index c233ea617366fad2c7e4a3ed1b2bc0a7f500fda1..45e4d7fc261d36168190e8a6cadcd838629d98ee 100644 (file)
@@ -158,7 +158,6 @@ static struct cpufreq_driver kirkwood_cpufreq_driver = {
        .init   = kirkwood_cpufreq_cpu_init,
        .exit   = kirkwood_cpufreq_cpu_exit,
        .name   = "kirkwood-cpufreq",
-       .owner  = THIS_MODULE,
        .attr   = kirkwood_cpufreq_attr,
 };
 
index 8c49261df57dff642f63921d08a9d61ee41d95e3..4ada1cccb0523632e72c016e9a511dd38b08d33d 100644 (file)
@@ -948,7 +948,6 @@ static struct cpufreq_driver longhaul_driver = {
        .init   = longhaul_cpu_init,
        .exit   = longhaul_cpu_exit,
        .name   = "longhaul",
-       .owner  = THIS_MODULE,
        .attr   = longhaul_attr,
 };
 
index 0fe041d1f77f4a48f84542b11db0c4d07ee97b46..5aa031612d5393ff08d196f83b8701ac8c8f5e12 100644 (file)
@@ -286,7 +286,6 @@ static struct cpufreq_driver longrun_driver = {
        .get            = longrun_get,
        .init           = longrun_cpu_init,
        .name           = "longrun",
-       .owner          = THIS_MODULE,
 };
 
 static const struct x86_cpu_id longrun_ids[] = {
index 9536852c504a559865bc023a2d75ebed22a0ffd9..7bc3c44d34e2f0ec5493ad9fdf0c8467ccd8a037 100644 (file)
@@ -158,7 +158,6 @@ static struct freq_attr *loongson2_table_attr[] = {
 };
 
 static struct cpufreq_driver loongson2_cpufreq_driver = {
-       .owner = THIS_MODULE,
        .name = "loongson2",
        .init = loongson2_cpufreq_cpu_init,
        .verify = loongson2_cpufreq_verify,
index cdd62915efafbc1708ee68d4d71d99c20d741b9e..41c601f4631e23ef72dcd4e758dd690a8438af0a 100644 (file)
@@ -190,7 +190,6 @@ static int maple_cpufreq_cpu_init(struct cpufreq_policy *policy)
 
 static struct cpufreq_driver maple_cpufreq_driver = {
        .name           = "maple",
-       .owner          = THIS_MODULE,
        .flags          = CPUFREQ_CONST_LOOPS,
        .init           = maple_cpufreq_cpu_init,
        .verify         = maple_cpufreq_verify,
diff --git a/drivers/cpufreq/mperf.c b/drivers/cpufreq/mperf.c
deleted file mode 100644 (file)
index 911e193..0000000
+++ /dev/null
@@ -1,51 +0,0 @@
-#include <linux/kernel.h>
-#include <linux/smp.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/cpufreq.h>
-#include <linux/slab.h>
-
-#include "mperf.h"
-
-static DEFINE_PER_CPU(struct aperfmperf, acfreq_old_perf);
-
-/* Called via smp_call_function_single(), on the target CPU */
-static void read_measured_perf_ctrs(void *_cur)
-{
-       struct aperfmperf *am = _cur;
-
-       get_aperfmperf(am);
-}
-
-/*
- * Return the measured active (C0) frequency on this CPU since last call
- * to this function.
- * Input: cpu number
- * Return: Average CPU frequency in terms of max frequency (zero on error)
- *
- * We use IA32_MPERF and IA32_APERF MSRs to get the measured performance
- * over a period of time, while CPU is in C0 state.
- * IA32_MPERF counts at the rate of max advertised frequency
- * IA32_APERF counts at the rate of actual CPU frequency
- * Only IA32_APERF/IA32_MPERF ratio is architecturally defined and
- * no meaning should be associated with absolute values of these MSRs.
- */
-unsigned int cpufreq_get_measured_perf(struct cpufreq_policy *policy,
-                                       unsigned int cpu)
-{
-       struct aperfmperf perf;
-       unsigned long ratio;
-       unsigned int retval;
-
-       if (smp_call_function_single(cpu, read_measured_perf_ctrs, &perf, 1))
-               return 0;
-
-       ratio = calc_aperfmperf_ratio(&per_cpu(acfreq_old_perf, cpu), &perf);
-       per_cpu(acfreq_old_perf, cpu) = perf;
-
-       retval = (policy->cpuinfo.max_freq * ratio) >> APERFMPERF_SHIFT;
-
-       return retval;
-}
-EXPORT_SYMBOL_GPL(cpufreq_get_measured_perf);
-MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/mperf.h b/drivers/cpufreq/mperf.h
deleted file mode 100644 (file)
index 5dbf295..0000000
+++ /dev/null
@@ -1,9 +0,0 @@
-/*
- *  (c) 2010 Advanced Micro Devices, Inc.
- *  Your use of this code is subject to the terms and conditions of the
- *  GNU general public license version 2. See "COPYING" or
- *  http://www.gnu.org/licenses/gpl.html
- */
-
-unsigned int cpufreq_get_measured_perf(struct cpufreq_policy *policy,
-                                       unsigned int cpu);
index 9ee78170ff862a20cd7439e78d9ef0d71b542e77..2f0a2a65c37f67eeae9789ed12afa20627fd3503 100644 (file)
@@ -279,7 +279,6 @@ static struct cpufreq_driver p4clockmod_driver = {
        .exit           = cpufreq_p4_cpu_exit,
        .get            = cpufreq_p4_get,
        .name           = "p4-clockmod",
-       .owner          = THIS_MODULE,
        .attr           = p4clockmod_attr,
 };
 
index b704da404067061a4acd831e78b63572b54afc8c..534e43a60d1f9a80b2a952abe254fb01e06c0f12 100644 (file)
@@ -297,7 +297,6 @@ static int pas_cpufreq_target(struct cpufreq_policy *policy,
 
 static struct cpufreq_driver pas_cpufreq_driver = {
        .name           = "pas-cpufreq",
-       .owner          = THIS_MODULE,
        .flags          = CPUFREQ_CONST_LOOPS,
        .init           = pas_cpufreq_cpu_init,
        .exit           = pas_cpufreq_cpu_exit,
index 1581fcc4cf4a9f3cf8de7b782abe932328584ef0..d81c4e5ea0ada8c49afac790f1107bc5ce8f11e6 100644 (file)
@@ -587,7 +587,6 @@ static struct cpufreq_driver pcc_cpufreq_driver = {
        .init = pcc_cpufreq_cpu_init,
        .exit = pcc_cpufreq_cpu_exit,
        .name = "pcc-cpufreq",
-       .owner = THIS_MODULE,
 };
 
 static int __init pcc_cpufreq_init(void)
index 3104fad824801d1e9b03480a9757317f3a602fba..38cdc63c38da1a26572f8d7cbf5555e482205e73 100644 (file)
@@ -477,7 +477,6 @@ static struct cpufreq_driver pmac_cpufreq_driver = {
        .flags          = CPUFREQ_PM_NO_WARN,
        .attr           = pmac_cpu_freqs_attr,
        .name           = "powermac",
-       .owner          = THIS_MODULE,
 };
 
 
index 7ba423431cfe0dfb53fcbe3c7f7195882c36ac3d..b6850d97f0d5a7ac1a0e81e4e7f4a5e2651613bc 100644 (file)
@@ -371,7 +371,6 @@ static int g5_cpufreq_cpu_init(struct cpufreq_policy *policy)
 
 static struct cpufreq_driver g5_cpufreq_driver = {
        .name           = "powermac",
-       .owner          = THIS_MODULE,
        .flags          = CPUFREQ_CONST_LOOPS,
        .init           = g5_cpufreq_cpu_init,
        .verify         = g5_cpufreq_verify,
@@ -447,9 +446,8 @@ static int __init g5_neo2_cpufreq_init(struct device_node *cpus)
                if (!shdr)
                        goto bail_noprops;
                g5_fvt_table = (struct smu_sdbp_fvt *)&shdr[1];
-               ssize = (shdr->len * sizeof(u32)) -
-                       sizeof(struct smu_sdbp_header);
-               g5_fvt_count = ssize / sizeof(struct smu_sdbp_fvt);
+               ssize = (shdr->len * sizeof(u32)) - sizeof(*shdr);
+               g5_fvt_count = ssize / sizeof(*g5_fvt_table);
                g5_fvt_cur = 0;
 
                /* Sanity checking */
index ea8e10382ec56b46c6a378d0bf84801a1a221108..85f1c8c25ddc5d20a361e219d023548b356b835b 100644 (file)
@@ -207,7 +207,6 @@ static struct cpufreq_driver powernow_k6_driver = {
        .exit           = powernow_k6_cpu_exit,
        .get            = powernow_k6_get,
        .name           = "powernow-k6",
-       .owner          = THIS_MODULE,
        .attr           = powernow_k6_attr,
 };
 
index 9558708779350a9b00ee6951241ac9bf24551766..14ce480be8ab2503515cb1d61b8f30b5892d85d2 100644 (file)
@@ -177,7 +177,7 @@ static int get_ranges(unsigned char *pst)
        unsigned int speed;
        u8 fid, vid;
 
-       powernow_table = kzalloc((sizeof(struct cpufreq_frequency_table) *
+       powernow_table = kzalloc((sizeof(*powernow_table) *
                                (number_scales + 1)), GFP_KERNEL);
        if (!powernow_table)
                return -ENOMEM;
@@ -309,8 +309,7 @@ static int powernow_acpi_init(void)
                goto err0;
        }
 
-       acpi_processor_perf = kzalloc(sizeof(struct acpi_processor_performance),
-                                     GFP_KERNEL);
+       acpi_processor_perf = kzalloc(sizeof(*acpi_processor_perf), GFP_KERNEL);
        if (!acpi_processor_perf) {
                retval = -ENOMEM;
                goto err0;
@@ -346,7 +345,7 @@ static int powernow_acpi_init(void)
                goto err2;
        }
 
-       powernow_table = kzalloc((sizeof(struct cpufreq_frequency_table) *
+       powernow_table = kzalloc((sizeof(*powernow_table) *
                                (number_scales + 1)), GFP_KERNEL);
        if (!powernow_table) {
                retval = -ENOMEM;
@@ -497,7 +496,7 @@ static int powernow_decode_bios(int maxfid, int startvid)
                                        "relevant to this CPU).\n",
                                        psb->numpst);
 
-                       p += sizeof(struct psb_s);
+                       p += sizeof(*psb);
 
                        pst = (struct pst_s *) p;
 
@@ -510,12 +509,12 @@ static int powernow_decode_bios(int maxfid, int startvid)
                                    (maxfid == pst->maxfid) &&
                                    (startvid == pst->startvid)) {
                                        print_pst_entry(pst, j);
-                                       p = (char *)pst + sizeof(struct pst_s);
+                                       p = (char *)pst + sizeof(*pst);
                                        ret = get_ranges(p);
                                        return ret;
                                } else {
                                        unsigned int k;
-                                       p = (char *)pst + sizeof(struct pst_s);
+                                       p = (char *)pst + sizeof(*pst);
                                        for (k = 0; k < number_scales; k++)
                                                p += 2;
                                }
@@ -717,7 +716,6 @@ static struct cpufreq_driver powernow_driver = {
        .init           = powernow_cpu_init,
        .exit           = powernow_cpu_exit,
        .name           = "powernow-k7",
-       .owner          = THIS_MODULE,
        .attr           = powernow_table_attr,
 };
 
index c39d189217cb6d5fedd521c602464d296b4d47bf..2344a9ed17f3c38017701e075c945df8ae0f4bdf 100644 (file)
@@ -623,7 +623,7 @@ static int fill_powernow_table(struct powernow_k8_data *data,
        if (check_pst_table(data, pst, maxvid))
                return -EINVAL;
 
-       powernow_table = kmalloc((sizeof(struct cpufreq_frequency_table)
+       powernow_table = kmalloc((sizeof(*powernow_table)
                * (data->numps + 1)), GFP_KERNEL);
        if (!powernow_table) {
                printk(KERN_ERR PFX "powernow_table memory alloc failure\n");
@@ -793,7 +793,7 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
        }
 
        /* fill in data->powernow_table */
-       powernow_table = kmalloc((sizeof(struct cpufreq_frequency_table)
+       powernow_table = kmalloc((sizeof(*powernow_table)
                * (data->acpi_data.state_count + 1)), GFP_KERNEL);
        if (!powernow_table) {
                pr_debug("powernow_table memory alloc failure\n");
@@ -1106,7 +1106,7 @@ static int powernowk8_cpu_init(struct cpufreq_policy *pol)
        if (rc)
                return -ENODEV;
 
-       data = kzalloc(sizeof(struct powernow_k8_data), GFP_KERNEL);
+       data = kzalloc(sizeof(*data), GFP_KERNEL);
        if (!data) {
                printk(KERN_ERR PFX "unable to alloc powernow_k8_data");
                return -ENOMEM;
@@ -1240,7 +1240,6 @@ static struct cpufreq_driver cpufreq_amd64_driver = {
        .exit           = powernowk8_cpu_exit,
        .get            = powernowk8_get,
        .name           = "powernow-k8",
-       .owner          = THIS_MODULE,
        .attr           = powernow_k8_attr,
 };
 
index 3cae4529f959259f6554283c5d843ef8744046a4..60e81d524ea8231666210ecca2c97407c0a5a279 100644 (file)
@@ -300,7 +300,6 @@ static struct freq_attr *corenet_cpufreq_attr[] = {
 
 static struct cpufreq_driver ppc_corenet_cpufreq_driver = {
        .name           = "ppc_cpufreq",
-       .owner          = THIS_MODULE,
        .flags          = CPUFREQ_CONST_LOOPS,
        .init           = corenet_cpufreq_cpu_init,
        .exit           = __exit_p(corenet_cpufreq_cpu_exit),
index 5936f8d6f2cc3e855ce52a814705ee9d1e35b10b..2e448f0bbdc583465672e7ec40804f1d01467224 100644 (file)
@@ -181,7 +181,6 @@ static struct cpufreq_driver cbe_cpufreq_driver = {
        .init           = cbe_cpufreq_cpu_init,
        .exit           = cbe_cpufreq_cpu_exit,
        .name           = "cbe-cpufreq",
-       .owner          = THIS_MODULE,
        .flags          = CPUFREQ_CONST_LOOPS,
 };
 
index fb3981ac829fdb0a1eb73483ee3484fe7426990c..8749eaf1879338ad331f587c9a716a501ab0c701 100644 (file)
@@ -191,7 +191,7 @@ static int pxa_cpufreq_change_voltage(pxa_freqs_t *pxa_freq)
        return ret;
 }
 
-static __init void pxa_cpufreq_init_voltages(void)
+static void __init pxa_cpufreq_init_voltages(void)
 {
        vcc_core = regulator_get(NULL, "vcc_core");
        if (IS_ERR(vcc_core)) {
@@ -207,7 +207,7 @@ static int pxa_cpufreq_change_voltage(pxa_freqs_t *pxa_freq)
        return 0;
 }
 
-static __init void pxa_cpufreq_init_voltages(void) { }
+static void __init pxa_cpufreq_init_voltages(void) { }
 #endif
 
 static void find_freq_tables(struct cpufreq_frequency_table **freq_table,
index 9c92ef032a9e978800e4897a2a3e8322c0d1c031..d26306fb00d2e8a749ed044c816533f101defd11 100644 (file)
@@ -213,10 +213,12 @@ static int pxa3xx_cpufreq_init(struct cpufreq_policy *policy)
        policy->cur = policy->min = policy->max;
 
        if (cpu_is_pxa300() || cpu_is_pxa310())
-               ret = setup_freqs_table(policy, ARRAY_AND_SIZE(pxa300_freqs));
+               ret = setup_freqs_table(policy, pxa300_freqs,
+                                       ARRAY_SIZE(pxa300_freqs));
 
        if (cpu_is_pxa320())
-               ret = setup_freqs_table(policy, ARRAY_AND_SIZE(pxa320_freqs));
+               ret = setup_freqs_table(policy, pxa320_freqs,
+                                       ARRAY_SIZE(pxa320_freqs));
 
        if (ret) {
                pr_err("failed to setup frequency table\n");
index ce5b9fca9c1871eed562add6fb35f3979ab7fd8a..22dcb81ef9d0e9069b8fc4d49249713d59d41dd9 100644 (file)
@@ -524,7 +524,6 @@ static struct freq_attr *s3c2416_cpufreq_attr[] = {
 };
 
 static struct cpufreq_driver s3c2416_cpufreq_driver = {
-       .owner          = THIS_MODULE,
        .flags          = 0,
        .verify         = s3c2416_cpufreq_verify_speed,
        .target         = s3c2416_cpufreq_set_target,
index 87781eb20d6dce41c4b6c376f4937f67d808d0be..b0f343fcb7eefdfe40b942b202398372a618df44 100644 (file)
@@ -392,7 +392,7 @@ static int s3c_cpufreq_init(struct cpufreq_policy *policy)
        return 0;
 }
 
-static __init int s3c_cpufreq_initclks(void)
+static int __init s3c_cpufreq_initclks(void)
 {
        _clk_mpll = s3c_cpufreq_clk_get(NULL, "mpll");
        _clk_xtal = s3c_cpufreq_clk_get(NULL, "xtal");
@@ -522,7 +522,7 @@ int __init s3c_cpufreq_setboard(struct s3c_cpufreq_board *board)
        /* Copy the board information so that each board can make this
         * initdata. */
 
-       ours = kzalloc(sizeof(struct s3c_cpufreq_board), GFP_KERNEL);
+       ours = kzalloc(sizeof(*ours), GFP_KERNEL);
        if (ours == NULL) {
                printk(KERN_ERR "%s: no memory\n", __func__);
                return -ENOMEM;
@@ -615,7 +615,7 @@ static int s3c_cpufreq_build_freq(void)
        size = cpu_cur.info->calc_freqtable(&cpu_cur, NULL, 0);
        size++;
 
-       ftab = kmalloc(sizeof(struct cpufreq_frequency_table) * size, GFP_KERNEL);
+       ftab = kmalloc(sizeof(*ftab) * size, GFP_KERNEL);
        if (!ftab) {
                printk(KERN_ERR "%s: no memory for tables\n", __func__);
                return -ENOMEM;
@@ -691,7 +691,7 @@ int __init s3c_plltab_register(struct cpufreq_frequency_table *plls,
        struct cpufreq_frequency_table *vals;
        unsigned int size;
 
-       size = sizeof(struct cpufreq_frequency_table) * (plls_no + 1);
+       size = sizeof(*vals) * (plls_no + 1);
 
        vals = kmalloc(size, GFP_KERNEL);
        if (vals) {
index 13bb4bae64ee11505fb8a072d041ec9a8268bf84..8a72b0c555f846da21d25d02d9f94d427a7796b5 100644 (file)
@@ -263,7 +263,6 @@ static int s3c64xx_cpufreq_driver_init(struct cpufreq_policy *policy)
 }
 
 static struct cpufreq_driver s3c64xx_cpufreq_driver = {
-       .owner          = THIS_MODULE,
        .flags          = 0,
        .verify         = s3c64xx_cpufreq_verify_speed,
        .target         = s3c64xx_cpufreq_set_target,
index 77a210975fc4b66ce737703f6904483d45368776..d6f6c6f4efa76ac6f82a97ddeff3292a06cfd18c 100644 (file)
@@ -147,7 +147,6 @@ static struct cpufreq_driver sc520_freq_driver = {
        .init   = sc520_freq_cpu_init,
        .exit   = sc520_freq_cpu_exit,
        .name   = "sc520_freq",
-       .owner  = THIS_MODULE,
        .attr   = sc520_freq_attr,
 };
 
index 73adb64651e86a8123e7b8bb41080a2e138eaefe..ffc6d24b0cfbed764db6b6c8e2015330e6fb9fbd 100644 (file)
@@ -160,7 +160,6 @@ static struct freq_attr *sh_freq_attr[] = {
 };
 
 static struct cpufreq_driver sh_cpufreq_driver = {
-       .owner          = THIS_MODULE,
        .name           = "sh",
        .get            = sh_cpufreq_get,
        .target         = sh_cpufreq_target,
index 93061a40877383b09ceff0cb7bd0bc2bba60a08a..cf5bc2ca16fa11f4155d757c6d5219b6691a6b81 100644 (file)
@@ -351,12 +351,11 @@ static int __init us2e_freq_init(void)
                struct cpufreq_driver *driver;
 
                ret = -ENOMEM;
-               driver = kzalloc(sizeof(struct cpufreq_driver), GFP_KERNEL);
+               driver = kzalloc(sizeof(*driver), GFP_KERNEL);
                if (!driver)
                        goto err_out;
 
-               us2e_freq_table = kzalloc(
-                       (NR_CPUS * sizeof(struct us2e_freq_percpu_info)),
+               us2e_freq_table = kzalloc((NR_CPUS * sizeof(*us2e_freq_table)),
                        GFP_KERNEL);
                if (!us2e_freq_table)
                        goto err_out;
@@ -366,7 +365,6 @@ static int __init us2e_freq_init(void)
                driver->target = us2e_freq_target;
                driver->get = us2e_freq_get;
                driver->exit = us2e_freq_cpu_exit;
-               driver->owner = THIS_MODULE,
                strcpy(driver->name, "UltraSPARC-IIe");
 
                cpufreq_us2e_driver = driver;
index 880ee293d61eda8c131187d65579a5216ce2b00d..ac76b489979d48cac1278525ab861f46b69f8c86 100644 (file)
@@ -212,12 +212,11 @@ static int __init us3_freq_init(void)
                struct cpufreq_driver *driver;
 
                ret = -ENOMEM;
-               driver = kzalloc(sizeof(struct cpufreq_driver), GFP_KERNEL);
+               driver = kzalloc(sizeof(*driver), GFP_KERNEL);
                if (!driver)
                        goto err_out;
 
-               us3_freq_table = kzalloc(
-                       (NR_CPUS * sizeof(struct us3_freq_percpu_info)),
+               us3_freq_table = kzalloc((NR_CPUS * sizeof(*us3_freq_table)),
                        GFP_KERNEL);
                if (!us3_freq_table)
                        goto err_out;
@@ -227,7 +226,6 @@ static int __init us3_freq_init(void)
                driver->target = us3_freq_target;
                driver->get = us3_freq_get;
                driver->exit = us3_freq_cpu_exit;
-               driver->owner = THIS_MODULE,
                strcpy(driver->name, "UltraSPARC-III");
 
                cpufreq_us3_driver = driver;
index 0915e712fbdcc3a6393c1974c12394e060cc3c1f..f897d510584285bea376474ca6a3198eeef7d74f 100644 (file)
@@ -575,7 +575,6 @@ static struct cpufreq_driver centrino_driver = {
        .target         = centrino_target,
        .get            = get_cur_freq,
        .attr           = centrino_attr,
-       .owner          = THIS_MODULE,
 };
 
 /*
index e2e5aa97145275a0b860ba119d736cc444a3fd2f..5355abb69afc972d88c9f509924d3e779fa0cdac 100644 (file)
@@ -378,7 +378,6 @@ static struct cpufreq_driver speedstep_driver = {
        .init   = speedstep_cpu_init,
        .exit   = speedstep_cpu_exit,
        .get    = speedstep_get,
-       .owner  = THIS_MODULE,
        .attr   = speedstep_attr,
 };
 
index f5a6b70ee6c0f1965a2794ace8d66c2c7e1aec38..abfba4f731ebdc4212696732a765b56083bc8252 100644 (file)
@@ -375,7 +375,6 @@ static struct cpufreq_driver speedstep_driver = {
        .exit           = speedstep_cpu_exit,
        .get            = speedstep_get,
        .resume         = speedstep_resume,
-       .owner          = THIS_MODULE,
        .attr           = speedstep_attr,
 };
 
diff --git a/drivers/cpufreq/tilegx-cpufreq.c b/drivers/cpufreq/tilegx-cpufreq.c
new file mode 100644 (file)
index 0000000..0fc2ee5
--- /dev/null
@@ -0,0 +1,169 @@
+/*
+ * Copyright 2012 Tilera Corporation. All Rights Reserved.
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful, but
+ *   WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ *   NON INFRINGEMENT.  See the GNU General Public License for
+ *   more details.
+ *
+ * Support dynamic frequency changes for TILE-Gx.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/cpufreq.h>
+#include <linux/timex.h>
+
+static unsigned int tilegx_cpufreq_get(unsigned int cpu)
+{
+       return get_clock_rate() / 1000;
+}
+
+/**
+ * tilegx_cpufreq_target() - set a new CPUFreq policy
+ * @policy:            New policy.
+ * @target_freq:       The target frequency.
+ * @relation:          How that frequency relates to achieved frequency
+ *                     (CPUFREQ_RELATION_L or CPUFREQ_RELATION_H).
+ */
+static int tilegx_cpufreq_target(struct cpufreq_policy *policy,
+                                unsigned int target_freq,
+                                unsigned int relation)
+{
+       struct cpufreq_freqs freqs;
+       long new_freq;
+       HV_SetSpeed hvss = hv_set_speed(target_freq * 1000, 0,
+                                       HV_SET_SPEED_DRYRUN |
+                                       (relation == CPUFREQ_RELATION_L ?
+                                        HV_SET_SPEED_ROUNDUP : 0));
+       new_freq = hvss.new_speed;
+
+       /* If hv_set_speed failed, give up now. */
+       if (new_freq < 0)
+               return -ENOSYS;
+
+       freqs.old = tilegx_cpufreq_get(0);
+       freqs.new = new_freq / 1000;
+
+       /* If they aren't changing the speed, nothing to do. */
+       if (freqs.old == freqs.new)
+               return 0;
+
+       pr_debug("Changing Gx core frequency from %u to %u kHz\n",
+                freqs.old, freqs.new);
+
+       cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+
+       freqs.new = set_clock_rate(new_freq);
+
+       cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+
+       return 0;
+}
+
+/**
+ * tilegx_cpufreq_verify() - verify a new CPUFreq policy
+ * @policy:            New policy.
+ */
+static int tilegx_cpufreq_verify(struct cpufreq_policy *policy)
+{
+       cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
+                                    policy->cpuinfo.max_freq);
+       return 0;
+}
+
+/**
+ * tilegx_cpufreq_cpu_init() - configure the TILE-Gx CPUFreq driver
+ * @policy:            Policy.
+ */
+static int tilegx_cpufreq_cpu_init(struct cpufreq_policy *policy)
+{
+       unsigned int speed = tilegx_cpufreq_get(policy->cpu);
+       HV_SetSpeed hvss;
+
+       if (!speed)
+               return -EIO;
+
+       /* All of our CPUs share the same speed. */
+       cpumask_copy(policy->cpus, cpu_online_mask);
+
+       /* Set cpuinfo and default policy values. */
+       policy->cur = speed;
+
+       hvss = hv_set_speed(0, 0, HV_SET_SPEED_DRYRUN | HV_SET_SPEED_ROUNDUP);
+       if (hvss.new_speed < 0)
+               return -EPERM;
+       policy->cpuinfo.min_freq = hvss.new_speed / 1000;
+
+       hvss = hv_set_speed(LONG_MAX, 0, HV_SET_SPEED_DRYRUN);
+       if (hvss.new_speed < 0)
+               return -EPERM;
+       policy->cpuinfo.max_freq = hvss.new_speed / 1000;
+
+       /*
+        * This is worst-case, going from 40 MHz to 1200 MHz with voltage
+        * scaling enabled.  If you aren't doing anything that extreme,
+        * it'll be a lot lower, and you could reasonably tweak the
+        * governor sampling rate down via sysfs.
+        */
+       policy->cpuinfo.transition_latency = 4200000;
+
+       policy->cur = speed;
+       policy->min = policy->cpuinfo.min_freq;
+       policy->max = policy->cpuinfo.max_freq;
+
+       policy->shared_type = CPUFREQ_SHARED_TYPE_ANY;
+
+       return 0;
+}
+
+/**
+ * tilegx_cpufreq_cpu_exit() - deconfigure the TILE-Gx CPUFreq driver
+ * @policy:            Policy.
+ */
+static int tilegx_cpufreq_cpu_exit(struct cpufreq_policy *policy)
+{
+       return 0;
+}
+
+/* TILE-Gx CPUFreq attributes. */
+static struct freq_attr *tilegx_cpufreq_attr[] = {
+       NULL,
+};
+
+/* TILE-Gx CPUFreq operations vector. */
+static struct cpufreq_driver tilegx_cpufreq_driver = {
+       .name   = "tilegx_cpufreq",
+       .verify = tilegx_cpufreq_verify,
+       .target = tilegx_cpufreq_target,
+       .init   = tilegx_cpufreq_cpu_init,
+       .exit   = tilegx_cpufreq_cpu_exit,
+       .get    = tilegx_cpufreq_get,
+       .owner  = THIS_MODULE,
+       .attr   = tilegx_cpufreq_attr,
+};
+
+/* Initialize the TILE-Gx CPUFreq driver. */
+static int __init tilegx_cpufreq_init(void)
+{
+       return cpufreq_register_driver(&tilegx_cpufreq_driver);
+}
+
+/* Remove the TILE-Gx CPUFreq driver. */
+static void __exit tilegx_cpufreq_exit(void)
+{
+       cpufreq_unregister_driver(&tilegx_cpufreq_driver);
+}
+
+MODULE_AUTHOR("Tilera Corporation");
+MODULE_DESCRIPTION("CPU Frequency driver for TILE-Gx");
+MODULE_LICENSE("GPL");
+
+module_init(tilegx_cpufreq_init);
+module_exit(tilegx_cpufreq_exit);
index 12fc904d7dabea615955f1f8449b4bc670849ed7..b225f04d8ae5c55bcfee26f644812d3d1100cc04 100644 (file)
@@ -24,7 +24,7 @@ static struct cpufreq_driver ucv2_driver;
 /* make sure that only the "userspace" governor is run
  * -- anything else wouldn't make sense on this platform, anyway.
  */
-int ucv2_verify_speed(struct cpufreq_policy *policy)
+static int ucv2_verify_speed(struct cpufreq_policy *policy)
 {
        if (policy->cpu)
                return -EINVAL;
index 0e2cd5cab4d0924392d8c4d0a99ffb9a75d4485e..b3fb81d7cf0410bb9358d94a4e345899323f1ef3 100644 (file)
@@ -1,5 +1,6 @@
+menu "CPU Idle"
 
-menuconfig CPU_IDLE
+config CPU_IDLE
        bool "CPU idle PM support"
        default y if ACPI || PPC_PSERIES
        select CPU_IDLE_GOV_LADDER if (!NO_HZ && !NO_HZ_IDLE)
@@ -29,20 +30,13 @@ config CPU_IDLE_GOV_MENU
        bool "Menu governor (for tickless system)"
        default y
 
-config CPU_IDLE_CALXEDA
-       bool "CPU Idle Driver for Calxeda processors"
-       depends on ARCH_HIGHBANK
-       select ARM_CPU_SUSPEND
-       help
-         Select this to enable cpuidle on Calxeda processors.
-
-config CPU_IDLE_ZYNQ
-       bool "CPU Idle Driver for Xilinx Zynq processors"
-       depends on ARCH_ZYNQ
-       help
-         Select this to enable cpuidle on Xilinx Zynq processors.
+menu "ARM CPU Idle Drivers"
+depends on ARM
+source "drivers/cpuidle/Kconfig.arm"
+endmenu
 
 endif
 
 config ARCH_NEEDS_CPU_IDLE_COUPLED
        def_bool n
+endmenu
diff --git a/drivers/cpuidle/Kconfig.arm b/drivers/cpuidle/Kconfig.arm
new file mode 100644 (file)
index 0000000..b330219
--- /dev/null
@@ -0,0 +1,29 @@
+#
+# ARM CPU Idle drivers
+#
+
+config ARM_HIGHBANK_CPUIDLE
+       bool "CPU Idle Driver for Calxeda processors"
+       depends on ARCH_HIGHBANK
+       select ARM_CPU_SUSPEND
+       help
+         Select this to enable cpuidle on Calxeda processors.
+
+config ARM_KIRKWOOD_CPUIDLE
+       bool "CPU Idle Driver for Marvell Kirkwood SoCs"
+       depends on ARCH_KIRKWOOD
+       help
+         This adds the CPU Idle driver for Marvell Kirkwood SoCs.
+
+config ARM_ZYNQ_CPUIDLE
+       bool "CPU Idle Driver for Xilinx Zynq processors"
+       depends on ARCH_ZYNQ
+       help
+         Select this to enable cpuidle on Xilinx Zynq processors.
+
+config ARM_U8500_CPUIDLE
+       bool "Cpu Idle Driver for the ST-E u8500 processors"
+       depends on ARCH_U8500
+       help
+         Select this to enable cpuidle for ST-E u8500 processors
+
index 8767a7b3eb913d9b5eb530f6df208eafe6476aa2..0b9d200c7e454eefd4864abbc83bfee7f7dcb129 100644 (file)
@@ -5,6 +5,9 @@
 obj-y += cpuidle.o driver.o governor.o sysfs.o governors/
 obj-$(CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED) += coupled.o
 
-obj-$(CONFIG_CPU_IDLE_CALXEDA) += cpuidle-calxeda.o
-obj-$(CONFIG_ARCH_KIRKWOOD) += cpuidle-kirkwood.o
-obj-$(CONFIG_CPU_IDLE_ZYNQ) += cpuidle-zynq.o
+##################################################################################
+# ARM SoC drivers
+obj-$(CONFIG_ARM_HIGHBANK_CPUIDLE)     += cpuidle-calxeda.o
+obj-$(CONFIG_ARM_KIRKWOOD_CPUIDLE)     += cpuidle-kirkwood.o
+obj-$(CONFIG_ARM_ZYNQ_CPUIDLE)         += cpuidle-zynq.o
+obj-$(CONFIG_ARM_U8500_CPUIDLE)         += cpuidle-ux500.o
index 0e6e408c0a630b71c466f3fabaa7decd1b97fed0..34605847957269635b650997d51ac7eb9062416f 100644 (file)
@@ -35,7 +35,7 @@
 #include <asm/cp15.h>
 
 extern void highbank_set_cpu_jump(int cpu, void *jump_addr);
-extern void *scu_base_addr;
+extern void __iomem *scu_base_addr;
 
 static noinline void calxeda_idle_restore(void)
 {
index 521b0a7fdd89930a358f5c6cba557620a3a35474..41ba843251b8e843e60ade3e87c1a0d39d444c48 100644 (file)
@@ -60,9 +60,6 @@ static int kirkwood_cpuidle_probe(struct platform_device *pdev)
        struct resource *res;
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (res == NULL)
-               return -EINVAL;
-
        ddr_operation_base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(ddr_operation_base))
                return PTR_ERR(ddr_operation_base);
@@ -70,7 +67,7 @@ static int kirkwood_cpuidle_probe(struct platform_device *pdev)
        return cpuidle_register(&kirkwood_idle_driver, NULL);
 }
 
-int kirkwood_cpuidle_remove(struct platform_device *pdev)
+static int kirkwood_cpuidle_remove(struct platform_device *pdev)
 {
        cpuidle_unregister(&kirkwood_idle_driver);
        return 0;
similarity index 90%
rename from arch/arm/mach-ux500/cpuidle.c
rename to drivers/cpuidle/cpuidle-ux500.c
index a45dd09daed9212c2d799ce1ef9421be8d13bab5..e0564652af35114f2d73227b68bf781758975629 100644 (file)
 #include <linux/smp.h>
 #include <linux/mfd/dbx500-prcmu.h>
 #include <linux/platform_data/arm-ux500-pm.h>
+#include <linux/platform_device.h>
 
 #include <asm/cpuidle.h>
 #include <asm/proc-fns.h>
 
-#include "db8500-regs.h"
-#include "id.h"
-
 static atomic_t master = ATOMIC_INIT(0);
 static DEFINE_SPINLOCK(master_lock);
 
@@ -113,11 +111,8 @@ static struct cpuidle_driver ux500_idle_driver = {
        .state_count = 2,
 };
 
-int __init ux500_idle_init(void)
+static int __init dbx500_cpuidle_probe(struct platform_device *pdev)
 {
-       if (!(cpu_is_u8500_family() || cpu_is_ux540_family()))
-               return -ENODEV;
-
        /* Configure wake up reasons */
        prcmu_enable_wakeups(PRCMU_WAKEUP(ARM) | PRCMU_WAKEUP(RTC) |
                             PRCMU_WAKEUP(ABB));
@@ -125,4 +120,12 @@ int __init ux500_idle_init(void)
        return cpuidle_register(&ux500_idle_driver, NULL);
 }
 
-device_initcall(ux500_idle_init);
+static struct platform_driver dbx500_cpuidle_plat_driver = {
+       .driver = {
+               .name = "cpuidle-dbx500",
+               .owner = THIS_MODULE,
+       },
+       .probe = dbx500_cpuidle_probe,
+};
+
+module_platform_driver(dbx500_cpuidle_plat_driver);
index fdc432f18022b331f2102dccf13b539e74fae3ec..d75040ddd2b3ba8e317dfc76f5367faa62340b8f 100644 (file)
@@ -42,8 +42,6 @@ void disable_cpuidle(void)
        off = 1;
 }
 
-static int __cpuidle_register_device(struct cpuidle_device *dev);
-
 /**
  * cpuidle_play_dead - cpu off-lining
  *
@@ -278,7 +276,7 @@ static void poll_idle_init(struct cpuidle_driver *drv) {}
  */
 int cpuidle_enable_device(struct cpuidle_device *dev)
 {
-       int ret, i;
+       int ret;
        struct cpuidle_driver *drv;
 
        if (!dev)
@@ -292,15 +290,12 @@ int cpuidle_enable_device(struct cpuidle_device *dev)
        if (!drv || !cpuidle_curr_governor)
                return -EIO;
 
+       if (!dev->registered)
+               return -EINVAL;
+
        if (!dev->state_count)
                dev->state_count = drv->state_count;
 
-       if (dev->registered == 0) {
-               ret = __cpuidle_register_device(dev);
-               if (ret)
-                       return ret;
-       }
-
        poll_idle_init(drv);
 
        ret = cpuidle_add_device_sysfs(dev);
@@ -311,12 +306,6 @@ int cpuidle_enable_device(struct cpuidle_device *dev)
            (ret = cpuidle_curr_governor->enable(drv, dev)))
                goto fail_sysfs;
 
-       for (i = 0; i < dev->state_count; i++) {
-               dev->states_usage[i].usage = 0;
-               dev->states_usage[i].time = 0;
-       }
-       dev->last_residency = 0;
-
        smp_wmb();
 
        dev->enabled = 1;
@@ -360,6 +349,23 @@ void cpuidle_disable_device(struct cpuidle_device *dev)
 
 EXPORT_SYMBOL_GPL(cpuidle_disable_device);
 
+static void __cpuidle_unregister_device(struct cpuidle_device *dev)
+{
+       struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
+
+       list_del(&dev->device_list);
+       per_cpu(cpuidle_devices, dev->cpu) = NULL;
+       module_put(drv->owner);
+}
+
+static int __cpuidle_device_init(struct cpuidle_device *dev)
+{
+       memset(dev->states_usage, 0, sizeof(dev->states_usage));
+       dev->last_residency = 0;
+
+       return 0;
+}
+
 /**
  * __cpuidle_register_device - internal register function called before register
  * and enable routines
@@ -377,24 +383,15 @@ static int __cpuidle_register_device(struct cpuidle_device *dev)
 
        per_cpu(cpuidle_devices, dev->cpu) = dev;
        list_add(&dev->device_list, &cpuidle_detected_devices);
-       ret = cpuidle_add_sysfs(dev);
-       if (ret)
-               goto err_sysfs;
 
        ret = cpuidle_coupled_register_device(dev);
-       if (ret)
-               goto err_coupled;
+       if (ret) {
+               __cpuidle_unregister_device(dev);
+               return ret;
+       }
 
        dev->registered = 1;
        return 0;
-
-err_coupled:
-       cpuidle_remove_sysfs(dev);
-err_sysfs:
-       list_del(&dev->device_list);
-       per_cpu(cpuidle_devices, dev->cpu) = NULL;
-       module_put(drv->owner);
-       return ret;
 }
 
 /**
@@ -403,25 +400,44 @@ err_sysfs:
  */
 int cpuidle_register_device(struct cpuidle_device *dev)
 {
-       int ret;
+       int ret = -EBUSY;
 
        if (!dev)
                return -EINVAL;
 
        mutex_lock(&cpuidle_lock);
 
-       if ((ret = __cpuidle_register_device(dev))) {
-               mutex_unlock(&cpuidle_lock);
-               return ret;
-       }
+       if (dev->registered)
+               goto out_unlock;
+
+       ret = __cpuidle_device_init(dev);
+       if (ret)
+               goto out_unlock;
+
+       ret = __cpuidle_register_device(dev);
+       if (ret)
+               goto out_unlock;
+
+       ret = cpuidle_add_sysfs(dev);
+       if (ret)
+               goto out_unregister;
+
+       ret = cpuidle_enable_device(dev);
+       if (ret)
+               goto out_sysfs;
 
-       cpuidle_enable_device(dev);
        cpuidle_install_idle_handler();
 
+out_unlock:
        mutex_unlock(&cpuidle_lock);
 
-       return 0;
+       return ret;
 
+out_sysfs:
+       cpuidle_remove_sysfs(dev);
+out_unregister:
+       __cpuidle_unregister_device(dev);
+       goto out_unlock;
 }
 
 EXPORT_SYMBOL_GPL(cpuidle_register_device);
@@ -432,8 +448,6 @@ EXPORT_SYMBOL_GPL(cpuidle_register_device);
  */
 void cpuidle_unregister_device(struct cpuidle_device *dev)
 {
-       struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
-
        if (dev->registered == 0)
                return;
 
@@ -442,14 +456,12 @@ void cpuidle_unregister_device(struct cpuidle_device *dev)
        cpuidle_disable_device(dev);
 
        cpuidle_remove_sysfs(dev);
-       list_del(&dev->device_list);
-       per_cpu(cpuidle_devices, dev->cpu) = NULL;
+
+       __cpuidle_unregister_device(dev);
 
        cpuidle_coupled_unregister_device(dev);
 
        cpuidle_resume_and_unlock();
-
-       module_put(drv->owner);
 }
 
 EXPORT_SYMBOL_GPL(cpuidle_unregister_device);
index 9b784051ec12b47564b3d07429096556b2fc2035..9f08e8cce1af89c17b14bfed7358382b7ae3e542 100644 (file)
@@ -192,14 +192,4 @@ static int __init init_ladder(void)
        return cpuidle_register_governor(&ladder_governor);
 }
 
-/**
- * exit_ladder - exits the governor
- */
-static void __exit exit_ladder(void)
-{
-       cpuidle_unregister_governor(&ladder_governor);
-}
-
-MODULE_LICENSE("GPL");
-module_init(init_ladder);
-module_exit(exit_ladder);
+postcore_initcall(init_ladder);
index bc580b67a65298a8bf65e699d562dfd6f237b697..cbbb73b37a6d1d0d99a9d4613633324ef0833219 100644 (file)
@@ -442,14 +442,4 @@ static int __init init_menu(void)
        return cpuidle_register_governor(&menu_governor);
 }
 
-/**
- * exit_menu - exits the governor
- */
-static void __exit exit_menu(void)
-{
-       cpuidle_unregister_governor(&menu_governor);
-}
-
-MODULE_LICENSE("GPL");
-module_init(init_menu);
-module_exit(exit_menu);
+postcore_initcall(init_menu);
index 428754af62366cda8fe71dee47b128c5da602969..8739cc05228ca97512a60b9cc3770097f3a044aa 100644 (file)
 #include <linux/sysfs.h>
 #include <linux/slab.h>
 #include <linux/cpu.h>
+#include <linux/completion.h>
 #include <linux/capability.h>
 #include <linux/device.h>
+#include <linux/kobject.h>
 
 #include "cpuidle.h"
 
@@ -33,7 +35,8 @@ static ssize_t show_available_governors(struct device *dev,
 
        mutex_lock(&cpuidle_lock);
        list_for_each_entry(tmp, &cpuidle_governors, governor_list) {
-               if (i >= (ssize_t) ((PAGE_SIZE/sizeof(char)) - CPUIDLE_NAME_LEN - 2))
+               if (i >= (ssize_t) ((PAGE_SIZE/sizeof(char)) -
+                                   CPUIDLE_NAME_LEN - 2))
                        goto out;
                i += scnprintf(&buf[i], CPUIDLE_NAME_LEN, "%s ", tmp->name);
        }
@@ -166,13 +169,28 @@ struct cpuidle_attr {
 #define define_one_rw(_name, show, store) \
        static struct cpuidle_attr attr_##_name = __ATTR(_name, 0644, show, store)
 
-#define kobj_to_cpuidledev(k) container_of(k, struct cpuidle_device, kobj)
 #define attr_to_cpuidleattr(a) container_of(a, struct cpuidle_attr, attr)
-static ssize_t cpuidle_show(struct kobject * kobj, struct attribute * attr ,char * buf)
+
+struct cpuidle_device_kobj {
+       struct cpuidle_device *dev;
+       struct completion kobj_unregister;
+       struct kobject kobj;
+};
+
+static inline struct cpuidle_device *to_cpuidle_device(struct kobject *kobj)
+{
+       struct cpuidle_device_kobj *kdev =
+               container_of(kobj, struct cpuidle_device_kobj, kobj);
+
+       return kdev->dev;
+}
+
+static ssize_t cpuidle_show(struct kobject *kobj, struct attribute *attr,
+                           char *buf)
 {
        int ret = -EIO;
-       struct cpuidle_device *dev = kobj_to_cpuidledev(kobj);
-       struct cpuidle_attr * cattr = attr_to_cpuidleattr(attr);
+       struct cpuidle_device *dev = to_cpuidle_device(kobj);
+       struct cpuidle_attr *cattr = attr_to_cpuidleattr(attr);
 
        if (cattr->show) {
                mutex_lock(&cpuidle_lock);
@@ -182,12 +200,12 @@ static ssize_t cpuidle_show(struct kobject * kobj, struct attribute * attr ,char
        return ret;
 }
 
-static ssize_t cpuidle_store(struct kobject * kobj, struct attribute * attr,
-                    const char * buf, size_t count)
+static ssize_t cpuidle_store(struct kobject *kobj, struct attribute *attr,
+                            const char *buf, size_t count)
 {
        int ret = -EIO;
-       struct cpuidle_device *dev = kobj_to_cpuidledev(kobj);
-       struct cpuidle_attr * cattr = attr_to_cpuidleattr(attr);
+       struct cpuidle_device *dev = to_cpuidle_device(kobj);
+       struct cpuidle_attr *cattr = attr_to_cpuidleattr(attr);
 
        if (cattr->store) {
                mutex_lock(&cpuidle_lock);
@@ -204,9 +222,10 @@ static const struct sysfs_ops cpuidle_sysfs_ops = {
 
 static void cpuidle_sysfs_release(struct kobject *kobj)
 {
-       struct cpuidle_device *dev = kobj_to_cpuidledev(kobj);
+       struct cpuidle_device_kobj *kdev =
+               container_of(kobj, struct cpuidle_device_kobj, kobj);
 
-       complete(&dev->kobj_unregister);
+       complete(&kdev->kobj_unregister);
 }
 
 static struct kobj_type ktype_cpuidle = {
@@ -237,8 +256,8 @@ static ssize_t show_state_##_name(struct cpuidle_state *state, \
 
 #define define_store_state_ull_function(_name) \
 static ssize_t store_state_##_name(struct cpuidle_state *state, \
-               struct cpuidle_state_usage *state_usage, \
-               const char *buf, size_t size) \
+                                  struct cpuidle_state_usage *state_usage, \
+                                  const char *buf, size_t size)        \
 { \
        unsigned long long value; \
        int err; \
@@ -256,14 +275,16 @@ static ssize_t store_state_##_name(struct cpuidle_state *state, \
 
 #define define_show_state_ull_function(_name) \
 static ssize_t show_state_##_name(struct cpuidle_state *state, \
-                       struct cpuidle_state_usage *state_usage, char *buf) \
+                                 struct cpuidle_state_usage *state_usage, \
+                                 char *buf)                            \
 { \
        return sprintf(buf, "%llu\n", state_usage->_name);\
 }
 
 #define define_show_state_str_function(_name) \
 static ssize_t show_state_##_name(struct cpuidle_state *state, \
-                       struct cpuidle_state_usage *state_usage, char *buf) \
+                                 struct cpuidle_state_usage *state_usage, \
+                                 char *buf)                            \
 { \
        if (state->_name[0] == '\0')\
                return sprintf(buf, "<null>\n");\
@@ -309,8 +330,9 @@ struct cpuidle_state_kobj {
 #define kobj_to_state(k) (kobj_to_state_obj(k)->state)
 #define kobj_to_state_usage(k) (kobj_to_state_obj(k)->state_usage)
 #define attr_to_stateattr(a) container_of(a, struct cpuidle_state_attr, attr)
-static ssize_t cpuidle_state_show(struct kobject * kobj,
-       struct attribute * attr ,char * buf)
+
+static ssize_t cpuidle_state_show(struct kobject *kobj, struct attribute *attr,
+                                 char * buf)
 {
        int ret = -EIO;
        struct cpuidle_state *state = kobj_to_state(kobj);
@@ -323,8 +345,8 @@ static ssize_t cpuidle_state_show(struct kobject * kobj,
        return ret;
 }
 
-static ssize_t cpuidle_state_store(struct kobject *kobj,
-       struct attribute *attr, const char *buf, size_t size)
+static ssize_t cpuidle_state_store(struct kobject *kobj, struct attribute *attr,
+                                  const char *buf, size_t size)
 {
        int ret = -EIO;
        struct cpuidle_state *state = kobj_to_state(kobj);
@@ -371,6 +393,7 @@ static int cpuidle_add_state_sysfs(struct cpuidle_device *device)
 {
        int i, ret = -ENOMEM;
        struct cpuidle_state_kobj *kobj;
+       struct cpuidle_device_kobj *kdev = device->kobj_dev;
        struct cpuidle_driver *drv = cpuidle_get_cpu_driver(device);
 
        /* state statistics */
@@ -383,7 +406,7 @@ static int cpuidle_add_state_sysfs(struct cpuidle_device *device)
                init_completion(&kobj->kobj_unregister);
 
                ret = kobject_init_and_add(&kobj->kobj, &ktype_state_cpuidle,
-                                          &device->kobj, "state%d", i);
+                                          &kdev->kobj, "state%d", i);
                if (ret) {
                        kfree(kobj);
                        goto error_state;
@@ -449,8 +472,8 @@ static void cpuidle_driver_sysfs_release(struct kobject *kobj)
        complete(&driver_kobj->kobj_unregister);
 }
 
-static ssize_t cpuidle_driver_show(struct kobject *kobj, struct attribute * attr,
-                                  char * buf)
+static ssize_t cpuidle_driver_show(struct kobject *kobj, struct attribute *attr,
+                                  char *buf)
 {
        int ret = -EIO;
        struct cpuidle_driver_kobj *driver_kobj = kobj_to_driver_kobj(kobj);
@@ -500,6 +523,7 @@ static struct kobj_type ktype_driver_cpuidle = {
 static int cpuidle_add_driver_sysfs(struct cpuidle_device *dev)
 {
        struct cpuidle_driver_kobj *kdrv;
+       struct cpuidle_device_kobj *kdev = dev->kobj_dev;
        struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
        int ret;
 
@@ -511,7 +535,7 @@ static int cpuidle_add_driver_sysfs(struct cpuidle_device *dev)
        init_completion(&kdrv->kobj_unregister);
 
        ret = kobject_init_and_add(&kdrv->kobj, &ktype_driver_cpuidle,
-                                  &dev->kobj, "driver");
+                                  &kdev->kobj, "driver");
        if (ret) {
                kfree(kdrv);
                return ret;
@@ -580,16 +604,28 @@ void cpuidle_remove_device_sysfs(struct cpuidle_device *device)
  */
 int cpuidle_add_sysfs(struct cpuidle_device *dev)
 {
+       struct cpuidle_device_kobj *kdev;
        struct device *cpu_dev = get_cpu_device((unsigned long)dev->cpu);
        int error;
 
-       init_completion(&dev->kobj_unregister);
+       kdev = kzalloc(sizeof(*kdev), GFP_KERNEL);
+       if (!kdev)
+               return -ENOMEM;
+       kdev->dev = dev;
+       dev->kobj_dev = kdev;
 
-       error = kobject_init_and_add(&dev->kobj, &ktype_cpuidle, &cpu_dev->kobj,
-                                    "cpuidle");
-       if (!error)
-               kobject_uevent(&dev->kobj, KOBJ_ADD);
-       return error;
+       init_completion(&kdev->kobj_unregister);
+
+       error = kobject_init_and_add(&kdev->kobj, &ktype_cpuidle, &cpu_dev->kobj,
+                                  "cpuidle");
+       if (error) {
+               kfree(kdev);
+               return error;
+       }
+
+       kobject_uevent(&kdev->kobj, KOBJ_ADD);
+
+       return 0;
 }
 
 /**
@@ -598,6 +634,9 @@ int cpuidle_add_sysfs(struct cpuidle_device *dev)
  */
 void cpuidle_remove_sysfs(struct cpuidle_device *dev)
 {
-       kobject_put(&dev->kobj);
-       wait_for_completion(&dev->kobj_unregister);
+       struct cpuidle_device_kobj *kdev = dev->kobj_dev;
+
+       kobject_put(&kdev->kobj);
+       wait_for_completion(&kdev->kobj_unregister);
+       kfree(kdev);
 }
index 8ff7c230d82e487d9644c6c5c4552c228764074e..f4fd837bcb82a82530485f1f54a55dc75e362fdf 100644 (file)
@@ -242,17 +242,20 @@ config CRYPTO_DEV_PPC4XX
          This option allows you to have support for AMCC crypto acceleration.
 
 config CRYPTO_DEV_OMAP_SHAM
-       tristate "Support for OMAP SHA1/MD5 hw accelerator"
-       depends on ARCH_OMAP2 || ARCH_OMAP3
+       tristate "Support for OMAP MD5/SHA1/SHA2 hw accelerator"
+       depends on ARCH_OMAP2PLUS
        select CRYPTO_SHA1
        select CRYPTO_MD5
+       select CRYPTO_SHA256
+       select CRYPTO_SHA512
+       select CRYPTO_HMAC
        help
-         OMAP processors have SHA1/MD5 hw accelerator. Select this if you
-         want to use the OMAP module for SHA1/MD5 algorithms.
+         OMAP processors have MD5/SHA1/SHA2 hw accelerator. Select this if you
+         want to use the OMAP module for MD5/SHA1/SHA2 algorithms.
 
 config CRYPTO_DEV_OMAP_AES
        tristate "Support for OMAP AES hw engine"
-       depends on ARCH_OMAP2 || ARCH_OMAP3
+       depends on ARCH_OMAP2 || ARCH_OMAP3 || ARCH_OMAP2PLUS
        select CRYPTO_AES
        select CRYPTO_BLKCIPHER2
        help
index a33243c17b00d0c3929fdd49693edc0524e06f8c..4afca396877335653ae8f6f902d1bcdf5b3b9db3 100644 (file)
 #include "crypto4xx_sa.h"
 #include "crypto4xx_core.h"
 
-void set_dynamic_sa_command_0(struct dynamic_sa_ctl *sa, u32 save_h,
-                             u32 save_iv, u32 ld_h, u32 ld_iv, u32 hdr_proc,
-                             u32 h, u32 c, u32 pad_type, u32 op_grp, u32 op,
-                             u32 dir)
+static void set_dynamic_sa_command_0(struct dynamic_sa_ctl *sa, u32 save_h,
+                                    u32 save_iv, u32 ld_h, u32 ld_iv,
+                                    u32 hdr_proc, u32 h, u32 c, u32 pad_type,
+                                    u32 op_grp, u32 op, u32 dir)
 {
        sa->sa_command_0.w = 0;
        sa->sa_command_0.bf.save_hash_state = save_h;
@@ -52,9 +52,10 @@ void set_dynamic_sa_command_0(struct dynamic_sa_ctl *sa, u32 save_h,
        sa->sa_command_0.bf.dir = dir;
 }
 
-void set_dynamic_sa_command_1(struct dynamic_sa_ctl *sa, u32 cm, u32 hmac_mc,
-                             u32 cfb, u32 esn, u32 sn_mask, u32 mute,
-                             u32 cp_pad, u32 cp_pay, u32 cp_hdr)
+static void set_dynamic_sa_command_1(struct dynamic_sa_ctl *sa, u32 cm,
+                                    u32 hmac_mc, u32 cfb, u32 esn,
+                                    u32 sn_mask, u32 mute, u32 cp_pad,
+                                    u32 cp_pay, u32 cp_hdr)
 {
        sa->sa_command_1.w = 0;
        sa->sa_command_1.bf.crypto_mode31 = (cm & 4) >> 2;
index b44091c47f752535b2eaf18fb4ad250b799695b2..ca89f6b84b068c1ecc770eededee50bf8c1d55aa 100644 (file)
@@ -98,3 +98,11 @@ config CRYPTO_DEV_FSL_CAAM_RNG_API
 
          To compile this as a module, choose M here: the module
          will be called caamrng.
+
+config CRYPTO_DEV_FSL_CAAM_DEBUG
+       bool "Enable debug output in CAAM driver"
+       depends on CRYPTO_DEV_FSL_CAAM
+       default n
+       help
+         Selecting this will enable printing of various debug
+         information in the CAAM driver.
index b1eb44838db5644bc51dc5c2eff85929ff125aa6..d56bd0ec65d877ca9f2989ad77173214cc537868 100644 (file)
@@ -1,6 +1,9 @@
 #
 # Makefile for the CAAM backend and dependent components
 #
+ifeq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_DEBUG), y)
+       EXTRA_CFLAGS := -DDEBUG
+endif
 
 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o
 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o
index bf416a8391a77ec94dc3686d773e5afb7b437b3d..7c63b72ecd750f381fef66e8baccc7955351b1b3 100644 (file)
@@ -65,8 +65,6 @@
 #define CAAM_MAX_IV_LENGTH             16
 
 /* length of descriptors text */
-#define DESC_JOB_IO_LEN                        (CAAM_CMD_SZ * 5 + CAAM_PTR_SZ * 3)
-
 #define DESC_AEAD_BASE                 (4 * CAAM_CMD_SZ)
 #define DESC_AEAD_ENC_LEN              (DESC_AEAD_BASE + 16 * CAAM_CMD_SZ)
 #define DESC_AEAD_DEC_LEN              (DESC_AEAD_BASE + 21 * CAAM_CMD_SZ)
@@ -84,8 +82,6 @@
 
 #ifdef DEBUG
 /* for print_hex_dumps with line references */
-#define xstr(s) str(s)
-#define str(s) #s
 #define debug(format, arg...) printk(format, arg)
 #else
 #define debug(format, arg...)
@@ -285,7 +281,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
                return -ENOMEM;
        }
 #ifdef DEBUG
-       print_hex_dump(KERN_ERR, "aead enc shdesc@"xstr(__LINE__)": ",
+       print_hex_dump(KERN_ERR, "aead enc shdesc@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, desc,
                       desc_bytes(desc), 1);
 #endif
@@ -353,7 +349,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
                return -ENOMEM;
        }
 #ifdef DEBUG
-       print_hex_dump(KERN_ERR, "aead dec shdesc@"xstr(__LINE__)": ",
+       print_hex_dump(KERN_ERR, "aead dec shdesc@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, desc,
                       desc_bytes(desc), 1);
 #endif
@@ -436,7 +432,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
                return -ENOMEM;
        }
 #ifdef DEBUG
-       print_hex_dump(KERN_ERR, "aead givenc shdesc@"xstr(__LINE__)": ",
+       print_hex_dump(KERN_ERR, "aead givenc shdesc@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, desc,
                       desc_bytes(desc), 1);
 #endif
@@ -500,7 +496,7 @@ static int aead_setkey(struct crypto_aead *aead,
               keylen, enckeylen, authkeylen);
        printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
               ctx->split_key_len, ctx->split_key_pad_len);
-       print_hex_dump(KERN_ERR, "key in @"xstr(__LINE__)": ",
+       print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
 #endif
 
@@ -519,7 +515,7 @@ static int aead_setkey(struct crypto_aead *aead,
                return -ENOMEM;
        }
 #ifdef DEBUG
-       print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ",
+       print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
                       ctx->split_key_pad_len + enckeylen, 1);
 #endif
@@ -549,7 +545,7 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
        u32 *desc;
 
 #ifdef DEBUG
-       print_hex_dump(KERN_ERR, "key in @"xstr(__LINE__)": ",
+       print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
 #endif
 
@@ -598,7 +594,8 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
                return -ENOMEM;
        }
 #ifdef DEBUG
-       print_hex_dump(KERN_ERR, "ablkcipher enc shdesc@"xstr(__LINE__)": ",
+       print_hex_dump(KERN_ERR,
+                      "ablkcipher enc shdesc@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, desc,
                       desc_bytes(desc), 1);
 #endif
@@ -643,7 +640,8 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
        }
 
 #ifdef DEBUG
-       print_hex_dump(KERN_ERR, "ablkcipher dec shdesc@"xstr(__LINE__)": ",
+       print_hex_dump(KERN_ERR,
+                      "ablkcipher dec shdesc@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, desc,
                       desc_bytes(desc), 1);
 #endif
@@ -780,13 +778,13 @@ static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
        aead_unmap(jrdev, edesc, req);
 
 #ifdef DEBUG
-       print_hex_dump(KERN_ERR, "assoc  @"xstr(__LINE__)": ",
+       print_hex_dump(KERN_ERR, "assoc  @"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
                       req->assoclen , 1);
-       print_hex_dump(KERN_ERR, "dstiv  @"xstr(__LINE__)": ",
+       print_hex_dump(KERN_ERR, "dstiv  @"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src) - ivsize,
                       edesc->src_nents ? 100 : ivsize, 1);
-       print_hex_dump(KERN_ERR, "dst    @"xstr(__LINE__)": ",
+       print_hex_dump(KERN_ERR, "dst    @"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
                       edesc->src_nents ? 100 : req->cryptlen +
                       ctx->authsize + 4, 1);
@@ -814,10 +812,10 @@ static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
                 offsetof(struct aead_edesc, hw_desc));
 
 #ifdef DEBUG
-       print_hex_dump(KERN_ERR, "dstiv  @"xstr(__LINE__)": ",
+       print_hex_dump(KERN_ERR, "dstiv  @"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
                       ivsize, 1);
-       print_hex_dump(KERN_ERR, "dst    @"xstr(__LINE__)": ",
+       print_hex_dump(KERN_ERR, "dst    @"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->dst),
                       req->cryptlen, 1);
 #endif
@@ -837,7 +835,7 @@ static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
                err = -EBADMSG;
 
 #ifdef DEBUG
-       print_hex_dump(KERN_ERR, "iphdrout@"xstr(__LINE__)": ",
+       print_hex_dump(KERN_ERR, "iphdrout@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4,
                       ((char *)sg_virt(req->assoc) - sizeof(struct iphdr)),
                       sizeof(struct iphdr) + req->assoclen +
@@ -845,7 +843,7 @@ static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
                       ctx->authsize + 36, 1);
        if (!err && edesc->sec4_sg_bytes) {
                struct scatterlist *sg = sg_last(req->src, edesc->src_nents);
-               print_hex_dump(KERN_ERR, "sglastout@"xstr(__LINE__)": ",
+               print_hex_dump(KERN_ERR, "sglastout@"__stringify(__LINE__)": ",
                               DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(sg),
                        sg->length + ctx->authsize + 16, 1);
        }
@@ -878,10 +876,10 @@ static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
        }
 
 #ifdef DEBUG
-       print_hex_dump(KERN_ERR, "dstiv  @"xstr(__LINE__)": ",
+       print_hex_dump(KERN_ERR, "dstiv  @"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
                       edesc->src_nents > 1 ? 100 : ivsize, 1);
-       print_hex_dump(KERN_ERR, "dst    @"xstr(__LINE__)": ",
+       print_hex_dump(KERN_ERR, "dst    @"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
                       edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
 #endif
@@ -913,10 +911,10 @@ static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
        }
 
 #ifdef DEBUG
-       print_hex_dump(KERN_ERR, "dstiv  @"xstr(__LINE__)": ",
+       print_hex_dump(KERN_ERR, "dstiv  @"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
                       ivsize, 1);
-       print_hex_dump(KERN_ERR, "dst    @"xstr(__LINE__)": ",
+       print_hex_dump(KERN_ERR, "dst    @"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
                       edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
 #endif
@@ -947,16 +945,16 @@ static void init_aead_job(u32 *sh_desc, dma_addr_t ptr,
 #ifdef DEBUG
        debug("assoclen %d cryptlen %d authsize %d\n",
              req->assoclen, req->cryptlen, authsize);
-       print_hex_dump(KERN_ERR, "assoc  @"xstr(__LINE__)": ",
+       print_hex_dump(KERN_ERR, "assoc  @"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
                       req->assoclen , 1);
-       print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ",
+       print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
                       edesc->src_nents ? 100 : ivsize, 1);
-       print_hex_dump(KERN_ERR, "src    @"xstr(__LINE__)": ",
+       print_hex_dump(KERN_ERR, "src    @"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
                        edesc->src_nents ? 100 : req->cryptlen, 1);
-       print_hex_dump(KERN_ERR, "shrdesc@"xstr(__LINE__)": ",
+       print_hex_dump(KERN_ERR, "shrdesc@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
                       desc_bytes(sh_desc), 1);
 #endif
@@ -1025,15 +1023,15 @@ static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
 #ifdef DEBUG
        debug("assoclen %d cryptlen %d authsize %d\n",
              req->assoclen, req->cryptlen, authsize);
-       print_hex_dump(KERN_ERR, "assoc  @"xstr(__LINE__)": ",
+       print_hex_dump(KERN_ERR, "assoc  @"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
                       req->assoclen , 1);
-       print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ",
+       print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1);
-       print_hex_dump(KERN_ERR, "src    @"xstr(__LINE__)": ",
+       print_hex_dump(KERN_ERR, "src    @"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
                        edesc->src_nents > 1 ? 100 : req->cryptlen, 1);
-       print_hex_dump(KERN_ERR, "shrdesc@"xstr(__LINE__)": ",
+       print_hex_dump(KERN_ERR, "shrdesc@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
                       desc_bytes(sh_desc), 1);
 #endif
@@ -1086,10 +1084,10 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
        int len, sec4_sg_index = 0;
 
 #ifdef DEBUG
-       print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ",
+       print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
                       ivsize, 1);
-       print_hex_dump(KERN_ERR, "src    @"xstr(__LINE__)": ",
+       print_hex_dump(KERN_ERR, "src    @"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
                       edesc->src_nents ? 100 : req->nbytes, 1);
 #endif
@@ -1247,7 +1245,7 @@ static int aead_encrypt(struct aead_request *req)
        init_aead_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req,
                      all_contig, true);
 #ifdef DEBUG
-       print_hex_dump(KERN_ERR, "aead jobdesc@"xstr(__LINE__)": ",
+       print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
                       desc_bytes(edesc->hw_desc), 1);
 #endif
@@ -1281,7 +1279,7 @@ static int aead_decrypt(struct aead_request *req)
                return PTR_ERR(edesc);
 
 #ifdef DEBUG
-       print_hex_dump(KERN_ERR, "dec src@"xstr(__LINE__)": ",
+       print_hex_dump(KERN_ERR, "dec src@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
                       req->cryptlen, 1);
 #endif
@@ -1290,7 +1288,7 @@ static int aead_decrypt(struct aead_request *req)
        init_aead_job(ctx->sh_desc_dec,
                      ctx->sh_desc_dec_dma, edesc, req, all_contig, false);
 #ifdef DEBUG
-       print_hex_dump(KERN_ERR, "aead jobdesc@"xstr(__LINE__)": ",
+       print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
                       desc_bytes(edesc->hw_desc), 1);
 #endif
@@ -1437,7 +1435,7 @@ static int aead_givencrypt(struct aead_givcrypt_request *areq)
                return PTR_ERR(edesc);
 
 #ifdef DEBUG
-       print_hex_dump(KERN_ERR, "giv src@"xstr(__LINE__)": ",
+       print_hex_dump(KERN_ERR, "giv src@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
                       req->cryptlen, 1);
 #endif
@@ -1446,7 +1444,7 @@ static int aead_givencrypt(struct aead_givcrypt_request *areq)
        init_aead_giv_job(ctx->sh_desc_givenc,
                          ctx->sh_desc_givenc_dma, edesc, req, contig);
 #ifdef DEBUG
-       print_hex_dump(KERN_ERR, "aead jobdesc@"xstr(__LINE__)": ",
+       print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
                       desc_bytes(edesc->hw_desc), 1);
 #endif
@@ -1546,7 +1544,7 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
        edesc->iv_dma = iv_dma;
 
 #ifdef DEBUG
-       print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"xstr(__LINE__)": ",
+       print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
                       sec4_sg_bytes, 1);
 #endif
@@ -1575,7 +1573,7 @@ static int ablkcipher_encrypt(struct ablkcipher_request *req)
        init_ablkcipher_job(ctx->sh_desc_enc,
                ctx->sh_desc_enc_dma, edesc, req, iv_contig);
 #ifdef DEBUG
-       print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"xstr(__LINE__)": ",
+       print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
                       desc_bytes(edesc->hw_desc), 1);
 #endif
@@ -1613,7 +1611,7 @@ static int ablkcipher_decrypt(struct ablkcipher_request *req)
                ctx->sh_desc_dec_dma, edesc, req, iv_contig);
        desc = edesc->hw_desc;
 #ifdef DEBUG
-       print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"xstr(__LINE__)": ",
+       print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
                       desc_bytes(edesc->hw_desc), 1);
 #endif
index 84573b4d6f92809d5cd98fa968f6dd1a617482d3..e732bd962e98cc715db6463c7587dbb2511707e3 100644 (file)
@@ -72,8 +72,6 @@
 #define CAAM_MAX_HASH_DIGEST_SIZE      SHA512_DIGEST_SIZE
 
 /* length of descriptors text */
-#define DESC_JOB_IO_LEN                        (CAAM_CMD_SZ * 5 + CAAM_PTR_SZ * 3)
-
 #define DESC_AHASH_BASE                        (4 * CAAM_CMD_SZ)
 #define DESC_AHASH_UPDATE_LEN          (6 * CAAM_CMD_SZ)
 #define DESC_AHASH_UPDATE_FIRST_LEN    (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
@@ -91,8 +89,6 @@
 
 #ifdef DEBUG
 /* for print_hex_dumps with line references */
-#define xstr(s) str(s)
-#define str(s) #s
 #define debug(format, arg...) printk(format, arg)
 #else
 #define debug(format, arg...)
@@ -331,7 +327,8 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
                return -ENOMEM;
        }
 #ifdef DEBUG
-       print_hex_dump(KERN_ERR, "ahash update shdesc@"xstr(__LINE__)": ",
+       print_hex_dump(KERN_ERR,
+                      "ahash update shdesc@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
 #endif
 
@@ -349,7 +346,8 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
                return -ENOMEM;
        }
 #ifdef DEBUG
-       print_hex_dump(KERN_ERR, "ahash update first shdesc@"xstr(__LINE__)": ",
+       print_hex_dump(KERN_ERR,
+                      "ahash update first shdesc@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
 #endif
 
@@ -366,7 +364,7 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
                return -ENOMEM;
        }
 #ifdef DEBUG
-       print_hex_dump(KERN_ERR, "ahash final shdesc@"xstr(__LINE__)": ",
+       print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, desc,
                       desc_bytes(desc), 1);
 #endif
@@ -384,7 +382,7 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
                return -ENOMEM;
        }
 #ifdef DEBUG
-       print_hex_dump(KERN_ERR, "ahash finup shdesc@"xstr(__LINE__)": ",
+       print_hex_dump(KERN_ERR, "ahash finup shdesc@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, desc,
                       desc_bytes(desc), 1);
 #endif
@@ -403,7 +401,8 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
                return -ENOMEM;
        }
 #ifdef DEBUG
-       print_hex_dump(KERN_ERR, "ahash digest shdesc@"xstr(__LINE__)": ",
+       print_hex_dump(KERN_ERR,
+                      "ahash digest shdesc@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, desc,
                       desc_bytes(desc), 1);
 #endif
@@ -464,9 +463,9 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
                         LDST_SRCDST_BYTE_CONTEXT);
 
 #ifdef DEBUG
-       print_hex_dump(KERN_ERR, "key_in@"xstr(__LINE__)": ",
+       print_hex_dump(KERN_ERR, "key_in@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1);
-       print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
+       print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
 #endif
 
@@ -479,7 +478,8 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
                wait_for_completion_interruptible(&result.completion);
                ret = result.err;
 #ifdef DEBUG
-               print_hex_dump(KERN_ERR, "digested key@"xstr(__LINE__)": ",
+               print_hex_dump(KERN_ERR,
+                              "digested key@"__stringify(__LINE__)": ",
                               DUMP_PREFIX_ADDRESS, 16, 4, key_in,
                               digestsize, 1);
 #endif
@@ -530,7 +530,7 @@ static int ahash_setkey(struct crypto_ahash *ahash,
 #ifdef DEBUG
        printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
               ctx->split_key_len, ctx->split_key_pad_len);
-       print_hex_dump(KERN_ERR, "key in @"xstr(__LINE__)": ",
+       print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
 #endif
 
@@ -545,7 +545,7 @@ static int ahash_setkey(struct crypto_ahash *ahash,
                return -ENOMEM;
        }
 #ifdef DEBUG
-       print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ",
+       print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
                       ctx->split_key_pad_len, 1);
 #endif
@@ -638,11 +638,11 @@ static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
        kfree(edesc);
 
 #ifdef DEBUG
-       print_hex_dump(KERN_ERR, "ctx@"xstr(__LINE__)": ",
+       print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
                       ctx->ctx_len, 1);
        if (req->result)
-               print_hex_dump(KERN_ERR, "result@"xstr(__LINE__)": ",
+               print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
                               DUMP_PREFIX_ADDRESS, 16, 4, req->result,
                               digestsize, 1);
 #endif
@@ -676,11 +676,11 @@ static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
        kfree(edesc);
 
 #ifdef DEBUG
-       print_hex_dump(KERN_ERR, "ctx@"xstr(__LINE__)": ",
+       print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
                       ctx->ctx_len, 1);
        if (req->result)
-               print_hex_dump(KERN_ERR, "result@"xstr(__LINE__)": ",
+               print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
                               DUMP_PREFIX_ADDRESS, 16, 4, req->result,
                               digestsize, 1);
 #endif
@@ -714,11 +714,11 @@ static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
        kfree(edesc);
 
 #ifdef DEBUG
-       print_hex_dump(KERN_ERR, "ctx@"xstr(__LINE__)": ",
+       print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
                       ctx->ctx_len, 1);
        if (req->result)
-               print_hex_dump(KERN_ERR, "result@"xstr(__LINE__)": ",
+               print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
                               DUMP_PREFIX_ADDRESS, 16, 4, req->result,
                               digestsize, 1);
 #endif
@@ -752,11 +752,11 @@ static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
        kfree(edesc);
 
 #ifdef DEBUG
-       print_hex_dump(KERN_ERR, "ctx@"xstr(__LINE__)": ",
+       print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
                       ctx->ctx_len, 1);
        if (req->result)
-               print_hex_dump(KERN_ERR, "result@"xstr(__LINE__)": ",
+               print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
                               DUMP_PREFIX_ADDRESS, 16, 4, req->result,
                               digestsize, 1);
 #endif
@@ -852,7 +852,7 @@ static int ahash_update_ctx(struct ahash_request *req)
                append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);
 
 #ifdef DEBUG
-               print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
+               print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
                               DUMP_PREFIX_ADDRESS, 16, 4, desc,
                               desc_bytes(desc), 1);
 #endif
@@ -871,9 +871,9 @@ static int ahash_update_ctx(struct ahash_request *req)
                *next_buflen = last_buflen;
        }
 #ifdef DEBUG
-       print_hex_dump(KERN_ERR, "buf@"xstr(__LINE__)": ",
+       print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
-       print_hex_dump(KERN_ERR, "next buf@"xstr(__LINE__)": ",
+       print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
                       *next_buflen, 1);
 #endif
@@ -937,7 +937,7 @@ static int ahash_final_ctx(struct ahash_request *req)
                                                digestsize);
 
 #ifdef DEBUG
-       print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
+       print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
 #endif
 
@@ -1016,7 +1016,7 @@ static int ahash_finup_ctx(struct ahash_request *req)
                                                digestsize);
 
 #ifdef DEBUG
-       print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
+       print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
 #endif
 
@@ -1086,7 +1086,7 @@ static int ahash_digest(struct ahash_request *req)
                                                digestsize);
 
 #ifdef DEBUG
-       print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
+       print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
 #endif
 
@@ -1140,7 +1140,7 @@ static int ahash_final_no_ctx(struct ahash_request *req)
        edesc->src_nents = 0;
 
 #ifdef DEBUG
-       print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
+       print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
 #endif
 
@@ -1228,7 +1228,7 @@ static int ahash_update_no_ctx(struct ahash_request *req)
                map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
 
 #ifdef DEBUG
-               print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
+               print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
                               DUMP_PREFIX_ADDRESS, 16, 4, desc,
                               desc_bytes(desc), 1);
 #endif
@@ -1250,9 +1250,9 @@ static int ahash_update_no_ctx(struct ahash_request *req)
                *next_buflen = 0;
        }
 #ifdef DEBUG
-       print_hex_dump(KERN_ERR, "buf@"xstr(__LINE__)": ",
+       print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
-       print_hex_dump(KERN_ERR, "next buf@"xstr(__LINE__)": ",
+       print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
                       *next_buflen, 1);
 #endif
@@ -1321,7 +1321,7 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
                                                digestsize);
 
 #ifdef DEBUG
-       print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
+       print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
 #endif
 
@@ -1414,7 +1414,7 @@ static int ahash_update_first(struct ahash_request *req)
                map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
 
 #ifdef DEBUG
-               print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
+               print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
                               DUMP_PREFIX_ADDRESS, 16, 4, desc,
                               desc_bytes(desc), 1);
 #endif
@@ -1438,7 +1438,7 @@ static int ahash_update_first(struct ahash_request *req)
                sg_copy(next_buf, req->src, req->nbytes);
        }
 #ifdef DEBUG
-       print_hex_dump(KERN_ERR, "next buf@"xstr(__LINE__)": ",
+       print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
                       *next_buflen, 1);
 #endif
index f5d6deced1cbaac0b00702e60aabdf0a9598c3b7..b010d42a18035fa48a8797dc802267f70db531d2 100644 (file)
@@ -75,55 +75,53 @@ static void build_instantiation_desc(u32 *desc)
                         OP_ALG_RNG4_SK);
 }
 
-struct instantiate_result {
-       struct completion completion;
-       int err;
-};
-
-static void rng4_init_done(struct device *dev, u32 *desc, u32 err,
-                          void *context)
-{
-       struct instantiate_result *instantiation = context;
-
-       if (err) {
-               char tmp[CAAM_ERROR_STR_MAX];
-
-               dev_err(dev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
-       }
-
-       instantiation->err = err;
-       complete(&instantiation->completion);
-}
-
-static int instantiate_rng(struct device *jrdev)
+static int instantiate_rng(struct device *ctrldev)
 {
-       struct instantiate_result instantiation;
-
-       dma_addr_t desc_dma;
+       struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
+       struct caam_full __iomem *topregs;
+       unsigned int timeout = 100000;
        u32 *desc;
-       int ret;
+       int i, ret = 0;
 
        desc = kmalloc(CAAM_CMD_SZ * 6, GFP_KERNEL | GFP_DMA);
        if (!desc) {
-               dev_err(jrdev, "cannot allocate RNG init descriptor memory\n");
+               dev_err(ctrldev, "can't allocate RNG init descriptor memory\n");
                return -ENOMEM;
        }
-
        build_instantiation_desc(desc);
-       desc_dma = dma_map_single(jrdev, desc, desc_bytes(desc), DMA_TO_DEVICE);
-       init_completion(&instantiation.completion);
-       ret = caam_jr_enqueue(jrdev, desc, rng4_init_done, &instantiation);
-       if (!ret) {
-               wait_for_completion_interruptible(&instantiation.completion);
-               ret = instantiation.err;
-               if (ret)
-                       dev_err(jrdev, "unable to instantiate RNG\n");
+
+       /* Set the bit to request direct access to DECO0 */
+       topregs = (struct caam_full __iomem *)ctrlpriv->ctrl;
+       setbits32(&topregs->ctrl.deco_rq, DECORR_RQD0ENABLE);
+
+       while (!(rd_reg32(&topregs->ctrl.deco_rq) & DECORR_DEN0) &&
+                                                                --timeout)
+               cpu_relax();
+
+       if (!timeout) {
+               dev_err(ctrldev, "failed to acquire DECO 0\n");
+               ret = -EIO;
+               goto out;
        }
 
-       dma_unmap_single(jrdev, desc_dma, desc_bytes(desc), DMA_TO_DEVICE);
+       for (i = 0; i < desc_len(desc); i++)
+               topregs->deco.descbuf[i] = *(desc + i);
 
-       kfree(desc);
+       wr_reg32(&topregs->deco.jr_ctl_hi, DECO_JQCR_WHL | DECO_JQCR_FOUR);
 
+       timeout = 10000000;
+       while ((rd_reg32(&topregs->deco.desc_dbg) & DECO_DBG_VALID) &&
+                                                                --timeout)
+               cpu_relax();
+
+       if (!timeout) {
+               dev_err(ctrldev, "failed to instantiate RNG\n");
+               ret = -EIO;
+       }
+
+       clrbits32(&topregs->ctrl.deco_rq, DECORR_RQD0ENABLE);
+out:
+       kfree(desc);
        return ret;
 }
 
@@ -303,7 +301,7 @@ static int caam_probe(struct platform_device *pdev)
        if ((cha_vid & CHA_ID_RNG_MASK) >> CHA_ID_RNG_SHIFT >= 4 &&
            !(rd_reg32(&topregs->ctrl.r4tst[0].rdsta) & RDSTA_IF0)) {
                kick_trng(pdev);
-               ret = instantiate_rng(ctrlpriv->jrdev[0]);
+               ret = instantiate_rng(dev);
                if (ret) {
                        caam_remove(pdev);
                        return ret;
@@ -315,9 +313,6 @@ static int caam_probe(struct platform_device *pdev)
 
        /* NOTE: RTIC detection ought to go here, around Si time */
 
-       /* Initialize queue allocator lock */
-       spin_lock_init(&ctrlpriv->jr_alloc_lock);
-
        caam_id = rd_reg64(&topregs->ctrl.perfmon.caam_id);
 
        /* Report "alive" for developer to see */
index fe3bfd1b08cae14bf00c59ccddf9d026e9b2c717..cd5f678847ce3e7e93b1634a510bcd548abafe01 100644 (file)
@@ -10,6 +10,7 @@
 #define CAAM_CMD_SZ sizeof(u32)
 #define CAAM_PTR_SZ sizeof(dma_addr_t)
 #define CAAM_DESC_BYTES_MAX (CAAM_CMD_SZ * MAX_CAAM_DESCSIZE)
+#define DESC_JOB_IO_LEN (CAAM_CMD_SZ * 5 + CAAM_PTR_SZ * 3)
 
 #ifdef DEBUG
 #define PRINT_POS do { printk(KERN_DEBUG "%02d: %s\n", desc_len(desc),\
index e4a16b741371d1df496d6344be5fc214896ba50c..34c4b9f7fbfae414a1578e37da245fd9119ac8fd 100644 (file)
@@ -9,9 +9,6 @@
 #ifndef INTERN_H
 #define INTERN_H
 
-#define JOBR_UNASSIGNED 0
-#define JOBR_ASSIGNED 1
-
 /* Currently comes from Kconfig param as a ^2 (driver-required) */
 #define JOBR_DEPTH (1 << CONFIG_CRYPTO_DEV_FSL_CAAM_RINGSIZE)
 
@@ -46,7 +43,6 @@ struct caam_drv_private_jr {
        struct caam_job_ring __iomem *rregs;    /* JobR's register space */
        struct tasklet_struct irqtask;
        int irq;                        /* One per queue */
-       int assign;                     /* busy/free */
 
        /* Job ring info */
        int ringsize;   /* Size of rings (assume input = output) */
@@ -68,7 +64,6 @@ struct caam_drv_private {
 
        struct device *dev;
        struct device **jrdev; /* Alloc'ed array per sub-device */
-       spinlock_t jr_alloc_lock;
        struct platform_device *pdev;
 
        /* Physical-presence section */
index b4aa773ecbc83ec7f61548bae34145639ddbf5ec..105ba4da618059b63a71a36747008e362a04233f 100644 (file)
@@ -125,72 +125,6 @@ static void caam_jr_dequeue(unsigned long devarg)
        clrbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK);
 }
 
-/**
- * caam_jr_register() - Alloc a ring for someone to use as needed. Returns
- * an ordinal of the rings allocated, else returns -ENODEV if no rings
- * are available.
- * @ctrldev: points to the controller level dev (parent) that
- *           owns rings available for use.
- * @dev:     points to where a pointer to the newly allocated queue's
- *           dev can be written to if successful.
- **/
-int caam_jr_register(struct device *ctrldev, struct device **rdev)
-{
-       struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
-       struct caam_drv_private_jr *jrpriv = NULL;
-       int ring;
-
-       /* Lock, if free ring - assign, unlock */
-       spin_lock(&ctrlpriv->jr_alloc_lock);
-       for (ring = 0; ring < ctrlpriv->total_jobrs; ring++) {
-               jrpriv = dev_get_drvdata(ctrlpriv->jrdev[ring]);
-               if (jrpriv->assign == JOBR_UNASSIGNED) {
-                       jrpriv->assign = JOBR_ASSIGNED;
-                       *rdev = ctrlpriv->jrdev[ring];
-                       spin_unlock(&ctrlpriv->jr_alloc_lock);
-                       return ring;
-               }
-       }
-
-       /* If assigned, write dev where caller needs it */
-       spin_unlock(&ctrlpriv->jr_alloc_lock);
-       *rdev = NULL;
-
-       return -ENODEV;
-}
-EXPORT_SYMBOL(caam_jr_register);
-
-/**
- * caam_jr_deregister() - Deregister an API and release the queue.
- * Returns 0 if OK, -EBUSY if queue still contains pending entries
- * or unprocessed results at the time of the call
- * @dev     - points to the dev that identifies the queue to
- *            be released.
- **/
-int caam_jr_deregister(struct device *rdev)
-{
-       struct caam_drv_private_jr *jrpriv = dev_get_drvdata(rdev);
-       struct caam_drv_private *ctrlpriv;
-
-       /* Get the owning controller's private space */
-       ctrlpriv = dev_get_drvdata(jrpriv->parentdev);
-
-       /*
-        * Make sure ring empty before release
-        */
-       if (rd_reg32(&jrpriv->rregs->outring_used) ||
-           (rd_reg32(&jrpriv->rregs->inpring_avail) != JOBR_DEPTH))
-               return -EBUSY;
-
-       /* Release ring */
-       spin_lock(&ctrlpriv->jr_alloc_lock);
-       jrpriv->assign = JOBR_UNASSIGNED;
-       spin_unlock(&ctrlpriv->jr_alloc_lock);
-
-       return 0;
-}
-EXPORT_SYMBOL(caam_jr_deregister);
-
 /**
  * caam_jr_enqueue() - Enqueue a job descriptor head. Returns 0 if OK,
  * -EBUSY if the queue is full, -EIO if it cannot map the caller's
@@ -379,7 +313,6 @@ static int caam_jr_init(struct device *dev)
                  (JOBR_INTC_COUNT_THLD << JRCFG_ICDCT_SHIFT) |
                  (JOBR_INTC_TIME_THLD << JRCFG_ICTT_SHIFT));
 
-       jrp->assign = JOBR_UNASSIGNED;
        return 0;
 }
 
index c23df395b6220b2b294610ea17254492b2e1dfd7..9d8741a59037f82ea162ef2a0b894bca49b5b040 100644 (file)
@@ -8,8 +8,6 @@
 #define JR_H
 
 /* Prototypes for backend-level services exposed to APIs */
-int caam_jr_register(struct device *ctrldev, struct device **rdev);
-int caam_jr_deregister(struct device *rdev);
 int caam_jr_enqueue(struct device *dev, u32 *desc,
                    void (*cbk)(struct device *dev, u32 *desc, u32 status,
                                void *areq),
index 87138d2adb5fe9144c4ebd9a46911363f10fbc1f..ea2e406610ebf9450008f2e2baf9ee65be0839a0 100644 (file)
@@ -95,9 +95,9 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
                          LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK);
 
 #ifdef DEBUG
-       print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ",
+       print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, key_in, keylen, 1);
-       print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
+       print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
 #endif
 
@@ -110,7 +110,7 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
                wait_for_completion_interruptible(&result.completion);
                ret = result.err;
 #ifdef DEBUG
-               print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ",
+               print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
                               DUMP_PREFIX_ADDRESS, 16, 4, key_out,
                               split_key_pad_len, 1);
 #endif
index c09142fc13e372171fe6093b45bdd6a3121f65f1..4455396918de84320380fcca2eca01d694971114 100644 (file)
@@ -341,6 +341,8 @@ struct caam_ctrl {
 #define MCFGR_DMA_RESET                0x10000000
 #define MCFGR_LONG_PTR         0x00010000 /* Use >32-bit desc addressing */
 #define SCFGR_RDBENABLE                0x00000400
+#define DECORR_RQD0ENABLE      0x00000001 /* Enable DECO0 for direct access */
+#define DECORR_DEN0            0x00010000 /* DECO0 available for access*/
 
 /* AXI read cache control */
 #define MCFGR_ARCACHE_SHIFT    12
@@ -703,9 +705,16 @@ struct caam_deco {
        struct deco_sg_table sctr_tbl[4];       /* DxSTR - Scatter Tables */
        u32 rsvd29[48];
        u32 descbuf[64];        /* DxDESB - Descriptor buffer */
-       u32 rsvd30[320];
+       u32 rscvd30[193];
+       u32 desc_dbg;           /* DxDDR - DECO Debug Register */
+       u32 rsvd31[126];
 };
 
+/* DECO DBG Register Valid Bit*/
+#define DECO_DBG_VALID         0x80000000
+#define DECO_JQCR_WHL          0x20000000
+#define DECO_JQCR_FOUR         0x10000000
+
 /*
  * Current top-level view of memory map is:
  *
@@ -733,6 +742,7 @@ struct caam_full {
        u64 rsvd[512];
        struct caam_assurance assure;
        struct caam_queue_if qi;
+       struct caam_deco deco;
 };
 
 #endif /* REGS_H */
index 35d483f8db66b2a3ecb204181ffd0d43b0aaaffe..7c0237dae02db1cde29272f0d9695f59f2028acf 100644 (file)
@@ -70,10 +70,15 @@ static int cbc_aes_nx_crypt(struct blkcipher_desc *desc,
 {
        struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm);
        struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
+       unsigned long irq_flags;
        int rc;
 
-       if (nbytes > nx_ctx->ap->databytelen)
-               return -EINVAL;
+       spin_lock_irqsave(&nx_ctx->lock, irq_flags);
+
+       if (nbytes > nx_ctx->ap->databytelen) {
+               rc = -EINVAL;
+               goto out;
+       }
 
        if (enc)
                NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
@@ -95,10 +100,12 @@ static int cbc_aes_nx_crypt(struct blkcipher_desc *desc,
        if (rc)
                goto out;
 
+       memcpy(desc->info, csbcpb->cpb.aes_cbc.cv, AES_BLOCK_SIZE);
        atomic_inc(&(nx_ctx->stats->aes_ops));
        atomic64_add(csbcpb->csb.processed_byte_count,
                     &(nx_ctx->stats->aes_bytes));
 out:
+       spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
        return rc;
 }
 
index ef5eae6d1400382a8830bca980752b123e12bf52..39d42245bc79a3d647ecff7556cb8831654be96f 100644 (file)
@@ -271,10 +271,15 @@ static int ccm_nx_decrypt(struct aead_request   *req,
        unsigned int nbytes = req->cryptlen;
        unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
        struct nx_ccm_priv *priv = &nx_ctx->priv.ccm;
+       unsigned long irq_flags;
        int rc = -1;
 
-       if (nbytes > nx_ctx->ap->databytelen)
-               return -EINVAL;
+       spin_lock_irqsave(&nx_ctx->lock, irq_flags);
+
+       if (nbytes > nx_ctx->ap->databytelen) {
+               rc = -EINVAL;
+               goto out;
+       }
 
        nbytes -= authsize;
 
@@ -308,6 +313,7 @@ static int ccm_nx_decrypt(struct aead_request   *req,
        rc = memcmp(csbcpb->cpb.aes_ccm.out_pat_or_mac, priv->oauth_tag,
                    authsize) ? -EBADMSG : 0;
 out:
+       spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
        return rc;
 }
 
@@ -318,10 +324,15 @@ static int ccm_nx_encrypt(struct aead_request   *req,
        struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
        unsigned int nbytes = req->cryptlen;
        unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
+       unsigned long irq_flags;
        int rc = -1;
 
-       if (nbytes > nx_ctx->ap->databytelen)
-               return -EINVAL;
+       spin_lock_irqsave(&nx_ctx->lock, irq_flags);
+
+       if (nbytes > nx_ctx->ap->databytelen) {
+               rc = -EINVAL;
+               goto out;
+       }
 
        rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes,
                          csbcpb->cpb.aes_ccm.in_pat_or_b0);
@@ -350,6 +361,7 @@ static int ccm_nx_encrypt(struct aead_request   *req,
                                 req->dst, nbytes, authsize,
                                 SCATTERWALK_TO_SG);
 out:
+       spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
        return rc;
 }
 
index b6286f14680ba82fcb7a32136dddea9b2a24ce29..762611b883cb9d1ff536138ebcef73e36662abe1 100644 (file)
@@ -88,10 +88,15 @@ static int ctr_aes_nx_crypt(struct blkcipher_desc *desc,
 {
        struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm);
        struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
+       unsigned long irq_flags;
        int rc;
 
-       if (nbytes > nx_ctx->ap->databytelen)
-               return -EINVAL;
+       spin_lock_irqsave(&nx_ctx->lock, irq_flags);
+
+       if (nbytes > nx_ctx->ap->databytelen) {
+               rc = -EINVAL;
+               goto out;
+       }
 
        rc = nx_build_sg_lists(nx_ctx, desc, dst, src, nbytes,
                               csbcpb->cpb.aes_ctr.iv);
@@ -112,6 +117,7 @@ static int ctr_aes_nx_crypt(struct blkcipher_desc *desc,
        atomic64_add(csbcpb->csb.processed_byte_count,
                     &(nx_ctx->stats->aes_bytes));
 out:
+       spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
        return rc;
 }
 
index 7bbc9a81da219e5c27e021c9ef6c880d194c90d3..77dbe084ba4168b1ca42fbedfe3ada9b9c9a19c1 100644 (file)
@@ -70,10 +70,15 @@ static int ecb_aes_nx_crypt(struct blkcipher_desc *desc,
 {
        struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm);
        struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
+       unsigned long irq_flags;
        int rc;
 
-       if (nbytes > nx_ctx->ap->databytelen)
-               return -EINVAL;
+       spin_lock_irqsave(&nx_ctx->lock, irq_flags);
+
+       if (nbytes > nx_ctx->ap->databytelen) {
+               rc = -EINVAL;
+               goto out;
+       }
 
        if (enc)
                NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
@@ -98,6 +103,7 @@ static int ecb_aes_nx_crypt(struct blkcipher_desc *desc,
        atomic64_add(csbcpb->csb.processed_byte_count,
                     &(nx_ctx->stats->aes_bytes));
 out:
+       spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
        return rc;
 }
 
index 6cca6c392b00f34fa65ad4663b1725bd7b3842fe..74feee10f943aa0acd87b6b9fb15c194e6f4787f 100644 (file)
@@ -166,8 +166,11 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc)
        struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
        struct blkcipher_desc desc;
        unsigned int nbytes = req->cryptlen;
+       unsigned long irq_flags;
        int rc = -EINVAL;
 
+       spin_lock_irqsave(&nx_ctx->lock, irq_flags);
+
        if (nbytes > nx_ctx->ap->databytelen)
                goto out;
 
@@ -243,11 +246,11 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc)
                                 req->dst, nbytes,
                                 crypto_aead_authsize(crypto_aead_reqtfm(req)),
                                 SCATTERWALK_TO_SG);
-       } else if (req->assoclen) {
+       } else {
                u8 *itag = nx_ctx->priv.gcm.iauth_tag;
                u8 *otag = csbcpb->cpb.aes_gcm.out_pat_or_mac;
 
-               scatterwalk_map_and_copy(itag, req->dst, nbytes,
+               scatterwalk_map_and_copy(itag, req->src, nbytes,
                                 crypto_aead_authsize(crypto_aead_reqtfm(req)),
                                 SCATTERWALK_FROM_SG);
                rc = memcmp(itag, otag,
@@ -255,6 +258,7 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc)
                     -EBADMSG : 0;
        }
 out:
+       spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
        return rc;
 }
 
index 93923e4628c05b8af34bf19e90eb3cb692a4a0a0..658da0fd3e1f18f241143fe1e4d3c12a4f0b8af0 100644 (file)
@@ -89,8 +89,11 @@ static int nx_xcbc_update(struct shash_desc *desc,
        struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
        struct nx_sg *in_sg;
        u32 to_process, leftover;
+       unsigned long irq_flags;
        int rc = 0;
 
+       spin_lock_irqsave(&nx_ctx->lock, irq_flags);
+
        if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
                /* we've hit the nx chip previously and we're updating again,
                 * so copy over the partial digest */
@@ -158,6 +161,7 @@ static int nx_xcbc_update(struct shash_desc *desc,
        /* everything after the first update is continuation */
        NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
 out:
+       spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
        return rc;
 }
 
@@ -167,8 +171,11 @@ static int nx_xcbc_final(struct shash_desc *desc, u8 *out)
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
        struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
        struct nx_sg *in_sg, *out_sg;
+       unsigned long irq_flags;
        int rc = 0;
 
+       spin_lock_irqsave(&nx_ctx->lock, irq_flags);
+
        if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
                /* we've hit the nx chip previously, now we're finalizing,
                 * so copy over the partial digest */
@@ -211,6 +218,7 @@ static int nx_xcbc_final(struct shash_desc *desc, u8 *out)
 
        memcpy(out, csbcpb->cpb.aes_xcbc.out_cv_mac, AES_BLOCK_SIZE);
 out:
+       spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
        return rc;
 }
 
index 67024f2f0b78746bdbfcb8c5ffde595754a0b155..6547a7104bf64f96b4e0fe6c9a58e7114b3884e9 100644 (file)
@@ -55,71 +55,91 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
        struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
        struct nx_sg *in_sg;
-       u64 to_process, leftover;
+       u64 to_process, leftover, total;
+       u32 max_sg_len;
+       unsigned long irq_flags;
        int rc = 0;
 
-       if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
-               /* we've hit the nx chip previously and we're updating again,
-                * so copy over the partial digest */
-               memcpy(csbcpb->cpb.sha256.input_partial_digest,
-                      csbcpb->cpb.sha256.message_digest, SHA256_DIGEST_SIZE);
-       }
+       spin_lock_irqsave(&nx_ctx->lock, irq_flags);
 
        /* 2 cases for total data len:
-        *  1: <= SHA256_BLOCK_SIZE: copy into state, return 0
-        *  2: > SHA256_BLOCK_SIZE: process X blocks, copy in leftover
+        *  1: < SHA256_BLOCK_SIZE: copy into state, return 0
+        *  2: >= SHA256_BLOCK_SIZE: process X blocks, copy in leftover
         */
-       if (len + sctx->count < SHA256_BLOCK_SIZE) {
+       total = sctx->count + len;
+       if (total < SHA256_BLOCK_SIZE) {
                memcpy(sctx->buf + sctx->count, data, len);
                sctx->count += len;
                goto out;
        }
 
-       /* to_process: the SHA256_BLOCK_SIZE data chunk to process in this
-        * update */
-       to_process = (sctx->count + len) & ~(SHA256_BLOCK_SIZE - 1);
-       leftover = (sctx->count + len) & (SHA256_BLOCK_SIZE - 1);
-
-       if (sctx->count) {
-               in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)sctx->buf,
-                                        sctx->count, nx_ctx->ap->sglen);
-               in_sg = nx_build_sg_list(in_sg, (u8 *)data,
+       in_sg = nx_ctx->in_sg;
+       max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
+                          nx_ctx->ap->sglen);
+
+       do {
+               /*
+                * to_process: the SHA256_BLOCK_SIZE data chunk to process in
+                * this update. This value is also restricted by the sg list
+                * limits.
+                */
+               to_process = min_t(u64, total, nx_ctx->ap->databytelen);
+               to_process = min_t(u64, to_process,
+                                  NX_PAGE_SIZE * (max_sg_len - 1));
+               to_process = to_process & ~(SHA256_BLOCK_SIZE - 1);
+               leftover = total - to_process;
+
+               if (sctx->count) {
+                       in_sg = nx_build_sg_list(nx_ctx->in_sg,
+                                                (u8 *) sctx->buf,
+                                                sctx->count, max_sg_len);
+               }
+               in_sg = nx_build_sg_list(in_sg, (u8 *) data,
                                         to_process - sctx->count,
-                                        nx_ctx->ap->sglen);
+                                        max_sg_len);
                nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) *
                                        sizeof(struct nx_sg);
-       } else {
-               in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)data,
-                                        to_process, nx_ctx->ap->sglen);
-               nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) *
-                                       sizeof(struct nx_sg);
-       }
 
-       NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
+               if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
+                       /*
+                        * we've hit the nx chip previously and we're updating
+                        * again, so copy over the partial digest.
+                        */
+                       memcpy(csbcpb->cpb.sha256.input_partial_digest,
+                              csbcpb->cpb.sha256.message_digest,
+                              SHA256_DIGEST_SIZE);
+               }
 
-       if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
-               rc = -EINVAL;
-               goto out;
-       }
+               NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
+               if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
+                       rc = -EINVAL;
+                       goto out;
+               }
 
-       rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
-                          desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
-       if (rc)
-               goto out;
+               rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
+                                  desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+               if (rc)
+                       goto out;
 
-       atomic_inc(&(nx_ctx->stats->sha256_ops));
+               atomic_inc(&(nx_ctx->stats->sha256_ops));
+               csbcpb->cpb.sha256.message_bit_length += (u64)
+                       (csbcpb->cpb.sha256.spbc * 8);
+
+               /* everything after the first update is continuation */
+               NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
+
+               total -= to_process;
+               data += to_process;
+               sctx->count = 0;
+               in_sg = nx_ctx->in_sg;
+       } while (leftover >= SHA256_BLOCK_SIZE);
 
        /* copy the leftover back into the state struct */
        if (leftover)
-               memcpy(sctx->buf, data + len - leftover, leftover);
+               memcpy(sctx->buf, data, leftover);
        sctx->count = leftover;
-
-       csbcpb->cpb.sha256.message_bit_length += (u64)
-               (csbcpb->cpb.sha256.spbc * 8);
-
-       /* everything after the first update is continuation */
-       NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
 out:
+       spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
        return rc;
 }
 
@@ -129,8 +149,13 @@ static int nx_sha256_final(struct shash_desc *desc, u8 *out)
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
        struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
        struct nx_sg *in_sg, *out_sg;
+       u32 max_sg_len;
+       unsigned long irq_flags;
        int rc;
 
+       spin_lock_irqsave(&nx_ctx->lock, irq_flags);
+
+       max_sg_len = min_t(u32, nx_driver.of.max_sg_len, nx_ctx->ap->sglen);
 
        if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
                /* we've hit the nx chip previously, now we're finalizing,
@@ -146,9 +171,9 @@ static int nx_sha256_final(struct shash_desc *desc, u8 *out)
        csbcpb->cpb.sha256.message_bit_length += (u64)(sctx->count * 8);
 
        in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)sctx->buf,
-                                sctx->count, nx_ctx->ap->sglen);
+                                sctx->count, max_sg_len);
        out_sg = nx_build_sg_list(nx_ctx->out_sg, out, SHA256_DIGEST_SIZE,
-                                 nx_ctx->ap->sglen);
+                                 max_sg_len);
        nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
        nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
 
@@ -168,6 +193,7 @@ static int nx_sha256_final(struct shash_desc *desc, u8 *out)
                     &(nx_ctx->stats->sha256_bytes));
        memcpy(out, csbcpb->cpb.sha256.message_digest, SHA256_DIGEST_SIZE);
 out:
+       spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
        return rc;
 }
 
@@ -177,6 +203,9 @@ static int nx_sha256_export(struct shash_desc *desc, void *out)
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
        struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
        struct sha256_state *octx = out;
+       unsigned long irq_flags;
+
+       spin_lock_irqsave(&nx_ctx->lock, irq_flags);
 
        octx->count = sctx->count +
                      (csbcpb->cpb.sha256.message_bit_length / 8);
@@ -199,6 +228,7 @@ static int nx_sha256_export(struct shash_desc *desc, void *out)
                octx->state[7] = SHA256_H7;
        }
 
+       spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
        return 0;
 }
 
@@ -208,6 +238,9 @@ static int nx_sha256_import(struct shash_desc *desc, const void *in)
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
        struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
        const struct sha256_state *ictx = in;
+       unsigned long irq_flags;
+
+       spin_lock_irqsave(&nx_ctx->lock, irq_flags);
 
        memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf));
 
@@ -222,6 +255,7 @@ static int nx_sha256_import(struct shash_desc *desc, const void *in)
                NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
        }
 
+       spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
        return 0;
 }
 
index 08eee11223490c7a1a249a2f559f960e72e7af2c..236e6afeab1067701cd75e07bfdcf00fe41baef0 100644 (file)
@@ -55,73 +55,93 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
        struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
        struct nx_sg *in_sg;
-       u64 to_process, leftover, spbc_bits;
+       u64 to_process, leftover, total, spbc_bits;
+       u32 max_sg_len;
+       unsigned long irq_flags;
        int rc = 0;
 
-       if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
-               /* we've hit the nx chip previously and we're updating again,
-                * so copy over the partial digest */
-               memcpy(csbcpb->cpb.sha512.input_partial_digest,
-                      csbcpb->cpb.sha512.message_digest, SHA512_DIGEST_SIZE);
-       }
+       spin_lock_irqsave(&nx_ctx->lock, irq_flags);
 
        /* 2 cases for total data len:
-        *  1: <= SHA512_BLOCK_SIZE: copy into state, return 0
-        *  2: > SHA512_BLOCK_SIZE: process X blocks, copy in leftover
+        *  1: < SHA512_BLOCK_SIZE: copy into state, return 0
+        *  2: >= SHA512_BLOCK_SIZE: process X blocks, copy in leftover
         */
-       if ((u64)len + sctx->count[0] < SHA512_BLOCK_SIZE) {
+       total = sctx->count[0] + len;
+       if (total < SHA512_BLOCK_SIZE) {
                memcpy(sctx->buf + sctx->count[0], data, len);
                sctx->count[0] += len;
                goto out;
        }
 
-       /* to_process: the SHA512_BLOCK_SIZE data chunk to process in this
-        * update */
-       to_process = (sctx->count[0] + len) & ~(SHA512_BLOCK_SIZE - 1);
-       leftover = (sctx->count[0] + len) & (SHA512_BLOCK_SIZE - 1);
-
-       if (sctx->count[0]) {
-               in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)sctx->buf,
-                                        sctx->count[0], nx_ctx->ap->sglen);
-               in_sg = nx_build_sg_list(in_sg, (u8 *)data,
+       in_sg = nx_ctx->in_sg;
+       max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
+                          nx_ctx->ap->sglen);
+
+       do {
+               /*
+                * to_process: the SHA512_BLOCK_SIZE data chunk to process in
+                * this update. This value is also restricted by the sg list
+                * limits.
+                */
+               to_process = min_t(u64, total, nx_ctx->ap->databytelen);
+               to_process = min_t(u64, to_process,
+                                  NX_PAGE_SIZE * (max_sg_len - 1));
+               to_process = to_process & ~(SHA512_BLOCK_SIZE - 1);
+               leftover = total - to_process;
+
+               if (sctx->count[0]) {
+                       in_sg = nx_build_sg_list(nx_ctx->in_sg,
+                                                (u8 *) sctx->buf,
+                                                sctx->count[0], max_sg_len);
+               }
+               in_sg = nx_build_sg_list(in_sg, (u8 *) data,
                                         to_process - sctx->count[0],
-                                        nx_ctx->ap->sglen);
-               nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) *
-                                       sizeof(struct nx_sg);
-       } else {
-               in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)data,
-                                        to_process, nx_ctx->ap->sglen);
+                                        max_sg_len);
                nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) *
                                        sizeof(struct nx_sg);
-       }
 
-       NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
+               if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
+                       /*
+                        * we've hit the nx chip previously and we're updating
+                        * again, so copy over the partial digest.
+                        */
+                       memcpy(csbcpb->cpb.sha512.input_partial_digest,
+                              csbcpb->cpb.sha512.message_digest,
+                              SHA512_DIGEST_SIZE);
+               }
 
-       if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
-               rc = -EINVAL;
-               goto out;
-       }
-
-       rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
-                          desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
-       if (rc)
-               goto out;
+               NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
+               if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
+                       rc = -EINVAL;
+                       goto out;
+               }
+
+               rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
+                                  desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+               if (rc)
+                       goto out;
+
+               atomic_inc(&(nx_ctx->stats->sha512_ops));
+               spbc_bits = csbcpb->cpb.sha512.spbc * 8;
+               csbcpb->cpb.sha512.message_bit_length_lo += spbc_bits;
+               if (csbcpb->cpb.sha512.message_bit_length_lo < spbc_bits)
+                       csbcpb->cpb.sha512.message_bit_length_hi++;
+
+               /* everything after the first update is continuation */
+               NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
 
-       atomic_inc(&(nx_ctx->stats->sha512_ops));
+               total -= to_process;
+               data += to_process;
+               sctx->count[0] = 0;
+               in_sg = nx_ctx->in_sg;
+       } while (leftover >= SHA512_BLOCK_SIZE);
 
        /* copy the leftover back into the state struct */
        if (leftover)
-               memcpy(sctx->buf, data + len - leftover, leftover);
+               memcpy(sctx->buf, data, leftover);
        sctx->count[0] = leftover;
-
-       spbc_bits = csbcpb->cpb.sha512.spbc * 8;
-       csbcpb->cpb.sha512.message_bit_length_lo += spbc_bits;
-       if (csbcpb->cpb.sha512.message_bit_length_lo < spbc_bits)
-               csbcpb->cpb.sha512.message_bit_length_hi++;
-
-       /* everything after the first update is continuation */
-       NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
 out:
+       spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
        return rc;
 }
 
@@ -131,9 +151,15 @@ static int nx_sha512_final(struct shash_desc *desc, u8 *out)
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
        struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
        struct nx_sg *in_sg, *out_sg;
+       u32 max_sg_len;
        u64 count0;
+       unsigned long irq_flags;
        int rc;
 
+       spin_lock_irqsave(&nx_ctx->lock, irq_flags);
+
+       max_sg_len = min_t(u32, nx_driver.of.max_sg_len, nx_ctx->ap->sglen);
+
        if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
                /* we've hit the nx chip previously, now we're finalizing,
                 * so copy over the partial digest */
@@ -152,9 +178,9 @@ static int nx_sha512_final(struct shash_desc *desc, u8 *out)
                csbcpb->cpb.sha512.message_bit_length_hi++;
 
        in_sg = nx_build_sg_list(nx_ctx->in_sg, sctx->buf, sctx->count[0],
-                                nx_ctx->ap->sglen);
+                                max_sg_len);
        out_sg = nx_build_sg_list(nx_ctx->out_sg, out, SHA512_DIGEST_SIZE,
-                                 nx_ctx->ap->sglen);
+                                 max_sg_len);
        nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
        nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
 
@@ -174,6 +200,7 @@ static int nx_sha512_final(struct shash_desc *desc, u8 *out)
 
        memcpy(out, csbcpb->cpb.sha512.message_digest, SHA512_DIGEST_SIZE);
 out:
+       spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
        return rc;
 }
 
@@ -183,6 +210,9 @@ static int nx_sha512_export(struct shash_desc *desc, void *out)
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
        struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
        struct sha512_state *octx = out;
+       unsigned long irq_flags;
+
+       spin_lock_irqsave(&nx_ctx->lock, irq_flags);
 
        /* move message_bit_length (128 bits) into count and convert its value
         * to bytes */
@@ -214,6 +244,7 @@ static int nx_sha512_export(struct shash_desc *desc, void *out)
                octx->state[7] = SHA512_H7;
        }
 
+       spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
        return 0;
 }
 
@@ -223,6 +254,9 @@ static int nx_sha512_import(struct shash_desc *desc, const void *in)
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
        struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
        const struct sha512_state *ictx = in;
+       unsigned long irq_flags;
+
+       spin_lock_irqsave(&nx_ctx->lock, irq_flags);
 
        memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf));
        sctx->count[0] = ictx->count[0] & 0x3f;
@@ -240,6 +274,7 @@ static int nx_sha512_import(struct shash_desc *desc, const void *in)
                NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
        }
 
+       spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
        return 0;
 }
 
index bbdab6e5ccf08f75fcc23481dbb9d29a1df6cd69..bdf4990f9758a59b6b3f73e1857ca8c91169f2cd 100644 (file)
@@ -61,8 +61,7 @@ int nx_hcall_sync(struct nx_crypto_ctx *nx_ctx,
 
        do {
                rc = vio_h_cop_sync(viodev, op);
-       } while ((rc == -EBUSY && !may_sleep && retries--) ||
-                (rc == -EBUSY && may_sleep && cond_resched()));
+       } while (rc == -EBUSY && !may_sleep && retries--);
 
        if (rc) {
                dev_dbg(&viodev->dev, "vio_h_cop_sync failed: rc: %d "
@@ -114,13 +113,29 @@ struct nx_sg *nx_build_sg_list(struct nx_sg *sg_head,
         * have been described (or @sgmax elements have been written), the
         * loop ends. min_t is used to ensure @end_addr falls on the same page
         * as sg_addr, if not, we need to create another nx_sg element for the
-        * data on the next page */
+        * data on the next page.
+        *
+        * Also when using vmalloc'ed data, every time that a system page
+        * boundary is crossed the physical address needs to be re-calculated.
+        */
        for (sg = sg_head; sg_len < len; sg++) {
+               u64 next_page;
+
                sg->addr = sg_addr;
-               sg_addr = min_t(u64, NX_PAGE_NUM(sg_addr + NX_PAGE_SIZE), end_addr);
-               sg->len = sg_addr - sg->addr;
+               sg_addr = min_t(u64, NX_PAGE_NUM(sg_addr + NX_PAGE_SIZE),
+                               end_addr);
+
+               next_page = (sg->addr & PAGE_MASK) + PAGE_SIZE;
+               sg->len = min_t(u64, sg_addr, next_page) - sg->addr;
                sg_len += sg->len;
 
+               if (sg_addr >= next_page &&
+                               is_vmalloc_addr(start_addr + sg_len)) {
+                       sg_addr = page_to_phys(vmalloc_to_page(
+                                               start_addr + sg_len));
+                       end_addr = sg_addr + len - sg_len;
+               }
+
                if ((sg - sg_head) == sgmax) {
                        pr_err("nx: scatter/gather list overflow, pid: %d\n",
                               current->pid);
@@ -235,6 +250,7 @@ int nx_build_sg_lists(struct nx_crypto_ctx  *nx_ctx,
  */
 void nx_ctx_init(struct nx_crypto_ctx *nx_ctx, unsigned int function)
 {
+       spin_lock_init(&nx_ctx->lock);
        memset(nx_ctx->kmem, 0, nx_ctx->kmem_len);
        nx_ctx->csbcpb->csb.valid |= NX_CSB_VALID_BIT;
 
index 3232b182dd28da33d2784cc4cb8aa577dd028fe6..14bb97f1c339ce40ea47f3795ff0ea4594361aea 100644 (file)
@@ -117,6 +117,7 @@ struct nx_ctr_priv {
 };
 
 struct nx_crypto_ctx {
+       spinlock_t lock;          /* synchronize access to the context */
        void *kmem;               /* unaligned, kmalloc'd buffer */
        size_t kmem_len;          /* length of kmem */
        struct nx_csbcpb *csbcpb; /* aligned page given to phyp @ hcall time */
index 5f7980586850e1bd88fd0ed3ac0ba9db1cb5b209..ce791c2f81f79e4ffda5d7d44e6a31a8a46bcb34 100644 (file)
@@ -13,7 +13,9 @@
  *
  */
 
-#define pr_fmt(fmt) "%s: " fmt, __func__
+#define pr_fmt(fmt) "%20s: " fmt, __func__
+#define prn(num) pr_debug(#num "=%d\n", num)
+#define prx(num) pr_debug(#num "=%x\n", num)
 
 #include <linux/err.h>
 #include <linux/module.h>
@@ -38,6 +40,8 @@
 #define DST_MAXBURST                   4
 #define DMA_MIN                                (DST_MAXBURST * sizeof(u32))
 
+#define _calc_walked(inout) (dd->inout##_walk.offset - dd->inout##_sg->offset)
+
 /* OMAP TRM gives bitfields as start:end, where start is the higher bit
    number. For example 7:0 */
 #define FLD_MASK(start, end)   (((1 << ((start) - (end) + 1)) - 1) << (end))
 
 #define AES_REG_LENGTH_N(x)            (0x54 + ((x) * 0x04))
 
+#define AES_REG_IRQ_STATUS(dd)         ((dd)->pdata->irq_status_ofs)
+#define AES_REG_IRQ_ENABLE(dd)         ((dd)->pdata->irq_enable_ofs)
+#define AES_REG_IRQ_DATA_IN            BIT(1)
+#define AES_REG_IRQ_DATA_OUT           BIT(2)
 #define DEFAULT_TIMEOUT                (5*HZ)
 
 #define FLAGS_MODE_MASK                0x000f
@@ -86,6 +94,8 @@
 #define FLAGS_FAST             BIT(5)
 #define FLAGS_BUSY             BIT(6)
 
+#define AES_BLOCK_WORDS                (AES_BLOCK_SIZE >> 2)
+
 struct omap_aes_ctx {
        struct omap_aes_dev *dd;
 
@@ -119,6 +129,8 @@ struct omap_aes_pdata {
        u32             data_ofs;
        u32             rev_ofs;
        u32             mask_ofs;
+       u32             irq_enable_ofs;
+       u32             irq_status_ofs;
 
        u32             dma_enable_in;
        u32             dma_enable_out;
@@ -146,25 +158,32 @@ struct omap_aes_dev {
        struct tasklet_struct   queue_task;
 
        struct ablkcipher_request       *req;
+
+       /*
+        * total is used by PIO mode for book keeping so introduce
+        * variable total_save as need it to calc page_order
+        */
        size_t                          total;
+       size_t                          total_save;
+
        struct scatterlist              *in_sg;
-       struct scatterlist              in_sgl;
-       size_t                          in_offset;
        struct scatterlist              *out_sg;
+
+       /* Buffers for copying for unaligned cases */
+       struct scatterlist              in_sgl;
        struct scatterlist              out_sgl;
-       size_t                          out_offset;
+       struct scatterlist              *orig_out;
+       int                             sgs_copied;
 
-       size_t                  buflen;
-       void                    *buf_in;
-       size_t                  dma_size;
+       struct scatter_walk             in_walk;
+       struct scatter_walk             out_walk;
        int                     dma_in;
        struct dma_chan         *dma_lch_in;
-       dma_addr_t              dma_addr_in;
-       void                    *buf_out;
        int                     dma_out;
        struct dma_chan         *dma_lch_out;
-       dma_addr_t              dma_addr_out;
-
+       int                     in_sg_len;
+       int                     out_sg_len;
+       int                     pio_only;
        const struct omap_aes_pdata     *pdata;
 };
 
@@ -172,16 +191,36 @@ struct omap_aes_dev {
 static LIST_HEAD(dev_list);
 static DEFINE_SPINLOCK(list_lock);
 
+#ifdef DEBUG
+#define omap_aes_read(dd, offset)                              \
+({                                                             \
+       int _read_ret;                                          \
+       _read_ret = __raw_readl(dd->io_base + offset);          \
+       pr_debug("omap_aes_read(" #offset "=%#x)= %#x\n",       \
+                offset, _read_ret);                            \
+       _read_ret;                                              \
+})
+#else
 static inline u32 omap_aes_read(struct omap_aes_dev *dd, u32 offset)
 {
        return __raw_readl(dd->io_base + offset);
 }
+#endif
 
+#ifdef DEBUG
+#define omap_aes_write(dd, offset, value)                              \
+       do {                                                            \
+               pr_debug("omap_aes_write(" #offset "=%#x) value=%#x\n", \
+                        offset, value);                                \
+               __raw_writel(value, dd->io_base + offset);              \
+       } while (0)
+#else
 static inline void omap_aes_write(struct omap_aes_dev *dd, u32 offset,
                                  u32 value)
 {
        __raw_writel(value, dd->io_base + offset);
 }
+#endif
 
 static inline void omap_aes_write_mask(struct omap_aes_dev *dd, u32 offset,
                                        u32 value, u32 mask)
@@ -323,33 +362,6 @@ static int omap_aes_dma_init(struct omap_aes_dev *dd)
        dd->dma_lch_out = NULL;
        dd->dma_lch_in = NULL;
 
-       dd->buf_in = (void *)__get_free_pages(GFP_KERNEL, OMAP_AES_CACHE_SIZE);
-       dd->buf_out = (void *)__get_free_pages(GFP_KERNEL, OMAP_AES_CACHE_SIZE);
-       dd->buflen = PAGE_SIZE << OMAP_AES_CACHE_SIZE;
-       dd->buflen &= ~(AES_BLOCK_SIZE - 1);
-
-       if (!dd->buf_in || !dd->buf_out) {
-               dev_err(dd->dev, "unable to alloc pages.\n");
-               goto err_alloc;
-       }
-
-       /* MAP here */
-       dd->dma_addr_in = dma_map_single(dd->dev, dd->buf_in, dd->buflen,
-                                        DMA_TO_DEVICE);
-       if (dma_mapping_error(dd->dev, dd->dma_addr_in)) {
-               dev_err(dd->dev, "dma %d bytes error\n", dd->buflen);
-               err = -EINVAL;
-               goto err_map_in;
-       }
-
-       dd->dma_addr_out = dma_map_single(dd->dev, dd->buf_out, dd->buflen,
-                                         DMA_FROM_DEVICE);
-       if (dma_mapping_error(dd->dev, dd->dma_addr_out)) {
-               dev_err(dd->dev, "dma %d bytes error\n", dd->buflen);
-               err = -EINVAL;
-               goto err_map_out;
-       }
-
        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);
 
@@ -376,14 +388,6 @@ static int omap_aes_dma_init(struct omap_aes_dev *dd)
 err_dma_out:
        dma_release_channel(dd->dma_lch_in);
 err_dma_in:
-       dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen,
-                        DMA_FROM_DEVICE);
-err_map_out:
-       dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen, DMA_TO_DEVICE);
-err_map_in:
-       free_pages((unsigned long)dd->buf_out, OMAP_AES_CACHE_SIZE);
-       free_pages((unsigned long)dd->buf_in, OMAP_AES_CACHE_SIZE);
-err_alloc:
        if (err)
                pr_err("error: %d\n", err);
        return err;
@@ -393,11 +397,6 @@ static void omap_aes_dma_cleanup(struct omap_aes_dev *dd)
 {
        dma_release_channel(dd->dma_lch_out);
        dma_release_channel(dd->dma_lch_in);
-       dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen,
-                        DMA_FROM_DEVICE);
-       dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen, DMA_TO_DEVICE);
-       free_pages((unsigned long)dd->buf_out, OMAP_AES_CACHE_SIZE);
-       free_pages((unsigned long)dd->buf_in, OMAP_AES_CACHE_SIZE);
 }
 
 static void sg_copy_buf(void *buf, struct scatterlist *sg,
@@ -414,59 +413,27 @@ static void sg_copy_buf(void *buf, struct scatterlist *sg,
        scatterwalk_done(&walk, out, 0);
 }
 
-static int sg_copy(struct scatterlist **sg, size_t *offset, void *buf,
-                  size_t buflen, size_t total, int out)
-{
-       unsigned int count, off = 0;
-
-       while (buflen && total) {
-               count = min((*sg)->length - *offset, total);
-               count = min(count, buflen);
-
-               if (!count)
-                       return off;
-
-               /*
-                * buflen and total are AES_BLOCK_SIZE size aligned,
-                * so count should be also aligned
-                */
-
-               sg_copy_buf(buf + off, *sg, *offset, count, out);
-
-               off += count;
-               buflen -= count;
-               *offset += count;
-               total -= count;
-
-               if (*offset == (*sg)->length) {
-                       *sg = sg_next(*sg);
-                       if (*sg)
-                               *offset = 0;
-                       else
-                               total = 0;
-               }
-       }
-
-       return off;
-}
-
 static int omap_aes_crypt_dma(struct crypto_tfm *tfm,
-               struct scatterlist *in_sg, struct scatterlist *out_sg)
+               struct scatterlist *in_sg, struct scatterlist *out_sg,
+               int in_sg_len, int out_sg_len)
 {
        struct omap_aes_ctx *ctx = crypto_tfm_ctx(tfm);
        struct omap_aes_dev *dd = ctx->dd;
        struct dma_async_tx_descriptor *tx_in, *tx_out;
        struct dma_slave_config cfg;
-       dma_addr_t dma_addr_in = sg_dma_address(in_sg);
-       int ret, length = sg_dma_len(in_sg);
+       int ret;
 
-       pr_debug("len: %d\n", length);
+       if (dd->pio_only) {
+               scatterwalk_start(&dd->in_walk, dd->in_sg);
+               scatterwalk_start(&dd->out_walk, dd->out_sg);
 
-       dd->dma_size = length;
+               /* Enable DATAIN interrupt and let it take
+                  care of the rest */
+               omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x2);
+               return 0;
+       }
 
-       if (!(dd->flags & FLAGS_FAST))
-               dma_sync_single_for_device(dd->dev, dma_addr_in, length,
-                                          DMA_TO_DEVICE);
+       dma_sync_sg_for_device(dd->dev, dd->in_sg, in_sg_len, DMA_TO_DEVICE);
 
        memset(&cfg, 0, sizeof(cfg));
 
@@ -485,7 +452,7 @@ static int omap_aes_crypt_dma(struct crypto_tfm *tfm,
                return ret;
        }
 
-       tx_in = dmaengine_prep_slave_sg(dd->dma_lch_in, in_sg, 1,
+       tx_in = dmaengine_prep_slave_sg(dd->dma_lch_in, in_sg, in_sg_len,
                                        DMA_MEM_TO_DEV,
                                        DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!tx_in) {
@@ -504,7 +471,7 @@ static int omap_aes_crypt_dma(struct crypto_tfm *tfm,
                return ret;
        }
 
-       tx_out = dmaengine_prep_slave_sg(dd->dma_lch_out, out_sg, 1,
+       tx_out = dmaengine_prep_slave_sg(dd->dma_lch_out, out_sg, out_sg_len,
                                        DMA_DEV_TO_MEM,
                                        DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!tx_out) {
@@ -522,7 +489,7 @@ static int omap_aes_crypt_dma(struct crypto_tfm *tfm,
        dma_async_issue_pending(dd->dma_lch_out);
 
        /* start DMA */
-       dd->pdata->trigger(dd, length);
+       dd->pdata->trigger(dd, dd->total);
 
        return 0;
 }
@@ -531,93 +498,32 @@ static int omap_aes_crypt_dma_start(struct omap_aes_dev *dd)
 {
        struct crypto_tfm *tfm = crypto_ablkcipher_tfm(
                                        crypto_ablkcipher_reqtfm(dd->req));
-       int err, fast = 0, in, out;
-       size_t count;
-       dma_addr_t addr_in, addr_out;
-       struct scatterlist *in_sg, *out_sg;
-       int len32;
+       int err;
 
        pr_debug("total: %d\n", dd->total);
 
-       if (sg_is_last(dd->in_sg) && sg_is_last(dd->out_sg)) {
-               /* check for alignment */
-               in = IS_ALIGNED((u32)dd->in_sg->offset, sizeof(u32));
-               out = IS_ALIGNED((u32)dd->out_sg->offset, sizeof(u32));
-
-               fast = in && out;
-       }
-
-       if (fast)  {
-               count = min(dd->total, sg_dma_len(dd->in_sg));
-               count = min(count, sg_dma_len(dd->out_sg));
-
-               if (count != dd->total) {
-                       pr_err("request length != buffer length\n");
-                       return -EINVAL;
-               }
-
-               pr_debug("fast\n");
-
-               err = dma_map_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
+       if (!dd->pio_only) {
+               err = dma_map_sg(dd->dev, dd->in_sg, dd->in_sg_len,
+                                DMA_TO_DEVICE);
                if (!err) {
                        dev_err(dd->dev, "dma_map_sg() error\n");
                        return -EINVAL;
                }
 
-               err = dma_map_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
+               err = dma_map_sg(dd->dev, dd->out_sg, dd->out_sg_len,
+                                DMA_FROM_DEVICE);
                if (!err) {
                        dev_err(dd->dev, "dma_map_sg() error\n");
-                       dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
                        return -EINVAL;
                }
-
-               addr_in = sg_dma_address(dd->in_sg);
-               addr_out = sg_dma_address(dd->out_sg);
-
-               in_sg = dd->in_sg;
-               out_sg = dd->out_sg;
-
-               dd->flags |= FLAGS_FAST;
-
-       } else {
-               /* use cache buffers */
-               count = sg_copy(&dd->in_sg, &dd->in_offset, dd->buf_in,
-                                dd->buflen, dd->total, 0);
-
-               len32 = DIV_ROUND_UP(count, DMA_MIN) * DMA_MIN;
-
-               /*
-                * The data going into the AES module has been copied
-                * to a local buffer and the data coming out will go
-                * into a local buffer so set up local SG entries for
-                * both.
-                */
-               sg_init_table(&dd->in_sgl, 1);
-               dd->in_sgl.offset = dd->in_offset;
-               sg_dma_len(&dd->in_sgl) = len32;
-               sg_dma_address(&dd->in_sgl) = dd->dma_addr_in;
-
-               sg_init_table(&dd->out_sgl, 1);
-               dd->out_sgl.offset = dd->out_offset;
-               sg_dma_len(&dd->out_sgl) = len32;
-               sg_dma_address(&dd->out_sgl) = dd->dma_addr_out;
-
-               in_sg = &dd->in_sgl;
-               out_sg = &dd->out_sgl;
-
-               addr_in = dd->dma_addr_in;
-               addr_out = dd->dma_addr_out;
-
-               dd->flags &= ~FLAGS_FAST;
-
        }
 
-       dd->total -= count;
-
-       err = omap_aes_crypt_dma(tfm, in_sg, out_sg);
-       if (err) {
-               dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
-               dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_TO_DEVICE);
+       err = omap_aes_crypt_dma(tfm, dd->in_sg, dd->out_sg, dd->in_sg_len,
+                                dd->out_sg_len);
+       if (err && !dd->pio_only) {
+               dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE);
+               dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len,
+                            DMA_FROM_DEVICE);
        }
 
        return err;
@@ -637,7 +543,6 @@ static void omap_aes_finish_req(struct omap_aes_dev *dd, int err)
 static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
 {
        int err = 0;
-       size_t count;
 
        pr_debug("total: %d\n", dd->total);
 
@@ -646,23 +551,49 @@ static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
        dmaengine_terminate_all(dd->dma_lch_in);
        dmaengine_terminate_all(dd->dma_lch_out);
 
-       if (dd->flags & FLAGS_FAST) {
-               dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
-               dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
-       } else {
-               dma_sync_single_for_device(dd->dev, dd->dma_addr_out,
-                                          dd->dma_size, DMA_FROM_DEVICE);
-
-               /* copy data */
-               count = sg_copy(&dd->out_sg, &dd->out_offset, dd->buf_out,
-                                dd->buflen, dd->dma_size, 1);
-               if (count != dd->dma_size) {
-                       err = -EINVAL;
-                       pr_err("not all data converted: %u\n", count);
-               }
+       return err;
+}
+
+int omap_aes_check_aligned(struct scatterlist *sg)
+{
+       while (sg) {
+               if (!IS_ALIGNED(sg->offset, 4))
+                       return -1;
+               if (!IS_ALIGNED(sg->length, AES_BLOCK_SIZE))
+                       return -1;
+               sg = sg_next(sg);
        }
+       return 0;
+}
 
-       return err;
+int omap_aes_copy_sgs(struct omap_aes_dev *dd)
+{
+       void *buf_in, *buf_out;
+       int pages;
+
+       pages = get_order(dd->total);
+
+       buf_in = (void *)__get_free_pages(GFP_ATOMIC, pages);
+       buf_out = (void *)__get_free_pages(GFP_ATOMIC, pages);
+
+       if (!buf_in || !buf_out) {
+               pr_err("Couldn't allocated pages for unaligned cases.\n");
+               return -1;
+       }
+
+       dd->orig_out = dd->out_sg;
+
+       sg_copy_buf(buf_in, dd->in_sg, 0, dd->total, 0);
+
+       sg_init_table(&dd->in_sgl, 1);
+       sg_set_buf(&dd->in_sgl, buf_in, dd->total);
+       dd->in_sg = &dd->in_sgl;
+
+       sg_init_table(&dd->out_sgl, 1);
+       sg_set_buf(&dd->out_sgl, buf_out, dd->total);
+       dd->out_sg = &dd->out_sgl;
+
+       return 0;
 }
 
 static int omap_aes_handle_queue(struct omap_aes_dev *dd,
@@ -698,11 +629,23 @@ static int omap_aes_handle_queue(struct omap_aes_dev *dd,
        /* assign new request to device */
        dd->req = req;
        dd->total = req->nbytes;
-       dd->in_offset = 0;
+       dd->total_save = req->nbytes;
        dd->in_sg = req->src;
-       dd->out_offset = 0;
        dd->out_sg = req->dst;
 
+       if (omap_aes_check_aligned(dd->in_sg) ||
+           omap_aes_check_aligned(dd->out_sg)) {
+               if (omap_aes_copy_sgs(dd))
+                       pr_err("Failed to copy SGs for unaligned cases\n");
+               dd->sgs_copied = 1;
+       } else {
+               dd->sgs_copied = 0;
+       }
+
+       dd->in_sg_len = scatterwalk_bytes_sglen(dd->in_sg, dd->total);
+       dd->out_sg_len = scatterwalk_bytes_sglen(dd->out_sg, dd->total);
+       BUG_ON(dd->in_sg_len < 0 || dd->out_sg_len < 0);
+
        rctx = ablkcipher_request_ctx(req);
        ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
        rctx->mode &= FLAGS_MODE_MASK;
@@ -726,21 +669,32 @@ static int omap_aes_handle_queue(struct omap_aes_dev *dd,
 static void omap_aes_done_task(unsigned long data)
 {
        struct omap_aes_dev *dd = (struct omap_aes_dev *)data;
-       int err;
-
-       pr_debug("enter\n");
+       void *buf_in, *buf_out;
+       int pages;
+
+       pr_debug("enter done_task\n");
+
+       if (!dd->pio_only) {
+               dma_sync_sg_for_device(dd->dev, dd->out_sg, dd->out_sg_len,
+                                      DMA_FROM_DEVICE);
+               dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE);
+               dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len,
+                            DMA_FROM_DEVICE);
+               omap_aes_crypt_dma_stop(dd);
+       }
 
-       err = omap_aes_crypt_dma_stop(dd);
+       if (dd->sgs_copied) {
+               buf_in = sg_virt(&dd->in_sgl);
+               buf_out = sg_virt(&dd->out_sgl);
 
-       err = dd->err ? : err;
+               sg_copy_buf(buf_out, dd->orig_out, 0, dd->total_save, 1);
 
-       if (dd->total && !err) {
-               err = omap_aes_crypt_dma_start(dd);
-               if (!err)
-                       return; /* DMA started. Not fininishing. */
+               pages = get_order(dd->total_save);
+               free_pages((unsigned long)buf_in, pages);
+               free_pages((unsigned long)buf_out, pages);
        }
 
-       omap_aes_finish_req(dd, err);
+       omap_aes_finish_req(dd, 0);
        omap_aes_handle_queue(dd, NULL);
 
        pr_debug("exit\n");
@@ -1002,6 +956,8 @@ static const struct omap_aes_pdata omap_aes_pdata_omap4 = {
        .data_ofs       = 0x60,
        .rev_ofs        = 0x80,
        .mask_ofs       = 0x84,
+       .irq_status_ofs = 0x8c,
+       .irq_enable_ofs = 0x90,
        .dma_enable_in  = BIT(5),
        .dma_enable_out = BIT(6),
        .major_mask     = 0x0700,
@@ -1010,6 +966,90 @@ static const struct omap_aes_pdata omap_aes_pdata_omap4 = {
        .minor_shift    = 0,
 };
 
+static irqreturn_t omap_aes_irq(int irq, void *dev_id)
+{
+       struct omap_aes_dev *dd = dev_id;
+       u32 status, i;
+       u32 *src, *dst;
+
+       status = omap_aes_read(dd, AES_REG_IRQ_STATUS(dd));
+       if (status & AES_REG_IRQ_DATA_IN) {
+               omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x0);
+
+               BUG_ON(!dd->in_sg);
+
+               BUG_ON(_calc_walked(in) > dd->in_sg->length);
+
+               src = sg_virt(dd->in_sg) + _calc_walked(in);
+
+               for (i = 0; i < AES_BLOCK_WORDS; i++) {
+                       omap_aes_write(dd, AES_REG_DATA_N(dd, i), *src);
+
+                       scatterwalk_advance(&dd->in_walk, 4);
+                       if (dd->in_sg->length == _calc_walked(in)) {
+                               dd->in_sg = scatterwalk_sg_next(dd->in_sg);
+                               if (dd->in_sg) {
+                                       scatterwalk_start(&dd->in_walk,
+                                                         dd->in_sg);
+                                       src = sg_virt(dd->in_sg) +
+                                             _calc_walked(in);
+                               }
+                       } else {
+                               src++;
+                       }
+               }
+
+               /* Clear IRQ status */
+               status &= ~AES_REG_IRQ_DATA_IN;
+               omap_aes_write(dd, AES_REG_IRQ_STATUS(dd), status);
+
+               /* Enable DATA_OUT interrupt */
+               omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x4);
+
+       } else if (status & AES_REG_IRQ_DATA_OUT) {
+               omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x0);
+
+               BUG_ON(!dd->out_sg);
+
+               BUG_ON(_calc_walked(out) > dd->out_sg->length);
+
+               dst = sg_virt(dd->out_sg) + _calc_walked(out);
+
+               for (i = 0; i < AES_BLOCK_WORDS; i++) {
+                       *dst = omap_aes_read(dd, AES_REG_DATA_N(dd, i));
+                       scatterwalk_advance(&dd->out_walk, 4);
+                       if (dd->out_sg->length == _calc_walked(out)) {
+                               dd->out_sg = scatterwalk_sg_next(dd->out_sg);
+                               if (dd->out_sg) {
+                                       scatterwalk_start(&dd->out_walk,
+                                                         dd->out_sg);
+                                       dst = sg_virt(dd->out_sg) +
+                                             _calc_walked(out);
+                               }
+                       } else {
+                               dst++;
+                       }
+               }
+
+               dd->total -= AES_BLOCK_SIZE;
+
+               BUG_ON(dd->total < 0);
+
+               /* Clear IRQ status */
+               status &= ~AES_REG_IRQ_DATA_OUT;
+               omap_aes_write(dd, AES_REG_IRQ_STATUS(dd), status);
+
+               if (!dd->total)
+                       /* All bytes read! */
+                       tasklet_schedule(&dd->done_task);
+               else
+                       /* Enable DATA_IN interrupt for next block */
+                       omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x2);
+       }
+
+       return IRQ_HANDLED;
+}
+
 static const struct of_device_id omap_aes_of_match[] = {
        {
                .compatible     = "ti,omap2-aes",
@@ -1115,10 +1155,10 @@ static int omap_aes_probe(struct platform_device *pdev)
        struct omap_aes_dev *dd;
        struct crypto_alg *algp;
        struct resource res;
-       int err = -ENOMEM, i, j;
+       int err = -ENOMEM, i, j, irq = -1;
        u32 reg;
 
-       dd = kzalloc(sizeof(struct omap_aes_dev), GFP_KERNEL);
+       dd = devm_kzalloc(dev, sizeof(struct omap_aes_dev), GFP_KERNEL);
        if (dd == NULL) {
                dev_err(dev, "unable to alloc data struct.\n");
                goto err_data;
@@ -1158,8 +1198,23 @@ static int omap_aes_probe(struct platform_device *pdev)
        tasklet_init(&dd->queue_task, omap_aes_queue_task, (unsigned long)dd);
 
        err = omap_aes_dma_init(dd);
-       if (err)
-               goto err_dma;
+       if (err && AES_REG_IRQ_STATUS(dd) && AES_REG_IRQ_ENABLE(dd)) {
+               dd->pio_only = 1;
+
+               irq = platform_get_irq(pdev, 0);
+               if (irq < 0) {
+                       dev_err(dev, "can't get IRQ resource\n");
+                       goto err_irq;
+               }
+
+               err = devm_request_irq(dev, irq, omap_aes_irq, 0,
+                               dev_name(dev), dd);
+               if (err) {
+                       dev_err(dev, "Unable to grab omap-aes IRQ\n");
+                       goto err_irq;
+               }
+       }
+
 
        INIT_LIST_HEAD(&dd->list);
        spin_lock(&list_lock);
@@ -1187,13 +1242,13 @@ err_algs:
                for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
                        crypto_unregister_alg(
                                        &dd->pdata->algs_info[i].algs_list[j]);
-       omap_aes_dma_cleanup(dd);
-err_dma:
+       if (!dd->pio_only)
+               omap_aes_dma_cleanup(dd);
+err_irq:
        tasklet_kill(&dd->done_task);
        tasklet_kill(&dd->queue_task);
        pm_runtime_disable(dev);
 err_res:
-       kfree(dd);
        dd = NULL;
 err_data:
        dev_err(dev, "initialization failed.\n");
@@ -1221,7 +1276,6 @@ static int omap_aes_remove(struct platform_device *pdev)
        tasklet_kill(&dd->queue_task);
        omap_aes_dma_cleanup(dd);
        pm_runtime_disable(dd->dev);
-       kfree(dd);
        dd = NULL;
 
        return 0;
index 4bb67652c2005ea46cf2677172665f8a261e58e3..8bdde57f6bb1d7050967351744571b352062f1e1 100644 (file)
 #include <crypto/hash.h>
 #include <crypto/internal/hash.h>
 
-#define SHA1_MD5_BLOCK_SIZE            SHA1_BLOCK_SIZE
 #define MD5_DIGEST_SIZE                        16
 
-#define DST_MAXBURST                   16
-#define DMA_MIN                                (DST_MAXBURST * sizeof(u32))
-
 #define SHA_REG_IDIGEST(dd, x)         ((dd)->pdata->idigest_ofs + ((x)*0x04))
 #define SHA_REG_DIN(dd, x)             ((dd)->pdata->din_ofs + ((x) * 0x04))
 #define SHA_REG_DIGCNT(dd)             ((dd)->pdata->digcnt_ofs)
 
-#define SHA_REG_ODIGEST(x)             (0x00 + ((x) * 0x04))
+#define SHA_REG_ODIGEST(dd, x)         ((dd)->pdata->odigest_ofs + (x * 0x04))
 
 #define SHA_REG_CTRL                   0x18
 #define SHA_REG_CTRL_LENGTH            (0xFFFFFFFF << 5)
 #define SHA_REG_SYSSTATUS(dd)          ((dd)->pdata->sysstatus_ofs)
 #define SHA_REG_SYSSTATUS_RESETDONE    (1 << 0)
 
-#define SHA_REG_MODE                   0x44
+#define SHA_REG_MODE(dd)               ((dd)->pdata->mode_ofs)
 #define SHA_REG_MODE_HMAC_OUTER_HASH   (1 << 7)
 #define SHA_REG_MODE_HMAC_KEY_PROC     (1 << 5)
 #define SHA_REG_MODE_CLOSE_HASH                (1 << 4)
 #define SHA_REG_MODE_ALGO_CONSTANT     (1 << 3)
-#define SHA_REG_MODE_ALGO_MASK         (3 << 1)
-#define                SHA_REG_MODE_ALGO_MD5_128       (0 << 1)
-#define                SHA_REG_MODE_ALGO_SHA1_160      (1 << 1)
-#define                SHA_REG_MODE_ALGO_SHA2_224      (2 << 1)
-#define                SHA_REG_MODE_ALGO_SHA2_256      (3 << 1)
 
-#define SHA_REG_LENGTH                 0x48
+#define SHA_REG_MODE_ALGO_MASK         (7 << 0)
+#define SHA_REG_MODE_ALGO_MD5_128      (0 << 1)
+#define SHA_REG_MODE_ALGO_SHA1_160     (1 << 1)
+#define SHA_REG_MODE_ALGO_SHA2_224     (2 << 1)
+#define SHA_REG_MODE_ALGO_SHA2_256     (3 << 1)
+#define SHA_REG_MODE_ALGO_SHA2_384     (1 << 0)
+#define SHA_REG_MODE_ALGO_SHA2_512     (3 << 0)
+
+#define SHA_REG_LENGTH(dd)             ((dd)->pdata->length_ofs)
 
 #define SHA_REG_IRQSTATUS              0x118
 #define SHA_REG_IRQSTATUS_CTX_RDY      (1 << 3)
 #define FLAGS_SG               17
 
 #define FLAGS_MODE_SHIFT       18
-#define FLAGS_MODE_MASK                (SHA_REG_MODE_ALGO_MASK                 \
-                                       << (FLAGS_MODE_SHIFT - 1))
-#define                FLAGS_MODE_MD5          (SHA_REG_MODE_ALGO_MD5_128      \
-                                               << (FLAGS_MODE_SHIFT - 1))
-#define                FLAGS_MODE_SHA1         (SHA_REG_MODE_ALGO_SHA1_160     \
-                                               << (FLAGS_MODE_SHIFT - 1))
-#define                FLAGS_MODE_SHA224       (SHA_REG_MODE_ALGO_SHA2_224     \
-                                               << (FLAGS_MODE_SHIFT - 1))
-#define                FLAGS_MODE_SHA256       (SHA_REG_MODE_ALGO_SHA2_256     \
-                                               << (FLAGS_MODE_SHIFT - 1))
-#define FLAGS_HMAC             20
-#define FLAGS_ERROR            21
+#define FLAGS_MODE_MASK                (SHA_REG_MODE_ALGO_MASK << FLAGS_MODE_SHIFT)
+#define FLAGS_MODE_MD5         (SHA_REG_MODE_ALGO_MD5_128 << FLAGS_MODE_SHIFT)
+#define FLAGS_MODE_SHA1                (SHA_REG_MODE_ALGO_SHA1_160 << FLAGS_MODE_SHIFT)
+#define FLAGS_MODE_SHA224      (SHA_REG_MODE_ALGO_SHA2_224 << FLAGS_MODE_SHIFT)
+#define FLAGS_MODE_SHA256      (SHA_REG_MODE_ALGO_SHA2_256 << FLAGS_MODE_SHIFT)
+#define FLAGS_MODE_SHA384      (SHA_REG_MODE_ALGO_SHA2_384 << FLAGS_MODE_SHIFT)
+#define FLAGS_MODE_SHA512      (SHA_REG_MODE_ALGO_SHA2_512 << FLAGS_MODE_SHIFT)
+
+#define FLAGS_HMAC             21
+#define FLAGS_ERROR            22
 
 #define OP_UPDATE              1
 #define OP_FINAL               2
@@ -145,7 +142,7 @@ struct omap_sham_reqctx {
        unsigned long           flags;
        unsigned long           op;
 
-       u8                      digest[SHA256_DIGEST_SIZE] OMAP_ALIGNED;
+       u8                      digest[SHA512_DIGEST_SIZE] OMAP_ALIGNED;
        size_t                  digcnt;
        size_t                  bufcnt;
        size_t                  buflen;
@@ -162,8 +159,8 @@ struct omap_sham_reqctx {
 
 struct omap_sham_hmac_ctx {
        struct crypto_shash     *shash;
-       u8                      ipad[SHA1_MD5_BLOCK_SIZE] OMAP_ALIGNED;
-       u8                      opad[SHA1_MD5_BLOCK_SIZE] OMAP_ALIGNED;
+       u8                      ipad[SHA512_BLOCK_SIZE] OMAP_ALIGNED;
+       u8                      opad[SHA512_BLOCK_SIZE] OMAP_ALIGNED;
 };
 
 struct omap_sham_ctx {
@@ -205,6 +202,8 @@ struct omap_sham_pdata {
        u32             rev_ofs;
        u32             mask_ofs;
        u32             sysstatus_ofs;
+       u32             mode_ofs;
+       u32             length_ofs;
 
        u32             major_mask;
        u32             major_shift;
@@ -223,6 +222,7 @@ struct omap_sham_dev {
        unsigned int            dma;
        struct dma_chan         *dma_lch;
        struct tasklet_struct   done_task;
+       u8                      polling_mode;
 
        unsigned long           flags;
        struct crypto_queue     queue;
@@ -306,9 +306,9 @@ static void omap_sham_copy_hash_omap4(struct ahash_request *req, int out)
                for (i = 0; i < dd->pdata->digest_size / sizeof(u32); i++) {
                        if (out)
                                opad[i] = omap_sham_read(dd,
-                                               SHA_REG_ODIGEST(i));
+                                               SHA_REG_ODIGEST(dd, i));
                        else
-                               omap_sham_write(dd, SHA_REG_ODIGEST(i),
+                               omap_sham_write(dd, SHA_REG_ODIGEST(dd, i),
                                                opad[i]);
                }
        }
@@ -342,6 +342,12 @@ static void omap_sham_copy_ready_hash(struct ahash_request *req)
        case FLAGS_MODE_SHA256:
                d = SHA256_DIGEST_SIZE / sizeof(u32);
                break;
+       case FLAGS_MODE_SHA384:
+               d = SHA384_DIGEST_SIZE / sizeof(u32);
+               break;
+       case FLAGS_MODE_SHA512:
+               d = SHA512_DIGEST_SIZE / sizeof(u32);
+               break;
        default:
                d = 0;
        }
@@ -404,6 +410,30 @@ static int omap_sham_poll_irq_omap2(struct omap_sham_dev *dd)
        return omap_sham_wait(dd, SHA_REG_CTRL, SHA_REG_CTRL_INPUT_READY);
 }
 
+static int get_block_size(struct omap_sham_reqctx *ctx)
+{
+       int d;
+
+       switch (ctx->flags & FLAGS_MODE_MASK) {
+       case FLAGS_MODE_MD5:
+       case FLAGS_MODE_SHA1:
+               d = SHA1_BLOCK_SIZE;
+               break;
+       case FLAGS_MODE_SHA224:
+       case FLAGS_MODE_SHA256:
+               d = SHA256_BLOCK_SIZE;
+               break;
+       case FLAGS_MODE_SHA384:
+       case FLAGS_MODE_SHA512:
+               d = SHA512_BLOCK_SIZE;
+               break;
+       default:
+               d = 0;
+       }
+
+       return d;
+}
+
 static void omap_sham_write_n(struct omap_sham_dev *dd, u32 offset,
                                    u32 *value, int count)
 {
@@ -422,20 +452,24 @@ static void omap_sham_write_ctrl_omap4(struct omap_sham_dev *dd, size_t length,
         * CLOSE_HASH only for the last one. Note that flags mode bits
         * correspond to algorithm encoding in mode register.
         */
-       val = (ctx->flags & FLAGS_MODE_MASK) >> (FLAGS_MODE_SHIFT - 1);
+       val = (ctx->flags & FLAGS_MODE_MASK) >> (FLAGS_MODE_SHIFT);
        if (!ctx->digcnt) {
                struct crypto_ahash *tfm = crypto_ahash_reqtfm(dd->req);
                struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
                struct omap_sham_hmac_ctx *bctx = tctx->base;
+               int bs, nr_dr;
 
                val |= SHA_REG_MODE_ALGO_CONSTANT;
 
                if (ctx->flags & BIT(FLAGS_HMAC)) {
+                       bs = get_block_size(ctx);
+                       nr_dr = bs / (2 * sizeof(u32));
                        val |= SHA_REG_MODE_HMAC_KEY_PROC;
-                       omap_sham_write_n(dd, SHA_REG_ODIGEST(0),
-                                         (u32 *)bctx->ipad,
-                                         SHA1_BLOCK_SIZE / sizeof(u32));
-                       ctx->digcnt += SHA1_BLOCK_SIZE;
+                       omap_sham_write_n(dd, SHA_REG_ODIGEST(dd, 0),
+                                         (u32 *)bctx->ipad, nr_dr);
+                       omap_sham_write_n(dd, SHA_REG_IDIGEST(dd, 0),
+                                         (u32 *)bctx->ipad + nr_dr, nr_dr);
+                       ctx->digcnt += bs;
                }
        }
 
@@ -451,7 +485,7 @@ static void omap_sham_write_ctrl_omap4(struct omap_sham_dev *dd, size_t length,
               SHA_REG_MODE_HMAC_KEY_PROC;
 
        dev_dbg(dd->dev, "ctrl: %08x, flags: %08lx\n", val, ctx->flags);
-       omap_sham_write_mask(dd, SHA_REG_MODE, val, mask);
+       omap_sham_write_mask(dd, SHA_REG_MODE(dd), val, mask);
        omap_sham_write(dd, SHA_REG_IRQENA, SHA_REG_IRQENA_OUTPUT_RDY);
        omap_sham_write_mask(dd, SHA_REG_MASK(dd),
                             SHA_REG_MASK_IT_EN |
@@ -461,7 +495,7 @@ static void omap_sham_write_ctrl_omap4(struct omap_sham_dev *dd, size_t length,
 
 static void omap_sham_trigger_omap4(struct omap_sham_dev *dd, size_t length)
 {
-       omap_sham_write(dd, SHA_REG_LENGTH, length);
+       omap_sham_write(dd, SHA_REG_LENGTH(dd), length);
 }
 
 static int omap_sham_poll_irq_omap4(struct omap_sham_dev *dd)
@@ -474,7 +508,7 @@ static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, const u8 *buf,
                              size_t length, int final)
 {
        struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
-       int count, len32;
+       int count, len32, bs32, offset = 0;
        const u32 *buffer = (const u32 *)buf;
 
        dev_dbg(dd->dev, "xmit_cpu: digcnt: %d, length: %d, final: %d\n",
@@ -486,18 +520,23 @@ static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, const u8 *buf,
        /* should be non-zero before next lines to disable clocks later */
        ctx->digcnt += length;
 
-       if (dd->pdata->poll_irq(dd))
-               return -ETIMEDOUT;
-
        if (final)
                set_bit(FLAGS_FINAL, &dd->flags); /* catch last interrupt */
 
        set_bit(FLAGS_CPU, &dd->flags);
 
        len32 = DIV_ROUND_UP(length, sizeof(u32));
+       bs32 = get_block_size(ctx) / sizeof(u32);
 
-       for (count = 0; count < len32; count++)
-               omap_sham_write(dd, SHA_REG_DIN(dd, count), buffer[count]);
+       while (len32) {
+               if (dd->pdata->poll_irq(dd))
+                       return -ETIMEDOUT;
+
+               for (count = 0; count < min(len32, bs32); count++, offset++)
+                       omap_sham_write(dd, SHA_REG_DIN(dd, count),
+                                       buffer[offset]);
+               len32 -= min(len32, bs32);
+       }
 
        return -EINPROGRESS;
 }
@@ -516,7 +555,7 @@ static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr,
        struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
        struct dma_async_tx_descriptor *tx;
        struct dma_slave_config cfg;
-       int len32, ret;
+       int len32, ret, dma_min = get_block_size(ctx);
 
        dev_dbg(dd->dev, "xmit_dma: digcnt: %d, length: %d, final: %d\n",
                                                ctx->digcnt, length, final);
@@ -525,7 +564,7 @@ static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr,
 
        cfg.dst_addr = dd->phys_base + SHA_REG_DIN(dd, 0);
        cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
-       cfg.dst_maxburst = DST_MAXBURST;
+       cfg.dst_maxburst = dma_min / DMA_SLAVE_BUSWIDTH_4_BYTES;
 
        ret = dmaengine_slave_config(dd->dma_lch, &cfg);
        if (ret) {
@@ -533,7 +572,7 @@ static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr,
                return ret;
        }
 
-       len32 = DIV_ROUND_UP(length, DMA_MIN) * DMA_MIN;
+       len32 = DIV_ROUND_UP(length, dma_min) * dma_min;
 
        if (is_sg) {
                /*
@@ -666,14 +705,14 @@ static int omap_sham_update_dma_slow(struct omap_sham_dev *dd)
 /* Start address alignment */
 #define SG_AA(sg)      (IS_ALIGNED(sg->offset, sizeof(u32)))
 /* SHA1 block size alignment */
-#define SG_SA(sg)      (IS_ALIGNED(sg->length, SHA1_MD5_BLOCK_SIZE))
+#define SG_SA(sg, bs)  (IS_ALIGNED(sg->length, bs))
 
 static int omap_sham_update_dma_start(struct omap_sham_dev *dd)
 {
        struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
        unsigned int length, final, tail;
        struct scatterlist *sg;
-       int ret;
+       int ret, bs;
 
        if (!ctx->total)
                return 0;
@@ -687,30 +726,31 @@ static int omap_sham_update_dma_start(struct omap_sham_dev *dd)
         * the dmaengine infrastructure will calculate that it needs
         * to transfer 0 frames which ultimately fails.
         */
-       if (ctx->total < (DST_MAXBURST * sizeof(u32)))
+       if (ctx->total < get_block_size(ctx))
                return omap_sham_update_dma_slow(dd);
 
        dev_dbg(dd->dev, "fast: digcnt: %d, bufcnt: %u, total: %u\n",
                        ctx->digcnt, ctx->bufcnt, ctx->total);
 
        sg = ctx->sg;
+       bs = get_block_size(ctx);
 
        if (!SG_AA(sg))
                return omap_sham_update_dma_slow(dd);
 
-       if (!sg_is_last(sg) && !SG_SA(sg))
-               /* size is not SHA1_BLOCK_SIZE aligned */
+       if (!sg_is_last(sg) && !SG_SA(sg, bs))
+               /* size is not BLOCK_SIZE aligned */
                return omap_sham_update_dma_slow(dd);
 
        length = min(ctx->total, sg->length);
 
        if (sg_is_last(sg)) {
                if (!(ctx->flags & BIT(FLAGS_FINUP))) {
-                       /* not last sg must be SHA1_MD5_BLOCK_SIZE aligned */
-                       tail = length & (SHA1_MD5_BLOCK_SIZE - 1);
+                       /* not last sg must be BLOCK_SIZE aligned */
+                       tail = length & (bs - 1);
                        /* without finup() we need one block to close hash */
                        if (!tail)
-                               tail = SHA1_MD5_BLOCK_SIZE;
+                               tail = bs;
                        length -= tail;
                }
        }
@@ -737,13 +777,22 @@ static int omap_sham_update_dma_start(struct omap_sham_dev *dd)
 static int omap_sham_update_cpu(struct omap_sham_dev *dd)
 {
        struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
-       int bufcnt;
+       int bufcnt, final;
+
+       if (!ctx->total)
+               return 0;
 
        omap_sham_append_sg(ctx);
+
+       final = (ctx->flags & BIT(FLAGS_FINUP)) && !ctx->total;
+
+       dev_dbg(dd->dev, "cpu: bufcnt: %u, digcnt: %d, final: %d\n",
+               ctx->bufcnt, ctx->digcnt, final);
+
        bufcnt = ctx->bufcnt;
        ctx->bufcnt = 0;
 
-       return omap_sham_xmit_cpu(dd, ctx->buffer, bufcnt, 1);
+       return omap_sham_xmit_cpu(dd, ctx->buffer, bufcnt, final);
 }
 
 static int omap_sham_update_dma_stop(struct omap_sham_dev *dd)
@@ -773,6 +822,7 @@ static int omap_sham_init(struct ahash_request *req)
        struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
        struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
        struct omap_sham_dev *dd = NULL, *tmp;
+       int bs = 0;
 
        spin_lock_bh(&sham.lock);
        if (!tctx->dd) {
@@ -796,15 +846,27 @@ static int omap_sham_init(struct ahash_request *req)
        switch (crypto_ahash_digestsize(tfm)) {
        case MD5_DIGEST_SIZE:
                ctx->flags |= FLAGS_MODE_MD5;
+               bs = SHA1_BLOCK_SIZE;
                break;
        case SHA1_DIGEST_SIZE:
                ctx->flags |= FLAGS_MODE_SHA1;
+               bs = SHA1_BLOCK_SIZE;
                break;
        case SHA224_DIGEST_SIZE:
                ctx->flags |= FLAGS_MODE_SHA224;
+               bs = SHA224_BLOCK_SIZE;
                break;
        case SHA256_DIGEST_SIZE:
                ctx->flags |= FLAGS_MODE_SHA256;
+               bs = SHA256_BLOCK_SIZE;
+               break;
+       case SHA384_DIGEST_SIZE:
+               ctx->flags |= FLAGS_MODE_SHA384;
+               bs = SHA384_BLOCK_SIZE;
+               break;
+       case SHA512_DIGEST_SIZE:
+               ctx->flags |= FLAGS_MODE_SHA512;
+               bs = SHA512_BLOCK_SIZE;
                break;
        }
 
@@ -816,8 +878,8 @@ static int omap_sham_init(struct ahash_request *req)
                if (!test_bit(FLAGS_AUTO_XOR, &dd->flags)) {
                        struct omap_sham_hmac_ctx *bctx = tctx->base;
 
-                       memcpy(ctx->buffer, bctx->ipad, SHA1_MD5_BLOCK_SIZE);
-                       ctx->bufcnt = SHA1_MD5_BLOCK_SIZE;
+                       memcpy(ctx->buffer, bctx->ipad, bs);
+                       ctx->bufcnt = bs;
                }
 
                ctx->flags |= BIT(FLAGS_HMAC);
@@ -853,8 +915,11 @@ static int omap_sham_final_req(struct omap_sham_dev *dd)
        struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
        int err = 0, use_dma = 1;
 
-       if (ctx->bufcnt <= DMA_MIN)
-               /* faster to handle last block with cpu */
+       if ((ctx->bufcnt <= get_block_size(ctx)) || dd->polling_mode)
+               /*
+                * faster to handle last block with cpu or
+                * use cpu when dma is not present.
+                */
                use_dma = 0;
 
        if (use_dma)
@@ -1006,6 +1071,8 @@ static int omap_sham_enqueue(struct ahash_request *req, unsigned int op)
 static int omap_sham_update(struct ahash_request *req)
 {
        struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+       struct omap_sham_dev *dd = ctx->dd;
+       int bs = get_block_size(ctx);
 
        if (!req->nbytes)
                return 0;
@@ -1023,10 +1090,12 @@ static int omap_sham_update(struct ahash_request *req)
                        */
                        omap_sham_append_sg(ctx);
                        return 0;
-               } else if (ctx->bufcnt + ctx->total <= SHA1_MD5_BLOCK_SIZE) {
+               } else if ((ctx->bufcnt + ctx->total <= bs) ||
+                          dd->polling_mode) {
                        /*
-                       * faster to use CPU for short transfers
-                       */
+                        * faster to use CPU for short transfers or
+                        * use cpu when dma is not present.
+                        */
                        ctx->flags |= BIT(FLAGS_CPU);
                }
        } else if (ctx->bufcnt + ctx->total < ctx->buflen) {
@@ -1214,6 +1283,16 @@ static int omap_sham_cra_md5_init(struct crypto_tfm *tfm)
        return omap_sham_cra_init_alg(tfm, "md5");
 }
 
+static int omap_sham_cra_sha384_init(struct crypto_tfm *tfm)
+{
+       return omap_sham_cra_init_alg(tfm, "sha384");
+}
+
+static int omap_sham_cra_sha512_init(struct crypto_tfm *tfm)
+{
+       return omap_sham_cra_init_alg(tfm, "sha512");
+}
+
 static void omap_sham_cra_exit(struct crypto_tfm *tfm)
 {
        struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm);
@@ -1422,6 +1501,101 @@ static struct ahash_alg algs_sha224_sha256[] = {
 },
 };
 
+static struct ahash_alg algs_sha384_sha512[] = {
+{
+       .init           = omap_sham_init,
+       .update         = omap_sham_update,
+       .final          = omap_sham_final,
+       .finup          = omap_sham_finup,
+       .digest         = omap_sham_digest,
+       .halg.digestsize        = SHA384_DIGEST_SIZE,
+       .halg.base      = {
+               .cra_name               = "sha384",
+               .cra_driver_name        = "omap-sha384",
+               .cra_priority           = 100,
+               .cra_flags              = CRYPTO_ALG_TYPE_AHASH |
+                                               CRYPTO_ALG_ASYNC |
+                                               CRYPTO_ALG_NEED_FALLBACK,
+               .cra_blocksize          = SHA384_BLOCK_SIZE,
+               .cra_ctxsize            = sizeof(struct omap_sham_ctx),
+               .cra_alignmask          = 0,
+               .cra_module             = THIS_MODULE,
+               .cra_init               = omap_sham_cra_init,
+               .cra_exit               = omap_sham_cra_exit,
+       }
+},
+{
+       .init           = omap_sham_init,
+       .update         = omap_sham_update,
+       .final          = omap_sham_final,
+       .finup          = omap_sham_finup,
+       .digest         = omap_sham_digest,
+       .halg.digestsize        = SHA512_DIGEST_SIZE,
+       .halg.base      = {
+               .cra_name               = "sha512",
+               .cra_driver_name        = "omap-sha512",
+               .cra_priority           = 100,
+               .cra_flags              = CRYPTO_ALG_TYPE_AHASH |
+                                               CRYPTO_ALG_ASYNC |
+                                               CRYPTO_ALG_NEED_FALLBACK,
+               .cra_blocksize          = SHA512_BLOCK_SIZE,
+               .cra_ctxsize            = sizeof(struct omap_sham_ctx),
+               .cra_alignmask          = 0,
+               .cra_module             = THIS_MODULE,
+               .cra_init               = omap_sham_cra_init,
+               .cra_exit               = omap_sham_cra_exit,
+       }
+},
+{
+       .init           = omap_sham_init,
+       .update         = omap_sham_update,
+       .final          = omap_sham_final,
+       .finup          = omap_sham_finup,
+       .digest         = omap_sham_digest,
+       .setkey         = omap_sham_setkey,
+       .halg.digestsize        = SHA384_DIGEST_SIZE,
+       .halg.base      = {
+               .cra_name               = "hmac(sha384)",
+               .cra_driver_name        = "omap-hmac-sha384",
+               .cra_priority           = 100,
+               .cra_flags              = CRYPTO_ALG_TYPE_AHASH |
+                                               CRYPTO_ALG_ASYNC |
+                                               CRYPTO_ALG_NEED_FALLBACK,
+               .cra_blocksize          = SHA384_BLOCK_SIZE,
+               .cra_ctxsize            = sizeof(struct omap_sham_ctx) +
+                                       sizeof(struct omap_sham_hmac_ctx),
+               .cra_alignmask          = OMAP_ALIGN_MASK,
+               .cra_module             = THIS_MODULE,
+               .cra_init               = omap_sham_cra_sha384_init,
+               .cra_exit               = omap_sham_cra_exit,
+       }
+},
+{
+       .init           = omap_sham_init,
+       .update         = omap_sham_update,
+       .final          = omap_sham_final,
+       .finup          = omap_sham_finup,
+       .digest         = omap_sham_digest,
+       .setkey         = omap_sham_setkey,
+       .halg.digestsize        = SHA512_DIGEST_SIZE,
+       .halg.base      = {
+               .cra_name               = "hmac(sha512)",
+               .cra_driver_name        = "omap-hmac-sha512",
+               .cra_priority           = 100,
+               .cra_flags              = CRYPTO_ALG_TYPE_AHASH |
+                                               CRYPTO_ALG_ASYNC |
+                                               CRYPTO_ALG_NEED_FALLBACK,
+               .cra_blocksize          = SHA512_BLOCK_SIZE,
+               .cra_ctxsize            = sizeof(struct omap_sham_ctx) +
+                                       sizeof(struct omap_sham_hmac_ctx),
+               .cra_alignmask          = OMAP_ALIGN_MASK,
+               .cra_module             = THIS_MODULE,
+               .cra_init               = omap_sham_cra_sha512_init,
+               .cra_exit               = omap_sham_cra_exit,
+       }
+},
+};
+
 static void omap_sham_done_task(unsigned long data)
 {
        struct omap_sham_dev *dd = (struct omap_sham_dev *)data;
@@ -1433,8 +1607,12 @@ static void omap_sham_done_task(unsigned long data)
        }
 
        if (test_bit(FLAGS_CPU, &dd->flags)) {
-               if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags))
-                       goto finish;
+               if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags)) {
+                       /* hash or semi-hash ready */
+                       err = omap_sham_update_cpu(dd);
+                       if (err != -EINPROGRESS)
+                               goto finish;
+               }
        } else if (test_bit(FLAGS_DMA_READY, &dd->flags)) {
                if (test_and_clear_bit(FLAGS_DMA_ACTIVE, &dd->flags)) {
                        omap_sham_update_dma_stop(dd);
@@ -1548,11 +1726,54 @@ static const struct omap_sham_pdata omap_sham_pdata_omap4 = {
        .poll_irq       = omap_sham_poll_irq_omap4,
        .intr_hdlr      = omap_sham_irq_omap4,
        .idigest_ofs    = 0x020,
+       .odigest_ofs    = 0x0,
        .din_ofs        = 0x080,
        .digcnt_ofs     = 0x040,
        .rev_ofs        = 0x100,
        .mask_ofs       = 0x110,
        .sysstatus_ofs  = 0x114,
+       .mode_ofs       = 0x44,
+       .length_ofs     = 0x48,
+       .major_mask     = 0x0700,
+       .major_shift    = 8,
+       .minor_mask     = 0x003f,
+       .minor_shift    = 0,
+};
+
+static struct omap_sham_algs_info omap_sham_algs_info_omap5[] = {
+       {
+               .algs_list      = algs_sha1_md5,
+               .size           = ARRAY_SIZE(algs_sha1_md5),
+       },
+       {
+               .algs_list      = algs_sha224_sha256,
+               .size           = ARRAY_SIZE(algs_sha224_sha256),
+       },
+       {
+               .algs_list      = algs_sha384_sha512,
+               .size           = ARRAY_SIZE(algs_sha384_sha512),
+       },
+};
+
+static const struct omap_sham_pdata omap_sham_pdata_omap5 = {
+       .algs_info      = omap_sham_algs_info_omap5,
+       .algs_info_size = ARRAY_SIZE(omap_sham_algs_info_omap5),
+       .flags          = BIT(FLAGS_AUTO_XOR),
+       .digest_size    = SHA512_DIGEST_SIZE,
+       .copy_hash      = omap_sham_copy_hash_omap4,
+       .write_ctrl     = omap_sham_write_ctrl_omap4,
+       .trigger        = omap_sham_trigger_omap4,
+       .poll_irq       = omap_sham_poll_irq_omap4,
+       .intr_hdlr      = omap_sham_irq_omap4,
+       .idigest_ofs    = 0x240,
+       .odigest_ofs    = 0x200,
+       .din_ofs        = 0x080,
+       .digcnt_ofs     = 0x280,
+       .rev_ofs        = 0x100,
+       .mask_ofs       = 0x110,
+       .sysstatus_ofs  = 0x114,
+       .mode_ofs       = 0x284,
+       .length_ofs     = 0x288,
        .major_mask     = 0x0700,
        .major_shift    = 8,
        .minor_mask     = 0x003f,
@@ -1568,6 +1789,10 @@ static const struct of_device_id omap_sham_of_match[] = {
                .compatible     = "ti,omap4-sham",
                .data           = &omap_sham_pdata_omap4,
        },
+       {
+               .compatible     = "ti,omap5-sham",
+               .data           = &omap_sham_pdata_omap5,
+       },
        {},
 };
 MODULE_DEVICE_TABLE(of, omap_sham_of_match);
@@ -1667,7 +1892,7 @@ static int omap_sham_probe(struct platform_device *pdev)
        int err, i, j;
        u32 rev;
 
-       dd = kzalloc(sizeof(struct omap_sham_dev), GFP_KERNEL);
+       dd = devm_kzalloc(dev, sizeof(struct omap_sham_dev), GFP_KERNEL);
        if (dd == NULL) {
                dev_err(dev, "unable to alloc data struct.\n");
                err = -ENOMEM;
@@ -1684,20 +1909,21 @@ static int omap_sham_probe(struct platform_device *pdev)
        err = (dev->of_node) ? omap_sham_get_res_of(dd, dev, &res) :
                               omap_sham_get_res_pdev(dd, pdev, &res);
        if (err)
-               goto res_err;
+               goto data_err;
 
        dd->io_base = devm_ioremap_resource(dev, &res);
        if (IS_ERR(dd->io_base)) {
                err = PTR_ERR(dd->io_base);
-               goto res_err;
+               goto data_err;
        }
        dd->phys_base = res.start;
 
-       err = request_irq(dd->irq, dd->pdata->intr_hdlr, IRQF_TRIGGER_LOW,
-                         dev_name(dev), dd);
+       err = devm_request_irq(dev, dd->irq, dd->pdata->intr_hdlr,
+                              IRQF_TRIGGER_NONE, dev_name(dev), dd);
        if (err) {
-               dev_err(dev, "unable to request irq.\n");
-               goto res_err;
+               dev_err(dev, "unable to request irq %d, err = %d\n",
+                       dd->irq, err);
+               goto data_err;
        }
 
        dma_cap_zero(mask);
@@ -1706,10 +1932,8 @@ static int omap_sham_probe(struct platform_device *pdev)
        dd->dma_lch = dma_request_slave_channel_compat(mask, omap_dma_filter_fn,
                                                       &dd->dma, dev, "rx");
        if (!dd->dma_lch) {
-               dev_err(dev, "unable to obtain RX DMA engine channel %u\n",
-                       dd->dma);
-               err = -ENXIO;
-               goto dma_err;
+               dd->polling_mode = 1;
+               dev_dbg(dev, "using polling mode instead of dma\n");
        }
 
        dd->flags |= dd->pdata->flags;
@@ -1747,11 +1971,6 @@ err_algs:
                                        &dd->pdata->algs_info[i].algs_list[j]);
        pm_runtime_disable(dev);
        dma_release_channel(dd->dma_lch);
-dma_err:
-       free_irq(dd->irq, dd);
-res_err:
-       kfree(dd);
-       dd = NULL;
 data_err:
        dev_err(dev, "initialization failed.\n");
 
@@ -1776,9 +1995,6 @@ static int omap_sham_remove(struct platform_device *pdev)
        tasklet_kill(&dd->done_task);
        pm_runtime_disable(&pdev->dev);
        dma_release_channel(dd->dma_lch);
-       free_irq(dd->irq, dd);
-       kfree(dd);
-       dd = NULL;
 
        return 0;
 }
index c3dc1c04a5df6f39966b3ca4a9c9874c001035ec..d7bb8bac36e973944334409760dc56c37eb02be1 100644 (file)
@@ -417,7 +417,7 @@ static void sahara_aes_done_task(unsigned long data)
        dev->req->base.complete(&dev->req->base, dev->error);
 }
 
-void sahara_watchdog(unsigned long data)
+static void sahara_watchdog(unsigned long data)
 {
        struct sahara_dev *dev = (struct sahara_dev *)data;
        unsigned int err = sahara_read(dev, SAHARA_REG_ERRSTATUS);
@@ -955,7 +955,7 @@ static int sahara_probe(struct platform_device *pdev)
        dev->hw_link[0] = dma_alloc_coherent(&pdev->dev,
                        SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link),
                        &dev->hw_phys_link[0], GFP_KERNEL);
-       if (!dev->hw_link) {
+       if (!dev->hw_link[0]) {
                dev_err(&pdev->dev, "Could not allocate hw links\n");
                err = -ENOMEM;
                goto err_link;
index 85ea7525fa36242bda4cb21c37ce788981d3b502..2d58da972ae279f44b91f05dd472f6287c13f081 100644 (file)
@@ -275,7 +275,7 @@ static int aes_start_crypt(struct tegra_aes_dev *dd, u32 in_addr, u32 out_addr,
                        value = aes_readl(dd, TEGRA_AES_INTR_STATUS);
                        eng_busy = value & TEGRA_AES_ENGINE_BUSY_FIELD;
                        icq_empty = value & TEGRA_AES_ICQ_EMPTY_FIELD;
-               } while (eng_busy & (!icq_empty));
+               } while (eng_busy && !icq_empty);
                aes_writel(dd, cmdq[i], TEGRA_AES_ICMDQUE_WR);
        }
 
@@ -365,7 +365,7 @@ static int aes_set_key(struct tegra_aes_dev *dd)
                eng_busy = value & TEGRA_AES_ENGINE_BUSY_FIELD;
                icq_empty = value & TEGRA_AES_ICQ_EMPTY_FIELD;
                dma_busy = value & TEGRA_AES_DMA_BUSY_FIELD;
-       } while (eng_busy & (!icq_empty) & dma_busy);
+       } while (eng_busy && !icq_empty && dma_busy);
 
        /* settable command to get key into internal registers */
        value = CMD_SETTABLE << CMDQ_OPCODE_SHIFT |
@@ -379,7 +379,7 @@ static int aes_set_key(struct tegra_aes_dev *dd)
                value = aes_readl(dd, TEGRA_AES_INTR_STATUS);
                eng_busy = value & TEGRA_AES_ENGINE_BUSY_FIELD;
                icq_empty = value & TEGRA_AES_ICQ_EMPTY_FIELD;
-       } while (eng_busy & (!icq_empty));
+       } while (eng_busy && !icq_empty);
 
        return 0;
 }
index 496ae6aae3164309b9cf99f0d7a1c35af59f4755..1c73f4fbc2526027f96b2323b21b0c4c0c1e1b0f 100644 (file)
@@ -11,6 +11,8 @@
  * License terms: GNU General Public License (GPL) version 2
  */
 
+#define pr_fmt(fmt) "hashX hashX: " fmt
+
 #include <linux/clk.h>
 #include <linux/device.h>
 #include <linux/err.h>
@@ -35,8 +37,6 @@
 
 #include "hash_alg.h"
 
-#define DEV_DBG_NAME "hashX hashX:"
-
 static int hash_mode;
 module_param(hash_mode, int, 0);
 MODULE_PARM_DESC(hash_mode, "CPU or DMA mode. CPU = 0 (default), DMA = 1");
@@ -44,13 +44,13 @@ MODULE_PARM_DESC(hash_mode, "CPU or DMA mode. CPU = 0 (default), DMA = 1");
 /**
  * Pre-calculated empty message digests.
  */
-static u8 zero_message_hash_sha1[SHA1_DIGEST_SIZE] = {
+static const u8 zero_message_hash_sha1[SHA1_DIGEST_SIZE] = {
        0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d,
        0x32, 0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90,
        0xaf, 0xd8, 0x07, 0x09
 };
 
-static u8 zero_message_hash_sha256[SHA256_DIGEST_SIZE] = {
+static const u8 zero_message_hash_sha256[SHA256_DIGEST_SIZE] = {
        0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14,
        0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24,
        0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c,
@@ -58,14 +58,14 @@ static u8 zero_message_hash_sha256[SHA256_DIGEST_SIZE] = {
 };
 
 /* HMAC-SHA1, no key */
-static u8 zero_message_hmac_sha1[SHA1_DIGEST_SIZE] = {
+static const u8 zero_message_hmac_sha1[SHA1_DIGEST_SIZE] = {
        0xfb, 0xdb, 0x1d, 0x1b, 0x18, 0xaa, 0x6c, 0x08,
        0x32, 0x4b, 0x7d, 0x64, 0xb7, 0x1f, 0xb7, 0x63,
        0x70, 0x69, 0x0e, 0x1d
 };
 
 /* HMAC-SHA256, no key */
-static u8 zero_message_hmac_sha256[SHA256_DIGEST_SIZE] = {
+static const u8 zero_message_hmac_sha256[SHA256_DIGEST_SIZE] = {
        0xb6, 0x13, 0x67, 0x9a, 0x08, 0x14, 0xd9, 0xec,
        0x77, 0x2f, 0x95, 0xd7, 0x78, 0xc3, 0x5f, 0xc5,
        0xff, 0x16, 0x97, 0xc4, 0x93, 0x71, 0x56, 0x53,
@@ -97,7 +97,7 @@ static struct hash_driver_data        driver_data;
  *
  */
 static void hash_messagepad(struct hash_device_data *device_data,
-               const u32 *message, u8 index_bytes);
+                           const u32 *message, u8 index_bytes);
 
 /**
  * release_hash_device - Releases a previously allocated hash device.
@@ -119,7 +119,7 @@ static void release_hash_device(struct hash_device_data *device_data)
 }
 
 static void hash_dma_setup_channel(struct hash_device_data *device_data,
-                               struct device *dev)
+                                  struct device *dev)
 {
        struct hash_platform_data *platform_data = dev->platform_data;
        struct dma_slave_config conf = {
@@ -127,7 +127,7 @@ static void hash_dma_setup_channel(struct hash_device_data *device_data,
                .dst_addr = device_data->phybase + HASH_DMA_FIFO,
                .dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES,
                .dst_maxburst = 16,
-        };
+       };
 
        dma_cap_zero(device_data->dma.mask);
        dma_cap_set(DMA_SLAVE, device_data->dma.mask);
@@ -135,8 +135,8 @@ static void hash_dma_setup_channel(struct hash_device_data *device_data,
        device_data->dma.cfg_mem2hash = platform_data->mem_to_engine;
        device_data->dma.chan_mem2hash =
                dma_request_channel(device_data->dma.mask,
-                               platform_data->dma_filter,
-                               device_data->dma.cfg_mem2hash);
+                                   platform_data->dma_filter,
+                                   device_data->dma.cfg_mem2hash);
 
        dmaengine_slave_config(device_data->dma.chan_mem2hash, &conf);
 
@@ -145,21 +145,21 @@ static void hash_dma_setup_channel(struct hash_device_data *device_data,
 
 static void hash_dma_callback(void *data)
 {
-       struct hash_ctx *ctx = (struct hash_ctx *) data;
+       struct hash_ctx *ctx = data;
 
        complete(&ctx->device->dma.complete);
 }
 
 static int hash_set_dma_transfer(struct hash_ctx *ctx, struct scatterlist *sg,
-               int len, enum dma_data_direction direction)
+                                int len, enum dma_data_direction direction)
 {
        struct dma_async_tx_descriptor *desc = NULL;
        struct dma_chan *channel = NULL;
        dma_cookie_t cookie;
 
        if (direction != DMA_TO_DEVICE) {
-               dev_err(ctx->device->dev, "[%s] Invalid DMA direction",
-                               __func__);
+               dev_err(ctx->device->dev, "%s: Invalid DMA direction\n",
+                       __func__);
                return -EFAULT;
        }
 
@@ -172,20 +172,19 @@ static int hash_set_dma_transfer(struct hash_ctx *ctx, struct scatterlist *sg,
                        direction);
 
        if (!ctx->device->dma.sg_len) {
-               dev_err(ctx->device->dev,
-                               "[%s]: Could not map the sg list (TO_DEVICE)",
-                               __func__);
+               dev_err(ctx->device->dev, "%s: Could not map the sg list (TO_DEVICE)\n",
+                       __func__);
                return -EFAULT;
        }
 
-       dev_dbg(ctx->device->dev, "[%s]: Setting up DMA for buffer "
-                       "(TO_DEVICE)", __func__);
+       dev_dbg(ctx->device->dev, "%s: Setting up DMA for buffer (TO_DEVICE)\n",
+               __func__);
        desc = dmaengine_prep_slave_sg(channel,
                        ctx->device->dma.sg, ctx->device->dma.sg_len,
                        direction, DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
        if (!desc) {
                dev_err(ctx->device->dev,
-                       "[%s]: device_prep_slave_sg() failed!", __func__);
+                       "%s: device_prep_slave_sg() failed!\n", __func__);
                return -EFAULT;
        }
 
@@ -205,17 +204,16 @@ static void hash_dma_done(struct hash_ctx *ctx)
        chan = ctx->device->dma.chan_mem2hash;
        dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0);
        dma_unmap_sg(chan->device->dev, ctx->device->dma.sg,
-                       ctx->device->dma.sg_len, DMA_TO_DEVICE);
-
+                    ctx->device->dma.sg_len, DMA_TO_DEVICE);
 }
 
 static int hash_dma_write(struct hash_ctx *ctx,
-               struct scatterlist *sg, int len)
+                         struct scatterlist *sg, int len)
 {
        int error = hash_set_dma_transfer(ctx, sg, len, DMA_TO_DEVICE);
        if (error) {
-               dev_dbg(ctx->device->dev, "[%s]: hash_set_dma_transfer() "
-                       "failed", __func__);
+               dev_dbg(ctx->device->dev,
+                       "%s: hash_set_dma_transfer() failed\n", __func__);
                return error;
        }
 
@@ -245,19 +243,18 @@ static int get_empty_message_digest(
        if (HASH_OPER_MODE_HASH == ctx->config.oper_mode) {
                if (HASH_ALGO_SHA1 == ctx->config.algorithm) {
                        memcpy(zero_hash, &zero_message_hash_sha1[0],
-                                       SHA1_DIGEST_SIZE);
+                              SHA1_DIGEST_SIZE);
                        *zero_hash_size = SHA1_DIGEST_SIZE;
                        *zero_digest = true;
                } else if (HASH_ALGO_SHA256 ==
                                ctx->config.algorithm) {
                        memcpy(zero_hash, &zero_message_hash_sha256[0],
-                                       SHA256_DIGEST_SIZE);
+                              SHA256_DIGEST_SIZE);
                        *zero_hash_size = SHA256_DIGEST_SIZE;
                        *zero_digest = true;
                } else {
-                       dev_err(device_data->dev, "[%s] "
-                                       "Incorrect algorithm!"
-                                       , __func__);
+                       dev_err(device_data->dev, "%s: Incorrect algorithm!\n",
+                               __func__);
                        ret = -EINVAL;
                        goto out;
                }
@@ -265,25 +262,24 @@ static int get_empty_message_digest(
                if (!ctx->keylen) {
                        if (HASH_ALGO_SHA1 == ctx->config.algorithm) {
                                memcpy(zero_hash, &zero_message_hmac_sha1[0],
-                                               SHA1_DIGEST_SIZE);
+                                      SHA1_DIGEST_SIZE);
                                *zero_hash_size = SHA1_DIGEST_SIZE;
                                *zero_digest = true;
                        } else if (HASH_ALGO_SHA256 == ctx->config.algorithm) {
                                memcpy(zero_hash, &zero_message_hmac_sha256[0],
-                                               SHA256_DIGEST_SIZE);
+                                      SHA256_DIGEST_SIZE);
                                *zero_hash_size = SHA256_DIGEST_SIZE;
                                *zero_digest = true;
                        } else {
-                               dev_err(device_data->dev, "[%s] "
-                                               "Incorrect algorithm!"
-                                               , __func__);
+                               dev_err(device_data->dev, "%s: Incorrect algorithm!\n",
+                                       __func__);
                                ret = -EINVAL;
                                goto out;
                        }
                } else {
-                       dev_dbg(device_data->dev, "[%s] Continue hash "
-                                       "calculation, since hmac key avalable",
-                                       __func__);
+                       dev_dbg(device_data->dev,
+                               "%s: Continue hash calculation, since hmac key available\n",
+                               __func__);
                }
        }
 out:
@@ -299,9 +295,8 @@ out:
  * This function request for disabling power (regulator) and clock,
  * and could also save current hw state.
  */
-static int hash_disable_power(
-               struct hash_device_data *device_data,
-               bool                    save_device_state)
+static int hash_disable_power(struct hash_device_data *device_data,
+                             bool save_device_state)
 {
        int ret = 0;
        struct device *dev = device_data->dev;
@@ -319,7 +314,7 @@ static int hash_disable_power(
        clk_disable(device_data->clk);
        ret = regulator_disable(device_data->regulator);
        if (ret)
-               dev_err(dev, "[%s] regulator_disable() failed!", __func__);
+               dev_err(dev, "%s: regulator_disable() failed!\n", __func__);
 
        device_data->power_state = false;
 
@@ -337,9 +332,8 @@ out:
  * This function request for enabling power (regulator) and clock,
  * and could also restore a previously saved hw state.
  */
-static int hash_enable_power(
-               struct hash_device_data *device_data,
-               bool                    restore_device_state)
+static int hash_enable_power(struct hash_device_data *device_data,
+                            bool restore_device_state)
 {
        int ret = 0;
        struct device *dev = device_data->dev;
@@ -348,14 +342,13 @@ static int hash_enable_power(
        if (!device_data->power_state) {
                ret = regulator_enable(device_data->regulator);
                if (ret) {
-                       dev_err(dev, "[%s]: regulator_enable() failed!",
-                                       __func__);
+                       dev_err(dev, "%s: regulator_enable() failed!\n",
+                               __func__);
                        goto out;
                }
                ret = clk_enable(device_data->clk);
                if (ret) {
-                       dev_err(dev, "[%s]: clk_enable() failed!",
-                                       __func__);
+                       dev_err(dev, "%s: clk_enable() failed!\n", __func__);
                        ret = regulator_disable(
                                        device_data->regulator);
                        goto out;
@@ -366,8 +359,7 @@ static int hash_enable_power(
        if (device_data->restore_dev_state) {
                if (restore_device_state) {
                        device_data->restore_dev_state = false;
-                       hash_resume_state(device_data,
-                               &device_data->state);
+                       hash_resume_state(device_data, &device_data->state);
                }
        }
 out:
@@ -447,7 +439,7 @@ static int hash_get_device_data(struct hash_ctx *ctx,
  * spec or due to a bug in the hw.
  */
 static void hash_hw_write_key(struct hash_device_data *device_data,
-               const u8 *key, unsigned int keylen)
+                             const u8 *key, unsigned int keylen)
 {
        u32 word = 0;
        int nwords = 1;
@@ -491,14 +483,14 @@ static void hash_hw_write_key(struct hash_device_data *device_data,
  * calculation.
  */
 static int init_hash_hw(struct hash_device_data *device_data,
-               struct hash_ctx *ctx)
+                       struct hash_ctx *ctx)
 {
        int ret = 0;
 
        ret = hash_setconfiguration(device_data, &ctx->config);
        if (ret) {
-               dev_err(device_data->dev, "[%s] hash_setconfiguration() "
-                               "failed!", __func__);
+               dev_err(device_data->dev, "%s: hash_setconfiguration() failed!\n",
+                       __func__);
                return ret;
        }
 
@@ -528,9 +520,8 @@ static int hash_get_nents(struct scatterlist *sg, int size, bool *aligned)
                size -= sg->length;
 
                /* hash_set_dma_transfer will align last nent */
-               if ((aligned && !IS_ALIGNED(sg->offset, HASH_DMA_ALIGN_SIZE))
-                       || (!IS_ALIGNED(sg->length, HASH_DMA_ALIGN_SIZE) &&
-                               size > 0))
+               if ((aligned && !IS_ALIGNED(sg->offset, HASH_DMA_ALIGN_SIZE)) ||
+                   (!IS_ALIGNED(sg->length, HASH_DMA_ALIGN_SIZE) && size > 0))
                        aligned_data = false;
 
                sg = sg_next(sg);
@@ -585,21 +576,17 @@ static int hash_init(struct ahash_request *req)
                if (req->nbytes < HASH_DMA_ALIGN_SIZE) {
                        req_ctx->dma_mode = false; /* Don't use DMA */
 
-                       pr_debug(DEV_DBG_NAME " [%s] DMA mode, but direct "
-                                       "to CPU mode for data size < %d",
-                                       __func__, HASH_DMA_ALIGN_SIZE);
+                       pr_debug("%s: DMA mode, but direct to CPU mode for data size < %d\n",
+                                __func__, HASH_DMA_ALIGN_SIZE);
                } else {
                        if (req->nbytes >= HASH_DMA_PERFORMANCE_MIN_SIZE &&
-                                       hash_dma_valid_data(req->src,
-                                               req->nbytes)) {
+                           hash_dma_valid_data(req->src, req->nbytes)) {
                                req_ctx->dma_mode = true;
                        } else {
                                req_ctx->dma_mode = false;
-                               pr_debug(DEV_DBG_NAME " [%s] DMA mode, but use"
-                                               " CPU mode for datalength < %d"
-                                               " or non-aligned data, except "
-                                               "in last nent", __func__,
-                                               HASH_DMA_PERFORMANCE_MIN_SIZE);
+                               pr_debug("%s: DMA mode, but use CPU mode for datalength < %d or non-aligned data, except in last nent\n",
+                                        __func__,
+                                        HASH_DMA_PERFORMANCE_MIN_SIZE);
                        }
                }
        }
@@ -614,9 +601,8 @@ static int hash_init(struct ahash_request *req)
  *                     the HASH hardware.
  *
  */
-static void hash_processblock(
-               struct hash_device_data *device_data,
-               const u32 *message, int length)
+static void hash_processblock(struct hash_device_data *device_data,
+                             const u32 *message, int length)
 {
        int len = length / HASH_BYTES_PER_WORD;
        /*
@@ -641,7 +627,7 @@ static void hash_processblock(
  *
  */
 static void hash_messagepad(struct hash_device_data *device_data,
-               const u32 *message, u8 index_bytes)
+                           const u32 *message, u8 index_bytes)
 {
        int nwords = 1;
 
@@ -666,15 +652,13 @@ static void hash_messagepad(struct hash_device_data *device_data,
 
        /* num_of_bytes == 0 => NBLW <- 0 (32 bits valid in DATAIN) */
        HASH_SET_NBLW(index_bytes * 8);
-       dev_dbg(device_data->dev, "[%s] DIN=0x%08x NBLW=%d", __func__,
-                       readl_relaxed(&device_data->base->din),
-                       (int)(readl_relaxed(&device_data->base->str) &
-                               HASH_STR_NBLW_MASK));
+       dev_dbg(device_data->dev, "%s: DIN=0x%08x NBLW=%lu\n",
+               __func__, readl_relaxed(&device_data->base->din),
+               readl_relaxed(&device_data->base->str) & HASH_STR_NBLW_MASK);
        HASH_SET_DCAL;
-       dev_dbg(device_data->dev, "[%s] after dcal -> DIN=0x%08x NBLW=%d",
-                       __func__, readl_relaxed(&device_data->base->din),
-                       (int)(readl_relaxed(&device_data->base->str) &
-                               HASH_STR_NBLW_MASK));
+       dev_dbg(device_data->dev, "%s: after dcal -> DIN=0x%08x NBLW=%lu\n",
+               __func__, readl_relaxed(&device_data->base->din),
+               readl_relaxed(&device_data->base->str) & HASH_STR_NBLW_MASK);
 
        while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
                cpu_relax();
@@ -704,7 +688,7 @@ static void hash_incrementlength(struct hash_req_ctx *ctx, u32 incr)
  * @config:            Pointer to a configuration structure.
  */
 int hash_setconfiguration(struct hash_device_data *device_data,
-               struct hash_config *config)
+                         struct hash_config *config)
 {
        int ret = 0;
 
@@ -731,8 +715,8 @@ int hash_setconfiguration(struct hash_device_data *device_data,
                break;
 
        default:
-               dev_err(device_data->dev, "[%s] Incorrect algorithm.",
-                               __func__);
+               dev_err(device_data->dev, "%s: Incorrect algorithm\n",
+                       __func__);
                return -EPERM;
        }
 
@@ -744,23 +728,22 @@ int hash_setconfiguration(struct hash_device_data *device_data,
                HASH_CLEAR_BITS(&device_data->base->cr,
                                HASH_CR_MODE_MASK);
        else if (HASH_OPER_MODE_HMAC == config->oper_mode) {
-               HASH_SET_BITS(&device_data->base->cr,
-                               HASH_CR_MODE_MASK);
+               HASH_SET_BITS(&device_data->base->cr, HASH_CR_MODE_MASK);
                if (device_data->current_ctx->keylen > HASH_BLOCK_SIZE) {
                        /* Truncate key to blocksize */
-                       dev_dbg(device_data->dev, "[%s] LKEY set", __func__);
+                       dev_dbg(device_data->dev, "%s: LKEY set\n", __func__);
                        HASH_SET_BITS(&device_data->base->cr,
-                                       HASH_CR_LKEY_MASK);
+                                     HASH_CR_LKEY_MASK);
                } else {
-                       dev_dbg(device_data->dev, "[%s] LKEY cleared",
-                                       __func__);
+                       dev_dbg(device_data->dev, "%s: LKEY cleared\n",
+                               __func__);
                        HASH_CLEAR_BITS(&device_data->base->cr,
                                        HASH_CR_LKEY_MASK);
                }
        } else {        /* Wrong hash mode */
                ret = -EPERM;
-               dev_err(device_data->dev, "[%s] HASH_INVALID_PARAMETER!",
-                               __func__);
+               dev_err(device_data->dev, "%s: HASH_INVALID_PARAMETER!\n",
+                       __func__);
        }
        return ret;
 }
@@ -793,8 +776,9 @@ void hash_begin(struct hash_device_data *device_data, struct hash_ctx *ctx)
 }
 
 static int hash_process_data(struct hash_device_data *device_data,
-               struct hash_ctx *ctx, struct hash_req_ctx *req_ctx,
-               int msg_length, u8 *data_buffer, u8 *buffer, u8 *index)
+                            struct hash_ctx *ctx, struct hash_req_ctx *req_ctx,
+                            int msg_length, u8 *data_buffer, u8 *buffer,
+                            u8 *index)
 {
        int ret = 0;
        u32 count;
@@ -809,24 +793,23 @@ static int hash_process_data(struct hash_device_data *device_data,
                        msg_length = 0;
                } else {
                        if (req_ctx->updated) {
-
                                ret = hash_resume_state(device_data,
                                                &device_data->state);
                                memmove(req_ctx->state.buffer,
-                                               device_data->state.buffer,
-                                               HASH_BLOCK_SIZE / sizeof(u32));
+                                       device_data->state.buffer,
+                                       HASH_BLOCK_SIZE / sizeof(u32));
                                if (ret) {
-                                       dev_err(device_data->dev, "[%s] "
-                                                       "hash_resume_state()"
-                                                       " failed!", __func__);
+                                       dev_err(device_data->dev,
+                                               "%s: hash_resume_state() failed!\n",
+                                               __func__);
                                        goto out;
                                }
                        } else {
                                ret = init_hash_hw(device_data, ctx);
                                if (ret) {
-                                       dev_err(device_data->dev, "[%s] "
-                                                       "init_hash_hw()"
-                                                       " failed!", __func__);
+                                       dev_err(device_data->dev,
+                                               "%s: init_hash_hw() failed!\n",
+                                               __func__);
                                        goto out;
                                }
                                req_ctx->updated = 1;
@@ -838,22 +821,21 @@ static int hash_process_data(struct hash_device_data *device_data,
                         * HW peripheral, otherwise we first copy data
                         * to a local buffer
                         */
-                       if ((0 == (((u32)data_buffer) % 4))
-                                       && (0 == *index))
+                       if ((0 == (((u32)data_buffer) % 4)) &&
+                           (0 == *index))
                                hash_processblock(device_data,
-                                               (const u32 *)
-                                               data_buffer, HASH_BLOCK_SIZE);
+                                                 (const u32 *)data_buffer,
+                                                 HASH_BLOCK_SIZE);
                        else {
-                               for (count = 0; count <
-                                               (u32)(HASH_BLOCK_SIZE -
-                                                       *index);
-                                               count++) {
+                               for (count = 0;
+                                    count < (u32)(HASH_BLOCK_SIZE - *index);
+                                    count++) {
                                        buffer[*index + count] =
                                                *(data_buffer + count);
                                }
                                hash_processblock(device_data,
-                                               (const u32 *)buffer,
-                                               HASH_BLOCK_SIZE);
+                                                 (const u32 *)buffer,
+                                                 HASH_BLOCK_SIZE);
                        }
                        hash_incrementlength(req_ctx, HASH_BLOCK_SIZE);
                        data_buffer += (HASH_BLOCK_SIZE - *index);
@@ -865,12 +847,11 @@ static int hash_process_data(struct hash_device_data *device_data,
                                        &device_data->state);
 
                        memmove(device_data->state.buffer,
-                                       req_ctx->state.buffer,
-                                       HASH_BLOCK_SIZE / sizeof(u32));
+                               req_ctx->state.buffer,
+                               HASH_BLOCK_SIZE / sizeof(u32));
                        if (ret) {
-                               dev_err(device_data->dev, "[%s] "
-                                               "hash_save_state()"
-                                               " failed!", __func__);
+                               dev_err(device_data->dev, "%s: hash_save_state() failed!\n",
+                                       __func__);
                                goto out;
                        }
                }
@@ -898,25 +879,24 @@ static int hash_dma_final(struct ahash_request *req)
        if (ret)
                return ret;
 
-       dev_dbg(device_data->dev, "[%s] (ctx=0x%x)!", __func__, (u32) ctx);
+       dev_dbg(device_data->dev, "%s: (ctx=0x%x)!\n", __func__, (u32) ctx);
 
        if (req_ctx->updated) {
                ret = hash_resume_state(device_data, &device_data->state);
 
                if (ret) {
-                       dev_err(device_data->dev, "[%s] hash_resume_state() "
-                                       "failed!", __func__);
+                       dev_err(device_data->dev, "%s: hash_resume_state() failed!\n",
+                               __func__);
                        goto out;
                }
-
        }
 
        if (!req_ctx->updated) {
                ret = hash_setconfiguration(device_data, &ctx->config);
                if (ret) {
-                       dev_err(device_data->dev, "[%s] "
-                                       "hash_setconfiguration() failed!",
-                                       __func__);
+                       dev_err(device_data->dev,
+                               "%s: hash_setconfiguration() failed!\n",
+                               __func__);
                        goto out;
                }
 
@@ -926,9 +906,9 @@ static int hash_dma_final(struct ahash_request *req)
                                        HASH_CR_DMAE_MASK);
                } else {
                        HASH_SET_BITS(&device_data->base->cr,
-                                       HASH_CR_DMAE_MASK);
+                                     HASH_CR_DMAE_MASK);
                        HASH_SET_BITS(&device_data->base->cr,
-                                       HASH_CR_PRIVN_MASK);
+                                     HASH_CR_PRIVN_MASK);
                }
 
                HASH_INITIALIZE;
@@ -944,16 +924,16 @@ static int hash_dma_final(struct ahash_request *req)
        /* Store the nents in the dma struct. */
        ctx->device->dma.nents = hash_get_nents(req->src, req->nbytes, NULL);
        if (!ctx->device->dma.nents) {
-               dev_err(device_data->dev, "[%s] "
-                               "ctx->device->dma.nents = 0", __func__);
+               dev_err(device_data->dev, "%s: ctx->device->dma.nents = 0\n",
+                       __func__);
                ret = ctx->device->dma.nents;
                goto out;
        }
 
        bytes_written = hash_dma_write(ctx, req->src, req->nbytes);
        if (bytes_written != req->nbytes) {
-               dev_err(device_data->dev, "[%s] "
-                               "hash_dma_write() failed!", __func__);
+               dev_err(device_data->dev, "%s: hash_dma_write() failed!\n",
+                       __func__);
                ret = bytes_written;
                goto out;
        }
@@ -968,8 +948,8 @@ static int hash_dma_final(struct ahash_request *req)
                unsigned int keylen = ctx->keylen;
                u8 *key = ctx->key;
 
-               dev_dbg(device_data->dev, "[%s] keylen: %d", __func__,
-                               ctx->keylen);
+               dev_dbg(device_data->dev, "%s: keylen: %d\n",
+                       __func__, ctx->keylen);
                hash_hw_write_key(device_data, key, keylen);
        }
 
@@ -1004,14 +984,14 @@ static int hash_hw_final(struct ahash_request *req)
        if (ret)
                return ret;
 
-       dev_dbg(device_data->dev, "[%s] (ctx=0x%x)!", __func__, (u32) ctx);
+       dev_dbg(device_data->dev, "%s: (ctx=0x%x)!\n", __func__, (u32) ctx);
 
        if (req_ctx->updated) {
                ret = hash_resume_state(device_data, &device_data->state);
 
                if (ret) {
-                       dev_err(device_data->dev, "[%s] hash_resume_state() "
-                                       "failed!", __func__);
+                       dev_err(device_data->dev,
+                               "%s: hash_resume_state() failed!\n", __func__);
                        goto out;
                }
        } else if (req->nbytes == 0 && ctx->keylen == 0) {
@@ -1025,31 +1005,33 @@ static int hash_hw_final(struct ahash_request *req)
                ret = get_empty_message_digest(device_data, &zero_hash[0],
                                &zero_hash_size, &zero_digest);
                if (!ret && likely(zero_hash_size == ctx->digestsize) &&
-                               zero_digest) {
+                   zero_digest) {
                        memcpy(req->result, &zero_hash[0], ctx->digestsize);
                        goto out;
                } else if (!ret && !zero_digest) {
-                       dev_dbg(device_data->dev, "[%s] HMAC zero msg with "
-                                       "key, continue...", __func__);
+                       dev_dbg(device_data->dev,
+                               "%s: HMAC zero msg with key, continue...\n",
+                               __func__);
                } else {
-                       dev_err(device_data->dev, "[%s] ret=%d, or wrong "
-                                       "digest size? %s", __func__, ret,
-                                       (zero_hash_size == ctx->digestsize) ?
-                                       "true" : "false");
+                       dev_err(device_data->dev,
+                               "%s: ret=%d, or wrong digest size? %s\n",
+                               __func__, ret,
+                               zero_hash_size == ctx->digestsize ?
+                               "true" : "false");
                        /* Return error */
                        goto out;
                }
        } else if (req->nbytes == 0 && ctx->keylen > 0) {
-               dev_err(device_data->dev, "[%s] Empty message with "
-                               "keylength > 0, NOT supported.", __func__);
+               dev_err(device_data->dev, "%s: Empty message with keylength > 0, NOT supported\n",
+                       __func__);
                goto out;
        }
 
        if (!req_ctx->updated) {
                ret = init_hash_hw(device_data, ctx);
                if (ret) {
-                       dev_err(device_data->dev, "[%s] init_hash_hw() "
-                                       "failed!", __func__);
+                       dev_err(device_data->dev,
+                               "%s: init_hash_hw() failed!\n", __func__);
                        goto out;
                }
        }
@@ -1067,8 +1049,8 @@ static int hash_hw_final(struct ahash_request *req)
                unsigned int keylen = ctx->keylen;
                u8 *key = ctx->key;
 
-               dev_dbg(device_data->dev, "[%s] keylen: %d", __func__,
-                               ctx->keylen);
+               dev_dbg(device_data->dev, "%s: keylen: %d\n",
+                       __func__, ctx->keylen);
                hash_hw_write_key(device_data, key, keylen);
        }
 
@@ -1115,10 +1097,8 @@ int hash_hw_update(struct ahash_request *req)
        /* Check if ctx->state.length + msg_length
           overflows */
        if (msg_length > (req_ctx->state.length.low_word + msg_length) &&
-                       HASH_HIGH_WORD_MAX_VAL ==
-                       req_ctx->state.length.high_word) {
-               pr_err(DEV_DBG_NAME " [%s] HASH_MSG_LENGTH_OVERFLOW!",
-                               __func__);
+           HASH_HIGH_WORD_MAX_VAL == req_ctx->state.length.high_word) {
+               pr_err("%s: HASH_MSG_LENGTH_OVERFLOW!\n", __func__);
                return -EPERM;
        }
 
@@ -1133,8 +1113,8 @@ int hash_hw_update(struct ahash_request *req)
                                data_buffer, buffer, &index);
 
                if (ret) {
-                       dev_err(device_data->dev, "[%s] hash_internal_hw_"
-                                       "update() failed!", __func__);
+                       dev_err(device_data->dev, "%s: hash_internal_hw_update() failed!\n",
+                               __func__);
                        goto out;
                }
 
@@ -1142,9 +1122,8 @@ int hash_hw_update(struct ahash_request *req)
        }
 
        req_ctx->state.index = index;
-       dev_dbg(device_data->dev, "[%s] indata length=%d, bin=%d))",
-                       __func__, req_ctx->state.index,
-                       req_ctx->state.bit_index);
+       dev_dbg(device_data->dev, "%s: indata length=%d, bin=%d\n",
+               __func__, req_ctx->state.index, req_ctx->state.bit_index);
 
 out:
        release_hash_device(device_data);
@@ -1158,23 +1137,23 @@ out:
  * @device_state:      The state to be restored in the hash hardware
  */
 int hash_resume_state(struct hash_device_data *device_data,
-               const struct hash_state *device_state)
+                     const struct hash_state *device_state)
 {
        u32 temp_cr;
        s32 count;
        int hash_mode = HASH_OPER_MODE_HASH;
 
        if (NULL == device_state) {
-               dev_err(device_data->dev, "[%s] HASH_INVALID_PARAMETER!",
-                               __func__);
+               dev_err(device_data->dev, "%s: HASH_INVALID_PARAMETER!\n",
+                       __func__);
                return -EPERM;
        }
 
        /* Check correctness of index and length members */
-       if (device_state->index > HASH_BLOCK_SIZE
-           || (device_state->length.low_word % HASH_BLOCK_SIZE) != 0) {
-               dev_err(device_data->dev, "[%s] HASH_INVALID_PARAMETER!",
-                               __func__);
+       if (device_state->index > HASH_BLOCK_SIZE ||
+           (device_state->length.low_word % HASH_BLOCK_SIZE) != 0) {
+               dev_err(device_data->dev, "%s: HASH_INVALID_PARAMETER!\n",
+                       __func__);
                return -EPERM;
        }
 
@@ -1198,7 +1177,7 @@ int hash_resume_state(struct hash_device_data *device_data,
                        break;
 
                writel_relaxed(device_state->csr[count],
-                               &device_data->base->csrx[count]);
+                              &device_data->base->csrx[count]);
        }
 
        writel_relaxed(device_state->csfull, &device_data->base->csfull);
@@ -1216,15 +1195,15 @@ int hash_resume_state(struct hash_device_data *device_data,
  * @device_state:      The strucure where the hardware state should be saved.
  */
 int hash_save_state(struct hash_device_data *device_data,
-               struct hash_state *device_state)
+                   struct hash_state *device_state)
 {
        u32 temp_cr;
        u32 count;
        int hash_mode = HASH_OPER_MODE_HASH;
 
        if (NULL == device_state) {
-               dev_err(device_data->dev, "[%s] HASH_INVALID_PARAMETER!",
-                               __func__);
+               dev_err(device_data->dev, "%s: HASH_INVALID_PARAMETER!\n",
+                       __func__);
                return -ENOTSUPP;
        }
 
@@ -1270,20 +1249,18 @@ int hash_save_state(struct hash_device_data *device_data,
 int hash_check_hw(struct hash_device_data *device_data)
 {
        /* Checking Peripheral Ids  */
-       if (HASH_P_ID0 == readl_relaxed(&device_data->base->periphid0)
-               && HASH_P_ID1 == readl_relaxed(&device_data->base->periphid1)
-               && HASH_P_ID2 == readl_relaxed(&device_data->base->periphid2)
-               && HASH_P_ID3 == readl_relaxed(&device_data->base->periphid3)
-               && HASH_CELL_ID0 == readl_relaxed(&device_data->base->cellid0)
-               && HASH_CELL_ID1 == readl_relaxed(&device_data->base->cellid1)
-               && HASH_CELL_ID2 == readl_relaxed(&device_data->base->cellid2)
-               && HASH_CELL_ID3 == readl_relaxed(&device_data->base->cellid3)
-          ) {
+       if (HASH_P_ID0 == readl_relaxed(&device_data->base->periphid0) &&
+           HASH_P_ID1 == readl_relaxed(&device_data->base->periphid1) &&
+           HASH_P_ID2 == readl_relaxed(&device_data->base->periphid2) &&
+           HASH_P_ID3 == readl_relaxed(&device_data->base->periphid3) &&
+           HASH_CELL_ID0 == readl_relaxed(&device_data->base->cellid0) &&
+           HASH_CELL_ID1 == readl_relaxed(&device_data->base->cellid1) &&
+           HASH_CELL_ID2 == readl_relaxed(&device_data->base->cellid2) &&
+           HASH_CELL_ID3 == readl_relaxed(&device_data->base->cellid3)) {
                return 0;
        }
 
-       dev_err(device_data->dev, "[%s] HASH_UNSUPPORTED_HW!",
-                       __func__);
+       dev_err(device_data->dev, "%s: HASH_UNSUPPORTED_HW!\n", __func__);
        return -ENOTSUPP;
 }
 
@@ -1294,14 +1271,14 @@ int hash_check_hw(struct hash_device_data *device_data)
  * @algorithm:         The algorithm in use.
  */
 void hash_get_digest(struct hash_device_data *device_data,
-               u8 *digest, int algorithm)
+                    u8 *digest, int algorithm)
 {
        u32 temp_hx_val, count;
        int loop_ctr;
 
        if (algorithm != HASH_ALGO_SHA1 && algorithm != HASH_ALGO_SHA256) {
-               dev_err(device_data->dev, "[%s] Incorrect algorithm %d",
-                               __func__, algorithm);
+               dev_err(device_data->dev, "%s: Incorrect algorithm %d\n",
+                       __func__, algorithm);
                return;
        }
 
@@ -1310,8 +1287,8 @@ void hash_get_digest(struct hash_device_data *device_data,
        else
                loop_ctr = SHA256_DIGEST_SIZE / sizeof(u32);
 
-       dev_dbg(device_data->dev, "[%s] digest array:(0x%x)",
-                       __func__, (u32) digest);
+       dev_dbg(device_data->dev, "%s: digest array:(0x%x)\n",
+               __func__, (u32) digest);
 
        /* Copy result into digest array */
        for (count = 0; count < loop_ctr; count++) {
@@ -1337,8 +1314,7 @@ static int ahash_update(struct ahash_request *req)
        /* Skip update for DMA, all data will be passed to DMA in final */
 
        if (ret) {
-               pr_err(DEV_DBG_NAME " [%s] hash_hw_update() failed!",
-                               __func__);
+               pr_err("%s: hash_hw_update() failed!\n", __func__);
        }
 
        return ret;
@@ -1353,7 +1329,7 @@ static int ahash_final(struct ahash_request *req)
        int ret = 0;
        struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
 
-       pr_debug(DEV_DBG_NAME " [%s] data size: %d", __func__, req->nbytes);
+       pr_debug("%s: data size: %d\n", __func__, req->nbytes);
 
        if ((hash_mode == HASH_MODE_DMA) && req_ctx->dma_mode)
                ret = hash_dma_final(req);
@@ -1361,15 +1337,14 @@ static int ahash_final(struct ahash_request *req)
                ret = hash_hw_final(req);
 
        if (ret) {
-               pr_err(DEV_DBG_NAME " [%s] hash_hw/dma_final() failed",
-                               __func__);
+               pr_err("%s: hash_hw/dma_final() failed\n", __func__);
        }
 
        return ret;
 }
 
 static int hash_setkey(struct crypto_ahash *tfm,
-               const u8 *key, unsigned int keylen, int alg)
+                      const u8 *key, unsigned int keylen, int alg)
 {
        int ret = 0;
        struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
@@ -1379,8 +1354,8 @@ static int hash_setkey(struct crypto_ahash *tfm,
         */
        ctx->key = kmemdup(key, keylen, GFP_KERNEL);
        if (!ctx->key) {
-               pr_err(DEV_DBG_NAME " [%s] Failed to allocate ctx->key "
-                      "for %d\n", __func__, alg);
+               pr_err("%s: Failed to allocate ctx->key for %d\n",
+                      __func__, alg);
                return -ENOMEM;
        }
        ctx->keylen = keylen;
@@ -1501,13 +1476,13 @@ out:
 }
 
 static int hmac_sha1_setkey(struct crypto_ahash *tfm,
-               const u8 *key, unsigned int keylen)
+                           const u8 *key, unsigned int keylen)
 {
        return hash_setkey(tfm, key, keylen, HASH_ALGO_SHA1);
 }
 
 static int hmac_sha256_setkey(struct crypto_ahash *tfm,
-               const u8 *key, unsigned int keylen)
+                             const u8 *key, unsigned int keylen)
 {
        return hash_setkey(tfm, key, keylen, HASH_ALGO_SHA256);
 }
@@ -1528,7 +1503,7 @@ static int hash_cra_init(struct crypto_tfm *tfm)
                        hash);
 
        crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
-                       sizeof(struct hash_req_ctx));
+                                sizeof(struct hash_req_ctx));
 
        ctx->config.data_format = HASH_DATA_8_BITS;
        ctx->config.algorithm = hash_alg->conf.algorithm;
@@ -1541,98 +1516,97 @@ static int hash_cra_init(struct crypto_tfm *tfm)
 
 static struct hash_algo_template hash_algs[] = {
        {
-                       .conf.algorithm = HASH_ALGO_SHA1,
-                       .conf.oper_mode = HASH_OPER_MODE_HASH,
-                       .hash = {
-                               .init = hash_init,
-                               .update = ahash_update,
-                               .final = ahash_final,
-                               .digest = ahash_sha1_digest,
-                               .halg.digestsize = SHA1_DIGEST_SIZE,
-                               .halg.statesize = sizeof(struct hash_ctx),
-                               .halg.base = {
-                                       .cra_name = "sha1",
-                                       .cra_driver_name = "sha1-ux500",
-                                       .cra_flags = CRYPTO_ALG_TYPE_AHASH |
-                                                       CRYPTO_ALG_ASYNC,
-                                       .cra_blocksize = SHA1_BLOCK_SIZE,
-                                       .cra_ctxsize = sizeof(struct hash_ctx),
-                                       .cra_init = hash_cra_init,
-                                       .cra_module = THIS_MODULE,
+               .conf.algorithm = HASH_ALGO_SHA1,
+               .conf.oper_mode = HASH_OPER_MODE_HASH,
+               .hash = {
+                       .init = hash_init,
+                       .update = ahash_update,
+                       .final = ahash_final,
+                       .digest = ahash_sha1_digest,
+                       .halg.digestsize = SHA1_DIGEST_SIZE,
+                       .halg.statesize = sizeof(struct hash_ctx),
+                       .halg.base = {
+                               .cra_name = "sha1",
+                               .cra_driver_name = "sha1-ux500",
+                               .cra_flags = (CRYPTO_ALG_TYPE_AHASH |
+                                             CRYPTO_ALG_ASYNC),
+                               .cra_blocksize = SHA1_BLOCK_SIZE,
+                               .cra_ctxsize = sizeof(struct hash_ctx),
+                               .cra_init = hash_cra_init,
+                               .cra_module = THIS_MODULE,
                        }
                }
        },
        {
-                       .conf.algorithm         = HASH_ALGO_SHA256,
-                       .conf.oper_mode         = HASH_OPER_MODE_HASH,
-                       .hash = {
-                               .init = hash_init,
-                               .update = ahash_update,
-                               .final = ahash_final,
-                               .digest = ahash_sha256_digest,
-                               .halg.digestsize = SHA256_DIGEST_SIZE,
-                               .halg.statesize = sizeof(struct hash_ctx),
-                               .halg.base = {
-                                       .cra_name = "sha256",
-                                       .cra_driver_name = "sha256-ux500",
-                                       .cra_flags = CRYPTO_ALG_TYPE_AHASH |
-                                                       CRYPTO_ALG_ASYNC,
-                                       .cra_blocksize = SHA256_BLOCK_SIZE,
-                                       .cra_ctxsize = sizeof(struct hash_ctx),
-                                       .cra_type = &crypto_ahash_type,
-                                       .cra_init = hash_cra_init,
-                                       .cra_module = THIS_MODULE,
-                               }
+               .conf.algorithm = HASH_ALGO_SHA256,
+               .conf.oper_mode = HASH_OPER_MODE_HASH,
+               .hash = {
+                       .init = hash_init,
+                       .update = ahash_update,
+                       .final = ahash_final,
+                       .digest = ahash_sha256_digest,
+                       .halg.digestsize = SHA256_DIGEST_SIZE,
+                       .halg.statesize = sizeof(struct hash_ctx),
+                       .halg.base = {
+                               .cra_name = "sha256",
+                               .cra_driver_name = "sha256-ux500",
+                               .cra_flags = (CRYPTO_ALG_TYPE_AHASH |
+                                             CRYPTO_ALG_ASYNC),
+                               .cra_blocksize = SHA256_BLOCK_SIZE,
+                               .cra_ctxsize = sizeof(struct hash_ctx),
+                               .cra_type = &crypto_ahash_type,
+                               .cra_init = hash_cra_init,
+                               .cra_module = THIS_MODULE,
                        }
-
+               }
        },
        {
-                       .conf.algorithm         = HASH_ALGO_SHA1,
-                       .conf.oper_mode         = HASH_OPER_MODE_HMAC,
+               .conf.algorithm = HASH_ALGO_SHA1,
+               .conf.oper_mode = HASH_OPER_MODE_HMAC,
                        .hash = {
-                               .init = hash_init,
-                               .update = ahash_update,
-                               .final = ahash_final,
-                               .digest = hmac_sha1_digest,
-                               .setkey = hmac_sha1_setkey,
-                               .halg.digestsize = SHA1_DIGEST_SIZE,
-                               .halg.statesize = sizeof(struct hash_ctx),
-                               .halg.base = {
-                                       .cra_name = "hmac(sha1)",
-                                       .cra_driver_name = "hmac-sha1-ux500",
-                                       .cra_flags = CRYPTO_ALG_TYPE_AHASH |
-                                                       CRYPTO_ALG_ASYNC,
-                                       .cra_blocksize = SHA1_BLOCK_SIZE,
-                                       .cra_ctxsize = sizeof(struct hash_ctx),
-                                       .cra_type = &crypto_ahash_type,
-                                       .cra_init = hash_cra_init,
-                                       .cra_module = THIS_MODULE,
-                               }
+                       .init = hash_init,
+                       .update = ahash_update,
+                       .final = ahash_final,
+                       .digest = hmac_sha1_digest,
+                       .setkey = hmac_sha1_setkey,
+                       .halg.digestsize = SHA1_DIGEST_SIZE,
+                       .halg.statesize = sizeof(struct hash_ctx),
+                       .halg.base = {
+                               .cra_name = "hmac(sha1)",
+                               .cra_driver_name = "hmac-sha1-ux500",
+                               .cra_flags = (CRYPTO_ALG_TYPE_AHASH |
+                                             CRYPTO_ALG_ASYNC),
+                               .cra_blocksize = SHA1_BLOCK_SIZE,
+                               .cra_ctxsize = sizeof(struct hash_ctx),
+                               .cra_type = &crypto_ahash_type,
+                               .cra_init = hash_cra_init,
+                               .cra_module = THIS_MODULE,
                        }
+               }
        },
        {
-                       .conf.algorithm         = HASH_ALGO_SHA256,
-                       .conf.oper_mode         = HASH_OPER_MODE_HMAC,
-                       .hash = {
-                               .init = hash_init,
-                               .update = ahash_update,
-                               .final = ahash_final,
-                               .digest = hmac_sha256_digest,
-                               .setkey = hmac_sha256_setkey,
-                               .halg.digestsize = SHA256_DIGEST_SIZE,
-                               .halg.statesize = sizeof(struct hash_ctx),
-                               .halg.base = {
-                                       .cra_name = "hmac(sha256)",
-                                       .cra_driver_name = "hmac-sha256-ux500",
-                                       .cra_flags = CRYPTO_ALG_TYPE_AHASH |
-                                                       CRYPTO_ALG_ASYNC,
-                                       .cra_blocksize = SHA256_BLOCK_SIZE,
-                                       .cra_ctxsize = sizeof(struct hash_ctx),
-                                       .cra_type = &crypto_ahash_type,
-                                       .cra_init = hash_cra_init,
-                                       .cra_module = THIS_MODULE,
-                               }
+               .conf.algorithm = HASH_ALGO_SHA256,
+               .conf.oper_mode = HASH_OPER_MODE_HMAC,
+               .hash = {
+                       .init = hash_init,
+                       .update = ahash_update,
+                       .final = ahash_final,
+                       .digest = hmac_sha256_digest,
+                       .setkey = hmac_sha256_setkey,
+                       .halg.digestsize = SHA256_DIGEST_SIZE,
+                       .halg.statesize = sizeof(struct hash_ctx),
+                       .halg.base = {
+                               .cra_name = "hmac(sha256)",
+                               .cra_driver_name = "hmac-sha256-ux500",
+                               .cra_flags = (CRYPTO_ALG_TYPE_AHASH |
+                                             CRYPTO_ALG_ASYNC),
+                               .cra_blocksize = SHA256_BLOCK_SIZE,
+                               .cra_ctxsize = sizeof(struct hash_ctx),
+                               .cra_type = &crypto_ahash_type,
+                               .cra_init = hash_cra_init,
+                               .cra_module = THIS_MODULE,
                        }
+               }
        }
 };
 
@@ -1649,7 +1623,7 @@ static int ahash_algs_register_all(struct hash_device_data *device_data)
                ret = crypto_register_ahash(&hash_algs[i].hash);
                if (ret) {
                        count = i;
-                       dev_err(device_data->dev, "[%s] alg registration failed",
+                       dev_err(device_data->dev, "%s: alg registration failed\n",
                                hash_algs[i].hash.halg.base.cra_driver_name);
                        goto unreg;
                }
@@ -1683,9 +1657,8 @@ static int ux500_hash_probe(struct platform_device *pdev)
        struct hash_device_data *device_data;
        struct device           *dev = &pdev->dev;
 
-       device_data = kzalloc(sizeof(struct hash_device_data), GFP_ATOMIC);
+       device_data = kzalloc(sizeof(*device_data), GFP_ATOMIC);
        if (!device_data) {
-               dev_dbg(dev, "[%s] kzalloc() failed!", __func__);
                ret = -ENOMEM;
                goto out;
        }
@@ -1695,14 +1668,14 @@ static int ux500_hash_probe(struct platform_device *pdev)
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res) {
-               dev_dbg(dev, "[%s] platform_get_resource() failed!", __func__);
+               dev_dbg(dev, "%s: platform_get_resource() failed!\n", __func__);
                ret = -ENODEV;
                goto out_kfree;
        }
 
        res = request_mem_region(res->start, resource_size(res), pdev->name);
        if (res == NULL) {
-               dev_dbg(dev, "[%s] request_mem_region() failed!", __func__);
+               dev_dbg(dev, "%s: request_mem_region() failed!\n", __func__);
                ret = -EBUSY;
                goto out_kfree;
        }
@@ -1710,8 +1683,7 @@ static int ux500_hash_probe(struct platform_device *pdev)
        device_data->phybase = res->start;
        device_data->base = ioremap(res->start, resource_size(res));
        if (!device_data->base) {
-               dev_err(dev, "[%s] ioremap() failed!",
-                               __func__);
+               dev_err(dev, "%s: ioremap() failed!\n", __func__);
                ret = -ENOMEM;
                goto out_free_mem;
        }
@@ -1721,7 +1693,7 @@ static int ux500_hash_probe(struct platform_device *pdev)
        /* Enable power for HASH1 hardware block */
        device_data->regulator = regulator_get(dev, "v-ape");
        if (IS_ERR(device_data->regulator)) {
-               dev_err(dev, "[%s] regulator_get() failed!", __func__);
+               dev_err(dev, "%s: regulator_get() failed!\n", __func__);
                ret = PTR_ERR(device_data->regulator);
                device_data->regulator = NULL;
                goto out_unmap;
@@ -1730,27 +1702,27 @@ static int ux500_hash_probe(struct platform_device *pdev)
        /* Enable the clock for HASH1 hardware block */
        device_data->clk = clk_get(dev, NULL);
        if (IS_ERR(device_data->clk)) {
-               dev_err(dev, "[%s] clk_get() failed!", __func__);
+               dev_err(dev, "%s: clk_get() failed!\n", __func__);
                ret = PTR_ERR(device_data->clk);
                goto out_regulator;
        }
 
        ret = clk_prepare(device_data->clk);
        if (ret) {
-               dev_err(dev, "[%s] clk_prepare() failed!", __func__);
+               dev_err(dev, "%s: clk_prepare() failed!\n", __func__);
                goto out_clk;
        }
 
        /* Enable device power (and clock) */
        ret = hash_enable_power(device_data, false);
        if (ret) {
-               dev_err(dev, "[%s]: hash_enable_power() failed!", __func__);
+               dev_err(dev, "%s: hash_enable_power() failed!\n", __func__);
                goto out_clk_unprepare;
        }
 
        ret = hash_check_hw(device_data);
        if (ret) {
-               dev_err(dev, "[%s] hash_check_hw() failed!", __func__);
+               dev_err(dev, "%s: hash_check_hw() failed!\n", __func__);
                goto out_power;
        }
 
@@ -1766,8 +1738,8 @@ static int ux500_hash_probe(struct platform_device *pdev)
 
        ret = ahash_algs_register_all(device_data);
        if (ret) {
-               dev_err(dev, "[%s] ahash_algs_register_all() "
-                               "failed!", __func__);
+               dev_err(dev, "%s: ahash_algs_register_all() failed!\n",
+                       __func__);
                goto out_power;
        }
 
@@ -1810,8 +1782,7 @@ static int ux500_hash_remove(struct platform_device *pdev)
 
        device_data = platform_get_drvdata(pdev);
        if (!device_data) {
-               dev_err(dev, "[%s]: platform_get_drvdata() failed!",
-                       __func__);
+               dev_err(dev, "%s: platform_get_drvdata() failed!\n", __func__);
                return -ENOMEM;
        }
 
@@ -1841,7 +1812,7 @@ static int ux500_hash_remove(struct platform_device *pdev)
                ahash_algs_unregister_all(device_data);
 
        if (hash_disable_power(device_data, false))
-               dev_err(dev, "[%s]: hash_disable_power() failed",
+               dev_err(dev, "%s: hash_disable_power() failed\n",
                        __func__);
 
        clk_unprepare(device_data->clk);
@@ -1870,8 +1841,8 @@ static void ux500_hash_shutdown(struct platform_device *pdev)
 
        device_data = platform_get_drvdata(pdev);
        if (!device_data) {
-               dev_err(&pdev->dev, "[%s] platform_get_drvdata() failed!",
-                               __func__);
+               dev_err(&pdev->dev, "%s: platform_get_drvdata() failed!\n",
+                       __func__);
                return;
        }
 
@@ -1880,8 +1851,8 @@ static void ux500_hash_shutdown(struct platform_device *pdev)
        /* current_ctx allocates a device, NULL = unallocated */
        if (!device_data->current_ctx) {
                if (down_trylock(&driver_data.device_allocation))
-                       dev_dbg(&pdev->dev, "[%s]: Cryp still in use!"
-                               "Shutting down anyway...", __func__);
+                       dev_dbg(&pdev->dev, "%s: Cryp still in use! Shutting down anyway...\n",
+                               __func__);
                /**
                 * (Allocate the device)
                 * Need to set this to non-null (dummy) value,
@@ -1906,8 +1877,8 @@ static void ux500_hash_shutdown(struct platform_device *pdev)
                release_mem_region(res->start, resource_size(res));
 
        if (hash_disable_power(device_data, false))
-               dev_err(&pdev->dev, "[%s] hash_disable_power() failed",
-                               __func__);
+               dev_err(&pdev->dev, "%s: hash_disable_power() failed\n",
+                       __func__);
 }
 
 /**
@@ -1922,7 +1893,7 @@ static int ux500_hash_suspend(struct device *dev)
 
        device_data = dev_get_drvdata(dev);
        if (!device_data) {
-               dev_err(dev, "[%s] platform_get_drvdata() failed!", __func__);
+               dev_err(dev, "%s: platform_get_drvdata() failed!\n", __func__);
                return -ENOMEM;
        }
 
@@ -1933,15 +1904,16 @@ static int ux500_hash_suspend(struct device *dev)
 
        if (device_data->current_ctx == ++temp_ctx) {
                if (down_interruptible(&driver_data.device_allocation))
-                       dev_dbg(dev, "[%s]: down_interruptible() failed",
+                       dev_dbg(dev, "%s: down_interruptible() failed\n",
                                __func__);
                ret = hash_disable_power(device_data, false);
 
-       } else
+       } else {
                ret = hash_disable_power(device_data, true);
+       }
 
        if (ret)
-               dev_err(dev, "[%s]: hash_disable_power()", __func__);
+               dev_err(dev, "%s: hash_disable_power()\n", __func__);
 
        return ret;
 }
@@ -1958,7 +1930,7 @@ static int ux500_hash_resume(struct device *dev)
 
        device_data = dev_get_drvdata(dev);
        if (!device_data) {
-               dev_err(dev, "[%s] platform_get_drvdata() failed!", __func__);
+               dev_err(dev, "%s: platform_get_drvdata() failed!\n", __func__);
                return -ENOMEM;
        }
 
@@ -1973,7 +1945,7 @@ static int ux500_hash_resume(struct device *dev)
                ret = hash_enable_power(device_data, true);
 
        if (ret)
-               dev_err(dev, "[%s]: hash_enable_power() failed!", __func__);
+               dev_err(dev, "%s: hash_enable_power() failed!\n", __func__);
 
        return ret;
 }
@@ -1981,8 +1953,8 @@ static int ux500_hash_resume(struct device *dev)
 static SIMPLE_DEV_PM_OPS(ux500_hash_pm, ux500_hash_suspend, ux500_hash_resume);
 
 static const struct of_device_id ux500_hash_match[] = {
-        { .compatible = "stericsson,ux500-hash" },
-        { },
+       { .compatible = "stericsson,ux500-hash" },
+       { },
 };
 
 static struct platform_driver hash_driver = {
index 5a18f82f732af57a319628190713e6bd054cf8b3..ba7f93225851b56aab32c1ac2c296400308c6256 100644 (file)
@@ -73,7 +73,7 @@ static int acpi_dma_parse_resource_group(const struct acpi_csrt_group *grp,
        if (si->mmio_base_low != mem || si->gsi_interrupt != irq)
                return 0;
 
-       vendor_id = le32_to_cpu(grp->vendor_id);
+       vendor_id = le32_to_cpu((__force __le32)grp->vendor_id);
        dev_dbg(&adev->dev, "matches with %.4s%04X (rev %u)\n",
                (char *)&vendor_id, grp->device_id, grp->revision);
 
index 9e56745f87bf164aa3b4728b52a113c9a5b68cc6..755ba2f4f1d538e8db370aed4496ea8b8defd99b 100644 (file)
@@ -503,6 +503,32 @@ static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
        return NULL;
 }
 
+/**
+ * dma_get_slave_channel - try to get specific channel exclusively
+ * @chan: target channel
+ */
+struct dma_chan *dma_get_slave_channel(struct dma_chan *chan)
+{
+       int err = -EBUSY;
+
+       /* lock against __dma_request_channel */
+       mutex_lock(&dma_list_mutex);
+
+       if (chan->client_count == 0) {
+               err = dma_chan_get(chan);
+               if (err)
+                       pr_debug("%s: failed to get %s: (%d)\n",
+                               __func__, dma_chan_name(chan), err);
+       } else
+               chan = NULL;
+
+       mutex_unlock(&dma_list_mutex);
+
+
+       return chan;
+}
+EXPORT_SYMBOL_GPL(dma_get_slave_channel);
+
 /**
  * dma_request_channel - try to allocate an exclusive channel
  * @mask: capabilities that the channel must satisfy
index eea479c121736e20c6e52f71059df1b4e14a364b..89eb89f222846e0ff5d20cfc5e14619fc05d6600 100644 (file)
  * which does not support descriptor writeback.
  */
 
+static inline bool is_request_line_unset(struct dw_dma_chan *dwc)
+{
+       return dwc->request_line == (typeof(dwc->request_line))~0;
+}
+
 static inline void dwc_set_masters(struct dw_dma_chan *dwc)
 {
        struct dw_dma *dw = to_dw_dma(dwc->chan.device);
        struct dw_dma_slave *dws = dwc->chan.private;
        unsigned char mmax = dw->nr_masters - 1;
 
-       if (dwc->request_line == ~0) {
-               dwc->src_master = min_t(unsigned char, mmax, dwc_get_sms(dws));
-               dwc->dst_master = min_t(unsigned char, mmax, dwc_get_dms(dws));
-       }
+       if (!is_request_line_unset(dwc))
+               return;
+
+       dwc->src_master = min_t(unsigned char, mmax, dwc_get_sms(dws));
+       dwc->dst_master = min_t(unsigned char, mmax, dwc_get_dms(dws));
 }
 
 #define DWC_DEFAULT_CTLLO(_chan) ({                            \
@@ -644,10 +650,13 @@ static void dw_dma_tasklet(unsigned long data)
 static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
 {
        struct dw_dma *dw = dev_id;
-       u32 status;
+       u32 status = dma_readl(dw, STATUS_INT);
+
+       dev_vdbg(dw->dma.dev, "%s: status=0x%x\n", __func__, status);
 
-       dev_vdbg(dw->dma.dev, "%s: status=0x%x\n", __func__,
-                       dma_readl(dw, STATUS_INT));
+       /* Check if we have any interrupt from the DMAC */
+       if (!status)
+               return IRQ_NONE;
 
        /*
         * Just disable the interrupts. We'll turn them back on in the
@@ -984,7 +993,7 @@ set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
        dwc->direction = sconfig->direction;
 
        /* Take the request line from slave_id member */
-       if (dwc->request_line == ~0)
+       if (is_request_line_unset(dwc))
                dwc->request_line = sconfig->slave_id;
 
        convert_burst(&dwc->dma_sconfig.src_maxburst);
@@ -1089,16 +1098,16 @@ dwc_tx_status(struct dma_chan *chan,
        enum dma_status         ret;
 
        ret = dma_cookie_status(chan, cookie, txstate);
-       if (ret != DMA_SUCCESS) {
-               dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
+       if (ret == DMA_SUCCESS)
+               return ret;
 
-               ret = dma_cookie_status(chan, cookie, txstate);
-       }
+       dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
 
+       ret = dma_cookie_status(chan, cookie, txstate);
        if (ret != DMA_SUCCESS)
                dma_set_residue(txstate, dwc_get_residue(dwc));
 
-       if (dwc->paused)
+       if (dwc->paused && ret == DMA_IN_PROGRESS)
                return DMA_PAUSED;
 
        return ret;
@@ -1560,8 +1569,8 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
        /* Disable BLOCK interrupts as well */
        channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
 
-       err = devm_request_irq(chip->dev, chip->irq, dw_dma_interrupt, 0,
-                              "dw_dmac", dw);
+       err = devm_request_irq(chip->dev, chip->irq, dw_dma_interrupt,
+                              IRQF_SHARED, "dw_dmac", dw);
        if (err)
                return err;
 
index 6c9449cffae81b90fd20c0ba9f2df7c7166e8f1c..e35d97590311329fe1f7bd93be5cc4b845f3a7c2 100644 (file)
@@ -253,6 +253,7 @@ static const struct acpi_device_id dw_dma_acpi_id_table[] = {
        { "INTL9C60", 0 },
        { }
 };
+MODULE_DEVICE_TABLE(acpi, dw_dma_acpi_id_table);
 #endif
 
 #ifdef CONFIG_PM_SLEEP
index 5f3e532436ee40c5f035b4c9bd6b2b008700b350..4f6d87bcaa0d689a443b20d2b1cea8a059ff19bb 100644 (file)
@@ -502,8 +502,6 @@ static enum dma_status edma_tx_status(struct dma_chan *chan,
        } else if (echan->edesc && echan->edesc->vdesc.tx.cookie == cookie) {
                struct edma_desc *edesc = echan->edesc;
                txstate->residue = edma_desc_size(edesc);
-       } else {
-               txstate->residue = 0;
        }
        spin_unlock_irqrestore(&echan->vchan.lock, flags);
 
index f2bf8c0c46757d0dbd351af10660cc5643f2cfbd..591cd8c63abbcb081a4cd2ca264ed118f7f3d782 100644 (file)
@@ -1313,15 +1313,7 @@ static enum dma_status ep93xx_dma_tx_status(struct dma_chan *chan,
                                            dma_cookie_t cookie,
                                            struct dma_tx_state *state)
 {
-       struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
-       enum dma_status ret;
-       unsigned long flags;
-
-       spin_lock_irqsave(&edmac->lock, flags);
-       ret = dma_cookie_status(chan, cookie, state);
-       spin_unlock_irqrestore(&edmac->lock, flags);
-
-       return ret;
+       return dma_cookie_status(chan, cookie, state);
 }
 
 /**
index 49e8fbdb898388703ac5db7e306e4c5f39d88a19..b3f3e90054f2ab956e8019dd8abe539b85fa0301 100644 (file)
@@ -979,15 +979,7 @@ static enum dma_status fsl_tx_status(struct dma_chan *dchan,
                                        dma_cookie_t cookie,
                                        struct dma_tx_state *txstate)
 {
-       struct fsldma_chan *chan = to_fsl_chan(dchan);
-       enum dma_status ret;
-       unsigned long flags;
-
-       spin_lock_irqsave(&chan->desc_lock, flags);
-       ret = dma_cookie_status(dchan, cookie, txstate);
-       spin_unlock_irqrestore(&chan->desc_lock, flags);
-
-       return ret;
+       return dma_cookie_status(dchan, cookie, txstate);
 }
 
 /*----------------------------------------------------------------------------*/
index 1e44b8cf95dabca6b220c05e8afd33740f8a8455..960367d9589550328ec9a870cfea36bf179bdcd7 100644 (file)
@@ -243,7 +243,6 @@ struct sdma_engine;
  * @event_id1          for channels that use 2 events
  * @word_size          peripheral access size
  * @buf_tail           ID of the buffer that was processed
- * @done               channel completion
  * @num_bd             max NUM_BD. number of descriptors currently handling
  */
 struct sdma_channel {
@@ -255,7 +254,6 @@ struct sdma_channel {
        unsigned int                    event_id1;
        enum dma_slave_buswidth         word_size;
        unsigned int                    buf_tail;
-       struct completion               done;
        unsigned int                    num_bd;
        struct sdma_buffer_descriptor   *bd;
        dma_addr_t                      bd_phys;
@@ -547,8 +545,6 @@ static void sdma_tasklet(unsigned long data)
 {
        struct sdma_channel *sdmac = (struct sdma_channel *) data;
 
-       complete(&sdmac->done);
-
        if (sdmac->flags & IMX_DMA_SG_LOOP)
                sdma_handle_channel_loop(sdmac);
        else
@@ -812,9 +808,6 @@ static int sdma_request_channel(struct sdma_channel *sdmac)
        sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys;
 
        sdma_set_channel_priority(sdmac, MXC_SDMA_DEFAULT_PRIORITY);
-
-       init_completion(&sdmac->done);
-
        return 0;
 out:
 
@@ -1120,15 +1113,12 @@ static int sdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 }
 
 static enum dma_status sdma_tx_status(struct dma_chan *chan,
-                                           dma_cookie_t cookie,
-                                           struct dma_tx_state *txstate)
+                                     dma_cookie_t cookie,
+                                     struct dma_tx_state *txstate)
 {
        struct sdma_channel *sdmac = to_sdma_chan(chan);
-       dma_cookie_t last_used;
-
-       last_used = chan->cookie;
 
-       dma_set_tx_state(txstate, chan->completed_cookie, last_used,
+       dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie,
                        sdmac->chn_count - sdmac->chn_real_count);
 
        return sdmac->status;
@@ -1335,7 +1325,7 @@ static int __init sdma_probe(struct platform_device *pdev)
        int ret;
        int irq;
        struct resource *iores;
-       struct sdma_platform_data *pdata = pdev->dev.platform_data;
+       struct sdma_platform_data *pdata = dev_get_platdata(&pdev->dev);
        int i;
        struct sdma_engine *sdma;
        s32 *saddr_arr;
index cc727ec78c4e4ed668a2eb341d7eeb228bf84a98..dd8b44a56e5d0f7090b8dd65bce87a73a60c90ef 100644 (file)
@@ -518,7 +518,7 @@ static int iop_adma_alloc_chan_resources(struct dma_chan *chan)
        struct iop_adma_desc_slot *slot = NULL;
        int init = iop_chan->slots_allocated ? 0 : 1;
        struct iop_adma_platform_data *plat_data =
-               iop_chan->device->pdev->dev.platform_data;
+               dev_get_platdata(&iop_chan->device->pdev->dev);
        int num_descs_in_pool = plat_data->pool_size/IOP_ADMA_SLOT_SIZE;
 
        /* Allocate descriptor slots */
@@ -1351,7 +1351,7 @@ static int iop_adma_remove(struct platform_device *dev)
        struct iop_adma_device *device = platform_get_drvdata(dev);
        struct dma_chan *chan, *_chan;
        struct iop_adma_chan *iop_chan;
-       struct iop_adma_platform_data *plat_data = dev->dev.platform_data;
+       struct iop_adma_platform_data *plat_data = dev_get_platdata(&dev->dev);
 
        dma_async_device_unregister(&device->common);
 
@@ -1376,7 +1376,7 @@ static int iop_adma_probe(struct platform_device *pdev)
        struct iop_adma_device *adev;
        struct iop_adma_chan *iop_chan;
        struct dma_device *dma_dev;
-       struct iop_adma_platform_data *plat_data = pdev->dev.platform_data;
+       struct iop_adma_platform_data *plat_data = dev_get_platdata(&pdev->dev);
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res)
index d39c2cd0795d71437935d22ce90ca636c8fae0aa..608d4a26165487b2da23c1ddbf9148d04ea7dbac 100644 (file)
@@ -1593,10 +1593,7 @@ static void idmac_free_chan_resources(struct dma_chan *chan)
 static enum dma_status idmac_tx_status(struct dma_chan *chan,
                       dma_cookie_t cookie, struct dma_tx_state *txstate)
 {
-       dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie, 0);
-       if (cookie != chan->cookie)
-               return DMA_ERROR;
-       return DMA_SUCCESS;
+       return dma_cookie_status(chan, cookie, txstate);
 }
 
 static int __init ipu_idmac_init(struct ipu *ipu)
index c26699f9c4dfdbcec1c3f0d6ee0cf3d173227294..5c4318c5a5b1e42b47d6e0917a26fdd7a265b485 100644 (file)
@@ -18,7 +18,9 @@
 #include <linux/platform_data/mmp_dma.h>
 #include <linux/dmapool.h>
 #include <linux/of_device.h>
+#include <linux/of_dma.h>
 #include <linux/of.h>
+#include <linux/dma/mmp-pdma.h>
 
 #include "dmaengine.h"
 
@@ -47,6 +49,8 @@
 #define DCSR_CMPST     (1 << 10)       /* The Descriptor Compare Status */
 #define DCSR_EORINTR   (1 << 9)        /* The end of Receive */
 
+#define DRCMR(n)       ((((n) < 64) ? 0x0100 : 0x1100) + \
+                                (((n) & 0x3f) << 2))
 #define DRCMR_MAPVLD   (1 << 7)        /* Map Valid (read / write) */
 #define DRCMR_CHLNUM   0x1f            /* mask for Channel Number (read / write) */
 
@@ -69,7 +73,7 @@
 #define DCMD_LENGTH    0x01fff         /* length mask (max = 8K - 1) */
 
 #define PDMA_ALIGNMENT         3
-#define PDMA_MAX_DESC_BYTES    0x1000
+#define PDMA_MAX_DESC_BYTES    DCMD_LENGTH
 
 struct mmp_pdma_desc_hw {
        u32 ddadr;      /* Points to the next descriptor + flags */
@@ -105,6 +109,7 @@ struct mmp_pdma_chan {
        struct list_head chain_pending; /* Link descriptors queue for pending */
        struct list_head chain_running; /* Link descriptors queue for running */
        bool idle;                      /* channel statue machine */
+       bool byte_align;
 
        struct dma_pool *desc_pool;     /* Descriptors pool */
 };
@@ -121,6 +126,7 @@ struct mmp_pdma_device {
        struct device                   *dev;
        struct dma_device               device;
        struct mmp_pdma_phy             *phy;
+       spinlock_t phy_lock; /* protect alloc/free phy channels */
 };
 
 #define tx_to_mmp_pdma_desc(tx) container_of(tx, struct mmp_pdma_desc_sw, async_tx)
@@ -137,15 +143,21 @@ static void set_desc(struct mmp_pdma_phy *phy, dma_addr_t addr)
 
 static void enable_chan(struct mmp_pdma_phy *phy)
 {
-       u32 reg;
+       u32 reg, dalgn;
 
        if (!phy->vchan)
                return;
 
-       reg = phy->vchan->drcmr;
-       reg = (((reg) < 64) ? 0x0100 : 0x1100) + (((reg) & 0x3f) << 2);
+       reg = DRCMR(phy->vchan->drcmr);
        writel(DRCMR_MAPVLD | phy->idx, phy->base + reg);
 
+       dalgn = readl(phy->base + DALGN);
+       if (phy->vchan->byte_align)
+               dalgn |= 1 << phy->idx;
+       else
+               dalgn &= ~(1 << phy->idx);
+       writel(dalgn, phy->base + DALGN);
+
        reg = (phy->idx << 2) + DCSR;
        writel(readl(phy->base + reg) | DCSR_RUN,
                                        phy->base + reg);
@@ -218,7 +230,8 @@ static struct mmp_pdma_phy *lookup_phy(struct mmp_pdma_chan *pchan)
 {
        int prio, i;
        struct mmp_pdma_device *pdev = to_mmp_pdma_dev(pchan->chan.device);
-       struct mmp_pdma_phy *phy;
+       struct mmp_pdma_phy *phy, *found = NULL;
+       unsigned long flags;
 
        /*
         * dma channel priorities
@@ -227,6 +240,8 @@ static struct mmp_pdma_phy *lookup_phy(struct mmp_pdma_chan *pchan)
         * ch 8 - 11, 24 - 27  <--> (2)
         * ch 12 - 15, 28 - 31  <--> (3)
         */
+
+       spin_lock_irqsave(&pdev->phy_lock, flags);
        for (prio = 0; prio <= (((pdev->dma_channels - 1) & 0xf) >> 2); prio++) {
                for (i = 0; i < pdev->dma_channels; i++) {
                        if (prio != ((i & 0xf) >> 2))
@@ -234,12 +249,34 @@ static struct mmp_pdma_phy *lookup_phy(struct mmp_pdma_chan *pchan)
                        phy = &pdev->phy[i];
                        if (!phy->vchan) {
                                phy->vchan = pchan;
-                               return phy;
+                               found = phy;
+                               goto out_unlock;
                        }
                }
        }
 
-       return NULL;
+out_unlock:
+       spin_unlock_irqrestore(&pdev->phy_lock, flags);
+       return found;
+}
+
+static void mmp_pdma_free_phy(struct mmp_pdma_chan *pchan)
+{
+       struct mmp_pdma_device *pdev = to_mmp_pdma_dev(pchan->chan.device);
+       unsigned long flags;
+       u32 reg;
+
+       if (!pchan->phy)
+               return;
+
+       /* clear the channel mapping in DRCMR */
+       reg = DRCMR(pchan->phy->vchan->drcmr);
+       writel(0, pchan->phy->base + reg);
+
+       spin_lock_irqsave(&pdev->phy_lock, flags);
+       pchan->phy->vchan = NULL;
+       pchan->phy = NULL;
+       spin_unlock_irqrestore(&pdev->phy_lock, flags);
 }
 
 /* desc->tx_list ==> pending list */
@@ -277,10 +314,7 @@ static void start_pending_queue(struct mmp_pdma_chan *chan)
 
        if (list_empty(&chan->chain_pending)) {
                /* chance to re-fetch phy channel with higher prio */
-               if (chan->phy) {
-                       chan->phy->vchan = NULL;
-                       chan->phy = NULL;
-               }
+               mmp_pdma_free_phy(chan);
                dev_dbg(chan->dev, "no pending list\n");
                return;
        }
@@ -333,7 +367,8 @@ static dma_cookie_t mmp_pdma_tx_submit(struct dma_async_tx_descriptor *tx)
        return cookie;
 }
 
-struct mmp_pdma_desc_sw *mmp_pdma_alloc_descriptor(struct mmp_pdma_chan *chan)
+static struct mmp_pdma_desc_sw *
+mmp_pdma_alloc_descriptor(struct mmp_pdma_chan *chan)
 {
        struct mmp_pdma_desc_sw *desc;
        dma_addr_t pdesc;
@@ -377,10 +412,7 @@ static int mmp_pdma_alloc_chan_resources(struct dma_chan *dchan)
                dev_err(chan->dev, "unable to allocate descriptor pool\n");
                return -ENOMEM;
        }
-       if (chan->phy) {
-               chan->phy->vchan = NULL;
-               chan->phy = NULL;
-       }
+       mmp_pdma_free_phy(chan);
        chan->idle = true;
        chan->dev_addr = 0;
        return 1;
@@ -411,10 +443,7 @@ static void mmp_pdma_free_chan_resources(struct dma_chan *dchan)
        chan->desc_pool = NULL;
        chan->idle = true;
        chan->dev_addr = 0;
-       if (chan->phy) {
-               chan->phy->vchan = NULL;
-               chan->phy = NULL;
-       }
+       mmp_pdma_free_phy(chan);
        return;
 }
 
@@ -434,6 +463,7 @@ mmp_pdma_prep_memcpy(struct dma_chan *dchan,
                return NULL;
 
        chan = to_mmp_pdma_chan(dchan);
+       chan->byte_align = false;
 
        if (!chan->dir) {
                chan->dir = DMA_MEM_TO_MEM;
@@ -450,6 +480,8 @@ mmp_pdma_prep_memcpy(struct dma_chan *dchan,
                }
 
                copy = min_t(size_t, len, PDMA_MAX_DESC_BYTES);
+               if (dma_src & 0x7 || dma_dst & 0x7)
+                       chan->byte_align = true;
 
                new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & copy);
                new->desc.dsadr = dma_src;
@@ -509,12 +541,16 @@ mmp_pdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
        if ((sgl == NULL) || (sg_len == 0))
                return NULL;
 
+       chan->byte_align = false;
+
        for_each_sg(sgl, sg, sg_len, i) {
                addr = sg_dma_address(sg);
                avail = sg_dma_len(sgl);
 
                do {
                        len = min_t(size_t, avail, PDMA_MAX_DESC_BYTES);
+                       if (addr & 0x7)
+                               chan->byte_align = true;
 
                        /* allocate and populate the descriptor */
                        new = mmp_pdma_alloc_descriptor(chan);
@@ -581,10 +617,7 @@ static int mmp_pdma_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd,
        switch (cmd) {
        case DMA_TERMINATE_ALL:
                disable_chan(chan->phy);
-               if (chan->phy) {
-                       chan->phy->vchan = NULL;
-                       chan->phy = NULL;
-               }
+               mmp_pdma_free_phy(chan);
                spin_lock_irqsave(&chan->desc_lock, flags);
                mmp_pdma_free_desc_list(chan, &chan->chain_pending);
                mmp_pdma_free_desc_list(chan, &chan->chain_running);
@@ -619,8 +652,13 @@ static int mmp_pdma_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd,
                        chan->dcmd |= DCMD_BURST32;
 
                chan->dir = cfg->direction;
-               chan->drcmr = cfg->slave_id;
                chan->dev_addr = addr;
+               /* FIXME: drivers should be ported over to use the filter
+                * function. Once that's done, the following two lines can
+                * be removed.
+                */
+               if (cfg->slave_id)
+                       chan->drcmr = cfg->slave_id;
                break;
        default:
                return -ENOSYS;
@@ -632,15 +670,7 @@ static int mmp_pdma_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd,
 static enum dma_status mmp_pdma_tx_status(struct dma_chan *dchan,
                        dma_cookie_t cookie, struct dma_tx_state *txstate)
 {
-       struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
-       enum dma_status ret;
-       unsigned long flags;
-
-       spin_lock_irqsave(&chan->desc_lock, flags);
-       ret = dma_cookie_status(dchan, cookie, txstate);
-       spin_unlock_irqrestore(&chan->desc_lock, flags);
-
-       return ret;
+       return dma_cookie_status(dchan, cookie, txstate);
 }
 
 /**
@@ -763,6 +793,39 @@ static struct of_device_id mmp_pdma_dt_ids[] = {
 };
 MODULE_DEVICE_TABLE(of, mmp_pdma_dt_ids);
 
+static struct dma_chan *mmp_pdma_dma_xlate(struct of_phandle_args *dma_spec,
+                                          struct of_dma *ofdma)
+{
+       struct mmp_pdma_device *d = ofdma->of_dma_data;
+       struct dma_chan *chan, *candidate;
+
+retry:
+       candidate = NULL;
+
+       /* walk the list of channels registered with the current instance and
+        * find one that is currently unused */
+       list_for_each_entry(chan, &d->device.channels, device_node)
+               if (chan->client_count == 0) {
+                       candidate = chan;
+                       break;
+               }
+
+       if (!candidate)
+               return NULL;
+
+       /* dma_get_slave_channel will return NULL if we lost a race between
+        * the lookup and the reservation */
+       chan = dma_get_slave_channel(candidate);
+
+       if (chan) {
+               struct mmp_pdma_chan *c = to_mmp_pdma_chan(chan);
+               c->drcmr = dma_spec->args[0];
+               return chan;
+       }
+
+       goto retry;
+}
+
 static int mmp_pdma_probe(struct platform_device *op)
 {
        struct mmp_pdma_device *pdev;
@@ -777,10 +840,9 @@ static int mmp_pdma_probe(struct platform_device *op)
                return -ENOMEM;
        pdev->dev = &op->dev;
 
-       iores = platform_get_resource(op, IORESOURCE_MEM, 0);
-       if (!iores)
-               return -EINVAL;
+       spin_lock_init(&pdev->phy_lock);
 
+       iores = platform_get_resource(op, IORESOURCE_MEM, 0);
        pdev->base = devm_ioremap_resource(pdev->dev, iores);
        if (IS_ERR(pdev->base))
                return PTR_ERR(pdev->base);
@@ -825,7 +887,6 @@ static int mmp_pdma_probe(struct platform_device *op)
 
        dma_cap_set(DMA_SLAVE, pdev->device.cap_mask);
        dma_cap_set(DMA_MEMCPY, pdev->device.cap_mask);
-       dma_cap_set(DMA_SLAVE, pdev->device.cap_mask);
        pdev->device.dev = &op->dev;
        pdev->device.device_alloc_chan_resources = mmp_pdma_alloc_chan_resources;
        pdev->device.device_free_chan_resources = mmp_pdma_free_chan_resources;
@@ -847,7 +908,17 @@ static int mmp_pdma_probe(struct platform_device *op)
                return ret;
        }
 
-       dev_info(pdev->device.dev, "initialized\n");
+       if (op->dev.of_node) {
+               /* Device-tree DMA controller registration */
+               ret = of_dma_controller_register(op->dev.of_node,
+                                                mmp_pdma_dma_xlate, pdev);
+               if (ret < 0) {
+                       dev_err(&op->dev, "of_dma_controller_register failed\n");
+                       return ret;
+               }
+       }
+
+       dev_info(pdev->device.dev, "initialized %d channels\n", dma_channels);
        return 0;
 }
 
@@ -867,6 +938,19 @@ static struct platform_driver mmp_pdma_driver = {
        .remove         = mmp_pdma_remove,
 };
 
+bool mmp_pdma_filter_fn(struct dma_chan *chan, void *param)
+{
+       struct mmp_pdma_chan *c = to_mmp_pdma_chan(chan);
+
+       if (chan->device->dev->driver != &mmp_pdma_driver.driver)
+               return false;
+
+       c->drcmr = *(unsigned int *) param;
+
+       return true;
+}
+EXPORT_SYMBOL_GPL(mmp_pdma_filter_fn);
+
 module_platform_driver(mmp_pdma_driver);
 
 MODULE_DESCRIPTION("MARVELL MMP Periphera DMA Driver");
index 9b9366537d73e87c2e6a81113eaa3f0025ae6330..38cb517fb2ebd82034b02e2ab7d5cabfc9992cbd 100644 (file)
@@ -460,7 +460,8 @@ static enum dma_status mmp_tdma_tx_status(struct dma_chan *chan,
 {
        struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
 
-       dma_set_residue(txstate, tdmac->buf_len - tdmac->pos);
+       dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie,
+                        tdmac->buf_len - tdmac->pos);
 
        return tdmac->status;
 }
@@ -549,9 +550,6 @@ static int mmp_tdma_probe(struct platform_device *pdev)
        }
 
        iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!iores)
-               return -EINVAL;
-
        tdev->base = devm_ioremap_resource(&pdev->dev, iores);
        if (IS_ERR(tdev->base))
                return PTR_ERR(tdev->base);
index 2d956732aa3d262aa2c5e1c603ff530bab31db78..2fe4353773338234375df48855ec7c941798bb31 100644 (file)
@@ -556,15 +556,7 @@ static enum dma_status
 mpc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
               struct dma_tx_state *txstate)
 {
-       struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
-       enum dma_status ret;
-       unsigned long flags;
-
-       spin_lock_irqsave(&mchan->lock, flags);
-       ret = dma_cookie_status(chan, cookie, txstate);
-       spin_unlock_irqrestore(&mchan->lock, flags);
-
-       return ret;
+       return dma_cookie_status(chan, cookie, txstate);
 }
 
 /* Prepare descriptor for memory to memory copy */
index 200f1a3c9a449d2fe297fa6620c148f74c463f18..d9a26777a1b00ae8e36a898d27db0133bf41f3d5 100644 (file)
@@ -647,7 +647,7 @@ mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
 
        dev_dbg(mv_chan_to_devp(mv_chan),
                "%s sw_desc %p async_tx %p\n",
-               __func__, sw_desc, sw_desc ? &sw_desc->async_tx : 0);
+               __func__, sw_desc, sw_desc ? &sw_desc->async_tx : NULL);
 
        return sw_desc ? &sw_desc->async_tx : NULL;
 }
@@ -1166,7 +1166,7 @@ static int mv_xor_probe(struct platform_device *pdev)
 {
        const struct mbus_dram_target_info *dram;
        struct mv_xor_device *xordev;
-       struct mv_xor_platform_data *pdata = pdev->dev.platform_data;
+       struct mv_xor_platform_data *pdata = dev_get_platdata(&pdev->dev);
        struct resource *res;
        int i, ret;
 
index 719593002ab7866051aa9cf7c52633ce60000ba6..ccd13df841db790ff9eabc9cbc9df79f5f8bb9af 100644 (file)
@@ -23,7 +23,6 @@
 #include <linux/dmaengine.h>
 #include <linux/delay.h>
 #include <linux/module.h>
-#include <linux/fsl/mxs-dma.h>
 #include <linux/stmp_device.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
@@ -197,24 +196,6 @@ static struct mxs_dma_chan *to_mxs_dma_chan(struct dma_chan *chan)
        return container_of(chan, struct mxs_dma_chan, chan);
 }
 
-int mxs_dma_is_apbh(struct dma_chan *chan)
-{
-       struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
-       struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
-
-       return dma_is_apbh(mxs_dma);
-}
-EXPORT_SYMBOL_GPL(mxs_dma_is_apbh);
-
-int mxs_dma_is_apbx(struct dma_chan *chan)
-{
-       struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
-       struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
-
-       return !dma_is_apbh(mxs_dma);
-}
-EXPORT_SYMBOL_GPL(mxs_dma_is_apbx);
-
 static void mxs_dma_reset_chan(struct mxs_dma_chan *mxs_chan)
 {
        struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
@@ -349,13 +330,9 @@ static irqreturn_t mxs_dma_int_handler(int irq, void *dev_id)
 static int mxs_dma_alloc_chan_resources(struct dma_chan *chan)
 {
        struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
-       struct mxs_dma_data *data = chan->private;
        struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
        int ret;
 
-       if (data)
-               mxs_chan->chan_irq = data->chan_irq;
-
        mxs_chan->ccw = dma_alloc_coherent(mxs_dma->dma_device.dev,
                                CCW_BLOCK_SIZE, &mxs_chan->ccw_phys,
                                GFP_KERNEL);
@@ -622,10 +599,8 @@ static enum dma_status mxs_dma_tx_status(struct dma_chan *chan,
                        dma_cookie_t cookie, struct dma_tx_state *txstate)
 {
        struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
-       dma_cookie_t last_used;
 
-       last_used = chan->cookie;
-       dma_set_tx_state(txstate, chan->completed_cookie, last_used, 0);
+       dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie, 0);
 
        return mxs_chan->status;
 }
index 75334bdd2c56bc29a18b24466d1114d7928cad58..0b88dd3d05f4880f41561f455f79c5eb9ca0a885 100644 (file)
@@ -160,7 +160,8 @@ struct dma_chan *of_dma_request_slave_channel(struct device_node *np,
 
        count = of_property_count_strings(np, "dma-names");
        if (count < 0) {
-               pr_err("%s: dma-names property missing or empty\n", __func__);
+               pr_err("%s: dma-names property of node '%s' missing or empty\n",
+                       __func__, np->full_name);
                return NULL;
        }
 
index 0bbdea5059f3b693a8929a4d6ee82db24c90b768..956314de7c44f4fe44cffeec1ab60a35d76f43ea 100644 (file)
@@ -564,14 +564,7 @@ static void pd_free_chan_resources(struct dma_chan *chan)
 static enum dma_status pd_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
                                    struct dma_tx_state *txstate)
 {
-       struct pch_dma_chan *pd_chan = to_pd_chan(chan);
-       enum dma_status ret;
-
-       spin_lock_irq(&pd_chan->lock);
-       ret = dma_cookie_status(chan, cookie, txstate);
-       spin_unlock_irq(&pd_chan->lock);
-
-       return ret;
+       return dma_cookie_status(chan, cookie, txstate);
 }
 
 static void pd_issue_pending(struct dma_chan *chan)
index fa645d8250091943ba8d47aa42852b99b74ca1ce..33186fd969710c1a5f5449c6154707a4ace72947 100644 (file)
@@ -2814,6 +2814,28 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
        return &desc->txd;
 }
 
+static void __pl330_giveback_desc(struct dma_pl330_dmac *pdmac,
+                                 struct dma_pl330_desc *first)
+{
+       unsigned long flags;
+       struct dma_pl330_desc *desc;
+
+       if (!first)
+               return;
+
+       spin_lock_irqsave(&pdmac->pool_lock, flags);
+
+       while (!list_empty(&first->node)) {
+               desc = list_entry(first->node.next,
+                               struct dma_pl330_desc, node);
+               list_move_tail(&desc->node, &pdmac->desc_pool);
+       }
+
+       list_move_tail(&first->node, &pdmac->desc_pool);
+
+       spin_unlock_irqrestore(&pdmac->pool_lock, flags);
+}
+
 static struct dma_async_tx_descriptor *
 pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
                unsigned int sg_len, enum dma_transfer_direction direction,
@@ -2822,7 +2844,6 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
        struct dma_pl330_desc *first, *desc = NULL;
        struct dma_pl330_chan *pch = to_pchan(chan);
        struct scatterlist *sg;
-       unsigned long flags;
        int i;
        dma_addr_t addr;
 
@@ -2842,20 +2863,7 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
                        dev_err(pch->dmac->pif.dev,
                                "%s:%d Unable to fetch desc\n",
                                __func__, __LINE__);
-                       if (!first)
-                               return NULL;
-
-                       spin_lock_irqsave(&pdmac->pool_lock, flags);
-
-                       while (!list_empty(&first->node)) {
-                               desc = list_entry(first->node.next,
-                                               struct dma_pl330_desc, node);
-                               list_move_tail(&desc->node, &pdmac->desc_pool);
-                       }
-
-                       list_move_tail(&first->node, &pdmac->desc_pool);
-
-                       spin_unlock_irqrestore(&pdmac->pool_lock, flags);
+                       __pl330_giveback_desc(pdmac, first);
 
                        return NULL;
                }
@@ -2896,6 +2904,32 @@ static irqreturn_t pl330_irq_handler(int irq, void *data)
                return IRQ_NONE;
 }
 
+#define PL330_DMA_BUSWIDTHS \
+       BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \
+       BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
+       BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
+       BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
+       BIT(DMA_SLAVE_BUSWIDTH_8_BYTES)
+
+static int pl330_dma_device_slave_caps(struct dma_chan *dchan,
+       struct dma_slave_caps *caps)
+{
+       caps->src_addr_widths = PL330_DMA_BUSWIDTHS;
+       caps->dstn_addr_widths = PL330_DMA_BUSWIDTHS;
+       caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+       caps->cmd_pause = false;
+       caps->cmd_terminate = true;
+
+       /*
+        * This is the limit for transfers with a buswidth of 1, larger
+        * buswidths will have larger limits.
+        */
+       caps->max_sg_len = 1900800;
+       caps->max_sg_nr = 0;
+
+       return 0;
+}
+
 static int
 pl330_probe(struct amba_device *adev, const struct amba_id *id)
 {
@@ -2908,7 +2942,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
        int i, ret, irq;
        int num_chan;
 
-       pdat = adev->dev.platform_data;
+       pdat = dev_get_platdata(&adev->dev);
 
        /* Allocate a new DMAC and its Channels */
        pdmac = devm_kzalloc(&adev->dev, sizeof(*pdmac), GFP_KERNEL);
@@ -3000,6 +3034,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
        pd->device_prep_slave_sg = pl330_prep_slave_sg;
        pd->device_control = pl330_control;
        pd->device_issue_pending = pl330_issue_pending;
+       pd->device_slave_caps = pl330_dma_device_slave_caps;
 
        ret = dma_async_device_register(pd);
        if (ret) {
index 11bcb05cd79c9eb6e9f6258d6d8cfcabc46bdf28..966aaab0b4d337b640396e9d63e313f54e59999f 100644 (file)
@@ -42,7 +42,7 @@ static struct dma_chan *shdma_of_xlate(struct of_phandle_args *dma_spec,
 
 static int shdma_of_probe(struct platform_device *pdev)
 {
-       const struct of_dev_auxdata *lookup = pdev->dev.platform_data;
+       const struct of_dev_auxdata *lookup = dev_get_platdata(&pdev->dev);
        int ret;
 
        if (!lookup)
index 5039fbc88254eabd570e9da6d50d51ed0cff8240..211e2f1bcd9604c84db26591ed2e38f4ffe970ac 100644 (file)
@@ -660,7 +660,7 @@ static const struct shdma_ops sh_dmae_shdma_ops = {
 
 static int sh_dmae_probe(struct platform_device *pdev)
 {
-       struct sh_dmae_pdata *pdata = pdev->dev.platform_data;
+       struct sh_dmae_pdata *pdata = dev_get_platdata(&pdev->dev);
        unsigned long irqflags = IRQF_DISABLED,
                chan_flag[SH_DMAE_MAX_CHANNELS] = {};
        int errirq, chan_irq[SH_DMAE_MAX_CHANNELS];
index e7c94bbddb536b8e44822e684e7301487bb9ee94..c49441753d995464796e1686442ea042fa5b3a4d 100644 (file)
@@ -335,7 +335,7 @@ static const struct shdma_ops sudmac_shdma_ops = {
 
 static int sudmac_probe(struct platform_device *pdev)
 {
-       struct sudmac_pdata *pdata = pdev->dev.platform_data;
+       struct sudmac_pdata *pdata = dev_get_platdata(&pdev->dev);
        int err, i;
        struct sudmac_device *su_dev;
        struct dma_device *dma_dev;
@@ -373,7 +373,7 @@ static int sudmac_probe(struct platform_device *pdev)
                return err;
 
        /* platform data */
-       su_dev->pdata = pdev->dev.platform_data;
+       su_dev->pdata = dev_get_platdata(&pdev->dev);
 
        platform_set_drvdata(pdev, su_dev);
 
index 716b23e4f327e13d96be6b046bc6926f8988b3dd..6aec3ad814d37f16b69c51f44347d9826e411885 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/module.h>
 #include <linux/dmaengine.h>
 #include <linux/dma-mapping.h>
+#include <linux/pm_runtime.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
 #include <linux/slab.h>
@@ -73,6 +74,11 @@ struct sirfsoc_dma_chan {
        int                             mode;
 };
 
+struct sirfsoc_dma_regs {
+       u32                             ctrl[SIRFSOC_DMA_CHANNELS];
+       u32                             interrupt_en;
+};
+
 struct sirfsoc_dma {
        struct dma_device               dma;
        struct tasklet_struct           tasklet;
@@ -81,10 +87,13 @@ struct sirfsoc_dma {
        int                             irq;
        struct clk                      *clk;
        bool                            is_marco;
+       struct sirfsoc_dma_regs         regs_save;
 };
 
 #define DRV_NAME       "sirfsoc_dma"
 
+static int sirfsoc_dma_runtime_suspend(struct device *dev);
+
 /* Convert struct dma_chan to struct sirfsoc_dma_chan */
 static inline
 struct sirfsoc_dma_chan *dma_chan_to_sirfsoc_dma_chan(struct dma_chan *c)
@@ -393,6 +402,8 @@ static int sirfsoc_dma_alloc_chan_resources(struct dma_chan *chan)
        LIST_HEAD(descs);
        int i;
 
+       pm_runtime_get_sync(sdma->dma.dev);
+
        /* Alloc descriptors for this channel */
        for (i = 0; i < SIRFSOC_DMA_DESCRIPTORS; i++) {
                sdesc = kzalloc(sizeof(*sdesc), GFP_KERNEL);
@@ -425,6 +436,7 @@ static int sirfsoc_dma_alloc_chan_resources(struct dma_chan *chan)
 static void sirfsoc_dma_free_chan_resources(struct dma_chan *chan)
 {
        struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
+       struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
        struct sirfsoc_dma_desc *sdesc, *tmp;
        unsigned long flags;
        LIST_HEAD(descs);
@@ -445,6 +457,8 @@ static void sirfsoc_dma_free_chan_resources(struct dma_chan *chan)
        /* Free descriptors */
        list_for_each_entry_safe(sdesc, tmp, &descs, node)
                kfree(sdesc);
+
+       pm_runtime_put(sdma->dma.dev);
 }
 
 /* Send pending descriptor to hardware */
@@ -595,7 +609,7 @@ sirfsoc_dma_prep_cyclic(struct dma_chan *chan, dma_addr_t addr,
        spin_unlock_irqrestore(&schan->lock, iflags);
 
        if (!sdesc)
-               return 0;
+               return NULL;
 
        /* Place descriptor in prepared list */
        spin_lock_irqsave(&schan->lock, iflags);
@@ -723,14 +737,14 @@ static int sirfsoc_dma_probe(struct platform_device *op)
 
        tasklet_init(&sdma->tasklet, sirfsoc_dma_tasklet, (unsigned long)sdma);
 
-       clk_prepare_enable(sdma->clk);
-
        /* Register DMA engine */
        dev_set_drvdata(dev, sdma);
+
        ret = dma_async_device_register(dma);
        if (ret)
                goto free_irq;
 
+       pm_runtime_enable(&op->dev);
        dev_info(dev, "initialized SIRFSOC DMAC driver\n");
 
        return 0;
@@ -747,13 +761,124 @@ static int sirfsoc_dma_remove(struct platform_device *op)
        struct device *dev = &op->dev;
        struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
 
-       clk_disable_unprepare(sdma->clk);
        dma_async_device_unregister(&sdma->dma);
        free_irq(sdma->irq, sdma);
        irq_dispose_mapping(sdma->irq);
+       pm_runtime_disable(&op->dev);
+       if (!pm_runtime_status_suspended(&op->dev))
+               sirfsoc_dma_runtime_suspend(&op->dev);
+
+       return 0;
+}
+
+static int sirfsoc_dma_runtime_suspend(struct device *dev)
+{
+       struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
+
+       clk_disable_unprepare(sdma->clk);
+       return 0;
+}
+
+static int sirfsoc_dma_runtime_resume(struct device *dev)
+{
+       struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
+       int ret;
+
+       ret = clk_prepare_enable(sdma->clk);
+       if (ret < 0) {
+               dev_err(dev, "clk_enable failed: %d\n", ret);
+               return ret;
+       }
+       return 0;
+}
+
+static int sirfsoc_dma_pm_suspend(struct device *dev)
+{
+       struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
+       struct sirfsoc_dma_regs *save = &sdma->regs_save;
+       struct sirfsoc_dma_desc *sdesc;
+       struct sirfsoc_dma_chan *schan;
+       int ch;
+       int ret;
+
+       /*
+        * if we were runtime-suspended before, resume to enable clock
+        * before accessing register
+        */
+       if (pm_runtime_status_suspended(dev)) {
+               ret = sirfsoc_dma_runtime_resume(dev);
+               if (ret < 0)
+                       return ret;
+       }
+
+       /*
+        * DMA controller will lose all registers while suspending
+        * so we need to save registers for active channels
+        */
+       for (ch = 0; ch < SIRFSOC_DMA_CHANNELS; ch++) {
+               schan = &sdma->channels[ch];
+               if (list_empty(&schan->active))
+                       continue;
+               sdesc = list_first_entry(&schan->active,
+                       struct sirfsoc_dma_desc,
+                       node);
+               save->ctrl[ch] = readl_relaxed(sdma->base +
+                       ch * 0x10 + SIRFSOC_DMA_CH_CTRL);
+       }
+       save->interrupt_en = readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN);
+
+       /* Disable clock */
+       sirfsoc_dma_runtime_suspend(dev);
+
+       return 0;
+}
+
+static int sirfsoc_dma_pm_resume(struct device *dev)
+{
+       struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
+       struct sirfsoc_dma_regs *save = &sdma->regs_save;
+       struct sirfsoc_dma_desc *sdesc;
+       struct sirfsoc_dma_chan *schan;
+       int ch;
+       int ret;
+
+       /* Enable clock before accessing register */
+       ret = sirfsoc_dma_runtime_resume(dev);
+       if (ret < 0)
+               return ret;
+
+       writel_relaxed(save->interrupt_en, sdma->base + SIRFSOC_DMA_INT_EN);
+       for (ch = 0; ch < SIRFSOC_DMA_CHANNELS; ch++) {
+               schan = &sdma->channels[ch];
+               if (list_empty(&schan->active))
+                       continue;
+               sdesc = list_first_entry(&schan->active,
+                       struct sirfsoc_dma_desc,
+                       node);
+               writel_relaxed(sdesc->width,
+                       sdma->base + SIRFSOC_DMA_WIDTH_0 + ch * 4);
+               writel_relaxed(sdesc->xlen,
+                       sdma->base + ch * 0x10 + SIRFSOC_DMA_CH_XLEN);
+               writel_relaxed(sdesc->ylen,
+                       sdma->base + ch * 0x10 + SIRFSOC_DMA_CH_YLEN);
+               writel_relaxed(save->ctrl[ch],
+                       sdma->base + ch * 0x10 + SIRFSOC_DMA_CH_CTRL);
+               writel_relaxed(sdesc->addr >> 2,
+                       sdma->base + ch * 0x10 + SIRFSOC_DMA_CH_ADDR);
+       }
+
+       /* if we were runtime-suspended before, suspend again */
+       if (pm_runtime_status_suspended(dev))
+               sirfsoc_dma_runtime_suspend(dev);
+
        return 0;
 }
 
+static const struct dev_pm_ops sirfsoc_dma_pm_ops = {
+       SET_RUNTIME_PM_OPS(sirfsoc_dma_runtime_suspend, sirfsoc_dma_runtime_resume, NULL)
+       SET_SYSTEM_SLEEP_PM_OPS(sirfsoc_dma_pm_suspend, sirfsoc_dma_pm_resume)
+};
+
 static struct of_device_id sirfsoc_dma_match[] = {
        { .compatible = "sirf,prima2-dmac", },
        { .compatible = "sirf,marco-dmac", },
@@ -766,6 +891,7 @@ static struct platform_driver sirfsoc_dma_driver = {
        .driver = {
                .name = DRV_NAME,
                .owner = THIS_MODULE,
+               .pm = &sirfsoc_dma_pm_ops,
                .of_match_table = sirfsoc_dma_match,
        },
 };
index 5ab5880d5c9041203bdb38a4d242c888777e8f2d..0036756795d10aa5d392111c0431776773c0972c 100644 (file)
@@ -3139,7 +3139,7 @@ static int __init d40_phy_res_init(struct d40_base *base)
 
 static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
 {
-       struct stedma40_platform_data *plat_data = pdev->dev.platform_data;
+       struct stedma40_platform_data *plat_data = dev_get_platdata(&pdev->dev);
        struct clk *clk = NULL;
        void __iomem *virtbase = NULL;
        struct resource *res = NULL;
@@ -3535,7 +3535,7 @@ static int __init d40_of_probe(struct platform_device *pdev,
 
 static int __init d40_probe(struct platform_device *pdev)
 {
-       struct stedma40_platform_data *plat_data = pdev->dev.platform_data;
+       struct stedma40_platform_data *plat_data = dev_get_platdata(&pdev->dev);
        struct device_node *np = pdev->dev.of_node;
        int ret = -ENOENT;
        struct d40_base *base = NULL;
index f137914d7b1650d285ee2c294ae941c12ac124fc..5d4986e5f5fa6b21423084b688bd0a8afbba0c2e 100644 (file)
@@ -767,13 +767,11 @@ static enum dma_status tegra_dma_tx_status(struct dma_chan *dc,
        unsigned long flags;
        unsigned int residual;
 
-       spin_lock_irqsave(&tdc->lock, flags);
-
        ret = dma_cookie_status(dc, cookie, txstate);
-       if (ret == DMA_SUCCESS) {
-               spin_unlock_irqrestore(&tdc->lock, flags);
+       if (ret == DMA_SUCCESS)
                return ret;
-       }
+
+       spin_lock_irqsave(&tdc->lock, flags);
 
        /* Check on wait_ack desc status */
        list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) {
index 0ef43c136aa7dbbd30c65b9b1ebb43984b1baea0..28af214fce049db85fc02fb903a748fdbef6e0a1 100644 (file)
@@ -669,7 +669,7 @@ static irqreturn_t td_irq(int irq, void *devid)
 
 static int td_probe(struct platform_device *pdev)
 {
-       struct timb_dma_platform_data *pdata = pdev->dev.platform_data;
+       struct timb_dma_platform_data *pdata = dev_get_platdata(&pdev->dev);
        struct timb_dma *td;
        struct resource *iomem;
        int irq;
index a59fb4841d4c18283eae911c076c43dc042f0748..71e8e775189e0df5568d474ea00157a2675f9260 100644 (file)
@@ -962,15 +962,14 @@ txx9dmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
        enum dma_status ret;
 
        ret = dma_cookie_status(chan, cookie, txstate);
-       if (ret != DMA_SUCCESS) {
-               spin_lock_bh(&dc->lock);
-               txx9dmac_scan_descriptors(dc);
-               spin_unlock_bh(&dc->lock);
+       if (ret == DMA_SUCCESS)
+               return DMA_SUCCESS;
 
-               ret = dma_cookie_status(chan, cookie, txstate);
-       }
+       spin_lock_bh(&dc->lock);
+       txx9dmac_scan_descriptors(dc);
+       spin_unlock_bh(&dc->lock);
 
-       return ret;
+       return dma_cookie_status(chan, cookie, txstate);
 }
 
 static void txx9dmac_chain_dynamic(struct txx9dmac_chan *dc,
@@ -1118,9 +1117,10 @@ static void txx9dmac_off(struct txx9dmac_dev *ddev)
 
 static int __init txx9dmac_chan_probe(struct platform_device *pdev)
 {
-       struct txx9dmac_chan_platform_data *cpdata = pdev->dev.platform_data;
+       struct txx9dmac_chan_platform_data *cpdata =
+                       dev_get_platdata(&pdev->dev);
        struct platform_device *dmac_dev = cpdata->dmac_dev;
-       struct txx9dmac_platform_data *pdata = dmac_dev->dev.platform_data;
+       struct txx9dmac_platform_data *pdata = dev_get_platdata(&dmac_dev->dev);
        struct txx9dmac_chan *dc;
        int err;
        int ch = pdev->id % TXX9_DMA_MAX_NR_CHANNELS;
@@ -1203,7 +1203,7 @@ static int txx9dmac_chan_remove(struct platform_device *pdev)
 
 static int __init txx9dmac_probe(struct platform_device *pdev)
 {
-       struct txx9dmac_platform_data *pdata = pdev->dev.platform_data;
+       struct txx9dmac_platform_data *pdata = dev_get_platdata(&pdev->dev);
        struct resource *io;
        struct txx9dmac_dev *ddev;
        u32 mcr;
@@ -1282,7 +1282,7 @@ static int txx9dmac_resume_noirq(struct device *dev)
 {
        struct platform_device *pdev = to_platform_device(dev);
        struct txx9dmac_dev *ddev = platform_get_drvdata(pdev);
-       struct txx9dmac_platform_data *pdata = pdev->dev.platform_data;
+       struct txx9dmac_platform_data *pdata = dev_get_platdata(&pdev->dev);
        u32 mcr;
 
        mcr = TXX9_DMA_MCR_MSTEN | MCR_LE;
index a0820536b7d9d8edf0905ed04bf913d6fb6f21db..578f915ee195576260266126fd1ff4bba00858f8 100644 (file)
@@ -257,7 +257,6 @@ static void __exit tile_edac_exit(void)
                if (!pdev)
                        continue;
 
-               platform_set_drvdata(pdev, NULL);
                platform_device_unregister(pdev);
        }
        platform_driver_unregister(&tile_edac_mc_driver);
index ac1b43a0428531273c4fdaefd56a0b83f1545ce2..d7d5c8af92b9754fa79aa632c69d9ffc1fb27420 100644 (file)
@@ -486,7 +486,7 @@ static int ioctl_get_info(struct client *client, union ioctl_arg *arg)
 static int add_client_resource(struct client *client,
                               struct client_resource *resource, gfp_t gfp_mask)
 {
-       bool preload = gfp_mask & __GFP_WAIT;
+       bool preload = !!(gfp_mask & __GFP_WAIT);
        unsigned long flags;
        int ret;
 
index 28a94c7ec6e5bad3af57ed258e0615266ab7b135..e5af0e3a26ec9345a9a775e77ab76b78db94ba25 100644 (file)
@@ -1262,8 +1262,7 @@ static int __init fw_core_init(void)
 {
        int ret;
 
-       fw_workqueue = alloc_workqueue("firewire",
-                                      WQ_NON_REENTRANT | WQ_MEM_RECLAIM, 0);
+       fw_workqueue = alloc_workqueue("firewire", WQ_MEM_RECLAIM, 0);
        if (!fw_workqueue)
                return -ENOMEM;
 
index afb701ec90cabd50ceb03b1298beba38e97717b4..04e6eb127b7325d21c3560655fe18c22200e0d07 100644 (file)
@@ -235,7 +235,7 @@ struct fw_ohci {
        dma_addr_t next_config_rom_bus;
        __be32     next_header;
 
-       __le32    *self_id_cpu;
+       __le32    *self_id;
        dma_addr_t self_id_bus;
        struct work_struct bus_reset_work;
 
@@ -271,6 +271,7 @@ static inline struct fw_ohci *fw_ohci(struct fw_card *card)
 
 static char ohci_driver_name[] = KBUILD_MODNAME;
 
+#define PCI_VENDOR_ID_PINNACLE_SYSTEMS 0x11bd
 #define PCI_DEVICE_ID_AGERE_FW643      0x5901
 #define PCI_DEVICE_ID_CREATIVE_SB1394  0x4001
 #define PCI_DEVICE_ID_JMICRON_JMB38X_FW        0x2380
@@ -278,17 +279,16 @@ static char ohci_driver_name[] = KBUILD_MODNAME;
 #define PCI_DEVICE_ID_TI_TSB12LV26     0x8020
 #define PCI_DEVICE_ID_TI_TSB82AA2      0x8025
 #define PCI_DEVICE_ID_VIA_VT630X       0x3044
-#define PCI_VENDOR_ID_PINNACLE_SYSTEMS 0x11bd
 #define PCI_REV_ID_VIA_VT6306          0x46
 
-#define QUIRK_CYCLE_TIMER              1
-#define QUIRK_RESET_PACKET             2
-#define QUIRK_BE_HEADERS               4
-#define QUIRK_NO_1394A                 8
-#define QUIRK_NO_MSI                   16
-#define QUIRK_TI_SLLZ059               32
-#define QUIRK_IR_WAKE                  64
-#define QUIRK_PHY_LCTRL_TIMEOUT                128
+#define QUIRK_CYCLE_TIMER              0x1
+#define QUIRK_RESET_PACKET             0x2
+#define QUIRK_BE_HEADERS               0x4
+#define QUIRK_NO_1394A                 0x8
+#define QUIRK_NO_MSI                   0x10
+#define QUIRK_TI_SLLZ059               0x20
+#define QUIRK_IR_WAKE                  0x40
+#define QUIRK_PHY_LCTRL_TIMEOUT                0x80
 
 /* In case of multiple matches in ohci_quirks[], only the first one is used. */
 static const struct {
@@ -1929,12 +1929,12 @@ static void bus_reset_work(struct work_struct *work)
                return;
        }
 
-       generation = (cond_le32_to_cpu(ohci->self_id_cpu[0]) >> 16) & 0xff;
+       generation = (cond_le32_to_cpu(ohci->self_id[0]) >> 16) & 0xff;
        rmb();
 
        for (i = 1, j = 0; j < self_id_count; i += 2, j++) {
-               u32 id  = cond_le32_to_cpu(ohci->self_id_cpu[i]);
-               u32 id2 = cond_le32_to_cpu(ohci->self_id_cpu[i + 1]);
+               u32 id  = cond_le32_to_cpu(ohci->self_id[i]);
+               u32 id2 = cond_le32_to_cpu(ohci->self_id[i + 1]);
 
                if (id != ~id2) {
                        /*
@@ -3692,7 +3692,7 @@ static int pci_probe(struct pci_dev *dev,
                goto fail_contexts;
        }
 
-       ohci->self_id_cpu = ohci->misc_buffer     + PAGE_SIZE/2;
+       ohci->self_id     = ohci->misc_buffer     + PAGE_SIZE/2;
        ohci->self_id_bus = ohci->misc_buffer_bus + PAGE_SIZE/2;
 
        bus_options = reg_read(ohci, OHCI1394_BusOptions);
index 73de5a9c22473bc66890efc3cea2caf9c0c3e66f..5002d50e37817314d1fe3e449ad991ade1969f6b 100644 (file)
@@ -35,6 +35,7 @@ struct pstore_read_data {
        enum pstore_type_id *type;
        int *count;
        struct timespec *timespec;
+       bool *compressed;
        char **buf;
 };
 
@@ -42,7 +43,7 @@ static int efi_pstore_read_func(struct efivar_entry *entry, void *data)
 {
        efi_guid_t vendor = LINUX_EFI_CRASH_GUID;
        struct pstore_read_data *cb_data = data;
-       char name[DUMP_NAME_LEN];
+       char name[DUMP_NAME_LEN], data_type;
        int i;
        int cnt;
        unsigned int part;
@@ -54,12 +55,23 @@ static int efi_pstore_read_func(struct efivar_entry *entry, void *data)
        for (i = 0; i < DUMP_NAME_LEN; i++)
                name[i] = entry->var.VariableName[i];
 
-       if (sscanf(name, "dump-type%u-%u-%d-%lu",
+       if (sscanf(name, "dump-type%u-%u-%d-%lu-%c",
+                  cb_data->type, &part, &cnt, &time, &data_type) == 5) {
+               *cb_data->id = part;
+               *cb_data->count = cnt;
+               cb_data->timespec->tv_sec = time;
+               cb_data->timespec->tv_nsec = 0;
+               if (data_type == 'C')
+                       *cb_data->compressed = true;
+               else
+                       *cb_data->compressed = false;
+       } else if (sscanf(name, "dump-type%u-%u-%d-%lu",
                   cb_data->type, &part, &cnt, &time) == 4) {
                *cb_data->id = part;
                *cb_data->count = cnt;
                cb_data->timespec->tv_sec = time;
                cb_data->timespec->tv_nsec = 0;
+               *cb_data->compressed = false;
        } else if (sscanf(name, "dump-type%u-%u-%lu",
                          cb_data->type, &part, &time) == 3) {
                /*
@@ -71,6 +83,7 @@ static int efi_pstore_read_func(struct efivar_entry *entry, void *data)
                *cb_data->count = 0;
                cb_data->timespec->tv_sec = time;
                cb_data->timespec->tv_nsec = 0;
+               *cb_data->compressed = false;
        } else
                return 0;
 
@@ -87,7 +100,8 @@ static int efi_pstore_read_func(struct efivar_entry *entry, void *data)
 
 static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type,
                               int *count, struct timespec *timespec,
-                              char **buf, struct pstore_info *psi)
+                              char **buf, bool *compressed,
+                              struct pstore_info *psi)
 {
        struct pstore_read_data data;
 
@@ -95,6 +109,7 @@ static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type,
        data.type = type;
        data.count = count;
        data.timespec = timespec;
+       data.compressed = compressed;
        data.buf = buf;
 
        return __efivar_entry_iter(efi_pstore_read_func, &efivar_sysfs_list, &data,
@@ -103,7 +118,7 @@ static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type,
 
 static int efi_pstore_write(enum pstore_type_id type,
                enum kmsg_dump_reason reason, u64 *id,
-               unsigned int part, int count, size_t hsize, size_t size,
+               unsigned int part, int count, bool compressed, size_t size,
                struct pstore_info *psi)
 {
        char name[DUMP_NAME_LEN];
@@ -111,8 +126,8 @@ static int efi_pstore_write(enum pstore_type_id type,
        efi_guid_t vendor = LINUX_EFI_CRASH_GUID;
        int i, ret = 0;
 
-       sprintf(name, "dump-type%u-%u-%d-%lu", type, part, count,
-               get_seconds());
+       sprintf(name, "dump-type%u-%u-%d-%lu-%c", type, part, count,
+               get_seconds(), compressed ? 'C' : 'D');
 
        for (i = 0; i < DUMP_NAME_LEN; i++)
                efi_name[i] = name[i];
index a7c54c8432913ec6930518a648a4658737fdf1fe..d45bfb6807435d22a167e74cb0582ed2b26c9946 100644 (file)
@@ -6,7 +6,7 @@
 #
 menuconfig DRM
        tristate "Direct Rendering Manager (XFree86 4.1.0 and higher DRI support)"
-       depends on (AGP || AGP=n) && !EMULATED_CMPXCHG && MMU
+       depends on (AGP || AGP=n) && !EMULATED_CMPXCHG && MMU && HAS_DMA
        select HDMI
        select I2C
        select I2C_ALGOBIT
@@ -168,6 +168,17 @@ config DRM_I915_KMS
          the driver to bind to PCI devices, which precludes loading things
          like intelfb.
 
+config DRM_I915_PRELIMINARY_HW_SUPPORT
+       bool "Enable preliminary support for prerelease Intel hardware by default"
+       depends on DRM_I915
+       help
+         Choose this option if you have prerelease Intel hardware and want the
+         i915 driver to support it by default.  You can enable such support at
+         runtime with the module option i915.preliminary_hw_support=1; this
+         option changes the default for that module option.
+
+         If in doubt, say "N".
+
 config DRM_MGA
        tristate "Matrox g200/g400"
        depends on DRM && PCI
index 801bcafa3028b8a04c2b7db7fce10e55c917ea30..7b2343a2f5ebd5d9648dfccf59e05b4565bb9c8f 100644 (file)
@@ -7,13 +7,13 @@ ccflags-y := -Iinclude/drm
 drm-y       := drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \
                drm_context.o drm_dma.o \
                drm_drv.o drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \
-               drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
+               drm_lock.o drm_memory.o drm_stub.o drm_vm.o \
                drm_agpsupport.o drm_scatter.o drm_pci.o \
                drm_platform.o drm_sysfs.o drm_hashtab.o drm_mm.o \
                drm_crtc.o drm_modes.o drm_edid.o \
                drm_info.o drm_debugfs.o drm_encoder_slave.o \
                drm_trace_points.o drm_global.o drm_prime.o \
-               drm_rect.o
+               drm_rect.o drm_vma_manager.o drm_flip_work.o
 
 drm-$(CONFIG_COMPAT) += drm_ioc32.o
 drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o
index df0d0a08097a13e4765cd649545b855f0614ff91..32e270dc714eb036a325b1d4999e18e6d7c1e11b 100644 (file)
@@ -190,7 +190,6 @@ static const struct file_operations ast_fops = {
        .unlocked_ioctl = drm_ioctl,
        .mmap = ast_mmap,
        .poll = drm_poll,
-       .fasync = drm_fasync,
 #ifdef CONFIG_COMPAT
        .compat_ioctl = drm_compat_ioctl,
 #endif
@@ -198,7 +197,7 @@ static const struct file_operations ast_fops = {
 };
 
 static struct drm_driver driver = {
-       .driver_features = DRIVER_USE_MTRR | DRIVER_MODESET | DRIVER_GEM,
+       .driver_features = DRIVER_MODESET | DRIVER_GEM,
        .dev_priv_size = 0,
 
        .load = ast_driver_load,
@@ -216,7 +215,7 @@ static struct drm_driver driver = {
        .gem_free_object = ast_gem_free_object,
        .dumb_create = ast_dumb_create,
        .dumb_map_offset = ast_dumb_mmap_offset,
-       .dumb_destroy = ast_dumb_destroy,
+       .dumb_destroy = drm_gem_dumb_destroy,
 
 };
 
index 622d4ae7eb9e9e6433706d819b28fb9e0b736ffc..796dbb212a4138180f7bda2e50fb765f22805c93 100644 (file)
@@ -322,9 +322,6 @@ ast_bo(struct ttm_buffer_object *bo)
 extern int ast_dumb_create(struct drm_file *file,
                           struct drm_device *dev,
                           struct drm_mode_create_dumb *args);
-extern int ast_dumb_destroy(struct drm_file *file,
-                           struct drm_device *dev,
-                           uint32_t handle);
 
 extern int ast_gem_init_object(struct drm_gem_object *obj);
 extern void ast_gem_free_object(struct drm_gem_object *obj);
index f60fd7bd11839f0511bf2a436f1ebc79330f27c4..7f6152d374cace41ae991b3080df769e385824b8 100644 (file)
@@ -449,13 +449,6 @@ int ast_dumb_create(struct drm_file *file,
        return 0;
 }
 
-int ast_dumb_destroy(struct drm_file *file,
-                    struct drm_device *dev,
-                    uint32_t handle)
-{
-       return drm_gem_handle_delete(file, handle);
-}
-
 int ast_gem_init_object(struct drm_gem_object *obj)
 {
        BUG();
@@ -487,7 +480,7 @@ void ast_gem_free_object(struct drm_gem_object *obj)
 
 static inline u64 ast_bo_mmap_offset(struct ast_bo *bo)
 {
-       return bo->bo.addr_space_offset;
+       return drm_vma_node_offset_addr(&bo->bo.vma_node);
 }
 int
 ast_dumb_mmap_offset(struct drm_file *file,
index 6e8887fe6c1b44fb83d09df82d10d50483924de5..125d99ffdff1e208a2b1f79b9b02dc2339677944 100644 (file)
@@ -321,7 +321,6 @@ int ast_bo_create(struct drm_device *dev, int size, int align,
                return ret;
        }
 
-       astbo->gem.driver_private = NULL;
        astbo->bo.bdev = &ast->ttm.bdev;
        astbo->bo.bdev->dev_mapping = dev->dev_mapping;
 
index 8ecb601152effe17019967c559e48cddfac2f7b4..138364d917824f8ad4f7f771ad7f68a6cbb35aba 100644 (file)
@@ -85,10 +85,9 @@ static const struct file_operations cirrus_driver_fops = {
 #ifdef CONFIG_COMPAT
        .compat_ioctl = drm_compat_ioctl,
 #endif
-       .fasync = drm_fasync,
 };
 static struct drm_driver driver = {
-       .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_USE_MTRR,
+       .driver_features = DRIVER_MODESET | DRIVER_GEM,
        .load = cirrus_driver_load,
        .unload = cirrus_driver_unload,
        .fops = &cirrus_driver_fops,
@@ -102,7 +101,7 @@ static struct drm_driver driver = {
        .gem_free_object = cirrus_gem_free_object,
        .dumb_create = cirrus_dumb_create,
        .dumb_map_offset = cirrus_dumb_mmap_offset,
-       .dumb_destroy = cirrus_dumb_destroy,
+       .dumb_destroy = drm_gem_dumb_destroy,
 };
 
 static struct pci_driver cirrus_pci_driver = {
index bae55609e6c3f2192c2cd49a33417badf4c6e962..9b0bb9184afdbfa2f1eb3ebbcad2feda23aa67a4 100644 (file)
@@ -203,9 +203,6 @@ int cirrus_gem_create(struct drm_device *dev,
 int cirrus_dumb_create(struct drm_file *file,
                    struct drm_device *dev,
                       struct drm_mode_create_dumb *args);
-int cirrus_dumb_destroy(struct drm_file *file,
-                    struct drm_device *dev,
-                       uint32_t handle);
 
 int cirrus_framebuffer_init(struct drm_device *dev,
                           struct cirrus_framebuffer *gfb,
index 35cbae8277715ad2944a00dcce1c39e4482109b6..f130a533a51257dd13fcbda93462dded728182d0 100644 (file)
@@ -255,13 +255,6 @@ int cirrus_dumb_create(struct drm_file *file,
        return 0;
 }
 
-int cirrus_dumb_destroy(struct drm_file *file,
-                    struct drm_device *dev,
-                    uint32_t handle)
-{
-       return drm_gem_handle_delete(file, handle);
-}
-
 int cirrus_gem_init_object(struct drm_gem_object *obj)
 {
        BUG();
@@ -294,7 +287,7 @@ void cirrus_gem_free_object(struct drm_gem_object *obj)
 
 static inline u64 cirrus_bo_mmap_offset(struct cirrus_bo *bo)
 {
-       return bo->bo.addr_space_offset;
+       return drm_vma_node_offset_addr(&bo->bo.vma_node);
 }
 
 int
index 69fd8f1ac8df192729c89f4dd981a5bb5b7611db..dbb8920e0ccc64fc96b18de4c3d5f376d1c386c4 100644 (file)
@@ -326,7 +326,6 @@ int cirrus_bo_create(struct drm_device *dev, int size, int align,
                return ret;
        }
 
-       cirrusbo->gem.driver_private = NULL;
        cirrusbo->bo.bdev = &cirrus->ttm.bdev;
        cirrusbo->bo.bdev->dev_mapping = dev->dev_mapping;
 
index 3d8fed1797972fdff1e449a70afcfcefb7dc4cde..e301d653d97e42f4537efdb50806cdc682930505 100644 (file)
@@ -423,6 +423,57 @@ struct drm_agp_head *drm_agp_init(struct drm_device *dev)
        return head;
 }
 
+/**
+ * drm_agp_clear - Clear AGP resource list
+ * @dev: DRM device
+ *
+ * Iterate over all AGP resources and remove them. But keep the AGP head
+ * intact so it can still be used. It is safe to call this if AGP is disabled or
+ * was already removed.
+ *
+ * If DRIVER_MODESET is active, nothing is done to protect the modesetting
+ * resources from getting destroyed. Drivers are responsible of cleaning them up
+ * during device shutdown.
+ */
+void drm_agp_clear(struct drm_device *dev)
+{
+       struct drm_agp_mem *entry, *tempe;
+
+       if (!drm_core_has_AGP(dev) || !dev->agp)
+               return;
+       if (drm_core_check_feature(dev, DRIVER_MODESET))
+               return;
+
+       list_for_each_entry_safe(entry, tempe, &dev->agp->memory, head) {
+               if (entry->bound)
+                       drm_unbind_agp(entry->memory);
+               drm_free_agp(entry->memory, entry->pages);
+               kfree(entry);
+       }
+       INIT_LIST_HEAD(&dev->agp->memory);
+
+       if (dev->agp->acquired)
+               drm_agp_release(dev);
+
+       dev->agp->acquired = 0;
+       dev->agp->enabled = 0;
+}
+
+/**
+ * drm_agp_destroy - Destroy AGP head
+ * @dev: DRM device
+ *
+ * Destroy resources that were previously allocated via drm_agp_initp. Caller
+ * must ensure to clean up all AGP resources before calling this. See
+ * drm_agp_clear().
+ *
+ * Call this to destroy AGP heads allocated via drm_agp_init().
+ */
+void drm_agp_destroy(struct drm_agp_head *agp)
+{
+       kfree(agp);
+}
+
 /**
  * Binds a collection of pages into AGP memory at the given offset, returning
  * the AGP memory structure containing them.
index 5a4dbb410b71591f6f7b61a7d89f6f7609438578..471e051d295e383b61ebb2a2a19c0e0d67d8827a 100644 (file)
@@ -207,12 +207,10 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
                        return 0;
                }
 
-               if (drm_core_has_MTRR(dev)) {
-                       if (map->type == _DRM_FRAME_BUFFER ||
-                           (map->flags & _DRM_WRITE_COMBINING)) {
-                               map->mtrr =
-                                       arch_phys_wc_add(map->offset, map->size);
-                       }
+               if (map->type == _DRM_FRAME_BUFFER ||
+                   (map->flags & _DRM_WRITE_COMBINING)) {
+                       map->mtrr =
+                               arch_phys_wc_add(map->offset, map->size);
                }
                if (map->type == _DRM_REGISTERS) {
                        if (map->flags & _DRM_WRITE_COMBINING)
@@ -243,7 +241,7 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
                }
                map->handle = vmalloc_user(map->size);
                DRM_DEBUG("%lu %d %p\n",
-                         map->size, drm_order(map->size), map->handle);
+                         map->size, order_base_2(map->size), map->handle);
                if (!map->handle) {
                        kfree(map);
                        return -ENOMEM;
@@ -464,8 +462,7 @@ int drm_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
                iounmap(map->handle);
                /* FALLTHROUGH */
        case _DRM_FRAME_BUFFER:
-               if (drm_core_has_MTRR(dev))
-                       arch_phys_wc_del(map->mtrr);
+               arch_phys_wc_del(map->mtrr);
                break;
        case _DRM_SHM:
                vfree(map->handle);
@@ -630,7 +627,7 @@ int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
                return -EINVAL;
 
        count = request->count;
-       order = drm_order(request->size);
+       order = order_base_2(request->size);
        size = 1 << order;
 
        alignment = (request->flags & _DRM_PAGE_ALIGN)
@@ -800,7 +797,7 @@ int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
                return -EPERM;
 
        count = request->count;
-       order = drm_order(request->size);
+       order = order_base_2(request->size);
        size = 1 << order;
 
        DRM_DEBUG("count=%d, size=%d (%d), order=%d\n",
@@ -1002,7 +999,7 @@ static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request
                return -EPERM;
 
        count = request->count;
-       order = drm_order(request->size);
+       order = order_base_2(request->size);
        size = 1 << order;
 
        alignment = (request->flags & _DRM_PAGE_ALIGN)
@@ -1130,161 +1127,6 @@ static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request
        return 0;
 }
 
-static int drm_addbufs_fb(struct drm_device * dev, struct drm_buf_desc * request)
-{
-       struct drm_device_dma *dma = dev->dma;
-       struct drm_buf_entry *entry;
-       struct drm_buf *buf;
-       unsigned long offset;
-       unsigned long agp_offset;
-       int count;
-       int order;
-       int size;
-       int alignment;
-       int page_order;
-       int total;
-       int byte_count;
-       int i;
-       struct drm_buf **temp_buflist;
-
-       if (!drm_core_check_feature(dev, DRIVER_FB_DMA))
-               return -EINVAL;
-
-       if (!dma)
-               return -EINVAL;
-
-       if (!capable(CAP_SYS_ADMIN))
-               return -EPERM;
-
-       count = request->count;
-       order = drm_order(request->size);
-       size = 1 << order;
-
-       alignment = (request->flags & _DRM_PAGE_ALIGN)
-           ? PAGE_ALIGN(size) : size;
-       page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
-       total = PAGE_SIZE << page_order;
-
-       byte_count = 0;
-       agp_offset = request->agp_start;
-
-       DRM_DEBUG("count:      %d\n", count);
-       DRM_DEBUG("order:      %d\n", order);
-       DRM_DEBUG("size:       %d\n", size);
-       DRM_DEBUG("agp_offset: %lu\n", agp_offset);
-       DRM_DEBUG("alignment:  %d\n", alignment);
-       DRM_DEBUG("page_order: %d\n", page_order);
-       DRM_DEBUG("total:      %d\n", total);
-
-       if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
-               return -EINVAL;
-
-       spin_lock(&dev->count_lock);
-       if (dev->buf_use) {
-               spin_unlock(&dev->count_lock);
-               return -EBUSY;
-       }
-       atomic_inc(&dev->buf_alloc);
-       spin_unlock(&dev->count_lock);
-
-       mutex_lock(&dev->struct_mutex);
-       entry = &dma->bufs[order];
-       if (entry->buf_count) {
-               mutex_unlock(&dev->struct_mutex);
-               atomic_dec(&dev->buf_alloc);
-               return -ENOMEM; /* May only call once for each order */
-       }
-
-       if (count < 0 || count > 4096) {
-               mutex_unlock(&dev->struct_mutex);
-               atomic_dec(&dev->buf_alloc);
-               return -EINVAL;
-       }
-
-       entry->buflist = kzalloc(count * sizeof(*entry->buflist),
-                               GFP_KERNEL);
-       if (!entry->buflist) {
-               mutex_unlock(&dev->struct_mutex);
-               atomic_dec(&dev->buf_alloc);
-               return -ENOMEM;
-       }
-
-       entry->buf_size = size;
-       entry->page_order = page_order;
-
-       offset = 0;
-
-       while (entry->buf_count < count) {
-               buf = &entry->buflist[entry->buf_count];
-               buf->idx = dma->buf_count + entry->buf_count;
-               buf->total = alignment;
-               buf->order = order;
-               buf->used = 0;
-
-               buf->offset = (dma->byte_count + offset);
-               buf->bus_address = agp_offset + offset;
-               buf->address = (void *)(agp_offset + offset);
-               buf->next = NULL;
-               buf->waiting = 0;
-               buf->pending = 0;
-               buf->file_priv = NULL;
-
-               buf->dev_priv_size = dev->driver->dev_priv_size;
-               buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
-               if (!buf->dev_private) {
-                       /* Set count correctly so we free the proper amount. */
-                       entry->buf_count = count;
-                       drm_cleanup_buf_error(dev, entry);
-                       mutex_unlock(&dev->struct_mutex);
-                       atomic_dec(&dev->buf_alloc);
-                       return -ENOMEM;
-               }
-
-               DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
-
-               offset += alignment;
-               entry->buf_count++;
-               byte_count += PAGE_SIZE << page_order;
-       }
-
-       DRM_DEBUG("byte_count: %d\n", byte_count);
-
-       temp_buflist = krealloc(dma->buflist,
-                               (dma->buf_count + entry->buf_count) *
-                               sizeof(*dma->buflist), GFP_KERNEL);
-       if (!temp_buflist) {
-               /* Free the entry because it isn't valid */
-               drm_cleanup_buf_error(dev, entry);
-               mutex_unlock(&dev->struct_mutex);
-               atomic_dec(&dev->buf_alloc);
-               return -ENOMEM;
-       }
-       dma->buflist = temp_buflist;
-
-       for (i = 0; i < entry->buf_count; i++) {
-               dma->buflist[i + dma->buf_count] = &entry->buflist[i];
-       }
-
-       dma->buf_count += entry->buf_count;
-       dma->seg_count += entry->seg_count;
-       dma->page_count += byte_count >> PAGE_SHIFT;
-       dma->byte_count += byte_count;
-
-       DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
-       DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
-
-       mutex_unlock(&dev->struct_mutex);
-
-       request->count = entry->buf_count;
-       request->size = size;
-
-       dma->flags = _DRM_DMA_USE_FB;
-
-       atomic_dec(&dev->buf_alloc);
-       return 0;
-}
-
-
 /**
  * Add buffers for DMA transfers (ioctl).
  *
@@ -1305,6 +1147,9 @@ int drm_addbufs(struct drm_device *dev, void *data,
        struct drm_buf_desc *request = data;
        int ret;
 
+       if (drm_core_check_feature(dev, DRIVER_MODESET))
+               return -EINVAL;
+
        if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
                return -EINVAL;
 
@@ -1316,7 +1161,7 @@ int drm_addbufs(struct drm_device *dev, void *data,
        if (request->flags & _DRM_SG_BUFFER)
                ret = drm_addbufs_sg(dev, request);
        else if (request->flags & _DRM_FB_BUFFER)
-               ret = drm_addbufs_fb(dev, request);
+               ret = -EINVAL;
        else
                ret = drm_addbufs_pci(dev, request);
 
@@ -1348,6 +1193,9 @@ int drm_infobufs(struct drm_device *dev, void *data,
        int i;
        int count;
 
+       if (drm_core_check_feature(dev, DRIVER_MODESET))
+               return -EINVAL;
+
        if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
                return -EINVAL;
 
@@ -1427,6 +1275,9 @@ int drm_markbufs(struct drm_device *dev, void *data,
        int order;
        struct drm_buf_entry *entry;
 
+       if (drm_core_check_feature(dev, DRIVER_MODESET))
+               return -EINVAL;
+
        if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
                return -EINVAL;
 
@@ -1435,7 +1286,7 @@ int drm_markbufs(struct drm_device *dev, void *data,
 
        DRM_DEBUG("%d, %d, %d\n",
                  request->size, request->low_mark, request->high_mark);
-       order = drm_order(request->size);
+       order = order_base_2(request->size);
        if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
                return -EINVAL;
        entry = &dma->bufs[order];
@@ -1472,6 +1323,9 @@ int drm_freebufs(struct drm_device *dev, void *data,
        int idx;
        struct drm_buf *buf;
 
+       if (drm_core_check_feature(dev, DRIVER_MODESET))
+               return -EINVAL;
+
        if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
                return -EINVAL;
 
@@ -1524,6 +1378,9 @@ int drm_mapbufs(struct drm_device *dev, void *data,
        struct drm_buf_map *request = data;
        int i;
 
+       if (drm_core_check_feature(dev, DRIVER_MODESET))
+               return -EINVAL;
+
        if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
                return -EINVAL;
 
@@ -1541,9 +1398,7 @@ int drm_mapbufs(struct drm_device *dev, void *data,
        if (request->count >= dma->buf_count) {
                if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP))
                    || (drm_core_check_feature(dev, DRIVER_SG)
-                       && (dma->flags & _DRM_DMA_USE_SG))
-                   || (drm_core_check_feature(dev, DRIVER_FB_DMA)
-                       && (dma->flags & _DRM_DMA_USE_FB))) {
+                       && (dma->flags & _DRM_DMA_USE_SG))) {
                        struct drm_local_map *map = dev->agp_buffer_map;
                        unsigned long token = dev->agp_buffer_token;
 
@@ -1600,25 +1455,28 @@ int drm_mapbufs(struct drm_device *dev, void *data,
        return retcode;
 }
 
-/**
- * Compute size order.  Returns the exponent of the smaller power of two which
- * is greater or equal to given number.
- *
- * \param size size.
- * \return order.
- *
- * \todo Can be made faster.
- */
-int drm_order(unsigned long size)
+int drm_dma_ioctl(struct drm_device *dev, void *data,
+                 struct drm_file *file_priv)
 {
-       int order;
-       unsigned long tmp;
+       if (drm_core_check_feature(dev, DRIVER_MODESET))
+               return -EINVAL;
 
-       for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++) ;
+       if (dev->driver->dma_ioctl)
+               return dev->driver->dma_ioctl(dev, data, file_priv);
+       else
+               return -EINVAL;
+}
 
-       if (size & (size - 1))
-               ++order;
+struct drm_local_map *drm_getsarea(struct drm_device *dev)
+{
+       struct drm_map_list *entry;
 
-       return order;
+       list_for_each_entry(entry, &dev->maplist, head) {
+               if (entry->map && entry->map->type == _DRM_SHM &&
+                   (entry->map->flags & _DRM_CONTAINS_LOCK)) {
+                       return entry->map;
+               }
+       }
+       return NULL;
 }
-EXPORT_SYMBOL(drm_order);
+EXPORT_SYMBOL(drm_getsarea);
index 725968d38976839a962a483eb0ca4477f1d55e7c..b4fb86d89850a31c3cc9424f4b4148831c59922f 100644 (file)
 
 #include <drm/drmP.h>
 
-/******************************************************************/
-/** \name Context bitmap support */
-/*@{*/
-
 /**
  * Free a handle from the context bitmap.
  *
  * in drm_device::ctx_idr, while holding the drm_device::struct_mutex
  * lock.
  */
-void drm_ctxbitmap_free(struct drm_device * dev, int ctx_handle)
+static void drm_ctxbitmap_free(struct drm_device * dev, int ctx_handle)
 {
+       if (drm_core_check_feature(dev, DRIVER_MODESET))
+               return;
+
        mutex_lock(&dev->struct_mutex);
        idr_remove(&dev->ctx_idr, ctx_handle);
        mutex_unlock(&dev->struct_mutex);
 }
 
+/******************************************************************/
+/** \name Context bitmap support */
+/*@{*/
+
+void drm_legacy_ctxbitmap_release(struct drm_device *dev,
+                                 struct drm_file *file_priv)
+{
+       if (drm_core_check_feature(dev, DRIVER_MODESET))
+               return;
+
+       mutex_lock(&dev->ctxlist_mutex);
+       if (!list_empty(&dev->ctxlist)) {
+               struct drm_ctx_list *pos, *n;
+
+               list_for_each_entry_safe(pos, n, &dev->ctxlist, head) {
+                       if (pos->tag == file_priv &&
+                           pos->handle != DRM_KERNEL_CONTEXT) {
+                               if (dev->driver->context_dtor)
+                                       dev->driver->context_dtor(dev,
+                                                                 pos->handle);
+
+                               drm_ctxbitmap_free(dev, pos->handle);
+
+                               list_del(&pos->head);
+                               kfree(pos);
+                               --dev->ctx_count;
+                       }
+               }
+       }
+       mutex_unlock(&dev->ctxlist_mutex);
+}
+
 /**
  * Context bitmap allocation.
  *
@@ -90,10 +121,12 @@ static int drm_ctxbitmap_next(struct drm_device * dev)
  *
  * Initialise the drm_device::ctx_idr
  */
-int drm_ctxbitmap_init(struct drm_device * dev)
+void drm_legacy_ctxbitmap_init(struct drm_device * dev)
 {
+       if (drm_core_check_feature(dev, DRIVER_MODESET))
+               return;
+
        idr_init(&dev->ctx_idr);
-       return 0;
 }
 
 /**
@@ -104,7 +137,7 @@ int drm_ctxbitmap_init(struct drm_device * dev)
  * Free all idr members using drm_ctx_sarea_free helper function
  * while holding the drm_device::struct_mutex lock.
  */
-void drm_ctxbitmap_cleanup(struct drm_device * dev)
+void drm_legacy_ctxbitmap_cleanup(struct drm_device * dev)
 {
        mutex_lock(&dev->struct_mutex);
        idr_destroy(&dev->ctx_idr);
@@ -136,6 +169,9 @@ int drm_getsareactx(struct drm_device *dev, void *data,
        struct drm_local_map *map;
        struct drm_map_list *_entry;
 
+       if (drm_core_check_feature(dev, DRIVER_MODESET))
+               return -EINVAL;
+
        mutex_lock(&dev->struct_mutex);
 
        map = idr_find(&dev->ctx_idr, request->ctx_id);
@@ -180,6 +216,9 @@ int drm_setsareactx(struct drm_device *dev, void *data,
        struct drm_local_map *map = NULL;
        struct drm_map_list *r_list = NULL;
 
+       if (drm_core_check_feature(dev, DRIVER_MODESET))
+               return -EINVAL;
+
        mutex_lock(&dev->struct_mutex);
        list_for_each_entry(r_list, &dev->maplist, head) {
                if (r_list->map
@@ -251,7 +290,6 @@ static int drm_context_switch_complete(struct drm_device *dev,
                                       struct drm_file *file_priv, int new)
 {
        dev->last_context = new;        /* PRE/POST: This is the _only_ writer. */
-       dev->last_switch = jiffies;
 
        if (!_DRM_LOCK_IS_HELD(file_priv->master->lock.hw_lock->lock)) {
                DRM_ERROR("Lock isn't held after context switch\n");
@@ -261,7 +299,6 @@ static int drm_context_switch_complete(struct drm_device *dev,
           when the kernel holds the lock, release
           that lock here. */
        clear_bit(0, &dev->context_flag);
-       wake_up(&dev->context_wait);
 
        return 0;
 }
@@ -282,6 +319,9 @@ int drm_resctx(struct drm_device *dev, void *data,
        struct drm_ctx ctx;
        int i;
 
+       if (drm_core_check_feature(dev, DRIVER_MODESET))
+               return -EINVAL;
+
        if (res->count >= DRM_RESERVED_CONTEXTS) {
                memset(&ctx, 0, sizeof(ctx));
                for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
@@ -312,6 +352,9 @@ int drm_addctx(struct drm_device *dev, void *data,
        struct drm_ctx_list *ctx_entry;
        struct drm_ctx *ctx = data;
 
+       if (drm_core_check_feature(dev, DRIVER_MODESET))
+               return -EINVAL;
+
        ctx->handle = drm_ctxbitmap_next(dev);
        if (ctx->handle == DRM_KERNEL_CONTEXT) {
                /* Skip kernel's context and get a new one. */
@@ -342,12 +385,6 @@ int drm_addctx(struct drm_device *dev, void *data,
        return 0;
 }
 
-int drm_modctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
-       /* This does nothing */
-       return 0;
-}
-
 /**
  * Get context.
  *
@@ -361,6 +398,9 @@ int drm_getctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
 {
        struct drm_ctx *ctx = data;
 
+       if (drm_core_check_feature(dev, DRIVER_MODESET))
+               return -EINVAL;
+
        /* This is 0, because we don't handle any context flags */
        ctx->flags = 0;
 
@@ -383,6 +423,9 @@ int drm_switchctx(struct drm_device *dev, void *data,
 {
        struct drm_ctx *ctx = data;
 
+       if (drm_core_check_feature(dev, DRIVER_MODESET))
+               return -EINVAL;
+
        DRM_DEBUG("%d\n", ctx->handle);
        return drm_context_switch(dev, dev->last_context, ctx->handle);
 }
@@ -403,6 +446,9 @@ int drm_newctx(struct drm_device *dev, void *data,
 {
        struct drm_ctx *ctx = data;
 
+       if (drm_core_check_feature(dev, DRIVER_MODESET))
+               return -EINVAL;
+
        DRM_DEBUG("%d\n", ctx->handle);
        drm_context_switch_complete(dev, file_priv, ctx->handle);
 
@@ -425,6 +471,9 @@ int drm_rmctx(struct drm_device *dev, void *data,
 {
        struct drm_ctx *ctx = data;
 
+       if (drm_core_check_feature(dev, DRIVER_MODESET))
+               return -EINVAL;
+
        DRM_DEBUG("%d\n", ctx->handle);
        if (ctx->handle != DRM_KERNEL_CONTEXT) {
                if (dev->driver->context_dtor)
index fc83bb9eb51459cbdf5a1288fad6fa1c77d25b41..54b4169fc48effaa797fdfd79f10dfde1a5b82b1 100644 (file)
@@ -125,13 +125,6 @@ static const struct drm_prop_enum_list drm_scaling_mode_enum_list[] =
        { DRM_MODE_SCALE_ASPECT, "Full aspect" },
 };
 
-static const struct drm_prop_enum_list drm_dithering_mode_enum_list[] =
-{
-       { DRM_MODE_DITHERING_OFF, "Off" },
-       { DRM_MODE_DITHERING_ON, "On" },
-       { DRM_MODE_DITHERING_AUTO, "Automatic" },
-};
-
 /*
  * Non-global properties, but "required" for certain connectors.
  */
@@ -186,29 +179,29 @@ static const struct drm_prop_enum_list drm_dirty_info_enum_list[] = {
 struct drm_conn_prop_enum_list {
        int type;
        const char *name;
-       int count;
+       struct ida ida;
 };
 
 /*
  * Connector and encoder types.
  */
 static struct drm_conn_prop_enum_list drm_connector_enum_list[] =
-{      { DRM_MODE_CONNECTOR_Unknown, "Unknown", 0 },
-       { DRM_MODE_CONNECTOR_VGA, "VGA", 0 },
-       { DRM_MODE_CONNECTOR_DVII, "DVI-I", 0 },
-       { DRM_MODE_CONNECTOR_DVID, "DVI-D", 0 },
-       { DRM_MODE_CONNECTOR_DVIA, "DVI-A", 0 },
-       { DRM_MODE_CONNECTOR_Composite, "Composite", 0 },
-       { DRM_MODE_CONNECTOR_SVIDEO, "SVIDEO", 0 },
-       { DRM_MODE_CONNECTOR_LVDS, "LVDS", 0 },
-       { DRM_MODE_CONNECTOR_Component, "Component", 0 },
-       { DRM_MODE_CONNECTOR_9PinDIN, "DIN", 0 },
-       { DRM_MODE_CONNECTOR_DisplayPort, "DP", 0 },
-       { DRM_MODE_CONNECTOR_HDMIA, "HDMI-A", 0 },
-       { DRM_MODE_CONNECTOR_HDMIB, "HDMI-B", 0 },
-       { DRM_MODE_CONNECTOR_TV, "TV", 0 },
-       { DRM_MODE_CONNECTOR_eDP, "eDP", 0 },
-       { DRM_MODE_CONNECTOR_VIRTUAL, "Virtual", 0},
+{      { DRM_MODE_CONNECTOR_Unknown, "Unknown" },
+       { DRM_MODE_CONNECTOR_VGA, "VGA" },
+       { DRM_MODE_CONNECTOR_DVII, "DVI-I" },
+       { DRM_MODE_CONNECTOR_DVID, "DVI-D" },
+       { DRM_MODE_CONNECTOR_DVIA, "DVI-A" },
+       { DRM_MODE_CONNECTOR_Composite, "Composite" },
+       { DRM_MODE_CONNECTOR_SVIDEO, "SVIDEO" },
+       { DRM_MODE_CONNECTOR_LVDS, "LVDS" },
+       { DRM_MODE_CONNECTOR_Component, "Component" },
+       { DRM_MODE_CONNECTOR_9PinDIN, "DIN" },
+       { DRM_MODE_CONNECTOR_DisplayPort, "DP" },
+       { DRM_MODE_CONNECTOR_HDMIA, "HDMI-A" },
+       { DRM_MODE_CONNECTOR_HDMIB, "HDMI-B" },
+       { DRM_MODE_CONNECTOR_TV, "TV" },
+       { DRM_MODE_CONNECTOR_eDP, "eDP" },
+       { DRM_MODE_CONNECTOR_VIRTUAL, "Virtual" },
 };
 
 static const struct drm_prop_enum_list drm_encoder_enum_list[] =
@@ -220,6 +213,22 @@ static const struct drm_prop_enum_list drm_encoder_enum_list[] =
        { DRM_MODE_ENCODER_VIRTUAL, "Virtual" },
 };
 
+void drm_connector_ida_init(void)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(drm_connector_enum_list); i++)
+               ida_init(&drm_connector_enum_list[i].ida);
+}
+
+void drm_connector_ida_destroy(void)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(drm_connector_enum_list); i++)
+               ida_destroy(&drm_connector_enum_list[i].ida);
+}
+
 const char *drm_get_encoder_name(const struct drm_encoder *encoder)
 {
        static char buf[32];
@@ -677,20 +686,19 @@ void drm_mode_probed_add(struct drm_connector *connector,
 }
 EXPORT_SYMBOL(drm_mode_probed_add);
 
-/**
+/*
  * drm_mode_remove - remove and free a mode
  * @connector: connector list to modify
  * @mode: mode to remove
  *
  * Remove @mode from @connector's mode list, then free it.
  */
-void drm_mode_remove(struct drm_connector *connector,
-                    struct drm_display_mode *mode)
+static void drm_mode_remove(struct drm_connector *connector,
+                           struct drm_display_mode *mode)
 {
        list_del(&mode->head);
        drm_mode_destroy(connector->dev, mode);
 }
-EXPORT_SYMBOL(drm_mode_remove);
 
 /**
  * drm_connector_init - Init a preallocated connector
@@ -711,6 +719,8 @@ int drm_connector_init(struct drm_device *dev,
                       int connector_type)
 {
        int ret;
+       struct ida *connector_ida =
+               &drm_connector_enum_list[connector_type].ida;
 
        drm_modeset_lock_all(dev);
 
@@ -723,7 +733,12 @@ int drm_connector_init(struct drm_device *dev,
        connector->funcs = funcs;
        connector->connector_type = connector_type;
        connector->connector_type_id =
-               ++drm_connector_enum_list[connector_type].count; /* TODO */
+               ida_simple_get(connector_ida, 1, 0, GFP_KERNEL);
+       if (connector->connector_type_id < 0) {
+               ret = connector->connector_type_id;
+               drm_mode_object_put(dev, &connector->base);
+               goto out;
+       }
        INIT_LIST_HEAD(&connector->probed_modes);
        INIT_LIST_HEAD(&connector->modes);
        connector->edid_blob_ptr = NULL;
@@ -764,6 +779,9 @@ void drm_connector_cleanup(struct drm_connector *connector)
        list_for_each_entry_safe(mode, t, &connector->modes, head)
                drm_mode_remove(connector, mode);
 
+       ida_remove(&drm_connector_enum_list[connector->connector_type].ida,
+                  connector->connector_type_id);
+
        drm_mode_object_put(dev, &connector->base);
        list_del(&connector->head);
        dev->mode_config.num_connector--;
@@ -1134,30 +1152,6 @@ int drm_mode_create_scaling_mode_property(struct drm_device *dev)
 }
 EXPORT_SYMBOL(drm_mode_create_scaling_mode_property);
 
-/**
- * drm_mode_create_dithering_property - create dithering property
- * @dev: DRM device
- *
- * Called by a driver the first time it's needed, must be attached to desired
- * connectors.
- */
-int drm_mode_create_dithering_property(struct drm_device *dev)
-{
-       struct drm_property *dithering_mode;
-
-       if (dev->mode_config.dithering_mode_property)
-               return 0;
-
-       dithering_mode =
-               drm_property_create_enum(dev, 0, "dithering",
-                               drm_dithering_mode_enum_list,
-                                   ARRAY_SIZE(drm_dithering_mode_enum_list));
-       dev->mode_config.dithering_mode_property = dithering_mode;
-
-       return 0;
-}
-EXPORT_SYMBOL(drm_mode_create_dithering_property);
-
 /**
  * drm_mode_create_dirty_property - create dirty property
  * @dev: DRM device
index 495b5fd2787c7b04986bdba6e9bf14efbddea7c0..8a140a953754de73d2a524bbaae8599bc5063984 100644 (file)
  *
  * Allocate and initialize a drm_device_dma structure.
  */
-int drm_dma_setup(struct drm_device *dev)
+int drm_legacy_dma_setup(struct drm_device *dev)
 {
        int i;
 
+       if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA) ||
+           drm_core_check_feature(dev, DRIVER_MODESET)) {
+               return 0;
+       }
+
+       dev->buf_use = 0;
+       atomic_set(&dev->buf_alloc, 0);
+
        dev->dma = kzalloc(sizeof(*dev->dma), GFP_KERNEL);
        if (!dev->dma)
                return -ENOMEM;
@@ -66,11 +74,16 @@ int drm_dma_setup(struct drm_device *dev)
  * Free all pages associated with DMA buffers, the buffers and pages lists, and
  * finally the drm_device::dma structure itself.
  */
-void drm_dma_takedown(struct drm_device *dev)
+void drm_legacy_dma_takedown(struct drm_device *dev)
 {
        struct drm_device_dma *dma = dev->dma;
        int i, j;
 
+       if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA) ||
+           drm_core_check_feature(dev, DRIVER_MODESET)) {
+               return;
+       }
+
        if (!dma)
                return;
 
index 99fcd7c32ea2dea50c652576646446a1fd5cab37..288da3dc2a0910c16a0fad719a6794fb0f57392a 100644 (file)
@@ -87,7 +87,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
 
        DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_addctx, DRM_AUTH|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_modctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_GET_CTX, drm_getctx, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_IOCTL_SWITCH_CTX, drm_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
@@ -106,8 +106,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
        DRM_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_infobufs, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_IOCTL_MAP_BUFS, drm_mapbufs, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_IOCTL_FREE_BUFS, drm_freebufs, DRM_AUTH),
-       /* The DRM_IOCTL_DMA ioctl should be defined by the driver. */
-       DRM_IOCTL_DEF(DRM_IOCTL_DMA, NULL, DRM_AUTH),
+       DRM_IOCTL_DEF(DRM_IOCTL_DMA, drm_dma_ioctl, DRM_AUTH),
 
        DRM_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
 
@@ -122,7 +121,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
        DRM_IOCTL_DEF(DRM_IOCTL_AGP_UNBIND, drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
 #endif
 
-       DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
 
        DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, DRM_UNLOCKED),
@@ -171,6 +170,31 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
 
 #define DRM_CORE_IOCTL_COUNT   ARRAY_SIZE( drm_ioctls )
 
+/**
+ * drm_legacy_dev_reinit
+ *
+ * Reinitializes a legacy/ums drm device in it's lastclose function.
+ */
+static void drm_legacy_dev_reinit(struct drm_device *dev)
+{
+       int i;
+
+       if (drm_core_check_feature(dev, DRIVER_MODESET))
+               return;
+
+       atomic_set(&dev->ioctl_count, 0);
+       atomic_set(&dev->vma_count, 0);
+
+       for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
+               atomic_set(&dev->counts[i], 0);
+
+       dev->sigdata.lock = NULL;
+
+       dev->context_flag = 0;
+       dev->last_context = 0;
+       dev->if_version = 0;
+}
+
 /**
  * Take down the DRM device.
  *
@@ -195,32 +219,9 @@ int drm_lastclose(struct drm_device * dev)
 
        mutex_lock(&dev->struct_mutex);
 
-       /* Clear AGP information */
-       if (drm_core_has_AGP(dev) && dev->agp &&
-                       !drm_core_check_feature(dev, DRIVER_MODESET)) {
-               struct drm_agp_mem *entry, *tempe;
-
-               /* Remove AGP resources, but leave dev->agp
-                  intact until drv_cleanup is called. */
-               list_for_each_entry_safe(entry, tempe, &dev->agp->memory, head) {
-                       if (entry->bound)
-                               drm_unbind_agp(entry->memory);
-                       drm_free_agp(entry->memory, entry->pages);
-                       kfree(entry);
-               }
-               INIT_LIST_HEAD(&dev->agp->memory);
+       drm_agp_clear(dev);
 
-               if (dev->agp->acquired)
-                       drm_agp_release(dev);
-
-               dev->agp->acquired = 0;
-               dev->agp->enabled = 0;
-       }
-       if (drm_core_check_feature(dev, DRIVER_SG) && dev->sg &&
-           !drm_core_check_feature(dev, DRIVER_MODESET)) {
-               drm_sg_cleanup(dev->sg);
-               dev->sg = NULL;
-       }
+       drm_legacy_sg_cleanup(dev);
 
        /* Clear vma list (only built for debugging) */
        list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) {
@@ -228,13 +229,13 @@ int drm_lastclose(struct drm_device * dev)
                kfree(vma);
        }
 
-       if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
-           !drm_core_check_feature(dev, DRIVER_MODESET))
-               drm_dma_takedown(dev);
+       drm_legacy_dma_takedown(dev);
 
        dev->dev_mapping = NULL;
        mutex_unlock(&dev->struct_mutex);
 
+       drm_legacy_dev_reinit(dev);
+
        DRM_DEBUG("lastclose completed\n");
        return 0;
 }
@@ -251,6 +252,7 @@ static int __init drm_core_init(void)
        int ret = -ENOMEM;
 
        drm_global_init();
+       drm_connector_ida_init();
        idr_init(&drm_minors_idr);
 
        if (register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops))
@@ -263,13 +265,6 @@ static int __init drm_core_init(void)
                goto err_p2;
        }
 
-       drm_proc_root = proc_mkdir("dri", NULL);
-       if (!drm_proc_root) {
-               DRM_ERROR("Cannot create /proc/dri\n");
-               ret = -1;
-               goto err_p3;
-       }
-
        drm_debugfs_root = debugfs_create_dir("dri", NULL);
        if (!drm_debugfs_root) {
                DRM_ERROR("Cannot create /sys/kernel/debug/dri\n");
@@ -292,12 +287,12 @@ err_p1:
 
 static void __exit drm_core_exit(void)
 {
-       remove_proc_entry("dri", NULL);
        debugfs_remove(drm_debugfs_root);
        drm_sysfs_destroy();
 
        unregister_chrdev(DRM_MAJOR, "drm");
 
+       drm_connector_ida_destroy();
        idr_destroy(&drm_minors_idr);
 }
 
@@ -420,9 +415,6 @@ long drm_ioctl(struct file *filp,
 
        /* Do not trust userspace, use our own definition */
        func = ioctl->func;
-       /* is there a local override? */
-       if ((nr == DRM_IOCTL_NR(DRM_IOCTL_DMA)) && dev->driver->dma_ioctl)
-               func = dev->driver->dma_ioctl;
 
        if (!func) {
                DRM_DEBUG("no function\n");
@@ -485,19 +477,4 @@ long drm_ioctl(struct file *filp,
                DRM_DEBUG("ret = %d\n", retcode);
        return retcode;
 }
-
 EXPORT_SYMBOL(drm_ioctl);
-
-struct drm_local_map *drm_getsarea(struct drm_device *dev)
-{
-       struct drm_map_list *entry;
-
-       list_for_each_entry(entry, &dev->maplist, head) {
-               if (entry->map && entry->map->type == _DRM_SHM &&
-                   (entry->map->flags & _DRM_CONTAINS_LOCK)) {
-                       return entry->map;
-               }
-       }
-       return NULL;
-}
-EXPORT_SYMBOL(drm_getsarea);
index 95d6f4b6967c6df03b4bfcf5e5a5ea27d63cf70d..dfc7a1ba9360e59cd1de3f85ddb8db3bc862a22c 100644 (file)
@@ -3102,11 +3102,13 @@ drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
        if (err < 0)
                return err;
 
+       if (mode->flags & DRM_MODE_FLAG_DBLCLK)
+               frame->pixel_repeat = 1;
+
        frame->video_code = drm_match_cea_mode(mode);
-       if (!frame->video_code)
-               return 0;
 
        frame->picture_aspect = HDMI_PICTURE_ASPECT_NONE;
+       frame->active_info_valid = 1;
        frame->active_aspect = HDMI_ACTIVE_ASPECT_PICTURE;
 
        return 0;
index c385cc5e730e68d91e956641c74488ddd9b72cec..61b5a47ad239115f00b2359f41bd99734645e443 100644 (file)
@@ -181,11 +181,11 @@ struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb,
 EXPORT_SYMBOL_GPL(drm_fb_cma_get_gem_obj);
 
 #ifdef CONFIG_DEBUG_FS
-/**
+/*
  * drm_fb_cma_describe() - Helper to dump information about a single
  * CMA framebuffer object
  */
-void drm_fb_cma_describe(struct drm_framebuffer *fb, struct seq_file *m)
+static void drm_fb_cma_describe(struct drm_framebuffer *fb, struct seq_file *m)
 {
        struct drm_fb_cma *fb_cma = to_fb_cma(fb);
        int i, n = drm_format_num_planes(fb->pixel_format);
@@ -199,7 +199,6 @@ void drm_fb_cma_describe(struct drm_framebuffer *fb, struct seq_file *m)
                drm_gem_cma_describe(fb_cma->obj[i], m);
        }
 }
-EXPORT_SYMBOL_GPL(drm_fb_cma_describe);
 
 /**
  * drm_fb_cma_debugfs_show() - Helper to list CMA framebuffer objects
diff --git a/drivers/gpu/drm/drm_flip_work.c b/drivers/gpu/drm/drm_flip_work.c
new file mode 100644 (file)
index 0000000..e788882
--- /dev/null
@@ -0,0 +1,124 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "drmP.h"
+#include "drm_flip_work.h"
+
/**
 * drm_flip_work_queue - queue work
 * @work: the flip-work
 * @val: the value to queue
 *
 * Queues work, that will later be run (passed back to drm_flip_func_t
 * func) on a work queue after drm_flip_work_commit() is called.
 */
void drm_flip_work_queue(struct drm_flip_work *work, void *val)
{
	if (kfifo_put(&work->fifo, (const void **)&val)) {
		/* queued; drm_flip_work_commit() will hand it to the worker */
		atomic_inc(&work->pending);
	} else {
		/*
		 * Fifo exhausted: run the callback synchronously in the
		 * caller's context rather than silently dropping @val.
		 */
		DRM_ERROR("%s fifo full!\n", work->name);
		work->func(work, val);
	}
}
EXPORT_SYMBOL(drm_flip_work_queue);
+
/**
 * drm_flip_work_commit - commit queued work
 * @work: the flip-work
 * @wq: the work-queue to run the queued work on
 *
 * Trigger work previously queued by drm_flip_work_queue() to run
 * on a workqueue.  The typical usage would be to queue work (via
 * drm_flip_work_queue()) at any point (from vblank irq and/or
 * prior), and then from vblank irq commit the queued work.
 */
void drm_flip_work_commit(struct drm_flip_work *work,
		struct workqueue_struct *wq)
{
	uint32_t pending = atomic_read(&work->pending);
	/* Move everything queued so far from "pending" to "committed";
	 * flip_worker() consumes exactly work->count entries from the
	 * fifo.  Values queued after this snapshot stay pending until
	 * the next commit.
	 */
	atomic_add(pending, &work->count);
	atomic_sub(pending, &work->pending);
	queue_work(wq, &work->worker);
}
EXPORT_SYMBOL(drm_flip_work_commit);
+
+static void flip_worker(struct work_struct *w)
+{
+       struct drm_flip_work *work = container_of(w, struct drm_flip_work, worker);
+       uint32_t count = atomic_read(&work->count);
+       void *val = NULL;
+
+       atomic_sub(count, &work->count);
+
+       while(count--)
+               if (!WARN_ON(!kfifo_get(&work->fifo, &val)))
+                       work->func(work, val);
+}
+
/**
 * drm_flip_work_init - initialize flip-work
 * @work: the flip-work to initialize
 * @size: the max queue depth
 * @name: debug name
 * @func: the callback work function
 *
 * Initializes/allocates resources for the flip-work
 *
 * RETURNS:
 * Zero on success, error code on failure.
 */
int drm_flip_work_init(struct drm_flip_work *work, int size,
		const char *name, drm_flip_func_t func)
{
	int ret;

	/* NOTE: the @name pointer is stored, not copied — the caller must
	 * keep it alive for the lifetime of @work.
	 */
	work->name = name;
	atomic_set(&work->count, 0);
	atomic_set(&work->pending, 0);
	work->func = func;

	/* fifo of queued values; released in drm_flip_work_cleanup() */
	ret = kfifo_alloc(&work->fifo, size, GFP_KERNEL);
	if (ret) {
		DRM_ERROR("could not allocate %s fifo\n", name);
		return ret;
	}

	INIT_WORK(&work->worker, flip_worker);

	return 0;
}
EXPORT_SYMBOL(drm_flip_work_init);
+
/**
 * drm_flip_work_cleanup - cleans up flip-work
 * @work: the flip-work to cleanup
 *
 * Destroy resources allocated for the flip-work
 */
void drm_flip_work_cleanup(struct drm_flip_work *work)
{
	/* all committed work should have been consumed by the worker by now */
	WARN_ON(!kfifo_is_empty(&work->fifo));
	kfifo_free(&work->fifo);
}
EXPORT_SYMBOL(drm_flip_work_cleanup);
index 3a24385e03686faebb4fa413809136bce9d60917..2d2401e9c5aea7d814430466e920857cdcfe1484 100644 (file)
@@ -48,59 +48,21 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
 
 static int drm_setup(struct drm_device * dev)
 {
-       int i;
        int ret;
 
-       if (dev->driver->firstopen) {
+       if (dev->driver->firstopen &&
+           !drm_core_check_feature(dev, DRIVER_MODESET)) {
                ret = dev->driver->firstopen(dev);
                if (ret != 0)
                        return ret;
        }
 
-       atomic_set(&dev->ioctl_count, 0);
-       atomic_set(&dev->vma_count, 0);
-
-       if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
-           !drm_core_check_feature(dev, DRIVER_MODESET)) {
-               dev->buf_use = 0;
-               atomic_set(&dev->buf_alloc, 0);
-
-               i = drm_dma_setup(dev);
-               if (i < 0)
-                       return i;
-       }
-
-       for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
-               atomic_set(&dev->counts[i], 0);
-
-       dev->sigdata.lock = NULL;
-
-       dev->context_flag = 0;
-       dev->interrupt_flag = 0;
-       dev->dma_flag = 0;
-       dev->last_context = 0;
-       dev->last_switch = 0;
-       dev->last_checked = 0;
-       init_waitqueue_head(&dev->context_wait);
-       dev->if_version = 0;
-
-       dev->ctx_start = 0;
-       dev->lck_start = 0;
+       ret = drm_legacy_dma_setup(dev);
+       if (ret < 0)
+               return ret;
 
-       dev->buf_async = NULL;
-       init_waitqueue_head(&dev->buf_readers);
-       init_waitqueue_head(&dev->buf_writers);
 
        DRM_DEBUG("\n");
-
-       /*
-        * The kernel's context could be created here, but is now created
-        * in drm_dma_enqueue.  This is more resource-efficient for
-        * hardware that does not do DMA, but may mean that
-        * drm_select_queue fails between the time the interrupt is
-        * initialized and the time the queues are initialized.
-        */
-
        return 0;
 }
 
@@ -388,18 +350,6 @@ out_put_pid:
        return ret;
 }
 
-/** No-op. */
-int drm_fasync(int fd, struct file *filp, int on)
-{
-       struct drm_file *priv = filp->private_data;
-       struct drm_device *dev = priv->minor->dev;
-
-       DRM_DEBUG("fd = %d, device = 0x%lx\n", fd,
-                 (long)old_encode_dev(priv->minor->device));
-       return fasync_helper(fd, filp, on, &dev->buf_async);
-}
-EXPORT_SYMBOL(drm_fasync);
-
 static void drm_master_release(struct drm_device *dev, struct file *filp)
 {
        struct drm_file *file_priv = filp->private_data;
@@ -490,26 +440,7 @@ int drm_release(struct inode *inode, struct file *filp)
        if (dev->driver->driver_features & DRIVER_GEM)
                drm_gem_release(dev, file_priv);
 
-       mutex_lock(&dev->ctxlist_mutex);
-       if (!list_empty(&dev->ctxlist)) {
-               struct drm_ctx_list *pos, *n;
-
-               list_for_each_entry_safe(pos, n, &dev->ctxlist, head) {
-                       if (pos->tag == file_priv &&
-                           pos->handle != DRM_KERNEL_CONTEXT) {
-                               if (dev->driver->context_dtor)
-                                       dev->driver->context_dtor(dev,
-                                                                 pos->handle);
-
-                               drm_ctxbitmap_free(dev, pos->handle);
-
-                               list_del(&pos->head);
-                               kfree(pos);
-                               --dev->ctx_count;
-                       }
-               }
-       }
-       mutex_unlock(&dev->ctxlist_mutex);
+       drm_legacy_ctxbitmap_release(dev, file_priv);
 
        mutex_lock(&dev->struct_mutex);
 
@@ -555,6 +486,7 @@ int drm_release(struct inode *inode, struct file *filp)
        if (dev->driver->postclose)
                dev->driver->postclose(dev, file_priv);
 
+
        if (drm_core_check_feature(dev, DRIVER_PRIME))
                drm_prime_destroy_file_private(&file_priv->prime);
 
index 603f256152efed25852072a49f32b11c7ddf16cd..1ce88c3301a144ff6f66a06912ae8c34868a6b01 100644 (file)
@@ -37,6 +37,7 @@
 #include <linux/shmem_fs.h>
 #include <linux/dma-buf.h>
 #include <drm/drmP.h>
+#include <drm/drm_vma_manager.h>
 
 /** @file drm_gem.c
  *
@@ -92,7 +93,7 @@ drm_gem_init(struct drm_device *dev)
 {
        struct drm_gem_mm *mm;
 
-       spin_lock_init(&dev->object_name_lock);
+       mutex_init(&dev->object_name_lock);
        idr_init(&dev->object_name_idr);
 
        mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
@@ -102,14 +103,9 @@ drm_gem_init(struct drm_device *dev)
        }
 
        dev->mm_private = mm;
-
-       if (drm_ht_create(&mm->offset_hash, 12)) {
-               kfree(mm);
-               return -ENOMEM;
-       }
-
-       drm_mm_init(&mm->offset_manager, DRM_FILE_PAGE_OFFSET_START,
-                   DRM_FILE_PAGE_OFFSET_SIZE);
+       drm_vma_offset_manager_init(&mm->vma_manager,
+                                   DRM_FILE_PAGE_OFFSET_START,
+                                   DRM_FILE_PAGE_OFFSET_SIZE);
 
        return 0;
 }
@@ -119,8 +115,7 @@ drm_gem_destroy(struct drm_device *dev)
 {
        struct drm_gem_mm *mm = dev->mm_private;
 
-       drm_mm_takedown(&mm->offset_manager);
-       drm_ht_remove(&mm->offset_hash);
+       drm_vma_offset_manager_destroy(&mm->vma_manager);
        kfree(mm);
        dev->mm_private = NULL;
 }
@@ -132,16 +127,14 @@ drm_gem_destroy(struct drm_device *dev)
 int drm_gem_object_init(struct drm_device *dev,
                        struct drm_gem_object *obj, size_t size)
 {
-       BUG_ON((size & (PAGE_SIZE - 1)) != 0);
+       struct file *filp;
 
-       obj->dev = dev;
-       obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
-       if (IS_ERR(obj->filp))
-               return PTR_ERR(obj->filp);
+       filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
+       if (IS_ERR(filp))
+               return PTR_ERR(filp);
 
-       kref_init(&obj->refcount);
-       atomic_set(&obj->handle_count, 0);
-       obj->size = size;
+       drm_gem_private_object_init(dev, obj, size);
+       obj->filp = filp;
 
        return 0;
 }
@@ -152,8 +145,8 @@ EXPORT_SYMBOL(drm_gem_object_init);
  * no GEM provided backing store. Instead the caller is responsible for
  * backing the object and handling it.
  */
-int drm_gem_private_object_init(struct drm_device *dev,
-                       struct drm_gem_object *obj, size_t size)
+void drm_gem_private_object_init(struct drm_device *dev,
+                                struct drm_gem_object *obj, size_t size)
 {
        BUG_ON((size & (PAGE_SIZE - 1)) != 0);
 
@@ -161,10 +154,8 @@ int drm_gem_private_object_init(struct drm_device *dev,
        obj->filp = NULL;
 
        kref_init(&obj->refcount);
-       atomic_set(&obj->handle_count, 0);
+       obj->handle_count = 0;
        obj->size = size;
-
-       return 0;
 }
 EXPORT_SYMBOL(drm_gem_private_object_init);
 
@@ -200,16 +191,79 @@ EXPORT_SYMBOL(drm_gem_object_alloc);
 static void
 drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
 {
-       if (obj->import_attach) {
-               drm_prime_remove_buf_handle(&filp->prime,
-                               obj->import_attach->dmabuf);
+       /*
+        * Note: obj->dma_buf can't disappear as long as we still hold a
+        * handle reference in obj->handle_count.
+        */
+       mutex_lock(&filp->prime.lock);
+       if (obj->dma_buf) {
+               drm_prime_remove_buf_handle_locked(&filp->prime,
+                                                  obj->dma_buf);
        }
-       if (obj->export_dma_buf) {
-               drm_prime_remove_buf_handle(&filp->prime,
-                               obj->export_dma_buf);
+       mutex_unlock(&filp->prime.lock);
+}
+
+static void drm_gem_object_ref_bug(struct kref *list_kref)
+{
+       BUG();
+}
+
+/**
+ * Called after the last handle to the object has been closed
+ *
+ * Removes any name for the object. Note that this must be
+ * called before drm_gem_object_free or we'll be touching
+ * freed memory
+ */
+static void drm_gem_object_handle_free(struct drm_gem_object *obj)
+{
+       struct drm_device *dev = obj->dev;
+
+       /* Remove any name for this object */
+       if (obj->name) {
+               idr_remove(&dev->object_name_idr, obj->name);
+               obj->name = 0;
+               /*
+                * The object name held a reference to this object, drop
+                * that now.
+               *
+               * This cannot be the last reference, since the handle holds one too.
+                */
+               kref_put(&obj->refcount, drm_gem_object_ref_bug);
        }
 }
 
+static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
+{
+       /* Unbreak the reference cycle if we have an exported dma_buf. */
+       if (obj->dma_buf) {
+               dma_buf_put(obj->dma_buf);
+               obj->dma_buf = NULL;
+       }
+}
+
/*
 * Drop one handle reference on @obj.  When the last handle goes away this
 * also releases the flink name and any exported dma_buf reference before
 * dropping the handle's regular object reference.
 */
static void
drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
{
	if (WARN_ON(obj->handle_count == 0))
		return;

	/*
	 * The name and exported dma_buf must be torn down under
	 * dev->object_name_lock, so that a concurrent flink (which checks
	 * handle_count under the same lock) cannot race with the last
	 * handle disappearing.
	 */

	mutex_lock(&obj->dev->object_name_lock);
	if (--obj->handle_count == 0) {
		drm_gem_object_handle_free(obj);
		drm_gem_object_exported_dma_buf_free(obj);
	}
	mutex_unlock(&obj->dev->object_name_lock);

	drm_gem_object_unreference_unlocked(obj);
}
+
 /**
  * Removes the mapping from handle to filp for this object.
  */
@@ -253,18 +307,36 @@ drm_gem_handle_delete(struct drm_file *filp, u32 handle)
 EXPORT_SYMBOL(drm_gem_handle_delete);
 
 /**
- * Create a handle for this object. This adds a handle reference
- * to the object, which includes a regular reference count. Callers
- * will likely want to dereference the object afterwards.
 * drm_gem_dumb_destroy - dumb fb callback helper for gem based drivers
 * @file: drm file-private structure to remove the dumb handle from
 * @dev: corresponding drm_device
 * @handle: the dumb handle to remove
 *
 * This implements the ->dumb_destroy kms driver callback for drivers which use
 * gem to manage their backing storage.
 */
int drm_gem_dumb_destroy(struct drm_file *file,
			 struct drm_device *dev,
			 uint32_t handle)
{
	return drm_gem_handle_delete(file, handle);
}
EXPORT_SYMBOL(drm_gem_dumb_destroy);
+
+/**
+ * drm_gem_handle_create_tail - internal functions to create a handle
+ * 
+ * This expects the dev->object_name_lock to be held already and will drop it
+ * before returning. Used to avoid races in establishing new handles when
+ * importing an object from either an flink name or a dma-buf.
  */
 int
-drm_gem_handle_create(struct drm_file *file_priv,
-                      struct drm_gem_object *obj,
-                      u32 *handlep)
+drm_gem_handle_create_tail(struct drm_file *file_priv,
+                          struct drm_gem_object *obj,
+                          u32 *handlep)
 {
        struct drm_device *dev = obj->dev;
        int ret;
 
+       WARN_ON(!mutex_is_locked(&dev->object_name_lock));
+
        /*
         * Get the user-visible handle using idr.  Preload and perform
         * allocation under our spinlock.
@@ -273,14 +345,17 @@ drm_gem_handle_create(struct drm_file *file_priv,
        spin_lock(&file_priv->table_lock);
 
        ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);
-
+       drm_gem_object_reference(obj);
+       obj->handle_count++;
        spin_unlock(&file_priv->table_lock);
        idr_preload_end();
-       if (ret < 0)
+       mutex_unlock(&dev->object_name_lock);
+       if (ret < 0) {
+               drm_gem_object_handle_unreference_unlocked(obj);
                return ret;
+       }
        *handlep = ret;
 
-       drm_gem_object_handle_reference(obj);
 
        if (dev->driver->gem_open_object) {
                ret = dev->driver->gem_open_object(obj, file_priv);
@@ -292,6 +367,21 @@ drm_gem_handle_create(struct drm_file *file_priv,
 
        return 0;
 }
+
+/**
+ * Create a handle for this object. This adds a handle reference
+ * to the object, which includes a regular reference count. Callers
+ * will likely want to dereference the object afterwards.
+ */
+int
+drm_gem_handle_create(struct drm_file *file_priv,
+                      struct drm_gem_object *obj,
+                      u32 *handlep)
+{
+       mutex_lock(&obj->dev->object_name_lock);
+
+       return drm_gem_handle_create_tail(file_priv, obj, handlep);
+}
 EXPORT_SYMBOL(drm_gem_handle_create);
 
 
@@ -306,81 +396,155 @@ drm_gem_free_mmap_offset(struct drm_gem_object *obj)
 {
        struct drm_device *dev = obj->dev;
        struct drm_gem_mm *mm = dev->mm_private;
-       struct drm_map_list *list = &obj->map_list;
 
-       drm_ht_remove_item(&mm->offset_hash, &list->hash);
-       drm_mm_put_block(list->file_offset_node);
-       kfree(list->map);
-       list->map = NULL;
+       drm_vma_offset_remove(&mm->vma_manager, &obj->vma_node);
 }
 EXPORT_SYMBOL(drm_gem_free_mmap_offset);
 
 /**
- * drm_gem_create_mmap_offset - create a fake mmap offset for an object
+ * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object
  * @obj: obj in question
+ * @size: the virtual size
  *
  * GEM memory mapping works by handing back to userspace a fake mmap offset
  * it can use in a subsequent mmap(2) call.  The DRM core code then looks
  * up the object based on the offset and sets up the various memory mapping
  * structures.
  *
- * This routine allocates and attaches a fake offset for @obj.
+ * This routine allocates and attaches a fake offset for @obj, in cases where
+ * the virtual size differs from the physical size (ie. obj->size).  Otherwise
+ * just use drm_gem_create_mmap_offset().
  */
 int
-drm_gem_create_mmap_offset(struct drm_gem_object *obj)
+drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
 {
        struct drm_device *dev = obj->dev;
        struct drm_gem_mm *mm = dev->mm_private;
-       struct drm_map_list *list;
-       struct drm_local_map *map;
-       int ret;
-
-       /* Set the object up for mmap'ing */
-       list = &obj->map_list;
-       list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
-       if (!list->map)
-               return -ENOMEM;
 
-       map = list->map;
-       map->type = _DRM_GEM;
-       map->size = obj->size;
-       map->handle = obj;
+       return drm_vma_offset_add(&mm->vma_manager, &obj->vma_node,
+                                 size / PAGE_SIZE);
+}
+EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);
 
-       /* Get a DRM GEM mmap offset allocated... */
-       list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
-                       obj->size / PAGE_SIZE, 0, false);
/**
 * drm_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj, sized to
 * obj->size; use drm_gem_create_mmap_offset_size() when the virtual size
 * differs.  Undo with drm_gem_free_mmap_offset().
 *
 * RETURNS:
 * Zero on success, negative error code on failure.
 */
int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
	return drm_gem_create_mmap_offset_size(obj, obj->size);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);
 
-       if (!list->file_offset_node) {
-               DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
-               ret = -ENOSPC;
-               goto out_free_list;
+/**
+ * drm_gem_get_pages - helper to allocate backing pages for a GEM object
+ * from shmem
+ * @obj: obj in question
+ * @gfpmask: gfp mask of requested pages
+ */
+struct page **drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask)
+{
+       struct inode *inode;
+       struct address_space *mapping;
+       struct page *p, **pages;
+       int i, npages;
+
+       /* This is the shared memory object that backs the GEM resource */
+       inode = file_inode(obj->filp);
+       mapping = inode->i_mapping;
+
+       /* We already BUG_ON() for non-page-aligned sizes in
+        * drm_gem_object_init(), so we should never hit this unless
+        * driver author is doing something really wrong:
+        */
+       WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);
+
+       npages = obj->size >> PAGE_SHIFT;
+
+       pages = drm_malloc_ab(npages, sizeof(struct page *));
+       if (pages == NULL)
+               return ERR_PTR(-ENOMEM);
+
+       gfpmask |= mapping_gfp_mask(mapping);
+
+       for (i = 0; i < npages; i++) {
+               p = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
+               if (IS_ERR(p))
+                       goto fail;
+               pages[i] = p;
+
+               /* There is a hypothetical issue w/ drivers that require
+                * buffer memory in the low 4GB.. if the pages are un-
+                * pinned, and swapped out, they can end up swapped back
+                * in above 4GB.  If pages are already in memory, then
+                * shmem_read_mapping_page_gfp will ignore the gfpmask,
+                * even if the already in-memory page disobeys the mask.
+                *
+                * It is only a theoretical issue today, because none of
+                * the devices with this limitation can be populated with
+                * enough memory to trigger the issue.  But this BUG_ON()
+                * is here as a reminder in case the problem with
+                * shmem_read_mapping_page_gfp() isn't solved by the time
+                * it does become a real issue.
+                *
+                * See this thread: http://lkml.org/lkml/2011/7/11/238
+                */
+               BUG_ON((gfpmask & __GFP_DMA32) &&
+                               (page_to_pfn(p) >= 0x00100000UL));
        }
 
-       list->file_offset_node = drm_mm_get_block(list->file_offset_node,
-                       obj->size / PAGE_SIZE, 0);
-       if (!list->file_offset_node) {
-               ret = -ENOMEM;
-               goto out_free_list;
-       }
+       return pages;
 
-       list->hash.key = list->file_offset_node->start;
-       ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
-       if (ret) {
-               DRM_ERROR("failed to add to map hash\n");
-               goto out_free_mm;
-       }
+fail:
+       while (i--)
+               page_cache_release(pages[i]);
 
-       return 0;
+       drm_free_large(pages);
+       return ERR_CAST(p);
+}
+EXPORT_SYMBOL(drm_gem_get_pages);
 
-out_free_mm:
-       drm_mm_put_block(list->file_offset_node);
-out_free_list:
-       kfree(list->map);
-       list->map = NULL;
+/**
+ * drm_gem_put_pages - helper to free backing pages for a GEM object
+ * @obj: obj in question
+ * @pages: pages to free
+ * @dirty: if true, pages will be marked as dirty
+ * @accessed: if true, the pages will be marked as accessed
+ */
+void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
+               bool dirty, bool accessed)
+{
+       int i, npages;
 
-       return ret;
+       /* We already BUG_ON() for non-page-aligned sizes in
+        * drm_gem_object_init(), so we should never hit this unless
+        * driver author is doing something really wrong:
+        */
+       WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);
+
+       npages = obj->size >> PAGE_SHIFT;
+
+       for (i = 0; i < npages; i++) {
+               if (dirty)
+                       set_page_dirty(pages[i]);
+
+               if (accessed)
+                       mark_page_accessed(pages[i]);
+
+               /* Undo the reference we took when populating the table */
+               page_cache_release(pages[i]);
+       }
+
+       drm_free_large(pages);
 }
-EXPORT_SYMBOL(drm_gem_create_mmap_offset);
+EXPORT_SYMBOL(drm_gem_put_pages);
 
 /** Returns a reference to the object named by the handle. */
 struct drm_gem_object *
@@ -445,8 +609,14 @@ drm_gem_flink_ioctl(struct drm_device *dev, void *data,
        if (obj == NULL)
                return -ENOENT;
 
+       mutex_lock(&dev->object_name_lock);
        idr_preload(GFP_KERNEL);
-       spin_lock(&dev->object_name_lock);
+       /* prevent races with concurrent gem_close. */
+       if (obj->handle_count == 0) {
+               ret = -ENOENT;
+               goto err;
+       }
+
        if (!obj->name) {
                ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_NOWAIT);
                if (ret < 0)
@@ -462,8 +632,8 @@ drm_gem_flink_ioctl(struct drm_device *dev, void *data,
        ret = 0;
 
 err:
-       spin_unlock(&dev->object_name_lock);
        idr_preload_end();
+       mutex_unlock(&dev->object_name_lock);
        drm_gem_object_unreference_unlocked(obj);
        return ret;
 }
@@ -486,15 +656,17 @@ drm_gem_open_ioctl(struct drm_device *dev, void *data,
        if (!(dev->driver->driver_features & DRIVER_GEM))
                return -ENODEV;
 
-       spin_lock(&dev->object_name_lock);
+       mutex_lock(&dev->object_name_lock);
        obj = idr_find(&dev->object_name_idr, (int) args->name);
-       if (obj)
+       if (obj) {
                drm_gem_object_reference(obj);
-       spin_unlock(&dev->object_name_lock);
-       if (!obj)
+       } else {
+               mutex_unlock(&dev->object_name_lock);
                return -ENOENT;
+       }
 
-       ret = drm_gem_handle_create(file_priv, obj, &handle);
+       /* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
+       ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
        drm_gem_object_unreference_unlocked(obj);
        if (ret)
                return ret;
@@ -553,6 +725,8 @@ drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
 void
 drm_gem_object_release(struct drm_gem_object *obj)
 {
+       WARN_ON(obj->dma_buf);
+
        if (obj->filp)
            fput(obj->filp);
 }
@@ -577,41 +751,6 @@ drm_gem_object_free(struct kref *kref)
 }
 EXPORT_SYMBOL(drm_gem_object_free);
 
-static void drm_gem_object_ref_bug(struct kref *list_kref)
-{
-       BUG();
-}
-
-/**
- * Called after the last handle to the object has been closed
- *
- * Removes any name for the object. Note that this must be
- * called before drm_gem_object_free or we'll be touching
- * freed memory
- */
-void drm_gem_object_handle_free(struct drm_gem_object *obj)
-{
-       struct drm_device *dev = obj->dev;
-
-       /* Remove any name for this object */
-       spin_lock(&dev->object_name_lock);
-       if (obj->name) {
-               idr_remove(&dev->object_name_idr, obj->name);
-               obj->name = 0;
-               spin_unlock(&dev->object_name_lock);
-               /*
-                * The object name held a reference to this object, drop
-                * that now.
-               *
-               * This cannot be the last reference, since the handle holds one too.
-                */
-               kref_put(&obj->refcount, drm_gem_object_ref_bug);
-       } else
-               spin_unlock(&dev->object_name_lock);
-
-}
-EXPORT_SYMBOL(drm_gem_object_handle_free);
-
 void drm_gem_vm_open(struct vm_area_struct *vma)
 {
        struct drm_gem_object *obj = vma->vm_private_data;
@@ -707,8 +846,8 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
        struct drm_file *priv = filp->private_data;
        struct drm_device *dev = priv->minor->dev;
        struct drm_gem_mm *mm = dev->mm_private;
-       struct drm_local_map *map = NULL;
-       struct drm_hash_item *hash;
+       struct drm_gem_object *obj;
+       struct drm_vma_offset_node *node;
        int ret = 0;
 
        if (drm_device_is_unplugged(dev))
@@ -716,21 +855,16 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 
        mutex_lock(&dev->struct_mutex);
 
-       if (drm_ht_find_item(&mm->offset_hash, vma->vm_pgoff, &hash)) {
+       node = drm_vma_offset_exact_lookup(&mm->vma_manager, vma->vm_pgoff,
+                                          vma_pages(vma));
+       if (!node) {
                mutex_unlock(&dev->struct_mutex);
                return drm_mmap(filp, vma);
        }
 
-       map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
-       if (!map ||
-           ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN))) {
-               ret =  -EPERM;
-               goto out_unlock;
-       }
-
-       ret = drm_gem_mmap_obj(map->handle, map->size, vma);
+       obj = container_of(node, struct drm_gem_object, vma_node);
+       ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT, vma);
 
-out_unlock:
        mutex_unlock(&dev->struct_mutex);
 
        return ret;
index ece72a8ac245ed6f3bc8587813f0e51cb1653821..0a4f80574eb467ea88ed5ca50acf8d29fba6cf08 100644 (file)
 #include <drm/drmP.h>
 #include <drm/drm.h>
 #include <drm/drm_gem_cma_helper.h>
-
-static unsigned int get_gem_mmap_offset(struct drm_gem_object *obj)
-{
-       return (unsigned int)obj->map_list.hash.key << PAGE_SHIFT;
-}
+#include <drm/drm_vma_manager.h>
 
 /*
  * __drm_gem_cma_create - Create a GEM CMA object without allocating memory
@@ -172,8 +168,7 @@ void drm_gem_cma_free_object(struct drm_gem_object *gem_obj)
 {
        struct drm_gem_cma_object *cma_obj;
 
-       if (gem_obj->map_list.map)
-               drm_gem_free_mmap_offset(gem_obj);
+       drm_gem_free_mmap_offset(gem_obj);
 
        cma_obj = to_drm_gem_cma_obj(gem_obj);
 
@@ -237,7 +232,7 @@ int drm_gem_cma_dumb_map_offset(struct drm_file *file_priv,
                return -EINVAL;
        }
 
-       *offset = get_gem_mmap_offset(gem_obj);
+       *offset = drm_vma_node_offset_addr(&gem_obj->vma_node);
 
        drm_gem_object_unreference(gem_obj);
 
@@ -286,27 +281,16 @@ int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma)
 }
 EXPORT_SYMBOL_GPL(drm_gem_cma_mmap);
 
-/*
- * drm_gem_cma_dumb_destroy - (struct drm_driver)->dumb_destroy callback function
- */
-int drm_gem_cma_dumb_destroy(struct drm_file *file_priv,
-               struct drm_device *drm, unsigned int handle)
-{
-       return drm_gem_handle_delete(file_priv, handle);
-}
-EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_destroy);
-
 #ifdef CONFIG_DEBUG_FS
 void drm_gem_cma_describe(struct drm_gem_cma_object *cma_obj, struct seq_file *m)
 {
        struct drm_gem_object *obj = &cma_obj->base;
        struct drm_device *dev = obj->dev;
-       uint64_t off = 0;
+       uint64_t off;
 
        WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 
-       if (obj->map_list.map)
-               off = (uint64_t)obj->map_list.hash.key;
+       off = drm_vma_node_start(&obj->vma_node);
 
        seq_printf(m, "%2d (%2d) %08llx %08Zx %p %d",
                        obj->name, obj->refcount.refcount.counter,
index d4b20ceda3fbd8d183dc7e628683732590200f51..53298320080b86d7694a24a81082a2c402d2e553 100644 (file)
@@ -207,7 +207,7 @@ static int drm_gem_one_name_info(int id, void *ptr, void *data)
 
        seq_printf(m, "%6d %8zd %7d %8d\n",
                   obj->name, obj->size,
-                  atomic_read(&obj->handle_count),
+                  obj->handle_count,
                   atomic_read(&obj->refcount.refcount));
        return 0;
 }
@@ -218,7 +218,11 @@ int drm_gem_name_info(struct seq_file *m, void *data)
        struct drm_device *dev = node->minor->dev;
 
        seq_printf(m, "  name     size handles refcount\n");
+
+       mutex_lock(&dev->object_name_lock);
        idr_for_each(&dev->object_name_idr, drm_gem_one_name_info, m);
+       mutex_unlock(&dev->object_name_lock);
+
        return 0;
 }
 
index ffd7a7ba70d496212c149bfd2a11798f5e01665f..cffc7c0e1171cdc5c2dc968c332f33a0891c1378 100644 (file)
@@ -217,29 +217,30 @@ int drm_getclient(struct drm_device *dev, void *data,
                  struct drm_file *file_priv)
 {
        struct drm_client *client = data;
-       struct drm_file *pt;
-       int idx;
-       int i;
-
-       idx = client->idx;
-       i = 0;
 
-       mutex_lock(&dev->struct_mutex);
-       list_for_each_entry(pt, &dev->filelist, lhead) {
-               if (i++ >= idx) {
-                       client->auth = pt->authenticated;
-                       client->pid = pid_vnr(pt->pid);
-                       client->uid = from_kuid_munged(current_user_ns(), pt->uid);
-                       client->magic = pt->magic;
-                       client->iocs = pt->ioctl_count;
-                       mutex_unlock(&dev->struct_mutex);
-
-                       return 0;
-               }
+       /*
+        * Hollowed-out getclient ioctl to keep some dead old drm tests/tools
+        * not breaking completely. Userspace tools stop enumerating once they
+        * get -EINVAL, hence this is the return value we need to hand back for
+        * no clients tracked.
+        *
+        * Unfortunately some clients (*cough* libva *cough*) use this in a fun
+        * attempt to figure out whether they're authenticated or not. Since
+        * that's the only thing they care about, give it to them directly
+        * instead of walking one giant list.
+        */
+       if (client->idx == 0) {
+               client->auth = file_priv->authenticated;
+               client->pid = pid_vnr(file_priv->pid);
+               client->uid = from_kuid_munged(current_user_ns(),
+                                              file_priv->uid);
+               client->magic = 0;
+               client->iocs = 0;
+
+               return 0;
+       } else {
+               return -EINVAL;
        }
-       mutex_unlock(&dev->struct_mutex);
-
-       return -EINVAL;
 }
 
 /**
@@ -256,21 +257,10 @@ int drm_getstats(struct drm_device *dev, void *data,
                 struct drm_file *file_priv)
 {
        struct drm_stats *stats = data;
-       int i;
 
+       /* Clear stats to prevent userspace from eating its stack garbage. */
        memset(stats, 0, sizeof(*stats));
 
-       for (i = 0; i < dev->counters; i++) {
-               if (dev->types[i] == _DRM_STAT_LOCK)
-                       stats->data[i].value =
-                           (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
-               else
-                       stats->data[i].value = atomic_read(&dev->counts[i]);
-               stats->data[i].type = dev->types[i];
-       }
-
-       stats->count = dev->counters;
-
        return 0;
 }
 
@@ -352,9 +342,6 @@ int drm_setversion(struct drm_device *dev, void *data, struct drm_file *file_pri
                        retcode = -EINVAL;
                        goto done;
                }
-
-               if (dev->driver->set_version)
-                       dev->driver->set_version(dev, sv);
        }
 
 done:
index 126d50ea181f110abaf22e059425e55e0ef99c16..64e44fad8ae84282d43f449d21772c4346acab68 100644 (file)
@@ -86,7 +86,6 @@ void drm_free_agp(DRM_AGP_MEM * handle, int pages)
 {
        agp_free_memory(handle);
 }
-EXPORT_SYMBOL(drm_free_agp);
 
 /** Wrapper around agp_bind_memory() */
 int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start)
@@ -99,7 +98,6 @@ int drm_unbind_agp(DRM_AGP_MEM * handle)
 {
        return agp_unbind_memory(handle);
 }
-EXPORT_SYMBOL(drm_unbind_agp);
 
 #else  /*  __OS_HAS_AGP  */
 static inline void *agp_remap(unsigned long offset, unsigned long size,
index 543b9b3171d32310de903668bc69f30901b12e91..af93cc55259fd69e240625b152590d09dbca052d 100644 (file)
 
 #define MM_UNUSED_TARGET 4
 
-static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
-{
-       struct drm_mm_node *child;
-
-       if (atomic)
-               child = kzalloc(sizeof(*child), GFP_ATOMIC);
-       else
-               child = kzalloc(sizeof(*child), GFP_KERNEL);
-
-       if (unlikely(child == NULL)) {
-               spin_lock(&mm->unused_lock);
-               if (list_empty(&mm->unused_nodes))
-                       child = NULL;
-               else {
-                       child =
-                           list_entry(mm->unused_nodes.next,
-                                      struct drm_mm_node, node_list);
-                       list_del(&child->node_list);
-                       --mm->num_unused;
-               }
-               spin_unlock(&mm->unused_lock);
-       }
-       return child;
-}
-
-/* drm_mm_pre_get() - pre allocate drm_mm_node structure
- * drm_mm:     memory manager struct we are pre-allocating for
- *
- * Returns 0 on success or -ENOMEM if allocation fails.
- */
-int drm_mm_pre_get(struct drm_mm *mm)
-{
-       struct drm_mm_node *node;
-
-       spin_lock(&mm->unused_lock);
-       while (mm->num_unused < MM_UNUSED_TARGET) {
-               spin_unlock(&mm->unused_lock);
-               node = kzalloc(sizeof(*node), GFP_KERNEL);
-               spin_lock(&mm->unused_lock);
-
-               if (unlikely(node == NULL)) {
-                       int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
-                       spin_unlock(&mm->unused_lock);
-                       return ret;
-               }
-               ++mm->num_unused;
-               list_add_tail(&node->node_list, &mm->unused_nodes);
-       }
-       spin_unlock(&mm->unused_lock);
-       return 0;
-}
-EXPORT_SYMBOL(drm_mm_pre_get);
+static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
+                                               unsigned long size,
+                                               unsigned alignment,
+                                               unsigned long color,
+                                               enum drm_mm_search_flags flags);
+static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
+                                               unsigned long size,
+                                               unsigned alignment,
+                                               unsigned long color,
+                                               unsigned long start,
+                                               unsigned long end,
+                                               enum drm_mm_search_flags flags);
 
 static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
                                 struct drm_mm_node *node,
@@ -147,33 +107,27 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
        }
 }
 
-struct drm_mm_node *drm_mm_create_block(struct drm_mm *mm,
-                                       unsigned long start,
-                                       unsigned long size,
-                                       bool atomic)
+int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
 {
-       struct drm_mm_node *hole, *node;
-       unsigned long end = start + size;
+       struct drm_mm_node *hole;
+       unsigned long end = node->start + node->size;
        unsigned long hole_start;
        unsigned long hole_end;
 
+       BUG_ON(node == NULL);
+
+       /* Find the relevant hole to add our node to */
        drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
-               if (hole_start > start || hole_end < end)
+               if (hole_start > node->start || hole_end < end)
                        continue;
 
-               node = drm_mm_kmalloc(mm, atomic);
-               if (unlikely(node == NULL))
-                       return NULL;
-
-               node->start = start;
-               node->size = size;
                node->mm = mm;
                node->allocated = 1;
 
                INIT_LIST_HEAD(&node->hole_stack);
                list_add(&node->node_list, &hole->node_list);
 
-               if (start == hole_start) {
+               if (node->start == hole_start) {
                        hole->hole_follows = 0;
                        list_del_init(&hole->hole_stack);
                }
@@ -184,31 +138,14 @@ struct drm_mm_node *drm_mm_create_block(struct drm_mm *mm,
                        node->hole_follows = 1;
                }
 
-               return node;
+               return 0;
        }
 
-       WARN(1, "no hole found for block 0x%lx + 0x%lx\n", start, size);
-       return NULL;
-}
-EXPORT_SYMBOL(drm_mm_create_block);
-
-struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
-                                            unsigned long size,
-                                            unsigned alignment,
-                                            unsigned long color,
-                                            int atomic)
-{
-       struct drm_mm_node *node;
-
-       node = drm_mm_kmalloc(hole_node->mm, atomic);
-       if (unlikely(node == NULL))
-               return NULL;
-
-       drm_mm_insert_helper(hole_node, node, size, alignment, color);
-
-       return node;
+       WARN(1, "no hole found for node 0x%lx + 0x%lx\n",
+            node->start, node->size);
+       return -ENOSPC;
 }
-EXPORT_SYMBOL(drm_mm_get_block_generic);
+EXPORT_SYMBOL(drm_mm_reserve_node);
 
 /**
  * Search for free space and insert a preallocated memory node. Returns
@@ -217,12 +154,13 @@ EXPORT_SYMBOL(drm_mm_get_block_generic);
  */
 int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
                               unsigned long size, unsigned alignment,
-                              unsigned long color)
+                              unsigned long color,
+                              enum drm_mm_search_flags flags)
 {
        struct drm_mm_node *hole_node;
 
        hole_node = drm_mm_search_free_generic(mm, size, alignment,
-                                              color, 0);
+                                              color, flags);
        if (!hole_node)
                return -ENOSPC;
 
@@ -231,13 +169,6 @@ int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
 }
 EXPORT_SYMBOL(drm_mm_insert_node_generic);
 
-int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node,
-                      unsigned long size, unsigned alignment)
-{
-       return drm_mm_insert_node_generic(mm, node, size, alignment, 0);
-}
-EXPORT_SYMBOL(drm_mm_insert_node);
-
 static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
                                       struct drm_mm_node *node,
                                       unsigned long size, unsigned alignment,
@@ -290,27 +221,6 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
        }
 }
 
-struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *hole_node,
-                                               unsigned long size,
-                                               unsigned alignment,
-                                               unsigned long color,
-                                               unsigned long start,
-                                               unsigned long end,
-                                               int atomic)
-{
-       struct drm_mm_node *node;
-
-       node = drm_mm_kmalloc(hole_node->mm, atomic);
-       if (unlikely(node == NULL))
-               return NULL;
-
-       drm_mm_insert_helper_range(hole_node, node, size, alignment, color,
-                                  start, end);
-
-       return node;
-}
-EXPORT_SYMBOL(drm_mm_get_block_range_generic);
-
 /**
  * Search for free space and insert a preallocated memory node. Returns
  * -ENOSPC if no suitable free area is available. This is for range
@@ -318,13 +228,14 @@ EXPORT_SYMBOL(drm_mm_get_block_range_generic);
  */
 int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
                                        unsigned long size, unsigned alignment, unsigned long color,
-                                       unsigned long start, unsigned long end)
+                                       unsigned long start, unsigned long end,
+                                       enum drm_mm_search_flags flags)
 {
        struct drm_mm_node *hole_node;
 
        hole_node = drm_mm_search_free_in_range_generic(mm,
                                                        size, alignment, color,
-                                                       start, end, 0);
+                                                       start, end, flags);
        if (!hole_node)
                return -ENOSPC;
 
@@ -335,14 +246,6 @@ int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *n
 }
 EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);
 
-int drm_mm_insert_node_in_range(struct drm_mm *mm, struct drm_mm_node *node,
-                               unsigned long size, unsigned alignment,
-                               unsigned long start, unsigned long end)
-{
-       return drm_mm_insert_node_in_range_generic(mm, node, size, alignment, 0, start, end);
-}
-EXPORT_SYMBOL(drm_mm_insert_node_in_range);
-
 /**
  * Remove a memory node from the allocator.
  */
@@ -351,6 +254,9 @@ void drm_mm_remove_node(struct drm_mm_node *node)
        struct drm_mm *mm = node->mm;
        struct drm_mm_node *prev_node;
 
+       if (WARN_ON(!node->allocated))
+               return;
+
        BUG_ON(node->scanned_block || node->scanned_prev_free
                                   || node->scanned_next_free);
 
@@ -377,28 +283,6 @@ void drm_mm_remove_node(struct drm_mm_node *node)
 }
 EXPORT_SYMBOL(drm_mm_remove_node);
 
-/*
- * Remove a memory node from the allocator and free the allocated struct
- * drm_mm_node. Only to be used on a struct drm_mm_node obtained by one of the
- * drm_mm_get_block functions.
- */
-void drm_mm_put_block(struct drm_mm_node *node)
-{
-
-       struct drm_mm *mm = node->mm;
-
-       drm_mm_remove_node(node);
-
-       spin_lock(&mm->unused_lock);
-       if (mm->num_unused < MM_UNUSED_TARGET) {
-               list_add(&node->node_list, &mm->unused_nodes);
-               ++mm->num_unused;
-       } else
-               kfree(node);
-       spin_unlock(&mm->unused_lock);
-}
-EXPORT_SYMBOL(drm_mm_put_block);
-
 static int check_free_hole(unsigned long start, unsigned long end,
                           unsigned long size, unsigned alignment)
 {
@@ -414,11 +298,11 @@ static int check_free_hole(unsigned long start, unsigned long end,
        return end >= start + size;
 }
 
-struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
-                                              unsigned long size,
-                                              unsigned alignment,
-                                              unsigned long color,
-                                              bool best_match)
+static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
+                                                     unsigned long size,
+                                                     unsigned alignment,
+                                                     unsigned long color,
+                                                     enum drm_mm_search_flags flags)
 {
        struct drm_mm_node *entry;
        struct drm_mm_node *best;
@@ -441,7 +325,7 @@ struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
                if (!check_free_hole(adj_start, adj_end, size, alignment))
                        continue;
 
-               if (!best_match)
+               if (!(flags & DRM_MM_SEARCH_BEST))
                        return entry;
 
                if (entry->size < best_size) {
@@ -452,15 +336,14 @@ struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
 
        return best;
 }
-EXPORT_SYMBOL(drm_mm_search_free_generic);
 
-struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
+static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
                                                        unsigned long size,
                                                        unsigned alignment,
                                                        unsigned long color,
                                                        unsigned long start,
                                                        unsigned long end,
-                                                       bool best_match)
+                                                       enum drm_mm_search_flags flags)
 {
        struct drm_mm_node *entry;
        struct drm_mm_node *best;
@@ -488,7 +371,7 @@ struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
                if (!check_free_hole(adj_start, adj_end, size, alignment))
                        continue;
 
-               if (!best_match)
+               if (!(flags & DRM_MM_SEARCH_BEST))
                        return entry;
 
                if (entry->size < best_size) {
@@ -499,7 +382,6 @@ struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
 
        return best;
 }
-EXPORT_SYMBOL(drm_mm_search_free_in_range_generic);
 
 /**
  * Moves an allocation. To be used with embedded struct drm_mm_node.
@@ -634,8 +516,8 @@ EXPORT_SYMBOL(drm_mm_scan_add_block);
  * corrupted.
  *
  * When the scan list is empty, the selected memory nodes can be freed. An
- * immediately following drm_mm_search_free with best_match = 0 will then return
- * the just freed block (because its at the top of the free_stack list).
+ * immediately following drm_mm_search_free with !DRM_MM_SEARCH_BEST will then
+ * return the just freed block (because it's at the top of the free_stack list).
  *
  * Returns one if this block should be evicted, zero otherwise. Will always
  * return zero when no hole has been found.
@@ -672,10 +554,7 @@ EXPORT_SYMBOL(drm_mm_clean);
 void drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
 {
        INIT_LIST_HEAD(&mm->hole_stack);
-       INIT_LIST_HEAD(&mm->unused_nodes);
-       mm->num_unused = 0;
        mm->scanned_blocks = 0;
-       spin_lock_init(&mm->unused_lock);
 
        /* Clever trick to avoid a special case in the free hole tracking. */
        INIT_LIST_HEAD(&mm->head_node.node_list);
@@ -695,22 +574,8 @@ EXPORT_SYMBOL(drm_mm_init);
 
 void drm_mm_takedown(struct drm_mm * mm)
 {
-       struct drm_mm_node *entry, *next;
-
-       if (WARN(!list_empty(&mm->head_node.node_list),
-                "Memory manager not clean. Delaying takedown\n")) {
-               return;
-       }
-
-       spin_lock(&mm->unused_lock);
-       list_for_each_entry_safe(entry, next, &mm->unused_nodes, node_list) {
-               list_del(&entry->node_list);
-               kfree(entry);
-               --mm->num_unused;
-       }
-       spin_unlock(&mm->unused_lock);
-
-       BUG_ON(mm->num_unused != 0);
+       WARN(!list_empty(&mm->head_node.node_list),
+            "Memory manager not clean during takedown.\n");
 }
 EXPORT_SYMBOL(drm_mm_takedown);
 
index a6729bfe6860e06a3d2ac0f0da721caada4238d8..fc2adb62b7574dc9b3e1d7de810bb3074320311f 100644 (file)
@@ -595,27 +595,6 @@ void drm_mode_set_name(struct drm_display_mode *mode)
 }
 EXPORT_SYMBOL(drm_mode_set_name);
 
-/**
- * drm_mode_list_concat - move modes from one list to another
- * @head: source list
- * @new: dst list
- *
- * LOCKING:
- * Caller must ensure both lists are locked.
- *
- * Move all the modes from @head to @new.
- */
-void drm_mode_list_concat(struct list_head *head, struct list_head *new)
-{
-
-       struct list_head *entry, *tmp;
-
-       list_for_each_safe(entry, tmp, head) {
-               list_move_tail(entry, new);
-       }
-}
-EXPORT_SYMBOL(drm_mode_list_concat);
-
 /**
  * drm_mode_width - get the width of a mode
  * @mode: mode
@@ -922,43 +901,6 @@ void drm_mode_validate_size(struct drm_device *dev,
 }
 EXPORT_SYMBOL(drm_mode_validate_size);
 
-/**
- * drm_mode_validate_clocks - validate modes against clock limits
- * @dev: DRM device
- * @mode_list: list of modes to check
- * @min: minimum clock rate array
- * @max: maximum clock rate array
- * @n_ranges: number of clock ranges (size of arrays)
- *
- * LOCKING:
- * Caller must hold a lock protecting @mode_list.
- *
- * Some code may need to check a mode list against the clock limits of the
- * device in question.  This function walks the mode list, testing to make
- * sure each mode falls within a given range (defined by @min and @max
- * arrays) and sets @mode->status as needed.
- */
-void drm_mode_validate_clocks(struct drm_device *dev,
-                             struct list_head *mode_list,
-                             int *min, int *max, int n_ranges)
-{
-       struct drm_display_mode *mode;
-       int i;
-
-       list_for_each_entry(mode, mode_list, head) {
-               bool good = false;
-               for (i = 0; i < n_ranges; i++) {
-                       if (mode->clock >= min[i] && mode->clock <= max[i]) {
-                               good = true;
-                               break;
-                       }
-               }
-               if (!good)
-                       mode->status = MODE_CLOCK_RANGE;
-       }
-}
-EXPORT_SYMBOL(drm_mode_validate_clocks);
-
 /**
  * drm_mode_prune_invalid - remove invalid modes from mode list
  * @dev: DRM device
index 80c0b2b298017a0e1810c7db27cd97ca4bb87d6d..3fca2db1c40cb4c68d1e65bb3eb9bbc25e07e5bd 100644 (file)
 drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t align)
 {
        drm_dma_handle_t *dmah;
-#if 1
        unsigned long addr;
        size_t sz;
-#endif
 
        /* pci_alloc_consistent only guarantees alignment to the smallest
         * PAGE_SIZE order which is greater than or equal to the requested size.
@@ -97,10 +95,8 @@ EXPORT_SYMBOL(drm_pci_alloc);
  */
 void __drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
 {
-#if 1
        unsigned long addr;
        size_t sz;
-#endif
 
        if (dmah->vaddr) {
                /* XXX - Is virt_to_page() legal for consistent mem? */
@@ -276,17 +272,26 @@ static int drm_pci_agp_init(struct drm_device *dev)
                        DRM_ERROR("Cannot initialize the agpgart module.\n");
                        return -EINVAL;
                }
-               if (drm_core_has_MTRR(dev)) {
-                       if (dev->agp)
-                               dev->agp->agp_mtrr = arch_phys_wc_add(
-                                       dev->agp->agp_info.aper_base,
-                                       dev->agp->agp_info.aper_size *
-                                       1024 * 1024);
+               if (dev->agp) {
+                       dev->agp->agp_mtrr = arch_phys_wc_add(
+                               dev->agp->agp_info.aper_base,
+                               dev->agp->agp_info.aper_size *
+                               1024 * 1024);
                }
        }
        return 0;
 }
 
+static void drm_pci_agp_destroy(struct drm_device *dev)
+{
+       if (drm_core_has_AGP(dev) && dev->agp) {
+               arch_phys_wc_del(dev->agp->agp_mtrr);
+               drm_agp_clear(dev);
+               drm_agp_destroy(dev->agp);
+               dev->agp = NULL;
+       }
+}
+
 static struct drm_bus drm_pci_bus = {
        .bus_type = DRIVER_BUS_PCI,
        .get_irq = drm_pci_get_irq,
@@ -295,6 +300,7 @@ static struct drm_bus drm_pci_bus = {
        .set_unique = drm_pci_set_unique,
        .irq_by_busid = drm_pci_irq_by_busid,
        .agp_init = drm_pci_agp_init,
+       .agp_destroy = drm_pci_agp_destroy,
 };
 
 /**
index b8a282ea87515546695bc2965bce2b9f3e4f2f2d..400024b6d512935c55887faf397b4d746d352a56 100644 (file)
@@ -28,7 +28,7 @@
 #include <linux/export.h>
 #include <drm/drmP.h>
 
-/**
+/*
  * Register.
  *
  * \param platdev - Platform device struture
@@ -39,8 +39,8 @@
  * Try and register, if we fail to register, backout previous work.
  */
 
-int drm_get_platform_dev(struct platform_device *platdev,
-                        struct drm_driver *driver)
+static int drm_get_platform_dev(struct platform_device *platdev,
+                               struct drm_driver *driver)
 {
        struct drm_device *dev;
        int ret;
@@ -107,7 +107,6 @@ err_g1:
        mutex_unlock(&drm_global_mutex);
        return ret;
 }
-EXPORT_SYMBOL(drm_get_platform_dev);
 
 static int drm_platform_get_irq(struct drm_device *dev)
 {
index 85e450e3241cb1d5f6281ee9e417f7e8e4301da1..7ae2bfcab70eda25a21c4e82933acc2843dc78f4 100644 (file)
@@ -83,6 +83,34 @@ static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv,
        return 0;
 }
 
+static struct dma_buf *drm_prime_lookup_buf_by_handle(struct drm_prime_file_private *prime_fpriv,
+                                                     uint32_t handle)
+{
+       struct drm_prime_member *member;
+
+       list_for_each_entry(member, &prime_fpriv->head, entry) {
+               if (member->handle == handle)
+                       return member->dma_buf;
+       }
+
+       return NULL;
+}
+
+static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv,
+                                      struct dma_buf *dma_buf,
+                                      uint32_t *handle)
+{
+       struct drm_prime_member *member;
+
+       list_for_each_entry(member, &prime_fpriv->head, entry) {
+               if (member->dma_buf == dma_buf) {
+                       *handle = member->handle;
+                       return 0;
+               }
+       }
+       return -ENOENT;
+}
+
 static int drm_gem_map_attach(struct dma_buf *dma_buf,
                              struct device *target_dev,
                              struct dma_buf_attachment *attach)
@@ -131,9 +159,8 @@ static void drm_gem_map_detach(struct dma_buf *dma_buf,
        attach->priv = NULL;
 }
 
-static void drm_prime_remove_buf_handle_locked(
-               struct drm_prime_file_private *prime_fpriv,
-               struct dma_buf *dma_buf)
+void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
+                                       struct dma_buf *dma_buf)
 {
        struct drm_prime_member *member, *safe;
 
@@ -167,8 +194,6 @@ static struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
        if (WARN_ON(prime_attach->dir != DMA_NONE))
                return ERR_PTR(-EBUSY);
 
-       mutex_lock(&obj->dev->struct_mutex);
-
        sgt = obj->dev->driver->gem_prime_get_sg_table(obj);
 
        if (!IS_ERR(sgt)) {
@@ -182,7 +207,6 @@ static struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
                }
        }
 
-       mutex_unlock(&obj->dev->struct_mutex);
        return sgt;
 }
 
@@ -192,16 +216,14 @@ static void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
        /* nothing to be done here */
 }
 
-static void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
+void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
 {
        struct drm_gem_object *obj = dma_buf->priv;
 
-       if (obj->export_dma_buf == dma_buf) {
-               /* drop the reference on the export fd holds */
-               obj->export_dma_buf = NULL;
-               drm_gem_object_unreference_unlocked(obj);
-       }
+       /* drop the reference on the export fd holds */
+       drm_gem_object_unreference_unlocked(obj);
 }
+EXPORT_SYMBOL(drm_gem_dmabuf_release);
 
 static void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf)
 {
@@ -300,62 +322,107 @@ struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
 }
 EXPORT_SYMBOL(drm_gem_prime_export);
 
+static struct dma_buf *export_and_register_object(struct drm_device *dev,
+                                                 struct drm_gem_object *obj,
+                                                 uint32_t flags)
+{
+       struct dma_buf *dmabuf;
+
+       /* prevent races with concurrent gem_close. */
+       if (obj->handle_count == 0) {
+               dmabuf = ERR_PTR(-ENOENT);
+               return dmabuf;
+       }
+
+       dmabuf = dev->driver->gem_prime_export(dev, obj, flags);
+       if (IS_ERR(dmabuf)) {
+               /* normally the created dma-buf takes ownership of the ref,
+                * but if that fails then drop the ref
+                */
+               return dmabuf;
+       }
+
+       /*
+        * Note that callers do not need to clean up the export cache
+        * since the check for obj->handle_count guarantees that someone
+        * will clean it up.
+        */
+       obj->dma_buf = dmabuf;
+       get_dma_buf(obj->dma_buf);
+       /* Grab a new ref since the caller's ref is now used by the dma-buf */
+       drm_gem_object_reference(obj);
+
+       return dmabuf;
+}
+
 int drm_gem_prime_handle_to_fd(struct drm_device *dev,
                struct drm_file *file_priv, uint32_t handle, uint32_t flags,
                int *prime_fd)
 {
        struct drm_gem_object *obj;
-       void *buf;
        int ret = 0;
        struct dma_buf *dmabuf;
 
+       mutex_lock(&file_priv->prime.lock);
        obj = drm_gem_object_lookup(dev, file_priv, handle);
-       if (!obj)
-               return -ENOENT;
+       if (!obj)  {
+               ret = -ENOENT;
+               goto out_unlock;
+       }
 
-       mutex_lock(&file_priv->prime.lock);
+       dmabuf = drm_prime_lookup_buf_by_handle(&file_priv->prime, handle);
+       if (dmabuf) {
+               get_dma_buf(dmabuf);
+               goto out_have_handle;
+       }
+
+       mutex_lock(&dev->object_name_lock);
        /* re-export the original imported object */
        if (obj->import_attach) {
                dmabuf = obj->import_attach->dmabuf;
+               get_dma_buf(dmabuf);
                goto out_have_obj;
        }
 
-       if (obj->export_dma_buf) {
-               dmabuf = obj->export_dma_buf;
+       if (obj->dma_buf) {
+               get_dma_buf(obj->dma_buf);
+               dmabuf = obj->dma_buf;
                goto out_have_obj;
        }
 
-       buf = dev->driver->gem_prime_export(dev, obj, flags);
-       if (IS_ERR(buf)) {
+       dmabuf = export_and_register_object(dev, obj, flags);
+       if (IS_ERR(dmabuf)) {
                /* normally the created dma-buf takes ownership of the ref,
                 * but if that fails then drop the ref
                 */
-               ret = PTR_ERR(buf);
+               ret = PTR_ERR(dmabuf);
+               mutex_unlock(&dev->object_name_lock);
                goto out;
        }
-       obj->export_dma_buf = buf;
 
-       /* if we've exported this buffer the cheat and add it to the import list
-        * so we get the correct handle back
+out_have_obj:
+       /*
+        * If we've exported this buffer then cheat and add it to the import list
+        * so we get the correct handle back. We must do this under the
+        * protection of dev->object_name_lock to ensure that a racing gem close
+        * ioctl doesn't fail to remove this buffer handle from the cache.
         */
        ret = drm_prime_add_buf_handle(&file_priv->prime,
-                                      obj->export_dma_buf, handle);
+                                      dmabuf, handle);
+       mutex_unlock(&dev->object_name_lock);
        if (ret)
                goto fail_put_dmabuf;
 
-       ret = dma_buf_fd(buf, flags);
-       if (ret < 0)
-               goto fail_rm_handle;
-
-       *prime_fd = ret;
-       mutex_unlock(&file_priv->prime.lock);
-       return 0;
-
-out_have_obj:
-       get_dma_buf(dmabuf);
+out_have_handle:
        ret = dma_buf_fd(dmabuf, flags);
+       /*
+        * We must _not_ remove the buffer from the handle cache since the newly
+        * created dma buf is already linked in the global obj->dma_buf pointer,
+        * and that is invariant as long as a userspace gem handle exists.
+        * Closing the handle will clean out the cache anyway, so we don't leak.
+        */
        if (ret < 0) {
-               dma_buf_put(dmabuf);
+               goto fail_put_dmabuf;
        } else {
                *prime_fd = ret;
                ret = 0;
@@ -363,15 +430,13 @@ out_have_obj:
 
        goto out;
 
-fail_rm_handle:
-       drm_prime_remove_buf_handle_locked(&file_priv->prime, buf);
 fail_put_dmabuf:
-       /* clear NOT to be checked when releasing dma_buf */
-       obj->export_dma_buf = NULL;
-       dma_buf_put(buf);
+       dma_buf_put(dmabuf);
 out:
        drm_gem_object_unreference_unlocked(obj);
+out_unlock:
        mutex_unlock(&file_priv->prime.lock);
+
        return ret;
 }
 EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);
@@ -446,19 +511,26 @@ int drm_gem_prime_fd_to_handle(struct drm_device *dev,
 
        ret = drm_prime_lookup_buf_handle(&file_priv->prime,
                        dma_buf, handle);
-       if (!ret) {
-               ret = 0;
+       if (ret == 0)
                goto out_put;
-       }
 
        /* never seen this one, need to import */
+       mutex_lock(&dev->object_name_lock);
        obj = dev->driver->gem_prime_import(dev, dma_buf);
        if (IS_ERR(obj)) {
                ret = PTR_ERR(obj);
-               goto out_put;
+               goto out_unlock;
        }
 
-       ret = drm_gem_handle_create(file_priv, obj, handle);
+       if (obj->dma_buf) {
+               WARN_ON(obj->dma_buf != dma_buf);
+       } else {
+               obj->dma_buf = dma_buf;
+               get_dma_buf(dma_buf);
+       }
+
+       /* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
+       ret = drm_gem_handle_create_tail(file_priv, obj, handle);
        drm_gem_object_unreference_unlocked(obj);
        if (ret)
                goto out_put;
@@ -478,7 +550,9 @@ fail:
        /* hmm, if driver attached, we are relying on the free-object path
         * to detach.. which seems ok..
         */
-       drm_gem_object_handle_unreference_unlocked(obj);
+       drm_gem_handle_delete(file_priv, *handle);
+out_unlock:
+       mutex_unlock(&dev->object_name_lock);
 out_put:
        dma_buf_put(dma_buf);
        mutex_unlock(&file_priv->prime.lock);
@@ -618,25 +692,3 @@ void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
        WARN_ON(!list_empty(&prime_fpriv->head));
 }
 EXPORT_SYMBOL(drm_prime_destroy_file_private);
-
-int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t *handle)
-{
-       struct drm_prime_member *member;
-
-       list_for_each_entry(member, &prime_fpriv->head, entry) {
-               if (member->dma_buf == dma_buf) {
-                       *handle = member->handle;
-                       return 0;
-               }
-       }
-       return -ENOENT;
-}
-EXPORT_SYMBOL(drm_prime_lookup_buf_handle);
-
-void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf)
-{
-       mutex_lock(&prime_fpriv->lock);
-       drm_prime_remove_buf_handle_locked(prime_fpriv, dma_buf);
-       mutex_unlock(&prime_fpriv->lock);
-}
-EXPORT_SYMBOL(drm_prime_remove_buf_handle);
diff --git a/drivers/gpu/drm/drm_proc.c b/drivers/gpu/drm/drm_proc.c
deleted file mode 100644 (file)
index d7f2324..0000000
+++ /dev/null
@@ -1,209 +0,0 @@
-/**
- * \file drm_proc.c
- * /proc support for DRM
- *
- * \author Rickard E. (Rik) Faith <faith@valinux.com>
- * \author Gareth Hughes <gareth@valinux.com>
- *
- * \par Acknowledgements:
- *    Matthew J Sottek <matthew.j.sottek@intel.com> sent in a patch to fix
- *    the problem with the proc files not outputting all their information.
- */
-
-/*
- * Created: Mon Jan 11 09:48:47 1999 by faith@valinux.com
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include <linux/seq_file.h>
-#include <linux/slab.h>
-#include <linux/export.h>
-#include <drm/drmP.h>
-
-/***************************************************
- * Initialization, etc.
- **************************************************/
-
-/**
- * Proc file list.
- */
-static const struct drm_info_list drm_proc_list[] = {
-       {"name", drm_name_info, 0},
-       {"vm", drm_vm_info, 0},
-       {"clients", drm_clients_info, 0},
-       {"bufs", drm_bufs_info, 0},
-       {"gem_names", drm_gem_name_info, DRIVER_GEM},
-#if DRM_DEBUG_CODE
-       {"vma", drm_vma_info, 0},
-#endif
-};
-#define DRM_PROC_ENTRIES ARRAY_SIZE(drm_proc_list)
-
-static int drm_proc_open(struct inode *inode, struct file *file)
-{
-       struct drm_info_node* node = PDE_DATA(inode);
-
-       return single_open(file, node->info_ent->show, node);
-}
-
-static const struct file_operations drm_proc_fops = {
-       .owner = THIS_MODULE,
-       .open = drm_proc_open,
-       .read = seq_read,
-       .llseek = seq_lseek,
-       .release = single_release,
-};
-
-
-/**
- * Initialize a given set of proc files for a device
- *
- * \param files The array of files to create
- * \param count The number of files given
- * \param root DRI proc dir entry.
- * \param minor device minor number
- * \return Zero on success, non-zero on failure
- *
- * Create a given set of proc files represented by an array of
- * gdm_proc_lists in the given root directory.
- */
-static int drm_proc_create_files(const struct drm_info_list *files, int count,
-                         struct proc_dir_entry *root, struct drm_minor *minor)
-{
-       struct drm_device *dev = minor->dev;
-       struct proc_dir_entry *ent;
-       struct drm_info_node *tmp;
-       int i;
-
-       for (i = 0; i < count; i++) {
-               u32 features = files[i].driver_features;
-
-               if (features != 0 &&
-                   (dev->driver->driver_features & features) != features)
-                       continue;
-
-               tmp = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL);
-               if (!tmp)
-                       return -1;
-
-               tmp->minor = minor;
-               tmp->info_ent = &files[i];
-               list_add(&tmp->list, &minor->proc_nodes.list);
-
-               ent = proc_create_data(files[i].name, S_IRUGO, root,
-                                      &drm_proc_fops, tmp);
-               if (!ent) {
-                       DRM_ERROR("Cannot create /proc/dri/%u/%s\n",
-                                 minor->index, files[i].name);
-                       list_del(&tmp->list);
-                       kfree(tmp);
-                       return -1;
-               }
-       }
-       return 0;
-}
-
-/**
- * Initialize the DRI proc filesystem for a device
- *
- * \param dev DRM device
- * \param root DRI proc dir entry.
- * \param dev_root resulting DRI device proc dir entry.
- * \return root entry pointer on success, or NULL on failure.
- *
- * Create the DRI proc root entry "/proc/dri", the device proc root entry
- * "/proc/dri/%minor%/", and each entry in proc_list as
- * "/proc/dri/%minor%/%name%".
- */
-int drm_proc_init(struct drm_minor *minor, struct proc_dir_entry *root)
-{
-       char name[12];
-       int ret;
-
-       INIT_LIST_HEAD(&minor->proc_nodes.list);
-       sprintf(name, "%u", minor->index);
-       minor->proc_root = proc_mkdir(name, root);
-       if (!minor->proc_root) {
-               DRM_ERROR("Cannot create /proc/dri/%s\n", name);
-               return -1;
-       }
-
-       ret = drm_proc_create_files(drm_proc_list, DRM_PROC_ENTRIES,
-                                   minor->proc_root, minor);
-       if (ret) {
-               remove_proc_subtree(name, root);
-               minor->proc_root = NULL;
-               DRM_ERROR("Failed to create core drm proc files\n");
-               return ret;
-       }
-
-       return 0;
-}
-
-static int drm_proc_remove_files(const struct drm_info_list *files, int count,
-                         struct drm_minor *minor)
-{
-       struct list_head *pos, *q;
-       struct drm_info_node *tmp;
-       int i;
-
-       for (i = 0; i < count; i++) {
-               list_for_each_safe(pos, q, &minor->proc_nodes.list) {
-                       tmp = list_entry(pos, struct drm_info_node, list);
-                       if (tmp->info_ent == &files[i]) {
-                               remove_proc_entry(files[i].name,
-                                                 minor->proc_root);
-                               list_del(pos);
-                               kfree(tmp);
-                       }
-               }
-       }
-       return 0;
-}
-
-/**
- * Cleanup the proc filesystem resources.
- *
- * \param minor device minor number.
- * \param root DRI proc dir entry.
- * \param dev_root DRI device proc dir entry.
- * \return always zero.
- *
- * Remove all proc entries created by proc_init().
- */
-int drm_proc_cleanup(struct drm_minor *minor, struct proc_dir_entry *root)
-{
-       char name[64];
-
-       if (!root || !minor->proc_root)
-               return 0;
-
-       drm_proc_remove_files(drm_proc_list, DRM_PROC_ENTRIES, minor);
-
-       sprintf(name, "%d", minor->index);
-       remove_proc_subtree(name, root);
-       return 0;
-}
-
index d87f60bbc3303139b9a27c052a3c939b22ed43f4..1c78406f6e71395453734a705d772303862f041e 100644 (file)
@@ -46,7 +46,7 @@ static inline void *drm_vmalloc_dma(unsigned long size)
 #endif
 }
 
-void drm_sg_cleanup(struct drm_sg_mem * entry)
+static void drm_sg_cleanup(struct drm_sg_mem * entry)
 {
        struct page *page;
        int i;
@@ -64,19 +64,32 @@ void drm_sg_cleanup(struct drm_sg_mem * entry)
        kfree(entry);
 }
 
+void drm_legacy_sg_cleanup(struct drm_device *dev)
+{
+       if (drm_core_check_feature(dev, DRIVER_SG) && dev->sg &&
+           !drm_core_check_feature(dev, DRIVER_MODESET)) {
+               drm_sg_cleanup(dev->sg);
+               dev->sg = NULL;
+       }
+}
 #ifdef _LP64
 # define ScatterHandle(x) (unsigned int)((x >> 32) + (x & ((1L << 32) - 1)))
 #else
 # define ScatterHandle(x) (unsigned int)(x)
 #endif
 
-int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request)
+int drm_sg_alloc(struct drm_device *dev, void *data,
+                struct drm_file *file_priv)
 {
+       struct drm_scatter_gather *request = data;
        struct drm_sg_mem *entry;
        unsigned long pages, i, j;
 
        DRM_DEBUG("\n");
 
+       if (drm_core_check_feature(dev, DRIVER_MODESET))
+               return -EINVAL;
+
        if (!drm_core_check_feature(dev, DRIVER_SG))
                return -EINVAL;
 
@@ -181,21 +194,15 @@ int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request)
        return -ENOMEM;
 }
 
-int drm_sg_alloc_ioctl(struct drm_device *dev, void *data,
-                      struct drm_file *file_priv)
-{
-       struct drm_scatter_gather *request = data;
-
-       return drm_sg_alloc(dev, request);
-
-}
-
 int drm_sg_free(struct drm_device *dev, void *data,
                struct drm_file *file_priv)
 {
        struct drm_scatter_gather *request = data;
        struct drm_sg_mem *entry;
 
+       if (drm_core_check_feature(dev, DRIVER_MODESET))
+               return -EINVAL;
+
        if (!drm_core_check_feature(dev, DRIVER_SG))
                return -EINVAL;
 
index 327ca19cda855e1d44e3bcc2aca30f6f68eed0a3..e30bb0d7c67a9de5c11130916a012db2b90d1045 100644 (file)
@@ -68,7 +68,6 @@ module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600);
 struct idr drm_minors_idr;
 
 struct class *drm_class;
-struct proc_dir_entry *drm_proc_root;
 struct dentry *drm_debugfs_root;
 
 int drm_err(const char *func, const char *format, ...)
@@ -113,12 +112,12 @@ static int drm_minor_get_id(struct drm_device *dev, int type)
        int base = 0, limit = 63;
 
        if (type == DRM_MINOR_CONTROL) {
-                base += 64;
-                limit = base + 127;
-        } else if (type == DRM_MINOR_RENDER) {
-                base += 128;
-                limit = base + 255;
-        }
+               base += 64;
+               limit = base + 63;
+       } else if (type == DRM_MINOR_RENDER) {
+               base += 128;
+               limit = base + 63;
+       }
 
        mutex_lock(&dev->struct_mutex);
        ret = idr_alloc(&drm_minors_idr, NULL, base, limit, GFP_KERNEL);
@@ -288,13 +287,7 @@ int drm_fill_in_dev(struct drm_device *dev,
                        goto error_out_unreg;
        }
 
-
-
-       retcode = drm_ctxbitmap_init(dev);
-       if (retcode) {
-               DRM_ERROR("Cannot allocate memory for context bitmap.\n");
-               goto error_out_unreg;
-       }
+       drm_legacy_ctxbitmap_init(dev);
 
        if (driver->driver_features & DRIVER_GEM) {
                retcode = drm_gem_init(dev);
@@ -321,9 +314,8 @@ EXPORT_SYMBOL(drm_fill_in_dev);
  * \param sec-minor structure to hold the assigned minor
  * \return negative number on failure.
  *
- * Search an empty entry and initialize it to the given parameters, and
- * create the proc init entry via proc_init(). This routines assigns
- * minor numbers to secondary heads of multi-headed cards
+ * Search an empty entry and initialize it to the given parameters. This
+ * routine assigns minor numbers to secondary heads of multi-headed cards
  */
 int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int type)
 {
@@ -351,20 +343,11 @@ int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int type)
 
        idr_replace(&drm_minors_idr, new_minor, minor_id);
 
-       if (type == DRM_MINOR_LEGACY) {
-               ret = drm_proc_init(new_minor, drm_proc_root);
-               if (ret) {
-                       DRM_ERROR("DRM: Failed to initialize /proc/dri.\n");
-                       goto err_mem;
-               }
-       } else
-               new_minor->proc_root = NULL;
-
 #if defined(CONFIG_DEBUG_FS)
        ret = drm_debugfs_init(new_minor, minor_id, drm_debugfs_root);
        if (ret) {
                DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n");
-               goto err_g2;
+               goto err_mem;
        }
 #endif
 
@@ -372,7 +355,7 @@ int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int type)
        if (ret) {
                printk(KERN_ERR
                       "DRM: Error sysfs_device_add.\n");
-               goto err_g2;
+               goto err_debugfs;
        }
        *minor = new_minor;
 
@@ -380,10 +363,11 @@ int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int type)
        return 0;
 
 
-err_g2:
-       if (new_minor->type == DRM_MINOR_LEGACY)
-               drm_proc_cleanup(new_minor, drm_proc_root);
+err_debugfs:
+#if defined(CONFIG_DEBUG_FS)
+       drm_debugfs_cleanup(new_minor);
 err_mem:
+#endif
        kfree(new_minor);
 err_idr:
        idr_remove(&drm_minors_idr, minor_id);
@@ -397,10 +381,6 @@ EXPORT_SYMBOL(drm_get_minor);
  *
  * \param sec_minor - structure to be released
  * \return always zero
- *
- * Cleans up the proc resources. Not legal for this to be the
- * last minor released.
- *
  */
 int drm_put_minor(struct drm_minor **minor_p)
 {
@@ -408,8 +388,6 @@ int drm_put_minor(struct drm_minor **minor_p)
 
        DRM_DEBUG("release secondary minor %d\n", minor->index);
 
-       if (minor->type == DRM_MINOR_LEGACY)
-               drm_proc_cleanup(minor, drm_proc_root);
 #if defined(CONFIG_DEBUG_FS)
        drm_debugfs_cleanup(minor);
 #endif
@@ -451,16 +429,11 @@ void drm_put_dev(struct drm_device *dev)
 
        drm_lastclose(dev);
 
-       if (drm_core_has_MTRR(dev) && drm_core_has_AGP(dev) && dev->agp)
-               arch_phys_wc_del(dev->agp->agp_mtrr);
-
        if (dev->driver->unload)
                dev->driver->unload(dev);
 
-       if (drm_core_has_AGP(dev) && dev->agp) {
-               kfree(dev->agp);
-               dev->agp = NULL;
-       }
+       if (dev->driver->bus->agp_destroy)
+               dev->driver->bus->agp_destroy(dev);
 
        drm_vblank_cleanup(dev);
 
@@ -468,7 +441,7 @@ void drm_put_dev(struct drm_device *dev)
                drm_rmmap(dev, r_list->map);
        drm_ht_remove(&dev->map_hash);
 
-       drm_ctxbitmap_cleanup(dev);
+       drm_legacy_ctxbitmap_cleanup(dev);
 
        if (drm_core_check_feature(dev, DRIVER_MODESET))
                drm_put_minor(&dev->control);
index feb20035b2c44d08ac217c0b48d40967f70f6463..b5c5af7328df200a2ff119bafeb92664bb1738cb 100644 (file)
@@ -251,8 +251,7 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
                        switch (map->type) {
                        case _DRM_REGISTERS:
                        case _DRM_FRAME_BUFFER:
-                               if (drm_core_has_MTRR(dev))
-                                       arch_phys_wc_del(map->mtrr);
+                               arch_phys_wc_del(map->mtrr);
                                iounmap(map->handle);
                                break;
                        case _DRM_SHM:
diff --git a/drivers/gpu/drm/drm_vma_manager.c b/drivers/gpu/drm/drm_vma_manager.c
new file mode 100644 (file)
index 0000000..3837481
--- /dev/null
@@ -0,0 +1,281 @@
+/*
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright (c) 2012 David Airlie <airlied@linux.ie>
+ * Copyright (c) 2013 David Herrmann <dh.herrmann@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_mm.h>
+#include <drm/drm_vma_manager.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/rbtree.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+/**
+ * DOC: vma offset manager
+ *
+ * The vma-manager is responsible for mapping arbitrary driver-dependent memory
+ * regions into the linear user address-space. It provides offsets to the
+ * caller which can then be used on the address_space of the drm-device. It
+ * takes care to not overlap regions, size them appropriately and to not
+ * confuse mm-core by inconsistent fake vm_pgoff fields.
+ * Drivers shouldn't use this for object placement in VMEM. This manager should
+ * only be used to manage mappings into linear user-space VMs.
+ *
+ * We use drm_mm as backend to manage object allocations. But it is highly
+ * optimized for alloc/free calls, not lookups. Hence, we use an rb-tree to
+ * speed up offset lookups.
+ *
+ * You must not use multiple offset managers on a single address_space.
+ * Otherwise, mm-core will be unable to tear down memory mappings as the VM will
+ * no longer be linear. Please use VM_NONLINEAR in that case and implement your
+ * own offset managers.
+ *
+ * This offset manager works on page-based addresses. That is, every argument
+ * and return code (with the exception of drm_vma_node_offset_addr()) is given
+ * in number of pages, not number of bytes. That means, object sizes and offsets
+ * must always be page-aligned (as usual).
+ * If you want to get a valid byte-based user-space address for a given offset,
+ * please see drm_vma_node_offset_addr().
+ */
+
+/**
+ * drm_vma_offset_manager_init - Initialize new offset-manager
+ * @mgr: Manager object
+ * @page_offset: Offset of available memory area (page-based)
+ * @size: Size of available address space range (page-based)
+ *
+ * Initialize a new offset-manager. The offset and area size available for the
+ * manager are given as @page_offset and @size. Both are interpreted as
+ * page-numbers, not bytes.
+ *
+ * Adding/removing nodes from the manager is locked internally and protected
+ * against concurrent access. However, node allocation and destruction is left
+ * for the caller. While calling into the vma-manager, a given node must
+ * always be guaranteed to be referenced.
+ */
+void drm_vma_offset_manager_init(struct drm_vma_offset_manager *mgr,
+                                unsigned long page_offset, unsigned long size)
+{
+       rwlock_init(&mgr->vm_lock);
+       mgr->vm_addr_space_rb = RB_ROOT;
+       drm_mm_init(&mgr->vm_addr_space_mm, page_offset, size);
+}
+EXPORT_SYMBOL(drm_vma_offset_manager_init);
+
+/**
+ * drm_vma_offset_manager_destroy() - Destroy offset manager
+ * @mgr: Manager object
+ *
+ * Destroy an object manager which was previously created via
+ * drm_vma_offset_manager_init(). The caller must remove all allocated nodes
+ * before destroying the manager. Otherwise, drm_mm will refuse to free the
+ * requested resources.
+ *
+ * The manager must not be accessed after this function is called.
+ */
+void drm_vma_offset_manager_destroy(struct drm_vma_offset_manager *mgr)
+{
+       /* take the lock to protect against buggy drivers */
+       write_lock(&mgr->vm_lock);
+       drm_mm_takedown(&mgr->vm_addr_space_mm);
+       write_unlock(&mgr->vm_lock);
+}
+EXPORT_SYMBOL(drm_vma_offset_manager_destroy);
+
+/**
+ * drm_vma_offset_lookup() - Find node in offset space
+ * @mgr: Manager object
+ * @start: Start address for object (page-based)
+ * @pages: Size of object (page-based)
+ *
+ * Find a node given a start address and object size. This returns the _best_
+ * match for the given node. That is, @start may point somewhere into a valid
+ * region and the given node will be returned, as long as the node spans the
+ * whole requested area (given the size in number of pages as @pages).
+ *
+ * RETURNS:
+ * Returns NULL if no suitable node can be found. Otherwise, the best match
+ * is returned. It's the caller's responsibility to make sure the node doesn't
+ * get destroyed before the caller can access it.
+ */
+struct drm_vma_offset_node *drm_vma_offset_lookup(struct drm_vma_offset_manager *mgr,
+                                                 unsigned long start,
+                                                 unsigned long pages)
+{
+       struct drm_vma_offset_node *node;
+
+       read_lock(&mgr->vm_lock);
+       node = drm_vma_offset_lookup_locked(mgr, start, pages);
+       read_unlock(&mgr->vm_lock);
+
+       return node;
+}
+EXPORT_SYMBOL(drm_vma_offset_lookup);
+
+/**
+ * drm_vma_offset_lookup_locked() - Find node in offset space
+ * @mgr: Manager object
+ * @start: Start address for object (page-based)
+ * @pages: Size of object (page-based)
+ *
+ * Same as drm_vma_offset_lookup() but requires the caller to lock offset lookup
+ * manually. See drm_vma_offset_lock_lookup() for an example.
+ *
+ * RETURNS:
+ * Returns NULL if no suitable node can be found. Otherwise, the best match
+ * is returned.
+ */
+struct drm_vma_offset_node *drm_vma_offset_lookup_locked(struct drm_vma_offset_manager *mgr,
+                                                        unsigned long start,
+                                                        unsigned long pages)
+{
+       struct drm_vma_offset_node *node, *best;
+       struct rb_node *iter;
+       unsigned long offset;
+
+       iter = mgr->vm_addr_space_rb.rb_node;
+       best = NULL;
+
+       while (likely(iter)) {
+               node = rb_entry(iter, struct drm_vma_offset_node, vm_rb);
+               offset = node->vm_node.start;
+               if (start >= offset) {
+                       iter = iter->rb_right;
+                       best = node;
+                       if (start == offset)
+                               break;
+               } else {
+                       iter = iter->rb_left;
+               }
+       }
+
+       /* verify that the node spans the requested area */
+       if (best) {
+               offset = best->vm_node.start + best->vm_node.size;
+               if (offset < start + pages)
+                       best = NULL;
+       }
+
+       return best;
+}
+EXPORT_SYMBOL(drm_vma_offset_lookup_locked);
+
+/* internal helper to link @node into the rb-tree */
+static void _drm_vma_offset_add_rb(struct drm_vma_offset_manager *mgr,
+                                  struct drm_vma_offset_node *node)
+{
+       struct rb_node **iter = &mgr->vm_addr_space_rb.rb_node;
+       struct rb_node *parent = NULL;
+       struct drm_vma_offset_node *iter_node;
+
+       while (likely(*iter)) {
+               parent = *iter;
+               iter_node = rb_entry(*iter, struct drm_vma_offset_node, vm_rb);
+
+               if (node->vm_node.start < iter_node->vm_node.start)
+                       iter = &(*iter)->rb_left;
+               else if (node->vm_node.start > iter_node->vm_node.start)
+                       iter = &(*iter)->rb_right;
+               else
+                       BUG();
+       }
+
+       rb_link_node(&node->vm_rb, parent, iter);
+       rb_insert_color(&node->vm_rb, &mgr->vm_addr_space_rb);
+}
+
+/**
+ * drm_vma_offset_add() - Add offset node to manager
+ * @mgr: Manager object
+ * @node: Node to be added
+ * @pages: Allocation size visible to user-space (in number of pages)
+ *
+ * Add a node to the offset-manager. If the node was already added, this does
+ * nothing and returns 0. @pages is the size of the object given in number of
+ * pages.
+ * After this call succeeds, you can access the offset of the node until it
+ * is removed again.
+ *
+ * If this call fails, it is safe to retry the operation or call
+ * drm_vma_offset_remove(), anyway. However, no cleanup is required in that
+ * case.
+ *
+ * @pages is not required to be the same size as the underlying memory object
+ * that you want to map. It only limits the size that user-space can map into
+ * their address space.
+ *
+ * RETURNS:
+ * 0 on success, negative error code on failure.
+ */
+int drm_vma_offset_add(struct drm_vma_offset_manager *mgr,
+                      struct drm_vma_offset_node *node, unsigned long pages)
+{
+       int ret;
+
+       write_lock(&mgr->vm_lock);
+
+       if (drm_mm_node_allocated(&node->vm_node)) {
+               ret = 0;
+               goto out_unlock;
+       }
+
+       ret = drm_mm_insert_node(&mgr->vm_addr_space_mm, &node->vm_node,
+                                pages, 0, DRM_MM_SEARCH_DEFAULT);
+       if (ret)
+               goto out_unlock;
+
+       _drm_vma_offset_add_rb(mgr, node);
+
+out_unlock:
+       write_unlock(&mgr->vm_lock);
+       return ret;
+}
+EXPORT_SYMBOL(drm_vma_offset_add);
+
+/**
+ * drm_vma_offset_remove() - Remove offset node from manager
+ * @mgr: Manager object
+ * @node: Node to be removed
+ *
+ * Remove a node from the offset manager. If the node wasn't added before, this
+ * does nothing. After this call returns, the offset and size will be 0 until a
+ * new offset is allocated via drm_vma_offset_add() again. Helper functions like
+ * drm_vma_node_start() and drm_vma_node_offset_addr() will return 0 if no
+ * offset is allocated.
+ */
+void drm_vma_offset_remove(struct drm_vma_offset_manager *mgr,
+                          struct drm_vma_offset_node *node)
+{
+       write_lock(&mgr->vm_lock);
+
+       if (drm_mm_node_allocated(&node->vm_node)) {
+               rb_erase(&node->vm_rb, &mgr->vm_addr_space_rb);
+               drm_mm_remove_node(&node->vm_node);
+               memset(&node->vm_node, 0, sizeof(node->vm_node));
+       }
+
+       write_unlock(&mgr->vm_lock);
+}
+EXPORT_SYMBOL(drm_vma_offset_remove);
index a0f997e0cbdf77b21021098914babdeea3cc85f3..fd76449cf452eb5bd0c90fda808bdb6543e7f477 100644 (file)
@@ -22,6 +22,11 @@ struct exynos_drm_dmabuf_attachment {
        bool is_mapped;
 };
 
+static struct exynos_drm_gem_obj *dma_buf_to_obj(struct dma_buf *buf)
+{
+       return to_exynos_gem_obj(buf->priv);
+}
+
 static int exynos_gem_attach_dma_buf(struct dma_buf *dmabuf,
                                        struct device *dev,
                                        struct dma_buf_attachment *attach)
@@ -63,7 +68,7 @@ static struct sg_table *
                                        enum dma_data_direction dir)
 {
        struct exynos_drm_dmabuf_attachment *exynos_attach = attach->priv;
-       struct exynos_drm_gem_obj *gem_obj = attach->dmabuf->priv;
+       struct exynos_drm_gem_obj *gem_obj = dma_buf_to_obj(attach->dmabuf);
        struct drm_device *dev = gem_obj->base.dev;
        struct exynos_drm_gem_buf *buf;
        struct scatterlist *rd, *wr;
@@ -127,27 +132,6 @@ static void exynos_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
        /* Nothing to do. */
 }
 
-static void exynos_dmabuf_release(struct dma_buf *dmabuf)
-{
-       struct exynos_drm_gem_obj *exynos_gem_obj = dmabuf->priv;
-
-       /*
-        * exynos_dmabuf_release() call means that file object's
-        * f_count is 0 and it calls drm_gem_object_handle_unreference()
-        * to drop the references that these values had been increased
-        * at drm_prime_handle_to_fd()
-        */
-       if (exynos_gem_obj->base.export_dma_buf == dmabuf) {
-               exynos_gem_obj->base.export_dma_buf = NULL;
-
-               /*
-                * drop this gem object refcount to release allocated buffer
-                * and resources.
-                */
-               drm_gem_object_unreference_unlocked(&exynos_gem_obj->base);
-       }
-}
-
 static void *exynos_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
                                                unsigned long page_num)
 {
@@ -193,7 +177,7 @@ static struct dma_buf_ops exynos_dmabuf_ops = {
        .kunmap                 = exynos_gem_dmabuf_kunmap,
        .kunmap_atomic          = exynos_gem_dmabuf_kunmap_atomic,
        .mmap                   = exynos_gem_dmabuf_mmap,
-       .release                = exynos_dmabuf_release,
+       .release                = drm_gem_dmabuf_release,
 };
 
 struct dma_buf *exynos_dmabuf_prime_export(struct drm_device *drm_dev,
@@ -201,7 +185,7 @@ struct dma_buf *exynos_dmabuf_prime_export(struct drm_device *drm_dev,
 {
        struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
 
-       return dma_buf_export(exynos_gem_obj, &exynos_dmabuf_ops,
+       return dma_buf_export(obj, &exynos_dmabuf_ops,
                                exynos_gem_obj->base.size, flags);
 }
 
@@ -219,8 +203,7 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
        if (dma_buf->ops == &exynos_dmabuf_ops) {
                struct drm_gem_object *obj;
 
-               exynos_gem_obj = dma_buf->priv;
-               obj = &exynos_gem_obj->base;
+               obj = dma_buf->priv;
 
                /* is it from our device? */
                if (obj->dev == drm_dev) {
index ca2729a851299ade4f58e148ba08d165f26e51f3..df81d3c959b416862ccda0b7bfadcb81d1b61d3e 100644 (file)
@@ -213,7 +213,7 @@ static const struct vm_operations_struct exynos_drm_gem_vm_ops = {
        .close = drm_gem_vm_close,
 };
 
-static struct drm_ioctl_desc exynos_ioctls[] = {
+static const struct drm_ioctl_desc exynos_ioctls[] = {
        DRM_IOCTL_DEF_DRV(EXYNOS_GEM_CREATE, exynos_drm_gem_create_ioctl,
                        DRM_UNLOCKED | DRM_AUTH),
        DRM_IOCTL_DEF_DRV(EXYNOS_GEM_MAP_OFFSET,
@@ -271,12 +271,13 @@ static struct drm_driver exynos_drm_driver = {
        .gem_vm_ops             = &exynos_drm_gem_vm_ops,
        .dumb_create            = exynos_drm_gem_dumb_create,
        .dumb_map_offset        = exynos_drm_gem_dumb_map_offset,
-       .dumb_destroy           = exynos_drm_gem_dumb_destroy,
+       .dumb_destroy           = drm_gem_dumb_destroy,
        .prime_handle_to_fd     = drm_gem_prime_handle_to_fd,
        .prime_fd_to_handle     = drm_gem_prime_fd_to_handle,
        .gem_prime_export       = exynos_dmabuf_prime_export,
        .gem_prime_import       = exynos_dmabuf_prime_import,
        .ioctls                 = exynos_ioctls,
+       .num_ioctls             = ARRAY_SIZE(exynos_ioctls),
        .fops                   = &exynos_drm_driver_fops,
        .name   = DRIVER_NAME,
        .desc   = DRIVER_DESC,
@@ -288,7 +289,6 @@ static struct drm_driver exynos_drm_driver = {
 static int exynos_drm_platform_probe(struct platform_device *pdev)
 {
        pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
-       exynos_drm_driver.num_ioctls = DRM_ARRAY_SIZE(exynos_ioctls);
 
        return drm_platform_init(&exynos_drm_driver, pdev);
 }
index 24c22a8c3364787da406931aef1f34a607212f04..f3c6f40666e1f58f214e94c94d03c1ed8ac523a7 100644 (file)
@@ -10,6 +10,7 @@
  */
 
 #include <drm/drmP.h>
+#include <drm/drm_vma_manager.h>
 
 #include <linux/shmem_fs.h>
 #include <drm/exynos_drm.h>
@@ -135,7 +136,7 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
        obj = &exynos_gem_obj->base;
        buf = exynos_gem_obj->buffer;
 
-       DRM_DEBUG_KMS("handle count = %d\n", atomic_read(&obj->handle_count));
+       DRM_DEBUG_KMS("handle count = %d\n", obj->handle_count);
 
        /*
         * do not release memory region from exporter.
@@ -152,8 +153,7 @@ out:
        exynos_drm_fini_buf(obj->dev, buf);
        exynos_gem_obj->buffer = NULL;
 
-       if (obj->map_list.map)
-               drm_gem_free_mmap_offset(obj);
+       drm_gem_free_mmap_offset(obj);
 
        /* release file pointer to gem object. */
        drm_gem_object_release(obj);
@@ -703,13 +703,11 @@ int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
                goto unlock;
        }
 
-       if (!obj->map_list.map) {
-               ret = drm_gem_create_mmap_offset(obj);
-               if (ret)
-                       goto out;
-       }
+       ret = drm_gem_create_mmap_offset(obj);
+       if (ret)
+               goto out;
 
-       *offset = (u64)obj->map_list.hash.key << PAGE_SHIFT;
+       *offset = drm_vma_node_offset_addr(&obj->vma_node);
        DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset);
 
 out:
@@ -719,26 +717,6 @@ unlock:
        return ret;
 }
 
-int exynos_drm_gem_dumb_destroy(struct drm_file *file_priv,
-                               struct drm_device *dev,
-                               unsigned int handle)
-{
-       int ret;
-
-       /*
-        * obj->refcount and obj->handle_count are decreased and
-        * if both them are 0 then exynos_drm_gem_free_object()
-        * would be called by callback to release resources.
-        */
-       ret = drm_gem_handle_delete(file_priv, handle);
-       if (ret < 0) {
-               DRM_ERROR("failed to delete drm_gem_handle.\n");
-               return ret;
-       }
-
-       return 0;
-}
-
 int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
        struct drm_gem_object *obj = vma->vm_private_data;
index 468766bee450837eefe19f37db7e7fdcc00133c6..09555afdfe9c6bf94fd798d2e899d4e46a1e8042 100644 (file)
@@ -151,15 +151,6 @@ int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
                                   struct drm_device *dev, uint32_t handle,
                                   uint64_t *offset);
 
-/*
- * destroy memory region allocated.
- *     - a gem handle and physical memory region pointed by a gem object
- *     would be released by drm_gem_handle_delete().
- */
-int exynos_drm_gem_dumb_destroy(struct drm_file *file_priv,
-                               struct drm_device *dev,
-                               unsigned int handle);
-
 /* page fault handler and mmap fault address(virtual) to physical memory. */
 int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
 
index 8b1b6d923abe82ab8119ce7134d3a45b681a54f6..362dd2ad286fb57397c91dba8f723038c7ca7035 100644 (file)
@@ -321,10 +321,8 @@ static struct gtt_range *psbfb_alloc(struct drm_device *dev, int aligned_size)
        /* Begin by trying to use stolen memory backing */
        backing = psb_gtt_alloc_range(dev, aligned_size, "fb", 1);
        if (backing) {
-               if (drm_gem_private_object_init(dev,
-                                       &backing->gem, aligned_size) == 0)
-                       return backing;
-               psb_gtt_free_range(dev, backing);
+               drm_gem_private_object_init(dev, &backing->gem, aligned_size);
+               return backing;
        }
        return NULL;
 }
index eefd6cc5b80d3c70abf84bc635fd5b668f504959..10ae8c52d06f820b5a1ea5bfb9257ca4ee2d40b7 100644 (file)
@@ -26,6 +26,7 @@
 #include <drm/drmP.h>
 #include <drm/drm.h>
 #include <drm/gma_drm.h>
+#include <drm/drm_vma_manager.h>
 #include "psb_drv.h"
 
 int psb_gem_init_object(struct drm_gem_object *obj)
@@ -38,8 +39,7 @@ void psb_gem_free_object(struct drm_gem_object *obj)
        struct gtt_range *gtt = container_of(obj, struct gtt_range, gem);
 
        /* Remove the list map if one is present */
-       if (obj->map_list.map)
-               drm_gem_free_mmap_offset(obj);
+       drm_gem_free_mmap_offset(obj);
        drm_gem_object_release(obj);
 
        /* This must occur last as it frees up the memory of the GEM object */
@@ -81,13 +81,10 @@ int psb_gem_dumb_map_gtt(struct drm_file *file, struct drm_device *dev,
        /* What validation is needed here ? */
 
        /* Make it mmapable */
-       if (!obj->map_list.map) {
-               ret = drm_gem_create_mmap_offset(obj);
-               if (ret)
-                       goto out;
-       }
-       /* GEM should really work out the hash offsets for us */
-       *offset = (u64)obj->map_list.hash.key << PAGE_SHIFT;
+       ret = drm_gem_create_mmap_offset(obj);
+       if (ret)
+               goto out;
+       *offset = drm_vma_node_offset_addr(&obj->vma_node);
 out:
        drm_gem_object_unreference(obj);
 unlock:
@@ -164,23 +161,6 @@ int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
        return psb_gem_create(file, dev, args->size, &args->handle);
 }
 
-/**
- *     psb_gem_dumb_destroy    -       destroy a dumb buffer
- *     @file: client file
- *     @dev: our DRM device
- *     @handle: the object handle
- *
- *     Destroy a handle that was created via psb_gem_dumb_create, at least
- *     we hope it was created that way. i915 seems to assume the caller
- *     does the checking but that might be worth review ! FIXME
- */
-int psb_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
-                       uint32_t handle)
-{
-       /* No special work needed, drop the reference and see what falls out */
-       return drm_gem_handle_delete(file, handle);
-}
-
 /**
  *     psb_gem_fault           -       pagefault handler for GEM objects
  *     @vma: the VMA of the GEM object
@@ -261,11 +241,12 @@ static int psb_gem_create_stolen(struct drm_file *file, struct drm_device *dev,
        struct gtt_range *gtt = psb_gtt_alloc_range(dev, size, "gem", 1);
        if (gtt == NULL)
                return -ENOMEM;
-       if (drm_gem_private_object_init(dev, &gtt->gem, size) != 0)
-               goto free_gtt;
+
+       drm_gem_private_object_init(dev, &gtt->gem, size);
        if (drm_gem_handle_create(file, &gtt->gem, handle) == 0)
                return 0;
-free_gtt:
+
+       drm_gem_object_release(&gtt->gem);
        psb_gtt_free_range(dev, gtt);
        return -ENOMEM;
 }
index 1f82183536a32014f0d73af7fea740489476ab35..92babac362ec0b85b6f1e52f05a6402411b43d1e 100644 (file)
@@ -196,37 +196,17 @@ void psb_gtt_roll(struct drm_device *dev, struct gtt_range *r, int roll)
  */
 static int psb_gtt_attach_pages(struct gtt_range *gt)
 {
-       struct inode *inode;
-       struct address_space *mapping;
-       int i;
-       struct page *p;
-       int pages = gt->gem.size / PAGE_SIZE;
+       struct page **pages;
 
        WARN_ON(gt->pages);
 
-       /* This is the shared memory object that backs the GEM resource */
-       inode = file_inode(gt->gem.filp);
-       mapping = inode->i_mapping;
+       pages = drm_gem_get_pages(&gt->gem, 0);
+       if (IS_ERR(pages))
+               return PTR_ERR(pages);
 
-       gt->pages = kmalloc(pages * sizeof(struct page *), GFP_KERNEL);
-       if (gt->pages == NULL)
-               return -ENOMEM;
-       gt->npage = pages;
+       gt->pages = pages;
 
-       for (i = 0; i < pages; i++) {
-               p = shmem_read_mapping_page(mapping, i);
-               if (IS_ERR(p))
-                       goto err;
-               gt->pages[i] = p;
-       }
        return 0;
-
-err:
-       while (i--)
-               page_cache_release(gt->pages[i]);
-       kfree(gt->pages);
-       gt->pages = NULL;
-       return PTR_ERR(p);
 }
 
 /**
@@ -240,13 +220,7 @@ err:
  */
 static void psb_gtt_detach_pages(struct gtt_range *gt)
 {
-       int i;
-       for (i = 0; i < gt->npage; i++) {
-               /* FIXME: do we need to force dirty */
-               set_page_dirty(gt->pages[i]);
-               page_cache_release(gt->pages[i]);
-       }
-       kfree(gt->pages);
+       drm_gem_put_pages(&gt->gem, gt->pages, true, false);
        gt->pages = NULL;
 }
 
index bddea5807442d6faf2b94565a6c42baf3b230d92..1383e75acf2581c68a7cf41d5d1da6a685c2ce24 100644 (file)
@@ -131,7 +131,7 @@ static int psb_gamma_ioctl(struct drm_device *dev, void *data,
 static int psb_dpst_bl_ioctl(struct drm_device *dev, void *data,
                             struct drm_file *file_priv);
 
-static struct drm_ioctl_desc psb_ioctls[] = {
+static const struct drm_ioctl_desc psb_ioctls[] = {
        DRM_IOCTL_DEF_DRV(GMA_ADB, psb_adb_ioctl, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(GMA_MODE_OPERATION, psb_mode_operation_ioctl,
                      DRM_AUTH),
@@ -622,13 +622,12 @@ static const struct file_operations psb_gem_fops = {
        .unlocked_ioctl = psb_unlocked_ioctl,
        .mmap = drm_gem_mmap,
        .poll = drm_poll,
-       .fasync = drm_fasync,
        .read = drm_read,
 };
 
 static struct drm_driver driver = {
        .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | \
-                          DRIVER_IRQ_VBL | DRIVER_MODESET | DRIVER_GEM ,
+                          DRIVER_MODESET | DRIVER_GEM ,
        .load = psb_driver_load,
        .unload = psb_driver_unload,
 
@@ -652,7 +651,7 @@ static struct drm_driver driver = {
        .gem_vm_ops = &psb_gem_vm_ops,
        .dumb_create = psb_gem_dumb_create,
        .dumb_map_offset = psb_gem_dumb_map_gtt,
-       .dumb_destroy = psb_gem_dumb_destroy,
+       .dumb_destroy = drm_gem_dumb_destroy,
        .fops = &psb_gem_fops,
        .name = DRIVER_NAME,
        .desc = DRIVER_DESC,
index 6053b8abcd12cc3da00f5b930829f96b15ab2ebf..984cacfcbaf24a1746c9a2303d8379baa26be8a9 100644 (file)
@@ -838,8 +838,6 @@ extern int psb_gem_get_aperture(struct drm_device *dev, void *data,
                        struct drm_file *file);
 extern int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
                        struct drm_mode_create_dumb *args);
-extern int psb_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
-                       uint32_t handle);
 extern int psb_gem_dumb_map_gtt(struct drm_file *file, struct drm_device *dev,
                        uint32_t handle, uint64_t *offset);
 extern int psb_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
index e68b58a1aaf9d20ad76cfe4c5b70ed06d2744fd5..c2bd711e86e9ad26a3c95842ac93e7f58f97df8d 100644 (file)
@@ -23,7 +23,7 @@
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_encoder_slave.h>
 #include <drm/drm_edid.h>
-
+#include <drm/i2c/tda998x.h>
 
 #define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
 
@@ -32,6 +32,11 @@ struct tda998x_priv {
        uint16_t rev;
        uint8_t current_page;
        int dpms;
+       bool is_hdmi_sink;
+       u8 vip_cntrl_0;
+       u8 vip_cntrl_1;
+       u8 vip_cntrl_2;
+       struct tda998x_encoder_params params;
 };
 
 #define to_tda998x_priv(x)  ((struct tda998x_priv *)to_encoder_slave(x)->slave_priv)
@@ -68,10 +73,13 @@ struct tda998x_priv {
 # define I2C_MASTER_DIS_MM        (1 << 0)
 # define I2C_MASTER_DIS_FILT      (1 << 1)
 # define I2C_MASTER_APP_STRT_LAT  (1 << 2)
+#define REG_FEAT_POWERDOWN        REG(0x00, 0x0e)     /* read/write */
+# define FEAT_POWERDOWN_SPDIF     (1 << 3)
 #define REG_INT_FLAGS_0           REG(0x00, 0x0f)     /* read/write */
 #define REG_INT_FLAGS_1           REG(0x00, 0x10)     /* read/write */
 #define REG_INT_FLAGS_2           REG(0x00, 0x11)     /* read/write */
 # define INT_FLAGS_2_EDID_BLK_RD  (1 << 1)
+#define REG_ENA_ACLK              REG(0x00, 0x16)     /* read/write */
 #define REG_ENA_VP_0              REG(0x00, 0x18)     /* read/write */
 #define REG_ENA_VP_1              REG(0x00, 0x19)     /* read/write */
 #define REG_ENA_VP_2              REG(0x00, 0x1a)     /* read/write */
@@ -110,6 +118,8 @@ struct tda998x_priv {
 #define REG_VIP_CNTRL_5           REG(0x00, 0x25)     /* write */
 # define VIP_CNTRL_5_CKCASE       (1 << 0)
 # define VIP_CNTRL_5_SP_CNT(x)    (((x) & 3) << 1)
+#define REG_MUX_AP                REG(0x00, 0x26)     /* read/write */
+#define REG_MUX_VP_VIP_OUT        REG(0x00, 0x27)     /* read/write */
 #define REG_MAT_CONTRL            REG(0x00, 0x80)     /* write */
 # define MAT_CONTRL_MAT_SC(x)     (((x) & 3) << 0)
 # define MAT_CONTRL_MAT_BP        (1 << 2)
@@ -130,8 +140,12 @@ struct tda998x_priv {
 #define REG_VS_LINE_END_1_LSB     REG(0x00, 0xae)     /* write */
 #define REG_VS_PIX_END_1_MSB      REG(0x00, 0xaf)     /* write */
 #define REG_VS_PIX_END_1_LSB      REG(0x00, 0xb0)     /* write */
+#define REG_VS_LINE_STRT_2_MSB    REG(0x00, 0xb1)     /* write */
+#define REG_VS_LINE_STRT_2_LSB    REG(0x00, 0xb2)     /* write */
 #define REG_VS_PIX_STRT_2_MSB     REG(0x00, 0xb3)     /* write */
 #define REG_VS_PIX_STRT_2_LSB     REG(0x00, 0xb4)     /* write */
+#define REG_VS_LINE_END_2_MSB     REG(0x00, 0xb5)     /* write */
+#define REG_VS_LINE_END_2_LSB     REG(0x00, 0xb6)     /* write */
 #define REG_VS_PIX_END_2_MSB      REG(0x00, 0xb7)     /* write */
 #define REG_VS_PIX_END_2_LSB      REG(0x00, 0xb8)     /* write */
 #define REG_HS_PIX_START_MSB      REG(0x00, 0xb9)     /* write */
@@ -142,21 +156,29 @@ struct tda998x_priv {
 #define REG_VWIN_START_1_LSB      REG(0x00, 0xbe)     /* write */
 #define REG_VWIN_END_1_MSB        REG(0x00, 0xbf)     /* write */
 #define REG_VWIN_END_1_LSB        REG(0x00, 0xc0)     /* write */
+#define REG_VWIN_START_2_MSB      REG(0x00, 0xc1)     /* write */
+#define REG_VWIN_START_2_LSB      REG(0x00, 0xc2)     /* write */
+#define REG_VWIN_END_2_MSB        REG(0x00, 0xc3)     /* write */
+#define REG_VWIN_END_2_LSB        REG(0x00, 0xc4)     /* write */
 #define REG_DE_START_MSB          REG(0x00, 0xc5)     /* write */
 #define REG_DE_START_LSB          REG(0x00, 0xc6)     /* write */
 #define REG_DE_STOP_MSB           REG(0x00, 0xc7)     /* write */
 #define REG_DE_STOP_LSB           REG(0x00, 0xc8)     /* write */
 #define REG_TBG_CNTRL_0           REG(0x00, 0xca)     /* write */
+# define TBG_CNTRL_0_TOP_TGL      (1 << 0)
+# define TBG_CNTRL_0_TOP_SEL      (1 << 1)
+# define TBG_CNTRL_0_DE_EXT       (1 << 2)
+# define TBG_CNTRL_0_TOP_EXT      (1 << 3)
 # define TBG_CNTRL_0_FRAME_DIS    (1 << 5)
 # define TBG_CNTRL_0_SYNC_MTHD    (1 << 6)
 # define TBG_CNTRL_0_SYNC_ONCE    (1 << 7)
 #define REG_TBG_CNTRL_1           REG(0x00, 0xcb)     /* write */
-# define TBG_CNTRL_1_VH_TGL_0     (1 << 0)
-# define TBG_CNTRL_1_VH_TGL_1     (1 << 1)
-# define TBG_CNTRL_1_VH_TGL_2     (1 << 2)
-# define TBG_CNTRL_1_VHX_EXT_DE   (1 << 3)
-# define TBG_CNTRL_1_VHX_EXT_HS   (1 << 4)
-# define TBG_CNTRL_1_VHX_EXT_VS   (1 << 5)
+# define TBG_CNTRL_1_H_TGL        (1 << 0)
+# define TBG_CNTRL_1_V_TGL        (1 << 1)
+# define TBG_CNTRL_1_TGL_EN       (1 << 2)
+# define TBG_CNTRL_1_X_EXT        (1 << 3)
+# define TBG_CNTRL_1_H_EXT        (1 << 4)
+# define TBG_CNTRL_1_V_EXT        (1 << 5)
 # define TBG_CNTRL_1_DWIN_DIS     (1 << 6)
 #define REG_ENABLE_SPACE          REG(0x00, 0xd6)     /* write */
 #define REG_HVF_CNTRL_0           REG(0x00, 0xe4)     /* write */
@@ -171,6 +193,12 @@ struct tda998x_priv {
 # define HVF_CNTRL_1_PAD(x)       (((x) & 3) << 4)
 # define HVF_CNTRL_1_SEMI_PLANAR  (1 << 6)
 #define REG_RPT_CNTRL             REG(0x00, 0xf0)     /* write */
+#define REG_I2S_FORMAT            REG(0x00, 0xfc)     /* read/write */
+# define I2S_FORMAT(x)            (((x) & 3) << 0)
+#define REG_AIP_CLKSEL            REG(0x00, 0xfd)     /* write */
+# define AIP_CLKSEL_FS(x)         (((x) & 3) << 0)
+# define AIP_CLKSEL_CLK_POL(x)    (((x) & 1) << 2)
+# define AIP_CLKSEL_AIP(x)        (((x) & 7) << 3)
 
 
 /* Page 02h: PLL settings */
@@ -194,6 +222,12 @@ struct tda998x_priv {
 #define REG_PLL_SCGR1             REG(0x02, 0x09)     /* read/write */
 #define REG_PLL_SCGR2             REG(0x02, 0x0a)     /* read/write */
 #define REG_AUDIO_DIV             REG(0x02, 0x0e)     /* read/write */
+# define AUDIO_DIV_SERCLK_1       0
+# define AUDIO_DIV_SERCLK_2       1
+# define AUDIO_DIV_SERCLK_4       2
+# define AUDIO_DIV_SERCLK_8       3
+# define AUDIO_DIV_SERCLK_16      4
+# define AUDIO_DIV_SERCLK_32      5
 #define REG_SEL_CLK               REG(0x02, 0x11)     /* read/write */
 # define SEL_CLK_SEL_CLK1         (1 << 0)
 # define SEL_CLK_SEL_VRF_CLK(x)   (((x) & 3) << 1)
@@ -212,6 +246,11 @@ struct tda998x_priv {
 
 
 /* Page 10h: information frames and packets */
+#define REG_IF1_HB0               REG(0x10, 0x20)     /* read/write */
+#define REG_IF2_HB0               REG(0x10, 0x40)     /* read/write */
+#define REG_IF3_HB0               REG(0x10, 0x60)     /* read/write */
+#define REG_IF4_HB0               REG(0x10, 0x80)     /* read/write */
+#define REG_IF5_HB0               REG(0x10, 0xa0)     /* read/write */
 
 
 /* Page 11h: audio settings and content info packets */
@@ -221,14 +260,39 @@ struct tda998x_priv {
 # define AIP_CNTRL_0_LAYOUT       (1 << 2)
 # define AIP_CNTRL_0_ACR_MAN      (1 << 5)
 # define AIP_CNTRL_0_RST_CTS      (1 << 6)
+#define REG_CA_I2S                REG(0x11, 0x01)     /* read/write */
+# define CA_I2S_CA_I2S(x)         (((x) & 31) << 0)
+# define CA_I2S_HBR_CHSTAT        (1 << 6)
+#define REG_LATENCY_RD            REG(0x11, 0x04)     /* read/write */
+#define REG_ACR_CTS_0             REG(0x11, 0x05)     /* read/write */
+#define REG_ACR_CTS_1             REG(0x11, 0x06)     /* read/write */
+#define REG_ACR_CTS_2             REG(0x11, 0x07)     /* read/write */
+#define REG_ACR_N_0               REG(0x11, 0x08)     /* read/write */
+#define REG_ACR_N_1               REG(0x11, 0x09)     /* read/write */
+#define REG_ACR_N_2               REG(0x11, 0x0a)     /* read/write */
+#define REG_CTS_N                 REG(0x11, 0x0c)     /* read/write */
+# define CTS_N_K(x)               (((x) & 7) << 0)
+# define CTS_N_M(x)               (((x) & 3) << 4)
 #define REG_ENC_CNTRL             REG(0x11, 0x0d)     /* read/write */
 # define ENC_CNTRL_RST_ENC        (1 << 0)
 # define ENC_CNTRL_RST_SEL        (1 << 1)
 # define ENC_CNTRL_CTL_CODE(x)    (((x) & 3) << 2)
+#define REG_DIP_FLAGS             REG(0x11, 0x0e)     /* read/write */
+# define DIP_FLAGS_ACR            (1 << 0)
+# define DIP_FLAGS_GC             (1 << 1)
+#define REG_DIP_IF_FLAGS          REG(0x11, 0x0f)     /* read/write */
+# define DIP_IF_FLAGS_IF1         (1 << 1)
+# define DIP_IF_FLAGS_IF2         (1 << 2)
+# define DIP_IF_FLAGS_IF3         (1 << 3)
+# define DIP_IF_FLAGS_IF4         (1 << 4)
+# define DIP_IF_FLAGS_IF5         (1 << 5)
+#define REG_CH_STAT_B(x)          REG(0x11, 0x14 + (x)) /* read/write */
 
 
 /* Page 12h: HDCP and OTP */
 #define REG_TX3                   REG(0x12, 0x9a)     /* read/write */
+#define REG_TX4                   REG(0x12, 0x9b)     /* read/write */
+# define TX4_PD_RAM               (1 << 1)
 #define REG_TX33                  REG(0x12, 0xb8)     /* read/write */
 # define TX33_HDMI                (1 << 1)
 
@@ -338,6 +402,23 @@ fail:
        return ret;
 }
 
+static void
+reg_write_range(struct drm_encoder *encoder, uint16_t reg, uint8_t *p, int cnt)
+{
+       struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
+       uint8_t buf[cnt+1];
+       int ret;
+
+       buf[0] = REG2ADDR(reg);
+       memcpy(&buf[1], p, cnt);
+
+       set_page(encoder, reg);
+
+       ret = i2c_master_send(client, buf, cnt + 1);
+       if (ret < 0)
+               dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg);
+}
+
 static uint8_t
 reg_read(struct drm_encoder *encoder, uint16_t reg)
 {
@@ -406,13 +487,172 @@ tda998x_reset(struct drm_encoder *encoder)
        reg_write(encoder, REG_SERIALIZER,   0x00);
        reg_write(encoder, REG_BUFFER_OUT,   0x00);
        reg_write(encoder, REG_PLL_SCG1,     0x00);
-       reg_write(encoder, REG_AUDIO_DIV,    0x03);
+       reg_write(encoder, REG_AUDIO_DIV,    AUDIO_DIV_SERCLK_8);
        reg_write(encoder, REG_SEL_CLK,      SEL_CLK_SEL_CLK1 | SEL_CLK_ENA_SC_CLK);
        reg_write(encoder, REG_PLL_SCGN1,    0xfa);
        reg_write(encoder, REG_PLL_SCGN2,    0x00);
        reg_write(encoder, REG_PLL_SCGR1,    0x5b);
        reg_write(encoder, REG_PLL_SCGR2,    0x00);
        reg_write(encoder, REG_PLL_SCG2,     0x10);
+
+       /* Write the default value MUX register */
+       reg_write(encoder, REG_MUX_VP_VIP_OUT, 0x24);
+}
+
+static uint8_t tda998x_cksum(uint8_t *buf, size_t bytes)
+{
+       uint8_t sum = 0;
+
+       while (bytes--)
+               sum += *buf++;
+       return (255 - sum) + 1;
+}
+
+#define HB(x) (x)
+#define PB(x) (HB(2) + 1 + (x))
+
+static void
+tda998x_write_if(struct drm_encoder *encoder, uint8_t bit, uint16_t addr,
+                uint8_t *buf, size_t size)
+{
+       buf[PB(0)] = tda998x_cksum(buf, size);
+
+       reg_clear(encoder, REG_DIP_IF_FLAGS, bit);
+       reg_write_range(encoder, addr, buf, size);
+       reg_set(encoder, REG_DIP_IF_FLAGS, bit);
+}
+
+static void
+tda998x_write_aif(struct drm_encoder *encoder, struct tda998x_encoder_params *p)
+{
+       uint8_t buf[PB(5) + 1];
+
+       buf[HB(0)] = 0x84;
+       buf[HB(1)] = 0x01;
+       buf[HB(2)] = 10;
+       buf[PB(0)] = 0;
+       buf[PB(1)] = p->audio_frame[1] & 0x07; /* CC */
+       buf[PB(2)] = p->audio_frame[2] & 0x1c; /* SF */
+       buf[PB(4)] = p->audio_frame[4];
+       buf[PB(5)] = p->audio_frame[5] & 0xf8; /* DM_INH + LSV */
+
+       tda998x_write_if(encoder, DIP_IF_FLAGS_IF4, REG_IF4_HB0, buf,
+                        sizeof(buf));
+}
+
+static void
+tda998x_write_avi(struct drm_encoder *encoder, struct drm_display_mode *mode)
+{
+       uint8_t buf[PB(13) + 1];
+
+       memset(buf, 0, sizeof(buf));
+       buf[HB(0)] = 0x82;
+       buf[HB(1)] = 0x02;
+       buf[HB(2)] = 13;
+       buf[PB(4)] = drm_match_cea_mode(mode);
+
+       tda998x_write_if(encoder, DIP_IF_FLAGS_IF2, REG_IF2_HB0, buf,
+                        sizeof(buf));
+}
+
+static void tda998x_audio_mute(struct drm_encoder *encoder, bool on)
+{
+       if (on) {
+               reg_set(encoder, REG_SOFTRESET, SOFTRESET_AUDIO);
+               reg_clear(encoder, REG_SOFTRESET, SOFTRESET_AUDIO);
+               reg_set(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_FIFO);
+       } else {
+               reg_clear(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_FIFO);
+       }
+}
+
+static void
+tda998x_configure_audio(struct drm_encoder *encoder,
+               struct drm_display_mode *mode, struct tda998x_encoder_params *p)
+{
+       uint8_t buf[6], clksel_aip, clksel_fs, ca_i2s, cts_n, adiv;
+       uint32_t n;
+
+       /* Enable audio ports */
+       reg_write(encoder, REG_ENA_AP, p->audio_cfg);
+       reg_write(encoder, REG_ENA_ACLK, p->audio_clk_cfg);
+
+       /* Set audio input source */
+       switch (p->audio_format) {
+       case AFMT_SPDIF:
+               reg_write(encoder, REG_MUX_AP, 0x40);
+               clksel_aip = AIP_CLKSEL_AIP(0);
+               /* FS64SPDIF */
+               clksel_fs = AIP_CLKSEL_FS(2);
+               cts_n = CTS_N_M(3) | CTS_N_K(3);
+               ca_i2s = 0;
+               break;
+
+       case AFMT_I2S:
+               reg_write(encoder, REG_MUX_AP, 0x64);
+               clksel_aip = AIP_CLKSEL_AIP(1);
+               /* ACLK */
+               clksel_fs = AIP_CLKSEL_FS(0);
+               cts_n = CTS_N_M(3) | CTS_N_K(3);
+               ca_i2s = CA_I2S_CA_I2S(0);
+               break;
+       }
+
+       reg_write(encoder, REG_AIP_CLKSEL, clksel_aip);
+       reg_clear(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_LAYOUT);
+
+       /* Enable automatic CTS generation */
+       reg_clear(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_ACR_MAN);
+       reg_write(encoder, REG_CTS_N, cts_n);
+
+       /*
+        * Audio input somehow depends on HDMI line rate which is
+        * related to pixclk. Testing showed that modes with pixclk
+        * >100MHz need a larger divider while <40MHz need the default.
+        * There is no detailed info in the datasheet, so we just
+        * assume 100MHz requires larger divider.
+        */
+       if (mode->clock > 100000)
+               adiv = AUDIO_DIV_SERCLK_16;
+       else
+               adiv = AUDIO_DIV_SERCLK_8;
+       reg_write(encoder, REG_AUDIO_DIV, adiv);
+
+       /*
+        * This is the approximate value of N, which happens to be
+        * the recommended values for non-coherent clocks.
+        */
+       n = 128 * p->audio_sample_rate / 1000;
+
+       /* Write the CTS and N values */
+       buf[0] = 0x44;
+       buf[1] = 0x42;
+       buf[2] = 0x01;
+       buf[3] = n;
+       buf[4] = n >> 8;
+       buf[5] = n >> 16;
+       reg_write_range(encoder, REG_ACR_CTS_0, buf, 6);
+
+       /* Set CTS clock reference */
+       reg_write(encoder, REG_AIP_CLKSEL, clksel_aip | clksel_fs);
+
+       /* Reset CTS generator */
+       reg_set(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_CTS);
+       reg_clear(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_CTS);
+
+       /* Write the channel status */
+       buf[0] = 0x04;
+       buf[1] = 0x00;
+       buf[2] = 0x00;
+       buf[3] = 0xf1;
+       reg_write_range(encoder, REG_CH_STAT_B(0), buf, 4);
+
+       tda998x_audio_mute(encoder, true);
+       mdelay(20);
+       tda998x_audio_mute(encoder, false);
+
+       /* Write the audio information packet */
+       tda998x_write_aif(encoder, p);
 }
 
 /* DRM encoder functions */
@@ -420,6 +660,23 @@ tda998x_reset(struct drm_encoder *encoder)
 static void
 tda998x_encoder_set_config(struct drm_encoder *encoder, void *params)
 {
+       struct tda998x_priv *priv = to_tda998x_priv(encoder);
+       struct tda998x_encoder_params *p = params;
+
+       priv->vip_cntrl_0 = VIP_CNTRL_0_SWAP_A(p->swap_a) |
+                           (p->mirr_a ? VIP_CNTRL_0_MIRR_A : 0) |
+                           VIP_CNTRL_0_SWAP_B(p->swap_b) |
+                           (p->mirr_b ? VIP_CNTRL_0_MIRR_B : 0);
+       priv->vip_cntrl_1 = VIP_CNTRL_1_SWAP_C(p->swap_c) |
+                           (p->mirr_c ? VIP_CNTRL_1_MIRR_C : 0) |
+                           VIP_CNTRL_1_SWAP_D(p->swap_d) |
+                           (p->mirr_d ? VIP_CNTRL_1_MIRR_D : 0);
+       priv->vip_cntrl_2 = VIP_CNTRL_2_SWAP_E(p->swap_e) |
+                           (p->mirr_e ? VIP_CNTRL_2_MIRR_E : 0) |
+                           VIP_CNTRL_2_SWAP_F(p->swap_f) |
+                           (p->mirr_f ? VIP_CNTRL_2_MIRR_F : 0);
+
+       priv->params = *p;
 }
 
 static void
@@ -436,18 +693,14 @@ tda998x_encoder_dpms(struct drm_encoder *encoder, int mode)
 
        switch (mode) {
        case DRM_MODE_DPMS_ON:
-               /* enable audio and video ports */
-               reg_write(encoder, REG_ENA_AP, 0xff);
+               /* enable video ports, audio will be enabled later */
                reg_write(encoder, REG_ENA_VP_0, 0xff);
                reg_write(encoder, REG_ENA_VP_1, 0xff);
                reg_write(encoder, REG_ENA_VP_2, 0xff);
                /* set muxing after enabling ports: */
-               reg_write(encoder, REG_VIP_CNTRL_0,
-                               VIP_CNTRL_0_SWAP_A(2) | VIP_CNTRL_0_SWAP_B(3));
-               reg_write(encoder, REG_VIP_CNTRL_1,
-                               VIP_CNTRL_1_SWAP_C(0) | VIP_CNTRL_1_SWAP_D(1));
-               reg_write(encoder, REG_VIP_CNTRL_2,
-                               VIP_CNTRL_2_SWAP_E(4) | VIP_CNTRL_2_SWAP_F(5));
+               reg_write(encoder, REG_VIP_CNTRL_0, priv->vip_cntrl_0);
+               reg_write(encoder, REG_VIP_CNTRL_1, priv->vip_cntrl_1);
+               reg_write(encoder, REG_VIP_CNTRL_2, priv->vip_cntrl_2);
                break;
        case DRM_MODE_DPMS_OFF:
                /* disable audio and video ports */
@@ -494,43 +747,78 @@ tda998x_encoder_mode_set(struct drm_encoder *encoder,
                        struct drm_display_mode *adjusted_mode)
 {
        struct tda998x_priv *priv = to_tda998x_priv(encoder);
-       uint16_t hs_start, hs_end, line_start, line_end;
-       uint16_t vwin_start, vwin_end, de_start, de_end;
-       uint16_t ref_pix, ref_line, pix_start2;
+       uint16_t ref_pix, ref_line, n_pix, n_line;
+       uint16_t hs_pix_s, hs_pix_e;
+       uint16_t vs1_pix_s, vs1_pix_e, vs1_line_s, vs1_line_e;
+       uint16_t vs2_pix_s, vs2_pix_e, vs2_line_s, vs2_line_e;
+       uint16_t vwin1_line_s, vwin1_line_e;
+       uint16_t vwin2_line_s, vwin2_line_e;
+       uint16_t de_pix_s, de_pix_e;
        uint8_t reg, div, rep;
 
-       hs_start   = mode->hsync_start - mode->hdisplay;
-       hs_end     = mode->hsync_end - mode->hdisplay;
-       line_start = 1;
-       line_end   = 1 + mode->vsync_end - mode->vsync_start;
-       vwin_start = mode->vtotal - mode->vsync_start;
-       vwin_end   = vwin_start + mode->vdisplay;
-       de_start   = mode->htotal - mode->hdisplay;
-       de_end     = mode->htotal;
-
-       pix_start2 = 0;
-       if (mode->flags & DRM_MODE_FLAG_INTERLACE)
-               pix_start2 = (mode->htotal / 2) + hs_start;
-
-       /* TODO how is this value calculated?  It is 2 for all common
-        * formats in the tables in out of tree nxp driver (assuming
-        * I've properly deciphered their byzantine table system)
+       /*
+        * Internally TDA998x is using ITU-R BT.656 style sync but
+        * we get VESA style sync. TDA998x is using a reference pixel
+        * relative to ITU to sync to the input frame and for output
+        * sync generation. Currently, we are using reference detection
+        * from HS/VS, i.e. REFPIX/REFLINE denote frame start sync point
+        * which is position of rising VS with coincident rising HS.
+        *
+        * Now there is some issues to take care of:
+        * - HDMI data islands require sync-before-active
+        * - TDA998x register values must be > 0 to be enabled
+        * - REFLINE needs an additional offset of +1
+        * - REFPIX needs an addtional offset of +1 for UYUV and +3 for RGB
+        *
+        * So we add +1 to all horizontal and vertical register values,
+        * plus an additional +3 for REFPIX as we are using RGB input only.
         */
-       ref_line = 2;
-
-       /* this might changes for other color formats from the CRTC: */
-       ref_pix = 3 + hs_start;
+       n_pix        = mode->htotal;
+       n_line       = mode->vtotal;
+
+       hs_pix_e     = mode->hsync_end - mode->hdisplay;
+       hs_pix_s     = mode->hsync_start - mode->hdisplay;
+       de_pix_e     = mode->htotal;
+       de_pix_s     = mode->htotal - mode->hdisplay;
+       ref_pix      = 3 + hs_pix_s;
+
+       /*
+        * Attached LCD controllers may generate broken sync. Allow
+        * those to adjust the position of the rising VS edge by adding
+        * HSKEW to ref_pix.
+        */
+       if (adjusted_mode->flags & DRM_MODE_FLAG_HSKEW)
+               ref_pix += adjusted_mode->hskew;
+
+       if ((mode->flags & DRM_MODE_FLAG_INTERLACE) == 0) {
+               ref_line     = 1 + mode->vsync_start - mode->vdisplay;
+               vwin1_line_s = mode->vtotal - mode->vdisplay - 1;
+               vwin1_line_e = vwin1_line_s + mode->vdisplay;
+               vs1_pix_s    = vs1_pix_e = hs_pix_s;
+               vs1_line_s   = mode->vsync_start - mode->vdisplay;
+               vs1_line_e   = vs1_line_s +
+                              mode->vsync_end - mode->vsync_start;
+               vwin2_line_s = vwin2_line_e = 0;
+               vs2_pix_s    = vs2_pix_e  = 0;
+               vs2_line_s   = vs2_line_e = 0;
+       } else {
+               ref_line     = 1 + (mode->vsync_start - mode->vdisplay)/2;
+               vwin1_line_s = (mode->vtotal - mode->vdisplay)/2;
+               vwin1_line_e = vwin1_line_s + mode->vdisplay/2;
+               vs1_pix_s    = vs1_pix_e = hs_pix_s;
+               vs1_line_s   = (mode->vsync_start - mode->vdisplay)/2;
+               vs1_line_e   = vs1_line_s +
+                              (mode->vsync_end - mode->vsync_start)/2;
+               vwin2_line_s = vwin1_line_s + mode->vtotal/2;
+               vwin2_line_e = vwin2_line_s + mode->vdisplay/2;
+               vs2_pix_s    = vs2_pix_e = hs_pix_s + mode->htotal/2;
+               vs2_line_s   = vs1_line_s + mode->vtotal/2 ;
+               vs2_line_e   = vs2_line_s +
+                              (mode->vsync_end - mode->vsync_start)/2;
+       }
 
        div = 148500 / mode->clock;
 
-       DBG("clock=%d, div=%u", mode->clock, div);
-       DBG("hs_start=%u, hs_end=%u, line_start=%u, line_end=%u",
-                       hs_start, hs_end, line_start, line_end);
-       DBG("vwin_start=%u, vwin_end=%u, de_start=%u, de_end=%u",
-                       vwin_start, vwin_end, de_start, de_end);
-       DBG("ref_line=%u, ref_pix=%u, pix_start2=%u",
-                       ref_line, ref_pix, pix_start2);
-
        /* mute the audio FIFO: */
        reg_set(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_FIFO);
 
@@ -561,9 +849,6 @@ tda998x_encoder_mode_set(struct drm_encoder *encoder,
        reg_write(encoder, REG_PLL_SERIAL_2, PLL_SERIAL_2_SRL_NOSC(div) |
                        PLL_SERIAL_2_SRL_PR(rep));
 
-       reg_write16(encoder, REG_VS_PIX_STRT_2_MSB, pix_start2);
-       reg_write16(encoder, REG_VS_PIX_END_2_MSB, pix_start2);
-
        /* set color matrix bypass flag: */
        reg_set(encoder, REG_MAT_CONTRL, MAT_CONTRL_MAT_BP);
 
@@ -572,47 +857,75 @@ tda998x_encoder_mode_set(struct drm_encoder *encoder,
 
        reg_clear(encoder, REG_TBG_CNTRL_0, TBG_CNTRL_0_SYNC_MTHD);
 
+       /*
+        * Sync on rising HSYNC/VSYNC
+        */
        reg_write(encoder, REG_VIP_CNTRL_3, 0);
        reg_set(encoder, REG_VIP_CNTRL_3, VIP_CNTRL_3_SYNC_HS);
+
+       /*
+        * TDA19988 requires high-active sync at input stage,
+        * so invert low-active sync provided by master encoder here
+        */
+       if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+               reg_set(encoder, REG_VIP_CNTRL_3, VIP_CNTRL_3_H_TGL);
        if (mode->flags & DRM_MODE_FLAG_NVSYNC)
                reg_set(encoder, REG_VIP_CNTRL_3, VIP_CNTRL_3_V_TGL);
 
+       /*
+        * Always generate sync polarity relative to input sync and
+        * revert input stage toggled sync at output stage
+        */
+       reg = TBG_CNTRL_1_TGL_EN;
        if (mode->flags & DRM_MODE_FLAG_NHSYNC)
-               reg_set(encoder, REG_VIP_CNTRL_3, VIP_CNTRL_3_H_TGL);
+               reg |= TBG_CNTRL_1_H_TGL;
+       if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+               reg |= TBG_CNTRL_1_V_TGL;
+       reg_write(encoder, REG_TBG_CNTRL_1, reg);
 
        reg_write(encoder, REG_VIDFORMAT, 0x00);
-       reg_write16(encoder, REG_NPIX_MSB, mode->hdisplay - 1);
-       reg_write16(encoder, REG_NLINE_MSB, mode->vdisplay - 1);
-       reg_write16(encoder, REG_VS_LINE_STRT_1_MSB, line_start);
-       reg_write16(encoder, REG_VS_LINE_END_1_MSB, line_end);
-       reg_write16(encoder, REG_VS_PIX_STRT_1_MSB, hs_start);
-       reg_write16(encoder, REG_VS_PIX_END_1_MSB, hs_start);
-       reg_write16(encoder, REG_HS_PIX_START_MSB, hs_start);
-       reg_write16(encoder, REG_HS_PIX_STOP_MSB, hs_end);
-       reg_write16(encoder, REG_VWIN_START_1_MSB, vwin_start);
-       reg_write16(encoder, REG_VWIN_END_1_MSB, vwin_end);
-       reg_write16(encoder, REG_DE_START_MSB, de_start);
-       reg_write16(encoder, REG_DE_STOP_MSB, de_end);
+       reg_write16(encoder, REG_REFPIX_MSB, ref_pix);
+       reg_write16(encoder, REG_REFLINE_MSB, ref_line);
+       reg_write16(encoder, REG_NPIX_MSB, n_pix);
+       reg_write16(encoder, REG_NLINE_MSB, n_line);
+       reg_write16(encoder, REG_VS_LINE_STRT_1_MSB, vs1_line_s);
+       reg_write16(encoder, REG_VS_PIX_STRT_1_MSB, vs1_pix_s);
+       reg_write16(encoder, REG_VS_LINE_END_1_MSB, vs1_line_e);
+       reg_write16(encoder, REG_VS_PIX_END_1_MSB, vs1_pix_e);
+       reg_write16(encoder, REG_VS_LINE_STRT_2_MSB, vs2_line_s);
+       reg_write16(encoder, REG_VS_PIX_STRT_2_MSB, vs2_pix_s);
+       reg_write16(encoder, REG_VS_LINE_END_2_MSB, vs2_line_e);
+       reg_write16(encoder, REG_VS_PIX_END_2_MSB, vs2_pix_e);
+       reg_write16(encoder, REG_HS_PIX_START_MSB, hs_pix_s);
+       reg_write16(encoder, REG_HS_PIX_STOP_MSB, hs_pix_e);
+       reg_write16(encoder, REG_VWIN_START_1_MSB, vwin1_line_s);
+       reg_write16(encoder, REG_VWIN_END_1_MSB, vwin1_line_e);
+       reg_write16(encoder, REG_VWIN_START_2_MSB, vwin2_line_s);
+       reg_write16(encoder, REG_VWIN_END_2_MSB, vwin2_line_e);
+       reg_write16(encoder, REG_DE_START_MSB, de_pix_s);
+       reg_write16(encoder, REG_DE_STOP_MSB, de_pix_e);
 
        if (priv->rev == TDA19988) {
                /* let incoming pixels fill the active space (if any) */
                reg_write(encoder, REG_ENABLE_SPACE, 0x01);
        }
 
-       reg_write16(encoder, REG_REFPIX_MSB, ref_pix);
-       reg_write16(encoder, REG_REFLINE_MSB, ref_line);
-
-       reg = TBG_CNTRL_1_VHX_EXT_DE |
-                       TBG_CNTRL_1_VHX_EXT_HS |
-                       TBG_CNTRL_1_VHX_EXT_VS |
-                       TBG_CNTRL_1_DWIN_DIS | /* HDCP off */
-                       TBG_CNTRL_1_VH_TGL_2;
-       if (mode->flags & (DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC))
-               reg |= TBG_CNTRL_1_VH_TGL_0;
-       reg_set(encoder, REG_TBG_CNTRL_1, reg);
-
        /* must be last register set: */
        reg_clear(encoder, REG_TBG_CNTRL_0, TBG_CNTRL_0_SYNC_ONCE);
+
+       /* Only setup the info frames if the sink is HDMI */
+       if (priv->is_hdmi_sink) {
+               /* We need to turn HDMI HDCP stuff on to get audio through */
+               reg_clear(encoder, REG_TBG_CNTRL_1, TBG_CNTRL_1_DWIN_DIS);
+               reg_write(encoder, REG_ENC_CNTRL, ENC_CNTRL_CTL_CODE(1));
+               reg_set(encoder, REG_TX33, TX33_HDMI);
+
+               tda998x_write_avi(encoder, adjusted_mode);
+
+               if (priv->params.audio_cfg)
+                       tda998x_configure_audio(encoder, adjusted_mode,
+                                               &priv->params);
+       }
 }
 
 static enum drm_connector_status
@@ -673,6 +986,7 @@ read_edid_block(struct drm_encoder *encoder, uint8_t *buf, int blk)
 static uint8_t *
 do_get_edid(struct drm_encoder *encoder)
 {
+       struct tda998x_priv *priv = to_tda998x_priv(encoder);
        int j = 0, valid_extensions = 0;
        uint8_t *block, *new;
        bool print_bad_edid = drm_debug & DRM_UT_KMS;
@@ -680,6 +994,9 @@ do_get_edid(struct drm_encoder *encoder)
        if ((block = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL)
                return NULL;
 
+       if (priv->rev == TDA19988)
+               reg_clear(encoder, REG_TX4, TX4_PD_RAM);
+
        /* base block fetch */
        if (read_edid_block(encoder, block, 0))
                goto fail;
@@ -689,7 +1006,7 @@ do_get_edid(struct drm_encoder *encoder)
 
        /* if there's no extensions, we're done */
        if (block[0x7e] == 0)
-               return block;
+               goto done;
 
        new = krealloc(block, (block[0x7e] + 1) * EDID_LENGTH, GFP_KERNEL);
        if (!new)
@@ -716,9 +1033,15 @@ do_get_edid(struct drm_encoder *encoder)
                block = new;
        }
 
+done:
+       if (priv->rev == TDA19988)
+               reg_set(encoder, REG_TX4, TX4_PD_RAM);
+
        return block;
 
 fail:
+       if (priv->rev == TDA19988)
+               reg_set(encoder, REG_TX4, TX4_PD_RAM);
        dev_warn(encoder->dev->dev, "failed to read EDID\n");
        kfree(block);
        return NULL;
@@ -728,12 +1051,14 @@ static int
 tda998x_encoder_get_modes(struct drm_encoder *encoder,
                         struct drm_connector *connector)
 {
+       struct tda998x_priv *priv = to_tda998x_priv(encoder);
        struct edid *edid = (struct edid *)do_get_edid(encoder);
        int n = 0;
 
        if (edid) {
                drm_mode_connector_update_edid_property(connector, edid);
                n = drm_add_edid_modes(connector, edid);
+               priv->is_hdmi_sink = drm_detect_hdmi_monitor(edid);
                kfree(edid);
        }
 
@@ -807,6 +1132,10 @@ tda998x_encoder_init(struct i2c_client *client,
        if (!priv)
                return -ENOMEM;
 
+       priv->vip_cntrl_0 = VIP_CNTRL_0_SWAP_A(2) | VIP_CNTRL_0_SWAP_B(3);
+       priv->vip_cntrl_1 = VIP_CNTRL_1_SWAP_C(0) | VIP_CNTRL_1_SWAP_D(1);
+       priv->vip_cntrl_2 = VIP_CNTRL_2_SWAP_E(4) | VIP_CNTRL_2_SWAP_F(5);
+
        priv->current_page = 0;
        priv->cec = i2c_new_dummy(client->adapter, 0x34);
        priv->dpms = DRM_MODE_DPMS_OFF;
index ada49eda489fe661bbc202f4b43def6ec72e303c..ab1892eb10740fa2b3cc46fe7cba976940b6d186 100644 (file)
@@ -113,7 +113,6 @@ static const struct file_operations i810_buffer_fops = {
        .release = drm_release,
        .unlocked_ioctl = drm_ioctl,
        .mmap = i810_mmap_buffers,
-       .fasync = drm_fasync,
 #ifdef CONFIG_COMPAT
        .compat_ioctl = drm_compat_ioctl,
 #endif
@@ -1241,7 +1240,7 @@ int i810_driver_dma_quiescent(struct drm_device *dev)
        return 0;
 }
 
-struct drm_ioctl_desc i810_ioctls[] = {
+const struct drm_ioctl_desc i810_ioctls[] = {
        DRM_IOCTL_DEF_DRV(I810_INIT, i810_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I810_VERTEX, i810_dma_vertex, DRM_AUTH|DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I810_CLEAR, i810_clear_bufs, DRM_AUTH|DRM_UNLOCKED),
index 2e91fc3580b4a12c53aa6779366dae4f8dec685c..d8180d22ceddb38965e3c6f59367d75a8421dff2 100644 (file)
@@ -49,7 +49,6 @@ static const struct file_operations i810_driver_fops = {
        .unlocked_ioctl = drm_ioctl,
        .mmap = drm_mmap,
        .poll = drm_poll,
-       .fasync = drm_fasync,
 #ifdef CONFIG_COMPAT
        .compat_ioctl = drm_compat_ioctl,
 #endif
@@ -58,7 +57,7 @@ static const struct file_operations i810_driver_fops = {
 
 static struct drm_driver driver = {
        .driver_features =
-           DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | DRIVER_USE_MTRR |
+           DRIVER_USE_AGP | DRIVER_REQUIRE_AGP |
            DRIVER_HAVE_DMA,
        .dev_priv_size = sizeof(drm_i810_buf_priv_t),
        .load = i810_driver_load,
index 6e0acad9e0f556549621e7945a00d7cafd0abbac..d4d16eddd65110c18c557175d870c432c25d1570 100644 (file)
@@ -125,7 +125,7 @@ extern void i810_driver_preclose(struct drm_device *dev,
 extern int i810_driver_device_is_agp(struct drm_device *dev);
 
 extern long i810_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
-extern struct drm_ioctl_desc i810_ioctls[];
+extern const struct drm_ioctl_desc i810_ioctls[];
 extern int i810_max_ioctl;
 
 #define I810_BASE(reg)         ((unsigned long) \
index 40034ecefd3b977a435f661f5f8a057392fb9fe0..b8449a84a0dcab83295696322d718c49924d3f4a 100644 (file)
@@ -5,6 +5,7 @@
 ccflags-y := -Iinclude/drm
 i915-y := i915_drv.o i915_dma.o i915_irq.o \
          i915_debugfs.o \
+         i915_gpu_error.o \
           i915_suspend.o \
          i915_gem.o \
          i915_gem_context.o \
@@ -37,6 +38,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o \
          intel_sprite.o \
          intel_opregion.o \
          intel_sideband.o \
+         intel_uncore.o \
          dvo_ch7xxx.o \
          dvo_ch7017.o \
          dvo_ivch.o \
index 757e0fa110430b3eab1d2654b453e5f8f1260060..af42e94f68467534f3879965c930c0c47610690d 100644 (file)
@@ -307,7 +307,7 @@ static void ch7xxx_mode_set(struct intel_dvo_device *dvo,
                idf |= CH7xxx_IDF_HSP;
 
        if (mode->flags & DRM_MODE_FLAG_PVSYNC)
-               idf |= CH7xxx_IDF_HSP;
+               idf |= CH7xxx_IDF_VSP;
 
        ch7xxx_writeb(dvo, CH7xxx_IDF, idf);
 }
index 47d6c748057e446ab70c017d788d8d015c25430c..236d97e51c3a2b4c8724478e8763eed09f2d1b24 100644 (file)
@@ -30,7 +30,8 @@
 #include <linux/debugfs.h>
 #include <linux/slab.h>
 #include <linux/export.h>
-#include <generated/utsrelease.h>
+#include <linux/list_sort.h>
+#include <asm/msr-index.h>
 #include <drm/drmP.h>
 #include "intel_drv.h"
 #include "intel_ringbuffer.h"
@@ -90,41 +91,45 @@ static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
        }
 }
 
-static const char *cache_level_str(int type)
+static inline const char *get_global_flag(struct drm_i915_gem_object *obj)
 {
-       switch (type) {
-       case I915_CACHE_NONE: return " uncached";
-       case I915_CACHE_LLC: return " snooped (LLC)";
-       case I915_CACHE_LLC_MLC: return " snooped (LLC+MLC)";
-       default: return "";
-       }
+       return obj->has_global_gtt_mapping ? "g" : " ";
 }
 
 static void
 describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 {
-       seq_printf(m, "%pK: %s%s %8zdKiB %02x %02x %d %d %d%s%s%s",
+       struct i915_vma *vma;
+       seq_printf(m, "%pK: %s%s%s %8zdKiB %02x %02x %d %d %d%s%s%s",
                   &obj->base,
                   get_pin_flag(obj),
                   get_tiling_flag(obj),
+                  get_global_flag(obj),
                   obj->base.size / 1024,
                   obj->base.read_domains,
                   obj->base.write_domain,
                   obj->last_read_seqno,
                   obj->last_write_seqno,
                   obj->last_fenced_seqno,
-                  cache_level_str(obj->cache_level),
+                  i915_cache_level_str(obj->cache_level),
                   obj->dirty ? " dirty" : "",
                   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
        if (obj->base.name)
                seq_printf(m, " (name: %d)", obj->base.name);
        if (obj->pin_count)
                seq_printf(m, " (pinned x %d)", obj->pin_count);
+       if (obj->pin_display)
+               seq_printf(m, " (display)");
        if (obj->fence_reg != I915_FENCE_REG_NONE)
                seq_printf(m, " (fence: %d)", obj->fence_reg);
-       if (obj->gtt_space != NULL)
-               seq_printf(m, " (gtt offset: %08x, size: %08x)",
-                          obj->gtt_offset, (unsigned int)obj->gtt_space->size);
+       list_for_each_entry(vma, &obj->vma_list, vma_link) {
+               if (!i915_is_ggtt(vma->vm))
+                       seq_puts(m, " (pp");
+               else
+                       seq_puts(m, " (g");
+               seq_printf(m, "gtt offset: %08lx, size: %08lx)",
+                          vma->node.start, vma->node.size);
+       }
        if (obj->stolen)
                seq_printf(m, " (stolen: %08lx)", obj->stolen->start);
        if (obj->pin_mappable || obj->fault_mappable) {
@@ -146,8 +151,9 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
        uintptr_t list = (uintptr_t) node->info_ent->data;
        struct list_head *head;
        struct drm_device *dev = node->minor->dev;
-       drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_i915_gem_object *obj;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct i915_address_space *vm = &dev_priv->gtt.base;
+       struct i915_vma *vma;
        size_t total_obj_size, total_gtt_size;
        int count, ret;
 
@@ -155,14 +161,15 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
        if (ret)
                return ret;
 
+       /* FIXME: the user of this interface might want more than just GGTT */
        switch (list) {
        case ACTIVE_LIST:
-               seq_printf(m, "Active:\n");
-               head = &dev_priv->mm.active_list;
+               seq_puts(m, "Active:\n");
+               head = &vm->active_list;
                break;
        case INACTIVE_LIST:
-               seq_printf(m, "Inactive:\n");
-               head = &dev_priv->mm.inactive_list;
+               seq_puts(m, "Inactive:\n");
+               head = &vm->inactive_list;
                break;
        default:
                mutex_unlock(&dev->struct_mutex);
@@ -170,14 +177,75 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
        }
 
        total_obj_size = total_gtt_size = count = 0;
-       list_for_each_entry(obj, head, mm_list) {
+       list_for_each_entry(vma, head, mm_list) {
                seq_printf(m, "   ");
-               describe_obj(m, obj);
+               describe_obj(m, vma->obj);
                seq_printf(m, "\n");
+               total_obj_size += vma->obj->base.size;
+               total_gtt_size += vma->node.size;
+               count++;
+       }
+       mutex_unlock(&dev->struct_mutex);
+
+       seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
+                  count, total_obj_size, total_gtt_size);
+       return 0;
+}
+
+static int obj_rank_by_stolen(void *priv,
+                             struct list_head *A, struct list_head *B)
+{
+       struct drm_i915_gem_object *a =
+               container_of(A, struct drm_i915_gem_object, obj_exec_link);
+       struct drm_i915_gem_object *b =
+               container_of(B, struct drm_i915_gem_object, obj_exec_link);
+
+       return a->stolen->start - b->stolen->start;
+}
+
+static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
+{
+       struct drm_info_node *node = (struct drm_info_node *) m->private;
+       struct drm_device *dev = node->minor->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_gem_object *obj;
+       size_t total_obj_size, total_gtt_size;
+       LIST_HEAD(stolen);
+       int count, ret;
+
+       ret = mutex_lock_interruptible(&dev->struct_mutex);
+       if (ret)
+               return ret;
+
+       total_obj_size = total_gtt_size = count = 0;
+       list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+               if (obj->stolen == NULL)
+                       continue;
+
+               list_add(&obj->obj_exec_link, &stolen);
+
+               total_obj_size += obj->base.size;
+               total_gtt_size += i915_gem_obj_ggtt_size(obj);
+               count++;
+       }
+       list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
+               if (obj->stolen == NULL)
+                       continue;
+
+               list_add(&obj->obj_exec_link, &stolen);
+
                total_obj_size += obj->base.size;
-               total_gtt_size += obj->gtt_space->size;
                count++;
        }
+       list_sort(NULL, &stolen, obj_rank_by_stolen);
+       seq_puts(m, "Stolen:\n");
+       while (!list_empty(&stolen)) {
+               obj = list_first_entry(&stolen, typeof(*obj), obj_exec_link);
+               seq_puts(m, "   ");
+               describe_obj(m, obj);
+               seq_putc(m, '\n');
+               list_del_init(&obj->obj_exec_link);
+       }
        mutex_unlock(&dev->struct_mutex);
 
        seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
@@ -187,10 +255,10 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
 
 #define count_objects(list, member) do { \
        list_for_each_entry(obj, list, member) { \
-               size += obj->gtt_space->size; \
+               size += i915_gem_obj_ggtt_size(obj); \
                ++count; \
                if (obj->map_and_fenceable) { \
-                       mappable_size += obj->gtt_space->size; \
+                       mappable_size += i915_gem_obj_ggtt_size(obj); \
                        ++mappable_count; \
                } \
        } \
@@ -209,7 +277,7 @@ static int per_file_stats(int id, void *ptr, void *data)
        stats->count++;
        stats->total += obj->base.size;
 
-       if (obj->gtt_space) {
+       if (i915_gem_obj_ggtt_bound(obj)) {
                if (!list_empty(&obj->ring_list))
                        stats->active += obj->base.size;
                else
@@ -222,6 +290,17 @@ static int per_file_stats(int id, void *ptr, void *data)
        return 0;
 }
 
+#define count_vmas(list, member) do { \
+       list_for_each_entry(vma, list, member) { \
+               size += i915_gem_obj_ggtt_size(vma->obj); \
+               ++count; \
+               if (vma->obj->map_and_fenceable) { \
+                       mappable_size += i915_gem_obj_ggtt_size(vma->obj); \
+                       ++mappable_count; \
+               } \
+       } \
+} while (0)
+
 static int i915_gem_object_info(struct seq_file *m, void* data)
 {
        struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -230,7 +309,9 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
        u32 count, mappable_count, purgeable_count;
        size_t size, mappable_size, purgeable_size;
        struct drm_i915_gem_object *obj;
+       struct i915_address_space *vm = &dev_priv->gtt.base;
        struct drm_file *file;
+       struct i915_vma *vma;
        int ret;
 
        ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -247,12 +328,12 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
                   count, mappable_count, size, mappable_size);
 
        size = count = mappable_size = mappable_count = 0;
-       count_objects(&dev_priv->mm.active_list, mm_list);
+       count_vmas(&vm->active_list, mm_list);
        seq_printf(m, "  %u [%u] active objects, %zu [%zu] bytes\n",
                   count, mappable_count, size, mappable_size);
 
        size = count = mappable_size = mappable_count = 0;
-       count_objects(&dev_priv->mm.inactive_list, mm_list);
+       count_vmas(&vm->inactive_list, mm_list);
        seq_printf(m, "  %u [%u] inactive objects, %zu [%zu] bytes\n",
                   count, mappable_count, size, mappable_size);
 
@@ -267,11 +348,11 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
        size = count = mappable_size = mappable_count = 0;
        list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
                if (obj->fault_mappable) {
-                       size += obj->gtt_space->size;
+                       size += i915_gem_obj_ggtt_size(obj);
                        ++count;
                }
                if (obj->pin_mappable) {
-                       mappable_size += obj->gtt_space->size;
+                       mappable_size += i915_gem_obj_ggtt_size(obj);
                        ++mappable_count;
                }
                if (obj->madv == I915_MADV_DONTNEED) {
@@ -287,10 +368,10 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
                   count, size);
 
        seq_printf(m, "%zu [%lu] gtt total\n",
-                  dev_priv->gtt.total,
-                  dev_priv->gtt.mappable_end - dev_priv->gtt.start);
+                  dev_priv->gtt.base.total,
+                  dev_priv->gtt.mappable_end - dev_priv->gtt.base.start);
 
-       seq_printf(m, "\n");
+       seq_putc(m, '\n');
        list_for_each_entry_reverse(file, &dev->filelist, lhead) {
                struct file_stats stats;
 
@@ -310,7 +391,7 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
        return 0;
 }
 
-static int i915_gem_gtt_info(struct seq_file *m, void* data)
+static int i915_gem_gtt_info(struct seq_file *m, void *data)
 {
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
@@ -329,11 +410,11 @@ static int i915_gem_gtt_info(struct seq_file *m, void* data)
                if (list == PINNED_LIST && obj->pin_count == 0)
                        continue;
 
-               seq_printf(m, "   ");
+               seq_puts(m, "   ");
                describe_obj(m, obj);
-               seq_printf(m, "\n");
+               seq_putc(m, '\n');
                total_obj_size += obj->base.size;
-               total_gtt_size += obj->gtt_space->size;
+               total_gtt_size += i915_gem_obj_ggtt_size(obj);
                count++;
        }
 
@@ -371,20 +452,22 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
                                           pipe, plane);
                        }
                        if (work->enable_stall_check)
-                               seq_printf(m, "Stall check enabled, ");
+                               seq_puts(m, "Stall check enabled, ");
                        else
-                               seq_printf(m, "Stall check waiting for page flip ioctl, ");
+                               seq_puts(m, "Stall check waiting for page flip ioctl, ");
                        seq_printf(m, "%d prepares\n", atomic_read(&work->pending));
 
                        if (work->old_fb_obj) {
                                struct drm_i915_gem_object *obj = work->old_fb_obj;
                                if (obj)
-                                       seq_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj->gtt_offset);
+                                       seq_printf(m, "Old framebuffer gtt_offset 0x%08lx\n",
+                                                  i915_gem_obj_ggtt_offset(obj));
                        }
                        if (work->pending_flip_obj) {
                                struct drm_i915_gem_object *obj = work->pending_flip_obj;
                                if (obj)
-                                       seq_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj->gtt_offset);
+                                       seq_printf(m, "New framebuffer gtt_offset 0x%08lx\n",
+                                                  i915_gem_obj_ggtt_offset(obj));
                        }
                }
                spin_unlock_irqrestore(&dev->event_lock, flags);
@@ -424,7 +507,7 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
        mutex_unlock(&dev->struct_mutex);
 
        if (count == 0)
-               seq_printf(m, "No requests\n");
+               seq_puts(m, "No requests\n");
 
        return 0;
 }
@@ -574,10 +657,10 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
                seq_printf(m, "Fence %d, pin count = %d, object = ",
                           i, dev_priv->fence_regs[i].pin_count);
                if (obj == NULL)
-                       seq_printf(m, "unused");
+                       seq_puts(m, "unused");
                else
                        describe_obj(m, obj);
-               seq_printf(m, "\n");
+               seq_putc(m, '\n');
        }
 
        mutex_unlock(&dev->struct_mutex);
@@ -606,361 +689,6 @@ static int i915_hws_info(struct seq_file *m, void *data)
        return 0;
 }
 
-static const char *ring_str(int ring)
-{
-       switch (ring) {
-       case RCS: return "render";
-       case VCS: return "bsd";
-       case BCS: return "blt";
-       case VECS: return "vebox";
-       default: return "";
-       }
-}
-
-static const char *pin_flag(int pinned)
-{
-       if (pinned > 0)
-               return " P";
-       else if (pinned < 0)
-               return " p";
-       else
-               return "";
-}
-
-static const char *tiling_flag(int tiling)
-{
-       switch (tiling) {
-       default:
-       case I915_TILING_NONE: return "";
-       case I915_TILING_X: return " X";
-       case I915_TILING_Y: return " Y";
-       }
-}
-
-static const char *dirty_flag(int dirty)
-{
-       return dirty ? " dirty" : "";
-}
-
-static const char *purgeable_flag(int purgeable)
-{
-       return purgeable ? " purgeable" : "";
-}
-
-static bool __i915_error_ok(struct drm_i915_error_state_buf *e)
-{
-
-       if (!e->err && WARN(e->bytes > (e->size - 1), "overflow")) {
-               e->err = -ENOSPC;
-               return false;
-       }
-
-       if (e->bytes == e->size - 1 || e->err)
-               return false;
-
-       return true;
-}
-
-static bool __i915_error_seek(struct drm_i915_error_state_buf *e,
-                             unsigned len)
-{
-       if (e->pos + len <= e->start) {
-               e->pos += len;
-               return false;
-       }
-
-       /* First vsnprintf needs to fit in its entirety for memmove */
-       if (len >= e->size) {
-               e->err = -EIO;
-               return false;
-       }
-
-       return true;
-}
-
-static void __i915_error_advance(struct drm_i915_error_state_buf *e,
-                                unsigned len)
-{
-       /* If this is first printf in this window, adjust it so that
-        * start position matches start of the buffer
-        */
-
-       if (e->pos < e->start) {
-               const size_t off = e->start - e->pos;
-
-               /* Should not happen but be paranoid */
-               if (off > len || e->bytes) {
-                       e->err = -EIO;
-                       return;
-               }
-
-               memmove(e->buf, e->buf + off, len - off);
-               e->bytes = len - off;
-               e->pos = e->start;
-               return;
-       }
-
-       e->bytes += len;
-       e->pos += len;
-}
-
-static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
-                              const char *f, va_list args)
-{
-       unsigned len;
-
-       if (!__i915_error_ok(e))
-               return;
-
-       /* Seek the first printf which is hits start position */
-       if (e->pos < e->start) {
-               len = vsnprintf(NULL, 0, f, args);
-               if (!__i915_error_seek(e, len))
-                       return;
-       }
-
-       len = vsnprintf(e->buf + e->bytes, e->size - e->bytes, f, args);
-       if (len >= e->size - e->bytes)
-               len = e->size - e->bytes - 1;
-
-       __i915_error_advance(e, len);
-}
-
-static void i915_error_puts(struct drm_i915_error_state_buf *e,
-                           const char *str)
-{
-       unsigned len;
-
-       if (!__i915_error_ok(e))
-               return;
-
-       len = strlen(str);
-
-       /* Seek the first printf which is hits start position */
-       if (e->pos < e->start) {
-               if (!__i915_error_seek(e, len))
-                       return;
-       }
-
-       if (len >= e->size - e->bytes)
-               len = e->size - e->bytes - 1;
-       memcpy(e->buf + e->bytes, str, len);
-
-       __i915_error_advance(e, len);
-}
-
-void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
-{
-       va_list args;
-
-       va_start(args, f);
-       i915_error_vprintf(e, f, args);
-       va_end(args);
-}
-
-#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
-#define err_puts(e, s) i915_error_puts(e, s)
-
-static void print_error_buffers(struct drm_i915_error_state_buf *m,
-                               const char *name,
-                               struct drm_i915_error_buffer *err,
-                               int count)
-{
-       err_printf(m, "%s [%d]:\n", name, count);
-
-       while (count--) {
-               err_printf(m, "  %08x %8u %02x %02x %x %x",
-                          err->gtt_offset,
-                          err->size,
-                          err->read_domains,
-                          err->write_domain,
-                          err->rseqno, err->wseqno);
-               err_puts(m, pin_flag(err->pinned));
-               err_puts(m, tiling_flag(err->tiling));
-               err_puts(m, dirty_flag(err->dirty));
-               err_puts(m, purgeable_flag(err->purgeable));
-               err_puts(m, err->ring != -1 ? " " : "");
-               err_puts(m, ring_str(err->ring));
-               err_puts(m, cache_level_str(err->cache_level));
-
-               if (err->name)
-                       err_printf(m, " (name: %d)", err->name);
-               if (err->fence_reg != I915_FENCE_REG_NONE)
-                       err_printf(m, " (fence: %d)", err->fence_reg);
-
-               err_puts(m, "\n");
-               err++;
-       }
-}
-
-static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
-                                 struct drm_device *dev,
-                                 struct drm_i915_error_state *error,
-                                 unsigned ring)
-{
-       BUG_ON(ring >= I915_NUM_RINGS); /* shut up confused gcc */
-       err_printf(m, "%s command stream:\n", ring_str(ring));
-       err_printf(m, "  HEAD: 0x%08x\n", error->head[ring]);
-       err_printf(m, "  TAIL: 0x%08x\n", error->tail[ring]);
-       err_printf(m, "  CTL: 0x%08x\n", error->ctl[ring]);
-       err_printf(m, "  ACTHD: 0x%08x\n", error->acthd[ring]);
-       err_printf(m, "  IPEIR: 0x%08x\n", error->ipeir[ring]);
-       err_printf(m, "  IPEHR: 0x%08x\n", error->ipehr[ring]);
-       err_printf(m, "  INSTDONE: 0x%08x\n", error->instdone[ring]);
-       if (ring == RCS && INTEL_INFO(dev)->gen >= 4)
-               err_printf(m, "  BBADDR: 0x%08llx\n", error->bbaddr);
-
-       if (INTEL_INFO(dev)->gen >= 4)
-               err_printf(m, "  INSTPS: 0x%08x\n", error->instps[ring]);
-       err_printf(m, "  INSTPM: 0x%08x\n", error->instpm[ring]);
-       err_printf(m, "  FADDR: 0x%08x\n", error->faddr[ring]);
-       if (INTEL_INFO(dev)->gen >= 6) {
-               err_printf(m, "  RC PSMI: 0x%08x\n", error->rc_psmi[ring]);
-               err_printf(m, "  FAULT_REG: 0x%08x\n", error->fault_reg[ring]);
-               err_printf(m, "  SYNC_0: 0x%08x [last synced 0x%08x]\n",
-                          error->semaphore_mboxes[ring][0],
-                          error->semaphore_seqno[ring][0]);
-               err_printf(m, "  SYNC_1: 0x%08x [last synced 0x%08x]\n",
-                          error->semaphore_mboxes[ring][1],
-                          error->semaphore_seqno[ring][1]);
-       }
-       err_printf(m, "  seqno: 0x%08x\n", error->seqno[ring]);
-       err_printf(m, "  waiting: %s\n", yesno(error->waiting[ring]));
-       err_printf(m, "  ring->head: 0x%08x\n", error->cpu_ring_head[ring]);
-       err_printf(m, "  ring->tail: 0x%08x\n", error->cpu_ring_tail[ring]);
-}
-
-struct i915_error_state_file_priv {
-       struct drm_device *dev;
-       struct drm_i915_error_state *error;
-};
-
-
-static int i915_error_state(struct i915_error_state_file_priv *error_priv,
-                           struct drm_i915_error_state_buf *m)
-
-{
-       struct drm_device *dev = error_priv->dev;
-       drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_i915_error_state *error = error_priv->error;
-       struct intel_ring_buffer *ring;
-       int i, j, page, offset, elt;
-
-       if (!error) {
-               err_printf(m, "no error state collected\n");
-               return 0;
-       }
-
-       err_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
-                  error->time.tv_usec);
-       err_printf(m, "Kernel: " UTS_RELEASE "\n");
-       err_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
-       err_printf(m, "EIR: 0x%08x\n", error->eir);
-       err_printf(m, "IER: 0x%08x\n", error->ier);
-       err_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
-       err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
-       err_printf(m, "DERRMR: 0x%08x\n", error->derrmr);
-       err_printf(m, "CCID: 0x%08x\n", error->ccid);
-
-       for (i = 0; i < dev_priv->num_fence_regs; i++)
-               err_printf(m, "  fence[%d] = %08llx\n", i, error->fence[i]);
-
-       for (i = 0; i < ARRAY_SIZE(error->extra_instdone); i++)
-               err_printf(m, "  INSTDONE_%d: 0x%08x\n", i,
-                          error->extra_instdone[i]);
-
-       if (INTEL_INFO(dev)->gen >= 6) {
-               err_printf(m, "ERROR: 0x%08x\n", error->error);
-               err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
-       }
-
-       if (INTEL_INFO(dev)->gen == 7)
-               err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);
-
-       for_each_ring(ring, dev_priv, i)
-               i915_ring_error_state(m, dev, error, i);
-
-       if (error->active_bo)
-               print_error_buffers(m, "Active",
-                                   error->active_bo,
-                                   error->active_bo_count);
-
-       if (error->pinned_bo)
-               print_error_buffers(m, "Pinned",
-                                   error->pinned_bo,
-                                   error->pinned_bo_count);
-
-       for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
-               struct drm_i915_error_object *obj;
-
-               if ((obj = error->ring[i].batchbuffer)) {
-                       err_printf(m, "%s --- gtt_offset = 0x%08x\n",
-                                  dev_priv->ring[i].name,
-                                  obj->gtt_offset);
-                       offset = 0;
-                       for (page = 0; page < obj->page_count; page++) {
-                               for (elt = 0; elt < PAGE_SIZE/4; elt++) {
-                                       err_printf(m, "%08x :  %08x\n", offset,
-                                                  obj->pages[page][elt]);
-                                       offset += 4;
-                               }
-                       }
-               }
-
-               if (error->ring[i].num_requests) {
-                       err_printf(m, "%s --- %d requests\n",
-                                  dev_priv->ring[i].name,
-                                  error->ring[i].num_requests);
-                       for (j = 0; j < error->ring[i].num_requests; j++) {
-                               err_printf(m, "  seqno 0x%08x, emitted %ld, tail 0x%08x\n",
-                                          error->ring[i].requests[j].seqno,
-                                          error->ring[i].requests[j].jiffies,
-                                          error->ring[i].requests[j].tail);
-                       }
-               }
-
-               if ((obj = error->ring[i].ringbuffer)) {
-                       err_printf(m, "%s --- ringbuffer = 0x%08x\n",
-                                  dev_priv->ring[i].name,
-                                  obj->gtt_offset);
-                       offset = 0;
-                       for (page = 0; page < obj->page_count; page++) {
-                               for (elt = 0; elt < PAGE_SIZE/4; elt++) {
-                                       err_printf(m, "%08x :  %08x\n",
-                                                  offset,
-                                                  obj->pages[page][elt]);
-                                       offset += 4;
-                               }
-                       }
-               }
-
-               obj = error->ring[i].ctx;
-               if (obj) {
-                       err_printf(m, "%s --- HW Context = 0x%08x\n",
-                                  dev_priv->ring[i].name,
-                                  obj->gtt_offset);
-                       offset = 0;
-                       for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
-                               err_printf(m, "[%04x] %08x %08x %08x %08x\n",
-                                          offset,
-                                          obj->pages[0][elt],
-                                          obj->pages[0][elt+1],
-                                          obj->pages[0][elt+2],
-                                          obj->pages[0][elt+3]);
-                                       offset += 16;
-                       }
-               }
-       }
-
-       if (error->overlay)
-               intel_overlay_print_error_state(m, error->overlay);
-
-       if (error->display)
-               intel_display_print_error_state(m, dev, error->display);
-
-       return 0;
-}
-
 static ssize_t
 i915_error_state_write(struct file *filp,
                       const char __user *ubuf,
@@ -986,9 +714,7 @@ i915_error_state_write(struct file *filp,
 static int i915_error_state_open(struct inode *inode, struct file *file)
 {
        struct drm_device *dev = inode->i_private;
-       drm_i915_private_t *dev_priv = dev->dev_private;
        struct i915_error_state_file_priv *error_priv;
-       unsigned long flags;
 
        error_priv = kzalloc(sizeof(*error_priv), GFP_KERNEL);
        if (!error_priv)
@@ -996,11 +722,7 @@ static int i915_error_state_open(struct inode *inode, struct file *file)
 
        error_priv->dev = dev;
 
-       spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
-       error_priv->error = dev_priv->gpu_error.first_error;
-       if (error_priv->error)
-               kref_get(&error_priv->error->ref);
-       spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
+       i915_error_state_get(dev, error_priv);
 
        file->private_data = error_priv;
 
@@ -1011,8 +733,7 @@ static int i915_error_state_release(struct inode *inode, struct file *file)
 {
        struct i915_error_state_file_priv *error_priv = file->private_data;
 
-       if (error_priv->error)
-               kref_put(&error_priv->error->ref, i915_error_state_free);
+       i915_error_state_put(error_priv);
        kfree(error_priv);
 
        return 0;
@@ -1025,40 +746,15 @@ static ssize_t i915_error_state_read(struct file *file, char __user *userbuf,
        struct drm_i915_error_state_buf error_str;
        loff_t tmp_pos = 0;
        ssize_t ret_count = 0;
-       int ret = 0;
-
-       memset(&error_str, 0, sizeof(error_str));
-
-       /* We need to have enough room to store any i915_error_state printf
-        * so that we can move it to start position.
-        */
-       error_str.size = count + 1 > PAGE_SIZE ? count + 1 : PAGE_SIZE;
-       error_str.buf = kmalloc(error_str.size,
-                               GFP_TEMPORARY | __GFP_NORETRY | __GFP_NOWARN);
-
-       if (error_str.buf == NULL) {
-               error_str.size = PAGE_SIZE;
-               error_str.buf = kmalloc(error_str.size, GFP_TEMPORARY);
-       }
-
-       if (error_str.buf == NULL) {
-               error_str.size = 128;
-               error_str.buf = kmalloc(error_str.size, GFP_TEMPORARY);
-       }
-
-       if (error_str.buf == NULL)
-               return -ENOMEM;
-
-       error_str.start = *pos;
+       int ret;
 
-       ret = i915_error_state(error_priv, &error_str);
+       ret = i915_error_state_buf_init(&error_str, count, *pos);
        if (ret)
-               goto out;
+               return ret;
 
-       if (error_str.bytes == 0 && error_str.err) {
-               ret = error_str.err;
+       ret = i915_error_state_to_str(&error_str, error_priv);
+       if (ret)
                goto out;
-       }
 
        ret_count = simple_read_from_buffer(userbuf, count, &tmp_pos,
                                            error_str.buf,
@@ -1069,7 +765,7 @@ static ssize_t i915_error_state_read(struct file *file, char __user *userbuf,
        else
                *pos = error_str.start + ret_count;
 out:
-       kfree(error_str.buf);
+       i915_error_state_buf_release(&error_str);
        return ret ?: ret_count;
 }
 
@@ -1246,7 +942,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
                                        (freq_sts >> 8) & 0xff));
                mutex_unlock(&dev_priv->rps.hw_lock);
        } else {
-               seq_printf(m, "no P-state info available\n");
+               seq_puts(m, "no P-state info available\n");
        }
 
        return 0;
@@ -1341,28 +1037,28 @@ static int ironlake_drpc_info(struct seq_file *m)
        seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
        seq_printf(m, "Render standby enabled: %s\n",
                   (rstdbyctl & RCX_SW_EXIT) ? "no" : "yes");
-       seq_printf(m, "Current RS state: ");
+       seq_puts(m, "Current RS state: ");
        switch (rstdbyctl & RSX_STATUS_MASK) {
        case RSX_STATUS_ON:
-               seq_printf(m, "on\n");
+               seq_puts(m, "on\n");
                break;
        case RSX_STATUS_RC1:
-               seq_printf(m, "RC1\n");
+               seq_puts(m, "RC1\n");
                break;
        case RSX_STATUS_RC1E:
-               seq_printf(m, "RC1E\n");
+               seq_puts(m, "RC1E\n");
                break;
        case RSX_STATUS_RS1:
-               seq_printf(m, "RS1\n");
+               seq_puts(m, "RS1\n");
                break;
        case RSX_STATUS_RS2:
-               seq_printf(m, "RS2 (RC6)\n");
+               seq_puts(m, "RS2 (RC6)\n");
                break;
        case RSX_STATUS_RS3:
-               seq_printf(m, "RC3 (RC6+)\n");
+               seq_puts(m, "RC3 (RC6+)\n");
                break;
        default:
-               seq_printf(m, "unknown\n");
+               seq_puts(m, "unknown\n");
                break;
        }
 
@@ -1377,20 +1073,19 @@ static int gen6_drpc_info(struct seq_file *m)
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0;
        unsigned forcewake_count;
-       int count=0, ret;
-
+       int count = 0, ret;
 
        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;
 
-       spin_lock_irq(&dev_priv->gt_lock);
-       forcewake_count = dev_priv->forcewake_count;
-       spin_unlock_irq(&dev_priv->gt_lock);
+       spin_lock_irq(&dev_priv->uncore.lock);
+       forcewake_count = dev_priv->uncore.forcewake_count;
+       spin_unlock_irq(&dev_priv->uncore.lock);
 
        if (forcewake_count) {
-               seq_printf(m, "RC information inaccurate because somebody "
-                             "holds a forcewake reference \n");
+               seq_puts(m, "RC information inaccurate because somebody "
+                           "holds a forcewake reference \n");
        } else {
                /* NB: we cannot use forcewake, else we read the wrong values */
                while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
@@ -1399,7 +1094,7 @@ static int gen6_drpc_info(struct seq_file *m)
        }
 
        gt_core_status = readl(dev_priv->regs + GEN6_GT_CORE_STATUS);
-       trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4);
+       trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);
 
        rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
        rcctl1 = I915_READ(GEN6_RC_CONTROL);
@@ -1423,25 +1118,25 @@ static int gen6_drpc_info(struct seq_file *m)
                   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
        seq_printf(m, "Deepest RC6 Enabled: %s\n",
                   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
-       seq_printf(m, "Current RC state: ");
+       seq_puts(m, "Current RC state: ");
        switch (gt_core_status & GEN6_RCn_MASK) {
        case GEN6_RC0:
                if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
-                       seq_printf(m, "Core Power Down\n");
+                       seq_puts(m, "Core Power Down\n");
                else
-                       seq_printf(m, "on\n");
+                       seq_puts(m, "on\n");
                break;
        case GEN6_RC3:
-               seq_printf(m, "RC3\n");
+               seq_puts(m, "RC3\n");
                break;
        case GEN6_RC6:
-               seq_printf(m, "RC6\n");
+               seq_puts(m, "RC6\n");
                break;
        case GEN6_RC7:
-               seq_printf(m, "RC7\n");
+               seq_puts(m, "RC7\n");
                break;
        default:
-               seq_printf(m, "Unknown\n");
+               seq_puts(m, "Unknown\n");
                break;
        }
 
@@ -1485,43 +1180,52 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
        drm_i915_private_t *dev_priv = dev->dev_private;
 
        if (!I915_HAS_FBC(dev)) {
-               seq_printf(m, "FBC unsupported on this chipset\n");
+               seq_puts(m, "FBC unsupported on this chipset\n");
                return 0;
        }
 
        if (intel_fbc_enabled(dev)) {
-               seq_printf(m, "FBC enabled\n");
+               seq_puts(m, "FBC enabled\n");
        } else {
-               seq_printf(m, "FBC disabled: ");
-               switch (dev_priv->no_fbc_reason) {
+               seq_puts(m, "FBC disabled: ");
+               switch (dev_priv->fbc.no_fbc_reason) {
+               case FBC_OK:
+                       seq_puts(m, "FBC actived, but currently disabled in hardware");
+                       break;
+               case FBC_UNSUPPORTED:
+                       seq_puts(m, "unsupported by this chipset");
+                       break;
                case FBC_NO_OUTPUT:
-                       seq_printf(m, "no outputs");
+                       seq_puts(m, "no outputs");
                        break;
                case FBC_STOLEN_TOO_SMALL:
-                       seq_printf(m, "not enough stolen memory");
+                       seq_puts(m, "not enough stolen memory");
                        break;
                case FBC_UNSUPPORTED_MODE:
-                       seq_printf(m, "mode not supported");
+                       seq_puts(m, "mode not supported");
                        break;
                case FBC_MODE_TOO_LARGE:
-                       seq_printf(m, "mode too large");
+                       seq_puts(m, "mode too large");
                        break;
                case FBC_BAD_PLANE:
-                       seq_printf(m, "FBC unsupported on plane");
+                       seq_puts(m, "FBC unsupported on plane");
                        break;
                case FBC_NOT_TILED:
-                       seq_printf(m, "scanout buffer not tiled");
+                       seq_puts(m, "scanout buffer not tiled");
                        break;
                case FBC_MULTIPLE_PIPES:
-                       seq_printf(m, "multiple pipes are enabled");
+                       seq_puts(m, "multiple pipes are enabled");
                        break;
                case FBC_MODULE_PARAM:
-                       seq_printf(m, "disabled per module param (default off)");
+                       seq_puts(m, "disabled per module param (default off)");
+                       break;
+               case FBC_CHIP_DEFAULT:
+                       seq_puts(m, "disabled per chip default");
                        break;
                default:
-                       seq_printf(m, "unknown reason");
+                       seq_puts(m, "unknown reason");
                }
-               seq_printf(m, "\n");
+               seq_putc(m, '\n');
        }
        return 0;
 }
@@ -1604,7 +1308,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
        int gpu_freq, ia_freq;
 
        if (!(IS_GEN6(dev) || IS_GEN7(dev))) {
-               seq_printf(m, "unsupported on this chipset\n");
+               seq_puts(m, "unsupported on this chipset\n");
                return 0;
        }
 
@@ -1612,7 +1316,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
        if (ret)
                return ret;
 
-       seq_printf(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
+       seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
 
        for (gpu_freq = dev_priv->rps.min_delay;
             gpu_freq <= dev_priv->rps.max_delay;
@@ -1701,7 +1405,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
                   fb->base.bits_per_pixel,
                   atomic_read(&fb->base.refcount.refcount));
        describe_obj(m, fb->obj);
-       seq_printf(m, "\n");
+       seq_putc(m, '\n');
        mutex_unlock(&dev->mode_config.mutex);
 
        mutex_lock(&dev->mode_config.fb_lock);
@@ -1716,7 +1420,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
                           fb->base.bits_per_pixel,
                           atomic_read(&fb->base.refcount.refcount));
                describe_obj(m, fb->obj);
-               seq_printf(m, "\n");
+               seq_putc(m, '\n');
        }
        mutex_unlock(&dev->mode_config.fb_lock);
 
@@ -1736,22 +1440,22 @@ static int i915_context_status(struct seq_file *m, void *unused)
                return ret;
 
        if (dev_priv->ips.pwrctx) {
-               seq_printf(m, "power context ");
+               seq_puts(m, "power context ");
                describe_obj(m, dev_priv->ips.pwrctx);
-               seq_printf(m, "\n");
+               seq_putc(m, '\n');
        }
 
        if (dev_priv->ips.renderctx) {
-               seq_printf(m, "render context ");
+               seq_puts(m, "render context ");
                describe_obj(m, dev_priv->ips.renderctx);
-               seq_printf(m, "\n");
+               seq_putc(m, '\n');
        }
 
        for_each_ring(ring, dev_priv, i) {
                if (ring->default_context) {
                        seq_printf(m, "HW default context %s ring ", ring->name);
                        describe_obj(m, ring->default_context->obj);
-                       seq_printf(m, "\n");
+                       seq_putc(m, '\n');
                }
        }
 
@@ -1767,9 +1471,9 @@ static int i915_gen6_forcewake_count_info(struct seq_file *m, void *data)
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned forcewake_count;
 
-       spin_lock_irq(&dev_priv->gt_lock);
-       forcewake_count = dev_priv->forcewake_count;
-       spin_unlock_irq(&dev_priv->gt_lock);
+       spin_lock_irq(&dev_priv->uncore.lock);
+       forcewake_count = dev_priv->uncore.forcewake_count;
+       spin_unlock_irq(&dev_priv->uncore.lock);
 
        seq_printf(m, "forcewake count = %u\n", forcewake_count);
 
@@ -1778,7 +1482,7 @@ static int i915_gen6_forcewake_count_info(struct seq_file *m, void *data)
 
 static const char *swizzle_string(unsigned swizzle)
 {
-       switch(swizzle) {
+       switch (swizzle) {
        case I915_BIT_6_SWIZZLE_NONE:
                return "none";
        case I915_BIT_6_SWIZZLE_9:
@@ -1868,7 +1572,7 @@ static int i915_ppgtt_info(struct seq_file *m, void *data)
        if (dev_priv->mm.aliasing_ppgtt) {
                struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
 
-               seq_printf(m, "aliasing PPGTT:\n");
+               seq_puts(m, "aliasing PPGTT:\n");
                seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd_offset);
        }
        seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
@@ -1886,7 +1590,7 @@ static int i915_dpio_info(struct seq_file *m, void *data)
 
 
        if (!IS_VALLEYVIEW(dev)) {
-               seq_printf(m, "unsupported\n");
+               seq_puts(m, "unsupported\n");
                return 0;
        }
 
@@ -1924,6 +1628,169 @@ static int i915_dpio_info(struct seq_file *m, void *data)
        return 0;
 }
 
+static int i915_llc(struct seq_file *m, void *data)
+{
+       struct drm_info_node *node = (struct drm_info_node *) m->private;
+       struct drm_device *dev = node->minor->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       /* Size calculation for LLC is a bit of a pain. Ignore for now. */
+       seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev)));
+       seq_printf(m, "eLLC: %zuMB\n", dev_priv->ellc_size);
+
+       return 0;
+}
+
+static int i915_edp_psr_status(struct seq_file *m, void *data)
+{
+       struct drm_info_node *node = m->private;
+       struct drm_device *dev = node->minor->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       u32 psrstat, psrperf;
+
+       if (!IS_HASWELL(dev)) {
+               seq_puts(m, "PSR not supported on this platform\n");
+       } else if (IS_HASWELL(dev) && I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE) {
+               seq_puts(m, "PSR enabled\n");
+       } else {
+               seq_puts(m, "PSR disabled: ");
+               switch (dev_priv->no_psr_reason) {
+               case PSR_NO_SOURCE:
+                       seq_puts(m, "not supported on this platform");
+                       break;
+               case PSR_NO_SINK:
+                       seq_puts(m, "not supported by panel");
+                       break;
+               case PSR_MODULE_PARAM:
+                       seq_puts(m, "disabled by flag");
+                       break;
+               case PSR_CRTC_NOT_ACTIVE:
+                       seq_puts(m, "crtc not active");
+                       break;
+               case PSR_PWR_WELL_ENABLED:
+                       seq_puts(m, "power well enabled");
+                       break;
+               case PSR_NOT_TILED:
+                       seq_puts(m, "not tiled");
+                       break;
+               case PSR_SPRITE_ENABLED:
+                       seq_puts(m, "sprite enabled");
+                       break;
+               case PSR_S3D_ENABLED:
+                       seq_puts(m, "stereo 3d enabled");
+                       break;
+               case PSR_INTERLACED_ENABLED:
+                       seq_puts(m, "interlaced enabled");
+                       break;
+               case PSR_HSW_NOT_DDIA:
+                       seq_puts(m, "HSW ties PSR to DDI A (eDP)");
+                       break;
+               default:
+                       seq_puts(m, "unknown reason");
+               }
+               seq_puts(m, "\n");
+               return 0;
+       }
+
+       psrstat = I915_READ(EDP_PSR_STATUS_CTL);
+
+       seq_puts(m, "PSR Current State: ");
+       switch (psrstat & EDP_PSR_STATUS_STATE_MASK) {
+       case EDP_PSR_STATUS_STATE_IDLE:
+               seq_puts(m, "Reset state\n");
+               break;
+       case EDP_PSR_STATUS_STATE_SRDONACK:
+               seq_puts(m, "Wait for TG/Stream to send on frame of data after SRD conditions are met\n");
+               break;
+       case EDP_PSR_STATUS_STATE_SRDENT:
+               seq_puts(m, "SRD entry\n");
+               break;
+       case EDP_PSR_STATUS_STATE_BUFOFF:
+               seq_puts(m, "Wait for buffer turn off\n");
+               break;
+       case EDP_PSR_STATUS_STATE_BUFON:
+               seq_puts(m, "Wait for buffer turn on\n");
+               break;
+       case EDP_PSR_STATUS_STATE_AUXACK:
+               seq_puts(m, "Wait for AUX to acknowledge on SRD exit\n");
+               break;
+       case EDP_PSR_STATUS_STATE_SRDOFFACK:
+               seq_puts(m, "Wait for TG/Stream to acknowledge the SRD VDM exit\n");
+               break;
+       default:
+               seq_puts(m, "Unknown\n");
+               break;
+       }
+
+       seq_puts(m, "Link Status: ");
+       switch (psrstat & EDP_PSR_STATUS_LINK_MASK) {
+       case EDP_PSR_STATUS_LINK_FULL_OFF:
+               seq_puts(m, "Link is fully off\n");
+               break;
+       case EDP_PSR_STATUS_LINK_FULL_ON:
+               seq_puts(m, "Link is fully on\n");
+               break;
+       case EDP_PSR_STATUS_LINK_STANDBY:
+               seq_puts(m, "Link is in standby\n");
+               break;
+       default:
+               seq_puts(m, "Unknown\n");
+               break;
+       }
+
+       seq_printf(m, "PSR Entry Count: %u\n",
+                  psrstat >> EDP_PSR_STATUS_COUNT_SHIFT &
+                  EDP_PSR_STATUS_COUNT_MASK);
+
+       seq_printf(m, "Max Sleep Timer Counter: %u\n",
+                  psrstat >> EDP_PSR_STATUS_MAX_SLEEP_TIMER_SHIFT &
+                  EDP_PSR_STATUS_MAX_SLEEP_TIMER_MASK);
+
+       seq_printf(m, "Had AUX error: %s\n",
+                  yesno(psrstat & EDP_PSR_STATUS_AUX_ERROR));
+
+       seq_printf(m, "Sending AUX: %s\n",
+                  yesno(psrstat & EDP_PSR_STATUS_AUX_SENDING));
+
+       seq_printf(m, "Sending Idle: %s\n",
+                  yesno(psrstat & EDP_PSR_STATUS_SENDING_IDLE));
+
+       seq_printf(m, "Sending TP2 TP3: %s\n",
+                  yesno(psrstat & EDP_PSR_STATUS_SENDING_TP2_TP3));
+
+       seq_printf(m, "Sending TP1: %s\n",
+                  yesno(psrstat & EDP_PSR_STATUS_SENDING_TP1));
+
+       seq_printf(m, "Idle Count: %u\n",
+                  psrstat & EDP_PSR_STATUS_IDLE_MASK);
+
+       psrperf = (I915_READ(EDP_PSR_PERF_CNT)) & EDP_PSR_PERF_CNT_MASK;
+       seq_printf(m, "Performance Counter: %u\n", psrperf);
+
+       return 0;
+}
+
+static int i915_energy_uJ(struct seq_file *m, void *data)
+{
+       struct drm_info_node *node = m->private;
+       struct drm_device *dev = node->minor->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       u64 power;
+       u32 units;
+
+       if (INTEL_INFO(dev)->gen < 6)
+               return -ENODEV;
+
+       rdmsrl(MSR_RAPL_POWER_UNIT, power);
+       power = (power & 0x1f00) >> 8;
+       units = 1000000 / (1 << power); /* convert to uJ */
+       power = I915_READ(MCH_SECP_NRG_STTS);
+       power *= units;
+
+       seq_printf(m, "%llu", (long long unsigned)power);
+       return 0;
+}
+
 static int
 i915_wedged_get(void *data, u64 *val)
 {
@@ -2006,6 +1873,8 @@ i915_drop_caches_set(void *data, u64 val)
        struct drm_device *dev = data;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj, *next;
+       struct i915_address_space *vm;
+       struct i915_vma *vma, *x;
        int ret;
 
        DRM_DEBUG_DRIVER("Dropping caches: 0x%08llx\n", val);
@@ -2026,12 +1895,17 @@ i915_drop_caches_set(void *data, u64 val)
                i915_gem_retire_requests(dev);
 
        if (val & DROP_BOUND) {
-               list_for_each_entry_safe(obj, next, &dev_priv->mm.inactive_list, mm_list)
-                       if (obj->pin_count == 0) {
-                               ret = i915_gem_object_unbind(obj);
+               list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
+                       list_for_each_entry_safe(vma, x, &vm->inactive_list,
+                                                mm_list) {
+                               if (vma->obj->pin_count)
+                                       continue;
+
+                               ret = i915_vma_unbind(vma);
                                if (ret)
                                        goto unlock;
                        }
+               }
        }
 
        if (val & DROP_UNBOUND) {
@@ -2326,6 +2200,7 @@ static struct drm_info_list i915_debugfs_list[] = {
        {"i915_gem_pinned", i915_gem_gtt_info, 0, (void *) PINNED_LIST},
        {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
        {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
+       {"i915_gem_stolen", i915_gem_stolen_list_info },
        {"i915_gem_pageflip", i915_gem_pageflip_info, 0},
        {"i915_gem_request", i915_gem_request_info, 0},
        {"i915_gem_seqno", i915_gem_seqno_info, 0},
@@ -2353,64 +2228,41 @@ static struct drm_info_list i915_debugfs_list[] = {
        {"i915_swizzle_info", i915_swizzle_info, 0},
        {"i915_ppgtt_info", i915_ppgtt_info, 0},
        {"i915_dpio", i915_dpio_info, 0},
+       {"i915_llc", i915_llc, 0},
+       {"i915_edp_psr_status", i915_edp_psr_status, 0},
+       {"i915_energy_uJ", i915_energy_uJ, 0},
 };
 #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
 
+static struct i915_debugfs_files {
+       const char *name;
+       const struct file_operations *fops;
+} i915_debugfs_files[] = {
+       {"i915_wedged", &i915_wedged_fops},
+       {"i915_max_freq", &i915_max_freq_fops},
+       {"i915_min_freq", &i915_min_freq_fops},
+       {"i915_cache_sharing", &i915_cache_sharing_fops},
+       {"i915_ring_stop", &i915_ring_stop_fops},
+       {"i915_gem_drop_caches", &i915_drop_caches_fops},
+       {"i915_error_state", &i915_error_state_fops},
+       {"i915_next_seqno", &i915_next_seqno_fops},
+};
+
 int i915_debugfs_init(struct drm_minor *minor)
 {
-       int ret;
-
-       ret = i915_debugfs_create(minor->debugfs_root, minor,
-                                 "i915_wedged",
-                                 &i915_wedged_fops);
-       if (ret)
-               return ret;
+       int ret, i;
 
        ret = i915_forcewake_create(minor->debugfs_root, minor);
        if (ret)
                return ret;
 
-       ret = i915_debugfs_create(minor->debugfs_root, minor,
-                                 "i915_max_freq",
-                                 &i915_max_freq_fops);
-       if (ret)
-               return ret;
-
-       ret = i915_debugfs_create(minor->debugfs_root, minor,
-                                 "i915_min_freq",
-                                 &i915_min_freq_fops);
-       if (ret)
-               return ret;
-
-       ret = i915_debugfs_create(minor->debugfs_root, minor,
-                                 "i915_cache_sharing",
-                                 &i915_cache_sharing_fops);
-       if (ret)
-               return ret;
-
-       ret = i915_debugfs_create(minor->debugfs_root, minor,
-                                 "i915_ring_stop",
-                                 &i915_ring_stop_fops);
-       if (ret)
-               return ret;
-
-       ret = i915_debugfs_create(minor->debugfs_root, minor,
-                                 "i915_gem_drop_caches",
-                                 &i915_drop_caches_fops);
-       if (ret)
-               return ret;
-
-       ret = i915_debugfs_create(minor->debugfs_root, minor,
-                                 "i915_error_state",
-                                 &i915_error_state_fops);
-       if (ret)
-               return ret;
-
-       ret = i915_debugfs_create(minor->debugfs_root, minor,
-                                "i915_next_seqno",
-                                &i915_next_seqno_fops);
-       if (ret)
-               return ret;
+       for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
+               ret = i915_debugfs_create(minor->debugfs_root, minor,
+                                         i915_debugfs_files[i].name,
+                                         i915_debugfs_files[i].fops);
+               if (ret)
+                       return ret;
+       }
 
        return drm_debugfs_create_files(i915_debugfs_list,
                                        I915_DEBUGFS_ENTRIES,
@@ -2419,26 +2271,18 @@ int i915_debugfs_init(struct drm_minor *minor)
 
 void i915_debugfs_cleanup(struct drm_minor *minor)
 {
+       int i;
+
        drm_debugfs_remove_files(i915_debugfs_list,
                                 I915_DEBUGFS_ENTRIES, minor);
        drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops,
                                 1, minor);
-       drm_debugfs_remove_files((struct drm_info_list *) &i915_wedged_fops,
-                                1, minor);
-       drm_debugfs_remove_files((struct drm_info_list *) &i915_max_freq_fops,
-                                1, minor);
-       drm_debugfs_remove_files((struct drm_info_list *) &i915_min_freq_fops,
-                                1, minor);
-       drm_debugfs_remove_files((struct drm_info_list *) &i915_cache_sharing_fops,
-                                1, minor);
-       drm_debugfs_remove_files((struct drm_info_list *) &i915_drop_caches_fops,
-                                1, minor);
-       drm_debugfs_remove_files((struct drm_info_list *) &i915_ring_stop_fops,
-                                1, minor);
-       drm_debugfs_remove_files((struct drm_info_list *) &i915_error_state_fops,
-                                1, minor);
-       drm_debugfs_remove_files((struct drm_info_list *) &i915_next_seqno_fops,
-                                1, minor);
+       for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
+               struct drm_info_list *info_list =
+                       (struct drm_info_list *) i915_debugfs_files[i].fops;
+
+               drm_debugfs_remove_files(info_list, 1, minor);
+       }
 }
 
 #endif /* CONFIG_DEBUG_FS */
index f4669802a0fb35a6a1ae99b6f7c7cdaeb21d9df5..d2dc02b67512a8a05472f60a24c825efe43958fc 100644 (file)
@@ -976,6 +976,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
        case I915_PARAM_HAS_LLC:
                value = HAS_LLC(dev);
                break;
+       case I915_PARAM_HAS_WT:
+               value = HAS_WT(dev);
+               break;
        case I915_PARAM_HAS_ALIASING_PPGTT:
                value = dev_priv->mm.aliasing_ppgtt ? 1 : 0;
                break;
@@ -1323,10 +1326,8 @@ static int i915_load_modeset_init(struct drm_device *dev)
        /* Always safe in the mode setting case. */
        /* FIXME: do pre/post-mode set stuff in core KMS code */
        dev->vblank_disable_allowed = 1;
-       if (INTEL_INFO(dev)->num_pipes == 0) {
-               dev_priv->mm.suspended = 0;
+       if (INTEL_INFO(dev)->num_pipes == 0)
                return 0;
-       }
 
        ret = intel_fbdev_init(dev);
        if (ret)
@@ -1352,9 +1353,6 @@ static int i915_load_modeset_init(struct drm_device *dev)
 
        drm_kms_helper_poll_init(dev);
 
-       /* We're off and running w/KMS */
-       dev_priv->mm.suspended = 0;
-
        return 0;
 
 cleanup_gem:
@@ -1363,7 +1361,7 @@ cleanup_gem:
        i915_gem_context_fini(dev);
        mutex_unlock(&dev->struct_mutex);
        i915_gem_cleanup_aliasing_ppgtt(dev);
-       drm_mm_takedown(&dev_priv->mm.gtt_space);
+       drm_mm_takedown(&dev_priv->gtt.base.mm);
 cleanup_irq:
        drm_irq_uninstall(dev);
 cleanup_gem_stolen:
@@ -1440,22 +1438,6 @@ static void i915_dump_device_info(struct drm_i915_private *dev_priv)
 #undef SEP_COMMA
 }
 
-/**
- * intel_early_sanitize_regs - clean up BIOS state
- * @dev: DRM device
- *
- * This function must be called before we do any I915_READ or I915_WRITE. Its
- * purpose is to clean up any state left by the BIOS that may affect us when
- * reading and/or writing registers.
- */
-static void intel_early_sanitize_regs(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
-       if (HAS_FPGA_DBG_UNCLAIMED(dev))
-               I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
-}
-
 /**
  * i915_driver_load - setup chip and create an initial config
  * @dev: DRM device
@@ -1497,15 +1479,23 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 
        spin_lock_init(&dev_priv->irq_lock);
        spin_lock_init(&dev_priv->gpu_error.lock);
-       spin_lock_init(&dev_priv->rps.lock);
-       spin_lock_init(&dev_priv->gt_lock);
        spin_lock_init(&dev_priv->backlight.lock);
+       spin_lock_init(&dev_priv->uncore.lock);
+       spin_lock_init(&dev_priv->mm.object_stat_lock);
        mutex_init(&dev_priv->dpio_lock);
        mutex_init(&dev_priv->rps.hw_lock);
        mutex_init(&dev_priv->modeset_restore_lock);
 
        i915_dump_device_info(dev_priv);
 
+       /* Not all pre-production machines fall into this category, only the
+        * very first ones. Almost everything should work, except for maybe
+        * suspend/resume. And we don't implement workarounds that affect only
+        * pre-production machines. */
+       if (IS_HSW_EARLY_SDV(dev))
+               DRM_INFO("This is an early pre-production Haswell machine. "
+                        "It may not be fully functional.\n");
+
        if (i915_get_bridge_dev(dev)) {
                ret = -EIO;
                goto free_priv;
@@ -1531,7 +1521,17 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
                goto put_bridge;
        }
 
-       intel_early_sanitize_regs(dev);
+       intel_uncore_early_sanitize(dev);
+
+       if (IS_HASWELL(dev) && (I915_READ(HSW_EDRAM_PRESENT) == 1)) {
+               /* The docs do not explain exactly how the calculation can be
+                * made. It is somewhat guessable, but for now, it's always
+                * 128MB.
+                * NB: We can't write IDICR yet because we do not have gt funcs
+                * set up */
+               dev_priv->ellc_size = 128;
+               DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
+       }
 
        ret = i915_gem_gtt_init(dev);
        if (ret)
@@ -1567,8 +1567,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
                goto out_rmmap;
        }
 
-       dev_priv->mm.gtt_mtrr = arch_phys_wc_add(dev_priv->gtt.mappable_base,
-                                                aperture_size);
+       dev_priv->gtt.mtrr = arch_phys_wc_add(dev_priv->gtt.mappable_base,
+                                             aperture_size);
 
        /* The i915 workqueue is primarily used for batched retirement of
         * requests (and thus managing bo) once the task has been completed
@@ -1595,8 +1595,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 
        intel_irq_init(dev);
        intel_pm_init(dev);
-       intel_gt_sanitize(dev);
-       intel_gt_init(dev);
+       intel_uncore_sanitize(dev);
+       intel_uncore_init(dev);
 
        /* Try to make sure MCHBAR is enabled before poking at it */
        intel_setup_mchbar(dev);
@@ -1631,9 +1631,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
                        goto out_gem_unload;
        }
 
-       /* Start out suspended */
-       dev_priv->mm.suspended = 1;
-
        if (HAS_POWER_WELL(dev))
                i915_init_power_well(dev);
 
@@ -1643,6 +1640,9 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
                        DRM_ERROR("failed to init modeset\n");
                        goto out_gem_unload;
                }
+       } else {
+               /* Start out suspended in ums mode. */
+               dev_priv->ums.mm_suspended = 1;
        }
 
        i915_setup_sysfs(dev);
@@ -1669,9 +1669,9 @@ out_gem_unload:
        intel_teardown_mchbar(dev);
        destroy_workqueue(dev_priv->wq);
 out_mtrrfree:
-       arch_phys_wc_del(dev_priv->mm.gtt_mtrr);
+       arch_phys_wc_del(dev_priv->gtt.mtrr);
        io_mapping_free(dev_priv->gtt.mappable);
-       dev_priv->gtt.gtt_remove(dev);
+       dev_priv->gtt.base.cleanup(&dev_priv->gtt.base);
 out_rmmap:
        pci_iounmap(dev->pdev, dev_priv->regs);
 put_bridge:
@@ -1688,8 +1688,13 @@ int i915_driver_unload(struct drm_device *dev)
 
        intel_gpu_ips_teardown();
 
-       if (HAS_POWER_WELL(dev))
+       if (HAS_POWER_WELL(dev)) {
+               /* The i915.ko module is still not prepared to be loaded when
+                * the power well is not enabled, so just enable it in case
+                * we're going to unload/reload. */
+               intel_set_power_well(dev, true);
                i915_remove_power_well(dev);
+       }
 
        i915_teardown_sysfs(dev);
 
@@ -1707,7 +1712,7 @@ int i915_driver_unload(struct drm_device *dev)
        cancel_delayed_work_sync(&dev_priv->mm.retire_work);
 
        io_mapping_free(dev_priv->gtt.mappable);
-       arch_phys_wc_del(dev_priv->mm.gtt_mtrr);
+       arch_phys_wc_del(dev_priv->gtt.mtrr);
 
        acpi_video_unregister();
 
@@ -1756,7 +1761,9 @@ int i915_driver_unload(struct drm_device *dev)
                        i915_free_hws(dev);
        }
 
-       drm_mm_takedown(&dev_priv->mm.gtt_space);
+       list_del(&dev_priv->gtt.base.global_link);
+       WARN_ON(!list_empty(&dev_priv->vm_list));
+       drm_mm_takedown(&dev_priv->gtt.base.mm);
        if (dev_priv->regs != NULL)
                pci_iounmap(dev->pdev, dev_priv->regs);
 
@@ -1766,7 +1773,7 @@ int i915_driver_unload(struct drm_device *dev)
        destroy_workqueue(dev_priv->wq);
        pm_qos_remove_request(&dev_priv->pm_qos);
 
-       dev_priv->gtt.gtt_remove(dev);
+       dev_priv->gtt.base.cleanup(&dev_priv->gtt.base);
 
        if (dev_priv->slab)
                kmem_cache_destroy(dev_priv->slab);
@@ -1842,7 +1849,7 @@ void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
        kfree(file_priv);
 }
 
-struct drm_ioctl_desc i915_ioctls[] = {
+const struct drm_ioctl_desc i915_ioctls[] = {
        DRM_IOCTL_DEF_DRV(I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF_DRV(I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_FLIP, i915_flip_bufs, DRM_AUTH),
index 45b3c030f48393b6921e66406e48f92e2f1d643e..a9c8f18e26fca5fef06fdeaedf643e630852d3f9 100644 (file)
@@ -118,10 +118,14 @@ module_param_named(i915_enable_ppgtt, i915_enable_ppgtt, int, 0600);
 MODULE_PARM_DESC(i915_enable_ppgtt,
                "Enable PPGTT (default: true)");
 
-unsigned int i915_preliminary_hw_support __read_mostly = 0;
+int i915_enable_psr __read_mostly = 0;
+module_param_named(enable_psr, i915_enable_psr, int, 0600);
+MODULE_PARM_DESC(enable_psr, "Enable PSR (default: false)");
+
+unsigned int i915_preliminary_hw_support __read_mostly = IS_ENABLED(CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT);
 module_param_named(preliminary_hw_support, i915_preliminary_hw_support, int, 0600);
 MODULE_PARM_DESC(preliminary_hw_support,
-               "Enable preliminary hardware support. (default: false)");
+               "Enable preliminary hardware support.");
 
 int i915_disable_power_well __read_mostly = 1;
 module_param_named(disable_power_well, i915_disable_power_well, int, 0600);
@@ -132,6 +136,16 @@ int i915_enable_ips __read_mostly = 1;
 module_param_named(enable_ips, i915_enable_ips, int, 0600);
 MODULE_PARM_DESC(enable_ips, "Enable IPS (default: true)");
 
+bool i915_fastboot __read_mostly = 0;
+module_param_named(fastboot, i915_fastboot, bool, 0600);
+MODULE_PARM_DESC(fastboot, "Try to skip unnecessary mode sets at boot time "
+                "(default: false)");
+
+bool i915_prefault_disable __read_mostly;
+module_param_named(prefault_disable, i915_prefault_disable, bool, 0600);
+MODULE_PARM_DESC(prefault_disable,
+               "Disable page prefaulting for pread/pwrite/reloc (default:false). For developers only.");
+
 static struct drm_driver driver;
 extern int intel_agp_enabled;
 
@@ -551,7 +565,11 @@ static int i915_drm_freeze(struct drm_device *dev)
 
        /* If KMS is active, we do the leavevt stuff here */
        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-               int error = i915_gem_idle(dev);
+               int error;
+
+               mutex_lock(&dev->struct_mutex);
+               error = i915_gem_idle(dev);
+               mutex_unlock(&dev->struct_mutex);
                if (error) {
                        dev_err(&dev->pdev->dev,
                                "GEM idle failed, resume might fail\n");
@@ -656,7 +674,6 @@ static int __i915_drm_thaw(struct drm_device *dev)
                intel_init_pch_refclk(dev);
 
                mutex_lock(&dev->struct_mutex);
-               dev_priv->mm.suspended = 0;
 
                error = i915_gem_init_hw(dev);
                mutex_unlock(&dev->struct_mutex);
@@ -706,7 +723,7 @@ static int i915_drm_thaw(struct drm_device *dev)
 {
        int error = 0;
 
-       intel_gt_sanitize(dev);
+       intel_uncore_sanitize(dev);
 
        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
                mutex_lock(&dev->struct_mutex);
@@ -732,7 +749,7 @@ int i915_resume(struct drm_device *dev)
 
        pci_set_master(dev->pdev);
 
-       intel_gt_sanitize(dev);
+       intel_uncore_sanitize(dev);
 
        /*
         * Platforms with opregion should have sane BIOS, older ones (gen3 and
@@ -753,139 +770,6 @@ int i915_resume(struct drm_device *dev)
        return 0;
 }
 
-static int i8xx_do_reset(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
-       if (IS_I85X(dev))
-               return -ENODEV;
-
-       I915_WRITE(D_STATE, I915_READ(D_STATE) | DSTATE_GFX_RESET_I830);
-       POSTING_READ(D_STATE);
-
-       if (IS_I830(dev) || IS_845G(dev)) {
-               I915_WRITE(DEBUG_RESET_I830,
-                          DEBUG_RESET_DISPLAY |
-                          DEBUG_RESET_RENDER |
-                          DEBUG_RESET_FULL);
-               POSTING_READ(DEBUG_RESET_I830);
-               msleep(1);
-
-               I915_WRITE(DEBUG_RESET_I830, 0);
-               POSTING_READ(DEBUG_RESET_I830);
-       }
-
-       msleep(1);
-
-       I915_WRITE(D_STATE, I915_READ(D_STATE) & ~DSTATE_GFX_RESET_I830);
-       POSTING_READ(D_STATE);
-
-       return 0;
-}
-
-static int i965_reset_complete(struct drm_device *dev)
-{
-       u8 gdrst;
-       pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
-       return (gdrst & GRDOM_RESET_ENABLE) == 0;
-}
-
-static int i965_do_reset(struct drm_device *dev)
-{
-       int ret;
-       u8 gdrst;
-
-       /*
-        * Set the domains we want to reset (GRDOM/bits 2 and 3) as
-        * well as the reset bit (GR/bit 0).  Setting the GR bit
-        * triggers the reset; when done, the hardware will clear it.
-        */
-       pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
-       pci_write_config_byte(dev->pdev, I965_GDRST,
-                             gdrst | GRDOM_RENDER |
-                             GRDOM_RESET_ENABLE);
-       ret =  wait_for(i965_reset_complete(dev), 500);
-       if (ret)
-               return ret;
-
-       /* We can't reset render&media without also resetting display ... */
-       pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
-       pci_write_config_byte(dev->pdev, I965_GDRST,
-                             gdrst | GRDOM_MEDIA |
-                             GRDOM_RESET_ENABLE);
-
-       return wait_for(i965_reset_complete(dev), 500);
-}
-
-static int ironlake_do_reset(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       u32 gdrst;
-       int ret;
-
-       gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
-       gdrst &= ~GRDOM_MASK;
-       I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
-                  gdrst | GRDOM_RENDER | GRDOM_RESET_ENABLE);
-       ret = wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
-       if (ret)
-               return ret;
-
-       /* We can't reset render&media without also resetting display ... */
-       gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
-       gdrst &= ~GRDOM_MASK;
-       I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
-                  gdrst | GRDOM_MEDIA | GRDOM_RESET_ENABLE);
-       return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
-}
-
-static int gen6_do_reset(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       int     ret;
-       unsigned long irqflags;
-
-       /* Hold gt_lock across reset to prevent any register access
-        * with forcewake not set correctly
-        */
-       spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
-
-       /* Reset the chip */
-
-       /* GEN6_GDRST is not in the gt power well, no need to check
-        * for fifo space for the write or forcewake the chip for
-        * the read
-        */
-       I915_WRITE_NOTRACE(GEN6_GDRST, GEN6_GRDOM_FULL);
-
-       /* Spin waiting for the device to ack the reset request */
-       ret = wait_for((I915_READ_NOTRACE(GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);
-
-       /* If reset with a user forcewake, try to restore, otherwise turn it off */
-       if (dev_priv->forcewake_count)
-               dev_priv->gt.force_wake_get(dev_priv);
-       else
-               dev_priv->gt.force_wake_put(dev_priv);
-
-       /* Restore fifo count */
-       dev_priv->gt_fifo_count = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
-
-       spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
-       return ret;
-}
-
-int intel_gpu_reset(struct drm_device *dev)
-{
-       switch (INTEL_INFO(dev)->gen) {
-       case 7:
-       case 6: return gen6_do_reset(dev);
-       case 5: return ironlake_do_reset(dev);
-       case 4: return i965_do_reset(dev);
-       case 2: return i8xx_do_reset(dev);
-       default: return -ENODEV;
-       }
-}
-
 /**
  * i915_reset - reset chip after a hang
  * @dev: drm device to reset
@@ -955,11 +839,11 @@ int i915_reset(struct drm_device *dev)
         * switched away).
         */
        if (drm_core_check_feature(dev, DRIVER_MODESET) ||
-                       !dev_priv->mm.suspended) {
+                       !dev_priv->ums.mm_suspended) {
                struct intel_ring_buffer *ring;
                int i;
 
-               dev_priv->mm.suspended = 0;
+               dev_priv->ums.mm_suspended = 0;
 
                i915_gem_init_swizzling(dev);
 
@@ -1110,7 +994,6 @@ static const struct file_operations i915_driver_fops = {
        .unlocked_ioctl = drm_ioctl,
        .mmap = drm_gem_mmap,
        .poll = drm_poll,
-       .fasync = drm_fasync,
        .read = drm_read,
 #ifdef CONFIG_COMPAT
        .compat_ioctl = i915_compat_ioctl,
@@ -1123,7 +1006,7 @@ static struct drm_driver driver = {
         * deal with them for Intel hardware.
         */
        .driver_features =
-           DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR |*/
+           DRIVER_USE_AGP | DRIVER_REQUIRE_AGP |
            DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME,
        .load = i915_driver_load,
        .unload = i915_driver_unload,
@@ -1154,7 +1037,7 @@ static struct drm_driver driver = {
 
        .dumb_create = i915_gem_dumb_create,
        .dumb_map_offset = i915_gem_mmap_gtt,
-       .dumb_destroy = i915_gem_dumb_destroy,
+       .dumb_destroy = drm_gem_dumb_destroy,
        .ioctls = i915_ioctls,
        .fops = &i915_driver_fops,
        .name = DRIVER_NAME,
@@ -1215,136 +1098,3 @@ module_exit(i915_exit);
 MODULE_AUTHOR(DRIVER_AUTHOR);
 MODULE_DESCRIPTION(DRIVER_DESC);
 MODULE_LICENSE("GPL and additional rights");
-
-/* We give fast paths for the really cool registers */
-#define NEEDS_FORCE_WAKE(dev_priv, reg) \
-       ((HAS_FORCE_WAKE((dev_priv)->dev)) && \
-        ((reg) < 0x40000) &&            \
-        ((reg) != FORCEWAKE))
-static void
-ilk_dummy_write(struct drm_i915_private *dev_priv)
-{
-       /* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
-        * the chip from rc6 before touching it for real. MI_MODE is masked,
-        * hence harmless to write 0 into. */
-       I915_WRITE_NOTRACE(MI_MODE, 0);
-}
-
-static void
-hsw_unclaimed_reg_clear(struct drm_i915_private *dev_priv, u32 reg)
-{
-       if (HAS_FPGA_DBG_UNCLAIMED(dev_priv->dev) &&
-           (I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
-               DRM_ERROR("Unknown unclaimed register before writing to %x\n",
-                         reg);
-               I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
-       }
-}
-
-static void
-hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg)
-{
-       if (HAS_FPGA_DBG_UNCLAIMED(dev_priv->dev) &&
-           (I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
-               DRM_ERROR("Unclaimed write to %x\n", reg);
-               I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
-       }
-}
-
-#define __i915_read(x, y) \
-u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
-       unsigned long irqflags; \
-       u##x val = 0; \
-       spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \
-       if (IS_GEN5(dev_priv->dev)) \
-               ilk_dummy_write(dev_priv); \
-       if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
-               if (dev_priv->forcewake_count == 0) \
-                       dev_priv->gt.force_wake_get(dev_priv); \
-               val = read##y(dev_priv->regs + reg); \
-               if (dev_priv->forcewake_count == 0) \
-                       dev_priv->gt.force_wake_put(dev_priv); \
-       } else { \
-               val = read##y(dev_priv->regs + reg); \
-       } \
-       spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \
-       trace_i915_reg_rw(false, reg, val, sizeof(val)); \
-       return val; \
-}
-
-__i915_read(8, b)
-__i915_read(16, w)
-__i915_read(32, l)
-__i915_read(64, q)
-#undef __i915_read
-
-#define __i915_write(x, y) \
-void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
-       unsigned long irqflags; \
-       u32 __fifo_ret = 0; \
-       trace_i915_reg_rw(true, reg, val, sizeof(val)); \
-       spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \
-       if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
-               __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
-       } \
-       if (IS_GEN5(dev_priv->dev)) \
-               ilk_dummy_write(dev_priv); \
-       hsw_unclaimed_reg_clear(dev_priv, reg); \
-       write##y(val, dev_priv->regs + reg); \
-       if (unlikely(__fifo_ret)) { \
-               gen6_gt_check_fifodbg(dev_priv); \
-       } \
-       hsw_unclaimed_reg_check(dev_priv, reg); \
-       spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \
-}
-__i915_write(8, b)
-__i915_write(16, w)
-__i915_write(32, l)
-__i915_write(64, q)
-#undef __i915_write
-
-static const struct register_whitelist {
-       uint64_t offset;
-       uint32_t size;
-       uint32_t gen_bitmask; /* support gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
-} whitelist[] = {
-       { RING_TIMESTAMP(RENDER_RING_BASE), 8, 0xF0 },
-};
-
-int i915_reg_read_ioctl(struct drm_device *dev,
-                       void *data, struct drm_file *file)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_i915_reg_read *reg = data;
-       struct register_whitelist const *entry = whitelist;
-       int i;
-
-       for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
-               if (entry->offset == reg->offset &&
-                   (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
-                       break;
-       }
-
-       if (i == ARRAY_SIZE(whitelist))
-               return -EINVAL;
-
-       switch (entry->size) {
-       case 8:
-               reg->val = I915_READ64(reg->offset);
-               break;
-       case 4:
-               reg->val = I915_READ(reg->offset);
-               break;
-       case 2:
-               reg->val = I915_READ16(reg->offset);
-               break;
-       case 1:
-               reg->val = I915_READ8(reg->offset);
-               break;
-       default:
-               WARN_ON(1);
-               return -EINVAL;
-       }
-
-       return 0;
-}
index 1929bffc1c77f645a920c9b4037f0b5eb646a145..14299a4dd3568ea36e71a0e1c5b58019929d06bd 100644 (file)
@@ -144,6 +144,7 @@ enum intel_dpll_id {
 
 struct intel_dpll_hw_state {
        uint32_t dpll;
+       uint32_t dpll_md;
        uint32_t fp0;
        uint32_t fp1;
 };
@@ -156,6 +157,8 @@ struct intel_shared_dpll {
        /* should match the index in the dev_priv->shared_dplls array */
        enum intel_dpll_id id;
        struct intel_dpll_hw_state hw_state;
+       void (*mode_set)(struct drm_i915_private *dev_priv,
+                        struct intel_shared_dpll *pll);
        void (*enable)(struct drm_i915_private *dev_priv,
                       struct intel_shared_dpll *pll);
        void (*disable)(struct drm_i915_private *dev_priv,
@@ -198,7 +201,6 @@ struct intel_ddi_plls {
 #define DRIVER_MINOR           6
 #define DRIVER_PATCHLEVEL      0
 
-#define WATCH_COHERENCY        0
 #define WATCH_LISTS    0
 #define WATCH_GTT      0
 
@@ -320,8 +322,8 @@ struct drm_i915_error_state {
                u32 purgeable:1;
                s32 ring:4;
                u32 cache_level:2;
-       } *active_bo, *pinned_bo;
-       u32 active_bo_count, pinned_bo_count;
+       } **active_bo, **pinned_bo;
+       u32 *active_bo_count, *pinned_bo_count;
        struct intel_overlay_error_state *overlay;
        struct intel_display_error_state *display;
 };
@@ -356,14 +358,16 @@ struct drm_i915_display_funcs {
                          struct dpll *match_clock,
                          struct dpll *best_clock);
        void (*update_wm)(struct drm_device *dev);
-       void (*update_sprite_wm)(struct drm_device *dev, int pipe,
+       void (*update_sprite_wm)(struct drm_plane *plane,
+                                struct drm_crtc *crtc,
                                 uint32_t sprite_width, int pixel_size,
-                                bool enable);
+                                bool enable, bool scaled);
        void (*modeset_global_resources)(struct drm_device *dev);
        /* Returns the active state of the crtc, and if the crtc is active,
         * fills out the pipe-config with the hw state. */
        bool (*get_pipe_config)(struct intel_crtc *,
                                struct intel_crtc_config *);
+       void (*get_clock)(struct intel_crtc *, struct intel_crtc_config *);
        int (*crtc_mode_set)(struct drm_crtc *crtc,
                             int x, int y,
                             struct drm_framebuffer *old_fb);
@@ -387,11 +391,20 @@ struct drm_i915_display_funcs {
        /* pll clock increase/decrease */
 };
 
-struct drm_i915_gt_funcs {
+struct intel_uncore_funcs {
        void (*force_wake_get)(struct drm_i915_private *dev_priv);
        void (*force_wake_put)(struct drm_i915_private *dev_priv);
 };
 
+struct intel_uncore {
+       spinlock_t lock; /** lock is also taken in irq contexts. */
+
+       struct intel_uncore_funcs funcs;
+
+       unsigned fifo_count;
+       unsigned forcewake_count;
+};
+
 #define DEV_INFO_FOR_EACH_FLAG(func, sep) \
        func(is_mobile) sep \
        func(is_i85x) sep \
@@ -436,12 +449,64 @@ struct intel_device_info {
 
 enum i915_cache_level {
        I915_CACHE_NONE = 0,
-       I915_CACHE_LLC,
-       I915_CACHE_LLC_MLC, /* gen6+, in docs at least! */
+       I915_CACHE_LLC, /* also used for snoopable memory on non-LLC */
+       I915_CACHE_L3_LLC, /* gen7+, L3 sits between the domain specifc
+                             caches, eg sampler/render caches, and the
+                             large Last-Level-Cache. LLC is coherent with
+                             the CPU, but L3 is only visible to the GPU. */
+       I915_CACHE_WT, /* hsw:gt3e WriteThrough for scanouts */
 };
 
 typedef uint32_t gen6_gtt_pte_t;
 
+struct i915_address_space {
+       struct drm_mm mm;
+       struct drm_device *dev;
+       struct list_head global_link;
+       unsigned long start;            /* Start offset always 0 for dri2 */
+       size_t total;           /* size addr space maps (ex. 2GB for ggtt) */
+
+       struct {
+               dma_addr_t addr;
+               struct page *page;
+       } scratch;
+
+       /**
+        * List of objects currently involved in rendering.
+        *
+        * Includes buffers having the contents of their GPU caches
+        * flushed, not necessarily primitives.  last_rendering_seqno
+        * represents when the rendering involved will be completed.
+        *
+        * A reference is held on the buffer while on this list.
+        */
+       struct list_head active_list;
+
+       /**
+        * LRU list of objects which are not in the ringbuffer and
+        * are ready to unbind, but are still in the GTT.
+        *
+        * last_rendering_seqno is 0 while an object is in this list.
+        *
+        * A reference is not held on the buffer while on this list,
+        * as merely being GTT-bound shouldn't prevent its being
+        * freed, and we'll pull it off the list in the free path.
+        */
+       struct list_head inactive_list;
+
+       /* FIXME: Need a more generic return type */
+       gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr,
+                                    enum i915_cache_level level);
+       void (*clear_range)(struct i915_address_space *vm,
+                           unsigned int first_entry,
+                           unsigned int num_entries);
+       void (*insert_entries)(struct i915_address_space *vm,
+                              struct sg_table *st,
+                              unsigned int first_entry,
+                              enum i915_cache_level cache_level);
+       void (*cleanup)(struct i915_address_space *vm);
+};
+
 /* The Graphics Translation Table is the way in which GEN hardware translates a
  * Graphics Virtual Address into a Physical Address. In addition to the normal
  * collateral associated with any va->pa translations GEN hardware also has a
@@ -450,8 +515,7 @@ typedef uint32_t gen6_gtt_pte_t;
  * the spec.
  */
 struct i915_gtt {
-       unsigned long start;            /* Start offset of used GTT */
-       size_t total;                   /* Total size GTT can map */
+       struct i915_address_space base;
        size_t stolen_size;             /* Total size of stolen memory */
 
        unsigned long mappable_end;     /* End offset that we can CPU map */
@@ -462,50 +526,54 @@ struct i915_gtt {
        void __iomem *gsm;
 
        bool do_idle_maps;
-       dma_addr_t scratch_page_dma;
-       struct page *scratch_page;
+
+       int mtrr;
 
        /* global gtt ops */
        int (*gtt_probe)(struct drm_device *dev, size_t *gtt_total,
                          size_t *stolen, phys_addr_t *mappable_base,
                          unsigned long *mappable_end);
-       void (*gtt_remove)(struct drm_device *dev);
-       void (*gtt_clear_range)(struct drm_device *dev,
-                               unsigned int first_entry,
-                               unsigned int num_entries);
-       void (*gtt_insert_entries)(struct drm_device *dev,
-                                  struct sg_table *st,
-                                  unsigned int pg_start,
-                                  enum i915_cache_level cache_level);
-       gen6_gtt_pte_t (*pte_encode)(struct drm_device *dev,
-                                    dma_addr_t addr,
-                                    enum i915_cache_level level);
 };
-#define gtt_total_entries(gtt) ((gtt).total >> PAGE_SHIFT)
+#define gtt_total_entries(gtt) ((gtt).base.total >> PAGE_SHIFT)
 
-#define I915_PPGTT_PD_ENTRIES 512
-#define I915_PPGTT_PT_ENTRIES 1024
 struct i915_hw_ppgtt {
-       struct drm_device *dev;
+       struct i915_address_space base;
        unsigned num_pd_entries;
        struct page **pt_pages;
        uint32_t pd_offset;
        dma_addr_t *pt_dma_addr;
-       dma_addr_t scratch_page_dma_addr;
 
-       /* pte functions, mirroring the interface of the global gtt. */
-       void (*clear_range)(struct i915_hw_ppgtt *ppgtt,
-                           unsigned int first_entry,
-                           unsigned int num_entries);
-       void (*insert_entries)(struct i915_hw_ppgtt *ppgtt,
-                              struct sg_table *st,
-                              unsigned int pg_start,
-                              enum i915_cache_level cache_level);
-       gen6_gtt_pte_t (*pte_encode)(struct drm_device *dev,
-                                    dma_addr_t addr,
-                                    enum i915_cache_level level);
        int (*enable)(struct drm_device *dev);
-       void (*cleanup)(struct i915_hw_ppgtt *ppgtt);
+};
+
+/**
+ * A VMA represents a GEM BO that is bound into an address space. Therefore, a
+ * VMA's presence cannot be guaranteed before binding, or after unbinding the
+ * object into/from the address space.
+ *
+ * To make things as simple as possible (ie. no refcounting), a VMA's lifetime
+ * will always be <= an objects lifetime. So object refcounting should cover us.
+ */
+struct i915_vma {
+       struct drm_mm_node node;
+       struct drm_i915_gem_object *obj;
+       struct i915_address_space *vm;
+
+       /** This object's place on the active/inactive lists */
+       struct list_head mm_list;
+
+       struct list_head vma_link; /* Link in the object's VMA list */
+
+       /** This vma's place in the batchbuffer or on the eviction list */
+       struct list_head exec_list;
+
+       /**
+        * Used for performing relocations during execbuffer insertion.
+        */
+       struct hlist_node exec_node;
+       unsigned long exec_handle;
+       struct drm_i915_gem_exec_object2 *exec_entry;
+
 };
 
 struct i915_ctx_hang_stats {
@@ -528,15 +596,48 @@ struct i915_hw_context {
        struct i915_ctx_hang_stats hang_stats;
 };
 
-enum no_fbc_reason {
-       FBC_NO_OUTPUT, /* no outputs enabled to compress */
-       FBC_STOLEN_TOO_SMALL, /* not enough space to hold compressed buffers */
-       FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */
-       FBC_MODE_TOO_LARGE, /* mode too large for compression */
-       FBC_BAD_PLANE, /* fbc not supported on plane */
-       FBC_NOT_TILED, /* buffer not tiled */
-       FBC_MULTIPLE_PIPES, /* more than one pipe active */
-       FBC_MODULE_PARAM,
+struct i915_fbc {
+       unsigned long size;
+       unsigned int fb_id;
+       enum plane plane;
+       int y;
+
+       struct drm_mm_node *compressed_fb;
+       struct drm_mm_node *compressed_llb;
+
+       struct intel_fbc_work {
+               struct delayed_work work;
+               struct drm_crtc *crtc;
+               struct drm_framebuffer *fb;
+               int interval;
+       } *fbc_work;
+
+       enum no_fbc_reason {
+               FBC_OK, /* FBC is enabled */
+               FBC_UNSUPPORTED, /* FBC is not supported by this chipset */
+               FBC_NO_OUTPUT, /* no outputs enabled to compress */
+               FBC_STOLEN_TOO_SMALL, /* not enough space for buffers */
+               FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */
+               FBC_MODE_TOO_LARGE, /* mode too large for compression */
+               FBC_BAD_PLANE, /* fbc not supported on plane */
+               FBC_NOT_TILED, /* buffer not tiled */
+               FBC_MULTIPLE_PIPES, /* more than one pipe active */
+               FBC_MODULE_PARAM,
+               FBC_CHIP_DEFAULT, /* disabled by default on this chip */
+       } no_fbc_reason;
+};
+
+enum no_psr_reason {
+       PSR_NO_SOURCE, /* Not supported on platform */
+       PSR_NO_SINK, /* Not supported by panel */
+       PSR_MODULE_PARAM,
+       PSR_CRTC_NOT_ACTIVE,
+       PSR_PWR_WELL_ENABLED,
+       PSR_NOT_TILED,
+       PSR_SPRITE_ENABLED,
+       PSR_S3D_ENABLED,
+       PSR_INTERLACED_ENABLED,
+       PSR_HSW_NOT_DDIA,
 };
 
 enum intel_pch {
@@ -722,12 +823,12 @@ struct i915_suspend_saved_registers {
 };
 
 struct intel_gen6_power_mgmt {
+       /* work and pm_iir are protected by dev_priv->irq_lock */
        struct work_struct work;
-       struct delayed_work vlv_work;
        u32 pm_iir;
-       /* lock - irqsave spinlock that protectects the work_struct and
-        * pm_iir. */
-       spinlock_t lock;
+
+       /* On vlv we need to manually drop to Vmin with a delayed work. */
+       struct delayed_work vlv_work;
 
        /* The below variables an all the rps hw state are protected by
         * dev->struct mutext. */
@@ -793,6 +894,18 @@ struct i915_dri1_state {
        uint32_t counter;
 };
 
+struct i915_ums_state {
+       /**
+        * Flag if the X Server, and thus DRM, is not currently in
+        * control of the device.
+        *
+        * This is set between LeaveVT and EnterVT.  It needs to be
+        * replaced with a semaphore.  It also needs to be
+        * transitioned away from for kernel modesetting.
+        */
+       int mm_suspended;
+};
+
 struct intel_l3_parity {
        u32 *remap_info;
        struct work_struct error_work;
@@ -801,8 +914,6 @@ struct intel_l3_parity {
 struct i915_gem_mm {
        /** Memory allocator for GTT stolen memory */
        struct drm_mm stolen;
-       /** Memory allocator for GTT */
-       struct drm_mm gtt_space;
        /** List of all objects in gtt_space. Used to restore gtt
         * mappings on resume */
        struct list_head bound_list;
@@ -816,37 +927,12 @@ struct i915_gem_mm {
        /** Usable portion of the GTT for GEM */
        unsigned long stolen_base; /* limited to low memory (32-bit) */
 
-       int gtt_mtrr;
-
        /** PPGTT used for aliasing the PPGTT with the GTT */
        struct i915_hw_ppgtt *aliasing_ppgtt;
 
        struct shrinker inactive_shrinker;
        bool shrinker_no_lock_stealing;
 
-       /**
-        * List of objects currently involved in rendering.
-        *
-        * Includes buffers having the contents of their GPU caches
-        * flushed, not necessarily primitives.  last_rendering_seqno
-        * represents when the rendering involved will be completed.
-        *
-        * A reference is held on the buffer while on this list.
-        */
-       struct list_head active_list;
-
-       /**
-        * LRU list of objects which are not in the ringbuffer and
-        * are ready to unbind, but are still in the GTT.
-        *
-        * last_rendering_seqno is 0 while an object is in this list.
-        *
-        * A reference is not held on the buffer while on this list,
-        * as merely being GTT-bound shouldn't prevent its being
-        * freed, and we'll pull it off the list in the free path.
-        */
-       struct list_head inactive_list;
-
        /** LRU list of objects with fence regs on them. */
        struct list_head fence_list;
 
@@ -865,16 +951,6 @@ struct i915_gem_mm {
         */
        bool interruptible;
 
-       /**
-        * Flag if the X Server, and thus DRM, is not currently in
-        * control of the device.
-        *
-        * This is set between LeaveVT and EnterVT.  It needs to be
-        * replaced with a semaphore.  It also needs to be
-        * transitioned away from for kernel modesetting.
-        */
-       int suspended;
-
        /** Bit 6 swizzling required for X tiling */
        uint32_t bit_6_swizzle_x;
        /** Bit 6 swizzling required for Y tiling */
@@ -884,6 +960,7 @@ struct i915_gem_mm {
        struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];
 
        /* accounting, useful for userland debugging */
+       spinlock_t object_stat_lock;
        size_t object_memory;
        u32 object_count;
 };
@@ -897,6 +974,11 @@ struct drm_i915_error_state_buf {
        loff_t pos;
 };
 
+struct i915_error_state_file_priv {
+       struct drm_device *dev;
+       struct drm_i915_error_state *error;
+};
+
 struct i915_gpu_error {
        /* For hangcheck timer */
 #define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
@@ -988,6 +1070,19 @@ struct intel_vbt_data {
        struct child_device_config *child_dev;
 };
 
+enum intel_ddb_partitioning {
+       INTEL_DDB_PART_1_2,
+       INTEL_DDB_PART_5_6, /* IVB+ */
+};
+
+struct intel_wm_level {
+       bool enable;
+       uint32_t pri_val;
+       uint32_t spr_val;
+       uint32_t cur_val;
+       uint32_t fbc_val;
+};
+
 typedef struct drm_i915_private {
        struct drm_device *dev;
        struct kmem_cache *slab;
@@ -998,14 +1093,7 @@ typedef struct drm_i915_private {
 
        void __iomem *regs;
 
-       struct drm_i915_gt_funcs gt;
-       /** gt_fifo_count and the subsequent register write are synchronized
-        * with dev->struct_mutex. */
-       unsigned gt_fifo_count;
-       /** forcewake_count is protected by gt_lock */
-       unsigned forcewake_count;
-       /** gt_lock is also taken in irq contexts. */
-       spinlock_t gt_lock;
+       struct intel_uncore uncore;
 
        struct intel_gmbus gmbus[GMBUS_NUM_PORTS];
 
@@ -1042,6 +1130,7 @@ typedef struct drm_i915_private {
        /** Cached value of IMR to avoid reads in updating the bitfield */
        u32 irq_mask;
        u32 gt_irq_mask;
+       u32 pm_irq_mask;
 
        struct work_struct hotplug_work;
        bool enable_hotplug_processing;
@@ -1059,12 +1148,7 @@ typedef struct drm_i915_private {
 
        int num_plane;
 
-       unsigned long cfb_size;
-       unsigned int cfb_fb;
-       enum plane cfb_plane;
-       int cfb_y;
-       struct intel_fbc_work *fbc_work;
-
+       struct i915_fbc fbc;
        struct intel_opregion opregion;
        struct intel_vbt_data vbt;
 
@@ -1081,8 +1165,6 @@ typedef struct drm_i915_private {
        } backlight;
 
        /* LVDS info */
-       struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
-       struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
        bool no_aux_handshake;
 
        struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
@@ -1105,7 +1187,8 @@ typedef struct drm_i915_private {
        enum modeset_restore modeset_restore;
        struct mutex modeset_restore_lock;
 
-       struct i915_gtt gtt;
+       struct list_head vm_list; /* Global list of all address spaces */
+       struct i915_gtt gtt; /* VMA representing the global address space */
 
        struct i915_gem_mm mm;
 
@@ -1132,6 +1215,9 @@ typedef struct drm_i915_private {
 
        struct intel_l3_parity l3_parity;
 
+       /* Cannot be determined by PCIID. You must always read a register. */
+       size_t ellc_size;
+
        /* gen6+ rps state */
        struct intel_gen6_power_mgmt rps;
 
@@ -1142,10 +1228,7 @@ typedef struct drm_i915_private {
        /* Haswell power well */
        struct i915_power_well power_well;
 
-       enum no_fbc_reason no_fbc_reason;
-
-       struct drm_mm_node *compressed_fb;
-       struct drm_mm_node *compressed_llb;
+       enum no_psr_reason no_psr_reason;
 
        struct i915_gpu_error gpu_error;
 
@@ -1170,11 +1253,32 @@ typedef struct drm_i915_private {
 
        struct i915_suspend_saved_registers regfile;
 
+       struct {
+               /*
+                * Raw watermark latency values:
+                * in 0.1us units for WM0,
+                * in 0.5us units for WM1+.
+                */
+               /* primary */
+               uint16_t pri_latency[5];
+               /* sprite */
+               uint16_t spr_latency[5];
+               /* cursor */
+               uint16_t cur_latency[5];
+       } wm;
+
        /* Old dri1 support infrastructure, beware the dragons ya fools entering
         * here! */
        struct i915_dri1_state dri1;
+       /* Old ums support infrastructure, same warning applies. */
+       struct i915_ums_state ums;
 } drm_i915_private_t;
 
+static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
+{
+       return dev->dev_private;
+}
+
 /* Iterate over initialised rings */
 #define for_each_ring(ring__, dev_priv__, i__) \
        for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \
@@ -1187,7 +1291,7 @@ enum hdmi_force_audio {
        HDMI_AUDIO_ON,                  /* force turn on HDMI audio */
 };
 
-#define I915_GTT_RESERVED ((struct drm_mm_node *)0x1)
+#define I915_GTT_OFFSET_NONE ((u32)-1)
 
 struct drm_i915_gem_object_ops {
        /* Interface between the GEM object and its backing storage.
@@ -1212,17 +1316,16 @@ struct drm_i915_gem_object {
 
        const struct drm_i915_gem_object_ops *ops;
 
-       /** Current space allocated to this object in the GTT, if any. */
-       struct drm_mm_node *gtt_space;
+       /** List of VMAs backed by this object */
+       struct list_head vma_list;
+
        /** Stolen memory for this object, instead of being backed by shmem. */
        struct drm_mm_node *stolen;
        struct list_head global_list;
 
-       /** This object's place on the active/inactive lists */
        struct list_head ring_list;
-       struct list_head mm_list;
-       /** This object's place in the batchbuffer or on the eviction list */
-       struct list_head exec_list;
+       /** Used in execbuf to temporarily hold a ref */
+       struct list_head obj_exec_link;
 
        /**
         * This is set if the object is on the active lists (has pending
@@ -1287,6 +1390,7 @@ struct drm_i915_gem_object {
         */
        unsigned int fault_mappable:1;
        unsigned int pin_mappable:1;
+       unsigned int pin_display:1;
 
        /*
         * Is the GPU currently using a fence to access this buffer,
@@ -1294,7 +1398,7 @@ struct drm_i915_gem_object {
        unsigned int pending_fenced_gpu_access:1;
        unsigned int fenced_gpu_access:1;
 
-       unsigned int cache_level:2;
+       unsigned int cache_level:3;
 
        unsigned int has_aliasing_ppgtt_mapping:1;
        unsigned int has_global_gtt_mapping:1;
@@ -1307,20 +1411,6 @@ struct drm_i915_gem_object {
        void *dma_buf_vmapping;
        int vmapping_count;
 
-       /**
-        * Used for performing relocations during execbuffer insertion.
-        */
-       struct hlist_node exec_node;
-       unsigned long exec_handle;
-       struct drm_i915_gem_exec_object2 *exec_entry;
-
-       /**
-        * Current offset of the object in GTT space.
-        *
-        * This is the same as gtt_space->start
-        */
-       uint32_t gtt_offset;
-
        struct intel_ring_buffer *ring;
 
        /** Breadcrumb of last rendering to the buffer. */
@@ -1396,7 +1486,7 @@ struct drm_i915_file_private {
        struct i915_ctx_hang_stats hang_stats;
 };
 
-#define INTEL_INFO(dev)        (((struct drm_i915_private *) (dev)->dev_private)->info)
+#define INTEL_INFO(dev)        (to_i915(dev)->info)
 
 #define IS_I830(dev)           ((dev)->pci_device == 0x3577)
 #define IS_845G(dev)           ((dev)->pci_device == 0x2562)
@@ -1414,7 +1504,6 @@ struct drm_i915_file_private {
 #define IS_PINEVIEW_M(dev)     ((dev)->pci_device == 0xa011)
 #define IS_PINEVIEW(dev)       (INTEL_INFO(dev)->is_pineview)
 #define IS_G33(dev)            (INTEL_INFO(dev)->is_g33)
-#define IS_IRONLAKE_D(dev)     ((dev)->pci_device == 0x0042)
 #define IS_IRONLAKE_M(dev)     ((dev)->pci_device == 0x0046)
 #define IS_IVYBRIDGE(dev)      (INTEL_INFO(dev)->is_ivybridge)
 #define IS_IVB_GT1(dev)                ((dev)->pci_device == 0x0156 || \
@@ -1426,6 +1515,8 @@ struct drm_i915_file_private {
 #define IS_VALLEYVIEW(dev)     (INTEL_INFO(dev)->is_valleyview)
 #define IS_HASWELL(dev)        (INTEL_INFO(dev)->is_haswell)
 #define IS_MOBILE(dev)         (INTEL_INFO(dev)->is_mobile)
+#define IS_HSW_EARLY_SDV(dev)  (IS_HASWELL(dev) && \
+                                ((dev)->pci_device & 0xFF00) == 0x0C00)
 #define IS_ULT(dev)            (IS_HASWELL(dev) && \
                                 ((dev)->pci_device & 0xFF00) == 0x0A00)
 
@@ -1446,6 +1537,7 @@ struct drm_i915_file_private {
 #define HAS_BLT(dev)            (INTEL_INFO(dev)->has_blt_ring)
 #define HAS_VEBOX(dev)          (INTEL_INFO(dev)->has_vebox_ring)
 #define HAS_LLC(dev)            (INTEL_INFO(dev)->has_llc)
+#define HAS_WT(dev)            (IS_HASWELL(dev) && to_i915(dev)->ellc_size)
 #define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
 
 #define HAS_HW_CONTEXTS(dev)   (INTEL_INFO(dev)->gen >= 6)
@@ -1468,8 +1560,6 @@ struct drm_i915_file_private {
 #define SUPPORTS_EDP(dev)              (IS_IRONLAKE_M(dev))
 #define SUPPORTS_TV(dev)               (INTEL_INFO(dev)->supports_tv)
 #define I915_HAS_HOTPLUG(dev)           (INTEL_INFO(dev)->has_hotplug)
-/* dsparb controlled by hw only */
-#define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
 
 #define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2)
 #define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
@@ -1477,8 +1567,6 @@ struct drm_i915_file_private {
 
 #define HAS_IPS(dev)           (IS_ULT(dev))
 
-#define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5)
-
 #define HAS_DDI(dev)           (INTEL_INFO(dev)->has_ddi)
 #define HAS_POWER_WELL(dev)    (IS_HASWELL(dev))
 #define HAS_FPGA_DBG_UNCLAIMED(dev)    (INTEL_INFO(dev)->has_fpga_dbg)
@@ -1490,7 +1578,7 @@ struct drm_i915_file_private {
 #define INTEL_PCH_LPT_DEVICE_ID_TYPE           0x8c00
 #define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE                0x9c00
 
-#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
+#define INTEL_PCH_TYPE(dev) (to_i915(dev)->pch_type)
 #define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
 #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
 #define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
@@ -1526,7 +1614,7 @@ struct drm_i915_file_private {
 #define INTEL_RC6p_ENABLE                      (1<<1)
 #define INTEL_RC6pp_ENABLE                     (1<<2)
 
-extern struct drm_ioctl_desc i915_ioctls[];
+extern const struct drm_ioctl_desc i915_ioctls[];
 extern int i915_max_ioctl;
 extern unsigned int i915_fbpercrtc __always_unused;
 extern int i915_panel_ignore_lid __read_mostly;
@@ -1540,9 +1628,12 @@ extern int i915_enable_rc6 __read_mostly;
 extern int i915_enable_fbc __read_mostly;
 extern bool i915_enable_hangcheck __read_mostly;
 extern int i915_enable_ppgtt __read_mostly;
+extern int i915_enable_psr __read_mostly;
 extern unsigned int i915_preliminary_hw_support __read_mostly;
 extern int i915_disable_power_well __read_mostly;
 extern int i915_enable_ips __read_mostly;
+extern bool i915_fastboot __read_mostly;
+extern bool i915_prefault_disable __read_mostly;
 
 extern int i915_suspend(struct drm_device *dev, pm_message_t state);
 extern int i915_resume(struct drm_device *dev);
@@ -1578,16 +1669,19 @@ extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
 extern void intel_console_resume(struct work_struct *work);
 
 /* i915_irq.c */
-void i915_hangcheck_elapsed(unsigned long data);
+void i915_queue_hangcheck(struct drm_device *dev);
 void i915_handle_error(struct drm_device *dev, bool wedged);
 
 extern void intel_irq_init(struct drm_device *dev);
 extern void intel_pm_init(struct drm_device *dev);
 extern void intel_hpd_init(struct drm_device *dev);
-extern void intel_gt_init(struct drm_device *dev);
-extern void intel_gt_sanitize(struct drm_device *dev);
+extern void intel_pm_init(struct drm_device *dev);
 
-void i915_error_state_free(struct kref *error_ref);
+extern void intel_uncore_sanitize(struct drm_device *dev);
+extern void intel_uncore_early_sanitize(struct drm_device *dev);
+extern void intel_uncore_init(struct drm_device *dev);
+extern void intel_uncore_clear_errors(struct drm_device *dev);
+extern void intel_uncore_check_errors(struct drm_device *dev);
 
 void
 i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
@@ -1595,13 +1689,6 @@ i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
 void
 i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
 
-#ifdef CONFIG_DEBUG_FS
-extern void i915_destroy_error_state(struct drm_device *dev);
-#else
-#define i915_destroy_error_state(x)
-#endif
-
-
 /* i915_gem.c */
 int i915_gem_init_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file_priv);
@@ -1658,13 +1745,16 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
                                                  size_t size);
 void i915_gem_free_object(struct drm_gem_object *obj);
+void i915_gem_vma_destroy(struct i915_vma *vma);
 
 int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
+                                    struct i915_address_space *vm,
                                     uint32_t alignment,
                                     bool map_and_fenceable,
                                     bool nonblocking);
 void i915_gem_object_unpin(struct drm_i915_gem_object *obj);
-int __must_check i915_gem_object_unbind(struct drm_i915_gem_object *obj);
+int __must_check i915_vma_unbind(struct i915_vma *vma);
+int __must_check i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj);
 int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
 void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
 void i915_gem_lastclose(struct drm_device *dev);
@@ -1701,8 +1791,6 @@ int i915_gem_dumb_create(struct drm_file *file_priv,
                         struct drm_mode_create_dumb *args);
 int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
                      uint32_t handle, uint64_t *offset);
-int i915_gem_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev,
-                         uint32_t handle);
 /**
  * Returns true if seq1 is later than seq2.
  */
@@ -1754,10 +1842,7 @@ static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
 }
 
 void i915_gem_reset(struct drm_device *dev);
-void i915_gem_clflush_object(struct drm_i915_gem_object *obj);
-int __must_check i915_gem_object_set_domain(struct drm_i915_gem_object *obj,
-                                           uint32_t read_domains,
-                                           uint32_t write_domain);
+bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
 int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
 int __must_check i915_gem_init(struct drm_device *dev);
 int __must_check i915_gem_init_hw(struct drm_device *dev);
@@ -1784,6 +1869,7 @@ int __must_check
 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
                                     u32 alignment,
                                     struct intel_ring_buffer *pipelined);
+void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj);
 int i915_gem_attach_phys_object(struct drm_device *dev,
                                struct drm_i915_gem_object *obj,
                                int id,
@@ -1810,6 +1896,56 @@ struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
 
 void i915_gem_restore_fences(struct drm_device *dev);
 
+unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
+                                 struct i915_address_space *vm);
+bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o);
+bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
+                       struct i915_address_space *vm);
+unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
+                               struct i915_address_space *vm);
+struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
+                                    struct i915_address_space *vm);
+struct i915_vma *
+i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
+                                 struct i915_address_space *vm);
+/* Some GGTT VM helpers */
+#define obj_to_ggtt(obj) \
+       (&((struct drm_i915_private *)(obj)->base.dev->dev_private)->gtt.base)
+static inline bool i915_is_ggtt(struct i915_address_space *vm)
+{
+       struct i915_address_space *ggtt =
+               &((struct drm_i915_private *)(vm)->dev->dev_private)->gtt.base;
+       return vm == ggtt;
+}
+
+static inline bool i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *obj)
+{
+       return i915_gem_obj_bound(obj, obj_to_ggtt(obj));
+}
+
+static inline unsigned long
+i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *obj)
+{
+       return i915_gem_obj_offset(obj, obj_to_ggtt(obj));
+}
+
+static inline unsigned long
+i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj)
+{
+       return i915_gem_obj_size(obj, obj_to_ggtt(obj));
+}
+
+static inline int __must_check
+i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj,
+                     uint32_t alignment,
+                     bool map_and_fenceable,
+                     bool nonblocking)
+{
+       return i915_gem_object_pin(obj, obj_to_ggtt(obj), alignment,
+                                  map_and_fenceable, nonblocking);
+}
+#undef obj_to_ggtt
+
 /* i915_gem_context.c */
 void i915_gem_context_init(struct drm_device *dev);
 void i915_gem_context_fini(struct drm_device *dev);
@@ -1828,7 +1964,7 @@ static inline void i915_gem_context_unreference(struct i915_hw_context *ctx)
 }
 
 struct i915_ctx_hang_stats * __must_check
-i915_gem_context_get_hang_stats(struct intel_ring_buffer *ring,
+i915_gem_context_get_hang_stats(struct drm_device *dev,
                                struct drm_file *file,
                                u32 id);
 int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
@@ -1862,7 +1998,9 @@ static inline void i915_gem_chipset_flush(struct drm_device *dev)
 
 
 /* i915_gem_evict.c */
-int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size,
+int __must_check i915_gem_evict_something(struct drm_device *dev,
+                                         struct i915_address_space *vm,
+                                         int min_size,
                                          unsigned alignment,
                                          unsigned cache_level,
                                          bool mappable,
@@ -1884,7 +2022,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
 void i915_gem_object_release_stolen(struct drm_i915_gem_object *obj);
 
 /* i915_gem_tiling.c */
-inline static bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
+static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
 {
        drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
 
@@ -1897,23 +2035,36 @@ void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj);
 void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);
 
 /* i915_gem_debug.c */
-void i915_gem_dump_object(struct drm_i915_gem_object *obj, int len,
-                         const char *where, uint32_t mark);
 #if WATCH_LISTS
 int i915_verify_lists(struct drm_device *dev);
 #else
 #define i915_verify_lists(dev) 0
 #endif
-void i915_gem_object_check_coherency(struct drm_i915_gem_object *obj,
-                                    int handle);
-void i915_gem_dump_object(struct drm_i915_gem_object *obj, int len,
-                         const char *where, uint32_t mark);
 
 /* i915_debugfs.c */
 int i915_debugfs_init(struct drm_minor *minor);
 void i915_debugfs_cleanup(struct drm_minor *minor);
+
+/* i915_gpu_error.c */
 __printf(2, 3)
 void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);
+int i915_error_state_to_str(struct drm_i915_error_state_buf *estr,
+                           const struct i915_error_state_file_priv *error);
+int i915_error_state_buf_init(struct drm_i915_error_state_buf *eb,
+                             size_t count, loff_t pos);
+static inline void i915_error_state_buf_release(
+       struct drm_i915_error_state_buf *eb)
+{
+       kfree(eb->buf);
+}
+void i915_capture_error_state(struct drm_device *dev);
+void i915_error_state_get(struct drm_device *dev,
+                         struct i915_error_state_file_priv *error_priv);
+void i915_error_state_put(struct i915_error_state_file_priv *error_priv);
+void i915_destroy_error_state(struct drm_device *dev);
+
+void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone);
+const char *i915_cache_level_str(int type);
 
 /* i915_suspend.c */
 extern int i915_save_state(struct drm_device *dev);
@@ -1993,7 +2144,6 @@ int i915_reg_read_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file);
 
 /* overlay */
-#ifdef CONFIG_DEBUG_FS
 extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
 extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,
                                            struct intel_overlay_error_state *error);
@@ -2002,7 +2152,6 @@ extern struct intel_display_error_state *intel_display_capture_error_state(struc
 extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
                                            struct drm_device *dev,
                                            struct intel_display_error_state *error);
-#endif
 
 /* On SNB platform, before reading ring registers forcewake bit
  * must be set to prevent GT core from power down and stale values being
@@ -2010,7 +2159,6 @@ extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
  */
 void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
 void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
-int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
 
 int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val);
 int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val);
@@ -2029,39 +2177,37 @@ void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
 int vlv_gpu_freq(int ddr_freq, int val);
 int vlv_freq_opcode(int ddr_freq, int val);
 
-#define __i915_read(x, y) \
-       u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg);
-
-__i915_read(8, b)
-__i915_read(16, w)
-__i915_read(32, l)
-__i915_read(64, q)
+#define __i915_read(x) \
+       u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg, bool trace);
+__i915_read(8)
+__i915_read(16)
+__i915_read(32)
+__i915_read(64)
 #undef __i915_read
 
-#define __i915_write(x, y) \
-       void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val);
-
-__i915_write(8, b)
-__i915_write(16, w)
-__i915_write(32, l)
-__i915_write(64, q)
+#define __i915_write(x) \
+       void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val, bool trace);
+__i915_write(8)
+__i915_write(16)
+__i915_write(32)
+__i915_write(64)
 #undef __i915_write
 
-#define I915_READ8(reg)                i915_read8(dev_priv, (reg))
-#define I915_WRITE8(reg, val)  i915_write8(dev_priv, (reg), (val))
+#define I915_READ8(reg)                i915_read8(dev_priv, (reg), true)
+#define I915_WRITE8(reg, val)  i915_write8(dev_priv, (reg), (val), true)
 
-#define I915_READ16(reg)       i915_read16(dev_priv, (reg))
-#define I915_WRITE16(reg, val) i915_write16(dev_priv, (reg), (val))
-#define I915_READ16_NOTRACE(reg)       readw(dev_priv->regs + (reg))
-#define I915_WRITE16_NOTRACE(reg, val) writew(val, dev_priv->regs + (reg))
+#define I915_READ16(reg)       i915_read16(dev_priv, (reg), true)
+#define I915_WRITE16(reg, val) i915_write16(dev_priv, (reg), (val), true)
+#define I915_READ16_NOTRACE(reg)       i915_read16(dev_priv, (reg), false)
+#define I915_WRITE16_NOTRACE(reg, val) i915_write16(dev_priv, (reg), (val), false)
 
-#define I915_READ(reg)         i915_read32(dev_priv, (reg))
-#define I915_WRITE(reg, val)   i915_write32(dev_priv, (reg), (val))
-#define I915_READ_NOTRACE(reg)         readl(dev_priv->regs + (reg))
-#define I915_WRITE_NOTRACE(reg, val)   writel(val, dev_priv->regs + (reg))
+#define I915_READ(reg)         i915_read32(dev_priv, (reg), true)
+#define I915_WRITE(reg, val)   i915_write32(dev_priv, (reg), (val), true)
+#define I915_READ_NOTRACE(reg)         i915_read32(dev_priv, (reg), false)
+#define I915_WRITE_NOTRACE(reg, val)   i915_write32(dev_priv, (reg), (val), false)
 
-#define I915_WRITE64(reg, val) i915_write64(dev_priv, (reg), (val))
-#define I915_READ64(reg)       i915_read64(dev_priv, (reg))
+#define I915_WRITE64(reg, val) i915_write64(dev_priv, (reg), (val), true)
+#define I915_READ64(reg)       i915_read64(dev_priv, (reg), true)
 
 #define POSTING_READ(reg)      (void)I915_READ_NOTRACE(reg)
 #define POSTING_READ16(reg)    (void)I915_READ16_NOTRACE(reg)
index d9e2208cfe98f2fdcb3957bf4ee13f0497e06dfb..f70531445b7cfc30ee192f4f1f66f1c0e9d06b95 100644 (file)
@@ -26,6 +26,7 @@
  */
 
 #include <drm/drmP.h>
+#include <drm/drm_vma_manager.h>
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
 #include "i915_trace.h"
 #include <linux/dma-buf.h>
 
 static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
-static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
-static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
-                                                   unsigned alignment,
-                                                   bool map_and_fenceable,
-                                                   bool nonblocking);
+static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
+                                                  bool force);
+static __must_check int
+i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
+                          struct i915_address_space *vm,
+                          unsigned alignment,
+                          bool map_and_fenceable,
+                          bool nonblocking);
 static int i915_gem_phys_pwrite(struct drm_device *dev,
                                struct drm_i915_gem_object *obj,
                                struct drm_i915_gem_pwrite *args,
@@ -59,6 +63,20 @@ static long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
 static void i915_gem_shrink_all(struct drm_i915_private *dev_priv);
 static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
 
+static bool cpu_cache_is_coherent(struct drm_device *dev,
+                                 enum i915_cache_level level)
+{
+       return HAS_LLC(dev) || level != I915_CACHE_NONE;
+}
+
+static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
+{
+       if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
+               return true;
+
+       return obj->pin_display;
+}
+
 static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
 {
        if (obj->tiling_mode)
@@ -75,15 +93,19 @@ static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
 static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
                                  size_t size)
 {
+       spin_lock(&dev_priv->mm.object_stat_lock);
        dev_priv->mm.object_count++;
        dev_priv->mm.object_memory += size;
+       spin_unlock(&dev_priv->mm.object_stat_lock);
 }
 
 static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
                                     size_t size)
 {
+       spin_lock(&dev_priv->mm.object_stat_lock);
        dev_priv->mm.object_count--;
        dev_priv->mm.object_memory -= size;
+       spin_unlock(&dev_priv->mm.object_stat_lock);
 }
 
 static int
@@ -135,7 +157,7 @@ int i915_mutex_lock_interruptible(struct drm_device *dev)
 static inline bool
 i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
 {
-       return obj->gtt_space && !obj->active;
+       return i915_gem_obj_bound_any(obj) && !obj->active;
 }
 
 int
@@ -178,10 +200,10 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
        mutex_lock(&dev->struct_mutex);
        list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
                if (obj->pin_count)
-                       pinned += obj->gtt_space->size;
+                       pinned += i915_gem_obj_ggtt_size(obj);
        mutex_unlock(&dev->struct_mutex);
 
-       args->aper_size = dev_priv->gtt.total;
+       args->aper_size = dev_priv->gtt.base.total;
        args->aper_available_size = args->aper_size - pinned;
 
        return 0;
@@ -219,16 +241,10 @@ i915_gem_create(struct drm_file *file,
                return -ENOMEM;
 
        ret = drm_gem_handle_create(file, &obj->base, &handle);
-       if (ret) {
-               drm_gem_object_release(&obj->base);
-               i915_gem_info_remove_obj(dev->dev_private, obj->base.size);
-               i915_gem_object_free(obj);
-               return ret;
-       }
-
        /* drop reference from allocate - handle holds it now */
-       drm_gem_object_unreference(&obj->base);
-       trace_i915_gem_object_create(obj);
+       drm_gem_object_unreference_unlocked(&obj->base);
+       if (ret)
+               return ret;
 
        *handle_p = handle;
        return 0;
@@ -246,13 +262,6 @@ i915_gem_dumb_create(struct drm_file *file,
                               args->size, &args->handle);
 }
 
-int i915_gem_dumb_destroy(struct drm_file *file,
-                         struct drm_device *dev,
-                         uint32_t handle)
-{
-       return drm_gem_handle_delete(file, handle);
-}
-
 /**
  * Creates a new mm object and returns a handle to it.
  */
@@ -420,9 +429,8 @@ i915_gem_shmem_pread(struct drm_device *dev,
                 * read domain and manually flush cachelines (if required). This
                 * optimizes for the case when the gpu will dirty the data
                 * anyway again before the next pread happens. */
-               if (obj->cache_level == I915_CACHE_NONE)
-                       needs_clflush = 1;
-               if (obj->gtt_space) {
+               needs_clflush = !cpu_cache_is_coherent(dev, obj->cache_level);
+               if (i915_gem_obj_bound_any(obj)) {
                        ret = i915_gem_object_set_to_gtt_domain(obj, false);
                        if (ret)
                                return ret;
@@ -465,7 +473,7 @@ i915_gem_shmem_pread(struct drm_device *dev,
 
                mutex_unlock(&dev->struct_mutex);
 
-               if (!prefaulted) {
+               if (likely(!i915_prefault_disable) && !prefaulted) {
                        ret = fault_in_multipages_writeable(user_data, remain);
                        /* Userspace is tricking us, but we've already clobbered
                         * its pages with the prefault and promised to write the
@@ -594,7 +602,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
        char __user *user_data;
        int page_offset, page_length, ret;
 
-       ret = i915_gem_object_pin(obj, 0, true, true);
+       ret = i915_gem_obj_ggtt_pin(obj, 0, true, true);
        if (ret)
                goto out;
 
@@ -609,7 +617,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
        user_data = to_user_ptr(args->data_ptr);
        remain = args->size;
 
-       offset = obj->gtt_offset + args->offset;
+       offset = i915_gem_obj_ggtt_offset(obj) + args->offset;
 
        while (remain > 0) {
                /* Operation in this page
@@ -737,19 +745,18 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
                 * write domain and manually flush cachelines (if required). This
                 * optimizes for the case when the gpu will use the data
                 * right away and we therefore have to clflush anyway. */
-               if (obj->cache_level == I915_CACHE_NONE)
-                       needs_clflush_after = 1;
-               if (obj->gtt_space) {
+               needs_clflush_after = cpu_write_needs_clflush(obj);
+               if (i915_gem_obj_bound_any(obj)) {
                        ret = i915_gem_object_set_to_gtt_domain(obj, true);
                        if (ret)
                                return ret;
                }
        }
-       /* Same trick applies for invalidate partially written cachelines before
-        * writing.  */
-       if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)
-           && obj->cache_level == I915_CACHE_NONE)
-               needs_clflush_before = 1;
+       /* Same trick applies to invalidate partially written cachelines read
+        * before writing. */
+       if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
+               needs_clflush_before =
+                       !cpu_cache_is_coherent(dev, obj->cache_level);
 
        ret = i915_gem_object_get_pages(obj);
        if (ret)
@@ -828,8 +835,8 @@ out:
                 */
                if (!needs_clflush_after &&
                    obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
-                       i915_gem_clflush_object(obj);
-                       i915_gem_chipset_flush(dev);
+                       if (i915_gem_clflush_object(obj, obj->pin_display))
+                               i915_gem_chipset_flush(dev);
                }
        }
 
@@ -860,10 +867,12 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
                       args->size))
                return -EFAULT;
 
-       ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr),
-                                          args->size);
-       if (ret)
-               return -EFAULT;
+       if (likely(!i915_prefault_disable)) {
+               ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr),
+                                                  args->size);
+               if (ret)
+                       return -EFAULT;
+       }
 
        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
@@ -904,9 +913,9 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
                goto out;
        }
 
-       if (obj->cache_level == I915_CACHE_NONE &&
-           obj->tiling_mode == I915_TILING_NONE &&
-           obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
+       if (obj->tiling_mode == I915_TILING_NONE &&
+           obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
+           cpu_write_needs_clflush(obj)) {
                ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
                /* Note that the gtt paths might fail with non-page-backed user
                 * pointers (e.g. gtt mappings when moving data between
@@ -1255,8 +1264,8 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
        }
 
        /* Pinned buffers may be scanout, so flush the cache */
-       if (obj->pin_count)
-               i915_gem_object_flush_cpu_write_domain(obj);
+       if (obj->pin_display)
+               i915_gem_object_flush_cpu_write_domain(obj, true);
 
        drm_gem_object_unreference(&obj->base);
 unlock:
@@ -1346,7 +1355,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
        }
 
        /* Now bind it into the GTT if needed */
-       ret = i915_gem_object_pin(obj, 0, true, false);
+       ret = i915_gem_obj_ggtt_pin(obj,  0, true, false);
        if (ret)
                goto unlock;
 
@@ -1360,8 +1369,9 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 
        obj->fault_mappable = true;
 
-       pfn = ((dev_priv->gtt.mappable_base + obj->gtt_offset) >> PAGE_SHIFT) +
-               page_offset;
+       pfn = dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj);
+       pfn >>= PAGE_SHIFT;
+       pfn += page_offset;
 
        /* Finally, remap it using the new GTT offset */
        ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
@@ -1425,11 +1435,7 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj)
        if (!obj->fault_mappable)
                return;
 
-       if (obj->base.dev->dev_mapping)
-               unmap_mapping_range(obj->base.dev->dev_mapping,
-                                   (loff_t)obj->base.map_list.hash.key<<PAGE_SHIFT,
-                                   obj->base.size, 1);
-
+       drm_vma_node_unmap(&obj->base.vma_node, obj->base.dev->dev_mapping);
        obj->fault_mappable = false;
 }
 
@@ -1485,7 +1491,7 @@ static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
        struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
        int ret;
 
-       if (obj->base.map_list.map)
+       if (drm_vma_node_has_offset(&obj->base.vma_node))
                return 0;
 
        dev_priv->mm.shrinker_no_lock_stealing = true;
@@ -1516,9 +1522,6 @@ out:
 
 static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
 {
-       if (!obj->base.map_list.map)
-               return;
-
        drm_gem_free_mmap_offset(&obj->base);
 }
 
@@ -1557,7 +1560,7 @@ i915_gem_mmap_gtt(struct drm_file *file,
        if (ret)
                goto out;
 
-       *offset = (u64)obj->base.map_list.hash.key << PAGE_SHIFT;
+       *offset = drm_vma_node_offset_addr(&obj->base.vma_node);
 
 out:
        drm_gem_object_unreference(&obj->base);
@@ -1632,7 +1635,7 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
                 * hope for the best.
                 */
                WARN_ON(ret != -EIO);
-               i915_gem_clflush_object(obj);
+               i915_gem_clflush_object(obj, true);
                obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
        }
 
@@ -1667,11 +1670,11 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
        if (obj->pages == NULL)
                return 0;
 
-       BUG_ON(obj->gtt_space);
-
        if (obj->pages_pin_count)
                return -EBUSY;
 
+       BUG_ON(i915_gem_obj_bound_any(obj));
+
        /* ->put_pages might need to allocate memory for the bit17 swizzle
         * array, hence protect them from being reaped by removing them from gtt
         * lists early. */
@@ -1704,12 +1707,18 @@ __i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
                }
        }
 
-       list_for_each_entry_safe(obj, next,
-                                &dev_priv->mm.inactive_list,
-                                mm_list) {
-               if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
-                   i915_gem_object_unbind(obj) == 0 &&
-                   i915_gem_object_put_pages(obj) == 0) {
+       list_for_each_entry_safe(obj, next, &dev_priv->mm.bound_list,
+                                global_list) {
+               struct i915_vma *vma, *v;
+
+               if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
+                       continue;
+
+               list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link)
+                       if (i915_vma_unbind(vma))
+                               break;
+
+               if (!i915_gem_object_put_pages(obj)) {
                        count += obj->base.size >> PAGE_SHIFT;
                        if (count >= target)
                                return count;
@@ -1892,8 +1901,6 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
                obj->active = 1;
        }
 
-       /* Move from whatever list we were on to the tail of execution. */
-       list_move_tail(&obj->mm_list, &dev_priv->mm.active_list);
        list_move_tail(&obj->ring_list, &ring->active_list);
 
        obj->last_read_seqno = seqno;
@@ -1915,13 +1922,14 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
 static void
 i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
 {
-       struct drm_device *dev = obj->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+       struct i915_address_space *ggtt_vm = &dev_priv->gtt.base;
+       struct i915_vma *vma = i915_gem_obj_to_vma(obj, ggtt_vm);
 
        BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
        BUG_ON(!obj->active);
 
-       list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
+       list_move_tail(&vma->mm_list, &ggtt_vm->inactive_list);
 
        list_del_init(&obj->ring_list);
        obj->ring = NULL;
@@ -2085,11 +2093,9 @@ int __i915_add_request(struct intel_ring_buffer *ring,
        trace_i915_gem_request_add(ring, request->seqno);
        ring->outstanding_lazy_request = 0;
 
-       if (!dev_priv->mm.suspended) {
-               if (i915_enable_hangcheck) {
-                       mod_timer(&dev_priv->gpu_error.hangcheck_timer,
-                                 round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
-               }
+       if (!dev_priv->ums.mm_suspended) {
+               i915_queue_hangcheck(ring->dev);
+
                if (was_empty) {
                        queue_delayed_work(dev_priv->wq,
                                           &dev_priv->mm.retire_work,
@@ -2119,10 +2125,11 @@ i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
        spin_unlock(&file_priv->mm.lock);
 }
 
-static bool i915_head_inside_object(u32 acthd, struct drm_i915_gem_object *obj)
+static bool i915_head_inside_object(u32 acthd, struct drm_i915_gem_object *obj,
+                                   struct i915_address_space *vm)
 {
-       if (acthd >= obj->gtt_offset &&
-           acthd < obj->gtt_offset + obj->base.size)
+       if (acthd >= i915_gem_obj_offset(obj, vm) &&
+           acthd < i915_gem_obj_offset(obj, vm) + obj->base.size)
                return true;
 
        return false;
@@ -2145,6 +2152,17 @@ static bool i915_head_inside_request(const u32 acthd_unmasked,
        return false;
 }
 
+static struct i915_address_space *
+request_to_vm(struct drm_i915_gem_request *request)
+{
+       struct drm_i915_private *dev_priv = request->ring->dev->dev_private;
+       struct i915_address_space *vm;
+
+       vm = &dev_priv->gtt.base;
+
+       return vm;
+}
+
 static bool i915_request_guilty(struct drm_i915_gem_request *request,
                                const u32 acthd, bool *inside)
 {
@@ -2152,9 +2170,9 @@ static bool i915_request_guilty(struct drm_i915_gem_request *request,
         * pointing inside the ring, matches the batch_obj address range.
         * However this is extremely unlikely.
         */
-
        if (request->batch_obj) {
-               if (i915_head_inside_object(acthd, request->batch_obj)) {
+               if (i915_head_inside_object(acthd, request->batch_obj,
+                                           request_to_vm(request))) {
                        *inside = true;
                        return true;
                }
@@ -2174,17 +2192,21 @@ static void i915_set_reset_status(struct intel_ring_buffer *ring,
 {
        struct i915_ctx_hang_stats *hs = NULL;
        bool inside, guilty;
+       unsigned long offset = 0;
 
        /* Innocent until proven guilty */
        guilty = false;
 
-       if (ring->hangcheck.action != wait &&
+       if (request->batch_obj)
+               offset = i915_gem_obj_offset(request->batch_obj,
+                                            request_to_vm(request));
+
+       if (ring->hangcheck.action != HANGCHECK_WAIT &&
            i915_request_guilty(request, acthd, &inside)) {
-               DRM_ERROR("%s hung %s bo (0x%x ctx %d) at 0x%x\n",
+               DRM_ERROR("%s hung %s bo (0x%lx ctx %d) at 0x%x\n",
                          ring->name,
                          inside ? "inside" : "flushing",
-                         request->batch_obj ?
-                         request->batch_obj->gtt_offset : 0,
+                         offset,
                          request->ctx ? request->ctx->id : 0,
                          acthd);
 
@@ -2275,23 +2297,12 @@ void i915_gem_restore_fences(struct drm_device *dev)
 void i915_gem_reset(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_i915_gem_object *obj;
        struct intel_ring_buffer *ring;
        int i;
 
        for_each_ring(ring, dev_priv, i)
                i915_gem_reset_ring_lists(dev_priv, ring);
 
-       /* Move everything out of the GPU domains to ensure we do any
-        * necessary invalidation upon reuse.
-        */
-       list_for_each_entry(obj,
-                           &dev_priv->mm.inactive_list,
-                           mm_list)
-       {
-               obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
-       }
-
        i915_gem_restore_fences(dev);
 }
 
@@ -2400,7 +2411,7 @@ i915_gem_retire_work_handler(struct work_struct *work)
                idle &= list_empty(&ring->request_list);
        }
 
-       if (!dev_priv->mm.suspended && !idle)
+       if (!dev_priv->ums.mm_suspended && !idle)
                queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
                                   round_jiffies_up_relative(HZ));
        if (idle)
@@ -2586,18 +2597,18 @@ static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
                                            old_write_domain);
 }
 
-/**
- * Unbinds an object from the GTT aperture.
- */
-int
-i915_gem_object_unbind(struct drm_i915_gem_object *obj)
+int i915_vma_unbind(struct i915_vma *vma)
 {
+       struct drm_i915_gem_object *obj = vma->obj;
        drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
        int ret;
 
-       if (obj->gtt_space == NULL)
+       if (list_empty(&vma->vma_link))
                return 0;
 
+       if (!drm_mm_node_allocated(&vma->node))
+               goto destroy;
+
        if (obj->pin_count)
                return -EBUSY;
 
@@ -2618,7 +2629,7 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
        if (ret)
                return ret;
 
-       trace_i915_gem_object_unbind(obj);
+       trace_i915_vma_unbind(vma);
 
        if (obj->has_global_gtt_mapping)
                i915_gem_gtt_unbind_object(obj);
@@ -2629,18 +2640,46 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
        i915_gem_gtt_finish_object(obj);
        i915_gem_object_unpin_pages(obj);
 
-       list_del(&obj->mm_list);
-       list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
+       list_del(&vma->mm_list);
        /* Avoid an unnecessary call to unbind on rebind. */
-       obj->map_and_fenceable = true;
+       if (i915_is_ggtt(vma->vm))
+               obj->map_and_fenceable = true;
+
+       drm_mm_remove_node(&vma->node);
+
+destroy:
+       i915_gem_vma_destroy(vma);
 
-       drm_mm_put_block(obj->gtt_space);
-       obj->gtt_space = NULL;
-       obj->gtt_offset = 0;
+       /* Since the unbound list is global, only move to that list if
+        * no more VMAs exist.
+        * NB: Until we have real VMAs there will only ever be one */
+       WARN_ON(!list_empty(&obj->vma_list));
+       if (list_empty(&obj->vma_list))
+               list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
 
        return 0;
 }
 
+/**
+ * Unbinds an object from the global GTT aperture.
+ */
+int
+i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj)
+{
+       struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+       struct i915_address_space *ggtt = &dev_priv->gtt.base;
+
+       if (!i915_gem_obj_ggtt_bound(obj))
+               return 0;
+
+       if (obj->pin_count)
+               return -EBUSY;
+
+       BUG_ON(obj->pages == NULL);
+
+       return i915_vma_unbind(i915_gem_obj_to_vma(obj, ggtt));
+}
+
 int i915_gpu_idle(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
@@ -2691,12 +2730,12 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg,
        POSTING_READ(fence_reg);
 
        if (obj) {
-               u32 size = obj->gtt_space->size;
+               u32 size = i915_gem_obj_ggtt_size(obj);
                uint64_t val;
 
-               val = (uint64_t)((obj->gtt_offset + size - 4096) &
+               val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) &
                                 0xfffff000) << 32;
-               val |= obj->gtt_offset & 0xfffff000;
+               val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000;
                val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
                if (obj->tiling_mode == I915_TILING_Y)
                        val |= 1 << I965_FENCE_TILING_Y_SHIFT;
@@ -2720,15 +2759,15 @@ static void i915_write_fence_reg(struct drm_device *dev, int reg,
        u32 val;
 
        if (obj) {
-               u32 size = obj->gtt_space->size;
+               u32 size = i915_gem_obj_ggtt_size(obj);
                int pitch_val;
                int tile_width;
 
-               WARN((obj->gtt_offset & ~I915_FENCE_START_MASK) ||
+               WARN((i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) ||
                     (size & -size) != size ||
-                    (obj->gtt_offset & (size - 1)),
-                    "object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
-                    obj->gtt_offset, obj->map_and_fenceable, size);
+                    (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
+                    "object 0x%08lx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
+                    i915_gem_obj_ggtt_offset(obj), obj->map_and_fenceable, size);
 
                if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
                        tile_width = 128;
@@ -2739,7 +2778,7 @@ static void i915_write_fence_reg(struct drm_device *dev, int reg,
                pitch_val = obj->stride / tile_width;
                pitch_val = ffs(pitch_val) - 1;
 
-               val = obj->gtt_offset;
+               val = i915_gem_obj_ggtt_offset(obj);
                if (obj->tiling_mode == I915_TILING_Y)
                        val |= 1 << I830_FENCE_TILING_Y_SHIFT;
                val |= I915_FENCE_SIZE_BITS(size);
@@ -2764,19 +2803,19 @@ static void i830_write_fence_reg(struct drm_device *dev, int reg,
        uint32_t val;
 
        if (obj) {
-               u32 size = obj->gtt_space->size;
+               u32 size = i915_gem_obj_ggtt_size(obj);
                uint32_t pitch_val;
 
-               WARN((obj->gtt_offset & ~I830_FENCE_START_MASK) ||
+               WARN((i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) ||
                     (size & -size) != size ||
-                    (obj->gtt_offset & (size - 1)),
-                    "object 0x%08x not 512K or pot-size 0x%08x aligned\n",
-                    obj->gtt_offset, size);
+                    (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
+                    "object 0x%08lx not 512K or pot-size 0x%08x aligned\n",
+                    i915_gem_obj_ggtt_offset(obj), size);
 
                pitch_val = obj->stride / 128;
                pitch_val = ffs(pitch_val) - 1;
 
-               val = obj->gtt_offset;
+               val = i915_gem_obj_ggtt_offset(obj);
                if (obj->tiling_mode == I915_TILING_Y)
                        val |= 1 << I830_FENCE_TILING_Y_SHIFT;
                val |= I830_FENCE_SIZE_BITS(size);
@@ -2997,7 +3036,7 @@ static bool i915_gem_valid_gtt_space(struct drm_device *dev,
        if (HAS_LLC(dev))
                return true;
 
-       if (gtt_space == NULL)
+       if (!drm_mm_node_allocated(gtt_space))
                return true;
 
        if (list_empty(&gtt_space->node_list))
@@ -3030,8 +3069,8 @@ static void i915_gem_verify_gtt(struct drm_device *dev)
 
                if (obj->cache_level != obj->gtt_space->color) {
                        printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n",
-                              obj->gtt_space->start,
-                              obj->gtt_space->start + obj->gtt_space->size,
+                              i915_gem_obj_ggtt_offset(obj),
+                              i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
                               obj->cache_level,
                               obj->gtt_space->color);
                        err++;
@@ -3042,8 +3081,8 @@ static void i915_gem_verify_gtt(struct drm_device *dev)
                                              obj->gtt_space,
                                              obj->cache_level)) {
                        printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n",
-                              obj->gtt_space->start,
-                              obj->gtt_space->start + obj->gtt_space->size,
+                              i915_gem_obj_ggtt_offset(obj),
+                              i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
                               obj->cache_level);
                        err++;
                        continue;
@@ -3058,18 +3097,18 @@ static void i915_gem_verify_gtt(struct drm_device *dev)
  * Finds free space in the GTT aperture and binds the object there.
  */
 static int
-i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
-                           unsigned alignment,
-                           bool map_and_fenceable,
-                           bool nonblocking)
+i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
+                          struct i915_address_space *vm,
+                          unsigned alignment,
+                          bool map_and_fenceable,
+                          bool nonblocking)
 {
        struct drm_device *dev = obj->base.dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_mm_node *node;
        u32 size, fence_size, fence_alignment, unfenced_alignment;
-       bool mappable, fenceable;
-       size_t gtt_max = map_and_fenceable ?
-               dev_priv->gtt.mappable_end : dev_priv->gtt.total;
+       size_t gtt_max =
+               map_and_fenceable ? dev_priv->gtt.mappable_end : vm->total;
+       struct i915_vma *vma;
        int ret;
 
        fence_size = i915_gem_get_gtt_size(dev,
@@ -3110,77 +3149,89 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
 
        i915_gem_object_pin_pages(obj);
 
-       node = kzalloc(sizeof(*node), GFP_KERNEL);
-       if (node == NULL) {
-               i915_gem_object_unpin_pages(obj);
-               return -ENOMEM;
+       BUG_ON(!i915_is_ggtt(vm));
+
+       vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
+       if (IS_ERR(vma)) {
+               ret = PTR_ERR(vma);
+               goto err_unpin;
        }
 
+       /* For now we only ever use 1 vma per object */
+       WARN_ON(!list_is_singular(&obj->vma_list));
+
 search_free:
-       ret = drm_mm_insert_node_in_range_generic(&dev_priv->mm.gtt_space, node,
+       ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
                                                  size, alignment,
-                                                 obj->cache_level, 0, gtt_max);
+                                                 obj->cache_level, 0, gtt_max,
+                                                 DRM_MM_SEARCH_DEFAULT);
        if (ret) {
-               ret = i915_gem_evict_something(dev, size, alignment,
+               ret = i915_gem_evict_something(dev, vm, size, alignment,
                                               obj->cache_level,
                                               map_and_fenceable,
                                               nonblocking);
                if (ret == 0)
                        goto search_free;
 
-               i915_gem_object_unpin_pages(obj);
-               kfree(node);
-               return ret;
+               goto err_free_vma;
        }
-       if (WARN_ON(!i915_gem_valid_gtt_space(dev, node, obj->cache_level))) {
-               i915_gem_object_unpin_pages(obj);
-               drm_mm_put_block(node);
-               return -EINVAL;
+       if (WARN_ON(!i915_gem_valid_gtt_space(dev, &vma->node,
+                                             obj->cache_level))) {
+               ret = -EINVAL;
+               goto err_remove_node;
        }
 
        ret = i915_gem_gtt_prepare_object(obj);
-       if (ret) {
-               i915_gem_object_unpin_pages(obj);
-               drm_mm_put_block(node);
-               return ret;
-       }
+       if (ret)
+               goto err_remove_node;
 
        list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
-       list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
+       list_add_tail(&vma->mm_list, &vm->inactive_list);
 
-       obj->gtt_space = node;
-       obj->gtt_offset = node->start;
+       if (i915_is_ggtt(vm)) {
+               bool mappable, fenceable;
 
-       fenceable =
-               node->size == fence_size &&
-               (node->start & (fence_alignment - 1)) == 0;
+               fenceable = (vma->node.size == fence_size &&
+                            (vma->node.start & (fence_alignment - 1)) == 0);
 
-       mappable =
-               obj->gtt_offset + obj->base.size <= dev_priv->gtt.mappable_end;
+               mappable = (vma->node.start + obj->base.size <=
+                           dev_priv->gtt.mappable_end);
 
-       obj->map_and_fenceable = mappable && fenceable;
+               obj->map_and_fenceable = mappable && fenceable;
+       }
+
+       WARN_ON(map_and_fenceable && !obj->map_and_fenceable);
 
-       trace_i915_gem_object_bind(obj, map_and_fenceable);
+       trace_i915_vma_bind(vma, map_and_fenceable);
        i915_gem_verify_gtt(dev);
        return 0;
+
+err_remove_node:
+       drm_mm_remove_node(&vma->node);
+err_free_vma:
+       i915_gem_vma_destroy(vma);
+err_unpin:
+       i915_gem_object_unpin_pages(obj);
+       return ret;
 }
 
-void
-i915_gem_clflush_object(struct drm_i915_gem_object *obj)
+bool
+i915_gem_clflush_object(struct drm_i915_gem_object *obj,
+                       bool force)
 {
        /* If we don't have a page list set up, then we're not pinned
         * to GPU, and we can ignore the cache flush because it'll happen
         * again at bind time.
         */
        if (obj->pages == NULL)
-               return;
+               return false;
 
        /*
         * Stolen memory is always coherent with the GPU as it is explicitly
         * marked as wc by the system, or the system is cache-coherent.
         */
        if (obj->stolen)
-               return;
+               return false;
 
        /* If the GPU is snooping the contents of the CPU cache,
         * we do not need to manually clear the CPU cache lines.  However,
@@ -3190,12 +3241,13 @@ i915_gem_clflush_object(struct drm_i915_gem_object *obj)
         * snooping behaviour occurs naturally as the result of our domain
         * tracking.
         */
-       if (obj->cache_level != I915_CACHE_NONE)
-               return;
+       if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
+               return false;
 
        trace_i915_gem_object_clflush(obj);
-
        drm_clflush_sg(obj->pages);
+
+       return true;
 }
 
 /** Flushes the GTT write domain for the object if it's dirty. */
@@ -3227,15 +3279,17 @@ i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
 
 /** Flushes the CPU write domain for the object if it's dirty. */
 static void
-i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
+i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
+                                      bool force)
 {
        uint32_t old_write_domain;
 
        if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
                return;
 
-       i915_gem_clflush_object(obj);
-       i915_gem_chipset_flush(obj->base.dev);
+       if (i915_gem_clflush_object(obj, force))
+               i915_gem_chipset_flush(obj->base.dev);
+
        old_write_domain = obj->base.write_domain;
        obj->base.write_domain = 0;
 
@@ -3258,7 +3312,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
        int ret;
 
        /* Not valid to be called on unbound objects. */
-       if (obj->gtt_space == NULL)
+       if (!i915_gem_obj_bound_any(obj))
                return -EINVAL;
 
        if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
@@ -3268,7 +3322,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
        if (ret)
                return ret;
 
-       i915_gem_object_flush_cpu_write_domain(obj);
+       i915_gem_object_flush_cpu_write_domain(obj, false);
 
        /* Serialise direct access to this object with the barriers for
         * coherent writes from the GPU, by effectively invalidating the
@@ -3296,8 +3350,14 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
                                            old_write_domain);
 
        /* And bump the LRU for this access */
-       if (i915_gem_object_is_inactive(obj))
-               list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
+       if (i915_gem_object_is_inactive(obj)) {
+               struct i915_vma *vma = i915_gem_obj_to_vma(obj,
+                                                          &dev_priv->gtt.base);
+               if (vma)
+                       list_move_tail(&vma->mm_list,
+                                      &dev_priv->gtt.base.inactive_list);
+
+       }
 
        return 0;
 }
@@ -3307,6 +3367,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 {
        struct drm_device *dev = obj->base.dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
+       struct i915_vma *vma;
        int ret;
 
        if (obj->cache_level == cache_level)
@@ -3317,13 +3378,17 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
                return -EBUSY;
        }
 
-       if (!i915_gem_valid_gtt_space(dev, obj->gtt_space, cache_level)) {
-               ret = i915_gem_object_unbind(obj);
-               if (ret)
-                       return ret;
+       list_for_each_entry(vma, &obj->vma_list, vma_link) {
+               if (!i915_gem_valid_gtt_space(dev, &vma->node, cache_level)) {
+                       ret = i915_vma_unbind(vma);
+                       if (ret)
+                               return ret;
+
+                       break;
+               }
        }
 
-       if (obj->gtt_space) {
+       if (i915_gem_obj_bound_any(obj)) {
                ret = i915_gem_object_finish_gpu(obj);
                if (ret)
                        return ret;
@@ -3345,11 +3410,13 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
                if (obj->has_aliasing_ppgtt_mapping)
                        i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
                                               obj, cache_level);
-
-               obj->gtt_space->color = cache_level;
        }
 
-       if (cache_level == I915_CACHE_NONE) {
+       list_for_each_entry(vma, &obj->vma_list, vma_link)
+               vma->node.color = cache_level;
+       obj->cache_level = cache_level;
+
+       if (cpu_write_needs_clflush(obj)) {
                u32 old_read_domains, old_write_domain;
 
                /* If we're coming from LLC cached, then we haven't
@@ -3359,7 +3426,6 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
                 * Just set it to the CPU cache for now.
                 */
                WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
-               WARN_ON(obj->base.read_domains & ~I915_GEM_DOMAIN_CPU);
 
                old_read_domains = obj->base.read_domains;
                old_write_domain = obj->base.write_domain;
@@ -3372,7 +3438,6 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
                                                    old_write_domain);
        }
 
-       obj->cache_level = cache_level;
        i915_gem_verify_gtt(dev);
        return 0;
 }
@@ -3394,7 +3459,20 @@ int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
                goto unlock;
        }
 
-       args->caching = obj->cache_level != I915_CACHE_NONE;
+       switch (obj->cache_level) {
+       case I915_CACHE_LLC:
+       case I915_CACHE_L3_LLC:
+               args->caching = I915_CACHING_CACHED;
+               break;
+
+       case I915_CACHE_WT:
+               args->caching = I915_CACHING_DISPLAY;
+               break;
+
+       default:
+               args->caching = I915_CACHING_NONE;
+               break;
+       }
 
        drm_gem_object_unreference(&obj->base);
 unlock:
@@ -3417,6 +3495,9 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
        case I915_CACHING_CACHED:
                level = I915_CACHE_LLC;
                break;
+       case I915_CACHING_DISPLAY:
+               level = HAS_WT(dev) ? I915_CACHE_WT : I915_CACHE_NONE;
+               break;
        default:
                return -EINVAL;
        }
@@ -3439,6 +3520,22 @@ unlock:
        return ret;
 }
 
+static bool is_pin_display(struct drm_i915_gem_object *obj)
+{
+       /* There are 3 sources that pin objects:
+        *   1. The display engine (scanouts, sprites, cursors);
+        *   2. Reservations for execbuffer;
+        *   3. The user.
+        *
+        * We can ignore reservations as we hold the struct_mutex and
+        * are only called outside of the reservation path.  The user
+        * can only increment pin_count once, and so if after
+        * subtracting the potential reference by the user, any pin_count
+        * remains, it must be due to another use by the display engine.
+        */
+       return obj->pin_count - !!obj->user_pin_count;
+}
+
 /*
  * Prepare buffer for display plane (scanout, cursors, etc).
  * Can be called from an uninterruptible phase (modesetting) and allows
@@ -3458,6 +3555,11 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
                        return ret;
        }
 
+       /* Mark the pin_display early so that we account for the
+        * display coherency whilst setting up the cache domains.
+        */
+       obj->pin_display = true;
+
        /* The display engine is not coherent with the LLC cache on gen6.  As
         * a result, we make sure that the pinning that is about to occur is
         * done with uncached PTEs. This is lowest common denominator for all
@@ -3467,19 +3569,20 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
         * of uncaching, which would allow us to flush all the LLC-cached data
         * with that bit in the PTE to main memory with just one PIPE_CONTROL.
         */
-       ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
+       ret = i915_gem_object_set_cache_level(obj,
+                                             HAS_WT(obj->base.dev) ? I915_CACHE_WT : I915_CACHE_NONE);
        if (ret)
-               return ret;
+               goto err_unpin_display;
 
        /* As the user may map the buffer once pinned in the display plane
         * (e.g. libkms for the bootup splash), we have to ensure that we
         * always use map_and_fenceable for all scanout buffers.
         */
-       ret = i915_gem_object_pin(obj, alignment, true, false);
+       ret = i915_gem_obj_ggtt_pin(obj, alignment, true, false);
        if (ret)
-               return ret;
+               goto err_unpin_display;
 
-       i915_gem_object_flush_cpu_write_domain(obj);
+       i915_gem_object_flush_cpu_write_domain(obj, true);
 
        old_write_domain = obj->base.write_domain;
        old_read_domains = obj->base.read_domains;
@@ -3495,6 +3598,17 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
                                            old_write_domain);
 
        return 0;
+
+err_unpin_display:
+       obj->pin_display = is_pin_display(obj);
+       return ret;
+}
+
+void
+i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj)
+{
+       i915_gem_object_unpin(obj);
+       obj->pin_display = is_pin_display(obj);
 }
 
 int
@@ -3540,7 +3654,7 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
 
        /* Flush the CPU cache if it's still invalid. */
        if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
-               i915_gem_clflush_object(obj);
+               i915_gem_clflush_object(obj, false);
 
                obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
        }
@@ -3618,37 +3732,44 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
 
 int
 i915_gem_object_pin(struct drm_i915_gem_object *obj,
+                   struct i915_address_space *vm,
                    uint32_t alignment,
                    bool map_and_fenceable,
                    bool nonblocking)
 {
+       struct i915_vma *vma;
        int ret;
 
        if (WARN_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
                return -EBUSY;
 
-       if (obj->gtt_space != NULL) {
-               if ((alignment && obj->gtt_offset & (alignment - 1)) ||
+       WARN_ON(map_and_fenceable && !i915_is_ggtt(vm));
+
+       vma = i915_gem_obj_to_vma(obj, vm);
+
+       if (vma) {
+               if ((alignment &&
+                    vma->node.start & (alignment - 1)) ||
                    (map_and_fenceable && !obj->map_and_fenceable)) {
                        WARN(obj->pin_count,
                             "bo is already pinned with incorrect alignment:"
-                            " offset=%x, req.alignment=%x, req.map_and_fenceable=%d,"
+                            " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
                             " obj->map_and_fenceable=%d\n",
-                            obj->gtt_offset, alignment,
+                            i915_gem_obj_offset(obj, vm), alignment,
                             map_and_fenceable,
                             obj->map_and_fenceable);
-                       ret = i915_gem_object_unbind(obj);
+                       ret = i915_vma_unbind(vma);
                        if (ret)
                                return ret;
                }
        }
 
-       if (obj->gtt_space == NULL) {
+       if (!i915_gem_obj_bound(obj, vm)) {
                struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
 
-               ret = i915_gem_object_bind_to_gtt(obj, alignment,
-                                                 map_and_fenceable,
-                                                 nonblocking);
+               ret = i915_gem_object_bind_to_vm(obj, vm, alignment,
+                                                map_and_fenceable,
+                                                nonblocking);
                if (ret)
                        return ret;
 
@@ -3669,7 +3790,7 @@ void
 i915_gem_object_unpin(struct drm_i915_gem_object *obj)
 {
        BUG_ON(obj->pin_count == 0);
-       BUG_ON(obj->gtt_space == NULL);
+       BUG_ON(!i915_gem_obj_bound_any(obj));
 
        if (--obj->pin_count == 0)
                obj->pin_mappable = false;
@@ -3707,7 +3828,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
        }
 
        if (obj->user_pin_count == 0) {
-               ret = i915_gem_object_pin(obj, args->alignment, true, false);
+               ret = i915_gem_obj_ggtt_pin(obj, args->alignment, true, false);
                if (ret)
                        goto out;
        }
@@ -3715,11 +3836,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
        obj->user_pin_count++;
        obj->pin_filp = file;
 
-       /* XXX - flush the CPU caches for pinned objects
-        * as the X server doesn't manage domains yet
-        */
-       i915_gem_object_flush_cpu_write_domain(obj);
-       args->offset = obj->gtt_offset;
+       args->offset = i915_gem_obj_ggtt_offset(obj);
 out:
        drm_gem_object_unreference(&obj->base);
 unlock:
@@ -3858,10 +3975,10 @@ unlock:
 void i915_gem_object_init(struct drm_i915_gem_object *obj,
                          const struct drm_i915_gem_object_ops *ops)
 {
-       INIT_LIST_HEAD(&obj->mm_list);
        INIT_LIST_HEAD(&obj->global_list);
        INIT_LIST_HEAD(&obj->ring_list);
-       INIT_LIST_HEAD(&obj->exec_list);
+       INIT_LIST_HEAD(&obj->obj_exec_link);
+       INIT_LIST_HEAD(&obj->vma_list);
 
        obj->ops = ops;
 
@@ -3926,6 +4043,8 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
        } else
                obj->cache_level = I915_CACHE_NONE;
 
+       trace_i915_gem_object_create(obj);
+
        return obj;
 }
 
@@ -3941,6 +4060,7 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
        struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
        struct drm_device *dev = obj->base.dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
+       struct i915_vma *vma, *next;
 
        trace_i915_gem_object_destroy(obj);
 
@@ -3948,15 +4068,21 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
                i915_gem_detach_phys_object(dev, obj);
 
        obj->pin_count = 0;
-       if (WARN_ON(i915_gem_object_unbind(obj) == -ERESTARTSYS)) {
-               bool was_interruptible;
+       /* NB: 0 or 1 elements */
+       WARN_ON(!list_empty(&obj->vma_list) &&
+               !list_is_singular(&obj->vma_list));
+       list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
+               int ret = i915_vma_unbind(vma);
+               if (WARN_ON(ret == -ERESTARTSYS)) {
+                       bool was_interruptible;
 
-               was_interruptible = dev_priv->mm.interruptible;
-               dev_priv->mm.interruptible = false;
+                       was_interruptible = dev_priv->mm.interruptible;
+                       dev_priv->mm.interruptible = false;
 
-               WARN_ON(i915_gem_object_unbind(obj));
+                       WARN_ON(i915_vma_unbind(vma));
 
-               dev_priv->mm.interruptible = was_interruptible;
+                       dev_priv->mm.interruptible = was_interruptible;
+               }
        }
 
        /* Stolen objects don't hold a ref, but do hold pin count. Fix that up
@@ -3982,15 +4108,71 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
        i915_gem_object_free(obj);
 }
 
+struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
+                                    struct i915_address_space *vm)
+{
+       struct i915_vma *vma;
+       list_for_each_entry(vma, &obj->vma_list, vma_link)
+               if (vma->vm == vm)
+                       return vma;
+
+       return NULL;
+}
+
+static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj,
+                                             struct i915_address_space *vm)
+{
+       struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
+       if (vma == NULL)
+               return ERR_PTR(-ENOMEM);
+
+       INIT_LIST_HEAD(&vma->vma_link);
+       INIT_LIST_HEAD(&vma->mm_list);
+       INIT_LIST_HEAD(&vma->exec_list);
+       vma->vm = vm;
+       vma->obj = obj;
+
+       /* Keep GGTT vmas first to make debug easier */
+       if (i915_is_ggtt(vm))
+               list_add(&vma->vma_link, &obj->vma_list);
+       else
+               list_add_tail(&vma->vma_link, &obj->vma_list);
+
+       return vma;
+}
+
+struct i915_vma *
+i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
+                                 struct i915_address_space *vm)
+{
+       struct i915_vma *vma;
+
+       vma = i915_gem_obj_to_vma(obj, vm);
+       if (!vma)
+               vma = __i915_gem_vma_create(obj, vm);
+
+       return vma;
+}
+
+void i915_gem_vma_destroy(struct i915_vma *vma)
+{
+       WARN_ON(vma->node.allocated);
+       list_del(&vma->vma_link);
+
+       /* Keep the vma as a placeholder in the execbuffer reservation lists */
+       if (!list_empty(&vma->exec_list))
+               return;
+
+       kfree(vma);
+}
+
 int
 i915_gem_idle(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
        int ret;
 
-       mutex_lock(&dev->struct_mutex);
-
-       if (dev_priv->mm.suspended) {
+       if (dev_priv->ums.mm_suspended) {
                mutex_unlock(&dev->struct_mutex);
                return 0;
        }
@@ -4006,18 +4188,11 @@ i915_gem_idle(struct drm_device *dev)
        if (!drm_core_check_feature(dev, DRIVER_MODESET))
                i915_gem_evict_everything(dev);
 
-       /* Hack!  Don't let anybody do execbuf while we don't control the chip.
-        * We need to replace this with a semaphore, or something.
-        * And not confound mm.suspended!
-        */
-       dev_priv->mm.suspended = 1;
        del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
 
        i915_kernel_lost_context(dev);
        i915_gem_cleanup_ringbuffer(dev);
 
-       mutex_unlock(&dev->struct_mutex);
-
        /* Cancel the retire work handler, which should be idle now. */
        cancel_delayed_work_sync(&dev_priv->mm.retire_work);
 
@@ -4150,8 +4325,8 @@ i915_gem_init_hw(struct drm_device *dev)
        if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
                return -EIO;
 
-       if (IS_HASWELL(dev) && (I915_READ(0x120010) == 1))
-               I915_WRITE(0x9008, I915_READ(0x9008) | 0xf0000);
+       if (dev_priv->ellc_size)
+               I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
 
        if (HAS_PCH_NOP(dev)) {
                u32 temp = I915_READ(GEN7_MSG_CTL);
@@ -4227,7 +4402,7 @@ int
 i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
                       struct drm_file *file_priv)
 {
-       drm_i915_private_t *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;
 
        if (drm_core_check_feature(dev, DRIVER_MODESET))
@@ -4239,7 +4414,7 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
        }
 
        mutex_lock(&dev->struct_mutex);
-       dev_priv->mm.suspended = 0;
+       dev_priv->ums.mm_suspended = 0;
 
        ret = i915_gem_init_hw(dev);
        if (ret != 0) {
@@ -4247,7 +4422,7 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
                return ret;
        }
 
-       BUG_ON(!list_empty(&dev_priv->mm.active_list));
+       BUG_ON(!list_empty(&dev_priv->gtt.base.active_list));
        mutex_unlock(&dev->struct_mutex);
 
        ret = drm_irq_install(dev);
@@ -4259,7 +4434,7 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
 cleanup_ringbuffer:
        mutex_lock(&dev->struct_mutex);
        i915_gem_cleanup_ringbuffer(dev);
-       dev_priv->mm.suspended = 1;
+       dev_priv->ums.mm_suspended = 1;
        mutex_unlock(&dev->struct_mutex);
 
        return ret;
@@ -4269,11 +4444,26 @@ int
 i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
                       struct drm_file *file_priv)
 {
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int ret;
+
        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return 0;
 
        drm_irq_uninstall(dev);
-       return i915_gem_idle(dev);
+
+       mutex_lock(&dev->struct_mutex);
+       ret =  i915_gem_idle(dev);
+
+       /* Hack!  Don't let anybody do execbuf while we don't control the chip.
+        * We need to replace this with a semaphore, or something.
+        * And not confound ums.mm_suspended!
+        */
+       if (ret != 0)
+               dev_priv->ums.mm_suspended = 1;
+       mutex_unlock(&dev->struct_mutex);
+
+       return ret;
 }
 
 void
@@ -4284,9 +4474,11 @@ i915_gem_lastclose(struct drm_device *dev)
        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return;
 
+       mutex_lock(&dev->struct_mutex);
        ret = i915_gem_idle(dev);
        if (ret)
                DRM_ERROR("failed to idle hardware: %d\n", ret);
+       mutex_unlock(&dev->struct_mutex);
 }
 
 static void
@@ -4296,6 +4488,16 @@ init_ring_lists(struct intel_ring_buffer *ring)
        INIT_LIST_HEAD(&ring->request_list);
 }
 
+static void i915_init_vm(struct drm_i915_private *dev_priv,
+                        struct i915_address_space *vm)
+{
+       vm->dev = dev_priv->dev;
+       INIT_LIST_HEAD(&vm->active_list);
+       INIT_LIST_HEAD(&vm->inactive_list);
+       INIT_LIST_HEAD(&vm->global_link);
+       list_add(&vm->global_link, &dev_priv->vm_list);
+}
+
 void
 i915_gem_load(struct drm_device *dev)
 {
@@ -4308,8 +4510,9 @@ i915_gem_load(struct drm_device *dev)
                                  SLAB_HWCACHE_ALIGN,
                                  NULL);
 
-       INIT_LIST_HEAD(&dev_priv->mm.active_list);
-       INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
+       INIT_LIST_HEAD(&dev_priv->vm_list);
+       i915_init_vm(dev_priv, &dev_priv->gtt.base);
+
        INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
        INIT_LIST_HEAD(&dev_priv->mm.bound_list);
        INIT_LIST_HEAD(&dev_priv->mm.fence_list);
@@ -4608,11 +4811,77 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
        list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
                if (obj->pages_pin_count == 0)
                        cnt += obj->base.size >> PAGE_SHIFT;
-       list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list)
+
+       list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+               if (obj->active)
+                       continue;
+
                if (obj->pin_count == 0 && obj->pages_pin_count == 0)
                        cnt += obj->base.size >> PAGE_SHIFT;
+       }
 
        if (unlock)
                mutex_unlock(&dev->struct_mutex);
        return cnt;
 }
+
+/* All the new VM stuff */
+unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
+                                 struct i915_address_space *vm)
+{
+       struct drm_i915_private *dev_priv = o->base.dev->dev_private;
+       struct i915_vma *vma;
+
+       if (vm == &dev_priv->mm.aliasing_ppgtt->base)
+               vm = &dev_priv->gtt.base;
+
+       BUG_ON(list_empty(&o->vma_list));
+       list_for_each_entry(vma, &o->vma_list, vma_link) {
+               if (vma->vm == vm)
+                       return vma->node.start;
+
+       }
+       return -1;
+}
+
+bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
+                       struct i915_address_space *vm)
+{
+       struct i915_vma *vma;
+
+       list_for_each_entry(vma, &o->vma_list, vma_link)
+               if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
+                       return true;
+
+       return false;
+}
+
+bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
+{
+       struct drm_i915_private *dev_priv = o->base.dev->dev_private;
+       struct i915_address_space *vm;
+
+       list_for_each_entry(vm, &dev_priv->vm_list, global_link)
+               if (i915_gem_obj_bound(o, vm))
+                       return true;
+
+       return false;
+}
+
+unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
+                               struct i915_address_space *vm)
+{
+       struct drm_i915_private *dev_priv = o->base.dev->dev_private;
+       struct i915_vma *vma;
+
+       if (vm == &dev_priv->mm.aliasing_ppgtt->base)
+               vm = &dev_priv->gtt.base;
+
+       BUG_ON(list_empty(&o->vma_list));
+
+       list_for_each_entry(vma, &o->vma_list, vma_link)
+               if (vma->vm == vm)
+                       return vma->node.size;
+
+       return 0;
+}
index 51b7a2171caee8a9b8c7a3c6f51edb5c5487dd25..403309c2a7d6e984c20404c38c72074f846712f7 100644 (file)
@@ -155,7 +155,7 @@ create_hw_context(struct drm_device *dev,
 
        if (INTEL_INFO(dev)->gen >= 7) {
                ret = i915_gem_object_set_cache_level(ctx->obj,
-                                                     I915_CACHE_LLC_MLC);
+                                                     I915_CACHE_L3_LLC);
                /* Failure shouldn't ever happen this early */
                if (WARN_ON(ret))
                        goto err_out;
@@ -214,7 +214,7 @@ static int create_default_context(struct drm_i915_private *dev_priv)
         * default context.
         */
        dev_priv->ring[RCS].default_context = ctx;
-       ret = i915_gem_object_pin(ctx->obj, CONTEXT_ALIGN, false, false);
+       ret = i915_gem_obj_ggtt_pin(ctx->obj, CONTEXT_ALIGN, false, false);
        if (ret) {
                DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
                goto err_destroy;
@@ -304,31 +304,24 @@ static int context_idr_cleanup(int id, void *p, void *data)
 }
 
 struct i915_ctx_hang_stats *
-i915_gem_context_get_hang_stats(struct intel_ring_buffer *ring,
+i915_gem_context_get_hang_stats(struct drm_device *dev,
                                struct drm_file *file,
                                u32 id)
 {
-       struct drm_i915_private *dev_priv = ring->dev->dev_private;
+       struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_file_private *file_priv = file->driver_priv;
-       struct i915_hw_context *to;
-
-       if (dev_priv->hw_contexts_disabled)
-               return ERR_PTR(-ENOENT);
-
-       if (ring->id != RCS)
-               return ERR_PTR(-EINVAL);
-
-       if (file == NULL)
-               return ERR_PTR(-EINVAL);
+       struct i915_hw_context *ctx;
 
        if (id == DEFAULT_CONTEXT_ID)
                return &file_priv->hang_stats;
 
-       to = i915_gem_context_get(file->driver_priv, id);
-       if (to == NULL)
+       ctx = NULL;
+       if (!dev_priv->hw_contexts_disabled)
+               ctx = i915_gem_context_get(file->driver_priv, id);
+       if (ctx == NULL)
                return ERR_PTR(-ENOENT);
 
-       return &to->hang_stats;
+       return &ctx->hang_stats;
 }
 
 void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)
@@ -377,7 +370,7 @@ mi_set_context(struct intel_ring_buffer *ring,
 
        intel_ring_emit(ring, MI_NOOP);
        intel_ring_emit(ring, MI_SET_CONTEXT);
-       intel_ring_emit(ring, new_context->obj->gtt_offset |
+       intel_ring_emit(ring, i915_gem_obj_ggtt_offset(new_context->obj) |
                        MI_MM_SPACE_GTT |
                        MI_SAVE_EXT_STATE_EN |
                        MI_RESTORE_EXT_STATE_EN |
@@ -407,7 +400,7 @@ static int do_switch(struct i915_hw_context *to)
        if (from == to)
                return 0;
 
-       ret = i915_gem_object_pin(to->obj, CONTEXT_ALIGN, false, false);
+       ret = i915_gem_obj_ggtt_pin(to->obj, CONTEXT_ALIGN, false, false);
        if (ret)
                return ret;
 
@@ -443,7 +436,10 @@ static int do_switch(struct i915_hw_context *to)
         * MI_SET_CONTEXT instead of when the next seqno has completed.
         */
        if (from != NULL) {
+               struct drm_i915_private *dev_priv = from->obj->base.dev->dev_private;
+               struct i915_address_space *ggtt = &dev_priv->gtt.base;
                from->obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
+               list_move_tail(&i915_gem_obj_to_vma(from->obj, ggtt)->mm_list, &ggtt->active_list);
                i915_gem_object_move_to_active(from->obj, ring);
                /* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
                 * whole damn pipeline, we don't need to explicitly mark the
index 582e6a5f3dac6e4d464440ebab84f3604cc5f62e..775d506b3208f44a9bc7ba02186efe7f168510c9 100644 (file)
@@ -97,7 +97,7 @@ i915_verify_lists(struct drm_device *dev)
                }
        }
 
-       list_for_each_entry(obj, &dev_priv->mm.inactive_list, list) {
+       list_for_each_entry(obj, &i915_gtt_vm->inactive_list, list) {
                if (obj->base.dev != dev ||
                    !atomic_read(&obj->base.refcount.refcount)) {
                        DRM_ERROR("freed inactive %p\n", obj);
@@ -115,73 +115,4 @@ i915_verify_lists(struct drm_device *dev)
 
        return warned = err;
 }
-#endif /* WATCH_INACTIVE */
-
-#if WATCH_COHERENCY
-void
-i915_gem_object_check_coherency(struct drm_i915_gem_object *obj, int handle)
-{
-       struct drm_device *dev = obj->base.dev;
-       int page;
-       uint32_t *gtt_mapping;
-       uint32_t *backing_map = NULL;
-       int bad_count = 0;
-
-       DRM_INFO("%s: checking coherency of object %p@0x%08x (%d, %zdkb):\n",
-                __func__, obj, obj->gtt_offset, handle,
-                obj->size / 1024);
-
-       gtt_mapping = ioremap(dev_priv->mm.gtt_base_addr + obj->gtt_offset,
-                             obj->base.size);
-       if (gtt_mapping == NULL) {
-               DRM_ERROR("failed to map GTT space\n");
-               return;
-       }
-
-       for (page = 0; page < obj->size / PAGE_SIZE; page++) {
-               int i;
-
-               backing_map = kmap_atomic(obj->pages[page]);
-
-               if (backing_map == NULL) {
-                       DRM_ERROR("failed to map backing page\n");
-                       goto out;
-               }
-
-               for (i = 0; i < PAGE_SIZE / 4; i++) {
-                       uint32_t cpuval = backing_map[i];
-                       uint32_t gttval = readl(gtt_mapping +
-                                               page * 1024 + i);
-
-                       if (cpuval != gttval) {
-                               DRM_INFO("incoherent CPU vs GPU at 0x%08x: "
-                                        "0x%08x vs 0x%08x\n",
-                                        (int)(obj->gtt_offset +
-                                              page * PAGE_SIZE + i * 4),
-                                        cpuval, gttval);
-                               if (bad_count++ >= 8) {
-                                       DRM_INFO("...\n");
-                                       goto out;
-                               }
-                       }
-               }
-               kunmap_atomic(backing_map);
-               backing_map = NULL;
-       }
-
- out:
-       if (backing_map != NULL)
-               kunmap_atomic(backing_map);
-       iounmap(gtt_mapping);
-
-       /* give syslog time to catch up */
-       msleep(1);
-
-       /* Directly flush the object, since we just loaded values with the CPU
-        * from the backing pages and we don't want to disturb the cache
-        * management that we're trying to observe.
-        */
-
-       i915_gem_clflush_object(obj);
-}
-#endif
+#endif /* WATCH_LIST */
index dc53a527126b0569800ff2df3a8a36ebbf904855..e918b05fcbdd16e238457f7e286b345bdf651731 100644 (file)
 #include "i915_drv.h"
 #include <linux/dma-buf.h>
 
+/* Convert a dma_buf back to the i915 GEM object stored in its priv
+ * pointer.  Centralises the cast previously open-coded at every
+ * dma_buf->priv access in this file. */
+static struct drm_i915_gem_object *dma_buf_to_obj(struct dma_buf *buf)
+{
+       return to_intel_bo(buf->priv);
+}
+
 static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
                                             enum dma_data_direction dir)
 {
-       struct drm_i915_gem_object *obj = attachment->dmabuf->priv;
+       struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
        struct sg_table *st;
        struct scatterlist *src, *dst;
        int ret, i;
@@ -85,25 +90,22 @@ static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
                                   struct sg_table *sg,
                                   enum dma_data_direction dir)
 {
+       struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
+
+       mutex_lock(&obj->base.dev->struct_mutex);
+
        dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
        sg_free_table(sg);
        kfree(sg);
-}
 
-static void i915_gem_dmabuf_release(struct dma_buf *dma_buf)
-{
-       struct drm_i915_gem_object *obj = dma_buf->priv;
+       i915_gem_object_unpin_pages(obj);
 
-       if (obj->base.export_dma_buf == dma_buf) {
-               /* drop the reference on the export fd holds */
-               obj->base.export_dma_buf = NULL;
-               drm_gem_object_unreference_unlocked(&obj->base);
-       }
+       mutex_unlock(&obj->base.dev->struct_mutex);
 }
 
 static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
 {
-       struct drm_i915_gem_object *obj = dma_buf->priv;
+       struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
        struct drm_device *dev = obj->base.dev;
        struct sg_page_iter sg_iter;
        struct page **pages;
@@ -151,7 +153,7 @@ error:
 
 static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
 {
-       struct drm_i915_gem_object *obj = dma_buf->priv;
+       struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
        struct drm_device *dev = obj->base.dev;
        int ret;
 
@@ -194,7 +196,7 @@ static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *
 
 static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, size_t start, size_t length, enum dma_data_direction direction)
 {
-       struct drm_i915_gem_object *obj = dma_buf->priv;
+       struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
        struct drm_device *dev = obj->base.dev;
        int ret;
        bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE);
@@ -211,7 +213,7 @@ static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, size_t start, size
 static const struct dma_buf_ops i915_dmabuf_ops =  {
        .map_dma_buf = i915_gem_map_dma_buf,
        .unmap_dma_buf = i915_gem_unmap_dma_buf,
-       .release = i915_gem_dmabuf_release,
+       .release = drm_gem_dmabuf_release,
        .kmap = i915_gem_dmabuf_kmap,
        .kmap_atomic = i915_gem_dmabuf_kmap_atomic,
        .kunmap = i915_gem_dmabuf_kunmap,
@@ -225,9 +227,7 @@ static const struct dma_buf_ops i915_dmabuf_ops =  {
 struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
                                      struct drm_gem_object *gem_obj, int flags)
 {
-       struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
-
-       return dma_buf_export(obj, &i915_dmabuf_ops, obj->base.size, flags);
+       return dma_buf_export(gem_obj, &i915_dmabuf_ops, gem_obj->size, flags);
 }
 
 static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
@@ -264,7 +264,7 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
 
        /* is this one of own objects? */
        if (dma_buf->ops == &i915_dmabuf_ops) {
-               obj = dma_buf->priv;
+               obj = dma_buf_to_obj(dma_buf);
                /* is it from our device? */
                if (obj->base.dev == dev) {
                        /*
@@ -289,12 +289,7 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
                goto fail_detach;
        }
 
-       ret = drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
-       if (ret) {
-               i915_gem_object_free(obj);
-               goto fail_detach;
-       }
-
+       drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
        i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops);
        obj->base.import_attach = attach;
 
index c86d5d9356fd086b756b9dc207b6d7c9e824f630..91b70015585003540a35028d288c4c67f657f374 100644 (file)
 #include "i915_trace.h"
 
 static bool
-mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind)
+mark_free(struct i915_vma *vma, struct list_head *unwind)
 {
-       if (obj->pin_count)
+       if (vma->obj->pin_count)
                return false;
 
-       list_add(&obj->exec_list, unwind);
-       return drm_mm_scan_add_block(obj->gtt_space);
+       list_add(&vma->exec_list, unwind);
+       return drm_mm_scan_add_block(&vma->node);
 }
 
 int
-i915_gem_evict_something(struct drm_device *dev, int min_size,
-                        unsigned alignment, unsigned cache_level,
+i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
+                        int min_size, unsigned alignment, unsigned cache_level,
                         bool mappable, bool nonblocking)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct list_head eviction_list, unwind_list;
-       struct drm_i915_gem_object *obj;
+       struct i915_vma *vma;
        int ret = 0;
 
        trace_i915_gem_evict(dev, min_size, alignment, mappable);
@@ -77,17 +77,17 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
         */
 
        INIT_LIST_HEAD(&unwind_list);
-       if (mappable)
-               drm_mm_init_scan_with_range(&dev_priv->mm.gtt_space,
-                                           min_size, alignment, cache_level,
-                                           0, dev_priv->gtt.mappable_end);
-       else
-               drm_mm_init_scan(&dev_priv->mm.gtt_space,
-                                min_size, alignment, cache_level);
+       if (mappable) {
+               BUG_ON(!i915_is_ggtt(vm));
+               drm_mm_init_scan_with_range(&vm->mm, min_size,
+                                           alignment, cache_level, 0,
+                                           dev_priv->gtt.mappable_end);
+       } else
+               drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level);
 
        /* First see if there is a large enough contiguous idle region... */
-       list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) {
-               if (mark_free(obj, &unwind_list))
+       list_for_each_entry(vma, &vm->inactive_list, mm_list) {
+               if (mark_free(vma, &unwind_list))
                        goto found;
        }
 
@@ -95,22 +95,21 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
                goto none;
 
        /* Now merge in the soon-to-be-expired objects... */
-       list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
-               if (mark_free(obj, &unwind_list))
+       list_for_each_entry(vma, &vm->active_list, mm_list) {
+               if (mark_free(vma, &unwind_list))
                        goto found;
        }
 
 none:
        /* Nothing found, clean up and bail out! */
        while (!list_empty(&unwind_list)) {
-               obj = list_first_entry(&unwind_list,
-                                      struct drm_i915_gem_object,
+               vma = list_first_entry(&unwind_list,
+                                      struct i915_vma,
                                       exec_list);
-
-               ret = drm_mm_scan_remove_block(obj->gtt_space);
+               ret = drm_mm_scan_remove_block(&vma->node);
                BUG_ON(ret);
 
-               list_del_init(&obj->exec_list);
+               list_del_init(&vma->exec_list);
        }
 
        /* We expect the caller to unpin, evict all and try again, or give up.
@@ -124,27 +123,30 @@ found:
         * temporary list. */
        INIT_LIST_HEAD(&eviction_list);
        while (!list_empty(&unwind_list)) {
-               obj = list_first_entry(&unwind_list,
-                                      struct drm_i915_gem_object,
+               vma = list_first_entry(&unwind_list,
+                                      struct i915_vma,
                                       exec_list);
-               if (drm_mm_scan_remove_block(obj->gtt_space)) {
-                       list_move(&obj->exec_list, &eviction_list);
-                       drm_gem_object_reference(&obj->base);
+               if (drm_mm_scan_remove_block(&vma->node)) {
+                       list_move(&vma->exec_list, &eviction_list);
+                       drm_gem_object_reference(&vma->obj->base);
                        continue;
                }
-               list_del_init(&obj->exec_list);
+               list_del_init(&vma->exec_list);
        }
 
        /* Unbinding will emit any required flushes */
        while (!list_empty(&eviction_list)) {
-               obj = list_first_entry(&eviction_list,
-                                      struct drm_i915_gem_object,
+               struct drm_gem_object *obj;
+               vma = list_first_entry(&eviction_list,
+                                      struct i915_vma,
                                       exec_list);
+
+               obj =  &vma->obj->base;
+               list_del_init(&vma->exec_list);
                if (ret == 0)
-                       ret = i915_gem_object_unbind(obj);
+                       ret = i915_vma_unbind(vma);
 
-               list_del_init(&obj->exec_list);
-               drm_gem_object_unreference(&obj->base);
+               drm_gem_object_unreference(obj);
        }
 
        return ret;
@@ -154,12 +156,18 @@ int
 i915_gem_evict_everything(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_i915_gem_object *obj, *next;
-       bool lists_empty;
+       struct i915_address_space *vm;
+       struct i915_vma *vma, *next;
+       bool lists_empty = true;
        int ret;
 
-       lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
-                      list_empty(&dev_priv->mm.active_list));
+       list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
+               lists_empty = (list_empty(&vm->inactive_list) &&
+                              list_empty(&vm->active_list));
+               if (!lists_empty)
+                       lists_empty = false;
+       }
+
        if (lists_empty)
                return -ENOSPC;
 
@@ -176,10 +184,11 @@ i915_gem_evict_everything(struct drm_device *dev)
        i915_gem_retire_requests(dev);
 
        /* Having flushed everything, unbind() should never raise an error */
-       list_for_each_entry_safe(obj, next,
-                                &dev_priv->mm.inactive_list, mm_list)
-               if (obj->pin_count == 0)
-                       WARN_ON(i915_gem_object_unbind(obj));
+       list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
+               list_for_each_entry_safe(vma, next, &vm->inactive_list, mm_list)
+                       if (vma->obj->pin_count == 0)
+                               WARN_ON(i915_vma_unbind(vma));
+       }
 
        return 0;
 }
index 87a3227e51795ef44ba03933be925a49e1409504..fa82396306ee46db312dafcc12d8efebd00039af 100644 (file)
 #include "intel_drv.h"
 #include <linux/dma_remapping.h>
 
-struct eb_objects {
-       struct list_head objects;
+struct eb_vmas {
+       struct list_head vmas;
        int and;
        union {
-               struct drm_i915_gem_object *lut[0];
+               struct i915_vma *lut[0];
                struct hlist_head buckets[0];
        };
 };
 
-static struct eb_objects *
-eb_create(struct drm_i915_gem_execbuffer2 *args)
+static struct eb_vmas *
+eb_create(struct drm_i915_gem_execbuffer2 *args, struct i915_address_space *vm)
 {
-       struct eb_objects *eb = NULL;
+       struct eb_vmas *eb = NULL;
 
        if (args->flags & I915_EXEC_HANDLE_LUT) {
                int size = args->buffer_count;
-               size *= sizeof(struct drm_i915_gem_object *);
-               size += sizeof(struct eb_objects);
+               size *= sizeof(struct i915_vma *);
+               size += sizeof(struct eb_vmas);
                eb = kmalloc(size, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
        }
 
@@ -61,7 +61,7 @@ eb_create(struct drm_i915_gem_execbuffer2 *args)
                while (count > 2*size)
                        count >>= 1;
                eb = kzalloc(count*sizeof(struct hlist_head) +
-                            sizeof(struct eb_objects),
+                            sizeof(struct eb_vmas),
                             GFP_TEMPORARY);
                if (eb == NULL)
                        return eb;
@@ -70,64 +70,102 @@ eb_create(struct drm_i915_gem_execbuffer2 *args)
        } else
                eb->and = -args->buffer_count;
 
-       INIT_LIST_HEAD(&eb->objects);
+       INIT_LIST_HEAD(&eb->vmas);
        return eb;
 }
 
 static void
-eb_reset(struct eb_objects *eb)
+eb_reset(struct eb_vmas *eb)
 {
        if (eb->and >= 0)
                memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
 }
 
 static int
-eb_lookup_objects(struct eb_objects *eb,
-                 struct drm_i915_gem_exec_object2 *exec,
-                 const struct drm_i915_gem_execbuffer2 *args,
-                 struct drm_file *file)
+eb_lookup_vmas(struct eb_vmas *eb,
+              struct drm_i915_gem_exec_object2 *exec,
+              const struct drm_i915_gem_execbuffer2 *args,
+              struct i915_address_space *vm,
+              struct drm_file *file)
 {
-       int i;
+       struct drm_i915_gem_object *obj;
+       struct list_head objects;
+       int i, ret = 0;
 
+       INIT_LIST_HEAD(&objects);
        spin_lock(&file->table_lock);
+       /* Grab a reference to the object and release the lock so we can lookup
+        * or create the VMA without using GFP_ATOMIC */
        for (i = 0; i < args->buffer_count; i++) {
-               struct drm_i915_gem_object *obj;
-
                obj = to_intel_bo(idr_find(&file->object_idr, exec[i].handle));
                if (obj == NULL) {
                        spin_unlock(&file->table_lock);
                        DRM_DEBUG("Invalid object handle %d at index %d\n",
                                   exec[i].handle, i);
-                       return -ENOENT;
+                       ret = -ENOENT;
+                       goto out;
                }
 
-               if (!list_empty(&obj->exec_list)) {
+               if (!list_empty(&obj->obj_exec_link)) {
                        spin_unlock(&file->table_lock);
                        DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
                                   obj, exec[i].handle, i);
-                       return -EINVAL;
+                       ret = -EINVAL;
+                       goto out;
                }
 
                drm_gem_object_reference(&obj->base);
-               list_add_tail(&obj->exec_list, &eb->objects);
+               list_add_tail(&obj->obj_exec_link, &objects);
+       }
+       spin_unlock(&file->table_lock);
+
+       i = 0;
+       list_for_each_entry(obj, &objects, obj_exec_link) {
+               struct i915_vma *vma;
+
+               /*
+                * NOTE: We can leak any vmas created here when something fails
+                * later on. But that's no issue since vma_unbind can deal with
+                * vmas which are not actually bound. And since only
+                * lookup_or_create exists as an interface to get at the vma
+                * from the (obj, vm) we don't run the risk of creating
+                * duplicated vmas for the same vm.
+                */
+               vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
+               if (IS_ERR(vma)) {
+                       DRM_DEBUG("Failed to lookup VMA\n");
+                       ret = PTR_ERR(vma);
+                       goto out;
+               }
 
-               obj->exec_entry = &exec[i];
+               list_add_tail(&vma->exec_list, &eb->vmas);
+
+               vma->exec_entry = &exec[i];
                if (eb->and < 0) {
-                       eb->lut[i] = obj;
+                       eb->lut[i] = vma;
                } else {
                        uint32_t handle = args->flags & I915_EXEC_HANDLE_LUT ? i : exec[i].handle;
-                       obj->exec_handle = handle;
-                       hlist_add_head(&obj->exec_node,
+                       vma->exec_handle = handle;
+                       hlist_add_head(&vma->exec_node,
                                       &eb->buckets[handle & eb->and]);
                }
+               ++i;
        }
-       spin_unlock(&file->table_lock);
 
-       return 0;
+
+out:
+       while (!list_empty(&objects)) {
+               obj = list_first_entry(&objects,
+                                      struct drm_i915_gem_object,
+                                      obj_exec_link);
+               list_del_init(&obj->obj_exec_link);
+               if (ret)
+                       drm_gem_object_unreference(&obj->base);
+       }
+       return ret;
 }
 
-static struct drm_i915_gem_object *
-eb_get_object(struct eb_objects *eb, unsigned long handle)
+static struct i915_vma *eb_get_vma(struct eb_vmas *eb, unsigned long handle)
 {
        if (eb->and < 0) {
                if (handle >= -eb->and)
@@ -139,27 +177,25 @@ eb_get_object(struct eb_objects *eb, unsigned long handle)
 
                head = &eb->buckets[handle & eb->and];
                hlist_for_each(node, head) {
-                       struct drm_i915_gem_object *obj;
+                       struct i915_vma *vma;
 
-                       obj = hlist_entry(node, struct drm_i915_gem_object, exec_node);
-                       if (obj->exec_handle == handle)
-                               return obj;
+                       vma = hlist_entry(node, struct i915_vma, exec_node);
+                       if (vma->exec_handle == handle)
+                               return vma;
                }
                return NULL;
        }
 }
 
-static void
-eb_destroy(struct eb_objects *eb)
-{
-       while (!list_empty(&eb->objects)) {
-               struct drm_i915_gem_object *obj;
+static void eb_destroy(struct eb_vmas *eb) {
+       while (!list_empty(&eb->vmas)) {
+               struct i915_vma *vma;
 
-               obj = list_first_entry(&eb->objects,
-                                      struct drm_i915_gem_object,
+               vma = list_first_entry(&eb->vmas,
+                                      struct i915_vma,
                                       exec_list);
-               list_del_init(&obj->exec_list);
-               drm_gem_object_unreference(&obj->base);
+               list_del_init(&vma->exec_list);
+               drm_gem_object_unreference(&vma->obj->base);
        }
        kfree(eb);
 }
@@ -171,24 +207,77 @@ static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
                obj->cache_level != I915_CACHE_NONE);
 }
 
+/*
+ * relocate_entry_cpu - apply one relocation through the CPU domain.
+ *
+ * Moves @obj to the CPU write domain, kmaps the page containing
+ * @reloc->offset and writes the 32-bit @reloc->delta there.
+ *
+ * Returns 0 on success or the error from the domain change.
+ * NOTE(review): the -EINVAL initializer of ret is dead — it is
+ * unconditionally overwritten on the next line.
+ */
+static int
+relocate_entry_cpu(struct drm_i915_gem_object *obj,
+                  struct drm_i915_gem_relocation_entry *reloc)
+{
+       uint32_t page_offset = offset_in_page(reloc->offset);
+       char *vaddr;
+       int ret = -EINVAL;
+
+       ret = i915_gem_object_set_to_cpu_domain(obj, 1);
+       if (ret)
+               return ret;
+
+       /* kmap the backing page and poke the relocation value in. */
+       vaddr = kmap_atomic(i915_gem_object_get_page(obj,
+                               reloc->offset >> PAGE_SHIFT));
+       *(uint32_t *)(vaddr + page_offset) = reloc->delta;
+       kunmap_atomic(vaddr);
+
+       return 0;
+}
+
+/*
+ * relocate_entry_gtt - apply one relocation through a GTT mapping.
+ *
+ * Moves @obj to the GTT write domain, drops any fence, then writes
+ * @reloc->delta via an atomic write-combined io_mapping of the page
+ * containing the relocation.  Side effect: @reloc->offset is rebased
+ * in place by the object's global-GTT offset.
+ *
+ * Returns 0 on success or the error from the domain/fence calls.
+ * NOTE(review): the -EINVAL initializer of ret is dead — it is
+ * unconditionally overwritten on the next line.
+ */
+static int
+relocate_entry_gtt(struct drm_i915_gem_object *obj,
+                  struct drm_i915_gem_relocation_entry *reloc)
+{
+       struct drm_device *dev = obj->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       uint32_t __iomem *reloc_entry;
+       void __iomem *reloc_page;
+       int ret = -EINVAL;
+
+       ret = i915_gem_object_set_to_gtt_domain(obj, true);
+       if (ret)
+               return ret;
+
+       ret = i915_gem_object_put_fence(obj);
+       if (ret)
+               return ret;
+
+       /* Map the page containing the relocation we're going to perform.  */
+       reloc->offset += i915_gem_obj_ggtt_offset(obj);
+       reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
+                       reloc->offset & PAGE_MASK);
+       reloc_entry = (uint32_t __iomem *)
+               (reloc_page + offset_in_page(reloc->offset));
+       iowrite32(reloc->delta, reloc_entry);
+       io_mapping_unmap_atomic(reloc_page);
+
+       return 0;
+}
+
 static int
 i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
-                                  struct eb_objects *eb,
-                                  struct drm_i915_gem_relocation_entry *reloc)
+                                  struct eb_vmas *eb,
+                                  struct drm_i915_gem_relocation_entry *reloc,
+                                  struct i915_address_space *vm)
 {
        struct drm_device *dev = obj->base.dev;
        struct drm_gem_object *target_obj;
        struct drm_i915_gem_object *target_i915_obj;
+       struct i915_vma *target_vma;
        uint32_t target_offset;
        int ret = -EINVAL;
 
        /* we've already hold a reference to all valid objects */
-       target_obj = &eb_get_object(eb, reloc->target_handle)->base;
-       if (unlikely(target_obj == NULL))
+       target_vma = eb_get_vma(eb, reloc->target_handle);
+       if (unlikely(target_vma == NULL))
                return -ENOENT;
+       target_i915_obj = target_vma->obj;
+       target_obj = &target_vma->obj->base;
 
-       target_i915_obj = to_intel_bo(target_obj);
-       target_offset = target_i915_obj->gtt_offset;
+       target_offset = i915_gem_obj_ggtt_offset(target_i915_obj);
 
        /* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
         * pipe_control writes because the gpu doesn't properly redirect them
@@ -254,40 +343,10 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
                return -EFAULT;
 
        reloc->delta += target_offset;
-       if (use_cpu_reloc(obj)) {
-               uint32_t page_offset = reloc->offset & ~PAGE_MASK;
-               char *vaddr;
-
-               ret = i915_gem_object_set_to_cpu_domain(obj, 1);
-               if (ret)
-                       return ret;
-
-               vaddr = kmap_atomic(i915_gem_object_get_page(obj,
-                                                            reloc->offset >> PAGE_SHIFT));
-               *(uint32_t *)(vaddr + page_offset) = reloc->delta;
-               kunmap_atomic(vaddr);
-       } else {
-               struct drm_i915_private *dev_priv = dev->dev_private;
-               uint32_t __iomem *reloc_entry;
-               void __iomem *reloc_page;
-
-               ret = i915_gem_object_set_to_gtt_domain(obj, true);
-               if (ret)
-                       return ret;
-
-               ret = i915_gem_object_put_fence(obj);
-               if (ret)
-                       return ret;
-
-               /* Map the page containing the relocation we're going to perform.  */
-               reloc->offset += obj->gtt_offset;
-               reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
-                                                     reloc->offset & PAGE_MASK);
-               reloc_entry = (uint32_t __iomem *)
-                       (reloc_page + (reloc->offset & ~PAGE_MASK));
-               iowrite32(reloc->delta, reloc_entry);
-               io_mapping_unmap_atomic(reloc_page);
-       }
+       if (use_cpu_reloc(obj))
+               ret = relocate_entry_cpu(obj, reloc);
+       else
+               ret = relocate_entry_gtt(obj, reloc);
 
        /* and update the user's relocation entry */
        reloc->presumed_offset = target_offset;
@@ -296,13 +355,13 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
 }
 
 static int
-i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
-                                   struct eb_objects *eb)
+i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
+                                struct eb_vmas *eb)
 {
 #define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
        struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)];
        struct drm_i915_gem_relocation_entry __user *user_relocs;
-       struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
+       struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
        int remain, ret;
 
        user_relocs = to_user_ptr(entry->relocs_ptr);
@@ -321,7 +380,8 @@ i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
                do {
                        u64 offset = r->presumed_offset;
 
-                       ret = i915_gem_execbuffer_relocate_entry(obj, eb, r);
+                       ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r,
+                                                                vma->vm);
                        if (ret)
                                return ret;
 
@@ -342,15 +402,16 @@ i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
 }
 
 static int
-i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
-                                        struct eb_objects *eb,
-                                        struct drm_i915_gem_relocation_entry *relocs)
+i915_gem_execbuffer_relocate_vma_slow(struct i915_vma *vma,
+                                     struct eb_vmas *eb,
+                                     struct drm_i915_gem_relocation_entry *relocs)
 {
-       const struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
+       const struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
        int i, ret;
 
        for (i = 0; i < entry->relocation_count; i++) {
-               ret = i915_gem_execbuffer_relocate_entry(obj, eb, &relocs[i]);
+               ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i],
+                                                        vma->vm);
                if (ret)
                        return ret;
        }
@@ -359,9 +420,10 @@ i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
 }
 
 static int
-i915_gem_execbuffer_relocate(struct eb_objects *eb)
+i915_gem_execbuffer_relocate(struct eb_vmas *eb,
+                            struct i915_address_space *vm)
 {
-       struct drm_i915_gem_object *obj;
+       struct i915_vma *vma;
        int ret = 0;
 
        /* This is the fast path and we cannot handle a pagefault whilst
@@ -372,8 +434,8 @@ i915_gem_execbuffer_relocate(struct eb_objects *eb)
         * lockdep complains vehemently.
         */
        pagefault_disable();
-       list_for_each_entry(obj, &eb->objects, exec_list) {
-               ret = i915_gem_execbuffer_relocate_object(obj, eb);
+       list_for_each_entry(vma, &eb->vmas, exec_list) {
+               ret = i915_gem_execbuffer_relocate_vma(vma, eb);
                if (ret)
                        break;
        }
@@ -386,30 +448,33 @@ i915_gem_execbuffer_relocate(struct eb_objects *eb)
 #define  __EXEC_OBJECT_HAS_FENCE (1<<30)
 
 static int
-need_reloc_mappable(struct drm_i915_gem_object *obj)
+need_reloc_mappable(struct i915_vma *vma)
 {
-       struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
-       return entry->relocation_count && !use_cpu_reloc(obj);
+       struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
+       return entry->relocation_count && !use_cpu_reloc(vma->obj) &&
+               i915_is_ggtt(vma->vm);
 }
 
 static int
-i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
-                                  struct intel_ring_buffer *ring,
-                                  bool *need_reloc)
+i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
+                               struct intel_ring_buffer *ring,
+                               bool *need_reloc)
 {
-       struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
-       struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
+       struct drm_i915_private *dev_priv = ring->dev->dev_private;
+       struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
        bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
        bool need_fence, need_mappable;
+       struct drm_i915_gem_object *obj = vma->obj;
        int ret;
 
        need_fence =
                has_fenced_gpu_access &&
                entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
                obj->tiling_mode != I915_TILING_NONE;
-       need_mappable = need_fence || need_reloc_mappable(obj);
+       need_mappable = need_fence || need_reloc_mappable(vma);
 
-       ret = i915_gem_object_pin(obj, entry->alignment, need_mappable, false);
+       ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, need_mappable,
+                                 false);
        if (ret)
                return ret;
 
@@ -436,8 +501,8 @@ i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
                obj->has_aliasing_ppgtt_mapping = 1;
        }
 
-       if (entry->offset != obj->gtt_offset) {
-               entry->offset = obj->gtt_offset;
+       if (entry->offset != vma->node.start) {
+               entry->offset = vma->node.start;
                *need_reloc = true;
        }
 
@@ -454,14 +519,15 @@ i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
 }
 
 static void
-i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj)
+i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
 {
        struct drm_i915_gem_exec_object2 *entry;
+       struct drm_i915_gem_object *obj = vma->obj;
 
-       if (!obj->gtt_space)
+       if (!drm_mm_node_allocated(&vma->node))
                return;
 
-       entry = obj->exec_entry;
+       entry = vma->exec_entry;
 
        if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
                i915_gem_object_unpin_fence(obj);
@@ -474,40 +540,40 @@ i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj)
 
 static int
 i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
-                           struct list_head *objects,
+                           struct list_head *vmas,
                            bool *need_relocs)
 {
        struct drm_i915_gem_object *obj;
-       struct list_head ordered_objects;
+       struct i915_vma *vma;
+       struct list_head ordered_vmas;
        bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
        int retry;
 
-       INIT_LIST_HEAD(&ordered_objects);
-       while (!list_empty(objects)) {
+       INIT_LIST_HEAD(&ordered_vmas);
+       while (!list_empty(vmas)) {
                struct drm_i915_gem_exec_object2 *entry;
                bool need_fence, need_mappable;
 
-               obj = list_first_entry(objects,
-                                      struct drm_i915_gem_object,
-                                      exec_list);
-               entry = obj->exec_entry;
+               vma = list_first_entry(vmas, struct i915_vma, exec_list);
+               obj = vma->obj;
+               entry = vma->exec_entry;
 
                need_fence =
                        has_fenced_gpu_access &&
                        entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
                        obj->tiling_mode != I915_TILING_NONE;
-               need_mappable = need_fence || need_reloc_mappable(obj);
+               need_mappable = need_fence || need_reloc_mappable(vma);
 
                if (need_mappable)
-                       list_move(&obj->exec_list, &ordered_objects);
+                       list_move(&vma->exec_list, &ordered_vmas);
                else
-                       list_move_tail(&obj->exec_list, &ordered_objects);
+                       list_move_tail(&vma->exec_list, &ordered_vmas);
 
                obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND;
                obj->base.pending_write_domain = 0;
                obj->pending_fenced_gpu_access = false;
        }
-       list_splice(&ordered_objects, objects);
+       list_splice(&ordered_vmas, vmas);
 
        /* Attempt to pin all of the buffers into the GTT.
         * This is done in 3 phases:
@@ -526,41 +592,47 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
                int ret = 0;
 
                /* Unbind any ill-fitting objects or pin. */
-               list_for_each_entry(obj, objects, exec_list) {
-                       struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
+               list_for_each_entry(vma, vmas, exec_list) {
+                       struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
                        bool need_fence, need_mappable;
 
-                       if (!obj->gtt_space)
+                       obj = vma->obj;
+
+                       if (!drm_mm_node_allocated(&vma->node))
                                continue;
 
                        need_fence =
                                has_fenced_gpu_access &&
                                entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
                                obj->tiling_mode != I915_TILING_NONE;
-                       need_mappable = need_fence || need_reloc_mappable(obj);
+                       need_mappable = need_fence || need_reloc_mappable(vma);
+
+                       WARN_ON((need_mappable || need_fence) &&
+                              !i915_is_ggtt(vma->vm));
 
-                       if ((entry->alignment && obj->gtt_offset & (entry->alignment - 1)) ||
+                       if ((entry->alignment &&
+                            vma->node.start & (entry->alignment - 1)) ||
                            (need_mappable && !obj->map_and_fenceable))
-                               ret = i915_gem_object_unbind(obj);
+                               ret = i915_vma_unbind(vma);
                        else
-                               ret = i915_gem_execbuffer_reserve_object(obj, ring, need_relocs);
+                               ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
                        if (ret)
                                goto err;
                }
 
                /* Bind fresh objects */
-               list_for_each_entry(obj, objects, exec_list) {
-                       if (obj->gtt_space)
+               list_for_each_entry(vma, vmas, exec_list) {
+                       if (drm_mm_node_allocated(&vma->node))
                                continue;
 
-                       ret = i915_gem_execbuffer_reserve_object(obj, ring, need_relocs);
+                       ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
                        if (ret)
                                goto err;
                }
 
 err:           /* Decrement pin count for bound objects */
-               list_for_each_entry(obj, objects, exec_list)
-                       i915_gem_execbuffer_unreserve_object(obj);
+               list_for_each_entry(vma, vmas, exec_list)
+                       i915_gem_execbuffer_unreserve_vma(vma);
 
                if (ret != -ENOSPC || retry++)
                        return ret;
@@ -576,23 +648,27 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
                                  struct drm_i915_gem_execbuffer2 *args,
                                  struct drm_file *file,
                                  struct intel_ring_buffer *ring,
-                                 struct eb_objects *eb,
+                                 struct eb_vmas *eb,
                                  struct drm_i915_gem_exec_object2 *exec)
 {
        struct drm_i915_gem_relocation_entry *reloc;
-       struct drm_i915_gem_object *obj;
+       struct i915_address_space *vm;
+       struct i915_vma *vma;
        bool need_relocs;
        int *reloc_offset;
        int i, total, ret;
        int count = args->buffer_count;
 
+       if (WARN_ON(list_empty(&eb->vmas)))
+               return 0;
+
+       vm = list_first_entry(&eb->vmas, struct i915_vma, exec_list)->vm;
+
        /* We may process another execbuffer during the unlock... */
-       while (!list_empty(&eb->objects)) {
-               obj = list_first_entry(&eb->objects,
-                                      struct drm_i915_gem_object,
-                                      exec_list);
-               list_del_init(&obj->exec_list);
-               drm_gem_object_unreference(&obj->base);
+       while (!list_empty(&eb->vmas)) {
+               vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list);
+               list_del_init(&vma->exec_list);
+               drm_gem_object_unreference(&vma->obj->base);
        }
 
        mutex_unlock(&dev->struct_mutex);
@@ -656,19 +732,19 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
 
        /* reacquire the objects */
        eb_reset(eb);
-       ret = eb_lookup_objects(eb, exec, args, file);
+       ret = eb_lookup_vmas(eb, exec, args, vm, file);
        if (ret)
                goto err;
 
        need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
-       ret = i915_gem_execbuffer_reserve(ring, &eb->objects, &need_relocs);
+       ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
        if (ret)
                goto err;
 
-       list_for_each_entry(obj, &eb->objects, exec_list) {
-               int offset = obj->exec_entry - exec;
-               ret = i915_gem_execbuffer_relocate_object_slow(obj, eb,
-                                                              reloc + reloc_offset[offset]);
+       list_for_each_entry(vma, &eb->vmas, exec_list) {
+               int offset = vma->exec_entry - exec;
+               ret = i915_gem_execbuffer_relocate_vma_slow(vma, eb,
+                                                           reloc + reloc_offset[offset]);
                if (ret)
                        goto err;
        }
@@ -687,24 +763,26 @@ err:
 
 static int
 i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
-                               struct list_head *objects)
+                               struct list_head *vmas)
 {
-       struct drm_i915_gem_object *obj;
+       struct i915_vma *vma;
        uint32_t flush_domains = 0;
+       bool flush_chipset = false;
        int ret;
 
-       list_for_each_entry(obj, objects, exec_list) {
+       list_for_each_entry(vma, vmas, exec_list) {
+               struct drm_i915_gem_object *obj = vma->obj;
                ret = i915_gem_object_sync(obj, ring);
                if (ret)
                        return ret;
 
                if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
-                       i915_gem_clflush_object(obj);
+                       flush_chipset |= i915_gem_clflush_object(obj, false);
 
                flush_domains |= obj->base.write_domain;
        }
 
-       if (flush_domains & I915_GEM_DOMAIN_CPU)
+       if (flush_chipset)
                i915_gem_chipset_flush(ring->dev);
 
        if (flush_domains & I915_GEM_DOMAIN_GTT)
@@ -758,20 +836,23 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
                if (!access_ok(VERIFY_WRITE, ptr, length))
                        return -EFAULT;
 
-               if (fault_in_multipages_readable(ptr, length))
-                       return -EFAULT;
+               if (likely(!i915_prefault_disable)) {
+                       if (fault_in_multipages_readable(ptr, length))
+                               return -EFAULT;
+               }
        }
 
        return 0;
 }
 
 static void
-i915_gem_execbuffer_move_to_active(struct list_head *objects,
+i915_gem_execbuffer_move_to_active(struct list_head *vmas,
                                   struct intel_ring_buffer *ring)
 {
-       struct drm_i915_gem_object *obj;
+       struct i915_vma *vma;
 
-       list_for_each_entry(obj, objects, exec_list) {
+       list_for_each_entry(vma, vmas, exec_list) {
+               struct drm_i915_gem_object *obj = vma->obj;
                u32 old_read = obj->base.read_domains;
                u32 old_write = obj->base.write_domain;
 
@@ -781,6 +862,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects,
                obj->base.read_domains = obj->base.pending_read_domains;
                obj->fenced_gpu_access = obj->pending_fenced_gpu_access;
 
+               list_move_tail(&vma->mm_list, &vma->vm->active_list);
                i915_gem_object_move_to_active(obj, ring);
                if (obj->base.write_domain) {
                        obj->dirty = 1;
@@ -835,10 +917,11 @@ static int
 i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                       struct drm_file *file,
                       struct drm_i915_gem_execbuffer2 *args,
-                      struct drm_i915_gem_exec_object2 *exec)
+                      struct drm_i915_gem_exec_object2 *exec,
+                      struct i915_address_space *vm)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
-       struct eb_objects *eb;
+       struct eb_vmas *eb;
        struct drm_i915_gem_object *batch_obj;
        struct drm_clip_rect *cliprects = NULL;
        struct intel_ring_buffer *ring;
@@ -872,7 +955,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                break;
        case I915_EXEC_BSD:
                ring = &dev_priv->ring[VCS];
-               if (ctx_id != 0) {
+               if (ctx_id != DEFAULT_CONTEXT_ID) {
                        DRM_DEBUG("Ring %s doesn't support contexts\n",
                                  ring->name);
                        return -EPERM;
@@ -880,7 +963,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                break;
        case I915_EXEC_BLT:
                ring = &dev_priv->ring[BCS];
-               if (ctx_id != 0) {
+               if (ctx_id != DEFAULT_CONTEXT_ID) {
                        DRM_DEBUG("Ring %s doesn't support contexts\n",
                                  ring->name);
                        return -EPERM;
@@ -888,7 +971,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                break;
        case I915_EXEC_VEBOX:
                ring = &dev_priv->ring[VECS];
-               if (ctx_id != 0) {
+               if (ctx_id != DEFAULT_CONTEXT_ID) {
                        DRM_DEBUG("Ring %s doesn't support contexts\n",
                                  ring->name);
                        return -EPERM;
@@ -972,13 +1055,13 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
        if (ret)
                goto pre_mutex_err;
 
-       if (dev_priv->mm.suspended) {
+       if (dev_priv->ums.mm_suspended) {
                mutex_unlock(&dev->struct_mutex);
                ret = -EBUSY;
                goto pre_mutex_err;
        }
 
-       eb = eb_create(args);
+       eb = eb_create(args, vm);
        if (eb == NULL) {
                mutex_unlock(&dev->struct_mutex);
                ret = -ENOMEM;
@@ -986,24 +1069,22 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
        }
 
        /* Look up object handles */
-       ret = eb_lookup_objects(eb, exec, args, file);
+       ret = eb_lookup_vmas(eb, exec, args, vm, file);
        if (ret)
                goto err;
 
        /* take note of the batch buffer before we might reorder the lists */
-       batch_obj = list_entry(eb->objects.prev,
-                              struct drm_i915_gem_object,
-                              exec_list);
+       batch_obj = list_entry(eb->vmas.prev, struct i915_vma, exec_list)->obj;
 
        /* Move the objects en-masse into the GTT, evicting if necessary. */
        need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
-       ret = i915_gem_execbuffer_reserve(ring, &eb->objects, &need_relocs);
+       ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
        if (ret)
                goto err;
 
        /* The objects are in their final locations, apply the relocations. */
        if (need_relocs)
-               ret = i915_gem_execbuffer_relocate(eb);
+               ret = i915_gem_execbuffer_relocate(eb, vm);
        if (ret) {
                if (ret == -EFAULT) {
                        ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
@@ -1029,7 +1110,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
        if (flags & I915_DISPATCH_SECURE && !batch_obj->has_global_gtt_mapping)
                i915_gem_gtt_bind_object(batch_obj, batch_obj->cache_level);
 
-       ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->objects);
+       ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->vmas);
        if (ret)
                goto err;
 
@@ -1058,7 +1139,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                        goto err;
        }
 
-       exec_start = batch_obj->gtt_offset + args->batch_start_offset;
+       exec_start = i915_gem_obj_offset(batch_obj, vm) +
+               args->batch_start_offset;
        exec_len = args->batch_len;
        if (cliprects) {
                for (i = 0; i < args->num_cliprects; i++) {
@@ -1083,7 +1165,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 
        trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);
 
-       i915_gem_execbuffer_move_to_active(&eb->objects, ring);
+       i915_gem_execbuffer_move_to_active(&eb->vmas, ring);
        i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
 
 err:
@@ -1104,6 +1186,7 @@ int
 i915_gem_execbuffer(struct drm_device *dev, void *data,
                    struct drm_file *file)
 {
+       struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_execbuffer *args = data;
        struct drm_i915_gem_execbuffer2 exec2;
        struct drm_i915_gem_exec_object *exec_list = NULL;
@@ -1159,7 +1242,8 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
        exec2.flags = I915_EXEC_RENDER;
        i915_execbuffer2_set_context_id(exec2, 0);
 
-       ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
+       ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list,
+                                    &dev_priv->gtt.base);
        if (!ret) {
                /* Copy the new buffer offsets back to the user's exec list. */
                for (i = 0; i < args->buffer_count; i++)
@@ -1185,6 +1269,7 @@ int
 i915_gem_execbuffer2(struct drm_device *dev, void *data,
                     struct drm_file *file)
 {
+       struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_execbuffer2 *args = data;
        struct drm_i915_gem_exec_object2 *exec2_list = NULL;
        int ret;
@@ -1215,7 +1300,8 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
                return -EFAULT;
        }
 
-       ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
+       ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list,
+                                    &dev_priv->gtt.base);
        if (!ret) {
                /* Copy the new buffer offsets back to the user's exec list. */
                ret = copy_to_user(to_user_ptr(args->buffers_ptr),
index 5101ab6869b47eef37b5be33e9b4786bc862beca..212f6d8c35ec6593cc54957ecd24445196d26657 100644 (file)
 #include "i915_trace.h"
 #include "intel_drv.h"
 
+#define GEN6_PPGTT_PD_ENTRIES 512
+#define I915_PPGTT_PT_ENTRIES (PAGE_SIZE / sizeof(gen6_gtt_pte_t))
+
 /* PPGTT stuff */
 #define GEN6_GTT_ADDR_ENCODE(addr)     ((addr) | (((addr) >> 28) & 0xff0))
+#define HSW_GTT_ADDR_ENCODE(addr)      ((addr) | (((addr) >> 28) & 0x7f0))
 
 #define GEN6_PDE_VALID                 (1 << 0)
 /* gen6+ has bit 11-4 for physical addr bit 39-32 */
 #define GEN6_PTE_UNCACHED              (1 << 1)
 #define HSW_PTE_UNCACHED               (0)
 #define GEN6_PTE_CACHE_LLC             (2 << 1)
-#define GEN6_PTE_CACHE_LLC_MLC         (3 << 1)
+#define GEN7_PTE_CACHE_L3_LLC          (3 << 1)
 #define GEN6_PTE_ADDR_ENCODE(addr)     GEN6_GTT_ADDR_ENCODE(addr)
+#define HSW_PTE_ADDR_ENCODE(addr)      HSW_GTT_ADDR_ENCODE(addr)
 
-static gen6_gtt_pte_t gen6_pte_encode(struct drm_device *dev,
-                                     dma_addr_t addr,
-                                     enum i915_cache_level level)
+/* Cacheability Control is a 4-bit value. The low three bits are stored in *
+ * bits 3:1 of the PTE, while the fourth bit is stored in bit 11 of the PTE.
+ */
+#define HSW_CACHEABILITY_CONTROL(bits) ((((bits) & 0x7) << 1) | \
+                                        (((bits) & 0x8) << (11 - 3)))
+#define HSW_WB_LLC_AGE3                        HSW_CACHEABILITY_CONTROL(0x2)
+#define HSW_WB_LLC_AGE0                        HSW_CACHEABILITY_CONTROL(0x3)
+#define HSW_WB_ELLC_LLC_AGE0           HSW_CACHEABILITY_CONTROL(0xb)
+#define HSW_WT_ELLC_LLC_AGE0           HSW_CACHEABILITY_CONTROL(0x6)
+
+static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr,
+                                    enum i915_cache_level level)
 {
        gen6_gtt_pte_t pte = GEN6_PTE_VALID;
        pte |= GEN6_PTE_ADDR_ENCODE(addr);
 
        switch (level) {
-       case I915_CACHE_LLC_MLC:
-               pte |= GEN6_PTE_CACHE_LLC_MLC;
+       case I915_CACHE_L3_LLC:
+       case I915_CACHE_LLC:
+               pte |= GEN6_PTE_CACHE_LLC;
+               break;
+       case I915_CACHE_NONE:
+               pte |= GEN6_PTE_UNCACHED;
+               break;
+       default:
+               WARN_ON(1);
+       }
+
+       return pte;
+}
+
+static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr,
+                                    enum i915_cache_level level)
+{
+       gen6_gtt_pte_t pte = GEN6_PTE_VALID;
+       pte |= GEN6_PTE_ADDR_ENCODE(addr);
+
+       switch (level) {
+       case I915_CACHE_L3_LLC:
+               pte |= GEN7_PTE_CACHE_L3_LLC;
                break;
        case I915_CACHE_LLC:
                pte |= GEN6_PTE_CACHE_LLC;
@@ -60,7 +95,7 @@ static gen6_gtt_pte_t gen6_pte_encode(struct drm_device *dev,
                pte |= GEN6_PTE_UNCACHED;
                break;
        default:
-               BUG();
+               WARN_ON(1);
        }
 
        return pte;
@@ -69,8 +104,7 @@ static gen6_gtt_pte_t gen6_pte_encode(struct drm_device *dev,
 #define BYT_PTE_WRITEABLE              (1 << 1)
 #define BYT_PTE_SNOOPED_BY_CPU_CACHES  (1 << 2)
 
-static gen6_gtt_pte_t byt_pte_encode(struct drm_device *dev,
-                                    dma_addr_t addr,
+static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr,
                                     enum i915_cache_level level)
 {
        gen6_gtt_pte_t pte = GEN6_PTE_VALID;
@@ -87,22 +121,41 @@ static gen6_gtt_pte_t byt_pte_encode(struct drm_device *dev,
        return pte;
 }
 
-static gen6_gtt_pte_t hsw_pte_encode(struct drm_device *dev,
-                                    dma_addr_t addr,
+static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr,
                                     enum i915_cache_level level)
 {
        gen6_gtt_pte_t pte = GEN6_PTE_VALID;
-       pte |= GEN6_PTE_ADDR_ENCODE(addr);
+       pte |= HSW_PTE_ADDR_ENCODE(addr);
 
        if (level != I915_CACHE_NONE)
-               pte |= GEN6_PTE_CACHE_LLC;
+               pte |= HSW_WB_LLC_AGE3;
+
+       return pte;
+}
+
+static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr,
+                                     enum i915_cache_level level)
+{
+       gen6_gtt_pte_t pte = GEN6_PTE_VALID;
+       pte |= HSW_PTE_ADDR_ENCODE(addr);
+
+       switch (level) {
+       case I915_CACHE_NONE:
+               break;
+       case I915_CACHE_WT:
+               pte |= HSW_WT_ELLC_LLC_AGE0;
+               break;
+       default:
+               pte |= HSW_WB_ELLC_LLC_AGE0;
+               break;
+       }
 
        return pte;
 }
 
 static void gen6_write_pdes(struct i915_hw_ppgtt *ppgtt)
 {
-       struct drm_i915_private *dev_priv = ppgtt->dev->dev_private;
+       struct drm_i915_private *dev_priv = ppgtt->base.dev->dev_private;
        gen6_gtt_pte_t __iomem *pd_addr;
        uint32_t pd_entry;
        int i;
@@ -181,18 +234,18 @@ static int gen6_ppgtt_enable(struct drm_device *dev)
 }
 
 /* PPGTT support for Sandybdrige/Gen6 and later */
-static void gen6_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
+static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
                                   unsigned first_entry,
                                   unsigned num_entries)
 {
+       struct i915_hw_ppgtt *ppgtt =
+               container_of(vm, struct i915_hw_ppgtt, base);
        gen6_gtt_pte_t *pt_vaddr, scratch_pte;
        unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
        unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
        unsigned last_pte, i;
 
-       scratch_pte = ppgtt->pte_encode(ppgtt->dev,
-                                       ppgtt->scratch_page_dma_addr,
-                                       I915_CACHE_LLC);
+       scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC);
 
        while (num_entries) {
                last_pte = first_pte + num_entries;
@@ -212,11 +265,13 @@ static void gen6_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
        }
 }
 
-static void gen6_ppgtt_insert_entries(struct i915_hw_ppgtt *ppgtt,
+static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
                                      struct sg_table *pages,
                                      unsigned first_entry,
                                      enum i915_cache_level cache_level)
 {
+       struct i915_hw_ppgtt *ppgtt =
+               container_of(vm, struct i915_hw_ppgtt, base);
        gen6_gtt_pte_t *pt_vaddr;
        unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
        unsigned act_pte = first_entry % I915_PPGTT_PT_ENTRIES;
@@ -227,8 +282,7 @@ static void gen6_ppgtt_insert_entries(struct i915_hw_ppgtt *ppgtt,
                dma_addr_t page_addr;
 
                page_addr = sg_page_iter_dma_address(&sg_iter);
-               pt_vaddr[act_pte] = ppgtt->pte_encode(ppgtt->dev, page_addr,
-                                                     cache_level);
+               pt_vaddr[act_pte] = vm->pte_encode(page_addr, cache_level);
                if (++act_pte == I915_PPGTT_PT_ENTRIES) {
                        kunmap_atomic(pt_vaddr);
                        act_pt++;
@@ -240,13 +294,17 @@ static void gen6_ppgtt_insert_entries(struct i915_hw_ppgtt *ppgtt,
        kunmap_atomic(pt_vaddr);
 }
 
-static void gen6_ppgtt_cleanup(struct i915_hw_ppgtt *ppgtt)
+static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
 {
+       struct i915_hw_ppgtt *ppgtt =
+               container_of(vm, struct i915_hw_ppgtt, base);
        int i;
 
+       drm_mm_takedown(&ppgtt->base.mm);
+
        if (ppgtt->pt_dma_addr) {
                for (i = 0; i < ppgtt->num_pd_entries; i++)
-                       pci_unmap_page(ppgtt->dev->pdev,
+                       pci_unmap_page(ppgtt->base.dev->pdev,
                                       ppgtt->pt_dma_addr[i],
                                       4096, PCI_DMA_BIDIRECTIONAL);
        }
@@ -260,7 +318,7 @@ static void gen6_ppgtt_cleanup(struct i915_hw_ppgtt *ppgtt)
 
 static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
 {
-       struct drm_device *dev = ppgtt->dev;
+       struct drm_device *dev = ppgtt->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned first_pd_entry_in_global_pt;
        int i;
@@ -271,18 +329,13 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
         * now. */
        first_pd_entry_in_global_pt = gtt_total_entries(dev_priv->gtt);
 
-       if (IS_HASWELL(dev)) {
-               ppgtt->pte_encode = hsw_pte_encode;
-       } else if (IS_VALLEYVIEW(dev)) {
-               ppgtt->pte_encode = byt_pte_encode;
-       } else {
-               ppgtt->pte_encode = gen6_pte_encode;
-       }
-       ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES;
+       ppgtt->base.pte_encode = dev_priv->gtt.base.pte_encode;
+       ppgtt->num_pd_entries = GEN6_PPGTT_PD_ENTRIES;
        ppgtt->enable = gen6_ppgtt_enable;
-       ppgtt->clear_range = gen6_ppgtt_clear_range;
-       ppgtt->insert_entries = gen6_ppgtt_insert_entries;
-       ppgtt->cleanup = gen6_ppgtt_cleanup;
+       ppgtt->base.clear_range = gen6_ppgtt_clear_range;
+       ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
+       ppgtt->base.cleanup = gen6_ppgtt_cleanup;
+       ppgtt->base.scratch = dev_priv->gtt.base.scratch;
        ppgtt->pt_pages = kzalloc(sizeof(struct page *)*ppgtt->num_pd_entries,
                                  GFP_KERNEL);
        if (!ppgtt->pt_pages)
@@ -313,8 +366,8 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
                ppgtt->pt_dma_addr[i] = pt_addr;
        }
 
-       ppgtt->clear_range(ppgtt, 0,
-                          ppgtt->num_pd_entries*I915_PPGTT_PT_ENTRIES);
+       ppgtt->base.clear_range(&ppgtt->base, 0,
+                               ppgtt->num_pd_entries * I915_PPGTT_PT_ENTRIES);
 
        ppgtt->pd_offset = first_pd_entry_in_global_pt * sizeof(gen6_gtt_pte_t);
 
@@ -347,8 +400,7 @@ static int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
        if (!ppgtt)
                return -ENOMEM;
 
-       ppgtt->dev = dev;
-       ppgtt->scratch_page_dma_addr = dev_priv->gtt.scratch_page_dma;
+       ppgtt->base.dev = dev;
 
        if (INTEL_INFO(dev)->gen < 8)
                ret = gen6_ppgtt_init(ppgtt);
@@ -357,8 +409,11 @@ static int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
 
        if (ret)
                kfree(ppgtt);
-       else
+       else {
                dev_priv->mm.aliasing_ppgtt = ppgtt;
+               drm_mm_init(&ppgtt->base.mm, ppgtt->base.start,
+                           ppgtt->base.total);
+       }
 
        return ret;
 }
@@ -371,7 +426,7 @@ void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
        if (!ppgtt)
                return;
 
-       ppgtt->cleanup(ppgtt);
+       ppgtt->base.cleanup(&ppgtt->base);
        dev_priv->mm.aliasing_ppgtt = NULL;
 }
 
@@ -379,17 +434,17 @@ void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
                            struct drm_i915_gem_object *obj,
                            enum i915_cache_level cache_level)
 {
-       ppgtt->insert_entries(ppgtt, obj->pages,
-                             obj->gtt_space->start >> PAGE_SHIFT,
-                             cache_level);
+       ppgtt->base.insert_entries(&ppgtt->base, obj->pages,
+                                  i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT,
+                                  cache_level);
 }
 
 void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
                              struct drm_i915_gem_object *obj)
 {
-       ppgtt->clear_range(ppgtt,
-                          obj->gtt_space->start >> PAGE_SHIFT,
-                          obj->base.size >> PAGE_SHIFT);
+       ppgtt->base.clear_range(&ppgtt->base,
+                               i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT,
+                               obj->base.size >> PAGE_SHIFT);
 }
 
 extern int intel_iommu_gfx_mapped;
@@ -436,11 +491,12 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
        struct drm_i915_gem_object *obj;
 
        /* First fill our portion of the GTT with scratch pages */
-       dev_priv->gtt.gtt_clear_range(dev, dev_priv->gtt.start / PAGE_SIZE,
-                                     dev_priv->gtt.total / PAGE_SIZE);
+       dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
+                                      dev_priv->gtt.base.start / PAGE_SIZE,
+                                      dev_priv->gtt.base.total / PAGE_SIZE);
 
        list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
-               i915_gem_clflush_object(obj);
+               i915_gem_clflush_object(obj, obj->pin_display);
                i915_gem_gtt_bind_object(obj, obj->cache_level);
        }
 
@@ -466,12 +522,12 @@ int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
  * within the global GTT as well as accessible by the GPU through the GMADR
  * mapped BAR (dev_priv->mm.gtt->gtt).
  */
-static void gen6_ggtt_insert_entries(struct drm_device *dev,
+static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
                                     struct sg_table *st,
                                     unsigned int first_entry,
                                     enum i915_cache_level level)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = vm->dev->dev_private;
        gen6_gtt_pte_t __iomem *gtt_entries =
                (gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
        int i = 0;
@@ -480,8 +536,7 @@ static void gen6_ggtt_insert_entries(struct drm_device *dev,
 
        for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
                addr = sg_page_iter_dma_address(&sg_iter);
-               iowrite32(dev_priv->gtt.pte_encode(dev, addr, level),
-                         &gtt_entries[i]);
+               iowrite32(vm->pte_encode(addr, level), &gtt_entries[i]);
                i++;
        }
 
@@ -492,8 +547,8 @@ static void gen6_ggtt_insert_entries(struct drm_device *dev,
         * hardware should work, we must keep this posting read for paranoia.
         */
        if (i != 0)
-               WARN_ON(readl(&gtt_entries[i-1])
-                       != dev_priv->gtt.pte_encode(dev, addr, level));
+               WARN_ON(readl(&gtt_entries[i-1]) !=
+                       vm->pte_encode(addr, level));
 
        /* This next bit makes the above posting read even more important. We
         * want to flush the TLBs only after we're certain all the PTE updates
@@ -503,11 +558,11 @@ static void gen6_ggtt_insert_entries(struct drm_device *dev,
        POSTING_READ(GFX_FLSH_CNTL_GEN6);
 }
 
-static void gen6_ggtt_clear_range(struct drm_device *dev,
+static void gen6_ggtt_clear_range(struct i915_address_space *vm,
                                  unsigned int first_entry,
                                  unsigned int num_entries)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = vm->dev->dev_private;
        gen6_gtt_pte_t scratch_pte, __iomem *gtt_base =
                (gen6_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
        const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
@@ -518,16 +573,14 @@ static void gen6_ggtt_clear_range(struct drm_device *dev,
                 first_entry, num_entries, max_entries))
                num_entries = max_entries;
 
-       scratch_pte = dev_priv->gtt.pte_encode(dev,
-                                              dev_priv->gtt.scratch_page_dma,
-                                              I915_CACHE_LLC);
+       scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC);
        for (i = 0; i < num_entries; i++)
                iowrite32(scratch_pte, &gtt_base[i]);
        readl(gtt_base);
 }
 
 
-static void i915_ggtt_insert_entries(struct drm_device *dev,
+static void i915_ggtt_insert_entries(struct i915_address_space *vm,
                                     struct sg_table *st,
                                     unsigned int pg_start,
                                     enum i915_cache_level cache_level)
@@ -539,7 +592,7 @@ static void i915_ggtt_insert_entries(struct drm_device *dev,
 
 }
 
-static void i915_ggtt_clear_range(struct drm_device *dev,
+static void i915_ggtt_clear_range(struct i915_address_space *vm,
                                  unsigned int first_entry,
                                  unsigned int num_entries)
 {
@@ -552,10 +605,11 @@ void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
 {
        struct drm_device *dev = obj->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
+       const unsigned long entry = i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT;
 
-       dev_priv->gtt.gtt_insert_entries(dev, obj->pages,
-                                        obj->gtt_space->start >> PAGE_SHIFT,
-                                        cache_level);
+       dev_priv->gtt.base.insert_entries(&dev_priv->gtt.base, obj->pages,
+                                         entry,
+                                         cache_level);
 
        obj->has_global_gtt_mapping = 1;
 }
@@ -564,10 +618,11 @@ void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
 {
        struct drm_device *dev = obj->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
+       const unsigned long entry = i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT;
 
-       dev_priv->gtt.gtt_clear_range(obj->base.dev,
-                                     obj->gtt_space->start >> PAGE_SHIFT,
-                                     obj->base.size >> PAGE_SHIFT);
+       dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
+                                      entry,
+                                      obj->base.size >> PAGE_SHIFT);
 
        obj->has_global_gtt_mapping = 0;
 }
@@ -618,7 +673,8 @@ void i915_gem_setup_global_gtt(struct drm_device *dev,
         * aperture.  One page should be enough to keep any prefetching inside
         * of the aperture.
         */
-       drm_i915_private_t *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct i915_address_space *ggtt_vm = &dev_priv->gtt.base;
        struct drm_mm_node *entry;
        struct drm_i915_gem_object *obj;
        unsigned long hole_start, hole_end;
@@ -626,37 +682,38 @@ void i915_gem_setup_global_gtt(struct drm_device *dev,
        BUG_ON(mappable_end > end);
 
        /* Subtract the guard page ... */
-       drm_mm_init(&dev_priv->mm.gtt_space, start, end - start - PAGE_SIZE);
+       drm_mm_init(&ggtt_vm->mm, start, end - start - PAGE_SIZE);
        if (!HAS_LLC(dev))
-               dev_priv->mm.gtt_space.color_adjust = i915_gtt_color_adjust;
+               dev_priv->gtt.base.mm.color_adjust = i915_gtt_color_adjust;
 
        /* Mark any preallocated objects as occupied */
        list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
-               DRM_DEBUG_KMS("reserving preallocated space: %x + %zx\n",
-                             obj->gtt_offset, obj->base.size);
-
-               BUG_ON(obj->gtt_space != I915_GTT_RESERVED);
-               obj->gtt_space = drm_mm_create_block(&dev_priv->mm.gtt_space,
-                                                    obj->gtt_offset,
-                                                    obj->base.size,
-                                                    false);
+               struct i915_vma *vma = i915_gem_obj_to_vma(obj, ggtt_vm);
+               int ret;
+               DRM_DEBUG_KMS("reserving preallocated space: %lx + %zx\n",
+                             i915_gem_obj_ggtt_offset(obj), obj->base.size);
+
+               WARN_ON(i915_gem_obj_ggtt_bound(obj));
+               ret = drm_mm_reserve_node(&ggtt_vm->mm, &vma->node);
+               if (ret)
+                       DRM_DEBUG_KMS("Reservation failed\n");
                obj->has_global_gtt_mapping = 1;
+               list_add(&vma->vma_link, &obj->vma_list);
        }
 
-       dev_priv->gtt.start = start;
-       dev_priv->gtt.total = end - start;
+       dev_priv->gtt.base.start = start;
+       dev_priv->gtt.base.total = end - start;
 
        /* Clear any non-preallocated blocks */
-       drm_mm_for_each_hole(entry, &dev_priv->mm.gtt_space,
-                            hole_start, hole_end) {
+       drm_mm_for_each_hole(entry, &ggtt_vm->mm, hole_start, hole_end) {
+               const unsigned long count = (hole_end - hole_start) / PAGE_SIZE;
                DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
                              hole_start, hole_end);
-               dev_priv->gtt.gtt_clear_range(dev, hole_start / PAGE_SIZE,
-                                             (hole_end-hole_start) / PAGE_SIZE);
+               ggtt_vm->clear_range(ggtt_vm, hole_start / PAGE_SIZE, count);
        }
 
        /* And finally clear the reserved guard page */
-       dev_priv->gtt.gtt_clear_range(dev, end / PAGE_SIZE - 1, 1);
+       ggtt_vm->clear_range(ggtt_vm, end / PAGE_SIZE - 1, 1);
 }
 
 static bool
@@ -679,7 +736,7 @@ void i915_gem_init_global_gtt(struct drm_device *dev)
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long gtt_size, mappable_size;
 
-       gtt_size = dev_priv->gtt.total;
+       gtt_size = dev_priv->gtt.base.total;
        mappable_size = dev_priv->gtt.mappable_end;
 
        if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
@@ -688,7 +745,7 @@ void i915_gem_init_global_gtt(struct drm_device *dev)
                if (INTEL_INFO(dev)->gen <= 7) {
                        /* PPGTT pdes are stolen from global gtt ptes, so shrink the
                         * aperture accordingly when using aliasing ppgtt. */
-                       gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
+                       gtt_size -= GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE;
                }
 
                i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
@@ -698,8 +755,8 @@ void i915_gem_init_global_gtt(struct drm_device *dev)
                        return;
 
                DRM_ERROR("Aliased PPGTT setup failed %d\n", ret);
-               drm_mm_takedown(&dev_priv->mm.gtt_space);
-               gtt_size += I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
+               drm_mm_takedown(&dev_priv->gtt.base.mm);
+               gtt_size += GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE;
        }
        i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
 }
@@ -724,8 +781,8 @@ static int setup_scratch_page(struct drm_device *dev)
 #else
        dma_addr = page_to_phys(page);
 #endif
-       dev_priv->gtt.scratch_page = page;
-       dev_priv->gtt.scratch_page_dma = dma_addr;
+       dev_priv->gtt.base.scratch.page = page;
+       dev_priv->gtt.base.scratch.addr = dma_addr;
 
        return 0;
 }
@@ -733,11 +790,13 @@ static int setup_scratch_page(struct drm_device *dev)
 static void teardown_scratch_page(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       set_pages_wb(dev_priv->gtt.scratch_page, 1);
-       pci_unmap_page(dev->pdev, dev_priv->gtt.scratch_page_dma,
+       struct page *page = dev_priv->gtt.base.scratch.page;
+
+       set_pages_wb(page, 1);
+       pci_unmap_page(dev->pdev, dev_priv->gtt.base.scratch.addr,
                       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-       put_page(dev_priv->gtt.scratch_page);
-       __free_page(dev_priv->gtt.scratch_page);
+       put_page(page);
+       __free_page(page);
 }
 
 static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
@@ -800,17 +859,18 @@ static int gen6_gmch_probe(struct drm_device *dev,
        if (ret)
                DRM_ERROR("Scratch setup failed\n");
 
-       dev_priv->gtt.gtt_clear_range = gen6_ggtt_clear_range;
-       dev_priv->gtt.gtt_insert_entries = gen6_ggtt_insert_entries;
+       dev_priv->gtt.base.clear_range = gen6_ggtt_clear_range;
+       dev_priv->gtt.base.insert_entries = gen6_ggtt_insert_entries;
 
        return ret;
 }
 
-static void gen6_gmch_remove(struct drm_device *dev)
+static void gen6_gmch_remove(struct i915_address_space *vm)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       iounmap(dev_priv->gtt.gsm);
-       teardown_scratch_page(dev_priv->dev);
+
+       struct i915_gtt *gtt = container_of(vm, struct i915_gtt, base);
+       iounmap(gtt->gsm);
+       teardown_scratch_page(vm->dev);
 }
 
 static int i915_gmch_probe(struct drm_device *dev,
@@ -831,13 +891,13 @@ static int i915_gmch_probe(struct drm_device *dev,
        intel_gtt_get(gtt_total, stolen, mappable_base, mappable_end);
 
        dev_priv->gtt.do_idle_maps = needs_idle_maps(dev_priv->dev);
-       dev_priv->gtt.gtt_clear_range = i915_ggtt_clear_range;
-       dev_priv->gtt.gtt_insert_entries = i915_ggtt_insert_entries;
+       dev_priv->gtt.base.clear_range = i915_ggtt_clear_range;
+       dev_priv->gtt.base.insert_entries = i915_ggtt_insert_entries;
 
        return 0;
 }
 
-static void i915_gmch_remove(struct drm_device *dev)
+static void i915_gmch_remove(struct i915_address_space *vm)
 {
        intel_gmch_remove();
 }
@@ -849,34 +909,35 @@ int i915_gem_gtt_init(struct drm_device *dev)
        int ret;
 
        if (INTEL_INFO(dev)->gen <= 5) {
-               dev_priv->gtt.gtt_probe = i915_gmch_probe;
-               dev_priv->gtt.gtt_remove = i915_gmch_remove;
+               gtt->gtt_probe = i915_gmch_probe;
+               gtt->base.cleanup = i915_gmch_remove;
        } else {
-               dev_priv->gtt.gtt_probe = gen6_gmch_probe;
-               dev_priv->gtt.gtt_remove = gen6_gmch_remove;
-               if (IS_HASWELL(dev)) {
-                       dev_priv->gtt.pte_encode = hsw_pte_encode;
-               } else if (IS_VALLEYVIEW(dev)) {
-                       dev_priv->gtt.pte_encode = byt_pte_encode;
-               } else {
-                       dev_priv->gtt.pte_encode = gen6_pte_encode;
-               }
+               gtt->gtt_probe = gen6_gmch_probe;
+               gtt->base.cleanup = gen6_gmch_remove;
+               if (IS_HASWELL(dev) && dev_priv->ellc_size)
+                       gtt->base.pte_encode = iris_pte_encode;
+               else if (IS_HASWELL(dev))
+                       gtt->base.pte_encode = hsw_pte_encode;
+               else if (IS_VALLEYVIEW(dev))
+                       gtt->base.pte_encode = byt_pte_encode;
+               else if (INTEL_INFO(dev)->gen >= 7)
+                       gtt->base.pte_encode = ivb_pte_encode;
+               else
+                       gtt->base.pte_encode = snb_pte_encode;
        }
 
-       ret = dev_priv->gtt.gtt_probe(dev, &dev_priv->gtt.total,
-                                    &dev_priv->gtt.stolen_size,
-                                    &gtt->mappable_base,
-                                    &gtt->mappable_end);
+       ret = gtt->gtt_probe(dev, &gtt->base.total, &gtt->stolen_size,
+                            &gtt->mappable_base, &gtt->mappable_end);
        if (ret)
                return ret;
 
+       gtt->base.dev = dev;
+
        /* GMADR is the PCI mmio aperture into the global GTT. */
        DRM_INFO("Memory usable by graphics device = %zdM\n",
-                dev_priv->gtt.total >> 20);
-       DRM_DEBUG_DRIVER("GMADR size = %ldM\n",
-                        dev_priv->gtt.mappable_end >> 20);
-       DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n",
-                        dev_priv->gtt.stolen_size >> 20);
+                gtt->base.total >> 20);
+       DRM_DEBUG_DRIVER("GMADR size = %ldM\n", gtt->mappable_end >> 20);
+       DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n", gtt->stolen_size >> 20);
 
        return 0;
 }
index 982d4732cecff93e45370f30dfab03cc1bfdb32a..b902f2afc8e25275f1de78e54ec8ff20043aa88a 100644 (file)
 static unsigned long i915_stolen_to_physical(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct pci_dev *pdev = dev_priv->bridge_dev;
+       struct resource *r;
        u32 base;
 
-       /* On the machines I have tested the Graphics Base of Stolen Memory
-        * is unreliable, so on those compute the base by subtracting the
-        * stolen memory from the Top of Low Usable DRAM which is where the
-        * BIOS places the graphics stolen memory.
+       /* Almost universally we can find the Graphics Base of Stolen Memory
+        * at offset 0x5c in the igfx configuration space. On a few (desktop)
+        * machines this is also mirrored in the bridge device at different
+        * locations, or in the MCHBAR. On gen2, the layout is again slightly
+        * different with the Graphics Segment immediately following Top of
+        * Memory (or Top of Usable DRAM). Note it appears that TOUD is only
+        * reported by 865g, so we just use the top of memory as determined
+        * by the e820 probe.
         *
-        * On gen2, the layout is slightly different with the Graphics Segment
-        * immediately following Top of Memory (or Top of Usable DRAM). Note
-        * it appears that TOUD is only reported by 865g, so we just use the
-        * top of memory as determined by the e820 probe.
-        *
-        * XXX gen2 requires an unavailable symbol and 945gm fails with
-        * its value of TOLUD.
+        * XXX However gen2 requires an unavailable symbol.
         */
        base = 0;
-       if (IS_VALLEYVIEW(dev)) {
+       if (INTEL_INFO(dev)->gen >= 3) {
+               /* Read Graphics Base of Stolen Memory directly */
                pci_read_config_dword(dev->pdev, 0x5c, &base);
                base &= ~((1<<20) - 1);
-       } else if (INTEL_INFO(dev)->gen >= 6) {
-               /* Read Base Data of Stolen Memory Register (BDSM) directly.
-                * Note that there is also a MCHBAR miror at 0x1080c0 or
-                * we could use device 2:0x5c instead.
-               */
-               pci_read_config_dword(pdev, 0xB0, &base);
-               base &= ~4095; /* lower bits used for locking register */
-       } else if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
-               /* Read Graphics Base of Stolen Memory directly */
-               pci_read_config_dword(pdev, 0xA4, &base);
+       } else { /* GEN2 */
 #if 0
-       } else if (IS_GEN3(dev)) {
-               u8 val;
-               /* Stolen is immediately below Top of Low Usable DRAM */
-               pci_read_config_byte(pdev, 0x9c, &val);
-               base = val >> 3 << 27;
-               base -= dev_priv->mm.gtt->stolen_size;
-       } else {
                /* Stolen is immediately above Top of Memory */
                base = max_low_pfn_mapped << PAGE_SHIFT;
 #endif
        }
 
+       if (base == 0)
+               return 0;
+
+       /* Verify that nothing else uses this physical address. Stolen
+        * memory should be reserved by the BIOS and hidden from the
+        * kernel. So if the region is already marked as busy, something
+        * is seriously wrong.
+        */
+       r = devm_request_mem_region(dev->dev, base, dev_priv->gtt.stolen_size,
+                                   "Graphics Stolen Memory");
+       if (r == NULL) {
+               DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n",
+                         base, base + (uint32_t)dev_priv->gtt.stolen_size);
+               base = 0;
+       }
+
        return base;
 }
 
@@ -95,32 +94,37 @@ static int i915_setup_compression(struct drm_device *dev, int size)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_mm_node *compressed_fb, *uninitialized_var(compressed_llb);
+       int ret;
 
-       /* Try to over-allocate to reduce reallocations and fragmentation */
-       compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen,
-                                          size <<= 1, 4096, 0);
+       compressed_fb = kzalloc(sizeof(*compressed_fb), GFP_KERNEL);
        if (!compressed_fb)
-               compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen,
-                                                  size >>= 1, 4096, 0);
-       if (compressed_fb)
-               compressed_fb = drm_mm_get_block(compressed_fb, size, 4096);
-       if (!compressed_fb)
-               goto err;
+               goto err_llb;
+
+       /* Try to over-allocate to reduce reallocations and fragmentation */
+       ret = drm_mm_insert_node(&dev_priv->mm.stolen, compressed_fb,
+                                size <<= 1, 4096, DRM_MM_SEARCH_DEFAULT);
+       if (ret)
+               ret = drm_mm_insert_node(&dev_priv->mm.stolen, compressed_fb,
+                                        size >>= 1, 4096,
+                                        DRM_MM_SEARCH_DEFAULT);
+       if (ret)
+               goto err_llb;
 
        if (HAS_PCH_SPLIT(dev))
                I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start);
        else if (IS_GM45(dev)) {
                I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
        } else {
-               compressed_llb = drm_mm_search_free(&dev_priv->mm.stolen,
-                                                   4096, 4096, 0);
-               if (compressed_llb)
-                       compressed_llb = drm_mm_get_block(compressed_llb,
-                                                         4096, 4096);
+               compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL);
                if (!compressed_llb)
                        goto err_fb;
 
-               dev_priv->compressed_llb = compressed_llb;
+               ret = drm_mm_insert_node(&dev_priv->mm.stolen, compressed_llb,
+                                        4096, 4096, DRM_MM_SEARCH_DEFAULT);
+               if (ret)
+                       goto err_fb;
+
+               dev_priv->fbc.compressed_llb = compressed_llb;
 
                I915_WRITE(FBC_CFB_BASE,
                           dev_priv->mm.stolen_base + compressed_fb->start);
@@ -128,8 +132,8 @@ static int i915_setup_compression(struct drm_device *dev, int size)
                           dev_priv->mm.stolen_base + compressed_llb->start);
        }
 
-       dev_priv->compressed_fb = compressed_fb;
-       dev_priv->cfb_size = size;
+       dev_priv->fbc.compressed_fb = compressed_fb;
+       dev_priv->fbc.size = size;
 
        DRM_DEBUG_KMS("reserved %d bytes of contiguous stolen space for FBC\n",
                      size);
@@ -137,8 +141,10 @@ static int i915_setup_compression(struct drm_device *dev, int size)
        return 0;
 
 err_fb:
-       drm_mm_put_block(compressed_fb);
-err:
+       kfree(compressed_llb);
+       drm_mm_remove_node(compressed_fb);
+err_llb:
+       kfree(compressed_fb);
        pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
        return -ENOSPC;
 }
@@ -150,7 +156,7 @@ int i915_gem_stolen_setup_compression(struct drm_device *dev, int size)
        if (!drm_mm_initialized(&dev_priv->mm.stolen))
                return -ENODEV;
 
-       if (size < dev_priv->cfb_size)
+       if (size < dev_priv->fbc.size)
                return 0;
 
        /* Release any current block */
@@ -163,16 +169,20 @@ void i915_gem_stolen_cleanup_compression(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
 
-       if (dev_priv->cfb_size == 0)
+       if (dev_priv->fbc.size == 0)
                return;
 
-       if (dev_priv->compressed_fb)
-               drm_mm_put_block(dev_priv->compressed_fb);
+       if (dev_priv->fbc.compressed_fb) {
+               drm_mm_remove_node(dev_priv->fbc.compressed_fb);
+               kfree(dev_priv->fbc.compressed_fb);
+       }
 
-       if (dev_priv->compressed_llb)
-               drm_mm_put_block(dev_priv->compressed_llb);
+       if (dev_priv->fbc.compressed_llb) {
+               drm_mm_remove_node(dev_priv->fbc.compressed_llb);
+               kfree(dev_priv->fbc.compressed_llb);
+       }
 
-       dev_priv->cfb_size = 0;
+       dev_priv->fbc.size = 0;
 }
 
 void i915_gem_cleanup_stolen(struct drm_device *dev)
@@ -201,6 +211,9 @@ int i915_gem_init_stolen(struct drm_device *dev)
        if (IS_VALLEYVIEW(dev))
                bios_reserved = 1024*1024; /* top 1M on VLV/BYT */
 
+       if (WARN_ON(bios_reserved > dev_priv->gtt.stolen_size))
+               return 0;
+
        /* Basic memrange allocator for stolen space */
        drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->gtt.stolen_size -
                    bios_reserved);
@@ -271,9 +284,7 @@ _i915_gem_object_create_stolen(struct drm_device *dev,
        if (obj == NULL)
                return NULL;
 
-       if (drm_gem_private_object_init(dev, &obj->base, stolen->size))
-               goto cleanup;
-
+       drm_gem_private_object_init(dev, &obj->base, stolen->size);
        i915_gem_object_init(obj, &i915_gem_object_stolen_ops);
 
        obj->pages = i915_pages_create_for_stolen(dev,
@@ -285,9 +296,8 @@ _i915_gem_object_create_stolen(struct drm_device *dev,
        i915_gem_object_pin_pages(obj);
        obj->stolen = stolen;
 
-       obj->base.write_domain = I915_GEM_DOMAIN_GTT;
-       obj->base.read_domains = I915_GEM_DOMAIN_GTT;
-       obj->cache_level = I915_CACHE_NONE;
+       obj->base.read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
+       obj->cache_level = HAS_LLC(dev) ? I915_CACHE_LLC : I915_CACHE_NONE;
 
        return obj;
 
@@ -302,6 +312,7 @@ i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj;
        struct drm_mm_node *stolen;
+       int ret;
 
        if (!drm_mm_initialized(&dev_priv->mm.stolen))
                return NULL;
@@ -310,17 +321,23 @@ i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
        if (size == 0)
                return NULL;
 
-       stolen = drm_mm_search_free(&dev_priv->mm.stolen, size, 4096, 0);
-       if (stolen)
-               stolen = drm_mm_get_block(stolen, size, 4096);
-       if (stolen == NULL)
+       stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
+       if (!stolen)
+               return NULL;
+
+       ret = drm_mm_insert_node(&dev_priv->mm.stolen, stolen, size,
+                                4096, DRM_MM_SEARCH_DEFAULT);
+       if (ret) {
+               kfree(stolen);
                return NULL;
+       }
 
        obj = _i915_gem_object_create_stolen(dev, stolen);
        if (obj)
                return obj;
 
-       drm_mm_put_block(stolen);
+       drm_mm_remove_node(stolen);
+       kfree(stolen);
        return NULL;
 }
 
@@ -331,8 +348,11 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
                                               u32 size)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
+       struct i915_address_space *ggtt = &dev_priv->gtt.base;
        struct drm_i915_gem_object *obj;
        struct drm_mm_node *stolen;
+       struct i915_vma *vma;
+       int ret;
 
        if (!drm_mm_initialized(&dev_priv->mm.stolen))
                return NULL;
@@ -347,56 +367,74 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
        if (WARN_ON(size == 0))
                return NULL;
 
-       stolen = drm_mm_create_block(&dev_priv->mm.stolen,
-                                    stolen_offset, size,
-                                    false);
-       if (stolen == NULL) {
+       stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
+       if (!stolen)
+               return NULL;
+
+       stolen->start = stolen_offset;
+       stolen->size = size;
+       ret = drm_mm_reserve_node(&dev_priv->mm.stolen, stolen);
+       if (ret) {
                DRM_DEBUG_KMS("failed to allocate stolen space\n");
+               kfree(stolen);
                return NULL;
        }
 
        obj = _i915_gem_object_create_stolen(dev, stolen);
        if (obj == NULL) {
                DRM_DEBUG_KMS("failed to allocate stolen object\n");
-               drm_mm_put_block(stolen);
+               drm_mm_remove_node(stolen);
+               kfree(stolen);
                return NULL;
        }
 
        /* Some objects just need physical mem from stolen space */
-       if (gtt_offset == -1)
+       if (gtt_offset == I915_GTT_OFFSET_NONE)
                return obj;
 
+       vma = i915_gem_obj_lookup_or_create_vma(obj, ggtt);
+       if (IS_ERR(vma)) {
+               ret = PTR_ERR(vma);
+               goto err_out;
+       }
+
        /* To simplify the initialisation sequence between KMS and GTT,
         * we allow construction of the stolen object prior to
         * setting up the GTT space. The actual reservation will occur
         * later.
         */
-       if (drm_mm_initialized(&dev_priv->mm.gtt_space)) {
-               obj->gtt_space = drm_mm_create_block(&dev_priv->mm.gtt_space,
-                                                    gtt_offset, size,
-                                                    false);
-               if (obj->gtt_space == NULL) {
+       vma->node.start = gtt_offset;
+       vma->node.size = size;
+       if (drm_mm_initialized(&ggtt->mm)) {
+               ret = drm_mm_reserve_node(&ggtt->mm, &vma->node);
+               if (ret) {
                        DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
-                       drm_gem_object_unreference(&obj->base);
-                       return NULL;
+                       goto err_vma;
                }
-       } else
-               obj->gtt_space = I915_GTT_RESERVED;
+       }
 
-       obj->gtt_offset = gtt_offset;
        obj->has_global_gtt_mapping = 1;
 
        list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
-       list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
+       list_add_tail(&vma->mm_list, &ggtt->inactive_list);
 
        return obj;
+
+err_vma:
+       i915_gem_vma_destroy(vma);
+err_out:
+       drm_mm_remove_node(stolen);
+       kfree(stolen);
+       drm_gem_object_unreference(&obj->base);
+       return NULL;
 }
 
 void
 i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
 {
        if (obj->stolen) {
-               drm_mm_put_block(obj->stolen);
+               drm_mm_remove_node(obj->stolen);
+               kfree(obj->stolen);
                obj->stolen = NULL;
        }
 }
index 537545be69db89fb8c2d19e692a94cca0b418bf8..032e9ef9c89679228a03b2d668a5bf7f68f1587f 100644 (file)
@@ -268,18 +268,18 @@ i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode)
                return true;
 
        if (INTEL_INFO(obj->base.dev)->gen == 3) {
-               if (obj->gtt_offset & ~I915_FENCE_START_MASK)
+               if (i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK)
                        return false;
        } else {
-               if (obj->gtt_offset & ~I830_FENCE_START_MASK)
+               if (i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK)
                        return false;
        }
 
        size = i915_gem_get_gtt_size(obj->base.dev, obj->base.size, tiling_mode);
-       if (obj->gtt_space->size != size)
+       if (i915_gem_obj_ggtt_size(obj) != size)
                return false;
 
-       if (obj->gtt_offset & (size - 1))
+       if (i915_gem_obj_ggtt_offset(obj) & (size - 1))
                return false;
 
        return true;
@@ -359,18 +359,19 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
                 */
 
                obj->map_and_fenceable =
-                       obj->gtt_space == NULL ||
-                       (obj->gtt_offset + obj->base.size <= dev_priv->gtt.mappable_end &&
+                       !i915_gem_obj_ggtt_bound(obj) ||
+                       (i915_gem_obj_ggtt_offset(obj) +
+                        obj->base.size <= dev_priv->gtt.mappable_end &&
                         i915_gem_object_fence_ok(obj, args->tiling_mode));
 
                /* Rebind if we need a change of alignment */
                if (!obj->map_and_fenceable) {
-                       u32 unfenced_alignment =
+                       u32 unfenced_align =
                                i915_gem_get_gtt_alignment(dev, obj->base.size,
                                                            args->tiling_mode,
                                                            false);
-                       if (obj->gtt_offset & (unfenced_alignment - 1))
-                               ret = i915_gem_object_unbind(obj);
+                       if (i915_gem_obj_ggtt_offset(obj) & (unfenced_align - 1))
+                               ret = i915_gem_object_ggtt_unbind(obj);
                }
 
                if (ret == 0) {
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
new file mode 100644 (file)
index 0000000..558e568
--- /dev/null
@@ -0,0 +1,1019 @@
+/*
+ * Copyright (c) 2008 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Eric Anholt <eric@anholt.net>
+ *    Keith Packard <keithp@keithp.com>
+ *    Mika Kuoppala <mika.kuoppala@intel.com>
+ *
+ */
+
+#include <generated/utsrelease.h>
+#include "i915_drv.h"
+
+static const char *yesno(int v)
+{
+       return v ? "yes" : "no";
+}
+
+static const char *ring_str(int ring)
+{
+       switch (ring) {
+       case RCS: return "render";
+       case VCS: return "bsd";
+       case BCS: return "blt";
+       case VECS: return "vebox";
+       default: return "";
+       }
+}
+
+static const char *pin_flag(int pinned)
+{
+       if (pinned > 0)
+               return " P";
+       else if (pinned < 0)
+               return " p";
+       else
+               return "";
+}
+
+static const char *tiling_flag(int tiling)
+{
+       switch (tiling) {
+       default:
+       case I915_TILING_NONE: return "";
+       case I915_TILING_X: return " X";
+       case I915_TILING_Y: return " Y";
+       }
+}
+
+static const char *dirty_flag(int dirty)
+{
+       return dirty ? " dirty" : "";
+}
+
+static const char *purgeable_flag(int purgeable)
+{
+       return purgeable ? " purgeable" : "";
+}
+
+static bool __i915_error_ok(struct drm_i915_error_state_buf *e)
+{
+
+       if (!e->err && WARN(e->bytes > (e->size - 1), "overflow")) {
+               e->err = -ENOSPC;
+               return false;
+       }
+
+       if (e->bytes == e->size - 1 || e->err)
+               return false;
+
+       return true;
+}
+
+static bool __i915_error_seek(struct drm_i915_error_state_buf *e,
+                             unsigned len)
+{
+       if (e->pos + len <= e->start) {
+               e->pos += len;
+               return false;
+       }
+
+       /* First vsnprintf needs to fit in its entirety for memmove */
+       if (len >= e->size) {
+               e->err = -EIO;
+               return false;
+       }
+
+       return true;
+}
+
+static void __i915_error_advance(struct drm_i915_error_state_buf *e,
+                                unsigned len)
+{
+       /* If this is first printf in this window, adjust it so that
+        * start position matches start of the buffer
+        */
+
+       if (e->pos < e->start) {
+               const size_t off = e->start - e->pos;
+
+               /* Should not happen but be paranoid */
+               if (off > len || e->bytes) {
+                       e->err = -EIO;
+                       return;
+               }
+
+               memmove(e->buf, e->buf + off, len - off);
+               e->bytes = len - off;
+               e->pos = e->start;
+               return;
+       }
+
+       e->bytes += len;
+       e->pos += len;
+}
+
+static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
+                              const char *f, va_list args)
+{
+       unsigned len;
+
+       if (!__i915_error_ok(e))
+               return;
+
+       /* Seek the first printf which is hits start position */
+       if (e->pos < e->start) {
+               len = vsnprintf(NULL, 0, f, args);
+               if (!__i915_error_seek(e, len))
+                       return;
+       }
+
+       len = vsnprintf(e->buf + e->bytes, e->size - e->bytes, f, args);
+       if (len >= e->size - e->bytes)
+               len = e->size - e->bytes - 1;
+
+       __i915_error_advance(e, len);
+}
+
+static void i915_error_puts(struct drm_i915_error_state_buf *e,
+                           const char *str)
+{
+       unsigned len;
+
+       if (!__i915_error_ok(e))
+               return;
+
+       len = strlen(str);
+
+       /* Seek the first printf which is hits start position */
+       if (e->pos < e->start) {
+               if (!__i915_error_seek(e, len))
+                       return;
+       }
+
+       if (len >= e->size - e->bytes)
+               len = e->size - e->bytes - 1;
+       memcpy(e->buf + e->bytes, str, len);
+
+       __i915_error_advance(e, len);
+}
+
+#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
+#define err_puts(e, s) i915_error_puts(e, s)
+
+static void print_error_buffers(struct drm_i915_error_state_buf *m,
+                               const char *name,
+                               struct drm_i915_error_buffer *err,
+                               int count)
+{
+       err_printf(m, "%s [%d]:\n", name, count);
+
+       while (count--) {
+               err_printf(m, "  %08x %8u %02x %02x %x %x",
+                          err->gtt_offset,
+                          err->size,
+                          err->read_domains,
+                          err->write_domain,
+                          err->rseqno, err->wseqno);
+               err_puts(m, pin_flag(err->pinned));
+               err_puts(m, tiling_flag(err->tiling));
+               err_puts(m, dirty_flag(err->dirty));
+               err_puts(m, purgeable_flag(err->purgeable));
+               err_puts(m, err->ring != -1 ? " " : "");
+               err_puts(m, ring_str(err->ring));
+               err_puts(m, i915_cache_level_str(err->cache_level));
+
+               if (err->name)
+                       err_printf(m, " (name: %d)", err->name);
+               if (err->fence_reg != I915_FENCE_REG_NONE)
+                       err_printf(m, " (fence: %d)", err->fence_reg);
+
+               err_puts(m, "\n");
+               err++;
+       }
+}
+
+static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
+                                 struct drm_device *dev,
+                                 struct drm_i915_error_state *error,
+                                 unsigned ring)
+{
+       BUG_ON(ring >= I915_NUM_RINGS); /* shut up confused gcc */
+       err_printf(m, "%s command stream:\n", ring_str(ring));
+       err_printf(m, "  HEAD: 0x%08x\n", error->head[ring]);
+       err_printf(m, "  TAIL: 0x%08x\n", error->tail[ring]);
+       err_printf(m, "  CTL: 0x%08x\n", error->ctl[ring]);
+       err_printf(m, "  ACTHD: 0x%08x\n", error->acthd[ring]);
+       err_printf(m, "  IPEIR: 0x%08x\n", error->ipeir[ring]);
+       err_printf(m, "  IPEHR: 0x%08x\n", error->ipehr[ring]);
+       err_printf(m, "  INSTDONE: 0x%08x\n", error->instdone[ring]);
+       if (ring == RCS && INTEL_INFO(dev)->gen >= 4)
+               err_printf(m, "  BBADDR: 0x%08llx\n", error->bbaddr);
+
+       if (INTEL_INFO(dev)->gen >= 4)
+               err_printf(m, "  INSTPS: 0x%08x\n", error->instps[ring]);
+       err_printf(m, "  INSTPM: 0x%08x\n", error->instpm[ring]);
+       err_printf(m, "  FADDR: 0x%08x\n", error->faddr[ring]);
+       if (INTEL_INFO(dev)->gen >= 6) {
+               err_printf(m, "  RC PSMI: 0x%08x\n", error->rc_psmi[ring]);
+               err_printf(m, "  FAULT_REG: 0x%08x\n", error->fault_reg[ring]);
+               err_printf(m, "  SYNC_0: 0x%08x [last synced 0x%08x]\n",
+                          error->semaphore_mboxes[ring][0],
+                          error->semaphore_seqno[ring][0]);
+               err_printf(m, "  SYNC_1: 0x%08x [last synced 0x%08x]\n",
+                          error->semaphore_mboxes[ring][1],
+                          error->semaphore_seqno[ring][1]);
+               if (HAS_VEBOX(dev)) {
+                       err_printf(m, "  SYNC_2: 0x%08x [last synced 0x%08x]\n",
+                                  error->semaphore_mboxes[ring][2],
+                                  error->semaphore_seqno[ring][2]);
+               }
+       }
+       err_printf(m, "  seqno: 0x%08x\n", error->seqno[ring]);
+       err_printf(m, "  waiting: %s\n", yesno(error->waiting[ring]));
+       err_printf(m, "  ring->head: 0x%08x\n", error->cpu_ring_head[ring]);
+       err_printf(m, "  ring->tail: 0x%08x\n", error->cpu_ring_tail[ring]);
+}
+
+void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
+{
+       va_list args;
+
+       va_start(args, f);
+       i915_error_vprintf(e, f, args);
+       va_end(args);
+}
+
+int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
+                           const struct i915_error_state_file_priv *error_priv)
+{
+       struct drm_device *dev = error_priv->dev;
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       struct drm_i915_error_state *error = error_priv->error;
+       struct intel_ring_buffer *ring;
+       int i, j, page, offset, elt;
+
+       if (!error) {
+               err_printf(m, "no error state collected\n");
+               goto out;
+       }
+
+       err_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
+                  error->time.tv_usec);
+       err_printf(m, "Kernel: " UTS_RELEASE "\n");
+       err_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
+       err_printf(m, "EIR: 0x%08x\n", error->eir);
+       err_printf(m, "IER: 0x%08x\n", error->ier);
+       err_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
+       err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
+       err_printf(m, "DERRMR: 0x%08x\n", error->derrmr);
+       err_printf(m, "CCID: 0x%08x\n", error->ccid);
+
+       for (i = 0; i < dev_priv->num_fence_regs; i++)
+               err_printf(m, "  fence[%d] = %08llx\n", i, error->fence[i]);
+
+       for (i = 0; i < ARRAY_SIZE(error->extra_instdone); i++)
+               err_printf(m, "  INSTDONE_%d: 0x%08x\n", i,
+                          error->extra_instdone[i]);
+
+       if (INTEL_INFO(dev)->gen >= 6) {
+               err_printf(m, "ERROR: 0x%08x\n", error->error);
+               err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
+       }
+
+       if (INTEL_INFO(dev)->gen == 7)
+               err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);
+
+       for_each_ring(ring, dev_priv, i)
+               i915_ring_error_state(m, dev, error, i);
+
+       if (error->active_bo)
+               print_error_buffers(m, "Active",
+                                   error->active_bo[0],
+                                   error->active_bo_count[0]);
+
+       if (error->pinned_bo)
+               print_error_buffers(m, "Pinned",
+                                   error->pinned_bo[0],
+                                   error->pinned_bo_count[0]);
+
+       for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
+               struct drm_i915_error_object *obj;
+
+               if ((obj = error->ring[i].batchbuffer)) {
+                       err_printf(m, "%s --- gtt_offset = 0x%08x\n",
+                                  dev_priv->ring[i].name,
+                                  obj->gtt_offset);
+                       offset = 0;
+                       for (page = 0; page < obj->page_count; page++) {
+                               for (elt = 0; elt < PAGE_SIZE/4; elt++) {
+                                       err_printf(m, "%08x :  %08x\n", offset,
+                                                  obj->pages[page][elt]);
+                                       offset += 4;
+                               }
+                       }
+               }
+
+               if (error->ring[i].num_requests) {
+                       err_printf(m, "%s --- %d requests\n",
+                                  dev_priv->ring[i].name,
+                                  error->ring[i].num_requests);
+                       for (j = 0; j < error->ring[i].num_requests; j++) {
+                               err_printf(m, "  seqno 0x%08x, emitted %ld, tail 0x%08x\n",
+                                          error->ring[i].requests[j].seqno,
+                                          error->ring[i].requests[j].jiffies,
+                                          error->ring[i].requests[j].tail);
+                       }
+               }
+
+               if ((obj = error->ring[i].ringbuffer)) {
+                       err_printf(m, "%s --- ringbuffer = 0x%08x\n",
+                                  dev_priv->ring[i].name,
+                                  obj->gtt_offset);
+                       offset = 0;
+                       for (page = 0; page < obj->page_count; page++) {
+                               for (elt = 0; elt < PAGE_SIZE/4; elt++) {
+                                       err_printf(m, "%08x :  %08x\n",
+                                                  offset,
+                                                  obj->pages[page][elt]);
+                                       offset += 4;
+                               }
+                       }
+               }
+
+               obj = error->ring[i].ctx;
+               if (obj) {
+                       err_printf(m, "%s --- HW Context = 0x%08x\n",
+                                  dev_priv->ring[i].name,
+                                  obj->gtt_offset);
+                       offset = 0;
+                       for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
+                               err_printf(m, "[%04x] %08x %08x %08x %08x\n",
+                                          offset,
+                                          obj->pages[0][elt],
+                                          obj->pages[0][elt+1],
+                                          obj->pages[0][elt+2],
+                                          obj->pages[0][elt+3]);
+                                       offset += 16;
+                       }
+               }
+       }
+
+       if (error->overlay)
+               intel_overlay_print_error_state(m, error->overlay);
+
+       if (error->display)
+               intel_display_print_error_state(m, dev, error->display);
+
+out:
+       if (m->bytes == 0 && m->err)
+               return m->err;
+
+       return 0;
+}
+
+int i915_error_state_buf_init(struct drm_i915_error_state_buf *ebuf,
+                             size_t count, loff_t pos)
+{
+       memset(ebuf, 0, sizeof(*ebuf));
+
+       /* We need to have enough room to store any i915_error_state printf
+        * so that we can move it to start position.
+        */
+       ebuf->size = count + 1 > PAGE_SIZE ? count + 1 : PAGE_SIZE;
+       ebuf->buf = kmalloc(ebuf->size,
+                               GFP_TEMPORARY | __GFP_NORETRY | __GFP_NOWARN);
+
+       if (ebuf->buf == NULL) {
+               ebuf->size = PAGE_SIZE;
+               ebuf->buf = kmalloc(ebuf->size, GFP_TEMPORARY);
+       }
+
+       if (ebuf->buf == NULL) {
+               ebuf->size = 128;
+               ebuf->buf = kmalloc(ebuf->size, GFP_TEMPORARY);
+       }
+
+       if (ebuf->buf == NULL)
+               return -ENOMEM;
+
+       ebuf->start = pos;
+
+       return 0;
+}
+
+static void i915_error_object_free(struct drm_i915_error_object *obj)
+{
+       int page;
+
+       if (obj == NULL)
+               return;
+
+       for (page = 0; page < obj->page_count; page++)
+               kfree(obj->pages[page]);
+
+       kfree(obj);
+}
+
+static void i915_error_state_free(struct kref *error_ref)
+{
+       struct drm_i915_error_state *error = container_of(error_ref,
+                                                         typeof(*error), ref);
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
+               i915_error_object_free(error->ring[i].batchbuffer);
+               i915_error_object_free(error->ring[i].ringbuffer);
+               i915_error_object_free(error->ring[i].ctx);
+               kfree(error->ring[i].requests);
+       }
+
+       kfree(error->active_bo);
+       kfree(error->overlay);
+       kfree(error->display);
+       kfree(error);
+}
+
+static struct drm_i915_error_object *
+i915_error_object_create_sized(struct drm_i915_private *dev_priv,
+                              struct drm_i915_gem_object *src,
+                              const int num_pages)
+{
+       struct drm_i915_error_object *dst;
+       int i;
+       u32 reloc_offset;
+
+       if (src == NULL || src->pages == NULL)
+               return NULL;
+
+       dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC);
+       if (dst == NULL)
+               return NULL;
+
+       reloc_offset = dst->gtt_offset = i915_gem_obj_ggtt_offset(src);
+       for (i = 0; i < num_pages; i++) {
+               unsigned long flags;
+               void *d;
+
+               d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
+               if (d == NULL)
+                       goto unwind;
+
+               local_irq_save(flags);
+               if (reloc_offset < dev_priv->gtt.mappable_end &&
+                   src->has_global_gtt_mapping) {
+                       void __iomem *s;
+
+                       /* Simply ignore tiling or any overlapping fence.
+                        * It's part of the error state, and this hopefully
+                        * captures what the GPU read.
+                        */
+
+                       s = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
+                                                    reloc_offset);
+                       memcpy_fromio(d, s, PAGE_SIZE);
+                       io_mapping_unmap_atomic(s);
+               } else if (src->stolen) {
+                       unsigned long offset;
+
+                       offset = dev_priv->mm.stolen_base;
+                       offset += src->stolen->start;
+                       offset += i << PAGE_SHIFT;
+
+                       memcpy_fromio(d, (void __iomem *) offset, PAGE_SIZE);
+               } else {
+                       struct page *page;
+                       void *s;
+
+                       page = i915_gem_object_get_page(src, i);
+
+                       drm_clflush_pages(&page, 1);
+
+                       s = kmap_atomic(page);
+                       memcpy(d, s, PAGE_SIZE);
+                       kunmap_atomic(s);
+
+                       drm_clflush_pages(&page, 1);
+               }
+               local_irq_restore(flags);
+
+               dst->pages[i] = d;
+
+               reloc_offset += PAGE_SIZE;
+       }
+       dst->page_count = num_pages;
+
+       return dst;
+
+unwind:
+       while (i--)
+               kfree(dst->pages[i]);
+       kfree(dst);
+       return NULL;
+}
+#define i915_error_object_create(dev_priv, src) \
+       i915_error_object_create_sized((dev_priv), (src), \
+                                      (src)->base.size>>PAGE_SHIFT)
+
+static void capture_bo(struct drm_i915_error_buffer *err,
+                      struct drm_i915_gem_object *obj)
+{
+       err->size = obj->base.size;
+       err->name = obj->base.name;
+       err->rseqno = obj->last_read_seqno;
+       err->wseqno = obj->last_write_seqno;
+       err->gtt_offset = i915_gem_obj_ggtt_offset(obj);
+       err->read_domains = obj->base.read_domains;
+       err->write_domain = obj->base.write_domain;
+       err->fence_reg = obj->fence_reg;
+       err->pinned = 0;
+       if (obj->pin_count > 0)
+               err->pinned = 1;
+       if (obj->user_pin_count > 0)
+               err->pinned = -1;
+       err->tiling = obj->tiling_mode;
+       err->dirty = obj->dirty;
+       err->purgeable = obj->madv != I915_MADV_WILLNEED;
+       err->ring = obj->ring ? obj->ring->id : -1;
+       err->cache_level = obj->cache_level;
+}
+
+static u32 capture_active_bo(struct drm_i915_error_buffer *err,
+                            int count, struct list_head *head)
+{
+       struct i915_vma *vma;
+       int i = 0;
+
+       list_for_each_entry(vma, head, mm_list) {
+               capture_bo(err++, vma->obj);
+               if (++i == count)
+                       break;
+       }
+
+       return i;
+}
+
+static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
+                            int count, struct list_head *head)
+{
+       struct drm_i915_gem_object *obj;
+       int i = 0;
+
+       list_for_each_entry(obj, head, global_list) {
+               if (obj->pin_count == 0)
+                       continue;
+
+               capture_bo(err++, obj);
+               if (++i == count)
+                       break;
+       }
+
+       return i;
+}
+
+static void i915_gem_record_fences(struct drm_device *dev,
+                                  struct drm_i915_error_state *error)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int i;
+
+       /* Fences */
+       switch (INTEL_INFO(dev)->gen) {
+       case 7:
+       case 6:
+               for (i = 0; i < dev_priv->num_fence_regs; i++)
+                       error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
+               break;
+       case 5:
+       case 4:
+               for (i = 0; i < 16; i++)
+                       error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
+               break;
+       case 3:
+               if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
+                       for (i = 0; i < 8; i++)
+                               error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
+       case 2:
+               for (i = 0; i < 8; i++)
+                       error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
+               break;
+
+       default:
+               BUG();
+       }
+}
+
+static struct drm_i915_error_object *
+i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
+                            struct intel_ring_buffer *ring)
+{
+       struct i915_address_space *vm;
+       struct i915_vma *vma;
+       struct drm_i915_gem_object *obj;
+       u32 seqno;
+
+       if (!ring->get_seqno)
+               return NULL;
+
+       if (HAS_BROKEN_CS_TLB(dev_priv->dev)) {
+               u32 acthd = I915_READ(ACTHD);
+
+               if (WARN_ON(ring->id != RCS))
+                       return NULL;
+
+               obj = ring->private;
+               if (acthd >= i915_gem_obj_ggtt_offset(obj) &&
+                   acthd < i915_gem_obj_ggtt_offset(obj) + obj->base.size)
+                       return i915_error_object_create(dev_priv, obj);
+       }
+
+       seqno = ring->get_seqno(ring, false);
+       list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
+               list_for_each_entry(vma, &vm->active_list, mm_list) {
+                       obj = vma->obj;
+                       if (obj->ring != ring)
+                               continue;
+
+                       if (i915_seqno_passed(seqno, obj->last_read_seqno))
+                               continue;
+
+                       if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
+                               continue;
+
+                       /* We need to copy these to an anonymous buffer as the simplest
+                        * method to avoid being overwritten by userspace.
+                        */
+                       return i915_error_object_create(dev_priv, obj);
+               }
+       }
+
+       return NULL;
+}
+
+static void i915_record_ring_state(struct drm_device *dev,
+                                  struct drm_i915_error_state *error,
+                                  struct intel_ring_buffer *ring)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       if (INTEL_INFO(dev)->gen >= 6) {
+               error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50);
+               error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
+               error->semaphore_mboxes[ring->id][0]
+                       = I915_READ(RING_SYNC_0(ring->mmio_base));
+               error->semaphore_mboxes[ring->id][1]
+                       = I915_READ(RING_SYNC_1(ring->mmio_base));
+               error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0];
+               error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1];
+       }
+
+       if (HAS_VEBOX(dev)) {
+               error->semaphore_mboxes[ring->id][2] =
+                       I915_READ(RING_SYNC_2(ring->mmio_base));
+               error->semaphore_seqno[ring->id][2] = ring->sync_seqno[2];
+       }
+
+       if (INTEL_INFO(dev)->gen >= 4) {
+               error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
+               error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base));
+               error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
+               error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
+               error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
+               if (ring->id == RCS)
+                       error->bbaddr = I915_READ64(BB_ADDR);
+       } else {
+               error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
+               error->ipeir[ring->id] = I915_READ(IPEIR);
+               error->ipehr[ring->id] = I915_READ(IPEHR);
+               error->instdone[ring->id] = I915_READ(INSTDONE);
+       }
+
+       error->waiting[ring->id] = waitqueue_active(&ring->irq_queue);
+       error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
+       error->seqno[ring->id] = ring->get_seqno(ring, false);
+       error->acthd[ring->id] = intel_ring_get_active_head(ring);
+       error->head[ring->id] = I915_READ_HEAD(ring);
+       error->tail[ring->id] = I915_READ_TAIL(ring);
+       error->ctl[ring->id] = I915_READ_CTL(ring);
+
+       error->cpu_ring_head[ring->id] = ring->head;
+       error->cpu_ring_tail[ring->id] = ring->tail;
+}
+
+
+static void i915_gem_record_active_context(struct intel_ring_buffer *ring,
+                                          struct drm_i915_error_state *error,
+                                          struct drm_i915_error_ring *ering)
+{
+       struct drm_i915_private *dev_priv = ring->dev->dev_private;
+       struct drm_i915_gem_object *obj;
+
+       /* Currently render ring is the only HW context user */
+       if (ring->id != RCS || !error->ccid)
+               return;
+
+       list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+               if ((error->ccid & PAGE_MASK) == i915_gem_obj_ggtt_offset(obj)) {
+                       ering->ctx = i915_error_object_create_sized(dev_priv,
+                                                                   obj, 1);
+                       break;
+               }
+       }
+}
+
+static void i915_gem_record_rings(struct drm_device *dev,
+                                 struct drm_i915_error_state *error)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_ring_buffer *ring;
+       struct drm_i915_gem_request *request;
+       int i, count;
+
+       for_each_ring(ring, dev_priv, i) {
+               i915_record_ring_state(dev, error, ring);
+
+               error->ring[i].batchbuffer =
+                       i915_error_first_batchbuffer(dev_priv, ring);
+
+               error->ring[i].ringbuffer =
+                       i915_error_object_create(dev_priv, ring->obj);
+
+
+               i915_gem_record_active_context(ring, error, &error->ring[i]);
+
+               count = 0;
+               list_for_each_entry(request, &ring->request_list, list)
+                       count++;
+
+               error->ring[i].num_requests = count;
+               error->ring[i].requests =
+                       kmalloc(count*sizeof(struct drm_i915_error_request),
+                               GFP_ATOMIC);
+               if (error->ring[i].requests == NULL) {
+                       error->ring[i].num_requests = 0;
+                       continue;
+               }
+
+               count = 0;
+               list_for_each_entry(request, &ring->request_list, list) {
+                       struct drm_i915_error_request *erq;
+
+                       erq = &error->ring[i].requests[count++];
+                       erq->seqno = request->seqno;
+                       erq->jiffies = request->emitted_jiffies;
+                       erq->tail = request->tail;
+               }
+       }
+}
+
+/* FIXME: Since pin count/bound list is global, we duplicate what we capture per
+ * VM.
+ */
+static void i915_gem_capture_vm(struct drm_i915_private *dev_priv,
+                               struct drm_i915_error_state *error,
+                               struct i915_address_space *vm,
+                               const int ndx)
+{
+       struct drm_i915_error_buffer *active_bo = NULL, *pinned_bo = NULL;
+       struct drm_i915_gem_object *obj;
+       struct i915_vma *vma;
+       int i;
+
+       i = 0;
+       list_for_each_entry(vma, &vm->active_list, mm_list)
+               i++;
+       error->active_bo_count[ndx] = i;
+       list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
+               if (obj->pin_count)
+                       i++;
+       error->pinned_bo_count[ndx] = i - error->active_bo_count[ndx];
+
+       if (i) {
+               active_bo = kmalloc(sizeof(*active_bo)*i, GFP_ATOMIC);
+               if (active_bo)
+                       pinned_bo = active_bo + error->active_bo_count[ndx];
+       }
+
+       if (active_bo)
+               error->active_bo_count[ndx] =
+                       capture_active_bo(active_bo,
+                                         error->active_bo_count[ndx],
+                                         &vm->active_list);
+
+       if (pinned_bo)
+               error->pinned_bo_count[ndx] =
+                       capture_pinned_bo(pinned_bo,
+                                         error->pinned_bo_count[ndx],
+                                         &dev_priv->mm.bound_list);
+       error->active_bo[ndx] = active_bo;
+       error->pinned_bo[ndx] = pinned_bo;
+}
+
+static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv,
+                                    struct drm_i915_error_state *error)
+{
+       struct i915_address_space *vm;
+       int cnt = 0, i = 0;
+
+       list_for_each_entry(vm, &dev_priv->vm_list, global_link)
+               cnt++;
+
+       if (WARN(cnt > 1, "Multiple VMs not yet supported\n"))
+               cnt = 1;
+
+       vm = &dev_priv->gtt.base;
+
+       error->active_bo = kcalloc(cnt, sizeof(*error->active_bo), GFP_ATOMIC);
+       error->pinned_bo = kcalloc(cnt, sizeof(*error->pinned_bo), GFP_ATOMIC);
+       error->active_bo_count = kcalloc(cnt, sizeof(*error->active_bo_count),
+                                        GFP_ATOMIC);
+       error->pinned_bo_count = kcalloc(cnt, sizeof(*error->pinned_bo_count),
+                                        GFP_ATOMIC);
+
+       list_for_each_entry(vm, &dev_priv->vm_list, global_link)
+               i915_gem_capture_vm(dev_priv, error, vm, i++);
+}
+
+/**
+ * i915_capture_error_state - capture an error record for later analysis
+ * @dev: drm device
+ *
+ * Should be called when an error is detected (either a hang or an error
+ * interrupt) to capture error state from the time of the error.  Fills
+ * out a structure which becomes available in debugfs for user level tools
+ * to pick up.
+ */
+void i915_capture_error_state(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_error_state *error;
+       unsigned long flags;
+       int pipe;
+
+       spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
+       error = dev_priv->gpu_error.first_error;
+       spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
+       if (error)
+               return;
+
+       /* Account for pipe specific data like PIPE*STAT */
+       error = kzalloc(sizeof(*error), GFP_ATOMIC);
+       if (!error) {
+               DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
+               return;
+       }
+
+       DRM_INFO("capturing error event; look for more information in "
+                "/sys/class/drm/card%d/error\n", dev->primary->index);
+
+       kref_init(&error->ref);
+       error->eir = I915_READ(EIR);
+       error->pgtbl_er = I915_READ(PGTBL_ER);
+       if (HAS_HW_CONTEXTS(dev))
+               error->ccid = I915_READ(CCID);
+
+       if (HAS_PCH_SPLIT(dev))
+               error->ier = I915_READ(DEIER) | I915_READ(GTIER);
+       else if (IS_VALLEYVIEW(dev))
+               error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
+       else if (IS_GEN2(dev))
+               error->ier = I915_READ16(IER);
+       else
+               error->ier = I915_READ(IER);
+
+       if (INTEL_INFO(dev)->gen >= 6)
+               error->derrmr = I915_READ(DERRMR);
+
+       if (IS_VALLEYVIEW(dev))
+               error->forcewake = I915_READ(FORCEWAKE_VLV);
+       else if (INTEL_INFO(dev)->gen >= 7)
+               error->forcewake = I915_READ(FORCEWAKE_MT);
+       else if (INTEL_INFO(dev)->gen == 6)
+               error->forcewake = I915_READ(FORCEWAKE);
+
+       if (!HAS_PCH_SPLIT(dev))
+               for_each_pipe(pipe)
+                       error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));
+
+       if (INTEL_INFO(dev)->gen >= 6) {
+               error->error = I915_READ(ERROR_GEN6);
+               error->done_reg = I915_READ(DONE_REG);
+       }
+
+       if (INTEL_INFO(dev)->gen == 7)
+               error->err_int = I915_READ(GEN7_ERR_INT);
+
+       i915_get_extra_instdone(dev, error->extra_instdone);
+
+       i915_gem_capture_buffers(dev_priv, error);
+       i915_gem_record_fences(dev, error);
+       i915_gem_record_rings(dev, error);
+
+       do_gettimeofday(&error->time);
+
+       error->overlay = intel_overlay_capture_error_state(dev);
+       error->display = intel_display_capture_error_state(dev);
+
+       spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
+       if (dev_priv->gpu_error.first_error == NULL) {
+               dev_priv->gpu_error.first_error = error;
+               error = NULL;
+       }
+       spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
+
+       if (error)
+               i915_error_state_free(&error->ref);
+}
+
+void i915_error_state_get(struct drm_device *dev,
+                         struct i915_error_state_file_priv *error_priv)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       unsigned long flags;
+
+       spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
+       error_priv->error = dev_priv->gpu_error.first_error;
+       if (error_priv->error)
+               kref_get(&error_priv->error->ref);
+       spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
+
+}
+
+void i915_error_state_put(struct i915_error_state_file_priv *error_priv)
+{
+       if (error_priv->error)
+               kref_put(&error_priv->error->ref, i915_error_state_free);
+}
+
+void i915_destroy_error_state(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_error_state *error;
+       unsigned long flags;
+
+       spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
+       error = dev_priv->gpu_error.first_error;
+       dev_priv->gpu_error.first_error = NULL;
+       spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
+
+       if (error)
+               kref_put(&error->ref, i915_error_state_free);
+}
+
+const char *i915_cache_level_str(int type)
+{
+       switch (type) {
+       case I915_CACHE_NONE: return " uncached";
+       case I915_CACHE_LLC: return " snooped or LLC";
+       case I915_CACHE_L3_LLC: return " L3+LLC";
+       default: return "";
+       }
+}
+
+/* NB: please notice the memset */
+void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);
+
+       switch (INTEL_INFO(dev)->gen) {
+       case 2:
+       case 3:
+               instdone[0] = I915_READ(INSTDONE);
+               break;
+       case 4:
+       case 5:
+       case 6:
+               instdone[0] = I915_READ(INSTDONE_I965);
+               instdone[1] = I915_READ(INSTDONE1);
+               break;
+       default:
+               WARN_ONCE(1, "Unsupported platform\n");
+       case 7:
+               instdone[0] = I915_READ(GEN7_INSTDONE_1);
+               instdone[1] = I915_READ(GEN7_SC_INSTDONE);
+               instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
+               instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
+               break;
+       }
+}
index 3d92a7cef1541bd20b2c4016cdc2447791991c56..caf83da17bb063da37cc915d84b998998ef7de66 100644 (file)
@@ -104,6 +104,69 @@ ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
        }
 }
 
+/**
+ * ilk_update_gt_irq - update GTIMR
+ * @dev_priv: driver private
+ * @interrupt_mask: mask of interrupt bits to update
+ * @enabled_irq_mask: mask of interrupt bits to enable
+ */
+static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
+                             uint32_t interrupt_mask,
+                             uint32_t enabled_irq_mask)
+{
+       assert_spin_locked(&dev_priv->irq_lock);
+
+       dev_priv->gt_irq_mask &= ~interrupt_mask;
+       dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
+       I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+       POSTING_READ(GTIMR);
+}
+
+void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+{
+       ilk_update_gt_irq(dev_priv, mask, mask);
+}
+
+void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+{
+       ilk_update_gt_irq(dev_priv, mask, 0);
+}
+
+/**
+  * snb_update_pm_irq - update GEN6_PMIMR
+  * @dev_priv: driver private
+  * @interrupt_mask: mask of interrupt bits to update
+  * @enabled_irq_mask: mask of interrupt bits to enable
+  */
+static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
+                             uint32_t interrupt_mask,
+                             uint32_t enabled_irq_mask)
+{
+       uint32_t new_val;
+
+       assert_spin_locked(&dev_priv->irq_lock);
+
+       new_val = dev_priv->pm_irq_mask;
+       new_val &= ~interrupt_mask;
+       new_val |= (~enabled_irq_mask & interrupt_mask);
+
+       if (new_val != dev_priv->pm_irq_mask) {
+               dev_priv->pm_irq_mask = new_val;
+               I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask);
+               POSTING_READ(GEN6_PMIMR);
+       }
+}
+
+void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+{
+       snb_update_pm_irq(dev_priv, mask, mask);
+}
+
+void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+{
+       snb_update_pm_irq(dev_priv, mask, 0);
+}
+
 static bool ivb_can_enable_err_int(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -128,6 +191,8 @@ static bool cpt_can_enable_serr_int(struct drm_device *dev)
        enum pipe pipe;
        struct intel_crtc *crtc;
 
+       assert_spin_locked(&dev_priv->irq_lock);
+
        for_each_pipe(pipe) {
                crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
 
@@ -152,38 +217,66 @@ static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
 }
 
 static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
-                                                 bool enable)
+                                                 enum pipe pipe, bool enable)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-
        if (enable) {
+               I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));
+
                if (!ivb_can_enable_err_int(dev))
                        return;
 
-               I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN_A |
-                                        ERR_INT_FIFO_UNDERRUN_B |
-                                        ERR_INT_FIFO_UNDERRUN_C);
-
                ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
        } else {
+               bool was_enabled = !(I915_READ(DEIMR) & DE_ERR_INT_IVB);
+
+               /* Change the state _after_ we've read out the current one. */
                ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
+
+               if (!was_enabled &&
+                   (I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe))) {
+                       DRM_DEBUG_KMS("uncleared fifo underrun on pipe %c\n",
+                                     pipe_name(pipe));
+               }
        }
 }
 
-static void ibx_set_fifo_underrun_reporting(struct intel_crtc *crtc,
+/**
+ * ibx_display_interrupt_update - update SDEIMR
+ * @dev_priv: driver private
+ * @interrupt_mask: mask of interrupt bits to update
+ * @enabled_irq_mask: mask of interrupt bits to enable
+ */
+static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
+                                        uint32_t interrupt_mask,
+                                        uint32_t enabled_irq_mask)
+{
+       uint32_t sdeimr = I915_READ(SDEIMR);
+       sdeimr &= ~interrupt_mask;
+       sdeimr |= (~enabled_irq_mask & interrupt_mask);
+
+       assert_spin_locked(&dev_priv->irq_lock);
+
+       I915_WRITE(SDEIMR, sdeimr);
+       POSTING_READ(SDEIMR);
+}
+#define ibx_enable_display_interrupt(dev_priv, bits) \
+       ibx_display_interrupt_update((dev_priv), (bits), (bits))
+#define ibx_disable_display_interrupt(dev_priv, bits) \
+       ibx_display_interrupt_update((dev_priv), (bits), 0)
+
+static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
+                                           enum transcoder pch_transcoder,
                                            bool enable)
 {
-       struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       uint32_t bit = (crtc->pipe == PIPE_A) ? SDE_TRANSA_FIFO_UNDER :
-                                               SDE_TRANSB_FIFO_UNDER;
+       uint32_t bit = (pch_transcoder == TRANSCODER_A) ?
+                      SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;
 
        if (enable)
-               I915_WRITE(SDEIMR, I915_READ(SDEIMR) & ~bit);
+               ibx_enable_display_interrupt(dev_priv, bit);
        else
-               I915_WRITE(SDEIMR, I915_READ(SDEIMR) | bit);
-
-       POSTING_READ(SDEIMR);
+               ibx_disable_display_interrupt(dev_priv, bit);
 }
 
 static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
@@ -193,19 +286,26 @@ static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
        struct drm_i915_private *dev_priv = dev->dev_private;
 
        if (enable) {
+               I915_WRITE(SERR_INT,
+                          SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));
+
                if (!cpt_can_enable_serr_int(dev))
                        return;
 
-               I915_WRITE(SERR_INT, SERR_INT_TRANS_A_FIFO_UNDERRUN |
-                                    SERR_INT_TRANS_B_FIFO_UNDERRUN |
-                                    SERR_INT_TRANS_C_FIFO_UNDERRUN);
-
-               I915_WRITE(SDEIMR, I915_READ(SDEIMR) & ~SDE_ERROR_CPT);
+               ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT);
        } else {
-               I915_WRITE(SDEIMR, I915_READ(SDEIMR) | SDE_ERROR_CPT);
-       }
+               uint32_t tmp = I915_READ(SERR_INT);
+               bool was_enabled = !(I915_READ(SDEIMR) & SDE_ERROR_CPT);
 
-       POSTING_READ(SDEIMR);
+               /* Change the state _after_ we've read out the current one. */
+               ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);
+
+               if (!was_enabled &&
+                   (tmp & SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder))) {
+                       DRM_DEBUG_KMS("uncleared pch fifo underrun on pch transcoder %c\n",
+                                     transcoder_name(pch_transcoder));
+               }
+       }
 }
 
 /**
@@ -243,7 +343,7 @@ bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
        if (IS_GEN5(dev) || IS_GEN6(dev))
                ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
        else if (IS_GEN7(dev))
-               ivybridge_set_fifo_underrun_reporting(dev, enable);
+               ivybridge_set_fifo_underrun_reporting(dev, pipe, enable);
 
 done:
        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
@@ -269,29 +369,19 @@ bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
                                           bool enable)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       enum pipe p;
-       struct drm_crtc *crtc;
-       struct intel_crtc *intel_crtc;
+       struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        unsigned long flags;
        bool ret;
 
-       if (HAS_PCH_LPT(dev)) {
-               crtc = NULL;
-               for_each_pipe(p) {
-                       struct drm_crtc *c = dev_priv->pipe_to_crtc_mapping[p];
-                       if (intel_pipe_has_type(c, INTEL_OUTPUT_ANALOG)) {
-                               crtc = c;
-                               break;
-                       }
-               }
-               if (!crtc) {
-                       DRM_ERROR("PCH FIFO underrun, but no CRTC using the PCH found\n");
-                       return false;
-               }
-       } else {
-               crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
-       }
-       intel_crtc = to_intel_crtc(crtc);
+       /*
+        * NOTE: Pre-LPT has a fixed cpu pipe -> pch transcoder mapping, but LPT
+        * has only one pch transcoder A that all pipes can use. To avoid racy
+        * pch transcoder -> pipe lookups from interrupt code simply store the
+        * underrun statistics in crtc A. Since we never expose this anywhere
+        * nor use it outside of the fifo underrun code here using the "wrong"
+        * crtc on LPT won't cause issues.
+        */
 
        spin_lock_irqsave(&dev_priv->irq_lock, flags);
 
@@ -303,7 +393,7 @@ bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
        intel_crtc->pch_fifo_underrun_disabled = !enable;
 
        if (HAS_PCH_IBX(dev))
-               ibx_set_fifo_underrun_reporting(intel_crtc, enable);
+               ibx_set_fifo_underrun_reporting(dev, pch_transcoder, enable);
        else
                cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable);
 
@@ -319,6 +409,8 @@ i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
        u32 reg = PIPESTAT(pipe);
        u32 pipestat = I915_READ(reg) & 0x7fff0000;
 
+       assert_spin_locked(&dev_priv->irq_lock);
+
        if ((pipestat & mask) == mask)
                return;
 
@@ -334,6 +426,8 @@ i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
        u32 reg = PIPESTAT(pipe);
        u32 pipestat = I915_READ(reg) & 0x7fff0000;
 
+       assert_spin_locked(&dev_priv->irq_lock);
+
        if ((pipestat & mask) == 0)
                return;
 
@@ -625,14 +719,13 @@ static void i915_hotplug_work_func(struct work_struct *work)
                drm_kms_helper_hotplug_event(dev);
 }
 
-static void ironlake_handle_rps_change(struct drm_device *dev)
+static void ironlake_rps_change_irq_handler(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
        u32 busy_up, busy_down, max_avg, min_avg;
        u8 new_delay;
-       unsigned long flags;
 
-       spin_lock_irqsave(&mchdev_lock, flags);
+       spin_lock(&mchdev_lock);
 
        I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
 
@@ -660,7 +753,7 @@ static void ironlake_handle_rps_change(struct drm_device *dev)
        if (ironlake_set_drps(dev, new_delay))
                dev_priv->ips.cur_delay = new_delay;
 
-       spin_unlock_irqrestore(&mchdev_lock, flags);
+       spin_unlock(&mchdev_lock);
 
        return;
 }
@@ -668,34 +761,31 @@ static void ironlake_handle_rps_change(struct drm_device *dev)
 static void notify_ring(struct drm_device *dev,
                        struct intel_ring_buffer *ring)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
        if (ring->obj == NULL)
                return;
 
        trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false));
 
        wake_up_all(&ring->irq_queue);
-       if (i915_enable_hangcheck) {
-               mod_timer(&dev_priv->gpu_error.hangcheck_timer,
-                         round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
-       }
+       i915_queue_hangcheck(dev);
 }
 
 static void gen6_pm_rps_work(struct work_struct *work)
 {
        drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
                                                    rps.work);
-       u32 pm_iir, pm_imr;
+       u32 pm_iir;
        u8 new_delay;
 
-       spin_lock_irq(&dev_priv->rps.lock);
+       spin_lock_irq(&dev_priv->irq_lock);
        pm_iir = dev_priv->rps.pm_iir;
        dev_priv->rps.pm_iir = 0;
-       pm_imr = I915_READ(GEN6_PMIMR);
        /* Make sure not to corrupt PMIMR state used by ringbuffer code */
-       I915_WRITE(GEN6_PMIMR, pm_imr & ~GEN6_PM_RPS_EVENTS);
-       spin_unlock_irq(&dev_priv->rps.lock);
+       snb_enable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS);
+       spin_unlock_irq(&dev_priv->irq_lock);
+
+       /* Make sure we didn't queue anything we're not going to process. */
+       WARN_ON(pm_iir & ~GEN6_PM_RPS_EVENTS);
 
        if ((pm_iir & GEN6_PM_RPS_EVENTS) == 0)
                return;
@@ -781,13 +871,12 @@ static void ivybridge_parity_work(struct work_struct *work)
        I915_WRITE(GEN7_MISCCPCTL, misccpctl);
 
        spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       dev_priv->gt_irq_mask &= ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
-       I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+       ilk_enable_gt_irq(dev_priv, GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
        mutex_unlock(&dev_priv->dev->struct_mutex);
 
-       parity_event[0] = "L3_PARITY_ERROR=1";
+       parity_event[0] = I915_L3_PARITY_UEVENT "=1";
        parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
        parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
        parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
@@ -804,22 +893,31 @@ static void ivybridge_parity_work(struct work_struct *work)
        kfree(parity_event[1]);
 }
 
-static void ivybridge_handle_parity_error(struct drm_device *dev)
+static void ivybridge_parity_error_irq_handler(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-       unsigned long flags;
 
        if (!HAS_L3_GPU_CACHE(dev))
                return;
 
-       spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       dev_priv->gt_irq_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
-       I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
-       spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+       spin_lock(&dev_priv->irq_lock);
+       ilk_disable_gt_irq(dev_priv, GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
+       spin_unlock(&dev_priv->irq_lock);
 
        queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
 }
 
+static void ilk_gt_irq_handler(struct drm_device *dev,
+                              struct drm_i915_private *dev_priv,
+                              u32 gt_iir)
+{
+       if (gt_iir &
+           (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
+               notify_ring(dev, &dev_priv->ring[RCS]);
+       if (gt_iir & ILK_BSD_USER_INTERRUPT)
+               notify_ring(dev, &dev_priv->ring[VCS]);
+}
+
 static void snb_gt_irq_handler(struct drm_device *dev,
                               struct drm_i915_private *dev_priv,
                               u32 gt_iir)
@@ -841,32 +939,7 @@ static void snb_gt_irq_handler(struct drm_device *dev,
        }
 
        if (gt_iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
-               ivybridge_handle_parity_error(dev);
-}
-
-/* Legacy way of handling PM interrupts */
-static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
-                               u32 pm_iir)
-{
-       unsigned long flags;
-
-       /*
-        * IIR bits should never already be set because IMR should
-        * prevent an interrupt from being shown in IIR. The warning
-        * displays a case where we've unsafely cleared
-        * dev_priv->rps.pm_iir. Although missing an interrupt of the same
-        * type is not a problem, it displays a problem in the logic.
-        *
-        * The mask bit in IMR is cleared by dev_priv->rps.work.
-        */
-
-       spin_lock_irqsave(&dev_priv->rps.lock, flags);
-       dev_priv->rps.pm_iir |= pm_iir;
-       I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
-       POSTING_READ(GEN6_PMIMR);
-       spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
-
-       queue_work(dev_priv->wq, &dev_priv->rps.work);
+               ivybridge_parity_error_irq_handler(dev);
 }
 
 #define HPD_STORM_DETECT_PERIOD 1000
@@ -886,6 +959,10 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev,
        spin_lock(&dev_priv->irq_lock);
        for (i = 1; i < HPD_NUM_PINS; i++) {
 
+               WARN(((hpd[i] & hotplug_trigger) &&
+                     dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED),
+                    "Received HPD interrupt although disabled\n");
+
                if (!(hpd[i] & hotplug_trigger) ||
                    dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
                        continue;
@@ -896,6 +973,7 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev,
                                   + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
                        dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
                        dev_priv->hpd_stats[i].hpd_cnt = 0;
+                       DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", i);
                } else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
                        dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
                        dev_priv->hpd_event_bits &= ~(1 << i);
@@ -903,6 +981,8 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev,
                        storm_detected = true;
                } else {
                        dev_priv->hpd_stats[i].hpd_cnt++;
+                       DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", i,
+                                     dev_priv->hpd_stats[i].hpd_cnt);
                }
        }
 
@@ -928,28 +1008,21 @@ static void dp_aux_irq_handler(struct drm_device *dev)
        wake_up_all(&dev_priv->gmbus_wait_queue);
 }
 
-/* Unlike gen6_queue_rps_work() from which this function is originally derived,
- * we must be able to deal with other PM interrupts. This is complicated because
- * of the way in which we use the masks to defer the RPS work (which for
- * posterity is necessary because of forcewake).
- */
-static void hsw_pm_irq_handler(struct drm_i915_private *dev_priv,
-                              u32 pm_iir)
+/* The RPS events need forcewake, so we add them to a work queue and mask their
+ * IMR bits until the work is done. Other interrupts can be processed without
+ * the work queue. */
+static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
 {
-       unsigned long flags;
+       if (pm_iir & GEN6_PM_RPS_EVENTS) {
+               spin_lock(&dev_priv->irq_lock);
+               dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_RPS_EVENTS;
+               snb_disable_pm_irq(dev_priv, pm_iir & GEN6_PM_RPS_EVENTS);
+               spin_unlock(&dev_priv->irq_lock);
 
-       spin_lock_irqsave(&dev_priv->rps.lock, flags);
-       dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_RPS_EVENTS;
-       if (dev_priv->rps.pm_iir) {
-               I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
-               /* never want to mask useful interrupts. (also posting read) */
-               WARN_ON(I915_READ_NOTRACE(GEN6_PMIMR) & ~GEN6_PM_RPS_EVENTS);
-               /* TODO: if queue_work is slow, move it out of the spinlock */
                queue_work(dev_priv->wq, &dev_priv->rps.work);
        }
-       spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
 
-       if (pm_iir & ~GEN6_PM_RPS_EVENTS) {
+       if (HAS_VEBOX(dev_priv->dev)) {
                if (pm_iir & PM_VEBOX_USER_INTERRUPT)
                        notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);
 
@@ -1028,8 +1101,8 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
                if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
                        gmbus_irq_handler(dev);
 
-               if (pm_iir & GEN6_PM_RPS_EVENTS)
-                       gen6_queue_rps_work(dev_priv, pm_iir);
+               if (pm_iir)
+                       gen6_rps_irq_handler(dev_priv, pm_iir);
 
                I915_WRITE(GTIIR, gt_iir);
                I915_WRITE(GEN6_PMIIR, pm_iir);
@@ -1179,163 +1252,9 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
                cpt_serr_int_handler(dev);
 }
 
-static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
+static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
 {
-       struct drm_device *dev = (struct drm_device *) arg;
-       drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-       u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier = 0;
-       irqreturn_t ret = IRQ_NONE;
-       int i;
-
-       atomic_inc(&dev_priv->irq_received);
-
-       /* We get interrupts on unclaimed registers, so check for this before we
-        * do any I915_{READ,WRITE}. */
-       if (IS_HASWELL(dev) &&
-           (I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
-               DRM_ERROR("Unclaimed register before interrupt\n");
-               I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
-       }
-
-       /* disable master interrupt before clearing iir  */
-       de_ier = I915_READ(DEIER);
-       I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
-
-       /* Disable south interrupts. We'll only write to SDEIIR once, so further
-        * interrupts will will be stored on its back queue, and then we'll be
-        * able to process them after we restore SDEIER (as soon as we restore
-        * it, we'll get an interrupt if SDEIIR still has something to process
-        * due to its back queue). */
-       if (!HAS_PCH_NOP(dev)) {
-               sde_ier = I915_READ(SDEIER);
-               I915_WRITE(SDEIER, 0);
-               POSTING_READ(SDEIER);
-       }
-
-       /* On Haswell, also mask ERR_INT because we don't want to risk
-        * generating "unclaimed register" interrupts from inside the interrupt
-        * handler. */
-       if (IS_HASWELL(dev)) {
-               spin_lock(&dev_priv->irq_lock);
-               ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
-               spin_unlock(&dev_priv->irq_lock);
-       }
-
-       gt_iir = I915_READ(GTIIR);
-       if (gt_iir) {
-               snb_gt_irq_handler(dev, dev_priv, gt_iir);
-               I915_WRITE(GTIIR, gt_iir);
-               ret = IRQ_HANDLED;
-       }
-
-       de_iir = I915_READ(DEIIR);
-       if (de_iir) {
-               if (de_iir & DE_ERR_INT_IVB)
-                       ivb_err_int_handler(dev);
-
-               if (de_iir & DE_AUX_CHANNEL_A_IVB)
-                       dp_aux_irq_handler(dev);
-
-               if (de_iir & DE_GSE_IVB)
-                       intel_opregion_asle_intr(dev);
-
-               for (i = 0; i < 3; i++) {
-                       if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
-                               drm_handle_vblank(dev, i);
-                       if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) {
-                               intel_prepare_page_flip(dev, i);
-                               intel_finish_page_flip_plane(dev, i);
-                       }
-               }
-
-               /* check event from PCH */
-               if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
-                       u32 pch_iir = I915_READ(SDEIIR);
-
-                       cpt_irq_handler(dev, pch_iir);
-
-                       /* clear PCH hotplug event before clear CPU irq */
-                       I915_WRITE(SDEIIR, pch_iir);
-               }
-
-               I915_WRITE(DEIIR, de_iir);
-               ret = IRQ_HANDLED;
-       }
-
-       pm_iir = I915_READ(GEN6_PMIIR);
-       if (pm_iir) {
-               if (IS_HASWELL(dev))
-                       hsw_pm_irq_handler(dev_priv, pm_iir);
-               else if (pm_iir & GEN6_PM_RPS_EVENTS)
-                       gen6_queue_rps_work(dev_priv, pm_iir);
-               I915_WRITE(GEN6_PMIIR, pm_iir);
-               ret = IRQ_HANDLED;
-       }
-
-       if (IS_HASWELL(dev)) {
-               spin_lock(&dev_priv->irq_lock);
-               if (ivb_can_enable_err_int(dev))
-                       ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
-               spin_unlock(&dev_priv->irq_lock);
-       }
-
-       I915_WRITE(DEIER, de_ier);
-       POSTING_READ(DEIER);
-       if (!HAS_PCH_NOP(dev)) {
-               I915_WRITE(SDEIER, sde_ier);
-               POSTING_READ(SDEIER);
-       }
-
-       return ret;
-}
-
-static void ilk_gt_irq_handler(struct drm_device *dev,
-                              struct drm_i915_private *dev_priv,
-                              u32 gt_iir)
-{
-       if (gt_iir &
-           (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
-               notify_ring(dev, &dev_priv->ring[RCS]);
-       if (gt_iir & ILK_BSD_USER_INTERRUPT)
-               notify_ring(dev, &dev_priv->ring[VCS]);
-}
-
-static irqreturn_t ironlake_irq_handler(int irq, void *arg)
-{
-       struct drm_device *dev = (struct drm_device *) arg;
-       drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-       int ret = IRQ_NONE;
-       u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier;
-
-       atomic_inc(&dev_priv->irq_received);
-
-       /* disable master interrupt before clearing iir  */
-       de_ier = I915_READ(DEIER);
-       I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
-       POSTING_READ(DEIER);
-
-       /* Disable south interrupts. We'll only write to SDEIIR once, so further
-        * interrupts will will be stored on its back queue, and then we'll be
-        * able to process them after we restore SDEIER (as soon as we restore
-        * it, we'll get an interrupt if SDEIIR still has something to process
-        * due to its back queue). */
-       sde_ier = I915_READ(SDEIER);
-       I915_WRITE(SDEIER, 0);
-       POSTING_READ(SDEIER);
-
-       de_iir = I915_READ(DEIIR);
-       gt_iir = I915_READ(GTIIR);
-       pm_iir = I915_READ(GEN6_PMIIR);
-
-       if (de_iir == 0 && gt_iir == 0 && (!IS_GEN6(dev) || pm_iir == 0))
-               goto done;
-
-       ret = IRQ_HANDLED;
-
-       if (IS_GEN5(dev))
-               ilk_gt_irq_handler(dev, dev_priv, gt_iir);
-       else
-               snb_gt_irq_handler(dev, dev_priv, gt_iir);
+       struct drm_i915_private *dev_priv = dev->dev_private;
 
        if (de_iir & DE_AUX_CHANNEL_A)
                dp_aux_irq_handler(dev);
@@ -1383,621 +1302,198 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
                I915_WRITE(SDEIIR, pch_iir);
        }
 
-       if (IS_GEN5(dev) &&  de_iir & DE_PCU_EVENT)
-               ironlake_handle_rps_change(dev);
-
-       if (IS_GEN6(dev) && pm_iir & GEN6_PM_RPS_EVENTS)
-               gen6_queue_rps_work(dev_priv, pm_iir);
-
-       I915_WRITE(GTIIR, gt_iir);
-       I915_WRITE(DEIIR, de_iir);
-       I915_WRITE(GEN6_PMIIR, pm_iir);
-
-done:
-       I915_WRITE(DEIER, de_ier);
-       POSTING_READ(DEIER);
-       I915_WRITE(SDEIER, sde_ier);
-       POSTING_READ(SDEIER);
-
-       return ret;
-}
-
-/**
- * i915_error_work_func - do process context error handling work
- * @work: work struct
- *
- * Fire an error uevent so userspace can see that a hang or error
- * was detected.
- */
-static void i915_error_work_func(struct work_struct *work)
-{
-       struct i915_gpu_error *error = container_of(work, struct i915_gpu_error,
-                                                   work);
-       drm_i915_private_t *dev_priv = container_of(error, drm_i915_private_t,
-                                                   gpu_error);
-       struct drm_device *dev = dev_priv->dev;
-       struct intel_ring_buffer *ring;
-       char *error_event[] = { "ERROR=1", NULL };
-       char *reset_event[] = { "RESET=1", NULL };
-       char *reset_done_event[] = { "ERROR=0", NULL };
-       int i, ret;
-
-       kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);
-
-       /*
-        * Note that there's only one work item which does gpu resets, so we
-        * need not worry about concurrent gpu resets potentially incrementing
-        * error->reset_counter twice. We only need to take care of another
-        * racing irq/hangcheck declaring the gpu dead for a second time. A
-        * quick check for that is good enough: schedule_work ensures the
-        * correct ordering between hang detection and this work item, and since
-        * the reset in-progress bit is only ever set by code outside of this
-        * work we don't need to worry about any other races.
-        */
-       if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
-               DRM_DEBUG_DRIVER("resetting chip\n");
-               kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE,
-                                  reset_event);
-
-               ret = i915_reset(dev);
-
-               if (ret == 0) {
-                       /*
-                        * After all the gem state is reset, increment the reset
-                        * counter and wake up everyone waiting for the reset to
-                        * complete.
-                        *
-                        * Since unlock operations are a one-sided barrier only,
-                        * we need to insert a barrier here to order any seqno
-                        * updates before
-                        * the counter increment.
-                        */
-                       smp_mb__before_atomic_inc();
-                       atomic_inc(&dev_priv->gpu_error.reset_counter);
-
-                       kobject_uevent_env(&dev->primary->kdev.kobj,
-                                          KOBJ_CHANGE, reset_done_event);
-               } else {
-                       atomic_set(&error->reset_counter, I915_WEDGED);
-               }
-
-               for_each_ring(ring, dev_priv, i)
-                       wake_up_all(&ring->irq_queue);
-
-               intel_display_handle_reset(dev);
-
-               wake_up_all(&dev_priv->gpu_error.reset_queue);
-       }
+       if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
+               ironlake_rps_change_irq_handler(dev);
 }
 
-/* NB: please notice the memset */
-static void i915_get_extra_instdone(struct drm_device *dev,
-                                   uint32_t *instdone)
+static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);
-
-       switch(INTEL_INFO(dev)->gen) {
-       case 2:
-       case 3:
-               instdone[0] = I915_READ(INSTDONE);
-               break;
-       case 4:
-       case 5:
-       case 6:
-               instdone[0] = I915_READ(INSTDONE_I965);
-               instdone[1] = I915_READ(INSTDONE1);
-               break;
-       default:
-               WARN_ONCE(1, "Unsupported platform\n");
-       case 7:
-               instdone[0] = I915_READ(GEN7_INSTDONE_1);
-               instdone[1] = I915_READ(GEN7_SC_INSTDONE);
-               instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
-               instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
-               break;
-       }
-}
-
-#ifdef CONFIG_DEBUG_FS
-static struct drm_i915_error_object *
-i915_error_object_create_sized(struct drm_i915_private *dev_priv,
-                              struct drm_i915_gem_object *src,
-                              const int num_pages)
-{
-       struct drm_i915_error_object *dst;
        int i;
-       u32 reloc_offset;
-
-       if (src == NULL || src->pages == NULL)
-               return NULL;
 
-       dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC);
-       if (dst == NULL)
-               return NULL;
-
-       reloc_offset = src->gtt_offset;
-       for (i = 0; i < num_pages; i++) {
-               unsigned long flags;
-               void *d;
-
-               d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
-               if (d == NULL)
-                       goto unwind;
-
-               local_irq_save(flags);
-               if (reloc_offset < dev_priv->gtt.mappable_end &&
-                   src->has_global_gtt_mapping) {
-                       void __iomem *s;
-
-                       /* Simply ignore tiling or any overlapping fence.
-                        * It's part of the error state, and this hopefully
-                        * captures what the GPU read.
-                        */
-
-                       s = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
-                                                    reloc_offset);
-                       memcpy_fromio(d, s, PAGE_SIZE);
-                       io_mapping_unmap_atomic(s);
-               } else if (src->stolen) {
-                       unsigned long offset;
+       if (de_iir & DE_ERR_INT_IVB)
+               ivb_err_int_handler(dev);
 
-                       offset = dev_priv->mm.stolen_base;
-                       offset += src->stolen->start;
-                       offset += i << PAGE_SHIFT;
-
-                       memcpy_fromio(d, (void __iomem *) offset, PAGE_SIZE);
-               } else {
-                       struct page *page;
-                       void *s;
-
-                       page = i915_gem_object_get_page(src, i);
-
-                       drm_clflush_pages(&page, 1);
+       if (de_iir & DE_AUX_CHANNEL_A_IVB)
+               dp_aux_irq_handler(dev);
 
-                       s = kmap_atomic(page);
-                       memcpy(d, s, PAGE_SIZE);
-                       kunmap_atomic(s);
+       if (de_iir & DE_GSE_IVB)
+               intel_opregion_asle_intr(dev);
 
-                       drm_clflush_pages(&page, 1);
+       for (i = 0; i < 3; i++) {
+               if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
+                       drm_handle_vblank(dev, i);
+               if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) {
+                       intel_prepare_page_flip(dev, i);
+                       intel_finish_page_flip_plane(dev, i);
                }
-               local_irq_restore(flags);
-
-               dst->pages[i] = d;
-
-               reloc_offset += PAGE_SIZE;
        }
-       dst->page_count = num_pages;
-       dst->gtt_offset = src->gtt_offset;
-
-       return dst;
-
-unwind:
-       while (i--)
-               kfree(dst->pages[i]);
-       kfree(dst);
-       return NULL;
-}
-#define i915_error_object_create(dev_priv, src) \
-       i915_error_object_create_sized((dev_priv), (src), \
-                                      (src)->base.size>>PAGE_SHIFT)
-
-static void
-i915_error_object_free(struct drm_i915_error_object *obj)
-{
-       int page;
-
-       if (obj == NULL)
-               return;
-
-       for (page = 0; page < obj->page_count; page++)
-               kfree(obj->pages[page]);
 
-       kfree(obj);
-}
-
-void
-i915_error_state_free(struct kref *error_ref)
-{
-       struct drm_i915_error_state *error = container_of(error_ref,
-                                                         typeof(*error), ref);
-       int i;
-
-       for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
-               i915_error_object_free(error->ring[i].batchbuffer);
-               i915_error_object_free(error->ring[i].ringbuffer);
-               i915_error_object_free(error->ring[i].ctx);
-               kfree(error->ring[i].requests);
-       }
-
-       kfree(error->active_bo);
-       kfree(error->overlay);
-       kfree(error->display);
-       kfree(error);
-}
-static void capture_bo(struct drm_i915_error_buffer *err,
-                      struct drm_i915_gem_object *obj)
-{
-       err->size = obj->base.size;
-       err->name = obj->base.name;
-       err->rseqno = obj->last_read_seqno;
-       err->wseqno = obj->last_write_seqno;
-       err->gtt_offset = obj->gtt_offset;
-       err->read_domains = obj->base.read_domains;
-       err->write_domain = obj->base.write_domain;
-       err->fence_reg = obj->fence_reg;
-       err->pinned = 0;
-       if (obj->pin_count > 0)
-               err->pinned = 1;
-       if (obj->user_pin_count > 0)
-               err->pinned = -1;
-       err->tiling = obj->tiling_mode;
-       err->dirty = obj->dirty;
-       err->purgeable = obj->madv != I915_MADV_WILLNEED;
-       err->ring = obj->ring ? obj->ring->id : -1;
-       err->cache_level = obj->cache_level;
-}
-
-static u32 capture_active_bo(struct drm_i915_error_buffer *err,
-                            int count, struct list_head *head)
-{
-       struct drm_i915_gem_object *obj;
-       int i = 0;
-
-       list_for_each_entry(obj, head, mm_list) {
-               capture_bo(err++, obj);
-               if (++i == count)
-                       break;
-       }
-
-       return i;
-}
-
-static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
-                            int count, struct list_head *head)
-{
-       struct drm_i915_gem_object *obj;
-       int i = 0;
-
-       list_for_each_entry(obj, head, global_list) {
-               if (obj->pin_count == 0)
-                       continue;
-
-               capture_bo(err++, obj);
-               if (++i == count)
-                       break;
-       }
-
-       return i;
-}
-
-static void i915_gem_record_fences(struct drm_device *dev,
-                                  struct drm_i915_error_state *error)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       int i;
-
-       /* Fences */
-       switch (INTEL_INFO(dev)->gen) {
-       case 7:
-       case 6:
-               for (i = 0; i < dev_priv->num_fence_regs; i++)
-                       error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
-               break;
-       case 5:
-       case 4:
-               for (i = 0; i < 16; i++)
-                       error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
-               break;
-       case 3:
-               if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
-                       for (i = 0; i < 8; i++)
-                               error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
-       case 2:
-               for (i = 0; i < 8; i++)
-                       error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
-               break;
-
-       default:
-               BUG();
-       }
-}
-
-static struct drm_i915_error_object *
-i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
-                            struct intel_ring_buffer *ring)
-{
-       struct drm_i915_gem_object *obj;
-       u32 seqno;
-
-       if (!ring->get_seqno)
-               return NULL;
-
-       if (HAS_BROKEN_CS_TLB(dev_priv->dev)) {
-               u32 acthd = I915_READ(ACTHD);
-
-               if (WARN_ON(ring->id != RCS))
-                       return NULL;
-
-               obj = ring->private;
-               if (acthd >= obj->gtt_offset &&
-                   acthd < obj->gtt_offset + obj->base.size)
-                       return i915_error_object_create(dev_priv, obj);
-       }
-
-       seqno = ring->get_seqno(ring, false);
-       list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
-               if (obj->ring != ring)
-                       continue;
-
-               if (i915_seqno_passed(seqno, obj->last_read_seqno))
-                       continue;
+       /* check event from PCH */
+       if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
+               u32 pch_iir = I915_READ(SDEIIR);
 
-               if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
-                       continue;
+               cpt_irq_handler(dev, pch_iir);
 
-               /* We need to copy these to an anonymous buffer as the simplest
-                * method to avoid being overwritten by userspace.
-                */
-               return i915_error_object_create(dev_priv, obj);
+               /* clear PCH hotplug event before clear CPU irq */
+               I915_WRITE(SDEIIR, pch_iir);
        }
-
-       return NULL;
 }
 
-static void i915_record_ring_state(struct drm_device *dev,
-                                  struct drm_i915_error_state *error,
-                                  struct intel_ring_buffer *ring)
+static irqreturn_t ironlake_irq_handler(int irq, void *arg)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
-       if (INTEL_INFO(dev)->gen >= 6) {
-               error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50);
-               error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
-               error->semaphore_mboxes[ring->id][0]
-                       = I915_READ(RING_SYNC_0(ring->mmio_base));
-               error->semaphore_mboxes[ring->id][1]
-                       = I915_READ(RING_SYNC_1(ring->mmio_base));
-               error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0];
-               error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1];
-       }
-
-       if (INTEL_INFO(dev)->gen >= 4) {
-               error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
-               error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base));
-               error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
-               error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
-               error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
-               if (ring->id == RCS)
-                       error->bbaddr = I915_READ64(BB_ADDR);
-       } else {
-               error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
-               error->ipeir[ring->id] = I915_READ(IPEIR);
-               error->ipehr[ring->id] = I915_READ(IPEHR);
-               error->instdone[ring->id] = I915_READ(INSTDONE);
-       }
-
-       error->waiting[ring->id] = waitqueue_active(&ring->irq_queue);
-       error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
-       error->seqno[ring->id] = ring->get_seqno(ring, false);
-       error->acthd[ring->id] = intel_ring_get_active_head(ring);
-       error->head[ring->id] = I915_READ_HEAD(ring);
-       error->tail[ring->id] = I915_READ_TAIL(ring);
-       error->ctl[ring->id] = I915_READ_CTL(ring);
-
-       error->cpu_ring_head[ring->id] = ring->head;
-       error->cpu_ring_tail[ring->id] = ring->tail;
-}
+       struct drm_device *dev = (struct drm_device *) arg;
+       drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+       u32 de_iir, gt_iir, de_ier, sde_ier = 0;
+       irqreturn_t ret = IRQ_NONE;
+       bool err_int_reenable = false;
 
+       atomic_inc(&dev_priv->irq_received);
 
-static void i915_gem_record_active_context(struct intel_ring_buffer *ring,
-                                          struct drm_i915_error_state *error,
-                                          struct drm_i915_error_ring *ering)
-{
-       struct drm_i915_private *dev_priv = ring->dev->dev_private;
-       struct drm_i915_gem_object *obj;
+       /* We get interrupts on unclaimed registers, so check for this before we
+        * do any I915_{READ,WRITE}. */
+       intel_uncore_check_errors(dev);
 
-       /* Currently render ring is the only HW context user */
-       if (ring->id != RCS || !error->ccid)
-               return;
+       /* disable master interrupt before clearing iir  */
+       de_ier = I915_READ(DEIER);
+       I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
+       POSTING_READ(DEIER);
 
-       list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
-               if ((error->ccid & PAGE_MASK) == obj->gtt_offset) {
-                       ering->ctx = i915_error_object_create_sized(dev_priv,
-                                                                   obj, 1);
-               }
+       /* Disable south interrupts. We'll only write to SDEIIR once, so further
+        * interrupts will will be stored on its back queue, and then we'll be
+        * able to process them after we restore SDEIER (as soon as we restore
+        * it, we'll get an interrupt if SDEIIR still has something to process
+        * due to its back queue). */
+       if (!HAS_PCH_NOP(dev)) {
+               sde_ier = I915_READ(SDEIER);
+               I915_WRITE(SDEIER, 0);
+               POSTING_READ(SDEIER);
        }
-}
-
-static void i915_gem_record_rings(struct drm_device *dev,
-                                 struct drm_i915_error_state *error)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_ring_buffer *ring;
-       struct drm_i915_gem_request *request;
-       int i, count;
-
-       for_each_ring(ring, dev_priv, i) {
-               i915_record_ring_state(dev, error, ring);
-
-               error->ring[i].batchbuffer =
-                       i915_error_first_batchbuffer(dev_priv, ring);
-
-               error->ring[i].ringbuffer =
-                       i915_error_object_create(dev_priv, ring->obj);
 
+       /* On Haswell, also mask ERR_INT because we don't want to risk
+        * generating "unclaimed register" interrupts from inside the interrupt
+        * handler. */
+       if (IS_HASWELL(dev)) {
+               spin_lock(&dev_priv->irq_lock);
+               err_int_reenable = ~dev_priv->irq_mask & DE_ERR_INT_IVB;
+               if (err_int_reenable)
+                       ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
+               spin_unlock(&dev_priv->irq_lock);
+       }
 
-               i915_gem_record_active_context(ring, error, &error->ring[i]);
+       gt_iir = I915_READ(GTIIR);
+       if (gt_iir) {
+               if (INTEL_INFO(dev)->gen >= 6)
+                       snb_gt_irq_handler(dev, dev_priv, gt_iir);
+               else
+                       ilk_gt_irq_handler(dev, dev_priv, gt_iir);
+               I915_WRITE(GTIIR, gt_iir);
+               ret = IRQ_HANDLED;
+       }
 
-               count = 0;
-               list_for_each_entry(request, &ring->request_list, list)
-                       count++;
+       de_iir = I915_READ(DEIIR);
+       if (de_iir) {
+               if (INTEL_INFO(dev)->gen >= 7)
+                       ivb_display_irq_handler(dev, de_iir);
+               else
+                       ilk_display_irq_handler(dev, de_iir);
+               I915_WRITE(DEIIR, de_iir);
+               ret = IRQ_HANDLED;
+       }
 
-               error->ring[i].num_requests = count;
-               error->ring[i].requests =
-                       kmalloc(count*sizeof(struct drm_i915_error_request),
-                               GFP_ATOMIC);
-               if (error->ring[i].requests == NULL) {
-                       error->ring[i].num_requests = 0;
-                       continue;
+       if (INTEL_INFO(dev)->gen >= 6) {
+               u32 pm_iir = I915_READ(GEN6_PMIIR);
+               if (pm_iir) {
+                       gen6_rps_irq_handler(dev_priv, pm_iir);
+                       I915_WRITE(GEN6_PMIIR, pm_iir);
+                       ret = IRQ_HANDLED;
                }
+       }
 
-               count = 0;
-               list_for_each_entry(request, &ring->request_list, list) {
-                       struct drm_i915_error_request *erq;
+       if (err_int_reenable) {
+               spin_lock(&dev_priv->irq_lock);
+               if (ivb_can_enable_err_int(dev))
+                       ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
+               spin_unlock(&dev_priv->irq_lock);
+       }
 
-                       erq = &error->ring[i].requests[count++];
-                       erq->seqno = request->seqno;
-                       erq->jiffies = request->emitted_jiffies;
-                       erq->tail = request->tail;
-               }
+       I915_WRITE(DEIER, de_ier);
+       POSTING_READ(DEIER);
+       if (!HAS_PCH_NOP(dev)) {
+               I915_WRITE(SDEIER, sde_ier);
+               POSTING_READ(SDEIER);
        }
+
+       return ret;
 }
 
 /**
- * i915_capture_error_state - capture an error record for later analysis
- * @dev: drm device
+ * i915_error_work_func - do process context error handling work
+ * @work: work struct
  *
- * Should be called when an error is detected (either a hang or an error
- * interrupt) to capture error state from the time of the error.  Fills
- * out a structure which becomes available in debugfs for user level tools
- * to pick up.
+ * Fire an error uevent so userspace can see that a hang or error
+ * was detected.
  */
-static void i915_capture_error_state(struct drm_device *dev)
+static void i915_error_work_func(struct work_struct *work)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_i915_gem_object *obj;
-       struct drm_i915_error_state *error;
-       unsigned long flags;
-       int i, pipe;
-
-       spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
-       error = dev_priv->gpu_error.first_error;
-       spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
-       if (error)
-               return;
-
-       /* Account for pipe specific data like PIPE*STAT */
-       error = kzalloc(sizeof(*error), GFP_ATOMIC);
-       if (!error) {
-               DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
-               return;
-       }
-
-       DRM_INFO("capturing error event; look for more information in "
-                "/sys/kernel/debug/dri/%d/i915_error_state\n",
-                dev->primary->index);
-
-       kref_init(&error->ref);
-       error->eir = I915_READ(EIR);
-       error->pgtbl_er = I915_READ(PGTBL_ER);
-       if (HAS_HW_CONTEXTS(dev))
-               error->ccid = I915_READ(CCID);
-
-       if (HAS_PCH_SPLIT(dev))
-               error->ier = I915_READ(DEIER) | I915_READ(GTIER);
-       else if (IS_VALLEYVIEW(dev))
-               error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
-       else if (IS_GEN2(dev))
-               error->ier = I915_READ16(IER);
-       else
-               error->ier = I915_READ(IER);
-
-       if (INTEL_INFO(dev)->gen >= 6)
-               error->derrmr = I915_READ(DERRMR);
-
-       if (IS_VALLEYVIEW(dev))
-               error->forcewake = I915_READ(FORCEWAKE_VLV);
-       else if (INTEL_INFO(dev)->gen >= 7)
-               error->forcewake = I915_READ(FORCEWAKE_MT);
-       else if (INTEL_INFO(dev)->gen == 6)
-               error->forcewake = I915_READ(FORCEWAKE);
-
-       if (!HAS_PCH_SPLIT(dev))
-               for_each_pipe(pipe)
-                       error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));
-
-       if (INTEL_INFO(dev)->gen >= 6) {
-               error->error = I915_READ(ERROR_GEN6);
-               error->done_reg = I915_READ(DONE_REG);
-       }
-
-       if (INTEL_INFO(dev)->gen == 7)
-               error->err_int = I915_READ(GEN7_ERR_INT);
-
-       i915_get_extra_instdone(dev, error->extra_instdone);
-
-       i915_gem_record_fences(dev, error);
-       i915_gem_record_rings(dev, error);
+       struct i915_gpu_error *error = container_of(work, struct i915_gpu_error,
+                                                   work);
+       drm_i915_private_t *dev_priv = container_of(error, drm_i915_private_t,
+                                                   gpu_error);
+       struct drm_device *dev = dev_priv->dev;
+       struct intel_ring_buffer *ring;
+       char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
+       char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
+       char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
+       int i, ret;
 
-       /* Record buffers on the active and pinned lists. */
-       error->active_bo = NULL;
-       error->pinned_bo = NULL;
+       kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);
 
-       i = 0;
-       list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
-               i++;
-       error->active_bo_count = i;
-       list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
-               if (obj->pin_count)
-                       i++;
-       error->pinned_bo_count = i - error->active_bo_count;
+       /*
+        * Note that there's only one work item which does gpu resets, so we
+        * need not worry about concurrent gpu resets potentially incrementing
+        * error->reset_counter twice. We only need to take care of another
+        * racing irq/hangcheck declaring the gpu dead for a second time. A
+        * quick check for that is good enough: schedule_work ensures the
+        * correct ordering between hang detection and this work item, and since
+        * the reset in-progress bit is only ever set by code outside of this
+        * work we don't need to worry about any other races.
+        */
+       if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
+               DRM_DEBUG_DRIVER("resetting chip\n");
+               kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE,
+                                  reset_event);
 
-       error->active_bo = NULL;
-       error->pinned_bo = NULL;
-       if (i) {
-               error->active_bo = kmalloc(sizeof(*error->active_bo)*i,
-                                          GFP_ATOMIC);
-               if (error->active_bo)
-                       error->pinned_bo =
-                               error->active_bo + error->active_bo_count;
-       }
+               ret = i915_reset(dev);
 
-       if (error->active_bo)
-               error->active_bo_count =
-                       capture_active_bo(error->active_bo,
-                                         error->active_bo_count,
-                                         &dev_priv->mm.active_list);
+               if (ret == 0) {
+                       /*
+                        * After all the gem state is reset, increment the reset
+                        * counter and wake up everyone waiting for the reset to
+                        * complete.
+                        *
+                        * Since unlock operations are a one-sided barrier only,
+                        * we need to insert a barrier here to order any seqno
+                        * updates before
+                        * the counter increment.
+                        */
+                       smp_mb__before_atomic_inc();
+                       atomic_inc(&dev_priv->gpu_error.reset_counter);
 
-       if (error->pinned_bo)
-               error->pinned_bo_count =
-                       capture_pinned_bo(error->pinned_bo,
-                                         error->pinned_bo_count,
-                                         &dev_priv->mm.bound_list);
+                       kobject_uevent_env(&dev->primary->kdev.kobj,
+                                          KOBJ_CHANGE, reset_done_event);
+               } else {
+                       atomic_set(&error->reset_counter, I915_WEDGED);
+               }
 
-       do_gettimeofday(&error->time);
+               for_each_ring(ring, dev_priv, i)
+                       wake_up_all(&ring->irq_queue);
 
-       error->overlay = intel_overlay_capture_error_state(dev);
-       error->display = intel_display_capture_error_state(dev);
+               intel_display_handle_reset(dev);
 
-       spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
-       if (dev_priv->gpu_error.first_error == NULL) {
-               dev_priv->gpu_error.first_error = error;
-               error = NULL;
+               wake_up_all(&dev_priv->gpu_error.reset_queue);
        }
-       spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
-
-       if (error)
-               i915_error_state_free(&error->ref);
-}
-
-void i915_destroy_error_state(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_i915_error_state *error;
-       unsigned long flags;
-
-       spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
-       error = dev_priv->gpu_error.first_error;
-       dev_priv->gpu_error.first_error = NULL;
-       spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
-
-       if (error)
-               kref_put(&error->ref, i915_error_state_free);
 }
-#else
-#define i915_capture_error_state(x)
-#endif
 
 static void i915_report_and_clear_eir(struct drm_device *dev)
 {
@@ -2155,10 +1651,10 @@ static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, in
        if (INTEL_INFO(dev)->gen >= 4) {
                int dspsurf = DSPSURF(intel_crtc->plane);
                stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
-                                       obj->gtt_offset;
+                                       i915_gem_obj_ggtt_offset(obj);
        } else {
                int dspaddr = DSPADDR(intel_crtc->plane);
-               stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
+               stall_detected = I915_READ(dspaddr) == (i915_gem_obj_ggtt_offset(obj) +
                                                        crtc->y * crtc->fb->pitches[0] +
                                                        crtc->x * crtc->fb->bits_per_pixel/8);
        }
@@ -2202,29 +1698,14 @@ static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        unsigned long irqflags;
+       uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
+                                                    DE_PIPE_VBLANK_ILK(pipe);
 
        if (!i915_pipe_enabled(dev, pipe))
                return -EINVAL;
 
        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-       ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
-                                   DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
-       spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
-
-       return 0;
-}
-
-static int ivybridge_enable_vblank(struct drm_device *dev, int pipe)
-{
-       drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-       unsigned long irqflags;
-
-       if (!i915_pipe_enabled(dev, pipe))
-               return -EINVAL;
-
-       spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-       ironlake_enable_display_irq(dev_priv,
-                                   DE_PIPEA_VBLANK_IVB << (5 * pipe));
+       ironlake_enable_display_irq(dev_priv, bit);
        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 
        return 0;
@@ -2275,21 +1756,11 @@ static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        unsigned long irqflags;
+       uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
+                                                    DE_PIPE_VBLANK_ILK(pipe);
 
        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-       ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
-                                    DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
-       spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
-}
-
-static void ivybridge_disable_vblank(struct drm_device *dev, int pipe)
-{
-       drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-       unsigned long irqflags;
-
-       spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-       ironlake_disable_display_irq(dev_priv,
-                                    DE_PIPEA_VBLANK_IVB << (pipe * 5));
+       ironlake_disable_display_irq(dev_priv, bit);
        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 }
 
@@ -2392,10 +1863,10 @@ ring_stuck(struct intel_ring_buffer *ring, u32 acthd)
        u32 tmp;
 
        if (ring->hangcheck.acthd != acthd)
-               return active;
+               return HANGCHECK_ACTIVE;
 
        if (IS_GEN2(dev))
-               return hung;
+               return HANGCHECK_HUNG;
 
        /* Is the chip hanging on a WAIT_FOR_EVENT?
         * If so we can simply poke the RB_WAIT bit
@@ -2407,24 +1878,24 @@ ring_stuck(struct intel_ring_buffer *ring, u32 acthd)
                DRM_ERROR("Kicking stuck wait on %s\n",
                          ring->name);
                I915_WRITE_CTL(ring, tmp);
-               return kick;
+               return HANGCHECK_KICK;
        }
 
        if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
                switch (semaphore_passed(ring)) {
                default:
-                       return hung;
+                       return HANGCHECK_HUNG;
                case 1:
                        DRM_ERROR("Kicking stuck semaphore on %s\n",
                                  ring->name);
                        I915_WRITE_CTL(ring, tmp);
-                       return kick;
+                       return HANGCHECK_KICK;
                case 0:
-                       return wait;
+                       return HANGCHECK_WAIT;
                }
        }
 
-       return hung;
+       return HANGCHECK_HUNG;
 }
 
 /**
@@ -2435,7 +1906,7 @@ ring_stuck(struct intel_ring_buffer *ring, u32 acthd)
  * we kick the ring. If we see no progress on three subsequent calls
  * we assume chip is wedged and try to fix it by resetting the chip.
  */
-void i915_hangcheck_elapsed(unsigned long data)
+static void i915_hangcheck_elapsed(unsigned long data)
 {
        struct drm_device *dev = (struct drm_device *)data;
        drm_i915_private_t *dev_priv = dev->dev_private;
@@ -2471,8 +1942,6 @@ void i915_hangcheck_elapsed(unsigned long data)
                                } else
                                        busy = false;
                        } else {
-                               int score;
-
                                /* We always increment the hangcheck score
                                 * if the ring is busy and still processing
                                 * the same request, so that no single request
@@ -2492,21 +1961,19 @@ void i915_hangcheck_elapsed(unsigned long data)
                                                                    acthd);
 
                                switch (ring->hangcheck.action) {
-                               case wait:
-                                       score = 0;
+                               case HANGCHECK_WAIT:
                                        break;
-                               case active:
-                                       score = BUSY;
+                               case HANGCHECK_ACTIVE:
+                                       ring->hangcheck.score += BUSY;
                                        break;
-                               case kick:
-                                       score = KICK;
+                               case HANGCHECK_KICK:
+                                       ring->hangcheck.score += KICK;
                                        break;
-                               case hung:
-                                       score = HUNG;
+                               case HANGCHECK_HUNG:
+                                       ring->hangcheck.score += HUNG;
                                        stuck[i] = true;
                                        break;
                                }
-                               ring->hangcheck.score += score;
                        }
                } else {
                        /* Gradually reduce the count so that we catch DoS
@@ -2536,9 +2003,17 @@ void i915_hangcheck_elapsed(unsigned long data)
        if (busy_count)
                /* Reset timer case chip hangs without another request
                 * being added */
-               mod_timer(&dev_priv->gpu_error.hangcheck_timer,
-                         round_jiffies_up(jiffies +
-                                          DRM_I915_HANGCHECK_JIFFIES));
+               i915_queue_hangcheck(dev);
+}
+
+void i915_queue_hangcheck(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       if (!i915_enable_hangcheck)
+               return;
+
+       mod_timer(&dev_priv->gpu_error.hangcheck_timer,
+                 round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
 }
 
 static void ibx_irq_preinstall(struct drm_device *dev)
@@ -2560,31 +2035,26 @@ static void ibx_irq_preinstall(struct drm_device *dev)
        POSTING_READ(SDEIER);
 }
 
-/* drm_dma.h hooks
-*/
-static void ironlake_irq_preinstall(struct drm_device *dev)
+static void gen5_gt_irq_preinstall(struct drm_device *dev)
 {
-       drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-
-       atomic_set(&dev_priv->irq_received, 0);
-
-       I915_WRITE(HWSTAM, 0xeffe);
-
-       /* XXX hotplug from PCH */
-
-       I915_WRITE(DEIMR, 0xffffffff);
-       I915_WRITE(DEIER, 0x0);
-       POSTING_READ(DEIER);
+       struct drm_i915_private *dev_priv = dev->dev_private;
 
        /* and GT */
        I915_WRITE(GTIMR, 0xffffffff);
        I915_WRITE(GTIER, 0x0);
        POSTING_READ(GTIER);
 
-       ibx_irq_preinstall(dev);
+       if (INTEL_INFO(dev)->gen >= 6) {
+               /* and PM */
+               I915_WRITE(GEN6_PMIMR, 0xffffffff);
+               I915_WRITE(GEN6_PMIER, 0x0);
+               POSTING_READ(GEN6_PMIER);
+       }
 }
 
-static void ivybridge_irq_preinstall(struct drm_device *dev)
+/* drm_dma.h hooks
+*/
+static void ironlake_irq_preinstall(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 
@@ -2592,21 +2062,11 @@ static void ivybridge_irq_preinstall(struct drm_device *dev)
 
        I915_WRITE(HWSTAM, 0xeffe);
 
-       /* XXX hotplug from PCH */
-
        I915_WRITE(DEIMR, 0xffffffff);
        I915_WRITE(DEIER, 0x0);
        POSTING_READ(DEIER);
 
-       /* and GT */
-       I915_WRITE(GTIMR, 0xffffffff);
-       I915_WRITE(GTIER, 0x0);
-       POSTING_READ(GTIER);
-
-       /* Power management */
-       I915_WRITE(GEN6_PMIMR, 0xffffffff);
-       I915_WRITE(GEN6_PMIER, 0x0);
-       POSTING_READ(GEN6_PMIER);
+       gen5_gt_irq_preinstall(dev);
 
        ibx_irq_preinstall(dev);
 }
@@ -2627,9 +2087,8 @@ static void valleyview_irq_preinstall(struct drm_device *dev)
        /* and GT */
        I915_WRITE(GTIIR, I915_READ(GTIIR));
        I915_WRITE(GTIIR, I915_READ(GTIIR));
-       I915_WRITE(GTIMR, 0xffffffff);
-       I915_WRITE(GTIER, 0x0);
-       POSTING_READ(GTIER);
+
+       gen5_gt_irq_preinstall(dev);
 
        I915_WRITE(DPINVGTT, 0xff);
 
@@ -2648,22 +2107,21 @@ static void ibx_hpd_irq_setup(struct drm_device *dev)
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        struct drm_mode_config *mode_config = &dev->mode_config;
        struct intel_encoder *intel_encoder;
-       u32 mask = ~I915_READ(SDEIMR);
-       u32 hotplug;
+       u32 hotplug_irqs, hotplug, enabled_irqs = 0;
 
        if (HAS_PCH_IBX(dev)) {
-               mask &= ~SDE_HOTPLUG_MASK;
+               hotplug_irqs = SDE_HOTPLUG_MASK;
                list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
                        if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
-                               mask |= hpd_ibx[intel_encoder->hpd_pin];
+                               enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin];
        } else {
-               mask &= ~SDE_HOTPLUG_MASK_CPT;
+               hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
                list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
                        if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
-                               mask |= hpd_cpt[intel_encoder->hpd_pin];
+                               enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin];
        }
 
-       I915_WRITE(SDEIMR, ~mask);
+       ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
 
        /*
         * Enable digital hotplug on the PCH, and configure the DP short pulse
@@ -2700,123 +2158,103 @@ static void ibx_irq_postinstall(struct drm_device *dev)
        I915_WRITE(SDEIMR, ~mask);
 }
 
-static int ironlake_irq_postinstall(struct drm_device *dev)
+static void gen5_gt_irq_postinstall(struct drm_device *dev)
 {
-       unsigned long irqflags;
-
-       drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-       /* enable kind of interrupts always enabled */
-       u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
-                          DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
-                          DE_AUX_CHANNEL_A | DE_PIPEB_FIFO_UNDERRUN |
-                          DE_PIPEA_FIFO_UNDERRUN | DE_POISON;
-       u32 gt_irqs;
-
-       dev_priv->irq_mask = ~display_mask;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       u32 pm_irqs, gt_irqs;
 
-       /* should always can generate irq */
-       I915_WRITE(DEIIR, I915_READ(DEIIR));
-       I915_WRITE(DEIMR, dev_priv->irq_mask);
-       I915_WRITE(DEIER, display_mask |
-                         DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT);
-       POSTING_READ(DEIER);
+       pm_irqs = gt_irqs = 0;
 
        dev_priv->gt_irq_mask = ~0;
+       if (HAS_L3_GPU_CACHE(dev)) {
+               /* L3 parity interrupt is always unmasked. */
+               dev_priv->gt_irq_mask = ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
+               gt_irqs |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
+       }
 
-       I915_WRITE(GTIIR, I915_READ(GTIIR));
-       I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
-
-       gt_irqs = GT_RENDER_USER_INTERRUPT;
-
-       if (IS_GEN6(dev))
-               gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
-       else
+       gt_irqs |= GT_RENDER_USER_INTERRUPT;
+       if (IS_GEN5(dev)) {
                gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
                           ILK_BSD_USER_INTERRUPT;
+       } else {
+               gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
+       }
 
+       I915_WRITE(GTIIR, I915_READ(GTIIR));
+       I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
        I915_WRITE(GTIER, gt_irqs);
        POSTING_READ(GTIER);
 
-       ibx_irq_postinstall(dev);
+       if (INTEL_INFO(dev)->gen >= 6) {
+               pm_irqs |= GEN6_PM_RPS_EVENTS;
 
-       if (IS_IRONLAKE_M(dev)) {
-               /* Enable PCU event interrupts
-                *
-                * spinlocking not required here for correctness since interrupt
-                * setup is guaranteed to run in single-threaded context. But we
-                * need it to make the assert_spin_locked happy. */
-               spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-               ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
-               spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
-       }
+               if (HAS_VEBOX(dev))
+                       pm_irqs |= PM_VEBOX_USER_INTERRUPT;
 
-       return 0;
+               dev_priv->pm_irq_mask = 0xffffffff;
+               I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
+               I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask);
+               I915_WRITE(GEN6_PMIER, pm_irqs);
+               POSTING_READ(GEN6_PMIER);
+       }
 }
 
-static int ivybridge_irq_postinstall(struct drm_device *dev)
+static int ironlake_irq_postinstall(struct drm_device *dev)
 {
+       unsigned long irqflags;
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-       /* enable kind of interrupts always enabled */
-       u32 display_mask =
-               DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB |
-               DE_PLANEC_FLIP_DONE_IVB |
-               DE_PLANEB_FLIP_DONE_IVB |
-               DE_PLANEA_FLIP_DONE_IVB |
-               DE_AUX_CHANNEL_A_IVB |
-               DE_ERR_INT_IVB;
-       u32 pm_irqs = GEN6_PM_RPS_EVENTS;
-       u32 gt_irqs;
+       u32 display_mask, extra_mask;
+
+       if (INTEL_INFO(dev)->gen >= 7) {
+               display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
+                               DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
+                               DE_PLANEB_FLIP_DONE_IVB |
+                               DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB |
+                               DE_ERR_INT_IVB);
+               extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
+                             DE_PIPEA_VBLANK_IVB);
+
+               I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));
+       } else {
+               display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
+                               DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
+                               DE_AUX_CHANNEL_A | DE_PIPEB_FIFO_UNDERRUN |
+                               DE_PIPEA_FIFO_UNDERRUN | DE_POISON);
+               extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT;
+       }
 
        dev_priv->irq_mask = ~display_mask;
 
        /* should always can generate irq */
-       I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));
        I915_WRITE(DEIIR, I915_READ(DEIIR));
        I915_WRITE(DEIMR, dev_priv->irq_mask);
-       I915_WRITE(DEIER,
-                  display_mask |
-                  DE_PIPEC_VBLANK_IVB |
-                  DE_PIPEB_VBLANK_IVB |
-                  DE_PIPEA_VBLANK_IVB);
+       I915_WRITE(DEIER, display_mask | extra_mask);
        POSTING_READ(DEIER);
 
-       dev_priv->gt_irq_mask = ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
-
-       I915_WRITE(GTIIR, I915_READ(GTIIR));
-       I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
-
-       gt_irqs = GT_RENDER_USER_INTERRUPT | GT_BSD_USER_INTERRUPT |
-                 GT_BLT_USER_INTERRUPT | GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
-       I915_WRITE(GTIER, gt_irqs);
-       POSTING_READ(GTIER);
-
-       I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
-       if (HAS_VEBOX(dev))
-               pm_irqs |= PM_VEBOX_USER_INTERRUPT |
-                       PM_VEBOX_CS_ERROR_INTERRUPT;
-
-       /* Our enable/disable rps functions may touch these registers so
-        * make sure to set a known state for only the non-RPS bits.
-        * The RMW is extra paranoia since this should be called after being set
-        * to a known state in preinstall.
-        * */
-       I915_WRITE(GEN6_PMIMR,
-                  (I915_READ(GEN6_PMIMR) | ~GEN6_PM_RPS_EVENTS) & ~pm_irqs);
-       I915_WRITE(GEN6_PMIER,
-                  (I915_READ(GEN6_PMIER) & GEN6_PM_RPS_EVENTS) | pm_irqs);
-       POSTING_READ(GEN6_PMIER);
+       gen5_gt_irq_postinstall(dev);
 
        ibx_irq_postinstall(dev);
 
+       if (IS_IRONLAKE_M(dev)) {
+               /* Enable PCU event interrupts
+                *
+                * spinlocking not required here for correctness since interrupt
+                * setup is guaranteed to run in single-threaded context. But we
+                * need it to make the assert_spin_locked happy. */
+               spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+               ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
+               spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+       }
+
        return 0;
 }
 
 static int valleyview_irq_postinstall(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-       u32 gt_irqs;
        u32 enable_mask;
        u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;
+       unsigned long irqflags;
 
        enable_mask = I915_DISPLAY_PORT_INTERRUPT;
        enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
@@ -2842,20 +2280,18 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
        I915_WRITE(PIPESTAT(1), 0xffff);
        POSTING_READ(VLV_IER);
 
+       /* Interrupt setup is already guaranteed to be single-threaded, this is
+        * just to make the assert_spin_locked check happy. */
+       spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
        i915_enable_pipestat(dev_priv, 0, pipestat_enable);
        i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
        i915_enable_pipestat(dev_priv, 1, pipestat_enable);
+       spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 
        I915_WRITE(VLV_IIR, 0xffffffff);
        I915_WRITE(VLV_IIR, 0xffffffff);
 
-       I915_WRITE(GTIIR, I915_READ(GTIIR));
-       I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
-
-       gt_irqs = GT_RENDER_USER_INTERRUPT | GT_BSD_USER_INTERRUPT |
-               GT_BLT_USER_INTERRUPT;
-       I915_WRITE(GTIER, gt_irqs);
-       POSTING_READ(GTIER);
+       gen5_gt_irq_postinstall(dev);
 
        /* ack & enable invalid PTE error interrupts */
 #if 0 /* FIXME: add support to irq handler for checking these bits */
@@ -3001,7 +2437,6 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
        u16 iir, new_iir;
        u32 pipe_stats[2];
        unsigned long irqflags;
-       int irq_received;
        int pipe;
        u16 flip_mask =
                I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
@@ -3035,7 +2470,6 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
                                        DRM_DEBUG_DRIVER("pipe %c underrun\n",
                                                         pipe_name(pipe));
                                I915_WRITE(reg, pipe_stats[pipe]);
-                               irq_received = 1;
                        }
                }
                spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
@@ -3323,6 +2757,7 @@ static int i965_irq_postinstall(struct drm_device *dev)
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        u32 enable_mask;
        u32 error_mask;
+       unsigned long irqflags;
 
        /* Unmask the interrupts that we always want on. */
        dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
@@ -3341,7 +2776,11 @@ static int i965_irq_postinstall(struct drm_device *dev)
        if (IS_G4X(dev))
                enable_mask |= I915_BSD_USER_INTERRUPT;
 
+       /* Interrupt setup is already guaranteed to be single-threaded, this is
+        * just to make the assert_spin_locked check happy. */
+       spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
        i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
+       spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 
        /*
         * Enable some error detection, note the instruction error mask
@@ -3616,15 +3055,6 @@ void intel_irq_init(struct drm_device *dev)
                dev->driver->enable_vblank = valleyview_enable_vblank;
                dev->driver->disable_vblank = valleyview_disable_vblank;
                dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
-       } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
-               /* Share uninstall handlers with ILK/SNB */
-               dev->driver->irq_handler = ivybridge_irq_handler;
-               dev->driver->irq_preinstall = ivybridge_irq_preinstall;
-               dev->driver->irq_postinstall = ivybridge_irq_postinstall;
-               dev->driver->irq_uninstall = ironlake_irq_uninstall;
-               dev->driver->enable_vblank = ivybridge_enable_vblank;
-               dev->driver->disable_vblank = ivybridge_disable_vblank;
-               dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
        } else if (HAS_PCH_SPLIT(dev)) {
                dev->driver->irq_handler = ironlake_irq_handler;
                dev->driver->irq_preinstall = ironlake_irq_preinstall;
index 6f514297c4837882a64f9480b9f944c3913f7b0c..53d0e709456a6438a9a11e7e4f26bd2d033042ff 100644 (file)
 #define   GC_LOW_FREQUENCY_ENABLE      (1 << 7)
 #define   GC_DISPLAY_CLOCK_190_200_MHZ (0 << 4)
 #define   GC_DISPLAY_CLOCK_333_MHZ     (4 << 4)
+#define   GC_DISPLAY_CLOCK_267_MHZ_PNV (0 << 4)
+#define   GC_DISPLAY_CLOCK_333_MHZ_PNV (1 << 4)
+#define   GC_DISPLAY_CLOCK_444_MHZ_PNV (2 << 4)
+#define   GC_DISPLAY_CLOCK_200_MHZ_PNV (5 << 4)
+#define   GC_DISPLAY_CLOCK_133_MHZ_PNV (6 << 4)
+#define   GC_DISPLAY_CLOCK_167_MHZ_PNV (7 << 4)
 #define   GC_DISPLAY_CLOCK_MASK                (7 << 4)
 #define   GM45_GC_RENDER_CLOCK_MASK    (0xf << 0)
 #define   GM45_GC_RENDER_CLOCK_266_MHZ (8 << 0)
 #define PUNIT_REG_GPU_LFM                      0xd3
 #define PUNIT_REG_GPU_FREQ_REQ                 0xd4
 #define PUNIT_REG_GPU_FREQ_STS                 0xd8
+#define   GENFREQSTATUS                                (1<<0)
 #define PUNIT_REG_MEDIA_TURBO_FREQ_REQ         0xdc
 
 #define PUNIT_FUSE_BUS2                                0xf6 /* bits 47:40 */
 #define   ERR_INT_FIFO_UNDERRUN_C      (1<<6)
 #define   ERR_INT_FIFO_UNDERRUN_B      (1<<3)
 #define   ERR_INT_FIFO_UNDERRUN_A      (1<<0)
+#define   ERR_INT_FIFO_UNDERRUN(pipe)  (1<<(pipe*3))
 
 #define FPGA_DBG               0x42300
 #define   FPGA_DBG_RM_NOCLAIM  (1<<31)
                                        will not assert AGPBUSY# and will only
                                        be delivered when out of C3. */
 #define   INSTPM_FORCE_ORDERING                                (1<<7) /* GEN6+ */
+#define   INSTPM_TLB_INVALIDATE        (1<<9)
+#define   INSTPM_SYNC_FLUSH    (1<<5)
 #define ACTHD          0x020c8
 #define FW_BLC         0x020d8
 #define FW_BLC2                0x020dc
 #define _DPLL_B        (dev_priv->info->display_mmio_offset + 0x6018)
 #define DPLL(pipe) _PIPE(pipe, _DPLL_A, _DPLL_B)
 #define   DPLL_VCO_ENABLE              (1 << 31)
-#define   DPLL_DVO_HIGH_SPEED          (1 << 30)
+#define   DPLL_SDVO_HIGH_SPEED         (1 << 30)
+#define   DPLL_DVO_2X_MODE             (1 << 30)
 #define   DPLL_EXT_BUFFER_ENABLE_VLV   (1 << 30)
 #define   DPLL_SYNCLOCK_ENABLE         (1 << 29)
 #define   DPLL_REFA_CLK_ENABLE_VLV     (1 << 29)
 #define   MCH_SSKPD_WM0_MASK           0x3f
 #define   MCH_SSKPD_WM0_VAL            0xc
 
+#define MCH_SECP_NRG_STTS              (MCHBAR_MIRROR_BASE_SNB + 0x592c)
+
 /* Clocking configuration register */
 #define CLKCFG                 0x10c00
 #define CLKCFG_FSB_400                                 (5 << 0)        /* hrawclk 100 */
 #define BCLRPAT(pipe) _PIPE(pipe, _BCLRPAT_A, _BCLRPAT_B)
 #define VSYNCSHIFT(trans) _TRANSCODER(trans, _VSYNCSHIFT_A, _VSYNCSHIFT_B)
 
+/* HSW eDP PSR registers */
+#define EDP_PSR_CTL                            0x64800
+#define   EDP_PSR_ENABLE                       (1<<31)
+#define   EDP_PSR_LINK_DISABLE                 (0<<27)
+#define   EDP_PSR_LINK_STANDBY                 (1<<27)
+#define   EDP_PSR_MIN_LINK_ENTRY_TIME_MASK     (3<<25)
+#define   EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES  (0<<25)
+#define   EDP_PSR_MIN_LINK_ENTRY_TIME_4_LINES  (1<<25)
+#define   EDP_PSR_MIN_LINK_ENTRY_TIME_2_LINES  (2<<25)
+#define   EDP_PSR_MIN_LINK_ENTRY_TIME_0_LINES  (3<<25)
+#define   EDP_PSR_MAX_SLEEP_TIME_SHIFT         20
+#define   EDP_PSR_SKIP_AUX_EXIT                        (1<<12)
+#define   EDP_PSR_TP1_TP2_SEL                  (0<<11)
+#define   EDP_PSR_TP1_TP3_SEL                  (1<<11)
+#define   EDP_PSR_TP2_TP3_TIME_500us           (0<<8)
+#define   EDP_PSR_TP2_TP3_TIME_100us           (1<<8)
+#define   EDP_PSR_TP2_TP3_TIME_2500us          (2<<8)
+#define   EDP_PSR_TP2_TP3_TIME_0us             (3<<8)
+#define   EDP_PSR_TP1_TIME_500us               (0<<4)
+#define   EDP_PSR_TP1_TIME_100us               (1<<4)
+#define   EDP_PSR_TP1_TIME_2500us              (2<<4)
+#define   EDP_PSR_TP1_TIME_0us                 (3<<4)
+#define   EDP_PSR_IDLE_FRAME_SHIFT             0
+
+#define EDP_PSR_AUX_CTL                        0x64810
+#define EDP_PSR_AUX_DATA1              0x64814
+#define   EDP_PSR_DPCD_COMMAND         0x80060000
+#define EDP_PSR_AUX_DATA2              0x64818
+#define   EDP_PSR_DPCD_NORMAL_OPERATION        (1<<24)
+#define EDP_PSR_AUX_DATA3              0x6481c
+#define EDP_PSR_AUX_DATA4              0x64820
+#define EDP_PSR_AUX_DATA5              0x64824
+
+#define EDP_PSR_STATUS_CTL                     0x64840
+#define   EDP_PSR_STATUS_STATE_MASK            (7<<29)
+#define   EDP_PSR_STATUS_STATE_IDLE            (0<<29)
+#define   EDP_PSR_STATUS_STATE_SRDONACK                (1<<29)
+#define   EDP_PSR_STATUS_STATE_SRDENT          (2<<29)
+#define   EDP_PSR_STATUS_STATE_BUFOFF          (3<<29)
+#define   EDP_PSR_STATUS_STATE_BUFON           (4<<29)
+#define   EDP_PSR_STATUS_STATE_AUXACK          (5<<29)
+#define   EDP_PSR_STATUS_STATE_SRDOFFACK       (6<<29)
+#define   EDP_PSR_STATUS_LINK_MASK             (3<<26)
+#define   EDP_PSR_STATUS_LINK_FULL_OFF         (0<<26)
+#define   EDP_PSR_STATUS_LINK_FULL_ON          (1<<26)
+#define   EDP_PSR_STATUS_LINK_STANDBY          (2<<26)
+#define   EDP_PSR_STATUS_MAX_SLEEP_TIMER_SHIFT 20
+#define   EDP_PSR_STATUS_MAX_SLEEP_TIMER_MASK  0x1f
+#define   EDP_PSR_STATUS_COUNT_SHIFT           16
+#define   EDP_PSR_STATUS_COUNT_MASK            0xf
+#define   EDP_PSR_STATUS_AUX_ERROR             (1<<15)
+#define   EDP_PSR_STATUS_AUX_SENDING           (1<<12)
+#define   EDP_PSR_STATUS_SENDING_IDLE          (1<<9)
+#define   EDP_PSR_STATUS_SENDING_TP2_TP3       (1<<8)
+#define   EDP_PSR_STATUS_SENDING_TP1           (1<<4)
+#define   EDP_PSR_STATUS_IDLE_MASK             0xf
+
+#define EDP_PSR_PERF_CNT               0x64844
+#define   EDP_PSR_PERF_CNT_MASK                0xffffff
+
+#define EDP_PSR_DEBUG_CTL              0x64860
+#define   EDP_PSR_DEBUG_MASK_LPSP      (1<<27)
+#define   EDP_PSR_DEBUG_MASK_MEMUP     (1<<26)
+#define   EDP_PSR_DEBUG_MASK_HPD       (1<<25)
+
 /* VGA port control */
 #define ADPA                   0x61100
 #define PCH_ADPA                0xe1100
  * (Haswell and newer) to see which VIDEO_DIP_DATA byte corresponds to each byte
  * of the infoframe structure specified by CEA-861. */
 #define   VIDEO_DIP_DATA_SIZE  32
+#define   VIDEO_DIP_VSC_DATA_SIZE      36
 #define VIDEO_DIP_CTL          0x61170
 /* Pre HSW: */
 #define   VIDEO_DIP_ENABLE             (1 << 31)
 #define BLC_PWM_CPU_CTL2       0x48250
 #define BLC_PWM_CPU_CTL                0x48254
 
+#define HSW_BLC_PWM2_CTL       0x48350
+
 /* PCH CTL1 is totally different, all but the below bits are reserved. CTL2 is
  * like the normal CTL from gen4 and earlier. Hooray for confusing naming. */
 #define BLC_PWM_PCH_CTL1       0xc8250
 #define   BLM_PCH_POLARITY                     (1 << 29)
 #define BLC_PWM_PCH_CTL2       0xc8254
 
+#define UTIL_PIN_CTL           0x48400
+#define   UTIL_PIN_ENABLE      (1 << 31)
+
+#define PCH_GTC_CTL            0xe7000
+#define   PCH_GTC_ENABLE       (1 << 31)
+
 /* TV port control */
 #define TV_CTL                 0x68000
 /** Enables the TV encoder */
 #define  MLTR_WM2_SHIFT                8
 /* the unit of memory self-refresh latency time is 0.5us */
 #define  ILK_SRLT_MASK         0x3f
-#define ILK_LATENCY(shift)     (I915_READ(MLTR_ILK) >> (shift) & ILK_SRLT_MASK)
-#define ILK_READ_WM1_LATENCY() ILK_LATENCY(MLTR_WM1_SHIFT)
-#define ILK_READ_WM2_LATENCY() ILK_LATENCY(MLTR_WM2_SHIFT)
 
 /* define the fifo size on Ironlake */
 #define ILK_DISPLAY_FIFO       128
 #define SSKPD_WM2_SHIFT                16
 #define SSKPD_WM3_SHIFT                24
 
-#define SNB_LATENCY(shift)     (I915_READ(MCHBAR_MIRROR_BASE_SNB + SSKPD) >> (shift) & SSKPD_WM_MASK)
-#define SNB_READ_WM0_LATENCY()         SNB_LATENCY(SSKPD_WM0_SHIFT)
-#define SNB_READ_WM1_LATENCY()         SNB_LATENCY(SSKPD_WM1_SHIFT)
-#define SNB_READ_WM2_LATENCY()         SNB_LATENCY(SSKPD_WM2_SHIFT)
-#define SNB_READ_WM3_LATENCY()         SNB_LATENCY(SSKPD_WM3_SHIFT)
-
 /*
  * The two pipe frame counter registers are not synchronized, so
  * reading a stable value is somewhat tricky. The following code
 #define DE_PLANEA_FLIP_DONE_IVB                (1<<3)
 #define DE_PIPEA_VBLANK_IVB            (1<<0)
 
+#define DE_PIPE_VBLANK_ILK(pipe)       (1 << ((pipe * 8) + 7))
+#define DE_PIPE_VBLANK_IVB(pipe)       (1 << (pipe * 5))
+
 #define VLV_MASTER_IER                 0x4400c /* Gunit master IER */
 #define   MASTER_INTERRUPT_ENABLE      (1<<31)
 
 #define  SERR_INT_TRANS_C_FIFO_UNDERRUN        (1<<6)
 #define  SERR_INT_TRANS_B_FIFO_UNDERRUN        (1<<3)
 #define  SERR_INT_TRANS_A_FIFO_UNDERRUN        (1<<0)
+#define  SERR_INT_TRANS_FIFO_UNDERRUN(pipe)    (1<<(pipe*3))
 
 /* digital port hotplug */
 #define PCH_PORT_HOTPLUG        0xc4030                /* SHOTPLUG_CTL */
 #define HSW_TVIDEO_DIP_VSC_DATA(trans) \
         _TRANSCODER(trans, HSW_VIDEO_DIP_VSC_DATA_A, HSW_VIDEO_DIP_VSC_DATA_B)
 
+#define HSW_STEREO_3D_CTL_A    0x70020
+#define   S3D_ENABLE           (1<<31)
+#define HSW_STEREO_3D_CTL_B    0x71020
+
+#define HSW_STEREO_3D_CTL(trans) \
+       _TRANSCODER(trans, HSW_STEREO_3D_CTL_A, HSW_STEREO_3D_CTL_A)
+
 #define _PCH_TRANS_HTOTAL_B          0xe1000
 #define _PCH_TRANS_HBLANK_B          0xe1004
 #define _PCH_TRANS_HSYNC_B           0xe1008
 #define  GT_FIFO_FREE_ENTRIES                  0x120008
 #define    GT_FIFO_NUM_RESERVED_ENTRIES                20
 
+#define  HSW_IDICR                             0x9008
+#define    IDIHASHMSK(x)                       (((x) & 0x3f) << 16)
+#define  HSW_EDRAM_PRESENT                     0x120010
+
 #define GEN6_UCGCTL1                           0x9400
 # define GEN6_BLBUNIT_CLOCK_GATE_DISABLE               (1 << 5)
 # define GEN6_CSUNIT_CLOCK_GATE_DISABLE                        (1 << 7)
 #define HSW_PWR_WELL_DRIVER                    0x45404 /* CTL2 */
 #define HSW_PWR_WELL_KVMR                      0x45408 /* CTL3 */
 #define HSW_PWR_WELL_DEBUG                     0x4540C /* CTL4 */
-#define   HSW_PWR_WELL_ENABLE                  (1<<31)
-#define   HSW_PWR_WELL_STATE                   (1<<30)
+#define   HSW_PWR_WELL_ENABLE_REQUEST          (1<<31)
+#define   HSW_PWR_WELL_STATE_ENABLED           (1<<30)
 #define HSW_PWR_WELL_CTL5                      0x45410
 #define   HSW_PWR_WELL_ENABLE_SINGLE_STEP      (1<<31)
 #define   HSW_PWR_WELL_PWR_GATE_OVERRIDE       (1<<20)
 #define  SBI_SSCAUXDIV6                                0x0610
 #define   SBI_SSCAUXDIV_FINALDIV2SEL(x)                ((x)<<4)
 #define  SBI_DBUFF0                            0x2a00
-#define   SBI_DBUFF0_ENABLE                    (1<<0)
+#define  SBI_GEN0                              0x1f00
+#define   SBI_GEN0_CFG_BUFFENABLE_DISABLE      (1<<0)
 
 /* LPT PIXCLK_GATE */
 #define PIXCLK_GATE                    0xC6020
 #define  LCPLL_CLK_FREQ_450            (0<<26)
 #define  LCPLL_CD_CLOCK_DISABLE                (1<<25)
 #define  LCPLL_CD2X_CLOCK_DISABLE      (1<<23)
+#define  LCPLL_POWER_DOWN_ALLOW                (1<<22)
 #define  LCPLL_CD_SOURCE_FCLK          (1<<21)
+#define  LCPLL_CD_SOURCE_FCLK_DONE     (1<<19)
+
+#define D_COMP                         (MCHBAR_MIRROR_BASE_SNB + 0x5F0C)
+#define  D_COMP_RCOMP_IN_PROGRESS      (1<<9)
+#define  D_COMP_COMP_FORCE             (1<<8)
+#define  D_COMP_COMP_DISABLE           (1<<0)
 
 /* Pipe WM_LINETIME - watermark line time */
 #define PIPE_WM_LINETIME_A             0x45270
index 6875b5654c63d55a65dcf639734e0de101f1f037..a777e7f3b0df924c7e7d2401b3994f6b8e963f3c 100644 (file)
@@ -409,6 +409,71 @@ static const struct attribute *gen6_attrs[] = {
        NULL,
 };
 
+static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
+                               struct bin_attribute *attr, char *buf,
+                               loff_t off, size_t count)
+{
+
+       struct device *kdev = container_of(kobj, struct device, kobj);
+       struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+       struct drm_device *dev = minor->dev;
+       struct i915_error_state_file_priv error_priv;
+       struct drm_i915_error_state_buf error_str;
+       ssize_t ret_count = 0;
+       int ret;
+
+       memset(&error_priv, 0, sizeof(error_priv));
+
+       ret = i915_error_state_buf_init(&error_str, count, off);
+       if (ret)
+               return ret;
+
+       error_priv.dev = dev;
+       i915_error_state_get(dev, &error_priv);
+
+       ret = i915_error_state_to_str(&error_str, &error_priv);
+       if (ret)
+               goto out;
+
+       ret_count = count < error_str.bytes ? count : error_str.bytes;
+
+       memcpy(buf, error_str.buf, ret_count);
+out:
+       i915_error_state_put(&error_priv);
+       i915_error_state_buf_release(&error_str);
+
+       return ret ?: ret_count;
+}
+
+static ssize_t error_state_write(struct file *file, struct kobject *kobj,
+                                struct bin_attribute *attr, char *buf,
+                                loff_t off, size_t count)
+{
+       struct device *kdev = container_of(kobj, struct device, kobj);
+       struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+       struct drm_device *dev = minor->dev;
+       int ret;
+
+       DRM_DEBUG_DRIVER("Resetting error state\n");
+
+       ret = mutex_lock_interruptible(&dev->struct_mutex);
+       if (ret)
+               return ret;
+
+       i915_destroy_error_state(dev);
+       mutex_unlock(&dev->struct_mutex);
+
+       return count;
+}
+
+static struct bin_attribute error_state_attr = {
+       .attr.name = "error",
+       .attr.mode = S_IRUSR | S_IWUSR,
+       .size = 0,
+       .read = error_state_read,
+       .write = error_state_write,
+};
+
 void i915_setup_sysfs(struct drm_device *dev)
 {
        int ret;
@@ -432,10 +497,16 @@ void i915_setup_sysfs(struct drm_device *dev)
                if (ret)
                        DRM_ERROR("gen6 sysfs setup failed\n");
        }
+
+       ret = sysfs_create_bin_file(&dev->primary->kdev.kobj,
+                                   &error_state_attr);
+       if (ret)
+               DRM_ERROR("error_state sysfs setup failed\n");
 }
 
 void i915_teardown_sysfs(struct drm_device *dev)
 {
+       sysfs_remove_bin_file(&dev->primary->kdev.kobj, &error_state_attr);
        sysfs_remove_files(&dev->primary->kdev.kobj, gen6_attrs);
        device_remove_bin_file(&dev->primary->kdev,  &dpf_attrs);
 #ifdef CONFIG_PM
index 3db4a681771320f1d42f8fb023c83d73e4426fb6..e2c5ee6f6194eb662c4234cfdf9304c387330d9e 100644 (file)
@@ -33,47 +33,52 @@ TRACE_EVENT(i915_gem_object_create,
            TP_printk("obj=%p, size=%u", __entry->obj, __entry->size)
 );
 
-TRACE_EVENT(i915_gem_object_bind,
-           TP_PROTO(struct drm_i915_gem_object *obj, bool mappable),
-           TP_ARGS(obj, mappable),
+TRACE_EVENT(i915_vma_bind,
+           TP_PROTO(struct i915_vma *vma, bool mappable),
+           TP_ARGS(vma, mappable),
 
            TP_STRUCT__entry(
                             __field(struct drm_i915_gem_object *, obj)
+                            __field(struct i915_address_space *, vm)
                             __field(u32, offset)
                             __field(u32, size)
                             __field(bool, mappable)
                             ),
 
            TP_fast_assign(
-                          __entry->obj = obj;
-                          __entry->offset = obj->gtt_space->start;
-                          __entry->size = obj->gtt_space->size;
+                          __entry->obj = vma->obj;
+                          __entry->vm = vma->vm;
+                          __entry->offset = vma->node.start;
+                          __entry->size = vma->node.size;
                           __entry->mappable = mappable;
                           ),
 
-           TP_printk("obj=%p, offset=%08x size=%x%s",
+           TP_printk("obj=%p, offset=%08x size=%x%s vm=%p",
                      __entry->obj, __entry->offset, __entry->size,
-                     __entry->mappable ? ", mappable" : "")
+                     __entry->mappable ? ", mappable" : "",
+                     __entry->vm)
 );
 
-TRACE_EVENT(i915_gem_object_unbind,
-           TP_PROTO(struct drm_i915_gem_object *obj),
-           TP_ARGS(obj),
+TRACE_EVENT(i915_vma_unbind,
+           TP_PROTO(struct i915_vma *vma),
+           TP_ARGS(vma),
 
            TP_STRUCT__entry(
                             __field(struct drm_i915_gem_object *, obj)
+                            __field(struct i915_address_space *, vm)
                             __field(u32, offset)
                             __field(u32, size)
                             ),
 
            TP_fast_assign(
-                          __entry->obj = obj;
-                          __entry->offset = obj->gtt_space->start;
-                          __entry->size = obj->gtt_space->size;
+                          __entry->obj = vma->obj;
+                          __entry->vm = vma->vm;
+                          __entry->offset = vma->node.start;
+                          __entry->size = vma->node.size;
                           ),
 
-           TP_printk("obj=%p, offset=%08x size=%x",
-                     __entry->obj, __entry->offset, __entry->size)
+           TP_printk("obj=%p, offset=%08x size=%x vm=%p",
+                     __entry->obj, __entry->offset, __entry->size, __entry->vm)
 );
 
 TRACE_EVENT(i915_gem_object_change_domain,
@@ -406,10 +411,12 @@ TRACE_EVENT(i915_flip_complete,
            TP_printk("plane=%d, obj=%p", __entry->plane, __entry->obj)
 );
 
-TRACE_EVENT(i915_reg_rw,
-       TP_PROTO(bool write, u32 reg, u64 val, int len),
+TRACE_EVENT_CONDITION(i915_reg_rw,
+       TP_PROTO(bool write, u32 reg, u64 val, int len, bool trace),
+
+       TP_ARGS(write, reg, val, len, trace),
 
-       TP_ARGS(write, reg, val, len),
+       TP_CONDITION(trace),
 
        TP_STRUCT__entry(
                __field(u64, val)
index bcbbaea2a78e3cb9ba9b013038a73521219bb2f6..57fe1ae32a0d69d767eb7d698cfeab2ead4231e2 100644 (file)
@@ -28,7 +28,7 @@ static const u8 intel_dsm_guid[] = {
        0x0f, 0x13, 0x17, 0xb0, 0x1c, 0x2c
 };
 
-static int intel_dsm(acpi_handle handle, int func, int arg)
+static int intel_dsm(acpi_handle handle, int func)
 {
        struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
        struct acpi_object_list input;
@@ -46,8 +46,9 @@ static int intel_dsm(acpi_handle handle, int func, int arg)
        params[1].integer.value = INTEL_DSM_REVISION_ID;
        params[2].type = ACPI_TYPE_INTEGER;
        params[2].integer.value = func;
-       params[3].type = ACPI_TYPE_INTEGER;
-       params[3].integer.value = arg;
+       params[3].type = ACPI_TYPE_PACKAGE;
+       params[3].package.count = 0;
+       params[3].package.elements = NULL;
 
        ret = acpi_evaluate_object(handle, "_DSM", &input, &output);
        if (ret) {
@@ -151,8 +152,9 @@ static void intel_dsm_platform_mux_info(void)
        params[1].integer.value = INTEL_DSM_REVISION_ID;
        params[2].type = ACPI_TYPE_INTEGER;
        params[2].integer.value = INTEL_DSM_FN_PLATFORM_MUX_INFO;
-       params[3].type = ACPI_TYPE_INTEGER;
-       params[3].integer.value = 0;
+       params[3].type = ACPI_TYPE_PACKAGE;
+       params[3].package.count = 0;
+       params[3].package.elements = NULL;
 
        ret = acpi_evaluate_object(intel_dsm_priv.dhandle, "_DSM", &input,
                                   &output);
@@ -205,7 +207,7 @@ static bool intel_dsm_pci_probe(struct pci_dev *pdev)
                return false;
        }
 
-       ret = intel_dsm(dhandle, INTEL_DSM_FN_SUPPORTED_FUNCTIONS, 0);
+       ret = intel_dsm(dhandle, INTEL_DSM_FN_SUPPORTED_FUNCTIONS);
        if (ret < 0) {
                DRM_DEBUG_KMS("failed to get supported _DSM functions\n");
                return false;
index 3acec8c4816606f1f2aa82bbe0ad3f5c105f4ad5..b5a3875f22c7cb5d18550b6be3b1c4b48b527274 100644 (file)
@@ -52,15 +52,14 @@ struct intel_crt {
        u32 adpa_reg;
 };
 
-static struct intel_crt *intel_attached_crt(struct drm_connector *connector)
+static struct intel_crt *intel_encoder_to_crt(struct intel_encoder *encoder)
 {
-       return container_of(intel_attached_encoder(connector),
-                           struct intel_crt, base);
+       return container_of(encoder, struct intel_crt, base);
 }
 
-static struct intel_crt *intel_encoder_to_crt(struct intel_encoder *encoder)
+static struct intel_crt *intel_attached_crt(struct drm_connector *connector)
 {
-       return container_of(encoder, struct intel_crt, base);
+       return intel_encoder_to_crt(intel_attached_encoder(connector));
 }
 
 static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
@@ -238,17 +237,14 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder,
        return true;
 }
 
-static void intel_crt_mode_set(struct drm_encoder *encoder,
-                              struct drm_display_mode *mode,
-                              struct drm_display_mode *adjusted_mode)
+static void intel_crt_mode_set(struct intel_encoder *encoder)
 {
 
-       struct drm_device *dev = encoder->dev;
-       struct drm_crtc *crtc = encoder->crtc;
-       struct intel_crt *crt =
-               intel_encoder_to_crt(to_intel_encoder(encoder));
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       struct drm_device *dev = encoder->base.dev;
+       struct intel_crt *crt = intel_encoder_to_crt(encoder);
+       struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
        struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode;
        u32 adpa;
 
        if (HAS_PCH_SPLIT(dev))
@@ -265,14 +261,14 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
        if (HAS_PCH_LPT(dev))
                ; /* Those bits don't exist here */
        else if (HAS_PCH_CPT(dev))
-               adpa |= PORT_TRANS_SEL_CPT(intel_crtc->pipe);
-       else if (intel_crtc->pipe == 0)
+               adpa |= PORT_TRANS_SEL_CPT(crtc->pipe);
+       else if (crtc->pipe == 0)
                adpa |= ADPA_PIPE_A_SELECT;
        else
                adpa |= ADPA_PIPE_B_SELECT;
 
        if (!HAS_PCH_SPLIT(dev))
-               I915_WRITE(BCLRPAT(intel_crtc->pipe), 0);
+               I915_WRITE(BCLRPAT(crtc->pipe), 0);
 
        I915_WRITE(crt->adpa_reg, adpa);
 }
@@ -613,6 +609,10 @@ intel_crt_detect(struct drm_connector *connector, bool force)
        enum drm_connector_status status;
        struct intel_load_detect_pipe tmp;
 
+       DRM_DEBUG_KMS("[CONNECTOR:%d:%s] force=%d\n",
+                     connector->base.id, drm_get_connector_name(connector),
+                     force);
+
        if (I915_HAS_HOTPLUG(dev)) {
                /* We can not rely on the HPD pin always being correctly wired
                 * up, for example many KVM do not pass it through, and so
@@ -707,10 +707,6 @@ static void intel_crt_reset(struct drm_connector *connector)
  * Routines for controlling stuff on the analog port
  */
 
-static const struct drm_encoder_helper_funcs crt_encoder_funcs = {
-       .mode_set = intel_crt_mode_set,
-};
-
 static const struct drm_connector_funcs intel_crt_connector_funcs = {
        .reset = intel_crt_reset,
        .dpms = intel_crt_dpms,
@@ -800,6 +796,7 @@ void intel_crt_init(struct drm_device *dev)
                crt->adpa_reg = ADPA;
 
        crt->base.compute_config = intel_crt_compute_config;
+       crt->base.mode_set = intel_crt_mode_set;
        crt->base.disable = intel_disable_crt;
        crt->base.enable = intel_enable_crt;
        crt->base.get_config = intel_crt_get_config;
@@ -811,7 +808,6 @@ void intel_crt_init(struct drm_device *dev)
                crt->base.get_hw_state = intel_crt_get_hw_state;
        intel_connector->get_hw_state = intel_connector_get_hw_state;
 
-       drm_encoder_helper_add(&crt->base.base, &crt_encoder_funcs);
        drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
 
        drm_sysfs_connector_add(connector);
index b042ee5c40704a8beaa1e5b99f3d6a1b2082d722..63aca49d11a843a6ad6ae1a62dc11d0b3bfd470c 100644 (file)
@@ -84,25 +84,17 @@ static enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder)
  * in either FDI or DP modes only, as HDMI connections will work with both
  * of those
  */
-static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port,
-                                     bool use_fdi_mode)
+static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 reg;
        int i;
-       const u32 *ddi_translations = ((use_fdi_mode) ?
+       const u32 *ddi_translations = (port == PORT_E) ?
                hsw_ddi_translations_fdi :
-               hsw_ddi_translations_dp);
+               hsw_ddi_translations_dp;
 
-       DRM_DEBUG_DRIVER("Initializing DDI buffers for port %c in %s mode\n",
-                       port_name(port),
-                       use_fdi_mode ? "FDI" : "DP");
-
-       WARN((use_fdi_mode && (port != PORT_E)),
-               "Programming port %c in FDI mode, this probably will not work.\n",
-               port_name(port));
-
-       for (i=0, reg=DDI_BUF_TRANS(port); i < ARRAY_SIZE(hsw_ddi_translations_fdi); i++) {
+       for (i = 0, reg = DDI_BUF_TRANS(port);
+            i < ARRAY_SIZE(hsw_ddi_translations_fdi); i++) {
                I915_WRITE(reg, ddi_translations[i]);
                reg += 4;
        }
@@ -118,14 +110,8 @@ void intel_prepare_ddi(struct drm_device *dev)
        if (!HAS_DDI(dev))
                return;
 
-       for (port = PORT_A; port < PORT_E; port++)
-               intel_prepare_ddi_buffers(dev, port, false);
-
-       /* DDI E is the suggested one to work in FDI mode, so program is as such
-        * by default. It will have to be re-programmed in case a digital DP
-        * output will be detected on it
-        */
-       intel_prepare_ddi_buffers(dev, PORT_E, true);
+       for (port = PORT_A; port <= PORT_E; port++)
+               intel_prepare_ddi_buffers(dev, port);
 }
 
 static const long hsw_ddi_buf_ctl_values[] = {
@@ -281,25 +267,22 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
        DRM_ERROR("FDI link training failed!\n");
 }
 
-static void intel_ddi_mode_set(struct drm_encoder *encoder,
-                              struct drm_display_mode *mode,
-                              struct drm_display_mode *adjusted_mode)
+static void intel_ddi_mode_set(struct intel_encoder *encoder)
 {
-       struct drm_crtc *crtc = encoder->crtc;
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
-       int port = intel_ddi_get_encoder_port(intel_encoder);
-       int pipe = intel_crtc->pipe;
-       int type = intel_encoder->type;
+       struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+       int port = intel_ddi_get_encoder_port(encoder);
+       int pipe = crtc->pipe;
+       int type = encoder->type;
+       struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode;
 
        DRM_DEBUG_KMS("Preparing DDI mode on port %c, pipe %c\n",
                      port_name(port), pipe_name(pipe));
 
-       intel_crtc->eld_vld = false;
+       crtc->eld_vld = false;
        if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
-               struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+               struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
                struct intel_digital_port *intel_dig_port =
-                       enc_to_dig_port(encoder);
+                       enc_to_dig_port(&encoder->base);
 
                intel_dp->DP = intel_dig_port->saved_port_bits |
                               DDI_BUF_CTL_ENABLE | DDI_BUF_EMP_400MV_0DB_HSW;
@@ -307,17 +290,17 @@ static void intel_ddi_mode_set(struct drm_encoder *encoder,
 
                if (intel_dp->has_audio) {
                        DRM_DEBUG_DRIVER("DP audio on pipe %c on DDI\n",
-                                        pipe_name(intel_crtc->pipe));
+                                        pipe_name(crtc->pipe));
 
                        /* write eld */
                        DRM_DEBUG_DRIVER("DP audio: write eld information\n");
-                       intel_write_eld(encoder, adjusted_mode);
+                       intel_write_eld(&encoder->base, adjusted_mode);
                }
 
                intel_dp_init_link_config(intel_dp);
 
        } else if (type == INTEL_OUTPUT_HDMI) {
-               struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+               struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
 
                if (intel_hdmi->has_audio) {
                        /* Proper support for digital audio needs a new logic
@@ -325,14 +308,14 @@ static void intel_ddi_mode_set(struct drm_encoder *encoder,
                         * patch bombing.
                         */
                        DRM_DEBUG_DRIVER("HDMI audio on pipe %c on DDI\n",
-                                        pipe_name(intel_crtc->pipe));
+                                        pipe_name(crtc->pipe));
 
                        /* write eld */
                        DRM_DEBUG_DRIVER("HDMI audio: write eld information\n");
-                       intel_write_eld(encoder, adjusted_mode);
+                       intel_write_eld(&encoder->base, adjusted_mode);
                }
 
-               intel_hdmi->set_infoframes(encoder, adjusted_mode);
+               intel_hdmi->set_infoframes(&encoder->base, adjusted_mode);
        }
 }
 
@@ -1118,6 +1101,7 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder)
                        intel_dp_stop_link_train(intel_dp);
 
                ironlake_edp_backlight_on(intel_dp);
+               intel_edp_psr_enable(intel_dp);
        }
 
        if (intel_crtc->eld_vld && type != INTEL_OUTPUT_EDP) {
@@ -1148,16 +1132,20 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder)
        if (type == INTEL_OUTPUT_EDP) {
                struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 
+               intel_edp_psr_disable(intel_dp);
                ironlake_edp_backlight_off(intel_dp);
        }
 }
 
 int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv)
 {
-       if (I915_READ(HSW_FUSE_STRAP) & HSW_CDCLK_LIMIT)
+       uint32_t lcpll = I915_READ(LCPLL_CTL);
+
+       if (lcpll & LCPLL_CD_SOURCE_FCLK)
+               return 800000;
+       else if (I915_READ(HSW_FUSE_STRAP) & HSW_CDCLK_LIMIT)
                return 450000;
-       else if ((I915_READ(LCPLL_CTL) & LCPLL_CLK_FREQ_MASK) ==
-                LCPLL_CLK_FREQ_450)
+       else if ((lcpll & LCPLL_CLK_FREQ_MASK) == LCPLL_CLK_FREQ_450)
                return 450000;
        else if (IS_ULT(dev_priv->dev))
                return 337500;
@@ -1309,10 +1297,6 @@ static const struct drm_encoder_funcs intel_ddi_funcs = {
        .destroy = intel_ddi_destroy,
 };
 
-static const struct drm_encoder_helper_funcs intel_ddi_helper_funcs = {
-       .mode_set = intel_ddi_mode_set,
-};
-
 void intel_ddi_init(struct drm_device *dev, enum port port)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1337,9 +1321,9 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
 
        drm_encoder_init(dev, encoder, &intel_ddi_funcs,
                         DRM_MODE_ENCODER_TMDS);
-       drm_encoder_helper_add(encoder, &intel_ddi_helper_funcs);
 
        intel_encoder->compute_config = intel_ddi_compute_config;
+       intel_encoder->mode_set = intel_ddi_mode_set;
        intel_encoder->enable = intel_enable_ddi;
        intel_encoder->pre_enable = intel_ddi_pre_enable;
        intel_encoder->disable = intel_disable_ddi;
index e38b457866535925acaf054b549f5bb07ce180f7..0f40f8e9c3ffe0c060ca0dadf0a8af4e56029f99 100644 (file)
@@ -45,6 +45,15 @@ bool intel_pipe_has_type(struct drm_crtc *crtc, int type);
 static void intel_increase_pllclock(struct drm_crtc *crtc);
 static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
 
+static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
+                               struct intel_crtc_config *pipe_config);
+static void ironlake_crtc_clock_get(struct intel_crtc *crtc,
+                                   struct intel_crtc_config *pipe_config);
+
+static int intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode,
+                         int x, int y, struct drm_framebuffer *old_fb);
+
+
 typedef struct {
        int     min, max;
 } intel_range_t;
@@ -54,7 +63,6 @@ typedef struct {
        int     p2_slow, p2_fast;
 } intel_p2_t;
 
-#define INTEL_P2_NUM                 2
 typedef struct intel_limit intel_limit_t;
 struct intel_limit {
        intel_range_t   dot, vco, n, m, m1, m2, p, p1;
@@ -84,7 +92,7 @@ intel_fdi_link_freq(struct drm_device *dev)
                return 27;
 }
 
-static const intel_limit_t intel_limits_i8xx_dvo = {
+static const intel_limit_t intel_limits_i8xx_dac = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 930000, .max = 1400000 },
        .n = { .min = 3, .max = 16 },
@@ -97,6 +105,19 @@ static const intel_limit_t intel_limits_i8xx_dvo = {
                .p2_slow = 4, .p2_fast = 2 },
 };
 
+static const intel_limit_t intel_limits_i8xx_dvo = {
+       .dot = { .min = 25000, .max = 350000 },
+       .vco = { .min = 930000, .max = 1400000 },
+       .n = { .min = 3, .max = 16 },
+       .m = { .min = 96, .max = 140 },
+       .m1 = { .min = 18, .max = 26 },
+       .m2 = { .min = 6, .max = 16 },
+       .p = { .min = 4, .max = 128 },
+       .p1 = { .min = 2, .max = 33 },
+       .p2 = { .dot_limit = 165000,
+               .p2_slow = 4, .p2_fast = 4 },
+};
+
 static const intel_limit_t intel_limits_i8xx_lvds = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 930000, .max = 1400000 },
@@ -405,8 +426,10 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk)
        } else {
                if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
                        limit = &intel_limits_i8xx_lvds;
-               else
+               else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO))
                        limit = &intel_limits_i8xx_dvo;
+               else
+                       limit = &intel_limits_i8xx_dac;
        }
        return limit;
 }
@@ -667,7 +690,7 @@ vlv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
 {
        u32 p1, p2, m1, m2, vco, bestn, bestm1, bestm2, bestp1, bestp2;
        u32 m, n, fastclk;
-       u32 updrate, minupdate, fracbits, p;
+       u32 updrate, minupdate, p;
        unsigned long bestppm, ppm, absppm;
        int dotclk, flag;
 
@@ -678,7 +701,6 @@ vlv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
        fastclk = dotclk / (2*100);
        updrate = 0;
        minupdate = 19200;
-       fracbits = 1;
        n = p = p1 = p2 = m = m1 = m2 = vco = bestn = 0;
        bestm1 = bestm2 = bestp1 = bestp2 = 0;
 
@@ -892,8 +914,8 @@ static const char *state_string(bool enabled)
 }
 
 /* Only for pre-ILK configs */
-static void assert_pll(struct drm_i915_private *dev_priv,
-                      enum pipe pipe, bool state)
+void assert_pll(struct drm_i915_private *dev_priv,
+               enum pipe pipe, bool state)
 {
        int reg;
        u32 val;
@@ -906,10 +928,8 @@ static void assert_pll(struct drm_i915_private *dev_priv,
             "PLL state assertion failure (expected %s, current %s)\n",
             state_string(state), state_string(cur_state));
 }
-#define assert_pll_enabled(d, p) assert_pll(d, p, true)
-#define assert_pll_disabled(d, p) assert_pll(d, p, false)
 
-static struct intel_shared_dpll *
+struct intel_shared_dpll *
 intel_crtc_to_shared_dpll(struct intel_crtc *crtc)
 {
        struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
@@ -921,9 +941,9 @@ intel_crtc_to_shared_dpll(struct intel_crtc *crtc)
 }
 
 /* For ILK+ */
-static void assert_shared_dpll(struct drm_i915_private *dev_priv,
-                              struct intel_shared_dpll *pll,
-                              bool state)
+void assert_shared_dpll(struct drm_i915_private *dev_priv,
+                       struct intel_shared_dpll *pll,
+                       bool state)
 {
        bool cur_state;
        struct intel_dpll_hw_state hw_state;
@@ -942,8 +962,6 @@ static void assert_shared_dpll(struct drm_i915_private *dev_priv,
             "%s assertion failure (expected %s, current %s)\n",
             pll->name, state_string(state), state_string(cur_state));
 }
-#define assert_shared_dpll_enabled(d, p) assert_shared_dpll(d, p, true)
-#define assert_shared_dpll_disabled(d, p) assert_shared_dpll(d, p, false)
 
 static void assert_fdi_tx(struct drm_i915_private *dev_priv,
                          enum pipe pipe, bool state)
@@ -1007,15 +1025,19 @@ static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
        WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
 }
 
-static void assert_fdi_rx_pll_enabled(struct drm_i915_private *dev_priv,
-                                     enum pipe pipe)
+void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
+                      enum pipe pipe, bool state)
 {
        int reg;
        u32 val;
+       bool cur_state;
 
        reg = FDI_RX_CTL(pipe);
        val = I915_READ(reg);
-       WARN(!(val & FDI_RX_PLL_ENABLE), "FDI RX PLL assertion failure, should be active but is disabled\n");
+       cur_state = !!(val & FDI_RX_PLL_ENABLE);
+       WARN(cur_state != state,
+            "FDI RX PLL assertion failure (expected %s, current %s)\n",
+            state_string(state), state_string(cur_state));
 }
 
 static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
@@ -1111,7 +1133,7 @@ static void assert_planes_disabled(struct drm_i915_private *dev_priv,
        }
 
        /* Need to check both planes against the pipe */
-       for (i = 0; i < INTEL_INFO(dev)->num_pipes; i++) {
+       for_each_pipe(i) {
                reg = DSPCNTR(i);
                val = I915_READ(reg);
                cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
@@ -1301,51 +1323,92 @@ static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
        assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID);
 }
 
-/**
- * intel_enable_pll - enable a PLL
- * @dev_priv: i915 private structure
- * @pipe: pipe PLL to enable
- *
- * Enable @pipe's PLL so we can start pumping pixels from a plane.  Check to
- * make sure the PLL reg is writable first though, since the panel write
- * protect mechanism may be enabled.
- *
- * Note!  This is for pre-ILK only.
- *
- * Unfortunately needed by dvo_ns2501 since the dvo depends on it running.
- */
-static void intel_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
+static void vlv_enable_pll(struct intel_crtc *crtc)
 {
-       int reg;
-       u32 val;
+       struct drm_device *dev = crtc->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int reg = DPLL(crtc->pipe);
+       u32 dpll = crtc->config.dpll_hw_state.dpll;
 
-       assert_pipe_disabled(dev_priv, pipe);
+       assert_pipe_disabled(dev_priv, crtc->pipe);
 
        /* No really, not for ILK+ */
-       BUG_ON(!IS_VALLEYVIEW(dev_priv->dev) && dev_priv->info->gen >= 5);
+       BUG_ON(!IS_VALLEYVIEW(dev_priv->dev));
 
        /* PLL is protected by panel, make sure we can write it */
        if (IS_MOBILE(dev_priv->dev) && !IS_I830(dev_priv->dev))
-               assert_panel_unlocked(dev_priv, pipe);
+               assert_panel_unlocked(dev_priv, crtc->pipe);
 
-       reg = DPLL(pipe);
-       val = I915_READ(reg);
-       val |= DPLL_VCO_ENABLE;
+       I915_WRITE(reg, dpll);
+       POSTING_READ(reg);
+       udelay(150);
+
+       if (wait_for(((I915_READ(reg) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
+               DRM_ERROR("DPLL %d failed to lock\n", crtc->pipe);
+
+       I915_WRITE(DPLL_MD(crtc->pipe), crtc->config.dpll_hw_state.dpll_md);
+       POSTING_READ(DPLL_MD(crtc->pipe));
 
        /* We do this three times for luck */
-       I915_WRITE(reg, val);
+       I915_WRITE(reg, dpll);
        POSTING_READ(reg);
        udelay(150); /* wait for warmup */
-       I915_WRITE(reg, val);
+       I915_WRITE(reg, dpll);
        POSTING_READ(reg);
        udelay(150); /* wait for warmup */
-       I915_WRITE(reg, val);
+       I915_WRITE(reg, dpll);
+       POSTING_READ(reg);
+       udelay(150); /* wait for warmup */
+}
+
+static void i9xx_enable_pll(struct intel_crtc *crtc)
+{
+       struct drm_device *dev = crtc->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int reg = DPLL(crtc->pipe);
+       u32 dpll = crtc->config.dpll_hw_state.dpll;
+
+       assert_pipe_disabled(dev_priv, crtc->pipe);
+
+       /* No really, not for ILK+ */
+       BUG_ON(dev_priv->info->gen >= 5);
+
+       /* PLL is protected by panel, make sure we can write it */
+       if (IS_MOBILE(dev) && !IS_I830(dev))
+               assert_panel_unlocked(dev_priv, crtc->pipe);
+
+       I915_WRITE(reg, dpll);
+
+       /* Wait for the clocks to stabilize. */
+       POSTING_READ(reg);
+       udelay(150);
+
+       if (INTEL_INFO(dev)->gen >= 4) {
+               I915_WRITE(DPLL_MD(crtc->pipe),
+                          crtc->config.dpll_hw_state.dpll_md);
+       } else {
+               /* The pixel multiplier can only be updated once the
+                * DPLL is enabled and the clocks are stable.
+                *
+                * So write it again.
+                */
+               I915_WRITE(reg, dpll);
+       }
+
+       /* We do this three times for luck */
+       I915_WRITE(reg, dpll);
+       POSTING_READ(reg);
+       udelay(150); /* wait for warmup */
+       I915_WRITE(reg, dpll);
+       POSTING_READ(reg);
+       udelay(150); /* wait for warmup */
+       I915_WRITE(reg, dpll);
        POSTING_READ(reg);
        udelay(150); /* wait for warmup */
 }
 
 /**
- * intel_disable_pll - disable a PLL
+ * i9xx_disable_pll - disable a PLL
  * @dev_priv: i915 private structure
  * @pipe: pipe PLL to disable
  *
@@ -1353,11 +1416,8 @@ static void intel_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
  *
  * Note!  This is for pre-ILK only.
  */
-static void intel_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
+static void i9xx_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
 {
-       int reg;
-       u32 val;
-
        /* Don't disable pipe A or pipe A PLLs if needed */
        if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
                return;
@@ -1365,11 +1425,8 @@ static void intel_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
        /* Make sure the pipe isn't still relying on us */
        assert_pipe_disabled(dev_priv, pipe);
 
-       reg = DPLL(pipe);
-       val = I915_READ(reg);
-       val &= ~DPLL_VCO_ENABLE;
-       I915_WRITE(reg, val);
-       POSTING_READ(reg);
+       I915_WRITE(DPLL(pipe), 0);
+       POSTING_READ(DPLL(pipe));
 }
 
 void vlv_wait_port_ready(struct drm_i915_private *dev_priv, int port)
@@ -1819,7 +1876,7 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
        return 0;
 
 err_unpin:
-       i915_gem_object_unpin(obj);
+       i915_gem_object_unpin_from_display_plane(obj);
 err_interruptible:
        dev_priv->mm.interruptible = true;
        return ret;
@@ -1828,7 +1885,7 @@ err_interruptible:
 void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
 {
        i915_gem_object_unpin_fence(obj);
-       i915_gem_object_unpin(obj);
+       i915_gem_object_unpin_from_display_plane(obj);
 }
 
 /* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel
@@ -1942,16 +1999,17 @@ static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
                intel_crtc->dspaddr_offset = linear_offset;
        }
 
-       DRM_DEBUG_KMS("Writing base %08X %08lX %d %d %d\n",
-                     obj->gtt_offset, linear_offset, x, y, fb->pitches[0]);
+       DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
+                     i915_gem_obj_ggtt_offset(obj), linear_offset, x, y,
+                     fb->pitches[0]);
        I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
        if (INTEL_INFO(dev)->gen >= 4) {
                I915_MODIFY_DISPBASE(DSPSURF(plane),
-                                    obj->gtt_offset + intel_crtc->dspaddr_offset);
+                                    i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
                I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
                I915_WRITE(DSPLINOFF(plane), linear_offset);
        } else
-               I915_WRITE(DSPADDR(plane), obj->gtt_offset + linear_offset);
+               I915_WRITE(DSPADDR(plane), i915_gem_obj_ggtt_offset(obj) + linear_offset);
        POSTING_READ(reg);
 
        return 0;
@@ -2031,11 +2089,12 @@ static int ironlake_update_plane(struct drm_crtc *crtc,
                                               fb->pitches[0]);
        linear_offset -= intel_crtc->dspaddr_offset;
 
-       DRM_DEBUG_KMS("Writing base %08X %08lX %d %d %d\n",
-                     obj->gtt_offset, linear_offset, x, y, fb->pitches[0]);
+       DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
+                     i915_gem_obj_ggtt_offset(obj), linear_offset, x, y,
+                     fb->pitches[0]);
        I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
        I915_MODIFY_DISPBASE(DSPSURF(plane),
-                            obj->gtt_offset + intel_crtc->dspaddr_offset);
+                            i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
        if (IS_HASWELL(dev)) {
                I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
        } else {
@@ -2183,6 +2242,20 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
                return ret;
        }
 
+       /* Update pipe size and adjust fitter if needed */
+       if (i915_fastboot) {
+               I915_WRITE(PIPESRC(intel_crtc->pipe),
+                          ((crtc->mode.hdisplay - 1) << 16) |
+                          (crtc->mode.vdisplay - 1));
+               if (!intel_crtc->config.pch_pfit.size &&
+                   (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) ||
+                    intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
+                       I915_WRITE(PF_CTL(intel_crtc->pipe), 0);
+                       I915_WRITE(PF_WIN_POS(intel_crtc->pipe), 0);
+                       I915_WRITE(PF_WIN_SZ(intel_crtc->pipe), 0);
+               }
+       }
+
        ret = dev_priv->display.update_plane(crtc, fb, x, y);
        if (ret) {
                intel_unpin_fb_obj(to_intel_framebuffer(fb)->obj);
@@ -2203,6 +2276,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
        }
 
        intel_update_fbc(dev);
+       intel_edp_psr_update(dev);
        mutex_unlock(&dev->struct_mutex);
 
        intel_crtc_update_sarea_pos(crtc, x, y);
@@ -2523,7 +2597,7 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int pipe = intel_crtc->pipe;
-       u32 reg, temp, i;
+       u32 reg, temp, i, j;
 
        /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
           for train result */
@@ -2539,97 +2613,99 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
        DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
                      I915_READ(FDI_RX_IIR(pipe)));
 
-       /* enable CPU FDI TX and PCH FDI RX */
-       reg = FDI_TX_CTL(pipe);
-       temp = I915_READ(reg);
-       temp &= ~FDI_DP_PORT_WIDTH_MASK;
-       temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
-       temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
-       temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
-       temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
-       temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
-       temp |= FDI_COMPOSITE_SYNC;
-       I915_WRITE(reg, temp | FDI_TX_ENABLE);
-
-       I915_WRITE(FDI_RX_MISC(pipe),
-                  FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
-
-       reg = FDI_RX_CTL(pipe);
-       temp = I915_READ(reg);
-       temp &= ~FDI_LINK_TRAIN_AUTO;
-       temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
-       temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
-       temp |= FDI_COMPOSITE_SYNC;
-       I915_WRITE(reg, temp | FDI_RX_ENABLE);
+       /* Try each vswing and preemphasis setting twice before moving on */
+       for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
+               /* disable first in case we need to retry */
+               reg = FDI_TX_CTL(pipe);
+               temp = I915_READ(reg);
+               temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
+               temp &= ~FDI_TX_ENABLE;
+               I915_WRITE(reg, temp);
 
-       POSTING_READ(reg);
-       udelay(150);
+               reg = FDI_RX_CTL(pipe);
+               temp = I915_READ(reg);
+               temp &= ~FDI_LINK_TRAIN_AUTO;
+               temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+               temp &= ~FDI_RX_ENABLE;
+               I915_WRITE(reg, temp);
 
-       for (i = 0; i < 4; i++) {
+               /* enable CPU FDI TX and PCH FDI RX */
                reg = FDI_TX_CTL(pipe);
                temp = I915_READ(reg);
+               temp &= ~FDI_DP_PORT_WIDTH_MASK;
+               temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
+               temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
                temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
-               temp |= snb_b_fdi_train_param[i];
-               I915_WRITE(reg, temp);
+               temp |= snb_b_fdi_train_param[j/2];
+               temp |= FDI_COMPOSITE_SYNC;
+               I915_WRITE(reg, temp | FDI_TX_ENABLE);
 
-               POSTING_READ(reg);
-               udelay(500);
+               I915_WRITE(FDI_RX_MISC(pipe),
+                          FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
 
-               reg = FDI_RX_IIR(pipe);
+               reg = FDI_RX_CTL(pipe);
                temp = I915_READ(reg);
-               DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
-
-               if (temp & FDI_RX_BIT_LOCK ||
-                   (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
-                       I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
-                       DRM_DEBUG_KMS("FDI train 1 done, level %i.\n", i);
-                       break;
-               }
-       }
-       if (i == 4)
-               DRM_ERROR("FDI train 1 fail!\n");
+               temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
+               temp |= FDI_COMPOSITE_SYNC;
+               I915_WRITE(reg, temp | FDI_RX_ENABLE);
 
-       /* Train 2 */
-       reg = FDI_TX_CTL(pipe);
-       temp = I915_READ(reg);
-       temp &= ~FDI_LINK_TRAIN_NONE_IVB;
-       temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
-       temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
-       temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
-       I915_WRITE(reg, temp);
+               POSTING_READ(reg);
+               udelay(1); /* should be 0.5us */
 
-       reg = FDI_RX_CTL(pipe);
-       temp = I915_READ(reg);
-       temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
-       temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
-       I915_WRITE(reg, temp);
+               for (i = 0; i < 4; i++) {
+                       reg = FDI_RX_IIR(pipe);
+                       temp = I915_READ(reg);
+                       DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
 
-       POSTING_READ(reg);
-       udelay(150);
+                       if (temp & FDI_RX_BIT_LOCK ||
+                           (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
+                               I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
+                               DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
+                                             i);
+                               break;
+                       }
+                       udelay(1); /* should be 0.5us */
+               }
+               if (i == 4) {
+                       DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
+                       continue;
+               }
 
-       for (i = 0; i < 4; i++) {
+               /* Train 2 */
                reg = FDI_TX_CTL(pipe);
                temp = I915_READ(reg);
-               temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
-               temp |= snb_b_fdi_train_param[i];
+               temp &= ~FDI_LINK_TRAIN_NONE_IVB;
+               temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
+               I915_WRITE(reg, temp);
+
+               reg = FDI_RX_CTL(pipe);
+               temp = I915_READ(reg);
+               temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+               temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
                I915_WRITE(reg, temp);
 
                POSTING_READ(reg);
-               udelay(500);
+               udelay(2); /* should be 1.5us */
 
-               reg = FDI_RX_IIR(pipe);
-               temp = I915_READ(reg);
-               DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+               for (i = 0; i < 4; i++) {
+                       reg = FDI_RX_IIR(pipe);
+                       temp = I915_READ(reg);
+                       DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
 
-               if (temp & FDI_RX_SYMBOL_LOCK) {
-                       I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
-                       DRM_DEBUG_KMS("FDI train 2 done, level %i.\n", i);
-                       break;
+                       if (temp & FDI_RX_SYMBOL_LOCK ||
+                           (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
+                               I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
+                               DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
+                                             i);
+                               goto train_done;
+                       }
+                       udelay(2); /* should be 1.5us */
                }
+               if (i == 4)
+                       DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
        }
-       if (i == 4)
-               DRM_ERROR("FDI train 2 fail!\n");
 
+train_done:
        DRM_DEBUG_KMS("FDI train done.\n");
 }
 
@@ -2927,15 +3003,8 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
        /* For PCH output, training FDI link */
        dev_priv->display.fdi_link_train(crtc);
 
-       /* XXX: pch pll's can be enabled any time before we enable the PCH
-        * transcoder, and we actually should do this to not upset any PCH
-        * transcoder that already use the clock when we share it.
-        *
-        * Note that enable_shared_dpll tries to do the right thing, but
-        * get_shared_dpll unconditionally resets the pll - we need that to have
-        * the right LVDS enable sequence. */
-       ironlake_enable_shared_dpll(intel_crtc);
-
+       /* We need to program the right clock selection before writing the pixel
+        * mutliplier into the DPLL. */
        if (HAS_PCH_CPT(dev)) {
                u32 sel;
 
@@ -2949,6 +3018,15 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
                I915_WRITE(PCH_DPLL_SEL, temp);
        }
 
+       /* XXX: pch pll's can be enabled any time before we enable the PCH
+        * transcoder, and we actually should do this to not upset any PCH
+        * transcoder that already use the clock when we share it.
+        *
+        * Note that enable_shared_dpll tries to do the right thing, but
+        * get_shared_dpll unconditionally resets the pll - we need that to have
+        * the right LVDS enable sequence. */
+       ironlake_enable_shared_dpll(intel_crtc);
+
        /* set transcoder timing, panel must allow it */
        assert_panel_unlocked(dev_priv, pipe);
        ironlake_pch_transcoder_set_timings(intel_crtc, pipe);
@@ -3031,7 +3109,7 @@ static void intel_put_shared_dpll(struct intel_crtc *crtc)
        crtc->config.shared_dpll = DPLL_ID_PRIVATE;
 }
 
-static struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc, u32 dpll, u32 fp)
+static struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc)
 {
        struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
        struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
@@ -3045,7 +3123,7 @@ static struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
 
        if (HAS_PCH_IBX(dev_priv->dev)) {
                /* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
-               i = crtc->pipe;
+               i = (enum intel_dpll_id) crtc->pipe;
                pll = &dev_priv->shared_dplls[i];
 
                DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
@@ -3061,8 +3139,8 @@ static struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
                if (pll->refcount == 0)
                        continue;
 
-               if (dpll == (I915_READ(PCH_DPLL(pll->id)) & 0x7fffffff) &&
-                   fp == I915_READ(PCH_FP0(pll->id))) {
+               if (memcmp(&crtc->config.dpll_hw_state, &pll->hw_state,
+                          sizeof(pll->hw_state)) == 0) {
                        DRM_DEBUG_KMS("CRTC:%d sharing existing %s (refcount %d, ative %d)\n",
                                      crtc->base.base.id,
                                      pll->name, pll->refcount, pll->active);
@@ -3096,13 +3174,7 @@ found:
                WARN_ON(pll->on);
                assert_shared_dpll_disabled(dev_priv, pll);
 
-               /* Wait for the clocks to stabilize before rewriting the regs */
-               I915_WRITE(PCH_DPLL(pll->id), dpll & ~DPLL_VCO_ENABLE);
-               POSTING_READ(PCH_DPLL(pll->id));
-               udelay(150);
-
-               I915_WRITE(PCH_FP0(pll->id), fp);
-               I915_WRITE(PCH_DPLL(pll->id), dpll & ~DPLL_VCO_ENABLE);
+               pll->mode_set(dev_priv, pll);
        }
        pll->refcount++;
 
@@ -3174,7 +3246,6 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
        struct intel_encoder *encoder;
        int pipe = intel_crtc->pipe;
        int plane = intel_crtc->plane;
-       u32 temp;
 
        WARN_ON(!crtc->enabled);
 
@@ -3188,12 +3259,9 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
 
        intel_update_watermarks(dev);
 
-       if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
-               temp = I915_READ(PCH_LVDS);
-               if ((temp & LVDS_PORT_EN) == 0)
-                       I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
-       }
-
+       for_each_encoder_on_crtc(dev, crtc, encoder)
+               if (encoder->pre_enable)
+                       encoder->pre_enable(encoder);
 
        if (intel_crtc->config.has_pch_encoder) {
                /* Note: FDI PLL enabling _must_ be done before we enable the
@@ -3205,10 +3273,6 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
                assert_fdi_rx_disabled(dev_priv, pipe);
        }
 
-       for_each_encoder_on_crtc(dev, crtc, encoder)
-               if (encoder->pre_enable)
-                       encoder->pre_enable(encoder);
-
        ironlake_pfit_enable(intel_crtc);
 
        /*
@@ -3389,7 +3453,7 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
        intel_crtc_wait_for_pending_flips(crtc);
        drm_vblank_off(dev, pipe);
 
-       if (dev_priv->cfb_plane == plane)
+       if (dev_priv->fbc.plane == plane)
                intel_disable_fbc(dev);
 
        intel_crtc_update_cursor(crtc, false);
@@ -3462,7 +3526,7 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
        drm_vblank_off(dev, pipe);
 
        /* FBC must be disabled before disabling the plane on HSW. */
-       if (dev_priv->cfb_plane == plane)
+       if (dev_priv->fbc.plane == plane)
                intel_disable_fbc(dev);
 
        hsw_disable_ips(intel_crtc);
@@ -3593,22 +3657,16 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
        intel_crtc->active = true;
        intel_update_watermarks(dev);
 
-       mutex_lock(&dev_priv->dpio_lock);
-
        for_each_encoder_on_crtc(dev, crtc, encoder)
                if (encoder->pre_pll_enable)
                        encoder->pre_pll_enable(encoder);
 
-       intel_enable_pll(dev_priv, pipe);
+       vlv_enable_pll(intel_crtc);
 
        for_each_encoder_on_crtc(dev, crtc, encoder)
                if (encoder->pre_enable)
                        encoder->pre_enable(encoder);
 
-       /* VLV wants encoder enabling _before_ the pipe is up. */
-       for_each_encoder_on_crtc(dev, crtc, encoder)
-               encoder->enable(encoder);
-
        i9xx_pfit_enable(intel_crtc);
 
        intel_crtc_load_lut(crtc);
@@ -3620,7 +3678,8 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
 
        intel_update_fbc(dev);
 
-       mutex_unlock(&dev_priv->dpio_lock);
+       for_each_encoder_on_crtc(dev, crtc, encoder)
+               encoder->enable(encoder);
 }
 
 static void i9xx_crtc_enable(struct drm_crtc *crtc)
@@ -3640,12 +3699,12 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
        intel_crtc->active = true;
        intel_update_watermarks(dev);
 
-       intel_enable_pll(dev_priv, pipe);
-
        for_each_encoder_on_crtc(dev, crtc, encoder)
                if (encoder->pre_enable)
                        encoder->pre_enable(encoder);
 
+       i9xx_enable_pll(intel_crtc);
+
        i9xx_pfit_enable(intel_crtc);
 
        intel_crtc_load_lut(crtc);
@@ -3701,7 +3760,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
        intel_crtc_wait_for_pending_flips(crtc);
        drm_vblank_off(dev, pipe);
 
-       if (dev_priv->cfb_plane == plane)
+       if (dev_priv->fbc.plane == plane)
                intel_disable_fbc(dev);
 
        intel_crtc_dpms_overlay(intel_crtc, false);
@@ -3717,7 +3776,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
                if (encoder->post_disable)
                        encoder->post_disable(encoder);
 
-       intel_disable_pll(dev_priv, pipe);
+       i9xx_disable_pll(dev_priv, pipe);
 
        intel_crtc->active = false;
        intel_update_fbc(dev);
@@ -3817,16 +3876,6 @@ static void intel_crtc_disable(struct drm_crtc *crtc)
        }
 }
 
-void intel_modeset_disable(struct drm_device *dev)
-{
-       struct drm_crtc *crtc;
-
-       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-               if (crtc->enabled)
-                       intel_crtc_disable(crtc);
-       }
-}
-
 void intel_encoder_destroy(struct drm_encoder *encoder)
 {
        struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
@@ -3835,10 +3884,10 @@ void intel_encoder_destroy(struct drm_encoder *encoder)
        kfree(intel_encoder);
 }
 
-/* Simple dpms helper for encodres with just one connector, no cloning and only
+/* Simple dpms helper for encoders with just one connector, no cloning and only
  * one kind of off state. It clamps all !ON modes to fully OFF and changes the
  * state of the entire output pipe. */
-void intel_encoder_dpms(struct intel_encoder *encoder, int mode)
+static void intel_encoder_dpms(struct intel_encoder *encoder, int mode)
 {
        if (mode == DRM_MODE_DPMS_ON) {
                encoder->connectors_active = true;
@@ -4032,7 +4081,7 @@ static void hsw_compute_ips_config(struct intel_crtc *crtc,
 {
        pipe_config->ips_enabled = i915_enable_ips &&
                                   hsw_crtc_supports_ips(crtc) &&
-                                  pipe_config->pipe_bpp == 24;
+                                  pipe_config->pipe_bpp <= 24;
 }
 
 static int intel_crtc_compute_config(struct intel_crtc *crtc,
@@ -4048,12 +4097,6 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
                        return -EINVAL;
        }
 
-       /* All interlaced capable intel hw wants timings in frames. Note though
-        * that intel_lvds_mode_fixup does some funny tricks with the crtc
-        * timings, so we need to be careful not to clobber these.*/
-       if (!pipe_config->timings_set)
-               drm_mode_set_crtcinfo(adjusted_mode, 0);
-
        /* Cantiga+ cannot handle modes with a hsync front porch of 0.
         * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
         */
@@ -4103,6 +4146,30 @@ static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
        return 200000;
 }
 
+static int pnv_get_display_clock_speed(struct drm_device *dev)
+{
+       u16 gcfgc = 0;
+
+       pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
+
+       switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
+       case GC_DISPLAY_CLOCK_267_MHZ_PNV:
+               return 267000;
+       case GC_DISPLAY_CLOCK_333_MHZ_PNV:
+               return 333000;
+       case GC_DISPLAY_CLOCK_444_MHZ_PNV:
+               return 444000;
+       case GC_DISPLAY_CLOCK_200_MHZ_PNV:
+               return 200000;
+       default:
+               DRM_ERROR("Unknown pnv display core clock 0x%04x\n", gcfgc);
+       case GC_DISPLAY_CLOCK_133_MHZ_PNV:
+               return 133000;
+       case GC_DISPLAY_CLOCK_167_MHZ_PNV:
+               return 167000;
+       }
+}
+
 static int i915gm_get_display_clock_speed(struct drm_device *dev)
 {
        u16 gcfgc = 0;
@@ -4266,14 +4333,17 @@ static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
        }
 
        I915_WRITE(FP0(pipe), fp);
+       crtc->config.dpll_hw_state.fp0 = fp;
 
        crtc->lowfreq_avail = false;
        if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
            reduced_clock && i915_powersave) {
                I915_WRITE(FP1(pipe), fp2);
+               crtc->config.dpll_hw_state.fp1 = fp2;
                crtc->lowfreq_avail = true;
        } else {
                I915_WRITE(FP1(pipe), fp);
+               crtc->config.dpll_hw_state.fp1 = fp;
        }
 }
 
@@ -4351,17 +4421,13 @@ static void vlv_update_pll(struct intel_crtc *crtc)
 {
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_encoder *encoder;
        int pipe = crtc->pipe;
        u32 dpll, mdiv;
        u32 bestn, bestm1, bestm2, bestp1, bestp2;
-       bool is_hdmi;
        u32 coreclk, reg_val, dpll_md;
 
        mutex_lock(&dev_priv->dpio_lock);
 
-       is_hdmi = intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI);
-
        bestn = crtc->config.dpll.n;
        bestm1 = crtc->config.dpll.m1;
        bestm2 = crtc->config.dpll.m2;
@@ -4407,7 +4473,7 @@ static void vlv_update_pll(struct intel_crtc *crtc)
            intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_ANALOG) ||
            intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI))
                vlv_dpio_write(dev_priv, DPIO_LPF_COEFF(pipe),
-                                0x005f0021);
+                                0x009f0003);
        else
                vlv_dpio_write(dev_priv, DPIO_LPF_COEFF(pipe),
                                 0x00d0000f);
@@ -4440,10 +4506,6 @@ static void vlv_update_pll(struct intel_crtc *crtc)
 
        vlv_dpio_write(dev_priv, DPIO_PLL_CML(pipe), 0x87871000);
 
-       for_each_encoder_on_crtc(dev, &crtc->base, encoder)
-               if (encoder->pre_pll_enable)
-                       encoder->pre_pll_enable(encoder);
-
        /* Enable DPIO clock input */
        dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV |
                DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV;
@@ -4451,17 +4513,11 @@ static void vlv_update_pll(struct intel_crtc *crtc)
                dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
 
        dpll |= DPLL_VCO_ENABLE;
-       I915_WRITE(DPLL(pipe), dpll);
-       POSTING_READ(DPLL(pipe));
-       udelay(150);
-
-       if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
-               DRM_ERROR("DPLL %d failed to lock\n", pipe);
+       crtc->config.dpll_hw_state.dpll = dpll;
 
        dpll_md = (crtc->config.pixel_multiplier - 1)
                << DPLL_MD_UDI_MULTIPLIER_SHIFT;
-       I915_WRITE(DPLL_MD(pipe), dpll_md);
-       POSTING_READ(DPLL_MD(pipe));
+       crtc->config.dpll_hw_state.dpll_md = dpll_md;
 
        if (crtc->config.has_dp_encoder)
                intel_dp_set_m_n(crtc);
@@ -4475,8 +4531,6 @@ static void i9xx_update_pll(struct intel_crtc *crtc,
 {
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_encoder *encoder;
-       int pipe = crtc->pipe;
        u32 dpll;
        bool is_sdvo;
        struct dpll *clock = &crtc->config.dpll;
@@ -4499,10 +4553,10 @@ static void i9xx_update_pll(struct intel_crtc *crtc,
        }
 
        if (is_sdvo)
-               dpll |= DPLL_DVO_HIGH_SPEED;
+               dpll |= DPLL_SDVO_HIGH_SPEED;
 
        if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT))
-               dpll |= DPLL_DVO_HIGH_SPEED;
+               dpll |= DPLL_SDVO_HIGH_SPEED;
 
        /* compute bitmask from p1 value */
        if (IS_PINEVIEW(dev))
@@ -4538,35 +4592,16 @@ static void i9xx_update_pll(struct intel_crtc *crtc,
                dpll |= PLL_REF_INPUT_DREFCLK;
 
        dpll |= DPLL_VCO_ENABLE;
-       I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
-       POSTING_READ(DPLL(pipe));
-       udelay(150);
-
-       for_each_encoder_on_crtc(dev, &crtc->base, encoder)
-               if (encoder->pre_pll_enable)
-                       encoder->pre_pll_enable(encoder);
-
-       if (crtc->config.has_dp_encoder)
-               intel_dp_set_m_n(crtc);
-
-       I915_WRITE(DPLL(pipe), dpll);
-
-       /* Wait for the clocks to stabilize. */
-       POSTING_READ(DPLL(pipe));
-       udelay(150);
+       crtc->config.dpll_hw_state.dpll = dpll;
 
        if (INTEL_INFO(dev)->gen >= 4) {
                u32 dpll_md = (crtc->config.pixel_multiplier - 1)
                        << DPLL_MD_UDI_MULTIPLIER_SHIFT;
-               I915_WRITE(DPLL_MD(pipe), dpll_md);
-       } else {
-               /* The pixel multiplier can only be updated once the
-                * DPLL is enabled and the clocks are stable.
-                *
-                * So write it again.
-                */
-               I915_WRITE(DPLL(pipe), dpll);
+               crtc->config.dpll_hw_state.dpll_md = dpll_md;
        }
+
+       if (crtc->config.has_dp_encoder)
+               intel_dp_set_m_n(crtc);
 }
 
 static void i8xx_update_pll(struct intel_crtc *crtc,
@@ -4575,8 +4610,6 @@ static void i8xx_update_pll(struct intel_crtc *crtc,
 {
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_encoder *encoder;
-       int pipe = crtc->pipe;
        u32 dpll;
        struct dpll *clock = &crtc->config.dpll;
 
@@ -4595,6 +4628,9 @@ static void i8xx_update_pll(struct intel_crtc *crtc,
                        dpll |= PLL_P2_DIVIDE_BY_4;
        }
 
+       if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DVO))
+               dpll |= DPLL_DVO_2X_MODE;
+
        if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
                 intel_panel_use_ssc(dev_priv) && num_connectors < 2)
                dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
@@ -4602,26 +4638,7 @@ static void i8xx_update_pll(struct intel_crtc *crtc,
                dpll |= PLL_REF_INPUT_DREFCLK;
 
        dpll |= DPLL_VCO_ENABLE;
-       I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
-       POSTING_READ(DPLL(pipe));
-       udelay(150);
-
-       for_each_encoder_on_crtc(dev, &crtc->base, encoder)
-               if (encoder->pre_pll_enable)
-                       encoder->pre_pll_enable(encoder);
-
-       I915_WRITE(DPLL(pipe), dpll);
-
-       /* Wait for the clocks to stabilize. */
-       POSTING_READ(DPLL(pipe));
-       udelay(150);
-
-       /* The pixel multiplier can only be updated once the
-        * DPLL is enabled and the clocks are stable.
-        *
-        * So write it again.
-        */
-       I915_WRITE(DPLL(pipe), dpll);
+       crtc->config.dpll_hw_state.dpll = dpll;
 }
 
 static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
@@ -4727,6 +4744,27 @@ static void intel_get_pipe_timings(struct intel_crtc *crtc,
        pipe_config->requested_mode.hdisplay = ((tmp >> 16) & 0xffff) + 1;
 }
 
+static void intel_crtc_mode_from_pipe_config(struct intel_crtc *intel_crtc,
+                                            struct intel_crtc_config *pipe_config)
+{
+       struct drm_crtc *crtc = &intel_crtc->base;
+
+       crtc->mode.hdisplay = pipe_config->adjusted_mode.crtc_hdisplay;
+       crtc->mode.htotal = pipe_config->adjusted_mode.crtc_htotal;
+       crtc->mode.hsync_start = pipe_config->adjusted_mode.crtc_hsync_start;
+       crtc->mode.hsync_end = pipe_config->adjusted_mode.crtc_hsync_end;
+
+       crtc->mode.vdisplay = pipe_config->adjusted_mode.crtc_vdisplay;
+       crtc->mode.vtotal = pipe_config->adjusted_mode.crtc_vtotal;
+       crtc->mode.vsync_start = pipe_config->adjusted_mode.crtc_vsync_start;
+       crtc->mode.vsync_end = pipe_config->adjusted_mode.crtc_vsync_end;
+
+       crtc->mode.flags = pipe_config->adjusted_mode.flags;
+
+       crtc->mode.clock = pipe_config->adjusted_mode.clock;
+       crtc->mode.flags |= pipe_config->adjusted_mode.flags;
+}
+
 static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
 {
        struct drm_device *dev = intel_crtc->base.dev;
@@ -4939,7 +4977,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t tmp;
 
-       pipe_config->cpu_transcoder = crtc->pipe;
+       pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
        pipe_config->shared_dpll = DPLL_ID_PRIVATE;
 
        tmp = I915_READ(PIPECONF(crtc->pipe));
@@ -4955,6 +4993,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
                pipe_config->pixel_multiplier =
                        ((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
                         >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
+               pipe_config->dpll_hw_state.dpll_md = tmp;
        } else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
                tmp = I915_READ(DPLL(crtc->pipe));
                pipe_config->pixel_multiplier =
@@ -4966,6 +5005,16 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
                 * function. */
                pipe_config->pixel_multiplier = 1;
        }
+       pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
+       if (!IS_VALLEYVIEW(dev)) {
+               pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
+               pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
+       } else {
+               /* Mask out read-only status bits. */
+               pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
+                                                    DPLL_PORTC_READY_MASK |
+                                                    DPLL_PORTB_READY_MASK);
+       }
 
        return true;
 }
@@ -5119,74 +5168,37 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
        BUG_ON(val != final);
 }
 
-/* Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O. */
-static void lpt_init_pch_refclk(struct drm_device *dev)
+static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_mode_config *mode_config = &dev->mode_config;
-       struct intel_encoder *encoder;
-       bool has_vga = false;
-       bool is_sdv = false;
-       u32 tmp;
-
-       list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
-               switch (encoder->type) {
-               case INTEL_OUTPUT_ANALOG:
-                       has_vga = true;
-                       break;
-               }
-       }
-
-       if (!has_vga)
-               return;
-
-       mutex_lock(&dev_priv->dpio_lock);
-
-       /* XXX: Rip out SDV support once Haswell ships for real. */
-       if (IS_HASWELL(dev) && (dev->pci_device & 0xFF00) == 0x0C00)
-               is_sdv = true;
-
-       tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
-       tmp &= ~SBI_SSCCTL_DISABLE;
-       tmp |= SBI_SSCCTL_PATHALT;
-       intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
+       uint32_t tmp;
 
-       udelay(24);
+       tmp = I915_READ(SOUTH_CHICKEN2);
+       tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
+       I915_WRITE(SOUTH_CHICKEN2, tmp);
 
-       tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
-       tmp &= ~SBI_SSCCTL_PATHALT;
-       intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
-
-       if (!is_sdv) {
-               tmp = I915_READ(SOUTH_CHICKEN2);
-               tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
-               I915_WRITE(SOUTH_CHICKEN2, tmp);
+       if (wait_for_atomic_us(I915_READ(SOUTH_CHICKEN2) &
+                              FDI_MPHY_IOSFSB_RESET_STATUS, 100))
+               DRM_ERROR("FDI mPHY reset assert timeout\n");
 
-               if (wait_for_atomic_us(I915_READ(SOUTH_CHICKEN2) &
-                                      FDI_MPHY_IOSFSB_RESET_STATUS, 100))
-                       DRM_ERROR("FDI mPHY reset assert timeout\n");
+       tmp = I915_READ(SOUTH_CHICKEN2);
+       tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
+       I915_WRITE(SOUTH_CHICKEN2, tmp);
 
-               tmp = I915_READ(SOUTH_CHICKEN2);
-               tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
-               I915_WRITE(SOUTH_CHICKEN2, tmp);
+       if (wait_for_atomic_us((I915_READ(SOUTH_CHICKEN2) &
+                               FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
+               DRM_ERROR("FDI mPHY reset de-assert timeout\n");
+}
 
-               if (wait_for_atomic_us((I915_READ(SOUTH_CHICKEN2) &
-                                       FDI_MPHY_IOSFSB_RESET_STATUS) == 0,
-                                      100))
-                       DRM_ERROR("FDI mPHY reset de-assert timeout\n");
-       }
+/* WaMPhyProgramming:hsw */
+static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
+{
+       uint32_t tmp;
 
        tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
        tmp &= ~(0xFF << 24);
        tmp |= (0x12 << 24);
        intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);
 
-       if (is_sdv) {
-               tmp = intel_sbi_read(dev_priv, 0x800C, SBI_MPHY);
-               tmp |= 0x7FFF;
-               intel_sbi_write(dev_priv, 0x800C, tmp, SBI_MPHY);
-       }
-
        tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
        tmp |= (1 << 11);
        intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);
@@ -5195,24 +5207,6 @@ static void lpt_init_pch_refclk(struct drm_device *dev)
        tmp |= (1 << 11);
        intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);
 
-       if (is_sdv) {
-               tmp = intel_sbi_read(dev_priv, 0x2038, SBI_MPHY);
-               tmp |= (0x3F << 24) | (0xF << 20) | (0xF << 16);
-               intel_sbi_write(dev_priv, 0x2038, tmp, SBI_MPHY);
-
-               tmp = intel_sbi_read(dev_priv, 0x2138, SBI_MPHY);
-               tmp |= (0x3F << 24) | (0xF << 20) | (0xF << 16);
-               intel_sbi_write(dev_priv, 0x2138, tmp, SBI_MPHY);
-
-               tmp = intel_sbi_read(dev_priv, 0x203C, SBI_MPHY);
-               tmp |= (0x3F << 8);
-               intel_sbi_write(dev_priv, 0x203C, tmp, SBI_MPHY);
-
-               tmp = intel_sbi_read(dev_priv, 0x213C, SBI_MPHY);
-               tmp |= (0x3F << 8);
-               intel_sbi_write(dev_priv, 0x213C, tmp, SBI_MPHY);
-       }
-
        tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
        tmp |= (1 << 24) | (1 << 21) | (1 << 18);
        intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);
@@ -5221,64 +5215,148 @@ static void lpt_init_pch_refclk(struct drm_device *dev)
        tmp |= (1 << 24) | (1 << 21) | (1 << 18);
        intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);
 
-       if (!is_sdv) {
-               tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
-               tmp &= ~(7 << 13);
-               tmp |= (5 << 13);
-               intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
+       tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
+       tmp &= ~(7 << 13);
+       tmp |= (5 << 13);
+       intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
+
+       tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
+       tmp &= ~(7 << 13);
+       tmp |= (5 << 13);
+       intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
+
+       tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
+       tmp &= ~0xFF;
+       tmp |= 0x1C;
+       intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);
+
+       tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
+       tmp &= ~0xFF;
+       tmp |= 0x1C;
+       intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);
+
+       tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
+       tmp &= ~(0xFF << 16);
+       tmp |= (0x1C << 16);
+       intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);
+
+       tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
+       tmp &= ~(0xFF << 16);
+       tmp |= (0x1C << 16);
+       intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);
+
+       tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
+       tmp |= (1 << 27);
+       intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
+
+       tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
+       tmp |= (1 << 27);
+       intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
+
+       tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
+       tmp &= ~(0xF << 28);
+       tmp |= (4 << 28);
+       intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);
+
+       tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
+       tmp &= ~(0xF << 28);
+       tmp |= (4 << 28);
+       intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
+}
+
+/* Implements 3 different sequences from BSpec chapter "Display iCLK
+ * Programming" based on the parameters passed:
+ * - Sequence to enable CLKOUT_DP
+ * - Sequence to enable CLKOUT_DP without spread
+ * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
+ */
+static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread,
+                                bool with_fdi)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       uint32_t reg, tmp;
+
+       if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
+               with_spread = true;
+       if (WARN(dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE &&
+                with_fdi, "LP PCH doesn't have FDI\n"))
+               with_fdi = false;
+
+       mutex_lock(&dev_priv->dpio_lock);
+
+       tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
+       tmp &= ~SBI_SSCCTL_DISABLE;
+       tmp |= SBI_SSCCTL_PATHALT;
+       intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
+
+       udelay(24);
+
+       if (with_spread) {
+               tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
+               tmp &= ~SBI_SSCCTL_PATHALT;
+               intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
 
-               tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
-               tmp &= ~(7 << 13);
-               tmp |= (5 << 13);
-               intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
+               if (with_fdi) {
+                       lpt_reset_fdi_mphy(dev_priv);
+                       lpt_program_fdi_mphy(dev_priv);
+               }
        }
 
-       tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
-       tmp &= ~0xFF;
-       tmp |= 0x1C;
-       intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);
+       reg = (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) ?
+              SBI_GEN0 : SBI_DBUFF0;
+       tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
+       tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
+       intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
 
-       tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
-       tmp &= ~0xFF;
-       tmp |= 0x1C;
-       intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);
+       mutex_unlock(&dev_priv->dpio_lock);
+}
 
-       tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
-       tmp &= ~(0xFF << 16);
-       tmp |= (0x1C << 16);
-       intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);
+/* Sequence to disable CLKOUT_DP */
+static void lpt_disable_clkout_dp(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       uint32_t reg, tmp;
 
-       tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
-       tmp &= ~(0xFF << 16);
-       tmp |= (0x1C << 16);
-       intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);
+       mutex_lock(&dev_priv->dpio_lock);
 
-       if (!is_sdv) {
-               tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
-               tmp |= (1 << 27);
-               intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
+       reg = (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) ?
+              SBI_GEN0 : SBI_DBUFF0;
+       tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
+       tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
+       intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
 
-               tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
-               tmp |= (1 << 27);
-               intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
+       tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
+       if (!(tmp & SBI_SSCCTL_DISABLE)) {
+               if (!(tmp & SBI_SSCCTL_PATHALT)) {
+                       tmp |= SBI_SSCCTL_PATHALT;
+                       intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
+                       udelay(32);
+               }
+               tmp |= SBI_SSCCTL_DISABLE;
+               intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
+       }
 
-               tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
-               tmp &= ~(0xF << 28);
-               tmp |= (4 << 28);
-               intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);
+       mutex_unlock(&dev_priv->dpio_lock);
+}
 
-               tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
-               tmp &= ~(0xF << 28);
-               tmp |= (4 << 28);
-               intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
-       }
+static void lpt_init_pch_refclk(struct drm_device *dev)
+{
+       struct drm_mode_config *mode_config = &dev->mode_config;
+       struct intel_encoder *encoder;
+       bool has_vga = false;
 
-       /* ULT uses SBI_GEN0, but ULT doesn't have VGA, so we don't care. */
-       tmp = intel_sbi_read(dev_priv, SBI_DBUFF0, SBI_ICLK);
-       tmp |= SBI_DBUFF0_ENABLE;
-       intel_sbi_write(dev_priv, SBI_DBUFF0, tmp, SBI_ICLK);
+       list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
+               switch (encoder->type) {
+               case INTEL_OUTPUT_ANALOG:
+                       has_vga = true;
+                       break;
+               }
+       }
 
-       mutex_unlock(&dev_priv->dpio_lock);
+       if (has_vga)
+               lpt_enable_clkout_dp(dev, true, true);
+       else
+               lpt_disable_clkout_dp(dev);
 }
 
 /*
@@ -5610,9 +5688,9 @@ static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
                << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
 
        if (is_sdvo)
-               dpll |= DPLL_DVO_HIGH_SPEED;
+               dpll |= DPLL_SDVO_HIGH_SPEED;
        if (intel_crtc->config.has_dp_encoder)
-               dpll |= DPLL_DVO_HIGH_SPEED;
+               dpll |= DPLL_SDVO_HIGH_SPEED;
 
        /* compute bitmask from p1 value */
        dpll |= (1 << (intel_crtc->config.dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
@@ -5708,7 +5786,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
                else
                        intel_crtc->config.dpll_hw_state.fp1 = fp;
 
-               pll = intel_get_shared_dpll(intel_crtc, dpll, fp);
+               pll = intel_get_shared_dpll(intel_crtc);
                if (pll == NULL) {
                        DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
                                         pipe_name(pipe));
@@ -5720,10 +5798,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
        if (intel_crtc->config.has_dp_encoder)
                intel_dp_set_m_n(intel_crtc);
 
-       for_each_encoder_on_crtc(dev, crtc, encoder)
-               if (encoder->pre_pll_enable)
-                       encoder->pre_pll_enable(encoder);
-
        if (is_lvds && has_reduced_clock && i915_powersave)
                intel_crtc->lowfreq_avail = true;
        else
@@ -5732,23 +5806,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
        if (intel_crtc->config.has_pch_encoder) {
                pll = intel_crtc_to_shared_dpll(intel_crtc);
 
-               I915_WRITE(PCH_DPLL(pll->id), dpll);
-
-               /* Wait for the clocks to stabilize. */
-               POSTING_READ(PCH_DPLL(pll->id));
-               udelay(150);
-
-               /* The pixel multiplier can only be updated once the
-                * DPLL is enabled and the clocks are stable.
-                *
-                * So write it again.
-                */
-               I915_WRITE(PCH_DPLL(pll->id), dpll);
-
-               if (has_reduced_clock)
-                       I915_WRITE(PCH_FP1(pll->id), fp2);
-               else
-                       I915_WRITE(PCH_FP1(pll->id), fp);
        }
 
        intel_set_pipe_timings(intel_crtc);
@@ -5820,7 +5877,7 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t tmp;
 
-       pipe_config->cpu_transcoder = crtc->pipe;
+       pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
        pipe_config->shared_dpll = DPLL_ID_PRIVATE;
 
        tmp = I915_READ(PIPECONF(crtc->pipe));
@@ -5838,12 +5895,9 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
 
                ironlake_get_fdi_m_n_config(crtc, pipe_config);
 
-               /* XXX: Can't properly read out the pch dpll pixel multiplier
-                * since we don't have state tracking for pch clocks yet. */
-               pipe_config->pixel_multiplier = 1;
-
                if (HAS_PCH_IBX(dev_priv->dev)) {
-                       pipe_config->shared_dpll = crtc->pipe;
+                       pipe_config->shared_dpll =
+                               (enum intel_dpll_id) crtc->pipe;
                } else {
                        tmp = I915_READ(PCH_DPLL_SEL);
                        if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
@@ -5856,6 +5910,11 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
 
                WARN_ON(!pll->get_hw_state(dev_priv, pll,
                                           &pipe_config->dpll_hw_state));
+
+               tmp = pipe_config->dpll_hw_state.dpll;
+               pipe_config->pixel_multiplier =
+                       ((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
+                        >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
        } else {
                pipe_config->pixel_multiplier = 1;
        }
@@ -5867,6 +5926,142 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
        return true;
 }
 
+static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
+{
+       struct drm_device *dev = dev_priv->dev;
+       struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
+       struct intel_crtc *crtc;
+       unsigned long irqflags;
+       uint32_t val, pch_hpd_mask;
+
+       pch_hpd_mask = SDE_PORTB_HOTPLUG_CPT | SDE_PORTC_HOTPLUG_CPT;
+       if (!(dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE))
+               pch_hpd_mask |= SDE_PORTD_HOTPLUG_CPT | SDE_CRT_HOTPLUG_CPT;
+
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head)
+               WARN(crtc->base.enabled, "CRTC for pipe %c enabled\n",
+                    pipe_name(crtc->pipe));
+
+       WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n");
+       WARN(plls->spll_refcount, "SPLL enabled\n");
+       WARN(plls->wrpll1_refcount, "WRPLL1 enabled\n");
+       WARN(plls->wrpll2_refcount, "WRPLL2 enabled\n");
+       WARN(I915_READ(PCH_PP_STATUS) & PP_ON, "Panel power on\n");
+       WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
+            "CPU PWM1 enabled\n");
+       WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
+            "CPU PWM2 enabled\n");
+       WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
+            "PCH PWM1 enabled\n");
+       WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
+            "Utility pin enabled\n");
+       WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n");
+
+       spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+       val = I915_READ(DEIMR);
+       WARN((val & ~DE_PCH_EVENT_IVB) != val,
+            "Unexpected DEIMR bits enabled: 0x%x\n", val);
+       val = I915_READ(SDEIMR);
+       WARN((val & ~pch_hpd_mask) != val,
+            "Unexpected SDEIMR bits enabled: 0x%x\n", val);
+       spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+}
+
+/*
+ * This function implements pieces of two sequences from BSpec:
+ * - Sequence for display software to disable LCPLL
+ * - Sequence for display software to allow package C8+
+ * The steps implemented here are just the steps that actually touch the LCPLL
+ * register. Callers should take care of disabling all the display engine
+ * functions, doing the mode unset, fixing interrupts, etc.
+ */
+void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
+                      bool switch_to_fclk, bool allow_power_down)
+{
+       uint32_t val;
+
+       assert_can_disable_lcpll(dev_priv);
+
+       val = I915_READ(LCPLL_CTL);
+
+       if (switch_to_fclk) {
+               val |= LCPLL_CD_SOURCE_FCLK;
+               I915_WRITE(LCPLL_CTL, val);
+
+               if (wait_for_atomic_us(I915_READ(LCPLL_CTL) &
+                                      LCPLL_CD_SOURCE_FCLK_DONE, 1))
+                       DRM_ERROR("Switching to FCLK failed\n");
+
+               val = I915_READ(LCPLL_CTL);
+       }
+
+       val |= LCPLL_PLL_DISABLE;
+       I915_WRITE(LCPLL_CTL, val);
+       POSTING_READ(LCPLL_CTL);
+
+       if (wait_for((I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK) == 0, 1))
+               DRM_ERROR("LCPLL still locked\n");
+
+       val = I915_READ(D_COMP);
+       val |= D_COMP_COMP_DISABLE;
+       I915_WRITE(D_COMP, val);
+       POSTING_READ(D_COMP);
+       ndelay(100);
+
+       if (wait_for((I915_READ(D_COMP) & D_COMP_RCOMP_IN_PROGRESS) == 0, 1))
+               DRM_ERROR("D_COMP RCOMP still in progress\n");
+
+       if (allow_power_down) {
+               val = I915_READ(LCPLL_CTL);
+               val |= LCPLL_POWER_DOWN_ALLOW;
+               I915_WRITE(LCPLL_CTL, val);
+               POSTING_READ(LCPLL_CTL);
+       }
+}
+
+/*
+ * Fully restores LCPLL, disallowing power down and switching back to LCPLL
+ * source.
+ */
+void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
+{
+       uint32_t val;
+
+       val = I915_READ(LCPLL_CTL);
+
+       if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
+                   LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
+               return;
+
+       if (val & LCPLL_POWER_DOWN_ALLOW) {
+               val &= ~LCPLL_POWER_DOWN_ALLOW;
+               I915_WRITE(LCPLL_CTL, val);
+       }
+
+       val = I915_READ(D_COMP);
+       val |= D_COMP_COMP_FORCE;
+       val &= ~D_COMP_COMP_DISABLE;
+       I915_WRITE(D_COMP, val);
+       I915_READ(D_COMP);
+
+       val = I915_READ(LCPLL_CTL);
+       val &= ~LCPLL_PLL_DISABLE;
+       I915_WRITE(LCPLL_CTL, val);
+
+       if (wait_for(I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK, 5))
+               DRM_ERROR("LCPLL not locked yet\n");
+
+       if (val & LCPLL_CD_SOURCE_FCLK) {
+               val = I915_READ(LCPLL_CTL);
+               val &= ~LCPLL_CD_SOURCE_FCLK;
+               I915_WRITE(LCPLL_CTL, val);
+
+               if (wait_for_atomic_us((I915_READ(LCPLL_CTL) &
+                                       LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
+                       DRM_ERROR("Switching back to LCPLL failed\n");
+       }
+}
+
 static void haswell_modeset_global_resources(struct drm_device *dev)
 {
        bool enable = false;
@@ -5935,7 +6130,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
        enum intel_display_power_domain pfit_domain;
        uint32_t tmp;
 
-       pipe_config->cpu_transcoder = crtc->pipe;
+       pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
        pipe_config->shared_dpll = DPLL_ID_PRIVATE;
 
        tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
@@ -6005,11 +6200,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 {
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_encoder_helper_funcs *encoder_funcs;
        struct intel_encoder *encoder;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       struct drm_display_mode *adjusted_mode =
-               &intel_crtc->config.adjusted_mode;
        struct drm_display_mode *mode = &intel_crtc->config.requested_mode;
        int pipe = intel_crtc->pipe;
        int ret;
@@ -6028,12 +6220,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
                        encoder->base.base.id,
                        drm_get_encoder_name(&encoder->base),
                        mode->base.id, mode->name);
-               if (encoder->mode_set) {
-                       encoder->mode_set(encoder);
-               } else {
-                       encoder_funcs = encoder->base.helper_private;
-                       encoder_funcs->mode_set(&encoder->base, mode, adjusted_mode);
-               }
+               encoder->mode_set(encoder);
        }
 
        return 0;
@@ -6548,7 +6735,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
                        goto fail_unpin;
                }
 
-               addr = obj->gtt_offset;
+               addr = i915_gem_obj_ggtt_offset(obj);
        } else {
                int align = IS_I830(dev) ? 16 * 1024 : 256;
                ret = i915_gem_attach_phys_object(dev, obj,
@@ -6570,7 +6757,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
                        if (intel_crtc->cursor_bo != obj)
                                i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
                } else
-                       i915_gem_object_unpin(intel_crtc->cursor_bo);
+                       i915_gem_object_unpin_from_display_plane(intel_crtc->cursor_bo);
                drm_gem_object_unreference(&intel_crtc->cursor_bo->base);
        }
 
@@ -6585,7 +6772,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
 
        return 0;
 fail_unpin:
-       i915_gem_object_unpin(obj);
+       i915_gem_object_unpin_from_display_plane(obj);
 fail_locked:
        mutex_unlock(&dev->struct_mutex);
 fail:
@@ -6875,11 +7062,12 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
 }
 
 /* Returns the clock of the currently programmed mode of the given pipe. */
-static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
+static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
+                               struct intel_crtc_config *pipe_config)
 {
+       struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       int pipe = intel_crtc->pipe;
+       int pipe = pipe_config->cpu_transcoder;
        u32 dpll = I915_READ(DPLL(pipe));
        u32 fp;
        intel_clock_t clock;
@@ -6918,7 +7106,8 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
                default:
                        DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
                                  "mode\n", (int)(dpll & DPLL_MODE_MASK));
-                       return 0;
+                       pipe_config->adjusted_mode.clock = 0;
+                       return;
                }
 
                if (IS_PINEVIEW(dev))
@@ -6955,12 +7144,55 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
                }
        }
 
-       /* XXX: It would be nice to validate the clocks, but we can't reuse
-        * i830PllIsValid() because it relies on the xf86_config connector
-        * configuration being accurate, which it isn't necessarily.
+       pipe_config->adjusted_mode.clock = clock.dot *
+               pipe_config->pixel_multiplier;
+}
+
+static void ironlake_crtc_clock_get(struct intel_crtc *crtc,
+                                   struct intel_crtc_config *pipe_config)
+{
+       struct drm_device *dev = crtc->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
+       int link_freq, repeat;
+       u64 clock;
+       u32 link_m, link_n;
+
+       repeat = pipe_config->pixel_multiplier;
+
+       /*
+        * The calculation for the data clock is:
+        * pixel_clock = ((m/n)*(link_clock * nr_lanes * repeat))/bpp
+        * But we want to avoid losing precison if possible, so:
+        * pixel_clock = ((m * link_clock * nr_lanes * repeat)/(n*bpp))
+        *
+        * and the link clock is simpler:
+        * link_clock = (m * link_clock * repeat) / n
+        */
+
+       /*
+        * We need to get the FDI or DP link clock here to derive
+        * the M/N dividers.
+        *
+        * For FDI, we read it from the BIOS or use a fixed 2.7GHz.
+        * For DP, it's either 1.62GHz or 2.7GHz.
+        * We do our calculations in 10*MHz since we don't need much precison.
         */
+       if (pipe_config->has_pch_encoder)
+               link_freq = intel_fdi_link_freq(dev) * 10000;
+       else
+               link_freq = pipe_config->port_clock;
 
-       return clock.dot;
+       link_m = I915_READ(PIPE_LINK_M1(cpu_transcoder));
+       link_n = I915_READ(PIPE_LINK_N1(cpu_transcoder));
+
+       if (!link_m || !link_n)
+               return;
+
+       clock = ((u64)link_m * (u64)link_freq * (u64)repeat);
+       do_div(clock, link_n);
+
+       pipe_config->adjusted_mode.clock = clock;
 }
 
 /** Returns the currently programmed mode of the given pipe. */
@@ -6971,6 +7203,7 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
        struct drm_display_mode *mode;
+       struct intel_crtc_config pipe_config;
        int htot = I915_READ(HTOTAL(cpu_transcoder));
        int hsync = I915_READ(HSYNC(cpu_transcoder));
        int vtot = I915_READ(VTOTAL(cpu_transcoder));
@@ -6980,7 +7213,18 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
        if (!mode)
                return NULL;
 
-       mode->clock = intel_crtc_clock_get(dev, crtc);
+       /*
+        * Construct a pipe_config sufficient for getting the clock info
+        * back out of crtc_clock_get.
+        *
+        * Note, if LVDS ever uses a non-1 pixel multiplier, we'll need
+        * to use a real value here instead.
+        */
+       pipe_config.cpu_transcoder = (enum transcoder) intel_crtc->pipe;
+       pipe_config.pixel_multiplier = 1;
+       i9xx_crtc_clock_get(intel_crtc, &pipe_config);
+
+       mode->clock = pipe_config.adjusted_mode.clock;
        mode->hdisplay = (htot & 0xffff) + 1;
        mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
        mode->hsync_start = (hsync & 0xffff) + 1;
@@ -7263,7 +7507,7 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
        intel_ring_emit(ring, MI_DISPLAY_FLIP |
                        MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
        intel_ring_emit(ring, fb->pitches[0]);
-       intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
+       intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
        intel_ring_emit(ring, 0); /* aux display base address, unused */
 
        intel_mark_page_flip_active(intel_crtc);
@@ -7304,7 +7548,7 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
        intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
                        MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
        intel_ring_emit(ring, fb->pitches[0]);
-       intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
+       intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
        intel_ring_emit(ring, MI_NOOP);
 
        intel_mark_page_flip_active(intel_crtc);
@@ -7344,7 +7588,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
                        MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
        intel_ring_emit(ring, fb->pitches[0]);
        intel_ring_emit(ring,
-                       (obj->gtt_offset + intel_crtc->dspaddr_offset) |
+                       (i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset) |
                        obj->tiling_mode);
 
        /* XXX Enabling the panel-fitter across page-flip is so far
@@ -7387,7 +7631,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
        intel_ring_emit(ring, MI_DISPLAY_FLIP |
                        MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
        intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode);
-       intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
+       intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
 
        /* Contrary to the suggestions in the documentation,
         * "Enable Panel Fitter" does not seem to be required when page
@@ -7452,7 +7696,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
 
        intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
        intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
-       intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
+       intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
        intel_ring_emit(ring, (MI_NOOP));
 
        intel_mark_page_flip_active(intel_crtc);
@@ -7789,7 +8033,6 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
                          struct drm_display_mode *mode)
 {
        struct drm_device *dev = crtc->dev;
-       struct drm_encoder_helper_funcs *encoder_funcs;
        struct intel_encoder *encoder;
        struct intel_crtc_config *pipe_config;
        int plane_bpp, ret = -EINVAL;
@@ -7806,9 +8049,23 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
 
        drm_mode_copy(&pipe_config->adjusted_mode, mode);
        drm_mode_copy(&pipe_config->requested_mode, mode);
-       pipe_config->cpu_transcoder = to_intel_crtc(crtc)->pipe;
+       pipe_config->cpu_transcoder =
+               (enum transcoder) to_intel_crtc(crtc)->pipe;
        pipe_config->shared_dpll = DPLL_ID_PRIVATE;
 
+       /*
+        * Sanitize sync polarity flags based on requested ones. If neither
+        * positive or negative polarity is requested, treat this as meaning
+        * negative polarity.
+        */
+       if (!(pipe_config->adjusted_mode.flags &
+             (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
+               pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;
+
+       if (!(pipe_config->adjusted_mode.flags &
+             (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
+               pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
+
        /* Compute a starting value for pipe_config->pipe_bpp taking the source
         * plane pixel format and any sink constraints into account. Returns the
         * source plane bpp so that dithering can be selected on mismatches
@@ -7823,6 +8080,9 @@ encoder_retry:
        pipe_config->port_clock = 0;
        pipe_config->pixel_multiplier = 1;
 
+       /* Fill in default crtc timings, allow encoders to overwrite them. */
+       drm_mode_set_crtcinfo(&pipe_config->adjusted_mode, 0);
+
        /* Pass our mode to the connectors and the CRTC to give them a chance to
         * adjust it according to limitations or connector properties, and also
         * a chance to reject the mode entirely.
@@ -7833,20 +8093,8 @@ encoder_retry:
                if (&encoder->new_crtc->base != crtc)
                        continue;
 
-               if (encoder->compute_config) {
-                       if (!(encoder->compute_config(encoder, pipe_config))) {
-                               DRM_DEBUG_KMS("Encoder config failure\n");
-                               goto fail;
-                       }
-
-                       continue;
-               }
-
-               encoder_funcs = encoder->base.helper_private;
-               if (!(encoder_funcs->mode_fixup(&encoder->base,
-                                               &pipe_config->requested_mode,
-                                               &pipe_config->adjusted_mode))) {
-                       DRM_DEBUG_KMS("Encoder fixup failed\n");
+               if (!(encoder->compute_config(encoder, pipe_config))) {
+                       DRM_DEBUG_KMS("Encoder config failure\n");
                        goto fail;
                }
        }
@@ -8041,6 +8289,28 @@ intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes)
 
 }
 
+static bool intel_fuzzy_clock_check(struct intel_crtc_config *cur,
+                                   struct intel_crtc_config *new)
+{
+       int clock1, clock2, diff;
+
+       clock1 = cur->adjusted_mode.clock;
+       clock2 = new->adjusted_mode.clock;
+
+       if (clock1 == clock2)
+               return true;
+
+       if (!clock1 || !clock2)
+               return false;
+
+       diff = abs(clock1 - clock2);
+
+       if (((((diff + clock1 + clock2) * 100)) / (clock1 + clock2)) < 105)
+               return true;
+
+       return false;
+}
+
 #define for_each_intel_crtc_masked(dev, mask, intel_crtc) \
        list_for_each_entry((intel_crtc), \
                            &(dev)->mode_config.crtc_list, \
@@ -8072,7 +8342,7 @@ intel_pipe_config_compare(struct drm_device *dev,
 
 #define PIPE_CONF_CHECK_FLAGS(name, mask)      \
        if ((current_config->name ^ pipe_config->name) & (mask)) { \
-               DRM_ERROR("mismatch in " #name " " \
+               DRM_ERROR("mismatch in " #name "(" #mask ") "      \
                          "(expected %i, found %i)\n", \
                          current_config->name & (mask), \
                          pipe_config->name & (mask)); \
@@ -8106,8 +8376,7 @@ intel_pipe_config_compare(struct drm_device *dev,
        PIPE_CONF_CHECK_I(adjusted_mode.crtc_vsync_start);
        PIPE_CONF_CHECK_I(adjusted_mode.crtc_vsync_end);
 
-       if (!HAS_PCH_SPLIT(dev))
-               PIPE_CONF_CHECK_I(pixel_multiplier);
+       PIPE_CONF_CHECK_I(pixel_multiplier);
 
        PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
                              DRM_MODE_FLAG_INTERLACE);
@@ -8138,6 +8407,7 @@ intel_pipe_config_compare(struct drm_device *dev,
 
        PIPE_CONF_CHECK_I(shared_dpll);
        PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
+       PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
        PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
        PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
 
@@ -8146,6 +8416,15 @@ intel_pipe_config_compare(struct drm_device *dev,
 #undef PIPE_CONF_CHECK_FLAGS
 #undef PIPE_CONF_QUIRK
 
+       if (!IS_HASWELL(dev)) {
+               if (!intel_fuzzy_clock_check(current_config, pipe_config)) {
+                       DRM_ERROR("mismatch in clock (expected %d, found %d)\n",
+                                 current_config->adjusted_mode.clock,
+                                 pipe_config->adjusted_mode.clock);
+                       return false;
+               }
+       }
+
        return true;
 }
 
@@ -8277,6 +8556,9 @@ check_crtc_state(struct drm_device *dev)
                                encoder->get_config(encoder, &pipe_config);
                }
 
+               if (dev_priv->display.get_clock)
+                       dev_priv->display.get_clock(crtc, &pipe_config);
+
                WARN(crtc->active != active,
                     "crtc active state doesn't match with hw state "
                     "(expected %i, found %i)\n", crtc->active, active);
@@ -8454,9 +8736,9 @@ out:
        return ret;
 }
 
-int intel_set_mode(struct drm_crtc *crtc,
-                    struct drm_display_mode *mode,
-                    int x, int y, struct drm_framebuffer *fb)
+static int intel_set_mode(struct drm_crtc *crtc,
+                         struct drm_display_mode *mode,
+                         int x, int y, struct drm_framebuffer *fb)
 {
        int ret;
 
@@ -8573,8 +8855,16 @@ intel_set_config_compute_mode_changes(struct drm_mode_set *set,
        } else if (set->crtc->fb != set->fb) {
                /* If we have no fb then treat it as a full mode set */
                if (set->crtc->fb == NULL) {
-                       DRM_DEBUG_KMS("crtc has no fb, full mode set\n");
-                       config->mode_changed = true;
+                       struct intel_crtc *intel_crtc =
+                               to_intel_crtc(set->crtc);
+
+                       if (intel_crtc->active && i915_fastboot) {
+                               DRM_DEBUG_KMS("crtc has no fb, will flip\n");
+                               config->fb_changed = true;
+                       } else {
+                               DRM_DEBUG_KMS("inactive crtc, full mode set\n");
+                               config->mode_changed = true;
+                       }
                } else if (set->fb == NULL) {
                        config->mode_changed = true;
                } else if (set->fb->pixel_format !=
@@ -8594,6 +8884,9 @@ intel_set_config_compute_mode_changes(struct drm_mode_set *set,
                drm_mode_debug_printmodeline(set->mode);
                config->mode_changed = true;
        }
+
+       DRM_DEBUG_KMS("computed changes for [CRTC:%d], mode_changed=%d, fb_changed=%d\n",
+                       set->crtc->base.id, config->mode_changed, config->fb_changed);
 }
 
 static int
@@ -8604,14 +8897,13 @@ intel_modeset_stage_output_state(struct drm_device *dev,
        struct drm_crtc *new_crtc;
        struct intel_connector *connector;
        struct intel_encoder *encoder;
-       int count, ro;
+       int ro;
 
        /* The upper layers ensure that we either disable a crtc or have a list
         * of connectors. For paranoia, double-check this. */
        WARN_ON(!set->fb && (set->num_connectors != 0));
        WARN_ON(set->fb && (set->num_connectors == 0));
 
-       count = 0;
        list_for_each_entry(connector, &dev->mode_config.connector_list,
                            base.head) {
                /* Otherwise traverse passed in connector list and get encoders
@@ -8645,7 +8937,6 @@ intel_modeset_stage_output_state(struct drm_device *dev,
        /* connector->new_encoder is now updated for all connectors. */
 
        /* Update crtc of enabled connectors. */
-       count = 0;
        list_for_each_entry(connector, &dev->mode_config.connector_list,
                            base.head) {
                if (!connector->new_encoder)
@@ -8804,19 +9095,32 @@ static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
        return val & DPLL_VCO_ENABLE;
 }
 
+static void ibx_pch_dpll_mode_set(struct drm_i915_private *dev_priv,
+                                 struct intel_shared_dpll *pll)
+{
+       I915_WRITE(PCH_FP0(pll->id), pll->hw_state.fp0);
+       I915_WRITE(PCH_FP1(pll->id), pll->hw_state.fp1);
+}
+
 static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
                                struct intel_shared_dpll *pll)
 {
-       uint32_t reg, val;
-
        /* PCH refclock must be enabled first */
        assert_pch_refclk_enabled(dev_priv);
 
-       reg = PCH_DPLL(pll->id);
-       val = I915_READ(reg);
-       val |= DPLL_VCO_ENABLE;
-       I915_WRITE(reg, val);
-       POSTING_READ(reg);
+       I915_WRITE(PCH_DPLL(pll->id), pll->hw_state.dpll);
+
+       /* Wait for the clocks to stabilize. */
+       POSTING_READ(PCH_DPLL(pll->id));
+       udelay(150);
+
+       /* The pixel multiplier can only be updated once the
+        * DPLL is enabled and the clocks are stable.
+        *
+        * So write it again.
+        */
+       I915_WRITE(PCH_DPLL(pll->id), pll->hw_state.dpll);
+       POSTING_READ(PCH_DPLL(pll->id));
        udelay(200);
 }
 
@@ -8825,7 +9129,6 @@ static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
 {
        struct drm_device *dev = dev_priv->dev;
        struct intel_crtc *crtc;
-       uint32_t reg, val;
 
        /* Make sure no transcoder isn't still depending on us. */
        list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
@@ -8833,11 +9136,8 @@ static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
                        assert_pch_transcoder_disabled(dev_priv, crtc->pipe);
        }
 
-       reg = PCH_DPLL(pll->id);
-       val = I915_READ(reg);
-       val &= ~DPLL_VCO_ENABLE;
-       I915_WRITE(reg, val);
-       POSTING_READ(reg);
+       I915_WRITE(PCH_DPLL(pll->id), 0);
+       POSTING_READ(PCH_DPLL(pll->id));
        udelay(200);
 }
 
@@ -8856,6 +9156,7 @@ static void ibx_pch_dpll_init(struct drm_device *dev)
        for (i = 0; i < dev_priv->num_shared_dpll; i++) {
                dev_priv->shared_dplls[i].id = i;
                dev_priv->shared_dplls[i].name = ibx_pch_dpll_names[i];
+               dev_priv->shared_dplls[i].mode_set = ibx_pch_dpll_mode_set;
                dev_priv->shared_dplls[i].enable = ibx_pch_dpll_enable;
                dev_priv->shared_dplls[i].disable = ibx_pch_dpll_disable;
                dev_priv->shared_dplls[i].get_hw_state =
@@ -9035,8 +9336,13 @@ static void intel_setup_outputs(struct drm_device *dev)
                        intel_dp_init(dev, PCH_DP_D, PORT_D);
        } else if (IS_VALLEYVIEW(dev)) {
                /* Check for built-in panel first. Shares lanes with HDMI on SDVOC */
-               if (I915_READ(VLV_DISPLAY_BASE + DP_C) & DP_DETECTED)
-                       intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C, PORT_C);
+               if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIC) & SDVO_DETECTED) {
+                       intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIC,
+                                       PORT_C);
+                       if (I915_READ(VLV_DISPLAY_BASE + DP_C) & DP_DETECTED)
+                               intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C,
+                                             PORT_C);
+               }
 
                if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIB) & SDVO_DETECTED) {
                        intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIB,
@@ -9096,13 +9402,17 @@ static void intel_setup_outputs(struct drm_device *dev)
        drm_helper_move_panel_connectors_to_head(dev);
 }
 
+void intel_framebuffer_fini(struct intel_framebuffer *fb)
+{
+       drm_framebuffer_cleanup(&fb->base);
+       drm_gem_object_unreference_unlocked(&fb->obj->base);
+}
+
 static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
 {
        struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
 
-       drm_framebuffer_cleanup(fb);
-       drm_gem_object_unreference_unlocked(&intel_fb->obj->base);
-
+       intel_framebuffer_fini(intel_fb);
        kfree(intel_fb);
 }
 
@@ -9272,6 +9582,7 @@ static void intel_init_display(struct drm_device *dev)
                dev_priv->display.update_plane = ironlake_update_plane;
        } else if (HAS_PCH_SPLIT(dev)) {
                dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
+               dev_priv->display.get_clock = ironlake_crtc_clock_get;
                dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
                dev_priv->display.crtc_enable = ironlake_crtc_enable;
                dev_priv->display.crtc_disable = ironlake_crtc_disable;
@@ -9279,6 +9590,7 @@ static void intel_init_display(struct drm_device *dev)
                dev_priv->display.update_plane = ironlake_update_plane;
        } else if (IS_VALLEYVIEW(dev)) {
                dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
+               dev_priv->display.get_clock = i9xx_crtc_clock_get;
                dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
                dev_priv->display.crtc_enable = valleyview_crtc_enable;
                dev_priv->display.crtc_disable = i9xx_crtc_disable;
@@ -9286,6 +9598,7 @@ static void intel_init_display(struct drm_device *dev)
                dev_priv->display.update_plane = i9xx_update_plane;
        } else {
                dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
+               dev_priv->display.get_clock = i9xx_crtc_clock_get;
                dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
                dev_priv->display.crtc_enable = i9xx_crtc_enable;
                dev_priv->display.crtc_disable = i9xx_crtc_disable;
@@ -9303,9 +9616,12 @@ static void intel_init_display(struct drm_device *dev)
        else if (IS_I915G(dev))
                dev_priv->display.get_display_clock_speed =
                        i915_get_display_clock_speed;
-       else if (IS_I945GM(dev) || IS_845G(dev) || IS_PINEVIEW_M(dev))
+       else if (IS_I945GM(dev) || IS_845G(dev))
                dev_priv->display.get_display_clock_speed =
                        i9xx_misc_get_display_clock_speed;
+       else if (IS_PINEVIEW(dev))
+               dev_priv->display.get_display_clock_speed =
+                       pnv_get_display_clock_speed;
        else if (IS_I915GM(dev))
                dev_priv->display.get_display_clock_speed =
                        i915gm_get_display_clock_speed;
@@ -9586,7 +9902,7 @@ void intel_modeset_init(struct drm_device *dev)
                      INTEL_INFO(dev)->num_pipes,
                      INTEL_INFO(dev)->num_pipes > 1 ? "s" : "");
 
-       for (i = 0; i < INTEL_INFO(dev)->num_pipes; i++) {
+       for_each_pipe(i) {
                intel_crtc_init(dev, i);
                for (j = 0; j < dev_priv->num_plane; j++) {
                        ret = intel_plane_init(dev, i, j);
@@ -9792,6 +10108,17 @@ void i915_redisable_vga(struct drm_device *dev)
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 vga_reg = i915_vgacntrl_reg(dev);
 
+       /* This function can be called both from intel_modeset_setup_hw_state or
+        * at a very early point in our resume sequence, where the power well
+        * structures are not yet restored. Since this function is at a very
+        * paranoid "someone might have enabled VGA while we were not looking"
+        * level, just check if the power well is enabled instead of trying to
+        * follow the "don't touch the power well if we don't need it" policy
+        * the rest of the driver uses. */
+       if (HAS_POWER_WELL(dev) &&
+           (I915_READ(HSW_PWR_WELL_DRIVER) & HSW_PWR_WELL_STATE_ENABLED) == 0)
+               return;
+
        if (I915_READ(vga_reg) != VGA_DISP_DISABLE) {
                DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
                i915_disable_vga(dev);
@@ -9862,6 +10189,15 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
                              pipe);
        }
 
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list,
+                           base.head) {
+               if (!crtc->active)
+                       continue;
+               if (dev_priv->display.get_clock)
+                       dev_priv->display.get_clock(crtc,
+                                                   &crtc->config);
+       }
+
        list_for_each_entry(connector, &dev->mode_config.connector_list,
                            base.head) {
                if (connector->get_hw_state(connector)) {
@@ -9893,6 +10229,22 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
 
        intel_modeset_readout_hw_state(dev);
 
+       /*
+        * Now that we have the config, copy it to each CRTC struct
+        * Note that this could go away if we move to using crtc_config
+        * checking everywhere.
+        */
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list,
+                           base.head) {
+               if (crtc->active && i915_fastboot) {
+                       intel_crtc_mode_from_pipe_config(crtc, &crtc->config);
+
+                       DRM_DEBUG_KMS("[CRTC:%d] found active mode: ",
+                                     crtc->base.base.id);
+                       drm_mode_debug_printmodeline(&crtc->base.mode);
+               }
+       }
+
        /* HW state is read out, now we need to sanitize this mess. */
        list_for_each_entry(encoder, &dev->mode_config.encoder_list,
                            base.head) {
@@ -9955,7 +10307,6 @@ void intel_modeset_cleanup(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc;
-       struct intel_crtc *intel_crtc;
 
        /*
         * Interrupts and polling as the first thing to avoid creating havoc.
@@ -9979,7 +10330,6 @@ void intel_modeset_cleanup(struct drm_device *dev)
                if (!crtc->fb)
                        continue;
 
-               intel_crtc = to_intel_crtc(crtc);
                intel_increase_pllclock(crtc);
        }
 
@@ -10035,13 +10385,12 @@ int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
        return 0;
 }
 
-#ifdef CONFIG_DEBUG_FS
-#include <linux/seq_file.h>
-
 struct intel_display_error_state {
 
        u32 power_well_driver;
 
+       int num_transcoders;
+
        struct intel_cursor_error_state {
                u32 control;
                u32 position;
@@ -10050,16 +10399,7 @@ struct intel_display_error_state {
        } cursor[I915_MAX_PIPES];
 
        struct intel_pipe_error_state {
-               enum transcoder cpu_transcoder;
-               u32 conf;
                u32 source;
-
-               u32 htotal;
-               u32 hblank;
-               u32 hsync;
-               u32 vtotal;
-               u32 vblank;
-               u32 vsync;
        } pipe[I915_MAX_PIPES];
 
        struct intel_plane_error_state {
@@ -10071,6 +10411,19 @@ struct intel_display_error_state {
                u32 surface;
                u32 tile_offset;
        } plane[I915_MAX_PIPES];
+
+       struct intel_transcoder_error_state {
+               enum transcoder cpu_transcoder;
+
+               u32 conf;
+
+               u32 htotal;
+               u32 hblank;
+               u32 hsync;
+               u32 vtotal;
+               u32 vblank;
+               u32 vsync;
+       } transcoder[4];
 };
 
 struct intel_display_error_state *
@@ -10078,9 +10431,17 @@ intel_display_capture_error_state(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct intel_display_error_state *error;
-       enum transcoder cpu_transcoder;
+       int transcoders[] = {
+               TRANSCODER_A,
+               TRANSCODER_B,
+               TRANSCODER_C,
+               TRANSCODER_EDP,
+       };
        int i;
 
+       if (INTEL_INFO(dev)->num_pipes == 0)
+               return NULL;
+
        error = kmalloc(sizeof(*error), GFP_ATOMIC);
        if (error == NULL)
                return NULL;
@@ -10089,9 +10450,6 @@ intel_display_capture_error_state(struct drm_device *dev)
                error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER);
 
        for_each_pipe(i) {
-               cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, i);
-               error->pipe[i].cpu_transcoder = cpu_transcoder;
-
                if (INTEL_INFO(dev)->gen <= 6 || IS_VALLEYVIEW(dev)) {
                        error->cursor[i].control = I915_READ(CURCNTR(i));
                        error->cursor[i].position = I915_READ(CURPOS(i));
@@ -10115,22 +10473,32 @@ intel_display_capture_error_state(struct drm_device *dev)
                        error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
                }
 
-               error->pipe[i].conf = I915_READ(PIPECONF(cpu_transcoder));
                error->pipe[i].source = I915_READ(PIPESRC(i));
-               error->pipe[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
-               error->pipe[i].hblank = I915_READ(HBLANK(cpu_transcoder));
-               error->pipe[i].hsync = I915_READ(HSYNC(cpu_transcoder));
-               error->pipe[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
-               error->pipe[i].vblank = I915_READ(VBLANK(cpu_transcoder));
-               error->pipe[i].vsync = I915_READ(VSYNC(cpu_transcoder));
+       }
+
+       error->num_transcoders = INTEL_INFO(dev)->num_pipes;
+       if (HAS_DDI(dev_priv->dev))
+               error->num_transcoders++; /* Account for eDP. */
+
+       for (i = 0; i < error->num_transcoders; i++) {
+               enum transcoder cpu_transcoder = transcoders[i];
+
+               error->transcoder[i].cpu_transcoder = cpu_transcoder;
+
+               error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
+               error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
+               error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
+               error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
+               error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
+               error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
+               error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
        }
 
        /* In the code above we read the registers without checking if the power
         * well was on, so here we have to clear the FPGA_DBG_RM_NOCLAIM bit to
         * prevent the next I915_WRITE from detecting it and printing an error
         * message. */
-       if (HAS_POWER_WELL(dev))
-               I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
+       intel_uncore_clear_errors(dev);
 
        return error;
 }
@@ -10144,22 +10512,16 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m,
 {
        int i;
 
+       if (!error)
+               return;
+
        err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes);
        if (HAS_POWER_WELL(dev))
                err_printf(m, "PWR_WELL_CTL2: %08x\n",
                           error->power_well_driver);
        for_each_pipe(i) {
                err_printf(m, "Pipe [%d]:\n", i);
-               err_printf(m, "  CPU transcoder: %c\n",
-                          transcoder_name(error->pipe[i].cpu_transcoder));
-               err_printf(m, "  CONF: %08x\n", error->pipe[i].conf);
                err_printf(m, "  SRC: %08x\n", error->pipe[i].source);
-               err_printf(m, "  HTOTAL: %08x\n", error->pipe[i].htotal);
-               err_printf(m, "  HBLANK: %08x\n", error->pipe[i].hblank);
-               err_printf(m, "  HSYNC: %08x\n", error->pipe[i].hsync);
-               err_printf(m, "  VTOTAL: %08x\n", error->pipe[i].vtotal);
-               err_printf(m, "  VBLANK: %08x\n", error->pipe[i].vblank);
-               err_printf(m, "  VSYNC: %08x\n", error->pipe[i].vsync);
 
                err_printf(m, "Plane [%d]:\n", i);
                err_printf(m, "  CNTR: %08x\n", error->plane[i].control);
@@ -10180,5 +10542,16 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m,
                err_printf(m, "  POS: %08x\n", error->cursor[i].position);
                err_printf(m, "  BASE: %08x\n", error->cursor[i].base);
        }
+
+       for (i = 0; i < error->num_transcoders; i++) {
+               err_printf(m, "  CPU transcoder: %c\n",
+                          transcoder_name(error->transcoder[i].cpu_transcoder));
+               err_printf(m, "  CONF: %08x\n", error->transcoder[i].conf);
+               err_printf(m, "  HTOTAL: %08x\n", error->transcoder[i].htotal);
+               err_printf(m, "  HBLANK: %08x\n", error->transcoder[i].hblank);
+               err_printf(m, "  HSYNC: %08x\n", error->transcoder[i].hsync);
+               err_printf(m, "  VTOTAL: %08x\n", error->transcoder[i].vtotal);
+               err_printf(m, "  VBLANK: %08x\n", error->transcoder[i].vblank);
+               err_printf(m, "  VSYNC: %08x\n", error->transcoder[i].vsync);
+       }
 }
-#endif
index 26e162bb3a5158d5da3f1b5e9b2614c7515ede6b..2726d4d4172258b9214a826a31d7f2ad1e05f47f 100644 (file)
@@ -276,29 +276,13 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
        return status;
 }
 
-static int
-intel_dp_aux_ch(struct intel_dp *intel_dp,
-               uint8_t *send, int send_bytes,
-               uint8_t *recv, int recv_size)
+static uint32_t get_aux_clock_divider(struct intel_dp *intel_dp,
+                                     int index)
 {
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
-       uint32_t ch_data = ch_ctl + 4;
-       int i, ret, recv_bytes;
-       uint32_t status;
-       uint32_t aux_clock_divider;
-       int try, precharge;
-       bool has_aux_irq = INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev);
 
-       /* dp aux is extremely sensitive to irq latency, hence request the
-        * lowest possible wakeup latency and so prevent the cpu from going into
-        * deep sleep states.
-        */
-       pm_qos_update_request(&dev_priv->pm_qos, 0);
-
-       intel_dp_check_edp(intel_dp);
        /* The clock divider is based off the hrawclk,
         * and would like to run at 2MHz. So, take the
         * hrawclk value and divide by 2 and use that
@@ -307,23 +291,53 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
         * clock divider.
         */
        if (IS_VALLEYVIEW(dev)) {
-               aux_clock_divider = 100;
+               return index ? 0 : 100;
        } else if (intel_dig_port->port == PORT_A) {
+               if (index)
+                       return 0;
                if (HAS_DDI(dev))
-                       aux_clock_divider = DIV_ROUND_CLOSEST(
-                               intel_ddi_get_cdclk_freq(dev_priv), 2000);
+                       return DIV_ROUND_CLOSEST(intel_ddi_get_cdclk_freq(dev_priv), 2000);
                else if (IS_GEN6(dev) || IS_GEN7(dev))
-                       aux_clock_divider = 200; /* SNB & IVB eDP input clock at 400Mhz */
+                       return 200; /* SNB & IVB eDP input clock at 400Mhz */
                else
-                       aux_clock_divider = 225; /* eDP input clock at 450Mhz */
+                       return 225; /* eDP input clock at 450Mhz */
        } else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
                /* Workaround for non-ULT HSW */
-               aux_clock_divider = 74;
+               switch (index) {
+               case 0: return 63;
+               case 1: return 72;
+               default: return 0;
+               }
        } else if (HAS_PCH_SPLIT(dev)) {
-               aux_clock_divider = DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
+               return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
        } else {
-               aux_clock_divider = intel_hrawclk(dev) / 2;
+               return index ? 0 :intel_hrawclk(dev) / 2;
        }
+}
+
+static int
+intel_dp_aux_ch(struct intel_dp *intel_dp,
+               uint8_t *send, int send_bytes,
+               uint8_t *recv, int recv_size)
+{
+       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+       struct drm_device *dev = intel_dig_port->base.base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
+       uint32_t ch_data = ch_ctl + 4;
+       uint32_t aux_clock_divider;
+       int i, ret, recv_bytes;
+       uint32_t status;
+       int try, precharge, clock = 0;
+       bool has_aux_irq = INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev);
+
+       /* dp aux is extremely sensitive to irq latency, hence request the
+        * lowest possible wakeup latency and so prevent the cpu from going into
+        * deep sleep states.
+        */
+       pm_qos_update_request(&dev_priv->pm_qos, 0);
+
+       intel_dp_check_edp(intel_dp);
 
        if (IS_GEN6(dev))
                precharge = 3;
@@ -345,37 +359,41 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
                goto out;
        }
 
-       /* Must try at least 3 times according to DP spec */
-       for (try = 0; try < 5; try++) {
-               /* Load the send data into the aux channel data registers */
-               for (i = 0; i < send_bytes; i += 4)
-                       I915_WRITE(ch_data + i,
-                                  pack_aux(send + i, send_bytes - i));
-
-               /* Send the command and wait for it to complete */
-               I915_WRITE(ch_ctl,
-                          DP_AUX_CH_CTL_SEND_BUSY |
-                          (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
-                          DP_AUX_CH_CTL_TIME_OUT_400us |
-                          (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
-                          (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
-                          (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) |
-                          DP_AUX_CH_CTL_DONE |
-                          DP_AUX_CH_CTL_TIME_OUT_ERROR |
-                          DP_AUX_CH_CTL_RECEIVE_ERROR);
-
-               status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);
-
-               /* Clear done status and any errors */
-               I915_WRITE(ch_ctl,
-                          status |
-                          DP_AUX_CH_CTL_DONE |
-                          DP_AUX_CH_CTL_TIME_OUT_ERROR |
-                          DP_AUX_CH_CTL_RECEIVE_ERROR);
-
-               if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR |
-                             DP_AUX_CH_CTL_RECEIVE_ERROR))
-                       continue;
+       while ((aux_clock_divider = get_aux_clock_divider(intel_dp, clock++))) {
+               /* Must try at least 3 times according to DP spec */
+               for (try = 0; try < 5; try++) {
+                       /* Load the send data into the aux channel data registers */
+                       for (i = 0; i < send_bytes; i += 4)
+                               I915_WRITE(ch_data + i,
+                                          pack_aux(send + i, send_bytes - i));
+
+                       /* Send the command and wait for it to complete */
+                       I915_WRITE(ch_ctl,
+                                  DP_AUX_CH_CTL_SEND_BUSY |
+                                  (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
+                                  DP_AUX_CH_CTL_TIME_OUT_400us |
+                                  (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
+                                  (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
+                                  (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) |
+                                  DP_AUX_CH_CTL_DONE |
+                                  DP_AUX_CH_CTL_TIME_OUT_ERROR |
+                                  DP_AUX_CH_CTL_RECEIVE_ERROR);
+
+                       status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);
+
+                       /* Clear done status and any errors */
+                       I915_WRITE(ch_ctl,
+                                  status |
+                                  DP_AUX_CH_CTL_DONE |
+                                  DP_AUX_CH_CTL_TIME_OUT_ERROR |
+                                  DP_AUX_CH_CTL_RECEIVE_ERROR);
+
+                       if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR |
+                                     DP_AUX_CH_CTL_RECEIVE_ERROR))
+                               continue;
+                       if (status & DP_AUX_CH_CTL_DONE)
+                               break;
+               }
                if (status & DP_AUX_CH_CTL_DONE)
                        break;
        }
@@ -710,8 +728,11 @@ intel_dp_compute_config(struct intel_encoder *encoder,
        /* Walk through all bpp values. Luckily they're all nicely spaced with 2
         * bpc in between. */
        bpp = pipe_config->pipe_bpp;
-       if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp)
+       if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp) {
+               DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
+                             dev_priv->vbt.edp_bpp);
                bpp = min_t(int, bpp, dev_priv->vbt.edp_bpp);
+       }
 
        for (; bpp >= 6*3; bpp -= 2*3) {
                mode_rate = intel_dp_link_required(adjusted_mode->clock, bpp);
@@ -812,15 +833,14 @@ static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
        udelay(500);
 }
 
-static void
-intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
-                 struct drm_display_mode *adjusted_mode)
+static void intel_dp_mode_set(struct intel_encoder *encoder)
 {
-       struct drm_device *dev = encoder->dev;
+       struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+       struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        enum port port = dp_to_dig_port(intel_dp)->port;
-       struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
+       struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+       struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode;
 
        /*
         * There are four kinds of DP registers:
@@ -852,7 +872,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
                DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
                                 pipe_name(crtc->pipe));
                intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
-               intel_write_eld(encoder, adjusted_mode);
+               intel_write_eld(&encoder->base, adjusted_mode);
        }
 
        intel_dp_init_link_config(intel_dp);
@@ -1360,6 +1380,275 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
        }
 
        pipe_config->adjusted_mode.flags |= flags;
+
+       if (dp_to_dig_port(intel_dp)->port == PORT_A) {
+               if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
+                       pipe_config->port_clock = 162000;
+               else
+                       pipe_config->port_clock = 270000;
+       }
+}
+
+static bool is_edp_psr(struct intel_dp *intel_dp)
+{
+       return is_edp(intel_dp) &&
+               intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED;
+}
+
+static bool intel_edp_is_psr_enabled(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       if (!IS_HASWELL(dev))
+               return false;
+
+       return I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;
+}
+
+static void intel_edp_psr_write_vsc(struct intel_dp *intel_dp,
+                                   struct edp_vsc_psr *vsc_psr)
+{
+       struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+       struct drm_device *dev = dig_port->base.base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
+       u32 ctl_reg = HSW_TVIDEO_DIP_CTL(crtc->config.cpu_transcoder);
+       u32 data_reg = HSW_TVIDEO_DIP_VSC_DATA(crtc->config.cpu_transcoder);
+       uint32_t *data = (uint32_t *) vsc_psr;
+       unsigned int i;
+
+       /* As per BSpec (Pipe Video Data Island Packet), we need to disable
+          the video DIP before programming the video DIP data buffer
+          registers for the DIP being updated. */
+       I915_WRITE(ctl_reg, 0);
+       POSTING_READ(ctl_reg);
+
+       for (i = 0; i < VIDEO_DIP_VSC_DATA_SIZE; i += 4) {
+               if (i < sizeof(struct edp_vsc_psr))
+                       I915_WRITE(data_reg + i, *data++);
+               else
+                       I915_WRITE(data_reg + i, 0);
+       }
+
+       I915_WRITE(ctl_reg, VIDEO_DIP_ENABLE_VSC_HSW);
+       POSTING_READ(ctl_reg);
+}
+
+static void intel_edp_psr_setup(struct intel_dp *intel_dp)
+{
+       struct drm_device *dev = intel_dp_to_dev(intel_dp);
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct edp_vsc_psr psr_vsc;
+
+       if (intel_dp->psr_setup_done)
+               return;
+
+       /* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
+       memset(&psr_vsc, 0, sizeof(psr_vsc));
+       psr_vsc.sdp_header.HB0 = 0;
+       psr_vsc.sdp_header.HB1 = 0x7;
+       psr_vsc.sdp_header.HB2 = 0x2;
+       psr_vsc.sdp_header.HB3 = 0x8;
+       intel_edp_psr_write_vsc(intel_dp, &psr_vsc);
+
+       /* Avoid continuous PSR exit by masking memup and hpd */
+       I915_WRITE(EDP_PSR_DEBUG_CTL, EDP_PSR_DEBUG_MASK_MEMUP |
+                  EDP_PSR_DEBUG_MASK_HPD);
+
+       intel_dp->psr_setup_done = true;
+}
+
+static void intel_edp_psr_enable_sink(struct intel_dp *intel_dp)
+{
+       struct drm_device *dev = intel_dp_to_dev(intel_dp);
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       uint32_t aux_clock_divider = get_aux_clock_divider(intel_dp, 0);
+       int precharge = 0x3;
+       int msg_size = 5;       /* Header(4) + Message(1) */
+
+       /* Enable PSR in sink */
+       if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT)
+               intel_dp_aux_native_write_1(intel_dp, DP_PSR_EN_CFG,
+                                           DP_PSR_ENABLE &
+                                           ~DP_PSR_MAIN_LINK_ACTIVE);
+       else
+               intel_dp_aux_native_write_1(intel_dp, DP_PSR_EN_CFG,
+                                           DP_PSR_ENABLE |
+                                           DP_PSR_MAIN_LINK_ACTIVE);
+
+       /* Setup AUX registers */
+       I915_WRITE(EDP_PSR_AUX_DATA1, EDP_PSR_DPCD_COMMAND);
+       I915_WRITE(EDP_PSR_AUX_DATA2, EDP_PSR_DPCD_NORMAL_OPERATION);
+       I915_WRITE(EDP_PSR_AUX_CTL,
+                  DP_AUX_CH_CTL_TIME_OUT_400us |
+                  (msg_size << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
+                  (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
+                  (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT));
+}
+
+static void intel_edp_psr_enable_source(struct intel_dp *intel_dp)
+{
+       struct drm_device *dev = intel_dp_to_dev(intel_dp);
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       uint32_t max_sleep_time = 0x1f;
+       uint32_t idle_frames = 1;
+       uint32_t val = 0x0;
+
+       if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT) {
+               val |= EDP_PSR_LINK_STANDBY;
+               val |= EDP_PSR_TP2_TP3_TIME_0us;
+               val |= EDP_PSR_TP1_TIME_0us;
+               val |= EDP_PSR_SKIP_AUX_EXIT;
+       } else
+               val |= EDP_PSR_LINK_DISABLE;
+
+       I915_WRITE(EDP_PSR_CTL, val |
+                  EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES |
+                  max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT |
+                  idle_frames << EDP_PSR_IDLE_FRAME_SHIFT |
+                  EDP_PSR_ENABLE);
+}
+
+static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp)
+{
+       struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+       struct drm_device *dev = dig_port->base.base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_crtc *crtc = dig_port->base.base.crtc;
+       struct intel_crtc *intel_crtc;
+       struct drm_i915_gem_object *obj;
+       struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
+
+       if (!IS_HASWELL(dev)) {
+               DRM_DEBUG_KMS("PSR not supported on this platform\n");
+               dev_priv->no_psr_reason = PSR_NO_SOURCE;
+               return false;
+       }
+
+       if ((intel_encoder->type != INTEL_OUTPUT_EDP) ||
+           (dig_port->port != PORT_A)) {
+               DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n");
+               dev_priv->no_psr_reason = PSR_HSW_NOT_DDIA;
+               return false;
+       }
+
+       if (!is_edp_psr(intel_dp)) {
+               DRM_DEBUG_KMS("PSR not supported by this panel\n");
+               dev_priv->no_psr_reason = PSR_NO_SINK;
+               return false;
+       }
+
+       if (!i915_enable_psr) {
+               DRM_DEBUG_KMS("PSR disable by flag\n");
+               dev_priv->no_psr_reason = PSR_MODULE_PARAM;
+               return false;
+       }
+
+       /* crtc may be NULL; only dereference it after this check. */
+       if (crtc == NULL) {
+               DRM_DEBUG_KMS("crtc not active for PSR\n");
+               dev_priv->no_psr_reason = PSR_CRTC_NOT_ACTIVE;
+               return false;
+       }
+
+       intel_crtc = to_intel_crtc(crtc);
+       if (!intel_crtc->active || !crtc->fb || !crtc->mode.clock) {
+               DRM_DEBUG_KMS("crtc not active for PSR\n");
+               dev_priv->no_psr_reason = PSR_CRTC_NOT_ACTIVE;
+               return false;
+       }
+
+       obj = to_intel_framebuffer(crtc->fb)->obj;
+       if (obj->tiling_mode != I915_TILING_X ||
+           obj->fence_reg == I915_FENCE_REG_NONE) {
+               DRM_DEBUG_KMS("PSR condition failed: fb not tiled or fenced\n");
+               dev_priv->no_psr_reason = PSR_NOT_TILED;
+               return false;
+       }
+
+       if (I915_READ(SPRCTL(intel_crtc->pipe)) & SPRITE_ENABLE) {
+               DRM_DEBUG_KMS("PSR condition failed: Sprite is Enabled\n");
+               dev_priv->no_psr_reason = PSR_SPRITE_ENABLED;
+               return false;
+       }
+
+       if (I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config.cpu_transcoder)) &
+           S3D_ENABLE) {
+               DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
+               dev_priv->no_psr_reason = PSR_S3D_ENABLED;
+               return false;
+       }
+
+       if (crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) {
+               DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n");
+               dev_priv->no_psr_reason = PSR_INTERLACED_ENABLED;
+               return false;
+       }
+
+       return true;
+}
+
+static void intel_edp_psr_do_enable(struct intel_dp *intel_dp)
+{
+       struct drm_device *dev = intel_dp_to_dev(intel_dp);
+
+       if (!intel_edp_psr_match_conditions(intel_dp) ||
+           intel_edp_is_psr_enabled(dev))
+               return;
+
+       /* Setup PSR once */
+       intel_edp_psr_setup(intel_dp);
+
+       /* Enable PSR on the panel */
+       intel_edp_psr_enable_sink(intel_dp);
+
+       /* Enable PSR on the host */
+       intel_edp_psr_enable_source(intel_dp);
+}
+
+void intel_edp_psr_enable(struct intel_dp *intel_dp)
+{
+       struct drm_device *dev = intel_dp_to_dev(intel_dp);
+
+       if (intel_edp_psr_match_conditions(intel_dp) &&
+           !intel_edp_is_psr_enabled(dev))
+               intel_edp_psr_do_enable(intel_dp);
+}
+
+void intel_edp_psr_disable(struct intel_dp *intel_dp)
+{
+       struct drm_device *dev = intel_dp_to_dev(intel_dp);
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       if (!intel_edp_is_psr_enabled(dev))
+               return;
+
+       I915_WRITE(EDP_PSR_CTL, I915_READ(EDP_PSR_CTL) & ~EDP_PSR_ENABLE);
+
+       /* Wait till PSR is idle */
+       if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL) &
+                      EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10))
+               DRM_ERROR("Timed out waiting for PSR Idle State\n");
+}
+
+void intel_edp_psr_update(struct drm_device *dev)
+{
+       struct intel_encoder *encoder;
+       struct intel_dp *intel_dp = NULL;
+
+       list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head)
+               if (encoder->type == INTEL_OUTPUT_EDP) {
+                       intel_dp = enc_to_intel_dp(&encoder->base);
+
+                       if (!is_edp_psr(intel_dp))
+                               return;
+
+                       if (!intel_edp_psr_match_conditions(intel_dp))
+                               intel_edp_psr_disable(intel_dp);
+                       else
+                               if (!intel_edp_is_psr_enabled(dev))
+                                       intel_edp_psr_do_enable(intel_dp);
+               }
 }
 
 static void intel_disable_dp(struct intel_encoder *encoder)
@@ -1411,47 +1700,50 @@ static void intel_enable_dp(struct intel_encoder *encoder)
        intel_dp_complete_link_train(intel_dp);
        intel_dp_stop_link_train(intel_dp);
        ironlake_edp_backlight_on(intel_dp);
+}
 
-       if (IS_VALLEYVIEW(dev)) {
-               struct intel_digital_port *dport =
-                       enc_to_dig_port(&encoder->base);
-               int channel = vlv_dport_to_channel(dport);
-
-               vlv_wait_port_ready(dev_priv, channel);
-       }
+static void vlv_enable_dp(struct intel_encoder *encoder)
+{
 }
 
 static void intel_pre_enable_dp(struct intel_encoder *encoder)
+{
+       struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+       struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
+
+       if (dport->port == PORT_A)
+               ironlake_edp_pll_on(intel_dp);
+}
+
+static void vlv_pre_enable_dp(struct intel_encoder *encoder)
 {
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+       int port = vlv_dport_to_channel(dport);
+       int pipe = intel_crtc->pipe;
+       u32 val;
 
-       if (dport->port == PORT_A && !IS_VALLEYVIEW(dev))
-               ironlake_edp_pll_on(intel_dp);
+       mutex_lock(&dev_priv->dpio_lock);
 
-       if (IS_VALLEYVIEW(dev)) {
-               struct intel_crtc *intel_crtc =
-                       to_intel_crtc(encoder->base.crtc);
-               int port = vlv_dport_to_channel(dport);
-               int pipe = intel_crtc->pipe;
-               u32 val;
-
-               val = vlv_dpio_read(dev_priv, DPIO_DATA_LANE_A(port));
-               val = 0;
-               if (pipe)
-                       val |= (1<<21);
-               else
-                       val &= ~(1<<21);
-               val |= 0x001000c4;
-               vlv_dpio_write(dev_priv, DPIO_DATA_CHANNEL(port), val);
+       val = vlv_dpio_read(dev_priv, DPIO_DATA_LANE_A(port));
+       val = 0;
+       if (pipe)
+               val |= (1<<21);
+       else
+               val &= ~(1<<21);
+       val |= 0x001000c4;
+       vlv_dpio_write(dev_priv, DPIO_DATA_CHANNEL(port), val);
+       vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF0(port), 0x00760018);
+       vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF8(port), 0x00400888);
 
-               vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF0(port),
-                                0x00760018);
-               vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF8(port),
-                                0x00400888);
-       }
+       mutex_unlock(&dev_priv->dpio_lock);
+
+       intel_enable_dp(encoder);
+
+       vlv_wait_port_ready(dev_priv, port);
 }
 
 static void intel_dp_pre_pll_enable(struct intel_encoder *encoder)
@@ -1465,6 +1757,7 @@ static void intel_dp_pre_pll_enable(struct intel_encoder *encoder)
                return;
 
        /* Program Tx lane resets to default */
+       mutex_lock(&dev_priv->dpio_lock);
        vlv_dpio_write(dev_priv, DPIO_PCS_TX(port),
                         DPIO_PCS_TX_LANE2_RESET |
                         DPIO_PCS_TX_LANE1_RESET);
@@ -1478,6 +1771,7 @@ static void intel_dp_pre_pll_enable(struct intel_encoder *encoder)
        vlv_dpio_write(dev_priv, DPIO_PCS_STAGGER1(port), 0x00750f00);
        vlv_dpio_write(dev_priv, DPIO_TX_CTL(port), 0x00001500);
        vlv_dpio_write(dev_priv, DPIO_TX_LANE(port), 0x40400000);
+       mutex_unlock(&dev_priv->dpio_lock);
 }
 
 /*
@@ -1689,6 +1983,7 @@ static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
                return 0;
        }
 
+       mutex_lock(&dev_priv->dpio_lock);
        vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port), 0x00000000);
        vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL4(port), demph_reg_value);
        vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL2(port),
@@ -1697,6 +1992,7 @@ static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
        vlv_dpio_write(dev_priv, DPIO_PCS_STAGGER0(port), 0x00030000);
        vlv_dpio_write(dev_priv, DPIO_PCS_CTL_OVER1(port), preemph_reg_value);
        vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port), 0x80000000);
+       mutex_unlock(&dev_priv->dpio_lock);
 
        return 0;
 }
@@ -2030,7 +2326,6 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
        struct drm_device *dev = encoder->dev;
        int i;
        uint8_t voltage;
-       bool clock_recovery = false;
        int voltage_tries, loop_tries;
        uint32_t DP = intel_dp->DP;
 
@@ -2048,7 +2343,6 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
        voltage = 0xff;
        voltage_tries = 0;
        loop_tries = 0;
-       clock_recovery = false;
        for (;;) {
                /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
                uint8_t     link_status[DP_LINK_STATUS_SIZE];
@@ -2069,7 +2363,6 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
 
                if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
                        DRM_DEBUG_KMS("clock recovery OK\n");
-                       clock_recovery = true;
                        break;
                }
 
@@ -2275,6 +2568,13 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
        if (intel_dp->dpcd[DP_DPCD_REV] == 0)
                return false; /* DPCD not present */
 
+       /* Check if the panel supports PSR */
+       memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
+       intel_dp_aux_native_read_retry(intel_dp, DP_PSR_SUPPORT,
+                                      intel_dp->psr_dpcd,
+                                      sizeof(intel_dp->psr_dpcd));
+       if (is_edp_psr(intel_dp))
+               DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
        if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
              DP_DWN_STRM_PORT_PRESENT))
                return true; /* native DP sink */
@@ -2542,6 +2842,9 @@ intel_dp_detect(struct drm_connector *connector, bool force)
        enum drm_connector_status status;
        struct edid *edid = NULL;
 
+       DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+                     connector->base.id, drm_get_connector_name(connector));
+
        intel_dp->has_audio = false;
 
        if (HAS_PCH_SPLIT(dev))
@@ -2735,10 +3038,6 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder)
        kfree(intel_dig_port);
 }
 
-static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
-       .mode_set = intel_dp_mode_set,
-};
-
 static const struct drm_connector_funcs intel_dp_connector_funcs = {
        .dpms = intel_connector_dpms,
        .detect = intel_dp_detect,
@@ -3166,6 +3465,8 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
        WARN(error, "intel_dp_i2c_init failed with error %d for port %c\n",
             error, port_name(port));
 
+       intel_dp->psr_setup_done = false;
+
        if (!intel_edp_init_connector(intel_dp, intel_connector)) {
                i2c_del_adapter(&intel_dp->adapter);
                if (is_edp(intel_dp)) {
@@ -3216,17 +3517,21 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
 
        drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
                         DRM_MODE_ENCODER_TMDS);
-       drm_encoder_helper_add(&intel_encoder->base, &intel_dp_helper_funcs);
 
        intel_encoder->compute_config = intel_dp_compute_config;
-       intel_encoder->enable = intel_enable_dp;
-       intel_encoder->pre_enable = intel_pre_enable_dp;
+       intel_encoder->mode_set = intel_dp_mode_set;
        intel_encoder->disable = intel_disable_dp;
        intel_encoder->post_disable = intel_post_disable_dp;
        intel_encoder->get_hw_state = intel_dp_get_hw_state;
        intel_encoder->get_config = intel_dp_get_config;
-       if (IS_VALLEYVIEW(dev))
+       if (IS_VALLEYVIEW(dev)) {
                intel_encoder->pre_pll_enable = intel_dp_pre_pll_enable;
+               intel_encoder->pre_enable = vlv_pre_enable_dp;
+               intel_encoder->enable = vlv_enable_dp;
+       } else {
+               intel_encoder->pre_enable = intel_pre_enable_dp;
+               intel_encoder->enable = intel_enable_dp;
+       }
 
        intel_dig_port->port = port;
        intel_dig_port->dp.output_reg = output_reg;
index b7d6e09456ce372f8a87e8f3b1ebd75df1779e70..8222f2426b47c8bc6270871a1a727b26ab1b4aea 100644 (file)
@@ -26,6 +26,7 @@
 #define __INTEL_DRV_H__
 
 #include <linux/i2c.h>
+#include <linux/hdmi.h>
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
 #include <drm/drm_crtc.h>
@@ -208,10 +209,6 @@ struct intel_crtc_config {
 
        struct drm_display_mode requested_mode;
        struct drm_display_mode adjusted_mode;
-       /* This flag must be set by the encoder's compute_config callback if it
-        * changes the crtc timings in the mode to prevent the crtc fixup from
-        * overwriting them.  Currently only lvds needs that. */
-       bool timings_set;
        /* Whether to set up the PCH/FDI. Note that we never allow sharing
         * between pch encoders and cpu encoders. */
        bool has_pch_encoder;
@@ -334,6 +331,13 @@ struct intel_crtc {
        bool pch_fifo_underrun_disabled;
 };
 
+struct intel_plane_wm_parameters {
+       uint32_t horiz_pixels;
+       uint8_t bytes_per_pixel;
+       bool enabled;
+       bool scaled;
+};
+
 struct intel_plane {
        struct drm_plane base;
        int plane;
@@ -352,20 +356,18 @@ struct intel_plane {
         * as the other pieces of the struct may not reflect the values we want
         * for the watermark calculations. Currently only Haswell uses this.
         */
-       struct {
-               bool enable;
-               uint8_t bytes_per_pixel;
-               uint32_t horiz_pixels;
-       } wm;
+       struct intel_plane_wm_parameters wm;
 
        void (*update_plane)(struct drm_plane *plane,
+                            struct drm_crtc *crtc,
                             struct drm_framebuffer *fb,
                             struct drm_i915_gem_object *obj,
                             int crtc_x, int crtc_y,
                             unsigned int crtc_w, unsigned int crtc_h,
                             uint32_t x, uint32_t y,
                             uint32_t src_w, uint32_t src_h);
-       void (*disable_plane)(struct drm_plane *plane);
+       void (*disable_plane)(struct drm_plane *plane,
+                             struct drm_crtc *crtc);
        int (*update_colorkey)(struct drm_plane *plane,
                               struct drm_intel_sprite_colorkey *key);
        void (*get_colorkey)(struct drm_plane *plane,
@@ -397,66 +399,6 @@ struct cxsr_latency {
 #define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
 #define to_intel_plane(x) container_of(x, struct intel_plane, base)
 
-#define DIP_HEADER_SIZE        5
-
-#define DIP_TYPE_AVI    0x82
-#define DIP_VERSION_AVI 0x2
-#define DIP_LEN_AVI     13
-#define DIP_AVI_PR_1    0
-#define DIP_AVI_PR_2    1
-#define DIP_AVI_RGB_QUANT_RANGE_DEFAULT        (0 << 2)
-#define DIP_AVI_RGB_QUANT_RANGE_LIMITED        (1 << 2)
-#define DIP_AVI_RGB_QUANT_RANGE_FULL   (2 << 2)
-
-#define DIP_TYPE_SPD   0x83
-#define DIP_VERSION_SPD        0x1
-#define DIP_LEN_SPD    25
-#define DIP_SPD_UNKNOWN        0
-#define DIP_SPD_DSTB   0x1
-#define DIP_SPD_DVDP   0x2
-#define DIP_SPD_DVHS   0x3
-#define DIP_SPD_HDDVR  0x4
-#define DIP_SPD_DVC    0x5
-#define DIP_SPD_DSC    0x6
-#define DIP_SPD_VCD    0x7
-#define DIP_SPD_GAME   0x8
-#define DIP_SPD_PC     0x9
-#define DIP_SPD_BD     0xa
-#define DIP_SPD_SCD    0xb
-
-struct dip_infoframe {
-       uint8_t type;           /* HB0 */
-       uint8_t ver;            /* HB1 */
-       uint8_t len;            /* HB2 - body len, not including checksum */
-       uint8_t ecc;            /* Header ECC */
-       uint8_t checksum;       /* PB0 */
-       union {
-               struct {
-                       /* PB1 - Y 6:5, A 4:4, B 3:2, S 1:0 */
-                       uint8_t Y_A_B_S;
-                       /* PB2 - C 7:6, M 5:4, R 3:0 */
-                       uint8_t C_M_R;
-                       /* PB3 - ITC 7:7, EC 6:4, Q 3:2, SC 1:0 */
-                       uint8_t ITC_EC_Q_SC;
-                       /* PB4 - VIC 6:0 */
-                       uint8_t VIC;
-                       /* PB5 - YQ 7:6, CN 5:4, PR 3:0 */
-                       uint8_t YQ_CN_PR;
-                       /* PB6 to PB13 */
-                       uint16_t top_bar_end;
-                       uint16_t bottom_bar_start;
-                       uint16_t left_bar_end;
-                       uint16_t right_bar_start;
-               } __attribute__ ((packed)) avi;
-               struct {
-                       uint8_t vn[8];
-                       uint8_t pd[16];
-                       uint8_t sdi;
-               } __attribute__ ((packed)) spd;
-               uint8_t payload[27];
-       } __attribute__ ((packed)) body;
-} __attribute__((packed));
-
 struct intel_hdmi {
        u32 hdmi_reg;
        int ddc_bus;
@@ -467,7 +409,8 @@ struct intel_hdmi {
        enum hdmi_force_audio force_audio;
        bool rgb_quant_range_selectable;
        void (*write_infoframe)(struct drm_encoder *encoder,
-                               struct dip_infoframe *frame);
+                               enum hdmi_infoframe_type type,
+                               const uint8_t *frame, ssize_t len);
        void (*set_infoframes)(struct drm_encoder *encoder,
                               struct drm_display_mode *adjusted_mode);
 };
@@ -487,6 +430,7 @@ struct intel_dp {
        uint8_t link_bw;
        uint8_t lane_count;
        uint8_t dpcd[DP_RECEIVER_CAP_SIZE];
+       uint8_t psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE];
        uint8_t downstream_ports[DP_MAX_DOWNSTREAM_PORTS];
        struct i2c_adapter adapter;
        struct i2c_algo_dp_aux_data algo;
@@ -498,6 +442,7 @@ struct intel_dp {
        int backlight_off_delay;
        struct delayed_work panel_vdd_work;
        bool want_panel_vdd;
+       bool psr_setup_done;
        struct intel_connector *attached_connector;
 };
 
@@ -549,13 +494,6 @@ struct intel_unpin_work {
        bool enable_stall_check;
 };
 
-struct intel_fbc_work {
-       struct delayed_work work;
-       struct drm_crtc *crtc;
-       struct drm_framebuffer *fb;
-       int interval;
-};
-
 int intel_pch_rawclk(struct drm_device *dev);
 
 int intel_connector_update_modes(struct drm_connector *connector,
@@ -574,7 +512,6 @@ extern void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
 extern struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
 extern bool intel_hdmi_compute_config(struct intel_encoder *encoder,
                                      struct intel_crtc_config *pipe_config);
-extern void intel_dip_infoframe_csum(struct dip_infoframe *avi_if);
 extern bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg,
                            bool is_sdvob);
 extern void intel_dvo_init(struct drm_device *dev);
@@ -639,14 +576,10 @@ struct intel_set_config {
        bool mode_changed;
 };
 
-extern int intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode,
-                         int x, int y, struct drm_framebuffer *old_fb);
-extern void intel_modeset_disable(struct drm_device *dev);
 extern void intel_crtc_restore_mode(struct drm_crtc *crtc);
 extern void intel_crtc_load_lut(struct drm_crtc *crtc);
 extern void intel_crtc_update_dpms(struct drm_crtc *crtc);
 extern void intel_encoder_destroy(struct drm_encoder *encoder);
-extern void intel_encoder_dpms(struct intel_encoder *encoder, int mode);
 extern void intel_connector_dpms(struct drm_connector *, int mode);
 extern bool intel_connector_get_hw_state(struct intel_connector *connector);
 extern void intel_modeset_check_state(struct drm_device *dev);
@@ -712,12 +645,10 @@ extern bool intel_get_load_detect_pipe(struct drm_connector *connector,
 extern void intel_release_load_detect_pipe(struct drm_connector *connector,
                                           struct intel_load_detect_pipe *old);
 
-extern void intelfb_restore(void);
 extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
                                    u16 blue, int regno);
 extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
                                    u16 *blue, int regno);
-extern void intel_enable_clock_gating(struct drm_device *dev);
 
 extern int intel_pin_and_fence_fb_obj(struct drm_device *dev,
                                      struct drm_i915_gem_object *obj,
@@ -728,6 +659,7 @@ extern int intel_framebuffer_init(struct drm_device *dev,
                                  struct intel_framebuffer *ifb,
                                  struct drm_mode_fb_cmd2 *mode_cmd,
                                  struct drm_i915_gem_object *obj);
+extern void intel_framebuffer_fini(struct intel_framebuffer *fb);
 extern int intel_fbdev_init(struct drm_device *dev);
 extern void intel_fbdev_initial_config(struct drm_device *dev);
 extern void intel_fbdev_fini(struct drm_device *dev);
@@ -747,6 +679,22 @@ extern int intel_overlay_attrs(struct drm_device *dev, void *data,
 extern void intel_fb_output_poll_changed(struct drm_device *dev);
 extern void intel_fb_restore_mode(struct drm_device *dev);
 
+struct intel_shared_dpll *
+intel_crtc_to_shared_dpll(struct intel_crtc *crtc);
+
+void assert_shared_dpll(struct drm_i915_private *dev_priv,
+                       struct intel_shared_dpll *pll,
+                       bool state);
+#define assert_shared_dpll_enabled(d, p) assert_shared_dpll(d, p, true)
+#define assert_shared_dpll_disabled(d, p) assert_shared_dpll(d, p, false)
+void assert_pll(struct drm_i915_private *dev_priv,
+               enum pipe pipe, bool state);
+#define assert_pll_enabled(d, p) assert_pll(d, p, true)
+#define assert_pll_disabled(d, p) assert_pll(d, p, false)
+void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
+                      enum pipe pipe, bool state);
+#define assert_fdi_rx_pll_enabled(d, p) assert_fdi_rx_pll(d, p, true)
+#define assert_fdi_rx_pll_disabled(d, p) assert_fdi_rx_pll(d, p, false)
 extern void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
                        bool state);
 #define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
@@ -762,9 +710,10 @@ extern void intel_ddi_init(struct drm_device *dev, enum port port);
 
 /* For use by IVB LP watermark workaround in intel_sprite.c */
 extern void intel_update_watermarks(struct drm_device *dev);
-extern void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
-                                          uint32_t sprite_width,
-                                          int pixel_size, bool enable);
+extern void intel_update_sprite_watermarks(struct drm_plane *plane,
+                                          struct drm_crtc *crtc,
+                                          uint32_t sprite_width, int pixel_size,
+                                          bool enabled, bool scaled);
 
 extern unsigned long intel_gen4_compute_page_offset(int *x, int *y,
                                                    unsigned int tiling_mode,
@@ -780,7 +729,6 @@ extern int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
 extern void intel_init_pm(struct drm_device *dev);
 /* FBC */
 extern bool intel_fbc_enabled(struct drm_device *dev);
-extern void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval);
 extern void intel_update_fbc(struct drm_device *dev);
 /* IPS */
 extern void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
@@ -796,7 +744,6 @@ extern void intel_init_power_well(struct drm_device *dev);
 extern void intel_set_power_well(struct drm_device *dev, bool enable);
 extern void intel_enable_gt_powersave(struct drm_device *dev);
 extern void intel_disable_gt_powersave(struct drm_device *dev);
-extern void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv);
 extern void ironlake_teardown_rc6(struct drm_device *dev);
 
 extern bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
@@ -825,4 +772,17 @@ extern bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
                                                 enum transcoder pch_transcoder,
                                                 bool enable);
 
+extern void intel_edp_psr_enable(struct intel_dp *intel_dp);
+extern void intel_edp_psr_disable(struct intel_dp *intel_dp);
+extern void intel_edp_psr_update(struct drm_device *dev);
+extern void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
+                             bool switch_to_fclk, bool allow_power_down);
+extern void hsw_restore_lcpll(struct drm_i915_private *dev_priv);
+extern void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
+extern void ilk_disable_gt_irq(struct drm_i915_private *dev_priv,
+                              uint32_t mask);
+extern void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
+extern void snb_disable_pm_irq(struct drm_i915_private *dev_priv,
+                              uint32_t mask);
+
 #endif /* __INTEL_DRV_H__ */
index eb2020eb2b7ea1c60dde15062c426182e30d9d05..406303b509c1c0afeed4be9379060e5417e212e0 100644 (file)
@@ -100,15 +100,14 @@ struct intel_dvo {
        bool panel_wants_dither;
 };
 
-static struct intel_dvo *enc_to_intel_dvo(struct drm_encoder *encoder)
+static struct intel_dvo *enc_to_dvo(struct intel_encoder *encoder)
 {
-       return container_of(encoder, struct intel_dvo, base.base);
+       return container_of(encoder, struct intel_dvo, base);
 }
 
 static struct intel_dvo *intel_attached_dvo(struct drm_connector *connector)
 {
-       return container_of(intel_attached_encoder(connector),
-                           struct intel_dvo, base);
+       return enc_to_dvo(intel_attached_encoder(connector));
 }
 
 static bool intel_dvo_connector_get_hw_state(struct intel_connector *connector)
@@ -123,7 +122,7 @@ static bool intel_dvo_get_hw_state(struct intel_encoder *encoder,
 {
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_dvo *intel_dvo = enc_to_intel_dvo(&encoder->base);
+       struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
        u32 tmp;
 
        tmp = I915_READ(intel_dvo->dev.dvo_reg);
@@ -140,7 +139,7 @@ static void intel_dvo_get_config(struct intel_encoder *encoder,
                                 struct intel_crtc_config *pipe_config)
 {
        struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
-       struct intel_dvo *intel_dvo = enc_to_intel_dvo(&encoder->base);
+       struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
        u32 tmp, flags = 0;
 
        tmp = I915_READ(intel_dvo->dev.dvo_reg);
@@ -159,7 +158,7 @@ static void intel_dvo_get_config(struct intel_encoder *encoder,
 static void intel_disable_dvo(struct intel_encoder *encoder)
 {
        struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
-       struct intel_dvo *intel_dvo = enc_to_intel_dvo(&encoder->base);
+       struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
        u32 dvo_reg = intel_dvo->dev.dvo_reg;
        u32 temp = I915_READ(dvo_reg);
 
@@ -171,7 +170,7 @@ static void intel_disable_dvo(struct intel_encoder *encoder)
 static void intel_enable_dvo(struct intel_encoder *encoder)
 {
        struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
-       struct intel_dvo *intel_dvo = enc_to_intel_dvo(&encoder->base);
+       struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
        u32 dvo_reg = intel_dvo->dev.dvo_reg;
        u32 temp = I915_READ(dvo_reg);
 
@@ -241,11 +240,11 @@ static int intel_dvo_mode_valid(struct drm_connector *connector,
        return intel_dvo->dev.dev_ops->mode_valid(&intel_dvo->dev, mode);
 }
 
-static bool intel_dvo_mode_fixup(struct drm_encoder *encoder,
-                                const struct drm_display_mode *mode,
-                                struct drm_display_mode *adjusted_mode)
+static bool intel_dvo_compute_config(struct intel_encoder *encoder,
+                                    struct intel_crtc_config *pipe_config)
 {
-       struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
+       struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
+       struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
 
        /* If we have timings from the BIOS for the panel, put them in
         * to the adjusted mode.  The CRTC will be set up for this mode,
@@ -267,23 +266,23 @@ static bool intel_dvo_mode_fixup(struct drm_encoder *encoder,
        }
 
        if (intel_dvo->dev.dev_ops->mode_fixup)
-               return intel_dvo->dev.dev_ops->mode_fixup(&intel_dvo->dev, mode, adjusted_mode);
+               return intel_dvo->dev.dev_ops->mode_fixup(&intel_dvo->dev,
+                                                         &pipe_config->requested_mode,
+                                                         adjusted_mode);
 
        return true;
 }
 
-static void intel_dvo_mode_set(struct drm_encoder *encoder,
-                              struct drm_display_mode *mode,
-                              struct drm_display_mode *adjusted_mode)
+static void intel_dvo_mode_set(struct intel_encoder *encoder)
 {
-       struct drm_device *dev = encoder->dev;
+       struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
-       struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
-       int pipe = intel_crtc->pipe;
+       struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+       struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode;
+       struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
+       int pipe = crtc->pipe;
        u32 dvo_val;
        u32 dvo_reg = intel_dvo->dev.dvo_reg, dvo_srcdim_reg;
-       int dpll_reg = DPLL(pipe);
 
        switch (dvo_reg) {
        case DVOA:
@@ -298,7 +297,9 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder,
                break;
        }
 
-       intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev, mode, adjusted_mode);
+       intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev,
+                                        &crtc->config.requested_mode,
+                                        adjusted_mode);
 
        /* Save the data order, since I don't know what it should be set to. */
        dvo_val = I915_READ(dvo_reg) &
@@ -314,8 +315,6 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder,
        if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
                dvo_val |= DVO_VSYNC_ACTIVE_HIGH;
 
-       I915_WRITE(dpll_reg, I915_READ(dpll_reg) | DPLL_DVO_HIGH_SPEED);
-
        /*I915_WRITE(DVOB_SRCDIM,
          (adjusted_mode->hdisplay << DVO_SRCDIM_HORIZONTAL_SHIFT) |
          (adjusted_mode->VDisplay << DVO_SRCDIM_VERTICAL_SHIFT));*/
@@ -335,6 +334,8 @@ static enum drm_connector_status
 intel_dvo_detect(struct drm_connector *connector, bool force)
 {
        struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
+       DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+                     connector->base.id, drm_get_connector_name(connector));
        return intel_dvo->dev.dev_ops->detect(&intel_dvo->dev);
 }
 
@@ -372,11 +373,6 @@ static void intel_dvo_destroy(struct drm_connector *connector)
        kfree(connector);
 }
 
-static const struct drm_encoder_helper_funcs intel_dvo_helper_funcs = {
-       .mode_fixup = intel_dvo_mode_fixup,
-       .mode_set = intel_dvo_mode_set,
-};
-
 static const struct drm_connector_funcs intel_dvo_connector_funcs = {
        .dpms = intel_dvo_dpms,
        .detect = intel_dvo_detect,
@@ -392,7 +388,7 @@ static const struct drm_connector_helper_funcs intel_dvo_connector_helper_funcs
 
 static void intel_dvo_enc_destroy(struct drm_encoder *encoder)
 {
-       struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
+       struct intel_dvo *intel_dvo = enc_to_dvo(to_intel_encoder(encoder));
 
        if (intel_dvo->dev.dev_ops->destroy)
                intel_dvo->dev.dev_ops->destroy(&intel_dvo->dev);
@@ -471,6 +467,8 @@ void intel_dvo_init(struct drm_device *dev)
        intel_encoder->enable = intel_enable_dvo;
        intel_encoder->get_hw_state = intel_dvo_get_hw_state;
        intel_encoder->get_config = intel_dvo_get_config;
+       intel_encoder->compute_config = intel_dvo_compute_config;
+       intel_encoder->mode_set = intel_dvo_mode_set;
        intel_connector->get_hw_state = intel_dvo_connector_get_hw_state;
 
        /* Now, try to find a controller */
@@ -537,9 +535,6 @@ void intel_dvo_init(struct drm_device *dev)
                connector->interlace_allowed = false;
                connector->doublescan_allowed = false;
 
-               drm_encoder_helper_add(&intel_encoder->base,
-                                      &intel_dvo_helper_funcs);
-
                intel_connector_attach_encoder(intel_connector, intel_encoder);
                if (dvo->type == INTEL_DVO_CHIP_LVDS) {
                        /* For our LVDS chipsets, we should hopefully be able
index dff669e2387f4e5aa8fd84433935e4603ec43992..bc2100007b21039ff628469e36e4cc48883c833a 100644 (file)
@@ -139,11 +139,11 @@ static int intelfb_create(struct drm_fb_helper *helper,
        info->apertures->ranges[0].base = dev->mode_config.fb_base;
        info->apertures->ranges[0].size = dev_priv->gtt.mappable_end;
 
-       info->fix.smem_start = dev->mode_config.fb_base + obj->gtt_offset;
+       info->fix.smem_start = dev->mode_config.fb_base + i915_gem_obj_ggtt_offset(obj);
        info->fix.smem_len = size;
 
        info->screen_base =
-               ioremap_wc(dev_priv->gtt.mappable_base + obj->gtt_offset,
+               ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
                           size);
        if (!info->screen_base) {
                ret = -ENOSPC;
@@ -166,9 +166,9 @@ static int intelfb_create(struct drm_fb_helper *helper,
 
        /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
 
-       DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n",
+       DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08lx, bo %p\n",
                      fb->width, fb->height,
-                     obj->gtt_offset, obj);
+                     i915_gem_obj_ggtt_offset(obj), obj);
 
 
        mutex_unlock(&dev->struct_mutex);
@@ -193,26 +193,21 @@ static struct drm_fb_helper_funcs intel_fb_helper_funcs = {
 static void intel_fbdev_destroy(struct drm_device *dev,
                                struct intel_fbdev *ifbdev)
 {
-       struct fb_info *info;
-       struct intel_framebuffer *ifb = &ifbdev->ifb;
-
        if (ifbdev->helper.fbdev) {
-               info = ifbdev->helper.fbdev;
+               struct fb_info *info = ifbdev->helper.fbdev;
+
                unregister_framebuffer(info);
                iounmap(info->screen_base);
                if (info->cmap.len)
                        fb_dealloc_cmap(&info->cmap);
+
                framebuffer_release(info);
        }
 
        drm_fb_helper_fini(&ifbdev->helper);
 
-       drm_framebuffer_unregister_private(&ifb->base);
-       drm_framebuffer_cleanup(&ifb->base);
-       if (ifb->obj) {
-               drm_gem_object_unreference_unlocked(&ifb->obj->base);
-               ifb->obj = NULL;
-       }
+       drm_framebuffer_unregister_private(&ifbdev->ifb.base);
+       intel_framebuffer_fini(&ifbdev->ifb);
 }
 
 int intel_fbdev_init(struct drm_device *dev)
index 2fd3fd5b943ee57aff93415c707798d9069cea7e..a619d94351072a135fa28aef420b9a2f48a6667e 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/i2c.h>
 #include <linux/slab.h>
 #include <linux/delay.h>
+#include <linux/hdmi.h>
 #include <drm/drmP.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_edid.h>
@@ -66,89 +67,75 @@ static struct intel_hdmi *intel_attached_hdmi(struct drm_connector *connector)
        return enc_to_intel_hdmi(&intel_attached_encoder(connector)->base);
 }
 
-void intel_dip_infoframe_csum(struct dip_infoframe *frame)
+static u32 g4x_infoframe_index(enum hdmi_infoframe_type type)
 {
-       uint8_t *data = (uint8_t *)frame;
-       uint8_t sum = 0;
-       unsigned i;
-
-       frame->checksum = 0;
-       frame->ecc = 0;
-
-       for (i = 0; i < frame->len + DIP_HEADER_SIZE; i++)
-               sum += data[i];
-
-       frame->checksum = 0x100 - sum;
-}
-
-static u32 g4x_infoframe_index(struct dip_infoframe *frame)
-{
-       switch (frame->type) {
-       case DIP_TYPE_AVI:
+       switch (type) {
+       case HDMI_INFOFRAME_TYPE_AVI:
                return VIDEO_DIP_SELECT_AVI;
-       case DIP_TYPE_SPD:
+       case HDMI_INFOFRAME_TYPE_SPD:
                return VIDEO_DIP_SELECT_SPD;
        default:
-               DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type);
+               DRM_DEBUG_DRIVER("unknown info frame type %d\n", type);
                return 0;
        }
 }
 
-static u32 g4x_infoframe_enable(struct dip_infoframe *frame)
+static u32 g4x_infoframe_enable(enum hdmi_infoframe_type type)
 {
-       switch (frame->type) {
-       case DIP_TYPE_AVI:
+       switch (type) {
+       case HDMI_INFOFRAME_TYPE_AVI:
                return VIDEO_DIP_ENABLE_AVI;
-       case DIP_TYPE_SPD:
+       case HDMI_INFOFRAME_TYPE_SPD:
                return VIDEO_DIP_ENABLE_SPD;
        default:
-               DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type);
+               DRM_DEBUG_DRIVER("unknown info frame type %d\n", type);
                return 0;
        }
 }
 
-static u32 hsw_infoframe_enable(struct dip_infoframe *frame)
+static u32 hsw_infoframe_enable(enum hdmi_infoframe_type type)
 {
-       switch (frame->type) {
-       case DIP_TYPE_AVI:
+       switch (type) {
+       case HDMI_INFOFRAME_TYPE_AVI:
                return VIDEO_DIP_ENABLE_AVI_HSW;
-       case DIP_TYPE_SPD:
+       case HDMI_INFOFRAME_TYPE_SPD:
                return VIDEO_DIP_ENABLE_SPD_HSW;
        default:
-               DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type);
+               DRM_DEBUG_DRIVER("unknown info frame type %d\n", type);
                return 0;
        }
 }
 
-static u32 hsw_infoframe_data_reg(struct dip_infoframe *frame,
+static u32 hsw_infoframe_data_reg(enum hdmi_infoframe_type type,
                                  enum transcoder cpu_transcoder)
 {
-       switch (frame->type) {
-       case DIP_TYPE_AVI:
+       switch (type) {
+       case HDMI_INFOFRAME_TYPE_AVI:
                return HSW_TVIDEO_DIP_AVI_DATA(cpu_transcoder);
-       case DIP_TYPE_SPD:
+       case HDMI_INFOFRAME_TYPE_SPD:
                return HSW_TVIDEO_DIP_SPD_DATA(cpu_transcoder);
        default:
-               DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type);
+               DRM_DEBUG_DRIVER("unknown info frame type %d\n", type);
                return 0;
        }
 }
 
 static void g4x_write_infoframe(struct drm_encoder *encoder,
-                               struct dip_infoframe *frame)
+                               enum hdmi_infoframe_type type,
+                               const uint8_t *frame, ssize_t len)
 {
        uint32_t *data = (uint32_t *)frame;
        struct drm_device *dev = encoder->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 val = I915_READ(VIDEO_DIP_CTL);
-       unsigned i, len = DIP_HEADER_SIZE + frame->len;
+       int i;
 
        WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n");
 
        val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
-       val |= g4x_infoframe_index(frame);
+       val |= g4x_infoframe_index(type);
 
-       val &= ~g4x_infoframe_enable(frame);
+       val &= ~g4x_infoframe_enable(type);
 
        I915_WRITE(VIDEO_DIP_CTL, val);
 
@@ -162,7 +149,7 @@ static void g4x_write_infoframe(struct drm_encoder *encoder,
                I915_WRITE(VIDEO_DIP_DATA, 0);
        mmiowb();
 
-       val |= g4x_infoframe_enable(frame);
+       val |= g4x_infoframe_enable(type);
        val &= ~VIDEO_DIP_FREQ_MASK;
        val |= VIDEO_DIP_FREQ_VSYNC;
 
@@ -171,22 +158,22 @@ static void g4x_write_infoframe(struct drm_encoder *encoder,
 }
 
 static void ibx_write_infoframe(struct drm_encoder *encoder,
-                               struct dip_infoframe *frame)
+                               enum hdmi_infoframe_type type,
+                               const uint8_t *frame, ssize_t len)
 {
        uint32_t *data = (uint32_t *)frame;
        struct drm_device *dev = encoder->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
-       int reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
-       unsigned i, len = DIP_HEADER_SIZE + frame->len;
+       int i, reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
        u32 val = I915_READ(reg);
 
        WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n");
 
        val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
-       val |= g4x_infoframe_index(frame);
+       val |= g4x_infoframe_index(type);
 
-       val &= ~g4x_infoframe_enable(frame);
+       val &= ~g4x_infoframe_enable(type);
 
        I915_WRITE(reg, val);
 
@@ -200,7 +187,7 @@ static void ibx_write_infoframe(struct drm_encoder *encoder,
                I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
        mmiowb();
 
-       val |= g4x_infoframe_enable(frame);
+       val |= g4x_infoframe_enable(type);
        val &= ~VIDEO_DIP_FREQ_MASK;
        val |= VIDEO_DIP_FREQ_VSYNC;
 
@@ -209,25 +196,25 @@ static void ibx_write_infoframe(struct drm_encoder *encoder,
 }
 
 static void cpt_write_infoframe(struct drm_encoder *encoder,
-                               struct dip_infoframe *frame)
+                               enum hdmi_infoframe_type type,
+                               const uint8_t *frame, ssize_t len)
 {
        uint32_t *data = (uint32_t *)frame;
        struct drm_device *dev = encoder->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
-       int reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
-       unsigned i, len = DIP_HEADER_SIZE + frame->len;
+       int i, reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
        u32 val = I915_READ(reg);
 
        WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n");
 
        val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
-       val |= g4x_infoframe_index(frame);
+       val |= g4x_infoframe_index(type);
 
        /* The DIP control register spec says that we need to update the AVI
         * infoframe without clearing its enable bit */
-       if (frame->type != DIP_TYPE_AVI)
-               val &= ~g4x_infoframe_enable(frame);
+       if (type != HDMI_INFOFRAME_TYPE_AVI)
+               val &= ~g4x_infoframe_enable(type);
 
        I915_WRITE(reg, val);
 
@@ -241,7 +228,7 @@ static void cpt_write_infoframe(struct drm_encoder *encoder,
                I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
        mmiowb();
 
-       val |= g4x_infoframe_enable(frame);
+       val |= g4x_infoframe_enable(type);
        val &= ~VIDEO_DIP_FREQ_MASK;
        val |= VIDEO_DIP_FREQ_VSYNC;
 
@@ -250,22 +237,22 @@ static void cpt_write_infoframe(struct drm_encoder *encoder,
 }
 
 static void vlv_write_infoframe(struct drm_encoder *encoder,
-                                    struct dip_infoframe *frame)
+                               enum hdmi_infoframe_type type,
+                               const uint8_t *frame, ssize_t len)
 {
        uint32_t *data = (uint32_t *)frame;
        struct drm_device *dev = encoder->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
-       int reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
-       unsigned i, len = DIP_HEADER_SIZE + frame->len;
+       int i, reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
        u32 val = I915_READ(reg);
 
        WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n");
 
        val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
-       val |= g4x_infoframe_index(frame);
+       val |= g4x_infoframe_index(type);
 
-       val &= ~g4x_infoframe_enable(frame);
+       val &= ~g4x_infoframe_enable(type);
 
        I915_WRITE(reg, val);
 
@@ -279,7 +266,7 @@ static void vlv_write_infoframe(struct drm_encoder *encoder,
                I915_WRITE(VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
        mmiowb();
 
-       val |= g4x_infoframe_enable(frame);
+       val |= g4x_infoframe_enable(type);
        val &= ~VIDEO_DIP_FREQ_MASK;
        val |= VIDEO_DIP_FREQ_VSYNC;
 
@@ -288,21 +275,24 @@ static void vlv_write_infoframe(struct drm_encoder *encoder,
 }
 
 static void hsw_write_infoframe(struct drm_encoder *encoder,
-                               struct dip_infoframe *frame)
+                               enum hdmi_infoframe_type type,
+                               const uint8_t *frame, ssize_t len)
 {
        uint32_t *data = (uint32_t *)frame;
        struct drm_device *dev = encoder->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
        u32 ctl_reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config.cpu_transcoder);
-       u32 data_reg = hsw_infoframe_data_reg(frame, intel_crtc->config.cpu_transcoder);
-       unsigned int i, len = DIP_HEADER_SIZE + frame->len;
+       u32 data_reg;
+       int i;
        u32 val = I915_READ(ctl_reg);
 
+       data_reg = hsw_infoframe_data_reg(type,
+                                         intel_crtc->config.cpu_transcoder);
        if (data_reg == 0)
                return;
 
-       val &= ~hsw_infoframe_enable(frame);
+       val &= ~hsw_infoframe_enable(type);
        I915_WRITE(ctl_reg, val);
 
        mmiowb();
@@ -315,18 +305,48 @@ static void hsw_write_infoframe(struct drm_encoder *encoder,
                I915_WRITE(data_reg + i, 0);
        mmiowb();
 
-       val |= hsw_infoframe_enable(frame);
+       val |= hsw_infoframe_enable(type);
        I915_WRITE(ctl_reg, val);
        POSTING_READ(ctl_reg);
 }
 
-static void intel_set_infoframe(struct drm_encoder *encoder,
-                               struct dip_infoframe *frame)
+/*
+ * The data we write to the DIP data buffer registers is 1 byte bigger than the
+ * HDMI infoframe size because of an ECC/reserved byte at position 3 (starting
+ * at 0). It's also a byte used by DisplayPort so the same DIP registers can be
+ * used for both technologies.
+ *
+ * DW0: Reserved/ECC/DP | HB2 | HB1 | HB0
+ * DW1:       DB3       | DB2 | DB1 | DB0
+ * DW2:       DB7       | DB6 | DB5 | DB4
+ * DW3: ...
+ *
+ * (HB is Header Byte, DB is Data Byte)
+ *
+ * The hdmi pack() functions don't know about that hardware specific hole so we
+ * trick them by giving an offset into the buffer and moving back the header
+ * bytes by one.
+ */
+static void intel_write_infoframe(struct drm_encoder *encoder,
+                                 union hdmi_infoframe *frame)
 {
        struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+       uint8_t buffer[VIDEO_DIP_DATA_SIZE];
+       ssize_t len;
 
-       intel_dip_infoframe_csum(frame);
-       intel_hdmi->write_infoframe(encoder, frame);
+       /* see comment above for the reason for this offset */
+       len = hdmi_infoframe_pack(frame, buffer + 1, sizeof(buffer) - 1);
+       if (len < 0)
+               return;
+
+       /* Insert the 'hole' (see big comment above) at position 3 */
+       buffer[0] = buffer[1];
+       buffer[1] = buffer[2];
+       buffer[2] = buffer[3];
+       buffer[3] = 0;
+       len++;
+
+       intel_hdmi->write_infoframe(encoder, frame->any.type, buffer, len);
 }
 
 static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
@@ -334,40 +354,42 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
 {
        struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
        struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
-       struct dip_infoframe avi_if = {
-               .type = DIP_TYPE_AVI,
-               .ver = DIP_VERSION_AVI,
-               .len = DIP_LEN_AVI,
-       };
+       union hdmi_infoframe frame;
+       int ret;
 
-       if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
-               avi_if.body.avi.YQ_CN_PR |= DIP_AVI_PR_2;
+       ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
+                                                      adjusted_mode);
+       if (ret < 0) {
+               DRM_ERROR("couldn't fill AVI infoframe\n");
+               return;
+       }
 
        if (intel_hdmi->rgb_quant_range_selectable) {
                if (intel_crtc->config.limited_color_range)
-                       avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_LIMITED;
+                       frame.avi.quantization_range =
+                               HDMI_QUANTIZATION_RANGE_LIMITED;
                else
-                       avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_FULL;
+                       frame.avi.quantization_range =
+                               HDMI_QUANTIZATION_RANGE_FULL;
        }
 
-       avi_if.body.avi.VIC = drm_match_cea_mode(adjusted_mode);
-
-       intel_set_infoframe(encoder, &avi_if);
+       intel_write_infoframe(encoder, &frame);
 }
 
 static void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder)
 {
-       struct dip_infoframe spd_if;
+       union hdmi_infoframe frame;
+       int ret;
+
+       ret = hdmi_spd_infoframe_init(&frame.spd, "Intel", "Integrated gfx");
+       if (ret < 0) {
+               DRM_ERROR("couldn't fill SPD infoframe\n");
+               return;
+       }
 
-       memset(&spd_if, 0, sizeof(spd_if));
-       spd_if.type = DIP_TYPE_SPD;
-       spd_if.ver = DIP_VERSION_SPD;
-       spd_if.len = DIP_LEN_SPD;
-       strcpy(spd_if.body.spd.vn, "Intel");
-       strcpy(spd_if.body.spd.pd, "Integrated gfx");
-       spd_if.body.spd.sdi = DIP_SPD_PC;
+       frame.spd.sdi = HDMI_SPD_SDI_PC;
 
-       intel_set_infoframe(encoder, &spd_if);
+       intel_write_infoframe(encoder, &frame);
 }
 
 static void g4x_set_infoframes(struct drm_encoder *encoder,
@@ -591,14 +613,13 @@ static void hsw_set_infoframes(struct drm_encoder *encoder,
        intel_hdmi_set_spd_infoframe(encoder);
 }
 
-static void intel_hdmi_mode_set(struct drm_encoder *encoder,
-                               struct drm_display_mode *mode,
-                               struct drm_display_mode *adjusted_mode)
+static void intel_hdmi_mode_set(struct intel_encoder *encoder)
 {
-       struct drm_device *dev = encoder->dev;
+       struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
-       struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+       struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+       struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+       struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode;
        u32 hdmi_val;
 
        hdmi_val = SDVO_ENCODING_HDMI;
@@ -609,7 +630,7 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
        if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
                hdmi_val |= SDVO_HSYNC_ACTIVE_HIGH;
 
-       if (intel_crtc->config.pipe_bpp > 24)
+       if (crtc->config.pipe_bpp > 24)
                hdmi_val |= HDMI_COLOR_FORMAT_12bpc;
        else
                hdmi_val |= SDVO_COLOR_FORMAT_8bpc;
@@ -620,21 +641,21 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
 
        if (intel_hdmi->has_audio) {
                DRM_DEBUG_DRIVER("Enabling HDMI audio on pipe %c\n",
-                                pipe_name(intel_crtc->pipe));
+                                pipe_name(crtc->pipe));
                hdmi_val |= SDVO_AUDIO_ENABLE;
                hdmi_val |= HDMI_MODE_SELECT_HDMI;
-               intel_write_eld(encoder, adjusted_mode);
+               intel_write_eld(&encoder->base, adjusted_mode);
        }
 
        if (HAS_PCH_CPT(dev))
-               hdmi_val |= SDVO_PIPE_SEL_CPT(intel_crtc->pipe);
+               hdmi_val |= SDVO_PIPE_SEL_CPT(crtc->pipe);
        else
-               hdmi_val |= SDVO_PIPE_SEL(intel_crtc->pipe);
+               hdmi_val |= SDVO_PIPE_SEL(crtc->pipe);
 
        I915_WRITE(intel_hdmi->hdmi_reg, hdmi_val);
        POSTING_READ(intel_hdmi->hdmi_reg);
 
-       intel_hdmi->set_infoframes(encoder, adjusted_mode);
+       intel_hdmi->set_infoframes(&encoder->base, adjusted_mode);
 }
 
 static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
@@ -719,14 +740,10 @@ static void intel_enable_hdmi(struct intel_encoder *encoder)
                I915_WRITE(intel_hdmi->hdmi_reg, temp);
                POSTING_READ(intel_hdmi->hdmi_reg);
        }
+}
 
-       if (IS_VALLEYVIEW(dev)) {
-               struct intel_digital_port *dport =
-                       enc_to_dig_port(&encoder->base);
-               int channel = vlv_dport_to_channel(dport);
-
-               vlv_wait_port_ready(dev_priv, channel);
-       }
+static void vlv_enable_hdmi(struct intel_encoder *encoder)
+{
 }
 
 static void intel_disable_hdmi(struct intel_encoder *encoder)
@@ -879,6 +896,9 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
        struct edid *edid;
        enum drm_connector_status status = connector_status_disconnected;
 
+       DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+                     connector->base.id, drm_get_connector_name(connector));
+
        intel_hdmi->has_hdmi_sink = false;
        intel_hdmi->has_audio = false;
        intel_hdmi->rgb_quant_range_selectable = false;
@@ -1030,6 +1050,7 @@ static void intel_hdmi_pre_enable(struct intel_encoder *encoder)
                return;
 
        /* Enable clock channels for this port */
+       mutex_lock(&dev_priv->dpio_lock);
        val = vlv_dpio_read(dev_priv, DPIO_DATA_LANE_A(port));
        val = 0;
        if (pipe)
@@ -1060,6 +1081,11 @@ static void intel_hdmi_pre_enable(struct intel_encoder *encoder)
                         0x00760018);
        vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF8(port),
                         0x00400888);
+       mutex_unlock(&dev_priv->dpio_lock);
+
+       intel_enable_hdmi(encoder);
+
+       vlv_wait_port_ready(dev_priv, port);
 }
 
 static void intel_hdmi_pre_pll_enable(struct intel_encoder *encoder)
@@ -1073,6 +1099,7 @@ static void intel_hdmi_pre_pll_enable(struct intel_encoder *encoder)
                return;
 
        /* Program Tx lane resets to default */
+       mutex_lock(&dev_priv->dpio_lock);
        vlv_dpio_write(dev_priv, DPIO_PCS_TX(port),
                         DPIO_PCS_TX_LANE2_RESET |
                         DPIO_PCS_TX_LANE1_RESET);
@@ -1091,6 +1118,7 @@ static void intel_hdmi_pre_pll_enable(struct intel_encoder *encoder)
                         0x00002000);
        vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port),
                         DPIO_TX_OCALINIT_EN);
+       mutex_unlock(&dev_priv->dpio_lock);
 }
 
 static void intel_hdmi_post_disable(struct intel_encoder *encoder)
@@ -1113,10 +1141,6 @@ static void intel_hdmi_destroy(struct drm_connector *connector)
        kfree(connector);
 }
 
-static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = {
-       .mode_set = intel_hdmi_mode_set,
-};
-
 static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
        .dpms = intel_connector_dpms,
        .detect = intel_hdmi_detect,
@@ -1221,7 +1245,6 @@ void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port)
 {
        struct intel_digital_port *intel_dig_port;
        struct intel_encoder *intel_encoder;
-       struct drm_encoder *encoder;
        struct intel_connector *intel_connector;
 
        intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL);
@@ -1235,21 +1258,22 @@ void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port)
        }
 
        intel_encoder = &intel_dig_port->base;
-       encoder = &intel_encoder->base;
 
        drm_encoder_init(dev, &intel_encoder->base, &intel_hdmi_enc_funcs,
                         DRM_MODE_ENCODER_TMDS);
-       drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs);
 
        intel_encoder->compute_config = intel_hdmi_compute_config;
-       intel_encoder->enable = intel_enable_hdmi;
+       intel_encoder->mode_set = intel_hdmi_mode_set;
        intel_encoder->disable = intel_disable_hdmi;
        intel_encoder->get_hw_state = intel_hdmi_get_hw_state;
        intel_encoder->get_config = intel_hdmi_get_config;
        if (IS_VALLEYVIEW(dev)) {
-               intel_encoder->pre_enable = intel_hdmi_pre_enable;
                intel_encoder->pre_pll_enable = intel_hdmi_pre_pll_enable;
+               intel_encoder->pre_enable = intel_hdmi_pre_enable;
+               intel_encoder->enable = vlv_enable_hdmi;
                intel_encoder->post_disable = intel_hdmi_post_disable;
+       } else {
+               intel_encoder->enable = intel_enable_hdmi;
        }
 
        intel_encoder->type = INTEL_OUTPUT_HDMI;
index 61348eae2f0436a05f7b5d88fec4242db918655d..4d33278e31fb805dae4430a5ab493f801a1c6ced 100644 (file)
@@ -122,17 +122,25 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
  * This is an exception to the general rule that mode_set doesn't turn
  * things on.
  */
-static void intel_pre_pll_enable_lvds(struct intel_encoder *encoder)
+static void intel_pre_enable_lvds(struct intel_encoder *encoder)
 {
        struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+       struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
        struct drm_display_mode *fixed_mode =
                lvds_encoder->attached_connector->base.panel.fixed_mode;
-       int pipe = intel_crtc->pipe;
+       int pipe = crtc->pipe;
        u32 temp;
 
+       if (HAS_PCH_SPLIT(dev)) {
+               assert_fdi_rx_pll_disabled(dev_priv, pipe);
+               assert_shared_dpll_disabled(dev_priv,
+                                           intel_crtc_to_shared_dpll(crtc));
+       } else {
+               assert_pll_disabled(dev_priv, pipe);
+       }
+
        temp = I915_READ(lvds_encoder->reg);
        temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
 
@@ -149,7 +157,7 @@ static void intel_pre_pll_enable_lvds(struct intel_encoder *encoder)
 
        /* set the corresponsding LVDS_BORDER bit */
        temp &= ~LVDS_BORDER_ENABLE;
-       temp |= intel_crtc->config.gmch_pfit.lvds_border_bits;
+       temp |= crtc->config.gmch_pfit.lvds_border_bits;
        /* Set the B0-B3 data pairs corresponding to whether we're going to
         * set the DPLLs for dual-channel mode or not.
         */
@@ -169,8 +177,7 @@ static void intel_pre_pll_enable_lvds(struct intel_encoder *encoder)
        if (INTEL_INFO(dev)->gen == 4) {
                /* Bspec wording suggests that LVDS port dithering only exists
                 * for 18bpp panels. */
-               if (intel_crtc->config.dither &&
-                   intel_crtc->config.pipe_bpp == 18)
+               if (crtc->config.dither && crtc->config.pipe_bpp == 18)
                        temp |= LVDS_ENABLE_DITHER;
                else
                        temp &= ~LVDS_ENABLE_DITHER;
@@ -312,14 +319,12 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
        return true;
 }
 
-static void intel_lvds_mode_set(struct drm_encoder *encoder,
-                               struct drm_display_mode *mode,
-                               struct drm_display_mode *adjusted_mode)
+static void intel_lvds_mode_set(struct intel_encoder *encoder)
 {
        /*
-        * The LVDS pin pair will already have been turned on in the
-        * intel_crtc_mode_set since it has a large impact on the DPLL
-        * settings.
+        * We don't do anything here, the LVDS port is fully set up in the pre
+        * enable hook - the ordering constraints for enabling the lvds port vs.
+        * enabling the display pll are too strict.
         */
 }
 
@@ -336,6 +341,9 @@ intel_lvds_detect(struct drm_connector *connector, bool force)
        struct drm_device *dev = connector->dev;
        enum drm_connector_status status;
 
+       DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+                     connector->base.id, drm_get_connector_name(connector));
+
        status = intel_panel_detect(dev);
        if (status != connector_status_unknown)
                return status;
@@ -497,10 +505,6 @@ static int intel_lvds_set_property(struct drm_connector *connector,
        return 0;
 }
 
-static const struct drm_encoder_helper_funcs intel_lvds_helper_funcs = {
-       .mode_set = intel_lvds_mode_set,
-};
-
 static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = {
        .get_modes = intel_lvds_get_modes,
        .mode_valid = intel_lvds_mode_valid,
@@ -959,8 +963,9 @@ void intel_lvds_init(struct drm_device *dev)
                         DRM_MODE_ENCODER_LVDS);
 
        intel_encoder->enable = intel_enable_lvds;
-       intel_encoder->pre_pll_enable = intel_pre_pll_enable_lvds;
+       intel_encoder->pre_enable = intel_pre_enable_lvds;
        intel_encoder->compute_config = intel_lvds_compute_config;
+       intel_encoder->mode_set = intel_lvds_mode_set;
        intel_encoder->disable = intel_disable_lvds;
        intel_encoder->get_hw_state = intel_lvds_get_hw_state;
        intel_encoder->get_config = intel_lvds_get_config;
@@ -977,7 +982,6 @@ void intel_lvds_init(struct drm_device *dev)
        else
                intel_encoder->crtc_mask = (1 << 1);
 
-       drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs);
        drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs);
        connector->display_info.subpixel_order = SubPixelHorizontalRGB;
        connector->interlace_allowed = false;
index a3698812e9c7831f75048508b4b38ebf93ecce33..ddfd0aefe0c0906a587addfcc0d35bda414400b5 100644 (file)
@@ -196,7 +196,7 @@ intel_overlay_map_regs(struct intel_overlay *overlay)
                regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_obj->handle->vaddr;
        else
                regs = io_mapping_map_wc(dev_priv->gtt.mappable,
-                                        overlay->reg_bo->gtt_offset);
+                                        i915_gem_obj_ggtt_offset(overlay->reg_bo));
 
        return regs;
 }
@@ -740,7 +740,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
        swidth = params->src_w;
        swidthsw = calc_swidthsw(overlay->dev, params->offset_Y, tmp_width);
        sheight = params->src_h;
-       iowrite32(new_bo->gtt_offset + params->offset_Y, &regs->OBUF_0Y);
+       iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_Y, &regs->OBUF_0Y);
        ostride = params->stride_Y;
 
        if (params->format & I915_OVERLAY_YUV_PLANAR) {
@@ -754,8 +754,8 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
                                      params->src_w/uv_hscale);
                swidthsw |= max_t(u32, tmp_U, tmp_V) << 16;
                sheight |= (params->src_h/uv_vscale) << 16;
-               iowrite32(new_bo->gtt_offset + params->offset_U, &regs->OBUF_0U);
-               iowrite32(new_bo->gtt_offset + params->offset_V, &regs->OBUF_0V);
+               iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_U, &regs->OBUF_0U);
+               iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_V, &regs->OBUF_0V);
                ostride |= params->stride_UV << 16;
        }
 
@@ -1333,7 +1333,9 @@ void intel_setup_overlay(struct drm_device *dev)
 
        overlay->dev = dev;
 
-       reg_bo = i915_gem_object_create_stolen(dev, PAGE_SIZE);
+       reg_bo = NULL;
+       if (!OVERLAY_NEEDS_PHYSICAL(dev))
+               reg_bo = i915_gem_object_create_stolen(dev, PAGE_SIZE);
        if (reg_bo == NULL)
                reg_bo = i915_gem_alloc_object(dev, PAGE_SIZE);
        if (reg_bo == NULL)
@@ -1350,12 +1352,12 @@ void intel_setup_overlay(struct drm_device *dev)
                }
                overlay->flip_addr = reg_bo->phys_obj->handle->busaddr;
        } else {
-               ret = i915_gem_object_pin(reg_bo, PAGE_SIZE, true, false);
+               ret = i915_gem_obj_ggtt_pin(reg_bo, PAGE_SIZE, true, false);
                if (ret) {
                        DRM_ERROR("failed to pin overlay register bo\n");
                        goto out_free_bo;
                }
-               overlay->flip_addr = reg_bo->gtt_offset;
+               overlay->flip_addr = i915_gem_obj_ggtt_offset(reg_bo);
 
                ret = i915_gem_object_set_to_gtt_domain(reg_bo, true);
                if (ret) {
@@ -1412,9 +1414,6 @@ void intel_cleanup_overlay(struct drm_device *dev)
        kfree(dev_priv->overlay);
 }
 
-#ifdef CONFIG_DEBUG_FS
-#include <linux/seq_file.h>
-
 struct intel_overlay_error_state {
        struct overlay_registers regs;
        unsigned long base;
@@ -1435,7 +1434,7 @@ intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
                        overlay->reg_bo->phys_obj->handle->vaddr;
        else
                regs = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
-                                               overlay->reg_bo->gtt_offset);
+                                               i915_gem_obj_ggtt_offset(overlay->reg_bo));
 
        return regs;
 }
@@ -1468,7 +1467,7 @@ intel_overlay_capture_error_state(struct drm_device *dev)
        if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
                error->base = (__force long)overlay->reg_bo->phys_obj->handle->vaddr;
        else
-               error->base = overlay->reg_bo->gtt_offset;
+               error->base = i915_gem_obj_ggtt_offset(overlay->reg_bo);
 
        regs = intel_overlay_map_regs_atomic(overlay);
        if (!regs)
@@ -1537,4 +1536,3 @@ intel_overlay_print_error_state(struct drm_i915_error_state_buf *m,
        P(UVSCALEV);
 #undef P
 }
-#endif
index 5950888ae1d00bd7cf70ebb18d5595ee0ee8446e..a43c33bc4a3582ece3758ae7286b87fb2a8256ec 100644 (file)
@@ -194,9 +194,6 @@ void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
            adjusted_mode->vdisplay == mode->vdisplay)
                goto out;
 
-       drm_mode_set_crtcinfo(adjusted_mode, 0);
-       pipe_config->timings_set = true;
-
        switch (fitting_mode) {
        case DRM_MODE_SCALE_CENTER:
                /*
index b0e4a0bd1313c0dafac84df1b6af56e7ccced950..178da3ec31b40a979a88277ada331ddc46967b4c 100644 (file)
@@ -30,8 +30,7 @@
 #include "intel_drv.h"
 #include "../../../platform/x86/intel_ips.h"
 #include <linux/module.h>
-
-#define FORCEWAKE_ACK_TIMEOUT_MS 2
+#include <drm/i915_powerwell.h>
 
 /* FBC, or Frame Buffer Compression, is a technique employed to compress the
  * framebuffer contents in-memory, aiming at reducing the required bandwidth
@@ -86,7 +85,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
        int plane, i;
        u32 fbc_ctl, fbc_ctl2;
 
-       cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
+       cfb_pitch = dev_priv->fbc.size / FBC_LL_SIZE;
        if (fb->pitches[0] < cfb_pitch)
                cfb_pitch = fb->pitches[0];
 
@@ -217,7 +216,7 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
                   (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
                   (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
        I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
-       I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID);
+       I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID);
        /* enable it... */
        I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
 
@@ -274,7 +273,7 @@ static void gen7_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
        struct drm_i915_gem_object *obj = intel_fb->obj;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
-       I915_WRITE(IVB_FBC_RT_BASE, obj->gtt_offset);
+       I915_WRITE(IVB_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj));
 
        I915_WRITE(ILK_DPFC_CONTROL, DPFC_CTL_EN | DPFC_CTL_LIMIT_1X |
                   IVB_DPFC_CTL_FENCE_EN |
@@ -325,7 +324,7 @@ static void intel_fbc_work_fn(struct work_struct *__work)
        struct drm_i915_private *dev_priv = dev->dev_private;
 
        mutex_lock(&dev->struct_mutex);
-       if (work == dev_priv->fbc_work) {
+       if (work == dev_priv->fbc.fbc_work) {
                /* Double check that we haven't switched fb without cancelling
                 * the prior work.
                 */
@@ -333,12 +332,12 @@ static void intel_fbc_work_fn(struct work_struct *__work)
                        dev_priv->display.enable_fbc(work->crtc,
                                                     work->interval);
 
-                       dev_priv->cfb_plane = to_intel_crtc(work->crtc)->plane;
-                       dev_priv->cfb_fb = work->crtc->fb->base.id;
-                       dev_priv->cfb_y = work->crtc->y;
+                       dev_priv->fbc.plane = to_intel_crtc(work->crtc)->plane;
+                       dev_priv->fbc.fb_id = work->crtc->fb->base.id;
+                       dev_priv->fbc.y = work->crtc->y;
                }
 
-               dev_priv->fbc_work = NULL;
+               dev_priv->fbc.fbc_work = NULL;
        }
        mutex_unlock(&dev->struct_mutex);
 
@@ -347,28 +346,28 @@ static void intel_fbc_work_fn(struct work_struct *__work)
 
 static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
 {
-       if (dev_priv->fbc_work == NULL)
+       if (dev_priv->fbc.fbc_work == NULL)
                return;
 
        DRM_DEBUG_KMS("cancelling pending FBC enable\n");
 
        /* Synchronisation is provided by struct_mutex and checking of
-        * dev_priv->fbc_work, so we can perform the cancellation
+        * dev_priv->fbc.fbc_work, so we can perform the cancellation
         * entirely asynchronously.
         */
-       if (cancel_delayed_work(&dev_priv->fbc_work->work))
+       if (cancel_delayed_work(&dev_priv->fbc.fbc_work->work))
                /* tasklet was killed before being run, clean up */
-               kfree(dev_priv->fbc_work);
+               kfree(dev_priv->fbc.fbc_work);
 
        /* Mark the work as no longer wanted so that if it does
         * wake-up (because the work was already running and waiting
         * for our mutex), it will discover that is no longer
         * necessary to run.
         */
-       dev_priv->fbc_work = NULL;
+       dev_priv->fbc.fbc_work = NULL;
 }
 
-void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+static void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
 {
        struct intel_fbc_work *work;
        struct drm_device *dev = crtc->dev;
@@ -381,6 +380,7 @@ void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
 
        work = kzalloc(sizeof *work, GFP_KERNEL);
        if (work == NULL) {
+               DRM_ERROR("Failed to allocate FBC work structure\n");
                dev_priv->display.enable_fbc(crtc, interval);
                return;
        }
@@ -390,9 +390,7 @@ void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
        work->interval = interval;
        INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);
 
-       dev_priv->fbc_work = work;
-
-       DRM_DEBUG_KMS("scheduling delayed FBC enable\n");
+       dev_priv->fbc.fbc_work = work;
 
        /* Delay the actual enabling to let pageflipping cease and the
         * display to settle before starting the compression. Note that
@@ -404,6 +402,8 @@ void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
         * following the termination of the page-flipping sequence
         * and indeed performing the enable as a co-routine and not
         * waiting synchronously upon the vblank.
+        *
+        * WaFbcWaitForVBlankBeforeEnable:ilk,snb
         */
        schedule_delayed_work(&work->work, msecs_to_jiffies(50));
 }
@@ -418,7 +418,17 @@ void intel_disable_fbc(struct drm_device *dev)
                return;
 
        dev_priv->display.disable_fbc(dev);
-       dev_priv->cfb_plane = -1;
+       dev_priv->fbc.plane = -1;
+}
+
+static bool set_no_fbc_reason(struct drm_i915_private *dev_priv,
+                             enum no_fbc_reason reason)
+{
+       if (dev_priv->fbc.no_fbc_reason == reason)
+               return false;
+
+       dev_priv->fbc.no_fbc_reason = reason;
+       return true;
 }
 
 /**
@@ -448,14 +458,18 @@ void intel_update_fbc(struct drm_device *dev)
        struct drm_framebuffer *fb;
        struct intel_framebuffer *intel_fb;
        struct drm_i915_gem_object *obj;
-       int enable_fbc;
        unsigned int max_hdisplay, max_vdisplay;
 
-       if (!i915_powersave)
+       if (!I915_HAS_FBC(dev)) {
+               set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED);
                return;
+       }
 
-       if (!I915_HAS_FBC(dev))
+       if (!i915_powersave) {
+               if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
+                       DRM_DEBUG_KMS("fbc disabled per module param\n");
                return;
+       }
 
        /*
         * If FBC is already on, we just have to verify that we can
@@ -470,8 +484,8 @@ void intel_update_fbc(struct drm_device *dev)
                if (intel_crtc_active(tmp_crtc) &&
                    !to_intel_crtc(tmp_crtc)->primary_disabled) {
                        if (crtc) {
-                               DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
-                               dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
+                               if (set_no_fbc_reason(dev_priv, FBC_MULTIPLE_PIPES))
+                                       DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
                                goto out_disable;
                        }
                        crtc = tmp_crtc;
@@ -479,8 +493,8 @@ void intel_update_fbc(struct drm_device *dev)
        }
 
        if (!crtc || crtc->fb == NULL) {
-               DRM_DEBUG_KMS("no output, disabling\n");
-               dev_priv->no_fbc_reason = FBC_NO_OUTPUT;
+               if (set_no_fbc_reason(dev_priv, FBC_NO_OUTPUT))
+                       DRM_DEBUG_KMS("no output, disabling\n");
                goto out_disable;
        }
 
@@ -489,23 +503,22 @@ void intel_update_fbc(struct drm_device *dev)
        intel_fb = to_intel_framebuffer(fb);
        obj = intel_fb->obj;
 
-       enable_fbc = i915_enable_fbc;
-       if (enable_fbc < 0) {
-               DRM_DEBUG_KMS("fbc set to per-chip default\n");
-               enable_fbc = 1;
-               if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
-                       enable_fbc = 0;
+       if (i915_enable_fbc < 0 &&
+           INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev)) {
+               if (set_no_fbc_reason(dev_priv, FBC_CHIP_DEFAULT))
+                       DRM_DEBUG_KMS("disabled per chip default\n");
+               goto out_disable;
        }
-       if (!enable_fbc) {
-               DRM_DEBUG_KMS("fbc disabled per module param\n");
-               dev_priv->no_fbc_reason = FBC_MODULE_PARAM;
+       if (!i915_enable_fbc) {
+               if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
+                       DRM_DEBUG_KMS("fbc disabled per module param\n");
                goto out_disable;
        }
        if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) ||
            (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) {
-               DRM_DEBUG_KMS("mode incompatible with compression, "
-                             "disabling\n");
-               dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE;
+               if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE))
+                       DRM_DEBUG_KMS("mode incompatible with compression, "
+                                     "disabling\n");
                goto out_disable;
        }
 
@@ -518,14 +531,14 @@ void intel_update_fbc(struct drm_device *dev)
        }
        if ((crtc->mode.hdisplay > max_hdisplay) ||
            (crtc->mode.vdisplay > max_vdisplay)) {
-               DRM_DEBUG_KMS("mode too large for compression, disabling\n");
-               dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE;
+               if (set_no_fbc_reason(dev_priv, FBC_MODE_TOO_LARGE))
+                       DRM_DEBUG_KMS("mode too large for compression, disabling\n");
                goto out_disable;
        }
        if ((IS_I915GM(dev) || IS_I945GM(dev) || IS_HASWELL(dev)) &&
            intel_crtc->plane != 0) {
-               DRM_DEBUG_KMS("plane not 0, disabling compression\n");
-               dev_priv->no_fbc_reason = FBC_BAD_PLANE;
+               if (set_no_fbc_reason(dev_priv, FBC_BAD_PLANE))
+                       DRM_DEBUG_KMS("plane not 0, disabling compression\n");
                goto out_disable;
        }
 
@@ -534,8 +547,8 @@ void intel_update_fbc(struct drm_device *dev)
         */
        if (obj->tiling_mode != I915_TILING_X ||
            obj->fence_reg == I915_FENCE_REG_NONE) {
-               DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
-               dev_priv->no_fbc_reason = FBC_NOT_TILED;
+               if (set_no_fbc_reason(dev_priv, FBC_NOT_TILED))
+                       DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
                goto out_disable;
        }
 
@@ -544,8 +557,8 @@ void intel_update_fbc(struct drm_device *dev)
                goto out_disable;
 
        if (i915_gem_stolen_setup_compression(dev, intel_fb->obj->base.size)) {
-               DRM_DEBUG_KMS("framebuffer too large, disabling compression\n");
-               dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
+               if (set_no_fbc_reason(dev_priv, FBC_STOLEN_TOO_SMALL))
+                       DRM_DEBUG_KMS("framebuffer too large, disabling compression\n");
                goto out_disable;
        }
 
@@ -554,9 +567,9 @@ void intel_update_fbc(struct drm_device *dev)
         * cannot be unpinned (and have its GTT offset and fence revoked)
         * without first being decoupled from the scanout and FBC disabled.
         */
-       if (dev_priv->cfb_plane == intel_crtc->plane &&
-           dev_priv->cfb_fb == fb->base.id &&
-           dev_priv->cfb_y == crtc->y)
+       if (dev_priv->fbc.plane == intel_crtc->plane &&
+           dev_priv->fbc.fb_id == fb->base.id &&
+           dev_priv->fbc.y == crtc->y)
                return;
 
        if (intel_fbc_enabled(dev)) {
@@ -588,6 +601,7 @@ void intel_update_fbc(struct drm_device *dev)
        }
 
        intel_enable_fbc(crtc, 500);
+       dev_priv->fbc.no_fbc_reason = FBC_OK;
        return;
 
 out_disable:
@@ -1666,9 +1680,6 @@ static void i830_update_wm(struct drm_device *dev)
        I915_WRITE(FW_BLC, fwater_lo);
 }
 
-#define ILK_LP0_PLANE_LATENCY          700
-#define ILK_LP0_CURSOR_LATENCY         1300
-
 /*
  * Check the wm result.
  *
@@ -1783,9 +1794,9 @@ static void ironlake_update_wm(struct drm_device *dev)
        enabled = 0;
        if (g4x_compute_wm0(dev, PIPE_A,
                            &ironlake_display_wm_info,
-                           ILK_LP0_PLANE_LATENCY,
+                           dev_priv->wm.pri_latency[0] * 100,
                            &ironlake_cursor_wm_info,
-                           ILK_LP0_CURSOR_LATENCY,
+                           dev_priv->wm.cur_latency[0] * 100,
                            &plane_wm, &cursor_wm)) {
                I915_WRITE(WM0_PIPEA_ILK,
                           (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
@@ -1797,9 +1808,9 @@ static void ironlake_update_wm(struct drm_device *dev)
 
        if (g4x_compute_wm0(dev, PIPE_B,
                            &ironlake_display_wm_info,
-                           ILK_LP0_PLANE_LATENCY,
+                           dev_priv->wm.pri_latency[0] * 100,
                            &ironlake_cursor_wm_info,
-                           ILK_LP0_CURSOR_LATENCY,
+                           dev_priv->wm.cur_latency[0] * 100,
                            &plane_wm, &cursor_wm)) {
                I915_WRITE(WM0_PIPEB_ILK,
                           (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
@@ -1823,7 +1834,7 @@ static void ironlake_update_wm(struct drm_device *dev)
 
        /* WM1 */
        if (!ironlake_compute_srwm(dev, 1, enabled,
-                                  ILK_READ_WM1_LATENCY() * 500,
+                                  dev_priv->wm.pri_latency[1] * 500,
                                   &ironlake_display_srwm_info,
                                   &ironlake_cursor_srwm_info,
                                   &fbc_wm, &plane_wm, &cursor_wm))
@@ -1831,14 +1842,14 @@ static void ironlake_update_wm(struct drm_device *dev)
 
        I915_WRITE(WM1_LP_ILK,
                   WM1_LP_SR_EN |
-                  (ILK_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+                  (dev_priv->wm.pri_latency[1] << WM1_LP_LATENCY_SHIFT) |
                   (fbc_wm << WM1_LP_FBC_SHIFT) |
                   (plane_wm << WM1_LP_SR_SHIFT) |
                   cursor_wm);
 
        /* WM2 */
        if (!ironlake_compute_srwm(dev, 2, enabled,
-                                  ILK_READ_WM2_LATENCY() * 500,
+                                  dev_priv->wm.pri_latency[2] * 500,
                                   &ironlake_display_srwm_info,
                                   &ironlake_cursor_srwm_info,
                                   &fbc_wm, &plane_wm, &cursor_wm))
@@ -1846,7 +1857,7 @@ static void ironlake_update_wm(struct drm_device *dev)
 
        I915_WRITE(WM2_LP_ILK,
                   WM2_LP_EN |
-                  (ILK_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+                  (dev_priv->wm.pri_latency[2] << WM1_LP_LATENCY_SHIFT) |
                   (fbc_wm << WM1_LP_FBC_SHIFT) |
                   (plane_wm << WM1_LP_SR_SHIFT) |
                   cursor_wm);
@@ -1860,7 +1871,7 @@ static void ironlake_update_wm(struct drm_device *dev)
 static void sandybridge_update_wm(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       int latency = SNB_READ_WM0_LATENCY() * 100;     /* In unit 0.1us */
+       int latency = dev_priv->wm.pri_latency[0] * 100;        /* In unit 0.1us */
        u32 val;
        int fbc_wm, plane_wm, cursor_wm;
        unsigned int enabled;
@@ -1915,7 +1926,7 @@ static void sandybridge_update_wm(struct drm_device *dev)
 
        /* WM1 */
        if (!ironlake_compute_srwm(dev, 1, enabled,
-                                  SNB_READ_WM1_LATENCY() * 500,
+                                  dev_priv->wm.pri_latency[1] * 500,
                                   &sandybridge_display_srwm_info,
                                   &sandybridge_cursor_srwm_info,
                                   &fbc_wm, &plane_wm, &cursor_wm))
@@ -1923,14 +1934,14 @@ static void sandybridge_update_wm(struct drm_device *dev)
 
        I915_WRITE(WM1_LP_ILK,
                   WM1_LP_SR_EN |
-                  (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+                  (dev_priv->wm.pri_latency[1] << WM1_LP_LATENCY_SHIFT) |
                   (fbc_wm << WM1_LP_FBC_SHIFT) |
                   (plane_wm << WM1_LP_SR_SHIFT) |
                   cursor_wm);
 
        /* WM2 */
        if (!ironlake_compute_srwm(dev, 2, enabled,
-                                  SNB_READ_WM2_LATENCY() * 500,
+                                  dev_priv->wm.pri_latency[2] * 500,
                                   &sandybridge_display_srwm_info,
                                   &sandybridge_cursor_srwm_info,
                                   &fbc_wm, &plane_wm, &cursor_wm))
@@ -1938,14 +1949,14 @@ static void sandybridge_update_wm(struct drm_device *dev)
 
        I915_WRITE(WM2_LP_ILK,
                   WM2_LP_EN |
-                  (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+                  (dev_priv->wm.pri_latency[2] << WM1_LP_LATENCY_SHIFT) |
                   (fbc_wm << WM1_LP_FBC_SHIFT) |
                   (plane_wm << WM1_LP_SR_SHIFT) |
                   cursor_wm);
 
        /* WM3 */
        if (!ironlake_compute_srwm(dev, 3, enabled,
-                                  SNB_READ_WM3_LATENCY() * 500,
+                                  dev_priv->wm.pri_latency[3] * 500,
                                   &sandybridge_display_srwm_info,
                                   &sandybridge_cursor_srwm_info,
                                   &fbc_wm, &plane_wm, &cursor_wm))
@@ -1953,7 +1964,7 @@ static void sandybridge_update_wm(struct drm_device *dev)
 
        I915_WRITE(WM3_LP_ILK,
                   WM3_LP_EN |
-                  (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+                  (dev_priv->wm.pri_latency[3] << WM1_LP_LATENCY_SHIFT) |
                   (fbc_wm << WM1_LP_FBC_SHIFT) |
                   (plane_wm << WM1_LP_SR_SHIFT) |
                   cursor_wm);
@@ -1962,7 +1973,7 @@ static void sandybridge_update_wm(struct drm_device *dev)
 static void ivybridge_update_wm(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       int latency = SNB_READ_WM0_LATENCY() * 100;     /* In unit 0.1us */
+       int latency = dev_priv->wm.pri_latency[0] * 100;        /* In unit 0.1us */
        u32 val;
        int fbc_wm, plane_wm, cursor_wm;
        int ignore_fbc_wm, ignore_plane_wm, ignore_cursor_wm;
@@ -2032,7 +2043,7 @@ static void ivybridge_update_wm(struct drm_device *dev)
 
        /* WM1 */
        if (!ironlake_compute_srwm(dev, 1, enabled,
-                                  SNB_READ_WM1_LATENCY() * 500,
+                                  dev_priv->wm.pri_latency[1] * 500,
                                   &sandybridge_display_srwm_info,
                                   &sandybridge_cursor_srwm_info,
                                   &fbc_wm, &plane_wm, &cursor_wm))
@@ -2040,14 +2051,14 @@ static void ivybridge_update_wm(struct drm_device *dev)
 
        I915_WRITE(WM1_LP_ILK,
                   WM1_LP_SR_EN |
-                  (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+                  (dev_priv->wm.pri_latency[1] << WM1_LP_LATENCY_SHIFT) |
                   (fbc_wm << WM1_LP_FBC_SHIFT) |
                   (plane_wm << WM1_LP_SR_SHIFT) |
                   cursor_wm);
 
        /* WM2 */
        if (!ironlake_compute_srwm(dev, 2, enabled,
-                                  SNB_READ_WM2_LATENCY() * 500,
+                                  dev_priv->wm.pri_latency[2] * 500,
                                   &sandybridge_display_srwm_info,
                                   &sandybridge_cursor_srwm_info,
                                   &fbc_wm, &plane_wm, &cursor_wm))
@@ -2055,19 +2066,19 @@ static void ivybridge_update_wm(struct drm_device *dev)
 
        I915_WRITE(WM2_LP_ILK,
                   WM2_LP_EN |
-                  (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+                  (dev_priv->wm.pri_latency[2] << WM1_LP_LATENCY_SHIFT) |
                   (fbc_wm << WM1_LP_FBC_SHIFT) |
                   (plane_wm << WM1_LP_SR_SHIFT) |
                   cursor_wm);
 
        /* WM3, note we have to correct the cursor latency */
        if (!ironlake_compute_srwm(dev, 3, enabled,
-                                  SNB_READ_WM3_LATENCY() * 500,
+                                  dev_priv->wm.pri_latency[3] * 500,
                                   &sandybridge_display_srwm_info,
                                   &sandybridge_cursor_srwm_info,
                                   &fbc_wm, &plane_wm, &ignore_cursor_wm) ||
            !ironlake_compute_srwm(dev, 3, enabled,
-                                  2 * SNB_READ_WM3_LATENCY() * 500,
+                                  dev_priv->wm.cur_latency[3] * 500,
                                   &sandybridge_display_srwm_info,
                                   &sandybridge_cursor_srwm_info,
                                   &ignore_fbc_wm, &ignore_plane_wm, &cursor_wm))
@@ -2075,14 +2086,14 @@ static void ivybridge_update_wm(struct drm_device *dev)
 
        I915_WRITE(WM3_LP_ILK,
                   WM3_LP_EN |
-                  (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+                  (dev_priv->wm.pri_latency[3] << WM1_LP_LATENCY_SHIFT) |
                   (fbc_wm << WM1_LP_FBC_SHIFT) |
                   (plane_wm << WM1_LP_SR_SHIFT) |
                   cursor_wm);
 }
 
-static uint32_t hsw_wm_get_pixel_rate(struct drm_device *dev,
-                                     struct drm_crtc *crtc)
+static uint32_t ilk_pipe_pixel_rate(struct drm_device *dev,
+                                   struct drm_crtc *crtc)
 {
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        uint32_t pixel_rate, pfit_size;
@@ -2112,30 +2123,38 @@ static uint32_t hsw_wm_get_pixel_rate(struct drm_device *dev,
        return pixel_rate;
 }
 
-static uint32_t hsw_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel,
+/* latency must be in 0.1us units. */
+static uint32_t ilk_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel,
                               uint32_t latency)
 {
        uint64_t ret;
 
+       if (WARN(latency == 0, "Latency value missing\n"))
+               return UINT_MAX;
+
        ret = (uint64_t) pixel_rate * bytes_per_pixel * latency;
        ret = DIV_ROUND_UP_ULL(ret, 64 * 10000) + 2;
 
        return ret;
 }
 
-static uint32_t hsw_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
+/* latency must be in 0.1us units. */
+static uint32_t ilk_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
                               uint32_t horiz_pixels, uint8_t bytes_per_pixel,
                               uint32_t latency)
 {
        uint32_t ret;
 
+       if (WARN(latency == 0, "Latency value missing\n"))
+               return UINT_MAX;
+
        ret = (latency * pixel_rate) / (pipe_htotal * 10000);
        ret = (ret + 1) * horiz_pixels * bytes_per_pixel;
        ret = DIV_ROUND_UP(ret, 64) + 2;
        return ret;
 }
 
-static uint32_t hsw_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels,
+static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels,
                           uint8_t bytes_per_pixel)
 {
        return DIV_ROUND_UP(pri_val * 64, horiz_pixels * bytes_per_pixel) + 2;
@@ -2143,15 +2162,11 @@ static uint32_t hsw_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels,
 
 struct hsw_pipe_wm_parameters {
        bool active;
-       bool sprite_enabled;
-       uint8_t pri_bytes_per_pixel;
-       uint8_t spr_bytes_per_pixel;
-       uint8_t cur_bytes_per_pixel;
-       uint32_t pri_horiz_pixels;
-       uint32_t spr_horiz_pixels;
-       uint32_t cur_horiz_pixels;
        uint32_t pipe_htotal;
        uint32_t pixel_rate;
+       struct intel_plane_wm_parameters pri;
+       struct intel_plane_wm_parameters spr;
+       struct intel_plane_wm_parameters cur;
 };
 
 struct hsw_wm_maximums {
@@ -2161,15 +2176,6 @@ struct hsw_wm_maximums {
        uint16_t fbc;
 };
 
-struct hsw_lp_wm_result {
-       bool enable;
-       bool fbc_enable;
-       uint32_t pri_val;
-       uint32_t spr_val;
-       uint32_t cur_val;
-       uint32_t fbc_val;
-};
-
 struct hsw_wm_values {
        uint32_t wm_pipe[3];
        uint32_t wm_lp[3];
@@ -2178,128 +2184,289 @@ struct hsw_wm_values {
        bool enable_fbc_wm;
 };
 
-enum hsw_data_buf_partitioning {
-       HSW_DATA_BUF_PART_1_2,
-       HSW_DATA_BUF_PART_5_6,
+/* used in computing the new watermarks state */
+struct intel_wm_config {
+       unsigned int num_pipes_active;
+       bool sprites_enabled;
+       bool sprites_scaled;
+       bool fbc_wm_enabled;
 };
 
-/* For both WM_PIPE and WM_LP. */
-static uint32_t hsw_compute_pri_wm(struct hsw_pipe_wm_parameters *params,
+/*
+ * For both WM_PIPE and WM_LP.
+ * mem_value must be in 0.1us units.
+ */
+static uint32_t ilk_compute_pri_wm(struct hsw_pipe_wm_parameters *params,
                                   uint32_t mem_value,
                                   bool is_lp)
 {
        uint32_t method1, method2;
 
-       /* TODO: for now, assume the primary plane is always enabled. */
-       if (!params->active)
+       if (!params->active || !params->pri.enabled)
                return 0;
 
-       method1 = hsw_wm_method1(params->pixel_rate,
-                                params->pri_bytes_per_pixel,
+       method1 = ilk_wm_method1(params->pixel_rate,
+                                params->pri.bytes_per_pixel,
                                 mem_value);
 
        if (!is_lp)
                return method1;
 
-       method2 = hsw_wm_method2(params->pixel_rate,
+       method2 = ilk_wm_method2(params->pixel_rate,
                                 params->pipe_htotal,
-                                params->pri_horiz_pixels,
-                                params->pri_bytes_per_pixel,
+                                params->pri.horiz_pixels,
+                                params->pri.bytes_per_pixel,
                                 mem_value);
 
        return min(method1, method2);
 }
 
-/* For both WM_PIPE and WM_LP. */
-static uint32_t hsw_compute_spr_wm(struct hsw_pipe_wm_parameters *params,
+/*
+ * For both WM_PIPE and WM_LP.
+ * mem_value must be in 0.1us units.
+ */
+static uint32_t ilk_compute_spr_wm(struct hsw_pipe_wm_parameters *params,
                                   uint32_t mem_value)
 {
        uint32_t method1, method2;
 
-       if (!params->active || !params->sprite_enabled)
+       if (!params->active || !params->spr.enabled)
                return 0;
 
-       method1 = hsw_wm_method1(params->pixel_rate,
-                                params->spr_bytes_per_pixel,
+       method1 = ilk_wm_method1(params->pixel_rate,
+                                params->spr.bytes_per_pixel,
                                 mem_value);
-       method2 = hsw_wm_method2(params->pixel_rate,
+       method2 = ilk_wm_method2(params->pixel_rate,
                                 params->pipe_htotal,
-                                params->spr_horiz_pixels,
-                                params->spr_bytes_per_pixel,
+                                params->spr.horiz_pixels,
+                                params->spr.bytes_per_pixel,
                                 mem_value);
        return min(method1, method2);
 }
 
-/* For both WM_PIPE and WM_LP. */
-static uint32_t hsw_compute_cur_wm(struct hsw_pipe_wm_parameters *params,
+/*
+ * For both WM_PIPE and WM_LP.
+ * mem_value must be in 0.1us units.
+ */
+static uint32_t ilk_compute_cur_wm(struct hsw_pipe_wm_parameters *params,
                                   uint32_t mem_value)
 {
-       if (!params->active)
+       if (!params->active || !params->cur.enabled)
                return 0;
 
-       return hsw_wm_method2(params->pixel_rate,
+       return ilk_wm_method2(params->pixel_rate,
                              params->pipe_htotal,
-                             params->cur_horiz_pixels,
-                             params->cur_bytes_per_pixel,
+                             params->cur.horiz_pixels,
+                             params->cur.bytes_per_pixel,
                              mem_value);
 }
 
 /* Only for WM_LP. */
-static uint32_t hsw_compute_fbc_wm(struct hsw_pipe_wm_parameters *params,
-                                  uint32_t pri_val,
-                                  uint32_t mem_value)
+static uint32_t ilk_compute_fbc_wm(struct hsw_pipe_wm_parameters *params,
+                                  uint32_t pri_val)
 {
-       if (!params->active)
+       if (!params->active || !params->pri.enabled)
                return 0;
 
-       return hsw_wm_fbc(pri_val,
-                         params->pri_horiz_pixels,
-                         params->pri_bytes_per_pixel);
+       return ilk_wm_fbc(pri_val,
+                         params->pri.horiz_pixels,
+                         params->pri.bytes_per_pixel);
 }
 
-static bool hsw_compute_lp_wm(uint32_t mem_value, struct hsw_wm_maximums *max,
-                             struct hsw_pipe_wm_parameters *params,
-                             struct hsw_lp_wm_result *result)
+static unsigned int ilk_display_fifo_size(const struct drm_device *dev)
 {
-       enum pipe pipe;
-       uint32_t pri_val[3], spr_val[3], cur_val[3], fbc_val[3];
+       if (INTEL_INFO(dev)->gen >= 7)
+               return 768;
+       else
+               return 512;
+}
 
-       for (pipe = PIPE_A; pipe <= PIPE_C; pipe++) {
-               struct hsw_pipe_wm_parameters *p = &params[pipe];
+/* Calculate the maximum primary/sprite plane watermark */
+static unsigned int ilk_plane_wm_max(const struct drm_device *dev,
+                                    int level,
+                                    const struct intel_wm_config *config,
+                                    enum intel_ddb_partitioning ddb_partitioning,
+                                    bool is_sprite)
+{
+       unsigned int fifo_size = ilk_display_fifo_size(dev);
+       unsigned int max;
 
-               pri_val[pipe] = hsw_compute_pri_wm(p, mem_value, true);
-               spr_val[pipe] = hsw_compute_spr_wm(p, mem_value);
-               cur_val[pipe] = hsw_compute_cur_wm(p, mem_value);
-               fbc_val[pipe] = hsw_compute_fbc_wm(p, pri_val[pipe], mem_value);
-       }
+       /* if sprites aren't enabled, sprites get nothing */
+       if (is_sprite && !config->sprites_enabled)
+               return 0;
 
-       result->pri_val = max3(pri_val[0], pri_val[1], pri_val[2]);
-       result->spr_val = max3(spr_val[0], spr_val[1], spr_val[2]);
-       result->cur_val = max3(cur_val[0], cur_val[1], cur_val[2]);
-       result->fbc_val = max3(fbc_val[0], fbc_val[1], fbc_val[2]);
+       /* HSW allows LP1+ watermarks even with multiple pipes */
+       if (level == 0 || config->num_pipes_active > 1) {
+               fifo_size /= INTEL_INFO(dev)->num_pipes;
 
-       if (result->fbc_val > max->fbc) {
-               result->fbc_enable = false;
-               result->fbc_val = 0;
-       } else {
-               result->fbc_enable = true;
+               /*
+                * For some reason the non self refresh
+                * FIFO size is only half of the self
+                * refresh FIFO size on ILK/SNB.
+                */
+               if (INTEL_INFO(dev)->gen <= 6)
+                       fifo_size /= 2;
        }
 
+       if (config->sprites_enabled) {
+               /* level 0 is always calculated with 1:1 split */
+               if (level > 0 && ddb_partitioning == INTEL_DDB_PART_5_6) {
+                       if (is_sprite)
+                               fifo_size *= 5;
+                       fifo_size /= 6;
+               } else {
+                       fifo_size /= 2;
+               }
+       }
+
+       /* clamp to max that the registers can hold */
+       if (INTEL_INFO(dev)->gen >= 7)
+               /* IVB/HSW primary/sprite plane watermarks */
+               max = level == 0 ? 127 : 1023;
+       else if (!is_sprite)
+               /* ILK/SNB primary plane watermarks */
+               max = level == 0 ? 127 : 511;
+       else
+               /* ILK/SNB sprite plane watermarks */
+               max = level == 0 ? 63 : 255;
+
+       return min(fifo_size, max);
+}
+
+/* Calculate the maximum cursor plane watermark */
+static unsigned int ilk_cursor_wm_max(const struct drm_device *dev,
+                                     int level,
+                                     const struct intel_wm_config *config)
+{
+       /* HSW LP1+ watermarks w/ multiple pipes */
+       if (level > 0 && config->num_pipes_active > 1)
+               return 64;
+
+       /* otherwise just report max that registers can hold */
+       if (INTEL_INFO(dev)->gen >= 7)
+               return level == 0 ? 63 : 255;
+       else
+               return level == 0 ? 31 : 63;
+}
+
+/* Calculate the maximum FBC watermark */
+static unsigned int ilk_fbc_wm_max(void)
+{
+       /* max that registers can hold */
+       return 15;
+}
+
+static void ilk_wm_max(struct drm_device *dev,
+                      int level,
+                      const struct intel_wm_config *config,
+                      enum intel_ddb_partitioning ddb_partitioning,
+                      struct hsw_wm_maximums *max)
+{
+       max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false);
+       max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true);
+       max->cur = ilk_cursor_wm_max(dev, level, config);
+       max->fbc = ilk_fbc_wm_max();
+}
+
+static bool ilk_check_wm(int level,
+                        const struct hsw_wm_maximums *max,
+                        struct intel_wm_level *result)
+{
+       bool ret;
+
+       /* already determined to be invalid? */
+       if (!result->enable)
+               return false;
+
        result->enable = result->pri_val <= max->pri &&
                         result->spr_val <= max->spr &&
                         result->cur_val <= max->cur;
-       return result->enable;
+
+       ret = result->enable;
+
+       /*
+        * HACK until we can pre-compute everything,
+        * and thus fail gracefully if LP0 watermarks
+        * are exceeded...
+        */
+       if (level == 0 && !result->enable) {
+               if (result->pri_val > max->pri)
+                       DRM_DEBUG_KMS("Primary WM%d too large %u (max %u)\n",
+                                     level, result->pri_val, max->pri);
+               if (result->spr_val > max->spr)
+                       DRM_DEBUG_KMS("Sprite WM%d too large %u (max %u)\n",
+                                     level, result->spr_val, max->spr);
+               if (result->cur_val > max->cur)
+                       DRM_DEBUG_KMS("Cursor WM%d too large %u (max %u)\n",
+                                     level, result->cur_val, max->cur);
+
+               result->pri_val = min_t(uint32_t, result->pri_val, max->pri);
+               result->spr_val = min_t(uint32_t, result->spr_val, max->spr);
+               result->cur_val = min_t(uint32_t, result->cur_val, max->cur);
+               result->enable = true;
+       }
+
+       DRM_DEBUG_KMS("WM%d: %sabled\n", level, result->enable ? "en" : "dis");
+
+       return ret;
+}
+
+static void ilk_compute_wm_level(struct drm_i915_private *dev_priv,
+                                int level,
+                                struct hsw_pipe_wm_parameters *p,
+                                struct intel_wm_level *result)
+{
+       uint16_t pri_latency = dev_priv->wm.pri_latency[level];
+       uint16_t spr_latency = dev_priv->wm.spr_latency[level];
+       uint16_t cur_latency = dev_priv->wm.cur_latency[level];
+
+       /* WM1+ latency values stored in 0.5us units */
+       if (level > 0) {
+               pri_latency *= 5;
+               spr_latency *= 5;
+               cur_latency *= 5;
+       }
+
+       result->pri_val = ilk_compute_pri_wm(p, pri_latency, level);
+       result->spr_val = ilk_compute_spr_wm(p, spr_latency);
+       result->cur_val = ilk_compute_cur_wm(p, cur_latency);
+       result->fbc_val = ilk_compute_fbc_wm(p, result->pri_val);
+       result->enable = true;
+}
+
+static bool hsw_compute_lp_wm(struct drm_i915_private *dev_priv,
+                             int level, struct hsw_wm_maximums *max,
+                             struct hsw_pipe_wm_parameters *params,
+                             struct intel_wm_level *result)
+{
+       enum pipe pipe;
+       struct intel_wm_level res[3];
+
+       for (pipe = PIPE_A; pipe <= PIPE_C; pipe++)
+               ilk_compute_wm_level(dev_priv, level, &params[pipe], &res[pipe]);
+
+       result->pri_val = max3(res[0].pri_val, res[1].pri_val, res[2].pri_val);
+       result->spr_val = max3(res[0].spr_val, res[1].spr_val, res[2].spr_val);
+       result->cur_val = max3(res[0].cur_val, res[1].cur_val, res[2].cur_val);
+       result->fbc_val = max3(res[0].fbc_val, res[1].fbc_val, res[2].fbc_val);
+       result->enable = true;
+
+       return ilk_check_wm(level, max, result);
 }
 
 static uint32_t hsw_compute_wm_pipe(struct drm_i915_private *dev_priv,
-                                   uint32_t mem_value, enum pipe pipe,
+                                   enum pipe pipe,
                                    struct hsw_pipe_wm_parameters *params)
 {
        uint32_t pri_val, cur_val, spr_val;
+       /* WM0 latency values stored in 0.1us units */
+       uint16_t pri_latency = dev_priv->wm.pri_latency[0];
+       uint16_t spr_latency = dev_priv->wm.spr_latency[0];
+       uint16_t cur_latency = dev_priv->wm.cur_latency[0];
 
-       pri_val = hsw_compute_pri_wm(params, mem_value, false);
-       spr_val = hsw_compute_spr_wm(params, mem_value);
-       cur_val = hsw_compute_cur_wm(params, mem_value);
+       pri_val = ilk_compute_pri_wm(params, pri_latency, false);
+       spr_val = ilk_compute_spr_wm(params, spr_latency);
+       cur_val = ilk_compute_cur_wm(params, cur_latency);
 
        WARN(pri_val > 127,
             "Primary WM error, mode not supported for pipe %c\n",
@@ -2338,27 +2505,116 @@ hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc)
               PIPE_WM_LINETIME_TIME(linetime);
 }
 
+static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[5])
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       if (IS_HASWELL(dev)) {
+               uint64_t sskpd = I915_READ64(MCH_SSKPD);
+
+               wm[0] = (sskpd >> 56) & 0xFF;
+               if (wm[0] == 0)
+                       wm[0] = sskpd & 0xF;
+               wm[1] = (sskpd >> 4) & 0xFF;
+               wm[2] = (sskpd >> 12) & 0xFF;
+               wm[3] = (sskpd >> 20) & 0x1FF;
+               wm[4] = (sskpd >> 32) & 0x1FF;
+       } else if (INTEL_INFO(dev)->gen >= 6) {
+               uint32_t sskpd = I915_READ(MCH_SSKPD);
+
+               wm[0] = (sskpd >> SSKPD_WM0_SHIFT) & SSKPD_WM_MASK;
+               wm[1] = (sskpd >> SSKPD_WM1_SHIFT) & SSKPD_WM_MASK;
+               wm[2] = (sskpd >> SSKPD_WM2_SHIFT) & SSKPD_WM_MASK;
+               wm[3] = (sskpd >> SSKPD_WM3_SHIFT) & SSKPD_WM_MASK;
+       } else if (INTEL_INFO(dev)->gen >= 5) {
+               uint32_t mltr = I915_READ(MLTR_ILK);
+
+               /* ILK primary LP0 latency is 700 ns */
+               wm[0] = 7;
+               wm[1] = (mltr >> MLTR_WM1_SHIFT) & ILK_SRLT_MASK;
+               wm[2] = (mltr >> MLTR_WM2_SHIFT) & ILK_SRLT_MASK;
+       }
+}
+
+static void intel_fixup_spr_wm_latency(struct drm_device *dev, uint16_t wm[5])
+{
+       /* ILK sprite LP0 latency is 1300 ns */
+       if (INTEL_INFO(dev)->gen == 5)
+               wm[0] = 13;
+}
+
+static void intel_fixup_cur_wm_latency(struct drm_device *dev, uint16_t wm[5])
+{
+       /* ILK cursor LP0 latency is 1300 ns */
+       if (INTEL_INFO(dev)->gen == 5)
+               wm[0] = 13;
+
+       /* WaDoubleCursorLP3Latency:ivb */
+       if (IS_IVYBRIDGE(dev))
+               wm[3] *= 2;
+}
+
+static void intel_print_wm_latency(struct drm_device *dev,
+                                  const char *name,
+                                  const uint16_t wm[5])
+{
+       int level, max_level;
+
+       /* how many WM levels are we expecting */
+       if (IS_HASWELL(dev))
+               max_level = 4;
+       else if (INTEL_INFO(dev)->gen >= 6)
+               max_level = 3;
+       else
+               max_level = 2;
+
+       for (level = 0; level <= max_level; level++) {
+               unsigned int latency = wm[level];
+
+               if (latency == 0) {
+                       DRM_ERROR("%s WM%d latency not provided\n",
+                                 name, level);
+                       continue;
+               }
+
+               /* WM1+ latency values in 0.5us units */
+               if (level > 0)
+                       latency *= 5;
+
+               DRM_DEBUG_KMS("%s WM%d latency %u (%u.%u usec)\n",
+                             name, level, wm[level],
+                             latency / 10, latency % 10);
+       }
+}
+
+static void intel_setup_wm_latency(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       intel_read_wm_latency(dev, dev_priv->wm.pri_latency);
+
+       memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency,
+              sizeof(dev_priv->wm.pri_latency));
+       memcpy(dev_priv->wm.cur_latency, dev_priv->wm.pri_latency,
+              sizeof(dev_priv->wm.pri_latency));
+
+       intel_fixup_spr_wm_latency(dev, dev_priv->wm.spr_latency);
+       intel_fixup_cur_wm_latency(dev, dev_priv->wm.cur_latency);
+
+       intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
+       intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
+       intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
+}
+
 static void hsw_compute_wm_parameters(struct drm_device *dev,
                                      struct hsw_pipe_wm_parameters *params,
-                                     uint32_t *wm,
                                      struct hsw_wm_maximums *lp_max_1_2,
                                      struct hsw_wm_maximums *lp_max_5_6)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc;
        struct drm_plane *plane;
-       uint64_t sskpd = I915_READ64(MCH_SSKPD);
        enum pipe pipe;
-       int pipes_active = 0, sprites_enabled = 0;
-
-       if ((sskpd >> 56) & 0xFF)
-               wm[0] = (sskpd >> 56) & 0xFF;
-       else
-               wm[0] = sskpd & 0xF;
-       wm[1] = ((sskpd >> 4) & 0xFF) * 5;
-       wm[2] = ((sskpd >> 12) & 0xFF) * 5;
-       wm[3] = ((sskpd >> 20) & 0x1FF) * 5;
-       wm[4] = ((sskpd >> 32) & 0x1FF) * 5;
+       struct intel_wm_config config = {};
 
        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -2371,15 +2627,18 @@ static void hsw_compute_wm_parameters(struct drm_device *dev,
                if (!p->active)
                        continue;
 
-               pipes_active++;
+               config.num_pipes_active++;
 
                p->pipe_htotal = intel_crtc->config.adjusted_mode.htotal;
-               p->pixel_rate = hsw_wm_get_pixel_rate(dev, crtc);
-               p->pri_bytes_per_pixel = crtc->fb->bits_per_pixel / 8;
-               p->cur_bytes_per_pixel = 4;
-               p->pri_horiz_pixels =
+               p->pixel_rate = ilk_pipe_pixel_rate(dev, crtc);
+               p->pri.bytes_per_pixel = crtc->fb->bits_per_pixel / 8;
+               p->cur.bytes_per_pixel = 4;
+               p->pri.horiz_pixels =
                        intel_crtc->config.requested_mode.hdisplay;
-               p->cur_horiz_pixels = 64;
+               p->cur.horiz_pixels = 64;
+               /* TODO: for now, assume primary and cursor planes are always enabled. */
+               p->pri.enabled = true;
+               p->cur.enabled = true;
        }
 
        list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
@@ -2389,59 +2648,53 @@ static void hsw_compute_wm_parameters(struct drm_device *dev,
                pipe = intel_plane->pipe;
                p = &params[pipe];
 
-               p->sprite_enabled = intel_plane->wm.enable;
-               p->spr_bytes_per_pixel = intel_plane->wm.bytes_per_pixel;
-               p->spr_horiz_pixels = intel_plane->wm.horiz_pixels;
+               p->spr = intel_plane->wm;
 
-               if (p->sprite_enabled)
-                       sprites_enabled++;
+               config.sprites_enabled |= p->spr.enabled;
+               config.sprites_scaled |= p->spr.scaled;
        }
 
-       if (pipes_active > 1) {
-               lp_max_1_2->pri = lp_max_5_6->pri = sprites_enabled ? 128 : 256;
-               lp_max_1_2->spr = lp_max_5_6->spr = 128;
-               lp_max_1_2->cur = lp_max_5_6->cur = 64;
-       } else {
-               lp_max_1_2->pri = sprites_enabled ? 384 : 768;
-               lp_max_5_6->pri = sprites_enabled ? 128 : 768;
-               lp_max_1_2->spr = 384;
-               lp_max_5_6->spr = 640;
-               lp_max_1_2->cur = lp_max_5_6->cur = 255;
-       }
-       lp_max_1_2->fbc = lp_max_5_6->fbc = 15;
+       ilk_wm_max(dev, 1, &config, INTEL_DDB_PART_1_2, lp_max_1_2);
+
+       /* 5/6 split only in single pipe config on IVB+ */
+       if (INTEL_INFO(dev)->gen >= 7 && config.num_pipes_active <= 1)
+               ilk_wm_max(dev, 1, &config, INTEL_DDB_PART_5_6, lp_max_5_6);
+       else
+               *lp_max_5_6 = *lp_max_1_2;
 }
 
 static void hsw_compute_wm_results(struct drm_device *dev,
                                   struct hsw_pipe_wm_parameters *params,
-                                  uint32_t *wm,
                                   struct hsw_wm_maximums *lp_maximums,
                                   struct hsw_wm_values *results)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc;
-       struct hsw_lp_wm_result lp_results[4] = {};
+       struct intel_wm_level lp_results[4] = {};
        enum pipe pipe;
        int level, max_level, wm_lp;
 
        for (level = 1; level <= 4; level++)
-               if (!hsw_compute_lp_wm(wm[level], lp_maximums, params,
+               if (!hsw_compute_lp_wm(dev_priv, level,
+                                      lp_maximums, params,
                                       &lp_results[level - 1]))
                        break;
        max_level = level - 1;
 
+       memset(results, 0, sizeof(*results));
+
        /* The spec says it is preferred to disable FBC WMs instead of disabling
         * a WM level. */
        results->enable_fbc_wm = true;
        for (level = 1; level <= max_level; level++) {
-               if (!lp_results[level - 1].fbc_enable) {
+               if (lp_results[level - 1].fbc_val > lp_maximums->fbc) {
                        results->enable_fbc_wm = false;
-                       break;
+                       lp_results[level - 1].fbc_val = 0;
                }
        }
 
-       memset(results, 0, sizeof(*results));
        for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
-               const struct hsw_lp_wm_result *r;
+               const struct intel_wm_level *r;
 
                level = (max_level == 4 && wm_lp > 1) ? wm_lp + 1 : wm_lp;
                if (level > max_level)
@@ -2456,8 +2709,7 @@ static void hsw_compute_wm_results(struct drm_device *dev,
        }
 
        for_each_pipe(pipe)
-               results->wm_pipe[pipe] = hsw_compute_wm_pipe(dev_priv, wm[0],
-                                                            pipe,
+               results->wm_pipe[pipe] = hsw_compute_wm_pipe(dev_priv, pipe,
                                                             &params[pipe]);
 
        for_each_pipe(pipe) {
@@ -2468,8 +2720,8 @@ static void hsw_compute_wm_results(struct drm_device *dev,
 
 /* Find the result with the highest level enabled. Check for enable_fbc_wm in
  * case both are at the same level. Prefer r1 in case they're the same. */
-struct hsw_wm_values *hsw_find_best_result(struct hsw_wm_values *r1,
-                                          struct hsw_wm_values *r2)
+static struct hsw_wm_values *hsw_find_best_result(struct hsw_wm_values *r1,
+                                                 struct hsw_wm_values *r2)
 {
        int i, val_r1 = 0, val_r2 = 0;
 
@@ -2498,11 +2750,11 @@ struct hsw_wm_values *hsw_find_best_result(struct hsw_wm_values *r1,
  */
 static void hsw_write_wm_values(struct drm_i915_private *dev_priv,
                                struct hsw_wm_values *results,
-                               enum hsw_data_buf_partitioning partitioning)
+                               enum intel_ddb_partitioning partitioning)
 {
        struct hsw_wm_values previous;
        uint32_t val;
-       enum hsw_data_buf_partitioning prev_partitioning;
+       enum intel_ddb_partitioning prev_partitioning;
        bool prev_enable_fbc_wm;
 
        previous.wm_pipe[0] = I915_READ(WM0_PIPEA_ILK);
@@ -2519,7 +2771,7 @@ static void hsw_write_wm_values(struct drm_i915_private *dev_priv,
        previous.wm_linetime[2] = I915_READ(PIPE_WM_LINETIME(PIPE_C));
 
        prev_partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
-                           HSW_DATA_BUF_PART_5_6 : HSW_DATA_BUF_PART_1_2;
+                               INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
 
        prev_enable_fbc_wm = !(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS);
 
@@ -2558,7 +2810,7 @@ static void hsw_write_wm_values(struct drm_i915_private *dev_priv,
 
        if (prev_partitioning != partitioning) {
                val = I915_READ(WM_MISC);
-               if (partitioning == HSW_DATA_BUF_PART_1_2)
+               if (partitioning == INTEL_DDB_PART_1_2)
                        val &= ~WM_MISC_DATA_PARTITION_5_6;
                else
                        val |= WM_MISC_DATA_PARTITION_5_6;
@@ -2595,44 +2847,39 @@ static void haswell_update_wm(struct drm_device *dev)
        struct hsw_wm_maximums lp_max_1_2, lp_max_5_6;
        struct hsw_pipe_wm_parameters params[3];
        struct hsw_wm_values results_1_2, results_5_6, *best_results;
-       uint32_t wm[5];
-       enum hsw_data_buf_partitioning partitioning;
+       enum intel_ddb_partitioning partitioning;
 
-       hsw_compute_wm_parameters(dev, params, wm, &lp_max_1_2, &lp_max_5_6);
+       hsw_compute_wm_parameters(dev, params, &lp_max_1_2, &lp_max_5_6);
 
-       hsw_compute_wm_results(dev, params, wm, &lp_max_1_2, &results_1_2);
+       hsw_compute_wm_results(dev, params,
+                              &lp_max_1_2, &results_1_2);
        if (lp_max_1_2.pri != lp_max_5_6.pri) {
-               hsw_compute_wm_results(dev, params, wm, &lp_max_5_6,
-                                      &results_5_6);
+               hsw_compute_wm_results(dev, params,
+                                      &lp_max_5_6, &results_5_6);
                best_results = hsw_find_best_result(&results_1_2, &results_5_6);
        } else {
                best_results = &results_1_2;
        }
 
        partitioning = (best_results == &results_1_2) ?
-                      HSW_DATA_BUF_PART_1_2 : HSW_DATA_BUF_PART_5_6;
+                      INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;
 
        hsw_write_wm_values(dev_priv, best_results, partitioning);
 }
 
-static void haswell_update_sprite_wm(struct drm_device *dev, int pipe,
+static void haswell_update_sprite_wm(struct drm_plane *plane,
+                                    struct drm_crtc *crtc,
                                     uint32_t sprite_width, int pixel_size,
-                                    bool enable)
+                                    bool enabled, bool scaled)
 {
-       struct drm_plane *plane;
-
-       list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
-               struct intel_plane *intel_plane = to_intel_plane(plane);
+       struct intel_plane *intel_plane = to_intel_plane(plane);
 
-               if (intel_plane->pipe == pipe) {
-                       intel_plane->wm.enable = enable;
-                       intel_plane->wm.horiz_pixels = sprite_width + 1;
-                       intel_plane->wm.bytes_per_pixel = pixel_size;
-                       break;
-               }
-       }
+       intel_plane->wm.enabled = enabled;
+       intel_plane->wm.scaled = scaled;
+       intel_plane->wm.horiz_pixels = sprite_width;
+       intel_plane->wm.bytes_per_pixel = pixel_size;
 
-       haswell_update_wm(dev);
+       haswell_update_wm(plane->dev);
 }
 
 static bool
@@ -2711,17 +2958,20 @@ sandybridge_compute_sprite_srwm(struct drm_device *dev, int plane,
        return *sprite_wm > 0x3ff ? false : true;
 }
 
-static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe,
+static void sandybridge_update_sprite_wm(struct drm_plane *plane,
+                                        struct drm_crtc *crtc,
                                         uint32_t sprite_width, int pixel_size,
-                                        bool enable)
+                                        bool enabled, bool scaled)
 {
+       struct drm_device *dev = plane->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       int latency = SNB_READ_WM0_LATENCY() * 100;     /* In unit 0.1us */
+       int pipe = to_intel_plane(plane)->pipe;
+       int latency = dev_priv->wm.spr_latency[0] * 100;        /* In unit 0.1us */
        u32 val;
        int sprite_wm, reg;
        int ret;
 
-       if (!enable)
+       if (!enabled)
                return;
 
        switch (pipe) {
@@ -2756,7 +3006,7 @@ static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe,
        ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
                                              pixel_size,
                                              &sandybridge_display_srwm_info,
-                                             SNB_READ_WM1_LATENCY() * 500,
+                                             dev_priv->wm.spr_latency[1] * 500,
                                              &sprite_wm);
        if (!ret) {
                DRM_DEBUG_KMS("failed to compute sprite lp1 wm on pipe %c\n",
@@ -2772,7 +3022,7 @@ static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe,
        ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
                                              pixel_size,
                                              &sandybridge_display_srwm_info,
-                                             SNB_READ_WM2_LATENCY() * 500,
+                                             dev_priv->wm.spr_latency[2] * 500,
                                              &sprite_wm);
        if (!ret) {
                DRM_DEBUG_KMS("failed to compute sprite lp2 wm on pipe %c\n",
@@ -2784,7 +3034,7 @@ static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe,
        ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
                                              pixel_size,
                                              &sandybridge_display_srwm_info,
-                                             SNB_READ_WM3_LATENCY() * 500,
+                                             dev_priv->wm.spr_latency[3] * 500,
                                              &sprite_wm);
        if (!ret) {
                DRM_DEBUG_KMS("failed to compute sprite lp3 wm on pipe %c\n",
@@ -2834,15 +3084,16 @@ void intel_update_watermarks(struct drm_device *dev)
                dev_priv->display.update_wm(dev);
 }
 
-void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
+void intel_update_sprite_watermarks(struct drm_plane *plane,
+                                   struct drm_crtc *crtc,
                                    uint32_t sprite_width, int pixel_size,
-                                   bool enable)
+                                   bool enabled, bool scaled)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = plane->dev->dev_private;
 
        if (dev_priv->display.update_sprite_wm)
-               dev_priv->display.update_sprite_wm(dev, pipe, sprite_width,
-                                                  pixel_size, enable);
+               dev_priv->display.update_sprite_wm(plane, crtc, sprite_width,
+                                                  pixel_size, enabled, scaled);
 }
 
 static struct drm_i915_gem_object *
@@ -2859,7 +3110,7 @@ intel_alloc_context_page(struct drm_device *dev)
                return NULL;
        }
 
-       ret = i915_gem_object_pin(ctx, 4096, true, false);
+       ret = i915_gem_obj_ggtt_pin(ctx, 4096, true, false);
        if (ret) {
                DRM_ERROR("failed to pin power context: %d\n", ret);
                goto err_unref;
@@ -3076,19 +3327,12 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
  */
 static void vlv_update_rps_cur_delay(struct drm_i915_private *dev_priv)
 {
-       unsigned long timeout = jiffies + msecs_to_jiffies(10);
        u32 pval;
 
        WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
 
-       do {
-               pval = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
-               if (time_after(jiffies, timeout)) {
-                       DRM_DEBUG_DRIVER("timed out waiting for Punit\n");
-                       break;
-               }
-               udelay(10);
-       } while (pval & 1);
+       if (wait_for(((pval = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS)) & GENFREQSTATUS) == 0, 10))
+               DRM_DEBUG_DRIVER("timed out waiting for Punit\n");
 
        pval >>= 8;
 
@@ -3129,13 +3373,10 @@ void valleyview_set_rps(struct drm_device *dev, u8 val)
        trace_intel_gpu_freq_change(vlv_gpu_freq(dev_priv->mem_freq, val));
 }
 
-
-static void gen6_disable_rps(struct drm_device *dev)
+static void gen6_disable_rps_interrupts(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
 
-       I915_WRITE(GEN6_RC_CONTROL, 0);
-       I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
        I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
        I915_WRITE(GEN6_PMIER, I915_READ(GEN6_PMIER) & ~GEN6_PM_RPS_EVENTS);
        /* Complete PM interrupt masking here doesn't race with the rps work
@@ -3143,30 +3384,30 @@ static void gen6_disable_rps(struct drm_device *dev)
         * register (PMIMR) to mask PM interrupts. The only risk is in leaving
         * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */
 
-       spin_lock_irq(&dev_priv->rps.lock);
+       spin_lock_irq(&dev_priv->irq_lock);
        dev_priv->rps.pm_iir = 0;
-       spin_unlock_irq(&dev_priv->rps.lock);
+       spin_unlock_irq(&dev_priv->irq_lock);
 
        I915_WRITE(GEN6_PMIIR, GEN6_PM_RPS_EVENTS);
 }
 
-static void valleyview_disable_rps(struct drm_device *dev)
+static void gen6_disable_rps(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
 
        I915_WRITE(GEN6_RC_CONTROL, 0);
-       I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
-       I915_WRITE(GEN6_PMIER, 0);
-       /* Complete PM interrupt masking here doesn't race with the rps work
-        * item again unmasking PM interrupts because that is using a different
-        * register (PMIMR) to mask PM interrupts. The only risk is in leaving
-        * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */
+       I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
 
-       spin_lock_irq(&dev_priv->rps.lock);
-       dev_priv->rps.pm_iir = 0;
-       spin_unlock_irq(&dev_priv->rps.lock);
+       gen6_disable_rps_interrupts(dev);
+}
 
-       I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
+static void valleyview_disable_rps(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       I915_WRITE(GEN6_RC_CONTROL, 0);
+
+       gen6_disable_rps_interrupts(dev);
 
        if (dev_priv->vlv_pctx) {
                drm_gem_object_unreference(&dev_priv->vlv_pctx->base);
@@ -3176,6 +3417,10 @@ static void valleyview_disable_rps(struct drm_device *dev)
 
 int intel_enable_rc6(const struct drm_device *dev)
 {
+       /* No RC6 before Ironlake */
+       if (INTEL_INFO(dev)->gen < 5)
+               return 0;
+
        /* Respect the kernel parameter if it is set */
        if (i915_enable_rc6 >= 0)
                return i915_enable_rc6;
@@ -3199,6 +3444,19 @@ int intel_enable_rc6(const struct drm_device *dev)
        return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
 }
 
+static void gen6_enable_rps_interrupts(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       spin_lock_irq(&dev_priv->irq_lock);
+       WARN_ON(dev_priv->rps.pm_iir);
+       snb_enable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS);
+       I915_WRITE(GEN6_PMIIR, GEN6_PM_RPS_EVENTS);
+       spin_unlock_irq(&dev_priv->irq_lock);
+       /* only unmask PM interrupts we need. Mask all others. */
+       I915_WRITE(GEN6_PMINTRMSK, ~GEN6_PM_RPS_EVENTS);
+}
+
 static void gen6_enable_rps(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3250,7 +3508,10 @@ static void gen6_enable_rps(struct drm_device *dev)
 
        I915_WRITE(GEN6_RC_SLEEP, 0);
        I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
-       I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
+       if (INTEL_INFO(dev)->gen <= 6 || IS_IVYBRIDGE(dev))
+               I915_WRITE(GEN6_RC6_THRESHOLD, 125000);
+       else
+               I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
        I915_WRITE(GEN6_RC6p_THRESHOLD, 150000);
        I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
 
@@ -3327,17 +3588,7 @@ static void gen6_enable_rps(struct drm_device *dev)
 
        gen6_set_rps(dev_priv->dev, (gt_perf_status & 0xff00) >> 8);
 
-       /* requires MSI enabled */
-       I915_WRITE(GEN6_PMIER, I915_READ(GEN6_PMIER) | GEN6_PM_RPS_EVENTS);
-       spin_lock_irq(&dev_priv->rps.lock);
-       /* FIXME: Our interrupt enabling sequence is bonghits.
-        * dev_priv->rps.pm_iir really should be 0 here. */
-       dev_priv->rps.pm_iir = 0;
-       I915_WRITE(GEN6_PMIMR, I915_READ(GEN6_PMIMR) & ~GEN6_PM_RPS_EVENTS);
-       I915_WRITE(GEN6_PMIIR, GEN6_PM_RPS_EVENTS);
-       spin_unlock_irq(&dev_priv->rps.lock);
-       /* unmask all PM interrupts */
-       I915_WRITE(GEN6_PMINTRMSK, 0);
+       gen6_enable_rps_interrupts(dev);
 
        rc6vids = 0;
        ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
@@ -3482,7 +3733,7 @@ static void valleyview_setup_pctx(struct drm_device *dev)
                pcbr_offset = (pcbr & (~4095)) - dev_priv->mm.stolen_base;
                pctx = i915_gem_object_create_stolen_for_preallocated(dev_priv->dev,
                                                                      pcbr_offset,
-                                                                     -1,
+                                                                     I915_GTT_OFFSET_NONE,
                                                                      pctx_size);
                goto out;
        }
@@ -3607,14 +3858,7 @@ static void valleyview_enable_rps(struct drm_device *dev)
 
        valleyview_set_rps(dev_priv->dev, dev_priv->rps.rpe_delay);
 
-       /* requires MSI enabled */
-       I915_WRITE(GEN6_PMIER, GEN6_PM_RPS_EVENTS);
-       spin_lock_irq(&dev_priv->rps.lock);
-       WARN_ON(dev_priv->rps.pm_iir != 0);
-       I915_WRITE(GEN6_PMIMR, 0);
-       spin_unlock_irq(&dev_priv->rps.lock);
-       /* enable all PM interrupts */
-       I915_WRITE(GEN6_PMINTRMSK, 0);
+       gen6_enable_rps_interrupts(dev);
 
        gen6_gt_force_wake_put(dev_priv);
 }
@@ -3708,7 +3952,7 @@ static void ironlake_enable_rc6(struct drm_device *dev)
 
        intel_ring_emit(ring, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
        intel_ring_emit(ring, MI_SET_CONTEXT);
-       intel_ring_emit(ring, dev_priv->ips.renderctx->gtt_offset |
+       intel_ring_emit(ring, i915_gem_obj_ggtt_offset(dev_priv->ips.renderctx) |
                        MI_MM_SPACE_GTT |
                        MI_SAVE_EXT_STATE_EN |
                        MI_RESTORE_EXT_STATE_EN |
@@ -3731,7 +3975,7 @@ static void ironlake_enable_rc6(struct drm_device *dev)
                return;
        }
 
-       I915_WRITE(PWRCTXA, dev_priv->ips.pwrctx->gtt_offset | PWRCTX_EN);
+       I915_WRITE(PWRCTXA, i915_gem_obj_ggtt_offset(dev_priv->ips.pwrctx) | PWRCTX_EN);
        I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
 }
 
@@ -4429,7 +4673,10 @@ static void ironlake_init_clock_gating(struct drm_device *dev)
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
 
-       /* Required for FBC */
+       /*
+        * Required for FBC
+        * WaFbcDisableDpfcClockGating:ilk
+        */
        dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE |
                   ILK_DPFCUNIT_CLOCK_GATE_DISABLE |
                   ILK_DPFDUNIT_CLOCK_GATE_ENABLE;
@@ -4466,6 +4713,7 @@ static void ironlake_init_clock_gating(struct drm_device *dev)
         * The bit 7,8,9 of 0x42020.
         */
        if (IS_IRONLAKE_M(dev)) {
+               /* WaFbcAsynchFlipDisableFbcQueue:ilk */
                I915_WRITE(ILK_DISPLAY_CHICKEN1,
                           I915_READ(ILK_DISPLAY_CHICKEN1) |
                           ILK_FBCQ_DIS);
@@ -4602,6 +4850,8 @@ static void gen6_init_clock_gating(struct drm_device *dev)
         * The bit5 and bit7 of 0x42020
         * The bit14 of 0x70180
         * The bit14 of 0x71180
+        *
+        * WaFbcAsynchFlipDisableFbcQueue:snb
         */
        I915_WRITE(ILK_DISPLAY_CHICKEN1,
                   I915_READ(ILK_DISPLAY_CHICKEN1) |
@@ -4614,10 +4864,6 @@ static void gen6_init_clock_gating(struct drm_device *dev)
                   ILK_DPARBUNIT_CLOCK_GATE_ENABLE  |
                   ILK_DPFDUNIT_CLOCK_GATE_ENABLE);
 
-       /* WaMbcDriverBootEnable:snb */
-       I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
-                  GEN6_MBCTL_ENABLE_BOOT_FETCH);
-
        g4x_disable_trickle_feed(dev);
 
        /* The default value should be 0x200 according to docs, but the two
@@ -4713,10 +4959,6 @@ static void haswell_init_clock_gating(struct drm_device *dev)
        I915_WRITE(CACHE_MODE_1,
                   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
 
-       /* WaMbcDriverBootEnable:hsw */
-       I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
-                  GEN6_MBCTL_ENABLE_BOOT_FETCH);
-
        /* WaSwitchSolVfFArbitrationPriority:hsw */
        I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
 
@@ -4800,10 +5042,6 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
 
        g4x_disable_trickle_feed(dev);
 
-       /* WaMbcDriverBootEnable:ivb */
-       I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
-                  GEN6_MBCTL_ENABLE_BOOT_FETCH);
-
        /* WaVSRefCountFullforceMissDisable:ivb */
        gen7_setup_fixed_func_scheduler(dev_priv);
 
@@ -4863,11 +5101,6 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
                   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
                   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
 
-       /* WaMbcDriverBootEnable:vlv */
-       I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
-                  GEN6_MBCTL_ENABLE_BOOT_FETCH);
-
-
        /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
         * gating disable must be set.  Failure to set it results in
         * flickering pixels due to Z write ordering failures after
@@ -5035,7 +5268,7 @@ bool intel_display_power_enabled(struct drm_device *dev,
        case POWER_DOMAIN_TRANSCODER_B:
        case POWER_DOMAIN_TRANSCODER_C:
                return I915_READ(HSW_PWR_WELL_DRIVER) ==
-                      (HSW_PWR_WELL_ENABLE | HSW_PWR_WELL_STATE);
+                    (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
        default:
                BUG();
        }
@@ -5048,17 +5281,18 @@ static void __intel_set_power_well(struct drm_device *dev, bool enable)
        uint32_t tmp;
 
        tmp = I915_READ(HSW_PWR_WELL_DRIVER);
-       is_enabled = tmp & HSW_PWR_WELL_STATE;
-       enable_requested = tmp & HSW_PWR_WELL_ENABLE;
+       is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED;
+       enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST;
 
        if (enable) {
                if (!enable_requested)
-                       I915_WRITE(HSW_PWR_WELL_DRIVER, HSW_PWR_WELL_ENABLE);
+                       I915_WRITE(HSW_PWR_WELL_DRIVER,
+                                  HSW_PWR_WELL_ENABLE_REQUEST);
 
                if (!is_enabled) {
                        DRM_DEBUG_KMS("Enabling power well\n");
                        if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
-                                     HSW_PWR_WELL_STATE), 20))
+                                     HSW_PWR_WELL_STATE_ENABLED), 20))
                                DRM_ERROR("Timeout enabling power well\n");
                }
        } else {
@@ -5178,7 +5412,7 @@ void intel_init_power_well(struct drm_device *dev)
 
        /* We're taking over the BIOS, so clear any requests made by it since
         * the driver is in charge now. */
-       if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE)
+       if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST)
                I915_WRITE(HSW_PWR_WELL_BIOS, 0);
 }
 
@@ -5217,8 +5451,12 @@ void intel_init_pm(struct drm_device *dev)
 
        /* For FIFO watermark updates */
        if (HAS_PCH_SPLIT(dev)) {
+               intel_setup_wm_latency(dev);
+
                if (IS_GEN5(dev)) {
-                       if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK)
+                       if (dev_priv->wm.pri_latency[1] &&
+                           dev_priv->wm.spr_latency[1] &&
+                           dev_priv->wm.cur_latency[1])
                                dev_priv->display.update_wm = ironlake_update_wm;
                        else {
                                DRM_DEBUG_KMS("Failed to get proper latency. "
@@ -5227,7 +5465,9 @@ void intel_init_pm(struct drm_device *dev)
                        }
                        dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
                } else if (IS_GEN6(dev)) {
-                       if (SNB_READ_WM0_LATENCY()) {
+                       if (dev_priv->wm.pri_latency[0] &&
+                           dev_priv->wm.spr_latency[0] &&
+                           dev_priv->wm.cur_latency[0]) {
                                dev_priv->display.update_wm = sandybridge_update_wm;
                                dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
                        } else {
@@ -5237,7 +5477,9 @@ void intel_init_pm(struct drm_device *dev)
                        }
                        dev_priv->display.init_clock_gating = gen6_init_clock_gating;
                } else if (IS_IVYBRIDGE(dev)) {
-                       if (SNB_READ_WM0_LATENCY()) {
+                       if (dev_priv->wm.pri_latency[0] &&
+                           dev_priv->wm.spr_latency[0] &&
+                           dev_priv->wm.cur_latency[0]) {
                                dev_priv->display.update_wm = ivybridge_update_wm;
                                dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
                        } else {
@@ -5247,7 +5489,9 @@ void intel_init_pm(struct drm_device *dev)
                        }
                        dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
                } else if (IS_HASWELL(dev)) {
-                       if (I915_READ64(MCH_SSKPD)) {
+                       if (dev_priv->wm.pri_latency[0] &&
+                           dev_priv->wm.spr_latency[0] &&
+                           dev_priv->wm.cur_latency[0]) {
                                dev_priv->display.update_wm = haswell_update_wm;
                                dev_priv->display.update_sprite_wm =
                                        haswell_update_sprite_wm;
@@ -5310,260 +5554,6 @@ void intel_init_pm(struct drm_device *dev)
        }
 }
 
-static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
-{
-       u32 gt_thread_status_mask;
-
-       if (IS_HASWELL(dev_priv->dev))
-               gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK_HSW;
-       else
-               gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK;
-
-       /* w/a for a sporadic read returning 0 by waiting for the GT
-        * thread to wake up.
-        */
-       if (wait_for_atomic_us((I915_READ_NOTRACE(GEN6_GT_THREAD_STATUS_REG) & gt_thread_status_mask) == 0, 500))
-               DRM_ERROR("GT thread status wait timed out\n");
-}
-
-static void __gen6_gt_force_wake_reset(struct drm_i915_private *dev_priv)
-{
-       I915_WRITE_NOTRACE(FORCEWAKE, 0);
-       POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
-}
-
-static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
-{
-       if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK) & 1) == 0,
-                           FORCEWAKE_ACK_TIMEOUT_MS))
-               DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
-
-       I915_WRITE_NOTRACE(FORCEWAKE, 1);
-       POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
-
-       if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK) & 1),
-                           FORCEWAKE_ACK_TIMEOUT_MS))
-               DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
-
-       /* WaRsForcewakeWaitTC0:snb */
-       __gen6_gt_wait_for_thread_c0(dev_priv);
-}
-
-static void __gen6_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
-{
-       I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff));
-       /* something from same cacheline, but !FORCEWAKE_MT */
-       POSTING_READ(ECOBUS);
-}
-
-static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
-{
-       u32 forcewake_ack;
-
-       if (IS_HASWELL(dev_priv->dev))
-               forcewake_ack = FORCEWAKE_ACK_HSW;
-       else
-               forcewake_ack = FORCEWAKE_MT_ACK;
-
-       if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & FORCEWAKE_KERNEL) == 0,
-                           FORCEWAKE_ACK_TIMEOUT_MS))
-               DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
-
-       I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
-       /* something from same cacheline, but !FORCEWAKE_MT */
-       POSTING_READ(ECOBUS);
-
-       if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & FORCEWAKE_KERNEL),
-                           FORCEWAKE_ACK_TIMEOUT_MS))
-               DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
-
-       /* WaRsForcewakeWaitTC0:ivb,hsw */
-       __gen6_gt_wait_for_thread_c0(dev_priv);
-}
-
-/*
- * Generally this is called implicitly by the register read function. However,
- * if some sequence requires the GT to not power down then this function should
- * be called at the beginning of the sequence followed by a call to
- * gen6_gt_force_wake_put() at the end of the sequence.
- */
-void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
-{
-       unsigned long irqflags;
-
-       spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
-       if (dev_priv->forcewake_count++ == 0)
-               dev_priv->gt.force_wake_get(dev_priv);
-       spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
-}
-
-void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
-{
-       u32 gtfifodbg;
-       gtfifodbg = I915_READ_NOTRACE(GTFIFODBG);
-       if (WARN(gtfifodbg & GT_FIFO_CPU_ERROR_MASK,
-            "MMIO read or write has been dropped %x\n", gtfifodbg))
-               I915_WRITE_NOTRACE(GTFIFODBG, GT_FIFO_CPU_ERROR_MASK);
-}
-
-static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
-{
-       I915_WRITE_NOTRACE(FORCEWAKE, 0);
-       /* something from same cacheline, but !FORCEWAKE */
-       POSTING_READ(ECOBUS);
-       gen6_gt_check_fifodbg(dev_priv);
-}
-
-static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
-{
-       I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
-       /* something from same cacheline, but !FORCEWAKE_MT */
-       POSTING_READ(ECOBUS);
-       gen6_gt_check_fifodbg(dev_priv);
-}
-
-/*
- * see gen6_gt_force_wake_get()
- */
-void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
-{
-       unsigned long irqflags;
-
-       spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
-       if (--dev_priv->forcewake_count == 0)
-               dev_priv->gt.force_wake_put(dev_priv);
-       spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
-}
-
-int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
-{
-       int ret = 0;
-
-       if (dev_priv->gt_fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
-               int loop = 500;
-               u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
-               while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
-                       udelay(10);
-                       fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
-               }
-               if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
-                       ++ret;
-               dev_priv->gt_fifo_count = fifo;
-       }
-       dev_priv->gt_fifo_count--;
-
-       return ret;
-}
-
-static void vlv_force_wake_reset(struct drm_i915_private *dev_priv)
-{
-       I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(0xffff));
-       /* something from same cacheline, but !FORCEWAKE_VLV */
-       POSTING_READ(FORCEWAKE_ACK_VLV);
-}
-
-static void vlv_force_wake_get(struct drm_i915_private *dev_priv)
-{
-       if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL) == 0,
-                           FORCEWAKE_ACK_TIMEOUT_MS))
-               DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
-
-       I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
-       I915_WRITE_NOTRACE(FORCEWAKE_MEDIA_VLV,
-                          _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
-
-       if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL),
-                           FORCEWAKE_ACK_TIMEOUT_MS))
-               DRM_ERROR("Timed out waiting for GT to ack forcewake request.\n");
-
-       if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_MEDIA_VLV) &
-                            FORCEWAKE_KERNEL),
-                           FORCEWAKE_ACK_TIMEOUT_MS))
-               DRM_ERROR("Timed out waiting for media to ack forcewake request.\n");
-
-       /* WaRsForcewakeWaitTC0:vlv */
-       __gen6_gt_wait_for_thread_c0(dev_priv);
-}
-
-static void vlv_force_wake_put(struct drm_i915_private *dev_priv)
-{
-       I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
-       I915_WRITE_NOTRACE(FORCEWAKE_MEDIA_VLV,
-                          _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
-       /* The below doubles as a POSTING_READ */
-       gen6_gt_check_fifodbg(dev_priv);
-}
-
-void intel_gt_sanitize(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
-       if (IS_VALLEYVIEW(dev)) {
-               vlv_force_wake_reset(dev_priv);
-       } else if (INTEL_INFO(dev)->gen >= 6) {
-               __gen6_gt_force_wake_reset(dev_priv);
-               if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
-                       __gen6_gt_force_wake_mt_reset(dev_priv);
-       }
-
-       /* BIOS often leaves RC6 enabled, but disable it for hw init */
-       if (INTEL_INFO(dev)->gen >= 6)
-               intel_disable_gt_powersave(dev);
-}
-
-void intel_gt_init(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
-       if (IS_VALLEYVIEW(dev)) {
-               dev_priv->gt.force_wake_get = vlv_force_wake_get;
-               dev_priv->gt.force_wake_put = vlv_force_wake_put;
-       } else if (IS_HASWELL(dev)) {
-               dev_priv->gt.force_wake_get = __gen6_gt_force_wake_mt_get;
-               dev_priv->gt.force_wake_put = __gen6_gt_force_wake_mt_put;
-       } else if (IS_IVYBRIDGE(dev)) {
-               u32 ecobus;
-
-               /* IVB configs may use multi-threaded forcewake */
-
-               /* A small trick here - if the bios hasn't configured
-                * MT forcewake, and if the device is in RC6, then
-                * force_wake_mt_get will not wake the device and the
-                * ECOBUS read will return zero. Which will be
-                * (correctly) interpreted by the test below as MT
-                * forcewake being disabled.
-                */
-               mutex_lock(&dev->struct_mutex);
-               __gen6_gt_force_wake_mt_get(dev_priv);
-               ecobus = I915_READ_NOTRACE(ECOBUS);
-               __gen6_gt_force_wake_mt_put(dev_priv);
-               mutex_unlock(&dev->struct_mutex);
-
-               if (ecobus & FORCEWAKE_MT_ENABLE) {
-                       dev_priv->gt.force_wake_get =
-                                               __gen6_gt_force_wake_mt_get;
-                       dev_priv->gt.force_wake_put =
-                                               __gen6_gt_force_wake_mt_put;
-               } else {
-                       DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
-                       DRM_INFO("when using vblank-synced partial screen updates.\n");
-                       dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get;
-                       dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put;
-               }
-       } else if (IS_GEN6(dev)) {
-               dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get;
-               dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put;
-       }
-}
-
-void intel_pm_init(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
-       INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
-                         intel_gen6_powersave_work);
-}
-
 int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val)
 {
        WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
@@ -5666,3 +5656,11 @@ int vlv_freq_opcode(int ddr_freq, int val)
        return val;
 }
 
+void intel_pm_init(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
+                         intel_gen6_powersave_work);
+}
+
index 664118d8c1d6426353ed97bb61b1113369a7678a..5dd5b384d9569cfbd442ed04b1fe8b1453624d81 100644 (file)
@@ -440,14 +440,14 @@ static int init_ring_common(struct intel_ring_buffer *ring)
         * registers with the above sequence (the readback of the HEAD registers
         * also enforces ordering), otherwise the hw might lose the new ring
         * register values. */
-       I915_WRITE_START(ring, obj->gtt_offset);
+       I915_WRITE_START(ring, i915_gem_obj_ggtt_offset(obj));
        I915_WRITE_CTL(ring,
                        ((ring->size - PAGE_SIZE) & RING_NR_PAGES)
                        | RING_VALID);
 
        /* If the head is still not zero, the ring is dead */
        if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 &&
-                    I915_READ_START(ring) == obj->gtt_offset &&
+                    I915_READ_START(ring) == i915_gem_obj_ggtt_offset(obj) &&
                     (I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) {
                DRM_ERROR("%s initialization failed "
                                "ctl %08x head %08x tail %08x start %08x\n",
@@ -501,11 +501,11 @@ init_pipe_control(struct intel_ring_buffer *ring)
 
        i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
 
-       ret = i915_gem_object_pin(obj, 4096, true, false);
+       ret = i915_gem_obj_ggtt_pin(obj, 4096, true, false);
        if (ret)
                goto err_unref;
 
-       pc->gtt_offset = obj->gtt_offset;
+       pc->gtt_offset = i915_gem_obj_ggtt_offset(obj);
        pc->cpu_page = kmap(sg_page(obj->pages->sgl));
        if (pc->cpu_page == NULL) {
                ret = -ENOMEM;
@@ -836,11 +836,8 @@ gen5_ring_get_irq(struct intel_ring_buffer *ring)
                return false;
 
        spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       if (ring->irq_refcount.gt++ == 0) {
-               dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
-               I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
-               POSTING_READ(GTIMR);
-       }
+       if (ring->irq_refcount++ == 0)
+               ilk_enable_gt_irq(dev_priv, ring->irq_enable_mask);
        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
        return true;
@@ -854,11 +851,8 @@ gen5_ring_put_irq(struct intel_ring_buffer *ring)
        unsigned long flags;
 
        spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       if (--ring->irq_refcount.gt == 0) {
-               dev_priv->gt_irq_mask |= ring->irq_enable_mask;
-               I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
-               POSTING_READ(GTIMR);
-       }
+       if (--ring->irq_refcount == 0)
+               ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask);
        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 }
 
@@ -873,7 +867,7 @@ i9xx_ring_get_irq(struct intel_ring_buffer *ring)
                return false;
 
        spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       if (ring->irq_refcount.gt++ == 0) {
+       if (ring->irq_refcount++ == 0) {
                dev_priv->irq_mask &= ~ring->irq_enable_mask;
                I915_WRITE(IMR, dev_priv->irq_mask);
                POSTING_READ(IMR);
@@ -891,7 +885,7 @@ i9xx_ring_put_irq(struct intel_ring_buffer *ring)
        unsigned long flags;
 
        spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       if (--ring->irq_refcount.gt == 0) {
+       if (--ring->irq_refcount == 0) {
                dev_priv->irq_mask |= ring->irq_enable_mask;
                I915_WRITE(IMR, dev_priv->irq_mask);
                POSTING_READ(IMR);
@@ -910,7 +904,7 @@ i8xx_ring_get_irq(struct intel_ring_buffer *ring)
                return false;
 
        spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       if (ring->irq_refcount.gt++ == 0) {
+       if (ring->irq_refcount++ == 0) {
                dev_priv->irq_mask &= ~ring->irq_enable_mask;
                I915_WRITE16(IMR, dev_priv->irq_mask);
                POSTING_READ16(IMR);
@@ -928,7 +922,7 @@ i8xx_ring_put_irq(struct intel_ring_buffer *ring)
        unsigned long flags;
 
        spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       if (--ring->irq_refcount.gt == 0) {
+       if (--ring->irq_refcount == 0) {
                dev_priv->irq_mask |= ring->irq_enable_mask;
                I915_WRITE16(IMR, dev_priv->irq_mask);
                POSTING_READ16(IMR);
@@ -968,6 +962,18 @@ void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
 
        I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
        POSTING_READ(mmio);
+
+       /* Flush the TLB for this page */
+       if (INTEL_INFO(dev)->gen >= 6) {
+               u32 reg = RING_INSTPM(ring->mmio_base);
+               I915_WRITE(reg,
+                          _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
+                                             INSTPM_SYNC_FLUSH));
+               if (wait_for((I915_READ(reg) & INSTPM_SYNC_FLUSH) == 0,
+                            1000))
+                       DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
+                                 ring->name);
+       }
 }
 
 static int
@@ -1021,16 +1027,14 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring)
        gen6_gt_force_wake_get(dev_priv);
 
        spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       if (ring->irq_refcount.gt++ == 0) {
+       if (ring->irq_refcount++ == 0) {
                if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
                        I915_WRITE_IMR(ring,
                                       ~(ring->irq_enable_mask |
                                         GT_RENDER_L3_PARITY_ERROR_INTERRUPT));
                else
                        I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
-               dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
-               I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
-               POSTING_READ(GTIMR);
+               ilk_enable_gt_irq(dev_priv, ring->irq_enable_mask);
        }
        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
@@ -1045,15 +1049,13 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring)
        unsigned long flags;
 
        spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       if (--ring->irq_refcount.gt == 0) {
+       if (--ring->irq_refcount == 0) {
                if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
                        I915_WRITE_IMR(ring,
                                       ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
                else
                        I915_WRITE_IMR(ring, ~0);
-               dev_priv->gt_irq_mask |= ring->irq_enable_mask;
-               I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
-               POSTING_READ(GTIMR);
+               ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask);
        }
        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
@@ -1070,14 +1072,12 @@ hsw_vebox_get_irq(struct intel_ring_buffer *ring)
        if (!dev->irq_enabled)
                return false;
 
-       spin_lock_irqsave(&dev_priv->rps.lock, flags);
-       if (ring->irq_refcount.pm++ == 0) {
-               u32 pm_imr = I915_READ(GEN6_PMIMR);
+       spin_lock_irqsave(&dev_priv->irq_lock, flags);
+       if (ring->irq_refcount++ == 0) {
                I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
-               I915_WRITE(GEN6_PMIMR, pm_imr & ~ring->irq_enable_mask);
-               POSTING_READ(GEN6_PMIMR);
+               snb_enable_pm_irq(dev_priv, ring->irq_enable_mask);
        }
-       spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
+       spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
        return true;
 }
@@ -1092,14 +1092,12 @@ hsw_vebox_put_irq(struct intel_ring_buffer *ring)
        if (!dev->irq_enabled)
                return;
 
-       spin_lock_irqsave(&dev_priv->rps.lock, flags);
-       if (--ring->irq_refcount.pm == 0) {
-               u32 pm_imr = I915_READ(GEN6_PMIMR);
+       spin_lock_irqsave(&dev_priv->irq_lock, flags);
+       if (--ring->irq_refcount == 0) {
                I915_WRITE_IMR(ring, ~0);
-               I915_WRITE(GEN6_PMIMR, pm_imr | ring->irq_enable_mask);
-               POSTING_READ(GEN6_PMIMR);
+               snb_disable_pm_irq(dev_priv, ring->irq_enable_mask);
        }
-       spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
+       spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 }
 
 static int
@@ -1144,7 +1142,7 @@ i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
                intel_ring_advance(ring);
        } else {
                struct drm_i915_gem_object *obj = ring->private;
-               u32 cs_offset = obj->gtt_offset;
+               u32 cs_offset = i915_gem_obj_ggtt_offset(obj);
 
                if (len > I830_BATCH_LIMIT)
                        return -ENOSPC;
@@ -1224,12 +1222,12 @@ static int init_status_page(struct intel_ring_buffer *ring)
 
        i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
 
-       ret = i915_gem_object_pin(obj, 4096, true, false);
+       ret = i915_gem_obj_ggtt_pin(obj, 4096, true, false);
        if (ret != 0) {
                goto err_unref;
        }
 
-       ring->status_page.gfx_addr = obj->gtt_offset;
+       ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj);
        ring->status_page.page_addr = kmap(sg_page(obj->pages->sgl));
        if (ring->status_page.page_addr == NULL) {
                ret = -ENOMEM;
@@ -1294,9 +1292,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
                        return ret;
        }
 
-       obj = NULL;
-       if (!HAS_LLC(dev))
-               obj = i915_gem_object_create_stolen(dev, ring->size);
+       obj = i915_gem_object_create_stolen(dev, ring->size);
        if (obj == NULL)
                obj = i915_gem_alloc_object(dev, ring->size);
        if (obj == NULL) {
@@ -1307,7 +1303,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
 
        ring->obj = obj;
 
-       ret = i915_gem_object_pin(obj, PAGE_SIZE, true, false);
+       ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, true, false);
        if (ret)
                goto err_unref;
 
@@ -1316,7 +1312,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
                goto err_unpin;
 
        ring->virtual_start =
-               ioremap_wc(dev_priv->gtt.mappable_base + obj->gtt_offset,
+               ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
                           ring->size);
        if (ring->virtual_start == NULL) {
                DRM_ERROR("Failed to map ringbuffer.\n");
@@ -1594,6 +1590,8 @@ void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno)
        if (INTEL_INFO(ring->dev)->gen >= 6) {
                I915_WRITE(RING_SYNC_0(ring->mmio_base), 0);
                I915_WRITE(RING_SYNC_1(ring->mmio_base), 0);
+               if (HAS_VEBOX(ring->dev))
+                       I915_WRITE(RING_SYNC_2(ring->mmio_base), 0);
        }
 
        ring->set_seqno(ring, seqno);
@@ -1828,7 +1826,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
                        return -ENOMEM;
                }
 
-               ret = i915_gem_object_pin(obj, 0, true, false);
+               ret = i915_gem_obj_ggtt_pin(obj, 0, true, false);
                if (ret != 0) {
                        drm_gem_object_unreference(&obj->base);
                        DRM_ERROR("Failed to ping batch bo\n");
@@ -2008,8 +2006,7 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev)
        ring->add_request = gen6_add_request;
        ring->get_seqno = gen6_ring_get_seqno;
        ring->set_seqno = ring_set_seqno;
-       ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT |
-               PM_VEBOX_CS_ERROR_INTERRUPT;
+       ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
        ring->irq_get = hsw_vebox_get_irq;
        ring->irq_put = hsw_vebox_put_irq;
        ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
index 799f04c9da45a0bc30b19dd841547f75a6d4958e..432ad5311ba62693633f79d6c5d59429704ac1cb 100644 (file)
@@ -33,11 +33,12 @@ struct  intel_hw_status_page {
 #define I915_READ_IMR(ring) I915_READ(RING_IMR((ring)->mmio_base))
 #define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)
 
-#define I915_READ_NOPID(ring) I915_READ(RING_NOPID((ring)->mmio_base))
-#define I915_READ_SYNC_0(ring) I915_READ(RING_SYNC_0((ring)->mmio_base))
-#define I915_READ_SYNC_1(ring) I915_READ(RING_SYNC_1((ring)->mmio_base))
-
-enum intel_ring_hangcheck_action { wait, active, kick, hung };
+enum intel_ring_hangcheck_action {
+       HANGCHECK_WAIT,
+       HANGCHECK_ACTIVE,
+       HANGCHECK_KICK,
+       HANGCHECK_HUNG,
+};
 
 struct intel_ring_hangcheck {
        bool deadlock;
@@ -78,10 +79,7 @@ struct  intel_ring_buffer {
         */
        u32             last_retired_head;
 
-       struct {
-               u32     gt; /*  protected by dev_priv->irq_lock */
-               u32     pm; /*  protected by dev_priv->rps.lock (sucks) */
-       } irq_refcount;
+       unsigned irq_refcount; /* protected by dev_priv->irq_lock */
        u32             irq_enable_mask;        /* bitmask to enable ring interrupt */
        u32             trace_irq_seqno;
        u32             sync_seqno[I915_NUM_RINGS-1];
index 2628d56224499307cffa4e409a646122a9578db0..317e058fb3cf1205563c3a57079ae99e4bc0d3dc 100644 (file)
@@ -202,15 +202,14 @@ struct intel_sdvo_connector {
        u32     cur_dot_crawl,  max_dot_crawl;
 };
 
-static struct intel_sdvo *to_intel_sdvo(struct drm_encoder *encoder)
+static struct intel_sdvo *to_sdvo(struct intel_encoder *encoder)
 {
-       return container_of(encoder, struct intel_sdvo, base.base);
+       return container_of(encoder, struct intel_sdvo, base);
 }
 
 static struct intel_sdvo *intel_attached_sdvo(struct drm_connector *connector)
 {
-       return container_of(intel_attached_encoder(connector),
-                           struct intel_sdvo, base);
+       return to_sdvo(intel_attached_encoder(connector));
 }
 
 static struct intel_sdvo_connector *to_intel_sdvo_connector(struct drm_connector *connector)
@@ -539,7 +538,8 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
                                  &status))
                goto log_fail;
 
-       while (status == SDVO_CMD_STATUS_PENDING && --retry) {
+       while ((status == SDVO_CMD_STATUS_PENDING ||
+                       status == SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED) && --retry) {
                if (retry < 10)
                        msleep(15);
                else
@@ -964,30 +964,32 @@ static bool intel_sdvo_write_infoframe(struct intel_sdvo *intel_sdvo,
 static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo,
                                         const struct drm_display_mode *adjusted_mode)
 {
-       struct dip_infoframe avi_if = {
-               .type = DIP_TYPE_AVI,
-               .ver = DIP_VERSION_AVI,
-               .len = DIP_LEN_AVI,
-       };
-       uint8_t sdvo_data[4 + sizeof(avi_if.body.avi)];
-       struct intel_crtc *intel_crtc = to_intel_crtc(intel_sdvo->base.base.crtc);
+       uint8_t sdvo_data[HDMI_INFOFRAME_SIZE(AVI)];
+       struct drm_crtc *crtc = intel_sdvo->base.base.crtc;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       union hdmi_infoframe frame;
+       int ret;
+       ssize_t len;
+
+       ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
+                                                      adjusted_mode);
+       if (ret < 0) {
+               DRM_ERROR("couldn't fill AVI infoframe\n");
+               return false;
+       }
 
        if (intel_sdvo->rgb_quant_range_selectable) {
                if (intel_crtc->config.limited_color_range)
-                       avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_LIMITED;
+                       frame.avi.quantization_range =
+                               HDMI_QUANTIZATION_RANGE_LIMITED;
                else
-                       avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_FULL;
+                       frame.avi.quantization_range =
+                               HDMI_QUANTIZATION_RANGE_FULL;
        }
 
-       avi_if.body.avi.VIC = drm_match_cea_mode(adjusted_mode);
-
-       intel_dip_infoframe_csum(&avi_if);
-
-       /* sdvo spec says that the ecc is handled by the hw, and it looks like
-        * we must not send the ecc field, either. */
-       memcpy(sdvo_data, &avi_if, 3);
-       sdvo_data[3] = avi_if.checksum;
-       memcpy(&sdvo_data[4], &avi_if.body, sizeof(avi_if.body.avi));
+       len = hdmi_infoframe_pack(&frame, sdvo_data, sizeof(sdvo_data));
+       if (len < 0)
+               return false;
 
        return intel_sdvo_write_infoframe(intel_sdvo, SDVO_HBUF_INDEX_AVI_IF,
                                          SDVO_HBUF_TX_VSYNC,
@@ -1084,7 +1086,7 @@ static void i9xx_adjust_sdvo_tv_clock(struct intel_crtc_config *pipe_config)
 static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
                                      struct intel_crtc_config *pipe_config)
 {
-       struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
+       struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
        struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
        struct drm_display_mode *mode = &pipe_config->requested_mode;
 
@@ -1154,7 +1156,7 @@ static void intel_sdvo_mode_set(struct intel_encoder *intel_encoder)
        struct drm_display_mode *adjusted_mode =
                &intel_crtc->config.adjusted_mode;
        struct drm_display_mode *mode = &intel_crtc->config.requested_mode;
-       struct intel_sdvo *intel_sdvo = to_intel_sdvo(&intel_encoder->base);
+       struct intel_sdvo *intel_sdvo = to_sdvo(intel_encoder);
        u32 sdvox;
        struct intel_sdvo_in_out_map in_out;
        struct intel_sdvo_dtd input_dtd, output_dtd;
@@ -1292,7 +1294,7 @@ static bool intel_sdvo_get_hw_state(struct intel_encoder *encoder,
 {
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
+       struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
        u16 active_outputs = 0;
        u32 tmp;
 
@@ -1315,7 +1317,7 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
 {
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
+       struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
        struct intel_sdvo_dtd dtd;
        int encoder_pixel_multiplier = 0;
        u32 flags = 0, sdvox;
@@ -1357,22 +1359,21 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
        }
 
        /* Cross check the port pixel multiplier with the sdvo encoder state. */
-       intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_CLOCK_RATE_MULT, &val, 1);
-       switch (val) {
-       case SDVO_CLOCK_RATE_MULT_1X:
-               encoder_pixel_multiplier = 1;
-               break;
-       case SDVO_CLOCK_RATE_MULT_2X:
-               encoder_pixel_multiplier = 2;
-               break;
-       case SDVO_CLOCK_RATE_MULT_4X:
-               encoder_pixel_multiplier = 4;
-               break;
+       if (intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_CLOCK_RATE_MULT,
+                                &val, 1)) {
+               switch (val) {
+               case SDVO_CLOCK_RATE_MULT_1X:
+                       encoder_pixel_multiplier = 1;
+                       break;
+               case SDVO_CLOCK_RATE_MULT_2X:
+                       encoder_pixel_multiplier = 2;
+                       break;
+               case SDVO_CLOCK_RATE_MULT_4X:
+                       encoder_pixel_multiplier = 4;
+                       break;
+               }
        }
 
-       if(HAS_PCH_SPLIT(dev))
-               return; /* no pixel multiplier readout support yet */
-
        WARN(encoder_pixel_multiplier != pipe_config->pixel_multiplier,
             "SDVO pixel multiplier mismatch, port: %i, encoder: %i\n",
             pipe_config->pixel_multiplier, encoder_pixel_multiplier);
@@ -1381,7 +1382,7 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
 static void intel_disable_sdvo(struct intel_encoder *encoder)
 {
        struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
-       struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
+       struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
        u32 temp;
 
        intel_sdvo_set_active_outputs(intel_sdvo, 0);
@@ -1423,7 +1424,7 @@ static void intel_enable_sdvo(struct intel_encoder *encoder)
 {
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
+       struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
        struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
        u32 temp;
        bool input1, input2;
@@ -1584,7 +1585,7 @@ static uint16_t intel_sdvo_get_hotplug_support(struct intel_sdvo *intel_sdvo)
 
 static void intel_sdvo_enable_hotplug(struct intel_encoder *encoder)
 {
-       struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
+       struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
 
        intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG,
                        &intel_sdvo->hotplug_active, 2);
@@ -1697,6 +1698,9 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
        struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
        enum drm_connector_status ret;
 
+       DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+                     connector->base.id, drm_get_connector_name(connector));
+
        if (!intel_sdvo_get_value(intel_sdvo,
                                  SDVO_CMD_GET_ATTACHED_DISPLAYS,
                                  &response, 2))
@@ -2188,7 +2192,7 @@ static const struct drm_connector_helper_funcs intel_sdvo_connector_helper_funcs
 
 static void intel_sdvo_enc_destroy(struct drm_encoder *encoder)
 {
-       struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder);
+       struct intel_sdvo *intel_sdvo = to_sdvo(to_intel_encoder(encoder));
 
        if (intel_sdvo->sdvo_lvds_fixed_mode != NULL)
                drm_mode_destroy(encoder->dev,
index 1fa5612a4572cc0c3b92a3d6a253bc9f1da573ed..78b621cdd108f13ae061c27a1499df71c84da5bd 100644 (file)
@@ -38,7 +38,8 @@
 #include "i915_drv.h"
 
 static void
-vlv_update_plane(struct drm_plane *dplane, struct drm_framebuffer *fb,
+vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
+                struct drm_framebuffer *fb,
                 struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
                 unsigned int crtc_w, unsigned int crtc_h,
                 uint32_t x, uint32_t y,
@@ -108,14 +109,15 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_framebuffer *fb,
 
        sprctl |= SP_ENABLE;
 
+       intel_update_sprite_watermarks(dplane, crtc, src_w, pixel_size, true,
+                                      src_w != crtc_w || src_h != crtc_h);
+
        /* Sizes are 0 based */
        src_w--;
        src_h--;
        crtc_w--;
        crtc_h--;
 
-       intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size, true);
-
        I915_WRITE(SPSTRIDE(pipe, plane), fb->pitches[0]);
        I915_WRITE(SPPOS(pipe, plane), (crtc_y << 16) | crtc_x);
 
@@ -133,13 +135,13 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_framebuffer *fb,
 
        I915_WRITE(SPSIZE(pipe, plane), (crtc_h << 16) | crtc_w);
        I915_WRITE(SPCNTR(pipe, plane), sprctl);
-       I915_MODIFY_DISPBASE(SPSURF(pipe, plane), obj->gtt_offset +
+       I915_MODIFY_DISPBASE(SPSURF(pipe, plane), i915_gem_obj_ggtt_offset(obj) +
                             sprsurf_offset);
        POSTING_READ(SPSURF(pipe, plane));
 }
 
 static void
-vlv_disable_plane(struct drm_plane *dplane)
+vlv_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
 {
        struct drm_device *dev = dplane->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -152,6 +154,8 @@ vlv_disable_plane(struct drm_plane *dplane)
        /* Activate double buffered register update */
        I915_MODIFY_DISPBASE(SPSURF(pipe, plane), 0);
        POSTING_READ(SPSURF(pipe, plane));
+
+       intel_update_sprite_watermarks(dplane, crtc, 0, 0, false, false);
 }
 
 static int
@@ -206,7 +210,8 @@ vlv_get_colorkey(struct drm_plane *dplane,
 }
 
 static void
-ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
+ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
+                struct drm_framebuffer *fb,
                 struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
                 unsigned int crtc_w, unsigned int crtc_h,
                 uint32_t x, uint32_t y,
@@ -262,14 +267,15 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
        if (IS_HASWELL(dev))
                sprctl |= SPRITE_PIPE_CSC_ENABLE;
 
+       intel_update_sprite_watermarks(plane, crtc, src_w, pixel_size, true,
+                                      src_w != crtc_w || src_h != crtc_h);
+
        /* Sizes are 0 based */
        src_w--;
        src_h--;
        crtc_w--;
        crtc_h--;
 
-       intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size, true);
-
        /*
         * IVB workaround: must disable low power watermarks for at least
         * one frame before enabling scaling.  LP watermarks can be re-enabled
@@ -308,7 +314,8 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
        if (intel_plane->can_scale)
                I915_WRITE(SPRSCALE(pipe), sprscale);
        I915_WRITE(SPRCTL(pipe), sprctl);
-       I915_MODIFY_DISPBASE(SPRSURF(pipe), obj->gtt_offset + sprsurf_offset);
+       I915_MODIFY_DISPBASE(SPRSURF(pipe),
+                            i915_gem_obj_ggtt_offset(obj) + sprsurf_offset);
        POSTING_READ(SPRSURF(pipe));
 
        /* potentially re-enable LP watermarks */
@@ -317,7 +324,7 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
 }
 
 static void
-ivb_disable_plane(struct drm_plane *plane)
+ivb_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
 {
        struct drm_device *dev = plane->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -335,7 +342,7 @@ ivb_disable_plane(struct drm_plane *plane)
 
        dev_priv->sprite_scaling_enabled &= ~(1 << pipe);
 
-       intel_update_sprite_watermarks(dev, pipe, 0, 0, false);
+       intel_update_sprite_watermarks(plane, crtc, 0, 0, false, false);
 
        /* potentially re-enable LP watermarks */
        if (scaling_was_enabled && !dev_priv->sprite_scaling_enabled)
@@ -397,7 +404,8 @@ ivb_get_colorkey(struct drm_plane *plane, struct drm_intel_sprite_colorkey *key)
 }
 
 static void
-ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
+ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
+                struct drm_framebuffer *fb,
                 struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
                 unsigned int crtc_w, unsigned int crtc_h,
                 uint32_t x, uint32_t y,
@@ -449,14 +457,15 @@ ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
                dvscntr |= DVS_TRICKLE_FEED_DISABLE; /* must disable */
        dvscntr |= DVS_ENABLE;
 
+       intel_update_sprite_watermarks(plane, crtc, src_w, pixel_size, true,
+                                      src_w != crtc_w || src_h != crtc_h);
+
        /* Sizes are 0 based */
        src_w--;
        src_h--;
        crtc_w--;
        crtc_h--;
 
-       intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size, true);
-
        dvsscale = 0;
        if (IS_GEN5(dev) || crtc_w != src_w || crtc_h != src_h)
                dvsscale = DVS_SCALE_ENABLE | (src_w << 16) | src_h;
@@ -478,12 +487,13 @@ ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
        I915_WRITE(DVSSIZE(pipe), (crtc_h << 16) | crtc_w);
        I915_WRITE(DVSSCALE(pipe), dvsscale);
        I915_WRITE(DVSCNTR(pipe), dvscntr);
-       I915_MODIFY_DISPBASE(DVSSURF(pipe), obj->gtt_offset + dvssurf_offset);
+       I915_MODIFY_DISPBASE(DVSSURF(pipe),
+                            i915_gem_obj_ggtt_offset(obj) + dvssurf_offset);
        POSTING_READ(DVSSURF(pipe));
 }
 
 static void
-ilk_disable_plane(struct drm_plane *plane)
+ilk_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
 {
        struct drm_device *dev = plane->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -496,6 +506,8 @@ ilk_disable_plane(struct drm_plane *plane)
        /* Flush double buffered register updates */
        I915_MODIFY_DISPBASE(DVSSURF(pipe), 0);
        POSTING_READ(DVSSURF(pipe));
+
+       intel_update_sprite_watermarks(plane, crtc, 0, 0, false, false);
 }
 
 static void
@@ -818,11 +830,11 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
                intel_enable_primary(crtc);
 
        if (visible)
-               intel_plane->update_plane(plane, fb, obj,
+               intel_plane->update_plane(plane, crtc, fb, obj,
                                          crtc_x, crtc_y, crtc_w, crtc_h,
                                          src_x, src_y, src_w, src_h);
        else
-               intel_plane->disable_plane(plane);
+               intel_plane->disable_plane(plane, crtc);
 
        if (disable_primary)
                intel_disable_primary(crtc);
@@ -855,9 +867,14 @@ intel_disable_plane(struct drm_plane *plane)
        struct intel_plane *intel_plane = to_intel_plane(plane);
        int ret = 0;
 
-       if (plane->crtc)
-               intel_enable_primary(plane->crtc);
-       intel_plane->disable_plane(plane);
+       if (!plane->fb)
+               return 0;
+
+       if (WARN_ON(!plane->crtc))
+               return -EINVAL;
+
+       intel_enable_primary(plane->crtc);
+       intel_plane->disable_plane(plane, plane->crtc);
 
        if (!intel_plane->obj)
                goto out;
index 39debd80d190241836e89b8c6794bdb713e80653..f2c6d7909ae2d66ffa73ebe77d7c3b06d46403f7 100644 (file)
@@ -823,16 +823,14 @@ static const struct tv_mode tv_modes[] = {
        },
 };
 
-static struct intel_tv *enc_to_intel_tv(struct drm_encoder *encoder)
+static struct intel_tv *enc_to_tv(struct intel_encoder *encoder)
 {
-       return container_of(encoder, struct intel_tv, base.base);
+       return container_of(encoder, struct intel_tv, base);
 }
 
 static struct intel_tv *intel_attached_tv(struct drm_connector *connector)
 {
-       return container_of(intel_attached_encoder(connector),
-                           struct intel_tv,
-                           base);
+       return enc_to_tv(intel_attached_encoder(connector));
 }
 
 static bool
@@ -908,7 +906,7 @@ static bool
 intel_tv_compute_config(struct intel_encoder *encoder,
                        struct intel_crtc_config *pipe_config)
 {
-       struct intel_tv *intel_tv = enc_to_intel_tv(&encoder->base);
+       struct intel_tv *intel_tv = enc_to_tv(encoder);
        const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
 
        if (!tv_mode)
@@ -921,15 +919,12 @@ intel_tv_compute_config(struct intel_encoder *encoder,
        return true;
 }
 
-static void
-intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
-                 struct drm_display_mode *adjusted_mode)
+static void intel_tv_mode_set(struct intel_encoder *encoder)
 {
-       struct drm_device *dev = encoder->dev;
+       struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_crtc *crtc = encoder->crtc;
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
+       struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+       struct intel_tv *intel_tv = enc_to_tv(encoder);
        const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
        u32 tv_ctl;
        u32 hctl1, hctl2, hctl3;
@@ -1305,6 +1300,10 @@ intel_tv_detect(struct drm_connector *connector, bool force)
        struct intel_tv *intel_tv = intel_attached_tv(connector);
        int type;
 
+       DRM_DEBUG_KMS("[CONNECTOR:%d:%s] force=%d\n",
+                     connector->base.id, drm_get_connector_name(connector),
+                     force);
+
        mode = reported_modes[0];
 
        if (force) {
@@ -1483,10 +1482,6 @@ out:
        return ret;
 }
 
-static const struct drm_encoder_helper_funcs intel_tv_helper_funcs = {
-       .mode_set = intel_tv_mode_set,
-};
-
 static const struct drm_connector_funcs intel_tv_connector_funcs = {
        .dpms = intel_connector_dpms,
        .detect = intel_tv_detect,
@@ -1619,6 +1614,7 @@ intel_tv_init(struct drm_device *dev)
                         DRM_MODE_ENCODER_TVDAC);
 
        intel_encoder->compute_config = intel_tv_compute_config;
+       intel_encoder->mode_set = intel_tv_mode_set;
        intel_encoder->enable = intel_enable_tv;
        intel_encoder->disable = intel_disable_tv;
        intel_encoder->get_hw_state = intel_tv_get_hw_state;
@@ -1640,7 +1636,6 @@ intel_tv_init(struct drm_device *dev)
 
        intel_tv->tv_format = tv_modes[initial_mode].name;
 
-       drm_encoder_helper_add(&intel_encoder->base, &intel_tv_helper_funcs);
        drm_connector_helper_add(connector, &intel_tv_connector_helper_funcs);
        connector->interlace_allowed = false;
        connector->doublescan_allowed = false;
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
new file mode 100644 (file)
index 0000000..8f5bc86
--- /dev/null
@@ -0,0 +1,595 @@
+/*
+ * Copyright © 2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "i915_drv.h"
+#include "intel_drv.h"
+
+#define FORCEWAKE_ACK_TIMEOUT_MS 2
+
+#define __raw_i915_read8(dev_priv__, reg__) readb((dev_priv__)->regs + (reg__))
+#define __raw_i915_write8(dev_priv__, reg__, val__) writeb(val__, (dev_priv__)->regs + (reg__))
+
+#define __raw_i915_read16(dev_priv__, reg__) readw((dev_priv__)->regs + (reg__))
+#define __raw_i915_write16(dev_priv__, reg__, val__) writew(val__, (dev_priv__)->regs + (reg__))
+
+#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
+#define __raw_i915_write32(dev_priv__, reg__, val__) writel(val__, (dev_priv__)->regs + (reg__))
+
+#define __raw_i915_read64(dev_priv__, reg__) readq((dev_priv__)->regs + (reg__))
+#define __raw_i915_write64(dev_priv__, reg__, val__) writeq(val__, (dev_priv__)->regs + (reg__))
+
+#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32(dev_priv__, reg__)
+
+
+static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
+{
+       u32 gt_thread_status_mask;
+
+       if (IS_HASWELL(dev_priv->dev))
+               gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK_HSW;
+       else
+               gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK;
+
+       /* w/a for a sporadic read returning 0 by waiting for the GT
+        * thread to wake up.
+        */
+       if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) & gt_thread_status_mask) == 0, 500))
+               DRM_ERROR("GT thread status wait timed out\n");
+}
+
+static void __gen6_gt_force_wake_reset(struct drm_i915_private *dev_priv)
+{
+       __raw_i915_write32(dev_priv, FORCEWAKE, 0);
+       /* something from same cacheline, but !FORCEWAKE */
+       __raw_posting_read(dev_priv, ECOBUS);
+}
+
+static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
+{
+       if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK) & 1) == 0,
+                           FORCEWAKE_ACK_TIMEOUT_MS))
+               DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
+
+       __raw_i915_write32(dev_priv, FORCEWAKE, 1);
+       /* something from same cacheline, but !FORCEWAKE */
+       __raw_posting_read(dev_priv, ECOBUS);
+
+       if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK) & 1),
+                           FORCEWAKE_ACK_TIMEOUT_MS))
+               DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
+
+       /* WaRsForcewakeWaitTC0:snb */
+       __gen6_gt_wait_for_thread_c0(dev_priv);
+}
+
+static void __gen6_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
+{
+       __raw_i915_write32(dev_priv, FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff));
+       /* something from same cacheline, but !FORCEWAKE_MT */
+       __raw_posting_read(dev_priv, ECOBUS);
+}
+
+static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
+{
+       u32 forcewake_ack;
+
+       if (IS_HASWELL(dev_priv->dev))
+               forcewake_ack = FORCEWAKE_ACK_HSW;
+       else
+               forcewake_ack = FORCEWAKE_MT_ACK;
+
+       if (wait_for_atomic((__raw_i915_read32(dev_priv, forcewake_ack) & FORCEWAKE_KERNEL) == 0,
+                           FORCEWAKE_ACK_TIMEOUT_MS))
+               DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
+
+       __raw_i915_write32(dev_priv, FORCEWAKE_MT,
+                          _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
+       /* something from same cacheline, but !FORCEWAKE_MT */
+       __raw_posting_read(dev_priv, ECOBUS);
+
+       if (wait_for_atomic((__raw_i915_read32(dev_priv, forcewake_ack) & FORCEWAKE_KERNEL),
+                           FORCEWAKE_ACK_TIMEOUT_MS))
+               DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
+
+       /* WaRsForcewakeWaitTC0:ivb,hsw */
+       __gen6_gt_wait_for_thread_c0(dev_priv);
+}
+
+static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
+{
+       u32 gtfifodbg;
+
+       gtfifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);
+       if (WARN(gtfifodbg & GT_FIFO_CPU_ERROR_MASK,
+            "MMIO read or write has been dropped %x\n", gtfifodbg))
+               __raw_i915_write32(dev_priv, GTFIFODBG, GT_FIFO_CPU_ERROR_MASK);
+}
+
+static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
+{
+       __raw_i915_write32(dev_priv, FORCEWAKE, 0);
+       /* something from same cacheline, but !FORCEWAKE */
+       __raw_posting_read(dev_priv, ECOBUS);
+       gen6_gt_check_fifodbg(dev_priv);
+}
+
+static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
+{
+       __raw_i915_write32(dev_priv, FORCEWAKE_MT,
+                          _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
+       /* something from same cacheline, but !FORCEWAKE_MT */
+       __raw_posting_read(dev_priv, ECOBUS);
+       gen6_gt_check_fifodbg(dev_priv);
+}
+
+static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
+{
+       int ret = 0;
+
+       if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
+               int loop = 500;
+               u32 fifo = __raw_i915_read32(dev_priv, GT_FIFO_FREE_ENTRIES);
+               while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
+                       udelay(10);
+                       fifo = __raw_i915_read32(dev_priv, GT_FIFO_FREE_ENTRIES);
+               }
+               if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
+                       ++ret;
+               dev_priv->uncore.fifo_count = fifo;
+       }
+       dev_priv->uncore.fifo_count--;
+
+       return ret;
+}
+
+static void vlv_force_wake_reset(struct drm_i915_private *dev_priv)
+{
+       __raw_i915_write32(dev_priv, FORCEWAKE_VLV,
+                          _MASKED_BIT_DISABLE(0xffff));
+       /* something from same cacheline, but !FORCEWAKE_VLV */
+       __raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV);
+}
+
+static void vlv_force_wake_get(struct drm_i915_private *dev_priv)
+{
+       if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL) == 0,
+                           FORCEWAKE_ACK_TIMEOUT_MS))
+               DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
+
+       __raw_i915_write32(dev_priv, FORCEWAKE_VLV,
+                          _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
+       __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
+                          _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
+
+       if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL),
+                           FORCEWAKE_ACK_TIMEOUT_MS))
+               DRM_ERROR("Timed out waiting for GT to ack forcewake request.\n");
+
+       if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK_MEDIA_VLV) &
+                            FORCEWAKE_KERNEL),
+                           FORCEWAKE_ACK_TIMEOUT_MS))
+               DRM_ERROR("Timed out waiting for media to ack forcewake request.\n");
+
+       /* WaRsForcewakeWaitTC0:vlv */
+       __gen6_gt_wait_for_thread_c0(dev_priv);
+}
+
+static void vlv_force_wake_put(struct drm_i915_private *dev_priv)
+{
+       __raw_i915_write32(dev_priv, FORCEWAKE_VLV,
+                          _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
+       __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
+                          _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
+       /* The below doubles as a POSTING_READ */
+       gen6_gt_check_fifodbg(dev_priv);
+}
+
+void intel_uncore_early_sanitize(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       if (HAS_FPGA_DBG_UNCLAIMED(dev))
+               __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
+}
+
+void intel_uncore_init(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       if (IS_VALLEYVIEW(dev)) {
+               dev_priv->uncore.funcs.force_wake_get = vlv_force_wake_get;
+               dev_priv->uncore.funcs.force_wake_put = vlv_force_wake_put;
+       } else if (IS_HASWELL(dev)) {
+               dev_priv->uncore.funcs.force_wake_get = __gen6_gt_force_wake_mt_get;
+               dev_priv->uncore.funcs.force_wake_put = __gen6_gt_force_wake_mt_put;
+       } else if (IS_IVYBRIDGE(dev)) {
+               u32 ecobus;
+
+               /* IVB configs may use multi-threaded forcewake */
+
+               /* A small trick here - if the bios hasn't configured
+                * MT forcewake, and if the device is in RC6, then
+                * force_wake_mt_get will not wake the device and the
+                * ECOBUS read will return zero. Which will be
+                * (correctly) interpreted by the test below as MT
+                * forcewake being disabled.
+                */
+               mutex_lock(&dev->struct_mutex);
+               __gen6_gt_force_wake_mt_get(dev_priv);
+               ecobus = __raw_i915_read32(dev_priv, ECOBUS);
+               __gen6_gt_force_wake_mt_put(dev_priv);
+               mutex_unlock(&dev->struct_mutex);
+
+               if (ecobus & FORCEWAKE_MT_ENABLE) {
+                       dev_priv->uncore.funcs.force_wake_get =
+                               __gen6_gt_force_wake_mt_get;
+                       dev_priv->uncore.funcs.force_wake_put =
+                               __gen6_gt_force_wake_mt_put;
+               } else {
+                       DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
+                       DRM_INFO("when using vblank-synced partial screen updates.\n");
+                       dev_priv->uncore.funcs.force_wake_get =
+                               __gen6_gt_force_wake_get;
+                       dev_priv->uncore.funcs.force_wake_put =
+                               __gen6_gt_force_wake_put;
+               }
+       } else if (IS_GEN6(dev)) {
+               dev_priv->uncore.funcs.force_wake_get =
+                       __gen6_gt_force_wake_get;
+               dev_priv->uncore.funcs.force_wake_put =
+                       __gen6_gt_force_wake_put;
+       }
+}
+
+void intel_uncore_sanitize(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       if (IS_VALLEYVIEW(dev)) {
+               vlv_force_wake_reset(dev_priv);
+       } else if (INTEL_INFO(dev)->gen >= 6) {
+               __gen6_gt_force_wake_reset(dev_priv);
+               if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
+                       __gen6_gt_force_wake_mt_reset(dev_priv);
+       }
+
+       /* BIOS often leaves RC6 enabled, but disable it for hw init */
+       intel_disable_gt_powersave(dev);
+}
+
+/*
+ * Generally this is called implicitly by the register read function. However,
+ * if some sequence requires the GT to not power down then this function should
+ * be called at the beginning of the sequence followed by a call to
+ * gen6_gt_force_wake_put() at the end of the sequence.
+ */
+void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
+{
+       unsigned long irqflags;
+
+       spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+       if (dev_priv->uncore.forcewake_count++ == 0)
+               dev_priv->uncore.funcs.force_wake_get(dev_priv);
+       spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+}
+
+/*
+ * see gen6_gt_force_wake_get()
+ */
+void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
+{
+       unsigned long irqflags;
+
+       spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+       if (--dev_priv->uncore.forcewake_count == 0)
+               dev_priv->uncore.funcs.force_wake_put(dev_priv);
+       spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+}
+
+/* We give fast paths for the really cool registers */
+#define NEEDS_FORCE_WAKE(dev_priv, reg) \
+       ((HAS_FORCE_WAKE((dev_priv)->dev)) && \
+        ((reg) < 0x40000) &&            \
+        ((reg) != FORCEWAKE))
+
+static void
+ilk_dummy_write(struct drm_i915_private *dev_priv)
+{
+       /* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
+        * the chip from rc6 before touching it for real. MI_MODE is masked,
+        * hence harmless to write 0 into. */
+       __raw_i915_write32(dev_priv, MI_MODE, 0);
+}
+
+static void
+hsw_unclaimed_reg_clear(struct drm_i915_private *dev_priv, u32 reg)
+{
+       if (HAS_FPGA_DBG_UNCLAIMED(dev_priv->dev) &&
+           (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
+               DRM_ERROR("Unknown unclaimed register before writing to %x\n",
+                         reg);
+               __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
+       }
+}
+
+static void
+hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg)
+{
+       if (HAS_FPGA_DBG_UNCLAIMED(dev_priv->dev) &&
+           (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
+               DRM_ERROR("Unclaimed write to %x\n", reg);
+               __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
+       }
+}
+
+#define __i915_read(x) \
+u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg, bool trace) { \
+       unsigned long irqflags; \
+       u##x val = 0; \
+       spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
+       if (dev_priv->info->gen == 5) \
+               ilk_dummy_write(dev_priv); \
+       if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
+               if (dev_priv->uncore.forcewake_count == 0) \
+                       dev_priv->uncore.funcs.force_wake_get(dev_priv); \
+               val = __raw_i915_read##x(dev_priv, reg); \
+               if (dev_priv->uncore.forcewake_count == 0) \
+                       dev_priv->uncore.funcs.force_wake_put(dev_priv); \
+       } else { \
+               val = __raw_i915_read##x(dev_priv, reg); \
+       } \
+       spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
+       trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
+       return val; \
+}
+
+__i915_read(8)
+__i915_read(16)
+__i915_read(32)
+__i915_read(64)
+#undef __i915_read
+
+#define __i915_write(x) \
+void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val, bool trace) { \
+       unsigned long irqflags; \
+       u32 __fifo_ret = 0; \
+       trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
+       spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
+       if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
+               __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
+       } \
+       if (dev_priv->info->gen == 5) \
+               ilk_dummy_write(dev_priv); \
+       hsw_unclaimed_reg_clear(dev_priv, reg); \
+       __raw_i915_write##x(dev_priv, reg, val); \
+       if (unlikely(__fifo_ret)) { \
+               gen6_gt_check_fifodbg(dev_priv); \
+       } \
+       hsw_unclaimed_reg_check(dev_priv, reg); \
+       spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
+}
+__i915_write(8)
+__i915_write(16)
+__i915_write(32)
+__i915_write(64)
+#undef __i915_write
+
+static const struct register_whitelist {
+       uint64_t offset;
+       uint32_t size;
+       uint32_t gen_bitmask; /* support gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
+} whitelist[] = {
+       { RING_TIMESTAMP(RENDER_RING_BASE), 8, 0xF0 },
+};
+
+int i915_reg_read_ioctl(struct drm_device *dev,
+                       void *data, struct drm_file *file)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_reg_read *reg = data;
+       struct register_whitelist const *entry = whitelist;
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
+               if (entry->offset == reg->offset &&
+                   (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
+                       break;
+       }
+
+       if (i == ARRAY_SIZE(whitelist))
+               return -EINVAL;
+
+       switch (entry->size) {
+       case 8:
+               reg->val = I915_READ64(reg->offset);
+               break;
+       case 4:
+               reg->val = I915_READ(reg->offset);
+               break;
+       case 2:
+               reg->val = I915_READ16(reg->offset);
+               break;
+       case 1:
+               reg->val = I915_READ8(reg->offset);
+               break;
+       default:
+               WARN_ON(1);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int i8xx_do_reset(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       if (IS_I85X(dev))
+               return -ENODEV;
+
+       I915_WRITE(D_STATE, I915_READ(D_STATE) | DSTATE_GFX_RESET_I830);
+       POSTING_READ(D_STATE);
+
+       if (IS_I830(dev) || IS_845G(dev)) {
+               I915_WRITE(DEBUG_RESET_I830,
+                          DEBUG_RESET_DISPLAY |
+                          DEBUG_RESET_RENDER |
+                          DEBUG_RESET_FULL);
+               POSTING_READ(DEBUG_RESET_I830);
+               msleep(1);
+
+               I915_WRITE(DEBUG_RESET_I830, 0);
+               POSTING_READ(DEBUG_RESET_I830);
+       }
+
+       msleep(1);
+
+       I915_WRITE(D_STATE, I915_READ(D_STATE) & ~DSTATE_GFX_RESET_I830);
+       POSTING_READ(D_STATE);
+
+       return 0;
+}
+
+static int i965_reset_complete(struct drm_device *dev)
+{
+       u8 gdrst;
+       pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
+       return (gdrst & GRDOM_RESET_ENABLE) == 0;
+}
+
+static int i965_do_reset(struct drm_device *dev)
+{
+       int ret;
+
+       /*
+        * Set the domains we want to reset (GRDOM/bits 2 and 3) as
+        * well as the reset bit (GR/bit 0).  Setting the GR bit
+        * triggers the reset; when done, the hardware will clear it.
+        */
+       pci_write_config_byte(dev->pdev, I965_GDRST,
+                             GRDOM_RENDER | GRDOM_RESET_ENABLE);
+       ret =  wait_for(i965_reset_complete(dev), 500);
+       if (ret)
+               return ret;
+
+       /* We can't reset render&media without also resetting display ... */
+       pci_write_config_byte(dev->pdev, I965_GDRST,
+                             GRDOM_MEDIA | GRDOM_RESET_ENABLE);
+
+       ret =  wait_for(i965_reset_complete(dev), 500);
+       if (ret)
+               return ret;
+
+       pci_write_config_byte(dev->pdev, I965_GDRST, 0);
+
+       return 0;
+}
+
+static int ironlake_do_reset(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       u32 gdrst;
+       int ret;
+
+       gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
+       gdrst &= ~GRDOM_MASK;
+       I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
+                  gdrst | GRDOM_RENDER | GRDOM_RESET_ENABLE);
+       ret = wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
+       if (ret)
+               return ret;
+
+       /* We can't reset render&media without also resetting display ... */
+       gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
+       gdrst &= ~GRDOM_MASK;
+       I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
+                  gdrst | GRDOM_MEDIA | GRDOM_RESET_ENABLE);
+       return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
+}
+
+static int gen6_do_reset(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int     ret;
+       unsigned long irqflags;
+
+       /* Hold uncore.lock across reset to prevent any register access
+        * with forcewake not set correctly
+        */
+       spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+       /* Reset the chip */
+
+       /* GEN6_GDRST is not in the gt power well, no need to check
+        * for fifo space for the write or forcewake the chip for
+        * the read
+        */
+       __raw_i915_write32(dev_priv, GEN6_GDRST, GEN6_GRDOM_FULL);
+
+       /* Spin waiting for the device to ack the reset request */
+       ret = wait_for((__raw_i915_read32(dev_priv, GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);
+
+       /* If reset with a user forcewake, try to restore, otherwise turn it off */
+       if (dev_priv->uncore.forcewake_count)
+               dev_priv->uncore.funcs.force_wake_get(dev_priv);
+       else
+               dev_priv->uncore.funcs.force_wake_put(dev_priv);
+
+       /* Restore fifo count */
+       dev_priv->uncore.fifo_count = __raw_i915_read32(dev_priv, GT_FIFO_FREE_ENTRIES);
+
+       spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+       return ret;
+}
+
+int intel_gpu_reset(struct drm_device *dev)
+{
+       switch (INTEL_INFO(dev)->gen) {
+       case 7:
+       case 6: return gen6_do_reset(dev);
+       case 5: return ironlake_do_reset(dev);
+       case 4: return i965_do_reset(dev);
+       case 2: return i8xx_do_reset(dev);
+       default: return -ENODEV;
+       }
+}
+
+void intel_uncore_clear_errors(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       /* XXX needs spinlock around caller's grouping */
+       if (HAS_FPGA_DBG_UNCLAIMED(dev))
+               __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
+}
+
+void intel_uncore_check_errors(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       if (HAS_FPGA_DBG_UNCLAIMED(dev) &&
+           (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
+               DRM_ERROR("Unclaimed register before interrupt\n");
+               __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
+       }
+}
index 17d0a637e4fbffdd1f546c89b2a82d6235e21b61..6b1a87c8aac52f6f103385a7302713e719a3a1b3 100644 (file)
@@ -50,7 +50,6 @@ static const struct file_operations mga_driver_fops = {
        .unlocked_ioctl = drm_ioctl,
        .mmap = drm_mmap,
        .poll = drm_poll,
-       .fasync = drm_fasync,
 #ifdef CONFIG_COMPAT
        .compat_ioctl = mga_compat_ioctl,
 #endif
@@ -59,7 +58,7 @@ static const struct file_operations mga_driver_fops = {
 
 static struct drm_driver driver = {
        .driver_features =
-           DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA |
+           DRIVER_USE_AGP | DRIVER_PCI_DMA |
            DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
        .dev_priv_size = sizeof(drm_mga_buf_priv_t),
        .load = mga_driver_load,
index 54558a01969ae02d6875c99fafb605f27a717ce0..ca4bc54ea2146303f4cedf7bdd879b3c3bc812fd 100644 (file)
@@ -149,7 +149,7 @@ typedef struct drm_mga_private {
        unsigned int agp_size;
 } drm_mga_private_t;
 
-extern struct drm_ioctl_desc mga_ioctls[];
+extern const struct drm_ioctl_desc mga_ioctls[];
 extern int mga_max_ioctl;
 
                                /* mga_dma.c */
index 9c145143ad0f7a451852a0238d46873398e61083..37cc2fb4eadd9a033316f5951eddc81f8fb072e0 100644 (file)
@@ -1083,7 +1083,7 @@ file_priv)
        return 0;
 }
 
-struct drm_ioctl_desc mga_ioctls[] = {
+const struct drm_ioctl_desc mga_ioctls[] = {
        DRM_IOCTL_DEF_DRV(MGA_INIT, mga_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF_DRV(MGA_FLUSH, mga_dma_flush, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(MGA_RESET, mga_dma_reset, DRM_AUTH),
index 122b571ccc7ce7a940f267ab002821952a535c08..fcce7b2f80110d2c9a2907b02210edc141f1aab9 100644 (file)
@@ -81,7 +81,6 @@ static const struct file_operations mgag200_driver_fops = {
        .unlocked_ioctl = drm_ioctl,
        .mmap = mgag200_mmap,
        .poll = drm_poll,
-       .fasync = drm_fasync,
 #ifdef CONFIG_COMPAT
        .compat_ioctl = drm_compat_ioctl,
 #endif
@@ -89,7 +88,7 @@ static const struct file_operations mgag200_driver_fops = {
 };
 
 static struct drm_driver driver = {
-       .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_USE_MTRR,
+       .driver_features = DRIVER_GEM | DRIVER_MODESET,
        .load = mgag200_driver_load,
        .unload = mgag200_driver_unload,
        .fops = &mgag200_driver_fops,
@@ -104,7 +103,7 @@ static struct drm_driver driver = {
        .gem_free_object = mgag200_gem_free_object,
        .dumb_create = mgag200_dumb_create,
        .dumb_map_offset = mgag200_dumb_mmap_offset,
-       .dumb_destroy = mgag200_dumb_destroy,
+       .dumb_destroy = drm_gem_dumb_destroy,
 };
 
 static struct pci_driver mgag200_pci_driver = {
index 12e2499d935266150643a2bbb624a149e6fb59d0..baaae19332e2e9d98d3cda327d44b8f7c5d5ecb4 100644 (file)
@@ -264,9 +264,6 @@ int mgag200_gem_init_object(struct drm_gem_object *obj);
 int mgag200_dumb_create(struct drm_file *file,
                        struct drm_device *dev,
                        struct drm_mode_create_dumb *args);
-int mgag200_dumb_destroy(struct drm_file *file,
-                        struct drm_device *dev,
-                        uint32_t handle);
 void mgag200_gem_free_object(struct drm_gem_object *obj);
 int
 mgag200_dumb_mmap_offset(struct drm_file *file,
index 9fa5685baee0cff1a57d13b4132f19fdf4359a3d..0f8b861b10b3e6749704ea6a82cd57457602bfc6 100644 (file)
@@ -310,13 +310,6 @@ int mgag200_dumb_create(struct drm_file *file,
        return 0;
 }
 
-int mgag200_dumb_destroy(struct drm_file *file,
-                    struct drm_device *dev,
-                    uint32_t handle)
-{
-       return drm_gem_handle_delete(file, handle);
-}
-
 int mgag200_gem_init_object(struct drm_gem_object *obj)
 {
        BUG();
@@ -349,7 +342,7 @@ void mgag200_gem_free_object(struct drm_gem_object *obj)
 
 static inline u64 mgag200_bo_mmap_offset(struct mgag200_bo *bo)
 {
-       return bo->bo.addr_space_offset;
+       return drm_vma_node_offset_addr(&bo->bo.vma_node);
 }
 
 int
index d70e4a92773bfda96079b1406ffffa9a2f65a0e7..ffd7ca6295566bdd8230a7a21a547489f0ef5bc3 100644 (file)
@@ -321,7 +321,6 @@ int mgag200_bo_create(struct drm_device *dev, int size, int align,
                return ret;
        }
 
-       mgabo->gem.driver_private = NULL;
        mgabo->bo.bdev = &mdev->ttm.bdev;
        mgabo->bo.bdev->dev_mapping = dev->dev_mapping;
 
index 31cc8fe8e7f07ab26cc6ce90ee902a5a2887124a..054d9cff4f533596e0eb52ef4ad5497ce727ca82 100644 (file)
@@ -150,7 +150,7 @@ dp_link_train_update(struct dp_state *dp, u32 delay)
        if (ret)
                return ret;
 
-       DBG("status %*ph\n", 6, dp->stat);
+       DBG("status %6ph\n", dp->stat);
        return 0;
 }
 
index 907d20ef6d4d119f81c06fe1801edf3867f01673..78637afb9b94c8592a2ef62dc3df7baf7d297113 100644 (file)
@@ -673,13 +673,6 @@ nouveau_display_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
        return ret;
 }
 
-int
-nouveau_display_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev,
-                            uint32_t handle)
-{
-       return drm_gem_handle_delete(file_priv, handle);
-}
-
 int
 nouveau_display_dumb_map_offset(struct drm_file *file_priv,
                                struct drm_device *dev,
@@ -690,7 +683,7 @@ nouveau_display_dumb_map_offset(struct drm_file *file_priv,
        gem = drm_gem_object_lookup(dev, file_priv, handle);
        if (gem) {
                struct nouveau_bo *bo = gem->driver_private;
-               *poffset = bo->bo.addr_space_offset;
+               *poffset = drm_vma_node_offset_addr(&bo->bo.vma_node);
                drm_gem_object_unreference_unlocked(gem);
                return 0;
        }
index 1ea3e4734b621f6e103a034f69696ea9b35529df..185e74132a6d9ce2f5679c084ef237663fbdf943 100644 (file)
@@ -68,8 +68,6 @@ int  nouveau_display_dumb_create(struct drm_file *, struct drm_device *,
                                 struct drm_mode_create_dumb *args);
 int  nouveau_display_dumb_map_offset(struct drm_file *, struct drm_device *,
                                     u32 handle, u64 *offset);
-int  nouveau_display_dumb_destroy(struct drm_file *, struct drm_device *,
-                                 u32 handle);
 
 void nouveau_hdmi_mode_set(struct drm_encoder *, struct drm_display_mode *);
 
index 61972668fd0532f9f29e6ea540ab925713d5cf7d..b29d04b822ae8b587009113bc3888810a3a51064 100644 (file)
@@ -649,7 +649,7 @@ nouveau_drm_postclose(struct drm_device *dev, struct drm_file *fpriv)
        nouveau_cli_destroy(cli);
 }
 
-static struct drm_ioctl_desc
+static const struct drm_ioctl_desc
 nouveau_ioctls[] = {
        DRM_IOCTL_DEF_DRV(NOUVEAU_GETPARAM, nouveau_abi16_ioctl_getparam, DRM_UNLOCKED|DRM_AUTH),
        DRM_IOCTL_DEF_DRV(NOUVEAU_SETPARAM, nouveau_abi16_ioctl_setparam, DRM_UNLOCKED|DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
@@ -673,7 +673,6 @@ nouveau_driver_fops = {
        .unlocked_ioctl = drm_ioctl,
        .mmap = nouveau_ttm_mmap,
        .poll = drm_poll,
-       .fasync = drm_fasync,
        .read = drm_read,
 #if defined(CONFIG_COMPAT)
        .compat_ioctl = nouveau_compat_ioctl,
@@ -684,7 +683,7 @@ nouveau_driver_fops = {
 static struct drm_driver
 driver = {
        .driver_features =
-               DRIVER_USE_AGP | DRIVER_PCI_DMA | DRIVER_SG |
+               DRIVER_USE_AGP |
                DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME,
 
        .load = nouveau_drm_load,
@@ -704,6 +703,7 @@ driver = {
        .disable_vblank = nouveau_drm_vblank_disable,
 
        .ioctls = nouveau_ioctls,
+       .num_ioctls = ARRAY_SIZE(nouveau_ioctls),
        .fops = &nouveau_driver_fops,
 
        .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
@@ -724,7 +724,7 @@ driver = {
 
        .dumb_create = nouveau_display_dumb_create,
        .dumb_map_offset = nouveau_display_dumb_map_offset,
-       .dumb_destroy = nouveau_display_dumb_destroy,
+       .dumb_destroy = drm_gem_dumb_destroy,
 
        .name = DRIVER_NAME,
        .desc = DRIVER_DESC,
@@ -774,8 +774,6 @@ nouveau_drm_pci_driver = {
 static int __init
 nouveau_drm_init(void)
 {
-       driver.num_ioctls = ARRAY_SIZE(nouveau_ioctls);
-
        if (nouveau_modeset == -1) {
 #ifdef CONFIG_VGA_CONSOLE
                if (vgacon_text_force())
index 830cb7bad922bba909a6c5129c16a786cb9ee969..487242fb3fdc53f28cac33fc3db7ccfeefab1604 100644 (file)
@@ -220,7 +220,7 @@ nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
        }
 
        rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
-       rep->map_handle = nvbo->bo.addr_space_offset;
+       rep->map_handle = drm_vma_node_offset_addr(&nvbo->bo.vma_node);
        rep->tile_mode = nvbo->tile_mode;
        rep->tile_flags = nvbo->tile_flags;
        return 0;
index d85e058f2845a014c0216cabeda52d70cc7bf0b6..778372b062ad68b65e698a5d92eba9cc1b49b442 100644 (file)
@@ -18,7 +18,4 @@ omapdrm-y := omap_drv.o \
        omap_dmm_tiler.o \
        tcm-sita.o
 
-# temporary:
-omapdrm-y += omap_gem_helpers.o
-
 obj-$(CONFIG_DRM_OMAP) += omapdrm.o
index a3004f12b9a3657173d9aa6e56af3f1c6330bc14..2603d909f49ce032f1933a5d8d17131de132c0d9 100644 (file)
@@ -419,7 +419,7 @@ static int ioctl_gem_info(struct drm_device *dev, void *data,
        return ret;
 }
 
-static struct drm_ioctl_desc ioctls[DRM_COMMAND_END - DRM_COMMAND_BASE] = {
+static const struct drm_ioctl_desc ioctls[DRM_COMMAND_END - DRM_COMMAND_BASE] = {
        DRM_IOCTL_DEF_DRV(OMAP_GET_PARAM, ioctl_get_param, DRM_UNLOCKED|DRM_AUTH),
        DRM_IOCTL_DEF_DRV(OMAP_SET_PARAM, ioctl_set_param, DRM_UNLOCKED|DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF_DRV(OMAP_GEM_NEW, ioctl_gem_new, DRM_UNLOCKED|DRM_AUTH),
@@ -524,12 +524,6 @@ static int dev_open(struct drm_device *dev, struct drm_file *file)
        return 0;
 }
 
-static int dev_firstopen(struct drm_device *dev)
-{
-       DBG("firstopen: dev=%p", dev);
-       return 0;
-}
-
 /**
  * lastclose - clean up after all DRM clients have exited
  * @dev: DRM device
@@ -598,7 +592,6 @@ static const struct file_operations omapdriver_fops = {
                .release = drm_release,
                .mmap = omap_gem_mmap,
                .poll = drm_poll,
-               .fasync = drm_fasync,
                .read = drm_read,
                .llseek = noop_llseek,
 };
@@ -609,7 +602,6 @@ static struct drm_driver omap_drm_driver = {
                .load = dev_load,
                .unload = dev_unload,
                .open = dev_open,
-               .firstopen = dev_firstopen,
                .lastclose = dev_lastclose,
                .preclose = dev_preclose,
                .postclose = dev_postclose,
@@ -633,7 +625,7 @@ static struct drm_driver omap_drm_driver = {
                .gem_vm_ops = &omap_gem_vm_ops,
                .dumb_create = omap_gem_dumb_create,
                .dumb_map_offset = omap_gem_dumb_map_offset,
-               .dumb_destroy = omap_gem_dumb_destroy,
+               .dumb_destroy = drm_gem_dumb_destroy,
                .ioctls = ioctls,
                .num_ioctls = DRM_OMAP_NUM_IOCTLS,
                .fops = &omapdriver_fops,
index 14f17da2ce25552a973c7d8d5cebe1ed1c8fa037..30b95b736658b0c9154ca54c289d99e19361c16c 100644 (file)
@@ -203,9 +203,8 @@ struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev,
 struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
                struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos);
 struct drm_gem_object *omap_framebuffer_bo(struct drm_framebuffer *fb, int p);
-int omap_framebuffer_replace(struct drm_framebuffer *a,
-               struct drm_framebuffer *b, void *arg,
-               void (*unpin)(void *arg, struct drm_gem_object *bo));
+int omap_framebuffer_pin(struct drm_framebuffer *fb);
+int omap_framebuffer_unpin(struct drm_framebuffer *fb);
 void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
                struct omap_drm_window *win, struct omap_overlay_info *info);
 struct drm_connector *omap_framebuffer_get_next_connector(
@@ -225,8 +224,6 @@ int omap_gem_init_object(struct drm_gem_object *obj);
 void *omap_gem_vaddr(struct drm_gem_object *obj);
 int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
                uint32_t handle, uint64_t *offset);
-int omap_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
-               uint32_t handle);
 int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
                struct drm_mode_create_dumb *args);
 int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma);
index 8031402e79516ac984ae24c4ca6ec6e74e2dc663..f2b8f0668c0c1701887e8b6c4a8ccb3b3bf1952b 100644 (file)
@@ -237,55 +237,49 @@ void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
        }
 }
 
-/* Call for unpin 'a' (if not NULL), and pin 'b' (if not NULL).  Although
- * buffers to unpin are just pushed to the unpin fifo so that the
- * caller can defer unpin until vblank.
- *
- * Note if this fails (ie. something went very wrong!), all buffers are
- * unpinned, and the caller disables the overlay.  We could have tried
- * to revert back to the previous set of pinned buffers but if things are
- * hosed there is no guarantee that would succeed.
- */
-int omap_framebuffer_replace(struct drm_framebuffer *a,
-               struct drm_framebuffer *b, void *arg,
-               void (*unpin)(void *arg, struct drm_gem_object *bo))
+/* pin, prepare for scanout: */
+int omap_framebuffer_pin(struct drm_framebuffer *fb)
 {
-       int ret = 0, i, na, nb;
-       struct omap_framebuffer *ofba = to_omap_framebuffer(a);
-       struct omap_framebuffer *ofbb = to_omap_framebuffer(b);
-       uint32_t pinned_mask = 0;
+       struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
+       int ret, i, n = drm_format_num_planes(fb->pixel_format);
 
-       na = a ? drm_format_num_planes(a->pixel_format) : 0;
-       nb = b ? drm_format_num_planes(b->pixel_format) : 0;
+       for (i = 0; i < n; i++) {
+               struct plane *plane = &omap_fb->planes[i];
+               ret = omap_gem_get_paddr(plane->bo, &plane->paddr, true);
+               if (ret)
+                       goto fail;
+               omap_gem_dma_sync(plane->bo, DMA_TO_DEVICE);
+       }
 
-       for (i = 0; i < max(na, nb); i++) {
-               struct plane *pa, *pb;
+       return 0;
 
-               pa = (i < na) ? &ofba->planes[i] : NULL;
-               pb = (i < nb) ? &ofbb->planes[i] : NULL;
+fail:
+       for (i--; i >= 0; i--) {
+               struct plane *plane = &omap_fb->planes[i];
+               omap_gem_put_paddr(plane->bo);
+               plane->paddr = 0;
+       }
 
-               if (pa)
-                       unpin(arg, pa->bo);
+       return ret;
+}
 
-               if (pb && !ret) {
-                       ret = omap_gem_get_paddr(pb->bo, &pb->paddr, true);
-                       if (!ret) {
-                               omap_gem_dma_sync(pb->bo, DMA_TO_DEVICE);
-                               pinned_mask |= (1 << i);
-                       }
-               }
-       }
+/* unpin, no longer being scanned out: */
+int omap_framebuffer_unpin(struct drm_framebuffer *fb)
+{
+       struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
+       int ret, i, n = drm_format_num_planes(fb->pixel_format);
 
-       if (ret) {
-               /* something went wrong.. unpin what has been pinned */
-               for (i = 0; i < nb; i++) {
-                       if (pinned_mask & (1 << i)) {
-                               struct plane *pb = &ofba->planes[i];
-                               unpin(arg, pb->bo);
-                       }
-               }
+       for (i = 0; i < n; i++) {
+               struct plane *plane = &omap_fb->planes[i];
+               ret = omap_gem_put_paddr(plane->bo);
+               if (ret)
+                       goto fail;
+               plane->paddr = 0;
        }
 
+       return 0;
+
+fail:
        return ret;
 }
 
index ebbdf4132e9cb2175fabd6422ac17365eaa10d4a..533f6ebec531ff67a1c1f7a96c2504f852d665f0 100644 (file)
@@ -20,6 +20,7 @@
 
 #include <linux/spinlock.h>
 #include <linux/shmem_fs.h>
+#include <drm/drm_vma_manager.h>
 
 #include "omap_drv.h"
 #include "omap_dmm_tiler.h"
@@ -236,7 +237,7 @@ static int omap_gem_attach_pages(struct drm_gem_object *obj)
         * mapping_gfp_mask(mapping) which conflicts w/ GFP_DMA32.. probably
         * we actually want CMA memory for it all anyways..
         */
-       pages = _drm_gem_get_pages(obj, GFP_KERNEL);
+       pages = drm_gem_get_pages(obj, GFP_KERNEL);
        if (IS_ERR(pages)) {
                dev_err(obj->dev->dev, "could not get pages: %ld\n", PTR_ERR(pages));
                return PTR_ERR(pages);
@@ -270,7 +271,7 @@ static int omap_gem_attach_pages(struct drm_gem_object *obj)
        return 0;
 
 free_pages:
-       _drm_gem_put_pages(obj, pages, true, false);
+       drm_gem_put_pages(obj, pages, true, false);
 
        return ret;
 }
@@ -294,7 +295,7 @@ static void omap_gem_detach_pages(struct drm_gem_object *obj)
        kfree(omap_obj->addrs);
        omap_obj->addrs = NULL;
 
-       _drm_gem_put_pages(obj, omap_obj->pages, true, false);
+       drm_gem_put_pages(obj, omap_obj->pages, true, false);
        omap_obj->pages = NULL;
 }
 
@@ -308,21 +309,20 @@ uint32_t omap_gem_flags(struct drm_gem_object *obj)
 static uint64_t mmap_offset(struct drm_gem_object *obj)
 {
        struct drm_device *dev = obj->dev;
+       int ret;
+       size_t size;
 
        WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 
-       if (!obj->map_list.map) {
-               /* Make it mmapable */
-               size_t size = omap_gem_mmap_size(obj);
-               int ret = _drm_gem_create_mmap_offset_size(obj, size);
-
-               if (ret) {
-                       dev_err(dev->dev, "could not allocate mmap offset\n");
-                       return 0;
-               }
+       /* Make it mmapable */
+       size = omap_gem_mmap_size(obj);
+       ret = drm_gem_create_mmap_offset_size(obj, size);
+       if (ret) {
+               dev_err(dev->dev, "could not allocate mmap offset\n");
+               return 0;
        }
 
-       return (uint64_t)obj->map_list.hash.key << PAGE_SHIFT;
+       return drm_vma_node_offset_addr(&obj->vma_node);
 }
 
 uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj)
@@ -628,21 +628,6 @@ int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
                        OMAP_BO_SCANOUT | OMAP_BO_WC, &args->handle);
 }
 
-/**
- * omap_gem_dumb_destroy       -       destroy a dumb buffer
- * @file: client file
- * @dev: our DRM device
- * @handle: the object handle
- *
- * Destroy a handle that was created via omap_gem_dumb_create.
- */
-int omap_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
-               uint32_t handle)
-{
-       /* No special work needed, drop the reference and see what falls out */
-       return drm_gem_handle_delete(file, handle);
-}
-
 /**
  * omap_gem_dumb_map   -       buffer mapping for dumb interface
  * @file: our drm client file
@@ -997,12 +982,11 @@ void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
 {
        struct drm_device *dev = obj->dev;
        struct omap_gem_object *omap_obj = to_omap_bo(obj);
-       uint64_t off = 0;
+       uint64_t off;
 
        WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 
-       if (obj->map_list.map)
-               off = (uint64_t)obj->map_list.hash.key;
+       off = drm_vma_node_start(&obj->vma_node);
 
        seq_printf(m, "%08x: %2d (%2d) %08llx %08Zx (%2d) %p %4d",
                        omap_obj->flags, obj->name, obj->refcount.refcount.counter,
@@ -1309,8 +1293,7 @@ void omap_gem_free_object(struct drm_gem_object *obj)
 
        list_del(&omap_obj->mm_list);
 
-       if (obj->map_list.map)
-               drm_gem_free_mmap_offset(obj);
+       drm_gem_free_mmap_offset(obj);
 
        /* this means the object is still pinned.. which really should
         * not happen.  I think..
@@ -1427,8 +1410,9 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev,
                omap_obj->height = gsize.tiled.height;
        }
 
+       ret = 0;
        if (flags & (OMAP_BO_DMA|OMAP_BO_EXT_MEM))
-               ret = drm_gem_private_object_init(dev, obj, size);
+               drm_gem_private_object_init(dev, obj, size);
        else
                ret = drm_gem_object_init(dev, obj, size);
 
diff --git a/drivers/gpu/drm/omapdrm/omap_gem_helpers.c b/drivers/gpu/drm/omapdrm/omap_gem_helpers.c
deleted file mode 100644 (file)
index f9eb679..0000000
+++ /dev/null
@@ -1,169 +0,0 @@
-/*
- * drivers/gpu/drm/omapdrm/omap_gem_helpers.c
- *
- * Copyright (C) 2011 Texas Instruments
- * Author: Rob Clark <rob.clark@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-
-/* temporary copy of drm_gem_{get,put}_pages() until the
- * "drm/gem: add functions to get/put pages" patch is merged..
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/shmem_fs.h>
-
-#include <drm/drmP.h>
-
-/**
- * drm_gem_get_pages - helper to allocate backing pages for a GEM object
- * @obj: obj in question
- * @gfpmask: gfp mask of requested pages
- */
-struct page **_drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask)
-{
-       struct inode *inode;
-       struct address_space *mapping;
-       struct page *p, **pages;
-       int i, npages;
-
-       /* This is the shared memory object that backs the GEM resource */
-       inode = file_inode(obj->filp);
-       mapping = inode->i_mapping;
-
-       npages = obj->size >> PAGE_SHIFT;
-
-       pages = drm_malloc_ab(npages, sizeof(struct page *));
-       if (pages == NULL)
-               return ERR_PTR(-ENOMEM);
-
-       gfpmask |= mapping_gfp_mask(mapping);
-
-       for (i = 0; i < npages; i++) {
-               p = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
-               if (IS_ERR(p))
-                       goto fail;
-               pages[i] = p;
-
-               /* There is a hypothetical issue w/ drivers that require
-                * buffer memory in the low 4GB.. if the pages are un-
-                * pinned, and swapped out, they can end up swapped back
-                * in above 4GB.  If pages are already in memory, then
-                * shmem_read_mapping_page_gfp will ignore the gfpmask,
-                * even if the already in-memory page disobeys the mask.
-                *
-                * It is only a theoretical issue today, because none of
-                * the devices with this limitation can be populated with
-                * enough memory to trigger the issue.  But this BUG_ON()
-                * is here as a reminder in case the problem with
-                * shmem_read_mapping_page_gfp() isn't solved by the time
-                * it does become a real issue.
-                *
-                * See this thread: http://lkml.org/lkml/2011/7/11/238
-                */
-               BUG_ON((gfpmask & __GFP_DMA32) &&
-                               (page_to_pfn(p) >= 0x00100000UL));
-       }
-
-       return pages;
-
-fail:
-       while (i--)
-               page_cache_release(pages[i]);
-
-       drm_free_large(pages);
-       return ERR_CAST(p);
-}
-
-/**
- * drm_gem_put_pages - helper to free backing pages for a GEM object
- * @obj: obj in question
- * @pages: pages to free
- */
-void _drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
-               bool dirty, bool accessed)
-{
-       int i, npages;
-
-       npages = obj->size >> PAGE_SHIFT;
-
-       for (i = 0; i < npages; i++) {
-               if (dirty)
-                       set_page_dirty(pages[i]);
-
-               if (accessed)
-                       mark_page_accessed(pages[i]);
-
-               /* Undo the reference we took when populating the table */
-               page_cache_release(pages[i]);
-       }
-
-       drm_free_large(pages);
-}
-
-int
-_drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
-{
-       struct drm_device *dev = obj->dev;
-       struct drm_gem_mm *mm = dev->mm_private;
-       struct drm_map_list *list;
-       struct drm_local_map *map;
-       int ret = 0;
-
-       /* Set the object up for mmap'ing */
-       list = &obj->map_list;
-       list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
-       if (!list->map)
-               return -ENOMEM;
-
-       map = list->map;
-       map->type = _DRM_GEM;
-       map->size = size;
-       map->handle = obj;
-
-       /* Get a DRM GEM mmap offset allocated... */
-       list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
-                       size / PAGE_SIZE, 0, 0);
-
-       if (!list->file_offset_node) {
-               DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
-               ret = -ENOSPC;
-               goto out_free_list;
-       }
-
-       list->file_offset_node = drm_mm_get_block(list->file_offset_node,
-                       size / PAGE_SIZE, 0);
-       if (!list->file_offset_node) {
-               ret = -ENOMEM;
-               goto out_free_list;
-       }
-
-       list->hash.key = list->file_offset_node->start;
-       ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
-       if (ret) {
-               DRM_ERROR("failed to add to map hash\n");
-               goto out_free_mm;
-       }
-
-       return 0;
-
-out_free_mm:
-       drm_mm_put_block(list->file_offset_node);
-out_free_list:
-       kfree(list->map);
-       list->map = NULL;
-
-       return ret;
-}
index 8d225d7ff4e300319211fd8ec80eeccfb4e8fdc2..046d5e660c04c428192b15ab268de39c8fff35a1 100644 (file)
@@ -17,7 +17,7 @@
  * this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
-#include <linux/kfifo.h>
+#include "drm_flip_work.h"
 
 #include "omap_drv.h"
 #include "omap_dmm_tiler.h"
@@ -58,26 +58,23 @@ struct omap_plane {
 
        struct omap_drm_irq error_irq;
 
-       /* set of bo's pending unpin until next post_apply() */
-       DECLARE_KFIFO_PTR(unpin_fifo, struct drm_gem_object *);
+       /* for deferring bo unpin's until next post_apply(): */
+       struct drm_flip_work unpin_work;
 
        // XXX maybe get rid of this and handle vblank in crtc too?
        struct callback apply_done_cb;
 };
 
-static void unpin(void *arg, struct drm_gem_object *bo)
+static void unpin_worker(struct drm_flip_work *work, void *val)
 {
-       struct drm_plane *plane = arg;
-       struct omap_plane *omap_plane = to_omap_plane(plane);
+       struct omap_plane *omap_plane =
+                       container_of(work, struct omap_plane, unpin_work);
+       struct drm_device *dev = omap_plane->base.dev;
 
-       if (kfifo_put(&omap_plane->unpin_fifo,
-                       (const struct drm_gem_object **)&bo)) {
-               /* also hold a ref so it isn't free'd while pinned */
-               drm_gem_object_reference(bo);
-       } else {
-               dev_err(plane->dev->dev, "unpin fifo full!\n");
-               omap_gem_put_paddr(bo);
-       }
+       omap_framebuffer_unpin(val);
+       mutex_lock(&dev->mode_config.mutex);
+       drm_framebuffer_unreference(val);
+       mutex_unlock(&dev->mode_config.mutex);
 }
 
 /* update which fb (if any) is pinned for scanout */
@@ -87,23 +84,22 @@ static int update_pin(struct drm_plane *plane, struct drm_framebuffer *fb)
        struct drm_framebuffer *pinned_fb = omap_plane->pinned_fb;
 
        if (pinned_fb != fb) {
-               int ret;
+               int ret = 0;
 
                DBG("%p -> %p", pinned_fb, fb);
 
-               if (fb)
+               if (fb) {
                        drm_framebuffer_reference(fb);
-
-               ret = omap_framebuffer_replace(pinned_fb, fb, plane, unpin);
+                       ret = omap_framebuffer_pin(fb);
+               }
 
                if (pinned_fb)
-                       drm_framebuffer_unreference(pinned_fb);
+                       drm_flip_work_queue(&omap_plane->unpin_work, pinned_fb);
 
                if (ret) {
                        dev_err(plane->dev->dev, "could not swap %p -> %p\n",
                                        omap_plane->pinned_fb, fb);
-                       if (fb)
-                               drm_framebuffer_unreference(fb);
+                       drm_framebuffer_unreference(fb);
                        omap_plane->pinned_fb = NULL;
                        return ret;
                }
@@ -170,17 +166,14 @@ static void omap_plane_post_apply(struct omap_drm_apply *apply)
        struct omap_plane *omap_plane =
                        container_of(apply, struct omap_plane, apply);
        struct drm_plane *plane = &omap_plane->base;
+       struct omap_drm_private *priv = plane->dev->dev_private;
        struct omap_overlay_info *info = &omap_plane->info;
-       struct drm_gem_object *bo = NULL;
        struct callback cb;
 
        cb = omap_plane->apply_done_cb;
        omap_plane->apply_done_cb.fxn = NULL;
 
-       while (kfifo_get(&omap_plane->unpin_fifo, &bo)) {
-               omap_gem_put_paddr(bo);
-               drm_gem_object_unreference_unlocked(bo);
-       }
+       drm_flip_work_commit(&omap_plane->unpin_work, priv->wq);
 
        if (cb.fxn)
                cb.fxn(cb.arg);
@@ -277,8 +270,7 @@ static void omap_plane_destroy(struct drm_plane *plane)
        omap_plane_disable(plane);
        drm_plane_cleanup(plane);
 
-       WARN_ON(!kfifo_is_empty(&omap_plane->unpin_fifo));
-       kfifo_free(&omap_plane->unpin_fifo);
+       drm_flip_work_cleanup(&omap_plane->unpin_work);
 
        kfree(omap_plane);
 }
@@ -399,7 +391,8 @@ struct drm_plane *omap_plane_init(struct drm_device *dev,
        if (!omap_plane)
                goto fail;
 
-       ret = kfifo_alloc(&omap_plane->unpin_fifo, 16, GFP_KERNEL);
+       ret = drm_flip_work_init(&omap_plane->unpin_work, 16,
+                       "unpin", unpin_worker);
        if (ret) {
                dev_err(dev->dev, "could not allocate unpin FIFO\n");
                goto fail;
index df0b577a66081bb86bc4671293f8fee28cd273d0..514118ae72d4671474b165c5c0b7385f77b04ae7 100644 (file)
@@ -84,7 +84,6 @@ static const struct file_operations qxl_fops = {
        .release = drm_release,
        .unlocked_ioctl = drm_ioctl,
        .poll = drm_poll,
-       .fasync = drm_fasync,
        .mmap = qxl_mmap,
 };
 
@@ -221,7 +220,7 @@ static struct drm_driver qxl_driver = {
 
        .dumb_create = qxl_mode_dumb_create,
        .dumb_map_offset = qxl_mode_dumb_mmap,
-       .dumb_destroy = qxl_mode_dumb_destroy,
+       .dumb_destroy = drm_gem_dumb_destroy,
 #if defined(CONFIG_DEBUG_FS)
        .debugfs_init = qxl_debugfs_init,
        .debugfs_cleanup = qxl_debugfs_takedown,
index 7e96f4f117384faea9bad6b29826888f91973cf3..f7c9adde46a0c8ffb5586eb44461e2c3155e88cb 100644 (file)
@@ -328,7 +328,7 @@ struct qxl_device {
 /* forward declaration for QXL_INFO_IO */
 void qxl_io_log(struct qxl_device *qdev, const char *fmt, ...);
 
-extern struct drm_ioctl_desc qxl_ioctls[];
+extern const struct drm_ioctl_desc qxl_ioctls[];
 extern int qxl_max_ioctl;
 
 int qxl_driver_load(struct drm_device *dev, unsigned long flags);
@@ -405,9 +405,6 @@ int qxl_gem_object_create(struct qxl_device *qdev, int size,
                          bool discardable, bool kernel,
                          struct qxl_surface *surf,
                          struct drm_gem_object **obj);
-int qxl_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain,
-                         uint64_t *gpu_addr);
-void qxl_gem_object_unpin(struct drm_gem_object *obj);
 int qxl_gem_object_create_with_handle(struct qxl_device *qdev,
                                      struct drm_file *file_priv,
                                      u32 domain,
@@ -427,9 +424,6 @@ int qxl_bo_kmap(struct qxl_bo *bo, void **ptr);
 int qxl_mode_dumb_create(struct drm_file *file_priv,
                         struct drm_device *dev,
                         struct drm_mode_create_dumb *args);
-int qxl_mode_dumb_destroy(struct drm_file *file_priv,
-                         struct drm_device *dev,
-                         uint32_t handle);
 int qxl_mode_dumb_mmap(struct drm_file *filp,
                       struct drm_device *dev,
                       uint32_t handle, uint64_t *offset_p);
index 847c4ee798f7b6816ee9c327e573b997d66c953c..d34bb4130ff095d74d915abbe2d3503b78077fc1 100644 (file)
@@ -68,13 +68,6 @@ int qxl_mode_dumb_create(struct drm_file *file_priv,
        return 0;
 }
 
-int qxl_mode_dumb_destroy(struct drm_file *file_priv,
-                            struct drm_device *dev,
-                            uint32_t handle)
-{
-       return drm_gem_handle_delete(file_priv, handle);
-}
-
 int qxl_mode_dumb_mmap(struct drm_file *file_priv,
                       struct drm_device *dev,
                       uint32_t handle, uint64_t *offset_p)
index 25e1777fb0a28551ac1d43d8a7a5b183812f54a7..1648e4125af7619f9923901306935b0f152ac0fa 100644 (file)
@@ -101,32 +101,6 @@ int qxl_gem_object_create_with_handle(struct qxl_device *qdev,
        return 0;
 }
 
-int qxl_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain,
-                         uint64_t *gpu_addr)
-{
-       struct qxl_bo *qobj = obj->driver_private;
-       int r;
-
-       r = qxl_bo_reserve(qobj, false);
-       if (unlikely(r != 0))
-               return r;
-       r = qxl_bo_pin(qobj, pin_domain, gpu_addr);
-       qxl_bo_unreserve(qobj);
-       return r;
-}
-
-void qxl_gem_object_unpin(struct drm_gem_object *obj)
-{
-       struct qxl_bo *qobj = obj->driver_private;
-       int r;
-
-       r = qxl_bo_reserve(qobj, false);
-       if (likely(r == 0)) {
-               qxl_bo_unpin(qobj);
-               qxl_bo_unreserve(qobj);
-       }
-}
-
 int qxl_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
 {
        return 0;
index 6de33563d6f186fe56745137a7e8b3c3b324e5a8..7b95c75e9626ae1324dea48b310d3052f0c00d2d 100644 (file)
@@ -433,7 +433,7 @@ static int qxl_alloc_surf_ioctl(struct drm_device *dev, void *data,
        return ret;
 }
 
-struct drm_ioctl_desc qxl_ioctls[] = {
+const struct drm_ioctl_desc qxl_ioctls[] = {
        DRM_IOCTL_DEF_DRV(QXL_ALLOC, qxl_alloc_ioctl, DRM_AUTH|DRM_UNLOCKED),
 
        DRM_IOCTL_DEF_DRV(QXL_MAP, qxl_map_ioctl, DRM_AUTH|DRM_UNLOCKED),
index aa161cddd87e4a018c601e25e722663d4e0e3207..8691c76c5ef0a440b9391765b7c0ca099e2d6e2c 100644 (file)
@@ -98,7 +98,6 @@ int qxl_bo_create(struct qxl_device *qdev,
                kfree(bo);
                return r;
        }
-       bo->gem_base.driver_private = NULL;
        bo->type = domain;
        bo->pin_count = pinned ? 1 : 0;
        bo->surface_id = 0;
index 8cb6167038e544625ec666a6fef7b4308d8e832d..d458a140c02407c01858f2d7b8a212e80417db3a 100644 (file)
@@ -59,7 +59,7 @@ static inline unsigned long qxl_bo_size(struct qxl_bo *bo)
 
 static inline u64 qxl_bo_mmap_offset(struct qxl_bo *bo)
 {
-       return bo->tbo.addr_space_offset;
+       return drm_vma_node_offset_addr(&bo->tbo.vma_node);
 }
 
 static inline int qxl_bo_wait(struct qxl_bo *bo, u32 *mem_type,
index b61449e52cd5695c7c024548ecedfc3340b11156..0109a9644cb29ef7a6e13ac76a90544effcefd20 100644 (file)
@@ -88,7 +88,7 @@ qxl_release_free(struct qxl_device *qdev,
        list_for_each_entry_safe(entry, tmp, &release->bos, tv.head) {
                struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);
                QXL_INFO(qdev, "release %llx\n",
-                       entry->tv.bo->addr_space_offset
+                       drm_vma_node_offset_addr(&entry->tv.bo->vma_node)
                                                - DRM_FILE_OFFSET);
                qxl_fence_remove_release(&bo->fence, release->id);
                qxl_bo_unref(&bo);
index d4660cf942a52b98c2c0a204b423bbe90ee56ed5..c451257f08fb51ea4b2c5346339237032705f955 100644 (file)
@@ -540,7 +540,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
        dev_priv->ring.end = ((u32 *) dev_priv->cce_ring->handle
                              + init->ring_size / sizeof(u32));
        dev_priv->ring.size = init->ring_size;
-       dev_priv->ring.size_l2qw = drm_order(init->ring_size / 8);
+       dev_priv->ring.size_l2qw = order_base_2(init->ring_size / 8);
 
        dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1;
 
index 472c38fe123fae8e11c95c47c916129f2b490ff1..5bd307cd8da129316db121ad0e41736a10338322 100644 (file)
@@ -48,7 +48,6 @@ static const struct file_operations r128_driver_fops = {
        .unlocked_ioctl = drm_ioctl,
        .mmap = drm_mmap,
        .poll = drm_poll,
-       .fasync = drm_fasync,
 #ifdef CONFIG_COMPAT
        .compat_ioctl = r128_compat_ioctl,
 #endif
@@ -57,7 +56,7 @@ static const struct file_operations r128_driver_fops = {
 
 static struct drm_driver driver = {
        .driver_features =
-           DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG |
+           DRIVER_USE_AGP | DRIVER_PCI_DMA | DRIVER_SG |
            DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
        .dev_priv_size = sizeof(drm_r128_buf_priv_t),
        .load = r128_driver_load,
index 930c71b2fb5e3b5dae831ab388b93721622181da..56eb5e3f54399ae9197e62cd5cfadcf094b7fb1b 100644 (file)
@@ -131,7 +131,7 @@ typedef struct drm_r128_buf_priv {
        drm_r128_freelist_t *list_entry;
 } drm_r128_buf_priv_t;
 
-extern struct drm_ioctl_desc r128_ioctls[];
+extern const struct drm_ioctl_desc r128_ioctls[];
 extern int r128_max_ioctl;
 
                                /* r128_cce.c */
index 19bb7e6f3d9a8e6a3fc292d7780f4f052a03f11e..01dd9aef9f0e9046116d570d52de96b3bf4a121b 100644 (file)
@@ -1643,7 +1643,7 @@ void r128_driver_lastclose(struct drm_device *dev)
        r128_do_cleanup_cce(dev);
 }
 
-struct drm_ioctl_desc r128_ioctls[] = {
+const struct drm_ioctl_desc r128_ioctls[] = {
        DRM_IOCTL_DEF_DRV(R128_INIT, r128_cce_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF_DRV(R128_CCE_START, r128_cce_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF_DRV(R128_CCE_STOP, r128_cce_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
index 32501f6ec991169917af2446db4acbd37f8fbba0..3569d89b9e41c9c69250bffc8b75fce591b69653 100644 (file)
@@ -585,7 +585,7 @@ static bool radeon_dp_get_link_status(struct radeon_connector *radeon_connector,
                return false;
        }
 
-       DRM_DEBUG_KMS("link status %*ph\n", 6, link_status);
+       DRM_DEBUG_KMS("link status %6ph\n", link_status);
        return true;
 }
 
index 8928bd109c1647b684293cb49ed91198419eb33c..84302407c7f24a4304f83eba37a31e8bde7c5688 100644 (file)
@@ -2535,8 +2535,8 @@ static int cik_cp_gfx_resume(struct radeon_device *rdev)
        /* ring 0 - compute and gfx */
        /* Set ring buffer size */
        ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
-       rb_bufsz = drm_order(ring->ring_size / 8);
-       tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+       rb_bufsz = order_base_2(ring->ring_size / 8);
+       tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
 #ifdef __BIG_ENDIAN
        tmp |= BUF_SWAP_32BIT;
 #endif
@@ -2920,7 +2920,7 @@ static int cik_cp_compute_resume(struct radeon_device *rdev)
                /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
                tmp = RREG32(CP_HPD_EOP_CONTROL);
                tmp &= ~EOP_SIZE_MASK;
-               tmp |= drm_order(MEC_HPD_SIZE / 8);
+               tmp |= order_base_2(MEC_HPD_SIZE / 8);
                WREG32(CP_HPD_EOP_CONTROL, tmp);
        }
        cik_srbm_select(rdev, 0, 0, 0, 0);
@@ -3037,9 +3037,9 @@ static int cik_cp_compute_resume(struct radeon_device *rdev)
                        ~(QUEUE_SIZE_MASK | RPTR_BLOCK_SIZE_MASK);
 
                mqd->queue_state.cp_hqd_pq_control |=
-                       drm_order(rdev->ring[idx].ring_size / 8);
+                       order_base_2(rdev->ring[idx].ring_size / 8);
                mqd->queue_state.cp_hqd_pq_control |=
-                       (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8);
+                       (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8);
 #ifdef __BIG_ENDIAN
                mqd->queue_state.cp_hqd_pq_control |= BUF_SWAP_32BIT;
 #endif
@@ -3383,7 +3383,7 @@ static int cik_sdma_gfx_resume(struct radeon_device *rdev)
                WREG32(SDMA0_SEM_WAIT_FAIL_TIMER_CNTL + reg_offset, 0);
 
                /* Set ring buffer size in dwords */
-               rb_bufsz = drm_order(ring->ring_size / 4);
+               rb_bufsz = order_base_2(ring->ring_size / 4);
                rb_cntl = rb_bufsz << 1;
 #ifdef __BIG_ENDIAN
                rb_cntl |= SDMA_RB_SWAP_ENABLE | SDMA_RPTR_WRITEBACK_SWAP_ENABLE;
@@ -5040,7 +5040,7 @@ static int cik_irq_init(struct radeon_device *rdev)
        WREG32(INTERRUPT_CNTL, interrupt_cntl);
 
        WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
-       rb_bufsz = drm_order(rdev->ih.ring_size / 4);
+       rb_bufsz = order_base_2(rdev->ih.ring_size / 4);
 
        ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
                      IH_WPTR_OVERFLOW_CLEAR |
index d5b49e33315e299aa4b2a2085d0fdd3d1c8a99bc..d6e238a6472cd5c6bc79f2addcfaf87ea6c4ba19 100644 (file)
@@ -2881,8 +2881,8 @@ static int evergreen_cp_resume(struct radeon_device *rdev)
        RREG32(GRBM_SOFT_RESET);
 
        /* Set ring buffer size */
-       rb_bufsz = drm_order(ring->ring_size / 8);
-       tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+       rb_bufsz = order_base_2(ring->ring_size / 8);
+       tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
 #ifdef __BIG_ENDIAN
        tmp |= BUF_SWAP_32BIT;
 #endif
index ccb4f8b548524b46900f6d88444179c9f6a3069f..437f60d8109e231dac435ec336d7146d56b30be7 100644 (file)
@@ -1564,8 +1564,8 @@ static int cayman_cp_resume(struct radeon_device *rdev)
 
                /* Set ring buffer size */
                ring = &rdev->ring[ridx[i]];
-               rb_cntl = drm_order(ring->ring_size / 8);
-               rb_cntl |= drm_order(RADEON_GPU_PAGE_SIZE/8) << 8;
+               rb_cntl = order_base_2(ring->ring_size / 8);
+               rb_cntl |= order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8;
 #ifdef __BIG_ENDIAN
                rb_cntl |= BUF_SWAP_32BIT;
 #endif
@@ -1724,7 +1724,7 @@ int cayman_dma_resume(struct radeon_device *rdev)
                WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL + reg_offset, 0);
 
                /* Set ring buffer size in dwords */
-               rb_bufsz = drm_order(ring->ring_size / 4);
+               rb_bufsz = order_base_2(ring->ring_size / 4);
                rb_cntl = rb_bufsz << 1;
 #ifdef __BIG_ENDIAN
                rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
index 75349cdaa84b181186d6dee878d582437521a96a..5625cf706f0c3326b3e70d6d337fa23bbe7650f7 100644 (file)
@@ -1097,7 +1097,7 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
        }
 
        /* Align ring size */
-       rb_bufsz = drm_order(ring_size / 8);
+       rb_bufsz = order_base_2(ring_size / 8);
        ring_size = (1 << (rb_bufsz + 1)) * 4;
        r100_cp_load_microcode(rdev);
        r = radeon_ring_init(rdev, ring, ring_size, RADEON_WB_CP_RPTR_OFFSET,
index b9b776f1e5822cac4a2bc563bcbea886097ce9e6..d8dd269b9159fcc0dfbed98cf8c3c2037742e671 100644 (file)
@@ -1541,7 +1541,7 @@ int r300_init(struct radeon_device *rdev)
        rdev->accel_working = true;
        r = r300_startup(rdev);
        if (r) {
-               /* Somethings want wront with the accel init stop accel */
+               /* Something went wrong with the accel init, so stop accel */
                dev_err(rdev->dev, "Disabling GPU acceleration\n");
                r100_cp_fini(rdev);
                radeon_wb_fini(rdev);
index e66e7207735036e2ffab5e18f37854384595bf80..2eaed9214d026e9dd02217e6561ea42a91252334 100644 (file)
@@ -2417,8 +2417,8 @@ int r600_cp_resume(struct radeon_device *rdev)
        WREG32(GRBM_SOFT_RESET, 0);
 
        /* Set ring buffer size */
-       rb_bufsz = drm_order(ring->ring_size / 8);
-       tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+       rb_bufsz = order_base_2(ring->ring_size / 8);
+       tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
 #ifdef __BIG_ENDIAN
        tmp |= BUF_SWAP_32BIT;
 #endif
@@ -2471,7 +2471,7 @@ void r600_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsign
        int r;
 
        /* Align ring size */
-       rb_bufsz = drm_order(ring_size / 8);
+       rb_bufsz = order_base_2(ring_size / 8);
        ring_size = (1 << (rb_bufsz + 1)) * 4;
        ring->ring_size = ring_size;
        ring->align_mask = 16 - 1;
@@ -2551,7 +2551,7 @@ int r600_dma_resume(struct radeon_device *rdev)
        WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL, 0);
 
        /* Set ring buffer size in dwords */
-       rb_bufsz = drm_order(ring->ring_size / 4);
+       rb_bufsz = order_base_2(ring->ring_size / 4);
        rb_cntl = rb_bufsz << 1;
 #ifdef __BIG_ENDIAN
        rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
@@ -2660,7 +2660,7 @@ int r600_uvd_rbc_start(struct radeon_device *rdev)
        WREG32(UVD_RBC_RB_BASE, ring->gpu_addr);
 
        /* Set ring buffer size */
-       rb_bufsz = drm_order(ring->ring_size);
+       rb_bufsz = order_base_2(ring->ring_size);
        rb_bufsz = (0x1 << 8) | rb_bufsz;
        WREG32(UVD_RBC_RB_CNTL, rb_bufsz);
 
@@ -3838,7 +3838,7 @@ void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size)
        u32 rb_bufsz;
 
        /* Align ring size */
-       rb_bufsz = drm_order(ring_size / 4);
+       rb_bufsz = order_base_2(ring_size / 4);
        ring_size = (1 << rb_bufsz) * 4;
        rdev->ih.ring_size = ring_size;
        rdev->ih.ptr_mask = rdev->ih.ring_size - 1;
@@ -4075,7 +4075,7 @@ int r600_irq_init(struct radeon_device *rdev)
        WREG32(INTERRUPT_CNTL, interrupt_cntl);
 
        WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
-       rb_bufsz = drm_order(rdev->ih.ring_size / 4);
+       rb_bufsz = order_base_2(rdev->ih.ring_size / 4);
 
        ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
                      IH_WPTR_OVERFLOW_CLEAR |
index 1c51c08b1fdeddf5524ddf4029aa02ce98a92dcc..d8eb48bff0ed204e9c52538342a11dcd1bd5b8ff 100644 (file)
@@ -2200,13 +2200,13 @@ int r600_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init,
        dev_priv->ring.end = ((u32 *) dev_priv->cp_ring->handle
                              + init->ring_size / sizeof(u32));
        dev_priv->ring.size = init->ring_size;
-       dev_priv->ring.size_l2qw = drm_order(init->ring_size / 8);
+       dev_priv->ring.size_l2qw = order_base_2(init->ring_size / 8);
 
        dev_priv->ring.rptr_update = /* init->rptr_update */ 4096;
-       dev_priv->ring.rptr_update_l2qw = drm_order(/* init->rptr_update */ 4096 / 8);
+       dev_priv->ring.rptr_update_l2qw = order_base_2(/* init->rptr_update */ 4096 / 8);
 
        dev_priv->ring.fetch_size = /* init->fetch_size */ 32;
-       dev_priv->ring.fetch_size_l2ow = drm_order(/* init->fetch_size */ 32 / 16);
+       dev_priv->ring.fetch_size_l2ow = order_base_2(/* init->fetch_size */ 32 / 16);
 
        dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1;
 
index 274b8e1b889fd0fbbe1dde2a71492e975f00a711..45c835aac1f08ecbd4dec963086cc37ffad5aa16 100644 (file)
@@ -492,9 +492,6 @@ int radeon_mode_dumb_create(struct drm_file *file_priv,
 int radeon_mode_dumb_mmap(struct drm_file *filp,
                          struct drm_device *dev,
                          uint32_t handle, uint64_t *offset_p);
-int radeon_mode_dumb_destroy(struct drm_file *file_priv,
-                            struct drm_device *dev,
-                            uint32_t handle);
 
 /*
  * Semaphores.
@@ -2163,7 +2160,7 @@ void cik_mm_wdoorbell(struct radeon_device *rdev, u32 offset, u32 v);
                WREG32(reg, tmp_);                              \
        } while (0)
 #define WREG32_AND(reg, and) WREG32_P(reg, 0, and)
-#define WREG32_OR(reg, or) WREG32_P(reg, or, ~or)
+#define WREG32_OR(reg, or) WREG32_P(reg, or, ~(or))
 #define WREG32_PLL_P(reg, val, mask)                           \
        do {                                                    \
                uint32_t tmp_ = RREG32_PLL(reg);                \
index efc4f6441ef42c8a9bb85a9d93e6db44f92bc97b..3cae2bbc1854d73dda35f13f0b8cfc3201dc2c25 100644 (file)
@@ -1444,13 +1444,13 @@ static int radeon_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init,
        dev_priv->ring.end = ((u32 *) dev_priv->cp_ring->handle
                              + init->ring_size / sizeof(u32));
        dev_priv->ring.size = init->ring_size;
-       dev_priv->ring.size_l2qw = drm_order(init->ring_size / 8);
+       dev_priv->ring.size_l2qw = order_base_2(init->ring_size / 8);
 
        dev_priv->ring.rptr_update = /* init->rptr_update */ 4096;
-       dev_priv->ring.rptr_update_l2qw = drm_order( /* init->rptr_update */ 4096 / 8);
+       dev_priv->ring.rptr_update_l2qw = order_base_2( /* init->rptr_update */ 4096 / 8);
 
        dev_priv->ring.fetch_size = /* init->fetch_size */ 32;
-       dev_priv->ring.fetch_size_l2ow = drm_order( /* init->fetch_size */ 32 / 16);
+       dev_priv->ring.fetch_size_l2ow = order_base_2( /* init->fetch_size */ 32 / 16);
        dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1;
 
        dev_priv->ring.high_mark = RADEON_RING_HIGH_MARK;
index 29876b1be8ecae78b15bc8690804307c15885f12..1f93dd50364647cd95f132291d68fcab165dfcbb 100644 (file)
@@ -81,7 +81,6 @@
 #define KMS_DRIVER_PATCHLEVEL  0
 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
 int radeon_driver_unload_kms(struct drm_device *dev);
-int radeon_driver_firstopen_kms(struct drm_device *dev);
 void radeon_driver_lastclose_kms(struct drm_device *dev);
 int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv);
 void radeon_driver_postclose_kms(struct drm_device *dev,
@@ -101,8 +100,6 @@ void radeon_driver_irq_preinstall_kms(struct drm_device *dev);
 int radeon_driver_irq_postinstall_kms(struct drm_device *dev);
 void radeon_driver_irq_uninstall_kms(struct drm_device *dev);
 irqreturn_t radeon_driver_irq_handler_kms(DRM_IRQ_ARGS);
-int radeon_dma_ioctl_kms(struct drm_device *dev, void *data,
-                        struct drm_file *file_priv);
 int radeon_gem_object_init(struct drm_gem_object *obj);
 void radeon_gem_object_free(struct drm_gem_object *obj);
 int radeon_gem_object_open(struct drm_gem_object *obj,
@@ -111,7 +108,7 @@ void radeon_gem_object_close(struct drm_gem_object *obj,
                                struct drm_file *file_priv);
 extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc,
                                      int *vpos, int *hpos);
-extern struct drm_ioctl_desc radeon_ioctls_kms[];
+extern const struct drm_ioctl_desc radeon_ioctls_kms[];
 extern int radeon_max_kms_ioctl;
 int radeon_mmap(struct file *filp, struct vm_area_struct *vma);
 int radeon_mode_dumb_mmap(struct drm_file *filp,
@@ -120,9 +117,6 @@ int radeon_mode_dumb_mmap(struct drm_file *filp,
 int radeon_mode_dumb_create(struct drm_file *file_priv,
                            struct drm_device *dev,
                            struct drm_mode_create_dumb *args);
-int radeon_mode_dumb_destroy(struct drm_file *file_priv,
-                            struct drm_device *dev,
-                            uint32_t handle);
 struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj);
 struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev,
                                                        size_t size,
@@ -272,7 +266,6 @@ static const struct file_operations radeon_driver_old_fops = {
        .unlocked_ioctl = drm_ioctl,
        .mmap = drm_mmap,
        .poll = drm_poll,
-       .fasync = drm_fasync,
        .read = drm_read,
 #ifdef CONFIG_COMPAT
        .compat_ioctl = radeon_compat_ioctl,
@@ -282,7 +275,7 @@ static const struct file_operations radeon_driver_old_fops = {
 
 static struct drm_driver driver_old = {
        .driver_features =
-           DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG |
+           DRIVER_USE_AGP | DRIVER_PCI_DMA | DRIVER_SG |
            DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED,
        .dev_priv_size = sizeof(drm_radeon_buf_priv_t),
        .load = radeon_driver_load,
@@ -381,7 +374,6 @@ static const struct file_operations radeon_driver_kms_fops = {
        .unlocked_ioctl = drm_ioctl,
        .mmap = radeon_mmap,
        .poll = drm_poll,
-       .fasync = drm_fasync,
        .read = drm_read,
 #ifdef CONFIG_COMPAT
        .compat_ioctl = radeon_kms_compat_ioctl,
@@ -390,12 +382,11 @@ static const struct file_operations radeon_driver_kms_fops = {
 
 static struct drm_driver kms_driver = {
        .driver_features =
-           DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG |
-           DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED | DRIVER_GEM |
+           DRIVER_USE_AGP |
+           DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM |
            DRIVER_PRIME,
        .dev_priv_size = 0,
        .load = radeon_driver_load_kms,
-       .firstopen = radeon_driver_firstopen_kms,
        .open = radeon_driver_open_kms,
        .preclose = radeon_driver_preclose_kms,
        .postclose = radeon_driver_postclose_kms,
@@ -421,10 +412,9 @@ static struct drm_driver kms_driver = {
        .gem_free_object = radeon_gem_object_free,
        .gem_open_object = radeon_gem_object_open,
        .gem_close_object = radeon_gem_object_close,
-       .dma_ioctl = radeon_dma_ioctl_kms,
        .dumb_create = radeon_mode_dumb_create,
        .dumb_map_offset = radeon_mode_dumb_mmap,
-       .dumb_destroy = radeon_mode_dumb_destroy,
+       .dumb_destroy = drm_gem_dumb_destroy,
        .fops = &radeon_driver_kms_fops,
 
        .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
index aa796031ab659147d546d978ec94a26d0f0bc808..dce99c8a583501e92b72f4012a250a917835a51f 100644 (file)
@@ -570,13 +570,6 @@ int radeon_mode_dumb_create(struct drm_file *file_priv,
        return 0;
 }
 
-int radeon_mode_dumb_destroy(struct drm_file *file_priv,
-                            struct drm_device *dev,
-                            uint32_t handle)
-{
-       return drm_gem_handle_delete(file_priv, handle);
-}
-
 #if defined(CONFIG_DEBUG_FS)
 static int radeon_debugfs_gem_info(struct seq_file *m, void *data)
 {
index 49ff3d1a610238009f242af5fdf4606d9a1951b9..b46a5616664a5b2a5de691b83d5d316321e23bc4 100644 (file)
@@ -448,19 +448,6 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 /*
  * Outdated mess for old drm with Xorg being in charge (void function now).
  */
-/**
- * radeon_driver_firstopen_kms - drm callback for first open
- *
- * @dev: drm dev pointer
- *
- * Nothing to be done for KMS (all asics).
- * Returns 0 on success.
- */
-int radeon_driver_firstopen_kms(struct drm_device *dev)
-{
-       return 0;
-}
-
 /**
  * radeon_driver_firstopen_kms - drm callback for last close
  *
@@ -683,16 +670,6 @@ int radeon_get_vblank_timestamp_kms(struct drm_device *dev, int crtc,
                                                     drmcrtc);
 }
 
-/*
- * IOCTL.
- */
-int radeon_dma_ioctl_kms(struct drm_device *dev, void *data,
-                        struct drm_file *file_priv)
-{
-       /* Not valid in KMS. */
-       return -EINVAL;
-}
-
 #define KMS_INVALID_IOCTL(name)                                                \
 int name(struct drm_device *dev, void *data, struct drm_file *file_priv)\
 {                                                                      \
@@ -732,7 +709,7 @@ KMS_INVALID_IOCTL(radeon_surface_alloc_kms)
 KMS_INVALID_IOCTL(radeon_surface_free_kms)
 
 
-struct drm_ioctl_desc radeon_ioctls_kms[] = {
+const struct drm_ioctl_desc radeon_ioctls_kms[] = {
        DRM_IOCTL_DEF_DRV(RADEON_CP_INIT, radeon_cp_init_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF_DRV(RADEON_CP_START, radeon_cp_start_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF_DRV(RADEON_CP_STOP, radeon_cp_stop_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
index 2020bf4a38302385c5c815d0d1a1d19407d53ab3..c0fa4aa9ceea8ad8d22c7485b28f4bf045a3a7dd 100644 (file)
@@ -142,7 +142,6 @@ int radeon_bo_create(struct radeon_device *rdev,
                return r;
        }
        bo->rdev = rdev;
-       bo->gem_base.driver_private = NULL;
        bo->surface_reg = -1;
        INIT_LIST_HEAD(&bo->list);
        INIT_LIST_HEAD(&bo->va);
index 49c82c4800134aa5500c8999c1e2e7c196330d2c..209b1115026379dccef518bf60ab9fa68217d9ab 100644 (file)
@@ -113,13 +113,10 @@ static inline unsigned radeon_bo_gpu_page_alignment(struct radeon_bo *bo)
  * @bo:        radeon object for which we query the offset
  *
  * Returns mmap offset of the object.
- *
- * Note: addr_space_offset is constant after ttm bo init thus isn't protected
- * by any lock.
  */
 static inline u64 radeon_bo_mmap_offset(struct radeon_bo *bo)
 {
-       return bo->tbo.addr_space_offset;
+       return drm_vma_node_offset_addr(&bo->tbo.vma_node);
 }
 
 extern int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type,
index 65b9eabd5a2f08b28e864c18e05ce060802dffb6..20074560fc256867e82c62518e04d1434cd83662 100644 (file)
@@ -68,7 +68,6 @@ struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev,
                               RADEON_GEM_DOMAIN_GTT, sg, &bo);
        if (ret)
                return ERR_PTR(ret);
-       bo->gem_base.driver_private = bo;
 
        mutex_lock(&rdev->gem.mutex);
        list_add_tail(&bo->list, &rdev->gem.objects);
index f1c15754e73ca6d933d6ea1e0877b839cecab4b6..b79f4f5cdd626108c8790394cc6e57ddd9b27bce 100644 (file)
@@ -356,6 +356,14 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
                return -EINVAL;
        }
 
+       if (bo->tbo.sync_obj) {
+               r = radeon_fence_wait(bo->tbo.sync_obj, false);
+               if (r) {
+                       DRM_ERROR("Failed waiting for UVD message (%d)!\n", r);
+                       return r;
+               }
+       }
+
        r = radeon_bo_kmap(bo, &ptr);
        if (r) {
                DRM_ERROR("Failed mapping the UVD message (%d)!\n", r);
index bcc68ec204adeb7582a536cd3125ab28d00bce6d..f5e92cfcc140984bd63e1a892fa88277bb7530c3 100644 (file)
@@ -744,10 +744,10 @@ static void rv770_init_golden_registers(struct radeon_device *rdev)
                                                 (const u32)ARRAY_SIZE(r7xx_golden_dyn_gpr_registers));
                radeon_program_register_sequence(rdev,
                                                 rv730_golden_registers,
-                                                (const u32)ARRAY_SIZE(rv770_golden_registers));
+                                                (const u32)ARRAY_SIZE(rv730_golden_registers));
                radeon_program_register_sequence(rdev,
                                                 rv730_mgcg_init,
-                                                (const u32)ARRAY_SIZE(rv770_mgcg_init));
+                                                (const u32)ARRAY_SIZE(rv730_mgcg_init));
                break;
        case CHIP_RV710:
                radeon_program_register_sequence(rdev,
@@ -758,18 +758,18 @@ static void rv770_init_golden_registers(struct radeon_device *rdev)
                                                 (const u32)ARRAY_SIZE(r7xx_golden_dyn_gpr_registers));
                radeon_program_register_sequence(rdev,
                                                 rv710_golden_registers,
-                                                (const u32)ARRAY_SIZE(rv770_golden_registers));
+                                                (const u32)ARRAY_SIZE(rv710_golden_registers));
                radeon_program_register_sequence(rdev,
                                                 rv710_mgcg_init,
-                                                (const u32)ARRAY_SIZE(rv770_mgcg_init));
+                                                (const u32)ARRAY_SIZE(rv710_mgcg_init));
                break;
        case CHIP_RV740:
                radeon_program_register_sequence(rdev,
                                                 rv740_golden_registers,
-                                                (const u32)ARRAY_SIZE(rv770_golden_registers));
+                                                (const u32)ARRAY_SIZE(rv740_golden_registers));
                radeon_program_register_sequence(rdev,
                                                 rv740_mgcg_init,
-                                                (const u32)ARRAY_SIZE(rv770_mgcg_init));
+                                                (const u32)ARRAY_SIZE(rv740_mgcg_init));
                break;
        default:
                break;
index daa8d2df8ec502ca72a5b9897be75ae59a9a797c..c24c89aa0fb33ba1093b329150d3af4e4761b75e 100644 (file)
@@ -3387,8 +3387,8 @@ static int si_cp_resume(struct radeon_device *rdev)
        /* ring 0 - compute and gfx */
        /* Set ring buffer size */
        ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
-       rb_bufsz = drm_order(ring->ring_size / 8);
-       tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+       rb_bufsz = order_base_2(ring->ring_size / 8);
+       tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
 #ifdef __BIG_ENDIAN
        tmp |= BUF_SWAP_32BIT;
 #endif
@@ -3420,8 +3420,8 @@ static int si_cp_resume(struct radeon_device *rdev)
        /* ring1  - compute only */
        /* Set ring buffer size */
        ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
-       rb_bufsz = drm_order(ring->ring_size / 8);
-       tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+       rb_bufsz = order_base_2(ring->ring_size / 8);
+       tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
 #ifdef __BIG_ENDIAN
        tmp |= BUF_SWAP_32BIT;
 #endif
@@ -3446,8 +3446,8 @@ static int si_cp_resume(struct radeon_device *rdev)
        /* ring2 - compute only */
        /* Set ring buffer size */
        ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
-       rb_bufsz = drm_order(ring->ring_size / 8);
-       tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+       rb_bufsz = order_base_2(ring->ring_size / 8);
+       tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
 #ifdef __BIG_ENDIAN
        tmp |= BUF_SWAP_32BIT;
 #endif
@@ -5651,7 +5651,7 @@ static int si_irq_init(struct radeon_device *rdev)
        WREG32(INTERRUPT_CNTL, interrupt_cntl);
 
        WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
-       rb_bufsz = drm_order(rdev->ih.ring_size / 4);
+       rb_bufsz = order_base_2(rdev->ih.ring_size / 4);
 
        ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
                      IH_WPTR_OVERFLOW_CLEAR |
index 72887df8dd76a698e49b9d7cd95401cf52913e00..c590cd9dca0bc5d707bf1c8a2fdce62fb3946d59 100644 (file)
@@ -7,3 +7,10 @@ config DRM_RCAR_DU
        help
          Choose this option if you have an R-Car chipset.
          If M is selected the module will be called rcar-du-drm.
+
+config DRM_RCAR_LVDS
+       bool "R-Car DU LVDS Encoder Support"
+       depends on DRM_RCAR_DU
+       help
+         Enable support the R-Car Display Unit embedded LVDS encoders
+         (currently only on R8A7790).
index 7333c0094015ce45bc7dd541082dd6d439be6425..12b8d447783538518e491d34cfca1fc87a9a3683 100644 (file)
@@ -1,8 +1,12 @@
 rcar-du-drm-y := rcar_du_crtc.o \
                 rcar_du_drv.o \
+                rcar_du_encoder.o \
+                rcar_du_group.o \
                 rcar_du_kms.o \
-                rcar_du_lvds.o \
+                rcar_du_lvdscon.o \
                 rcar_du_plane.o \
-                rcar_du_vga.o
+                rcar_du_vgacon.o
 
-obj-$(CONFIG_DRM_RCAR_DU)      += rcar-du-drm.o
+rcar-du-drm-$(CONFIG_DRM_RCAR_LVDS)    += rcar_du_lvdsenc.o
+
+obj-$(CONFIG_DRM_RCAR_DU)              += rcar-du-drm.o
index 24183fb935927851034998ddca26e63a9e820ba3..33df7a5831438092e16116d5594e38d2b8004e33 100644 (file)
 #include "rcar_du_crtc.h"
 #include "rcar_du_drv.h"
 #include "rcar_du_kms.h"
-#include "rcar_du_lvds.h"
 #include "rcar_du_plane.h"
 #include "rcar_du_regs.h"
-#include "rcar_du_vga.h"
-
-#define to_rcar_crtc(c)        container_of(c, struct rcar_du_crtc, crtc)
 
 static u32 rcar_du_crtc_read(struct rcar_du_crtc *rcrtc, u32 reg)
 {
-       struct rcar_du_device *rcdu = rcrtc->crtc.dev->dev_private;
+       struct rcar_du_device *rcdu = rcrtc->group->dev;
 
        return rcar_du_read(rcdu, rcrtc->mmio_offset + reg);
 }
 
 static void rcar_du_crtc_write(struct rcar_du_crtc *rcrtc, u32 reg, u32 data)
 {
-       struct rcar_du_device *rcdu = rcrtc->crtc.dev->dev_private;
+       struct rcar_du_device *rcdu = rcrtc->group->dev;
 
        rcar_du_write(rcdu, rcrtc->mmio_offset + reg, data);
 }
 
 static void rcar_du_crtc_clr(struct rcar_du_crtc *rcrtc, u32 reg, u32 clr)
 {
-       struct rcar_du_device *rcdu = rcrtc->crtc.dev->dev_private;
+       struct rcar_du_device *rcdu = rcrtc->group->dev;
 
        rcar_du_write(rcdu, rcrtc->mmio_offset + reg,
                      rcar_du_read(rcdu, rcrtc->mmio_offset + reg) & ~clr);
@@ -54,7 +50,7 @@ static void rcar_du_crtc_clr(struct rcar_du_crtc *rcrtc, u32 reg, u32 clr)
 
 static void rcar_du_crtc_set(struct rcar_du_crtc *rcrtc, u32 reg, u32 set)
 {
-       struct rcar_du_device *rcdu = rcrtc->crtc.dev->dev_private;
+       struct rcar_du_device *rcdu = rcrtc->group->dev;
 
        rcar_du_write(rcdu, rcrtc->mmio_offset + reg,
                      rcar_du_read(rcdu, rcrtc->mmio_offset + reg) | set);
@@ -63,29 +59,48 @@ static void rcar_du_crtc_set(struct rcar_du_crtc *rcrtc, u32 reg, u32 set)
 static void rcar_du_crtc_clr_set(struct rcar_du_crtc *rcrtc, u32 reg,
                                 u32 clr, u32 set)
 {
-       struct rcar_du_device *rcdu = rcrtc->crtc.dev->dev_private;
+       struct rcar_du_device *rcdu = rcrtc->group->dev;
        u32 value = rcar_du_read(rcdu, rcrtc->mmio_offset + reg);
 
        rcar_du_write(rcdu, rcrtc->mmio_offset + reg, (value & ~clr) | set);
 }
 
+static int rcar_du_crtc_get(struct rcar_du_crtc *rcrtc)
+{
+       int ret;
+
+       ret = clk_prepare_enable(rcrtc->clock);
+       if (ret < 0)
+               return ret;
+
+       ret = rcar_du_group_get(rcrtc->group);
+       if (ret < 0)
+               clk_disable_unprepare(rcrtc->clock);
+
+       return ret;
+}
+
+static void rcar_du_crtc_put(struct rcar_du_crtc *rcrtc)
+{
+       rcar_du_group_put(rcrtc->group);
+       clk_disable_unprepare(rcrtc->clock);
+}
+
 static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc)
 {
-       struct drm_crtc *crtc = &rcrtc->crtc;
-       struct rcar_du_device *rcdu = crtc->dev->dev_private;
-       const struct drm_display_mode *mode = &crtc->mode;
+       const struct drm_display_mode *mode = &rcrtc->crtc.mode;
        unsigned long clk;
        u32 value;
        u32 div;
 
        /* Dot clock */
-       clk = clk_get_rate(rcdu->clock);
+       clk = clk_get_rate(rcrtc->clock);
        div = DIV_ROUND_CLOSEST(clk, mode->clock * 1000);
        div = clamp(div, 1U, 64U) - 1;
 
-       rcar_du_write(rcdu, rcrtc->index ? ESCR2 : ESCR,
-                     ESCR_DCLKSEL_CLKS | div);
-       rcar_du_write(rcdu, rcrtc->index ? OTAR2 : OTAR, 0);
+       rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? ESCR2 : ESCR,
+                           ESCR_DCLKSEL_CLKS | div);
+       rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? OTAR2 : OTAR, 0);
 
        /* Signal polarities */
        value = ((mode->flags & DRM_MODE_FLAG_PVSYNC) ? 0 : DSMR_VSL)
@@ -112,68 +127,25 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc)
        rcar_du_crtc_write(rcrtc, DEWR,  mode->hdisplay);
 }
 
-static void rcar_du_crtc_set_routing(struct rcar_du_crtc *rcrtc)
-{
-       struct rcar_du_device *rcdu = rcrtc->crtc.dev->dev_private;
-       u32 dorcr = rcar_du_read(rcdu, DORCR);
-
-       dorcr &= ~(DORCR_PG2T | DORCR_DK2S | DORCR_PG2D_MASK);
-
-       /* Set the DU1 pins sources. Select CRTC 0 if explicitly requested and
-        * CRTC 1 in all other cases to avoid cloning CRTC 0 to DU0 and DU1 by
-        * default.
-        */
-       if (rcrtc->outputs & (1 << 1) && rcrtc->index == 0)
-               dorcr |= DORCR_PG2D_DS1;
-       else
-               dorcr |= DORCR_PG2T | DORCR_DK2S | DORCR_PG2D_DS2;
-
-       rcar_du_write(rcdu, DORCR, dorcr);
-}
-
-static void __rcar_du_start_stop(struct rcar_du_device *rcdu, bool start)
-{
-       rcar_du_write(rcdu, DSYSR,
-                     (rcar_du_read(rcdu, DSYSR) & ~(DSYSR_DRES | DSYSR_DEN)) |
-                     (start ? DSYSR_DEN : DSYSR_DRES));
-}
-
-static void rcar_du_start_stop(struct rcar_du_device *rcdu, bool start)
-{
-       /* Many of the configuration bits are only updated when the display
-        * reset (DRES) bit in DSYSR is set to 1, disabling *both* CRTCs. Some
-        * of those bits could be pre-configured, but others (especially the
-        * bits related to plane assignment to display timing controllers) need
-        * to be modified at runtime.
-        *
-        * Restart the display controller if a start is requested. Sorry for the
-        * flicker. It should be possible to move most of the "DRES-update" bits
-        * setup to driver initialization time and minimize the number of cases
-        * when the display controller will have to be restarted.
-        */
-       if (start) {
-               if (rcdu->used_crtcs++ != 0)
-                       __rcar_du_start_stop(rcdu, false);
-               __rcar_du_start_stop(rcdu, true);
-       } else {
-               if (--rcdu->used_crtcs == 0)
-                       __rcar_du_start_stop(rcdu, false);
-       }
-}
-
-void rcar_du_crtc_route_output(struct drm_crtc *crtc, unsigned int output)
+void rcar_du_crtc_route_output(struct drm_crtc *crtc,
+                              enum rcar_du_output output)
 {
        struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
+       struct rcar_du_device *rcdu = rcrtc->group->dev;
 
        /* Store the route from the CRTC output to the DU output. The DU will be
         * configured when starting the CRTC.
         */
-       rcrtc->outputs |= 1 << output;
+       rcrtc->outputs |= BIT(output);
+
+       /* Store RGB routing to DPAD0 for R8A7790. */
+       if (rcar_du_has(rcdu, RCAR_DU_FEATURE_DEFR8) &&
+           output == RCAR_DU_OUTPUT_DPAD0)
+               rcdu->dpad0_source = rcrtc->index;
 }
 
 void rcar_du_crtc_update_planes(struct drm_crtc *crtc)
 {
-       struct rcar_du_device *rcdu = crtc->dev->dev_private;
        struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
        struct rcar_du_plane *planes[RCAR_DU_NUM_HW_PLANES];
        unsigned int num_planes = 0;
@@ -182,8 +154,8 @@ void rcar_du_crtc_update_planes(struct drm_crtc *crtc)
        u32 dptsr = 0;
        u32 dspr = 0;
 
-       for (i = 0; i < ARRAY_SIZE(rcdu->planes.planes); ++i) {
-               struct rcar_du_plane *plane = &rcdu->planes.planes[i];
+       for (i = 0; i < ARRAY_SIZE(rcrtc->group->planes.planes); ++i) {
+               struct rcar_du_plane *plane = &rcrtc->group->planes.planes[i];
                unsigned int j;
 
                if (plane->crtc != &rcrtc->crtc || !plane->enabled)
@@ -220,8 +192,8 @@ void rcar_du_crtc_update_planes(struct drm_crtc *crtc)
        /* Select display timing and dot clock generator 2 for planes associated
         * with superposition controller 2.
         */
-       if (rcrtc->index) {
-               u32 value = rcar_du_read(rcdu, DPTSR);
+       if (rcrtc->index % 2) {
+               u32 value = rcar_du_group_read(rcrtc->group, DPTSR);
 
                /* The DPTSR register is updated when the display controller is
                 * stopped. We thus need to restart the DU. Once again, sorry
@@ -231,21 +203,19 @@ void rcar_du_crtc_update_planes(struct drm_crtc *crtc)
                 * occur only if we need to break the pre-association.
                 */
                if (value != dptsr) {
-                       rcar_du_write(rcdu, DPTSR, dptsr);
-                       if (rcdu->used_crtcs) {
-                               __rcar_du_start_stop(rcdu, false);
-                               __rcar_du_start_stop(rcdu, true);
-                       }
+                       rcar_du_group_write(rcrtc->group, DPTSR, dptsr);
+                       if (rcrtc->group->used_crtcs)
+                               rcar_du_group_restart(rcrtc->group);
                }
        }
 
-       rcar_du_write(rcdu, rcrtc->index ? DS2PR : DS1PR, dspr);
+       rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? DS2PR : DS1PR,
+                           dspr);
 }
 
 static void rcar_du_crtc_start(struct rcar_du_crtc *rcrtc)
 {
        struct drm_crtc *crtc = &rcrtc->crtc;
-       struct rcar_du_device *rcdu = crtc->dev->dev_private;
        unsigned int i;
 
        if (rcrtc->started)
@@ -260,16 +230,16 @@ static void rcar_du_crtc_start(struct rcar_du_crtc *rcrtc)
 
        /* Configure display timings and output routing */
        rcar_du_crtc_set_display_timing(rcrtc);
-       rcar_du_crtc_set_routing(rcrtc);
+       rcar_du_group_set_routing(rcrtc->group);
 
-       mutex_lock(&rcdu->planes.lock);
+       mutex_lock(&rcrtc->group->planes.lock);
        rcrtc->plane->enabled = true;
        rcar_du_crtc_update_planes(crtc);
-       mutex_unlock(&rcdu->planes.lock);
+       mutex_unlock(&rcrtc->group->planes.lock);
 
        /* Setup planes. */
-       for (i = 0; i < ARRAY_SIZE(rcdu->planes.planes); ++i) {
-               struct rcar_du_plane *plane = &rcdu->planes.planes[i];
+       for (i = 0; i < ARRAY_SIZE(rcrtc->group->planes.planes); ++i) {
+               struct rcar_du_plane *plane = &rcrtc->group->planes.planes[i];
 
                if (plane->crtc != crtc || !plane->enabled)
                        continue;
@@ -283,7 +253,7 @@ static void rcar_du_crtc_start(struct rcar_du_crtc *rcrtc)
         */
        rcar_du_crtc_clr_set(rcrtc, DSYSR, DSYSR_TVM_MASK, DSYSR_TVM_MASTER);
 
-       rcar_du_start_stop(rcdu, true);
+       rcar_du_group_start_stop(rcrtc->group, true);
 
        rcrtc->started = true;
 }
@@ -291,42 +261,37 @@ static void rcar_du_crtc_start(struct rcar_du_crtc *rcrtc)
 static void rcar_du_crtc_stop(struct rcar_du_crtc *rcrtc)
 {
        struct drm_crtc *crtc = &rcrtc->crtc;
-       struct rcar_du_device *rcdu = crtc->dev->dev_private;
 
        if (!rcrtc->started)
                return;
 
-       mutex_lock(&rcdu->planes.lock);
+       mutex_lock(&rcrtc->group->planes.lock);
        rcrtc->plane->enabled = false;
        rcar_du_crtc_update_planes(crtc);
-       mutex_unlock(&rcdu->planes.lock);
+       mutex_unlock(&rcrtc->group->planes.lock);
 
        /* Select switch sync mode. This stops display operation and configures
         * the HSYNC and VSYNC signals as inputs.
         */
        rcar_du_crtc_clr_set(rcrtc, DSYSR, DSYSR_TVM_MASK, DSYSR_TVM_SWITCH);
 
-       rcar_du_start_stop(rcdu, false);
+       rcar_du_group_start_stop(rcrtc->group, false);
 
        rcrtc->started = false;
 }
 
 void rcar_du_crtc_suspend(struct rcar_du_crtc *rcrtc)
 {
-       struct rcar_du_device *rcdu = rcrtc->crtc.dev->dev_private;
-
        rcar_du_crtc_stop(rcrtc);
-       rcar_du_put(rcdu);
+       rcar_du_crtc_put(rcrtc);
 }
 
 void rcar_du_crtc_resume(struct rcar_du_crtc *rcrtc)
 {
-       struct rcar_du_device *rcdu = rcrtc->crtc.dev->dev_private;
-
        if (rcrtc->dpms != DRM_MODE_DPMS_ON)
                return;
 
-       rcar_du_get(rcdu);
+       rcar_du_crtc_get(rcrtc);
        rcar_du_crtc_start(rcrtc);
 }
 
@@ -340,18 +305,17 @@ static void rcar_du_crtc_update_base(struct rcar_du_crtc *rcrtc)
 
 static void rcar_du_crtc_dpms(struct drm_crtc *crtc, int mode)
 {
-       struct rcar_du_device *rcdu = crtc->dev->dev_private;
        struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
 
        if (rcrtc->dpms == mode)
                return;
 
        if (mode == DRM_MODE_DPMS_ON) {
-               rcar_du_get(rcdu);
+               rcar_du_crtc_get(rcrtc);
                rcar_du_crtc_start(rcrtc);
        } else {
                rcar_du_crtc_stop(rcrtc);
-               rcar_du_put(rcdu);
+               rcar_du_crtc_put(rcrtc);
        }
 
        rcrtc->dpms = mode;
@@ -367,13 +331,12 @@ static bool rcar_du_crtc_mode_fixup(struct drm_crtc *crtc,
 
 static void rcar_du_crtc_mode_prepare(struct drm_crtc *crtc)
 {
-       struct rcar_du_device *rcdu = crtc->dev->dev_private;
        struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
 
        /* We need to access the hardware during mode set, acquire a reference
-        * to the DU.
+        * to the CRTC.
         */
-       rcar_du_get(rcdu);
+       rcar_du_crtc_get(rcrtc);
 
        /* Stop the CRTC and release the plane. Force the DPMS mode to off as a
         * result.
@@ -390,8 +353,8 @@ static int rcar_du_crtc_mode_set(struct drm_crtc *crtc,
                                 int x, int y,
                                 struct drm_framebuffer *old_fb)
 {
-       struct rcar_du_device *rcdu = crtc->dev->dev_private;
        struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
+       struct rcar_du_device *rcdu = rcrtc->group->dev;
        const struct rcar_du_format_info *format;
        int ret;
 
@@ -423,10 +386,10 @@ static int rcar_du_crtc_mode_set(struct drm_crtc *crtc,
 
 error:
        /* There's no rollback/abort operation to clean up in case of error. We
-        * thus need to release the reference to the DU acquired in prepare()
+        * thus need to release the reference to the CRTC acquired in prepare()
         * here.
         */
-       rcar_du_put(rcdu);
+       rcar_du_crtc_put(rcrtc);
        return ret;
 }
 
@@ -514,6 +477,24 @@ static void rcar_du_crtc_finish_page_flip(struct rcar_du_crtc *rcrtc)
        drm_vblank_put(dev, rcrtc->index);
 }
 
+static irqreturn_t rcar_du_crtc_irq(int irq, void *arg)
+{
+       struct rcar_du_crtc *rcrtc = arg;
+       irqreturn_t ret = IRQ_NONE;
+       u32 status;
+
+       status = rcar_du_crtc_read(rcrtc, DSSR);
+       rcar_du_crtc_write(rcrtc, DSRCR, status & DSRCR_MASK);
+
+       if (status & DSSR_VBK) {
+               drm_handle_vblank(rcrtc->crtc.dev, rcrtc->index);
+               rcar_du_crtc_finish_page_flip(rcrtc);
+               ret = IRQ_HANDLED;
+       }
+
+       return ret;
+}
+
 static int rcar_du_crtc_page_flip(struct drm_crtc *crtc,
                                  struct drm_framebuffer *fb,
                                  struct drm_pending_vblank_event *event)
@@ -549,16 +530,41 @@ static const struct drm_crtc_funcs crtc_funcs = {
        .page_flip = rcar_du_crtc_page_flip,
 };
 
-int rcar_du_crtc_create(struct rcar_du_device *rcdu, unsigned int index)
+int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int index)
 {
+       static const unsigned int mmio_offsets[] = {
+               DU0_REG_OFFSET, DU1_REG_OFFSET, DU2_REG_OFFSET
+       };
+
+       struct rcar_du_device *rcdu = rgrp->dev;
+       struct platform_device *pdev = to_platform_device(rcdu->dev);
        struct rcar_du_crtc *rcrtc = &rcdu->crtcs[index];
        struct drm_crtc *crtc = &rcrtc->crtc;
+       unsigned int irqflags;
+       char clk_name[5];
+       char *name;
+       int irq;
        int ret;
 
-       rcrtc->mmio_offset = index ? DISP2_REG_OFFSET : 0;
+       /* Get the CRTC clock. */
+       if (rcar_du_has(rcdu, RCAR_DU_FEATURE_CRTC_IRQ_CLOCK)) {
+               sprintf(clk_name, "du.%u", index);
+               name = clk_name;
+       } else {
+               name = NULL;
+       }
+
+       rcrtc->clock = devm_clk_get(rcdu->dev, name);
+       if (IS_ERR(rcrtc->clock)) {
+               dev_err(rcdu->dev, "no clock for CRTC %u\n", index);
+               return PTR_ERR(rcrtc->clock);
+       }
+
+       rcrtc->group = rgrp;
+       rcrtc->mmio_offset = mmio_offsets[index];
        rcrtc->index = index;
        rcrtc->dpms = DRM_MODE_DPMS_OFF;
-       rcrtc->plane = &rcdu->planes.planes[index];
+       rcrtc->plane = &rgrp->planes.planes[index % 2];
 
        rcrtc->plane->crtc = crtc;
 
@@ -568,6 +574,28 @@ int rcar_du_crtc_create(struct rcar_du_device *rcdu, unsigned int index)
 
        drm_crtc_helper_add(crtc, &crtc_helper_funcs);
 
+       /* Register the interrupt handler. */
+       if (rcar_du_has(rcdu, RCAR_DU_FEATURE_CRTC_IRQ_CLOCK)) {
+               irq = platform_get_irq(pdev, index);
+               irqflags = 0;
+       } else {
+               irq = platform_get_irq(pdev, 0);
+               irqflags = IRQF_SHARED;
+       }
+
+       if (irq < 0) {
+               dev_err(rcdu->dev, "no IRQ for CRTC %u\n", index);
+               return ret;
+       }
+
+       ret = devm_request_irq(rcdu->dev, irq, rcar_du_crtc_irq, irqflags,
+                              dev_name(rcdu->dev), rcrtc);
+       if (ret < 0) {
+               dev_err(rcdu->dev,
+                       "failed to register IRQ for CRTC %u\n", index);
+               return ret;
+       }
+
        return 0;
 }
 
@@ -580,16 +608,3 @@ void rcar_du_crtc_enable_vblank(struct rcar_du_crtc *rcrtc, bool enable)
                rcar_du_crtc_clr(rcrtc, DIER, DIER_VBE);
        }
 }
-
-void rcar_du_crtc_irq(struct rcar_du_crtc *rcrtc)
-{
-       u32 status;
-
-       status = rcar_du_crtc_read(rcrtc, DSSR);
-       rcar_du_crtc_write(rcrtc, DSRCR, status & DSRCR_MASK);
-
-       if (status & DSSR_VBK) {
-               drm_handle_vblank(rcrtc->crtc.dev, rcrtc->index);
-               rcar_du_crtc_finish_page_flip(rcrtc);
-       }
-}
index 2a0365bcbd14c481bb19542ba9b577ebe20b9553..43e7575c700c26354d9971ea211ef84a4db2cfc9 100644 (file)
 #define __RCAR_DU_CRTC_H__
 
 #include <linux/mutex.h>
+#include <linux/platform_data/rcar-du.h>
 
 #include <drm/drmP.h>
 #include <drm/drm_crtc.h>
 
-struct rcar_du_device;
+struct rcar_du_group;
 struct rcar_du_plane;
 
 struct rcar_du_crtc {
        struct drm_crtc crtc;
 
+       struct clk *clock;
        unsigned int mmio_offset;
        unsigned int index;
        bool started;
@@ -33,18 +35,21 @@ struct rcar_du_crtc {
        unsigned int outputs;
        int dpms;
 
+       struct rcar_du_group *group;
        struct rcar_du_plane *plane;
 };
 
-int rcar_du_crtc_create(struct rcar_du_device *rcdu, unsigned int index);
+#define to_rcar_crtc(c)        container_of(c, struct rcar_du_crtc, crtc)
+
+int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int index);
 void rcar_du_crtc_enable_vblank(struct rcar_du_crtc *rcrtc, bool enable);
-void rcar_du_crtc_irq(struct rcar_du_crtc *rcrtc);
 void rcar_du_crtc_cancel_page_flip(struct rcar_du_crtc *rcrtc,
                                   struct drm_file *file);
 void rcar_du_crtc_suspend(struct rcar_du_crtc *rcrtc);
 void rcar_du_crtc_resume(struct rcar_du_crtc *rcrtc);
 
-void rcar_du_crtc_route_output(struct drm_crtc *crtc, unsigned int output);
+void rcar_du_crtc_route_output(struct drm_crtc *crtc,
+                              enum rcar_du_output output);
 void rcar_du_crtc_update_planes(struct drm_crtc *crtc);
 
 #endif /* __RCAR_DU_CRTC_H__ */
index dc0fe09b2ba10d8b39f51e29a9340818f8c253ff..0023f9719cf18fda9e2f3232bae5bf178e15de7d 100644 (file)
@@ -21,6 +21,7 @@
 
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_cma_helper.h>
 #include <drm/drm_gem_cma_helper.h>
 
 #include "rcar_du_crtc.h"
 #include "rcar_du_kms.h"
 #include "rcar_du_regs.h"
 
-/* -----------------------------------------------------------------------------
- * Core device operations
- */
-
-/*
- * rcar_du_get - Acquire a reference to the DU
- *
- * Acquiring a reference enables the device clock and setup core registers. A
- * reference must be held before accessing any hardware registers.
- *
- * This function must be called with the DRM mode_config lock held.
- *
- * Return 0 in case of success or a negative error code otherwise.
- */
-int rcar_du_get(struct rcar_du_device *rcdu)
-{
-       int ret;
-
-       if (rcdu->use_count)
-               goto done;
-
-       /* Enable clocks before accessing the hardware. */
-       ret = clk_prepare_enable(rcdu->clock);
-       if (ret < 0)
-               return ret;
-
-       /* Enable extended features */
-       rcar_du_write(rcdu, DEFR, DEFR_CODE | DEFR_DEFE);
-       rcar_du_write(rcdu, DEFR2, DEFR2_CODE | DEFR2_DEFE2G);
-       rcar_du_write(rcdu, DEFR3, DEFR3_CODE | DEFR3_DEFE3);
-       rcar_du_write(rcdu, DEFR4, DEFR4_CODE);
-       rcar_du_write(rcdu, DEFR5, DEFR5_CODE | DEFR5_DEFE5);
-
-       /* Use DS1PR and DS2PR to configure planes priorities and connects the
-        * superposition 0 to DU0 pins. DU1 pins will be configured dynamically.
-        */
-       rcar_du_write(rcdu, DORCR, DORCR_PG1D_DS1 | DORCR_DPRS);
-
-done:
-       rcdu->use_count++;
-       return 0;
-}
-
-/*
- * rcar_du_put - Release a reference to the DU
- *
- * Releasing the last reference disables the device clock.
- *
- * This function must be called with the DRM mode_config lock held.
- */
-void rcar_du_put(struct rcar_du_device *rcdu)
-{
-       if (--rcdu->use_count)
-               return;
-
-       clk_disable_unprepare(rcdu->clock);
-}
-
 /* -----------------------------------------------------------------------------
  * DRM operations
  */
 
 static int rcar_du_unload(struct drm_device *dev)
 {
+       struct rcar_du_device *rcdu = dev->dev_private;
+
+       if (rcdu->fbdev)
+               drm_fbdev_cma_fini(rcdu->fbdev);
+
        drm_kms_helper_poll_fini(dev);
        drm_mode_config_cleanup(dev);
        drm_vblank_cleanup(dev);
-       drm_irq_uninstall(dev);
 
+       dev->irq_enabled = 0;
        dev->dev_private = NULL;
 
        return 0;
@@ -107,7 +55,6 @@ static int rcar_du_load(struct drm_device *dev, unsigned long flags)
        struct platform_device *pdev = dev->platformdev;
        struct rcar_du_platform_data *pdata = pdev->dev.platform_data;
        struct rcar_du_device *rcdu;
-       struct resource *ioarea;
        struct resource *mem;
        int ret;
 
@@ -124,35 +71,15 @@ static int rcar_du_load(struct drm_device *dev, unsigned long flags)
 
        rcdu->dev = &pdev->dev;
        rcdu->pdata = pdata;
+       rcdu->info = (struct rcar_du_device_info *)pdev->id_entry->driver_data;
        rcdu->ddev = dev;
        dev->dev_private = rcdu;
 
-       /* I/O resources and clocks */
+       /* I/O resources */
        mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (mem == NULL) {
-               dev_err(&pdev->dev, "failed to get memory resource\n");
-               return -EINVAL;
-       }
-
-       ioarea = devm_request_mem_region(&pdev->dev, mem->start,
-                                        resource_size(mem), pdev->name);
-       if (ioarea == NULL) {
-               dev_err(&pdev->dev, "failed to request memory region\n");
-               return -EBUSY;
-       }
-
-       rcdu->mmio = devm_ioremap_nocache(&pdev->dev, ioarea->start,
-                                         resource_size(ioarea));
-       if (rcdu->mmio == NULL) {
-               dev_err(&pdev->dev, "failed to remap memory resource\n");
-               return -ENOMEM;
-       }
-
-       rcdu->clock = devm_clk_get(&pdev->dev, NULL);
-       if (IS_ERR(rcdu->clock)) {
-               dev_err(&pdev->dev, "failed to get clock\n");
-               return -ENOENT;
-       }
+       rcdu->mmio = devm_ioremap_resource(&pdev->dev, mem);
+       if (IS_ERR(rcdu->mmio))
+               return PTR_ERR(rcdu->mmio);
 
        /* DRM/KMS objects */
        ret = rcar_du_modeset_init(rcdu);
@@ -161,18 +88,14 @@ static int rcar_du_load(struct drm_device *dev, unsigned long flags)
                goto done;
        }
 
-       /* IRQ and vblank handling */
+       /* vblank handling */
        ret = drm_vblank_init(dev, (1 << rcdu->num_crtcs) - 1);
        if (ret < 0) {
                dev_err(&pdev->dev, "failed to initialize vblank\n");
                goto done;
        }
 
-       ret = drm_irq_install(dev);
-       if (ret < 0) {
-               dev_err(&pdev->dev, "failed to install IRQ handler\n");
-               goto done;
-       }
+       dev->irq_enabled = 1;
 
        platform_set_drvdata(pdev, rcdu);
 
@@ -188,20 +111,15 @@ static void rcar_du_preclose(struct drm_device *dev, struct drm_file *file)
        struct rcar_du_device *rcdu = dev->dev_private;
        unsigned int i;
 
-       for (i = 0; i < ARRAY_SIZE(rcdu->crtcs); ++i)
+       for (i = 0; i < rcdu->num_crtcs; ++i)
                rcar_du_crtc_cancel_page_flip(&rcdu->crtcs[i], file);
 }
 
-static irqreturn_t rcar_du_irq(int irq, void *arg)
+static void rcar_du_lastclose(struct drm_device *dev)
 {
-       struct drm_device *dev = arg;
        struct rcar_du_device *rcdu = dev->dev_private;
-       unsigned int i;
-
-       for (i = 0; i < ARRAY_SIZE(rcdu->crtcs); ++i)
-               rcar_du_crtc_irq(&rcdu->crtcs[i]);
 
-       return IRQ_HANDLED;
+       drm_fbdev_cma_restore_mode(rcdu->fbdev);
 }
 
 static int rcar_du_enable_vblank(struct drm_device *dev, int crtc)
@@ -230,18 +148,16 @@ static const struct file_operations rcar_du_fops = {
 #endif
        .poll           = drm_poll,
        .read           = drm_read,
-       .fasync         = drm_fasync,
        .llseek         = no_llseek,
        .mmap           = drm_gem_cma_mmap,
 };
 
 static struct drm_driver rcar_du_driver = {
-       .driver_features        = DRIVER_HAVE_IRQ | DRIVER_GEM | DRIVER_MODESET
-                               | DRIVER_PRIME,
+       .driver_features        = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME,
        .load                   = rcar_du_load,
        .unload                 = rcar_du_unload,
        .preclose               = rcar_du_preclose,
-       .irq_handler            = rcar_du_irq,
+       .lastclose              = rcar_du_lastclose,
        .get_vblank_counter     = drm_vblank_count,
        .enable_vblank          = rcar_du_enable_vblank,
        .disable_vblank         = rcar_du_disable_vblank,
@@ -258,7 +174,7 @@ static struct drm_driver rcar_du_driver = {
        .gem_prime_mmap         = drm_gem_cma_prime_mmap,
        .dumb_create            = rcar_du_dumb_create,
        .dumb_map_offset        = drm_gem_cma_dumb_map_offset,
-       .dumb_destroy           = drm_gem_cma_dumb_destroy,
+       .dumb_destroy           = drm_gem_dumb_destroy,
        .fops                   = &rcar_du_fops,
        .name                   = "rcar-du",
        .desc                   = "Renesas R-Car Display Unit",
@@ -313,6 +229,57 @@ static int rcar_du_remove(struct platform_device *pdev)
        return 0;
 }
 
+static const struct rcar_du_device_info rcar_du_r8a7779_info = {
+       .features = 0,
+       .num_crtcs = 2,
+       .routes = {
+               /* R8A7779 has two RGB outputs and one (currently unsupported)
+                * TCON output.
+                */
+               [RCAR_DU_OUTPUT_DPAD0] = {
+                       .possible_crtcs = BIT(0),
+                       .encoder_type = DRM_MODE_ENCODER_NONE,
+               },
+               [RCAR_DU_OUTPUT_DPAD1] = {
+                       .possible_crtcs = BIT(1) | BIT(0),
+                       .encoder_type = DRM_MODE_ENCODER_NONE,
+               },
+       },
+       .num_lvds = 0,
+};
+
+static const struct rcar_du_device_info rcar_du_r8a7790_info = {
+       .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK | RCAR_DU_FEATURE_ALIGN_128B
+                 | RCAR_DU_FEATURE_DEFR8,
+       .num_crtcs = 3,
+       .routes = {
+               /* R8A7790 has one RGB output, two LVDS outputs and one
+                * (currently unsupported) TCON output.
+                */
+               [RCAR_DU_OUTPUT_DPAD0] = {
+                       .possible_crtcs = BIT(2) | BIT(1) | BIT(0),
+                       .encoder_type = DRM_MODE_ENCODER_NONE,
+               },
+               [RCAR_DU_OUTPUT_LVDS0] = {
+                       .possible_crtcs = BIT(0),
+                       .encoder_type = DRM_MODE_ENCODER_LVDS,
+               },
+               [RCAR_DU_OUTPUT_LVDS1] = {
+                       .possible_crtcs = BIT(2) | BIT(1),
+                       .encoder_type = DRM_MODE_ENCODER_LVDS,
+               },
+       },
+       .num_lvds = 2,
+};
+
+static const struct platform_device_id rcar_du_id_table[] = {
+       { "rcar-du-r8a7779", (kernel_ulong_t)&rcar_du_r8a7779_info },
+       { "rcar-du-r8a7790", (kernel_ulong_t)&rcar_du_r8a7790_info },
+       { }
+};
+
+MODULE_DEVICE_TABLE(platform, rcar_du_id_table);
+
 static struct platform_driver rcar_du_platform_driver = {
        .probe          = rcar_du_probe,
        .remove         = rcar_du_remove,
@@ -321,6 +288,7 @@ static struct platform_driver rcar_du_platform_driver = {
                .name   = "rcar-du",
                .pm     = &rcar_du_pm_ops,
        },
+       .id_table       = rcar_du_id_table,
 };
 
 module_platform_driver(rcar_du_platform_driver);
index 193cc59d495cbd8e1a5427fae9fd1acce229dc15..65d2d636b002d9fbf32fb2c6391b8df4bc63ff03 100644 (file)
 #define __RCAR_DU_DRV_H__
 
 #include <linux/kernel.h>
-#include <linux/mutex.h>
 #include <linux/platform_data/rcar-du.h>
 
 #include "rcar_du_crtc.h"
-#include "rcar_du_plane.h"
+#include "rcar_du_group.h"
 
 struct clk;
 struct device;
 struct drm_device;
+struct drm_fbdev_cma;
+struct rcar_du_device;
+struct rcar_du_lvdsenc;
+
+#define RCAR_DU_FEATURE_CRTC_IRQ_CLOCK (1 << 0)        /* Per-CRTC IRQ and clock */
+#define RCAR_DU_FEATURE_ALIGN_128B     (1 << 1)        /* Align pitches to 128 bytes */
+#define RCAR_DU_FEATURE_DEFR8          (1 << 2)        /* Has DEFR8 register */
+
+/*
+ * struct rcar_du_output_routing - Output routing specification
+ * @possible_crtcs: bitmask of possible CRTCs for the output
+ * @encoder_type: DRM type of the internal encoder associated with the output
+ *
+ * The DU has 5 possible outputs (DPAD0/1, LVDS0/1, TCON). Output routing data
+ * specify the valid SoC outputs, which CRTCs can drive the output, and the type
+ * of in-SoC encoder for the output.
+ */
+struct rcar_du_output_routing {
+       unsigned int possible_crtcs;
+       unsigned int encoder_type;
+};
+
+/*
+ * struct rcar_du_device_info - DU model-specific information
+ * @features: device features (RCAR_DU_FEATURE_*)
+ * @num_crtcs: total number of CRTCs
+ * @routes: array of CRTC to output routes, indexed by output (RCAR_DU_OUTPUT_*)
+ * @num_lvds: number of internal LVDS encoders
+ */
+struct rcar_du_device_info {
+       unsigned int features;
+       unsigned int num_crtcs;
+       struct rcar_du_output_routing routes[RCAR_DU_OUTPUT_MAX];
+       unsigned int num_lvds;
+};
 
 struct rcar_du_device {
        struct device *dev;
        const struct rcar_du_platform_data *pdata;
+       const struct rcar_du_device_info *info;
 
        void __iomem *mmio;
-       struct clk *clock;
-       unsigned int use_count;
 
        struct drm_device *ddev;
+       struct drm_fbdev_cma *fbdev;
 
-       struct rcar_du_crtc crtcs[2];
-       unsigned int used_crtcs;
+       struct rcar_du_crtc crtcs[3];
        unsigned int num_crtcs;
 
-       struct {
-               struct rcar_du_plane planes[RCAR_DU_NUM_SW_PLANES];
-               unsigned int free;
-               struct mutex lock;
+       struct rcar_du_group groups[2];
 
-               struct drm_property *alpha;
-               struct drm_property *colorkey;
-               struct drm_property *zpos;
-       } planes;
+       unsigned int dpad0_source;
+       struct rcar_du_lvdsenc *lvds[2];
 };
 
-int rcar_du_get(struct rcar_du_device *rcdu);
-void rcar_du_put(struct rcar_du_device *rcdu);
+static inline bool rcar_du_has(struct rcar_du_device *rcdu,
+                              unsigned int feature)
+{
+       return rcdu->info->features & feature;
+}
 
 static inline u32 rcar_du_read(struct rcar_du_device *rcdu, u32 reg)
 {
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
new file mode 100644 (file)
index 0000000..3daa7a1
--- /dev/null
@@ -0,0 +1,202 @@
+/*
+ * rcar_du_encoder.c  --  R-Car Display Unit Encoder
+ *
+ * Copyright (C) 2013 Renesas Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/export.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+
+#include "rcar_du_drv.h"
+#include "rcar_du_encoder.h"
+#include "rcar_du_kms.h"
+#include "rcar_du_lvdscon.h"
+#include "rcar_du_lvdsenc.h"
+#include "rcar_du_vgacon.h"
+
+/* -----------------------------------------------------------------------------
+ * Common connector functions
+ */
+
+struct drm_encoder *
+rcar_du_connector_best_encoder(struct drm_connector *connector)
+{
+       struct rcar_du_connector *rcon = to_rcar_connector(connector);
+
+       return &rcon->encoder->encoder;
+}
+
+/* -----------------------------------------------------------------------------
+ * Encoder
+ */
+
+static void rcar_du_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+       struct rcar_du_encoder *renc = to_rcar_encoder(encoder);
+
+       if (renc->lvds)
+               rcar_du_lvdsenc_dpms(renc->lvds, encoder->crtc, mode);
+}
+
+static bool rcar_du_encoder_mode_fixup(struct drm_encoder *encoder,
+                                      const struct drm_display_mode *mode,
+                                      struct drm_display_mode *adjusted_mode)
+{
+       struct rcar_du_encoder *renc = to_rcar_encoder(encoder);
+       const struct drm_display_mode *panel_mode;
+       struct drm_device *dev = encoder->dev;
+       struct drm_connector *connector;
+       bool found = false;
+
+       /* DAC encoders have currently no restriction on the mode. */
+       if (encoder->encoder_type == DRM_MODE_ENCODER_DAC)
+               return true;
+
+       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+               if (connector->encoder == encoder) {
+                       found = true;
+                       break;
+               }
+       }
+
+       if (!found) {
+               dev_dbg(dev->dev, "mode_fixup: no connector found\n");
+               return false;
+       }
+
+       if (list_empty(&connector->modes)) {
+               dev_dbg(dev->dev, "mode_fixup: empty modes list\n");
+               return false;
+       }
+
+       panel_mode = list_first_entry(&connector->modes,
+                                     struct drm_display_mode, head);
+
+       /* We're not allowed to modify the resolution. */
+       if (mode->hdisplay != panel_mode->hdisplay ||
+           mode->vdisplay != panel_mode->vdisplay)
+               return false;
+
+       /* The flat panel mode is fixed, just copy it to the adjusted mode. */
+       drm_mode_copy(adjusted_mode, panel_mode);
+
+       /* The internal LVDS encoder has a clock frequency operating range of
+        * 30MHz to 150MHz. Clamp the clock accordingly.
+        */
+       if (renc->lvds)
+               adjusted_mode->clock = clamp(adjusted_mode->clock,
+                                            30000, 150000);
+
+       return true;
+}
+
+static void rcar_du_encoder_mode_prepare(struct drm_encoder *encoder)
+{
+       struct rcar_du_encoder *renc = to_rcar_encoder(encoder);
+
+       if (renc->lvds)
+               rcar_du_lvdsenc_dpms(renc->lvds, encoder->crtc,
+                                    DRM_MODE_DPMS_OFF);
+}
+
+static void rcar_du_encoder_mode_commit(struct drm_encoder *encoder)
+{
+       struct rcar_du_encoder *renc = to_rcar_encoder(encoder);
+
+       if (renc->lvds)
+               rcar_du_lvdsenc_dpms(renc->lvds, encoder->crtc,
+                                    DRM_MODE_DPMS_ON);
+}
+
+static void rcar_du_encoder_mode_set(struct drm_encoder *encoder,
+                                    struct drm_display_mode *mode,
+                                    struct drm_display_mode *adjusted_mode)
+{
+       struct rcar_du_encoder *renc = to_rcar_encoder(encoder);
+
+       rcar_du_crtc_route_output(encoder->crtc, renc->output);
+}
+
+static const struct drm_encoder_helper_funcs encoder_helper_funcs = {
+       .dpms = rcar_du_encoder_dpms,
+       .mode_fixup = rcar_du_encoder_mode_fixup,
+       .prepare = rcar_du_encoder_mode_prepare,
+       .commit = rcar_du_encoder_mode_commit,
+       .mode_set = rcar_du_encoder_mode_set,
+};
+
+static const struct drm_encoder_funcs encoder_funcs = {
+       .destroy = drm_encoder_cleanup,
+};
+
+int rcar_du_encoder_init(struct rcar_du_device *rcdu,
+                        enum rcar_du_encoder_type type,
+                        enum rcar_du_output output,
+                        const struct rcar_du_encoder_data *data)
+{
+       struct rcar_du_encoder *renc;
+       unsigned int encoder_type;
+       int ret;
+
+       renc = devm_kzalloc(rcdu->dev, sizeof(*renc), GFP_KERNEL);
+       if (renc == NULL)
+               return -ENOMEM;
+
+       renc->output = output;
+
+       switch (output) {
+       case RCAR_DU_OUTPUT_LVDS0:
+               renc->lvds = rcdu->lvds[0];
+               break;
+
+       case RCAR_DU_OUTPUT_LVDS1:
+               renc->lvds = rcdu->lvds[1];
+               break;
+
+       default:
+               break;
+       }
+
+       switch (type) {
+       case RCAR_DU_ENCODER_VGA:
+               encoder_type = DRM_MODE_ENCODER_DAC;
+               break;
+       case RCAR_DU_ENCODER_LVDS:
+               encoder_type = DRM_MODE_ENCODER_LVDS;
+               break;
+       case RCAR_DU_ENCODER_NONE:
+       default:
+               /* No external encoder, use the internal encoder type. */
+               encoder_type = rcdu->info->routes[output].encoder_type;
+               break;
+       }
+
+       ret = drm_encoder_init(rcdu->ddev, &renc->encoder, &encoder_funcs,
+                              encoder_type);
+       if (ret < 0)
+               return ret;
+
+       drm_encoder_helper_add(&renc->encoder, &encoder_helper_funcs);
+
+       switch (encoder_type) {
+       case DRM_MODE_ENCODER_LVDS:
+               return rcar_du_lvds_connector_init(rcdu, renc,
+                                                  &data->connector.lvds.panel);
+
+       case DRM_MODE_ENCODER_DAC:
+               return rcar_du_vga_connector_init(rcdu, renc);
+
+       default:
+               return -EINVAL;
+       }
+}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.h b/drivers/gpu/drm/rcar-du/rcar_du_encoder.h
new file mode 100644 (file)
index 0000000..0e5a65e
--- /dev/null
@@ -0,0 +1,49 @@
+/*
+ * rcar_du_encoder.h  --  R-Car Display Unit Encoder
+ *
+ * Copyright (C) 2013 Renesas Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __RCAR_DU_ENCODER_H__
+#define __RCAR_DU_ENCODER_H__
+
+#include <linux/platform_data/rcar-du.h>
+
+#include <drm/drm_crtc.h>
+
+struct rcar_du_device;
+struct rcar_du_lvdsenc;
+
+struct rcar_du_encoder {
+       struct drm_encoder encoder;
+       enum rcar_du_output output;
+       struct rcar_du_lvdsenc *lvds;
+};
+
+#define to_rcar_encoder(e) \
+       container_of(e, struct rcar_du_encoder, encoder)
+
+struct rcar_du_connector {
+       struct drm_connector connector;
+       struct rcar_du_encoder *encoder;
+};
+
+#define to_rcar_connector(c) \
+       container_of(c, struct rcar_du_connector, connector)
+
+struct drm_encoder *
+rcar_du_connector_best_encoder(struct drm_connector *connector);
+
+int rcar_du_encoder_init(struct rcar_du_device *rcdu,
+                        enum rcar_du_encoder_type type,
+                        enum rcar_du_output output,
+                        const struct rcar_du_encoder_data *data);
+
+#endif /* __RCAR_DU_ENCODER_H__ */
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_group.c b/drivers/gpu/drm/rcar-du/rcar_du_group.c
new file mode 100644 (file)
index 0000000..eb53cd9
--- /dev/null
@@ -0,0 +1,187 @@
+/*
+ * rcar_du_group.c  --  R-Car Display Unit Channels Pair
+ *
+ * Copyright (C) 2013 Renesas Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+/*
+ * The R8A7779 DU is split in per-CRTC resources (scan-out engine, blending
+ * unit, timings generator, ...) and device-global resources (start/stop
+ * control, planes, ...) shared between the two CRTCs.
+ *
+ * The R8A7790 introduced a third CRTC with its own set of global resources.
+ * This would be modeled as two separate DU device instances if it wasn't for
+ * a handful of resources that are shared between the three CRTCs (mostly
+ * related to input and output routing). For this reason the R8A7790 DU must be
+ * modeled as a single device with three CRTCs, two sets of "semi-global"
+ * resources, and a few device-global resources.
+ *
+ * The rcar_du_group object is a driver specific object, without any real
+ * counterpart in the DU documentation, that models those semi-global resources.
+ */
+
+#include <linux/clk.h>
+#include <linux/io.h>
+
+#include "rcar_du_drv.h"
+#include "rcar_du_group.h"
+#include "rcar_du_regs.h"
+
+u32 rcar_du_group_read(struct rcar_du_group *rgrp, u32 reg)
+{
+       return rcar_du_read(rgrp->dev, rgrp->mmio_offset + reg);
+}
+
+void rcar_du_group_write(struct rcar_du_group *rgrp, u32 reg, u32 data)
+{
+       rcar_du_write(rgrp->dev, rgrp->mmio_offset + reg, data);
+}
+
+static void rcar_du_group_setup_defr8(struct rcar_du_group *rgrp)
+{
+       u32 defr8 = DEFR8_CODE | DEFR8_DEFE8;
+
+       if (!rcar_du_has(rgrp->dev, RCAR_DU_FEATURE_DEFR8))
+               return;
+
+       /* The DEFR8 register for the first group also controls RGB output
+        * routing to DPAD0
+        */
+       if (rgrp->index == 0)
+               defr8 |= DEFR8_DRGBS_DU(rgrp->dev->dpad0_source);
+
+       rcar_du_group_write(rgrp, DEFR8, defr8);
+}
+
+static void rcar_du_group_setup(struct rcar_du_group *rgrp)
+{
+       /* Enable extended features */
+       rcar_du_group_write(rgrp, DEFR, DEFR_CODE | DEFR_DEFE);
+       rcar_du_group_write(rgrp, DEFR2, DEFR2_CODE | DEFR2_DEFE2G);
+       rcar_du_group_write(rgrp, DEFR3, DEFR3_CODE | DEFR3_DEFE3);
+       rcar_du_group_write(rgrp, DEFR4, DEFR4_CODE);
+       rcar_du_group_write(rgrp, DEFR5, DEFR5_CODE | DEFR5_DEFE5);
+
+       rcar_du_group_setup_defr8(rgrp);
+
+       /* Use DS1PR and DS2PR to configure plane priorities and connect the
+        * superposition 0 to DU0 pins. DU1 pins will be configured dynamically.
+        */
+       rcar_du_group_write(rgrp, DORCR, DORCR_PG1D_DS1 | DORCR_DPRS);
+}
+
+/*
+ * rcar_du_group_get - Acquire a reference to the DU channels group
+ *
+ * Acquiring the first reference sets up core registers. A reference must be
+ * before accessing any hardware registers.
+ *
+ * This function must be called with the DRM mode_config lock held.
+ *
+ * Return 0 in case of success or a negative error code otherwise.
+ */
+int rcar_du_group_get(struct rcar_du_group *rgrp)
+{
+       if (rgrp->use_count)
+               goto done;
+
+       rcar_du_group_setup(rgrp);
+
+done:
+       rgrp->use_count++;
+       return 0;
+}
+
+/*
+ * rcar_du_group_put - Release a reference to the DU
+ *
+ * This function must be called with the DRM mode_config lock held.
+ */
+void rcar_du_group_put(struct rcar_du_group *rgrp)
+{
+       --rgrp->use_count;
+}
+
+static void __rcar_du_group_start_stop(struct rcar_du_group *rgrp, bool start)
+{
+       rcar_du_group_write(rgrp, DSYSR,
+               (rcar_du_group_read(rgrp, DSYSR) & ~(DSYSR_DRES | DSYSR_DEN)) |
+               (start ? DSYSR_DEN : DSYSR_DRES));
+}
+
+void rcar_du_group_start_stop(struct rcar_du_group *rgrp, bool start)
+{
+       /* Many of the configuration bits are only updated when the display
+        * reset (DRES) bit in DSYSR is set to 1, disabling *both* CRTCs. Some
+        * of those bits could be pre-configured, but others (especially the
+        * bits related to plane assignment to display timing controllers) need
+        * to be modified at runtime.
+        *
+        * Restart the display controller if a start is requested. Sorry for the
+        * flicker. It should be possible to move most of the "DRES-update" bits
+        * setup to driver initialization time and minimize the number of cases
+        * when the display controller will have to be restarted.
+        */
+       if (start) {
+               if (rgrp->used_crtcs++ != 0)
+                       __rcar_du_group_start_stop(rgrp, false);
+               __rcar_du_group_start_stop(rgrp, true);
+       } else {
+               if (--rgrp->used_crtcs == 0)
+                       __rcar_du_group_start_stop(rgrp, false);
+       }
+}
+
+void rcar_du_group_restart(struct rcar_du_group *rgrp)
+{
+       __rcar_du_group_start_stop(rgrp, false);
+       __rcar_du_group_start_stop(rgrp, true);
+}
+
+static int rcar_du_set_dpad0_routing(struct rcar_du_device *rcdu)
+{
+       int ret;
+
+       /* RGB output routing to DPAD0 is configured in the DEFR8 register of
+        * the first group. As this function can be called with the DU0 and DU1
+        * CRTCs disabled, we need to enable the first group clock before
+        * accessing the register.
+        */
+       ret = clk_prepare_enable(rcdu->crtcs[0].clock);
+       if (ret < 0)
+               return ret;
+
+       rcar_du_group_setup_defr8(&rcdu->groups[0]);
+
+       clk_disable_unprepare(rcdu->crtcs[0].clock);
+
+       return 0;
+}
+
+int rcar_du_group_set_routing(struct rcar_du_group *rgrp)
+{
+       struct rcar_du_crtc *crtc0 = &rgrp->dev->crtcs[rgrp->index * 2];
+       u32 dorcr = rcar_du_group_read(rgrp, DORCR);
+
+       dorcr &= ~(DORCR_PG2T | DORCR_DK2S | DORCR_PG2D_MASK);
+
+       /* Set the DPAD1 pins sources. Select CRTC 0 if explicitly requested and
+        * CRTC 1 in all other cases to avoid cloning CRTC 0 to DPAD0 and DPAD1
+        * by default.
+        */
+       if (crtc0->outputs & BIT(RCAR_DU_OUTPUT_DPAD1))
+               dorcr |= DORCR_PG2D_DS1;
+       else
+               dorcr |= DORCR_PG2T | DORCR_DK2S | DORCR_PG2D_DS2;
+
+       rcar_du_group_write(rgrp, DORCR, dorcr);
+
+       return rcar_du_set_dpad0_routing(rgrp->dev);
+}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_group.h b/drivers/gpu/drm/rcar-du/rcar_du_group.h
new file mode 100644 (file)
index 0000000..5025930
--- /dev/null
@@ -0,0 +1,50 @@
+/*
+ * rcar_du_group.h  --  R-Car Display Unit Planes and CRTCs Group
+ *
+ * Copyright (C) 2013 Renesas Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __RCAR_DU_GROUP_H__
+#define __RCAR_DU_GROUP_H__
+
+#include "rcar_du_plane.h"
+
+struct rcar_du_device;
+
+/*
+ * struct rcar_du_group - CRTCs and planes group
+ * @dev: the DU device
+ * @mmio_offset: registers offset in the device memory map
+ * @index: group index
+ * @use_count: number of users of the group (rcar_du_group_(get|put))
+ * @used_crtcs: number of CRTCs currently in use
+ * @planes: planes handled by the group
+ */
+struct rcar_du_group {
+       struct rcar_du_device *dev;
+       unsigned int mmio_offset;
+       unsigned int index;
+
+       unsigned int use_count;
+       unsigned int used_crtcs;
+
+       struct rcar_du_planes planes;
+};
+
+u32 rcar_du_group_read(struct rcar_du_group *rgrp, u32 reg);
+void rcar_du_group_write(struct rcar_du_group *rgrp, u32 reg, u32 data);
+
+int rcar_du_group_get(struct rcar_du_group *rgrp);
+void rcar_du_group_put(struct rcar_du_group *rgrp);
+void rcar_du_group_start_stop(struct rcar_du_group *rgrp, bool start);
+void rcar_du_group_restart(struct rcar_du_group *rgrp);
+int rcar_du_group_set_routing(struct rcar_du_group *rgrp);
+
+#endif /* __RCAR_DU_GROUP_H__ */
index d30c2e29bee2f122d8cff43681449fbe9e8ae46b..b31ac080c4a77ba6554ae281d0fd90754e8d633b 100644 (file)
 
 #include "rcar_du_crtc.h"
 #include "rcar_du_drv.h"
+#include "rcar_du_encoder.h"
 #include "rcar_du_kms.h"
-#include "rcar_du_lvds.h"
+#include "rcar_du_lvdsenc.h"
 #include "rcar_du_regs.h"
-#include "rcar_du_vga.h"
 
 /* -----------------------------------------------------------------------------
  * Format helpers
@@ -105,35 +105,6 @@ const struct rcar_du_format_info *rcar_du_format_info(u32 fourcc)
        return NULL;
 }
 
-/* -----------------------------------------------------------------------------
- * Common connector and encoder functions
- */
-
-struct drm_encoder *
-rcar_du_connector_best_encoder(struct drm_connector *connector)
-{
-       struct rcar_du_connector *rcon = to_rcar_connector(connector);
-
-       return &rcon->encoder->encoder;
-}
-
-void rcar_du_encoder_mode_prepare(struct drm_encoder *encoder)
-{
-}
-
-void rcar_du_encoder_mode_set(struct drm_encoder *encoder,
-                             struct drm_display_mode *mode,
-                             struct drm_display_mode *adjusted_mode)
-{
-       struct rcar_du_encoder *renc = to_rcar_encoder(encoder);
-
-       rcar_du_crtc_route_output(encoder->crtc, renc->output);
-}
-
-void rcar_du_encoder_mode_commit(struct drm_encoder *encoder)
-{
-}
-
 /* -----------------------------------------------------------------------------
  * Frame buffer
  */
@@ -141,11 +112,18 @@ void rcar_du_encoder_mode_commit(struct drm_encoder *encoder)
 int rcar_du_dumb_create(struct drm_file *file, struct drm_device *dev,
                        struct drm_mode_create_dumb *args)
 {
+       struct rcar_du_device *rcdu = dev->dev_private;
        unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
        unsigned int align;
 
-       /* The pitch must be aligned to a 16 pixels boundary. */
-       align = 16 * args->bpp / 8;
+       /* The R8A7779 DU requires a 16 pixels pitch alignment as documented,
+        * but the R8A7790 DU seems to require a 128 bytes pitch alignment.
+        */
+       if (rcar_du_has(rcdu, RCAR_DU_FEATURE_ALIGN_128B))
+               align = 128;
+       else
+               align = 16 * args->bpp / 8;
+
        args->pitch = roundup(max(args->pitch, min_pitch), align);
 
        return drm_gem_cma_dumb_create(file, dev, args);
@@ -155,6 +133,7 @@ static struct drm_framebuffer *
 rcar_du_fb_create(struct drm_device *dev, struct drm_file *file_priv,
                  struct drm_mode_fb_cmd2 *mode_cmd)
 {
+       struct rcar_du_device *rcdu = dev->dev_private;
        const struct rcar_du_format_info *format;
        unsigned int align;
 
@@ -165,7 +144,10 @@ rcar_du_fb_create(struct drm_device *dev, struct drm_file *file_priv,
                return ERR_PTR(-EINVAL);
        }
 
-       align = 16 * format->bpp / 8;
+       if (rcar_du_has(rcdu, RCAR_DU_FEATURE_ALIGN_128B))
+               align = 128;
+       else
+               align = 16 * format->bpp / 8;
 
        if (mode_cmd->pitches[0] & (align - 1) ||
            mode_cmd->pitches[0] >= 8192) {
@@ -185,81 +167,124 @@ rcar_du_fb_create(struct drm_device *dev, struct drm_file *file_priv,
        return drm_fb_cma_create(dev, file_priv, mode_cmd);
 }
 
+static void rcar_du_output_poll_changed(struct drm_device *dev)
+{
+       struct rcar_du_device *rcdu = dev->dev_private;
+
+       drm_fbdev_cma_hotplug_event(rcdu->fbdev);
+}
+
 static const struct drm_mode_config_funcs rcar_du_mode_config_funcs = {
        .fb_create = rcar_du_fb_create,
+       .output_poll_changed = rcar_du_output_poll_changed,
 };
 
 int rcar_du_modeset_init(struct rcar_du_device *rcdu)
 {
+       static const unsigned int mmio_offsets[] = {
+               DU0_REG_OFFSET, DU2_REG_OFFSET
+       };
+
        struct drm_device *dev = rcdu->ddev;
        struct drm_encoder *encoder;
+       struct drm_fbdev_cma *fbdev;
+       unsigned int num_groups;
        unsigned int i;
        int ret;
 
-       drm_mode_config_init(rcdu->ddev);
+       drm_mode_config_init(dev);
 
-       rcdu->ddev->mode_config.min_width = 0;
-       rcdu->ddev->mode_config.min_height = 0;
-       rcdu->ddev->mode_config.max_width = 4095;
-       rcdu->ddev->mode_config.max_height = 2047;
-       rcdu->ddev->mode_config.funcs = &rcar_du_mode_config_funcs;
+       dev->mode_config.min_width = 0;
+       dev->mode_config.min_height = 0;
+       dev->mode_config.max_width = 4095;
+       dev->mode_config.max_height = 2047;
+       dev->mode_config.funcs = &rcar_du_mode_config_funcs;
 
-       ret = rcar_du_plane_init(rcdu);
-       if (ret < 0)
-               return ret;
+       rcdu->num_crtcs = rcdu->info->num_crtcs;
+
+       /* Initialize the groups. */
+       num_groups = DIV_ROUND_UP(rcdu->num_crtcs, 2);
+
+       for (i = 0; i < num_groups; ++i) {
+               struct rcar_du_group *rgrp = &rcdu->groups[i];
+
+               rgrp->dev = rcdu;
+               rgrp->mmio_offset = mmio_offsets[i];
+               rgrp->index = i;
+
+               ret = rcar_du_planes_init(rgrp);
+               if (ret < 0)
+                       return ret;
+       }
+
+       /* Create the CRTCs. */
+       for (i = 0; i < rcdu->num_crtcs; ++i) {
+               struct rcar_du_group *rgrp = &rcdu->groups[i / 2];
 
-       for (i = 0; i < ARRAY_SIZE(rcdu->crtcs); ++i) {
-               ret = rcar_du_crtc_create(rcdu, i);
+               ret = rcar_du_crtc_create(rgrp, i);
                if (ret < 0)
                        return ret;
        }
 
-       rcdu->used_crtcs = 0;
-       rcdu->num_crtcs = i;
+       /* Initialize the encoders. */
+       ret = rcar_du_lvdsenc_init(rcdu);
+       if (ret < 0)
+               return ret;
 
        for (i = 0; i < rcdu->pdata->num_encoders; ++i) {
                const struct rcar_du_encoder_data *pdata =
                        &rcdu->pdata->encoders[i];
+               const struct rcar_du_output_routing *route =
+                       &rcdu->info->routes[pdata->output];
+
+               if (pdata->type == RCAR_DU_ENCODER_UNUSED)
+                       continue;
 
-               if (pdata->output >= ARRAY_SIZE(rcdu->crtcs)) {
+               if (pdata->output >= RCAR_DU_OUTPUT_MAX ||
+                   route->possible_crtcs == 0) {
                        dev_warn(rcdu->dev,
                                 "encoder %u references unexisting output %u, skipping\n",
                                 i, pdata->output);
                        continue;
                }
 
-               switch (pdata->encoder) {
-               case RCAR_DU_ENCODER_VGA:
-                       rcar_du_vga_init(rcdu, &pdata->u.vga, pdata->output);
-                       break;
-
-               case RCAR_DU_ENCODER_LVDS:
-                       rcar_du_lvds_init(rcdu, &pdata->u.lvds, pdata->output);
-                       break;
-
-               default:
-                       break;
-               }
+               rcar_du_encoder_init(rcdu, pdata->type, pdata->output, pdata);
        }
 
-       /* Set the possible CRTCs and possible clones. All encoders can be
-        * driven by the CRTC associated with the output they're connected to,
-        * as well as by CRTC 0.
+       /* Set the possible CRTCs and possible clones. There's always at least
+        * one way for all encoders to clone each other, set all bits in the
+        * possible clones field.
         */
        list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
                struct rcar_du_encoder *renc = to_rcar_encoder(encoder);
+               const struct rcar_du_output_routing *route =
+                       &rcdu->info->routes[renc->output];
 
-               encoder->possible_crtcs = (1 << 0) | (1 << renc->output);
-               encoder->possible_clones = 1 << 0;
+               encoder->possible_crtcs = route->possible_crtcs;
+               encoder->possible_clones = (1 << rcdu->pdata->num_encoders) - 1;
        }
 
-       ret = rcar_du_plane_register(rcdu);
-       if (ret < 0)
-               return ret;
+       /* Now that the CRTCs have been initialized register the planes. */
+       for (i = 0; i < num_groups; ++i) {
+               ret = rcar_du_planes_register(&rcdu->groups[i]);
+               if (ret < 0)
+                       return ret;
+       }
+
+       drm_kms_helper_poll_init(dev);
+
+       drm_helper_disable_unused_functions(dev);
+
+       fbdev = drm_fbdev_cma_init(dev, 32, dev->mode_config.num_crtc,
+                                  dev->mode_config.num_connector);
+       if (IS_ERR(fbdev))
+               return PTR_ERR(fbdev);
 
-       drm_kms_helper_poll_init(rcdu->ddev);
+#ifndef CONFIG_FRAMEBUFFER_CONSOLE
+       drm_fbdev_cma_restore_mode(fbdev);
+#endif
 
-       drm_helper_disable_unused_functions(rcdu->ddev);
+       rcdu->fbdev = fbdev;
 
        return 0;
 }
index dba472263486d9521028f4d1c265c196c1e1da47..5750e6af56557b36b4821953fced65372170fa09 100644 (file)
@@ -16,8 +16,9 @@
 
 #include <linux/types.h>
 
-#include <drm/drm_crtc.h>
-
+struct drm_file;
+struct drm_device;
+struct drm_mode_create_dumb;
 struct rcar_du_device;
 
 struct rcar_du_format_info {
@@ -28,32 +29,8 @@ struct rcar_du_format_info {
        unsigned int edf;
 };
 
-struct rcar_du_encoder {
-       struct drm_encoder encoder;
-       unsigned int output;
-};
-
-#define to_rcar_encoder(e) \
-       container_of(e, struct rcar_du_encoder, encoder)
-
-struct rcar_du_connector {
-       struct drm_connector connector;
-       struct rcar_du_encoder *encoder;
-};
-
-#define to_rcar_connector(c) \
-       container_of(c, struct rcar_du_connector, connector)
-
 const struct rcar_du_format_info *rcar_du_format_info(u32 fourcc);
 
-struct drm_encoder *
-rcar_du_connector_best_encoder(struct drm_connector *connector);
-void rcar_du_encoder_mode_prepare(struct drm_encoder *encoder);
-void rcar_du_encoder_mode_set(struct drm_encoder *encoder,
-                             struct drm_display_mode *mode,
-                             struct drm_display_mode *adjusted_mode);
-void rcar_du_encoder_mode_commit(struct drm_encoder *encoder);
-
 int rcar_du_modeset_init(struct rcar_du_device *rcdu);
 
 int rcar_du_dumb_create(struct drm_file *file, struct drm_device *dev,
similarity index 57%
rename from drivers/gpu/drm/rcar-du/rcar_du_lvds.c
rename to drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c
index 7aefe7267e1da789eccbe2dd520d8c5553415dfd..4f3ba93cd91dac906b7c06215dd5c7efc0be4987 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * rcar_du_lvds.c  --  R-Car Display Unit LVDS Encoder and Connector
+ * rcar_du_lvdscon.c  --  R-Car Display Unit LVDS Connector
  *
  * Copyright (C) 2013 Renesas Corporation
  *
@@ -16,8 +16,9 @@
 #include <drm/drm_crtc_helper.h>
 
 #include "rcar_du_drv.h"
+#include "rcar_du_encoder.h"
 #include "rcar_du_kms.h"
-#include "rcar_du_lvds.h"
+#include "rcar_du_lvdscon.h"
 
 struct rcar_du_lvds_connector {
        struct rcar_du_connector connector;
@@ -28,13 +29,10 @@ struct rcar_du_lvds_connector {
 #define to_rcar_lvds_connector(c) \
        container_of(c, struct rcar_du_lvds_connector, connector.connector)
 
-/* -----------------------------------------------------------------------------
- * Connector
- */
-
 static int rcar_du_lvds_connector_get_modes(struct drm_connector *connector)
 {
-       struct rcar_du_lvds_connector *lvdscon = to_rcar_lvds_connector(connector);
+       struct rcar_du_lvds_connector *lvdscon =
+               to_rcar_lvds_connector(connector);
        struct drm_display_mode *mode;
 
        mode = drm_mode_create(connector->dev);
@@ -90,9 +88,9 @@ static const struct drm_connector_funcs connector_funcs = {
        .destroy = rcar_du_lvds_connector_destroy,
 };
 
-static int rcar_du_lvds_connector_init(struct rcar_du_device *rcdu,
-                                      struct rcar_du_encoder *renc,
-                                      const struct rcar_du_panel_data *panel)
+int rcar_du_lvds_connector_init(struct rcar_du_device *rcdu,
+                               struct rcar_du_encoder *renc,
+                               const struct rcar_du_panel_data *panel)
 {
        struct rcar_du_lvds_connector *lvdscon;
        struct drm_connector *connector;
@@ -131,86 +129,3 @@ static int rcar_du_lvds_connector_init(struct rcar_du_device *rcdu,
 
        return 0;
 }
-
-/* -----------------------------------------------------------------------------
- * Encoder
- */
-
-static void rcar_du_lvds_encoder_dpms(struct drm_encoder *encoder, int mode)
-{
-}
-
-static bool rcar_du_lvds_encoder_mode_fixup(struct drm_encoder *encoder,
-                                          const struct drm_display_mode *mode,
-                                          struct drm_display_mode *adjusted_mode)
-{
-       const struct drm_display_mode *panel_mode;
-       struct drm_device *dev = encoder->dev;
-       struct drm_connector *connector;
-       bool found = false;
-
-       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-               if (connector->encoder == encoder) {
-                       found = true;
-                       break;
-               }
-       }
-
-       if (!found) {
-               dev_dbg(dev->dev, "mode_fixup: no connector found\n");
-               return false;
-       }
-
-       if (list_empty(&connector->modes)) {
-               dev_dbg(dev->dev, "mode_fixup: empty modes list\n");
-               return false;
-       }
-
-       panel_mode = list_first_entry(&connector->modes,
-                                     struct drm_display_mode, head);
-
-       /* We're not allowed to modify the resolution. */
-       if (mode->hdisplay != panel_mode->hdisplay ||
-           mode->vdisplay != panel_mode->vdisplay)
-               return false;
-
-       /* The flat panel mode is fixed, just copy it to the adjusted mode. */
-       drm_mode_copy(adjusted_mode, panel_mode);
-
-       return true;
-}
-
-static const struct drm_encoder_helper_funcs encoder_helper_funcs = {
-       .dpms = rcar_du_lvds_encoder_dpms,
-       .mode_fixup = rcar_du_lvds_encoder_mode_fixup,
-       .prepare = rcar_du_encoder_mode_prepare,
-       .commit = rcar_du_encoder_mode_commit,
-       .mode_set = rcar_du_encoder_mode_set,
-};
-
-static const struct drm_encoder_funcs encoder_funcs = {
-       .destroy = drm_encoder_cleanup,
-};
-
-int rcar_du_lvds_init(struct rcar_du_device *rcdu,
-                     const struct rcar_du_encoder_lvds_data *data,
-                     unsigned int output)
-{
-       struct rcar_du_encoder *renc;
-       int ret;
-
-       renc = devm_kzalloc(rcdu->dev, sizeof(*renc), GFP_KERNEL);
-       if (renc == NULL)
-               return -ENOMEM;
-
-       renc->output = output;
-
-       ret = drm_encoder_init(rcdu->ddev, &renc->encoder, &encoder_funcs,
-                              DRM_MODE_ENCODER_LVDS);
-       if (ret < 0)
-               return ret;
-
-       drm_encoder_helper_add(&renc->encoder, &encoder_helper_funcs);
-
-       return rcar_du_lvds_connector_init(rcdu, renc, &data->panel);
-}
similarity index 53%
rename from drivers/gpu/drm/rcar-du/rcar_du_lvds.h
rename to drivers/gpu/drm/rcar-du/rcar_du_lvdscon.h
index b47f8328e103cda382c3f1ee349b626e44ceda52..bff8683699cadd9e996dfa7c98f42dfed4c669c6 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * rcar_du_lvds.h  --  R-Car Display Unit LVDS Encoder and Connector
+ * rcar_du_lvdscon.h  --  R-Car Display Unit LVDS Connector
  *
  * Copyright (C) 2013 Renesas Corporation
  *
  * (at your option) any later version.
  */
 
-#ifndef __RCAR_DU_LVDS_H__
-#define __RCAR_DU_LVDS_H__
+#ifndef __RCAR_DU_LVDSCON_H__
+#define __RCAR_DU_LVDSCON_H__
 
 struct rcar_du_device;
-struct rcar_du_encoder_lvds_data;
+struct rcar_du_encoder;
+struct rcar_du_panel_data;
 
-int rcar_du_lvds_init(struct rcar_du_device *rcdu,
-                     const struct rcar_du_encoder_lvds_data *data,
-                     unsigned int output);
+int rcar_du_lvds_connector_init(struct rcar_du_device *rcdu,
+                               struct rcar_du_encoder *renc,
+                               const struct rcar_du_panel_data *panel);
 
-#endif /* __RCAR_DU_LVDS_H__ */
+#endif /* __RCAR_DU_LVDSCON_H__ */
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c
new file mode 100644 (file)
index 0000000..a0f6a17
--- /dev/null
@@ -0,0 +1,196 @@
+/*
+ * rcar_du_lvdsenc.c  --  R-Car Display Unit LVDS Encoder
+ *
+ * Copyright (C) 2013 Renesas Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "rcar_du_drv.h"
+#include "rcar_du_encoder.h"
+#include "rcar_du_lvdsenc.h"
+#include "rcar_lvds_regs.h"
+
+struct rcar_du_lvdsenc {
+       struct rcar_du_device *dev;
+
+       unsigned int index;
+       void __iomem *mmio;
+       struct clk *clock;
+       int dpms;
+
+       enum rcar_lvds_input input;
+};
+
+static void rcar_lvds_write(struct rcar_du_lvdsenc *lvds, u32 reg, u32 data)
+{
+       iowrite32(data, lvds->mmio + reg);
+}
+
+static int rcar_du_lvdsenc_start(struct rcar_du_lvdsenc *lvds,
+                                struct rcar_du_crtc *rcrtc)
+{
+       const struct drm_display_mode *mode = &rcrtc->crtc.mode;
+       unsigned int freq = mode->clock;
+       u32 lvdcr0;
+       u32 pllcr;
+       int ret;
+
+       if (lvds->dpms == DRM_MODE_DPMS_ON)
+               return 0;
+
+       ret = clk_prepare_enable(lvds->clock);
+       if (ret < 0)
+               return ret;
+
+       /* PLL clock configuration */
+       if (freq <= 38000)
+               pllcr = LVDPLLCR_CEEN | LVDPLLCR_COSEL | LVDPLLCR_PLLDLYCNT_38M;
+       else if (freq <= 60000)
+               pllcr = LVDPLLCR_CEEN | LVDPLLCR_COSEL | LVDPLLCR_PLLDLYCNT_60M;
+       else if (freq <= 121000)
+               pllcr = LVDPLLCR_CEEN | LVDPLLCR_COSEL | LVDPLLCR_PLLDLYCNT_121M;
+       else
+               pllcr = LVDPLLCR_PLLDLYCNT_150M;
+
+       rcar_lvds_write(lvds, LVDPLLCR, pllcr);
+
+       /* Hardcode the channels and control signals routing for now.
+        *
+        * HSYNC -> CTRL0
+        * VSYNC -> CTRL1
+        * DISP  -> CTRL2
+        * 0     -> CTRL3
+        *
+        * Channels 1 and 3 are switched on ES1.
+        */
+       rcar_lvds_write(lvds, LVDCTRCR, LVDCTRCR_CTR3SEL_ZERO |
+                       LVDCTRCR_CTR2SEL_DISP | LVDCTRCR_CTR1SEL_VSYNC |
+                       LVDCTRCR_CTR0SEL_HSYNC);
+       rcar_lvds_write(lvds, LVDCHCR,
+                       LVDCHCR_CHSEL_CH(0, 0) | LVDCHCR_CHSEL_CH(1, 3) |
+                       LVDCHCR_CHSEL_CH(2, 2) | LVDCHCR_CHSEL_CH(3, 1));
+
+       /* Select the input, hardcode mode 0, enable LVDS operation and turn
+        * bias circuitry on.
+        */
+       lvdcr0 = LVDCR0_BEN | LVDCR0_LVEN;
+       if (rcrtc->index == 2)
+               lvdcr0 |= LVDCR0_DUSEL;
+       rcar_lvds_write(lvds, LVDCR0, lvdcr0);
+
+       /* Turn all the channels on. */
+       rcar_lvds_write(lvds, LVDCR1, LVDCR1_CHSTBY(3) | LVDCR1_CHSTBY(2) |
+                       LVDCR1_CHSTBY(1) | LVDCR1_CHSTBY(0) | LVDCR1_CLKSTBY);
+
+       /* Turn the PLL on, wait for the startup delay, and turn the output
+        * on.
+        */
+       lvdcr0 |= LVDCR0_PLLEN;
+       rcar_lvds_write(lvds, LVDCR0, lvdcr0);
+
+       usleep_range(100, 150);
+
+       lvdcr0 |= LVDCR0_LVRES;
+       rcar_lvds_write(lvds, LVDCR0, lvdcr0);
+
+       lvds->dpms = DRM_MODE_DPMS_ON;
+       return 0;
+}
+
+static void rcar_du_lvdsenc_stop(struct rcar_du_lvdsenc *lvds)
+{
+       if (lvds->dpms == DRM_MODE_DPMS_OFF)
+               return;
+
+       rcar_lvds_write(lvds, LVDCR0, 0);
+       rcar_lvds_write(lvds, LVDCR1, 0);
+
+       clk_disable_unprepare(lvds->clock);
+
+       lvds->dpms = DRM_MODE_DPMS_OFF;
+}
+
+int rcar_du_lvdsenc_dpms(struct rcar_du_lvdsenc *lvds,
+                        struct drm_crtc *crtc, int mode)
+{
+       if (mode == DRM_MODE_DPMS_OFF) {
+               rcar_du_lvdsenc_stop(lvds);
+               return 0;
+       } else if (crtc) {
+               struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
+               return rcar_du_lvdsenc_start(lvds, rcrtc);
+       } else
+               return -EINVAL;
+}
+
+static int rcar_du_lvdsenc_get_resources(struct rcar_du_lvdsenc *lvds,
+                                        struct platform_device *pdev)
+{
+       struct resource *mem;
+       char name[7];
+
+       sprintf(name, "lvds.%u", lvds->index);
+
+       mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
+       if (mem == NULL) {
+               dev_err(&pdev->dev, "failed to get memory resource for %s\n",
+                       name);
+               return -EINVAL;
+       }
+
+       lvds->mmio = devm_ioremap_resource(&pdev->dev, mem);
+       if (lvds->mmio == NULL) {
+               dev_err(&pdev->dev, "failed to remap memory resource for %s\n",
+                       name);
+               return -ENOMEM;
+       }
+
+       lvds->clock = devm_clk_get(&pdev->dev, name);
+       if (IS_ERR(lvds->clock)) {
+               dev_err(&pdev->dev, "failed to get clock for %s\n", name);
+               return PTR_ERR(lvds->clock);
+       }
+
+       return 0;
+}
+
+int rcar_du_lvdsenc_init(struct rcar_du_device *rcdu)
+{
+       struct platform_device *pdev = to_platform_device(rcdu->dev);
+       struct rcar_du_lvdsenc *lvds;
+       unsigned int i;
+       int ret;
+
+       for (i = 0; i < rcdu->info->num_lvds; ++i) {
+               lvds = devm_kzalloc(&pdev->dev, sizeof(*lvds), GFP_KERNEL);
+               if (lvds == NULL) {
+                       dev_err(&pdev->dev, "failed to allocate private data\n");
+                       return -ENOMEM;
+               }
+
+               lvds->dev = rcdu;
+               lvds->index = i;
+               lvds->input = i ? RCAR_LVDS_INPUT_DU1 : RCAR_LVDS_INPUT_DU0;
+               lvds->dpms = DRM_MODE_DPMS_OFF;
+
+               ret = rcar_du_lvdsenc_get_resources(lvds, pdev);
+               if (ret < 0)
+                       return ret;
+
+               rcdu->lvds[i] = lvds;
+       }
+
+       return 0;
+}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.h b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.h
new file mode 100644 (file)
index 0000000..7051c6d
--- /dev/null
@@ -0,0 +1,46 @@
+/*
+ * rcar_du_lvdsenc.h  --  R-Car Display Unit LVDS Encoder
+ *
+ * Copyright (C) 2013 Renesas Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __RCAR_DU_LVDSENC_H__
+#define __RCAR_DU_LVDSENC_H__
+
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_data/rcar-du.h>
+
+struct rcar_drm_crtc;
+struct rcar_du_lvdsenc;
+
+enum rcar_lvds_input {
+       RCAR_LVDS_INPUT_DU0,
+       RCAR_LVDS_INPUT_DU1,
+       RCAR_LVDS_INPUT_DU2,
+};
+
+#if IS_ENABLED(CONFIG_DRM_RCAR_LVDS)
+int rcar_du_lvdsenc_init(struct rcar_du_device *rcdu);
+int rcar_du_lvdsenc_dpms(struct rcar_du_lvdsenc *lvds,
+                        struct drm_crtc *crtc, int mode);
+#else
+static inline int rcar_du_lvdsenc_init(struct rcar_du_device *rcdu)
+{
+       return 0;
+}
+static inline int rcar_du_lvdsenc_dpms(struct rcar_du_lvdsenc *lvds,
+                                      struct drm_crtc *crtc, int mode)
+{
+       return 0;
+}
+#endif
+
+#endif /* __RCAR_DU_LVDSENC_H__ */
index a65f81ddf51df67c146143ecaffa688aa5b947a6..53000644733f29c25a5db1edb5d24e5090bbc666 100644 (file)
@@ -36,90 +36,95 @@ static inline struct rcar_du_plane *to_rcar_plane(struct drm_plane *plane)
        return container_of(plane, struct rcar_du_kms_plane, plane)->hwplane;
 }
 
-static u32 rcar_du_plane_read(struct rcar_du_device *rcdu,
+static u32 rcar_du_plane_read(struct rcar_du_group *rgrp,
                              unsigned int index, u32 reg)
 {
-       return rcar_du_read(rcdu, index * PLANE_OFF + reg);
+       return rcar_du_read(rgrp->dev,
+                           rgrp->mmio_offset + index * PLANE_OFF + reg);
 }
 
-static void rcar_du_plane_write(struct rcar_du_device *rcdu,
+static void rcar_du_plane_write(struct rcar_du_group *rgrp,
                                unsigned int index, u32 reg, u32 data)
 {
-       rcar_du_write(rcdu, index * PLANE_OFF + reg, data);
+       rcar_du_write(rgrp->dev, rgrp->mmio_offset + index * PLANE_OFF + reg,
+                     data);
 }
 
 int rcar_du_plane_reserve(struct rcar_du_plane *plane,
                          const struct rcar_du_format_info *format)
 {
-       struct rcar_du_device *rcdu = plane->dev;
+       struct rcar_du_group *rgrp = plane->group;
        unsigned int i;
        int ret = -EBUSY;
 
-       mutex_lock(&rcdu->planes.lock);
+       mutex_lock(&rgrp->planes.lock);
 
-       for (i = 0; i < ARRAY_SIZE(rcdu->planes.planes); ++i) {
-               if (!(rcdu->planes.free & (1 << i)))
+       for (i = 0; i < ARRAY_SIZE(rgrp->planes.planes); ++i) {
+               if (!(rgrp->planes.free & (1 << i)))
                        continue;
 
                if (format->planes == 1 ||
-                   rcdu->planes.free & (1 << ((i + 1) % 8)))
+                   rgrp->planes.free & (1 << ((i + 1) % 8)))
                        break;
        }
 
-       if (i == ARRAY_SIZE(rcdu->planes.planes))
+       if (i == ARRAY_SIZE(rgrp->planes.planes))
                goto done;
 
-       rcdu->planes.free &= ~(1 << i);
+       rgrp->planes.free &= ~(1 << i);
        if (format->planes == 2)
-               rcdu->planes.free &= ~(1 << ((i + 1) % 8));
+               rgrp->planes.free &= ~(1 << ((i + 1) % 8));
 
        plane->hwindex = i;
 
        ret = 0;
 
 done:
-       mutex_unlock(&rcdu->planes.lock);
+       mutex_unlock(&rgrp->planes.lock);
        return ret;
 }
 
 void rcar_du_plane_release(struct rcar_du_plane *plane)
 {
-       struct rcar_du_device *rcdu = plane->dev;
+       struct rcar_du_group *rgrp = plane->group;
 
        if (plane->hwindex == -1)
                return;
 
-       mutex_lock(&rcdu->planes.lock);
-       rcdu->planes.free |= 1 << plane->hwindex;
+       mutex_lock(&rgrp->planes.lock);
+       rgrp->planes.free |= 1 << plane->hwindex;
        if (plane->format->planes == 2)
-               rcdu->planes.free |= 1 << ((plane->hwindex + 1) % 8);
-       mutex_unlock(&rcdu->planes.lock);
+               rgrp->planes.free |= 1 << ((plane->hwindex + 1) % 8);
+       mutex_unlock(&rgrp->planes.lock);
 
        plane->hwindex = -1;
 }
 
 void rcar_du_plane_update_base(struct rcar_du_plane *plane)
 {
-       struct rcar_du_device *rcdu = plane->dev;
+       struct rcar_du_group *rgrp = plane->group;
        unsigned int index = plane->hwindex;
 
-       /* According to the datasheet the Y position is expressed in raster line
-        * units. However, 32bpp formats seem to require a doubled Y position
-        * value. Similarly, for the second plane, NV12 and NV21 formats seem to
+       /* The Y position is expressed in raster line units and must be doubled
+        * for 32bpp formats, according to the R8A7790 datasheet. No mention of
+        * doubling the Y position is found in the R8A7779 datasheet, but the
+        * rule seems to apply there as well.
+        *
+        * Similarly, for the second plane, NV12 and NV21 formats seem to
         * require a halved Y position value.
         */
-       rcar_du_plane_write(rcdu, index, PnSPXR, plane->src_x);
-       rcar_du_plane_write(rcdu, index, PnSPYR, plane->src_y *
+       rcar_du_plane_write(rgrp, index, PnSPXR, plane->src_x);
+       rcar_du_plane_write(rgrp, index, PnSPYR, plane->src_y *
                            (plane->format->bpp == 32 ? 2 : 1));
-       rcar_du_plane_write(rcdu, index, PnDSA0R, plane->dma[0]);
+       rcar_du_plane_write(rgrp, index, PnDSA0R, plane->dma[0]);
 
        if (plane->format->planes == 2) {
                index = (index + 1) % 8;
 
-               rcar_du_plane_write(rcdu, index, PnSPXR, plane->src_x);
-               rcar_du_plane_write(rcdu, index, PnSPYR, plane->src_y *
+               rcar_du_plane_write(rgrp, index, PnSPXR, plane->src_x);
+               rcar_du_plane_write(rgrp, index, PnSPYR, plane->src_y *
                                    (plane->format->bpp == 16 ? 2 : 1) / 2);
-               rcar_du_plane_write(rcdu, index, PnDSA0R, plane->dma[1]);
+               rcar_du_plane_write(rgrp, index, PnDSA0R, plane->dma[1]);
        }
 }
 
@@ -140,7 +145,7 @@ void rcar_du_plane_compute_base(struct rcar_du_plane *plane,
 static void rcar_du_plane_setup_mode(struct rcar_du_plane *plane,
                                     unsigned int index)
 {
-       struct rcar_du_device *rcdu = plane->dev;
+       struct rcar_du_group *rgrp = plane->group;
        u32 colorkey;
        u32 pnmr;
 
@@ -154,9 +159,9 @@ static void rcar_du_plane_setup_mode(struct rcar_du_plane *plane,
         * enable alpha-blending regardless of the X bit value.
         */
        if (plane->format->fourcc != DRM_FORMAT_XRGB1555)
-               rcar_du_plane_write(rcdu, index, PnALPHAR, PnALPHAR_ABIT_0);
+               rcar_du_plane_write(rgrp, index, PnALPHAR, PnALPHAR_ABIT_0);
        else
-               rcar_du_plane_write(rcdu, index, PnALPHAR,
+               rcar_du_plane_write(rgrp, index, PnALPHAR,
                                    PnALPHAR_ABIT_X | plane->alpha);
 
        pnmr = PnMR_BM_MD | plane->format->pnmr;
@@ -172,14 +177,14 @@ static void rcar_du_plane_setup_mode(struct rcar_du_plane *plane,
        if (plane->format->fourcc == DRM_FORMAT_YUYV)
                pnmr |= PnMR_YCDF_YUYV;
 
-       rcar_du_plane_write(rcdu, index, PnMR, pnmr);
+       rcar_du_plane_write(rgrp, index, PnMR, pnmr);
 
        switch (plane->format->fourcc) {
        case DRM_FORMAT_RGB565:
                colorkey = ((plane->colorkey & 0xf80000) >> 8)
                         | ((plane->colorkey & 0x00fc00) >> 5)
                         | ((plane->colorkey & 0x0000f8) >> 3);
-               rcar_du_plane_write(rcdu, index, PnTC2R, colorkey);
+               rcar_du_plane_write(rgrp, index, PnTC2R, colorkey);
                break;
 
        case DRM_FORMAT_ARGB1555:
@@ -187,12 +192,12 @@ static void rcar_du_plane_setup_mode(struct rcar_du_plane *plane,
                colorkey = ((plane->colorkey & 0xf80000) >> 9)
                         | ((plane->colorkey & 0x00f800) >> 6)
                         | ((plane->colorkey & 0x0000f8) >> 3);
-               rcar_du_plane_write(rcdu, index, PnTC2R, colorkey);
+               rcar_du_plane_write(rgrp, index, PnTC2R, colorkey);
                break;
 
        case DRM_FORMAT_XRGB8888:
        case DRM_FORMAT_ARGB8888:
-               rcar_du_plane_write(rcdu, index, PnTC3R,
+               rcar_du_plane_write(rgrp, index, PnTC3R,
                                    PnTC3R_CODE | (plane->colorkey & 0xffffff));
                break;
        }
@@ -201,7 +206,7 @@ static void rcar_du_plane_setup_mode(struct rcar_du_plane *plane,
 static void __rcar_du_plane_setup(struct rcar_du_plane *plane,
                                  unsigned int index)
 {
-       struct rcar_du_device *rcdu = plane->dev;
+       struct rcar_du_group *rgrp = plane->group;
        u32 ddcr2 = PnDDCR2_CODE;
        u32 ddcr4;
        u32 mwr;
@@ -211,7 +216,7 @@ static void __rcar_du_plane_setup(struct rcar_du_plane *plane,
         * The data format is selected by the DDDF field in PnMR and the EDF
         * field in DDCR4.
         */
-       ddcr4 = rcar_du_plane_read(rcdu, index, PnDDCR4);
+       ddcr4 = rcar_du_plane_read(rgrp, index, PnDDCR4);
        ddcr4 &= ~PnDDCR4_EDF_MASK;
        ddcr4 |= plane->format->edf | PnDDCR4_CODE;
 
@@ -232,8 +237,8 @@ static void __rcar_du_plane_setup(struct rcar_du_plane *plane,
                }
        }
 
-       rcar_du_plane_write(rcdu, index, PnDDCR2, ddcr2);
-       rcar_du_plane_write(rcdu, index, PnDDCR4, ddcr4);
+       rcar_du_plane_write(rgrp, index, PnDDCR2, ddcr2);
+       rcar_du_plane_write(rgrp, index, PnDDCR4, ddcr4);
 
        /* Memory pitch (expressed in pixels) */
        if (plane->format->planes == 2)
@@ -241,19 +246,19 @@ static void __rcar_du_plane_setup(struct rcar_du_plane *plane,
        else
                mwr = plane->pitch * 8 / plane->format->bpp;
 
-       rcar_du_plane_write(rcdu, index, PnMWR, mwr);
+       rcar_du_plane_write(rgrp, index, PnMWR, mwr);
 
        /* Destination position and size */
-       rcar_du_plane_write(rcdu, index, PnDSXR, plane->width);
-       rcar_du_plane_write(rcdu, index, PnDSYR, plane->height);
-       rcar_du_plane_write(rcdu, index, PnDPXR, plane->dst_x);
-       rcar_du_plane_write(rcdu, index, PnDPYR, plane->dst_y);
+       rcar_du_plane_write(rgrp, index, PnDSXR, plane->width);
+       rcar_du_plane_write(rgrp, index, PnDSYR, plane->height);
+       rcar_du_plane_write(rgrp, index, PnDPXR, plane->dst_x);
+       rcar_du_plane_write(rgrp, index, PnDPYR, plane->dst_y);
 
        /* Wrap-around and blinking, disabled */
-       rcar_du_plane_write(rcdu, index, PnWASPR, 0);
-       rcar_du_plane_write(rcdu, index, PnWAMWR, 4095);
-       rcar_du_plane_write(rcdu, index, PnBTR, 0);
-       rcar_du_plane_write(rcdu, index, PnMLR, 0);
+       rcar_du_plane_write(rgrp, index, PnWASPR, 0);
+       rcar_du_plane_write(rgrp, index, PnWAMWR, 4095);
+       rcar_du_plane_write(rgrp, index, PnBTR, 0);
+       rcar_du_plane_write(rgrp, index, PnMLR, 0);
 }
 
 void rcar_du_plane_setup(struct rcar_du_plane *plane)
@@ -273,7 +278,7 @@ rcar_du_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
                       uint32_t src_w, uint32_t src_h)
 {
        struct rcar_du_plane *rplane = to_rcar_plane(plane);
-       struct rcar_du_device *rcdu = plane->dev->dev_private;
+       struct rcar_du_device *rcdu = rplane->group->dev;
        const struct rcar_du_format_info *format;
        unsigned int nplanes;
        int ret;
@@ -316,26 +321,25 @@ rcar_du_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
        rcar_du_plane_compute_base(rplane, fb);
        rcar_du_plane_setup(rplane);
 
-       mutex_lock(&rcdu->planes.lock);
+       mutex_lock(&rplane->group->planes.lock);
        rplane->enabled = true;
        rcar_du_crtc_update_planes(rplane->crtc);
-       mutex_unlock(&rcdu->planes.lock);
+       mutex_unlock(&rplane->group->planes.lock);
 
        return 0;
 }
 
 static int rcar_du_plane_disable(struct drm_plane *plane)
 {
-       struct rcar_du_device *rcdu = plane->dev->dev_private;
        struct rcar_du_plane *rplane = to_rcar_plane(plane);
 
        if (!rplane->enabled)
                return 0;
 
-       mutex_lock(&rcdu->planes.lock);
+       mutex_lock(&rplane->group->planes.lock);
        rplane->enabled = false;
        rcar_du_crtc_update_planes(rplane->crtc);
-       mutex_unlock(&rcdu->planes.lock);
+       mutex_unlock(&rplane->group->planes.lock);
 
        rcar_du_plane_release(rplane);
 
@@ -377,9 +381,7 @@ static void rcar_du_plane_set_colorkey(struct rcar_du_plane *plane,
 static void rcar_du_plane_set_zpos(struct rcar_du_plane *plane,
                                   unsigned int zpos)
 {
-       struct rcar_du_device *rcdu = plane->dev;
-
-       mutex_lock(&rcdu->planes.lock);
+       mutex_lock(&plane->group->planes.lock);
        if (plane->zpos == zpos)
                goto done;
 
@@ -390,21 +392,21 @@ static void rcar_du_plane_set_zpos(struct rcar_du_plane *plane,
        rcar_du_crtc_update_planes(plane->crtc);
 
 done:
-       mutex_unlock(&rcdu->planes.lock);
+       mutex_unlock(&plane->group->planes.lock);
 }
 
 static int rcar_du_plane_set_property(struct drm_plane *plane,
                                      struct drm_property *property,
                                      uint64_t value)
 {
-       struct rcar_du_device *rcdu = plane->dev->dev_private;
        struct rcar_du_plane *rplane = to_rcar_plane(plane);
+       struct rcar_du_group *rgrp = rplane->group;
 
-       if (property == rcdu->planes.alpha)
+       if (property == rgrp->planes.alpha)
                rcar_du_plane_set_alpha(rplane, value);
-       else if (property == rcdu->planes.colorkey)
+       else if (property == rgrp->planes.colorkey)
                rcar_du_plane_set_colorkey(rplane, value);
-       else if (property == rcdu->planes.zpos)
+       else if (property == rgrp->planes.zpos)
                rcar_du_plane_set_zpos(rplane, value);
        else
                return -EINVAL;
@@ -432,37 +434,39 @@ static const uint32_t formats[] = {
        DRM_FORMAT_NV16,
 };
 
-int rcar_du_plane_init(struct rcar_du_device *rcdu)
+int rcar_du_planes_init(struct rcar_du_group *rgrp)
 {
+       struct rcar_du_planes *planes = &rgrp->planes;
+       struct rcar_du_device *rcdu = rgrp->dev;
        unsigned int i;
 
-       mutex_init(&rcdu->planes.lock);
-       rcdu->planes.free = 0xff;
+       mutex_init(&planes->lock);
+       planes->free = 0xff;
 
-       rcdu->planes.alpha =
+       planes->alpha =
                drm_property_create_range(rcdu->ddev, 0, "alpha", 0, 255);
-       if (rcdu->planes.alpha == NULL)
+       if (planes->alpha == NULL)
                return -ENOMEM;
 
        /* The color key is expressed as an RGB888 triplet stored in a 32-bit
         * integer in XRGB8888 format. Bit 24 is used as a flag to disable (0)
         * or enable source color keying (1).
         */
-       rcdu->planes.colorkey =
+       planes->colorkey =
                drm_property_create_range(rcdu->ddev, 0, "colorkey",
                                          0, 0x01ffffff);
-       if (rcdu->planes.colorkey == NULL)
+       if (planes->colorkey == NULL)
                return -ENOMEM;
 
-       rcdu->planes.zpos =
+       planes->zpos =
                drm_property_create_range(rcdu->ddev, 0, "zpos", 1, 7);
-       if (rcdu->planes.zpos == NULL)
+       if (planes->zpos == NULL)
                return -ENOMEM;
 
-       for (i = 0; i < ARRAY_SIZE(rcdu->planes.planes); ++i) {
-               struct rcar_du_plane *plane = &rcdu->planes.planes[i];
+       for (i = 0; i < ARRAY_SIZE(planes->planes); ++i) {
+               struct rcar_du_plane *plane = &planes->planes[i];
 
-               plane->dev = rcdu;
+               plane->group = rgrp;
                plane->hwindex = -1;
                plane->alpha = 255;
                plane->colorkey = RCAR_DU_COLORKEY_NONE;
@@ -472,11 +476,16 @@ int rcar_du_plane_init(struct rcar_du_device *rcdu)
        return 0;
 }
 
-int rcar_du_plane_register(struct rcar_du_device *rcdu)
+int rcar_du_planes_register(struct rcar_du_group *rgrp)
 {
+       struct rcar_du_planes *planes = &rgrp->planes;
+       struct rcar_du_device *rcdu = rgrp->dev;
+       unsigned int crtcs;
        unsigned int i;
        int ret;
 
+       crtcs = ((1 << rcdu->num_crtcs) - 1) & (3 << (2 * rgrp->index));
+
        for (i = 0; i < RCAR_DU_NUM_KMS_PLANES; ++i) {
                struct rcar_du_kms_plane *plane;
 
@@ -484,23 +493,22 @@ int rcar_du_plane_register(struct rcar_du_device *rcdu)
                if (plane == NULL)
                        return -ENOMEM;
 
-               plane->hwplane = &rcdu->planes.planes[i + 2];
+               plane->hwplane = &planes->planes[i + 2];
                plane->hwplane->zpos = 1;
 
-               ret = drm_plane_init(rcdu->ddev, &plane->plane,
-                                    (1 << rcdu->num_crtcs) - 1,
+               ret = drm_plane_init(rcdu->ddev, &plane->plane, crtcs,
                                     &rcar_du_plane_funcs, formats,
                                     ARRAY_SIZE(formats), false);
                if (ret < 0)
                        return ret;
 
                drm_object_attach_property(&plane->plane.base,
-                                          rcdu->planes.alpha, 255);
+                                          planes->alpha, 255);
                drm_object_attach_property(&plane->plane.base,
-                                          rcdu->planes.colorkey,
+                                          planes->colorkey,
                                           RCAR_DU_COLORKEY_NONE);
                drm_object_attach_property(&plane->plane.base,
-                                          rcdu->planes.zpos, 1);
+                                          planes->zpos, 1);
        }
 
        return 0;
index 5397dba2fe5795fe64844efaedbbfe7ead226435..f94f9ce84998782c9720661d068b78dfacc65dc3 100644 (file)
 #ifndef __RCAR_DU_PLANE_H__
 #define __RCAR_DU_PLANE_H__
 
-struct drm_crtc;
-struct drm_framebuffer;
-struct rcar_du_device;
+#include <linux/mutex.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+
 struct rcar_du_format_info;
+struct rcar_du_group;
 
 /* The RCAR DU has 8 hardware planes, shared between KMS planes and CRTCs. As
  * using KMS planes requires at least one of the CRTCs being enabled, no more
@@ -30,7 +33,7 @@ struct rcar_du_format_info;
 #define RCAR_DU_NUM_SW_PLANES          9
 
 struct rcar_du_plane {
-       struct rcar_du_device *dev;
+       struct rcar_du_group *group;
        struct drm_crtc *crtc;
 
        bool enabled;
@@ -54,8 +57,19 @@ struct rcar_du_plane {
        unsigned int dst_y;
 };
 
-int rcar_du_plane_init(struct rcar_du_device *rcdu);
-int rcar_du_plane_register(struct rcar_du_device *rcdu);
+struct rcar_du_planes {
+       struct rcar_du_plane planes[RCAR_DU_NUM_SW_PLANES];
+       unsigned int free;
+       struct mutex lock;
+
+       struct drm_property *alpha;
+       struct drm_property *colorkey;
+       struct drm_property *zpos;
+};
+
+int rcar_du_planes_init(struct rcar_du_group *rgrp);
+int rcar_du_planes_register(struct rcar_du_group *rgrp);
+
 void rcar_du_plane_setup(struct rcar_du_plane *plane);
 void rcar_du_plane_update_base(struct rcar_du_plane *plane);
 void rcar_du_plane_compute_base(struct rcar_du_plane *plane,
index 69f21f19b51cbc14914d00079943b2894440a8d5..73f7347f740bd1f94707990a8d9115fcf57fc518 100644 (file)
 #ifndef __RCAR_DU_REGS_H__
 #define __RCAR_DU_REGS_H__
 
-#define DISP2_REG_OFFSET        0x30000
+#define DU0_REG_OFFSET         0x00000
+#define DU1_REG_OFFSET         0x30000
+#define DU2_REG_OFFSET         0x40000
 
 /* -----------------------------------------------------------------------------
  * Display Control Registers
  */
 
 #define DSYSR                  0x00000 /* display 1 */
-#define D2SYSR                 0x30000 /* display 2 */
 #define DSYSR_ILTS             (1 << 29)
 #define DSYSR_DSEC             (1 << 20)
 #define DSYSR_IUPD             (1 << 16)
@@ -35,7 +36,6 @@
 #define DSYSR_SCM_INT_VIDEO    (3 << 4)
 
 #define DSMR                   0x00004
-#define D2SMR                  0x30004
 #define DSMR_VSPM              (1 << 28)
 #define DSMR_ODPM              (1 << 27)
 #define DSMR_DIPM_DISP         (0 << 25)
@@ -60,7 +60,6 @@
 #define DSMR_CSY_MASK          (3 << 6)
 
 #define DSSR                   0x00008
-#define D2SSR                  0x30008
 #define DSSR_VC1FB_DSA0                (0 << 30)
 #define DSSR_VC1FB_DSA1                (1 << 30)
 #define DSSR_VC1FB_DSA2                (2 << 30)
@@ -80,7 +79,6 @@
 #define DSSR_ADC(n)            (1 << ((n)-1))
 
 #define DSRCR                  0x0000c
-#define D2SRCR                 0x3000c
 #define DSRCR_TVCL             (1 << 15)
 #define DSRCR_FRCL             (1 << 14)
 #define DSRCR_VBCL             (1 << 11)
@@ -90,7 +88,6 @@
 #define DSRCR_MASK             0x0000cbff
 
 #define DIER                   0x00010
-#define D2IER                  0x30010
 #define DIER_TVE               (1 << 15)
 #define DIER_FRE               (1 << 14)
 #define DIER_VBE               (1 << 11)
 #define DPPR_BPP32             (DPPR_BPP32_P1 | DPPR_BPP32_P2) /* plane1 & 2 */
 
 #define DEFR                   0x00020
-#define D2EFR                  0x30020
 #define DEFR_CODE              (0x7773 << 16)
 #define DEFR_EXSL              (1 << 12)
 #define DEFR_EXVL              (1 << 11)
 #define DCPCR_DCE              (1 << 0)
 
 #define DEFR2                  0x00034
-#define D2EFR2                 0x30034
 #define DEFR2_CODE             (0x7775 << 16)
 #define DEFR2_DEFE2G           (1 << 0)
 
 #define DEFR3                  0x00038
-#define D2EFR3                 0x30038
 #define DEFR3_CODE             (0x7776 << 16)
 #define DEFR3_EVDA             (1 << 14)
 #define DEFR3_EVDM_1           (1 << 12)
 #define DEFR3_DEFE3            (1 << 0)
 
 #define DEFR4                  0x0003c
-#define D2EFR4                 0x3003c
 #define DEFR4_CODE             (0x7777 << 16)
 #define DEFR4_LRUO             (1 << 5)
 #define DEFR4_SPCE             (1 << 4)
 #define DEFR6_MLOS1            (1 << 2)
 #define DEFR6_DEFAULT          (DEFR6_CODE | DEFR6_TCNE2)
 
+/* -----------------------------------------------------------------------------
+ * R8A7790-only Control Registers
+ */
+
+#define DD1SSR                 0x20008
+#define DD1SSR_TVR             (1 << 15)
+#define DD1SSR_FRM             (1 << 14)
+#define DD1SSR_BUF             (1 << 12)
+#define DD1SSR_VBK             (1 << 11)
+#define DD1SSR_RINT            (1 << 9)
+#define DD1SSR_HBK             (1 << 8)
+#define DD1SSR_ADC(n)          (1 << ((n)-1))
+
+#define DD1SRCR                        0x2000c
+#define DD1SRCR_TVR            (1 << 15)
+#define DD1SRCR_FRM            (1 << 14)
+#define DD1SRCR_BUF            (1 << 12)
+#define DD1SRCR_VBK            (1 << 11)
+#define DD1SRCR_RINT           (1 << 9)
+#define DD1SRCR_HBK            (1 << 8)
+#define DD1SRCR_ADC(n)         (1 << ((n)-1))
+
+#define DD1IER                 0x20010
+#define DD1IER_TVR             (1 << 15)
+#define DD1IER_FRM             (1 << 14)
+#define DD1IER_BUF             (1 << 12)
+#define DD1IER_VBK             (1 << 11)
+#define DD1IER_RINT            (1 << 9)
+#define DD1IER_HBK             (1 << 8)
+#define DD1IER_ADC(n)          (1 << ((n)-1))
+
+#define DEFR8                  0x20020
+#define DEFR8_CODE             (0x7790 << 16)
+#define DEFR8_VSCS             (1 << 6)
+#define DEFR8_DRGBS_DU(n)      ((n) << 4)
+#define DEFR8_DRGBS_MASK       (3 << 4)
+#define DEFR8_DEFE8            (1 << 0)
+
+#define DOFLR                  0x20024
+#define DOFLR_CODE             (0x7790 << 16)
+#define DOFLR_HSYCFL1          (1 << 13)
+#define DOFLR_VSYCFL1          (1 << 12)
+#define DOFLR_ODDFL1           (1 << 11)
+#define DOFLR_DISPFL1          (1 << 10)
+#define DOFLR_CDEFL1           (1 << 9)
+#define DOFLR_RGBFL1           (1 << 8)
+#define DOFLR_HSYCFL0          (1 << 5)
+#define DOFLR_VSYCFL0          (1 << 4)
+#define DOFLR_ODDFL0           (1 << 3)
+#define DOFLR_DISPFL0          (1 << 2)
+#define DOFLR_CDEFL0           (1 << 1)
+#define DOFLR_RGBFL0           (1 << 0)
+
+#define DIDSR                  0x20028
+#define DIDSR_CODE             (0x7790 << 16)
+#define DIDSR_LCDS_DCLKIN(n)   (0 << (8 + (n) * 2))
+#define DIDSR_LCDS_LVDS0(n)    (2 << (8 + (n) * 2))
+#define DIDSR_LCDS_LVDS1(n)    (3 << (8 + (n) * 2))
+#define DIDSR_LCDS_MASK(n)     (3 << (8 + (n) * 2))
+#define DIDSR_PCDS_CLK(n, clk) (clk << ((n) * 2))
+#define DIDSR_PCDS_MASK(n)     (3 << ((n) * 2))
+
 /* -----------------------------------------------------------------------------
  * Display Timing Generation Registers
  */
 #define APnMR_BM_AD            (2 << 4)        /* Auto Display Change Mode */
 
 #define APnMWR                 0x0a104
+
+#define APnDSXR                        0x0a110
+#define APnDSYR                        0x0a114
+#define APnDPXR                        0x0a118
+#define APnDPYR                        0x0a11c
+
 #define APnDSA0R               0x0a120
 #define APnDSA1R               0x0a124
 #define APnDSA2R               0x0a128
+
+#define APnSPXR                        0x0a130
+#define APnSPYR                        0x0a134
+#define APnWASPR               0x0a138
+#define APnWAMWR               0x0a13c
+
+#define APnBTR                 0x0a140
+
 #define APnMLR                 0x0a150
+#define APnSWAPR               0x0a180
 
 /* -----------------------------------------------------------------------------
  * Display Capture Registers
  */
 
+#define DCMR                   0x0c100
 #define DCMWR                  0x0c104
-#define DC2MWR                 0x0c204
 #define DCSAR                  0x0c120
-#define DC2SAR                 0x0c220
 #define DCMLR                  0x0c150
-#define DC2MLR                 0x0c250
 
 /* -----------------------------------------------------------------------------
  * Color Palette Registers
similarity index 59%
rename from drivers/gpu/drm/rcar-du/rcar_du_vga.c
rename to drivers/gpu/drm/rcar-du/rcar_du_vgacon.c
index 327289ec380db8698018e4eb55d0f749cdfc7d9f..41d563adfeaa16321f5097af481886f0185a9605 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * rcar_du_vga.c  --  R-Car Display Unit VGA DAC and Connector
+ * rcar_du_vgacon.c  --  R-Car Display Unit VGA Connector
  *
  * Copyright (C) 2013 Renesas Corporation
  *
 #include <drm/drm_crtc_helper.h>
 
 #include "rcar_du_drv.h"
+#include "rcar_du_encoder.h"
 #include "rcar_du_kms.h"
-#include "rcar_du_vga.h"
-
-/* -----------------------------------------------------------------------------
- * Connector
- */
+#include "rcar_du_vgacon.h"
 
 static int rcar_du_vga_connector_get_modes(struct drm_connector *connector)
 {
@@ -49,7 +46,7 @@ static void rcar_du_vga_connector_destroy(struct drm_connector *connector)
 static enum drm_connector_status
 rcar_du_vga_connector_detect(struct drm_connector *connector, bool force)
 {
-       return connector_status_unknown;
+       return connector_status_connected;
 }
 
 static const struct drm_connector_funcs connector_funcs = {
@@ -59,8 +56,8 @@ static const struct drm_connector_funcs connector_funcs = {
        .destroy = rcar_du_vga_connector_destroy,
 };
 
-static int rcar_du_vga_connector_init(struct rcar_du_device *rcdu,
-                                     struct rcar_du_encoder *renc)
+int rcar_du_vga_connector_init(struct rcar_du_device *rcdu,
+                              struct rcar_du_encoder *renc)
 {
        struct rcar_du_connector *rcon;
        struct drm_connector *connector;
@@ -97,53 +94,3 @@ static int rcar_du_vga_connector_init(struct rcar_du_device *rcdu,
 
        return 0;
 }
-
-/* -----------------------------------------------------------------------------
- * Encoder
- */
-
-static void rcar_du_vga_encoder_dpms(struct drm_encoder *encoder, int mode)
-{
-}
-
-static bool rcar_du_vga_encoder_mode_fixup(struct drm_encoder *encoder,
-                                          const struct drm_display_mode *mode,
-                                          struct drm_display_mode *adjusted_mode)
-{
-       return true;
-}
-
-static const struct drm_encoder_helper_funcs encoder_helper_funcs = {
-       .dpms = rcar_du_vga_encoder_dpms,
-       .mode_fixup = rcar_du_vga_encoder_mode_fixup,
-       .prepare = rcar_du_encoder_mode_prepare,
-       .commit = rcar_du_encoder_mode_commit,
-       .mode_set = rcar_du_encoder_mode_set,
-};
-
-static const struct drm_encoder_funcs encoder_funcs = {
-       .destroy = drm_encoder_cleanup,
-};
-
-int rcar_du_vga_init(struct rcar_du_device *rcdu,
-                    const struct rcar_du_encoder_vga_data *data,
-                    unsigned int output)
-{
-       struct rcar_du_encoder *renc;
-       int ret;
-
-       renc = devm_kzalloc(rcdu->dev, sizeof(*renc), GFP_KERNEL);
-       if (renc == NULL)
-               return -ENOMEM;
-
-       renc->output = output;
-
-       ret = drm_encoder_init(rcdu->ddev, &renc->encoder, &encoder_funcs,
-                              DRM_MODE_ENCODER_DAC);
-       if (ret < 0)
-               return ret;
-
-       drm_encoder_helper_add(&renc->encoder, &encoder_helper_funcs);
-
-       return rcar_du_vga_connector_init(rcdu, renc);
-}
similarity index 56%
rename from drivers/gpu/drm/rcar-du/rcar_du_vga.h
rename to drivers/gpu/drm/rcar-du/rcar_du_vgacon.h
index 66b4d2d7190d349db3442508301e402eea881f55..b12b0cf7f117671d585394109a8c274cd553d8e9 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * rcar_du_vga.h  --  R-Car Display Unit VGA DAC and Connector
+ * rcar_du_vgacon.h  --  R-Car Display Unit VGA Connector
  *
  * Copyright (C) 2013 Renesas Corporation
  *
  * (at your option) any later version.
  */
 
-#ifndef __RCAR_DU_VGA_H__
-#define __RCAR_DU_VGA_H__
+#ifndef __RCAR_DU_VGACON_H__
+#define __RCAR_DU_VGACON_H__
 
 struct rcar_du_device;
-struct rcar_du_encoder_vga_data;
+struct rcar_du_encoder;
 
-int rcar_du_vga_init(struct rcar_du_device *rcdu,
-                    const struct rcar_du_encoder_vga_data *data,
-                    unsigned int output);
+int rcar_du_vga_connector_init(struct rcar_du_device *rcdu,
+                              struct rcar_du_encoder *renc);
 
-#endif /* __RCAR_DU_VGA_H__ */
+#endif /* __RCAR_DU_VGACON_H__ */
diff --git a/drivers/gpu/drm/rcar-du/rcar_lvds_regs.h b/drivers/gpu/drm/rcar-du/rcar_lvds_regs.h
new file mode 100644 (file)
index 0000000..77cf928
--- /dev/null
@@ -0,0 +1,69 @@
+/*
+ * rcar_lvds_regs.h  --  R-Car LVDS Interface Registers Definitions
+ *
+ * Copyright (C) 2013 Renesas Electronics Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ */
+
+#ifndef __RCAR_LVDS_REGS_H__
+#define __RCAR_LVDS_REGS_H__
+
+#define LVDCR0                         0x0000
+#define LVDCR0_DUSEL                   (1 << 15)
+#define LVDCR0_DMD                     (1 << 12)
+#define LVDCR0_LVMD_MASK               (0xf << 8)
+#define LVDCR0_LVMD_SHIFT              8
+#define LVDCR0_PLLEN                   (1 << 4)
+#define LVDCR0_BEN                     (1 << 2)
+#define LVDCR0_LVEN                    (1 << 1)
+#define LVDCR0_LVRES                   (1 << 0)
+
+#define LVDCR1                         0x0004
+#define LVDCR1_CKSEL                   (1 << 15)
+#define LVDCR1_CHSTBY(n)               (3 << (2 + (n) * 2))
+#define LVDCR1_CLKSTBY                 (3 << 0)
+
+#define LVDPLLCR                       0x0008
+#define LVDPLLCR_CEEN                  (1 << 14)
+#define LVDPLLCR_FBEN                  (1 << 13)
+#define LVDPLLCR_COSEL                 (1 << 12)
+#define LVDPLLCR_PLLDLYCNT_150M                (0x1bf << 0)
+#define LVDPLLCR_PLLDLYCNT_121M                (0x22c << 0)
+#define LVDPLLCR_PLLDLYCNT_60M         (0x77b << 0)
+#define LVDPLLCR_PLLDLYCNT_38M         (0x69a << 0)
+#define LVDPLLCR_PLLDLYCNT_MASK                (0x7ff << 0)
+
+#define LVDCTRCR                       0x000c
+#define LVDCTRCR_CTR3SEL_ZERO          (0 << 12)
+#define LVDCTRCR_CTR3SEL_ODD           (1 << 12)
+#define LVDCTRCR_CTR3SEL_CDE           (2 << 12)
+#define LVDCTRCR_CTR3SEL_MASK          (7 << 12)
+#define LVDCTRCR_CTR2SEL_DISP          (0 << 8)
+#define LVDCTRCR_CTR2SEL_ODD           (1 << 8)
+#define LVDCTRCR_CTR2SEL_CDE           (2 << 8)
+#define LVDCTRCR_CTR2SEL_HSYNC         (3 << 8)
+#define LVDCTRCR_CTR2SEL_VSYNC         (4 << 8)
+#define LVDCTRCR_CTR2SEL_MASK          (7 << 8)
+#define LVDCTRCR_CTR1SEL_VSYNC         (0 << 4)
+#define LVDCTRCR_CTR1SEL_DISP          (1 << 4)
+#define LVDCTRCR_CTR1SEL_ODD           (2 << 4)
+#define LVDCTRCR_CTR1SEL_CDE           (3 << 4)
+#define LVDCTRCR_CTR1SEL_HSYNC         (4 << 4)
+#define LVDCTRCR_CTR1SEL_MASK          (7 << 4)
+#define LVDCTRCR_CTR0SEL_HSYNC         (0 << 0)
+#define LVDCTRCR_CTR0SEL_VSYNC         (1 << 0)
+#define LVDCTRCR_CTR0SEL_DISP          (2 << 0)
+#define LVDCTRCR_CTR0SEL_ODD           (3 << 0)
+#define LVDCTRCR_CTR0SEL_CDE           (4 << 0)
+#define LVDCTRCR_CTR0SEL_MASK          (7 << 0)
+
+#define LVDCHCR                                0x0010
+#define LVDCHCR_CHSEL_CH(n, c)         ((((c) - (n)) & 3) << ((n) * 4))
+#define LVDCHCR_CHSEL_MASK(n)          (3 << ((n) * 4))
+
+#endif /* __RCAR_LVDS_REGS_H__ */
index bd6b2cf508d56e952160f12b93332c8be17386ed..b17d0710871abc77487973d837dfe212d17eb5df 100644 (file)
@@ -1072,7 +1072,7 @@ void savage_reclaim_buffers(struct drm_device *dev, struct drm_file *file_priv)
                drm_idlelock_release(&file_priv->master->lock);
 }
 
-struct drm_ioctl_desc savage_ioctls[] = {
+const struct drm_ioctl_desc savage_ioctls[] = {
        DRM_IOCTL_DEF_DRV(SAVAGE_BCI_INIT, savage_bci_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF_DRV(SAVAGE_BCI_CMDBUF, savage_bci_cmdbuf, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(SAVAGE_BCI_EVENT_EMIT, savage_bci_event_emit, DRM_AUTH),
index 71b2081e7835fbfda44bad11f2fcb7e4b604a283..3c030216e8888a14078af5d67b26399f7dfbf585 100644 (file)
@@ -42,7 +42,6 @@ static const struct file_operations savage_driver_fops = {
        .unlocked_ioctl = drm_ioctl,
        .mmap = drm_mmap,
        .poll = drm_poll,
-       .fasync = drm_fasync,
 #ifdef CONFIG_COMPAT
        .compat_ioctl = drm_compat_ioctl,
 #endif
@@ -51,7 +50,7 @@ static const struct file_operations savage_driver_fops = {
 
 static struct drm_driver driver = {
        .driver_features =
-           DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_HAVE_DMA | DRIVER_PCI_DMA,
+           DRIVER_USE_AGP | DRIVER_HAVE_DMA | DRIVER_PCI_DMA,
        .dev_priv_size = sizeof(drm_savage_buf_priv_t),
        .load = savage_driver_load,
        .firstopen = savage_driver_firstopen,
index c05082a59f6f1e40119558111184cbc9f5ef69f9..335f8fcf10416476b3707c37b0779c276f6781ab 100644 (file)
@@ -104,7 +104,7 @@ enum savage_family {
        S3_LAST
 };
 
-extern struct drm_ioctl_desc savage_ioctls[];
+extern const struct drm_ioctl_desc savage_ioctls[];
 extern int savage_max_ioctl;
 
 #define S3_SAVAGE3D_SERIES(chip)  ((chip>=S3_SAVAGE3D) && (chip<=S3_SAVAGE_MX))
index 5f83f9a3ef593503867cace33ff8dd6ae865db86..015551866b4a0d3c6f9912c361993f99c3b548b1 100644 (file)
@@ -257,7 +257,6 @@ static const struct file_operations shmob_drm_fops = {
 #endif
        .poll           = drm_poll,
        .read           = drm_read,
-       .fasync         = drm_fasync,
        .llseek         = no_llseek,
        .mmap           = drm_gem_cma_mmap,
 };
@@ -285,7 +284,7 @@ static struct drm_driver shmob_drm_driver = {
        .gem_prime_mmap         = drm_gem_cma_prime_mmap,
        .dumb_create            = drm_gem_cma_dumb_create,
        .dumb_map_offset        = drm_gem_cma_dumb_map_offset,
-       .dumb_destroy           = drm_gem_cma_dumb_destroy,
+       .dumb_destroy           = drm_gem_dumb_destroy,
        .fops                   = &shmob_drm_fops,
        .name                   = "shmob-drm",
        .desc                   = "Renesas SH Mobile DRM",
index 5a5325e6b75999863e236a3a85102cd35423ad09..4383b74a3aa46f480ac3c118e3a113812fdff9de 100644 (file)
@@ -72,7 +72,6 @@ static const struct file_operations sis_driver_fops = {
        .unlocked_ioctl = drm_ioctl,
        .mmap = drm_mmap,
        .poll = drm_poll,
-       .fasync = drm_fasync,
 #ifdef CONFIG_COMPAT
        .compat_ioctl = drm_compat_ioctl,
 #endif
@@ -103,7 +102,7 @@ void sis_driver_postclose(struct drm_device *dev, struct drm_file *file)
 }
 
 static struct drm_driver driver = {
-       .driver_features = DRIVER_USE_AGP | DRIVER_USE_MTRR,
+       .driver_features = DRIVER_USE_AGP,
        .load = sis_driver_load,
        .unload = sis_driver_unload,
        .open = sis_driver_open,
index 13b527bb83be4df4f068af5035359aa410a4491e..c31c0253054dccda6dcf570abf99c8564f5fc5d8 100644 (file)
@@ -70,7 +70,7 @@ extern void sis_reclaim_buffers_locked(struct drm_device *dev,
                                       struct drm_file *file_priv);
 extern void sis_lastclose(struct drm_device *dev);
 
-extern struct drm_ioctl_desc sis_ioctls[];
+extern const struct drm_ioctl_desc sis_ioctls[];
 extern int sis_max_ioctl;
 
 #endif
index 9a43d98e500341fc22f9aa886da64d28476f4f14..01857d836350db00261d96870af2571e2b3e356f 100644 (file)
@@ -109,7 +109,8 @@ static int sis_drm_alloc(struct drm_device *dev, struct drm_file *file,
        if (pool == AGP_TYPE) {
                retval = drm_mm_insert_node(&dev_priv->agp_mm,
                                            &item->mm_node,
-                                           mem->size, 0);
+                                           mem->size, 0,
+                                           DRM_MM_SEARCH_DEFAULT);
                offset = item->mm_node.start;
        } else {
 #if defined(CONFIG_FB_SIS) || defined(CONFIG_FB_SIS_MODULE)
@@ -121,7 +122,8 @@ static int sis_drm_alloc(struct drm_device *dev, struct drm_file *file,
 #else
                retval = drm_mm_insert_node(&dev_priv->vram_mm,
                                            &item->mm_node,
-                                           mem->size, 0);
+                                           mem->size, 0,
+                                           DRM_MM_SEARCH_DEFAULT);
                offset = item->mm_node.start;
 #endif
        }
@@ -348,7 +350,7 @@ void sis_reclaim_buffers_locked(struct drm_device *dev,
        return;
 }
 
-struct drm_ioctl_desc sis_ioctls[] = {
+const struct drm_ioctl_desc sis_ioctls[] = {
        DRM_IOCTL_DEF_DRV(SIS_FB_ALLOC, sis_fb_alloc, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(SIS_FB_FREE, sis_drm_free, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(SIS_AGP_INIT, sis_ioctl_agp_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY),
index ddfa743459d04997f935bba0b203059ce35d8ad7..3492ca5c46d323b6932ee8d917a00af3544bebf2 100644 (file)
@@ -48,7 +48,6 @@ static const struct file_operations tdfx_driver_fops = {
        .unlocked_ioctl = drm_ioctl,
        .mmap = drm_mmap,
        .poll = drm_poll,
-       .fasync = drm_fasync,
 #ifdef CONFIG_COMPAT
        .compat_ioctl = drm_compat_ioctl,
 #endif
@@ -56,7 +55,6 @@ static const struct file_operations tdfx_driver_fops = {
 };
 
 static struct drm_driver driver = {
-       .driver_features = DRIVER_USE_MTRR,
        .fops = &tdfx_driver_fops,
        .name = DRIVER_NAME,
        .desc = DRIVER_DESC,
index 7418dcd986d3f50f48f33d56b703cf8dc82c4be9..fe4726628906b8defe0e8c584788e9c4deb889be 100644 (file)
@@ -15,7 +15,7 @@
  * this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
-#include <linux/kfifo.h>
+#include "drm_flip_work.h"
 
 #include "tilcdc_drv.h"
 #include "tilcdc_regs.h"
@@ -35,21 +35,18 @@ struct tilcdc_crtc {
        struct drm_framebuffer *scanout[2];
 
        /* for deferred fb unref's: */
-       DECLARE_KFIFO_PTR(unref_fifo, struct drm_framebuffer *);
-       struct work_struct work;
+       struct drm_flip_work unref_work;
 };
 #define to_tilcdc_crtc(x) container_of(x, struct tilcdc_crtc, base)
 
-static void unref_worker(struct work_struct *work)
+static void unref_worker(struct drm_flip_work *work, void *val)
 {
        struct tilcdc_crtc *tilcdc_crtc =
-               container_of(work, struct tilcdc_crtc, work);
+               container_of(work, struct tilcdc_crtc, unref_work);
        struct drm_device *dev = tilcdc_crtc->base.dev;
-       struct drm_framebuffer *fb;
 
        mutex_lock(&dev->mode_config.mutex);
-       while (kfifo_get(&tilcdc_crtc->unref_fifo, &fb))
-               drm_framebuffer_unreference(fb);
+       drm_framebuffer_unreference(val);
        mutex_unlock(&dev->mode_config.mutex);
 }
 
@@ -68,19 +65,14 @@ static void set_scanout(struct drm_crtc *crtc, int n)
        };
        struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
        struct drm_device *dev = crtc->dev;
+       struct tilcdc_drm_private *priv = dev->dev_private;
 
        pm_runtime_get_sync(dev->dev);
        tilcdc_write(dev, base_reg[n], tilcdc_crtc->start);
        tilcdc_write(dev, ceil_reg[n], tilcdc_crtc->end);
        if (tilcdc_crtc->scanout[n]) {
-               if (kfifo_put(&tilcdc_crtc->unref_fifo,
-                               (const struct drm_framebuffer **)&tilcdc_crtc->scanout[n])) {
-                       struct tilcdc_drm_private *priv = dev->dev_private;
-                       queue_work(priv->wq, &tilcdc_crtc->work);
-               } else {
-                       dev_err(dev->dev, "unref fifo full!\n");
-                       drm_framebuffer_unreference(tilcdc_crtc->scanout[n]);
-               }
+               drm_flip_work_queue(&tilcdc_crtc->unref_work, tilcdc_crtc->scanout[n]);
+               drm_flip_work_commit(&tilcdc_crtc->unref_work, priv->wq);
        }
        tilcdc_crtc->scanout[n] = crtc->fb;
        drm_framebuffer_reference(tilcdc_crtc->scanout[n]);
@@ -149,8 +141,8 @@ static void tilcdc_crtc_destroy(struct drm_crtc *crtc)
        WARN_ON(tilcdc_crtc->dpms == DRM_MODE_DPMS_ON);
 
        drm_crtc_cleanup(crtc);
-       WARN_ON(!kfifo_is_empty(&tilcdc_crtc->unref_fifo));
-       kfifo_free(&tilcdc_crtc->unref_fifo);
+       drm_flip_work_cleanup(&tilcdc_crtc->unref_work);
+
        kfree(tilcdc_crtc);
 }
 
@@ -379,7 +371,12 @@ static int tilcdc_crtc_mode_set(struct drm_crtc *crtc,
        else
                tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_SYNC_EDGE);
 
-       if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+       /*
+        * use value from adjusted_mode here as this might have been
+        * changed as part of the fixup for slave encoders to solve the
+        * issue where tilcdc timings are not VESA compliant
+        */
+       if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
                tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_HSYNC);
        else
                tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_HSYNC);
@@ -666,14 +663,13 @@ struct drm_crtc *tilcdc_crtc_create(struct drm_device *dev)
        tilcdc_crtc->dpms = DRM_MODE_DPMS_OFF;
        init_waitqueue_head(&tilcdc_crtc->frame_done_wq);
 
-       ret = kfifo_alloc(&tilcdc_crtc->unref_fifo, 16, GFP_KERNEL);
+       ret = drm_flip_work_init(&tilcdc_crtc->unref_work, 16,
+                       "unref", unref_worker);
        if (ret) {
                dev_err(dev->dev, "could not allocate unref FIFO\n");
                goto fail;
        }
 
-       INIT_WORK(&tilcdc_crtc->work, unref_worker);
-
        ret = drm_crtc_init(dev, crtc, &tilcdc_crtc_funcs);
        if (ret < 0)
                goto fail;
index 40b71da5a2145815f7539a76c0c01791fbb94fbc..116da199b9429a56ba8a7b58efc953369b721344 100644 (file)
@@ -497,7 +497,6 @@ static const struct file_operations fops = {
 #endif
        .poll               = drm_poll,
        .read               = drm_read,
-       .fasync             = drm_fasync,
        .llseek             = no_llseek,
        .mmap               = drm_gem_cma_mmap,
 };
@@ -519,7 +518,7 @@ static struct drm_driver tilcdc_driver = {
        .gem_vm_ops         = &drm_gem_cma_vm_ops,
        .dumb_create        = drm_gem_cma_dumb_create,
        .dumb_map_offset    = drm_gem_cma_dumb_map_offset,
-       .dumb_destroy       = drm_gem_cma_dumb_destroy,
+       .dumb_destroy       = drm_gem_dumb_destroy,
 #ifdef CONFIG_DEBUG_FS
        .debugfs_init       = tilcdc_debugfs_init,
        .debugfs_cleanup    = tilcdc_debugfs_cleanup,
index dfffaf01402225b20bbbcefd97b264cad040ce28..23b3203d8241c11999ee02d5312c50c045a2cefd 100644 (file)
@@ -73,13 +73,38 @@ static void slave_encoder_prepare(struct drm_encoder *encoder)
        tilcdc_crtc_set_panel_info(encoder->crtc, &slave_info);
 }
 
+static bool slave_encoder_fixup(struct drm_encoder *encoder,
+               const struct drm_display_mode *mode,
+               struct drm_display_mode *adjusted_mode)
+{
+       /*
+        * tilcdc does not generate VESA-complient sync but aligns
+        * VS on the second edge of HS instead of first edge.
+        * We use adjusted_mode, to fixup sync by aligning both rising
+        * edges and add HSKEW offset to let the slave encoder fix it up.
+        */
+       adjusted_mode->hskew = mode->hsync_end - mode->hsync_start;
+       adjusted_mode->flags |= DRM_MODE_FLAG_HSKEW;
+
+       if (mode->flags & DRM_MODE_FLAG_NHSYNC) {
+               adjusted_mode->flags |= DRM_MODE_FLAG_PHSYNC;
+               adjusted_mode->flags &= ~DRM_MODE_FLAG_NHSYNC;
+       } else {
+               adjusted_mode->flags |= DRM_MODE_FLAG_NHSYNC;
+               adjusted_mode->flags &= ~DRM_MODE_FLAG_PHSYNC;
+       }
+
+       return drm_i2c_encoder_mode_fixup(encoder, mode, adjusted_mode);
+}
+
+
 static const struct drm_encoder_funcs slave_encoder_funcs = {
                .destroy        = slave_encoder_destroy,
 };
 
 static const struct drm_encoder_helper_funcs slave_encoder_helper_funcs = {
                .dpms           = drm_i2c_encoder_dpms,
-               .mode_fixup     = drm_i2c_encoder_mode_fixup,
+               .mode_fixup     = slave_encoder_fixup,
                .prepare        = slave_encoder_prepare,
                .commit         = drm_i2c_encoder_commit,
                .mode_set       = drm_i2c_encoder_mode_set,
index cb9dd674670c97f188bd0fa1eb765c97d4f62f8e..f1a857ec1021e81754a79c384a035cd948910b2b 100644 (file)
@@ -45,7 +45,6 @@
 #define TTM_DEBUG(fmt, arg...)
 #define TTM_BO_HASH_ORDER 13
 
-static int ttm_bo_setup_vm(struct ttm_buffer_object *bo);
 static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
 static void ttm_bo_global_kobj_release(struct kobject *kobj);
 
@@ -615,13 +614,7 @@ static void ttm_bo_release(struct kref *kref)
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
 
-       write_lock(&bdev->vm_lock);
-       if (likely(bo->vm_node != NULL)) {
-               rb_erase(&bo->vm_rb, &bdev->addr_space_rb);
-               drm_mm_put_block(bo->vm_node);
-               bo->vm_node = NULL;
-       }
-       write_unlock(&bdev->vm_lock);
+       drm_vma_offset_remove(&bdev->vma_manager, &bo->vma_node);
        ttm_mem_io_lock(man, false);
        ttm_mem_io_free_vm(bo);
        ttm_mem_io_unlock(man);
@@ -1129,6 +1122,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
        bo->resv = &bo->ttm_resv;
        reservation_object_init(bo->resv);
        atomic_inc(&bo->glob->bo_count);
+       drm_vma_node_reset(&bo->vma_node);
 
        ret = ttm_bo_check_placement(bo, placement);
 
@@ -1139,7 +1133,8 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
        if (likely(!ret) &&
            (bo->type == ttm_bo_type_device ||
             bo->type == ttm_bo_type_sg))
-               ret = ttm_bo_setup_vm(bo);
+               ret = drm_vma_offset_add(&bdev->vma_manager, &bo->vma_node,
+                                        bo->mem.num_pages);
 
        locked = ww_mutex_trylock(&bo->resv->lock);
        WARN_ON(!locked);
@@ -1424,10 +1419,7 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev)
                TTM_DEBUG("Swap list was clean\n");
        spin_unlock(&glob->lru_lock);
 
-       BUG_ON(!drm_mm_clean(&bdev->addr_space_mm));
-       write_lock(&bdev->vm_lock);
-       drm_mm_takedown(&bdev->addr_space_mm);
-       write_unlock(&bdev->vm_lock);
+       drm_vma_offset_manager_destroy(&bdev->vma_manager);
 
        return ret;
 }
@@ -1441,7 +1433,6 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
 {
        int ret = -EINVAL;
 
-       rwlock_init(&bdev->vm_lock);
        bdev->driver = driver;
 
        memset(bdev->man, 0, sizeof(bdev->man));
@@ -1454,9 +1445,8 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
        if (unlikely(ret != 0))
                goto out_no_sys;
 
-       bdev->addr_space_rb = RB_ROOT;
-       drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000);
-
+       drm_vma_offset_manager_init(&bdev->vma_manager, file_page_offset,
+                                   0x10000000);
        INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
        INIT_LIST_HEAD(&bdev->ddestroy);
        bdev->dev_mapping = NULL;
@@ -1498,12 +1488,8 @@ bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
 void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
 {
        struct ttm_bo_device *bdev = bo->bdev;
-       loff_t offset = (loff_t) bo->addr_space_offset;
-       loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;
 
-       if (!bdev->dev_mapping)
-               return;
-       unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
+       drm_vma_node_unmap(&bo->vma_node, bdev->dev_mapping);
        ttm_mem_io_free_vm(bo);
 }
 
@@ -1520,78 +1506,6 @@ void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
 
 EXPORT_SYMBOL(ttm_bo_unmap_virtual);
 
-static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
-{
-       struct ttm_bo_device *bdev = bo->bdev;
-       struct rb_node **cur = &bdev->addr_space_rb.rb_node;
-       struct rb_node *parent = NULL;
-       struct ttm_buffer_object *cur_bo;
-       unsigned long offset = bo->vm_node->start;
-       unsigned long cur_offset;
-
-       while (*cur) {
-               parent = *cur;
-               cur_bo = rb_entry(parent, struct ttm_buffer_object, vm_rb);
-               cur_offset = cur_bo->vm_node->start;
-               if (offset < cur_offset)
-                       cur = &parent->rb_left;
-               else if (offset > cur_offset)
-                       cur = &parent->rb_right;
-               else
-                       BUG();
-       }
-
-       rb_link_node(&bo->vm_rb, parent, cur);
-       rb_insert_color(&bo->vm_rb, &bdev->addr_space_rb);
-}
-
-/**
- * ttm_bo_setup_vm:
- *
- * @bo: the buffer to allocate address space for
- *
- * Allocate address space in the drm device so that applications
- * can mmap the buffer and access the contents. This only
- * applies to ttm_bo_type_device objects as others are not
- * placed in the drm device address space.
- */
-
-static int ttm_bo_setup_vm(struct ttm_buffer_object *bo)
-{
-       struct ttm_bo_device *bdev = bo->bdev;
-       int ret;
-
-retry_pre_get:
-       ret = drm_mm_pre_get(&bdev->addr_space_mm);
-       if (unlikely(ret != 0))
-               return ret;
-
-       write_lock(&bdev->vm_lock);
-       bo->vm_node = drm_mm_search_free(&bdev->addr_space_mm,
-                                        bo->mem.num_pages, 0, 0);
-
-       if (unlikely(bo->vm_node == NULL)) {
-               ret = -ENOMEM;
-               goto out_unlock;
-       }
-
-       bo->vm_node = drm_mm_get_block_atomic(bo->vm_node,
-                                             bo->mem.num_pages, 0);
-
-       if (unlikely(bo->vm_node == NULL)) {
-               write_unlock(&bdev->vm_lock);
-               goto retry_pre_get;
-       }
-
-       ttm_bo_vm_insert_rb(bo);
-       write_unlock(&bdev->vm_lock);
-       bo->addr_space_offset = ((uint64_t) bo->vm_node->start) << PAGE_SHIFT;
-
-       return 0;
-out_unlock:
-       write_unlock(&bdev->vm_lock);
-       return ret;
-}
 
 int ttm_bo_wait(struct ttm_buffer_object *bo,
                bool lazy, bool interruptible, bool no_wait)
index e4367f91472a4d3e25341ad4fa3e2121724a2c99..c58eba33bd5f1b727988e660bf772f5b20b60e25 100644 (file)
@@ -61,28 +61,25 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
        lpfn = placement->lpfn;
        if (!lpfn)
                lpfn = man->size;
-       do {
-               ret = drm_mm_pre_get(mm);
-               if (unlikely(ret))
-                       return ret;
 
-               spin_lock(&rman->lock);
-               node = drm_mm_search_free_in_range(mm,
-                                       mem->num_pages, mem->page_alignment,
-                                       placement->fpfn, lpfn, 1);
-               if (unlikely(node == NULL)) {
-                       spin_unlock(&rman->lock);
-                       return 0;
-               }
-               node = drm_mm_get_block_atomic_range(node, mem->num_pages,
-                                                    mem->page_alignment,
-                                                    placement->fpfn,
-                                                    lpfn);
-               spin_unlock(&rman->lock);
-       } while (node == NULL);
+       node = kzalloc(sizeof(*node), GFP_KERNEL);
+       if (!node)
+               return -ENOMEM;
+
+       spin_lock(&rman->lock);
+       ret = drm_mm_insert_node_in_range(mm, node, mem->num_pages,
+                                         mem->page_alignment,
+                                         placement->fpfn, lpfn,
+                                         DRM_MM_SEARCH_BEST);
+       spin_unlock(&rman->lock);
+
+       if (unlikely(ret)) {
+               kfree(node);
+       } else {
+               mem->mm_node = node;
+               mem->start = node->start;
+       }
 
-       mem->mm_node = node;
-       mem->start = node->start;
        return 0;
 }
 
@@ -93,8 +90,10 @@ static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
 
        if (mem->mm_node) {
                spin_lock(&rman->lock);
-               drm_mm_put_block(mem->mm_node);
+               drm_mm_remove_node(mem->mm_node);
                spin_unlock(&rman->lock);
+
+               kfree(mem->mm_node);
                mem->mm_node = NULL;
        }
 }
index 319cf4127c5b2b308523fe154f32b6fd0076b000..7cc904d3a4d12b4efd6935f75ed302335552a4aa 100644 (file)
@@ -30,6 +30,7 @@
 
 #include <drm/ttm/ttm_bo_driver.h>
 #include <drm/ttm/ttm_placement.h>
+#include <drm/drm_vma_manager.h>
 #include <linux/io.h>
 #include <linux/highmem.h>
 #include <linux/wait.h>
@@ -450,7 +451,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
        INIT_LIST_HEAD(&fbo->lru);
        INIT_LIST_HEAD(&fbo->swap);
        INIT_LIST_HEAD(&fbo->io_reserve_lru);
-       fbo->vm_node = NULL;
+       drm_vma_node_reset(&fbo->vma_node);
        atomic_set(&fbo->cpu_writers, 0);
 
        spin_lock(&bdev->fence_lock);
index 3df9f16b041cb214cc5570f890af2e892c25ac7e..1006c15445e9753764305c00f652a86891f81d64 100644 (file)
@@ -33,6 +33,7 @@
 #include <ttm/ttm_module.h>
 #include <ttm/ttm_bo_driver.h>
 #include <ttm/ttm_placement.h>
+#include <drm/drm_vma_manager.h>
 #include <linux/mm.h>
 #include <linux/rbtree.h>
 #include <linux/module.h>
 
 #define TTM_BO_VM_NUM_PREFAULT 16
 
-static struct ttm_buffer_object *ttm_bo_vm_lookup_rb(struct ttm_bo_device *bdev,
-                                                    unsigned long page_start,
-                                                    unsigned long num_pages)
-{
-       struct rb_node *cur = bdev->addr_space_rb.rb_node;
-       unsigned long cur_offset;
-       struct ttm_buffer_object *bo;
-       struct ttm_buffer_object *best_bo = NULL;
-
-       while (likely(cur != NULL)) {
-               bo = rb_entry(cur, struct ttm_buffer_object, vm_rb);
-               cur_offset = bo->vm_node->start;
-               if (page_start >= cur_offset) {
-                       cur = cur->rb_right;
-                       best_bo = bo;
-                       if (page_start == cur_offset)
-                               break;
-               } else
-                       cur = cur->rb_left;
-       }
-
-       if (unlikely(best_bo == NULL))
-               return NULL;
-
-       if (unlikely((best_bo->vm_node->start + best_bo->num_pages) <
-                    (page_start + num_pages)))
-               return NULL;
-
-       return best_bo;
-}
-
 static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
        struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
@@ -146,9 +116,9 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
        }
 
        page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
-           bo->vm_node->start - vma->vm_pgoff;
+           drm_vma_node_start(&bo->vma_node) - vma->vm_pgoff;
        page_last = vma_pages(vma) +
-           bo->vm_node->start - vma->vm_pgoff;
+           drm_vma_node_start(&bo->vma_node) - vma->vm_pgoff;
 
        if (unlikely(page_offset >= bo->num_pages)) {
                retval = VM_FAULT_SIGBUS;
@@ -249,6 +219,30 @@ static const struct vm_operations_struct ttm_bo_vm_ops = {
        .close = ttm_bo_vm_close
 };
 
+static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_bo_device *bdev,
+                                                 unsigned long offset,
+                                                 unsigned long pages)
+{
+       struct drm_vma_offset_node *node;
+       struct ttm_buffer_object *bo = NULL;
+
+       drm_vma_offset_lock_lookup(&bdev->vma_manager);
+
+       node = drm_vma_offset_lookup_locked(&bdev->vma_manager, offset, pages);
+       if (likely(node)) {
+               bo = container_of(node, struct ttm_buffer_object, vma_node);
+               if (!kref_get_unless_zero(&bo->kref))
+                       bo = NULL;
+       }
+
+       drm_vma_offset_unlock_lookup(&bdev->vma_manager);
+
+       if (!bo)
+               pr_err("Could not find buffer object to map\n");
+
+       return bo;
+}
+
 int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
                struct ttm_bo_device *bdev)
 {
@@ -256,17 +250,9 @@ int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
        struct ttm_buffer_object *bo;
        int ret;
 
-       read_lock(&bdev->vm_lock);
-       bo = ttm_bo_vm_lookup_rb(bdev, vma->vm_pgoff,
-                                vma_pages(vma));
-       if (likely(bo != NULL) && !kref_get_unless_zero(&bo->kref))
-               bo = NULL;
-       read_unlock(&bdev->vm_lock);
-
-       if (unlikely(bo == NULL)) {
-               pr_err("Could not find buffer object to map\n");
+       bo = ttm_bo_vm_lookup(bdev, vma->vm_pgoff, vma_pages(vma));
+       if (unlikely(!bo))
                return -EINVAL;
-       }
 
        driver = bo->bdev->driver;
        if (unlikely(!driver->verify_access)) {
@@ -304,162 +290,3 @@ int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
        return 0;
 }
 EXPORT_SYMBOL(ttm_fbdev_mmap);
-
-
-ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
-                 const char __user *wbuf, char __user *rbuf, size_t count,
-                 loff_t *f_pos, bool write)
-{
-       struct ttm_buffer_object *bo;
-       struct ttm_bo_driver *driver;
-       struct ttm_bo_kmap_obj map;
-       unsigned long dev_offset = (*f_pos >> PAGE_SHIFT);
-       unsigned long kmap_offset;
-       unsigned long kmap_end;
-       unsigned long kmap_num;
-       size_t io_size;
-       unsigned int page_offset;
-       char *virtual;
-       int ret;
-       bool no_wait = false;
-       bool dummy;
-
-       read_lock(&bdev->vm_lock);
-       bo = ttm_bo_vm_lookup_rb(bdev, dev_offset, 1);
-       if (likely(bo != NULL))
-               ttm_bo_reference(bo);
-       read_unlock(&bdev->vm_lock);
-
-       if (unlikely(bo == NULL))
-               return -EFAULT;
-
-       driver = bo->bdev->driver;
-       if (unlikely(!driver->verify_access)) {
-               ret = -EPERM;
-               goto out_unref;
-       }
-
-       ret = driver->verify_access(bo, filp);
-       if (unlikely(ret != 0))
-               goto out_unref;
-
-       kmap_offset = dev_offset - bo->vm_node->start;
-       if (unlikely(kmap_offset >= bo->num_pages)) {
-               ret = -EFBIG;
-               goto out_unref;
-       }
-
-       page_offset = *f_pos & ~PAGE_MASK;
-       io_size = bo->num_pages - kmap_offset;
-       io_size = (io_size << PAGE_SHIFT) - page_offset;
-       if (count < io_size)
-               io_size = count;
-
-       kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
-       kmap_num = kmap_end - kmap_offset + 1;
-
-       ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
-
-       switch (ret) {
-       case 0:
-               break;
-       case -EBUSY:
-               ret = -EAGAIN;
-               goto out_unref;
-       default:
-               goto out_unref;
-       }
-
-       ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
-       if (unlikely(ret != 0)) {
-               ttm_bo_unreserve(bo);
-               goto out_unref;
-       }
-
-       virtual = ttm_kmap_obj_virtual(&map, &dummy);
-       virtual += page_offset;
-
-       if (write)
-               ret = copy_from_user(virtual, wbuf, io_size);
-       else
-               ret = copy_to_user(rbuf, virtual, io_size);
-
-       ttm_bo_kunmap(&map);
-       ttm_bo_unreserve(bo);
-       ttm_bo_unref(&bo);
-
-       if (unlikely(ret != 0))
-               return -EFBIG;
-
-       *f_pos += io_size;
-
-       return io_size;
-out_unref:
-       ttm_bo_unref(&bo);
-       return ret;
-}
-
-ssize_t ttm_bo_fbdev_io(struct ttm_buffer_object *bo, const char __user *wbuf,
-                       char __user *rbuf, size_t count, loff_t *f_pos,
-                       bool write)
-{
-       struct ttm_bo_kmap_obj map;
-       unsigned long kmap_offset;
-       unsigned long kmap_end;
-       unsigned long kmap_num;
-       size_t io_size;
-       unsigned int page_offset;
-       char *virtual;
-       int ret;
-       bool no_wait = false;
-       bool dummy;
-
-       kmap_offset = (*f_pos >> PAGE_SHIFT);
-       if (unlikely(kmap_offset >= bo->num_pages))
-               return -EFBIG;
-
-       page_offset = *f_pos & ~PAGE_MASK;
-       io_size = bo->num_pages - kmap_offset;
-       io_size = (io_size << PAGE_SHIFT) - page_offset;
-       if (count < io_size)
-               io_size = count;
-
-       kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
-       kmap_num = kmap_end - kmap_offset + 1;
-
-       ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
-
-       switch (ret) {
-       case 0:
-               break;
-       case -EBUSY:
-               return -EAGAIN;
-       default:
-               return ret;
-       }
-
-       ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
-       if (unlikely(ret != 0)) {
-               ttm_bo_unreserve(bo);
-               return ret;
-       }
-
-       virtual = ttm_kmap_obj_virtual(&map, &dummy);
-       virtual += page_offset;
-
-       if (write)
-               ret = copy_from_user(virtual, wbuf, io_size);
-       else
-               ret = copy_to_user(rbuf, virtual, io_size);
-
-       ttm_bo_kunmap(&map);
-       ttm_bo_unreserve(bo);
-       ttm_bo_unref(&bo);
-
-       if (unlikely(ret != 0))
-               return ret;
-
-       *f_pos += io_size;
-
-       return io_size;
-}
index c0770dbba74aebf4937eef4459380aa8f207829f..7650dc0d78cec2391755a04235cfe3bfc6c7a8d1 100644 (file)
@@ -65,7 +65,6 @@ static const struct file_operations udl_driver_fops = {
        .read = drm_read,
        .unlocked_ioctl = drm_ioctl,
        .release = drm_release,
-       .fasync = drm_fasync,
 #ifdef CONFIG_COMPAT
        .compat_ioctl = drm_compat_ioctl,
 #endif
@@ -84,7 +83,7 @@ static struct drm_driver driver = {
 
        .dumb_create = udl_dumb_create,
        .dumb_map_offset = udl_gem_mmap,
-       .dumb_destroy = udl_dumb_destroy,
+       .dumb_destroy = drm_gem_dumb_destroy,
        .fops = &udl_driver_fops,
 
        .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
index cc6d90f28c71a7c2958cbd652e26552f3a23923a..56aec9409fa317c37054705e0a54c724cb126068 100644 (file)
@@ -114,8 +114,6 @@ int udl_dumb_create(struct drm_file *file_priv,
                    struct drm_mode_create_dumb *args);
 int udl_gem_mmap(struct drm_file *file_priv, struct drm_device *dev,
                 uint32_t handle, uint64_t *offset);
-int udl_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev,
-                    uint32_t handle);
 
 int udl_gem_init_object(struct drm_gem_object *obj);
 void udl_gem_free_object(struct drm_gem_object *gem_obj);
index ef034fa3e6f5982a88fe2341a1fa03d38138d8fb..8dbe9d0ae9a73840ae6270bac3776ca239954a11 100644 (file)
@@ -66,12 +66,6 @@ int udl_dumb_create(struct drm_file *file,
                              args->size, &args->handle);
 }
 
-int udl_dumb_destroy(struct drm_file *file, struct drm_device *dev,
-                    uint32_t handle)
-{
-       return drm_gem_handle_delete(file, handle);
-}
-
 int udl_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 {
        int ret;
@@ -123,55 +117,23 @@ int udl_gem_init_object(struct drm_gem_object *obj)
 
 static int udl_gem_get_pages(struct udl_gem_object *obj, gfp_t gfpmask)
 {
-       int page_count, i;
-       struct page *page;
-       struct inode *inode;
-       struct address_space *mapping;
+       struct page **pages;
 
        if (obj->pages)
                return 0;
 
-       page_count = obj->base.size / PAGE_SIZE;
-       BUG_ON(obj->pages != NULL);
-       obj->pages = drm_malloc_ab(page_count, sizeof(struct page *));
-       if (obj->pages == NULL)
-               return -ENOMEM;
+       pages = drm_gem_get_pages(&obj->base, gfpmask);
+       if (IS_ERR(pages))
+               return PTR_ERR(pages);
 
-       inode = file_inode(obj->base.filp);
-       mapping = inode->i_mapping;
-       gfpmask |= mapping_gfp_mask(mapping);
-
-       for (i = 0; i < page_count; i++) {
-               page = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
-               if (IS_ERR(page))
-                       goto err_pages;
-               obj->pages[i] = page;
-       }
+       obj->pages = pages;
 
        return 0;
-err_pages:
-       while (i--)
-               page_cache_release(obj->pages[i]);
-       drm_free_large(obj->pages);
-       obj->pages = NULL;
-       return PTR_ERR(page);
 }
 
 static void udl_gem_put_pages(struct udl_gem_object *obj)
 {
-       int page_count = obj->base.size / PAGE_SIZE;
-       int i;
-
-       if (obj->base.import_attach) {
-               drm_free_large(obj->pages);
-               obj->pages = NULL;
-               return;
-       }
-
-       for (i = 0; i < page_count; i++)
-               page_cache_release(obj->pages[i]);
-
-       drm_free_large(obj->pages);
+       drm_gem_put_pages(&obj->base, obj->pages, false, false);
        obj->pages = NULL;
 }
 
@@ -223,8 +185,7 @@ void udl_gem_free_object(struct drm_gem_object *gem_obj)
        if (obj->pages)
                udl_gem_put_pages(obj);
 
-       if (gem_obj->map_list.map)
-               drm_gem_free_mmap_offset(gem_obj);
+       drm_gem_free_mmap_offset(gem_obj);
 }
 
 /* the dumb interface doesn't work with the GEM straight MMAP
@@ -247,13 +208,11 @@ int udl_gem_mmap(struct drm_file *file, struct drm_device *dev,
        ret = udl_gem_get_pages(gobj, GFP_KERNEL);
        if (ret)
                goto out;
-       if (!gobj->base.map_list.map) {
-               ret = drm_gem_create_mmap_offset(obj);
-               if (ret)
-                       goto out;
-       }
+       ret = drm_gem_create_mmap_offset(obj);
+       if (ret)
+               goto out;
 
-       *offset = (u64)gobj->base.map_list.hash.key << PAGE_SHIFT;
+       *offset = drm_vma_node_offset_addr(&gobj->base.vma_node);
 
 out:
        drm_gem_object_unreference(&gobj->base);
index 0ce2d7195256197f0b1bfe6edcb0485f0f74b1c6..f5ae57406f3447eacf3dbce309583ae303628824 100644 (file)
@@ -41,8 +41,8 @@ static int udl_parse_vendor_descriptor(struct drm_device *dev,
        total_len = usb_get_descriptor(usbdev, 0x5f, /* vendor specific */
                                    0, desc, MAX_VENDOR_DESCRIPTOR_SIZE);
        if (total_len > 5) {
-               DRM_INFO("vendor descriptor length:%x data:%*ph\n",
-                       total_len, 11, desc);
+               DRM_INFO("vendor descriptor length:%x data:%11ph\n",
+                       total_len, desc);
 
                if ((desc[0] != total_len) || /* descriptor length */
                    (desc[1] != 0x5f) ||   /* vendor descriptor type */
index 13558f5a24223989e4f8ba23416c82e6753b87b9..652f9b43ec9dcfc1c8a6e383dcc443fa9ef2db94 100644 (file)
@@ -720,7 +720,7 @@ static int via_cmdbuf_size(struct drm_device *dev, void *data, struct drm_file *
        return ret;
 }
 
-struct drm_ioctl_desc via_ioctls[] = {
+const struct drm_ioctl_desc via_ioctls[] = {
        DRM_IOCTL_DEF_DRV(VIA_ALLOCMEM, via_mem_alloc, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(VIA_FREEMEM, via_mem_free, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(VIA_AGP_INIT, via_agp_init, DRM_AUTH|DRM_MASTER),
index f4ae203279418225337dfcbbe52b1bd176eb5123..92684a9b7e3414f69cdc76f2d2ef82d286cda734 100644 (file)
@@ -64,7 +64,6 @@ static const struct file_operations via_driver_fops = {
        .unlocked_ioctl = drm_ioctl,
        .mmap = drm_mmap,
        .poll = drm_poll,
-       .fasync = drm_fasync,
 #ifdef CONFIG_COMPAT
        .compat_ioctl = drm_compat_ioctl,
 #endif
@@ -73,7 +72,7 @@ static const struct file_operations via_driver_fops = {
 
 static struct drm_driver driver = {
        .driver_features =
-           DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_HAVE_IRQ |
+           DRIVER_USE_AGP | DRIVER_HAVE_IRQ |
            DRIVER_IRQ_SHARED,
        .load = via_driver_load,
        .unload = via_driver_unload,
index 893a65090c36e7f08eb834856b25a0f2c3442938..a811ef2b505f1b25de22921a55ad8309d6a33d6a 100644 (file)
@@ -114,7 +114,7 @@ enum via_family {
 #define VIA_READ8(reg)         DRM_READ8(VIA_BASE, reg)
 #define VIA_WRITE8(reg, val)   DRM_WRITE8(VIA_BASE, reg, val)
 
-extern struct drm_ioctl_desc via_ioctls[];
+extern const struct drm_ioctl_desc via_ioctls[];
 extern int via_max_ioctl;
 
 extern int via_fb_init(struct drm_device *dev, void *data, struct drm_file *file_priv);
index 0ab93ff0987328f04a27dc922de6d3f9984101d4..7e3ad87c366c6761e41b38c4fe3226f38925d9e5 100644 (file)
@@ -140,11 +140,11 @@ int via_mem_alloc(struct drm_device *dev, void *data,
        if (mem->type == VIA_MEM_AGP)
                retval = drm_mm_insert_node(&dev_priv->agp_mm,
                                            &item->mm_node,
-                                           tmpSize, 0);
+                                           tmpSize, 0, DRM_MM_SEARCH_DEFAULT);
        else
                retval = drm_mm_insert_node(&dev_priv->vram_mm,
                                            &item->mm_node,
-                                           tmpSize, 0);
+                                           tmpSize, 0, DRM_MM_SEARCH_DEFAULT);
        if (retval)
                goto fail_alloc;
 
index 78e21649d48adf52ee4a849da033eddd9c47a2a2..0dcfa6b76c45aa42f36526d67cd38f31958b3215 100644 (file)
  * Ioctl definitions.
  */
 
-static struct drm_ioctl_desc vmw_ioctls[] = {
+static const struct drm_ioctl_desc vmw_ioctls[] = {
        VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl,
@@ -782,7 +782,7 @@ static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
 
        if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
            && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
-               struct drm_ioctl_desc *ioctl =
+               const struct drm_ioctl_desc *ioctl =
                    &vmw_ioctls[nr - DRM_COMMAND_BASE];
 
                if (unlikely(ioctl->cmd_drv != cmd)) {
@@ -795,29 +795,12 @@ static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
        return drm_ioctl(filp, cmd, arg);
 }
 
-static int vmw_firstopen(struct drm_device *dev)
-{
-       struct vmw_private *dev_priv = vmw_priv(dev);
-       dev_priv->is_opened = true;
-
-       return 0;
-}
-
 static void vmw_lastclose(struct drm_device *dev)
 {
-       struct vmw_private *dev_priv = vmw_priv(dev);
        struct drm_crtc *crtc;
        struct drm_mode_set set;
        int ret;
 
-       /**
-        * Do nothing on the lastclose call from drm_unload.
-        */
-
-       if (!dev_priv->is_opened)
-               return;
-
-       dev_priv->is_opened = false;
        set.x = 0;
        set.y = 0;
        set.fb = NULL;
@@ -1120,7 +1103,6 @@ static const struct file_operations vmwgfx_driver_fops = {
        .mmap = vmw_mmap,
        .poll = vmw_fops_poll,
        .read = vmw_fops_read,
-       .fasync = drm_fasync,
 #if defined(CONFIG_COMPAT)
        .compat_ioctl = drm_compat_ioctl,
 #endif
@@ -1132,7 +1114,6 @@ static struct drm_driver driver = {
        DRIVER_MODESET,
        .load = vmw_driver_load,
        .unload = vmw_driver_unload,
-       .firstopen = vmw_firstopen,
        .lastclose = vmw_lastclose,
        .irq_preinstall = vmw_irq_preinstall,
        .irq_postinstall = vmw_irq_postinstall,
@@ -1143,7 +1124,6 @@ static struct drm_driver driver = {
        .disable_vblank = vmw_disable_vblank,
        .ioctls = vmw_ioctls,
        .num_ioctls = DRM_ARRAY_SIZE(vmw_ioctls),
-       .dma_quiescent = NULL,  /*vmw_dma_quiescent, */
        .master_create = vmw_master_create,
        .master_destroy = vmw_master_destroy,
        .master_set = vmw_master_set,
index 13aeda71280e396069c70352513149e5455e768c..150ec64af617e4974250a6deb36a5c6234766341 100644 (file)
@@ -324,7 +324,6 @@ struct vmw_private {
         */
 
        bool stealth;
-       bool is_opened;
        bool enable_fb;
 
        /**
index 7953d1f90b63d4eeefb0a44c8a43d2eee3345991..0e67cf41065d801e6526d23dc5cdf375bdabe1c7 100644 (file)
@@ -500,7 +500,7 @@ int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
                goto out_no_dmabuf;
 
        rep->handle = handle;
-       rep->map_handle = dma_buf->base.addr_space_offset;
+       rep->map_handle = drm_vma_node_offset_addr(&dma_buf->base.vma_node);
        rep->cur_gmr_id = handle;
        rep->cur_gmr_offset = 0;
 
@@ -834,7 +834,7 @@ int vmw_dumb_map_offset(struct drm_file *file_priv,
        if (ret != 0)
                return -EINVAL;
 
-       *offset = out_buf->base.addr_space_offset;
+       *offset = drm_vma_node_offset_addr(&out_buf->base.vma_node);
        vmw_dmabuf_unreference(&out_buf);
        return 0;
 }
index e184b00faacdfe3688dd262a1bbac8f4346a6643..8c61ceeaa12dda8b33cb5d9c52d987e46adb4573 100644 (file)
@@ -356,7 +356,7 @@ static int tegra_gem_mmap(struct drm_device *drm, void *data,
 
        bo = to_tegra_bo(gem);
 
-       args->offset = tegra_bo_get_mmap_offset(bo);
+       args->offset = drm_vma_node_offset_addr(&bo->gem.vma_node);
 
        drm_gem_object_unreference(gem);
 
@@ -487,7 +487,7 @@ static int tegra_submit(struct drm_device *drm, void *data,
 }
 #endif
 
-static struct drm_ioctl_desc tegra_drm_ioctls[] = {
+static const struct drm_ioctl_desc tegra_drm_ioctls[] = {
 #ifdef CONFIG_DRM_TEGRA_STAGING
        DRM_IOCTL_DEF_DRV(TEGRA_GEM_CREATE, tegra_gem_create, DRM_UNLOCKED | DRM_AUTH),
        DRM_IOCTL_DEF_DRV(TEGRA_GEM_MMAP, tegra_gem_mmap, DRM_UNLOCKED),
@@ -508,7 +508,6 @@ static const struct file_operations tegra_drm_fops = {
        .unlocked_ioctl = drm_ioctl,
        .mmap = tegra_drm_mmap,
        .poll = drm_poll,
-       .fasync = drm_fasync,
        .read = drm_read,
 #ifdef CONFIG_COMPAT
        .compat_ioctl = drm_compat_ioctl,
@@ -633,7 +632,7 @@ struct drm_driver tegra_drm_driver = {
        .gem_vm_ops = &tegra_bo_vm_ops,
        .dumb_create = tegra_bo_dumb_create,
        .dumb_map_offset = tegra_bo_dumb_map_offset,
-       .dumb_destroy = tegra_bo_dumb_destroy,
+       .dumb_destroy = drm_gem_dumb_destroy,
 
        .ioctls = tegra_drm_ioctls,
        .num_ioctls = ARRAY_SIZE(tegra_drm_ioctls),
index c5e9a9b494c2c3b1973194bd49295d4585072556..59623de4ee15f3bf9a186d80481f358397fbf99e 100644 (file)
@@ -106,11 +106,6 @@ static void tegra_bo_destroy(struct drm_device *drm, struct tegra_bo *bo)
        dma_free_writecombine(drm->dev, bo->gem.size, bo->vaddr, bo->paddr);
 }
 
-unsigned int tegra_bo_get_mmap_offset(struct tegra_bo *bo)
-{
-       return (unsigned int)bo->gem.map_list.hash.key << PAGE_SHIFT;
-}
-
 struct tegra_bo *tegra_bo_create(struct drm_device *drm, unsigned int size)
 {
        struct tegra_bo *bo;
@@ -182,8 +177,7 @@ void tegra_bo_free_object(struct drm_gem_object *gem)
 {
        struct tegra_bo *bo = to_tegra_bo(gem);
 
-       if (gem->map_list.map)
-               drm_gem_free_mmap_offset(gem);
+       drm_gem_free_mmap_offset(gem);
 
        drm_gem_object_release(gem);
        tegra_bo_destroy(gem->dev, bo);
@@ -228,7 +222,7 @@ int tegra_bo_dumb_map_offset(struct drm_file *file, struct drm_device *drm,
 
        bo = to_tegra_bo(gem);
 
-       *offset = tegra_bo_get_mmap_offset(bo);
+       *offset = drm_vma_node_offset_addr(&bo->gem.vma_node);
 
        drm_gem_object_unreference(gem);
 
@@ -262,9 +256,3 @@ int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma)
 
        return ret;
 }
-
-int tegra_bo_dumb_destroy(struct drm_file *file, struct drm_device *drm,
-                         unsigned int handle)
-{
-       return drm_gem_handle_delete(file, handle);
-}
index 34de2b486eb72504a03aafe9a4e1d9f8bb68b3e8..492533a2dacb1bba249cde2ff08b63e1c855c291 100644 (file)
@@ -44,13 +44,10 @@ struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
                                            unsigned int size,
                                            unsigned int *handle);
 void tegra_bo_free_object(struct drm_gem_object *gem);
-unsigned int tegra_bo_get_mmap_offset(struct tegra_bo *bo);
 int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
                         struct drm_mode_create_dumb *args);
 int tegra_bo_dumb_map_offset(struct drm_file *file, struct drm_device *drm,
                             uint32_t handle, uint64_t *offset);
-int tegra_bo_dumb_destroy(struct drm_file *file, struct drm_device *drm,
-                         unsigned int handle);
 
 int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma);
 
index 14ef6ab6979093605784dfe824c03ff931dd108c..3d7c9f67b6d7631504b9fbaed82fff152e295bb0 100644 (file)
@@ -743,6 +743,14 @@ config HID_WIIMOTE
        To compile this driver as a module, choose M here: the
        module will be called hid-wiimote.
 
+config HID_XINMO
+       tristate "Xin-Mo non-fully compliant devices"
+       depends on HID
+       ---help---
+       Support for Xin-Mo devices that are not fully compliant with the HID
+       standard. Currently only supports the Xin-Mo Dual Arcade. Say Y here
+       if you have a Xin-Mo Dual Arcade controller.
+
 config HID_ZEROPLUS
        tristate "Zeroplus based game controller support"
        depends on HID
index 6f687287e2125168fbfa46ab0f8d9384a08dc797..a959f4aecaf5754d1fd9499b1bad386e57615df5 100644 (file)
@@ -110,6 +110,7 @@ obj-$(CONFIG_HID_TIVO)              += hid-tivo.o
 obj-$(CONFIG_HID_TOPSEED)      += hid-topseed.o
 obj-$(CONFIG_HID_TWINHAN)      += hid-twinhan.o
 obj-$(CONFIG_HID_UCLOGIC)      += hid-uclogic.o
+obj-$(CONFIG_HID_XINMO)                += hid-xinmo.o
 obj-$(CONFIG_HID_ZEROPLUS)     += hid-zpff.o
 obj-$(CONFIG_HID_ZYDACRON)     += hid-zydacron.o
 obj-$(CONFIG_HID_WACOM)                += hid-wacom.o
index 7c5507e94820cc93b8867884dad993244431668a..9428ea7cdf8a00dc686e00c6da77be5cb60215b7 100644 (file)
@@ -90,11 +90,10 @@ static int a4_probe(struct hid_device *hdev, const struct hid_device_id *id)
        struct a4tech_sc *a4;
        int ret;
 
-       a4 = kzalloc(sizeof(*a4), GFP_KERNEL);
+       a4 = devm_kzalloc(&hdev->dev, sizeof(*a4), GFP_KERNEL);
        if (a4 == NULL) {
                hid_err(hdev, "can't alloc device descriptor\n");
-               ret = -ENOMEM;
-               goto err_free;
+               return -ENOMEM;
        }
 
        a4->quirks = id->driver_data;
@@ -104,27 +103,16 @@ static int a4_probe(struct hid_device *hdev, const struct hid_device_id *id)
        ret = hid_parse(hdev);
        if (ret) {
                hid_err(hdev, "parse failed\n");
-               goto err_free;
+               return ret;
        }
 
        ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
        if (ret) {
                hid_err(hdev, "hw start failed\n");
-               goto err_free;
+               return ret;
        }
 
        return 0;
-err_free:
-       kfree(a4);
-       return ret;
-}
-
-static void a4_remove(struct hid_device *hdev)
-{
-       struct a4tech_sc *a4 = hid_get_drvdata(hdev);
-
-       hid_hw_stop(hdev);
-       kfree(a4);
 }
 
 static const struct hid_device_id a4_devices[] = {
@@ -144,7 +132,6 @@ static struct hid_driver a4_driver = {
        .input_mapped = a4_input_mapped,
        .event = a4_event,
        .probe = a4_probe,
-       .remove = a4_remove,
 };
 module_hid_driver(a4_driver);
 
index c7710b5c69afbf009d0e9f2d727668e6cdae5045..881cf7b4f9a433f8ae3e0f6721b3789aa66731f7 100644 (file)
@@ -349,7 +349,7 @@ static int apple_probe(struct hid_device *hdev,
        unsigned int connect_mask = HID_CONNECT_DEFAULT;
        int ret;
 
-       asc = kzalloc(sizeof(*asc), GFP_KERNEL);
+       asc = devm_kzalloc(&hdev->dev, sizeof(*asc), GFP_KERNEL);
        if (asc == NULL) {
                hid_err(hdev, "can't alloc apple descriptor\n");
                return -ENOMEM;
@@ -362,7 +362,7 @@ static int apple_probe(struct hid_device *hdev,
        ret = hid_parse(hdev);
        if (ret) {
                hid_err(hdev, "parse failed\n");
-               goto err_free;
+               return ret;
        }
 
        if (quirks & APPLE_HIDDEV)
@@ -373,19 +373,10 @@ static int apple_probe(struct hid_device *hdev,
        ret = hid_hw_start(hdev, connect_mask);
        if (ret) {
                hid_err(hdev, "hw start failed\n");
-               goto err_free;
+               return ret;
        }
 
        return 0;
-err_free:
-       kfree(asc);
-       return ret;
-}
-
-static void apple_remove(struct hid_device *hdev)
-{
-       hid_hw_stop(hdev);
-       kfree(hid_get_drvdata(hdev));
 }
 
 static const struct hid_device_id apple_devices[] = {
@@ -551,7 +542,6 @@ static struct hid_driver apple_driver = {
        .id_table = apple_devices,
        .report_fixup = apple_report_fixup,
        .probe = apple_probe,
-       .remove = apple_remove,
        .event = apple_event,
        .input_mapping = apple_input_mapping,
        .input_mapped = apple_input_mapped,
index 36668d1aca8fc2133299e93cf994857d78cbea2e..3efe19f4deaed585548615e1fa1cb4fbc5ffeed7 100644 (file)
@@ -450,7 +450,7 @@ static int hid_parser_local(struct hid_parser *parser, struct hid_item *item)
                        }
                        parser->local.delimiter_depth--;
                }
-               return 1;
+               return 0;
 
        case HID_LOCAL_ITEM_TAG_USAGE:
 
@@ -1128,7 +1128,8 @@ static void hid_output_field(const struct hid_device *hid,
 }
 
 /*
- * Create a report.
+ * Create a report. 'data' has to be allocated using
+ * hid_alloc_report_buf() so that it has proper size.
  */
 
 void hid_output_report(struct hid_report *report, __u8 *data)
@@ -1144,6 +1145,22 @@ void hid_output_report(struct hid_report *report, __u8 *data)
 }
 EXPORT_SYMBOL_GPL(hid_output_report);
 
+/*
+ * Allocator for buffer that is going to be passed to hid_output_report()
+ */
+u8 *hid_alloc_report_buf(struct hid_report *report, gfp_t flags)
+{
+       /*
+        * 7 extra bytes are necessary to achieve proper functionality
+        * of implement() working on 8 byte chunks
+        */
+
+       int len = ((report->size - 1) >> 3) + 1 + (report->id > 0) + 7;
+
+       return kmalloc(len, flags);
+}
+EXPORT_SYMBOL_GPL(hid_alloc_report_buf);
+
 /*
  * Set a field value. The report this field belongs to has to be
  * created and transferred to the device, to set this value in the
@@ -1597,6 +1614,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_KENSINGTON, USB_DEVICE_ID_KS_SLIMBLADE) },
        { HID_USB_DEVICE(USB_VENDOR_ID_KEYTOUCH, USB_DEVICE_ID_KEYTOUCH_IEC) },
        { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_GILA_GAMING_MOUSE) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_GX_IMPERATOR) },
        { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_ERGO_525V) },
        { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_I405X) },
        { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X) },
@@ -1736,6 +1754,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_14_1_INCH) },
        { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SIRIUS_BATTERY_FREE_TABLET) },
        { HID_USB_DEVICE(USB_VENDOR_ID_X_TENSIONS, USB_DEVICE_ID_SPEEDLINK_VAD_CEZANNE) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_XIN_MO_DUAL_ARCADE) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0005) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0030) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ZYDACRON, USB_DEVICE_ID_ZYDACRON_REMOTE_CONTROL) },
index 9a8f051245258973f53ad84f8debef8b2f6db7e9..9325545fc3ae1cac4e3919c1120af6841590d4f4 100644 (file)
@@ -98,7 +98,7 @@ static void holtekff_send(struct holtekff_device *holtekff,
                holtekff->field->value[i] = data[i];
        }
 
-       dbg_hid("sending %*ph\n", 7, data);
+       dbg_hid("sending %7ph\n", data);
 
        hid_hw_request(hid, holtekff->field->report, HID_REQ_SET_REPORT);
 }
index ffe4c7ae3340150cdcfb8c9f3db9c241422cf8eb..4f01a52c98abda7f60252debf6dacaa53c80ab35 100644 (file)
 #define USB_VENDOR_ID_KYE              0x0458
 #define USB_DEVICE_ID_KYE_ERGO_525V    0x0087
 #define USB_DEVICE_ID_GENIUS_GILA_GAMING_MOUSE 0x0138
+#define USB_DEVICE_ID_GENIUS_GX_IMPERATOR      0x4018
 #define USB_DEVICE_ID_KYE_GPEN_560     0x5003
 #define USB_DEVICE_ID_KYE_EASYPEN_I405X        0x5010
 #define USB_DEVICE_ID_KYE_MOUSEPEN_I608X       0x5011
 #define USB_VENDOR_ID_XAT      0x2505
 #define USB_DEVICE_ID_XAT_CSR  0x0220
 
+#define USB_VENDOR_ID_XIN_MO                   0x16c0
+#define USB_DEVICE_ID_XIN_MO_DUAL_ARCADE       0x05e1
+
 #define USB_VENDOR_ID_XIROKU           0x1477
 #define USB_DEVICE_ID_XIROKU_SPX       0x1006
 #define USB_DEVICE_ID_XIROKU_MPX       0x1007
index 7480799e535cb229a8bae89cf3795d1a5e91b738..308eee8fc7c3b1145b62b5508404d24655c355f9 100644 (file)
@@ -1137,6 +1137,74 @@ unsigned int hidinput_count_leds(struct hid_device *hid)
 }
 EXPORT_SYMBOL_GPL(hidinput_count_leds);
 
+static void hidinput_led_worker(struct work_struct *work)
+{
+       struct hid_device *hid = container_of(work, struct hid_device,
+                                             led_work);
+       struct hid_field *field;
+       struct hid_report *report;
+       int len;
+       __u8 *buf;
+
+       field = hidinput_get_led_field(hid);
+       if (!field)
+               return;
+
+       /*
+        * field->report is accessed unlocked regarding HID core. So there might
+        * be another incoming SET-LED request from user-space, which changes
+        * the LED state while we assemble our outgoing buffer. However, this
+        * doesn't matter as hid_output_report() correctly converts it into a
+        * boolean value no matter what information is currently set on the LED
+        * field (even garbage). So the remote device will always get a valid
+        * request.
+        * And in case we send a wrong value, a next led worker is spawned
+        * for every SET-LED request so the following worker will send the
+        * correct value, guaranteed!
+        */
+
+       report = field->report;
+
+       /* use custom SET_REPORT request if possible (asynchronous) */
+       if (hid->ll_driver->request)
+               return hid->ll_driver->request(hid, report, HID_REQ_SET_REPORT);
+
+       /* fall back to generic raw-output-report */
+       len = ((report->size - 1) >> 3) + 1 + (report->id > 0);
+       buf = kmalloc(len, GFP_KERNEL);
+       if (!buf)
+               return;
+
+       hid_output_report(report, buf);
+       /* synchronous output report */
+       hid->hid_output_raw_report(hid, buf, len, HID_OUTPUT_REPORT);
+       kfree(buf);
+}
+
+static int hidinput_input_event(struct input_dev *dev, unsigned int type,
+                               unsigned int code, int value)
+{
+       struct hid_device *hid = input_get_drvdata(dev);
+       struct hid_field *field;
+       int offset;
+
+       if (type == EV_FF)
+               return input_ff_event(dev, type, code, value);
+
+       if (type != EV_LED)
+               return -1;
+
+       if ((offset = hidinput_find_field(hid, type, code, &field)) == -1) {
+               hid_warn(dev, "event field not found\n");
+               return -1;
+       }
+
+       hid_set_field(field, offset, value);
+
+       schedule_work(&hid->led_work);
+       return 0;
+}
+
 static int hidinput_open(struct input_dev *dev)
 {
        struct hid_device *hid = input_get_drvdata(dev);
@@ -1183,7 +1251,10 @@ static struct hid_input *hidinput_allocate(struct hid_device *hid)
        }
 
        input_set_drvdata(input_dev, hid);
-       input_dev->event = hid->ll_driver->hidinput_input_event;
+       if (hid->ll_driver->hidinput_input_event)
+               input_dev->event = hid->ll_driver->hidinput_input_event;
+       else if (hid->ll_driver->request || hid->hid_output_raw_report)
+               input_dev->event = hidinput_input_event;
        input_dev->open = hidinput_open;
        input_dev->close = hidinput_close;
        input_dev->setkeycode = hidinput_setkeycode;
@@ -1278,6 +1349,7 @@ int hidinput_connect(struct hid_device *hid, unsigned int force)
        int i, j, k;
 
        INIT_LIST_HEAD(&hid->inputs);
+       INIT_WORK(&hid->led_work, hidinput_led_worker);
 
        if (!force) {
                for (i = 0; i < hid->maxcollection; i++) {
@@ -1379,6 +1451,12 @@ void hidinput_disconnect(struct hid_device *hid)
                input_unregister_device(hidinput->input);
                kfree(hidinput);
        }
+
+       /* led_work is spawned by input_dev callbacks, but doesn't access the
+        * parent input_dev at all. Once all input devices are removed, we
+        * know that led_work will never get restarted, so we can cancel it
+        * synchronously and are safe. */
+       cancel_work_sync(&hid->led_work);
 }
 EXPORT_SYMBOL_GPL(hidinput_disconnect);
 
index 1e2ee2aa84a023fc5e4412c7fe8c04666a3e6a8e..73845120295eba6f678679715c9ca3f97917a643 100644 (file)
@@ -268,6 +268,26 @@ static __u8 easypen_m610x_rdesc_fixed[] = {
        0xC0                          /*  End Collection                  */
 };
 
+static __u8 *kye_consumer_control_fixup(struct hid_device *hdev, __u8 *rdesc,
+               unsigned int *rsize, int offset, const char *device_name) {
+       /*
+        * the fixup that need to be done:
+        *   - change Usage Maximum in the Comsumer Control
+        *     (report ID 3) to a reasonable value
+        */
+       if (*rsize >= offset + 31 &&
+           /* Usage Page (Consumer Devices) */
+           rdesc[offset] == 0x05 && rdesc[offset + 1] == 0x0c &&
+           /* Usage (Consumer Control) */
+           rdesc[offset + 2] == 0x09 && rdesc[offset + 3] == 0x01 &&
+           /*   Usage Maximum > 12287 */
+           rdesc[offset + 10] == 0x2a && rdesc[offset + 12] > 0x2f) {
+               hid_info(hdev, "fixing up %s report descriptor\n", device_name);
+               rdesc[offset + 12] = 0x2f;
+       }
+       return rdesc;
+}
+
 static __u8 *kye_report_fixup(struct hid_device *hdev, __u8 *rdesc,
                unsigned int *rsize)
 {
@@ -315,23 +335,12 @@ static __u8 *kye_report_fixup(struct hid_device *hdev, __u8 *rdesc,
                }
                break;
        case USB_DEVICE_ID_GENIUS_GILA_GAMING_MOUSE:
-               /*
-                * the fixup that need to be done:
-                *   - change Usage Maximum in the Comsumer Control
-                *     (report ID 3) to a reasonable value
-                */
-               if (*rsize >= 135 &&
-                       /* Usage Page (Consumer Devices) */
-                       rdesc[104] == 0x05 && rdesc[105] == 0x0c &&
-                       /* Usage (Consumer Control) */
-                       rdesc[106] == 0x09 && rdesc[107] == 0x01 &&
-                       /*   Usage Maximum > 12287 */
-                       rdesc[114] == 0x2a && rdesc[116] > 0x2f) {
-                       hid_info(hdev,
-                                "fixing up Genius Gila Gaming Mouse "
-                                "report descriptor\n");
-                       rdesc[116] = 0x2f;
-               }
+               rdesc = kye_consumer_control_fixup(hdev, rdesc, rsize, 104,
+                                       "Genius Gila Gaming Mouse");
+               break;
+       case USB_DEVICE_ID_GENIUS_GX_IMPERATOR:
+               rdesc = kye_consumer_control_fixup(hdev, rdesc, rsize, 83,
+                                       "Genius Gx Imperator Keyboard");
                break;
        }
        return rdesc;
@@ -428,6 +437,8 @@ static const struct hid_device_id kye_devices[] = {
                                USB_DEVICE_ID_KYE_EASYPEN_M610X) },
        { HID_USB_DEVICE(USB_VENDOR_ID_KYE,
                                USB_DEVICE_ID_GENIUS_GILA_GAMING_MOUSE) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_KYE,
+                               USB_DEVICE_ID_GENIUS_GX_IMPERATOR) },
        { }
 };
 MODULE_DEVICE_TABLE(hid, kye_devices);
index cd33084c78602146d42efca60e358d08f00da219..7800b141056243400bfa316b7289e2c8431aa82a 100644 (file)
@@ -619,7 +619,7 @@ static int logi_dj_ll_input_event(struct input_dev *dev, unsigned int type,
 
        struct hid_field *field;
        struct hid_report *report;
-       unsigned char data[8];
+       unsigned char *data;
        int offset;
 
        dbg_hid("%s: %s, type:%d | code:%d | value:%d\n",
@@ -635,6 +635,13 @@ static int logi_dj_ll_input_event(struct input_dev *dev, unsigned int type,
                return -1;
        }
        hid_set_field(field, offset, value);
+
+       data = hid_alloc_report_buf(field->report, GFP_ATOMIC);
+       if (!data) {
+               dev_warn(&dev->dev, "failed to allocate report buf memory\n");
+               return -1;
+       }
+
        hid_output_report(field->report, &data[0]);
 
        output_report_enum = &dj_rcv_hiddev->report_enum[HID_OUTPUT_REPORT];
@@ -645,8 +652,9 @@ static int logi_dj_ll_input_event(struct input_dev *dev, unsigned int type,
 
        hid_hw_request(dj_rcv_hiddev, report, HID_REQ_SET_REPORT);
 
-       return 0;
+       kfree(data);
 
+       return 0;
 }
 
 static int logi_dj_ll_start(struct hid_device *hid)
@@ -801,10 +809,10 @@ static int logi_dj_probe(struct hid_device *hdev,
        }
 
        /* This is enabling the polling urb on the IN endpoint */
-       retval = hdev->ll_driver->open(hdev);
+       retval = hid_hw_open(hdev);
        if (retval < 0) {
-               dev_err(&hdev->dev, "%s:hdev->ll_driver->open returned "
-                       "error:%d\n", __func__, retval);
+               dev_err(&hdev->dev, "%s:hid_hw_open returned error:%d\n",
+                       __func__, retval);
                goto llopen_failed;
        }
 
@@ -821,7 +829,7 @@ static int logi_dj_probe(struct hid_device *hdev,
        return retval;
 
 logi_dj_recv_query_paired_devices_failed:
-       hdev->ll_driver->close(hdev);
+       hid_hw_close(hdev);
 
 llopen_failed:
 switch_to_dj_mode_fail:
@@ -863,7 +871,7 @@ static void logi_dj_remove(struct hid_device *hdev)
 
        cancel_work_sync(&djrcv_dev->work);
 
-       hdev->ll_driver->close(hdev);
+       hid_hw_close(hdev);
        hid_hw_stop(hdev);
 
        /* I suppose that at this point the only context that can access
index 5bc37343eb22b3de7f3cca6163c6f1fd9a10dd4b..3b43d1cfa9368609302de46a73658779199d446f 100644 (file)
@@ -36,7 +36,7 @@ MODULE_PARM_DESC(emulate_scroll_wheel, "Emulate a scroll wheel");
 static unsigned int scroll_speed = 32;
 static int param_set_scroll_speed(const char *val, struct kernel_param *kp) {
        unsigned long speed;
-       if (!val || strict_strtoul(val, 0, &speed) || speed > 63)
+       if (!val || kstrtoul(val, 0, &speed) || speed > 63)
                return -EINVAL;
        scroll_speed = speed;
        return 0;
@@ -484,7 +484,7 @@ static int magicmouse_probe(struct hid_device *hdev,
        struct hid_report *report;
        int ret;
 
-       msc = kzalloc(sizeof(*msc), GFP_KERNEL);
+       msc = devm_kzalloc(&hdev->dev, sizeof(*msc), GFP_KERNEL);
        if (msc == NULL) {
                hid_err(hdev, "can't alloc magicmouse descriptor\n");
                return -ENOMEM;
@@ -498,13 +498,13 @@ static int magicmouse_probe(struct hid_device *hdev,
        ret = hid_parse(hdev);
        if (ret) {
                hid_err(hdev, "magicmouse hid parse failed\n");
-               goto err_free;
+               return ret;
        }
 
        ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
        if (ret) {
                hid_err(hdev, "magicmouse hw start failed\n");
-               goto err_free;
+               return ret;
        }
 
        if (!msc->input) {
@@ -548,19 +548,9 @@ static int magicmouse_probe(struct hid_device *hdev,
        return 0;
 err_stop_hw:
        hid_hw_stop(hdev);
-err_free:
-       kfree(msc);
        return ret;
 }
 
-static void magicmouse_remove(struct hid_device *hdev)
-{
-       struct magicmouse_sc *msc = hid_get_drvdata(hdev);
-
-       hid_hw_stop(hdev);
-       kfree(msc);
-}
-
 static const struct hid_device_id magic_mice[] = {
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
                USB_DEVICE_ID_APPLE_MAGICMOUSE), .driver_data = 0 },
@@ -574,7 +564,6 @@ static struct hid_driver magicmouse_driver = {
        .name = "magicmouse",
        .id_table = magic_mice,
        .probe = magicmouse_probe,
-       .remove = magicmouse_remove,
        .raw_event = magicmouse_raw_event,
        .input_mapping = magicmouse_input_mapping,
        .input_configured = magicmouse_input_configured,
index cb0e361d7a4be8e5e790f9ab250ffbfd17c52fe7..0fe00e2552f23aebbdbeb408ef18a7be46d115c9 100644 (file)
@@ -261,17 +261,6 @@ static struct mt_class mt_classes[] = {
        { }
 };
 
-static void mt_free_input_name(struct hid_input *hi)
-{
-       struct hid_device *hdev = hi->report->device;
-       const char *name = hi->input->name;
-
-       if (name != hdev->name) {
-               hi->input->name = hdev->name;
-               kfree(name);
-       }
-}
-
 static ssize_t mt_show_quirks(struct device *dev,
                           struct device_attribute *attr,
                           char *buf)
@@ -415,13 +404,6 @@ static void mt_pen_report(struct hid_device *hid, struct hid_report *report)
 static void mt_pen_input_configured(struct hid_device *hdev,
                                        struct hid_input *hi)
 {
-       char *name = kzalloc(strlen(hi->input->name) + 5, GFP_KERNEL);
-       if (name) {
-               sprintf(name, "%s Pen", hi->input->name);
-               mt_free_input_name(hi);
-               hi->input->name = name;
-       }
-
        /* force BTN_STYLUS to allow tablet matching in udev */
        __set_bit(BTN_STYLUS, hi->input->keybit);
 }
@@ -928,16 +910,26 @@ static void mt_post_parse(struct mt_device *td)
 static void mt_input_configured(struct hid_device *hdev, struct hid_input *hi)
 {
        struct mt_device *td = hid_get_drvdata(hdev);
-       char *name = kstrdup(hdev->name, GFP_KERNEL);
-
-       if (name)
-               hi->input->name = name;
+       char *name;
+       const char *suffix = NULL;
 
        if (hi->report->id == td->mt_report_id)
                mt_touch_input_configured(hdev, hi);
 
-       if (hi->report->id == td->pen_report_id)
+       if (hi->report->field[0]->physical == HID_DG_STYLUS) {
+               suffix = "Pen";
                mt_pen_input_configured(hdev, hi);
+       }
+
+       if (suffix) {
+               name = devm_kzalloc(&hi->input->dev,
+                                   strlen(hdev->name) + strlen(suffix) + 2,
+                                   GFP_KERNEL);
+               if (name) {
+                       sprintf(name, "%s %s", hdev->name, suffix);
+                       hi->input->name = name;
+               }
+       }
 }
 
 static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
@@ -945,7 +937,6 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
        int ret, i;
        struct mt_device *td;
        struct mt_class *mtclass = mt_classes; /* MT_CLS_DEFAULT */
-       struct hid_input *hi;
 
        for (i = 0; mt_classes[i].name ; i++) {
                if (id->driver_data == mt_classes[i].name) {
@@ -967,7 +958,7 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
        hdev->quirks |= HID_QUIRK_MULTI_INPUT;
        hdev->quirks |= HID_QUIRK_NO_EMPTY_INPUT;
 
-       td = kzalloc(sizeof(struct mt_device), GFP_KERNEL);
+       td = devm_kzalloc(&hdev->dev, sizeof(struct mt_device), GFP_KERNEL);
        if (!td) {
                dev_err(&hdev->dev, "cannot allocate multitouch data\n");
                return -ENOMEM;
@@ -980,11 +971,11 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
        td->pen_report_id = -1;
        hid_set_drvdata(hdev, td);
 
-       td->fields = kzalloc(sizeof(struct mt_fields), GFP_KERNEL);
+       td->fields = devm_kzalloc(&hdev->dev, sizeof(struct mt_fields),
+                                 GFP_KERNEL);
        if (!td->fields) {
                dev_err(&hdev->dev, "cannot allocate multitouch fields data\n");
-               ret = -ENOMEM;
-               goto fail;
+               return -ENOMEM;
        }
 
        if (id->vendor == HID_ANY_ID && id->product == HID_ANY_ID)
@@ -992,29 +983,22 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
 
        ret = hid_parse(hdev);
        if (ret != 0)
-               goto fail;
+               return ret;
 
        ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
        if (ret)
-               goto hid_fail;
+               return ret;
 
        ret = sysfs_create_group(&hdev->dev.kobj, &mt_attribute_group);
 
        mt_set_maxcontacts(hdev);
        mt_set_input_mode(hdev);
 
-       kfree(td->fields);
+       /* release .fields memory as it is not used anymore */
+       devm_kfree(&hdev->dev, td->fields);
        td->fields = NULL;
 
        return 0;
-
-hid_fail:
-       list_for_each_entry(hi, &hdev->inputs, list)
-               mt_free_input_name(hi);
-fail:
-       kfree(td->fields);
-       kfree(td);
-       return ret;
 }
 
 #ifdef CONFIG_PM
@@ -1039,17 +1023,8 @@ static int mt_resume(struct hid_device *hdev)
 
 static void mt_remove(struct hid_device *hdev)
 {
-       struct mt_device *td = hid_get_drvdata(hdev);
-       struct hid_input *hi;
-
        sysfs_remove_group(&hdev->dev.kobj, &mt_attribute_group);
-       list_for_each_entry(hi, &hdev->inputs, list)
-               mt_free_input_name(hi);
-
        hid_hw_stop(hdev);
-
-       kfree(td);
-       hid_set_drvdata(hdev, NULL);
 }
 
 static const struct hid_device_id mt_devices[] = {
index ef95102515e4d499143dd02a831481902d674b1e..98d1fdf7d8cd0d2e3d5c7ae6b240e15085a951d7 100644 (file)
@@ -237,7 +237,7 @@ static ssize_t set_min_width(struct device *dev,
 
        unsigned long val;
 
-       if (strict_strtoul(buf, 0, &val))
+       if (kstrtoul(buf, 0, &val))
                return -EINVAL;
 
        if (val > nd->sensor_physical_width)
@@ -272,7 +272,7 @@ static ssize_t set_min_height(struct device *dev,
 
        unsigned long val;
 
-       if (strict_strtoul(buf, 0, &val))
+       if (kstrtoul(buf, 0, &val))
                return -EINVAL;
 
        if (val > nd->sensor_physical_height)
@@ -306,7 +306,7 @@ static ssize_t set_activate_slack(struct device *dev,
 
        unsigned long val;
 
-       if (strict_strtoul(buf, 0, &val))
+       if (kstrtoul(buf, 0, &val))
                return -EINVAL;
 
        if (val > 0x7f)
@@ -341,7 +341,7 @@ static ssize_t set_activation_width(struct device *dev,
 
        unsigned long val;
 
-       if (strict_strtoul(buf, 0, &val))
+       if (kstrtoul(buf, 0, &val))
                return -EINVAL;
 
        if (val > nd->sensor_physical_width)
@@ -377,7 +377,7 @@ static ssize_t set_activation_height(struct device *dev,
 
        unsigned long val;
 
-       if (strict_strtoul(buf, 0, &val))
+       if (kstrtoul(buf, 0, &val))
                return -EINVAL;
 
        if (val > nd->sensor_physical_height)
@@ -411,7 +411,7 @@ static ssize_t set_deactivate_slack(struct device *dev,
 
        unsigned long val;
 
-       if (strict_strtoul(buf, 0, &val))
+       if (kstrtoul(buf, 0, &val))
                return -EINVAL;
 
        /*
index 59ab8e157e6b249d025ea26d4bd0ba4b2bb7f227..024cdf3c2297f91d6288cd13cf75e3a4c4881991 100644 (file)
@@ -394,7 +394,7 @@ static void dump_buff_as_hex(char *dst, size_t dst_sz, const u8 *data,
 void picolcd_debug_out_report(struct picolcd_data *data,
                struct hid_device *hdev, struct hid_report *report)
 {
-       u8 raw_data[70];
+       u8 *raw_data;
        int raw_size = (report->size >> 3) + 1;
        char *buff;
 #define BUFF_SZ 256
@@ -407,20 +407,20 @@ void picolcd_debug_out_report(struct picolcd_data *data,
        if (!buff)
                return;
 
-       snprintf(buff, BUFF_SZ, "\nout report %d (size %d) =  ",
-                       report->id, raw_size);
-       hid_debug_event(hdev, buff);
-       if (raw_size + 5 > sizeof(raw_data)) {
+       raw_data = hid_alloc_report_buf(report, GFP_ATOMIC);
+       if (!raw_data) {
                kfree(buff);
-               hid_debug_event(hdev, " TOO BIG\n");
                return;
-       } else {
-               raw_data[0] = report->id;
-               hid_output_report(report, raw_data);
-               dump_buff_as_hex(buff, BUFF_SZ, raw_data, raw_size);
-               hid_debug_event(hdev, buff);
        }
 
+       snprintf(buff, BUFF_SZ, "\nout report %d (size %d) =  ",
+                       report->id, raw_size);
+       hid_debug_event(hdev, buff);
+       raw_data[0] = report->id;
+       hid_output_report(report, raw_data);
+       dump_buff_as_hex(buff, BUFF_SZ, raw_data, raw_size);
+       hid_debug_event(hdev, buff);
+
        switch (report->id) {
        case REPORT_LED_STATE:
                /* 1 data byte with GPO state */
@@ -644,6 +644,7 @@ void picolcd_debug_out_report(struct picolcd_data *data,
                break;
        }
        wake_up_interruptible(&hdev->debug_wait);
+       kfree(raw_data);
        kfree(buff);
 }
 
index 327f9b8ed1f4ea3cea3e89166c29570a9539d306..071ee9e2fd9fea5c4d40165e60e6f1b71e5bd7a4 100644 (file)
@@ -59,7 +59,7 @@ static ssize_t arvo_sysfs_set_mode_key(struct device *dev,
        unsigned long state;
        int retval;
 
-       retval = strict_strtoul(buf, 10, &state);
+       retval = kstrtoul(buf, 10, &state);
        if (retval)
                return retval;
 
@@ -107,7 +107,7 @@ static ssize_t arvo_sysfs_set_key_mask(struct device *dev,
        unsigned long key_mask;
        int retval;
 
-       retval = strict_strtoul(buf, 10, &key_mask);
+       retval = kstrtoul(buf, 10, &key_mask);
        if (retval)
                return retval;
 
@@ -159,7 +159,7 @@ static ssize_t arvo_sysfs_set_actual_profile(struct device *dev,
        unsigned long profile;
        int retval;
 
-       retval = strict_strtoul(buf, 10, &profile);
+       retval = kstrtoul(buf, 10, &profile);
        if (retval)
                return retval;
 
index 8023751d525766ae16b32f8a8943f8c5f9b0796e..5dd0ea4eb4f71ec6cfc210b5a71a8b648a709461 100644 (file)
@@ -82,7 +82,7 @@ static ssize_t isku_sysfs_set_actual_profile(struct device *dev,
        isku = hid_get_drvdata(dev_get_drvdata(dev));
        usb_dev = interface_to_usbdev(to_usb_interface(dev));
 
-       retval = strict_strtoul(buf, 10, &profile);
+       retval = kstrtoul(buf, 10, &profile);
        if (retval)
                return retval;
 
index 7fae070788fa2e4b03fb9281e792aaf9e24065a4..00ab287f73849b94d85c3d4bf603733273396fc6 100644 (file)
@@ -456,7 +456,7 @@ static ssize_t kone_sysfs_set_tcu(struct device *dev,
        kone = hid_get_drvdata(dev_get_drvdata(dev));
        usb_dev = interface_to_usbdev(to_usb_interface(dev));
 
-       retval = strict_strtoul(buf, 10, &state);
+       retval = kstrtoul(buf, 10, &state);
        if (retval)
                return retval;
 
@@ -545,7 +545,7 @@ static ssize_t kone_sysfs_set_startup_profile(struct device *dev,
        kone = hid_get_drvdata(dev_get_drvdata(dev));
        usb_dev = interface_to_usbdev(to_usb_interface(dev));
 
-       retval = strict_strtoul(buf, 10, &new_startup_profile);
+       retval = kstrtoul(buf, 10, &new_startup_profile);
        if (retval)
                return retval;
 
index 6a48fa3c7da913e487e5aa0b1538ba5aff19bbb1..26b9663ddf4746ce1590733745d77f512c46d8c0 100644 (file)
@@ -246,7 +246,7 @@ static ssize_t koneplus_sysfs_set_actual_profile(struct device *dev,
        koneplus = hid_get_drvdata(dev_get_drvdata(dev));
        usb_dev = interface_to_usbdev(to_usb_interface(dev));
 
-       retval = strict_strtoul(buf, 10, &profile);
+       retval = kstrtoul(buf, 10, &profile);
        if (retval)
                return retval;
 
index b8b37789b864bbeb3c4de24047658172518b25ec..c2a17e45c99cb6fa49443e68c0b37fe9f7839f99 100644 (file)
@@ -282,7 +282,7 @@ static ssize_t kovaplus_sysfs_set_actual_profile(struct device *dev,
        kovaplus = hid_get_drvdata(dev_get_drvdata(dev));
        usb_dev = interface_to_usbdev(to_usb_interface(dev));
 
-       retval = strict_strtoul(buf, 10, &profile);
+       retval = kstrtoul(buf, 10, &profile);
        if (retval)
                return retval;
 
index 87fbe2924cfac3852ee8b202b8aaeb8500de0080..30dbb6b40bbf17e93c63b2bd6d3eb3b7f8111593 100644 (file)
@@ -624,7 +624,7 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
        struct sony_sc *sc;
        unsigned int connect_mask = HID_CONNECT_DEFAULT;
 
-       sc = kzalloc(sizeof(*sc), GFP_KERNEL);
+       sc = devm_kzalloc(&hdev->dev, sizeof(*sc), GFP_KERNEL);
        if (sc == NULL) {
                hid_err(hdev, "can't alloc sony descriptor\n");
                return -ENOMEM;
@@ -636,7 +636,7 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
        ret = hid_parse(hdev);
        if (ret) {
                hid_err(hdev, "parse failed\n");
-               goto err_free;
+               return ret;
        }
 
        if (sc->quirks & VAIO_RDESC_CONSTANT)
@@ -649,7 +649,7 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
        ret = hid_hw_start(hdev, connect_mask);
        if (ret) {
                hid_err(hdev, "hw start failed\n");
-               goto err_free;
+               return ret;
        }
 
        if (sc->quirks & SIXAXIS_CONTROLLER_USB) {
@@ -669,8 +669,6 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
        return 0;
 err_stop:
        hid_hw_stop(hdev);
-err_free:
-       kfree(sc);
        return ret;
 }
 
@@ -682,7 +680,6 @@ static void sony_remove(struct hid_device *hdev)
                buzz_remove(hdev);
 
        hid_hw_stop(hdev);
-       kfree(sc);
 }
 
 static const struct hid_device_id sony_devices[] = {
index 0c06054cab8f01033404f35375c0e12584db3c51..660209824e5618d827b186ba9cfc1ac201708e5e 100644 (file)
@@ -212,10 +212,12 @@ static __u8 select_drm(struct wiimote_data *wdata)
 
        if (ir == WIIPROTO_FLAG_IR_BASIC) {
                if (wdata->state.flags & WIIPROTO_FLAG_ACCEL) {
-                       if (ext)
-                               return WIIPROTO_REQ_DRM_KAIE;
-                       else
-                               return WIIPROTO_REQ_DRM_KAI;
+                       /* GEN10 and ealier devices bind IR formats to DRMs.
+                        * Hence, we cannot use DRM_KAI here as it might be
+                        * bound to IR_EXT. Use DRM_KAIE unconditionally so we
+                        * work with all devices and our parsers can use the
+                        * fixed formats, too. */
+                       return WIIPROTO_REQ_DRM_KAIE;
                } else {
                        return WIIPROTO_REQ_DRM_KIE;
                }
diff --git a/drivers/hid/hid-xinmo.c b/drivers/hid/hid-xinmo.c
new file mode 100644 (file)
index 0000000..6153e50
--- /dev/null
@@ -0,0 +1,72 @@
+/*
+ *  HID driver for Xin-Mo devices, currently only the Dual Arcade controller.
+ *  Fixes the negative axis event values (the devices sends -2) to match the
+ *  logical axis minimum of the HID report descriptor (the report announces
+ *  -1). It is needed because hid-input discards out of bounds values.
+ *  (This module is based on "hid-saitek" and "hid-lg".)
+ *
+ *  Copyright (c) 2013 Olivier Scherler
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/device.h>
+#include <linux/hid.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+
+#include "hid-ids.h"
+
+/*
+ * Fix negative events that are out of bounds.
+ */
+static int xinmo_event(struct hid_device *hdev, struct hid_field *field,
+               struct hid_usage *usage, __s32 value)
+{
+       switch (usage->code) {
+       case ABS_X:
+       case ABS_Y:
+       case ABS_Z:
+       case ABS_RX:
+               if (value < -1) {
+                       input_event(field->hidinput->input, usage->type,
+                               usage->code, -1);
+                       return 1;
+               }
+               break;
+       }
+
+       return 0;
+}
+
+static const struct hid_device_id xinmo_devices[] = {
+       { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_XIN_MO_DUAL_ARCADE) },
+       { }
+};
+
+MODULE_DEVICE_TABLE(hid, xinmo_devices);
+
+static struct hid_driver xinmo_driver = {
+       .name = "xinmo",
+       .id_table = xinmo_devices,
+       .event = xinmo_event
+};
+
+static int __init xinmo_init(void)
+{
+       return hid_register_driver(&xinmo_driver);
+}
+
+static void __exit xinmo_exit(void)
+{
+       hid_unregister_driver(&xinmo_driver);
+}
+
+module_init(xinmo_init);
+module_exit(xinmo_exit);
+MODULE_LICENSE("GPL");
index e4cddeccd6b51691bffc955a62569ead5e6d480e..1a660bd97ab2118f092403c71e237f620f6671ff 100644 (file)
@@ -169,7 +169,7 @@ static int zc_probe(struct hid_device *hdev, const struct hid_device_id *id)
        int ret;
        struct zc_device *zc;
 
-       zc = kzalloc(sizeof(*zc), GFP_KERNEL);
+       zc = devm_kzalloc(&hdev->dev, sizeof(*zc), GFP_KERNEL);
        if (zc == NULL) {
                hid_err(hdev, "can't alloc descriptor\n");
                return -ENOMEM;
@@ -180,28 +180,16 @@ static int zc_probe(struct hid_device *hdev, const struct hid_device_id *id)
        ret = hid_parse(hdev);
        if (ret) {
                hid_err(hdev, "parse failed\n");
-               goto err_free;
+               return ret;
        }
 
        ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
        if (ret) {
                hid_err(hdev, "hw start failed\n");
-               goto err_free;
+               return ret;
        }
 
        return 0;
-err_free:
-       kfree(zc);
-
-       return ret;
-}
-
-static void zc_remove(struct hid_device *hdev)
-{
-       struct zc_device *zc = hid_get_drvdata(hdev);
-
-       hid_hw_stop(hdev);
-       kfree(zc);
 }
 
 static const struct hid_device_id zc_devices[] = {
@@ -217,7 +205,6 @@ static struct hid_driver zc_driver = {
        .input_mapping = zc_input_mapping,
        .raw_event = zc_raw_event,
        .probe = zc_probe,
-       .remove = zc_remove,
 };
 module_hid_driver(zc_driver);
 
index 6f1feb2c2e97b5defb0628d92bfc98a719449685..dbfe300746c65e35bf1ea38c454ba0695f7e0a4e 100644 (file)
@@ -113,7 +113,7 @@ static ssize_t hidraw_send_report(struct file *file, const char __user *buffer,
        __u8 *buf;
        int ret = 0;
 
-       if (!hidraw_table[minor]) {
+       if (!hidraw_table[minor] || !hidraw_table[minor]->exist) {
                ret = -ENODEV;
                goto out;
        }
@@ -261,7 +261,7 @@ static int hidraw_open(struct inode *inode, struct file *file)
        }
 
        mutex_lock(&minors_lock);
-       if (!hidraw_table[minor]) {
+       if (!hidraw_table[minor] || !hidraw_table[minor]->exist) {
                err = -ENODEV;
                goto out_unlock;
        }
@@ -302,39 +302,38 @@ static int hidraw_fasync(int fd, struct file *file, int on)
        return fasync_helper(fd, file, on, &list->fasync);
 }
 
+static void drop_ref(struct hidraw *hidraw, int exists_bit)
+{
+       if (exists_bit) {
+               hid_hw_close(hidraw->hid);
+               hidraw->exist = 0;
+               if (hidraw->open)
+                       wake_up_interruptible(&hidraw->wait);
+       } else {
+               --hidraw->open;
+       }
+
+       if (!hidraw->open && !hidraw->exist) {
+               device_destroy(hidraw_class, MKDEV(hidraw_major, hidraw->minor));
+               hidraw_table[hidraw->minor] = NULL;
+               kfree(hidraw);
+       }
+}
+
 static int hidraw_release(struct inode * inode, struct file * file)
 {
        unsigned int minor = iminor(inode);
-       struct hidraw *dev;
        struct hidraw_list *list = file->private_data;
-       int ret;
-       int i;
 
        mutex_lock(&minors_lock);
-       if (!hidraw_table[minor]) {
-               ret = -ENODEV;
-               goto unlock;
-       }
 
        list_del(&list->node);
-       dev = hidraw_table[minor];
-       if (!--dev->open) {
-               if (list->hidraw->exist) {
-                       hid_hw_power(dev->hid, PM_HINT_NORMAL);
-                       hid_hw_close(dev->hid);
-               } else {
-                       kfree(list->hidraw);
-               }
-       }
-
-       for (i = 0; i < HIDRAW_BUFFER_SIZE; ++i)
-               kfree(list->buffer[i].value);
        kfree(list);
-       ret = 0;
-unlock:
-       mutex_unlock(&minors_lock);
 
-       return ret;
+       drop_ref(hidraw_table[minor], 0);
+
+       mutex_unlock(&minors_lock);
+       return 0;
 }
 
 static long hidraw_ioctl(struct file *file, unsigned int cmd,
@@ -539,18 +538,9 @@ void hidraw_disconnect(struct hid_device *hid)
        struct hidraw *hidraw = hid->hidraw;
 
        mutex_lock(&minors_lock);
-       hidraw->exist = 0;
-
-       device_destroy(hidraw_class, MKDEV(hidraw_major, hidraw->minor));
 
-       hidraw_table[hidraw->minor] = NULL;
+       drop_ref(hidraw, 1);
 
-       if (hidraw->open) {
-               hid_hw_close(hid);
-               wake_up_interruptible(&hidraw->wait);
-       } else {
-               kfree(hidraw);
-       }
        mutex_unlock(&minors_lock);
 }
 EXPORT_SYMBOL_GPL(hidraw_disconnect);
index 879b0ed701a3fb4109243a2642857c16518185b8..c1336193b04ba3deb92c3edf2468551f3f6933a9 100644 (file)
@@ -35,6 +35,7 @@
 #include <linux/hid.h>
 #include <linux/mutex.h>
 #include <linux/acpi.h>
+#include <linux/of.h>
 
 #include <linux/i2c/i2c-hid.h>
 
@@ -756,29 +757,6 @@ static int i2c_hid_power(struct hid_device *hid, int lvl)
        return ret;
 }
 
-static int i2c_hid_hidinput_input_event(struct input_dev *dev,
-               unsigned int type, unsigned int code, int value)
-{
-       struct hid_device *hid = input_get_drvdata(dev);
-       struct hid_field *field;
-       int offset;
-
-       if (type == EV_FF)
-               return input_ff_event(dev, type, code, value);
-
-       if (type != EV_LED)
-               return -1;
-
-       offset = hidinput_find_field(hid, type, code, &field);
-
-       if (offset == -1) {
-               hid_warn(dev, "event field not found\n");
-               return -1;
-       }
-
-       return hid_set_field(field, offset, value);
-}
-
 static struct hid_ll_driver i2c_hid_ll_driver = {
        .parse = i2c_hid_parse,
        .start = i2c_hid_start,
@@ -787,7 +765,6 @@ static struct hid_ll_driver i2c_hid_ll_driver = {
        .close = i2c_hid_close,
        .power = i2c_hid_power,
        .request = i2c_hid_request,
-       .hidinput_input_event = i2c_hid_hidinput_input_event,
 };
 
 static int i2c_hid_init_irq(struct i2c_client *client)
@@ -824,8 +801,8 @@ static int i2c_hid_fetch_hid_descriptor(struct i2c_hid *ihid)
         * bytes 2-3 -> bcdVersion (has to be 1.00) */
        ret = i2c_hid_command(client, &hid_descr_cmd, ihid->hdesc_buffer, 4);
 
-       i2c_hid_dbg(ihid, "%s, ihid->hdesc_buffer: %*ph\n",
-                       __func__, 4, ihid->hdesc_buffer);
+       i2c_hid_dbg(ihid, "%s, ihid->hdesc_buffer: %4ph\n", __func__,
+                       ihid->hdesc_buffer);
 
        if (ret) {
                dev_err(&client->dev,
@@ -897,8 +874,9 @@ static int i2c_hid_acpi_pdata(struct i2c_client *client,
        params[1].integer.value = 1;
        params[2].type = ACPI_TYPE_INTEGER;
        params[2].integer.value = 1; /* HID function */
-       params[3].type = ACPI_TYPE_INTEGER;
-       params[3].integer.value = 0;
+       params[3].type = ACPI_TYPE_PACKAGE;
+       params[3].package.count = 0;
+       params[3].package.elements = NULL;
 
        if (ACPI_FAILURE(acpi_evaluate_object(handle, "_DSM", &input, &buf))) {
                dev_err(&client->dev, "device _DSM execution failed\n");
@@ -933,6 +911,42 @@ static inline int i2c_hid_acpi_pdata(struct i2c_client *client,
 }
 #endif
 
+#ifdef CONFIG_OF
+static int i2c_hid_of_probe(struct i2c_client *client,
+               struct i2c_hid_platform_data *pdata)
+{
+       struct device *dev = &client->dev;
+       u32 val;
+       int ret;
+
+       ret = of_property_read_u32(dev->of_node, "hid-descr-addr", &val);
+       if (ret) {
+               dev_err(&client->dev, "HID register address not provided\n");
+               return -ENODEV;
+       }
+       if (val >> 16) {
+               dev_err(&client->dev, "Bad HID register address: 0x%08x\n",
+                       val);
+               return -EINVAL;
+       }
+       pdata->hid_descriptor_address = val;
+
+       return 0;
+}
+
+static const struct of_device_id i2c_hid_of_match[] = {
+       { .compatible = "hid-over-i2c" },
+       {},
+};
+MODULE_DEVICE_TABLE(of, i2c_hid_of_match);
+#else
+static inline int i2c_hid_of_probe(struct i2c_client *client,
+               struct i2c_hid_platform_data *pdata)
+{
+       return -ENODEV;
+}
+#endif
+
 static int i2c_hid_probe(struct i2c_client *client,
                         const struct i2c_device_id *dev_id)
 {
@@ -954,7 +968,11 @@ static int i2c_hid_probe(struct i2c_client *client,
        if (!ihid)
                return -ENOMEM;
 
-       if (!platform_data) {
+       if (client->dev.of_node) {
+               ret = i2c_hid_of_probe(client, &ihid->pdata);
+               if (ret)
+                       goto err;
+       } else if (!platform_data) {
                ret = i2c_hid_acpi_pdata(client, &ihid->pdata);
                if (ret) {
                        dev_err(&client->dev,
@@ -1095,6 +1113,7 @@ static struct i2c_driver i2c_hid_driver = {
                .owner  = THIS_MODULE,
                .pm     = &i2c_hid_pm,
                .acpi_match_table = ACPI_PTR(i2c_hid_acpi_match),
+               .of_match_table = of_match_ptr(i2c_hid_of_match),
        },
 
        .probe          = i2c_hid_probe,
index fc307e0422afc92c31bfac65d410244d5859e804..f53f2d52e677b4f80b1bd454e673f2ec0a7ce816 100644 (file)
@@ -116,30 +116,6 @@ static void uhid_hid_close(struct hid_device *hid)
        uhid_queue_event(uhid, UHID_CLOSE);
 }
 
-static int uhid_hid_input(struct input_dev *input, unsigned int type,
-                         unsigned int code, int value)
-{
-       struct hid_device *hid = input_get_drvdata(input);
-       struct uhid_device *uhid = hid->driver_data;
-       unsigned long flags;
-       struct uhid_event *ev;
-
-       ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
-       if (!ev)
-               return -ENOMEM;
-
-       ev->type = UHID_OUTPUT_EV;
-       ev->u.output_ev.type = type;
-       ev->u.output_ev.code = code;
-       ev->u.output_ev.value = value;
-
-       spin_lock_irqsave(&uhid->qlock, flags);
-       uhid_queue(uhid, ev);
-       spin_unlock_irqrestore(&uhid->qlock, flags);
-
-       return 0;
-}
-
 static int uhid_hid_parse(struct hid_device *hid)
 {
        struct uhid_device *uhid = hid->driver_data;
@@ -273,7 +249,6 @@ static struct hid_ll_driver uhid_hid_driver = {
        .stop = uhid_hid_stop,
        .open = uhid_hid_open,
        .close = uhid_hid_close,
-       .hidinput_input_event = uhid_hid_input,
        .parse = uhid_hid_parse,
 };
 
index 99418285222cf118cbc09e4d9029c80ba3493fcf..bd38cdfbbba64fbb9fe76b6307f436f3f63f02f9 100644 (file)
@@ -535,7 +535,6 @@ static void __usbhid_submit_report(struct hid_device *hid, struct hid_report *re
 {
        int head;
        struct usbhid_device *usbhid = hid->driver_data;
-       int len = ((report->size - 1) >> 3) + 1 + (report->id > 0);
 
        if ((hid->quirks & HID_QUIRK_NOGET) && dir == USB_DIR_IN)
                return;
@@ -546,7 +545,7 @@ static void __usbhid_submit_report(struct hid_device *hid, struct hid_report *re
                        return;
                }
 
-               usbhid->out[usbhid->outhead].raw_report = kmalloc(len, GFP_ATOMIC);
+               usbhid->out[usbhid->outhead].raw_report = hid_alloc_report_buf(report, GFP_ATOMIC);
                if (!usbhid->out[usbhid->outhead].raw_report) {
                        hid_warn(hid, "output queueing failed\n");
                        return;
@@ -595,7 +594,7 @@ static void __usbhid_submit_report(struct hid_device *hid, struct hid_report *re
        }
 
        if (dir == USB_DIR_OUT) {
-               usbhid->ctrl[usbhid->ctrlhead].raw_report = kmalloc(len, GFP_ATOMIC);
+               usbhid->ctrl[usbhid->ctrlhead].raw_report = hid_alloc_report_buf(report, GFP_ATOMIC);
                if (!usbhid->ctrl[usbhid->ctrlhead].raw_report) {
                        hid_warn(hid, "control queueing failed\n");
                        return;
@@ -649,62 +648,6 @@ static void usbhid_submit_report(struct hid_device *hid, struct hid_report *repo
        spin_unlock_irqrestore(&usbhid->lock, flags);
 }
 
-/* Workqueue routine to send requests to change LEDs */
-static void hid_led(struct work_struct *work)
-{
-       struct usbhid_device *usbhid =
-               container_of(work, struct usbhid_device, led_work);
-       struct hid_device *hid = usbhid->hid;
-       struct hid_field *field;
-       unsigned long flags;
-
-       field = hidinput_get_led_field(hid);
-       if (!field) {
-               hid_warn(hid, "LED event field not found\n");
-               return;
-       }
-
-       spin_lock_irqsave(&usbhid->lock, flags);
-       if (!test_bit(HID_DISCONNECTED, &usbhid->iofl)) {
-               usbhid->ledcount = hidinput_count_leds(hid);
-               hid_dbg(usbhid->hid, "New ledcount = %u\n", usbhid->ledcount);
-               __usbhid_submit_report(hid, field->report, USB_DIR_OUT);
-       }
-       spin_unlock_irqrestore(&usbhid->lock, flags);
-}
-
-static int usb_hidinput_input_event(struct input_dev *dev, unsigned int type, unsigned int code, int value)
-{
-       struct hid_device *hid = input_get_drvdata(dev);
-       struct usbhid_device *usbhid = hid->driver_data;
-       struct hid_field *field;
-       unsigned long flags;
-       int offset;
-
-       if (type == EV_FF)
-               return input_ff_event(dev, type, code, value);
-
-       if (type != EV_LED)
-               return -1;
-
-       if ((offset = hidinput_find_field(hid, type, code, &field)) == -1) {
-               hid_warn(dev, "event field not found\n");
-               return -1;
-       }
-
-       spin_lock_irqsave(&usbhid->lock, flags);
-       hid_set_field(field, offset, value);
-       spin_unlock_irqrestore(&usbhid->lock, flags);
-
-       /*
-        * Defer performing requested LED action.
-        * This is more likely gather all LED changes into a single URB.
-        */
-       schedule_work(&usbhid->led_work);
-
-       return 0;
-}
-
 static int usbhid_wait_io(struct hid_device *hid)
 {
        struct usbhid_device *usbhid = hid->driver_data;
@@ -857,7 +800,7 @@ static int hid_find_field_early(struct hid_device *hid, unsigned int page,
        return -1;
 }
 
-void usbhid_set_leds(struct hid_device *hid)
+static void usbhid_set_leds(struct hid_device *hid)
 {
        struct hid_field *field;
        int offset;
@@ -867,7 +810,6 @@ void usbhid_set_leds(struct hid_device *hid)
                usbhid_submit_report(hid, field->report, USB_DIR_OUT);
        }
 }
-EXPORT_SYMBOL_GPL(usbhid_set_leds);
 
 /*
  * Traverse the supplied list of reports and find the longest
@@ -1274,7 +1216,6 @@ static struct hid_ll_driver usb_hid_driver = {
        .open = usbhid_open,
        .close = usbhid_close,
        .power = usbhid_power,
-       .hidinput_input_event = usb_hidinput_input_event,
        .request = usbhid_request,
        .wait = usbhid_wait_io,
        .idle = usbhid_idle,
@@ -1368,8 +1309,6 @@ static int usbhid_probe(struct usb_interface *intf, const struct usb_device_id *
        setup_timer(&usbhid->io_retry, hid_retry_timeout, (unsigned long) hid);
        spin_lock_init(&usbhid->lock);
 
-       INIT_WORK(&usbhid->led_work, hid_led);
-
        ret = hid_add_device(hid);
        if (ret) {
                if (ret != -ENODEV)
@@ -1402,7 +1341,6 @@ static void hid_cancel_delayed_stuff(struct usbhid_device *usbhid)
 {
        del_timer_sync(&usbhid->io_retry);
        cancel_work_sync(&usbhid->reset_work);
-       cancel_work_sync(&usbhid->led_work);
 }
 
 static void hid_cease_io(struct usbhid_device *usbhid)
@@ -1522,15 +1460,17 @@ static int hid_suspend(struct usb_interface *intf, pm_message_t message)
        struct usbhid_device *usbhid = hid->driver_data;
        int status = 0;
        bool driver_suspended = false;
+       unsigned int ledcount;
 
        if (PMSG_IS_AUTO(message)) {
+               ledcount = hidinput_count_leds(hid);
                spin_lock_irq(&usbhid->lock);   /* Sync with error handler */
                if (!test_bit(HID_RESET_PENDING, &usbhid->iofl)
                    && !test_bit(HID_CLEAR_HALT, &usbhid->iofl)
                    && !test_bit(HID_OUT_RUNNING, &usbhid->iofl)
                    && !test_bit(HID_CTRL_RUNNING, &usbhid->iofl)
                    && !test_bit(HID_KEYS_PRESSED, &usbhid->iofl)
-                   && (!usbhid->ledcount || ignoreled))
+                   && (!ledcount || ignoreled))
                {
                        set_bit(HID_SUSPENDED, &usbhid->iofl);
                        spin_unlock_irq(&usbhid->lock);
index dbb6af6991350d4e09b2c7ca484411065afdc75c..f633c24ce28b8d8f81d831c37059992914e7c32b 100644 (file)
@@ -92,9 +92,6 @@ struct usbhid_device {
        unsigned int retry_delay;                                       /* Delay length in ms */
        struct work_struct reset_work;                                  /* Task context for resets */
        wait_queue_head_t wait;                                         /* For sleeping */
-       int ledcount;                                                   /* counting the number of active leds */
-
-       struct work_struct led_work;                                    /* Task context for setting LEDs */
 };
 
 #define        hid_to_usb_dev(hid_dev) \
index e989f7fd645b0d78322ba1431acf4c3f01c220b3..47b3e5821cb8bc070aa49f3257e663d6e4ec6173 100644 (file)
@@ -1202,8 +1202,8 @@ config SENSORS_ADS1015
        tristate "Texas Instruments ADS1015"
        depends on I2C
        help
-         If you say yes here you get support for Texas Instruments ADS1015
-         12-bit 4-input ADC device.
+         If you say yes here you get support for Texas Instruments
+         ADS1015/ADS1115 12/16-bit 4-input ADC device.
 
          This driver can also be built as a module.  If so, the module
          will be called ads1015.
index 6351aba8819ceec4ff33a2d9c9a8a43ce8114713..d7d9b2fc01cb6778d9fc614729e8a80dc9681ead 100644 (file)
@@ -970,7 +970,7 @@ static int __init enable_cap_knobs(const struct dmi_system_id *d)
        return 0;
 }
 
-static struct dmi_system_id __initdata pm_dmi_table[] = {
+static struct dmi_system_id pm_dmi_table[] __initdata = {
        {
                enable_cap_knobs, "IBM Active Energy Manager",
                {
index 2798246ad81470988fadd920dee5e9282f463445..7f9dc2f86b63d639a3a05baa3debc9b2cf724a19 100644 (file)
@@ -46,17 +46,28 @@ static const unsigned int fullscale_table[8] = {
        6144, 4096, 2048, 1024, 512, 256, 256, 256 };
 
 /* Data rates in samples per second */
-static const unsigned int data_rate_table[8] = {
-       128, 250, 490, 920, 1600, 2400, 3300, 3300 };
+static const unsigned int data_rate_table_1015[8] = {
+       128, 250, 490, 920, 1600, 2400, 3300, 3300
+};
+
+static const unsigned int data_rate_table_1115[8] = {
+       8, 16, 32, 64, 128, 250, 475, 860
+};
 
 #define ADS1015_DEFAULT_CHANNELS 0xff
 #define ADS1015_DEFAULT_PGA 2
 #define ADS1015_DEFAULT_DATA_RATE 4
 
+enum ads1015_chips {
+       ads1015,
+       ads1115,
+};
+
 struct ads1015_data {
        struct device *hwmon_dev;
        struct mutex update_lock; /* mutex protect updates */
        struct ads1015_channel_data channel_data[ADS1015_CHANNELS];
+       enum ads1015_chips id;
 };
 
 static int ads1015_read_adc(struct i2c_client *client, unsigned int channel)
@@ -66,6 +77,8 @@ static int ads1015_read_adc(struct i2c_client *client, unsigned int channel)
        unsigned int pga = data->channel_data[channel].pga;
        unsigned int data_rate = data->channel_data[channel].data_rate;
        unsigned int conversion_time_ms;
+       const unsigned int * const rate_table = data->id == ads1115 ?
+               data_rate_table_1115 : data_rate_table_1015;
        int res;
 
        mutex_lock(&data->update_lock);
@@ -75,7 +88,7 @@ static int ads1015_read_adc(struct i2c_client *client, unsigned int channel)
        if (res < 0)
                goto err_unlock;
        config = res;
-       conversion_time_ms = DIV_ROUND_UP(1000, data_rate_table[data_rate]);
+       conversion_time_ms = DIV_ROUND_UP(1000, rate_table[data_rate]);
 
        /* setup and start single conversion */
        config &= 0x001f;
@@ -113,8 +126,9 @@ static int ads1015_reg_to_mv(struct i2c_client *client, unsigned int channel,
        struct ads1015_data *data = i2c_get_clientdata(client);
        unsigned int pga = data->channel_data[channel].pga;
        int fullscale = fullscale_table[pga];
+       const unsigned mask = data->id == ads1115 ? 0x7fff : 0x7ff0;
 
-       return DIV_ROUND_CLOSEST(reg * fullscale, 0x7ff0);
+       return DIV_ROUND_CLOSEST(reg * fullscale, mask);
 }
 
 /* sysfs callback function */
@@ -257,7 +271,7 @@ static int ads1015_probe(struct i2c_client *client,
                            GFP_KERNEL);
        if (!data)
                return -ENOMEM;
-
+       data->id = id->driver_data;
        i2c_set_clientdata(client, data);
        mutex_init(&data->update_lock);
 
@@ -286,7 +300,8 @@ exit_remove:
 }
 
 static const struct i2c_device_id ads1015_id[] = {
-       { "ads1015", 0 },
+       { "ads1015",  ads1015},
+       { "ads1115",  ads1115},
        { }
 };
 MODULE_DEVICE_TABLE(i2c, ads1015_id);
index ba962ac4b81f2e63519ee12138e1b2e298befe90..7092c78f814f03c82c0976d682b0ec1c2b4155dc 100644 (file)
@@ -145,7 +145,7 @@ static int ads7828_remove(struct i2c_client *client)
 static int ads7828_probe(struct i2c_client *client,
                         const struct i2c_device_id *id)
 {
-       struct ads7828_platform_data *pdata = client->dev.platform_data;
+       struct ads7828_platform_data *pdata = dev_get_platdata(&client->dev);
        struct ads7828_data *data;
        int err;
 
index 69481d3a3d231fec48e1f43f0489e20879613daf..3e6aba12e46a715ed46f63382993b586f60881d0 100644 (file)
@@ -333,7 +333,7 @@ static int ADT7462_REG_VOLT_MAX(struct adt7462_data *data, int which)
                        return 0x4C;
                break;
        }
-       return -ENODEV;
+       return 0;
 }
 
 static int ADT7462_REG_VOLT_MIN(struct adt7462_data *data, int which)
@@ -392,7 +392,7 @@ static int ADT7462_REG_VOLT_MIN(struct adt7462_data *data, int which)
                        return 0x77;
                break;
        }
-       return -ENODEV;
+       return 0;
 }
 
 static int ADT7462_REG_VOLT(struct adt7462_data *data, int which)
index b25c64302cbc1911f5d272e50dc05f8477b326c9..9ddadb378dcc0bc9421bfb6a3420f326ff2a8ee5 100644 (file)
@@ -29,7 +29,7 @@ static bool new_if;
 module_param(new_if, bool, 0);
 MODULE_PARM_DESC(new_if, "Override detection heuristic and force the use of the new ATK0110 interface");
 
-static const struct dmi_system_id __initconst atk_force_new_if[] = {
+static const struct dmi_system_id atk_force_new_if[] __initconst = {
        {
                /* Old interface has broken MCH temp monitoring */
                .ident = "Asus Sabertooth X58",
index 2e5e2dc47eafdca62ae3513bee1e471cbdb1276d..78be66176840d1c6abdf29dd632d7a107f6ad2d3 100644 (file)
@@ -316,6 +316,18 @@ static int adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
        return tjmax;
 }
 
+static bool cpu_has_tjmax(struct cpuinfo_x86 *c)
+{
+       u8 model = c->x86_model;
+
+       return model > 0xe &&
+              model != 0x1c &&
+              model != 0x26 &&
+              model != 0x27 &&
+              model != 0x35 &&
+              model != 0x36;
+}
+
 static int get_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
 {
        int err;
@@ -328,7 +340,7 @@ static int get_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
         */
        err = rdmsr_safe_on_cpu(id, MSR_IA32_TEMPERATURE_TARGET, &eax, &edx);
        if (err) {
-               if (c->x86_model > 0xe && c->x86_model != 0x1c)
+               if (cpu_has_tjmax(c))
                        dev_warn(dev, "Unable to read TjMax from CPU %u\n", id);
        } else {
                val = (eax >> 16) & 0xff;
index f1d6b422cf0682b43ed6b4354481d97de240fcef..0918b91365880f93be1d4a807cc0ab5fd58a6588 100644 (file)
@@ -77,7 +77,7 @@ struct ds620_data {
 
 static void ds620_init_client(struct i2c_client *client)
 {
-       struct ds620_platform_data *ds620_info = client->dev.platform_data;
+       struct ds620_platform_data *ds620_info = dev_get_platdata(&client->dev);
        u16 conf, new_conf;
 
        new_conf = conf =
index 936898f82f94d140ab00d0b58d78bc195d23e480..82e661e8241b7a15cf8c32ce57a048429704a2cd 100644 (file)
@@ -49,7 +49,7 @@ static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END };
 #define EMC6W201_REG_TEMP_HIGH(nr)     (0x57 + (nr) * 2)
 #define EMC6W201_REG_FAN_MIN(nr)       (0x62 + (nr) * 2)
 
-enum { input, min, max } subfeature;
+enum subfeature { input, min, max };
 
 /*
  * Per-device data
index 0c9f3da242bf91be9a6688f71de6d96385ce8453..15b7f5281def34947ee72afb7c3e170573d5693e 100644 (file)
@@ -1375,7 +1375,7 @@ static void f71805f_init_device(struct f71805f_data *data)
 
 static int f71805f_probe(struct platform_device *pdev)
 {
-       struct f71805f_sio_data *sio_data = pdev->dev.platform_data;
+       struct f71805f_sio_data *sio_data = dev_get_platdata(&pdev->dev);
        struct f71805f_data *data;
        struct resource *res;
        int i, err;
index cfb02dd91aadda144e5986e19e3f57e0fd65743e..31b221eeee6ca7c4c789cc4aeaf88164136664fd 100644 (file)
@@ -2267,7 +2267,7 @@ static int f71882fg_create_fan_sysfs_files(
 static int f71882fg_probe(struct platform_device *pdev)
 {
        struct f71882fg_data *data;
-       struct f71882fg_sio_data *sio_data = pdev->dev.platform_data;
+       struct f71882fg_sio_data *sio_data = dev_get_platdata(&pdev->dev);
        int nr_fans = f71882fg_nr_fans[sio_data->type];
        int nr_temps = f71882fg_nr_temps[sio_data->type];
        int err, i;
index 9e300e567f15c6b0cc22d6245e9a1cccbc1d8e0e..a837b94977f4e8e59d9f3478a96747fe56de803e 100644 (file)
@@ -832,7 +832,8 @@ static int f75375_probe(struct i2c_client *client,
                const struct i2c_device_id *id)
 {
        struct f75375_data *data;
-       struct f75375s_platform_data *f75375s_pdata = client->dev.platform_data;
+       struct f75375s_platform_data *f75375s_pdata =
+                       dev_get_platdata(&client->dev);
        int err;
 
        if (!i2c_check_functionality(client->adapter,
index 73adf01b0ef2addbdb7052632a17c623be848a38..b4b8b5bef718a596a027d3e57703491eedba980c 100644 (file)
@@ -717,7 +717,7 @@ static void g762_of_clock_disable(struct i2c_client *client) { }
 
 static int g762_pdata_prop_import(struct i2c_client *client)
 {
-       struct g762_platform_data *pdata = client->dev.platform_data;
+       struct g762_platform_data *pdata = dev_get_platdata(&client->dev);
        int ret;
 
        if (!pdata)
index 3104149795c582e8ceac8780aca390059eafc721..b7d6a5704eb2ff15887f77341e8d8b2e57cdb291 100644 (file)
@@ -495,7 +495,7 @@ static int gpio_fan_probe(struct platform_device *pdev)
 {
        int err;
        struct gpio_fan_data *fan_data;
-       struct gpio_fan_platform_data *pdata = pdev->dev.platform_data;
+       struct gpio_fan_platform_data *pdata = dev_get_platdata(&pdev->dev);
 
 #ifdef CONFIG_OF_GPIO
        if (!pdata) {
index d917a2d8c30ffde4183382d8370bef781dd0e824..18c062360ca7950e77ef4573ddf3dbc9fc16be82 100644 (file)
@@ -232,9 +232,9 @@ static int ina2xx_probe(struct i2c_client *client,
        if (!data)
                return -ENOMEM;
 
-       if (client->dev.platform_data) {
+       if (dev_get_platdata(&client->dev)) {
                pdata =
-                 (struct ina2xx_platform_data *)client->dev.platform_data;
+                 (struct ina2xx_platform_data *)dev_get_platdata(&client->dev);
                shunt = pdata->shunt_uohms;
        } else if (!of_property_read_u32(client->dev.of_node,
                                "shunt-resistor", &val)) {
index 72b21d5b1c621ece4f8059ba48ae6afe26642c21..29ffa27c60b89b60bf84db948cf56a890772599e 100644 (file)
@@ -1962,7 +1962,7 @@ exit:
 static void it87_remove_files(struct device *dev)
 {
        struct it87_data *data = platform_get_drvdata(pdev);
-       struct it87_sio_data *sio_data = dev->platform_data;
+       struct it87_sio_data *sio_data = dev_get_platdata(dev);
        int i;
 
        sysfs_remove_group(&dev->kobj, &it87_group);
@@ -2014,7 +2014,7 @@ static int it87_probe(struct platform_device *pdev)
        struct it87_data *data;
        struct resource *res;
        struct device *dev = &pdev->dev;
-       struct it87_sio_data *sio_data = dev->platform_data;
+       struct it87_sio_data *sio_data = dev_get_platdata(dev);
        int err = 0, i;
        int enable_pwm_interface;
        int fan_beep_need_rw;
@@ -2316,7 +2316,7 @@ static int it87_check_pwm(struct device *dev)
 /* Called when we have found a new IT87. */
 static void it87_init_device(struct platform_device *pdev)
 {
-       struct it87_sio_data *sio_data = pdev->dev.platform_data;
+       struct it87_sio_data *sio_data = dev_get_platdata(&pdev->dev);
        struct it87_data *data = platform_get_drvdata(pdev);
        int tmp, i;
        u8 mask;
index 16e45d7021522d0eb60da181144810b3700711a9..333092ce2465e234c15d415d4a99bb381100f2a3 100644 (file)
@@ -855,8 +855,8 @@ static void lm87_init_client(struct i2c_client *client)
 {
        struct lm87_data *data = i2c_get_clientdata(client);
 
-       if (client->dev.platform_data) {
-               data->channel = *(u8 *)client->dev.platform_data;
+       if (dev_get_platdata(&client->dev)) {
+               data->channel = *(u8 *)dev_get_platdata(&client->dev);
                lm87_write_value(client,
                                 LM87_REG_CHANNEL_MODE, data->channel);
        } else {
index b5ebb9198c757e17eaf114d5cf15f7c91a28e14f..96dccaf919d1da4c02b26de8f937f5c401f84ff4 100644 (file)
@@ -261,7 +261,7 @@ static int max197_probe(struct platform_device *pdev)
 {
        int ch, ret;
        struct max197_data *data;
-       struct max197_platform_data *pdata = pdev->dev.platform_data;
+       struct max197_platform_data *pdata = dev_get_platdata(&pdev->dev);
        enum max197_chips chip = platform_get_device_id(pdev)->driver_data;
 
        if (pdata == NULL) {
index 3e7b4269f5b9df67658e5e3f855aaec6d95b3c29..066e587a18a5a280f29db1f19dd14e3d30ea4f48 100644 (file)
@@ -428,7 +428,7 @@ static int max6639_init_client(struct i2c_client *client)
 {
        struct max6639_data *data = i2c_get_clientdata(client);
        struct max6639_platform_data *max6639_info =
-               client->dev.platform_data;
+               dev_get_platdata(&client->dev);
        int i;
        int rpm_range = 1; /* default: 4000 RPM */
        int err;
index eedb32292d6d3c20035f56a90820861d38e02102..d219c06a857bb5795a176bb88d9e186fd6a4b286 100644 (file)
@@ -143,12 +143,13 @@ static int mcp3021_probe(struct i2c_client *client,
                break;
        }
 
-       if (client->dev.platform_data) {
-               data->vdd = *(u32 *)client->dev.platform_data;
+       if (dev_get_platdata(&client->dev)) {
+               data->vdd = *(u32 *)dev_get_platdata(&client->dev);
                if (data->vdd > MCP3021_VDD_MAX || data->vdd < MCP3021_VDD_MIN)
                        return -EINVAL;
-       } else
+       } else {
                data->vdd = MCP3021_VDD_REF;
+       }
 
        err = sysfs_create_file(&client->dev.kobj, &dev_attr_in0_input.attr);
        if (err)
index 99cec18254203c5ebfa59ba0b1cbfdecabf77aac..6eb03ce2cff4b46a910787f8a7954bf5d00c6f99 100644 (file)
  * Supports the following chips:
  *
  * Chip        #vin    #fan    #pwm    #temp  chip IDs       man ID
+ * nct6106d     9      3       3       6+3    0xc450 0xc1    0x5ca3
  * nct6775f     9      4       3       6+3    0xb470 0xc1    0x5ca3
  * nct6776f     9      5       3       6+3    0xc330 0xc1    0x5ca3
  * nct6779d    15      5       5       2+6    0xc560 0xc1    0x5ca3
+ * nct6791d    15      6       6       2+6    0xc800 0xc1    0x5ca3
  *
  * #temp lists the number of monitored temperature sources (first value) plus
  * the number of directly connectable temperature sensors (second value).
 
 #define USE_ALTERNATE
 
-enum kinds { nct6775, nct6776, nct6779 };
+enum kinds { nct6106, nct6775, nct6776, nct6779, nct6791 };
 
 /* used to set data->name = nct6775_device_names[data->sio_kind] */
 static const char * const nct6775_device_names[] = {
+       "nct6106",
        "nct6775",
        "nct6776",
        "nct6779",
+       "nct6791",
 };
 
 static unsigned short force_id;
@@ -91,9 +95,11 @@ MODULE_PARM_DESC(fan_debounce, "Enable debouncing for fan RPM signal");
 #define SIO_REG_ENABLE         0x30    /* Logical device enable */
 #define SIO_REG_ADDR           0x60    /* Logical device address (2 bytes) */
 
+#define SIO_NCT6106_ID         0xc450
 #define SIO_NCT6775_ID         0xb470
 #define SIO_NCT6776_ID         0xc330
 #define SIO_NCT6779_ID         0xc560
+#define SIO_NCT6791_ID         0xc800
 #define SIO_ID_MASK            0xFFF0
 
 enum pwm_enable { off, manual, thermal_cruise, speed_cruise, sf3, sf4 };
@@ -167,7 +173,10 @@ superio_exit(int ioreg)
 #define NUM_TEMP       10      /* Max number of temp attribute sets w/ limits*/
 #define NUM_TEMP_FIXED 6       /* Max number of fixed temp attribute sets */
 
-#define NUM_REG_ALARM  4       /* Max number of alarm registers */
+#define NUM_REG_ALARM  7       /* Max number of alarm registers */
+#define NUM_REG_BEEP   5       /* Max number of beep registers */
+
+#define NUM_FAN                6
 
 /* Common and NCT6775 specific data */
 
@@ -185,6 +194,7 @@ static const u16 NCT6775_REG_IN[] = {
 
 #define NCT6775_REG_VBAT               0x5D
 #define NCT6775_REG_DIODE              0x5E
+#define NCT6775_DIODE_MASK             0x02
 
 #define NCT6775_REG_FANDIV1            0x506
 #define NCT6775_REG_FANDIV2            0x507
@@ -193,7 +203,7 @@ static const u16 NCT6775_REG_IN[] = {
 
 static const u16 NCT6775_REG_ALARM[NUM_REG_ALARM] = { 0x459, 0x45A, 0x45B };
 
-/* 0..15 voltages, 16..23 fans, 24..31 temperatures */
+/* 0..15 voltages, 16..23 fans, 24..29 temperatures, 30..31 intrusion */
 
 static const s8 NCT6775_ALARM_BITS[] = {
        0, 1, 2, 3, 8, 21, 20, 16,      /* in0.. in7 */
@@ -208,6 +218,23 @@ static const s8 NCT6775_ALARM_BITS[] = {
 #define TEMP_ALARM_BASE                24
 #define INTRUSION_ALARM_BASE   30
 
+static const u16 NCT6775_REG_BEEP[NUM_REG_BEEP] = { 0x56, 0x57, 0x453, 0x4e };
+
+/*
+ * 0..14 voltages, 15 global beep enable, 16..23 fans, 24..29 temperatures,
+ * 30..31 intrusion
+ */
+static const s8 NCT6775_BEEP_BITS[] = {
+       0, 1, 2, 3, 8, 9, 10, 16,       /* in0.. in7 */
+       17, -1, -1, -1, -1, -1, -1,     /* in8..in14 */
+       21,                             /* global beep enable */
+       6, 7, 11, 28, -1,               /* fan1..fan5 */
+       -1, -1, -1,                     /* unused */
+       4, 5, 13, -1, -1, -1,           /* temp1..temp6 */
+       12, -1 };                       /* intrusion0, intrusion1 */
+
+#define BEEP_ENABLE_BASE               15
+
 static const u8 NCT6775_REG_CR_CASEOPEN_CLR[] = { 0xe6, 0xee };
 static const u8 NCT6775_CR_CASEOPEN_CLR_MASK[] = { 0x20, 0x01 };
 
@@ -217,27 +244,32 @@ static const u8 NCT6775_PWM_MODE_MASK[] = { 0x01, 0x02, 0x01 };
 
 /* Advanced Fan control, some values are common for all fans */
 
-static const u16 NCT6775_REG_TARGET[] = { 0x101, 0x201, 0x301, 0x801, 0x901 };
-static const u16 NCT6775_REG_FAN_MODE[] = { 0x102, 0x202, 0x302, 0x802, 0x902 };
+static const u16 NCT6775_REG_TARGET[] = {
+       0x101, 0x201, 0x301, 0x801, 0x901, 0xa01 };
+static const u16 NCT6775_REG_FAN_MODE[] = {
+       0x102, 0x202, 0x302, 0x802, 0x902, 0xa02 };
 static const u16 NCT6775_REG_FAN_STEP_DOWN_TIME[] = {
-       0x103, 0x203, 0x303, 0x803, 0x903 };
+       0x103, 0x203, 0x303, 0x803, 0x903, 0xa03 };
 static const u16 NCT6775_REG_FAN_STEP_UP_TIME[] = {
-       0x104, 0x204, 0x304, 0x804, 0x904 };
+       0x104, 0x204, 0x304, 0x804, 0x904, 0xa04 };
 static const u16 NCT6775_REG_FAN_STOP_OUTPUT[] = {
-       0x105, 0x205, 0x305, 0x805, 0x905 };
-static const u16 NCT6775_REG_FAN_START_OUTPUT[]
-       = { 0x106, 0x206, 0x306, 0x806, 0x906 };
+       0x105, 0x205, 0x305, 0x805, 0x905, 0xa05 };
+static const u16 NCT6775_REG_FAN_START_OUTPUT[] = {
+       0x106, 0x206, 0x306, 0x806, 0x906, 0xa06 };
 static const u16 NCT6775_REG_FAN_MAX_OUTPUT[] = { 0x10a, 0x20a, 0x30a };
 static const u16 NCT6775_REG_FAN_STEP_OUTPUT[] = { 0x10b, 0x20b, 0x30b };
 
 static const u16 NCT6775_REG_FAN_STOP_TIME[] = {
-       0x107, 0x207, 0x307, 0x807, 0x907 };
-static const u16 NCT6775_REG_PWM[] = { 0x109, 0x209, 0x309, 0x809, 0x909 };
-static const u16 NCT6775_REG_PWM_READ[] = { 0x01, 0x03, 0x11, 0x13, 0x15 };
+       0x107, 0x207, 0x307, 0x807, 0x907, 0xa07 };
+static const u16 NCT6775_REG_PWM[] = {
+       0x109, 0x209, 0x309, 0x809, 0x909, 0xa09 };
+static const u16 NCT6775_REG_PWM_READ[] = {
+       0x01, 0x03, 0x11, 0x13, 0x15, 0xa09 };
 
 static const u16 NCT6775_REG_FAN[] = { 0x630, 0x632, 0x634, 0x636, 0x638 };
 static const u16 NCT6775_REG_FAN_MIN[] = { 0x3b, 0x3c, 0x3d };
 static const u16 NCT6775_REG_FAN_PULSES[] = { 0x641, 0x642, 0x643, 0x644, 0 };
+static const u16 NCT6775_FAN_PULSE_SHIFT[] = { 0, 0, 0, 0, 0, 0 };
 
 static const u16 NCT6775_REG_TEMP[] = {
        0x27, 0x150, 0x250, 0x62b, 0x62c, 0x62d };
@@ -253,25 +285,25 @@ static const u16 NCT6775_REG_TEMP_SOURCE[ARRAY_SIZE(NCT6775_REG_TEMP)] = {
        0x621, 0x622, 0x623, 0x624, 0x625, 0x626 };
 
 static const u16 NCT6775_REG_TEMP_SEL[] = {
-       0x100, 0x200, 0x300, 0x800, 0x900 };
+       0x100, 0x200, 0x300, 0x800, 0x900, 0xa00 };
 
 static const u16 NCT6775_REG_WEIGHT_TEMP_SEL[] = {
-       0x139, 0x239, 0x339, 0x839, 0x939 };
+       0x139, 0x239, 0x339, 0x839, 0x939, 0xa39 };
 static const u16 NCT6775_REG_WEIGHT_TEMP_STEP[] = {
-       0x13a, 0x23a, 0x33a, 0x83a, 0x93a };
+       0x13a, 0x23a, 0x33a, 0x83a, 0x93a, 0xa3a };
 static const u16 NCT6775_REG_WEIGHT_TEMP_STEP_TOL[] = {
-       0x13b, 0x23b, 0x33b, 0x83b, 0x93b };
+       0x13b, 0x23b, 0x33b, 0x83b, 0x93b, 0xa3b };
 static const u16 NCT6775_REG_WEIGHT_DUTY_STEP[] = {
-       0x13c, 0x23c, 0x33c, 0x83c, 0x93c };
+       0x13c, 0x23c, 0x33c, 0x83c, 0x93c, 0xa3c };
 static const u16 NCT6775_REG_WEIGHT_TEMP_BASE[] = {
-       0x13d, 0x23d, 0x33d, 0x83d, 0x93d };
+       0x13d, 0x23d, 0x33d, 0x83d, 0x93d, 0xa3d };
 
 static const u16 NCT6775_REG_TEMP_OFFSET[] = { 0x454, 0x455, 0x456 };
 
 static const u16 NCT6775_REG_AUTO_TEMP[] = {
-       0x121, 0x221, 0x321, 0x821, 0x921 };
+       0x121, 0x221, 0x321, 0x821, 0x921, 0xa21 };
 static const u16 NCT6775_REG_AUTO_PWM[] = {
-       0x127, 0x227, 0x327, 0x827, 0x927 };
+       0x127, 0x227, 0x327, 0x827, 0x927, 0xa27 };
 
 #define NCT6775_AUTO_TEMP(data, nr, p) ((data)->REG_AUTO_TEMP[nr] + (p))
 #define NCT6775_AUTO_PWM(data, nr, p)  ((data)->REG_AUTO_PWM[nr] + (p))
@@ -279,9 +311,9 @@ static const u16 NCT6775_REG_AUTO_PWM[] = {
 static const u16 NCT6775_REG_CRITICAL_ENAB[] = { 0x134, 0x234, 0x334 };
 
 static const u16 NCT6775_REG_CRITICAL_TEMP[] = {
-       0x135, 0x235, 0x335, 0x835, 0x935 };
+       0x135, 0x235, 0x335, 0x835, 0x935, 0xa35 };
 static const u16 NCT6775_REG_CRITICAL_TEMP_TOLERANCE[] = {
-       0x138, 0x238, 0x338, 0x838, 0x938 };
+       0x138, 0x238, 0x338, 0x838, 0x938, 0xa38 };
 
 static const char *const nct6775_temp_label[] = {
        "",
@@ -325,17 +357,28 @@ static const s8 NCT6776_ALARM_BITS[] = {
        4, 5, 13, -1, -1, -1,           /* temp1..temp6 */
        12, 9 };                        /* intrusion0, intrusion1 */
 
+static const u16 NCT6776_REG_BEEP[NUM_REG_BEEP] = { 0xb2, 0xb3, 0xb4, 0xb5 };
+
+static const s8 NCT6776_BEEP_BITS[] = {
+       0, 1, 2, 3, 4, 5, 6, 7,         /* in0.. in7 */
+       8, -1, -1, -1, -1, -1, -1,      /* in8..in14 */
+       24,                             /* global beep enable */
+       25, 26, 27, 28, 29,             /* fan1..fan5 */
+       -1, -1, -1,                     /* unused */
+       16, 17, 18, 19, 20, 21,         /* temp1..temp6 */
+       30, 31 };                       /* intrusion0, intrusion1 */
+
 static const u16 NCT6776_REG_TOLERANCE_H[] = {
-       0x10c, 0x20c, 0x30c, 0x80c, 0x90c };
+       0x10c, 0x20c, 0x30c, 0x80c, 0x90c, 0xa0c };
 
-static const u8 NCT6776_REG_PWM_MODE[] = { 0x04, 0, 0 };
-static const u8 NCT6776_PWM_MODE_MASK[] = { 0x01, 0, 0 };
+static const u8 NCT6776_REG_PWM_MODE[] = { 0x04, 0, 0, 0, 0, 0 };
+static const u8 NCT6776_PWM_MODE_MASK[] = { 0x01, 0, 0, 0, 0, 0 };
 
 static const u16 NCT6776_REG_FAN_MIN[] = { 0x63a, 0x63c, 0x63e, 0x640, 0x642 };
 static const u16 NCT6776_REG_FAN_PULSES[] = { 0x644, 0x645, 0x646, 0, 0 };
 
 static const u16 NCT6776_REG_WEIGHT_DUTY_BASE[] = {
-       0x13e, 0x23e, 0x33e, 0x83e, 0x93e };
+       0x13e, 0x23e, 0x33e, 0x83e, 0x93e, 0xa3e };
 
 static const u16 NCT6776_REG_TEMP_CONFIG[ARRAY_SIZE(NCT6775_REG_TEMP)] = {
        0x18, 0x152, 0x252, 0x628, 0x629, 0x62A };
@@ -390,14 +433,25 @@ static const s8 NCT6779_ALARM_BITS[] = {
        4, 5, 13, -1, -1, -1,           /* temp1..temp6 */
        12, 9 };                        /* intrusion0, intrusion1 */
 
-static const u16 NCT6779_REG_FAN[] = { 0x4b0, 0x4b2, 0x4b4, 0x4b6, 0x4b8 };
+static const s8 NCT6779_BEEP_BITS[] = {
+       0, 1, 2, 3, 4, 5, 6, 7,         /* in0.. in7 */
+       8, 9, 10, 11, 12, 13, 14,       /* in8..in14 */
+       24,                             /* global beep enable */
+       25, 26, 27, 28, 29,             /* fan1..fan5 */
+       -1, -1, -1,                     /* unused */
+       16, 17, -1, -1, -1, -1,         /* temp1..temp6 */
+       30, 31 };                       /* intrusion0, intrusion1 */
+
+static const u16 NCT6779_REG_FAN[] = {
+       0x4b0, 0x4b2, 0x4b4, 0x4b6, 0x4b8, 0x4ba };
 static const u16 NCT6779_REG_FAN_PULSES[] = {
-       0x644, 0x645, 0x646, 0x647, 0x648 };
+       0x644, 0x645, 0x646, 0x647, 0x648, 0x649 };
 
 static const u16 NCT6779_REG_CRITICAL_PWM_ENABLE[] = {
-       0x136, 0x236, 0x336, 0x836, 0x936 };
+       0x136, 0x236, 0x336, 0x836, 0x936, 0xa36 };
+#define NCT6779_CRITICAL_PWM_ENABLE_MASK       0x01
 static const u16 NCT6779_REG_CRITICAL_PWM[] = {
-       0x137, 0x237, 0x337, 0x837, 0x937 };
+       0x137, 0x237, 0x337, 0x837, 0x937, 0xa37 };
 
 static const u16 NCT6779_REG_TEMP[] = { 0x27, 0x150 };
 static const u16 NCT6779_REG_TEMP_CONFIG[ARRAY_SIZE(NCT6779_REG_TEMP)] = {
@@ -449,6 +503,122 @@ static const u16 NCT6779_REG_TEMP_ALTERNATE[ARRAY_SIZE(nct6779_temp_label) - 1]
 static const u16 NCT6779_REG_TEMP_CRIT[ARRAY_SIZE(nct6779_temp_label) - 1]
        = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x709, 0x70a };
 
+/* NCT6791 specific data */
+
+#define NCT6791_REG_HM_IO_SPACE_LOCK_ENABLE    0x28
+
+static const u16 NCT6791_REG_ALARM[NUM_REG_ALARM] = {
+       0x459, 0x45A, 0x45B, 0x568, 0x45D };
+
+static const s8 NCT6791_ALARM_BITS[] = {
+       0, 1, 2, 3, 8, 21, 20, 16,      /* in0.. in7 */
+       17, 24, 25, 26, 27, 28, 29,     /* in8..in14 */
+       -1,                             /* unused */
+       6, 7, 11, 10, 23, 33,           /* fan1..fan6 */
+       -1, -1,                         /* unused */
+       4, 5, 13, -1, -1, -1,           /* temp1..temp6 */
+       12, 9 };                        /* intrusion0, intrusion1 */
+
+
+/* NCT6102D/NCT6106D specific data */
+
+#define NCT6106_REG_VBAT       0x318
+#define NCT6106_REG_DIODE      0x319
+#define NCT6106_DIODE_MASK     0x01
+
+static const u16 NCT6106_REG_IN_MAX[] = {
+       0x90, 0x92, 0x94, 0x96, 0x98, 0x9a, 0x9e, 0xa0, 0xa2 };
+static const u16 NCT6106_REG_IN_MIN[] = {
+       0x91, 0x93, 0x95, 0x97, 0x99, 0x9b, 0x9f, 0xa1, 0xa3 };
+static const u16 NCT6106_REG_IN[] = {
+       0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x07, 0x08, 0x09 };
+
+static const u16 NCT6106_REG_TEMP[] = { 0x10, 0x11, 0x12, 0x13, 0x14, 0x15 };
+static const u16 NCT6106_REG_TEMP_HYST[] = {
+       0xc3, 0xc7, 0xcb, 0xcf, 0xd3, 0xd7 };
+static const u16 NCT6106_REG_TEMP_OVER[] = {
+       0xc2, 0xc6, 0xca, 0xce, 0xd2, 0xd6 };
+static const u16 NCT6106_REG_TEMP_CRIT_L[] = {
+       0xc0, 0xc4, 0xc8, 0xcc, 0xd0, 0xd4 };
+static const u16 NCT6106_REG_TEMP_CRIT_H[] = {
+       0xc1, 0xc5, 0xc9, 0xcf, 0xd1, 0xd5 };
+static const u16 NCT6106_REG_TEMP_OFFSET[] = { 0x311, 0x312, 0x313 };
+static const u16 NCT6106_REG_TEMP_CONFIG[] = {
+       0xb7, 0xb8, 0xb9, 0xba, 0xbb, 0xbc };
+
+static const u16 NCT6106_REG_FAN[] = { 0x20, 0x22, 0x24 };
+static const u16 NCT6106_REG_FAN_MIN[] = { 0xe0, 0xe2, 0xe4 };
+static const u16 NCT6106_REG_FAN_PULSES[] = { 0xf6, 0xf6, 0xf6, 0, 0 };
+static const u16 NCT6106_FAN_PULSE_SHIFT[] = { 0, 2, 4, 0, 0 };
+
+static const u8 NCT6106_REG_PWM_MODE[] = { 0xf3, 0xf3, 0xf3 };
+static const u8 NCT6106_PWM_MODE_MASK[] = { 0x01, 0x02, 0x04 };
+static const u16 NCT6106_REG_PWM[] = { 0x119, 0x129, 0x139 };
+static const u16 NCT6106_REG_PWM_READ[] = { 0x4a, 0x4b, 0x4c };
+static const u16 NCT6106_REG_FAN_MODE[] = { 0x113, 0x123, 0x133 };
+static const u16 NCT6106_REG_TEMP_SEL[] = { 0x110, 0x120, 0x130 };
+static const u16 NCT6106_REG_TEMP_SOURCE[] = {
+       0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5 };
+
+static const u16 NCT6106_REG_CRITICAL_TEMP[] = { 0x11a, 0x12a, 0x13a };
+static const u16 NCT6106_REG_CRITICAL_TEMP_TOLERANCE[] = {
+       0x11b, 0x12b, 0x13b };
+
+static const u16 NCT6106_REG_CRITICAL_PWM_ENABLE[] = { 0x11c, 0x12c, 0x13c };
+#define NCT6106_CRITICAL_PWM_ENABLE_MASK       0x10
+static const u16 NCT6106_REG_CRITICAL_PWM[] = { 0x11d, 0x12d, 0x13d };
+
+static const u16 NCT6106_REG_FAN_STEP_UP_TIME[] = { 0x114, 0x124, 0x134 };
+static const u16 NCT6106_REG_FAN_STEP_DOWN_TIME[] = { 0x115, 0x125, 0x135 };
+static const u16 NCT6106_REG_FAN_STOP_OUTPUT[] = { 0x116, 0x126, 0x136 };
+static const u16 NCT6106_REG_FAN_START_OUTPUT[] = { 0x117, 0x127, 0x137 };
+static const u16 NCT6106_REG_FAN_STOP_TIME[] = { 0x118, 0x128, 0x138 };
+static const u16 NCT6106_REG_TOLERANCE_H[] = { 0x112, 0x122, 0x132 };
+
+static const u16 NCT6106_REG_TARGET[] = { 0x111, 0x121, 0x131 };
+
+static const u16 NCT6106_REG_WEIGHT_TEMP_SEL[] = { 0x168, 0x178, 0x188 };
+static const u16 NCT6106_REG_WEIGHT_TEMP_STEP[] = { 0x169, 0x179, 0x189 };
+static const u16 NCT6106_REG_WEIGHT_TEMP_STEP_TOL[] = { 0x16a, 0x17a, 0x18a };
+static const u16 NCT6106_REG_WEIGHT_DUTY_STEP[] = { 0x16b, 0x17b, 0x17c };
+static const u16 NCT6106_REG_WEIGHT_TEMP_BASE[] = { 0x16c, 0x17c, 0x18c };
+static const u16 NCT6106_REG_WEIGHT_DUTY_BASE[] = { 0x16d, 0x17d, 0x18d };
+
+static const u16 NCT6106_REG_AUTO_TEMP[] = { 0x160, 0x170, 0x180 };
+static const u16 NCT6106_REG_AUTO_PWM[] = { 0x164, 0x174, 0x184 };
+
+static const u16 NCT6106_REG_ALARM[NUM_REG_ALARM] = {
+       0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d };
+
+static const s8 NCT6106_ALARM_BITS[] = {
+       0, 1, 2, 3, 4, 5, 7, 8,         /* in0.. in7 */
+       9, -1, -1, -1, -1, -1, -1,      /* in8..in14 */
+       -1,                             /* unused */
+       32, 33, 34, -1, -1,             /* fan1..fan5 */
+       -1, -1, -1,                     /* unused */
+       16, 17, 18, 19, 20, 21,         /* temp1..temp6 */
+       48, -1                          /* intrusion0, intrusion1 */
+};
+
+static const u16 NCT6106_REG_BEEP[NUM_REG_BEEP] = {
+       0x3c0, 0x3c1, 0x3c2, 0x3c3, 0x3c4 };
+
+static const s8 NCT6106_BEEP_BITS[] = {
+       0, 1, 2, 3, 4, 5, 7, 8,         /* in0.. in7 */
+       9, 10, 11, 12, -1, -1, -1,      /* in8..in14 */
+       32,                             /* global beep enable */
+       24, 25, 26, 27, 28,             /* fan1..fan5 */
+       -1, -1, -1,                     /* unused */
+       16, 17, 18, 19, 20, 21,         /* temp1..temp6 */
+       34, -1                          /* intrusion0, intrusion1 */
+};
+
+static const u16 NCT6106_REG_TEMP_ALTERNATE[ARRAY_SIZE(nct6776_temp_label) - 1]
+       = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x51, 0x52, 0x54 };
+
+static const u16 NCT6106_REG_TEMP_CRIT[ARRAY_SIZE(nct6776_temp_label) - 1]
+       = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x204, 0x205 };
+
 static enum pwm_enable reg_to_pwm_enable(int pwm, int mode)
 {
        if (mode == 0 && pwm == 255)
@@ -550,13 +720,18 @@ static inline u8 in_to_reg(u32 val, u8 nr)
 
 struct nct6775_data {
        int addr;       /* IO base of hw monitor block */
+       int sioreg;     /* SIO register address */
        enum kinds kind;
        const char *name;
 
        struct device *hwmon_dev;
+       struct attribute_group *group_in;
+       struct attribute_group *group_fan;
+       struct attribute_group *group_temp;
+       struct attribute_group *group_pwm;
 
-       u16 reg_temp[4][NUM_TEMP]; /* 0=temp, 1=temp_over, 2=temp_hyst,
-                                   * 3=temp_crit
+       u16 reg_temp[5][NUM_TEMP]; /* 0=temp, 1=temp_over, 2=temp_hyst,
+                                   * 3=temp_crit, 4=temp_lcrit
                                    */
        u8 temp_src[NUM_TEMP];
        u16 reg_temp_config[NUM_TEMP];
@@ -566,8 +741,10 @@ struct nct6775_data {
        u16 REG_CONFIG;
        u16 REG_VBAT;
        u16 REG_DIODE;
+       u8 DIODE_MASK;
 
        const s8 *ALARM_BITS;
+       const s8 *BEEP_BITS;
 
        const u16 *REG_VIN;
        const u16 *REG_IN_MINMAX[2];
@@ -577,6 +754,7 @@ struct nct6775_data {
        const u16 *REG_FAN_MODE;
        const u16 *REG_FAN_MIN;
        const u16 *REG_FAN_PULSES;
+       const u16 *FAN_PULSE_SHIFT;
        const u16 *REG_FAN_TIME[3];
 
        const u16 *REG_TOLERANCE_H;
@@ -590,6 +768,10 @@ struct nct6775_data {
                                 */
        const u16 *REG_PWM_READ;
 
+       const u16 *REG_CRITICAL_PWM_ENABLE;
+       u8 CRITICAL_PWM_ENABLE_MASK;
+       const u16 *REG_CRITICAL_PWM;
+
        const u16 *REG_AUTO_TEMP;
        const u16 *REG_AUTO_PWM;
 
@@ -604,6 +786,7 @@ struct nct6775_data {
        const u16 *REG_TEMP_OFFSET;
 
        const u16 *REG_ALARM;
+       const u16 *REG_BEEP;
 
        unsigned int (*fan_from_reg)(u16 reg, unsigned int divreg);
        unsigned int (*fan_from_reg_min)(u16 reg, unsigned int divreg);
@@ -616,26 +799,30 @@ struct nct6775_data {
        u8 bank;                /* current register bank */
        u8 in_num;              /* number of in inputs we have */
        u8 in[15][3];           /* [0]=in, [1]=in_max, [2]=in_min */
-       unsigned int rpm[5];
-       u16 fan_min[5];
-       u8 fan_pulses[5];
-       u8 fan_div[5];
+       unsigned int rpm[NUM_FAN];
+       u16 fan_min[NUM_FAN];
+       u8 fan_pulses[NUM_FAN];
+       u8 fan_div[NUM_FAN];
        u8 has_pwm;
        u8 has_fan;             /* some fan inputs can be disabled */
        u8 has_fan_min;         /* some fans don't have min register */
        bool has_fan_div;
 
-       u8 num_temp_alarms;     /* 2 or 3 */
+       u8 num_temp_alarms;     /* 2, 3, or 6 */
+       u8 num_temp_beeps;      /* 2, 3, or 6 */
        u8 temp_fixed_num;      /* 3 or 6 */
        u8 temp_type[NUM_TEMP_FIXED];
        s8 temp_offset[NUM_TEMP_FIXED];
-       s16 temp[4][NUM_TEMP]; /* 0=temp, 1=temp_over, 2=temp_hyst,
-                               * 3=temp_crit */
+       s16 temp[5][NUM_TEMP]; /* 0=temp, 1=temp_over, 2=temp_hyst,
+                               * 3=temp_crit, 4=temp_lcrit */
        u64 alarms;
+       u64 beeps;
 
        u8 pwm_num;     /* number of pwm */
-       u8 pwm_mode[5]; /* 1->DC variable voltage, 0->PWM variable duty cycle */
-       enum pwm_enable pwm_enable[5];
+       u8 pwm_mode[NUM_FAN];   /* 1->DC variable voltage,
+                                * 0->PWM variable duty cycle
+                                */
+       enum pwm_enable pwm_enable[NUM_FAN];
                        /* 0->off
                         * 1->manual
                         * 2->thermal cruise mode (also called SmartFan I)
@@ -643,35 +830,37 @@ struct nct6775_data {
                         * 4->SmartFan III
                         * 5->enhanced variable thermal cruise (SmartFan IV)
                         */
-       u8 pwm[7][5];   /* [0]=pwm, [1]=pwm_start, [2]=pwm_floor,
-                        * [3]=pwm_max, [4]=pwm_step,
-                        * [5]=weight_duty_step, [6]=weight_duty_base
-                        */
+       u8 pwm[7][NUM_FAN];     /* [0]=pwm, [1]=pwm_start, [2]=pwm_floor,
+                                * [3]=pwm_max, [4]=pwm_step,
+                                * [5]=weight_duty_step, [6]=weight_duty_base
+                                */
 
-       u8 target_temp[5];
+       u8 target_temp[NUM_FAN];
        u8 target_temp_mask;
-       u32 target_speed[5];
-       u32 target_speed_tolerance[5];
+       u32 target_speed[NUM_FAN];
+       u32 target_speed_tolerance[NUM_FAN];
        u8 speed_tolerance_limit;
 
-       u8 temp_tolerance[2][5];
+       u8 temp_tolerance[2][NUM_FAN];
        u8 tolerance_mask;
 
-       u8 fan_time[3][5]; /* 0 = stop_time, 1 = step_up, 2 = step_down */
+       u8 fan_time[3][NUM_FAN]; /* 0 = stop_time, 1 = step_up, 2 = step_down */
 
        /* Automatic fan speed control registers */
        int auto_pwm_num;
-       u8 auto_pwm[5][7];
-       u8 auto_temp[5][7];
-       u8 pwm_temp_sel[5];
-       u8 pwm_weight_temp_sel[5];
-       u8 weight_temp[3][5];   /* 0->temp_step, 1->temp_step_tol,
-                                * 2->temp_base
-                                */
+       u8 auto_pwm[NUM_FAN][7];
+       u8 auto_temp[NUM_FAN][7];
+       u8 pwm_temp_sel[NUM_FAN];
+       u8 pwm_weight_temp_sel[NUM_FAN];
+       u8 weight_temp[3][NUM_FAN];     /* 0->temp_step, 1->temp_step_tol,
+                                        * 2->temp_base
+                                        */
 
        u8 vid;
        u8 vrm;
 
+       bool have_vid;
+
        u16 have_temp;
        u16 have_temp_fixed;
        u16 have_in;
@@ -688,9 +877,145 @@ struct nct6775_sio_data {
        enum kinds kind;
 };
 
+/*
+ * Template from which per-channel sysfs attributes are generated by
+ * nct6775_create_attr_group().  dev_attr.attr.name holds a printf-style
+ * format string; the channel number is substituted in at creation time.
+ */
+struct sensor_device_template {
+       struct device_attribute dev_attr;
+       union {
+               struct {
+                       u8 nr;          /* used when s2 is true */
+                       u8 index;
+               } s;
+               int index;              /* used when s2 is false */
+       } u;
+       bool s2;        /* true if both index and nr are used */
+};
+
+/*
+ * One instantiated attribute: the sensor_device_attribute(_2) plus the
+ * backing storage for its generated name (attr.name points into name[]).
+ */
+struct sensor_device_attr_u {
+       union {
+               struct sensor_device_attribute a1;
+               struct sensor_device_attribute_2 a2;
+       } u;
+       char name[32];
+};
+
+/* Initializer for the device_attribute embedded in a template */
+#define __TEMPLATE_ATTR(_template, _mode, _show, _store) {     \
+       .attr = {.name = _template, .mode = _mode },            \
+       .show   = _show,                                        \
+       .store  = _store,                                       \
+}
+
+/* Template carrying a single 'index' (maps to sensor_device_attribute) */
+#define SENSOR_DEVICE_TEMPLATE(_template, _mode, _show, _store, _index)        \
+       { .dev_attr = __TEMPLATE_ATTR(_template, _mode, _show, _store), \
+         .u.index = _index,                                            \
+         .s2 = false }
+
+/* Template carrying 'nr' and 'index' (maps to sensor_device_attribute_2) */
+#define SENSOR_DEVICE_TEMPLATE_2(_template, _mode, _show, _store,      \
+                                _nr, _index)                           \
+       { .dev_attr = __TEMPLATE_ATTR(_template, _mode, _show, _store), \
+         .u.s.index = _index,                                          \
+         .u.s.nr = _nr,                                                \
+         .s2 = true }
+
+/* Convenience wrappers declaring a static sensor_dev_template_<name> */
+#define SENSOR_TEMPLATE(_name, _template, _mode, _show, _store, _index)        \
+static struct sensor_device_template sensor_dev_template_##_name       \
+       = SENSOR_DEVICE_TEMPLATE(_template, _mode, _show, _store,       \
+                                _index)
+
+#define SENSOR_TEMPLATE_2(_name, _template, _mode, _show, _store,      \
+                         _nr, _index)                                  \
+static struct sensor_device_template sensor_dev_template_##_name       \
+       = SENSOR_DEVICE_TEMPLATE_2(_template, _mode, _show, _store,     \
+                                _nr, _index)
+
+/*
+ * A NULL-terminated template list plus the sysfs visibility callback and
+ * the base channel number used when formatting attribute names.
+ */
+struct sensor_template_group {
+       struct sensor_device_template **templates;
+       umode_t (*is_visible)(struct kobject *, struct attribute *, int);
+       int base;
+};
+
+/*
+ * Instantiate the templates in 'tg' 'repeat' times (once per channel,
+ * channels numbered from tg->base) and register the result as a single
+ * sysfs attribute group on 'dev'.
+ *
+ * Each template's attribute name is used as a printf-style format; the
+ * channel number is substituted in via snprintf().  group, attrs and su
+ * are all device-managed allocations, released with the device.
+ *
+ * Returns the group on success or an ERR_PTR() on failure.
+ */
+static struct attribute_group *
+nct6775_create_attr_group(struct device *dev, struct sensor_template_group *tg,
+                         int repeat)
+{
+       struct attribute_group *group;
+       struct sensor_device_attr_u *su;
+       struct sensor_device_attribute *a;
+       struct sensor_device_attribute_2 *a2;
+       struct attribute **attrs;
+       struct sensor_device_template **t;
+       int err, i, j, count;
+
+       if (repeat <= 0)
+               return ERR_PTR(-EINVAL);
+
+       /* Count the templates; the list is NULL-terminated */
+       t = tg->templates;
+       for (count = 0; *t; t++, count++)
+               ;
+
+       if (count == 0)
+               return ERR_PTR(-EINVAL);
+
+       group = devm_kzalloc(dev, sizeof(*group), GFP_KERNEL);
+       if (group == NULL)
+               return ERR_PTR(-ENOMEM);
+
+       /* One attribute pointer per instantiated template, plus terminator */
+       attrs = devm_kzalloc(dev, sizeof(*attrs) * (repeat * count + 1),
+                            GFP_KERNEL);
+       if (attrs == NULL)
+               return ERR_PTR(-ENOMEM);
+
+       su = devm_kzalloc(dev, sizeof(*su) * repeat * count,
+                              GFP_KERNEL);
+       if (su == NULL)
+               return ERR_PTR(-ENOMEM);
+
+       group->attrs = attrs;
+       group->is_visible = tg->is_visible;
+
+       for (i = 0; i < repeat; i++) {
+               t = tg->templates;
+               /* NOTE(review): 'j' is incremented but never read */
+               for (j = 0; *t != NULL; j++) {
+                       /* Format the attribute name for channel tg->base + i */
+                       snprintf(su->name, sizeof(su->name),
+                                (*t)->dev_attr.attr.name, tg->base + i);
+                       if ((*t)->s2) {
+                               /* Two-index attribute: 'nr' selects channel */
+                               a2 = &su->u.a2;
+                               a2->dev_attr.attr.name = su->name;
+                               a2->nr = (*t)->u.s.nr + i;
+                               a2->index = (*t)->u.s.index;
+                               a2->dev_attr.attr.mode =
+                                 (*t)->dev_attr.attr.mode;
+                               a2->dev_attr.show = (*t)->dev_attr.show;
+                               a2->dev_attr.store = (*t)->dev_attr.store;
+                               *attrs = &a2->dev_attr.attr;
+                       } else {
+                               /* Single-index attribute: index selects channel */
+                               a = &su->u.a1;
+                               a->dev_attr.attr.name = su->name;
+                               a->index = (*t)->u.index + i;
+                               a->dev_attr.attr.mode =
+                                 (*t)->dev_attr.attr.mode;
+                               a->dev_attr.show = (*t)->dev_attr.show;
+                               a->dev_attr.store = (*t)->dev_attr.store;
+                               *attrs = &a->dev_attr.attr;
+                       }
+                       attrs++;
+                       su++;
+                       t++;
+               }
+       }
+
+       err = sysfs_create_group(&dev->kobj, group);
+       if (err)
+               /*
+                * NOTE(review): the actual error code is discarded here;
+                * ERR_PTR(err) would preserve it for the caller.
+                */
+               return ERR_PTR(-ENOMEM);
+
+       return group;
+}
+
 static bool is_word_sized(struct nct6775_data *data, u16 reg)
 {
        switch (data->kind) {
+       case nct6106:
+               return reg == 0x20 || reg == 0x22 || reg == 0x24 ||
+                 reg == 0xe0 || reg == 0xe2 || reg == 0xe4 ||
+                 reg == 0x111 || reg == 0x121 || reg == 0x131;
        case nct6775:
                return (((reg & 0xff00) == 0x100 ||
                    (reg & 0xff00) == 0x200) &&
@@ -714,8 +1039,9 @@ static bool is_word_sized(struct nct6775_data *data, u16 reg)
                  ((reg & 0xfff0) == 0x650 && (reg & 0x000f) >= 0x06) ||
                  reg == 0x73 || reg == 0x75 || reg == 0x77;
        case nct6779:
+       case nct6791:
                return reg == 0x150 || reg == 0x153 || reg == 0x155 ||
-                 ((reg & 0xfff0) == 0x4b0 && (reg & 0x000f) < 0x09) ||
+                 ((reg & 0xfff0) == 0x4b0 && (reg & 0x000f) < 0x0b) ||
                  reg == 0x402 ||
                  reg == 0x63a || reg == 0x63c || reg == 0x63e ||
                  reg == 0x640 || reg == 0x642 ||
@@ -1056,15 +1382,17 @@ static void nct6775_update_pwm_limits(struct device *dev)
                case nct6776:
                        data->auto_pwm[i][data->auto_pwm_num] = 0xff;
                        break;
+               case nct6106:
                case nct6779:
+               case nct6791:
                        reg = nct6775_read_value(data,
-                                       NCT6779_REG_CRITICAL_PWM_ENABLE[i]);
-                       if (reg & 1)
-                               data->auto_pwm[i][data->auto_pwm_num] =
-                                 nct6775_read_value(data,
-                                       NCT6779_REG_CRITICAL_PWM[i]);
+                                       data->REG_CRITICAL_PWM_ENABLE[i]);
+                       if (reg & data->CRITICAL_PWM_ENABLE_MASK)
+                               reg = nct6775_read_value(data,
+                                       data->REG_CRITICAL_PWM[i]);
                        else
-                               data->auto_pwm[i][data->auto_pwm_num] = 0xff;
+                               reg = 0xff;
+                       data->auto_pwm[i][data->auto_pwm_num] = reg;
                        break;
                }
        }
@@ -1110,7 +1438,8 @@ static struct nct6775_data *nct6775_update_device(struct device *dev)
                                data->fan_min[i] = nct6775_read_value(data,
                                           data->REG_FAN_MIN[i]);
                        data->fan_pulses[i] =
-                         nct6775_read_value(data, data->REG_FAN_PULSES[i]);
+                         (nct6775_read_value(data, data->REG_FAN_PULSES[i])
+                               >> data->FAN_PULSE_SHIFT[i]) & 0x03;
 
                        nct6775_select_fan_div(dev, data, i, reg);
                }
@@ -1143,6 +1472,15 @@ static struct nct6775_data *nct6775_update_device(struct device *dev)
                        data->alarms |= ((u64)alarm) << (i << 3);
                }
 
+               data->beeps = 0;
+               for (i = 0; i < NUM_REG_BEEP; i++) {
+                       u8 beep;
+                       if (!data->REG_BEEP[i])
+                               continue;
+                       beep = nct6775_read_value(data, data->REG_BEEP[i]);
+                       data->beeps |= ((u64)beep) << (i << 3);
+               }
+
                data->last_updated = jiffies;
                data->valid = true;
        }
@@ -1230,224 +1568,138 @@ show_temp_alarm(struct device *dev, struct device_attribute *attr, char *buf)
        return sprintf(buf, "%u\n", alarm);
 }
 
-static SENSOR_DEVICE_ATTR_2(in0_input, S_IRUGO, show_in_reg, NULL, 0, 0);
-static SENSOR_DEVICE_ATTR_2(in1_input, S_IRUGO, show_in_reg, NULL, 1, 0);
-static SENSOR_DEVICE_ATTR_2(in2_input, S_IRUGO, show_in_reg, NULL, 2, 0);
-static SENSOR_DEVICE_ATTR_2(in3_input, S_IRUGO, show_in_reg, NULL, 3, 0);
-static SENSOR_DEVICE_ATTR_2(in4_input, S_IRUGO, show_in_reg, NULL, 4, 0);
-static SENSOR_DEVICE_ATTR_2(in5_input, S_IRUGO, show_in_reg, NULL, 5, 0);
-static SENSOR_DEVICE_ATTR_2(in6_input, S_IRUGO, show_in_reg, NULL, 6, 0);
-static SENSOR_DEVICE_ATTR_2(in7_input, S_IRUGO, show_in_reg, NULL, 7, 0);
-static SENSOR_DEVICE_ATTR_2(in8_input, S_IRUGO, show_in_reg, NULL, 8, 0);
-static SENSOR_DEVICE_ATTR_2(in9_input, S_IRUGO, show_in_reg, NULL, 9, 0);
-static SENSOR_DEVICE_ATTR_2(in10_input, S_IRUGO, show_in_reg, NULL, 10, 0);
-static SENSOR_DEVICE_ATTR_2(in11_input, S_IRUGO, show_in_reg, NULL, 11, 0);
-static SENSOR_DEVICE_ATTR_2(in12_input, S_IRUGO, show_in_reg, NULL, 12, 0);
-static SENSOR_DEVICE_ATTR_2(in13_input, S_IRUGO, show_in_reg, NULL, 13, 0);
-static SENSOR_DEVICE_ATTR_2(in14_input, S_IRUGO, show_in_reg, NULL, 14, 0);
-
-static SENSOR_DEVICE_ATTR(in0_alarm, S_IRUGO, show_alarm, NULL, 0);
-static SENSOR_DEVICE_ATTR(in1_alarm, S_IRUGO, show_alarm, NULL, 1);
-static SENSOR_DEVICE_ATTR(in2_alarm, S_IRUGO, show_alarm, NULL, 2);
-static SENSOR_DEVICE_ATTR(in3_alarm, S_IRUGO, show_alarm, NULL, 3);
-static SENSOR_DEVICE_ATTR(in4_alarm, S_IRUGO, show_alarm, NULL, 4);
-static SENSOR_DEVICE_ATTR(in5_alarm, S_IRUGO, show_alarm, NULL, 5);
-static SENSOR_DEVICE_ATTR(in6_alarm, S_IRUGO, show_alarm, NULL, 6);
-static SENSOR_DEVICE_ATTR(in7_alarm, S_IRUGO, show_alarm, NULL, 7);
-static SENSOR_DEVICE_ATTR(in8_alarm, S_IRUGO, show_alarm, NULL, 8);
-static SENSOR_DEVICE_ATTR(in9_alarm, S_IRUGO, show_alarm, NULL, 9);
-static SENSOR_DEVICE_ATTR(in10_alarm, S_IRUGO, show_alarm, NULL, 10);
-static SENSOR_DEVICE_ATTR(in11_alarm, S_IRUGO, show_alarm, NULL, 11);
-static SENSOR_DEVICE_ATTR(in12_alarm, S_IRUGO, show_alarm, NULL, 12);
-static SENSOR_DEVICE_ATTR(in13_alarm, S_IRUGO, show_alarm, NULL, 13);
-static SENSOR_DEVICE_ATTR(in14_alarm, S_IRUGO, show_alarm, NULL, 14);
-
-static SENSOR_DEVICE_ATTR_2(in0_min, S_IWUSR | S_IRUGO, show_in_reg,
-                           store_in_reg, 0, 1);
-static SENSOR_DEVICE_ATTR_2(in1_min, S_IWUSR | S_IRUGO, show_in_reg,
-                           store_in_reg, 1, 1);
-static SENSOR_DEVICE_ATTR_2(in2_min, S_IWUSR | S_IRUGO, show_in_reg,
-                           store_in_reg, 2, 1);
-static SENSOR_DEVICE_ATTR_2(in3_min, S_IWUSR | S_IRUGO, show_in_reg,
-                           store_in_reg, 3, 1);
-static SENSOR_DEVICE_ATTR_2(in4_min, S_IWUSR | S_IRUGO, show_in_reg,
-                           store_in_reg, 4, 1);
-static SENSOR_DEVICE_ATTR_2(in5_min, S_IWUSR | S_IRUGO, show_in_reg,
-                           store_in_reg, 5, 1);
-static SENSOR_DEVICE_ATTR_2(in6_min, S_IWUSR | S_IRUGO, show_in_reg,
-                           store_in_reg, 6, 1);
-static SENSOR_DEVICE_ATTR_2(in7_min, S_IWUSR | S_IRUGO, show_in_reg,
-                           store_in_reg, 7, 1);
-static SENSOR_DEVICE_ATTR_2(in8_min, S_IWUSR | S_IRUGO, show_in_reg,
-                           store_in_reg, 8, 1);
-static SENSOR_DEVICE_ATTR_2(in9_min, S_IWUSR | S_IRUGO, show_in_reg,
-                           store_in_reg, 9, 1);
-static SENSOR_DEVICE_ATTR_2(in10_min, S_IWUSR | S_IRUGO, show_in_reg,
-                           store_in_reg, 10, 1);
-static SENSOR_DEVICE_ATTR_2(in11_min, S_IWUSR | S_IRUGO, show_in_reg,
-                           store_in_reg, 11, 1);
-static SENSOR_DEVICE_ATTR_2(in12_min, S_IWUSR | S_IRUGO, show_in_reg,
-                           store_in_reg, 12, 1);
-static SENSOR_DEVICE_ATTR_2(in13_min, S_IWUSR | S_IRUGO, show_in_reg,
-                           store_in_reg, 13, 1);
-static SENSOR_DEVICE_ATTR_2(in14_min, S_IWUSR | S_IRUGO, show_in_reg,
-                           store_in_reg, 14, 1);
-
-static SENSOR_DEVICE_ATTR_2(in0_max, S_IWUSR | S_IRUGO, show_in_reg,
-                           store_in_reg, 0, 2);
-static SENSOR_DEVICE_ATTR_2(in1_max, S_IWUSR | S_IRUGO, show_in_reg,
-                           store_in_reg, 1, 2);
-static SENSOR_DEVICE_ATTR_2(in2_max, S_IWUSR | S_IRUGO, show_in_reg,
-                           store_in_reg, 2, 2);
-static SENSOR_DEVICE_ATTR_2(in3_max, S_IWUSR | S_IRUGO, show_in_reg,
-                           store_in_reg, 3, 2);
-static SENSOR_DEVICE_ATTR_2(in4_max, S_IWUSR | S_IRUGO, show_in_reg,
-                           store_in_reg, 4, 2);
-static SENSOR_DEVICE_ATTR_2(in5_max, S_IWUSR | S_IRUGO, show_in_reg,
-                           store_in_reg, 5, 2);
-static SENSOR_DEVICE_ATTR_2(in6_max, S_IWUSR | S_IRUGO, show_in_reg,
-                           store_in_reg, 6, 2);
-static SENSOR_DEVICE_ATTR_2(in7_max, S_IWUSR | S_IRUGO, show_in_reg,
-                           store_in_reg, 7, 2);
-static SENSOR_DEVICE_ATTR_2(in8_max, S_IWUSR | S_IRUGO, show_in_reg,
-                           store_in_reg, 8, 2);
-static SENSOR_DEVICE_ATTR_2(in9_max, S_IWUSR | S_IRUGO, show_in_reg,
-                           store_in_reg, 9, 2);
-static SENSOR_DEVICE_ATTR_2(in10_max, S_IWUSR | S_IRUGO, show_in_reg,
-                           store_in_reg, 10, 2);
-static SENSOR_DEVICE_ATTR_2(in11_max, S_IWUSR | S_IRUGO, show_in_reg,
-                           store_in_reg, 11, 2);
-static SENSOR_DEVICE_ATTR_2(in12_max, S_IWUSR | S_IRUGO, show_in_reg,
-                           store_in_reg, 12, 2);
-static SENSOR_DEVICE_ATTR_2(in13_max, S_IWUSR | S_IRUGO, show_in_reg,
-                           store_in_reg, 13, 2);
-static SENSOR_DEVICE_ATTR_2(in14_max, S_IWUSR | S_IRUGO, show_in_reg,
-                           store_in_reg, 14, 2);
-
-static struct attribute *nct6775_attributes_in[15][5] = {
-       {
-               &sensor_dev_attr_in0_input.dev_attr.attr,
-               &sensor_dev_attr_in0_min.dev_attr.attr,
-               &sensor_dev_attr_in0_max.dev_attr.attr,
-               &sensor_dev_attr_in0_alarm.dev_attr.attr,
-               NULL
-       },
-       {
-               &sensor_dev_attr_in1_input.dev_attr.attr,
-               &sensor_dev_attr_in1_min.dev_attr.attr,
-               &sensor_dev_attr_in1_max.dev_attr.attr,
-               &sensor_dev_attr_in1_alarm.dev_attr.attr,
-               NULL
-       },
-       {
-               &sensor_dev_attr_in2_input.dev_attr.attr,
-               &sensor_dev_attr_in2_min.dev_attr.attr,
-               &sensor_dev_attr_in2_max.dev_attr.attr,
-               &sensor_dev_attr_in2_alarm.dev_attr.attr,
-               NULL
-       },
-       {
-               &sensor_dev_attr_in3_input.dev_attr.attr,
-               &sensor_dev_attr_in3_min.dev_attr.attr,
-               &sensor_dev_attr_in3_max.dev_attr.attr,
-               &sensor_dev_attr_in3_alarm.dev_attr.attr,
-               NULL
-       },
-       {
-               &sensor_dev_attr_in4_input.dev_attr.attr,
-               &sensor_dev_attr_in4_min.dev_attr.attr,
-               &sensor_dev_attr_in4_max.dev_attr.attr,
-               &sensor_dev_attr_in4_alarm.dev_attr.attr,
-               NULL
-       },
-       {
-               &sensor_dev_attr_in5_input.dev_attr.attr,
-               &sensor_dev_attr_in5_min.dev_attr.attr,
-               &sensor_dev_attr_in5_max.dev_attr.attr,
-               &sensor_dev_attr_in5_alarm.dev_attr.attr,
-               NULL
-       },
-       {
-               &sensor_dev_attr_in6_input.dev_attr.attr,
-               &sensor_dev_attr_in6_min.dev_attr.attr,
-               &sensor_dev_attr_in6_max.dev_attr.attr,
-               &sensor_dev_attr_in6_alarm.dev_attr.attr,
-               NULL
-       },
-       {
-               &sensor_dev_attr_in7_input.dev_attr.attr,
-               &sensor_dev_attr_in7_min.dev_attr.attr,
-               &sensor_dev_attr_in7_max.dev_attr.attr,
-               &sensor_dev_attr_in7_alarm.dev_attr.attr,
-               NULL
-       },
-       {
-               &sensor_dev_attr_in8_input.dev_attr.attr,
-               &sensor_dev_attr_in8_min.dev_attr.attr,
-               &sensor_dev_attr_in8_max.dev_attr.attr,
-               &sensor_dev_attr_in8_alarm.dev_attr.attr,
-               NULL
-       },
-       {
-               &sensor_dev_attr_in9_input.dev_attr.attr,
-               &sensor_dev_attr_in9_min.dev_attr.attr,
-               &sensor_dev_attr_in9_max.dev_attr.attr,
-               &sensor_dev_attr_in9_alarm.dev_attr.attr,
-               NULL
-       },
-       {
-               &sensor_dev_attr_in10_input.dev_attr.attr,
-               &sensor_dev_attr_in10_min.dev_attr.attr,
-               &sensor_dev_attr_in10_max.dev_attr.attr,
-               &sensor_dev_attr_in10_alarm.dev_attr.attr,
-               NULL
-       },
-       {
-               &sensor_dev_attr_in11_input.dev_attr.attr,
-               &sensor_dev_attr_in11_min.dev_attr.attr,
-               &sensor_dev_attr_in11_max.dev_attr.attr,
-               &sensor_dev_attr_in11_alarm.dev_attr.attr,
-               NULL
-       },
-       {
-               &sensor_dev_attr_in12_input.dev_attr.attr,
-               &sensor_dev_attr_in12_min.dev_attr.attr,
-               &sensor_dev_attr_in12_max.dev_attr.attr,
-               &sensor_dev_attr_in12_alarm.dev_attr.attr,
-               NULL
-       },
-       {
-               &sensor_dev_attr_in13_input.dev_attr.attr,
-               &sensor_dev_attr_in13_min.dev_attr.attr,
-               &sensor_dev_attr_in13_max.dev_attr.attr,
-               &sensor_dev_attr_in13_alarm.dev_attr.attr,
-               NULL
-       },
-       {
-               &sensor_dev_attr_in14_input.dev_attr.attr,
-               &sensor_dev_attr_in14_min.dev_attr.attr,
-               &sensor_dev_attr_in14_max.dev_attr.attr,
-               &sensor_dev_attr_in14_alarm.dev_attr.attr,
-               NULL
-       },
+/*
+ * Show one beep-enable bit, located in the cached 64-bit beeps bitmap
+ * via the chip-specific BEEP_BITS[] mapping.
+ */
+static ssize_t
+show_beep(struct device *dev, struct device_attribute *attr, char *buf)
+{
+       struct sensor_device_attribute *sattr = to_sensor_dev_attr(attr);
+       struct nct6775_data *data = nct6775_update_device(dev);
+       int nr = data->BEEP_BITS[sattr->index];
+
+       return sprintf(buf, "%u\n",
+                      (unsigned int)((data->beeps >> nr) & 0x01));
+}
+
+/*
+ * Set or clear one beep-enable bit (value must be 0 or 1) and write back
+ * the 8-bit REG_BEEP register containing it (bit >> 3 selects the entry).
+ *
+ * NOTE(review): this cast assumes sensor_device_attribute and
+ * sensor_device_attribute_2 place 'index' at the same offset, since some
+ * callers (e.g. in_beep) are single-index attributes -- verify.
+ */
+static ssize_t
+store_beep(struct device *dev, struct device_attribute *attr, const char *buf,
+          size_t count)
+{
+       struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
+       struct nct6775_data *data = dev_get_drvdata(dev);
+       int nr = data->BEEP_BITS[sattr->index];
+       int regindex = nr >> 3;         /* beep register holding this bit */
+       unsigned long val;
+
+       int err = kstrtoul(buf, 10, &val);
+       if (err < 0)
+               return err;
+       if (val > 1)
+               return -EINVAL;
+
+       mutex_lock(&data->update_lock);
+       if (val)
+               data->beeps |= (1ULL << nr);
+       else
+               data->beeps &= ~(1ULL << nr);
+       /* Write back only the byte that contains the changed bit */
+       nct6775_write_value(data, data->REG_BEEP[regindex],
+                           (data->beeps >> (regindex << 3)) & 0xff);
+       mutex_unlock(&data->update_lock);
+       return count;
+}
+
+/*
+ * Show the beep-enable bit for a temperature attribute; reports 0 when the
+ * temperature has no configured source (find_temp_source() < 0).
+ */
+static ssize_t
+show_temp_beep(struct device *dev, struct device_attribute *attr, char *buf)
+{
+       struct sensor_device_attribute *sattr = to_sensor_dev_attr(attr);
+       struct nct6775_data *data = nct6775_update_device(dev);
+       unsigned int beep = 0;
+       int nr;
+
+       /*
+        * For temperatures, there is no fixed mapping from registers to beep
+        * enable bits. Beep enable bits are determined by the temperature
+        * source mapping.
+        */
+       nr = find_temp_source(data, sattr->index, data->num_temp_beeps);
+       if (nr >= 0) {
+               int bit = data->BEEP_BITS[nr + TEMP_ALARM_BASE];
+               beep = (data->beeps >> bit) & 0x01;
+       }
+       return sprintf(buf, "%u\n", beep);
+}
+
+/*
+ * Set or clear the beep-enable bit for a temperature attribute.  The bit
+ * is located via the temperature source mapping (see show_temp_beep());
+ * returns -ENODEV if the temperature has no configured source.
+ */
+static ssize_t
+store_temp_beep(struct device *dev, struct device_attribute *attr,
+               const char *buf, size_t count)
+{
+       struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
+       struct nct6775_data *data = dev_get_drvdata(dev);
+       int nr, bit, regindex;
+       unsigned long val;
+
+       int err = kstrtoul(buf, 10, &val);
+       if (err < 0)
+               return err;
+       if (val > 1)
+               return -EINVAL;
+
+       nr = find_temp_source(data, sattr->index, data->num_temp_beeps);
+       if (nr < 0)
+               return -ENODEV;
+
+       bit = data->BEEP_BITS[nr + TEMP_ALARM_BASE];
+       regindex = bit >> 3;            /* beep register holding this bit */
+
+       mutex_lock(&data->update_lock);
+       if (val)
+               data->beeps |= (1ULL << bit);
+       else
+               data->beeps &= ~(1ULL << bit);
+       /* Write back only the byte that contains the changed bit */
+       nct6775_write_value(data, data->REG_BEEP[regindex],
+                           (data->beeps >> (regindex << 3)) & 0xff);
+       mutex_unlock(&data->update_lock);
+
+       return count;
+}
+
+/*
+ * sysfs is_visible callback for voltage attributes.  'index' walks
+ * nct6775_attributes_in_template (5 attributes per input), so index / 5
+ * is the voltage channel; hide the whole set if that channel is absent.
+ */
+static umode_t nct6775_in_is_visible(struct kobject *kobj,
+                                    struct attribute *attr, int index)
+{
+       struct device *dev = container_of(kobj, struct device, kobj);
+       struct nct6775_data *data = dev_get_drvdata(dev);
+       int in = index / 5;     /* voltage index */
+
+       if (!(data->have_in & (1 << in)))
+               return 0;
+
+       return attr->mode;
+}
+
+/*
+ * Voltage (inN_*) attribute templates.  For the two-index templates the
+ * trailing argument selects the value slot passed to show/store_in_reg.
+ */
+SENSOR_TEMPLATE_2(in_input, "in%d_input", S_IRUGO, show_in_reg, NULL, 0, 0);
+SENSOR_TEMPLATE(in_alarm, "in%d_alarm", S_IRUGO, show_alarm, NULL, 0);
+SENSOR_TEMPLATE(in_beep, "in%d_beep", S_IWUSR | S_IRUGO, show_beep, store_beep,
+               0);
+SENSOR_TEMPLATE_2(in_min, "in%d_min", S_IWUSR | S_IRUGO, show_in_reg,
+                 store_in_reg, 0, 1);
+SENSOR_TEMPLATE_2(in_max, "in%d_max", S_IWUSR | S_IRUGO, show_in_reg,
+                 store_in_reg, 0, 2);
+
+/*
+ * nct6775_in_is_visible uses the index into the following array
+ * to determine if attributes should be created or not.
+ * Any change in order or content must be matched.
+ */
+static struct sensor_device_template *nct6775_attributes_in_template[] = {
+       &sensor_dev_template_in_input,
+       &sensor_dev_template_in_alarm,
+       &sensor_dev_template_in_beep,
+       &sensor_dev_template_in_min,
+       &sensor_dev_template_in_max,
+       NULL
 };
 
-static const struct attribute_group nct6775_group_in[15] = {
-       { .attrs = nct6775_attributes_in[0] },
-       { .attrs = nct6775_attributes_in[1] },
-       { .attrs = nct6775_attributes_in[2] },
-       { .attrs = nct6775_attributes_in[3] },
-       { .attrs = nct6775_attributes_in[4] },
-       { .attrs = nct6775_attributes_in[5] },
-       { .attrs = nct6775_attributes_in[6] },
-       { .attrs = nct6775_attributes_in[7] },
-       { .attrs = nct6775_attributes_in[8] },
-       { .attrs = nct6775_attributes_in[9] },
-       { .attrs = nct6775_attributes_in[10] },
-       { .attrs = nct6775_attributes_in[11] },
-       { .attrs = nct6775_attributes_in[12] },
-       { .attrs = nct6775_attributes_in[13] },
-       { .attrs = nct6775_attributes_in[14] },
+/* Voltage attribute group descriptor fed to nct6775_create_attr_group() */
+static struct sensor_template_group nct6775_in_template_group = {
+       .templates = nct6775_attributes_in_template,
+       .is_visible = nct6775_in_is_visible,
 };
 
 static ssize_t
@@ -1592,6 +1844,7 @@ store_fan_pulses(struct device *dev, struct device_attribute *attr,
        int nr = sattr->index;
        unsigned long val;
        int err;
+       u8 reg;
 
        err = kstrtoul(buf, 10, &val);
        if (err < 0)
@@ -1602,60 +1855,68 @@ store_fan_pulses(struct device *dev, struct device_attribute *attr,
 
        mutex_lock(&data->update_lock);
        data->fan_pulses[nr] = val & 3;
-       nct6775_write_value(data, data->REG_FAN_PULSES[nr], val & 3);
+       reg = nct6775_read_value(data, data->REG_FAN_PULSES[nr]);
+       reg &= ~(0x03 << data->FAN_PULSE_SHIFT[nr]);
+       reg |= (val & 3) << data->FAN_PULSE_SHIFT[nr];
+       nct6775_write_value(data, data->REG_FAN_PULSES[nr], reg);
        mutex_unlock(&data->update_lock);
 
        return count;
 }
 
-static struct sensor_device_attribute sda_fan_input[] = {
-       SENSOR_ATTR(fan1_input, S_IRUGO, show_fan, NULL, 0),
-       SENSOR_ATTR(fan2_input, S_IRUGO, show_fan, NULL, 1),
-       SENSOR_ATTR(fan3_input, S_IRUGO, show_fan, NULL, 2),
-       SENSOR_ATTR(fan4_input, S_IRUGO, show_fan, NULL, 3),
-       SENSOR_ATTR(fan5_input, S_IRUGO, show_fan, NULL, 4),
-};
+static umode_t nct6775_fan_is_visible(struct kobject *kobj,
+                                     struct attribute *attr, int index)
+{
+       struct device *dev = container_of(kobj, struct device, kobj);
+       struct nct6775_data *data = dev_get_drvdata(dev);
+       int fan = index / 6;    /* fan index */
+       int nr = index % 6;     /* attribute index */
 
-static struct sensor_device_attribute sda_fan_alarm[] = {
-       SENSOR_ATTR(fan1_alarm, S_IRUGO, show_alarm, NULL, FAN_ALARM_BASE),
-       SENSOR_ATTR(fan2_alarm, S_IRUGO, show_alarm, NULL, FAN_ALARM_BASE + 1),
-       SENSOR_ATTR(fan3_alarm, S_IRUGO, show_alarm, NULL, FAN_ALARM_BASE + 2),
-       SENSOR_ATTR(fan4_alarm, S_IRUGO, show_alarm, NULL, FAN_ALARM_BASE + 3),
-       SENSOR_ATTR(fan5_alarm, S_IRUGO, show_alarm, NULL, FAN_ALARM_BASE + 4),
-};
+       if (!(data->has_fan & (1 << fan)))
+               return 0;
 
-static struct sensor_device_attribute sda_fan_min[] = {
-       SENSOR_ATTR(fan1_min, S_IWUSR | S_IRUGO, show_fan_min,
-                   store_fan_min, 0),
-       SENSOR_ATTR(fan2_min, S_IWUSR | S_IRUGO, show_fan_min,
-                   store_fan_min, 1),
-       SENSOR_ATTR(fan3_min, S_IWUSR | S_IRUGO, show_fan_min,
-                   store_fan_min, 2),
-       SENSOR_ATTR(fan4_min, S_IWUSR | S_IRUGO, show_fan_min,
-                   store_fan_min, 3),
-       SENSOR_ATTR(fan5_min, S_IWUSR | S_IRUGO, show_fan_min,
-                   store_fan_min, 4),
-};
+       if (nr == 1 && data->ALARM_BITS[FAN_ALARM_BASE + fan] == -1)
+               return 0;
+       if (nr == 2 && data->BEEP_BITS[FAN_ALARM_BASE + fan] == -1)
+               return 0;
+       if (nr == 4 && !(data->has_fan_min & (1 << fan)))
+               return 0;
+       if (nr == 5 && data->kind != nct6775)
+               return 0;
+
+       return attr->mode;
+}
 
-static struct sensor_device_attribute sda_fan_pulses[] = {
-       SENSOR_ATTR(fan1_pulses, S_IWUSR | S_IRUGO, show_fan_pulses,
-                   store_fan_pulses, 0),
-       SENSOR_ATTR(fan2_pulses, S_IWUSR | S_IRUGO, show_fan_pulses,
-                   store_fan_pulses, 1),
-       SENSOR_ATTR(fan3_pulses, S_IWUSR | S_IRUGO, show_fan_pulses,
-                   store_fan_pulses, 2),
-       SENSOR_ATTR(fan4_pulses, S_IWUSR | S_IRUGO, show_fan_pulses,
-                   store_fan_pulses, 3),
-       SENSOR_ATTR(fan5_pulses, S_IWUSR | S_IRUGO, show_fan_pulses,
-                   store_fan_pulses, 4),
+SENSOR_TEMPLATE(fan_input, "fan%d_input", S_IRUGO, show_fan, NULL, 0);
+SENSOR_TEMPLATE(fan_alarm, "fan%d_alarm", S_IRUGO, show_alarm, NULL,
+               FAN_ALARM_BASE);
+SENSOR_TEMPLATE(fan_beep, "fan%d_beep", S_IWUSR | S_IRUGO, show_beep,
+               store_beep, FAN_ALARM_BASE);
+SENSOR_TEMPLATE(fan_pulses, "fan%d_pulses", S_IWUSR | S_IRUGO, show_fan_pulses,
+               store_fan_pulses, 0);
+SENSOR_TEMPLATE(fan_min, "fan%d_min", S_IWUSR | S_IRUGO, show_fan_min,
+               store_fan_min, 0);
+SENSOR_TEMPLATE(fan_div, "fan%d_div", S_IRUGO, show_fan_div, NULL, 0);
+
+/*
+ * nct6775_fan_is_visible uses the index into the following array
+ * to determine if attributes should be created or not.
+ * Any change in order or content must be matched.
+ */
+static struct sensor_device_template *nct6775_attributes_fan_template[] = {
+       &sensor_dev_template_fan_input,
+       &sensor_dev_template_fan_alarm, /* 1 */
+       &sensor_dev_template_fan_beep,  /* 2 */
+       &sensor_dev_template_fan_pulses,
+       &sensor_dev_template_fan_min,   /* 4 */
+       &sensor_dev_template_fan_div,   /* 5 */
+       NULL
 };
 
-static struct sensor_device_attribute sda_fan_div[] = {
-       SENSOR_ATTR(fan1_div, S_IRUGO, show_fan_div, NULL, 0),
-       SENSOR_ATTR(fan2_div, S_IRUGO, show_fan_div, NULL, 1),
-       SENSOR_ATTR(fan3_div, S_IRUGO, show_fan_div, NULL, 2),
-       SENSOR_ATTR(fan4_div, S_IRUGO, show_fan_div, NULL, 3),
-       SENSOR_ATTR(fan5_div, S_IRUGO, show_fan_div, NULL, 4),
+static struct sensor_template_group nct6775_fan_template_group = {
+       .templates = nct6775_attributes_fan_template,
+       .is_visible = nct6775_fan_is_visible,
+       .base = 1,
 };
 
 static ssize_t
@@ -1752,7 +2013,7 @@ store_temp_type(struct device *dev, struct device_attribute *attr,
        int nr = sattr->index;
        unsigned long val;
        int err;
-       u8 vbat, diode, bit;
+       u8 vbat, diode, vbit, dbit;
 
        err = kstrtoul(buf, 10, &val);
        if (err < 0)
@@ -1764,16 +2025,17 @@ store_temp_type(struct device *dev, struct device_attribute *attr,
        mutex_lock(&data->update_lock);
 
        data->temp_type[nr] = val;
-       vbat = nct6775_read_value(data, data->REG_VBAT) & ~(0x02 << nr);
-       diode = nct6775_read_value(data, data->REG_DIODE) & ~(0x02 << nr);
-       bit = 0x02 << nr;
+       vbit = 0x02 << nr;
+       dbit = data->DIODE_MASK << nr;
+       vbat = nct6775_read_value(data, data->REG_VBAT) & ~vbit;
+       diode = nct6775_read_value(data, data->REG_DIODE) & ~dbit;
        switch (val) {
        case 1: /* CPU diode (diode, current mode) */
-               vbat |= bit;
-               diode |= bit;
+               vbat |= vbit;
+               diode |= dbit;
                break;
        case 3: /* diode, voltage mode */
-               vbat |= bit;
+               vbat |= vbit;
                break;
        case 4: /* thermistor */
                break;
@@ -1785,142 +2047,83 @@ store_temp_type(struct device *dev, struct device_attribute *attr,
        return count;
 }
 
-static struct sensor_device_attribute_2 sda_temp_input[] = {
-       SENSOR_ATTR_2(temp1_input, S_IRUGO, show_temp, NULL, 0, 0),
-       SENSOR_ATTR_2(temp2_input, S_IRUGO, show_temp, NULL, 1, 0),
-       SENSOR_ATTR_2(temp3_input, S_IRUGO, show_temp, NULL, 2, 0),
-       SENSOR_ATTR_2(temp4_input, S_IRUGO, show_temp, NULL, 3, 0),
-       SENSOR_ATTR_2(temp5_input, S_IRUGO, show_temp, NULL, 4, 0),
-       SENSOR_ATTR_2(temp6_input, S_IRUGO, show_temp, NULL, 5, 0),
-       SENSOR_ATTR_2(temp7_input, S_IRUGO, show_temp, NULL, 6, 0),
-       SENSOR_ATTR_2(temp8_input, S_IRUGO, show_temp, NULL, 7, 0),
-       SENSOR_ATTR_2(temp9_input, S_IRUGO, show_temp, NULL, 8, 0),
-       SENSOR_ATTR_2(temp10_input, S_IRUGO, show_temp, NULL, 9, 0),
-};
+static umode_t nct6775_temp_is_visible(struct kobject *kobj,
+                                      struct attribute *attr, int index)
+{
+       struct device *dev = container_of(kobj, struct device, kobj);
+       struct nct6775_data *data = dev_get_drvdata(dev);
+       int temp = index / 10;  /* temp index */
+       int nr = index % 10;    /* attribute index */
 
-static struct sensor_device_attribute sda_temp_label[] = {
-       SENSOR_ATTR(temp1_label, S_IRUGO, show_temp_label, NULL, 0),
-       SENSOR_ATTR(temp2_label, S_IRUGO, show_temp_label, NULL, 1),
-       SENSOR_ATTR(temp3_label, S_IRUGO, show_temp_label, NULL, 2),
-       SENSOR_ATTR(temp4_label, S_IRUGO, show_temp_label, NULL, 3),
-       SENSOR_ATTR(temp5_label, S_IRUGO, show_temp_label, NULL, 4),
-       SENSOR_ATTR(temp6_label, S_IRUGO, show_temp_label, NULL, 5),
-       SENSOR_ATTR(temp7_label, S_IRUGO, show_temp_label, NULL, 6),
-       SENSOR_ATTR(temp8_label, S_IRUGO, show_temp_label, NULL, 7),
-       SENSOR_ATTR(temp9_label, S_IRUGO, show_temp_label, NULL, 8),
-       SENSOR_ATTR(temp10_label, S_IRUGO, show_temp_label, NULL, 9),
-};
+       if (!(data->have_temp & (1 << temp)))
+               return 0;
 
-static struct sensor_device_attribute_2 sda_temp_max[] = {
-       SENSOR_ATTR_2(temp1_max, S_IRUGO | S_IWUSR, show_temp, store_temp,
-                     0, 1),
-       SENSOR_ATTR_2(temp2_max, S_IRUGO | S_IWUSR, show_temp, store_temp,
-                     1, 1),
-       SENSOR_ATTR_2(temp3_max, S_IRUGO | S_IWUSR, show_temp, store_temp,
-                     2, 1),
-       SENSOR_ATTR_2(temp4_max, S_IRUGO | S_IWUSR, show_temp, store_temp,
-                     3, 1),
-       SENSOR_ATTR_2(temp5_max, S_IRUGO | S_IWUSR, show_temp, store_temp,
-                     4, 1),
-       SENSOR_ATTR_2(temp6_max, S_IRUGO | S_IWUSR, show_temp, store_temp,
-                     5, 1),
-       SENSOR_ATTR_2(temp7_max, S_IRUGO | S_IWUSR, show_temp, store_temp,
-                     6, 1),
-       SENSOR_ATTR_2(temp8_max, S_IRUGO | S_IWUSR, show_temp, store_temp,
-                     7, 1),
-       SENSOR_ATTR_2(temp9_max, S_IRUGO | S_IWUSR, show_temp, store_temp,
-                     8, 1),
-       SENSOR_ATTR_2(temp10_max, S_IRUGO | S_IWUSR, show_temp, store_temp,
-                     9, 1),
-};
+       if (nr == 2 && find_temp_source(data, temp, data->num_temp_alarms) < 0)
+               return 0;                               /* alarm */
 
-static struct sensor_device_attribute_2 sda_temp_max_hyst[] = {
-       SENSOR_ATTR_2(temp1_max_hyst, S_IRUGO | S_IWUSR, show_temp, store_temp,
-                     0, 2),
-       SENSOR_ATTR_2(temp2_max_hyst, S_IRUGO | S_IWUSR, show_temp, store_temp,
-                     1, 2),
-       SENSOR_ATTR_2(temp3_max_hyst, S_IRUGO | S_IWUSR, show_temp, store_temp,
-                     2, 2),
-       SENSOR_ATTR_2(temp4_max_hyst, S_IRUGO | S_IWUSR, show_temp, store_temp,
-                     3, 2),
-       SENSOR_ATTR_2(temp5_max_hyst, S_IRUGO | S_IWUSR, show_temp, store_temp,
-                     4, 2),
-       SENSOR_ATTR_2(temp6_max_hyst, S_IRUGO | S_IWUSR, show_temp, store_temp,
-                     5, 2),
-       SENSOR_ATTR_2(temp7_max_hyst, S_IRUGO | S_IWUSR, show_temp, store_temp,
-                     6, 2),
-       SENSOR_ATTR_2(temp8_max_hyst, S_IRUGO | S_IWUSR, show_temp, store_temp,
-                     7, 2),
-       SENSOR_ATTR_2(temp9_max_hyst, S_IRUGO | S_IWUSR, show_temp, store_temp,
-                     8, 2),
-       SENSOR_ATTR_2(temp10_max_hyst, S_IRUGO | S_IWUSR, show_temp, store_temp,
-                     9, 2),
-};
+       if (nr == 3 && find_temp_source(data, temp, data->num_temp_beeps) < 0)
+               return 0;                               /* beep */
 
-static struct sensor_device_attribute_2 sda_temp_crit[] = {
-       SENSOR_ATTR_2(temp1_crit, S_IRUGO | S_IWUSR, show_temp, store_temp,
-                     0, 3),
-       SENSOR_ATTR_2(temp2_crit, S_IRUGO | S_IWUSR, show_temp, store_temp,
-                     1, 3),
-       SENSOR_ATTR_2(temp3_crit, S_IRUGO | S_IWUSR, show_temp, store_temp,
-                     2, 3),
-       SENSOR_ATTR_2(temp4_crit, S_IRUGO | S_IWUSR, show_temp, store_temp,
-                     3, 3),
-       SENSOR_ATTR_2(temp5_crit, S_IRUGO | S_IWUSR, show_temp, store_temp,
-                     4, 3),
-       SENSOR_ATTR_2(temp6_crit, S_IRUGO | S_IWUSR, show_temp, store_temp,
-                     5, 3),
-       SENSOR_ATTR_2(temp7_crit, S_IRUGO | S_IWUSR, show_temp, store_temp,
-                     6, 3),
-       SENSOR_ATTR_2(temp8_crit, S_IRUGO | S_IWUSR, show_temp, store_temp,
-                     7, 3),
-       SENSOR_ATTR_2(temp9_crit, S_IRUGO | S_IWUSR, show_temp, store_temp,
-                     8, 3),
-       SENSOR_ATTR_2(temp10_crit, S_IRUGO | S_IWUSR, show_temp, store_temp,
-                     9, 3),
-};
+       if (nr == 4 && !data->reg_temp[1][temp])        /* max */
+               return 0;
 
-static struct sensor_device_attribute sda_temp_offset[] = {
-       SENSOR_ATTR(temp1_offset, S_IRUGO | S_IWUSR, show_temp_offset,
-                   store_temp_offset, 0),
-       SENSOR_ATTR(temp2_offset, S_IRUGO | S_IWUSR, show_temp_offset,
-                   store_temp_offset, 1),
-       SENSOR_ATTR(temp3_offset, S_IRUGO | S_IWUSR, show_temp_offset,
-                   store_temp_offset, 2),
-       SENSOR_ATTR(temp4_offset, S_IRUGO | S_IWUSR, show_temp_offset,
-                   store_temp_offset, 3),
-       SENSOR_ATTR(temp5_offset, S_IRUGO | S_IWUSR, show_temp_offset,
-                   store_temp_offset, 4),
-       SENSOR_ATTR(temp6_offset, S_IRUGO | S_IWUSR, show_temp_offset,
-                   store_temp_offset, 5),
-};
+       if (nr == 5 && !data->reg_temp[2][temp])        /* max_hyst */
+               return 0;
+
+       if (nr == 6 && !data->reg_temp[3][temp])        /* crit */
+               return 0;
+
+       if (nr == 7 && !data->reg_temp[4][temp])        /* lcrit */
+               return 0;
+
+       /* offset and type only apply to fixed sensors */
+       if (nr > 7 && !(data->have_temp_fixed & (1 << temp)))
+               return 0;
 
-static struct sensor_device_attribute sda_temp_type[] = {
-       SENSOR_ATTR(temp1_type, S_IRUGO | S_IWUSR, show_temp_type,
-                   store_temp_type, 0),
-       SENSOR_ATTR(temp2_type, S_IRUGO | S_IWUSR, show_temp_type,
-                   store_temp_type, 1),
-       SENSOR_ATTR(temp3_type, S_IRUGO | S_IWUSR, show_temp_type,
-                   store_temp_type, 2),
-       SENSOR_ATTR(temp4_type, S_IRUGO | S_IWUSR, show_temp_type,
-                   store_temp_type, 3),
-       SENSOR_ATTR(temp5_type, S_IRUGO | S_IWUSR, show_temp_type,
-                   store_temp_type, 4),
-       SENSOR_ATTR(temp6_type, S_IRUGO | S_IWUSR, show_temp_type,
-                   store_temp_type, 5),
+       return attr->mode;
+}
+
+SENSOR_TEMPLATE_2(temp_input, "temp%d_input", S_IRUGO, show_temp, NULL, 0, 0);
+SENSOR_TEMPLATE(temp_label, "temp%d_label", S_IRUGO, show_temp_label, NULL, 0);
+SENSOR_TEMPLATE_2(temp_max, "temp%d_max", S_IRUGO | S_IWUSR, show_temp,
+                 store_temp, 0, 1);
+SENSOR_TEMPLATE_2(temp_max_hyst, "temp%d_max_hyst", S_IRUGO | S_IWUSR,
+                 show_temp, store_temp, 0, 2);
+SENSOR_TEMPLATE_2(temp_crit, "temp%d_crit", S_IRUGO | S_IWUSR, show_temp,
+                 store_temp, 0, 3);
+SENSOR_TEMPLATE_2(temp_lcrit, "temp%d_lcrit", S_IRUGO | S_IWUSR, show_temp,
+                 store_temp, 0, 4);
+SENSOR_TEMPLATE(temp_offset, "temp%d_offset", S_IRUGO | S_IWUSR,
+               show_temp_offset, store_temp_offset, 0);
+SENSOR_TEMPLATE(temp_type, "temp%d_type", S_IRUGO | S_IWUSR, show_temp_type,
+               store_temp_type, 0);
+SENSOR_TEMPLATE(temp_alarm, "temp%d_alarm", S_IRUGO, show_temp_alarm, NULL, 0);
+SENSOR_TEMPLATE(temp_beep, "temp%d_beep", S_IRUGO | S_IWUSR, show_temp_beep,
+               store_temp_beep, 0);
+
+/*
+ * nct6775_temp_is_visible uses the index into the following array
+ * to determine if attributes should be created or not.
+ * Any change in order or content must be matched.
+ */
+static struct sensor_device_template *nct6775_attributes_temp_template[] = {
+       &sensor_dev_template_temp_input,
+       &sensor_dev_template_temp_label,
+       &sensor_dev_template_temp_alarm,        /* 2 */
+       &sensor_dev_template_temp_beep,         /* 3 */
+       &sensor_dev_template_temp_max,          /* 4 */
+       &sensor_dev_template_temp_max_hyst,     /* 5 */
+       &sensor_dev_template_temp_crit,         /* 6 */
+       &sensor_dev_template_temp_lcrit,        /* 7 */
+       &sensor_dev_template_temp_offset,       /* 8 */
+       &sensor_dev_template_temp_type,         /* 9 */
+       NULL
 };
 
-static struct sensor_device_attribute sda_temp_alarm[] = {
-       SENSOR_ATTR(temp1_alarm, S_IRUGO, show_temp_alarm, NULL, 0),
-       SENSOR_ATTR(temp2_alarm, S_IRUGO, show_temp_alarm, NULL, 1),
-       SENSOR_ATTR(temp3_alarm, S_IRUGO, show_temp_alarm, NULL, 2),
-       SENSOR_ATTR(temp4_alarm, S_IRUGO, show_temp_alarm, NULL, 3),
-       SENSOR_ATTR(temp5_alarm, S_IRUGO, show_temp_alarm, NULL, 4),
-       SENSOR_ATTR(temp6_alarm, S_IRUGO, show_temp_alarm, NULL, 5),
-       SENSOR_ATTR(temp7_alarm, S_IRUGO, show_temp_alarm, NULL, 6),
-       SENSOR_ATTR(temp8_alarm, S_IRUGO, show_temp_alarm, NULL, 7),
-       SENSOR_ATTR(temp9_alarm, S_IRUGO, show_temp_alarm, NULL, 8),
-       SENSOR_ATTR(temp10_alarm, S_IRUGO, show_temp_alarm, NULL, 9),
+static struct sensor_template_group nct6775_temp_template_group = {
+       .templates = nct6775_attributes_temp_template,
+       .is_visible = nct6775_temp_is_visible,
+       .base = 1,
 };
 
 static ssize_t
@@ -2422,77 +2625,19 @@ store_speed_tolerance(struct device *dev, struct device_attribute *attr,
        return count;
 }
 
-static SENSOR_DEVICE_ATTR_2(pwm1, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 0, 0);
-static SENSOR_DEVICE_ATTR_2(pwm2, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 1, 0);
-static SENSOR_DEVICE_ATTR_2(pwm3, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 2, 0);
-static SENSOR_DEVICE_ATTR_2(pwm4, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 3, 0);
-static SENSOR_DEVICE_ATTR_2(pwm5, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 4, 0);
-
-static SENSOR_DEVICE_ATTR(pwm1_mode, S_IWUSR | S_IRUGO, show_pwm_mode,
-                         store_pwm_mode, 0);
-static SENSOR_DEVICE_ATTR(pwm2_mode, S_IWUSR | S_IRUGO, show_pwm_mode,
-                         store_pwm_mode, 1);
-static SENSOR_DEVICE_ATTR(pwm3_mode, S_IWUSR | S_IRUGO, show_pwm_mode,
-                         store_pwm_mode, 2);
-static SENSOR_DEVICE_ATTR(pwm4_mode, S_IWUSR | S_IRUGO, show_pwm_mode,
-                         store_pwm_mode, 3);
-static SENSOR_DEVICE_ATTR(pwm5_mode, S_IWUSR | S_IRUGO, show_pwm_mode,
-                         store_pwm_mode, 4);
-
-static SENSOR_DEVICE_ATTR(pwm1_enable, S_IWUSR | S_IRUGO, show_pwm_enable,
-                         store_pwm_enable, 0);
-static SENSOR_DEVICE_ATTR(pwm2_enable, S_IWUSR | S_IRUGO, show_pwm_enable,
-                         store_pwm_enable, 1);
-static SENSOR_DEVICE_ATTR(pwm3_enable, S_IWUSR | S_IRUGO, show_pwm_enable,
-                         store_pwm_enable, 2);
-static SENSOR_DEVICE_ATTR(pwm4_enable, S_IWUSR | S_IRUGO, show_pwm_enable,
-                         store_pwm_enable, 3);
-static SENSOR_DEVICE_ATTR(pwm5_enable, S_IWUSR | S_IRUGO, show_pwm_enable,
-                         store_pwm_enable, 4);
-
-static SENSOR_DEVICE_ATTR(pwm1_temp_sel, S_IWUSR | S_IRUGO,
-                           show_pwm_temp_sel, store_pwm_temp_sel, 0);
-static SENSOR_DEVICE_ATTR(pwm2_temp_sel, S_IWUSR | S_IRUGO,
-                           show_pwm_temp_sel, store_pwm_temp_sel, 1);
-static SENSOR_DEVICE_ATTR(pwm3_temp_sel, S_IWUSR | S_IRUGO,
-                           show_pwm_temp_sel, store_pwm_temp_sel, 2);
-static SENSOR_DEVICE_ATTR(pwm4_temp_sel, S_IWUSR | S_IRUGO,
-                           show_pwm_temp_sel, store_pwm_temp_sel, 3);
-static SENSOR_DEVICE_ATTR(pwm5_temp_sel, S_IWUSR | S_IRUGO,
-                           show_pwm_temp_sel, store_pwm_temp_sel, 4);
-
-static SENSOR_DEVICE_ATTR(pwm1_target_temp, S_IWUSR | S_IRUGO, show_target_temp,
-                         store_target_temp, 0);
-static SENSOR_DEVICE_ATTR(pwm2_target_temp, S_IWUSR | S_IRUGO, show_target_temp,
-                         store_target_temp, 1);
-static SENSOR_DEVICE_ATTR(pwm3_target_temp, S_IWUSR | S_IRUGO, show_target_temp,
-                         store_target_temp, 2);
-static SENSOR_DEVICE_ATTR(pwm4_target_temp, S_IWUSR | S_IRUGO, show_target_temp,
-                         store_target_temp, 3);
-static SENSOR_DEVICE_ATTR(pwm5_target_temp, S_IWUSR | S_IRUGO, show_target_temp,
-                         store_target_temp, 4);
-
-static SENSOR_DEVICE_ATTR(fan1_target, S_IWUSR | S_IRUGO, show_target_speed,
-                         store_target_speed, 0);
-static SENSOR_DEVICE_ATTR(fan2_target, S_IWUSR | S_IRUGO, show_target_speed,
-                         store_target_speed, 1);
-static SENSOR_DEVICE_ATTR(fan3_target, S_IWUSR | S_IRUGO, show_target_speed,
-                         store_target_speed, 2);
-static SENSOR_DEVICE_ATTR(fan4_target, S_IWUSR | S_IRUGO, show_target_speed,
-                         store_target_speed, 3);
-static SENSOR_DEVICE_ATTR(fan5_target, S_IWUSR | S_IRUGO, show_target_speed,
-                         store_target_speed, 4);
-
-static SENSOR_DEVICE_ATTR(fan1_tolerance, S_IWUSR | S_IRUGO,
-                           show_speed_tolerance, store_speed_tolerance, 0);
-static SENSOR_DEVICE_ATTR(fan2_tolerance, S_IWUSR | S_IRUGO,
-                           show_speed_tolerance, store_speed_tolerance, 1);
-static SENSOR_DEVICE_ATTR(fan3_tolerance, S_IWUSR | S_IRUGO,
-                           show_speed_tolerance, store_speed_tolerance, 2);
-static SENSOR_DEVICE_ATTR(fan4_tolerance, S_IWUSR | S_IRUGO,
-                           show_speed_tolerance, store_speed_tolerance, 3);
-static SENSOR_DEVICE_ATTR(fan5_tolerance, S_IWUSR | S_IRUGO,
-                           show_speed_tolerance, store_speed_tolerance, 4);
+SENSOR_TEMPLATE_2(pwm, "pwm%d", S_IWUSR | S_IRUGO, show_pwm, store_pwm, 0, 0);
+SENSOR_TEMPLATE(pwm_mode, "pwm%d_mode", S_IWUSR | S_IRUGO, show_pwm_mode,
+               store_pwm_mode, 0);
+SENSOR_TEMPLATE(pwm_enable, "pwm%d_enable", S_IWUSR | S_IRUGO, show_pwm_enable,
+               store_pwm_enable, 0);
+SENSOR_TEMPLATE(pwm_temp_sel, "pwm%d_temp_sel", S_IWUSR | S_IRUGO,
+               show_pwm_temp_sel, store_pwm_temp_sel, 0);
+SENSOR_TEMPLATE(pwm_target_temp, "pwm%d_target_temp", S_IWUSR | S_IRUGO,
+               show_target_temp, store_target_temp, 0);
+SENSOR_TEMPLATE(fan_target, "fan%d_target", S_IWUSR | S_IRUGO,
+               show_target_speed, store_target_speed, 0);
+SENSOR_TEMPLATE(fan_tolerance, "fan%d_tolerance", S_IWUSR | S_IRUGO,
+               show_speed_tolerance, store_speed_tolerance, 0);
 
 /* Smart Fan registers */
 
@@ -2531,79 +2676,18 @@ store_weight_temp(struct device *dev, struct device_attribute *attr,
        return count;
 }
 
-static SENSOR_DEVICE_ATTR(pwm1_weight_temp_sel, S_IWUSR | S_IRUGO,
-                           show_pwm_weight_temp_sel, store_pwm_weight_temp_sel,
-                           0);
-static SENSOR_DEVICE_ATTR(pwm2_weight_temp_sel, S_IWUSR | S_IRUGO,
-                           show_pwm_weight_temp_sel, store_pwm_weight_temp_sel,
-                           1);
-static SENSOR_DEVICE_ATTR(pwm3_weight_temp_sel, S_IWUSR | S_IRUGO,
-                           show_pwm_weight_temp_sel, store_pwm_weight_temp_sel,
-                           2);
-static SENSOR_DEVICE_ATTR(pwm4_weight_temp_sel, S_IWUSR | S_IRUGO,
-                           show_pwm_weight_temp_sel, store_pwm_weight_temp_sel,
-                           3);
-static SENSOR_DEVICE_ATTR(pwm5_weight_temp_sel, S_IWUSR | S_IRUGO,
-                           show_pwm_weight_temp_sel, store_pwm_weight_temp_sel,
-                           4);
-
-static SENSOR_DEVICE_ATTR_2(pwm1_weight_temp_step, S_IWUSR | S_IRUGO,
-                           show_weight_temp, store_weight_temp, 0, 0);
-static SENSOR_DEVICE_ATTR_2(pwm2_weight_temp_step, S_IWUSR | S_IRUGO,
-                           show_weight_temp, store_weight_temp, 1, 0);
-static SENSOR_DEVICE_ATTR_2(pwm3_weight_temp_step, S_IWUSR | S_IRUGO,
-                           show_weight_temp, store_weight_temp, 2, 0);
-static SENSOR_DEVICE_ATTR_2(pwm4_weight_temp_step, S_IWUSR | S_IRUGO,
-                           show_weight_temp, store_weight_temp, 3, 0);
-static SENSOR_DEVICE_ATTR_2(pwm5_weight_temp_step, S_IWUSR | S_IRUGO,
-                           show_weight_temp, store_weight_temp, 4, 0);
-
-static SENSOR_DEVICE_ATTR_2(pwm1_weight_temp_step_tol, S_IWUSR | S_IRUGO,
-                           show_weight_temp, store_weight_temp, 0, 1);
-static SENSOR_DEVICE_ATTR_2(pwm2_weight_temp_step_tol, S_IWUSR | S_IRUGO,
-                           show_weight_temp, store_weight_temp, 1, 1);
-static SENSOR_DEVICE_ATTR_2(pwm3_weight_temp_step_tol, S_IWUSR | S_IRUGO,
-                           show_weight_temp, store_weight_temp, 2, 1);
-static SENSOR_DEVICE_ATTR_2(pwm4_weight_temp_step_tol, S_IWUSR | S_IRUGO,
-                           show_weight_temp, store_weight_temp, 3, 1);
-static SENSOR_DEVICE_ATTR_2(pwm5_weight_temp_step_tol, S_IWUSR | S_IRUGO,
-                           show_weight_temp, store_weight_temp, 4, 1);
-
-static SENSOR_DEVICE_ATTR_2(pwm1_weight_temp_step_base, S_IWUSR | S_IRUGO,
-                           show_weight_temp, store_weight_temp, 0, 2);
-static SENSOR_DEVICE_ATTR_2(pwm2_weight_temp_step_base, S_IWUSR | S_IRUGO,
-                           show_weight_temp, store_weight_temp, 1, 2);
-static SENSOR_DEVICE_ATTR_2(pwm3_weight_temp_step_base, S_IWUSR | S_IRUGO,
-                           show_weight_temp, store_weight_temp, 2, 2);
-static SENSOR_DEVICE_ATTR_2(pwm4_weight_temp_step_base, S_IWUSR | S_IRUGO,
-                           show_weight_temp, store_weight_temp, 3, 2);
-static SENSOR_DEVICE_ATTR_2(pwm5_weight_temp_step_base, S_IWUSR | S_IRUGO,
-                           show_weight_temp, store_weight_temp, 4, 2);
-
-static SENSOR_DEVICE_ATTR_2(pwm1_weight_duty_step, S_IWUSR | S_IRUGO,
-                           show_pwm, store_pwm, 0, 5);
-static SENSOR_DEVICE_ATTR_2(pwm2_weight_duty_step, S_IWUSR | S_IRUGO,
-                           show_pwm, store_pwm, 1, 5);
-static SENSOR_DEVICE_ATTR_2(pwm3_weight_duty_step, S_IWUSR | S_IRUGO,
-                           show_pwm, store_pwm, 2, 5);
-static SENSOR_DEVICE_ATTR_2(pwm4_weight_duty_step, S_IWUSR | S_IRUGO,
-                           show_pwm, store_pwm, 3, 5);
-static SENSOR_DEVICE_ATTR_2(pwm5_weight_duty_step, S_IWUSR | S_IRUGO,
-                           show_pwm, store_pwm, 4, 5);
-
-/* duty_base is not supported on all chips */
-static struct sensor_device_attribute_2 sda_weight_duty_base[] = {
-       SENSOR_ATTR_2(pwm1_weight_duty_base, S_IWUSR | S_IRUGO,
-                     show_pwm, store_pwm, 0, 6),
-       SENSOR_ATTR_2(pwm2_weight_duty_base, S_IWUSR | S_IRUGO,
-                     show_pwm, store_pwm, 1, 6),
-       SENSOR_ATTR_2(pwm3_weight_duty_base, S_IWUSR | S_IRUGO,
-                     show_pwm, store_pwm, 2, 6),
-       SENSOR_ATTR_2(pwm4_weight_duty_base, S_IWUSR | S_IRUGO,
-                     show_pwm, store_pwm, 3, 6),
-       SENSOR_ATTR_2(pwm5_weight_duty_base, S_IWUSR | S_IRUGO,
-                     show_pwm, store_pwm, 4, 6),
-};
+SENSOR_TEMPLATE(pwm_weight_temp_sel, "pwm%d_weight_temp_sel", S_IWUSR | S_IRUGO,
+                 show_pwm_weight_temp_sel, store_pwm_weight_temp_sel, 0);
+SENSOR_TEMPLATE_2(pwm_weight_temp_step, "pwm%d_weight_temp_step",
+                 S_IWUSR | S_IRUGO, show_weight_temp, store_weight_temp, 0, 0);
+SENSOR_TEMPLATE_2(pwm_weight_temp_step_tol, "pwm%d_weight_temp_step_tol",
+                 S_IWUSR | S_IRUGO, show_weight_temp, store_weight_temp, 0, 1);
+SENSOR_TEMPLATE_2(pwm_weight_temp_step_base, "pwm%d_weight_temp_step_base",
+                 S_IWUSR | S_IRUGO, show_weight_temp, store_weight_temp, 0, 2);
+SENSOR_TEMPLATE_2(pwm_weight_duty_step, "pwm%d_weight_duty_step",
+                 S_IWUSR | S_IRUGO, show_pwm, store_pwm, 0, 5);
+SENSOR_TEMPLATE_2(pwm_weight_duty_base, "pwm%d_weight_duty_base",
+                 S_IWUSR | S_IRUGO, show_pwm, store_pwm, 0, 6);
 
 static ssize_t
 show_fan_time(struct device *dev, struct device_attribute *attr, char *buf)
@@ -2651,227 +2735,6 @@ show_name(struct device *dev, struct device_attribute *attr, char *buf)
 
 static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
 
-static SENSOR_DEVICE_ATTR_2(pwm1_stop_time, S_IWUSR | S_IRUGO, show_fan_time,
-                           store_fan_time, 0, 0);
-static SENSOR_DEVICE_ATTR_2(pwm2_stop_time, S_IWUSR | S_IRUGO, show_fan_time,
-                           store_fan_time, 1, 0);
-static SENSOR_DEVICE_ATTR_2(pwm3_stop_time, S_IWUSR | S_IRUGO, show_fan_time,
-                           store_fan_time, 2, 0);
-static SENSOR_DEVICE_ATTR_2(pwm4_stop_time, S_IWUSR | S_IRUGO, show_fan_time,
-                           store_fan_time, 3, 0);
-static SENSOR_DEVICE_ATTR_2(pwm5_stop_time, S_IWUSR | S_IRUGO, show_fan_time,
-                           store_fan_time, 4, 0);
-
-static SENSOR_DEVICE_ATTR_2(pwm1_step_up_time, S_IWUSR | S_IRUGO, show_fan_time,
-                           store_fan_time, 0, 1);
-static SENSOR_DEVICE_ATTR_2(pwm2_step_up_time, S_IWUSR | S_IRUGO, show_fan_time,
-                           store_fan_time, 1, 1);
-static SENSOR_DEVICE_ATTR_2(pwm3_step_up_time, S_IWUSR | S_IRUGO, show_fan_time,
-                           store_fan_time, 2, 1);
-static SENSOR_DEVICE_ATTR_2(pwm4_step_up_time, S_IWUSR | S_IRUGO, show_fan_time,
-                           store_fan_time, 3, 1);
-static SENSOR_DEVICE_ATTR_2(pwm5_step_up_time, S_IWUSR | S_IRUGO, show_fan_time,
-                           store_fan_time, 4, 1);
-
-static SENSOR_DEVICE_ATTR_2(pwm1_step_down_time, S_IWUSR | S_IRUGO,
-                           show_fan_time, store_fan_time, 0, 2);
-static SENSOR_DEVICE_ATTR_2(pwm2_step_down_time, S_IWUSR | S_IRUGO,
-                           show_fan_time, store_fan_time, 1, 2);
-static SENSOR_DEVICE_ATTR_2(pwm3_step_down_time, S_IWUSR | S_IRUGO,
-                           show_fan_time, store_fan_time, 2, 2);
-static SENSOR_DEVICE_ATTR_2(pwm4_step_down_time, S_IWUSR | S_IRUGO,
-                           show_fan_time, store_fan_time, 3, 2);
-static SENSOR_DEVICE_ATTR_2(pwm5_step_down_time, S_IWUSR | S_IRUGO,
-                           show_fan_time, store_fan_time, 4, 2);
-
-static SENSOR_DEVICE_ATTR_2(pwm1_start, S_IWUSR | S_IRUGO, show_pwm,
-                           store_pwm, 0, 1);
-static SENSOR_DEVICE_ATTR_2(pwm2_start, S_IWUSR | S_IRUGO, show_pwm,
-                           store_pwm, 1, 1);
-static SENSOR_DEVICE_ATTR_2(pwm3_start, S_IWUSR | S_IRUGO, show_pwm,
-                           store_pwm, 2, 1);
-static SENSOR_DEVICE_ATTR_2(pwm4_start, S_IWUSR | S_IRUGO, show_pwm,
-                           store_pwm, 3, 1);
-static SENSOR_DEVICE_ATTR_2(pwm5_start, S_IWUSR | S_IRUGO, show_pwm,
-                           store_pwm, 4, 1);
-
-static SENSOR_DEVICE_ATTR_2(pwm1_floor, S_IWUSR | S_IRUGO, show_pwm,
-                           store_pwm, 0, 2);
-static SENSOR_DEVICE_ATTR_2(pwm2_floor, S_IWUSR | S_IRUGO, show_pwm,
-                           store_pwm, 1, 2);
-static SENSOR_DEVICE_ATTR_2(pwm3_floor, S_IWUSR | S_IRUGO, show_pwm,
-                           store_pwm, 2, 2);
-static SENSOR_DEVICE_ATTR_2(pwm4_floor, S_IWUSR | S_IRUGO, show_pwm,
-                           store_pwm, 3, 2);
-static SENSOR_DEVICE_ATTR_2(pwm5_floor, S_IWUSR | S_IRUGO, show_pwm,
-                           store_pwm, 4, 2);
-
-static SENSOR_DEVICE_ATTR_2(pwm1_temp_tolerance, S_IWUSR | S_IRUGO,
-                           show_temp_tolerance, store_temp_tolerance, 0, 0);
-static SENSOR_DEVICE_ATTR_2(pwm2_temp_tolerance, S_IWUSR | S_IRUGO,
-                           show_temp_tolerance, store_temp_tolerance, 1, 0);
-static SENSOR_DEVICE_ATTR_2(pwm3_temp_tolerance, S_IWUSR | S_IRUGO,
-                           show_temp_tolerance, store_temp_tolerance, 2, 0);
-static SENSOR_DEVICE_ATTR_2(pwm4_temp_tolerance, S_IWUSR | S_IRUGO,
-                           show_temp_tolerance, store_temp_tolerance, 3, 0);
-static SENSOR_DEVICE_ATTR_2(pwm5_temp_tolerance, S_IWUSR | S_IRUGO,
-                           show_temp_tolerance, store_temp_tolerance, 4, 0);
-
-static SENSOR_DEVICE_ATTR_2(pwm1_crit_temp_tolerance, S_IWUSR | S_IRUGO,
-                           show_temp_tolerance, store_temp_tolerance, 0, 1);
-static SENSOR_DEVICE_ATTR_2(pwm2_crit_temp_tolerance, S_IWUSR | S_IRUGO,
-                           show_temp_tolerance, store_temp_tolerance, 1, 1);
-static SENSOR_DEVICE_ATTR_2(pwm3_crit_temp_tolerance, S_IWUSR | S_IRUGO,
-                           show_temp_tolerance, store_temp_tolerance, 2, 1);
-static SENSOR_DEVICE_ATTR_2(pwm4_crit_temp_tolerance, S_IWUSR | S_IRUGO,
-                           show_temp_tolerance, store_temp_tolerance, 3, 1);
-static SENSOR_DEVICE_ATTR_2(pwm5_crit_temp_tolerance, S_IWUSR | S_IRUGO,
-                           show_temp_tolerance, store_temp_tolerance, 4, 1);
-
-/* pwm_max is not supported on all chips */
-static struct sensor_device_attribute_2 sda_pwm_max[] = {
-       SENSOR_ATTR_2(pwm1_max, S_IWUSR | S_IRUGO, show_pwm, store_pwm,
-                     0, 3),
-       SENSOR_ATTR_2(pwm2_max, S_IWUSR | S_IRUGO, show_pwm, store_pwm,
-                     1, 3),
-       SENSOR_ATTR_2(pwm3_max, S_IWUSR | S_IRUGO, show_pwm, store_pwm,
-                     2, 3),
-       SENSOR_ATTR_2(pwm4_max, S_IWUSR | S_IRUGO, show_pwm, store_pwm,
-                     3, 3),
-       SENSOR_ATTR_2(pwm5_max, S_IWUSR | S_IRUGO, show_pwm, store_pwm,
-                     4, 3),
-};
-
-/* pwm_step is not supported on all chips */
-static struct sensor_device_attribute_2 sda_pwm_step[] = {
-       SENSOR_ATTR_2(pwm1_step, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 0, 4),
-       SENSOR_ATTR_2(pwm2_step, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 1, 4),
-       SENSOR_ATTR_2(pwm3_step, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 2, 4),
-       SENSOR_ATTR_2(pwm4_step, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 3, 4),
-       SENSOR_ATTR_2(pwm5_step, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 4, 4),
-};
-
-static struct attribute *nct6775_attributes_pwm[5][20] = {
-       {
-               &sensor_dev_attr_pwm1.dev_attr.attr,
-               &sensor_dev_attr_pwm1_mode.dev_attr.attr,
-               &sensor_dev_attr_pwm1_enable.dev_attr.attr,
-               &sensor_dev_attr_pwm1_temp_sel.dev_attr.attr,
-               &sensor_dev_attr_pwm1_temp_tolerance.dev_attr.attr,
-               &sensor_dev_attr_pwm1_crit_temp_tolerance.dev_attr.attr,
-               &sensor_dev_attr_pwm1_target_temp.dev_attr.attr,
-               &sensor_dev_attr_fan1_target.dev_attr.attr,
-               &sensor_dev_attr_fan1_tolerance.dev_attr.attr,
-               &sensor_dev_attr_pwm1_stop_time.dev_attr.attr,
-               &sensor_dev_attr_pwm1_step_up_time.dev_attr.attr,
-               &sensor_dev_attr_pwm1_step_down_time.dev_attr.attr,
-               &sensor_dev_attr_pwm1_start.dev_attr.attr,
-               &sensor_dev_attr_pwm1_floor.dev_attr.attr,
-               &sensor_dev_attr_pwm1_weight_temp_sel.dev_attr.attr,
-               &sensor_dev_attr_pwm1_weight_temp_step.dev_attr.attr,
-               &sensor_dev_attr_pwm1_weight_temp_step_tol.dev_attr.attr,
-               &sensor_dev_attr_pwm1_weight_temp_step_base.dev_attr.attr,
-               &sensor_dev_attr_pwm1_weight_duty_step.dev_attr.attr,
-               NULL
-       },
-       {
-               &sensor_dev_attr_pwm2.dev_attr.attr,
-               &sensor_dev_attr_pwm2_mode.dev_attr.attr,
-               &sensor_dev_attr_pwm2_enable.dev_attr.attr,
-               &sensor_dev_attr_pwm2_temp_sel.dev_attr.attr,
-               &sensor_dev_attr_pwm2_temp_tolerance.dev_attr.attr,
-               &sensor_dev_attr_pwm2_crit_temp_tolerance.dev_attr.attr,
-               &sensor_dev_attr_pwm2_target_temp.dev_attr.attr,
-               &sensor_dev_attr_fan2_target.dev_attr.attr,
-               &sensor_dev_attr_fan2_tolerance.dev_attr.attr,
-               &sensor_dev_attr_pwm2_stop_time.dev_attr.attr,
-               &sensor_dev_attr_pwm2_step_up_time.dev_attr.attr,
-               &sensor_dev_attr_pwm2_step_down_time.dev_attr.attr,
-               &sensor_dev_attr_pwm2_start.dev_attr.attr,
-               &sensor_dev_attr_pwm2_floor.dev_attr.attr,
-               &sensor_dev_attr_pwm2_weight_temp_sel.dev_attr.attr,
-               &sensor_dev_attr_pwm2_weight_temp_step.dev_attr.attr,
-               &sensor_dev_attr_pwm2_weight_temp_step_tol.dev_attr.attr,
-               &sensor_dev_attr_pwm2_weight_temp_step_base.dev_attr.attr,
-               &sensor_dev_attr_pwm2_weight_duty_step.dev_attr.attr,
-               NULL
-       },
-       {
-               &sensor_dev_attr_pwm3.dev_attr.attr,
-               &sensor_dev_attr_pwm3_mode.dev_attr.attr,
-               &sensor_dev_attr_pwm3_enable.dev_attr.attr,
-               &sensor_dev_attr_pwm3_temp_sel.dev_attr.attr,
-               &sensor_dev_attr_pwm3_temp_tolerance.dev_attr.attr,
-               &sensor_dev_attr_pwm3_crit_temp_tolerance.dev_attr.attr,
-               &sensor_dev_attr_pwm3_target_temp.dev_attr.attr,
-               &sensor_dev_attr_fan3_target.dev_attr.attr,
-               &sensor_dev_attr_fan3_tolerance.dev_attr.attr,
-               &sensor_dev_attr_pwm3_stop_time.dev_attr.attr,
-               &sensor_dev_attr_pwm3_step_up_time.dev_attr.attr,
-               &sensor_dev_attr_pwm3_step_down_time.dev_attr.attr,
-               &sensor_dev_attr_pwm3_start.dev_attr.attr,
-               &sensor_dev_attr_pwm3_floor.dev_attr.attr,
-               &sensor_dev_attr_pwm3_weight_temp_sel.dev_attr.attr,
-               &sensor_dev_attr_pwm3_weight_temp_step.dev_attr.attr,
-               &sensor_dev_attr_pwm3_weight_temp_step_tol.dev_attr.attr,
-               &sensor_dev_attr_pwm3_weight_temp_step_base.dev_attr.attr,
-               &sensor_dev_attr_pwm3_weight_duty_step.dev_attr.attr,
-               NULL
-       },
-       {
-               &sensor_dev_attr_pwm4.dev_attr.attr,
-               &sensor_dev_attr_pwm4_mode.dev_attr.attr,
-               &sensor_dev_attr_pwm4_enable.dev_attr.attr,
-               &sensor_dev_attr_pwm4_temp_sel.dev_attr.attr,
-               &sensor_dev_attr_pwm4_temp_tolerance.dev_attr.attr,
-               &sensor_dev_attr_pwm4_crit_temp_tolerance.dev_attr.attr,
-               &sensor_dev_attr_pwm4_target_temp.dev_attr.attr,
-               &sensor_dev_attr_fan4_target.dev_attr.attr,
-               &sensor_dev_attr_fan4_tolerance.dev_attr.attr,
-               &sensor_dev_attr_pwm4_stop_time.dev_attr.attr,
-               &sensor_dev_attr_pwm4_step_up_time.dev_attr.attr,
-               &sensor_dev_attr_pwm4_step_down_time.dev_attr.attr,
-               &sensor_dev_attr_pwm4_start.dev_attr.attr,
-               &sensor_dev_attr_pwm4_floor.dev_attr.attr,
-               &sensor_dev_attr_pwm4_weight_temp_sel.dev_attr.attr,
-               &sensor_dev_attr_pwm4_weight_temp_step.dev_attr.attr,
-               &sensor_dev_attr_pwm4_weight_temp_step_tol.dev_attr.attr,
-               &sensor_dev_attr_pwm4_weight_temp_step_base.dev_attr.attr,
-               &sensor_dev_attr_pwm4_weight_duty_step.dev_attr.attr,
-               NULL
-       },
-       {
-               &sensor_dev_attr_pwm5.dev_attr.attr,
-               &sensor_dev_attr_pwm5_mode.dev_attr.attr,
-               &sensor_dev_attr_pwm5_enable.dev_attr.attr,
-               &sensor_dev_attr_pwm5_temp_sel.dev_attr.attr,
-               &sensor_dev_attr_pwm5_temp_tolerance.dev_attr.attr,
-               &sensor_dev_attr_pwm5_crit_temp_tolerance.dev_attr.attr,
-               &sensor_dev_attr_pwm5_target_temp.dev_attr.attr,
-               &sensor_dev_attr_fan5_target.dev_attr.attr,
-               &sensor_dev_attr_fan5_tolerance.dev_attr.attr,
-               &sensor_dev_attr_pwm5_stop_time.dev_attr.attr,
-               &sensor_dev_attr_pwm5_step_up_time.dev_attr.attr,
-               &sensor_dev_attr_pwm5_step_down_time.dev_attr.attr,
-               &sensor_dev_attr_pwm5_start.dev_attr.attr,
-               &sensor_dev_attr_pwm5_floor.dev_attr.attr,
-               &sensor_dev_attr_pwm5_weight_temp_sel.dev_attr.attr,
-               &sensor_dev_attr_pwm5_weight_temp_step.dev_attr.attr,
-               &sensor_dev_attr_pwm5_weight_temp_step_tol.dev_attr.attr,
-               &sensor_dev_attr_pwm5_weight_temp_step_base.dev_attr.attr,
-               &sensor_dev_attr_pwm5_weight_duty_step.dev_attr.attr,
-               NULL
-       },
-};
-
-static const struct attribute_group nct6775_group_pwm[5] = {
-       { .attrs = nct6775_attributes_pwm[0] },
-       { .attrs = nct6775_attributes_pwm[1] },
-       { .attrs = nct6775_attributes_pwm[2] },
-       { .attrs = nct6775_attributes_pwm[3] },
-       { .attrs = nct6775_attributes_pwm[4] },
-};
-
 static ssize_t
 show_auto_pwm(struct device *dev, struct device_attribute *attr, char *buf)
 {
@@ -2927,17 +2790,19 @@ store_auto_pwm(struct device *dev, struct device_attribute *attr,
                        break;
                case nct6776:
                        break; /* always enabled, nothing to do */
+               case nct6106:
                case nct6779:
-                       nct6775_write_value(data, NCT6779_REG_CRITICAL_PWM[nr],
+               case nct6791:
+                       nct6775_write_value(data, data->REG_CRITICAL_PWM[nr],
                                            val);
                        reg = nct6775_read_value(data,
-                                       NCT6779_REG_CRITICAL_PWM_ENABLE[nr]);
+                                       data->REG_CRITICAL_PWM_ENABLE[nr]);
                        if (val == 255)
-                               reg &= ~0x01;
+                               reg &= ~data->CRITICAL_PWM_ENABLE_MASK;
                        else
-                               reg |= 0x01;
+                               reg |= data->CRITICAL_PWM_ENABLE_MASK;
                        nct6775_write_value(data,
-                                           NCT6779_REG_CRITICAL_PWM_ENABLE[nr],
+                                           data->REG_CRITICAL_PWM_ENABLE[nr],
                                            reg);
                        break;
                }
@@ -2992,155 +2857,140 @@ store_auto_temp(struct device *dev, struct device_attribute *attr,
        return count;
 }
 
+static umode_t nct6775_pwm_is_visible(struct kobject *kobj,
+                                     struct attribute *attr, int index)
+{
+       struct device *dev = container_of(kobj, struct device, kobj);
+       struct nct6775_data *data = dev_get_drvdata(dev);
+       int pwm = index / 36;   /* pwm index */
+       int nr = index % 36;    /* attribute index */
+
+       if (!(data->has_pwm & (1 << pwm)))
+               return 0;
+
+       if (nr == 19 && data->REG_PWM[3] == NULL) /* pwm_max */
+               return 0;
+       if (nr == 20 && data->REG_PWM[4] == NULL) /* pwm_step */
+               return 0;
+       if (nr == 21 && data->REG_PWM[6] == NULL) /* weight_duty_base */
+               return 0;
+
+       if (nr >= 22 && nr <= 35) {             /* auto point */
+               int api = (nr - 22) / 2;        /* auto point index */
+
+               if (api > data->auto_pwm_num)
+                       return 0;
+       }
+       return attr->mode;
+}
+
+SENSOR_TEMPLATE_2(pwm_stop_time, "pwm%d_stop_time", S_IWUSR | S_IRUGO,
+                 show_fan_time, store_fan_time, 0, 0);
+SENSOR_TEMPLATE_2(pwm_step_up_time, "pwm%d_step_up_time", S_IWUSR | S_IRUGO,
+                 show_fan_time, store_fan_time, 0, 1);
+SENSOR_TEMPLATE_2(pwm_step_down_time, "pwm%d_step_down_time", S_IWUSR | S_IRUGO,
+                 show_fan_time, store_fan_time, 0, 2);
+SENSOR_TEMPLATE_2(pwm_start, "pwm%d_start", S_IWUSR | S_IRUGO, show_pwm,
+                 store_pwm, 0, 1);
+SENSOR_TEMPLATE_2(pwm_floor, "pwm%d_floor", S_IWUSR | S_IRUGO, show_pwm,
+                 store_pwm, 0, 2);
+SENSOR_TEMPLATE_2(pwm_temp_tolerance, "pwm%d_temp_tolerance", S_IWUSR | S_IRUGO,
+                 show_temp_tolerance, store_temp_tolerance, 0, 0);
+SENSOR_TEMPLATE_2(pwm_crit_temp_tolerance, "pwm%d_crit_temp_tolerance",
+                 S_IWUSR | S_IRUGO, show_temp_tolerance, store_temp_tolerance,
+                 0, 1);
+
+SENSOR_TEMPLATE_2(pwm_max, "pwm%d_max", S_IWUSR | S_IRUGO, show_pwm, store_pwm,
+                 0, 3);
+
+SENSOR_TEMPLATE_2(pwm_step, "pwm%d_step", S_IWUSR | S_IRUGO, show_pwm,
+                 store_pwm, 0, 4);
+
+SENSOR_TEMPLATE_2(pwm_auto_point1_pwm, "pwm%d_auto_point1_pwm",
+                 S_IWUSR | S_IRUGO, show_auto_pwm, store_auto_pwm, 0, 0);
+SENSOR_TEMPLATE_2(pwm_auto_point1_temp, "pwm%d_auto_point1_temp",
+                 S_IWUSR | S_IRUGO, show_auto_temp, store_auto_temp, 0, 0);
+
+SENSOR_TEMPLATE_2(pwm_auto_point2_pwm, "pwm%d_auto_point2_pwm",
+                 S_IWUSR | S_IRUGO, show_auto_pwm, store_auto_pwm, 0, 1);
+SENSOR_TEMPLATE_2(pwm_auto_point2_temp, "pwm%d_auto_point2_temp",
+                 S_IWUSR | S_IRUGO, show_auto_temp, store_auto_temp, 0, 1);
+
+SENSOR_TEMPLATE_2(pwm_auto_point3_pwm, "pwm%d_auto_point3_pwm",
+                 S_IWUSR | S_IRUGO, show_auto_pwm, store_auto_pwm, 0, 2);
+SENSOR_TEMPLATE_2(pwm_auto_point3_temp, "pwm%d_auto_point3_temp",
+                 S_IWUSR | S_IRUGO, show_auto_temp, store_auto_temp, 0, 2);
+
+SENSOR_TEMPLATE_2(pwm_auto_point4_pwm, "pwm%d_auto_point4_pwm",
+                 S_IWUSR | S_IRUGO, show_auto_pwm, store_auto_pwm, 0, 3);
+SENSOR_TEMPLATE_2(pwm_auto_point4_temp, "pwm%d_auto_point4_temp",
+                 S_IWUSR | S_IRUGO, show_auto_temp, store_auto_temp, 0, 3);
+
+SENSOR_TEMPLATE_2(pwm_auto_point5_pwm, "pwm%d_auto_point5_pwm",
+                 S_IWUSR | S_IRUGO, show_auto_pwm, store_auto_pwm, 0, 4);
+SENSOR_TEMPLATE_2(pwm_auto_point5_temp, "pwm%d_auto_point5_temp",
+                 S_IWUSR | S_IRUGO, show_auto_temp, store_auto_temp, 0, 4);
+
+SENSOR_TEMPLATE_2(pwm_auto_point6_pwm, "pwm%d_auto_point6_pwm",
+                 S_IWUSR | S_IRUGO, show_auto_pwm, store_auto_pwm, 0, 5);
+SENSOR_TEMPLATE_2(pwm_auto_point6_temp, "pwm%d_auto_point6_temp",
+                 S_IWUSR | S_IRUGO, show_auto_temp, store_auto_temp, 0, 5);
+
+SENSOR_TEMPLATE_2(pwm_auto_point7_pwm, "pwm%d_auto_point7_pwm",
+                 S_IWUSR | S_IRUGO, show_auto_pwm, store_auto_pwm, 0, 6);
+SENSOR_TEMPLATE_2(pwm_auto_point7_temp, "pwm%d_auto_point7_temp",
+                 S_IWUSR | S_IRUGO, show_auto_temp, store_auto_temp, 0, 6);
+
 /*
- * The number of auto-point trip points is chip dependent.
- * Need to check support while generating/removing attribute files.
+ * nct6775_pwm_is_visible uses the index into the following array
+ * to determine if attributes should be created or not.
+ * Any change in order or content must be matched.
  */
-static struct sensor_device_attribute_2 sda_auto_pwm_arrays[] = {
-       SENSOR_ATTR_2(pwm1_auto_point1_pwm, S_IWUSR | S_IRUGO,
-                     show_auto_pwm, store_auto_pwm, 0, 0),
-       SENSOR_ATTR_2(pwm1_auto_point1_temp, S_IWUSR | S_IRUGO,
-                     show_auto_temp, store_auto_temp, 0, 0),
-       SENSOR_ATTR_2(pwm1_auto_point2_pwm, S_IWUSR | S_IRUGO,
-                     show_auto_pwm, store_auto_pwm, 0, 1),
-       SENSOR_ATTR_2(pwm1_auto_point2_temp, S_IWUSR | S_IRUGO,
-                     show_auto_temp, store_auto_temp, 0, 1),
-       SENSOR_ATTR_2(pwm1_auto_point3_pwm, S_IWUSR | S_IRUGO,
-                     show_auto_pwm, store_auto_pwm, 0, 2),
-       SENSOR_ATTR_2(pwm1_auto_point3_temp, S_IWUSR | S_IRUGO,
-                     show_auto_temp, store_auto_temp, 0, 2),
-       SENSOR_ATTR_2(pwm1_auto_point4_pwm, S_IWUSR | S_IRUGO,
-                     show_auto_pwm, store_auto_pwm, 0, 3),
-       SENSOR_ATTR_2(pwm1_auto_point4_temp, S_IWUSR | S_IRUGO,
-                     show_auto_temp, store_auto_temp, 0, 3),
-       SENSOR_ATTR_2(pwm1_auto_point5_pwm, S_IWUSR | S_IRUGO,
-                     show_auto_pwm, store_auto_pwm, 0, 4),
-       SENSOR_ATTR_2(pwm1_auto_point5_temp, S_IWUSR | S_IRUGO,
-                     show_auto_temp, store_auto_temp, 0, 4),
-       SENSOR_ATTR_2(pwm1_auto_point6_pwm, S_IWUSR | S_IRUGO,
-                     show_auto_pwm, store_auto_pwm, 0, 5),
-       SENSOR_ATTR_2(pwm1_auto_point6_temp, S_IWUSR | S_IRUGO,
-                     show_auto_temp, store_auto_temp, 0, 5),
-       SENSOR_ATTR_2(pwm1_auto_point7_pwm, S_IWUSR | S_IRUGO,
-                     show_auto_pwm, store_auto_pwm, 0, 6),
-       SENSOR_ATTR_2(pwm1_auto_point7_temp, S_IWUSR | S_IRUGO,
-                     show_auto_temp, store_auto_temp, 0, 6),
-
-       SENSOR_ATTR_2(pwm2_auto_point1_pwm, S_IWUSR | S_IRUGO,
-                     show_auto_pwm, store_auto_pwm, 1, 0),
-       SENSOR_ATTR_2(pwm2_auto_point1_temp, S_IWUSR | S_IRUGO,
-                     show_auto_temp, store_auto_temp, 1, 0),
-       SENSOR_ATTR_2(pwm2_auto_point2_pwm, S_IWUSR | S_IRUGO,
-                     show_auto_pwm, store_auto_pwm, 1, 1),
-       SENSOR_ATTR_2(pwm2_auto_point2_temp, S_IWUSR | S_IRUGO,
-                     show_auto_temp, store_auto_temp, 1, 1),
-       SENSOR_ATTR_2(pwm2_auto_point3_pwm, S_IWUSR | S_IRUGO,
-                     show_auto_pwm, store_auto_pwm, 1, 2),
-       SENSOR_ATTR_2(pwm2_auto_point3_temp, S_IWUSR | S_IRUGO,
-                     show_auto_temp, store_auto_temp, 1, 2),
-       SENSOR_ATTR_2(pwm2_auto_point4_pwm, S_IWUSR | S_IRUGO,
-                     show_auto_pwm, store_auto_pwm, 1, 3),
-       SENSOR_ATTR_2(pwm2_auto_point4_temp, S_IWUSR | S_IRUGO,
-                     show_auto_temp, store_auto_temp, 1, 3),
-       SENSOR_ATTR_2(pwm2_auto_point5_pwm, S_IWUSR | S_IRUGO,
-                     show_auto_pwm, store_auto_pwm, 1, 4),
-       SENSOR_ATTR_2(pwm2_auto_point5_temp, S_IWUSR | S_IRUGO,
-                     show_auto_temp, store_auto_temp, 1, 4),
-       SENSOR_ATTR_2(pwm2_auto_point6_pwm, S_IWUSR | S_IRUGO,
-                     show_auto_pwm, store_auto_pwm, 1, 5),
-       SENSOR_ATTR_2(pwm2_auto_point6_temp, S_IWUSR | S_IRUGO,
-                     show_auto_temp, store_auto_temp, 1, 5),
-       SENSOR_ATTR_2(pwm2_auto_point7_pwm, S_IWUSR | S_IRUGO,
-                     show_auto_pwm, store_auto_pwm, 1, 6),
-       SENSOR_ATTR_2(pwm2_auto_point7_temp, S_IWUSR | S_IRUGO,
-                     show_auto_temp, store_auto_temp, 1, 6),
-
-       SENSOR_ATTR_2(pwm3_auto_point1_pwm, S_IWUSR | S_IRUGO,
-                     show_auto_pwm, store_auto_pwm, 2, 0),
-       SENSOR_ATTR_2(pwm3_auto_point1_temp, S_IWUSR | S_IRUGO,
-                     show_auto_temp, store_auto_temp, 2, 0),
-       SENSOR_ATTR_2(pwm3_auto_point2_pwm, S_IWUSR | S_IRUGO,
-                     show_auto_pwm, store_auto_pwm, 2, 1),
-       SENSOR_ATTR_2(pwm3_auto_point2_temp, S_IWUSR | S_IRUGO,
-                     show_auto_temp, store_auto_temp, 2, 1),
-       SENSOR_ATTR_2(pwm3_auto_point3_pwm, S_IWUSR | S_IRUGO,
-                     show_auto_pwm, store_auto_pwm, 2, 2),
-       SENSOR_ATTR_2(pwm3_auto_point3_temp, S_IWUSR | S_IRUGO,
-                     show_auto_temp, store_auto_temp, 2, 2),
-       SENSOR_ATTR_2(pwm3_auto_point4_pwm, S_IWUSR | S_IRUGO,
-                     show_auto_pwm, store_auto_pwm, 2, 3),
-       SENSOR_ATTR_2(pwm3_auto_point4_temp, S_IWUSR | S_IRUGO,
-                     show_auto_temp, store_auto_temp, 2, 3),
-       SENSOR_ATTR_2(pwm3_auto_point5_pwm, S_IWUSR | S_IRUGO,
-                     show_auto_pwm, store_auto_pwm, 2, 4),
-       SENSOR_ATTR_2(pwm3_auto_point5_temp, S_IWUSR | S_IRUGO,
-                     show_auto_temp, store_auto_temp, 2, 4),
-       SENSOR_ATTR_2(pwm3_auto_point6_pwm, S_IWUSR | S_IRUGO,
-                     show_auto_pwm, store_auto_pwm, 2, 5),
-       SENSOR_ATTR_2(pwm3_auto_point6_temp, S_IWUSR | S_IRUGO,
-                     show_auto_temp, store_auto_temp, 2, 5),
-       SENSOR_ATTR_2(pwm3_auto_point7_pwm, S_IWUSR | S_IRUGO,
-                     show_auto_pwm, store_auto_pwm, 2, 6),
-       SENSOR_ATTR_2(pwm3_auto_point7_temp, S_IWUSR | S_IRUGO,
-                     show_auto_temp, store_auto_temp, 2, 6),
-
-       SENSOR_ATTR_2(pwm4_auto_point1_pwm, S_IWUSR | S_IRUGO,
-                     show_auto_pwm, store_auto_pwm, 3, 0),
-       SENSOR_ATTR_2(pwm4_auto_point1_temp, S_IWUSR | S_IRUGO,
-                     show_auto_temp, store_auto_temp, 3, 0),
-       SENSOR_ATTR_2(pwm4_auto_point2_pwm, S_IWUSR | S_IRUGO,
-                     show_auto_pwm, store_auto_pwm, 3, 1),
-       SENSOR_ATTR_2(pwm4_auto_point2_temp, S_IWUSR | S_IRUGO,
-                     show_auto_temp, store_auto_temp, 3, 1),
-       SENSOR_ATTR_2(pwm4_auto_point3_pwm, S_IWUSR | S_IRUGO,
-                     show_auto_pwm, store_auto_pwm, 3, 2),
-       SENSOR_ATTR_2(pwm4_auto_point3_temp, S_IWUSR | S_IRUGO,
-                     show_auto_temp, store_auto_temp, 3, 2),
-       SENSOR_ATTR_2(pwm4_auto_point4_pwm, S_IWUSR | S_IRUGO,
-                     show_auto_pwm, store_auto_pwm, 3, 3),
-       SENSOR_ATTR_2(pwm4_auto_point4_temp, S_IWUSR | S_IRUGO,
-                     show_auto_temp, store_auto_temp, 3, 3),
-       SENSOR_ATTR_2(pwm4_auto_point5_pwm, S_IWUSR | S_IRUGO,
-                     show_auto_pwm, store_auto_pwm, 3, 4),
-       SENSOR_ATTR_2(pwm4_auto_point5_temp, S_IWUSR | S_IRUGO,
-                     show_auto_temp, store_auto_temp, 3, 4),
-       SENSOR_ATTR_2(pwm4_auto_point6_pwm, S_IWUSR | S_IRUGO,
-                     show_auto_pwm, store_auto_pwm, 3, 5),
-       SENSOR_ATTR_2(pwm4_auto_point6_temp, S_IWUSR | S_IRUGO,
-                     show_auto_temp, store_auto_temp, 3, 5),
-       SENSOR_ATTR_2(pwm4_auto_point7_pwm, S_IWUSR | S_IRUGO,
-                     show_auto_pwm, store_auto_pwm, 3, 6),
-       SENSOR_ATTR_2(pwm4_auto_point7_temp, S_IWUSR | S_IRUGO,
-                     show_auto_temp, store_auto_temp, 3, 6),
-
-       SENSOR_ATTR_2(pwm5_auto_point1_pwm, S_IWUSR | S_IRUGO,
-                     show_auto_pwm, store_auto_pwm, 4, 0),
-       SENSOR_ATTR_2(pwm5_auto_point1_temp, S_IWUSR | S_IRUGO,
-                     show_auto_temp, store_auto_temp, 4, 0),
-       SENSOR_ATTR_2(pwm5_auto_point2_pwm, S_IWUSR | S_IRUGO,
-                     show_auto_pwm, store_auto_pwm, 4, 1),
-       SENSOR_ATTR_2(pwm5_auto_point2_temp, S_IWUSR | S_IRUGO,
-                     show_auto_temp, store_auto_temp, 4, 1),
-       SENSOR_ATTR_2(pwm5_auto_point3_pwm, S_IWUSR | S_IRUGO,
-                     show_auto_pwm, store_auto_pwm, 4, 2),
-       SENSOR_ATTR_2(pwm5_auto_point3_temp, S_IWUSR | S_IRUGO,
-                     show_auto_temp, store_auto_temp, 4, 2),
-       SENSOR_ATTR_2(pwm5_auto_point4_pwm, S_IWUSR | S_IRUGO,
-                     show_auto_pwm, store_auto_pwm, 4, 3),
-       SENSOR_ATTR_2(pwm5_auto_point4_temp, S_IWUSR | S_IRUGO,
-                     show_auto_temp, store_auto_temp, 4, 3),
-       SENSOR_ATTR_2(pwm5_auto_point5_pwm, S_IWUSR | S_IRUGO,
-                     show_auto_pwm, store_auto_pwm, 4, 4),
-       SENSOR_ATTR_2(pwm5_auto_point5_temp, S_IWUSR | S_IRUGO,
-                     show_auto_temp, store_auto_temp, 4, 4),
-       SENSOR_ATTR_2(pwm5_auto_point6_pwm, S_IWUSR | S_IRUGO,
-                     show_auto_pwm, store_auto_pwm, 4, 5),
-       SENSOR_ATTR_2(pwm5_auto_point6_temp, S_IWUSR | S_IRUGO,
-                     show_auto_temp, store_auto_temp, 4, 5),
-       SENSOR_ATTR_2(pwm5_auto_point7_pwm, S_IWUSR | S_IRUGO,
-                     show_auto_pwm, store_auto_pwm, 4, 6),
-       SENSOR_ATTR_2(pwm5_auto_point7_temp, S_IWUSR | S_IRUGO,
-                     show_auto_temp, store_auto_temp, 4, 6),
+static struct sensor_device_template *nct6775_attributes_pwm_template[] = {
+       &sensor_dev_template_pwm,
+       &sensor_dev_template_pwm_mode,
+       &sensor_dev_template_pwm_enable,
+       &sensor_dev_template_pwm_temp_sel,
+       &sensor_dev_template_pwm_temp_tolerance,
+       &sensor_dev_template_pwm_crit_temp_tolerance,
+       &sensor_dev_template_pwm_target_temp,
+       &sensor_dev_template_fan_target,
+       &sensor_dev_template_fan_tolerance,
+       &sensor_dev_template_pwm_stop_time,
+       &sensor_dev_template_pwm_step_up_time,
+       &sensor_dev_template_pwm_step_down_time,
+       &sensor_dev_template_pwm_start,
+       &sensor_dev_template_pwm_floor,
+       &sensor_dev_template_pwm_weight_temp_sel,
+       &sensor_dev_template_pwm_weight_temp_step,
+       &sensor_dev_template_pwm_weight_temp_step_tol,
+       &sensor_dev_template_pwm_weight_temp_step_base,
+       &sensor_dev_template_pwm_weight_duty_step,
+       &sensor_dev_template_pwm_max,                   /* 19 */
+       &sensor_dev_template_pwm_step,                  /* 20 */
+       &sensor_dev_template_pwm_weight_duty_base,      /* 21 */
+       &sensor_dev_template_pwm_auto_point1_pwm,       /* 22 */
+       &sensor_dev_template_pwm_auto_point1_temp,
+       &sensor_dev_template_pwm_auto_point2_pwm,
+       &sensor_dev_template_pwm_auto_point2_temp,
+       &sensor_dev_template_pwm_auto_point3_pwm,
+       &sensor_dev_template_pwm_auto_point3_temp,
+       &sensor_dev_template_pwm_auto_point4_pwm,
+       &sensor_dev_template_pwm_auto_point4_temp,
+       &sensor_dev_template_pwm_auto_point5_pwm,
+       &sensor_dev_template_pwm_auto_point5_temp,
+       &sensor_dev_template_pwm_auto_point6_pwm,
+       &sensor_dev_template_pwm_auto_point6_temp,
+       &sensor_dev_template_pwm_auto_point7_pwm,
+       &sensor_dev_template_pwm_auto_point7_temp,      /* 35 */
+
+       NULL
+};
+
+static struct sensor_template_group nct6775_pwm_template_group = {
+       .templates = nct6775_attributes_pwm_template,
+       .is_visible = nct6775_pwm_is_visible,
+       .base = 1,
 };
 
 static ssize_t
@@ -3159,7 +3009,6 @@ clear_caseopen(struct device *dev, struct device_attribute *attr,
               const char *buf, size_t count)
 {
        struct nct6775_data *data = dev_get_drvdata(dev);
-       struct nct6775_sio_data *sio_data = dev->platform_data;
        int nr = to_sensor_dev_attr(attr)->index - INTRUSION_ALARM_BASE;
        unsigned long val;
        u8 reg;
@@ -3175,19 +3024,19 @@ clear_caseopen(struct device *dev, struct device_attribute *attr,
         * The CR registers are the same for all chips, and not all chips
         * support clearing the caseopen status through "regular" registers.
         */
-       ret = superio_enter(sio_data->sioreg);
+       ret = superio_enter(data->sioreg);
        if (ret) {
                count = ret;
                goto error;
        }
 
-       superio_select(sio_data->sioreg, NCT6775_LD_ACPI);
-       reg = superio_inb(sio_data->sioreg, NCT6775_REG_CR_CASEOPEN_CLR[nr]);
+       superio_select(data->sioreg, NCT6775_LD_ACPI);
+       reg = superio_inb(data->sioreg, NCT6775_REG_CR_CASEOPEN_CLR[nr]);
        reg |= NCT6775_CR_CASEOPEN_CLR_MASK[nr];
-       superio_outb(sio_data->sioreg, NCT6775_REG_CR_CASEOPEN_CLR[nr], reg);
+       superio_outb(data->sioreg, NCT6775_REG_CR_CASEOPEN_CLR[nr], reg);
        reg &= ~NCT6775_CR_CASEOPEN_CLR_MASK[nr];
-       superio_outb(sio_data->sioreg, NCT6775_REG_CR_CASEOPEN_CLR[nr], reg);
-       superio_exit(sio_data->sioreg);
+       superio_outb(data->sioreg, NCT6775_REG_CR_CASEOPEN_CLR[nr], reg);
+       superio_exit(data->sioreg);
 
        data->valid = false;    /* Force cache refresh */
 error:
@@ -3195,71 +3044,79 @@ error:
        return count;
 }
 
-static struct sensor_device_attribute sda_caseopen[] = {
-       SENSOR_ATTR(intrusion0_alarm, S_IWUSR | S_IRUGO, show_alarm,
-                   clear_caseopen, INTRUSION_ALARM_BASE),
-       SENSOR_ATTR(intrusion1_alarm, S_IWUSR | S_IRUGO, show_alarm,
-                   clear_caseopen, INTRUSION_ALARM_BASE + 1),
-};
-
-/*
- * Driver and device management
- */
-
-static void nct6775_device_remove_files(struct device *dev)
+static SENSOR_DEVICE_ATTR(intrusion0_alarm, S_IWUSR | S_IRUGO, show_alarm,
+                         clear_caseopen, INTRUSION_ALARM_BASE);
+static SENSOR_DEVICE_ATTR(intrusion1_alarm, S_IWUSR | S_IRUGO, show_alarm,
+                         clear_caseopen, INTRUSION_ALARM_BASE + 1);
+static SENSOR_DEVICE_ATTR(intrusion0_beep, S_IWUSR | S_IRUGO, show_beep,
+                         store_beep, INTRUSION_ALARM_BASE);
+static SENSOR_DEVICE_ATTR(intrusion1_beep, S_IWUSR | S_IRUGO, show_beep,
+                         store_beep, INTRUSION_ALARM_BASE + 1);
+static SENSOR_DEVICE_ATTR(beep_enable, S_IWUSR | S_IRUGO, show_beep,
+                         store_beep, BEEP_ENABLE_BASE);
+
+static umode_t nct6775_other_is_visible(struct kobject *kobj,
+                                       struct attribute *attr, int index)
 {
-       /*
-        * some entries in the following arrays may not have been used in
-        * device_create_file(), but device_remove_file() will ignore them
-        */
-       int i;
+       struct device *dev = container_of(kobj, struct device, kobj);
        struct nct6775_data *data = dev_get_drvdata(dev);
 
-       for (i = 0; i < data->pwm_num; i++)
-               sysfs_remove_group(&dev->kobj, &nct6775_group_pwm[i]);
+       if (index == 1 && !data->have_vid)
+               return 0;
 
-       for (i = 0; i < ARRAY_SIZE(sda_pwm_max); i++)
-               device_remove_file(dev, &sda_pwm_max[i].dev_attr);
+       if (index == 2 || index == 3) {
+               if (data->ALARM_BITS[INTRUSION_ALARM_BASE + index - 2] < 0)
+                       return 0;
+       }
+
+       if (index == 4 || index == 5) {
+               if (data->BEEP_BITS[INTRUSION_ALARM_BASE + index - 4] < 0)
+                       return 0;
+       }
 
-       for (i = 0; i < ARRAY_SIZE(sda_pwm_step); i++)
-               device_remove_file(dev, &sda_pwm_step[i].dev_attr);
+       return attr->mode;
+}
 
-       for (i = 0; i < ARRAY_SIZE(sda_weight_duty_base); i++)
-               device_remove_file(dev, &sda_weight_duty_base[i].dev_attr);
+/*
+ * nct6775_other_is_visible uses the index into the following array
+ * to determine if attributes should be created or not.
+ * Any change in order or content must be matched.
+ */
+static struct attribute *nct6775_attributes_other[] = {
+       &dev_attr_name.attr,
+       &dev_attr_cpu0_vid.attr,                                /* 1 */
+       &sensor_dev_attr_intrusion0_alarm.dev_attr.attr,        /* 2 */
+       &sensor_dev_attr_intrusion1_alarm.dev_attr.attr,        /* 3 */
+       &sensor_dev_attr_intrusion0_beep.dev_attr.attr,         /* 4 */
+       &sensor_dev_attr_intrusion1_beep.dev_attr.attr,         /* 5 */
+       &sensor_dev_attr_beep_enable.dev_attr.attr,             /* 6 */
+
+       NULL
+};
 
-       for (i = 0; i < ARRAY_SIZE(sda_auto_pwm_arrays); i++)
-               device_remove_file(dev, &sda_auto_pwm_arrays[i].dev_attr);
+static const struct attribute_group nct6775_group_other = {
+       .attrs = nct6775_attributes_other,
+       .is_visible = nct6775_other_is_visible,
+};
 
-       for (i = 0; i < data->in_num; i++)
-               sysfs_remove_group(&dev->kobj, &nct6775_group_in[i]);
+/*
+ * Driver and device management
+ */
 
-       for (i = 0; i < 5; i++) {
-               device_remove_file(dev, &sda_fan_input[i].dev_attr);
-               device_remove_file(dev, &sda_fan_alarm[i].dev_attr);
-               device_remove_file(dev, &sda_fan_div[i].dev_attr);
-               device_remove_file(dev, &sda_fan_min[i].dev_attr);
-               device_remove_file(dev, &sda_fan_pulses[i].dev_attr);
-       }
-       for (i = 0; i < NUM_TEMP; i++) {
-               if (!(data->have_temp & (1 << i)))
-                       continue;
-               device_remove_file(dev, &sda_temp_input[i].dev_attr);
-               device_remove_file(dev, &sda_temp_label[i].dev_attr);
-               device_remove_file(dev, &sda_temp_max[i].dev_attr);
-               device_remove_file(dev, &sda_temp_max_hyst[i].dev_attr);
-               device_remove_file(dev, &sda_temp_crit[i].dev_attr);
-               device_remove_file(dev, &sda_temp_alarm[i].dev_attr);
-               if (!(data->have_temp_fixed & (1 << i)))
-                       continue;
-               device_remove_file(dev, &sda_temp_type[i].dev_attr);
-               device_remove_file(dev, &sda_temp_offset[i].dev_attr);
-       }
+static void nct6775_device_remove_files(struct device *dev)
+{
+       struct nct6775_data *data = dev_get_drvdata(dev);
 
-       device_remove_file(dev, &sda_caseopen[0].dev_attr);
-       device_remove_file(dev, &sda_caseopen[1].dev_attr);
+       if (data->group_pwm)
+               sysfs_remove_group(&dev->kobj, data->group_pwm);
+       if (data->group_in)
+               sysfs_remove_group(&dev->kobj, data->group_in);
+       if (data->group_fan)
+               sysfs_remove_group(&dev->kobj, data->group_fan);
+       if (data->group_temp)
+               sysfs_remove_group(&dev->kobj, data->group_temp);
 
-       device_remove_file(dev, &dev_attr_name);
-       device_remove_file(dev, &dev_attr_cpu0_vid);
+       sysfs_remove_group(&dev->kobj, &nct6775_group_other);
 }
 
 /* Get the monitoring functions started */
@@ -3297,68 +3154,78 @@ static inline void nct6775_init_device(struct nct6775_data *data)
        for (i = 0; i < data->temp_fixed_num; i++) {
                if (!(data->have_temp_fixed & (1 << i)))
                        continue;
-               if ((tmp & (0x02 << i)))        /* diode */
-                       data->temp_type[i] = 3 - ((diode >> i) & 0x02);
+               if ((tmp & (data->DIODE_MASK << i)))    /* diode */
+                       data->temp_type[i]
+                         = 3 - ((diode >> i) & data->DIODE_MASK);
                else                            /* thermistor */
                        data->temp_type[i] = 4;
        }
 }
 
-static int
-nct6775_check_fan_inputs(const struct nct6775_sio_data *sio_data,
-                        struct nct6775_data *data)
+static void
+nct6775_check_fan_inputs(struct nct6775_data *data)
 {
+       bool fan3pin, fan4pin, fan4min, fan5pin, fan6pin;
+       bool pwm3pin, pwm4pin, pwm5pin, pwm6pin;
+       int sioreg = data->sioreg;
        int regval;
-       bool fan3pin, fan3min, fan4pin, fan4min, fan5pin;
-       bool pwm3pin, pwm4pin, pwm5pin;
-       int ret;
-
-       ret = superio_enter(sio_data->sioreg);
-       if (ret)
-               return ret;
 
        /* fan4 and fan5 share some pins with the GPIO and serial flash */
        if (data->kind == nct6775) {
-               regval = superio_inb(sio_data->sioreg, 0x2c);
+               regval = superio_inb(sioreg, 0x2c);
 
                fan3pin = regval & (1 << 6);
-               fan3min = fan3pin;
                pwm3pin = regval & (1 << 7);
 
                /* On NCT6775, fan4 shares pins with the fdc interface */
-               fan4pin = !(superio_inb(sio_data->sioreg, 0x2A) & 0x80);
-               fan4min = 0;
-               fan5pin = 0;
-               pwm4pin = 0;
-               pwm5pin = 0;
+               fan4pin = !(superio_inb(sioreg, 0x2A) & 0x80);
+               fan4min = false;
+               fan5pin = false;
+               fan6pin = false;
+               pwm4pin = false;
+               pwm5pin = false;
+               pwm6pin = false;
        } else if (data->kind == nct6776) {
-               bool gpok = superio_inb(sio_data->sioreg, 0x27) & 0x80;
+               bool gpok = superio_inb(sioreg, 0x27) & 0x80;
 
-               superio_select(sio_data->sioreg, NCT6775_LD_HWM);
-               regval = superio_inb(sio_data->sioreg, SIO_REG_ENABLE);
+               superio_select(sioreg, NCT6775_LD_HWM);
+               regval = superio_inb(sioreg, SIO_REG_ENABLE);
 
                if (regval & 0x80)
                        fan3pin = gpok;
                else
-                       fan3pin = !(superio_inb(sio_data->sioreg, 0x24) & 0x40);
+                       fan3pin = !(superio_inb(sioreg, 0x24) & 0x40);
 
                if (regval & 0x40)
                        fan4pin = gpok;
                else
-                       fan4pin = superio_inb(sio_data->sioreg, 0x1C) & 0x01;
+                       fan4pin = superio_inb(sioreg, 0x1C) & 0x01;
 
                if (regval & 0x20)
                        fan5pin = gpok;
                else
-                       fan5pin = superio_inb(sio_data->sioreg, 0x1C) & 0x02;
+                       fan5pin = superio_inb(sioreg, 0x1C) & 0x02;
 
                fan4min = fan4pin;
-               fan3min = fan3pin;
+               fan6pin = false;
                pwm3pin = fan3pin;
-               pwm4pin = 0;
-               pwm5pin = 0;
-       } else {        /* NCT6779D */
-               regval = superio_inb(sio_data->sioreg, 0x1c);
+               pwm4pin = false;
+               pwm5pin = false;
+               pwm6pin = false;
+       } else if (data->kind == nct6106) {
+               regval = superio_inb(sioreg, 0x24);
+               fan3pin = !(regval & 0x80);
+               pwm3pin = regval & 0x08;
+
+               fan4pin = false;
+               fan4min = false;
+               fan5pin = false;
+               fan6pin = false;
+               pwm4pin = false;
+               pwm5pin = false;
+               pwm6pin = false;
+       } else {        /* NCT6779D or NCT6791D */
+               regval = superio_inb(sioreg, 0x1c);
 
                fan3pin = !(regval & (1 << 5));
                fan4pin = !(regval & (1 << 6));
@@ -3368,22 +3235,25 @@ nct6775_check_fan_inputs(const struct nct6775_sio_data *sio_data,
                pwm4pin = !(regval & (1 << 1));
                pwm5pin = !(regval & (1 << 2));
 
-               fan3min = fan3pin;
                fan4min = fan4pin;
-       }
-
-       superio_exit(sio_data->sioreg);
 
-       data->has_fan = data->has_fan_min = 0x03; /* fan1 and fan2 */
-       data->has_fan |= fan3pin << 2;
-       data->has_fan_min |= fan3min << 2;
-
-       data->has_fan |= (fan4pin << 3) | (fan5pin << 4);
-       data->has_fan_min |= (fan4min << 3) | (fan5pin << 4);
-
-       data->has_pwm = 0x03 | (pwm3pin << 2) | (pwm4pin << 3) | (pwm5pin << 4);
+               if (data->kind == nct6791) {
+                       regval = superio_inb(sioreg, 0x2d);
+                       fan6pin = (regval & (1 << 1));
+                       pwm6pin = (regval & (1 << 0));
+               } else {        /* NCT6779D */
+                       fan6pin = false;
+                       pwm6pin = false;
+               }
+       }
 
-       return 0;
+       /* fan 1 and 2 (0x03) are always present */
+       data->has_fan = 0x03 | (fan3pin << 2) | (fan4pin << 3) |
+               (fan5pin << 4) | (fan6pin << 5);
+       data->has_fan_min = 0x03 | (fan3pin << 2) | (fan4min << 3) |
+               (fan5pin << 4);
+       data->has_pwm = 0x03 | (pwm3pin << 2) | (pwm4pin << 3) |
+               (pwm5pin << 4) | (pwm6pin << 5);
 }
 
 static void add_temp_sensors(struct nct6775_data *data, const u16 *regp,
@@ -3415,16 +3285,17 @@ static void add_temp_sensors(struct nct6775_data *data, const u16 *regp,
 static int nct6775_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
-       struct nct6775_sio_data *sio_data = dev->platform_data;
+       struct nct6775_sio_data *sio_data = dev_get_platdata(dev);
        struct nct6775_data *data;
        struct resource *res;
        int i, s, err = 0;
        int src, mask, available;
        const u16 *reg_temp, *reg_temp_over, *reg_temp_hyst, *reg_temp_config;
        const u16 *reg_temp_alternate, *reg_temp_crit;
+       const u16 *reg_temp_crit_l = NULL, *reg_temp_crit_h = NULL;
        int num_reg_temp;
-       bool have_vid = false;
        u8 cr2a;
+       struct attribute_group *group;
 
        res = platform_get_resource(pdev, IORESOURCE_IO, 0);
        if (!devm_request_region(&pdev->dev, res->start, IOREGION_LENGTH,
@@ -3437,6 +3308,7 @@ static int nct6775_probe(struct platform_device *pdev)
                return -ENOMEM;
 
        data->kind = sio_data->kind;
+       data->sioreg = sio_data->sioreg;
        data->addr = res->start;
        mutex_init(&data->update_lock);
        data->name = nct6775_device_names[data->kind];
@@ -3444,6 +3316,75 @@ static int nct6775_probe(struct platform_device *pdev)
        platform_set_drvdata(pdev, data);
 
        switch (data->kind) {
+       case nct6106:
+               data->in_num = 9;
+               data->pwm_num = 3;
+               data->auto_pwm_num = 4;
+               data->temp_fixed_num = 3;
+               data->num_temp_alarms = 6;
+               data->num_temp_beeps = 6;
+
+               data->fan_from_reg = fan_from_reg13;
+               data->fan_from_reg_min = fan_from_reg13;
+
+               data->temp_label = nct6776_temp_label;
+               data->temp_label_num = ARRAY_SIZE(nct6776_temp_label);
+
+               data->REG_VBAT = NCT6106_REG_VBAT;
+               data->REG_DIODE = NCT6106_REG_DIODE;
+               data->DIODE_MASK = NCT6106_DIODE_MASK;
+               data->REG_VIN = NCT6106_REG_IN;
+               data->REG_IN_MINMAX[0] = NCT6106_REG_IN_MIN;
+               data->REG_IN_MINMAX[1] = NCT6106_REG_IN_MAX;
+               data->REG_TARGET = NCT6106_REG_TARGET;
+               data->REG_FAN = NCT6106_REG_FAN;
+               data->REG_FAN_MODE = NCT6106_REG_FAN_MODE;
+               data->REG_FAN_MIN = NCT6106_REG_FAN_MIN;
+               data->REG_FAN_PULSES = NCT6106_REG_FAN_PULSES;
+               data->FAN_PULSE_SHIFT = NCT6106_FAN_PULSE_SHIFT;
+               data->REG_FAN_TIME[0] = NCT6106_REG_FAN_STOP_TIME;
+               data->REG_FAN_TIME[1] = NCT6106_REG_FAN_STEP_UP_TIME;
+               data->REG_FAN_TIME[2] = NCT6106_REG_FAN_STEP_DOWN_TIME;
+               data->REG_PWM[0] = NCT6106_REG_PWM;
+               data->REG_PWM[1] = NCT6106_REG_FAN_START_OUTPUT;
+               data->REG_PWM[2] = NCT6106_REG_FAN_STOP_OUTPUT;
+               data->REG_PWM[5] = NCT6106_REG_WEIGHT_DUTY_STEP;
+               data->REG_PWM[6] = NCT6106_REG_WEIGHT_DUTY_BASE;
+               data->REG_PWM_READ = NCT6106_REG_PWM_READ;
+               data->REG_PWM_MODE = NCT6106_REG_PWM_MODE;
+               data->PWM_MODE_MASK = NCT6106_PWM_MODE_MASK;
+               data->REG_AUTO_TEMP = NCT6106_REG_AUTO_TEMP;
+               data->REG_AUTO_PWM = NCT6106_REG_AUTO_PWM;
+               data->REG_CRITICAL_TEMP = NCT6106_REG_CRITICAL_TEMP;
+               data->REG_CRITICAL_TEMP_TOLERANCE
+                 = NCT6106_REG_CRITICAL_TEMP_TOLERANCE;
+               data->REG_CRITICAL_PWM_ENABLE = NCT6106_REG_CRITICAL_PWM_ENABLE;
+               data->CRITICAL_PWM_ENABLE_MASK
+                 = NCT6106_CRITICAL_PWM_ENABLE_MASK;
+               data->REG_CRITICAL_PWM = NCT6106_REG_CRITICAL_PWM;
+               data->REG_TEMP_OFFSET = NCT6106_REG_TEMP_OFFSET;
+               data->REG_TEMP_SOURCE = NCT6106_REG_TEMP_SOURCE;
+               data->REG_TEMP_SEL = NCT6106_REG_TEMP_SEL;
+               data->REG_WEIGHT_TEMP_SEL = NCT6106_REG_WEIGHT_TEMP_SEL;
+               data->REG_WEIGHT_TEMP[0] = NCT6106_REG_WEIGHT_TEMP_STEP;
+               data->REG_WEIGHT_TEMP[1] = NCT6106_REG_WEIGHT_TEMP_STEP_TOL;
+               data->REG_WEIGHT_TEMP[2] = NCT6106_REG_WEIGHT_TEMP_BASE;
+               data->REG_ALARM = NCT6106_REG_ALARM;
+               data->ALARM_BITS = NCT6106_ALARM_BITS;
+               data->REG_BEEP = NCT6106_REG_BEEP;
+               data->BEEP_BITS = NCT6106_BEEP_BITS;
+
+               reg_temp = NCT6106_REG_TEMP;
+               num_reg_temp = ARRAY_SIZE(NCT6106_REG_TEMP);
+               reg_temp_over = NCT6106_REG_TEMP_OVER;
+               reg_temp_hyst = NCT6106_REG_TEMP_HYST;
+               reg_temp_config = NCT6106_REG_TEMP_CONFIG;
+               reg_temp_alternate = NCT6106_REG_TEMP_ALTERNATE;
+               reg_temp_crit = NCT6106_REG_TEMP_CRIT;
+               reg_temp_crit_l = NCT6106_REG_TEMP_CRIT_L;
+               reg_temp_crit_h = NCT6106_REG_TEMP_CRIT_H;
+
+               break;
        case nct6775:
                data->in_num = 9;
                data->pwm_num = 3;
@@ -3451,8 +3392,10 @@ static int nct6775_probe(struct platform_device *pdev)
                data->has_fan_div = true;
                data->temp_fixed_num = 3;
                data->num_temp_alarms = 3;
+               data->num_temp_beeps = 3;
 
                data->ALARM_BITS = NCT6775_ALARM_BITS;
+               data->BEEP_BITS = NCT6775_BEEP_BITS;
 
                data->fan_from_reg = fan_from_reg16;
                data->fan_from_reg_min = fan_from_reg8;
@@ -3466,6 +3409,7 @@ static int nct6775_probe(struct platform_device *pdev)
                data->REG_CONFIG = NCT6775_REG_CONFIG;
                data->REG_VBAT = NCT6775_REG_VBAT;
                data->REG_DIODE = NCT6775_REG_DIODE;
+               data->DIODE_MASK = NCT6775_DIODE_MASK;
                data->REG_VIN = NCT6775_REG_IN;
                data->REG_IN_MINMAX[0] = NCT6775_REG_IN_MIN;
                data->REG_IN_MINMAX[1] = NCT6775_REG_IN_MAX;
@@ -3474,6 +3418,7 @@ static int nct6775_probe(struct platform_device *pdev)
                data->REG_FAN_MODE = NCT6775_REG_FAN_MODE;
                data->REG_FAN_MIN = NCT6775_REG_FAN_MIN;
                data->REG_FAN_PULSES = NCT6775_REG_FAN_PULSES;
+               data->FAN_PULSE_SHIFT = NCT6775_FAN_PULSE_SHIFT;
                data->REG_FAN_TIME[0] = NCT6775_REG_FAN_STOP_TIME;
                data->REG_FAN_TIME[1] = NCT6775_REG_FAN_STEP_UP_TIME;
                data->REG_FAN_TIME[2] = NCT6775_REG_FAN_STEP_DOWN_TIME;
@@ -3499,6 +3444,7 @@ static int nct6775_probe(struct platform_device *pdev)
                data->REG_WEIGHT_TEMP[1] = NCT6775_REG_WEIGHT_TEMP_STEP_TOL;
                data->REG_WEIGHT_TEMP[2] = NCT6775_REG_WEIGHT_TEMP_BASE;
                data->REG_ALARM = NCT6775_REG_ALARM;
+               data->REG_BEEP = NCT6775_REG_BEEP;
 
                reg_temp = NCT6775_REG_TEMP;
                num_reg_temp = ARRAY_SIZE(NCT6775_REG_TEMP);
@@ -3516,8 +3462,10 @@ static int nct6775_probe(struct platform_device *pdev)
                data->has_fan_div = false;
                data->temp_fixed_num = 3;
                data->num_temp_alarms = 3;
+               data->num_temp_beeps = 6;
 
                data->ALARM_BITS = NCT6776_ALARM_BITS;
+               data->BEEP_BITS = NCT6776_BEEP_BITS;
 
                data->fan_from_reg = fan_from_reg13;
                data->fan_from_reg_min = fan_from_reg13;
@@ -3531,6 +3479,7 @@ static int nct6775_probe(struct platform_device *pdev)
                data->REG_CONFIG = NCT6775_REG_CONFIG;
                data->REG_VBAT = NCT6775_REG_VBAT;
                data->REG_DIODE = NCT6775_REG_DIODE;
+               data->DIODE_MASK = NCT6775_DIODE_MASK;
                data->REG_VIN = NCT6775_REG_IN;
                data->REG_IN_MINMAX[0] = NCT6775_REG_IN_MIN;
                data->REG_IN_MINMAX[1] = NCT6775_REG_IN_MAX;
@@ -3539,6 +3488,7 @@ static int nct6775_probe(struct platform_device *pdev)
                data->REG_FAN_MODE = NCT6775_REG_FAN_MODE;
                data->REG_FAN_MIN = NCT6776_REG_FAN_MIN;
                data->REG_FAN_PULSES = NCT6776_REG_FAN_PULSES;
+               data->FAN_PULSE_SHIFT = NCT6775_FAN_PULSE_SHIFT;
                data->REG_FAN_TIME[0] = NCT6775_REG_FAN_STOP_TIME;
                data->REG_FAN_TIME[1] = NCT6775_REG_FAN_STEP_UP_TIME;
                data->REG_FAN_TIME[2] = NCT6775_REG_FAN_STEP_DOWN_TIME;
@@ -3564,6 +3514,7 @@ static int nct6775_probe(struct platform_device *pdev)
                data->REG_WEIGHT_TEMP[1] = NCT6775_REG_WEIGHT_TEMP_STEP_TOL;
                data->REG_WEIGHT_TEMP[2] = NCT6775_REG_WEIGHT_TEMP_BASE;
                data->REG_ALARM = NCT6775_REG_ALARM;
+               data->REG_BEEP = NCT6776_REG_BEEP;
 
                reg_temp = NCT6775_REG_TEMP;
                num_reg_temp = ARRAY_SIZE(NCT6775_REG_TEMP);
@@ -3581,8 +3532,10 @@ static int nct6775_probe(struct platform_device *pdev)
                data->has_fan_div = false;
                data->temp_fixed_num = 6;
                data->num_temp_alarms = 2;
+               data->num_temp_beeps = 2;
 
                data->ALARM_BITS = NCT6779_ALARM_BITS;
+               data->BEEP_BITS = NCT6779_BEEP_BITS;
 
                data->fan_from_reg = fan_from_reg13;
                data->fan_from_reg_min = fan_from_reg13;
@@ -3596,6 +3549,7 @@ static int nct6775_probe(struct platform_device *pdev)
                data->REG_CONFIG = NCT6775_REG_CONFIG;
                data->REG_VBAT = NCT6775_REG_VBAT;
                data->REG_DIODE = NCT6775_REG_DIODE;
+               data->DIODE_MASK = NCT6775_DIODE_MASK;
                data->REG_VIN = NCT6779_REG_IN;
                data->REG_IN_MINMAX[0] = NCT6775_REG_IN_MIN;
                data->REG_IN_MINMAX[1] = NCT6775_REG_IN_MAX;
@@ -3604,6 +3558,7 @@ static int nct6775_probe(struct platform_device *pdev)
                data->REG_FAN_MODE = NCT6775_REG_FAN_MODE;
                data->REG_FAN_MIN = NCT6776_REG_FAN_MIN;
                data->REG_FAN_PULSES = NCT6779_REG_FAN_PULSES;
+               data->FAN_PULSE_SHIFT = NCT6775_FAN_PULSE_SHIFT;
                data->REG_FAN_TIME[0] = NCT6775_REG_FAN_STOP_TIME;
                data->REG_FAN_TIME[1] = NCT6775_REG_FAN_STEP_UP_TIME;
                data->REG_FAN_TIME[2] = NCT6775_REG_FAN_STEP_DOWN_TIME;
@@ -3621,6 +3576,10 @@ static int nct6775_probe(struct platform_device *pdev)
                data->REG_CRITICAL_TEMP = NCT6775_REG_CRITICAL_TEMP;
                data->REG_CRITICAL_TEMP_TOLERANCE
                  = NCT6775_REG_CRITICAL_TEMP_TOLERANCE;
+               data->REG_CRITICAL_PWM_ENABLE = NCT6779_REG_CRITICAL_PWM_ENABLE;
+               data->CRITICAL_PWM_ENABLE_MASK
+                 = NCT6779_CRITICAL_PWM_ENABLE_MASK;
+               data->REG_CRITICAL_PWM = NCT6779_REG_CRITICAL_PWM;
                data->REG_TEMP_OFFSET = NCT6779_REG_TEMP_OFFSET;
                data->REG_TEMP_SOURCE = NCT6775_REG_TEMP_SOURCE;
                data->REG_TEMP_SEL = NCT6775_REG_TEMP_SEL;
@@ -3629,6 +3588,81 @@ static int nct6775_probe(struct platform_device *pdev)
                data->REG_WEIGHT_TEMP[1] = NCT6775_REG_WEIGHT_TEMP_STEP_TOL;
                data->REG_WEIGHT_TEMP[2] = NCT6775_REG_WEIGHT_TEMP_BASE;
                data->REG_ALARM = NCT6779_REG_ALARM;
+               data->REG_BEEP = NCT6776_REG_BEEP;
+
+               reg_temp = NCT6779_REG_TEMP;
+               num_reg_temp = ARRAY_SIZE(NCT6779_REG_TEMP);
+               reg_temp_over = NCT6779_REG_TEMP_OVER;
+               reg_temp_hyst = NCT6779_REG_TEMP_HYST;
+               reg_temp_config = NCT6779_REG_TEMP_CONFIG;
+               reg_temp_alternate = NCT6779_REG_TEMP_ALTERNATE;
+               reg_temp_crit = NCT6779_REG_TEMP_CRIT;
+
+               break;
+       case nct6791:
+               data->in_num = 15;
+               data->pwm_num = 6;
+               data->auto_pwm_num = 4;
+               data->has_fan_div = false;
+               data->temp_fixed_num = 6;
+               data->num_temp_alarms = 2;
+               data->num_temp_beeps = 2;
+
+               data->ALARM_BITS = NCT6791_ALARM_BITS;
+               data->BEEP_BITS = NCT6779_BEEP_BITS;
+
+               data->fan_from_reg = fan_from_reg13;
+               data->fan_from_reg_min = fan_from_reg13;
+               data->target_temp_mask = 0xff;
+               data->tolerance_mask = 0x07;
+               data->speed_tolerance_limit = 63;
+
+               data->temp_label = nct6779_temp_label;
+               data->temp_label_num = ARRAY_SIZE(nct6779_temp_label);
+
+               data->REG_CONFIG = NCT6775_REG_CONFIG;
+               data->REG_VBAT = NCT6775_REG_VBAT;
+               data->REG_DIODE = NCT6775_REG_DIODE;
+               data->DIODE_MASK = NCT6775_DIODE_MASK;
+               data->REG_VIN = NCT6779_REG_IN;
+               data->REG_IN_MINMAX[0] = NCT6775_REG_IN_MIN;
+               data->REG_IN_MINMAX[1] = NCT6775_REG_IN_MAX;
+               data->REG_TARGET = NCT6775_REG_TARGET;
+               data->REG_FAN = NCT6779_REG_FAN;
+               data->REG_FAN_MODE = NCT6775_REG_FAN_MODE;
+               data->REG_FAN_MIN = NCT6776_REG_FAN_MIN;
+               data->REG_FAN_PULSES = NCT6779_REG_FAN_PULSES;
+               data->FAN_PULSE_SHIFT = NCT6775_FAN_PULSE_SHIFT;
+               data->REG_FAN_TIME[0] = NCT6775_REG_FAN_STOP_TIME;
+               data->REG_FAN_TIME[1] = NCT6775_REG_FAN_STEP_UP_TIME;
+               data->REG_FAN_TIME[2] = NCT6775_REG_FAN_STEP_DOWN_TIME;
+               data->REG_TOLERANCE_H = NCT6776_REG_TOLERANCE_H;
+               data->REG_PWM[0] = NCT6775_REG_PWM;
+               data->REG_PWM[1] = NCT6775_REG_FAN_START_OUTPUT;
+               data->REG_PWM[2] = NCT6775_REG_FAN_STOP_OUTPUT;
+               data->REG_PWM[5] = NCT6775_REG_WEIGHT_DUTY_STEP;
+               data->REG_PWM[6] = NCT6776_REG_WEIGHT_DUTY_BASE;
+               data->REG_PWM_READ = NCT6775_REG_PWM_READ;
+               data->REG_PWM_MODE = NCT6776_REG_PWM_MODE;
+               data->PWM_MODE_MASK = NCT6776_PWM_MODE_MASK;
+               data->REG_AUTO_TEMP = NCT6775_REG_AUTO_TEMP;
+               data->REG_AUTO_PWM = NCT6775_REG_AUTO_PWM;
+               data->REG_CRITICAL_TEMP = NCT6775_REG_CRITICAL_TEMP;
+               data->REG_CRITICAL_TEMP_TOLERANCE
+                 = NCT6775_REG_CRITICAL_TEMP_TOLERANCE;
+               data->REG_CRITICAL_PWM_ENABLE = NCT6779_REG_CRITICAL_PWM_ENABLE;
+               data->CRITICAL_PWM_ENABLE_MASK
+                 = NCT6779_CRITICAL_PWM_ENABLE_MASK;
+               data->REG_CRITICAL_PWM = NCT6779_REG_CRITICAL_PWM;
+               data->REG_TEMP_OFFSET = NCT6779_REG_TEMP_OFFSET;
+               data->REG_TEMP_SOURCE = NCT6775_REG_TEMP_SOURCE;
+               data->REG_TEMP_SEL = NCT6775_REG_TEMP_SEL;
+               data->REG_WEIGHT_TEMP_SEL = NCT6775_REG_WEIGHT_TEMP_SEL;
+               data->REG_WEIGHT_TEMP[0] = NCT6775_REG_WEIGHT_TEMP_STEP;
+               data->REG_WEIGHT_TEMP[1] = NCT6775_REG_WEIGHT_TEMP_STEP_TOL;
+               data->REG_WEIGHT_TEMP[2] = NCT6775_REG_WEIGHT_TEMP_BASE;
+               data->REG_ALARM = NCT6791_REG_ALARM;
+               data->REG_BEEP = NCT6776_REG_BEEP;
 
                reg_temp = NCT6779_REG_TEMP;
                num_reg_temp = ARRAY_SIZE(NCT6779_REG_TEMP);
@@ -3700,6 +3734,13 @@ static int nct6775_probe(struct platform_device *pdev)
                        data->reg_temp[0][src - 1] = reg_temp[i];
                        data->reg_temp[1][src - 1] = reg_temp_over[i];
                        data->reg_temp[2][src - 1] = reg_temp_hyst[i];
+                       if (reg_temp_crit_h && reg_temp_crit_h[i])
+                               data->reg_temp[3][src - 1] = reg_temp_crit_h[i];
+                       else if (reg_temp_crit[src - 1])
+                               data->reg_temp[3][src - 1]
+                                 = reg_temp_crit[src - 1];
+                       if (reg_temp_crit_l && reg_temp_crit_l[i])
+                               data->reg_temp[4][src - 1] = reg_temp_crit_l[i];
                        data->reg_temp_config[src - 1] = reg_temp_config[i];
                        data->temp_src[src - 1] = src;
                        continue;
@@ -3714,8 +3755,12 @@ static int nct6775_probe(struct platform_device *pdev)
                data->reg_temp[1][s] = reg_temp_over[i];
                data->reg_temp[2][s] = reg_temp_hyst[i];
                data->reg_temp_config[s] = reg_temp_config[i];
-               if (reg_temp_crit[src - 1])
+               if (reg_temp_crit_h && reg_temp_crit_h[i])
+                       data->reg_temp[3][s] = reg_temp_crit_h[i];
+               else if (reg_temp_crit[src - 1])
                        data->reg_temp[3][s] = reg_temp_crit[src - 1];
+               if (reg_temp_crit_l && reg_temp_crit_l[i])
+                       data->reg_temp[4][s] = reg_temp_crit_l[i];
 
                data->temp_src[s] = src;
                s++;
@@ -3767,12 +3812,14 @@ static int nct6775_probe(struct platform_device *pdev)
        cr2a = superio_inb(sio_data->sioreg, 0x2a);
        switch (data->kind) {
        case nct6775:
-               have_vid = (cr2a & 0x40);
+               data->have_vid = (cr2a & 0x40);
                break;
        case nct6776:
-               have_vid = (cr2a & 0x60) == 0x40;
+               data->have_vid = (cr2a & 0x60) == 0x40;
                break;
+       case nct6106:
        case nct6779:
+       case nct6791:
                break;
        }
 
@@ -3780,7 +3827,7 @@ static int nct6775_probe(struct platform_device *pdev)
         * Read VID value
         * We can get the VID input values directly at logical device D 0xe3.
         */
-       if (have_vid) {
+       if (data->have_vid) {
                superio_select(sio_data->sioreg, NCT6775_LD_VID);
                data->vid = superio_inb(sio_data->sioreg, 0xe3);
                data->vrm = vid_which_vrm();
@@ -3793,6 +3840,9 @@ static int nct6775_probe(struct platform_device *pdev)
                tmp = superio_inb(sio_data->sioreg,
                                  NCT6775_REG_CR_FAN_DEBOUNCE);
                switch (data->kind) {
+               case nct6106:
+                       tmp |= 0xe0;
+                       break;
                case nct6775:
                        tmp |= 0x1e;
                        break;
@@ -3800,6 +3850,9 @@ static int nct6775_probe(struct platform_device *pdev)
                case nct6779:
                        tmp |= 0x3e;
                        break;
+               case nct6791:
+                       tmp |= 0x7e;
+                       break;
                }
                superio_outb(sio_data->sioreg, NCT6775_REG_CR_FAN_DEBOUNCE,
                             tmp);
@@ -3807,157 +3860,47 @@ static int nct6775_probe(struct platform_device *pdev)
                         data->name);
        }
 
-       superio_exit(sio_data->sioreg);
-
-       if (have_vid) {
-               err = device_create_file(dev, &dev_attr_cpu0_vid);
-               if (err)
-                       return err;
-       }
+       nct6775_check_fan_inputs(data);
 
-       err = nct6775_check_fan_inputs(sio_data, data);
-       if (err)
-               goto exit_remove;
+       superio_exit(sio_data->sioreg);
 
        /* Read fan clock dividers immediately */
        nct6775_init_fan_common(dev, data);
 
        /* Register sysfs hooks */
-       for (i = 0; i < data->pwm_num; i++) {
-               if (!(data->has_pwm & (1 << i)))
-                       continue;
-
-               err = sysfs_create_group(&dev->kobj, &nct6775_group_pwm[i]);
-               if (err)
-                       goto exit_remove;
-
-               if (data->REG_PWM[3]) {
-                       err = device_create_file(dev,
-                                       &sda_pwm_max[i].dev_attr);
-                       if (err)
-                               goto exit_remove;
-               }
-               if (data->REG_PWM[4]) {
-                       err = device_create_file(dev,
-                                       &sda_pwm_step[i].dev_attr);
-                       if (err)
-                               goto exit_remove;
-               }
-               if (data->REG_PWM[6]) {
-                       err = device_create_file(dev,
-                                       &sda_weight_duty_base[i].dev_attr);
-                       if (err)
-                               goto exit_remove;
-               }
-       }
-       for (i = 0; i < ARRAY_SIZE(sda_auto_pwm_arrays); i++) {
-               struct sensor_device_attribute_2 *attr =
-                       &sda_auto_pwm_arrays[i];
-
-               if (!(data->has_pwm & (1 << attr->nr)))
-                       continue;
-               if (attr->index > data->auto_pwm_num)
-                       continue;
-               err = device_create_file(dev, &attr->dev_attr);
-               if (err)
-                       goto exit_remove;
-       }
-
-       for (i = 0; i < data->in_num; i++) {
-               if (!(data->have_in & (1 << i)))
-                       continue;
-               err = sysfs_create_group(&dev->kobj, &nct6775_group_in[i]);
-               if (err)
-                       goto exit_remove;
+       group = nct6775_create_attr_group(dev, &nct6775_pwm_template_group,
+                                         data->pwm_num);
+       if (IS_ERR(group)) {
+               err = PTR_ERR(group);
+               goto exit_remove;
        }
+       data->group_pwm = group;
 
-       for (i = 0; i < 5; i++) {
-               if (data->has_fan & (1 << i)) {
-                       err = device_create_file(dev,
-                                                &sda_fan_input[i].dev_attr);
-                       if (err)
-                               goto exit_remove;
-                       if (data->ALARM_BITS[FAN_ALARM_BASE + i] >= 0) {
-                               err = device_create_file(dev,
-                                               &sda_fan_alarm[i].dev_attr);
-                               if (err)
-                                       goto exit_remove;
-                       }
-                       if (data->kind != nct6776 &&
-                           data->kind != nct6779) {
-                               err = device_create_file(dev,
-                                               &sda_fan_div[i].dev_attr);
-                               if (err)
-                                       goto exit_remove;
-                       }
-                       if (data->has_fan_min & (1 << i)) {
-                               err = device_create_file(dev,
-                                               &sda_fan_min[i].dev_attr);
-                               if (err)
-                                       goto exit_remove;
-                       }
-                       err = device_create_file(dev,
-                                               &sda_fan_pulses[i].dev_attr);
-                       if (err)
-                               goto exit_remove;
-               }
+       group = nct6775_create_attr_group(dev, &nct6775_in_template_group,
+                                         fls(data->have_in));
+       if (IS_ERR(group)) {
+               err = PTR_ERR(group);
+               goto exit_remove;
        }
+       data->group_in = group;
 
-       for (i = 0; i < NUM_TEMP; i++) {
-               if (!(data->have_temp & (1 << i)))
-                       continue;
-               err = device_create_file(dev, &sda_temp_input[i].dev_attr);
-               if (err)
-                       goto exit_remove;
-               if (data->temp_label) {
-                       err = device_create_file(dev,
-                                                &sda_temp_label[i].dev_attr);
-                       if (err)
-                               goto exit_remove;
-               }
-               if (data->reg_temp[1][i]) {
-                       err = device_create_file(dev,
-                                                &sda_temp_max[i].dev_attr);
-                       if (err)
-                               goto exit_remove;
-               }
-               if (data->reg_temp[2][i]) {
-                       err = device_create_file(dev,
-                                       &sda_temp_max_hyst[i].dev_attr);
-                       if (err)
-                               goto exit_remove;
-               }
-               if (data->reg_temp[3][i]) {
-                       err = device_create_file(dev,
-                                                &sda_temp_crit[i].dev_attr);
-                       if (err)
-                               goto exit_remove;
-               }
-               if (find_temp_source(data, i, data->num_temp_alarms) >= 0) {
-                       err = device_create_file(dev,
-                                                &sda_temp_alarm[i].dev_attr);
-                       if (err)
-                               goto exit_remove;
-               }
-               if (!(data->have_temp_fixed & (1 << i)))
-                       continue;
-               err = device_create_file(dev, &sda_temp_type[i].dev_attr);
-               if (err)
-                       goto exit_remove;
-               err = device_create_file(dev, &sda_temp_offset[i].dev_attr);
-               if (err)
-                       goto exit_remove;
+       group = nct6775_create_attr_group(dev, &nct6775_fan_template_group,
+                                         fls(data->has_fan));
+       if (IS_ERR(group)) {
+               err = PTR_ERR(group);
+               goto exit_remove;
        }
+       data->group_fan = group;
 
-       for (i = 0; i < ARRAY_SIZE(sda_caseopen); i++) {
-               if (data->ALARM_BITS[INTRUSION_ALARM_BASE + i] < 0)
-                       continue;
-               err = device_create_file(dev, &sda_caseopen[i].dev_attr);
-               if (err)
-                       goto exit_remove;
+       group = nct6775_create_attr_group(dev, &nct6775_temp_template_group,
+                                         fls(data->have_temp));
+       if (IS_ERR(group)) {
+               err = PTR_ERR(group);
+               goto exit_remove;
        }
+       data->group_temp = group;
 
-       err = device_create_file(dev, &dev_attr_name);
+       err = sysfs_create_group(&dev->kobj, &nct6775_group_other);
        if (err)
                goto exit_remove;
 
@@ -3988,11 +3931,10 @@ static int nct6775_remove(struct platform_device *pdev)
 static int nct6775_suspend(struct device *dev)
 {
        struct nct6775_data *data = nct6775_update_device(dev);
-       struct nct6775_sio_data *sio_data = dev->platform_data;
 
        mutex_lock(&data->update_lock);
        data->vbat = nct6775_read_value(data, data->REG_VBAT);
-       if (sio_data->kind == nct6775) {
+       if (data->kind == nct6775) {
                data->fandiv1 = nct6775_read_value(data, NCT6775_REG_FANDIV1);
                data->fandiv2 = nct6775_read_value(data, NCT6775_REG_FANDIV2);
        }
@@ -4004,7 +3946,6 @@ static int nct6775_suspend(struct device *dev)
 static int nct6775_resume(struct device *dev)
 {
        struct nct6775_data *data = dev_get_drvdata(dev);
-       struct nct6775_sio_data *sio_data = dev->platform_data;
        int i, j;
 
        mutex_lock(&data->update_lock);
@@ -4041,7 +3982,7 @@ static int nct6775_resume(struct device *dev)
 
        /* Restore other settings */
        nct6775_write_value(data, data->REG_VBAT, data->vbat);
-       if (sio_data->kind == nct6775) {
+       if (data->kind == nct6775) {
                nct6775_write_value(data, NCT6775_REG_FANDIV1, data->fandiv1);
                nct6775_write_value(data, NCT6775_REG_FANDIV2, data->fandiv2);
        }
@@ -4056,6 +3997,8 @@ static int nct6775_resume(struct device *dev)
 static const struct dev_pm_ops nct6775_dev_pm_ops = {
        .suspend = nct6775_suspend,
        .resume = nct6775_resume,
+       .freeze = nct6775_suspend,
+       .restore = nct6775_resume,
 };
 
 #define NCT6775_DEV_PM_OPS     (&nct6775_dev_pm_ops)
@@ -4074,17 +4017,19 @@ static struct platform_driver nct6775_driver = {
 };
 
 static const char * const nct6775_sio_names[] __initconst = {
+       "NCT6106D",
        "NCT6775F",
        "NCT6776D/F",
        "NCT6779D",
+       "NCT6791D",
 };
 
 /* nct6775_find() looks for a '627 in the Super-I/O config space */
-static int __init nct6775_find(int sioaddr, unsigned short *addr,
-                              struct nct6775_sio_data *sio_data)
+static int __init nct6775_find(int sioaddr, struct nct6775_sio_data *sio_data)
 {
        u16 val;
        int err;
+       int addr;
 
        err = superio_enter(sioaddr);
        if (err)
@@ -4096,6 +4041,9 @@ static int __init nct6775_find(int sioaddr, unsigned short *addr,
                val = (superio_inb(sioaddr, SIO_REG_DEVID) << 8)
                    | superio_inb(sioaddr, SIO_REG_DEVID + 1);
        switch (val & SIO_ID_MASK) {
+       case SIO_NCT6106_ID:
+               sio_data->kind = nct6106;
+               break;
        case SIO_NCT6775_ID:
                sio_data->kind = nct6775;
                break;
@@ -4105,6 +4053,9 @@ static int __init nct6775_find(int sioaddr, unsigned short *addr,
        case SIO_NCT6779_ID:
                sio_data->kind = nct6779;
                break;
+       case SIO_NCT6791_ID:
+               sio_data->kind = nct6791;
+               break;
        default:
                if (val != 0xffff)
                        pr_debug("unsupported chip ID: 0x%04x\n", val);
@@ -4116,8 +4067,8 @@ static int __init nct6775_find(int sioaddr, unsigned short *addr,
        superio_select(sioaddr, NCT6775_LD_HWM);
        val = (superio_inb(sioaddr, SIO_REG_ADDR) << 8)
            | superio_inb(sioaddr, SIO_REG_ADDR + 1);
-       *addr = val & IOREGION_ALIGNMENT;
-       if (*addr == 0) {
+       addr = val & IOREGION_ALIGNMENT;
+       if (addr == 0) {
                pr_err("Refusing to enable a Super-I/O device with a base I/O port 0\n");
                superio_exit(sioaddr);
                return -ENODEV;
@@ -4129,13 +4080,22 @@ static int __init nct6775_find(int sioaddr, unsigned short *addr,
                pr_warn("Forcibly enabling Super-I/O. Sensor is probably unusable.\n");
                superio_outb(sioaddr, SIO_REG_ENABLE, val | 0x01);
        }
+       if (sio_data->kind == nct6791) {
+               val = superio_inb(sioaddr, NCT6791_REG_HM_IO_SPACE_LOCK_ENABLE);
+               if (val & 0x10) {
+                       pr_info("Enabling hardware monitor logical device mappings.\n");
+                       superio_outb(sioaddr,
+                                    NCT6791_REG_HM_IO_SPACE_LOCK_ENABLE,
+                                    val & ~0x10);
+               }
+       }
 
        superio_exit(sioaddr);
-       pr_info("Found %s or compatible chip at %#x\n",
-               nct6775_sio_names[sio_data->kind], *addr);
+       pr_info("Found %s or compatible chip at %#x:%#x\n",
+               nct6775_sio_names[sio_data->kind], sioaddr, addr);
        sio_data->sioreg = sioaddr;
 
-       return 0;
+       return addr;
 }
 
 /*
@@ -4144,14 +4104,20 @@ static int __init nct6775_find(int sioaddr, unsigned short *addr,
  * track of the nct6775 driver. But since we platform_device_alloc(), we
  * must keep track of the device
  */
-static struct platform_device *pdev;
+static struct platform_device *pdev[2];
 
 static int __init sensors_nct6775_init(void)
 {
-       int err;
-       unsigned short address;
+       int i, err;
+       bool found = false;
+       int address;
        struct resource res;
        struct nct6775_sio_data sio_data;
+       int sioaddr[2] = { 0x2e, 0x4e };
+
+       err = platform_driver_register(&nct6775_driver);
+       if (err)
+               return err;
 
        /*
         * initialize sio_data->kind and sio_data->sioreg.
@@ -4160,64 +4126,71 @@ static int __init sensors_nct6775_init(void)
         * driver will probe 0x2e and 0x4e and auto-detect the presence of a
         * nct6775 hardware monitor, and call probe()
         */
-       if (nct6775_find(0x2e, &address, &sio_data) &&
-           nct6775_find(0x4e, &address, &sio_data))
-               return -ENODEV;
-
-       err = platform_driver_register(&nct6775_driver);
-       if (err)
-               goto exit;
+       for (i = 0; i < ARRAY_SIZE(pdev); i++) {
+               address = nct6775_find(sioaddr[i], &sio_data);
+               if (address <= 0)
+                       continue;
 
-       pdev = platform_device_alloc(DRVNAME, address);
-       if (!pdev) {
-               err = -ENOMEM;
-               pr_err("Device allocation failed\n");
-               goto exit_unregister;
-       }
+               found = true;
 
-       err = platform_device_add_data(pdev, &sio_data,
-                                      sizeof(struct nct6775_sio_data));
-       if (err) {
-               pr_err("Platform data allocation failed\n");
-               goto exit_device_put;
-       }
+               pdev[i] = platform_device_alloc(DRVNAME, address);
+               if (!pdev[i]) {
+                       err = -ENOMEM;
+                       goto exit_device_put;
+               }
 
-       memset(&res, 0, sizeof(res));
-       res.name = DRVNAME;
-       res.start = address + IOREGION_OFFSET;
-       res.end = address + IOREGION_OFFSET + IOREGION_LENGTH - 1;
-       res.flags = IORESOURCE_IO;
+               err = platform_device_add_data(pdev[i], &sio_data,
+                                              sizeof(struct nct6775_sio_data));
+               if (err)
+                       goto exit_device_put;
+
+               memset(&res, 0, sizeof(res));
+               res.name = DRVNAME;
+               res.start = address + IOREGION_OFFSET;
+               res.end = address + IOREGION_OFFSET + IOREGION_LENGTH - 1;
+               res.flags = IORESOURCE_IO;
+
+               err = acpi_check_resource_conflict(&res);
+               if (err) {
+                       platform_device_put(pdev[i]);
+                       pdev[i] = NULL;
+                       continue;
+               }
 
-       err = acpi_check_resource_conflict(&res);
-       if (err)
-               goto exit_device_put;
+               err = platform_device_add_resources(pdev[i], &res, 1);
+               if (err)
+                       goto exit_device_put;
 
-       err = platform_device_add_resources(pdev, &res, 1);
-       if (err) {
-               pr_err("Device resource addition failed (%d)\n", err);
-               goto exit_device_put;
+               /* platform_device_add calls probe() */
+               err = platform_device_add(pdev[i]);
+               if (err)
+                       goto exit_device_put;
        }
-
-       /* platform_device_add calls probe() */
-       err = platform_device_add(pdev);
-       if (err) {
-               pr_err("Device addition failed (%d)\n", err);
-               goto exit_device_put;
+       if (!found) {
+               err = -ENODEV;
+               goto exit_unregister;
        }
 
        return 0;
 
 exit_device_put:
-       platform_device_put(pdev);
+       for (i = 0; i < ARRAY_SIZE(pdev); i++) {
+               if (pdev[i])
+                       platform_device_put(pdev[i]);
+       }
 exit_unregister:
        platform_driver_unregister(&nct6775_driver);
-exit:
        return err;
 }
 
 static void __exit sensors_nct6775_exit(void)
 {
-       platform_device_unregister(pdev);
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(pdev); i++) {
+               if (pdev[i])
+                       platform_device_unregister(pdev[i]);
+       }
        platform_driver_unregister(&nct6775_driver);
 }
 
index 830a842d796af833c0b6a04785e1eff09b73599d..8c23203915af3644cd138b13ec99faef572b49b5 100644 (file)
@@ -424,7 +424,7 @@ static int ntc_thermistor_probe(struct platform_device *pdev)
        if (IS_ERR(pdata))
                return PTR_ERR(pdata);
        else if (pdata == NULL)
-               pdata = pdev->dev.platform_data;
+               pdata = dev_get_platdata(&pdev->dev);
 
        if (!pdata) {
                dev_err(&pdev->dev, "No platform init data supplied.\n");
index ea606860d2b203b8a9f679c62bc3d2cbfbfedbc0..6e6ea4437bb6947692ebd5b4fcc55e698a675b3f 100644 (file)
@@ -983,7 +983,7 @@ static int pc87427_request_regions(struct platform_device *pdev,
 
 static void pc87427_init_device(struct device *dev)
 {
-       struct pc87427_sio_data *sio_data = dev->platform_data;
+       struct pc87427_sio_data *sio_data = dev_get_platdata(dev);
        struct pc87427_data *data = dev_get_drvdata(dev);
        int i;
        u8 reg;
@@ -1075,7 +1075,7 @@ static void pc87427_remove_files(struct device *dev)
 
 static int pc87427_probe(struct platform_device *pdev)
 {
-       struct pc87427_sio_data *sio_data = pdev->dev.platform_data;
+       struct pc87427_sio_data *sio_data = dev_get_platdata(&pdev->dev);
        struct pc87427_data *data;
        int i, err, res_count;
 
index 9add60920ac00ed5aa70ca0e1113887836e0ae7d..9319fcf142d96656040eabcc0d411699f61f4b06 100644 (file)
@@ -1726,7 +1726,7 @@ int pmbus_do_probe(struct i2c_client *client, const struct i2c_device_id *id,
                   struct pmbus_driver_info *info)
 {
        struct device *dev = &client->dev;
-       const struct pmbus_platform_data *pdata = dev->platform_data;
+       const struct pmbus_platform_data *pdata = dev_get_platdata(dev);
        struct pmbus_data *data;
        int ret;
 
index a9f7e804f1e4f6c3d580ff6d444b3ff7506d589a..73bd64e8c30a8bc8ad6dfdd947cf7a33bf52f142 100644 (file)
@@ -165,7 +165,7 @@ static ssize_t s3c_hwmon_ch_show(struct device *dev,
 {
        struct sensor_device_attribute *sen_attr = to_sensor_dev_attr(attr);
        struct s3c_hwmon *hwmon = platform_get_drvdata(to_platform_device(dev));
-       struct s3c_hwmon_pdata *pdata = dev->platform_data;
+       struct s3c_hwmon_pdata *pdata = dev_get_platdata(dev);
        struct s3c_hwmon_chcfg *cfg;
        int ret;
 
@@ -194,7 +194,7 @@ static ssize_t s3c_hwmon_label_show(struct device *dev,
                                    char *buf)
 {
        struct sensor_device_attribute *sen_attr = to_sensor_dev_attr(attr);
-       struct s3c_hwmon_pdata *pdata = dev->platform_data;
+       struct s3c_hwmon_pdata *pdata = dev_get_platdata(dev);
        struct s3c_hwmon_chcfg *cfg;
 
        cfg = pdata->in[sen_attr->index];
@@ -274,7 +274,7 @@ static void s3c_hwmon_remove_attr(struct device *dev,
 */
 static int s3c_hwmon_probe(struct platform_device *dev)
 {
-       struct s3c_hwmon_pdata *pdata = dev->dev.platform_data;
+       struct s3c_hwmon_pdata *pdata = dev_get_platdata(&dev->dev);
        struct s3c_hwmon *hwmon;
        int ret = 0;
        int i;
index 2507f902fb7aa4ea450079a69bbbf8234e7e0ef4..97cd45a8432c03f7a87bceaac5bbe5c8e038683e 100644 (file)
@@ -940,11 +940,11 @@ static int sht15_probe(struct platform_device *pdev)
        data->dev = &pdev->dev;
        init_waitqueue_head(&data->wait_queue);
 
-       if (pdev->dev.platform_data == NULL) {
+       if (dev_get_platdata(&pdev->dev) == NULL) {
                dev_err(&pdev->dev, "no platform data supplied\n");
                return -EINVAL;
        }
-       data->pdata = pdev->dev.platform_data;
+       data->pdata = dev_get_platdata(&pdev->dev);
        data->supply_uv = data->pdata->supply_mv * 1000;
        if (data->pdata->checksum)
                data->checksumming = true;
@@ -957,7 +957,7 @@ static int sht15_probe(struct platform_device *pdev)
         * If a regulator is available,
         * query what the supply voltage actually is!
         */
-       data->reg = devm_regulator_get(data->dev, "vcc");
+       data->reg = devm_regulator_get_optional(data->dev, "vcc");
        if (!IS_ERR(data->reg)) {
                int voltage;
 
index 6d8255ccf07afb1a9d31955f82e91392a824a4ba..05cb814539cb609d42e7bd9bc84abe178cb6b7a7 100644 (file)
@@ -668,7 +668,7 @@ static void smsc47m1_remove_files(struct device *dev)
 static int __init smsc47m1_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
-       struct smsc47m1_sio_data *sio_data = dev->platform_data;
+       struct smsc47m1_sio_data *sio_data = dev_get_platdata(dev);
        struct smsc47m1_data *data;
        struct resource *res;
        int err;
@@ -940,7 +940,7 @@ exit_device:
 static void __exit sm_smsc47m1_exit(void)
 {
        platform_driver_unregister(&smsc47m1_driver);
-       smsc47m1_restore(pdev->dev.platform_data);
+       smsc47m1_restore(dev_get_platdata(&pdev->dev));
        platform_device_unregister(pdev);
 }
 
index 004801e6fbb9756d8bdeed3aac25899a7f00edaa..23ff210513d314da9fa09e944e30c4338f9d1313 100644 (file)
@@ -673,7 +673,7 @@ static void w83627ehf_write_fan_div(struct w83627ehf_data *data, int nr)
 static void w83627ehf_write_fan_div_common(struct device *dev,
                                           struct w83627ehf_data *data, int nr)
 {
-       struct w83627ehf_sio_data *sio_data = dev->platform_data;
+       struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev);
 
        if (sio_data->kind == nct6776)
                ; /* no dividers, do nothing */
@@ -724,7 +724,7 @@ static void w83627ehf_update_fan_div(struct w83627ehf_data *data)
 static void w83627ehf_update_fan_div_common(struct device *dev,
                                            struct w83627ehf_data *data)
 {
-       struct w83627ehf_sio_data *sio_data = dev->platform_data;
+       struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev);
 
        if (sio_data->kind == nct6776)
                ; /* no dividers, do nothing */
@@ -781,7 +781,7 @@ static void w83627ehf_update_pwm(struct w83627ehf_data *data)
 static void w83627ehf_update_pwm_common(struct device *dev,
                                        struct w83627ehf_data *data)
 {
-       struct w83627ehf_sio_data *sio_data = dev->platform_data;
+       struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev);
 
        if (sio_data->kind == nct6775 || sio_data->kind == nct6776)
                nct6775_update_pwm(data);
@@ -792,7 +792,7 @@ static void w83627ehf_update_pwm_common(struct device *dev,
 static struct w83627ehf_data *w83627ehf_update_device(struct device *dev)
 {
        struct w83627ehf_data *data = dev_get_drvdata(dev);
-       struct w83627ehf_sio_data *sio_data = dev->platform_data;
+       struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev);
 
        int i;
 
@@ -1392,7 +1392,7 @@ store_pwm_mode(struct device *dev, struct device_attribute *attr,
 {
        struct w83627ehf_data *data = dev_get_drvdata(dev);
        struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
-       struct w83627ehf_sio_data *sio_data = dev->platform_data;
+       struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev);
        int nr = sensor_attr->index;
        unsigned long val;
        int err;
@@ -1448,7 +1448,7 @@ store_pwm_enable(struct device *dev, struct device_attribute *attr,
                        const char *buf, size_t count)
 {
        struct w83627ehf_data *data = dev_get_drvdata(dev);
-       struct w83627ehf_sio_data *sio_data = dev->platform_data;
+       struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev);
        struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
        int nr = sensor_attr->index;
        unsigned long val;
@@ -1527,7 +1527,7 @@ store_tolerance(struct device *dev, struct device_attribute *attr,
                        const char *buf, size_t count)
 {
        struct w83627ehf_data *data = dev_get_drvdata(dev);
-       struct w83627ehf_sio_data *sio_data = dev->platform_data;
+       struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev);
        struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
        int nr = sensor_attr->index;
        u16 reg;
@@ -2065,7 +2065,7 @@ w83627ehf_check_fan_inputs(const struct w83627ehf_sio_data *sio_data,
 static int w83627ehf_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
-       struct w83627ehf_sio_data *sio_data = dev->platform_data;
+       struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev);
        struct w83627ehf_data *data;
        struct resource *res;
        u8 en_vrm10;
@@ -2618,7 +2618,7 @@ static int w83627ehf_remove(struct platform_device *pdev)
 static int w83627ehf_suspend(struct device *dev)
 {
        struct w83627ehf_data *data = w83627ehf_update_device(dev);
-       struct w83627ehf_sio_data *sio_data = dev->platform_data;
+       struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev);
 
        mutex_lock(&data->update_lock);
        data->vbat = w83627ehf_read_value(data, W83627EHF_REG_VBAT);
@@ -2634,7 +2634,7 @@ static int w83627ehf_suspend(struct device *dev)
 static int w83627ehf_resume(struct device *dev)
 {
        struct w83627ehf_data *data = dev_get_drvdata(dev);
-       struct w83627ehf_sio_data *sio_data = dev->platform_data;
+       struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev);
        int i;
 
        mutex_lock(&data->update_lock);
@@ -2694,6 +2694,8 @@ static int w83627ehf_resume(struct device *dev)
 static const struct dev_pm_ops w83627ehf_dev_pm_ops = {
        .suspend = w83627ehf_suspend,
        .resume = w83627ehf_resume,
+       .freeze = w83627ehf_suspend,
+       .restore = w83627ehf_resume,
 };
 
 #define W83627EHF_DEV_PM_OPS   (&w83627ehf_dev_pm_ops)
index 3b9ef2d23452801166e2f278b1014ace47b11639..cb9cd326ecb564147ec64474c14a296b1be764dd 100644 (file)
@@ -1415,7 +1415,7 @@ static const struct attribute_group w83627hf_group_opt = {
 static int w83627hf_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
-       struct w83627hf_sio_data *sio_data = dev->platform_data;
+       struct w83627hf_sio_data *sio_data = dev_get_platdata(dev);
        struct w83627hf_data *data;
        struct resource *res;
        int err, i;
@@ -1636,7 +1636,7 @@ static int w83627hf_read_value(struct w83627hf_data *data, u16 reg)
 
 static int w83627thf_read_gpio5(struct platform_device *pdev)
 {
-       struct w83627hf_sio_data *sio_data = pdev->dev.platform_data;
+       struct w83627hf_sio_data *sio_data = dev_get_platdata(&pdev->dev);
        int res = 0xff, sel;
 
        superio_enter(sio_data);
@@ -1669,7 +1669,7 @@ exit:
 
 static int w83687thf_read_vid(struct platform_device *pdev)
 {
-       struct w83627hf_sio_data *sio_data = pdev->dev.platform_data;
+       struct w83627hf_sio_data *sio_data = dev_get_platdata(&pdev->dev);
        int res = 0xff;
 
        superio_enter(sio_data);
index 0b804895be43361a9230b4632e101945091298ad..5febb43cb4c1032de659cbf3ec3ffa652c8c832f 100644 (file)
@@ -2,7 +2,7 @@
  * w83792d.c - Part of lm_sensors, Linux kernel modules for hardware
  *            monitoring
  * Copyright (C) 2004, 2005 Winbond Electronics Corp.
- *                         Chunhao Huang <DZShen@Winbond.com.tw>,
+ *                         Shane Huang,
  *                         Rudolf Marek <r.marek@assembler.cz>
  *
  * This program is free software; you can redistribute it and/or modify
@@ -1665,6 +1665,6 @@ static void w83792d_print_debug(struct w83792d_data *data, struct device *dev)
 
 module_i2c_driver(w83792d_driver);
 
-MODULE_AUTHOR("Chunhao Huang @ Winbond <DZShen@Winbond.com.tw>");
+MODULE_AUTHOR("Shane Huang (Winbond)");
 MODULE_DESCRIPTION("W83792AD/D driver for linux-2.6");
 MODULE_LICENSE("GPL");
index dc6dea614abd68dca0e32f7d6948b9c8e40696ef..fcdd321f709ecb2f284c3afefce962a2b1a8a42c 100644 (file)
@@ -385,7 +385,7 @@ config I2C_CPM
 
 config I2C_DAVINCI
        tristate "DaVinci I2C driver"
-       depends on ARCH_DAVINCI
+       depends on ARCH_DAVINCI || ARCH_KEYSTONE
        help
          Support for TI DaVinci I2C controller driver.
 
index 13ea1c29873d61f4028f48b7d8f49d7fb9a1afaa..35a473ba3d81d5afb9cb0188f2141c2dc417c737 100644 (file)
@@ -582,6 +582,7 @@ static struct i2c_algorithm bfin_twi_algorithm = {
        .functionality = bfin_twi_functionality,
 };
 
+#ifdef CONFIG_PM_SLEEP
 static int i2c_bfin_twi_suspend(struct device *dev)
 {
        struct bfin_twi_iface *iface = dev_get_drvdata(dev);
@@ -619,6 +620,10 @@ static int i2c_bfin_twi_resume(struct device *dev)
 
 static SIMPLE_DEV_PM_OPS(i2c_bfin_twi_pm,
                         i2c_bfin_twi_suspend, i2c_bfin_twi_resume);
+#define I2C_BFIN_TWI_PM_OPS    (&i2c_bfin_twi_pm)
+#else
+#define I2C_BFIN_TWI_PM_OPS    NULL
+#endif
 
 static int i2c_bfin_twi_probe(struct platform_device *pdev)
 {
@@ -669,8 +674,9 @@ static int i2c_bfin_twi_probe(struct platform_device *pdev)
        p_adap->timeout = 5 * HZ;
        p_adap->retries = 3;
 
-       rc = peripheral_request_list((unsigned short *)pdev->dev.platform_data,
-                                       "i2c-bfin-twi");
+       rc = peripheral_request_list(
+                       (unsigned short *)dev_get_platdata(&pdev->dev),
+                       "i2c-bfin-twi");
        if (rc) {
                dev_err(&pdev->dev, "Can't setup pin mux!\n");
                goto out_error_pin_mux;
@@ -717,7 +723,7 @@ out_error_add_adapter:
        free_irq(iface->irq, iface);
 out_error_req_irq:
 out_error_no_irq:
-       peripheral_free_list((unsigned short *)pdev->dev.platform_data);
+       peripheral_free_list((unsigned short *)dev_get_platdata(&pdev->dev));
 out_error_pin_mux:
        iounmap(iface->regs_base);
 out_error_ioremap:
@@ -733,7 +739,7 @@ static int i2c_bfin_twi_remove(struct platform_device *pdev)
 
        i2c_del_adapter(&(iface->adap));
        free_irq(iface->irq, iface);
-       peripheral_free_list((unsigned short *)pdev->dev.platform_data);
+       peripheral_free_list((unsigned short *)dev_get_platdata(&pdev->dev));
        iounmap(iface->regs_base);
        kfree(iface);
 
@@ -746,7 +752,7 @@ static struct platform_driver i2c_bfin_twi_driver = {
        .driver         = {
                .name   = "i2c-bfin-twi",
                .owner  = THIS_MODULE,
-               .pm     = &i2c_bfin_twi_pm,
+               .pm     = I2C_BFIN_TWI_PM_OPS,
        },
 };
 
index 1be13ac11dc5d615050deb2e03c58d530a99fb30..2d46f13adfdfc965561db40bdeafa55f6c445978 100644 (file)
@@ -233,8 +233,9 @@ static int cbus_i2c_probe(struct platform_device *pdev)
                chost->clk_gpio = of_get_gpio(dnode, 0);
                chost->dat_gpio = of_get_gpio(dnode, 1);
                chost->sel_gpio = of_get_gpio(dnode, 2);
-       } else if (pdev->dev.platform_data) {
-               struct i2c_cbus_platform_data *pdata = pdev->dev.platform_data;
+       } else if (dev_get_platdata(&pdev->dev)) {
+               struct i2c_cbus_platform_data *pdata =
+                       dev_get_platdata(&pdev->dev);
                chost->clk_gpio = pdata->clk_gpio;
                chost->dat_gpio = pdata->dat_gpio;
                chost->sel_gpio = pdata->sel_gpio;
index fa556057d22461a9aece9de831082f50e3b44761..cf90bfff9676138e3da1026dbeb9e678c87d50f9 100644 (file)
@@ -40,8 +40,6 @@
 #include <linux/gpio.h>
 #include <linux/of_i2c.h>
 #include <linux/of_device.h>
-
-#include <mach/hardware.h>
 #include <linux/platform_data/i2c-davinci.h>
 
 /* ----- global defines ----------------------------------------------- */
@@ -665,7 +663,7 @@ static int davinci_i2c_probe(struct platform_device *pdev)
 #endif
        dev->dev = &pdev->dev;
        dev->irq = irq->start;
-       dev->pdata = dev->dev->platform_data;
+       dev->pdata = dev_get_platdata(&dev->dev);
        platform_set_drvdata(pdev, dev);
 
        if (!dev->pdata && pdev->dev.of_node) {
index ad46616de29ec8638aeb2a17c6ff231d59c57596..f325ec7abfb1cf8c0c8d08f925c9a2f8b015890f 100644 (file)
@@ -416,6 +416,7 @@ i2c_dw_xfer_msg(struct dw_i2c_dev *dev)
        u32 addr = msgs[dev->msg_write_idx].addr;
        u32 buf_len = dev->tx_buf_len;
        u8 *buf = dev->tx_buf;
+       bool need_restart = false;
 
        intr_mask = DW_IC_INTR_DEFAULT_MASK;
 
@@ -443,6 +444,14 @@ i2c_dw_xfer_msg(struct dw_i2c_dev *dev)
                        /* new i2c_msg */
                        buf = msgs[dev->msg_write_idx].buf;
                        buf_len = msgs[dev->msg_write_idx].len;
+
+                       /* If both IC_EMPTYFIFO_HOLD_MASTER_EN and
+                        * IC_RESTART_EN are set, we must manually
+                        * set the restart bit between messages.
+                        */
+                       if ((dev->master_cfg & DW_IC_CON_RESTART_EN) &&
+                                       (dev->msg_write_idx > 0))
+                               need_restart = true;
                }
 
                tx_limit = dev->tx_fifo_depth - dw_readl(dev, DW_IC_TXFLR);
@@ -461,6 +470,11 @@ i2c_dw_xfer_msg(struct dw_i2c_dev *dev)
                            buf_len == 1)
                                cmd |= BIT(9);
 
+                       if (need_restart) {
+                               cmd |= BIT(10);
+                               need_restart = false;
+                       }
+
                        if (msgs[dev->msg_write_idx].flags & I2C_M_RD) {
 
                                /* avoid rx buffer overrun */
index 4c5fadabe49df3236e479c86105d7e479aa90d79..64ffb908641cafad0bce655ce922247640577254 100644 (file)
@@ -207,7 +207,7 @@ static const struct of_device_id dw_i2c_of_match[] = {
 MODULE_DEVICE_TABLE(of, dw_i2c_of_match);
 #endif
 
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 static int dw_i2c_suspend(struct device *dev)
 {
        struct platform_device *pdev = to_platform_device(dev);
@@ -228,9 +228,12 @@ static int dw_i2c_resume(struct device *dev)
 
        return 0;
 }
-#endif
 
 static SIMPLE_DEV_PM_OPS(dw_i2c_dev_pm_ops, dw_i2c_suspend, dw_i2c_resume);
+#define DW_I2C_DEV_PM_OPS      (&dw_i2c_dev_pm_ops)
+#else
+#define DW_I2C_DEV_PM_OPS      NULL
+#endif
 
 /* work with hotplug and coldplug */
 MODULE_ALIAS("platform:i2c_designware");
@@ -242,7 +245,7 @@ static struct platform_driver dw_i2c_driver = {
                .owner  = THIS_MODULE,
                .of_match_table = of_match_ptr(dw_i2c_of_match),
                .acpi_match_table = ACPI_PTR(dw_i2c_acpi_match),
-               .pm     = &dw_i2c_dev_pm_ops,
+               .pm     = DW_I2C_DEV_PM_OPS,
        },
 };
 
index bc6e139c6e7f3f6b91e1f054c7569a5191a08b9a..8cdb4f743e19882b31e783d81643055a4babe0c8 100644 (file)
@@ -137,9 +137,9 @@ static int i2c_gpio_probe(struct platform_device *pdev)
                if (ret)
                        return ret;
        } else {
-               if (!pdev->dev.platform_data)
+               if (!dev_get_platdata(&pdev->dev))
                        return -ENXIO;
-               pdata = pdev->dev.platform_data;
+               pdata = dev_get_platdata(&pdev->dev);
                sda_pin = pdata->sda_pin;
                scl_pin = pdata->scl_pin;
        }
@@ -171,7 +171,7 @@ static int i2c_gpio_probe(struct platform_device *pdev)
                pdata->scl_pin = scl_pin;
                of_i2c_gpio_get_props(pdev->dev.of_node, pdata);
        } else {
-               memcpy(pdata, pdev->dev.platform_data, sizeof(*pdata));
+               memcpy(pdata, dev_get_platdata(&pdev->dev), sizeof(*pdata));
        }
 
        if (pdata->sda_is_open_drain) {
index e24279725d363acb594e452b53b788a10058026d..a231d2fd91ce5d8406c8eb4ab021461e482531c0 100644 (file)
@@ -30,6 +30,8 @@
  *     Copyright (C) 2007 RightHand Technologies, Inc.
  *     Copyright (C) 2008 Darius Augulis <darius.augulis at teltonika.lt>
  *
+ *     Copyright 2013 Freescale Semiconductor, Inc.
+ *
  */
 
 /** Includes *******************************************************************
 /* Default value */
 #define IMX_I2C_BIT_RATE       100000  /* 100kHz */
 
-/* IMX I2C registers */
+/* IMX I2C registers:
+ * the I2C register offset is different between SoCs,
+ * to provid support for all these chips, split the
+ * register offset into a fixed base address and a
+ * variable shift value, then the full register offset
+ * will be calculated by
+ * reg_off = ( reg_base_addr << reg_shift)
+ */
 #define IMX_I2C_IADR   0x00    /* i2c slave address */
-#define IMX_I2C_IFDR   0x04    /* i2c frequency divider */
-#define IMX_I2C_I2CR   0x08    /* i2c control */
-#define IMX_I2C_I2SR   0x0C    /* i2c status */
-#define IMX_I2C_I2DR   0x10    /* i2c transfer data */
+#define IMX_I2C_IFDR   0x01    /* i2c frequency divider */
+#define IMX_I2C_I2CR   0x02    /* i2c control */
+#define IMX_I2C_I2SR   0x03    /* i2c status */
+#define IMX_I2C_I2DR   0x04    /* i2c transfer data */
+
+#define IMX_I2C_REGSHIFT       2
+#define VF610_I2C_REGSHIFT     0
 
 /* Bits of IMX I2C registers */
 #define I2SR_RXAK      0x01
 #define I2CR_IIEN      0x40
 #define I2CR_IEN       0x80
 
+/* register-bit operating codes that differ between SoCs:
+ * 1) I2SR: the interrupt-flag clear operation differs between SoCs:
+ * - write zero to clear (w0c) the INT flag on i.MX,
+ * - but write one to clear (w1c) the INT flag on Vybrid.
+ * 2) I2CR: the I2C module enable operation also differs between SoCs:
+ * - setting the I2CR_IEN bit enables the module on i.MX,
+ * - but clearing the I2CR_IEN bit enables the module on Vybrid.
+ */
+#define I2SR_CLR_OPCODE_W0C    0x0
+#define I2SR_CLR_OPCODE_W1C    (I2SR_IAL | I2SR_IIF)
+#define I2CR_IEN_OPCODE_0      0x0
+#define I2CR_IEN_OPCODE_1      I2CR_IEN
+
 /** Variables ******************************************************************
 *******************************************************************************/
 
  *
  * Duplicated divider values removed from list
  */
+struct imx_i2c_clk_pair {
+       u16     div;
+       u16     val;
+};
 
-static u16 __initdata i2c_clk_div[50][2] = {
+static struct imx_i2c_clk_pair imx_i2c_clk_div[] = {
        { 22,   0x20 }, { 24,   0x21 }, { 26,   0x22 }, { 28,   0x23 },
        { 30,   0x00 }, { 32,   0x24 }, { 36,   0x25 }, { 40,   0x26 },
        { 42,   0x03 }, { 44,   0x27 }, { 48,   0x28 }, { 52,   0x05 },
@@ -112,9 +141,38 @@ static u16 __initdata i2c_clk_div[50][2] = {
        { 3072, 0x1E }, { 3840, 0x1F }
 };
 
+/* Vybrid VF610 clock divider, register value pairs */
+static struct imx_i2c_clk_pair vf610_i2c_clk_div[] = {
+       { 20,   0x00 }, { 22,   0x01 }, { 24,   0x02 }, { 26,   0x03 },
+       { 28,   0x04 }, { 30,   0x05 }, { 32,   0x09 }, { 34,   0x06 },
+       { 36,   0x0A }, { 40,   0x07 }, { 44,   0x0C }, { 48,   0x0D },
+       { 52,   0x43 }, { 56,   0x0E }, { 60,   0x45 }, { 64,   0x12 },
+       { 68,   0x0F }, { 72,   0x13 }, { 80,   0x14 }, { 88,   0x15 },
+       { 96,   0x19 }, { 104,  0x16 }, { 112,  0x1A }, { 128,  0x17 },
+       { 136,  0x4F }, { 144,  0x1C }, { 160,  0x1D }, { 176,  0x55 },
+       { 192,  0x1E }, { 208,  0x56 }, { 224,  0x22 }, { 228,  0x24 },
+       { 240,  0x1F }, { 256,  0x23 }, { 288,  0x5C }, { 320,  0x25 },
+       { 384,  0x26 }, { 448,  0x2A }, { 480,  0x27 }, { 512,  0x2B },
+       { 576,  0x2C }, { 640,  0x2D }, { 768,  0x31 }, { 896,  0x32 },
+       { 960,  0x2F }, { 1024, 0x33 }, { 1152, 0x34 }, { 1280, 0x35 },
+       { 1536, 0x36 }, { 1792, 0x3A }, { 1920, 0x37 }, { 2048, 0x3B },
+       { 2304, 0x3C }, { 2560, 0x3D }, { 3072, 0x3E }, { 3584, 0x7A },
+       { 3840, 0x3F }, { 4096, 0x7B }, { 5120, 0x7D }, { 6144, 0x7E },
+};
+
 enum imx_i2c_type {
        IMX1_I2C,
        IMX21_I2C,
+       VF610_I2C,
+};
+
+struct imx_i2c_hwdata {
+       enum imx_i2c_type       devtype;
+       unsigned                regshift;
+       struct imx_i2c_clk_pair *clk_div;
+       unsigned                ndivs;
+       unsigned                i2sr_clr_opcode;
+       unsigned                i2cr_ien_opcode;
 };
 
 struct imx_i2c_struct {
@@ -126,16 +184,46 @@ struct imx_i2c_struct {
        unsigned int            disable_delay;
        int                     stopped;
        unsigned int            ifdr; /* IMX_I2C_IFDR */
-       enum imx_i2c_type       devtype;
+       const struct imx_i2c_hwdata     *hwdata;
+};
+
+static const struct imx_i2c_hwdata imx1_i2c_hwdata  = {
+       .devtype                = IMX1_I2C,
+       .regshift               = IMX_I2C_REGSHIFT,
+       .clk_div                = imx_i2c_clk_div,
+       .ndivs                  = ARRAY_SIZE(imx_i2c_clk_div),
+       .i2sr_clr_opcode        = I2SR_CLR_OPCODE_W0C,
+       .i2cr_ien_opcode        = I2CR_IEN_OPCODE_1,
+
+};
+
+static const struct imx_i2c_hwdata imx21_i2c_hwdata  = {
+       .devtype                = IMX21_I2C,
+       .regshift               = IMX_I2C_REGSHIFT,
+       .clk_div                = imx_i2c_clk_div,
+       .ndivs                  = ARRAY_SIZE(imx_i2c_clk_div),
+       .i2sr_clr_opcode        = I2SR_CLR_OPCODE_W0C,
+       .i2cr_ien_opcode        = I2CR_IEN_OPCODE_1,
+
+};
+
+static struct imx_i2c_hwdata vf610_i2c_hwdata = {
+       .devtype                = VF610_I2C,
+       .regshift               = VF610_I2C_REGSHIFT,
+       .clk_div                = vf610_i2c_clk_div,
+       .ndivs                  = ARRAY_SIZE(vf610_i2c_clk_div),
+       .i2sr_clr_opcode        = I2SR_CLR_OPCODE_W1C,
+       .i2cr_ien_opcode        = I2CR_IEN_OPCODE_0,
+
 };
 
 static struct platform_device_id imx_i2c_devtype[] = {
        {
                .name = "imx1-i2c",
-               .driver_data = IMX1_I2C,
+               .driver_data = (kernel_ulong_t)&imx1_i2c_hwdata,
        }, {
                .name = "imx21-i2c",
-               .driver_data = IMX21_I2C,
+               .driver_data = (kernel_ulong_t)&imx21_i2c_hwdata,
        }, {
                /* sentinel */
        }
@@ -143,15 +231,28 @@ static struct platform_device_id imx_i2c_devtype[] = {
 MODULE_DEVICE_TABLE(platform, imx_i2c_devtype);
 
 static const struct of_device_id i2c_imx_dt_ids[] = {
-       { .compatible = "fsl,imx1-i2c", .data = &imx_i2c_devtype[IMX1_I2C], },
-       { .compatible = "fsl,imx21-i2c", .data = &imx_i2c_devtype[IMX21_I2C], },
+       { .compatible = "fsl,imx1-i2c", .data = &imx1_i2c_hwdata, },
+       { .compatible = "fsl,imx21-i2c", .data = &imx21_i2c_hwdata, },
+       { .compatible = "fsl,vf610-i2c", .data = &vf610_i2c_hwdata, },
        { /* sentinel */ }
 };
 MODULE_DEVICE_TABLE(of, i2c_imx_dt_ids);
 
 static inline int is_imx1_i2c(struct imx_i2c_struct *i2c_imx)
 {
-       return i2c_imx->devtype == IMX1_I2C;
+       return i2c_imx->hwdata->devtype == IMX1_I2C;
+}
+
+static inline void imx_i2c_write_reg(unsigned int val,
+               struct imx_i2c_struct *i2c_imx, unsigned int reg)
+{
+       writeb(val, i2c_imx->base + (reg << i2c_imx->hwdata->regshift));
+}
+
+static inline unsigned char imx_i2c_read_reg(struct imx_i2c_struct *i2c_imx,
+               unsigned int reg)
+{
+       return readb(i2c_imx->base + (reg << i2c_imx->hwdata->regshift));
 }
 
 /** Functions for IMX I2C adapter driver ***************************************
@@ -165,7 +266,7 @@ static int i2c_imx_bus_busy(struct imx_i2c_struct *i2c_imx, int for_busy)
        dev_dbg(&i2c_imx->adapter.dev, "<%s>\n", __func__);
 
        while (1) {
-               temp = readb(i2c_imx->base + IMX_I2C_I2SR);
+               temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2SR);
                if (for_busy && (temp & I2SR_IBB))
                        break;
                if (!for_busy && !(temp & I2SR_IBB))
@@ -196,7 +297,7 @@ static int i2c_imx_trx_complete(struct imx_i2c_struct *i2c_imx)
 
 static int i2c_imx_acked(struct imx_i2c_struct *i2c_imx)
 {
-       if (readb(i2c_imx->base + IMX_I2C_I2SR) & I2SR_RXAK) {
+       if (imx_i2c_read_reg(i2c_imx, IMX_I2C_I2SR) & I2SR_RXAK) {
                dev_dbg(&i2c_imx->adapter.dev, "<%s> No ACK\n", __func__);
                return -EIO;  /* No ACK */
        }
@@ -213,25 +314,25 @@ static int i2c_imx_start(struct imx_i2c_struct *i2c_imx)
        dev_dbg(&i2c_imx->adapter.dev, "<%s>\n", __func__);
 
        clk_prepare_enable(i2c_imx->clk);
-       writeb(i2c_imx->ifdr, i2c_imx->base + IMX_I2C_IFDR);
+       imx_i2c_write_reg(i2c_imx->ifdr, i2c_imx, IMX_I2C_IFDR);
        /* Enable I2C controller */
-       writeb(0, i2c_imx->base + IMX_I2C_I2SR);
-       writeb(I2CR_IEN, i2c_imx->base + IMX_I2C_I2CR);
+       imx_i2c_write_reg(i2c_imx->hwdata->i2sr_clr_opcode, i2c_imx, IMX_I2C_I2SR);
+       imx_i2c_write_reg(i2c_imx->hwdata->i2cr_ien_opcode, i2c_imx, IMX_I2C_I2CR);
 
        /* Wait controller to be stable */
        udelay(50);
 
        /* Start I2C transaction */
-       temp = readb(i2c_imx->base + IMX_I2C_I2CR);
+       temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR);
        temp |= I2CR_MSTA;
-       writeb(temp, i2c_imx->base + IMX_I2C_I2CR);
+       imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR);
        result = i2c_imx_bus_busy(i2c_imx, 1);
        if (result)
                return result;
        i2c_imx->stopped = 0;
 
        temp |= I2CR_IIEN | I2CR_MTX | I2CR_TXAK;
-       writeb(temp, i2c_imx->base + IMX_I2C_I2CR);
+       imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR);
        return result;
 }
 
@@ -242,9 +343,9 @@ static void i2c_imx_stop(struct imx_i2c_struct *i2c_imx)
        if (!i2c_imx->stopped) {
                /* Stop I2C transaction */
                dev_dbg(&i2c_imx->adapter.dev, "<%s>\n", __func__);
-               temp = readb(i2c_imx->base + IMX_I2C_I2CR);
+               temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR);
                temp &= ~(I2CR_MSTA | I2CR_MTX);
-               writeb(temp, i2c_imx->base + IMX_I2C_I2CR);
+               imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR);
        }
        if (is_imx1_i2c(i2c_imx)) {
                /*
@@ -260,13 +361,15 @@ static void i2c_imx_stop(struct imx_i2c_struct *i2c_imx)
        }
 
        /* Disable I2C controller */
-       writeb(0, i2c_imx->base + IMX_I2C_I2CR);
+       temp = i2c_imx->hwdata->i2cr_ien_opcode ^ I2CR_IEN,
+       imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR);
        clk_disable_unprepare(i2c_imx->clk);
 }
 
 static void __init i2c_imx_set_clk(struct imx_i2c_struct *i2c_imx,
                                                        unsigned int rate)
 {
+       struct imx_i2c_clk_pair *i2c_clk_div = i2c_imx->hwdata->clk_div;
        unsigned int i2c_clk_rate;
        unsigned int div;
        int i;
@@ -274,15 +377,15 @@ static void __init i2c_imx_set_clk(struct imx_i2c_struct *i2c_imx,
        /* Divider value calculation */
        i2c_clk_rate = clk_get_rate(i2c_imx->clk);
        div = (i2c_clk_rate + rate - 1) / rate;
-       if (div < i2c_clk_div[0][0])
+       if (div < i2c_clk_div[0].div)
                i = 0;
-       else if (div > i2c_clk_div[ARRAY_SIZE(i2c_clk_div) - 1][0])
-               i = ARRAY_SIZE(i2c_clk_div) - 1;
+       else if (div > i2c_clk_div[i2c_imx->hwdata->ndivs - 1].div)
+               i = i2c_imx->hwdata->ndivs - 1;
        else
-               for (i = 0; i2c_clk_div[i][0] < div; i++);
+               for (i = 0; i2c_clk_div[i].div < div; i++);
 
        /* Store divider value */
-       i2c_imx->ifdr = i2c_clk_div[i][1];
+       i2c_imx->ifdr = i2c_clk_div[i].val;
 
        /*
         * There dummy delay is calculated.
@@ -290,7 +393,7 @@ static void __init i2c_imx_set_clk(struct imx_i2c_struct *i2c_imx,
         * This delay is used in I2C bus disable function
         * to fix chip hardware bug.
         */
-       i2c_imx->disable_delay = (500000U * i2c_clk_div[i][0]
+       i2c_imx->disable_delay = (500000U * i2c_clk_div[i].div
                + (i2c_clk_rate / 2) - 1) / (i2c_clk_rate / 2);
 
        /* dev_dbg() can't be used, because adapter is not yet registered */
@@ -298,7 +401,7 @@ static void __init i2c_imx_set_clk(struct imx_i2c_struct *i2c_imx,
        dev_dbg(&i2c_imx->adapter.dev, "<%s> I2C_CLK=%d, REQ DIV=%d\n",
                __func__, i2c_clk_rate, div);
        dev_dbg(&i2c_imx->adapter.dev, "<%s> IFDR[IC]=0x%x, REAL DIV=%d\n",
-               __func__, i2c_clk_div[i][1], i2c_clk_div[i][0]);
+               __func__, i2c_clk_div[i].val, i2c_clk_div[i].div);
 #endif
 }
 
@@ -307,12 +410,13 @@ static irqreturn_t i2c_imx_isr(int irq, void *dev_id)
        struct imx_i2c_struct *i2c_imx = dev_id;
        unsigned int temp;
 
-       temp = readb(i2c_imx->base + IMX_I2C_I2SR);
+       temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2SR);
        if (temp & I2SR_IIF) {
                /* save status register */
                i2c_imx->i2csr = temp;
                temp &= ~I2SR_IIF;
-               writeb(temp, i2c_imx->base + IMX_I2C_I2SR);
+               temp |= (i2c_imx->hwdata->i2sr_clr_opcode & I2SR_IIF);
+               imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2SR);
                wake_up(&i2c_imx->queue);
                return IRQ_HANDLED;
        }
@@ -328,7 +432,7 @@ static int i2c_imx_write(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs)
                __func__, msgs->addr << 1);
 
        /* write slave address */
-       writeb(msgs->addr << 1, i2c_imx->base + IMX_I2C_I2DR);
+       imx_i2c_write_reg(msgs->addr << 1, i2c_imx, IMX_I2C_I2DR);
        result = i2c_imx_trx_complete(i2c_imx);
        if (result)
                return result;
@@ -342,7 +446,7 @@ static int i2c_imx_write(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs)
                dev_dbg(&i2c_imx->adapter.dev,
                        "<%s> write byte: B%d=0x%X\n",
                        __func__, i, msgs->buf[i]);
-               writeb(msgs->buf[i], i2c_imx->base + IMX_I2C_I2DR);
+               imx_i2c_write_reg(msgs->buf[i], i2c_imx, IMX_I2C_I2DR);
                result = i2c_imx_trx_complete(i2c_imx);
                if (result)
                        return result;
@@ -363,7 +467,7 @@ static int i2c_imx_read(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs)
                __func__, (msgs->addr << 1) | 0x01);
 
        /* write slave address */
-       writeb((msgs->addr << 1) | 0x01, i2c_imx->base + IMX_I2C_I2DR);
+       imx_i2c_write_reg((msgs->addr << 1) | 0x01, i2c_imx, IMX_I2C_I2DR);
        result = i2c_imx_trx_complete(i2c_imx);
        if (result)
                return result;
@@ -374,12 +478,12 @@ static int i2c_imx_read(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs)
        dev_dbg(&i2c_imx->adapter.dev, "<%s> setup bus\n", __func__);
 
        /* setup bus to read data */
-       temp = readb(i2c_imx->base + IMX_I2C_I2CR);
+       temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR);
        temp &= ~I2CR_MTX;
        if (msgs->len - 1)
                temp &= ~I2CR_TXAK;
-       writeb(temp, i2c_imx->base + IMX_I2C_I2CR);
-       readb(i2c_imx->base + IMX_I2C_I2DR); /* dummy read */
+       imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR);
+       imx_i2c_read_reg(i2c_imx, IMX_I2C_I2DR); /* dummy read */
 
        dev_dbg(&i2c_imx->adapter.dev, "<%s> read data\n", __func__);
 
@@ -393,19 +497,19 @@ static int i2c_imx_read(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs)
                           controller from generating another clock cycle */
                        dev_dbg(&i2c_imx->adapter.dev,
                                "<%s> clear MSTA\n", __func__);
-                       temp = readb(i2c_imx->base + IMX_I2C_I2CR);
+                       temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR);
                        temp &= ~(I2CR_MSTA | I2CR_MTX);
-                       writeb(temp, i2c_imx->base + IMX_I2C_I2CR);
+                       imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR);
                        i2c_imx_bus_busy(i2c_imx, 0);
                        i2c_imx->stopped = 1;
                } else if (i == (msgs->len - 2)) {
                        dev_dbg(&i2c_imx->adapter.dev,
                                "<%s> set TXAK\n", __func__);
-                       temp = readb(i2c_imx->base + IMX_I2C_I2CR);
+                       temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR);
                        temp |= I2CR_TXAK;
-                       writeb(temp, i2c_imx->base + IMX_I2C_I2CR);
+                       imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR);
                }
-               msgs->buf[i] = readb(i2c_imx->base + IMX_I2C_I2DR);
+               msgs->buf[i] = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2DR);
                dev_dbg(&i2c_imx->adapter.dev,
                        "<%s> read byte: B%d=0x%X\n",
                        __func__, i, msgs->buf[i]);
@@ -432,9 +536,9 @@ static int i2c_imx_xfer(struct i2c_adapter *adapter,
                if (i) {
                        dev_dbg(&i2c_imx->adapter.dev,
                                "<%s> repeated start\n", __func__);
-                       temp = readb(i2c_imx->base + IMX_I2C_I2CR);
+                       temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR);
                        temp |= I2CR_RSTA;
-                       writeb(temp, i2c_imx->base + IMX_I2C_I2CR);
+                       imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR);
                        result =  i2c_imx_bus_busy(i2c_imx, 1);
                        if (result)
                                goto fail0;
@@ -443,13 +547,13 @@ static int i2c_imx_xfer(struct i2c_adapter *adapter,
                        "<%s> transfer message: %d\n", __func__, i);
                /* write/read data */
 #ifdef CONFIG_I2C_DEBUG_BUS
-               temp = readb(i2c_imx->base + IMX_I2C_I2CR);
+               temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR);
                dev_dbg(&i2c_imx->adapter.dev, "<%s> CONTROL: IEN=%d, IIEN=%d, "
                        "MSTA=%d, MTX=%d, TXAK=%d, RSTA=%d\n", __func__,
                        (temp & I2CR_IEN ? 1 : 0), (temp & I2CR_IIEN ? 1 : 0),
                        (temp & I2CR_MSTA ? 1 : 0), (temp & I2CR_MTX ? 1 : 0),
                        (temp & I2CR_TXAK ? 1 : 0), (temp & I2CR_RSTA ? 1 : 0));
-               temp = readb(i2c_imx->base + IMX_I2C_I2SR);
+               temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2SR);
                dev_dbg(&i2c_imx->adapter.dev,
                        "<%s> STATUS: ICF=%d, IAAS=%d, IBB=%d, "
                        "IAL=%d, SRW=%d, IIF=%d, RXAK=%d\n", __func__,
@@ -492,7 +596,7 @@ static int __init i2c_imx_probe(struct platform_device *pdev)
                                                           &pdev->dev);
        struct imx_i2c_struct *i2c_imx;
        struct resource *res;
-       struct imxi2c_platform_data *pdata = pdev->dev.platform_data;
+       struct imxi2c_platform_data *pdata = dev_get_platdata(&pdev->dev);
        void __iomem *base;
        int irq, ret;
        u32 bitrate;
@@ -518,8 +622,10 @@ static int __init i2c_imx_probe(struct platform_device *pdev)
        }
 
        if (of_id)
-               pdev->id_entry = of_id->data;
-       i2c_imx->devtype = pdev->id_entry->driver_data;
+               i2c_imx->hwdata = of_id->data;
+       else
+               i2c_imx->hwdata = (struct imx_i2c_hwdata *)
+                               platform_get_device_id(pdev)->driver_data;
 
        /* Setup i2c_imx driver structure */
        strlcpy(i2c_imx->adapter.name, pdev->name, sizeof(i2c_imx->adapter.name));
@@ -537,6 +643,11 @@ static int __init i2c_imx_probe(struct platform_device *pdev)
                return PTR_ERR(i2c_imx->clk);
        }
 
+       ret = clk_prepare_enable(i2c_imx->clk);
+       if (ret) {
+               dev_err(&pdev->dev, "can't enable I2C clock\n");
+               return ret;
+       }
        /* Request IRQ */
        ret = devm_request_irq(&pdev->dev, irq, i2c_imx_isr, 0,
                                pdev->name, i2c_imx);
@@ -560,8 +671,9 @@ static int __init i2c_imx_probe(struct platform_device *pdev)
        i2c_imx_set_clk(i2c_imx, bitrate);
 
        /* Set up chip registers to defaults */
-       writeb(0, i2c_imx->base + IMX_I2C_I2CR);
-       writeb(0, i2c_imx->base + IMX_I2C_I2SR);
+       imx_i2c_write_reg(i2c_imx->hwdata->i2cr_ien_opcode ^ I2CR_IEN,
+                       i2c_imx, IMX_I2C_I2CR);
+       imx_i2c_write_reg(i2c_imx->hwdata->i2sr_clr_opcode, i2c_imx, IMX_I2C_I2SR);
 
        /* Add I2C adapter */
        ret = i2c_add_numbered_adapter(&i2c_imx->adapter);
@@ -574,6 +686,7 @@ static int __init i2c_imx_probe(struct platform_device *pdev)
 
        /* Set up platform driver data */
        platform_set_drvdata(pdev, i2c_imx);
+       clk_disable_unprepare(i2c_imx->clk);
 
        dev_dbg(&i2c_imx->adapter.dev, "claimed irq %d\n", irq);
        dev_dbg(&i2c_imx->adapter.dev, "device resources from 0x%x to 0x%x\n",
@@ -596,10 +709,10 @@ static int __exit i2c_imx_remove(struct platform_device *pdev)
        i2c_del_adapter(&i2c_imx->adapter);
 
        /* setup chip registers to defaults */
-       writeb(0, i2c_imx->base + IMX_I2C_IADR);
-       writeb(0, i2c_imx->base + IMX_I2C_IFDR);
-       writeb(0, i2c_imx->base + IMX_I2C_I2CR);
-       writeb(0, i2c_imx->base + IMX_I2C_I2SR);
+       imx_i2c_write_reg(0, i2c_imx, IMX_I2C_IADR);
+       imx_i2c_write_reg(0, i2c_imx, IMX_I2C_IFDR);
+       imx_i2c_write_reg(0, i2c_imx, IMX_I2C_I2CR);
+       imx_i2c_write_reg(0, i2c_imx, IMX_I2C_I2SR);
 
        return 0;
 }
index 7607dc0619184eaa4100c7edbb8590525df5d2f3..ffac2145b94afc4d300787d81a24e5020dc4e7ec 100644 (file)
@@ -64,7 +64,7 @@ struct mpc_i2c {
        struct i2c_adapter adap;
        int irq;
        u32 real_clk;
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
        u8 fdr, dfsrr;
 #endif
 };
@@ -609,7 +609,6 @@ static const struct i2c_algorithm mpc_algo = {
 
 static struct i2c_adapter mpc_ops = {
        .owner = THIS_MODULE,
-       .name = "MPC adapter",
        .algo = &mpc_algo,
        .timeout = HZ,
 };
@@ -623,6 +622,7 @@ static int fsl_i2c_probe(struct platform_device *op)
        u32 clock = MPC_I2C_CLOCK_LEGACY;
        int result = 0;
        int plen;
+       struct resource res;
 
        match = of_match_device(mpc_i2c_of_match, &op->dev);
        if (!match)
@@ -682,6 +682,9 @@ static int fsl_i2c_probe(struct platform_device *op)
        platform_set_drvdata(op, i2c);
 
        i2c->adap = mpc_ops;
+       of_address_to_resource(op->dev.of_node, 0, &res);
+       scnprintf(i2c->adap.name, sizeof(i2c->adap.name),
+                 "MPC adapter at 0x%llx", (unsigned long long)res.start);
        i2c_set_adapdata(&i2c->adap, i2c);
        i2c->adap.dev.parent = &op->dev;
        i2c->adap.dev.of_node = of_node_get(op->dev.of_node);
@@ -720,7 +723,7 @@ static int fsl_i2c_remove(struct platform_device *op)
        return 0;
 };
 
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 static int mpc_i2c_suspend(struct device *dev)
 {
        struct mpc_i2c *i2c = dev_get_drvdata(dev);
@@ -741,7 +744,10 @@ static int mpc_i2c_resume(struct device *dev)
        return 0;
 }
 
-SIMPLE_DEV_PM_OPS(mpc_i2c_pm_ops, mpc_i2c_suspend, mpc_i2c_resume);
+static SIMPLE_DEV_PM_OPS(mpc_i2c_pm_ops, mpc_i2c_suspend, mpc_i2c_resume);
+#define MPC_I2C_PM_OPS (&mpc_i2c_pm_ops)
+#else
+#define MPC_I2C_PM_OPS NULL
 #endif
 
 static const struct mpc_i2c_data mpc_i2c_data_512x = {
@@ -788,9 +794,7 @@ static struct platform_driver mpc_i2c_driver = {
                .owner = THIS_MODULE,
                .name = DRV_NAME,
                .of_match_table = mpc_i2c_of_match,
-#ifdef CONFIG_PM
-               .pm = &mpc_i2c_pm_ops,
-#endif
+               .pm = MPC_I2C_PM_OPS,
        },
 };
 
index b1f42bf409638f166f592eba381ac9848dccb46a..9cc361d1994104a3c88dcf54e84512705536156a 100644 (file)
@@ -618,7 +618,7 @@ static int
 mv64xxx_i2c_probe(struct platform_device *pd)
 {
        struct mv64xxx_i2c_data         *drv_data;
-       struct mv64xxx_i2c_pdata        *pdata = pd->dev.platform_data;
+       struct mv64xxx_i2c_pdata        *pdata = dev_get_platdata(&pd->dev);
        struct resource *r;
        int     rc;
 
index e2e9a0dade9665ec0d89598754c5734d2480c09c..46cda0b02fd1e16f42cff7e20c4bc31d5aed3abe 100644 (file)
@@ -114,18 +114,21 @@ struct mxs_i2c_dev {
 
        uint32_t timing0;
        uint32_t timing1;
+       uint32_t timing2;
 
        /* DMA support components */
-       struct dma_chan                 *dmach;
+       struct dma_chan                 *dmach;
        uint32_t                        pio_data[2];
        uint32_t                        addr_data;
        struct scatterlist              sg_io[2];
        bool                            dma_read;
 };
 
-static void mxs_i2c_reset(struct mxs_i2c_dev *i2c)
+static int mxs_i2c_reset(struct mxs_i2c_dev *i2c)
 {
-       stmp_reset_block(i2c->regs);
+       int ret = stmp_reset_block(i2c->regs);
+       if (ret)
+               return ret;
 
        /*
         * Configure timing for the I2C block. The I2C TIMING2 register has to
@@ -136,9 +139,11 @@ static void mxs_i2c_reset(struct mxs_i2c_dev *i2c)
         */
        writel(i2c->timing0, i2c->regs + MXS_I2C_TIMING0);
        writel(i2c->timing1, i2c->regs + MXS_I2C_TIMING1);
-       writel(0x00300030, i2c->regs + MXS_I2C_TIMING2);
+       writel(i2c->timing2, i2c->regs + MXS_I2C_TIMING2);
 
        writel(MXS_I2C_IRQ_MASK << 8, i2c->regs + MXS_I2C_CTRL1_SET);
+
+       return 0;
 }
 
 static void mxs_i2c_dma_finish(struct mxs_i2c_dev *i2c)
@@ -475,7 +480,7 @@ static int mxs_i2c_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg,
                                int stop)
 {
        struct mxs_i2c_dev *i2c = i2c_get_adapdata(adap);
-       int ret;
+       int ret, err;
        int flags;
 
        flags = stop ? MXS_I2C_CTRL0_POST_SEND_STOP : 0;
@@ -495,8 +500,11 @@ static int mxs_i2c_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg,
        i2c->cmd_err = 0;
        if (0) {        /* disable PIO mode until a proper fix is made */
                ret = mxs_i2c_pio_setup_xfer(adap, msg, flags);
-               if (ret)
-                       mxs_i2c_reset(i2c);
+               if (ret) {
+                       err = mxs_i2c_reset(i2c);
+                       if (err)
+                               return err;
+               }
        } else {
                INIT_COMPLETION(i2c->cmd_complete);
                ret = mxs_i2c_dma_setup_xfer(adap, msg, flags);
@@ -527,7 +535,10 @@ static int mxs_i2c_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg,
 timeout:
        dev_dbg(i2c->dev, "Timeout!\n");
        mxs_i2c_dma_finish(i2c);
-       mxs_i2c_reset(i2c);
+       ret = mxs_i2c_reset(i2c);
+       if (ret)
+               return ret;
+
        return -ETIMEDOUT;
 }
 
@@ -577,41 +588,79 @@ static const struct i2c_algorithm mxs_i2c_algo = {
        .functionality = mxs_i2c_func,
 };
 
-static void mxs_i2c_derive_timing(struct mxs_i2c_dev *i2c, int speed)
+static void mxs_i2c_derive_timing(struct mxs_i2c_dev *i2c, uint32_t speed)
 {
-       /* The I2C block clock run at 24MHz */
+       /* The I2C block clock runs at 24MHz */
        const uint32_t clk = 24000000;
-       uint32_t base;
+       uint32_t divider;
        uint16_t high_count, low_count, rcv_count, xmit_count;
+       uint32_t bus_free, leadin;
        struct device *dev = i2c->dev;
 
-       if (speed > 540000) {
-               dev_warn(dev, "Speed too high (%d Hz), using 540 kHz\n", speed);
-               speed = 540000;
-       } else if (speed < 12000) {
-               dev_warn(dev, "Speed too low (%d Hz), using 12 kHz\n", speed);
-               speed = 12000;
+       divider = DIV_ROUND_UP(clk, speed);
+
+       if (divider < 25) {
+               /*
+                * limit the divider, so that min(low_count, high_count)
+                * is >= 1
+                */
+               divider = 25;
+               dev_warn(dev,
+                       "Speed too high (%u.%03u kHz), using %u.%03u kHz\n",
+                       speed / 1000, speed % 1000,
+                       clk / divider / 1000, clk / divider % 1000);
+       } else if (divider > 1897) {
+               /*
+                * limit the divider, so that max(low_count, high_count)
+                * cannot exceed 1023
+                */
+               divider = 1897;
+               dev_warn(dev,
+                       "Speed too low (%u.%03u kHz), using %u.%03u kHz\n",
+                       speed / 1000, speed % 1000,
+                       clk / divider / 1000, clk / divider % 1000);
        }
 
        /*
-        * The timing derivation algorithm. There is no documentation for this
-        * algorithm available, it was derived by using the scope and fiddling
-        * with constants until the result observed on the scope was good enough
-        * for 20kHz, 50kHz, 100kHz, 200kHz, 300kHz and 400kHz. It should be
-        * possible to assume the algorithm works for other frequencies as well.
+        * The I2C spec specifies the following timing data:
+        *                          standard mode  fast mode Bitfield name
+        * tLOW (SCL LOW period)     4700 ns        1300 ns
+        * tHIGH (SCL HIGH period)   4000 ns         600 ns
+        * tSU;DAT (data setup time)  250 ns         100 ns
+        * tHD;STA (START hold time) 4000 ns         600 ns
+        * tBUF (bus free time)      4700 ns        1300 ns
         *
-        * Note it was necessary to cap the frequency on both ends as it's not
-        * possible to configure completely arbitrary frequency for the I2C bus
-        * clock.
+        * The hardware (of the i.MX28 at least) seems to add 2 additional
+        * clock cycles to the low_count and 7 cycles to the high_count.
+        * This is compensated for by subtracting the respective constants
+        * from the values written to the timing registers.
         */
-       base = ((clk / speed) - 38) / 2;
-       high_count = base + 3;
-       low_count = base - 3;
-       rcv_count = (high_count * 3) / 4;
-       xmit_count = low_count / 4;
+       if (speed > 100000) {
+               /* fast mode */
+               low_count = DIV_ROUND_CLOSEST(divider * 13, (13 + 6));
+               high_count = DIV_ROUND_CLOSEST(divider * 6, (13 + 6));
+               leadin = DIV_ROUND_UP(600 * (clk / 1000000), 1000);
+               bus_free = DIV_ROUND_UP(1300 * (clk / 1000000), 1000);
+       } else {
+               /* normal mode */
+               low_count = DIV_ROUND_CLOSEST(divider * 47, (47 + 40));
+               high_count = DIV_ROUND_CLOSEST(divider * 40, (47 + 40));
+               leadin = DIV_ROUND_UP(4700 * (clk / 1000000), 1000);
+               bus_free = DIV_ROUND_UP(4700 * (clk / 1000000), 1000);
+       }
+       rcv_count = high_count * 3 / 8;
+       xmit_count = low_count * 3 / 8;
+
+       dev_dbg(dev,
+               "speed=%u(actual %u) divider=%u low=%u high=%u xmit=%u rcv=%u leadin=%u bus_free=%u\n",
+               speed, clk / divider, divider, low_count, high_count,
+               xmit_count, rcv_count, leadin, bus_free);
 
+       low_count -= 2;
+       high_count -= 7;
        i2c->timing0 = (high_count << 16) | rcv_count;
        i2c->timing1 = (low_count << 16) | xmit_count;
+       i2c->timing2 = (bus_free << 16 | leadin);
 }
 
 static int mxs_i2c_get_ofdata(struct mxs_i2c_dev *i2c)
@@ -683,7 +732,9 @@ static int mxs_i2c_probe(struct platform_device *pdev)
        platform_set_drvdata(pdev, i2c);
 
        /* Do reset to enforce correct startup after pinmuxing */
-       mxs_i2c_reset(i2c);
+       err = mxs_i2c_reset(i2c);
+       if (err)
+               return err;
 
        adap = &i2c->adapter;
        strlcpy(adap->name, "MXS I2C adapter", sizeof(adap->name));
index 512dfe609706a562922986b040a8bda753d82126..1909e80735416ae2aa41b8cf285ccf7956855ea3 100644 (file)
@@ -943,7 +943,7 @@ static void nmk_i2c_of_probe(struct device_node *np,
 static int nmk_i2c_probe(struct amba_device *adev, const struct amba_id *id)
 {
        int ret = 0;
-       struct nmk_i2c_controller *pdata = adev->dev.platform_data;
+       struct nmk_i2c_controller *pdata = dev_get_platdata(&adev->dev);
        struct device_node *np = adev->dev.of_node;
        struct nmk_i2c_dev      *dev;
        struct i2c_adapter *adap;
index 865ee350adb363cea47f624eb4c2bb909a3b5624..36394d737faf388998b7da4b015bbea134e85709 100644 (file)
@@ -525,7 +525,7 @@ static int nuc900_i2c_probe(struct platform_device *pdev)
        struct resource *res;
        int ret;
 
-       pdata = pdev->dev.platform_data;
+       pdata = dev_get_platdata(&pdev->dev);
        if (!pdata) {
                dev_err(&pdev->dev, "no platform data\n");
                return -EINVAL;
index 0e1f8245e768d91c9fe2c58639607b952b804c57..e14182cd87ffeea016d9c36cd87b6ae8e4430a76 100644 (file)
@@ -353,10 +353,6 @@ static int ocores_i2c_probe(struct platform_device *pdev)
        int ret;
        int i;
 
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res)
-               return -ENODEV;
-
        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
                return irq;
@@ -365,11 +361,12 @@ static int ocores_i2c_probe(struct platform_device *pdev)
        if (!i2c)
                return -ENOMEM;
 
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        i2c->base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(i2c->base))
                return PTR_ERR(i2c->base);
 
-       pdata = pdev->dev.platform_data;
+       pdata = dev_get_platdata(&pdev->dev);
        if (pdata) {
                i2c->reg_shift = pdata->reg_shift;
                i2c->reg_io_width = pdata->reg_io_width;
@@ -456,7 +453,7 @@ static int ocores_i2c_remove(struct platform_device *pdev)
        return 0;
 }
 
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 static int ocores_i2c_suspend(struct device *dev)
 {
        struct ocores_i2c *i2c = dev_get_drvdata(dev);
index 142b694d1c60a1ca528860a03b45c1a913dcb145..9ccb7b9cb6fcab329551907078ffe780d38f7dd8 100644 (file)
@@ -43,7 +43,6 @@
 #include <linux/slab.h>
 #include <linux/i2c-omap.h>
 #include <linux/pm_runtime.h>
-#include <linux/pinctrl/consumer.h>
 
 /* I2C controller revisions */
 #define OMAP_I2C_OMAP1_REV_2           0x20
@@ -216,8 +215,6 @@ struct omap_i2c_dev {
        u16                     syscstate;
        u16                     westate;
        u16                     errata;
-
-       struct pinctrl          *pins;
 };
 
 static const u8 reg_map_ip_v1[] = {
@@ -618,11 +615,10 @@ static int omap_i2c_xfer_msg(struct i2c_adapter *adap,
        if (dev->cmd_err & OMAP_I2C_STAT_NACK) {
                if (msg->flags & I2C_M_IGNORE_NAK)
                        return 0;
-               if (stop) {
-                       w = omap_i2c_read_reg(dev, OMAP_I2C_CON_REG);
-                       w |= OMAP_I2C_CON_STP;
-                       omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, w);
-               }
+
+               w = omap_i2c_read_reg(dev, OMAP_I2C_CON_REG);
+               w |= OMAP_I2C_CON_STP;
+               omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, w);
                return -EREMOTEIO;
        }
        return -EIO;
@@ -1079,7 +1075,7 @@ omap_i2c_probe(struct platform_device *pdev)
        struct i2c_adapter      *adap;
        struct resource         *mem;
        const struct omap_i2c_bus_platform_data *pdata =
-               pdev->dev.platform_data;
+               dev_get_platdata(&pdev->dev);
        struct device_node      *node = pdev->dev.of_node;
        const struct of_device_id *match;
        int irq;
@@ -1120,16 +1116,6 @@ omap_i2c_probe(struct platform_device *pdev)
                dev->set_mpu_wkup_lat = pdata->set_mpu_wkup_lat;
        }
 
-       dev->pins = devm_pinctrl_get_select_default(&pdev->dev);
-       if (IS_ERR(dev->pins)) {
-               if (PTR_ERR(dev->pins) == -EPROBE_DEFER)
-                       return -EPROBE_DEFER;
-
-               dev_warn(&pdev->dev, "did not get pins for i2c error: %li\n",
-                        PTR_ERR(dev->pins));
-               dev->pins = NULL;
-       }
-
        dev->dev = &pdev->dev;
        dev->irq = irq;
 
index aa00df14e30b1a0114be4396b931ce4a32b523d9..39e2755e3f257c09bd1edd76371130fd8df0ceda 100644 (file)
@@ -136,7 +136,7 @@ static int i2c_pca_pf_probe(struct platform_device *pdev)
        struct i2c_pca_pf_data *i2c;
        struct resource *res;
        struct i2c_pca9564_pf_platform_data *platform_data =
-                               pdev->dev.platform_data;
+                               dev_get_platdata(&pdev->dev);
        int ret = 0;
        int irq;
 
index d05ad590af29b5163e0920a8d7868a31d8ec0529..a028617b8f13c37a4a8371264a149ba7f6d44540 100644 (file)
@@ -231,11 +231,11 @@ static int piix4_setup(struct pci_dev *PIIX4_dev,
 }
 
 static int piix4_setup_sb800(struct pci_dev *PIIX4_dev,
-                            const struct pci_device_id *id)
+                            const struct pci_device_id *id, u8 aux)
 {
        unsigned short piix4_smba;
        unsigned short smba_idx = 0xcd6;
-       u8 smba_en_lo, smba_en_hi, i2ccfg, i2ccfg_offset = 0x10, smb_en = 0x2c;
+       u8 smba_en_lo, smba_en_hi, i2ccfg, i2ccfg_offset = 0x10, smb_en;
 
        /* SB800 and later SMBus does not support forcing address */
        if (force || force_addr) {
@@ -245,6 +245,8 @@ static int piix4_setup_sb800(struct pci_dev *PIIX4_dev,
        }
 
        /* Determine the address of the SMBus areas */
+       smb_en = (aux) ? 0x28 : 0x2c;
+
        if (!request_region(smba_idx, 2, "smba_idx")) {
                dev_err(&PIIX4_dev->dev, "SMBus base address index region "
                        "0x%x already in use!\n", smba_idx);
@@ -272,6 +274,13 @@ static int piix4_setup_sb800(struct pci_dev *PIIX4_dev,
                return -EBUSY;
        }
 
+       /* Aux SMBus does not support IRQ information */
+       if (aux) {
+               dev_info(&PIIX4_dev->dev,
+                        "SMBus Host Controller at 0x%x\n", piix4_smba);
+               return piix4_smba;
+       }
+
        /* Request the SMBus I2C bus config region */
        if (!request_region(piix4_smba + i2ccfg_offset, 1, "i2ccfg")) {
                dev_err(&PIIX4_dev->dev, "SMBus I2C bus config region "
@@ -597,7 +606,7 @@ static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id)
             dev->revision >= 0x40) ||
            dev->vendor == PCI_VENDOR_ID_AMD)
                /* base address location etc changed in SB800 */
-               retval = piix4_setup_sb800(dev, id);
+               retval = piix4_setup_sb800(dev, id, 0);
        else
                retval = piix4_setup(dev, id);
 
@@ -611,17 +620,29 @@ static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id)
                return retval;
 
        /* Check for auxiliary SMBus on some AMD chipsets */
+       retval = -ENODEV;
+
        if (dev->vendor == PCI_VENDOR_ID_ATI &&
-           dev->device == PCI_DEVICE_ID_ATI_SBX00_SMBUS &&
-           dev->revision < 0x40) {
-               retval = piix4_setup_aux(dev, id, 0x58);
-               if (retval > 0) {
-                       /* Try to add the aux adapter if it exists,
-                        * piix4_add_adapter will clean up if this fails */
-                       piix4_add_adapter(dev, retval, &piix4_aux_adapter);
+           dev->device == PCI_DEVICE_ID_ATI_SBX00_SMBUS) {
+               if (dev->revision < 0x40) {
+                       retval = piix4_setup_aux(dev, id, 0x58);
+               } else {
+                       /* SB800 added aux bus too */
+                       retval = piix4_setup_sb800(dev, id, 1);
                }
        }
 
+       if (dev->vendor == PCI_VENDOR_ID_AMD &&
+           dev->device == PCI_DEVICE_ID_AMD_HUDSON2_SMBUS) {
+               retval = piix4_setup_sb800(dev, id, 1);
+       }
+
+       if (retval > 0) {
+               /* Try to add the aux adapter if it exists,
+                * piix4_add_adapter will clean up if this fails */
+               piix4_add_adapter(dev, retval, &piix4_aux_adapter);
+       }
+
        return 0;
 }
 
index 5f39c6d8117a063b41406f589ccb722d85f4b9c9..1713b3ee61f55306b4995d6b16c4022fdebd14ec 100644 (file)
@@ -595,7 +595,7 @@ static struct i2c_algorithm pnx_algorithm = {
        .functionality = i2c_pnx_func,
 };
 
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 static int i2c_pnx_controller_suspend(struct device *dev)
 {
        struct i2c_pnx_algo_data *alg_data = dev_get_drvdata(dev);
index 8dc90da1e6e657b6f0ca58d91bba9dda6c29a7e7..5a88364a542b01f5d5a3d67c9a67bb594dd826fd 100644 (file)
@@ -398,7 +398,7 @@ static void i2c_powermac_register_devices(struct i2c_adapter *adap,
 
 static int i2c_powermac_probe(struct platform_device *dev)
 {
-       struct pmac_i2c_bus *bus = dev->dev.platform_data;
+       struct pmac_i2c_bus *bus = dev_get_platdata(&dev->dev);
        struct device_node *parent = NULL;
        struct i2c_adapter *adapter;
        const char *basename;
index 37a84c87c5fdd3fe31df3007afe959be7f98c289..ac80199885bef383c1a394ffbdbff335ab529b5d 100644 (file)
@@ -245,7 +245,7 @@ static int puv3_i2c_remove(struct platform_device *pdev)
        return 0;
 }
 
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 static int puv3_i2c_suspend(struct device *dev)
 {
        int poll_count;
index fbafed29fb8153ba404c27c8ec5183a4e82520ff..3dbc1acdc28a6dacf669aee1b7d6710c8ea1c018 100644 (file)
@@ -110,6 +110,8 @@ MODULE_DEVICE_TABLE(platform, i2c_pxa_id_table);
 #define ICR_SADIE      (1 << 13)          /* slave address detected int enable */
 #define ICR_UR         (1 << 14)          /* unit reset */
 #define ICR_FM         (1 << 15)          /* fast mode */
+#define ICR_HS         (1 << 16)          /* High Speed mode */
+#define ICR_GPIOEN     (1 << 19)          /* enable GPIO mode for SCL in HS */
 
 #define ISR_RWM                (1 << 0)           /* read/write mode */
 #define ISR_ACKNAK     (1 << 1)           /* ack/nak status */
@@ -155,6 +157,10 @@ struct pxa_i2c {
        int                     irq;
        unsigned int            use_pio :1;
        unsigned int            fast_mode :1;
+       unsigned int            high_mode:1;
+       unsigned char           master_code;
+       unsigned long           rate;
+       bool                    highmode_enter;
 };
 
 #define _IBMR(i2c)     ((i2c)->reg_ibmr)
@@ -459,6 +465,7 @@ static void i2c_pxa_reset(struct pxa_i2c *i2c)
 
        /* set control register values */
        writel(I2C_ICR_INIT | (i2c->fast_mode ? ICR_FM : 0), _ICR(i2c));
+       writel(readl(_ICR(i2c)) | (i2c->high_mode ? ICR_HS : 0), _ICR(i2c));
 
 #ifdef CONFIG_I2C_PXA_SLAVE
        dev_info(&i2c->adap.dev, "Enabling slave mode\n");
@@ -680,6 +687,34 @@ static int i2c_pxa_pio_set_master(struct pxa_i2c *i2c)
        return 0;
 }
 
+/*
+ * PXA I2C send master code
+ * 1. Load master code to IDBR and send it.
+ *    Note for HS mode, set ICR [GPIOEN].
+ * 2. Wait until win arbitration.
+ */
+static int i2c_pxa_send_mastercode(struct pxa_i2c *i2c)
+{
+       u32 icr;
+       long timeout;
+
+       spin_lock_irq(&i2c->lock);
+       i2c->highmode_enter = true;
+       writel(i2c->master_code, _IDBR(i2c));
+
+       icr = readl(_ICR(i2c)) & ~(ICR_STOP | ICR_ALDIE);
+       icr |= ICR_GPIOEN | ICR_START | ICR_TB | ICR_ITEIE;
+       writel(icr, _ICR(i2c));
+
+       spin_unlock_irq(&i2c->lock);
+       timeout = wait_event_timeout(i2c->wait,
+                       i2c->highmode_enter == false, HZ * 1);
+
+       i2c->highmode_enter = false;
+
+       return (timeout == 0) ? I2C_RETRY : 0;
+}
+
 static int i2c_pxa_do_pio_xfer(struct pxa_i2c *i2c,
                               struct i2c_msg *msg, int num)
 {
@@ -743,6 +778,14 @@ static int i2c_pxa_do_xfer(struct pxa_i2c *i2c, struct i2c_msg *msg, int num)
                goto out;
        }
 
+       if (i2c->high_mode) {
+               ret = i2c_pxa_send_mastercode(i2c);
+               if (ret) {
+                       dev_err(&i2c->adap.dev, "i2c_pxa_send_mastercode timeout\n");
+                       goto out;
+                       }
+       }
+
        spin_lock_irq(&i2c->lock);
 
        i2c->msg = msg;
@@ -990,11 +1033,14 @@ static irqreturn_t i2c_pxa_handler(int this_irq, void *dev_id)
                        i2c_pxa_slave_txempty(i2c, isr);
                if (isr & ISR_IRF)
                        i2c_pxa_slave_rxfull(i2c, isr);
-       } else if (i2c->msg) {
+       } else if (i2c->msg && (!i2c->highmode_enter)) {
                if (isr & ISR_ITE)
                        i2c_pxa_irq_txempty(i2c, isr);
                if (isr & ISR_IRF)
                        i2c_pxa_irq_rxfull(i2c, isr);
+       } else if ((isr & ISR_ITE) && i2c->highmode_enter) {
+               i2c->highmode_enter = false;
+               wake_up(&i2c->wait);
        } else {
                i2c_pxa_scream_blue_murder(i2c, "spurious irq");
        }
@@ -1072,20 +1118,25 @@ static int i2c_pxa_probe_pdata(struct platform_device *pdev,
                               struct pxa_i2c *i2c,
                               enum pxa_i2c_types *i2c_types)
 {
-       struct i2c_pxa_platform_data *plat = pdev->dev.platform_data;
+       struct i2c_pxa_platform_data *plat = dev_get_platdata(&pdev->dev);
        const struct platform_device_id *id = platform_get_device_id(pdev);
 
        *i2c_types = id->driver_data;
        if (plat) {
                i2c->use_pio = plat->use_pio;
                i2c->fast_mode = plat->fast_mode;
+               i2c->high_mode = plat->high_mode;
+               i2c->master_code = plat->master_code;
+               if (!i2c->master_code)
+                       i2c->master_code = 0xe;
+               i2c->rate = plat->rate;
        }
        return 0;
 }
 
 static int i2c_pxa_probe(struct platform_device *dev)
 {
-       struct i2c_pxa_platform_data *plat = dev->dev.platform_data;
+       struct i2c_pxa_platform_data *plat = dev_get_platdata(&dev->dev);
        enum pxa_i2c_types i2c_type;
        struct pxa_i2c *i2c;
        struct resource *res = NULL;
@@ -1151,6 +1202,7 @@ static int i2c_pxa_probe(struct platform_device *dev)
        i2c->irq = irq;
 
        i2c->slave_addr = I2C_PXA_SLAVE_ADDR;
+       i2c->highmode_enter = false;
 
        if (plat) {
 #ifdef CONFIG_I2C_PXA_SLAVE
@@ -1160,6 +1212,16 @@ static int i2c_pxa_probe(struct platform_device *dev)
                i2c->adap.class = plat->class;
        }
 
+       if (i2c->high_mode) {
+               if (i2c->rate) {
+                       clk_set_rate(i2c->clk, i2c->rate);
+                       pr_info("i2c: <%s> set rate to %ld\n",
+                               i2c->adap.name, clk_get_rate(i2c->clk));
+               } else
+                       pr_warn("i2c: <%s> clock rate not set\n",
+                               i2c->adap.name);
+       }
+
        clk_prepare_enable(i2c->clk);
 
        if (i2c->use_pio) {
index 0fc585861610dd484fae778a729733df5e61c5d8..e59c3f61854291120e8365e0f337484bc1e90a48 100644 (file)
@@ -615,7 +615,7 @@ static const struct i2c_algorithm rcar_i2c_algo = {
 
 static int rcar_i2c_probe(struct platform_device *pdev)
 {
-       struct i2c_rcar_platform_data *pdata = pdev->dev.platform_data;
+       struct i2c_rcar_platform_data *pdata = dev_get_platdata(&pdev->dev);
        struct rcar_i2c_priv *priv;
        struct i2c_adapter *adap;
        struct resource *res;
index cab1c91b75a3a8e300057ef2aaf3e98b6af999d4..0a077b1ef94f687c4150561ab3d353532858cec2 100644 (file)
@@ -1033,7 +1033,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
        int ret;
 
        if (!pdev->dev.of_node) {
-               pdata = pdev->dev.platform_data;
+               pdata = dev_get_platdata(&pdev->dev);
                if (!pdata) {
                        dev_err(&pdev->dev, "no platform data\n");
                        return -EINVAL;
index 7c1ca5aca08807f4510c0f0b6a79a8db41147fb7..dd186a03768431df0a4869c2b55c4e7687e7a4a1 100644 (file)
@@ -290,8 +290,9 @@ static int s6i2c_probe(struct platform_device *dev)
 
        clock = 0;
        bus_num = -1;
-       if (dev->dev.platform_data) {
-               struct s6_i2c_platform_data *pdata = dev->dev.platform_data;
+       if (dev_get_platdata(&dev->dev)) {
+               struct s6_i2c_platform_data *pdata =
+                       dev_get_platdata(&dev->dev);
                bus_num = pdata->bus_num;
                clock = pdata->clock;
        }
index 5351a2f349127919785bc9a58f11256466861fad..5e8f136e233f79d5205a5ef65038474721154b16 100644 (file)
@@ -437,7 +437,7 @@ static int sh7760_i2c_probe(struct platform_device *pdev)
        struct cami2c *id;
        int ret;
 
-       pd = pdev->dev.platform_data;
+       pd = dev_get_platdata(&pdev->dev);
        if (!pd) {
                dev_err(&pdev->dev, "no platform_data!\n");
                ret = -ENODEV;
index debf745c0268f897d984d21391eed39efbd626fe..4e86a3190d46bfef45ca6cac99a9614c79ecc104 100644 (file)
@@ -658,7 +658,7 @@ static int sh_mobile_i2c_hook_irqs(struct platform_device *dev, int hook)
 
 static int sh_mobile_i2c_probe(struct platform_device *dev)
 {
-       struct i2c_sh_mobile_platform_data *pdata = dev->dev.platform_data;
+       struct i2c_sh_mobile_platform_data *pdata = dev_get_platdata(&dev->dev);
        struct sh_mobile_i2c_data *pd;
        struct i2c_adapter *adap;
        struct resource *res;
index d1a6b204af00856748b67e7d42253ebb7c849e3f..19a40afaf1728952b5c432736d8f15195831f79b 100644 (file)
@@ -884,9 +884,6 @@ stu300_probe(struct platform_device *pdev)
 
        dev->pdev = pdev;
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res)
-               return -ENOENT;
-
        dev->virtbase = devm_ioremap_resource(&pdev->dev, res);
        dev_dbg(&pdev->dev, "initialize bus device I2C%d on virtual "
                "base %p\n", bus_nr, dev->virtbase);
@@ -941,7 +938,7 @@ stu300_probe(struct platform_device *pdev)
        return 0;
 }
 
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 static int stu300_suspend(struct device *device)
 {
        struct stu300_dev *dev = dev_get_drvdata(device);
index 05106368d405d21c1f4aae261394f25ea7b29d7b..e7d3b755af3bb3771359fcb4110310a8d6359a90 100644 (file)
@@ -54,12 +54,16 @@ static int usb_write(struct i2c_adapter *adapter, int cmd,
 
 static int usb_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
 {
-       unsigned char status;
+       unsigned char *pstatus;
        struct i2c_msg *pmsg;
-       int i;
+       int i, ret;
 
        dev_dbg(&adapter->dev, "master xfer %d messages:\n", num);
 
+       pstatus = kmalloc(sizeof(*pstatus), GFP_KERNEL);
+       if (!pstatus)
+               return -ENOMEM;
+
        for (i = 0 ; i < num ; i++) {
                int cmd = CMD_I2C_IO;
 
@@ -84,7 +88,8 @@ static int usb_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
                                     pmsg->buf, pmsg->len) != pmsg->len) {
                                dev_err(&adapter->dev,
                                        "failure reading data\n");
-                               return -EREMOTEIO;
+                               ret = -EREMOTEIO;
+                               goto out;
                        }
                } else {
                        /* write data */
@@ -93,36 +98,50 @@ static int usb_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
                                      pmsg->buf, pmsg->len) != pmsg->len) {
                                dev_err(&adapter->dev,
                                        "failure writing data\n");
-                               return -EREMOTEIO;
+                               ret = -EREMOTEIO;
+                               goto out;
                        }
                }
 
                /* read status */
-               if (usb_read(adapter, CMD_GET_STATUS, 0, 0, &status, 1) != 1) {
+               if (usb_read(adapter, CMD_GET_STATUS, 0, 0, pstatus, 1) != 1) {
                        dev_err(&adapter->dev, "failure reading status\n");
-                       return -EREMOTEIO;
+                       ret = -EREMOTEIO;
+                       goto out;
                }
 
-               dev_dbg(&adapter->dev, "  status = %d\n", status);
-               if (status == STATUS_ADDRESS_NAK)
-                       return -EREMOTEIO;
+               dev_dbg(&adapter->dev, "  status = %d\n", *pstatus);
+               if (*pstatus == STATUS_ADDRESS_NAK) {
+                       ret = -EREMOTEIO;
+                       goto out;
+               }
        }
 
-       return i;
+       ret = i;
+out:
+       kfree(pstatus);
+       return ret;
 }
 
 static u32 usb_func(struct i2c_adapter *adapter)
 {
-       __le32 func;
+       __le32 *pfunc;
+       u32 ret;
+
+       pfunc = kmalloc(sizeof(*pfunc), GFP_KERNEL);
 
        /* get functionality from adapter */
-       if (usb_read(adapter, CMD_GET_FUNC, 0, 0, &func, sizeof(func)) !=
-           sizeof(func)) {
+       if (!pfunc || usb_read(adapter, CMD_GET_FUNC, 0, 0, pfunc,
+                              sizeof(*pfunc)) != sizeof(*pfunc)) {
                dev_err(&adapter->dev, "failure reading functionality\n");
-               return 0;
+               ret = 0;
+               goto out;
        }
 
-       return le32_to_cpu(func);
+       ret = le32_to_cpup(pfunc);
+out:
+       kfree(pfunc);
+       return ret;
 }
 
 /* This is the actual algorithm we define */
index 3d0f0520c1b44718c9fe6f7f2bd3da90f74d0dcf..433f377b3869f846b32a80b854c81c88d23ab9cb 100644 (file)
@@ -703,7 +703,7 @@ static int xiic_i2c_probe(struct platform_device *pdev)
        if (irq < 0)
                goto resource_missing;
 
-       pdata = (struct xiic_i2c_platform_data *) pdev->dev.platform_data;
+       pdata = (struct xiic_i2c_platform_data *)dev_get_platdata(&pdev->dev);
 
        i2c = kzalloc(sizeof(*i2c), GFP_KERNEL);
        if (!i2c)
index f32ca293ae0e30674a436afb4bc431cdb743de28..3d9ca2d3d77ec1a876fdb95021f97a521fb0e279 100644 (file)
@@ -1665,7 +1665,8 @@ static int i2c_default_probe(struct i2c_adapter *adap, unsigned short addr)
                err = i2c_smbus_xfer(adap, addr, 0, I2C_SMBUS_READ, 0,
                                     I2C_SMBUS_BYTE, &dummy);
        else {
-               dev_warn(&adap->dev, "No suitable probing method supported\n");
+               dev_warn(&adap->dev, "No suitable probing method supported for address 0x%02X\n",
+                        addr);
                err = -EOPNOTSUPP;
        }
 
@@ -1825,7 +1826,8 @@ EXPORT_SYMBOL(i2c_get_adapter);
 
 void i2c_put_adapter(struct i2c_adapter *adap)
 {
-       module_put(adap->owner);
+       if (adap)
+               module_put(adap->owner);
 }
 EXPORT_SYMBOL(i2c_put_adapter);
 
index 92cdd2323b03f3adc8a3a0303d9f51c2eca23c5f..44d4c6071c15096e19d16433092b7428c113f34f 100644 (file)
@@ -137,7 +137,7 @@ static irqreturn_t smbalert_irq(int irq, void *d)
 static int smbalert_probe(struct i2c_client *ara,
                          const struct i2c_device_id *id)
 {
-       struct i2c_smbus_alert_setup *setup = ara->dev.platform_data;
+       struct i2c_smbus_alert_setup *setup = dev_get_platdata(&ara->dev);
        struct i2c_smbus_alert *alert;
        struct i2c_adapter *adapter = ara->adapter;
        int res;
index 210b6f7b9028af89036341b02de8f7251a7ca996..f7bf24375f81c004768d47651352cb6a91eec5e2 100644 (file)
@@ -131,7 +131,7 @@ static int i2c_arbitrator_probe(struct platform_device *pdev)
                dev_err(dev, "Cannot find device tree node\n");
                return -ENODEV;
        }
-       if (dev->platform_data) {
+       if (dev_get_platdata(dev)) {
                dev_err(dev, "Platform data is not supported\n");
                return -EINVAL;
        }
index 5a0ce0081dce415b14cd1d40662845fb99c0f7d4..6a206e8d58f4e3f2985a00e287ea7a5103a86024 100644 (file)
@@ -148,12 +148,14 @@ static int i2c_mux_gpio_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, mux);
 
-       if (!pdev->dev.platform_data) {
+       if (!dev_get_platdata(&pdev->dev)) {
                ret = i2c_mux_gpio_probe_dt(mux, pdev);
                if (ret < 0)
                        return ret;
-       } else
-               memcpy(&mux->data, pdev->dev.platform_data, sizeof(mux->data));
+       } else {
+               memcpy(&mux->data, dev_get_platdata(&pdev->dev),
+                       sizeof(mux->data));
+       }
 
        /*
         * If a GPIO chip name is provided, the GPIO pin numbers provided are
index 966a18a5d12d2397d2a43f25bcd22a65420cc5e4..c4f08ad311832ed79da7d02d6606342e696b72c1 100644 (file)
@@ -324,7 +324,7 @@ static int pca9541_probe(struct i2c_client *client,
                         const struct i2c_device_id *id)
 {
        struct i2c_adapter *adap = client->adapter;
-       struct pca954x_platform_data *pdata = client->dev.platform_data;
+       struct pca954x_platform_data *pdata = dev_get_platdata(&client->dev);
        struct pca9541 *data;
        int force;
        int ret = -ENODEV;
index a531d801dbe47aa8a35461955a57e7320380e9fc..bad5b84a59850f79448b3ace9026eb671d201a37 100644 (file)
@@ -185,7 +185,7 @@ static int pca954x_probe(struct i2c_client *client,
                         const struct i2c_device_id *id)
 {
        struct i2c_adapter *adap = to_i2c_adapter(client->dev.parent);
-       struct pca954x_platform_data *pdata = client->dev.platform_data;
+       struct pca954x_platform_data *pdata = dev_get_platdata(&client->dev);
        int num, force, class;
        struct pca954x *data;
        int ret = -ENODEV;
index a43c0ce5e3d8728918afa810842292d00efdac8c..0d082027c29afa31c1f6bb0a0ab6f768f99d91a8 100644 (file)
@@ -145,7 +145,7 @@ static int i2c_mux_pinctrl_probe(struct platform_device *pdev)
 
        mux->dev = &pdev->dev;
 
-       mux->pdata = pdev->dev.platform_data;
+       mux->pdata = dev_get_platdata(&pdev->dev);
        if (!mux->pdata) {
                ret = i2c_mux_pinctrl_parse_dt(mux, pdev);
                if (ret < 0)
index f1a6796b165c154bffd18d9c6f99e407aac24d96..140c8ef505291129d6299d2b4d4931d3b728589e 100644 (file)
@@ -520,11 +520,12 @@ void ide_acpi_set_state(ide_hwif_t *hwif, int on)
        ide_port_for_each_present_dev(i, drive, hwif) {
                if (drive->acpidata->obj_handle)
                        acpi_bus_set_power(drive->acpidata->obj_handle,
-                                          on ? ACPI_STATE_D0 : ACPI_STATE_D3);
+                               on ? ACPI_STATE_D0 : ACPI_STATE_D3_COLD);
        }
 
        if (!on)
-               acpi_bus_set_power(hwif->acpidata->obj_handle, ACPI_STATE_D3);
+               acpi_bus_set_power(hwif->acpidata->obj_handle,
+                                  ACPI_STATE_D3_COLD);
 }
 
 /**
index 5f4749e60b0428ae6fb3ffb1be7b4de00913b699..c1cd5698b8aea471802f471626e0b3865b2ca058 100644 (file)
@@ -232,7 +232,8 @@ static int adjd_s311_read_raw(struct iio_dev *indio_dev,
 
        switch (mask) {
        case IIO_CHAN_INFO_RAW:
-               ret = adjd_s311_read_data(indio_dev, chan->address, val);
+               ret = adjd_s311_read_data(indio_dev,
+                       ADJD_S311_DATA_REG(chan->address), val);
                if (ret < 0)
                        return ret;
                return IIO_VAL_INT;
index 7c0f9535fb7d443bf2c4b1647b3494816292020a..3a2c3c3bf723f5ab7edca847bcb32c34ee95281d 100644 (file)
@@ -1385,8 +1385,9 @@ static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
 {
        struct rdma_id_private *id_priv = iw_id->context;
        struct rdma_cm_event event;
-       struct sockaddr_in *sin;
        int ret = 0;
+       struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr;
+       struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr;
 
        if (cma_disable_callback(id_priv, RDMA_CM_CONNECT))
                return 0;
@@ -1397,10 +1398,10 @@ static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
                event.event = RDMA_CM_EVENT_DISCONNECTED;
                break;
        case IW_CM_EVENT_CONNECT_REPLY:
-               sin = (struct sockaddr_in *) cma_src_addr(id_priv);
-               *sin = iw_event->local_addr;
-               sin = (struct sockaddr_in *) cma_dst_addr(id_priv);
-               *sin = iw_event->remote_addr;
+               memcpy(cma_src_addr(id_priv), laddr,
+                      rdma_addr_size(laddr));
+               memcpy(cma_dst_addr(id_priv), raddr,
+                      rdma_addr_size(raddr));
                switch (iw_event->status) {
                case 0:
                        event.event = RDMA_CM_EVENT_ESTABLISHED;
@@ -1450,11 +1451,12 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
 {
        struct rdma_cm_id *new_cm_id;
        struct rdma_id_private *listen_id, *conn_id;
-       struct sockaddr_in *sin;
        struct net_device *dev = NULL;
        struct rdma_cm_event event;
        int ret;
        struct ib_device_attr attr;
+       struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr;
+       struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr;
 
        listen_id = cm_id->context;
        if (cma_disable_callback(listen_id, RDMA_CM_LISTEN))
@@ -1472,14 +1474,7 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
        mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
        conn_id->state = RDMA_CM_CONNECT;
 
-       dev = ip_dev_find(&init_net, iw_event->local_addr.sin_addr.s_addr);
-       if (!dev) {
-               ret = -EADDRNOTAVAIL;
-               mutex_unlock(&conn_id->handler_mutex);
-               rdma_destroy_id(new_cm_id);
-               goto out;
-       }
-       ret = rdma_copy_addr(&conn_id->id.route.addr.dev_addr, dev, NULL);
+       ret = rdma_translate_ip(laddr, &conn_id->id.route.addr.dev_addr);
        if (ret) {
                mutex_unlock(&conn_id->handler_mutex);
                rdma_destroy_id(new_cm_id);
@@ -1497,10 +1492,8 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
        cm_id->context = conn_id;
        cm_id->cm_handler = cma_iw_handler;
 
-       sin = (struct sockaddr_in *) cma_src_addr(conn_id);
-       *sin = iw_event->local_addr;
-       sin = (struct sockaddr_in *) cma_dst_addr(conn_id);
-       *sin = iw_event->remote_addr;
+       memcpy(cma_src_addr(conn_id), laddr, rdma_addr_size(laddr));
+       memcpy(cma_dst_addr(conn_id), raddr, rdma_addr_size(raddr));
 
        ret = ib_query_device(conn_id->id.device, &attr);
        if (ret) {
@@ -1576,7 +1569,6 @@ static int cma_ib_listen(struct rdma_id_private *id_priv)
 static int cma_iw_listen(struct rdma_id_private *id_priv, int backlog)
 {
        int ret;
-       struct sockaddr_in *sin;
        struct iw_cm_id *id;
 
        id = iw_create_cm_id(id_priv->id.device,
@@ -1587,8 +1579,8 @@ static int cma_iw_listen(struct rdma_id_private *id_priv, int backlog)
 
        id_priv->cm_id.iw = id;
 
-       sin = (struct sockaddr_in *) cma_src_addr(id_priv);
-       id_priv->cm_id.iw->local_addr = *sin;
+       memcpy(&id_priv->cm_id.iw->local_addr, cma_src_addr(id_priv),
+              rdma_addr_size(cma_src_addr(id_priv)));
 
        ret = iw_cm_listen(id_priv->cm_id.iw, backlog);
 
@@ -2803,7 +2795,6 @@ static int cma_connect_iw(struct rdma_id_private *id_priv,
                          struct rdma_conn_param *conn_param)
 {
        struct iw_cm_id *cm_id;
-       struct sockaddr_in* sin;
        int ret;
        struct iw_cm_conn_param iw_param;
 
@@ -2813,11 +2804,10 @@ static int cma_connect_iw(struct rdma_id_private *id_priv,
 
        id_priv->cm_id.iw = cm_id;
 
-       sin = (struct sockaddr_in *) cma_src_addr(id_priv);
-       cm_id->local_addr = *sin;
-
-       sin = (struct sockaddr_in *) cma_dst_addr(id_priv);
-       cm_id->remote_addr = *sin;
+       memcpy(&cm_id->local_addr, cma_src_addr(id_priv),
+              rdma_addr_size(cma_src_addr(id_priv)));
+       memcpy(&cm_id->remote_addr, cma_dst_addr(id_priv),
+              rdma_addr_size(cma_dst_addr(id_priv)));
 
        ret = cma_modify_qp_rtr(id_priv, conn_param);
        if (ret)
index 0fcd7aa26fa2121d86d48e035798aadad407cce1..b8431d64efeb897c324fec7b473eb630f7db79b4 100644 (file)
@@ -135,6 +135,7 @@ struct ib_usrq_object {
 struct ib_uqp_object {
        struct ib_uevent_object uevent;
        struct list_head        mcast_list;
+       struct ib_uxrcd_object *uxrcd;
 };
 
 struct ib_ucq_object {
index b3c07b0c9f2655bb13b9fb81c6ef38fc2efd8757..b1051405d41f8cbbb736f849f4c31b07fe772259 100644 (file)
@@ -1526,7 +1526,7 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
                   (unsigned long) cmd.response + sizeof resp,
                   in_len - sizeof cmd, out_len - sizeof resp);
 
-       obj = kmalloc(sizeof *obj, GFP_KERNEL);
+       obj = kzalloc(sizeof *obj, GFP_KERNEL);
        if (!obj)
                return -ENOMEM;
 
@@ -1642,8 +1642,13 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
                goto err_copy;
        }
 
-       if (xrcd)
+       if (xrcd) {
+               obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object,
+                                         uobject);
+               atomic_inc(&obj->uxrcd->refcnt);
                put_xrcd_read(xrcd_uobj);
+       }
+
        if (pd)
                put_pd_read(pd);
        if (scq)
@@ -1753,6 +1758,8 @@ ssize_t ib_uverbs_open_qp(struct ib_uverbs_file *file,
                goto err_remove;
        }
 
+       obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
+       atomic_inc(&obj->uxrcd->refcnt);
        put_xrcd_read(xrcd_uobj);
 
        mutex_lock(&file->mutex);
@@ -2019,6 +2026,9 @@ ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
        if (ret)
                return ret;
 
+       if (obj->uxrcd)
+               atomic_dec(&obj->uxrcd->refcnt);
+
        idr_remove_uobj(&ib_uverbs_qp_idr, uobj);
 
        mutex_lock(&file->mutex);
@@ -2860,6 +2870,8 @@ ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
        struct ib_srq                    *srq;
        struct ib_uevent_object          *obj;
        int                               ret = -EINVAL;
+       struct ib_usrq_object            *us;
+       enum ib_srq_type                  srq_type;
 
        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;
@@ -2869,6 +2881,7 @@ ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
                return -EINVAL;
        srq = uobj->object;
        obj = container_of(uobj, struct ib_uevent_object, uobject);
+       srq_type = srq->srq_type;
 
        ret = ib_destroy_srq(srq);
        if (!ret)
@@ -2879,6 +2892,11 @@ ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
        if (ret)
                return ret;
 
+       if (srq_type == IB_SRQT_XRC) {
+               us = container_of(obj, struct ib_usrq_object, uevent);
+               atomic_dec(&us->uxrcd->refcnt);
+       }
+
        idr_remove_uobj(&ib_uverbs_srq_idr, uobj);
 
        mutex_lock(&file->mutex);
index 22192deb88282b51f195521e6468840655bb219e..077fd641b30006e82fb1f425975d35c0968a6c59 100644 (file)
@@ -346,10 +346,13 @@ EXPORT_SYMBOL(ib_destroy_srq);
 static void __ib_shared_qp_event_handler(struct ib_event *event, void *context)
 {
        struct ib_qp *qp = context;
+       unsigned long flags;
 
+       spin_lock_irqsave(&qp->device->event_handler_lock, flags);
        list_for_each_entry(event->element.qp, &qp->open_list, open_list)
                if (event->element.qp->event_handler)
                        event->element.qp->event_handler(event, event->element.qp->qp_context);
+       spin_unlock_irqrestore(&qp->device->event_handler_lock, flags);
 }
 
 static void __ib_insert_xrcd_qp(struct ib_xrcd *xrcd, struct ib_qp *qp)
index 706cf97cbe8f4e963581f322d47b76e1b78739b4..d5d1929753e4fdc95bbdc67625f5f0521ffeb32f 100644 (file)
@@ -155,6 +155,8 @@ void c2_ae_event(struct c2_dev *c2dev, u32 mq_index)
        enum c2_event_id event_id;
        unsigned long flags;
        int status;
+       struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_event.local_addr;
+       struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_event.remote_addr;
 
        /*
         * retrieve the message
@@ -206,10 +208,10 @@ void c2_ae_event(struct c2_dev *c2dev, u32 mq_index)
                case CCAE_ACTIVE_CONNECT_RESULTS:
                        res = &wr->ae.ae_active_connect_results;
                        cm_event.event = IW_CM_EVENT_CONNECT_REPLY;
-                       cm_event.local_addr.sin_addr.s_addr = res->laddr;
-                       cm_event.remote_addr.sin_addr.s_addr = res->raddr;
-                       cm_event.local_addr.sin_port = res->lport;
-                       cm_event.remote_addr.sin_port = res->rport;
+                       laddr->sin_addr.s_addr = res->laddr;
+                       raddr->sin_addr.s_addr = res->raddr;
+                       laddr->sin_port = res->lport;
+                       raddr->sin_port = res->rport;
                        if (status == 0) {
                                cm_event.private_data_len =
                                        be32_to_cpu(res->private_data_length);
@@ -281,10 +283,10 @@ void c2_ae_event(struct c2_dev *c2dev, u32 mq_index)
                }
                cm_event.event = IW_CM_EVENT_CONNECT_REQUEST;
                cm_event.provider_data = (void*)(unsigned long)req->cr_handle;
-               cm_event.local_addr.sin_addr.s_addr = req->laddr;
-               cm_event.remote_addr.sin_addr.s_addr = req->raddr;
-               cm_event.local_addr.sin_port = req->lport;
-               cm_event.remote_addr.sin_port = req->rport;
+               laddr->sin_addr.s_addr = req->laddr;
+               raddr->sin_addr.s_addr = req->raddr;
+               laddr->sin_port = req->lport;
+               raddr->sin_port = req->rport;
                cm_event.private_data_len =
                        be32_to_cpu(req->private_data_length);
                cm_event.private_data = req->private_data;
index 95f58ab1e0b881d7913bc97c9f9ae0e2ac15ad43..23bfa94fbd4e4094b906c83f2504e00bc6ebab21 100644 (file)
@@ -46,6 +46,10 @@ int c2_llp_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
        struct c2wr_qp_connect_req *wr; /* variable size needs a malloc. */
        struct c2_vq_req *vq_req;
        int err;
+       struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->remote_addr;
+
+       if (cm_id->remote_addr.ss_family != AF_INET)
+               return -ENOSYS;
 
        ibqp = c2_get_qp(cm_id->device, iw_param->qpn);
        if (!ibqp)
@@ -91,8 +95,8 @@ int c2_llp_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
        wr->rnic_handle = c2dev->adapter_handle;
        wr->qp_handle = qp->adapter_handle;
 
-       wr->remote_addr = cm_id->remote_addr.sin_addr.s_addr;
-       wr->remote_port = cm_id->remote_addr.sin_port;
+       wr->remote_addr = raddr->sin_addr.s_addr;
+       wr->remote_port = raddr->sin_port;
 
        /*
         * Move any private data from the callers's buf into
@@ -135,6 +139,10 @@ int c2_llp_service_create(struct iw_cm_id *cm_id, int backlog)
        struct c2wr_ep_listen_create_rep *reply;
        struct c2_vq_req *vq_req;
        int err;
+       struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->local_addr;
+
+       if (cm_id->local_addr.ss_family != AF_INET)
+               return -ENOSYS;
 
        c2dev = to_c2dev(cm_id->device);
        if (c2dev == NULL)
@@ -153,8 +161,8 @@ int c2_llp_service_create(struct iw_cm_id *cm_id, int backlog)
        c2_wr_set_id(&wr, CCWR_EP_LISTEN_CREATE);
        wr.hdr.context = (u64) (unsigned long) vq_req;
        wr.rnic_handle = c2dev->adapter_handle;
-       wr.local_addr = cm_id->local_addr.sin_addr.s_addr;
-       wr.local_port = cm_id->local_addr.sin_port;
+       wr.local_addr = laddr->sin_addr.s_addr;
+       wr.local_port = laddr->sin_port;
        wr.backlog = cpu_to_be32(backlog);
        wr.user_context = (u64) (unsigned long) cm_id;
 
index 3e094cd6a0e345e1e239c96348e162cc53184fa7..095bb046e2c82eb8ded011eba72a486b376b444f 100644 (file)
@@ -721,8 +721,10 @@ static void connect_reply_upcall(struct iwch_ep *ep, int status)
        memset(&event, 0, sizeof(event));
        event.event = IW_CM_EVENT_CONNECT_REPLY;
        event.status = status;
-       event.local_addr = ep->com.local_addr;
-       event.remote_addr = ep->com.remote_addr;
+       memcpy(&event.local_addr, &ep->com.local_addr,
+              sizeof(ep->com.local_addr));
+       memcpy(&event.remote_addr, &ep->com.remote_addr,
+              sizeof(ep->com.remote_addr));
 
        if ((status == 0) || (status == -ECONNREFUSED)) {
                event.private_data_len = ep->plen;
@@ -747,8 +749,10 @@ static void connect_request_upcall(struct iwch_ep *ep)
        PDBG("%s ep %p tid %d\n", __func__, ep, ep->hwtid);
        memset(&event, 0, sizeof(event));
        event.event = IW_CM_EVENT_CONNECT_REQUEST;
-       event.local_addr = ep->com.local_addr;
-       event.remote_addr = ep->com.remote_addr;
+       memcpy(&event.local_addr, &ep->com.local_addr,
+              sizeof(ep->com.local_addr));
+       memcpy(&event.remote_addr, &ep->com.remote_addr,
+              sizeof(ep->com.local_addr));
        event.private_data_len = ep->plen;
        event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
        event.provider_data = ep;
@@ -1872,8 +1876,9 @@ err:
 static int is_loopback_dst(struct iw_cm_id *cm_id)
 {
        struct net_device *dev;
+       struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->remote_addr;
 
-       dev = ip_dev_find(&init_net, cm_id->remote_addr.sin_addr.s_addr);
+       dev = ip_dev_find(&init_net, raddr->sin_addr.s_addr);
        if (!dev)
                return 0;
        dev_put(dev);
@@ -1886,6 +1891,13 @@ int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
        struct iwch_ep *ep;
        struct rtable *rt;
        int err = 0;
+       struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->local_addr;
+       struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->remote_addr;
+
+       if (cm_id->remote_addr.ss_family != PF_INET) {
+               err = -ENOSYS;
+               goto out;
+       }
 
        if (is_loopback_dst(cm_id)) {
                err = -ENOSYS;
@@ -1929,11 +1941,9 @@ int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
        }
 
        /* find a route */
-       rt = find_route(h->rdev.t3cdev_p,
-                       cm_id->local_addr.sin_addr.s_addr,
-                       cm_id->remote_addr.sin_addr.s_addr,
-                       cm_id->local_addr.sin_port,
-                       cm_id->remote_addr.sin_port, IPTOS_LOWDELAY);
+       rt = find_route(h->rdev.t3cdev_p, laddr->sin_addr.s_addr,
+                       raddr->sin_addr.s_addr, laddr->sin_port,
+                       raddr->sin_port, IPTOS_LOWDELAY);
        if (!rt) {
                printk(KERN_ERR MOD "%s - cannot find route.\n", __func__);
                err = -EHOSTUNREACH;
@@ -1941,7 +1951,7 @@ int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
        }
        ep->dst = &rt->dst;
        ep->l2t = t3_l2t_get(ep->com.tdev, ep->dst, NULL,
-                            &cm_id->remote_addr.sin_addr.s_addr);
+                            &raddr->sin_addr.s_addr);
        if (!ep->l2t) {
                printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
                err = -ENOMEM;
@@ -1950,8 +1960,10 @@ int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 
        state_set(&ep->com, CONNECTING);
        ep->tos = IPTOS_LOWDELAY;
-       ep->com.local_addr = cm_id->local_addr;
-       ep->com.remote_addr = cm_id->remote_addr;
+       memcpy(&ep->com.local_addr, &cm_id->local_addr,
+              sizeof(ep->com.local_addr));
+       memcpy(&ep->com.remote_addr, &cm_id->remote_addr,
+              sizeof(ep->com.remote_addr));
 
        /* send connect request to rnic */
        err = send_connect(ep);
@@ -1979,6 +1991,11 @@ int iwch_create_listen(struct iw_cm_id *cm_id, int backlog)
 
        might_sleep();
 
+       if (cm_id->local_addr.ss_family != PF_INET) {
+               err = -ENOSYS;
+               goto fail1;
+       }
+
        ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
        if (!ep) {
                printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
@@ -1990,7 +2007,8 @@ int iwch_create_listen(struct iw_cm_id *cm_id, int backlog)
        cm_id->add_ref(cm_id);
        ep->com.cm_id = cm_id;
        ep->backlog = backlog;
-       ep->com.local_addr = cm_id->local_addr;
+       memcpy(&ep->com.local_addr, &cm_id->local_addr,
+              sizeof(ep->com.local_addr));
 
        /*
         * Allocate a server TID.
index 6b7e6c543534e871867bee21a0e387ea98e3de54..d4e8983fba537d71b8da25b5b0768f088678722d 100644 (file)
@@ -1,6 +1,6 @@
 config INFINIBAND_CXGB4
        tristate "Chelsio T4 RDMA Driver"
-       depends on CHELSIO_T4 && INET
+       depends on CHELSIO_T4 && INET && (IPV6 || IPV6=n)
        select GENERIC_ALLOCATOR
        ---help---
          This is an iWARP/RDMA driver for the Chelsio T4 1GbE and
index 65c30ea8c1a156f0f7d81bc4e039723b321be8dd..12fef76c791c524454bd9a0c48d5b4a967ac19af 100644 (file)
@@ -44,6 +44,8 @@
 #include <net/netevent.h>
 #include <net/route.h>
 #include <net/tcp.h>
+#include <net/ip6_route.h>
+#include <net/addrconf.h>
 
 #include "iw_cxgb4.h"
 
@@ -330,22 +332,80 @@ static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp)
        } else {
                skb = alloc_skb(len, gfp);
        }
+       t4_set_arp_err_handler(skb, NULL, NULL);
        return skb;
 }
 
-static struct rtable *find_route(struct c4iw_dev *dev, __be32 local_ip,
+static struct net_device *get_real_dev(struct net_device *egress_dev)
+{
+       struct net_device *phys_dev = egress_dev;
+       if (egress_dev->priv_flags & IFF_802_1Q_VLAN)
+               phys_dev = vlan_dev_real_dev(egress_dev);
+       return phys_dev;
+}
+
+static int our_interface(struct c4iw_dev *dev, struct net_device *egress_dev)
+{
+       int i;
+
+       egress_dev = get_real_dev(egress_dev);
+       for (i = 0; i < dev->rdev.lldi.nports; i++)
+               if (dev->rdev.lldi.ports[i] == egress_dev)
+                       return 1;
+       return 0;
+}
+
+static struct dst_entry *find_route6(struct c4iw_dev *dev, __u8 *local_ip,
+                                    __u8 *peer_ip, __be16 local_port,
+                                    __be16 peer_port, u8 tos,
+                                    __u32 sin6_scope_id)
+{
+       struct dst_entry *dst = NULL;
+
+       if (IS_ENABLED(CONFIG_IPV6)) {
+               struct flowi6 fl6;
+
+               memset(&fl6, 0, sizeof(fl6));
+               memcpy(&fl6.daddr, peer_ip, 16);
+               memcpy(&fl6.saddr, local_ip, 16);
+               if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
+                       fl6.flowi6_oif = sin6_scope_id;
+               dst = ip6_route_output(&init_net, NULL, &fl6);
+               if (!dst)
+                       goto out;
+               if (!our_interface(dev, ip6_dst_idev(dst)->dev) &&
+                   !(ip6_dst_idev(dst)->dev->flags & IFF_LOOPBACK)) {
+                       dst_release(dst);
+                       dst = NULL;
+               }
+       }
+
+out:
+       return dst;
+}
+
+static struct dst_entry *find_route(struct c4iw_dev *dev, __be32 local_ip,
                                 __be32 peer_ip, __be16 local_port,
                                 __be16 peer_port, u8 tos)
 {
        struct rtable *rt;
        struct flowi4 fl4;
+       struct neighbour *n;
 
        rt = ip_route_output_ports(&init_net, &fl4, NULL, peer_ip, local_ip,
                                   peer_port, local_port, IPPROTO_TCP,
                                   tos, 0);
        if (IS_ERR(rt))
                return NULL;
-       return rt;
+       n = dst_neigh_lookup(&rt->dst, &peer_ip);
+       if (!n)
+               return NULL;
+       if (!our_interface(dev, n->dev)) {
+               dst_release(&rt->dst);
+               return NULL;
+       }
+       neigh_release(n);
+       return &rt->dst;
 }
 
 static void arp_failure_discard(void *handle, struct sk_buff *skb)
@@ -487,7 +547,7 @@ static unsigned int select_ntuple(struct c4iw_dev *dev, struct dst_entry *dst,
                        ntuple |= FILTER_SEL_VLAN_NONE << FILTER_SEL_WIDTH_P_FC;
                else {
                        ntuple |= l2t->vlan << FILTER_SEL_WIDTH_P_FC;
-                       ntuple |= 1 << FILTER_SEL_WIDTH_VLD_TAG_P_FC;
+                       ntuple |= 1 << FILTER_SEL_WIDTH_TAG_P_FC;
                }
                ntuple |= l2t->lport << S_PORT | IPPROTO_TCP <<
                          FILTER_SEL_WIDTH_VLD_TAG_P_FC;
@@ -512,15 +572,28 @@ static int send_connect(struct c4iw_ep *ep)
 {
        struct cpl_act_open_req *req;
        struct cpl_t5_act_open_req *t5_req;
+       struct cpl_act_open_req6 *req6;
+       struct cpl_t5_act_open_req6 *t5_req6;
        struct sk_buff *skb;
        u64 opt0;
        u32 opt2;
        unsigned int mtu_idx;
        int wscale;
-       int size = is_t4(ep->com.dev->rdev.lldi.adapter_type) ?
-               sizeof(struct cpl_act_open_req) :
-               sizeof(struct cpl_t5_act_open_req);
-       int wrlen = roundup(size, 16);
+       int wrlen;
+       int sizev4 = is_t4(ep->com.dev->rdev.lldi.adapter_type) ?
+                               sizeof(struct cpl_act_open_req) :
+                               sizeof(struct cpl_t5_act_open_req);
+       int sizev6 = is_t4(ep->com.dev->rdev.lldi.adapter_type) ?
+                               sizeof(struct cpl_act_open_req6) :
+                               sizeof(struct cpl_t5_act_open_req6);
+       struct sockaddr_in *la = (struct sockaddr_in *)&ep->com.local_addr;
+       struct sockaddr_in *ra = (struct sockaddr_in *)&ep->com.remote_addr;
+       struct sockaddr_in6 *la6 = (struct sockaddr_in6 *)&ep->com.local_addr;
+       struct sockaddr_in6 *ra6 = (struct sockaddr_in6 *)&ep->com.remote_addr;
+
+       wrlen = (ep->com.remote_addr.ss_family == AF_INET) ?
+                       roundup(sizev4, 16) :
+                       roundup(sizev6, 16);
 
        PDBG("%s ep %p atid %u\n", __func__, ep, ep->atid);
 
@@ -557,33 +630,82 @@ static int send_connect(struct c4iw_ep *ep)
        t4_set_arp_err_handler(skb, NULL, act_open_req_arp_failure);
 
        if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) {
-               req = (struct cpl_act_open_req *) skb_put(skb, wrlen);
-               INIT_TP_WR(req, 0);
-               OPCODE_TID(req) = cpu_to_be32(
-                               MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
-                               ((ep->rss_qid << 14) | ep->atid)));
-               req->local_port = ep->com.local_addr.sin_port;
-               req->peer_port = ep->com.remote_addr.sin_port;
-               req->local_ip = ep->com.local_addr.sin_addr.s_addr;
-               req->peer_ip = ep->com.remote_addr.sin_addr.s_addr;
-               req->opt0 = cpu_to_be64(opt0);
-               req->params = cpu_to_be32(select_ntuple(ep->com.dev,
-                                       ep->dst, ep->l2t));
-               req->opt2 = cpu_to_be32(opt2);
+               if (ep->com.remote_addr.ss_family == AF_INET) {
+                       req = (struct cpl_act_open_req *) skb_put(skb, wrlen);
+                       INIT_TP_WR(req, 0);
+                       OPCODE_TID(req) = cpu_to_be32(
+                                       MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
+                                       ((ep->rss_qid << 14) | ep->atid)));
+                       req->local_port = la->sin_port;
+                       req->peer_port = ra->sin_port;
+                       req->local_ip = la->sin_addr.s_addr;
+                       req->peer_ip = ra->sin_addr.s_addr;
+                       req->opt0 = cpu_to_be64(opt0);
+                       req->params = cpu_to_be32(select_ntuple(ep->com.dev,
+                                               ep->dst, ep->l2t));
+                       req->opt2 = cpu_to_be32(opt2);
+               } else {
+                       req6 = (struct cpl_act_open_req6 *)skb_put(skb, wrlen);
+
+                       INIT_TP_WR(req6, 0);
+                       OPCODE_TID(req6) = cpu_to_be32(
+                                          MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
+                                          ((ep->rss_qid<<14)|ep->atid)));
+                       req6->local_port = la6->sin6_port;
+                       req6->peer_port = ra6->sin6_port;
+                       req6->local_ip_hi = *((__be64 *)
+                                               (la6->sin6_addr.s6_addr));
+                       req6->local_ip_lo = *((__be64 *)
+                                               (la6->sin6_addr.s6_addr + 8));
+                       req6->peer_ip_hi = *((__be64 *)
+                                               (ra6->sin6_addr.s6_addr));
+                       req6->peer_ip_lo = *((__be64 *)
+                                               (ra6->sin6_addr.s6_addr + 8));
+                       req6->opt0 = cpu_to_be64(opt0);
+                       req6->params = cpu_to_be32(
+                                       select_ntuple(ep->com.dev, ep->dst,
+                                                     ep->l2t));
+                       req6->opt2 = cpu_to_be32(opt2);
+               }
        } else {
-               t5_req = (struct cpl_t5_act_open_req *) skb_put(skb, wrlen);
-               INIT_TP_WR(t5_req, 0);
-               OPCODE_TID(t5_req) = cpu_to_be32(
+               if (ep->com.remote_addr.ss_family == AF_INET) {
+                       t5_req = (struct cpl_t5_act_open_req *)
+                                skb_put(skb, wrlen);
+                       INIT_TP_WR(t5_req, 0);
+                       OPCODE_TID(t5_req) = cpu_to_be32(
                                        MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
                                        ((ep->rss_qid << 14) | ep->atid)));
-               t5_req->local_port = ep->com.local_addr.sin_port;
-               t5_req->peer_port = ep->com.remote_addr.sin_port;
-               t5_req->local_ip = ep->com.local_addr.sin_addr.s_addr;
-               t5_req->peer_ip = ep->com.remote_addr.sin_addr.s_addr;
-               t5_req->opt0 = cpu_to_be64(opt0);
-               t5_req->params = cpu_to_be64(V_FILTER_TUPLE(
-                               select_ntuple(ep->com.dev, ep->dst, ep->l2t)));
-               t5_req->opt2 = cpu_to_be32(opt2);
+                       t5_req->local_port = la->sin_port;
+                       t5_req->peer_port = ra->sin_port;
+                       t5_req->local_ip = la->sin_addr.s_addr;
+                       t5_req->peer_ip = ra->sin_addr.s_addr;
+                       t5_req->opt0 = cpu_to_be64(opt0);
+                       t5_req->params = cpu_to_be64(V_FILTER_TUPLE(
+                                               select_ntuple(ep->com.dev,
+                                               ep->dst, ep->l2t)));
+                       t5_req->opt2 = cpu_to_be32(opt2);
+               } else {
+                       t5_req6 = (struct cpl_t5_act_open_req6 *)
+                                 skb_put(skb, wrlen);
+                       INIT_TP_WR(t5_req6, 0);
+                       OPCODE_TID(t5_req6) = cpu_to_be32(
+                                             MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
+                                             ((ep->rss_qid<<14)|ep->atid)));
+                       t5_req6->local_port = la6->sin6_port;
+                       t5_req6->peer_port = ra6->sin6_port;
+                       t5_req6->local_ip_hi = *((__be64 *)
+                                               (la6->sin6_addr.s6_addr));
+                       t5_req6->local_ip_lo = *((__be64 *)
+                                               (la6->sin6_addr.s6_addr + 8));
+                       t5_req6->peer_ip_hi = *((__be64 *)
+                                               (ra6->sin6_addr.s6_addr));
+                       t5_req6->peer_ip_lo = *((__be64 *)
+                                               (ra6->sin6_addr.s6_addr + 8));
+                       t5_req6->opt0 = cpu_to_be64(opt0);
+                       t5_req6->params = (__force __be64)cpu_to_be32(
+                               select_ntuple(ep->com.dev, ep->dst, ep->l2t));
+                       t5_req6->opt2 = cpu_to_be32(opt2);
+               }
        }
 
        set_bit(ACT_OPEN_REQ, &ep->com.history);
@@ -952,8 +1074,10 @@ static void connect_reply_upcall(struct c4iw_ep *ep, int status)
        memset(&event, 0, sizeof(event));
        event.event = IW_CM_EVENT_CONNECT_REPLY;
        event.status = status;
-       event.local_addr = ep->com.local_addr;
-       event.remote_addr = ep->com.remote_addr;
+       memcpy(&event.local_addr, &ep->com.local_addr,
+              sizeof(ep->com.local_addr));
+       memcpy(&event.remote_addr, &ep->com.remote_addr,
+              sizeof(ep->com.remote_addr));
 
        if ((status == 0) || (status == -ECONNREFUSED)) {
                if (!ep->tried_with_mpa_v1) {
@@ -989,8 +1113,10 @@ static void connect_request_upcall(struct c4iw_ep *ep)
        PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
        memset(&event, 0, sizeof(event));
        event.event = IW_CM_EVENT_CONNECT_REQUEST;
-       event.local_addr = ep->com.local_addr;
-       event.remote_addr = ep->com.remote_addr;
+       memcpy(&event.local_addr, &ep->com.local_addr,
+              sizeof(ep->com.local_addr));
+       memcpy(&event.remote_addr, &ep->com.remote_addr,
+              sizeof(ep->com.remote_addr));
        event.provider_data = ep;
        if (!ep->tried_with_mpa_v1) {
                /* this means MPA_v2 is used */
@@ -1447,10 +1573,9 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
                               " qpid %u ep %p state %d tid %u status %d\n",
                               __func__, ep->com.qp->wq.sq.qid, ep,
                               state_read(&ep->com), ep->hwtid, status);
-               attrs.next_state = C4IW_QP_STATE_ERROR;
+               attrs.next_state = C4IW_QP_STATE_TERMINATE;
                c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
-                              C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
-               c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
+                              C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
                break;
        }
        default:
@@ -1498,6 +1623,7 @@ static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
        struct fw_ofld_connection_wr *req;
        unsigned int mtu_idx;
        int wscale;
+       struct sockaddr_in *sin;
 
        skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
        req = (struct fw_ofld_connection_wr *)__skb_put(skb, sizeof(*req));
@@ -1506,10 +1632,12 @@ static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
        req->len16_pkd = htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*req), 16)));
        req->le.filter = cpu_to_be32(select_ntuple(ep->com.dev, ep->dst,
                                     ep->l2t));
-       req->le.lport = ep->com.local_addr.sin_port;
-       req->le.pport = ep->com.remote_addr.sin_port;
-       req->le.u.ipv4.lip = ep->com.local_addr.sin_addr.s_addr;
-       req->le.u.ipv4.pip = ep->com.remote_addr.sin_addr.s_addr;
+       sin = (struct sockaddr_in *)&ep->com.local_addr;
+       req->le.lport = sin->sin_port;
+       req->le.u.ipv4.lip = sin->sin_addr.s_addr;
+       sin = (struct sockaddr_in *)&ep->com.remote_addr;
+       req->le.pport = sin->sin_port;
+       req->le.u.ipv4.pip = sin->sin_addr.s_addr;
        req->tcb.t_state_to_astid =
                        htonl(V_FW_OFLD_CONNECTION_WR_T_STATE(TCP_SYN_SENT) |
                        V_FW_OFLD_CONNECTION_WR_ASTID(atid));
@@ -1560,14 +1688,98 @@ static inline int act_open_has_tid(int status)
 
 #define ACT_OPEN_RETRY_COUNT 2
 
+static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip,
+                    struct dst_entry *dst, struct c4iw_dev *cdev,
+                    bool clear_mpa_v1)
+{
+       struct neighbour *n;
+       int err, step;
+       struct net_device *pdev;
+
+       n = dst_neigh_lookup(dst, peer_ip);
+       if (!n)
+               return -ENODEV;
+
+       rcu_read_lock();
+       err = -ENOMEM;
+       if (n->dev->flags & IFF_LOOPBACK) {
+               if (iptype == 4)
+                       pdev = ip_dev_find(&init_net, *(__be32 *)peer_ip);
+               else if (IS_ENABLED(CONFIG_IPV6))
+                       for_each_netdev(&init_net, pdev) {
+                               if (ipv6_chk_addr(&init_net,
+                                                 (struct in6_addr *)peer_ip,
+                                                 pdev, 1))
+                                       break;
+                       }
+               else
+                       pdev = NULL;
+
+               if (!pdev) {
+                       err = -ENODEV;
+                       goto out;
+               }
+               ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
+                                       n, pdev, 0);
+               if (!ep->l2t)
+                       goto out;
+               ep->mtu = pdev->mtu;
+               ep->tx_chan = cxgb4_port_chan(pdev);
+               ep->smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1;
+               step = cdev->rdev.lldi.ntxq /
+                       cdev->rdev.lldi.nchan;
+               ep->txq_idx = cxgb4_port_idx(pdev) * step;
+               step = cdev->rdev.lldi.nrxq /
+                       cdev->rdev.lldi.nchan;
+               ep->ctrlq_idx = cxgb4_port_idx(pdev);
+               ep->rss_qid = cdev->rdev.lldi.rxq_ids[
+                       cxgb4_port_idx(pdev) * step];
+               dev_put(pdev);
+       } else {
+               pdev = get_real_dev(n->dev);
+               ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
+                                       n, pdev, 0);
+               if (!ep->l2t)
+                       goto out;
+               ep->mtu = dst_mtu(dst);
+               ep->tx_chan = cxgb4_port_chan(n->dev);
+               ep->smac_idx = (cxgb4_port_viid(n->dev) & 0x7F) << 1;
+               step = cdev->rdev.lldi.ntxq /
+                       cdev->rdev.lldi.nchan;
+               ep->txq_idx = cxgb4_port_idx(n->dev) * step;
+               ep->ctrlq_idx = cxgb4_port_idx(n->dev);
+               step = cdev->rdev.lldi.nrxq /
+                       cdev->rdev.lldi.nchan;
+               ep->rss_qid = cdev->rdev.lldi.rxq_ids[
+                       cxgb4_port_idx(n->dev) * step];
+
+               if (clear_mpa_v1) {
+                       ep->retry_with_mpa_v1 = 0;
+                       ep->tried_with_mpa_v1 = 0;
+               }
+       }
+       err = 0;
+out:
+       rcu_read_unlock();
+
+       neigh_release(n);
+
+       return err;
+}
+
 static int c4iw_reconnect(struct c4iw_ep *ep)
 {
        int err = 0;
-       struct rtable *rt;
-       struct port_info *pi;
-       struct net_device *pdev;
-       int step;
-       struct neighbour *neigh;
+       struct sockaddr_in *laddr = (struct sockaddr_in *)
+                                   &ep->com.cm_id->local_addr;
+       struct sockaddr_in *raddr = (struct sockaddr_in *)
+                                   &ep->com.cm_id->remote_addr;
+       struct sockaddr_in6 *laddr6 = (struct sockaddr_in6 *)
+                                     &ep->com.cm_id->local_addr;
+       struct sockaddr_in6 *raddr6 = (struct sockaddr_in6 *)
+                                     &ep->com.cm_id->remote_addr;
+       int iptype;
+       __u8 *ra;
 
        PDBG("%s qp %p cm_id %p\n", __func__, ep->com.qp, ep->com.cm_id);
        init_timer(&ep->timer);
@@ -1584,57 +1796,28 @@ static int c4iw_reconnect(struct c4iw_ep *ep)
        insert_handle(ep->com.dev, &ep->com.dev->atid_idr, ep, ep->atid);
 
        /* find a route */
-       rt = find_route(ep->com.dev,
-                       ep->com.cm_id->local_addr.sin_addr.s_addr,
-                       ep->com.cm_id->remote_addr.sin_addr.s_addr,
-                       ep->com.cm_id->local_addr.sin_port,
-                       ep->com.cm_id->remote_addr.sin_port, 0);
-       if (!rt) {
+       if (ep->com.cm_id->local_addr.ss_family == AF_INET) {
+               ep->dst = find_route(ep->com.dev, laddr->sin_addr.s_addr,
+                                    raddr->sin_addr.s_addr, laddr->sin_port,
+                                    raddr->sin_port, 0);
+               iptype = 4;
+               ra = (__u8 *)&raddr->sin_addr;
+       } else {
+               ep->dst = find_route6(ep->com.dev, laddr6->sin6_addr.s6_addr,
+                                     raddr6->sin6_addr.s6_addr,
+                                     laddr6->sin6_port, raddr6->sin6_port, 0,
+                                     raddr6->sin6_scope_id);
+               iptype = 6;
+               ra = (__u8 *)&raddr6->sin6_addr;
+       }
+       if (!ep->dst) {
                pr_err("%s - cannot find route.\n", __func__);
                err = -EHOSTUNREACH;
                goto fail3;
        }
-       ep->dst = &rt->dst;
-
-       neigh = dst_neigh_lookup(ep->dst,
-                       &ep->com.cm_id->remote_addr.sin_addr.s_addr);
-       if (!neigh) {
-               pr_err("%s - cannot alloc neigh.\n", __func__);
-               err = -ENOMEM;
-               goto fail4;
-       }
-
-       /* get a l2t entry */
-       if (neigh->dev->flags & IFF_LOOPBACK) {
-               PDBG("%s LOOPBACK\n", __func__);
-               pdev = ip_dev_find(&init_net,
-                               ep->com.cm_id->remote_addr.sin_addr.s_addr);
-               ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t,
-                               neigh, pdev, 0);
-               pi = (struct port_info *)netdev_priv(pdev);
-               ep->mtu = pdev->mtu;
-               ep->tx_chan = cxgb4_port_chan(pdev);
-               ep->smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1;
-               dev_put(pdev);
-       } else {
-               ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t,
-                               neigh, neigh->dev, 0);
-               pi = (struct port_info *)netdev_priv(neigh->dev);
-               ep->mtu = dst_mtu(ep->dst);
-               ep->tx_chan = cxgb4_port_chan(neigh->dev);
-               ep->smac_idx = (cxgb4_port_viid(neigh->dev) &
-                               0x7F) << 1;
-       }
-
-       step = ep->com.dev->rdev.lldi.ntxq / ep->com.dev->rdev.lldi.nchan;
-       ep->txq_idx = pi->port_id * step;
-       ep->ctrlq_idx = pi->port_id;
-       step = ep->com.dev->rdev.lldi.nrxq / ep->com.dev->rdev.lldi.nchan;
-       ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[pi->port_id * step];
-
-       if (!ep->l2t) {
+       err = import_ep(ep, iptype, ra, ep->dst, ep->com.dev, false);
+       if (err) {
                pr_err("%s - cannot alloc l2e.\n", __func__);
-               err = -ENOMEM;
                goto fail4;
        }
 
@@ -1677,8 +1860,16 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
                                        ntohl(rpl->atid_status)));
        struct tid_info *t = dev->rdev.lldi.tids;
        int status = GET_AOPEN_STATUS(ntohl(rpl->atid_status));
+       struct sockaddr_in *la;
+       struct sockaddr_in *ra;
+       struct sockaddr_in6 *la6;
+       struct sockaddr_in6 *ra6;
 
        ep = lookup_atid(t, atid);
+       la = (struct sockaddr_in *)&ep->com.local_addr;
+       ra = (struct sockaddr_in *)&ep->com.remote_addr;
+       la6 = (struct sockaddr_in6 *)&ep->com.local_addr;
+       ra6 = (struct sockaddr_in6 *)&ep->com.remote_addr;
 
        PDBG("%s ep %p atid %u status %u errno %d\n", __func__, ep, atid,
             status, status2errno(status));
@@ -1699,10 +1890,11 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
        case CPL_ERR_CONN_TIMEDOUT:
                break;
        case CPL_ERR_TCAM_FULL:
+               mutex_lock(&dev->rdev.stats.lock);
                dev->rdev.stats.tcam_full++;
-               if (dev->rdev.lldi.enable_fw_ofld_conn) {
-                       mutex_lock(&dev->rdev.stats.lock);
-                       mutex_unlock(&dev->rdev.stats.lock);
+               mutex_unlock(&dev->rdev.stats.lock);
+               if (ep->com.local_addr.ss_family == AF_INET &&
+                   dev->rdev.lldi.enable_fw_ofld_conn) {
                        send_fw_act_open_req(ep,
                                             GET_TID_TID(GET_AOPEN_ATID(
                                             ntohl(rpl->atid_status))));
@@ -1722,13 +1914,17 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
                }
                break;
        default:
-               printk(KERN_INFO MOD "Active open failure - "
-                      "atid %u status %u errno %d %pI4:%u->%pI4:%u\n",
-                      atid, status, status2errno(status),
-                      &ep->com.local_addr.sin_addr.s_addr,
-                      ntohs(ep->com.local_addr.sin_port),
-                      &ep->com.remote_addr.sin_addr.s_addr,
-                      ntohs(ep->com.remote_addr.sin_port));
+               if (ep->com.local_addr.ss_family == AF_INET) {
+                       pr_info("Active open failure - atid %u status %u errno %d %pI4:%u->%pI4:%u\n",
+                               atid, status, status2errno(status),
+                               &la->sin_addr.s_addr, ntohs(la->sin_port),
+                               &ra->sin_addr.s_addr, ntohs(ra->sin_port));
+               } else {
+                       pr_info("Active open failure - atid %u status %u errno %d %pI6:%u->%pI6:%u\n",
+                               atid, status, status2errno(status),
+                               la6->sin6_addr.s6_addr, ntohs(la6->sin6_port),
+                               ra6->sin6_addr.s6_addr, ntohs(ra6->sin6_port));
+               }
                break;
        }
 
@@ -1766,27 +1962,6 @@ out:
        return 0;
 }
 
-static int listen_stop(struct c4iw_listen_ep *ep)
-{
-       struct sk_buff *skb;
-       struct cpl_close_listsvr_req *req;
-
-       PDBG("%s ep %p\n", __func__, ep);
-       skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
-       if (!skb) {
-               printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__);
-               return -ENOMEM;
-       }
-       req = (struct cpl_close_listsvr_req *) skb_put(skb, sizeof(*req));
-       INIT_TP_WR(req, 0);
-       OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ,
-                                                   ep->stid));
-       req->reply_ctrl = cpu_to_be16(
-                         QUEUENO(ep->com.dev->rdev.lldi.rxq_ids[0]));
-       set_wr_txq(skb, CPL_PRIORITY_SETUP, 0);
-       return c4iw_ofld_send(&ep->com.dev->rdev, skb);
-}
-
 static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
 {
        struct cpl_close_listsvr_rpl *rpl = cplhdr(skb);
@@ -1799,7 +1974,7 @@ static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
        return 0;
 }
 
-static void accept_cr(struct c4iw_ep *ep, __be32 peer_ip, struct sk_buff *skb,
+static void accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
                      struct cpl_pass_accept_req *req)
 {
        struct cpl_pass_accept_rpl *rpl;
@@ -1851,16 +2026,15 @@ static void accept_cr(struct c4iw_ep *ep, __be32 peer_ip, struct sk_buff *skb,
        rpl->opt0 = cpu_to_be64(opt0);
        rpl->opt2 = cpu_to_be32(opt2);
        set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);
+       t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
        c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
 
        return;
 }
 
-static void reject_cr(struct c4iw_dev *dev, u32 hwtid, __be32 peer_ip,
-                     struct sk_buff *skb)
+static void reject_cr(struct c4iw_dev *dev, u32 hwtid, struct sk_buff *skb)
 {
-       PDBG("%s c4iw_dev %p tid %u peer_ip %x\n", __func__, dev, hwtid,
-            peer_ip);
+       PDBG("%s c4iw_dev %p tid %u\n", __func__, dev, hwtid);
        BUG_ON(skb_cloned(skb));
        skb_trim(skb, sizeof(struct cpl_tid_release));
        skb_get(skb);
@@ -1868,95 +2042,38 @@ static void reject_cr(struct c4iw_dev *dev, u32 hwtid, __be32 peer_ip,
        return;
 }
 
-static void get_4tuple(struct cpl_pass_accept_req *req,
-                      __be32 *local_ip, __be32 *peer_ip,
+static void get_4tuple(struct cpl_pass_accept_req *req, int *iptype,
+                      __u8 *local_ip, __u8 *peer_ip,
                       __be16 *local_port, __be16 *peer_port)
 {
        int eth_len = G_ETH_HDR_LEN(be32_to_cpu(req->hdr_len));
        int ip_len = G_IP_HDR_LEN(be32_to_cpu(req->hdr_len));
        struct iphdr *ip = (struct iphdr *)((u8 *)(req + 1) + eth_len);
+       struct ipv6hdr *ip6 = (struct ipv6hdr *)((u8 *)(req + 1) + eth_len);
        struct tcphdr *tcp = (struct tcphdr *)
                             ((u8 *)(req + 1) + eth_len + ip_len);
 
-       PDBG("%s saddr 0x%x daddr 0x%x sport %u dport %u\n", __func__,
-            ntohl(ip->saddr), ntohl(ip->daddr), ntohs(tcp->source),
-            ntohs(tcp->dest));
-
-       *peer_ip = ip->saddr;
-       *local_ip = ip->daddr;
+       if (ip->version == 4) {
+               PDBG("%s saddr 0x%x daddr 0x%x sport %u dport %u\n", __func__,
+                    ntohl(ip->saddr), ntohl(ip->daddr), ntohs(tcp->source),
+                    ntohs(tcp->dest));
+               *iptype = 4;
+               memcpy(peer_ip, &ip->saddr, 4);
+               memcpy(local_ip, &ip->daddr, 4);
+       } else {
+               PDBG("%s saddr %pI6 daddr %pI6 sport %u dport %u\n", __func__,
+                    ip6->saddr.s6_addr, ip6->daddr.s6_addr, ntohs(tcp->source),
+                    ntohs(tcp->dest));
+               *iptype = 6;
+               memcpy(peer_ip, ip6->saddr.s6_addr, 16);
+               memcpy(local_ip, ip6->daddr.s6_addr, 16);
+       }
        *peer_port = tcp->source;
        *local_port = tcp->dest;
 
        return;
 }
 
-static int import_ep(struct c4iw_ep *ep, __be32 peer_ip, struct dst_entry *dst,
-                    struct c4iw_dev *cdev, bool clear_mpa_v1)
-{
-       struct neighbour *n;
-       int err, step;
-
-       n = dst_neigh_lookup(dst, &peer_ip);
-       if (!n)
-               return -ENODEV;
-
-       rcu_read_lock();
-       err = -ENOMEM;
-       if (n->dev->flags & IFF_LOOPBACK) {
-               struct net_device *pdev;
-
-               pdev = ip_dev_find(&init_net, peer_ip);
-               if (!pdev) {
-                       err = -ENODEV;
-                       goto out;
-               }
-               ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
-                                       n, pdev, 0);
-               if (!ep->l2t)
-                       goto out;
-               ep->mtu = pdev->mtu;
-               ep->tx_chan = cxgb4_port_chan(pdev);
-               ep->smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1;
-               step = cdev->rdev.lldi.ntxq /
-                       cdev->rdev.lldi.nchan;
-               ep->txq_idx = cxgb4_port_idx(pdev) * step;
-               step = cdev->rdev.lldi.nrxq /
-                       cdev->rdev.lldi.nchan;
-               ep->ctrlq_idx = cxgb4_port_idx(pdev);
-               ep->rss_qid = cdev->rdev.lldi.rxq_ids[
-                       cxgb4_port_idx(pdev) * step];
-               dev_put(pdev);
-       } else {
-               ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
-                                       n, n->dev, 0);
-               if (!ep->l2t)
-                       goto out;
-               ep->mtu = dst_mtu(dst);
-               ep->tx_chan = cxgb4_port_chan(n->dev);
-               ep->smac_idx = (cxgb4_port_viid(n->dev) & 0x7F) << 1;
-               step = cdev->rdev.lldi.ntxq /
-                       cdev->rdev.lldi.nchan;
-               ep->txq_idx = cxgb4_port_idx(n->dev) * step;
-               ep->ctrlq_idx = cxgb4_port_idx(n->dev);
-               step = cdev->rdev.lldi.nrxq /
-                       cdev->rdev.lldi.nchan;
-               ep->rss_qid = cdev->rdev.lldi.rxq_ids[
-                       cxgb4_port_idx(n->dev) * step];
-
-               if (clear_mpa_v1) {
-                       ep->retry_with_mpa_v1 = 0;
-                       ep->tried_with_mpa_v1 = 0;
-               }
-       }
-       err = 0;
-out:
-       rcu_read_unlock();
-
-       neigh_release(n);
-
-       return err;
-}
-
 static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
 {
        struct c4iw_ep *child_ep = NULL, *parent_ep;
@@ -1965,23 +2082,17 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
        struct tid_info *t = dev->rdev.lldi.tids;
        unsigned int hwtid = GET_TID(req);
        struct dst_entry *dst;
-       struct rtable *rt;
-       __be32 local_ip, peer_ip = 0;
+       __u8 local_ip[16], peer_ip[16];
        __be16 local_port, peer_port;
        int err;
        u16 peer_mss = ntohs(req->tcpopt.mss);
+       int iptype;
 
        parent_ep = lookup_stid(t, stid);
        if (!parent_ep) {
                PDBG("%s connect request on invalid stid %d\n", __func__, stid);
                goto reject;
        }
-       get_4tuple(req, &local_ip, &peer_ip, &local_port, &peer_port);
-
-       PDBG("%s parent ep %p hwtid %u laddr 0x%x raddr 0x%x lport %d " \
-            "rport %d peer_mss %d\n", __func__, parent_ep, hwtid,
-            ntohl(local_ip), ntohl(peer_ip), ntohs(local_port),
-            ntohs(peer_port), peer_mss);
 
        if (state_read(&parent_ep->com) != LISTEN) {
                printk(KERN_ERR "%s - listening ep not in LISTEN\n",
@@ -1989,15 +2100,32 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
                goto reject;
        }
 
+       get_4tuple(req, &iptype, local_ip, peer_ip, &local_port, &peer_port);
+
        /* Find output route */
-       rt = find_route(dev, local_ip, peer_ip, local_port, peer_port,
-                       GET_POPEN_TOS(ntohl(req->tos_stid)));
-       if (!rt) {
+       if (iptype == 4)  {
+               PDBG("%s parent ep %p hwtid %u laddr %pI4 raddr %pI4 lport %d rport %d peer_mss %d\n"
+                    , __func__, parent_ep, hwtid,
+                    local_ip, peer_ip, ntohs(local_port),
+                    ntohs(peer_port), peer_mss);
+               dst = find_route(dev, *(__be32 *)local_ip, *(__be32 *)peer_ip,
+                                local_port, peer_port,
+                                GET_POPEN_TOS(ntohl(req->tos_stid)));
+       } else {
+               PDBG("%s parent ep %p hwtid %u laddr %pI6 raddr %pI6 lport %d rport %d peer_mss %d\n"
+                    , __func__, parent_ep, hwtid,
+                    local_ip, peer_ip, ntohs(local_port),
+                    ntohs(peer_port), peer_mss);
+               dst = find_route6(dev, local_ip, peer_ip, local_port, peer_port,
+                                 PASS_OPEN_TOS(ntohl(req->tos_stid)),
+                                 ((struct sockaddr_in6 *)
+                                 &parent_ep->com.local_addr)->sin6_scope_id);
+       }
+       if (!dst) {
                printk(KERN_ERR MOD "%s - failed to find dst entry!\n",
                       __func__);
                goto reject;
        }
-       dst = &rt->dst;
 
        child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL);
        if (!child_ep) {
@@ -2007,7 +2135,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
                goto reject;
        }
 
-       err = import_ep(child_ep, peer_ip, dst, dev, false);
+       err = import_ep(child_ep, iptype, peer_ip, dst, dev, false);
        if (err) {
                printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
                       __func__);
@@ -2022,12 +2150,27 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
        state_set(&child_ep->com, CONNECTING);
        child_ep->com.dev = dev;
        child_ep->com.cm_id = NULL;
-       child_ep->com.local_addr.sin_family = PF_INET;
-       child_ep->com.local_addr.sin_port = local_port;
-       child_ep->com.local_addr.sin_addr.s_addr = local_ip;
-       child_ep->com.remote_addr.sin_family = PF_INET;
-       child_ep->com.remote_addr.sin_port = peer_port;
-       child_ep->com.remote_addr.sin_addr.s_addr = peer_ip;
+       if (iptype == 4) {
+               struct sockaddr_in *sin = (struct sockaddr_in *)
+                       &child_ep->com.local_addr;
+               sin->sin_family = PF_INET;
+               sin->sin_port = local_port;
+               sin->sin_addr.s_addr = *(__be32 *)local_ip;
+               sin = (struct sockaddr_in *)&child_ep->com.remote_addr;
+               sin->sin_family = PF_INET;
+               sin->sin_port = peer_port;
+               sin->sin_addr.s_addr = *(__be32 *)peer_ip;
+       } else {
+               struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)
+                       &child_ep->com.local_addr;
+               sin6->sin6_family = PF_INET6;
+               sin6->sin6_port = local_port;
+               memcpy(sin6->sin6_addr.s6_addr, local_ip, 16);
+               sin6 = (struct sockaddr_in6 *)&child_ep->com.remote_addr;
+               sin6->sin6_family = PF_INET6;
+               sin6->sin6_port = peer_port;
+               memcpy(sin6->sin6_addr.s6_addr, peer_ip, 16);
+       }
        c4iw_get_ep(&parent_ep->com);
        child_ep->parent_ep = parent_ep;
        child_ep->tos = GET_POPEN_TOS(ntohl(req->tos_stid));
@@ -2040,11 +2183,11 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
        init_timer(&child_ep->timer);
        cxgb4_insert_tid(t, child_ep, hwtid);
        insert_handle(dev, &dev->hwtid_idr, child_ep, child_ep->hwtid);
-       accept_cr(child_ep, peer_ip, skb, req);
+       accept_cr(child_ep, skb, req);
        set_bit(PASS_ACCEPT_REQ, &child_ep->com.history);
        goto out;
 reject:
-       reject_cr(dev, hwtid, peer_ip, skb);
+       reject_cr(dev, hwtid, skb);
 out:
        return 0;
 }
@@ -2512,12 +2655,79 @@ err:
        return err;
 }
 
+static int pick_local_ipaddrs(struct c4iw_dev *dev, struct iw_cm_id *cm_id)
+{
+       struct in_device *ind;
+       int found = 0;
+       struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->local_addr;
+       struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->remote_addr;
+
+       ind = in_dev_get(dev->rdev.lldi.ports[0]);
+       if (!ind)
+               return -EADDRNOTAVAIL;
+       for_primary_ifa(ind) {
+               laddr->sin_addr.s_addr = ifa->ifa_address;
+               raddr->sin_addr.s_addr = ifa->ifa_address;
+               found = 1;
+               break;
+       }
+       endfor_ifa(ind);
+       in_dev_put(ind);
+       return found ? 0 : -EADDRNOTAVAIL;
+}
+
+static int get_lladdr(struct net_device *dev, struct in6_addr *addr,
+                     unsigned char banned_flags)
+{
+       struct inet6_dev *idev;
+       int err = -EADDRNOTAVAIL;
+
+       rcu_read_lock();
+       idev = __in6_dev_get(dev);
+       if (idev != NULL) {
+               struct inet6_ifaddr *ifp;
+
+               read_lock_bh(&idev->lock);
+               list_for_each_entry(ifp, &idev->addr_list, if_list) {
+                       if (ifp->scope == IFA_LINK &&
+                           !(ifp->flags & banned_flags)) {
+                               memcpy(addr, &ifp->addr, 16);
+                               err = 0;
+                               break;
+                       }
+               }
+               read_unlock_bh(&idev->lock);
+       }
+       rcu_read_unlock();
+       return err;
+}
+
+static int pick_local_ip6addrs(struct c4iw_dev *dev, struct iw_cm_id *cm_id)
+{
+       struct in6_addr uninitialized_var(addr);
+       struct sockaddr_in6 *la6 = (struct sockaddr_in6 *)&cm_id->local_addr;
+       struct sockaddr_in6 *ra6 = (struct sockaddr_in6 *)&cm_id->remote_addr;
+
+       if (get_lladdr(dev->rdev.lldi.ports[0], &addr, IFA_F_TENTATIVE)) {
+               memcpy(la6->sin6_addr.s6_addr, &addr, 16);
+               memcpy(ra6->sin6_addr.s6_addr, &addr, 16);
+               return 0;
+       }
+       return -EADDRNOTAVAIL;
+}
+
 int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 {
        struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
        struct c4iw_ep *ep;
-       struct rtable *rt;
        int err = 0;
+       struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->local_addr;
+       struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->remote_addr;
+       struct sockaddr_in6 *laddr6 = (struct sockaddr_in6 *)&cm_id->local_addr;
+       struct sockaddr_in6 *raddr6 = (struct sockaddr_in6 *)
+                                     &cm_id->remote_addr;
+       __u8 *ra;
+       int iptype;
 
        if ((conn_param->ord > c4iw_max_read_depth) ||
            (conn_param->ird > c4iw_max_read_depth)) {
@@ -2545,7 +2755,11 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
        ep->com.dev = dev;
        ep->com.cm_id = cm_id;
        ep->com.qp = get_qhp(dev, conn_param->qpn);
-       BUG_ON(!ep->com.qp);
+       if (!ep->com.qp) {
+               PDBG("%s qpn 0x%x not found!\n", __func__, conn_param->qpn);
+               err = -EINVAL;
+               goto fail2;
+       }
        ref_qp(ep);
        PDBG("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn,
             ep->com.qp, cm_id);
@@ -2561,27 +2775,56 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
        }
        insert_handle(dev, &dev->atid_idr, ep, ep->atid);
 
-       PDBG("%s saddr 0x%x sport 0x%x raddr 0x%x rport 0x%x\n", __func__,
-            ntohl(cm_id->local_addr.sin_addr.s_addr),
-            ntohs(cm_id->local_addr.sin_port),
-            ntohl(cm_id->remote_addr.sin_addr.s_addr),
-            ntohs(cm_id->remote_addr.sin_port));
+       if (cm_id->remote_addr.ss_family == AF_INET) {
+               iptype = 4;
+               ra = (__u8 *)&raddr->sin_addr;
 
-       /* find a route */
-       rt = find_route(dev,
-                       cm_id->local_addr.sin_addr.s_addr,
-                       cm_id->remote_addr.sin_addr.s_addr,
-                       cm_id->local_addr.sin_port,
-                       cm_id->remote_addr.sin_port, 0);
-       if (!rt) {
+               /*
+                * Handle loopback requests to INADDR_ANY.
+                */
+               if ((__force int)raddr->sin_addr.s_addr == INADDR_ANY) {
+                       err = pick_local_ipaddrs(dev, cm_id);
+                       if (err)
+                               goto fail2;
+               }
+
+               /* find a route */
+               PDBG("%s saddr %pI4 sport 0x%x raddr %pI4 rport 0x%x\n",
+                    __func__, &laddr->sin_addr, ntohs(laddr->sin_port),
+                    ra, ntohs(raddr->sin_port));
+               ep->dst = find_route(dev, laddr->sin_addr.s_addr,
+                                    raddr->sin_addr.s_addr, laddr->sin_port,
+                                    raddr->sin_port, 0);
+       } else {
+               iptype = 6;
+               ra = (__u8 *)&raddr6->sin6_addr;
+
+               /*
+                * Handle loopback requests to the IPv6 wildcard address (::).
+                */
+               if (ipv6_addr_type(&raddr6->sin6_addr) == IPV6_ADDR_ANY) {
+                       err = pick_local_ip6addrs(dev, cm_id);
+                       if (err)
+                               goto fail2;
+               }
+
+               /* find a route */
+               PDBG("%s saddr %pI6 sport 0x%x raddr %pI6 rport 0x%x\n",
+                    __func__, laddr6->sin6_addr.s6_addr,
+                    ntohs(laddr6->sin6_port),
+                    raddr6->sin6_addr.s6_addr, ntohs(raddr6->sin6_port));
+               ep->dst = find_route6(dev, laddr6->sin6_addr.s6_addr,
+                                     raddr6->sin6_addr.s6_addr,
+                                     laddr6->sin6_port, raddr6->sin6_port, 0,
+                                     raddr6->sin6_scope_id);
+       }
+       if (!ep->dst) {
                printk(KERN_ERR MOD "%s - cannot find route.\n", __func__);
                err = -EHOSTUNREACH;
                goto fail3;
        }
-       ep->dst = &rt->dst;
 
-       err = import_ep(ep, cm_id->remote_addr.sin_addr.s_addr,
-                       ep->dst, ep->com.dev, true);
+       err = import_ep(ep, iptype, ra, ep->dst, ep->com.dev, true);
        if (err) {
                printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
                goto fail4;
@@ -2593,8 +2836,10 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 
        state_set(&ep->com, CONNECTING);
        ep->tos = 0;
-       ep->com.local_addr = cm_id->local_addr;
-       ep->com.remote_addr = cm_id->remote_addr;
+       memcpy(&ep->com.local_addr, &cm_id->local_addr,
+              sizeof(ep->com.local_addr));
+       memcpy(&ep->com.remote_addr, &cm_id->remote_addr,
+              sizeof(ep->com.remote_addr));
 
        /* send connect request to rnic */
        err = send_connect(ep);
@@ -2614,6 +2859,60 @@ out:
        return err;
 }
 
+static int create_server6(struct c4iw_dev *dev, struct c4iw_listen_ep *ep)
+{
+       int err;
+       struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&ep->com.local_addr;
+
+       c4iw_init_wr_wait(&ep->com.wr_wait);
+       err = cxgb4_create_server6(ep->com.dev->rdev.lldi.ports[0],
+                                  ep->stid, &sin6->sin6_addr,
+                                  sin6->sin6_port,
+                                  ep->com.dev->rdev.lldi.rxq_ids[0]);
+       if (!err)
+               err = c4iw_wait_for_reply(&ep->com.dev->rdev,
+                                         &ep->com.wr_wait,
+                                         0, 0, __func__);
+       if (err)
+               pr_err("cxgb4_create_server6/filter failed err %d stid %d laddr %pI6 lport %d\n",
+                      err, ep->stid,
+                      sin6->sin6_addr.s6_addr, ntohs(sin6->sin6_port));
+       return err;
+}
+
+static int create_server4(struct c4iw_dev *dev, struct c4iw_listen_ep *ep)
+{
+       int err;
+       struct sockaddr_in *sin = (struct sockaddr_in *)&ep->com.local_addr;
+
+       if (dev->rdev.lldi.enable_fw_ofld_conn) {
+               do {
+                       err = cxgb4_create_server_filter(
+                               ep->com.dev->rdev.lldi.ports[0], ep->stid,
+                               sin->sin_addr.s_addr, sin->sin_port, 0,
+                               ep->com.dev->rdev.lldi.rxq_ids[0], 0, 0);
+                       if (err == -EBUSY) {
+                               set_current_state(TASK_UNINTERRUPTIBLE);
+                               schedule_timeout(usecs_to_jiffies(100));
+                       }
+               } while (err == -EBUSY);
+       } else {
+               c4iw_init_wr_wait(&ep->com.wr_wait);
+               err = cxgb4_create_server(ep->com.dev->rdev.lldi.ports[0],
+                               ep->stid, sin->sin_addr.s_addr, sin->sin_port,
+                               0, ep->com.dev->rdev.lldi.rxq_ids[0]);
+               if (!err)
+                       err = c4iw_wait_for_reply(&ep->com.dev->rdev,
+                                                 &ep->com.wr_wait,
+                                                 0, 0, __func__);
+       }
+       if (err)
+               pr_err("cxgb4_create_server/filter failed err %d stid %d laddr %pI4 lport %d\n"
+                      , err, ep->stid,
+                      &sin->sin_addr, ntohs(sin->sin_port));
+       return err;
+}
+
 int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
 {
        int err = 0;
@@ -2633,15 +2932,18 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
        ep->com.cm_id = cm_id;
        ep->com.dev = dev;
        ep->backlog = backlog;
-       ep->com.local_addr = cm_id->local_addr;
+       memcpy(&ep->com.local_addr, &cm_id->local_addr,
+              sizeof(ep->com.local_addr));
 
        /*
         * Allocate a server TID.
         */
        if (dev->rdev.lldi.enable_fw_ofld_conn)
-               ep->stid = cxgb4_alloc_sftid(dev->rdev.lldi.tids, PF_INET, ep);
+               ep->stid = cxgb4_alloc_sftid(dev->rdev.lldi.tids,
+                                            cm_id->local_addr.ss_family, ep);
        else
-               ep->stid = cxgb4_alloc_stid(dev->rdev.lldi.tids, PF_INET, ep);
+               ep->stid = cxgb4_alloc_stid(dev->rdev.lldi.tids,
+                                           cm_id->local_addr.ss_family, ep);
 
        if (ep->stid == -1) {
                printk(KERN_ERR MOD "%s - cannot alloc stid.\n", __func__);
@@ -2650,43 +2952,16 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
        }
        insert_handle(dev, &dev->stid_idr, ep, ep->stid);
        state_set(&ep->com, LISTEN);
-       if (dev->rdev.lldi.enable_fw_ofld_conn) {
-               do {
-                       err = cxgb4_create_server_filter(
-                               ep->com.dev->rdev.lldi.ports[0], ep->stid,
-                               ep->com.local_addr.sin_addr.s_addr,
-                               ep->com.local_addr.sin_port,
-                               0,
-                               ep->com.dev->rdev.lldi.rxq_ids[0],
-                               0,
-                               0);
-                       if (err == -EBUSY) {
-                               set_current_state(TASK_UNINTERRUPTIBLE);
-                               schedule_timeout(usecs_to_jiffies(100));
-                       }
-               } while (err == -EBUSY);
-       } else {
-               c4iw_init_wr_wait(&ep->com.wr_wait);
-               err = cxgb4_create_server(ep->com.dev->rdev.lldi.ports[0],
-                               ep->stid, ep->com.local_addr.sin_addr.s_addr,
-                               ep->com.local_addr.sin_port,
-                               0,
-                               ep->com.dev->rdev.lldi.rxq_ids[0]);
-               if (!err)
-                       err = c4iw_wait_for_reply(&ep->com.dev->rdev,
-                                                 &ep->com.wr_wait,
-                                                 0, 0, __func__);
-       }
+       if (ep->com.local_addr.ss_family == AF_INET)
+               err = create_server4(dev, ep);
+       else
+               err = create_server6(dev, ep);
        if (!err) {
                cm_id->provider_data = ep;
                goto out;
        }
-       pr_err("%s cxgb4_create_server/filter failed err %d " \
-              "stid %d laddr %08x lport %d\n", \
-              __func__, err, ep->stid,
-              ntohl(ep->com.local_addr.sin_addr.s_addr),
-              ntohs(ep->com.local_addr.sin_port));
-       cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, PF_INET);
+       cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid,
+                       ep->com.local_addr.ss_family);
 fail2:
        cm_id->rem_ref(cm_id);
        c4iw_put_ep(&ep->com);
@@ -2704,20 +2979,24 @@ int c4iw_destroy_listen(struct iw_cm_id *cm_id)
 
        might_sleep();
        state_set(&ep->com, DEAD);
-       if (ep->com.dev->rdev.lldi.enable_fw_ofld_conn) {
+       if (ep->com.dev->rdev.lldi.enable_fw_ofld_conn &&
+           ep->com.local_addr.ss_family == AF_INET) {
                err = cxgb4_remove_server_filter(
                        ep->com.dev->rdev.lldi.ports[0], ep->stid,
                        ep->com.dev->rdev.lldi.rxq_ids[0], 0);
        } else {
                c4iw_init_wr_wait(&ep->com.wr_wait);
-               err = listen_stop(ep);
+               err = cxgb4_remove_server(
+                               ep->com.dev->rdev.lldi.ports[0], ep->stid,
+                               ep->com.dev->rdev.lldi.rxq_ids[0], 0);
                if (err)
                        goto done;
                err = c4iw_wait_for_reply(&ep->com.dev->rdev, &ep->com.wr_wait,
                                          0, 0, __func__);
        }
        remove_handle(ep->com.dev, &ep->com.dev->stid_idr, ep->stid);
-       cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, PF_INET);
+       cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid,
+                       ep->com.local_addr.ss_family);
 done:
        cm_id->rem_ref(cm_id);
        c4iw_put_ep(&ep->com);
@@ -3021,7 +3300,6 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
        struct cpl_pass_accept_req *req = (void *)(rss + 1);
        struct l2t_entry *e;
        struct dst_entry *dst;
-       struct rtable *rt;
        struct c4iw_ep *lep;
        u16 window;
        struct port_info *pi;
@@ -3079,14 +3357,13 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
             ntohl(iph->daddr), ntohs(tcph->dest), ntohl(iph->saddr),
             ntohs(tcph->source), iph->tos);
 
-       rt = find_route(dev, iph->daddr, iph->saddr, tcph->dest, tcph->source,
-                       iph->tos);
-       if (!rt) {
+       dst = find_route(dev, iph->daddr, iph->saddr, tcph->dest, tcph->source,
+                        iph->tos);
+       if (!dst) {
                pr_err("%s - failed to find dst entry!\n",
                       __func__);
                goto reject;
        }
-       dst = &rt->dst;
        neigh = dst_neigh_lookup_skb(dst, skb);
 
        if (!neigh) {
@@ -3103,10 +3380,11 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
                tx_chan = cxgb4_port_chan(pdev);
                dev_put(pdev);
        } else {
+               pdev = get_real_dev(neigh->dev);
                e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh,
-                                       neigh->dev, 0);
-               pi = (struct port_info *)netdev_priv(neigh->dev);
-               tx_chan = cxgb4_port_chan(neigh->dev);
+                                       pdev, 0);
+               pi = (struct port_info *)netdev_priv(pdev);
+               tx_chan = cxgb4_port_chan(pdev);
        }
        if (!e) {
                pr_err("%s - failed to allocate l2t entry!\n",
index 0f1607c8325a5bc8a03e3a5d5471d39ed5741e8b..88de3aa9c5b0205952299a7823ad0bfc071d3a5d 100644 (file)
@@ -225,43 +225,186 @@ static void insert_sq_cqe(struct t4_wq *wq, struct t4_cq *cq,
        t4_swcq_produce(cq);
 }
 
-int c4iw_flush_sq(struct t4_wq *wq, struct t4_cq *cq, int count)
+static void advance_oldest_read(struct t4_wq *wq);
+
+int c4iw_flush_sq(struct c4iw_qp *qhp)
 {
        int flushed = 0;
-       struct t4_swsqe *swsqe = &wq->sq.sw_sq[wq->sq.cidx + count];
-       int in_use = wq->sq.in_use - count;
-
-       BUG_ON(in_use < 0);
-       while (in_use--) {
-               swsqe->signaled = 0;
-               insert_sq_cqe(wq, cq, swsqe);
-               swsqe++;
-               if (swsqe == (wq->sq.sw_sq + wq->sq.size))
-                       swsqe = wq->sq.sw_sq;
-               flushed++;
+       struct t4_wq *wq = &qhp->wq;
+       struct c4iw_cq *chp = to_c4iw_cq(qhp->ibqp.send_cq);
+       struct t4_cq *cq = &chp->cq;
+       int idx;
+       struct t4_swsqe *swsqe;
+       int error = (qhp->attr.state != C4IW_QP_STATE_CLOSING &&
+                       qhp->attr.state != C4IW_QP_STATE_IDLE);
+
+       if (wq->sq.flush_cidx == -1)
+               wq->sq.flush_cidx = wq->sq.cidx;
+       idx = wq->sq.flush_cidx;
+       BUG_ON(idx >= wq->sq.size);
+       while (idx != wq->sq.pidx) {
+               if (error) {
+                       swsqe = &wq->sq.sw_sq[idx];
+                       BUG_ON(swsqe->flushed);
+                       swsqe->flushed = 1;
+                       insert_sq_cqe(wq, cq, swsqe);
+                       if (wq->sq.oldest_read == swsqe) {
+                               BUG_ON(swsqe->opcode != FW_RI_READ_REQ);
+                               advance_oldest_read(wq);
+                       }
+                       flushed++;
+               } else {
+                       t4_sq_consume(wq);
+               }
+               if (++idx == wq->sq.size)
+                       idx = 0;
        }
+       wq->sq.flush_cidx += flushed;
+       if (wq->sq.flush_cidx >= wq->sq.size)
+               wq->sq.flush_cidx -= wq->sq.size;
        return flushed;
 }
 
+static void flush_completed_wrs(struct t4_wq *wq, struct t4_cq *cq)
+{
+       struct t4_swsqe *swsqe;
+       int cidx;
+
+       if (wq->sq.flush_cidx == -1)
+               wq->sq.flush_cidx = wq->sq.cidx;
+       cidx = wq->sq.flush_cidx;
+       BUG_ON(cidx > wq->sq.size);
+
+       while (cidx != wq->sq.pidx) {
+               swsqe = &wq->sq.sw_sq[cidx];
+               if (!swsqe->signaled) {
+                       if (++cidx == wq->sq.size)
+                               cidx = 0;
+               } else if (swsqe->complete) {
+
+                       BUG_ON(swsqe->flushed);
+
+                       /*
+                        * Insert this completed cqe into the swcq.
+                        */
+                       PDBG("%s moving cqe into swcq sq idx %u cq idx %u\n",
+                                       __func__, cidx, cq->sw_pidx);
+                       swsqe->cqe.header |= htonl(V_CQE_SWCQE(1));
+                       cq->sw_queue[cq->sw_pidx] = swsqe->cqe;
+                       t4_swcq_produce(cq);
+                       swsqe->flushed = 1;
+                       if (++cidx == wq->sq.size)
+                               cidx = 0;
+                       wq->sq.flush_cidx = cidx;
+               } else
+                       break;
+       }
+}
+
+static void create_read_req_cqe(struct t4_wq *wq, struct t4_cqe *hw_cqe,
+               struct t4_cqe *read_cqe)
+{
+       read_cqe->u.scqe.cidx = wq->sq.oldest_read->idx;
+       read_cqe->len = htonl(wq->sq.oldest_read->read_len);
+       read_cqe->header = htonl(V_CQE_QPID(CQE_QPID(hw_cqe)) |
+                       V_CQE_SWCQE(SW_CQE(hw_cqe)) |
+                       V_CQE_OPCODE(FW_RI_READ_REQ) |
+                       V_CQE_TYPE(1));
+       read_cqe->bits_type_ts = hw_cqe->bits_type_ts;
+}
+
+static void advance_oldest_read(struct t4_wq *wq)
+{
+
+       u32 rptr = wq->sq.oldest_read - wq->sq.sw_sq + 1;
+
+       if (rptr == wq->sq.size)
+               rptr = 0;
+       while (rptr != wq->sq.pidx) {
+               wq->sq.oldest_read = &wq->sq.sw_sq[rptr];
+
+               if (wq->sq.oldest_read->opcode == FW_RI_READ_REQ)
+                       return;
+               if (++rptr == wq->sq.size)
+                       rptr = 0;
+       }
+       wq->sq.oldest_read = NULL;
+}
+
 /*
  * Move all CQEs from the HWCQ into the SWCQ.
+ * Deal with out-of-order and/or completions that complete
+ * prior unsignalled WRs.
  */
-void c4iw_flush_hw_cq(struct t4_cq *cq)
+void c4iw_flush_hw_cq(struct c4iw_cq *chp)
 {
-       struct t4_cqe *cqe = NULL, *swcqe;
+       struct t4_cqe *hw_cqe, *swcqe, read_cqe;
+       struct c4iw_qp *qhp;
+       struct t4_swsqe *swsqe;
        int ret;
 
-       PDBG("%s cq %p cqid 0x%x\n", __func__, cq, cq->cqid);
-       ret = t4_next_hw_cqe(cq, &cqe);
+       PDBG("%s  cqid 0x%x\n", __func__, chp->cq.cqid);
+       ret = t4_next_hw_cqe(&chp->cq, &hw_cqe);
+
+       /*
+        * This logic is similar to poll_cq(), but not quite the same
+        * unfortunately.  Need to move pertinent HW CQEs to the SW CQ but
+        * also do any translation magic that poll_cq() normally does.
+        */
        while (!ret) {
-               PDBG("%s flushing hwcq cidx 0x%x swcq pidx 0x%x\n",
-                    __func__, cq->cidx, cq->sw_pidx);
-               swcqe = &cq->sw_queue[cq->sw_pidx];
-               *swcqe = *cqe;
-               swcqe->header |= cpu_to_be32(V_CQE_SWCQE(1));
-               t4_swcq_produce(cq);
-               t4_hwcq_consume(cq);
-               ret = t4_next_hw_cqe(cq, &cqe);
+               qhp = get_qhp(chp->rhp, CQE_QPID(hw_cqe));
+
+               /*
+                * drop CQEs with no associated QP
+                */
+               if (qhp == NULL)
+                       goto next_cqe;
+
+               if (CQE_OPCODE(hw_cqe) == FW_RI_TERMINATE)
+                       goto next_cqe;
+
+               if (CQE_OPCODE(hw_cqe) == FW_RI_READ_RESP) {
+
+                       /*
+                        * drop peer2peer RTR reads.
+                        */
+                       if (CQE_WRID_STAG(hw_cqe) == 1)
+                               goto next_cqe;
+
+                       /*
+                        * Eat completions for unsignaled read WRs.
+                        */
+                       if (!qhp->wq.sq.oldest_read->signaled) {
+                               advance_oldest_read(&qhp->wq);
+                               goto next_cqe;
+                       }
+
+                       /*
+                        * Don't write to the HWCQ, create a new read req CQE
+                        * in local memory and move it into the swcq.
+                        */
+                       create_read_req_cqe(&qhp->wq, hw_cqe, &read_cqe);
+                       hw_cqe = &read_cqe;
+                       advance_oldest_read(&qhp->wq);
+               }
+
+               /* if its a SQ completion, then do the magic to move all the
+                * unsignaled and now in-order completions into the swcq.
+                */
+               if (SQ_TYPE(hw_cqe)) {
+                       swsqe = &qhp->wq.sq.sw_sq[CQE_WRID_SQ_IDX(hw_cqe)];
+                       swsqe->cqe = *hw_cqe;
+                       swsqe->complete = 1;
+                       flush_completed_wrs(&qhp->wq, &chp->cq);
+               } else {
+                       swcqe = &chp->cq.sw_queue[chp->cq.sw_pidx];
+                       *swcqe = *hw_cqe;
+                       swcqe->header |= cpu_to_be32(V_CQE_SWCQE(1));
+                       t4_swcq_produce(&chp->cq);
+               }
+next_cqe:
+               t4_hwcq_consume(&chp->cq);
+               ret = t4_next_hw_cqe(&chp->cq, &hw_cqe);
        }
 }
 
@@ -281,25 +424,6 @@ static int cqe_completes_wr(struct t4_cqe *cqe, struct t4_wq *wq)
        return 1;
 }
 
-void c4iw_count_scqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
-{
-       struct t4_cqe *cqe;
-       u32 ptr;
-
-       *count = 0;
-       ptr = cq->sw_cidx;
-       while (ptr != cq->sw_pidx) {
-               cqe = &cq->sw_queue[ptr];
-               if ((SQ_TYPE(cqe) || ((CQE_OPCODE(cqe) == FW_RI_READ_RESP) &&
-                                     wq->sq.oldest_read)) &&
-                   (CQE_QPID(cqe) == wq->sq.qid))
-                       (*count)++;
-               if (++ptr == cq->size)
-                       ptr = 0;
-       }
-       PDBG("%s cq %p count %d\n", __func__, cq, *count);
-}
-
 void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
 {
        struct t4_cqe *cqe;
@@ -319,70 +443,6 @@ void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
        PDBG("%s cq %p count %d\n", __func__, cq, *count);
 }
 
-static void flush_completed_wrs(struct t4_wq *wq, struct t4_cq *cq)
-{
-       struct t4_swsqe *swsqe;
-       u16 ptr = wq->sq.cidx;
-       int count = wq->sq.in_use;
-       int unsignaled = 0;
-
-       swsqe = &wq->sq.sw_sq[ptr];
-       while (count--)
-               if (!swsqe->signaled) {
-                       if (++ptr == wq->sq.size)
-                               ptr = 0;
-                       swsqe = &wq->sq.sw_sq[ptr];
-                       unsignaled++;
-               } else if (swsqe->complete) {
-
-                       /*
-                        * Insert this completed cqe into the swcq.
-                        */
-                       PDBG("%s moving cqe into swcq sq idx %u cq idx %u\n",
-                            __func__, ptr, cq->sw_pidx);
-                       swsqe->cqe.header |= htonl(V_CQE_SWCQE(1));
-                       cq->sw_queue[cq->sw_pidx] = swsqe->cqe;
-                       t4_swcq_produce(cq);
-                       swsqe->signaled = 0;
-                       wq->sq.in_use -= unsignaled;
-                       break;
-               } else
-                       break;
-}
-
-static void create_read_req_cqe(struct t4_wq *wq, struct t4_cqe *hw_cqe,
-                               struct t4_cqe *read_cqe)
-{
-       read_cqe->u.scqe.cidx = wq->sq.oldest_read->idx;
-       read_cqe->len = cpu_to_be32(wq->sq.oldest_read->read_len);
-       read_cqe->header = htonl(V_CQE_QPID(CQE_QPID(hw_cqe)) |
-                                V_CQE_SWCQE(SW_CQE(hw_cqe)) |
-                                V_CQE_OPCODE(FW_RI_READ_REQ) |
-                                V_CQE_TYPE(1));
-       read_cqe->bits_type_ts = hw_cqe->bits_type_ts;
-}
-
-/*
- * Return a ptr to the next read wr in the SWSQ or NULL.
- */
-static void advance_oldest_read(struct t4_wq *wq)
-{
-
-       u32 rptr = wq->sq.oldest_read - wq->sq.sw_sq + 1;
-
-       if (rptr == wq->sq.size)
-               rptr = 0;
-       while (rptr != wq->sq.pidx) {
-               wq->sq.oldest_read = &wq->sq.sw_sq[rptr];
-
-               if (wq->sq.oldest_read->opcode == FW_RI_READ_REQ)
-                       return;
-               if (++rptr == wq->sq.size)
-                       rptr = 0;
-       }
-       wq->sq.oldest_read = NULL;
-}
-
 /*
  * poll_cq
  *
@@ -426,6 +486,22 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
                goto skip_cqe;
        }
 
+       /*
+       * skip hw cqe's if the wq is flushed.
+       */
+       if (wq->flushed && !SW_CQE(hw_cqe)) {
+               ret = -EAGAIN;
+               goto skip_cqe;
+       }
+
+       /*
+        * skip TERMINATE cqes...
+        */
+       if (CQE_OPCODE(hw_cqe) == FW_RI_TERMINATE) {
+               ret = -EAGAIN;
+               goto skip_cqe;
+       }
+
        /*
         * Gotta tweak READ completions:
         *      1) the cqe doesn't contain the sq_wptr from the wr.
@@ -440,13 +516,22 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
                 * was generated by the kernel driver as part of peer-2-peer
                 * connection setup.  So ignore the completion.
                 */
-               if (!wq->sq.oldest_read) {
+               if (CQE_WRID_STAG(hw_cqe) == 1) {
                        if (CQE_STATUS(hw_cqe))
                                t4_set_wq_in_error(wq);
                        ret = -EAGAIN;
                        goto skip_cqe;
                }
 
+               /*
+                * Eat completions for unsignaled read WRs.
+                */
+               if (!wq->sq.oldest_read->signaled) {
+                       advance_oldest_read(wq);
+                       ret = -EAGAIN;
+                       goto skip_cqe;
+               }
+
                /*
                 * Don't write to the HWCQ, so create a new read req CQE
                 * in local memory.
@@ -457,14 +542,8 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
        }
 
        if (CQE_STATUS(hw_cqe) || t4_wq_in_error(wq)) {
-               *cqe_flushed = t4_wq_in_error(wq);
+               *cqe_flushed = (CQE_STATUS(hw_cqe) == T4_ERR_SWFLUSH);
                t4_set_wq_in_error(wq);
-               goto proc_cqe;
-       }
-
-       if (CQE_OPCODE(hw_cqe) == FW_RI_TERMINATE) {
-               ret = -EAGAIN;
-               goto skip_cqe;
        }
 
        /*
@@ -523,7 +602,24 @@ proc_cqe:
         * completion.
         */
        if (SQ_TYPE(hw_cqe)) {
-               wq->sq.cidx = CQE_WRID_SQ_IDX(hw_cqe);
+               int idx = CQE_WRID_SQ_IDX(hw_cqe);
+               BUG_ON(idx > wq->sq.size);
+
+               /*
+               * Account for any unsignaled completions completed by
+               * this signaled completion.  In this case, cidx points
+               * to the first unsignaled one, and idx points to the
+               * signaled one.  So adjust in_use based on this delta.
+               * if this is not completing any unsigned wrs, then the
+               * delta will be 0. Handle wrapping also!
+               */
+               if (idx < wq->sq.cidx)
+                       wq->sq.in_use -= wq->sq.size + idx - wq->sq.cidx;
+               else
+                       wq->sq.in_use -= idx - wq->sq.cidx;
+               BUG_ON(wq->sq.in_use < 0 && wq->sq.in_use < wq->sq.size);
+
+               wq->sq.cidx = (uint16_t)idx;
                PDBG("%s completing sq idx %u\n", __func__, wq->sq.cidx);
                *cookie = wq->sq.sw_sq[wq->sq.cidx].wr_id;
                t4_sq_consume(wq);
@@ -532,6 +628,7 @@ proc_cqe:
                *cookie = wq->rq.sw_rq[wq->rq.cidx].wr_id;
                BUG_ON(t4_rq_empty(wq));
                t4_rq_consume(wq);
+               goto skip_cqe;
        }
 
 flush_wq:
index ae656016e1ae968bdf4fff69e3c1d39de3a87e50..33d2cc6ab56220bed4a49af1fb0f22e68955508d 100644 (file)
@@ -103,18 +103,43 @@ static int dump_qp(int id, void *p, void *data)
        if (space == 0)
                return 1;
 
-       if (qp->ep)
-               cc = snprintf(qpd->buf + qpd->pos, space,
-                            "qp sq id %u rq id %u state %u onchip %u "
-                            "ep tid %u state %u %pI4:%u->%pI4:%u\n",
-                            qp->wq.sq.qid, qp->wq.rq.qid, (int)qp->attr.state,
-                            qp->wq.sq.flags & T4_SQ_ONCHIP,
-                            qp->ep->hwtid, (int)qp->ep->com.state,
-                            &qp->ep->com.local_addr.sin_addr.s_addr,
-                            ntohs(qp->ep->com.local_addr.sin_port),
-                            &qp->ep->com.remote_addr.sin_addr.s_addr,
-                            ntohs(qp->ep->com.remote_addr.sin_port));
-       else
+       if (qp->ep) {
+               if (qp->ep->com.local_addr.ss_family == AF_INET) {
+                       struct sockaddr_in *lsin = (struct sockaddr_in *)
+                               &qp->ep->com.local_addr;
+                       struct sockaddr_in *rsin = (struct sockaddr_in *)
+                               &qp->ep->com.remote_addr;
+
+                       cc = snprintf(qpd->buf + qpd->pos, space,
+                                     "rc qp sq id %u rq id %u state %u "
+                                     "onchip %u ep tid %u state %u "
+                                     "%pI4:%u->%pI4:%u\n",
+                                     qp->wq.sq.qid, qp->wq.rq.qid,
+                                     (int)qp->attr.state,
+                                     qp->wq.sq.flags & T4_SQ_ONCHIP,
+                                     qp->ep->hwtid, (int)qp->ep->com.state,
+                                     &lsin->sin_addr, ntohs(lsin->sin_port),
+                                     &rsin->sin_addr, ntohs(rsin->sin_port));
+               } else {
+                       struct sockaddr_in6 *lsin6 = (struct sockaddr_in6 *)
+                               &qp->ep->com.local_addr;
+                       struct sockaddr_in6 *rsin6 = (struct sockaddr_in6 *)
+                               &qp->ep->com.remote_addr;
+
+                       cc = snprintf(qpd->buf + qpd->pos, space,
+                                     "rc qp sq id %u rq id %u state %u "
+                                     "onchip %u ep tid %u state %u "
+                                     "%pI6:%u->%pI6:%u\n",
+                                     qp->wq.sq.qid, qp->wq.rq.qid,
+                                     (int)qp->attr.state,
+                                     qp->wq.sq.flags & T4_SQ_ONCHIP,
+                                     qp->ep->hwtid, (int)qp->ep->com.state,
+                                     &lsin6->sin6_addr,
+                                     ntohs(lsin6->sin6_port),
+                                     &rsin6->sin6_addr,
+                                     ntohs(rsin6->sin6_port));
+               }
+       } else
                cc = snprintf(qpd->buf + qpd->pos, space,
                             "qp sq id %u rq id %u state %u onchip %u\n",
                              qp->wq.sq.qid, qp->wq.rq.qid,
@@ -351,15 +376,37 @@ static int dump_ep(int id, void *p, void *data)
        if (space == 0)
                return 1;
 
-       cc = snprintf(epd->buf + epd->pos, space,
-                       "ep %p cm_id %p qp %p state %d flags 0x%lx history 0x%lx "
-                       "hwtid %d atid %d %pI4:%d <-> %pI4:%d\n",
-                       ep, ep->com.cm_id, ep->com.qp, (int)ep->com.state,
-                       ep->com.flags, ep->com.history, ep->hwtid, ep->atid,
-                       &ep->com.local_addr.sin_addr.s_addr,
-                       ntohs(ep->com.local_addr.sin_port),
-                       &ep->com.remote_addr.sin_addr.s_addr,
-                       ntohs(ep->com.remote_addr.sin_port));
+       if (ep->com.local_addr.ss_family == AF_INET) {
+               struct sockaddr_in *lsin = (struct sockaddr_in *)
+                       &ep->com.local_addr;
+               struct sockaddr_in *rsin = (struct sockaddr_in *)
+                       &ep->com.remote_addr;
+
+               cc = snprintf(epd->buf + epd->pos, space,
+                             "ep %p cm_id %p qp %p state %d flags 0x%lx "
+                             "history 0x%lx hwtid %d atid %d "
+                             "%pI4:%d <-> %pI4:%d\n",
+                             ep, ep->com.cm_id, ep->com.qp,
+                             (int)ep->com.state, ep->com.flags,
+                             ep->com.history, ep->hwtid, ep->atid,
+                             &lsin->sin_addr, ntohs(lsin->sin_port),
+                             &rsin->sin_addr, ntohs(rsin->sin_port));
+       } else {
+               struct sockaddr_in6 *lsin6 = (struct sockaddr_in6 *)
+                       &ep->com.local_addr;
+               struct sockaddr_in6 *rsin6 = (struct sockaddr_in6 *)
+                       &ep->com.remote_addr;
+
+               cc = snprintf(epd->buf + epd->pos, space,
+                             "ep %p cm_id %p qp %p state %d flags 0x%lx "
+                             "history 0x%lx hwtid %d atid %d "
+                             "%pI6:%d <-> %pI6:%d\n",
+                             ep, ep->com.cm_id, ep->com.qp,
+                             (int)ep->com.state, ep->com.flags,
+                             ep->com.history, ep->hwtid, ep->atid,
+                             &lsin6->sin6_addr, ntohs(lsin6->sin6_port),
+                             &rsin6->sin6_addr, ntohs(rsin6->sin6_port));
+       }
        if (cc < space)
                epd->pos += cc;
        return 0;
@@ -376,12 +423,27 @@ static int dump_listen_ep(int id, void *p, void *data)
        if (space == 0)
                return 1;
 
-       cc = snprintf(epd->buf + epd->pos, space,
-                       "ep %p cm_id %p state %d flags 0x%lx stid %d backlog %d "
-                       "%pI4:%d\n", ep, ep->com.cm_id, (int)ep->com.state,
-                       ep->com.flags, ep->stid, ep->backlog,
-                       &ep->com.local_addr.sin_addr.s_addr,
-                       ntohs(ep->com.local_addr.sin_port));
+       if (ep->com.local_addr.ss_family == AF_INET) {
+               struct sockaddr_in *lsin = (struct sockaddr_in *)
+                       &ep->com.local_addr;
+
+               cc = snprintf(epd->buf + epd->pos, space,
+                             "ep %p cm_id %p state %d flags 0x%lx stid %d "
+                             "backlog %d %pI4:%d\n",
+                             ep, ep->com.cm_id, (int)ep->com.state,
+                             ep->com.flags, ep->stid, ep->backlog,
+                             &lsin->sin_addr, ntohs(lsin->sin_port));
+       } else {
+               struct sockaddr_in6 *lsin6 = (struct sockaddr_in6 *)
+                       &ep->com.local_addr;
+
+               cc = snprintf(epd->buf + epd->pos, space,
+                             "ep %p cm_id %p state %d flags 0x%lx stid %d "
+                             "backlog %d %pI6:%d\n",
+                             ep, ep->com.cm_id, (int)ep->com.state,
+                             ep->com.flags, ep->stid, ep->backlog,
+                             &lsin6->sin6_addr, ntohs(lsin6->sin6_port));
+       }
        if (cc < space)
                epd->pos += cc;
        return 0;
index 1a840b2211dd385f35c0567bbb0f9a61df43626c..d61d0a18f784c9d1b032be277a87524f89449e41 100644 (file)
@@ -44,16 +44,6 @@ static void post_qp_event(struct c4iw_dev *dev, struct c4iw_cq *chp,
        struct c4iw_qp_attributes attrs;
        unsigned long flag;
 
-       if ((qhp->attr.state == C4IW_QP_STATE_ERROR) ||
-           (qhp->attr.state == C4IW_QP_STATE_TERMINATE)) {
-               pr_err("%s AE after RTS - qpid 0x%x opcode %d status 0x%x "\
-                      "type %d wrid.hi 0x%x wrid.lo 0x%x\n",
-                      __func__, CQE_QPID(err_cqe), CQE_OPCODE(err_cqe),
-                      CQE_STATUS(err_cqe), CQE_TYPE(err_cqe),
-                      CQE_WRID_HI(err_cqe), CQE_WRID_LOW(err_cqe));
-               return;
-       }
-
        printk(KERN_ERR MOD "AE qpid 0x%x opcode %d status 0x%x "
               "type %d wrid.hi 0x%x wrid.lo 0x%x\n",
               CQE_QPID(err_cqe), CQE_OPCODE(err_cqe),
index 485183ad34cd7ab7b2db8c2e310974fb319bd616..23eaeabab93b50d483e279de2adb7175c7c29dfc 100644 (file)
@@ -752,8 +752,8 @@ struct c4iw_ep_common {
        enum c4iw_ep_state state;
        struct kref kref;
        struct mutex mutex;
-       struct sockaddr_in local_addr;
-       struct sockaddr_in remote_addr;
+       struct sockaddr_storage local_addr;
+       struct sockaddr_storage remote_addr;
        struct c4iw_wr_wait wr_wait;
        unsigned long flags;
        unsigned long history;
@@ -917,12 +917,11 @@ void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size);
 u32 c4iw_ocqp_pool_alloc(struct c4iw_rdev *rdev, int size);
 void c4iw_ocqp_pool_free(struct c4iw_rdev *rdev, u32 addr, int size);
 int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb);
-void c4iw_flush_hw_cq(struct t4_cq *cq);
+void c4iw_flush_hw_cq(struct c4iw_cq *chp);
 void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count);
-void c4iw_count_scqes(struct t4_cq *cq, struct t4_wq *wq, int *count);
 int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp);
 int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count);
-int c4iw_flush_sq(struct t4_wq *wq, struct t4_cq *cq, int count);
+int c4iw_flush_sq(struct c4iw_qp *qhp);
 int c4iw_ev_handler(struct c4iw_dev *rnicp, u32 qid);
 u16 c4iw_rqes_posted(struct c4iw_qp *qhp);
 int c4iw_post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe);
index a4975e1654a639960b214114648593564fcb0b27..582936708e6e492dfca46b88b5db98f16159c0db 100644 (file)
@@ -737,6 +737,7 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                swsqe->idx = qhp->wq.sq.pidx;
                swsqe->complete = 0;
                swsqe->signaled = (wr->send_flags & IB_SEND_SIGNALED);
+               swsqe->flushed = 0;
                swsqe->wr_id = wr->wr_id;
 
                init_wr_hdr(wqe, qhp->wq.sq.pidx, fw_opcode, fw_flags, len16);
@@ -1006,7 +1007,15 @@ static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
        /* locking hierarchy: cq lock first, then qp lock. */
        spin_lock_irqsave(&rchp->lock, flag);
        spin_lock(&qhp->lock);
-       c4iw_flush_hw_cq(&rchp->cq);
+
+       if (qhp->wq.flushed) {
+               spin_unlock(&qhp->lock);
+               spin_unlock_irqrestore(&rchp->lock, flag);
+               return;
+       }
+       qhp->wq.flushed = 1;
+
+       c4iw_flush_hw_cq(rchp);
        c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count);
        flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
        spin_unlock(&qhp->lock);
@@ -1020,9 +1029,9 @@ static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
        /* locking hierarchy: cq lock first, then qp lock. */
        spin_lock_irqsave(&schp->lock, flag);
        spin_lock(&qhp->lock);
-       c4iw_flush_hw_cq(&schp->cq);
-       c4iw_count_scqes(&schp->cq, &qhp->wq, &count);
-       flushed = c4iw_flush_sq(&qhp->wq, &schp->cq, count);
+       if (schp != rchp)
+               c4iw_flush_hw_cq(schp);
+       flushed = c4iw_flush_sq(qhp);
        spin_unlock(&qhp->lock);
        spin_unlock_irqrestore(&schp->lock, flag);
        if (flushed) {
@@ -1037,11 +1046,11 @@ static void flush_qp(struct c4iw_qp *qhp)
        struct c4iw_cq *rchp, *schp;
        unsigned long flag;
 
-       rchp = get_chp(qhp->rhp, qhp->attr.rcq);
-       schp = get_chp(qhp->rhp, qhp->attr.scq);
+       rchp = to_c4iw_cq(qhp->ibqp.recv_cq);
+       schp = to_c4iw_cq(qhp->ibqp.send_cq);
 
+       t4_set_wq_in_error(&qhp->wq);
        if (qhp->ibqp.uobject) {
-               t4_set_wq_in_error(&qhp->wq);
                t4_set_cq_in_error(&rchp->cq);
                spin_lock_irqsave(&rchp->comp_handler_lock, flag);
                (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
@@ -1330,8 +1339,7 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
                                disconnect = 1;
                                c4iw_get_ep(&qhp->ep->com);
                        }
-                       if (qhp->ibqp.uobject)
-                               t4_set_wq_in_error(&qhp->wq);
+                       t4_set_wq_in_error(&qhp->wq);
                        ret = rdma_fini(rhp, qhp, ep);
                        if (ret)
                                goto err;
@@ -1340,18 +1348,21 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
                        set_state(qhp, C4IW_QP_STATE_TERMINATE);
                        qhp->attr.layer_etype = attrs->layer_etype;
                        qhp->attr.ecode = attrs->ecode;
-                       if (qhp->ibqp.uobject)
-                               t4_set_wq_in_error(&qhp->wq);
+                       t4_set_wq_in_error(&qhp->wq);
                        ep = qhp->ep;
+                       disconnect = 1;
                        if (!internal)
                                terminate = 1;
-                       disconnect = 1;
+                       else {
+                               ret = rdma_fini(rhp, qhp, ep);
+                               if (ret)
+                                       goto err;
+                       }
                        c4iw_get_ep(&qhp->ep->com);
                        break;
                case C4IW_QP_STATE_ERROR:
                        set_state(qhp, C4IW_QP_STATE_ERROR);
-                       if (qhp->ibqp.uobject)
-                               t4_set_wq_in_error(&qhp->wq);
+                       t4_set_wq_in_error(&qhp->wq);
                        if (!internal) {
                                abort = 1;
                                disconnect = 1;
@@ -1552,12 +1563,12 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
 
        ucontext = pd->uobject ? to_c4iw_ucontext(pd->uobject->context) : NULL;
 
-
        qhp = kzalloc(sizeof(*qhp), GFP_KERNEL);
        if (!qhp)
                return ERR_PTR(-ENOMEM);
        qhp->wq.sq.size = sqsize;
        qhp->wq.sq.memsize = (sqsize + 1) * sizeof *qhp->wq.sq.queue;
+       qhp->wq.sq.flush_cidx = -1;
        qhp->wq.rq.size = rqsize;
        qhp->wq.rq.memsize = (rqsize + 1) * sizeof *qhp->wq.rq.queue;
 
index ebcb03bd1b72ed6003bd7ae85e88dd8ce9c5595d..e73ace739183a9864ab8fee70e374cdc9ce0d178 100644 (file)
@@ -36,9 +36,9 @@
 #include "t4_msg.h"
 #include "t4fw_ri_api.h"
 
-#define T4_MAX_NUM_QP (1<<16)
-#define T4_MAX_NUM_CQ (1<<15)
-#define T4_MAX_NUM_PD (1<<15)
+#define T4_MAX_NUM_QP 65536
+#define T4_MAX_NUM_CQ 65536
+#define T4_MAX_NUM_PD 65536
 #define T4_EQ_STATUS_ENTRIES (L1_CACHE_BYTES > 64 ? 2 : 1)
 #define T4_MAX_EQ_SIZE (65520 - T4_EQ_STATUS_ENTRIES)
 #define T4_MAX_IQ_SIZE (65520 - 1)
@@ -47,7 +47,7 @@
 #define T4_MAX_QP_DEPTH (T4_MAX_RQ_SIZE - 1)
 #define T4_MAX_CQ_DEPTH (T4_MAX_IQ_SIZE - 1)
 #define T4_MAX_NUM_STAG (1<<15)
-#define T4_MAX_MR_SIZE (~0ULL - 1)
+#define T4_MAX_MR_SIZE (~0ULL)
 #define T4_PAGESIZE_MASK 0xffff000  /* 4KB-128MB */
 #define T4_STAG_UNSET 0xffffffff
 #define T4_FW_MAJ 0
@@ -269,6 +269,7 @@ struct t4_swsqe {
        int                     complete;
        int                     signaled;
        u16                     idx;
+       int                     flushed;
 };
 
 static inline pgprot_t t4_pgprot_wc(pgprot_t prot)
@@ -300,6 +301,7 @@ struct t4_sq {
        u16 pidx;
        u16 wq_pidx;
        u16 flags;
+       short flush_cidx;
 };
 
 struct t4_swrqe {
@@ -330,6 +332,7 @@ struct t4_wq {
        void __iomem *db;
        void __iomem *gts;
        struct c4iw_rdev *rdev;
+       int flushed;
 };
 
 static inline int t4_rqes_posted(struct t4_wq *wq)
@@ -412,6 +415,9 @@ static inline void t4_sq_produce(struct t4_wq *wq, u8 len16)
 
 static inline void t4_sq_consume(struct t4_wq *wq)
 {
+       BUG_ON(wq->sq.in_use < 1);
+       if (wq->sq.cidx == wq->sq.flush_cidx)
+               wq->sq.flush_cidx = -1;
        wq->sq.in_use--;
        if (++wq->sq.cidx == wq->sq.size)
                wq->sq.cidx = 0;
@@ -505,12 +511,18 @@ static inline int t4_arm_cq(struct t4_cq *cq, int se)
 static inline void t4_swcq_produce(struct t4_cq *cq)
 {
        cq->sw_in_use++;
+       if (cq->sw_in_use == cq->size) {
+               PDBG("%s cxgb4 sw cq overflow cqid %u\n", __func__, cq->cqid);
+               cq->error = 1;
+               BUG_ON(1);
+       }
        if (++cq->sw_pidx == cq->size)
                cq->sw_pidx = 0;
 }
 
 static inline void t4_swcq_consume(struct t4_cq *cq)
 {
+       BUG_ON(cq->sw_in_use < 1);
        cq->sw_in_use--;
        if (++cq->sw_cidx == cq->size)
                cq->sw_cidx = 0;
@@ -519,7 +531,7 @@ static inline void t4_swcq_consume(struct t4_cq *cq)
 static inline void t4_hwcq_consume(struct t4_cq *cq)
 {
        cq->bits_type_ts = cq->queue[cq->cidx].bits_type_ts;
-       if (++cq->cidx_inc == (cq->size >> 4)) {
+       if (++cq->cidx_inc == (cq->size >> 4) || cq->cidx_inc == CIDXINC_MASK) {
                u32 val;
 
                val = SEINTARM(0) | CIDXINC(cq->cidx_inc) | TIMERREG(7) |
@@ -552,6 +564,7 @@ static inline int t4_next_hw_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
                ret = -EOVERFLOW;
                cq->error = 1;
                printk(KERN_ERR MOD "cq overflow cqid %u\n", cq->cqid);
+               BUG_ON(1);
        } else if (t4_valid_cqe(cq, &cq->queue[cq->cidx])) {
                *cqe = &cq->queue[cq->cidx];
                ret = 0;
@@ -562,6 +575,12 @@ static inline int t4_next_hw_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
 
 static inline struct t4_cqe *t4_next_sw_cqe(struct t4_cq *cq)
 {
+       if (cq->sw_in_use == cq->size) {
+               PDBG("%s cxgb4 sw cq overflow cqid %u\n", __func__, cq->cqid);
+               cq->error = 1;
+               BUG_ON(1);
+               return NULL;
+       }
        if (cq->sw_in_use)
                return &cq->sw_queue[cq->sw_cidx];
        return NULL;
index 62c71fadb4d97d10a29f0e05452b63f888fb0814..8d594517cd292821182e0dc363a32ea0f6d98c9f 100644 (file)
@@ -222,7 +222,8 @@ int ipz_queue_ctor(struct ehca_pd *pd, struct ipz_queue *queue,
        queue->small_page = NULL;
 
        /* allocate queue page pointers */
-       queue->queue_pages = kzalloc(nr_of_pages * sizeof(void *), GFP_KERNEL);
+       queue->queue_pages = kzalloc(nr_of_pages * sizeof(void *),
+                                    GFP_KERNEL | __GFP_NOWARN);
        if (!queue->queue_pages) {
                queue->queue_pages = vzalloc(nr_of_pages * sizeof(void *));
                if (!queue->queue_pages) {
index 24b9f1a0107b9174b7035d12b560f69fa90633c1..6b29249aa85a9ed59b24df4c5fc42636371b9181 100644 (file)
@@ -2998,6 +2998,8 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
        u8 *start_ptr = &start_addr;
        u8 **start_buff = &start_ptr;
        u16 buff_len = 0;
+       struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->local_addr;
+       struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->remote_addr;
 
        ibqp = nes_get_qp(cm_id->device, conn_param->qpn);
        if (!ibqp)
@@ -3062,8 +3064,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
        /* setup our first outgoing iWarp send WQE (the IETF frame response) */
        wqe = &nesqp->hwqp.sq_vbase[0];
 
-       if (cm_id->remote_addr.sin_addr.s_addr !=
-           cm_id->local_addr.sin_addr.s_addr) {
+       if (raddr->sin_addr.s_addr != laddr->sin_addr.s_addr) {
                u64temp = (unsigned long)nesqp;
                nesibdev = nesvnic->nesibdev;
                nespd = nesqp->nespd;
@@ -3132,13 +3133,10 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 
        nes_cm_init_tsa_conn(nesqp, cm_node);
 
-       nesqp->nesqp_context->tcpPorts[0] =
-               cpu_to_le16(ntohs(cm_id->local_addr.sin_port));
-       nesqp->nesqp_context->tcpPorts[1] =
-               cpu_to_le16(ntohs(cm_id->remote_addr.sin_port));
+       nesqp->nesqp_context->tcpPorts[0] = cpu_to_le16(ntohs(laddr->sin_port));
+       nesqp->nesqp_context->tcpPorts[1] = cpu_to_le16(ntohs(raddr->sin_port));
 
-       nesqp->nesqp_context->ip0 =
-                       cpu_to_le32(ntohl(cm_id->remote_addr.sin_addr.s_addr));
+       nesqp->nesqp_context->ip0 = cpu_to_le32(ntohl(raddr->sin_addr.s_addr));
 
        nesqp->nesqp_context->misc2 |= cpu_to_le32(
                (u32)PCI_FUNC(nesdev->pcidev->devfn) <<
@@ -3162,9 +3160,9 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
        memset(&nes_quad, 0, sizeof(nes_quad));
        nes_quad.DstIpAdrIndex =
                cpu_to_le32((u32)PCI_FUNC(nesdev->pcidev->devfn) << 24);
-       nes_quad.SrcIpadr = cm_id->remote_addr.sin_addr.s_addr;
-       nes_quad.TcpPorts[0] = cm_id->remote_addr.sin_port;
-       nes_quad.TcpPorts[1] = cm_id->local_addr.sin_port;
+       nes_quad.SrcIpadr = raddr->sin_addr.s_addr;
+       nes_quad.TcpPorts[0] = raddr->sin_port;
+       nes_quad.TcpPorts[1] = laddr->sin_port;
 
        /* Produce hash key */
        crc_value = get_crc_value(&nes_quad);
@@ -3180,10 +3178,8 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
        nes_debug(NES_DBG_CM, "QP%u, Destination IP = 0x%08X:0x%04X, local = "
                  "0x%08X:0x%04X, rcv_nxt=0x%08X, snd_nxt=0x%08X, mpa + "
                  "private data length=%u.\n", nesqp->hwqp.qp_id,
-                 ntohl(cm_id->remote_addr.sin_addr.s_addr),
-                 ntohs(cm_id->remote_addr.sin_port),
-                 ntohl(cm_id->local_addr.sin_addr.s_addr),
-                 ntohs(cm_id->local_addr.sin_port),
+                 ntohl(raddr->sin_addr.s_addr), ntohs(raddr->sin_port),
+                 ntohl(laddr->sin_addr.s_addr), ntohs(laddr->sin_port),
                  le32_to_cpu(nesqp->nesqp_context->rcv_nxt),
                  le32_to_cpu(nesqp->nesqp_context->snd_nxt),
                  buff_len);
@@ -3263,7 +3259,11 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
        struct nes_cm_node *cm_node;
        struct nes_cm_info cm_info;
        int apbvt_set = 0;
+       struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->local_addr;
+       struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->remote_addr;
 
+       if (cm_id->remote_addr.ss_family != AF_INET)
+               return -ENOSYS;
        ibqp = nes_get_qp(cm_id->device, conn_param->qpn);
        if (!ibqp)
                return -EINVAL;
@@ -3277,16 +3277,14 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
        if (!nesdev)
                return -EINVAL;
 
-       if (!(cm_id->local_addr.sin_port) || !(cm_id->remote_addr.sin_port))
+       if (!laddr->sin_port || !raddr->sin_port)
                return -EINVAL;
 
        nes_debug(NES_DBG_CM, "QP%u, current IP = 0x%08X, Destination IP = "
                  "0x%08X:0x%04X, local = 0x%08X:0x%04X.\n", nesqp->hwqp.qp_id,
-                 ntohl(nesvnic->local_ipaddr),
-                 ntohl(cm_id->remote_addr.sin_addr.s_addr),
-                 ntohs(cm_id->remote_addr.sin_port),
-                 ntohl(cm_id->local_addr.sin_addr.s_addr),
-                 ntohs(cm_id->local_addr.sin_port));
+                 ntohl(nesvnic->local_ipaddr), ntohl(raddr->sin_addr.s_addr),
+                 ntohs(raddr->sin_port), ntohl(laddr->sin_addr.s_addr),
+                 ntohs(laddr->sin_port));
 
        atomic_inc(&cm_connects);
        nesqp->active_conn = 1;
@@ -3306,18 +3304,18 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
        nes_debug(NES_DBG_CM, "mpa private data len =%u\n",
                  conn_param->private_data_len);
 
-       if (cm_id->local_addr.sin_addr.s_addr !=
-           cm_id->remote_addr.sin_addr.s_addr) {
-               nes_manage_apbvt(nesvnic, ntohs(cm_id->local_addr.sin_port),
-                                PCI_FUNC(nesdev->pcidev->devfn), NES_MANAGE_APBVT_ADD);
+       if (laddr->sin_addr.s_addr != raddr->sin_addr.s_addr) {
+               nes_manage_apbvt(nesvnic, ntohs(laddr->sin_port),
+                                PCI_FUNC(nesdev->pcidev->devfn),
+                                NES_MANAGE_APBVT_ADD);
                apbvt_set = 1;
        }
 
        /* set up the connection params for the node */
-       cm_info.loc_addr = htonl(cm_id->local_addr.sin_addr.s_addr);
-       cm_info.loc_port = htons(cm_id->local_addr.sin_port);
-       cm_info.rem_addr = htonl(cm_id->remote_addr.sin_addr.s_addr);
-       cm_info.rem_port = htons(cm_id->remote_addr.sin_port);
+       cm_info.loc_addr = htonl(laddr->sin_addr.s_addr);
+       cm_info.loc_port = htons(laddr->sin_port);
+       cm_info.rem_addr = htonl(raddr->sin_addr.s_addr);
+       cm_info.rem_port = htons(raddr->sin_port);
        cm_info.cm_id = cm_id;
        cm_info.conn_type = NES_CM_IWARP_CONN_TYPE;
 
@@ -3329,7 +3327,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
                                          &cm_info);
        if (!cm_node) {
                if (apbvt_set)
-                       nes_manage_apbvt(nesvnic, ntohs(cm_id->local_addr.sin_port),
+                       nes_manage_apbvt(nesvnic, ntohs(laddr->sin_port),
                                         PCI_FUNC(nesdev->pcidev->devfn),
                                         NES_MANAGE_APBVT_DEL);
 
@@ -3355,10 +3353,13 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
        struct nes_cm_listener *cm_node;
        struct nes_cm_info cm_info;
        int err;
+       struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->local_addr;
 
        nes_debug(NES_DBG_CM, "cm_id = %p, local port = 0x%04X.\n",
-                       cm_id, ntohs(cm_id->local_addr.sin_port));
+                 cm_id, ntohs(laddr->sin_port));
 
+       if (cm_id->local_addr.ss_family != AF_INET)
+               return -ENOSYS;
        nesvnic = to_nesvnic(cm_id->device);
        if (!nesvnic)
                return -EINVAL;
@@ -3367,11 +3368,11 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
                        nesvnic, nesvnic->netdev, nesvnic->netdev->name);
 
        nes_debug(NES_DBG_CM, "nesvnic->local_ipaddr=0x%08x, sin_addr.s_addr=0x%08x\n",
-                       nesvnic->local_ipaddr, cm_id->local_addr.sin_addr.s_addr);
+                       nesvnic->local_ipaddr, laddr->sin_addr.s_addr);
 
        /* setup listen params in our api call struct */
        cm_info.loc_addr = nesvnic->local_ipaddr;
-       cm_info.loc_port = cm_id->local_addr.sin_port;
+       cm_info.loc_port = laddr->sin_port;
        cm_info.backlog = backlog;
        cm_info.cm_id = cm_id;
 
@@ -3388,8 +3389,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
        cm_id->provider_data = cm_node;
 
        if (!cm_node->reused_node) {
-               err = nes_manage_apbvt(nesvnic,
-                                      ntohs(cm_id->local_addr.sin_port),
+               err = nes_manage_apbvt(nesvnic, ntohs(laddr->sin_port),
                                       PCI_FUNC(nesvnic->nesdev->pcidev->devfn),
                                       NES_MANAGE_APBVT_ADD);
                if (err) {
@@ -3487,6 +3487,9 @@ static void cm_event_connected(struct nes_cm_event *event)
        struct nes_v4_quad nes_quad;
        u32 crc_value;
        int ret;
+       struct sockaddr_in *laddr;
+       struct sockaddr_in *raddr;
+       struct sockaddr_in *cm_event_laddr;
 
        /* get all our handles */
        cm_node = event->cm_node;
@@ -3496,27 +3499,24 @@ static void cm_event_connected(struct nes_cm_event *event)
        nesvnic = to_nesvnic(nesqp->ibqp.device);
        nesdev = nesvnic->nesdev;
        nesadapter = nesdev->nesadapter;
+       laddr = (struct sockaddr_in *)&cm_id->local_addr;
+       raddr = (struct sockaddr_in *)&cm_id->remote_addr;
+       cm_event_laddr = (struct sockaddr_in *)&cm_event.local_addr;
 
        if (nesqp->destroyed)
                return;
        atomic_inc(&cm_connecteds);
        nes_debug(NES_DBG_CM, "QP%u attempting to connect to  0x%08X:0x%04X on"
                  " local port 0x%04X. jiffies = %lu.\n",
-                 nesqp->hwqp.qp_id,
-                 ntohl(cm_id->remote_addr.sin_addr.s_addr),
-                 ntohs(cm_id->remote_addr.sin_port),
-                 ntohs(cm_id->local_addr.sin_port),
-                 jiffies);
+                 nesqp->hwqp.qp_id, ntohl(raddr->sin_addr.s_addr),
+                 ntohs(raddr->sin_port), ntohs(laddr->sin_port), jiffies);
 
        nes_cm_init_tsa_conn(nesqp, cm_node);
 
        /* set the QP tsa context */
-       nesqp->nesqp_context->tcpPorts[0] =
-               cpu_to_le16(ntohs(cm_id->local_addr.sin_port));
-       nesqp->nesqp_context->tcpPorts[1] =
-               cpu_to_le16(ntohs(cm_id->remote_addr.sin_port));
-       nesqp->nesqp_context->ip0 =
-                       cpu_to_le32(ntohl(cm_id->remote_addr.sin_addr.s_addr));
+       nesqp->nesqp_context->tcpPorts[0] = cpu_to_le16(ntohs(laddr->sin_port));
+       nesqp->nesqp_context->tcpPorts[1] = cpu_to_le16(ntohs(raddr->sin_port));
+       nesqp->nesqp_context->ip0 = cpu_to_le32(ntohl(raddr->sin_addr.s_addr));
 
        nesqp->nesqp_context->misc2 |= cpu_to_le32(
                        (u32)PCI_FUNC(nesdev->pcidev->devfn) <<
@@ -3544,9 +3544,9 @@ static void cm_event_connected(struct nes_cm_event *event)
 
        nes_quad.DstIpAdrIndex =
                cpu_to_le32((u32)PCI_FUNC(nesdev->pcidev->devfn) << 24);
-       nes_quad.SrcIpadr = cm_id->remote_addr.sin_addr.s_addr;
-       nes_quad.TcpPorts[0] = cm_id->remote_addr.sin_port;
-       nes_quad.TcpPorts[1] = cm_id->local_addr.sin_port;
+       nes_quad.SrcIpadr = raddr->sin_addr.s_addr;
+       nes_quad.TcpPorts[0] = raddr->sin_port;
+       nes_quad.TcpPorts[1] = laddr->sin_port;
 
        /* Produce hash key */
        crc_value = get_crc_value(&nes_quad);
@@ -3565,8 +3565,8 @@ static void cm_event_connected(struct nes_cm_event *event)
        cm_event.event = IW_CM_EVENT_CONNECT_REPLY;
        cm_event.status = 0;
        cm_event.provider_data = cm_id->provider_data;
-       cm_event.local_addr.sin_family = AF_INET;
-       cm_event.local_addr.sin_port = cm_id->local_addr.sin_port;
+       cm_event_laddr->sin_family = AF_INET;
+       cm_event_laddr->sin_port = laddr->sin_port;
        cm_event.remote_addr = cm_id->remote_addr;
 
        cm_event.private_data = (void *)event->cm_node->mpa_frame_buf;
@@ -3574,7 +3574,7 @@ static void cm_event_connected(struct nes_cm_event *event)
        cm_event.ird = cm_node->ird_size;
        cm_event.ord = cm_node->ord_size;
 
-       cm_event.local_addr.sin_addr.s_addr = event->cm_info.rem_addr;
+       cm_event_laddr->sin_addr.s_addr = event->cm_info.rem_addr;
        ret = cm_id->event_handler(cm_id, &cm_event);
        nes_debug(NES_DBG_CM, "OFA CM event_handler returned, ret=%d\n", ret);
 
@@ -3627,9 +3627,16 @@ static void cm_event_connect_error(struct nes_cm_event *event)
        cm_event.private_data = NULL;
        cm_event.private_data_len = 0;
 
-       nes_debug(NES_DBG_CM, "call CM_EVENT REJECTED, local_addr=%08x, "
-                 "remove_addr=%08x\n", cm_event.local_addr.sin_addr.s_addr,
-                 cm_event.remote_addr.sin_addr.s_addr);
+#ifdef CONFIG_INFINIBAND_NES_DEBUG
+       {
+               struct sockaddr_in *cm_event_laddr = (struct sockaddr_in *)
+                                                    &cm_event.local_addr;
+               struct sockaddr_in *cm_event_raddr = (struct sockaddr_in *)
+                                                    &cm_event.remote_addr;
+               nes_debug(NES_DBG_CM, "call CM_EVENT REJECTED, local_addr=%08x, remote_addr=%08x\n",
+                         cm_event_laddr->sin_addr.s_addr, cm_event_raddr->sin_addr.s_addr);
+       }
+#endif
 
        ret = cm_id->event_handler(cm_id, &cm_event);
        nes_debug(NES_DBG_CM, "OFA CM event_handler returned, ret=%d\n", ret);
@@ -3709,6 +3716,10 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
        struct iw_cm_event cm_event;
        int ret;
        struct nes_cm_node *cm_node;
+       struct sockaddr_in *cm_event_laddr = (struct sockaddr_in *)
+                                            &cm_event.local_addr;
+       struct sockaddr_in *cm_event_raddr = (struct sockaddr_in *)
+                                            &cm_event.remote_addr;
 
        cm_node = event->cm_node;
        if (!cm_node)
@@ -3723,13 +3734,13 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
        cm_event.status = 0;
        cm_event.provider_data = (void *)cm_node;
 
-       cm_event.local_addr.sin_family = AF_INET;
-       cm_event.local_addr.sin_port = htons(event->cm_info.loc_port);
-       cm_event.local_addr.sin_addr.s_addr = htonl(event->cm_info.loc_addr);
+       cm_event_laddr->sin_family = AF_INET;
+       cm_event_laddr->sin_port = htons(event->cm_info.loc_port);
+       cm_event_laddr->sin_addr.s_addr = htonl(event->cm_info.loc_addr);
 
-       cm_event.remote_addr.sin_family = AF_INET;
-       cm_event.remote_addr.sin_port = htons(event->cm_info.rem_port);
-       cm_event.remote_addr.sin_addr.s_addr = htonl(event->cm_info.rem_addr);
+       cm_event_raddr->sin_family = AF_INET;
+       cm_event_raddr->sin_port = htons(event->cm_info.rem_port);
+       cm_event_raddr->sin_addr.s_addr = htonl(event->cm_info.rem_addr);
        cm_event.private_data = cm_node->mpa_frame_buf;
        cm_event.private_data_len = (u8)cm_node->mpa_frame_size;
        cm_event.ird = cm_node->ird_size;
@@ -3749,6 +3760,10 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
        struct iw_cm_event cm_event;
        struct nes_cm_node *cm_node;
        int ret;
+       struct sockaddr_in *cm_event_laddr = (struct sockaddr_in *)
+                                            &cm_event.local_addr;
+       struct sockaddr_in *cm_event_raddr = (struct sockaddr_in *)
+                                            &cm_event.remote_addr;
 
        cm_node = event->cm_node;
        if (!cm_node)
@@ -3763,21 +3778,21 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
        cm_event.status = -ECONNREFUSED;
        cm_event.provider_data = cm_id->provider_data;
 
-       cm_event.local_addr.sin_family = AF_INET;
-       cm_event.local_addr.sin_port = htons(event->cm_info.loc_port);
-       cm_event.local_addr.sin_addr.s_addr = htonl(event->cm_info.loc_addr);
+       cm_event_laddr->sin_family = AF_INET;
+       cm_event_laddr->sin_port = htons(event->cm_info.loc_port);
+       cm_event_laddr->sin_addr.s_addr = htonl(event->cm_info.loc_addr);
 
-       cm_event.remote_addr.sin_family = AF_INET;
-       cm_event.remote_addr.sin_port = htons(event->cm_info.rem_port);
-       cm_event.remote_addr.sin_addr.s_addr = htonl(event->cm_info.rem_addr);
+       cm_event_raddr->sin_family = AF_INET;
+       cm_event_raddr->sin_port = htons(event->cm_info.rem_port);
+       cm_event_raddr->sin_addr.s_addr = htonl(event->cm_info.rem_addr);
 
        cm_event.private_data = cm_node->mpa_frame_buf;
        cm_event.private_data_len = (u8)cm_node->mpa_frame_size;
 
        nes_debug(NES_DBG_CM, "call CM_EVENT_MPA_REJECTED, local_addr=%08x, "
                  "remove_addr=%08x\n",
-                 cm_event.local_addr.sin_addr.s_addr,
-                 cm_event.remote_addr.sin_addr.s_addr);
+                 cm_event_laddr->sin_addr.s_addr,
+                 cm_event_raddr->sin_addr.s_addr);
 
        ret = cm_id->event_handler(cm_id, &cm_event);
        if (ret)
index d540180a8e420865a71387a630a509b203959747..e798837f1fe0dcb250bbdb3ef750d578147dae3c 100644 (file)
@@ -60,6 +60,7 @@ struct ocrdma_dev_attr {
        int max_send_sge;
        int max_recv_sge;
        int max_srq_sge;
+       int max_rdma_sge;
        int max_mr;
        u64 max_mr_size;
        u32 max_num_mr_pbl;
@@ -172,7 +173,6 @@ struct ocrdma_dev {
 
 struct ocrdma_cq {
        struct ib_cq ibcq;
-       struct ocrdma_dev *dev;
        struct ocrdma_cqe *va;
        u32 phase;
        u32 getp;       /* pointer to pending wrs to
@@ -214,7 +214,6 @@ struct ocrdma_pd {
 
 struct ocrdma_ah {
        struct ib_ah ibah;
-       struct ocrdma_dev *dev;
        struct ocrdma_av *av;
        u16 sgid_index;
        u32 id;
@@ -234,7 +233,6 @@ struct ocrdma_qp_hwq_info {
 
 struct ocrdma_srq {
        struct ib_srq ibsrq;
-       struct ocrdma_dev *dev;
        u8 __iomem *db;
        struct ocrdma_qp_hwq_info rq;
        u64 *rqe_wr_id_tbl;
@@ -290,10 +288,10 @@ struct ocrdma_qp {
        u32 qkey;
        bool dpp_enabled;
        u8 *ird_q_va;
+       u16 db_cache;
 };
 
 struct ocrdma_hw_mr {
-       struct ocrdma_dev *dev;
        u32 lkey;
        u8 fr_mr;
        u8 remote_atomic;
@@ -317,12 +315,10 @@ struct ocrdma_mr {
        struct ib_mr ibmr;
        struct ib_umem *umem;
        struct ocrdma_hw_mr hwmr;
-       struct ocrdma_pd *pd;
 };
 
 struct ocrdma_ucontext {
        struct ib_ucontext ibucontext;
-       struct ocrdma_dev *dev;
 
        struct list_head mm_head;
        struct mutex mm_list_lock; /* protects list entries of mm type */
@@ -393,7 +389,7 @@ static inline int is_cqe_valid(struct ocrdma_cq *cq, struct ocrdma_cqe *cqe)
 {
        int cqe_valid;
        cqe_valid = le32_to_cpu(cqe->flags_status_srcqpn) & OCRDMA_CQE_VALID;
-       return ((cqe_valid == cq->phase) ? 1 : 0);
+       return (cqe_valid == cq->phase);
 }
 
 static inline int is_cqe_for_sq(struct ocrdma_cqe *cqe)
index 517ab20b727c51feac5314a64e717adfdedacde4..e5ea9a9776a443701a4216929210fe4eb3125666 100644 (file)
@@ -28,6 +28,8 @@
 #ifndef __OCRDMA_ABI_H__
 #define __OCRDMA_ABI_H__
 
+/* user kernel communication data structures. */
+
 struct ocrdma_alloc_ucontext_resp {
        u32 dev_id;
        u32 wqe_size;
@@ -35,16 +37,16 @@ struct ocrdma_alloc_ucontext_resp {
        u32 dpp_wqe_size;
        u64 ah_tbl_page;
        u32 ah_tbl_len;
-       u32 rsvd;
-       u8 fw_ver[32];
        u32 rqe_size;
+       u8 fw_ver[32];
+       /* for future use/new features in progress */
        u64 rsvd1;
-} __packed;
+       u64 rsvd2;
+};
 
-/* user kernel communication data structures. */
 struct ocrdma_alloc_pd_ureq {
        u64 rsvd1;
-} __packed;
+};
 
 struct ocrdma_alloc_pd_uresp {
        u32 id;
@@ -52,12 +54,12 @@ struct ocrdma_alloc_pd_uresp {
        u32 dpp_page_addr_hi;
        u32 dpp_page_addr_lo;
        u64 rsvd1;
-} __packed;
+};
 
 struct ocrdma_create_cq_ureq {
        u32 dpp_cq;
-       u32 rsvd;
-} __packed;
+       u32 rsvd; /* pad */
+};
 
 #define MAX_CQ_PAGES 8
 struct ocrdma_create_cq_uresp {
@@ -69,9 +71,10 @@ struct ocrdma_create_cq_uresp {
        u64 db_page_addr;
        u32 db_page_size;
        u32 phase_change;
+       /* for future use/new features in progress */
        u64 rsvd1;
        u64 rsvd2;
-} __packed;
+};
 
 #define MAX_QP_PAGES 8
 #define MAX_UD_AV_PAGES 8
@@ -80,14 +83,14 @@ struct ocrdma_create_qp_ureq {
        u8 enable_dpp_cq;
        u8 rsvd;
        u16 dpp_cq_id;
-       u32 rsvd1;
+       u32 rsvd1;      /* pad */
 };
 
 struct ocrdma_create_qp_uresp {
        u16 qp_id;
        u16 sq_dbid;
        u16 rq_dbid;
-       u16 resv0;
+       u16 resv0;      /* pad */
        u32 sq_page_size;
        u32 rq_page_size;
        u32 num_sq_pages;
@@ -98,19 +101,19 @@ struct ocrdma_create_qp_uresp {
        u32 db_page_size;
        u32 dpp_credit;
        u32 dpp_offset;
-       u32 rsvd1;
        u32 num_wqe_allocated;
        u32 num_rqe_allocated;
        u32 db_sq_offset;
        u32 db_rq_offset;
        u32 db_shift;
+       u64 rsvd1;
        u64 rsvd2;
        u64 rsvd3;
 } __packed;
 
 struct ocrdma_create_srq_uresp {
        u16 rq_dbid;
-       u16 resv0;
+       u16 resv0;      /* pad */
        u32 resv1;
 
        u32 rq_page_size;
@@ -126,6 +129,6 @@ struct ocrdma_create_srq_uresp {
 
        u64 rsvd2;
        u64 rsvd3;
-} __packed;
+};
 
 #endif                         /* __OCRDMA_ABI_H__ */
index f4c587c68f648055546deafce7f8708798f4d109..df9e73758afbe344f8327e2cdf8455c2b831b3df 100644 (file)
 #include "ocrdma_ah.h"
 #include "ocrdma_hw.h"
 
-static inline int set_av_attr(struct ocrdma_ah *ah,
+static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
                                struct ib_ah_attr *attr, int pdid)
 {
        int status = 0;
        u16 vlan_tag; bool vlan_enabled = false;
-       struct ocrdma_dev *dev = ah->dev;
        struct ocrdma_eth_vlan eth;
        struct ocrdma_grh grh;
        int eth_sz;
@@ -92,7 +91,7 @@ struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
        int status;
        struct ocrdma_ah *ah;
        struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
-       struct ocrdma_dev *dev = pd->dev;
+       struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
 
        if (!(attr->ah_flags & IB_AH_GRH))
                return ERR_PTR(-EINVAL);
@@ -100,12 +99,11 @@ struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
        ah = kzalloc(sizeof *ah, GFP_ATOMIC);
        if (!ah)
                return ERR_PTR(-ENOMEM);
-       ah->dev = pd->dev;
 
        status = ocrdma_alloc_av(dev, ah);
        if (status)
                goto av_err;
-       status = set_av_attr(ah, attr, pd->id);
+       status = set_av_attr(dev, ah, attr, pd->id);
        if (status)
                goto av_conf_err;
 
@@ -126,7 +124,9 @@ av_err:
 int ocrdma_destroy_ah(struct ib_ah *ibah)
 {
        struct ocrdma_ah *ah = get_ocrdma_ah(ibah);
-       ocrdma_free_av(ah->dev, ah);
+       struct ocrdma_dev *dev = get_ocrdma_dev(ibah->device);
+
+       ocrdma_free_av(dev, ah);
        kfree(ah);
        return 0;
 }
index 0965278dd2ed7e805f7c1a8e1d9eae03b9c3765b..97bb1ce8d243d6e1f2fa0e59ecdf439d2fa683a3 100644 (file)
@@ -94,7 +94,7 @@ enum cqe_status {
 
 static inline void *ocrdma_get_eqe(struct ocrdma_eq *eq)
 {
-       return (u8 *)eq->q.va + (eq->q.tail * sizeof(struct ocrdma_eqe));
+       return eq->q.va + (eq->q.tail * sizeof(struct ocrdma_eqe));
 }
 
 static inline void ocrdma_eq_inc_tail(struct ocrdma_eq *eq)
@@ -105,8 +105,7 @@ static inline void ocrdma_eq_inc_tail(struct ocrdma_eq *eq)
 static inline void *ocrdma_get_mcqe(struct ocrdma_dev *dev)
 {
        struct ocrdma_mcqe *cqe = (struct ocrdma_mcqe *)
-           ((u8 *) dev->mq.cq.va +
-            (dev->mq.cq.tail * sizeof(struct ocrdma_mcqe)));
+           (dev->mq.cq.va + (dev->mq.cq.tail * sizeof(struct ocrdma_mcqe)));
 
        if (!(le32_to_cpu(cqe->valid_ae_cmpl_cons) & OCRDMA_MCQE_VALID_MASK))
                return NULL;
@@ -120,9 +119,7 @@ static inline void ocrdma_mcq_inc_tail(struct ocrdma_dev *dev)
 
 static inline struct ocrdma_mqe *ocrdma_get_mqe(struct ocrdma_dev *dev)
 {
-       return (struct ocrdma_mqe *)((u8 *) dev->mq.sq.va +
-                                    (dev->mq.sq.head *
-                                     sizeof(struct ocrdma_mqe)));
+       return dev->mq.sq.va + (dev->mq.sq.head * sizeof(struct ocrdma_mqe));
 }
 
 static inline void ocrdma_mq_inc_head(struct ocrdma_dev *dev)
@@ -132,8 +129,7 @@ static inline void ocrdma_mq_inc_head(struct ocrdma_dev *dev)
 
 static inline void *ocrdma_get_mqe_rsp(struct ocrdma_dev *dev)
 {
-       return (void *)((u8 *) dev->mq.sq.va +
-                       (dev->mqe_ctx.tag * sizeof(struct ocrdma_mqe)));
+       return dev->mq.sq.va + (dev->mqe_ctx.tag * sizeof(struct ocrdma_mqe));
 }
 
 enum ib_qp_state get_ibqp_state(enum ocrdma_qp_state qps)
@@ -181,7 +177,7 @@ static enum ocrdma_qp_state get_ocrdma_qp_state(enum ib_qp_state qps)
 
 static int ocrdma_get_mbx_errno(u32 status)
 {
-       int err_num = -EFAULT;
+       int err_num;
        u8 mbox_status = (status & OCRDMA_MBX_RSP_STATUS_MASK) >>
                                        OCRDMA_MBX_RSP_STATUS_SHIFT;
        u8 add_status = (status & OCRDMA_MBX_RSP_ASTATUS_MASK) >>
@@ -438,9 +434,9 @@ static int ocrdma_mbx_create_eq(struct ocrdma_dev *dev, struct ocrdma_eq *eq)
                                 NULL);
        if (!status) {
                eq->q.id = rsp->vector_eqid & 0xffff;
-               if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY)
+               if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
                        ocrdma_assign_eq_vect_gen2(dev, eq);
-               else {
+               } else {
                        eq->vector = (rsp->vector_eqid >> 16) & 0xffff;
                        dev->nic_info.msix.start_vector += 1;
                }
@@ -527,16 +523,21 @@ static int ocrdma_mbx_mq_cq_create(struct ocrdma_dev *dev,
        ocrdma_init_mch(&cmd->req, OCRDMA_CMD_CREATE_CQ,
                        OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
 
-       cmd->pgsz_pgcnt = PAGES_4K_SPANNED(cq->va, cq->size);
+       cmd->req.rsvd_version = OCRDMA_CREATE_CQ_VER2;
+       cmd->pgsz_pgcnt = (cq->size / OCRDMA_MIN_Q_PAGE_SIZE) <<
+               OCRDMA_CREATE_CQ_PAGE_SIZE_SHIFT;
+       cmd->pgsz_pgcnt |= PAGES_4K_SPANNED(cq->va, cq->size);
+
        cmd->ev_cnt_flags = OCRDMA_CREATE_CQ_DEF_FLAGS;
-       cmd->eqn = (eq->id << OCRDMA_CREATE_CQ_EQID_SHIFT);
+       cmd->eqn = eq->id;
+       cmd->cqe_count = cq->size / sizeof(struct ocrdma_mcqe);
 
-       ocrdma_build_q_pages(&cmd->pa[0], cmd->pgsz_pgcnt,
+       ocrdma_build_q_pages(&cmd->pa[0], cq->size / OCRDMA_MIN_Q_PAGE_SIZE,
                             cq->dma, PAGE_SIZE_4K);
        status = be_roce_mcc_cmd(dev->nic_info.netdev,
                                 cmd, sizeof(*cmd), NULL, NULL);
        if (!status) {
-               cq->id = (rsp->cq_id & OCRDMA_CREATE_CQ_RSP_CQ_ID_MASK);
+               cq->id = (u16) (rsp->cq_id & OCRDMA_CREATE_CQ_RSP_CQ_ID_MASK);
                cq->created = true;
        }
        return status;
@@ -653,7 +654,7 @@ static void ocrdma_process_qpcat_error(struct ocrdma_dev *dev,
 
        if (qp == NULL)
                BUG();
-       ocrdma_qp_state_machine(qp, new_ib_qps, &old_ib_qps);
+       ocrdma_qp_state_change(qp, new_ib_qps, &old_ib_qps);
 }
 
 static void ocrdma_dispatch_ibevent(struct ocrdma_dev *dev,
@@ -746,8 +747,9 @@ static void ocrdma_dispatch_ibevent(struct ocrdma_dev *dev,
                        qp->srq->ibsrq.event_handler(&ib_evt,
                                                     qp->srq->ibsrq.
                                                     srq_context);
-       } else if (dev_event)
+       } else if (dev_event) {
                ib_dispatch_event(&ib_evt);
+       }
 
 }
 
@@ -957,9 +959,8 @@ static int ocrdma_mbx_cmd(struct ocrdma_dev *dev, struct ocrdma_mqe *mqe)
        rsp = ocrdma_get_mqe_rsp(dev);
        ocrdma_copy_le32_to_cpu(mqe, rsp, (sizeof(*mqe)));
        if (cqe_status || ext_status) {
-               pr_err
-                   ("%s() opcode=0x%x, cqe_status=0x%x, ext_status=0x%x\n",
-                    __func__,
+               pr_err("%s() opcode=0x%x, cqe_status=0x%x, ext_status=0x%x\n",
+                      __func__,
                     (rsp->u.rsp.subsys_op & OCRDMA_MBX_RSP_OPCODE_MASK) >>
                     OCRDMA_MBX_RSP_OPCODE_SHIFT, cqe_status, ext_status);
                status = ocrdma_get_mbx_cqe_errno(cqe_status);
@@ -991,6 +992,9 @@ static void ocrdma_get_attr(struct ocrdma_dev *dev,
        attr->max_srq_sge = (rsp->max_srq_rqe_sge &
                              OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_MASK) >>
            OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_OFFSET;
+       attr->max_rdma_sge = (rsp->max_write_send_sge &
+                             OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_MASK) >>
+           OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_SHIFT;
        attr->max_ord_per_qp = (rsp->max_ird_ord_per_qp &
                                OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_MASK) >>
            OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_SHIFT;
@@ -1377,15 +1381,13 @@ int ocrdma_mbx_create_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
        cmd->cmd.pgsz_pgcnt |= hw_pages;
        cmd->cmd.ev_cnt_flags = OCRDMA_CREATE_CQ_DEF_FLAGS;
 
-       if (dev->eq_cnt < 0)
-               goto eq_err;
        cq->eqn = ocrdma_bind_eq(dev);
        cmd->cmd.req.rsvd_version = OCRDMA_CREATE_CQ_VER2;
        cqe_count = cq->len / cqe_size;
-       if (cqe_count > 1024)
+       if (cqe_count > 1024) {
                /* Set cnt to 3 to indicate more than 1024 cq entries */
                cmd->cmd.ev_cnt_flags |= (0x3 << OCRDMA_CREATE_CQ_CNT_SHIFT);
-       else {
+       } else {
                u8 count = 0;
                switch (cqe_count) {
                case 256:
@@ -1427,7 +1429,6 @@ int ocrdma_mbx_create_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
        return 0;
 mbx_err:
        ocrdma_unbind_eq(dev, cq->eqn);
-eq_err:
        dma_free_coherent(&pdev->dev, cq->len, cq->va, cq->pa);
 mem_err:
        kfree(cmd);
@@ -1678,8 +1679,8 @@ void ocrdma_flush_qp(struct ocrdma_qp *qp)
        spin_unlock_irqrestore(&qp->dev->flush_q_lock, flags);
 }
 
-int ocrdma_qp_state_machine(struct ocrdma_qp *qp, enum ib_qp_state new_ib_state,
-                           enum ib_qp_state *old_ib_state)
+int ocrdma_qp_state_change(struct ocrdma_qp *qp, enum ib_qp_state new_ib_state,
+                          enum ib_qp_state *old_ib_state)
 {
        unsigned long flags;
        int status = 0;
@@ -1696,96 +1697,11 @@ int ocrdma_qp_state_machine(struct ocrdma_qp *qp, enum ib_qp_state new_ib_state,
                return 1;
        }
 
-       switch (qp->state) {
-       case OCRDMA_QPS_RST:
-               switch (new_state) {
-               case OCRDMA_QPS_RST:
-               case OCRDMA_QPS_INIT:
-                       break;
-               default:
-                       status = -EINVAL;
-                       break;
-               };
-               break;
-       case OCRDMA_QPS_INIT:
-               /* qps: INIT->XXX */
-               switch (new_state) {
-               case OCRDMA_QPS_INIT:
-               case OCRDMA_QPS_RTR:
-                       break;
-               case OCRDMA_QPS_ERR:
-                       ocrdma_flush_qp(qp);
-                       break;
-               default:
-                       status = -EINVAL;
-                       break;
-               };
-               break;
-       case OCRDMA_QPS_RTR:
-               /* qps: RTS->XXX */
-               switch (new_state) {
-               case OCRDMA_QPS_RTS:
-                       break;
-               case OCRDMA_QPS_ERR:
-                       ocrdma_flush_qp(qp);
-                       break;
-               default:
-                       status = -EINVAL;
-                       break;
-               };
-               break;
-       case OCRDMA_QPS_RTS:
-               /* qps: RTS->XXX */
-               switch (new_state) {
-               case OCRDMA_QPS_SQD:
-               case OCRDMA_QPS_SQE:
-                       break;
-               case OCRDMA_QPS_ERR:
-                       ocrdma_flush_qp(qp);
-                       break;
-               default:
-                       status = -EINVAL;
-                       break;
-               };
-               break;
-       case OCRDMA_QPS_SQD:
-               /* qps: SQD->XXX */
-               switch (new_state) {
-               case OCRDMA_QPS_RTS:
-               case OCRDMA_QPS_SQE:
-               case OCRDMA_QPS_ERR:
-                       break;
-               default:
-                       status = -EINVAL;
-                       break;
-               };
-               break;
-       case OCRDMA_QPS_SQE:
-               switch (new_state) {
-               case OCRDMA_QPS_RTS:
-               case OCRDMA_QPS_ERR:
-                       break;
-               default:
-                       status = -EINVAL;
-                       break;
-               };
-               break;
-       case OCRDMA_QPS_ERR:
-               /* qps: ERR->XXX */
-               switch (new_state) {
-               case OCRDMA_QPS_RST:
-                       break;
-               default:
-                       status = -EINVAL;
-                       break;
-               };
-               break;
-       default:
-               status = -EINVAL;
-               break;
-       };
-       if (!status)
-               qp->state = new_state;
+
+       if (new_state == OCRDMA_QPS_ERR)
+               ocrdma_flush_qp(qp);
+
+       qp->state = new_state;
 
        spin_unlock_irqrestore(&qp->q_lock, flags);
        return status;
@@ -2057,9 +1973,10 @@ int ocrdma_mbx_create_qp(struct ocrdma_qp *qp, struct ib_qp_init_attr *attrs,
        qp->rq_cq = cq;
 
        if (pd->dpp_enabled && attrs->cap.max_inline_data && pd->num_dpp_qp &&
-           (attrs->cap.max_inline_data <= dev->attr.max_inline_data))
+           (attrs->cap.max_inline_data <= dev->attr.max_inline_data)) {
                ocrdma_set_create_qp_dpp_cmd(cmd, pd, qp, enable_dpp_cq,
                                             dpp_cq_id);
+       }
 
        status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
        if (status)
@@ -2108,27 +2025,29 @@ int ocrdma_resolve_dgid(struct ocrdma_dev *dev, union ib_gid *dgid,
        struct in6_addr in6;
 
        memcpy(&in6, dgid, sizeof in6);
-       if (rdma_is_multicast_addr(&in6))
+       if (rdma_is_multicast_addr(&in6)) {
                rdma_get_mcast_mac(&in6, mac_addr);
-       else if (rdma_link_local_addr(&in6))
+       } else if (rdma_link_local_addr(&in6)) {
                rdma_get_ll_mac(&in6, mac_addr);
-       else {
+       } else {
                pr_err("%s() fail to resolve mac_addr.\n", __func__);
                return -EINVAL;
        }
        return 0;
 }
 
-static void ocrdma_set_av_params(struct ocrdma_qp *qp,
+static int ocrdma_set_av_params(struct ocrdma_qp *qp,
                                struct ocrdma_modify_qp *cmd,
                                struct ib_qp_attr *attrs)
 {
+       int status;
        struct ib_ah_attr *ah_attr = &attrs->ah_attr;
-       union ib_gid sgid;
+       union ib_gid sgid, zgid;
        u32 vlan_id;
        u8 mac_addr[6];
+
        if ((ah_attr->ah_flags & IB_AH_GRH) == 0)
-               return;
+               return -EINVAL;
        cmd->params.tclass_sq_psn |=
            (ah_attr->grh.traffic_class << OCRDMA_QP_PARAMS_TCLASS_SHIFT);
        cmd->params.rnt_rc_sl_fl |=
@@ -2138,8 +2057,15 @@ static void ocrdma_set_av_params(struct ocrdma_qp *qp,
        cmd->flags |= OCRDMA_QP_PARA_FLOW_LBL_VALID;
        memcpy(&cmd->params.dgid[0], &ah_attr->grh.dgid.raw[0],
               sizeof(cmd->params.dgid));
-       ocrdma_query_gid(&qp->dev->ibdev, 1,
+       status = ocrdma_query_gid(&qp->dev->ibdev, 1,
                         ah_attr->grh.sgid_index, &sgid);
+       if (status)
+               return status;
+
+       memset(&zgid, 0, sizeof(zgid));
+       if (!memcmp(&sgid, &zgid, sizeof(zgid)))
+               return -EINVAL;
+
        qp->sgid_idx = ah_attr->grh.sgid_index;
        memcpy(&cmd->params.sgid[0], &sgid.raw[0], sizeof(cmd->params.sgid));
        ocrdma_resolve_dgid(qp->dev, &ah_attr->grh.dgid, &mac_addr[0]);
@@ -2155,6 +2081,7 @@ static void ocrdma_set_av_params(struct ocrdma_qp *qp,
                    vlan_id << OCRDMA_QP_PARAMS_VLAN_SHIFT;
                cmd->flags |= OCRDMA_QP_PARA_VLAN_EN_VALID;
        }
+       return 0;
 }
 
 static int ocrdma_set_qp_params(struct ocrdma_qp *qp,
@@ -2176,9 +2103,11 @@ static int ocrdma_set_qp_params(struct ocrdma_qp *qp,
                cmd->params.qkey = attrs->qkey;
                cmd->flags |= OCRDMA_QP_PARA_QKEY_VALID;
        }
-       if (attr_mask & IB_QP_AV)
-               ocrdma_set_av_params(qp, cmd, attrs);
-       else if (qp->qp_type == IB_QPT_GSI || qp->qp_type == IB_QPT_UD) {
+       if (attr_mask & IB_QP_AV) {
+               status = ocrdma_set_av_params(qp, cmd, attrs);
+               if (status)
+                       return status;
+       } else if (qp->qp_type == IB_QPT_GSI || qp->qp_type == IB_QPT_UD) {
                /* set the default mac address for UD, GSI QPs */
                cmd->params.dmac_b0_to_b3 = qp->dev->nic_info.mac_addr[0] |
                        (qp->dev->nic_info.mac_addr[1] << 8) |
@@ -2283,10 +2212,12 @@ int ocrdma_mbx_modify_qp(struct ocrdma_dev *dev, struct ocrdma_qp *qp,
                     OCRDMA_QP_PARAMS_STATE_SHIFT) &
                    OCRDMA_QP_PARAMS_STATE_MASK;
                cmd->flags |= OCRDMA_QP_PARA_QPS_VALID;
-       } else
+       } else {
                cmd->params.max_sge_recv_flags |=
                    (qp->state << OCRDMA_QP_PARAMS_STATE_SHIFT) &
                    OCRDMA_QP_PARAMS_STATE_MASK;
+       }
+
        status = ocrdma_set_qp_params(qp, cmd, attrs, attr_mask, old_qps);
        if (status)
                goto mbx_err;
@@ -2324,7 +2255,7 @@ mbx_err:
        return status;
 }
 
-int ocrdma_mbx_create_srq(struct ocrdma_srq *srq,
+int ocrdma_mbx_create_srq(struct ocrdma_dev *dev, struct ocrdma_srq *srq,
                          struct ib_srq_init_attr *srq_attr,
                          struct ocrdma_pd *pd)
 {
@@ -2334,7 +2265,6 @@ int ocrdma_mbx_create_srq(struct ocrdma_srq *srq,
        struct ocrdma_create_srq_rsp *rsp;
        struct ocrdma_create_srq *cmd;
        dma_addr_t pa;
-       struct ocrdma_dev *dev = srq->dev;
        struct pci_dev *pdev = dev->nic_info.pdev;
        u32 max_rqe_allocated;
 
@@ -2404,13 +2334,15 @@ int ocrdma_mbx_modify_srq(struct ocrdma_srq *srq, struct ib_srq_attr *srq_attr)
 {
        int status = -ENOMEM;
        struct ocrdma_modify_srq *cmd;
+       struct ocrdma_dev *dev = get_ocrdma_dev(srq->ibsrq.device);
+
        cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_CREATE_SRQ, sizeof(*cmd));
        if (!cmd)
                return status;
        cmd->id = srq->id;
        cmd->limit_max_rqe |= srq_attr->srq_limit <<
            OCRDMA_MODIFY_SRQ_LIMIT_SHIFT;
-       status = ocrdma_mbx_cmd(srq->dev, (struct ocrdma_mqe *)cmd);
+       status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
        kfree(cmd);
        return status;
 }
@@ -2419,11 +2351,13 @@ int ocrdma_mbx_query_srq(struct ocrdma_srq *srq, struct ib_srq_attr *srq_attr)
 {
        int status = -ENOMEM;
        struct ocrdma_query_srq *cmd;
+       struct ocrdma_dev *dev = get_ocrdma_dev(srq->ibsrq.device);
+
        cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_CREATE_SRQ, sizeof(*cmd));
        if (!cmd)
                return status;
        cmd->id = srq->rq.dbid;
-       status = ocrdma_mbx_cmd(srq->dev, (struct ocrdma_mqe *)cmd);
+       status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
        if (status == 0) {
                struct ocrdma_query_srq_rsp *rsp =
                    (struct ocrdma_query_srq_rsp *)cmd;
@@ -2448,7 +2382,7 @@ int ocrdma_mbx_destroy_srq(struct ocrdma_dev *dev, struct ocrdma_srq *srq)
        if (!cmd)
                return status;
        cmd->id = srq->id;
-       status = ocrdma_mbx_cmd(srq->dev, (struct ocrdma_mqe *)cmd);
+       status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
        if (srq->rq.va)
                dma_free_coherent(&pdev->dev, srq->rq.len,
                                  srq->rq.va, srq->rq.pa);
@@ -2497,9 +2431,9 @@ static int ocrdma_create_mq_eq(struct ocrdma_dev *dev)
        unsigned long flags = 0;
        int num_eq = 0;
 
-       if (dev->nic_info.intr_mode == BE_INTERRUPT_MODE_INTX)
+       if (dev->nic_info.intr_mode == BE_INTERRUPT_MODE_INTX) {
                flags = IRQF_SHARED;
-       else {
+       } else {
                num_eq = dev->nic_info.msix.num_vectors -
                                dev->nic_info.msix.start_vector;
                /* minimum two vectors/eq are required for rdma to work.
@@ -2532,8 +2466,10 @@ static int ocrdma_create_qp_eqs(struct ocrdma_dev *dev)
        if (dev->nic_info.intr_mode == BE_INTERRUPT_MODE_INTX) {
                num_eq = 1;
                flags = IRQF_SHARED;
-       } else
+       } else {
                num_eq = min_t(u32, num_eq, num_online_cpus());
+       }
+
        dev->qp_eq_tbl = kzalloc(sizeof(struct ocrdma_eq) * num_eq, GFP_KERNEL);
        if (!dev->qp_eq_tbl)
                return -ENOMEM;
@@ -2561,8 +2497,7 @@ static int ocrdma_create_qp_eqs(struct ocrdma_dev *dev)
        /* one eq is sufficient for data path to work */
        if (dev->eq_cnt >= 1)
                return 0;
-       if (status)
-               ocrdma_destroy_qp_eqs(dev);
+       ocrdma_destroy_qp_eqs(dev);
        return status;
 }
 
index be5db77404dbae1c9660997d05d85e0ac16c1027..cc90ac3b6d424f73f432b3f4639c85212367c57a 100644 (file)
@@ -112,8 +112,7 @@ int ocrdma_mbx_modify_qp(struct ocrdma_dev *, struct ocrdma_qp *,
 int ocrdma_mbx_query_qp(struct ocrdma_dev *, struct ocrdma_qp *,
                        struct ocrdma_qp_params *param);
 int ocrdma_mbx_destroy_qp(struct ocrdma_dev *, struct ocrdma_qp *);
-
-int ocrdma_mbx_create_srq(struct ocrdma_srq *,
+int ocrdma_mbx_create_srq(struct ocrdma_dev *, struct ocrdma_srq *,
                          struct ib_srq_init_attr *,
                          struct ocrdma_pd *);
 int ocrdma_mbx_modify_srq(struct ocrdma_srq *, struct ib_srq_attr *);
@@ -123,7 +122,7 @@ int ocrdma_mbx_destroy_srq(struct ocrdma_dev *, struct ocrdma_srq *);
 int ocrdma_alloc_av(struct ocrdma_dev *, struct ocrdma_ah *);
 int ocrdma_free_av(struct ocrdma_dev *, struct ocrdma_ah *);
 
-int ocrdma_qp_state_machine(struct ocrdma_qp *, enum ib_qp_state new_state,
+int ocrdma_qp_state_change(struct ocrdma_qp *, enum ib_qp_state new_state,
                            enum ib_qp_state *old_ib_state);
 bool ocrdma_is_qp_in_sq_flushlist(struct ocrdma_cq *, struct ocrdma_qp *);
 bool ocrdma_is_qp_in_rq_flushlist(struct ocrdma_cq *, struct ocrdma_qp *);
index 36b062da2aea4218d14b77ded42fbc9f05fea383..0184009060dbcf4d10f58b9d33824a2a83875002 100644 (file)
@@ -177,7 +177,7 @@ struct ocrdma_mbx_hdr {
        u32 timeout;            /* in seconds */
        u32 cmd_len;
        u32 rsvd_version;
-} __packed;
+};
 
 enum {
        OCRDMA_MBX_RSP_OPCODE_SHIFT     = 0,
@@ -197,7 +197,7 @@ struct ocrdma_mbx_rsp {
        u32 status;
        u32 rsp_len;
        u32 add_rsp_len;
-} __packed;
+};
 
 enum {
        OCRDMA_MQE_EMBEDDED     = 1,
@@ -208,7 +208,7 @@ struct ocrdma_mqe_sge {
        u32 pa_lo;
        u32 pa_hi;
        u32 len;
-} __packed;
+};
 
 enum {
        OCRDMA_MQE_HDR_EMB_SHIFT        = 0,
@@ -225,12 +225,12 @@ struct ocrdma_mqe_hdr {
        u32 tag_lo;
        u32 tag_hi;
        u32 rsvd3;
-} __packed;
+};
 
 struct ocrdma_mqe_emb_cmd {
        struct ocrdma_mbx_hdr mch;
        u8 pyld[220];
-} __packed;
+};
 
 struct ocrdma_mqe {
        struct ocrdma_mqe_hdr hdr;
@@ -242,7 +242,7 @@ struct ocrdma_mqe {
                u8 cmd[236];
                struct ocrdma_mbx_rsp rsp;
        } u;
-} __packed;
+};
 
 #define OCRDMA_EQ_LEN       4096
 #define OCRDMA_MQ_CQ_LEN    256
@@ -259,12 +259,12 @@ struct ocrdma_mqe {
 struct ocrdma_delete_q_req {
        struct ocrdma_mbx_hdr req;
        u32 id;
-} __packed;
+};
 
 struct ocrdma_pa {
        u32 lo;
        u32 hi;
-} __packed;
+};
 
 #define MAX_OCRDMA_EQ_PAGES (8)
 struct ocrdma_create_eq_req {
@@ -275,7 +275,7 @@ struct ocrdma_create_eq_req {
        u32 delay;
        u32 rsvd;
        struct ocrdma_pa pa[MAX_OCRDMA_EQ_PAGES];
-} __packed;
+};
 
 enum {
        OCRDMA_CREATE_EQ_VALID  = Bit(29),
@@ -310,7 +310,7 @@ struct ocrdma_mcqe {
        u32 tag_lo;
        u32 tag_hi;
        u32 valid_ae_cmpl_cons;
-} __packed;
+};
 
 enum {
        OCRDMA_AE_MCQE_QPVALID          = Bit(31),
@@ -332,7 +332,7 @@ struct ocrdma_ae_mcqe {
        u32 cqvalid_cqid;
        u32 evt_tag;
        u32 valid_ae_event;
-} __packed;
+};
 
 enum {
        OCRDMA_AE_MPA_MCQE_REQ_ID_SHIFT         = 16,
@@ -356,7 +356,7 @@ struct ocrdma_ae_mpa_mcqe {
        u32 w1;
        u32 w2;
        u32 valid_ae_event;
-} __packed;
+};
 
 enum {
        OCRDMA_AE_QP_MCQE_NEW_QP_STATE_SHIFT    = 0,
@@ -382,7 +382,7 @@ struct ocrdma_ae_qp_mcqe {
        u32 w1;
        u32 w2;
        u32 valid_ae_event;
-} __packed;
+};
 
 #define OCRDMA_ASYNC_EVE_CODE 0x14
 
@@ -487,7 +487,8 @@ struct ocrdma_mbx_query_config {
        u32 max_ird_ord_per_qp;
        u32 max_shared_ird_ord;
        u32 max_mr;
-       u64 max_mr_size;
+       u32 max_mr_size_lo;
+       u32 max_mr_size_hi;
        u32 max_num_mr_pbl;
        u32 max_mw;
        u32 max_fmr;
@@ -502,14 +503,14 @@ struct ocrdma_mbx_query_config {
        u32 max_wqes_rqes_per_q;
        u32 max_cq_cqes_per_cq;
        u32 max_srq_rqe_sge;
-} __packed;
+};
 
 struct ocrdma_fw_ver_rsp {
        struct ocrdma_mqe_hdr hdr;
        struct ocrdma_mbx_rsp rsp;
 
        u8 running_ver[32];
-} __packed;
+};
 
 struct ocrdma_fw_conf_rsp {
        struct ocrdma_mqe_hdr hdr;
@@ -535,7 +536,7 @@ struct ocrdma_fw_conf_rsp {
        u32 base_eqid;
        u32 max_eq;
 
-} __packed;
+};
 
 enum {
        OCRDMA_FN_MODE_RDMA     = 0x4
@@ -584,7 +585,7 @@ struct ocrdma_create_cq_cmd {
 struct ocrdma_create_cq {
        struct ocrdma_mqe_hdr hdr;
        struct ocrdma_create_cq_cmd cmd;
-} __packed;
+};
 
 enum {
        OCRDMA_CREATE_CQ_RSP_CQ_ID_MASK = 0xFFFF
@@ -593,12 +594,12 @@ enum {
 struct ocrdma_create_cq_cmd_rsp {
        struct ocrdma_mbx_rsp rsp;
        u32 cq_id;
-} __packed;
+};
 
 struct ocrdma_create_cq_rsp {
        struct ocrdma_mqe_hdr hdr;
        struct ocrdma_create_cq_cmd_rsp rsp;
-} __packed;
+};
 
 enum {
        OCRDMA_CREATE_MQ_V0_CQ_ID_SHIFT         = 22,
@@ -617,12 +618,12 @@ struct ocrdma_create_mq_req {
        u32 async_cqid_valid;
        u32 rsvd;
        struct ocrdma_pa pa[8];
-} __packed;
+};
 
 struct ocrdma_create_mq_rsp {
        struct ocrdma_mbx_rsp rsp;
        u32 id;
-} __packed;
+};
 
 enum {
        OCRDMA_DESTROY_CQ_QID_SHIFT                     = 0,
@@ -637,12 +638,12 @@ struct ocrdma_destroy_cq {
        struct ocrdma_mbx_hdr req;
 
        u32 bypass_flush_qid;
-} __packed;
+};
 
 struct ocrdma_destroy_cq_rsp {
        struct ocrdma_mqe_hdr hdr;
        struct ocrdma_mbx_rsp rsp;
-} __packed;
+};
 
 enum {
        OCRDMA_QPT_GSI  = 1,
@@ -766,7 +767,7 @@ struct ocrdma_create_qp_req {
        u32 dpp_credits_cqid;
        u32 rpir_lkey;
        struct ocrdma_pa ird_addr[MAX_OCRDMA_IRD_PAGES];
-} __packed;
+};
 
 enum {
        OCRDMA_CREATE_QP_RSP_QP_ID_SHIFT                = 0,
@@ -820,18 +821,18 @@ struct ocrdma_create_qp_rsp {
        u32 max_ord_ird;
        u32 sq_rq_id;
        u32 dpp_response;
-} __packed;
+};
 
 struct ocrdma_destroy_qp {
        struct ocrdma_mqe_hdr hdr;
        struct ocrdma_mbx_hdr req;
        u32 qp_id;
-} __packed;
+};
 
 struct ocrdma_destroy_qp_rsp {
        struct ocrdma_mqe_hdr hdr;
        struct ocrdma_mbx_rsp rsp;
-} __packed;
+};
 
 enum {
        OCRDMA_MODIFY_QP_ID_SHIFT       = 0,
@@ -975,7 +976,7 @@ struct ocrdma_qp_params {
        u32 dmac_b0_to_b3;
        u32 vlan_dmac_b4_to_b5;
        u32 qkey;
-} __packed;
+};
 
 
 struct ocrdma_modify_qp {
@@ -986,7 +987,7 @@ struct ocrdma_modify_qp {
        u32 flags;
        u32 rdma_flags;
        u32 num_outstanding_atomic_rd;
-} __packed;
+};
 
 enum {
        OCRDMA_MODIFY_QP_RSP_MAX_RQE_SHIFT      = 0,
@@ -1007,7 +1008,7 @@ struct ocrdma_modify_qp_rsp {
 
        u32 max_wqe_rqe;
        u32 max_ord_ird;
-} __packed;
+};
 
 struct ocrdma_query_qp {
        struct ocrdma_mqe_hdr hdr;
@@ -1016,13 +1017,13 @@ struct ocrdma_query_qp {
 #define OCRDMA_QUERY_UP_QP_ID_SHIFT 0
 #define OCRDMA_QUERY_UP_QP_ID_MASK   0xFFFFFF
        u32 qp_id;
-} __packed;
+};
 
 struct ocrdma_query_qp_rsp {
        struct ocrdma_mqe_hdr hdr;
        struct ocrdma_mbx_rsp rsp;
        struct ocrdma_qp_params params;
-} __packed;
+};
 
 enum {
        OCRDMA_CREATE_SRQ_PD_ID_SHIFT           = 0,
@@ -1051,7 +1052,7 @@ struct ocrdma_create_srq {
        u32 max_sge_rqe;
        u32 pages_rqe_sz;
        struct ocrdma_pa rq_addr[MAX_OCRDMA_SRQ_PAGES];
-} __packed;
+};
 
 enum {
        OCRDMA_CREATE_SRQ_RSP_SRQ_ID_SHIFT                      = 0,
@@ -1070,7 +1071,7 @@ struct ocrdma_create_srq_rsp {
 
        u32 id;
        u32 max_sge_rqe_allocated;
-} __packed;
+};
 
 enum {
        OCRDMA_MODIFY_SRQ_ID_SHIFT      = 0,
@@ -1089,7 +1090,7 @@ struct ocrdma_modify_srq {
 
        u32 id;
        u32 limit_max_rqe;
-} __packed;
+};
 
 enum {
        OCRDMA_QUERY_SRQ_ID_SHIFT       = 0,
@@ -1101,7 +1102,7 @@ struct ocrdma_query_srq {
        struct ocrdma_mbx_rsp req;
 
        u32 id;
-} __packed;
+};
 
 enum {
        OCRDMA_QUERY_SRQ_RSP_PD_ID_SHIFT        = 0,
@@ -1123,7 +1124,7 @@ struct ocrdma_query_srq_rsp {
 
        u32 max_rqe_pdid;
        u32 srq_lmt_max_sge;
-} __packed;
+};
 
 enum {
        OCRDMA_DESTROY_SRQ_ID_SHIFT     = 0,
@@ -1135,7 +1136,7 @@ struct ocrdma_destroy_srq {
        struct ocrdma_mbx_rsp req;
 
        u32 id;
-} __packed;
+};
 
 enum {
        OCRDMA_ALLOC_PD_ENABLE_DPP      = BIT(16),
@@ -1147,7 +1148,7 @@ struct ocrdma_alloc_pd {
        struct ocrdma_mqe_hdr hdr;
        struct ocrdma_mbx_hdr req;
        u32 enable_dpp_rsvd;
-} __packed;
+};
 
 enum {
        OCRDMA_ALLOC_PD_RSP_DPP                 = Bit(16),
@@ -1159,18 +1160,18 @@ struct ocrdma_alloc_pd_rsp {
        struct ocrdma_mqe_hdr hdr;
        struct ocrdma_mbx_rsp rsp;
        u32 dpp_page_pdid;
-} __packed;
+};
 
 struct ocrdma_dealloc_pd {
        struct ocrdma_mqe_hdr hdr;
        struct ocrdma_mbx_hdr req;
        u32 id;
-} __packed;
+};
 
 struct ocrdma_dealloc_pd_rsp {
        struct ocrdma_mqe_hdr hdr;
        struct ocrdma_mbx_rsp rsp;
-} __packed;
+};
 
 enum {
        OCRDMA_ADDR_CHECK_ENABLE        = 1,
@@ -1206,7 +1207,7 @@ struct ocrdma_alloc_lkey {
 
        u32 pdid;
        u32 pbl_sz_flags;
-} __packed;
+};
 
 struct ocrdma_alloc_lkey_rsp {
        struct ocrdma_mqe_hdr hdr;
@@ -1214,7 +1215,7 @@ struct ocrdma_alloc_lkey_rsp {
 
        u32 lrkey;
        u32 num_pbl_rsvd;
-} __packed;
+};
 
 struct ocrdma_dealloc_lkey {
        struct ocrdma_mqe_hdr hdr;
@@ -1222,12 +1223,12 @@ struct ocrdma_dealloc_lkey {
 
        u32 lkey;
        u32 rsvd_frmr;
-} __packed;
+};
 
 struct ocrdma_dealloc_lkey_rsp {
        struct ocrdma_mqe_hdr hdr;
        struct ocrdma_mbx_rsp rsp;
-} __packed;
+};
 
 #define MAX_OCRDMA_NSMR_PBL    (u32)22
 #define MAX_OCRDMA_PBL_SIZE     65536
@@ -1283,7 +1284,7 @@ struct ocrdma_reg_nsmr {
        u32 va_loaddr;
        u32 va_hiaddr;
        struct ocrdma_pa pbl[MAX_OCRDMA_NSMR_PBL];
-} __packed;
+};
 
 enum {
        OCRDMA_REG_NSMR_CONT_PBL_SHIFT          = 0,
@@ -1305,12 +1306,12 @@ struct ocrdma_reg_nsmr_cont {
        u32 last;
 
        struct ocrdma_pa pbl[MAX_OCRDMA_NSMR_PBL];
-} __packed;
+};
 
 struct ocrdma_pbe {
        u32 pa_hi;
        u32 pa_lo;
-} __packed;
+};
 
 enum {
        OCRDMA_REG_NSMR_RSP_NUM_PBL_SHIFT       = 16,
@@ -1322,7 +1323,7 @@ struct ocrdma_reg_nsmr_rsp {
 
        u32 lrkey;
        u32 num_pbl;
-} __packed;
+};
 
 enum {
        OCRDMA_REG_NSMR_CONT_RSP_LRKEY_INDEX_SHIFT      = 0,
@@ -1342,7 +1343,7 @@ struct ocrdma_reg_nsmr_cont_rsp {
 
        u32 lrkey_key_index;
        u32 num_pbl;
-} __packed;
+};
 
 enum {
        OCRDMA_ALLOC_MW_PD_ID_SHIFT     = 0,
@@ -1354,7 +1355,7 @@ struct ocrdma_alloc_mw {
        struct ocrdma_mbx_hdr req;
 
        u32 pdid;
-} __packed;
+};
 
 enum {
        OCRDMA_ALLOC_MW_RSP_LRKEY_INDEX_SHIFT   = 0,
@@ -1366,7 +1367,7 @@ struct ocrdma_alloc_mw_rsp {
        struct ocrdma_mbx_rsp rsp;
 
        u32 lrkey_index;
-} __packed;
+};
 
 struct ocrdma_attach_mcast {
        struct ocrdma_mqe_hdr hdr;
@@ -1375,12 +1376,12 @@ struct ocrdma_attach_mcast {
        u8 mgid[16];
        u32 mac_b0_to_b3;
        u32 vlan_mac_b4_to_b5;
-} __packed;
+};
 
 struct ocrdma_attach_mcast_rsp {
        struct ocrdma_mqe_hdr hdr;
        struct ocrdma_mbx_rsp rsp;
-} __packed;
+};
 
 struct ocrdma_detach_mcast {
        struct ocrdma_mqe_hdr hdr;
@@ -1389,12 +1390,12 @@ struct ocrdma_detach_mcast {
        u8 mgid[16];
        u32 mac_b0_to_b3;
        u32 vlan_mac_b4_to_b5;
-} __packed;
+};
 
 struct ocrdma_detach_mcast_rsp {
        struct ocrdma_mqe_hdr hdr;
        struct ocrdma_mbx_rsp rsp;
-} __packed;
+};
 
 enum {
        OCRDMA_CREATE_AH_NUM_PAGES_SHIFT        = 19,
@@ -1418,24 +1419,24 @@ struct ocrdma_create_ah_tbl {
 
        u32 ah_conf;
        struct ocrdma_pa tbl_addr[8];
-} __packed;
+};
 
 struct ocrdma_create_ah_tbl_rsp {
        struct ocrdma_mqe_hdr hdr;
        struct ocrdma_mbx_rsp rsp;
        u32 ahid;
-} __packed;
+};
 
 struct ocrdma_delete_ah_tbl {
        struct ocrdma_mqe_hdr hdr;
        struct ocrdma_mbx_hdr req;
        u32 ahid;
-} __packed;
+};
 
 struct ocrdma_delete_ah_tbl_rsp {
        struct ocrdma_mqe_hdr hdr;
        struct ocrdma_mbx_rsp rsp;
-} __packed;
+};
 
 enum {
        OCRDMA_EQE_VALID_SHIFT          = 0,
@@ -1448,7 +1449,7 @@ enum {
 
 struct ocrdma_eqe {
        u32 id_valid;
-} __packed;
+};
 
 enum OCRDMA_CQE_STATUS {
        OCRDMA_CQE_SUCCESS = 0,
@@ -1532,14 +1533,14 @@ struct ocrdma_cqe {
                } cmn;
        };
        u32 flags_status_srcqpn;        /* w3 */
-} __packed;
+};
 
 struct ocrdma_sge {
        u32 addr_hi;
        u32 addr_lo;
        u32 lrkey;
        u32 len;
-} __packed;
+};
 
 enum {
        OCRDMA_FLAG_SIG         = 0x1,
@@ -1600,14 +1601,14 @@ struct ocrdma_hdr_wqe {
                u32 lkey;
        };
        u32 total_len;
-} __packed;
+};
 
 struct ocrdma_ewqe_ud_hdr {
        u32 rsvd_dest_qpn;
        u32 qkey;
        u32 rsvd_ahid;
        u32 rsvd;
-} __packed;
+};
 
 struct ocrdma_eth_basic {
        u8 dmac[6];
index f36630e4b6be182c735b7aa81d357b4207266c82..5f68dff0d6caec3ad8f59cf5f5fe1b6844abcbe7 100644 (file)
@@ -84,7 +84,7 @@ int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr)
                                        IB_DEVICE_SYS_IMAGE_GUID |
                                        IB_DEVICE_LOCAL_DMA_LKEY;
        attr->max_sge = min(dev->attr.max_send_sge, dev->attr.max_srq_sge);
-       attr->max_sge_rd = 0;
+       attr->max_sge_rd = dev->attr.max_rdma_sge;
        attr->max_cq = dev->attr.max_cq;
        attr->max_cqe = dev->attr.max_cqe;
        attr->max_mr = dev->attr.max_mr;
@@ -229,7 +229,6 @@ struct ib_ucontext *ocrdma_alloc_ucontext(struct ib_device *ibdev,
        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return ERR_PTR(-ENOMEM);
-       ctx->dev = dev;
        INIT_LIST_HEAD(&ctx->mm_head);
        mutex_init(&ctx->mm_list_lock);
 
@@ -274,7 +273,8 @@ int ocrdma_dealloc_ucontext(struct ib_ucontext *ibctx)
 {
        struct ocrdma_mm *mm, *tmp;
        struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ibctx);
-       struct pci_dev *pdev = uctx->dev->nic_info.pdev;
+       struct ocrdma_dev *dev = get_ocrdma_dev(ibctx->device);
+       struct pci_dev *pdev = dev->nic_info.pdev;
 
        ocrdma_del_mmap(uctx, uctx->ah_tbl.pa, uctx->ah_tbl.len);
        dma_free_coherent(&pdev->dev, uctx->ah_tbl.len, uctx->ah_tbl.va,
@@ -291,7 +291,7 @@ int ocrdma_dealloc_ucontext(struct ib_ucontext *ibctx)
 int ocrdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
 {
        struct ocrdma_ucontext *ucontext = get_ocrdma_ucontext(context);
-       struct ocrdma_dev *dev = ucontext->dev;
+       struct ocrdma_dev *dev = get_ocrdma_dev(context->device);
        unsigned long vm_page = vma->vm_pgoff << PAGE_SHIFT;
        u64 unmapped_db = (u64) dev->nic_info.unmapped_db;
        unsigned long len = (vma->vm_end - vma->vm_start);
@@ -327,7 +327,7 @@ int ocrdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
        return status;
 }
 
-static int ocrdma_copy_pd_uresp(struct ocrdma_pd *pd,
+static int ocrdma_copy_pd_uresp(struct ocrdma_dev *dev, struct ocrdma_pd *pd,
                                struct ib_ucontext *ib_ctx,
                                struct ib_udata *udata)
 {
@@ -341,16 +341,16 @@ static int ocrdma_copy_pd_uresp(struct ocrdma_pd *pd,
        memset(&rsp, 0, sizeof(rsp));
        rsp.id = pd->id;
        rsp.dpp_enabled = pd->dpp_enabled;
-       db_page_addr = pd->dev->nic_info.unmapped_db +
-                       (pd->id * pd->dev->nic_info.db_page_size);
-       db_page_size = pd->dev->nic_info.db_page_size;
+       db_page_addr = dev->nic_info.unmapped_db +
+                       (pd->id * dev->nic_info.db_page_size);
+       db_page_size = dev->nic_info.db_page_size;
 
        status = ocrdma_add_mmap(uctx, db_page_addr, db_page_size);
        if (status)
                return status;
 
        if (pd->dpp_enabled) {
-               dpp_page_addr = pd->dev->nic_info.dpp_unmapped_addr +
+               dpp_page_addr = dev->nic_info.dpp_unmapped_addr +
                                (pd->id * OCRDMA_DPP_PAGE_SIZE);
                status = ocrdma_add_mmap(uctx, dpp_page_addr,
                                 OCRDMA_DPP_PAGE_SIZE);
@@ -386,10 +386,9 @@ struct ib_pd *ocrdma_alloc_pd(struct ib_device *ibdev,
        pd = kzalloc(sizeof(*pd), GFP_KERNEL);
        if (!pd)
                return ERR_PTR(-ENOMEM);
-       pd->dev = dev;
        if (udata && context) {
-               pd->dpp_enabled = (dev->nic_info.dev_family ==
-                                       OCRDMA_GEN2_FAMILY) ? true : false;
+               pd->dpp_enabled =
+                       (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY);
                pd->num_dpp_qp =
                        pd->dpp_enabled ? OCRDMA_PD_MAX_DPP_ENABLED_QP : 0;
        }
@@ -400,21 +399,22 @@ struct ib_pd *ocrdma_alloc_pd(struct ib_device *ibdev,
        }
 
        if (udata && context) {
-               status = ocrdma_copy_pd_uresp(pd, context, udata);
+               status = ocrdma_copy_pd_uresp(dev, pd, context, udata);
                if (status)
                        goto err;
        }
        return &pd->ibpd;
 
 err:
-       ocrdma_dealloc_pd(&pd->ibpd);
+       status = ocrdma_mbx_dealloc_pd(dev, pd);
+       kfree(pd);
        return ERR_PTR(status);
 }
 
 int ocrdma_dealloc_pd(struct ib_pd *ibpd)
 {
        struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
-       struct ocrdma_dev *dev = pd->dev;
+       struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
        int status;
        u64 usr_db;
 
@@ -432,25 +432,11 @@ int ocrdma_dealloc_pd(struct ib_pd *ibpd)
        return status;
 }
 
-static struct ocrdma_mr *ocrdma_alloc_lkey(struct ib_pd *ibpd,
-                                          int acc, u32 num_pbls,
-                                          u32 addr_check)
+static int ocrdma_alloc_lkey(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
+                           u32 pdid, int acc, u32 num_pbls, u32 addr_check)
 {
        int status;
-       struct ocrdma_mr *mr;
-       struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
-       struct ocrdma_dev *dev = pd->dev;
 
-       if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE)) {
-               pr_err("%s(%d) leaving err, invalid access rights\n",
-                      __func__, dev->id);
-               return ERR_PTR(-EINVAL);
-       }
-
-       mr = kzalloc(sizeof(*mr), GFP_KERNEL);
-       if (!mr)
-               return ERR_PTR(-ENOMEM);
-       mr->hwmr.dev = dev;
        mr->hwmr.fr_mr = 0;
        mr->hwmr.local_rd = 1;
        mr->hwmr.remote_rd = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
@@ -460,25 +446,38 @@ static struct ocrdma_mr *ocrdma_alloc_lkey(struct ib_pd *ibpd,
        mr->hwmr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
        mr->hwmr.num_pbls = num_pbls;
 
-       status = ocrdma_mbx_alloc_lkey(dev, &mr->hwmr, pd->id, addr_check);
-       if (status) {
-               kfree(mr);
-               return ERR_PTR(-ENOMEM);
-       }
-       mr->pd = pd;
+       status = ocrdma_mbx_alloc_lkey(dev, &mr->hwmr, pdid, addr_check);
+       if (status)
+               return status;
+
        mr->ibmr.lkey = mr->hwmr.lkey;
        if (mr->hwmr.remote_wr || mr->hwmr.remote_rd)
                mr->ibmr.rkey = mr->hwmr.lkey;
-       return mr;
+       return 0;
 }
 
 struct ib_mr *ocrdma_get_dma_mr(struct ib_pd *ibpd, int acc)
 {
+       int status;
        struct ocrdma_mr *mr;
+       struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
+       struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
 
-       mr = ocrdma_alloc_lkey(ibpd, acc, 0, OCRDMA_ADDR_CHECK_DISABLE);
-       if (IS_ERR(mr))
-               return ERR_CAST(mr);
+       if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE)) {
+               pr_err("%s err, invalid access rights\n", __func__);
+               return ERR_PTR(-EINVAL);
+       }
+
+       mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+       if (!mr)
+               return ERR_PTR(-ENOMEM);
+
+       status = ocrdma_alloc_lkey(dev, mr, pd->id, acc, 0,
+                                  OCRDMA_ADDR_CHECK_DISABLE);
+       if (status) {
+               kfree(mr);
+               return ERR_PTR(status);
+       }
 
        return &mr->ibmr;
 }
@@ -502,7 +501,8 @@ static void ocrdma_free_mr_pbl_tbl(struct ocrdma_dev *dev,
        }
 }
 
-static int ocrdma_get_pbl_info(struct ocrdma_mr *mr, u32 num_pbes)
+static int ocrdma_get_pbl_info(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
+                             u32 num_pbes)
 {
        u32 num_pbls = 0;
        u32 idx = 0;
@@ -518,7 +518,7 @@ static int ocrdma_get_pbl_info(struct ocrdma_mr *mr, u32 num_pbes)
                num_pbls = roundup(num_pbes, (pbl_size / sizeof(u64)));
                num_pbls = num_pbls / (pbl_size / sizeof(u64));
                idx++;
-       } while (num_pbls >= mr->hwmr.dev->attr.max_num_mr_pbl);
+       } while (num_pbls >= dev->attr.max_num_mr_pbl);
 
        mr->hwmr.num_pbes = num_pbes;
        mr->hwmr.num_pbls = num_pbls;
@@ -613,13 +613,12 @@ struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
                                 u64 usr_addr, int acc, struct ib_udata *udata)
 {
        int status = -ENOMEM;
-       struct ocrdma_dev *dev;
+       struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
        struct ocrdma_mr *mr;
        struct ocrdma_pd *pd;
        u32 num_pbes;
 
        pd = get_ocrdma_pd(ibpd);
-       dev = pd->dev;
 
        if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE))
                return ERR_PTR(-EINVAL);
@@ -627,14 +626,13 @@ struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
        mr = kzalloc(sizeof(*mr), GFP_KERNEL);
        if (!mr)
                return ERR_PTR(status);
-       mr->hwmr.dev = dev;
        mr->umem = ib_umem_get(ibpd->uobject->context, start, len, acc, 0);
        if (IS_ERR(mr->umem)) {
                status = -EFAULT;
                goto umem_err;
        }
        num_pbes = ib_umem_page_count(mr->umem);
-       status = ocrdma_get_pbl_info(mr, num_pbes);
+       status = ocrdma_get_pbl_info(dev, mr, num_pbes);
        if (status)
                goto umem_err;
 
@@ -654,7 +652,6 @@ struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
        status = ocrdma_reg_mr(dev, &mr->hwmr, pd->id, acc);
        if (status)
                goto mbx_err;
-       mr->pd = pd;
        mr->ibmr.lkey = mr->hwmr.lkey;
        if (mr->hwmr.remote_wr || mr->hwmr.remote_rd)
                mr->ibmr.rkey = mr->hwmr.lkey;
@@ -671,7 +668,7 @@ umem_err:
 int ocrdma_dereg_mr(struct ib_mr *ib_mr)
 {
        struct ocrdma_mr *mr = get_ocrdma_mr(ib_mr);
-       struct ocrdma_dev *dev = mr->hwmr.dev;
+       struct ocrdma_dev *dev = get_ocrdma_dev(ib_mr->device);
        int status;
 
        status = ocrdma_mbx_dealloc_lkey(dev, mr->hwmr.fr_mr, mr->hwmr.lkey);
@@ -686,7 +683,8 @@ int ocrdma_dereg_mr(struct ib_mr *ib_mr)
        return status;
 }
 
-static int ocrdma_copy_cq_uresp(struct ocrdma_cq *cq, struct ib_udata *udata,
+static int ocrdma_copy_cq_uresp(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
+                               struct ib_udata *udata,
                                struct ib_ucontext *ib_ctx)
 {
        int status;
@@ -699,13 +697,13 @@ static int ocrdma_copy_cq_uresp(struct ocrdma_cq *cq, struct ib_udata *udata,
        uresp.num_pages = 1;
        uresp.max_hw_cqe = cq->max_hw_cqe;
        uresp.page_addr[0] = cq->pa;
-       uresp.db_page_addr = cq->dev->nic_info.unmapped_db;
-       uresp.db_page_size = cq->dev->nic_info.db_page_size;
+       uresp.db_page_addr = dev->nic_info.unmapped_db;
+       uresp.db_page_size = dev->nic_info.db_page_size;
        uresp.phase_change = cq->phase_change ? 1 : 0;
        status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
        if (status) {
                pr_err("%s(%d) copy error cqid=0x%x.\n",
-                      __func__, cq->dev->id, cq->id);
+                      __func__, dev->id, cq->id);
                goto err;
        }
        uctx = get_ocrdma_ucontext(ib_ctx);
@@ -744,7 +742,6 @@ struct ib_cq *ocrdma_create_cq(struct ib_device *ibdev, int entries, int vector,
        spin_lock_init(&cq->comp_handler_lock);
        INIT_LIST_HEAD(&cq->sq_head);
        INIT_LIST_HEAD(&cq->rq_head);
-       cq->dev = dev;
 
        status = ocrdma_mbx_create_cq(dev, cq, entries, ureq.dpp_cq);
        if (status) {
@@ -752,7 +749,7 @@ struct ib_cq *ocrdma_create_cq(struct ib_device *ibdev, int entries, int vector,
                return ERR_PTR(status);
        }
        if (ib_ctx) {
-               status = ocrdma_copy_cq_uresp(cq, udata, ib_ctx);
+               status = ocrdma_copy_cq_uresp(dev, cq, udata, ib_ctx);
                if (status)
                        goto ctx_err;
        }
@@ -786,7 +783,7 @@ int ocrdma_destroy_cq(struct ib_cq *ibcq)
 {
        int status;
        struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
-       struct ocrdma_dev *dev = cq->dev;
+       struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
 
        status = ocrdma_mbx_destroy_cq(dev, cq);
 
@@ -1026,7 +1023,7 @@ struct ib_qp *ocrdma_create_qp(struct ib_pd *ibpd,
        int status;
        struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
        struct ocrdma_qp *qp;
-       struct ocrdma_dev *dev = pd->dev;
+       struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
        struct ocrdma_create_qp_ureq ureq;
        u16 dpp_credit_lmt, dpp_offset;
 
@@ -1093,6 +1090,17 @@ gen_err:
        return ERR_PTR(status);
 }
 
+
+static void ocrdma_flush_rq_db(struct ocrdma_qp *qp)
+{
+       if (qp->db_cache) {
+               u32 val = qp->rq.dbid | (qp->db_cache <<
+                               ocrdma_get_num_posted_shift(qp));
+               iowrite32(val, qp->rq_db);
+               qp->db_cache = 0;
+       }
+}
+
 int _ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                      int attr_mask)
 {
@@ -1104,13 +1112,16 @@ int _ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
        qp = get_ocrdma_qp(ibqp);
        dev = qp->dev;
        if (attr_mask & IB_QP_STATE)
-               status = ocrdma_qp_state_machine(qp, attr->qp_state, &old_qps);
+               status = ocrdma_qp_state_change(qp, attr->qp_state, &old_qps);
        /* if new and previous states are same hw doesn't need to
         * know about it.
         */
        if (status < 0)
                return status;
        status = ocrdma_mbx_modify_qp(dev, qp, attr, attr_mask, old_qps);
+       if (!status && attr_mask & IB_QP_STATE && attr->qp_state == IB_QPS_RTR)
+               ocrdma_flush_rq_db(qp);
+
        return status;
 }
 
@@ -1360,17 +1371,18 @@ static void ocrdma_discard_cqes(struct ocrdma_qp *qp, struct ocrdma_cq *cq)
                 */
                discard_cnt += 1;
                cqe->cmn.qpn = 0;
-               if (is_cqe_for_sq(cqe))
+               if (is_cqe_for_sq(cqe)) {
                        ocrdma_hwq_inc_tail(&qp->sq);
-               else {
+               else {
                        if (qp->srq) {
                                spin_lock_irqsave(&qp->srq->q_lock, flags);
                                ocrdma_hwq_inc_tail(&qp->srq->rq);
                                ocrdma_srq_toggle_bit(qp->srq, cur_getp);
                                spin_unlock_irqrestore(&qp->srq->q_lock, flags);
 
-                       } else
+                       } else {
                                ocrdma_hwq_inc_tail(&qp->rq);
+                       }
                }
 skip_cqe:
                cur_getp = (cur_getp + 1) % cq->max_hw_cqe;
@@ -1457,7 +1469,8 @@ int ocrdma_destroy_qp(struct ib_qp *ibqp)
        return status;
 }
 
-static int ocrdma_copy_srq_uresp(struct ocrdma_srq *srq, struct ib_udata *udata)
+static int ocrdma_copy_srq_uresp(struct ocrdma_dev *dev, struct ocrdma_srq *srq,
+                               struct ib_udata *udata)
 {
        int status;
        struct ocrdma_create_srq_uresp uresp;
@@ -1467,11 +1480,11 @@ static int ocrdma_copy_srq_uresp(struct ocrdma_srq *srq, struct ib_udata *udata)
        uresp.num_rq_pages = 1;
        uresp.rq_page_addr[0] = srq->rq.pa;
        uresp.rq_page_size = srq->rq.len;
-       uresp.db_page_addr = srq->dev->nic_info.unmapped_db +
-           (srq->pd->id * srq->dev->nic_info.db_page_size);
-       uresp.db_page_size = srq->dev->nic_info.db_page_size;
+       uresp.db_page_addr = dev->nic_info.unmapped_db +
+           (srq->pd->id * dev->nic_info.db_page_size);
+       uresp.db_page_size = dev->nic_info.db_page_size;
        uresp.num_rqe_allocated = srq->rq.max_cnt;
-       if (srq->dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
+       if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
                uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ1_OFFSET;
                uresp.db_shift = 24;
        } else {
@@ -1495,7 +1508,7 @@ struct ib_srq *ocrdma_create_srq(struct ib_pd *ibpd,
 {
        int status = -ENOMEM;
        struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
-       struct ocrdma_dev *dev = pd->dev;
+       struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
        struct ocrdma_srq *srq;
 
        if (init_attr->attr.max_sge > dev->attr.max_recv_sge)
@@ -1508,10 +1521,9 @@ struct ib_srq *ocrdma_create_srq(struct ib_pd *ibpd,
                return ERR_PTR(status);
 
        spin_lock_init(&srq->q_lock);
-       srq->dev = dev;
        srq->pd = pd;
        srq->db = dev->nic_info.db + (pd->id * dev->nic_info.db_page_size);
-       status = ocrdma_mbx_create_srq(srq, init_attr, pd);
+       status = ocrdma_mbx_create_srq(dev, srq, init_attr, pd);
        if (status)
                goto err;
 
@@ -1538,7 +1550,7 @@ struct ib_srq *ocrdma_create_srq(struct ib_pd *ibpd,
        }
 
        if (udata) {
-               status = ocrdma_copy_srq_uresp(srq, udata);
+               status = ocrdma_copy_srq_uresp(dev, srq, udata);
                if (status)
                        goto arm_err;
        }
@@ -1584,10 +1596,9 @@ int ocrdma_destroy_srq(struct ib_srq *ibsrq)
 {
        int status;
        struct ocrdma_srq *srq;
-       struct ocrdma_dev *dev;
+       struct ocrdma_dev *dev = get_ocrdma_dev(ibsrq->device);
 
        srq = get_ocrdma_srq(ibsrq);
-       dev = srq->dev;
 
        status = ocrdma_mbx_destroy_srq(dev, srq);
 
@@ -1675,8 +1686,9 @@ static int ocrdma_build_send(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
                ocrdma_build_ud_hdr(qp, hdr, wr);
                sge = (struct ocrdma_sge *)(hdr + 2);
                wqe_size += sizeof(struct ocrdma_ewqe_ud_hdr);
-       } else
+       } else {
                sge = (struct ocrdma_sge *)(hdr + 1);
+       }
 
        status = ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size);
        return status;
@@ -1824,7 +1836,10 @@ static void ocrdma_ring_rq_db(struct ocrdma_qp *qp)
 {
        u32 val = qp->rq.dbid | (1 << ocrdma_get_num_posted_shift(qp));
 
-       iowrite32(val, qp->rq_db);
+       if (qp->state != OCRDMA_QPS_INIT)
+               iowrite32(val, qp->rq_db);
+       else
+               qp->db_cache++;
 }
 
 static void ocrdma_build_rqe(struct ocrdma_hdr_wqe *rqe, struct ib_recv_wr *wr,
@@ -1958,7 +1973,7 @@ int ocrdma_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
 
 static enum ib_wc_status ocrdma_to_ibwc_err(u16 status)
 {
-       enum ib_wc_status ibwc_status = IB_WC_GENERAL_ERR;
+       enum ib_wc_status ibwc_status;
 
        switch (status) {
        case OCRDMA_CQE_GENERAL_ERR:
@@ -2108,7 +2123,7 @@ static bool ocrdma_update_err_cqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,
        ibwc->status = ocrdma_to_ibwc_err(status);
 
        ocrdma_flush_qp(qp);
-       ocrdma_qp_state_machine(qp, IB_QPS_ERR, NULL);
+       ocrdma_qp_state_change(qp, IB_QPS_ERR, NULL);
 
        /* if wqe/rqe pending for which cqe needs to be returned,
         * trigger inflating it.
@@ -2299,9 +2314,9 @@ static void ocrdma_poll_success_rcqe(struct ocrdma_qp *qp,
                ibwc->ex.invalidate_rkey = le32_to_cpu(cqe->rq.lkey_immdt);
                ibwc->wc_flags |= IB_WC_WITH_INVALIDATE;
        }
-       if (qp->ibqp.srq)
+       if (qp->ibqp.srq) {
                ocrdma_update_free_srq_cqe(ibwc, cqe, qp);
-       else {
+       else {
                ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
                ocrdma_hwq_inc_tail(&qp->rq);
        }
@@ -2314,13 +2329,14 @@ static bool ocrdma_poll_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
        bool expand = false;
 
        ibwc->wc_flags = 0;
-       if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI)
+       if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) {
                status = (le32_to_cpu(cqe->flags_status_srcqpn) &
                                        OCRDMA_CQE_UD_STATUS_MASK) >>
                                        OCRDMA_CQE_UD_STATUS_SHIFT;
-       else
+       } else {
                status = (le32_to_cpu(cqe->flags_status_srcqpn) &
                             OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;
+       }
 
        if (status == OCRDMA_CQE_SUCCESS) {
                *polled = true;
@@ -2338,9 +2354,10 @@ static void ocrdma_change_cq_phase(struct ocrdma_cq *cq, struct ocrdma_cqe *cqe,
        if (cq->phase_change) {
                if (cur_getp == 0)
                        cq->phase = (~cq->phase & OCRDMA_CQE_VALID);
-       } else
+       } else {
                /* clear valid bit */
                cqe->flags_status_srcqpn = 0;
+       }
 }
 
 static int ocrdma_poll_hwcq(struct ocrdma_cq *cq, int num_entries,
@@ -2351,7 +2368,7 @@ static int ocrdma_poll_hwcq(struct ocrdma_cq *cq, int num_entries,
        bool expand = false;
        int polled_hw_cqes = 0;
        struct ocrdma_qp *qp = NULL;
-       struct ocrdma_dev *dev = cq->dev;
+       struct ocrdma_dev *dev = get_ocrdma_dev(cq->ibcq.device);
        struct ocrdma_cqe *cqe;
        u16 cur_getp; bool polled = false; bool stop = false;
 
@@ -2417,8 +2434,9 @@ static int ocrdma_add_err_cqe(struct ocrdma_cq *cq, int num_entries,
                } else if (!is_hw_rq_empty(qp) && qp->rq_cq == cq) {
                        ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
                        ocrdma_hwq_inc_tail(&qp->rq);
-               } else
+               } else {
                        return err_cqes;
+               }
                ibwc->byte_len = 0;
                ibwc->status = IB_WC_WR_FLUSH_ERR;
                ibwc = ibwc + 1;
@@ -2431,14 +2449,11 @@ static int ocrdma_add_err_cqe(struct ocrdma_cq *cq, int num_entries,
 int ocrdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
 {
        int cqes_to_poll = num_entries;
-       struct ocrdma_cq *cq = NULL;
-       unsigned long flags;
-       struct ocrdma_dev *dev;
+       struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
+       struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
        int num_os_cqe = 0, err_cqes = 0;
        struct ocrdma_qp *qp;
-
-       cq = get_ocrdma_cq(ibcq);
-       dev = cq->dev;
+       unsigned long flags;
 
        /* poll cqes from adapter CQ */
        spin_lock_irqsave(&cq->cq_lock, flags);
@@ -2469,16 +2484,14 @@ int ocrdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
 
 int ocrdma_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags cq_flags)
 {
-       struct ocrdma_cq *cq;
-       unsigned long flags;
-       struct ocrdma_dev *dev;
+       struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
+       struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
        u16 cq_id;
        u16 cur_getp;
        struct ocrdma_cqe *cqe;
+       unsigned long flags;
 
-       cq = get_ocrdma_cq(ibcq);
        cq_id = cq->id;
-       dev = cq->dev;
 
        spin_lock_irqsave(&cq->cq_lock, flags);
        if (cq_flags & IB_CQ_NEXT_COMP || cq_flags & IB_CQ_SOLICITED)
index 4a9af795b88f3bf5ba2fc8a8d44aaf637a318029..1946101419a31c0c3066c72d90401639a474d3e3 100644 (file)
@@ -89,7 +89,6 @@ struct qlogic_ib_stats {
 
 extern struct qlogic_ib_stats qib_stats;
 extern const struct pci_error_handlers qib_pci_err_handler;
-extern struct pci_driver qib_driver;
 
 #define QIB_CHIP_SWVERSION QIB_CHIP_VERS_MAJ
 /*
@@ -576,11 +575,13 @@ struct qib_pportdata {
        /* read/write using lock */
        spinlock_t            sdma_lock ____cacheline_aligned_in_smp;
        struct list_head      sdma_activelist;
+       struct list_head      sdma_userpending;
        u64                   sdma_descq_added;
        u64                   sdma_descq_removed;
        u16                   sdma_descq_tail;
        u16                   sdma_descq_head;
        u8                    sdma_generation;
+       u8                    sdma_intrequest;
 
        struct tasklet_struct sdma_sw_clean_up_task
                ____cacheline_aligned_in_smp;
@@ -1326,6 +1327,8 @@ int qib_setup_sdma(struct qib_pportdata *);
 void qib_teardown_sdma(struct qib_pportdata *);
 void __qib_sdma_intr(struct qib_pportdata *);
 void qib_sdma_intr(struct qib_pportdata *);
+void qib_user_sdma_send_desc(struct qib_pportdata *dd,
+                       struct list_head *pktlist);
 int qib_sdma_verbs_send(struct qib_pportdata *, struct qib_sge_state *,
                        u32, struct qib_verbs_txreq *);
 /* ppd->sdma_lock should be locked before calling this. */
index 4f255b723ffd786b3378a4e7920937af9cbb72f8..5670ace27c639adb351b9928c65ed081a599a910 100644 (file)
@@ -279,7 +279,7 @@ struct qib_base_info {
  * may not be implemented; the user code must deal with this if it
  * cares, or it must abort after initialization reports the difference.
  */
-#define QIB_USER_SWMINOR 12
+#define QIB_USER_SWMINOR 13
 
 #define QIB_USER_SWVERSION ((QIB_USER_SWMAJOR << 16) | QIB_USER_SWMINOR)
 
@@ -701,7 +701,37 @@ struct qib_message_header {
        __be32 bth[3];
        /* fields below this point are in host byte order */
        struct qib_header iph;
+       /* fields below are simplified, but should match PSM */
+       /* some are accessed by driver when packet spliting is needed */
        __u8 sub_opcode;
+       __u8 flags;
+       __u16 commidx;
+       __u32 ack_seq_num;
+       __u8 flowid;
+       __u8 hdr_dlen;
+       __u16 mqhdr;
+       __u32 uwords[4];
+};
+
+/* sequence number bits for message */
+union qib_seqnum {
+       struct {
+               __u32 seq:11;
+               __u32 gen:8;
+               __u32 flow:5;
+       };
+       struct {
+               __u32 pkt:16;
+               __u32 msg:8;
+       };
+       __u32 val;
+};
+
+/* qib receiving-dma tid-session-member */
+struct qib_tid_session_member {
+       __u16 tid;
+       __u16 offset;
+       __u16 length;
 };
 
 /* IB - LRH header consts */
index b51a51486cb845479b0512dc2b9f31c1443f9649..275f247f9fca540e45854655bc7871bacdb6bc98 100644 (file)
@@ -1220,7 +1220,7 @@ static int qib_compatible_subctxts(int user_swmajor, int user_swminor)
                        return user_swminor == 3;
                default:
                        /* >= 4 are compatible (or are expected to be) */
-                       return user_swminor >= 4;
+                       return user_swminor <= QIB_USER_SWMINOR;
                }
        }
        /* make no promises yet for future major versions */
index 36e048e0e1d93bca227707a2c5e569ec4a851a78..24e802f4ea2f04b4554678eee76341043801cd35 100644 (file)
@@ -1193,7 +1193,7 @@ static DEFINE_PCI_DEVICE_TABLE(qib_pci_tbl) = {
 
 MODULE_DEVICE_TABLE(pci, qib_pci_tbl);
 
-struct pci_driver qib_driver = {
+static struct pci_driver qib_driver = {
        .name = QIB_DRV_NAME,
        .probe = qib_init_one,
        .remove = qib_remove_one,
index c574ec7c85e6834662a78912dafc5bdd2a216a58..3f14009fb6625bb90f3adb94bbf72cd782d61e4f 100644 (file)
@@ -283,12 +283,12 @@ int qib_pcie_params(struct qib_devdata *dd, u32 minw, u32 *nent,
                goto bail;
        }
 
-       pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_MSIX);
+       pos = dd->pcidev->msix_cap;
        if (nent && *nent && pos) {
                qib_msix_setup(dd, pos, nent, entry);
                ret = 0; /* did it, either MSIx or INTx */
        } else {
-               pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_MSI);
+               pos = dd->pcidev->msi_cap;
                if (pos)
                        ret = qib_msi_setup(dd, pos);
                else
@@ -357,7 +357,7 @@ int qib_reinit_intr(struct qib_devdata *dd)
        if (!dd->msi_lo)
                goto bail;
 
-       pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_MSI);
+       pos = dd->pcidev->msi_cap;
        if (!pos) {
                qib_dev_err(dd,
                        "Can't find MSI capability, can't restore MSI settings\n");
@@ -426,7 +426,7 @@ void qib_enable_intx(struct pci_dev *pdev)
        if (new != cw)
                pci_write_config_word(pdev, PCI_COMMAND, new);
 
-       pos = pci_find_capability(pdev, PCI_CAP_ID_MSI);
+       pos = pdev->msi_cap;
        if (pos) {
                /* then turn off MSI */
                pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &cw);
@@ -434,7 +434,7 @@ void qib_enable_intx(struct pci_dev *pdev)
                if (new != cw)
                        pci_write_config_word(pdev, pos + PCI_MSI_FLAGS, new);
        }
-       pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
+       pos = pdev->msix_cap;
        if (pos) {
                /* then turn off MSIx */
                pci_read_config_word(pdev, pos + PCI_MSIX_FLAGS, &cw);
index 9b5322d8cd5accca85737745ba06973ca7212efe..c6d6a54d2e19ddd2898e1e78e650832ce68bc7b4 100644 (file)
@@ -423,8 +423,11 @@ void qib_sdma_intr(struct qib_pportdata *ppd)
 
 void __qib_sdma_intr(struct qib_pportdata *ppd)
 {
-       if (__qib_sdma_running(ppd))
+       if (__qib_sdma_running(ppd)) {
                qib_sdma_make_progress(ppd);
+               if (!list_empty(&ppd->sdma_userpending))
+                       qib_user_sdma_send_desc(ppd, &ppd->sdma_userpending);
+       }
 }
 
 int qib_setup_sdma(struct qib_pportdata *ppd)
@@ -452,6 +455,9 @@ int qib_setup_sdma(struct qib_pportdata *ppd)
        ppd->sdma_descq_removed = 0;
        ppd->sdma_descq_added = 0;
 
+       ppd->sdma_intrequest = 0;
+       INIT_LIST_HEAD(&ppd->sdma_userpending);
+
        INIT_LIST_HEAD(&ppd->sdma_activelist);
 
        tasklet_init(&ppd->sdma_sw_clean_up_task, sdma_sw_clean_up_task,
index 82442085cbe64ccbea4f5e46565b492d864ade89..d0a0ea0c14d6a965afe2bcf8d359052fdb946e26 100644 (file)
 #define QIB_USER_SDMA_DRAIN_TIMEOUT 500
 
 struct qib_user_sdma_pkt {
-       u8 naddr;               /* dimension of addr (1..3) ... */
+       struct list_head list;  /* list element */
+
+       u8  tiddma;             /* if this is NEW tid-sdma */
+       u8  largepkt;           /* this is large pkt from kmalloc */
+       u16 frag_size;          /* frag size used by PSM */
+       u16 index;              /* last header index or push index */
+       u16 naddr;              /* dimension of addr (1..3) ... */
+       u16 addrlimit;          /* addr array size */
+       u16 tidsmidx;           /* current tidsm index */
+       u16 tidsmcount;         /* tidsm array item count */
+       u16 payload_size;       /* payload size so far for header */
+       u32 bytes_togo;         /* bytes for processing */
        u32 counter;            /* sdma pkts queued counter for this entry */
+       struct qib_tid_session_member *tidsm;   /* tid session member array */
+       struct qib_user_sdma_queue *pq; /* which pq this pkt belongs to */
        u64 added;              /* global descq number of entries */
 
        struct {
-               u32 offset;                     /* offset for kvaddr, addr */
-               u32 length;                     /* length in page */
-               u8  put_page;                   /* should we put_page? */
-               u8  dma_mapped;                 /* is page dma_mapped? */
+               u16 offset;                     /* offset for kvaddr, addr */
+               u16 length;                     /* length in page */
+               u16 first_desc;                 /* first desc */
+               u16 last_desc;                  /* last desc */
+               u16 put_page;                   /* should we put_page? */
+               u16 dma_mapped;                 /* is page dma_mapped? */
+               u16 dma_length;                 /* for dma_unmap_page() */
+               u16 padding;
                struct page *page;              /* may be NULL (coherent mem) */
                void *kvaddr;                   /* FIXME: only for pio hack */
                dma_addr_t addr;
        } addr[4];   /* max pages, any more and we coalesce */
-       struct list_head list;  /* list element */
 };
 
 struct qib_user_sdma_queue {
@@ -77,6 +93,12 @@ struct qib_user_sdma_queue {
         */
        struct list_head sent;
 
+       /*
+        * Because above list will be accessed by both process and
+        * signal handler, we need a spinlock for it.
+        */
+       spinlock_t sent_lock ____cacheline_aligned_in_smp;
+
        /* headers with expected length are allocated from here... */
        char header_cache_name[64];
        struct dma_pool *header_cache;
@@ -88,6 +110,12 @@ struct qib_user_sdma_queue {
        /* as packets go on the queued queue, they are counted... */
        u32 counter;
        u32 sent_counter;
+       /* pending packets, not sending yet */
+       u32 num_pending;
+       /* sending packets, not complete yet */
+       u32 num_sending;
+       /* global descq number of entry of last sending packet */
+       u64 added;
 
        /* dma page table */
        struct rb_root dma_pages_root;
@@ -107,8 +135,12 @@ qib_user_sdma_queue_create(struct device *dev, int unit, int ctxt, int sctxt)
 
        pq->counter = 0;
        pq->sent_counter = 0;
-       INIT_LIST_HEAD(&pq->sent);
+       pq->num_pending = 0;
+       pq->num_sending = 0;
+       pq->added = 0;
 
+       INIT_LIST_HEAD(&pq->sent);
+       spin_lock_init(&pq->sent_lock);
        mutex_init(&pq->lock);
 
        snprintf(pq->pkt_slab_name, sizeof(pq->pkt_slab_name),
@@ -144,34 +176,310 @@ done:
 }
 
 static void qib_user_sdma_init_frag(struct qib_user_sdma_pkt *pkt,
-                                   int i, size_t offset, size_t len,
-                                   int put_page, int dma_mapped,
-                                   struct page *page,
-                                   void *kvaddr, dma_addr_t dma_addr)
+                                   int i, u16 offset, u16 len,
+                                   u16 first_desc, u16 last_desc,
+                                   u16 put_page, u16 dma_mapped,
+                                   struct page *page, void *kvaddr,
+                                   dma_addr_t dma_addr, u16 dma_length)
 {
        pkt->addr[i].offset = offset;
        pkt->addr[i].length = len;
+       pkt->addr[i].first_desc = first_desc;
+       pkt->addr[i].last_desc = last_desc;
        pkt->addr[i].put_page = put_page;
        pkt->addr[i].dma_mapped = dma_mapped;
        pkt->addr[i].page = page;
        pkt->addr[i].kvaddr = kvaddr;
        pkt->addr[i].addr = dma_addr;
+       pkt->addr[i].dma_length = dma_length;
 }
 
-static void qib_user_sdma_init_header(struct qib_user_sdma_pkt *pkt,
-                                     u32 counter, size_t offset,
-                                     size_t len, int dma_mapped,
-                                     struct page *page,
-                                     void *kvaddr, dma_addr_t dma_addr)
+static void *qib_user_sdma_alloc_header(struct qib_user_sdma_queue *pq,
+                               size_t len, dma_addr_t *dma_addr)
 {
-       pkt->naddr = 1;
-       pkt->counter = counter;
-       qib_user_sdma_init_frag(pkt, 0, offset, len, 0, dma_mapped, page,
-                               kvaddr, dma_addr);
+       void *hdr;
+
+       if (len == QIB_USER_SDMA_EXP_HEADER_LENGTH)
+               hdr = dma_pool_alloc(pq->header_cache, GFP_KERNEL,
+                                            dma_addr);
+       else
+               hdr = NULL;
+
+       if (!hdr) {
+               hdr = kmalloc(len, GFP_KERNEL);
+               if (!hdr)
+                       return NULL;
+
+               *dma_addr = 0;
+       }
+
+       return hdr;
+}
+
+static int qib_user_sdma_page_to_frags(const struct qib_devdata *dd,
+                                      struct qib_user_sdma_queue *pq,
+                                      struct qib_user_sdma_pkt *pkt,
+                                      struct page *page, u16 put,
+                                      u16 offset, u16 len, void *kvaddr)
+{
+       __le16 *pbc16;
+       void *pbcvaddr;
+       struct qib_message_header *hdr;
+       u16 newlen, pbclen, lastdesc, dma_mapped;
+       u32 vcto;
+       union qib_seqnum seqnum;
+       dma_addr_t pbcdaddr;
+       dma_addr_t dma_addr =
+               dma_map_page(&dd->pcidev->dev,
+                       page, offset, len, DMA_TO_DEVICE);
+       int ret = 0;
+
+       if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
+               /*
+                * dma mapping error, pkt has not managed
+                * this page yet, return the page here so
+                * the caller can ignore this page.
+                */
+               if (put) {
+                       put_page(page);
+               } else {
+                       /* coalesce case */
+                       kunmap(page);
+                       __free_page(page);
+               }
+               ret = -ENOMEM;
+               goto done;
+       }
+       offset = 0;
+       dma_mapped = 1;
+
+
+next_fragment:
+
+       /*
+        * In tid-sdma, the transfer length is restricted by
+        * receiver side current tid page length.
+        */
+       if (pkt->tiddma && len > pkt->tidsm[pkt->tidsmidx].length)
+               newlen = pkt->tidsm[pkt->tidsmidx].length;
+       else
+               newlen = len;
+
+       /*
+        * Then the transfer length is restricted by MTU.
+        * the last descriptor flag is determined by:
+        * 1. the current packet is at frag size length.
+        * 2. the current tid page is done if tid-sdma.
+        * 3. there is no more byte togo if sdma.
+        */
+       lastdesc = 0;
+       if ((pkt->payload_size + newlen) >= pkt->frag_size) {
+               newlen = pkt->frag_size - pkt->payload_size;
+               lastdesc = 1;
+       } else if (pkt->tiddma) {
+               if (newlen == pkt->tidsm[pkt->tidsmidx].length)
+                       lastdesc = 1;
+       } else {
+               if (newlen == pkt->bytes_togo)
+                       lastdesc = 1;
+       }
+
+       /* fill the next fragment in this page */
+       qib_user_sdma_init_frag(pkt, pkt->naddr, /* index */
+               offset, newlen,         /* offset, len */
+               0, lastdesc,            /* first last desc */
+               put, dma_mapped,        /* put page, dma mapped */
+               page, kvaddr,           /* struct page, virt addr */
+               dma_addr, len);         /* dma addr, dma length */
+       pkt->bytes_togo -= newlen;
+       pkt->payload_size += newlen;
+       pkt->naddr++;
+       if (pkt->naddr == pkt->addrlimit) {
+               ret = -EFAULT;
+               goto done;
+       }
+
+       /* If there is no more byte togo. (lastdesc==1) */
+       if (pkt->bytes_togo == 0) {
+               /* The packet is done, header is not dma mapped yet.
+                * it should be from kmalloc */
+               if (!pkt->addr[pkt->index].addr) {
+                       pkt->addr[pkt->index].addr =
+                               dma_map_single(&dd->pcidev->dev,
+                                       pkt->addr[pkt->index].kvaddr,
+                                       pkt->addr[pkt->index].dma_length,
+                                       DMA_TO_DEVICE);
+                       if (dma_mapping_error(&dd->pcidev->dev,
+                                       pkt->addr[pkt->index].addr)) {
+                               ret = -ENOMEM;
+                               goto done;
+                       }
+                       pkt->addr[pkt->index].dma_mapped = 1;
+               }
+
+               goto done;
+       }
+
+       /* If tid-sdma, advance tid info. */
+       if (pkt->tiddma) {
+               pkt->tidsm[pkt->tidsmidx].length -= newlen;
+               if (pkt->tidsm[pkt->tidsmidx].length) {
+                       pkt->tidsm[pkt->tidsmidx].offset += newlen;
+               } else {
+                       pkt->tidsmidx++;
+                       if (pkt->tidsmidx == pkt->tidsmcount) {
+                               ret = -EFAULT;
+                               goto done;
+                       }
+               }
+       }
+
+       /*
+        * If this is NOT the last descriptor. (newlen==len)
+        * the current packet is not done yet, but the current
+        * send side page is done.
+        */
+       if (lastdesc == 0)
+               goto done;
+
+       /*
+        * If running this driver under PSM with message size
+        * fitting into one transfer unit, it is not possible
+        * to pass this line. otherwise, it is a buggggg.
+        */
+
+       /*
+        * Since the current packet is done, and there are more
+        * bytes togo, we need to create a new sdma header, copying
+        * from previous sdma header and modify both.
+        */
+       pbclen = pkt->addr[pkt->index].length;
+       pbcvaddr = qib_user_sdma_alloc_header(pq, pbclen, &pbcdaddr);
+       if (!pbcvaddr) {
+               ret = -ENOMEM;
+               goto done;
+       }
+       /* Copy the previous sdma header to new sdma header */
+       pbc16 = (__le16 *)pkt->addr[pkt->index].kvaddr;
+       memcpy(pbcvaddr, pbc16, pbclen);
+
+       /* Modify the previous sdma header */
+       hdr = (struct qib_message_header *)&pbc16[4];
+
+       /* New pbc length */
+       pbc16[0] = cpu_to_le16(le16_to_cpu(pbc16[0])-(pkt->bytes_togo>>2));
+
+       /* New packet length */
+       hdr->lrh[2] = cpu_to_be16(le16_to_cpu(pbc16[0]));
+
+       if (pkt->tiddma) {
+               /* turn on the header suppression */
+               hdr->iph.pkt_flags =
+                       cpu_to_le16(le16_to_cpu(hdr->iph.pkt_flags)|0x2);
+               /* turn off ACK_REQ: 0x04 and EXPECTED_DONE: 0x20 */
+               hdr->flags &= ~(0x04|0x20);
+       } else {
+               /* turn off extra bytes: 20-21 bits */
+               hdr->bth[0] = cpu_to_be32(be32_to_cpu(hdr->bth[0])&0xFFCFFFFF);
+               /* turn off ACK_REQ: 0x04 */
+               hdr->flags &= ~(0x04);
+       }
+
+       /* New kdeth checksum */
+       vcto = le32_to_cpu(hdr->iph.ver_ctxt_tid_offset);
+       hdr->iph.chksum = cpu_to_le16(QIB_LRH_BTH +
+               be16_to_cpu(hdr->lrh[2]) -
+               ((vcto>>16)&0xFFFF) - (vcto&0xFFFF) -
+               le16_to_cpu(hdr->iph.pkt_flags));
+
+       /* The packet is done, header is not dma mapped yet.
+        * it should be from kmalloc */
+       if (!pkt->addr[pkt->index].addr) {
+               pkt->addr[pkt->index].addr =
+                       dma_map_single(&dd->pcidev->dev,
+                               pkt->addr[pkt->index].kvaddr,
+                               pkt->addr[pkt->index].dma_length,
+                               DMA_TO_DEVICE);
+               if (dma_mapping_error(&dd->pcidev->dev,
+                               pkt->addr[pkt->index].addr)) {
+                       ret = -ENOMEM;
+                       goto done;
+               }
+               pkt->addr[pkt->index].dma_mapped = 1;
+       }
+
+       /* Modify the new sdma header */
+       pbc16 = (__le16 *)pbcvaddr;
+       hdr = (struct qib_message_header *)&pbc16[4];
+
+       /* New pbc length */
+       pbc16[0] = cpu_to_le16(le16_to_cpu(pbc16[0])-(pkt->payload_size>>2));
+
+       /* New packet length */
+       hdr->lrh[2] = cpu_to_be16(le16_to_cpu(pbc16[0]));
+
+       if (pkt->tiddma) {
+               /* Set new tid and offset for new sdma header */
+               hdr->iph.ver_ctxt_tid_offset = cpu_to_le32(
+                       (le32_to_cpu(hdr->iph.ver_ctxt_tid_offset)&0xFF000000) +
+                       (pkt->tidsm[pkt->tidsmidx].tid<<QLOGIC_IB_I_TID_SHIFT) +
+                       (pkt->tidsm[pkt->tidsmidx].offset>>2));
+       } else {
+               /* Middle protocol new packet offset */
+               hdr->uwords[2] += pkt->payload_size;
+       }
+
+       /* New kdeth checksum */
+       vcto = le32_to_cpu(hdr->iph.ver_ctxt_tid_offset);
+       hdr->iph.chksum = cpu_to_le16(QIB_LRH_BTH +
+               be16_to_cpu(hdr->lrh[2]) -
+               ((vcto>>16)&0xFFFF) - (vcto&0xFFFF) -
+               le16_to_cpu(hdr->iph.pkt_flags));
+
+       /* Next sequence number in new sdma header */
+       seqnum.val = be32_to_cpu(hdr->bth[2]);
+       if (pkt->tiddma)
+               seqnum.seq++;
+       else
+               seqnum.pkt++;
+       hdr->bth[2] = cpu_to_be32(seqnum.val);
+
+       /* Init new sdma header. */
+       qib_user_sdma_init_frag(pkt, pkt->naddr, /* index */
+               0, pbclen,              /* offset, len */
+               1, 0,                   /* first last desc */
+               0, 0,                   /* put page, dma mapped */
+               NULL, pbcvaddr,         /* struct page, virt addr */
+               pbcdaddr, pbclen);      /* dma addr, dma length */
+       pkt->index = pkt->naddr;
+       pkt->payload_size = 0;
+       pkt->naddr++;
+       if (pkt->naddr == pkt->addrlimit) {
+               ret = -EFAULT;
+               goto done;
+       }
+
+       /* Prepare for next fragment in this page */
+       if (newlen != len) {
+               if (dma_mapped) {
+                       put = 0;
+                       dma_mapped = 0;
+                       page = NULL;
+                       kvaddr = NULL;
+               }
+               len -= newlen;
+               offset += newlen;
+
+               goto next_fragment;
+       }
+
+done:
+       return ret;
 }
 
 /* we've too many pages in the iovec, coalesce to a single page */
 static int qib_user_sdma_coalesce(const struct qib_devdata *dd,
+                                 struct qib_user_sdma_queue *pq,
                                  struct qib_user_sdma_pkt *pkt,
                                  const struct iovec *iov,
                                  unsigned long niov)
@@ -182,7 +490,6 @@ static int qib_user_sdma_coalesce(const struct qib_devdata *dd,
        char *mpage;
        int i;
        int len = 0;
-       dma_addr_t dma_addr;
 
        if (!page) {
                ret = -ENOMEM;
@@ -205,17 +512,8 @@ static int qib_user_sdma_coalesce(const struct qib_devdata *dd,
                len += iov[i].iov_len;
        }
 
-       dma_addr = dma_map_page(&dd->pcidev->dev, page, 0, len,
-                               DMA_TO_DEVICE);
-       if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
-               ret = -ENOMEM;
-               goto free_unmap;
-       }
-
-       qib_user_sdma_init_frag(pkt, 1, 0, len, 0, 1, page, mpage_save,
-                               dma_addr);
-       pkt->naddr = 2;
-
+       ret = qib_user_sdma_page_to_frags(dd, pq, pkt,
+                       page, 0, 0, len, mpage_save);
        goto done;
 
 free_unmap:
@@ -238,16 +536,6 @@ static int qib_user_sdma_num_pages(const struct iovec *iov)
        return 1 + ((epage - spage) >> PAGE_SHIFT);
 }
 
-/*
- * Truncate length to page boundary.
- */
-static int qib_user_sdma_page_length(unsigned long addr, unsigned long len)
-{
-       const unsigned long offset = addr & ~PAGE_MASK;
-
-       return ((offset + len) > PAGE_SIZE) ? (PAGE_SIZE - offset) : len;
-}
-
 static void qib_user_sdma_free_pkt_frag(struct device *dev,
                                        struct qib_user_sdma_queue *pq,
                                        struct qib_user_sdma_pkt *pkt,
@@ -256,10 +544,11 @@ static void qib_user_sdma_free_pkt_frag(struct device *dev,
        const int i = frag;
 
        if (pkt->addr[i].page) {
+               /* only user data has page */
                if (pkt->addr[i].dma_mapped)
                        dma_unmap_page(dev,
                                       pkt->addr[i].addr,
-                                      pkt->addr[i].length,
+                                      pkt->addr[i].dma_length,
                                       DMA_TO_DEVICE);
 
                if (pkt->addr[i].kvaddr)
@@ -269,55 +558,81 @@ static void qib_user_sdma_free_pkt_frag(struct device *dev,
                        put_page(pkt->addr[i].page);
                else
                        __free_page(pkt->addr[i].page);
-       } else if (pkt->addr[i].kvaddr)
-               /* free coherent mem from cache... */
-               dma_pool_free(pq->header_cache,
+       } else if (pkt->addr[i].kvaddr) {
+               /* for headers */
+               if (pkt->addr[i].dma_mapped) {
+                       /* from kmalloc & dma mapped */
+                       dma_unmap_single(dev,
+                                      pkt->addr[i].addr,
+                                      pkt->addr[i].dma_length,
+                                      DMA_TO_DEVICE);
+                       kfree(pkt->addr[i].kvaddr);
+               } else if (pkt->addr[i].addr) {
+                       /* free coherent mem from cache... */
+                       dma_pool_free(pq->header_cache,
                              pkt->addr[i].kvaddr, pkt->addr[i].addr);
+               } else {
+                       /* from kmalloc but not dma mapped */
+                       kfree(pkt->addr[i].kvaddr);
+               }
+       }
 }
 
 /* return number of pages pinned... */
 static int qib_user_sdma_pin_pages(const struct qib_devdata *dd,
+                                  struct qib_user_sdma_queue *pq,
                                   struct qib_user_sdma_pkt *pkt,
                                   unsigned long addr, int tlen, int npages)
 {
-       struct page *pages[2];
-       int j;
-       int ret;
-
-       ret = get_user_pages(current, current->mm, addr,
-                            npages, 0, 1, pages, NULL);
-
-       if (ret != npages) {
-               int i;
-
-               for (i = 0; i < ret; i++)
-                       put_page(pages[i]);
-
-               ret = -ENOMEM;
-               goto done;
-       }
+       struct page *pages[8];
+       int i, j;
+       int ret = 0;
 
-       for (j = 0; j < npages; j++) {
-               /* map the pages... */
-               const int flen = qib_user_sdma_page_length(addr, tlen);
-               dma_addr_t dma_addr =
-                       dma_map_page(&dd->pcidev->dev,
-                                    pages[j], 0, flen, DMA_TO_DEVICE);
-               unsigned long fofs = addr & ~PAGE_MASK;
+       while (npages) {
+               if (npages > 8)
+                       j = 8;
+               else
+                       j = npages;
 
-               if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
+               ret = get_user_pages(current, current->mm, addr,
+                            j, 0, 1, pages, NULL);
+               if (ret != j) {
+                       i = 0;
+                       j = ret;
                        ret = -ENOMEM;
-                       goto done;
+                       goto free_pages;
                }
 
-               qib_user_sdma_init_frag(pkt, pkt->naddr, fofs, flen, 1, 1,
-                                       pages[j], kmap(pages[j]), dma_addr);
+               for (i = 0; i < j; i++) {
+                       /* map the pages... */
+                       unsigned long fofs = addr & ~PAGE_MASK;
+                       int flen = ((fofs + tlen) > PAGE_SIZE) ?
+                               (PAGE_SIZE - fofs) : tlen;
+
+                       ret = qib_user_sdma_page_to_frags(dd, pq, pkt,
+                               pages[i], 1, fofs, flen, NULL);
+                       if (ret < 0) {
+                               /* current page has beed taken
+                                * care of inside above call.
+                                */
+                               i++;
+                               goto free_pages;
+                       }
 
-               pkt->naddr++;
-               addr += flen;
-               tlen -= flen;
+                       addr += flen;
+                       tlen -= flen;
+               }
+
+               npages -= j;
        }
 
+       goto done;
+
+       /* if error, return all pages not managed by pkt */
+free_pages:
+       while (i < j)
+               put_page(pages[i++]);
+
 done:
        return ret;
 }
@@ -335,7 +650,7 @@ static int qib_user_sdma_pin_pkt(const struct qib_devdata *dd,
                const int npages = qib_user_sdma_num_pages(iov + idx);
                const unsigned long addr = (unsigned long) iov[idx].iov_base;
 
-               ret = qib_user_sdma_pin_pages(dd, pkt, addr,
+               ret = qib_user_sdma_pin_pages(dd, pq, pkt, addr,
                                              iov[idx].iov_len, npages);
                if (ret < 0)
                        goto free_pkt;
@@ -344,9 +659,22 @@ static int qib_user_sdma_pin_pkt(const struct qib_devdata *dd,
        goto done;
 
 free_pkt:
-       for (idx = 0; idx < pkt->naddr; idx++)
+       /* we need to ignore the first entry here */
+       for (idx = 1; idx < pkt->naddr; idx++)
                qib_user_sdma_free_pkt_frag(&dd->pcidev->dev, pq, pkt, idx);
 
+       /* need to dma unmap the first entry, this is to restore to
+        * the original state so that caller can free the memory in
+        * error condition. Caller does not know if dma mapped or not*/
+       if (pkt->addr[0].dma_mapped) {
+               dma_unmap_single(&dd->pcidev->dev,
+                      pkt->addr[0].addr,
+                      pkt->addr[0].dma_length,
+                      DMA_TO_DEVICE);
+               pkt->addr[0].addr = 0;
+               pkt->addr[0].dma_mapped = 0;
+       }
+
 done:
        return ret;
 }
@@ -359,8 +687,9 @@ static int qib_user_sdma_init_payload(const struct qib_devdata *dd,
 {
        int ret = 0;
 
-       if (npages >= ARRAY_SIZE(pkt->addr))
-               ret = qib_user_sdma_coalesce(dd, pkt, iov, niov);
+       if (pkt->frag_size == pkt->bytes_togo &&
+                       npages >= ARRAY_SIZE(pkt->addr))
+               ret = qib_user_sdma_coalesce(dd, pq, pkt, iov, niov);
        else
                ret = qib_user_sdma_pin_pkt(dd, pq, pkt, iov, niov);
 
@@ -380,7 +709,10 @@ static void qib_user_sdma_free_pkt_list(struct device *dev,
                for (i = 0; i < pkt->naddr; i++)
                        qib_user_sdma_free_pkt_frag(dev, pq, pkt, i);
 
-               kmem_cache_free(pq->pkt_slab, pkt);
+               if (pkt->largepkt)
+                       kfree(pkt);
+               else
+                       kmem_cache_free(pq->pkt_slab, pkt);
        }
        INIT_LIST_HEAD(list);
 }
@@ -393,63 +725,48 @@ static void qib_user_sdma_free_pkt_list(struct device *dev,
  * as, if there is an error we clean it...
  */
 static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd,
+                                   struct qib_pportdata *ppd,
                                    struct qib_user_sdma_queue *pq,
-                                   struct list_head *list,
                                    const struct iovec *iov,
                                    unsigned long niov,
-                                   int maxpkts)
+                                   struct list_head *list,
+                                   int *maxpkts, int *ndesc)
 {
        unsigned long idx = 0;
        int ret = 0;
        int npkts = 0;
-       struct page *page = NULL;
        __le32 *pbc;
        dma_addr_t dma_addr;
        struct qib_user_sdma_pkt *pkt = NULL;
        size_t len;
        size_t nw;
        u32 counter = pq->counter;
-       int dma_mapped = 0;
+       u16 frag_size;
 
-       while (idx < niov && npkts < maxpkts) {
+       while (idx < niov && npkts < *maxpkts) {
                const unsigned long addr = (unsigned long) iov[idx].iov_base;
                const unsigned long idx_save = idx;
                unsigned pktnw;
                unsigned pktnwc;
                int nfrags = 0;
                int npages = 0;
+               int bytes_togo = 0;
+               int tiddma = 0;
                int cfur;
 
-               dma_mapped = 0;
                len = iov[idx].iov_len;
                nw = len >> 2;
-               page = NULL;
-
-               pkt = kmem_cache_alloc(pq->pkt_slab, GFP_KERNEL);
-               if (!pkt) {
-                       ret = -ENOMEM;
-                       goto free_list;
-               }
 
                if (len < QIB_USER_SDMA_MIN_HEADER_LENGTH ||
                    len > PAGE_SIZE || len & 3 || addr & 3) {
                        ret = -EINVAL;
-                       goto free_pkt;
+                       goto free_list;
                }
 
-               if (len == QIB_USER_SDMA_EXP_HEADER_LENGTH)
-                       pbc = dma_pool_alloc(pq->header_cache, GFP_KERNEL,
-                                            &dma_addr);
-               else
-                       pbc = NULL;
-
+               pbc = qib_user_sdma_alloc_header(pq, len, &dma_addr);
                if (!pbc) {
-                       page = alloc_page(GFP_KERNEL);
-                       if (!page) {
-                               ret = -ENOMEM;
-                               goto free_pkt;
-                       }
-                       pbc = kmap(page);
+                       ret = -ENOMEM;
+                       goto free_list;
                }
 
                cfur = copy_from_user(pbc, iov[idx].iov_base, len);
@@ -474,8 +791,8 @@ static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd,
                 * we can verify that the packet is consistent with the
                 * iovec lengths.
                 */
-               pktnw = le32_to_cpu(*pbc) & QIB_PBC_LENGTH_MASK;
-               if (pktnw < pktnwc || pktnw > pktnwc + (PAGE_SIZE >> 2)) {
+               pktnw = le32_to_cpu(*pbc) & 0xFFFF;
+               if (pktnw < pktnwc) {
                        ret = -EINVAL;
                        goto free_pbc;
                }
@@ -486,17 +803,14 @@ static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd,
                        const unsigned long faddr =
                                (unsigned long) iov[idx].iov_base;
 
-                       if (slen & 3 || faddr & 3 || !slen ||
-                           slen > PAGE_SIZE) {
+                       if (slen & 3 || faddr & 3 || !slen) {
                                ret = -EINVAL;
                                goto free_pbc;
                        }
 
-                       npages++;
-                       if ((faddr & PAGE_MASK) !=
-                           ((faddr + slen - 1) & PAGE_MASK))
-                               npages++;
+                       npages += qib_user_sdma_num_pages(&iov[idx]);
 
+                       bytes_togo += slen;
                        pktnwc += slen >> 2;
                        idx++;
                        nfrags++;
@@ -507,48 +821,139 @@ static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd,
                        goto free_pbc;
                }
 
-               if (page) {
-                       dma_addr = dma_map_page(&dd->pcidev->dev,
-                                               page, 0, len, DMA_TO_DEVICE);
-                       if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
+               frag_size = ((le32_to_cpu(*pbc))>>16) & 0xFFFF;
+               if (((frag_size ? frag_size : bytes_togo) + len) >
+                                               ppd->ibmaxlen) {
+                       ret = -EINVAL;
+                       goto free_pbc;
+               }
+
+               if (frag_size) {
+                       int pktsize, tidsmsize, n;
+
+                       n = npages*((2*PAGE_SIZE/frag_size)+1);
+                       pktsize = sizeof(*pkt) + sizeof(pkt->addr[0])*n;
+
+                       /*
+                        * Determine if this is tid-sdma or just sdma.
+                        */
+                       tiddma = (((le32_to_cpu(pbc[7])>>
+                               QLOGIC_IB_I_TID_SHIFT)&
+                               QLOGIC_IB_I_TID_MASK) !=
+                               QLOGIC_IB_I_TID_MASK);
+
+                       if (tiddma)
+                               tidsmsize = iov[idx].iov_len;
+                       else
+                               tidsmsize = 0;
+
+                       pkt = kmalloc(pktsize+tidsmsize, GFP_KERNEL);
+                       if (!pkt) {
                                ret = -ENOMEM;
                                goto free_pbc;
                        }
+                       pkt->largepkt = 1;
+                       pkt->frag_size = frag_size;
+                       pkt->addrlimit = n + ARRAY_SIZE(pkt->addr);
+
+                       if (tiddma) {
+                               char *tidsm = (char *)pkt + pktsize;
+                               cfur = copy_from_user(tidsm,
+                                       iov[idx].iov_base, tidsmsize);
+                               if (cfur) {
+                                       ret = -EFAULT;
+                                       goto free_pkt;
+                               }
+                               pkt->tidsm =
+                                       (struct qib_tid_session_member *)tidsm;
+                               pkt->tidsmcount = tidsmsize/
+                                       sizeof(struct qib_tid_session_member);
+                               pkt->tidsmidx = 0;
+                               idx++;
+                       }
 
-                       dma_mapped = 1;
+                       /*
+                        * pbc 'fill1' field is borrowed to pass frag size,
+                        * we need to clear it after picking frag size, the
+                        * hardware requires this field to be zero.
+                        */
+                       *pbc = cpu_to_le32(le32_to_cpu(*pbc) & 0x0000FFFF);
+               } else {
+                       pkt = kmem_cache_alloc(pq->pkt_slab, GFP_KERNEL);
+                       if (!pkt) {
+                               ret = -ENOMEM;
+                               goto free_pbc;
+                       }
+                       pkt->largepkt = 0;
+                       pkt->frag_size = bytes_togo;
+                       pkt->addrlimit = ARRAY_SIZE(pkt->addr);
                }
-
-               qib_user_sdma_init_header(pkt, counter, 0, len, dma_mapped,
-                                         page, pbc, dma_addr);
+               pkt->bytes_togo = bytes_togo;
+               pkt->payload_size = 0;
+               pkt->counter = counter;
+               pkt->tiddma = tiddma;
+
+               /* setup the first header */
+               qib_user_sdma_init_frag(pkt, 0, /* index */
+                       0, len,         /* offset, len */
+                       1, 0,           /* first last desc */
+                       0, 0,           /* put page, dma mapped */
+                       NULL, pbc,      /* struct page, virt addr */
+                       dma_addr, len); /* dma addr, dma length */
+               pkt->index = 0;
+               pkt->naddr = 1;
 
                if (nfrags) {
                        ret = qib_user_sdma_init_payload(dd, pq, pkt,
                                                         iov + idx_save + 1,
                                                         nfrags, npages);
                        if (ret < 0)
-                               goto free_pbc_dma;
+                               goto free_pkt;
+               } else {
+                       /* since there is no payload, mark the
+                        * header as the last desc. */
+                       pkt->addr[0].last_desc = 1;
+
+                       if (dma_addr == 0) {
+                               /*
+                                * the header is not dma mapped yet.
+                                * it should be from kmalloc.
+                                */
+                               dma_addr = dma_map_single(&dd->pcidev->dev,
+                                       pbc, len, DMA_TO_DEVICE);
+                               if (dma_mapping_error(&dd->pcidev->dev,
+                                                               dma_addr)) {
+                                       ret = -ENOMEM;
+                                       goto free_pkt;
+                               }
+                               pkt->addr[0].addr = dma_addr;
+                               pkt->addr[0].dma_mapped = 1;
+                       }
                }
 
                counter++;
                npkts++;
+               pkt->pq = pq;
+               pkt->index = 0; /* reset index for push on hw */
+               *ndesc += pkt->naddr;
 
                list_add_tail(&pkt->list, list);
        }
 
+       *maxpkts = npkts;
        ret = idx;
        goto done;
 
-free_pbc_dma:
-       if (dma_mapped)
-               dma_unmap_page(&dd->pcidev->dev, dma_addr, len, DMA_TO_DEVICE);
+free_pkt:
+       if (pkt->largepkt)
+               kfree(pkt);
+       else
+               kmem_cache_free(pq->pkt_slab, pkt);
 free_pbc:
-       if (page) {
-               kunmap(page);
-               __free_page(page);
-       } else
+       if (dma_addr)
                dma_pool_free(pq->header_cache, pbc, dma_addr);
-free_pkt:
-       kmem_cache_free(pq->pkt_slab, pkt);
+       else
+               kfree(pbc);
 free_list:
        qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, list);
 done:
@@ -569,10 +974,20 @@ static int qib_user_sdma_queue_clean(struct qib_pportdata *ppd,
        struct list_head free_list;
        struct qib_user_sdma_pkt *pkt;
        struct qib_user_sdma_pkt *pkt_prev;
+       unsigned long flags;
        int ret = 0;
 
+       if (!pq->num_sending)
+               return 0;
+
        INIT_LIST_HEAD(&free_list);
 
+       /*
+        * We need this spin lock here because interrupt handler
+        * might modify this list in qib_user_sdma_send_desc(), also
+        * we can not get interrupted, otherwise it is a deadlock.
+        */
+       spin_lock_irqsave(&pq->sent_lock, flags);
        list_for_each_entry_safe(pkt, pkt_prev, &pq->sent, list) {
                s64 descd = ppd->sdma_descq_removed - pkt->added;
 
@@ -583,7 +998,9 @@ static int qib_user_sdma_queue_clean(struct qib_pportdata *ppd,
 
                /* one more packet cleaned */
                ret++;
+               pq->num_sending--;
        }
+       spin_unlock_irqrestore(&pq->sent_lock, flags);
 
        if (!list_empty(&free_list)) {
                u32 counter;
@@ -627,6 +1044,7 @@ void qib_user_sdma_queue_drain(struct qib_pportdata *ppd,
                               struct qib_user_sdma_queue *pq)
 {
        struct qib_devdata *dd = ppd->dd;
+       unsigned long flags;
        int i;
 
        if (!pq)
@@ -634,7 +1052,7 @@ void qib_user_sdma_queue_drain(struct qib_pportdata *ppd,
 
        for (i = 0; i < QIB_USER_SDMA_DRAIN_TIMEOUT; i++) {
                mutex_lock(&pq->lock);
-               if (list_empty(&pq->sent)) {
+               if (!pq->num_pending && !pq->num_sending) {
                        mutex_unlock(&pq->lock);
                        break;
                }
@@ -644,29 +1062,44 @@ void qib_user_sdma_queue_drain(struct qib_pportdata *ppd,
                msleep(10);
        }
 
-       if (!list_empty(&pq->sent)) {
+       if (pq->num_pending || pq->num_sending) {
+               struct qib_user_sdma_pkt *pkt;
+               struct qib_user_sdma_pkt *pkt_prev;
                struct list_head free_list;
 
+               mutex_lock(&pq->lock);
+               spin_lock_irqsave(&ppd->sdma_lock, flags);
+               /*
+                * Since we hold sdma_lock, it is safe without sent_lock.
+                */
+               if (pq->num_pending) {
+                       list_for_each_entry_safe(pkt, pkt_prev,
+                                       &ppd->sdma_userpending, list) {
+                               if (pkt->pq == pq) {
+                                       list_move_tail(&pkt->list, &pq->sent);
+                                       pq->num_pending--;
+                                       pq->num_sending++;
+                               }
+                       }
+               }
+               spin_unlock_irqrestore(&ppd->sdma_lock, flags);
+
                qib_dev_err(dd, "user sdma lists not empty: forcing!\n");
                INIT_LIST_HEAD(&free_list);
-               mutex_lock(&pq->lock);
                list_splice_init(&pq->sent, &free_list);
+               pq->num_sending = 0;
                qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list);
                mutex_unlock(&pq->lock);
        }
 }
 
-static inline __le64 qib_sdma_make_desc0(struct qib_pportdata *ppd,
+static inline __le64 qib_sdma_make_desc0(u8 gen,
                                         u64 addr, u64 dwlen, u64 dwoffset)
 {
-       u8 tmpgen;
-
-       tmpgen = ppd->sdma_generation;
-
        return cpu_to_le64(/* SDmaPhyAddr[31:0] */
                           ((addr & 0xfffffffcULL) << 32) |
                           /* SDmaGeneration[1:0] */
-                          ((tmpgen & 3ULL) << 30) |
+                          ((gen & 3ULL) << 30) |
                           /* SDmaDwordCount[10:0] */
                           ((dwlen & 0x7ffULL) << 16) |
                           /* SDmaBufOffset[12:2] */
@@ -692,7 +1125,7 @@ static inline __le64 qib_sdma_make_desc1(u64 addr)
 
 static void qib_user_sdma_send_frag(struct qib_pportdata *ppd,
                                    struct qib_user_sdma_pkt *pkt, int idx,
-                                   unsigned ofs, u16 tail)
+                                   unsigned ofs, u16 tail, u8 gen)
 {
        const u64 addr = (u64) pkt->addr[idx].addr +
                (u64) pkt->addr[idx].offset;
@@ -702,104 +1135,132 @@ static void qib_user_sdma_send_frag(struct qib_pportdata *ppd,
 
        descqp = &ppd->sdma_descq[tail].qw[0];
 
-       descq0 = qib_sdma_make_desc0(ppd, addr, dwlen, ofs);
-       if (idx == 0)
+       descq0 = qib_sdma_make_desc0(gen, addr, dwlen, ofs);
+       if (pkt->addr[idx].first_desc)
                descq0 = qib_sdma_make_first_desc0(descq0);
-       if (idx == pkt->naddr - 1)
+       if (pkt->addr[idx].last_desc) {
                descq0 = qib_sdma_make_last_desc0(descq0);
+               if (ppd->sdma_intrequest) {
+                       descq0 |= cpu_to_le64(1ULL << 15);
+                       ppd->sdma_intrequest = 0;
+               }
+       }
 
        descqp[0] = descq0;
        descqp[1] = qib_sdma_make_desc1(addr);
 }
 
-/* pq->lock must be held, get packets on the wire... */
-static int qib_user_sdma_push_pkts(struct qib_pportdata *ppd,
-                                  struct qib_user_sdma_queue *pq,
-                                  struct list_head *pktlist)
+void qib_user_sdma_send_desc(struct qib_pportdata *ppd,
+                               struct list_head *pktlist)
 {
        struct qib_devdata *dd = ppd->dd;
-       int ret = 0;
-       unsigned long flags;
-       u16 tail;
-       u8 generation;
-       u64 descq_added;
-
-       if (list_empty(pktlist))
-               return 0;
+       u16 nfree, nsent;
+       u16 tail, tail_c;
+       u8 gen, gen_c;
 
-       if (unlikely(!(ppd->lflags & QIBL_LINKACTIVE)))
-               return -ECOMM;
-
-       spin_lock_irqsave(&ppd->sdma_lock, flags);
-
-       /* keep a copy for restoring purposes in case of problems */
-       generation = ppd->sdma_generation;
-       descq_added = ppd->sdma_descq_added;
-
-       if (unlikely(!__qib_sdma_running(ppd))) {
-               ret = -ECOMM;
-               goto unlock;
-       }
+       nfree = qib_sdma_descq_freecnt(ppd);
+       if (!nfree)
+               return;
 
-       tail = ppd->sdma_descq_tail;
+retry:
+       nsent = 0;
+       tail_c = tail = ppd->sdma_descq_tail;
+       gen_c = gen = ppd->sdma_generation;
        while (!list_empty(pktlist)) {
                struct qib_user_sdma_pkt *pkt =
                        list_entry(pktlist->next, struct qib_user_sdma_pkt,
                                   list);
-               int i;
+               int i, j, c = 0;
                unsigned ofs = 0;
                u16 dtail = tail;
 
-               if (pkt->naddr > qib_sdma_descq_freecnt(ppd))
-                       goto unlock_check_tail;
-
-               for (i = 0; i < pkt->naddr; i++) {
-                       qib_user_sdma_send_frag(ppd, pkt, i, ofs, tail);
+               for (i = pkt->index; i < pkt->naddr && nfree; i++) {
+                       qib_user_sdma_send_frag(ppd, pkt, i, ofs, tail, gen);
                        ofs += pkt->addr[i].length >> 2;
 
                        if (++tail == ppd->sdma_descq_cnt) {
                                tail = 0;
-                               ++ppd->sdma_generation;
+                               ++gen;
+                               ppd->sdma_intrequest = 1;
+                       } else if (tail == (ppd->sdma_descq_cnt>>1)) {
+                               ppd->sdma_intrequest = 1;
                        }
-               }
+                       nfree--;
+                       if (pkt->addr[i].last_desc == 0)
+                               continue;
 
-               if ((ofs << 2) > ppd->ibmaxlen) {
-                       ret = -EMSGSIZE;
-                       goto unlock;
-               }
-
-               /*
-                * If the packet is >= 2KB mtu equivalent, we have to use
-                * the large buffers, and have to mark each descriptor as
-                * part of a large buffer packet.
-                */
-               if (ofs > dd->piosize2kmax_dwords) {
-                       for (i = 0; i < pkt->naddr; i++) {
-                               ppd->sdma_descq[dtail].qw[0] |=
-                                       cpu_to_le64(1ULL << 14);
-                               if (++dtail == ppd->sdma_descq_cnt)
-                                       dtail = 0;
+                       /*
+                        * If the packet is >= 2KB mtu equivalent, we
+                        * have to use the large buffers, and have to
+                        * mark each descriptor as part of a large
+                        * buffer packet.
+                        */
+                       if (ofs > dd->piosize2kmax_dwords) {
+                               for (j = pkt->index; j <= i; j++) {
+                                       ppd->sdma_descq[dtail].qw[0] |=
+                                               cpu_to_le64(1ULL << 14);
+                                       if (++dtail == ppd->sdma_descq_cnt)
+                                               dtail = 0;
+                               }
                        }
+                       c += i + 1 - pkt->index;
+                       pkt->index = i + 1; /* index for next first */
+                       tail_c = dtail = tail;
+                       gen_c = gen;
+                       ofs = 0;  /* reset for next packet */
                }
 
-               ppd->sdma_descq_added += pkt->naddr;
-               pkt->added = ppd->sdma_descq_added;
-               list_move_tail(&pkt->list, &pq->sent);
-               ret++;
+               ppd->sdma_descq_added += c;
+               nsent += c;
+               if (pkt->index == pkt->naddr) {
+                       pkt->added = ppd->sdma_descq_added;
+                       pkt->pq->added = pkt->added;
+                       pkt->pq->num_pending--;
+                       spin_lock(&pkt->pq->sent_lock);
+                       pkt->pq->num_sending++;
+                       list_move_tail(&pkt->list, &pkt->pq->sent);
+                       spin_unlock(&pkt->pq->sent_lock);
+               }
+               if (!nfree || (nsent<<2) > ppd->sdma_descq_cnt)
+                       break;
        }
 
-unlock_check_tail:
        /* advance the tail on the chip if necessary */
-       if (ppd->sdma_descq_tail != tail)
-               dd->f_sdma_update_tail(ppd, tail);
+       if (ppd->sdma_descq_tail != tail_c) {
+               ppd->sdma_generation = gen_c;
+               dd->f_sdma_update_tail(ppd, tail_c);
+       }
 
-unlock:
-       if (unlikely(ret < 0)) {
-               ppd->sdma_generation = generation;
-               ppd->sdma_descq_added = descq_added;
+       if (nfree && !list_empty(pktlist))
+               goto retry;
+
+       return;
+}
+
+/* pq->lock must be held, get packets on the wire... */
+static int qib_user_sdma_push_pkts(struct qib_pportdata *ppd,
+                                struct qib_user_sdma_queue *pq,
+                                struct list_head *pktlist, int count)
+{
+       int ret = 0;
+       unsigned long flags;
+
+       if (unlikely(!(ppd->lflags & QIBL_LINKACTIVE)))
+               return -ECOMM;
+
+       spin_lock_irqsave(&ppd->sdma_lock, flags);
+
+       if (unlikely(!__qib_sdma_running(ppd))) {
+               ret = -ECOMM;
+               goto unlock;
        }
-       spin_unlock_irqrestore(&ppd->sdma_lock, flags);
 
+       pq->num_pending += count;
+       list_splice_tail_init(pktlist, &ppd->sdma_userpending);
+       qib_user_sdma_send_desc(ppd, &ppd->sdma_userpending);
+
+unlock:
+       spin_unlock_irqrestore(&ppd->sdma_lock, flags);
        return ret;
 }
 
@@ -822,19 +1283,23 @@ int qib_user_sdma_writev(struct qib_ctxtdata *rcd,
        if (!qib_sdma_running(ppd))
                goto done_unlock;
 
-       if (ppd->sdma_descq_added != ppd->sdma_descq_removed) {
+       /* if I have packets not complete yet */
+       if (pq->added > ppd->sdma_descq_removed)
                qib_user_sdma_hwqueue_clean(ppd);
+       /* if I have complete packets to be freed */
+       if (pq->num_sending)
                qib_user_sdma_queue_clean(ppd, pq);
-       }
 
        while (dim) {
-               const int mxp = 8;
+               int mxp = 8;
+               int ndesc = 0;
 
                down_write(&current->mm->mmap_sem);
-               ret = qib_user_sdma_queue_pkts(dd, pq, &list, iov, dim, mxp);
+               ret = qib_user_sdma_queue_pkts(dd, ppd, pq,
+                               iov, dim, &list, &mxp, &ndesc);
                up_write(&current->mm->mmap_sem);
 
-               if (ret <= 0)
+               if (ret < 0)
                        goto done_unlock;
                else {
                        dim -= ret;
@@ -844,24 +1309,20 @@ int qib_user_sdma_writev(struct qib_ctxtdata *rcd,
                /* force packets onto the sdma hw queue... */
                if (!list_empty(&list)) {
                        /*
-                        * Lazily clean hw queue.  the 4 is a guess of about
-                        * how many sdma descriptors a packet will take (it
-                        * doesn't have to be perfect).
+                        * Lazily clean hw queue.
                         */
-                       if (qib_sdma_descq_freecnt(ppd) < ret * 4) {
+                       if (qib_sdma_descq_freecnt(ppd) < ndesc) {
                                qib_user_sdma_hwqueue_clean(ppd);
-                               qib_user_sdma_queue_clean(ppd, pq);
+                               if (pq->num_sending)
+                                       qib_user_sdma_queue_clean(ppd, pq);
                        }
 
-                       ret = qib_user_sdma_push_pkts(ppd, pq, &list);
+                       ret = qib_user_sdma_push_pkts(ppd, pq, &list, mxp);
                        if (ret < 0)
                                goto done_unlock;
                        else {
-                               npkts += ret;
-                               pq->counter += ret;
-
-                               if (!list_empty(&list))
-                                       goto done_unlock;
+                               npkts += mxp;
+                               pq->counter += mxp;
                        }
                }
        }
index 3eceb61e3532844555b2656b42f50a4ca42664dc..7a3175400b2a1c6cf05b85e59f39773b5eb4db86 100644 (file)
@@ -817,7 +817,6 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
 
                if (neigh) {
                        neigh->cm = NULL;
-                       list_del(&neigh->list);
                        ipoib_neigh_free(neigh);
 
                        tx->neigh = NULL;
@@ -1234,7 +1233,6 @@ static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
 
                if (neigh) {
                        neigh->cm = NULL;
-                       list_del(&neigh->list);
                        ipoib_neigh_free(neigh);
 
                        tx->neigh = NULL;
@@ -1325,7 +1323,6 @@ static void ipoib_cm_tx_start(struct work_struct *work)
                        neigh = p->neigh;
                        if (neigh) {
                                neigh->cm = NULL;
-                               list_del(&neigh->list);
                                ipoib_neigh_free(neigh);
                        }
                        list_del(&p->list);
index c6f71a88c55ca9649b6098cc81d64de229857055..82cec1af902cd24533ef032869cf2052fa9d4f63 100644 (file)
@@ -493,7 +493,6 @@ static void path_rec_completion(int status,
                                                                               path,
                                                                               neigh));
                                if (!ipoib_cm_get(neigh)) {
-                                       list_del(&neigh->list);
                                        ipoib_neigh_free(neigh);
                                        continue;
                                }
@@ -618,7 +617,6 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
                        if (!ipoib_cm_get(neigh))
                                ipoib_cm_set(neigh, ipoib_cm_create_tx(dev, path, neigh));
                        if (!ipoib_cm_get(neigh)) {
-                               list_del(&neigh->list);
                                ipoib_neigh_free(neigh);
                                goto err_drop;
                        }
@@ -639,7 +637,7 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
                neigh->ah  = NULL;
 
                if (!path->query && path_rec_start(dev, path))
-                       goto err_list;
+                       goto err_path;
 
                __skb_queue_tail(&neigh->queue, skb);
        }
@@ -648,9 +646,6 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
        ipoib_neigh_put(neigh);
        return;
 
-err_list:
-       list_del(&neigh->list);
-
 err_path:
        ipoib_neigh_free(neigh);
 err_drop:
@@ -1098,6 +1093,8 @@ void ipoib_neigh_free(struct ipoib_neigh *neigh)
                        rcu_assign_pointer(*np,
                                           rcu_dereference_protected(neigh->hnext,
                                                                     lockdep_is_held(&priv->lock)));
+                       /* remove from parent list */
+                       list_del(&neigh->list);
                        call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
                        return;
                } else {
index 2e84ef859c5b9755d0940ee6bb812915dfcd69f0..705de7b40201a02602d5edbdf657c6a42882ed62 100644 (file)
@@ -347,6 +347,7 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
 {
        struct iscsi_conn *conn = cls_conn->dd_data;
        struct iscsi_iser_conn *iser_conn;
+       struct iscsi_session *session;
        struct iser_conn *ib_conn;
        struct iscsi_endpoint *ep;
        int error;
@@ -365,7 +366,8 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
        }
        ib_conn = ep->dd_data;
 
-       if (iser_alloc_rx_descriptors(ib_conn))
+       session = conn->session;
+       if (iser_alloc_rx_descriptors(ib_conn, session))
                return -ENOMEM;
 
        /* binds the iSER connection retrieved from the previously
@@ -419,12 +421,13 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
        struct iscsi_cls_session *cls_session;
        struct iscsi_session *session;
        struct Scsi_Host *shost;
-       struct iser_conn *ib_conn;
+       struct iser_conn *ib_conn = NULL;
 
        shost = iscsi_host_alloc(&iscsi_iser_sht, 0, 0);
        if (!shost)
                return NULL;
        shost->transportt = iscsi_iser_scsi_transport;
+       shost->cmd_per_lun = qdepth;
        shost->max_lun = iscsi_max_lun;
        shost->max_id = 0;
        shost->max_channel = 0;
@@ -441,12 +444,14 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
                           ep ? ib_conn->device->ib_device->dma_device : NULL))
                goto free_host;
 
-       /*
-        * we do not support setting can_queue cmd_per_lun from userspace yet
-        * because we preallocate so many resources
-        */
+       if (cmds_max > ISER_DEF_XMIT_CMDS_MAX) {
+               iser_info("cmds_max changed from %u to %u\n",
+                         cmds_max, ISER_DEF_XMIT_CMDS_MAX);
+               cmds_max = ISER_DEF_XMIT_CMDS_MAX;
+       }
+
        cls_session = iscsi_session_setup(&iscsi_iser_transport, shost,
-                                         ISCSI_DEF_XMIT_CMDS_MAX, 0,
+                                         cmds_max, 0,
                                          sizeof(struct iscsi_iser_task),
                                          initial_cmdsn, 0);
        if (!cls_session)
index 4f069c0d4c04371be4a156cfac59f24e23fbac02..67914027c614d5a393d036fcce1ff3edda021a39 100644 (file)
 
 #define iser_warn(fmt, arg...)                         \
        do {                                            \
-               if (iser_debug_level > 1)               \
+               if (iser_debug_level > 0)               \
                        pr_warn(PFX "%s:" fmt,          \
                                __func__ , ## arg);     \
        } while (0)
 
 #define iser_info(fmt, arg...)                         \
        do {                                            \
-               if (iser_debug_level > 0)               \
+               if (iser_debug_level > 1)               \
                        pr_info(PFX "%s:" fmt,          \
                                __func__ , ## arg);     \
        } while (0)
 
                                        /* support up to 512KB in one RDMA */
 #define ISCSI_ISER_SG_TABLESIZE         (0x80000 >> SHIFT_4K)
-#define ISER_DEF_CMD_PER_LUN           ISCSI_DEF_XMIT_CMDS_MAX
+#define ISER_DEF_XMIT_CMDS_DEFAULT             512
+#if ISCSI_DEF_XMIT_CMDS_MAX > ISER_DEF_XMIT_CMDS_DEFAULT
+       #define ISER_DEF_XMIT_CMDS_MAX          ISCSI_DEF_XMIT_CMDS_MAX
+#else
+       #define ISER_DEF_XMIT_CMDS_MAX          ISER_DEF_XMIT_CMDS_DEFAULT
+#endif
+#define ISER_DEF_CMD_PER_LUN           ISER_DEF_XMIT_CMDS_MAX
 
 /* QP settings */
 /* Maximal bounds on received asynchronous PDUs */
 #define ISER_MAX_TX_MISC_PDUS          6 /* NOOP_OUT(2), TEXT(1),         *
                                           * SCSI_TMFUNC(2), LOGOUT(1) */
 
-#define ISER_QP_MAX_RECV_DTOS          (ISCSI_DEF_XMIT_CMDS_MAX)
+#define ISER_QP_MAX_RECV_DTOS          (ISER_DEF_XMIT_CMDS_MAX)
 
-#define ISER_MIN_POSTED_RX             (ISCSI_DEF_XMIT_CMDS_MAX >> 2)
+#define ISER_MIN_POSTED_RX             (ISER_DEF_XMIT_CMDS_MAX >> 2)
 
 /* the max TX (send) WR supported by the iSER QP is defined by                 *
  * max_send_wr = T * (1 + D) + C ; D is how many inflight dataouts we expect   *
 
 #define ISER_INFLIGHT_DATAOUTS         8
 
-#define ISER_QP_MAX_REQ_DTOS           (ISCSI_DEF_XMIT_CMDS_MAX *    \
+#define ISER_QP_MAX_REQ_DTOS           (ISER_DEF_XMIT_CMDS_MAX *    \
                                        (1 + ISER_INFLIGHT_DATAOUTS) + \
                                        ISER_MAX_TX_MISC_PDUS        + \
                                        ISER_MAX_RX_MISC_PDUS)
@@ -205,7 +211,7 @@ struct iser_mem_reg {
        u64  va;
        u64  len;
        void *mem_h;
-       int  is_fmr;
+       int  is_mr;
 };
 
 struct iser_regd_buf {
@@ -246,6 +252,9 @@ struct iser_rx_desc {
 
 #define ISER_MAX_CQ 4
 
+struct iser_conn;
+struct iscsi_iser_task;
+
 struct iser_device {
        struct ib_device             *ib_device;
        struct ib_pd                 *pd;
@@ -259,6 +268,22 @@ struct iser_device {
        int                          cq_active_qps[ISER_MAX_CQ];
        int                          cqs_used;
        struct iser_cq_desc          *cq_desc;
+       int                          (*iser_alloc_rdma_reg_res)(struct iser_conn *ib_conn,
+                                                               unsigned cmds_max);
+       void                         (*iser_free_rdma_reg_res)(struct iser_conn *ib_conn);
+       int                          (*iser_reg_rdma_mem)(struct iscsi_iser_task *iser_task,
+                                                         enum iser_data_dir cmd_dir);
+       void                         (*iser_unreg_rdma_mem)(struct iscsi_iser_task *iser_task,
+                                                           enum iser_data_dir cmd_dir);
+};
+
+struct fast_reg_descriptor {
+       struct list_head                  list;
+       /* For fast registration - FRWR */
+       struct ib_mr                     *data_mr;
+       struct ib_fast_reg_page_list     *data_frpl;
+       /* Valid for fast registration flag */
+       bool                              valid;
 };
 
 struct iser_conn {
@@ -270,13 +295,13 @@ struct iser_conn {
        struct iser_device           *device;       /* device context          */
        struct rdma_cm_id            *cma_id;       /* CMA ID                  */
        struct ib_qp                 *qp;           /* QP                      */
-       struct ib_fmr_pool           *fmr_pool;     /* pool of IB FMRs         */
        wait_queue_head_t            wait;          /* waitq for conn/disconn  */
+       unsigned                     qp_max_recv_dtos; /* num of rx buffers */
+       unsigned                     qp_max_recv_dtos_mask; /* above minus 1 */
+       unsigned                     min_posted_rx; /* qp_max_recv_dtos >> 2 */
        int                          post_recv_buf_count; /* posted rx count  */
        atomic_t                     post_send_buf_count; /* posted tx count   */
        char                         name[ISER_OBJECT_NAME_SIZE];
-       struct iser_page_vec         *page_vec;     /* represents SG to fmr maps*
-                                                    * maps serialized as tx is*/
        struct list_head             conn_list;       /* entry in ig conn list */
 
        char                         *login_buf;
@@ -285,6 +310,17 @@ struct iser_conn {
        unsigned int                 rx_desc_head;
        struct iser_rx_desc          *rx_descs;
        struct ib_recv_wr            rx_wr[ISER_MIN_POSTED_RX];
+       union {
+               struct {
+                       struct ib_fmr_pool      *pool;     /* pool of IB FMRs         */
+                       struct iser_page_vec    *page_vec; /* represents SG to fmr maps*
+                                                           * maps serialized as tx is*/
+               } fmr;
+               struct {
+                       struct list_head        pool;
+                       int                     pool_size;
+               } frwr;
+       } fastreg;
 };
 
 struct iscsi_iser_conn {
@@ -368,8 +404,10 @@ void iser_free_rx_descriptors(struct iser_conn *ib_conn);
 void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *task,
                                     enum iser_data_dir         cmd_dir);
 
-int  iser_reg_rdma_mem(struct iscsi_iser_task *task,
-                      enum   iser_data_dir        cmd_dir);
+int  iser_reg_rdma_mem_fmr(struct iscsi_iser_task *task,
+                          enum iser_data_dir cmd_dir);
+int  iser_reg_rdma_mem_frwr(struct iscsi_iser_task *task,
+                           enum iser_data_dir cmd_dir);
 
 int  iser_connect(struct iser_conn   *ib_conn,
                  struct sockaddr_in *src_addr,
@@ -380,7 +418,10 @@ int  iser_reg_page_vec(struct iser_conn     *ib_conn,
                       struct iser_page_vec *page_vec,
                       struct iser_mem_reg  *mem_reg);
 
-void iser_unreg_mem(struct iser_mem_reg *mem_reg);
+void iser_unreg_mem_fmr(struct iscsi_iser_task *iser_task,
+                       enum iser_data_dir cmd_dir);
+void iser_unreg_mem_frwr(struct iscsi_iser_task *iser_task,
+                        enum iser_data_dir cmd_dir);
 
 int  iser_post_recvl(struct iser_conn *ib_conn);
 int  iser_post_recvm(struct iser_conn *ib_conn, int count);
@@ -394,5 +435,9 @@ int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
 void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task);
 int  iser_initialize_task_headers(struct iscsi_task *task,
                        struct iser_tx_desc *tx_desc);
-int iser_alloc_rx_descriptors(struct iser_conn *ib_conn);
+int iser_alloc_rx_descriptors(struct iser_conn *ib_conn, struct iscsi_session *session);
+int iser_create_fmr_pool(struct iser_conn *ib_conn, unsigned cmds_max);
+void iser_free_fmr_pool(struct iser_conn *ib_conn);
+int iser_create_frwr_pool(struct iser_conn *ib_conn, unsigned cmds_max);
+void iser_free_frwr_pool(struct iser_conn *ib_conn);
 #endif
index b6d81a86c9760eed77c3724d3bb138d0e8cf8a6e..bdc38f423ca2f8e5908b3229a2cf1e04d2701a38 100644 (file)
@@ -49,6 +49,7 @@ static int iser_prepare_read_cmd(struct iscsi_task *task,
 
 {
        struct iscsi_iser_task *iser_task = task->dd_data;
+       struct iser_device  *device = iser_task->iser_conn->ib_conn->device;
        struct iser_regd_buf *regd_buf;
        int err;
        struct iser_hdr *hdr = &iser_task->desc.iser_header;
@@ -69,7 +70,7 @@ static int iser_prepare_read_cmd(struct iscsi_task *task,
                return -EINVAL;
        }
 
-       err = iser_reg_rdma_mem(iser_task,ISER_DIR_IN);
+       err = device->iser_reg_rdma_mem(iser_task, ISER_DIR_IN);
        if (err) {
                iser_err("Failed to set up Data-IN RDMA\n");
                return err;
@@ -98,6 +99,7 @@ iser_prepare_write_cmd(struct iscsi_task *task,
                       unsigned int edtl)
 {
        struct iscsi_iser_task *iser_task = task->dd_data;
+       struct iser_device  *device = iser_task->iser_conn->ib_conn->device;
        struct iser_regd_buf *regd_buf;
        int err;
        struct iser_hdr *hdr = &iser_task->desc.iser_header;
@@ -119,7 +121,7 @@ iser_prepare_write_cmd(struct iscsi_task *task,
                return -EINVAL;
        }
 
-       err = iser_reg_rdma_mem(iser_task,ISER_DIR_OUT);
+       err = device->iser_reg_rdma_mem(iser_task, ISER_DIR_OUT);
        if (err != 0) {
                iser_err("Failed to register write cmd RDMA mem\n");
                return err;
@@ -170,8 +172,78 @@ static void iser_create_send_desc(struct iser_conn *ib_conn,
        }
 }
 
+static void iser_free_login_buf(struct iser_conn *ib_conn)
+{
+       if (!ib_conn->login_buf)
+               return;
+
+       if (ib_conn->login_req_dma)
+               ib_dma_unmap_single(ib_conn->device->ib_device,
+                                   ib_conn->login_req_dma,
+                                   ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_TO_DEVICE);
+
+       if (ib_conn->login_resp_dma)
+               ib_dma_unmap_single(ib_conn->device->ib_device,
+                                   ib_conn->login_resp_dma,
+                                   ISER_RX_LOGIN_SIZE, DMA_FROM_DEVICE);
+
+       kfree(ib_conn->login_buf);
+
+       /* make sure we never redo any unmapping */
+       ib_conn->login_req_dma = 0;
+       ib_conn->login_resp_dma = 0;
+       ib_conn->login_buf = NULL;
+}
+
+static int iser_alloc_login_buf(struct iser_conn *ib_conn)
+{
+       struct iser_device      *device;
+       int                     req_err, resp_err;
+
+       BUG_ON(ib_conn->device == NULL);
+
+       device = ib_conn->device;
+
+       ib_conn->login_buf = kmalloc(ISCSI_DEF_MAX_RECV_SEG_LEN +
+                                    ISER_RX_LOGIN_SIZE, GFP_KERNEL);
+       if (!ib_conn->login_buf)
+               goto out_err;
+
+       ib_conn->login_req_buf  = ib_conn->login_buf;
+       ib_conn->login_resp_buf = ib_conn->login_buf +
+                                               ISCSI_DEF_MAX_RECV_SEG_LEN;
+
+       ib_conn->login_req_dma = ib_dma_map_single(ib_conn->device->ib_device,
+                               (void *)ib_conn->login_req_buf,
+                               ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_TO_DEVICE);
+
+       ib_conn->login_resp_dma = ib_dma_map_single(ib_conn->device->ib_device,
+                               (void *)ib_conn->login_resp_buf,
+                               ISER_RX_LOGIN_SIZE, DMA_FROM_DEVICE);
+
+       req_err  = ib_dma_mapping_error(device->ib_device,
+                                       ib_conn->login_req_dma);
+       resp_err = ib_dma_mapping_error(device->ib_device,
+                                       ib_conn->login_resp_dma);
 
-int iser_alloc_rx_descriptors(struct iser_conn *ib_conn)
+       if (req_err || resp_err) {
+               if (req_err)
+                       ib_conn->login_req_dma = 0;
+               if (resp_err)
+                       ib_conn->login_resp_dma = 0;
+               goto free_login_buf;
+       }
+       return 0;
+
+free_login_buf:
+       iser_free_login_buf(ib_conn);
+
+out_err:
+       iser_err("unable to alloc or map login buf\n");
+       return -ENOMEM;
+}
+
+int iser_alloc_rx_descriptors(struct iser_conn *ib_conn, struct iscsi_session *session)
 {
        int i, j;
        u64 dma_addr;
@@ -179,14 +251,24 @@ int iser_alloc_rx_descriptors(struct iser_conn *ib_conn)
        struct ib_sge       *rx_sg;
        struct iser_device  *device = ib_conn->device;
 
-       ib_conn->rx_descs = kmalloc(ISER_QP_MAX_RECV_DTOS *
+       ib_conn->qp_max_recv_dtos = session->cmds_max;
+       ib_conn->qp_max_recv_dtos_mask = session->cmds_max - 1; /* cmds_max is 2^N */
+       ib_conn->min_posted_rx = ib_conn->qp_max_recv_dtos >> 2;
+
+       if (device->iser_alloc_rdma_reg_res(ib_conn, session->scsi_cmds_max))
+               goto create_rdma_reg_res_failed;
+
+       if (iser_alloc_login_buf(ib_conn))
+               goto alloc_login_buf_fail;
+
+       ib_conn->rx_descs = kmalloc(session->cmds_max *
                                sizeof(struct iser_rx_desc), GFP_KERNEL);
        if (!ib_conn->rx_descs)
                goto rx_desc_alloc_fail;
 
        rx_desc = ib_conn->rx_descs;
 
-       for (i = 0; i < ISER_QP_MAX_RECV_DTOS; i++, rx_desc++)  {
+       for (i = 0; i < ib_conn->qp_max_recv_dtos; i++, rx_desc++)  {
                dma_addr = ib_dma_map_single(device->ib_device, (void *)rx_desc,
                                        ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
                if (ib_dma_mapping_error(device->ib_device, dma_addr))
@@ -207,10 +289,14 @@ rx_desc_dma_map_failed:
        rx_desc = ib_conn->rx_descs;
        for (j = 0; j < i; j++, rx_desc++)
                ib_dma_unmap_single(device->ib_device, rx_desc->dma_addr,
-                       ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
+                                   ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
        kfree(ib_conn->rx_descs);
        ib_conn->rx_descs = NULL;
 rx_desc_alloc_fail:
+       iser_free_login_buf(ib_conn);
+alloc_login_buf_fail:
+       device->iser_free_rdma_reg_res(ib_conn);
+create_rdma_reg_res_failed:
        iser_err("failed allocating rx descriptors / data buffers\n");
        return -ENOMEM;
 }
@@ -222,13 +308,21 @@ void iser_free_rx_descriptors(struct iser_conn *ib_conn)
        struct iser_device *device = ib_conn->device;
 
        if (!ib_conn->rx_descs)
-               return;
+               goto free_login_buf;
+
+       if (device && device->iser_free_rdma_reg_res)
+               device->iser_free_rdma_reg_res(ib_conn);
 
        rx_desc = ib_conn->rx_descs;
-       for (i = 0; i < ISER_QP_MAX_RECV_DTOS; i++, rx_desc++)
+       for (i = 0; i < ib_conn->qp_max_recv_dtos; i++, rx_desc++)
                ib_dma_unmap_single(device->ib_device, rx_desc->dma_addr,
-                       ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
+                                   ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
        kfree(ib_conn->rx_descs);
+       /* make sure we never redo any unmapping */
+       ib_conn->rx_descs = NULL;
+
+free_login_buf:
+       iser_free_login_buf(ib_conn);
 }
 
 static int iser_post_rx_bufs(struct iscsi_conn *conn, struct iscsi_hdr *req)
@@ -248,9 +342,10 @@ static int iser_post_rx_bufs(struct iscsi_conn *conn, struct iscsi_hdr *req)
        WARN_ON(iser_conn->ib_conn->post_recv_buf_count != 1);
        WARN_ON(atomic_read(&iser_conn->ib_conn->post_send_buf_count) != 0);
 
-       iser_dbg("Initially post: %d\n", ISER_MIN_POSTED_RX);
+       iser_dbg("Initially post: %d\n", iser_conn->ib_conn->min_posted_rx);
        /* Initial post receive buffers */
-       if (iser_post_recvm(iser_conn->ib_conn, ISER_MIN_POSTED_RX))
+       if (iser_post_recvm(iser_conn->ib_conn,
+                           iser_conn->ib_conn->min_posted_rx))
                return -ENOMEM;
 
        return 0;
@@ -487,9 +582,9 @@ void iser_rcv_completion(struct iser_rx_desc *rx_desc,
                return;
 
        outstanding = ib_conn->post_recv_buf_count;
-       if (outstanding + ISER_MIN_POSTED_RX <= ISER_QP_MAX_RECV_DTOS) {
-               count = min(ISER_QP_MAX_RECV_DTOS - outstanding,
-                                               ISER_MIN_POSTED_RX);
+       if (outstanding + ib_conn->min_posted_rx <= ib_conn->qp_max_recv_dtos) {
+               count = min(ib_conn->qp_max_recv_dtos - outstanding,
+                                               ib_conn->min_posted_rx);
                err = iser_post_recvm(ib_conn, count);
                if (err)
                        iser_err("posting %d rx bufs err %d\n", count, err);
@@ -538,8 +633,8 @@ void iser_task_rdma_init(struct iscsi_iser_task *iser_task)
 
 void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
 {
+       struct iser_device *device = iser_task->iser_conn->ib_conn->device;
        int is_rdma_aligned = 1;
-       struct iser_regd_buf *regd;
 
        /* if we were reading, copy back to unaligned sglist,
         * anyway dma_unmap and free the copy
@@ -553,17 +648,11 @@ void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
                iser_finalize_rdma_unaligned_sg(iser_task, ISER_DIR_OUT);
        }
 
-       if (iser_task->dir[ISER_DIR_IN]) {
-               regd = &iser_task->rdma_regd[ISER_DIR_IN];
-               if (regd->reg.is_fmr)
-                       iser_unreg_mem(&regd->reg);
-       }
+       if (iser_task->dir[ISER_DIR_IN])
+               device->iser_unreg_rdma_mem(iser_task, ISER_DIR_IN);
 
-       if (iser_task->dir[ISER_DIR_OUT]) {
-               regd = &iser_task->rdma_regd[ISER_DIR_OUT];
-               if (regd->reg.is_fmr)
-                       iser_unreg_mem(&regd->reg);
-       }
+       if (iser_task->dir[ISER_DIR_OUT])
+               device->iser_unreg_rdma_mem(iser_task, ISER_DIR_OUT);
 
        /* if the data was unaligned, it was already unmapped and then copied */
        if (is_rdma_aligned)
index 7827baf455a1f45fa4fa20495fcf01dd370914ff..1ce0c97d2ccb894d33ef9871456726aa45b51ab9 100644 (file)
@@ -170,8 +170,8 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
  */
 
 static int iser_sg_to_page_vec(struct iser_data_buf *data,
-                              struct iser_page_vec *page_vec,
-                              struct ib_device *ibdev)
+                              struct ib_device *ibdev, u64 *pages,
+                              int *offset, int *data_size)
 {
        struct scatterlist *sg, *sgl = (struct scatterlist *)data->buf;
        u64 start_addr, end_addr, page, chunk_start = 0;
@@ -180,7 +180,7 @@ static int iser_sg_to_page_vec(struct iser_data_buf *data,
        int i, new_chunk, cur_page, last_ent = data->dma_nents - 1;
 
        /* compute the offset of first element */
-       page_vec->offset = (u64) sgl[0].offset & ~MASK_4K;
+       *offset = (u64) sgl[0].offset & ~MASK_4K;
 
        new_chunk = 1;
        cur_page  = 0;
@@ -204,13 +204,14 @@ static int iser_sg_to_page_vec(struct iser_data_buf *data,
                   which might be unaligned */
                page = chunk_start & MASK_4K;
                do {
-                       page_vec->pages[cur_page++] = page;
+                       pages[cur_page++] = page;
                        page += SIZE_4K;
                } while (page < end_addr);
        }
 
-       page_vec->data_size = total_sz;
-       iser_dbg("page_vec->data_size:%d cur_page %d\n", page_vec->data_size,cur_page);
+       *data_size = total_sz;
+       iser_dbg("page_vec->data_size:%d cur_page %d\n",
+                *data_size, cur_page);
        return cur_page;
 }
 
@@ -267,11 +268,8 @@ static void iser_data_buf_dump(struct iser_data_buf *data,
        struct scatterlist *sg;
        int i;
 
-       if (iser_debug_level == 0)
-               return;
-
        for_each_sg(sgl, sg, data->dma_nents, i)
-               iser_warn("sg[%d] dma_addr:0x%lX page:0x%p "
+               iser_dbg("sg[%d] dma_addr:0x%lX page:0x%p "
                         "off:0x%x sz:0x%x dma_len:0x%x\n",
                         i, (unsigned long)ib_sg_dma_address(ibdev, sg),
                         sg_page(sg), sg->offset,
@@ -298,8 +296,10 @@ static void iser_page_vec_build(struct iser_data_buf *data,
        page_vec->offset = 0;
 
        iser_dbg("Translating sg sz: %d\n", data->dma_nents);
-       page_vec_len = iser_sg_to_page_vec(data, page_vec, ibdev);
-       iser_dbg("sg len %d page_vec_len %d\n", data->dma_nents,page_vec_len);
+       page_vec_len = iser_sg_to_page_vec(data, ibdev, page_vec->pages,
+                                          &page_vec->offset,
+                                          &page_vec->data_size);
+       iser_dbg("sg len %d page_vec_len %d\n", data->dma_nents, page_vec_len);
 
        page_vec->length = page_vec_len;
 
@@ -347,16 +347,41 @@ void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task)
        }
 }
 
+static int fall_to_bounce_buf(struct iscsi_iser_task *iser_task,
+                             struct ib_device *ibdev,
+                             enum iser_data_dir cmd_dir,
+                             int aligned_len)
+{
+       struct iscsi_conn    *iscsi_conn = iser_task->iser_conn->iscsi_conn;
+       struct iser_data_buf *mem = &iser_task->data[cmd_dir];
+
+       iscsi_conn->fmr_unalign_cnt++;
+       iser_warn("rdma alignment violation (%d/%d aligned) or FMR not supported\n",
+                 aligned_len, mem->size);
+
+       if (iser_debug_level > 0)
+               iser_data_buf_dump(mem, ibdev);
+
+       /* unmap the command data before accessing it */
+       iser_dma_unmap_task_data(iser_task);
+
+       /* allocate copy buf, if we are writing, copy the */
+       /* unaligned scatterlist, dma map the copy        */
+       if (iser_start_rdma_unaligned_sg(iser_task, cmd_dir) != 0)
+                       return -ENOMEM;
+
+       return 0;
+}
+
 /**
- * iser_reg_rdma_mem - Registers memory intended for RDMA,
- * obtaining rkey and va
+ * iser_reg_rdma_mem_fmr - Registers memory intended for RDMA,
+ * using FMR (if possible) obtaining rkey and va
  *
  * returns 0 on success, errno code on failure
  */
-int iser_reg_rdma_mem(struct iscsi_iser_task *iser_task,
-                     enum   iser_data_dir        cmd_dir)
+int iser_reg_rdma_mem_fmr(struct iscsi_iser_task *iser_task,
+                         enum iser_data_dir cmd_dir)
 {
-       struct iscsi_conn    *iscsi_conn = iser_task->iser_conn->iscsi_conn;
        struct iser_conn     *ib_conn = iser_task->iser_conn->ib_conn;
        struct iser_device   *device = ib_conn->device;
        struct ib_device     *ibdev = device->ib_device;
@@ -370,20 +395,13 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *iser_task,
        regd_buf = &iser_task->rdma_regd[cmd_dir];
 
        aligned_len = iser_data_buf_aligned_len(mem, ibdev);
-       if (aligned_len != mem->dma_nents ||
-           (!ib_conn->fmr_pool && mem->dma_nents > 1)) {
-               iscsi_conn->fmr_unalign_cnt++;
-               iser_warn("rdma alignment violation (%d/%d aligned) or FMR not supported\n",
-                         aligned_len, mem->size);
-               iser_data_buf_dump(mem, ibdev);
-
-               /* unmap the command data before accessing it */
-               iser_dma_unmap_task_data(iser_task);
-
-               /* allocate copy buf, if we are writing, copy the */
-               /* unaligned scatterlist, dma map the copy        */
-               if (iser_start_rdma_unaligned_sg(iser_task, cmd_dir) != 0)
-                               return -ENOMEM;
+       if (aligned_len != mem->dma_nents) {
+               err = fall_to_bounce_buf(iser_task, ibdev,
+                                        cmd_dir, aligned_len);
+               if (err) {
+                       iser_err("failed to allocate bounce buffer\n");
+                       return err;
+               }
                mem = &iser_task->data_copy[cmd_dir];
        }
 
@@ -395,7 +413,7 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *iser_task,
                regd_buf->reg.rkey = device->mr->rkey;
                regd_buf->reg.len  = ib_sg_dma_len(ibdev, &sg[0]);
                regd_buf->reg.va   = ib_sg_dma_address(ibdev, &sg[0]);
-               regd_buf->reg.is_fmr = 0;
+               regd_buf->reg.is_mr = 0;
 
                iser_dbg("PHYSICAL Mem.register: lkey: 0x%08X rkey: 0x%08X  "
                         "va: 0x%08lX sz: %ld]\n",
@@ -404,22 +422,159 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *iser_task,
                         (unsigned long)regd_buf->reg.va,
                         (unsigned long)regd_buf->reg.len);
        } else { /* use FMR for multiple dma entries */
-               iser_page_vec_build(mem, ib_conn->page_vec, ibdev);
-               err = iser_reg_page_vec(ib_conn, ib_conn->page_vec, &regd_buf->reg);
+               iser_page_vec_build(mem, ib_conn->fastreg.fmr.page_vec, ibdev);
+               err = iser_reg_page_vec(ib_conn, ib_conn->fastreg.fmr.page_vec,
+                                       &regd_buf->reg);
                if (err && err != -EAGAIN) {
                        iser_data_buf_dump(mem, ibdev);
                        iser_err("mem->dma_nents = %d (dlength = 0x%x)\n",
                                 mem->dma_nents,
                                 ntoh24(iser_task->desc.iscsi_header.dlength));
                        iser_err("page_vec: data_size = 0x%x, length = %d, offset = 0x%x\n",
-                                ib_conn->page_vec->data_size, ib_conn->page_vec->length,
-                                ib_conn->page_vec->offset);
-                       for (i=0 ; i<ib_conn->page_vec->length ; i++)
+                                ib_conn->fastreg.fmr.page_vec->data_size,
+                                ib_conn->fastreg.fmr.page_vec->length,
+                                ib_conn->fastreg.fmr.page_vec->offset);
+                       for (i = 0; i < ib_conn->fastreg.fmr.page_vec->length; i++)
                                iser_err("page_vec[%d] = 0x%llx\n", i,
-                                        (unsigned long long) ib_conn->page_vec->pages[i]);
+                                        (unsigned long long) ib_conn->fastreg.fmr.page_vec->pages[i]);
                }
                if (err)
                        return err;
        }
        return 0;
 }
+
+static int iser_fast_reg_mr(struct fast_reg_descriptor *desc,
+                           struct iser_conn *ib_conn,
+                           struct iser_regd_buf *regd_buf,
+                           u32 offset, unsigned int data_size,
+                           unsigned int page_list_len)
+{
+       struct ib_send_wr fastreg_wr, inv_wr;
+       struct ib_send_wr *bad_wr, *wr = NULL;
+       u8 key;
+       int ret;
+
+       if (!desc->valid) {
+               memset(&inv_wr, 0, sizeof(inv_wr));
+               inv_wr.opcode = IB_WR_LOCAL_INV;
+               inv_wr.send_flags = IB_SEND_SIGNALED;
+               inv_wr.ex.invalidate_rkey = desc->data_mr->rkey;
+               wr = &inv_wr;
+               /* Bump the key */
+               key = (u8)(desc->data_mr->rkey & 0x000000FF);
+               ib_update_fast_reg_key(desc->data_mr, ++key);
+       }
+
+       /* Prepare FASTREG WR */
+       memset(&fastreg_wr, 0, sizeof(fastreg_wr));
+       fastreg_wr.opcode = IB_WR_FAST_REG_MR;
+       fastreg_wr.send_flags = IB_SEND_SIGNALED;
+       fastreg_wr.wr.fast_reg.iova_start = desc->data_frpl->page_list[0] + offset;
+       fastreg_wr.wr.fast_reg.page_list = desc->data_frpl;
+       fastreg_wr.wr.fast_reg.page_list_len = page_list_len;
+       fastreg_wr.wr.fast_reg.page_shift = SHIFT_4K;
+       fastreg_wr.wr.fast_reg.length = data_size;
+       fastreg_wr.wr.fast_reg.rkey = desc->data_mr->rkey;
+       fastreg_wr.wr.fast_reg.access_flags = (IB_ACCESS_LOCAL_WRITE  |
+                                              IB_ACCESS_REMOTE_WRITE |
+                                              IB_ACCESS_REMOTE_READ);
+
+       if (!wr) {
+               wr = &fastreg_wr;
+               atomic_inc(&ib_conn->post_send_buf_count);
+       } else {
+               wr->next = &fastreg_wr;
+               atomic_add(2, &ib_conn->post_send_buf_count);
+       }
+
+       ret = ib_post_send(ib_conn->qp, wr, &bad_wr);
+       if (ret) {
+               if (bad_wr->next)
+                       atomic_sub(2, &ib_conn->post_send_buf_count);
+               else
+                       atomic_dec(&ib_conn->post_send_buf_count);
+               iser_err("fast registration failed, ret:%d\n", ret);
+               return ret;
+       }
+       desc->valid = false;
+
+       regd_buf->reg.mem_h = desc;
+       regd_buf->reg.lkey = desc->data_mr->lkey;
+       regd_buf->reg.rkey = desc->data_mr->rkey;
+       regd_buf->reg.va = desc->data_frpl->page_list[0] + offset;
+       regd_buf->reg.len = data_size;
+       regd_buf->reg.is_mr = 1;
+
+       return ret;
+}
+
+/**
+ * iser_reg_rdma_mem_frwr - Registers memory intended for RDMA,
+ * using Fast Registration WR (if possible) obtaining rkey and va
+ *
+ * returns 0 on success, errno code on failure
+ */
+int iser_reg_rdma_mem_frwr(struct iscsi_iser_task *iser_task,
+                          enum iser_data_dir cmd_dir)
+{
+       struct iser_conn *ib_conn = iser_task->iser_conn->ib_conn;
+       struct iser_device *device = ib_conn->device;
+       struct ib_device *ibdev = device->ib_device;
+       struct iser_data_buf *mem = &iser_task->data[cmd_dir];
+       struct iser_regd_buf *regd_buf = &iser_task->rdma_regd[cmd_dir];
+       struct fast_reg_descriptor *desc;
+       unsigned int data_size, page_list_len;
+       int err, aligned_len;
+       unsigned long flags;
+       u32 offset;
+
+       aligned_len = iser_data_buf_aligned_len(mem, ibdev);
+       if (aligned_len != mem->dma_nents) {
+               err = fall_to_bounce_buf(iser_task, ibdev,
+                                        cmd_dir, aligned_len);
+               if (err) {
+                       iser_err("failed to allocate bounce buffer\n");
+                       return err;
+               }
+               mem = &iser_task->data_copy[cmd_dir];
+       }
+
+       /* if there a single dma entry, dma mr suffices */
+       if (mem->dma_nents == 1) {
+               struct scatterlist *sg = (struct scatterlist *)mem->buf;
+
+               regd_buf->reg.lkey = device->mr->lkey;
+               regd_buf->reg.rkey = device->mr->rkey;
+               regd_buf->reg.len  = ib_sg_dma_len(ibdev, &sg[0]);
+               regd_buf->reg.va   = ib_sg_dma_address(ibdev, &sg[0]);
+               regd_buf->reg.is_mr = 0;
+       } else {
+               spin_lock_irqsave(&ib_conn->lock, flags);
+               desc = list_first_entry(&ib_conn->fastreg.frwr.pool,
+                                       struct fast_reg_descriptor, list);
+               list_del(&desc->list);
+               spin_unlock_irqrestore(&ib_conn->lock, flags);
+               page_list_len = iser_sg_to_page_vec(mem, device->ib_device,
+                                                   desc->data_frpl->page_list,
+                                                   &offset, &data_size);
+
+               if (page_list_len * SIZE_4K < data_size) {
+                       iser_err("fast reg page_list too short to hold this SG\n");
+                       err = -EINVAL;
+                       goto err_reg;
+               }
+
+               err = iser_fast_reg_mr(desc, ib_conn, regd_buf,
+                                      offset, data_size, page_list_len);
+               if (err)
+                       goto err_reg;
+       }
+
+       return 0;
+err_reg:
+       spin_lock_irqsave(&ib_conn->lock, flags);
+       list_add_tail(&desc->list, &ib_conn->fastreg.frwr.pool);
+       spin_unlock_irqrestore(&ib_conn->lock, flags);
+       return err;
+}
index 2c4941d0656b2e389cb0d38f978682ce73388311..28badacb01341ff849b75ee0fb0622731c97cb00 100644 (file)
@@ -73,6 +73,36 @@ static int iser_create_device_ib_res(struct iser_device *device)
 {
        int i, j;
        struct iser_cq_desc *cq_desc;
+       struct ib_device_attr *dev_attr;
+
+       dev_attr = kmalloc(sizeof(*dev_attr), GFP_KERNEL);
+       if (!dev_attr)
+               return -ENOMEM;
+
+       if (ib_query_device(device->ib_device, dev_attr)) {
+               pr_warn("Query device failed for %s\n", device->ib_device->name);
+               goto dev_attr_err;
+       }
+
+       /* Assign function handles  - based on FMR support */
+       if (device->ib_device->alloc_fmr && device->ib_device->dealloc_fmr &&
+           device->ib_device->map_phys_fmr && device->ib_device->unmap_fmr) {
+               iser_info("FMR supported, using FMR for registration\n");
+               device->iser_alloc_rdma_reg_res = iser_create_fmr_pool;
+               device->iser_free_rdma_reg_res = iser_free_fmr_pool;
+               device->iser_reg_rdma_mem = iser_reg_rdma_mem_fmr;
+               device->iser_unreg_rdma_mem = iser_unreg_mem_fmr;
+       } else
+       if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
+               iser_info("FRWR supported, using FRWR for registration\n");
+               device->iser_alloc_rdma_reg_res = iser_create_frwr_pool;
+               device->iser_free_rdma_reg_res = iser_free_frwr_pool;
+               device->iser_reg_rdma_mem = iser_reg_rdma_mem_frwr;
+               device->iser_unreg_rdma_mem = iser_unreg_mem_frwr;
+       } else {
+               iser_err("IB device does not support FMRs nor FRWRs, can't register memory\n");
+               goto dev_attr_err;
+       }
 
        device->cqs_used = min(ISER_MAX_CQ, device->ib_device->num_comp_vectors);
        iser_info("using %d CQs, device %s supports %d vectors\n",
@@ -128,6 +158,7 @@ static int iser_create_device_ib_res(struct iser_device *device)
        if (ib_register_event_handler(&device->event_handler))
                goto handler_err;
 
+       kfree(dev_attr);
        return 0;
 
 handler_err:
@@ -147,6 +178,8 @@ pd_err:
        kfree(device->cq_desc);
 cq_desc_err:
        iser_err("failed to allocate an IB resource\n");
+dev_attr_err:
+       kfree(dev_attr);
        return -1;
 }
 
@@ -178,56 +211,23 @@ static void iser_free_device_ib_res(struct iser_device *device)
 }
 
 /**
- * iser_create_ib_conn_res - Creates FMR pool and Queue-Pair (QP)
+ * iser_create_fmr_pool - Creates FMR pool and page_vector
  *
- * returns 0 on success, -1 on failure
+ * returns 0 on success, or errno code on failure
  */
-static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
+int iser_create_fmr_pool(struct iser_conn *ib_conn, unsigned cmds_max)
 {
-       struct iser_device      *device;
-       struct ib_qp_init_attr  init_attr;
-       int                     req_err, resp_err, ret = -ENOMEM;
+       struct iser_device *device = ib_conn->device;
        struct ib_fmr_pool_param params;
-       int index, min_index = 0;
+       int ret = -ENOMEM;
 
-       BUG_ON(ib_conn->device == NULL);
-
-       device = ib_conn->device;
-
-       ib_conn->login_buf = kmalloc(ISCSI_DEF_MAX_RECV_SEG_LEN +
-                                       ISER_RX_LOGIN_SIZE, GFP_KERNEL);
-       if (!ib_conn->login_buf)
-               goto out_err;
-
-       ib_conn->login_req_buf  = ib_conn->login_buf;
-       ib_conn->login_resp_buf = ib_conn->login_buf + ISCSI_DEF_MAX_RECV_SEG_LEN;
+       ib_conn->fastreg.fmr.page_vec = kmalloc(sizeof(struct iser_page_vec) +
+                                               (sizeof(u64)*(ISCSI_ISER_SG_TABLESIZE + 1)),
+                                               GFP_KERNEL);
+       if (!ib_conn->fastreg.fmr.page_vec)
+               return ret;
 
-       ib_conn->login_req_dma = ib_dma_map_single(ib_conn->device->ib_device,
-                               (void *)ib_conn->login_req_buf,
-                               ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_TO_DEVICE);
-
-       ib_conn->login_resp_dma = ib_dma_map_single(ib_conn->device->ib_device,
-                               (void *)ib_conn->login_resp_buf,
-                               ISER_RX_LOGIN_SIZE, DMA_FROM_DEVICE);
-
-       req_err  = ib_dma_mapping_error(device->ib_device, ib_conn->login_req_dma);
-       resp_err = ib_dma_mapping_error(device->ib_device, ib_conn->login_resp_dma);
-
-       if (req_err || resp_err) {
-               if (req_err)
-                       ib_conn->login_req_dma = 0;
-               if (resp_err)
-                       ib_conn->login_resp_dma = 0;
-               goto out_err;
-       }
-
-       ib_conn->page_vec = kmalloc(sizeof(struct iser_page_vec) +
-                                   (sizeof(u64) * (ISCSI_ISER_SG_TABLESIZE +1)),
-                                   GFP_KERNEL);
-       if (!ib_conn->page_vec)
-               goto out_err;
-
-       ib_conn->page_vec->pages = (u64 *) (ib_conn->page_vec + 1);
+       ib_conn->fastreg.fmr.page_vec->pages = (u64 *)(ib_conn->fastreg.fmr.page_vec + 1);
 
        params.page_shift        = SHIFT_4K;
        /* when the first/last SG element are not start/end *
@@ -235,25 +235,140 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
        params.max_pages_per_fmr = ISCSI_ISER_SG_TABLESIZE + 1;
        /* make the pool size twice the max number of SCSI commands *
         * the ML is expected to queue, watermark for unmap at 50%  */
-       params.pool_size         = ISCSI_DEF_XMIT_CMDS_MAX * 2;
-       params.dirty_watermark   = ISCSI_DEF_XMIT_CMDS_MAX;
+       params.pool_size         = cmds_max * 2;
+       params.dirty_watermark   = cmds_max;
        params.cache             = 0;
        params.flush_function    = NULL;
        params.access            = (IB_ACCESS_LOCAL_WRITE  |
                                    IB_ACCESS_REMOTE_WRITE |
                                    IB_ACCESS_REMOTE_READ);
 
-       ib_conn->fmr_pool = ib_create_fmr_pool(device->pd, &params);
-       ret = PTR_ERR(ib_conn->fmr_pool);
-       if (IS_ERR(ib_conn->fmr_pool) && ret != -ENOSYS) {
-               ib_conn->fmr_pool = NULL;
-               goto out_err;
-       } else if (ret == -ENOSYS) {
-               ib_conn->fmr_pool = NULL;
+       ib_conn->fastreg.fmr.pool = ib_create_fmr_pool(device->pd, &params);
+       if (!IS_ERR(ib_conn->fastreg.fmr.pool))
+               return 0;
+
+       /* no FMR => no need for page_vec */
+       kfree(ib_conn->fastreg.fmr.page_vec);
+       ib_conn->fastreg.fmr.page_vec = NULL;
+
+       ret = PTR_ERR(ib_conn->fastreg.fmr.pool);
+       ib_conn->fastreg.fmr.pool = NULL;
+       if (ret != -ENOSYS) {
+               iser_err("FMR allocation failed, err %d\n", ret);
+               return ret;
+       } else {
                iser_warn("FMRs are not supported, using unaligned mode\n");
-               ret = 0;
+               return 0;
+       }
+}
+
+/**
+ * iser_free_fmr_pool - releases the FMR pool and page vec
+ */
+void iser_free_fmr_pool(struct iser_conn *ib_conn)
+{
+       iser_info("freeing conn %p fmr pool %p\n",
+                 ib_conn, ib_conn->fastreg.fmr.pool);
+
+       if (ib_conn->fastreg.fmr.pool != NULL)
+               ib_destroy_fmr_pool(ib_conn->fastreg.fmr.pool);
+
+       ib_conn->fastreg.fmr.pool = NULL;
+
+       kfree(ib_conn->fastreg.fmr.page_vec);
+       ib_conn->fastreg.fmr.page_vec = NULL;
+}
+
+/**
+ * iser_create_frwr_pool - Creates pool of fast_reg descriptors
+ * for fast registration work requests.
+ * returns 0 on success, or errno code on failure
+ */
+int iser_create_frwr_pool(struct iser_conn *ib_conn, unsigned cmds_max)
+{
+       struct iser_device      *device = ib_conn->device;
+       struct fast_reg_descriptor      *desc;
+       int i, ret;
+
+       INIT_LIST_HEAD(&ib_conn->fastreg.frwr.pool);
+       ib_conn->fastreg.frwr.pool_size = 0;
+       for (i = 0; i < cmds_max; i++) {
+               desc = kmalloc(sizeof(*desc), GFP_KERNEL);
+               if (!desc) {
+                       iser_err("Failed to allocate a new fast_reg descriptor\n");
+                       ret = -ENOMEM;
+                       goto err;
+               }
+
+               desc->data_frpl = ib_alloc_fast_reg_page_list(device->ib_device,
+                                                        ISCSI_ISER_SG_TABLESIZE + 1);
+               if (IS_ERR(desc->data_frpl)) {
+                       ret = PTR_ERR(desc->data_frpl);
+                       iser_err("Failed to allocate ib_fast_reg_page_list err=%d\n", ret);
+                       goto err;
+               }
+
+               desc->data_mr = ib_alloc_fast_reg_mr(device->pd,
+                                                    ISCSI_ISER_SG_TABLESIZE + 1);
+               if (IS_ERR(desc->data_mr)) {
+                       ret = PTR_ERR(desc->data_mr);
+                       iser_err("Failed to allocate ib_fast_reg_mr err=%d\n", ret);
+                       ib_free_fast_reg_page_list(desc->data_frpl);
+                       goto err;
+               }
+               desc->valid = true;
+               list_add_tail(&desc->list, &ib_conn->fastreg.frwr.pool);
+               ib_conn->fastreg.frwr.pool_size++;
+       }
+
+       return 0;
+err:
+       iser_free_frwr_pool(ib_conn);
+       return ret;
+}
+
+/**
+ * iser_free_frwr_pool - releases the pool of fast_reg descriptors
+ */
+void iser_free_frwr_pool(struct iser_conn *ib_conn)
+{
+       struct fast_reg_descriptor *desc, *tmp;
+       int i = 0;
+
+       if (list_empty(&ib_conn->fastreg.frwr.pool))
+               return;
+
+       iser_info("freeing conn %p frwr pool\n", ib_conn);
+
+       list_for_each_entry_safe(desc, tmp, &ib_conn->fastreg.frwr.pool, list) {
+               list_del(&desc->list);
+               ib_free_fast_reg_page_list(desc->data_frpl);
+               ib_dereg_mr(desc->data_mr);
+               kfree(desc);
+               ++i;
        }
 
+       if (i < ib_conn->fastreg.frwr.pool_size)
+               iser_warn("pool still has %d regions registered\n",
+                         ib_conn->fastreg.frwr.pool_size - i);
+}
+
+/**
+ * iser_create_ib_conn_res - Queue-Pair (QP)
+ *
+ * returns 0 on success, -1 on failure
+ */
+static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
+{
+       struct iser_device      *device;
+       struct ib_qp_init_attr  init_attr;
+       int                     ret = -ENOMEM;
+       int index, min_index = 0;
+
+       BUG_ON(ib_conn->device == NULL);
+
+       device = ib_conn->device;
+
        memset(&init_attr, 0, sizeof init_attr);
 
        mutex_lock(&ig.connlist_mutex);
@@ -282,9 +397,9 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
                goto out_err;
 
        ib_conn->qp = ib_conn->cma_id->qp;
-       iser_info("setting conn %p cma_id %p: fmr_pool %p qp %p\n",
+       iser_info("setting conn %p cma_id %p qp %p\n",
                  ib_conn, ib_conn->cma_id,
-                 ib_conn->fmr_pool, ib_conn->cma_id->qp);
+                 ib_conn->cma_id->qp);
        return ret;
 
 out_err:
@@ -293,7 +408,7 @@ out_err:
 }
 
 /**
- * releases the FMR pool and QP objects, returns 0 on success,
+ * releases the QP objects, returns 0 on success,
  * -1 on failure
  */
 static int iser_free_ib_conn_res(struct iser_conn *ib_conn)
@@ -301,13 +416,11 @@ static int iser_free_ib_conn_res(struct iser_conn *ib_conn)
        int cq_index;
        BUG_ON(ib_conn == NULL);
 
-       iser_info("freeing conn %p cma_id %p fmr pool %p qp %p\n",
+       iser_info("freeing conn %p cma_id %p qp %p\n",
                  ib_conn, ib_conn->cma_id,
-                 ib_conn->fmr_pool, ib_conn->qp);
+                 ib_conn->qp);
 
        /* qp is created only once both addr & route are resolved */
-       if (ib_conn->fmr_pool != NULL)
-               ib_destroy_fmr_pool(ib_conn->fmr_pool);
 
        if (ib_conn->qp != NULL) {
                cq_index = ((struct iser_cq_desc *)ib_conn->qp->recv_cq->cq_context)->cq_index;
@@ -316,21 +429,7 @@ static int iser_free_ib_conn_res(struct iser_conn *ib_conn)
                rdma_destroy_qp(ib_conn->cma_id);
        }
 
-       ib_conn->fmr_pool = NULL;
        ib_conn->qp       = NULL;
-       kfree(ib_conn->page_vec);
-
-       if (ib_conn->login_buf) {
-               if (ib_conn->login_req_dma)
-                       ib_dma_unmap_single(ib_conn->device->ib_device,
-                               ib_conn->login_req_dma,
-                               ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_TO_DEVICE);
-               if (ib_conn->login_resp_dma)
-                       ib_dma_unmap_single(ib_conn->device->ib_device,
-                               ib_conn->login_resp_dma,
-                               ISER_RX_LOGIN_SIZE, DMA_FROM_DEVICE);
-               kfree(ib_conn->login_buf);
-       }
 
        return 0;
 }
@@ -694,7 +793,7 @@ int iser_reg_page_vec(struct iser_conn     *ib_conn,
        page_list = page_vec->pages;
        io_addr   = page_list[0];
 
-       mem  = ib_fmr_pool_map_phys(ib_conn->fmr_pool,
+       mem  = ib_fmr_pool_map_phys(ib_conn->fastreg.fmr.pool,
                                    page_list,
                                    page_vec->length,
                                    io_addr);
@@ -709,7 +808,7 @@ int iser_reg_page_vec(struct iser_conn     *ib_conn,
        mem_reg->rkey  = mem->fmr->rkey;
        mem_reg->len   = page_vec->length * SIZE_4K;
        mem_reg->va    = io_addr;
-       mem_reg->is_fmr = 1;
+       mem_reg->is_mr = 1;
        mem_reg->mem_h = (void *)mem;
 
        mem_reg->va   += page_vec->offset;
@@ -727,12 +826,18 @@ int iser_reg_page_vec(struct iser_conn     *ib_conn,
 }
 
 /**
- * Unregister (previosuly registered) memory.
+ * Unregister (previosuly registered using FMR) memory.
+ * If memory is non-FMR does nothing.
  */
-void iser_unreg_mem(struct iser_mem_reg *reg)
+void iser_unreg_mem_fmr(struct iscsi_iser_task *iser_task,
+                       enum iser_data_dir cmd_dir)
 {
+       struct iser_mem_reg *reg = &iser_task->rdma_regd[cmd_dir].reg;
        int ret;
 
+       if (!reg->is_mr)
+               return;
+
        iser_dbg("PHYSICAL Mem.Unregister mem_h %p\n",reg->mem_h);
 
        ret = ib_fmr_pool_unmap((struct ib_pool_fmr *)reg->mem_h);
@@ -742,6 +847,23 @@ void iser_unreg_mem(struct iser_mem_reg *reg)
        reg->mem_h = NULL;
 }
 
+void iser_unreg_mem_frwr(struct iscsi_iser_task *iser_task,
+                        enum iser_data_dir cmd_dir)
+{
+       struct iser_mem_reg *reg = &iser_task->rdma_regd[cmd_dir].reg;
+       struct iser_conn *ib_conn = iser_task->iser_conn->ib_conn;
+       struct fast_reg_descriptor *desc = reg->mem_h;
+
+       if (!reg->is_mr)
+               return;
+
+       reg->mem_h = NULL;
+       reg->is_mr = 0;
+       spin_lock_bh(&ib_conn->lock);
+       list_add_tail(&desc->list, &ib_conn->fastreg.frwr.pool);
+       spin_unlock_bh(&ib_conn->lock);
+}
+
 int iser_post_recvl(struct iser_conn *ib_conn)
 {
        struct ib_recv_wr rx_wr, *rx_wr_failed;
@@ -779,7 +901,7 @@ int iser_post_recvm(struct iser_conn *ib_conn, int count)
                rx_wr->sg_list  = &rx_desc->rx_sg;
                rx_wr->num_sge  = 1;
                rx_wr->next     = rx_wr + 1;
-               my_rx_head = (my_rx_head + 1) & (ISER_QP_MAX_RECV_DTOS - 1);
+               my_rx_head = (my_rx_head + 1) & ib_conn->qp_max_recv_dtos_mask;
        }
 
        rx_wr--;
@@ -863,7 +985,11 @@ static int iser_drain_tx_cq(struct iser_device  *device, int cq_index)
                if (wc.status == IB_WC_SUCCESS) {
                        if (wc.opcode == IB_WC_SEND)
                                iser_snd_completion(tx_desc, ib_conn);
-                       else
+                       else if (wc.opcode == IB_WC_LOCAL_INV ||
+                                wc.opcode == IB_WC_FAST_REG_MR) {
+                               atomic_dec(&ib_conn->post_send_buf_count);
+                               continue;
+                       } else
                                iser_err("expected opcode %d got %d\n",
                                        IB_WC_SEND, wc.opcode);
                } else {
index 121cd63d3334b7a89b6a928fe62c8227b6f07231..13eba2a8c4e053f98e6e6816e98ad01ed1aa4837 100644 (file)
@@ -234,7 +234,7 @@ static int as5011_probe(struct i2c_client *client,
        int irq;
        int error;
 
-       plat_data = client->dev.platform_data;
+       plat_data = dev_get_platdata(&client->dev);
        if (!plat_data)
                return -EINVAL;
 
index 59c10ec5a2a13310ef667c96d9cb8675f739368c..8aa6e4c497da77c70df1827934b696c2bc4c0159 100644 (file)
@@ -61,7 +61,7 @@ static void dc_pad_callback(struct mapleq *mq)
 
 static int dc_pad_open(struct input_dev *dev)
 {
-       struct dc_pad *pad = dev->dev.platform_data;
+       struct dc_pad *pad = dev_get_platdata(&dev->dev);
 
        maple_getcond_callback(pad->mdev, dc_pad_callback, HZ/20,
                MAPLE_FUNC_CONTROLLER);
@@ -71,7 +71,7 @@ static int dc_pad_open(struct input_dev *dev)
 
 static void dc_pad_close(struct input_dev *dev)
 {
-       struct dc_pad *pad = dev->dev.platform_data;
+       struct dc_pad *pad = dev_get_platdata(&dev->dev);
 
        maple_getcond_callback(pad->mdev, dc_pad_callback, 0,
                MAPLE_FUNC_CONTROLLER);
index 03c8cc5cb6c144433c2883f12ca8685ba4bc7c26..328cfc1eed95dac7d8452547a389909d0320e35a 100644 (file)
@@ -442,12 +442,6 @@ static int imx_keypad_probe(struct platform_device *pdev)
                return -EINVAL;
        }
 
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (res == NULL) {
-               dev_err(&pdev->dev, "no I/O memory defined in platform data\n");
-               return -EINVAL;
-       }
-
        input_dev = devm_input_allocate_device(&pdev->dev);
        if (!input_dev) {
                dev_err(&pdev->dev, "failed to allocate the input device\n");
@@ -468,6 +462,7 @@ static int imx_keypad_probe(struct platform_device *pdev)
        setup_timer(&keypad->check_matrix_timer,
                    imx_keypad_check_for_events, (unsigned long) keypad);
 
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        keypad->mmio_base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(keypad->mmio_base))
                return PTR_ERR(keypad->mmio_base);
index 7c7af2b01e6566fd6ca8781bc6bc0154c4f14426..bc2cdaf563fd76da86c9318f0159a9d7a492590f 100644 (file)
@@ -271,7 +271,7 @@ static int max7359_remove(struct i2c_client *client)
        return 0;
 }
 
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 static int max7359_suspend(struct device *dev)
 {
        struct i2c_client *client = to_i2c_client(dev);
index 20d872d6f6031ec78dc4f226a4b02e991a90900f..b3e3edab6d9f208050bf029a85c0b63ab755969b 100644 (file)
@@ -171,12 +171,6 @@ static int nspire_keypad_probe(struct platform_device *pdev)
                return -EINVAL;
        }
 
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res) {
-               dev_err(&pdev->dev, "missing platform resources\n");
-               return -EINVAL;
-       }
-
        keypad = devm_kzalloc(&pdev->dev, sizeof(struct nspire_keypad),
                              GFP_KERNEL);
        if (!keypad) {
@@ -208,6 +202,7 @@ static int nspire_keypad_probe(struct platform_device *pdev)
                return PTR_ERR(keypad->clk);
        }
 
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        keypad->reg_base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(keypad->reg_base))
                return PTR_ERR(keypad->reg_base);
index 42b773b3125a03702ed86350f12fbd685d5e2952..6c561ec3cc09853240ad1cc0f3bbc6902085ff95 100644 (file)
@@ -243,6 +243,32 @@ static int qt1070_remove(struct i2c_client *client)
        return 0;
 }
 
+#ifdef CONFIG_PM_SLEEP
+static int qt1070_suspend(struct device *dev)
+{
+       struct i2c_client *client = to_i2c_client(dev);
+       struct qt1070_data *data = i2c_get_clientdata(client);
+
+       if (device_may_wakeup(dev))
+               enable_irq_wake(data->irq);
+
+       return 0;
+}
+
+static int qt1070_resume(struct device *dev)
+{
+       struct i2c_client *client = to_i2c_client(dev);
+       struct qt1070_data *data = i2c_get_clientdata(client);
+
+       if (device_may_wakeup(dev))
+               disable_irq_wake(data->irq);
+
+       return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(qt1070_pm_ops, qt1070_suspend, qt1070_resume);
+
 static const struct i2c_device_id qt1070_id[] = {
        { "qt1070", 0 },
        { },
@@ -253,6 +279,7 @@ static struct i2c_driver qt1070_driver = {
        .driver = {
                .name   = "qt1070",
                .owner  = THIS_MODULE,
+               .pm     = &qt1070_pm_ops,
        },
        .id_table       = qt1070_id,
        .probe          = qt1070_probe,
index 7111124b53621de7c5de1df601f1d1fba6f8505f..85ff530d9a913c00741f39bf0082f701e4a59d91 100644 (file)
@@ -191,12 +191,6 @@ static int spear_kbd_probe(struct platform_device *pdev)
        int irq;
        int error;
 
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res) {
-               dev_err(&pdev->dev, "no keyboard resource defined\n");
-               return -EBUSY;
-       }
-
        irq = platform_get_irq(pdev, 0);
        if (irq < 0) {
                dev_err(&pdev->dev, "not able to get irq for the device\n");
@@ -228,6 +222,7 @@ static int spear_kbd_probe(struct platform_device *pdev)
                kbd->suspended_rate = pdata->suspended_rate;
        }
 
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        kbd->io_base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(kbd->io_base))
                return PTR_ERR(kbd->io_base);
index b46142f78ef24fec15147c190880b01bffb458bd..9cd20e6905a08274bff3d55a48fa7e26d8f2b2c6 100644 (file)
@@ -638,12 +638,6 @@ static int tegra_kbc_probe(struct platform_device *pdev)
        if (!tegra_kbc_check_pin_cfg(kbc, &num_rows))
                return -EINVAL;
 
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res) {
-               dev_err(&pdev->dev, "failed to get I/O memory\n");
-               return -ENXIO;
-       }
-
        kbc->irq = platform_get_irq(pdev, 0);
        if (kbc->irq < 0) {
                dev_err(&pdev->dev, "failed to get keyboard IRQ\n");
@@ -658,6 +652,7 @@ static int tegra_kbc_probe(struct platform_device *pdev)
 
        setup_timer(&kbc->timer, tegra_kbc_keypress_timer, (unsigned long)kbc);
 
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        kbc->mmio = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(kbc->mmio))
                return PTR_ERR(kbc->mmio);
index a37f0c909aba7ad34ce95eaa9c23f3bdedd96b4b..2ff4d1c78ab8ff91c4338c6e40d6ff91fac349c3 100644 (file)
@@ -143,7 +143,7 @@ static int pwm_beeper_remove(struct platform_device *pdev)
        return 0;
 }
 
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 static int pwm_beeper_suspend(struct device *dev)
 {
        struct pwm_beeper *beeper = dev_get_drvdata(dev);
index 0c2dfc8e96918c3e022aad1920ece05e1115abd8..7864b0c3ebb3f2f2ad571cb04ac29b67572aaa85 100644 (file)
@@ -257,7 +257,6 @@ static SIMPLE_DEV_PM_OPS(twl6040_vibra_pm_ops, twl6040_vibra_suspend, NULL);
 
 static int twl6040_vibra_probe(struct platform_device *pdev)
 {
-       struct twl6040_vibra_data *pdata = pdev->dev.platform_data;
        struct device *twl6040_core_dev = pdev->dev.parent;
        struct device_node *twl6040_core_node = NULL;
        struct vibra_info *info;
@@ -270,8 +269,8 @@ static int twl6040_vibra_probe(struct platform_device *pdev)
                                                 "vibra");
 #endif
 
-       if (!pdata && !twl6040_core_node) {
-               dev_err(&pdev->dev, "platform_data not available\n");
+       if (!twl6040_core_node) {
+               dev_err(&pdev->dev, "parent of node is missing?\n");
                return -EINVAL;
        }
 
@@ -284,27 +283,17 @@ static int twl6040_vibra_probe(struct platform_device *pdev)
        info->dev = &pdev->dev;
 
        info->twl6040 = dev_get_drvdata(pdev->dev.parent);
-       if (pdata) {
-               info->vibldrv_res = pdata->vibldrv_res;
-               info->vibrdrv_res = pdata->vibrdrv_res;
-               info->viblmotor_res = pdata->viblmotor_res;
-               info->vibrmotor_res = pdata->vibrmotor_res;
-               vddvibl_uV = pdata->vddvibl_uV;
-               vddvibr_uV = pdata->vddvibr_uV;
-       } else {
-               of_property_read_u32(twl6040_core_node, "ti,vibldrv-res",
-                                    &info->vibldrv_res);
-               of_property_read_u32(twl6040_core_node, "ti,vibrdrv-res",
-                                    &info->vibrdrv_res);
-               of_property_read_u32(twl6040_core_node, "ti,viblmotor-res",
-                                    &info->viblmotor_res);
-               of_property_read_u32(twl6040_core_node, "ti,vibrmotor-res",
-                                    &info->vibrmotor_res);
-               of_property_read_u32(twl6040_core_node, "ti,vddvibl-uV",
-                                    &vddvibl_uV);
-               of_property_read_u32(twl6040_core_node, "ti,vddvibr-uV",
-                                    &vddvibr_uV);
-       }
+
+       of_property_read_u32(twl6040_core_node, "ti,vibldrv-res",
+                            &info->vibldrv_res);
+       of_property_read_u32(twl6040_core_node, "ti,vibrdrv-res",
+                            &info->vibrdrv_res);
+       of_property_read_u32(twl6040_core_node, "ti,viblmotor-res",
+                            &info->viblmotor_res);
+       of_property_read_u32(twl6040_core_node, "ti,vibrmotor-res",
+                            &info->vibrmotor_res);
+       of_property_read_u32(twl6040_core_node, "ti,vddvibl-uV", &vddvibl_uV);
+       of_property_read_u32(twl6040_core_node, "ti,vddvibr-uV", &vddvibr_uV);
 
        if ((!info->vibldrv_res && !info->viblmotor_res) ||
            (!info->vibrdrv_res && !info->vibrmotor_res)) {
@@ -334,8 +323,8 @@ static int twl6040_vibra_probe(struct platform_device *pdev)
         * When booted with Device tree the regulators are attached to the
         * parent device (twl6040 MFD core)
         */
-       ret = regulator_bulk_get(pdata ? info->dev : twl6040_core_dev,
-                                ARRAY_SIZE(info->supplies), info->supplies);
+       ret = regulator_bulk_get(twl6040_core_dev, ARRAY_SIZE(info->supplies),
+                                info->supplies);
        if (ret) {
                dev_err(info->dev, "couldn't get regulators %d\n", ret);
                return ret;
index 56536f4b9572c07757232f63430768cc8df9d6fd..b6505454bcc4575f810b468d81c419a6f8ff611c 100644 (file)
@@ -46,7 +46,6 @@
 MODULE_AUTHOR("Miloslav Trmac <mitr@volny.cz>");
 MODULE_DESCRIPTION("Wistron laptop button driver");
 MODULE_LICENSE("GPL v2");
-MODULE_VERSION("0.3");
 
 static bool force; /* = 0; */
 module_param(force, bool, 0);
@@ -563,7 +562,7 @@ static struct key_entry keymap_wistron_md96500[] __initdata = {
        { KE_KEY, 0x36, {KEY_WWW} },
        { KE_WIFI, 0x30 },
        { KE_BLUETOOTH, 0x44 },
-       { KE_END, FE_UNTESTED }
+       { KE_END, 0 }
 };
 
 static struct key_entry keymap_wistron_generic[] __initdata = {
@@ -635,7 +634,7 @@ static struct key_entry keymap_prestigio[] __initdata = {
  * a list of buttons and their key codes (reported when loading this module
  * with force=1) and the output of dmidecode to $MODULE_AUTHOR.
  */
-static const struct dmi_system_id __initconst dmi_ids[] = {
+static const struct dmi_system_id dmi_ids[] __initconst = {
        {
                /* Fujitsu-Siemens Amilo Pro V2000 */
                .callback = dmi_matched,
@@ -972,6 +971,7 @@ static const struct dmi_system_id __initconst dmi_ids[] = {
        },
        { NULL, }
 };
+MODULE_DEVICE_TABLE(dmi, dmi_ids);
 
 /* Copy the good keymap, as the original ones are free'd */
 static int __init copy_keymap(void)
index 57b2637e153a1603db3ed62bc4968135b7b0f868..8551dcaf24dbadc854a7f6a6664128598c99a067 100644 (file)
@@ -672,6 +672,7 @@ static int elantech_packet_check_v2(struct psmouse *psmouse)
  */
 static int elantech_packet_check_v3(struct psmouse *psmouse)
 {
+       struct elantech_data *etd = psmouse->private;
        const u8 debounce_packet[] = { 0xc4, 0xff, 0xff, 0x02, 0xff, 0xff };
        unsigned char *packet = psmouse->packet;
 
@@ -682,19 +683,48 @@ static int elantech_packet_check_v3(struct psmouse *psmouse)
        if (!memcmp(packet, debounce_packet, sizeof(debounce_packet)))
                return PACKET_DEBOUNCE;
 
-       if ((packet[0] & 0x0c) == 0x04 && (packet[3] & 0xcf) == 0x02)
-               return PACKET_V3_HEAD;
+       /*
+        * If the hardware flag 'crc_enabled' is set the packets have
+        * different signatures.
+        */
+       if (etd->crc_enabled) {
+               if ((packet[3] & 0x09) == 0x08)
+                       return PACKET_V3_HEAD;
+
+               if ((packet[3] & 0x09) == 0x09)
+                       return PACKET_V3_TAIL;
+       } else {
+               if ((packet[0] & 0x0c) == 0x04 && (packet[3] & 0xcf) == 0x02)
+                       return PACKET_V3_HEAD;
 
-       if ((packet[0] & 0x0c) == 0x0c && (packet[3] & 0xce) == 0x0c)
-               return PACKET_V3_TAIL;
+               if ((packet[0] & 0x0c) == 0x0c && (packet[3] & 0xce) == 0x0c)
+                       return PACKET_V3_TAIL;
+       }
 
        return PACKET_UNKNOWN;
 }
 
 static int elantech_packet_check_v4(struct psmouse *psmouse)
 {
+       struct elantech_data *etd = psmouse->private;
        unsigned char *packet = psmouse->packet;
        unsigned char packet_type = packet[3] & 0x03;
+       bool sanity_check;
+
+       /*
+        * Sanity check based on the constant bits of a packet.
+        * The constant bits change depending on the value of
+        * the hardware flag 'crc_enabled' but are the same for
+        * every packet, regardless of the type.
+        */
+       if (etd->crc_enabled)
+               sanity_check = ((packet[3] & 0x08) == 0x00);
+       else
+               sanity_check = ((packet[0] & 0x0c) == 0x04 &&
+                               (packet[3] & 0x1c) == 0x10);
+
+       if (!sanity_check)
+               return PACKET_UNKNOWN;
 
        switch (packet_type) {
        case 0:
@@ -1313,6 +1343,12 @@ static int elantech_set_properties(struct elantech_data *etd)
                        etd->reports_pressure = true;
        }
 
+       /*
+        * The signatures of v3 and v4 packets change depending on the
+        * value of this hardware flag.
+        */
+       etd->crc_enabled = ((etd->fw_version & 0x4000) == 0x4000);
+
        return 0;
 }
 
index 46db3be45ac988710a821450bd37f5127bfc90c3..036a04abaef72314a8dcc7d444f6ab3feea377f9 100644 (file)
@@ -129,6 +129,7 @@ struct elantech_data {
        bool paritycheck;
        bool jumpy_cursor;
        bool reports_pressure;
+       bool crc_enabled;
        unsigned char hw_version;
        unsigned int fw_version;
        unsigned int single_finger_reports;
index 2c4db636de6cab98aeffefbe38cc34e3607d8233..23222dd5a66fb146d7b53774818c750050fb6511 100644 (file)
@@ -44,7 +44,7 @@ static int lifebook_set_6byte_proto(const struct dmi_system_id *d)
        return 1;
 }
 
-static const struct dmi_system_id __initconst lifebook_dmi_table[] = {
+static const struct dmi_system_id lifebook_dmi_table[] __initconst = {
        {
                /* FLORA-ie 55mi */
                .matches = {
index b2420ae19e148039147a33081a651d718e8d2e4d..26386f9d25696841584f4b7f698a3cc8727fdc54 100644 (file)
@@ -1433,7 +1433,7 @@ static int synaptics_reconnect(struct psmouse *psmouse)
 
 static bool impaired_toshiba_kbc;
 
-static const struct dmi_system_id __initconst toshiba_dmi_table[] = {
+static const struct dmi_system_id toshiba_dmi_table[] __initconst = {
 #if defined(CONFIG_DMI) && defined(CONFIG_X86)
        {
                /* Toshiba Satellite */
@@ -1472,7 +1472,7 @@ static const struct dmi_system_id __initconst toshiba_dmi_table[] = {
 
 static bool broken_olpc_ec;
 
-static const struct dmi_system_id __initconst olpc_dmi_table[] = {
+static const struct dmi_system_id olpc_dmi_table[] __initconst = {
 #if defined(CONFIG_DMI) && defined(CONFIG_OLPC)
        {
                /* OLPC XO-1 or XO-1.5 */
index 3fb7727c8ea5679b9a443dc729d568d02c0788c6..8024a6d7fccbc8034b07b9b0bc9e0087275e1d94 100644 (file)
@@ -189,12 +189,6 @@ static int arc_ps2_probe(struct platform_device *pdev)
        int irq;
        int error, id, i;
 
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res) {
-               dev_err(&pdev->dev, "no IO memory defined\n");
-               return -EINVAL;
-       }
-
        irq = platform_get_irq_byname(pdev, "arc_ps2_irq");
        if (irq < 0) {
                dev_err(&pdev->dev, "no IRQ defined\n");
@@ -208,6 +202,7 @@ static int arc_ps2_probe(struct platform_device *pdev)
                return -ENOMEM;
        }
 
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        arc_ps2->addr = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(arc_ps2->addr))
                return PTR_ERR(arc_ps2->addr);
index 818aa466b5d2fb2378836b188d7d310dda6823f5..51b1d40cc286aaf224ac3e67cd500ae335bc129d 100644 (file)
@@ -183,9 +183,6 @@ static int olpc_apsp_probe(struct platform_device *pdev)
 
        np = pdev->dev.of_node;
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res)
-               return -ENOENT;
-
        priv->base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(priv->base)) {
                dev_err(&pdev->dev, "Failed to map WTM registers\n");
index aaf23aeae2ea428b4a26ea0cd99f20eeac32c964..1ad3e07986b4612d38ebb281eeb4fd5beda4c4e8 100644 (file)
@@ -221,39 +221,6 @@ static int wacom_calc_hid_res(int logical_extents, int physical_extents,
        return logical_extents / physical_extents;
 }
 
-/*
- * The physical dimension specified by the HID descriptor is likely not in
- * the "100th of a mm" units expected by wacom_calculate_touch_res. This
- * function adjusts the value of [xy]_phy based on the unit and exponent
- * provided by the HID descriptor. If an error occurs durring conversion
- * (e.g. from the unit being left unspecified) [xy]_phy is not modified.
- */
-static void wacom_fix_phy_from_hid(struct wacom_features *features)
-{
-       int xres = wacom_calc_hid_res(features->x_max, features->x_phy,
-                                       features->unit, features->unitExpo);
-       int yres = wacom_calc_hid_res(features->y_max, features->y_phy,
-                                       features->unit, features->unitExpo);
-
-       if (xres > 0 && yres > 0) {
-               features->x_phy = (100 * features->x_max) / xres;
-               features->y_phy = (100 * features->y_max) / yres;
-       }
-}
-
-/*
- * Static values for max X/Y and resolution of Pen interface is stored in
- * features. This mean physical size of active area can be computed.
- * This is useful to do when Pen and Touch have same active area of tablet.
- * This means for Touch device, we only need to find max X/Y value and we
- * have enough information to compute resolution of touch.
- */
-static void wacom_set_phy_from_res(struct wacom_features *features)
-{
-       features->x_phy = (features->x_max * 100) / features->x_resolution;
-       features->y_phy = (features->y_max * 100) / features->y_resolution;
-}
-
 static int wacom_parse_logical_collection(unsigned char *report,
                                          struct wacom_features *features)
 {
@@ -265,8 +232,6 @@ static int wacom_parse_logical_collection(unsigned char *report,
                features->pktlen = WACOM_PKGLEN_BBTOUCH3;
                features->device_type = BTN_TOOL_FINGER;
 
-               wacom_set_phy_from_res(features);
-
                features->x_max = features->y_max =
                        get_unaligned_le16(&report[10]);
 
@@ -640,9 +605,6 @@ static int wacom_retrieve_hid_descriptor(struct usb_interface *intf,
                }
        }
        error = wacom_parse_hid(intf, hid_desc, features);
-       if (error)
-               goto out;
-       wacom_fix_phy_from_hid(features);
 
  out:
        return error;
@@ -1228,7 +1190,6 @@ static void wacom_wireless_work(struct work_struct *work)
                        *((struct wacom_features *)id->driver_info);
                wacom_wac2->features.pktlen = WACOM_PKGLEN_BBTOUCH3;
                wacom_wac2->features.device_type = BTN_TOOL_FINGER;
-               wacom_set_phy_from_res(&wacom_wac2->features);
                wacom_wac2->features.x_max = wacom_wac2->features.y_max = 4096;
                error = wacom_register_input(wacom2);
                if (error)
@@ -1251,6 +1212,33 @@ fail1:
        return;
 }
 
+/*
+ * Not all devices report physical dimensions from HID.
+ * Compute the default from hardcoded logical dimension
+ * and resolution before driver overwrites them.
+ */
+static void wacom_set_default_phy(struct wacom_features *features)
+{
+       if (features->x_resolution) {
+               features->x_phy = (features->x_max * 100) /
+                                       features->x_resolution;
+               features->y_phy = (features->y_max * 100) /
+                                       features->y_resolution;
+       }
+}
+
+static void wacom_calculate_res(struct wacom_features *features)
+{
+       features->x_resolution = wacom_calc_hid_res(features->x_max,
+                                                   features->x_phy,
+                                                   features->unit,
+                                                   features->unitExpo);
+       features->y_resolution = wacom_calc_hid_res(features->y_max,
+                                                   features->y_phy,
+                                                   features->unit,
+                                                   features->unitExpo);
+}
+
 static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *id)
 {
        struct usb_device *dev = interface_to_usbdev(intf);
@@ -1297,6 +1285,9 @@ static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *i
 
        endpoint = &intf->cur_altsetting->endpoint[0].desc;
 
+       /* set the default size in case we do not get them from hid */
+       wacom_set_default_phy(features);
+
        /* Retrieve the physical and logical size for touch devices */
        error = wacom_retrieve_hid_descriptor(intf, features);
        if (error)
@@ -1312,8 +1303,6 @@ static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *i
                        features->device_type = BTN_TOOL_FINGER;
                        features->pktlen = WACOM_PKGLEN_BBTOUCH3;
 
-                       wacom_set_phy_from_res(features);
-
                        features->x_max = 4096;
                        features->y_max = 4096;
                } else {
@@ -1323,6 +1312,13 @@ static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *i
 
        wacom_setup_device_quirks(features);
 
+       /* set unit to "100th of a mm" for devices not reported by HID */
+       if (!features->unit) {
+               features->unit = 0x11;
+               features->unitExpo = 16 - 3;
+       }
+       wacom_calculate_res(features);
+
        strlcpy(wacom_wac->name, features->name, sizeof(wacom_wac->name));
 
        if (features->quirks & WACOM_QUIRK_MULTI_INPUT) {
@@ -1334,7 +1330,6 @@ static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *i
                                " Pen" : " Finger",
                        sizeof(wacom_wac->name));
 
-
                other_dev = wacom_get_sibling(dev, features->oVid, features->oPid);
                if (other_dev == NULL || wacom_get_usbdev_data(other_dev) == NULL)
                        other_dev = dev;
index 384fbcd0cee0d1b65d402beaa0f4f418e22e0a50..69fa67880846d3d344a532d6f91489a49d1625a2 100644 (file)
@@ -1445,13 +1445,6 @@ void wacom_setup_device_quirks(struct wacom_features *features)
        }
 }
 
-static unsigned int wacom_calculate_touch_res(unsigned int logical_max,
-                                             unsigned int physical_max)
-{
-       /* Touch physical dimensions are in 100th of mm */
-       return (logical_max * 100) / physical_max;
-}
-
 static void wacom_abs_set_axis(struct input_dev *input_dev,
                               struct wacom_wac *wacom_wac)
 {
@@ -1475,11 +1468,9 @@ static void wacom_abs_set_axis(struct input_dev *input_dev,
                        input_set_abs_params(input_dev, ABS_Y, 0,
                                features->y_max, features->y_fuzz, 0);
                        input_abs_set_res(input_dev, ABS_X,
-                               wacom_calculate_touch_res(features->x_max,
-                                                       features->x_phy));
+                                         features->x_resolution);
                        input_abs_set_res(input_dev, ABS_Y,
-                               wacom_calculate_touch_res(features->y_max,
-                                                       features->y_phy));
+                                         features->y_resolution);
                }
 
                if (features->touch_max > 1) {
@@ -1488,11 +1479,9 @@ static void wacom_abs_set_axis(struct input_dev *input_dev,
                        input_set_abs_params(input_dev, ABS_MT_POSITION_Y, 0,
                                features->y_max, features->y_fuzz, 0);
                        input_abs_set_res(input_dev, ABS_MT_POSITION_X,
-                               wacom_calculate_touch_res(features->x_max,
-                                                       features->x_phy));
+                                         features->x_resolution);
                        input_abs_set_res(input_dev, ABS_MT_POSITION_Y,
-                               wacom_calculate_touch_res(features->y_max,
-                                                       features->y_phy));
+                                         features->y_resolution);
                }
        }
 }
index 96e0eedcc7e5876a871c72f909653359a154260e..8c651985a5c44929f083c99e881be5431bca9cad 100644 (file)
@@ -291,7 +291,7 @@ err_free_mem:
        return err;
 }
 
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 static int cy8ctmg110_suspend(struct device *dev)
 {
        struct i2c_client *client = to_i2c_client(dev);
@@ -319,9 +319,9 @@ static int cy8ctmg110_resume(struct device *dev)
        }
        return 0;
 }
+#endif
 
 static SIMPLE_DEV_PM_OPS(cy8ctmg110_pm, cy8ctmg110_suspend, cy8ctmg110_resume);
-#endif
 
 static int cy8ctmg110_remove(struct i2c_client *client)
 {
@@ -351,9 +351,7 @@ static struct i2c_driver cy8ctmg110_driver = {
        .driver         = {
                .owner  = THIS_MODULE,
                .name   = CY8CTMG110_DRIVER_NAME,
-#ifdef CONFIG_PM
                .pm     = &cy8ctmg110_pm,
-#endif
        },
        .id_table       = cy8ctmg110_idtable,
        .probe          = cy8ctmg110_probe,
index 8fe5086c8d2e0b0d54f291a7b1f1a3f90293cad9..1ce3d29ffca5a11e2411936cce7999c089d68764 100644 (file)
@@ -264,7 +264,7 @@ static int eeti_ts_remove(struct i2c_client *client)
        return 0;
 }
 
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 static int eeti_ts_suspend(struct device *dev)
 {
        struct i2c_client *client = to_i2c_client(dev);
@@ -302,9 +302,9 @@ static int eeti_ts_resume(struct device *dev)
 
        return 0;
 }
+#endif
 
 static SIMPLE_DEV_PM_OPS(eeti_ts_pm, eeti_ts_suspend, eeti_ts_resume);
-#endif
 
 static const struct i2c_device_id eeti_ts_id[] = {
        { "eeti_ts", 0 },
@@ -315,9 +315,7 @@ MODULE_DEVICE_TABLE(i2c, eeti_ts_id);
 static struct i2c_driver eeti_ts_driver = {
        .driver = {
                .name = "eeti_ts",
-#ifdef CONFIG_PM
                .pm = &eeti_ts_pm,
-#endif
        },
        .probe = eeti_ts_probe,
        .remove = eeti_ts_remove,
index 6c4fb84369575df40bbfe8bc6a7addcd7a913e15..66500852341b66319b634133b3f0930f3dc3f2ec 100644 (file)
@@ -221,7 +221,7 @@ static struct isa_driver htcpen_isa_driver = {
        }
 };
 
-static struct dmi_system_id __initdata htcshift_dmi_table[] = {
+static struct dmi_system_id htcshift_dmi_table[] __initdata = {
        {
                .ident = "Shift",
                .matches = {
index 820d85c4a4a0f8c2b8fc813adfa30a07c529a953..fe302e33f72e7b2da024180e697422461b017bc7 100644 (file)
@@ -17,6 +17,16 @@ config OF_IOMMU
        def_bool y
        depends on OF
 
+config FSL_PAMU
+       bool "Freescale IOMMU support"
+       depends on PPC_E500MC
+       select IOMMU_API
+       select GENERIC_ALLOCATOR
+       help
+         Freescale PAMU support. PAMU is the IOMMU present on Freescale QorIQ platforms.
+         PAMU can authorize memory access, remap the memory address, and remap I/O
+         transaction types.
+
 # MSM IOMMU support
 config MSM_IOMMU
        bool "MSM IOMMU Support"
index bbe7041212dd64e3ff934cc23c45fdee3adcdda4..14c1f474cf1188008316e7e480c7e3c6d22f04f5 100644 (file)
@@ -16,3 +16,4 @@ obj-$(CONFIG_TEGRA_IOMMU_SMMU) += tegra-smmu.o
 obj-$(CONFIG_EXYNOS_IOMMU) += exynos-iommu.o
 obj-$(CONFIG_SHMOBILE_IOMMU) += shmobile-iommu.o
 obj-$(CONFIG_SHMOBILE_IPMMU) += shmobile-ipmmu.o
+obj-$(CONFIG_FSL_PAMU) += fsl_pamu.o fsl_pamu_domain.o
index 6dc659426a51f5cca55fdbaf2348b3c28fa03dde..72531f008a5e34ea871e7d3d3569a724af695d86 100644 (file)
@@ -456,8 +456,10 @@ static int iommu_init_device(struct device *dev)
        }
 
        ret = init_iommu_group(dev);
-       if (ret)
+       if (ret) {
+               free_dev_data(dev_data);
                return ret;
+       }
 
        if (pci_iommuv2_capable(pdev)) {
                struct amd_iommu *iommu;
index 7acbf351e9af2d52b80909177ce89dae6a8fc558..8f798be6e398d98ccacc5ba85f3f5b76c4096683 100644 (file)
@@ -1384,7 +1384,7 @@ static int iommu_init_msi(struct amd_iommu *iommu)
        if (iommu->int_enabled)
                goto enable_faults;
 
-       if (pci_find_capability(iommu->dev, PCI_CAP_ID_MSI))
+       if (iommu->dev->msi_cap)
                ret = iommu_setup_msi(iommu);
        else
                ret = -ENODEV;
index ebd0a4cff049a7479a32884d6ed86199208b7b94..f417e89e1e7e47e812fef6e16faf9a19432ce0b4 100644 (file)
@@ -56,9 +56,6 @@
 /* Maximum number of mapping groups per SMMU */
 #define ARM_SMMU_MAX_SMRS              128
 
-/* Number of VMIDs per SMMU */
-#define ARM_SMMU_NUM_VMIDS             256
-
 /* SMMU global address space */
 #define ARM_SMMU_GR0(smmu)             ((smmu)->base)
 #define ARM_SMMU_GR1(smmu)             ((smmu)->base + (smmu)->pagesize)
@@ -87,6 +84,7 @@
 #define ARM_SMMU_PTE_AP_UNPRIV         (((pteval_t)1) << 6)
 #define ARM_SMMU_PTE_AP_RDONLY         (((pteval_t)2) << 6)
 #define ARM_SMMU_PTE_ATTRINDX_SHIFT    2
+#define ARM_SMMU_PTE_nG                        (((pteval_t)1) << 11)
 
 /* Stage-2 PTE */
 #define ARM_SMMU_PTE_HAP_FAULT         (((pteval_t)0) << 6)
 #define ARM_SMMU_CB_FAR_LO             0x60
 #define ARM_SMMU_CB_FAR_HI             0x64
 #define ARM_SMMU_CB_FSYNR0             0x68
+#define ARM_SMMU_CB_S1_TLBIASID                0x610
 
 #define SCTLR_S1_ASIDPNE               (1 << 12)
 #define SCTLR_CFCFG                    (1 << 7)
 #define TTBCR2_ADDR_44                 4
 #define TTBCR2_ADDR_48                 5
 
+#define TTBRn_HI_ASID_SHIFT            16
+
 #define MAIR_ATTR_SHIFT(n)             ((n) << 3)
 #define MAIR_ATTR_MASK                 0xff
 #define MAIR_ATTR_DEVICE               0x04
 #define FSR_IGN                                (FSR_AFF | FSR_ASF | FSR_TLBMCF |       \
                                         FSR_TLBLKF)
 #define FSR_FAULT                      (FSR_MULTI | FSR_SS | FSR_UUT |         \
-                                        FSR_EF | FSR_PF | FSR_TF)
+                                        FSR_EF | FSR_PF | FSR_TF | FSR_IGN)
 
 #define FSYNR0_WNR                     (1 << 4)
 
@@ -365,21 +366,21 @@ struct arm_smmu_device {
        u32                             num_context_irqs;
        unsigned int                    *irqs;
 
-       DECLARE_BITMAP(vmid_map, ARM_SMMU_NUM_VMIDS);
-
        struct list_head                list;
        struct rb_root                  masters;
 };
 
 struct arm_smmu_cfg {
        struct arm_smmu_device          *smmu;
-       u8                              vmid;
        u8                              cbndx;
        u8                              irptndx;
        u32                             cbar;
        pgd_t                           *pgd;
 };
 
+#define ARM_SMMU_CB_ASID(cfg)          ((cfg)->cbndx)
+#define ARM_SMMU_CB_VMID(cfg)          ((cfg)->cbndx + 1)
+
 struct arm_smmu_domain {
        /*
         * A domain can span across multiple, chained SMMUs and requires
@@ -533,6 +534,25 @@ static void arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
        }
 }
 
+static void arm_smmu_tlb_inv_context(struct arm_smmu_cfg *cfg)
+{
+       struct arm_smmu_device *smmu = cfg->smmu;
+       void __iomem *base = ARM_SMMU_GR0(smmu);
+       bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
+
+       if (stage1) {
+               base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
+               writel_relaxed(ARM_SMMU_CB_ASID(cfg),
+                              base + ARM_SMMU_CB_S1_TLBIASID);
+       } else {
+               base = ARM_SMMU_GR0(smmu);
+               writel_relaxed(ARM_SMMU_CB_VMID(cfg),
+                              base + ARM_SMMU_GR0_TLBIVMID);
+       }
+
+       arm_smmu_tlb_sync(smmu);
+}
+
 static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
 {
        int flags, ret;
@@ -590,6 +610,9 @@ static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
        void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
 
        gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
+       if (!gfsr)
+               return IRQ_NONE;
+
        gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
        gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
        gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);
@@ -601,7 +624,7 @@ static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
                gfsr, gfsynr0, gfsynr1, gfsynr2);
 
        writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
-       return IRQ_NONE;
+       return IRQ_HANDLED;
 }
 
 static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
@@ -618,14 +641,15 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
        cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, root_cfg->cbndx);
 
        /* CBAR */
-       reg = root_cfg->cbar |
-             (root_cfg->vmid << CBAR_VMID_SHIFT);
+       reg = root_cfg->cbar;
        if (smmu->version == 1)
              reg |= root_cfg->irptndx << CBAR_IRPTNDX_SHIFT;
 
        /* Use the weakest memory type, so it is overridden by the pte */
        if (stage1)
                reg |= (CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
+       else
+               reg |= ARM_SMMU_CB_VMID(root_cfg) << CBAR_VMID_SHIFT;
        writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(root_cfg->cbndx));
 
        if (smmu->version > 1) {
@@ -687,15 +711,11 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
 
        /* TTBR0 */
        reg = __pa(root_cfg->pgd);
-#ifndef __BIG_ENDIAN
        writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_LO);
        reg = (phys_addr_t)__pa(root_cfg->pgd) >> 32;
+       if (stage1)
+               reg |= ARM_SMMU_CB_ASID(root_cfg) << TTBRn_HI_ASID_SHIFT;
        writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_HI);
-#else
-       writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_HI);
-       reg = (phys_addr_t)__pa(root_cfg->pgd) >> 32;
-       writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_LO);
-#endif
 
        /*
         * TTBCR
@@ -750,10 +770,6 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
                writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0);
        }
 
-       /* Nuke the TLB */
-       writel_relaxed(root_cfg->vmid, gr0_base + ARM_SMMU_GR0_TLBIVMID);
-       arm_smmu_tlb_sync(smmu);
-
        /* SCTLR */
        reg = SCTLR_CFCFG | SCTLR_CFIE | SCTLR_CFRE | SCTLR_M | SCTLR_EAE_SBOP;
        if (stage1)
@@ -790,11 +806,6 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
                return -ENODEV;
        }
 
-       ret = __arm_smmu_alloc_bitmap(smmu->vmid_map, 0, ARM_SMMU_NUM_VMIDS);
-       if (IS_ERR_VALUE(ret))
-               return ret;
-
-       root_cfg->vmid = ret;
        if (smmu->features & ARM_SMMU_FEAT_TRANS_NESTED) {
                /*
                 * We will likely want to change this if/when KVM gets
@@ -813,10 +824,9 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
        ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
                                      smmu->num_context_banks);
        if (IS_ERR_VALUE(ret))
-               goto out_free_vmid;
+               return ret;
 
        root_cfg->cbndx = ret;
-
        if (smmu->version == 1) {
                root_cfg->irptndx = atomic_inc_return(&smmu->irptndx);
                root_cfg->irptndx %= smmu->num_context_irqs;
@@ -840,8 +850,6 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
 
 out_free_context:
        __arm_smmu_free_bitmap(smmu->context_map, root_cfg->cbndx);
-out_free_vmid:
-       __arm_smmu_free_bitmap(smmu->vmid_map, root_cfg->vmid);
        return ret;
 }
 
@@ -850,17 +858,22 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
        struct arm_smmu_domain *smmu_domain = domain->priv;
        struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg;
        struct arm_smmu_device *smmu = root_cfg->smmu;
+       void __iomem *cb_base;
        int irq;
 
        if (!smmu)
                return;
 
+       /* Disable the context bank and nuke the TLB before freeing it. */
+       cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, root_cfg->cbndx);
+       writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
+       arm_smmu_tlb_inv_context(root_cfg);
+
        if (root_cfg->irptndx != -1) {
                irq = smmu->irqs[smmu->num_global_irqs + root_cfg->irptndx];
                free_irq(irq, domain);
        }
 
-       __arm_smmu_free_bitmap(smmu->vmid_map, root_cfg->vmid);
        __arm_smmu_free_bitmap(smmu->context_map, root_cfg->cbndx);
 }
 
@@ -959,6 +972,11 @@ static void arm_smmu_free_pgtables(struct arm_smmu_domain *smmu_domain)
 static void arm_smmu_domain_destroy(struct iommu_domain *domain)
 {
        struct arm_smmu_domain *smmu_domain = domain->priv;
+
+       /*
+        * Free the domain resources. We assume that all devices have
+        * already been detached.
+        */
        arm_smmu_destroy_domain_context(domain);
        arm_smmu_free_pgtables(smmu_domain);
        kfree(smmu_domain);
@@ -1199,7 +1217,7 @@ static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd,
        }
 
        if (stage == 1) {
-               pteval |= ARM_SMMU_PTE_AP_UNPRIV;
+               pteval |= ARM_SMMU_PTE_AP_UNPRIV | ARM_SMMU_PTE_nG;
                if (!(flags & IOMMU_WRITE) && (flags & IOMMU_READ))
                        pteval |= ARM_SMMU_PTE_AP_RDONLY;
 
@@ -1415,13 +1433,9 @@ static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
 {
        int ret;
        struct arm_smmu_domain *smmu_domain = domain->priv;
-       struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg;
-       struct arm_smmu_device *smmu = root_cfg->smmu;
-       void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
 
        ret = arm_smmu_handle_mapping(smmu_domain, iova, 0, size, 0);
-       writel_relaxed(root_cfg->vmid, gr0_base + ARM_SMMU_GR0_TLBIVMID);
-       arm_smmu_tlb_sync(smmu);
+       arm_smmu_tlb_inv_context(&smmu_domain->root_cfg);
        return ret ? ret : size;
 }
 
@@ -1544,6 +1558,7 @@ static struct iommu_ops arm_smmu_ops = {
 static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
 {
        void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
+       void __iomem *sctlr_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB_SCTLR;
        int i = 0;
        u32 scr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sCR0);
 
@@ -1553,6 +1568,10 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
                writel_relaxed(S2CR_TYPE_BYPASS, gr0_base + ARM_SMMU_GR0_S2CR(i));
        }
 
+       /* Make sure all context banks are disabled */
+       for (i = 0; i < smmu->num_context_banks; ++i)
+               writel_relaxed(0, sctlr_base + ARM_SMMU_CB(smmu, i));
+
        /* Invalidate the TLB, just in case */
        writel_relaxed(0, gr0_base + ARM_SMMU_GR0_STLBIALL);
        writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
@@ -1906,7 +1925,7 @@ static int arm_smmu_device_remove(struct platform_device *pdev)
                of_node_put(master->of_node);
        }
 
-       if (!bitmap_empty(smmu->vmid_map, ARM_SMMU_NUM_VMIDS))
+       if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
                dev_err(dev, "removing device with active domains!\n");
 
        for (i = 0; i < smmu->num_global_irqs; ++i)
index 3f32d64ab87a4f98f910212b1f1a8a8d66d99b5f..074018979cdfb047f96619bf050b0f26ba5a623f 100644 (file)
@@ -247,50 +247,6 @@ static void __sysmmu_set_prefbuf(void __iomem *sfrbase, unsigned long base,
        __raw_writel(size - 1 + base,  sfrbase + REG_PB0_EADDR + idx * 8);
 }
 
-void exynos_sysmmu_set_prefbuf(struct device *dev,
-                               unsigned long base0, unsigned long size0,
-                               unsigned long base1, unsigned long size1)
-{
-       struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
-       unsigned long flags;
-       int i;
-
-       BUG_ON((base0 + size0) <= base0);
-       BUG_ON((size1 > 0) && ((base1 + size1) <= base1));
-
-       read_lock_irqsave(&data->lock, flags);
-       if (!is_sysmmu_active(data))
-               goto finish;
-
-       for (i = 0; i < data->nsfrs; i++) {
-               if ((readl(data->sfrbases[i] + REG_MMU_VERSION) >> 28) == 3) {
-                       if (!sysmmu_block(data->sfrbases[i]))
-                               continue;
-
-                       if (size1 == 0) {
-                               if (size0 <= SZ_128K) {
-                                       base1 = base0;
-                                       size1 = size0;
-                               } else {
-                                       size1 = size0 -
-                                               ALIGN(size0 / 2, SZ_64K);
-                                       size0 = size0 - size1;
-                                       base1 = base0 + size0;
-                               }
-                       }
-
-                       __sysmmu_set_prefbuf(
-                                       data->sfrbases[i], base0, size0, 0);
-                       __sysmmu_set_prefbuf(
-                                       data->sfrbases[i], base1, size1, 1);
-
-                       sysmmu_unblock(data->sfrbases[i]);
-               }
-       }
-finish:
-       read_unlock_irqrestore(&data->lock, flags);
-}
-
 static void __set_fault_handler(struct sysmmu_drvdata *data,
                                        sysmmu_fault_handler_t handler)
 {
diff --git a/drivers/iommu/fsl_pamu.c b/drivers/iommu/fsl_pamu.c
new file mode 100644 (file)
index 0000000..cba0498
--- /dev/null
@@ -0,0 +1,1309 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ *
+ * Copyright (C) 2013 Freescale Semiconductor, Inc.
+ *
+ */
+
+#define pr_fmt(fmt)    "fsl-pamu: %s: " fmt, __func__
+
+#include <linux/init.h>
+#include <linux/iommu.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/of_platform.h>
+#include <linux/bootmem.h>
+#include <linux/genalloc.h>
+#include <asm/io.h>
+#include <asm/bitops.h>
+#include <asm/fsl_guts.h>
+
+#include "fsl_pamu.h"
+
+/* define indexes for each operation mapping scenario */
+#define OMI_QMAN        0x00
+#define OMI_FMAN        0x01
+#define OMI_QMAN_PRIV   0x02
+#define OMI_CAAM        0x03
+
+#define make64(high, low) (((u64)(high) << 32) | (low))
+
+struct pamu_isr_data {
+       void __iomem *pamu_reg_base;    /* Base address of PAMU regs*/
+       unsigned int count;             /* The number of PAMUs */
+};
+
+static struct paace *ppaact;
+static struct paace *spaact;
+static struct ome *omt;
+
+/*
+ * Table for matching compatible strings, for device tree
+ * guts node, for QorIQ SOCs.
+ * "fsl,qoriq-device-config-2.0" corresponds to T4 & B4
+ * SOCs. For the older SOCs "fsl,qoriq-device-config-1.0"
+ * string would be used.
+*/
+static const struct of_device_id guts_device_ids[] = {
+       { .compatible = "fsl,qoriq-device-config-1.0", },
+       { .compatible = "fsl,qoriq-device-config-2.0", },
+       {}
+};
+
+
+/*
+ * Table for matching compatible strings, for device tree
+ * L3 cache controller node.
+ * "fsl,t4240-l3-cache-controller" corresponds to T4,
+ * "fsl,b4860-l3-cache-controller" corresponds to B4 &
+ * "fsl,p4080-l3-cache-controller" corresponds to other,
+ * SOCs.
+*/
+static const struct of_device_id l3_device_ids[] = {
+       { .compatible = "fsl,t4240-l3-cache-controller", },
+       { .compatible = "fsl,b4860-l3-cache-controller", },
+       { .compatible = "fsl,p4080-l3-cache-controller", },
+       {}
+};
+
+/* maximum subwindows permitted per liodn */
+static u32 max_subwindow_count;
+
+/* Pool for fspi allocation */
+struct gen_pool *spaace_pool;
+
+/**
+ * pamu_get_max_subwin_cnt() - Return the maximum supported
+ * subwindow count per liodn.
+ *
+ */
+u32 pamu_get_max_subwin_cnt()
+{
+       return max_subwindow_count;
+}
+
+/**
+ * pamu_get_ppaace() - Return the primary PACCE
+ * @liodn: liodn PAACT index for desired PAACE
+ *
+ * Returns the ppace pointer upon success else return
+ * null.
+ */
+static struct paace *pamu_get_ppaace(int liodn)
+{
+       if (!ppaact || liodn >= PAACE_NUMBER_ENTRIES) {
+               pr_debug("PPAACT doesn't exist\n");
+               return NULL;
+       }
+
+       return &ppaact[liodn];
+}
+
+/**
+ * pamu_enable_liodn() - Set valid bit of PACCE
+ * @liodn: liodn PAACT index for desired PAACE
+ *
+ * Returns 0 upon success else error code < 0 returned
+ */
+int pamu_enable_liodn(int liodn)
+{
+       struct paace *ppaace;
+
+       ppaace = pamu_get_ppaace(liodn);
+       if (!ppaace) {
+               pr_debug("Invalid primary paace entry\n");
+               return -ENOENT;
+       }
+
+       if (!get_bf(ppaace->addr_bitfields, PPAACE_AF_WSE)) {
+               pr_debug("liodn %d not configured\n", liodn);
+               return -EINVAL;
+       }
+
+       /* Ensure that all other stores to the ppaace complete first */
+       mb();
+
+       set_bf(ppaace->addr_bitfields, PAACE_AF_V, PAACE_V_VALID);
+       mb();
+
+       return 0;
+}
+
+/**
+ * pamu_disable_liodn() - Clears valid bit of PACCE
+ * @liodn: liodn PAACT index for desired PAACE
+ *
+ * Returns 0 upon success else error code < 0 returned
+ */
+int pamu_disable_liodn(int liodn)
+{
+       struct paace *ppaace;
+
+       ppaace = pamu_get_ppaace(liodn);
+       if (!ppaace) {
+               pr_debug("Invalid primary paace entry\n");
+               return -ENOENT;
+       }
+
+       set_bf(ppaace->addr_bitfields, PAACE_AF_V, PAACE_V_INVALID);
+       mb();
+
+       return 0;
+}
+
+/* Derive the window size encoding for a particular PAACE entry */
+static unsigned int map_addrspace_size_to_wse(phys_addr_t addrspace_size)
+{
+       /* Bug if not a power of 2 */
+       BUG_ON(!is_power_of_2(addrspace_size));
+
+       /* window size is 2^(WSE+1) bytes */
+       return __ffs(addrspace_size) - 1;
+}
+
+/* Derive the PAACE window count encoding for the subwindow count */
+static unsigned int map_subwindow_cnt_to_wce(u32 subwindow_cnt)
+{
+       /* window count is 2^(WCE+1) bytes */
+       return __ffs(subwindow_cnt) - 1;
+}
+
+/*
+ * Set the PAACE type as primary and set the coherency required domain
+ * attribute
+ */
+static void pamu_init_ppaace(struct paace *ppaace)
+{
+       set_bf(ppaace->addr_bitfields, PAACE_AF_PT, PAACE_PT_PRIMARY);
+
+       set_bf(ppaace->domain_attr.to_host.coherency_required, PAACE_DA_HOST_CR,
+              PAACE_M_COHERENCE_REQ);
+}
+
+/*
+ * Set the PAACE type as secondary and set the coherency required domain
+ * attribute.
+ */
+static void pamu_init_spaace(struct paace *spaace)
+{
+       set_bf(spaace->addr_bitfields, PAACE_AF_PT, PAACE_PT_SECONDARY);
+       set_bf(spaace->domain_attr.to_host.coherency_required, PAACE_DA_HOST_CR,
+              PAACE_M_COHERENCE_REQ);
+}
+
+/*
+ * Return the spaace (corresponding to the secondary window index)
+ * for a particular ppaace.
+ */
+static struct paace *pamu_get_spaace(struct paace *paace, u32 wnum)
+{
+       u32 subwin_cnt;
+       struct paace *spaace = NULL;
+
+       subwin_cnt = 1UL << (get_bf(paace->impl_attr, PAACE_IA_WCE) + 1);
+
+       if (wnum < subwin_cnt)
+               spaace = &spaact[paace->fspi + wnum];
+       else
+               pr_debug("secondary paace out of bounds\n");
+
+       return spaace;
+}
+
+/**
+ * pamu_get_fspi_and_allocate() - Allocates fspi index and reserves subwindows
+ *                                required for primary PAACE in the secondary
+ *                                PAACE table.
+ * @subwin_cnt: Number of subwindows to be reserved.
+ *
+ * A PPAACE entry may have a number of associated subwindows. A subwindow
+ * corresponds to a SPAACE entry in the SPAACT table. Each PAACE entry stores
+ * the index (fspi) of the first SPAACE entry in the SPAACT table. This
+ * function returns the index of the first SPAACE entry. The remaining
+ * SPAACE entries are reserved contiguously from that index.
+ *
+ * Returns a valid fspi index in the range of 0 - SPAACE_NUMBER_ENTRIES on success.
+ * If no SPAACE entry is available or the allocator can not reserve the required
+ * number of contiguous entries function returns ULONG_MAX indicating a failure.
+ *
+*/
+static unsigned long pamu_get_fspi_and_allocate(u32 subwin_cnt)
+{
+       unsigned long spaace_addr;
+
+       spaace_addr = gen_pool_alloc(spaace_pool, subwin_cnt * sizeof(struct paace));
+       if (!spaace_addr)
+               return ULONG_MAX;
+
+       return (spaace_addr - (unsigned long)spaact) / (sizeof(struct paace));
+}
+
+/* Release the subwindows reserved for a particular LIODN */
+void pamu_free_subwins(int liodn)
+{
+       struct paace *ppaace;
+       u32 subwin_cnt, size;
+
+       ppaace = pamu_get_ppaace(liodn);
+       if (!ppaace) {
+               pr_debug("Invalid liodn entry\n");
+               return;
+       }
+
+       if (get_bf(ppaace->addr_bitfields, PPAACE_AF_MW)) {
+               subwin_cnt = 1UL << (get_bf(ppaace->impl_attr, PAACE_IA_WCE) + 1);
+               size = (subwin_cnt - 1) * sizeof(struct paace);
+               gen_pool_free(spaace_pool, (unsigned long)&spaact[ppaace->fspi], size);
+               set_bf(ppaace->addr_bitfields, PPAACE_AF_MW, 0);
+       }
+}
+
+/*
+ * Function used for updating stash destination for the coressponding
+ * LIODN.
+ */
+int  pamu_update_paace_stash(int liodn, u32 subwin, u32 value)
+{
+       struct paace *paace;
+
+       paace = pamu_get_ppaace(liodn);
+       if (!paace) {
+               pr_debug("Invalid liodn entry\n");
+               return -ENOENT;
+       }
+       if (subwin) {
+               paace = pamu_get_spaace(paace, subwin - 1);
+               if (!paace) {
+                       return -ENOENT;
+               }
+       }
+       set_bf(paace->impl_attr, PAACE_IA_CID, value);
+
+       mb();
+
+       return 0;
+}
+
+/* Disable a subwindow corresponding to the LIODN */
+int pamu_disable_spaace(int liodn, u32 subwin)
+{
+       struct paace *paace;
+
+       paace = pamu_get_ppaace(liodn);
+       if (!paace) {
+               pr_debug("Invalid liodn entry\n");
+               return -ENOENT;
+       }
+       if (subwin) {
+               paace = pamu_get_spaace(paace, subwin - 1);
+               if (!paace) {
+                       return -ENOENT;
+               }
+               set_bf(paace->addr_bitfields, PAACE_AF_V,
+                        PAACE_V_INVALID);
+       } else {
+               set_bf(paace->addr_bitfields, PAACE_AF_AP,
+                        PAACE_AP_PERMS_DENIED);
+       }
+
+       mb();
+
+       return 0;
+}
+
+
+/**
+ * pamu_config_paace() - Sets up PPAACE entry for specified liodn
+ *
+ * @liodn: Logical IO device number
+ * @win_addr: starting address of DSA window
+ * @win-size: size of DSA window
+ * @omi: Operation mapping index -- if ~omi == 0 then omi not defined
+ * @rpn: real (true physical) page number
+ * @stashid: cache stash id for associated cpu -- if ~stashid == 0 then
+ *          stashid not defined
+ * @snoopid: snoop id for hardware coherency -- if ~snoopid == 0 then
+ *          snoopid not defined
+ * @subwin_cnt: number of sub-windows
+ * @prot: window permissions
+ *
+ * Returns 0 upon success else error code < 0 returned
+ */
+int pamu_config_ppaace(int liodn, phys_addr_t win_addr, phys_addr_t win_size,
+                      u32 omi, unsigned long rpn, u32 snoopid, u32 stashid,
+                      u32 subwin_cnt, int prot)
+{
+       struct paace *ppaace;
+       unsigned long fspi;
+
+       if (!is_power_of_2(win_size) || win_size < PAMU_PAGE_SIZE) {
+               pr_debug("window size too small or not a power of two %llx\n", win_size);
+               return -EINVAL;
+       }
+
+       if (win_addr & (win_size - 1)) {
+               pr_debug("window address is not aligned with window size\n");
+               return -EINVAL;
+       }
+
+       ppaace = pamu_get_ppaace(liodn);
+       if (!ppaace) {
+               return -ENOENT;
+       }
+
+       /* window size is 2^(WSE+1) bytes */
+       set_bf(ppaace->addr_bitfields, PPAACE_AF_WSE,
+               map_addrspace_size_to_wse(win_size));
+
+       pamu_init_ppaace(ppaace);
+
+       ppaace->wbah = win_addr >> (PAMU_PAGE_SHIFT + 20);
+       set_bf(ppaace->addr_bitfields, PPAACE_AF_WBAL,
+              (win_addr >> PAMU_PAGE_SHIFT));
+
+       /* set up operation mapping if it's configured */
+       if (omi < OME_NUMBER_ENTRIES) {
+               set_bf(ppaace->impl_attr, PAACE_IA_OTM, PAACE_OTM_INDEXED);
+               ppaace->op_encode.index_ot.omi = omi;
+       } else if (~omi != 0) {
+               pr_debug("bad operation mapping index: %d\n", omi);
+               return -EINVAL;
+       }
+
+       /* configure stash id */
+       if (~stashid != 0)
+               set_bf(ppaace->impl_attr, PAACE_IA_CID, stashid);
+
+       /* configure snoop id */
+       if (~snoopid != 0)
+               ppaace->domain_attr.to_host.snpid = snoopid;
+
+       if (subwin_cnt) {
+               /* The first entry is in the primary PAACE instead */
+               fspi = pamu_get_fspi_and_allocate(subwin_cnt - 1);
+               if (fspi == ULONG_MAX) {
+                       pr_debug("spaace indexes exhausted\n");
+                       return -EINVAL;
+               }
+
+               /* window count is 2^(WCE+1) bytes */
+               set_bf(ppaace->impl_attr, PAACE_IA_WCE,
+                      map_subwindow_cnt_to_wce(subwin_cnt));
+               set_bf(ppaace->addr_bitfields, PPAACE_AF_MW, 0x1);
+               ppaace->fspi = fspi;
+       } else {
+               set_bf(ppaace->impl_attr, PAACE_IA_ATM, PAACE_ATM_WINDOW_XLATE);
+               ppaace->twbah = rpn >> 20;
+               set_bf(ppaace->win_bitfields, PAACE_WIN_TWBAL, rpn);
+               set_bf(ppaace->addr_bitfields, PAACE_AF_AP, prot);
+               set_bf(ppaace->impl_attr, PAACE_IA_WCE, 0);
+               set_bf(ppaace->addr_bitfields, PPAACE_AF_MW, 0);
+       }
+       mb();
+
+       return 0;
+}
+
+/**
+ * pamu_config_spaace() - Sets up SPAACE entry for specified subwindow
+ *
+ * @liodn:  Logical IO device number
+ * @subwin_cnt:  number of sub-windows associated with dma-window
+ * @subwin: subwindow index
+ * @subwin_size: size of subwindow
+ * @omi: Operation mapping index
+ * @rpn: real (true physical) page number
+ * @snoopid: snoop id for hardware coherency -- if ~snoopid == 0 then
+ *                       snoopid not defined
+ * @stashid: cache stash id for associated cpu
+ * @enable: enable/disable subwindow after reconfiguration
+ * @prot: sub window permissions
+ *
+ * Returns 0 upon success else error code < 0 returned
+ */
+int pamu_config_spaace(int liodn, u32 subwin_cnt, u32 subwin,
+                      phys_addr_t subwin_size, u32 omi, unsigned long rpn,
+                      u32 snoopid, u32 stashid, int enable, int prot)
+{
+       struct paace *paace;
+
+
+       /* setup sub-windows */
+       if (!subwin_cnt) {
+               pr_debug("Invalid subwindow count\n");
+               return -EINVAL;
+       }
+
+       paace = pamu_get_ppaace(liodn);
+       if (subwin > 0 && subwin < subwin_cnt && paace) {
+               paace = pamu_get_spaace(paace, subwin - 1);
+
+               if (paace && !(paace->addr_bitfields & PAACE_V_VALID)) {
+                       pamu_init_spaace(paace);
+                       set_bf(paace->addr_bitfields, SPAACE_AF_LIODN, liodn);
+               }
+       }
+
+       if (!paace) {
+               pr_debug("Invalid liodn entry\n");
+               return -ENOENT;
+       }
+
+       if (!is_power_of_2(subwin_size) || subwin_size < PAMU_PAGE_SIZE) {
+               pr_debug("subwindow size out of range, or not a power of 2\n");
+               return -EINVAL;
+       }
+
+       if (rpn == ULONG_MAX) {
+               pr_debug("real page number out of range\n");
+               return -EINVAL;
+       }
+
+       /* window size is 2^(WSE+1) bytes */
+       set_bf(paace->win_bitfields, PAACE_WIN_SWSE,
+              map_addrspace_size_to_wse(subwin_size));
+
+       set_bf(paace->impl_attr, PAACE_IA_ATM, PAACE_ATM_WINDOW_XLATE);
+       paace->twbah = rpn >> 20;
+       set_bf(paace->win_bitfields, PAACE_WIN_TWBAL, rpn);
+       set_bf(paace->addr_bitfields, PAACE_AF_AP, prot);
+
+       /* configure snoop id */
+       if (~snoopid != 0)
+               paace->domain_attr.to_host.snpid = snoopid;
+
+       /* set up operation mapping if it's configured */
+       if (omi < OME_NUMBER_ENTRIES) {
+               set_bf(paace->impl_attr, PAACE_IA_OTM, PAACE_OTM_INDEXED);
+               paace->op_encode.index_ot.omi = omi;
+       } else if (~omi != 0) {
+               pr_debug("bad operation mapping index: %d\n", omi);
+               return -EINVAL;
+       }
+
+       if (~stashid != 0)
+               set_bf(paace->impl_attr, PAACE_IA_CID, stashid);
+
+       smp_wmb();
+
+       if (enable)
+               set_bf(paace->addr_bitfields, PAACE_AF_V, PAACE_V_VALID);
+
+       mb();
+
+       return 0;
+}
+
+/**
+ * get_ome_index() - Returns the index in the operation mapping table
+ *                   for device.
+ * @omi_index: pointer for storing the index value
+ * @dev: target device whose of_node is matched against known bindings
+ *
+ * NOTE: *omi_index is left untouched when the device matches neither
+ * binding, so the caller must pre-initialize it.
+ */
+void get_ome_index(u32 *omi_index, struct device *dev)
+{
+       /* QMan portal accesses use the OMI_QMAN operation mapping */
+       if (of_device_is_compatible(dev->of_node, "fsl,qman-portal"))
+               *omi_index = OMI_QMAN;
+       /* QMan private-memory accesses use the OMI_QMAN_PRIV mapping */
+       if (of_device_is_compatible(dev->of_node, "fsl,qman"))
+               *omi_index = OMI_QMAN_PRIV;
+}
+
+/**
+ * get_stash_id - Returns stash destination id corresponding to a
+ *                cache type and vcpu.
+ * @stash_dest_hint: L1, L2 or L3
+ * @vcpu: vpcu target for a particular cache type.
+ *
+ * Returns stash id on success or ~(u32)0 on failure.
+ *
+ */
+u32 get_stash_id(u32 stash_dest_hint, u32 vcpu)
+{
+       const u32 *prop;
+       struct device_node *node;
+       u32 cache_level;
+       int len, found = 0;
+       int i;
+
+       /* Fastpath, exit early if L3/CPC cache is target for stashing */
+       if (stash_dest_hint == PAMU_ATTR_CACHE_L3) {
+               node = of_find_matching_node(NULL, l3_device_ids);
+               if (node) {
+                       prop = of_get_property(node, "cache-stash-id", NULL);
+                       if (!prop) {
+                               pr_debug("missing cache-stash-id at %s\n", node->full_name);
+                               of_node_put(node);
+                               return ~(u32)0;
+                       }
+                       of_node_put(node);
+                       return be32_to_cpup(prop);
+               }
+               return ~(u32)0;
+       }
+
+       /* Find the CPU node whose "reg" list contains this vcpu id */
+       for_each_node_by_type(node, "cpu") {
+               prop = of_get_property(node, "reg", &len);
+               /* skip malformed cpu nodes; len is only valid when prop is set */
+               if (!prop)
+                       continue;
+               for (i = 0; i < len / sizeof(u32); i++) {
+                       if (be32_to_cpup(&prop[i]) == vcpu) {
+                               found = 1;
+                               goto found_cpu_node;
+                       }
+               }
+       }
+found_cpu_node:
+
+       /* walk the cache hierarchy until the requested level is reached */
+       for (cache_level = PAMU_ATTR_CACHE_L1; (cache_level < PAMU_ATTR_CACHE_L3) && found; cache_level++) {
+               if (stash_dest_hint == cache_level) {
+                       prop = of_get_property(node, "cache-stash-id", NULL);
+                       if (!prop) {
+                               pr_debug("missing cache-stash-id at %s\n", node->full_name);
+                               of_node_put(node);
+                               return ~(u32)0;
+                       }
+                       of_node_put(node);
+                       return be32_to_cpup(prop);
+               }
+
+               prop = of_get_property(node, "next-level-cache", 0);
+               if (!prop) {
+                       pr_debug("can't find next-level-cache at %s\n",
+                               node->full_name);
+                       of_node_put(node);
+                       return ~(u32)0;  /* can't traverse any further */
+               }
+               of_node_put(node);
+
+               /* advance to next node in cache hierarchy */
+               node = of_find_node_by_phandle(*prop);
+               if (!node) {
+                       /*
+                        * node is NULL here; the previous code dereferenced
+                        * node->full_name in this message, which would oops.
+                        */
+                       pr_debug("Invalid node for cache hierarchy\n");
+                       return ~(u32)0;
+               }
+       }
+
+       pr_debug("stash dest not found for %d on vcpu %d\n",
+                 stash_dest_hint, vcpu);
+       return ~(u32)0;
+}
+
+/* Identify if the PAACT table entry belongs to QMAN, BMAN or QMAN Portal */
+#define QMAN_PAACE 1
+#define QMAN_PORTAL_PAACE 2
+#define BMAN_PAACE 3
+
+/**
+ * Configure operation mapping and stash destination for QMan and QMan
+ * portal PAACT entries.  Memory accesses to QMan and BMan private memory
+ * need not be coherent, so the coherency attribute is cleared for those
+ * entry types.
+ */
+static void setup_qbman_paace(struct paace *ppaace, int  paace_type)
+{
+       if (paace_type == QMAN_PAACE) {
+               set_bf(ppaace->impl_attr, PAACE_IA_OTM, PAACE_OTM_INDEXED);
+               ppaace->op_encode.index_ot.omi = OMI_QMAN_PRIV;
+               /* QMan private data is stashed into the L3/CPC cache */
+               set_bf(ppaace->impl_attr, PAACE_IA_CID,
+                      get_stash_id(PAMU_ATTR_CACHE_L3, 0));
+               set_bf(ppaace->domain_attr.to_host.coherency_required,
+                      PAACE_DA_HOST_CR, 0);
+       } else if (paace_type == QMAN_PORTAL_PAACE) {
+               set_bf(ppaace->impl_attr, PAACE_IA_OTM, PAACE_OTM_INDEXED);
+               ppaace->op_encode.index_ot.omi = OMI_QMAN;
+               /* DQRR and frame data are stashed into the L3/CPC cache */
+               set_bf(ppaace->impl_attr, PAACE_IA_CID,
+                      get_stash_id(PAMU_ATTR_CACHE_L3, 0));
+       } else if (paace_type == BMAN_PAACE) {
+               set_bf(ppaace->domain_attr.to_host.coherency_required,
+                      PAACE_DA_HOST_CR, 0);
+       }
+}
+
+/**
+ * Populate the static operation mapping table.  Each table index
+ * corresponds to a particular device class; PAMU uses the indexed entry
+ * to translate that device's transactions into the appropriate corenet
+ * transactions.
+ */
+static void __init setup_omt(struct ome *omt)
+{
+       struct ome *entry;
+
+       /* Configure the OMI_QMAN entry (QMan portal accesses) */
+       entry = &omt[OMI_QMAN];
+       entry->moe[IOE_READ_IDX] = EOE_VALID | EOE_READ;
+       entry->moe[IOE_EREAD0_IDX] = EOE_VALID | EOE_RSA;
+       entry->moe[IOE_WRITE_IDX] = EOE_VALID | EOE_WRITE;
+       entry->moe[IOE_EWRITE0_IDX] = EOE_VALID | EOE_WWSAO;
+       entry->moe[IOE_DIRECT0_IDX] = EOE_VALID | EOE_LDEC;
+       entry->moe[IOE_DIRECT1_IDX] = EOE_VALID | EOE_LDECPE;
+
+       /* Configure the OMI_FMAN entry */
+       entry = &omt[OMI_FMAN];
+       entry->moe[IOE_READ_IDX]  = EOE_VALID | EOE_READI;
+       entry->moe[IOE_WRITE_IDX] = EOE_VALID | EOE_WRITE;
+
+       /* Configure the OMI_QMAN_PRIV entry (QMan private memory) */
+       entry = &omt[OMI_QMAN_PRIV];
+       entry->moe[IOE_READ_IDX]  = EOE_VALID | EOE_READ;
+       entry->moe[IOE_WRITE_IDX] = EOE_VALID | EOE_WRITE;
+       entry->moe[IOE_EREAD0_IDX] = EOE_VALID | EOE_RSA;
+       entry->moe[IOE_EWRITE0_IDX] = EOE_VALID | EOE_WWSA;
+
+       /* Configure the OMI_CAAM entry */
+       entry = &omt[OMI_CAAM];
+       entry->moe[IOE_READ_IDX]  = EOE_VALID | EOE_READI;
+       entry->moe[IOE_WRITE_IDX] = EOE_VALID | EOE_WRITE;
+}
+
+/*
+ * Read the PAMU capability registers and record the maximum number of
+ * subwindows supported per liodn.
+ */
+static void get_pamu_cap_values(unsigned long pamu_reg_base)
+{
+       u32 pamu_pc3;
+
+       pamu_pc3 = in_be32((u32 *)(pamu_reg_base + PAMU_PC3));
+       /* Maximum number of subwindows per liodn: 2^(MWCE + 1) */
+       max_subwindow_count = 1 << (1 + PAMU_PC3_MWCE(pamu_pc3));
+}
+
+/*
+ * Setup PAMU registers pointing to PAACT, SPAACT and OMT.
+ * Programs one PAMU instance's memory-map registers with the physical
+ * base/limit of each table, then enables the PAMU with caching of the
+ * PPAACT/OMT and access-violation interrupts turned on.
+ * NOTE: pamu_reg_size is currently unused; always returns 0.
+ */
+int setup_one_pamu(unsigned long pamu_reg_base, unsigned long pamu_reg_size,
+                  phys_addr_t ppaact_phys, phys_addr_t spaact_phys,
+                  phys_addr_t omt_phys)
+{
+       u32 *pc;
+       struct pamu_mmap_regs *pamu_regs;
+
+       pc = (u32 *) (pamu_reg_base + PAMU_PC);
+       pamu_regs = (struct pamu_mmap_regs *)
+               (pamu_reg_base + PAMU_MMAP_REGS_BASE);
+
+       /* set up pointers to corenet control blocks */
+
+       /* primary PAACT: base (ppbah/ppbal) and one-past-end limit (pplah/pplal) */
+       out_be32(&pamu_regs->ppbah, upper_32_bits(ppaact_phys));
+       out_be32(&pamu_regs->ppbal, lower_32_bits(ppaact_phys));
+       ppaact_phys = ppaact_phys + PAACT_SIZE;
+       out_be32(&pamu_regs->pplah, upper_32_bits(ppaact_phys));
+       out_be32(&pamu_regs->pplal, lower_32_bits(ppaact_phys));
+
+       /* secondary PAACT: base and limit */
+       out_be32(&pamu_regs->spbah, upper_32_bits(spaact_phys));
+       out_be32(&pamu_regs->spbal, lower_32_bits(spaact_phys));
+       spaact_phys = spaact_phys + SPAACT_SIZE;
+       out_be32(&pamu_regs->splah, upper_32_bits(spaact_phys));
+       out_be32(&pamu_regs->splal, lower_32_bits(spaact_phys));
+
+       /* operation mapping table: base and limit */
+       out_be32(&pamu_regs->obah, upper_32_bits(omt_phys));
+       out_be32(&pamu_regs->obal, lower_32_bits(omt_phys));
+       omt_phys = omt_phys + OMT_SIZE;
+       out_be32(&pamu_regs->olah, upper_32_bits(omt_phys));
+       out_be32(&pamu_regs->olal, lower_32_bits(omt_phys));
+
+       /*
+        * set PAMU enable bit,
+        * allow ppaact & omt to be cached
+        * & enable PAMU access violation interrupts.
+        */
+
+       out_be32((u32 *)(pamu_reg_base + PAMU_PICS),
+                       PAMU_ACCESS_VIOLATION_ENABLE);
+       out_be32(pc, PAMU_PC_PE | PAMU_PC_OCE | PAMU_PC_SPCC | PAMU_PC_PPCC);
+       return 0;
+}
+
+/*
+ * Enable all device LIODNS.
+ * Walks every device-tree node carrying an "fsl,liodn" property and
+ * programs a primary PAACE for each listed LIODN: full 2^36-byte window
+ * at base 0, no address translation, all access permissions.  QMan/BMan
+ * nodes additionally get operation-mapping/stashing attributes via
+ * setup_qbman_paace().
+ */
+static void __init setup_liodns(void)
+{
+       int i, len;
+       struct paace *ppaace;
+       struct device_node *node = NULL;
+       const u32 *prop;
+
+       for_each_node_with_property(node, "fsl,liodn") {
+               prop = of_get_property(node, "fsl,liodn", &len);
+               /* a node may list several LIODNs; configure each one */
+               for (i = 0; i < len / sizeof(u32); i++) {
+                       int liodn;
+
+                       liodn = be32_to_cpup(&prop[i]);
+                       if (liodn >= PAACE_NUMBER_ENTRIES) {
+                               pr_debug("Invalid LIODN value %d\n", liodn);
+                               continue;
+                       }
+                       ppaace = pamu_get_ppaace(liodn);
+                       pamu_init_ppaace(ppaace);
+                       /* window size is 2^(WSE+1) bytes */
+                       set_bf(ppaace->addr_bitfields, PPAACE_AF_WSE, 35);
+                       ppaace->wbah = 0;
+                       set_bf(ppaace->addr_bitfields, PPAACE_AF_WBAL, 0);
+                       set_bf(ppaace->impl_attr, PAACE_IA_ATM,
+                               PAACE_ATM_NO_XLATE);
+                       set_bf(ppaace->addr_bitfields, PAACE_AF_AP,
+                               PAACE_AP_PERMS_ALL);
+                       if (of_device_is_compatible(node, "fsl,qman-portal"))
+                               setup_qbman_paace(ppaace, QMAN_PORTAL_PAACE);
+                       if (of_device_is_compatible(node, "fsl,qman"))
+                               setup_qbman_paace(ppaace, QMAN_PAACE);
+                       if (of_device_is_compatible(node, "fsl,bman"))
+                               setup_qbman_paace(ppaace, BMAN_PAACE);
+                       /* ensure the PAACE is fully written before enabling it */
+                       mb();
+                       pamu_enable_liodn(liodn);
+               }
+       }
+}
+
+/*
+ * Access-violation interrupt handler.  Scans every PAMU instance for a
+ * pending violation, dumps the diagnostic registers, then either disables
+ * the offending LIODN or — per erratum A-003638, when the violation is
+ * reported for an already-disabled LIODN — turns off violation reporting
+ * for that PAMU.
+ */
+irqreturn_t pamu_av_isr(int irq, void *arg)
+{
+       struct pamu_isr_data *data = arg;
+       phys_addr_t phys;
+       unsigned int i, j, ret;
+
+       pr_emerg("access violation interrupt\n");
+
+       for (i = 0; i < data->count; i++) {
+               void __iomem *p = data->pamu_reg_base + i * PAMU_OFFSET;
+               u32 pics = in_be32(p + PAMU_PICS);
+
+               if (pics & PAMU_ACCESS_VIOLATION_STAT) {
+                       u32 avs1 = in_be32(p + PAMU_AVS1);
+                       struct paace *paace;
+
+                       pr_emerg("POES1=%08x\n", in_be32(p + PAMU_POES1));
+                       pr_emerg("POES2=%08x\n", in_be32(p + PAMU_POES2));
+                       pr_emerg("AVS1=%08x\n", avs1);
+                       pr_emerg("AVS2=%08x\n", in_be32(p + PAMU_AVS2));
+                       pr_emerg("AVA=%016llx\n", make64(in_be32(p + PAMU_AVAH),
+                               in_be32(p + PAMU_AVAL)));
+                       pr_emerg("UDAD=%08x\n", in_be32(p + PAMU_UDAD));
+                       pr_emerg("POEA=%016llx\n", make64(in_be32(p + PAMU_POEAH),
+                               in_be32(p + PAMU_POEAL)));
+
+                       phys = make64(in_be32(p + PAMU_POEAH),
+                               in_be32(p + PAMU_POEAL));
+
+                       /* Assume that POEA points to a PAACE */
+                       if (phys) {
+                               /*
+                                * Renamed from "paace": the old name shadowed
+                                * the struct paace *paace declared above.
+                                */
+                               u32 *paace_words = phys_to_virt(phys);
+
+                               /* Only the first four words are relevant */
+                               for (j = 0; j < 4; j++)
+                                       pr_emerg("PAACE[%u]=%08x\n", j,
+                                                in_be32(paace_words + j));
+                       }
+
+                       /* clear access violation condition */
+                       out_be32((p + PAMU_AVS1), avs1 & PAMU_AV_MASK);
+                       paace = pamu_get_ppaace(avs1 >> PAMU_AVS1_LIODN_SHIFT);
+                       BUG_ON(!paace);
+                       /* check if we got a violation for a disabled LIODN */
+                       if (!get_bf(paace->addr_bitfields, PAACE_AF_V)) {
+                               /*
+                                * As per hardware erratum A-003638, access
+                                * violation can be reported for a disabled
+                                * LIODN. If we hit that condition, disable
+                                * access violation reporting.
+                                */
+                               pics &= ~PAMU_ACCESS_VIOLATION_ENABLE;
+                       } else {
+                               /* Disable the LIODN */
+                               ret = pamu_disable_liodn(avs1 >> PAMU_AVS1_LIODN_SHIFT);
+                               BUG_ON(ret);
+                               pr_emerg("Disabling liodn %x\n", avs1 >> PAMU_AVS1_LIODN_SHIFT);
+                       }
+                       out_be32((p + PAMU_PICS), pics);
+               }
+       }
+
+       return IRQ_HANDLED;
+}
+
+#define LAWAR_EN               0x80000000
+#define LAWAR_TARGET_MASK      0x0FF00000
+#define LAWAR_TARGET_SHIFT     20
+#define LAWAR_SIZE_MASK                0x0000003F
+#define LAWAR_CSDID_MASK       0x000FF000
+#define LAWAR_CSDID_SHIFT      12
+
+#define LAW_SIZE_4K            0xb
+
+/* One Local Access Window (LAW) register set in the LAW block of CCSR */
+struct ccsr_law {
+       u32     lawbarh;        /* LAWn base address high */
+       u32     lawbarl;        /* LAWn base address low */
+       u32     lawar;          /* LAWn attributes */
+       u32     reserved;
+};
+
+/*
+ * Create a coherence subdomain for a given memory block.
+ * Claims a free CSD ID in the CoreNet coherency fabric, then programs a
+ * free higher-priority LAW entry covering @phys..@phys+@size with that
+ * CSD ID so that only the snoopers in @csd_port_id observe the block.
+ */
+static int __init create_csd(phys_addr_t phys, size_t size, u32 csd_port_id)
+{
+       struct device_node *np;
+       const __be32 *iprop;
+       void __iomem *lac = NULL;       /* Local Access Control registers */
+       struct ccsr_law __iomem *law;
+       void __iomem *ccm = NULL;
+       u32 __iomem *csdids;
+       unsigned int i, num_laws, num_csds;
+       u32 law_target = 0;
+       u32 csd_id = 0;
+       int ret = 0;
+
+       np = of_find_compatible_node(NULL, NULL, "fsl,corenet-law");
+       if (!np)
+               return -ENODEV;
+
+       iprop = of_get_property(np, "fsl,num-laws", NULL);
+       if (!iprop) {
+               ret = -ENODEV;
+               goto error;
+       }
+
+       num_laws = be32_to_cpup(iprop);
+       if (!num_laws) {
+               ret = -ENODEV;
+               goto error;
+       }
+
+       lac = of_iomap(np, 0);
+       if (!lac) {
+               ret = -ENODEV;
+               goto error;
+       }
+
+       /* LAW registers are at offset 0xC00 */
+       law = lac + 0xC00;
+
+       of_node_put(np);
+
+       np = of_find_compatible_node(NULL, NULL, "fsl,corenet-cf");
+       if (!np) {
+               ret = -ENODEV;
+               goto error;
+       }
+
+       iprop = of_get_property(np, "fsl,ccf-num-csdids", NULL);
+       if (!iprop) {
+               ret = -ENODEV;
+               goto error;
+       }
+
+       num_csds = be32_to_cpup(iprop);
+       if (!num_csds) {
+               ret = -ENODEV;
+               goto error;
+       }
+
+       ccm = of_iomap(np, 0);
+       if (!ccm) {
+               ret = -ENOMEM;
+               goto error;
+       }
+
+       /* The undocumented CSDID registers are at offset 0x600 */
+       csdids = ccm + 0x600;
+
+       of_node_put(np);
+       np = NULL;
+
+       /* Find an unused coherence subdomain ID */
+       for (csd_id = 0; csd_id < num_csds; csd_id++) {
+               if (!csdids[csd_id])
+                       break;
+       }
+
+       if (csd_id == num_csds) {
+               /*
+                * No free CSD ID; without this check the store below would
+                * write one register past the end of the CSDID array.
+                */
+               ret = -ENOENT;
+               goto error;
+       }
+
+       /* Store the Port ID in the (undocumented) proper CIDMRxx register */
+       csdids[csd_id] = csd_port_id;
+
+       /* Find the DDR LAW that maps to our buffer. */
+       for (i = 0; i < num_laws; i++) {
+               if (law[i].lawar & LAWAR_EN) {
+                       phys_addr_t law_start, law_end;
+
+                       law_start = make64(law[i].lawbarh, law[i].lawbarl);
+                       law_end = law_start +
+                               (2ULL << (law[i].lawar & LAWAR_SIZE_MASK));
+
+                       if (law_start <= phys && phys < law_end) {
+                               law_target = law[i].lawar & LAWAR_TARGET_MASK;
+                               break;
+                       }
+               }
+       }
+
+       /*
+        * i == 0 means the covering LAW is the highest-priority entry, so
+        * no free higher-priority slot can exist; i == num_laws means no
+        * enabled LAW covers phys at all.
+        */
+       if (i == 0 || i == num_laws) {
+               ret = -ENOENT;
+               goto error;
+       }
+
+       /* Find a free LAW entry at higher priority (lower index) */
+       while (law[--i].lawar & LAWAR_EN) {
+               if (i == 0) {
+                       /* No higher priority LAW slots available */
+                       ret = -ENOENT;
+                       goto error;
+               }
+       }
+
+       /* program base first, then enable via lawar (write order matters) */
+       law[i].lawbarh = upper_32_bits(phys);
+       law[i].lawbarl = lower_32_bits(phys);
+       wmb();
+       law[i].lawar = LAWAR_EN | law_target | (csd_id << LAWAR_CSDID_SHIFT) |
+               (LAW_SIZE_4K + get_order(size));
+       wmb();
+
+error:
+       if (ccm)
+               iounmap(ccm);
+
+       if (lac)
+               iounmap(lac);
+
+       if (np)
+               of_node_put(np);
+
+       return ret;
+}
+
+/*
+ * Table of SVRs and the corresponding PORT_ID values. Port ID corresponds to a
+ * bit map of snoopers for a given range of memory mapped by a LAW.
+ *
+ * All future CoreNet-enabled SOCs will have this erratum(A-004510) fixed, so this
+ * table should never need to be updated.  SVRs are guaranteed to be unique, so
+ * there is no worry that a future SOC will inadvertently have one of these
+ * values.
+ *
+ * Lookups against this table mask out the security (E) bit of the SVR
+ * first (see SVR_SECURITY below).
+ */
+static const struct {
+       u32 svr;
+       u32 port_id;
+} port_id_map[] = {
+       {0x82100010, 0xFF000000},       /* P2040 1.0 */
+       {0x82100011, 0xFF000000},       /* P2040 1.1 */
+       {0x82100110, 0xFF000000},       /* P2041 1.0 */
+       {0x82100111, 0xFF000000},       /* P2041 1.1 */
+       {0x82110310, 0xFF000000},       /* P3041 1.0 */
+       {0x82110311, 0xFF000000},       /* P3041 1.1 */
+       {0x82010020, 0xFFF80000},       /* P4040 2.0 */
+       {0x82000020, 0xFFF80000},       /* P4080 2.0 */
+       {0x82210010, 0xFC000000},       /* P5010 1.0 */
+       {0x82210020, 0xFC000000},       /* P5010 2.0 */
+       {0x82200010, 0xFC000000},       /* P5020 1.0 */
+       {0x82050010, 0xFF800000},       /* P5021 1.0 */
+       {0x82040010, 0xFF800000},       /* P5040 1.0 */
+};
+
+#define SVR_SECURITY   0x80000 /* The Security (E) bit */
+
+/*
+ * Probe: map all PAMU instances, install the access-violation ISR,
+ * allocate the PAACT/SPAACT/OMT tables in one naturally-aligned block,
+ * optionally wrap them in a coherence subdomain (erratum A-004510),
+ * program every PAMU, and finally enable DMA for all LIODNs in the
+ * device tree.
+ */
+static int __init fsl_pamu_probe(struct platform_device *pdev)
+{
+       void __iomem *pamu_regs = NULL;
+       struct ccsr_guts __iomem *guts_regs = NULL;
+       u32 pamubypenr, pamu_counter;
+       unsigned long pamu_reg_off;
+       unsigned long pamu_reg_base;
+       struct pamu_isr_data *data = NULL;
+       struct device_node *guts_node;
+       u64 size;
+       struct page *p;
+       int ret = 0;
+       int irq;
+       phys_addr_t ppaact_phys;
+       phys_addr_t spaact_phys;
+       phys_addr_t omt_phys;
+       size_t mem_size = 0;
+       unsigned int order = 0;
+       u32 csd_port_id = 0;
+       unsigned i;
+       /*
+        * enumerate all PAMUs and allocate and setup PAMU tables
+        * for each of them,
+        * NOTE : All PAMUs share the same LIODN tables.
+        */
+
+       pamu_regs = of_iomap(pdev->dev.of_node, 0);
+       if (!pamu_regs) {
+               dev_err(&pdev->dev, "ioremap of PAMU node failed\n");
+               return -ENOMEM;
+       }
+       /* "reg" was just mapped above, so this lookup is expected to succeed */
+       of_get_address(pdev->dev.of_node, 0, &size, NULL);
+
+       irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
+       if (irq == NO_IRQ) {
+               dev_warn(&pdev->dev, "no interrupts listed in PAMU node\n");
+               /*
+                * Previously this path left ret == 0, so probe reported
+                * success after tearing everything down.  The ISR is
+                * required, so fail explicitly.
+                */
+               ret = -ENODEV;
+               goto error_no_irq;
+       }
+
+       data = kzalloc(sizeof(struct pamu_isr_data), GFP_KERNEL);
+       if (!data) {
+               dev_err(&pdev->dev, "PAMU isr data memory allocation failed\n");
+               ret = -ENOMEM;
+               /* irq was never requested, so skip free_irq() */
+               goto error_no_irq;
+       }
+       data->pamu_reg_base = pamu_regs;
+       data->count = size / PAMU_OFFSET;
+
+       /* The ISR needs access to the regs, so we won't iounmap them */
+       ret = request_irq(irq, pamu_av_isr, 0, "pamu", data);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "error %i installing ISR for irq %i\n",
+                       ret, irq);
+               /* request failed, so there is nothing to free_irq() */
+               goto error_no_irq;
+       }
+
+       guts_node = of_find_matching_node(NULL, guts_device_ids);
+       if (!guts_node) {
+               dev_err(&pdev->dev, "could not find GUTS node %s\n",
+                       pdev->dev.of_node->full_name);
+               ret = -ENODEV;
+               goto error;
+       }
+
+       guts_regs = of_iomap(guts_node, 0);
+       of_node_put(guts_node);
+       if (!guts_regs) {
+               dev_err(&pdev->dev, "ioremap of GUTS node failed\n");
+               ret = -ENODEV;
+               goto error;
+       }
+
+       /* read in the PAMU capability registers */
+       get_pamu_cap_values((unsigned long)pamu_regs);
+       /*
+        * To simplify the allocation of a coherency domain, we allocate the
+        * PAACT and the OMT in the same memory buffer.  Unfortunately, this
+        * wastes more memory compared to allocating the buffers separately.
+        */
+       /* Determine how much memory we need */
+       mem_size = (PAGE_SIZE << get_order(PAACT_SIZE)) +
+               (PAGE_SIZE << get_order(SPAACT_SIZE)) +
+               (PAGE_SIZE << get_order(OMT_SIZE));
+       order = get_order(mem_size);
+
+       p = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
+       if (!p) {
+               dev_err(&pdev->dev, "unable to allocate PAACT/SPAACT/OMT block\n");
+               ret = -ENOMEM;
+               goto error;
+       }
+
+       ppaact = page_address(p);
+       ppaact_phys = page_to_phys(p);
+
+       /* Make sure the memory is naturally aligned */
+       if (ppaact_phys & ((PAGE_SIZE << order) - 1)) {
+               dev_err(&pdev->dev, "PAACT/OMT block is unaligned\n");
+               ret = -ENOMEM;
+               goto error;
+       }
+
+       spaact = (void *)ppaact + (PAGE_SIZE << get_order(PAACT_SIZE));
+       omt = (void *)spaact + (PAGE_SIZE << get_order(SPAACT_SIZE));
+
+       dev_dbg(&pdev->dev, "ppaact virt=%p phys=0x%llx\n", ppaact,
+               (unsigned long long) ppaact_phys);
+
+       /* Check to see if we need to implement the work-around on this SOC */
+
+       /* Determine the Port ID for our coherence subdomain */
+       for (i = 0; i < ARRAY_SIZE(port_id_map); i++) {
+               if (port_id_map[i].svr == (mfspr(SPRN_SVR) & ~SVR_SECURITY)) {
+                       csd_port_id = port_id_map[i].port_id;
+                       dev_dbg(&pdev->dev, "found matching SVR %08x\n",
+                               port_id_map[i].svr);
+                       break;
+               }
+       }
+
+       if (csd_port_id) {
+               dev_dbg(&pdev->dev, "creating coherency subdomain at address "
+                       "0x%llx, size %zu, port id 0x%08x", ppaact_phys,
+                       mem_size, csd_port_id);
+
+               ret = create_csd(ppaact_phys, mem_size, csd_port_id);
+               if (ret) {
+                       dev_err(&pdev->dev, "could not create coherence "
+                               "subdomain\n");
+                       /*
+                        * Was "return ret;", which leaked the irq, isr data,
+                        * both iomaps and the PAACT pages.
+                        */
+                       goto error;
+               }
+       }
+
+       spaact_phys = virt_to_phys(spaact);
+       omt_phys = virt_to_phys(omt);
+
+       spaace_pool = gen_pool_create(ilog2(sizeof(struct paace)), -1);
+       if (!spaace_pool) {
+               ret = -ENOMEM;
+               dev_err(&pdev->dev, "PAMU : failed to allocate spaace gen pool\n");
+               goto error;
+       }
+
+       ret = gen_pool_add(spaace_pool, (unsigned long)spaact, SPAACT_SIZE, -1);
+       if (ret)
+               goto error_genpool;
+
+       pamubypenr = in_be32(&guts_regs->pamubypenr);
+
+       for (pamu_reg_off = 0, pamu_counter = 0x80000000; pamu_reg_off < size;
+            pamu_reg_off += PAMU_OFFSET, pamu_counter >>= 1) {
+
+               pamu_reg_base = (unsigned long) pamu_regs + pamu_reg_off;
+               setup_one_pamu(pamu_reg_base, pamu_reg_off, ppaact_phys,
+                                spaact_phys, omt_phys);
+               /* Disable PAMU bypass for this PAMU */
+               pamubypenr &= ~pamu_counter;
+       }
+
+       setup_omt(omt);
+
+       /* Enable all relevant PAMU(s) */
+       out_be32(&guts_regs->pamubypenr, pamubypenr);
+
+       iounmap(guts_regs);
+
+       /* Enable DMA for the LIODNs in the device tree*/
+
+       setup_liodns();
+
+       return 0;
+
+error_genpool:
+       gen_pool_destroy(spaace_pool);
+
+error:
+       /* only reached after request_irq() succeeded */
+       free_irq(irq, data);
+
+error_no_irq:
+       kfree(data);
+
+       if (pamu_regs)
+               iounmap(pamu_regs);
+
+       if (guts_regs)
+               iounmap(guts_regs);
+
+       if (ppaact)
+               free_pages((unsigned long)ppaact, order);
+
+       ppaact = NULL;
+
+       return ret;
+}
+
+/* Match both the SOC-specific and generic PAMU bindings */
+static const struct of_device_id fsl_of_pamu_ids[] = {
+       {
+               .compatible = "fsl,p4080-pamu",
+       },
+       {
+               .compatible = "fsl,pamu",
+       },
+       {},
+};
+
+/*
+ * NOTE: no .of_match_table — the device is created manually in
+ * fsl_pamu_init() below, so matching happens by driver name.
+ */
+static struct platform_driver fsl_of_pamu_driver = {
+       .driver = {
+               .name = "fsl-of-pamu",
+               .owner = THIS_MODULE,
+       },
+       .probe = fsl_pamu_probe,
+};
+
+/*
+ * Early bootstrap: register the driver and create its platform device by
+ * hand so that probe runs at arch_initcall time, before PAMU clients load.
+ * Returns 0 on success or a negative errno.
+ */
+static __init int fsl_pamu_init(void)
+{
+       struct platform_device *pdev = NULL;
+       struct device_node *np;
+       int ret;
+
+       /*
+        * The normal OF process calls the probe function at some
+        * indeterminate later time, after most drivers have loaded.  This is
+        * too late for us, because PAMU clients (like the Qman driver)
+        * depend on PAMU being initialized early.
+        *
+        * So instead, we "manually" call our probe function by creating the
+        * platform devices ourselves.
+        */
+
+       /*
+        * We assume that there is only one PAMU node in the device tree.  A
+        * single PAMU node represents all of the PAMU devices in the SOC
+        * already.   Everything else already makes that assumption, and the
+        * binding for the PAMU nodes doesn't allow for any parent-child
+        * relationships anyway.  In other words, support for more than one
+        * PAMU node would require significant changes to a lot of code.
+        */
+
+       np = of_find_compatible_node(NULL, NULL, "fsl,pamu");
+       if (!np) {
+               pr_err("could not find a PAMU node\n");
+               return -ENODEV;
+       }
+
+       ret = platform_driver_register(&fsl_of_pamu_driver);
+       if (ret) {
+               pr_err("could not register driver (err=%i)\n", ret);
+               goto error_driver_register;
+       }
+
+       pdev = platform_device_alloc("fsl-of-pamu", 0);
+       if (!pdev) {
+               pr_err("could not allocate device %s\n",
+                      np->full_name);
+               ret = -ENOMEM;
+               goto error_device_alloc;
+       }
+       /* attach the DT node so probe can find regs/interrupts */
+       pdev->dev.of_node = of_node_get(np);
+
+       ret = pamu_domain_init();
+       if (ret)
+               goto error_device_add;
+
+       /* adding the device triggers fsl_pamu_probe() synchronously */
+       ret = platform_device_add(pdev);
+       if (ret) {
+               pr_err("could not add device %s (err=%i)\n",
+                      np->full_name, ret);
+               goto error_device_add;
+       }
+
+       return 0;
+
+/* unwind in reverse order of the setup steps above */
+error_device_add:
+       of_node_put(pdev->dev.of_node);
+       pdev->dev.of_node = NULL;
+
+       platform_device_put(pdev);
+
+error_device_alloc:
+       platform_driver_unregister(&fsl_of_pamu_driver);
+
+error_driver_register:
+       of_node_put(np);
+
+       return ret;
+}
diff --git a/drivers/iommu/fsl_pamu.h b/drivers/iommu/fsl_pamu.h
new file mode 100644 (file)
index 0000000..8fc1a12
--- /dev/null
@@ -0,0 +1,410 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ *
+ * Copyright (C) 2013 Freescale Semiconductor, Inc.
+ *
+ */
+
+#ifndef __FSL_PAMU_H
+#define __FSL_PAMU_H
+
+#include <asm/fsl_pamu_stash.h>
+
+/* Bit Field macros
+ *     v = bit field variable; m = mask, m##_SHIFT = shift, x = value to load
+ */
+#define set_bf(v, m, x)                (v = ((v) & ~(m)) | (((x) << (m##_SHIFT)) & (m)))
+#define get_bf(v, m)           (((v) & (m)) >> (m##_SHIFT))
+
+/* PAMU CCSR space */
+#define PAMU_PGC 0x00000000     /* Allows all peripheral accesses */
+#define PAMU_PE 0x40000000      /* enable PAMU                    */
+
+/* PAMU_OFFSET to the next pamu space in ccsr */
+#define PAMU_OFFSET 0x1000
+
+#define PAMU_MMAP_REGS_BASE 0
+
+/*
+ * PAMU memory-mapped table base/limit registers, laid out at offset
+ * PAMU_MMAP_REGS_BASE in each PAMU's register space.
+ *
+ * NOTE(review): field names suggest high/low 32-bit halves of the primary
+ * PAACT base/limit (ppba*/ppla*), secondary PAACT base/limit (spba*/spla*)
+ * and OMT base/limit (oba*/ola*) addresses — confirm against the PAMU
+ * reference manual before relying on this interpretation.
+ */
+struct pamu_mmap_regs {
+       u32 ppbah;
+       u32 ppbal;
+       u32 pplah;
+       u32 pplal;
+       u32 spbah;
+       u32 spbal;
+       u32 splah;
+       u32 splal;
+       u32 obah;
+       u32 obal;
+       u32 olah;
+       u32 olal;
+};
+
+/* PAMU Error Registers */
+#define PAMU_POES1 0x0040
+#define PAMU_POES2 0x0044
+#define PAMU_POEAH 0x0048
+#define PAMU_POEAL 0x004C
+#define PAMU_AVS1  0x0050
+#define PAMU_AVS1_AV    0x1
+#define PAMU_AVS1_OTV   0x6
+#define PAMU_AVS1_APV   0x78
+#define PAMU_AVS1_WAV   0x380
+#define PAMU_AVS1_LAV   0x1c00
+#define PAMU_AVS1_GCV   0x2000
+#define PAMU_AVS1_PDV   0x4000
+#define PAMU_AV_MASK    (PAMU_AVS1_AV | PAMU_AVS1_OTV | PAMU_AVS1_APV | PAMU_AVS1_WAV \
+                       | PAMU_AVS1_LAV | PAMU_AVS1_GCV | PAMU_AVS1_PDV)
+#define PAMU_AVS1_LIODN_SHIFT 16
+#define PAMU_LAV_LIODN_NOT_IN_PPAACT 0x400
+
+#define PAMU_AVS2  0x0054
+#define PAMU_AVAH  0x0058
+#define PAMU_AVAL  0x005C
+#define PAMU_EECTL 0x0060
+#define PAMU_EEDIS 0x0064
+#define PAMU_EEINTEN 0x0068
+#define PAMU_EEDET 0x006C
+#define PAMU_EEATTR 0x0070
+#define PAMU_EEAHI 0x0074
+#define PAMU_EEALO 0x0078
+#define PAMU_EEDHI 0X007C
+#define PAMU_EEDLO 0x0080
+#define PAMU_EECC  0x0084
+#define PAMU_UDAD  0x0090
+
+/* PAMU Revision Registers */
+#define PAMU_PR1 0x0BF8
+#define PAMU_PR2 0x0BFC
+
+/* PAMU version mask */
+#define PAMU_PR1_MASK 0xffff
+
+/* PAMU Capabilities Registers */
+#define PAMU_PC1 0x0C00
+#define PAMU_PC2 0x0C04
+#define PAMU_PC3 0x0C08
+#define PAMU_PC4 0x0C0C
+
+/* PAMU Control Register */
+#define PAMU_PC 0x0C10
+
+/* PAMU control defs */
+#define PAMU_CONTROL 0x0C10
+#define PAMU_PC_PGC 0x80000000  /* PAMU gate closed bit */
+#define PAMU_PC_PE   0x40000000 /* PAMU enable bit */
+#define PAMU_PC_SPCC 0x00000010 /* sPAACE cache enable */
+#define PAMU_PC_PPCC 0x00000001 /* pPAACE cache enable */
+#define PAMU_PC_OCE  0x00001000 /* OMT cache enable */
+
+#define PAMU_PFA1 0x0C14
+#define PAMU_PFA2 0x0C18
+
+#define PAMU_PC2_MLIODN(X) ((X) >> 16)
+#define PAMU_PC3_MWCE(X) (((X) >> 21) & 0xf)
+
+/* PAMU Interrupt control and Status Register */
+#define PAMU_PICS 0x0C1C
+#define PAMU_ACCESS_VIOLATION_STAT   0x8
+#define PAMU_ACCESS_VIOLATION_ENABLE 0x4
+
+/* PAMU Debug Registers */
+#define PAMU_PD1 0x0F00
+#define PAMU_PD2 0x0F04
+#define PAMU_PD3 0x0F08
+#define PAMU_PD4 0x0F0C
+
+#define PAACE_AP_PERMS_DENIED  0x0
+#define PAACE_AP_PERMS_QUERY   0x1
+#define PAACE_AP_PERMS_UPDATE  0x2
+#define PAACE_AP_PERMS_ALL     0x3
+
+#define PAACE_DD_TO_HOST       0x0
+#define PAACE_DD_TO_IO         0x1
+#define PAACE_PT_PRIMARY       0x0
+#define PAACE_PT_SECONDARY     0x1
+#define PAACE_V_INVALID        0x0
+#define PAACE_V_VALID          0x1
+#define PAACE_MW_SUBWINDOWS    0x1
+
+#define PAACE_WSE_4K           0xB
+#define PAACE_WSE_8K           0xC
+#define PAACE_WSE_16K          0xD
+#define PAACE_WSE_32K          0xE
+#define PAACE_WSE_64K          0xF
+#define PAACE_WSE_128K         0x10
+#define PAACE_WSE_256K         0x11
+#define PAACE_WSE_512K         0x12
+#define PAACE_WSE_1M           0x13
+#define PAACE_WSE_2M           0x14
+#define PAACE_WSE_4M           0x15
+#define PAACE_WSE_8M           0x16
+#define PAACE_WSE_16M          0x17
+#define PAACE_WSE_32M          0x18
+#define PAACE_WSE_64M          0x19
+#define PAACE_WSE_128M         0x1A
+#define PAACE_WSE_256M         0x1B
+#define PAACE_WSE_512M         0x1C
+#define PAACE_WSE_1G           0x1D
+#define PAACE_WSE_2G           0x1E
+#define PAACE_WSE_4G           0x1F
+
+#define PAACE_DID_PCI_EXPRESS_1 0x00
+#define PAACE_DID_PCI_EXPRESS_2 0x01
+#define PAACE_DID_PCI_EXPRESS_3 0x02
+#define PAACE_DID_PCI_EXPRESS_4 0x03
+#define PAACE_DID_LOCAL_BUS     0x04
+#define PAACE_DID_SRIO          0x0C
+#define PAACE_DID_MEM_1         0x10
+#define PAACE_DID_MEM_2         0x11
+#define PAACE_DID_MEM_3         0x12
+#define PAACE_DID_MEM_4         0x13
+#define PAACE_DID_MEM_1_2       0x14
+#define PAACE_DID_MEM_3_4       0x15
+#define PAACE_DID_MEM_1_4       0x16
+#define PAACE_DID_BM_SW_PORTAL  0x18
+#define PAACE_DID_PAMU          0x1C
+#define PAACE_DID_CAAM          0x21
+#define PAACE_DID_QM_SW_PORTAL  0x3C
+#define PAACE_DID_CORE0_INST    0x80
+#define PAACE_DID_CORE0_DATA    0x81
+#define PAACE_DID_CORE1_INST    0x82
+#define PAACE_DID_CORE1_DATA    0x83
+#define PAACE_DID_CORE2_INST    0x84
+#define PAACE_DID_CORE2_DATA    0x85
+#define PAACE_DID_CORE3_INST    0x86
+#define PAACE_DID_CORE3_DATA    0x87
+#define PAACE_DID_CORE4_INST    0x88
+#define PAACE_DID_CORE4_DATA    0x89
+#define PAACE_DID_CORE5_INST    0x8A
+#define PAACE_DID_CORE5_DATA    0x8B
+#define PAACE_DID_CORE6_INST    0x8C
+#define PAACE_DID_CORE6_DATA    0x8D
+#define PAACE_DID_CORE7_INST    0x8E
+#define PAACE_DID_CORE7_DATA    0x8F
+#define PAACE_DID_BROADCAST     0xFF
+
+#define PAACE_ATM_NO_XLATE      0x00
+#define PAACE_ATM_WINDOW_XLATE  0x01
+#define PAACE_ATM_PAGE_XLATE    0x02
+#define PAACE_ATM_WIN_PG_XLATE  \
+                (PAACE_ATM_WINDOW_XLATE | PAACE_ATM_PAGE_XLATE)
+#define PAACE_OTM_NO_XLATE      0x00
+#define PAACE_OTM_IMMEDIATE     0x01
+#define PAACE_OTM_INDEXED       0x02
+#define PAACE_OTM_RESERVED      0x03
+
+#define PAACE_M_COHERENCE_REQ   0x01
+
+#define PAACE_PID_0             0x0
+#define PAACE_PID_1             0x1
+#define PAACE_PID_2             0x2
+#define PAACE_PID_3             0x3
+#define PAACE_PID_4             0x4
+#define PAACE_PID_5             0x5
+#define PAACE_PID_6             0x6
+#define PAACE_PID_7             0x7
+
+#define PAACE_TCEF_FORMAT0_8B   0x00
+#define PAACE_TCEF_FORMAT1_RSVD 0x01
+/*
+ * Hard-coded value for the PAACT size, chosen to accommodate the
+ * maximum LIODN value generated by u-boot.
+ */
+#define PAACE_NUMBER_ENTRIES    0x500
+/* Hard coded value for the SPAACT size */
+#define SPAACE_NUMBER_ENTRIES  0x800
+
+#define        OME_NUMBER_ENTRIES      16
+
+/* PAACE Bit Field Defines */
+#define PPAACE_AF_WBAL                 0xfffff000
+#define PPAACE_AF_WBAL_SHIFT           12
+#define PPAACE_AF_WSE                  0x00000fc0
+#define PPAACE_AF_WSE_SHIFT            6
+#define PPAACE_AF_MW                   0x00000020
+#define PPAACE_AF_MW_SHIFT             5
+
+#define SPAACE_AF_LIODN                        0xffff0000
+#define SPAACE_AF_LIODN_SHIFT          16
+
+#define PAACE_AF_AP                    0x00000018
+#define PAACE_AF_AP_SHIFT              3
+#define PAACE_AF_DD                    0x00000004
+#define PAACE_AF_DD_SHIFT              2
+#define PAACE_AF_PT                    0x00000002
+#define PAACE_AF_PT_SHIFT              1
+#define PAACE_AF_V                     0x00000001
+#define PAACE_AF_V_SHIFT               0
+
+#define PAACE_DA_HOST_CR               0x80
+#define PAACE_DA_HOST_CR_SHIFT         7
+
+#define PAACE_IA_CID                   0x00FF0000
+#define PAACE_IA_CID_SHIFT             16
+#define PAACE_IA_WCE                   0x000000F0
+#define PAACE_IA_WCE_SHIFT             4
+#define PAACE_IA_ATM                   0x0000000C
+#define PAACE_IA_ATM_SHIFT             2
+#define PAACE_IA_OTM                   0x00000003
+#define PAACE_IA_OTM_SHIFT             0
+
+#define PAACE_WIN_TWBAL                        0xfffff000
+#define PAACE_WIN_TWBAL_SHIFT          12
+#define PAACE_WIN_SWSE                 0x00000fc0
+#define PAACE_WIN_SWSE_SHIFT           6
+
+/* PAMU Data Structures */
+/* primary / secondary paact structure */
+/*
+ * PAACE (PAMU access control entry): one entry of the primary (PAACT) or
+ * secondary (SPAACT) access control table — see PAACT_SIZE/SPAACT_SIZE
+ * below.  Bit-field members are accessed with the set_bf()/get_bf()
+ * helpers using the PAACE_*/PPAACE_*/SPAACE_* mask+shift macros above.
+ */
+struct paace {
+       /* PAACE Offset 0x00 */
+       u32 wbah;                               /* only valid for Primary PAACE */
+       u32 addr_bitfields;             /* See P/S PAACE_AF_* */
+
+       /* PAACE Offset 0x08 */
+       /* Interpretation of first 32 bits dependent on DD above */
+       union {
+               struct {
+                       /* Destination ID, see PAACE_DID_* defines */
+                       u8 did;
+                       /* Partition ID */
+                       u8 pid;
+                       /* Snoop ID */
+                       u8 snpid;
+                       /* coherency_required : 1 reserved : 7 */
+                       u8 coherency_required; /* See PAACE_DA_* */
+               } to_host;
+               struct {
+                       /* Destination ID, see PAACE_DID_* defines */
+                       u8  did;
+                       u8  reserved1;
+                       u16 reserved2;
+               } to_io;
+       } domain_attr;
+
+       /* Implementation attributes + window count + address & operation translation modes */
+       u32 impl_attr;                  /* See PAACE_IA_* */
+
+       /* PAACE Offset 0x10 */
+       /* Translated window base address */
+       u32 twbah;
+       u32 win_bitfields;                      /* See PAACE_WIN_* */
+
+       /* PAACE Offset 0x18 */
+       /* first secondary paace entry */
+       u32 fspi;                               /* only valid for Primary PAACE */
+       union {
+               struct {
+                       u8 ioea;
+                       u8 moea;
+                       u8 ioeb;
+                       u8 moeb;
+               } immed_ot;
+               struct {
+                       u16 reserved;
+                       u16 omi;
+               } index_ot;
+       } op_encode;
+
+       /* PAACE Offsets 0x20-0x38 */
+       u32 reserved[8];                        /* not currently implemented */
+};
+
+/* OME : Operation mapping entry
+ * MOE : Mapped Operation Encoding
+ * The operation mapping table is a table of operation mapping entries (OMEs).
+ * The index of a particular OME is programmed in the PAACE entry used to
+ * translate inbound I/O operations for an LIODN.  The OMT is used for
+ * translation specifically in the indexed translation mode.  Each OME
+ * contains 128 mapped operation encodings (MOEs), one byte per MOE.
+ */
+#define NUM_MOE 128
+/* One operation mapping entry: a 128-byte array of MOEs (see comment above) */
+struct ome {
+       u8 moe[NUM_MOE];
+} __attribute__((packed));
+
+#define PAACT_SIZE              (sizeof(struct paace) * PAACE_NUMBER_ENTRIES)
+#define SPAACT_SIZE              (sizeof(struct paace) * SPAACE_NUMBER_ENTRIES)
+#define OMT_SIZE                (sizeof(struct ome) * OME_NUMBER_ENTRIES)
+
+#define PAMU_PAGE_SHIFT 12
+#define PAMU_PAGE_SIZE  4096ULL
+
+#define IOE_READ        0x00
+#define IOE_READ_IDX    0x00
+#define IOE_WRITE       0x81
+#define IOE_WRITE_IDX   0x01
+#define IOE_EREAD0      0x82    /* Enhanced read type 0 */
+#define IOE_EREAD0_IDX  0x02    /* Enhanced read type 0 */
+#define IOE_EWRITE0     0x83    /* Enhanced write type 0 */
+#define IOE_EWRITE0_IDX 0x03    /* Enhanced write type 0 */
+#define IOE_DIRECT0     0x84    /* Directive type 0 */
+#define IOE_DIRECT0_IDX 0x04    /* Directive type 0 */
+#define IOE_EREAD1      0x85    /* Enhanced read type 1 */
+#define IOE_EREAD1_IDX  0x05    /* Enhanced read type 1 */
+#define IOE_EWRITE1     0x86    /* Enhanced write type 1 */
+#define IOE_EWRITE1_IDX 0x06    /* Enhanced write type 1 */
+#define IOE_DIRECT1     0x87    /* Directive type 1 */
+#define IOE_DIRECT1_IDX 0x07    /* Directive type 1 */
+#define IOE_RAC         0x8c    /* Read with Atomic clear */
+#define IOE_RAC_IDX     0x0c    /* Read with Atomic clear */
+#define IOE_RAS         0x8d    /* Read with Atomic set */
+#define IOE_RAS_IDX     0x0d    /* Read with Atomic set */
+#define IOE_RAD         0x8e    /* Read with Atomic decrement */
+#define IOE_RAD_IDX     0x0e    /* Read with Atomic decrement */
+#define IOE_RAI         0x8f    /* Read with Atomic increment */
+#define IOE_RAI_IDX     0x0f    /* Read with Atomic increment */
+
+#define EOE_READ        0x00
+#define EOE_WRITE       0x01
+#define EOE_RAC         0x0c    /* Read with Atomic clear */
+#define EOE_RAS         0x0d    /* Read with Atomic set */
+#define EOE_RAD         0x0e    /* Read with Atomic decrement */
+#define EOE_RAI         0x0f    /* Read with Atomic increment */
+#define EOE_LDEC        0x10    /* Load external cache */
+#define EOE_LDECL       0x11    /* Load external cache with stash lock */
+#define EOE_LDECPE      0x12    /* Load external cache with preferred exclusive */
+#define EOE_LDECPEL     0x13    /* Load external cache with preferred exclusive and lock */
+#define EOE_LDECFE      0x14    /* Load external cache with forced exclusive */
+#define EOE_LDECFEL     0x15    /* Load external cache with forced exclusive and lock */
+#define EOE_RSA         0x16    /* Read with stash allocate */
+#define EOE_RSAU        0x17    /* Read with stash allocate and unlock */
+#define EOE_READI       0x18    /* Read with invalidate */
+#define EOE_RWNITC      0x19    /* Read with no intention to cache */
+#define EOE_WCI         0x1a    /* Write cache inhibited */
+#define EOE_WWSA        0x1b    /* Write with stash allocate */
+#define EOE_WWSAL       0x1c    /* Write with stash allocate and lock */
+#define EOE_WWSAO       0x1d    /* Write with stash allocate only */
+#define EOE_WWSAOL      0x1e    /* Write with stash allocate only and lock */
+#define EOE_VALID       0x80
+
+/* Function prototypes */
+int pamu_domain_init(void);
+int pamu_enable_liodn(int liodn);
+int pamu_disable_liodn(int liodn);
+void pamu_free_subwins(int liodn);
+int pamu_config_ppaace(int liodn, phys_addr_t win_addr, phys_addr_t win_size,
+                      u32 omi, unsigned long rpn, u32 snoopid, uint32_t stashid,
+                      u32 subwin_cnt, int prot);
+int pamu_config_spaace(int liodn, u32 subwin_cnt, u32 subwin_addr,
+                      phys_addr_t subwin_size, u32 omi, unsigned long rpn,
+                      uint32_t snoopid, u32 stashid, int enable, int prot);
+
+u32 get_stash_id(u32 stash_dest_hint, u32 vcpu);
+void get_ome_index(u32 *omi_index, struct device *dev);
+int  pamu_update_paace_stash(int liodn, u32 subwin, u32 value);
+int pamu_disable_spaace(int liodn, u32 subwin);
+u32 pamu_get_max_subwin_cnt(void);
+
+#endif  /* __FSL_PAMU_H */
diff --git a/drivers/iommu/fsl_pamu_domain.c b/drivers/iommu/fsl_pamu_domain.c
new file mode 100644 (file)
index 0000000..c857c30
--- /dev/null
@@ -0,0 +1,1172 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ *
+ * Copyright (C) 2013 Freescale Semiconductor, Inc.
+ * Author: Varun Sethi <varun.sethi@freescale.com>
+ *
+ */
+
+#define pr_fmt(fmt)    "fsl-pamu-domain: %s: " fmt, __func__
+
+#include <linux/init.h>
+#include <linux/iommu.h>
+#include <linux/notifier.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/of_platform.h>
+#include <linux/bootmem.h>
+#include <linux/err.h>
+#include <asm/io.h>
+#include <asm/bitops.h>
+
+#include <asm/pci-bridge.h>
+#include <sysdev/fsl_pci.h>
+
+#include "fsl_pamu_domain.h"
+#include "pci.h"
+
+/*
+ * Global spinlock that needs to be held while
+ * configuring PAMU.
+ */
+static DEFINE_SPINLOCK(iommu_lock);
+
+static struct kmem_cache *fsl_pamu_domain_cache;
+static struct kmem_cache *iommu_devinfo_cache;
+static DEFINE_SPINLOCK(device_domain_lock);
+
+/*
+ * Create the two kmem caches used by this driver: one for DMA domains
+ * (struct fsl_dma_domain) and one for per-device attachment records
+ * (struct device_domain_info).  Returns 0 on success or -ENOMEM; on
+ * failure of the second cache the first is destroyed again.
+ */
+static int __init iommu_init_mempool(void)
+{
+
+       fsl_pamu_domain_cache = kmem_cache_create("fsl_pamu_domain",
+                                        sizeof(struct fsl_dma_domain),
+                                        0,
+                                        SLAB_HWCACHE_ALIGN,
+
+                                        NULL);
+       if (!fsl_pamu_domain_cache) {
+               pr_debug("Couldn't create fsl iommu_domain cache\n");
+               return -ENOMEM;
+       }
+
+       iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
+                                        sizeof(struct device_domain_info),
+                                        0,
+                                        SLAB_HWCACHE_ALIGN,
+                                        NULL);
+       if (!iommu_devinfo_cache) {
+               pr_debug("Couldn't create devinfo cache\n");
+               kmem_cache_destroy(fsl_pamu_domain_cache);
+               return -ENOMEM;
+       }
+
+       return 0;
+}
+
+/*
+ * Translate @iova to a physical address using the domain's DMA window
+ * configuration.  Returns 0 when the windows/geometry are not configured
+ * or when the window covering @iova is not valid (0 doubles as the
+ * "no translation" sentinel here).
+ */
+static phys_addr_t get_phys_addr(struct fsl_dma_domain *dma_domain, dma_addr_t iova)
+{
+       u32 win_cnt = dma_domain->win_cnt;
+       struct dma_window *win_ptr =
+                               &dma_domain->win_arr[0];
+       struct iommu_domain_geometry *geom;
+
+       geom = &dma_domain->iommu_domain->geometry;
+
+       if (!win_cnt || !dma_domain->geom_size) {
+               pr_debug("Number of windows/geometry not configured for the domain\n");
+               return 0;
+       }
+
+       if (win_cnt > 1) {
+               u64 subwin_size;
+               dma_addr_t subwin_iova;
+               u32 wnd;
+
+               /*
+                * Geometry is split into win_cnt equal sub-windows; the
+                * shift/mask arithmetic assumes power-of-two sizes (enforced
+                * by check_size() when windows are configured).
+                */
+               subwin_size = dma_domain->geom_size >> ilog2(win_cnt);
+               subwin_iova = iova & ~(subwin_size - 1);
+               /* index of the sub-window that covers iova */
+               wnd = (subwin_iova - geom->aperture_start) >> ilog2(subwin_size);
+               win_ptr = &dma_domain->win_arr[wnd];
+       }
+
+       if (win_ptr->valid)
+               return (win_ptr->paddr + (iova & (win_ptr->size - 1)));
+
+       return 0;
+}
+
+/*
+ * Program a SPAACE entry for every valid DMA sub-window of the domain for
+ * the given LIODN.  Sub-windows other than the first (i > 0) are passed
+ * enable=1 to pamu_config_spaace().  Returns 0 on success (including when
+ * no sub-window is valid) or the first error from pamu_config_spaace().
+ */
+static int map_subwins(int liodn, struct fsl_dma_domain *dma_domain)
+{
+       struct dma_window *sub_win_ptr =
+                               &dma_domain->win_arr[0];
+       int i, ret = 0;         /* 0 if no sub-window is valid (was uninitialized) */
+       unsigned long rpn, flags;
+
+       for (i = 0; i < dma_domain->win_cnt; i++) {
+               if (sub_win_ptr[i].valid) {
+                       rpn = sub_win_ptr[i].paddr >>
+                                PAMU_PAGE_SHIFT;
+                       spin_lock_irqsave(&iommu_lock, flags);
+                       ret = pamu_config_spaace(liodn, dma_domain->win_cnt, i,
+                                                sub_win_ptr[i].size,
+                                                ~(u32)0,
+                                                rpn,
+                                                dma_domain->snoop_id,
+                                                dma_domain->stash_id,
+                                                (i > 0) ? 1 : 0,
+                                                sub_win_ptr[i].prot);
+                       spin_unlock_irqrestore(&iommu_lock, flags);
+                       if (ret) {
+                               pr_debug("PAMU SPAACE configuration failed for liodn %d\n",
+                                        liodn);
+                               return ret;
+                       }
+               }
+       }
+
+       return ret;
+}
+
+/*
+ * Program the primary PAACE for @liodn from the domain's single DMA
+ * window (win_arr[0]), based at the domain geometry's aperture_start.
+ * Returns 0 on success or the error from pamu_config_ppaace().
+ */
+static int map_win(int liodn, struct fsl_dma_domain *dma_domain)
+{
+       int ret;
+       struct dma_window *wnd = &dma_domain->win_arr[0];
+       phys_addr_t wnd_addr = dma_domain->iommu_domain->geometry.aperture_start;
+       unsigned long flags;
+
+       spin_lock_irqsave(&iommu_lock, flags);
+       ret = pamu_config_ppaace(liodn, wnd_addr,
+                                wnd->size,
+                                ~(u32)0,
+                                wnd->paddr >> PAMU_PAGE_SHIFT,
+                                dma_domain->snoop_id, dma_domain->stash_id,
+                                0, wnd->prot);
+       spin_unlock_irqrestore(&iommu_lock, flags);
+       if (ret)
+               pr_debug("PAMU PAACE configuration failed for liodn %d\n",
+                       liodn);
+
+       return ret;
+}
+
+/*
+ * Map the DMA window(s) for one LIODN: multi-sub-window domains are
+ * handled by map_subwins(), single-window domains by map_win().
+ */
+static int map_liodn(int liodn, struct fsl_dma_domain *dma_domain)
+{
+       return (dma_domain->win_cnt > 1) ?
+               map_subwins(liodn, dma_domain) :
+               map_win(liodn, dma_domain);
+}
+
+/*
+ * Update the mapping of window @wnd_nr for @liodn after a window change:
+ * reprograms the SPAACE entry (multi-window domains) or the primary PAACE
+ * (single-window domains).  Returns 0 on success or the PAMU config error.
+ */
+static int update_liodn(int liodn, struct fsl_dma_domain *dma_domain, u32 wnd_nr)
+{
+       int ret;
+       struct dma_window *wnd = &dma_domain->win_arr[wnd_nr];
+       unsigned long flags;
+
+       spin_lock_irqsave(&iommu_lock, flags);
+       if (dma_domain->win_cnt > 1) {
+               ret = pamu_config_spaace(liodn, dma_domain->win_cnt, wnd_nr,
+                                        wnd->size,
+                                        ~(u32)0,
+                                        wnd->paddr >> PAMU_PAGE_SHIFT,
+                                        dma_domain->snoop_id,
+                                        dma_domain->stash_id,
+                                        (wnd_nr > 0) ? 1 : 0,
+                                        wnd->prot);
+               if (ret)
+                       pr_debug("Subwindow reconfiguration failed for liodn %d\n", liodn);
+       } else {
+               phys_addr_t wnd_addr;
+
+               wnd_addr = dma_domain->iommu_domain->geometry.aperture_start;
+
+               ret = pamu_config_ppaace(liodn, wnd_addr,
+                                        wnd->size,
+                                        ~(u32)0,
+                                        wnd->paddr >> PAMU_PAGE_SHIFT,
+                                       dma_domain->snoop_id, dma_domain->stash_id,
+                                       0, wnd->prot);
+               if (ret)
+                       pr_debug("Window reconfiguration failed for liodn %d\n", liodn);
+       }
+
+       spin_unlock_irqrestore(&iommu_lock, flags);
+
+       return ret;
+}
+
+/*
+ * Update the stash destination field to @val in every (S)PAACE window of
+ * @liodn.  Fails with -EINVAL if no windows have been configured yet.
+ * All updates are done under iommu_lock.
+ */
+static int update_liodn_stash(int liodn, struct fsl_dma_domain *dma_domain,
+                                u32 val)
+{
+       int ret = 0, i;
+       unsigned long flags;
+
+       spin_lock_irqsave(&iommu_lock, flags);
+       if (!dma_domain->win_arr) {
+               pr_debug("Windows not configured, stash destination update failed for liodn %d\n", liodn);
+               spin_unlock_irqrestore(&iommu_lock, flags);
+               return -EINVAL;
+       }
+
+       for (i = 0; i < dma_domain->win_cnt; i++) {
+               ret = pamu_update_paace_stash(liodn, i, val);
+               if (ret) {
+                       pr_debug("Failed to update SPAACE %d field for liodn %d\n ", i, liodn);
+                       spin_unlock_irqrestore(&iommu_lock, flags);
+                       return ret;
+               }
+       }
+
+       spin_unlock_irqrestore(&iommu_lock, flags);
+
+       return ret;
+}
+
+/*
+ * Set the geometry parameters for a LIODN: disable the LIODN, program its
+ * primary PAACE with the aperture base/size and window count, then (for
+ * win_cnt > 1) disable and program each equal-size sub-window SPAACE.
+ * Windows are left disabled here; they are enabled later when mapped.
+ * Returns 0 on success or the first PAMU configuration error.
+ */
+static int pamu_set_liodn(int liodn, struct device *dev,
+                          struct fsl_dma_domain *dma_domain,
+                          struct iommu_domain_geometry *geom_attr,
+                          u32 win_cnt)
+{
+       phys_addr_t window_addr, window_size;
+       phys_addr_t subwin_size;
+       int ret = 0, i;
+       u32 omi_index = ~(u32)0;
+       unsigned long flags;
+
+       /*
+        * Configure the omi_index at the geometry setup time.
+        * This is a static value which depends on the type of
+        * device and would not change thereafter.
+        */
+       get_ome_index(&omi_index, dev);
+
+       window_addr = geom_attr->aperture_start;
+       window_size = dma_domain->geom_size;
+
+       spin_lock_irqsave(&iommu_lock, flags);
+       ret = pamu_disable_liodn(liodn);
+       if (!ret)
+               ret = pamu_config_ppaace(liodn, window_addr, window_size, omi_index,
+                                        0, dma_domain->snoop_id,
+                                        dma_domain->stash_id, win_cnt, 0);
+       spin_unlock_irqrestore(&iommu_lock, flags);
+       if (ret) {
+               pr_debug("PAMU PAACE configuration failed for liodn %d, win_cnt =%d\n", liodn, win_cnt);
+               return ret;
+       }
+
+       if (win_cnt > 1) {
+               /* equal split of the geometry; win_cnt is a power of two */
+               subwin_size = window_size >> ilog2(win_cnt);
+               for (i = 0; i < win_cnt; i++) {
+                       spin_lock_irqsave(&iommu_lock, flags);
+                       ret = pamu_disable_spaace(liodn, i);
+                       if (!ret)
+                               ret = pamu_config_spaace(liodn, win_cnt, i,
+                                                        subwin_size, omi_index,
+                                                        0, dma_domain->snoop_id,
+                                                        dma_domain->stash_id,
+                                                        0, 0);
+                       spin_unlock_irqrestore(&iommu_lock, flags);
+                       if (ret) {
+                               pr_debug("PAMU SPAACE configuration failed for liodn %d\n", liodn);
+                               return ret;
+                       }
+               }
+       }
+
+       return ret;
+}
+
+/*
+ * Validate a window size/address pair: @size must be a power of two and
+ * at least PAMU_PAGE_SIZE, and @iova must be aligned to @size.
+ * Returns 0 if valid, -EINVAL otherwise.
+ */
+static int check_size(u64 size, dma_addr_t iova)
+{
+       /*
+        * Size must be a power of two and at least be equal
+        * to PAMU page size.
+        */
+       if (!is_power_of_2(size) || size < PAMU_PAGE_SIZE) {
+               pr_debug("%s: size too small or not a power of two\n", __func__);
+               return -EINVAL;
+       }
+
+       /* iova must be aligned to the window size */
+       if (iova & (size - 1)) {
+               pr_debug("%s: address is not aligned with window size\n", __func__);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+/*
+ * Allocate and initialize a zeroed fsl_dma_domain from the domain cache.
+ * Stash and snoop IDs start as ~0 (unset sentinels) and the window count
+ * defaults to the hardware maximum.  Returns NULL on allocation failure.
+ */
+static struct fsl_dma_domain *iommu_alloc_dma_domain(void)
+{
+       struct fsl_dma_domain *domain;
+
+       domain = kmem_cache_zalloc(fsl_pamu_domain_cache, GFP_KERNEL);
+       if (!domain)
+               return NULL;
+
+       domain->stash_id = ~(u32)0;
+       domain->snoop_id = ~(u32)0;
+       domain->win_cnt = pamu_get_max_subwin_cnt();
+       domain->geom_size = 0;
+
+       INIT_LIST_HEAD(&domain->devices);
+
+       spin_lock_init(&domain->domain_lock);
+
+       return domain;
+}
+
+/*
+ * Return the device_domain_info previously stored in the device's
+ * archdata by attach_device(), or NULL if the device is not attached.
+ */
+static inline struct device_domain_info *find_domain(struct device *dev)
+{
+       return dev->archdata.iommu_domain;
+}
+
+/*
+ * Drop one device attachment: unlink it from the domain's device list,
+ * free its sub-windows (if any) and disable its LIODN under iommu_lock,
+ * then clear the archdata back-pointer and free the info record under
+ * device_domain_lock.  Called by detach_device() with the domain's
+ * domain_lock held (hence the bare list_del()).
+ */
+static void remove_device_ref(struct device_domain_info *info, u32 win_cnt)
+{
+       unsigned long flags;
+
+       list_del(&info->link);
+       spin_lock_irqsave(&iommu_lock, flags);
+       if (win_cnt > 1)
+               pamu_free_subwins(info->liodn);
+       pamu_disable_liodn(info->liodn);
+       spin_unlock_irqrestore(&iommu_lock, flags);
+       spin_lock_irqsave(&device_domain_lock, flags);
+       info->dev->archdata.iommu_domain = NULL;
+       kmem_cache_free(iommu_devinfo_cache, info);
+       spin_unlock_irqrestore(&device_domain_lock, flags);
+}
+
+/*
+ * Detach @dev from @dma_domain; a NULL @dev detaches every device on the
+ * domain's list.  Runs under the domain's domain_lock.
+ */
+static void detach_device(struct device *dev, struct fsl_dma_domain *dma_domain)
+{
+       struct device_domain_info *info, *tmp;
+       unsigned long flags;
+
+       spin_lock_irqsave(&dma_domain->domain_lock, flags);
+       /* Remove the device from the domain device list */
+       list_for_each_entry_safe(info, tmp, &dma_domain->devices, link) {
+               if (!dev || (info->dev == dev))
+                       remove_device_ref(info, dma_domain->win_cnt);
+       }
+       spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
+}
+
+/*
+ * Attach @dev (with LIODN @liodn) to @dma_domain: allocate a
+ * device_domain_info, link it into the domain's device list and, for the
+ * device's first LIODN, record it in dev->archdata.iommu_domain.  If the
+ * device is currently attached to a different domain it is detached first.
+ */
+static void attach_device(struct fsl_dma_domain *dma_domain, int liodn, struct device *dev)
+{
+       struct device_domain_info *info, *old_domain_info;
+       unsigned long flags;
+
+       spin_lock_irqsave(&device_domain_lock, flags);
+       /*
+        * Check here if the device is already attached to domain or not.
+        * If the device is already attached to a domain detach it.
+        */
+       old_domain_info = find_domain(dev);
+       if (old_domain_info && old_domain_info->domain != dma_domain) {
+               spin_unlock_irqrestore(&device_domain_lock, flags);
+               detach_device(dev, old_domain_info->domain);
+               spin_lock_irqsave(&device_domain_lock, flags);
+       }
+
+       info = kmem_cache_zalloc(iommu_devinfo_cache, GFP_ATOMIC);
+       if (!info) {
+               /* GFP_ATOMIC allocation can fail; bail out instead of
+                * dereferencing a NULL pointer below. */
+               pr_debug("Could not allocate device_domain_info\n");
+               spin_unlock_irqrestore(&device_domain_lock, flags);
+               return;
+       }
+
+       info->dev = dev;
+       info->liodn = liodn;
+       info->domain = dma_domain;
+
+       list_add(&info->link, &dma_domain->devices);
+       /*
+        * In case of devices with multiple LIODNs just store
+        * the info for the first LIODN as all
+        * LIODNs share the same domain
+        */
+       if (!old_domain_info)
+               dev->archdata.iommu_domain = info;
+       spin_unlock_irqrestore(&device_domain_lock, flags);
+
+}
+
+/*
+ * iommu_ops .iova_to_phys callback: translate @iova via the domain's
+ * window configuration.  Returns 0 for addresses outside the aperture
+ * or with no valid mapping.
+ */
+static phys_addr_t fsl_pamu_iova_to_phys(struct iommu_domain *domain,
+                                           dma_addr_t iova)
+{
+       struct fsl_dma_domain *dma_domain = domain->priv;
+
+       if ((iova < domain->geometry.aperture_start) ||
+               iova > (domain->geometry.aperture_end))
+               return 0;
+
+       return get_phys_addr(dma_domain, iova);
+}
+
+/*
+ * iommu_ops .domain_has_cap callback: the only capability advertised is
+ * cache-coherent DMA.
+ */
+static int fsl_pamu_domain_has_cap(struct iommu_domain *domain,
+                                     unsigned long cap)
+{
+       return cap == IOMMU_CAP_CACHE_COHERENCY;
+}
+
+/*
+ * iommu_ops .domain_destroy callback: detach every device still on the
+ * domain (detach_device(NULL, ...) removes all entries) and release the
+ * fsl_dma_domain back to its cache.
+ */
+static void fsl_pamu_domain_destroy(struct iommu_domain *domain)
+{
+       struct fsl_dma_domain *dma_domain = domain->priv;
+
+       domain->priv = NULL;
+
+       /* remove all the devices from the device list */
+       detach_device(NULL, dma_domain);
+
+       dma_domain->enabled = 0;
+       dma_domain->mapped = 0;
+
+       kmem_cache_free(fsl_pamu_domain_cache, dma_domain);
+}
+
+/*
+ * iommu_ops .domain_init callback: allocate the driver-private
+ * fsl_dma_domain and set the default aperture.  Returns 0 or -ENOMEM.
+ */
+static int fsl_pamu_domain_init(struct iommu_domain *domain)
+{
+       struct fsl_dma_domain *dma_domain;
+
+       dma_domain = iommu_alloc_dma_domain();
+       if (!dma_domain) {
+               pr_debug("dma_domain allocation failed\n");
+               return -ENOMEM;
+       }
+       domain->priv = dma_domain;
+       dma_domain->iommu_domain = domain;
+       /* default geometry: 64 GB, i.e. the maximum system address (2^36) */
+       domain->geometry.aperture_start = 0;
+       domain->geometry.aperture_end = (1ULL << 36) - 1;
+       domain->geometry.force_aperture = true;
+
+       return 0;
+}
+
+/*
+ * Configure geometry settings for all LIODNs associated with the domain.
+ * Stops at the first pamu_set_liodn() failure and returns its error.
+ */
+static int pamu_set_domain_geometry(struct fsl_dma_domain *dma_domain,
+                                   struct iommu_domain_geometry *geom_attr,
+                                   u32 win_cnt)
+{
+       struct device_domain_info *info;
+       int ret = 0;
+
+       list_for_each_entry(info, &dma_domain->devices, link) {
+               ret = pamu_set_liodn(info->liodn, info->dev, dma_domain,
+                                     geom_attr, win_cnt);
+               if (ret)
+                       break;
+       }
+
+       return ret;
+}
+
+/*
+ * Update stash destination for all LIODNs associated with the domain.
+ * Stops and returns the error of the first LIODN that fails.
+ */
+static int update_domain_stash(struct fsl_dma_domain *dma_domain, u32 val)
+{
+       struct device_domain_info *info;
+       int ret = 0;
+
+       list_for_each_entry(info, &dma_domain->devices, link) {
+               ret = update_liodn_stash(info->liodn, dma_domain, val);
+               if (ret)
+                       break;
+       }
+
+       return ret;
+}
+
+/*
+ * Update domain mappings for all LIODNs associated with the domain.
+ * Stops and returns the error of the first LIODN that fails.
+ */
+static int update_domain_mapping(struct fsl_dma_domain *dma_domain, u32 wnd_nr)
+{
+       struct device_domain_info *info;
+       int ret = 0;
+
+       list_for_each_entry(info, &dma_domain->devices, link) {
+               ret = update_liodn(info->liodn, dma_domain, wnd_nr);
+               if (ret)
+                       break;
+       }
+       return ret;
+}
+
+/*
+ * Disable window wnd_nr for every LIODN attached to the domain.
+ * For a single-window domain the whole LIODN is disabled (and the
+ * domain marked disabled); otherwise only the subwindow is disabled.
+ */
+static int disable_domain_win(struct fsl_dma_domain *dma_domain, u32 wnd_nr)
+{
+       struct device_domain_info *info;
+       int ret = 0;
+
+       list_for_each_entry(info, &dma_domain->devices, link) {
+               if (dma_domain->win_cnt == 1 && dma_domain->enabled) {
+                       ret = pamu_disable_liodn(info->liodn);
+                       if (!ret)
+                               dma_domain->enabled = 0;
+               } else {
+                       ret = pamu_disable_spaace(info->liodn, wnd_nr);
+               }
+       }
+
+       return ret;
+}
+
+/*
+ * IOMMU callback: disable (unmap) DMA window wnd_nr of the domain.
+ * Silently returns if windows are not configured or the index is
+ * out of range; on success the window is marked invalid and the
+ * domain's mapped count is decremented.
+ */
+static void fsl_pamu_window_disable(struct iommu_domain *domain, u32 wnd_nr)
+{
+       struct fsl_dma_domain *dma_domain = domain->priv;
+       unsigned long flags;
+       int ret;
+
+       spin_lock_irqsave(&dma_domain->domain_lock, flags);
+       if (!dma_domain->win_arr) {
+               pr_debug("Number of windows not configured\n");
+               spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
+               return;
+       }
+
+       if (wnd_nr >= dma_domain->win_cnt) {
+               pr_debug("Invalid window index\n");
+               spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
+               return;
+       }
+
+       if (dma_domain->win_arr[wnd_nr].valid) {
+               ret = disable_domain_win(dma_domain, wnd_nr);
+               if (!ret) {
+                       dma_domain->win_arr[wnd_nr].valid = 0;
+                       dma_domain->mapped--;
+               }
+       }
+
+       spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
+
+}
+
+/*
+ * IOMMU callback: enable (map) DMA window wnd_nr with the given physical
+ * base, size and protection, then program the mapping for every LIODN
+ * attached to the domain.  The window must currently be disabled;
+ * returns -EBUSY otherwise.
+ */
+static int fsl_pamu_window_enable(struct iommu_domain *domain, u32 wnd_nr,
+                                 phys_addr_t paddr, u64 size, int prot)
+{
+       struct fsl_dma_domain *dma_domain = domain->priv;
+       struct dma_window *wnd;
+       int pamu_prot = 0;
+       int ret;
+       unsigned long flags;
+       u64 win_size;
+
+       /* Translate generic IOMMU prot flags to PAMU access permissions */
+       if (prot & IOMMU_READ)
+               pamu_prot |= PAACE_AP_PERMS_QUERY;
+       if (prot & IOMMU_WRITE)
+               pamu_prot |= PAACE_AP_PERMS_UPDATE;
+
+       spin_lock_irqsave(&dma_domain->domain_lock, flags);
+       if (!dma_domain->win_arr) {
+               pr_debug("Number of windows not configured\n");
+               spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
+               return -ENODEV;
+       }
+
+       if (wnd_nr >= dma_domain->win_cnt) {
+               pr_debug("Invalid window index\n");
+               spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
+               return -EINVAL;
+       }
+
+       /* Each subwindow covers an equal share of the geometry */
+       win_size = dma_domain->geom_size >> ilog2(dma_domain->win_cnt);
+       if (size > win_size) {
+               pr_debug("Invalid window size\n");
+               spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
+               return -EINVAL;
+       }
+
+       if (dma_domain->win_cnt == 1) {
+               if (dma_domain->enabled) {
+                       pr_debug("Disable the window before updating the mapping\n");
+                       spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
+                       return -EBUSY;
+               }
+
+               ret = check_size(size, domain->geometry.aperture_start);
+               if (ret) {
+                       pr_debug("Aperture start not aligned to the size\n");
+                       spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
+                       return -EINVAL;
+               }
+       }
+
+       wnd = &dma_domain->win_arr[wnd_nr];
+       if (!wnd->valid) {
+               wnd->paddr = paddr;
+               wnd->size = size;
+               wnd->prot = pamu_prot;
+
+               ret = update_domain_mapping(dma_domain, wnd_nr);
+               if (!ret) {
+                       wnd->valid = 1;
+                       dma_domain->mapped++;
+               }
+       } else {
+               pr_debug("Disable the window before updating the mapping\n");
+               ret = -EBUSY;
+       }
+
+       spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
+
+       return ret;
+}
+
+/*
+ * Attach the LIODN to the DMA domain and configure the geometry
+ * and window mappings.  Called with the LIODN list read from the
+ * device tree; stops at the first invalid LIODN or programming failure.
+ */
+static int handle_attach_device(struct fsl_dma_domain *dma_domain,
+                                struct device *dev, const u32 *liodn,
+                                int num)
+{
+       unsigned long flags;
+       struct iommu_domain *domain = dma_domain->iommu_domain;
+       int ret = 0;
+       int i;
+
+       spin_lock_irqsave(&dma_domain->domain_lock, flags);
+       for (i = 0; i < num; i++) {
+
+               /* Ensure that LIODN value is valid */
+               if (liodn[i] >= PAACE_NUMBER_ENTRIES) {
+                       pr_debug("Invalid liodn %d, attach device failed for %s\n",
+                               liodn[i], dev->of_node->full_name);
+                       ret = -EINVAL;
+                       break;
+               }
+
+               attach_device(dma_domain, liodn[i], dev);
+               /*
+                * Check if geometry has already been configured
+                * for the domain. If yes, set the geometry for
+                * the LIODN.
+                */
+               if (dma_domain->win_arr) {
+                       /* win_cnt of 0 means a single window (no subwindows) */
+                       u32 win_cnt = dma_domain->win_cnt > 1 ? dma_domain->win_cnt : 0;
+                       ret = pamu_set_liodn(liodn[i], dev, dma_domain,
+                                             &domain->geometry,
+                                             win_cnt);
+                       if (ret)
+                               break;
+                       if (dma_domain->mapped) {
+                               /*
+                                * Create window/subwindow mapping for
+                                * the LIODN.
+                                */
+                               ret = map_liodn(liodn[i], dma_domain);
+                               if (ret)
+                                       break;
+                       }
+               }
+       }
+       spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
+
+       return ret;
+}
+
+/*
+ * IOMMU callback: attach a device to the domain using the LIODN(s)
+ * from its "fsl,liodn" device-tree property.  For PCI devices the
+ * LIODN of the PCI controller (programmed by u-boot) is used instead.
+ */
+static int fsl_pamu_attach_device(struct iommu_domain *domain,
+                                 struct device *dev)
+{
+       struct fsl_dma_domain *dma_domain = domain->priv;
+       const u32 *liodn;
+       u32 liodn_cnt;
+       int len, ret = 0;
+       struct pci_dev *pdev = NULL;
+       struct pci_controller *pci_ctl;
+
+       /*
+        * Use LIODN of the PCI controller while attaching a
+        * PCI device.
+        */
+       if (dev->bus == &pci_bus_type) {
+               pdev = to_pci_dev(dev);
+               pci_ctl = pci_bus_to_host(pdev->bus);
+               /*
+                * make dev point to pci controller device
+                * so we can get the LIODN programmed by
+                * u-boot.
+                */
+               dev = pci_ctl->parent;
+       }
+
+       liodn = of_get_property(dev->of_node, "fsl,liodn", &len);
+       if (liodn) {
+               liodn_cnt = len / sizeof(u32);
+               ret = handle_attach_device(dma_domain, dev,
+                                        liodn, liodn_cnt);
+       } else {
+               pr_debug("missing fsl,liodn property at %s\n",
+                         dev->of_node->full_name);
+               ret = -EINVAL;
+       }
+
+       return ret;
+}
+
+/*
+ * IOMMU callback: detach a device from the domain.  Mirrors
+ * fsl_pamu_attach_device: for PCI devices the PCI controller's
+ * device node supplies the "fsl,liodn" property.
+ */
+static void fsl_pamu_detach_device(struct iommu_domain *domain,
+                                     struct device *dev)
+{
+       struct fsl_dma_domain *dma_domain = domain->priv;
+       const u32 *prop;
+       int len;
+       struct pci_dev *pdev = NULL;
+       struct pci_controller *pci_ctl;
+
+       /*
+        * Use LIODN of the PCI controller while detaching a
+        * PCI device.
+        */
+       if (dev->bus == &pci_bus_type) {
+               pdev = to_pci_dev(dev);
+               pci_ctl = pci_bus_to_host(pdev->bus);
+               /*
+                * make dev point to pci controller device
+                * so we can get the LIODN programmed by
+                * u-boot.
+                */
+               dev = pci_ctl->parent;
+       }
+
+       prop = of_get_property(dev->of_node, "fsl,liodn", &len);
+       if (prop)
+               detach_device(dev, dma_domain);
+       else
+               pr_debug("missing fsl,liodn property at %s\n",
+                         dev->of_node->full_name);
+}
+
+/*
+ * Set the domain geometry (aperture) from DOMAIN_ATTR_GEOMETRY data.
+ * The aperture start must be aligned to the aperture size and
+ * force_aperture must be set; the geometry can only be changed while
+ * the domain is disabled.
+ */
+static int configure_domain_geometry(struct iommu_domain *domain, void *data)
+{
+       struct iommu_domain_geometry *geom_attr = data;
+       struct fsl_dma_domain *dma_domain = domain->priv;
+       dma_addr_t geom_size;
+       unsigned long flags;
+
+       geom_size = geom_attr->aperture_end - geom_attr->aperture_start + 1;
+       /*
+        * Sanity check the geometry size. Also, we do not support
+        * DMA outside of the geometry.
+        */
+       if (check_size(geom_size, geom_attr->aperture_start) ||
+           !geom_attr->force_aperture) {
+               pr_debug("Invalid PAMU geometry attributes\n");
+               return -EINVAL;
+       }
+
+       spin_lock_irqsave(&dma_domain->domain_lock, flags);
+       if (dma_domain->enabled) {
+               pr_debug("Can't set geometry attributes as domain is active\n");
+               spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
+               return -EBUSY;
+       }
+
+       /* Copy the domain geometry information */
+       memcpy(&domain->geometry, geom_attr,
+              sizeof(struct iommu_domain_geometry));
+       dma_domain->geom_size = geom_size;
+
+       spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
+
+       return 0;
+}
+
+/*
+ * Set the domain stash attribute and propagate the resulting stash
+ * destination to all LIODNs attached to the domain.  A stash_id of
+ * ~0 from get_stash_id() indicates invalid cache/cpu attributes.
+ */
+static int configure_domain_stash(struct fsl_dma_domain *dma_domain, void *data)
+{
+       struct pamu_stash_attribute *stash_attr = data;
+       unsigned long flags;
+       int ret;
+
+       spin_lock_irqsave(&dma_domain->domain_lock, flags);
+
+       memcpy(&dma_domain->dma_stash, stash_attr,
+                sizeof(struct pamu_stash_attribute));
+
+       dma_domain->stash_id = get_stash_id(stash_attr->cache,
+                                           stash_attr->cpu);
+       if (dma_domain->stash_id == ~(u32)0) {
+               pr_debug("Invalid stash attributes\n");
+               spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
+               return -EINVAL;
+       }
+
+       ret = update_domain_stash(dma_domain, dma_domain->stash_id);
+
+       spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
+
+       return ret;
+}
+
+/* Configure domain dma state i.e. enable/disable DMA*/
+static int configure_domain_dma_state(struct fsl_dma_domain *dma_domain, bool enable)
+{
+       struct device_domain_info *info;
+       unsigned long flags;
+       int ret;
+
+       spin_lock_irqsave(&dma_domain->domain_lock, flags);
+
+       /* DMA can only be enabled once a window mapping exists */
+       if (enable && !dma_domain->mapped) {
+               pr_debug("Can't enable DMA domain without valid mapping\n");
+               spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
+               return -ENODEV;
+       }
+
+       dma_domain->enabled = enable;
+       list_for_each_entry(info, &dma_domain->devices,
+                                link) {
+               ret = (enable) ? pamu_enable_liodn(info->liodn) :
+                       pamu_disable_liodn(info->liodn);
+               /*
+                * NOTE(review): per-LIODN failures are only logged here and
+                * the function still returns 0 — confirm this best-effort
+                * behaviour is intended.
+                */
+               if (ret)
+                       pr_debug("Unable to set dma state for liodn %d",
+                                info->liodn);
+       }
+       spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
+
+       return 0;
+}
+
+/*
+ * IOMMU callback: set a domain attribute (geometry, stash target
+ * or DMA enable state).
+ */
+static int fsl_pamu_set_domain_attr(struct iommu_domain *domain,
+                                enum iommu_attr attr_type, void *data)
+{
+       struct fsl_dma_domain *dma_domain = domain->priv;
+       int ret = 0;
+
+       switch (attr_type) {
+       case DOMAIN_ATTR_GEOMETRY:
+               ret = configure_domain_geometry(domain, data);
+               break;
+       case DOMAIN_ATTR_FSL_PAMU_STASH:
+               ret = configure_domain_stash(dma_domain, data);
+               break;
+       case DOMAIN_ATTR_FSL_PAMU_ENABLE:
+               ret = configure_domain_dma_state(dma_domain, *(int *)data);
+               break;
+       default:
+               pr_debug("Unsupported attribute type\n");
+               ret = -EINVAL;
+               break;
+       }
+
+       return ret;
+}
+
+/*
+ * IOMMU callback: read back a domain attribute (stash settings,
+ * DMA enable state, or the PAMU version marker).
+ */
+static int fsl_pamu_get_domain_attr(struct iommu_domain *domain,
+                                enum iommu_attr attr_type, void *data)
+{
+       struct fsl_dma_domain *dma_domain = domain->priv;
+       int ret = 0;
+
+       switch (attr_type) {
+       case DOMAIN_ATTR_FSL_PAMU_STASH:
+               memcpy((struct pamu_stash_attribute *) data, &dma_domain->dma_stash,
+                                sizeof(struct pamu_stash_attribute));
+               break;
+       case DOMAIN_ATTR_FSL_PAMU_ENABLE:
+               *(int *)data = dma_domain->enabled;
+               break;
+       case DOMAIN_ATTR_FSL_PAMUV1:
+               *(int *)data = DOMAIN_ATTR_FSL_PAMUV1;
+               break;
+       default:
+               pr_debug("Unsupported attribute type\n");
+               ret = -EINVAL;
+               break;
+       }
+
+       return ret;
+}
+
+#define REQ_ACS_FLAGS  (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)
+
+/*
+ * Return the device's existing iommu_group, or allocate a fresh one
+ * if the device has none yet.  Caller owns the returned reference.
+ */
+static struct iommu_group *get_device_iommu_group(struct device *dev)
+{
+       struct iommu_group *group;
+
+       group = iommu_group_get(dev);
+       if (!group)
+               group = iommu_group_alloc();
+
+       return group;
+}
+
+/*
+ * Check whether the PCI controller supports endpoint partitioning.
+ * Controllers with version >= 0x204 can partition endpoints.
+ */
+static bool check_pci_ctl_endpt_part(struct pci_controller *pci_ctl)
+{
+       u32 version;
+
+       /* Check the PCI controller version number by reading BRR1 register */
+       version = in_be32(pci_ctl->cfg_addr + (PCI_FSL_BRR1 >> 2));
+       version &= PCI_FSL_BRR1_VER;
+
+       return version >= 0x204;
+}
+
+/* Get iommu group information from peer devices or devices on the parent bus */
+static struct iommu_group *get_shared_pci_device_group(struct pci_dev *pdev)
+{
+       struct pci_dev *tmp;
+       struct iommu_group *group;
+       struct pci_bus *bus = pdev->bus;
+
+       /*
+        * Traverse the pci bus device list to get
+        * the shared iommu group.
+        */
+       while (bus) {
+               list_for_each_entry(tmp, &bus->devices, bus_list) {
+                       if (tmp == pdev)
+                               continue;
+                       group = iommu_group_get(&tmp->dev);
+                       if (group)
+                               return group;
+               }
+
+               /* No sibling had a group; try devices one bus level up */
+               bus = bus->parent;
+       }
+
+       return NULL;
+}
+
+/*
+ * Determine the iommu_group for a PCI device.  If the controller can
+ * partition endpoints, walk up past non-ACS bridges/functions to find
+ * the device that actually provides DMA isolation and use its group;
+ * otherwise all devices behind the controller share one group.
+ */
+static struct iommu_group *get_pci_device_group(struct pci_dev *pdev)
+{
+       struct pci_controller *pci_ctl;
+       bool pci_endpt_partioning;
+       struct iommu_group *group = NULL;
+       struct pci_dev *bridge, *dma_pdev = NULL;
+
+       pci_ctl = pci_bus_to_host(pdev->bus);
+       pci_endpt_partioning = check_pci_ctl_endpt_part(pci_ctl);
+       /* We can partition PCIe devices so assign device group to the device */
+       if (pci_endpt_partioning) {
+               bridge = pci_find_upstream_pcie_bridge(pdev);
+               if (bridge) {
+                       if (pci_is_pcie(bridge))
+                               dma_pdev = pci_get_domain_bus_and_slot(
+                                               pci_domain_nr(pdev->bus),
+                                               bridge->subordinate->number, 0);
+                       if (!dma_pdev)
+                               dma_pdev = pci_dev_get(bridge);
+               } else
+                       dma_pdev = pci_dev_get(pdev);
+
+               /* Account for quirked devices */
+               swap_pci_ref(&dma_pdev, pci_get_dma_source(dma_pdev));
+
+               /*
+                * If it's a multifunction device that does not support our
+                * required ACS flags, add to the same group as lowest numbered
+                * function that also does not support the required ACS flags.
+                */
+               if (dma_pdev->multifunction &&
+                   !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS)) {
+                       u8 i, slot = PCI_SLOT(dma_pdev->devfn);
+
+                       for (i = 0; i < 8; i++) {
+                               struct pci_dev *tmp;
+
+                               tmp = pci_get_slot(dma_pdev->bus, PCI_DEVFN(slot, i));
+                               if (!tmp)
+                                       continue;
+
+                               if (!pci_acs_enabled(tmp, REQ_ACS_FLAGS)) {
+                                       swap_pci_ref(&dma_pdev, tmp);
+                                       break;
+                               }
+                               pci_dev_put(tmp);
+                       }
+               }
+
+               /*
+                * Devices on the root bus go through the iommu.  If that's not us,
+                * find the next upstream device and test ACS up to the root bus.
+                * Finding the next device may require skipping virtual buses.
+                */
+               while (!pci_is_root_bus(dma_pdev->bus)) {
+                       struct pci_bus *bus = dma_pdev->bus;
+
+                       while (!bus->self) {
+                               if (!pci_is_root_bus(bus))
+                                       bus = bus->parent;
+                               else
+                                       goto root_bus;
+                       }
+
+                       if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
+                               break;
+
+                       swap_pci_ref(&dma_pdev, pci_dev_get(bus->self));
+               }
+
+root_bus:
+               group = get_device_iommu_group(&dma_pdev->dev);
+               pci_dev_put(dma_pdev);
+               /*
+                * PCIe controller is not a partitionable entity
+                * free the controller device iommu_group.
+                */
+               if (pci_ctl->parent->iommu_group)
+                       iommu_group_remove_device(pci_ctl->parent);
+       } else {
+               /*
+                * All devices connected to the controller will share the
+                * PCI controllers device group. If this is the first
+                * device to be probed for the pci controller, copy the
+                * device group information from the PCI controller device
+                * node and remove the PCI controller iommu group.
+                * For subsequent devices, the iommu group information can
+                * be obtained from sibling devices (i.e. from the bus_devices
+                * link list).
+                */
+               if (pci_ctl->parent->iommu_group) {
+                       group = get_device_iommu_group(pci_ctl->parent);
+                       iommu_group_remove_device(pci_ctl->parent);
+               } else
+                       group = get_shared_pci_device_group(pdev);
+       }
+
+       return group;
+}
+
+/*
+ * IOMMU callback: place a newly discovered device into an iommu_group.
+ * Platform devices get a group only if they carry a "fsl,liodn"
+ * property; PCI devices are grouped according to controller
+ * partitioning support.
+ */
+static int fsl_pamu_add_device(struct device *dev)
+{
+       struct iommu_group *group = NULL;
+       struct pci_dev *pdev;
+       const u32 *prop;
+       int ret, len;
+
+       /*
+        * For platform devices we allocate a separate group for
+        * each of the devices.
+        */
+       if (dev->bus == &pci_bus_type) {
+               pdev = to_pci_dev(dev);
+               /* Don't create device groups for virtual PCI bridges */
+               if (pdev->subordinate)
+                       return 0;
+
+               group = get_pci_device_group(pdev);
+
+       } else {
+               prop = of_get_property(dev->of_node, "fsl,liodn", &len);
+               if (prop)
+                       group = get_device_iommu_group(dev);
+       }
+
+       /*
+        * NOTE(review): PTR_ERR(NULL) evaluates to 0, so a device that got
+        * no group returns success here — confirm that is intentional.
+        */
+       if (!group || IS_ERR(group))
+               return PTR_ERR(group);
+
+       ret = iommu_group_add_device(group, dev);
+
+       iommu_group_put(group);
+       return ret;
+}
+
+/* IOMMU callback: remove the device from its iommu_group. */
+static void fsl_pamu_remove_device(struct device *dev)
+{
+       iommu_group_remove_device(dev);
+}
+
+/*
+ * IOMMU callback: set the number of DMA (sub)windows for the domain.
+ * The domain must be disabled, the geometry must already be configured,
+ * and w_count must be a power of two within the PAMU subwindow limit.
+ * Replaces any previously allocated window array.
+ */
+static int fsl_pamu_set_windows(struct iommu_domain *domain, u32 w_count)
+{
+       struct fsl_dma_domain *dma_domain = domain->priv;
+       unsigned long flags;
+       int ret;
+
+       spin_lock_irqsave(&dma_domain->domain_lock, flags);
+       /* Ensure domain is inactive i.e. DMA should be disabled for the domain */
+       if (dma_domain->enabled) {
+               pr_debug("Can't set geometry attributes as domain is active\n");
+               spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
+               return  -EBUSY;
+       }
+
+       /* Ensure that the geometry has been set for the domain */
+       if (!dma_domain->geom_size) {
+               pr_debug("Please configure geometry before setting the number of windows\n");
+               spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
+               return -EINVAL;
+       }
+
+       /*
+        * Ensure we have valid window count i.e. it should be less than
+        * maximum permissible limit and should be a power of two.
+        */
+       if (w_count > pamu_get_max_subwin_cnt() || !is_power_of_2(w_count)) {
+               pr_debug("Invalid window count\n");
+               spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
+               return -EINVAL;
+       }
+
+       ret = pamu_set_domain_geometry(dma_domain, &domain->geometry,
+                               ((w_count > 1) ? w_count : 0));
+       if (!ret) {
+               /* kfree(NULL) is a no-op, so no need to check win_arr first */
+               kfree(dma_domain->win_arr);
+               /* kcalloc zeroes the array and checks n * size for overflow */
+               dma_domain->win_arr = kcalloc(w_count,
+                                             sizeof(struct dma_window),
+                                             GFP_ATOMIC);
+               if (!dma_domain->win_arr) {
+                       spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
+                       return -ENOMEM;
+               }
+               dma_domain->win_cnt = w_count;
+       }
+       spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
+
+       return ret;
+}
+
+/* IOMMU callback: report the configured number of DMA windows. */
+static u32 fsl_pamu_get_windows(struct iommu_domain *domain)
+{
+       struct fsl_dma_domain *dma_domain = domain->priv;
+
+       return dma_domain->win_cnt;
+}
+
+/* IOMMU operations exposed by the PAMU driver */
+static struct iommu_ops fsl_pamu_ops = {
+       .domain_init    = fsl_pamu_domain_init,
+       .domain_destroy = fsl_pamu_domain_destroy,
+       .attach_dev     = fsl_pamu_attach_device,
+       .detach_dev     = fsl_pamu_detach_device,
+       .domain_window_enable = fsl_pamu_window_enable,
+       .domain_window_disable = fsl_pamu_window_disable,
+       .domain_get_windows = fsl_pamu_get_windows,
+       .domain_set_windows = fsl_pamu_set_windows,
+       .iova_to_phys   = fsl_pamu_iova_to_phys,
+       .domain_has_cap = fsl_pamu_domain_has_cap,
+       .domain_set_attr = fsl_pamu_set_domain_attr,
+       .domain_get_attr = fsl_pamu_get_domain_attr,
+       .add_device     = fsl_pamu_add_device,
+       .remove_device  = fsl_pamu_remove_device,
+};
+
+/*
+ * Initialize the PAMU IOMMU driver: set up the iommu mempools and
+ * register the PAMU ops for the platform and PCI buses.
+ */
+int pamu_domain_init(void)
+{
+       int ret = 0;
+
+       ret = iommu_init_mempool();
+       if (ret)
+               return ret;
+
+       bus_set_iommu(&platform_bus_type, &fsl_pamu_ops);
+       bus_set_iommu(&pci_bus_type, &fsl_pamu_ops);
+
+       return ret;
+}
diff --git a/drivers/iommu/fsl_pamu_domain.h b/drivers/iommu/fsl_pamu_domain.h
new file mode 100644 (file)
index 0000000..c90293f
--- /dev/null
@@ -0,0 +1,85 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ *
+ * Copyright (C) 2013 Freescale Semiconductor, Inc.
+ *
+ */
+
+#ifndef __FSL_PAMU_DOMAIN_H
+#define __FSL_PAMU_DOMAIN_H
+
+#include "fsl_pamu.h"
+
+/* A single DMA window mapping: physical base, size and PAMU permissions. */
+struct dma_window {
+       phys_addr_t paddr;
+       u64 size;
+       int valid;      /* non-zero once the window mapping is programmed */
+       int prot;       /* PAACE access permissions */
+};
+
+/* Per-domain state for a PAMU DMA domain */
+struct fsl_dma_domain {
+       /*
+        * Indicates the geometry size for the domain.
+        * This would be set when the geometry is
+        * configured for the domain.
+        */
+       dma_addr_t                      geom_size;
+       /*
+        * Number of windows associated with this domain.
+        * During domain initialization, it is set to
+        * the maximum number of subwindows allowed for a LIODN.
+        * Minimum value for this is 1 indicating a single PAMU
+        * window, without any sub windows. Value can be set/
+        * queried by set_attr/get_attr API for DOMAIN_ATTR_WINDOWS.
+        * Value can only be set once the geometry has been configured.
+        */
+       u32                             win_cnt;
+       /*
+        * win_arr contains information of the configured
+        * windows for a domain. This is allocated only
+        * when the number of windows for the domain are
+        * set.
+        */
+       struct dma_window               *win_arr;
+       /* list of devices associated with the domain */
+       struct list_head                devices;
+       /* dma_domain states:
+        * mapped - A particular mapping has been created
+        * within the configured geometry.
+        * enabled - DMA has been enabled for the given
+        * domain. This translates to setting of the
+        * valid bit for the primary PAACE in the PAMU
+        * PAACT table. Domain geometry should be set and
+        * it must have a valid mapping before DMA can be
+        * enabled for it.
+        *
+        */
+       int                             mapped;
+       int                             enabled;
+       /* stash_id obtained from the stash attribute details */
+       u32                             stash_id;
+       struct pamu_stash_attribute     dma_stash;
+       u32                             snoop_id;
+       struct iommu_domain             *iommu_domain;
+       /* protects domain state, device list and window array */
+       spinlock_t                      domain_lock;
+};
+
+/* domain-device relationship */
+struct device_domain_info {
+       struct list_head link;  /* link to domain siblings */
+       struct device *dev;     /* device attached to the domain */
+       u32 liodn;              /* LIODN used for this device */
+       struct fsl_dma_domain *domain; /* pointer to domain */
+};
+#endif  /* __FSL_PAMU_DOMAIN_H */
index eec0d3e04bf578ab6a6afe36e1c55ad5d4b0c969..15e9b57e9cf05ba43e19d76f37e64275d3e6db44 100644 (file)
@@ -890,56 +890,54 @@ static int dma_pte_clear_range(struct dmar_domain *domain,
        return order;
 }
 
+/*
+ * Recursively walk the page-table hierarchy at the given level and free
+ * intermediate page-table pages whose entire address range lies within
+ * [start_pfn, last_pfn].  Recurses down to level 2 before freeing, so
+ * child tables are handled before their parent entry is cleared.
+ */
+static void dma_pte_free_level(struct dmar_domain *domain, int level,
+                              struct dma_pte *pte, unsigned long pfn,
+                              unsigned long start_pfn, unsigned long last_pfn)
+{
+       pfn = max(start_pfn, pfn);
+       pte = &pte[pfn_level_offset(pfn, level)];
+
+       do {
+               unsigned long level_pfn;
+               struct dma_pte *level_pte;
+
+               if (!dma_pte_present(pte) || dma_pte_superpage(pte))
+                       goto next;
+
+               level_pfn = pfn & level_mask(level - 1);
+               level_pte = phys_to_virt(dma_pte_addr(pte));
+
+               if (level > 2)
+                       dma_pte_free_level(domain, level - 1, level_pte,
+                                          level_pfn, start_pfn, last_pfn);
+
+               /* If range covers entire pagetable, free it */
+               if (!(start_pfn > level_pfn ||
+                     last_pfn < level_pfn + level_size(level))) {
+                       dma_clear_pte(pte);
+                       domain_flush_cache(domain, pte, sizeof(*pte));
+                       free_pgtable_page(level_pte);
+               }
+next:
+               pfn += level_size(level);
+       } while (!first_pte_in_page(++pte) && pfn <= last_pfn);
+}
+
 /* free page table pages. last level pte should already be cleared */
 static void dma_pte_free_pagetable(struct dmar_domain *domain,
                                   unsigned long start_pfn,
                                   unsigned long last_pfn)
 {
        int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
-       struct dma_pte *first_pte, *pte;
-       int total = agaw_to_level(domain->agaw);
-       int level;
-       unsigned long tmp;
-       int large_page = 2;
 
        BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
        BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
        BUG_ON(start_pfn > last_pfn);
 
        /* We don't need lock here; nobody else touches the iova range */
-       level = 2;
-       while (level <= total) {
-               tmp = align_to_level(start_pfn, level);
-
-               /* If we can't even clear one PTE at this level, we're done */
-               if (tmp + level_size(level) - 1 > last_pfn)
-                       return;
-
-               do {
-                       large_page = level;
-                       first_pte = pte = dma_pfn_level_pte(domain, tmp, level, &large_page);
-                       if (large_page > level)
-                               level = large_page + 1;
-                       if (!pte) {
-                               tmp = align_to_level(tmp + 1, level + 1);
-                               continue;
-                       }
-                       do {
-                               if (dma_pte_present(pte)) {
-                                       free_pgtable_page(phys_to_virt(dma_pte_addr(pte)));
-                                       dma_clear_pte(pte);
-                               }
-                               pte++;
-                               tmp += level_size(level);
-                       } while (!first_pte_in_page(pte) &&
-                                tmp + level_size(level) - 1 <= last_pfn);
+       dma_pte_free_level(domain, agaw_to_level(domain->agaw),
+                          domain->pgd, 0, start_pfn, last_pfn);
 
-                       domain_flush_cache(domain, first_pte,
-                                          (void *)pte - (void *)first_pte);
-                       
-               } while (tmp && tmp + level_size(level) - 1 <= last_pfn);
-               level++;
-       }
        /* free pgd */
        if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
                free_pgtable_page(domain->pgd);
index 1fea003ed33f63c6cef961cbc1629dd71d3656ea..3792a1aa52b88d3439cdc66195230cc1b20f0f2a 100644 (file)
@@ -30,6 +30,11 @@ config ARM_VIC_NR
          The maximum number of VICs available in the system, for
          power management.
 
+config IMGPDC_IRQ
+       bool
+       select GENERIC_IRQ_CHIP
+       select IRQ_DOMAIN
+
 config ORION_IRQCHIP
        bool
        select IRQ_DOMAIN
index e65c41a7366bf1f6887ab5a5315622f1e1341478..81e8cd49ae76ee8f7ff825414c7cb5c447e2bfe6 100644 (file)
@@ -14,6 +14,7 @@ obj-$(CONFIG_ARCH_SPEAR3XX)           += spear-shirq.o
 obj-$(CONFIG_ARM_GIC)                  += irq-gic.o
 obj-$(CONFIG_ARM_NVIC)                 += irq-nvic.o
 obj-$(CONFIG_ARM_VIC)                  += irq-vic.o
+obj-$(CONFIG_IMGPDC_IRQ)               += irq-imgpdc.o
 obj-$(CONFIG_SIRF_IRQ)                 += irq-sirfsoc.o
 obj-$(CONFIG_RENESAS_INTC_IRQPIN)      += irq-renesas-intc-irqpin.o
 obj-$(CONFIG_RENESAS_IRQC)             += irq-renesas-irqc.o
diff --git a/drivers/irqchip/irq-imgpdc.c b/drivers/irqchip/irq-imgpdc.c
new file mode 100644 (file)
index 0000000..8071c2e
--- /dev/null
@@ -0,0 +1,499 @@
+/*
+ * IMG PowerDown Controller (PDC)
+ *
+ * Copyright 2010-2013 Imagination Technologies Ltd.
+ *
+ * Exposes the syswake and PDC peripheral wake interrupts to the system.
+ *
+ */
+
+#include <linux/bitops.h>
+#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+
+/* PDC interrupt register numbers */
+
+#define PDC_IRQ_STATUS                 0x310
+#define PDC_IRQ_ENABLE                 0x314
+#define PDC_IRQ_CLEAR                  0x318
+#define PDC_IRQ_ROUTE                  0x31c
+#define PDC_SYS_WAKE_BASE              0x330
+#define PDC_SYS_WAKE_STRIDE            0x8
+#define PDC_SYS_WAKE_CONFIG_BASE       0x334
+#define PDC_SYS_WAKE_CONFIG_STRIDE     0x8
+
+/* PDC interrupt register field masks */
+
+#define PDC_IRQ_SYS3                   0x08
+#define PDC_IRQ_SYS2                   0x04
+#define PDC_IRQ_SYS1                   0x02
+#define PDC_IRQ_SYS0                   0x01
+#define PDC_IRQ_ROUTE_WU_EN_SYS3       0x08000000
+#define PDC_IRQ_ROUTE_WU_EN_SYS2       0x04000000
+#define PDC_IRQ_ROUTE_WU_EN_SYS1       0x02000000
+#define PDC_IRQ_ROUTE_WU_EN_SYS0       0x01000000
+#define PDC_IRQ_ROUTE_WU_EN_WD         0x00040000
+#define PDC_IRQ_ROUTE_WU_EN_IR         0x00020000
+#define PDC_IRQ_ROUTE_WU_EN_RTC                0x00010000
+#define PDC_IRQ_ROUTE_EXT_EN_SYS3      0x00000800
+#define PDC_IRQ_ROUTE_EXT_EN_SYS2      0x00000400
+#define PDC_IRQ_ROUTE_EXT_EN_SYS1      0x00000200
+#define PDC_IRQ_ROUTE_EXT_EN_SYS0      0x00000100
+#define PDC_IRQ_ROUTE_EXT_EN_WD                0x00000004
+#define PDC_IRQ_ROUTE_EXT_EN_IR                0x00000002
+#define PDC_IRQ_ROUTE_EXT_EN_RTC       0x00000001
+#define PDC_SYS_WAKE_RESET             0x00000010
+#define PDC_SYS_WAKE_INT_MODE          0x0000000e
+#define PDC_SYS_WAKE_INT_MODE_SHIFT    1
+#define PDC_SYS_WAKE_PIN_VAL           0x00000001
+
+/* PDC interrupt constants */
+
+#define PDC_SYS_WAKE_INT_LOW           0x0
+#define PDC_SYS_WAKE_INT_HIGH          0x1
+#define PDC_SYS_WAKE_INT_DOWN          0x2
+#define PDC_SYS_WAKE_INT_UP            0x3
+#define PDC_SYS_WAKE_INT_CHANGE                0x6
+#define PDC_SYS_WAKE_INT_NONE          0x4
+
+/**
+ * struct pdc_intc_priv - private pdc interrupt data.
+ * @nr_perips:         Number of peripheral interrupt signals.
+ * @nr_syswakes:       Number of syswake signals.
+ * @perip_irqs:                List of peripheral IRQ numbers handled.
+ * @syswake_irq:       Shared PDC syswake IRQ number.
+ * @domain:            IRQ domain for PDC peripheral and syswake IRQs.
+ * @pdc_base:          Base of PDC registers.
+ * @irq_route:         Cached version of PDC_IRQ_ROUTE register.
+ * @lock:              Lock to protect the PDC syswake registers and the cached
+ *                     values of those registers in this struct.
+ */
+struct pdc_intc_priv {
+       unsigned int            nr_perips;
+       unsigned int            nr_syswakes;
+       unsigned int            *perip_irqs;
+       unsigned int            syswake_irq;
+       struct irq_domain       *domain;
+       void __iomem            *pdc_base;
+
+       u32                     irq_route;
+       raw_spinlock_t          lock;
+};
+
+/* Write a 32-bit value to the PDC register at byte offset @reg_offs. */
+static void pdc_write(struct pdc_intc_priv *priv, unsigned int reg_offs,
+                     unsigned int data)
+{
+       iowrite32(data, priv->pdc_base + reg_offs);
+}
+
+/* Read the 32-bit PDC register at byte offset @reg_offs. */
+static unsigned int pdc_read(struct pdc_intc_priv *priv,
+                            unsigned int reg_offs)
+{
+       return ioread32(priv->pdc_base + reg_offs);
+}
+
+/* Generic IRQ callbacks */
+
+#define SYS0_HWIRQ     8
+
+/* Hardware IRQ numbers >= SYS0_HWIRQ are syswakes; below are peripherals. */
+static unsigned int hwirq_is_syswake(irq_hw_number_t hw)
+{
+       return hw >= SYS0_HWIRQ;
+}
+
+/* Convert a domain hwirq number to a 0-based syswake index. */
+static unsigned int hwirq_to_syswake(irq_hw_number_t hw)
+{
+       return hw - SYS0_HWIRQ;
+}
+
+/* Convert a 0-based syswake index to its domain hwirq number. */
+static irq_hw_number_t syswake_to_hwirq(unsigned int syswake)
+{
+       return SYS0_HWIRQ + syswake;
+}
+
+/* Recover driver data from irq_data via the domain's host_data pointer. */
+static struct pdc_intc_priv *irqd_to_priv(struct irq_data *data)
+{
+       return (struct pdc_intc_priv *)data->domain->host_data;
+}
+
+/*
+ * perip_irq_mask() and perip_irq_unmask() use IRQ_ROUTE which also contains
+ * wake bits, therefore we cannot use the generic irqchip mask callbacks as they
+ * cache the mask.
+ */
+
+/* Mask a peripheral IRQ by clearing its route bit in the cached IRQ_ROUTE. */
+static void perip_irq_mask(struct irq_data *data)
+{
+       struct pdc_intc_priv *priv = irqd_to_priv(data);
+
+       /* lock protects both the register and the cached irq_route value */
+       raw_spin_lock(&priv->lock);
+       priv->irq_route &= ~data->mask;
+       pdc_write(priv, PDC_IRQ_ROUTE, priv->irq_route);
+       raw_spin_unlock(&priv->lock);
+}
+
+/* Unmask a peripheral IRQ by setting its route bit in the cached IRQ_ROUTE. */
+static void perip_irq_unmask(struct irq_data *data)
+{
+       struct pdc_intc_priv *priv = irqd_to_priv(data);
+
+       /* lock protects both the register and the cached irq_route value */
+       raw_spin_lock(&priv->lock);
+       priv->irq_route |= data->mask;
+       pdc_write(priv, PDC_IRQ_ROUTE, priv->irq_route);
+       raw_spin_unlock(&priv->lock);
+}
+
+/*
+ * Program the trigger mode of a syswake input and switch to the matching
+ * (edge/level) generic chip type.  Returns -EINVAL for unsupported types.
+ */
+static int syswake_irq_set_type(struct irq_data *data, unsigned int flow_type)
+{
+       struct pdc_intc_priv *priv = irqd_to_priv(data);
+       unsigned int syswake = hwirq_to_syswake(data->hwirq);
+       unsigned int irq_mode;
+       unsigned int soc_sys_wake_regoff, soc_sys_wake;
+
+       /* translate to syswake IRQ mode */
+       switch (flow_type) {
+       case IRQ_TYPE_EDGE_BOTH:
+               irq_mode = PDC_SYS_WAKE_INT_CHANGE;
+               break;
+       case IRQ_TYPE_EDGE_RISING:
+               irq_mode = PDC_SYS_WAKE_INT_UP;
+               break;
+       case IRQ_TYPE_EDGE_FALLING:
+               irq_mode = PDC_SYS_WAKE_INT_DOWN;
+               break;
+       case IRQ_TYPE_LEVEL_HIGH:
+               irq_mode = PDC_SYS_WAKE_INT_HIGH;
+               break;
+       case IRQ_TYPE_LEVEL_LOW:
+               irq_mode = PDC_SYS_WAKE_INT_LOW;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       raw_spin_lock(&priv->lock);
+
+       /* set the IRQ mode: read-modify-write the INT_MODE field only */
+       soc_sys_wake_regoff = PDC_SYS_WAKE_BASE + syswake*PDC_SYS_WAKE_STRIDE;
+       soc_sys_wake = pdc_read(priv, soc_sys_wake_regoff);
+       soc_sys_wake &= ~PDC_SYS_WAKE_INT_MODE;
+       soc_sys_wake |= irq_mode << PDC_SYS_WAKE_INT_MODE_SHIFT;
+       pdc_write(priv, soc_sys_wake_regoff, soc_sys_wake);
+
+       /* and update the handler (edge vs level chip type) */
+       irq_setup_alt_chip(data, flow_type);
+
+       raw_spin_unlock(&priv->lock);
+
+       return 0;
+}
+
+/* applies to both peripheral and syswake interrupts */
+static int pdc_irq_set_wake(struct irq_data *data, unsigned int on)
+{
+       struct pdc_intc_priv *priv = irqd_to_priv(data);
+       irq_hw_number_t hw = data->hwirq;
+       /*
+        * Wake-enable (WU_EN) bits in IRQ_ROUTE sit 16 bits above the hwirq
+        * number (e.g. RTC hwirq 0 -> bit 16, SYS0 hwirq 8 -> bit 24).
+        */
+       unsigned int mask = (1 << 16) << hw;
+       unsigned int dst_irq;
+
+       raw_spin_lock(&priv->lock);
+       if (on)
+               priv->irq_route |= mask;
+       else
+               priv->irq_route &= ~mask;
+       pdc_write(priv, PDC_IRQ_ROUTE, priv->irq_route);
+       raw_spin_unlock(&priv->lock);
+
+       /* control the destination IRQ wakeup too for standby mode */
+       if (hwirq_is_syswake(hw))
+               dst_irq = priv->syswake_irq;
+       else
+               dst_irq = priv->perip_irqs[hw];
+       irq_set_irq_wake(dst_irq, on);
+
+       return 0;
+}
+
+/*
+ * Chained handler for a peripheral wake IRQ: map the parent Linux IRQ back
+ * to its peripheral index and forward to the corresponding domain IRQ.
+ */
+static void pdc_intc_perip_isr(unsigned int irq, struct irq_desc *desc)
+{
+       struct pdc_intc_priv *priv;
+       unsigned int i, irq_no;
+
+       priv = (struct pdc_intc_priv *)irq_desc_get_handler_data(desc);
+
+       /* find the peripheral number */
+       for (i = 0; i < priv->nr_perips; ++i)
+               if (irq == priv->perip_irqs[i])
+                       goto found;
+
+       /* should never get here */
+       return;
+found:
+
+       /* pass on the interrupt (peripheral hwirqs are 0..nr_perips-1) */
+       irq_no = irq_linear_revmap(priv->domain, i);
+       generic_handle_irq(irq_no);
+}
+
+/*
+ * Chained handler for the shared syswake IRQ: dispatch each pending and
+ * enabled syswake bit to its domain IRQ.
+ */
+static void pdc_intc_syswake_isr(unsigned int irq, struct irq_desc *desc)
+{
+       struct pdc_intc_priv *priv;
+       unsigned int syswake, irq_no;
+       unsigned int status;
+
+       priv = (struct pdc_intc_priv *)irq_desc_get_handler_data(desc);
+
+       /* only consider bits that are both pending and enabled */
+       status = pdc_read(priv, PDC_IRQ_STATUS) &
+                pdc_read(priv, PDC_IRQ_ENABLE);
+       status &= (1 << priv->nr_syswakes) - 1;
+
+       for (syswake = 0; status; status >>= 1, ++syswake) {
+               /* Has this sys_wake triggered? */
+               if (!(status & 1))
+                       continue;
+
+               irq_no = irq_linear_revmap(priv->domain,
+                                          syswake_to_hwirq(syswake));
+               generic_handle_irq(irq_no);
+       }
+}
+
+/* Put the PDC hardware into a known state: all masked, routed, mode "none". */
+static void pdc_intc_setup(struct pdc_intc_priv *priv)
+{
+       int i;
+       unsigned int soc_sys_wake_regoff;
+       unsigned int soc_sys_wake;
+
+       /*
+        * Mask all syswake interrupts before routing, or we could receive an
+        * interrupt before we're ready to handle it.
+        */
+       pdc_write(priv, PDC_IRQ_ENABLE, 0);
+
+       /*
+        * Enable routing of all syswakes
+        * Disable all wake sources
+        * (sets EXT_EN_SYS0..EXT_EN_SYS<n-1>, leaves all WU_EN bits clear)
+        */
+       priv->irq_route = ((PDC_IRQ_ROUTE_EXT_EN_SYS0 << priv->nr_syswakes) -
+                               PDC_IRQ_ROUTE_EXT_EN_SYS0);
+       pdc_write(priv, PDC_IRQ_ROUTE, priv->irq_route);
+
+       /* Initialise syswake IRQ */
+       for (i = 0; i < priv->nr_syswakes; ++i) {
+               /* set the IRQ mode to none */
+               soc_sys_wake_regoff = PDC_SYS_WAKE_BASE + i*PDC_SYS_WAKE_STRIDE;
+               soc_sys_wake = PDC_SYS_WAKE_INT_NONE
+                               << PDC_SYS_WAKE_INT_MODE_SHIFT;
+               pdc_write(priv, soc_sys_wake_regoff, soc_sys_wake);
+       }
+}
+
+/**
+ * pdc_intc_probe() - probe the PDC interrupt controller from device tree.
+ * @pdev:      Platform device carrying the register resource, the syswake
+ *             IRQ (resource 0), the peripheral IRQs (resources 1..n), and
+ *             the "num-perips"/"num-syswakes" DT properties.
+ *
+ * Sets up a linear IRQ domain with two generic chips (one for peripheral
+ * interrupts, one for syswakes with edge and level chip types), programs
+ * the hardware routing, and chains the parent IRQs into the PDC handlers.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+static int pdc_intc_probe(struct platform_device *pdev)
+{
+       struct pdc_intc_priv *priv;
+       struct device_node *node = pdev->dev.of_node;
+       struct resource *res_regs;
+       struct irq_chip_generic *gc;
+       unsigned int i;
+       int irq, ret;
+       u32 val;
+
+       if (!node)
+               return -ENOENT;
+
+       /* Get registers */
+       res_regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (res_regs == NULL) {
+               dev_err(&pdev->dev, "cannot find registers resource\n");
+               return -ENOENT;
+       }
+
+       /* Allocate driver data */
+       priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+       if (!priv) {
+               dev_err(&pdev->dev, "cannot allocate device data\n");
+               return -ENOMEM;
+       }
+       raw_spin_lock_init(&priv->lock);
+       platform_set_drvdata(pdev, priv);
+
+       /*
+        * Ioremap the registers.  Use resource_size(): the resource is
+        * inclusive of its end address, so "end - start" would map one
+        * byte too few.
+        */
+       priv->pdc_base = devm_ioremap(&pdev->dev, res_regs->start,
+                                     resource_size(res_regs));
+       if (!priv->pdc_base)
+               return -EIO;
+
+       /* Get number of peripherals */
+       ret = of_property_read_u32(node, "num-perips", &val);
+       if (ret) {
+               dev_err(&pdev->dev, "No num-perips node property found\n");
+               return -EINVAL;
+       }
+       if (val > SYS0_HWIRQ) {
+               dev_err(&pdev->dev, "num-perips (%u) out of range\n", val);
+               return -EINVAL;
+       }
+       priv->nr_perips = val;
+
+       /* Get number of syswakes */
+       ret = of_property_read_u32(node, "num-syswakes", &val);
+       if (ret) {
+               dev_err(&pdev->dev, "No num-syswakes node property found\n");
+               return -EINVAL;
+       }
+       if (val > SYS0_HWIRQ) {
+               dev_err(&pdev->dev, "num-syswakes (%u) out of range\n", val);
+               return -EINVAL;
+       }
+       priv->nr_syswakes = val;
+
+       /* Get peripheral IRQ numbers (sized by element, not a magic "4") */
+       priv->perip_irqs = devm_kzalloc(&pdev->dev,
+                                       sizeof(*priv->perip_irqs) *
+                                       priv->nr_perips,
+                                       GFP_KERNEL);
+       if (!priv->perip_irqs) {
+               dev_err(&pdev->dev, "cannot allocate perip IRQ list\n");
+               return -ENOMEM;
+       }
+       for (i = 0; i < priv->nr_perips; ++i) {
+               irq = platform_get_irq(pdev, 1 + i);
+               if (irq < 0) {
+                       dev_err(&pdev->dev, "cannot find perip IRQ #%u\n", i);
+                       return irq;
+               }
+               priv->perip_irqs[i] = irq;
+       }
+       /* check if too many were provided */
+       if (platform_get_irq(pdev, 1 + i) >= 0) {
+               dev_err(&pdev->dev, "surplus perip IRQs detected\n");
+               return -EINVAL;
+       }
+
+       /* Get syswake IRQ number */
+       irq = platform_get_irq(pdev, 0);
+       if (irq < 0) {
+               dev_err(&pdev->dev, "cannot find syswake IRQ\n");
+               return irq;
+       }
+       priv->syswake_irq = irq;
+
+       /* Set up an IRQ domain (8 perip + 8 syswake hwirq slots) */
+       priv->domain = irq_domain_add_linear(node, 16, &irq_generic_chip_ops,
+                                            priv);
+       if (unlikely(!priv->domain)) {
+               dev_err(&pdev->dev, "cannot add IRQ domain\n");
+               return -ENOMEM;
+       }
+
+       /*
+        * Set up 2 generic irq chips with 2 chip types.
+        * The first one for peripheral irqs (only 1 chip type used)
+        * The second one for syswake irqs (edge and level chip types)
+        */
+       ret = irq_alloc_domain_generic_chips(priv->domain, 8, 2, "pdc",
+                                            handle_level_irq, 0, 0,
+                                            IRQ_GC_INIT_NESTED_LOCK);
+       if (ret)
+               goto err_generic;
+
+       /* peripheral interrupt chip */
+
+       gc = irq_get_domain_generic_chip(priv->domain, 0);
+       gc->unused      = ~(BIT(priv->nr_perips) - 1);
+       gc->reg_base    = priv->pdc_base;
+       /*
+        * IRQ_ROUTE contains wake bits, so we can't use the generic versions as
+        * they cache the mask
+        */
+       gc->chip_types[0].regs.mask             = PDC_IRQ_ROUTE;
+       gc->chip_types[0].chip.irq_mask         = perip_irq_mask;
+       gc->chip_types[0].chip.irq_unmask       = perip_irq_unmask;
+       gc->chip_types[0].chip.irq_set_wake     = pdc_irq_set_wake;
+
+       /* syswake interrupt chip */
+
+       gc = irq_get_domain_generic_chip(priv->domain, 8);
+       gc->unused      = ~(BIT(priv->nr_syswakes) - 1);
+       gc->reg_base    = priv->pdc_base;
+
+       /* edge interrupts */
+       gc->chip_types[0].type                  = IRQ_TYPE_EDGE_BOTH;
+       gc->chip_types[0].handler               = handle_edge_irq;
+       gc->chip_types[0].regs.ack              = PDC_IRQ_CLEAR;
+       gc->chip_types[0].regs.mask             = PDC_IRQ_ENABLE;
+       gc->chip_types[0].chip.irq_ack          = irq_gc_ack_set_bit;
+       gc->chip_types[0].chip.irq_mask         = irq_gc_mask_clr_bit;
+       gc->chip_types[0].chip.irq_unmask       = irq_gc_mask_set_bit;
+       gc->chip_types[0].chip.irq_set_type     = syswake_irq_set_type;
+       gc->chip_types[0].chip.irq_set_wake     = pdc_irq_set_wake;
+       /* for standby we pass on to the shared syswake IRQ */
+       gc->chip_types[0].chip.flags            = IRQCHIP_MASK_ON_SUSPEND;
+
+       /* level interrupts */
+       gc->chip_types[1].type                  = IRQ_TYPE_LEVEL_MASK;
+       gc->chip_types[1].handler               = handle_level_irq;
+       gc->chip_types[1].regs.ack              = PDC_IRQ_CLEAR;
+       gc->chip_types[1].regs.mask             = PDC_IRQ_ENABLE;
+       gc->chip_types[1].chip.irq_ack          = irq_gc_ack_set_bit;
+       gc->chip_types[1].chip.irq_mask         = irq_gc_mask_clr_bit;
+       gc->chip_types[1].chip.irq_unmask       = irq_gc_mask_set_bit;
+       gc->chip_types[1].chip.irq_set_type     = syswake_irq_set_type;
+       gc->chip_types[1].chip.irq_set_wake     = pdc_irq_set_wake;
+       /* for standby we pass on to the shared syswake IRQ */
+       gc->chip_types[1].chip.flags            = IRQCHIP_MASK_ON_SUSPEND;
+
+       /* Set up the hardware to enable interrupt routing */
+       pdc_intc_setup(priv);
+
+       /* Setup chained handlers for the peripheral IRQs */
+       for (i = 0; i < priv->nr_perips; ++i) {
+               irq = priv->perip_irqs[i];
+               irq_set_handler_data(irq, priv);
+               irq_set_chained_handler(irq, pdc_intc_perip_isr);
+       }
+
+       /* Setup chained handler for the syswake IRQ */
+       irq_set_handler_data(priv->syswake_irq, priv);
+       irq_set_chained_handler(priv->syswake_irq, pdc_intc_syswake_isr);
+
+       dev_info(&pdev->dev,
+                "PDC IRQ controller initialised (%u perip IRQs, %u syswake IRQs)\n",
+                priv->nr_perips,
+                priv->nr_syswakes);
+
+       return 0;
+err_generic:
+       irq_domain_remove(priv->domain);
+       return ret;
+}
+
+/* Tear down the IRQ domain; devm handles the rest of the allocations. */
+static int pdc_intc_remove(struct platform_device *pdev)
+{
+       struct pdc_intc_priv *priv = platform_get_drvdata(pdev);
+
+       irq_domain_remove(priv->domain);
+       return 0;
+}
+
+/* Device-tree match table: binds to "img,pdc-intc" nodes. */
+static const struct of_device_id pdc_intc_match[] = {
+       { .compatible = "img,pdc-intc" },
+       {}
+};
+
+static struct platform_driver pdc_intc_driver = {
+       .driver = {
+               .name           = "pdc-intc",
+               .of_match_table = pdc_intc_match,
+       },
+       .probe = pdc_intc_probe,
+       .remove = pdc_intc_remove,
+};
+
+/* Register at core_initcall so the irqchip is available to early consumers. */
+static int __init pdc_intc_init(void)
+{
+       return platform_driver_register(&pdc_intc_driver);
+}
+core_initcall(pdc_intc_init);
index a7e4939787c957a0f7975c4ea3d8aaaa390e6d2b..7f910c76ca0a340a967737345c7e733e2c4eced4 100644 (file)
@@ -1307,11 +1307,11 @@ mode_hfcpci(struct bchannel *bch, int bc, int protocol)
                }
                if (fifo2 & 2) {
                        hc->hw.fifo_en &= ~HFCPCI_FIFOEN_B2;
-                       hc->hw.int_m1 &= ~(HFCPCI_INTS_B2TRANS +
+                       hc->hw.int_m1 &= ~(HFCPCI_INTS_B2TRANS |
                                           HFCPCI_INTS_B2REC);
                } else {
                        hc->hw.fifo_en &= ~HFCPCI_FIFOEN_B1;
-                       hc->hw.int_m1 &= ~(HFCPCI_INTS_B1TRANS +
+                       hc->hw.int_m1 &= ~(HFCPCI_INTS_B1TRANS |
                                           HFCPCI_INTS_B1REC);
                }
 #ifdef REVERSE_BITORDER
@@ -1346,14 +1346,14 @@ mode_hfcpci(struct bchannel *bch, int bc, int protocol)
                if (fifo2 & 2) {
                        hc->hw.fifo_en |= HFCPCI_FIFOEN_B2;
                        if (!tics)
-                               hc->hw.int_m1 |= (HFCPCI_INTS_B2TRANS +
+                               hc->hw.int_m1 |= (HFCPCI_INTS_B2TRANS |
                                                  HFCPCI_INTS_B2REC);
                        hc->hw.ctmt |= 2;
                        hc->hw.conn &= ~0x18;
                } else {
                        hc->hw.fifo_en |= HFCPCI_FIFOEN_B1;
                        if (!tics)
-                               hc->hw.int_m1 |= (HFCPCI_INTS_B1TRANS +
+                               hc->hw.int_m1 |= (HFCPCI_INTS_B1TRANS |
                                                  HFCPCI_INTS_B1REC);
                        hc->hw.ctmt |= 1;
                        hc->hw.conn &= ~0x03;
@@ -1375,14 +1375,14 @@ mode_hfcpci(struct bchannel *bch, int bc, int protocol)
                if (fifo2 & 2) {
                        hc->hw.last_bfifo_cnt[1] = 0;
                        hc->hw.fifo_en |= HFCPCI_FIFOEN_B2;
-                       hc->hw.int_m1 |= (HFCPCI_INTS_B2TRANS +
+                       hc->hw.int_m1 |= (HFCPCI_INTS_B2TRANS |
                                          HFCPCI_INTS_B2REC);
                        hc->hw.ctmt &= ~2;
                        hc->hw.conn &= ~0x18;
                } else {
                        hc->hw.last_bfifo_cnt[0] = 0;
                        hc->hw.fifo_en |= HFCPCI_FIFOEN_B1;
-                       hc->hw.int_m1 |= (HFCPCI_INTS_B1TRANS +
+                       hc->hw.int_m1 |= (HFCPCI_INTS_B1TRANS |
                                          HFCPCI_INTS_B1REC);
                        hc->hw.ctmt &= ~1;
                        hc->hw.conn &= ~0x03;
index b27e530a87a4dc6f17046de52c22011efbf7da36..2edae7dfcab25407c4c677bcfad6e4f3bdf4e57f 100644 (file)
@@ -118,8 +118,12 @@ static ssize_t ams_input_store_joystick(struct device *dev,
 {
        unsigned long enable;
        int error = 0;
+       int ret;
 
-       if (strict_strtoul(buf, 0, &enable) || enable > 1)
+       ret = kstrtoul(buf, 0, &enable);
+       if (ret)
+               return ret;
+       if (enable > 1)
                return -EINVAL;
 
        mutex_lock(&ams_input_mutex);
index 5ef78efc27f2812155b50aca064e1a5679015f8f..2acc43fe02297dff29fe0b00635100cb42703708 100644 (file)
@@ -3,7 +3,7 @@
 #
 
 dm-mod-y       += dm.o dm-table.o dm-target.o dm-linear.o dm-stripe.o \
-                  dm-ioctl.o dm-io.o dm-kcopyd.o dm-sysfs.o
+                  dm-ioctl.o dm-io.o dm-kcopyd.o dm-sysfs.o dm-stats.o
 dm-multipath-y += dm-path-selector.o dm-mpath.o
 dm-snapshot-y  += dm-snap.o dm-exception-store.o dm-snap-transient.o \
                    dm-snap-persistent.o
index dc112a7137fe9280fca348908ed99b77f36f9417..4296155090b2b181f5840e21d97402ae0351d739 100644 (file)
@@ -959,23 +959,21 @@ out:
        return r;
 }
 
-static void remove_mapping(struct mq_policy *mq, dm_oblock_t oblock)
+static void mq_remove_mapping(struct dm_cache_policy *p, dm_oblock_t oblock)
 {
-       struct entry *e = hash_lookup(mq, oblock);
+       struct mq_policy *mq = to_mq_policy(p);
+       struct entry *e;
+
+       mutex_lock(&mq->lock);
+
+       e = hash_lookup(mq, oblock);
 
        BUG_ON(!e || !e->in_cache);
 
        del(mq, e);
        e->in_cache = false;
        push(mq, e);
-}
 
-static void mq_remove_mapping(struct dm_cache_policy *p, dm_oblock_t oblock)
-{
-       struct mq_policy *mq = to_mq_policy(p);
-
-       mutex_lock(&mq->lock);
-       remove_mapping(mq, oblock);
        mutex_unlock(&mq->lock);
 }
 
index 0df3ec085ebb49705c4db91815eb8df90808703a..29569768ffbf97259e327ee09b5ce50349ae203e 100644 (file)
@@ -67,9 +67,11 @@ static void free_bitset(unsigned long *bits)
 #define MIGRATION_COUNT_WINDOW 10
 
 /*
- * The block size of the device holding cache data must be >= 32KB
+ * The block size of the device holding cache data must be
+ * between 32KB and 1GB.
  */
 #define DATA_DEV_BLOCK_SIZE_MIN_SECTORS (32 * 1024 >> SECTOR_SHIFT)
+#define DATA_DEV_BLOCK_SIZE_MAX_SECTORS (1024 * 1024 * 1024 >> SECTOR_SHIFT)
 
 /*
  * FIXME: the cache is read/write for the time being.
@@ -101,6 +103,8 @@ struct cache {
        struct dm_target *ti;
        struct dm_target_callbacks callbacks;
 
+       struct dm_cache_metadata *cmd;
+
        /*
         * Metadata is written to this device.
         */
@@ -116,11 +120,6 @@ struct cache {
         */
        struct dm_dev *cache_dev;
 
-       /*
-        * Cache features such as write-through.
-        */
-       struct cache_features features;
-
        /*
         * Size of the origin device in _complete_ blocks and native sectors.
         */
@@ -138,8 +137,6 @@ struct cache {
        uint32_t sectors_per_block;
        int sectors_per_block_shift;
 
-       struct dm_cache_metadata *cmd;
-
        spinlock_t lock;
        struct bio_list deferred_bios;
        struct bio_list deferred_flush_bios;
@@ -148,8 +145,8 @@ struct cache {
        struct list_head completed_migrations;
        struct list_head need_commit_migrations;
        sector_t migration_threshold;
-       atomic_t nr_migrations;
        wait_queue_head_t migration_wait;
+       atomic_t nr_migrations;
 
        /*
         * cache_size entries, dirty if set
@@ -160,9 +157,16 @@ struct cache {
        /*
         * origin_blocks entries, discarded if set.
         */
-       uint32_t discard_block_size; /* a power of 2 times sectors per block */
        dm_dblock_t discard_nr_blocks;
        unsigned long *discard_bitset;
+       uint32_t discard_block_size; /* a power of 2 times sectors per block */
+
+       /*
+        * Rather than reconstructing the table line for the status we just
+        * save it and regurgitate.
+        */
+       unsigned nr_ctr_args;
+       const char **ctr_args;
 
        struct dm_kcopyd_client *copier;
        struct workqueue_struct *wq;
@@ -187,14 +191,12 @@ struct cache {
        bool loaded_mappings:1;
        bool loaded_discards:1;
 
-       struct cache_stats stats;
-
        /*
-        * Rather than reconstructing the table line for the status we just
-        * save it and regurgitate.
+        * Cache features such as write-through.
         */
-       unsigned nr_ctr_args;
-       const char **ctr_args;
+       struct cache_features features;
+
+       struct cache_stats stats;
 };
 
 struct per_bio_data {
@@ -1687,24 +1689,25 @@ static int parse_origin_dev(struct cache_args *ca, struct dm_arg_set *as,
 static int parse_block_size(struct cache_args *ca, struct dm_arg_set *as,
                            char **error)
 {
-       unsigned long tmp;
+       unsigned long block_size;
 
        if (!at_least_one_arg(as, error))
                return -EINVAL;
 
-       if (kstrtoul(dm_shift_arg(as), 10, &tmp) || !tmp ||
-           tmp < DATA_DEV_BLOCK_SIZE_MIN_SECTORS ||
-           tmp & (DATA_DEV_BLOCK_SIZE_MIN_SECTORS - 1)) {
+       if (kstrtoul(dm_shift_arg(as), 10, &block_size) || !block_size ||
+           block_size < DATA_DEV_BLOCK_SIZE_MIN_SECTORS ||
+           block_size > DATA_DEV_BLOCK_SIZE_MAX_SECTORS ||
+           block_size & (DATA_DEV_BLOCK_SIZE_MIN_SECTORS - 1)) {
                *error = "Invalid data block size";
                return -EINVAL;
        }
 
-       if (tmp > ca->cache_sectors) {
+       if (block_size > ca->cache_sectors) {
                *error = "Data block size is larger than the cache device";
                return -EINVAL;
        }
 
-       ca->block_size = tmp;
+       ca->block_size = block_size;
 
        return 0;
 }
@@ -2609,9 +2612,17 @@ static void set_discard_limits(struct cache *cache, struct queue_limits *limits)
 static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits)
 {
        struct cache *cache = ti->private;
+       uint64_t io_opt_sectors = limits->io_opt >> SECTOR_SHIFT;
 
-       blk_limits_io_min(limits, 0);
-       blk_limits_io_opt(limits, cache->sectors_per_block << SECTOR_SHIFT);
+       /*
+        * If the system-determined stacked limits are compatible with the
+        * cache's blocksize (io_opt is a factor) do not override them.
+        */
+       if (io_opt_sectors < cache->sectors_per_block ||
+           do_div(io_opt_sectors, cache->sectors_per_block)) {
+               blk_limits_io_min(limits, 0);
+               blk_limits_io_opt(limits, cache->sectors_per_block << SECTOR_SHIFT);
+       }
        set_discard_limits(cache, limits);
 }
 
index 6d2d41ae9e322dbd53e787e5294f2d55551296eb..0fce0bc1a9572f70167cc66d0524186e9e5abece 100644 (file)
@@ -1645,20 +1645,14 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
        }
 
        ret = -ENOMEM;
-       cc->io_queue = alloc_workqueue("kcryptd_io",
-                                      WQ_NON_REENTRANT|
-                                      WQ_MEM_RECLAIM,
-                                      1);
+       cc->io_queue = alloc_workqueue("kcryptd_io", WQ_MEM_RECLAIM, 1);
        if (!cc->io_queue) {
                ti->error = "Couldn't create kcryptd io queue";
                goto bad;
        }
 
        cc->crypt_queue = alloc_workqueue("kcryptd",
-                                         WQ_NON_REENTRANT|
-                                         WQ_CPU_INTENSIVE|
-                                         WQ_MEM_RECLAIM,
-                                         1);
+                                         WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 1);
        if (!cc->crypt_queue) {
                ti->error = "Couldn't create kcryptd queue";
                goto bad;
index f1b758675ec77e2e2c0d16246e2ba772fff01476..40e6dce199a2457e4042e6b8d71a8245b4c29a67 100644 (file)
@@ -1455,20 +1455,26 @@ static int table_status(struct dm_ioctl *param, size_t param_size)
        return 0;
 }
 
-static bool buffer_test_overflow(char *result, unsigned maxlen)
-{
-       return !maxlen || strlen(result) + 1 >= maxlen;
-}
-
 /*
- * Process device-mapper dependent messages.
+ * Process device-mapper dependent messages.  Messages prefixed with '@'
+ * are processed by the DM core.  All others are delivered to the target.
  * Returns a number <= 1 if message was processed by device mapper.
  * Returns 2 if message should be delivered to the target.
  */
 static int message_for_md(struct mapped_device *md, unsigned argc, char **argv,
                          char *result, unsigned maxlen)
 {
-       return 2;
+       int r;
+
+       if (**argv != '@')
+               return 2; /* no '@' prefix, deliver to target */
+
+       r = dm_stats_message(md, argc, argv, result, maxlen);
+       if (r < 2)
+               return r;
+
+       DMERR("Unsupported message sent to DM core: %s", argv[0]);
+       return -EINVAL;
 }
 
 /*
@@ -1542,7 +1548,7 @@ static int target_message(struct dm_ioctl *param, size_t param_size)
 
        if (r == 1) {
                param->flags |= DM_DATA_OUT_FLAG;
-               if (buffer_test_overflow(result, maxlen))
+               if (dm_message_test_buffer_overflow(result, maxlen))
                        param->flags |= DM_BUFFER_FULL_FLAG;
                else
                        param->data_size = param->data_start + strlen(result) + 1;
index d581fe5d2faf1df83f1fce5725926c8619d7e7aa..3a7cade5e27d828ffa2df3b9254f9064ec078c84 100644 (file)
@@ -833,8 +833,7 @@ struct dm_kcopyd_client *dm_kcopyd_client_create(struct dm_kcopyd_throttle *thro
                goto bad_slab;
 
        INIT_WORK(&kc->kcopyd_work, do_work);
-       kc->kcopyd_wq = alloc_workqueue("kcopyd",
-                                       WQ_NON_REENTRANT | WQ_MEM_RECLAIM, 0);
+       kc->kcopyd_wq = alloc_workqueue("kcopyd", WQ_MEM_RECLAIM, 0);
        if (!kc->kcopyd_wq)
                goto bad_workqueue;
 
index 699b5be68d319263cce75e8d932deb0be25c7d00..9584443c56148608d159ceab1d436fd6bacfda3b 100644 (file)
@@ -1080,8 +1080,7 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
        ti->per_bio_data_size = sizeof(struct dm_raid1_bio_record);
        ti->discard_zeroes_data_unsupported = true;
 
-       ms->kmirrord_wq = alloc_workqueue("kmirrord",
-                                         WQ_NON_REENTRANT | WQ_MEM_RECLAIM, 0);
+       ms->kmirrord_wq = alloc_workqueue("kmirrord", WQ_MEM_RECLAIM, 0);
        if (!ms->kmirrord_wq) {
                DMERR("couldn't start kmirrord");
                r = -ENOMEM;
diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
new file mode 100644 (file)
index 0000000..b1b6007
--- /dev/null
@@ -0,0 +1,968 @@
+#include <linux/errno.h>
+#include <linux/numa.h>
+#include <linux/slab.h>
+#include <linux/rculist.h>
+#include <linux/threads.h>
+#include <linux/preempt.h>
+#include <linux/irqflags.h>
+#include <linux/vmalloc.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/device-mapper.h>
+
+#include "dm.h"
+#include "dm-stats.h"
+
+#define DM_MSG_PREFIX "stats"
+
+/*
+ * Set (never cleared) once a region has been freed via call_rcu();
+ * dm_statistics_exit() then issues rcu_barrier() so pending callbacks
+ * finish before the module text goes away.
+ */
+static int dm_stat_need_rcu_barrier;
+
+/*
+ * Using 64-bit values to avoid overflow (which is a
+ * problem that block/genhd.c's IO accounting has).
+ */
+struct dm_stat_percpu {
+       unsigned long long sectors[2];          /* [READ]/[WRITE] sectors transferred */
+       unsigned long long ios[2];              /* completed ios */
+       unsigned long long merges[2];           /* bios that started where the previous one ended */
+       unsigned long long ticks[2];            /* summed duration of completed ios */
+       unsigned long long io_ticks[2];         /* time with >= 1 read (resp. write) in flight */
+       unsigned long long io_ticks_total;      /* time with any io in flight */
+       unsigned long long time_in_queue;       /* integral of the in-flight count over time */
+};
+
+/*
+ * Per-area state shared by all CPUs (unlike the per-cpu counters above):
+ * in-flight counts must be globally consistent, and "tmp" is scratch
+ * space where per-cpu counters are summed while reporting.
+ */
+struct dm_stat_shared {
+       atomic_t in_flight[2];
+       unsigned long stamp;            /* jiffies of the last dm_stat_round() */
+       struct dm_stat_percpu tmp;      /* accessed only under dm_stats->mutex */
+};
+
+/*
+ * One statistics region: [start, end) split into n_entries areas of
+ * "step" sectors each.  Freed through rcu_head once readers are done.
+ */
+struct dm_stat {
+       struct list_head list_entry;    /* on dm_stats->list, kept sorted by id */
+       int id;
+       size_t n_entries;
+       sector_t start;
+       sector_t end;
+       sector_t step;
+       const char *program_id;         /* kstrdup'ed; owned by this struct */
+       const char *aux_data;           /* kstrdup'ed; owned by this struct */
+       struct rcu_head rcu_head;
+       size_t shared_alloc_size;       /* bytes allocated for this struct itself */
+       size_t percpu_alloc_size;       /* bytes allocated for each stat_percpu[cpu] */
+       struct dm_stat_percpu *stat_percpu[NR_CPUS];
+       struct dm_stat_shared stat_shared[0];   /* n_entries elements follow */
+};
+
+/*
+ * Per-cpu record of where the previously submitted bio ended, used by
+ * dm_stats_account_io() to detect back-to-back ("merged") bios.
+ */
+struct dm_stats_last_position {
+       sector_t last_sector;
+       unsigned last_rw;
+};
+
+/*
+ * A typo on the command line could possibly make the kernel run out of memory
+ * and crash. To prevent the crash we account all used memory. We fail if we
+ * exhaust 1/4 of all memory or 1/2 of vmalloc space.
+ */
+#define DM_STATS_MEMORY_FACTOR         4
+#define DM_STATS_VMALLOC_FACTOR                2
+
+/* Protects shared_memory_amount. */
+static DEFINE_SPINLOCK(shared_memory_lock);
+
+/* Total bytes currently allocated by all statistics regions. */
+static unsigned long shared_memory_amount;
+
+static bool __check_shared_memory(size_t alloc_size)
+{
+       size_t a;
+
+       a = shared_memory_amount + alloc_size;
+       if (a < shared_memory_amount)
+               return false;
+       if (a >> PAGE_SHIFT > totalram_pages / DM_STATS_MEMORY_FACTOR)
+               return false;
+#ifdef CONFIG_MMU
+       if (a > (VMALLOC_END - VMALLOC_START) / DM_STATS_VMALLOC_FACTOR)
+               return false;
+#endif
+       return true;
+}
+
+/* Locked wrapper around __check_shared_memory(). */
+static bool check_shared_memory(size_t alloc_size)
+{
+       bool fits;
+
+       spin_lock_irq(&shared_memory_lock);
+       fits = __check_shared_memory(alloc_size);
+       spin_unlock_irq(&shared_memory_lock);
+
+       return fits;
+}
+
+/*
+ * Atomically check the limits and, if they allow it, account
+ * @alloc_size bytes.  Returns false when the allocation must not
+ * proceed (nothing was charged in that case).
+ */
+static bool claim_shared_memory(size_t alloc_size)
+{
+       bool claimed = false;
+
+       spin_lock_irq(&shared_memory_lock);
+       if (__check_shared_memory(alloc_size)) {
+               shared_memory_amount += alloc_size;
+               claimed = true;
+       }
+       spin_unlock_irq(&shared_memory_lock);
+
+       return claimed;
+}
+
+/*
+ * Return @alloc_size bytes to the accounting pool.
+ *
+ * Fix: the underflow check used to read shared_memory_amount before
+ * taking shared_memory_lock, racing with concurrent claim/free so the
+ * warning could fire spuriously (or be missed).  Do the check and the
+ * subtraction under the same critical section.
+ */
+static void free_shared_memory(size_t alloc_size)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&shared_memory_lock, flags);
+
+       if (WARN_ON_ONCE(shared_memory_amount < alloc_size)) {
+               spin_unlock_irqrestore(&shared_memory_lock, flags);
+               DMCRIT("Memory usage accounting bug.");
+               return;
+       }
+
+       shared_memory_amount -= alloc_size;
+
+       spin_unlock_irqrestore(&shared_memory_lock, flags);
+}
+
+/*
+ * Allocate @alloc_size zeroed bytes on @node, charging them against the
+ * shared-memory limit first.  Small requests try kmalloc (with flags
+ * chosen to avoid disturbing the rest of the system); otherwise, or on
+ * kmalloc failure, fall back to vmalloc.  On failure the accounting is
+ * released and NULL is returned.
+ */
+static void *dm_kvzalloc(size_t alloc_size, int node)
+{
+       void *ptr = NULL;
+
+       if (!claim_shared_memory(alloc_size))
+               return NULL;
+
+       if (alloc_size <= KMALLOC_MAX_SIZE)
+               ptr = kzalloc_node(alloc_size, GFP_KERNEL | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN, node);
+       if (!ptr)
+               ptr = vzalloc_node(alloc_size, node);
+       if (!ptr)
+               free_shared_memory(alloc_size);
+
+       return ptr;
+}
+
+/*
+ * Free memory obtained from dm_kvzalloc() and return its size to the
+ * accounting pool.  NULL is a no-op.
+ */
+static void dm_kvfree(void *ptr, size_t alloc_size)
+{
+       if (ptr) {
+               free_shared_memory(alloc_size);
+               if (is_vmalloc_addr(ptr))
+                       vfree(ptr);
+               else
+                       kfree(ptr);
+       }
+}
+
+/*
+ * Free one region: the id strings, the per-cpu counter arrays and the
+ * region itself (allocation sizes were recorded in the struct for the
+ * memory accounting).  May run as an RCU callback -- dm_stats_delete()
+ * only schedules it via call_rcu() when no vmalloc'ed memory is
+ * involved, because vfree() must not be called from that context.
+ */
+static void dm_stat_free(struct rcu_head *head)
+{
+       int cpu;
+       struct dm_stat *s = container_of(head, struct dm_stat, rcu_head);
+
+       kfree(s->program_id);
+       kfree(s->aux_data);
+       for_each_possible_cpu(cpu)
+               dm_kvfree(s->stat_percpu[cpu], s->percpu_alloc_size);
+       dm_kvfree(s, s->shared_alloc_size);
+}
+
+/* Total bios (reads + writes) currently in flight for one area. */
+static int dm_stat_in_flight(struct dm_stat_shared *shared)
+{
+       int reads = atomic_read(&shared->in_flight[READ]);
+       int writes = atomic_read(&shared->in_flight[WRITE]);
+
+       return reads + writes;
+}
+
+/*
+ * Initialise the per-device statistics state (empty region list plus
+ * the per-cpu "last position" records used for merge detection).
+ *
+ * NOTE(review): alloc_percpu() can return NULL, which would be
+ * dereferenced by per_cpu_ptr() in the loop below -- confirm whether
+ * allocation failure needs handling at this call site.
+ */
+void dm_stats_init(struct dm_stats *stats)
+{
+       int cpu;
+       struct dm_stats_last_position *last;
+
+       mutex_init(&stats->mutex);
+       INIT_LIST_HEAD(&stats->list);
+       stats->last = alloc_percpu(struct dm_stats_last_position);
+       for_each_possible_cpu(cpu) {
+               last = per_cpu_ptr(stats->last, cpu);
+               /* impossible values, so the first bio is never seen as merged */
+               last->last_sector = (sector_t)ULLONG_MAX;
+               last->last_rw = UINT_MAX;
+       }
+}
+
+/*
+ * Tear down all statistics regions of a device that is going away.
+ * A nonzero in-flight counter at this point means a bio was accounted
+ * at submission but never at completion, so warn loudly (with enough
+ * context to locate the leaking area) instead of failing silently.
+ */
+void dm_stats_cleanup(struct dm_stats *stats)
+{
+       size_t ni;
+       struct dm_stat *s;
+       struct dm_stat_shared *shared;
+
+       while (!list_empty(&stats->list)) {
+               s = container_of(stats->list.next, struct dm_stat, list_entry);
+               list_del(&s->list_entry);
+               for (ni = 0; ni < s->n_entries; ni++) {
+                       shared = &s->stat_shared[ni];
+                       if (WARN_ON(dm_stat_in_flight(shared))) {
+                               DMCRIT("leaked in-flight counter at index %lu "
+                                      "(start %llu, end %llu, step %llu): reads %d, writes %d",
+                                      (unsigned long)ni,
+                                      (unsigned long long)s->start,
+                                      (unsigned long long)s->end,
+                                      (unsigned long long)s->step,
+                                      atomic_read(&shared->in_flight[READ]),
+                                      atomic_read(&shared->in_flight[WRITE]));
+                       }
+               }
+               /* no readers remain; free synchronously, not via RCU */
+               dm_stat_free(&s->rcu_head);
+       }
+       free_percpu(stats->last);
+}
+
+/*
+ * Create a statistics region covering [start, end) divided into areas
+ * of "step" sectors, allocate its counters, and insert it into the
+ * device's region list under the lowest free id.
+ *
+ * @program_id and @aux_data are copied.  The caller-supplied
+ * suspend/resume callbacks are invoked around the list insertion so
+ * that no io is in flight when counting begins.
+ *
+ * Returns the new region id (>= 0) or a negative errno.
+ */
+static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end,
+                          sector_t step, const char *program_id, const char *aux_data,
+                          void (*suspend_callback)(struct mapped_device *),
+                          void (*resume_callback)(struct mapped_device *),
+                          struct mapped_device *md)
+{
+       struct list_head *l;
+       struct dm_stat *s, *tmp_s;
+       sector_t n_entries;
+       size_t ni;
+       size_t shared_alloc_size;
+       size_t percpu_alloc_size;
+       struct dm_stat_percpu *p;
+       int cpu;
+       int ret_id;
+       int r;
+
+       if (end < start || !step)
+               return -EINVAL;
+
+       /* number of areas = ceil((end - start) / step) */
+       n_entries = end - start;
+       if (dm_sector_div64(n_entries, step))
+               n_entries++;
+
+       /* must fit in size_t, with room for the implicit +1 usage below */
+       if (n_entries != (size_t)n_entries || !(n_entries + 1))
+               return -EOVERFLOW;
+
+       /* all size computations are checked for multiplication overflow */
+       shared_alloc_size = sizeof(struct dm_stat) + (size_t)n_entries * sizeof(struct dm_stat_shared);
+       if ((shared_alloc_size - sizeof(struct dm_stat)) / sizeof(struct dm_stat_shared) != n_entries)
+               return -EOVERFLOW;
+
+       percpu_alloc_size = (size_t)n_entries * sizeof(struct dm_stat_percpu);
+       if (percpu_alloc_size / sizeof(struct dm_stat_percpu) != n_entries)
+               return -EOVERFLOW;
+
+       /* cheap up-front check before committing to the allocations */
+       if (!check_shared_memory(shared_alloc_size + num_possible_cpus() * percpu_alloc_size))
+               return -ENOMEM;
+
+       s = dm_kvzalloc(shared_alloc_size, NUMA_NO_NODE);
+       if (!s)
+               return -ENOMEM;
+
+       s->n_entries = n_entries;
+       s->start = start;
+       s->end = end;
+       s->step = step;
+       s->shared_alloc_size = shared_alloc_size;
+       s->percpu_alloc_size = percpu_alloc_size;
+
+       s->program_id = kstrdup(program_id, GFP_KERNEL);
+       if (!s->program_id) {
+               r = -ENOMEM;
+               goto out;
+       }
+       s->aux_data = kstrdup(aux_data, GFP_KERNEL);
+       if (!s->aux_data) {
+               r = -ENOMEM;
+               goto out;
+       }
+
+       for (ni = 0; ni < n_entries; ni++) {
+               atomic_set(&s->stat_shared[ni].in_flight[READ], 0);
+               atomic_set(&s->stat_shared[ni].in_flight[WRITE], 0);
+       }
+
+       /* per-cpu counters are allocated on each cpu's own node */
+       for_each_possible_cpu(cpu) {
+               p = dm_kvzalloc(percpu_alloc_size, cpu_to_node(cpu));
+               if (!p) {
+                       r = -ENOMEM;
+                       goto out;
+               }
+               s->stat_percpu[cpu] = p;
+       }
+
+       /*
+        * Suspend/resume to make sure there is no i/o in flight,
+        * so that newly created statistics will be exact.
+        *
+        * (note: we couldn't suspend earlier because we must not
+        * allocate memory while suspended)
+        */
+       suspend_callback(md);
+
+       mutex_lock(&stats->mutex);
+       s->id = 0;
+       /* the list is sorted by id; take the first unused id */
+       list_for_each(l, &stats->list) {
+               tmp_s = container_of(l, struct dm_stat, list_entry);
+               if (WARN_ON(tmp_s->id < s->id)) {
+                       r = -EINVAL;
+                       goto out_unlock_resume;
+               }
+               if (tmp_s->id > s->id)
+                       break;
+               if (unlikely(s->id == INT_MAX)) {
+                       r = -ENFILE;
+                       goto out_unlock_resume;
+               }
+               s->id++;
+       }
+       ret_id = s->id;
+       list_add_tail_rcu(&s->list_entry, l);
+       mutex_unlock(&stats->mutex);
+
+       resume_callback(md);
+
+       return ret_id;
+
+out_unlock_resume:
+       mutex_unlock(&stats->mutex);
+       resume_callback(md);
+out:
+       dm_stat_free(&s->rcu_head);
+       return r;
+}
+
+/*
+ * Find a region by id.  The list is kept sorted by id, so the scan can
+ * stop as soon as a larger id is seen.  Caller must hold stats->mutex.
+ * Returns NULL when no region has that id.
+ */
+static struct dm_stat *__dm_stats_find(struct dm_stats *stats, int id)
+{
+       struct dm_stat *s;
+
+       list_for_each_entry(s, &stats->list, list_entry) {
+               if (s->id == id)
+                       return s;
+               if (s->id > id)
+                       break;
+       }
+
+       return NULL;
+}
+
+/*
+ * Delete a region by id.
+ *
+ * vfree() may not be called from an RCU callback, so regions involving
+ * vmalloc'ed memory (the shared part or any per-cpu part) are freed
+ * synchronously after synchronize_rcu_expedited(); purely kmalloc'ed
+ * regions can be freed asynchronously via call_rcu().
+ *
+ * Fix: the loop used to test is_vmalloc_addr(s->stat_percpu) -- the
+ * address of the array embedded in "s", which is loop-invariant and
+ * effectively just re-tests "s" -- instead of the per-cpu allocations
+ * s->stat_percpu[cpu].  A vmalloc'ed per-cpu buffer could therefore be
+ * handed to vfree() from RCU callback context.
+ */
+static int dm_stats_delete(struct dm_stats *stats, int id)
+{
+       struct dm_stat *s;
+       int cpu;
+
+       mutex_lock(&stats->mutex);
+
+       s = __dm_stats_find(stats, id);
+       if (!s) {
+               mutex_unlock(&stats->mutex);
+               return -ENOENT;
+       }
+
+       list_del_rcu(&s->list_entry);
+       mutex_unlock(&stats->mutex);
+
+       /*
+        * vfree can't be called from RCU callback
+        */
+       for_each_possible_cpu(cpu)
+               if (is_vmalloc_addr(s->stat_percpu[cpu]))
+                       goto do_sync_free;
+       if (is_vmalloc_addr(s)) {
+do_sync_free:
+               synchronize_rcu_expedited();
+               dm_stat_free(&s->rcu_head);
+       } else {
+               ACCESS_ONCE(dm_stat_need_rcu_barrier) = 1;
+               call_rcu(&s->rcu_head, dm_stat_free);
+       }
+       return 0;
+}
+
+/*
+ * @stats_list message: emit one line per region, optionally restricted
+ * to regions whose program_id equals @program (NULL means all).
+ *
+ * NOTE(review): "sz" is never compared against maxlen here, so a long
+ * list is truncated by DMEMIT -- confirm that the caller's buffer
+ * overflow test (dm_message_test_buffer_overflow) covers this case.
+ */
+static int dm_stats_list(struct dm_stats *stats, const char *program,
+                        char *result, unsigned maxlen)
+{
+       struct dm_stat *s;
+       sector_t len;
+       unsigned sz = 0;
+
+       /*
+        * Output format:
+        *   <region_id>: <start_sector>+<length> <step> <program_id> <aux_data>
+        */
+
+       mutex_lock(&stats->mutex);
+       list_for_each_entry(s, &stats->list, list_entry) {
+               if (!program || !strcmp(program, s->program_id)) {
+                       len = s->end - s->start;
+                       DMEMIT("%d: %llu+%llu %llu %s %s\n", s->id,
+                               (unsigned long long)s->start,
+                               (unsigned long long)len,
+                               (unsigned long long)s->step,
+                               s->program_id,
+                               s->aux_data);
+               }
+       }
+       mutex_unlock(&stats->mutex);
+
+       return 1;
+}
+
+/*
+ * Bring the time-based counters of one area up to the current jiffy:
+ * the interval since the last round (shared->stamp) is credited to the
+ * io_ticks/time_in_queue counters according to how many ios are
+ * currently in flight.
+ */
+static void dm_stat_round(struct dm_stat_shared *shared, struct dm_stat_percpu *p)
+{
+       /*
+        * This is racy, but so is part_round_stats_single.
+        */
+       unsigned long now = jiffies;
+       unsigned in_flight_read;
+       unsigned in_flight_write;
+       unsigned long difference = now - shared->stamp;
+
+       if (!difference)
+               return;
+       in_flight_read = (unsigned)atomic_read(&shared->in_flight[READ]);
+       in_flight_write = (unsigned)atomic_read(&shared->in_flight[WRITE]);
+       if (in_flight_read)
+               p->io_ticks[READ] += difference;
+       if (in_flight_write)
+               p->io_ticks[WRITE] += difference;
+       if (in_flight_read + in_flight_write) {
+               p->io_ticks_total += difference;
+               /* time_in_queue accumulates (in-flight count) x (elapsed time) */
+               p->time_in_queue += (in_flight_read + in_flight_write) * difference;
+       }
+       shared->stamp = now;
+}
+
+/*
+ * Update the counters of one area for one bio fragment.
+ *
+ * @entry: area index into s->stat_shared / s->stat_percpu[cpu]
+ * @bi_rw: bio flags; only REQ_WRITE is used, to pick the READ/WRITE slot
+ * @len: fragment length in sectors (only counted at completion)
+ * @end: false at submission, true at completion
+ * @duration: bio duration in jiffies, meaningful only when @end
+ */
+static void dm_stat_for_entry(struct dm_stat *s, size_t entry,
+                             unsigned long bi_rw, sector_t len, bool merged,
+                             bool end, unsigned long duration)
+{
+       unsigned long idx = bi_rw & REQ_WRITE;
+       struct dm_stat_shared *shared = &s->stat_shared[entry];
+       struct dm_stat_percpu *p;
+
+       /*
+        * For strict correctness we should use local_irq_disable/enable
+        * instead of preempt_disable/enable.
+        *
+        * This is racy if the driver finishes bios from non-interrupt
+        * context as well as from interrupt context or from more different
+        * interrupts.
+        *
+        * However, the race only results in not counting some events,
+        * so it is acceptable.
+        *
+        * part_stat_lock()/part_stat_unlock() have this race too.
+        */
+       preempt_disable();
+       p = &s->stat_percpu[smp_processor_id()][entry];
+
+       if (!end) {
+               dm_stat_round(shared, p);
+               atomic_inc(&shared->in_flight[idx]);
+       } else {
+               dm_stat_round(shared, p);
+               atomic_dec(&shared->in_flight[idx]);
+               p->sectors[idx] += len;
+               p->ios[idx] += 1;
+               p->merges[idx] += merged;
+               p->ticks[idx] += duration;
+       }
+
+       preempt_enable();
+}
+
+/*
+ * Account one bio in one region: the bio is clipped to [s->start,
+ * s->end), split at area boundaries, and each fragment is charged to
+ * its area via dm_stat_for_entry().
+ */
+static void __dm_stat_bio(struct dm_stat *s, unsigned long bi_rw,
+                         sector_t bi_sector, sector_t end_sector,
+                         bool end, unsigned long duration,
+                         struct dm_stats_aux *stats_aux)
+{
+       sector_t rel_sector, offset, todo, fragment_len;
+       size_t entry;
+
+       /* no overlap with this region at all */
+       if (end_sector <= s->start || bi_sector >= s->end)
+               return;
+       if (unlikely(bi_sector < s->start)) {
+               rel_sector = 0;
+               todo = end_sector - s->start;
+       } else {
+               rel_sector = bi_sector - s->start;
+               todo = end_sector - bi_sector;
+       }
+       if (unlikely(end_sector > s->end))
+               todo -= (end_sector - s->end);
+
+       /* quotient = index of the first area, remainder = offset within it */
+       offset = dm_sector_div64(rel_sector, s->step);
+       entry = rel_sector;
+       do {
+               if (WARN_ON_ONCE(entry >= s->n_entries)) {
+                       DMCRIT("Invalid area access in region id %d", s->id);
+                       return;
+               }
+               fragment_len = todo;
+               if (fragment_len > s->step - offset)
+                       fragment_len = s->step - offset;
+               dm_stat_for_entry(s, entry, bi_rw, fragment_len,
+                                 stats_aux->merged, end, duration);
+               todo -= fragment_len;
+               entry++;
+               offset = 0;
+       } while (unlikely(todo));
+}
+
+/*
+ * Account one bio (at submission when @end is false, at completion when
+ * @end is true) in every statistics region it intersects.  @duration is
+ * only meaningful at completion time.
+ *
+ * Fix: the "merged" computation had a misplaced parenthesis and
+ * evaluated bi_sector == (last_sector && rw-match), i.e. it compared
+ * the sector number against a boolean, so the merged flag was computed
+ * incorrectly.  The intended condition is "starts where the previous
+ * bio ended AND has the same write/discard type".
+ */
+void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw,
+                        sector_t bi_sector, unsigned bi_sectors, bool end,
+                        unsigned long duration, struct dm_stats_aux *stats_aux)
+{
+       struct dm_stat *s;
+       sector_t end_sector;
+       struct dm_stats_last_position *last;
+
+       if (unlikely(!bi_sectors))
+               return;
+
+       end_sector = bi_sector + bi_sectors;
+
+       if (!end) {
+               /*
+                * A race condition can at worst result in the merged flag being
+                * misrepresented, so we don't have to disable preemption here.
+                */
+               last = __this_cpu_ptr(stats->last);
+               stats_aux->merged =
+                       (bi_sector == ACCESS_ONCE(last->last_sector) &&
+                        ((bi_rw & (REQ_WRITE | REQ_DISCARD)) ==
+                         (ACCESS_ONCE(last->last_rw) & (REQ_WRITE | REQ_DISCARD))));
+               ACCESS_ONCE(last->last_sector) = end_sector;
+               ACCESS_ONCE(last->last_rw) = bi_rw;
+       }
+
+       rcu_read_lock();
+
+       /* regions are freed through rcu_head, so RCU protects this walk */
+       list_for_each_entry_rcu(s, &stats->list, list_entry)
+               __dm_stat_bio(s, bi_rw, bi_sector, end_sector, end, duration, stats_aux);
+
+       rcu_read_unlock();
+}
+
+/*
+ * Sum the per-cpu counters of area @x into shared->tmp (protected by
+ * the caller holding stats->mutex).  The area is first "rounded" on the
+ * local cpu so the time counters include the current interval.
+ */
+static void __dm_stat_init_temporary_percpu_totals(struct dm_stat_shared *shared,
+                                                  struct dm_stat *s, size_t x)
+{
+       int cpu;
+       struct dm_stat_percpu *p;
+
+       local_irq_disable();
+       p = &s->stat_percpu[smp_processor_id()][x];
+       dm_stat_round(shared, p);
+       local_irq_enable();
+
+       memset(&shared->tmp, 0, sizeof(shared->tmp));
+       for_each_possible_cpu(cpu) {
+               p = &s->stat_percpu[cpu][x];
+               shared->tmp.sectors[READ] += ACCESS_ONCE(p->sectors[READ]);
+               shared->tmp.sectors[WRITE] += ACCESS_ONCE(p->sectors[WRITE]);
+               shared->tmp.ios[READ] += ACCESS_ONCE(p->ios[READ]);
+               shared->tmp.ios[WRITE] += ACCESS_ONCE(p->ios[WRITE]);
+               shared->tmp.merges[READ] += ACCESS_ONCE(p->merges[READ]);
+               shared->tmp.merges[WRITE] += ACCESS_ONCE(p->merges[WRITE]);
+               shared->tmp.ticks[READ] += ACCESS_ONCE(p->ticks[READ]);
+               shared->tmp.ticks[WRITE] += ACCESS_ONCE(p->ticks[WRITE]);
+               shared->tmp.io_ticks[READ] += ACCESS_ONCE(p->io_ticks[READ]);
+               shared->tmp.io_ticks[WRITE] += ACCESS_ONCE(p->io_ticks[WRITE]);
+               shared->tmp.io_ticks_total += ACCESS_ONCE(p->io_ticks_total);
+               shared->tmp.time_in_queue += ACCESS_ONCE(p->time_in_queue);
+       }
+}
+
+/*
+ * Reset areas [idx_start, idx_end) by subtracting the collected totals
+ * (shared->tmp) from the local cpu's counters: the sum over all cpus
+ * then reads as zero, without having to write to other cpus' data.
+ * When @init_tmp_percpu_totals is false, the caller has already filled
+ * shared->tmp (see dm_stats_print()).
+ */
+static void __dm_stat_clear(struct dm_stat *s, size_t idx_start, size_t idx_end,
+                           bool init_tmp_percpu_totals)
+{
+       size_t x;
+       struct dm_stat_shared *shared;
+       struct dm_stat_percpu *p;
+
+       for (x = idx_start; x < idx_end; x++) {
+               shared = &s->stat_shared[x];
+               if (init_tmp_percpu_totals)
+                       __dm_stat_init_temporary_percpu_totals(shared, s, x);
+               local_irq_disable();
+               p = &s->stat_percpu[smp_processor_id()][x];
+               p->sectors[READ] -= shared->tmp.sectors[READ];
+               p->sectors[WRITE] -= shared->tmp.sectors[WRITE];
+               p->ios[READ] -= shared->tmp.ios[READ];
+               p->ios[WRITE] -= shared->tmp.ios[WRITE];
+               p->merges[READ] -= shared->tmp.merges[READ];
+               p->merges[WRITE] -= shared->tmp.merges[WRITE];
+               p->ticks[READ] -= shared->tmp.ticks[READ];
+               p->ticks[WRITE] -= shared->tmp.ticks[WRITE];
+               p->io_ticks[READ] -= shared->tmp.io_ticks[READ];
+               p->io_ticks[WRITE] -= shared->tmp.io_ticks[WRITE];
+               p->io_ticks_total -= shared->tmp.io_ticks_total;
+               p->time_in_queue -= shared->tmp.time_in_queue;
+               local_irq_enable();
+       }
+}
+
+/* @stats_clear message: zero all counters of region @id. */
+static int dm_stats_clear(struct dm_stats *stats, int id)
+{
+       struct dm_stat *s;
+       int r = 1;
+
+       mutex_lock(&stats->mutex);
+       s = __dm_stats_find(stats, id);
+       if (s)
+               __dm_stat_clear(s, 0, s->n_entries, true);
+       else
+               r = -ENOENT;
+       mutex_unlock(&stats->mutex);
+
+       return r;
+}
+
+/*
+ * This is like jiffies_to_msec, but works for 64-bit values.
+ *
+ * jiffies_to_msecs() takes a narrower argument, so the value is
+ * converted in 22-bit chunks: writing j = a + b * 2^22 + c * 2^44,
+ *   msecs(j) = msecs(a) + M * msecs(b) + M * M * msecs(c)
+ * where M = jiffies_to_msecs(1 << 22).  "mult" (= M) is only read when
+ * j >= 1 << 44, which implies j >= 1 << 22 -- the only case in which
+ * it is assigned -- so it is never used uninitialised.
+ */
+static unsigned long long dm_jiffies_to_msec64(unsigned long long j)
+{
+       unsigned long long result = 0;
+       unsigned mult;
+
+       if (j)
+               result = jiffies_to_msecs(j & 0x3fffff);
+       if (j >= 1 << 22) {
+               mult = jiffies_to_msecs(1 << 22);
+               result += (unsigned long long)mult * (unsigned long long)jiffies_to_msecs((j >> 22) & 0x3fffff);
+       }
+       if (j >= 1ULL << 44)
+               result += (unsigned long long)mult * (unsigned long long)mult * (unsigned long long)jiffies_to_msecs(j >> 44);
+
+       return result;
+}
+
+/*
+ * @stats_print[_clear] message body: emit one line per area in
+ * [idx_start, idx_end).  When @clear is set, the printed areas are
+ * also reset using the totals just collected, so each area's print and
+ * clear see the same values.
+ */
+static int dm_stats_print(struct dm_stats *stats, int id,
+                         size_t idx_start, size_t idx_len,
+                         bool clear, char *result, unsigned maxlen)
+{
+       unsigned sz = 0;
+       struct dm_stat *s;
+       size_t x;
+       sector_t start, end, step;
+       size_t idx_end;
+       struct dm_stat_shared *shared;
+
+       /*
+        * Output format:
+        *   <start_sector>+<length> counters
+        */
+
+       mutex_lock(&stats->mutex);
+
+       s = __dm_stats_find(stats, id);
+       if (!s) {
+               mutex_unlock(&stats->mutex);
+               return -ENOENT;
+       }
+
+       /* clamp the requested index window to the region (overflow-safe) */
+       idx_end = idx_start + idx_len;
+       if (idx_end < idx_start ||
+           idx_end > s->n_entries)
+               idx_end = s->n_entries;
+
+       if (idx_start > idx_end)
+               idx_start = idx_end;
+
+       step = s->step;
+       start = s->start + (step * idx_start);
+
+       for (x = idx_start; x < idx_end; x++, start = end) {
+               shared = &s->stat_shared[x];
+               end = start + step;
+               if (unlikely(end > s->end))
+                       end = s->end;
+
+               __dm_stat_init_temporary_percpu_totals(shared, s, x);
+
+               DMEMIT("%llu+%llu %llu %llu %llu %llu %llu %llu %llu %llu %d %llu %llu %llu %llu\n",
+                      (unsigned long long)start,
+                      (unsigned long long)step,
+                      shared->tmp.ios[READ],
+                      shared->tmp.merges[READ],
+                      shared->tmp.sectors[READ],
+                      dm_jiffies_to_msec64(shared->tmp.ticks[READ]),
+                      shared->tmp.ios[WRITE],
+                      shared->tmp.merges[WRITE],
+                      shared->tmp.sectors[WRITE],
+                      dm_jiffies_to_msec64(shared->tmp.ticks[WRITE]),
+                      dm_stat_in_flight(shared),
+                      dm_jiffies_to_msec64(shared->tmp.io_ticks_total),
+                      dm_jiffies_to_msec64(shared->tmp.time_in_queue),
+                      dm_jiffies_to_msec64(shared->tmp.io_ticks[READ]),
+                      dm_jiffies_to_msec64(shared->tmp.io_ticks[WRITE]));
+
+               /* output buffer full: stop (note that the clear is skipped too) */
+               if (unlikely(sz + 1 >= maxlen))
+                       goto buffer_overflow;
+       }
+
+       if (clear)
+               __dm_stat_clear(s, idx_start, idx_end, false);
+
+buffer_overflow:
+       mutex_unlock(&stats->mutex);
+
+       return 1;
+}
+
+/* @stats_set_aux message: replace the aux_data string of region @id. */
+static int dm_stats_set_aux(struct dm_stats *stats, int id, const char *aux_data)
+{
+       struct dm_stat *s;
+       const char *dup;
+       int r = 0;
+
+       mutex_lock(&stats->mutex);
+
+       s = __dm_stats_find(stats, id);
+       if (!s) {
+               r = -ENOENT;
+               goto unlock;
+       }
+
+       dup = kstrdup(aux_data, GFP_KERNEL);
+       if (!dup) {
+               r = -ENOMEM;
+               goto unlock;
+       }
+
+       kfree(s->aux_data);
+       s->aux_data = dup;
+
+unlock:
+       mutex_unlock(&stats->mutex);
+       return r;
+}
+
+/*
+ * @stats_create <range> <step> [<program_id> [<aux_data>]]
+ *
+ * <range> is "<start>+<len>" in sectors, or "-" for the whole device.
+ * <step> is an area size in sectors, or "/<n>" to divide the range
+ * into n areas.  On success the new region id is written to @result.
+ */
+static int message_stats_create(struct mapped_device *md,
+                               unsigned argc, char **argv,
+                               char *result, unsigned maxlen)
+{
+       int id;
+       char dummy;
+       unsigned long long start, end, len, step;
+       unsigned divisor;
+       const char *program_id, *aux_data;
+
+       /*
+        * Input format:
+        *   <range> <step> [<program_id> [<aux_data>]]
+        */
+
+       if (argc < 3 || argc > 5)
+               return -EINVAL;
+
+       if (!strcmp(argv[1], "-")) {
+               start = 0;
+               len = dm_get_size(md);
+               if (!len)
+                       len = 1;
+       } else if (sscanf(argv[1], "%llu+%llu%c", &start, &len, &dummy) != 2 ||
+                  start != (sector_t)start || len != (sector_t)len)
+               return -EINVAL;
+
+       end = start + len;
+       if (start >= end)
+               return -EINVAL;
+
+       if (sscanf(argv[2], "/%u%c", &divisor, &dummy) == 1) {
+               /* "/<divisor>" form: step = ceil(range / divisor), at least 1 */
+               step = end - start;
+               if (do_div(step, divisor))
+                       step++;
+               if (!step)
+                       step = 1;
+       } else if (sscanf(argv[2], "%llu%c", &step, &dummy) != 1 ||
+                  step != (sector_t)step || !step)
+               return -EINVAL;
+
+       program_id = "-";
+       aux_data = "-";
+
+       if (argc > 3)
+               program_id = argv[3];
+
+       if (argc > 4)
+               aux_data = argv[4];
+
+       /*
+        * If a buffer overflow happens after we created the region,
+        * it's too late (the userspace would retry with a larger
+        * buffer, but the region id that caused the overflow is already
+        * leaked).  So we must detect buffer overflow in advance.
+        */
+       snprintf(result, maxlen, "%d", INT_MAX);
+       if (dm_message_test_buffer_overflow(result, maxlen))
+               return 1;
+
+       id = dm_stats_create(dm_get_stats(md), start, end, step, program_id, aux_data,
+                            dm_internal_suspend, dm_internal_resume, md);
+       if (id < 0)
+               return id;
+
+       snprintf(result, maxlen, "%d", id);
+
+       return 1;
+}
+
+/* @stats_delete <region_id> */
+static int message_stats_delete(struct mapped_device *md,
+                               unsigned argc, char **argv)
+{
+       int id;
+       char dummy;
+
+       if (argc != 2 ||
+           sscanf(argv[1], "%d%c", &id, &dummy) != 1 || id < 0)
+               return -EINVAL;
+
+       return dm_stats_delete(dm_get_stats(md), id);
+}
+
+/* @stats_clear <region_id> */
+static int message_stats_clear(struct mapped_device *md,
+                              unsigned argc, char **argv)
+{
+       int id;
+       char dummy;
+
+       if (argc != 2 ||
+           sscanf(argv[1], "%d%c", &id, &dummy) != 1 || id < 0)
+               return -EINVAL;
+
+       return dm_stats_clear(dm_get_stats(md), id);
+}
+
+/*
+ * @stats_list [<program_id>]
+ *
+ * Improvement: the optional program_id filter used to be kstrdup'ed
+ * (introducing a needless -ENOMEM failure mode) even though
+ * dm_stats_list() only reads the string for the duration of the call,
+ * under its own mutex, and argv is valid for the whole message.  Pass
+ * argv[1] directly instead.
+ */
+static int message_stats_list(struct mapped_device *md,
+                             unsigned argc, char **argv,
+                             char *result, unsigned maxlen)
+{
+       const char *program = NULL;
+
+       if (argc < 1 || argc > 2)
+               return -EINVAL;
+
+       if (argc > 1)
+               program = argv[1];
+
+       return dm_stats_list(dm_get_stats(md), program, result, maxlen);
+}
+
+/*
+ * @stats_print[_clear] <region_id> [<starting_line> <number_of_lines>]
+ *
+ * "-" may be given for either optional argument to keep its default
+ * (start of the region / all remaining areas).
+ */
+static int message_stats_print(struct mapped_device *md,
+                              unsigned argc, char **argv, bool clear,
+                              char *result, unsigned maxlen)
+{
+       int id;
+       char dummy;
+       unsigned long idx_start = 0, idx_len = ULONG_MAX;
+
+       if (argc != 2 && argc != 4)
+               return -EINVAL;
+
+       if (sscanf(argv[1], "%d%c", &id, &dummy) != 1 || id < 0)
+               return -EINVAL;
+
+       if (argc > 3) {
+               if (strcmp(argv[2], "-") &&
+                   sscanf(argv[2], "%lu%c", &idx_start, &dummy) != 1)
+                       return -EINVAL;
+               if (strcmp(argv[3], "-") &&
+                   sscanf(argv[3], "%lu%c", &idx_len, &dummy) != 1)
+                       return -EINVAL;
+       }
+
+       return dm_stats_print(dm_get_stats(md), id, idx_start, idx_len, clear,
+                             result, maxlen);
+}
+
+/* @stats_set_aux <region_id> <aux_data> */
+static int message_stats_set_aux(struct mapped_device *md,
+                                unsigned argc, char **argv)
+{
+       int id;
+       char dummy;
+
+       if (argc != 3 ||
+           sscanf(argv[1], "%d%c", &id, &dummy) != 1 || id < 0)
+               return -EINVAL;
+
+       return dm_stats_set_aux(dm_get_stats(md), id, argv[2]);
+}
+
+/*
+ * Entry point for @stats_* target messages, called from the dm core
+ * message handler.
+ *
+ * Returns 2 when argv[0] is not a stats message (the caller then
+ * rejects it -- see the dm core message dispatch), a negative errno on
+ * failure, or the individual handler's result otherwise.
+ */
+int dm_stats_message(struct mapped_device *md, unsigned argc, char **argv,
+                    char *result, unsigned maxlen)
+{
+       int r;
+
+       if (dm_request_based(md)) {
+               DMWARN("Statistics are only supported for bio-based devices");
+               return -EOPNOTSUPP;
+       }
+
+       /* All messages here must start with '@' */
+       if (!strcasecmp(argv[0], "@stats_create"))
+               r = message_stats_create(md, argc, argv, result, maxlen);
+       else if (!strcasecmp(argv[0], "@stats_delete"))
+               r = message_stats_delete(md, argc, argv);
+       else if (!strcasecmp(argv[0], "@stats_clear"))
+               r = message_stats_clear(md, argc, argv);
+       else if (!strcasecmp(argv[0], "@stats_list"))
+               r = message_stats_list(md, argc, argv, result, maxlen);
+       else if (!strcasecmp(argv[0], "@stats_print"))
+               r = message_stats_print(md, argc, argv, false, result, maxlen);
+       else if (!strcasecmp(argv[0], "@stats_print_clear"))
+               r = message_stats_print(md, argc, argv, true, result, maxlen);
+       else if (!strcasecmp(argv[0], "@stats_set_aux"))
+               r = message_stats_set_aux(md, argc, argv);
+       else
+               return 2; /* this wasn't a stats message */
+
+       if (r == -EINVAL)
+               DMWARN("Invalid parameters for message %s", argv[0]);
+
+       return r;
+}
+
+/* Module init hook: no RCU-freed regions can exist yet. */
+int __init dm_statistics_init(void)
+{
+       dm_stat_need_rcu_barrier = 0;
+       return 0;
+}
+
+/*
+ * Module exit hook: wait for pending call_rcu(dm_stat_free) callbacks
+ * before the module text is unloaded, and complain if the memory
+ * accounting did not return to zero (all devices are gone by now, so
+ * the unlocked read of shared_memory_amount is safe).
+ */
+void dm_statistics_exit(void)
+{
+       if (dm_stat_need_rcu_barrier)
+               rcu_barrier();
+       if (WARN_ON(shared_memory_amount))
+               DMCRIT("shared_memory_amount leaked: %lu", shared_memory_amount);
+}
+
+/* Read-only view of the current statistics memory usage, for debugging. */
+module_param_named(stats_current_allocated_bytes, shared_memory_amount, ulong, S_IRUGO);
+MODULE_PARM_DESC(stats_current_allocated_bytes, "Memory currently used by statistics");
diff --git a/drivers/md/dm-stats.h b/drivers/md/dm-stats.h
new file mode 100644 (file)
index 0000000..e7c4984
--- /dev/null
@@ -0,0 +1,40 @@
+#ifndef DM_STATS_H
+#define DM_STATS_H
+
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+
+int dm_statistics_init(void);
+void dm_statistics_exit(void);
+
+/* Per-mapped-device statistics state. */
+struct dm_stats {
+       struct mutex mutex;     /* serialises list changes and reporting */
+       struct list_head list;  /* list of struct dm_stat */
+       struct dm_stats_last_position __percpu *last;   /* merge detection */
+       sector_t last_sector;   /* NOTE(review): appears unused -- the per-cpu "last" is used instead; confirm */
+       unsigned last_rw;       /* NOTE(review): appears unused -- see above */
+};
+
+/* Per-bio auxiliary data, carried from submission to completion. */
+struct dm_stats_aux {
+       bool merged;    /* bio started where the previous one ended */
+};
+
+void dm_stats_init(struct dm_stats *st);
+void dm_stats_cleanup(struct dm_stats *st);
+
+struct mapped_device;
+
+/*
+ * Handle an @stats_* target message; returns 2 when the message is not
+ * a stats message, so the caller can treat it as unrecognised.
+ */
+int dm_stats_message(struct mapped_device *md, unsigned argc, char **argv,
+                    char *result, unsigned maxlen);
+
+/* Account one bio in all regions: once at submission (!end), once at completion (end). */
+void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw,
+                        sector_t bi_sector, unsigned bi_sectors, bool end,
+                        unsigned long duration, struct dm_stats_aux *aux);
+
+/* True if the device has at least one statistics region. */
+static inline bool dm_stats_used(struct dm_stats *st)
+{
+       return !list_empty(&st->list);
+}
+
+#endif
index 88f2f802d528be23b8e64c26085913677082be03..21328a371e58110278bbb4d1ad780a2e7025e078 100644 (file)
@@ -2648,9 +2648,17 @@ static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits)
 {
        struct pool_c *pt = ti->private;
        struct pool *pool = pt->pool;
+       uint64_t io_opt_sectors = limits->io_opt >> SECTOR_SHIFT;
 
-       blk_limits_io_min(limits, 0);
-       blk_limits_io_opt(limits, pool->sectors_per_block << SECTOR_SHIFT);
+       /*
+        * If the system-determined stacked limits are compatible with the
+        * pool's blocksize (io_opt is a factor) do not override them.
+        */
+       if (io_opt_sectors < pool->sectors_per_block ||
+           do_div(io_opt_sectors, pool->sectors_per_block)) {
+               blk_limits_io_min(limits, 0);
+               blk_limits_io_opt(limits, pool->sectors_per_block << SECTOR_SHIFT);
+       }
 
        /*
         * pt->adjusted_pf is a staging area for the actual features to use.
index 9e39d2b64bf8f3cc4a864e300ff61f5020b67dda..02cf12faadbf34e56c1bd9c8837f85a78c9c65d4 100644 (file)
@@ -60,6 +60,7 @@ struct dm_io {
        struct bio *bio;
        unsigned long start_time;
        spinlock_t endio_lock;
+       struct dm_stats_aux stats_aux;
 };
 
 /*
@@ -198,6 +199,8 @@ struct mapped_device {
 
        /* zero-length flush that will be cloned and submitted to targets */
        struct bio flush_bio;
+
+       struct dm_stats stats;
 };
 
 /*
@@ -269,6 +272,7 @@ static int (*_inits[])(void) __initdata = {
        dm_io_init,
        dm_kcopyd_init,
        dm_interface_init,
+       dm_statistics_init,
 };
 
 static void (*_exits[])(void) = {
@@ -279,6 +283,7 @@ static void (*_exits[])(void) = {
        dm_io_exit,
        dm_kcopyd_exit,
        dm_interface_exit,
+       dm_statistics_exit,
 };
 
 static int __init dm_init(void)
@@ -384,6 +389,16 @@ int dm_lock_for_deletion(struct mapped_device *md)
        return r;
 }
 
+sector_t dm_get_size(struct mapped_device *md)
+{
+       return get_capacity(md->disk);
+}
+
+struct dm_stats *dm_get_stats(struct mapped_device *md)
+{
+       return &md->stats;
+}
+
 static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
 {
        struct mapped_device *md = bdev->bd_disk->private_data;
@@ -466,8 +481,9 @@ static int md_in_flight(struct mapped_device *md)
 static void start_io_acct(struct dm_io *io)
 {
        struct mapped_device *md = io->md;
+       struct bio *bio = io->bio;
        int cpu;
-       int rw = bio_data_dir(io->bio);
+       int rw = bio_data_dir(bio);
 
        io->start_time = jiffies;
 
@@ -476,6 +492,10 @@ static void start_io_acct(struct dm_io *io)
        part_stat_unlock();
        atomic_set(&dm_disk(md)->part0.in_flight[rw],
                atomic_inc_return(&md->pending[rw]));
+
+       if (unlikely(dm_stats_used(&md->stats)))
+               dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_sector,
+                                   bio_sectors(bio), false, 0, &io->stats_aux);
 }
 
 static void end_io_acct(struct dm_io *io)
@@ -491,6 +511,10 @@ static void end_io_acct(struct dm_io *io)
        part_stat_add(cpu, &dm_disk(md)->part0, ticks[rw], duration);
        part_stat_unlock();
 
+       if (unlikely(dm_stats_used(&md->stats)))
+               dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_sector,
+                                   bio_sectors(bio), true, duration, &io->stats_aux);
+
        /*
         * After this is decremented the bio must not be touched if it is
         * a flush.
@@ -1519,7 +1543,7 @@ static void _dm_request(struct request_queue *q, struct bio *bio)
        return;
 }
 
-static int dm_request_based(struct mapped_device *md)
+int dm_request_based(struct mapped_device *md)
 {
        return blk_queue_stackable(md->queue);
 }
@@ -1946,8 +1970,7 @@ static struct mapped_device *alloc_dev(int minor)
        add_disk(md->disk);
        format_dev_t(md->name, MKDEV(_major, minor));
 
-       md->wq = alloc_workqueue("kdmflush",
-                                WQ_NON_REENTRANT | WQ_MEM_RECLAIM, 0);
+       md->wq = alloc_workqueue("kdmflush", WQ_MEM_RECLAIM, 0);
        if (!md->wq)
                goto bad_thread;
 
@@ -1959,6 +1982,8 @@ static struct mapped_device *alloc_dev(int minor)
        md->flush_bio.bi_bdev = md->bdev;
        md->flush_bio.bi_rw = WRITE_FLUSH;
 
+       dm_stats_init(&md->stats);
+
        /* Populate the mapping, nobody knows we exist yet */
        spin_lock(&_minor_lock);
        old_md = idr_replace(&_minor_idr, md, minor);
@@ -2010,6 +2035,7 @@ static void free_dev(struct mapped_device *md)
 
        put_disk(md->disk);
        blk_cleanup_queue(md->queue);
+       dm_stats_cleanup(&md->stats);
        module_put(THIS_MODULE);
        kfree(md);
 }
@@ -2151,7 +2177,7 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
        /*
         * Wipe any geometry if the size of the table changed.
         */
-       if (size != get_capacity(md->disk))
+       if (size != dm_get_size(md))
                memset(&md->geometry, 0, sizeof(md->geometry));
 
        __set_size(md, size);
@@ -2695,6 +2721,38 @@ out:
        return r;
 }
 
+/*
+ * Internal suspend/resume works like userspace-driven suspend. It waits
+ * until all bios finish and prevents issuing new bios to the target drivers.
+ * It may be used only from the kernel.
+ *
+ * Internal suspend holds md->suspend_lock, which prevents interaction with
+ * userspace-driven suspend.
+ */
+
+void dm_internal_suspend(struct mapped_device *md)
+{
+       mutex_lock(&md->suspend_lock);
+       if (dm_suspended_md(md))
+               return;
+
+       set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
+       synchronize_srcu(&md->io_barrier);
+       flush_workqueue(md->wq);
+       dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
+}
+
+void dm_internal_resume(struct mapped_device *md)
+{
+       if (dm_suspended_md(md))
+               goto done;
+
+       dm_queue_flush(md);
+
+done:
+       mutex_unlock(&md->suspend_lock);
+}
+
 /*-----------------------------------------------------------------
  * Event notification.
  *---------------------------------------------------------------*/
index 45b97da1bd061f02a32e0e7523fdf5125ffb1089..97633cdab9937f042f63b07146a3b67385c2fdd7 100644 (file)
@@ -16,6 +16,8 @@
 #include <linux/blkdev.h>
 #include <linux/hdreg.h>
 
+#include "dm-stats.h"
+
 /*
  * Suspend feature flags
  */
@@ -146,10 +148,16 @@ void dm_destroy(struct mapped_device *md);
 void dm_destroy_immediate(struct mapped_device *md);
 int dm_open_count(struct mapped_device *md);
 int dm_lock_for_deletion(struct mapped_device *md);
+int dm_request_based(struct mapped_device *md);
+sector_t dm_get_size(struct mapped_device *md);
+struct dm_stats *dm_get_stats(struct mapped_device *md);
 
 int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
                      unsigned cookie);
 
+void dm_internal_suspend(struct mapped_device *md);
+void dm_internal_resume(struct mapped_device *md);
+
 int dm_io_init(void);
 void dm_io_exit(void);
 
@@ -162,4 +170,12 @@ void dm_kcopyd_exit(void);
 struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity, unsigned per_bio_data_size);
 void dm_free_md_mempools(struct dm_md_mempools *pools);
 
+/*
+ * Helpers that are used by DM core
+ */
+static inline bool dm_message_test_buffer_overflow(char *result, unsigned maxlen)
+{
+       return !maxlen || strlen(result) + 1 >= maxlen;
+}
+
 #endif
index 9f13e13506efbb3859786879ed6a5ff42eb72e8f..b71d75e239008c138353474876e010e81b438b87 100644 (file)
@@ -602,9 +602,9 @@ static struct mddev * mddev_find(dev_t unit)
        goto retry;
 }
 
-static inline int mddev_lock(struct mddev * mddev)
+int md_queue_misc_work(struct work_struct *work)
 {
-       return mutex_lock_interruptible(&mddev->reconfig_mutex);
+       return queue_work(md_misc_wq, work);
 }
 
 static inline int mddev_is_locked(struct mddev *mddev)
@@ -619,7 +619,7 @@ static inline int mddev_trylock(struct mddev * mddev)
 
 static struct attribute_group md_redundancy_group;
 
-static void mddev_unlock(struct mddev * mddev)
+void mddev_unlock(struct mddev * mddev)
 {
        if (mddev->to_remove) {
                /* These cannot be removed under reconfig_mutex as
@@ -5628,10 +5628,7 @@ static int get_bitmap_file(struct mddev * mddev, void __user * arg)
        char *ptr, *buf = NULL;
        int err = -ENOMEM;
 
-       if (md_allow_write(mddev))
-               file = kmalloc(sizeof(*file), GFP_NOIO);
-       else
-               file = kmalloc(sizeof(*file), GFP_KERNEL);
+       file = kmalloc(sizeof(*file), GFP_NOIO);
 
        if (!file)
                goto out;
@@ -8681,6 +8678,8 @@ EXPORT_SYMBOL(md_unregister_thread);
 EXPORT_SYMBOL(md_wakeup_thread);
 EXPORT_SYMBOL(md_check_recovery);
 EXPORT_SYMBOL(md_reap_sync_thread);
+EXPORT_SYMBOL(mddev_unlock);
+EXPORT_SYMBOL(md_queue_misc_work);
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("MD RAID framework");
 MODULE_ALIAS("md");
index 20f02c0b5f2d6d99ac69a36067135f0b8196d9a7..77924d3a851eef729aef2aaedc28717f198c1934 100644 (file)
@@ -621,4 +621,12 @@ static inline int mddev_check_plugged(struct mddev *mddev)
        return !!blk_check_plugged(md_unplug, mddev,
                                   sizeof(struct blk_plug_cb));
 }
+
+static inline int mddev_lock(struct mddev * mddev)
+{
+       return mutex_lock_interruptible(&mddev->reconfig_mutex);
+}
+extern void mddev_unlock(struct mddev *mddev);
+extern int md_queue_misc_work(struct work_struct *work);
+
 #endif /* _MD_MD_H */
index 81b513890e2bfd8d41b1c48f986c9c9a187f336c..a7e8bf2963886dfa349a03644cf33ff1ded748c5 100644 (file)
@@ -615,6 +615,11 @@ int dm_bm_flush_and_unlock(struct dm_block_manager *bm,
 }
 EXPORT_SYMBOL_GPL(dm_bm_flush_and_unlock);
 
+void dm_bm_prefetch(struct dm_block_manager *bm, dm_block_t b)
+{
+       dm_bufio_prefetch(bm->bufio, b, 1);
+}
+
 void dm_bm_set_read_only(struct dm_block_manager *bm)
 {
        bm->read_only = true;
index be5bff61be280562932906b1b182ef1d3775ea55..9a82083a66b6a86833bccc1d2b6d4d43c9de6500 100644 (file)
@@ -108,6 +108,11 @@ int dm_bm_unlock(struct dm_block *b);
 int dm_bm_flush_and_unlock(struct dm_block_manager *bm,
                           struct dm_block *superblock);
 
+ /*
+  * Request data be prefetched into the cache.
+  */
+void dm_bm_prefetch(struct dm_block_manager *bm, dm_block_t b);
+
 /*
  * Switches the bm to a read only mode.  Once read-only mode
  * has been entered the following functions will return -EPERM.
index 35865425e4b4443e925014fd918655ef51c07d57..468e371ee9b22d036fc6e5cca36ca0f46acb4f6a 100644 (file)
@@ -161,6 +161,7 @@ struct frame {
 };
 
 struct del_stack {
+       struct dm_btree_info *info;
        struct dm_transaction_manager *tm;
        int top;
        struct frame spine[MAX_SPINE_DEPTH];
@@ -183,6 +184,20 @@ static int unprocessed_frames(struct del_stack *s)
        return s->top >= 0;
 }
 
+static void prefetch_children(struct del_stack *s, struct frame *f)
+{
+       unsigned i;
+       struct dm_block_manager *bm = dm_tm_get_bm(s->tm);
+
+       for (i = 0; i < f->nr_children; i++)
+               dm_bm_prefetch(bm, value64(f->n, i));
+}
+
+static bool is_internal_level(struct dm_btree_info *info, struct frame *f)
+{
+       return f->level < (info->levels - 1);
+}
+
 static int push_frame(struct del_stack *s, dm_block_t b, unsigned level)
 {
        int r;
@@ -205,6 +220,7 @@ static int push_frame(struct del_stack *s, dm_block_t b, unsigned level)
                dm_tm_dec(s->tm, b);
 
        else {
+               uint32_t flags;
                struct frame *f = s->spine + ++s->top;
 
                r = dm_tm_read_lock(s->tm, b, &btree_node_validator, &f->b);
@@ -217,6 +233,10 @@ static int push_frame(struct del_stack *s, dm_block_t b, unsigned level)
                f->level = level;
                f->nr_children = le32_to_cpu(f->n->header.nr_entries);
                f->current_child = 0;
+
+               flags = le32_to_cpu(f->n->header.flags);
+               if (flags & INTERNAL_NODE || is_internal_level(s->info, f))
+                       prefetch_children(s, f);
        }
 
        return 0;
@@ -230,11 +250,6 @@ static void pop_frame(struct del_stack *s)
        dm_tm_unlock(s->tm, f->b);
 }
 
-static bool is_internal_level(struct dm_btree_info *info, struct frame *f)
-{
-       return f->level < (info->levels - 1);
-}
-
 int dm_btree_del(struct dm_btree_info *info, dm_block_t root)
 {
        int r;
@@ -243,6 +258,7 @@ int dm_btree_del(struct dm_btree_info *info, dm_block_t root)
        s = kmalloc(sizeof(*s), GFP_KERNEL);
        if (!s)
                return -ENOMEM;
+       s->info = info;
        s->tm = info->tm;
        s->top = -1;
 
@@ -287,7 +303,7 @@ int dm_btree_del(struct dm_btree_info *info, dm_block_t root)
                                        info->value_type.dec(info->value_type.context,
                                                             value_ptr(f->n, i));
                        }
-                       f->current_child = f->nr_children;
+                       pop_frame(s);
                }
        }
 
index 3e7a88d99eb0260ce4e9128f94349713b28170c5..6058569fe86c3dcf862ddc933e234e721dbdc3e6 100644 (file)
@@ -292,16 +292,11 @@ int sm_ll_lookup_bitmap(struct ll_disk *ll, dm_block_t b, uint32_t *result)
        return dm_tm_unlock(ll->tm, blk);
 }
 
-int sm_ll_lookup(struct ll_disk *ll, dm_block_t b, uint32_t *result)
+static int sm_ll_lookup_big_ref_count(struct ll_disk *ll, dm_block_t b,
+                                     uint32_t *result)
 {
        __le32 le_rc;
-       int r = sm_ll_lookup_bitmap(ll, b, result);
-
-       if (r)
-               return r;
-
-       if (*result != 3)
-               return r;
+       int r;
 
        r = dm_btree_lookup(&ll->ref_count_info, ll->ref_count_root, &b, &le_rc);
        if (r < 0)
@@ -312,6 +307,19 @@ int sm_ll_lookup(struct ll_disk *ll, dm_block_t b, uint32_t *result)
        return r;
 }
 
+int sm_ll_lookup(struct ll_disk *ll, dm_block_t b, uint32_t *result)
+{
+       int r = sm_ll_lookup_bitmap(ll, b, result);
+
+       if (r)
+               return r;
+
+       if (*result != 3)
+               return r;
+
+       return sm_ll_lookup_big_ref_count(ll, b, result);
+}
+
 int sm_ll_find_free_block(struct ll_disk *ll, dm_block_t begin,
                          dm_block_t end, dm_block_t *result)
 {
@@ -372,11 +380,12 @@ int sm_ll_find_free_block(struct ll_disk *ll, dm_block_t begin,
        return -ENOSPC;
 }
 
-int sm_ll_insert(struct ll_disk *ll, dm_block_t b,
-                uint32_t ref_count, enum allocation_event *ev)
+static int sm_ll_mutate(struct ll_disk *ll, dm_block_t b,
+                       uint32_t (*mutator)(void *context, uint32_t old),
+                       void *context, enum allocation_event *ev)
 {
        int r;
-       uint32_t bit, old;
+       uint32_t bit, old, ref_count;
        struct dm_block *nb;
        dm_block_t index = b;
        struct disk_index_entry ie_disk;
@@ -399,6 +408,14 @@ int sm_ll_insert(struct ll_disk *ll, dm_block_t b,
        bm_le = dm_bitmap_data(nb);
        old = sm_lookup_bitmap(bm_le, bit);
 
+       if (old > 2) {
+               r = sm_ll_lookup_big_ref_count(ll, b, &old);
+               if (r < 0)
+                       return r;
+       }
+
+       ref_count = mutator(context, old);
+
        if (ref_count <= 2) {
                sm_set_bitmap(bm_le, bit, ref_count);
 
@@ -448,31 +465,35 @@ int sm_ll_insert(struct ll_disk *ll, dm_block_t b,
        return ll->save_ie(ll, index, &ie_disk);
 }
 
-int sm_ll_inc(struct ll_disk *ll, dm_block_t b, enum allocation_event *ev)
+static uint32_t set_ref_count(void *context, uint32_t old)
 {
-       int r;
-       uint32_t rc;
-
-       r = sm_ll_lookup(ll, b, &rc);
-       if (r)
-               return r;
+       return *((uint32_t *) context);
+}
 
-       return sm_ll_insert(ll, b, rc + 1, ev);
+int sm_ll_insert(struct ll_disk *ll, dm_block_t b,
+                uint32_t ref_count, enum allocation_event *ev)
+{
+       return sm_ll_mutate(ll, b, set_ref_count, &ref_count, ev);
 }
 
-int sm_ll_dec(struct ll_disk *ll, dm_block_t b, enum allocation_event *ev)
+static uint32_t inc_ref_count(void *context, uint32_t old)
 {
-       int r;
-       uint32_t rc;
+       return old + 1;
+}
 
-       r = sm_ll_lookup(ll, b, &rc);
-       if (r)
-               return r;
+int sm_ll_inc(struct ll_disk *ll, dm_block_t b, enum allocation_event *ev)
+{
+       return sm_ll_mutate(ll, b, inc_ref_count, NULL, ev);
+}
 
-       if (!rc)
-               return -EINVAL;
+static uint32_t dec_ref_count(void *context, uint32_t old)
+{
+       return old - 1;
+}
 
-       return sm_ll_insert(ll, b, rc - 1, ev);
+int sm_ll_dec(struct ll_disk *ll, dm_block_t b, enum allocation_event *ev)
+{
+       return sm_ll_mutate(ll, b, dec_ref_count, NULL, ev);
 }
 
 int sm_ll_commit(struct ll_disk *ll)
index 78ea44336e75e06d5769b4a6158f2a1e506214e0..840d0dded676e9dcf718f0dff4c7641b15776c60 100644 (file)
@@ -200,6 +200,21 @@ static int stripe_operations_active(struct stripe_head *sh)
               test_bit(STRIPE_COMPUTE_RUN, &sh->state);
 }
 
+static void raid5_wakeup_stripe_thread(struct stripe_head *sh)
+{
+       struct r5conf *conf = sh->raid_conf;
+       struct raid5_percpu *percpu;
+       int i, orphaned = 1;
+
+       percpu = per_cpu_ptr(conf->percpu, sh->cpu);
+       for_each_cpu(i, &percpu->handle_threads) {
+               md_wakeup_thread(conf->aux_threads[i]->thread);
+               orphaned = 0;
+       }
+       if (orphaned)
+               md_wakeup_thread(conf->mddev->thread);
+}
+
 static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh)
 {
        BUG_ON(!list_empty(&sh->lru));
@@ -212,9 +227,19 @@ static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh)
                           sh->bm_seq - conf->seq_write > 0)
                        list_add_tail(&sh->lru, &conf->bitmap_list);
                else {
+                       int cpu = sh->cpu;
+                       struct raid5_percpu *percpu;
+                       if (!cpu_online(cpu)) {
+                               cpu = cpumask_any(cpu_online_mask);
+                               sh->cpu = cpu;
+                       }
+                       percpu = per_cpu_ptr(conf->percpu, cpu);
+
                        clear_bit(STRIPE_DELAYED, &sh->state);
                        clear_bit(STRIPE_BIT_DELAY, &sh->state);
-                       list_add_tail(&sh->lru, &conf->handle_list);
+                       list_add_tail(&sh->lru, &percpu->handle_list);
+                       raid5_wakeup_stripe_thread(sh);
+                       return;
                }
                md_wakeup_thread(conf->mddev->thread);
        } else {
@@ -239,12 +264,47 @@ static void __release_stripe(struct r5conf *conf, struct stripe_head *sh)
                do_release_stripe(conf, sh);
 }
 
+/* should hold conf->device_lock already */
+static int release_stripe_list(struct r5conf *conf)
+{
+       struct stripe_head *sh;
+       int count = 0;
+       struct llist_node *head;
+
+       head = llist_del_all(&conf->released_stripes);
+       while (head) {
+               sh = llist_entry(head, struct stripe_head, release_list);
+               head = llist_next(head);
+               /* sh could be re-added after STRIPE_ON_RELEASE_LIST is cleared */
+               smp_mb();
+               clear_bit(STRIPE_ON_RELEASE_LIST, &sh->state);
+               /*
+                * Don't worry the bit is set here, because if the bit is set
+                * again, the count is always > 1. This is true for
+                * STRIPE_ON_UNPLUG_LIST bit too.
+                */
+               __release_stripe(conf, sh);
+               count++;
+       }
+
+       return count;
+}
+
 static void release_stripe(struct stripe_head *sh)
 {
        struct r5conf *conf = sh->raid_conf;
        unsigned long flags;
+       bool wakeup;
 
+       if (test_and_set_bit(STRIPE_ON_RELEASE_LIST, &sh->state))
+               goto slow_path;
+       wakeup = llist_add(&sh->release_list, &conf->released_stripes);
+       if (wakeup)
+               md_wakeup_thread(conf->mddev->thread);
+       return;
+slow_path:
        local_irq_save(flags);
+       /* we are ok here if STRIPE_ON_RELEASE_LIST is set or not */
        if (atomic_dec_and_lock(&sh->count, &conf->device_lock)) {
                do_release_stripe(conf, sh);
                spin_unlock(&conf->device_lock);
@@ -359,6 +419,7 @@ static void init_stripe(struct stripe_head *sh, sector_t sector, int previous)
                raid5_build_block(sh, i, previous);
        }
        insert_hash(conf, sh);
+       sh->cpu = smp_processor_id();
 }
 
 static struct stripe_head *__find_stripe(struct r5conf *conf, sector_t sector,
@@ -491,7 +552,8 @@ get_active_stripe(struct r5conf *conf, sector_t sector,
                        if (atomic_read(&sh->count)) {
                                BUG_ON(!list_empty(&sh->lru)
                                    && !test_bit(STRIPE_EXPANDING, &sh->state)
-                                   && !test_bit(STRIPE_ON_UNPLUG_LIST, &sh->state));
+                                   && !test_bit(STRIPE_ON_UNPLUG_LIST, &sh->state)
+                                   && !test_bit(STRIPE_ON_RELEASE_LIST, &sh->state));
                        } else {
                                if (!test_bit(STRIPE_HANDLE, &sh->state))
                                        atomic_inc(&conf->active_stripes);
@@ -3773,12 +3835,19 @@ static void raid5_activate_delayed(struct r5conf *conf)
                while (!list_empty(&conf->delayed_list)) {
                        struct list_head *l = conf->delayed_list.next;
                        struct stripe_head *sh;
+                       int cpu;
                        sh = list_entry(l, struct stripe_head, lru);
                        list_del_init(l);
                        clear_bit(STRIPE_DELAYED, &sh->state);
                        if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
                                atomic_inc(&conf->preread_active_stripes);
                        list_add_tail(&sh->lru, &conf->hold_list);
+                       cpu = sh->cpu;
+                       if (!cpu_online(cpu)) {
+                               cpu = cpumask_any(cpu_online_mask);
+                               sh->cpu = cpu;
+                       }
+                       raid5_wakeup_stripe_thread(sh);
                }
        }
 }
@@ -4058,18 +4127,29 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
  * head of the hold_list has changed, i.e. the head was promoted to the
  * handle_list.
  */
-static struct stripe_head *__get_priority_stripe(struct r5conf *conf)
-{
-       struct stripe_head *sh;
-
+static struct stripe_head *__get_priority_stripe(struct r5conf *conf,
+       cpumask_t *mask)
+{
+       struct stripe_head *sh = NULL, *tmp;
+       struct list_head *handle_list = NULL;
+       int cpu;
+
+       /* Should we take action to avoid starvation of later CPUs ? */
+       for_each_cpu(cpu, mask) {
+               struct raid5_percpu *percpu = per_cpu_ptr(conf->percpu, cpu);
+               if (!list_empty(&percpu->handle_list)) {
+                       handle_list = &percpu->handle_list;
+                       break;
+               }
+       }
        pr_debug("%s: handle: %s hold: %s full_writes: %d bypass_count: %d\n",
                  __func__,
-                 list_empty(&conf->handle_list) ? "empty" : "busy",
+                 !handle_list ? "empty" : "busy",
                  list_empty(&conf->hold_list) ? "empty" : "busy",
                  atomic_read(&conf->pending_full_writes), conf->bypass_count);
 
-       if (!list_empty(&conf->handle_list)) {
-               sh = list_entry(conf->handle_list.next, typeof(*sh), lru);
+       if (handle_list) {
+               sh = list_entry(handle_list->next, typeof(*sh), lru);
 
                if (list_empty(&conf->hold_list))
                        conf->bypass_count = 0;
@@ -4087,12 +4167,23 @@ static struct stripe_head *__get_priority_stripe(struct r5conf *conf)
                   ((conf->bypass_threshold &&
                     conf->bypass_count > conf->bypass_threshold) ||
                    atomic_read(&conf->pending_full_writes) == 0)) {
-               sh = list_entry(conf->hold_list.next,
-                               typeof(*sh), lru);
-               conf->bypass_count -= conf->bypass_threshold;
-               if (conf->bypass_count < 0)
-                       conf->bypass_count = 0;
-       } else
+
+               list_for_each_entry(tmp, &conf->hold_list,  lru) {
+                       if (cpumask_test_cpu(tmp->cpu, mask) ||
+                           !cpu_online(tmp->cpu)) {
+                               sh = tmp;
+                               break;
+                       }
+               }
+
+               if (sh) {
+                       conf->bypass_count -= conf->bypass_threshold;
+                       if (conf->bypass_count < 0)
+                               conf->bypass_count = 0;
+               }
+       }
+
+       if (!sh)
                return NULL;
 
        list_del_init(&sh->lru);
@@ -4127,6 +4218,10 @@ static void raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule)
                         */
                        smp_mb__before_clear_bit();
                        clear_bit(STRIPE_ON_UNPLUG_LIST, &sh->state);
+                       /*
+                        * STRIPE_ON_RELEASE_LIST could be set here. In that
+                        * case, the count is always > 1 here
+                        */
                        __release_stripe(conf, sh);
                        cnt++;
                }
@@ -4789,13 +4884,13 @@ static int  retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)
 }
 
 #define MAX_STRIPE_BATCH 8
-static int handle_active_stripes(struct r5conf *conf)
+static int handle_active_stripes(struct r5conf *conf, cpumask_t *mask)
 {
        struct stripe_head *batch[MAX_STRIPE_BATCH], *sh;
        int i, batch_size = 0;
 
        while (batch_size < MAX_STRIPE_BATCH &&
-                       (sh = __get_priority_stripe(conf)) != NULL)
+                       (sh = __get_priority_stripe(conf, mask)) != NULL)
                batch[batch_size++] = sh;
 
        if (batch_size == 0)
@@ -4813,6 +4908,37 @@ static int handle_active_stripes(struct r5conf *conf)
        return batch_size;
 }
 
+static void raid5auxd(struct md_thread *thread)
+{
+       struct mddev *mddev = thread->mddev;
+       struct r5conf *conf = mddev->private;
+       struct blk_plug plug;
+       int handled;
+       struct raid5_auxth *auxth = thread->private;
+
+       pr_debug("+++ raid5auxd active\n");
+
+       blk_start_plug(&plug);
+       handled = 0;
+       spin_lock_irq(&conf->device_lock);
+       while (1) {
+               int batch_size, released;
+
+               released = release_stripe_list(conf);
+
+               batch_size = handle_active_stripes(conf, &auxth->work_mask);
+               if (!batch_size && !released)
+                       break;
+               handled += batch_size;
+       }
+       pr_debug("%d stripes handled\n", handled);
+
+       spin_unlock_irq(&conf->device_lock);
+       blk_finish_plug(&plug);
+
+       pr_debug("--- raid5auxd inactive\n");
+}
+
 /*
  * This is our raid5 kernel thread.
  *
@@ -4836,7 +4962,9 @@ static void raid5d(struct md_thread *thread)
        spin_lock_irq(&conf->device_lock);
        while (1) {
                struct bio *bio;
-               int batch_size;
+               int batch_size, released;
+
+               released = release_stripe_list(conf);
 
                if (
                    !list_empty(&conf->bitmap_list)) {
@@ -4860,8 +4988,8 @@ static void raid5d(struct md_thread *thread)
                        handled++;
                }
 
-               batch_size = handle_active_stripes(conf);
-               if (!batch_size)
+               batch_size = handle_active_stripes(conf, &conf->work_mask);
+               if (!batch_size && !released)
                        break;
                handled += batch_size;
 
@@ -4989,10 +5117,270 @@ stripe_cache_active_show(struct mddev *mddev, char *page)
 static struct md_sysfs_entry
 raid5_stripecache_active = __ATTR_RO(stripe_cache_active);
 
+static void raid5_update_threads_handle_mask(struct mddev *mddev)
+{
+       int cpu, i;
+       struct raid5_percpu *percpu;
+       struct r5conf *conf = mddev->private;
+
+       for_each_online_cpu(cpu) {
+               percpu = per_cpu_ptr(conf->percpu, cpu);
+               cpumask_clear(&percpu->handle_threads);
+       }
+       cpumask_copy(&conf->work_mask, cpu_online_mask);
+
+       for (i = 0; i < conf->aux_thread_num; i++) {
+               cpumask_t *work_mask = &conf->aux_threads[i]->work_mask;
+               for_each_cpu(cpu, work_mask) {
+                       percpu = per_cpu_ptr(conf->percpu, cpu);
+                       cpumask_set_cpu(i, &percpu->handle_threads);
+               }
+               cpumask_andnot(&conf->work_mask, &conf->work_mask,
+                               work_mask);
+       }
+}
+
+struct raid5_auxth_sysfs {
+       struct attribute attr;
+       ssize_t (*show)(struct mddev *, struct raid5_auxth *, char *);
+       ssize_t (*store)(struct mddev *, struct raid5_auxth *,
+               const char *, size_t);
+};
+
+static ssize_t raid5_show_thread_cpulist(struct mddev *mddev,
+       struct raid5_auxth *thread, char *page)
+{
+       if (!mddev->private)
+               return 0;
+       return cpulist_scnprintf(page, PAGE_SIZE, &thread->work_mask);
+}
+
+static ssize_t
+raid5_store_thread_cpulist(struct mddev *mddev, struct raid5_auxth *thread,
+       const char *page, size_t len)
+{
+       struct r5conf *conf = mddev->private;
+       cpumask_var_t mask;
+
+       if (!conf)
+               return -ENODEV;
+       if (!alloc_cpumask_var(&mask, GFP_KERNEL))
+               return -ENOMEM;
+
+       if (cpulist_parse(page, mask)) {
+               free_cpumask_var(mask);
+               return -EINVAL;
+       }
+
+       get_online_cpus();
+       spin_lock_irq(&conf->device_lock);
+       cpumask_copy(&thread->work_mask, mask);
+       raid5_update_threads_handle_mask(mddev);
+       spin_unlock_irq(&conf->device_lock);
+       put_online_cpus();
+       set_cpus_allowed_ptr(thread->thread->tsk, mask);
+
+       free_cpumask_var(mask);
+       return len;
+}
+
+static struct raid5_auxth_sysfs thread_cpulist =
+__ATTR(cpulist, S_IRUGO|S_IWUSR, raid5_show_thread_cpulist,
+       raid5_store_thread_cpulist);
+
+static struct attribute *auxth_attrs[] = {
+       &thread_cpulist.attr,
+       NULL,
+};
+
+static ssize_t
+raid5_auxth_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
+{
+       struct raid5_auxth_sysfs *entry = container_of(attr,
+               struct raid5_auxth_sysfs, attr);
+       struct raid5_auxth *thread = container_of(kobj,
+               struct raid5_auxth, kobj);
+       struct mddev *mddev = thread->thread->mddev;
+       ssize_t ret;
+
+       if (!entry->show)
+               return -EIO;
+       mddev_lock(mddev);
+       ret = entry->show(mddev, thread, page);
+       mddev_unlock(mddev);
+       return ret;
+}
+
+static ssize_t
+raid5_auxth_attr_store(struct kobject *kobj, struct attribute *attr,
+             const char *page, size_t length)
+{
+       struct raid5_auxth_sysfs *entry = container_of(attr,
+               struct raid5_auxth_sysfs, attr);
+       struct raid5_auxth *thread = container_of(kobj,
+               struct raid5_auxth, kobj);
+       struct mddev *mddev = thread->thread->mddev;
+       ssize_t ret;
+
+       if (!entry->store)
+               return -EIO;
+       if (!capable(CAP_SYS_ADMIN))
+               return -EACCES;
+       mddev_lock(mddev);
+       ret = entry->store(mddev, thread, page, length);
+       mddev_unlock(mddev);
+       return ret;
+}
+
+static void raid5_auxth_release(struct kobject *kobj)
+{
+       struct raid5_auxth *thread = container_of(kobj,
+               struct raid5_auxth, kobj);
+       kfree(thread);
+}
+
+static const struct sysfs_ops raid5_auxth_sysfsops = {
+       .show = raid5_auxth_attr_show,
+       .store = raid5_auxth_attr_store,
+};
+static struct kobj_type raid5_auxth_ktype = {
+       .release = raid5_auxth_release,
+       .sysfs_ops = &raid5_auxth_sysfsops,
+       .default_attrs = auxth_attrs,
+};
+
+static ssize_t
+raid5_show_auxthread_number(struct mddev *mddev, char *page)
+{
+       struct r5conf *conf = mddev->private;
+       if (conf)
+               return sprintf(page, "%d\n", conf->aux_thread_num);
+       else
+               return 0;
+}
+
+static void raid5_auxth_delete(struct work_struct *ws)
+{
+       struct raid5_auxth *thread = container_of(ws, struct raid5_auxth,
+               del_work);
+
+       kobject_del(&thread->kobj);
+       kobject_put(&thread->kobj);
+}
+
+static void __free_aux_thread(struct mddev *mddev, struct raid5_auxth *thread)
+{
+       md_unregister_thread(&thread->thread);
+       INIT_WORK(&thread->del_work, raid5_auxth_delete);
+       kobject_get(&thread->kobj);
+       md_queue_misc_work(&thread->del_work);
+}
+
+static struct raid5_auxth *__create_aux_thread(struct mddev *mddev, int i)
+{
+       struct raid5_auxth *thread;
+       char name[10];
+
+       thread = kzalloc(sizeof(*thread), GFP_KERNEL);
+       if (!thread)
+               return NULL;
+       snprintf(name, 10, "aux%d", i);
+       thread->thread = md_register_thread(raid5auxd, mddev, name);
+       if (!thread->thread) {
+               kfree(thread);
+               return NULL;
+       }
+       thread->thread->private = thread;
+
+       cpumask_copy(&thread->work_mask, cpu_online_mask);
+
+       if (kobject_init_and_add(&thread->kobj, &raid5_auxth_ktype,
+           &mddev->kobj, "auxth%d", i)) {
+               md_unregister_thread(&thread->thread);
+               kfree(thread);
+               return NULL;
+       }
+       return thread;
+}
+
+static ssize_t
+raid5_store_auxthread_number(struct mddev *mddev, const char *page, size_t len)
+{
+       struct r5conf *conf = mddev->private;
+       unsigned long new;
+       int i;
+       struct raid5_auxth **threads;
+
+       if (len >= PAGE_SIZE)
+               return -EINVAL;
+       if (!conf)
+               return -ENODEV;
+
+       if (kstrtoul(page, 10, &new))
+               return -EINVAL;
+
+       if (new == conf->aux_thread_num)
+               return len;
+
+       /* There is no point creating more threads than cpu number */
+       if (new > num_online_cpus())
+               return -EINVAL;
+
+       if (new > conf->aux_thread_num) {
+               threads = kzalloc(sizeof(struct raid5_auxth *) * new,
+                               GFP_KERNEL);
+               if (!threads)
+                       return -ENOMEM;
+
+               i = conf->aux_thread_num;
+               while (i < new) {
+                       threads[i] = __create_aux_thread(mddev, i);
+                       if (!threads[i])
+                               goto error;
+
+                       i++;
+               }
+               memcpy(threads, conf->aux_threads,
+                       sizeof(struct raid5_auxth *) * conf->aux_thread_num);
+               get_online_cpus();
+               spin_lock_irq(&conf->device_lock);
+               kfree(conf->aux_threads);
+               conf->aux_threads = threads;
+               conf->aux_thread_num = new;
+               raid5_update_threads_handle_mask(mddev);
+               spin_unlock_irq(&conf->device_lock);
+               put_online_cpus();
+       } else {
+               int old = conf->aux_thread_num;
+
+               get_online_cpus();
+               spin_lock_irq(&conf->device_lock);
+               conf->aux_thread_num = new;
+               raid5_update_threads_handle_mask(mddev);
+               spin_unlock_irq(&conf->device_lock);
+               put_online_cpus();
+               for (i = new; i < old; i++)
+                       __free_aux_thread(mddev, conf->aux_threads[i]);
+       }
+
+       return len;
+error:
+       while (--i >= conf->aux_thread_num)
+               __free_aux_thread(mddev, threads[i]);
+       kfree(threads);
+       return -ENOMEM;
+}
+
+static struct md_sysfs_entry
+raid5_auxthread_number = __ATTR(auxthread_number, S_IRUGO|S_IWUSR,
+                               raid5_show_auxthread_number,
+                               raid5_store_auxthread_number);
+
 static struct attribute *raid5_attrs[] =  {
        &raid5_stripecache_size.attr,
        &raid5_stripecache_active.attr,
        &raid5_preread_bypass_threshold.attr,
+       &raid5_auxthread_number.attr,
        NULL,
 };
 static struct attribute_group raid5_attrs_group = {
@@ -5040,6 +5428,7 @@ static void raid5_free_percpu(struct r5conf *conf)
 
 static void free_conf(struct r5conf *conf)
 {
+       kfree(conf->aux_threads);
        shrink_stripes(conf);
        raid5_free_percpu(conf);
        kfree(conf->disks);
@@ -5052,7 +5441,7 @@ static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action,
                              void *hcpu)
 {
        struct r5conf *conf = container_of(nfb, struct r5conf, cpu_notify);
-       long cpu = (long)hcpu;
+       long cpu = (long)hcpu, anycpu;
        struct raid5_percpu *percpu = per_cpu_ptr(conf->percpu, cpu);
 
        switch (action) {
@@ -5071,9 +5460,17 @@ static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action,
                               __func__, cpu);
                        return notifier_from_errno(-ENOMEM);
                }
+               INIT_LIST_HEAD(&(percpu->handle_list));
+               cpumask_clear(&percpu->handle_threads);
                break;
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
+               spin_lock_irq(&conf->device_lock);
+               anycpu = cpumask_any(cpu_online_mask);
+               list_splice_tail_init(&percpu->handle_list,
+                       &per_cpu_ptr(conf->percpu, anycpu)->handle_list);
+               spin_unlock_irq(&conf->device_lock);
+
                safe_put_page(percpu->spare_page);
                kfree(percpu->scribble);
                percpu->spare_page = NULL;
@@ -5082,6 +5479,10 @@ static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action,
        default:
                break;
        }
+
+       spin_lock_irq(&conf->device_lock);
+       raid5_update_threads_handle_mask(conf->mddev);
+       spin_unlock_irq(&conf->device_lock);
        return NOTIFY_OK;
 }
 #endif
@@ -5102,20 +5503,24 @@ static int raid5_alloc_percpu(struct r5conf *conf)
        get_online_cpus();
        err = 0;
        for_each_present_cpu(cpu) {
+               struct raid5_percpu *percpu = per_cpu_ptr(conf->percpu, cpu);
+
                if (conf->level == 6) {
                        spare_page = alloc_page(GFP_KERNEL);
                        if (!spare_page) {
                                err = -ENOMEM;
                                break;
                        }
-                       per_cpu_ptr(conf->percpu, cpu)->spare_page = spare_page;
+                       percpu->spare_page = spare_page;
                }
                scribble = kmalloc(conf->scribble_len, GFP_KERNEL);
                if (!scribble) {
                        err = -ENOMEM;
                        break;
                }
-               per_cpu_ptr(conf->percpu, cpu)->scribble = scribble;
+               percpu->scribble = scribble;
+               INIT_LIST_HEAD(&percpu->handle_list);
+               cpumask_clear(&percpu->handle_threads);
        }
 #ifdef CONFIG_HOTPLUG_CPU
        conf->cpu_notify.notifier_call = raid456_cpu_notify;
@@ -5171,17 +5576,19 @@ static struct r5conf *setup_conf(struct mddev *mddev)
        spin_lock_init(&conf->device_lock);
        init_waitqueue_head(&conf->wait_for_stripe);
        init_waitqueue_head(&conf->wait_for_overlap);
-       INIT_LIST_HEAD(&conf->handle_list);
        INIT_LIST_HEAD(&conf->hold_list);
        INIT_LIST_HEAD(&conf->delayed_list);
        INIT_LIST_HEAD(&conf->bitmap_list);
        INIT_LIST_HEAD(&conf->inactive_list);
+       init_llist_head(&conf->released_stripes);
        atomic_set(&conf->active_stripes, 0);
        atomic_set(&conf->preread_active_stripes, 0);
        atomic_set(&conf->active_aligned_reads, 0);
        conf->bypass_threshold = BYPASS_THRESHOLD;
        conf->recovery_disabled = mddev->recovery_disabled - 1;
 
+       cpumask_copy(&conf->work_mask, cpu_online_mask);
+
        conf->raid_disks = mddev->raid_disks;
        if (mddev->reshape_position == MaxSector)
                conf->previous_raid_disks = mddev->raid_disks;
@@ -5640,6 +6047,10 @@ abort:
 static int stop(struct mddev *mddev)
 {
        struct r5conf *conf = mddev->private;
+       int i;
+
+       for (i = 0; i < conf->aux_thread_num; i++)
+               __free_aux_thread(mddev, conf->aux_threads[i]);
 
        md_unregister_thread(&mddev->thread);
        if (mddev->queue)
index 70c49329ca9a23fbbad73061477ac0a0cfe80e6e..04540f302480332d364dccda2488a8c791e7270e 100644 (file)
@@ -197,6 +197,7 @@ enum reconstruct_states {
 struct stripe_head {
        struct hlist_node       hash;
        struct list_head        lru;          /* inactive_list or handle_list */
+       struct llist_node       release_list;
        struct r5conf           *raid_conf;
        short                   generation;     /* increments with every
                                                 * reshape */
@@ -211,6 +212,7 @@ struct stripe_head {
        enum check_states       check_state;
        enum reconstruct_states reconstruct_state;
        spinlock_t              stripe_lock;
+       int                     cpu;
        /**
         * struct stripe_operations
         * @target - STRIPE_OP_COMPUTE_BLK target
@@ -321,6 +323,7 @@ enum {
        STRIPE_OPS_REQ_PENDING,
        STRIPE_ON_UNPLUG_LIST,
        STRIPE_DISCARD,
+       STRIPE_ON_RELEASE_LIST,
 };
 
 /*
@@ -363,6 +366,14 @@ struct disk_info {
        struct md_rdev  *rdev, *replacement;
 };
 
+struct raid5_auxth {
+       struct md_thread        *thread;
+       /* which CPUs should the auxiliary thread handle stripes from */
+       cpumask_t               work_mask;
+       struct kobject          kobj;
+       struct work_struct      del_work;
+};
+
 struct r5conf {
        struct hlist_head       *stripe_hashtbl;
        struct mddev            *mddev;
@@ -431,6 +442,12 @@ struct r5conf {
                                              * lists and performing address
                                              * conversions
                                              */
+               struct list_head handle_list;
+       cpumask_t       handle_threads; /* Which threads may handle this
+                                        * CPU's stripes. It really is
+                                        * a bitmap into aux_threads[],
+                                        * but has at most NR_CPUS bits
+                                        */
        } __percpu *percpu;
        size_t                  scribble_len; /* size of scribble region must be
                                               * associated with conf to handle
@@ -445,6 +462,7 @@ struct r5conf {
         */
        atomic_t                active_stripes;
        struct list_head        inactive_list;
+       struct llist_head       released_stripes;
        wait_queue_head_t       wait_for_stripe;
        wait_queue_head_t       wait_for_overlap;
        int                     inactive_blocked;       /* release of inactive stripes blocked,
@@ -458,6 +476,10 @@ struct r5conf {
         * the new thread here until we fully activate the array.
         */
        struct md_thread        *thread;
+       int                     aux_thread_num;
+       struct raid5_auxth      **aux_threads;
+       /* which CPUs should the raid5d thread handle stripes from */
+       cpumask_t               work_mask;
 };
 
 /*
index 886da16e14f23e16c0ef12c8b5a9e225e76820b0..419a2d6b43491d505fd386dd3468064f165ee1ff 100644 (file)
 #define USB_PID_TECHNISAT_USB2_DVB_S2                  0x0500
 #define USB_PID_CPYTO_REDI_PC50A                       0xa803
 #define USB_PID_CTVDIGDUAL_V2                          0xe410
+#define USB_PID_PCTV_2002E                              0x025c
+#define USB_PID_PCTV_2002E_SE                           0x025d
 #endif
index 7606218ec4a760bb14bd5fcfbe0813e480f89466..aeb56c53e39f563dea2768c005f2df33ad90df6c 100644 (file)
 #include <linux/uaccess.h>
 
 #include <media/adv7343.h>
+#include <media/v4l2-async.h>
 #include <media/v4l2-device.h>
 #include <media/v4l2-ctrls.h>
+#include <media/v4l2-of.h>
 
 #include "adv7343_regs.h"
 
@@ -226,12 +228,12 @@ static int adv7343_setoutput(struct v4l2_subdev *sd, u32 output_type)
        else
                val = state->pdata->mode_config.sleep_mode << 0 |
                      state->pdata->mode_config.pll_control << 1 |
-                     state->pdata->mode_config.dac_3 << 2 |
-                     state->pdata->mode_config.dac_2 << 3 |
-                     state->pdata->mode_config.dac_1 << 4 |
-                     state->pdata->mode_config.dac_6 << 5 |
-                     state->pdata->mode_config.dac_5 << 6 |
-                     state->pdata->mode_config.dac_4 << 7;
+                     state->pdata->mode_config.dac[2] << 2 |
+                     state->pdata->mode_config.dac[1] << 3 |
+                     state->pdata->mode_config.dac[0] << 4 |
+                     state->pdata->mode_config.dac[5] << 5 |
+                     state->pdata->mode_config.dac[4] << 6 |
+                     state->pdata->mode_config.dac[3] << 7;
 
        err = adv7343_write(sd, ADV7343_POWER_MODE_REG, val);
        if (err < 0)
@@ -250,15 +252,15 @@ static int adv7343_setoutput(struct v4l2_subdev *sd, u32 output_type)
        /* configure SD DAC Output 2 and SD DAC Output 1 bit to zero */
        val = state->reg82 & (SD_DAC_1_DI & SD_DAC_2_DI);
 
-       if (state->pdata && state->pdata->sd_config.sd_dac_out1)
-               val = val | (state->pdata->sd_config.sd_dac_out1 << 1);
-       else if (state->pdata && !state->pdata->sd_config.sd_dac_out1)
-               val = val & ~(state->pdata->sd_config.sd_dac_out1 << 1);
+       if (state->pdata && state->pdata->sd_config.sd_dac_out[0])
+               val = val | (state->pdata->sd_config.sd_dac_out[0] << 1);
+       else if (state->pdata && !state->pdata->sd_config.sd_dac_out[0])
+               val = val & ~(state->pdata->sd_config.sd_dac_out[0] << 1);
 
-       if (state->pdata && state->pdata->sd_config.sd_dac_out2)
-               val = val | (state->pdata->sd_config.sd_dac_out2 << 2);
-       else if (state->pdata && !state->pdata->sd_config.sd_dac_out2)
-               val = val & ~(state->pdata->sd_config.sd_dac_out2 << 2);
+       if (state->pdata && state->pdata->sd_config.sd_dac_out[1])
+               val = val | (state->pdata->sd_config.sd_dac_out[1] << 2);
+       else if (state->pdata && !state->pdata->sd_config.sd_dac_out[1])
+               val = val & ~(state->pdata->sd_config.sd_dac_out[1] << 2);
 
        err = adv7343_write(sd, ADV7343_SD_MODE_REG2, val);
        if (err < 0)
@@ -398,6 +400,40 @@ static int adv7343_initialize(struct v4l2_subdev *sd)
        return err;
 }
 
+static struct adv7343_platform_data *
+adv7343_get_pdata(struct i2c_client *client)
+{
+       struct adv7343_platform_data *pdata;
+       struct device_node *np;
+
+       if (!IS_ENABLED(CONFIG_OF) || !client->dev.of_node)
+               return client->dev.platform_data;
+
+       np = v4l2_of_get_next_endpoint(client->dev.of_node, NULL);
+       if (!np)
+               return NULL;
+
+       pdata = devm_kzalloc(&client->dev, sizeof(*pdata), GFP_KERNEL);
+       if (!pdata)
+               goto done;
+
+       pdata->mode_config.sleep_mode =
+                       of_property_read_bool(np, "adi,power-mode-sleep-mode");
+
+       pdata->mode_config.pll_control =
+                       of_property_read_bool(np, "adi,power-mode-pll-ctrl");
+
+       of_property_read_u32_array(np, "adi,dac-enable",
+                                  pdata->mode_config.dac, 6);
+
+       of_property_read_u32_array(np, "adi,sd-dac-enable",
+                                  pdata->sd_config.sd_dac_out, 2);
+
+done:
+       of_node_put(np);
+       return pdata;
+}
+
 static int adv7343_probe(struct i2c_client *client,
                                const struct i2c_device_id *id)
 {
@@ -416,7 +452,7 @@ static int adv7343_probe(struct i2c_client *client,
                return -ENOMEM;
 
        /* Copy board specific information here */
-       state->pdata = client->dev.platform_data;
+       state->pdata = adv7343_get_pdata(client);
 
        state->reg00    = 0x80;
        state->reg01    = 0x00;
@@ -445,16 +481,21 @@ static int adv7343_probe(struct i2c_client *client,
                                       ADV7343_GAIN_DEF);
        state->sd.ctrl_handler = &state->hdl;
        if (state->hdl.error) {
-               int err = state->hdl.error;
-
-               v4l2_ctrl_handler_free(&state->hdl);
-               return err;
+               err = state->hdl.error;
+               goto done;
        }
        v4l2_ctrl_handler_setup(&state->hdl);
 
        err = adv7343_initialize(&state->sd);
        if (err)
+               goto done;
+
+       err = v4l2_async_register_subdev(&state->sd);
+
+done:
+       if (err < 0)
                v4l2_ctrl_handler_free(&state->hdl);
+
        return err;
 }
 
@@ -463,6 +504,7 @@ static int adv7343_remove(struct i2c_client *client)
        struct v4l2_subdev *sd = i2c_get_clientdata(client);
        struct adv7343_state *state = to_state(sd);
 
+       v4l2_async_unregister_subdev(&state->sd);
        v4l2_device_unregister_subdev(sd);
        v4l2_ctrl_handler_free(&state->hdl);
 
@@ -476,8 +518,17 @@ static const struct i2c_device_id adv7343_id[] = {
 
 MODULE_DEVICE_TABLE(i2c, adv7343_id);
 
+#if IS_ENABLED(CONFIG_OF)
+static const struct of_device_id adv7343_of_match[] = {
+       {.compatible = "adi,adv7343", },
+       { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, adv7343_of_match);
+#endif
+
 static struct i2c_driver adv7343_driver = {
        .driver = {
+               .of_match_table = of_match_ptr(adv7343_of_match),
                .owner  = THIS_MODULE,
                .name   = "adv7343",
        },
index a9857022f71d424a02baa2aaf84652781db58fa7..a9110d8bbbcd684e691ac63199d6fbb6c1902b95 100644 (file)
@@ -209,7 +209,8 @@ static int ml86v7667_mbus_fmt(struct v4l2_subdev *sd,
 
        fmt->code = V4L2_MBUS_FMT_YUYV8_2X8;
        fmt->colorspace = V4L2_COLORSPACE_SMPTE170M;
-       fmt->field = V4L2_FIELD_INTERLACED;
+       /* The top field is always transferred first by the chip */
+       fmt->field = V4L2_FIELD_INTERLACED_TB;
        fmt->width = 720;
        fmt->height = priv->std & V4L2_STD_525_60 ? 480 : 576;
 
index a24f90c5261c5f149ca517f13be0b84e6cf19132..8a29810d155a1b50f13838ae14184ed131cf5562 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/module.h>
 #include <linux/v4l2-dv-timings.h>
 
+#include <media/v4l2-async.h>
 #include <media/v4l2-device.h>
 
 #include "ths8200_regs.h"
@@ -500,6 +501,7 @@ static int ths8200_probe(struct i2c_client *client,
 {
        struct ths8200_state *state;
        struct v4l2_subdev *sd;
+       int error;
 
        /* Check if the adapter supports the needed features */
        if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
@@ -517,6 +519,10 @@ static int ths8200_probe(struct i2c_client *client,
 
        ths8200_core_init(sd);
 
+       error = v4l2_async_register_subdev(&state->sd);
+       if (error)
+               return error;
+
        v4l2_info(sd, "%s found @ 0x%x (%s)\n", client->name,
                  client->addr << 1, client->adapter->name);
 
@@ -526,12 +532,13 @@ static int ths8200_probe(struct i2c_client *client,
 static int ths8200_remove(struct i2c_client *client)
 {
        struct v4l2_subdev *sd = i2c_get_clientdata(client);
+       struct ths8200_state *decoder = to_state(sd);
 
        v4l2_dbg(1, debug, sd, "%s removed @ 0x%x (%s)\n", client->name,
                 client->addr << 1, client->adapter->name);
 
        ths8200_s_power(sd, false);
-
+       v4l2_async_unregister_subdev(&decoder->sd);
        v4l2_device_unregister_subdev(sd);
 
        return 0;
@@ -543,10 +550,19 @@ static struct i2c_device_id ths8200_id[] = {
 };
 MODULE_DEVICE_TABLE(i2c, ths8200_id);
 
+#if IS_ENABLED(CONFIG_OF)
+static const struct of_device_id ths8200_of_match[] = {
+       { .compatible = "ti,ths8200", },
+       { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, ths8200_of_match);
+#endif
+
 static struct i2c_driver ths8200_driver = {
        .driver = {
                .owner = THIS_MODULE,
                .name = "ths8200",
+               .of_match_table = of_match_ptr(ths8200_of_match),
        },
        .probe = ths8200_probe,
        .remove = ths8200_remove,
index 9c6d66a9868f7ac30955dd369340012f6d74084e..91f3dd4cda1b66926ac9ac77e50e17ee30579160 100644 (file)
@@ -36,6 +36,7 @@
 #include <linux/module.h>
 #include <linux/v4l2-mediabus.h>
 
+#include <media/v4l2-async.h>
 #include <media/v4l2-device.h>
 #include <media/v4l2-common.h>
 #include <media/v4l2-mediabus.h>
@@ -1175,16 +1176,22 @@ tvp514x_probe(struct i2c_client *client, const struct i2c_device_id *id)
        sd->ctrl_handler = &decoder->hdl;
        if (decoder->hdl.error) {
                ret = decoder->hdl.error;
-
-               v4l2_ctrl_handler_free(&decoder->hdl);
-               return ret;
+               goto done;
        }
        v4l2_ctrl_handler_setup(&decoder->hdl);
 
-       v4l2_info(sd, "%s decoder driver registered !!\n", sd->name);
-
-       return 0;
+       ret = v4l2_async_register_subdev(&decoder->sd);
+       if (!ret)
+               v4l2_info(sd, "%s decoder driver registered !!\n", sd->name);
 
+done:
+       if (ret < 0) {
+               v4l2_ctrl_handler_free(&decoder->hdl);
+#if defined(CONFIG_MEDIA_CONTROLLER)
+               media_entity_cleanup(&decoder->sd.entity);
+#endif
+       }
+       return ret;
 }
 
 /**
@@ -1199,6 +1206,7 @@ static int tvp514x_remove(struct i2c_client *client)
        struct v4l2_subdev *sd = i2c_get_clientdata(client);
        struct tvp514x_decoder *decoder = to_decoder(sd);
 
+       v4l2_async_unregister_subdev(&decoder->sd);
        v4l2_device_unregister_subdev(sd);
 #if defined(CONFIG_MEDIA_CONTROLLER)
        media_entity_cleanup(&decoder->sd.entity);
index a4e49483de6a2f0e77820591c85e114f603d090e..f6b1f3fe2608a7f789444de2543c6c81ecf1b392 100644 (file)
@@ -31,6 +31,7 @@
 #include <linux/module.h>
 #include <linux/v4l2-dv-timings.h>
 #include <media/tvp7002.h>
+#include <media/v4l2-async.h>
 #include <media/v4l2-device.h>
 #include <media/v4l2-common.h>
 #include <media/v4l2-ctrls.h>
@@ -1039,6 +1040,10 @@ static int tvp7002_probe(struct i2c_client *c, const struct i2c_device_id *id)
        }
        v4l2_ctrl_handler_setup(&device->hdl);
 
+       error = v4l2_async_register_subdev(&device->sd);
+       if (error)
+               goto error;
+
        return 0;
 
 error:
@@ -1063,6 +1068,7 @@ static int tvp7002_remove(struct i2c_client *c)
 
        v4l2_dbg(1, debug, sd, "Removing tvp7002 adapter"
                                "on address 0x%x\n", c->addr);
+       v4l2_async_unregister_subdev(&device->sd);
 #if defined(CONFIG_MEDIA_CONTROLLER)
        media_entity_cleanup(&device->sd.entity);
 #endif
index e564aac0aa30fc67ee3eeb9cca5ea7f2dabdf6d5..d85cb0ace4dc654b541017a38fde5e0e004f3b1e 100644 (file)
@@ -4441,9 +4441,7 @@ static void tibetCS16_init(struct bttv *btv)
  * is {3, 0, 2, 1}, i.e. the first controller to be detected is logical
  * unit 3, the second (which is the master) is logical unit 0, etc.
  * We need to maintain the status of the analog switch (which of the 16
- * cameras is connected to which of the 4 controllers).  Rather than
- * add to the bttv structure for this, we use the data reserved for
- * the mbox (unused for this card type).
+ * cameras is connected to which of the 4 controllers) in sw_status array.
  */
 
 /*
@@ -4478,7 +4476,6 @@ static void kodicom4400r_write(struct bttv *btv,
  */
 static void kodicom4400r_muxsel(struct bttv *btv, unsigned int input)
 {
-       char *sw_status;
        int xaddr, yaddr;
        struct bttv *mctlr;
        static unsigned char map[4] = {3, 0, 2, 1};
@@ -4489,14 +4486,13 @@ static void kodicom4400r_muxsel(struct bttv *btv, unsigned int input)
        }
        yaddr = (btv->c.nr - mctlr->c.nr + 1) & 3; /* the '&' is for safety */
        yaddr = map[yaddr];
-       sw_status = (char *)(&mctlr->mbox_we);
        xaddr = input & 0xf;
        /* Check if the controller/camera pair has changed, else ignore */
-       if (sw_status[yaddr] != xaddr)
+       if (mctlr->sw_status[yaddr] != xaddr)
        {
                /* "open" the old switch, "close" the new one, save the new */
-               kodicom4400r_write(mctlr, sw_status[yaddr], yaddr, 0);
-               sw_status[yaddr] = xaddr;
+               kodicom4400r_write(mctlr, mctlr->sw_status[yaddr], yaddr, 0);
+               mctlr->sw_status[yaddr] = xaddr;
                kodicom4400r_write(mctlr, xaddr, yaddr, 1);
        }
 }
@@ -4509,7 +4505,6 @@ static void kodicom4400r_muxsel(struct bttv *btv, unsigned int input)
  */
 static void kodicom4400r_init(struct bttv *btv)
 {
-       char *sw_status = (char *)(&btv->mbox_we);
        int ix;
 
        gpio_inout(0x0003ff, 0x0003ff);
@@ -4517,7 +4512,7 @@ static void kodicom4400r_init(struct bttv *btv)
        gpio_write(0);
        /* Preset camera 0 to the 4 controllers */
        for (ix = 0; ix < 4; ix++) {
-               sw_status[ix] = ix;
+               btv->sw_status[ix] = ix;
                kodicom4400r_write(btv, ix, ix, 1);
        }
        /*
@@ -4794,7 +4789,6 @@ static void gv800s_write(struct bttv *btv,
 static void gv800s_muxsel(struct bttv *btv, unsigned int input)
 {
        struct bttv *mctlr;
-       char *sw_status;
        int xaddr, yaddr;
        static unsigned int map[4][4] = { { 0x0, 0x4, 0xa, 0x6 },
                                          { 0x1, 0x5, 0xb, 0x7 },
@@ -4807,14 +4801,13 @@ static void gv800s_muxsel(struct bttv *btv, unsigned int input)
                return;
        }
        yaddr = (btv->c.nr - mctlr->c.nr) & 3;
-       sw_status = (char *)(&mctlr->mbox_we);
        xaddr = map[yaddr][input] & 0xf;
 
        /* Check if the controller/camera pair has changed, ignore otherwise */
-       if (sw_status[yaddr] != xaddr) {
+       if (mctlr->sw_status[yaddr] != xaddr) {
                /* disable the old switch, enable the new one and save status */
-               gv800s_write(mctlr, sw_status[yaddr], yaddr, 0);
-               sw_status[yaddr] = xaddr;
+               gv800s_write(mctlr, mctlr->sw_status[yaddr], yaddr, 0);
+               mctlr->sw_status[yaddr] = xaddr;
                gv800s_write(mctlr, xaddr, yaddr, 1);
        }
 }
@@ -4822,7 +4815,6 @@ static void gv800s_muxsel(struct bttv *btv, unsigned int input)
 /* GeoVision GV-800(S) "master" chip init */
 static void gv800s_init(struct bttv *btv)
 {
-       char *sw_status = (char *)(&btv->mbox_we);
        int ix;
 
        gpio_inout(0xf107f, 0xf107f);
@@ -4831,7 +4823,7 @@ static void gv800s_init(struct bttv *btv)
 
        /* Preset camera 0 to the 4 controllers */
        for (ix = 0; ix < 4; ix++) {
-               sw_status[ix] = ix;
+               btv->sw_status[ix] = ix;
                gv800s_write(btv, ix, ix, 1);
        }
 
index 9c1cc2c50ee2abc9ef916dd3bf922ae5ab9505d5..6eefb595d0fa67509620627ed0b755ffa2747cfe 100644 (file)
@@ -459,6 +459,9 @@ struct bttv {
        int mbox_iow;
        int mbox_csel;
 
+       /* switch status for multi-controller cards */
+       char sw_status[4];
+
        /* risc memory management data
           - must acquire s_lock before changing these
           - only the irq handler is supported to touch top + bottom + vcurr */
index e958a01fd554c27ec8b8be330bffbc0749ebb044..c443b7ac5adfeb29806fd0cc345ac9b8d64c0c16 100644 (file)
@@ -23,6 +23,7 @@
 
 #include "cx23885.h"
 #include "cx23885-av.h"
+#include "cx23885-video.h"
 
 void cx23885_av_work_handler(struct work_struct *work)
 {
@@ -32,5 +33,17 @@ void cx23885_av_work_handler(struct work_struct *work)
 
        v4l2_subdev_call(dev->sd_cx25840, core, interrupt_service_routine,
                         PCI_MSK_AV_CORE, &handled);
+
+       /* If we get here with the interrupt still not handled,
+          then the flatiron probably has pending interrupts.
+       */
+       if (!handled) {
+               /* clear left and right adc channel interrupt request flag */
+               cx23885_flatiron_write(dev, 0x1f,
+                       cx23885_flatiron_read(dev, 0x1f) | 0x80);
+               cx23885_flatiron_write(dev, 0x23,
+                       cx23885_flatiron_read(dev, 0x23) | 0x80);
+       }
+
        cx23885_irq_enable(dev, PCI_MSK_AV_CORE);
 }
index 9c5ed10b2c5eb8e5d1c03897d2bf5813f68a41fa..bb291c661143b501bc2523b3df484752c40fc8cc 100644 (file)
@@ -1249,6 +1249,10 @@ static int dvb_register(struct cx23885_tsport *port)
                fe0->dvb.frontend = dvb_attach(ds3000_attach,
                                        &tevii_ds3000_config,
                                        &i2c_bus->i2c_adap);
+               if (fe0->dvb.frontend != NULL) {
+                       dvb_attach(ts2020_attach, fe0->dvb.frontend,
+                               &tevii_ts2020_config, &i2c_bus->i2c_adap);
+               }
                break;
        case CX23885_BOARD_PROF_8000:
                i2c_bus = &dev->i2c_bus[0];
index e33d1a7dfdd09ee253e2d8160bdc31206f292c2b..161686832b2046c173756953485cda329c77b2c6 100644 (file)
@@ -32,6 +32,7 @@
 #include <asm/div64.h>
 
 #include "cx23885.h"
+#include "cx23885-video.h"
 #include <media/v4l2-common.h>
 #include <media/v4l2-ioctl.h>
 #include "cx23885-ioctl.h"
@@ -417,7 +418,7 @@ static void res_free(struct cx23885_dev *dev, struct cx23885_fh *fh,
        mutex_unlock(&dev->lock);
 }
 
-static int cx23885_flatiron_write(struct cx23885_dev *dev, u8 reg, u8 data)
+int cx23885_flatiron_write(struct cx23885_dev *dev, u8 reg, u8 data)
 {
        /* 8 bit registers, 8 bit values */
        u8 buf[] = { reg, data };
@@ -428,7 +429,7 @@ static int cx23885_flatiron_write(struct cx23885_dev *dev, u8 reg, u8 data)
        return i2c_transfer(&dev->i2c_bus[2].i2c_adap, &msg, 1);
 }
 
-static u8 cx23885_flatiron_read(struct cx23885_dev *dev, u8 reg)
+u8 cx23885_flatiron_read(struct cx23885_dev *dev, u8 reg)
 {
        /* 8 bit registers, 8 bit values */
        int ret;
diff --git a/drivers/media/pci/cx23885/cx23885-video.h b/drivers/media/pci/cx23885/cx23885-video.h
new file mode 100644 (file)
index 0000000..c961a2b
--- /dev/null
@@ -0,0 +1,26 @@
+/*
+ *  Driver for the Conexant CX23885/7/8 PCIe bridge
+ *
+ *  Copyright (C) 2010  Andy Walls <awalls@md.metrocast.net>
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version 2
+ *  of the License, or (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ *  02110-1301, USA.
+ */
+
+#ifndef _CX23885_VIDEO_H_
+#define _CX23885_VIDEO_H_
+int cx23885_flatiron_write(struct cx23885_dev *dev, u8 reg, u8 data);
+u8 cx23885_flatiron_read(struct cx23885_dev *dev, u8 reg);
+#endif
index bd9405df1bd67abf3a8051d0cefabe3a7ad0c362..66db0dfbadbfcd68f848c1df6b52a810b22e1192 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/interrupt.h>
 #include <linux/io.h>
 #include <linux/irq.h>
+#include <linux/kfifo.h>
 #include <linux/module.h>
 #include <linux/of_device.h>
 #include <linux/platform_device.h>
@@ -28,6 +29,7 @@
 
 #include <media/v4l2-ctrls.h>
 #include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
 #include <media/v4l2-ioctl.h>
 #include <media/v4l2-mem2mem.h>
 #include <media/videobuf2-core.h>
 
 #define CODA_FMO_BUF_SIZE      32
 #define CODADX6_WORK_BUF_SIZE  (288 * 1024 + CODA_FMO_BUF_SIZE * 8 * 1024)
-#define CODA7_WORK_BUF_SIZE    (512 * 1024 + CODA_FMO_BUF_SIZE * 8 * 1024)
+#define CODA7_WORK_BUF_SIZE    (128 * 1024)
+#define CODA7_TEMP_BUF_SIZE    (304 * 1024)
 #define CODA_PARA_BUF_SIZE     (10 * 1024)
 #define CODA_ISRAM_SIZE        (2048 * 2)
 #define CODADX6_IRAM_SIZE      0xb000
-#define CODA7_IRAM_SIZE                0x14000 /* 81920 bytes */
+#define CODA7_IRAM_SIZE                0x14000
 
-#define CODA_MAX_FRAMEBUFFERS  2
+#define CODA7_PS_BUF_SIZE      0x28000
+
+#define CODA_MAX_FRAMEBUFFERS  8
 
 #define MAX_W          8192
 #define MAX_H          8192
@@ -129,6 +134,7 @@ struct coda_dev {
        struct clk              *clk_ahb;
 
        struct coda_aux_buf     codebuf;
+       struct coda_aux_buf     tempbuf;
        struct coda_aux_buf     workbuf;
        struct gen_pool         *iram_pool;
        long unsigned int       iram_vaddr;
@@ -153,6 +159,7 @@ struct coda_params {
        u8                      mpeg4_inter_qp;
        u8                      gop_size;
        int                     codec_mode;
+       int                     codec_mode_aux;
        enum v4l2_mpeg_video_multi_slice_mode slice_mode;
        u32                     framerate;
        u16                     bitrate;
@@ -160,13 +167,30 @@ struct coda_params {
        u32                     slice_max_mb;
 };
 
+struct coda_iram_info {
+       u32             axi_sram_use;
+       phys_addr_t     buf_bit_use;
+       phys_addr_t     buf_ip_ac_dc_use;
+       phys_addr_t     buf_dbk_y_use;
+       phys_addr_t     buf_dbk_c_use;
+       phys_addr_t     buf_ovl_use;
+       phys_addr_t     buf_btp_use;
+       phys_addr_t     search_ram_paddr;
+       int             search_ram_size;
+};
+
 struct coda_ctx {
        struct coda_dev                 *dev;
+       struct mutex                    buffer_mutex;
        struct list_head                list;
+       struct work_struct              skip_run;
        int                             aborting;
+       int                             initialized;
        int                             streamon_out;
        int                             streamon_cap;
        u32                             isequence;
+       u32                             qsequence;
+       u32                             osequence;
        struct coda_q_data              q_data[2];
        enum coda_inst_type             inst_type;
        struct coda_codec               *codec;
@@ -176,12 +200,25 @@ struct coda_ctx {
        struct v4l2_ctrl_handler        ctrls;
        struct v4l2_fh                  fh;
        int                             gopcounter;
+       int                             runcounter;
        char                            vpu_header[3][64];
        int                             vpu_header_size[3];
+       struct kfifo                    bitstream_fifo;
+       struct mutex                    bitstream_mutex;
+       struct coda_aux_buf             bitstream;
+       bool                            prescan_failed;
        struct coda_aux_buf             parabuf;
+       struct coda_aux_buf             psbuf;
+       struct coda_aux_buf             slicebuf;
        struct coda_aux_buf             internal_frames[CODA_MAX_FRAMEBUFFERS];
+       struct coda_aux_buf             workbuf;
        int                             num_internal_frames;
        int                             idx;
+       int                             reg_idx;
+       struct coda_iram_info           iram_info;
+       u32                             bit_stream_param;
+       u32                             frm_dis_flg;
+       int                             display_idx;
 };
 
 static const u8 coda_filler_nal[14] = { 0x00, 0x00, 0x00, 0x01, 0x0c, 0xff,
@@ -228,10 +265,22 @@ static int coda_wait_timeout(struct coda_dev *dev)
 static void coda_command_async(struct coda_ctx *ctx, int cmd)
 {
        struct coda_dev *dev = ctx->dev;
+
+       if (dev->devtype->product == CODA_7541) {
+               /* Restore context related registers to CODA */
+               coda_write(dev, ctx->bit_stream_param,
+                               CODA_REG_BIT_BIT_STREAM_PARAM);
+               coda_write(dev, ctx->frm_dis_flg,
+                               CODA_REG_BIT_FRM_DIS_FLG(ctx->reg_idx));
+               coda_write(dev, ctx->workbuf.paddr, CODA_REG_BIT_WORK_BUF_ADDR);
+       }
+
        coda_write(dev, CODA_REG_BIT_BUSY_FLAG, CODA_REG_BIT_BUSY);
 
        coda_write(dev, ctx->idx, CODA_REG_BIT_RUN_INDEX);
        coda_write(dev, ctx->params.codec_mode, CODA_REG_BIT_RUN_COD_STD);
+       coda_write(dev, ctx->params.codec_mode_aux, CODA7_REG_BIT_RUN_AUX_STD);
+
        coda_write(dev, cmd, CODA_REG_BIT_RUN_COMMAND);
 }
 
@@ -297,6 +346,8 @@ static struct coda_codec codadx6_codecs[] = {
 static struct coda_codec coda7_codecs[] = {
        CODA_CODEC(CODA7_MODE_ENCODE_H264, V4L2_PIX_FMT_YUV420, V4L2_PIX_FMT_H264,   1280, 720),
        CODA_CODEC(CODA7_MODE_ENCODE_MP4,  V4L2_PIX_FMT_YUV420, V4L2_PIX_FMT_MPEG4,  1280, 720),
+       CODA_CODEC(CODA7_MODE_DECODE_H264, V4L2_PIX_FMT_H264,   V4L2_PIX_FMT_YUV420, 1920, 1080),
+       CODA_CODEC(CODA7_MODE_DECODE_MP4,  V4L2_PIX_FMT_MPEG4,  V4L2_PIX_FMT_YUV420, 1920, 1080),
 };
 
 static bool coda_format_is_yuv(u32 fourcc)
@@ -365,7 +416,7 @@ static int vidioc_querycap(struct file *file, void *priv,
 }
 
 static int enum_fmt(void *priv, struct v4l2_fmtdesc *f,
-                       enum v4l2_buf_type type)
+                       enum v4l2_buf_type type, int src_fourcc)
 {
        struct coda_ctx *ctx = fh_to_ctx(priv);
        struct coda_codec *codecs = ctx->dev->devtype->codecs;
@@ -377,7 +428,8 @@ static int enum_fmt(void *priv, struct v4l2_fmtdesc *f,
 
        for (i = 0; i < num_formats; i++) {
                /* Both uncompressed formats are always supported */
-               if (coda_format_is_yuv(formats[i].fourcc)) {
+               if (coda_format_is_yuv(formats[i].fourcc) &&
+                   !coda_format_is_yuv(src_fourcc)) {
                        if (num == f->index)
                                break;
                        ++num;
@@ -385,8 +437,10 @@ static int enum_fmt(void *priv, struct v4l2_fmtdesc *f,
                }
                /* Compressed formats may be supported, check the codec list */
                for (k = 0; k < num_codecs; k++) {
+                       /* if src_fourcc is set, only consider matching codecs */
                        if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE &&
-                           formats[i].fourcc == codecs[k].dst_fourcc)
+                           formats[i].fourcc == codecs[k].dst_fourcc &&
+                           (!src_fourcc || src_fourcc == codecs[k].src_fourcc))
                                break;
                        if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT &&
                            formats[i].fourcc == codecs[k].src_fourcc)
@@ -413,13 +467,26 @@ static int enum_fmt(void *priv, struct v4l2_fmtdesc *f,
 static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
                                   struct v4l2_fmtdesc *f)
 {
-       return enum_fmt(priv, f, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+       struct coda_ctx *ctx = fh_to_ctx(priv);
+       struct vb2_queue *src_vq;
+       struct coda_q_data *q_data_src;
+
+       /* If the source format is already fixed, only list matching formats */
+       src_vq = v4l2_m2m_get_vq(ctx->m2m_ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+       if (vb2_is_streaming(src_vq)) {
+               q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+
+               return enum_fmt(priv, f, V4L2_BUF_TYPE_VIDEO_CAPTURE,
+                               q_data_src->fourcc);
+       }
+
+       return enum_fmt(priv, f, V4L2_BUF_TYPE_VIDEO_CAPTURE, 0);
 }
 
 static int vidioc_enum_fmt_vid_out(struct file *file, void *priv,
                                   struct v4l2_fmtdesc *f)
 {
-       return enum_fmt(priv, f, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+       return enum_fmt(priv, f, V4L2_BUF_TYPE_VIDEO_OUTPUT, 0);
 }
 
 static int vidioc_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
@@ -492,15 +559,45 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
                                  struct v4l2_format *f)
 {
        struct coda_ctx *ctx = fh_to_ctx(priv);
-       struct coda_codec *codec = NULL;
+       struct coda_codec *codec;
+       struct vb2_queue *src_vq;
+       int ret;
 
-       /* Determine codec by the encoded format */
-       codec = coda_find_codec(ctx->dev, V4L2_PIX_FMT_YUV420,
-                               f->fmt.pix.pixelformat);
+       /*
+        * If the source format is already fixed, try to find a codec that
+        * converts to the given destination format
+        */
+       src_vq = v4l2_m2m_get_vq(ctx->m2m_ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+       if (vb2_is_streaming(src_vq)) {
+               struct coda_q_data *q_data_src;
+
+               q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+               codec = coda_find_codec(ctx->dev, q_data_src->fourcc,
+                                       f->fmt.pix.pixelformat);
+               if (!codec)
+                       return -EINVAL;
+       } else {
+               /* Otherwise determine codec by encoded format, if possible */
+               codec = coda_find_codec(ctx->dev, V4L2_PIX_FMT_YUV420,
+                                       f->fmt.pix.pixelformat);
+       }
 
        f->fmt.pix.colorspace = ctx->colorspace;
 
-       return vidioc_try_fmt(codec, f);
+       ret = vidioc_try_fmt(codec, f);
+       if (ret < 0)
+               return ret;
+
+       /* The h.264 decoder only returns complete 16x16 macroblocks */
+       if (codec && codec->src_fourcc == V4L2_PIX_FMT_H264) {
+               f->fmt.pix.width = round_up(f->fmt.pix.width, 16);
+               f->fmt.pix.height = round_up(f->fmt.pix.height, 16);
+               f->fmt.pix.bytesperline = f->fmt.pix.width;
+               f->fmt.pix.sizeimage = f->fmt.pix.bytesperline *
+                                      f->fmt.pix.height * 3 / 2;
+       }
+
+       return 0;
 }
 
 static int vidioc_try_fmt_vid_out(struct file *file, void *priv,
@@ -610,11 +707,35 @@ static int vidioc_expbuf(struct file *file, void *priv,
        return v4l2_m2m_expbuf(file, ctx->m2m_ctx, eb);
 }
 
+static bool coda_buf_is_end_of_stream(struct coda_ctx *ctx,
+                                     struct v4l2_buffer *buf)
+{
+       struct vb2_queue *src_vq;
+
+       src_vq = v4l2_m2m_get_vq(ctx->m2m_ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+
+       return ((ctx->bit_stream_param & CODA_BIT_STREAM_END_FLAG) &&
+               (buf->sequence == (ctx->qsequence - 1)));
+}
+
 static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
 {
        struct coda_ctx *ctx = fh_to_ctx(priv);
+       int ret;
+
+       ret = v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf);
+
+       /* If this is the last capture buffer, emit an end-of-stream event */
+       if (buf->type == V4L2_BUF_TYPE_VIDEO_CAPTURE &&
+           coda_buf_is_end_of_stream(ctx, buf)) {
+               const struct v4l2_event eos_event = {
+                       .type = V4L2_EVENT_EOS
+               };
+
+               v4l2_event_queue_fh(&ctx->fh, &eos_event);
+       }
 
-       return v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf);
+       return ret;
 }
 
 static int vidioc_create_bufs(struct file *file, void *priv,
@@ -637,8 +758,53 @@ static int vidioc_streamoff(struct file *file, void *priv,
                            enum v4l2_buf_type type)
 {
        struct coda_ctx *ctx = fh_to_ctx(priv);
+       int ret;
+
+       /*
+        * This indirectly calls __vb2_queue_cancel, which dequeues all buffers.
+        * We therefore have to lock it against running hardware in this context,
+        * which still needs the buffers.
+        */
+       mutex_lock(&ctx->buffer_mutex);
+       ret = v4l2_m2m_streamoff(file, ctx->m2m_ctx, type);
+       mutex_unlock(&ctx->buffer_mutex);
+
+       return ret;
+}
+
+static int vidioc_decoder_cmd(struct file *file, void *fh,
+                             struct v4l2_decoder_cmd *dc)
+{
+       struct coda_ctx *ctx = fh_to_ctx(fh);
+
+       if (dc->cmd != V4L2_DEC_CMD_STOP)
+               return -EINVAL;
+
+       if ((dc->flags & V4L2_DEC_CMD_STOP_TO_BLACK) ||
+           (dc->flags & V4L2_DEC_CMD_STOP_IMMEDIATELY))
+               return -EINVAL;
+
+       if (dc->stop.pts != 0)
+               return -EINVAL;
+
+       if (ctx->inst_type != CODA_INST_DECODER)
+               return -EINVAL;
+
+       /* Set the stream-end flag on this context */
+       ctx->bit_stream_param |= CODA_BIT_STREAM_END_FLAG;
+
+       return 0;
+}
 
-       return v4l2_m2m_streamoff(file, ctx->m2m_ctx, type);
+static int vidioc_subscribe_event(struct v4l2_fh *fh,
+                                 const struct v4l2_event_subscription *sub)
+{
+       switch (sub->type) {
+       case V4L2_EVENT_EOS:
+               return v4l2_event_subscribe(fh, sub, 0, NULL);
+       default:
+               return v4l2_ctrl_subscribe_event(fh, sub);
+       }
 }
 
 static const struct v4l2_ioctl_ops coda_ioctl_ops = {
@@ -664,14 +830,206 @@ static const struct v4l2_ioctl_ops coda_ioctl_ops = {
 
        .vidioc_streamon        = vidioc_streamon,
        .vidioc_streamoff       = vidioc_streamoff,
+
+       .vidioc_decoder_cmd     = vidioc_decoder_cmd,
+
+       .vidioc_subscribe_event = vidioc_subscribe_event,
+       .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
 };
 
+static int coda_start_decoding(struct coda_ctx *ctx);
+
+static void coda_skip_run(struct work_struct *work)
+{
+       struct coda_ctx *ctx = container_of(work, struct coda_ctx, skip_run);
+
+       v4l2_m2m_job_finish(ctx->dev->m2m_dev, ctx->m2m_ctx);
+}
+
+static inline int coda_get_bitstream_payload(struct coda_ctx *ctx)
+{
+       return kfifo_len(&ctx->bitstream_fifo);
+}
+
+static void coda_kfifo_sync_from_device(struct coda_ctx *ctx)
+{
+       struct __kfifo *kfifo = &ctx->bitstream_fifo.kfifo;
+       struct coda_dev *dev = ctx->dev;
+       u32 rd_ptr;
+
+       rd_ptr = coda_read(dev, CODA_REG_BIT_RD_PTR(ctx->reg_idx));
+       kfifo->out = (kfifo->in & ~kfifo->mask) |
+                     (rd_ptr - ctx->bitstream.paddr);
+       if (kfifo->out > kfifo->in)
+               kfifo->out -= kfifo->mask + 1;
+}
+
+static void coda_kfifo_sync_to_device_full(struct coda_ctx *ctx)
+{
+       struct __kfifo *kfifo = &ctx->bitstream_fifo.kfifo;
+       struct coda_dev *dev = ctx->dev;
+       u32 rd_ptr, wr_ptr;
+
+       rd_ptr = ctx->bitstream.paddr + (kfifo->out & kfifo->mask);
+       coda_write(dev, rd_ptr, CODA_REG_BIT_RD_PTR(ctx->reg_idx));
+       wr_ptr = ctx->bitstream.paddr + (kfifo->in & kfifo->mask);
+       coda_write(dev, wr_ptr, CODA_REG_BIT_WR_PTR(ctx->reg_idx));
+}
+
+static void coda_kfifo_sync_to_device_write(struct coda_ctx *ctx)
+{
+       struct __kfifo *kfifo = &ctx->bitstream_fifo.kfifo;
+       struct coda_dev *dev = ctx->dev;
+       u32 wr_ptr;
+
+       wr_ptr = ctx->bitstream.paddr + (kfifo->in & kfifo->mask);
+       coda_write(dev, wr_ptr, CODA_REG_BIT_WR_PTR(ctx->reg_idx));
+}
+
+static int coda_bitstream_queue(struct coda_ctx *ctx, struct vb2_buffer *src_buf)
+{
+       u32 src_size = vb2_get_plane_payload(src_buf, 0);
+       u32 n;
+
+       n = kfifo_in(&ctx->bitstream_fifo, vb2_plane_vaddr(src_buf, 0), src_size);
+       if (n < src_size)
+               return -ENOSPC;
+
+       dma_sync_single_for_device(&ctx->dev->plat_dev->dev, ctx->bitstream.paddr,
+                                  ctx->bitstream.size, DMA_TO_DEVICE);
+
+       ctx->qsequence++;
+
+       return 0;
+}
+
+static bool coda_bitstream_try_queue(struct coda_ctx *ctx,
+                                    struct vb2_buffer *src_buf)
+{
+       int ret;
+
+       if (coda_get_bitstream_payload(ctx) +
+           vb2_get_plane_payload(src_buf, 0) + 512 >= ctx->bitstream.size)
+               return false;
+
+       if (vb2_plane_vaddr(src_buf, 0) == NULL) {
+               v4l2_err(&ctx->dev->v4l2_dev, "trying to queue empty buffer\n");
+               return true;
+       }
+
+       ret = coda_bitstream_queue(ctx, src_buf);
+       if (ret < 0) {
+               v4l2_err(&ctx->dev->v4l2_dev, "bitstream buffer overflow\n");
+               return false;
+       }
+       /* Sync read pointer to device */
+       if (ctx == v4l2_m2m_get_curr_priv(ctx->dev->m2m_dev))
+               coda_kfifo_sync_to_device_write(ctx);
+
+       ctx->prescan_failed = false;
+
+       return true;
+}
+
+static void coda_fill_bitstream(struct coda_ctx *ctx)
+{
+       struct vb2_buffer *src_buf;
+
+       while (v4l2_m2m_num_src_bufs_ready(ctx->m2m_ctx) > 0) {
+               src_buf = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
+
+               if (coda_bitstream_try_queue(ctx, src_buf)) {
+                       src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
+                       v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);
+               } else {
+                       break;
+               }
+       }
+}
+
 /*
  * Mem-to-mem operations.
  */
-static void coda_device_run(void *m2m_priv)
+static int coda_prepare_decode(struct coda_ctx *ctx)
+{
+       struct vb2_buffer *dst_buf;
+       struct coda_dev *dev = ctx->dev;
+       struct coda_q_data *q_data_dst;
+       u32 stridey, height;
+       u32 picture_y, picture_cb, picture_cr;
+
+       dst_buf = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
+       q_data_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+
+       if (ctx->params.rot_mode & CODA_ROT_90) {
+               stridey = q_data_dst->height;
+               height = q_data_dst->width;
+       } else {
+               stridey = q_data_dst->width;
+               height = q_data_dst->height;
+       }
+
+       /* Try to copy source buffer contents into the bitstream ringbuffer */
+       mutex_lock(&ctx->bitstream_mutex);
+       coda_fill_bitstream(ctx);
+       mutex_unlock(&ctx->bitstream_mutex);
+
+       if (coda_get_bitstream_payload(ctx) < 512 &&
+           (!(ctx->bit_stream_param & CODA_BIT_STREAM_END_FLAG))) {
+               v4l2_dbg(1, coda_debug, &dev->v4l2_dev,
+                        "bitstream payload: %d, skipping\n",
+                        coda_get_bitstream_payload(ctx));
+               schedule_work(&ctx->skip_run);
+               return -EAGAIN;
+       }
+
+       /* Run coda_start_decoding (again) if not yet initialized */
+       if (!ctx->initialized) {
+               int ret = coda_start_decoding(ctx);
+               if (ret < 0) {
+                       v4l2_err(&dev->v4l2_dev, "failed to start decoding\n");
+                       schedule_work(&ctx->skip_run);
+                       return -EAGAIN;
+               } else {
+                       ctx->initialized = 1;
+               }
+       }
+
+       /* Set rotator output */
+       picture_y = vb2_dma_contig_plane_dma_addr(dst_buf, 0);
+       if (q_data_dst->fourcc == V4L2_PIX_FMT_YVU420) {
+               /* Switch Cr and Cb for YVU420 format */
+               picture_cr = picture_y + stridey * height;
+               picture_cb = picture_cr + stridey / 2 * height / 2;
+       } else {
+               picture_cb = picture_y + stridey * height;
+               picture_cr = picture_cb + stridey / 2 * height / 2;
+       }
+       coda_write(dev, picture_y, CODA_CMD_DEC_PIC_ROT_ADDR_Y);
+       coda_write(dev, picture_cb, CODA_CMD_DEC_PIC_ROT_ADDR_CB);
+       coda_write(dev, picture_cr, CODA_CMD_DEC_PIC_ROT_ADDR_CR);
+       coda_write(dev, stridey, CODA_CMD_DEC_PIC_ROT_STRIDE);
+       coda_write(dev, CODA_ROT_MIR_ENABLE | ctx->params.rot_mode,
+                       CODA_CMD_DEC_PIC_ROT_MODE);
+
+       switch (dev->devtype->product) {
+       case CODA_DX6:
+               /* TBD */
+       case CODA_7541:
+               coda_write(dev, CODA_PRE_SCAN_EN, CODA_CMD_DEC_PIC_OPTION);
+               break;
+       }
+
+       coda_write(dev, 0, CODA_CMD_DEC_PIC_SKIP_NUM);
+
+       coda_write(dev, 0, CODA_CMD_DEC_PIC_BB_START);
+       coda_write(dev, 0, CODA_CMD_DEC_PIC_START_BYTE);
+
+       return 0;
+}
+
+static void coda_prepare_encode(struct coda_ctx *ctx)
 {
-       struct coda_ctx *ctx = m2m_priv;
        struct coda_q_data *q_data_src, *q_data_dst;
        struct vb2_buffer *src_buf, *dst_buf;
        struct coda_dev *dev = ctx->dev;
@@ -681,17 +1039,15 @@ static void coda_device_run(void *m2m_priv)
        u32 pic_stream_buffer_addr, pic_stream_buffer_size;
        u32 dst_fourcc;
 
-       mutex_lock(&dev->coda_mutex);
-
        src_buf = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
        dst_buf = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
        q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
        q_data_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
        dst_fourcc = q_data_dst->fourcc;
 
-       src_buf->v4l2_buf.sequence = ctx->isequence;
-       dst_buf->v4l2_buf.sequence = ctx->isequence;
-       ctx->isequence++;
+       src_buf->v4l2_buf.sequence = ctx->osequence;
+       dst_buf->v4l2_buf.sequence = ctx->osequence;
+       ctx->osequence++;
 
        /*
         * Workaround coda firmware BUG that only marks the first
@@ -793,16 +1149,53 @@ static void coda_device_run(void *m2m_priv)
        coda_write(dev, pic_stream_buffer_addr, CODA_CMD_ENC_PIC_BB_START);
        coda_write(dev, pic_stream_buffer_size / 1024,
                   CODA_CMD_ENC_PIC_BB_SIZE);
+}
 
-       if (dev->devtype->product == CODA_7541) {
-               coda_write(dev, CODA7_USE_BIT_ENABLE | CODA7_USE_HOST_BIT_ENABLE |
-                               CODA7_USE_ME_ENABLE | CODA7_USE_HOST_ME_ENABLE,
-                               CODA7_REG_BIT_AXI_SRAM_USE);
+static void coda_device_run(void *m2m_priv)
+{
+       struct coda_ctx *ctx = m2m_priv;
+       struct coda_dev *dev = ctx->dev;
+       int ret;
+
+       mutex_lock(&ctx->buffer_mutex);
+
+       /*
+        * If streamoff dequeued all buffers before we could get the lock,
+        * just bail out immediately.
+        */
+       if ((!v4l2_m2m_num_src_bufs_ready(ctx->m2m_ctx) &&
+           ctx->inst_type != CODA_INST_DECODER) ||
+               !v4l2_m2m_num_dst_bufs_ready(ctx->m2m_ctx)) {
+               v4l2_dbg(1, coda_debug, &dev->v4l2_dev,
+                       "%d: device_run without buffers\n", ctx->idx);
+               mutex_unlock(&ctx->buffer_mutex);
+               schedule_work(&ctx->skip_run);
+               return;
+       }
+
+       mutex_lock(&dev->coda_mutex);
+
+       if (ctx->inst_type == CODA_INST_DECODER) {
+               ret = coda_prepare_decode(ctx);
+               if (ret < 0) {
+                       mutex_unlock(&dev->coda_mutex);
+                       mutex_unlock(&ctx->buffer_mutex);
+                       /* job_finish scheduled by prepare_decode */
+                       return;
+               }
+       } else {
+               coda_prepare_encode(ctx);
        }
 
+       if (dev->devtype->product != CODA_DX6)
+               coda_write(dev, ctx->iram_info.axi_sram_use,
+                               CODA7_REG_BIT_AXI_SRAM_USE);
+
        /* 1 second timeout in case CODA locks up */
        schedule_delayed_work(&dev->timeout, HZ);
 
+       if (ctx->inst_type == CODA_INST_DECODER)
+               coda_kfifo_sync_to_device_full(ctx);
        coda_command_async(ctx, CODA_COMMAND_PIC_RUN);
 }
 
@@ -812,15 +1205,32 @@ static int coda_job_ready(void *m2m_priv)
 
        /*
         * For both 'P' and 'key' frame cases 1 picture
-        * and 1 frame are needed.
+        * and 1 frame are needed. In the decoder case,
+        * the compressed frame can be in the bitstream.
         */
-       if (!v4l2_m2m_num_src_bufs_ready(ctx->m2m_ctx) ||
-               !v4l2_m2m_num_dst_bufs_ready(ctx->m2m_ctx)) {
+       if (!v4l2_m2m_num_src_bufs_ready(ctx->m2m_ctx) &&
+           ctx->inst_type != CODA_INST_DECODER) {
                v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
                         "not ready: not enough video buffers.\n");
                return 0;
        }
 
+       if (!v4l2_m2m_num_dst_bufs_ready(ctx->m2m_ctx)) {
+               v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
+                        "not ready: not enough video capture buffers.\n");
+               return 0;
+       }
+
+       if (ctx->prescan_failed ||
+           ((ctx->inst_type == CODA_INST_DECODER) &&
+            (coda_get_bitstream_payload(ctx) < 512) &&
+            !(ctx->bit_stream_param & CODA_BIT_STREAM_END_FLAG))) {
+               v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
+                        "%d: not ready: not enough bitstream data.\n",
+                        ctx->idx);
+               return 0;
+       }
+
        if (ctx->aborting) {
                v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
                         "not ready: aborting\n");
@@ -936,7 +1346,29 @@ static int coda_buf_prepare(struct vb2_buffer *vb)
 static void coda_buf_queue(struct vb2_buffer *vb)
 {
        struct coda_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
-       v4l2_m2m_buf_queue(ctx->m2m_ctx, vb);
+       struct coda_q_data *q_data;
+
+       q_data = get_q_data(ctx, vb->vb2_queue->type);
+
+       /*
+        * In the decoder case, immediately try to copy the buffer into the
+        * bitstream ringbuffer and mark it as ready to be dequeued.
+        */
+       if (q_data->fourcc == V4L2_PIX_FMT_H264 &&
+           vb->vb2_queue->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+               /*
+                * For backwards compatibility, queuing an empty buffer marks
+                * the stream end
+                */
+               if (vb2_get_plane_payload(vb, 0) == 0)
+                       ctx->bit_stream_param |= CODA_BIT_STREAM_END_FLAG;
+               mutex_lock(&ctx->bitstream_mutex);
+               v4l2_m2m_buf_queue(ctx->m2m_ctx, vb);
+               coda_fill_bitstream(ctx);
+               mutex_unlock(&ctx->bitstream_mutex);
+       } else {
+               v4l2_m2m_buf_queue(ctx->m2m_ctx, vb);
+       }
 }
 
 static void coda_wait_prepare(struct vb2_queue *q)
@@ -951,88 +1383,488 @@ static void coda_wait_finish(struct vb2_queue *q)
        coda_lock(ctx);
 }
 
+/*
+ * Write one 32-bit word into the parameter buffer shared with the VPU.
+ * On CODA_DX6 words are written at their natural index; on the other
+ * products the index is XORed with 1, swapping adjacent 32-bit words —
+ * presumably because the parameter RAM is accessed as 64-bit words with
+ * different endianness there; confirm against the CODA7541 documentation.
+ */
+static void coda_parabuf_write(struct coda_ctx *ctx, int index, u32 value)
+{
+       struct coda_dev *dev = ctx->dev;
+       u32 *p = ctx->parabuf.vaddr;
+
+       if (dev->devtype->product == CODA_DX6)
+               p[index] = value;
+       else
+               p[index ^ 1] = value;
+}
+
+/*
+ * Allocate a DMA-coherent auxiliary buffer of @size bytes for device
+ * @dev and record the size in @buf for the matching free.
+ * Returns 0 on success or -ENOMEM if the allocation failed.
+ */
+static int coda_alloc_aux_buf(struct coda_dev *dev,
+                             struct coda_aux_buf *buf, size_t size)
+{
+       buf->vaddr = dma_alloc_coherent(&dev->plat_dev->dev, size, &buf->paddr,
+                                       GFP_KERNEL);
+       if (!buf->vaddr)
+               return -ENOMEM;
+
+       buf->size = size;
+
+       return 0;
+}
+
+/* Per-context convenience wrapper around coda_alloc_aux_buf(). */
+static inline int coda_alloc_context_buf(struct coda_ctx *ctx,
+                                        struct coda_aux_buf *buf, size_t size)
+{
+       return coda_alloc_aux_buf(ctx->dev, buf, size);
+}
+
+/*
+ * Free a buffer obtained from coda_alloc_aux_buf() and reset it, so a
+ * repeated call is a no-op (vaddr doubles as the "is allocated" flag).
+ */
+static void coda_free_aux_buf(struct coda_dev *dev,
+                             struct coda_aux_buf *buf)
+{
+       if (buf->vaddr) {
+               dma_free_coherent(&dev->plat_dev->dev, buf->size,
+                                 buf->vaddr, buf->paddr);
+               buf->vaddr = NULL;
+               buf->size = 0;
+       }
+}
+
 static void coda_free_framebuffers(struct coda_ctx *ctx)
 {
        int i;
 
-       for (i = 0; i < CODA_MAX_FRAMEBUFFERS; i++) {
-               if (ctx->internal_frames[i].vaddr) {
-                       dma_free_coherent(&ctx->dev->plat_dev->dev,
-                               ctx->internal_frames[i].size,
-                               ctx->internal_frames[i].vaddr,
-                               ctx->internal_frames[i].paddr);
-                       ctx->internal_frames[i].vaddr = NULL;
+       for (i = 0; i < CODA_MAX_FRAMEBUFFERS; i++)
+               coda_free_aux_buf(ctx->dev, &ctx->internal_frames[i]);
+}
+
+/*
+ * Allocate the VPU-internal reference frame buffers and register their
+ * Y/Cb/Cr (and, for h.264, mvcol) addresses in the parameter buffer.
+ * Returns 0 on success or a negative error code; on allocation failure
+ * all already-allocated frames are freed again.
+ */
+static int coda_alloc_framebuffers(struct coda_ctx *ctx, struct coda_q_data *q_data, u32 fourcc)
+{
+       struct coda_dev *dev = ctx->dev;
+       int height = q_data->height;
+       dma_addr_t paddr;
+       int ysize;
+       int ret;
+       int i;
+
+       /* The h.264 decoder rounds frame heights up to whole macroblocks */
+       if (ctx->codec && ctx->codec->src_fourcc == V4L2_PIX_FMT_H264)
+               height = round_up(height, 16);
+       ysize = round_up(q_data->width, 8) * height;
+
+       /* Allocate frame buffers */
+       for (i = 0; i < ctx->num_internal_frames; i++) {
+               size_t size;
+
+               size = q_data->sizeimage;
+               /*
+                * h.264 needs ysize/4 extra bytes per frame for the
+                * co-located motion vector (mvcol) buffer registered below.
+                * Grow the size actually passed to the allocator: the old
+                * code incremented internal_frames[i].size instead, which
+                * coda_alloc_context_buf() overwrites, leaving the buffer
+                * ysize/4 bytes too small for the VPU's mvcol writes.
+                */
+               if (ctx->codec->src_fourcc == V4L2_PIX_FMT_H264 &&
+                   dev->devtype->product != CODA_DX6)
+                       size += ysize/4;
+               ret = coda_alloc_context_buf(ctx, &ctx->internal_frames[i], size);
+               if (ret < 0) {
+                       coda_free_framebuffers(ctx);
+                       return ret;
                }
        }
+
+       /* Register frame buffers in the parameter buffer */
+       for (i = 0; i < ctx->num_internal_frames; i++) {
+               paddr = ctx->internal_frames[i].paddr;
+               coda_parabuf_write(ctx, i * 3 + 0, paddr); /* Y */
+               coda_parabuf_write(ctx, i * 3 + 1, paddr + ysize); /* Cb */
+               coda_parabuf_write(ctx, i * 3 + 2, paddr + ysize + ysize/4); /* Cr */
+
+               /* mvcol buffer for h.264 */
+               if (ctx->codec->src_fourcc == V4L2_PIX_FMT_H264 &&
+                   dev->devtype->product != CODA_DX6)
+                       coda_parabuf_write(ctx, 96 + i,
+                                          ctx->internal_frames[i].paddr +
+                                          ysize + ysize/4 + ysize/4);
+       }
+
+       /*
+        * mvcol buffer for mpeg4.
+        * FIXME: here i == ctx->num_internal_frames, so this indexes one
+        * past the last allocated entry of internal_frames[] and registers
+        * an unallocated address with the VPU — confirm and allocate a
+        * dedicated mvcol buffer for the MPEG-4 decoder case.
+        */
+       if ((dev->devtype->product != CODA_DX6) &&
+           (ctx->codec->src_fourcc == V4L2_PIX_FMT_MPEG4))
+               coda_parabuf_write(ctx, 97, ctx->internal_frames[i].paddr +
+                                           ysize + ysize/4 + ysize/4);
+
+       return 0;
 }
 
-static void coda_parabuf_write(struct coda_ctx *ctx, int index, u32 value)
+static int coda_h264_padding(int size, char *p)
+{
+       int nal_size;
+       int diff;
+
+       diff = size - (size & ~0x7);
+       if (diff == 0)
+               return 0;
+
+       nal_size = coda_filler_size[diff];
+       memcpy(p, coda_filler_nal, nal_size);
+
+       /* Add rbsp stop bit and trailing at the end */
+       *(p + nal_size - 1) = 0x80;
+
+       return nal_size;
+}
+
+static void coda_setup_iram(struct coda_ctx *ctx)
 {
+       struct coda_iram_info *iram_info = &ctx->iram_info;
        struct coda_dev *dev = ctx->dev;
-       u32 *p = ctx->parabuf.vaddr;
+       int ipacdc_size;
+       int bitram_size;
+       int dbk_size;
+       int ovl_size;
+       int mb_width;
+       int me_size;
+       int size;
+
+       memset(iram_info, 0, sizeof(*iram_info));
+       size = dev->iram_size;
 
        if (dev->devtype->product == CODA_DX6)
-               p[index] = value;
-       else
-               p[index ^ 1] = value;
+               return;
+
+       if (ctx->inst_type == CODA_INST_ENCODER) {
+               struct coda_q_data *q_data_src;
+
+               q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+               mb_width = DIV_ROUND_UP(q_data_src->width, 16);
+
+               /* Prioritize in case IRAM is too small for everything */
+               me_size = round_up(round_up(q_data_src->width, 16) * 36 + 2048,
+                                  1024);
+               iram_info->search_ram_size = me_size;
+               if (size >= iram_info->search_ram_size) {
+                       if (dev->devtype->product == CODA_7541)
+                               iram_info->axi_sram_use |= CODA7_USE_HOST_ME_ENABLE;
+                       iram_info->search_ram_paddr = dev->iram_paddr;
+                       size -= iram_info->search_ram_size;
+               } else {
+                       pr_err("IRAM is smaller than the search ram size\n");
+                       goto out;
+               }
+
+               /* Only H.264BP and H.263P3 are considered */
+               dbk_size = round_up(128 * mb_width, 1024);
+               if (size >= dbk_size) {
+                       iram_info->axi_sram_use |= CODA7_USE_HOST_DBK_ENABLE;
+                       iram_info->buf_dbk_y_use = dev->iram_paddr +
+                                                  iram_info->search_ram_size;
+                       iram_info->buf_dbk_c_use = iram_info->buf_dbk_y_use +
+                                                  dbk_size / 2;
+                       size -= dbk_size;
+               } else {
+                       goto out;
+               }
+
+               bitram_size = round_up(128 * mb_width, 1024);
+               if (size >= bitram_size) {
+                       iram_info->axi_sram_use |= CODA7_USE_HOST_BIT_ENABLE;
+                       iram_info->buf_bit_use = iram_info->buf_dbk_c_use +
+                                                dbk_size / 2;
+                       size -= bitram_size;
+               } else {
+                       goto out;
+               }
+
+               ipacdc_size = round_up(128 * mb_width, 1024);
+               if (size >= ipacdc_size) {
+                       iram_info->axi_sram_use |= CODA7_USE_HOST_IP_ENABLE;
+                       iram_info->buf_ip_ac_dc_use = iram_info->buf_bit_use +
+                                                     bitram_size;
+                       size -= ipacdc_size;
+               }
+
+               /* OVL and BTP disabled for encoder */
+       } else if (ctx->inst_type == CODA_INST_DECODER) {
+               struct coda_q_data *q_data_dst;
+               int mb_height;
+
+               q_data_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+               mb_width = DIV_ROUND_UP(q_data_dst->width, 16);
+               mb_height = DIV_ROUND_UP(q_data_dst->height, 16);
+
+               dbk_size = round_up(256 * mb_width, 1024);
+               if (size >= dbk_size) {
+                       iram_info->axi_sram_use |= CODA7_USE_HOST_DBK_ENABLE;
+                       iram_info->buf_dbk_y_use = dev->iram_paddr;
+                       iram_info->buf_dbk_c_use = dev->iram_paddr +
+                                                  dbk_size / 2;
+                       size -= dbk_size;
+               } else {
+                       goto out;
+               }
+
+               bitram_size = round_up(128 * mb_width, 1024);
+               if (size >= bitram_size) {
+                       iram_info->axi_sram_use |= CODA7_USE_HOST_BIT_ENABLE;
+                       iram_info->buf_bit_use = iram_info->buf_dbk_c_use +
+                                                dbk_size / 2;
+                       size -= bitram_size;
+               } else {
+                       goto out;
+               }
+
+               ipacdc_size = round_up(128 * mb_width, 1024);
+               if (size >= ipacdc_size) {
+                       iram_info->axi_sram_use |= CODA7_USE_HOST_IP_ENABLE;
+                       iram_info->buf_ip_ac_dc_use = iram_info->buf_bit_use +
+                                                     bitram_size;
+                       size -= ipacdc_size;
+               } else {
+                       goto out;
+               }
+
+               ovl_size = round_up(80 * mb_width, 1024);
+       }
+
+out:
+       switch (dev->devtype->product) {
+       case CODA_DX6:
+               break;
+       case CODA_7541:
+               /* i.MX53 uses secondary AXI for IRAM access */
+               if (iram_info->axi_sram_use & CODA7_USE_HOST_BIT_ENABLE)
+                       iram_info->axi_sram_use |= CODA7_USE_BIT_ENABLE;
+               if (iram_info->axi_sram_use & CODA7_USE_HOST_IP_ENABLE)
+                       iram_info->axi_sram_use |= CODA7_USE_IP_ENABLE;
+               if (iram_info->axi_sram_use & CODA7_USE_HOST_DBK_ENABLE)
+                       iram_info->axi_sram_use |= CODA7_USE_DBK_ENABLE;
+               if (iram_info->axi_sram_use & CODA7_USE_HOST_OVL_ENABLE)
+                       iram_info->axi_sram_use |= CODA7_USE_OVL_ENABLE;
+               if (iram_info->axi_sram_use & CODA7_USE_HOST_ME_ENABLE)
+                       iram_info->axi_sram_use |= CODA7_USE_ME_ENABLE;
+       }
+
+       if (!(iram_info->axi_sram_use & CODA7_USE_HOST_IP_ENABLE))
+               v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
+                        "IRAM smaller than needed\n");
+
+       if (dev->devtype->product == CODA_7541) {
+               /* TODO - Enabling these causes picture errors on CODA7541 */
+               if (ctx->inst_type == CODA_INST_DECODER) {
+                       /* fw 1.4.50 */
+                       iram_info->axi_sram_use &= ~(CODA7_USE_HOST_IP_ENABLE |
+                                                    CODA7_USE_IP_ENABLE);
+               } else {
+                       /* fw 13.4.29 */
+                       iram_info->axi_sram_use &= ~(CODA7_USE_HOST_IP_ENABLE |
+                                                    CODA7_USE_HOST_DBK_ENABLE |
+                                                    CODA7_USE_IP_ENABLE |
+                                                    CODA7_USE_DBK_ENABLE);
+               }
+       }
 }
 
-static int coda_alloc_framebuffers(struct coda_ctx *ctx, struct coda_q_data *q_data, u32 fourcc)
+static void coda_free_context_buffers(struct coda_ctx *ctx)
 {
        struct coda_dev *dev = ctx->dev;
 
-       int height = q_data->height;
-       dma_addr_t paddr;
-       int ysize;
-       int i;
+       coda_free_aux_buf(dev, &ctx->slicebuf);
+       coda_free_aux_buf(dev, &ctx->psbuf);
+       if (dev->devtype->product != CODA_DX6)
+               coda_free_aux_buf(dev, &ctx->workbuf);
+}
+
+/*
+ * Allocate the per-instance context buffers: slice buffer (h.264 only),
+ * PS buffer and work buffer (CODA7541 only). Returns 0 on success or a
+ * negative error code; partially allocated buffers are freed on failure.
+ * -EBUSY is returned if any of the buffers is still allocated from a
+ * previous run.
+ */
+static int coda_alloc_context_buffers(struct coda_ctx *ctx,
+                                     struct coda_q_data *q_data)
+{
+       struct coda_dev *dev = ctx->dev;
+       size_t size;
+       int ret;
+
+       /* Only CODA7541 needs a per-instance work buffer */
+       switch (dev->devtype->product) {
+       case CODA_7541:
+               size = CODA7_WORK_BUF_SIZE;
+               break;
+       default:
+               return 0;
+       }
+
+       if (ctx->psbuf.vaddr) {
+               v4l2_err(&dev->v4l2_dev, "psmembuf still allocated\n");
+               return -EBUSY;
+       }
+       if (ctx->slicebuf.vaddr) {
+               v4l2_err(&dev->v4l2_dev, "slicebuf still allocated\n");
+               return -EBUSY;
+       }
+       if (ctx->workbuf.vaddr) {
+               v4l2_err(&dev->v4l2_dev, "context buffer still allocated\n");
+               /* Match the checks above: the old code set ret = -EBUSY
+                * but then returned -ENOMEM. */
+               return -EBUSY;
+       }
+
+       if (q_data->fourcc == V4L2_PIX_FMT_H264) {
+               size_t slice_size;
+
+               /*
+                * Worst case slice size. Use a separate local so that the
+                * work buffer below is still allocated with
+                * CODA7_WORK_BUF_SIZE; the old code clobbered 'size' here,
+                * which made the work buffer the size of the slice buffer —
+                * undersized for small frames. Confirm against the CODA7541
+                * firmware requirements.
+                */
+               slice_size = (DIV_ROUND_UP(q_data->width, 16) *
+                             DIV_ROUND_UP(q_data->height, 16)) * 3200 / 8 + 512;
+               ret = coda_alloc_context_buf(ctx, &ctx->slicebuf, slice_size);
+               if (ret < 0) {
+                       v4l2_err(&dev->v4l2_dev, "failed to allocate %d byte slice buffer",
+                                ctx->slicebuf.size);
+                       return ret;
+               }
+       }
+
+       if (dev->devtype->product == CODA_7541) {
+               ret = coda_alloc_context_buf(ctx, &ctx->psbuf, CODA7_PS_BUF_SIZE);
+               if (ret < 0) {
+                       v4l2_err(&dev->v4l2_dev, "failed to allocate psmem buffer");
+                       goto err;
+               }
+       }
+
+       ret = coda_alloc_context_buf(ctx, &ctx->workbuf, size);
+       if (ret < 0) {
+               v4l2_err(&dev->v4l2_dev, "failed to allocate %d byte context buffer",
+                        ctx->workbuf.size);
+               goto err;
+       }
+
+       return 0;
+
+err:
+       coda_free_context_buffers(ctx);
+       return ret;
+}
+
+static int coda_start_decoding(struct coda_ctx *ctx)
+{
+       struct coda_q_data *q_data_src, *q_data_dst;
+       u32 bitstream_buf, bitstream_size;
+       struct coda_dev *dev = ctx->dev;
+       int width, height;
+       u32 src_fourcc;
+       u32 val;
+       int ret;
+
+       /* Start decoding */
+       q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+       q_data_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+       bitstream_buf = ctx->bitstream.paddr;
+       bitstream_size = ctx->bitstream.size;
+       src_fourcc = q_data_src->fourcc;
+
+       coda_write(dev, ctx->parabuf.paddr, CODA_REG_BIT_PARA_BUF_ADDR);
+
+       /* Update coda bitstream read and write pointers from kfifo */
+       coda_kfifo_sync_to_device_full(ctx);
+
+       ctx->display_idx = -1;
+       ctx->frm_dis_flg = 0;
+       coda_write(dev, 0, CODA_REG_BIT_FRM_DIS_FLG(ctx->reg_idx));
+
+       coda_write(dev, CODA_BIT_DEC_SEQ_INIT_ESCAPE,
+                       CODA_REG_BIT_BIT_STREAM_PARAM);
+
+       coda_write(dev, bitstream_buf, CODA_CMD_DEC_SEQ_BB_START);
+       coda_write(dev, bitstream_size / 1024, CODA_CMD_DEC_SEQ_BB_SIZE);
+       val = 0;
+       if (dev->devtype->product == CODA_7541)
+               val |= CODA_REORDER_ENABLE;
+       coda_write(dev, val, CODA_CMD_DEC_SEQ_OPTION);
+
+       ctx->params.codec_mode = ctx->codec->mode;
+       ctx->params.codec_mode_aux = 0;
+       if (src_fourcc == V4L2_PIX_FMT_H264) {
+               if (dev->devtype->product == CODA_7541) {
+                       coda_write(dev, ctx->psbuf.paddr,
+                                       CODA_CMD_DEC_SEQ_PS_BB_START);
+                       coda_write(dev, (CODA7_PS_BUF_SIZE / 1024),
+                                       CODA_CMD_DEC_SEQ_PS_BB_SIZE);
+               }
+       }
+
+       if (coda_command_sync(ctx, CODA_COMMAND_SEQ_INIT)) {
+               v4l2_err(&dev->v4l2_dev, "CODA_COMMAND_SEQ_INIT timeout\n");
+               coda_write(dev, 0, CODA_REG_BIT_BIT_STREAM_PARAM);
+               return -ETIMEDOUT;
+       }
+
+       /* Update kfifo out pointer from coda bitstream read pointer */
+       coda_kfifo_sync_from_device(ctx);
 
-       ysize = round_up(q_data->width, 8) * height;
+       coda_write(dev, 0, CODA_REG_BIT_BIT_STREAM_PARAM);
 
-       /* Allocate frame buffers */
-       ctx->num_internal_frames = CODA_MAX_FRAMEBUFFERS;
-       for (i = 0; i < ctx->num_internal_frames; i++) {
-               ctx->internal_frames[i].size = q_data->sizeimage;
-               if (fourcc == V4L2_PIX_FMT_H264 && dev->devtype->product != CODA_DX6)
-                       ctx->internal_frames[i].size += ysize/4;
-               ctx->internal_frames[i].vaddr = dma_alloc_coherent(
-                               &dev->plat_dev->dev, ctx->internal_frames[i].size,
-                               &ctx->internal_frames[i].paddr, GFP_KERNEL);
-               if (!ctx->internal_frames[i].vaddr) {
-                       coda_free_framebuffers(ctx);
-                       return -ENOMEM;
-               }
+       if (coda_read(dev, CODA_RET_DEC_SEQ_SUCCESS) == 0) {
+               v4l2_err(&dev->v4l2_dev,
+                       "CODA_COMMAND_SEQ_INIT failed, error code = %d\n",
+                       coda_read(dev, CODA_RET_DEC_SEQ_ERR_REASON));
+               return -EAGAIN;
        }
 
-       /* Register frame buffers in the parameter buffer */
-       for (i = 0; i < ctx->num_internal_frames; i++) {
-               paddr = ctx->internal_frames[i].paddr;
-               coda_parabuf_write(ctx, i * 3 + 0, paddr); /* Y */
-               coda_parabuf_write(ctx, i * 3 + 1, paddr + ysize); /* Cb */
-               coda_parabuf_write(ctx, i * 3 + 2, paddr + ysize + ysize/4); /* Cr */
+       val = coda_read(dev, CODA_RET_DEC_SEQ_SRC_SIZE);
+       if (dev->devtype->product == CODA_DX6) {
+               width = (val >> CODADX6_PICWIDTH_OFFSET) & CODADX6_PICWIDTH_MASK;
+               height = val & CODADX6_PICHEIGHT_MASK;
+       } else {
+               width = (val >> CODA7_PICWIDTH_OFFSET) & CODA7_PICWIDTH_MASK;
+               height = val & CODA7_PICHEIGHT_MASK;
+       }
 
-               if (dev->devtype->product != CODA_DX6 && fourcc == V4L2_PIX_FMT_H264)
-                       coda_parabuf_write(ctx, 96 + i, ctx->internal_frames[i].paddr + ysize + ysize/4 + ysize/4);
+       if (width > q_data_dst->width || height > q_data_dst->height) {
+               v4l2_err(&dev->v4l2_dev, "stream is %dx%d, not %dx%d\n",
+                        width, height, q_data_dst->width, q_data_dst->height);
+               return -EINVAL;
        }
 
-       return 0;
-}
+       width = round_up(width, 16);
+       height = round_up(height, 16);
 
-static int coda_h264_padding(int size, char *p)
-{
-       int nal_size;
-       int diff;
+       v4l2_dbg(1, coda_debug, &dev->v4l2_dev, "%s instance %d now: %dx%d\n",
+                __func__, ctx->idx, width, height);
 
-       diff = size - (size & ~0x7);
-       if (diff == 0)
-               return 0;
+       ctx->num_internal_frames = coda_read(dev, CODA_RET_DEC_SEQ_FRAME_NEED) + 1;
+       if (ctx->num_internal_frames > CODA_MAX_FRAMEBUFFERS) {
+               v4l2_err(&dev->v4l2_dev,
+                        "not enough framebuffers to decode (%d < %d)\n",
+                        CODA_MAX_FRAMEBUFFERS, ctx->num_internal_frames);
+               return -EINVAL;
+       }
 
-       nal_size = coda_filler_size[diff];
-       memcpy(p, coda_filler_nal, nal_size);
+       ret = coda_alloc_framebuffers(ctx, q_data_dst, src_fourcc);
+       if (ret < 0)
+               return ret;
 
-       /* Add rbsp stop bit and trailing at the end */
-       *(p + nal_size - 1) = 0x80;
+       /* Tell the decoder how many frame buffers we allocated. */
+       coda_write(dev, ctx->num_internal_frames, CODA_CMD_SET_FRAME_BUF_NUM);
+       coda_write(dev, width, CODA_CMD_SET_FRAME_BUF_STRIDE);
 
-       return nal_size;
+       if (dev->devtype->product != CODA_DX6) {
+               /* Set secondary AXI IRAM */
+               coda_setup_iram(ctx);
+
+               coda_write(dev, ctx->iram_info.buf_bit_use,
+                               CODA7_CMD_SET_FRAME_AXI_BIT_ADDR);
+               coda_write(dev, ctx->iram_info.buf_ip_ac_dc_use,
+                               CODA7_CMD_SET_FRAME_AXI_IPACDC_ADDR);
+               coda_write(dev, ctx->iram_info.buf_dbk_y_use,
+                               CODA7_CMD_SET_FRAME_AXI_DBKY_ADDR);
+               coda_write(dev, ctx->iram_info.buf_dbk_c_use,
+                               CODA7_CMD_SET_FRAME_AXI_DBKC_ADDR);
+               coda_write(dev, ctx->iram_info.buf_ovl_use,
+                               CODA7_CMD_SET_FRAME_AXI_OVL_ADDR);
+       }
+
+       if (src_fourcc == V4L2_PIX_FMT_H264) {
+               coda_write(dev, ctx->slicebuf.paddr,
+                               CODA_CMD_SET_FRAME_SLICE_BB_START);
+               coda_write(dev, ctx->slicebuf.size / 1024,
+                               CODA_CMD_SET_FRAME_SLICE_BB_SIZE);
+       }
+
+       if (dev->devtype->product == CODA_7541) {
+               int max_mb_x = 1920 / 16;
+               int max_mb_y = 1088 / 16;
+               int max_mb_num = max_mb_x * max_mb_y;
+               coda_write(dev, max_mb_num << 16 | max_mb_x << 8 | max_mb_y,
+                               CODA7_CMD_SET_FRAME_MAX_DEC_SIZE);
+       }
+
+       if (coda_command_sync(ctx, CODA_COMMAND_SET_FRAME_BUF)) {
+               v4l2_err(&ctx->dev->v4l2_dev,
+                        "CODA_COMMAND_SET_FRAME_BUF timeout\n");
+               return -ETIMEDOUT;
+       }
+
+       return 0;
 }
 
 static int coda_encode_header(struct coda_ctx *ctx, struct vb2_buffer *buf,
@@ -1050,7 +1882,7 @@ static int coda_encode_header(struct coda_ctx *ctx, struct vb2_buffer *buf,
                v4l2_err(&dev->v4l2_dev, "CODA_COMMAND_ENCODE_HEADER timeout\n");
                return ret;
        }
-       *size = coda_read(dev, CODA_REG_BIT_WR_PTR(ctx->idx)) -
+       *size = coda_read(dev, CODA_REG_BIT_WR_PTR(ctx->reg_idx)) -
                coda_read(dev, CODA_CMD_ENC_HEADER_BB_START);
        memcpy(header, vb2_plane_vaddr(buf, 0), *size);
 
@@ -1069,26 +1901,36 @@ static int coda_start_streaming(struct vb2_queue *q, unsigned int count)
        u32 value;
        int ret = 0;
 
-       if (count < 1)
-               return -EINVAL;
+       q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+       if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+               if (q_data_src->fourcc == V4L2_PIX_FMT_H264) {
+                       if (coda_get_bitstream_payload(ctx) < 512)
+                               return -EINVAL;
+               } else {
+                       if (count < 1)
+                               return -EINVAL;
+               }
 
-       if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
                ctx->streamon_out = 1;
-       else
-               ctx->streamon_cap = 1;
 
-       q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
-       if (ctx->streamon_out) {
                if (coda_format_is_yuv(q_data_src->fourcc))
                        ctx->inst_type = CODA_INST_ENCODER;
                else
                        ctx->inst_type = CODA_INST_DECODER;
+       } else {
+               if (count < 1)
+                       return -EINVAL;
+
+               ctx->streamon_cap = 1;
        }
 
        /* Don't start the coda unless both queues are on */
        if (!(ctx->streamon_out & ctx->streamon_cap))
                return 0;
 
+       /* Allow device_run with no buffers queued and after streamoff */
+       v4l2_m2m_set_src_buffered(ctx->m2m_ctx, true);
+
        ctx->gopcounter = ctx->params.gop_size - 1;
        buf = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
        bitstream_buf = vb2_dma_contig_plane_dma_addr(buf, 0);
@@ -1103,6 +1945,25 @@ static int coda_start_streaming(struct vb2_queue *q, unsigned int count)
                return -EINVAL;
        }
 
+       /* Allocate per-instance buffers */
+       ret = coda_alloc_context_buffers(ctx, q_data_src);
+       if (ret < 0)
+               return ret;
+
+       if (ctx->inst_type == CODA_INST_DECODER) {
+               mutex_lock(&dev->coda_mutex);
+               ret = coda_start_decoding(ctx);
+               mutex_unlock(&dev->coda_mutex);
+               if (ret == -EAGAIN) {
+                       return 0;
+               } else if (ret < 0) {
+                       return ret;
+               } else {
+                       ctx->initialized = 1;
+                       return 0;
+               }
+       }
+
        if (!coda_is_initialized(dev)) {
                v4l2_err(v4l2_dev, "coda is not initialized.\n");
                return -EFAULT;
@@ -1111,8 +1972,8 @@ static int coda_start_streaming(struct vb2_queue *q, unsigned int count)
        mutex_lock(&dev->coda_mutex);
 
        coda_write(dev, ctx->parabuf.paddr, CODA_REG_BIT_PARA_BUF_ADDR);
-       coda_write(dev, bitstream_buf, CODA_REG_BIT_RD_PTR(ctx->idx));
-       coda_write(dev, bitstream_buf, CODA_REG_BIT_WR_PTR(ctx->idx));
+       coda_write(dev, bitstream_buf, CODA_REG_BIT_RD_PTR(ctx->reg_idx));
+       coda_write(dev, bitstream_buf, CODA_REG_BIT_WR_PTR(ctx->reg_idx));
        switch (dev->devtype->product) {
        case CODA_DX6:
                coda_write(dev, CODADX6_STREAM_BUF_DYNALLOC_EN |
@@ -1207,6 +2068,8 @@ static int coda_start_streaming(struct vb2_queue *q, unsigned int count)
        }
        coda_write(dev, value, CODA_CMD_ENC_SEQ_OPTION);
 
+       coda_setup_iram(ctx);
+
        if (dst_fourcc == V4L2_PIX_FMT_H264) {
                value  = (FMO_SLICE_SAVE_BUF_SIZE << 7);
                value |= (0 & CODA_FMOPARAM_TYPE_MASK) << CODA_FMOPARAM_TYPE_OFFSET;
@@ -1214,8 +2077,10 @@ static int coda_start_streaming(struct vb2_queue *q, unsigned int count)
                if (dev->devtype->product == CODA_DX6) {
                        coda_write(dev, value, CODADX6_CMD_ENC_SEQ_FMO);
                } else {
-                       coda_write(dev, dev->iram_paddr, CODA7_CMD_ENC_SEQ_SEARCH_BASE);
-                       coda_write(dev, 48 * 1024, CODA7_CMD_ENC_SEQ_SEARCH_SIZE);
+                       coda_write(dev, ctx->iram_info.search_ram_paddr,
+                                       CODA7_CMD_ENC_SEQ_SEARCH_BASE);
+                       coda_write(dev, ctx->iram_info.search_ram_size,
+                                       CODA7_CMD_ENC_SEQ_SEARCH_SIZE);
                }
        }
 
@@ -1231,6 +2096,7 @@ static int coda_start_streaming(struct vb2_queue *q, unsigned int count)
                goto out;
        }
 
+       ctx->num_internal_frames = 2;
        ret = coda_alloc_framebuffers(ctx, q_data_src, dst_fourcc);
        if (ret < 0) {
                v4l2_err(v4l2_dev, "failed to allocate framebuffers\n");
@@ -1239,13 +2105,20 @@ static int coda_start_streaming(struct vb2_queue *q, unsigned int count)
 
        coda_write(dev, ctx->num_internal_frames, CODA_CMD_SET_FRAME_BUF_NUM);
        coda_write(dev, round_up(q_data_src->width, 8), CODA_CMD_SET_FRAME_BUF_STRIDE);
+       if (dev->devtype->product == CODA_7541)
+               coda_write(dev, round_up(q_data_src->width, 8),
+                               CODA7_CMD_SET_FRAME_SOURCE_BUF_STRIDE);
        if (dev->devtype->product != CODA_DX6) {
-               coda_write(dev, round_up(q_data_src->width, 8), CODA7_CMD_SET_FRAME_SOURCE_BUF_STRIDE);
-               coda_write(dev, dev->iram_paddr + 48 * 1024, CODA7_CMD_SET_FRAME_AXI_DBKY_ADDR);
-               coda_write(dev, dev->iram_paddr + 53 * 1024, CODA7_CMD_SET_FRAME_AXI_DBKC_ADDR);
-               coda_write(dev, dev->iram_paddr + 58 * 1024, CODA7_CMD_SET_FRAME_AXI_BIT_ADDR);
-               coda_write(dev, dev->iram_paddr + 68 * 1024, CODA7_CMD_SET_FRAME_AXI_IPACDC_ADDR);
-               coda_write(dev, 0x0, CODA7_CMD_SET_FRAME_AXI_OVL_ADDR);
+               coda_write(dev, ctx->iram_info.buf_bit_use,
+                               CODA7_CMD_SET_FRAME_AXI_BIT_ADDR);
+               coda_write(dev, ctx->iram_info.buf_ip_ac_dc_use,
+                               CODA7_CMD_SET_FRAME_AXI_IPACDC_ADDR);
+               coda_write(dev, ctx->iram_info.buf_dbk_y_use,
+                               CODA7_CMD_SET_FRAME_AXI_DBKY_ADDR);
+               coda_write(dev, ctx->iram_info.buf_dbk_c_use,
+                               CODA7_CMD_SET_FRAME_AXI_DBKC_ADDR);
+               coda_write(dev, ctx->iram_info.buf_ovl_use,
+                               CODA7_CMD_SET_FRAME_AXI_OVL_ADDR);
        }
        ret = coda_command_sync(ctx, CODA_COMMAND_SET_FRAME_BUF);
        if (ret < 0) {
@@ -1326,32 +2199,26 @@ static int coda_stop_streaming(struct vb2_queue *q)
        struct coda_dev *dev = ctx->dev;
 
        if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
-               v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
+               v4l2_dbg(1, coda_debug, &dev->v4l2_dev,
                         "%s: output\n", __func__);
                ctx->streamon_out = 0;
+
+               ctx->bit_stream_param |= CODA_BIT_STREAM_END_FLAG;
+
+               ctx->isequence = 0;
        } else {
-               v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
+               v4l2_dbg(1, coda_debug, &dev->v4l2_dev,
                         "%s: capture\n", __func__);
                ctx->streamon_cap = 0;
-       }
-
-       /* Don't stop the coda unless both queues are off */
-       if (ctx->streamon_out || ctx->streamon_cap)
-               return 0;
 
-       cancel_delayed_work(&dev->timeout);
-
-       mutex_lock(&dev->coda_mutex);
-       v4l2_dbg(1, coda_debug, &dev->v4l2_dev,
-                "%s: sent command 'SEQ_END' to coda\n", __func__);
-       if (coda_command_sync(ctx, CODA_COMMAND_SEQ_END)) {
-               v4l2_err(&dev->v4l2_dev,
-                        "CODA_COMMAND_SEQ_END failed\n");
-               return -ETIMEDOUT;
+               ctx->osequence = 0;
        }
-       mutex_unlock(&dev->coda_mutex);
 
-       coda_free_framebuffers(ctx);
+       if (!ctx->streamon_out && !ctx->streamon_cap) {
+               kfifo_init(&ctx->bitstream_fifo,
+                       ctx->bitstream.vaddr, ctx->bitstream.size);
+               ctx->runcounter = 0;
+       }
 
        return 0;
 }
@@ -1511,7 +2378,7 @@ static int coda_open(struct file *file)
 {
        struct coda_dev *dev = video_drvdata(file);
        struct coda_ctx *ctx = NULL;
-       int ret = 0;
+       int ret;
        int idx;
 
        idx = coda_next_free_instance(dev);
@@ -1523,12 +2390,19 @@ static int coda_open(struct file *file)
        if (!ctx)
                return -ENOMEM;
 
+       INIT_WORK(&ctx->skip_run, coda_skip_run);
        v4l2_fh_init(&ctx->fh, video_devdata(file));
        file->private_data = &ctx->fh;
        v4l2_fh_add(&ctx->fh);
        ctx->dev = dev;
        ctx->idx = idx;
-
+       switch (dev->devtype->product) {
+       case CODA_7541:
+               ctx->reg_idx = 0;
+               break;
+       default:
+               ctx->reg_idx = idx;
+       }
        set_default_params(ctx);
        ctx->m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx,
                                         &coda_queue_init);
@@ -1547,13 +2421,24 @@ static int coda_open(struct file *file)
 
        ctx->fh.ctrl_handler = &ctx->ctrls;
 
-       ctx->parabuf.vaddr = dma_alloc_coherent(&dev->plat_dev->dev,
-                       CODA_PARA_BUF_SIZE, &ctx->parabuf.paddr, GFP_KERNEL);
-       if (!ctx->parabuf.vaddr) {
+       ret = coda_alloc_context_buf(ctx, &ctx->parabuf, CODA_PARA_BUF_SIZE);
+       if (ret < 0) {
                v4l2_err(&dev->v4l2_dev, "failed to allocate parabuf");
+               goto err;
+       }
+
+       ctx->bitstream.size = CODA_MAX_FRAME_SIZE;
+       ctx->bitstream.vaddr = dma_alloc_writecombine(&dev->plat_dev->dev,
+                       ctx->bitstream.size, &ctx->bitstream.paddr, GFP_KERNEL);
+       if (!ctx->bitstream.vaddr) {
+               v4l2_err(&dev->v4l2_dev, "failed to allocate bitstream ringbuffer");
                ret = -ENOMEM;
                goto err;
        }
+       kfifo_init(&ctx->bitstream_fifo,
+               ctx->bitstream.vaddr, ctx->bitstream.size);
+       mutex_init(&ctx->bitstream_mutex);
+       mutex_init(&ctx->buffer_mutex);
 
        coda_lock(ctx);
        list_add(&ctx->list, &dev->instances);
@@ -1582,13 +2467,34 @@ static int coda_release(struct file *file)
        v4l2_dbg(1, coda_debug, &dev->v4l2_dev, "Releasing instance %p\n",
                 ctx);
 
+       /* If this instance is running, call .job_abort and wait for it to end */
+       v4l2_m2m_ctx_release(ctx->m2m_ctx);
+
+       /* In case the instance was not running, we still need to call SEQ_END */
+       mutex_lock(&dev->coda_mutex);
+       v4l2_dbg(1, coda_debug, &dev->v4l2_dev,
+                "%s: sent command 'SEQ_END' to coda\n", __func__);
+       if (coda_command_sync(ctx, CODA_COMMAND_SEQ_END)) {
+               v4l2_err(&dev->v4l2_dev,
+                        "CODA_COMMAND_SEQ_END failed\n");
+               mutex_unlock(&dev->coda_mutex);
+               return -ETIMEDOUT;
+       }
+       mutex_unlock(&dev->coda_mutex);
+
+       coda_free_framebuffers(ctx);
+
        coda_lock(ctx);
        list_del(&ctx->list);
        coda_unlock(ctx);
 
-       dma_free_coherent(&dev->plat_dev->dev, CODA_PARA_BUF_SIZE,
-               ctx->parabuf.vaddr, ctx->parabuf.paddr);
-       v4l2_m2m_ctx_release(ctx->m2m_ctx);
+       dma_free_writecombine(&dev->plat_dev->dev, ctx->bitstream.size,
+               ctx->bitstream.vaddr, ctx->bitstream.paddr);
+       coda_free_context_buffers(ctx);
+       if (ctx->dev->devtype->product == CODA_DX6)
+               coda_free_aux_buf(dev, &ctx->workbuf);
+
+       coda_free_aux_buf(dev, &ctx->parabuf);
        v4l2_ctrl_handler_free(&ctx->ctrls);
        clk_disable_unprepare(dev->clk_per);
        clk_disable_unprepare(dev->clk_ahb);
@@ -1628,55 +2534,180 @@ static const struct v4l2_file_operations coda_fops = {
        .mmap           = coda_mmap,
 };
 
-static irqreturn_t coda_irq_handler(int irq, void *data)
+static void coda_finish_decode(struct coda_ctx *ctx)
 {
-       struct vb2_buffer *src_buf, *dst_buf;
-       struct coda_dev *dev = data;
-       u32 wr_ptr, start_ptr;
-       struct coda_ctx *ctx;
+       struct coda_dev *dev = ctx->dev;
+       struct coda_q_data *q_data_src;
+       struct coda_q_data *q_data_dst;
+       struct vb2_buffer *dst_buf;
+       int width, height;
+       int decoded_idx;
+       int display_idx;
+       u32 src_fourcc;
+       int success;
+       u32 val;
 
-       cancel_delayed_work(&dev->timeout);
+       dst_buf = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
 
-       /* read status register to attend the IRQ */
-       coda_read(dev, CODA_REG_BIT_INT_STATUS);
-       coda_write(dev, CODA_REG_BIT_INT_CLEAR_SET,
-                     CODA_REG_BIT_INT_CLEAR);
+       /* Update kfifo out pointer from coda bitstream read pointer */
+       coda_kfifo_sync_from_device(ctx);
 
-       ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);
-       if (ctx == NULL) {
-               v4l2_err(&dev->v4l2_dev, "Instance released before the end of transaction\n");
-               mutex_unlock(&dev->coda_mutex);
-               return IRQ_HANDLED;
+       /*
+        * in stream-end mode, the read pointer can overshoot the write pointer
+        * by up to 512 bytes
+        */
+       if (ctx->bit_stream_param & CODA_BIT_STREAM_END_FLAG) {
+               if (coda_get_bitstream_payload(ctx) >= 0x100000 - 512)
+                       kfifo_init(&ctx->bitstream_fifo,
+                               ctx->bitstream.vaddr, ctx->bitstream.size);
        }
 
-       if (ctx->aborting) {
-               v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
-                        "task has been aborted\n");
-               mutex_unlock(&dev->coda_mutex);
-               return IRQ_HANDLED;
+       q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+       src_fourcc = q_data_src->fourcc;
+
+       val = coda_read(dev, CODA_RET_DEC_PIC_SUCCESS);
+       if (val != 1)
+               pr_err("DEC_PIC_SUCCESS = %d\n", val);
+
+       success = val & 0x1;
+       if (!success)
+               v4l2_err(&dev->v4l2_dev, "decode failed\n");
+
+       if (src_fourcc == V4L2_PIX_FMT_H264) {
+               if (val & (1 << 3))
+                       v4l2_err(&dev->v4l2_dev,
+                                "insufficient PS buffer space (%d bytes)\n",
+                                ctx->psbuf.size);
+               if (val & (1 << 2))
+                       v4l2_err(&dev->v4l2_dev,
+                                "insufficient slice buffer space (%d bytes)\n",
+                                ctx->slicebuf.size);
        }
 
-       if (coda_isbusy(ctx->dev)) {
-               v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
-                        "coda is still busy!!!!\n");
-               return IRQ_NONE;
+       val = coda_read(dev, CODA_RET_DEC_PIC_SIZE);
+       width = (val >> 16) & 0xffff;
+       height = val & 0xffff;
+
+       q_data_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+
+       val = coda_read(dev, CODA_RET_DEC_PIC_TYPE);
+       if ((val & 0x7) == 0) {
+               dst_buf->v4l2_buf.flags |= V4L2_BUF_FLAG_KEYFRAME;
+               dst_buf->v4l2_buf.flags &= ~V4L2_BUF_FLAG_PFRAME;
+       } else {
+               dst_buf->v4l2_buf.flags |= V4L2_BUF_FLAG_PFRAME;
+               dst_buf->v4l2_buf.flags &= ~V4L2_BUF_FLAG_KEYFRAME;
+       }
+
+       val = coda_read(dev, CODA_RET_DEC_PIC_ERR_MB);
+       if (val > 0)
+               v4l2_err(&dev->v4l2_dev,
+                        "errors in %d macroblocks\n", val);
+
+       if (dev->devtype->product == CODA_7541) {
+               val = coda_read(dev, CODA_RET_DEC_PIC_OPTION);
+               if (val == 0) {
+                       /* not enough bitstream data */
+                       v4l2_dbg(1, coda_debug, &dev->v4l2_dev,
+                                "prescan failed: %d\n", val);
+                       ctx->prescan_failed = true;
+                       return;
+               }
+       }
+
+       ctx->frm_dis_flg = coda_read(dev, CODA_REG_BIT_FRM_DIS_FLG(ctx->reg_idx));
+
+       /*
+        * The previous display frame was copied out by the rotator,
+        * now it can be overwritten again
+        */
+       if (ctx->display_idx >= 0 &&
+           ctx->display_idx < ctx->num_internal_frames) {
+               ctx->frm_dis_flg &= ~(1 << ctx->display_idx);
+               coda_write(dev, ctx->frm_dis_flg,
+                               CODA_REG_BIT_FRM_DIS_FLG(ctx->reg_idx));
+       }
+
+       /*
+        * The index of the last decoded frame, not necessarily in
+        * display order, and the index of the next display frame.
+        * The latter could have been decoded in a previous run.
+        */
+       decoded_idx = coda_read(dev, CODA_RET_DEC_PIC_CUR_IDX);
+       display_idx = coda_read(dev, CODA_RET_DEC_PIC_FRAME_IDX);
+
+       if (decoded_idx == -1) {
+               /* no frame was decoded, but we might have a display frame */
+               if (display_idx < 0 && ctx->display_idx < 0)
+                       ctx->prescan_failed = true;
+       } else if (decoded_idx == -2) {
+               /* no frame was decoded, we still return the remaining buffers */
+       } else if (decoded_idx < 0 || decoded_idx >= ctx->num_internal_frames) {
+               v4l2_err(&dev->v4l2_dev,
+                        "decoded frame index out of range: %d\n", decoded_idx);
+       }
+
+       if (display_idx == -1) {
+               /*
+                * no more frames to be decoded, but there could still
+                * be rotator output to dequeue
+                */
+               ctx->prescan_failed = true;
+       } else if (display_idx == -3) {
+               /* possibly prescan failure */
+       } else if (display_idx < 0 || display_idx >= ctx->num_internal_frames) {
+               v4l2_err(&dev->v4l2_dev,
+                        "presentation frame index out of range: %d\n",
+                        display_idx);
+       }
+
+       /* If a frame was copied out, return it */
+       if (ctx->display_idx >= 0 &&
+           ctx->display_idx < ctx->num_internal_frames) {
+               dst_buf = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
+               dst_buf->v4l2_buf.sequence = ctx->osequence++;
+
+               vb2_set_plane_payload(dst_buf, 0, width * height * 3 / 2);
+
+               v4l2_m2m_buf_done(dst_buf, success ? VB2_BUF_STATE_DONE :
+                                                    VB2_BUF_STATE_ERROR);
+
+               v4l2_dbg(1, coda_debug, &dev->v4l2_dev,
+                       "job finished: decoding frame (%d) (%s)\n",
+                       dst_buf->v4l2_buf.sequence,
+                       (dst_buf->v4l2_buf.flags & V4L2_BUF_FLAG_KEYFRAME) ?
+                       "KEYFRAME" : "PFRAME");
+       } else {
+               v4l2_dbg(1, coda_debug, &dev->v4l2_dev,
+                       "job finished: no frame decoded\n");
        }
 
+       /* The rotator will copy the current display frame next time */
+       ctx->display_idx = display_idx;
+}
+
+static void coda_finish_encode(struct coda_ctx *ctx)
+{
+       struct vb2_buffer *src_buf, *dst_buf;
+       struct coda_dev *dev = ctx->dev;
+       u32 wr_ptr, start_ptr;
+
        src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
        dst_buf = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
 
        /* Get results from the coda */
        coda_read(dev, CODA_RET_ENC_PIC_TYPE);
        start_ptr = coda_read(dev, CODA_CMD_ENC_PIC_BB_START);
-       wr_ptr = coda_read(dev, CODA_REG_BIT_WR_PTR(ctx->idx));
+       wr_ptr = coda_read(dev, CODA_REG_BIT_WR_PTR(ctx->reg_idx));
+
        /* Calculate bytesused field */
        if (dst_buf->v4l2_buf.sequence == 0) {
-               dst_buf->v4l2_planes[0].bytesused = (wr_ptr - start_ptr) +
-                                               ctx->vpu_header_size[0] +
-                                               ctx->vpu_header_size[1] +
-                                               ctx->vpu_header_size[2];
+               vb2_set_plane_payload(dst_buf, 0, wr_ptr - start_ptr +
+                                       ctx->vpu_header_size[0] +
+                                       ctx->vpu_header_size[1] +
+                                       ctx->vpu_header_size[2]);
        } else {
-               dst_buf->v4l2_planes[0].bytesused = (wr_ptr - start_ptr);
+               vb2_set_plane_payload(dst_buf, 0, wr_ptr - start_ptr);
        }
 
        v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev, "frame size = %u\n",
@@ -1708,8 +2739,62 @@ static irqreturn_t coda_irq_handler(int irq, void *data)
                dst_buf->v4l2_buf.sequence,
                (dst_buf->v4l2_buf.flags & V4L2_BUF_FLAG_KEYFRAME) ?
                "KEYFRAME" : "PFRAME");
+}
+
+static irqreturn_t coda_irq_handler(int irq, void *data)
+{
+       struct coda_dev *dev = data;
+       struct coda_ctx *ctx;
+
+       cancel_delayed_work(&dev->timeout);
+
+       /* read status register to attend the IRQ */
+       coda_read(dev, CODA_REG_BIT_INT_STATUS);
+       coda_write(dev, CODA_REG_BIT_INT_CLEAR_SET,
+                     CODA_REG_BIT_INT_CLEAR);
+
+       ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);
+       if (ctx == NULL) {
+               v4l2_err(&dev->v4l2_dev, "Instance released before the end of transaction\n");
+               mutex_unlock(&dev->coda_mutex);
+               return IRQ_HANDLED;
+       }
+
+       if (ctx->aborting) {
+               v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
+                        "task has been aborted\n");
+               goto out;
+       }
+
+       if (coda_isbusy(ctx->dev)) {
+               v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
+                        "coda is still busy!!!!\n");
+               return IRQ_NONE;
+       }
+
+       if (ctx->inst_type == CODA_INST_DECODER)
+               coda_finish_decode(ctx);
+       else
+               coda_finish_encode(ctx);
+
+out:
+       if (ctx->aborting || (!ctx->streamon_cap && !ctx->streamon_out)) {
+               v4l2_dbg(1, coda_debug, &dev->v4l2_dev,
+                        "%s: sent command 'SEQ_END' to coda\n", __func__);
+               if (coda_command_sync(ctx, CODA_COMMAND_SEQ_END)) {
+                       v4l2_err(&dev->v4l2_dev,
+                                "CODA_COMMAND_SEQ_END failed\n");
+               }
+
+               kfifo_init(&ctx->bitstream_fifo,
+                       ctx->bitstream.vaddr, ctx->bitstream.size);
+
+               coda_free_framebuffers(ctx);
+               coda_free_context_buffers(ctx);
+       }
 
        mutex_unlock(&dev->coda_mutex);
+       mutex_unlock(&ctx->buffer_mutex);
 
        v4l2_m2m_job_finish(ctx->dev->m2m_dev, ctx->m2m_ctx);
 
@@ -1726,6 +2811,8 @@ static void coda_timeout(struct work_struct *work)
 
        mutex_lock(&dev->dev_mutex);
        list_for_each_entry(ctx, &dev->instances, list) {
+               if (mutex_is_locked(&ctx->buffer_mutex))
+                       mutex_unlock(&ctx->buffer_mutex);
                v4l2_m2m_streamoff(NULL, ctx->m2m_ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
                v4l2_m2m_streamoff(NULL, ctx->m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
        }
@@ -1738,7 +2825,7 @@ static void coda_timeout(struct work_struct *work)
 
 static u32 coda_supported_firmwares[] = {
        CODA_FIRMWARE_VERNUM(CODA_DX6, 2, 2, 5),
-       CODA_FIRMWARE_VERNUM(CODA_7541, 13, 4, 29),
+       CODA_FIRMWARE_VERNUM(CODA_7541, 1, 4, 50),
 };
 
 static bool coda_firmware_supported(u32 vernum)
@@ -1803,8 +2890,14 @@ static int coda_hw_init(struct coda_dev *dev)
                coda_write(dev, 0, CODA_REG_BIT_CODE_BUF_ADDR + i * 4);
 
        /* Tell the BIT where to find everything it needs */
-       coda_write(dev, dev->workbuf.paddr,
-                     CODA_REG_BIT_WORK_BUF_ADDR);
+       if (dev->devtype->product == CODA_7541) {
+               coda_write(dev, dev->tempbuf.paddr,
+                               CODA_REG_BIT_TEMP_BUF_ADDR);
+               coda_write(dev, 0, CODA_REG_BIT_BIT_STREAM_PARAM);
+       } else {
+               coda_write(dev, dev->workbuf.paddr,
+                             CODA_REG_BIT_WORK_BUF_ADDR);
+       }
        coda_write(dev, dev->codebuf.paddr,
                      CODA_REG_BIT_CODE_BUF_ADDR);
        coda_write(dev, 0, CODA_REG_BIT_CODE_RUN);
@@ -1891,11 +2984,8 @@ static void coda_fw_callback(const struct firmware *fw, void *context)
        }
 
        /* allocate auxiliary per-device code buffer for the BIT processor */
-       dev->codebuf.size = fw->size;
-       dev->codebuf.vaddr = dma_alloc_coherent(&pdev->dev, fw->size,
-                                                   &dev->codebuf.paddr,
-                                                   GFP_KERNEL);
-       if (!dev->codebuf.vaddr) {
+       ret = coda_alloc_aux_buf(dev, &dev->codebuf, fw->size);
+       if (ret < 0) {
                dev_err(&pdev->dev, "failed to allocate code buffer\n");
                return;
        }
@@ -2048,8 +3138,8 @@ static int coda_probe(struct platform_device *pdev)
                return -ENOENT;
        }
 
-       if (devm_request_irq(&pdev->dev, irq, coda_irq_handler,
-               0, CODA_NAME, dev) < 0) {
+       if (devm_request_threaded_irq(&pdev->dev, irq, NULL, coda_irq_handler,
+               IRQF_ONESHOT, CODA_NAME, dev) < 0) {
                dev_err(&pdev->dev, "failed to request irq\n");
                return -ENOENT;
        }
@@ -2085,24 +3175,36 @@ static int coda_probe(struct platform_device *pdev)
        /* allocate auxiliary per-device buffers for the BIT processor */
        switch (dev->devtype->product) {
        case CODA_DX6:
-               dev->workbuf.size = CODADX6_WORK_BUF_SIZE;
+               ret = coda_alloc_aux_buf(dev, &dev->workbuf,
+                                        CODADX6_WORK_BUF_SIZE);
+               if (ret < 0) {
+                       dev_err(&pdev->dev, "failed to allocate work buffer\n");
+                       v4l2_device_unregister(&dev->v4l2_dev);
+                       return ret;
+               }
+               break;
+       case CODA_7541:
+               dev->tempbuf.size = CODA7_TEMP_BUF_SIZE;
                break;
-       default:
-               dev->workbuf.size = CODA7_WORK_BUF_SIZE;
        }
-       dev->workbuf.vaddr = dma_alloc_coherent(&pdev->dev, dev->workbuf.size,
-                                                   &dev->workbuf.paddr,
-                                                   GFP_KERNEL);
-       if (!dev->workbuf.vaddr) {
-               dev_err(&pdev->dev, "failed to allocate work buffer\n");
-               v4l2_device_unregister(&dev->v4l2_dev);
-               return -ENOMEM;
+       if (dev->tempbuf.size) {
+               ret = coda_alloc_aux_buf(dev, &dev->tempbuf,
+                                        dev->tempbuf.size);
+               if (ret < 0) {
+                       dev_err(&pdev->dev, "failed to allocate temp buffer\n");
+                       v4l2_device_unregister(&dev->v4l2_dev);
+                       return ret;
+               }
        }
 
-       if (dev->devtype->product == CODA_DX6)
+       switch (dev->devtype->product) {
+       case CODA_DX6:
                dev->iram_size = CODADX6_IRAM_SIZE;
-       else
+               break;
+       case CODA_7541:
                dev->iram_size = CODA7_IRAM_SIZE;
+               break;
+       }
        dev->iram_vaddr = gen_pool_alloc(dev->iram_pool, dev->iram_size);
        if (!dev->iram_vaddr) {
                dev_err(&pdev->dev, "unable to alloc iram\n");
@@ -2128,12 +3230,9 @@ static int coda_remove(struct platform_device *pdev)
        v4l2_device_unregister(&dev->v4l2_dev);
        if (dev->iram_vaddr)
                gen_pool_free(dev->iram_pool, dev->iram_vaddr, dev->iram_size);
-       if (dev->codebuf.vaddr)
-               dma_free_coherent(&pdev->dev, dev->codebuf.size,
-                                 &dev->codebuf.vaddr, dev->codebuf.paddr);
-       if (dev->workbuf.vaddr)
-               dma_free_coherent(&pdev->dev, dev->workbuf.size, &dev->workbuf.vaddr,
-                         dev->workbuf.paddr);
+       coda_free_aux_buf(dev, &dev->codebuf);
+       coda_free_aux_buf(dev, &dev->tempbuf);
+       coda_free_aux_buf(dev, &dev->workbuf);
        return 0;
 }
 
index ace0bf0a3b9cb245a5399a55ae7acd2594eb1a1d..4e32e2edea62005d8ae6c8d20da8bf071d417183 100644 (file)
 #define                CODA_STREAM_ENDIAN_SELECT       (1 << 0)
 #define CODA_REG_BIT_FRAME_MEM_CTRL            0x110
 #define                CODA_IMAGE_ENDIAN_SELECT        (1 << 0)
+#define CODA_REG_BIT_BIT_STREAM_PARAM          0x114
+#define                CODA_BIT_STREAM_END_FLAG        (1 << 2)
+#define                CODA_BIT_DEC_SEQ_INIT_ESCAPE    (1 << 0)
+#define CODA_REG_BIT_TEMP_BUF_ADDR             0x118
 #define CODA_REG_BIT_RD_PTR(x)                 (0x120 + 8 * (x))
 #define CODA_REG_BIT_WR_PTR(x)                 (0x124 + 8 * (x))
+#define CODA_REG_BIT_FRM_DIS_FLG(x)            (0x150 + 4 * (x))
 #define CODADX6_REG_BIT_SEARCH_RAM_BASE_ADDR   0x140
 #define CODA7_REG_BIT_AXI_SRAM_USE             0x140
-#define                CODA7_USE_BIT_ENABLE            (1 << 0)
+#define                CODA7_USE_HOST_ME_ENABLE        (1 << 11)
+#define                CODA7_USE_HOST_OVL_ENABLE       (1 << 10)
+#define                CODA7_USE_HOST_DBK_ENABLE       (1 << 9)
+#define                CODA7_USE_HOST_IP_ENABLE        (1 << 8)
 #define                CODA7_USE_HOST_BIT_ENABLE       (1 << 7)
 #define                CODA7_USE_ME_ENABLE             (1 << 4)
-#define                CODA7_USE_HOST_ME_ENABLE        (1 << 11)
+#define                CODA7_USE_OVL_ENABLE            (1 << 3)
+#define                CODA7_USE_DBK_ENABLE            (1 << 2)
+#define                CODA7_USE_IP_ENABLE             (1 << 1)
+#define                CODA7_USE_BIT_ENABLE            (1 << 0)
+
 #define CODA_REG_BIT_BUSY                      0x160
 #define                CODA_REG_BIT_BUSY_FLAG          1
 #define CODA_REG_BIT_RUN_COMMAND               0x164
 #define        CODA_MODE_INVALID               0xffff
 #define CODA_REG_BIT_INT_ENABLE                0x170
 #define                CODA_INT_INTERRUPT_ENABLE       (1 << 3)
+#define CODA_REG_BIT_INT_REASON                        0x174
+#define CODA7_REG_BIT_RUN_AUX_STD              0x178
+#define                CODA_MP4_AUX_MPEG4              0
+#define                CODA_MP4_AUX_DIVX3              1
+#define                CODA_VPX_AUX_THO                0
+#define                CODA_VPX_AUX_VP6                1
+#define                CODA_VPX_AUX_VP8                2
+#define                CODA_H264_AUX_AVC               0
+#define                CODA_H264_AUX_MVC               1
 
 /*
  * Commands' mailbox:
  * issued.
  */
 
+/* Decoder Sequence Initialization */
+#define CODA_CMD_DEC_SEQ_BB_START              0x180
+#define CODA_CMD_DEC_SEQ_BB_SIZE               0x184
+#define CODA_CMD_DEC_SEQ_OPTION                        0x188
+#define                CODA_REORDER_ENABLE                     (1 << 1)
+#define                CODADX6_QP_REPORT                       (1 << 0)
+#define                CODA7_MP4_DEBLK_ENABLE                  (1 << 0)
+#define CODA_CMD_DEC_SEQ_SRC_SIZE              0x18c
+#define CODA_CMD_DEC_SEQ_START_BYTE            0x190
+#define CODA_CMD_DEC_SEQ_PS_BB_START           0x194
+#define CODA_CMD_DEC_SEQ_PS_BB_SIZE            0x198
+#define CODA_CMD_DEC_SEQ_MP4_ASP_CLASS         0x19c
+#define CODA_CMD_DEC_SEQ_X264_MV_EN            0x19c
+#define CODA_CMD_DEC_SEQ_SPP_CHUNK_SIZE                0x1a0
+
+#define CODA7_RET_DEC_SEQ_ASPECT               0x1b0
+#define CODA_RET_DEC_SEQ_SUCCESS               0x1c0
+#define CODA_RET_DEC_SEQ_SRC_FMT               0x1c4 /* SRC_SIZE on CODA7 */
+#define CODA_RET_DEC_SEQ_SRC_SIZE              0x1c4
+#define CODA_RET_DEC_SEQ_SRC_F_RATE            0x1c8
+#define CODA9_RET_DEC_SEQ_ASPECT               0x1c8
+#define CODA_RET_DEC_SEQ_FRAME_NEED            0x1cc
+#define CODA_RET_DEC_SEQ_FRAME_DELAY           0x1d0
+#define CODA_RET_DEC_SEQ_INFO                  0x1d4
+#define CODA_RET_DEC_SEQ_CROP_LEFT_RIGHT       0x1d8
+#define CODA_RET_DEC_SEQ_CROP_TOP_BOTTOM       0x1dc
+#define CODA_RET_DEC_SEQ_NEXT_FRAME_NUM                0x1e0
+#define CODA_RET_DEC_SEQ_ERR_REASON            0x1e0
+#define CODA_RET_DEC_SEQ_FRATE_NR              0x1e4
+#define CODA_RET_DEC_SEQ_FRATE_DR              0x1e8
+#define CODA_RET_DEC_SEQ_JPG_PARA              0x1e4
+#define CODA_RET_DEC_SEQ_JPG_THUMB_IND         0x1e8
+
+/* Decoder Picture Run */
+#define CODA_CMD_DEC_PIC_ROT_MODE              0x180
+#define CODA_CMD_DEC_PIC_ROT_ADDR_Y            0x184
+#define CODA_CMD_DEC_PIC_ROT_ADDR_CB           0x188
+#define CODA_CMD_DEC_PIC_ROT_ADDR_CR           0x18c
+#define CODA_CMD_DEC_PIC_ROT_STRIDE            0x190
+
+#define CODA_CMD_DEC_PIC_OPTION                        0x194
+#define                CODA_PRE_SCAN_EN                        (1 << 0)
+#define                CODA_PRE_SCAN_MODE_DECODE               (0 << 1)
+#define                CODA_PRE_SCAN_MODE_RETURN               (1 << 1)
+#define                CODA_IFRAME_SEARCH_EN                   (1 << 2)
+#define                CODA_SKIP_FRAME_MODE                    (0x3 << 3)
+#define CODA_CMD_DEC_PIC_SKIP_NUM              0x198
+#define CODA_CMD_DEC_PIC_CHUNK_SIZE            0x19c
+#define CODA_CMD_DEC_PIC_BB_START              0x1a0
+#define CODA_CMD_DEC_PIC_START_BYTE            0x1a4
+#define CODA_RET_DEC_PIC_SIZE                  0x1bc
+#define CODA_RET_DEC_PIC_FRAME_NUM             0x1c0
+#define CODA_RET_DEC_PIC_FRAME_IDX             0x1c4
+#define CODA_RET_DEC_PIC_ERR_MB                        0x1c8
+#define CODA_RET_DEC_PIC_TYPE                  0x1cc
+#define                CODA_PIC_TYPE_MASK                      0x7
+#define                CODA_PIC_TYPE_MASK_VC1                  0x3f
+#define                CODA9_PIC_TYPE_FIRST_MASK               (0x7 << 3)
+#define                CODA9_PIC_TYPE_IDR_MASK                 (0x3 << 6)
+#define                CODA7_PIC_TYPE_H264_NPF_MASK            (0x3 << 16)
+#define                CODA7_PIC_TYPE_INTERLACED               (1 << 18)
+#define CODA_RET_DEC_PIC_POST                  0x1d0
+#define CODA_RET_DEC_PIC_MVC_REPORT            0x1d0
+#define CODA_RET_DEC_PIC_OPTION                        0x1d4
+#define CODA_RET_DEC_PIC_SUCCESS               0x1d8
+#define CODA_RET_DEC_PIC_CUR_IDX               0x1dc
+#define CODA_RET_DEC_PIC_CROP_LEFT_RIGHT       0x1e0
+#define CODA_RET_DEC_PIC_CROP_TOP_BOTTOM       0x1e4
+#define CODA_RET_DEC_PIC_FRAME_NEED            0x1ec
+
 /* Encoder Sequence Initialization */
 #define CODA_CMD_ENC_SEQ_BB_START                              0x180
 #define CODA_CMD_ENC_SEQ_BB_SIZE                               0x184
 #define CODA_CMD_ENC_SEQ_OPTION                                0x188
+#define                CODA7_OPTION_AVCINTRA16X16ONLY_OFFSET           9
 #define                CODA7_OPTION_GAMMA_OFFSET                       8
+#define                CODA7_OPTION_RCQPMAX_OFFSET                     7
 #define                CODADX6_OPTION_GAMMA_OFFSET                     7
+#define                CODA7_OPTION_RCQPMIN_OFFSET                     6
 #define                CODA_OPTION_LIMITQP_OFFSET                      6
 #define                CODA_OPTION_RCINTRAQP_OFFSET                    5
 #define                CODA_OPTION_FMO_OFFSET                          4
+#define                CODA_OPTION_AVC_AUD_OFFSET                      2
 #define                CODA_OPTION_SLICEREPORT_OFFSET                  1
 #define CODA_CMD_ENC_SEQ_COD_STD                               0x18c
 #define                CODA_STD_MPEG4                                  0
 #define                CODA_FMOPARAM_TYPE_MASK                         1
 #define                CODA_FMOPARAM_SLICENUM_OFFSET                   0
 #define                CODA_FMOPARAM_SLICENUM_MASK                     0x0f
+#define CODADX6_CMD_ENC_SEQ_INTRA_QP                           0x1bc
 #define CODA7_CMD_ENC_SEQ_SEARCH_BASE                          0x1b8
 #define CODA7_CMD_ENC_SEQ_SEARCH_SIZE                          0x1bc
+#define CODA7_CMD_ENC_SEQ_INTRA_QP                             0x1c4
 #define CODA_CMD_ENC_SEQ_RC_QP_MAX                             0x1c8
 #define                CODA_QPMAX_OFFSET                               0
 #define                CODA_QPMAX_MASK                                 0x3f
 #define CODA_CMD_ENC_PIC_OPTION        0x194
 #define CODA_CMD_ENC_PIC_BB_START      0x198
 #define CODA_CMD_ENC_PIC_BB_SIZE       0x19c
+#define CODA_RET_ENC_FRAME_NUM         0x1c0
 #define CODA_RET_ENC_PIC_TYPE          0x1c4
+#define CODA_RET_ENC_PIC_FRAME_IDX     0x1c8
 #define CODA_RET_ENC_PIC_SLICE_NUM     0x1cc
 #define CODA_RET_ENC_PIC_FLAG          0x1d0
+#define CODA_RET_ENC_PIC_SUCCESS       0x1d8
 
 /* Set Frame Buffer */
 #define CODA_CMD_SET_FRAME_BUF_NUM             0x180
 #define CODA_CMD_SET_FRAME_BUF_STRIDE          0x184
+#define CODA_CMD_SET_FRAME_SLICE_BB_START      0x188
+#define CODA_CMD_SET_FRAME_SLICE_BB_SIZE       0x18c
 #define CODA7_CMD_SET_FRAME_AXI_BIT_ADDR       0x190
 #define CODA7_CMD_SET_FRAME_AXI_IPACDC_ADDR    0x194
 #define CODA7_CMD_SET_FRAME_AXI_DBKY_ADDR      0x198
 #define CODA7_CMD_SET_FRAME_AXI_DBKC_ADDR      0x19c
 #define CODA7_CMD_SET_FRAME_AXI_OVL_ADDR       0x1a0
+#define CODA7_CMD_SET_FRAME_MAX_DEC_SIZE       0x1a4
 #define CODA7_CMD_SET_FRAME_SOURCE_BUF_STRIDE  0x1a8
 
 /* Encoder Header */
index e180ff7282d9eb7f5aa556ba66822482b91adca8..04609cc6eba70a3a5ad40c04a01a4308bccb8403 100644 (file)
@@ -1743,11 +1743,10 @@ static int vpbe_display_probe(struct platform_device *pdev)
 
        printk(KERN_DEBUG "vpbe_display_probe\n");
        /* Allocate memory for vpbe_display */
-       disp_dev = kzalloc(sizeof(struct vpbe_display), GFP_KERNEL);
-       if (!disp_dev) {
-               printk(KERN_ERR "ran out of memory\n");
+       disp_dev = devm_kzalloc(&pdev->dev, sizeof(struct vpbe_display),
+                               GFP_KERNEL);
+       if (!disp_dev)
                return -ENOMEM;
-       }
 
        spin_lock_init(&disp_dev->dma_queue_lock);
        /*
@@ -1786,26 +1785,24 @@ static int vpbe_display_probe(struct platform_device *pdev)
        }
 
        irq = res->start;
-       if (request_irq(irq, venc_isr,  IRQF_DISABLED, VPBE_DISPLAY_DRIVER,
-               disp_dev)) {
+       err = devm_request_irq(&pdev->dev, irq, venc_isr, IRQF_DISABLED,
+                              VPBE_DISPLAY_DRIVER, disp_dev);
+       if (err) {
                v4l2_err(&disp_dev->vpbe_dev->v4l2_dev,
                                "Unable to request interrupt\n");
-               err = -ENODEV;
                goto probe_out;
        }
 
        for (i = 0; i < VPBE_DISPLAY_MAX_DEVICES; i++) {
                if (register_device(disp_dev->dev[i], disp_dev, pdev)) {
                        err = -ENODEV;
-                       goto probe_out_irq;
+                       goto probe_out;
                }
        }
 
        printk(KERN_DEBUG "Successfully completed the probing of vpbe v4l2 device\n");
        return 0;
 
-probe_out_irq:
-       free_irq(res->start, disp_dev);
 probe_out:
        for (k = 0; k < VPBE_DISPLAY_MAX_DEVICES; k++) {
                /* Get the pointer to the layer object */
@@ -1817,7 +1814,6 @@ probe_out:
                                kfree(disp_dev->dev[k]);
                }
        }
-       kfree(disp_dev);
        return err;
 }
 
@@ -1830,15 +1826,10 @@ static int vpbe_display_remove(struct platform_device *pdev)
        struct vpbe_layer *vpbe_display_layer;
        struct vpbe_display *disp_dev = platform_get_drvdata(pdev);
        struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev;
-       struct resource *res;
        int i;
 
        v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "vpbe_display_remove\n");
 
-       /* unregister irq */
-       res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
-       free_irq(res->start, disp_dev);
-
        /* deinitialize the vpbe display controller */
        if (NULL != vpbe_dev->ops.deinitialize)
                vpbe_dev->ops.deinitialize(&pdev->dev, vpbe_dev);
index 6ed82e8b297bf7861847f7bb9ca085b728c36810..d053c2669c1fff0e0e140d0e5e92247ac56b0d25 100644 (file)
@@ -1547,61 +1547,36 @@ static int osd_probe(struct platform_device *pdev)
        const struct platform_device_id *pdev_id;
        struct osd_state *osd;
        struct resource *res;
-       int ret = 0;
 
-       osd = kzalloc(sizeof(struct osd_state), GFP_KERNEL);
+       pdev_id = platform_get_device_id(pdev);
+       if (!pdev_id)
+               return -EINVAL;
+
+       osd = devm_kzalloc(&pdev->dev, sizeof(struct osd_state), GFP_KERNEL);
        if (osd == NULL)
                return -ENOMEM;
 
-       pdev_id = platform_get_device_id(pdev);
-       if (!pdev_id) {
-               ret = -EINVAL;
-               goto free_mem;
-       }
 
        osd->dev = &pdev->dev;
        osd->vpbe_type = pdev_id->driver_data;
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res) {
-               dev_err(osd->dev, "Unable to get OSD register address map\n");
-               ret = -ENODEV;
-               goto free_mem;
-       }
+       osd->osd_base = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(osd->osd_base))
+               return PTR_ERR(osd->osd_base);
+
        osd->osd_base_phys = res->start;
        osd->osd_size = resource_size(res);
-       if (!request_mem_region(osd->osd_base_phys, osd->osd_size,
-                               MODULE_NAME)) {
-               dev_err(osd->dev, "Unable to reserve OSD MMIO region\n");
-               ret = -ENODEV;
-               goto free_mem;
-       }
-       osd->osd_base = ioremap_nocache(res->start, osd->osd_size);
-       if (!osd->osd_base) {
-               dev_err(osd->dev, "Unable to map the OSD region\n");
-               ret = -ENODEV;
-               goto release_mem_region;
-       }
        spin_lock_init(&osd->lock);
        osd->ops = osd_ops;
        platform_set_drvdata(pdev, osd);
        dev_notice(osd->dev, "OSD sub device probe success\n");
-       return ret;
 
-release_mem_region:
-       release_mem_region(osd->osd_base_phys, osd->osd_size);
-free_mem:
-       kfree(osd);
-       return ret;
+       return 0;
 }
 
 static int osd_remove(struct platform_device *pdev)
 {
-       struct osd_state *osd = platform_get_drvdata(pdev);
-
-       iounmap((void *)osd->osd_base);
-       release_mem_region(osd->osd_base_phys, osd->osd_size);
-       kfree(osd);
        return 0;
 }
 
index 87eef9be08ede65d5e7ad2683b2cc154f596603a..14a023a75d2d3c026a9e59bac16f0e7b6bf38bfc 100644 (file)
@@ -639,105 +639,46 @@ static int venc_probe(struct platform_device *pdev)
        const struct platform_device_id *pdev_id;
        struct venc_state *venc;
        struct resource *res;
-       int ret;
 
-       venc = kzalloc(sizeof(struct venc_state), GFP_KERNEL);
+       if (!pdev->dev.platform_data) {
+               dev_err(&pdev->dev, "No platform data for VENC sub device");
+               return -EINVAL;
+       }
+
+       pdev_id = platform_get_device_id(pdev);
+       if (!pdev_id)
+               return -EINVAL;
+
+       venc = devm_kzalloc(&pdev->dev, sizeof(struct venc_state), GFP_KERNEL);
        if (venc == NULL)
                return -ENOMEM;
 
-       pdev_id = platform_get_device_id(pdev);
-       if (!pdev_id) {
-               ret = -EINVAL;
-               goto free_mem;
-       }
        venc->venc_type = pdev_id->driver_data;
        venc->pdev = &pdev->dev;
        venc->pdata = pdev->dev.platform_data;
-       if (NULL == venc->pdata) {
-               dev_err(venc->pdev, "Unable to get platform data for"
-                       " VENC sub device");
-               ret = -ENOENT;
-               goto free_mem;
-       }
+
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res) {
-               dev_err(venc->pdev,
-                       "Unable to get VENC register address map\n");
-               ret = -ENODEV;
-               goto free_mem;
-       }
 
-       if (!request_mem_region(res->start, resource_size(res), "venc")) {
-               dev_err(venc->pdev, "Unable to reserve VENC MMIO region\n");
-               ret = -ENODEV;
-               goto free_mem;
-       }
-
-       venc->venc_base = ioremap_nocache(res->start, resource_size(res));
-       if (!venc->venc_base) {
-               dev_err(venc->pdev, "Unable to map VENC IO space\n");
-               ret = -ENODEV;
-               goto release_venc_mem_region;
-       }
+       venc->venc_base = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(venc->venc_base))
+               return PTR_ERR(venc->venc_base);
 
        if (venc->venc_type != VPBE_VERSION_1) {
                res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-               if (!res) {
-                       dev_err(venc->pdev,
-                               "Unable to get VDAC_CONFIG address map\n");
-                       ret = -ENODEV;
-                       goto unmap_venc_io;
-               }
-
-               if (!request_mem_region(res->start,
-                                       resource_size(res), "venc")) {
-                       dev_err(venc->pdev,
-                               "Unable to reserve VDAC_CONFIG  MMIO region\n");
-                       ret = -ENODEV;
-                       goto unmap_venc_io;
-               }
-
-               venc->vdaccfg_reg = ioremap_nocache(res->start,
-                                                   resource_size(res));
-               if (!venc->vdaccfg_reg) {
-                       dev_err(venc->pdev,
-                               "Unable to map VDAC_CONFIG IO space\n");
-                       ret = -ENODEV;
-                       goto release_vdaccfg_mem_region;
-               }
+
+               venc->vdaccfg_reg = devm_ioremap_resource(&pdev->dev, res);
+               if (IS_ERR(venc->vdaccfg_reg))
+                       return PTR_ERR(venc->vdaccfg_reg);
        }
        spin_lock_init(&venc->lock);
        platform_set_drvdata(pdev, venc);
        dev_notice(venc->pdev, "VENC sub device probe success\n");
-       return 0;
 
-release_vdaccfg_mem_region:
-       release_mem_region(res->start, resource_size(res));
-unmap_venc_io:
-       iounmap(venc->venc_base);
-release_venc_mem_region:
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       release_mem_region(res->start, resource_size(res));
-free_mem:
-       kfree(venc);
-       return ret;
+       return 0;
 }
 
 static int venc_remove(struct platform_device *pdev)
 {
-       struct venc_state *venc = platform_get_drvdata(pdev);
-       struct resource *res;
-
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       iounmap((void *)venc->venc_base);
-       release_mem_region(res->start, resource_size(res));
-       if (venc->venc_type != VPBE_VERSION_1) {
-               res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-               iounmap((void *)venc->vdaccfg_reg);
-               release_mem_region(res->start, resource_size(res));
-       }
-       kfree(venc);
-
        return 0;
 }
 
index 5514175bbd072ec92b9e4a1a472b5d00e3f51088..7fbde6d790b5fb1e2173cbebd385ff060e74e553 100644 (file)
@@ -1979,6 +1979,76 @@ vpif_init_free_channel_objects:
        return err;
 }
 
+static int vpif_async_bound(struct v4l2_async_notifier *notifier,
+                           struct v4l2_subdev *subdev,
+                           struct v4l2_async_subdev *asd)
+{
+       int i;
+
+       for (i = 0; i < vpif_obj.config->subdev_count; i++)
+               if (!strcmp(vpif_obj.config->subdev_info[i].name,
+                           subdev->name)) {
+                       vpif_obj.sd[i] = subdev;
+                       return 0;
+               }
+
+       return -EINVAL;
+}
+
+static int vpif_probe_complete(void)
+{
+       struct common_obj *common;
+       struct channel_obj *ch;
+       int i, j, err, k;
+
+       for (j = 0; j < VPIF_CAPTURE_MAX_DEVICES; j++) {
+               ch = vpif_obj.dev[j];
+               ch->channel_id = j;
+               common = &(ch->common[VPIF_VIDEO_INDEX]);
+               spin_lock_init(&common->irqlock);
+               mutex_init(&common->lock);
+               ch->video_dev->lock = &common->lock;
+               /* Initialize prio member of channel object */
+               v4l2_prio_init(&ch->prio);
+               video_set_drvdata(ch->video_dev, ch);
+
+               /* select input 0 */
+               err = vpif_set_input(vpif_obj.config, ch, 0);
+               if (err)
+                       goto probe_out;
+
+               err = video_register_device(ch->video_dev,
+                                           VFL_TYPE_GRABBER, (j ? 1 : 0));
+               if (err)
+                       goto probe_out;
+       }
+
+       v4l2_info(&vpif_obj.v4l2_dev, "VPIF capture driver initialized\n");
+       return 0;
+
+probe_out:
+       for (k = 0; k < j; k++) {
+               /* Get the pointer to the channel object */
+               ch = vpif_obj.dev[k];
+               /* Unregister video device */
+               video_unregister_device(ch->video_dev);
+       }
+       kfree(vpif_obj.sd);
+       for (i = 0; i < VPIF_CAPTURE_MAX_DEVICES; i++) {
+               ch = vpif_obj.dev[i];
+               /* Note: does nothing if ch->video_dev == NULL */
+               video_device_release(ch->video_dev);
+       }
+       v4l2_device_unregister(&vpif_obj.v4l2_dev);
+
+       return err;
+}
+
+static int vpif_async_complete(struct v4l2_async_notifier *notifier)
+{
+       return vpif_probe_complete();
+}
+
 /**
  * vpif_probe : This function probes the vpif capture driver
  * @pdev: platform device pointer
@@ -1989,12 +2059,10 @@ vpif_init_free_channel_objects:
 static __init int vpif_probe(struct platform_device *pdev)
 {
        struct vpif_subdev_info *subdevdata;
-       struct vpif_capture_config *config;
-       int i, j, k, err;
+       int i, j, err;
        int res_idx = 0;
        struct i2c_adapter *i2c_adap;
        struct channel_obj *ch;
-       struct common_obj *common;
        struct video_device *vfd;
        struct resource *res;
        int subdev_count;
@@ -2068,10 +2136,9 @@ static __init int vpif_probe(struct platform_device *pdev)
                }
        }
 
-       i2c_adap = i2c_get_adapter(1);
-       config = pdev->dev.platform_data;
+       vpif_obj.config = pdev->dev.platform_data;
 
-       subdev_count = config->subdev_count;
+       subdev_count = vpif_obj.config->subdev_count;
        vpif_obj.sd = kzalloc(sizeof(struct v4l2_subdev *) * subdev_count,
                                GFP_KERNEL);
        if (vpif_obj.sd == NULL) {
@@ -2080,54 +2147,42 @@ static __init int vpif_probe(struct platform_device *pdev)
                goto vpif_sd_error;
        }
 
-       for (i = 0; i < subdev_count; i++) {
-               subdevdata = &config->subdev_info[i];
-               vpif_obj.sd[i] =
-                       v4l2_i2c_new_subdev_board(&vpif_obj.v4l2_dev,
-                                                 i2c_adap,
-                                                 &subdevdata->board_info,
-                                                 NULL);
-
-               if (!vpif_obj.sd[i]) {
-                       vpif_err("Error registering v4l2 subdevice\n");
-                       err = -ENODEV;
+       if (!vpif_obj.config->asd_sizes) {
+               i2c_adap = i2c_get_adapter(1);
+               for (i = 0; i < subdev_count; i++) {
+                       subdevdata = &vpif_obj.config->subdev_info[i];
+                       vpif_obj.sd[i] =
+                               v4l2_i2c_new_subdev_board(&vpif_obj.v4l2_dev,
+                                                         i2c_adap,
+                                                         &subdevdata->
+                                                         board_info,
+                                                         NULL);
+
+                       if (!vpif_obj.sd[i]) {
+                               vpif_err("Error registering v4l2 subdevice\n");
+                               goto probe_subdev_out;
+                       }
+                       v4l2_info(&vpif_obj.v4l2_dev,
+                                 "registered sub device %s\n",
+                                  subdevdata->name);
+               }
+               vpif_probe_complete();
+       } else {
+               vpif_obj.notifier.subdevs = vpif_obj.config->asd;
+               vpif_obj.notifier.num_subdevs = vpif_obj.config->asd_sizes[0];
+               vpif_obj.notifier.bound = vpif_async_bound;
+               vpif_obj.notifier.complete = vpif_async_complete;
+               err = v4l2_async_notifier_register(&vpif_obj.v4l2_dev,
+                                                  &vpif_obj.notifier);
+               if (err) {
+                       vpif_err("Error registering async notifier\n");
+                       err = -EINVAL;
                        goto probe_subdev_out;
                }
-               v4l2_info(&vpif_obj.v4l2_dev, "registered sub device %s\n",
-                         subdevdata->name);
        }
 
-       for (j = 0; j < VPIF_CAPTURE_MAX_DEVICES; j++) {
-               ch = vpif_obj.dev[j];
-               ch->channel_id = j;
-               common = &(ch->common[VPIF_VIDEO_INDEX]);
-               spin_lock_init(&common->irqlock);
-               mutex_init(&common->lock);
-               ch->video_dev->lock = &common->lock;
-               /* Initialize prio member of channel object */
-               v4l2_prio_init(&ch->prio);
-               video_set_drvdata(ch->video_dev, ch);
-
-               /* select input 0 */
-               err = vpif_set_input(config, ch, 0);
-               if (err)
-                       goto probe_out;
-
-               err = video_register_device(ch->video_dev,
-                                           VFL_TYPE_GRABBER, (j ? 1 : 0));
-               if (err)
-                       goto probe_out;
-       }
-       v4l2_info(&vpif_obj.v4l2_dev, "VPIF capture driver initialized\n");
        return 0;
 
-probe_out:
-       for (k = 0; k < j; k++) {
-               /* Get the pointer to the channel object */
-               ch = vpif_obj.dev[k];
-               /* Unregister video device */
-               video_unregister_device(ch->video_dev);
-       }
 probe_subdev_out:
        /* free sub devices memory */
        kfree(vpif_obj.sd);
index 0ebb312603690f39ccc102eee04a8375c1ac4e36..5a29d9a0cae12c1a15fd74b7a7ab19f9f89731c5 100644 (file)
@@ -142,6 +142,8 @@ struct vpif_device {
        struct v4l2_device v4l2_dev;
        struct channel_obj *dev[VPIF_CAPTURE_NUM_CHANNELS];
        struct v4l2_subdev **sd;
+       struct v4l2_async_notifier notifier;
+       struct vpif_capture_config *config;
 };
 
 struct vpif_config_params {
index e6e57365025020f746e5958b357525e8b9a7dfce..6336dfc864822024e13800c81062f5c2b0af24ba 100644 (file)
@@ -1618,6 +1618,102 @@ vpif_init_free_channel_objects:
        return err;
 }
 
+static int vpif_async_bound(struct v4l2_async_notifier *notifier,
+                           struct v4l2_subdev *subdev,
+                           struct v4l2_async_subdev *asd)
+{
+       int i;
+
+       for (i = 0; i < vpif_obj.config->subdev_count; i++)
+               if (!strcmp(vpif_obj.config->subdevinfo[i].name,
+                           subdev->name)) {
+                       vpif_obj.sd[i] = subdev;
+                       vpif_obj.sd[i]->grp_id = 1 << i;
+                       return 0;
+               }
+
+       return -EINVAL;
+}
+
+static int vpif_probe_complete(void)
+{
+       struct common_obj *common;
+       struct channel_obj *ch;
+       int j, err, k;
+
+       for (j = 0; j < VPIF_DISPLAY_MAX_DEVICES; j++) {
+               ch = vpif_obj.dev[j];
+               /* Initialize field of the channel objects */
+               atomic_set(&ch->usrs, 0);
+               for (k = 0; k < VPIF_NUMOBJECTS; k++) {
+                       ch->common[k].numbuffers = 0;
+                       common = &ch->common[k];
+                       common->io_usrs = 0;
+                       common->started = 0;
+                       spin_lock_init(&common->irqlock);
+                       mutex_init(&common->lock);
+                       common->numbuffers = 0;
+                       common->set_addr = NULL;
+                       common->ytop_off = 0;
+                       common->ybtm_off = 0;
+                       common->ctop_off = 0;
+                       common->cbtm_off = 0;
+                       common->cur_frm = NULL;
+                       common->next_frm = NULL;
+                       memset(&common->fmt, 0, sizeof(common->fmt));
+                       common->numbuffers = config_params.numbuffers[k];
+               }
+               ch->initialized = 0;
+               if (vpif_obj.config->subdev_count)
+                       ch->sd = vpif_obj.sd[0];
+               ch->channel_id = j;
+               if (j < 2)
+                       ch->common[VPIF_VIDEO_INDEX].numbuffers =
+                           config_params.numbuffers[ch->channel_id];
+               else
+                       ch->common[VPIF_VIDEO_INDEX].numbuffers = 0;
+
+               memset(&ch->vpifparams, 0, sizeof(ch->vpifparams));
+
+               /* Initialize prio member of channel object */
+               v4l2_prio_init(&ch->prio);
+               ch->common[VPIF_VIDEO_INDEX].fmt.type =
+                                               V4L2_BUF_TYPE_VIDEO_OUTPUT;
+               ch->video_dev->lock = &common->lock;
+               video_set_drvdata(ch->video_dev, ch);
+
+               /* select output 0 */
+               err = vpif_set_output(vpif_obj.config, ch, 0);
+               if (err)
+                       goto probe_out;
+
+               /* register video device */
+               vpif_dbg(1, debug, "channel=%x,channel->video_dev=%x\n",
+                        (int)ch, (int)&ch->video_dev);
+
+               err = video_register_device(ch->video_dev,
+                                         VFL_TYPE_GRABBER, (j ? 3 : 2));
+               if (err < 0)
+                       goto probe_out;
+       }
+
+       return 0;
+
+probe_out:
+       for (k = 0; k < j; k++) {
+               ch = vpif_obj.dev[k];
+               video_unregister_device(ch->video_dev);
+               video_device_release(ch->video_dev);
+               ch->video_dev = NULL;
+       }
+       return err;
+}
+
+static int vpif_async_complete(struct v4l2_async_notifier *notifier)
+{
+       return vpif_probe_complete();
+}
+
 /*
  * vpif_probe: This function creates device entries by register itself to the
  * V4L2 driver and initializes fields of each channel objects
@@ -1625,11 +1721,9 @@ vpif_init_free_channel_objects:
 static __init int vpif_probe(struct platform_device *pdev)
 {
        struct vpif_subdev_info *subdevdata;
-       struct vpif_display_config *config;
-       int i, j = 0, k, err = 0;
+       int i, j = 0, err = 0;
        int res_idx = 0;
        struct i2c_adapter *i2c_adap;
-       struct common_obj *common;
        struct channel_obj *ch;
        struct video_device *vfd;
        struct resource *res;
@@ -1708,11 +1802,9 @@ static __init int vpif_probe(struct platform_device *pdev)
                                                                        size/2;
                }
        }
-
-       i2c_adap = i2c_get_adapter(1);
-       config = pdev->dev.platform_data;
-       subdev_count = config->subdev_count;
-       subdevdata = config->subdevinfo;
+       vpif_obj.config = pdev->dev.platform_data;
+       subdev_count = vpif_obj.config->subdev_count;
+       subdevdata = vpif_obj.config->subdevinfo;
        vpif_obj.sd = kzalloc(sizeof(struct v4l2_subdev *) * subdev_count,
                                                                GFP_KERNEL);
        if (vpif_obj.sd == NULL) {
@@ -1721,86 +1813,40 @@ static __init int vpif_probe(struct platform_device *pdev)
                goto vpif_sd_error;
        }
 
-       for (i = 0; i < subdev_count; i++) {
-               vpif_obj.sd[i] = v4l2_i2c_new_subdev_board(&vpif_obj.v4l2_dev,
-                                               i2c_adap,
-                                               &subdevdata[i].board_info,
-                                               NULL);
-               if (!vpif_obj.sd[i]) {
-                       vpif_err("Error registering v4l2 subdevice\n");
-                       err = -ENODEV;
-                       goto probe_subdev_out;
-               }
-
-               if (vpif_obj.sd[i])
-                       vpif_obj.sd[i]->grp_id = 1 << i;
-       }
-
-       for (j = 0; j < VPIF_DISPLAY_MAX_DEVICES; j++) {
-               ch = vpif_obj.dev[j];
-               /* Initialize field of the channel objects */
-               atomic_set(&ch->usrs, 0);
-               for (k = 0; k < VPIF_NUMOBJECTS; k++) {
-                       ch->common[k].numbuffers = 0;
-                       common = &ch->common[k];
-                       common->io_usrs = 0;
-                       common->started = 0;
-                       spin_lock_init(&common->irqlock);
-                       mutex_init(&common->lock);
-                       common->numbuffers = 0;
-                       common->set_addr = NULL;
-                       common->ytop_off = common->ybtm_off = 0;
-                       common->ctop_off = common->cbtm_off = 0;
-                       common->cur_frm = common->next_frm = NULL;
-                       memset(&common->fmt, 0, sizeof(common->fmt));
-                       common->numbuffers = config_params.numbuffers[k];
+       if (!vpif_obj.config->asd_sizes) {
+               i2c_adap = i2c_get_adapter(1);
+               for (i = 0; i < subdev_count; i++) {
+                       vpif_obj.sd[i] =
+                               v4l2_i2c_new_subdev_board(&vpif_obj.v4l2_dev,
+                                                         i2c_adap,
+                                                         &subdevdata[i].
+                                                         board_info,
+                                                         NULL);
+                       if (!vpif_obj.sd[i]) {
+                               vpif_err("Error registering v4l2 subdevice\n");
+                               goto probe_subdev_out;
+                       }
 
+                       if (vpif_obj.sd[i])
+                               vpif_obj.sd[i]->grp_id = 1 << i;
+               }
+               vpif_probe_complete();
+       } else {
+               vpif_obj.notifier.subdevs = vpif_obj.config->asd;
+               vpif_obj.notifier.num_subdevs = vpif_obj.config->asd_sizes[0];
+               vpif_obj.notifier.bound = vpif_async_bound;
+               vpif_obj.notifier.complete = vpif_async_complete;
+               err = v4l2_async_notifier_register(&vpif_obj.v4l2_dev,
+                                                  &vpif_obj.notifier);
+               if (err) {
+                       vpif_err("Error registering async notifier\n");
+                       err = -EINVAL;
+                       goto probe_subdev_out;
                }
-               ch->initialized = 0;
-               if (subdev_count)
-                       ch->sd = vpif_obj.sd[0];
-               ch->channel_id = j;
-               if (j < 2)
-                       ch->common[VPIF_VIDEO_INDEX].numbuffers =
-                           config_params.numbuffers[ch->channel_id];
-               else
-                       ch->common[VPIF_VIDEO_INDEX].numbuffers = 0;
-
-               memset(&ch->vpifparams, 0, sizeof(ch->vpifparams));
-
-               /* Initialize prio member of channel object */
-               v4l2_prio_init(&ch->prio);
-               ch->common[VPIF_VIDEO_INDEX].fmt.type =
-                                               V4L2_BUF_TYPE_VIDEO_OUTPUT;
-               ch->video_dev->lock = &common->lock;
-               video_set_drvdata(ch->video_dev, ch);
-
-               /* select output 0 */
-               err = vpif_set_output(config, ch, 0);
-               if (err)
-                       goto probe_out;
-
-               /* register video device */
-               vpif_dbg(1, debug, "channel=%x,channel->video_dev=%x\n",
-                               (int)ch, (int)&ch->video_dev);
-
-               err = video_register_device(ch->video_dev,
-                                         VFL_TYPE_GRABBER, (j ? 3 : 2));
-               if (err < 0)
-                       goto probe_out;
        }
 
-       v4l2_info(&vpif_obj.v4l2_dev,
-                       " VPIF display driver initialized\n");
        return 0;
 
-probe_out:
-       for (k = 0; k < j; k++) {
-               ch = vpif_obj.dev[k];
-               video_unregister_device(ch->video_dev);
-               video_device_release(ch->video_dev);
-               ch->video_dev = NULL;
-       }
 probe_subdev_out:
        kfree(vpif_obj.sd);
 vpif_sd_error:
index 5d87fc86e580ad367e8264a092b907dd91d03fd9..4d0485b99a80678c9db6eb534bb9d0a30922c94b 100644 (file)
@@ -148,7 +148,8 @@ struct vpif_device {
        struct v4l2_device v4l2_dev;
        struct channel_obj *dev[VPIF_DISPLAY_NUM_CHANNELS];
        struct v4l2_subdev **sd;
-
+       struct v4l2_async_notifier notifier;
+       struct vpif_display_config *config;
 };
 
 struct vpif_config_params {
index 8a2f01e344eee43927a3fed123815ff33ba50b54..31120b4a4a33feb4323225b31c846e0bb3f06977 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/platform_device.h>
 #include <linux/io.h>
 #include <linux/pm_runtime.h>
+#include <linux/err.h>
 
 #include <media/davinci/vpss.h>
 
@@ -404,9 +405,8 @@ EXPORT_SYMBOL(dm365_vpss_set_pg_frame_size);
 
 static int vpss_probe(struct platform_device *pdev)
 {
-       struct resource         *r1, *r2;
+       struct resource *res;
        char *platform_name;
-       int status;
 
        if (!pdev->dev.platform_data) {
                dev_err(&pdev->dev, "no platform data\n");
@@ -427,38 +427,19 @@ static int vpss_probe(struct platform_device *pdev)
        }
 
        dev_info(&pdev->dev, "%s vpss probed\n", platform_name);
-       r1 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!r1)
-               return -ENOENT;
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 
-       r1 = request_mem_region(r1->start, resource_size(r1), r1->name);
-       if (!r1)
-               return -EBUSY;
-
-       oper_cfg.vpss_regs_base0 = ioremap(r1->start, resource_size(r1));
-       if (!oper_cfg.vpss_regs_base0) {
-               status = -EBUSY;
-               goto fail1;
-       }
+       oper_cfg.vpss_regs_base0 = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(oper_cfg.vpss_regs_base0))
+               return PTR_ERR(oper_cfg.vpss_regs_base0);
 
        if (oper_cfg.platform == DM355 || oper_cfg.platform == DM365) {
-               r2 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-               if (!r2) {
-                       status = -ENOENT;
-                       goto fail2;
-               }
-               r2 = request_mem_region(r2->start, resource_size(r2), r2->name);
-               if (!r2) {
-                       status = -EBUSY;
-                       goto fail2;
-               }
-
-               oper_cfg.vpss_regs_base1 = ioremap(r2->start,
-                                                  resource_size(r2));
-               if (!oper_cfg.vpss_regs_base1) {
-                       status = -EBUSY;
-                       goto fail3;
-               }
+               res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+
+               oper_cfg.vpss_regs_base1 = devm_ioremap_resource(&pdev->dev,
+                                                                res);
+               if (IS_ERR(oper_cfg.vpss_regs_base1))
+                       return PTR_ERR(oper_cfg.vpss_regs_base1);
        }
 
        if (oper_cfg.platform == DM355) {
@@ -493,30 +474,13 @@ static int vpss_probe(struct platform_device *pdev)
 
        spin_lock_init(&oper_cfg.vpss_lock);
        dev_info(&pdev->dev, "%s vpss probe success\n", platform_name);
-       return 0;
 
-fail3:
-       release_mem_region(r2->start, resource_size(r2));
-fail2:
-       iounmap(oper_cfg.vpss_regs_base0);
-fail1:
-       release_mem_region(r1->start, resource_size(r1));
-       return status;
+       return 0;
 }
 
 static int vpss_remove(struct platform_device *pdev)
 {
-       struct resource         *res;
-
        pm_runtime_disable(&pdev->dev);
-       iounmap(oper_cfg.vpss_regs_base0);
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       release_mem_region(res->start, resource_size(res));
-       if (oper_cfg.platform == DM355 || oper_cfg.platform == DM365) {
-               iounmap(oper_cfg.vpss_regs_base1);
-               res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-               release_mem_region(res->start, resource_size(res));
-       }
        return 0;
 }
 
index 1f079ff33d4b94f7e0bcd92edc2f17504952fe6b..5628453612469a624300a3e934cf1620928dc074 100644 (file)
@@ -399,7 +399,7 @@ static void cafe_ctlr_init(struct mcam_camera *mcam)
 }
 
 
-static void cafe_ctlr_power_up(struct mcam_camera *mcam)
+static int cafe_ctlr_power_up(struct mcam_camera *mcam)
 {
        /*
         * Part one of the sensor dance: turn the global
@@ -414,6 +414,8 @@ static void cafe_ctlr_power_up(struct mcam_camera *mcam)
         */
        mcam_reg_write(mcam, REG_GPR, GPR_C1EN|GPR_C0EN); /* pwr up, reset */
        mcam_reg_write(mcam, REG_GPR, GPR_C1EN|GPR_C0EN|GPR_C0);
+
+       return 0;
 }
 
 static void cafe_ctlr_power_down(struct mcam_camera *mcam)
index 0821ed08c122855370d3dd338e8da2f84cbddfad..5184887b155c7098415b1a3470619be336023f87 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/delay.h>
 #include <linux/vmalloc.h>
 #include <linux/io.h>
+#include <linux/clk.h>
 #include <linux/videodev2.h>
 #include <media/v4l2-device.h>
 #include <media/v4l2-ioctl.h>
@@ -93,6 +94,9 @@ MODULE_PARM_DESC(buffer_mode,
 #define CF_CONFIG_NEEDED 4     /* Must configure hardware */
 #define CF_SINGLE_BUFFER 5     /* Running with a single buffer */
 #define CF_SG_RESTART   6      /* SG restart needed */
+#define CF_FRAME_SOF0   7      /* Frame 0 started */
+#define CF_FRAME_SOF1   8
+#define CF_FRAME_SOF2   9
 
 #define sensor_call(cam, o, f, args...) \
        v4l2_subdev_call(cam->sensor, o, f, ##args)
@@ -101,6 +105,7 @@ static struct mcam_format_struct {
        __u8 *desc;
        __u32 pixelformat;
        int bpp;   /* Bytes per pixel */
+       bool planar;
        enum v4l2_mbus_pixelcode mbus_code;
 } mcam_formats[] = {
        {
@@ -108,24 +113,56 @@ static struct mcam_format_struct {
                .pixelformat    = V4L2_PIX_FMT_YUYV,
                .mbus_code      = V4L2_MBUS_FMT_YUYV8_2X8,
                .bpp            = 2,
+               .planar         = false,
+       },
+       {
+               .desc           = "UYVY 4:2:2",
+               .pixelformat    = V4L2_PIX_FMT_UYVY,
+               .mbus_code      = V4L2_MBUS_FMT_YUYV8_2X8,
+               .bpp            = 2,
+               .planar         = false,
+       },
+       {
+               .desc           = "YUV 4:2:2 PLANAR",
+               .pixelformat    = V4L2_PIX_FMT_YUV422P,
+               .mbus_code      = V4L2_MBUS_FMT_YUYV8_2X8,
+               .bpp            = 2,
+               .planar         = true,
+       },
+       {
+               .desc           = "YUV 4:2:0 PLANAR",
+               .pixelformat    = V4L2_PIX_FMT_YUV420,
+               .mbus_code      = V4L2_MBUS_FMT_YUYV8_2X8,
+               .bpp            = 2,
+               .planar         = true,
+       },
+       {
+               .desc           = "YVU 4:2:0 PLANAR",
+               .pixelformat    = V4L2_PIX_FMT_YVU420,
+               .mbus_code      = V4L2_MBUS_FMT_YUYV8_2X8,
+               .bpp            = 2,
+               .planar         = true,
        },
        {
                .desc           = "RGB 444",
                .pixelformat    = V4L2_PIX_FMT_RGB444,
                .mbus_code      = V4L2_MBUS_FMT_RGB444_2X8_PADHI_LE,
                .bpp            = 2,
+               .planar         = false,
        },
        {
                .desc           = "RGB 565",
                .pixelformat    = V4L2_PIX_FMT_RGB565,
                .mbus_code      = V4L2_MBUS_FMT_RGB565_2X8_LE,
                .bpp            = 2,
+               .planar         = false,
        },
        {
                .desc           = "Raw RGB Bayer",
                .pixelformat    = V4L2_PIX_FMT_SBGGR8,
                .mbus_code      = V4L2_MBUS_FMT_SBGGR8_1X8,
-               .bpp            = 1
+               .bpp            = 1,
+               .planar         = false,
        },
 };
 #define N_MCAM_FMTS ARRAY_SIZE(mcam_formats)
@@ -168,6 +205,12 @@ struct mcam_dma_desc {
        u32 segment_len;
 };
 
+struct yuv_pointer_t {
+       dma_addr_t y;
+       dma_addr_t u;
+       dma_addr_t v;
+};
+
 /*
  * Our buffer type for working with videobuf2.  Note that the vb2
  * developers have decreed that struct vb2_buffer must be at the
@@ -179,6 +222,7 @@ struct mcam_vb_buffer {
        struct mcam_dma_desc *dma_desc; /* Descriptor virtual address */
        dma_addr_t dma_desc_pa;         /* Descriptor physical address */
        int dma_desc_nent;              /* Number of mapped descriptors */
+       struct yuv_pointer_t yuv_p;
 };
 
 static inline struct mcam_vb_buffer *vb_to_mvb(struct vb2_buffer *vb)
@@ -219,8 +263,10 @@ static void mcam_reset_buffers(struct mcam_camera *cam)
        int i;
 
        cam->next_buf = -1;
-       for (i = 0; i < cam->nbufs; i++)
+       for (i = 0; i < cam->nbufs; i++) {
                clear_bit(i, &cam->flags);
+               clear_bit(CF_FRAME_SOF0 + i, &cam->flags);
+       }
 }
 
 static inline int mcam_needs_config(struct mcam_camera *cam)
@@ -253,6 +299,45 @@ static void mcam_ctlr_stop(struct mcam_camera *cam)
        mcam_reg_clear_bit(cam, REG_CTRL0, C0_ENABLE);
 }
 
+static void mcam_enable_mipi(struct mcam_camera *mcam)
+{
+       /* Using MIPI mode and enable MIPI */
+       cam_dbg(mcam, "camera: DPHY3=0x%x, DPHY5=0x%x, DPHY6=0x%x\n",
+                       mcam->dphy[0], mcam->dphy[1], mcam->dphy[2]);
+       mcam_reg_write(mcam, REG_CSI2_DPHY3, mcam->dphy[0]);
+       mcam_reg_write(mcam, REG_CSI2_DPHY5, mcam->dphy[1]);
+       mcam_reg_write(mcam, REG_CSI2_DPHY6, mcam->dphy[2]);
+
+       if (!mcam->mipi_enabled) {
+               if (mcam->lane > 4 || mcam->lane <= 0) {
+                       cam_warn(mcam, "lane number error\n");
+                       mcam->lane = 1; /* set the default value */
+               }
+               /*
+                * 0x41 activates 1 lane
+                * 0x43 activates 2 lanes
+                * 0x45 activates 3 lanes (never happens)
+                * 0x47 activates 4 lanes
+                */
+               mcam_reg_write(mcam, REG_CSI2_CTRL0,
+                       CSI2_C0_MIPI_EN | CSI2_C0_ACT_LANE(mcam->lane));
+               mcam_reg_write(mcam, REG_CLKCTRL,
+                       (mcam->mclk_src << 29) | mcam->mclk_div);
+
+               mcam->mipi_enabled = true;
+       }
+}
+
+static void mcam_disable_mipi(struct mcam_camera *mcam)
+{
+       /* Using Parallel mode or disable MIPI */
+       mcam_reg_write(mcam, REG_CSI2_CTRL0, 0x0);
+       mcam_reg_write(mcam, REG_CSI2_DPHY3, 0x0);
+       mcam_reg_write(mcam, REG_CSI2_DPHY5, 0x0);
+       mcam_reg_write(mcam, REG_CSI2_DPHY6, 0x0);
+       mcam->mipi_enabled = false;
+}
+
 /* ------------------------------------------------------------------- */
 
 #ifdef MCAM_MODE_VMALLOC
@@ -425,6 +510,15 @@ static inline int mcam_check_dma_buffers(struct mcam_camera *cam)
 /*
  * DMA-contiguous code.
  */
+
+static bool mcam_fmt_is_planar(__u32 pfmt)
+{
+       struct mcam_format_struct *f;
+
+       f = mcam_find_format(pfmt);
+       return f->planar;
+}
+
 /*
  * Set up a contiguous buffer for the given frame.  Here also is where
  * the underrun strategy is set: if there is no buffer available, reuse
@@ -436,27 +530,58 @@ static inline int mcam_check_dma_buffers(struct mcam_camera *cam)
 static void mcam_set_contig_buffer(struct mcam_camera *cam, int frame)
 {
        struct mcam_vb_buffer *buf;
+       struct v4l2_pix_format *fmt = &cam->pix_format;
+       dma_addr_t dma_handle;
+       u32 pixel_count = fmt->width * fmt->height;
+       struct vb2_buffer *vb;
+
        /*
         * If there are no available buffers, go into single mode
         */
        if (list_empty(&cam->buffers)) {
                buf = cam->vb_bufs[frame ^ 0x1];
-               cam->vb_bufs[frame] = buf;
-               mcam_reg_write(cam, frame == 0 ? REG_Y0BAR : REG_Y1BAR,
-                               vb2_dma_contig_plane_dma_addr(&buf->vb_buf, 0));
                set_bit(CF_SINGLE_BUFFER, &cam->flags);
                cam->frame_state.singles++;
-               return;
+       } else {
+               /*
+                * OK, we have a buffer we can use.
+                */
+               buf = list_first_entry(&cam->buffers, struct mcam_vb_buffer,
+                                       queue);
+               list_del_init(&buf->queue);
+               clear_bit(CF_SINGLE_BUFFER, &cam->flags);
        }
-       /*
-        * OK, we have a buffer we can use.
-        */
-       buf = list_first_entry(&cam->buffers, struct mcam_vb_buffer, queue);
-       list_del_init(&buf->queue);
-       mcam_reg_write(cam, frame == 0 ? REG_Y0BAR : REG_Y1BAR,
-                       vb2_dma_contig_plane_dma_addr(&buf->vb_buf, 0));
+
        cam->vb_bufs[frame] = buf;
-       clear_bit(CF_SINGLE_BUFFER, &cam->flags);
+       vb = &buf->vb_buf;
+
+       dma_handle = vb2_dma_contig_plane_dma_addr(vb, 0);
+       buf->yuv_p.y = dma_handle;
+
+       switch (cam->pix_format.pixelformat) {
+       case V4L2_PIX_FMT_YUV422P:
+               buf->yuv_p.u = buf->yuv_p.y + pixel_count;
+               buf->yuv_p.v = buf->yuv_p.u + pixel_count / 2;
+               break;
+       case V4L2_PIX_FMT_YUV420:
+               buf->yuv_p.u = buf->yuv_p.y + pixel_count;
+               buf->yuv_p.v = buf->yuv_p.u + pixel_count / 4;
+               break;
+       case V4L2_PIX_FMT_YVU420:
+               buf->yuv_p.v = buf->yuv_p.y + pixel_count;
+               buf->yuv_p.u = buf->yuv_p.v + pixel_count / 4;
+               break;
+       default:
+               break;
+       }
+
+       mcam_reg_write(cam, frame == 0 ? REG_Y0BAR : REG_Y1BAR, buf->yuv_p.y);
+       if (mcam_fmt_is_planar(fmt->pixelformat)) {
+               mcam_reg_write(cam, frame == 0 ?
+                                       REG_U0BAR : REG_U1BAR, buf->yuv_p.u);
+               mcam_reg_write(cam, frame == 0 ?
+                                       REG_V0BAR : REG_V1BAR, buf->yuv_p.v);
+       }
 }
 
 /*
@@ -614,48 +739,90 @@ static inline void mcam_sg_restart(struct mcam_camera *cam)
  */
 static void mcam_ctlr_image(struct mcam_camera *cam)
 {
-       int imgsz;
        struct v4l2_pix_format *fmt = &cam->pix_format;
+       u32 widthy = 0, widthuv = 0, imgsz_h, imgsz_w;
+
+       cam_dbg(cam, "camera: bytesperline = %d; height = %d\n",
+               fmt->bytesperline, fmt->sizeimage / fmt->bytesperline);
+       imgsz_h = (fmt->height << IMGSZ_V_SHIFT) & IMGSZ_V_MASK;
+       imgsz_w = (fmt->width * 2) & IMGSZ_H_MASK;
+
+       switch (fmt->pixelformat) {
+       case V4L2_PIX_FMT_YUYV:
+       case V4L2_PIX_FMT_UYVY:
+               widthy = fmt->width * 2;
+               widthuv = 0;
+               break;
+       case V4L2_PIX_FMT_JPEG:
+               imgsz_h = (fmt->sizeimage / fmt->bytesperline) << IMGSZ_V_SHIFT;
+               widthy = fmt->bytesperline;
+               widthuv = 0;
+               break;
+       case V4L2_PIX_FMT_YUV422P:
+       case V4L2_PIX_FMT_YUV420:
+       case V4L2_PIX_FMT_YVU420:
+               widthy = fmt->width;
+               widthuv = fmt->width / 2;
+               break;
+       default:
+               widthy = fmt->bytesperline;
+               widthuv = 0;
+       }
+
+       mcam_reg_write_mask(cam, REG_IMGPITCH, widthuv << 16 | widthy,
+                       IMGP_YP_MASK | IMGP_UVP_MASK);
+       mcam_reg_write(cam, REG_IMGSIZE, imgsz_h | imgsz_w);
+       mcam_reg_write(cam, REG_IMGOFFSET, 0x0);
 
-       imgsz = ((fmt->height << IMGSZ_V_SHIFT) & IMGSZ_V_MASK) |
-               (fmt->bytesperline & IMGSZ_H_MASK);
-       mcam_reg_write(cam, REG_IMGSIZE, imgsz);
-       mcam_reg_write(cam, REG_IMGOFFSET, 0);
-       /* YPITCH just drops the last two bits */
-       mcam_reg_write_mask(cam, REG_IMGPITCH, fmt->bytesperline,
-                       IMGP_YP_MASK);
        /*
         * Tell the controller about the image format we are using.
         */
-       switch (cam->pix_format.pixelformat) {
+       switch (fmt->pixelformat) {
+       case V4L2_PIX_FMT_YUV422P:
+               mcam_reg_write_mask(cam, REG_CTRL0,
+                       C0_DF_YUV | C0_YUV_PLANAR | C0_YUVE_YVYU, C0_DF_MASK);
+               break;
+       case V4L2_PIX_FMT_YUV420:
+       case V4L2_PIX_FMT_YVU420:
+               mcam_reg_write_mask(cam, REG_CTRL0,
+                       C0_DF_YUV | C0_YUV_420PL | C0_YUVE_YVYU, C0_DF_MASK);
+               break;
        case V4L2_PIX_FMT_YUYV:
-           mcam_reg_write_mask(cam, REG_CTRL0,
-                           C0_DF_YUV|C0_YUV_PACKED|C0_YUVE_YUYV,
-                           C0_DF_MASK);
-           break;
-
+               mcam_reg_write_mask(cam, REG_CTRL0,
+                       C0_DF_YUV | C0_YUV_PACKED | C0_YUVE_UYVY, C0_DF_MASK);
+               break;
+       case V4L2_PIX_FMT_UYVY:
+               mcam_reg_write_mask(cam, REG_CTRL0,
+                       C0_DF_YUV | C0_YUV_PACKED | C0_YUVE_YUYV, C0_DF_MASK);
+               break;
+       case V4L2_PIX_FMT_JPEG:
+               mcam_reg_write_mask(cam, REG_CTRL0,
+                       C0_DF_YUV | C0_YUV_PACKED | C0_YUVE_YUYV, C0_DF_MASK);
+               break;
        case V4L2_PIX_FMT_RGB444:
-           mcam_reg_write_mask(cam, REG_CTRL0,
-                           C0_DF_RGB|C0_RGBF_444|C0_RGB4_XRGB,
-                           C0_DF_MASK);
+               mcam_reg_write_mask(cam, REG_CTRL0,
+                       C0_DF_RGB | C0_RGBF_444 | C0_RGB4_XRGB, C0_DF_MASK);
                /* Alpha value? */
-           break;
-
+               break;
        case V4L2_PIX_FMT_RGB565:
-           mcam_reg_write_mask(cam, REG_CTRL0,
-                           C0_DF_RGB|C0_RGBF_565|C0_RGB5_BGGR,
-                           C0_DF_MASK);
-           break;
-
+               mcam_reg_write_mask(cam, REG_CTRL0,
+                       C0_DF_RGB | C0_RGBF_565 | C0_RGB5_BGGR, C0_DF_MASK);
+               break;
        default:
-           cam_err(cam, "Unknown format %x\n", cam->pix_format.pixelformat);
-           break;
+               cam_err(cam, "camera: unknown format: %#x\n", fmt->pixelformat);
+               break;
        }
+
        /*
         * Make sure it knows we want to use hsync/vsync.
         */
-       mcam_reg_write_mask(cam, REG_CTRL0, C0_SIF_HVSYNC,
-                       C0_SIFM_MASK);
+       mcam_reg_write_mask(cam, REG_CTRL0, C0_SIF_HVSYNC, C0_SIFM_MASK);
+       /*
+        * This field controls the generation of EOF (DVP only)
+        */
+       if (cam->bus_type != V4L2_MBUS_CSI2)
+               mcam_reg_set_bit(cam, REG_CTRL0,
+                               C0_EOF_VSYNC | C0_VEDGE_CTRL);
 }
 
 
@@ -753,15 +920,21 @@ static void mcam_ctlr_stop_dma(struct mcam_camera *cam)
 /*
  * Power up and down.
  */
-static void mcam_ctlr_power_up(struct mcam_camera *cam)
+static int mcam_ctlr_power_up(struct mcam_camera *cam)
 {
        unsigned long flags;
+       int ret;
 
        spin_lock_irqsave(&cam->dev_lock, flags);
-       cam->plat_power_up(cam);
+       ret = cam->plat_power_up(cam);
+       if (ret) {
+               spin_unlock_irqrestore(&cam->dev_lock, flags);
+               return ret;
+       }
        mcam_reg_clear_bit(cam, REG_CTRL1, C1_PWRDWN);
        spin_unlock_irqrestore(&cam->dev_lock, flags);
        msleep(5); /* Just to be sure */
+       return 0;
 }
 
 static void mcam_ctlr_power_down(struct mcam_camera *cam)
@@ -869,6 +1042,17 @@ static int mcam_read_setup(struct mcam_camera *cam)
        spin_lock_irqsave(&cam->dev_lock, flags);
        clear_bit(CF_DMA_ACTIVE, &cam->flags);
        mcam_reset_buffers(cam);
+       /*
+        * Update CSI2_DPHY value
+        */
+       if (cam->calc_dphy)
+               cam->calc_dphy(cam);
+       cam_dbg(cam, "camera: DPHY sets: dphy3=0x%x, dphy5=0x%x, dphy6=0x%x\n",
+                       cam->dphy[0], cam->dphy[1], cam->dphy[2]);
+       if (cam->bus_type == V4L2_MBUS_CSI2)
+               mcam_enable_mipi(cam);
+       else
+               mcam_disable_mipi(cam);
        mcam_ctlr_irq_enable(cam);
        cam->state = S_STREAMING;
        if (!test_bit(CF_SG_RESTART, &cam->flags))
@@ -943,6 +1127,7 @@ static void mcam_vb_wait_finish(struct vb2_queue *vq)
 static int mcam_vb_start_streaming(struct vb2_queue *vq, unsigned int count)
 {
        struct mcam_camera *cam = vb2_get_drv_priv(vq);
+       unsigned int frame;
 
        if (cam->state != S_IDLE) {
                INIT_LIST_HEAD(&cam->buffers);
@@ -960,6 +1145,14 @@ static int mcam_vb_start_streaming(struct vb2_queue *vq, unsigned int count)
                cam->state = S_BUFWAIT;
                return 0;
        }
+
+       /*
+        * Ensure that any leftover frame flags are cleared
+        * before we really start streaming
+        */
+       for (frame = 0; frame < cam->nbufs; frame++)
+               clear_bit(CF_FRAME_SOF0 + frame, &cam->flags);
+
        return mcam_read_setup(cam);
 }
 
@@ -976,6 +1169,12 @@ static int mcam_vb_stop_streaming(struct vb2_queue *vq)
        if (cam->state != S_STREAMING)
                return -EINVAL;
        mcam_ctlr_stop_dma(cam);
+       /*
+        * Reset the CCIC PHY after stopping streaming,
+        * otherwise, the CCIC may be unstable.
+        */
+       if (cam->ctlr_reset)
+               cam->ctlr_reset(cam);
        /*
         * VB2 reclaims the buffers, so we need to forget
         * about them.
@@ -1087,6 +1286,7 @@ static int mcam_setup_vb2(struct mcam_camera *cam)
 #ifdef MCAM_MODE_DMA_CONTIG
                vq->ops = &mcam_vb2_ops;
                vq->mem_ops = &vb2_dma_contig_memops;
+               vq->buf_struct_size = sizeof(struct mcam_vb_buffer);
                cam->vb_alloc_ctx = vb2_dma_contig_init_ctx(cam->dev);
                vq->io_modes = VB2_MMAP | VB2_USERPTR;
                cam->dma_setup = mcam_ctlr_dma_contig;
@@ -1097,6 +1297,7 @@ static int mcam_setup_vb2(struct mcam_camera *cam)
 #ifdef MCAM_MODE_DMA_SG
                vq->ops = &mcam_vb2_sg_ops;
                vq->mem_ops = &vb2_dma_sg_memops;
+               vq->buf_struct_size = sizeof(struct mcam_vb_buffer);
                vq->io_modes = VB2_MMAP | VB2_USERPTR;
                cam->dma_setup = mcam_ctlr_dma_sg;
                cam->frame_complete = mcam_dma_sg_done;
@@ -1247,7 +1448,15 @@ static int mcam_vidioc_try_fmt_vid_cap(struct file *filp, void *priv,
        ret = sensor_call(cam, video, try_mbus_fmt, &mbus_fmt);
        mutex_unlock(&cam->s_mutex);
        v4l2_fill_pix_format(pix, &mbus_fmt);
-       pix->bytesperline = pix->width * f->bpp;
+       switch (f->pixelformat) {
+       case V4L2_PIX_FMT_YUV420:
+       case V4L2_PIX_FMT_YVU420:
+               pix->bytesperline = pix->width * 3 / 2;
+               break;
+       default:
+               pix->bytesperline = pix->width * f->bpp;
+               break;
+       }
        pix->sizeimage = pix->height * pix->bytesperline;
        return ret;
 }
@@ -1475,7 +1684,9 @@ static int mcam_v4l_open(struct file *filp)
                ret = mcam_setup_vb2(cam);
                if (ret)
                        goto out;
-               mcam_ctlr_power_up(cam);
+               ret = mcam_ctlr_power_up(cam);
+               if (ret)
+                       goto out;
                __mcam_cam_reset(cam);
                mcam_set_config_needed(cam, 1);
        }
@@ -1498,10 +1709,12 @@ static int mcam_v4l_release(struct file *filp)
        if (cam->users == 0) {
                mcam_ctlr_stop_dma(cam);
                mcam_cleanup_vb2(cam);
+               mcam_disable_mipi(cam);
                mcam_ctlr_power_down(cam);
                if (cam->buffer_mode == B_vmalloc && alloc_bufs_at_read)
                        mcam_free_dma_bufs(cam);
        }
+
        mutex_unlock(&cam->s_mutex);
        return 0;
 }
@@ -1617,9 +1830,11 @@ int mccic_irq(struct mcam_camera *cam, unsigned int irqs)
         * each time.
         */
        for (frame = 0; frame < cam->nbufs; frame++)
-               if (irqs & (IRQ_EOF0 << frame)) {
+               if (irqs & (IRQ_EOF0 << frame) &&
+                       test_bit(CF_FRAME_SOF0 + frame, &cam->flags)) {
                        mcam_frame_complete(cam, frame);
                        handled = 1;
+                       clear_bit(CF_FRAME_SOF0 + frame, &cam->flags);
                        if (cam->buffer_mode == B_DMA_sg)
                                break;
                }
@@ -1628,9 +1843,15 @@ int mccic_irq(struct mcam_camera *cam, unsigned int irqs)
         * code assumes that we won't get multiple frame interrupts
         * at once; may want to rethink that.
         */
-       if (irqs & (IRQ_SOF0 | IRQ_SOF1 | IRQ_SOF2)) {
+       for (frame = 0; frame < cam->nbufs; frame++) {
+               if (irqs & (IRQ_SOF0 << frame)) {
+                       set_bit(CF_FRAME_SOF0 + frame, &cam->flags);
+                       handled = IRQ_HANDLED;
+               }
+       }
+
+       if (handled == IRQ_HANDLED) {
                set_bit(CF_DMA_ACTIVE, &cam->flags);
-               handled = 1;
                if (cam->buffer_mode == B_DMA_sg)
                        mcam_ctlr_stop(cam);
        }
@@ -1787,7 +2008,11 @@ int mccic_resume(struct mcam_camera *cam)
 
        mutex_lock(&cam->s_mutex);
        if (cam->users > 0) {
-               mcam_ctlr_power_up(cam);
+               ret = mcam_ctlr_power_up(cam);
+               if (ret) {
+                       mutex_unlock(&cam->s_mutex);
+                       return ret;
+               }
                __mcam_cam_reset(cam);
        } else {
                mcam_ctlr_power_down(cam);
index 520c8ded9443f5869e101f8ca92d3c3346e9894f..e0e628cb98f94c7a5d4b90588d2a85449ad44547 100644 (file)
@@ -88,6 +88,8 @@ struct mcam_frame_state {
        unsigned int delivered;
 };
 
+#define NR_MCAM_CLK 3
+
 /*
  * A description of one of our devices.
  * Locking: controlled by s_mutex.  Certain fields, however, require
@@ -108,11 +110,33 @@ struct mcam_camera {
        short int clock_speed;  /* Sensor clock speed, default 30 */
        short int use_smbus;    /* SMBUS or straight I2c? */
        enum mcam_buffer_mode buffer_mode;
+
+       int mclk_min;   /* The minimal value of mclk */
+       int mclk_src;   /* which clock source the mclk derives from */
+       int mclk_div;   /* Clock Divider Value for MCLK */
+
+       int ccic_id;
+       enum v4l2_mbus_type bus_type;
+       /* MIPI support */
+       /* The dphy config value, allocated in board file
+        * dphy[0]: DPHY3
+        * dphy[1]: DPHY5
+        * dphy[2]: DPHY6
+        */
+       int *dphy;
+       bool mipi_enabled;      /* flag whether mipi is enabled already */
+       int lane;                       /* lane number */
+
+       /* clock tree support */
+       struct clk *clk[NR_MCAM_CLK];
+
        /*
         * Callbacks from the core to the platform code.
         */
-       void (*plat_power_up) (struct mcam_camera *cam);
+       int (*plat_power_up) (struct mcam_camera *cam);
        void (*plat_power_down) (struct mcam_camera *cam);
+       void (*calc_dphy) (struct mcam_camera *cam);
+       void (*ctlr_reset) (struct mcam_camera *cam);
 
        /*
         * Everything below here is private to the mcam core and
@@ -225,6 +249,23 @@ int mccic_resume(struct mcam_camera *cam);
 #define REG_Y0BAR      0x00
 #define REG_Y1BAR      0x04
 #define REG_Y2BAR      0x08
+#define REG_U0BAR      0x0c
+#define REG_U1BAR      0x10
+#define REG_U2BAR      0x14
+#define REG_V0BAR      0x18
+#define REG_V1BAR      0x1C
+#define REG_V2BAR      0x20
+
+/*
+ * register definitions for MIPI support
+ */
+#define REG_CSI2_CTRL0 0x100
+#define   CSI2_C0_MIPI_EN (0x1 << 0)
+#define   CSI2_C0_ACT_LANE(n) ((n-1) << 1)
+#define REG_CSI2_DPHY3 0x12c
+#define REG_CSI2_DPHY5 0x134
+#define REG_CSI2_DPHY6 0x138
+
 /* ... */
 
 #define REG_IMGPITCH   0x24    /* Image pitch register */
@@ -293,13 +334,16 @@ int mccic_resume(struct mcam_camera *cam);
 #define          C0_YUVE_XUVY    0x00020000    /* 420: .UVY            */
 #define          C0_YUVE_XVUY    0x00030000    /* 420: .VUY            */
 /* Bayer bits 18,19 if needed */
+#define          C0_EOF_VSYNC    0x00400000    /* Generate EOF by VSYNC */
+#define          C0_VEDGE_CTRL   0x00800000    /* Detect falling edge of VSYNC */
 #define          C0_HPOL_LOW     0x01000000    /* HSYNC polarity active low */
 #define          C0_VPOL_LOW     0x02000000    /* VSYNC polarity active low */
 #define          C0_VCLK_LOW     0x04000000    /* VCLK on falling edge */
 #define          C0_DOWNSCALE    0x08000000    /* Enable downscaler */
-#define          C0_SIFM_MASK    0xc0000000    /* SIF mode bits */
+/* SIFMODE */
 #define          C0_SIF_HVSYNC   0x00000000    /* Use H/VSYNC */
-#define          CO_SOF_NOSYNC   0x40000000    /* Use inband active signaling */
+#define          C0_SOF_NOSYNC   0x40000000    /* Use inband active signaling */
+#define          C0_SIFM_MASK    0xc0000000    /* SIF mode bits */
 
 /* Bits below C1_444ALPHA are not present in Cafe */
 #define REG_CTRL1      0x40    /* Control 1 */
index a634888271cd7a38ff162bc56d4b97b88923e09f..f06daa4f2502000bfd6e2038d10f806640b67da7 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/delay.h>
 #include <linux/list.h>
 #include <linux/pm.h>
+#include <linux/clk.h>
 
 #include "mcam-core.h"
 
@@ -33,11 +34,14 @@ MODULE_ALIAS("platform:mmp-camera");
 MODULE_AUTHOR("Jonathan Corbet <corbet@lwn.net>");
 MODULE_LICENSE("GPL");
 
+static char *mcam_clks[] = {"CCICAXICLK", "CCICFUNCLK", "CCICPHYCLK"};
+
 struct mmp_camera {
        void *power_regs;
        struct platform_device *pdev;
        struct mcam_camera mcam;
        struct list_head devlist;
+       struct clk *mipi_clk;
        int irq;
 };
 
@@ -101,6 +105,27 @@ static struct mmp_camera *mmpcam_find_device(struct platform_device *pdev)
 #define CPU_SUBSYS_PMU_BASE    0xd4282800
 #define REG_CCIC_DCGCR         0x28    /* CCIC dyn clock gate ctrl reg */
 #define REG_CCIC_CRCR          0x50    /* CCIC clk reset ctrl reg      */
+#define REG_CCIC2_CRCR         0xf4    /* CCIC2 clk reset ctrl reg     */
+
+static void mcam_clk_enable(struct mcam_camera *mcam)
+{
+       unsigned int i;
+
+       for (i = 0; i < NR_MCAM_CLK; i++) {
+               if (!IS_ERR(mcam->clk[i]))
+                       clk_prepare_enable(mcam->clk[i]);
+       }
+}
+
+static void mcam_clk_disable(struct mcam_camera *mcam)
+{
+       int i;
+
+       for (i = NR_MCAM_CLK - 1; i >= 0; i--) {
+               if (!IS_ERR(mcam->clk[i]))
+                       clk_disable_unprepare(mcam->clk[i]);
+       }
+}
 
 /*
  * Power control.
@@ -112,10 +137,17 @@ static void mmpcam_power_up_ctlr(struct mmp_camera *cam)
        mdelay(1);
 }
 
-static void mmpcam_power_up(struct mcam_camera *mcam)
+static int mmpcam_power_up(struct mcam_camera *mcam)
 {
        struct mmp_camera *cam = mcam_to_cam(mcam);
        struct mmp_camera_platform_data *pdata;
+
+       if (mcam->bus_type == V4L2_MBUS_CSI2) {
+               cam->mipi_clk = devm_clk_get(mcam->dev, "mipi");
+               if ((IS_ERR(cam->mipi_clk) && mcam->dphy[2] == 0))
+                       return PTR_ERR(cam->mipi_clk);
+       }
+
 /*
  * Turn on power and clocks to the controller.
  */
@@ -132,6 +164,10 @@ static void mmpcam_power_up(struct mcam_camera *mcam)
        mdelay(5);
        gpio_set_value(pdata->sensor_reset_gpio, 1); /* reset is active low */
        mdelay(5);
+
+       mcam_clk_enable(mcam);
+
+       return 0;
 }
 
 static void mmpcam_power_down(struct mcam_camera *mcam)
@@ -149,8 +185,133 @@ static void mmpcam_power_down(struct mcam_camera *mcam)
        pdata = cam->pdev->dev.platform_data;
        gpio_set_value(pdata->sensor_power_gpio, 0);
        gpio_set_value(pdata->sensor_reset_gpio, 0);
+
+       if (mcam->bus_type == V4L2_MBUS_CSI2 && !IS_ERR(cam->mipi_clk)) {
+               if (cam->mipi_clk)
+                       devm_clk_put(mcam->dev, cam->mipi_clk);
+               cam->mipi_clk = NULL;
+       }
+
+       mcam_clk_disable(mcam);
 }
 
+void mcam_ctlr_reset(struct mcam_camera *mcam)
+{
+       unsigned long val;
+       struct mmp_camera *cam = mcam_to_cam(mcam);
+
+       if (mcam->ccic_id) {
+               /*
+                * Using CCIC2
+                */
+               val = ioread32(cam->power_regs + REG_CCIC2_CRCR);
+               iowrite32(val & ~0x2, cam->power_regs + REG_CCIC2_CRCR);
+               iowrite32(val | 0x2, cam->power_regs + REG_CCIC2_CRCR);
+       } else {
+               /*
+                * Using CCIC1
+                */
+               val = ioread32(cam->power_regs + REG_CCIC_CRCR);
+               iowrite32(val & ~0x2, cam->power_regs + REG_CCIC_CRCR);
+               iowrite32(val | 0x2, cam->power_regs + REG_CCIC_CRCR);
+       }
+}
+
+/*
+ * calc the dphy register values
+ * There are three dphy registers being used.
+ * dphy[0] - CSI2_DPHY3
+ * dphy[1] - CSI2_DPHY5
+ * dphy[2] - CSI2_DPHY6
+ * CSI2_DPHY3 and CSI2_DPHY6 can be set with a default value
+ * or be calculated dynamically
+ */
+void mmpcam_calc_dphy(struct mcam_camera *mcam)
+{
+       struct mmp_camera *cam = mcam_to_cam(mcam);
+       struct mmp_camera_platform_data *pdata = cam->pdev->dev.platform_data;
+       struct device *dev = &cam->pdev->dev;
+       unsigned long tx_clk_esc;
+
+       /*
+        * If CSI2_DPHY3 is calculated dynamically,
+        * pdata->lane_clk should be already set
+        * either in the board driver statically
+        * or in the sensor driver dynamically.
+        */
+       /*
+        * dphy[0] - CSI2_DPHY3:
+        *  bit 0 ~ bit 7: HS Term Enable.
+        *   defines the time that the DPHY
+        *   wait before enabling the data
+        *   lane termination after detecting
+        *   that the sensor has driven the data
+        *   lanes to the LP00 bridge state.
+        *   The value is calculated by:
+        *   (Max T(D_TERM_EN)/Period(DDR)) - 1
+        *  bit 8 ~ bit 15: HS_SETTLE
+        *   Time interval during which the HS
+        *   receiver shall ignore any Data Lane
+        *   HS transitions.
+        *   The value has been calibrated on
+        *   different boards. It seems to work well.
+        *
+        *  For more detail, please refer to the
+        *  MIPI Alliance Specification for D-PHY
+        *  document for an explanation of HS-SETTLE
+        *  and D-TERM-EN.
+        */
+       switch (pdata->dphy3_algo) {
+       case DPHY3_ALGO_PXA910:
+               /*
+                * Calculate CSI2_DPHY3 algo for PXA910
+                */
+               pdata->dphy[0] =
+                       (((1 + (pdata->lane_clk * 80) / 1000) & 0xff) << 8)
+                       | (1 + pdata->lane_clk * 35 / 1000);
+               break;
+       case DPHY3_ALGO_PXA2128:
+               /*
+                * Calculate CSI2_DPHY3 algo for PXA2128
+                */
+               pdata->dphy[0] =
+                       (((2 + (pdata->lane_clk * 110) / 1000) & 0xff) << 8)
+                       | (1 + pdata->lane_clk * 35 / 1000);
+               break;
+       default:
+               /*
+                * Use default CSI2_DPHY3 value for PXA688/PXA988
+                */
+               dev_dbg(dev, "camera: use the default CSI2_DPHY3 value\n");
+       }
+
+       /*
+        * mipi_clk will never be changed, it is a fixed value on MMP
+        */
+       if (IS_ERR(cam->mipi_clk))
+               return;
+
+       /* get the escape clk, this is hard coded */
+       tx_clk_esc = (clk_get_rate(cam->mipi_clk) / 1000000) / 12;
+
+       /*
+        * dphy[2] - CSI2_DPHY6:
+        * bit 0 ~ bit 7: CK Term Enable
+        *  Time for the Clock Lane receiver to enable the HS line
+        *  termination. The value is calculated similarly with
+        *  HS Term Enable
+        * bit 8 ~ bit 15: CK Settle
+        *  Time interval during which the HS receiver shall ignore
+        *  any Clock Lane HS transitions.
+        *  The value is calibrated on the boards.
+        */
+       pdata->dphy[2] =
+               ((((534 * tx_clk_esc) / 2000 - 1) & 0xff) << 8)
+               | (((38 * tx_clk_esc) / 1000 - 1) & 0xff);
+
+       dev_dbg(dev, "camera: DPHY sets: dphy3=0x%x, dphy5=0x%x, dphy6=0x%x\n",
+               pdata->dphy[0], pdata->dphy[1], pdata->dphy[2]);
+}
 
 static irqreturn_t mmpcam_irq(int irq, void *data)
 {
@@ -164,6 +325,35 @@ static irqreturn_t mmpcam_irq(int irq, void *data)
        return IRQ_RETVAL(handled);
 }
 
+static void mcam_deinit_clk(struct mcam_camera *mcam)
+{
+       unsigned int i;
+
+       for (i = 0; i < NR_MCAM_CLK; i++) {
+               if (!IS_ERR(mcam->clk[i])) {
+                       if (mcam->clk[i])
+                               devm_clk_put(mcam->dev, mcam->clk[i]);
+               }
+               mcam->clk[i] = NULL;
+       }
+}
+
+static void mcam_init_clk(struct mcam_camera *mcam)
+{
+       unsigned int i;
+
+       for (i = 0; i < NR_MCAM_CLK; i++) {
+               if (mcam_clks[i] != NULL) {
+                       /* Some clks are not necessary on some boards
+                        * We still try to run even if getting a clk fails
+                        */
+                       mcam->clk[i] = devm_clk_get(mcam->dev, mcam_clks[i]);
+                       if (IS_ERR(mcam->clk[i]))
+                               dev_warn(mcam->dev, "Could not get clk: %s\n",
+                                               mcam_clks[i]);
+               }
+       }
+}
 
 static int mmpcam_probe(struct platform_device *pdev)
 {
@@ -173,17 +363,32 @@ static int mmpcam_probe(struct platform_device *pdev)
        struct mmp_camera_platform_data *pdata;
        int ret;
 
-       cam = kzalloc(sizeof(*cam), GFP_KERNEL);
+       pdata = pdev->dev.platform_data;
+       if (!pdata)
+               return -ENODEV;
+
+       cam = devm_kzalloc(&pdev->dev, sizeof(*cam), GFP_KERNEL);
        if (cam == NULL)
                return -ENOMEM;
        cam->pdev = pdev;
+       cam->mipi_clk = NULL;
        INIT_LIST_HEAD(&cam->devlist);
 
        mcam = &cam->mcam;
        mcam->plat_power_up = mmpcam_power_up;
        mcam->plat_power_down = mmpcam_power_down;
+       mcam->ctlr_reset = mcam_ctlr_reset;
+       mcam->calc_dphy = mmpcam_calc_dphy;
        mcam->dev = &pdev->dev;
        mcam->use_smbus = 0;
+       mcam->ccic_id = pdev->id;
+       mcam->mclk_min = pdata->mclk_min;
+       mcam->mclk_src = pdata->mclk_src;
+       mcam->mclk_div = pdata->mclk_div;
+       mcam->bus_type = pdata->bus_type;
+       mcam->dphy = pdata->dphy;
+       mcam->mipi_enabled = false;
+       mcam->lane = pdata->lane;
        mcam->chip_id = MCAM_ARMADA610;
        mcam->buffer_mode = B_DMA_sg;
        spin_lock_init(&mcam->dev_lock);
@@ -193,15 +398,11 @@ static int mmpcam_probe(struct platform_device *pdev)
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (res == NULL) {
                dev_err(&pdev->dev, "no iomem resource!\n");
-               ret = -ENODEV;
-               goto out_free;
-       }
-       mcam->regs = ioremap(res->start, resource_size(res));
-       if (mcam->regs == NULL) {
-               dev_err(&pdev->dev, "MMIO ioremap fail\n");
-               ret = -ENODEV;
-               goto out_free;
+               return -ENODEV;
        }
+       mcam->regs = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(mcam->regs))
+               return PTR_ERR(mcam->regs);
        mcam->regs_size = resource_size(res);
        /*
         * Power/clock memory is elsewhere; get it too.  Perhaps this
@@ -210,50 +411,51 @@ static int mmpcam_probe(struct platform_device *pdev)
        res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
        if (res == NULL) {
                dev_err(&pdev->dev, "no power resource!\n");
-               ret = -ENODEV;
-               goto out_unmap1;
-       }
-       cam->power_regs = ioremap(res->start, resource_size(res));
-       if (cam->power_regs == NULL) {
-               dev_err(&pdev->dev, "power MMIO ioremap fail\n");
-               ret = -ENODEV;
-               goto out_unmap1;
+               return -ENODEV;
        }
+       cam->power_regs = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(cam->power_regs))
+               return PTR_ERR(cam->power_regs);
        /*
         * Find the i2c adapter.  This assumes, of course, that the
         * i2c bus is already up and functioning.
         */
-       pdata = pdev->dev.platform_data;
        mcam->i2c_adapter = platform_get_drvdata(pdata->i2c_device);
        if (mcam->i2c_adapter == NULL) {
-               ret = -ENODEV;
                dev_err(&pdev->dev, "No i2c adapter\n");
-               goto out_unmap2;
+               return -ENODEV;
        }
        /*
         * Sensor GPIO pins.
         */
-       ret = gpio_request(pdata->sensor_power_gpio, "cam-power");
+       ret = devm_gpio_request(&pdev->dev, pdata->sensor_power_gpio,
+                                                       "cam-power");
        if (ret) {
                dev_err(&pdev->dev, "Can't get sensor power gpio %d",
                                pdata->sensor_power_gpio);
-               goto out_unmap2;
+               return ret;
        }
        gpio_direction_output(pdata->sensor_power_gpio, 0);
-       ret = gpio_request(pdata->sensor_reset_gpio, "cam-reset");
+       ret = devm_gpio_request(&pdev->dev, pdata->sensor_reset_gpio,
+                                                       "cam-reset");
        if (ret) {
                dev_err(&pdev->dev, "Can't get sensor reset gpio %d",
                                pdata->sensor_reset_gpio);
-               goto out_gpio;
+               return ret;
        }
        gpio_direction_output(pdata->sensor_reset_gpio, 0);
+
+       mcam_init_clk(mcam);
+
        /*
         * Power the device up and hand it off to the core.
         */
-       mmpcam_power_up(mcam);
+       ret = mmpcam_power_up(mcam);
+       if (ret)
+               goto out_deinit_clk;
        ret = mccic_register(mcam);
        if (ret)
-               goto out_gpio2;
+               goto out_power_down;
        /*
         * Finally, set up our IRQ now that the core is ready to
         * deal with it.
@@ -264,8 +466,8 @@ static int mmpcam_probe(struct platform_device *pdev)
                goto out_unregister;
        }
        cam->irq = res->start;
-       ret = request_irq(cam->irq, mmpcam_irq, IRQF_SHARED,
-                       "mmp-camera", mcam);
+       ret = devm_request_irq(&pdev->dev, cam->irq, mmpcam_irq, IRQF_SHARED,
+                                       "mmp-camera", mcam);
        if (ret == 0) {
                mmpcam_add_device(cam);
                return 0;
@@ -273,17 +475,10 @@ static int mmpcam_probe(struct platform_device *pdev)
 
 out_unregister:
        mccic_shutdown(mcam);
-out_gpio2:
+out_power_down:
        mmpcam_power_down(mcam);
-       gpio_free(pdata->sensor_reset_gpio);
-out_gpio:
-       gpio_free(pdata->sensor_power_gpio);
-out_unmap2:
-       iounmap(cam->power_regs);
-out_unmap1:
-       iounmap(mcam->regs);
-out_free:
-       kfree(cam);
+out_deinit_clk:
+       mcam_deinit_clk(mcam);
        return ret;
 }
 
@@ -300,6 +495,7 @@ static int mmpcam_remove(struct mmp_camera *cam)
        pdata = cam->pdev->dev.platform_data;
        gpio_free(pdata->sensor_reset_gpio);
        gpio_free(pdata->sensor_power_gpio);
+       mcam_deinit_clk(mcam);
        iounmap(cam->power_regs);
        iounmap(mcam->regs);
        kfree(cam);
index f2de0066089ad0093b1e9619642769ce59076e0c..dae9716e34b1e932ff9263c9901857bc1db85d7d 100644 (file)
@@ -1837,9 +1837,9 @@ static int sh_mobile_ceu_probe(struct platform_device *pdev)
                for (j = 0; pcdev->pdata->asd_sizes[j]; j++) {
                        for (i = 0; i < pcdev->pdata->asd_sizes[j]; i++, asd++) {
                                dev_dbg(&pdev->dev, "%s(): subdev #%d, type %u\n",
-                                       __func__, i, (*asd)->bus_type);
-                               if ((*asd)->bus_type == V4L2_ASYNC_BUS_PLATFORM &&
-                                   !strncmp(name, (*asd)->match.platform.name,
+                                       __func__, i, (*asd)->match_type);
+                               if ((*asd)->match_type == V4L2_ASYNC_MATCH_DEVNAME &&
+                                   !strncmp(name, (*asd)->match.device_name.name,
                                             sizeof(name) - 1)) {
                                        pcdev->csi2_asd = *asd;
                                        break;
index 2dd0e5272941079a80a2fa4cac0c366035ec4d20..4b42572253e0fad7be92239e9f6a1582e068b2fb 100644 (file)
@@ -1475,7 +1475,7 @@ static int scan_async_group(struct soc_camera_host *ici,
                        break;
        }
 
-       if (i == size || asd[i]->bus_type != V4L2_ASYNC_BUS_I2C) {
+       if (i == size || asd[i]->match_type != V4L2_ASYNC_MATCH_I2C) {
                /* All useless */
                dev_err(ici->v4l2_dev.dev, "No I2C data source found!\n");
                return -ENODEV;
@@ -1501,7 +1501,7 @@ static int scan_async_group(struct soc_camera_host *ici,
                return -ENOMEM;
        }
 
-       sasc->notifier.subdev = asd;
+       sasc->notifier.subdevs = asd;
        sasc->notifier.num_subdevs = size;
        sasc->notifier.bound = soc_camera_async_bound;
        sasc->notifier.unbind = soc_camera_async_unbind;
index 177bcbd7a7c1c1e72b3459e618d56fb69bba617e..705dd6f9162c019b55ee0ab987bbf04d7c0df59f 100644 (file)
@@ -26,6 +26,7 @@
 #include <media/v4l2-ioctl.h>
 #include <media/v4l2-ctrls.h>
 #include "radio-isa.h"
+#include "lm7000.h"
 
 MODULE_AUTHOR("Russell Kroll, Quay Lu, Donald Song, Jason Lewis, Scott McGrath, William McGrath");
 MODULE_DESCRIPTION("A driver for the Aztech radio card.");
@@ -54,18 +55,29 @@ struct aztech {
        int curvol;
 };
 
-static void send_0_byte(struct aztech *az)
-{
-       udelay(radio_wait_time);
-       outb_p(2 + az->curvol, az->isa.io);
-       outb_p(64 + 2 + az->curvol, az->isa.io);
-}
+/* bit definitions for register read */
+#define AZTECH_BIT_NOT_TUNED   (1 << 0)
+#define AZTECH_BIT_MONO                (1 << 1)
+/* bit definitions for register write */
+#define AZTECH_BIT_TUN_CE      (1 << 1)
+#define AZTECH_BIT_TUN_CLK     (1 << 6)
+#define AZTECH_BIT_TUN_DATA    (1 << 7)
+/* bits 0 and 2 are volume control, bits 3..5 are not connected */
 
-static void send_1_byte(struct aztech *az)
+static void aztech_set_pins(void *handle, u8 pins)
 {
-       udelay(radio_wait_time);
-       outb_p(128 + 2 + az->curvol, az->isa.io);
-       outb_p(128 + 64 + 2 + az->curvol, az->isa.io);
+       struct radio_isa_card *isa = handle;
+       struct aztech *az = container_of(isa, struct aztech, isa);
+       u8 bits = az->curvol;
+
+       if (pins & LM7000_DATA)
+               bits |= AZTECH_BIT_TUN_DATA;
+       if (pins & LM7000_CLK)
+               bits |= AZTECH_BIT_TUN_CLK;
+       if (pins & LM7000_CE)
+               bits |= AZTECH_BIT_TUN_CE;
+
+       outb_p(bits, az->isa.io);
 }
 
 static struct radio_isa_card *aztech_alloc(void)
@@ -77,58 +89,21 @@ static struct radio_isa_card *aztech_alloc(void)
 
 static int aztech_s_frequency(struct radio_isa_card *isa, u32 freq)
 {
-       struct aztech *az = container_of(isa, struct aztech, isa);
-       int  i;
-
-       freq += 171200;                 /* Add 10.7 MHz IF              */
-       freq /= 800;                    /* Convert to 50 kHz units      */
-
-       send_0_byte(az);                /*  0: LSB of frequency       */
-
-       for (i = 0; i < 13; i++)        /*   : frequency bits (1-13)  */
-               if (freq & (1 << i))
-                       send_1_byte(az);
-               else
-                       send_0_byte(az);
-
-       send_0_byte(az);                /* 14: test bit - always 0    */
-       send_0_byte(az);                /* 15: test bit - always 0    */
-       send_0_byte(az);                /* 16: band data 0 - always 0 */
-       if (isa->stereo)                /* 17: stereo (1 to enable)   */
-               send_1_byte(az);
-       else
-               send_0_byte(az);
-
-       send_1_byte(az);                /* 18: band data 1 - unknown  */
-       send_0_byte(az);                /* 19: time base - always 0   */
-       send_0_byte(az);                /* 20: spacing (0 = 25 kHz)   */
-       send_1_byte(az);                /* 21: spacing (1 = 25 kHz)   */
-       send_0_byte(az);                /* 22: spacing (0 = 25 kHz)   */
-       send_1_byte(az);                /* 23: AM/FM (FM = 1, always) */
-
-       /* latch frequency */
-
-       udelay(radio_wait_time);
-       outb_p(128 + 64 + az->curvol, az->isa.io);
+       lm7000_set_freq(freq, isa, aztech_set_pins);
 
        return 0;
 }
 
-/* thanks to Michael Dwyer for giving me a dose of clues in
- * the signal strength department..
- *
- * This card has a stereo bit - bit 0 set = mono, not set = stereo
- */
 static u32 aztech_g_rxsubchans(struct radio_isa_card *isa)
 {
-       if (inb(isa->io) & 1)
+       if (inb(isa->io) & AZTECH_BIT_MONO)
                return V4L2_TUNER_SUB_MONO;
        return V4L2_TUNER_SUB_STEREO;
 }
 
-static int aztech_s_stereo(struct radio_isa_card *isa, bool stereo)
+static u32 aztech_g_signal(struct radio_isa_card *isa)
 {
-       return aztech_s_frequency(isa, isa->freq);
+       return (inb(isa->io) & AZTECH_BIT_NOT_TUNED) ? 0 : 0xffff;
 }
 
 static int aztech_s_mute_volume(struct radio_isa_card *isa, bool mute, int vol)
@@ -146,8 +121,8 @@ static const struct radio_isa_ops aztech_ops = {
        .alloc = aztech_alloc,
        .s_mute_volume = aztech_s_mute_volume,
        .s_frequency = aztech_s_frequency,
-       .s_stereo = aztech_s_stereo,
        .g_rxsubchans = aztech_g_rxsubchans,
+       .g_signal = aztech_g_signal,
 };
 
 static const int aztech_ioports[] = { 0x350, 0x358 };
@@ -165,7 +140,7 @@ static struct radio_isa_driver aztech_driver = {
        .radio_nr_params = radio_nr,
        .io_ports = aztech_ioports,
        .num_of_io_ports = ARRAY_SIZE(aztech_ioports),
-       .region_size = 2,
+       .region_size = 8,
        .card = "Aztech Radio",
        .ops = &aztech_ops,
        .has_stereo = true,
index bd4d3a7cdadd130511cc186679124fd6231a929c..1d1c9e1d386ea4d33c2b835075947fe9b0c9b200 100644 (file)
@@ -200,15 +200,4 @@ static struct pci_driver maxiradio_driver = {
        .remove         = maxiradio_remove,
 };
 
-static int __init maxiradio_init(void)
-{
-       return pci_register_driver(&maxiradio_driver);
-}
-
-static void __exit maxiradio_exit(void)
-{
-       pci_unregister_driver(&maxiradio_driver);
-}
-
-module_init(maxiradio_init);
-module_exit(maxiradio_exit);
+module_pci_driver(maxiradio_driver);
index ed184f68c17c5ed39c35e342018ad281eb5e23d6..c1444f84717d725a7dd3e93048b6ff7a7b7e5831 100644 (file)
@@ -476,7 +476,7 @@ select_timeout:
 }
 
 /* Enable the device for receive */
-static void ene_rx_enable(struct ene_device *dev)
+static void ene_rx_enable_hw(struct ene_device *dev)
 {
        u8 reg_value;
 
@@ -504,11 +504,17 @@ static void ene_rx_enable(struct ene_device *dev)
 
        /* enter idle mode */
        ir_raw_event_set_idle(dev->rdev, true);
+}
+
+/* Enable the device for receive - wrapper to track the state*/
+static void ene_rx_enable(struct ene_device *dev)
+{
+       ene_rx_enable_hw(dev);
        dev->rx_enabled = true;
 }
 
 /* Disable the device receiver */
-static void ene_rx_disable(struct ene_device *dev)
+static void ene_rx_disable_hw(struct ene_device *dev)
 {
        /* disable inputs */
        ene_rx_enable_cir_engine(dev, false);
@@ -516,8 +522,13 @@ static void ene_rx_disable(struct ene_device *dev)
 
        /* disable hardware IRQ and firmware flag */
        ene_clear_reg_mask(dev, ENE_FW1, ENE_FW1_ENABLE | ENE_FW1_IRQ);
-
        ir_raw_event_set_idle(dev->rdev, true);
+}
+
+/* Disable the device receiver - wrapper to track the state */
+static void ene_rx_disable(struct ene_device *dev)
+{
+       ene_rx_disable_hw(dev);
        dev->rx_enabled = false;
 }
 
@@ -1022,6 +1033,8 @@ static int ene_probe(struct pnp_dev *pnp_dev, const struct pnp_device_id *id)
        spin_lock_init(&dev->hw_lock);
 
        dev->hw_io = pnp_port_start(pnp_dev, 0);
+       dev->irq = pnp_irq(pnp_dev, 0);
+
 
        pnp_set_drvdata(pnp_dev, dev);
        dev->pnp_dev = pnp_dev;
@@ -1085,7 +1098,6 @@ static int ene_probe(struct pnp_dev *pnp_dev, const struct pnp_device_id *id)
                goto exit_unregister_device;
        }
 
-       dev->irq = pnp_irq(pnp_dev, 0);
        if (request_irq(dev->irq, ene_isr,
                        IRQF_SHARED, ENE_DRIVER_NAME, (void *)dev)) {
                goto exit_release_hw_io;
@@ -1123,9 +1135,8 @@ static void ene_remove(struct pnp_dev *pnp_dev)
 }
 
 /* enable wake on IR (wakes on specific button on original remote) */
-static void ene_enable_wake(struct ene_device *dev, int enable)
+static void ene_enable_wake(struct ene_device *dev, bool enable)
 {
-       enable = enable && device_may_wakeup(&dev->pnp_dev->dev);
        dbg("wake on IR %s", enable ? "enabled" : "disabled");
        ene_set_clear_reg_mask(dev, ENE_FW1, ENE_FW1_WAKE, enable);
 }
@@ -1134,9 +1145,12 @@ static void ene_enable_wake(struct ene_device *dev, int enable)
 static int ene_suspend(struct pnp_dev *pnp_dev, pm_message_t state)
 {
        struct ene_device *dev = pnp_get_drvdata(pnp_dev);
-       ene_enable_wake(dev, true);
+       bool wake = device_may_wakeup(&dev->pnp_dev->dev);
+
+       if (!wake && dev->rx_enabled)
+               ene_rx_disable_hw(dev);
 
-       /* TODO: add support for wake pattern */
+       ene_enable_wake(dev, wake);
        return 0;
 }
 
index 6f978e85db8c6ec2625f84075a81f787dfaff71e..a7911e3b9bc07d4e887dadf8a41a76cf6e1275c5 100644 (file)
 #define __dbg(level, format, ...)                              \
 do {                                                           \
        if (debug >= level)                                     \
-               pr_debug(format "\n", ## __VA_ARGS__);          \
+               pr_info(format "\n", ## __VA_ARGS__);           \
 } while (0)
 
 #define dbg(format, ...)               __dbg(1, format, ## __VA_ARGS__)
index a4ab2e6b3f82fcdba0364745e064be3970b3ba8b..19632b1c21908cb52e0c483f7a7339f647756e1e 100644 (file)
@@ -364,8 +364,8 @@ static int iguanair_tx(struct rc_dev *dev, unsigned *txbuf, unsigned count)
                periods = DIV_ROUND_CLOSEST(txbuf[i] * ir->carrier, 1000000);
                bytes = DIV_ROUND_UP(periods, 127);
                if (size + bytes > ir->bufsize) {
-                       count = i;
-                       break;
+                       rc = -EINVAL;
+                       goto out;
                }
                while (periods > 127) {
                        ir->packet->payload[size++] = 127 | space;
index e4561264e12439a8f24f2a63e94ac2572b0d47a0..ed2c8a1ed8caf39ee9eae20c8ec5b5f534c33c0a 100644 (file)
@@ -140,11 +140,20 @@ static ssize_t ir_lirc_transmit_ir(struct file *file, const char __user *buf,
                goto out;
        }
 
+       for (i = 0; i < count; i++) {
+               if (txbuf[i] > IR_MAX_DURATION / 1000 - duration || !txbuf[i]) {
+                       ret = -EINVAL;
+                       goto out;
+               }
+
+               duration += txbuf[i];
+       }
+
        ret = dev->tx_ir(dev, txbuf, count);
        if (ret < 0)
                goto out;
 
-       for (i = 0; i < ret; i++)
+       for (duration = i = 0; i < ret; i++)
                duration += txbuf[i];
 
        ret *= sizeof(unsigned int);
@@ -375,6 +384,7 @@ static int ir_lirc_register(struct rc_dev *dev)
        drv->code_length = sizeof(struct ir_raw_event) * 8;
        drv->fops = &lirc_fops;
        drv->dev = &dev->dev;
+       drv->rdev = dev;
        drv->owner = THIS_MODULE;
 
        drv->minor = lirc_register_driver(drv);
index 8dc057b273f25b49f7f4960896828c9437e3b489..dc5cbffcd5a26a4702b578e97c995e527b644236 100644 (file)
@@ -35,6 +35,7 @@
 #include <linux/device.h>
 #include <linux/cdev.h>
 
+#include <media/rc-core.h>
 #include <media/lirc.h>
 #include <media/lirc_dev.h>
 
@@ -467,6 +468,12 @@ int lirc_dev_fop_open(struct inode *inode, struct file *file)
                goto error;
        }
 
+       if (ir->d.rdev) {
+               retval = rc_open(ir->d.rdev);
+               if (retval)
+                       goto error;
+       }
+
        cdev = ir->cdev;
        if (try_module_get(cdev->owner)) {
                ir->open++;
@@ -511,6 +518,9 @@ int lirc_dev_fop_close(struct inode *inode, struct file *file)
 
        WARN_ON(mutex_lock_killable(&lirc_dev_lock));
 
+       if (ir->d.rdev)
+               rc_close(ir->d.rdev);
+
        ir->open--;
        if (ir->attached) {
                ir->d.set_use_dec(ir->d.data);
index 1cf382a0b27761683f6afa4d8c07aed3904ed789..1dedebda1cefb6fd9e11364ebad36cb60ab1fbb5 100644 (file)
@@ -699,19 +699,50 @@ void rc_keydown_notimeout(struct rc_dev *dev, int scancode, u8 toggle)
 }
 EXPORT_SYMBOL_GPL(rc_keydown_notimeout);
 
+int rc_open(struct rc_dev *rdev)
+{
+       int rval = 0;
+
+       if (!rdev)
+               return -EINVAL;
+
+       mutex_lock(&rdev->lock);
+       if (!rdev->users++)
+               rval = rdev->open(rdev);
+
+       if (rval)
+               rdev->users--;
+
+       mutex_unlock(&rdev->lock);
+
+       return rval;
+}
+EXPORT_SYMBOL_GPL(rc_open);
+
 static int ir_open(struct input_dev *idev)
 {
        struct rc_dev *rdev = input_get_drvdata(idev);
 
-       return rdev->open(rdev);
+       return rc_open(rdev);
+}
+
+void rc_close(struct rc_dev *rdev)
+{
+       if (rdev) {
+               mutex_lock(&rdev->lock);
+
+                if (!--rdev->users)
+                       rdev->close(rdev);
+
+               mutex_unlock(&rdev->lock);
+       }
 }
+EXPORT_SYMBOL_GPL(rc_close);
 
 static void ir_close(struct input_dev *idev)
 {
        struct rc_dev *rdev = input_get_drvdata(idev);
-
-        if (rdev)
-               rdev->close(rdev);
+       rc_close(rdev);
 }
 
 /* class for /sys/class/rc */
@@ -1076,7 +1107,14 @@ int rc_register_device(struct rc_dev *dev)
        memcpy(&dev->input_dev->id, &dev->input_id, sizeof(dev->input_id));
        dev->input_dev->phys = dev->input_phys;
        dev->input_dev->name = dev->input_name;
+
+       /* input_register_device can call ir_open, so unlock mutex here */
+       mutex_unlock(&dev->lock);
+
        rc = input_register_device(dev->input_dev);
+
+       mutex_lock(&dev->lock);
+
        if (rc)
                goto out_table;
 
index 12167a6b5472d02e242b7b686a068f90e68d3797..0042367b060cc5b5cf578f1691ce43248a2a4747 100644 (file)
@@ -206,8 +206,6 @@ struct redrat3_dev {
        struct timer_list rx_timeout;
        u32 hw_timeout;
 
-       /* is the detector enabled*/
-       bool det_enabled;
        /* Is the device currently transmitting?*/
        bool transmitting;
 
@@ -472,32 +470,11 @@ static int redrat3_enable_detector(struct redrat3_dev *rr3)
                return -EIO;
        }
 
-       rr3->det_enabled = true;
        redrat3_issue_async(rr3);
 
        return 0;
 }
 
-/* Disables the rr3 long range detector */
-static void redrat3_disable_detector(struct redrat3_dev *rr3)
-{
-       struct device *dev = rr3->dev;
-       u8 ret;
-
-       rr3_ftr(dev, "Entering %s\n", __func__);
-
-       ret = redrat3_send_cmd(RR3_RC_DET_DISABLE, rr3);
-       if (ret != 0)
-               dev_err(dev, "%s: failure!\n", __func__);
-
-       ret = redrat3_send_cmd(RR3_RC_DET_STATUS, rr3);
-       if (ret != 0)
-               dev_warn(dev, "%s: detector status: %d, should be 0\n",
-                        __func__, ret);
-
-       rr3->det_enabled = false;
-}
-
 static inline void redrat3_delete(struct redrat3_dev *rr3,
                                  struct usb_device *udev)
 {
@@ -785,10 +762,10 @@ static int redrat3_transmit_ir(struct rc_dev *rcdev, unsigned *txbuf,
                return -EAGAIN;
        }
 
-       count = min_t(unsigned, count, RR3_MAX_SIG_SIZE - RR3_TX_TRAILER_LEN);
+       if (count > RR3_MAX_SIG_SIZE - RR3_TX_TRAILER_LEN)
+               return -EINVAL;
 
        /* rr3 will disable rc detector on transmit */
-       rr3->det_enabled = false;
        rr3->transmitting = true;
 
        sample_lens = kzalloc(sizeof(int) * RR3_DRIVER_MAXLENS, GFP_KERNEL);
@@ -825,8 +802,8 @@ static int redrat3_transmit_ir(struct rc_dev *rcdev, unsigned *txbuf,
                                                &irdata->lens[curlencheck]);
                                curlencheck++;
                        } else {
-                               count = i - 1;
-                               break;
+                               ret = -EINVAL;
+                               goto out;
                        }
                }
                irdata->sigdata[i] = lencheck;
@@ -868,7 +845,6 @@ out:
 
        rr3->transmitting = false;
        /* rr3 re-enables rc detector because it was enabled before */
-       rr3->det_enabled = true;
 
        return ret;
 }
@@ -1048,8 +1024,6 @@ static void redrat3_dev_disconnect(struct usb_interface *intf)
        if (!rr3)
                return;
 
-       redrat3_disable_detector(rr3);
-
        usb_set_intfdata(intf, NULL);
        rc_unregister_device(rr3->rc);
        del_timer_sync(&rr3->rx_timeout);
index a3c8ecf22078b46118e38c1a102e6ce07b317b13..2059d0c86ad3ad4b91da3c84543bd8fdf74ec213 100644 (file)
@@ -1,6 +1,6 @@
 config DVB_USB_V2
        tristate "Support for various USB DVB devices v2"
-       depends on DVB_CORE && USB && I2C
+       depends on DVB_CORE && USB && I2C && (RC_CORE || RC_CORE=n)
        help
          By enabling this you will be able to choose the various supported
          USB1.1 and USB2.0 DVB devices.
index f08136052f9c483de1e72ad4edb86bd446d82d8d..829323e42ca095671372ec15de5cea81a4810fea 100644 (file)
@@ -3589,6 +3589,8 @@ struct usb_device_id dib0700_usb_id_table[] = {
        { USB_DEVICE(USB_VID_DIBCOM,    USB_PID_DIBCOM_TFE7790P) },
        { USB_DEVICE(USB_VID_DIBCOM,    USB_PID_DIBCOM_TFE8096P) },
 /* 80 */{ USB_DEVICE(USB_VID_ELGATO,   USB_PID_ELGATO_EYETV_DTT_2) },
+       { USB_DEVICE(USB_VID_PCTV,      USB_PID_PCTV_2002E) },
+       { USB_DEVICE(USB_VID_PCTV,      USB_PID_PCTV_2002E_SE) },
        { 0 }           /* Terminating entry */
 };
 MODULE_DEVICE_TABLE(usb, dib0700_usb_id_table);
@@ -3993,12 +3995,20 @@ struct dvb_usb_device_properties dib0700_devices[] = {
                        }
                },
 
-               .num_device_descs = 1,
+               .num_device_descs = 3,
                .devices = {
                        {   "Hauppauge Nova-TD Stick (52009)",
                                { &dib0700_usb_id_table[35], NULL },
                                { NULL },
                        },
+                       {   "PCTV 2002e",
+                               { &dib0700_usb_id_table[81], NULL },
+                               { NULL },
+                       },
+                       {   "PCTV 2002e SE",
+                               { &dib0700_usb_id_table[82], NULL },
+                               { NULL },
+                       },
                },
 
                .rc.core = {
index c2b635d6a17a9df6ad17f838e1ad715601fbefdd..0306cb778df4b07683fb05bc42949616ee03a55d 100644 (file)
@@ -1212,7 +1212,7 @@ static struct dvb_usb_device_properties vp7049_properties = {
                .rc_interval    = 150,
                .rc_codes       = RC_MAP_TWINHAN_VP1027_DVBS,
                .rc_query       = m920x_rc_core_query,
-               .allowed_protos = RC_TYPE_UNKNOWN,
+               .allowed_protos = RC_BIT_UNKNOWN,
        },
 
        .size_of_priv     = sizeof(struct m920x_state),
index 1a577ed8ea0ca0163e9203e9db0a143ccd65a743..9d103344f34ab5e4e3267ca8fa08f58db1f5a741 100644 (file)
@@ -1008,6 +1008,7 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
        else
                f->fmt.pix.field = dev->interlaced ?
                           V4L2_FIELD_INTERLACED : V4L2_FIELD_TOP;
+       f->fmt.pix.priv = 0;
 
        return 0;
 }
index d6890bc3719862e54c87af2fe6b1f60ba958997e..a2275cfe0b814625b1bfc9e14144084aed74ca6b 100644 (file)
@@ -6,7 +6,7 @@
  * Based on the usbvideo vicam driver, which is:
  *
  * Copyright (c) 2002 Joe Burks (jburks@wavicle.org),
- *                    Christopher L Cheney (ccheney@cheney.cx),
+ *                    Chris Cheney (chris.cheney@gmail.com),
  *                    Pavel Machek (pavel@ucw.cz),
  *                    John Tyner (jtyner@cs.ucr.edu),
  *                    Monroe Williams (monroe@pobox.com)
index ab97e7d0b4f25e2cdd7adf239be95cecfa70e546..6bc9b8e19e20d889d225fdc1864f15b881b18184 100644 (file)
@@ -1,7 +1,7 @@
 /*
  *  s2255drv.c - a driver for the Sensoray 2255 USB video capture device
  *
- *   Copyright (C) 2007-2010 by Sensoray Company Inc.
+ *   Copyright (C) 2007-2013 by Sensoray Company Inc.
  *                              Dean Anderson
  *
  * Some video buffer code based on vivi driver:
@@ -52,7 +52,7 @@
 #include <media/v4l2-ctrls.h>
 #include <media/v4l2-event.h>
 
-#define S2255_VERSION          "1.22.1"
+#define S2255_VERSION          "1.23.1"
 #define FIRMWARE_FILE_NAME "f2255usb.bin"
 
 /* default JPEG quality */
@@ -1303,11 +1303,6 @@ static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id i)
        int ret = 0;
 
        mutex_lock(&q->vb_lock);
-       if (videobuf_queue_is_busy(q)) {
-               dprintk(1, "queue busy\n");
-               ret = -EBUSY;
-               goto out_s_std;
-       }
        if (res_locked(fh)) {
                dprintk(1, "can't change standard after started\n");
                ret = -EBUSY;
index 1c3a1ec00237c4b6b2e2f3d10a2e7205d6d853fc..95584c15dc5aa7813444a3d51408142b8032b357 100644 (file)
@@ -1,8 +1,6 @@
-config VIDEO_STK1160
+config VIDEO_STK1160_COMMON
        tristate "STK1160 USB video capture support"
        depends on VIDEO_DEV && I2C
-       select VIDEOBUF2_VMALLOC
-       select VIDEO_SAA711X
 
        ---help---
          This is a video4linux driver for STK1160 based video capture devices.
@@ -12,9 +10,15 @@ config VIDEO_STK1160
 
 config VIDEO_STK1160_AC97
        bool "STK1160 AC97 codec support"
-       depends on VIDEO_STK1160 && SND
-       select SND_AC97_CODEC
+       depends on VIDEO_STK1160_COMMON && SND
 
        ---help---
          Enables AC97 codec support for stk1160 driver.
-.
+
+config VIDEO_STK1160
+       tristate
+       depends on (!VIDEO_STK1160_AC97 || (SND='n') || SND) && VIDEO_STK1160_COMMON
+       default y
+       select VIDEOBUF2_VMALLOC
+       select VIDEO_SAA711X
+       select SND_AC97_CODEC if SND
index 876fc26565e3bc05dbfe4a192ffe916080495fb2..c45c9881bb5f4fdad89cf9ffe45c1c05cf8d946c 100644 (file)
@@ -379,6 +379,9 @@ static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id norm)
        struct stk1160 *dev = video_drvdata(file);
        struct vb2_queue *q = &dev->vb_vidq;
 
+       if (dev->norm == norm)
+               return 0;
+
        if (vb2_is_busy(q))
                return -EBUSY;
 
@@ -440,9 +443,6 @@ static int vidioc_s_input(struct file *file, void *priv, unsigned int i)
 {
        struct stk1160 *dev = video_drvdata(file);
 
-       if (vb2_is_busy(&dev->vb_vidq))
-               return -EBUSY;
-
        if (i > STK1160_MAX_INPUT)
                return -EINVAL;
 
index e07e4c699cc215344b86f10433f659a088617d00..95f94e5aa66d2a14910bc0b81eec3bf595534f02 100644 (file)
@@ -375,7 +375,7 @@ static inline void set_map_flags(struct poseidon *pd, struct usb_device *udev)
 }
 #endif
 
-static int check_firmware(struct usb_device *udev, int *down_firmware)
+static int check_firmware(struct usb_device *udev)
 {
        void *buf;
        int ret;
@@ -395,10 +395,8 @@ static int check_firmware(struct usb_device *udev, int *down_firmware)
                         USB_CTRL_GET_TIMEOUT);
        kfree(buf);
 
-       if (ret < 0) {
-               *down_firmware = 1;
+       if (ret < 0)
                return firmware_download(udev);
-       }
        return 0;
 }
 
@@ -411,9 +409,9 @@ static int poseidon_probe(struct usb_interface *interface,
        int new_one = 0;
 
        /* download firmware */
-       check_firmware(udev, &ret);
+       ret = check_firmware(udev);
        if (ret)
-               return 0;
+               return ret;
 
        /* Do I recovery from the hibernate ? */
        pd = find_old_poseidon(udev);
@@ -436,12 +434,22 @@ static int poseidon_probe(struct usb_interface *interface,
 
                /* register v4l2 device */
                ret = v4l2_device_register(&interface->dev, &pd->v4l2_dev);
+               if (ret)
+                       goto err_v4l2;
 
                /* register devices in directory /dev */
                ret = pd_video_init(pd);
-               poseidon_audio_init(pd);
-               poseidon_fm_init(pd);
-               pd_dvb_usb_device_init(pd);
+               if (ret)
+                       goto err_video;
+               ret = poseidon_audio_init(pd);
+               if (ret)
+                       goto err_audio;
+               ret = poseidon_fm_init(pd);
+               if (ret)
+                       goto err_fm;
+               ret = pd_dvb_usb_device_init(pd);
+               if (ret)
+                       goto err_dvb;
 
                INIT_LIST_HEAD(&pd->device_list);
                list_add_tail(&pd->device_list, &pd_device_list);
@@ -459,6 +467,17 @@ static int poseidon_probe(struct usb_interface *interface,
        }
 #endif
        return 0;
+err_dvb:
+       poseidon_fm_exit(pd);
+err_fm:
+       poseidon_audio_free(pd);
+err_audio:
+       pd_video_exit(pd);
+err_video:
+       v4l2_device_unregister(&pd->v4l2_dev);
+err_v4l2:
+       kfree(pd);
+       return ret;
 }
 
 static void poseidon_disconnect(struct usb_interface *interface)
index 91650173941ad5629a56cae9bacff0a4072002f1..8a505a90d3189a59876a916507ae50b6e11a0ed9 100644 (file)
@@ -33,7 +33,6 @@
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/usb.h>
-#include <linux/version.h>
 #include <linux/videodev2.h>
 
 #include <media/v4l2-device.h>
@@ -91,17 +90,78 @@ struct usbtv {
        u32 frame_id;
        int chunks_done;
 
+       enum {
+               USBTV_COMPOSITE_INPUT,
+               USBTV_SVIDEO_INPUT,
+       } input;
        int iso_size;
        unsigned int sequence;
        struct urb *isoc_urbs[USBTV_ISOC_TRANSFERS];
 };
 
-static int usbtv_setup_capture(struct usbtv *usbtv)
+static int usbtv_set_regs(struct usbtv *usbtv, const u16 regs[][2], int size)
 {
        int ret;
        int pipe = usb_rcvctrlpipe(usbtv->udev, 0);
        int i;
-       static const u16 protoregs[][2] = {
+
+       for (i = 0; i < size; i++) {
+               u16 index = regs[i][0];
+               u16 value = regs[i][1];
+
+               ret = usb_control_msg(usbtv->udev, pipe, USBTV_REQUEST_REG,
+                       USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+                       value, index, NULL, 0, 0);
+               if (ret < 0)
+                       return ret;
+       }
+
+       return 0;
+}
+
+static int usbtv_select_input(struct usbtv *usbtv, int input)
+{
+       int ret;
+
+       static const u16 composite[][2] = {
+               { USBTV_BASE + 0x0105, 0x0060 },
+               { USBTV_BASE + 0x011f, 0x00f2 },
+               { USBTV_BASE + 0x0127, 0x0060 },
+               { USBTV_BASE + 0x00ae, 0x0010 },
+               { USBTV_BASE + 0x0284, 0x00aa },
+               { USBTV_BASE + 0x0239, 0x0060 },
+       };
+
+       static const u16 svideo[][2] = {
+               { USBTV_BASE + 0x0105, 0x0010 },
+               { USBTV_BASE + 0x011f, 0x00ff },
+               { USBTV_BASE + 0x0127, 0x0060 },
+               { USBTV_BASE + 0x00ae, 0x0030 },
+               { USBTV_BASE + 0x0284, 0x0088 },
+               { USBTV_BASE + 0x0239, 0x0060 },
+       };
+
+       switch (input) {
+       case USBTV_COMPOSITE_INPUT:
+               ret = usbtv_set_regs(usbtv, composite, ARRAY_SIZE(composite));
+               break;
+       case USBTV_SVIDEO_INPUT:
+               ret = usbtv_set_regs(usbtv, svideo, ARRAY_SIZE(svideo));
+               break;
+       default:
+               ret = -EINVAL;
+       }
+
+       if (!ret)
+               usbtv->input = input;
+
+       return ret;
+}
+
+static int usbtv_setup_capture(struct usbtv *usbtv)
+{
+       int ret;
+       static const u16 setup[][2] = {
                /* These seem to enable the device. */
                { USBTV_BASE + 0x0008, 0x0001 },
                { USBTV_BASE + 0x01d0, 0x00ff },
@@ -189,16 +249,13 @@ static int usbtv_setup_capture(struct usbtv *usbtv)
                { USBTV_BASE + 0x024f, 0x0002 },
        };
 
-       for (i = 0; i < ARRAY_SIZE(protoregs); i++) {
-               u16 index = protoregs[i][0];
-               u16 value = protoregs[i][1];
+       ret = usbtv_set_regs(usbtv, setup, ARRAY_SIZE(setup));
+       if (ret)
+               return ret;
 
-               ret = usb_control_msg(usbtv->udev, pipe, USBTV_REQUEST_REG,
-                       USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
-                       value, index, NULL, 0, 0);
-               if (ret < 0)
-                       return ret;
-       }
+       ret = usbtv_select_input(usbtv, usbtv->input);
+       if (ret)
+               return ret;
 
        return 0;
 }
@@ -443,10 +500,17 @@ static int usbtv_querycap(struct file *file, void *priv,
 static int usbtv_enum_input(struct file *file, void *priv,
                                        struct v4l2_input *i)
 {
-       if (i->index > 0)
+       switch (i->index) {
+       case USBTV_COMPOSITE_INPUT:
+               strlcpy(i->name, "Composite", sizeof(i->name));
+               break;
+       case USBTV_SVIDEO_INPUT:
+               strlcpy(i->name, "S-Video", sizeof(i->name));
+               break;
+       default:
                return -EINVAL;
+       }
 
-       strlcpy(i->name, "Composite", sizeof(i->name));
        i->type = V4L2_INPUT_TYPE_CAMERA;
        i->std = V4L2_STD_525_60;
        return 0;
@@ -486,15 +550,15 @@ static int usbtv_g_std(struct file *file, void *priv, v4l2_std_id *norm)
 
 static int usbtv_g_input(struct file *file, void *priv, unsigned int *i)
 {
-       *i = 0;
+       struct usbtv *usbtv = video_drvdata(file);
+       *i = usbtv->input;
        return 0;
 }
 
 static int usbtv_s_input(struct file *file, void *priv, unsigned int i)
 {
-       if (i > 0)
-               return -EINVAL;
-       return 0;
+       struct usbtv *usbtv = video_drvdata(file);
+       return usbtv_select_input(usbtv, i);
 }
 
 static int usbtv_s_std(struct file *file, void *priv, v4l2_std_id norm)
index aae241730caad2cd16f116c6a54317128da7cbbb..b350ab99652ce58131b8d8b8505c61d5accce9c6 100644 (file)
@@ -27,7 +27,6 @@ static bool match_i2c(struct device *dev, struct v4l2_async_subdev *asd)
 #if IS_ENABLED(CONFIG_I2C)
        struct i2c_client *client = i2c_verify_client(dev);
        return client &&
-               asd->bus_type == V4L2_ASYNC_BUS_I2C &&
                asd->match.i2c.adapter_id == client->adapter->nr &&
                asd->match.i2c.address == client->addr;
 #else
@@ -35,10 +34,14 @@ static bool match_i2c(struct device *dev, struct v4l2_async_subdev *asd)
 #endif
 }
 
-static bool match_platform(struct device *dev, struct v4l2_async_subdev *asd)
+static bool match_devname(struct device *dev, struct v4l2_async_subdev *asd)
 {
-       return asd->bus_type == V4L2_ASYNC_BUS_PLATFORM &&
-               !strcmp(asd->match.platform.name, dev_name(dev));
+       return !strcmp(asd->match.device_name.name, dev_name(dev));
+}
+
+static bool match_of(struct device *dev, struct v4l2_async_subdev *asd)
+{
+       return dev->of_node == asd->match.of.node;
 }
 
 static LIST_HEAD(subdev_list);
@@ -46,28 +49,29 @@ static LIST_HEAD(notifier_list);
 static DEFINE_MUTEX(list_lock);
 
 static struct v4l2_async_subdev *v4l2_async_belongs(struct v4l2_async_notifier *notifier,
-                                                   struct v4l2_async_subdev_list *asdl)
+                                                   struct v4l2_subdev *sd)
 {
-       struct v4l2_subdev *sd = v4l2_async_to_subdev(asdl);
        struct v4l2_async_subdev *asd;
-       bool (*match)(struct device *,
-                     struct v4l2_async_subdev *);
+       bool (*match)(struct device *, struct v4l2_async_subdev *);
 
        list_for_each_entry(asd, &notifier->waiting, list) {
                /* bus_type has been verified valid before */
-               switch (asd->bus_type) {
-               case V4L2_ASYNC_BUS_CUSTOM:
+               switch (asd->match_type) {
+               case V4L2_ASYNC_MATCH_CUSTOM:
                        match = asd->match.custom.match;
                        if (!match)
                                /* Match always */
                                return asd;
                        break;
-               case V4L2_ASYNC_BUS_PLATFORM:
-                       match = match_platform;
+               case V4L2_ASYNC_MATCH_DEVNAME:
+                       match = match_devname;
                        break;
-               case V4L2_ASYNC_BUS_I2C:
+               case V4L2_ASYNC_MATCH_I2C:
                        match = match_i2c;
                        break;
+               case V4L2_ASYNC_MATCH_OF:
+                       match = match_of;
+                       break;
                default:
                        /* Cannot happen, unless someone breaks us */
                        WARN_ON(true);
@@ -83,16 +87,15 @@ static struct v4l2_async_subdev *v4l2_async_belongs(struct v4l2_async_notifier *
 }
 
 static int v4l2_async_test_notify(struct v4l2_async_notifier *notifier,
-                                 struct v4l2_async_subdev_list *asdl,
+                                 struct v4l2_subdev *sd,
                                  struct v4l2_async_subdev *asd)
 {
-       struct v4l2_subdev *sd = v4l2_async_to_subdev(asdl);
        int ret;
 
        /* Remove from the waiting list */
        list_del(&asd->list);
-       asdl->asd = asd;
-       asdl->notifier = notifier;
+       sd->asd = asd;
+       sd->notifier = notifier;
 
        if (notifier->bound) {
                ret = notifier->bound(notifier, sd, asd);
@@ -100,7 +103,7 @@ static int v4l2_async_test_notify(struct v4l2_async_notifier *notifier,
                        return ret;
        }
        /* Move from the global subdevice list to notifier's done */
-       list_move(&asdl->list, &notifier->done);
+       list_move(&sd->async_list, &notifier->done);
 
        ret = v4l2_device_register_subdev(notifier->v4l2_dev, sd);
        if (ret < 0) {
@@ -115,21 +118,19 @@ static int v4l2_async_test_notify(struct v4l2_async_notifier *notifier,
        return 0;
 }
 
-static void v4l2_async_cleanup(struct v4l2_async_subdev_list *asdl)
+static void v4l2_async_cleanup(struct v4l2_subdev *sd)
 {
-       struct v4l2_subdev *sd = v4l2_async_to_subdev(asdl);
-
        v4l2_device_unregister_subdev(sd);
-       /* Subdevice driver will reprobe and put asdl back onto the list */
-       list_del_init(&asdl->list);
-       asdl->asd = NULL;
+       /* Subdevice driver will reprobe and put the subdev back onto the list */
+       list_del_init(&sd->async_list);
+       sd->asd = NULL;
        sd->dev = NULL;
 }
 
 int v4l2_async_notifier_register(struct v4l2_device *v4l2_dev,
                                 struct v4l2_async_notifier *notifier)
 {
-       struct v4l2_async_subdev_list *asdl, *tmp;
+       struct v4l2_subdev *sd, *tmp;
        struct v4l2_async_subdev *asd;
        int i;
 
@@ -141,17 +142,18 @@ int v4l2_async_notifier_register(struct v4l2_device *v4l2_dev,
        INIT_LIST_HEAD(&notifier->done);
 
        for (i = 0; i < notifier->num_subdevs; i++) {
-               asd = notifier->subdev[i];
+               asd = notifier->subdevs[i];
 
-               switch (asd->bus_type) {
-               case V4L2_ASYNC_BUS_CUSTOM:
-               case V4L2_ASYNC_BUS_PLATFORM:
-               case V4L2_ASYNC_BUS_I2C:
+               switch (asd->match_type) {
+               case V4L2_ASYNC_MATCH_CUSTOM:
+               case V4L2_ASYNC_MATCH_DEVNAME:
+               case V4L2_ASYNC_MATCH_I2C:
+               case V4L2_ASYNC_MATCH_OF:
                        break;
                default:
                        dev_err(notifier->v4l2_dev ? notifier->v4l2_dev->dev : NULL,
-                               "Invalid bus-type %u on %p\n",
-                               asd->bus_type, asd);
+                               "Invalid match type %u on %p\n",
+                               asd->match_type, asd);
                        return -EINVAL;
                }
                list_add_tail(&asd->list, &notifier->waiting);
@@ -162,14 +164,14 @@ int v4l2_async_notifier_register(struct v4l2_device *v4l2_dev,
        /* Keep also completed notifiers on the list */
        list_add(&notifier->list, &notifier_list);
 
-       list_for_each_entry_safe(asdl, tmp, &subdev_list, list) {
+       list_for_each_entry_safe(sd, tmp, &subdev_list, async_list) {
                int ret;
 
-               asd = v4l2_async_belongs(notifier, asdl);
+               asd = v4l2_async_belongs(notifier, sd);
                if (!asd)
                        continue;
 
-               ret = v4l2_async_test_notify(notifier, asdl, asd);
+               ret = v4l2_async_test_notify(notifier, sd, asd);
                if (ret < 0) {
                        mutex_unlock(&list_lock);
                        return ret;
@@ -184,7 +186,7 @@ EXPORT_SYMBOL(v4l2_async_notifier_register);
 
 void v4l2_async_notifier_unregister(struct v4l2_async_notifier *notifier)
 {
-       struct v4l2_async_subdev_list *asdl, *tmp;
+       struct v4l2_subdev *sd, *tmp;
        unsigned int notif_n_subdev = notifier->num_subdevs;
        unsigned int n_subdev = min(notif_n_subdev, V4L2_MAX_SUBDEVS);
        struct device *dev[n_subdev];
@@ -194,18 +196,16 @@ void v4l2_async_notifier_unregister(struct v4l2_async_notifier *notifier)
 
        list_del(&notifier->list);
 
-       list_for_each_entry_safe(asdl, tmp, &notifier->done, list) {
-               struct v4l2_subdev *sd = v4l2_async_to_subdev(asdl);
-
+       list_for_each_entry_safe(sd, tmp, &notifier->done, list) {
                dev[i] = get_device(sd->dev);
 
-               v4l2_async_cleanup(asdl);
+               v4l2_async_cleanup(sd);
 
                /* If we handled USB devices, we'd have to lock the parent too */
                device_release_driver(dev[i++]);
 
                if (notifier->unbind)
-                       notifier->unbind(notifier, sd, sd->asdl.asd);
+                       notifier->unbind(notifier, sd, sd->asd);
        }
 
        mutex_unlock(&list_lock);
@@ -234,24 +234,23 @@ EXPORT_SYMBOL(v4l2_async_notifier_unregister);
 
 int v4l2_async_register_subdev(struct v4l2_subdev *sd)
 {
-       struct v4l2_async_subdev_list *asdl = &sd->asdl;
        struct v4l2_async_notifier *notifier;
 
        mutex_lock(&list_lock);
 
-       INIT_LIST_HEAD(&asdl->list);
+       INIT_LIST_HEAD(&sd->async_list);
 
        list_for_each_entry(notifier, &notifier_list, list) {
-               struct v4l2_async_subdev *asd = v4l2_async_belongs(notifier, asdl);
+               struct v4l2_async_subdev *asd = v4l2_async_belongs(notifier, sd);
                if (asd) {
-                       int ret = v4l2_async_test_notify(notifier, asdl, asd);
+                       int ret = v4l2_async_test_notify(notifier, sd, asd);
                        mutex_unlock(&list_lock);
                        return ret;
                }
        }
 
        /* None matched, wait for hot-plugging */
-       list_add(&asdl->list, &subdev_list);
+       list_add(&sd->async_list, &subdev_list);
 
        mutex_unlock(&list_lock);
 
@@ -261,23 +260,22 @@ EXPORT_SYMBOL(v4l2_async_register_subdev);
 
 void v4l2_async_unregister_subdev(struct v4l2_subdev *sd)
 {
-       struct v4l2_async_subdev_list *asdl = &sd->asdl;
-       struct v4l2_async_notifier *notifier = asdl->notifier;
+       struct v4l2_async_notifier *notifier = sd->notifier;
 
-       if (!asdl->asd) {
-               if (!list_empty(&asdl->list))
-                       v4l2_async_cleanup(asdl);
+       if (!sd->asd) {
+               if (!list_empty(&sd->async_list))
+                       v4l2_async_cleanup(sd);
                return;
        }
 
        mutex_lock(&list_lock);
 
-       list_add(&asdl->asd->list, &notifier->waiting);
+       list_add(&sd->asd->list, &notifier->waiting);
 
-       v4l2_async_cleanup(asdl);
+       v4l2_async_cleanup(sd);
 
        if (notifier->unbind)
-               notifier->unbind(notifier, sd, sd->asdl.asd);
+               notifier->unbind(notifier, sd, sd->asd);
 
        mutex_unlock(&list_lock);
 }
index e96497f7c3ed1b598106da2b7aa51f9a46284c68..89b90672088c7473bd80bd09ce1c47df4fc7bc2c 100644 (file)
@@ -196,6 +196,10 @@ static void v4l2_m2m_try_run(struct v4l2_m2m_dev *m2m_dev)
  * 2) at least one destination buffer has to be queued,
  * 3) streaming has to be on.
  *
+ * If a queue is buffered (for example a decoder hardware ringbuffer that has
+ * to be drained before doing streamoff), allow scheduling without v4l2 buffers
+ * on that queue.
+ *
  * There may also be additional, custom requirements. In such case the driver
  * should supply a custom callback (job_ready in v4l2_m2m_ops) that should
  * return 1 if the instance is ready.
@@ -224,7 +228,8 @@ static void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
        }
 
        spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);
-       if (list_empty(&m2m_ctx->out_q_ctx.rdy_queue)) {
+       if (list_empty(&m2m_ctx->out_q_ctx.rdy_queue)
+           && !m2m_ctx->out_q_ctx.buffered) {
                spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock,
                                        flags_out);
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
@@ -232,7 +237,8 @@ static void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
                return;
        }
        spin_lock_irqsave(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
-       if (list_empty(&m2m_ctx->cap_q_ctx.rdy_queue)) {
+       if (list_empty(&m2m_ctx->cap_q_ctx.rdy_queue)
+           && !m2m_ctx->cap_q_ctx.buffered) {
                spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock,
                                        flags_cap);
                spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock,
index 6c954835d61ecb04dd0801ccae801033869f2b74..cd9e17471232ed76b091c420a71a1146fdd4ba97 100644 (file)
@@ -541,7 +541,7 @@ static int pm800_probe(struct i2c_client *client,
 {
        int ret = 0;
        struct pm80x_chip *chip;
-       struct pm80x_platform_data *pdata = client->dev.platform_data;
+       struct pm80x_platform_data *pdata = dev_get_platdata(&client->dev);
        struct pm80x_subchip *subchip;
 
        ret = pm80x_init(client);
index 521602231c7bc43c8ec007f078ae5c4d38bcaa7d..0686cdb06b3e7ef5f0a29efc3ba7035368811240 100644 (file)
@@ -227,7 +227,7 @@ static int pm805_probe(struct i2c_client *client,
 {
        int ret = 0;
        struct pm80x_chip *chip;
-       struct pm80x_platform_data *pdata = client->dev.platform_data;
+       struct pm80x_platform_data *pdata = dev_get_platdata(&client->dev);
 
        ret = pm80x_init(client);
        if (ret) {
index eeb481d426b5df159264cc28e4bd56210c1ae1e0..7ebe9ef1eba663006399ec1b52f327e3fd3d0e47 100644 (file)
@@ -1130,7 +1130,7 @@ static int pm860x_dt_init(struct device_node *np,
 static int pm860x_probe(struct i2c_client *client,
                                  const struct i2c_device_id *id)
 {
-       struct pm860x_platform_data *pdata = client->dev.platform_data;
+       struct pm860x_platform_data *pdata = dev_get_platdata(&client->dev);
        struct device_node *node = client->dev.of_node;
        struct pm860x_chip *chip;
        int ret;
index aecd6ddcbbbf75699f1ddab73241ee21eb888687..e0e46f50f95d394642f740b111d91d5dce8431f6 100644 (file)
@@ -139,6 +139,18 @@ config MFD_DA9055
          This driver can be built as a module. If built as a module it will be
          called "da9055"
 
+config MFD_DA9063
+       bool "Dialog Semiconductor DA9063 PMIC Support"
+       select MFD_CORE
+       select REGMAP_I2C
+       select REGMAP_IRQ
+       depends on I2C=y && GENERIC_HARDIRQS
+       help
+         Say yes here for support for the Dialog Semiconductor DA9063 PMIC.
+         This includes the I2C driver and core APIs.
+         Additional drivers must be enabled in order to use the functionality
+         of the device.
+
 config MFD_MC13783
        tristate
 
@@ -1070,7 +1082,7 @@ config MFD_WM5110
          Support for Wolfson Microelectronics WM5110 low power audio SoC
 
 config MFD_WM8997
-       bool "Support Wolfson Microelectronics WM8997"
+       bool "Wolfson Microelectronics WM8997"
        depends on MFD_ARIZONA
        help
          Support for Wolfson Microelectronics WM8997 low power audio SoC
index 3c90051ffa5a390972a4d3cffb34f4b055ed3d8f..15b905c6553c07e7953b7cc4b57c63fcd5ad06a1 100644 (file)
@@ -107,6 +107,9 @@ obj-$(CONFIG_MFD_LP8788)    += lp8788.o lp8788-irq.o
 da9055-objs                    := da9055-core.o da9055-i2c.o
 obj-$(CONFIG_MFD_DA9055)       += da9055.o
 
+da9063-objs                    := da9063-core.o da9063-irq.o da9063-i2c.o
+obj-$(CONFIG_MFD_DA9063)       += da9063.o
+
 obj-$(CONFIG_MFD_MAX77686)     += max77686.o max77686-irq.o
 obj-$(CONFIG_MFD_MAX77693)     += max77693.o max77693-irq.o
 obj-$(CONFIG_MFD_MAX8907)      += max8907.o
index d4f594517521b30e5bc3344208b21d03801ef14d..6f68472e0ca633e9b0ec5f08db4f1389af93336e 100644 (file)
@@ -363,7 +363,7 @@ static inline void aat2870_uninit_debugfs(struct aat2870_data *aat2870)
 static int aat2870_i2c_probe(struct i2c_client *client,
                             const struct i2c_device_id *id)
 {
-       struct aat2870_platform_data *pdata = client->dev.platform_data;
+       struct aat2870_platform_data *pdata = dev_get_platdata(&client->dev);
        struct aat2870_data *aat2870;
        int i, j;
        int ret = 0;
index ddc669d19530ee7a171547b78983adab6b4b7e0d..b348ae5206297eb554e70decb2e21d216fd8a2f6 100644 (file)
@@ -854,7 +854,7 @@ static int ab3100_probe(struct i2c_client *client,
 {
        struct ab3100 *ab3100;
        struct ab3100_platform_data *ab3100_plf_data =
-               client->dev.platform_data;
+               dev_get_platdata(&client->dev);
        int err;
        int i;
 
index 7623e91238287b4d04f6b5d28e994fd0e478117b..36000f920981b055195c1cd4b1dfd88954050655 100644 (file)
@@ -867,6 +867,7 @@ static void ab8500_gpadc_read_calibration_data(struct ab8500_gpadc *gpadc)
                gpadc->cal_data[ADC_INPUT_VBAT].offset);
 }
 
+#ifdef CONFIG_PM_RUNTIME
 static int ab8500_gpadc_runtime_suspend(struct device *dev)
 {
        struct ab8500_gpadc *gpadc = dev_get_drvdata(dev);
@@ -885,7 +886,9 @@ static int ab8500_gpadc_runtime_resume(struct device *dev)
                dev_err(dev, "Failed to enable vtvout LDO: %d\n", ret);
        return ret;
 }
+#endif
 
+#ifdef CONFIG_PM_SLEEP
 static int ab8500_gpadc_suspend(struct device *dev)
 {
        struct ab8500_gpadc *gpadc = dev_get_drvdata(dev);
@@ -913,6 +916,7 @@ static int ab8500_gpadc_resume(struct device *dev)
        mutex_unlock(&gpadc->ab8500_gpadc_lock);
        return ret;
 }
+#endif
 
 static int ab8500_gpadc_probe(struct platform_device *pdev)
 {
index 28346ad0b4a6da0eee7e5e6f09e025769d11b024..62501553d63c4f7c2b49da5c73d79141d964179e 100644 (file)
@@ -207,7 +207,7 @@ static int adp5520_remove_subdevs(struct adp5520_chip *chip)
 static int adp5520_probe(struct i2c_client *client,
                                        const struct i2c_device_id *id)
 {
-       struct adp5520_platform_data *pdata = client->dev.platform_data;
+       struct adp5520_platform_data *pdata = dev_get_platdata(&client->dev);
        struct platform_device *pdev;
        struct adp5520_chip *chip;
        int ret;
index 89a115301a0cbea26adb9e4b1ffec8ef1cebe7f1..5ac3aa48473be0364d70eeae84a7a423b1a3be4d 100644 (file)
@@ -438,9 +438,9 @@ static int arizona_runtime_suspend(struct device *dev)
                }
        }
 
-       regulator_disable(arizona->dcvdd);
        regcache_cache_only(arizona->regmap, true);
        regcache_mark_dirty(arizona->regmap);
+       regulator_disable(arizona->dcvdd);
 
        return 0;
 }
index 01e414162702bcd1ccfcc8e73f099e8a95cf14b3..abd3ab7c0908ab6232f0e7d82a35361ca33c46c7 100644 (file)
@@ -129,7 +129,7 @@ static int as3711_i2c_probe(struct i2c_client *client,
        int ret;
 
        if (!client->dev.of_node) {
-               pdata = client->dev.platform_data;
+               pdata = dev_get_platdata(&client->dev);
                if (!pdata)
                        dev_dbg(&client->dev, "Platform data not found\n");
        } else {
index 9532f749412f6442f190d3ec34fff0c286b36e9d..fa22154c84e49cfb6be58930339ea180b8be77b6 100644 (file)
@@ -952,7 +952,7 @@ static void asic3_mfd_remove(struct platform_device *pdev)
 /* Core */
 static int __init asic3_probe(struct platform_device *pdev)
 {
-       struct asic3_platform_data *pdata = pdev->dev.platform_data;
+       struct asic3_platform_data *pdata = dev_get_platdata(&pdev->dev);
        struct asic3 *asic;
        struct resource *mem;
        unsigned long clksel;
index f1a316e0d6a68850e367d0455937be635148fa89..e0a2e0ee603be73a6d808efa2ce1673f9c32e7ae 100644 (file)
@@ -494,7 +494,7 @@ failed:
 static int da903x_probe(struct i2c_client *client,
                                  const struct i2c_device_id *id)
 {
-       struct da903x_platform_data *pdata = client->dev.platform_data;
+       struct da903x_platform_data *pdata = dev_get_platdata(&client->dev);
        struct da903x_chip *chip;
        unsigned int tmp;
        int ret;
index a3c9613f91664e87ae03f4496c3360df74c755ef..ea28a33576e45cd3c3df406a09264bd69a5c3298 100644 (file)
@@ -534,7 +534,7 @@ EXPORT_SYMBOL_GPL(da9052_regmap_config);
 
 int da9052_device_init(struct da9052 *da9052, u8 chip_id)
 {
-       struct da9052_pdata *pdata = da9052->dev->platform_data;
+       struct da9052_pdata *pdata = dev_get_platdata(da9052->dev);
        int ret;
 
        mutex_init(&da9052->auxadc_lock);
index 49cb23d37469153980f5b78ceb0126f1d46aa852..d3670cd3c3c6c0e025c80ec940851ee5599037b1 100644 (file)
@@ -379,8 +379,9 @@ static struct regmap_irq_chip da9055_regmap_irq_chip = {
 
 int da9055_device_init(struct da9055 *da9055)
 {
-       struct da9055_pdata *pdata = da9055->dev->platform_data;
+       struct da9055_pdata *pdata = dev_get_platdata(da9055->dev);
        int ret;
+       uint8_t clear_events[3] = {0xFF, 0xFF, 0xFF};
 
        if (pdata && pdata->init != NULL)
                pdata->init(da9055);
@@ -390,6 +391,10 @@ int da9055_device_init(struct da9055 *da9055)
        else
                da9055->irq_base = pdata->irq_base;
 
+       ret = da9055_group_write(da9055, DA9055_REG_EVENT_A, 3, clear_events);
+       if (ret < 0)
+               return ret;
+
        ret = regmap_add_irq_chip(da9055->regmap, da9055->chip_irq,
                                  IRQF_TRIGGER_LOW | IRQF_ONESHOT,
                                  da9055->irq_base, &da9055_regmap_irq_chip,
index 607387ffe8caa85e6d0396facd62a094dc4523a7..13af7e50021eedd5767aa296cfe46118f55f49ac 100644 (file)
@@ -54,7 +54,7 @@ static int da9055_i2c_remove(struct i2c_client *i2c)
 }
 
 static struct i2c_device_id da9055_i2c_id[] = {
-       {"da9055-pmic", 0},
+       {"da9055", 0},
        { }
 };
 
diff --git a/drivers/mfd/da9063-core.c b/drivers/mfd/da9063-core.c
new file mode 100644 (file)
index 0000000..c9cf8d9
--- /dev/null
@@ -0,0 +1,185 @@
+/*
+ * da9063-core.c: Device access for Dialog DA9063 modules
+ *
+ * Copyright 2012 Dialog Semiconductor Ltd.
+ * Copyright 2013 Philipp Zabel, Pengutronix
+ *
+ * Author: Krystian Garbaciak <krystian.garbaciak@diasemi.com>,
+ *         Michal Hajduk <michal.hajduk@diasemi.com>
+ *
+ *  This program is free software; you can redistribute  it and/or modify it
+ *  under  the terms of  the GNU General  Public License as published by the
+ *  Free Software Foundation;  either version 2 of the  License, or (at your
+ *  option) any later version.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/mutex.h>
+#include <linux/mfd/core.h>
+#include <linux/regmap.h>
+
+#include <linux/mfd/da9063/core.h>
+#include <linux/mfd/da9063/pdata.h>
+#include <linux/mfd/da9063/registers.h>
+
+#include <linux/proc_fs.h>
+#include <linux/kthread.h>
+#include <linux/uaccess.h>
+
+
+static struct resource da9063_regulators_resources[] = {
+       {
+               .name   = "LDO_LIM",
+               .start  = DA9063_IRQ_LDO_LIM,
+               .end    = DA9063_IRQ_LDO_LIM,
+               .flags  = IORESOURCE_IRQ,
+       },
+};
+
+static struct resource da9063_rtc_resources[] = {
+       {
+               .name   = "ALARM",
+               .start  = DA9063_IRQ_ALARM,
+               .end    = DA9063_IRQ_ALARM,
+               .flags  = IORESOURCE_IRQ,
+       },
+       {
+               .name   = "TICK",
+               .start  = DA9063_IRQ_TICK,
+               .end    = DA9063_IRQ_TICK,
+               .flags  = IORESOURCE_IRQ,
+       }
+};
+
+static struct resource da9063_onkey_resources[] = {
+       {
+               .start  = DA9063_IRQ_ONKEY,
+               .end    = DA9063_IRQ_ONKEY,
+               .flags  = IORESOURCE_IRQ,
+       },
+};
+
+static struct resource da9063_hwmon_resources[] = {
+       {
+               .start  = DA9063_IRQ_ADC_RDY,
+               .end    = DA9063_IRQ_ADC_RDY,
+               .flags  = IORESOURCE_IRQ,
+       },
+};
+
+
+static struct mfd_cell da9063_devs[] = {
+       {
+               .name           = DA9063_DRVNAME_REGULATORS,
+               .num_resources  = ARRAY_SIZE(da9063_regulators_resources),
+               .resources      = da9063_regulators_resources,
+       },
+       {
+               .name           = DA9063_DRVNAME_LEDS,
+       },
+       {
+               .name           = DA9063_DRVNAME_WATCHDOG,
+       },
+       {
+               .name           = DA9063_DRVNAME_HWMON,
+               .num_resources  = ARRAY_SIZE(da9063_hwmon_resources),
+               .resources      = da9063_hwmon_resources,
+       },
+       {
+               .name           = DA9063_DRVNAME_ONKEY,
+               .num_resources  = ARRAY_SIZE(da9063_onkey_resources),
+               .resources      = da9063_onkey_resources,
+       },
+       {
+               .name           = DA9063_DRVNAME_RTC,
+               .num_resources  = ARRAY_SIZE(da9063_rtc_resources),
+               .resources      = da9063_rtc_resources,
+       },
+       {
+               .name           = DA9063_DRVNAME_VIBRATION,
+       },
+};
+
+int da9063_device_init(struct da9063 *da9063, unsigned int irq)
+{
+       struct da9063_pdata *pdata = da9063->dev->platform_data;
+       int model, revision;
+       int ret;
+
+       if (pdata) {
+               da9063->flags = pdata->flags;
+               da9063->irq_base = pdata->irq_base;
+       } else {
+               da9063->flags = 0;
+               da9063->irq_base = 0;
+       }
+       da9063->chip_irq = irq;
+
+       if (pdata && pdata->init != NULL) {
+               ret = pdata->init(da9063);
+               if (ret != 0) {
+                       dev_err(da9063->dev,
+                               "Platform initialization failed.\n");
+                       return ret;
+               }
+       }
+
+       ret = regmap_read(da9063->regmap, DA9063_REG_CHIP_ID, &model);
+       if (ret < 0) {
+               dev_err(da9063->dev, "Cannot read chip model id.\n");
+               return -EIO;
+       }
+       if (model != PMIC_DA9063) {
+               dev_err(da9063->dev, "Invalid chip model id: 0x%02x\n", model);
+               return -ENODEV;
+       }
+
+       ret = regmap_read(da9063->regmap, DA9063_REG_CHIP_VARIANT, &revision);
+       if (ret < 0) {
+               dev_err(da9063->dev, "Cannot read chip revision id.\n");
+               return -EIO;
+       }
+       revision >>= DA9063_CHIP_VARIANT_SHIFT;
+       if (revision != 3) {
+               dev_err(da9063->dev, "Unknown chip revision: %d\n", revision);
+               return -ENODEV;
+       }
+
+       da9063->model = model;
+       da9063->revision = revision;
+
+       dev_info(da9063->dev,
+                "Device detected (model-ID: 0x%02X  rev-ID: 0x%02X)\n",
+                model, revision);
+
+       ret = da9063_irq_init(da9063);
+       if (ret) {
+               dev_err(da9063->dev, "Cannot initialize interrupts.\n");
+               return ret;
+       }
+
+       ret = mfd_add_devices(da9063->dev, -1, da9063_devs,
+                             ARRAY_SIZE(da9063_devs), NULL, da9063->irq_base,
+                             NULL);
+       if (ret)
+               dev_err(da9063->dev, "Cannot add MFD cells\n");
+
+       return ret;
+}
+
+void da9063_device_exit(struct da9063 *da9063)
+{
+       mfd_remove_devices(da9063->dev);
+       da9063_irq_exit(da9063);
+}
+
+MODULE_DESCRIPTION("PMIC driver for Dialog DA9063");
+MODULE_AUTHOR("Krystian Garbaciak <krystian.garbaciak@diasemi.com>, Michal Hajduk <michal.hajduk@diasemi.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/da9063-i2c.c b/drivers/mfd/da9063-i2c.c
new file mode 100644 (file)
index 0000000..8db5c80
--- /dev/null
@@ -0,0 +1,182 @@
+/* da9063-i2c.c: I2C support for Dialog DA9063
+ *
+ * Copyright 2012 Dialog Semiconductor Ltd.
+ * Copyright 2013 Philipp Zabel, Pengutronix
+ *
+ * Author: Krystian Garbaciak <krystian.garbaciak@diasemi.com>
+ *
+ *  This program is free software; you can redistribute  it and/or modify it
+ *  under  the terms of  the GNU General  Public License as published by the
+ *  Free Software Foundation;  either version 2 of the  License, or (at your
+ *  option) any later version.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/regmap.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+
+#include <linux/mfd/core.h>
+#include <linux/mfd/da9063/core.h>
+#include <linux/mfd/da9063/pdata.h>
+#include <linux/mfd/da9063/registers.h>
+
+static const struct regmap_range da9063_readable_ranges[] = {
+       {
+               .range_min = DA9063_REG_PAGE_CON,
+               .range_max = DA9063_REG_SECOND_D,
+       }, {
+               .range_min = DA9063_REG_SEQ,
+               .range_max = DA9063_REG_ID_32_31,
+       }, {
+               .range_min = DA9063_REG_SEQ_A,
+               .range_max = DA9063_REG_AUTO3_LOW,
+       }, {
+               .range_min = DA9063_REG_T_OFFSET,
+               .range_max = DA9063_REG_GP_ID_19,
+       }, {
+               .range_min = DA9063_REG_CHIP_ID,
+               .range_max = DA9063_REG_CHIP_VARIANT,
+       },
+};
+
+static const struct regmap_range da9063_writeable_ranges[] = {
+       {
+               .range_min = DA9063_REG_PAGE_CON,
+               .range_max = DA9063_REG_PAGE_CON,
+       }, {
+               .range_min = DA9063_REG_FAULT_LOG,
+               .range_max = DA9063_REG_VSYS_MON,
+       }, {
+               .range_min = DA9063_REG_COUNT_S,
+               .range_max = DA9063_REG_ALARM_Y,
+       }, {
+               .range_min = DA9063_REG_SEQ,
+               .range_max = DA9063_REG_ID_32_31,
+       }, {
+               .range_min = DA9063_REG_SEQ_A,
+               .range_max = DA9063_REG_AUTO3_LOW,
+       }, {
+               .range_min = DA9063_REG_CONFIG_I,
+               .range_max = DA9063_REG_MON_REG_4,
+       }, {
+               .range_min = DA9063_REG_GP_ID_0,
+               .range_max = DA9063_REG_GP_ID_19,
+       },
+};
+
+static const struct regmap_range da9063_volatile_ranges[] = {
+       {
+               .range_min = DA9063_REG_STATUS_A,
+               .range_max = DA9063_REG_EVENT_D,
+       }, {
+               .range_min = DA9063_REG_CONTROL_F,
+               .range_max = DA9063_REG_CONTROL_F,
+       }, {
+               .range_min = DA9063_REG_ADC_MAN,
+               .range_max = DA9063_REG_ADC_MAN,
+       }, {
+               .range_min = DA9063_REG_ADC_RES_L,
+               .range_max = DA9063_REG_SECOND_D,
+       }, {
+               .range_min = DA9063_REG_MON_REG_5,
+               .range_max = DA9063_REG_MON_REG_6,
+       },
+};
+
+static const struct regmap_access_table da9063_readable_table = {
+       .yes_ranges = da9063_readable_ranges,
+       .n_yes_ranges = ARRAY_SIZE(da9063_readable_ranges),
+};
+
+static const struct regmap_access_table da9063_writeable_table = {
+       .yes_ranges = da9063_writeable_ranges,
+       .n_yes_ranges = ARRAY_SIZE(da9063_writeable_ranges),
+};
+
+static const struct regmap_access_table da9063_volatile_table = {
+       .yes_ranges = da9063_volatile_ranges,
+       .n_yes_ranges = ARRAY_SIZE(da9063_volatile_ranges),
+};
+
+static const struct regmap_range_cfg da9063_range_cfg[] = {
+       {
+               .range_min = DA9063_REG_PAGE_CON,
+               .range_max = DA9063_REG_CHIP_VARIANT,
+               .selector_reg = DA9063_REG_PAGE_CON,
+               .selector_mask = 1 << DA9063_I2C_PAGE_SEL_SHIFT,
+               .selector_shift = DA9063_I2C_PAGE_SEL_SHIFT,
+               .window_start = 0,
+               .window_len = 256,
+       }
+};
+
+static struct regmap_config da9063_regmap_config = {
+       .reg_bits = 8,
+       .val_bits = 8,
+       .ranges = da9063_range_cfg,
+       .num_ranges = ARRAY_SIZE(da9063_range_cfg),
+       .max_register = DA9063_REG_CHIP_VARIANT,
+
+       .cache_type = REGCACHE_RBTREE,
+
+       .rd_table = &da9063_readable_table,
+       .wr_table = &da9063_writeable_table,
+       .volatile_table = &da9063_volatile_table,
+};
+
+static int da9063_i2c_probe(struct i2c_client *i2c,
+       const struct i2c_device_id *id)
+{
+       struct da9063 *da9063;
+       int ret;
+
+       da9063 = devm_kzalloc(&i2c->dev, sizeof(struct da9063), GFP_KERNEL);
+       if (da9063 == NULL)
+               return -ENOMEM;
+
+       i2c_set_clientdata(i2c, da9063);
+       da9063->dev = &i2c->dev;
+       da9063->chip_irq = i2c->irq;
+
+       da9063->regmap = devm_regmap_init_i2c(i2c, &da9063_regmap_config);
+       if (IS_ERR(da9063->regmap)) {
+               ret = PTR_ERR(da9063->regmap);
+               dev_err(da9063->dev, "Failed to allocate register map: %d\n",
+                       ret);
+               return ret;
+       }
+
+       return da9063_device_init(da9063, i2c->irq);
+}
+
+static int da9063_i2c_remove(struct i2c_client *i2c)
+{
+       struct da9063 *da9063 = i2c_get_clientdata(i2c);
+
+       da9063_device_exit(da9063);
+
+       return 0;
+}
+
+static const struct i2c_device_id da9063_i2c_id[] = {
+       {"da9063", PMIC_DA9063},
+       {},
+};
+MODULE_DEVICE_TABLE(i2c, da9063_i2c_id);
+
+static struct i2c_driver da9063_i2c_driver = {
+       .driver = {
+               .name = "da9063",
+               .owner = THIS_MODULE,
+       },
+       .probe    = da9063_i2c_probe,
+       .remove   = da9063_i2c_remove,
+       .id_table = da9063_i2c_id,
+};
+
+module_i2c_driver(da9063_i2c_driver);
diff --git a/drivers/mfd/da9063-irq.c b/drivers/mfd/da9063-irq.c
new file mode 100644 (file)
index 0000000..8229226
--- /dev/null
@@ -0,0 +1,193 @@
+/* da9063-irq.c: Interrupts support for Dialog DA9063
+ *
+ * Copyright 2012 Dialog Semiconductor Ltd.
+ * Copyright 2013 Philipp Zabel, Pengutronix
+ *
+ * Author: Michal Hajduk <michal.hajduk@diasemi.com>
+ *
+ *  This program is free software; you can redistribute  it and/or modify it
+ *  under  the terms of  the GNU General  Public License as published by the
+ *  Free Software Foundation;  either version 2 of the  License, or (at your
+ *  option) any later version.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/irq.h>
+#include <linux/mfd/core.h>
+#include <linux/interrupt.h>
+#include <linux/regmap.h>
+#include <linux/mfd/da9063/core.h>
+#include <linux/mfd/da9063/pdata.h>
+
+#define        DA9063_REG_EVENT_A_OFFSET       0
+#define        DA9063_REG_EVENT_B_OFFSET       1
+#define        DA9063_REG_EVENT_C_OFFSET       2
+#define        DA9063_REG_EVENT_D_OFFSET       3
+#define EVENTS_BUF_LEN                 4
+
+static const u8 mask_events_buf[] = { [0 ... (EVENTS_BUF_LEN - 1)] = ~0 };
+
+struct da9063_irq_data {
+       u16 reg;
+       u8 mask;
+};
+
+static struct regmap_irq da9063_irqs[] = {
+       /* DA9063 event A register */
+       [DA9063_IRQ_ONKEY] = {
+               .reg_offset = DA9063_REG_EVENT_A_OFFSET,
+               .mask = DA9063_M_ONKEY,
+       },
+       [DA9063_IRQ_ALARM] = {
+               .reg_offset = DA9063_REG_EVENT_A_OFFSET,
+               .mask = DA9063_M_ALARM,
+       },
+       [DA9063_IRQ_TICK] = {
+               .reg_offset = DA9063_REG_EVENT_A_OFFSET,
+               .mask = DA9063_M_TICK,
+       },
+       [DA9063_IRQ_ADC_RDY] = {
+               .reg_offset = DA9063_REG_EVENT_A_OFFSET,
+               .mask = DA9063_M_ADC_RDY,
+       },
+       [DA9063_IRQ_SEQ_RDY] = {
+               .reg_offset = DA9063_REG_EVENT_A_OFFSET,
+               .mask = DA9063_M_SEQ_RDY,
+       },
+       /* DA9063 event B register */
+       [DA9063_IRQ_WAKE] = {
+               .reg_offset = DA9063_REG_EVENT_B_OFFSET,
+               .mask = DA9063_M_WAKE,
+       },
+       [DA9063_IRQ_TEMP] = {
+               .reg_offset = DA9063_REG_EVENT_B_OFFSET,
+               .mask = DA9063_M_TEMP,
+       },
+       [DA9063_IRQ_COMP_1V2] = {
+               .reg_offset = DA9063_REG_EVENT_B_OFFSET,
+               .mask = DA9063_M_COMP_1V2,
+       },
+       [DA9063_IRQ_LDO_LIM] = {
+               .reg_offset = DA9063_REG_EVENT_B_OFFSET,
+               .mask = DA9063_M_LDO_LIM,
+       },
+       [DA9063_IRQ_REG_UVOV] = {
+               .reg_offset = DA9063_REG_EVENT_B_OFFSET,
+               .mask = DA9063_M_UVOV,
+       },
+       [DA9063_IRQ_VDD_MON] = {
+               .reg_offset = DA9063_REG_EVENT_B_OFFSET,
+               .mask = DA9063_M_VDD_MON,
+       },
+       [DA9063_IRQ_WARN] = {
+               .reg_offset = DA9063_REG_EVENT_B_OFFSET,
+               .mask = DA9063_M_VDD_WARN,
+       },
+       /* DA9063 event C register */
+       [DA9063_IRQ_GPI0] = {
+               .reg_offset = DA9063_REG_EVENT_C_OFFSET,
+               .mask = DA9063_M_GPI0,
+       },
+       [DA9063_IRQ_GPI1] = {
+               .reg_offset = DA9063_REG_EVENT_C_OFFSET,
+               .mask = DA9063_M_GPI1,
+       },
+       [DA9063_IRQ_GPI2] = {
+               .reg_offset = DA9063_REG_EVENT_C_OFFSET,
+               .mask = DA9063_M_GPI2,
+       },
+       [DA9063_IRQ_GPI3] = {
+               .reg_offset = DA9063_REG_EVENT_C_OFFSET,
+               .mask = DA9063_M_GPI3,
+       },
+       [DA9063_IRQ_GPI4] = {
+               .reg_offset = DA9063_REG_EVENT_C_OFFSET,
+               .mask = DA9063_M_GPI4,
+       },
+       [DA9063_IRQ_GPI5] = {
+               .reg_offset = DA9063_REG_EVENT_C_OFFSET,
+               .mask = DA9063_M_GPI5,
+       },
+       [DA9063_IRQ_GPI6] = {
+               .reg_offset = DA9063_REG_EVENT_C_OFFSET,
+               .mask = DA9063_M_GPI6,
+       },
+       [DA9063_IRQ_GPI7] = {
+               .reg_offset = DA9063_REG_EVENT_C_OFFSET,
+               .mask = DA9063_M_GPI7,
+       },
+       /* DA9063 event D register */
+       [DA9063_IRQ_GPI8] = {
+               .reg_offset = DA9063_REG_EVENT_D_OFFSET,
+               .mask = DA9063_M_GPI8,
+       },
+       [DA9063_IRQ_GPI9] = {
+               .reg_offset = DA9063_REG_EVENT_D_OFFSET,
+               .mask = DA9063_M_GPI9,
+       },
+       [DA9063_IRQ_GPI10] = {
+               .reg_offset = DA9063_REG_EVENT_D_OFFSET,
+               .mask = DA9063_M_GPI10,
+       },
+       [DA9063_IRQ_GPI11] = {
+               .reg_offset = DA9063_REG_EVENT_D_OFFSET,
+               .mask = DA9063_M_GPI11,
+       },
+       [DA9063_IRQ_GPI12] = {
+               .reg_offset = DA9063_REG_EVENT_D_OFFSET,
+               .mask = DA9063_M_GPI12,
+       },
+       [DA9063_IRQ_GPI13] = {
+               .reg_offset = DA9063_REG_EVENT_D_OFFSET,
+               .mask = DA9063_M_GPI13,
+       },
+       [DA9063_IRQ_GPI14] = {
+               .reg_offset = DA9063_REG_EVENT_D_OFFSET,
+               .mask = DA9063_M_GPI14,
+       },
+       [DA9063_IRQ_GPI15] = {
+               .reg_offset = DA9063_REG_EVENT_D_OFFSET,
+               .mask = DA9063_M_GPI15,
+       },
+};
+
+static struct regmap_irq_chip da9063_irq_chip = {
+       .name = "da9063-irq",
+       .irqs = da9063_irqs,
+       .num_irqs = DA9063_NUM_IRQ,
+
+       .num_regs = 4,
+       .status_base = DA9063_REG_EVENT_A,
+       .mask_base = DA9063_REG_IRQ_MASK_A,
+       .ack_base = DA9063_REG_EVENT_A,
+       .init_ack_masked = true,
+};
+
+int da9063_irq_init(struct da9063 *da9063)
+{
+       int ret;
+
+       if (!da9063->chip_irq) {
+               dev_err(da9063->dev, "No IRQ configured\n");
+               return -EINVAL;
+       }
+
+       ret = regmap_add_irq_chip(da9063->regmap, da9063->chip_irq,
+                       IRQF_TRIGGER_LOW | IRQF_ONESHOT | IRQF_SHARED,
+                       da9063->irq_base, &da9063_irq_chip,
+                       &da9063->regmap_irq);
+       if (ret) {
+               dev_err(da9063->dev, "Failed to reguest IRQ %d: %d\n",
+                               da9063->chip_irq, ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+void da9063_irq_exit(struct da9063 *da9063)
+{
+       regmap_del_irq_chip(da9063->chip_irq, da9063->regmap_irq);
+}
index 3c157faee645805f398cc2047d5f7b035555aacc..0d68eb1a5ec528924695959646a29a31d87ca1a8 100644 (file)
@@ -3093,6 +3093,10 @@ static struct mfd_cell db8500_prcmu_devs[] = {
                .platform_data = &db8500_cpufreq_table,
                .pdata_size = sizeof(db8500_cpufreq_table),
        },
+       {
+               .name = "cpuidle-dbx500",
+               .of_compatible = "stericsson,cpuidle-dbx500",
+       },
        {
                .name = "db8500-thermal",
                .num_resources = ARRAY_SIZE(db8500_thsens_resources),
index 7710227d284e82f3ea98bd6dcf307abd177f71e9..7a55c0071fa8e9840b47420277673120547df191 100644 (file)
@@ -315,8 +315,8 @@ static int add_children(struct i2c_client *client)
        }
 
        /* MMC/SD inputs -- right after the last config input */
-       if (client->dev.platform_data) {
-               void (*mmcsd_setup)(unsigned) = client->dev.platform_data;
+       if (dev_get_platdata(&client->dev)) {
+               void (*mmcsd_setup)(unsigned) = dev_get_platdata(&client->dev);
 
                mmcsd_setup(dm355evm_msp_gpio.base + 8 + 5);
        }
index 5502106ad5157dcf8c3a2648c29e949c1b60b7b2..7245b0c5b794be489d1c5d250c196c92e028a48f 100644 (file)
@@ -177,7 +177,7 @@ static void pcap_msr_work(struct work_struct *work)
 static void pcap_isr_work(struct work_struct *work)
 {
        struct pcap_chip *pcap = container_of(work, struct pcap_chip, isr_work);
-       struct pcap_platform_data *pdata = pcap->spi->dev.platform_data;
+       struct pcap_platform_data *pdata = dev_get_platdata(&pcap->spi->dev);
        u32 msr, isr, int_sel, service;
        int irq;
 
@@ -394,7 +394,7 @@ static int pcap_add_subdev(struct pcap_chip *pcap,
 static int ezx_pcap_remove(struct spi_device *spi)
 {
        struct pcap_chip *pcap = spi_get_drvdata(spi);
-       struct pcap_platform_data *pdata = spi->dev.platform_data;
+       struct pcap_platform_data *pdata = dev_get_platdata(&spi->dev);
        int i, adc_irq;
 
        /* remove all registered subdevs */
@@ -420,7 +420,7 @@ static int ezx_pcap_remove(struct spi_device *spi)
 
 static int ezx_pcap_probe(struct spi_device *spi)
 {
-       struct pcap_platform_data *pdata = spi->dev.platform_data;
+       struct pcap_platform_data *pdata = dev_get_platdata(&spi->dev);
        struct pcap_chip *pcap;
        int i, adc_irq;
        int ret = -ENODEV;
index 26aca545084b38bc6373b85998410bd28392b1f6..49f39feca7843e73bce9e906953ed5620936a193 100644 (file)
@@ -261,7 +261,7 @@ static void egpio_write_cache(struct egpio_info *ei)
 
 static int __init egpio_probe(struct platform_device *pdev)
 {
-       struct htc_egpio_platform_data *pdata = pdev->dev.platform_data;
+       struct htc_egpio_platform_data *pdata = dev_get_platdata(&pdev->dev);
        struct resource   *res;
        struct egpio_info *ei;
        struct gpio_chip  *chip;
index c9dfce6ae0c289229bf895adb92d7a27e440ec8e..d7b2a75aca3e5fcd9b38de98ceb4f5c3b16f013e 100644 (file)
@@ -340,7 +340,7 @@ static int htcpld_setup_chip_irq(
        int ret = 0;
 
        /* Get the platform and driver data */
-       pdata = dev->platform_data;
+       pdata = dev_get_platdata(dev);
        htcpld = platform_get_drvdata(pdev);
        chip = &htcpld->chip[chip_index];
        plat_chip_data = &pdata->chip[chip_index];
@@ -375,7 +375,7 @@ static int htcpld_register_chip_i2c(
        struct i2c_board_info info;
 
        /* Get the platform and driver data */
-       pdata = dev->platform_data;
+       pdata = dev_get_platdata(dev);
        htcpld = platform_get_drvdata(pdev);
        chip = &htcpld->chip[chip_index];
        plat_chip_data = &pdata->chip[chip_index];
@@ -447,7 +447,7 @@ static int htcpld_register_chip_gpio(
        int ret = 0;
 
        /* Get the platform and driver data */
-       pdata = dev->platform_data;
+       pdata = dev_get_platdata(dev);
        htcpld = platform_get_drvdata(pdev);
        chip = &htcpld->chip[chip_index];
        plat_chip_data = &pdata->chip[chip_index];
@@ -509,7 +509,7 @@ static int htcpld_setup_chips(struct platform_device *pdev)
        int i;
 
        /* Get the platform and driver data */
-       pdata = dev->platform_data;
+       pdata = dev_get_platdata(dev);
        htcpld = platform_get_drvdata(pdev);
 
        /* Setup each chip's output GPIOs */
@@ -574,7 +574,7 @@ static int htcpld_core_probe(struct platform_device *pdev)
        if (!dev)
                return -ENODEV;
 
-       pdata = dev->platform_data;
+       pdata = dev_get_platdata(dev);
        if (!pdata) {
                dev_warn(dev, "Platform data not found for htcpld core!\n");
                return -ENXIO;
index 0a5e85fd8517c145a952183349ee7fbc310ad85b..6bf92a507b9546b911e258037c2dba3ac391851b 100644 (file)
@@ -126,7 +126,7 @@ static struct mfd_cell ds1wm_cell __initdata = {
 
 static int __init pasic3_probe(struct platform_device *pdev)
 {
-       struct pasic3_platform_data *pdata = pdev->dev.platform_data;
+       struct pasic3_platform_data *pdata = dev_get_platdata(&pdev->dev);
        struct device *dev = &pdev->dev;
        struct pasic3_data *asic;
        struct resource *r;
index 4f2462f0963e6fac016f6cacbf917a012870eb46..9203d47cdbb1b4106e87ba91e4571cea54c55543 100644 (file)
@@ -310,7 +310,7 @@ EXPORT_SYMBOL_GPL(intel_msic_irq_read);
 static int intel_msic_init_devices(struct intel_msic *msic)
 {
        struct platform_device *pdev = msic->pdev;
-       struct intel_msic_platform_data *pdata = pdev->dev.platform_data;
+       struct intel_msic_platform_data *pdata = dev_get_platdata(&pdev->dev);
        int ret, i;
 
        if (pdata->gpio) {
@@ -372,7 +372,7 @@ static void intel_msic_remove_devices(struct intel_msic *msic)
 
 static int intel_msic_probe(struct platform_device *pdev)
 {
-       struct intel_msic_platform_data *pdata = pdev->dev.platform_data;
+       struct intel_msic_platform_data *pdata = dev_get_platdata(&pdev->dev);
        struct intel_msic *msic;
        struct resource *res;
        u8 id0, id1;
index 686a4565acb6a6c3b5b3620cb96720014cb6e089..d3e23278d299021f34bab4aacbb361d117f5a40b 100644 (file)
@@ -258,7 +258,7 @@ EXPORT_SYMBOL_GPL(kempld_write32);
  */
 void kempld_get_mutex(struct kempld_device_data *pld)
 {
-       struct kempld_platform_data *pdata = pld->dev->platform_data;
+       struct kempld_platform_data *pdata = dev_get_platdata(pld->dev);
 
        mutex_lock(&pld->lock);
        pdata->get_hardware_mutex(pld);
@@ -271,7 +271,7 @@ EXPORT_SYMBOL_GPL(kempld_get_mutex);
  */
 void kempld_release_mutex(struct kempld_device_data *pld)
 {
-       struct kempld_platform_data *pdata = pld->dev->platform_data;
+       struct kempld_platform_data *pdata = dev_get_platdata(pld->dev);
 
        pdata->release_hardware_mutex(pld);
        mutex_unlock(&pld->lock);
@@ -288,7 +288,7 @@ EXPORT_SYMBOL_GPL(kempld_release_mutex);
  */
 static int kempld_get_info(struct kempld_device_data *pld)
 {
-       struct kempld_platform_data *pdata = pld->dev->platform_data;
+       struct kempld_platform_data *pdata = dev_get_platdata(pld->dev);
 
        return pdata->get_info(pld);
 }
@@ -302,7 +302,7 @@ static int kempld_get_info(struct kempld_device_data *pld)
  */
 static int kempld_register_cells(struct kempld_device_data *pld)
 {
-       struct kempld_platform_data *pdata = pld->dev->platform_data;
+       struct kempld_platform_data *pdata = dev_get_platdata(pld->dev);
 
        return pdata->register_cells(pld);
 }
@@ -357,7 +357,7 @@ static int kempld_detect_device(struct kempld_device_data *pld)
 
 static int kempld_probe(struct platform_device *pdev)
 {
-       struct kempld_platform_data *pdata = pdev->dev.platform_data;
+       struct kempld_platform_data *pdata = dev_get_platdata(&pdev->dev);
        struct device *dev = &pdev->dev;
        struct kempld_device_data *pld;
        struct resource *ioport;
@@ -394,7 +394,7 @@ static int kempld_probe(struct platform_device *pdev)
 static int kempld_remove(struct platform_device *pdev)
 {
        struct kempld_device_data *pld = platform_get_drvdata(pdev);
-       struct kempld_platform_data *pdata = pld->dev->platform_data;
+       struct kempld_platform_data *pdata = dev_get_platdata(pld->dev);
 
        mfd_remove_devices(&pdev->dev);
        pdata->release_hardware_mutex(pld);
@@ -412,6 +412,15 @@ static struct platform_driver kempld_driver = {
 };
 
 static struct dmi_system_id __initdata kempld_dmi_table[] = {
+       {
+               .ident = "BHL6",
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_VENDOR, "Kontron"),
+                       DMI_MATCH(DMI_BOARD_NAME, "COMe-bHL6"),
+               },
+               .driver_data = (void *)&kempld_platform_data_generic,
+               .callback = kempld_create_platform_device,
+       },
        {
                .ident = "CCR2",
                .matches = {
@@ -596,6 +605,15 @@ static struct dmi_system_id __initdata kempld_dmi_table[] = {
                .driver_data = (void *)&kempld_platform_data_generic,
                .callback = kempld_create_platform_device,
        },
+       {
+               .ident = "UTH6",
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_VENDOR, "Kontron"),
+                       DMI_MATCH(DMI_BOARD_NAME, "COMe-cTH6"),
+               },
+               .driver_data = (void *)&kempld_platform_data_generic,
+               .callback = kempld_create_platform_device,
+       },
        {}
 };
 MODULE_DEVICE_TABLE(dmi, kempld_dmi_table);
index 4b7e6dac1de80ea22b556902d1f7896dbd7a6225..8c29f7b27324f4980e1b73f98b7186c10107120a 100644 (file)
@@ -384,7 +384,7 @@ static struct attribute_group lm3533_attribute_group = {
 
 static int lm3533_device_als_init(struct lm3533 *lm3533)
 {
-       struct lm3533_platform_data *pdata = lm3533->dev->platform_data;
+       struct lm3533_platform_data *pdata = dev_get_platdata(lm3533->dev);
        int ret;
 
        if (!pdata->als)
@@ -407,7 +407,7 @@ static int lm3533_device_als_init(struct lm3533 *lm3533)
 
 static int lm3533_device_bl_init(struct lm3533 *lm3533)
 {
-       struct lm3533_platform_data *pdata = lm3533->dev->platform_data;
+       struct lm3533_platform_data *pdata = dev_get_platdata(lm3533->dev);
        int i;
        int ret;
 
@@ -436,7 +436,7 @@ static int lm3533_device_bl_init(struct lm3533 *lm3533)
 
 static int lm3533_device_led_init(struct lm3533 *lm3533)
 {
-       struct lm3533_platform_data *pdata = lm3533->dev->platform_data;
+       struct lm3533_platform_data *pdata = dev_get_platdata(lm3533->dev);
        int i;
        int ret;
 
@@ -481,7 +481,7 @@ static int lm3533_device_setup(struct lm3533 *lm3533,
 
 static int lm3533_device_init(struct lm3533 *lm3533)
 {
-       struct lm3533_platform_data *pdata = lm3533->dev->platform_data;
+       struct lm3533_platform_data *pdata = dev_get_platdata(lm3533->dev);
        int ret;
 
        dev_dbg(lm3533->dev, "%s\n", __func__);
index c3d3c9b4d3addedc53bc5b7450cd45a2e4707a72..0f1221911018b6d4fd4e558ff0df4996a4b70a10 100644 (file)
@@ -173,7 +173,7 @@ static const struct regmap_config lp8788_regmap_config = {
 static int lp8788_probe(struct i2c_client *cl, const struct i2c_device_id *id)
 {
        struct lp8788 *lp;
-       struct lp8788_platform_data *pdata = cl->dev.platform_data;
+       struct lp8788_platform_data *pdata = dev_get_platdata(&cl->dev);
        int ret;
 
        lp = devm_kzalloc(&cl->dev, sizeof(struct lp8788), GFP_KERNEL);
index 24033324c17a08437d3e62e3b1fa705c53f5de53..9483bc8472a51acbbc45a3f08b6072104a81e027 100644 (file)
@@ -213,7 +213,7 @@ enum lpc_chipsets {
        LPC_COLETO,     /* Coleto Creek */
 };
 
-struct lpc_ich_info lpc_chipset_info[] = {
+static struct lpc_ich_info lpc_chipset_info[] = {
        [LPC_ICH] = {
                .name = "ICH",
                .iTCO_version = 1,
index f27a21831583b2e94e06424384deee76f916e03a..522be67b2e682d743c36534005fb09737cfcf590 100644 (file)
@@ -77,7 +77,7 @@ static int max77686_i2c_probe(struct i2c_client *i2c,
                              const struct i2c_device_id *id)
 {
        struct max77686_dev *max77686 = NULL;
-       struct max77686_platform_data *pdata = i2c->dev.platform_data;
+       struct max77686_platform_data *pdata = dev_get_platdata(&i2c->dev);
        unsigned int data;
        int ret = 0;
 
index 9e60fed5ff82a81dbf774c7d8af7547f12861516..c04723efc70709d4dd81db288be1fa11fc6deccb 100644 (file)
@@ -110,7 +110,7 @@ static int max77693_i2c_probe(struct i2c_client *i2c,
                              const struct i2c_device_id *id)
 {
        struct max77693_dev *max77693;
-       struct max77693_platform_data *pdata = i2c->dev.platform_data;
+       struct max77693_platform_data *pdata = dev_get_platdata(&i2c->dev);
        u8 reg_data;
        int ret = 0;
 
index 8042b3205eaaf450b3a46b2e657bd5445350c8b1..de7fb80a60528e8f3bcbc2c45e934dcfa5a65181 100644 (file)
@@ -151,7 +151,7 @@ static int max8925_dt_init(struct device_node *np, struct device *dev,
 static int max8925_probe(struct i2c_client *client,
                                   const struct i2c_device_id *id)
 {
-       struct max8925_platform_data *pdata = client->dev.platform_data;
+       struct max8925_platform_data *pdata = dev_get_platdata(&client->dev);
        static struct max8925_chip *chip;
        struct device_node *node = client->dev.of_node;
 
index 14714058f2d2a673e5b678d950e6184454a189bf..cee098c0dae36ef4ef9baf9d3fb07ec11e24913f 100644 (file)
@@ -51,7 +51,7 @@ static struct mfd_cell max8997_devs[] = {
 
 #ifdef CONFIG_OF
 static struct of_device_id max8997_pmic_dt_match[] = {
-       { .compatible = "maxim,max8997-pmic", .data = TYPE_MAX8997 },
+       { .compatible = "maxim,max8997-pmic", .data = (void *)TYPE_MAX8997 },
        {},
 };
 #endif
@@ -188,10 +188,11 @@ static int max8997_i2c_probe(struct i2c_client *i2c,
                            const struct i2c_device_id *id)
 {
        struct max8997_dev *max8997;
-       struct max8997_platform_data *pdata = i2c->dev.platform_data;
+       struct max8997_platform_data *pdata = dev_get_platdata(&i2c->dev);
        int ret = 0;
 
-       max8997 = kzalloc(sizeof(struct max8997_dev), GFP_KERNEL);
+       max8997 = devm_kzalloc(&i2c->dev, sizeof(struct max8997_dev),
+                               GFP_KERNEL);
        if (max8997 == NULL)
                return -ENOMEM;
 
@@ -203,14 +204,12 @@ static int max8997_i2c_probe(struct i2c_client *i2c,
 
        if (max8997->dev->of_node) {
                pdata = max8997_i2c_parse_dt_pdata(max8997->dev);
-               if (IS_ERR(pdata)) {
-                       ret = PTR_ERR(pdata);
-                       goto err;
-               }
+               if (IS_ERR(pdata))
+                       return PTR_ERR(pdata);
        }
 
        if (!pdata)
-               goto err;
+               return ret;
 
        max8997->pdata = pdata;
        max8997->ono = pdata->ono;
@@ -250,8 +249,6 @@ err_mfd:
        i2c_unregister_device(max8997->muic);
        i2c_unregister_device(max8997->haptic);
        i2c_unregister_device(max8997->rtc);
-err:
-       kfree(max8997);
        return ret;
 }
 
@@ -263,7 +260,6 @@ static int max8997_i2c_remove(struct i2c_client *i2c)
        i2c_unregister_device(max8997->muic);
        i2c_unregister_device(max8997->haptic);
        i2c_unregister_device(max8997->rtc);
-       kfree(max8997);
 
        return 0;
 }
index 21af51a499f4af42f657a25a4f134437b4892b4e..fe6332dcabee891a27daed80e3ed2525e3b1caa8 100644 (file)
@@ -184,11 +184,12 @@ static inline int max8998_i2c_get_driver_data(struct i2c_client *i2c,
 static int max8998_i2c_probe(struct i2c_client *i2c,
                            const struct i2c_device_id *id)
 {
-       struct max8998_platform_data *pdata = i2c->dev.platform_data;
+       struct max8998_platform_data *pdata = dev_get_platdata(&i2c->dev);
        struct max8998_dev *max8998;
        int ret = 0;
 
-       max8998 = kzalloc(sizeof(struct max8998_dev), GFP_KERNEL);
+       max8998 = devm_kzalloc(&i2c->dev, sizeof(struct max8998_dev),
+                               GFP_KERNEL);
        if (max8998 == NULL)
                return -ENOMEM;
 
@@ -246,7 +247,6 @@ err:
        mfd_remove_devices(max8998->dev);
        max8998_irq_exit(max8998);
        i2c_unregister_device(max8998->rtc);
-       kfree(max8998);
        return ret;
 }
 
@@ -257,7 +257,6 @@ static int max8998_i2c_remove(struct i2c_client *i2c)
        mfd_remove_devices(max8998->dev);
        max8998_irq_exit(max8998);
        i2c_unregister_device(max8998->rtc);
-       kfree(max8998);
 
        return 0;
 }
index 13198d937e3657ae63717c30ffe602519a92f2c5..41c31b3ac94059e4dacf21565d3d411cd37da371 100644 (file)
@@ -156,7 +156,7 @@ static struct mcp_ops mcp_sa11x0 = {
 
 static int mcp_sa11x0_probe(struct platform_device *dev)
 {
-       struct mcp_plat_data *data = dev->dev.platform_data;
+       struct mcp_plat_data *data = dev_get_platdata(&dev->dev);
        struct resource *mem0, *mem1;
        struct mcp_sa11x0 *m;
        struct mcp *mcp;
index 998ce8cb3065a5a68dc2c09289e13e68826c341b..ad25bfa3fb02cf76ee6c1afcbe3cd8fcecbd49e5 100644 (file)
@@ -442,7 +442,7 @@ void menelaus_unregister_mmc_callback(void)
        menelaus_remove_irq_work(MENELAUS_MMC_S2D1_IRQ);
 
        the_menelaus->mmc_callback = NULL;
-       the_menelaus->mmc_callback_data = 0;
+       the_menelaus->mmc_callback_data = NULL;
 }
 EXPORT_SYMBOL(menelaus_unregister_mmc_callback);
 
@@ -466,7 +466,7 @@ static int menelaus_set_voltage(const struct menelaus_vtg *vtg, int mV,
        struct i2c_client *c = the_menelaus->client;
 
        mutex_lock(&the_menelaus->lock);
-       if (vtg == 0)
+       if (!vtg)
                goto set_voltage;
 
        ret = menelaus_read_reg(vtg->vtg_reg);
@@ -1189,7 +1189,7 @@ static int menelaus_probe(struct i2c_client *client,
        int                     rev = 0, val;
        int                     err = 0;
        struct menelaus_platform_data *menelaus_pdata =
-                                       client->dev.platform_data;
+                                       dev_get_platdata(&client->dev);
 
        if (the_menelaus) {
                dev_dbg(&client->dev, "only one %s for now\n",
@@ -1197,7 +1197,7 @@ static int menelaus_probe(struct i2c_client *client,
                return -ENODEV;
        }
 
-       menelaus = kzalloc(sizeof *menelaus, GFP_KERNEL);
+       menelaus = devm_kzalloc(&client->dev, sizeof(*menelaus), GFP_KERNEL);
        if (!menelaus)
                return -ENOMEM;
 
@@ -1210,8 +1210,7 @@ static int menelaus_probe(struct i2c_client *client,
        rev = menelaus_read_reg(MENELAUS_REV);
        if (rev < 0) {
                pr_err(DRIVER_NAME ": device not found");
-               err = -ENODEV;
-               goto fail1;
+               return -ENODEV;
        }
 
        /* Ack and disable all Menelaus interrupts */
@@ -1231,7 +1230,7 @@ static int menelaus_probe(struct i2c_client *client,
                if (err) {
                        dev_dbg(&client->dev,  "can't get IRQ %d, err %d\n",
                                        client->irq, err);
-                       goto fail1;
+                       return err;
                }
        }
 
@@ -1242,7 +1241,7 @@ static int menelaus_probe(struct i2c_client *client,
 
        val = menelaus_read_reg(MENELAUS_VCORE_CTRL1);
        if (val < 0)
-               goto fail2;
+               goto fail;
        if (val & (1 << 7))
                menelaus->vcore_hw_mode = 1;
        else
@@ -1251,17 +1250,15 @@ static int menelaus_probe(struct i2c_client *client,
        if (menelaus_pdata != NULL && menelaus_pdata->late_init != NULL) {
                err = menelaus_pdata->late_init(&client->dev);
                if (err < 0)
-                       goto fail2;
+                       goto fail;
        }
 
        menelaus_rtc_init(menelaus);
 
        return 0;
-fail2:
+fail:
        free_irq(client->irq, menelaus);
        flush_work(&menelaus->work);
-fail1:
-       kfree(menelaus);
        return err;
 }
 
@@ -1271,7 +1268,6 @@ static int __exit menelaus_remove(struct i2c_client *client)
 
        free_irq(client->irq, menelaus);
        flush_work(&menelaus->work);
-       kfree(menelaus);
        the_menelaus = NULL;
        return 0;
 }
index 7604f4e5df40a10e03693407da38cb82ea3de14b..f421586f29fb09b02644ee8b05aa6777fb353835 100644 (file)
@@ -96,6 +96,8 @@ static int mfd_add_device(struct device *parent, int id,
 
        pdev->dev.parent = parent;
        pdev->dev.type = &mfd_dev_type;
+       pdev->dev.dma_mask = parent->dma_mask;
+       pdev->dev.dma_parms = parent->dma_parms;
 
        if (parent->of_node && cell->of_compatible) {
                for_each_child_of_node(parent->of_node, np) {
index 759fae3ca7fb0dba592ab808d036f889bf96f922..d2b8e7189907481ddcb9dc3da007bcb81f421a89 100644 (file)
@@ -232,7 +232,7 @@ err_end:
 static int omap_usbhs_alloc_children(struct platform_device *pdev)
 {
        struct device                           *dev = &pdev->dev;
-       struct usbhs_omap_platform_data         *pdata = dev->platform_data;
+       struct usbhs_omap_platform_data         *pdata = dev_get_platdata(dev);
        struct platform_device                  *ehci;
        struct platform_device                  *ohci;
        struct resource                         *res;
@@ -571,7 +571,7 @@ static struct of_device_id usbhs_child_match_table[] = {
 static int usbhs_omap_probe(struct platform_device *pdev)
 {
        struct device                   *dev =  &pdev->dev;
-       struct usbhs_omap_platform_data *pdata = dev->platform_data;
+       struct usbhs_omap_platform_data *pdata = dev_get_platdata(dev);
        struct usbhs_hcd_omap           *omap;
        struct resource                 *res;
        int                             ret = 0;
index e4d1c706df8b9d1cf6a5f9f2f835cc652097789b..135afabe4ae2ae850a6897ddad06dc1969dba301 100644 (file)
 #include <linux/mfd/palmas.h>
 #include <linux/of_device.h>
 
+#define PALMAS_EXT_REQ (PALMAS_EXT_CONTROL_ENABLE1 |   \
+                       PALMAS_EXT_CONTROL_ENABLE2 |    \
+                       PALMAS_EXT_CONTROL_NSLEEP)
+
+struct palmas_sleep_requestor_info {
+       int id;
+       int reg_offset;
+       int bit_pos;
+};
+
+#define EXTERNAL_REQUESTOR(_id, _offset, _pos)         \
+       [PALMAS_EXTERNAL_REQSTR_ID_##_id] = {           \
+               .id = PALMAS_EXTERNAL_REQSTR_ID_##_id,  \
+               .reg_offset = _offset,                  \
+               .bit_pos = _pos,                        \
+       }
+
+static struct palmas_sleep_requestor_info sleep_req_info[] = {
+       EXTERNAL_REQUESTOR(REGEN1, 0, 0),
+       EXTERNAL_REQUESTOR(REGEN2, 0, 1),
+       EXTERNAL_REQUESTOR(SYSEN1, 0, 2),
+       EXTERNAL_REQUESTOR(SYSEN2, 0, 3),
+       EXTERNAL_REQUESTOR(CLK32KG, 0, 4),
+       EXTERNAL_REQUESTOR(CLK32KGAUDIO, 0, 5),
+       EXTERNAL_REQUESTOR(REGEN3, 0, 6),
+       EXTERNAL_REQUESTOR(SMPS12, 1, 0),
+       EXTERNAL_REQUESTOR(SMPS3, 1, 1),
+       EXTERNAL_REQUESTOR(SMPS45, 1, 2),
+       EXTERNAL_REQUESTOR(SMPS6, 1, 3),
+       EXTERNAL_REQUESTOR(SMPS7, 1, 4),
+       EXTERNAL_REQUESTOR(SMPS8, 1, 5),
+       EXTERNAL_REQUESTOR(SMPS9, 1, 6),
+       EXTERNAL_REQUESTOR(SMPS10, 1, 7),
+       EXTERNAL_REQUESTOR(LDO1, 2, 0),
+       EXTERNAL_REQUESTOR(LDO2, 2, 1),
+       EXTERNAL_REQUESTOR(LDO3, 2, 2),
+       EXTERNAL_REQUESTOR(LDO4, 2, 3),
+       EXTERNAL_REQUESTOR(LDO5, 2, 4),
+       EXTERNAL_REQUESTOR(LDO6, 2, 5),
+       EXTERNAL_REQUESTOR(LDO7, 2, 6),
+       EXTERNAL_REQUESTOR(LDO8, 2, 7),
+       EXTERNAL_REQUESTOR(LDO9, 3, 0),
+       EXTERNAL_REQUESTOR(LDOLN, 3, 1),
+       EXTERNAL_REQUESTOR(LDOUSB, 3, 2),
+};
+
 static const struct regmap_config palmas_regmap_config[PALMAS_NUM_CLIENTS] = {
        {
                .reg_bits = 8,
@@ -186,6 +232,57 @@ static struct regmap_irq_chip palmas_irq_chip = {
                        PALMAS_INT1_MASK),
 };
 
+int palmas_ext_control_req_config(struct palmas *palmas,
+       enum palmas_external_requestor_id id,  int ext_ctrl, bool enable)
+{
+       int preq_mask_bit = 0;
+       int reg_add = 0;
+       int bit_pos;
+       int ret;
+
+       if (!(ext_ctrl & PALMAS_EXT_REQ))
+               return 0;
+
+       if (id >= PALMAS_EXTERNAL_REQSTR_ID_MAX)
+               return 0;
+
+       if (ext_ctrl & PALMAS_EXT_CONTROL_NSLEEP) {
+               reg_add = PALMAS_NSLEEP_RES_ASSIGN;
+               preq_mask_bit = 0;
+       } else if (ext_ctrl & PALMAS_EXT_CONTROL_ENABLE1) {
+               reg_add = PALMAS_ENABLE1_RES_ASSIGN;
+               preq_mask_bit = 1;
+       } else if (ext_ctrl & PALMAS_EXT_CONTROL_ENABLE2) {
+               reg_add = PALMAS_ENABLE2_RES_ASSIGN;
+               preq_mask_bit = 2;
+       }
+
+       bit_pos = sleep_req_info[id].bit_pos;
+       reg_add += sleep_req_info[id].reg_offset;
+       if (enable)
+               ret = palmas_update_bits(palmas, PALMAS_RESOURCE_BASE,
+                               reg_add, BIT(bit_pos), BIT(bit_pos));
+       else
+               ret = palmas_update_bits(palmas, PALMAS_RESOURCE_BASE,
+                               reg_add, BIT(bit_pos), 0);
+       if (ret < 0) {
+               dev_err(palmas->dev, "Resource reg 0x%02x update failed %d\n",
+                       reg_add, ret);
+               return ret;
+       }
+
+       /* Unmask the PREQ */
+       ret = palmas_update_bits(palmas, PALMAS_PMU_CONTROL_BASE,
+                       PALMAS_POWER_CTRL, BIT(preq_mask_bit), 0);
+       if (ret < 0) {
+               dev_err(palmas->dev, "POWER_CTRL register update failed %d\n",
+                       ret);
+               return ret;
+       }
+       return ret;
+}
+EXPORT_SYMBOL_GPL(palmas_ext_control_req_config);
+
 static int palmas_set_pdata_irq_flag(struct i2c_client *i2c,
                struct palmas_platform_data *pdata)
 {
@@ -229,6 +326,32 @@ static void palmas_dt_to_pdata(struct i2c_client *i2c,
                                        PALMAS_POWER_CTRL_ENABLE2_MASK;
        if (i2c->irq)
                palmas_set_pdata_irq_flag(i2c, pdata);
+
+       pdata->pm_off = of_property_read_bool(node,
+                       "ti,system-power-controller");
+}
+
+static struct palmas *palmas_dev;
+static void palmas_power_off(void)
+{
+       unsigned int addr;
+       int ret, slave;
+
+       if (!palmas_dev)
+               return;
+
+       slave = PALMAS_BASE_TO_SLAVE(PALMAS_PMU_CONTROL_BASE);
+       addr = PALMAS_BASE_TO_REG(PALMAS_PMU_CONTROL_BASE, PALMAS_DEV_CTRL);
+
+       ret = regmap_update_bits(
+                       palmas_dev->regmap[slave],
+                       addr,
+                       PALMAS_DEV_CTRL_DEV_ON,
+                       0);
+
+       if (ret)
+               pr_err("%s: Unable to write to DEV_CTRL_DEV_ON: %d\n",
+                               __func__, ret);
 }
 
 static unsigned int palmas_features = PALMAS_PMIC_FEATURE_SMPS10_BOOST;
@@ -423,10 +546,13 @@ no_irq:
         */
        if (node) {
                ret = of_platform_populate(node, NULL, NULL, &i2c->dev);
-               if (ret < 0)
+               if (ret < 0) {
                        goto err_irq;
-               else
+               } else if (pdata->pm_off && !pm_power_off) {
+                       palmas_dev = palmas;
+                       pm_power_off = palmas_power_off;
                        return ret;
+               }
        }
 
        return ret;
index 18b53cb72feae4984b44b6cea94c2f6d68fc3e40..b8941a556d7195e884e1f85d3e7017e89275a3f5 100644 (file)
@@ -203,7 +203,7 @@ static int pcf50633_adc_probe(struct platform_device *pdev)
 {
        struct pcf50633_adc *adc;
 
-       adc = kzalloc(sizeof(*adc), GFP_KERNEL);
+       adc = devm_kzalloc(&pdev->dev, sizeof(*adc), GFP_KERNEL);
        if (!adc)
                return -ENOMEM;
 
@@ -236,7 +236,6 @@ static int pcf50633_adc_remove(struct platform_device *pdev)
                kfree(adc->queue[i]);
 
        mutex_unlock(&adc->queue_mutex);
-       kfree(adc);
 
        return 0;
 }
index d11567307fbed6c930f955abf198de1157dfb237..6841d6805fd64a6aaab032ce121d4e3c63b36443 100644 (file)
@@ -195,7 +195,7 @@ static int pcf50633_probe(struct i2c_client *client,
                                const struct i2c_device_id *ids)
 {
        struct pcf50633 *pcf;
-       struct pcf50633_platform_data *pdata = client->dev.platform_data;
+       struct pcf50633_platform_data *pdata = dev_get_platdata(&client->dev);
        int i, ret;
        int version, variant;
 
index ecc137ffa8c3c6369592a5cea5ecd987f9deeed4..a6841f77aa5e709d472b120336e25e5572646cf8 100644 (file)
@@ -14,6 +14,7 @@
 #define pr_fmt(fmt) "%s: " fmt, __func__
 
 #include <linux/kernel.h>
+#include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 #include <linux/err.h>
@@ -106,7 +107,7 @@ static int pm8921_add_subdevices(const struct pm8921_platform_data
 
 static int pm8921_probe(struct platform_device *pdev)
 {
-       const struct pm8921_platform_data *pdata = pdev->dev.platform_data;
+       const struct pm8921_platform_data *pdata = dev_get_platdata(&pdev->dev);
        struct pm8921 *pmic;
        int rc;
        u8 val;
@@ -117,7 +118,7 @@ static int pm8921_probe(struct platform_device *pdev)
                return -EINVAL;
        }
 
-       pmic = kzalloc(sizeof(struct pm8921), GFP_KERNEL);
+       pmic = devm_kzalloc(&pdev->dev, sizeof(struct pm8921), GFP_KERNEL);
        if (!pmic) {
                pr_err("Cannot alloc pm8921 struct\n");
                return -ENOMEM;
@@ -127,7 +128,7 @@ static int pm8921_probe(struct platform_device *pdev)
        rc = ssbi_read(pdev->dev.parent, REG_HWREV, &val, sizeof(val));
        if (rc) {
                pr_err("Failed to read hw rev reg %d:rc=%d\n", REG_HWREV, rc);
-               goto err_read_rev;
+               return rc;
        }
        pr_info("PMIC revision 1: %02X\n", val);
        rev = val;
@@ -137,7 +138,7 @@ static int pm8921_probe(struct platform_device *pdev)
        if (rc) {
                pr_err("Failed to read hw rev 2 reg %d:rc=%d\n",
                        REG_HWREV_2, rc);
-               goto err_read_rev;
+               return rc;
        }
        pr_info("PMIC revision 2: %02X\n", val);
        rev |= val << BITS_PER_BYTE;
@@ -159,9 +160,6 @@ static int pm8921_probe(struct platform_device *pdev)
 
 err:
        mfd_remove_devices(pmic->dev);
-       platform_set_drvdata(pdev, NULL);
-err_read_rev:
-       kfree(pmic);
        return rc;
 }
 
@@ -179,8 +177,6 @@ static int pm8921_remove(struct platform_device *pdev)
                pm8xxx_irq_exit(pmic->irq_chip);
                pmic->irq_chip = NULL;
        }
-       platform_set_drvdata(pdev, NULL);
-       kfree(pmic);
 
        return 0;
 }
index 14bdaccefbeca4da3099ab804e807da6f190843f..346330176afcd81ba9f6a294420db761e60ab908 100644 (file)
@@ -250,7 +250,7 @@ static int rc5t583_i2c_probe(struct i2c_client *i2c,
                              const struct i2c_device_id *id)
 {
        struct rc5t583 *rc5t583;
-       struct rc5t583_platform_data *pdata = i2c->dev.platform_data;
+       struct rc5t583_platform_data *pdata = dev_get_platdata(&i2c->dev);
        int ret;
        bool irq_init_success = false;
 
index c436bf27e78d232340825ad12bf9d493d9ae26a2..e4c1833154eac27d98b8cbbcfa0056dfbdb1610d 100644 (file)
@@ -1,6 +1,6 @@
 /* Driver for Realtek PCI-Express card reader
  *
- * Copyright(c) 2009 Realtek Semiconductor Corp. All rights reserved.
+ * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the
@@ -17,7 +17,7 @@
  *
  * Author:
  *   Wei WANG <wei_wang@realsil.com.cn>
- *   No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
+ *   Roger Tseng <rogerable@realtek.com>
  */
 
 #include <linux/module.h>
@@ -47,19 +47,77 @@ static int rtl8411b_is_qfn48(struct rtsx_pcr *pcr)
                return 0;
 }
 
+static void rtl8411_fetch_vendor_settings(struct rtsx_pcr *pcr)
+{
+       u32 reg1;
+       u8 reg3;
+
+       rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG1, &reg1);
+       dev_dbg(&(pcr->pci->dev), "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg1);
+
+       if (!rtsx_vendor_setting_valid(reg1))
+               return;
+
+       pcr->aspm_en = rtsx_reg_to_aspm(reg1);
+       pcr->sd30_drive_sel_1v8 =
+               map_sd_drive(rtsx_reg_to_sd30_drive_sel_1v8(reg1));
+       pcr->card_drive_sel &= 0x3F;
+       pcr->card_drive_sel |= rtsx_reg_to_card_drive_sel(reg1);
+
+       rtsx_pci_read_config_byte(pcr, PCR_SETTING_REG3, &reg3);
+       dev_dbg(&(pcr->pci->dev), "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG3, reg3);
+       pcr->sd30_drive_sel_3v3 = rtl8411_reg_to_sd30_drive_sel_3v3(reg3);
+}
+
+static void rtl8411b_fetch_vendor_settings(struct rtsx_pcr *pcr)
+{
+       u32 reg;
+
+       rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG1, &reg);
+       dev_dbg(&(pcr->pci->dev), "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg);
+
+       if (!rtsx_vendor_setting_valid(reg))
+               return;
+
+       pcr->aspm_en = rtsx_reg_to_aspm(reg);
+       pcr->sd30_drive_sel_1v8 =
+               map_sd_drive(rtsx_reg_to_sd30_drive_sel_1v8(reg));
+       pcr->sd30_drive_sel_3v3 =
+               map_sd_drive(rtl8411b_reg_to_sd30_drive_sel_3v3(reg));
+}
+
+static void rtl8411_force_power_down(struct rtsx_pcr *pcr, u8 pm_state)
+{
+       rtsx_pci_write_register(pcr, FPDCTL, 0x07, 0x07);
+}
+
 static int rtl8411_extra_init_hw(struct rtsx_pcr *pcr)
 {
-       return rtsx_pci_write_register(pcr, CD_PAD_CTL,
+       rtsx_pci_init_cmd(pcr);
+
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_DRIVE_SEL,
+                       0xFF, pcr->sd30_drive_sel_3v3);
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CD_PAD_CTL,
                        CD_DISABLE_MASK | CD_AUTO_DISABLE, CD_ENABLE);
+
+       return rtsx_pci_send_cmd(pcr, 100);
 }
 
 static int rtl8411b_extra_init_hw(struct rtsx_pcr *pcr)
 {
-       if (rtl8411b_is_qfn48(pcr))
-               rtsx_pci_write_register(pcr, CARD_PULL_CTL3, 0xFF, 0xF5);
+       rtsx_pci_init_cmd(pcr);
 
-       return rtsx_pci_write_register(pcr, CD_PAD_CTL,
+       if (rtl8411b_is_qfn48(pcr))
+               rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
+                               CARD_PULL_CTL3, 0xFF, 0xF5);
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_DRIVE_SEL,
+                       0xFF, pcr->sd30_drive_sel_3v3);
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CD_PAD_CTL,
                        CD_DISABLE_MASK | CD_AUTO_DISABLE, CD_ENABLE);
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, FUNC_FORCE_CTL,
+                       0x06, 0x00);
+
+       return rtsx_pci_send_cmd(pcr, 100);
 }
 
 static int rtl8411_turn_on_led(struct rtsx_pcr *pcr)
@@ -141,13 +199,13 @@ static int rtl8411_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
        mask = (BPP_REG_TUNED18 << BPP_TUNED18_SHIFT_8411) | BPP_PAD_MASK;
        if (voltage == OUTPUT_3V3) {
                err = rtsx_pci_write_register(pcr,
-                               SD30_DRIVE_SEL, 0x07, DRIVER_TYPE_D);
+                               SD30_DRIVE_SEL, 0x07, pcr->sd30_drive_sel_3v3);
                if (err < 0)
                        return err;
                val = (BPP_ASIC_3V3 << BPP_TUNED18_SHIFT_8411) | BPP_PAD_3V3;
        } else if (voltage == OUTPUT_1V8) {
                err = rtsx_pci_write_register(pcr,
-                               SD30_DRIVE_SEL, 0x07, DRIVER_TYPE_B);
+                               SD30_DRIVE_SEL, 0x07, pcr->sd30_drive_sel_1v8);
                if (err < 0)
                        return err;
                val = (BPP_ASIC_1V8 << BPP_TUNED18_SHIFT_8411) | BPP_PAD_1V8;
@@ -222,6 +280,7 @@ static int rtl8411_conv_clk_and_div_n(int input, int dir)
 }
 
 static const struct pcr_ops rtl8411_pcr_ops = {
+       .fetch_vendor_settings = rtl8411_fetch_vendor_settings,
        .extra_init_hw = rtl8411_extra_init_hw,
        .optimize_phy = NULL,
        .turn_on_led = rtl8411_turn_on_led,
@@ -233,9 +292,11 @@ static const struct pcr_ops rtl8411_pcr_ops = {
        .switch_output_voltage = rtl8411_switch_output_voltage,
        .cd_deglitch = rtl8411_cd_deglitch,
        .conv_clk_and_div_n = rtl8411_conv_clk_and_div_n,
+       .force_power_down = rtl8411_force_power_down,
 };
 
 static const struct pcr_ops rtl8411b_pcr_ops = {
+       .fetch_vendor_settings = rtl8411b_fetch_vendor_settings,
        .extra_init_hw = rtl8411b_extra_init_hw,
        .optimize_phy = NULL,
        .turn_on_led = rtl8411_turn_on_led,
@@ -247,6 +308,7 @@ static const struct pcr_ops rtl8411b_pcr_ops = {
        .switch_output_voltage = rtl8411_switch_output_voltage,
        .cd_deglitch = rtl8411_cd_deglitch,
        .conv_clk_and_div_n = rtl8411_conv_clk_and_div_n,
+       .force_power_down = rtl8411_force_power_down,
 };
 
 /* SD Pull Control Enable:
@@ -385,6 +447,12 @@ void rtl8411_init_params(struct rtsx_pcr *pcr)
        pcr->num_slots = 2;
        pcr->ops = &rtl8411_pcr_ops;
 
+       pcr->flags = 0;
+       pcr->card_drive_sel = RTL8411_CARD_DRIVE_DEFAULT;
+       pcr->sd30_drive_sel_1v8 = DRIVER_TYPE_B;
+       pcr->sd30_drive_sel_3v3 = DRIVER_TYPE_D;
+       pcr->aspm_en = ASPM_L1_EN;
+
        pcr->ic_version = rtl8411_get_ic_version(pcr);
        pcr->sd_pull_ctl_enable_tbl = rtl8411_sd_pull_ctl_enable_tbl;
        pcr->sd_pull_ctl_disable_tbl = rtl8411_sd_pull_ctl_disable_tbl;
@@ -398,6 +466,12 @@ void rtl8411b_init_params(struct rtsx_pcr *pcr)
        pcr->num_slots = 2;
        pcr->ops = &rtl8411b_pcr_ops;
 
+       pcr->flags = 0;
+       pcr->card_drive_sel = RTL8411_CARD_DRIVE_DEFAULT;
+       pcr->sd30_drive_sel_1v8 = DRIVER_TYPE_B;
+       pcr->sd30_drive_sel_3v3 = DRIVER_TYPE_D;
+       pcr->aspm_en = ASPM_L1_EN;
+
        pcr->ic_version = rtl8411_get_ic_version(pcr);
 
        if (rtl8411b_is_qfn48(pcr)) {
index ec78d9fb08791a9975a825411b6d7a8049f5d801..4026e1fcb0a6fbdc53d2be883bf78dd39f39e5da 100644 (file)
@@ -1,6 +1,6 @@
 /* Driver for Realtek PCI-Express card reader
  *
- * Copyright(c) 2009 Realtek Semiconductor Corp. All rights reserved.
+ * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the
@@ -17,7 +17,6 @@
  *
  * Author:
  *   Wei WANG <wei_wang@realsil.com.cn>
- *   No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
  */
 
 #include <linux/module.h>
@@ -34,19 +33,34 @@ static u8 rts5209_get_ic_version(struct rtsx_pcr *pcr)
        return val & 0x0F;
 }
 
-static void rts5209_init_vendor_cfg(struct rtsx_pcr *pcr)
+static void rts5209_fetch_vendor_settings(struct rtsx_pcr *pcr)
 {
-       u32 val;
+       u32 reg;
 
-       rtsx_pci_read_config_dword(pcr, 0x724, &val);
-       dev_dbg(&(pcr->pci->dev), "Cfg 0x724: 0x%x\n", val);
+       rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG1, &reg);
+       dev_dbg(&(pcr->pci->dev), "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg);
 
-       if (!(val & 0x80)) {
-               if (val & 0x08)
-                       pcr->ms_pmos = false;
-               else
-                       pcr->ms_pmos = true;
+       if (rts5209_vendor_setting1_valid(reg)) {
+               if (rts5209_reg_check_ms_pmos(reg))
+                       pcr->flags |= PCR_MS_PMOS;
+               pcr->aspm_en = rts5209_reg_to_aspm(reg);
        }
+
+       rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG2, &reg);
+       dev_dbg(&(pcr->pci->dev), "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG2, reg);
+
+       if (rts5209_vendor_setting2_valid(reg)) {
+               pcr->sd30_drive_sel_1v8 =
+                       rts5209_reg_to_sd30_drive_sel_1v8(reg);
+               pcr->sd30_drive_sel_3v3 =
+                       rts5209_reg_to_sd30_drive_sel_3v3(reg);
+               pcr->card_drive_sel = rts5209_reg_to_card_drive_sel(reg);
+       }
+}
+
+static void rts5209_force_power_down(struct rtsx_pcr *pcr, u8 pm_state)
+{
+       rtsx_pci_write_register(pcr, FPDCTL, 0x07, 0x07);
 }
 
 static int rts5209_extra_init_hw(struct rtsx_pcr *pcr)
@@ -55,8 +69,15 @@ static int rts5209_extra_init_hw(struct rtsx_pcr *pcr)
 
        /* Turn off LED */
        rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_GPIO, 0xFF, 0x03);
+       /* Reset ASPM state to default value */
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, ASPM_FORCE_CTL, 0x3F, 0);
+       /* Force CLKREQ# PIN to drive 0 to request clock */
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG, 0x08, 0x08);
        /* Configure GPIO as output */
        rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_GPIO_DIR, 0xFF, 0x03);
+       /* Configure driving */
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_DRIVE_SEL,
+                       0xFF, pcr->sd30_drive_sel_3v3);
 
        return rtsx_pci_send_cmd(pcr, 100);
 }
@@ -95,7 +116,7 @@ static int rts5209_card_power_on(struct rtsx_pcr *pcr, int card)
        partial_pwr_on = SD_PARTIAL_POWER_ON;
        pwr_on = SD_POWER_ON;
 
-       if (pcr->ms_pmos && (card == RTSX_MS_CARD)) {
+       if ((pcr->flags & PCR_MS_PMOS) && (card == RTSX_MS_CARD)) {
                pwr_mask = MS_POWER_MASK;
                partial_pwr_on = MS_PARTIAL_POWER_ON;
                pwr_on = MS_POWER_ON;
@@ -131,7 +152,7 @@ static int rts5209_card_power_off(struct rtsx_pcr *pcr, int card)
        pwr_mask = SD_POWER_MASK;
        pwr_off = SD_POWER_OFF;
 
-       if (pcr->ms_pmos && (card == RTSX_MS_CARD)) {
+       if ((pcr->flags & PCR_MS_PMOS) && (card == RTSX_MS_CARD)) {
                pwr_mask = MS_POWER_MASK;
                pwr_off = MS_POWER_OFF;
        }
@@ -140,7 +161,7 @@ static int rts5209_card_power_off(struct rtsx_pcr *pcr, int card)
        rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_PWR_CTL,
                        pwr_mask | PMOS_STRG_MASK, pwr_off | PMOS_STRG_400mA);
        rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PWR_GATE_CTRL,
-                       LDO3318_PWR_MASK, 0X06);
+                       LDO3318_PWR_MASK, 0x06);
        return rtsx_pci_send_cmd(pcr, 100);
 }
 
@@ -150,7 +171,7 @@ static int rts5209_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
 
        if (voltage == OUTPUT_3V3) {
                err = rtsx_pci_write_register(pcr,
-                               SD30_DRIVE_SEL, 0x07, DRIVER_TYPE_D);
+                               SD30_DRIVE_SEL, 0x07, pcr->sd30_drive_sel_3v3);
                if (err < 0)
                        return err;
                err = rtsx_pci_write_phy_register(pcr, 0x08, 0x4FC0 | 0x24);
@@ -158,7 +179,7 @@ static int rts5209_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
                        return err;
        } else if (voltage == OUTPUT_1V8) {
                err = rtsx_pci_write_register(pcr,
-                               SD30_DRIVE_SEL, 0x07, DRIVER_TYPE_B);
+                               SD30_DRIVE_SEL, 0x07, pcr->sd30_drive_sel_1v8);
                if (err < 0)
                        return err;
                err = rtsx_pci_write_phy_register(pcr, 0x08, 0x4C40 | 0x24);
@@ -172,6 +193,7 @@ static int rts5209_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
 }
 
 static const struct pcr_ops rts5209_pcr_ops = {
+       .fetch_vendor_settings = rts5209_fetch_vendor_settings,
        .extra_init_hw = rts5209_extra_init_hw,
        .optimize_phy = rts5209_optimize_phy,
        .turn_on_led = rts5209_turn_on_led,
@@ -183,6 +205,7 @@ static const struct pcr_ops rts5209_pcr_ops = {
        .switch_output_voltage = rts5209_switch_output_voltage,
        .cd_deglitch = NULL,
        .conv_clk_and_div_n = NULL,
+       .force_power_down = rts5209_force_power_down,
 };
 
 /* SD Pull Control Enable:
@@ -242,7 +265,11 @@ void rts5209_init_params(struct rtsx_pcr *pcr)
        pcr->num_slots = 2;
        pcr->ops = &rts5209_pcr_ops;
 
-       rts5209_init_vendor_cfg(pcr);
+       pcr->flags = 0;
+       pcr->card_drive_sel = RTS5209_CARD_DRIVE_DEFAULT;
+       pcr->sd30_drive_sel_1v8 = DRIVER_TYPE_B;
+       pcr->sd30_drive_sel_3v3 = DRIVER_TYPE_D;
+       pcr->aspm_en = ASPM_L1_EN;
 
        pcr->ic_version = rts5209_get_ic_version(pcr);
        pcr->sd_pull_ctl_enable_tbl = rts5209_sd_pull_ctl_enable_tbl;
index fc831dcb148035fcc9e02ec3bb3eee7b9c9782d2..d7cae82720a4546ca07fb6c7596f6a809f4e4317 100644 (file)
@@ -1,6 +1,6 @@
 /* Driver for Realtek PCI-Express card reader
  *
- * Copyright(c) 2009 Realtek Semiconductor Corp. All rights reserved.
+ * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the
  *
  * Author:
  *   Wei WANG <wei_wang@realsil.com.cn>
- *   No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
- *
  *   Roger Tseng <rogerable@realtek.com>
- *   No. 2, Innovation Road II, Hsinchu Science Park, Hsinchu 300, Taiwan
  */
 
 #include <linux/module.h>
 
 #include "rtsx_pcr.h"
 
+static void rts5227_fill_driving(struct rtsx_pcr *pcr, u8 voltage)
+{
+       u8 driving_3v3[4][3] = {
+               {0x13, 0x13, 0x13},
+               {0x96, 0x96, 0x96},
+               {0x7F, 0x7F, 0x7F},
+               {0x96, 0x96, 0x96},
+       };
+       u8 driving_1v8[4][3] = {
+               {0x99, 0x99, 0x99},
+               {0xAA, 0xAA, 0xAA},
+               {0xFE, 0xFE, 0xFE},
+               {0xB3, 0xB3, 0xB3},
+       };
+       u8 (*driving)[3], drive_sel;
+
+       if (voltage == OUTPUT_3V3) {
+               driving = driving_3v3;
+               drive_sel = pcr->sd30_drive_sel_3v3;
+       } else {
+               driving = driving_1v8;
+               drive_sel = pcr->sd30_drive_sel_1v8;
+       }
+
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_CLK_DRIVE_SEL,
+                       0xFF, driving[drive_sel][0]);
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_CMD_DRIVE_SEL,
+                       0xFF, driving[drive_sel][1]);
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_DAT_DRIVE_SEL,
+                       0xFF, driving[drive_sel][2]);
+}
+
+static void rts5227_fetch_vendor_settings(struct rtsx_pcr *pcr)
+{
+       u32 reg;
+
+       rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG1, &reg);
+       dev_dbg(&(pcr->pci->dev), "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg);
+
+       if (!rtsx_vendor_setting_valid(reg))
+               return;
+
+       pcr->aspm_en = rtsx_reg_to_aspm(reg);
+       pcr->sd30_drive_sel_1v8 = rtsx_reg_to_sd30_drive_sel_1v8(reg);
+       pcr->card_drive_sel &= 0x3F;
+       pcr->card_drive_sel |= rtsx_reg_to_card_drive_sel(reg);
+
+       rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG2, &reg);
+       dev_dbg(&(pcr->pci->dev), "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG2, reg);
+       pcr->sd30_drive_sel_3v3 = rtsx_reg_to_sd30_drive_sel_3v3(reg);
+       if (rtsx_reg_check_reverse_socket(reg))
+               pcr->flags |= PCR_REVERSE_SOCKET;
+}
+
+static void rts5227_force_power_down(struct rtsx_pcr *pcr, u8 pm_state)
+{
+       /* Set relink_time to 0 */
+       rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 1, 0xFF, 0);
+       rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 2, 0xFF, 0);
+       rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 3, 0x01, 0);
+
+       if (pm_state == HOST_ENTER_S3)
+               rtsx_pci_write_register(pcr, PM_CTRL3, 0x10, 0x10);
+
+       rtsx_pci_write_register(pcr, FPDCTL, 0x03, 0x03);
+}
+
 static int rts5227_extra_init_hw(struct rtsx_pcr *pcr)
 {
        u16 cap;
@@ -37,6 +101,8 @@ static int rts5227_extra_init_hw(struct rtsx_pcr *pcr)
 
        /* Configure GPIO as output */
        rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, GPIO_CTL, 0x02, 0x02);
+       /* Reset ASPM state to default value */
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, ASPM_FORCE_CTL, 0x3F, 0);
        /* Switch LDO3318 source from DV33 to card_3v3 */
        rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_PWR_SEL, 0x03, 0x00);
        rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_PWR_SEL, 0x03, 0x01);
@@ -48,17 +114,16 @@ static int rts5227_extra_init_hw(struct rtsx_pcr *pcr)
                rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LTR_CTL, 0xFF, 0xA3);
        /* Configure OBFF */
        rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, OBFF_CFG, 0x03, 0x03);
-       /* Configure force_clock_req
-        * Maybe We should define 0xFF03 as some name
-        */
-       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, 0xFF03, 0x08, 0x08);
-       /* Correct driving */
-       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
-                       SD30_CLK_DRIVE_SEL, 0xFF, 0x96);
-       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
-                       SD30_CMD_DRIVE_SEL, 0xFF, 0x96);
-       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
-                       SD30_DAT_DRIVE_SEL, 0xFF, 0x96);
+       /* Configure driving */
+       rts5227_fill_driving(pcr, OUTPUT_3V3);
+       /* Configure force_clock_req */
+       if (pcr->flags & PCR_REVERSE_SOCKET)
+               rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
+                               AUTOLOAD_CFG_BASE + 3, 0xB8, 0xB8);
+       else
+               rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
+                               AUTOLOAD_CFG_BASE + 3, 0xB8, 0x88);
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PM_CTRL3, 0x10, 0x00);
 
        return rtsx_pci_send_cmd(pcr, 100);
 }
@@ -131,13 +196,11 @@ static int rts5227_card_power_off(struct rtsx_pcr *pcr, int card)
 static int rts5227_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
 {
        int err;
-       u8 drive_sel;
 
        if (voltage == OUTPUT_3V3) {
                err = rtsx_pci_write_phy_register(pcr, 0x08, 0x4FC0 | 0x24);
                if (err < 0)
                        return err;
-               drive_sel = 0x96;
        } else if (voltage == OUTPUT_1V8) {
                err = rtsx_pci_write_phy_register(pcr, 0x11, 0x3C02);
                if (err < 0)
@@ -145,23 +208,18 @@ static int rts5227_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
                err = rtsx_pci_write_phy_register(pcr, 0x08, 0x4C80 | 0x24);
                if (err < 0)
                        return err;
-               drive_sel = 0xB3;
        } else {
                return -EINVAL;
        }
 
        /* set pad drive */
        rtsx_pci_init_cmd(pcr);
-       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_CLK_DRIVE_SEL,
-                       0xFF, drive_sel);
-       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_CMD_DRIVE_SEL,
-                       0xFF, drive_sel);
-       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_DAT_DRIVE_SEL,
-                       0xFF, drive_sel);
+       rts5227_fill_driving(pcr, voltage);
        return rtsx_pci_send_cmd(pcr, 100);
 }
 
 static const struct pcr_ops rts5227_pcr_ops = {
+       .fetch_vendor_settings = rts5227_fetch_vendor_settings,
        .extra_init_hw = rts5227_extra_init_hw,
        .optimize_phy = rts5227_optimize_phy,
        .turn_on_led = rts5227_turn_on_led,
@@ -173,6 +231,7 @@ static const struct pcr_ops rts5227_pcr_ops = {
        .switch_output_voltage = rts5227_switch_output_voltage,
        .cd_deglitch = NULL,
        .conv_clk_and_div_n = NULL,
+       .force_power_down = rts5227_force_power_down,
 };
 
 /* SD Pull Control Enable:
@@ -227,6 +286,12 @@ void rts5227_init_params(struct rtsx_pcr *pcr)
        pcr->num_slots = 2;
        pcr->ops = &rts5227_pcr_ops;
 
+       pcr->flags = 0;
+       pcr->card_drive_sel = RTSX_CARD_DRIVE_DEFAULT;
+       pcr->sd30_drive_sel_1v8 = CFG_DRIVER_TYPE_B;
+       pcr->sd30_drive_sel_3v3 = CFG_DRIVER_TYPE_B;
+       pcr->aspm_en = ASPM_L1_EN;
+
        pcr->sd_pull_ctl_enable_tbl = rts5227_sd_pull_ctl_enable_tbl;
        pcr->sd_pull_ctl_disable_tbl = rts5227_sd_pull_ctl_disable_tbl;
        pcr->ms_pull_ctl_enable_tbl = rts5227_ms_pull_ctl_enable_tbl;
index 58af4dbe358623d2c45385ee5a5e6f31cb04344e..620e7fa9e0df97a0b176f8001db05c72185f910d 100644 (file)
@@ -1,6 +1,6 @@
 /* Driver for Realtek PCI-Express card reader
  *
- * Copyright(c) 2009 Realtek Semiconductor Corp. All rights reserved.
+ * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the
@@ -17,7 +17,6 @@
  *
  * Author:
  *   Wei WANG <wei_wang@realsil.com.cn>
- *   No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
  */
 
 #include <linux/module.h>
@@ -34,17 +33,51 @@ static u8 rts5229_get_ic_version(struct rtsx_pcr *pcr)
        return val & 0x0F;
 }
 
+static void rts5229_fetch_vendor_settings(struct rtsx_pcr *pcr)
+{
+       u32 reg;
+
+       rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG1, &reg);
+       dev_dbg(&(pcr->pci->dev), "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg);
+
+       if (!rtsx_vendor_setting_valid(reg))
+               return;
+
+       pcr->aspm_en = rtsx_reg_to_aspm(reg);
+       pcr->sd30_drive_sel_1v8 =
+               map_sd_drive(rtsx_reg_to_sd30_drive_sel_1v8(reg));
+       pcr->card_drive_sel &= 0x3F;
+       pcr->card_drive_sel |= rtsx_reg_to_card_drive_sel(reg);
+
+       rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG2, &reg);
+       dev_dbg(&(pcr->pci->dev), "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG2, reg);
+       pcr->sd30_drive_sel_3v3 =
+               map_sd_drive(rtsx_reg_to_sd30_drive_sel_3v3(reg));
+}
+
+static void rts5229_force_power_down(struct rtsx_pcr *pcr, u8 pm_state)
+{
+       rtsx_pci_write_register(pcr, FPDCTL, 0x03, 0x03);
+}
+
 static int rts5229_extra_init_hw(struct rtsx_pcr *pcr)
 {
        rtsx_pci_init_cmd(pcr);
 
        /* Configure GPIO as output */
        rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, GPIO_CTL, 0x02, 0x02);
+       /* Reset ASPM state to default value */
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, ASPM_FORCE_CTL, 0x3F, 0);
+       /* Force CLKREQ# PIN to drive 0 to request clock */
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG, 0x08, 0x08);
        /* Switch LDO3318 source from DV33 to card_3v3 */
        rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_PWR_SEL, 0x03, 0x00);
        rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_PWR_SEL, 0x03, 0x01);
        /* LED shine disabled, set initial shine cycle period */
        rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, OLT_LED_CTL, 0x0F, 0x02);
+       /* Configure driving */
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_DRIVE_SEL,
+                       0xFF, pcr->sd30_drive_sel_3v3);
 
        return rtsx_pci_send_cmd(pcr, 100);
 }
@@ -110,7 +143,7 @@ static int rts5229_card_power_off(struct rtsx_pcr *pcr, int card)
                        SD_POWER_MASK | PMOS_STRG_MASK,
                        SD_POWER_OFF | PMOS_STRG_400mA);
        rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PWR_GATE_CTRL,
-                       LDO3318_PWR_MASK, 0X00);
+                       LDO3318_PWR_MASK, 0x00);
        return rtsx_pci_send_cmd(pcr, 100);
 }
 
@@ -120,7 +153,7 @@ static int rts5229_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
 
        if (voltage == OUTPUT_3V3) {
                err = rtsx_pci_write_register(pcr,
-                               SD30_DRIVE_SEL, 0x07, DRIVER_TYPE_D);
+                               SD30_DRIVE_SEL, 0x07, pcr->sd30_drive_sel_3v3);
                if (err < 0)
                        return err;
                err = rtsx_pci_write_phy_register(pcr, 0x08, 0x4FC0 | 0x24);
@@ -128,7 +161,7 @@ static int rts5229_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
                        return err;
        } else if (voltage == OUTPUT_1V8) {
                err = rtsx_pci_write_register(pcr,
-                               SD30_DRIVE_SEL, 0x07, DRIVER_TYPE_B);
+                               SD30_DRIVE_SEL, 0x07, pcr->sd30_drive_sel_1v8);
                if (err < 0)
                        return err;
                err = rtsx_pci_write_phy_register(pcr, 0x08, 0x4C40 | 0x24);
@@ -142,6 +175,7 @@ static int rts5229_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
 }
 
 static const struct pcr_ops rts5229_pcr_ops = {
+       .fetch_vendor_settings = rts5229_fetch_vendor_settings,
        .extra_init_hw = rts5229_extra_init_hw,
        .optimize_phy = rts5229_optimize_phy,
        .turn_on_led = rts5229_turn_on_led,
@@ -153,6 +187,7 @@ static const struct pcr_ops rts5229_pcr_ops = {
        .switch_output_voltage = rts5229_switch_output_voltage,
        .cd_deglitch = NULL,
        .conv_clk_and_div_n = NULL,
+       .force_power_down = rts5229_force_power_down,
 };
 
 /* SD Pull Control Enable:
@@ -221,6 +256,12 @@ void rts5229_init_params(struct rtsx_pcr *pcr)
        pcr->num_slots = 2;
        pcr->ops = &rts5229_pcr_ops;
 
+       pcr->flags = 0;
+       pcr->card_drive_sel = RTSX_CARD_DRIVE_DEFAULT;
+       pcr->sd30_drive_sel_1v8 = DRIVER_TYPE_B;
+       pcr->sd30_drive_sel_3v3 = DRIVER_TYPE_D;
+       pcr->aspm_en = ASPM_L1_EN;
+
        pcr->ic_version = rts5229_get_ic_version(pcr);
        if (pcr->ic_version == IC_VER_C) {
                pcr->sd_pull_ctl_enable_tbl = rts5229_sd_pull_ctl_enable_tbl2;
index 15dc848bc0817bd6aafab9fa341e362e375f7944..ea90f8fb92ae1ece69db6451bc9fb519ce516f02 100644 (file)
@@ -17,7 +17,6 @@
  *
  * Author:
  *   Wei WANG <wei_wang@realsil.com.cn>
- *   No. 128, West Shenhu Road, Suzhou Industry Park, Suzhou, China
  */
 
 #include <linux/module.h>
@@ -34,24 +33,95 @@ static u8 rts5249_get_ic_version(struct rtsx_pcr *pcr)
        return val & 0x0F;
 }
 
+static void rts5249_fill_driving(struct rtsx_pcr *pcr, u8 voltage)
+{
+       u8 driving_3v3[4][3] = {
+               {0x11, 0x11, 0x11},
+               {0x55, 0x55, 0x5C},
+               {0x99, 0x99, 0x92},
+               {0x99, 0x99, 0x92},
+       };
+       u8 driving_1v8[4][3] = {
+               {0x3C, 0x3C, 0x3C},
+               {0xB3, 0xB3, 0xB3},
+               {0xFE, 0xFE, 0xFE},
+               {0xC4, 0xC4, 0xC4},
+       };
+       u8 (*driving)[3], drive_sel;
+
+       if (voltage == OUTPUT_3V3) {
+               driving = driving_3v3;
+               drive_sel = pcr->sd30_drive_sel_3v3;
+       } else {
+               driving = driving_1v8;
+               drive_sel = pcr->sd30_drive_sel_1v8;
+       }
+
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_CLK_DRIVE_SEL,
+                       0xFF, driving[drive_sel][0]);
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_CMD_DRIVE_SEL,
+                       0xFF, driving[drive_sel][1]);
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_DAT_DRIVE_SEL,
+                       0xFF, driving[drive_sel][2]);
+}
+
+static void rts5249_fetch_vendor_settings(struct rtsx_pcr *pcr)
+{
+       u32 reg;
+
+       rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG1, &reg);
+       dev_dbg(&(pcr->pci->dev), "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg);
+
+       if (!rtsx_vendor_setting_valid(reg))
+               return;
+
+       pcr->aspm_en = rtsx_reg_to_aspm(reg);
+       pcr->sd30_drive_sel_1v8 = rtsx_reg_to_sd30_drive_sel_1v8(reg);
+       pcr->card_drive_sel &= 0x3F;
+       pcr->card_drive_sel |= rtsx_reg_to_card_drive_sel(reg);
+
+       rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG2, &reg);
+       dev_dbg(&(pcr->pci->dev), "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG2, reg);
+       pcr->sd30_drive_sel_3v3 = rtsx_reg_to_sd30_drive_sel_3v3(reg);
+       if (rtsx_reg_check_reverse_socket(reg))
+               pcr->flags |= PCR_REVERSE_SOCKET;
+}
+
+static void rts5249_force_power_down(struct rtsx_pcr *pcr, u8 pm_state)
+{
+       /* Set relink_time to 0 */
+       rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 1, 0xFF, 0);
+       rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 2, 0xFF, 0);
+       rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 3, 0x01, 0);
+
+       if (pm_state == HOST_ENTER_S3)
+               rtsx_pci_write_register(pcr, PM_CTRL3, 0x10, 0x10);
+
+       rtsx_pci_write_register(pcr, FPDCTL, 0x03, 0x03);
+}
+
 static int rts5249_extra_init_hw(struct rtsx_pcr *pcr)
 {
        rtsx_pci_init_cmd(pcr);
 
        /* Configure GPIO as output */
        rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, GPIO_CTL, 0x02, 0x02);
+       /* Reset ASPM state to default value */
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, ASPM_FORCE_CTL, 0x3F, 0);
        /* Switch LDO3318 source from DV33 to card_3v3 */
        rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_PWR_SEL, 0x03, 0x00);
        rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_PWR_SEL, 0x03, 0x01);
        /* LED shine disabled, set initial shine cycle period */
        rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, OLT_LED_CTL, 0x0F, 0x02);
-       /* Correct driving */
-       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
-                       SD30_CLK_DRIVE_SEL, 0xFF, 0x99);
-       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
-                       SD30_CMD_DRIVE_SEL, 0xFF, 0x99);
-       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
-                       SD30_DAT_DRIVE_SEL, 0xFF, 0x92);
+       /* Configure driving */
+       rts5249_fill_driving(pcr, OUTPUT_3V3);
+       if (pcr->flags & PCR_REVERSE_SOCKET)
+               rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
+                               AUTOLOAD_CFG_BASE + 3, 0xB0, 0xB0);
+       else
+               rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
+                               AUTOLOAD_CFG_BASE + 3, 0xB0, 0x80);
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PM_CTRL3, 0x10, 0x00);
 
        return rtsx_pci_send_cmd(pcr, 100);
 }
@@ -129,15 +199,11 @@ static int rts5249_card_power_off(struct rtsx_pcr *pcr, int card)
 static int rts5249_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
 {
        int err;
-       u8 clk_drive, cmd_drive, dat_drive;
 
        if (voltage == OUTPUT_3V3) {
                err = rtsx_pci_write_phy_register(pcr, PHY_TUNE, 0x4FC0 | 0x24);
                if (err < 0)
                        return err;
-               clk_drive = 0x99;
-               cmd_drive = 0x99;
-               dat_drive = 0x92;
        } else if (voltage == OUTPUT_1V8) {
                err = rtsx_pci_write_phy_register(pcr, PHY_BACR, 0x3C02);
                if (err < 0)
@@ -145,25 +211,18 @@ static int rts5249_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
                err = rtsx_pci_write_phy_register(pcr, PHY_TUNE, 0x4C40 | 0x24);
                if (err < 0)
                        return err;
-               clk_drive = 0xb3;
-               cmd_drive = 0xb3;
-               dat_drive = 0xb3;
        } else {
                return -EINVAL;
        }
 
        /* set pad drive */
        rtsx_pci_init_cmd(pcr);
-       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_CLK_DRIVE_SEL,
-                       0xFF, clk_drive);
-       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_CMD_DRIVE_SEL,
-                       0xFF, cmd_drive);
-       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_DAT_DRIVE_SEL,
-                       0xFF, dat_drive);
+       rts5249_fill_driving(pcr, voltage);
        return rtsx_pci_send_cmd(pcr, 100);
 }
 
 static const struct pcr_ops rts5249_pcr_ops = {
+       .fetch_vendor_settings = rts5249_fetch_vendor_settings,
        .extra_init_hw = rts5249_extra_init_hw,
        .optimize_phy = rts5249_optimize_phy,
        .turn_on_led = rts5249_turn_on_led,
@@ -173,6 +232,7 @@ static const struct pcr_ops rts5249_pcr_ops = {
        .card_power_on = rts5249_card_power_on,
        .card_power_off = rts5249_card_power_off,
        .switch_output_voltage = rts5249_switch_output_voltage,
+       .force_power_down = rts5249_force_power_down,
 };
 
 /* SD Pull Control Enable:
@@ -233,6 +293,12 @@ void rts5249_init_params(struct rtsx_pcr *pcr)
        pcr->num_slots = 2;
        pcr->ops = &rts5249_pcr_ops;
 
+       pcr->flags = 0;
+       pcr->card_drive_sel = RTSX_CARD_DRIVE_DEFAULT;
+       pcr->sd30_drive_sel_1v8 = CFG_DRIVER_TYPE_C;
+       pcr->sd30_drive_sel_3v3 = CFG_DRIVER_TYPE_B;
+       pcr->aspm_en = ASPM_L1_EN;
+
        pcr->ic_version = rts5249_get_ic_version(pcr);
        pcr->sd_pull_ctl_enable_tbl = rts5249_sd_pull_ctl_enable_tbl;
        pcr->sd_pull_ctl_disable_tbl = rts5249_sd_pull_ctl_disable_tbl;
index dd186c4103c1e4f6ad9dc690879fa161489a8b2c..e6ae7720f9e15bd546362509bca185c7a64eb86f 100644 (file)
@@ -1,6 +1,6 @@
 /* Driver for Realtek PCI-Express card reader
  *
- * Copyright(c) 2009 Realtek Semiconductor Corp. All rights reserved.
+ * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the
@@ -17,7 +17,6 @@
  *
  * Author:
  *   Wei WANG <wei_wang@realsil.com.cn>
- *   No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
  */
 
 #include <linux/pci.h>
@@ -73,6 +72,9 @@ void rtsx_pci_start_run(struct rtsx_pcr *pcr)
                pcr->state = PDEV_STAT_RUN;
                if (pcr->ops->enable_auto_blink)
                        pcr->ops->enable_auto_blink(pcr);
+
+               if (pcr->aspm_en)
+                       rtsx_pci_write_config_byte(pcr, LCTLR, 0);
        }
 
        mod_delayed_work(system_wq, &pcr->idle_work, msecs_to_jiffies(200));
@@ -717,7 +719,7 @@ int rtsx_pci_card_exclusive_check(struct rtsx_pcr *pcr, int card)
                [RTSX_MS_CARD] = MS_EXIST
        };
 
-       if (!pcr->ms_pmos) {
+       if (!(pcr->flags & PCR_MS_PMOS)) {
                /* When using single PMOS, accessing card is not permitted
                 * if the existing card is not the designated one.
                 */
@@ -918,9 +920,27 @@ static void rtsx_pci_idle_work(struct work_struct *work)
        if (pcr->ops->turn_off_led)
                pcr->ops->turn_off_led(pcr);
 
+       if (pcr->aspm_en)
+               rtsx_pci_write_config_byte(pcr, LCTLR, pcr->aspm_en);
+
        mutex_unlock(&pcr->pcr_mutex);
 }
 
+static void rtsx_pci_power_off(struct rtsx_pcr *pcr, u8 pm_state)
+{
+       if (pcr->ops->turn_off_led)
+               pcr->ops->turn_off_led(pcr);
+
+       rtsx_pci_writel(pcr, RTSX_BIER, 0);
+       pcr->bier = 0;
+
+       rtsx_pci_write_register(pcr, PETXCFG, 0x08, 0x08);
+       rtsx_pci_write_register(pcr, HOST_SLEEP_STATE, 0x03, pm_state);
+
+       if (pcr->ops->force_power_down)
+               pcr->ops->force_power_down(pcr, pm_state);
+}
+
 static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
 {
        int err;
@@ -951,13 +971,11 @@ static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
        rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, HOST_SLEEP_STATE, 0x03, 0x00);
        /* Disable card clock */
        rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_CLK_EN, 0x1E, 0);
-       /* Reset ASPM state to default value */
-       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, ASPM_FORCE_CTL, 0x3F, 0);
        /* Reset delink mode */
        rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CHANGE_LINK_STATE, 0x0A, 0);
        /* Card driving select */
-       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_DRIVE_SEL,
-                       0x07, DRIVER_TYPE_D);
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_DRIVE_SEL,
+                       0xFF, pcr->card_drive_sel);
        /* Enable SSC Clock */
        rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1,
                        0xFF, SSC_8X_EN | SSC_SEL_4M);
@@ -982,13 +1000,13 @@ static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
         *      0: ELBI interrupt flag[31:22] & [7:0] only can be write clear
         */
        rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, NFTS_TX_CTRL, 0x02, 0);
-       /* Force CLKREQ# PIN to drive 0 to request clock */
-       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG, 0x08, 0x08);
 
        err = rtsx_pci_send_cmd(pcr, 100);
        if (err < 0)
                return err;
 
+       rtsx_pci_write_config_byte(pcr, LCTLR, 0);
+
        /* Enable clk_request_n to enable clock power management */
        rtsx_pci_write_config_byte(pcr, 0x81, 1);
        /* Enter L1 when host tx idle */
@@ -1053,6 +1071,18 @@ static int rtsx_pci_init_chip(struct rtsx_pcr *pcr)
        if (!pcr->slots)
                return -ENOMEM;
 
+       if (pcr->ops->fetch_vendor_settings)
+               pcr->ops->fetch_vendor_settings(pcr);
+
+       dev_dbg(&(pcr->pci->dev), "pcr->aspm_en = 0x%x\n", pcr->aspm_en);
+       dev_dbg(&(pcr->pci->dev), "pcr->sd30_drive_sel_1v8 = 0x%x\n",
+                       pcr->sd30_drive_sel_1v8);
+       dev_dbg(&(pcr->pci->dev), "pcr->sd30_drive_sel_3v3 = 0x%x\n",
+                       pcr->sd30_drive_sel_3v3);
+       dev_dbg(&(pcr->pci->dev), "pcr->card_drive_sel = 0x%x\n",
+                       pcr->card_drive_sel);
+       dev_dbg(&(pcr->pci->dev), "pcr->flags = 0x%x\n", pcr->flags);
+
        pcr->state = PDEV_STAT_IDLE;
        err = rtsx_pci_init_hw(pcr);
        if (err < 0) {
@@ -1235,7 +1265,6 @@ static int rtsx_pci_suspend(struct pci_dev *pcidev, pm_message_t state)
 {
        struct pcr_handle *handle;
        struct rtsx_pcr *pcr;
-       int ret = 0;
 
        dev_dbg(&(pcidev->dev), "--> %s\n", __func__);
 
@@ -1247,14 +1276,7 @@ static int rtsx_pci_suspend(struct pci_dev *pcidev, pm_message_t state)
 
        mutex_lock(&pcr->pcr_mutex);
 
-       if (pcr->ops->turn_off_led)
-               pcr->ops->turn_off_led(pcr);
-
-       rtsx_pci_writel(pcr, RTSX_BIER, 0);
-       pcr->bier = 0;
-
-       rtsx_pci_write_register(pcr, PETXCFG, 0x08, 0x08);
-       rtsx_pci_write_register(pcr, HOST_SLEEP_STATE, 0x03, 0x02);
+       rtsx_pci_power_off(pcr, HOST_ENTER_S3);
 
        pci_save_state(pcidev);
        pci_enable_wake(pcidev, pci_choose_state(pcidev, state), 0);
@@ -1262,7 +1284,7 @@ static int rtsx_pci_suspend(struct pci_dev *pcidev, pm_message_t state)
        pci_set_power_state(pcidev, pci_choose_state(pcidev, state));
 
        mutex_unlock(&pcr->pcr_mutex);
-       return ret;
+       return 0;
 }
 
 static int rtsx_pci_resume(struct pci_dev *pcidev)
@@ -1300,10 +1322,25 @@ out:
        return ret;
 }
 
+static void rtsx_pci_shutdown(struct pci_dev *pcidev)
+{
+       struct pcr_handle *handle;
+       struct rtsx_pcr *pcr;
+
+       dev_dbg(&(pcidev->dev), "--> %s\n", __func__);
+
+       handle = pci_get_drvdata(pcidev);
+       pcr = handle->pcr;
+       rtsx_pci_power_off(pcr, HOST_ENTER_S1);
+
+       pci_disable_device(pcidev);
+}
+
 #else /* CONFIG_PM */
 
 #define rtsx_pci_suspend NULL
 #define rtsx_pci_resume NULL
+#define rtsx_pci_shutdown NULL
 
 #endif /* CONFIG_PM */
 
@@ -1314,6 +1351,7 @@ static struct pci_driver rtsx_pci_driver = {
        .remove = rtsx_pci_remove,
        .suspend = rtsx_pci_suspend,
        .resume = rtsx_pci_resume,
+       .shutdown = rtsx_pci_shutdown,
 };
 module_pci_driver(rtsx_pci_driver);
 
index c0cac7e8972f6327571e9558eb3026aab4b6fa8a..947e79b05cebfd211590c93a53cec7f58182f907 100644 (file)
@@ -1,6 +1,6 @@
 /* Driver for Realtek PCI-Express card reader
  *
- * Copyright(c) 2009 Realtek Semiconductor Corp. All rights reserved.
+ * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the
@@ -17,7 +17,6 @@
  *
  * Author:
  *   Wei WANG <wei_wang@realsil.com.cn>
- *   No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
  */
 
 #ifndef __RTSX_PCR_H
@@ -35,4 +34,33 @@ void rts5227_init_params(struct rtsx_pcr *pcr);
 void rts5249_init_params(struct rtsx_pcr *pcr);
 void rtl8411b_init_params(struct rtsx_pcr *pcr);
 
+static inline u8 map_sd_drive(int idx)
+{
+       u8 sd_drive[4] = {
+               0x01,   /* Type D */
+               0x02,   /* Type C */
+               0x05,   /* Type A */
+               0x03    /* Type B */
+       };
+
+       return sd_drive[idx];
+}
+
+#define rtsx_vendor_setting_valid(reg)         (!((reg) & 0x1000000))
+#define rts5209_vendor_setting1_valid(reg)     (!((reg) & 0x80))
+#define rts5209_vendor_setting2_valid(reg)     ((reg) & 0x80)
+
+#define rtsx_reg_to_aspm(reg)                  (((reg) >> 28) & 0x03)
+#define rtsx_reg_to_sd30_drive_sel_1v8(reg)    (((reg) >> 26) & 0x03)
+#define rtsx_reg_to_sd30_drive_sel_3v3(reg)    (((reg) >> 5) & 0x03)
+#define rtsx_reg_to_card_drive_sel(reg)                ((((reg) >> 25) & 0x01) << 6)
+#define rtsx_reg_check_reverse_socket(reg)     ((reg) & 0x4000)
+#define rts5209_reg_to_aspm(reg)               (((reg) >> 5) & 0x03)
+#define rts5209_reg_check_ms_pmos(reg)         (!((reg) & 0x08))
+#define rts5209_reg_to_sd30_drive_sel_1v8(reg) (((reg) >> 3) & 0x07)
+#define rts5209_reg_to_sd30_drive_sel_3v3(reg) ((reg) & 0x07)
+#define rts5209_reg_to_card_drive_sel(reg)     ((reg) >> 8)
+#define rtl8411_reg_to_sd30_drive_sel_3v3(reg) (((reg) >> 5) & 0x07)
+#define rtl8411b_reg_to_sd30_drive_sel_3v3(reg)        ((reg) & 0x03)
+
 #endif
index 79767681483a65b78804e44263bfbba4ef135d2e..f530e4b73f19abd63fe42ba7f18230dba79c088f 100644 (file)
@@ -61,7 +61,9 @@ static struct mfd_cell s5m8767_devs[] = {
 static struct mfd_cell s2mps11_devs[] = {
        {
                .name = "s2mps11-pmic",
-       },
+       }, {
+               .name = "s2mps11-clk",
+       }
 };
 
 #ifdef CONFIG_OF
@@ -69,6 +71,9 @@ static struct of_device_id sec_dt_match[] = {
        {       .compatible = "samsung,s5m8767-pmic",
                .data = (void *)S5M8767X,
        },
+       {       .compatible = "samsung,s2mps11-pmic",
+               .data = (void *)S2MPS11X,
+       },
        {},
 };
 #endif
@@ -103,6 +108,31 @@ int sec_reg_update(struct sec_pmic_dev *sec_pmic, u8 reg, u8 val, u8 mask)
 }
 EXPORT_SYMBOL_GPL(sec_reg_update);
 
+static bool s2mps11_volatile(struct device *dev, unsigned int reg)
+{
+       switch (reg) {
+       case S2MPS11_REG_INT1M:
+       case S2MPS11_REG_INT2M:
+       case S2MPS11_REG_INT3M:
+               return false;
+       default:
+               return true;
+       }
+}
+
+static bool s5m8763_volatile(struct device *dev, unsigned int reg)
+{
+       switch (reg) {
+       case S5M8763_REG_IRQM1:
+       case S5M8763_REG_IRQM2:
+       case S5M8763_REG_IRQM3:
+       case S5M8763_REG_IRQM4:
+               return false;
+       default:
+               return true;
+       }
+}
+
 static struct regmap_config sec_regmap_config = {
        .reg_bits = 8,
        .val_bits = 8,
@@ -113,6 +143,8 @@ static struct regmap_config s2mps11_regmap_config = {
        .val_bits = 8,
 
        .max_register = S2MPS11_REG_L38CTRL,
+       .volatile_reg = s2mps11_volatile,
+       .cache_type = REGCACHE_FLAT,
 };
 
 static struct regmap_config s5m8763_regmap_config = {
@@ -120,6 +152,8 @@ static struct regmap_config s5m8763_regmap_config = {
        .val_bits = 8,
 
        .max_register = S5M8763_REG_LBCNFG2,
+       .volatile_reg = s5m8763_volatile,
+       .cache_type = REGCACHE_FLAT,
 };
 
 static struct regmap_config s5m8767_regmap_config = {
@@ -127,6 +161,8 @@ static struct regmap_config s5m8767_regmap_config = {
        .val_bits = 8,
 
        .max_register = S5M8767_REG_LDO28CTRL,
+       .volatile_reg = s2mps11_volatile,
+       .cache_type = REGCACHE_FLAT,
 };
 
 #ifdef CONFIG_OF
@@ -182,7 +218,7 @@ static inline int sec_i2c_get_driver_data(struct i2c_client *i2c,
 static int sec_pmic_probe(struct i2c_client *i2c,
                            const struct i2c_device_id *id)
 {
-       struct sec_platform_data *pdata = i2c->dev.platform_data;
+       struct sec_platform_data *pdata = dev_get_platdata(&i2c->dev);
        const struct regmap_config *regmap;
        struct sec_pmic_dev *sec_pmic;
        int ret;
index f5bc8e4bd4bf631b3266091a3af6212be07138cf..0e4a76daf18789b37e84163f2a83bb45afb642ea 100644 (file)
@@ -718,7 +718,7 @@ static int si476x_core_probe(struct i2c_client *client,
        atomic_set(&core->is_alive, 0);
        core->power_state = SI476X_POWER_DOWN;
 
-       pdata = client->dev.platform_data;
+       pdata = dev_get_platdata(&client->dev);
        if (pdata) {
                memcpy(&core->power_up_parameters,
                       &pdata->power_up_parameters,
index 9816c232e58331c202f8622e87657c90ccb1f972..33f040c558d090aca5887356bdd1cd2bbb11f461 100644 (file)
@@ -840,7 +840,7 @@ static int sm501_register_uart(struct sm501_devdata *sm, int devices)
        if (!pdev)
                return -ENOMEM;
 
-       uart_data = pdev->dev.platform_data;
+       uart_data = dev_get_platdata(&pdev->dev);
 
        if (devices & SM501_USE_UART0) {
                sm501_setup_uart_data(sm, uart_data++, 0x30000);
@@ -1167,7 +1167,7 @@ static int sm501_register_gpio_i2c_instance(struct sm501_devdata *sm,
        if (!pdev)
                return -ENOMEM;
 
-       icd = pdev->dev.platform_data;
+       icd = dev_get_platdata(&pdev->dev);
 
        /* We keep the pin_sda and pin_scl fields relative in case the
         * same platform data is passed to >1 SM501.
@@ -1403,7 +1403,7 @@ static int sm501_plat_probe(struct platform_device *dev)
 
        sm->dev = &dev->dev;
        sm->pdev_id = dev->id;
-       sm->platdata = dev->dev.platform_data;
+       sm->platdata = dev_get_platdata(&dev->dev);
 
        ret = platform_get_irq(dev, 0);
        if (ret < 0) {
index d70a343078fd5934ab1fff9732caed67a32b095c..65c6fa671acb27a67496b3b9277aa4919942dc89 100644 (file)
@@ -133,7 +133,7 @@ int sta2x11_mfd_get_regs_data(struct platform_device *dev,
                              void __iomem **regs,
                              spinlock_t **lock)
 {
-       struct pci_dev *pdev = *(struct pci_dev **)(dev->dev.platform_data);
+       struct pci_dev *pdev = *(struct pci_dev **)dev_get_platdata(&dev->dev);
        struct sta2x11_mfd *mfd;
 
        if (!pdev)
@@ -312,7 +312,7 @@ static int sta2x11_mfd_platform_probe(struct platform_device *dev,
        const char *name = sta2x11_mfd_names[index];
        struct regmap_config *regmap_config = sta2x11_mfd_regmap_configs[index];
 
-       pdev = dev->dev.platform_data;
+       pdev = dev_get_platdata(&dev->dev);
        mfd = sta2x11_mfd_find(*pdev);
        if (!mfd)
                return -ENODEV;
index 5d5e6f90424aa981653e0e3a609845d4de5a935f..fff63a41862cf6bbb4acfac38496447a4137ad0a 100644 (file)
@@ -1106,7 +1106,8 @@ static int stmpe_devices_init(struct stmpe *stmpe)
        return ret;
 }
 
-void stmpe_of_probe(struct stmpe_platform_data *pdata, struct device_node *np)
+static void stmpe_of_probe(struct stmpe_platform_data *pdata,
+                          struct device_node *np)
 {
        struct device_node *child;
 
index 1a31512369f9a4e9ef0b8b7a36853301d0c80a09..27db1f92bb262891d59effede70e149e131c4d37 100644 (file)
@@ -25,7 +25,6 @@
 static struct platform_driver syscon_driver;
 
 struct syscon {
-       void __iomem *base;
        struct regmap *regmap;
 };
 
@@ -129,6 +128,7 @@ static int syscon_probe(struct platform_device *pdev)
        struct device *dev = &pdev->dev;
        struct syscon *syscon;
        struct resource *res;
+       void __iomem *base;
 
        syscon = devm_kzalloc(dev, sizeof(*syscon), GFP_KERNEL);
        if (!syscon)
@@ -138,12 +138,12 @@ static int syscon_probe(struct platform_device *pdev)
        if (!res)
                return -ENOENT;
 
-       syscon->base = devm_ioremap(dev, res->start, resource_size(res));
-       if (!syscon->base)
+       base = devm_ioremap(dev, res->start, resource_size(res));
+       if (!base)
                return -ENOMEM;
 
        syscon_regmap_config.max_register = res->end - res->start - 3;
-       syscon->regmap = devm_regmap_init_mmio(dev, syscon->base,
+       syscon->regmap = devm_regmap_init_mmio(dev, base,
                                        &syscon_regmap_config);
        if (IS_ERR(syscon->regmap)) {
                dev_err(dev, "regmap init failed\n");
index a21bff283a98fa7d5637e2f9ebc9ba6e94a602c7..9e04a74859818bd0016dfd6a60bbcd94669a4248 100644 (file)
@@ -281,7 +281,7 @@ static void t7l66xb_detach_irq(struct platform_device *dev)
 static int t7l66xb_suspend(struct platform_device *dev, pm_message_t state)
 {
        struct t7l66xb *t7l66xb = platform_get_drvdata(dev);
-       struct t7l66xb_platform_data *pdata = dev->dev.platform_data;
+       struct t7l66xb_platform_data *pdata = dev_get_platdata(&dev->dev);
 
        if (pdata && pdata->suspend)
                pdata->suspend(dev);
@@ -293,7 +293,7 @@ static int t7l66xb_suspend(struct platform_device *dev, pm_message_t state)
 static int t7l66xb_resume(struct platform_device *dev)
 {
        struct t7l66xb *t7l66xb = platform_get_drvdata(dev);
-       struct t7l66xb_platform_data *pdata = dev->dev.platform_data;
+       struct t7l66xb_platform_data *pdata = dev_get_platdata(&dev->dev);
 
        clk_enable(t7l66xb->clk48m);
        if (pdata && pdata->resume)
@@ -313,7 +313,7 @@ static int t7l66xb_resume(struct platform_device *dev)
 
 static int t7l66xb_probe(struct platform_device *dev)
 {
-       struct t7l66xb_platform_data *pdata = dev->dev.platform_data;
+       struct t7l66xb_platform_data *pdata = dev_get_platdata(&dev->dev);
        struct t7l66xb *t7l66xb;
        struct resource *iomem, *rscr;
        int ret;
@@ -409,7 +409,7 @@ err_noirq:
 
 static int t7l66xb_remove(struct platform_device *dev)
 {
-       struct t7l66xb_platform_data *pdata = dev->dev.platform_data;
+       struct t7l66xb_platform_data *pdata = dev_get_platdata(&dev->dev);
        struct t7l66xb *t7l66xb = platform_get_drvdata(dev);
        int ret;
 
index 4cb92bb2aea2f27ad2a1cf4177890b7488ae7dc9..70f4909fee13b8d30a632029a9639b41b81cb051 100644 (file)
@@ -325,7 +325,7 @@ static int tc3589x_of_probe(struct device_node *np,
 static int tc3589x_probe(struct i2c_client *i2c,
                                   const struct i2c_device_id *id)
 {
-       struct tc3589x_platform_data *pdata = i2c->dev.platform_data;
+       struct tc3589x_platform_data *pdata = dev_get_platdata(&i2c->dev);
        struct device_node *np = i2c->dev.of_node;
        struct tc3589x *tc3589x;
        int ret;
index 65c425a517c50ec94836a7edb1e7cdf496e04481..acd0f3a41044a5171cc0cfde9d03ad693fa9f48c 100644 (file)
@@ -48,7 +48,7 @@ static struct resource tc6387xb_mmc_resources[] = {
 static int tc6387xb_suspend(struct platform_device *dev, pm_message_t state)
 {
        struct tc6387xb *tc6387xb = platform_get_drvdata(dev);
-       struct tc6387xb_platform_data *pdata = dev->dev.platform_data;
+       struct tc6387xb_platform_data *pdata = dev_get_platdata(&dev->dev);
 
        if (pdata && pdata->suspend)
                pdata->suspend(dev);
@@ -60,7 +60,7 @@ static int tc6387xb_suspend(struct platform_device *dev, pm_message_t state)
 static int tc6387xb_resume(struct platform_device *dev)
 {
        struct tc6387xb *tc6387xb = platform_get_drvdata(dev);
-       struct tc6387xb_platform_data *pdata = dev->dev.platform_data;
+       struct tc6387xb_platform_data *pdata = dev_get_platdata(&dev->dev);
 
        clk_enable(tc6387xb->clk32k);
        if (pdata && pdata->resume)
@@ -140,7 +140,7 @@ static struct mfd_cell tc6387xb_cells[] = {
 
 static int tc6387xb_probe(struct platform_device *dev)
 {
-       struct tc6387xb_platform_data *pdata = dev->dev.platform_data;
+       struct tc6387xb_platform_data *pdata = dev_get_platdata(&dev->dev);
        struct resource *iomem, *rscr;
        struct clk *clk32k;
        struct tc6387xb *tc6387xb;
index a563dfa3cf87434706ab8f464d1538329fa5b4f0..11c19e5385510ccf8d8272e709e7b95264b66afc 100644 (file)
@@ -604,7 +604,7 @@ static void tc6393xb_detach_irq(struct platform_device *dev)
 
 static int tc6393xb_probe(struct platform_device *dev)
 {
-       struct tc6393xb_platform_data *tcpd = dev->dev.platform_data;
+       struct tc6393xb_platform_data *tcpd = dev_get_platdata(&dev->dev);
        struct tc6393xb *tc6393xb;
        struct resource *iomem, *rscr;
        int ret, temp;
@@ -733,7 +733,7 @@ err_kzalloc:
 
 static int tc6393xb_remove(struct platform_device *dev)
 {
-       struct tc6393xb_platform_data *tcpd = dev->dev.platform_data;
+       struct tc6393xb_platform_data *tcpd = dev_get_platdata(&dev->dev);
        struct tc6393xb *tc6393xb = platform_get_drvdata(dev);
        int ret;
 
@@ -765,7 +765,7 @@ static int tc6393xb_remove(struct platform_device *dev)
 #ifdef CONFIG_PM
 static int tc6393xb_suspend(struct platform_device *dev, pm_message_t state)
 {
-       struct tc6393xb_platform_data *tcpd = dev->dev.platform_data;
+       struct tc6393xb_platform_data *tcpd = dev_get_platdata(&dev->dev);
        struct tc6393xb *tc6393xb = platform_get_drvdata(dev);
        int i, ret;
 
@@ -788,7 +788,7 @@ static int tc6393xb_suspend(struct platform_device *dev, pm_message_t state)
 
 static int tc6393xb_resume(struct platform_device *dev)
 {
-       struct tc6393xb_platform_data *tcpd = dev->dev.platform_data;
+       struct tc6393xb_platform_data *tcpd = dev_get_platdata(&dev->dev);
        struct tc6393xb *tc6393xb = platform_get_drvdata(dev);
        int ret;
        int i;
index 09a14cec351ba4b0d3c606fcf8ad112684ee1a2b..1c2b994e1f6c0b0ce1e4bd346a4ff6e1e1c77e3d 100644 (file)
@@ -318,7 +318,7 @@ static irqreturn_t ti_ssp_interrupt(int irq, void *dev_data)
 static int ti_ssp_probe(struct platform_device *pdev)
 {
        static struct ti_ssp *ssp;
-       const struct ti_ssp_data *pdata = pdev->dev.platform_data;
+       const struct ti_ssp_data *pdata = dev_get_platdata(&pdev->dev);
        int error = 0, prediv = 0xff, id;
        unsigned long sysclk;
        struct device *dev = &pdev->dev;
index b003a16ba227384d8deddb861d89515a0f29b525..baaf5a8123bb8eba1aadffda901238acce2a9a98 100644 (file)
@@ -57,20 +57,20 @@ EXPORT_SYMBOL_GPL(am335x_tsc_se_update);
 void am335x_tsc_se_set(struct ti_tscadc_dev *tsadc, u32 val)
 {
        spin_lock(&tsadc->reg_lock);
+       tsadc->reg_se_cache = tscadc_readl(tsadc, REG_SE);
        tsadc->reg_se_cache |= val;
-       spin_unlock(&tsadc->reg_lock);
-
        am335x_tsc_se_update(tsadc);
+       spin_unlock(&tsadc->reg_lock);
 }
 EXPORT_SYMBOL_GPL(am335x_tsc_se_set);
 
 void am335x_tsc_se_clr(struct ti_tscadc_dev *tsadc, u32 val)
 {
        spin_lock(&tsadc->reg_lock);
+       tsadc->reg_se_cache = tscadc_readl(tsadc, REG_SE);
        tsadc->reg_se_cache &= ~val;
-       spin_unlock(&tsadc->reg_lock);
-
        am335x_tsc_se_update(tsadc);
+       spin_unlock(&tsadc->reg_lock);
 }
 EXPORT_SYMBOL_GPL(am335x_tsc_se_clr);
 
@@ -197,24 +197,21 @@ static    int ti_tscadc_probe(struct platform_device *pdev)
        clock_rate = clk_get_rate(clk);
        clk_put(clk);
        clk_value = clock_rate / ADC_CLK;
-       if (clk_value < MAX_CLK_DIV) {
-               dev_err(&pdev->dev, "clock input less than min clock requirement\n");
-               err = -EINVAL;
-               goto err_disable_clk;
-       }
+
        /* TSCADC_CLKDIV needs to be configured to the value minus 1 */
        clk_value = clk_value - 1;
        tscadc_writel(tscadc, REG_CLKDIV, clk_value);
 
        /* Set the control register bits */
        ctrl = CNTRLREG_STEPCONFIGWRT |
-                       CNTRLREG_TSCENB |
-                       CNTRLREG_STEPID |
-                       CNTRLREG_4WIRE;
+                       CNTRLREG_STEPID;
+       if (tsc_wires > 0)
+               ctrl |= CNTRLREG_4WIRE | CNTRLREG_TSCENB;
        tscadc_writel(tscadc, REG_CTRL, ctrl);
 
        /* Set register bits for Idle Config Mode */
-       tscadc_idle_config(tscadc);
+       if (tsc_wires > 0)
+               tscadc_idle_config(tscadc);
 
        /* Enable the TSC module enable bit */
        ctrl = tscadc_readl(tscadc, REG_CTRL);
@@ -294,10 +291,13 @@ static int tscadc_resume(struct device *dev)
        pm_runtime_get_sync(dev);
 
        /* context restore */
-       ctrl = CNTRLREG_STEPCONFIGWRT | CNTRLREG_TSCENB |
-                       CNTRLREG_STEPID | CNTRLREG_4WIRE;
+       ctrl = CNTRLREG_STEPCONFIGWRT | CNTRLREG_STEPID;
+       if (tscadc_dev->tsc_cell != -1)
+               ctrl |= CNTRLREG_TSCENB | CNTRLREG_4WIRE;
        tscadc_writel(tscadc_dev, REG_CTRL, ctrl);
-       tscadc_idle_config(tscadc_dev);
+
+       if (tscadc_dev->tsc_cell != -1)
+               tscadc_idle_config(tscadc_dev);
        am335x_tsc_se_update(tscadc_dev);
        restore = tscadc_readl(tscadc_dev, REG_CTRL);
        tscadc_writel(tscadc_dev, REG_CTRL,
index 1d302f583adf2c3b2633164daa4c9fe253563476..b5dfa6e4e692968f80bc31f85be7757db4c80e69 100644 (file)
@@ -147,7 +147,7 @@ static int tps6105x_probe(struct i2c_client *client,
 
        i2c_set_clientdata(client, tps6105x);
        tps6105x->client = client;
-       pdata = client->dev.platform_data;
+       pdata = dev_get_platdata(&client->dev);
        tps6105x->pdata = pdata;
        mutex_init(&tps6105x->lock);
 
index da2691f22e114390f3f40817473f24a12c302f8f..743fb524fc8ae96f58cfaa6ea635317b621878cb 100644 (file)
@@ -242,8 +242,8 @@ static int dbg_show(struct seq_file *s, void *_)
        seq_printf(s, "mask2     %s\n", buf);
        /* ignore ackint2 */
 
-       schedule_delayed_work(&tps->work, POWER_POLL_DELAY);
-
+       queue_delayed_work(system_power_efficient_wq, &tps->work,
+                          POWER_POLL_DELAY);
 
        /* VMAIN voltage, enable lowpower, etc */
        value = i2c_smbus_read_byte_data(tps->client, TPS_VDCDC1);
@@ -400,7 +400,8 @@ static void tps65010_interrupt(struct tps65010 *tps)
                        && (tps->chgstatus & (TPS_CHG_USB|TPS_CHG_AC)))
                poll = 1;
        if (poll)
-               schedule_delayed_work(&tps->work, POWER_POLL_DELAY);
+               queue_delayed_work(system_power_efficient_wq, &tps->work,
+                                  POWER_POLL_DELAY);
 
        /* also potentially gpio-in rise or fall */
 }
@@ -448,7 +449,7 @@ static irqreturn_t tps65010_irq(int irq, void *_tps)
 
        disable_irq_nosync(irq);
        set_bit(FLAG_IRQ_ENABLE, &tps->flags);
-       schedule_delayed_work(&tps->work, 0);
+       queue_delayed_work(system_power_efficient_wq, &tps->work, 0);
        return IRQ_HANDLED;
 }
 
@@ -517,7 +518,7 @@ static struct tps65010 *the_tps;
 static int __exit tps65010_remove(struct i2c_client *client)
 {
        struct tps65010         *tps = i2c_get_clientdata(client);
-       struct tps65010_board   *board = client->dev.platform_data;
+       struct tps65010_board   *board = dev_get_platdata(&client->dev);
 
        if (board && board->teardown) {
                int status = board->teardown(client, board->context);
@@ -529,7 +530,6 @@ static int __exit tps65010_remove(struct i2c_client *client)
                free_irq(client->irq, tps);
        cancel_delayed_work_sync(&tps->work);
        debugfs_remove(tps->file);
-       kfree(tps);
        the_tps = NULL;
        return 0;
 }
@@ -539,7 +539,7 @@ static int tps65010_probe(struct i2c_client *client,
 {
        struct tps65010         *tps;
        int                     status;
-       struct tps65010_board   *board = client->dev.platform_data;
+       struct tps65010_board   *board = dev_get_platdata(&client->dev);
 
        if (the_tps) {
                dev_dbg(&client->dev, "only one tps6501x chip allowed\n");
@@ -549,7 +549,7 @@ static int tps65010_probe(struct i2c_client *client,
        if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
                return -EINVAL;
 
-       tps = kzalloc(sizeof *tps, GFP_KERNEL);
+       tps = devm_kzalloc(&client->dev, sizeof(*tps), GFP_KERNEL);
        if (!tps)
                return -ENOMEM;
 
@@ -567,7 +567,7 @@ static int tps65010_probe(struct i2c_client *client,
                if (status < 0) {
                        dev_dbg(&client->dev, "can't get IRQ %d, err %d\n",
                                        client->irq, status);
-                       goto fail1;
+                       return status;
                }
                /* annoying race here, ideally we'd have an option
                 * to claim the irq now and enable it later.
@@ -667,9 +667,6 @@ static int tps65010_probe(struct i2c_client *client,
        }
 
        return 0;
-fail1:
-       kfree(tps);
-       return status;
 }
 
 static const struct i2c_device_id tps65010_id[] = {
@@ -718,7 +715,8 @@ int tps65010_set_vbus_draw(unsigned mA)
                        && test_and_set_bit(
                                FLAG_VBUS_CHANGED, &the_tps->flags)) {
                /* gadget drivers call this in_irq() */
-               schedule_delayed_work(&the_tps->work, 0);
+               queue_delayed_work(system_power_efficient_wq, &the_tps->work,
+                                  0);
        }
        local_irq_restore(flags);
 
index fbd6ee67b5a511317c02fe45d5d039b881987d8d..e6f03a733879b063d670ecc2bd074ea28063ec18 100644 (file)
@@ -172,7 +172,7 @@ MODULE_DEVICE_TABLE(of, tps65090_of_match);
 static int tps65090_i2c_probe(struct i2c_client *client,
                                        const struct i2c_device_id *id)
 {
-       struct tps65090_platform_data *pdata = client->dev.platform_data;
+       struct tps65090_platform_data *pdata = dev_get_platdata(&client->dev);
        int irq_base = 0;
        struct tps65090 *tps65090;
        int ret;
index 4b93ed4d5cd6a3649efdaa2c0eedc191792ab7c0..f54fe4d4f77b34a7ecbcef449635dbdbccf036d8 100644 (file)
@@ -462,7 +462,7 @@ static void tps6586x_power_off(void)
 static int tps6586x_i2c_probe(struct i2c_client *client,
                                        const struct i2c_device_id *id)
 {
-       struct tps6586x_platform_data *pdata = client->dev.platform_data;
+       struct tps6586x_platform_data *pdata = dev_get_platdata(&client->dev);
        struct tps6586x *tps6586x;
        int ret;
 
index 479886a4cf8054b60750303cc092c88128331c9d..925a044cbdf61740e3c54e0a481a9774587c652b 100644 (file)
@@ -123,7 +123,7 @@ EXPORT_SYMBOL_GPL(tps65912_reg_write);
 
 int tps65912_device_init(struct tps65912 *tps65912)
 {
-       struct tps65912_board *pmic_plat_data = tps65912->dev->platform_data;
+       struct tps65912_board *pmic_plat_data = dev_get_platdata(tps65912->dev);
        struct tps65912_platform_data *init_data;
        int ret, dcdc_avs, value;
 
index c90a2c450f5113c44536eac3b82315be92e9e89d..f15ee6d5cfbf96fb97ef6f169528c624b09a2aba 100644 (file)
@@ -418,7 +418,7 @@ static const struct regmap_config tps80031_regmap_configs[] = {
 static int tps80031_probe(struct i2c_client *client,
                          const struct i2c_device_id *id)
 {
-       struct tps80031_platform_data *pdata = client->dev.platform_data;
+       struct tps80031_platform_data *pdata = dev_get_platdata(&client->dev);
        struct tps80031 *tps80031;
        int ret;
        uint8_t es_version;
index 7f150d94d295146fc58b9c07481d06eb950b8604..29473c2c95ae0d92aa75184199486f70bdf5fc53 100644 (file)
@@ -1137,7 +1137,7 @@ static int twl_remove(struct i2c_client *client)
 static int
 twl_probe(struct i2c_client *client, const struct i2c_device_id *id)
 {
-       struct twl4030_platform_data    *pdata = client->dev.platform_data;
+       struct twl4030_platform_data    *pdata = dev_get_platdata(&client->dev);
        struct device_node              *node = client->dev.of_node;
        struct platform_device          *pdev;
        struct regmap_config            *twl_regmap_config;
index a31fba96ef438f745d0966d1c0e7828a8e0c930b..07fe542e6fc008a48995dc6191b73b758a20dcf6 100644 (file)
@@ -187,7 +187,7 @@ static bool twl4030_audio_has_vibra(struct twl4030_audio_data *pdata,
 static int twl4030_audio_probe(struct platform_device *pdev)
 {
        struct twl4030_audio *audio;
-       struct twl4030_audio_data *pdata = pdev->dev.platform_data;
+       struct twl4030_audio_data *pdata = dev_get_platdata(&pdev->dev);
        struct device_node *node = pdev->dev.of_node;
        struct mfd_cell *cell = NULL;
        int ret, childs = 0;
index 1ea54d4d003aeb84c87c7af509dc13052a4bfc3f..4c583e47133993b59dd512f1e3ec7316fb68c7fb 100644 (file)
@@ -701,7 +701,7 @@ static int twl4030_madc_set_power(struct twl4030_madc_data *madc, int on)
 static int twl4030_madc_probe(struct platform_device *pdev)
 {
        struct twl4030_madc_data *madc;
-       struct twl4030_madc_platform_data *pdata = pdev->dev.platform_data;
+       struct twl4030_madc_platform_data *pdata = dev_get_platdata(&pdev->dev);
        int ret;
        u8 regval;
 
index a5fd3c7382110d7298ea4ddf72ab644dcf1c6ed5..96162b62f3c0897df3f923ee0d4befcebfcb9048 100644 (file)
@@ -493,7 +493,7 @@ int twl4030_remove_script(u8 flags)
        return err;
 }
 
-int twl4030_power_configure_scripts(struct twl4030_power_data *pdata)
+static int twl4030_power_configure_scripts(struct twl4030_power_data *pdata)
 {
        int err;
        int i;
@@ -509,7 +509,7 @@ int twl4030_power_configure_scripts(struct twl4030_power_data *pdata)
        return 0;
 }
 
-int twl4030_power_configure_resources(struct twl4030_power_data *pdata)
+static int twl4030_power_configure_resources(struct twl4030_power_data *pdata)
 {
        struct twl4030_resconfig *resconfig = pdata->resource_config;
        int err;
@@ -553,9 +553,9 @@ static bool twl4030_power_use_poweroff(struct twl4030_power_data *pdata,
        return false;
 }
 
-int twl4030_power_probe(struct platform_device *pdev)
+static int twl4030_power_probe(struct platform_device *pdev)
 {
-       struct twl4030_power_data *pdata = pdev->dev.platform_data;
+       struct twl4030_power_data *pdata = dev_get_platdata(&pdev->dev);
        struct device_node *node = pdev->dev.of_node;
        int err = 0;
        int err2 = 0;
index 277a8dba42d5742903863e1bfc325297e0f671aa..517eda832f79978ac772c94b3bcf111643ec8835 100644 (file)
@@ -41,6 +41,7 @@
 #include <linux/suspend.h>
 #include <linux/of.h>
 #include <linux/irqdomain.h>
+#include <linux/of_device.h>
 
 #include "twl-core.h"
 
@@ -84,39 +85,77 @@ static int twl6030_interrupt_mapping[24] = {
        CHARGERFAULT_INTR_OFFSET,       /* Bit 22       INT_CHRG        */
        RSV_INTR_OFFSET,        /* Bit 23       Reserved                */
 };
+
+static int twl6032_interrupt_mapping[24] = {
+       PWR_INTR_OFFSET,        /* Bit 0        PWRON                   */
+       PWR_INTR_OFFSET,        /* Bit 1        RPWRON                  */
+       PWR_INTR_OFFSET,        /* Bit 2        SYS_VLOW                */
+       RTC_INTR_OFFSET,        /* Bit 3        RTC_ALARM               */
+       RTC_INTR_OFFSET,        /* Bit 4        RTC_PERIOD              */
+       HOTDIE_INTR_OFFSET,     /* Bit 5        HOT_DIE                 */
+       SMPSLDO_INTR_OFFSET,    /* Bit 6        VXXX_SHORT              */
+       PWR_INTR_OFFSET,        /* Bit 7        SPDURATION              */
+
+       PWR_INTR_OFFSET,        /* Bit 8        WATCHDOG                */
+       BATDETECT_INTR_OFFSET,  /* Bit 9        BAT                     */
+       SIMDETECT_INTR_OFFSET,  /* Bit 10       SIM                     */
+       MMCDETECT_INTR_OFFSET,  /* Bit 11       MMC                     */
+       MADC_INTR_OFFSET,       /* Bit 12       GPADC_RT_EOC            */
+       MADC_INTR_OFFSET,       /* Bit 13       GPADC_SW_EOC            */
+       GASGAUGE_INTR_OFFSET,   /* Bit 14       CC_EOC                  */
+       GASGAUGE_INTR_OFFSET,   /* Bit 15       CC_AUTOCAL              */
+
+       USBOTG_INTR_OFFSET,     /* Bit 16       ID_WKUP                 */
+       USBOTG_INTR_OFFSET,     /* Bit 17       VBUS_WKUP               */
+       USBOTG_INTR_OFFSET,     /* Bit 18       ID                      */
+       USB_PRES_INTR_OFFSET,   /* Bit 19       VBUS                    */
+       CHARGER_INTR_OFFSET,    /* Bit 20       CHRG_CTRL               */
+       CHARGERFAULT_INTR_OFFSET,       /* Bit 21       EXT_CHRG        */
+       CHARGERFAULT_INTR_OFFSET,       /* Bit 22       INT_CHRG        */
+       RSV_INTR_OFFSET,        /* Bit 23       Reserved                */
+};
+
 /*----------------------------------------------------------------------*/
 
-static unsigned twl6030_irq_base;
-static int twl_irq;
-static bool twl_irq_wake_enabled;
+struct twl6030_irq {
+       unsigned int            irq_base;
+       int                     twl_irq;
+       bool                    irq_wake_enabled;
+       atomic_t                wakeirqs;
+       struct notifier_block   pm_nb;
+       struct irq_chip         irq_chip;
+       struct irq_domain       *irq_domain;
+       const int               *irq_mapping_tbl;
+};
 
-static struct completion irq_event;
-static atomic_t twl6030_wakeirqs = ATOMIC_INIT(0);
+static struct twl6030_irq *twl6030_irq;
 
 static int twl6030_irq_pm_notifier(struct notifier_block *notifier,
                                   unsigned long pm_event, void *unused)
 {
        int chained_wakeups;
+       struct twl6030_irq *pdata = container_of(notifier, struct twl6030_irq,
+                                                 pm_nb);
 
        switch (pm_event) {
        case PM_SUSPEND_PREPARE:
-               chained_wakeups = atomic_read(&twl6030_wakeirqs);
+               chained_wakeups = atomic_read(&pdata->wakeirqs);
 
-               if (chained_wakeups && !twl_irq_wake_enabled) {
-                       if (enable_irq_wake(twl_irq))
+               if (chained_wakeups && !pdata->irq_wake_enabled) {
+                       if (enable_irq_wake(pdata->twl_irq))
                                pr_err("twl6030 IRQ wake enable failed\n");
                        else
-                               twl_irq_wake_enabled = true;
-               } else if (!chained_wakeups && twl_irq_wake_enabled) {
-                       disable_irq_wake(twl_irq);
-                       twl_irq_wake_enabled = false;
+                               pdata->irq_wake_enabled = true;
+               } else if (!chained_wakeups && pdata->irq_wake_enabled) {
+                       disable_irq_wake(pdata->twl_irq);
+                       pdata->irq_wake_enabled = false;
                }
 
-               disable_irq(twl_irq);
+               disable_irq(pdata->twl_irq);
                break;
 
        case PM_POST_SUSPEND:
-               enable_irq(twl_irq);
+               enable_irq(pdata->twl_irq);
                break;
 
        default:
@@ -126,124 +165,77 @@ static int twl6030_irq_pm_notifier(struct notifier_block *notifier,
        return NOTIFY_DONE;
 }
 
-static struct notifier_block twl6030_irq_pm_notifier_block = {
-       .notifier_call = twl6030_irq_pm_notifier,
-};
-
 /*
- * This thread processes interrupts reported by the Primary Interrupt Handler.
- */
-static int twl6030_irq_thread(void *data)
+* Threaded irq handler for the twl6030 interrupt.
+* We query the interrupt controller in the twl6030 to determine
+* which module is generating the interrupt request and call
+* handle_nested_irq for that module.
+*/
+static irqreturn_t twl6030_irq_thread(int irq, void *data)
 {
-       long irq = (long)data;
-       static unsigned i2c_errors;
-       static const unsigned max_i2c_errors = 100;
-       int ret;
-
-       while (!kthread_should_stop()) {
-               int i;
-               union {
+       int i, ret;
+       union {
                u8 bytes[4];
                u32 int_sts;
-               } sts;
-
-               /* Wait for IRQ, then read PIH irq status (also blocking) */
-               wait_for_completion_interruptible(&irq_event);
-
-               /* read INT_STS_A, B and C in one shot using a burst read */
-               ret = twl_i2c_read(TWL_MODULE_PIH, sts.bytes,
-                               REG_INT_STS_A, 3);
-               if (ret) {
-                       pr_warning("twl6030: I2C error %d reading PIH ISR\n",
-                                       ret);
-                       if (++i2c_errors >= max_i2c_errors) {
-                               printk(KERN_ERR "Maximum I2C error count"
-                                               " exceeded.  Terminating %s.\n",
-                                               __func__);
-                               break;
-                       }
-                       complete(&irq_event);
-                       continue;
-               }
-
-
+       } sts;
+       struct twl6030_irq *pdata = data;
+
+       /* read INT_STS_A, B and C in one shot using a burst read */
+       ret = twl_i2c_read(TWL_MODULE_PIH, sts.bytes, REG_INT_STS_A, 3);
+       if (ret) {
+               pr_warn("twl6030_irq: I2C error %d reading PIH ISR\n", ret);
+               return IRQ_HANDLED;
+       }
 
-               sts.bytes[3] = 0; /* Only 24 bits are valid*/
+       sts.bytes[3] = 0; /* Only 24 bits are valid*/
 
-               /*
-                * Since VBUS status bit is not reliable for VBUS disconnect
-                * use CHARGER VBUS detection status bit instead.
-                */
-               if (sts.bytes[2] & 0x10)
-                       sts.bytes[2] |= 0x08;
-
-               for (i = 0; sts.int_sts; sts.int_sts >>= 1, i++) {
-                       local_irq_disable();
-                       if (sts.int_sts & 0x1) {
-                               int module_irq = twl6030_irq_base +
-                                       twl6030_interrupt_mapping[i];
-                               generic_handle_irq(module_irq);
-
-                       }
-               local_irq_enable();
+       /*
+        * Since VBUS status bit is not reliable for VBUS disconnect
+        * use CHARGER VBUS detection status bit instead.
+        */
+       if (sts.bytes[2] & 0x10)
+               sts.bytes[2] |= 0x08;
+
+       for (i = 0; sts.int_sts; sts.int_sts >>= 1, i++)
+               if (sts.int_sts & 0x1) {
+                       int module_irq =
+                               irq_find_mapping(pdata->irq_domain,
+                                                pdata->irq_mapping_tbl[i]);
+                       if (module_irq)
+                               handle_nested_irq(module_irq);
+                       else
+                               pr_err("twl6030_irq: Unmapped PIH ISR %u detected\n",
+                                      i);
+                       pr_debug("twl6030_irq: PIH ISR %u, virq%u\n",
+                                i, module_irq);
                }
 
-               /*
-                * NOTE:
-                * Simulation confirms that documentation is wrong w.r.t the
-                * interrupt status clear operation. A single *byte* write to
-                * any one of STS_A to STS_C register results in all three
-                * STS registers being reset. Since it does not matter which
-                * value is written, all three registers are cleared on a
-                * single byte write, so we just use 0x0 to clear.
-                */
-               ret = twl_i2c_write_u8(TWL_MODULE_PIH, 0x00, REG_INT_STS_A);
-               if (ret)
-                       pr_warning("twl6030: I2C error in clearing PIH ISR\n");
-
-               enable_irq(irq);
-       }
-
-       return 0;
-}
+       /*
+        * NOTE:
+        * Simulation confirms that documentation is wrong w.r.t the
+        * interrupt status clear operation. A single *byte* write to
+        * any one of STS_A to STS_C register results in all three
+        * STS registers being reset. Since it does not matter which
+        * value is written, all three registers are cleared on a
+        * single byte write, so we just use 0x0 to clear.
+        */
+       ret = twl_i2c_write_u8(TWL_MODULE_PIH, 0x00, REG_INT_STS_A);
+       if (ret)
+               pr_warn("twl6030_irq: I2C error in clearing PIH ISR\n");
 
-/*
- * handle_twl6030_int() is the desc->handle method for the twl6030 interrupt.
- * This is a chained interrupt, so there is no desc->action method for it.
- * Now we need to query the interrupt controller in the twl6030 to determine
- * which module is generating the interrupt request.  However, we can't do i2c
- * transactions in interrupt context, so we must defer that work to a kernel
- * thread.  All we do here is acknowledge and mask the interrupt and wakeup
- * the kernel thread.
- */
-static irqreturn_t handle_twl6030_pih(int irq, void *devid)
-{
-       disable_irq_nosync(irq);
-       complete(devid);
        return IRQ_HANDLED;
 }
 
 /*----------------------------------------------------------------------*/
 
-static inline void activate_irq(int irq)
-{
-#ifdef CONFIG_ARM
-       /* ARM requires an extra step to clear IRQ_NOREQUEST, which it
-        * sets on behalf of every irq_chip.  Also sets IRQ_NOPROBE.
-        */
-       set_irq_flags(irq, IRQF_VALID);
-#else
-       /* same effect on other architectures */
-       irq_set_noprobe(irq);
-#endif
-}
-
 static int twl6030_irq_set_wake(struct irq_data *d, unsigned int on)
 {
+       struct twl6030_irq *pdata = irq_get_chip_data(d->irq);
+
        if (on)
-               atomic_inc(&twl6030_wakeirqs);
+               atomic_inc(&pdata->wakeirqs);
        else
-               atomic_dec(&twl6030_wakeirqs);
+               atomic_dec(&pdata->wakeirqs);
 
        return 0;
 }
@@ -318,7 +310,8 @@ int twl6030_mmc_card_detect_config(void)
                return ret;
        }
 
-       return twl6030_irq_base + MMCDETECT_INTR_OFFSET;
+       return irq_find_mapping(twl6030_irq->irq_domain,
+                                MMCDETECT_INTR_OFFSET);
 }
 EXPORT_SYMBOL(twl6030_mmc_card_detect_config);
 
@@ -347,99 +340,143 @@ int twl6030_mmc_card_detect(struct device *dev, int slot)
 }
 EXPORT_SYMBOL(twl6030_mmc_card_detect);
 
+static int twl6030_irq_map(struct irq_domain *d, unsigned int virq,
+                             irq_hw_number_t hwirq)
+{
+       struct twl6030_irq *pdata = d->host_data;
+
+       irq_set_chip_data(virq, pdata);
+       irq_set_chip_and_handler(virq,  &pdata->irq_chip, handle_simple_irq);
+       irq_set_nested_thread(virq, true);
+       irq_set_parent(virq, pdata->twl_irq);
+
+#ifdef CONFIG_ARM
+       /*
+        * ARM requires an extra step to clear IRQ_NOREQUEST, which it
+        * sets on behalf of every irq_chip.  Also sets IRQ_NOPROBE.
+        */
+       set_irq_flags(virq, IRQF_VALID);
+#else
+       /* same effect on other architectures */
+       irq_set_noprobe(virq);
+#endif
+
+       return 0;
+}
+
+static void twl6030_irq_unmap(struct irq_domain *d, unsigned int virq)
+{
+#ifdef CONFIG_ARM
+       set_irq_flags(virq, 0);
+#endif
+       irq_set_chip_and_handler(virq, NULL, NULL);
+       irq_set_chip_data(virq, NULL);
+}
+
+static struct irq_domain_ops twl6030_irq_domain_ops = {
+       .map    = twl6030_irq_map,
+       .unmap  = twl6030_irq_unmap,
+       .xlate  = irq_domain_xlate_onetwocell,
+};
+
+static const struct of_device_id twl6030_of_match[] = {
+       {.compatible = "ti,twl6030", &twl6030_interrupt_mapping},
+       {.compatible = "ti,twl6032", &twl6032_interrupt_mapping},
+       { },
+};
+
 int twl6030_init_irq(struct device *dev, int irq_num)
 {
        struct                  device_node *node = dev->of_node;
-       int                     nr_irqs, irq_base, irq_end;
-       struct task_struct      *task;
-       static struct irq_chip  twl6030_irq_chip;
-       int                     status = 0;
-       int                     i;
+       int                     nr_irqs;
+       int                     status;
        u8                      mask[3];
+       const struct of_device_id *of_id;
 
-       nr_irqs = TWL6030_NR_IRQS;
-
-       irq_base = irq_alloc_descs(-1, 0, nr_irqs, 0);
-       if (IS_ERR_VALUE(irq_base)) {
-               dev_err(dev, "Fail to allocate IRQ descs\n");
-               return irq_base;
+       of_id = of_match_device(twl6030_of_match, dev);
+       if (!of_id || !of_id->data) {
+               dev_err(dev, "Unknown TWL device model\n");
+               return -EINVAL;
        }
 
-       irq_domain_add_legacy(node, nr_irqs, irq_base, 0,
-                             &irq_domain_simple_ops, NULL);
+       nr_irqs = TWL6030_NR_IRQS;
 
-       irq_end = irq_base + nr_irqs;
+       twl6030_irq = devm_kzalloc(dev, sizeof(*twl6030_irq), GFP_KERNEL);
+       if (!twl6030_irq) {
+               dev_err(dev, "twl6030_irq: Memory allocation failed\n");
+               return -ENOMEM;
+       }
 
        mask[0] = 0xFF;
        mask[1] = 0xFF;
        mask[2] = 0xFF;
 
        /* mask all int lines */
-       twl_i2c_write(TWL_MODULE_PIH, &mask[0], REG_INT_MSK_LINE_A, 3);
+       status = twl_i2c_write(TWL_MODULE_PIH, &mask[0], REG_INT_MSK_LINE_A, 3);
        /* mask all int sts */
-       twl_i2c_write(TWL_MODULE_PIH, &mask[0], REG_INT_MSK_STS_A, 3);
+       status |= twl_i2c_write(TWL_MODULE_PIH, &mask[0], REG_INT_MSK_STS_A, 3);
        /* clear INT_STS_A,B,C */
-       twl_i2c_write(TWL_MODULE_PIH, &mask[0], REG_INT_STS_A, 3);
+       status |= twl_i2c_write(TWL_MODULE_PIH, &mask[0], REG_INT_STS_A, 3);
 
-       twl6030_irq_base = irq_base;
+       if (status < 0) {
+               dev_err(dev, "I2C err writing TWL_MODULE_PIH: %d\n", status);
+               return status;
+       }
 
        /*
         * install an irq handler for each of the modules;
         * clone dummy irq_chip since PIH can't *do* anything
         */
-       twl6030_irq_chip = dummy_irq_chip;
-       twl6030_irq_chip.name = "twl6030";
-       twl6030_irq_chip.irq_set_type = NULL;
-       twl6030_irq_chip.irq_set_wake = twl6030_irq_set_wake;
-
-       for (i = irq_base; i < irq_end; i++) {
-               irq_set_chip_and_handler(i, &twl6030_irq_chip,
-                                        handle_simple_irq);
-               irq_set_chip_data(i, (void *)irq_num);
-               activate_irq(i);
+       twl6030_irq->irq_chip = dummy_irq_chip;
+       twl6030_irq->irq_chip.name = "twl6030";
+       twl6030_irq->irq_chip.irq_set_type = NULL;
+       twl6030_irq->irq_chip.irq_set_wake = twl6030_irq_set_wake;
+
+       twl6030_irq->pm_nb.notifier_call = twl6030_irq_pm_notifier;
+       atomic_set(&twl6030_irq->wakeirqs, 0);
+       twl6030_irq->irq_mapping_tbl = of_id->data;
+
+       twl6030_irq->irq_domain =
+               irq_domain_add_linear(node, nr_irqs,
+                                     &twl6030_irq_domain_ops, twl6030_irq);
+       if (!twl6030_irq->irq_domain) {
+               dev_err(dev, "Can't add irq_domain\n");
+               return -ENOMEM;
        }
 
-       dev_info(dev, "PIH (irq %d) chaining IRQs %d..%d\n",
-                       irq_num, irq_base, irq_end);
+       dev_info(dev, "PIH (irq %d) nested IRQs\n", irq_num);
 
        /* install an irq handler to demultiplex the TWL6030 interrupt */
-       init_completion(&irq_event);
-
-       status = request_irq(irq_num, handle_twl6030_pih, 0, "TWL6030-PIH",
-                            &irq_event);
+       status = request_threaded_irq(irq_num, NULL, twl6030_irq_thread,
+                                     IRQF_ONESHOT, "TWL6030-PIH", twl6030_irq);
        if (status < 0) {
                dev_err(dev, "could not claim irq %d: %d\n", irq_num, status);
                goto fail_irq;
        }
 
-       task = kthread_run(twl6030_irq_thread, (void *)irq_num, "twl6030-irq");
-       if (IS_ERR(task)) {
-               dev_err(dev, "could not create irq %d thread!\n", irq_num);
-               status = PTR_ERR(task);
-               goto fail_kthread;
-       }
-
-       twl_irq = irq_num;
-       register_pm_notifier(&twl6030_irq_pm_notifier_block);
-       return irq_base;
-
-fail_kthread:
-       free_irq(irq_num, &irq_event);
+       twl6030_irq->twl_irq = irq_num;
+       register_pm_notifier(&twl6030_irq->pm_nb);
+       return 0;
 
 fail_irq:
-       for (i = irq_base; i < irq_end; i++)
-               irq_set_chip_and_handler(i, NULL, NULL);
-
+       irq_domain_remove(twl6030_irq->irq_domain);
        return status;
 }
 
 int twl6030_exit_irq(void)
 {
-       unregister_pm_notifier(&twl6030_irq_pm_notifier_block);
-
-       if (twl6030_irq_base) {
-               pr_err("twl6030: can't yet clean up IRQs?\n");
-               return -ENOSYS;
+       if (twl6030_irq && twl6030_irq->twl_irq) {
+               unregister_pm_notifier(&twl6030_irq->pm_nb);
+               free_irq(twl6030_irq->twl_irq, NULL);
+               /*
+                * TODO: IRQ domain and allocated nested IRQ descriptors
+                * should be freed somehow here. Now It can't be done, because
+                * child devices will not be deleted during removing of
+                * TWL Core driver and they will still contain allocated
+                * virt IRQs in their Resources tables.
+                * The same prevents us from using devm_request_threaded_irq()
+                * in this module.
+                */
        }
        return 0;
 }
index 492ee2cd3400cf7b24863900137b740fbea6a72e..4d8d3b74d4e3dd3c404787d3a9559c74549e4306 100644 (file)
 #define VIBRACTRL_MEMBER(reg) ((reg == TWL6040_REG_VIBCTLL) ? 0 : 1)
 #define TWL6040_NUM_SUPPLIES   (2)
 
-static bool twl6040_has_vibra(struct twl6040_platform_data *pdata,
-                             struct device_node *node)
+static bool twl6040_has_vibra(struct device_node *node)
 {
-       if (pdata && pdata->vibra)
-               return true;
-
 #ifdef CONFIG_OF
        if (of_find_node_by_name(node, "vibra"))
                return true;
 #endif
-
        return false;
 }
 
@@ -520,14 +515,13 @@ static struct regmap_irq_chip twl6040_irq_chip = {
 static int twl6040_probe(struct i2c_client *client,
                         const struct i2c_device_id *id)
 {
-       struct twl6040_platform_data *pdata = client->dev.platform_data;
        struct device_node *node = client->dev.of_node;
        struct twl6040 *twl6040;
        struct mfd_cell *cell = NULL;
        int irq, ret, children = 0;
 
-       if (!pdata && !node) {
-               dev_err(&client->dev, "Platform data is missing\n");
+       if (!node) {
+               dev_err(&client->dev, "of node is missing\n");
                return -EINVAL;
        }
 
@@ -539,23 +533,19 @@ static int twl6040_probe(struct i2c_client *client,
 
        twl6040 = devm_kzalloc(&client->dev, sizeof(struct twl6040),
                               GFP_KERNEL);
-       if (!twl6040) {
-               ret = -ENOMEM;
-               goto err;
-       }
+       if (!twl6040)
+               return -ENOMEM;
 
        twl6040->regmap = devm_regmap_init_i2c(client, &twl6040_regmap_config);
-       if (IS_ERR(twl6040->regmap)) {
-               ret = PTR_ERR(twl6040->regmap);
-               goto err;
-       }
+       if (IS_ERR(twl6040->regmap))
+               return PTR_ERR(twl6040->regmap);
 
        i2c_set_clientdata(client, twl6040);
 
        twl6040->supplies[0].supply = "vio";
        twl6040->supplies[1].supply = "v2v1";
        ret = devm_regulator_bulk_get(&client->dev, TWL6040_NUM_SUPPLIES,
-                                twl6040->supplies);
+                                     twl6040->supplies);
        if (ret != 0) {
                dev_err(&client->dev, "Failed to get supplies: %d\n", ret);
                goto regulator_get_err;
@@ -576,44 +566,40 @@ static int twl6040_probe(struct i2c_client *client,
        twl6040->rev = twl6040_reg_read(twl6040, TWL6040_REG_ASICREV);
 
        /* ERRATA: Automatic power-up is not possible in ES1.0 */
-       if (twl6040_get_revid(twl6040) > TWL6040_REV_ES1_0) {
-               if (pdata)
-                       twl6040->audpwron = pdata->audpwron_gpio;
-               else
-                       twl6040->audpwron = of_get_named_gpio(node,
-                                               "ti,audpwron-gpio", 0);
-       } else
+       if (twl6040_get_revid(twl6040) > TWL6040_REV_ES1_0)
+               twl6040->audpwron = of_get_named_gpio(node,
+                                                     "ti,audpwron-gpio", 0);
+       else
                twl6040->audpwron = -EINVAL;
 
        if (gpio_is_valid(twl6040->audpwron)) {
                ret = devm_gpio_request_one(&client->dev, twl6040->audpwron,
-                                       GPIOF_OUT_INIT_LOW, "audpwron");
+                                           GPIOF_OUT_INIT_LOW, "audpwron");
                if (ret)
                        goto gpio_err;
        }
 
-       ret = regmap_add_irq_chip(twl6040->regmap, twl6040->irq,
-                       IRQF_ONESHOT, 0, &twl6040_irq_chip,
-                       &twl6040->irq_data);
+       ret = regmap_add_irq_chip(twl6040->regmap, twl6040->irq, IRQF_ONESHOT,
+                                 0, &twl6040_irq_chip,&twl6040->irq_data);
        if (ret < 0)
                goto gpio_err;
 
        twl6040->irq_ready = regmap_irq_get_virq(twl6040->irq_data,
-                                              TWL6040_IRQ_READY);
+                                                TWL6040_IRQ_READY);
        twl6040->irq_th = regmap_irq_get_virq(twl6040->irq_data,
-                                              TWL6040_IRQ_TH);
+                                             TWL6040_IRQ_TH);
 
        ret = devm_request_threaded_irq(twl6040->dev, twl6040->irq_ready, NULL,
-                                  twl6040_readyint_handler, IRQF_ONESHOT,
-                                  "twl6040_irq_ready", twl6040);
+                                       twl6040_readyint_handler, IRQF_ONESHOT,
+                                       "twl6040_irq_ready", twl6040);
        if (ret) {
                dev_err(twl6040->dev, "READY IRQ request failed: %d\n", ret);
                goto readyirq_err;
        }
 
        ret = devm_request_threaded_irq(twl6040->dev, twl6040->irq_th, NULL,
-                                  twl6040_thint_handler, IRQF_ONESHOT,
-                                  "twl6040_irq_th", twl6040);
+                                       twl6040_thint_handler, IRQF_ONESHOT,
+                                       "twl6040_irq_th", twl6040);
        if (ret) {
                dev_err(twl6040->dev, "Thermal IRQ request failed: %d\n", ret);
                goto thirq_err;
@@ -625,8 +611,6 @@ static int twl6040_probe(struct i2c_client *client,
        /*
         * The main functionality of twl6040 to provide audio on OMAP4+ systems.
         * We can add the ASoC codec child whenever this driver has been loaded.
-        * The ASoC codec can work without pdata, pass the platform_data only if
-        * it has been provided.
         */
        irq = regmap_irq_get_virq(twl6040->irq_data, TWL6040_IRQ_PLUG);
        cell = &twl6040->cells[children];
@@ -635,13 +619,10 @@ static int twl6040_probe(struct i2c_client *client,
        twl6040_codec_rsrc[0].end = irq;
        cell->resources = twl6040_codec_rsrc;
        cell->num_resources = ARRAY_SIZE(twl6040_codec_rsrc);
-       if (pdata && pdata->codec) {
-               cell->platform_data = pdata->codec;
-               cell->pdata_size = sizeof(*pdata->codec);
-       }
        children++;
 
-       if (twl6040_has_vibra(pdata, node)) {
+       /* Vibra input driver support */
+       if (twl6040_has_vibra(node)) {
                irq = regmap_irq_get_virq(twl6040->irq_data, TWL6040_IRQ_VIB);
 
                cell = &twl6040->cells[children];
@@ -650,28 +631,13 @@ static int twl6040_probe(struct i2c_client *client,
                twl6040_vibra_rsrc[0].end = irq;
                cell->resources = twl6040_vibra_rsrc;
                cell->num_resources = ARRAY_SIZE(twl6040_vibra_rsrc);
-
-               if (pdata && pdata->vibra) {
-                       cell->platform_data = pdata->vibra;
-                       cell->pdata_size = sizeof(*pdata->vibra);
-               }
                children++;
        }
 
-       /*
-        * Enable the GPO driver in the following cases:
-        * DT booted kernel or legacy boot with valid gpo platform_data
-        */
-       if (!pdata || (pdata && pdata->gpo)) {
-               cell = &twl6040->cells[children];
-               cell->name = "twl6040-gpo";
-
-               if (pdata) {
-                       cell->platform_data = pdata->gpo;
-                       cell->pdata_size = sizeof(*pdata->gpo);
-               }
-               children++;
-       }
+       /* GPO support */
+       cell = &twl6040->cells[children];
+       cell->name = "twl6040-gpo";
+       children++;
 
        ret = mfd_add_devices(&client->dev, -1, twl6040->cells, children,
                              NULL, 0, NULL);
@@ -690,7 +656,7 @@ gpio_err:
        regulator_bulk_disable(TWL6040_NUM_SUPPLIES, twl6040->supplies);
 regulator_get_err:
        i2c_set_clientdata(client, NULL);
-err:
+
        return ret;
 }
 
index e9031fa9d53d1983e0dfe5ee5307bf8d24f79857..ebb20edf9c1758a7d6e58ddce9f856dd89bbd860 100644 (file)
@@ -52,7 +52,7 @@ static int ucb1400_core_probe(struct device *dev)
        struct ucb1400_ts ucb_ts;
        struct ucb1400_gpio ucb_gpio;
        struct snd_ac97 *ac97;
-       struct ucb1400_pdata *pdata = dev->platform_data;
+       struct ucb1400_pdata *pdata = dev_get_platdata(dev);
 
        memset(&ucb_ts, 0, sizeof(ucb_ts));
        memset(&ucb_gpio, 0, sizeof(ucb_gpio));
index 70f02daeb22a884da64297f26bc3f43e4e365aa8..b7cf98f75e7cba606d94b9f7267645d2489bd880 100644 (file)
@@ -669,9 +669,10 @@ void ucb1x00_unregister_driver(struct ucb1x00_driver *drv)
        mutex_unlock(&ucb1x00_mutex);
 }
 
+#ifdef CONFIG_PM_SLEEP
 static int ucb1x00_suspend(struct device *dev)
 {
-       struct ucb1x00_plat_data *pdata = dev->platform_data;
+       struct ucb1x00_plat_data *pdata = dev_get_platdata(dev);
        struct ucb1x00 *ucb = dev_get_drvdata(dev);
        struct ucb1x00_dev *udev;
 
@@ -703,7 +704,7 @@ static int ucb1x00_suspend(struct device *dev)
 
 static int ucb1x00_resume(struct device *dev)
 {
-       struct ucb1x00_plat_data *pdata = dev->platform_data;
+       struct ucb1x00_plat_data *pdata = dev_get_platdata(dev);
        struct ucb1x00 *ucb = dev_get_drvdata(dev);
        struct ucb1x00_dev *udev;
 
@@ -736,6 +737,7 @@ static int ucb1x00_resume(struct device *dev)
        mutex_unlock(&ucb1x00_mutex);
        return 0;
 }
+#endif
 
 static const struct dev_pm_ops ucb1x00_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(ucb1x00_suspend, ucb1x00_resume)
index edbe6c1b755a75a467c933a10323860089a33595..f7c52d901040cc5b14f00ebb86a8678ee7918e6d 100644 (file)
@@ -172,12 +172,9 @@ static int wl1273_fm_set_volume(struct wl1273_core *core, unsigned int volume)
 
 static int wl1273_core_remove(struct i2c_client *client)
 {
-       struct wl1273_core *core = i2c_get_clientdata(client);
-
        dev_dbg(&client->dev, "%s\n", __func__);
 
        mfd_remove_devices(&client->dev);
-       kfree(core);
 
        return 0;
 }
@@ -185,7 +182,7 @@ static int wl1273_core_remove(struct i2c_client *client)
 static int wl1273_core_probe(struct i2c_client *client,
                                       const struct i2c_device_id *id)
 {
-       struct wl1273_fm_platform_data *pdata = client->dev.platform_data;
+       struct wl1273_fm_platform_data *pdata = dev_get_platdata(&client->dev);
        struct wl1273_core *core;
        struct mfd_cell *cell;
        int children = 0;
@@ -203,7 +200,7 @@ static int wl1273_core_probe(struct i2c_client *client,
                return -EINVAL;
        }
 
-       core = kzalloc(sizeof(*core), GFP_KERNEL);
+       core = devm_kzalloc(&client->dev, sizeof(*core), GFP_KERNEL);
        if (!core)
                return -ENOMEM;
 
@@ -249,7 +246,6 @@ static int wl1273_core_probe(struct i2c_client *client,
 
 err:
        pdata->free_resources();
-       kfree(core);
 
        dev_dbg(&client->dev, "%s\n", __func__);
 
index 2a7972349159fb91f27e802addf7790be81c341a..3113e39b318e27a7aa2e74b7433e601edae2da72 100644 (file)
@@ -468,12 +468,14 @@ static const struct reg_default wm5110_reg_default[] = {
        { 0x00000176, 0x0000 },    /* R374   - FLL1 Control 6 */
        { 0x00000177, 0x0281 },    /* R375   - FLL1 Loop Filter Test 1 */
        { 0x00000178, 0x0000 },    /* R376   - FLL1 NCO Test 0 */
+       { 0x00000179, 0x0000 },    /* R376   - FLL1 Control 7 */
        { 0x00000181, 0x0000 },    /* R385   - FLL1 Synchroniser 1 */
        { 0x00000182, 0x0000 },    /* R386   - FLL1 Synchroniser 2 */
        { 0x00000183, 0x0000 },    /* R387   - FLL1 Synchroniser 3 */
        { 0x00000184, 0x0000 },    /* R388   - FLL1 Synchroniser 4 */
        { 0x00000185, 0x0000 },    /* R389   - FLL1 Synchroniser 5 */
        { 0x00000186, 0x0000 },    /* R390   - FLL1 Synchroniser 6 */
+       { 0x00000187, 0x0001 },    /* R390   - FLL1 Synchroniser 7 */
        { 0x00000189, 0x0000 },    /* R393   - FLL1 Spread Spectrum */
        { 0x0000018A, 0x0004 },    /* R394   - FLL1 GPIO Clock */
        { 0x00000191, 0x0000 },    /* R401   - FLL2 Control 1 */
@@ -484,12 +486,14 @@ static const struct reg_default wm5110_reg_default[] = {
        { 0x00000196, 0x0000 },    /* R406   - FLL2 Control 6 */
        { 0x00000197, 0x0000 },    /* R407   - FLL2 Loop Filter Test 1 */
        { 0x00000198, 0x0000 },    /* R408   - FLL2 NCO Test 0 */
+       { 0x00000199, 0x0000 },    /* R408   - FLL2 Control 7 */
        { 0x000001A1, 0x0000 },    /* R417   - FLL2 Synchroniser 1 */
        { 0x000001A2, 0x0000 },    /* R418   - FLL2 Synchroniser 2 */
        { 0x000001A3, 0x0000 },    /* R419   - FLL2 Synchroniser 3 */
        { 0x000001A4, 0x0000 },    /* R420   - FLL2 Synchroniser 4 */
        { 0x000001A5, 0x0000 },    /* R421   - FLL2 Synchroniser 5 */
        { 0x000001A6, 0x0000 },    /* R422   - FLL2 Synchroniser 6 */
+       { 0x000001A7, 0x0001 },    /* R422   - FLL2 Synchroniser 7 */
        { 0x000001A9, 0x0000 },    /* R425   - FLL2 Spread Spectrum */
        { 0x000001AA, 0x0004 },    /* R426   - FLL2 GPIO Clock */
        { 0x00000200, 0x0006 },    /* R512   - Mic Charge Pump 1 */
@@ -503,6 +507,11 @@ static const struct reg_default wm5110_reg_default[] = {
        { 0x0000029C, 0x0000 },    /* R668   - Headphone Detect 2 */
        { 0x000002A3, 0x1102 },    /* R675   - Mic Detect 1 */
        { 0x000002A4, 0x009F },    /* R676   - Mic Detect 2 */
+       { 0x000002A5, 0x0000 },    /* R677   - Mic Detect 3 */
+       { 0x000002A6, 0x3737 },    /* R678   - Mic Detect Level 1 */
+       { 0x000002A7, 0x372C },    /* R679   - Mic Detect Level 2 */
+       { 0x000002A8, 0x1422 },    /* R680   - Mic Detect Level 3 */
+       { 0x000002A9, 0x300A },    /* R681   - Mic Detect Level 4 */
        { 0x000002C3, 0x0000 },    /* R707   - Mic noise mix control 1 */
        { 0x000002D3, 0x0000 },    /* R723   - Jack detect analogue */
        { 0x00000300, 0x0000 },    /* R768   - Input Enables */
@@ -1392,6 +1401,7 @@ static bool wm5110_readable_register(struct device *dev, unsigned int reg)
        case ARIZONA_FLL1_CONTROL_4:
        case ARIZONA_FLL1_CONTROL_5:
        case ARIZONA_FLL1_CONTROL_6:
+       case ARIZONA_FLL1_CONTROL_7:
        case ARIZONA_FLL1_LOOP_FILTER_TEST_1:
        case ARIZONA_FLL1_NCO_TEST_0:
        case ARIZONA_FLL1_SYNCHRONISER_1:
@@ -1400,6 +1410,7 @@ static bool wm5110_readable_register(struct device *dev, unsigned int reg)
        case ARIZONA_FLL1_SYNCHRONISER_4:
        case ARIZONA_FLL1_SYNCHRONISER_5:
        case ARIZONA_FLL1_SYNCHRONISER_6:
+       case ARIZONA_FLL1_SYNCHRONISER_7:
        case ARIZONA_FLL1_SPREAD_SPECTRUM:
        case ARIZONA_FLL1_GPIO_CLOCK:
        case ARIZONA_FLL2_CONTROL_1:
@@ -1408,6 +1419,7 @@ static bool wm5110_readable_register(struct device *dev, unsigned int reg)
        case ARIZONA_FLL2_CONTROL_4:
        case ARIZONA_FLL2_CONTROL_5:
        case ARIZONA_FLL2_CONTROL_6:
+       case ARIZONA_FLL2_CONTROL_7:
        case ARIZONA_FLL2_LOOP_FILTER_TEST_1:
        case ARIZONA_FLL2_NCO_TEST_0:
        case ARIZONA_FLL2_SYNCHRONISER_1:
@@ -1416,6 +1428,7 @@ static bool wm5110_readable_register(struct device *dev, unsigned int reg)
        case ARIZONA_FLL2_SYNCHRONISER_4:
        case ARIZONA_FLL2_SYNCHRONISER_5:
        case ARIZONA_FLL2_SYNCHRONISER_6:
+       case ARIZONA_FLL2_SYNCHRONISER_7:
        case ARIZONA_FLL2_SPREAD_SPECTRUM:
        case ARIZONA_FLL2_GPIO_CLOCK:
        case ARIZONA_MIC_CHARGE_PUMP_1:
@@ -1430,6 +1443,10 @@ static bool wm5110_readable_register(struct device *dev, unsigned int reg)
        case ARIZONA_MIC_DETECT_1:
        case ARIZONA_MIC_DETECT_2:
        case ARIZONA_MIC_DETECT_3:
+       case ARIZONA_MIC_DETECT_LEVEL_1:
+       case ARIZONA_MIC_DETECT_LEVEL_2:
+       case ARIZONA_MIC_DETECT_LEVEL_3:
+       case ARIZONA_MIC_DETECT_LEVEL_4:
        case ARIZONA_MIC_NOISE_MIX_CONTROL_1:
        case ARIZONA_JACK_DETECT_ANALOGUE:
        case ARIZONA_INPUT_ENABLES:
@@ -2332,6 +2349,7 @@ static bool wm5110_volatile_register(struct device *dev, unsigned int reg)
        case ARIZONA_IRQ_PIN_STATUS:
        case ARIZONA_AOD_IRQ1:
        case ARIZONA_AOD_IRQ2:
+       case ARIZONA_FX_CTRL2:
        case ARIZONA_ASRC_STATUS:
        case ARIZONA_DSP_STATUS:
        case ARIZONA_DSP1_CONTROL_1:
index 521340a708d3a76d9ae6ef8241f085bcd623439a..5c459f469224a61719d3258e948c0f0f3ecef856 100644 (file)
@@ -1618,7 +1618,7 @@ EXPORT_SYMBOL_GPL(wm831x_regmap_config);
  */
 int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq)
 {
-       struct wm831x_pdata *pdata = wm831x->dev->platform_data;
+       struct wm831x_pdata *pdata = dev_get_platdata(wm831x->dev);
        int rev, wm831x_num;
        enum wm831x_parent parent;
        int ret, i;
index 804e56ec99eb284f4c32e1f44f8b7a143d4f340c..64e512eadf1718de932218ba2c3f60b7c216e257 100644 (file)
@@ -571,7 +571,7 @@ static struct irq_domain_ops wm831x_irq_domain_ops = {
 
 int wm831x_irq_init(struct wm831x *wm831x, int irq)
 {
-       struct wm831x_pdata *pdata = wm831x->dev->platform_data;
+       struct wm831x_pdata *pdata = dev_get_platdata(wm831x->dev);
        struct irq_domain *domain;
        int i, ret, irq_base;
 
index e7ed14f661d8eb1360d7974ae0bf64ec90ea0401..07de3cc5a0d91db385a8ef755e8bf89d3817477b 100644 (file)
@@ -34,7 +34,6 @@ static int wm831x_spi_probe(struct spi_device *spi)
        if (wm831x == NULL)
                return -ENOMEM;
 
-       spi->bits_per_word = 16;
        spi->mode = SPI_MODE_0;
 
        spi_set_drvdata(spi, wm831x);
index 2e57101c8d3dabfc395f2bf764bddc23a411bbe5..f919def05e24778e53678385075b6fef440e1cf7 100644 (file)
@@ -27,6 +27,7 @@ static int wm8350_i2c_probe(struct i2c_client *i2c,
                            const struct i2c_device_id *id)
 {
        struct wm8350 *wm8350;
+       struct wm8350_platform_data *pdata = dev_get_platdata(&i2c->dev);
        int ret = 0;
 
        wm8350 = devm_kzalloc(&i2c->dev, sizeof(struct wm8350), GFP_KERNEL);
@@ -44,7 +45,7 @@ static int wm8350_i2c_probe(struct i2c_client *i2c,
        i2c_set_clientdata(i2c, wm8350);
        wm8350->dev = &i2c->dev;
 
-       return wm8350_device_init(wm8350, i2c->irq, i2c->dev.platform_data);
+       return wm8350_device_init(wm8350, i2c->irq, pdata);
 }
 
 static int wm8350_i2c_remove(struct i2c_client *i2c)
index 639ca359242f849cebfe407333e6107c0bca4bbb..d66d256551fb77768fc9d458d5493c976ec25f30 100644 (file)
@@ -178,7 +178,7 @@ static int wm8400_i2c_probe(struct i2c_client *i2c,
        wm8400->dev = &i2c->dev;
        i2c_set_clientdata(i2c, wm8400);
 
-       ret = wm8400_init(wm8400, i2c->dev.platform_data);
+       ret = wm8400_init(wm8400, dev_get_platdata(&i2c->dev));
        if (ret != 0)
                goto err;
 
index 781115e8dca90823bbae297ab4eadc9ff63fa791..e1c283e6d4e54eca7e88e1404ed9d1ec72497cf9 100644 (file)
@@ -201,35 +201,7 @@ static int wm8994_suspend(struct device *dev)
        int ret;
 
        /* Don't actually go through with the suspend if the CODEC is
-        * still active (eg, for audio passthrough from CP. */
-       ret = wm8994_reg_read(wm8994, WM8994_POWER_MANAGEMENT_1);
-       if (ret < 0) {
-               dev_err(dev, "Failed to read power status: %d\n", ret);
-       } else if (ret & WM8994_VMID_SEL_MASK) {
-               dev_dbg(dev, "CODEC still active, ignoring suspend\n");
-               return 0;
-       }
-
-       ret = wm8994_reg_read(wm8994, WM8994_POWER_MANAGEMENT_4);
-       if (ret < 0) {
-               dev_err(dev, "Failed to read power status: %d\n", ret);
-       } else if (ret & (WM8994_AIF2ADCL_ENA | WM8994_AIF2ADCR_ENA |
-                         WM8994_AIF1ADC2L_ENA | WM8994_AIF1ADC2R_ENA |
-                         WM8994_AIF1ADC1L_ENA | WM8994_AIF1ADC1R_ENA)) {
-               dev_dbg(dev, "CODEC still active, ignoring suspend\n");
-               return 0;
-       }
-
-       ret = wm8994_reg_read(wm8994, WM8994_POWER_MANAGEMENT_5);
-       if (ret < 0) {
-               dev_err(dev, "Failed to read power status: %d\n", ret);
-       } else if (ret & (WM8994_AIF2DACL_ENA | WM8994_AIF2DACR_ENA |
-                         WM8994_AIF1DAC2L_ENA | WM8994_AIF1DAC2R_ENA |
-                         WM8994_AIF1DAC1L_ENA | WM8994_AIF1DAC1R_ENA)) {
-               dev_dbg(dev, "CODEC still active, ignoring suspend\n");
-               return 0;
-       }
-
+        * still active for accessory detect. */
        switch (wm8994->type) {
        case WM8958:
        case WM1811:
@@ -245,20 +217,6 @@ static int wm8994_suspend(struct device *dev)
                break;
        }
 
-       switch (wm8994->type) {
-       case WM1811:
-               ret = wm8994_reg_read(wm8994, WM8994_ANTIPOP_2);
-               if (ret < 0) {
-                       dev_err(dev, "Failed to read jackdet: %d\n", ret);
-               } else if (ret & WM1811_JACKDET_MODE_MASK) {
-                       dev_dbg(dev, "CODEC still active, ignoring suspend\n");
-                       return 0;
-               }
-               break;
-       default:
-               break;
-       }
-
        /* Disable LDO pulldowns while the device is suspended if we
         * don't know that something will be driving them. */
        if (!wm8994->ldo_ena_always_driven)
index d3a184a240f5e0a7cb683bb330b82d01c15eaeaf..e74dedda5b557caabedf38b045414e7ec7830bd6 100644 (file)
@@ -193,7 +193,7 @@ int wm8994_irq_init(struct wm8994 *wm8994)
 {
        int ret;
        unsigned long irqflags;
-       struct wm8994_pdata *pdata = wm8994->dev->platform_data;
+       struct wm8994_pdata *pdata = dev_get_platdata(wm8994->dev);
 
        if (!wm8994->irq) {
                dev_warn(wm8994->dev,
index 49a5bca418bdb7a6edf24669a9821851b9593aa9..5d088551196b6d9eb61f5443817bbbe9a20492c4 100644 (file)
@@ -1313,7 +1313,7 @@ int mmc_regulator_get_supply(struct mmc_host *mmc)
 
        supply = devm_regulator_get(dev, "vmmc");
        mmc->supply.vmmc = supply;
-       mmc->supply.vqmmc = devm_regulator_get(dev, "vqmmc");
+       mmc->supply.vqmmc = devm_regulator_get_optional(dev, "vqmmc");
 
        if (IS_ERR(supply))
                return PTR_ERR(supply);
index ee5f1676f14e61edc2d81acf945fed1a5bcb300d..542407363dd2012992831208cadfc8df592e3163 100644 (file)
@@ -2231,7 +2231,7 @@ int dw_mci_probe(struct dw_mci *host)
                }
        }
 
-       host->vmmc = devm_regulator_get(host->dev, "vmmc");
+       host->vmmc = devm_regulator_get_optional(host->dev, "vmmc");
        if (IS_ERR(host->vmmc)) {
                ret = PTR_ERR(host->vmmc);
                if (ret == -EPROBE_DEFER)
index 2c5a91bb8ec3dbcf1433e114625e94b7d4c3cfe6..1956a3df7cf3f0e2f03dd36775f478b454272fb6 100644 (file)
@@ -83,7 +83,7 @@ struct pxamci_host {
 static inline void pxamci_init_ocr(struct pxamci_host *host)
 {
 #ifdef CONFIG_REGULATOR
-       host->vcc = regulator_get(mmc_dev(host->mmc), "vmmc");
+       host->vcc = regulator_get_optional(mmc_dev(host->mmc), "vmmc");
 
        if (IS_ERR(host->vcc))
                host->vcc = NULL;
index a78bd4f3aeccba6615fb71bbdcde4d283b89ba29..dd2c083c434da3f075c6b3566085f2555683315e 100644 (file)
@@ -2966,7 +2966,7 @@ int sdhci_add_host(struct sdhci_host *host)
                mmc->caps |= MMC_CAP_NEEDS_POLL;
 
        /* If vqmmc regulator and no 1.8V signalling, then there's no UHS */
-       host->vqmmc = regulator_get(mmc_dev(mmc), "vqmmc");
+       host->vqmmc = regulator_get_optional(mmc_dev(mmc), "vqmmc");
        if (IS_ERR_OR_NULL(host->vqmmc)) {
                if (PTR_ERR(host->vqmmc) < 0) {
                        pr_info("%s: no vqmmc regulator found\n",
@@ -3042,7 +3042,7 @@ int sdhci_add_host(struct sdhci_host *host)
 
        ocr_avail = 0;
 
-       host->vmmc = regulator_get(mmc_dev(mmc), "vmmc");
+       host->vmmc = regulator_get_optional(mmc_dev(mmc), "vmmc");
        if (IS_ERR_OR_NULL(host->vmmc)) {
                if (PTR_ERR(host->vmmc) < 0) {
                        pr_info("%s: no vmmc regulator found\n",
index b72edb72f7d269ccbeb75ff3c9193974e3d11a3e..718843cfacfca1f6712410f73072d5abcd825c8f 100644 (file)
@@ -795,9 +795,13 @@ static void tmio_mmc_power_on(struct tmio_mmc_host *host, unsigned short vdd)
         * omap_hsmmc.c driver does.
         */
        if (!IS_ERR(mmc->supply.vqmmc) && !ret) {
-               regulator_enable(mmc->supply.vqmmc);
+               ret = regulator_enable(mmc->supply.vqmmc);
                udelay(200);
        }
+
+       if (ret < 0)
+               dev_dbg(&host->pdev->dev, "Regulators failed to power up: %d\n",
+                       ret);
 }
 
 static void tmio_mmc_power_off(struct tmio_mmc_host *host)
index 6eeb84c81bc2ceb16a451fae1bc4194505b2dc7f..5c813907661c3415c979b1176bf937f7c086f2c8 100644 (file)
@@ -4,7 +4,7 @@
  * Copyright © 2006-2008  Florian Fainelli <florian@openwrt.org>
  *                       Mike Albon <malbon@openwrt.org>
  * Copyright © 2009-2010  Daniel Dickinson <openwrt@cshore.neomailbox.net>
- * Copyright © 2011-2012  Jonas Gorski <jonas.gorski@gmail.com>
+ * Copyright © 2011-2013  Jonas Gorski <jonas.gorski@gmail.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
 #include <linux/crc32.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
+#include <linux/sizes.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/partitions.h>
 
+#include <asm/mach-bcm63xx/bcm63xx_nvram.h>
 #include <asm/mach-bcm63xx/bcm963xx_tag.h>
 #include <asm/mach-bcm63xx/board_bcm963xx.h>
 
 #define BCM63XX_EXTENDED_SIZE  0xBFC00000      /* Extended flash address */
 
-#define BCM63XX_CFE_BLOCK_SIZE 0x10000         /* always at least 64KiB */
+#define BCM63XX_CFE_BLOCK_SIZE SZ_64K          /* always at least 64KiB */
 
 #define BCM63XX_CFE_MAGIC_OFFSET 0x4e0
 
@@ -90,7 +92,8 @@ static int bcm63xx_parse_cfe_partitions(struct mtd_info *master,
                              BCM63XX_CFE_BLOCK_SIZE);
 
        cfelen = cfe_erasesize;
-       nvramlen = cfe_erasesize;
+       nvramlen = bcm63xx_nvram_get_psi_size() * SZ_1K;
+       nvramlen = roundup(nvramlen, cfe_erasesize);
 
        /* Allocate memory for buffer */
        buf = vmalloc(sizeof(struct bcm_tag));
index fff665d59a0dba759f63854bb95e25bbb998b64f..89b9d689153298f3b7a3965adf811a3d9963de7c 100644 (file)
@@ -1571,8 +1571,8 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
        xip_enable(map, chip, adr);
        /* FIXME - should have reset delay before continuing */
 
-       printk(KERN_WARNING "MTD %s(): software timeout\n",
-              __func__ );
+       printk(KERN_WARNING "MTD %s(): software timeout, address:0x%.8lx.\n",
+              __func__, adr);
 
        ret = -EIO;
  op_done:
index 74dbb6bcf4883a8a0461685ea0f180de7ad3da0d..ffb36ba8a6e050e0d8da73a7661cd7734905707d 100644 (file)
@@ -211,9 +211,7 @@ static inline struct mtd_info *cfi_cmdset_unknown(struct map_info *map,
 
        probe_function = __symbol_get(probename);
        if (!probe_function) {
-               char modname[sizeof("cfi_cmdset_%4.4X")];
-               sprintf(modname, "cfi_cmdset_%4.4X", type);
-               request_module(modname);
+               request_module("cfi_cmdset_%4.4X", type);
                probe_function = __symbol_get(probename);
        }
 
index c443f527a53a5d9dae069701c9b1e0c497e63999..7c0b27d132b1bca8aa546cedac726cf5e92c6613 100644 (file)
 #define PM49FL008      0x006A
 
 /* Sharp */
-#define LH28F640BF     0x00b0
+#define LH28F640BF     0x00B0
 
 /* ST - www.st.com */
 #define M29F800AB      0x0058
@@ -1299,13 +1299,14 @@ static const struct amd_flash_info jedec_table[] = {
                .mfr_id         = CFI_MFR_SHARP,
                .dev_id         = LH28F640BF,
                .name           = "LH28F640BF",
-               .devtypes       = CFI_DEVICETYPE_X8,
+               .devtypes       = CFI_DEVICETYPE_X16,
                .uaddr          = MTD_UADDR_UNNECESSARY,
-               .dev_size       = SIZE_4MiB,
-               .cmd_set        = P_ID_INTEL_STD,
-               .nr_regions     = 1,
+               .dev_size       = SIZE_8MiB,
+               .cmd_set        = P_ID_INTEL_EXT,
+               .nr_regions     = 2,
                .regions        = {
-                       ERASEINFO(0x40000,16),
+                       ERASEINFO(0x10000, 127),
+                       ERASEINFO(0x02000, 8),
                }
        }, {
                .mfr_id         = CFI_MFR_SST,
index 2a4d55e4b3628b7437fb82d7d51a929c92cf79e2..74ab4b7e523eb821c0ee6ceedf85a3ac23b7c473 100644 (file)
@@ -224,59 +224,4 @@ config BCH_CONST_T
        default 4
 endif
 
-config MTD_DOCPROBE
-       tristate
-       select MTD_DOCECC
-
-config MTD_DOCECC
-       tristate
-
-config MTD_DOCPROBE_ADVANCED
-       bool "Advanced detection options for DiskOnChip"
-       depends on MTD_DOCPROBE
-       help
-         This option allows you to specify nonstandard address at which to
-         probe for a DiskOnChip, or to change the detection options.  You
-         are unlikely to need any of this unless you are using LinuxBIOS.
-         Say 'N'.
-
-config MTD_DOCPROBE_ADDRESS
-       hex "Physical address of DiskOnChip" if MTD_DOCPROBE_ADVANCED
-       depends on MTD_DOCPROBE
-       default "0x0"
-       ---help---
-         By default, the probe for DiskOnChip devices will look for a
-         DiskOnChip at every multiple of 0x2000 between 0xC8000 and 0xEE000.
-         This option allows you to specify a single address at which to probe
-         for the device, which is useful if you have other devices in that
-         range which get upset when they are probed.
-
-         (Note that on PowerPC, the normal probe will only check at
-         0xE4000000.)
-
-         Normally, you should leave this set to zero, to allow the probe at
-         the normal addresses.
-
-config MTD_DOCPROBE_HIGH
-       bool "Probe high addresses"
-       depends on MTD_DOCPROBE_ADVANCED
-       help
-         By default, the probe for DiskOnChip devices will look for a
-         DiskOnChip at every multiple of 0x2000 between 0xC8000 and 0xEE000.
-         This option changes to make it probe between 0xFFFC8000 and
-         0xFFFEE000.  Unless you are using LinuxBIOS, this is unlikely to be
-         useful to you.  Say 'N'.
-
-config MTD_DOCPROBE_55AA
-       bool "Probe for 0x55 0xAA BIOS Extension Signature"
-       depends on MTD_DOCPROBE_ADVANCED
-       help
-         Check for the 0x55 0xAA signature of a DiskOnChip, and do not
-         continue with probing if it is absent.  The signature will always be
-         present for a DiskOnChip 2000 or a normal DiskOnChip Millennium.
-         Only if you have overwritten the first block of a DiskOnChip
-         Millennium will it be absent.  Enable this option if you are using
-         LinuxBIOS or if you need to recover a DiskOnChip Millennium on which
-         you have managed to wipe the first block.
-
 endmenu
index 18e7761137a33037a21aa61585e20d98f47b172e..2060856dbf977ad580923d06690f514c5db656c5 100644 (file)
@@ -1,6 +1,7 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/slab.h>
+#include <linux/delay.h>
 #include <linux/mtd/mtd.h>
 #include <linux/platform_device.h>
 #include <linux/bcma/bcma.h>
@@ -12,6 +13,57 @@ MODULE_DESCRIPTION("Serial flash driver for BCMA bus");
 
 static const char * const probes[] = { "bcm47xxpart", NULL };
 
+/**************************************************
+ * Various helpers
+ **************************************************/
+
+static void bcm47xxsflash_cmd(struct bcm47xxsflash *b47s, u32 opcode)
+{
+       int i;
+
+       b47s->cc_write(b47s, BCMA_CC_FLASHCTL, BCMA_CC_FLASHCTL_START | opcode);
+       for (i = 0; i < 1000; i++) {
+               if (!(b47s->cc_read(b47s, BCMA_CC_FLASHCTL) &
+                     BCMA_CC_FLASHCTL_BUSY))
+                       return;
+               cpu_relax();
+       }
+       pr_err("Control command failed (timeout)!\n");
+}
+
+static int bcm47xxsflash_poll(struct bcm47xxsflash *b47s, int timeout)
+{
+       unsigned long deadline = jiffies + timeout;
+
+       do {
+               switch (b47s->type) {
+               case BCM47XXSFLASH_TYPE_ST:
+                       bcm47xxsflash_cmd(b47s, OPCODE_ST_RDSR);
+                       if (!(b47s->cc_read(b47s, BCMA_CC_FLASHDATA) &
+                             SR_ST_WIP))
+                               return 0;
+                       break;
+               case BCM47XXSFLASH_TYPE_ATMEL:
+                       bcm47xxsflash_cmd(b47s, OPCODE_AT_STATUS);
+                       if (b47s->cc_read(b47s, BCMA_CC_FLASHDATA) &
+                           SR_AT_READY)
+                               return 0;
+                       break;
+               }
+
+               cpu_relax();
+               udelay(1);
+       } while (!time_after_eq(jiffies, deadline));
+
+       pr_err("Timeout waiting for flash to be ready!\n");
+
+       return -EBUSY;
+}
+
+/**************************************************
+ * MTD ops
+ **************************************************/
+
 static int bcm47xxsflash_read(struct mtd_info *mtd, loff_t from, size_t len,
                              size_t *retlen, u_char *buf)
 {
@@ -48,6 +100,17 @@ static void bcm47xxsflash_fill_mtd(struct bcm47xxsflash *b47s)
  * BCMA
  **************************************************/
 
+static int bcm47xxsflash_bcma_cc_read(struct bcm47xxsflash *b47s, u16 offset)
+{
+       return bcma_cc_read32(b47s->bcma_cc, offset);
+}
+
+static void bcm47xxsflash_bcma_cc_write(struct bcm47xxsflash *b47s, u16 offset,
+                                       u32 value)
+{
+       bcma_cc_write32(b47s->bcma_cc, offset, value);
+}
+
 static int bcm47xxsflash_bcma_probe(struct platform_device *pdev)
 {
        struct bcma_sflash *sflash = dev_get_platdata(&pdev->dev);
@@ -62,6 +125,8 @@ static int bcm47xxsflash_bcma_probe(struct platform_device *pdev)
        sflash->priv = b47s;
 
        b47s->bcma_cc = container_of(sflash, struct bcma_drv_cc, sflash);
+       b47s->cc_read = bcm47xxsflash_bcma_cc_read;
+       b47s->cc_write = bcm47xxsflash_bcma_cc_write;
 
        switch (b47s->bcma_cc->capabilities & BCMA_CC_CAP_FLASHT) {
        case BCMA_CC_FLASHT_STSER:
@@ -84,6 +149,9 @@ static int bcm47xxsflash_bcma_probe(struct platform_device *pdev)
                goto err_dev_reg;
        }
 
+       if (bcm47xxsflash_poll(b47s, HZ / 10))
+               pr_warn("Serial flash busy\n");
+
        return 0;
 
 err_dev_reg:
index f22f8c46dfc059566ae8cd4fcf4c90f810a7fd2a..fe93daf4f4894a35f2d7b8a65363d1d163d09ffc 100644 (file)
@@ -60,6 +60,8 @@ enum bcm47xxsflash_type {
 
 struct bcm47xxsflash {
        struct bcma_drv_cc *bcma_cc;
+       int (*cc_read)(struct bcm47xxsflash *b47s, u16 offset);
+       void (*cc_write)(struct bcm47xxsflash *b47s, u16 offset, u32 value);
 
        enum bcm47xxsflash_type type;
 
index e081bfeaaf7da1941c9dc243856c3ab2ba9abde2..5cb4c04726b2e0eb58dd6452b293602f82fa1945 100644 (file)
@@ -6,6 +6,9 @@
  *
  * Licence: GPL
  */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/fs.h>
 #include <linux/blkdev.h>
 #include <linux/mount.h>
 #include <linux/slab.h>
 
-#define ERROR(fmt, args...) printk(KERN_ERR "block2mtd: " fmt "\n" , ## args)
-#define INFO(fmt, args...) printk(KERN_INFO "block2mtd: " fmt "\n" , ## args)
-
-
 /* Info for the block device */
 struct block2mtd_dev {
        struct list_head list;
@@ -84,7 +83,7 @@ static int block2mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
        err = _block2mtd_erase(dev, from, len);
        mutex_unlock(&dev->write_mutex);
        if (err) {
-               ERROR("erase failed err = %d", err);
+               pr_err("erase failed err = %d\n", err);
                instr->state = MTD_ERASE_FAILED;
        } else
                instr->state = MTD_ERASE_DONE;
@@ -239,13 +238,13 @@ static struct block2mtd_dev *add_device(char *devname, int erase_size)
 #endif
 
        if (IS_ERR(bdev)) {
-               ERROR("error: cannot open device %s", devname);
+               pr_err("error: cannot open device %s\n", devname);
                goto devinit_err;
        }
        dev->blkdev = bdev;
 
        if (MAJOR(bdev->bd_dev) == MTD_BLOCK_MAJOR) {
-               ERROR("attempting to use an MTD device as a block device");
+               pr_err("attempting to use an MTD device as a block device\n");
                goto devinit_err;
        }
 
@@ -277,9 +276,10 @@ static struct block2mtd_dev *add_device(char *devname, int erase_size)
                goto devinit_err;
        }
        list_add(&dev->list, &blkmtd_device_list);
-       INFO("mtd%d: [%s] erase_size = %dKiB [%d]", dev->mtd.index,
-                       dev->mtd.name + strlen("block2mtd: "),
-                       dev->mtd.erasesize >> 10, dev->mtd.erasesize);
+       pr_info("mtd%d: [%s] erase_size = %dKiB [%d]\n",
+               dev->mtd.index,
+               dev->mtd.name + strlen("block2mtd: "),
+               dev->mtd.erasesize >> 10, dev->mtd.erasesize);
        return dev;
 
 devinit_err:
@@ -339,17 +339,11 @@ static inline void kill_final_newline(char *str)
 }
 
 
-#define parse_err(fmt, args...) do {   \
-       ERROR(fmt, ## args);            \
-       return 0;                       \
-} while (0)
-
 #ifndef MODULE
 static int block2mtd_init_called = 0;
 static char block2mtd_paramline[80 + 12]; /* 80 for device, 12 for erase size */
 #endif
 
-
 static int block2mtd_setup2(const char *val)
 {
        char buf[80 + 12]; /* 80 for device, 12 for erase size */
@@ -359,8 +353,10 @@ static int block2mtd_setup2(const char *val)
        size_t erase_size = PAGE_SIZE;
        int i, ret;
 
-       if (strnlen(val, sizeof(buf)) >= sizeof(buf))
-               parse_err("parameter too long");
+       if (strnlen(val, sizeof(buf)) >= sizeof(buf)) {
+               pr_err("parameter too long\n");
+               return 0;
+       }
 
        strcpy(str, val);
        kill_final_newline(str);
@@ -368,20 +364,27 @@ static int block2mtd_setup2(const char *val)
        for (i = 0; i < 2; i++)
                token[i] = strsep(&str, ",");
 
-       if (str)
-               parse_err("too many arguments");
+       if (str) {
+               pr_err("too many arguments\n");
+               return 0;
+       }
 
-       if (!token[0])
-               parse_err("no argument");
+       if (!token[0]) {
+               pr_err("no argument\n");
+               return 0;
+       }
 
        name = token[0];
-       if (strlen(name) + 1 > 80)
-               parse_err("device name too long");
+       if (strlen(name) + 1 > 80) {
+               pr_err("device name too long\n");
+               return 0;
+       }
 
        if (token[1]) {
                ret = parse_num(&erase_size, token[1]);
                if (ret) {
-                       parse_err("illegal erase size");
+                       pr_err("illegal erase size\n");
+                       return 0;
                }
        }
 
@@ -444,8 +447,9 @@ static void block2mtd_exit(void)
                struct block2mtd_dev *dev = list_entry(pos, typeof(*dev), list);
                block2mtd_sync(&dev->mtd);
                mtd_device_unregister(&dev->mtd);
-               INFO("mtd%d: [%s] removed", dev->mtd.index,
-                               dev->mtd.name + strlen("block2mtd: "));
+               pr_info("mtd%d: [%s] removed\n",
+                       dev->mtd.index,
+                       dev->mtd.name + strlen("block2mtd: "));
                list_del(&dev->list);
                block2mtd_free_device(dev);
        }
index dccef9fdc1f276269566bcc4ba03ae6e7a87111d..d1dd6a33a0500831f78012f9be91afa0161e4574 100644 (file)
 #include <linux/interrupt.h>
 #include <linux/io.h>
 #include <linux/of.h>
+#include <linux/sched.h>
 #include <linux/pm_runtime.h>
 #include <linux/platform_data/elm.h>
 
+#define ELM_SYSCONFIG                  0x010
 #define ELM_IRQSTATUS                  0x018
 #define ELM_IRQENABLE                  0x01c
 #define ELM_LOCATION_CONFIG            0x020
 #define ELM_PAGE_CTRL                  0x080
 #define ELM_SYNDROME_FRAGMENT_0                0x400
+#define ELM_SYNDROME_FRAGMENT_1                0x404
+#define ELM_SYNDROME_FRAGMENT_2                0x408
+#define ELM_SYNDROME_FRAGMENT_3                0x40c
+#define ELM_SYNDROME_FRAGMENT_4                0x410
+#define ELM_SYNDROME_FRAGMENT_5                0x414
 #define ELM_SYNDROME_FRAGMENT_6                0x418
 #define ELM_LOCATION_STATUS            0x800
 #define ELM_ERROR_LOCATION_0           0x880
 #define SYNDROME_FRAGMENT_REG_SIZE     0x40
 #define ERROR_LOCATION_SIZE            0x100
 
+/*
+ * struct elm_registers - snapshot of the ELM register file.
+ *
+ * Filled by elm_context_save() and written back by elm_context_restore()
+ * so that the ELM configuration survives the power-down across a
+ * runtime-PM suspend/resume cycle.
+ *
+ * NOTE(review): the visible save/restore paths only touch fragments
+ * 6, 3, 2, 1 and 0; elm_syndrome_fragment_5/_4 appear unused here —
+ * presumably reserved for a larger BCH scheme, confirm before removing.
+ */
+struct elm_registers {
+       u32 elm_irqenable;
+       u32 elm_sysconfig;
+       u32 elm_location_config;
+       u32 elm_page_ctrl;
+       u32 elm_syndrome_fragment_6[ERROR_VECTOR_MAX]; /* one slot per error vector */
+       u32 elm_syndrome_fragment_5[ERROR_VECTOR_MAX];
+       u32 elm_syndrome_fragment_4[ERROR_VECTOR_MAX];
+       u32 elm_syndrome_fragment_3[ERROR_VECTOR_MAX];
+       u32 elm_syndrome_fragment_2[ERROR_VECTOR_MAX];
+       u32 elm_syndrome_fragment_1[ERROR_VECTOR_MAX];
+       u32 elm_syndrome_fragment_0[ERROR_VECTOR_MAX];
+};
+
 struct elm_info {
        struct device *dev;
        void __iomem *elm_base;                 /* mapped ELM register window */
        struct completion elm_completion;       /* signalled from the ELM ISR */
        struct list_head list;                  /* link in the global elm_devices list */
        enum bch_ecc bch_type;
+       struct elm_registers elm_regs;          /* register snapshot for suspend/resume */
 };
 
 static LIST_HEAD(elm_devices);
@@ -346,14 +368,9 @@ static int elm_probe(struct platform_device *pdev)
        }
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res) {
-               dev_err(&pdev->dev, "no memory resource defined\n");
-               return -ENODEV;
-       }
-
-       info->elm_base = devm_request_and_ioremap(&pdev->dev, res);
-       if (!info->elm_base)
-               return -EADDRNOTAVAIL;
+       info->elm_base = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(info->elm_base))
+               return PTR_ERR(info->elm_base);
 
        ret = devm_request_irq(&pdev->dev, irq->start, elm_isr, 0,
                        pdev->name, info);
@@ -381,10 +398,103 @@ static int elm_remove(struct platform_device *pdev)
 {
        pm_runtime_put_sync(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
-       platform_set_drvdata(pdev, NULL);
        return 0;
 }
 
+/**
+ * elm_context_save - save ELM configuration registers
+ * @info: ELM device info
+ *
+ * Saves the ELM configuration so it can be restored after the hardware
+ * is powered down (runtime/system suspend).
+ *
+ * Returns 0 on success, -EINVAL for an unsupported bch_type.
+ */
+static int elm_context_save(struct elm_info *info)
+{
+       struct elm_registers *regs = &info->elm_regs;
+       enum bch_ecc bch_type = info->bch_type;
+       u32 offset = 0, i;
+
+       regs->elm_irqenable       = elm_read_reg(info, ELM_IRQENABLE);
+       regs->elm_sysconfig       = elm_read_reg(info, ELM_SYSCONFIG);
+       regs->elm_location_config = elm_read_reg(info, ELM_LOCATION_CONFIG);
+       regs->elm_page_ctrl       = elm_read_reg(info, ELM_PAGE_CTRL);
+       for (i = 0; i < ERROR_VECTOR_MAX; i++) {
+               offset = i * SYNDROME_FRAGMENT_REG_SIZE;
+               switch (bch_type) {
+               case BCH8_ECC:
+                       regs->elm_syndrome_fragment_3[i] = elm_read_reg(info,
+                                       ELM_SYNDROME_FRAGMENT_3 + offset);
+                       regs->elm_syndrome_fragment_2[i] = elm_read_reg(info,
+                                       ELM_SYNDROME_FRAGMENT_2 + offset);
+                       /* fall through - BCH8 also uses the BCH4 fragments */
+               case BCH4_ECC:
+                       regs->elm_syndrome_fragment_1[i] = elm_read_reg(info,
+                                       ELM_SYNDROME_FRAGMENT_1 + offset);
+                       regs->elm_syndrome_fragment_0[i] = elm_read_reg(info,
+                                       ELM_SYNDROME_FRAGMENT_0 + offset);
+                       /*
+                        * Fix: without this break both valid cases fell
+                        * through into the default arm, so the function
+                        * always returned -EINVAL and the FRAGMENT_6 save
+                        * below was unreachable.
+                        */
+                       break;
+               default:
+                       return -EINVAL;
+               }
+               /* ELM SYNDROME_VALID bit in SYNDROME_FRAGMENT_6[] needs
+                * to be saved for all BCH schemes */
+               regs->elm_syndrome_fragment_6[i] = elm_read_reg(info,
+                                       ELM_SYNDROME_FRAGMENT_6 + offset);
+       }
+       return 0;
+}
+
+/**
+ * elm_context_restore - restore ELM configuration registers
+ * @info: ELM device info
+ *
+ * Writes the configuration saved during power-down back into the ELM
+ * registers. The SYNDROME_VALID bit in SYNDROME_FRAGMENT_6 is written
+ * last because it triggers the error-location state machine.
+ *
+ * Returns 0 on success, -EINVAL for an unsupported bch_type.
+ */
+static int elm_context_restore(struct elm_info *info)
+{
+       struct elm_registers *regs = &info->elm_regs;
+       enum bch_ecc bch_type = info->bch_type;
+       u32 offset = 0, i;
+
+       elm_write_reg(info, ELM_IRQENABLE,       regs->elm_irqenable);
+       elm_write_reg(info, ELM_SYSCONFIG,       regs->elm_sysconfig);
+       elm_write_reg(info, ELM_LOCATION_CONFIG, regs->elm_location_config);
+       elm_write_reg(info, ELM_PAGE_CTRL,       regs->elm_page_ctrl);
+       for (i = 0; i < ERROR_VECTOR_MAX; i++) {
+               offset = i * SYNDROME_FRAGMENT_REG_SIZE;
+               switch (bch_type) {
+               case BCH8_ECC:
+                       elm_write_reg(info, ELM_SYNDROME_FRAGMENT_3 + offset,
+                                       regs->elm_syndrome_fragment_3[i]);
+                       elm_write_reg(info, ELM_SYNDROME_FRAGMENT_2 + offset,
+                                       regs->elm_syndrome_fragment_2[i]);
+                       /* fall through - BCH8 also uses the BCH4 fragments */
+               case BCH4_ECC:
+                       elm_write_reg(info, ELM_SYNDROME_FRAGMENT_1 + offset,
+                                       regs->elm_syndrome_fragment_1[i]);
+                       elm_write_reg(info, ELM_SYNDROME_FRAGMENT_0 + offset,
+                                       regs->elm_syndrome_fragment_0[i]);
+                       /*
+                        * Fix: without this break both valid cases fell
+                        * through into the default arm, so the function
+                        * always returned -EINVAL and FRAGMENT_6 (and the
+                        * SYNDROME_VALID trigger) was never restored.
+                        */
+                       break;
+               default:
+                       return -EINVAL;
+               }
+               /* ELM_SYNDROME_VALID bit to be set in last to trigger FSM */
+               elm_write_reg(info, ELM_SYNDROME_FRAGMENT_6 + offset,
+                                       regs->elm_syndrome_fragment_6[i] &
+                                                        ELM_SYNDROME_VALID);
+       }
+       return 0;
+}
+
+/*
+ * System-suspend hook: snapshot the ELM registers while the device is
+ * still powered, then drop the runtime-PM reference taken in elm_resume
+ * (/probe) so the hardware may be powered down.
+ *
+ * NOTE(review): elm_context_save()'s error return is ignored here —
+ * confirm whether a failed save should abort the suspend.
+ */
+static int elm_suspend(struct device *dev)
+{
+       struct elm_info *info = dev_get_drvdata(dev);
+       elm_context_save(info);
+       pm_runtime_put_sync(dev);
+       return 0;
+}
+
+/*
+ * System-resume hook: power the device back up first (get_sync must
+ * precede any register access), then write the saved configuration back
+ * into the ELM registers.
+ */
+static int elm_resume(struct device *dev)
+{
+       struct elm_info *info = dev_get_drvdata(dev);
+       pm_runtime_get_sync(dev);
+       elm_context_restore(info);
+       return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(elm_pm_ops, elm_suspend, elm_resume);
+
 #ifdef CONFIG_OF
 static const struct of_device_id elm_of_match[] = {
        { .compatible = "ti,am3352-elm" },
@@ -398,6 +508,7 @@ static struct platform_driver elm_driver = {
                .name   = "elm",
                .owner  = THIS_MODULE,
                .of_match_table = of_match_ptr(elm_of_match),
+               .pm     = &elm_pm_ops,
        },
        .probe  = elm_probe,
        .remove = elm_remove,
index 2f3d2a5ff349a174dc0f473f07c67f477f0139c4..26b14f9fcac6d129c3a2bc5c7a9fa7baea234691 100644 (file)
 #define        OPCODE_FAST_READ        0x0b    /* Read data bytes (high frequency) */
 #define        OPCODE_PP               0x02    /* Page program (up to 256 bytes) */
 #define        OPCODE_BE_4K            0x20    /* Erase 4KiB block */
+#define        OPCODE_BE_4K_PMC        0xd7    /* Erase 4KiB block on PMC chips */
 #define        OPCODE_BE_32K           0x52    /* Erase 32KiB block */
 #define        OPCODE_CHIP_ERASE       0xc7    /* Erase whole flash chip */
 #define        OPCODE_SE               0xd8    /* Sector erase (usually 64KiB) */
 #define        OPCODE_RDID             0x9f    /* Read JEDEC ID */
 
+/* 4-byte address opcodes - used on Spansion and some Macronix flashes. */
+#define        OPCODE_NORM_READ_4B     0x13    /* Read data bytes (low frequency) */
+#define        OPCODE_FAST_READ_4B     0x0c    /* Read data bytes (high frequency) */
+#define        OPCODE_PP_4B            0x12    /* Page program (up to 256 bytes) */
+#define        OPCODE_SE_4B            0xdc    /* Sector erase (usually 64KiB) */
+
 /* Used for SST flashes only. */
 #define        OPCODE_BP               0x02    /* Byte program */
 #define        OPCODE_WRDI             0x04    /* Write disable */
 #define        OPCODE_AAI_WP           0xad    /* Auto address increment word program */
 
-/* Used for Macronix flashes only. */
+/* Used for Macronix and Winbond flashes. */
 #define        OPCODE_EN4B             0xb7    /* Enter 4-byte mode */
 #define        OPCODE_EX4B             0xe9    /* Exit 4-byte mode */
 
@@ -84,6 +91,8 @@ struct m25p {
        u16                     page_size;
        u16                     addr_width;
        u8                      erase_opcode;
+       u8                      read_opcode;
+       u8                      program_opcode;
        u8                      *command;
        bool                    fast_read;
 };
@@ -161,6 +170,7 @@ static inline int set_4byte(struct m25p *flash, u32 jedec_id, int enable)
 {
        switch (JEDEC_MFR(jedec_id)) {
        case CFI_MFR_MACRONIX:
+       case CFI_MFR_ST: /* Micron, actually */
        case 0xEF /* winbond */:
                flash->command[0] = enable ? OPCODE_EN4B : OPCODE_EX4B;
                return spi_write(flash->spi, flash->command, 1);
@@ -371,7 +381,7 @@ static int m25p80_read(struct mtd_info *mtd, loff_t from, size_t len,
         */
 
        /* Set up the write data buffer. */
-       opcode = flash->fast_read ? OPCODE_FAST_READ : OPCODE_NORM_READ;
+       opcode = flash->read_opcode;
        flash->command[0] = opcode;
        m25p_addr2cmd(flash, from, flash->command);
 
@@ -422,7 +432,7 @@ static int m25p80_write(struct mtd_info *mtd, loff_t to, size_t len,
        write_enable(flash);
 
        /* Set up the opcode in the write buffer. */
-       flash->command[0] = OPCODE_PP;
+       flash->command[0] = flash->program_opcode;
        m25p_addr2cmd(flash, to, flash->command);
 
        page_offset = to & (flash->page_size - 1);
@@ -682,6 +692,8 @@ struct flash_info {
 #define        SECT_4K         0x01            /* OPCODE_BE_4K works uniformly */
 #define        M25P_NO_ERASE   0x02            /* No erase command needed */
 #define        SST_WRITE       0x04            /* use SST byte programming */
+#define        M25P_NO_FR      0x08            /* Can't do fastread */
+#define        SECT_4K_PMC     0x10            /* OPCODE_BE_4K_PMC works uniformly */
 };
 
 #define INFO(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags)     \
@@ -694,13 +706,13 @@ struct flash_info {
                .flags = (_flags),                                      \
        })
 
-#define CAT25_INFO(_sector_size, _n_sectors, _page_size, _addr_width)  \
+#define CAT25_INFO(_sector_size, _n_sectors, _page_size, _addr_width, _flags)  \
        ((kernel_ulong_t)&(struct flash_info) {                         \
                .sector_size = (_sector_size),                          \
                .n_sectors = (_n_sectors),                              \
                .page_size = (_page_size),                              \
                .addr_width = (_addr_width),                            \
-               .flags = M25P_NO_ERASE,                                 \
+               .flags = (_flags),                                      \
        })
 
 /* NOTE: double check command sets and memory organization when you add
@@ -732,7 +744,8 @@ static const struct spi_device_id m25p_ids[] = {
        { "en25qh256", INFO(0x1c7019, 0, 64 * 1024, 512, 0) },
 
        /* Everspin */
-       { "mr25h256", CAT25_INFO(  32 * 1024, 1, 256, 2) },
+       { "mr25h256", CAT25_INFO(  32 * 1024, 1, 256, 2, M25P_NO_ERASE | M25P_NO_FR) },
+       { "mr25h10", CAT25_INFO(128 * 1024, 1, 256, 3, M25P_NO_ERASE | M25P_NO_FR) },
 
        /* GigaDevice */
        { "gd25q32", INFO(0xc84016, 0, 64 * 1024,  64, SECT_4K) },
@@ -762,6 +775,11 @@ static const struct spi_device_id m25p_ids[] = {
        { "n25q128a13",  INFO(0x20ba18, 0, 64 * 1024, 256, 0) },
        { "n25q256a", INFO(0x20ba19, 0, 64 * 1024, 512, SECT_4K) },
 
+       /* PMC */
+       { "pm25lv512", INFO(0, 0, 32 * 1024, 2, SECT_4K_PMC) },
+       { "pm25lv010", INFO(0, 0, 32 * 1024, 4, SECT_4K_PMC) },
+       { "pm25lq032", INFO(0x7f9d46, 0, 64 * 1024,  64, SECT_4K) },
+
        /* Spansion -- single (large) sector size only, at least
         * for the chips listed here (without boot sectors).
         */
@@ -840,17 +858,18 @@ static const struct spi_device_id m25p_ids[] = {
        { "w25q32dw", INFO(0xef6016, 0, 64 * 1024,  64, SECT_4K) },
        { "w25x64", INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K) },
        { "w25q64", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
+       { "w25q128", INFO(0xef4018, 0, 64 * 1024, 256, SECT_4K) },
        { "w25q80", INFO(0xef5014, 0, 64 * 1024,  16, SECT_4K) },
        { "w25q80bl", INFO(0xef4014, 0, 64 * 1024,  16, SECT_4K) },
        { "w25q128", INFO(0xef4018, 0, 64 * 1024, 256, SECT_4K) },
        { "w25q256", INFO(0xef4019, 0, 64 * 1024, 512, SECT_4K) },
 
        /* Catalyst / On Semiconductor -- non-JEDEC */
-       { "cat25c11", CAT25_INFO(  16, 8, 16, 1) },
-       { "cat25c03", CAT25_INFO(  32, 8, 16, 2) },
-       { "cat25c09", CAT25_INFO( 128, 8, 32, 2) },
-       { "cat25c17", CAT25_INFO( 256, 8, 32, 2) },
-       { "cat25128", CAT25_INFO(2048, 8, 64, 2) },
+       { "cat25c11", CAT25_INFO(  16, 8, 16, 1, M25P_NO_ERASE | M25P_NO_FR) },
+       { "cat25c03", CAT25_INFO(  32, 8, 16, 2, M25P_NO_ERASE | M25P_NO_FR) },
+       { "cat25c09", CAT25_INFO( 128, 8, 32, 2, M25P_NO_ERASE | M25P_NO_FR) },
+       { "cat25c17", CAT25_INFO( 256, 8, 32, 2, M25P_NO_ERASE | M25P_NO_FR) },
+       { "cat25128", CAT25_INFO(2048, 8, 64, 2, M25P_NO_ERASE | M25P_NO_FR) },
        { },
 };
 MODULE_DEVICE_TABLE(spi, m25p_ids);
@@ -920,7 +939,7 @@ static int m25p_probe(struct spi_device *spi)
         * a chip ID, try the JEDEC id commands; they'll work for most
         * newer chips, even if we don't recognize the particular chip.
         */
-       data = spi->dev.platform_data;
+       data = dev_get_platdata(&spi->dev);
        if (data && data->type) {
                const struct spi_device_id *plat_id;
 
@@ -972,7 +991,7 @@ static int m25p_probe(struct spi_device *spi)
 
        flash->spi = spi;
        mutex_init(&flash->lock);
-       dev_set_drvdata(&spi->dev, flash);
+       spi_set_drvdata(spi, flash);
 
        /*
         * Atmel, SST and Intel/Numonyx serial flash tend to power
@@ -1014,6 +1033,9 @@ static int m25p_probe(struct spi_device *spi)
        if (info->flags & SECT_4K) {
                flash->erase_opcode = OPCODE_BE_4K;
                flash->mtd.erasesize = 4096;
+       } else if (info->flags & SECT_4K_PMC) {
+               flash->erase_opcode = OPCODE_BE_4K_PMC;
+               flash->mtd.erasesize = 4096;
        } else {
                flash->erase_opcode = OPCODE_SE;
                flash->mtd.erasesize = info->sector_size;
@@ -1028,24 +1050,41 @@ static int m25p_probe(struct spi_device *spi)
        flash->mtd.writebufsize = flash->page_size;
 
        flash->fast_read = false;
-#ifdef CONFIG_OF
        if (np && of_property_read_bool(np, "m25p,fast-read"))
                flash->fast_read = true;
-#endif
 
 #ifdef CONFIG_M25PXX_USE_FAST_READ
        flash->fast_read = true;
 #endif
+       if (info->flags & M25P_NO_FR)
+               flash->fast_read = false;
+
+       /* Default commands */
+       if (flash->fast_read)
+               flash->read_opcode = OPCODE_FAST_READ;
+       else
+               flash->read_opcode = OPCODE_NORM_READ;
+
+       flash->program_opcode = OPCODE_PP;
 
        if (info->addr_width)
                flash->addr_width = info->addr_width;
-       else {
+       else if (flash->mtd.size > 0x1000000) {
                /* enable 4-byte addressing if the device exceeds 16MiB */
-               if (flash->mtd.size > 0x1000000) {
-                       flash->addr_width = 4;
-                       set_4byte(flash, info->jedec_id, 1);
+               flash->addr_width = 4;
+               if (JEDEC_MFR(info->jedec_id) == CFI_MFR_AMD) {
+                       /* Dedicated 4-byte command set */
+                       flash->read_opcode = flash->fast_read ?
+                               OPCODE_FAST_READ_4B :
+                               OPCODE_NORM_READ_4B;
+                       flash->program_opcode = OPCODE_PP_4B;
+                       /* No small sector erase for 4-byte command set */
+                       flash->erase_opcode = OPCODE_SE_4B;
+                       flash->mtd.erasesize = info->sector_size;
                } else
-                       flash->addr_width = 3;
+                       set_4byte(flash, info->jedec_id, 1);
+       } else {
+               flash->addr_width = 3;
        }
 
        dev_info(&spi->dev, "%s (%lld Kbytes)\n", id->name,
@@ -1080,7 +1119,7 @@ static int m25p_probe(struct spi_device *spi)
 
 static int m25p_remove(struct spi_device *spi)
 {
-       struct m25p     *flash = dev_get_drvdata(&spi->dev);
+       struct m25p     *flash = spi_get_drvdata(spi);
        int             status;
 
        /* Clean up MTD stuff. */
index 28779b6dfcd98fc6c8b2ed5b49800d898fee7e81..0e8cbfeba11e42a85da4a843cd7f212263eb9448 100644 (file)
@@ -622,7 +622,7 @@ static int add_dataflash_otp(struct spi_device *spi, char *name, int nr_pages,
        struct dataflash                *priv;
        struct mtd_info                 *device;
        struct mtd_part_parser_data     ppdata;
-       struct flash_platform_data      *pdata = spi->dev.platform_data;
+       struct flash_platform_data      *pdata = dev_get_platdata(&spi->dev);
        char                            *otp_tag = "";
        int                             err = 0;
 
@@ -661,7 +661,7 @@ static int add_dataflash_otp(struct spi_device *spi, char *name, int nr_pages,
        dev_info(&spi->dev, "%s (%lld KBytes) pagesize %d bytes%s\n",
                        name, (long long)((device->size + 1023) >> 10),
                        pagesize, otp_tag);
-       dev_set_drvdata(&spi->dev, priv);
+       spi_set_drvdata(spi, priv);
 
        ppdata.of_node = spi->dev.of_node;
        err = mtd_device_parse_register(device, NULL, &ppdata,
@@ -671,7 +671,7 @@ static int add_dataflash_otp(struct spi_device *spi, char *name, int nr_pages,
        if (!err)
                return 0;
 
-       dev_set_drvdata(&spi->dev, NULL);
+       spi_set_drvdata(spi, NULL);
        kfree(priv);
        return err;
 }
@@ -895,14 +895,14 @@ static int dataflash_probe(struct spi_device *spi)
 
 static int dataflash_remove(struct spi_device *spi)
 {
-       struct dataflash        *flash = dev_get_drvdata(&spi->dev);
+       struct dataflash        *flash = spi_get_drvdata(spi);
        int                     status;
 
        pr_debug("%s: remove\n", dev_name(&spi->dev));
 
        status = mtd_device_unregister(&flash->mtd);
        if (status == 0) {
-               dev_set_drvdata(&spi->dev, NULL);
+               spi_set_drvdata(spi, NULL);
                kfree(flash);
        }
        return status;
index 8a82b8bc21e185d7f62ca331d93f532470bb4399..42382141206222152d948ecd19f73338e1a30a3f 100644 (file)
@@ -550,7 +550,7 @@ static int spear_mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
 {
        struct spear_snor_flash *flash = get_flash_data(mtd);
        struct spear_smi *dev = mtd->priv;
-       void *src;
+       void __iomem *src;
        u32 ctrlreg1, val;
        int ret;
 
@@ -583,7 +583,7 @@ static int spear_mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
 
        writel(val, dev->io_base + SMI_CR1);
 
-       memcpy_fromio(buf, (u8 *)src, len);
+       memcpy_fromio(buf, src, len);
 
        /* restore ctrl reg1 */
        writel(ctrlreg1, dev->io_base + SMI_CR1);
@@ -596,7 +596,7 @@ static int spear_mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
 }
 
 static inline int spear_smi_cpy_toio(struct spear_smi *dev, u32 bank,
-               void *dest, const void *src, size_t len)
+               void __iomem *dest, const void *src, size_t len)
 {
        int ret;
        u32 ctrlreg1;
@@ -643,7 +643,7 @@ static int spear_mtd_write(struct mtd_info *mtd, loff_t to, size_t len,
 {
        struct spear_snor_flash *flash = get_flash_data(mtd);
        struct spear_smi *dev = mtd->priv;
-       void *dest;
+       void __iomem *dest;
        u32 page_offset, page_size;
        int ret;
 
@@ -995,14 +995,12 @@ static int spear_smi_probe(struct platform_device *pdev)
                ret = spear_smi_setup_banks(pdev, i, pdata->np[i]);
                if (ret) {
                        dev_err(&dev->pdev->dev, "bank setup failed\n");
-                       goto err_bank_setup;
+                       goto err_irq;
                }
        }
 
        return 0;
 
-err_bank_setup:
-       platform_set_drvdata(pdev, NULL);
 err_irq:
        clk_disable_unprepare(dev->clk);
 err:
@@ -1040,12 +1038,11 @@ static int spear_smi_remove(struct platform_device *pdev)
        }
 
        clk_disable_unprepare(dev->clk);
-       platform_set_drvdata(pdev, NULL);
 
        return 0;
 }
 
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 static int spear_smi_suspend(struct device *dev)
 {
        struct spear_smi *sdev = dev_get_drvdata(dev);
@@ -1068,9 +1065,9 @@ static int spear_smi_resume(struct device *dev)
                spear_smi_hw_init(sdev);
        return ret;
 }
+#endif
 
 static SIMPLE_DEV_PM_OPS(spear_smi_pm_ops, spear_smi_suspend, spear_smi_resume);
-#endif
 
 #ifdef CONFIG_OF
 static const struct of_device_id spear_smi_id_table[] = {
@@ -1086,9 +1083,7 @@ static struct platform_driver spear_smi_driver = {
                .bus = &platform_bus_type,
                .owner = THIS_MODULE,
                .of_match_table = of_match_ptr(spear_smi_id_table),
-#ifdef CONFIG_PM
                .pm = &spear_smi_pm_ops,
-#endif
        },
        .probe = spear_smi_probe,
        .remove = spear_smi_remove,
index 8091b016369430c0975835be79d3a6c381363846..a42f1f0e7281417398f4de453845ff8edf79b60c 100644 (file)
@@ -370,9 +370,9 @@ static int sst25l_probe(struct spi_device *spi)
 
        flash->spi = spi;
        mutex_init(&flash->lock);
-       dev_set_drvdata(&spi->dev, flash);
+       spi_set_drvdata(spi, flash);
 
-       data = spi->dev.platform_data;
+       data = dev_get_platdata(&spi->dev);
        if (data && data->name)
                flash->mtd.name = data->name;
        else
@@ -404,7 +404,7 @@ static int sst25l_probe(struct spi_device *spi)
                                        data ? data->nr_parts : 0);
        if (ret) {
                kfree(flash);
-               dev_set_drvdata(&spi->dev, NULL);
+               spi_set_drvdata(spi, NULL);
                return -ENODEV;
        }
 
@@ -413,7 +413,7 @@ static int sst25l_probe(struct spi_device *spi)
 
 static int sst25l_remove(struct spi_device *spi)
 {
-       struct sst25l_flash *flash = dev_get_drvdata(&spi->dev);
+       struct sst25l_flash *flash = spi_get_drvdata(spi);
        int ret;
 
        ret = mtd_device_unregister(&flash->mtd);
index 8b27ca054c59ba1d4e2ee5bd20cfd600dce79afa..310dc7c93425587095941d76afcc8dacbdb1fc29 100644 (file)
@@ -157,24 +157,6 @@ config MTD_PXA2XX
        help
          This provides a driver for the NOR flash attached to a PXA2xx chip.
 
-config MTD_OCTAGON
-       tristate "JEDEC Flash device mapped on Octagon 5066 SBC"
-       depends on X86 && MTD_JEDEC && MTD_COMPLEX_MAPPINGS
-       help
-         This provides a 'mapping' driver which supports the way in which
-         the flash chips are connected in the Octagon-5066 Single Board
-         Computer. More information on the board is available at
-         <http://www.octagonsystems.com/products/5066.aspx>.
-
-config MTD_VMAX
-       tristate "JEDEC Flash device mapped on Tempustech VMAX SBC301"
-       depends on X86 && MTD_JEDEC && MTD_COMPLEX_MAPPINGS
-       help
-         This provides a 'mapping' driver which supports the way in which
-         the flash chips are connected in the Tempustech VMAX SBC301 Single
-         Board Computer. More information on the board is available at
-         <http://www.tempustech.com/>.
-
 config MTD_SCx200_DOCFLASH
        tristate "Flash device mapped with DOCCS on NatSemi SCx200"
        depends on SCx200 && MTD_CFI
index 9fdbd4ba64419f45576e0833470680f0d4194003..141c91a5b24cc044c063b064d600f59a0182d48d 100644 (file)
@@ -16,7 +16,6 @@ obj-$(CONFIG_MTD_ICHXROM)     += ichxrom.o
 obj-$(CONFIG_MTD_CK804XROM)    += ck804xrom.o
 obj-$(CONFIG_MTD_TSUNAMI)      += tsunami_flash.o
 obj-$(CONFIG_MTD_PXA2XX)       += pxa2xx-flash.o
-obj-$(CONFIG_MTD_OCTAGON)      += octagon-5066.o
 obj-$(CONFIG_MTD_PHYSMAP)      += physmap.o
 obj-$(CONFIG_MTD_PHYSMAP_OF)   += physmap_of.o
 obj-$(CONFIG_MTD_PISMO)                += pismo.o
@@ -28,7 +27,6 @@ obj-$(CONFIG_MTD_SC520CDP)    += sc520cdp.o
 obj-$(CONFIG_MTD_NETSC520)     += netsc520.o
 obj-$(CONFIG_MTD_TS5500)       += ts5500_flash.o
 obj-$(CONFIG_MTD_SUN_UFLASH)   += sun_uflash.o
-obj-$(CONFIG_MTD_VMAX)         += vmax301.o
 obj-$(CONFIG_MTD_SCx200_DOCFLASH)+= scx200_docflash.o
 obj-$(CONFIG_MTD_SOLUTIONENGINE)+= solutionengine.o
 obj-$(CONFIG_MTD_PCI)          += pci.o
index 319b04a6c9d1f6a0ccc5c565c977740d63babaa9..5434d8ded015dfca0e8a19199883ce26122fc421 100644 (file)
@@ -128,7 +128,7 @@ static const char * const part_probe_types[] = {
 static int bfin_flash_probe(struct platform_device *pdev)
 {
        int ret;
-       struct physmap_flash_data *pdata = pdev->dev.platform_data;
+       struct physmap_flash_data *pdata = dev_get_platdata(&pdev->dev);
        struct resource *memory = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        struct resource *flash_ambctl = platform_get_resource(pdev, IORESOURCE_MEM, 1);
        struct async_state *state;
index d16fc9d3b8cd5bd58dae35c7f5e579e289ac9083..d504b3d1791da8ef70076d2b936c6afba7cc863c 100644 (file)
 #define FLASH_PARTITION3_SIZE 0x001C0000
 
 
-struct map_info flagadm_map = {
+static struct map_info flagadm_map = {
                .name =         "FlagaDM flash device",
                .size =         FLASH_SIZE,
                .bankwidth =    2,
 };
 
-struct mtd_partition flagadm_parts[] = {
+static struct mtd_partition flagadm_parts[] = {
        {
                .name =         "Bootloader",
                .offset =       FLASH_PARTITION0_ADDR,
@@ -112,7 +112,7 @@ static int __init init_flagadm(void)
                return 0;
        }
 
-       iounmap((void *)flagadm_map.virt);
+       iounmap((void __iomem *)flagadm_map.virt);
        return -ENXIO;
 }
 
@@ -123,8 +123,8 @@ static void __exit cleanup_flagadm(void)
                map_destroy(mymtd);
        }
        if (flagadm_map.virt) {
-               iounmap((void *)flagadm_map.virt);
-               flagadm_map.virt = 0;
+               iounmap((void __iomem *)flagadm_map.virt);
+               flagadm_map.virt = NULL;
        }
 }
 
index 5ede28294f9e49765a9a24ed12867b94cc9e3208..1adba86474a52f7a4fed630bd08f300cf45e3398 100644 (file)
@@ -196,7 +196,7 @@ static int gpio_flash_probe(struct platform_device *pdev)
        struct resource *gpios;
        struct async_state *state;
 
-       pdata = pdev->dev.platform_data;
+       pdata = dev_get_platdata(&pdev->dev);
        memory = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        gpios = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
 
index 49686744d93cac9d2dae0758fd1b70618237c9c1..15bbda03be6542cd860759a79a914387c842301d 100644 (file)
@@ -79,7 +79,7 @@ static int __init init_impa7(void)
                }
                simple_map_init(&impa7_map[i]);
 
-               impa7_mtd[i] = 0;
+               impa7_mtd[i] = NULL;
                type = rom_probe_types;
                for(; !impa7_mtd[i] && *type; type++) {
                        impa7_mtd[i] = do_map_probe(*type, &impa7_map[i]);
@@ -91,9 +91,9 @@ static int __init init_impa7(void)
                        mtd_device_parse_register(impa7_mtd[i], NULL, NULL,
                                                  partitions,
                                                  ARRAY_SIZE(partitions));
+               } else {
+                       iounmap((void __iomem *)impa7_map[i].virt);
                }
-               else
-                       iounmap((void *)impa7_map[i].virt);
        }
        return devicesfound == 0 ? -ENXIO : 0;
 }
@@ -105,8 +105,8 @@ static void __exit cleanup_impa7(void)
                if (impa7_mtd[i]) {
                        mtd_device_unregister(impa7_mtd[i]);
                        map_destroy(impa7_mtd[i]);
-                       iounmap((void *)impa7_map[i].virt);
-                       impa7_map[i].virt = 0;
+                       iounmap((void __iomem *)impa7_map[i].virt);
+                       impa7_map[i].virt = NULL;
                }
        }
 }
index 52b3410a105c943d47ce3b9ea873781948d028a4..10debfea81e7147c25fa00e4913bd2d336c24b6a 100644 (file)
@@ -152,11 +152,9 @@ static const char * const probes[] = { "RedBoot", "cmdlinepart", NULL };
 
 static int ixp4xx_flash_remove(struct platform_device *dev)
 {
-       struct flash_platform_data *plat = dev->dev.platform_data;
+       struct flash_platform_data *plat = dev_get_platdata(&dev->dev);
        struct ixp4xx_flash_info *info = platform_get_drvdata(dev);
 
-       platform_set_drvdata(dev, NULL);
-
        if(!info)
                return 0;
 
@@ -180,7 +178,7 @@ static int ixp4xx_flash_remove(struct platform_device *dev)
 
 static int ixp4xx_flash_probe(struct platform_device *dev)
 {
-       struct flash_platform_data *plat = dev->dev.platform_data;
+       struct flash_platform_data *plat = dev_get_platdata(&dev->dev);
        struct ixp4xx_flash_info *info;
        struct mtd_part_parser_data ppdata = {
                .origin = dev->resource->start,
index ab0fead56b8383f1a57579c0f9f3dfe0a5211d32..98bb5d5375d741e8159497e30b704c0520533f79 100644 (file)
@@ -102,9 +102,8 @@ static int latch_addr_flash_remove(struct platform_device *dev)
        info = platform_get_drvdata(dev);
        if (info == NULL)
                return 0;
-       platform_set_drvdata(dev, NULL);
 
-       latch_addr_data = dev->dev.platform_data;
+       latch_addr_data = dev_get_platdata(&dev->dev);
 
        if (info->mtd != NULL) {
                mtd_device_unregister(info->mtd);
@@ -135,7 +134,7 @@ static int latch_addr_flash_probe(struct platform_device *dev)
        int chipsel;
        int err;
 
-       latch_addr_data = dev->dev.platform_data;
+       latch_addr_data = dev_get_platdata(&dev->dev);
        if (latch_addr_data == NULL)
                return -ENODEV;
 
diff --git a/drivers/mtd/maps/octagon-5066.c b/drivers/mtd/maps/octagon-5066.c
deleted file mode 100644 (file)
index 807ac2a..0000000
+++ /dev/null
@@ -1,246 +0,0 @@
-/* ######################################################################
-
-   Octagon 5066 MTD Driver.
-
-   The Octagon 5066 is a SBC based on AMD's 586-WB running at 133 MHZ. It
-   comes with a builtin AMD 29F016 flash chip and a socketed EEPROM that
-   is replacable by flash. Both units are mapped through a multiplexer
-   into a 32k memory window at 0xe8000. The control register for the
-   multiplexing unit is located at IO 0x208 with a bit map of
-     0-5 Page Selection in 32k increments
-     6-7 Device selection:
-        00 SSD off
-        01 SSD 0 (Socket)
-        10 SSD 1 (Flash chip)
-        11 undefined
-
-   On each SSD, the first 128k is reserved for use by the bios
-   (actually it IS the bios..) This only matters if you are booting off the
-   flash, you must not put a file system starting there.
-
-   The driver tries to do a detection algorithm to guess what sort of devices
-   are plugged into the sockets.
-
-   ##################################################################### */
-
-#include <linux/module.h>
-#include <linux/ioport.h>
-#include <linux/init.h>
-#include <asm/io.h>
-
-#include <linux/mtd/map.h>
-#include <linux/mtd/mtd.h>
-
-#define WINDOW_START 0xe8000
-#define WINDOW_LENGTH 0x8000
-#define WINDOW_SHIFT 27
-#define WINDOW_MASK 0x7FFF
-#define PAGE_IO 0x208
-
-static volatile char page_n_dev = 0;
-static unsigned long iomapadr;
-static DEFINE_SPINLOCK(oct5066_spin);
-
-/*
- * We use map_priv_1 to identify which device we are.
- */
-
-static void __oct5066_page(struct map_info *map, __u8 byte)
-{
-       outb(byte,PAGE_IO);
-       page_n_dev = byte;
-}
-
-static inline void oct5066_page(struct map_info *map, unsigned long ofs)
-{
-       __u8 byte = map->map_priv_1 | (ofs >> WINDOW_SHIFT);
-
-       if (page_n_dev != byte)
-               __oct5066_page(map, byte);
-}
-
-
-static map_word oct5066_read8(struct map_info *map, unsigned long ofs)
-{
-       map_word ret;
-       spin_lock(&oct5066_spin);
-       oct5066_page(map, ofs);
-       ret.x[0] = readb(iomapadr + (ofs & WINDOW_MASK));
-       spin_unlock(&oct5066_spin);
-       return ret;
-}
-
-static void oct5066_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
-{
-       while(len) {
-               unsigned long thislen = len;
-               if (len > (WINDOW_LENGTH - (from & WINDOW_MASK)))
-                       thislen = WINDOW_LENGTH-(from & WINDOW_MASK);
-
-               spin_lock(&oct5066_spin);
-               oct5066_page(map, from);
-               memcpy_fromio(to, iomapadr + from, thislen);
-               spin_unlock(&oct5066_spin);
-               to += thislen;
-               from += thislen;
-               len -= thislen;
-       }
-}
-
-static void oct5066_write8(struct map_info *map, map_word d, unsigned long adr)
-{
-       spin_lock(&oct5066_spin);
-       oct5066_page(map, adr);
-       writeb(d.x[0], iomapadr + (adr & WINDOW_MASK));
-       spin_unlock(&oct5066_spin);
-}
-
-static void oct5066_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len)
-{
-       while(len) {
-               unsigned long thislen = len;
-               if (len > (WINDOW_LENGTH - (to & WINDOW_MASK)))
-                       thislen = WINDOW_LENGTH-(to & WINDOW_MASK);
-
-               spin_lock(&oct5066_spin);
-               oct5066_page(map, to);
-               memcpy_toio(iomapadr + to, from, thislen);
-               spin_unlock(&oct5066_spin);
-               to += thislen;
-               from += thislen;
-               len -= thislen;
-       }
-}
-
-static struct map_info oct5066_map[2] = {
-       {
-               .name = "Octagon 5066 Socket",
-               .phys = NO_XIP,
-               .size = 512 * 1024,
-               .bankwidth = 1,
-               .read = oct5066_read8,
-               .copy_from = oct5066_copy_from,
-               .write = oct5066_write8,
-               .copy_to = oct5066_copy_to,
-               .map_priv_1 = 1<<6
-       },
-       {
-               .name = "Octagon 5066 Internal Flash",
-               .phys = NO_XIP,
-               .size = 2 * 1024 * 1024,
-               .bankwidth = 1,
-               .read = oct5066_read8,
-               .copy_from = oct5066_copy_from,
-               .write = oct5066_write8,
-               .copy_to = oct5066_copy_to,
-               .map_priv_1 = 2<<6
-       }
-};
-
-static struct mtd_info *oct5066_mtd[2] = {NULL, NULL};
-
-// OctProbe - Sense if this is an octagon card
-// ---------------------------------------------------------------------
-/* Perform a simple validity test, we map the window select SSD0 and
-   change pages while monitoring the window. A change in the window,
-   controlled by the PAGE_IO port is a functioning 5066 board. This will
-   fail if the thing in the socket is set to a uniform value. */
-static int __init OctProbe(void)
-{
-   unsigned int Base = (1 << 6);
-   unsigned long I;
-   unsigned long Values[10];
-   for (I = 0; I != 20; I++)
-   {
-      outb(Base + (I%10),PAGE_IO);
-      if (I < 10)
-      {
-        // Record the value and check for uniqueness
-        Values[I%10] = readl(iomapadr);
-        if (I > 0 && Values[I%10] == Values[0])
-           return -EAGAIN;
-      }
-      else
-      {
-        // Make sure we get the same values on the second pass
-        if (Values[I%10] != readl(iomapadr))
-           return -EAGAIN;
-      }
-   }
-   return 0;
-}
-
-void cleanup_oct5066(void)
-{
-       int i;
-       for (i=0; i<2; i++) {
-               if (oct5066_mtd[i]) {
-                       mtd_device_unregister(oct5066_mtd[i]);
-                       map_destroy(oct5066_mtd[i]);
-               }
-       }
-       iounmap((void *)iomapadr);
-       release_region(PAGE_IO, 1);
-}
-
-static int __init init_oct5066(void)
-{
-       int i;
-       int ret = 0;
-
-       // Do an autoprobe sequence
-       if (!request_region(PAGE_IO,1,"Octagon SSD")) {
-               printk(KERN_NOTICE "5066: Page Register in Use\n");
-               return -EAGAIN;
-       }
-       iomapadr = (unsigned long)ioremap(WINDOW_START, WINDOW_LENGTH);
-       if (!iomapadr) {
-               printk(KERN_NOTICE "Failed to ioremap memory region\n");
-               ret = -EIO;
-               goto out_rel;
-       }
-       if (OctProbe() != 0) {
-               printk(KERN_NOTICE "5066: Octagon Probe Failed, is this an Octagon 5066 SBC?\n");
-               iounmap((void *)iomapadr);
-               ret = -EAGAIN;
-               goto out_unmap;
-       }
-
-       // Print out our little header..
-       printk("Octagon 5066 SSD IO:0x%x MEM:0x%x-0x%x\n",PAGE_IO,WINDOW_START,
-              WINDOW_START+WINDOW_LENGTH);
-
-       for (i=0; i<2; i++) {
-               oct5066_mtd[i] = do_map_probe("cfi_probe", &oct5066_map[i]);
-               if (!oct5066_mtd[i])
-                       oct5066_mtd[i] = do_map_probe("jedec", &oct5066_map[i]);
-               if (!oct5066_mtd[i])
-                       oct5066_mtd[i] = do_map_probe("map_ram", &oct5066_map[i]);
-               if (!oct5066_mtd[i])
-                       oct5066_mtd[i] = do_map_probe("map_rom", &oct5066_map[i]);
-               if (oct5066_mtd[i]) {
-                       oct5066_mtd[i]->owner = THIS_MODULE;
-                       mtd_device_register(oct5066_mtd[i], NULL, 0);
-               }
-       }
-
-       if (!oct5066_mtd[0] && !oct5066_mtd[1]) {
-               cleanup_oct5066();
-               return -ENXIO;
-       }
-
-       return 0;
-
- out_unmap:
-       iounmap((void *)iomapadr);
- out_rel:
-       release_region(PAGE_IO, 1);
-       return ret;
-}
-
-module_init(init_oct5066);
-module_exit(cleanup_oct5066);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Jason Gunthorpe <jgg@deltatee.com>, David Woodhouse <dwmw2@infradead.org>");
-MODULE_DESCRIPTION("MTD map driver for Octagon 5066 Single Board Computer");
index e7a592c8c76591e02257e5c39b891aefbf43d9d3..f73cd461257c852af9da775f839c3ec190527256 100644 (file)
@@ -40,9 +40,8 @@ static int physmap_flash_remove(struct platform_device *dev)
        info = platform_get_drvdata(dev);
        if (info == NULL)
                return 0;
-       platform_set_drvdata(dev, NULL);
 
-       physmap_data = dev->dev.platform_data;
+       physmap_data = dev_get_platdata(&dev->dev);
 
        if (info->cmtd) {
                mtd_device_unregister(info->cmtd);
@@ -69,7 +68,7 @@ static void physmap_set_vpp(struct map_info *map, int state)
        unsigned long flags;
 
        pdev = (struct platform_device *)map->map_priv_1;
-       physmap_data = pdev->dev.platform_data;
+       physmap_data = dev_get_platdata(&pdev->dev);
 
        if (!physmap_data->set_vpp)
                return;
@@ -103,7 +102,7 @@ static int physmap_flash_probe(struct platform_device *dev)
        int i;
        int devices_found = 0;
 
-       physmap_data = dev->dev.platform_data;
+       physmap_data = dev_get_platdata(&dev->dev);
        if (physmap_data == NULL)
                return -ENODEV;
 
index 71fdda29594b7c3595d786c1572012d09b4df8af..676271659b37442a85d4d545fca05f80c48bdb54 100644 (file)
@@ -84,8 +84,6 @@ static int platram_remove(struct platform_device *pdev)
 {
        struct platram_info *info = to_platram_info(pdev);
 
-       platform_set_drvdata(pdev, NULL);
-
        dev_dbg(&pdev->dev, "removing device\n");
 
        if (info == NULL)
@@ -130,13 +128,13 @@ static int platram_probe(struct platform_device *pdev)
 
        dev_dbg(&pdev->dev, "probe entered\n");
 
-       if (pdev->dev.platform_data == NULL) {
+       if (dev_get_platdata(&pdev->dev) == NULL) {
                dev_err(&pdev->dev, "no platform data supplied\n");
                err = -ENOENT;
                goto exit_error;
        }
 
-       pdata = pdev->dev.platform_data;
+       pdata = dev_get_platdata(&pdev->dev);
 
        info = kzalloc(sizeof(*info), GFP_KERNEL);
        if (info == NULL) {
index acb1dbcf7ce58a438ed7ff9a1df9aecb2b57478f..d210d131fef255da97e7d277574ac80f01a66341 100644 (file)
@@ -49,7 +49,7 @@ static const char * const probes[] = { "RedBoot", "cmdlinepart", NULL };
 
 static int pxa2xx_flash_probe(struct platform_device *pdev)
 {
-       struct flash_platform_data *flash = pdev->dev.platform_data;
+       struct flash_platform_data *flash = dev_get_platdata(&pdev->dev);
        struct pxa2xx_flash_info *info;
        struct resource *res;
 
@@ -107,8 +107,6 @@ static int pxa2xx_flash_remove(struct platform_device *dev)
 {
        struct pxa2xx_flash_info *info = platform_get_drvdata(dev);
 
-       platform_set_drvdata(dev, NULL);
-
        mtd_device_unregister(info->mtd);
 
        map_destroy(info->mtd);
index ac02fbffd6df940f70e3802895811f5916843541..93525121d69dc17876f70f8341e6f69faa251b12 100644 (file)
@@ -34,10 +34,9 @@ static int rbtx4939_flash_remove(struct platform_device *dev)
        info = platform_get_drvdata(dev);
        if (!info)
                return 0;
-       platform_set_drvdata(dev, NULL);
 
        if (info->mtd) {
-               struct rbtx4939_flash_data *pdata = dev->dev.platform_data;
+               struct rbtx4939_flash_data *pdata = dev_get_platdata(&dev->dev);
 
                mtd_device_unregister(info->mtd);
                map_destroy(info->mtd);
@@ -57,7 +56,7 @@ static int rbtx4939_flash_probe(struct platform_device *dev)
        int err = 0;
        unsigned long size;
 
-       pdata = dev->dev.platform_data;
+       pdata = dev_get_platdata(&dev->dev);
        if (!pdata)
                return -ENODEV;
 
index 29e3dcaa1d90413b15df9e30c04373d897c2f6c6..8fc06bf111c4685c0376a48f2f39df486cdda268 100644 (file)
@@ -248,7 +248,7 @@ static const char * const part_probes[] = { "cmdlinepart", "RedBoot", NULL };
 
 static int sa1100_mtd_probe(struct platform_device *pdev)
 {
-       struct flash_platform_data *plat = pdev->dev.platform_data;
+       struct flash_platform_data *plat = dev_get_platdata(&pdev->dev);
        struct sa_info *info;
        int err;
 
@@ -277,9 +277,8 @@ static int sa1100_mtd_probe(struct platform_device *pdev)
 static int __exit sa1100_mtd_remove(struct platform_device *pdev)
 {
        struct sa_info *info = platform_get_drvdata(pdev);
-       struct flash_platform_data *plat = pdev->dev.platform_data;
+       struct flash_platform_data *plat = dev_get_platdata(&pdev->dev);
 
-       platform_set_drvdata(pdev, NULL);
        sa1100_destroy(info, plat);
 
        return 0;
diff --git a/drivers/mtd/maps/vmax301.c b/drivers/mtd/maps/vmax301.c
deleted file mode 100644 (file)
index 5e68de7..0000000
+++ /dev/null
@@ -1,196 +0,0 @@
-/* ######################################################################
-
-   Tempustech VMAX SBC301 MTD Driver.
-
-   The VMAx 301 is a SBC based on . It
-   comes with three builtin AMD 29F016B flash chips and a socket for SRAM or
-   more flash. Each unit has it's own 8k mapping into a settable region
-   (0xD8000). There are two 8k mappings for each MTD, the first is always set
-   to the lower 8k of the device the second is paged. Writing a 16 bit page
-   value to anywhere in the first 8k will cause the second 8k to page around.
-
-   To boot the device a bios extension must be installed into the first 8k
-   of flash that is smart enough to copy itself down, page in the rest of
-   itself and begin executing.
-
-   ##################################################################### */
-
-#include <linux/module.h>
-#include <linux/ioport.h>
-#include <linux/init.h>
-#include <linux/spinlock.h>
-#include <asm/io.h>
-
-#include <linux/mtd/map.h>
-#include <linux/mtd/mtd.h>
-
-
-#define WINDOW_START 0xd8000
-#define WINDOW_LENGTH 0x2000
-#define WINDOW_SHIFT 25
-#define WINDOW_MASK 0x1FFF
-
-/* Actually we could use two spinlocks, but we'd have to have
-   more private space in the struct map_info. We lose a little
-   performance like this, but we'd probably lose more by having
-   the extra indirection from having one of the map->map_priv
-   fields pointing to yet another private struct.
-*/
-static DEFINE_SPINLOCK(vmax301_spin);
-
-static void __vmax301_page(struct map_info *map, unsigned long page)
-{
-       writew(page, map->map_priv_2 - WINDOW_LENGTH);
-       map->map_priv_1 = page;
-}
-
-static inline void vmax301_page(struct map_info *map,
-                                 unsigned long ofs)
-{
-       unsigned long page = (ofs >> WINDOW_SHIFT);
-       if (map->map_priv_1 != page)
-               __vmax301_page(map, page);
-}
-
-static map_word vmax301_read8(struct map_info *map, unsigned long ofs)
-{
-       map_word ret;
-       spin_lock(&vmax301_spin);
-       vmax301_page(map, ofs);
-       ret.x[0] = readb(map->map_priv_2 + (ofs & WINDOW_MASK));
-       spin_unlock(&vmax301_spin);
-       return ret;
-}
-
-static void vmax301_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
-{
-       while(len) {
-               unsigned long thislen = len;
-               if (len > (WINDOW_LENGTH - (from & WINDOW_MASK)))
-                       thislen = WINDOW_LENGTH-(from & WINDOW_MASK);
-               spin_lock(&vmax301_spin);
-               vmax301_page(map, from);
-               memcpy_fromio(to, map->map_priv_2 + from, thislen);
-               spin_unlock(&vmax301_spin);
-               to += thislen;
-               from += thislen;
-               len -= thislen;
-       }
-}
-
-static void vmax301_write8(struct map_info *map, map_word d, unsigned long adr)
-{
-       spin_lock(&vmax301_spin);
-       vmax301_page(map, adr);
-       writeb(d.x[0], map->map_priv_2 + (adr & WINDOW_MASK));
-       spin_unlock(&vmax301_spin);
-}
-
-static void vmax301_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len)
-{
-       while(len) {
-               unsigned long thislen = len;
-               if (len > (WINDOW_LENGTH - (to & WINDOW_MASK)))
-                       thislen = WINDOW_LENGTH-(to & WINDOW_MASK);
-
-               spin_lock(&vmax301_spin);
-               vmax301_page(map, to);
-               memcpy_toio(map->map_priv_2 + to, from, thislen);
-               spin_unlock(&vmax301_spin);
-               to += thislen;
-               from += thislen;
-               len -= thislen;
-       }
-}
-
-static struct map_info vmax_map[2] = {
-       {
-               .name = "VMAX301 Internal Flash",
-               .phys = NO_XIP,
-               .size = 3*2*1024*1024,
-               .bankwidth = 1,
-               .read = vmax301_read8,
-               .copy_from = vmax301_copy_from,
-               .write = vmax301_write8,
-               .copy_to = vmax301_copy_to,
-               .map_priv_1 = WINDOW_START + WINDOW_LENGTH,
-               .map_priv_2 = 0xFFFFFFFF
-       },
-       {
-               .name = "VMAX301 Socket",
-               .phys = NO_XIP,
-               .size = 0,
-               .bankwidth = 1,
-               .read = vmax301_read8,
-               .copy_from = vmax301_copy_from,
-               .write = vmax301_write8,
-               .copy_to = vmax301_copy_to,
-               .map_priv_1 = WINDOW_START + (3*WINDOW_LENGTH),
-               .map_priv_2 = 0xFFFFFFFF
-       }
-};
-
-static struct mtd_info *vmax_mtd[2] = {NULL, NULL};
-
-static void __exit cleanup_vmax301(void)
-{
-       int i;
-
-       for (i=0; i<2; i++) {
-               if (vmax_mtd[i]) {
-                       mtd_device_unregister(vmax_mtd[i]);
-                       map_destroy(vmax_mtd[i]);
-               }
-       }
-       iounmap((void *)vmax_map[0].map_priv_1 - WINDOW_START);
-}
-
-static int __init init_vmax301(void)
-{
-       int i;
-       unsigned long iomapadr;
-       // Print out our little header..
-       printk("Tempustech VMAX 301 MEM:0x%x-0x%x\n",WINDOW_START,
-              WINDOW_START+4*WINDOW_LENGTH);
-
-       iomapadr = (unsigned long)ioremap(WINDOW_START, WINDOW_LENGTH*4);
-       if (!iomapadr) {
-               printk("Failed to ioremap memory region\n");
-               return -EIO;
-       }
-       /* Put the address in the map's private data area.
-          We store the actual MTD IO address rather than the
-          address of the first half, because it's used more
-          often.
-       */
-       vmax_map[0].map_priv_2 = iomapadr + WINDOW_START;
-       vmax_map[1].map_priv_2 = iomapadr + (3*WINDOW_START);
-
-       for (i=0; i<2; i++) {
-               vmax_mtd[i] = do_map_probe("cfi_probe", &vmax_map[i]);
-               if (!vmax_mtd[i])
-                       vmax_mtd[i] = do_map_probe("jedec", &vmax_map[i]);
-               if (!vmax_mtd[i])
-                       vmax_mtd[i] = do_map_probe("map_ram", &vmax_map[i]);
-               if (!vmax_mtd[i])
-                       vmax_mtd[i] = do_map_probe("map_rom", &vmax_map[i]);
-               if (vmax_mtd[i]) {
-                       vmax_mtd[i]->owner = THIS_MODULE;
-                       mtd_device_register(vmax_mtd[i], NULL, 0);
-               }
-       }
-
-       if (!vmax_mtd[0] && !vmax_mtd[1]) {
-               iounmap((void *)iomapadr);
-               return -ENXIO;
-       }
-
-       return 0;
-}
-
-module_init(init_vmax301);
-module_exit(cleanup_vmax301);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
-MODULE_DESCRIPTION("MTD map driver for Tempustech VMAX SBC301 board");
index 048c823f5c51397df8c5e6ef023ba287ab65b02f..5e14d540ba2f623abb6438c4b6c204b991ed260b 100644 (file)
@@ -285,6 +285,16 @@ static DEVICE_ATTR(bitflip_threshold, S_IRUGO | S_IWUSR,
                   mtd_bitflip_threshold_show,
                   mtd_bitflip_threshold_store);
 
+static ssize_t mtd_ecc_step_size_show(struct device *dev,
+               struct device_attribute *attr, char *buf)
+{
+       struct mtd_info *mtd = dev_get_drvdata(dev);
+
+       return snprintf(buf, PAGE_SIZE, "%u\n", mtd->ecc_step_size);
+
+}
+static DEVICE_ATTR(ecc_step_size, S_IRUGO, mtd_ecc_step_size_show, NULL);
+
 static struct attribute *mtd_attrs[] = {
        &dev_attr_type.attr,
        &dev_attr_flags.attr,
@@ -296,6 +306,7 @@ static struct attribute *mtd_attrs[] = {
        &dev_attr_numeraseregions.attr,
        &dev_attr_name.attr,
        &dev_attr_ecc_strength.attr,
+       &dev_attr_ecc_step_size.attr,
        &dev_attr_bitflip_threshold.attr,
        NULL,
 };
index 301493382cd0a27a0df3fe0b5285fdb6a8cbb5f0..6e732c3820c14bf9d07b693a0c1de9bfff0088f0 100644 (file)
@@ -516,6 +516,7 @@ static struct mtd_part *allocate_partition(struct mtd_info *master,
        }
 
        slave->mtd.ecclayout = master->ecclayout;
+       slave->mtd.ecc_step_size = master->ecc_step_size;
        slave->mtd.ecc_strength = master->ecc_strength;
        slave->mtd.bitflip_threshold = master->bitflip_threshold;
 
index 50543f1662150ade806e9967190c60426a9ab029..d88529841d3f191dce0f8233a56f1cce63c0e0ba 100644 (file)
@@ -43,6 +43,7 @@ config MTD_SM_COMMON
 
 config MTD_NAND_DENALI
         tristate "Support Denali NAND controller"
+        depends on HAS_DMA
         help
          Enable support for the Denali NAND controller.  This should be
          combined with either the PCI or platform drivers to provide device
@@ -75,7 +76,7 @@ config MTD_NAND_DENALI_SCRATCH_REG_ADDR
 
 config MTD_NAND_GPIO
        tristate "GPIO NAND Flash driver"
-       depends on GPIOLIB && ARM
+       depends on GPIOLIB
        help
          This enables a GPIO based NAND flash driver.
 
@@ -354,7 +355,7 @@ config MTD_NAND_ATMEL
 
 config MTD_NAND_PXA3xx
        tristate "Support for NAND flash devices on PXA3xx"
-       depends on PXA3xx || ARCH_MMP
+       depends on PXA3xx || ARCH_MMP || PLAT_ORION
        help
          This enables the driver for the NAND flash device found on
          PXA3xx processors
@@ -432,13 +433,6 @@ config MTD_NAND_PLATFORM
          devices. You will need to provide platform-specific functions
          via platform_data.
 
-config MTD_ALAUDA
-       tristate "MTD driver for Olympus MAUSB-10 and Fujifilm DPC-R1"
-       depends on USB
-       help
-         These two (and possibly other) Alauda-based cardreaders for
-         SmartMedia and xD allow raw flash access.
-
 config MTD_NAND_ORION
        tristate "NAND Flash support for Marvell Orion SoC"
        depends on PLAT_ORION
index bb8189172f62f49871573bb6119555fce1697654..542b5689eb636b0abad6501500872d376efad77a 100644 (file)
@@ -31,7 +31,6 @@ obj-$(CONFIG_MTD_NAND_CM_X270)                += cmx270_nand.o
 obj-$(CONFIG_MTD_NAND_PXA3xx)          += pxa3xx_nand.o
 obj-$(CONFIG_MTD_NAND_TMIO)            += tmio_nand.o
 obj-$(CONFIG_MTD_NAND_PLATFORM)                += plat_nand.o
-obj-$(CONFIG_MTD_ALAUDA)               += alauda.o
 obj-$(CONFIG_MTD_NAND_PASEMI)          += pasemi_nand.o
 obj-$(CONFIG_MTD_NAND_ORION)           += orion_nand.o
 obj-$(CONFIG_MTD_NAND_FSL_ELBC)                += fsl_elbc_nand.o
diff --git a/drivers/mtd/nand/alauda.c b/drivers/mtd/nand/alauda.c
deleted file mode 100644 (file)
index 60a0dfd..0000000
+++ /dev/null
@@ -1,723 +0,0 @@
-/*
- * MTD driver for Alauda chips
- *
- * Copyright (C) 2007 Joern Engel <joern@logfs.org>
- *
- * Based on drivers/usb/usb-skeleton.c which is:
- * Copyright (C) 2001-2004 Greg Kroah-Hartman (greg@kroah.com)
- * and on drivers/usb/storage/alauda.c, which is:
- *   (c) 2005 Daniel Drake <dsd@gentoo.org>
- *
- * Idea and initial work by Arnd Bergmann <arnd@arndb.de>
- */
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/kref.h>
-#include <linux/usb.h>
-#include <linux/mutex.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/nand_ecc.h>
-
-/* Control commands */
-#define ALAUDA_GET_XD_MEDIA_STATUS     0x08
-#define ALAUDA_ACK_XD_MEDIA_CHANGE     0x0a
-#define ALAUDA_GET_XD_MEDIA_SIG                0x86
-
-/* Common prefix */
-#define ALAUDA_BULK_CMD                        0x40
-
-/* The two ports */
-#define ALAUDA_PORT_XD                 0x00
-#define ALAUDA_PORT_SM                 0x01
-
-/* Bulk commands */
-#define ALAUDA_BULK_READ_PAGE          0x84
-#define ALAUDA_BULK_READ_OOB           0x85 /* don't use, there's a chip bug */
-#define ALAUDA_BULK_READ_BLOCK         0x94
-#define ALAUDA_BULK_ERASE_BLOCK                0xa3
-#define ALAUDA_BULK_WRITE_PAGE         0xa4
-#define ALAUDA_BULK_WRITE_BLOCK                0xb4
-#define ALAUDA_BULK_RESET_MEDIA                0xe0
-
-/* Address shifting */
-#define PBA_LO(pba) ((pba & 0xF) << 5)
-#define PBA_HI(pba) (pba >> 3)
-#define PBA_ZONE(pba) (pba >> 11)
-
-#define TIMEOUT HZ
-
-static const struct usb_device_id alauda_table[] = {
-       { USB_DEVICE(0x0584, 0x0008) }, /* Fujifilm DPC-R1 */
-       { USB_DEVICE(0x07b4, 0x010a) }, /* Olympus MAUSB-10 */
-       { }
-};
-MODULE_DEVICE_TABLE(usb, alauda_table);
-
-struct alauda_card {
-       u8      id;             /* id byte */
-       u8      chipshift;      /* 1<<chipshift total size */
-       u8      pageshift;      /* 1<<pageshift page size */
-       u8      blockshift;     /* 1<<blockshift block size */
-};
-
-struct alauda {
-       struct usb_device       *dev;
-       struct usb_interface    *interface;
-       struct mtd_info         *mtd;
-       struct alauda_card      *card;
-       struct mutex            card_mutex;
-       u32                     pagemask;
-       u32                     bytemask;
-       u32                     blockmask;
-       unsigned int            write_out;
-       unsigned int            bulk_in;
-       unsigned int            bulk_out;
-       u8                      port;
-       struct kref             kref;
-};
-
-static struct alauda_card alauda_card_ids[] = {
-       /* NAND flash */
-       { 0x6e, 20, 8, 12},     /* 1 MB */
-       { 0xe8, 20, 8, 12},     /* 1 MB */
-       { 0xec, 20, 8, 12},     /* 1 MB */
-       { 0x64, 21, 8, 12},     /* 2 MB */
-       { 0xea, 21, 8, 12},     /* 2 MB */
-       { 0x6b, 22, 9, 13},     /* 4 MB */
-       { 0xe3, 22, 9, 13},     /* 4 MB */
-       { 0xe5, 22, 9, 13},     /* 4 MB */
-       { 0xe6, 23, 9, 13},     /* 8 MB */
-       { 0x73, 24, 9, 14},     /* 16 MB */
-       { 0x75, 25, 9, 14},     /* 32 MB */
-       { 0x76, 26, 9, 14},     /* 64 MB */
-       { 0x79, 27, 9, 14},     /* 128 MB */
-       { 0x71, 28, 9, 14},     /* 256 MB */
-
-       /* MASK ROM */
-       { 0x5d, 21, 9, 13},     /* 2 MB */
-       { 0xd5, 22, 9, 13},     /* 4 MB */
-       { 0xd6, 23, 9, 13},     /* 8 MB */
-       { 0x57, 24, 9, 13},     /* 16 MB */
-       { 0x58, 25, 9, 13},     /* 32 MB */
-       { }
-};
-
-static struct alauda_card *get_card(u8 id)
-{
-       struct alauda_card *card;
-
-       for (card = alauda_card_ids; card->id; card++)
-               if (card->id == id)
-                       return card;
-       return NULL;
-}
-
-static void alauda_delete(struct kref *kref)
-{
-       struct alauda *al = container_of(kref, struct alauda, kref);
-
-       if (al->mtd) {
-               mtd_device_unregister(al->mtd);
-               kfree(al->mtd);
-       }
-       usb_put_dev(al->dev);
-       kfree(al);
-}
-
-static int alauda_get_media_status(struct alauda *al, void *buf)
-{
-       int ret;
-
-       mutex_lock(&al->card_mutex);
-       ret = usb_control_msg(al->dev, usb_rcvctrlpipe(al->dev, 0),
-                       ALAUDA_GET_XD_MEDIA_STATUS, 0xc0, 0, 1, buf, 2, HZ);
-       mutex_unlock(&al->card_mutex);
-       return ret;
-}
-
-static int alauda_ack_media(struct alauda *al)
-{
-       int ret;
-
-       mutex_lock(&al->card_mutex);
-       ret = usb_control_msg(al->dev, usb_sndctrlpipe(al->dev, 0),
-                       ALAUDA_ACK_XD_MEDIA_CHANGE, 0x40, 0, 1, NULL, 0, HZ);
-       mutex_unlock(&al->card_mutex);
-       return ret;
-}
-
-static int alauda_get_media_signatures(struct alauda *al, void *buf)
-{
-       int ret;
-
-       mutex_lock(&al->card_mutex);
-       ret = usb_control_msg(al->dev, usb_rcvctrlpipe(al->dev, 0),
-                       ALAUDA_GET_XD_MEDIA_SIG, 0xc0, 0, 0, buf, 4, HZ);
-       mutex_unlock(&al->card_mutex);
-       return ret;
-}
-
-static void alauda_reset(struct alauda *al)
-{
-       u8 command[] = {
-               ALAUDA_BULK_CMD, ALAUDA_BULK_RESET_MEDIA, 0, 0,
-               0, 0, 0, 0, al->port
-       };
-       mutex_lock(&al->card_mutex);
-       usb_bulk_msg(al->dev, al->bulk_out, command, 9, NULL, HZ);
-       mutex_unlock(&al->card_mutex);
-}
-
-static void correct_data(void *buf, void *read_ecc,
-               int *corrected, int *uncorrected)
-{
-       u8 calc_ecc[3];
-       int err;
-
-       nand_calculate_ecc(NULL, buf, calc_ecc);
-       err = nand_correct_data(NULL, buf, read_ecc, calc_ecc);
-       if (err) {
-               if (err > 0)
-                       (*corrected)++;
-               else
-                       (*uncorrected)++;
-       }
-}
-
-struct alauda_sg_request {
-       struct urb *urb[3];
-       struct completion comp;
-};
-
-static void alauda_complete(struct urb *urb)
-{
-       struct completion *comp = urb->context;
-
-       if (comp)
-               complete(comp);
-}
-
-static int __alauda_read_page(struct mtd_info *mtd, loff_t from, void *buf,
-               void *oob)
-{
-       struct alauda_sg_request sg;
-       struct alauda *al = mtd->priv;
-       u32 pba = from >> al->card->blockshift;
-       u32 page = (from >> al->card->pageshift) & al->pagemask;
-       u8 command[] = {
-               ALAUDA_BULK_CMD, ALAUDA_BULK_READ_PAGE, PBA_HI(pba),
-               PBA_ZONE(pba), 0, PBA_LO(pba) + page, 1, 0, al->port
-       };
-       int i, err;
-
-       for (i=0; i<3; i++)
-               sg.urb[i] = NULL;
-
-       err = -ENOMEM;
-       for (i=0; i<3; i++) {
-               sg.urb[i] = usb_alloc_urb(0, GFP_NOIO);
-               if (!sg.urb[i])
-                       goto out;
-       }
-       init_completion(&sg.comp);
-       usb_fill_bulk_urb(sg.urb[0], al->dev, al->bulk_out, command, 9,
-                       alauda_complete, NULL);
-       usb_fill_bulk_urb(sg.urb[1], al->dev, al->bulk_in, buf, mtd->writesize,
-                       alauda_complete, NULL);
-       usb_fill_bulk_urb(sg.urb[2], al->dev, al->bulk_in, oob, 16,
-                       alauda_complete, &sg.comp);
-
-       mutex_lock(&al->card_mutex);
-       for (i=0; i<3; i++) {
-               err = usb_submit_urb(sg.urb[i], GFP_NOIO);
-               if (err)
-                       goto cancel;
-       }
-       if (!wait_for_completion_timeout(&sg.comp, TIMEOUT)) {
-               err = -ETIMEDOUT;
-cancel:
-               for (i=0; i<3; i++) {
-                       usb_kill_urb(sg.urb[i]);
-               }
-       }
-       mutex_unlock(&al->card_mutex);
-
-out:
-       usb_free_urb(sg.urb[0]);
-       usb_free_urb(sg.urb[1]);
-       usb_free_urb(sg.urb[2]);
-       return err;
-}
-
-static int alauda_read_page(struct mtd_info *mtd, loff_t from,
-               void *buf, u8 *oob, int *corrected, int *uncorrected)
-{
-       int err;
-
-       err = __alauda_read_page(mtd, from, buf, oob);
-       if (err)
-               return err;
-       correct_data(buf, oob+13, corrected, uncorrected);
-       correct_data(buf+256, oob+8, corrected, uncorrected);
-       return 0;
-}
-
-static int alauda_write_page(struct mtd_info *mtd, loff_t to, void *buf,
-               void *oob)
-{
-       struct alauda_sg_request sg;
-       struct alauda *al = mtd->priv;
-       u32 pba = to >> al->card->blockshift;
-       u32 page = (to >> al->card->pageshift) & al->pagemask;
-       u8 command[] = {
-               ALAUDA_BULK_CMD, ALAUDA_BULK_WRITE_PAGE, PBA_HI(pba),
-               PBA_ZONE(pba), 0, PBA_LO(pba) + page, 32, 0, al->port
-       };
-       int i, err;
-
-       for (i=0; i<3; i++)
-               sg.urb[i] = NULL;
-
-       err = -ENOMEM;
-       for (i=0; i<3; i++) {
-               sg.urb[i] = usb_alloc_urb(0, GFP_NOIO);
-               if (!sg.urb[i])
-                       goto out;
-       }
-       init_completion(&sg.comp);
-       usb_fill_bulk_urb(sg.urb[0], al->dev, al->bulk_out, command, 9,
-                       alauda_complete, NULL);
-       usb_fill_bulk_urb(sg.urb[1], al->dev, al->write_out, buf,mtd->writesize,
-                       alauda_complete, NULL);
-       usb_fill_bulk_urb(sg.urb[2], al->dev, al->write_out, oob, 16,
-                       alauda_complete, &sg.comp);
-
-       mutex_lock(&al->card_mutex);
-       for (i=0; i<3; i++) {
-               err = usb_submit_urb(sg.urb[i], GFP_NOIO);
-               if (err)
-                       goto cancel;
-       }
-       if (!wait_for_completion_timeout(&sg.comp, TIMEOUT)) {
-               err = -ETIMEDOUT;
-cancel:
-               for (i=0; i<3; i++) {
-                       usb_kill_urb(sg.urb[i]);
-               }
-       }
-       mutex_unlock(&al->card_mutex);
-
-out:
-       usb_free_urb(sg.urb[0]);
-       usb_free_urb(sg.urb[1]);
-       usb_free_urb(sg.urb[2]);
-       return err;
-}
-
-static int alauda_erase_block(struct mtd_info *mtd, loff_t ofs)
-{
-       struct alauda_sg_request sg;
-       struct alauda *al = mtd->priv;
-       u32 pba = ofs >> al->card->blockshift;
-       u8 command[] = {
-               ALAUDA_BULK_CMD, ALAUDA_BULK_ERASE_BLOCK, PBA_HI(pba),
-               PBA_ZONE(pba), 0, PBA_LO(pba), 0x02, 0, al->port
-       };
-       u8 buf[2];
-       int i, err;
-
-       for (i=0; i<2; i++)
-               sg.urb[i] = NULL;
-
-       err = -ENOMEM;
-       for (i=0; i<2; i++) {
-               sg.urb[i] = usb_alloc_urb(0, GFP_NOIO);
-               if (!sg.urb[i])
-                       goto out;
-       }
-       init_completion(&sg.comp);
-       usb_fill_bulk_urb(sg.urb[0], al->dev, al->bulk_out, command, 9,
-                       alauda_complete, NULL);
-       usb_fill_bulk_urb(sg.urb[1], al->dev, al->bulk_in, buf, 2,
-                       alauda_complete, &sg.comp);
-
-       mutex_lock(&al->card_mutex);
-       for (i=0; i<2; i++) {
-               err = usb_submit_urb(sg.urb[i], GFP_NOIO);
-               if (err)
-                       goto cancel;
-       }
-       if (!wait_for_completion_timeout(&sg.comp, TIMEOUT)) {
-               err = -ETIMEDOUT;
-cancel:
-               for (i=0; i<2; i++) {
-                       usb_kill_urb(sg.urb[i]);
-               }
-       }
-       mutex_unlock(&al->card_mutex);
-
-out:
-       usb_free_urb(sg.urb[0]);
-       usb_free_urb(sg.urb[1]);
-       return err;
-}
-
-static int alauda_read_oob(struct mtd_info *mtd, loff_t from, void *oob)
-{
-       static u8 ignore_buf[512]; /* write only */
-
-       return __alauda_read_page(mtd, from, ignore_buf, oob);
-}
-
-static int alauda_isbad(struct mtd_info *mtd, loff_t ofs)
-{
-       u8 oob[16];
-       int err;
-
-       err = alauda_read_oob(mtd, ofs, oob);
-       if (err)
-               return err;
-
-       /* A block is marked bad if two or more bits are zero */
-       return hweight8(oob[5]) >= 7 ? 0 : 1;
-}
-
-static int alauda_bounce_read(struct mtd_info *mtd, loff_t from, size_t len,
-               size_t *retlen, u_char *buf)
-{
-       struct alauda *al = mtd->priv;
-       void *bounce_buf;
-       int err, corrected=0, uncorrected=0;
-
-       bounce_buf = kmalloc(mtd->writesize, GFP_KERNEL);
-       if (!bounce_buf)
-               return -ENOMEM;
-
-       *retlen = len;
-       while (len) {
-               u8 oob[16];
-               size_t byte = from & al->bytemask;
-               size_t cplen = min(len, mtd->writesize - byte);
-
-               err = alauda_read_page(mtd, from, bounce_buf, oob,
-                               &corrected, &uncorrected);
-               if (err)
-                       goto out;
-
-               memcpy(buf, bounce_buf + byte, cplen);
-               buf += cplen;
-               from += cplen;
-               len -= cplen;
-       }
-       err = 0;
-       if (corrected)
-               err = 1;        /* return max_bitflips per ecc step */
-       if (uncorrected)
-               err = -EBADMSG;
-out:
-       kfree(bounce_buf);
-       return err;
-}
-
-static int alauda_read(struct mtd_info *mtd, loff_t from, size_t len,
-               size_t *retlen, u_char *buf)
-{
-       struct alauda *al = mtd->priv;
-       int err, corrected=0, uncorrected=0;
-
-       if ((from & al->bytemask) || (len & al->bytemask))
-               return alauda_bounce_read(mtd, from, len, retlen, buf);
-
-       *retlen = len;
-       while (len) {
-               u8 oob[16];
-
-               err = alauda_read_page(mtd, from, buf, oob,
-                               &corrected, &uncorrected);
-               if (err)
-                       return err;
-
-               buf += mtd->writesize;
-               from += mtd->writesize;
-               len -= mtd->writesize;
-       }
-       err = 0;
-       if (corrected)
-               err = 1;        /* return max_bitflips per ecc step */
-       if (uncorrected)
-               err = -EBADMSG;
-       return err;
-}
-
-static int alauda_write(struct mtd_info *mtd, loff_t to, size_t len,
-               size_t *retlen, const u_char *buf)
-{
-       struct alauda *al = mtd->priv;
-       int err;
-
-       if ((to & al->bytemask) || (len & al->bytemask))
-               return -EINVAL;
-
-       *retlen = len;
-       while (len) {
-               u32 page = (to >> al->card->pageshift) & al->pagemask;
-               u8 oob[16] = {  'h', 'e', 'l', 'l', 'o', 0xff, 0xff, 0xff,
-                               0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
-
-               /* don't write to bad blocks */
-               if (page == 0) {
-                       err = alauda_isbad(mtd, to);
-                       if (err) {
-                               return -EIO;
-                       }
-               }
-               nand_calculate_ecc(mtd, buf, &oob[13]);
-               nand_calculate_ecc(mtd, buf+256, &oob[8]);
-
-               err = alauda_write_page(mtd, to, (void*)buf, oob);
-               if (err)
-                       return err;
-
-               buf += mtd->writesize;
-               to += mtd->writesize;
-               len -= mtd->writesize;
-       }
-       return 0;
-}
-
-static int __alauda_erase(struct mtd_info *mtd, struct erase_info *instr)
-{
-       struct alauda *al = mtd->priv;
-       u32 ofs = instr->addr;
-       u32 len = instr->len;
-       int err;
-
-       if ((ofs & al->blockmask) || (len & al->blockmask))
-               return -EINVAL;
-
-       while (len) {
-               /* don't erase bad blocks */
-               err = alauda_isbad(mtd, ofs);
-               if (err > 0)
-                       err = -EIO;
-               if (err < 0)
-                       return err;
-
-               err = alauda_erase_block(mtd, ofs);
-               if (err < 0)
-                       return err;
-
-               ofs += mtd->erasesize;
-               len -= mtd->erasesize;
-       }
-       return 0;
-}
-
-static int alauda_erase(struct mtd_info *mtd, struct erase_info *instr)
-{
-       int err;
-
-       err = __alauda_erase(mtd, instr);
-       instr->state = err ? MTD_ERASE_FAILED : MTD_ERASE_DONE;
-       mtd_erase_callback(instr);
-       return err;
-}
-
-static int alauda_init_media(struct alauda *al)
-{
-       u8 buf[4], *b0=buf, *b1=buf+1;
-       struct alauda_card *card;
-       struct mtd_info *mtd;
-       int err;
-
-       mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
-       if (!mtd)
-               return -ENOMEM;
-
-       for (;;) {
-               err = alauda_get_media_status(al, buf);
-               if (err < 0)
-                       goto error;
-               if (*b0 & 0x10)
-                       break;
-               msleep(20);
-       }
-
-       err = alauda_ack_media(al);
-       if (err)
-               goto error;
-
-       msleep(10);
-
-       err = alauda_get_media_status(al, buf);
-       if (err < 0)
-               goto error;
-
-       if (*b0 != 0x14) {
-               /* media not ready */
-               err = -EIO;
-               goto error;
-       }
-       err = alauda_get_media_signatures(al, buf);
-       if (err < 0)
-               goto error;
-
-       card = get_card(*b1);
-       if (!card) {
-               printk(KERN_ERR"Alauda: unknown card id %02x\n", *b1);
-               err = -EIO;
-               goto error;
-       }
-       printk(KERN_INFO"pagesize=%x\nerasesize=%x\nsize=%xMiB\n",
-                       1<<card->pageshift, 1<<card->blockshift,
-                       1<<(card->chipshift-20));
-       al->card = card;
-       al->pagemask = (1 << (card->blockshift - card->pageshift)) - 1;
-       al->bytemask = (1 << card->pageshift) - 1;
-       al->blockmask = (1 << card->blockshift) - 1;
-
-       mtd->name = "alauda";
-       mtd->size = 1<<card->chipshift;
-       mtd->erasesize = 1<<card->blockshift;
-       mtd->writesize = 1<<card->pageshift;
-       mtd->type = MTD_NANDFLASH;
-       mtd->flags = MTD_CAP_NANDFLASH;
-       mtd->_read = alauda_read;
-       mtd->_write = alauda_write;
-       mtd->_erase = alauda_erase;
-       mtd->_block_isbad = alauda_isbad;
-       mtd->priv = al;
-       mtd->owner = THIS_MODULE;
-       mtd->ecc_strength = 1;
-
-       err = mtd_device_register(mtd, NULL, 0);
-       if (err) {
-               err = -ENFILE;
-               goto error;
-       }
-
-       al->mtd = mtd;
-       alauda_reset(al); /* no clue whether this is necessary */
-       return 0;
-error:
-       kfree(mtd);
-       return err;
-}
-
-static int alauda_check_media(struct alauda *al)
-{
-       u8 buf[2], *b0 = buf, *b1 = buf+1;
-       int err;
-
-       err = alauda_get_media_status(al, buf);
-       if (err < 0)
-               return err;
-
-       if ((*b1 & 0x01) == 0) {
-               /* door open */
-               return -EIO;
-       }
-       if ((*b0 & 0x80) || ((*b0 & 0x1F) == 0x10)) {
-               /* no media ? */
-               return -EIO;
-       }
-       if (*b0 & 0x08) {
-               /* media change ? */
-               return alauda_init_media(al);
-       }
-       return 0;
-}
-
-static int alauda_probe(struct usb_interface *interface,
-               const struct usb_device_id *id)
-{
-       struct alauda *al;
-       struct usb_host_interface *iface;
-       struct usb_endpoint_descriptor *ep,
-                       *ep_in=NULL, *ep_out=NULL, *ep_wr=NULL;
-       int i, err = -ENOMEM;
-
-       al = kzalloc(2*sizeof(*al), GFP_KERNEL);
-       if (!al)
-               goto error;
-
-       kref_init(&al->kref);
-       usb_set_intfdata(interface, al);
-
-       al->dev = usb_get_dev(interface_to_usbdev(interface));
-       al->interface = interface;
-
-       iface = interface->cur_altsetting;
-       for (i = 0; i < iface->desc.bNumEndpoints; ++i) {
-               ep = &iface->endpoint[i].desc;
-
-               if (usb_endpoint_is_bulk_in(ep)) {
-                       ep_in = ep;
-               } else if (usb_endpoint_is_bulk_out(ep)) {
-                       if (i==0)
-                               ep_wr = ep;
-                       else
-                               ep_out = ep;
-               }
-       }
-       err = -EIO;
-       if (!ep_wr || !ep_in || !ep_out)
-               goto error;
-
-       al->write_out = usb_sndbulkpipe(al->dev,
-                       usb_endpoint_num(ep_wr));
-       al->bulk_in = usb_rcvbulkpipe(al->dev,
-                       usb_endpoint_num(ep_in));
-       al->bulk_out = usb_sndbulkpipe(al->dev,
-                       usb_endpoint_num(ep_out));
-
-       /* second device is identical up to now */
-       memcpy(al+1, al, sizeof(*al));
-
-       mutex_init(&al[0].card_mutex);
-       mutex_init(&al[1].card_mutex);
-
-       al[0].port = ALAUDA_PORT_XD;
-       al[1].port = ALAUDA_PORT_SM;
-
-       dev_info(&interface->dev, "alauda probed\n");
-       alauda_check_media(al);
-       alauda_check_media(al+1);
-
-       return 0;
-
-error:
-       if (al)
-               kref_put(&al->kref, alauda_delete);
-       return err;
-}
-
-static void alauda_disconnect(struct usb_interface *interface)
-{
-       struct alauda *al;
-
-       al = usb_get_intfdata(interface);
-       usb_set_intfdata(interface, NULL);
-
-       /* FIXME: prevent more I/O from starting */
-
-       /* decrement our usage count */
-       if (al)
-               kref_put(&al->kref, alauda_delete);
-
-       dev_info(&interface->dev, "alauda gone");
-}
-
-static struct usb_driver alauda_driver = {
-       .name =         "alauda",
-       .probe =        alauda_probe,
-       .disconnect =   alauda_disconnect,
-       .id_table =     alauda_table,
-};
-
-module_usb_driver(alauda_driver);
-
-MODULE_LICENSE("GPL");
index f1d71cdc8aac97f827f7fac7d9a136342f0a6a22..8611eb4b45fca90e73d84db280b55301640fcd93 100644 (file)
@@ -258,7 +258,6 @@ static int ams_delta_init(struct platform_device *pdev)
  out_mtd:
        gpio_free_array(_mandatory_gpio, ARRAY_SIZE(_mandatory_gpio));
 out_gpio:
-       platform_set_drvdata(pdev, NULL);
        gpio_free(AMS_DELTA_GPIO_PIN_NAND_RB);
        iounmap(io_base);
 out_free:
index 2d23d2929438053a255ccad183e111ac7fe633ba..8b2eb3e5896b1b61ad152bb5916896bcf824ada8 100644 (file)
@@ -18,6 +18,9 @@
  *  Add Programmable Multibit ECC support for various AT91 SoC
  *     © Copyright 2012 ATMEL, Hong Xu
  *
+ *  Add Nand Flash Controller support for SAMA5 SoC
+ *     © Copyright 2013 ATMEL, Josh Wu (josh.wu@atmel.com)
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
 #include <linux/mtd/nand.h>
 #include <linux/mtd/partitions.h>
 
+#include <linux/delay.h>
 #include <linux/dmaengine.h>
 #include <linux/gpio.h>
+#include <linux/interrupt.h>
 #include <linux/io.h>
 #include <linux/platform_data/atmel.h>
-#include <linux/pinctrl/consumer.h>
-
-#include <mach/cpu.h>
 
 static int use_dma = 1;
 module_param(use_dma, int, 0);
@@ -58,6 +60,7 @@ module_param(on_flash_bbt, int, 0);
        __raw_writel((value), add + ATMEL_ECC_##reg)
 
 #include "atmel_nand_ecc.h"    /* Hardware ECC registers */
+#include "atmel_nand_nfc.h"    /* Nand Flash Controller definition */
 
 /* oob layout for large page size
  * bad block info is on bytes 0 and 1
@@ -85,6 +88,23 @@ static struct nand_ecclayout atmel_oobinfo_small = {
        },
 };
 
+struct atmel_nfc {
+       void __iomem            *base_cmd_regs;
+       void __iomem            *hsmc_regs;
+       void __iomem            *sram_bank0;
+       dma_addr_t              sram_bank0_phys;
+       bool                    use_nfc_sram;
+       bool                    write_by_sram;
+
+       bool                    is_initialized;
+       struct completion       comp_nfc;
+
+       /* Point to the sram bank which include readed data via NFC */
+       void __iomem            *data_in_sram;
+       bool                    will_write_sram;
+};
+static struct atmel_nfc        nand_nfc;
+
 struct atmel_nand_host {
        struct nand_chip        nand_chip;
        struct mtd_info         mtd;
@@ -97,6 +117,8 @@ struct atmel_nand_host {
        struct completion       comp;
        struct dma_chan         *dma_chan;
 
+       struct atmel_nfc        *nfc;
+
        bool                    has_pmecc;
        u8                      pmecc_corr_cap;
        u16                     pmecc_sector_size;
@@ -128,11 +150,6 @@ struct atmel_nand_host {
 
 static struct nand_ecclayout atmel_pmecc_oobinfo;
 
-static int cpu_has_dma(void)
-{
-       return cpu_is_at91sam9rl() || cpu_is_at91sam9g45();
-}
-
 /*
  * Enable NAND.
  */
@@ -186,21 +203,103 @@ static int atmel_nand_device_ready(struct mtd_info *mtd)
                 !!host->board.rdy_pin_active_low;
 }
 
+/* Set up for hardware ready pin and enable pin. */
+static int atmel_nand_set_enable_ready_pins(struct mtd_info *mtd)
+{
+       struct nand_chip *chip = mtd->priv;
+       struct atmel_nand_host *host = chip->priv;
+       int res = 0;
+
+       if (gpio_is_valid(host->board.rdy_pin)) {
+               res = devm_gpio_request(host->dev,
+                               host->board.rdy_pin, "nand_rdy");
+               if (res < 0) {
+                       dev_err(host->dev,
+                               "can't request rdy gpio %d\n",
+                               host->board.rdy_pin);
+                       return res;
+               }
+
+               res = gpio_direction_input(host->board.rdy_pin);
+               if (res < 0) {
+                       dev_err(host->dev,
+                               "can't request input direction rdy gpio %d\n",
+                               host->board.rdy_pin);
+                       return res;
+               }
+
+               chip->dev_ready = atmel_nand_device_ready;
+       }
+
+       if (gpio_is_valid(host->board.enable_pin)) {
+               res = devm_gpio_request(host->dev,
+                               host->board.enable_pin, "nand_enable");
+               if (res < 0) {
+                       dev_err(host->dev,
+                               "can't request enable gpio %d\n",
+                               host->board.enable_pin);
+                       return res;
+               }
+
+               res = gpio_direction_output(host->board.enable_pin, 1);
+               if (res < 0) {
+                       dev_err(host->dev,
+                               "can't request output direction enable gpio %d\n",
+                               host->board.enable_pin);
+                       return res;
+               }
+       }
+
+       return res;
+}
+
+static void memcpy32_fromio(void *trg, const void __iomem  *src, size_t size)
+{
+       int i;
+       u32 *t = trg;
+       const __iomem u32 *s = src;
+
+       for (i = 0; i < (size >> 2); i++)
+               *t++ = readl_relaxed(s++);
+}
+
+static void memcpy32_toio(void __iomem *trg, const void *src, int size)
+{
+       int i;
+       u32 __iomem *t = trg;
+       const u32 *s = src;
+
+       for (i = 0; i < (size >> 2); i++)
+               writel_relaxed(*s++, t++);
+}
+
 /*
  * Minimal-overhead PIO for data access.
  */
 static void atmel_read_buf8(struct mtd_info *mtd, u8 *buf, int len)
 {
        struct nand_chip        *nand_chip = mtd->priv;
+       struct atmel_nand_host *host = nand_chip->priv;
 
-       __raw_readsb(nand_chip->IO_ADDR_R, buf, len);
+       if (host->nfc && host->nfc->use_nfc_sram && host->nfc->data_in_sram) {
+               memcpy32_fromio(buf, host->nfc->data_in_sram, len);
+               host->nfc->data_in_sram += len;
+       } else {
+               __raw_readsb(nand_chip->IO_ADDR_R, buf, len);
+       }
 }
 
 static void atmel_read_buf16(struct mtd_info *mtd, u8 *buf, int len)
 {
        struct nand_chip        *nand_chip = mtd->priv;
+       struct atmel_nand_host *host = nand_chip->priv;
 
-       __raw_readsw(nand_chip->IO_ADDR_R, buf, len / 2);
+       if (host->nfc && host->nfc->use_nfc_sram && host->nfc->data_in_sram) {
+               memcpy32_fromio(buf, host->nfc->data_in_sram, len);
+               host->nfc->data_in_sram += len;
+       } else {
+               __raw_readsw(nand_chip->IO_ADDR_R, buf, len / 2);
+       }
 }
 
 static void atmel_write_buf8(struct mtd_info *mtd, const u8 *buf, int len)
@@ -222,6 +321,40 @@ static void dma_complete_func(void *completion)
        complete(completion);
 }
 
+static int nfc_set_sram_bank(struct atmel_nand_host *host, unsigned int bank)
+{
+       /* NFC only has two banks. Must be 0 or 1 */
+       if (bank > 1)
+               return -EINVAL;
+
+       if (bank) {
+               /* Only for a 2k-page or lower flash, NFC can handle 2 banks */
+               if (host->mtd.writesize > 2048)
+                       return -EINVAL;
+               nfc_writel(host->nfc->hsmc_regs, BANK, ATMEL_HSMC_NFC_BANK1);
+       } else {
+               nfc_writel(host->nfc->hsmc_regs, BANK, ATMEL_HSMC_NFC_BANK0);
+       }
+
+       return 0;
+}
+
+static uint nfc_get_sram_off(struct atmel_nand_host *host)
+{
+       if (nfc_readl(host->nfc->hsmc_regs, BANK) & ATMEL_HSMC_NFC_BANK1)
+               return NFC_SRAM_BANK1_OFFSET;
+       else
+               return 0;
+}
+
+static dma_addr_t nfc_sram_phys(struct atmel_nand_host *host)
+{
+       if (nfc_readl(host->nfc->hsmc_regs, BANK) & ATMEL_HSMC_NFC_BANK1)
+               return host->nfc->sram_bank0_phys + NFC_SRAM_BANK1_OFFSET;
+       else
+               return host->nfc->sram_bank0_phys;
+}
+
 static int atmel_nand_dma_op(struct mtd_info *mtd, void *buf, int len,
                               int is_read)
 {
@@ -235,6 +368,7 @@ static int atmel_nand_dma_op(struct mtd_info *mtd, void *buf, int len,
        void *p = buf;
        int err = -EIO;
        enum dma_data_direction dir = is_read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+       struct atmel_nfc *nfc = host->nfc;
 
        if (buf >= high_memory)
                goto err_buf;
@@ -251,11 +385,20 @@ static int atmel_nand_dma_op(struct mtd_info *mtd, void *buf, int len,
        }
 
        if (is_read) {
-               dma_src_addr = host->io_phys;
+               if (nfc && nfc->data_in_sram)
+                       dma_src_addr = nfc_sram_phys(host) + (nfc->data_in_sram
+                               - (nfc->sram_bank0 + nfc_get_sram_off(host)));
+               else
+                       dma_src_addr = host->io_phys;
+
                dma_dst_addr = phys_addr;
        } else {
                dma_src_addr = phys_addr;
-               dma_dst_addr = host->io_phys;
+
+               if (nfc && nfc->write_by_sram)
+                       dma_dst_addr = nfc_sram_phys(host);
+               else
+                       dma_dst_addr = host->io_phys;
        }
 
        tx = dma_dev->device_prep_dma_memcpy(host->dma_chan, dma_dst_addr,
@@ -278,6 +421,10 @@ static int atmel_nand_dma_op(struct mtd_info *mtd, void *buf, int len,
        dma_async_issue_pending(host->dma_chan);
        wait_for_completion(&host->comp);
 
+       if (is_read && nfc && nfc->data_in_sram)
+               /* After read data from SRAM, need to increase the position */
+               nfc->data_in_sram += len;
+
        err = 0;
 
 err_dma:
@@ -366,43 +513,34 @@ static void __iomem *pmecc_get_alpha_to(struct atmel_nand_host *host)
                        table_size * sizeof(int16_t);
 }
 
-static void pmecc_data_free(struct atmel_nand_host *host)
-{
-       kfree(host->pmecc_partial_syn);
-       kfree(host->pmecc_si);
-       kfree(host->pmecc_lmu);
-       kfree(host->pmecc_smu);
-       kfree(host->pmecc_mu);
-       kfree(host->pmecc_dmu);
-       kfree(host->pmecc_delta);
-}
-
 static int pmecc_data_alloc(struct atmel_nand_host *host)
 {
        const int cap = host->pmecc_corr_cap;
+       int size;
+
+       size = (2 * cap + 1) * sizeof(int16_t);
+       host->pmecc_partial_syn = devm_kzalloc(host->dev, size, GFP_KERNEL);
+       host->pmecc_si = devm_kzalloc(host->dev, size, GFP_KERNEL);
+       host->pmecc_lmu = devm_kzalloc(host->dev,
+                       (cap + 1) * sizeof(int16_t), GFP_KERNEL);
+       host->pmecc_smu = devm_kzalloc(host->dev,
+                       (cap + 2) * size, GFP_KERNEL);
+
+       size = (cap + 1) * sizeof(int);
+       host->pmecc_mu = devm_kzalloc(host->dev, size, GFP_KERNEL);
+       host->pmecc_dmu = devm_kzalloc(host->dev, size, GFP_KERNEL);
+       host->pmecc_delta = devm_kzalloc(host->dev, size, GFP_KERNEL);
+
+       if (!host->pmecc_partial_syn ||
+               !host->pmecc_si ||
+               !host->pmecc_lmu ||
+               !host->pmecc_smu ||
+               !host->pmecc_mu ||
+               !host->pmecc_dmu ||
+               !host->pmecc_delta)
+               return -ENOMEM;
 
-       host->pmecc_partial_syn = kzalloc((2 * cap + 1) * sizeof(int16_t),
-                                       GFP_KERNEL);
-       host->pmecc_si = kzalloc((2 * cap + 1) * sizeof(int16_t), GFP_KERNEL);
-       host->pmecc_lmu = kzalloc((cap + 1) * sizeof(int16_t), GFP_KERNEL);
-       host->pmecc_smu = kzalloc((cap + 2) * (2 * cap + 1) * sizeof(int16_t),
-                                       GFP_KERNEL);
-       host->pmecc_mu = kzalloc((cap + 1) * sizeof(int), GFP_KERNEL);
-       host->pmecc_dmu = kzalloc((cap + 1) * sizeof(int), GFP_KERNEL);
-       host->pmecc_delta = kzalloc((cap + 1) * sizeof(int), GFP_KERNEL);
-
-       if (host->pmecc_partial_syn &&
-                       host->pmecc_si &&
-                       host->pmecc_lmu &&
-                       host->pmecc_smu &&
-                       host->pmecc_mu &&
-                       host->pmecc_dmu &&
-                       host->pmecc_delta)
-               return 0;
-
-       /* error happened */
-       pmecc_data_free(host);
-       return -ENOMEM;
+       return 0;
 }
 
 static void pmecc_gen_syndrome(struct mtd_info *mtd, int sector)
@@ -763,6 +901,30 @@ normal_check:
        return total_err;
 }
 
+static void pmecc_enable(struct atmel_nand_host *host, int ecc_op)
+{
+       u32 val;
+
+       if (ecc_op != NAND_ECC_READ && ecc_op != NAND_ECC_WRITE) {
+               dev_err(host->dev, "atmel_nand: wrong pmecc operation type!");
+               return;
+       }
+
+       pmecc_writel(host->ecc, CTRL, PMECC_CTRL_RST);
+       pmecc_writel(host->ecc, CTRL, PMECC_CTRL_DISABLE);
+       val = pmecc_readl_relaxed(host->ecc, CFG);
+
+       if (ecc_op == NAND_ECC_READ)
+               pmecc_writel(host->ecc, CFG, (val & ~PMECC_CFG_WRITE_OP)
+                       | PMECC_CFG_AUTO_ENABLE);
+       else
+               pmecc_writel(host->ecc, CFG, (val | PMECC_CFG_WRITE_OP)
+                       & ~PMECC_CFG_AUTO_ENABLE);
+
+       pmecc_writel(host->ecc, CTRL, PMECC_CTRL_ENABLE);
+       pmecc_writel(host->ecc, CTRL, PMECC_CTRL_DATA);
+}
+
 static int atmel_nand_pmecc_read_page(struct mtd_info *mtd,
        struct nand_chip *chip, uint8_t *buf, int oob_required, int page)
 {
@@ -774,13 +936,8 @@ static int atmel_nand_pmecc_read_page(struct mtd_info *mtd,
        unsigned long end_time;
        int bitflips = 0;
 
-       pmecc_writel(host->ecc, CTRL, PMECC_CTRL_RST);
-       pmecc_writel(host->ecc, CTRL, PMECC_CTRL_DISABLE);
-       pmecc_writel(host->ecc, CFG, (pmecc_readl_relaxed(host->ecc, CFG)
-               & ~PMECC_CFG_WRITE_OP) | PMECC_CFG_AUTO_ENABLE);
-
-       pmecc_writel(host->ecc, CTRL, PMECC_CTRL_ENABLE);
-       pmecc_writel(host->ecc, CTRL, PMECC_CTRL_DATA);
+       if (!host->nfc || !host->nfc->use_nfc_sram)
+               pmecc_enable(host, NAND_ECC_READ);
 
        chip->read_buf(mtd, buf, eccsize);
        chip->read_buf(mtd, oob, mtd->oobsize);
@@ -813,16 +970,10 @@ static int atmel_nand_pmecc_write_page(struct mtd_info *mtd,
        int i, j;
        unsigned long end_time;
 
-       pmecc_writel(host->ecc, CTRL, PMECC_CTRL_RST);
-       pmecc_writel(host->ecc, CTRL, PMECC_CTRL_DISABLE);
-
-       pmecc_writel(host->ecc, CFG, (pmecc_readl_relaxed(host->ecc, CFG) |
-               PMECC_CFG_WRITE_OP) & ~PMECC_CFG_AUTO_ENABLE);
-
-       pmecc_writel(host->ecc, CTRL, PMECC_CTRL_ENABLE);
-       pmecc_writel(host->ecc, CTRL, PMECC_CTRL_DATA);
-
-       chip->write_buf(mtd, (u8 *)buf, mtd->writesize);
+       if (!host->nfc || !host->nfc->write_by_sram) {
+               pmecc_enable(host, NAND_ECC_WRITE);
+               chip->write_buf(mtd, (u8 *)buf, mtd->writesize);
+       }
 
        end_time = jiffies + msecs_to_jiffies(PMECC_MAX_TIMEOUT_MS);
        while ((pmecc_readl_relaxed(host->ecc, SR) & PMECC_SR_BUSY)) {
@@ -967,11 +1118,11 @@ static int pmecc_choose_ecc(struct atmel_nand_host *host,
                        host->pmecc_corr_cap = 2;
                else if (*cap <= 4)
                        host->pmecc_corr_cap = 4;
-               else if (*cap < 8)
+               else if (*cap <= 8)
                        host->pmecc_corr_cap = 8;
-               else if (*cap < 12)
+               else if (*cap <= 12)
                        host->pmecc_corr_cap = 12;
-               else if (*cap < 24)
+               else if (*cap <= 24)
                        host->pmecc_corr_cap = 24;
                else
                        return -EINVAL;
@@ -1002,7 +1153,7 @@ static int __init atmel_pmecc_nand_init_params(struct platform_device *pdev,
                return err_no;
        }
 
-       if (cap != host->pmecc_corr_cap ||
+       if (cap > host->pmecc_corr_cap ||
                        sector_size != host->pmecc_sector_size)
                dev_info(host->dev, "WARNING: Be Caution! Using different PMECC parameters from Nand ONFI ECC reqirement.\n");
 
@@ -1023,27 +1174,28 @@ static int __init atmel_pmecc_nand_init_params(struct platform_device *pdev,
                return 0;
        }
 
-       host->ecc = ioremap(regs->start, resource_size(regs));
-       if (host->ecc == NULL) {
+       host->ecc = devm_ioremap_resource(&pdev->dev, regs);
+       if (IS_ERR(host->ecc)) {
                dev_err(host->dev, "ioremap failed\n");
-               err_no = -EIO;
-               goto err_pmecc_ioremap;
+               err_no = PTR_ERR(host->ecc);
+               goto err;
        }
 
        regs_pmerr = platform_get_resource(pdev, IORESOURCE_MEM, 2);
-       regs_rom = platform_get_resource(pdev, IORESOURCE_MEM, 3);
-       if (regs_pmerr && regs_rom) {
-               host->pmerrloc_base = ioremap(regs_pmerr->start,
-                       resource_size(regs_pmerr));
-               host->pmecc_rom_base = ioremap(regs_rom->start,
-                       resource_size(regs_rom));
+       host->pmerrloc_base = devm_ioremap_resource(&pdev->dev, regs_pmerr);
+       if (IS_ERR(host->pmerrloc_base)) {
+               dev_err(host->dev,
+                       "Can not get I/O resource for PMECC ERRLOC controller!\n");
+               err_no = PTR_ERR(host->pmerrloc_base);
+               goto err;
        }
 
-       if (!host->pmerrloc_base || !host->pmecc_rom_base) {
-               dev_err(host->dev,
-                       "Can not get I/O resource for PMECC ERRLOC controller or ROM!\n");
-               err_no = -EIO;
-               goto err_pmloc_ioremap;
+       regs_rom = platform_get_resource(pdev, IORESOURCE_MEM, 3);
+       host->pmecc_rom_base = devm_ioremap_resource(&pdev->dev, regs_rom);
+       if (IS_ERR(host->pmecc_rom_base)) {
+               dev_err(host->dev, "Can not get I/O resource for ROM!\n");
+               err_no = PTR_ERR(host->pmecc_rom_base);
+               goto err;
        }
 
        /* ECC is calculated for the whole page (1 step) */
@@ -1052,7 +1204,8 @@ static int __init atmel_pmecc_nand_init_params(struct platform_device *pdev,
        /* set ECC page size and oob layout */
        switch (mtd->writesize) {
        case 2048:
-               host->pmecc_degree = PMECC_GF_DIMENSION_13;
+               host->pmecc_degree = (sector_size == 512) ?
+                       PMECC_GF_DIMENSION_13 : PMECC_GF_DIMENSION_14;
                host->pmecc_cw_len = (1 << host->pmecc_degree) - 1;
                host->pmecc_sector_number = mtd->writesize / sector_size;
                host->pmecc_bytes_per_sector = pmecc_get_ecc_bytes(
@@ -1068,7 +1221,7 @@ static int __init atmel_pmecc_nand_init_params(struct platform_device *pdev,
                if (nand_chip->ecc.bytes > mtd->oobsize - 2) {
                        dev_err(host->dev, "No room for ECC bytes\n");
                        err_no = -EINVAL;
-                       goto err_no_ecc_room;
+                       goto err;
                }
                pmecc_config_ecc_layout(&atmel_pmecc_oobinfo,
                                        mtd->oobsize,
@@ -1093,7 +1246,7 @@ static int __init atmel_pmecc_nand_init_params(struct platform_device *pdev,
        if (err_no) {
                dev_err(host->dev,
                                "Cannot allocate memory for PMECC computation!\n");
-               goto err_pmecc_data_alloc;
+               goto err;
        }
 
        nand_chip->ecc.read_page = atmel_nand_pmecc_read_page;
@@ -1103,15 +1256,7 @@ static int __init atmel_pmecc_nand_init_params(struct platform_device *pdev,
 
        return 0;
 
-err_pmecc_data_alloc:
-err_no_ecc_room:
-err_pmloc_ioremap:
-       iounmap(host->ecc);
-       if (host->pmerrloc_base)
-               iounmap(host->pmerrloc_base);
-       if (host->pmecc_rom_base)
-               iounmap(host->pmecc_rom_base);
-err_pmecc_ioremap:
+err:
        return err_no;
 }
 
@@ -1174,10 +1319,9 @@ static int atmel_nand_read_page(struct mtd_info *mtd, struct nand_chip *chip,
         * Workaround: Reset the parity registers before reading the
         * actual data.
         */
-       if (cpu_is_at32ap7000()) {
-               struct atmel_nand_host *host = chip->priv;
+       struct atmel_nand_host *host = chip->priv;
+       if (host->board.need_reset_workaround)
                ecc_writel(host->ecc, CR, ATMEL_ECC_RST);
-       }
 
        /* read the page */
        chip->read_buf(mtd, p, eccsize);
@@ -1298,11 +1442,11 @@ static int atmel_nand_correct(struct mtd_info *mtd, u_char *dat,
  */
 static void atmel_nand_hwctl(struct mtd_info *mtd, int mode)
 {
-       if (cpu_is_at32ap7000()) {
-               struct nand_chip *nand_chip = mtd->priv;
-               struct atmel_nand_host *host = nand_chip->priv;
+       struct nand_chip *nand_chip = mtd->priv;
+       struct atmel_nand_host *host = nand_chip->priv;
+
+       if (host->board.need_reset_workaround)
                ecc_writel(host->ecc, CR, ATMEL_ECC_RST);
-       }
 }
 
 #if defined(CONFIG_OF)
@@ -1337,6 +1481,8 @@ static int atmel_of_init_port(struct atmel_nand_host *host,
 
        board->on_flash_bbt = of_get_nand_on_flash_bbt(np);
 
+       board->has_dma = of_property_read_bool(np, "atmel,nand-has-dma");
+
        if (of_get_nand_bus_width(np) == 16)
                board->bus_width_16 = 1;
 
@@ -1348,6 +1494,9 @@ static int atmel_of_init_port(struct atmel_nand_host *host,
 
        host->has_pmecc = of_property_read_bool(np, "atmel,has-pmecc");
 
+       /* load the NFC driver if one is present */
+       of_platform_populate(np, NULL, NULL, host->dev);
+
        if (!(board->ecc_mode == NAND_ECC_HW) || !host->has_pmecc)
                return 0;       /* Not using PMECC */
 
@@ -1414,10 +1563,10 @@ static int __init atmel_hw_nand_init_params(struct platform_device *pdev,
                return 0;
        }
 
-       host->ecc = ioremap(regs->start, resource_size(regs));
-       if (host->ecc == NULL) {
+       host->ecc = devm_ioremap_resource(&pdev->dev, regs);
+       if (IS_ERR(host->ecc)) {
                dev_err(host->dev, "ioremap failed\n");
-               return -EIO;
+               return PTR_ERR(host->ecc);
        }
 
        /* ECC is calculated for the whole page (1 step) */
@@ -1459,6 +1608,382 @@ static int __init atmel_hw_nand_init_params(struct platform_device *pdev,
        return 0;
 }
 
+/* SMC interrupt service routine */
+static irqreturn_t hsmc_interrupt(int irq, void *dev_id)
+{
+       struct atmel_nand_host *host = dev_id;
+       u32 status, mask, pending;
+       irqreturn_t ret = IRQ_HANDLED;
+
+       status = nfc_readl(host->nfc->hsmc_regs, SR);
+       mask = nfc_readl(host->nfc->hsmc_regs, IMR);
+       pending = status & mask;
+
+       if (pending & NFC_SR_XFR_DONE) {
+               complete(&host->nfc->comp_nfc);
+               nfc_writel(host->nfc->hsmc_regs, IDR, NFC_SR_XFR_DONE);
+       } else if (pending & NFC_SR_RB_EDGE) {
+               complete(&host->nfc->comp_nfc);
+               nfc_writel(host->nfc->hsmc_regs, IDR, NFC_SR_RB_EDGE);
+       } else if (pending & NFC_SR_CMD_DONE) {
+               complete(&host->nfc->comp_nfc);
+               nfc_writel(host->nfc->hsmc_regs, IDR, NFC_SR_CMD_DONE);
+       } else {
+               ret = IRQ_NONE;
+       }
+
+       return ret;
+}
+
+/* NFC(Nand Flash Controller) related functions */
+static int nfc_wait_interrupt(struct atmel_nand_host *host, u32 flag)
+{
+       unsigned long timeout;
+       init_completion(&host->nfc->comp_nfc);
+
+       /* Enable the interrupt that we need to wait for */
+       nfc_writel(host->nfc->hsmc_regs, IER, flag);
+
+       timeout = wait_for_completion_timeout(&host->nfc->comp_nfc,
+                       msecs_to_jiffies(NFC_TIME_OUT_MS));
+       if (timeout)
+               return 0;
+
+       /* Timed out waiting for the interrupt */
+       dev_err(host->dev, "Time out to wait for interrupt: 0x%08x\n", flag);
+       return -ETIMEDOUT;
+}
+
+static int nfc_send_command(struct atmel_nand_host *host,
+       unsigned int cmd, unsigned int addr, unsigned char cycle0)
+{
+       unsigned long timeout;
+       dev_dbg(host->dev,
+               "nfc_cmd: 0x%08x, addr1234: 0x%08x, cycle0: 0x%02x\n",
+               cmd, addr, cycle0);
+
+       timeout = jiffies + msecs_to_jiffies(NFC_TIME_OUT_MS);
+       while (nfc_cmd_readl(NFCADDR_CMD_NFCBUSY, host->nfc->base_cmd_regs)
+                       & NFCADDR_CMD_NFCBUSY) {
+               if (time_after(jiffies, timeout)) {
+                       dev_err(host->dev,
+                               "Time out to wait CMD_NFCBUSY ready!\n");
+                       return -ETIMEDOUT;
+               }
+       }
+       nfc_writel(host->nfc->hsmc_regs, CYCLE0, cycle0);
+       nfc_cmd_addr1234_writel(cmd, addr, host->nfc->base_cmd_regs);
+       return nfc_wait_interrupt(host, NFC_SR_CMD_DONE);
+}
+
+static int nfc_device_ready(struct mtd_info *mtd)
+{
+       struct nand_chip *nand_chip = mtd->priv;
+       struct atmel_nand_host *host = nand_chip->priv;
+       if (!nfc_wait_interrupt(host, NFC_SR_RB_EDGE))
+               return 1;
+       return 0;
+}
+
+static void nfc_select_chip(struct mtd_info *mtd, int chip)
+{
+       struct nand_chip *nand_chip = mtd->priv;
+       struct atmel_nand_host *host = nand_chip->priv;
+
+       if (chip == -1)
+               nfc_writel(host->nfc->hsmc_regs, CTRL, NFC_CTRL_DISABLE);
+       else
+               nfc_writel(host->nfc->hsmc_regs, CTRL, NFC_CTRL_ENABLE);
+}
+
+static int nfc_make_addr(struct mtd_info *mtd, int column, int page_addr,
+               unsigned int *addr1234, unsigned int *cycle0)
+{
+       struct nand_chip *chip = mtd->priv;
+
+       int acycle = 0;
+       unsigned char addr_bytes[8];
+       int index = 0, bit_shift;
+
+       BUG_ON(addr1234 == NULL || cycle0 == NULL);
+
+       *cycle0 = 0;
+       *addr1234 = 0;
+
+       if (column != -1) {
+               if (chip->options & NAND_BUSWIDTH_16)
+                       column >>= 1;
+               addr_bytes[acycle++] = column & 0xff;
+               if (mtd->writesize > 512)
+                       addr_bytes[acycle++] = (column >> 8) & 0xff;
+       }
+
+       if (page_addr != -1) {
+               addr_bytes[acycle++] = page_addr & 0xff;
+               addr_bytes[acycle++] = (page_addr >> 8) & 0xff;
+               if (chip->chipsize > (128 << 20))
+                       addr_bytes[acycle++] = (page_addr >> 16) & 0xff;
+       }
+
+       if (acycle > 4)
+               *cycle0 = addr_bytes[index++];
+
+       for (bit_shift = 0; index < acycle; bit_shift += 8)
+               *addr1234 += addr_bytes[index++] << bit_shift;
+
+       /* return acycle in cmd register */
+       return acycle << NFCADDR_CMD_ACYCLE_BIT_POS;
+}
+
+static void nfc_nand_command(struct mtd_info *mtd, unsigned int command,
+                               int column, int page_addr)
+{
+       struct nand_chip *chip = mtd->priv;
+       struct atmel_nand_host *host = chip->priv;
+       unsigned long timeout;
+       unsigned int nfc_addr_cmd = 0;
+
+       unsigned int cmd1 = command << NFCADDR_CMD_CMD1_BIT_POS;
+
+       /* Set default settings: no cmd2, no addr cycle. read from nand */
+       unsigned int cmd2 = 0;
+       unsigned int vcmd2 = 0;
+       int acycle = NFCADDR_CMD_ACYCLE_NONE;
+       int csid = NFCADDR_CMD_CSID_3;
+       int dataen = NFCADDR_CMD_DATADIS;
+       int nfcwr = NFCADDR_CMD_NFCRD;
+       unsigned int addr1234 = 0;
+       unsigned int cycle0 = 0;
+       bool do_addr = true;
+       host->nfc->data_in_sram = NULL;
+
+       dev_dbg(host->dev, "%s: cmd = 0x%02x, col = 0x%08x, page = 0x%08x\n",
+            __func__, command, column, page_addr);
+
+       switch (command) {
+       case NAND_CMD_RESET:
+               nfc_addr_cmd = cmd1 | acycle | csid | dataen | nfcwr;
+               nfc_send_command(host, nfc_addr_cmd, addr1234, cycle0);
+               udelay(chip->chip_delay);
+
+               nfc_nand_command(mtd, NAND_CMD_STATUS, -1, -1);
+               timeout = jiffies + msecs_to_jiffies(NFC_TIME_OUT_MS);
+               while (!(chip->read_byte(mtd) & NAND_STATUS_READY)) {
+                       if (time_after(jiffies, timeout)) {
+                               dev_err(host->dev,
+                                       "Time out to wait status ready!\n");
+                               break;
+                       }
+               }
+               return;
+       case NAND_CMD_STATUS:
+               do_addr = false;
+               break;
+       case NAND_CMD_PARAM:
+       case NAND_CMD_READID:
+               do_addr = false;
+               acycle = NFCADDR_CMD_ACYCLE_1;
+               if (column != -1)
+                       addr1234 = column;
+               break;
+       case NAND_CMD_RNDOUT:
+               cmd2 = NAND_CMD_RNDOUTSTART << NFCADDR_CMD_CMD2_BIT_POS;
+               vcmd2 = NFCADDR_CMD_VCMD2;
+               break;
+       case NAND_CMD_READ0:
+       case NAND_CMD_READOOB:
+               if (command == NAND_CMD_READOOB) {
+                       column += mtd->writesize;
+                       command = NAND_CMD_READ0; /* only READ0 is valid */
+                       cmd1 = command << NFCADDR_CMD_CMD1_BIT_POS;
+               }
+               if (host->nfc->use_nfc_sram) {
+                       /* Enable Data transfer to sram */
+                       dataen = NFCADDR_CMD_DATAEN;
+
+                       /* Need to enable PMECC now, since the NFC will
+                        * transfer data on the bus after the read command.
+                        */
+                       if (chip->ecc.mode == NAND_ECC_HW && host->has_pmecc)
+                               pmecc_enable(host, NAND_ECC_READ);
+               }
+
+               cmd2 = NAND_CMD_READSTART << NFCADDR_CMD_CMD2_BIT_POS;
+               vcmd2 = NFCADDR_CMD_VCMD2;
+               break;
+       /* For programming commands, the cmd needs write enable set */
+       case NAND_CMD_PAGEPROG:
+       case NAND_CMD_SEQIN:
+       case NAND_CMD_RNDIN:
+               nfcwr = NFCADDR_CMD_NFCWR;
+               if (host->nfc->will_write_sram && command == NAND_CMD_SEQIN)
+                       dataen = NFCADDR_CMD_DATAEN;
+               break;
+       default:
+               break;
+       }
+
+       if (do_addr)
+               acycle = nfc_make_addr(mtd, column, page_addr, &addr1234,
+                               &cycle0);
+
+       nfc_addr_cmd = cmd1 | cmd2 | vcmd2 | acycle | csid | dataen | nfcwr;
+       nfc_send_command(host, nfc_addr_cmd, addr1234, cycle0);
+
+       if (dataen == NFCADDR_CMD_DATAEN)
+               if (nfc_wait_interrupt(host, NFC_SR_XFR_DONE))
+                       dev_err(host->dev, "something wrong, No XFR_DONE interrupt comes.\n");
+
+       /*
+        * Program and erase have their own busy handlers status, sequential
+        * in, and deplete1 need no delay.
+        */
+       switch (command) {
+       case NAND_CMD_CACHEDPROG:
+       case NAND_CMD_PAGEPROG:
+       case NAND_CMD_ERASE1:
+       case NAND_CMD_ERASE2:
+       case NAND_CMD_RNDIN:
+       case NAND_CMD_STATUS:
+       case NAND_CMD_RNDOUT:
+       case NAND_CMD_SEQIN:
+       case NAND_CMD_READID:
+               return;
+
+       case NAND_CMD_READ0:
+               if (dataen == NFCADDR_CMD_DATAEN) {
+                       host->nfc->data_in_sram = host->nfc->sram_bank0 +
+                               nfc_get_sram_off(host);
+                       return;
+               }
+               /* fall through */
+       default:
+               nfc_wait_interrupt(host, NFC_SR_RB_EDGE);
+       }
+}
+
+static int nfc_sram_write_page(struct mtd_info *mtd, struct nand_chip *chip,
+                       uint32_t offset, int data_len, const uint8_t *buf,
+                       int oob_required, int page, int cached, int raw)
+{
+       int cfg, len;
+       int status = 0;
+       struct atmel_nand_host *host = chip->priv;
+       void __iomem *sram = host->nfc->sram_bank0 + nfc_get_sram_off(host);
+
+       /* Subpage write is not supported */
+       if (offset || (data_len < mtd->writesize))
+               return -EINVAL;
+
+       cfg = nfc_readl(host->nfc->hsmc_regs, CFG);
+       len = mtd->writesize;
+
+       if (unlikely(raw)) {
+               len += mtd->oobsize;
+               nfc_writel(host->nfc->hsmc_regs, CFG, cfg | NFC_CFG_WSPARE);
+       } else
+               nfc_writel(host->nfc->hsmc_regs, CFG, cfg & ~NFC_CFG_WSPARE);
+
+       /* Copy page data to sram that will write to nand via NFC */
+       if (use_dma) {
+               if (atmel_nand_dma_op(mtd, (void *)buf, len, 0) != 0)
+                       /* Fall back to use cpu copy */
+                       memcpy32_toio(sram, buf, len);
+       } else {
+               memcpy32_toio(sram, buf, len);
+       }
+
+       if (chip->ecc.mode == NAND_ECC_HW && host->has_pmecc)
+               /*
+                * When using NFC sram, PMECC must be set up before sending
+                * the NAND_CMD_SEQIN command, since once that command is
+                * sent the nfc transfers between sram and nand.
+                */
+               pmecc_enable(host, NAND_ECC_WRITE);
+
+       host->nfc->will_write_sram = true;
+       chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
+       host->nfc->will_write_sram = false;
+
+       if (likely(!raw))
+               /* Need to write ecc into oob */
+               status = chip->ecc.write_page(mtd, chip, buf, oob_required);
+
+       if (status < 0)
+               return status;
+
+       chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+       status = chip->waitfunc(mtd, chip);
+
+       if ((status & NAND_STATUS_FAIL) && (chip->errstat))
+               status = chip->errstat(mtd, chip, FL_WRITING, status, page);
+
+       if (status & NAND_STATUS_FAIL)
+               return -EIO;
+
+       return 0;
+}
+
+static int nfc_sram_init(struct mtd_info *mtd)
+{
+       struct nand_chip *chip = mtd->priv;
+       struct atmel_nand_host *host = chip->priv;
+       int res = 0;
+
+       /* Initialize the NFC CFG register */
+       unsigned int cfg_nfc = 0;
+
+       /* set page size and oob layout */
+       switch (mtd->writesize) {
+       case 512:
+               cfg_nfc = NFC_CFG_PAGESIZE_512;
+               break;
+       case 1024:
+               cfg_nfc = NFC_CFG_PAGESIZE_1024;
+               break;
+       case 2048:
+               cfg_nfc = NFC_CFG_PAGESIZE_2048;
+               break;
+       case 4096:
+               cfg_nfc = NFC_CFG_PAGESIZE_4096;
+               break;
+       case 8192:
+               cfg_nfc = NFC_CFG_PAGESIZE_8192;
+               break;
+       default:
+               dev_err(host->dev, "Unsupported page size for NFC.\n");
+               res = -ENXIO;
+               return res;
+       }
+
+       /* oob bytes size = (NFCSPARESIZE + 1) * 4
+        * Max supported spare size is 512 bytes. */
+       cfg_nfc |= (((mtd->oobsize / 4) - 1) << NFC_CFG_NFC_SPARESIZE_BIT_POS
+               & NFC_CFG_NFC_SPARESIZE);
+       /* default set a max timeout */
+       cfg_nfc |= NFC_CFG_RSPARE |
+                       NFC_CFG_NFC_DTOCYC | NFC_CFG_NFC_DTOMUL;
+
+       nfc_writel(host->nfc->hsmc_regs, CFG, cfg_nfc);
+
+       host->nfc->will_write_sram = false;
+       nfc_set_sram_bank(host, 0);
+
+       /* Use Write page with NFC SRAM only for PMECC or ECC NONE. */
+       if (host->nfc->write_by_sram) {
+               if ((chip->ecc.mode == NAND_ECC_HW && host->has_pmecc) ||
+                               chip->ecc.mode == NAND_ECC_NONE)
+                       chip->write_page = nfc_sram_write_page;
+               else
+                       host->nfc->write_by_sram = false;
+       }
+
+       dev_info(host->dev, "Using NFC Sram read %s\n",
+                       host->nfc->write_by_sram ? "and write" : "");
+       return 0;
+}
+
+static struct platform_driver atmel_nand_nfc_driver;
 /*
  * Probe for the NAND device.
  */
@@ -1469,30 +1994,27 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
        struct nand_chip *nand_chip;
        struct resource *mem;
        struct mtd_part_parser_data ppdata = {};
-       int res;
-       struct pinctrl *pinctrl;
-
-       mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!mem) {
-               printk(KERN_ERR "atmel_nand: can't get I/O resource mem\n");
-               return -ENXIO;
-       }
+       int res, irq;
 
        /* Allocate memory for the device structure (and zero it) */
-       host = kzalloc(sizeof(struct atmel_nand_host), GFP_KERNEL);
+       host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
        if (!host) {
                printk(KERN_ERR "atmel_nand: failed to allocate device structure.\n");
                return -ENOMEM;
        }
 
-       host->io_phys = (dma_addr_t)mem->start;
+       res = platform_driver_register(&atmel_nand_nfc_driver);
+       if (res)
+               dev_err(&pdev->dev, "atmel_nand: can't register NFC driver\n");
 
-       host->io_base = ioremap(mem->start, resource_size(mem));
-       if (host->io_base == NULL) {
-               printk(KERN_ERR "atmel_nand: ioremap failed\n");
-               res = -EIO;
+       mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       host->io_base = devm_ioremap_resource(&pdev->dev, mem);
+       if (IS_ERR(host->io_base)) {
+               dev_err(&pdev->dev, "atmel_nand: ioremap resource failed\n");
+               res = PTR_ERR(host->io_base);
                goto err_nand_ioremap;
        }
+       host->io_phys = (dma_addr_t)mem->start;
 
        mtd = &host->mtd;
        nand_chip = &host->nand_chip;
@@ -1500,9 +2022,9 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
        if (pdev->dev.of_node) {
                res = atmel_of_init_port(host, pdev->dev.of_node);
                if (res)
-                       goto err_ecc_ioremap;
+                       goto err_nand_ioremap;
        } else {
-               memcpy(&host->board, pdev->dev.platform_data,
+               memcpy(&host->board, dev_get_platdata(&pdev->dev),
                       sizeof(struct atmel_nand_data));
        }
 
@@ -1513,51 +2035,35 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
        /* Set address of NAND IO lines */
        nand_chip->IO_ADDR_R = host->io_base;
        nand_chip->IO_ADDR_W = host->io_base;
-       nand_chip->cmd_ctrl = atmel_nand_cmd_ctrl;
 
-       pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
-       if (IS_ERR(pinctrl)) {
-               dev_err(host->dev, "Failed to request pinctrl\n");
-               res = PTR_ERR(pinctrl);
-               goto err_ecc_ioremap;
-       }
+       if (nand_nfc.is_initialized) {
+               /* NFC driver is probed and initialized */
+               host->nfc = &nand_nfc;
 
-       if (gpio_is_valid(host->board.rdy_pin)) {
-               res = gpio_request(host->board.rdy_pin, "nand_rdy");
-               if (res < 0) {
-                       dev_err(&pdev->dev,
-                               "can't request rdy gpio %d\n",
-                               host->board.rdy_pin);
-                       goto err_ecc_ioremap;
-               }
+               nand_chip->select_chip = nfc_select_chip;
+               nand_chip->dev_ready = nfc_device_ready;
+               nand_chip->cmdfunc = nfc_nand_command;
 
-               res = gpio_direction_input(host->board.rdy_pin);
-               if (res < 0) {
-                       dev_err(&pdev->dev,
-                               "can't request input direction rdy gpio %d\n",
-                               host->board.rdy_pin);
-                       goto err_ecc_ioremap;
+               /* Initialize the interrupt for NFC */
+               irq = platform_get_irq(pdev, 0);
+               if (irq < 0) {
+                       dev_err(host->dev, "Cannot get HSMC irq!\n");
+                       goto err_nand_ioremap;
                }
 
-               nand_chip->dev_ready = atmel_nand_device_ready;
-       }
-
-       if (gpio_is_valid(host->board.enable_pin)) {
-               res = gpio_request(host->board.enable_pin, "nand_enable");
-               if (res < 0) {
-                       dev_err(&pdev->dev,
-                               "can't request enable gpio %d\n",
-                               host->board.enable_pin);
-                       goto err_ecc_ioremap;
+               res = devm_request_irq(&pdev->dev, irq, hsmc_interrupt,
+                               0, "hsmc", host);
+               if (res) {
+                       dev_err(&pdev->dev, "Unable to request HSMC irq %d\n",
+                               irq);
+                       goto err_nand_ioremap;
                }
+       } else {
+               res = atmel_nand_set_enable_ready_pins(mtd);
+               if (res)
+                       goto err_nand_ioremap;
 
-               res = gpio_direction_output(host->board.enable_pin, 1);
-               if (res < 0) {
-                       dev_err(&pdev->dev,
-                               "can't request output direction enable gpio %d\n",
-                               host->board.enable_pin);
-                       goto err_ecc_ioremap;
-               }
+               nand_chip->cmd_ctrl = atmel_nand_cmd_ctrl;
        }
 
        nand_chip->ecc.mode = host->board.ecc_mode;
@@ -1573,7 +2079,8 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
        atmel_nand_enable(host);
 
        if (gpio_is_valid(host->board.det_pin)) {
-               res = gpio_request(host->board.det_pin, "nand_det");
+               res = devm_gpio_request(&pdev->dev,
+                               host->board.det_pin, "nand_det");
                if (res < 0) {
                        dev_err(&pdev->dev,
                                "can't request det gpio %d\n",
@@ -1601,7 +2108,7 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
                nand_chip->bbt_options |= NAND_BBT_USE_FLASH;
        }
 
-       if (!cpu_has_dma())
+       if (!host->board.has_dma)
                use_dma = 0;
 
        if (use_dma) {
@@ -1637,6 +2144,15 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
                        goto err_hw_ecc;
        }
 
+       /* initialize the nfc configuration register */
+       if (host->nfc && host->nfc->use_nfc_sram) {
+               res = nfc_sram_init(mtd);
+               if (res) {
+                       host->nfc->use_nfc_sram = false;
+                       dev_err(host->dev, "Disable use nfc sram for data transfer.\n");
+               }
+       }
+
        /* second phase scan */
        if (nand_scan_tail(mtd)) {
                res = -ENXIO;
@@ -1651,27 +2167,16 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
                return res;
 
 err_scan_tail:
-       if (host->has_pmecc && host->nand_chip.ecc.mode == NAND_ECC_HW) {
+       if (host->has_pmecc && host->nand_chip.ecc.mode == NAND_ECC_HW)
                pmecc_writel(host->ecc, CTRL, PMECC_CTRL_DISABLE);
-               pmecc_data_free(host);
-       }
-       if (host->ecc)
-               iounmap(host->ecc);
-       if (host->pmerrloc_base)
-               iounmap(host->pmerrloc_base);
-       if (host->pmecc_rom_base)
-               iounmap(host->pmecc_rom_base);
 err_hw_ecc:
 err_scan_ident:
 err_no_card:
        atmel_nand_disable(host);
-       platform_set_drvdata(pdev, NULL);
        if (host->dma_chan)
                dma_release_channel(host->dma_chan);
-err_ecc_ioremap:
-       iounmap(host->io_base);
 err_nand_ioremap:
-       kfree(host);
+       platform_driver_unregister(&atmel_nand_nfc_driver);
        return res;
 }
 
@@ -1691,30 +2196,12 @@ static int __exit atmel_nand_remove(struct platform_device *pdev)
                pmecc_writel(host->ecc, CTRL, PMECC_CTRL_DISABLE);
                pmerrloc_writel(host->pmerrloc_base, ELDIS,
                                PMERRLOC_DISABLE);
-               pmecc_data_free(host);
        }
 
-       if (gpio_is_valid(host->board.det_pin))
-               gpio_free(host->board.det_pin);
-
-       if (gpio_is_valid(host->board.enable_pin))
-               gpio_free(host->board.enable_pin);
-
-       if (gpio_is_valid(host->board.rdy_pin))
-               gpio_free(host->board.rdy_pin);
-
-       if (host->ecc)
-               iounmap(host->ecc);
-       if (host->pmecc_rom_base)
-               iounmap(host->pmecc_rom_base);
-       if (host->pmerrloc_base)
-               iounmap(host->pmerrloc_base);
-
        if (host->dma_chan)
                dma_release_channel(host->dma_chan);
 
-       iounmap(host->io_base);
-       kfree(host);
+       platform_driver_unregister(&atmel_nand_nfc_driver);
 
        return 0;
 }
@@ -1728,6 +2215,59 @@ static const struct of_device_id atmel_nand_dt_ids[] = {
 MODULE_DEVICE_TABLE(of, atmel_nand_dt_ids);
 #endif
 
+static int atmel_nand_nfc_probe(struct platform_device *pdev)
+{
+       struct atmel_nfc *nfc = &nand_nfc;
+       struct resource *nfc_cmd_regs, *nfc_hsmc_regs, *nfc_sram;
+
+       nfc_cmd_regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       nfc->base_cmd_regs = devm_ioremap_resource(&pdev->dev, nfc_cmd_regs);
+       if (IS_ERR(nfc->base_cmd_regs))
+               return PTR_ERR(nfc->base_cmd_regs);
+
+       nfc_hsmc_regs = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+       nfc->hsmc_regs = devm_ioremap_resource(&pdev->dev, nfc_hsmc_regs);
+       if (IS_ERR(nfc->hsmc_regs))
+               return PTR_ERR(nfc->hsmc_regs);
+
+       nfc_sram = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+       if (nfc_sram) {
+               nfc->sram_bank0 = devm_ioremap_resource(&pdev->dev, nfc_sram);
+               if (IS_ERR(nfc->sram_bank0)) {
+                       dev_warn(&pdev->dev, "Fail to ioremap the NFC sram with error: %ld. So disable NFC sram.\n",
+                                       PTR_ERR(nfc->sram_bank0));
+               } else {
+                       nfc->use_nfc_sram = true;
+                       nfc->sram_bank0_phys = (dma_addr_t)nfc_sram->start;
+
+                       if (pdev->dev.of_node)
+                               nfc->write_by_sram = of_property_read_bool(
+                                               pdev->dev.of_node,
+                                               "atmel,write-by-sram");
+               }
+       }
+
+       nfc->is_initialized = true;
+       dev_info(&pdev->dev, "NFC is probed.\n");
+       return 0;
+}
+
+#if defined(CONFIG_OF)
+static struct of_device_id atmel_nand_nfc_match[] = {
+       { .compatible = "atmel,sama5d3-nfc" },
+       { /* sentinel */ }
+};
+#endif
+
+static struct platform_driver atmel_nand_nfc_driver = {
+       .driver = {
+               .name = "atmel_nand_nfc",
+               .owner = THIS_MODULE,
+               .of_match_table = of_match_ptr(atmel_nand_nfc_match),
+       },
+       .probe = atmel_nand_nfc_probe,
+};
+
 static struct platform_driver atmel_nand_driver = {
        .remove         = __exit_p(atmel_nand_remove),
        .driver         = {
diff --git a/drivers/mtd/nand/atmel_nand_nfc.h b/drivers/mtd/nand/atmel_nand_nfc.h
new file mode 100644 (file)
index 0000000..4efd117
--- /dev/null
@@ -0,0 +1,98 @@
+/*
+ * Atmel Nand Flash Controller (NFC) - System peripherals registers.
+ * Based on SAMA5D3 datasheet.
+ *
+ * © Copyright 2013 Atmel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef ATMEL_NAND_NFC_H
+#define ATMEL_NAND_NFC_H
+
+/*
+ * HSMC NFC registers
+ */
+#define ATMEL_HSMC_NFC_CFG     0x00            /* NFC Configuration Register */
+#define                NFC_CFG_PAGESIZE        (7 << 0)
+#define                        NFC_CFG_PAGESIZE_512    (0 << 0)
+#define                        NFC_CFG_PAGESIZE_1024   (1 << 0)
+#define                        NFC_CFG_PAGESIZE_2048   (2 << 0)
+#define                        NFC_CFG_PAGESIZE_4096   (3 << 0)
+#define                        NFC_CFG_PAGESIZE_8192   (4 << 0)
+#define                NFC_CFG_WSPARE          (1 << 8)
+#define                NFC_CFG_RSPARE          (1 << 9)
+#define                NFC_CFG_NFC_DTOCYC      (0xf << 16)
+#define                NFC_CFG_NFC_DTOMUL      (0x7 << 20)
+#define                NFC_CFG_NFC_SPARESIZE   (0x7f << 24)
+#define                NFC_CFG_NFC_SPARESIZE_BIT_POS   24
+
+#define ATMEL_HSMC_NFC_CTRL    0x04            /* NFC Control Register */
+#define                NFC_CTRL_ENABLE         (1 << 0)
+#define                NFC_CTRL_DISABLE        (1 << 1)
+
+#define ATMEL_HSMC_NFC_SR      0x08            /* NFC Status Register */
+#define                NFC_SR_XFR_DONE         (1 << 16)
+#define                NFC_SR_CMD_DONE         (1 << 17)
+#define                NFC_SR_RB_EDGE          (1 << 24)
+
+#define ATMEL_HSMC_NFC_IER     0x0c
+#define ATMEL_HSMC_NFC_IDR     0x10
+#define ATMEL_HSMC_NFC_IMR     0x14
+#define ATMEL_HSMC_NFC_CYCLE0  0x18            /* NFC Address Cycle Zero */
+#define                ATMEL_HSMC_NFC_ADDR_CYCLE0      (0xff)
+
+#define ATMEL_HSMC_NFC_BANK    0x1c            /* NFC Bank Register */
+#define                ATMEL_HSMC_NFC_BANK0            (0 << 0)
+#define                ATMEL_HSMC_NFC_BANK1            (1 << 0)
+
+#define nfc_writel(addr, reg, value) \
+       writel((value), (addr) + ATMEL_HSMC_NFC_##reg)
+
+#define nfc_readl(addr, reg) \
+       readl_relaxed((addr) + ATMEL_HSMC_NFC_##reg)
+
+/*
+ * NFC Address Command definitions
+ */
+#define NFCADDR_CMD_CMD1       (0xff << 2)     /* Command for Cycle 1 */
+#define NFCADDR_CMD_CMD1_BIT_POS       2
+#define NFCADDR_CMD_CMD2       (0xff << 10)    /* Command for Cycle 2 */
+#define NFCADDR_CMD_CMD2_BIT_POS       10
+#define NFCADDR_CMD_VCMD2      (0x1 << 18)     /* Valid Cycle 2 Command */
+#define NFCADDR_CMD_ACYCLE     (0x7 << 19)     /* Number of Address required */
+#define                NFCADDR_CMD_ACYCLE_NONE         (0x0 << 19)
+#define                NFCADDR_CMD_ACYCLE_1            (0x1 << 19)
+#define                NFCADDR_CMD_ACYCLE_2            (0x2 << 19)
+#define                NFCADDR_CMD_ACYCLE_3            (0x3 << 19)
+#define                NFCADDR_CMD_ACYCLE_4            (0x4 << 19)
+#define                NFCADDR_CMD_ACYCLE_5            (0x5 << 19)
+#define NFCADDR_CMD_ACYCLE_BIT_POS     19
+#define NFCADDR_CMD_CSID       (0x7 << 22)     /* Chip Select Identifier */
+#define                NFCADDR_CMD_CSID_0              (0x0 << 22)
+#define                NFCADDR_CMD_CSID_1              (0x1 << 22)
+#define                NFCADDR_CMD_CSID_2              (0x2 << 22)
+#define                NFCADDR_CMD_CSID_3              (0x3 << 22)
+#define                NFCADDR_CMD_CSID_4              (0x4 << 22)
+#define                NFCADDR_CMD_CSID_5              (0x5 << 22)
+#define                NFCADDR_CMD_CSID_6              (0x6 << 22)
+#define                NFCADDR_CMD_CSID_7              (0x7 << 22)
+#define NFCADDR_CMD_DATAEN     (0x1 << 25)     /* Data Transfer Enable */
+#define NFCADDR_CMD_DATADIS    (0x0 << 25)     /* Data Transfer Disable */
+#define NFCADDR_CMD_NFCRD      (0x0 << 26)     /* NFC Read Enable */
+#define NFCADDR_CMD_NFCWR      (0x1 << 26)     /* NFC Write Enable */
+#define NFCADDR_CMD_NFCBUSY    (0x1 << 27)     /* NFC Busy */
+
+#define nfc_cmd_addr1234_writel(cmd, addr1234, nfc_base) \
+       writel((addr1234), (cmd) + nfc_base)
+
+#define nfc_cmd_readl(bitstatus, nfc_base) \
+       readl_relaxed((bitstatus) + nfc_base)
+
+#define NFC_TIME_OUT_MS                100
+#define        NFC_SRAM_BANK1_OFFSET   0x1200
+
+#endif
index 217459d02b2f85b6d5730e1c0d77e4fac7a108eb..ae8dd7c4103922fc760786be079b7576ad648893 100644 (file)
@@ -411,7 +411,7 @@ static int au1550nd_probe(struct platform_device *pdev)
        struct resource *r;
        int ret, cs;
 
-       pd = pdev->dev.platform_data;
+       pd = dev_get_platdata(&pdev->dev);
        if (!pd) {
                dev_err(&pdev->dev, "missing platform data\n");
                return -ENODEV;
index 776df3694f755865f88b7e0a499cac533dd04142..2c42e125720f2141258e064e1fc024fef092057b 100644 (file)
@@ -171,7 +171,7 @@ static struct bf5xx_nand_info *to_nand_info(struct platform_device *pdev)
 
 static struct bf5xx_nand_platform *to_nand_plat(struct platform_device *pdev)
 {
-       return pdev->dev.platform_data;
+       return dev_get_platdata(&pdev->dev);
 }
 
 /*
@@ -671,8 +671,6 @@ static int bf5xx_nand_remove(struct platform_device *pdev)
 {
        struct bf5xx_nand_info *info = to_nand_info(pdev);
 
-       platform_set_drvdata(pdev, NULL);
-
        /* first thing we need to do is release all our mtds
         * and their partitions, then go through freeing the
         * resources used
@@ -832,7 +830,6 @@ static int bf5xx_nand_probe(struct platform_device *pdev)
 out_err_nand_scan:
        bf5xx_nand_dma_remove(info);
 out_err_hw_init:
-       platform_set_drvdata(pdev, NULL);
        kfree(info);
 out_err_kzalloc:
        peripheral_free_list(bfin_nfc_pin_req);
index c3e15a55817349eb4ed2e1c86009b042c38e2405..b77a01efb4837ea325988ee6e58e82bd128d7892 100644 (file)
@@ -530,7 +530,7 @@ MODULE_DEVICE_TABLE(of, davinci_nand_of_match);
 static struct davinci_nand_pdata
        *nand_davinci_get_pdata(struct platform_device *pdev)
 {
-       if (!pdev->dev.platform_data && pdev->dev.of_node) {
+       if (!dev_get_platdata(&pdev->dev) && pdev->dev.of_node) {
                struct davinci_nand_pdata *pdata;
                const char *mode;
                u32 prop;
@@ -575,13 +575,13 @@ static struct davinci_nand_pdata
                        pdata->bbt_options = NAND_BBT_USE_FLASH;
        }
 
-       return pdev->dev.platform_data;
+       return dev_get_platdata(&pdev->dev);
 }
 #else
 static struct davinci_nand_pdata
        *nand_davinci_get_pdata(struct platform_device *pdev)
 {
-       return pdev->dev.platform_data;
+       return dev_get_platdata(&pdev->dev);
 }
 #endif
 
@@ -623,11 +623,14 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
                goto err_nomem;
        }
 
-       vaddr = devm_request_and_ioremap(&pdev->dev, res1);
-       base = devm_request_and_ioremap(&pdev->dev, res2);
-       if (!vaddr || !base) {
-               dev_err(&pdev->dev, "ioremap failed\n");
-               ret = -EADDRNOTAVAIL;
+       vaddr = devm_ioremap_resource(&pdev->dev, res1);
+       if (IS_ERR(vaddr)) {
+               ret = PTR_ERR(vaddr);
+               goto err_ioremap;
+       }
+       base = devm_ioremap_resource(&pdev->dev, res2);
+       if (IS_ERR(base)) {
+               ret = PTR_ERR(base);
                goto err_ioremap;
        }
 
index 0c8bb6bf8424732b068f17f4793c3a957e86031a..2ed2bb33a6e773f6a81835b2787db2ef9b6bf774 100644 (file)
@@ -1520,7 +1520,7 @@ int denali_init(struct denali_nand_info *denali)
         * so just let controller do 15bit ECC for MLC and 8bit ECC for
         * SLC if possible.
         * */
-       if (denali->nand.cellinfo & 0xc &&
+       if (denali->nand.cellinfo & NAND_CI_CELLTYPE_MSK &&
                        (denali->mtd.oobsize > (denali->bbtskipbytes +
                        ECC_15BITS * (denali->mtd.writesize /
                        ECC_SECTOR_SIZE)))) {
index 81fa5784f98b390fb28ee81a23e720db56c2cc8b..eaa3c29ad860eb7c84a5e1309ef879d9d2044ed1 100644 (file)
@@ -46,13 +46,13 @@ static unsigned long __initdata doc_locations[] = {
        0xfffd8000, 0xfffda000, 0xfffdc000, 0xfffde000,
        0xfffe0000, 0xfffe2000, 0xfffe4000, 0xfffe6000,
        0xfffe8000, 0xfffea000, 0xfffec000, 0xfffee000,
-#else /*  CONFIG_MTD_DOCPROBE_HIGH */
+#else
        0xc8000, 0xca000, 0xcc000, 0xce000,
        0xd0000, 0xd2000, 0xd4000, 0xd6000,
        0xd8000, 0xda000, 0xdc000, 0xde000,
        0xe0000, 0xe2000, 0xe4000, 0xe6000,
        0xe8000, 0xea000, 0xec000, 0xee000,
-#endif /*  CONFIG_MTD_DOCPROBE_HIGH */
+#endif
 #endif
        0xffffffff };
 
index fa25e7a08134d1cc6bf3d0a68eb15da42a44524b..548db2389fab8b63e41f2f61731606c32643084a 100644 (file)
@@ -1093,7 +1093,6 @@ static int docg4_block_markbad(struct mtd_info *mtd, loff_t ofs)
        struct nand_chip *nand = mtd->priv;
        struct docg4_priv *doc = nand->priv;
        struct nand_bbt_descr *bbtd = nand->badblock_pattern;
-       int block = (int)(ofs >> nand->bbt_erase_shift);
        int page = (int)(ofs >> nand->page_shift);
        uint32_t g4_addr = mtd_to_docg4_address(page, 0);
 
@@ -1108,9 +1107,6 @@ static int docg4_block_markbad(struct mtd_info *mtd, loff_t ofs)
        if (buf == NULL)
                return -ENOMEM;
 
-       /* update bbt in memory */
-       nand->bbt[block / 4] |= 0x01 << ((block & 0x03) * 2);
-
        /* write bit-wise negation of pattern to oob buffer */
        memset(nand->oob_poi, 0xff, mtd->oobsize);
        for (i = 0; i < bbtd->len; i++)
@@ -1120,8 +1116,6 @@ static int docg4_block_markbad(struct mtd_info *mtd, loff_t ofs)
        write_page_prologue(mtd, g4_addr);
        docg4_write_page(mtd, nand, buf, 1);
        ret = pageprog(mtd);
-       if (!ret)
-               mtd->ecc_stats.badblocks++;
 
        kfree(buf);
 
@@ -1368,7 +1362,6 @@ static int __init probe_docg4(struct platform_device *pdev)
                struct nand_chip *nand = mtd->priv;
                struct docg4_priv *doc = nand->priv;
                nand_release(mtd); /* deletes partitions and mtd devices */
-               platform_set_drvdata(pdev, NULL);
                free_bch(doc->bch);
                kfree(mtd);
        }
@@ -1380,7 +1373,6 @@ static int __exit cleanup_docg4(struct platform_device *pdev)
 {
        struct docg4_priv *doc = platform_get_drvdata(pdev);
        nand_release(doc->mtd);
-       platform_set_drvdata(pdev, NULL);
        free_bch(doc->bch);
        kfree(doc->mtd);
        iounmap(doc->virtadr);
index f1f7f12ab50184b3bc233e360a899b998ea8c724..317a771f1587c94f3fd57c45c8e140deeb08b982 100644 (file)
@@ -823,7 +823,7 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
 
        /* set up nand options */
        chip->bbt_options = NAND_BBT_USE_FLASH;
-
+       chip->options = NAND_NO_SUBPAGE_WRITE;
 
        if (ioread32be(&ifc->cspr_cs[priv->bank].cspr) & CSPR_PORT_SIZE_16) {
                chip->read_byte = fsl_ifc_read_byte16;
@@ -908,7 +908,6 @@ static int fsl_ifc_chip_remove(struct fsl_ifc_mtd *priv)
 
        ifc_nand_ctrl->chips[priv->bank] = NULL;
        dev_set_drvdata(priv->dev, NULL);
-       kfree(priv);
 
        return 0;
 }
index 911e2433fe304b107f1ad8b585621d653578e302..3dc1a7564d8725d62085b16cb7c0544e138858b2 100644 (file)
@@ -889,6 +889,24 @@ static int fsmc_nand_probe_config_dt(struct platform_device *pdev,
        if (of_get_property(np, "nand-skip-bbtscan", NULL))
                pdata->options = NAND_SKIP_BBTSCAN;
 
+       pdata->nand_timings = devm_kzalloc(&pdev->dev,
+                               sizeof(*pdata->nand_timings), GFP_KERNEL);
+       if (!pdata->nand_timings) {
+               dev_err(&pdev->dev, "no memory for nand_timing\n");
+               return -ENOMEM;
+       }
+       of_property_read_u8_array(np, "timings", (u8 *)pdata->nand_timings,
+                                               sizeof(*pdata->nand_timings));
+
+       /* Set default NAND bank to 0 */
+       pdata->bank = 0;
+       if (!of_property_read_u32(np, "bank", &val)) {
+               if (val > 3) {
+                       dev_err(&pdev->dev, "invalid bank %u\n", val);
+                       return -EINVAL;
+               }
+               pdata->bank = val;
+       }
        return 0;
 }
 #else
@@ -940,9 +958,6 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
        }
 
        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_data");
-       if (!res)
-               return -EINVAL;
-
        host->data_va = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(host->data_va))
                return PTR_ERR(host->data_va);
@@ -950,25 +965,16 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
        host->data_pa = (dma_addr_t)res->start;
 
        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_addr");
-       if (!res)
-               return -EINVAL;
-
        host->addr_va = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(host->addr_va))
                return PTR_ERR(host->addr_va);
 
        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_cmd");
-       if (!res)
-               return -EINVAL;
-
        host->cmd_va = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(host->cmd_va))
                return PTR_ERR(host->cmd_va);
 
        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fsmc_regs");
-       if (!res)
-               return -EINVAL;
-
        host->regs_va = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(host->regs_va))
                return PTR_ERR(host->regs_va);
@@ -1174,8 +1180,6 @@ static int fsmc_nand_remove(struct platform_device *pdev)
 {
        struct fsmc_nand_data *host = platform_get_drvdata(pdev);
 
-       platform_set_drvdata(pdev, NULL);
-
        if (host) {
                nand_release(&host->mtd);
 
@@ -1190,7 +1194,7 @@ static int fsmc_nand_remove(struct platform_device *pdev)
        return 0;
 }
 
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 static int fsmc_nand_suspend(struct device *dev)
 {
        struct fsmc_nand_data *host = dev_get_drvdata(dev);
@@ -1210,9 +1214,9 @@ static int fsmc_nand_resume(struct device *dev)
        }
        return 0;
 }
+#endif
 
 static SIMPLE_DEV_PM_OPS(fsmc_nand_pm_ops, fsmc_nand_suspend, fsmc_nand_resume);
-#endif
 
 #ifdef CONFIG_OF
 static const struct of_device_id fsmc_nand_id_table[] = {
@@ -1229,9 +1233,7 @@ static struct platform_driver fsmc_nand_driver = {
                .owner = THIS_MODULE,
                .name = "fsmc-nand",
                .of_match_table = of_match_ptr(fsmc_nand_id_table),
-#ifdef CONFIG_PM
                .pm = &fsmc_nand_pm_ops,
-#endif
        },
 };
 
index 89065dd83d64d7ad64a6d83ca318f36ae65ec6cf..e826f898241f92b24704ba7103fc0cdd970a63d1 100644 (file)
@@ -17,6 +17,7 @@
  */
 
 #include <linux/kernel.h>
+#include <linux/err.h>
 #include <linux/init.h>
 #include <linux/slab.h>
 #include <linux/module.h>
@@ -86,59 +87,11 @@ static void gpio_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
        gpio_nand_dosync(gpiomtd);
 }
 
-static void gpio_nand_writebuf(struct mtd_info *mtd, const u_char *buf, int len)
-{
-       struct nand_chip *this = mtd->priv;
-
-       iowrite8_rep(this->IO_ADDR_W, buf, len);
-}
-
-static void gpio_nand_readbuf(struct mtd_info *mtd, u_char *buf, int len)
-{
-       struct nand_chip *this = mtd->priv;
-
-       ioread8_rep(this->IO_ADDR_R, buf, len);
-}
-
-static void gpio_nand_writebuf16(struct mtd_info *mtd, const u_char *buf,
-                                int len)
-{
-       struct nand_chip *this = mtd->priv;
-
-       if (IS_ALIGNED((unsigned long)buf, 2)) {
-               iowrite16_rep(this->IO_ADDR_W, buf, len>>1);
-       } else {
-               int i;
-               unsigned short *ptr = (unsigned short *)buf;
-
-               for (i = 0; i < len; i += 2, ptr++)
-                       writew(*ptr, this->IO_ADDR_W);
-       }
-}
-
-static void gpio_nand_readbuf16(struct mtd_info *mtd, u_char *buf, int len)
-{
-       struct nand_chip *this = mtd->priv;
-
-       if (IS_ALIGNED((unsigned long)buf, 2)) {
-               ioread16_rep(this->IO_ADDR_R, buf, len>>1);
-       } else {
-               int i;
-               unsigned short *ptr = (unsigned short *)buf;
-
-               for (i = 0; i < len; i += 2, ptr++)
-                       *ptr = readw(this->IO_ADDR_R);
-       }
-}
-
 static int gpio_nand_devready(struct mtd_info *mtd)
 {
        struct gpiomtd *gpiomtd = gpio_nand_getpriv(mtd);
 
-       if (gpio_is_valid(gpiomtd->plat.gpio_rdy))
-               return gpio_get_value(gpiomtd->plat.gpio_rdy);
-
-       return 1;
+       return gpio_get_value(gpiomtd->plat.gpio_rdy);
 }
 
 #ifdef CONFIG_OF
@@ -153,6 +106,9 @@ static int gpio_nand_get_config_of(const struct device *dev,
 {
        u32 val;
 
+       if (!dev->of_node)
+               return -ENODEV;
+
        if (!of_property_read_u32(dev->of_node, "bank-width", &val)) {
                if (val == 2) {
                        plat->options |= NAND_BUSWIDTH_16;
@@ -211,8 +167,8 @@ static inline int gpio_nand_get_config(const struct device *dev,
        if (!ret)
                return ret;
 
-       if (dev->platform_data) {
-               memcpy(plat, dev->platform_data, sizeof(*plat));
+       if (dev_get_platdata(dev)) {
+               memcpy(plat, dev_get_platdata(dev), sizeof(*plat));
                return 0;
        }
 
@@ -230,145 +186,100 @@ gpio_nand_get_io_sync(struct platform_device *pdev)
        return platform_get_resource(pdev, IORESOURCE_MEM, 1);
 }
 
-static int gpio_nand_remove(struct platform_device *dev)
+static int gpio_nand_remove(struct platform_device *pdev)
 {
-       struct gpiomtd *gpiomtd = platform_get_drvdata(dev);
-       struct resource *res;
+       struct gpiomtd *gpiomtd = platform_get_drvdata(pdev);
 
        nand_release(&gpiomtd->mtd_info);
 
-       res = gpio_nand_get_io_sync(dev);
-       iounmap(gpiomtd->io_sync);
-       if (res)
-               release_mem_region(res->start, resource_size(res));
-
-       res = platform_get_resource(dev, IORESOURCE_MEM, 0);
-       iounmap(gpiomtd->nand_chip.IO_ADDR_R);
-       release_mem_region(res->start, resource_size(res));
-
        if (gpio_is_valid(gpiomtd->plat.gpio_nwp))
                gpio_set_value(gpiomtd->plat.gpio_nwp, 0);
        gpio_set_value(gpiomtd->plat.gpio_nce, 1);
 
-       gpio_free(gpiomtd->plat.gpio_cle);
-       gpio_free(gpiomtd->plat.gpio_ale);
-       gpio_free(gpiomtd->plat.gpio_nce);
-       if (gpio_is_valid(gpiomtd->plat.gpio_nwp))
-               gpio_free(gpiomtd->plat.gpio_nwp);
-       if (gpio_is_valid(gpiomtd->plat.gpio_rdy))
-               gpio_free(gpiomtd->plat.gpio_rdy);
-
        return 0;
 }
 
-static void __iomem *request_and_remap(struct resource *res, size_t size,
-                                       const char *name, int *err)
-{
-       void __iomem *ptr;
-
-       if (!request_mem_region(res->start, resource_size(res), name)) {
-               *err = -EBUSY;
-               return NULL;
-       }
-
-       ptr = ioremap(res->start, size);
-       if (!ptr) {
-               release_mem_region(res->start, resource_size(res));
-               *err = -ENOMEM;
-       }
-       return ptr;
-}
-
-static int gpio_nand_probe(struct platform_device *dev)
+static int gpio_nand_probe(struct platform_device *pdev)
 {
        struct gpiomtd *gpiomtd;
-       struct nand_chip *this;
-       struct resource *res0, *res1;
+       struct nand_chip *chip;
+       struct resource *res;
        struct mtd_part_parser_data ppdata = {};
        int ret = 0;
 
-       if (!dev->dev.of_node && !dev->dev.platform_data)
-               return -EINVAL;
-
-       res0 = platform_get_resource(dev, IORESOURCE_MEM, 0);
-       if (!res0)
+       if (!pdev->dev.of_node && !dev_get_platdata(&pdev->dev))
                return -EINVAL;
 
-       gpiomtd = devm_kzalloc(&dev->dev, sizeof(*gpiomtd), GFP_KERNEL);
-       if (gpiomtd == NULL) {
-               dev_err(&dev->dev, "failed to create NAND MTD\n");
+       gpiomtd = devm_kzalloc(&pdev->dev, sizeof(*gpiomtd), GFP_KERNEL);
+       if (!gpiomtd) {
+               dev_err(&pdev->dev, "failed to create NAND MTD\n");
                return -ENOMEM;
        }
 
-       this = &gpiomtd->nand_chip;
-       this->IO_ADDR_R = request_and_remap(res0, 2, "NAND", &ret);
-       if (!this->IO_ADDR_R) {
-               dev_err(&dev->dev, "unable to map NAND\n");
-               goto err_map;
-       }
+       chip = &gpiomtd->nand_chip;
 
-       res1 = gpio_nand_get_io_sync(dev);
-       if (res1) {
-               gpiomtd->io_sync = request_and_remap(res1, 4, "NAND sync", &ret);
-               if (!gpiomtd->io_sync) {
-                       dev_err(&dev->dev, "unable to map sync NAND\n");
-                       goto err_sync;
-               }
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       chip->IO_ADDR_R = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(chip->IO_ADDR_R))
+               return PTR_ERR(chip->IO_ADDR_R);
+
+       res = gpio_nand_get_io_sync(pdev);
+       if (res) {
+               gpiomtd->io_sync = devm_ioremap_resource(&pdev->dev, res);
+               if (IS_ERR(gpiomtd->io_sync))
+                       return PTR_ERR(gpiomtd->io_sync);
        }
 
-       ret = gpio_nand_get_config(&dev->dev, &gpiomtd->plat);
+       ret = gpio_nand_get_config(&pdev->dev, &gpiomtd->plat);
        if (ret)
-               goto err_nce;
+               return ret;
 
-       ret = gpio_request(gpiomtd->plat.gpio_nce, "NAND NCE");
+       ret = devm_gpio_request(&pdev->dev, gpiomtd->plat.gpio_nce, "NAND NCE");
        if (ret)
-               goto err_nce;
+               return ret;
        gpio_direction_output(gpiomtd->plat.gpio_nce, 1);
+
        if (gpio_is_valid(gpiomtd->plat.gpio_nwp)) {
-               ret = gpio_request(gpiomtd->plat.gpio_nwp, "NAND NWP");
+               ret = devm_gpio_request(&pdev->dev, gpiomtd->plat.gpio_nwp,
+                                       "NAND NWP");
                if (ret)
-                       goto err_nwp;
-               gpio_direction_output(gpiomtd->plat.gpio_nwp, 1);
+                       return ret;
        }
-       ret = gpio_request(gpiomtd->plat.gpio_ale, "NAND ALE");
+
+       ret = devm_gpio_request(&pdev->dev, gpiomtd->plat.gpio_ale, "NAND ALE");
        if (ret)
-               goto err_ale;
+               return ret;
        gpio_direction_output(gpiomtd->plat.gpio_ale, 0);
-       ret = gpio_request(gpiomtd->plat.gpio_cle, "NAND CLE");
+
+       ret = devm_gpio_request(&pdev->dev, gpiomtd->plat.gpio_cle, "NAND CLE");
        if (ret)
-               goto err_cle;
+               return ret;
        gpio_direction_output(gpiomtd->plat.gpio_cle, 0);
+
        if (gpio_is_valid(gpiomtd->plat.gpio_rdy)) {
-               ret = gpio_request(gpiomtd->plat.gpio_rdy, "NAND RDY");
+               ret = devm_gpio_request(&pdev->dev, gpiomtd->plat.gpio_rdy,
+                                       "NAND RDY");
                if (ret)
-                       goto err_rdy;
+                       return ret;
                gpio_direction_input(gpiomtd->plat.gpio_rdy);
+               chip->dev_ready = gpio_nand_devready;
        }
 
+       chip->IO_ADDR_W         = chip->IO_ADDR_R;
+       chip->ecc.mode          = NAND_ECC_SOFT;
+       chip->options           = gpiomtd->plat.options;
+       chip->chip_delay        = gpiomtd->plat.chip_delay;
+       chip->cmd_ctrl          = gpio_nand_cmd_ctrl;
 
-       this->IO_ADDR_W  = this->IO_ADDR_R;
-       this->ecc.mode   = NAND_ECC_SOFT;
-       this->options    = gpiomtd->plat.options;
-       this->chip_delay = gpiomtd->plat.chip_delay;
-
-       /* install our routines */
-       this->cmd_ctrl   = gpio_nand_cmd_ctrl;
-       this->dev_ready  = gpio_nand_devready;
+       gpiomtd->mtd_info.priv  = chip;
+       gpiomtd->mtd_info.owner = THIS_MODULE;
 
-       if (this->options & NAND_BUSWIDTH_16) {
-               this->read_buf   = gpio_nand_readbuf16;
-               this->write_buf  = gpio_nand_writebuf16;
-       } else {
-               this->read_buf   = gpio_nand_readbuf;
-               this->write_buf  = gpio_nand_writebuf;
-       }
+       platform_set_drvdata(pdev, gpiomtd);
 
-       /* set the mtd private data for the nand driver */
-       gpiomtd->mtd_info.priv = this;
-       gpiomtd->mtd_info.owner = THIS_MODULE;
+       if (gpio_is_valid(gpiomtd->plat.gpio_nwp))
+               gpio_direction_output(gpiomtd->plat.gpio_nwp, 1);
 
        if (nand_scan(&gpiomtd->mtd_info, 1)) {
-               dev_err(&dev->dev, "no nand chips found?\n");
                ret = -ENXIO;
                goto err_wp;
        }
@@ -377,38 +288,17 @@ static int gpio_nand_probe(struct platform_device *dev)
                gpiomtd->plat.adjust_parts(&gpiomtd->plat,
                                           gpiomtd->mtd_info.size);
 
-       ppdata.of_node = dev->dev.of_node;
+       ppdata.of_node = pdev->dev.of_node;
        ret = mtd_device_parse_register(&gpiomtd->mtd_info, NULL, &ppdata,
                                        gpiomtd->plat.parts,
                                        gpiomtd->plat.num_parts);
-       if (ret)
-               goto err_wp;
-       platform_set_drvdata(dev, gpiomtd);
-
-       return 0;
+       if (!ret)
+               return 0;
 
 err_wp:
        if (gpio_is_valid(gpiomtd->plat.gpio_nwp))
                gpio_set_value(gpiomtd->plat.gpio_nwp, 0);
-       if (gpio_is_valid(gpiomtd->plat.gpio_rdy))
-               gpio_free(gpiomtd->plat.gpio_rdy);
-err_rdy:
-       gpio_free(gpiomtd->plat.gpio_cle);
-err_cle:
-       gpio_free(gpiomtd->plat.gpio_ale);
-err_ale:
-       if (gpio_is_valid(gpiomtd->plat.gpio_nwp))
-               gpio_free(gpiomtd->plat.gpio_nwp);
-err_nwp:
-       gpio_free(gpiomtd->plat.gpio_nce);
-err_nce:
-       iounmap(gpiomtd->io_sync);
-       if (res1)
-               release_mem_region(res1->start, resource_size(res1));
-err_sync:
-       iounmap(gpiomtd->nand_chip.IO_ADDR_R);
-       release_mem_region(res0->start, resource_size(res0));
-err_map:
+
        return ret;
 }
 
@@ -417,6 +307,7 @@ static struct platform_driver gpio_nand_driver = {
        .remove         = gpio_nand_remove,
        .driver         = {
                .name   = "gpio-nand",
+               .owner  = THIS_MODULE,
                .of_match_table = of_match_ptr(gpio_nand_id_table),
        },
 };
index 25ecfa1822a8fc75cf485b41c51602ad04a0489b..9c89e80bfbc3527e184869432b5e2d517685c82f 100644 (file)
@@ -26,7 +26,6 @@
 #include <linux/interrupt.h>
 #include <linux/module.h>
 #include <linux/mtd/partitions.h>
-#include <linux/pinctrl/consumer.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
 #include <linux/of_mtd.h>
@@ -112,7 +111,131 @@ static inline bool gpmi_check_ecc(struct gpmi_nand_data *this)
        return true;
 }
 
-int common_nfc_set_geometry(struct gpmi_nand_data *this)
+/*
+ * If we can get the ECC information from the nand chip, we do not
+ * need to calculate them ourselves.
+ *
+ * We may have available oob space in this case.
+ */
+static bool set_geometry_by_ecc_info(struct gpmi_nand_data *this)
+{
+       struct bch_geometry *geo = &this->bch_geometry;
+       struct mtd_info *mtd = &this->mtd;
+       struct nand_chip *chip = mtd->priv;
+       struct nand_oobfree *of = gpmi_hw_ecclayout.oobfree;
+       unsigned int block_mark_bit_offset;
+
+       if (!(chip->ecc_strength_ds > 0 && chip->ecc_step_ds > 0))
+               return false;
+
+       switch (chip->ecc_step_ds) {
+       case SZ_512:
+               geo->gf_len = 13;
+               break;
+       case SZ_1K:
+               geo->gf_len = 14;
+               break;
+       default:
+               dev_err(this->dev,
+                       "unsupported nand chip. ecc bits : %d, ecc size : %d\n",
+                       chip->ecc_strength_ds, chip->ecc_step_ds);
+               return false;
+       }
+       geo->ecc_chunk_size = chip->ecc_step_ds;
+       geo->ecc_strength = round_up(chip->ecc_strength_ds, 2);
+       if (!gpmi_check_ecc(this))
+               return false;
+
+       /* Keep the C >= O */
+       if (geo->ecc_chunk_size < mtd->oobsize) {
+               dev_err(this->dev,
+                       "unsupported nand chip. ecc size: %d, oob size : %d\n",
+                       chip->ecc_step_ds, mtd->oobsize);
+               return false;
+       }
+
+       /* The default value, see comment in the legacy_set_geometry(). */
+       geo->metadata_size = 10;
+
+       geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunk_size;
+
+       /*
+        * Now, the NAND chip with 2K page(data chunk is 512byte) shows below:
+        *
+        *    |                          P                            |
+        *    |<----------------------------------------------------->|
+        *    |                                                       |
+        *    |                                        (Block Mark)   |
+        *    |                      P'                      |      | |     |
+        *    |<-------------------------------------------->|  D   | |  O' |
+        *    |                                              |<---->| |<--->|
+        *    V                                              V      V V     V
+        *    +---+----------+-+----------+-+----------+-+----------+-+-----+
+        *    | M |   data   |E|   data   |E|   data   |E|   data   |E|     |
+        *    +---+----------+-+----------+-+----------+-+----------+-+-----+
+        *                                                   ^              ^
+        *                                                   |      O       |
+        *                                                   |<------------>|
+        *                                                   |              |
+        *
+        *      P : the page size for BCH module.
+        *      E : The ECC strength.
+        *      G : the length of Galois Field.
+        *      N : The chunk count of per page.
+        *      M : the metasize of per page.
+        *      C : the ecc chunk size, aka the "data" above.
+        *      P': the nand chip's page size.
+        *      O : the nand chip's oob size.
+        *      O': the free oob.
+        *
+        *      The formula for P is :
+        *
+        *                  E * G * N
+        *             P = ------------ + P' + M
+        *                      8
+        *
+        * The position of block mark moves forward in the ECC-based view
+        * of page, and the delta is:
+        *
+        *                   E * G * (N - 1)
+        *             D = (---------------- + M)
+        *                          8
+        *
+        * Please see the comment in legacy_set_geometry().
+        * With the condition C >= O , we still can get same result.
+        * So the bit position of the physical block mark within the ECC-based
+        * view of the page is :
+        *             (P' - D) * 8
+        */
+       geo->page_size = mtd->writesize + geo->metadata_size +
+               (geo->gf_len * geo->ecc_strength * geo->ecc_chunk_count) / 8;
+
+       /* The available oob size we have. */
+       if (geo->page_size < mtd->writesize + mtd->oobsize) {
+               of->offset = geo->page_size - mtd->writesize;
+               of->length = mtd->oobsize - of->offset;
+       }
+
+       geo->payload_size = mtd->writesize;
+
+       geo->auxiliary_status_offset = ALIGN(geo->metadata_size, 4);
+       geo->auxiliary_size = ALIGN(geo->metadata_size, 4)
+                               + ALIGN(geo->ecc_chunk_count, 4);
+
+       if (!this->swap_block_mark)
+               return true;
+
+       /* For bit swap. */
+       block_mark_bit_offset = mtd->writesize * 8 -
+               (geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - 1)
+                               + geo->metadata_size * 8);
+
+       geo->block_mark_byte_offset = block_mark_bit_offset / 8;
+       geo->block_mark_bit_offset  = block_mark_bit_offset % 8;
+       return true;
+}
+
+static int legacy_set_geometry(struct gpmi_nand_data *this)
 {
        struct bch_geometry *geo = &this->bch_geometry;
        struct mtd_info *mtd = &this->mtd;
@@ -224,6 +347,11 @@ int common_nfc_set_geometry(struct gpmi_nand_data *this)
        return 0;
 }
 
+int common_nfc_set_geometry(struct gpmi_nand_data *this)
+{
+       return set_geometry_by_ecc_info(this) ? 0 : legacy_set_geometry(this);
+}
+
 struct dma_chan *get_dma_chan(struct gpmi_nand_data *this)
 {
        int chipnr = this->current_chip;
@@ -355,7 +483,7 @@ static int acquire_register_block(struct gpmi_nand_data *this,
        r = platform_get_resource_byname(pdev, IORESOURCE_MEM, res_name);
        if (!r) {
                pr_err("Can't get resource for %s\n", res_name);
-               return -ENXIO;
+               return -ENODEV;
        }
 
        p = ioremap(r->start, resource_size(r));
@@ -396,7 +524,7 @@ static int acquire_bch_irq(struct gpmi_nand_data *this, irq_handler_t irq_h)
        r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, res_name);
        if (!r) {
                pr_err("Can't get resource for %s\n", res_name);
-               return -ENXIO;
+               return -ENODEV;
        }
 
        err = request_irq(r->start, irq_h, 0, res_name, this);
@@ -473,12 +601,14 @@ static int gpmi_get_clks(struct gpmi_nand_data *this)
        struct resources *r = &this->resources;
        char **extra_clks = NULL;
        struct clk *clk;
-       int i;
+       int err, i;
 
        /* The main clock is stored in the first. */
        r->clock[0] = clk_get(this->dev, "gpmi_io");
-       if (IS_ERR(r->clock[0]))
+       if (IS_ERR(r->clock[0])) {
+               err = PTR_ERR(r->clock[0]);
                goto err_clock;
+       }
 
        /* Get extra clocks */
        if (GPMI_IS_MX6Q(this))
@@ -491,8 +621,10 @@ static int gpmi_get_clks(struct gpmi_nand_data *this)
                        break;
 
                clk = clk_get(this->dev, extra_clks[i - 1]);
-               if (IS_ERR(clk))
+               if (IS_ERR(clk)) {
+                       err = PTR_ERR(clk);
                        goto err_clock;
+               }
 
                r->clock[i] = clk;
        }
@@ -511,12 +643,11 @@ static int gpmi_get_clks(struct gpmi_nand_data *this)
 err_clock:
        dev_dbg(this->dev, "failed in finding the clocks.\n");
        gpmi_put_clks(this);
-       return -ENOMEM;
+       return err;
 }
 
 static int acquire_resources(struct gpmi_nand_data *this)
 {
-       struct pinctrl *pinctrl;
        int ret;
 
        ret = acquire_register_block(this, GPMI_NAND_GPMI_REGS_ADDR_RES_NAME);
@@ -535,19 +666,12 @@ static int acquire_resources(struct gpmi_nand_data *this)
        if (ret)
                goto exit_dma_channels;
 
-       pinctrl = devm_pinctrl_get_select_default(&this->pdev->dev);
-       if (IS_ERR(pinctrl)) {
-               ret = PTR_ERR(pinctrl);
-               goto exit_pin;
-       }
-
        ret = gpmi_get_clks(this);
        if (ret)
                goto exit_clock;
        return 0;
 
 exit_clock:
-exit_pin:
        release_dma_channels(this);
 exit_dma_channels:
        release_bch_irq(this);
@@ -1153,43 +1277,31 @@ static int gpmi_block_markbad(struct mtd_info *mtd, loff_t ofs)
 {
        struct nand_chip *chip = mtd->priv;
        struct gpmi_nand_data *this = chip->priv;
-       int block, ret = 0;
+       int ret = 0;
        uint8_t *block_mark;
        int column, page, status, chipnr;
 
-       /* Get block number */
-       block = (int)(ofs >> chip->bbt_erase_shift);
-       if (chip->bbt)
-               chip->bbt[block >> 2] |= 0x01 << ((block & 0x03) << 1);
+       chipnr = (int)(ofs >> chip->chip_shift);
+       chip->select_chip(mtd, chipnr);
 
-       /* Do we have a flash based bad block table ? */
-       if (chip->bbt_options & NAND_BBT_USE_FLASH)
-               ret = nand_update_bbt(mtd, ofs);
-       else {
-               chipnr = (int)(ofs >> chip->chip_shift);
-               chip->select_chip(mtd, chipnr);
+       column = this->swap_block_mark ? mtd->writesize : 0;
 
-               column = this->swap_block_mark ? mtd->writesize : 0;
+       /* Write the block mark. */
+       block_mark = this->data_buffer_dma;
+       block_mark[0] = 0; /* bad block marker */
 
-               /* Write the block mark. */
-               block_mark = this->data_buffer_dma;
-               block_mark[0] = 0; /* bad block marker */
+       /* Shift to get page */
+       page = (int)(ofs >> chip->page_shift);
 
-               /* Shift to get page */
-               page = (int)(ofs >> chip->page_shift);
+       chip->cmdfunc(mtd, NAND_CMD_SEQIN, column, page);
+       chip->write_buf(mtd, block_mark, 1);
+       chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
 
-               chip->cmdfunc(mtd, NAND_CMD_SEQIN, column, page);
-               chip->write_buf(mtd, block_mark, 1);
-               chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+       status = chip->waitfunc(mtd, chip);
+       if (status & NAND_STATUS_FAIL)
+               ret = -EIO;
 
-               status = chip->waitfunc(mtd, chip);
-               if (status & NAND_STATUS_FAIL)
-                       ret = -EIO;
-
-               chip->select_chip(mtd, -1);
-       }
-       if (!ret)
-               mtd->ecc_stats.badblocks++;
+       chip->select_chip(mtd, -1);
 
        return ret;
 }
@@ -1469,19 +1581,22 @@ static int gpmi_pre_bbt_scan(struct gpmi_nand_data  *this)
        if (ret)
                return ret;
 
-       /* Adjust the ECC strength according to the chip. */
-       this->nand.ecc.strength = this->bch_geometry.ecc_strength;
-       this->mtd.ecc_strength = this->bch_geometry.ecc_strength;
-       this->mtd.bitflip_threshold = this->bch_geometry.ecc_strength;
-
        /* NAND boot init, depends on the gpmi_set_geometry(). */
        return nand_boot_init(this);
 }
 
-static int gpmi_scan_bbt(struct mtd_info *mtd)
+static void gpmi_nfc_exit(struct gpmi_nand_data *this)
 {
+       nand_release(&this->mtd);
+       gpmi_free_dma_buffer(this);
+}
+
+static int gpmi_init_last(struct gpmi_nand_data *this)
+{
+       struct mtd_info *mtd = &this->mtd;
        struct nand_chip *chip = mtd->priv;
-       struct gpmi_nand_data *this = chip->priv;
+       struct nand_ecc_ctrl *ecc = &chip->ecc;
+       struct bch_geometry *bch_geo = &this->bch_geometry;
        int ret;
 
        /* Prepare for the BBT scan. */
@@ -1489,6 +1604,16 @@ static int gpmi_scan_bbt(struct mtd_info *mtd)
        if (ret)
                return ret;
 
+       /* Init the nand_ecc_ctrl{} */
+       ecc->read_page  = gpmi_ecc_read_page;
+       ecc->write_page = gpmi_ecc_write_page;
+       ecc->read_oob   = gpmi_ecc_read_oob;
+       ecc->write_oob  = gpmi_ecc_write_oob;
+       ecc->mode       = NAND_ECC_HW;
+       ecc->size       = bch_geo->ecc_chunk_size;
+       ecc->strength   = bch_geo->ecc_strength;
+       ecc->layout     = &gpmi_hw_ecclayout;
+
        /*
         * Can we enable the extra features? such as EDO or Sync mode.
         *
@@ -1497,14 +1622,7 @@ static int gpmi_scan_bbt(struct mtd_info *mtd)
         */
        gpmi_extra_init(this);
 
-       /* use the default BBT implementation */
-       return nand_default_bbt(mtd);
-}
-
-static void gpmi_nfc_exit(struct gpmi_nand_data *this)
-{
-       nand_release(&this->mtd);
-       gpmi_free_dma_buffer(this);
+       return 0;
 }
 
 static int gpmi_nfc_init(struct gpmi_nand_data *this)
@@ -1530,33 +1648,33 @@ static int gpmi_nfc_init(struct gpmi_nand_data *this)
        chip->read_byte         = gpmi_read_byte;
        chip->read_buf          = gpmi_read_buf;
        chip->write_buf         = gpmi_write_buf;
-       chip->ecc.read_page     = gpmi_ecc_read_page;
-       chip->ecc.write_page    = gpmi_ecc_write_page;
-       chip->ecc.read_oob      = gpmi_ecc_read_oob;
-       chip->ecc.write_oob     = gpmi_ecc_write_oob;
-       chip->scan_bbt          = gpmi_scan_bbt;
        chip->badblock_pattern  = &gpmi_bbt_descr;
        chip->block_markbad     = gpmi_block_markbad;
        chip->options           |= NAND_NO_SUBPAGE_WRITE;
-       chip->ecc.mode          = NAND_ECC_HW;
-       chip->ecc.size          = 1;
-       chip->ecc.strength      = 8;
-       chip->ecc.layout        = &gpmi_hw_ecclayout;
        if (of_get_nand_on_flash_bbt(this->dev->of_node))
                chip->bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB;
 
-       /* Allocate a temporary DMA buffer for reading ID in the nand_scan() */
+       /*
+        * Allocate a temporary DMA buffer for reading ID in the
+        * nand_scan_ident().
+        */
        this->bch_geometry.payload_size = 1024;
        this->bch_geometry.auxiliary_size = 128;
        ret = gpmi_alloc_dma_buffer(this);
        if (ret)
                goto err_out;
 
-       ret = nand_scan(mtd, 1);
-       if (ret) {
-               pr_err("Chip scan failed\n");
+       ret = nand_scan_ident(mtd, 1, NULL);
+       if (ret)
+               goto err_out;
+
+       ret = gpmi_init_last(this);
+       if (ret)
+               goto err_out;
+
+       ret = nand_scan_tail(mtd);
+       if (ret)
                goto err_out;
-       }
 
        ppdata.of_node = this->pdev->dev.of_node;
        ret = mtd_device_parse_register(mtd, NULL, &ppdata, NULL, 0);
@@ -1573,19 +1691,19 @@ static const struct platform_device_id gpmi_ids[] = {
        { .name = "imx23-gpmi-nand", .driver_data = IS_MX23, },
        { .name = "imx28-gpmi-nand", .driver_data = IS_MX28, },
        { .name = "imx6q-gpmi-nand", .driver_data = IS_MX6Q, },
-       {},
+       {}
 };
 
 static const struct of_device_id gpmi_nand_id_table[] = {
        {
                .compatible = "fsl,imx23-gpmi-nand",
-               .data = (void *)&gpmi_ids[IS_MX23]
+               .data = (void *)&gpmi_ids[IS_MX23],
        }, {
                .compatible = "fsl,imx28-gpmi-nand",
-               .data = (void *)&gpmi_ids[IS_MX28]
+               .data = (void *)&gpmi_ids[IS_MX28],
        }, {
                .compatible = "fsl,imx6q-gpmi-nand",
-               .data = (void *)&gpmi_ids[IS_MX6Q]
+               .data = (void *)&gpmi_ids[IS_MX6Q],
        }, {}
 };
 MODULE_DEVICE_TABLE(of, gpmi_nand_id_table);
@@ -1601,7 +1719,7 @@ static int gpmi_nand_probe(struct platform_device *pdev)
                pdev->id_entry = of_id->data;
        } else {
                pr_err("Failed to find the right device id.\n");
-               return -ENOMEM;
+               return -ENODEV;
        }
 
        this = kzalloc(sizeof(*this), GFP_KERNEL);
@@ -1633,7 +1751,6 @@ static int gpmi_nand_probe(struct platform_device *pdev)
 exit_nfc_init:
        release_resources(this);
 exit_acquire_resources:
-       platform_set_drvdata(pdev, NULL);
        dev_err(this->dev, "driver registration failed: %d\n", ret);
        kfree(this);
 
@@ -1646,7 +1763,6 @@ static int gpmi_nand_remove(struct platform_device *pdev)
 
        gpmi_nfc_exit(this);
        release_resources(this);
-       platform_set_drvdata(pdev, NULL);
        kfree(this);
        return 0;
 }
index b76460eeaf2253f7db3e69404ff4afb51298f84b..a264b888c66cc9153e0f160af0c50e4675826a93 100644 (file)
@@ -411,7 +411,7 @@ static int jz_nand_probe(struct platform_device *pdev)
        struct jz_nand *nand;
        struct nand_chip *chip;
        struct mtd_info *mtd;
-       struct jz_nand_platform_data *pdata = pdev->dev.platform_data;
+       struct jz_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
        size_t chipnr, bank_idx;
        uint8_t nand_maf_id = 0, nand_dev_id = 0;
 
@@ -538,7 +538,6 @@ err_unclaim_banks:
 err_gpio_busy:
        if (pdata && gpio_is_valid(pdata->busy_gpio))
                gpio_free(pdata->busy_gpio);
-       platform_set_drvdata(pdev, NULL);
 err_iounmap_mmio:
        jz_nand_iounmap_resource(nand->mem, nand->base);
 err_free:
@@ -549,7 +548,7 @@ err_free:
 static int jz_nand_remove(struct platform_device *pdev)
 {
        struct jz_nand *nand = platform_get_drvdata(pdev);
-       struct jz_nand_platform_data *pdata = pdev->dev.platform_data;
+       struct jz_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
        size_t i;
 
        nand_release(&nand->mtd);
@@ -570,7 +569,6 @@ static int jz_nand_remove(struct platform_device *pdev)
 
        jz_nand_iounmap_resource(nand->mem, nand->base);
 
-       platform_set_drvdata(pdev, NULL);
        kfree(nand);
 
        return 0;
index fd1df5e13ae44d77207d19fb492064166bf46525..f4dd2a887ea5da15b0b64c743f9ecb2603035b97 100644 (file)
@@ -696,7 +696,7 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
        }
        lpc32xx_wp_disable(host);
 
-       host->pdata = pdev->dev.platform_data;
+       host->pdata = dev_get_platdata(&pdev->dev);
 
        nand_chip->priv = host;         /* link the private data structures */
        mtd->priv = nand_chip;
@@ -828,7 +828,6 @@ err_exit3:
 err_exit2:
        clk_disable(host->clk);
        clk_put(host->clk);
-       platform_set_drvdata(pdev, NULL);
 err_exit1:
        lpc32xx_wp_enable(host);
        gpio_free(host->ncfg->wp_gpio);
@@ -851,7 +850,6 @@ static int lpc32xx_nand_remove(struct platform_device *pdev)
 
        clk_disable(host->clk);
        clk_put(host->clk);
-       platform_set_drvdata(pdev, NULL);
 
        lpc32xx_wp_enable(host);
        gpio_free(host->ncfg->wp_gpio);
index be94ed5abefb74aebde118529cf5eca82abe876e..add75709d41550d893f9dc0ef144a8b2edc281f7 100644 (file)
@@ -798,7 +798,7 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
        }
        lpc32xx_wp_disable(host);
 
-       host->pdata = pdev->dev.platform_data;
+       host->pdata = dev_get_platdata(&pdev->dev);
 
        mtd = &host->mtd;
        chip = &host->nand_chip;
@@ -936,7 +936,6 @@ err_exit3:
 err_exit2:
        clk_disable(host->clk);
        clk_put(host->clk);
-       platform_set_drvdata(pdev, NULL);
 err_exit1:
        lpc32xx_wp_enable(host);
        gpio_free(host->ncfg->wp_gpio);
@@ -963,7 +962,6 @@ static int lpc32xx_nand_remove(struct platform_device *pdev)
 
        clk_disable(host->clk);
        clk_put(host->clk);
-       platform_set_drvdata(pdev, NULL);
        lpc32xx_wp_enable(host);
        gpio_free(host->ncfg->wp_gpio);
 
index 07e5784e5cd3f365e459a651368afdd6b2ab6f1b..ce8242b6c3e7fd9147cd4a481d024eec32bf1853 100644 (file)
@@ -266,7 +266,7 @@ static struct nand_ecclayout nandv2_hw_eccoob_4k = {
        }
 };
 
-static const char const *part_probes[] = {
+static const char * const part_probes[] = {
        "cmdlinepart", "RedBoot", "ofpart", NULL };
 
 static void memcpy32_fromio(void *trg, const void __iomem  *src, size_t size)
@@ -1432,7 +1432,8 @@ static int mxcnd_probe(struct platform_device *pdev)
 
        err = mxcnd_probe_dt(host);
        if (err > 0) {
-               struct mxc_nand_platform_data *pdata = pdev->dev.platform_data;
+               struct mxc_nand_platform_data *pdata =
+                                       dev_get_platdata(&pdev->dev);
                if (pdata) {
                        host->pdata = *pdata;
                        host->devtype_data = (struct mxc_nand_devtype_data *)
@@ -1446,8 +1447,6 @@ static int mxcnd_probe(struct platform_device *pdev)
 
        if (host->devtype_data->needs_ip) {
                res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-               if (!res)
-                       return -ENODEV;
                host->regs_ip = devm_ioremap_resource(&pdev->dev, res);
                if (IS_ERR(host->regs_ip))
                        return PTR_ERR(host->regs_ip);
@@ -1457,9 +1456,6 @@ static int mxcnd_probe(struct platform_device *pdev)
                res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        }
 
-       if (!res)
-               return -ENODEV;
-
        host->base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(host->base))
                return PTR_ERR(host->base);
@@ -1578,8 +1574,6 @@ static int mxcnd_remove(struct platform_device *pdev)
 {
        struct mxc_nand_host *host = platform_get_drvdata(pdev);
 
-       platform_set_drvdata(pdev, NULL);
-
        nand_release(&host->mtd);
 
        return 0;
index dfcd0a565c5b3e8f66d9b24077ae3701f132ee36..7ed4841327f2d7668e51c75645628f2127589f82 100644 (file)
@@ -108,13 +108,13 @@ static int check_offs_len(struct mtd_info *mtd,
        int ret = 0;
 
        /* Start address must align on block boundary */
-       if (ofs & ((1 << chip->phys_erase_shift) - 1)) {
+       if (ofs & ((1ULL << chip->phys_erase_shift) - 1)) {
                pr_debug("%s: unaligned address\n", __func__);
                ret = -EINVAL;
        }
 
        /* Length must align on block boundary */
-       if (len & ((1 << chip->phys_erase_shift) - 1)) {
+       if (len & ((1ULL << chip->phys_erase_shift) - 1)) {
                pr_debug("%s: length not block aligned\n", __func__);
                ret = -EINVAL;
        }
@@ -211,11 +211,9 @@ static void nand_select_chip(struct mtd_info *mtd, int chipnr)
  */
 static void nand_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
 {
-       int i;
        struct nand_chip *chip = mtd->priv;
 
-       for (i = 0; i < len; i++)
-               writeb(buf[i], chip->IO_ADDR_W);
+       iowrite8_rep(chip->IO_ADDR_W, buf, len);
 }
 
 /**
@@ -228,11 +226,9 @@ static void nand_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
  */
 static void nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
 {
-       int i;
        struct nand_chip *chip = mtd->priv;
 
-       for (i = 0; i < len; i++)
-               buf[i] = readb(chip->IO_ADDR_R);
+       ioread8_rep(chip->IO_ADDR_R, buf, len);
 }
 
 /**
@@ -245,14 +241,10 @@ static void nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
  */
 static void nand_write_buf16(struct mtd_info *mtd, const uint8_t *buf, int len)
 {
-       int i;
        struct nand_chip *chip = mtd->priv;
        u16 *p = (u16 *) buf;
-       len >>= 1;
-
-       for (i = 0; i < len; i++)
-               writew(p[i], chip->IO_ADDR_W);
 
+       iowrite16_rep(chip->IO_ADDR_W, p, len >> 1);
 }
 
 /**
@@ -265,13 +257,10 @@ static void nand_write_buf16(struct mtd_info *mtd, const uint8_t *buf, int len)
  */
 static void nand_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len)
 {
-       int i;
        struct nand_chip *chip = mtd->priv;
        u16 *p = (u16 *) buf;
-       len >>= 1;
 
-       for (i = 0; i < len; i++)
-               p[i] = readw(chip->IO_ADDR_R);
+       ioread16_rep(chip->IO_ADDR_R, p, len >> 1);
 }
 
 /**
@@ -335,80 +324,88 @@ static int nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
 }
 
 /**
- * nand_default_block_markbad - [DEFAULT] mark a block bad
+ * nand_default_block_markbad - [DEFAULT] mark a block bad via bad block marker
  * @mtd: MTD device structure
  * @ofs: offset from device start
  *
  * This is the default implementation, which can be overridden by a hardware
- * specific driver. We try operations in the following order, according to our
- * bbt_options (NAND_BBT_NO_OOB_BBM and NAND_BBT_USE_FLASH):
+ * specific driver. It provides the details for writing a bad block marker to a
+ * block.
+ */
+static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
+{
+       struct nand_chip *chip = mtd->priv;
+       struct mtd_oob_ops ops;
+       uint8_t buf[2] = { 0, 0 };
+       int ret = 0, res, i = 0;
+
+       ops.datbuf = NULL;
+       ops.oobbuf = buf;
+       ops.ooboffs = chip->badblockpos;
+       if (chip->options & NAND_BUSWIDTH_16) {
+               ops.ooboffs &= ~0x01;
+               ops.len = ops.ooblen = 2;
+       } else {
+               ops.len = ops.ooblen = 1;
+       }
+       ops.mode = MTD_OPS_PLACE_OOB;
+
+       /* Write to first/last page(s) if necessary */
+       if (chip->bbt_options & NAND_BBT_SCANLASTPAGE)
+               ofs += mtd->erasesize - mtd->writesize;
+       do {
+               res = nand_do_write_oob(mtd, ofs, &ops);
+               if (!ret)
+                       ret = res;
+
+               i++;
+               ofs += mtd->writesize;
+       } while ((chip->bbt_options & NAND_BBT_SCAN2NDPAGE) && i < 2);
+
+       return ret;
+}
+
+/**
+ * nand_block_markbad_lowlevel - mark a block bad
+ * @mtd: MTD device structure
+ * @ofs: offset from device start
+ *
+ * This function performs the generic NAND bad block marking steps (i.e., bad
+ * block table(s) and/or marker(s)). We only allow the hardware driver to
+ * specify how to write bad block markers to OOB (chip->block_markbad).
+ *
+ * We try operations in the following order:
  *  (1) erase the affected block, to allow OOB marker to be written cleanly
- *  (2) update in-memory BBT
- *  (3) write bad block marker to OOB area of affected block
- *  (4) update flash-based BBT
- * Note that we retain the first error encountered in (3) or (4), finish the
+ *  (2) write bad block marker to OOB area of affected block (unless flag
+ *      NAND_BBT_NO_OOB_BBM is present)
+ *  (3) update the BBT
+ * Note that we retain the first error encountered in (2) or (3), finish the
  * procedures, and dump the error in the end.
 */
-static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
+static int nand_block_markbad_lowlevel(struct mtd_info *mtd, loff_t ofs)
 {
        struct nand_chip *chip = mtd->priv;
-       uint8_t buf[2] = { 0, 0 };
-       int block, res, ret = 0, i = 0;
-       int write_oob = !(chip->bbt_options & NAND_BBT_NO_OOB_BBM);
+       int res, ret = 0;
 
-       if (write_oob) {
+       if (!(chip->bbt_options & NAND_BBT_NO_OOB_BBM)) {
                struct erase_info einfo;
 
                /* Attempt erase before marking OOB */
                memset(&einfo, 0, sizeof(einfo));
                einfo.mtd = mtd;
                einfo.addr = ofs;
-               einfo.len = 1 << chip->phys_erase_shift;
+               einfo.len = 1ULL << chip->phys_erase_shift;
                nand_erase_nand(mtd, &einfo, 0);
-       }
-
-       /* Get block number */
-       block = (int)(ofs >> chip->bbt_erase_shift);
-       /* Mark block bad in memory-based BBT */
-       if (chip->bbt)
-               chip->bbt[block >> 2] |= 0x01 << ((block & 0x03) << 1);
-
-       /* Write bad block marker to OOB */
-       if (write_oob) {
-               struct mtd_oob_ops ops;
-               loff_t wr_ofs = ofs;
 
+               /* Write bad block marker to OOB */
                nand_get_device(mtd, FL_WRITING);
-
-               ops.datbuf = NULL;
-               ops.oobbuf = buf;
-               ops.ooboffs = chip->badblockpos;
-               if (chip->options & NAND_BUSWIDTH_16) {
-                       ops.ooboffs &= ~0x01;
-                       ops.len = ops.ooblen = 2;
-               } else {
-                       ops.len = ops.ooblen = 1;
-               }
-               ops.mode = MTD_OPS_PLACE_OOB;
-
-               /* Write to first/last page(s) if necessary */
-               if (chip->bbt_options & NAND_BBT_SCANLASTPAGE)
-                       wr_ofs += mtd->erasesize - mtd->writesize;
-               do {
-                       res = nand_do_write_oob(mtd, wr_ofs, &ops);
-                       if (!ret)
-                               ret = res;
-
-                       i++;
-                       wr_ofs += mtd->writesize;
-               } while ((chip->bbt_options & NAND_BBT_SCAN2NDPAGE) && i < 2);
-
+               ret = chip->block_markbad(mtd, ofs);
                nand_release_device(mtd);
        }
 
-       /* Update flash-based bad block table */
-       if (chip->bbt_options & NAND_BBT_USE_FLASH) {
-               res = nand_update_bbt(mtd, ofs);
+       /* Mark block bad in BBT */
+       if (chip->bbt) {
+               res = nand_markbad_bbt(mtd, ofs);
                if (!ret)
                        ret = res;
        }
@@ -1983,13 +1980,14 @@ static int nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
  * nand_write_subpage_hwecc - [REPLACABLE] hardware ECC based subpage write
  * @mtd:       mtd info structure
  * @chip:      nand chip info structure
- * @column:    column address of subpage within the page
+ * @offset:    column address of subpage within the page
  * @data_len:  data length
+ * @buf:       data buffer
  * @oob_required: must write chip->oob_poi to OOB
  */
 static int nand_write_subpage_hwecc(struct mtd_info *mtd,
                                struct nand_chip *chip, uint32_t offset,
-                               uint32_t data_len, const uint8_t *data_buf,
+                               uint32_t data_len, const uint8_t *buf,
                                int oob_required)
 {
        uint8_t *oob_buf  = chip->oob_poi;
@@ -2008,20 +2006,20 @@ static int nand_write_subpage_hwecc(struct mtd_info *mtd,
                chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
 
                /* write data (untouched subpages already masked by 0xFF) */
-               chip->write_buf(mtd, data_buf, ecc_size);
+               chip->write_buf(mtd, buf, ecc_size);
 
                /* mask ECC of un-touched subpages by padding 0xFF */
                if ((step < start_step) || (step > end_step))
                        memset(ecc_calc, 0xff, ecc_bytes);
                else
-                       chip->ecc.calculate(mtd, data_buf, ecc_calc);
+                       chip->ecc.calculate(mtd, buf, ecc_calc);
 
                /* mask OOB of un-touched subpages by padding 0xFF */
                /* if oob_required, preserve OOB metadata of written subpage */
                if (!oob_required || (step < start_step) || (step > end_step))
                        memset(oob_buf, 0xff, oob_bytes);
 
-               data_buf += ecc_size;
+               buf += ecc_size;
                ecc_calc += ecc_bytes;
                oob_buf  += oob_bytes;
        }
@@ -2633,7 +2631,7 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
                }
 
                /* Increment page address and decrement length */
-               len -= (1 << chip->phys_erase_shift);
+               len -= (1ULL << chip->phys_erase_shift);
                page += pages_per_block;
 
                /* Check, if we cross a chip boundary */
@@ -2694,7 +2692,6 @@ static int nand_block_isbad(struct mtd_info *mtd, loff_t offs)
  */
 static int nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
 {
-       struct nand_chip *chip = mtd->priv;
        int ret;
 
        ret = nand_block_isbad(mtd, ofs);
@@ -2705,7 +2702,7 @@ static int nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
                return ret;
        }
 
-       return chip->block_markbad(mtd, ofs);
+       return nand_block_markbad_lowlevel(mtd, ofs);
 }
 
 /**
@@ -2720,7 +2717,9 @@ static int nand_onfi_set_features(struct mtd_info *mtd, struct nand_chip *chip,
 {
        int status;
 
-       if (!chip->onfi_version)
+       if (!chip->onfi_version ||
+           !(le16_to_cpu(chip->onfi_params.opt_cmd)
+             & ONFI_OPT_CMD_SET_GET_FEATURES))
                return -EINVAL;
 
        chip->cmdfunc(mtd, NAND_CMD_SET_FEATURES, addr, -1);
@@ -2741,7 +2740,9 @@ static int nand_onfi_set_features(struct mtd_info *mtd, struct nand_chip *chip,
 static int nand_onfi_get_features(struct mtd_info *mtd, struct nand_chip *chip,
                        int addr, uint8_t *subfeature_param)
 {
-       if (!chip->onfi_version)
+       if (!chip->onfi_version ||
+           !(le16_to_cpu(chip->onfi_params.opt_cmd)
+             & ONFI_OPT_CMD_SET_GET_FEATURES))
                return -EINVAL;
 
        /* clear the sub feature parameters */
@@ -2793,7 +2794,15 @@ static void nand_set_defaults(struct nand_chip *chip, int busw)
 
        if (!chip->select_chip)
                chip->select_chip = nand_select_chip;
-       if (!chip->read_byte)
+
+       /* set for ONFI nand */
+       if (!chip->onfi_set_features)
+               chip->onfi_set_features = nand_onfi_set_features;
+       if (!chip->onfi_get_features)
+               chip->onfi_get_features = nand_onfi_get_features;
+
+       /* If called twice, pointers that depend on busw may need to be reset */
+       if (!chip->read_byte || chip->read_byte == nand_read_byte)
                chip->read_byte = busw ? nand_read_byte16 : nand_read_byte;
        if (!chip->read_word)
                chip->read_word = nand_read_word;
@@ -2801,9 +2810,9 @@ static void nand_set_defaults(struct nand_chip *chip, int busw)
                chip->block_bad = nand_block_bad;
        if (!chip->block_markbad)
                chip->block_markbad = nand_default_block_markbad;
-       if (!chip->write_buf)
+       if (!chip->write_buf || chip->write_buf == nand_write_buf)
                chip->write_buf = busw ? nand_write_buf16 : nand_write_buf;
-       if (!chip->read_buf)
+       if (!chip->read_buf || chip->read_buf == nand_read_buf)
                chip->read_buf = busw ? nand_read_buf16 : nand_read_buf;
        if (!chip->scan_bbt)
                chip->scan_bbt = nand_default_bbt;
@@ -2846,6 +2855,78 @@ static u16 onfi_crc16(u16 crc, u8 const *p, size_t len)
        return crc;
 }
 
+/* Parse the Extended Parameter Page. */
+static int nand_flash_detect_ext_param_page(struct mtd_info *mtd,
+               struct nand_chip *chip, struct nand_onfi_params *p)
+{
+       struct onfi_ext_param_page *ep;
+       struct onfi_ext_section *s;
+       struct onfi_ext_ecc_info *ecc;
+       uint8_t *cursor;
+       int ret = -EINVAL;
+       int len;
+       int i;
+
+       len = le16_to_cpu(p->ext_param_page_length) * 16;
+       ep = kmalloc(len, GFP_KERNEL);
+       if (!ep) {
+               ret = -ENOMEM;
+               goto ext_out;
+       }
+
+       /* Send our own NAND_CMD_PARAM. */
+       chip->cmdfunc(mtd, NAND_CMD_PARAM, 0, -1);
+
+       /* Use the Change Read Column command to skip the ONFI param pages. */
+       chip->cmdfunc(mtd, NAND_CMD_RNDOUT,
+                       sizeof(*p) * p->num_of_param_pages , -1);
+
+       /* Read out the Extended Parameter Page. */
+       chip->read_buf(mtd, (uint8_t *)ep, len);
+       if ((onfi_crc16(ONFI_CRC_BASE, ((uint8_t *)ep) + 2, len - 2)
+               != le16_to_cpu(ep->crc))) {
+               pr_debug("fail in the CRC.\n");
+               goto ext_out;
+       }
+
+       /*
+        * Check the signature.
+        * Do not strictly follow the ONFI spec, maybe changed in future.
+        */
+       if (strncmp(ep->sig, "EPPS", 4)) {
+               pr_debug("The signature is invalid.\n");
+               goto ext_out;
+       }
+
+       /* find the ECC section. */
+       cursor = (uint8_t *)(ep + 1);
+       for (i = 0; i < ONFI_EXT_SECTION_MAX; i++) {
+               s = ep->sections + i;
+               if (s->type == ONFI_SECTION_TYPE_2)
+                       break;
+               cursor += s->length * 16;
+       }
+       if (i == ONFI_EXT_SECTION_MAX) {
+               pr_debug("We can not find the ECC section.\n");
+               goto ext_out;
+       }
+
+       /* get the info we want. */
+       ecc = (struct onfi_ext_ecc_info *)cursor;
+
+       if (ecc->codeword_size) {
+               chip->ecc_strength_ds = ecc->ecc_bits;
+               chip->ecc_step_ds = 1 << ecc->codeword_size;
+       }
+
+       pr_info("ONFI extended param page detected.\n");
+       return 0;
+
+ext_out:
+       kfree(ep);
+       return ret;
+}
+
 /*
  * Check if the NAND chip is ONFI compliant, returns 1 if it is, 0 otherwise.
  */
@@ -2907,9 +2988,31 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
        mtd->oobsize = le16_to_cpu(p->spare_bytes_per_page);
        chip->chipsize = le32_to_cpu(p->blocks_per_lun);
        chip->chipsize *= (uint64_t)mtd->erasesize * p->lun_count;
-       *busw = 0;
-       if (le16_to_cpu(p->features) & 1)
+
+       if (onfi_feature(chip) & ONFI_FEATURE_16_BIT_BUS)
                *busw = NAND_BUSWIDTH_16;
+       else
+               *busw = 0;
+
+       if (p->ecc_bits != 0xff) {
+               chip->ecc_strength_ds = p->ecc_bits;
+               chip->ecc_step_ds = 512;
+       } else if (chip->onfi_version >= 21 &&
+               (onfi_feature(chip) & ONFI_FEATURE_EXT_PARAM_PAGE)) {
+
+               /*
+                * The nand_flash_detect_ext_param_page() uses the
+                * Change Read Column command which maybe not supported
+                * by the chip->cmdfunc. So try to update the chip->cmdfunc
+                * now. We do not replace user supplied command function.
+                */
+               if (mtd->writesize > 512 && chip->cmdfunc == nand_command)
+                       chip->cmdfunc = nand_command_lp;
+
+               /* The Extended Parameter Page is supported since ONFI 2.1. */
+               if (nand_flash_detect_ext_param_page(mtd, chip, p))
+                       pr_info("Failed to detect the extended param page.\n");
+       }
 
        pr_info("ONFI flash detected\n");
        return 1;
@@ -3086,6 +3189,22 @@ static void nand_decode_ext_id(struct mtd_info *mtd, struct nand_chip *chip,
                extid >>= 2;
                /* Get buswidth information */
                *busw = (extid & 0x01) ? NAND_BUSWIDTH_16 : 0;
+
+               /*
+                * Toshiba 24nm raw SLC (i.e., not BENAND) have 32B OOB per
+                * 512B page. For Toshiba SLC, we decode the 5th/6th byte as
+                * follows:
+                * - ID byte 6, bits[2:0]: 100b -> 43nm, 101b -> 32nm,
+                *                         110b -> 24nm
+                * - ID byte 5, bit[7]:    1 -> BENAND, 0 -> raw SLC
+                */
+               if (id_len >= 6 && id_data[0] == NAND_MFR_TOSHIBA &&
+                               !(chip->cellinfo & NAND_CI_CELLTYPE_MSK) &&
+                               (id_data[5] & 0x7) == 0x6 /* 24nm */ &&
+                               !(id_data[4] & 0x80) /* !BENAND */) {
+                       mtd->oobsize = 32 * mtd->writesize >> 9;
+               }
+
        }
 }
 
@@ -3172,6 +3291,8 @@ static bool find_full_id_nand(struct mtd_info *mtd, struct nand_chip *chip,
                chip->cellinfo = id_data[2];
                chip->chipsize = (uint64_t)type->chipsize << 20;
                chip->options |= type->options;
+               chip->ecc_strength_ds = NAND_ECC_STRENGTH(type);
+               chip->ecc_step_ds = NAND_ECC_STEP(type);
 
                *busw = type->options & NAND_BUSWIDTH_16;
 
@@ -3446,12 +3567,6 @@ int nand_scan_tail(struct mtd_info *mtd)
        if (!chip->write_page)
                chip->write_page = nand_write_page;
 
-       /* set for ONFI nand */
-       if (!chip->onfi_set_features)
-               chip->onfi_set_features = nand_onfi_set_features;
-       if (!chip->onfi_get_features)
-               chip->onfi_get_features = nand_onfi_get_features;
-
        /*
         * Check ECC mode, default to software if 3byte/512byte hardware ECC is
         * selected and we have 256 byte pagesize fallback to software ECC
@@ -3674,6 +3789,7 @@ int nand_scan_tail(struct mtd_info *mtd)
        /* propagate ecc info to mtd_info */
        mtd->ecclayout = chip->ecc.layout;
        mtd->ecc_strength = chip->ecc.strength;
+       mtd->ecc_step_size = chip->ecc.size;
        /*
         * Initialize bitflip_threshold to its default prior scan_bbt() call.
         * scan_bbt() might invoke mtd_read(), thus bitflip_threshold must be
index 267264320e06587cbee58ad5e036a38b90f07fe9..bc06196d57395c5c0a9ec8dba4ea5fcd8832e8b2 100644 (file)
 #include <linux/export.h>
 #include <linux/string.h>
 
+#define BBT_BLOCK_GOOD         0x00
+#define BBT_BLOCK_WORN         0x01
+#define BBT_BLOCK_RESERVED     0x02
+#define BBT_BLOCK_FACTORY_BAD  0x03
+
+#define BBT_ENTRY_MASK         0x03
+#define BBT_ENTRY_SHIFT                2
+
+static int nand_update_bbt(struct mtd_info *mtd, loff_t offs);
+
+static inline uint8_t bbt_get_entry(struct nand_chip *chip, int block)
+{
+       uint8_t entry = chip->bbt[block >> BBT_ENTRY_SHIFT];
+       entry >>= (block & BBT_ENTRY_MASK) * 2;
+       return entry & BBT_ENTRY_MASK;
+}
+
+static inline void bbt_mark_entry(struct nand_chip *chip, int block,
+               uint8_t mark)
+{
+       uint8_t msk = (mark & BBT_ENTRY_MASK) << ((block & BBT_ENTRY_MASK) * 2);
+       chip->bbt[block >> BBT_ENTRY_SHIFT] |= msk;
+}
+
 static int check_pattern_no_oob(uint8_t *buf, struct nand_bbt_descr *td)
 {
        if (memcmp(buf, td->pattern, td->len))
@@ -86,33 +110,17 @@ static int check_pattern_no_oob(uint8_t *buf, struct nand_bbt_descr *td)
  * @td: search pattern descriptor
  *
  * Check for a pattern at the given place. Used to search bad block tables and
- * good / bad block identifiers. If the SCAN_EMPTY option is set then check, if
- * all bytes except the pattern area contain 0xff.
+ * good / bad block identifiers.
  */
 static int check_pattern(uint8_t *buf, int len, int paglen, struct nand_bbt_descr *td)
 {
-       int end = 0;
-       uint8_t *p = buf;
-
        if (td->options & NAND_BBT_NO_OOB)
                return check_pattern_no_oob(buf, td);
 
-       end = paglen + td->offs;
-       if (td->options & NAND_BBT_SCANEMPTY)
-               if (memchr_inv(p, 0xff, end))
-                       return -1;
-       p += end;
-
        /* Compare the pattern */
-       if (memcmp(p, td->pattern, td->len))
+       if (memcmp(buf + paglen + td->offs, td->pattern, td->len))
                return -1;
 
-       if (td->options & NAND_BBT_SCANEMPTY) {
-               p += td->len;
-               end += td->len;
-               if (memchr_inv(p, 0xff, len - end))
-                       return -1;
-       }
        return 0;
 }
 
@@ -159,7 +167,7 @@ static u32 add_marker_len(struct nand_bbt_descr *td)
  * @page: the starting page
  * @num: the number of bbt descriptors to read
  * @td: the bbt describtion table
- * @offs: offset in the memory table
+ * @offs: block number offset in the table
  *
  * Read the bad block table starting from page.
  */
@@ -209,14 +217,16 @@ static int read_bbt(struct mtd_info *mtd, uint8_t *buf, int page, int num,
                /* Analyse data */
                for (i = 0; i < len; i++) {
                        uint8_t dat = buf[i];
-                       for (j = 0; j < 8; j += bits, act += 2) {
+                       for (j = 0; j < 8; j += bits, act++) {
                                uint8_t tmp = (dat >> j) & msk;
                                if (tmp == msk)
                                        continue;
                                if (reserved_block_code && (tmp == reserved_block_code)) {
                                        pr_info("nand_read_bbt: reserved block at 0x%012llx\n",
-                                                (loff_t)((offs << 2) + (act >> 1)) << this->bbt_erase_shift);
-                                       this->bbt[offs + (act >> 3)] |= 0x2 << (act & 0x06);
+                                                (loff_t)(offs + act) <<
+                                                this->bbt_erase_shift);
+                                       bbt_mark_entry(this, offs + act,
+                                                       BBT_BLOCK_RESERVED);
                                        mtd->ecc_stats.bbtblocks++;
                                        continue;
                                }
@@ -225,12 +235,15 @@ static int read_bbt(struct mtd_info *mtd, uint8_t *buf, int page, int num,
                                 * move this message to pr_debug.
                                 */
                                pr_info("nand_read_bbt: bad block at 0x%012llx\n",
-                                        (loff_t)((offs << 2) + (act >> 1)) << this->bbt_erase_shift);
+                                        (loff_t)(offs + act) <<
+                                        this->bbt_erase_shift);
                                /* Factory marked bad or worn out? */
                                if (tmp == 0)
-                                       this->bbt[offs + (act >> 3)] |= 0x3 << (act & 0x06);
+                                       bbt_mark_entry(this, offs + act,
+                                                       BBT_BLOCK_FACTORY_BAD);
                                else
-                                       this->bbt[offs + (act >> 3)] |= 0x1 << (act & 0x06);
+                                       bbt_mark_entry(this, offs + act,
+                                                       BBT_BLOCK_WORN);
                                mtd->ecc_stats.badblocks++;
                        }
                }
@@ -265,7 +278,7 @@ static int read_abs_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_desc
                                        td, offs);
                        if (res)
                                return res;
-                       offs += this->chipsize >> (this->bbt_erase_shift + 2);
+                       offs += this->chipsize >> this->bbt_erase_shift;
                }
        } else {
                res = read_bbt(mtd, buf, td->pages[0],
@@ -478,22 +491,12 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf,
        else
                numpages = 1;
 
-       if (!(bd->options & NAND_BBT_SCANEMPTY)) {
-               /* We need only read few bytes from the OOB area */
-               scanlen = 0;
-               readlen = bd->len;
-       } else {
-               /* Full page content should be read */
-               scanlen = mtd->writesize + mtd->oobsize;
-               readlen = numpages * mtd->writesize;
-       }
+       /* We need only read few bytes from the OOB area */
+       scanlen = 0;
+       readlen = bd->len;
 
        if (chip == -1) {
-               /*
-                * Note that numblocks is 2 * (real numblocks) here, see i+=2
-                * below as it makes shifting and masking less painful
-                */
-               numblocks = mtd->size >> (this->bbt_erase_shift - 1);
+               numblocks = mtd->size >> this->bbt_erase_shift;
                startblock = 0;
                from = 0;
        } else {
@@ -502,16 +505,16 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf,
                               chip + 1, this->numchips);
                        return -EINVAL;
                }
-               numblocks = this->chipsize >> (this->bbt_erase_shift - 1);
+               numblocks = this->chipsize >> this->bbt_erase_shift;
                startblock = chip * numblocks;
                numblocks += startblock;
-               from = (loff_t)startblock << (this->bbt_erase_shift - 1);
+               from = (loff_t)startblock << this->bbt_erase_shift;
        }
 
        if (this->bbt_options & NAND_BBT_SCANLASTPAGE)
                from += mtd->erasesize - (mtd->writesize * numpages);
 
-       for (i = startblock; i < numblocks;) {
+       for (i = startblock; i < numblocks; i++) {
                int ret;
 
                BUG_ON(bd->options & NAND_BBT_NO_OOB);
@@ -526,13 +529,12 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf,
                        return ret;
 
                if (ret) {
-                       this->bbt[i >> 3] |= 0x03 << (i & 0x6);
+                       bbt_mark_entry(this, i, BBT_BLOCK_FACTORY_BAD);
                        pr_warn("Bad eraseblock %d at 0x%012llx\n",
-                               i >> 1, (unsigned long long)from);
+                               i, (unsigned long long)from);
                        mtd->ecc_stats.badblocks++;
                }
 
-               i += 2;
                from += (1 << this->bbt_erase_shift);
        }
        return 0;
@@ -655,9 +657,9 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
 {
        struct nand_chip *this = mtd->priv;
        struct erase_info einfo;
-       int i, j, res, chip = 0;
+       int i, res, chip = 0;
        int bits, startblock, dir, page, offs, numblocks, sft, sftmsk;
-       int nrchips, bbtoffs, pageoffs, ooboffs;
+       int nrchips, pageoffs, ooboffs;
        uint8_t msk[4];
        uint8_t rcode = td->reserved_block_code;
        size_t retlen, len = 0;
@@ -713,10 +715,9 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
                for (i = 0; i < td->maxblocks; i++) {
                        int block = startblock + dir * i;
                        /* Check, if the block is bad */
-                       switch ((this->bbt[block >> 2] >>
-                                (2 * (block & 0x03))) & 0x03) {
-                       case 0x01:
-                       case 0x03:
+                       switch (bbt_get_entry(this, block)) {
+                       case BBT_BLOCK_WORN:
+                       case BBT_BLOCK_FACTORY_BAD:
                                continue;
                        }
                        page = block <<
@@ -748,8 +749,6 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
                default: return -EINVAL;
                }
 
-               bbtoffs = chip * (numblocks >> 2);
-
                to = ((loff_t)page) << this->page_shift;
 
                /* Must we save the block contents? */
@@ -814,16 +813,12 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
                        buf[ooboffs + td->veroffs] = td->version[chip];
 
                /* Walk through the memory table */
-               for (i = 0; i < numblocks;) {
+               for (i = 0; i < numblocks; i++) {
                        uint8_t dat;
-                       dat = this->bbt[bbtoffs + (i >> 2)];
-                       for (j = 0; j < 4; j++, i++) {
-                               int sftcnt = (i << (3 - sft)) & sftmsk;
-                               /* Do not store the reserved bbt blocks! */
-                               buf[offs + (i >> sft)] &=
-                                       ~(msk[dat & 0x03] << sftcnt);
-                               dat >>= 2;
-                       }
+                       int sftcnt = (i << (3 - sft)) & sftmsk;
+                       dat = bbt_get_entry(this, chip * numblocks + i);
+                       /* Do not store the reserved bbt blocks! */
+                       buf[offs + (i >> sft)] &= ~(msk[dat] << sftcnt);
                }
 
                memset(&einfo, 0, sizeof(einfo));
@@ -865,7 +860,6 @@ static inline int nand_memory_bbt(struct mtd_info *mtd, struct nand_bbt_descr *b
 {
        struct nand_chip *this = mtd->priv;
 
-       bd->options &= ~NAND_BBT_SCANEMPTY;
        return create_bbt(mtd, this->buffers->databuf, bd, -1);
 }
 
@@ -1009,7 +1003,7 @@ static void mark_bbt_region(struct mtd_info *mtd, struct nand_bbt_descr *td)
 {
        struct nand_chip *this = mtd->priv;
        int i, j, chips, block, nrblocks, update;
-       uint8_t oldval, newval;
+       uint8_t oldval;
 
        /* Do we have a bbt per chip? */
        if (td->options & NAND_BBT_PERCHIP) {
@@ -1026,12 +1020,12 @@ static void mark_bbt_region(struct mtd_info *mtd, struct nand_bbt_descr *td)
                        if (td->pages[i] == -1)
                                continue;
                        block = td->pages[i] >> (this->bbt_erase_shift - this->page_shift);
-                       block <<= 1;
-                       oldval = this->bbt[(block >> 3)];
-                       newval = oldval | (0x2 << (block & 0x06));
-                       this->bbt[(block >> 3)] = newval;
-                       if ((oldval != newval) && td->reserved_block_code)
-                               nand_update_bbt(mtd, (loff_t)block << (this->bbt_erase_shift - 1));
+                       oldval = bbt_get_entry(this, block);
+                       bbt_mark_entry(this, block, BBT_BLOCK_RESERVED);
+                       if ((oldval != BBT_BLOCK_RESERVED) &&
+                                       td->reserved_block_code)
+                               nand_update_bbt(mtd, (loff_t)block <<
+                                               this->bbt_erase_shift);
                        continue;
                }
                update = 0;
@@ -1039,14 +1033,12 @@ static void mark_bbt_region(struct mtd_info *mtd, struct nand_bbt_descr *td)
                        block = ((i + 1) * nrblocks) - td->maxblocks;
                else
                        block = i * nrblocks;
-               block <<= 1;
                for (j = 0; j < td->maxblocks; j++) {
-                       oldval = this->bbt[(block >> 3)];
-                       newval = oldval | (0x2 << (block & 0x06));
-                       this->bbt[(block >> 3)] = newval;
-                       if (oldval != newval)
+                       oldval = bbt_get_entry(this, block);
+                       bbt_mark_entry(this, block, BBT_BLOCK_RESERVED);
+                       if (oldval != BBT_BLOCK_RESERVED)
                                update = 1;
-                       block += 2;
+                       block++;
                }
                /*
                 * If we want reserved blocks to be recorded to flash, and some
@@ -1054,7 +1046,8 @@ static void mark_bbt_region(struct mtd_info *mtd, struct nand_bbt_descr *td)
                 * bbts.  This should only happen once.
                 */
                if (update && td->reserved_block_code)
-                       nand_update_bbt(mtd, (loff_t)(block - 2) << (this->bbt_erase_shift - 1));
+                       nand_update_bbt(mtd, (loff_t)(block - 1) <<
+                                       this->bbt_erase_shift);
        }
 }
 
@@ -1180,13 +1173,13 @@ int nand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
 }
 
 /**
- * nand_update_bbt - [NAND Interface] update bad block table(s)
+ * nand_update_bbt - update bad block table(s)
  * @mtd: MTD device structure
  * @offs: the offset of the newly marked block
  *
  * The function updates the bad block table(s).
  */
-int nand_update_bbt(struct mtd_info *mtd, loff_t offs)
+static int nand_update_bbt(struct mtd_info *mtd, loff_t offs)
 {
        struct nand_chip *this = mtd->priv;
        int len, res = 0;
@@ -1356,28 +1349,47 @@ int nand_default_bbt(struct mtd_info *mtd)
 int nand_isbad_bbt(struct mtd_info *mtd, loff_t offs, int allowbbt)
 {
        struct nand_chip *this = mtd->priv;
-       int block;
-       uint8_t res;
+       int block, res;
 
-       /* Get block number * 2 */
-       block = (int)(offs >> (this->bbt_erase_shift - 1));
-       res = (this->bbt[block >> 3] >> (block & 0x06)) & 0x03;
+       block = (int)(offs >> this->bbt_erase_shift);
+       res = bbt_get_entry(this, block);
 
        pr_debug("nand_isbad_bbt(): bbt info for offs 0x%08x: "
                        "(block %d) 0x%02x\n",
-                       (unsigned int)offs, block >> 1, res);
+                       (unsigned int)offs, block, res);
 
-       switch ((int)res) {
-       case 0x00:
+       switch (res) {
+       case BBT_BLOCK_GOOD:
                return 0;
-       case 0x01:
+       case BBT_BLOCK_WORN:
                return 1;
-       case 0x02:
+       case BBT_BLOCK_RESERVED:
                return allowbbt ? 0 : 1;
        }
        return 1;
 }
 
+/**
+ * nand_markbad_bbt - [NAND Interface] Mark a block bad in the BBT
+ * @mtd: MTD device structure
+ * @offs: offset of the bad block
+ */
+int nand_markbad_bbt(struct mtd_info *mtd, loff_t offs)
+{
+       struct nand_chip *this = mtd->priv;
+       int block, ret = 0;
+
+       block = (int)(offs >> this->bbt_erase_shift);
+
+       /* Mark bad block in memory */
+       bbt_mark_entry(this, block, BBT_BLOCK_WORN);
+
+       /* Update flash-based bad block table */
+       if (this->bbt_options & NAND_BBT_USE_FLASH)
+               ret = nand_update_bbt(mtd, offs);
+
+       return ret;
+}
+
 EXPORT_SYMBOL(nand_scan_bbt);
 EXPORT_SYMBOL(nand_default_bbt);
-EXPORT_SYMBOL_GPL(nand_update_bbt);
index 683813a46a905f489feabaced468efd88abc136d..a87b0a3afa351a1b8f8991836cb99af930d858da 100644 (file)
@@ -33,16 +33,16 @@ struct nand_flash_dev nand_flash_ids[] = {
         */
        {"TC58NVG2S0F 4G 3.3V 8-bit",
                { .id = {0x98, 0xdc, 0x90, 0x26, 0x76, 0x15, 0x01, 0x08} },
-                 SZ_4K, SZ_512, SZ_256K, 0, 8, 224},
+                 SZ_4K, SZ_512, SZ_256K, 0, 8, 224, NAND_ECC_INFO(4, SZ_512) },
        {"TC58NVG3S0F 8G 3.3V 8-bit",
                { .id = {0x98, 0xd3, 0x90, 0x26, 0x76, 0x15, 0x02, 0x08} },
-                 SZ_4K, SZ_1K, SZ_256K, 0, 8, 232},
+                 SZ_4K, SZ_1K, SZ_256K, 0, 8, 232, NAND_ECC_INFO(4, SZ_512) },
        {"TC58NVG5D2 32G 3.3V 8-bit",
                { .id = {0x98, 0xd7, 0x94, 0x32, 0x76, 0x56, 0x09, 0x00} },
-                 SZ_8K, SZ_4K, SZ_1M, 0, 8, 640},
+                 SZ_8K, SZ_4K, SZ_1M, 0, 8, 640, NAND_ECC_INFO(40, SZ_1K) },
        {"TC58NVG6D2 64G 3.3V 8-bit",
                { .id = {0x98, 0xde, 0x94, 0x82, 0x76, 0x56, 0x04, 0x20} },
-                 SZ_8K, SZ_8K, SZ_2M, 0, 8, 640},
+                 SZ_8K, SZ_8K, SZ_2M, 0, 8, 640, NAND_ECC_INFO(40, SZ_1K) },
 
        LEGACY_ID_NAND("NAND 4MiB 5V 8-bit",   0x6B, 4, SZ_8K, SP_OPTIONS),
        LEGACY_ID_NAND("NAND 4MiB 3,3V 8-bit", 0xE3, 4, SZ_8K, SP_OPTIONS),
index cb38f3d94218b2c977437062f8f402d07941fd6c..414f5225ae8b2ece87f1044e6c2a293ae481a773 100644 (file)
@@ -205,7 +205,7 @@ MODULE_PARM_DESC(bch,                "Enable BCH ecc and set how many bits should "
 
 /* Calculate the page offset in flash RAM image by (row, column) address */
 #define NS_RAW_OFFSET(ns) \
-       (((ns)->regs.row << (ns)->geom.pgshift) + ((ns)->regs.row * (ns)->geom.oobsz) + (ns)->regs.column)
+       (((ns)->regs.row * (ns)->geom.pgszoob) + (ns)->regs.column)
 
 /* Calculate the OOB offset in flash RAM image by (row, column) address */
 #define NS_RAW_OFFSET_OOB(ns) (NS_RAW_OFFSET(ns) + ns->geom.pgsz)
@@ -336,7 +336,6 @@ struct nandsim {
                uint pgsec;         /* number of pages per sector */
                uint secshift;      /* bits number in sector size */
                uint pgshift;       /* bits number in page size */
-               uint oobshift;      /* bits number in OOB size */
                uint pgaddrbytes;   /* bytes per page address */
                uint secaddrbytes;  /* bytes per sector address */
                uint idbytes;       /* the number ID bytes that this chip outputs */
@@ -363,7 +362,7 @@ struct nandsim {
 
        /* Fields needed when using a cache file */
        struct file *cfile; /* Open file */
-       unsigned char *pages_written; /* Which pages have been written */
+       unsigned long *pages_written; /* Which pages have been written */
        void *file_buf;
        struct page *held_pages[NS_MAX_HELD_PAGES];
        int held_cnt;
@@ -576,17 +575,18 @@ static int alloc_device(struct nandsim *ns)
                cfile = filp_open(cache_file, O_CREAT | O_RDWR | O_LARGEFILE, 0600);
                if (IS_ERR(cfile))
                        return PTR_ERR(cfile);
-               if (!cfile->f_op || (!cfile->f_op->read && !cfile->f_op->aio_read)) {
+               if (!file_readable(cfile)) {
                        NS_ERR("alloc_device: cache file not readable\n");
                        err = -EINVAL;
                        goto err_close;
                }
-               if (!cfile->f_op->write && !cfile->f_op->aio_write) {
+               if (!file_writable(cfile)) {
                        NS_ERR("alloc_device: cache file not writeable\n");
                        err = -EINVAL;
                        goto err_close;
                }
-               ns->pages_written = vzalloc(ns->geom.pgnum);
+               ns->pages_written = vzalloc(BITS_TO_LONGS(ns->geom.pgnum) *
+                                           sizeof(unsigned long));
                if (!ns->pages_written) {
                        NS_ERR("alloc_device: unable to allocate pages written array\n");
                        err = -ENOMEM;
@@ -653,9 +653,7 @@ static void free_device(struct nandsim *ns)
 
 static char *get_partition_name(int i)
 {
-       char buf[64];
-       sprintf(buf, "NAND simulator partition %d", i);
-       return kstrdup(buf, GFP_KERNEL);
+       return kasprintf(GFP_KERNEL, "NAND simulator partition %d", i);
 }
 
 /*
@@ -690,7 +688,6 @@ static int init_nandsim(struct mtd_info *mtd)
        ns->geom.totszoob = ns->geom.totsz + (uint64_t)ns->geom.pgnum * ns->geom.oobsz;
        ns->geom.secshift = ffs(ns->geom.secsz) - 1;
        ns->geom.pgshift  = chip->page_shift;
-       ns->geom.oobshift = ffs(ns->geom.oobsz) - 1;
        ns->geom.pgsec    = ns->geom.secsz / ns->geom.pgsz;
        ns->geom.secszoob = ns->geom.secsz + ns->geom.oobsz * ns->geom.pgsec;
        ns->options = 0;
@@ -761,12 +758,6 @@ static int init_nandsim(struct mtd_info *mtd)
                ns->nbparts += 1;
        }
 
-       /* Detect how many ID bytes the NAND chip outputs */
-       for (i = 0; nand_flash_ids[i].name != NULL; i++) {
-               if (second_id_byte != nand_flash_ids[i].dev_id)
-                       continue;
-       }
-
        if (ns->busw == 16)
                NS_WARN("16-bit flashes support wasn't tested\n");
 
@@ -780,7 +771,7 @@ static int init_nandsim(struct mtd_info *mtd)
        printk("bus width: %u\n",               ns->busw);
        printk("bits in sector size: %u\n",     ns->geom.secshift);
        printk("bits in page size: %u\n",       ns->geom.pgshift);
-       printk("bits in OOB size: %u\n",        ns->geom.oobshift);
+       printk("bits in OOB size: %u\n",        ffs(ns->geom.oobsz) - 1);
        printk("flash size with OOB: %llu KiB\n",
                        (unsigned long long)ns->geom.totszoob >> 10);
        printk("page address bytes: %u\n",      ns->geom.pgaddrbytes);
@@ -1442,7 +1433,7 @@ static inline u_char *NS_PAGE_BYTE_OFF(struct nandsim *ns)
        return NS_GET_PAGE(ns)->byte + ns->regs.column + ns->regs.off;
 }
 
-int do_read_error(struct nandsim *ns, int num)
+static int do_read_error(struct nandsim *ns, int num)
 {
        unsigned int page_no = ns->regs.row;
 
@@ -1454,7 +1445,7 @@ int do_read_error(struct nandsim *ns, int num)
        return 0;
 }
 
-void do_bit_flips(struct nandsim *ns, int num)
+static void do_bit_flips(struct nandsim *ns, int num)
 {
        if (bitflips && prandom_u32() < (1 << 22)) {
                int flips = 1;
@@ -1479,7 +1470,7 @@ static void read_page(struct nandsim *ns, int num)
        union ns_mem *mypage;
 
        if (ns->cfile) {
-               if (!ns->pages_written[ns->regs.row]) {
+               if (!test_bit(ns->regs.row, ns->pages_written)) {
                        NS_DBG("read_page: page %d not written\n", ns->regs.row);
                        memset(ns->buf.byte, 0xFF, num);
                } else {
@@ -1490,7 +1481,7 @@ static void read_page(struct nandsim *ns, int num)
                                ns->regs.row, ns->regs.column + ns->regs.off);
                        if (do_read_error(ns, num))
                                return;
-                       pos = (loff_t)ns->regs.row * ns->geom.pgszoob + ns->regs.column + ns->regs.off;
+                       pos = (loff_t)NS_RAW_OFFSET(ns) + ns->regs.off;
                        tx = read_file(ns, ns->cfile, ns->buf.byte, num, pos);
                        if (tx != num) {
                                NS_ERR("read_page: read error for page %d ret %ld\n", ns->regs.row, (long)tx);
@@ -1525,9 +1516,9 @@ static void erase_sector(struct nandsim *ns)
 
        if (ns->cfile) {
                for (i = 0; i < ns->geom.pgsec; i++)
-                       if (ns->pages_written[ns->regs.row + i]) {
+                       if (__test_and_clear_bit(ns->regs.row + i,
+                                                ns->pages_written)) {
                                NS_DBG("erase_sector: freeing page %d\n", ns->regs.row + i);
-                               ns->pages_written[ns->regs.row + i] = 0;
                        }
                return;
        }
@@ -1559,8 +1550,8 @@ static int prog_page(struct nandsim *ns, int num)
 
                NS_DBG("prog_page: writing page %d\n", ns->regs.row);
                pg_off = ns->file_buf + ns->regs.column + ns->regs.off;
-               off = (loff_t)ns->regs.row * ns->geom.pgszoob + ns->regs.column + ns->regs.off;
-               if (!ns->pages_written[ns->regs.row]) {
+               off = (loff_t)NS_RAW_OFFSET(ns) + ns->regs.off;
+               if (!test_bit(ns->regs.row, ns->pages_written)) {
                        all = 1;
                        memset(ns->file_buf, 0xff, ns->geom.pgszoob);
                } else {
@@ -1580,7 +1571,7 @@ static int prog_page(struct nandsim *ns, int num)
                                NS_ERR("prog_page: write error for page %d ret %ld\n", ns->regs.row, (long)tx);
                                return -1;
                        }
-                       ns->pages_written[ns->regs.row] = 1;
+                       __set_bit(ns->regs.row, ns->pages_written);
                } else {
                        tx = write_file(ns, ns->cfile, pg_off, num, off);
                        if (tx != num) {
index cd6be2ed53a86a86a1baca65440adb73183d10a8..52115151e4a7f325491a3dbf6f16ccd87221b37a 100644 (file)
@@ -324,8 +324,6 @@ static int nuc900_nand_remove(struct platform_device *pdev)
 
        kfree(nuc900_nand);
 
-       platform_set_drvdata(pdev, NULL);
-
        return 0;
 }
 
index 81b80af55872a4f2481dd6d3d0c61b762e02ff51..4ecf0e5fd4844d0432d68b0c9981bc9a79531b37 100644 (file)
@@ -154,7 +154,7 @@ static struct nand_ecclayout omap_oobinfo;
  */
 static uint8_t scan_ff_pattern[] = { 0xff };
 static struct nand_bbt_descr bb_descrip_flashbased = {
-       .options = NAND_BBT_SCANEMPTY | NAND_BBT_SCANALLPAGES,
+       .options = NAND_BBT_SCANALLPAGES,
        .offs = 0,
        .len = 1,
        .pattern = scan_ff_pattern,
@@ -1831,7 +1831,7 @@ static int omap_nand_probe(struct platform_device *pdev)
        struct resource                 *res;
        struct mtd_part_parser_data     ppdata = {};
 
-       pdata = pdev->dev.platform_data;
+       pdata = dev_get_platdata(&pdev->dev);
        if (pdata == NULL) {
                dev_err(&pdev->dev, "platform data missing\n");
                return -ENODEV;
@@ -2087,7 +2087,6 @@ static int omap_nand_remove(struct platform_device *pdev)
                                                        mtd);
        omap3_free_bch(&info->mtd);
 
-       platform_set_drvdata(pdev, NULL);
        if (info->dma)
                dma_release_channel(info->dma);
 
index 8fbd002086107f1fff96d26307e9b670b1ca7d95..a393a5b6ce1e5028155a415ae33db88067a7bfff 100644 (file)
@@ -130,8 +130,9 @@ static int __init orion_nand_probe(struct platform_device *pdev)
                if (!of_property_read_u32(pdev->dev.of_node,
                                                "chip-delay", &val))
                        board->chip_delay = (u8)val;
-       } else
-               board = pdev->dev.platform_data;
+       } else {
+               board = dev_get_platdata(&pdev->dev);
+       }
 
        mtd->priv = nc;
        mtd->owner = THIS_MODULE;
@@ -186,7 +187,6 @@ no_dev:
                clk_disable_unprepare(clk);
                clk_put(clk);
        }
-       platform_set_drvdata(pdev, NULL);
        iounmap(io_base);
 no_res:
        kfree(nc);
index c004566a9ad2ae383a311ea31587411e0a98aaa7..cad4cdc9df399a70c77640be5a634b837996790d 100644 (file)
@@ -30,7 +30,7 @@ static const char *part_probe_types[] = { "cmdlinepart", NULL };
  */
 static int plat_nand_probe(struct platform_device *pdev)
 {
-       struct platform_nand_data *pdata = pdev->dev.platform_data;
+       struct platform_nand_data *pdata = dev_get_platdata(&pdev->dev);
        struct mtd_part_parser_data ppdata;
        struct plat_nand_data *data;
        struct resource *res;
@@ -122,7 +122,6 @@ static int plat_nand_probe(struct platform_device *pdev)
 out:
        if (pdata->ctrl.remove)
                pdata->ctrl.remove(pdev);
-       platform_set_drvdata(pdev, NULL);
        iounmap(data->io_base);
 out_release_io:
        release_mem_region(res->start, resource_size(res));
@@ -137,7 +136,7 @@ out_free:
 static int plat_nand_remove(struct platform_device *pdev)
 {
        struct plat_nand_data *data = platform_get_drvdata(pdev);
-       struct platform_nand_data *pdata = pdev->dev.platform_data;
+       struct platform_nand_data *pdata = dev_get_platdata(&pdev->dev);
        struct resource *res;
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
index dec80ca6a5ce58dbd187690ea42fc706c9dc9325..dd03dfdfb0d65e0ded1b0a124221663b586482b5 100644 (file)
 #include <linux/of.h>
 #include <linux/of_device.h>
 
+#if defined(CONFIG_ARCH_PXA) || defined(CONFIG_ARCH_MMP)
+#define ARCH_HAS_DMA
+#endif
+
+#ifdef ARCH_HAS_DMA
 #include <mach/dma.h>
+#endif
+
 #include <linux/platform_data/mtd-nand-pxa3xx.h>
 
 #define        CHIP_DELAY_TIMEOUT      (2 * HZ/10)
@@ -80,6 +87,7 @@
 #define NDSR_RDDREQ            (0x1 << 1)
 #define NDSR_WRCMDREQ          (0x1)
 
+#define NDCB0_LEN_OVRD         (0x1 << 28)
 #define NDCB0_ST_ROW_EN         (0x1 << 26)
 #define NDCB0_AUTO_RS          (0x1 << 25)
 #define NDCB0_CSEL             (0x1 << 24)
@@ -123,9 +131,13 @@ enum {
        STATE_READY,
 };
 
+enum pxa3xx_nand_variant {
+       PXA3XX_NAND_VARIANT_PXA,
+       PXA3XX_NAND_VARIANT_ARMADA370,
+};
+
 struct pxa3xx_nand_host {
        struct nand_chip        chip;
-       struct pxa3xx_nand_cmdset *cmdset;
        struct mtd_info         *mtd;
        void                    *info_data;
 
@@ -139,10 +151,6 @@ struct pxa3xx_nand_host {
        unsigned int            row_addr_cycles;
        size_t                  read_id_bytes;
 
-       /* cached register value */
-       uint32_t                reg_ndcr;
-       uint32_t                ndtr0cs0;
-       uint32_t                ndtr1cs0;
 };
 
 struct pxa3xx_nand_info {
@@ -171,9 +179,16 @@ struct pxa3xx_nand_info {
        struct pxa3xx_nand_host *host[NUM_CHIP_SELECT];
        unsigned int            state;
 
+       /*
+        * This driver supports NFCv1 (as found in PXA SoC)
+        * and NFCv2 (as found in Armada 370/XP SoC).
+        */
+       enum pxa3xx_nand_variant variant;
+
        int                     cs;
        int                     use_ecc;        /* use HW ECC ? */
        int                     use_dma;        /* use DMA ? */
+       int                     use_spare;      /* use spare ? */
        int                     is_ready;
 
        unsigned int            page_size;      /* page size of attached chip */
@@ -181,33 +196,22 @@ struct pxa3xx_nand_info {
        unsigned int            oob_size;
        int                     retcode;
 
+       /* cached register value */
+       uint32_t                reg_ndcr;
+       uint32_t                ndtr0cs0;
+       uint32_t                ndtr1cs0;
+
        /* generated NDCBx register values */
        uint32_t                ndcb0;
        uint32_t                ndcb1;
        uint32_t                ndcb2;
+       uint32_t                ndcb3;
 };
 
 static bool use_dma = 1;
 module_param(use_dma, bool, 0444);
 MODULE_PARM_DESC(use_dma, "enable DMA for data transferring to/from NAND HW");
 
-/*
- * Default NAND flash controller configuration setup by the
- * bootloader. This configuration is used only when pdata->keep_config is set
- */
-static struct pxa3xx_nand_cmdset default_cmdset = {
-       .read1          = 0x3000,
-       .read2          = 0x0050,
-       .program        = 0x1080,
-       .read_status    = 0x0070,
-       .read_id        = 0x0090,
-       .erase          = 0xD060,
-       .reset          = 0x00FF,
-       .lock           = 0x002A,
-       .unlock         = 0x2423,
-       .lock_status    = 0x007A,
-};
-
 static struct pxa3xx_nand_timing timing[] = {
        { 40, 80, 60, 100, 80, 100, 90000, 400, 40, },
        { 10,  0, 20,  40, 30,  40, 11123, 110, 10, },
@@ -230,8 +234,6 @@ static struct pxa3xx_nand_flash builtin_flash_types[] = {
 /* Define a default flash type setting serve as flash detecting only */
 #define DEFAULT_FLASH_TYPE (&builtin_flash_types[0])
 
-const char *mtd_names[] = {"pxa3xx_nand-0", "pxa3xx_nand-1", NULL};
-
 #define NDTR0_tCH(c)   (min((c), 7) << 19)
 #define NDTR0_tCS(c)   (min((c), 7) << 16)
 #define NDTR0_tWH(c)   (min((c), 7) << 11)
@@ -264,8 +266,8 @@ static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host,
                NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
                NDTR1_tAR(ns2cycle(t->tAR, nand_clk));
 
-       host->ndtr0cs0 = ndtr0;
-       host->ndtr1cs0 = ndtr1;
+       info->ndtr0cs0 = ndtr0;
+       info->ndtr1cs0 = ndtr1;
        nand_writel(info, NDTR0CS0, ndtr0);
        nand_writel(info, NDTR1CS0, ndtr1);
 }
@@ -273,7 +275,7 @@ static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host,
 static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info)
 {
        struct pxa3xx_nand_host *host = info->host[info->cs];
-       int oob_enable = host->reg_ndcr & NDCR_SPARE_EN;
+       int oob_enable = info->reg_ndcr & NDCR_SPARE_EN;
 
        info->data_size = host->page_size;
        if (!oob_enable) {
@@ -299,12 +301,25 @@ static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info)
  */
 static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)
 {
-       struct pxa3xx_nand_host *host = info->host[info->cs];
        uint32_t ndcr;
 
-       ndcr = host->reg_ndcr;
-       ndcr |= info->use_ecc ? NDCR_ECC_EN : 0;
-       ndcr |= info->use_dma ? NDCR_DMA_EN : 0;
+       ndcr = info->reg_ndcr;
+
+       if (info->use_ecc)
+               ndcr |= NDCR_ECC_EN;
+       else
+               ndcr &= ~NDCR_ECC_EN;
+
+       if (info->use_dma)
+               ndcr |= NDCR_DMA_EN;
+       else
+               ndcr &= ~NDCR_DMA_EN;
+
+       if (info->use_spare)
+               ndcr |= NDCR_SPARE_EN;
+       else
+               ndcr &= ~NDCR_SPARE_EN;
+
        ndcr |= NDCR_ND_RUN;
 
        /* clear status bits and run */
@@ -333,7 +348,8 @@ static void pxa3xx_nand_stop(struct pxa3xx_nand_info *info)
        nand_writel(info, NDSR, NDSR_MASK);
 }
 
-static void enable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
+static void __maybe_unused
+enable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
 {
        uint32_t ndcr;
 
@@ -373,6 +389,7 @@ static void handle_data_pio(struct pxa3xx_nand_info *info)
        }
 }
 
+#ifdef ARCH_HAS_DMA
 static void start_data_dma(struct pxa3xx_nand_info *info)
 {
        struct pxa_dma_desc *desc = info->data_desc;
@@ -419,6 +436,10 @@ static void pxa3xx_nand_data_dma_irq(int channel, void *data)
        enable_int(info, NDCR_INT_MASK);
        nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
 }
+#else
+static void start_data_dma(struct pxa3xx_nand_info *info)
+{}
+#endif
 
 static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
 {
@@ -467,9 +488,22 @@ static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
                nand_writel(info, NDSR, NDSR_WRCMDREQ);
                status &= ~NDSR_WRCMDREQ;
                info->state = STATE_CMD_HANDLE;
+
+               /*
+                * Command buffer registers NDCB{0-2} (and optionally NDCB3)
+                * must be loaded by writing either 12 or 16
+                * bytes directly to NDCB0, four bytes at a time.
+                *
+                * Direct write access to NDCB1, NDCB2 and NDCB3 is ignored
+                * but each NDCBx register can be read.
+                */
                nand_writel(info, NDCB0, info->ndcb0);
                nand_writel(info, NDCB0, info->ndcb1);
                nand_writel(info, NDCB0, info->ndcb2);
+
+               /* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */
+               if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
+                       nand_writel(info, NDCB0, info->ndcb3);
        }
 
        /* clear NDSR to let the controller exit the IRQ */
@@ -491,7 +525,6 @@ static inline int is_buf_blank(uint8_t *buf, size_t len)
 static int prepare_command_pool(struct pxa3xx_nand_info *info, int command,
                uint16_t column, int page_addr)
 {
-       uint16_t cmd;
        int addr_cycle, exec_cmd;
        struct pxa3xx_nand_host *host;
        struct mtd_info *mtd;
@@ -506,6 +539,8 @@ static int prepare_command_pool(struct pxa3xx_nand_info *info, int command,
        info->buf_count         = 0;
        info->oob_size          = 0;
        info->use_ecc           = 0;
+       info->use_spare         = 1;
+       info->use_dma           = (use_dma) ? 1 : 0;
        info->is_ready          = 0;
        info->retcode           = ERR_NONE;
        if (info->cs != 0)
@@ -520,12 +555,16 @@ static int prepare_command_pool(struct pxa3xx_nand_info *info, int command,
        case NAND_CMD_READOOB:
                pxa3xx_set_datasize(info);
                break;
+       case NAND_CMD_PARAM:
+               info->use_spare = 0;
+               break;
        case NAND_CMD_SEQIN:
                exec_cmd = 0;
                break;
        default:
                info->ndcb1 = 0;
                info->ndcb2 = 0;
+               info->ndcb3 = 0;
                break;
        }
 
@@ -535,21 +574,17 @@ static int prepare_command_pool(struct pxa3xx_nand_info *info, int command,
        switch (command) {
        case NAND_CMD_READOOB:
        case NAND_CMD_READ0:
-               cmd = host->cmdset->read1;
+               info->buf_start = column;
+               info->ndcb0 |= NDCB0_CMD_TYPE(0)
+                               | addr_cycle
+                               | NAND_CMD_READ0;
+
                if (command == NAND_CMD_READOOB)
-                       info->buf_start = mtd->writesize + column;
-               else
-                       info->buf_start = column;
+                       info->buf_start += mtd->writesize;
 
-               if (unlikely(host->page_size < PAGE_CHUNK_SIZE))
-                       info->ndcb0 |= NDCB0_CMD_TYPE(0)
-                                       | addr_cycle
-                                       | (cmd & NDCB0_CMD1_MASK);
-               else
-                       info->ndcb0 |= NDCB0_CMD_TYPE(0)
-                                       | NDCB0_DBC
-                                       | addr_cycle
-                                       | cmd;
+               /* Second command setting for large pages */
+               if (host->page_size >= PAGE_CHUNK_SIZE)
+                       info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8);
 
        case NAND_CMD_SEQIN:
                /* small page addr setting */
@@ -580,49 +615,58 @@ static int prepare_command_pool(struct pxa3xx_nand_info *info, int command,
                        break;
                }
 
-               cmd = host->cmdset->program;
                info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
                                | NDCB0_AUTO_RS
                                | NDCB0_ST_ROW_EN
                                | NDCB0_DBC
-                               | cmd
+                               | (NAND_CMD_PAGEPROG << 8)
+                               | NAND_CMD_SEQIN
                                | addr_cycle;
                break;
 
+       case NAND_CMD_PARAM:
+               info->buf_count = 256;
+               info->ndcb0 |= NDCB0_CMD_TYPE(0)
+                               | NDCB0_ADDR_CYC(1)
+                               | NDCB0_LEN_OVRD
+                               | command;
+               info->ndcb1 = (column & 0xFF);
+               info->ndcb3 = 256;
+               info->data_size = 256;
+               break;
+
        case NAND_CMD_READID:
-               cmd = host->cmdset->read_id;
                info->buf_count = host->read_id_bytes;
                info->ndcb0 |= NDCB0_CMD_TYPE(3)
                                | NDCB0_ADDR_CYC(1)
-                               | cmd;
+                               | command;
+               info->ndcb1 = (column & 0xFF);
 
                info->data_size = 8;
                break;
        case NAND_CMD_STATUS:
-               cmd = host->cmdset->read_status;
                info->buf_count = 1;
                info->ndcb0 |= NDCB0_CMD_TYPE(4)
                                | NDCB0_ADDR_CYC(1)
-                               | cmd;
+                               | command;
 
                info->data_size = 8;
                break;
 
        case NAND_CMD_ERASE1:
-               cmd = host->cmdset->erase;
                info->ndcb0 |= NDCB0_CMD_TYPE(2)
                                | NDCB0_AUTO_RS
                                | NDCB0_ADDR_CYC(3)
                                | NDCB0_DBC
-                               | cmd;
+                               | (NAND_CMD_ERASE2 << 8)
+                               | NAND_CMD_ERASE1;
                info->ndcb1 = page_addr;
                info->ndcb2 = 0;
 
                break;
        case NAND_CMD_RESET:
-               cmd = host->cmdset->reset;
                info->ndcb0 |= NDCB0_CMD_TYPE(5)
-                               | cmd;
+                               | command;
 
                break;
 
@@ -652,7 +696,7 @@ static void pxa3xx_nand_cmdfunc(struct mtd_info *mtd, unsigned command,
         * "byte" address into a "word" address appropriate
         * for indexing a word-oriented device
         */
-       if (host->reg_ndcr & NDCR_DWIDTH_M)
+       if (info->reg_ndcr & NDCR_DWIDTH_M)
                column /= 2;
 
        /*
@@ -662,8 +706,8 @@ static void pxa3xx_nand_cmdfunc(struct mtd_info *mtd, unsigned command,
         */
        if (info->cs != host->cs) {
                info->cs = host->cs;
-               nand_writel(info, NDTR0CS0, host->ndtr0cs0);
-               nand_writel(info, NDTR1CS0, host->ndtr1cs0);
+               nand_writel(info, NDTR0CS0, info->ndtr0cs0);
+               nand_writel(info, NDTR1CS0, info->ndtr1cs0);
        }
 
        info->state = STATE_PREPARED;
@@ -803,7 +847,7 @@ static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info,
                                    const struct pxa3xx_nand_flash *f)
 {
        struct platform_device *pdev = info->pdev;
-       struct pxa3xx_nand_platform_data *pdata = pdev->dev.platform_data;
+       struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
        struct pxa3xx_nand_host *host = info->host[info->cs];
        uint32_t ndcr = 0x0; /* enable all interrupts */
 
@@ -818,7 +862,6 @@ static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info,
        }
 
        /* calculate flash information */
-       host->cmdset = &default_cmdset;
        host->page_size = f->page_size;
        host->read_id_bytes = (f->page_size == 2048) ? 4 : 2;
 
@@ -840,7 +883,7 @@ static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info,
        ndcr |= NDCR_RD_ID_CNT(host->read_id_bytes);
        ndcr |= NDCR_SPARE_EN; /* enable spare by default */
 
-       host->reg_ndcr = ndcr;
+       info->reg_ndcr = ndcr;
 
        pxa3xx_nand_set_timing(host, f->timing);
        return 0;
@@ -863,12 +906,9 @@ static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
                host->read_id_bytes = 2;
        }
 
-       host->reg_ndcr = ndcr & ~NDCR_INT_MASK;
-       host->cmdset = &default_cmdset;
-
-       host->ndtr0cs0 = nand_readl(info, NDTR0CS0);
-       host->ndtr1cs0 = nand_readl(info, NDTR1CS0);
-
+       info->reg_ndcr = ndcr & ~NDCR_INT_MASK;
+       info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
+       info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
        return 0;
 }
 
@@ -878,6 +918,7 @@ static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
  */
 #define MAX_BUFF_SIZE  PAGE_SIZE
 
+#ifdef ARCH_HAS_DMA
 static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
 {
        struct platform_device *pdev = info->pdev;
@@ -912,6 +953,32 @@ static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
        return 0;
 }
 
+static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info *info)
+{
+       struct platform_device *pdev = info->pdev;
+       if (use_dma) {
+               pxa_free_dma(info->data_dma_ch);
+               dma_free_coherent(&pdev->dev, MAX_BUFF_SIZE,
+                                 info->data_buff, info->data_buff_phys);
+       } else {
+               kfree(info->data_buff);
+       }
+}
+#else
+static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
+{
+       info->data_buff = kmalloc(MAX_BUFF_SIZE, GFP_KERNEL);
+       if (info->data_buff == NULL)
+               return -ENOMEM;
+       return 0;
+}
+
+static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info *info)
+{
+       kfree(info->data_buff);
+}
+#endif
+
 static int pxa3xx_nand_sensing(struct pxa3xx_nand_info *info)
 {
        struct mtd_info *mtd;
@@ -934,7 +1001,7 @@ static int pxa3xx_nand_scan(struct mtd_info *mtd)
        struct pxa3xx_nand_host *host = mtd->priv;
        struct pxa3xx_nand_info *info = host->info_data;
        struct platform_device *pdev = info->pdev;
-       struct pxa3xx_nand_platform_data *pdata = pdev->dev.platform_data;
+       struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
        struct nand_flash_dev pxa3xx_flash_ids[2], *def = NULL;
        const struct pxa3xx_nand_flash *f = NULL;
        struct nand_chip *chip = mtd->priv;
@@ -1003,7 +1070,7 @@ KEEP_CONFIG:
        chip->ecc.size = host->page_size;
        chip->ecc.strength = 1;
 
-       if (host->reg_ndcr & NDCR_DWIDTH_M)
+       if (info->reg_ndcr & NDCR_DWIDTH_M)
                chip->options |= NAND_BUSWIDTH_16;
 
        if (nand_scan_ident(mtd, 1, def))
@@ -1019,8 +1086,6 @@ KEEP_CONFIG:
                host->row_addr_cycles = 3;
        else
                host->row_addr_cycles = 2;
-
-       mtd->name = mtd_names[0];
        return nand_scan_tail(mtd);
 }
 
@@ -1034,13 +1099,11 @@ static int alloc_nand_resource(struct platform_device *pdev)
        struct resource *r;
        int ret, irq, cs;
 
-       pdata = pdev->dev.platform_data;
-       info = kzalloc(sizeof(*info) + (sizeof(*mtd) +
-                      sizeof(*host)) * pdata->num_cs, GFP_KERNEL);
-       if (!info) {
-               dev_err(&pdev->dev, "failed to allocate memory\n");
+       pdata = dev_get_platdata(&pdev->dev);
+       info = devm_kzalloc(&pdev->dev, sizeof(*info) + (sizeof(*mtd) +
+                           sizeof(*host)) * pdata->num_cs, GFP_KERNEL);
+       if (!info)
                return -ENOMEM;
-       }
 
        info->pdev = pdev;
        for (cs = 0; cs < pdata->num_cs; cs++) {
@@ -1069,72 +1132,64 @@ static int alloc_nand_resource(struct platform_device *pdev)
 
        spin_lock_init(&chip->controller->lock);
        init_waitqueue_head(&chip->controller->wq);
-       info->clk = clk_get(&pdev->dev, NULL);
+       info->clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(info->clk)) {
                dev_err(&pdev->dev, "failed to get nand clock\n");
-               ret = PTR_ERR(info->clk);
-               goto fail_free_mtd;
+               return PTR_ERR(info->clk);
        }
-       clk_enable(info->clk);
-
-       /*
-        * This is a dirty hack to make this driver work from devicetree
-        * bindings. It can be removed once we have a prober DMA controller
-        * framework for DT.
-        */
-       if (pdev->dev.of_node && cpu_is_pxa3xx()) {
-               info->drcmr_dat = 97;
-               info->drcmr_cmd = 99;
-       } else {
-               r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
-               if (r == NULL) {
-                       dev_err(&pdev->dev, "no resource defined for data DMA\n");
-                       ret = -ENXIO;
-                       goto fail_put_clk;
-               }
-               info->drcmr_dat = r->start;
+       ret = clk_prepare_enable(info->clk);
+       if (ret < 0)
+               return ret;
 
-               r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
-               if (r == NULL) {
-                       dev_err(&pdev->dev, "no resource defined for command DMA\n");
-                       ret = -ENXIO;
-                       goto fail_put_clk;
+       if (use_dma) {
+               /*
+                * This is a dirty hack to make this driver work from
+                * devicetree bindings. It can be removed once we have
+                * a proper DMA controller framework for DT.
+                */
+               if (pdev->dev.of_node &&
+                   of_machine_is_compatible("marvell,pxa3xx")) {
+                       info->drcmr_dat = 97;
+                       info->drcmr_cmd = 99;
+               } else {
+                       r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+                       if (r == NULL) {
+                               dev_err(&pdev->dev,
+                                       "no resource defined for data DMA\n");
+                               ret = -ENXIO;
+                               goto fail_disable_clk;
+                       }
+                       info->drcmr_dat = r->start;
+
+                       r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
+                       if (r == NULL) {
+                               dev_err(&pdev->dev,
+                                       "no resource defined for cmd DMA\n");
+                               ret = -ENXIO;
+                               goto fail_disable_clk;
+                       }
+                       info->drcmr_cmd = r->start;
                }
-               info->drcmr_cmd = r->start;
        }
 
        irq = platform_get_irq(pdev, 0);
        if (irq < 0) {
                dev_err(&pdev->dev, "no IRQ resource defined\n");
                ret = -ENXIO;
-               goto fail_put_clk;
+               goto fail_disable_clk;
        }
 
        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (r == NULL) {
-               dev_err(&pdev->dev, "no IO memory resource defined\n");
-               ret = -ENODEV;
-               goto fail_put_clk;
-       }
-
-       r = request_mem_region(r->start, resource_size(r), pdev->name);
-       if (r == NULL) {
-               dev_err(&pdev->dev, "failed to request memory resource\n");
-               ret = -EBUSY;
-               goto fail_put_clk;
-       }
-
-       info->mmio_base = ioremap(r->start, resource_size(r));
-       if (info->mmio_base == NULL) {
-               dev_err(&pdev->dev, "ioremap() failed\n");
-               ret = -ENODEV;
-               goto fail_free_res;
+       info->mmio_base = devm_ioremap_resource(&pdev->dev, r);
+       if (IS_ERR(info->mmio_base)) {
+               ret = PTR_ERR(info->mmio_base);
+               goto fail_disable_clk;
        }
        info->mmio_phys = r->start;
 
        ret = pxa3xx_nand_init_buff(info);
        if (ret)
-               goto fail_free_io;
+               goto fail_disable_clk;
 
        /* initialize all interrupts to be disabled */
        disable_int(info, NDSR_MASK);
@@ -1152,21 +1207,9 @@ static int alloc_nand_resource(struct platform_device *pdev)
 
 fail_free_buf:
        free_irq(irq, info);
-       if (use_dma) {
-               pxa_free_dma(info->data_dma_ch);
-               dma_free_coherent(&pdev->dev, MAX_BUFF_SIZE,
-                       info->data_buff, info->data_buff_phys);
-       } else
-               kfree(info->data_buff);
-fail_free_io:
-       iounmap(info->mmio_base);
-fail_free_res:
-       release_mem_region(r->start, resource_size(r));
-fail_put_clk:
-       clk_disable(info->clk);
-       clk_put(info->clk);
-fail_free_mtd:
-       kfree(info);
+       pxa3xx_nand_free_buff(info);
+fail_disable_clk:
+       clk_disable_unprepare(info->clk);
        return ret;
 }
 
@@ -1174,44 +1217,47 @@ static int pxa3xx_nand_remove(struct platform_device *pdev)
 {
        struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
        struct pxa3xx_nand_platform_data *pdata;
-       struct resource *r;
        int irq, cs;
 
        if (!info)
                return 0;
 
-       pdata = pdev->dev.platform_data;
-       platform_set_drvdata(pdev, NULL);
+       pdata = dev_get_platdata(&pdev->dev);
 
        irq = platform_get_irq(pdev, 0);
        if (irq >= 0)
                free_irq(irq, info);
-       if (use_dma) {
-               pxa_free_dma(info->data_dma_ch);
-               dma_free_writecombine(&pdev->dev, MAX_BUFF_SIZE,
-                               info->data_buff, info->data_buff_phys);
-       } else
-               kfree(info->data_buff);
-
-       iounmap(info->mmio_base);
-       r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       release_mem_region(r->start, resource_size(r));
+       pxa3xx_nand_free_buff(info);
 
-       clk_disable(info->clk);
-       clk_put(info->clk);
+       clk_disable_unprepare(info->clk);
 
        for (cs = 0; cs < pdata->num_cs; cs++)
                nand_release(info->host[cs]->mtd);
-       kfree(info);
        return 0;
 }
 
-#ifdef CONFIG_OF
 static struct of_device_id pxa3xx_nand_dt_ids[] = {
-       { .compatible = "marvell,pxa3xx-nand" },
+       {
+               .compatible = "marvell,pxa3xx-nand",
+               .data       = (void *)PXA3XX_NAND_VARIANT_PXA,
+       },
+       {
+               .compatible = "marvell,armada370-nand",
+               .data       = (void *)PXA3XX_NAND_VARIANT_ARMADA370,
+       },
        {}
 };
-MODULE_DEVICE_TABLE(of, i2c_pxa_dt_ids);
+MODULE_DEVICE_TABLE(of, pxa3xx_nand_dt_ids);
+
+static enum pxa3xx_nand_variant
+pxa3xx_nand_get_variant(struct platform_device *pdev)
+{
+       const struct of_device_id *of_id =
+                       of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);
+       if (!of_id)
+               return PXA3XX_NAND_VARIANT_PXA;
+       return (enum pxa3xx_nand_variant)of_id->data;
+}
 
 static int pxa3xx_nand_probe_dt(struct platform_device *pdev)
 {
@@ -1237,12 +1283,6 @@ static int pxa3xx_nand_probe_dt(struct platform_device *pdev)
 
        return 0;
 }
-#else
-static inline int pxa3xx_nand_probe_dt(struct platform_device *pdev)
-{
-       return 0;
-}
-#endif
 
 static int pxa3xx_nand_probe(struct platform_device *pdev)
 {
@@ -1251,11 +1291,18 @@ static int pxa3xx_nand_probe(struct platform_device *pdev)
        struct pxa3xx_nand_info *info;
        int ret, cs, probe_success;
 
+#ifndef ARCH_HAS_DMA
+       if (use_dma) {
+               use_dma = 0;
+               dev_warn(&pdev->dev,
+                        "This platform can't do DMA on this device\n");
+       }
+#endif
        ret = pxa3xx_nand_probe_dt(pdev);
        if (ret)
                return ret;
 
-       pdata = pdev->dev.platform_data;
+       pdata = dev_get_platdata(&pdev->dev);
        if (!pdata) {
                dev_err(&pdev->dev, "no platform data defined\n");
                return -ENODEV;
@@ -1268,10 +1315,14 @@ static int pxa3xx_nand_probe(struct platform_device *pdev)
        }
 
        info = platform_get_drvdata(pdev);
+       info->variant = pxa3xx_nand_get_variant(pdev);
        probe_success = 0;
        for (cs = 0; cs < pdata->num_cs; cs++) {
+               struct mtd_info *mtd = info->host[cs]->mtd;
+
+               mtd->name = pdev->name;
                info->cs = cs;
-               ret = pxa3xx_nand_scan(info->host[cs]->mtd);
+               ret = pxa3xx_nand_scan(mtd);
                if (ret) {
                        dev_warn(&pdev->dev, "failed to scan nand at cs %d\n",
                                cs);
@@ -1279,7 +1330,7 @@ static int pxa3xx_nand_probe(struct platform_device *pdev)
                }
 
                ppdata.of_node = pdev->dev.of_node;
-               ret = mtd_device_parse_register(info->host[cs]->mtd, NULL,
+               ret = mtd_device_parse_register(mtd, NULL,
                                                &ppdata, pdata->parts[cs],
                                                pdata->nr_parts[cs]);
                if (!ret)
@@ -1302,7 +1353,7 @@ static int pxa3xx_nand_suspend(struct platform_device *pdev, pm_message_t state)
        struct mtd_info *mtd;
        int cs;
 
-       pdata = pdev->dev.platform_data;
+       pdata = dev_get_platdata(&pdev->dev);
        if (info->state) {
                dev_err(&pdev->dev, "driver busy, state = %d\n", info->state);
                return -EAGAIN;
@@ -1323,7 +1374,7 @@ static int pxa3xx_nand_resume(struct platform_device *pdev)
        struct mtd_info *mtd;
        int cs;
 
-       pdata = pdev->dev.platform_data;
+       pdata = dev_get_platdata(&pdev->dev);
        /* We don't want to handle interrupt without calling mtd routine */
        disable_int(info, NDCR_INT_MASK);
 
index 4495f8551fa093fc6f30117475d624691e918430..9dcf02d22aa8fed1a8e4006952f9d7ee27739c9e 100644 (file)
@@ -229,7 +229,7 @@ static void r852_do_dma(struct r852_device *dev, uint8_t *buf, int do_read)
 /*
  * Program data lines of the nand chip to send data to it
  */
-void r852_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
+static void r852_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
 {
        struct r852_device *dev = r852_get_dev(mtd);
        uint32_t reg;
@@ -261,7 +261,7 @@ void r852_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
 /*
  * Read data lines of the nand chip to retrieve data
  */
-void r852_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+static void r852_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
 {
        struct r852_device *dev = r852_get_dev(mtd);
        uint32_t reg;
@@ -312,7 +312,7 @@ static uint8_t r852_read_byte(struct mtd_info *mtd)
 /*
  * Control several chip lines & send commands
  */
-void r852_cmdctl(struct mtd_info *mtd, int dat, unsigned int ctrl)
+static void r852_cmdctl(struct mtd_info *mtd, int dat, unsigned int ctrl)
 {
        struct r852_device *dev = r852_get_dev(mtd);
 
@@ -357,7 +357,7 @@ void r852_cmdctl(struct mtd_info *mtd, int dat, unsigned int ctrl)
  * Wait till card is ready.
  * based on nand_wait, but returns errors on DMA error
  */
-int r852_wait(struct mtd_info *mtd, struct nand_chip *chip)
+static int r852_wait(struct mtd_info *mtd, struct nand_chip *chip)
 {
        struct r852_device *dev = chip->priv;
 
@@ -386,7 +386,7 @@ int r852_wait(struct mtd_info *mtd, struct nand_chip *chip)
  * Check if card is ready
  */
 
-int r852_ready(struct mtd_info *mtd)
+static int r852_ready(struct mtd_info *mtd)
 {
        struct r852_device *dev = r852_get_dev(mtd);
        return !(r852_read_reg(dev, R852_CARD_STA) & R852_CARD_STA_BUSY);
@@ -397,7 +397,7 @@ int r852_ready(struct mtd_info *mtd)
  * Set ECC engine mode
 */
 
-void r852_ecc_hwctl(struct mtd_info *mtd, int mode)
+static void r852_ecc_hwctl(struct mtd_info *mtd, int mode)
 {
        struct r852_device *dev = r852_get_dev(mtd);
 
@@ -429,7 +429,7 @@ void r852_ecc_hwctl(struct mtd_info *mtd, int mode)
  * Calculate ECC, only used for writes
  */
 
-int r852_ecc_calculate(struct mtd_info *mtd, const uint8_t *dat,
+static int r852_ecc_calculate(struct mtd_info *mtd, const uint8_t *dat,
                                                        uint8_t *ecc_code)
 {
        struct r852_device *dev = r852_get_dev(mtd);
@@ -461,7 +461,7 @@ int r852_ecc_calculate(struct mtd_info *mtd, const uint8_t *dat,
  * Correct the data using ECC, hw did almost everything for us
  */
 
-int r852_ecc_correct(struct mtd_info *mtd, uint8_t *dat,
+static int r852_ecc_correct(struct mtd_info *mtd, uint8_t *dat,
                                uint8_t *read_ecc, uint8_t *calc_ecc)
 {
        uint16_t ecc_reg;
@@ -529,7 +529,7 @@ static int r852_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
  * Start the nand engine
  */
 
-void r852_engine_enable(struct r852_device *dev)
+static void r852_engine_enable(struct r852_device *dev)
 {
        if (r852_read_reg_dword(dev, R852_HW) & R852_HW_UNKNOWN) {
                r852_write_reg(dev, R852_CTL, R852_CTL_RESET | R852_CTL_ON);
@@ -547,7 +547,7 @@ void r852_engine_enable(struct r852_device *dev)
  * Stop the nand engine
  */
 
-void r852_engine_disable(struct r852_device *dev)
+static void r852_engine_disable(struct r852_device *dev)
 {
        r852_write_reg_dword(dev, R852_HW, 0);
        r852_write_reg(dev, R852_CTL, R852_CTL_RESET);
@@ -557,7 +557,7 @@ void r852_engine_disable(struct r852_device *dev)
  * Test if card is present
  */
 
-void r852_card_update_present(struct r852_device *dev)
+static void r852_card_update_present(struct r852_device *dev)
 {
        unsigned long flags;
        uint8_t reg;
@@ -572,7 +572,7 @@ void r852_card_update_present(struct r852_device *dev)
  * Update card detection IRQ state according to current card state
  * which is read in r852_card_update_present
  */
-void r852_update_card_detect(struct r852_device *dev)
+static void r852_update_card_detect(struct r852_device *dev)
 {
        int card_detect_reg = r852_read_reg(dev, R852_CARD_IRQ_ENABLE);
        dev->card_unstable = 0;
@@ -586,8 +586,8 @@ void r852_update_card_detect(struct r852_device *dev)
        r852_write_reg(dev, R852_CARD_IRQ_ENABLE, card_detect_reg);
 }
 
-ssize_t r852_media_type_show(struct device *sys_dev,
-               struct device_attribute *attr, char *buf)
+static ssize_t r852_media_type_show(struct device *sys_dev,
+                       struct device_attribute *attr, char *buf)
 {
        struct mtd_info *mtd = container_of(sys_dev, struct mtd_info, dev);
        struct r852_device *dev = r852_get_dev(mtd);
@@ -597,11 +597,11 @@ ssize_t r852_media_type_show(struct device *sys_dev,
        return strlen(data);
 }
 
-DEVICE_ATTR(media_type, S_IRUGO, r852_media_type_show, NULL);
+static DEVICE_ATTR(media_type, S_IRUGO, r852_media_type_show, NULL);
 
 
 /* Detect properties of card in slot */
-void r852_update_media_status(struct r852_device *dev)
+static void r852_update_media_status(struct r852_device *dev)
 {
        uint8_t reg;
        unsigned long flags;
@@ -630,7 +630,7 @@ void r852_update_media_status(struct r852_device *dev)
  * Register the nand device
  * Called when the card is detected
  */
-int r852_register_nand_device(struct r852_device *dev)
+static int r852_register_nand_device(struct r852_device *dev)
 {
        dev->mtd = kzalloc(sizeof(struct mtd_info), GFP_KERNEL);
 
@@ -668,7 +668,7 @@ error1:
  * Unregister the card
  */
 
-void r852_unregister_nand_device(struct r852_device *dev)
+static void r852_unregister_nand_device(struct r852_device *dev)
 {
        if (!dev->card_registred)
                return;
@@ -682,7 +682,7 @@ void r852_unregister_nand_device(struct r852_device *dev)
 }
 
 /* Card state updater */
-void r852_card_detect_work(struct work_struct *work)
+static void r852_card_detect_work(struct work_struct *work)
 {
        struct r852_device *dev =
                container_of(work, struct r852_device, card_detect_work.work);
@@ -821,7 +821,7 @@ out:
        return ret;
 }
 
-int  r852_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
+static int  r852_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
 {
        int error;
        struct nand_chip *chip;
@@ -961,7 +961,7 @@ error1:
        return error;
 }
 
-void r852_remove(struct pci_dev *pci_dev)
+static void r852_remove(struct pci_dev *pci_dev)
 {
        struct r852_device *dev = pci_get_drvdata(pci_dev);
 
@@ -992,7 +992,7 @@ void r852_remove(struct pci_dev *pci_dev)
        pci_disable_device(pci_dev);
 }
 
-void r852_shutdown(struct pci_dev *pci_dev)
+static void r852_shutdown(struct pci_dev *pci_dev)
 {
        struct r852_device *dev = pci_get_drvdata(pci_dev);
 
@@ -1002,7 +1002,7 @@ void r852_shutdown(struct pci_dev *pci_dev)
        pci_disable_device(pci_dev);
 }
 
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 static int r852_suspend(struct device *device)
 {
        struct r852_device *dev = pci_get_drvdata(to_pci_dev(device));
@@ -1055,9 +1055,6 @@ static int r852_resume(struct device *device)
        r852_update_card_detect(dev);
        return 0;
 }
-#else
-#define r852_suspend   NULL
-#define r852_resume    NULL
 #endif
 
 static const struct pci_device_id r852_pci_id_tbl[] = {
index d65afd23e171c16587dffd4fd0044121eb43313a..d65cbe903d4015e37076f0505db987ee4383e0d5 100644 (file)
@@ -150,7 +150,7 @@ static struct s3c2410_nand_info *to_nand_info(struct platform_device *dev)
 
 static struct s3c2410_platform_nand *to_nand_plat(struct platform_device *dev)
 {
-       return dev->dev.platform_data;
+       return dev_get_platdata(&dev->dev);
 }
 
 static inline int allow_clk_suspend(struct s3c2410_nand_info *info)
@@ -697,8 +697,6 @@ static int s3c24xx_nand_remove(struct platform_device *pdev)
 {
        struct s3c2410_nand_info *info = to_nand_info(pdev);
 
-       platform_set_drvdata(pdev, NULL);
-
        if (info == NULL)
                return 0;
 
index e57e18e8c2893ab8077693e13385bb06ddaad9af..a3c84ebbe39227dac06bae3d0f9014849cf48b17 100644 (file)
@@ -137,7 +137,7 @@ static void flctl_setup_dma(struct sh_flctl *flctl)
        dma_cap_mask_t mask;
        struct dma_slave_config cfg;
        struct platform_device *pdev = flctl->pdev;
-       struct sh_flctl_platform_data *pdata = pdev->dev.platform_data;
+       struct sh_flctl_platform_data *pdata = dev_get_platdata(&pdev->dev);
        int ret;
 
        if (!pdata)
@@ -1131,7 +1131,7 @@ static int flctl_probe(struct platform_device *pdev)
        if (pdev->dev.of_node)
                pdata = flctl_parse_dt(&pdev->dev);
        else
-               pdata = pdev->dev.platform_data;
+               pdata = dev_get_platdata(&pdev->dev);
 
        if (!pdata) {
                dev_err(&pdev->dev, "no setup data defined\n");
index 127bc42718217a68c4c83fa89cf64b3cbed7d95c..87908d760feb067c3cb179b35bfa5a3c462ab737 100644 (file)
@@ -112,7 +112,7 @@ static int sharpsl_nand_probe(struct platform_device *pdev)
        struct resource *r;
        int err = 0;
        struct sharpsl_nand *sharpsl;
-       struct sharpsl_nand_platform_data *data = pdev->dev.platform_data;
+       struct sharpsl_nand_platform_data *data = dev_get_platdata(&pdev->dev);
 
        if (!data) {
                dev_err(&pdev->dev, "no platform data!\n");
@@ -194,7 +194,6 @@ err_add:
        nand_release(&sharpsl->mtd);
 
 err_scan:
-       platform_set_drvdata(pdev, NULL);
        iounmap(sharpsl->io);
 err_ioremap:
 err_get_res:
@@ -212,8 +211,6 @@ static int sharpsl_nand_remove(struct platform_device *pdev)
        /* Release resources, unregister device */
        nand_release(&sharpsl->mtd);
 
-       platform_set_drvdata(pdev, NULL);
-
        iounmap(sharpsl->io);
 
        /* Free the MTD device structure */
index e8181edebddd10923904c4db4be17adf79d29316..e06b5e5d3287dbaceaeed98941a77dcb595a5b4c 100644 (file)
@@ -42,7 +42,7 @@ static int sm_block_markbad(struct mtd_info *mtd, loff_t ofs)
 {
        struct mtd_oob_ops ops;
        struct sm_oob oob;
-       int ret, error = 0;
+       int ret;
 
        memset(&oob, -1, SM_OOB_SIZE);
        oob.block_status = 0x0F;
@@ -61,11 +61,10 @@ static int sm_block_markbad(struct mtd_info *mtd, loff_t ofs)
                printk(KERN_NOTICE
                        "sm_common: can't mark sector at %i as bad\n",
                                                                (int)ofs);
-               error = -EIO;
-       } else
-               mtd->ecc_stats.badblocks++;
+               return -EIO;
+       }
 
-       return error;
+       return 0;
 }
 
 static struct nand_flash_dev nand_smartmedia_flash_ids[] = {
index 508e9e04b0926a5b16c4620dc1f2a821e945680d..396530d87ecfb824f5e7a4ede7a92e6c8f0d3a82 100644 (file)
@@ -357,7 +357,7 @@ static void tmio_hw_stop(struct platform_device *dev, struct tmio_nand *tmio)
 
 static int tmio_probe(struct platform_device *dev)
 {
-       struct tmio_nand_data *data = dev->dev.platform_data;
+       struct tmio_nand_data *data = dev_get_platdata(&dev->dev);
        struct resource *fcr = platform_get_resource(dev,
                        IORESOURCE_MEM, 0);
        struct resource *ccr = platform_get_resource(dev,
index 7ed654c68b0867af79c82da6f65cc210fb827498..235714a421dd6770851f2f5026522123f46dbefc 100644 (file)
@@ -87,7 +87,7 @@ static struct platform_device *mtd_to_platdev(struct mtd_info *mtd)
 static void __iomem *ndregaddr(struct platform_device *dev, unsigned int reg)
 {
        struct txx9ndfmc_drvdata *drvdata = platform_get_drvdata(dev);
-       struct txx9ndfmc_platform_data *plat = dev->dev.platform_data;
+       struct txx9ndfmc_platform_data *plat = dev_get_platdata(&dev->dev);
 
        return drvdata->base + (reg << plat->shift);
 }
@@ -138,7 +138,7 @@ static void txx9ndfmc_cmd_ctrl(struct mtd_info *mtd, int cmd,
        struct nand_chip *chip = mtd->priv;
        struct txx9ndfmc_priv *txx9_priv = chip->priv;
        struct platform_device *dev = txx9_priv->dev;
-       struct txx9ndfmc_platform_data *plat = dev->dev.platform_data;
+       struct txx9ndfmc_platform_data *plat = dev_get_platdata(&dev->dev);
 
        if (ctrl & NAND_CTRL_CHANGE) {
                u32 mcr = txx9ndfmc_read(dev, TXX9_NDFMCR);
@@ -225,7 +225,7 @@ static void txx9ndfmc_enable_hwecc(struct mtd_info *mtd, int mode)
 
 static void txx9ndfmc_initialize(struct platform_device *dev)
 {
-       struct txx9ndfmc_platform_data *plat = dev->dev.platform_data;
+       struct txx9ndfmc_platform_data *plat = dev_get_platdata(&dev->dev);
        struct txx9ndfmc_drvdata *drvdata = platform_get_drvdata(dev);
        int tmout = 100;
 
@@ -274,19 +274,17 @@ static int txx9ndfmc_nand_scan(struct mtd_info *mtd)
 
 static int __init txx9ndfmc_probe(struct platform_device *dev)
 {
-       struct txx9ndfmc_platform_data *plat = dev->dev.platform_data;
+       struct txx9ndfmc_platform_data *plat = dev_get_platdata(&dev->dev);
        int hold, spw;
        int i;
        struct txx9ndfmc_drvdata *drvdata;
        unsigned long gbusclk = plat->gbus_clock;
        struct resource *res;
 
-       res = platform_get_resource(dev, IORESOURCE_MEM, 0);
-       if (!res)
-               return -ENODEV;
        drvdata = devm_kzalloc(&dev->dev, sizeof(*drvdata), GFP_KERNEL);
        if (!drvdata)
                return -ENOMEM;
+       res = platform_get_resource(dev, IORESOURCE_MEM, 0);
        drvdata->base = devm_ioremap_resource(&dev->dev, res);
        if (IS_ERR(drvdata->base))
                return PTR_ERR(drvdata->base);
@@ -387,7 +385,6 @@ static int __exit txx9ndfmc_remove(struct platform_device *dev)
        struct txx9ndfmc_drvdata *drvdata = platform_get_drvdata(dev);
        int i;
 
-       platform_set_drvdata(dev, NULL);
        if (!drvdata)
                return 0;
        for (i = 0; i < MAX_TXX9NDFMC_DEV; i++) {
index 553d6d6d560322c4bcafa44a184e6c09e864b621..7843a4491217954f7549524a52ed2e1b133302e9 100644 (file)
 #include <linux/slab.h>
 #include <linux/mtd/partitions.h>
 
+static bool node_has_compatible(struct device_node *pp)
+{
+       return of_get_property(pp, "compatible", NULL);
+}
+
 static int parse_ofpart_partitions(struct mtd_info *master,
                                   struct mtd_partition **pparts,
                                   struct mtd_part_parser_data *data)
@@ -40,8 +45,12 @@ static int parse_ofpart_partitions(struct mtd_info *master,
        /* First count the subnodes */
        pp = NULL;
        nr_parts = 0;
-       while ((pp = of_get_next_child(node, pp)))
+       while ((pp = of_get_next_child(node, pp))) {
+               if (node_has_compatible(pp))
+                       continue;
+
                nr_parts++;
+       }
 
        if (nr_parts == 0)
                return 0;
@@ -57,6 +66,9 @@ static int parse_ofpart_partitions(struct mtd_info *master,
                int len;
                int a_cells, s_cells;
 
+               if (node_has_compatible(pp))
+                       continue;
+
                reg = of_get_property(pp, "reg", &len);
                if (!reg) {
                        nr_parts--;
index 9f11562f849dbb0f7f5a4836cc31ce9c9a7e3cb5..63699fffc96de24b3098f629ea495184103386c8 100644 (file)
@@ -38,7 +38,7 @@ struct onenand_info {
 static int generic_onenand_probe(struct platform_device *pdev)
 {
        struct onenand_info *info;
-       struct onenand_platform_data *pdata = pdev->dev.platform_data;
+       struct onenand_platform_data *pdata = dev_get_platdata(&pdev->dev);
        struct resource *res = pdev->resource;
        unsigned long size = resource_size(res);
        int err;
@@ -94,8 +94,6 @@ static int generic_onenand_remove(struct platform_device *pdev)
        struct resource *res = pdev->resource;
        unsigned long size = resource_size(res);
 
-       platform_set_drvdata(pdev, NULL);
-
        if (info) {
                onenand_release(&info->mtd);
                release_mem_region(res->start, size);
index d98b198edd53a27139c1eaa0ffad5ae5f0fac9f2..558071bf92de0ed607355067b08ebea7fe3833b4 100644 (file)
@@ -639,7 +639,7 @@ static int omap2_onenand_probe(struct platform_device *pdev)
        struct resource *res;
        struct mtd_part_parser_data ppdata = {};
 
-       pdata = pdev->dev.platform_data;
+       pdata = dev_get_platdata(&pdev->dev);
        if (pdata == NULL) {
                dev_err(&pdev->dev, "platform data missing\n");
                return -ENODEV;
@@ -810,7 +810,6 @@ static int omap2_onenand_remove(struct platform_device *pdev)
        if (c->dma_channel != -1)
                omap_free_dma(c->dma_channel);
        omap2_onenand_shutdown(pdev);
-       platform_set_drvdata(pdev, NULL);
        if (c->gpio_irq) {
                free_irq(gpio_to_irq(c->gpio_irq), c);
                gpio_free(c->gpio_irq);
index 66fe3b7e78515679bae0f8ae75ab9bfb5b509958..08d0085f3e939fb277cc9c21b3b3004eab662206 100644 (file)
@@ -133,7 +133,6 @@ static inline int onenand_memory_bbt (struct mtd_info *mtd, struct nand_bbt_desc
 {
        struct onenand_chip *this = mtd->priv;
 
-        bd->options &= ~NAND_BBT_SCANEMPTY;
        return create_bbt(mtd, this->page_buf, bd, -1);
 }
 
index 2cf74085f93524c784f9adc3c3920713da570f5a..df7400dd4df847b321bb1fdedbb5ac522e9c5b83 100644 (file)
@@ -867,7 +867,7 @@ static int s3c_onenand_probe(struct platform_device *pdev)
        struct resource *r;
        int size, err;
 
-       pdata = pdev->dev.platform_data;
+       pdata = dev_get_platdata(&pdev->dev);
        /* No need to check pdata. the platform data is optional */
 
        size = sizeof(struct mtd_info) + sizeof(struct onenand_chip);
@@ -1073,7 +1073,6 @@ static int s3c_onenand_remove(struct platform_device *pdev)
        release_mem_region(onenand->base_res->start,
                           resource_size(onenand->base_res));
 
-       platform_set_drvdata(pdev, NULL);
        kfree(onenand->oob_buf);
        kfree(onenand->page_buf);
        kfree(onenand);
index f9d5615c572747ee6137a89327aa974322fd949f..4b8e89583f2a5d83f366515f4ed0fa5c4239361d 100644 (file)
@@ -22,7 +22,7 @@
 
 
 
-struct workqueue_struct *cache_flush_workqueue;
+static struct workqueue_struct *cache_flush_workqueue;
 
 static int cache_timeout = 1000;
 module_param(cache_timeout, int, S_IRUGO);
@@ -41,7 +41,7 @@ struct sm_sysfs_attribute {
        int len;
 };
 
-ssize_t sm_attr_show(struct device *dev, struct device_attribute *attr,
+static ssize_t sm_attr_show(struct device *dev, struct device_attribute *attr,
                     char *buf)
 {
        struct sm_sysfs_attribute *sm_attr =
@@ -54,7 +54,7 @@ ssize_t sm_attr_show(struct device *dev, struct device_attribute *attr,
 
 #define NUM_ATTRIBUTES 1
 #define SM_CIS_VENDOR_OFFSET 0x59
-struct attribute_group *sm_create_sysfs_attributes(struct sm_ftl *ftl)
+static struct attribute_group *sm_create_sysfs_attributes(struct sm_ftl *ftl)
 {
        struct attribute_group *attr_group;
        struct attribute **attributes;
@@ -107,7 +107,7 @@ error1:
        return NULL;
 }
 
-void sm_delete_sysfs_attributes(struct sm_ftl *ftl)
+static void sm_delete_sysfs_attributes(struct sm_ftl *ftl)
 {
        struct attribute **attributes = ftl->disk_attributes->attrs;
        int i;
@@ -571,7 +571,7 @@ static const uint8_t cis_signature[] = {
 };
 /* Find out media parameters.
  * This ideally has to be based on nand id, but for now device size is enough */
-int sm_get_media_info(struct sm_ftl *ftl, struct mtd_info *mtd)
+static int sm_get_media_info(struct sm_ftl *ftl, struct mtd_info *mtd)
 {
        int i;
        int size_in_megs = mtd->size / (1024 * 1024);
@@ -878,7 +878,7 @@ static int sm_init_zone(struct sm_ftl *ftl, int zone_num)
 }
 
 /* Get and automatically initialize an FTL mapping for one zone */
-struct ftl_zone *sm_get_zone(struct sm_ftl *ftl, int zone_num)
+static struct ftl_zone *sm_get_zone(struct sm_ftl *ftl, int zone_num)
 {
        struct ftl_zone *zone;
        int error;
@@ -899,7 +899,7 @@ struct ftl_zone *sm_get_zone(struct sm_ftl *ftl, int zone_num)
 /* ----------------- cache handling ------------------------------------------*/
 
 /* Initialize the one block cache */
-void sm_cache_init(struct sm_ftl *ftl)
+static void sm_cache_init(struct sm_ftl *ftl)
 {
        ftl->cache_data_invalid_bitmap = 0xFFFFFFFF;
        ftl->cache_clean = 1;
@@ -909,7 +909,7 @@ void sm_cache_init(struct sm_ftl *ftl)
 }
 
 /* Put sector in one block cache */
-void sm_cache_put(struct sm_ftl *ftl, char *buffer, int boffset)
+static void sm_cache_put(struct sm_ftl *ftl, char *buffer, int boffset)
 {
        memcpy(ftl->cache_data + boffset, buffer, SM_SECTOR_SIZE);
        clear_bit(boffset / SM_SECTOR_SIZE, &ftl->cache_data_invalid_bitmap);
@@ -917,7 +917,7 @@ void sm_cache_put(struct sm_ftl *ftl, char *buffer, int boffset)
 }
 
 /* Read a sector from the cache */
-int sm_cache_get(struct sm_ftl *ftl, char *buffer, int boffset)
+static int sm_cache_get(struct sm_ftl *ftl, char *buffer, int boffset)
 {
        if (test_bit(boffset / SM_SECTOR_SIZE,
                &ftl->cache_data_invalid_bitmap))
@@ -928,7 +928,7 @@ int sm_cache_get(struct sm_ftl *ftl, char *buffer, int boffset)
 }
 
 /* Write the cache to hardware */
-int sm_cache_flush(struct sm_ftl *ftl)
+static int sm_cache_flush(struct sm_ftl *ftl)
 {
        struct ftl_zone *zone;
 
@@ -1274,10 +1274,10 @@ static struct mtd_blktrans_ops sm_ftl_ops = {
 static __init int sm_module_init(void)
 {
        int error = 0;
-       cache_flush_workqueue = create_freezable_workqueue("smflush");
 
-       if (IS_ERR(cache_flush_workqueue))
-               return PTR_ERR(cache_flush_workqueue);
+       cache_flush_workqueue = create_freezable_workqueue("smflush");
+       if (!cache_flush_workqueue)
+               return -ENOMEM;
 
        error = register_mtd_blktrans(&sm_ftl_ops);
        if (error)
index bd0065c0d359f7e207523ca06f2aa0221dc78fad..937a829bb70111c4e44ad110b9d9dc029f0b8d94 100644 (file)
@@ -7,3 +7,12 @@ obj-$(CONFIG_MTD_TESTS) += mtd_subpagetest.o
 obj-$(CONFIG_MTD_TESTS) += mtd_torturetest.o
 obj-$(CONFIG_MTD_TESTS) += mtd_nandecctest.o
 obj-$(CONFIG_MTD_TESTS) += mtd_nandbiterrs.o
+
+mtd_oobtest-objs := oobtest.o mtd_test.o
+mtd_pagetest-objs := pagetest.o mtd_test.o
+mtd_readtest-objs := readtest.o mtd_test.o
+mtd_speedtest-objs := speedtest.o mtd_test.o
+mtd_stresstest-objs := stresstest.o mtd_test.o
+mtd_subpagetest-objs := subpagetest.o mtd_test.o
+mtd_torturetest-objs := torturetest.o mtd_test.o
+mtd_nandbiterrs-objs := nandbiterrs.o mtd_test.o
diff --git a/drivers/mtd/tests/mtd_test.c b/drivers/mtd/tests/mtd_test.c
new file mode 100644 (file)
index 0000000..c818a63
--- /dev/null
@@ -0,0 +1,114 @@
+#define pr_fmt(fmt) "mtd_test: " fmt
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/printk.h>
+
+#include "mtd_test.h"
+
+int mtdtest_erase_eraseblock(struct mtd_info *mtd, unsigned int ebnum)
+{
+       int err;
+       struct erase_info ei;
+       loff_t addr = ebnum * mtd->erasesize;
+
+       memset(&ei, 0, sizeof(struct erase_info));
+       ei.mtd  = mtd;
+       ei.addr = addr;
+       ei.len  = mtd->erasesize;
+
+       err = mtd_erase(mtd, &ei);
+       if (err) {
+               pr_info("error %d while erasing EB %d\n", err, ebnum);
+               return err;
+       }
+
+       if (ei.state == MTD_ERASE_FAILED) {
+               pr_info("some erase error occurred at EB %d\n", ebnum);
+               return -EIO;
+       }
+       return 0;
+}
+
+static int is_block_bad(struct mtd_info *mtd, unsigned int ebnum)
+{
+       int ret;
+       loff_t addr = ebnum * mtd->erasesize;
+
+       ret = mtd_block_isbad(mtd, addr);
+       if (ret)
+               pr_info("block %d is bad\n", ebnum);
+
+       return ret;
+}
+
+int mtdtest_scan_for_bad_eraseblocks(struct mtd_info *mtd, unsigned char *bbt,
+                                       unsigned int eb, int ebcnt)
+{
+       int i, bad = 0;
+
+       if (!mtd_can_have_bb(mtd))
+               return 0;
+
+       pr_info("scanning for bad eraseblocks\n");
+       for (i = 0; i < ebcnt; ++i) {
+               bbt[i] = is_block_bad(mtd, eb + i) ? 1 : 0;
+               if (bbt[i])
+                       bad += 1;
+               cond_resched();
+       }
+       pr_info("scanned %d eraseblocks, %d are bad\n", i, bad);
+
+       return 0;
+}
+
+int mtdtest_erase_good_eraseblocks(struct mtd_info *mtd, unsigned char *bbt,
+                               unsigned int eb, int ebcnt)
+{
+       int err;
+       unsigned int i;
+
+       for (i = 0; i < ebcnt; ++i) {
+               if (bbt[i])
+                       continue;
+               err = mtdtest_erase_eraseblock(mtd, eb + i);
+               if (err)
+                       return err;
+               cond_resched();
+       }
+
+       return 0;
+}
+
+int mtdtest_read(struct mtd_info *mtd, loff_t addr, size_t size, void *buf)
+{
+       size_t read;
+       int err;
+
+       err = mtd_read(mtd, addr, size, &read, buf);
+       /* Ignore corrected ECC errors */
+       if (mtd_is_bitflip(err))
+               err = 0;
+       if (!err && read != size)
+               err = -EIO;
+       if (err)
+               pr_err("error: read failed at %#llx\n", addr);
+
+       return err;
+}
+
+int mtdtest_write(struct mtd_info *mtd, loff_t addr, size_t size,
+               const void *buf)
+{
+       size_t written;
+       int err;
+
+       err = mtd_write(mtd, addr, size, &written, buf);
+       if (!err && written != size)
+               err = -EIO;
+       if (err)
+               pr_err("error: write failed at %#llx\n", addr);
+
+       return err;
+}
diff --git a/drivers/mtd/tests/mtd_test.h b/drivers/mtd/tests/mtd_test.h
new file mode 100644 (file)
index 0000000..f437c77
--- /dev/null
@@ -0,0 +1,11 @@
+#include <linux/mtd/mtd.h>
+
+int mtdtest_erase_eraseblock(struct mtd_info *mtd, unsigned int ebnum);
+int mtdtest_scan_for_bad_eraseblocks(struct mtd_info *mtd, unsigned char *bbt,
+                                       unsigned int eb, int ebcnt);
+int mtdtest_erase_good_eraseblocks(struct mtd_info *mtd, unsigned char *bbt,
+                               unsigned int eb, int ebcnt);
+
+int mtdtest_read(struct mtd_info *mtd, loff_t addr, size_t size, void *buf);
+int mtdtest_write(struct mtd_info *mtd, loff_t addr, size_t size,
+               const void *buf);
similarity index 93%
rename from drivers/mtd/tests/mtd_nandbiterrs.c
rename to drivers/mtd/tests/nandbiterrs.c
index 207bf9a9972f3ee400412a1bb40ea4e06cc19745..3cd3aabbe1cd8bd3a92a1131e4955c83c41d4431 100644 (file)
@@ -49,6 +49,7 @@
 #include <linux/err.h>
 #include <linux/mtd/nand.h>
 #include <linux/slab.h>
+#include "mtd_test.h"
 
 static int dev;
 module_param(dev, int, S_IRUGO);
@@ -98,47 +99,13 @@ static uint8_t hash(unsigned offset)
        return c;
 }
 
-static int erase_block(void)
-{
-       int err;
-       struct erase_info ei;
-       loff_t addr = eraseblock * mtd->erasesize;
-
-       pr_info("erase_block\n");
-
-       memset(&ei, 0, sizeof(struct erase_info));
-       ei.mtd  = mtd;
-       ei.addr = addr;
-       ei.len  = mtd->erasesize;
-
-       err = mtd_erase(mtd, &ei);
-       if (err || ei.state == MTD_ERASE_FAILED) {
-               pr_err("error %d while erasing\n", err);
-               if (!err)
-                       err = -EIO;
-               return err;
-       }
-
-       return 0;
-}
-
 /* Writes wbuffer to page */
 static int write_page(int log)
 {
-       int err = 0;
-       size_t written;
-
        if (log)
                pr_info("write_page\n");
 
-       err = mtd_write(mtd, offset, mtd->writesize, &written, wbuffer);
-       if (err || written != mtd->writesize) {
-               pr_err("error: write failed at %#llx\n", (long long)offset);
-               if (!err)
-                       err = -EIO;
-       }
-
-       return err;
+       return mtdtest_write(mtd, offset, mtd->writesize, wbuffer);
 }
 
 /* Re-writes the data area while leaving the OOB alone. */
@@ -415,7 +382,7 @@ static int __init mtd_nandbiterrs_init(void)
                goto exit_rbuffer;
        }
 
-       err = erase_block();
+       err = mtdtest_erase_eraseblock(mtd, eraseblock);
        if (err)
                goto exit_error;
 
@@ -428,7 +395,7 @@ static int __init mtd_nandbiterrs_init(void)
                goto exit_error;
 
        /* We leave the block un-erased in case of test failure. */
-       err = erase_block();
+       err = mtdtest_erase_eraseblock(mtd, eraseblock);
        if (err)
                goto exit_error;
 
similarity index 90%
rename from drivers/mtd/tests/mtd_oobtest.c
rename to drivers/mtd/tests/oobtest.c
index 3e24b379ffa42e8564df4e0be373ef2fe6aa21a5..ff35c465bfeea5cb4f8598d95e05993c8cc590db 100644 (file)
@@ -31,6 +31,8 @@
 #include <linux/sched.h>
 #include <linux/random.h>
 
+#include "mtd_test.h"
+
 static int dev = -EINVAL;
 module_param(dev, int, S_IRUGO);
 MODULE_PARM_DESC(dev, "MTD device number to use");
@@ -49,49 +51,6 @@ static int use_len_max;
 static int vary_offset;
 static struct rnd_state rnd_state;
 
-static int erase_eraseblock(int ebnum)
-{
-       int err;
-       struct erase_info ei;
-       loff_t addr = ebnum * mtd->erasesize;
-
-       memset(&ei, 0, sizeof(struct erase_info));
-       ei.mtd  = mtd;
-       ei.addr = addr;
-       ei.len  = mtd->erasesize;
-
-       err = mtd_erase(mtd, &ei);
-       if (err) {
-               pr_err("error %d while erasing EB %d\n", err, ebnum);
-               return err;
-       }
-
-       if (ei.state == MTD_ERASE_FAILED) {
-               pr_err("some erase error occurred at EB %d\n", ebnum);
-               return -EIO;
-       }
-
-       return 0;
-}
-
-static int erase_whole_device(void)
-{
-       int err;
-       unsigned int i;
-
-       pr_info("erasing whole device\n");
-       for (i = 0; i < ebcnt; ++i) {
-               if (bbt[i])
-                       continue;
-               err = erase_eraseblock(i);
-               if (err)
-                       return err;
-               cond_resched();
-       }
-       pr_info("erased %u eraseblocks\n", i);
-       return 0;
-}
-
 static void do_vary_offset(void)
 {
        use_len -= 1;
@@ -304,38 +263,6 @@ static int verify_all_eraseblocks(void)
        return 0;
 }
 
-static int is_block_bad(int ebnum)
-{
-       int ret;
-       loff_t addr = ebnum * mtd->erasesize;
-
-       ret = mtd_block_isbad(mtd, addr);
-       if (ret)
-               pr_info("block %d is bad\n", ebnum);
-       return ret;
-}
-
-static int scan_for_bad_eraseblocks(void)
-{
-       int i, bad = 0;
-
-       bbt = kmalloc(ebcnt, GFP_KERNEL);
-       if (!bbt) {
-               pr_err("error: cannot allocate memory\n");
-               return -ENOMEM;
-       }
-
-       pr_info("scanning for bad eraseblocks\n");
-       for (i = 0; i < ebcnt; ++i) {
-               bbt[i] = is_block_bad(i) ? 1 : 0;
-               if (bbt[i])
-                       bad += 1;
-               cond_resched();
-       }
-       pr_info("scanned %d eraseblocks, %d are bad\n", i, bad);
-       return 0;
-}
-
 static int __init mtd_oobtest_init(void)
 {
        int err = 0;
@@ -380,17 +307,16 @@ static int __init mtd_oobtest_init(void)
 
        err = -ENOMEM;
        readbuf = kmalloc(mtd->erasesize, GFP_KERNEL);
-       if (!readbuf) {
-               pr_err("error: cannot allocate memory\n");
+       if (!readbuf)
                goto out;
-       }
        writebuf = kmalloc(mtd->erasesize, GFP_KERNEL);
-       if (!writebuf) {
-               pr_err("error: cannot allocate memory\n");
+       if (!writebuf)
+               goto out;
+       bbt = kzalloc(ebcnt, GFP_KERNEL);
+       if (!bbt)
                goto out;
-       }
 
-       err = scan_for_bad_eraseblocks();
+       err = mtdtest_scan_for_bad_eraseblocks(mtd, bbt, 0, ebcnt);
        if (err)
                goto out;
 
@@ -402,7 +328,7 @@ static int __init mtd_oobtest_init(void)
        /* First test: write all OOB, read it back and verify */
        pr_info("test 1 of 5\n");
 
-       err = erase_whole_device();
+       err = mtdtest_erase_good_eraseblocks(mtd, bbt, 0, ebcnt);
        if (err)
                goto out;
 
@@ -422,7 +348,7 @@ static int __init mtd_oobtest_init(void)
         */
        pr_info("test 2 of 5\n");
 
-       err = erase_whole_device();
+       err = mtdtest_erase_good_eraseblocks(mtd, bbt, 0, ebcnt);
        if (err)
                goto out;
 
@@ -452,7 +378,7 @@ static int __init mtd_oobtest_init(void)
         */
        pr_info("test 3 of 5\n");
 
-       err = erase_whole_device();
+       err = mtdtest_erase_good_eraseblocks(mtd, bbt, 0, ebcnt);
        if (err)
                goto out;
 
@@ -485,7 +411,7 @@ static int __init mtd_oobtest_init(void)
        /* Fourth test: try to write off end of device */
        pr_info("test 4 of 5\n");
 
-       err = erase_whole_device();
+       err = mtdtest_erase_good_eraseblocks(mtd, bbt, 0, ebcnt);
        if (err)
                goto out;
 
@@ -577,7 +503,7 @@ static int __init mtd_oobtest_init(void)
                        errcnt += 1;
                }
 
-               err = erase_eraseblock(ebcnt - 1);
+               err = mtdtest_erase_eraseblock(mtd, ebcnt - 1);
                if (err)
                        goto out;
 
@@ -626,7 +552,7 @@ static int __init mtd_oobtest_init(void)
        pr_info("test 5 of 5\n");
 
        /* Erase all eraseblocks */
-       err = erase_whole_device();
+       err = mtdtest_erase_good_eraseblocks(mtd, bbt, 0, ebcnt);
        if (err)
                goto out;
 
similarity index 63%
rename from drivers/mtd/tests/mtd_pagetest.c
rename to drivers/mtd/tests/pagetest.c
index 0c1140b6c2863b92087d1b8156895a5a5162e664..44b96e999ad4694c5ad222c357d98bc22c27c247 100644 (file)
@@ -31,6 +31,8 @@
 #include <linux/sched.h>
 #include <linux/random.h>
 
+#include "mtd_test.h"
+
 static int dev = -EINVAL;
 module_param(dev, int, S_IRUGO);
 MODULE_PARM_DESC(dev, "MTD device number to use");
@@ -48,52 +50,18 @@ static int pgcnt;
 static int errcnt;
 static struct rnd_state rnd_state;
 
-static int erase_eraseblock(int ebnum)
-{
-       int err;
-       struct erase_info ei;
-       loff_t addr = ebnum * mtd->erasesize;
-
-       memset(&ei, 0, sizeof(struct erase_info));
-       ei.mtd  = mtd;
-       ei.addr = addr;
-       ei.len  = mtd->erasesize;
-
-       err = mtd_erase(mtd, &ei);
-       if (err) {
-               pr_err("error %d while erasing EB %d\n", err, ebnum);
-               return err;
-       }
-
-       if (ei.state == MTD_ERASE_FAILED) {
-               pr_err("some erase error occurred at EB %d\n",
-                      ebnum);
-               return -EIO;
-       }
-
-       return 0;
-}
-
 static int write_eraseblock(int ebnum)
 {
-       int err = 0;
-       size_t written;
        loff_t addr = ebnum * mtd->erasesize;
 
        prandom_bytes_state(&rnd_state, writebuf, mtd->erasesize);
        cond_resched();
-       err = mtd_write(mtd, addr, mtd->erasesize, &written, writebuf);
-       if (err || written != mtd->erasesize)
-               pr_err("error: write failed at %#llx\n",
-                      (long long)addr);
-
-       return err;
+       return mtdtest_write(mtd, addr, mtd->erasesize, writebuf);
 }
 
 static int verify_eraseblock(int ebnum)
 {
        uint32_t j;
-       size_t read;
        int err = 0, i;
        loff_t addr0, addrn;
        loff_t addr = ebnum * mtd->erasesize;
@@ -109,31 +77,16 @@ static int verify_eraseblock(int ebnum)
        prandom_bytes_state(&rnd_state, writebuf, mtd->erasesize);
        for (j = 0; j < pgcnt - 1; ++j, addr += pgsize) {
                /* Do a read to set the internal dataRAMs to different data */
-               err = mtd_read(mtd, addr0, bufsize, &read, twopages);
-               if (mtd_is_bitflip(err))
-                       err = 0;
-               if (err || read != bufsize) {
-                       pr_err("error: read failed at %#llx\n",
-                              (long long)addr0);
+               err = mtdtest_read(mtd, addr0, bufsize, twopages);
+               if (err)
                        return err;
-               }
-               err = mtd_read(mtd, addrn - bufsize, bufsize, &read, twopages);
-               if (mtd_is_bitflip(err))
-                       err = 0;
-               if (err || read != bufsize) {
-                       pr_err("error: read failed at %#llx\n",
-                              (long long)(addrn - bufsize));
+               err = mtdtest_read(mtd, addrn - bufsize, bufsize, twopages);
+               if (err)
                        return err;
-               }
                memset(twopages, 0, bufsize);
-               err = mtd_read(mtd, addr, bufsize, &read, twopages);
-               if (mtd_is_bitflip(err))
-                       err = 0;
-               if (err || read != bufsize) {
-                       pr_err("error: read failed at %#llx\n",
-                              (long long)addr);
+               err = mtdtest_read(mtd, addr, bufsize, twopages);
+               if (err)
                        break;
-               }
                if (memcmp(twopages, writebuf + (j * pgsize), bufsize)) {
                        pr_err("error: verify failed at %#llx\n",
                               (long long)addr);
@@ -145,31 +98,16 @@ static int verify_eraseblock(int ebnum)
                struct rnd_state old_state = rnd_state;
 
                /* Do a read to set the internal dataRAMs to different data */
-               err = mtd_read(mtd, addr0, bufsize, &read, twopages);
-               if (mtd_is_bitflip(err))
-                       err = 0;
-               if (err || read != bufsize) {
-                       pr_err("error: read failed at %#llx\n",
-                              (long long)addr0);
+               err = mtdtest_read(mtd, addr0, bufsize, twopages);
+               if (err)
                        return err;
-               }
-               err = mtd_read(mtd, addrn - bufsize, bufsize, &read, twopages);
-               if (mtd_is_bitflip(err))
-                       err = 0;
-               if (err || read != bufsize) {
-                       pr_err("error: read failed at %#llx\n",
-                              (long long)(addrn - bufsize));
+               err = mtdtest_read(mtd, addrn - bufsize, bufsize, twopages);
+               if (err)
                        return err;
-               }
                memset(twopages, 0, bufsize);
-               err = mtd_read(mtd, addr, bufsize, &read, twopages);
-               if (mtd_is_bitflip(err))
-                       err = 0;
-               if (err || read != bufsize) {
-                       pr_err("error: read failed at %#llx\n",
-                              (long long)addr);
+               err = mtdtest_read(mtd, addr, bufsize, twopages);
+               if (err)
                        return err;
-               }
                memcpy(boundary, writebuf + mtd->erasesize - pgsize, pgsize);
                prandom_bytes_state(&rnd_state, boundary + pgsize, pgsize);
                if (memcmp(twopages, boundary, bufsize)) {
@@ -184,17 +122,14 @@ static int verify_eraseblock(int ebnum)
 
 static int crosstest(void)
 {
-       size_t read;
        int err = 0, i;
        loff_t addr, addr0, addrn;
        unsigned char *pp1, *pp2, *pp3, *pp4;
 
        pr_info("crosstest\n");
        pp1 = kmalloc(pgsize * 4, GFP_KERNEL);
-       if (!pp1) {
-               pr_err("error: cannot allocate memory\n");
+       if (!pp1)
                return -ENOMEM;
-       }
        pp2 = pp1 + pgsize;
        pp3 = pp2 + pgsize;
        pp4 = pp3 + pgsize;
@@ -210,24 +145,16 @@ static int crosstest(void)
 
        /* Read 2nd-to-last page to pp1 */
        addr = addrn - pgsize - pgsize;
-       err = mtd_read(mtd, addr, pgsize, &read, pp1);
-       if (mtd_is_bitflip(err))
-               err = 0;
-       if (err || read != pgsize) {
-               pr_err("error: read failed at %#llx\n",
-                      (long long)addr);
+       err = mtdtest_read(mtd, addr, pgsize, pp1);
+       if (err) {
                kfree(pp1);
                return err;
        }
 
        /* Read 3rd-to-last page to pp1 */
        addr = addrn - pgsize - pgsize - pgsize;
-       err = mtd_read(mtd, addr, pgsize, &read, pp1);
-       if (mtd_is_bitflip(err))
-               err = 0;
-       if (err || read != pgsize) {
-               pr_err("error: read failed at %#llx\n",
-                      (long long)addr);
+       err = mtdtest_read(mtd, addr, pgsize, pp1);
+       if (err) {
                kfree(pp1);
                return err;
        }
@@ -235,12 +162,8 @@ static int crosstest(void)
        /* Read first page to pp2 */
        addr = addr0;
        pr_info("reading page at %#llx\n", (long long)addr);
-       err = mtd_read(mtd, addr, pgsize, &read, pp2);
-       if (mtd_is_bitflip(err))
-               err = 0;
-       if (err || read != pgsize) {
-               pr_err("error: read failed at %#llx\n",
-                      (long long)addr);
+       err = mtdtest_read(mtd, addr, pgsize, pp2);
+       if (err) {
                kfree(pp1);
                return err;
        }
@@ -248,12 +171,8 @@ static int crosstest(void)
        /* Read last page to pp3 */
        addr = addrn - pgsize;
        pr_info("reading page at %#llx\n", (long long)addr);
-       err = mtd_read(mtd, addr, pgsize, &read, pp3);
-       if (mtd_is_bitflip(err))
-               err = 0;
-       if (err || read != pgsize) {
-               pr_err("error: read failed at %#llx\n",
-                      (long long)addr);
+       err = mtdtest_read(mtd, addr, pgsize, pp3);
+       if (err) {
                kfree(pp1);
                return err;
        }
@@ -261,12 +180,8 @@ static int crosstest(void)
        /* Read first page again to pp4 */
        addr = addr0;
        pr_info("reading page at %#llx\n", (long long)addr);
-       err = mtd_read(mtd, addr, pgsize, &read, pp4);
-       if (mtd_is_bitflip(err))
-               err = 0;
-       if (err || read != pgsize) {
-               pr_err("error: read failed at %#llx\n",
-                      (long long)addr);
+       err = mtdtest_read(mtd, addr, pgsize, pp4);
+       if (err) {
                kfree(pp1);
                return err;
        }
@@ -285,7 +200,6 @@ static int crosstest(void)
 
 static int erasecrosstest(void)
 {
-       size_t read, written;
        int err = 0, i, ebnum, ebnum2;
        loff_t addr0;
        char *readbuf = twopages;
@@ -304,30 +218,22 @@ static int erasecrosstest(void)
                ebnum2 -= 1;
 
        pr_info("erasing block %d\n", ebnum);
-       err = erase_eraseblock(ebnum);
+       err = mtdtest_erase_eraseblock(mtd, ebnum);
        if (err)
                return err;
 
        pr_info("writing 1st page of block %d\n", ebnum);
        prandom_bytes_state(&rnd_state, writebuf, pgsize);
        strcpy(writebuf, "There is no data like this!");
-       err = mtd_write(mtd, addr0, pgsize, &written, writebuf);
-       if (err || written != pgsize) {
-               pr_info("error: write failed at %#llx\n",
-                      (long long)addr0);
-               return err ? err : -1;
-       }
+       err = mtdtest_write(mtd, addr0, pgsize, writebuf);
+       if (err)
+               return err;
 
        pr_info("reading 1st page of block %d\n", ebnum);
        memset(readbuf, 0, pgsize);
-       err = mtd_read(mtd, addr0, pgsize, &read, readbuf);
-       if (mtd_is_bitflip(err))
-               err = 0;
-       if (err || read != pgsize) {
-               pr_err("error: read failed at %#llx\n",
-                      (long long)addr0);
-               return err ? err : -1;
-       }
+       err = mtdtest_read(mtd, addr0, pgsize, readbuf);
+       if (err)
+               return err;
 
        pr_info("verifying 1st page of block %d\n", ebnum);
        if (memcmp(writebuf, readbuf, pgsize)) {
@@ -337,35 +243,27 @@ static int erasecrosstest(void)
        }
 
        pr_info("erasing block %d\n", ebnum);
-       err = erase_eraseblock(ebnum);
+       err = mtdtest_erase_eraseblock(mtd, ebnum);
        if (err)
                return err;
 
        pr_info("writing 1st page of block %d\n", ebnum);
        prandom_bytes_state(&rnd_state, writebuf, pgsize);
        strcpy(writebuf, "There is no data like this!");
-       err = mtd_write(mtd, addr0, pgsize, &written, writebuf);
-       if (err || written != pgsize) {
-               pr_err("error: write failed at %#llx\n",
-                      (long long)addr0);
-               return err ? err : -1;
-       }
+       err = mtdtest_write(mtd, addr0, pgsize, writebuf);
+       if (err)
+               return err;
 
        pr_info("erasing block %d\n", ebnum2);
-       err = erase_eraseblock(ebnum2);
+       err = mtdtest_erase_eraseblock(mtd, ebnum2);
        if (err)
                return err;
 
        pr_info("reading 1st page of block %d\n", ebnum);
        memset(readbuf, 0, pgsize);
-       err = mtd_read(mtd, addr0, pgsize, &read, readbuf);
-       if (mtd_is_bitflip(err))
-               err = 0;
-       if (err || read != pgsize) {
-               pr_err("error: read failed at %#llx\n",
-                      (long long)addr0);
-               return err ? err : -1;
-       }
+       err = mtdtest_read(mtd, addr0, pgsize, readbuf);
+       if (err)
+               return err;
 
        pr_info("verifying 1st page of block %d\n", ebnum);
        if (memcmp(writebuf, readbuf, pgsize)) {
@@ -381,7 +279,6 @@ static int erasecrosstest(void)
 
 static int erasetest(void)
 {
-       size_t read, written;
        int err = 0, i, ebnum, ok = 1;
        loff_t addr0;
 
@@ -395,33 +292,25 @@ static int erasetest(void)
        }
 
        pr_info("erasing block %d\n", ebnum);
-       err = erase_eraseblock(ebnum);
+       err = mtdtest_erase_eraseblock(mtd, ebnum);
        if (err)
                return err;
 
        pr_info("writing 1st page of block %d\n", ebnum);
        prandom_bytes_state(&rnd_state, writebuf, pgsize);
-       err = mtd_write(mtd, addr0, pgsize, &written, writebuf);
-       if (err || written != pgsize) {
-               pr_err("error: write failed at %#llx\n",
-                      (long long)addr0);
-               return err ? err : -1;
-       }
+       err = mtdtest_write(mtd, addr0, pgsize, writebuf);
+       if (err)
+               return err;
 
        pr_info("erasing block %d\n", ebnum);
-       err = erase_eraseblock(ebnum);
+       err = mtdtest_erase_eraseblock(mtd, ebnum);
        if (err)
                return err;
 
        pr_info("reading 1st page of block %d\n", ebnum);
-       err = mtd_read(mtd, addr0, pgsize, &read, twopages);
-       if (mtd_is_bitflip(err))
-               err = 0;
-       if (err || read != pgsize) {
-               pr_err("error: read failed at %#llx\n",
-                      (long long)addr0);
-               return err ? err : -1;
-       }
+       err = mtdtest_read(mtd, addr0, pgsize, twopages);
+       if (err)
+               return err;
 
        pr_info("verifying 1st page of block %d is all 0xff\n",
               ebnum);
@@ -440,38 +329,6 @@ static int erasetest(void)
        return err;
 }
 
-static int is_block_bad(int ebnum)
-{
-       loff_t addr = ebnum * mtd->erasesize;
-       int ret;
-
-       ret = mtd_block_isbad(mtd, addr);
-       if (ret)
-               pr_info("block %d is bad\n", ebnum);
-       return ret;
-}
-
-static int scan_for_bad_eraseblocks(void)
-{
-       int i, bad = 0;
-
-       bbt = kzalloc(ebcnt, GFP_KERNEL);
-       if (!bbt) {
-               pr_err("error: cannot allocate memory\n");
-               return -ENOMEM;
-       }
-
-       pr_info("scanning for bad eraseblocks\n");
-       for (i = 0; i < ebcnt; ++i) {
-               bbt[i] = is_block_bad(i) ? 1 : 0;
-               if (bbt[i])
-                       bad += 1;
-               cond_resched();
-       }
-       pr_info("scanned %d eraseblocks, %d are bad\n", i, bad);
-       return 0;
-}
-
 static int __init mtd_pagetest_init(void)
 {
        int err = 0;
@@ -516,36 +373,28 @@ static int __init mtd_pagetest_init(void)
        err = -ENOMEM;
        bufsize = pgsize * 2;
        writebuf = kmalloc(mtd->erasesize, GFP_KERNEL);
-       if (!writebuf) {
-               pr_err("error: cannot allocate memory\n");
+       if (!writebuf)
                goto out;
-       }
        twopages = kmalloc(bufsize, GFP_KERNEL);
-       if (!twopages) {
-               pr_err("error: cannot allocate memory\n");
+       if (!twopages)
                goto out;
-       }
        boundary = kmalloc(bufsize, GFP_KERNEL);
-       if (!boundary) {
-               pr_err("error: cannot allocate memory\n");
+       if (!boundary)
                goto out;
-       }
 
-       err = scan_for_bad_eraseblocks();
+       bbt = kzalloc(ebcnt, GFP_KERNEL);
+       if (!bbt)
+               goto out;
+       err = mtdtest_scan_for_bad_eraseblocks(mtd, bbt, 0, ebcnt);
        if (err)
                goto out;
 
        /* Erase all eraseblocks */
        pr_info("erasing whole device\n");
-       for (i = 0; i < ebcnt; ++i) {
-               if (bbt[i])
-                       continue;
-               err = erase_eraseblock(i);
-               if (err)
-                       goto out;
-               cond_resched();
-       }
-       pr_info("erased %u eraseblocks\n", i);
+       err = mtdtest_erase_good_eraseblocks(mtd, bbt, 0, ebcnt);
+       if (err)
+               goto out;
+       pr_info("erased %u eraseblocks\n", ebcnt);
 
        /* Write all eraseblocks */
        prandom_seed_state(&rnd_state, 1);
similarity index 83%
rename from drivers/mtd/tests/mtd_readtest.c
rename to drivers/mtd/tests/readtest.c
index 266de04b6d29ef1666ab54ea05250411536ed0ef..626e66d0f7e7a081271cae6c88f608edc8be8727 100644 (file)
@@ -29,6 +29,8 @@
 #include <linux/slab.h>
 #include <linux/sched.h>
 
+#include "mtd_test.h"
+
 static int dev = -EINVAL;
 module_param(dev, int, S_IRUGO);
 MODULE_PARM_DESC(dev, "MTD device number to use");
@@ -44,7 +46,6 @@ static int pgcnt;
 
 static int read_eraseblock_by_page(int ebnum)
 {
-       size_t read;
        int i, ret, err = 0;
        loff_t addr = ebnum * mtd->erasesize;
        void *buf = iobuf;
@@ -52,16 +53,10 @@ static int read_eraseblock_by_page(int ebnum)
 
        for (i = 0; i < pgcnt; i++) {
                memset(buf, 0 , pgsize);
-               ret = mtd_read(mtd, addr, pgsize, &read, buf);
-               if (ret == -EUCLEAN)
-                       ret = 0;
-               if (ret || read != pgsize) {
-                       pr_err("error: read failed at %#llx\n",
-                              (long long)addr);
+               ret = mtdtest_read(mtd, addr, pgsize, buf);
+               if (ret) {
                        if (!err)
                                err = ret;
-                       if (!err)
-                               err = -EINVAL;
                }
                if (mtd->oobsize) {
                        struct mtd_oob_ops ops;
@@ -127,41 +122,6 @@ static void dump_eraseblock(int ebnum)
                }
 }
 
-static int is_block_bad(int ebnum)
-{
-       loff_t addr = ebnum * mtd->erasesize;
-       int ret;
-
-       ret = mtd_block_isbad(mtd, addr);
-       if (ret)
-               pr_info("block %d is bad\n", ebnum);
-       return ret;
-}
-
-static int scan_for_bad_eraseblocks(void)
-{
-       int i, bad = 0;
-
-       bbt = kzalloc(ebcnt, GFP_KERNEL);
-       if (!bbt) {
-               pr_err("error: cannot allocate memory\n");
-               return -ENOMEM;
-       }
-
-       if (!mtd_can_have_bb(mtd))
-               return 0;
-
-       pr_info("scanning for bad eraseblocks\n");
-       for (i = 0; i < ebcnt; ++i) {
-               bbt[i] = is_block_bad(i) ? 1 : 0;
-               if (bbt[i])
-                       bad += 1;
-               cond_resched();
-       }
-       pr_info("scanned %d eraseblocks, %d are bad\n", i, bad);
-       return 0;
-}
-
 static int __init mtd_readtest_init(void)
 {
        uint64_t tmp;
@@ -204,17 +164,16 @@ static int __init mtd_readtest_init(void)
 
        err = -ENOMEM;
        iobuf = kmalloc(mtd->erasesize, GFP_KERNEL);
-       if (!iobuf) {
-               pr_err("error: cannot allocate memory\n");
+       if (!iobuf)
                goto out;
-       }
        iobuf1 = kmalloc(mtd->erasesize, GFP_KERNEL);
-       if (!iobuf1) {
-               pr_err("error: cannot allocate memory\n");
+       if (!iobuf1)
                goto out;
-       }
 
-       err = scan_for_bad_eraseblocks();
+       bbt = kzalloc(ebcnt, GFP_KERNEL);
+       if (!bbt)
+               goto out;
+       err = mtdtest_scan_for_bad_eraseblocks(mtd, bbt, 0, ebcnt);
        if (err)
                goto out;
 
similarity index 69%
rename from drivers/mtd/tests/mtd_speedtest.c
rename to drivers/mtd/tests/speedtest.c
index a6ce9c1fa6c56c2cfcbc345e7fd53d323b7b7cbf..87ff6a29f84ee94bacc657aaf6f52700aa45074d 100644 (file)
@@ -30,6 +30,8 @@
 #include <linux/sched.h>
 #include <linux/random.h>
 
+#include "mtd_test.h"
+
 static int dev = -EINVAL;
 module_param(dev, int, S_IRUGO);
 MODULE_PARM_DESC(dev, "MTD device number to use");
@@ -49,33 +51,6 @@ static int pgcnt;
 static int goodebcnt;
 static struct timeval start, finish;
 
-
-static int erase_eraseblock(int ebnum)
-{
-       int err;
-       struct erase_info ei;
-       loff_t addr = ebnum * mtd->erasesize;
-
-       memset(&ei, 0, sizeof(struct erase_info));
-       ei.mtd  = mtd;
-       ei.addr = addr;
-       ei.len  = mtd->erasesize;
-
-       err = mtd_erase(mtd, &ei);
-       if (err) {
-               pr_err("error %d while erasing EB %d\n", err, ebnum);
-               return err;
-       }
-
-       if (ei.state == MTD_ERASE_FAILED) {
-               pr_err("some erase error occurred at EB %d\n",
-                      ebnum);
-               return -EIO;
-       }
-
-       return 0;
-}
-
 static int multiblock_erase(int ebnum, int blocks)
 {
        int err;
@@ -103,54 +78,23 @@ static int multiblock_erase(int ebnum, int blocks)
        return 0;
 }
 
-static int erase_whole_device(void)
-{
-       int err;
-       unsigned int i;
-
-       for (i = 0; i < ebcnt; ++i) {
-               if (bbt[i])
-                       continue;
-               err = erase_eraseblock(i);
-               if (err)
-                       return err;
-               cond_resched();
-       }
-       return 0;
-}
-
 static int write_eraseblock(int ebnum)
 {
-       size_t written;
-       int err = 0;
        loff_t addr = ebnum * mtd->erasesize;
 
-       err = mtd_write(mtd, addr, mtd->erasesize, &written, iobuf);
-       if (err || written != mtd->erasesize) {
-               pr_err("error: write failed at %#llx\n", addr);
-               if (!err)
-                       err = -EINVAL;
-       }
-
-       return err;
+       return mtdtest_write(mtd, addr, mtd->erasesize, iobuf);
 }
 
 static int write_eraseblock_by_page(int ebnum)
 {
-       size_t written;
        int i, err = 0;
        loff_t addr = ebnum * mtd->erasesize;
        void *buf = iobuf;
 
        for (i = 0; i < pgcnt; i++) {
-               err = mtd_write(mtd, addr, pgsize, &written, buf);
-               if (err || written != pgsize) {
-                       pr_err("error: write failed at %#llx\n",
-                              addr);
-                       if (!err)
-                               err = -EINVAL;
+               err = mtdtest_write(mtd, addr, pgsize, buf);
+               if (err)
                        break;
-               }
                addr += pgsize;
                buf += pgsize;
        }
@@ -160,74 +104,41 @@ static int write_eraseblock_by_page(int ebnum)
 
 static int write_eraseblock_by_2pages(int ebnum)
 {
-       size_t written, sz = pgsize * 2;
+       size_t sz = pgsize * 2;
        int i, n = pgcnt / 2, err = 0;
        loff_t addr = ebnum * mtd->erasesize;
        void *buf = iobuf;
 
        for (i = 0; i < n; i++) {
-               err = mtd_write(mtd, addr, sz, &written, buf);
-               if (err || written != sz) {
-                       pr_err("error: write failed at %#llx\n",
-                              addr);
-                       if (!err)
-                               err = -EINVAL;
+               err = mtdtest_write(mtd, addr, sz, buf);
+               if (err)
                        return err;
-               }
                addr += sz;
                buf += sz;
        }
-       if (pgcnt % 2) {
-               err = mtd_write(mtd, addr, pgsize, &written, buf);
-               if (err || written != pgsize) {
-                       pr_err("error: write failed at %#llx\n",
-                              addr);
-                       if (!err)
-                               err = -EINVAL;
-               }
-       }
+       if (pgcnt % 2)
+               err = mtdtest_write(mtd, addr, pgsize, buf);
 
        return err;
 }
 
 static int read_eraseblock(int ebnum)
 {
-       size_t read;
-       int err = 0;
        loff_t addr = ebnum * mtd->erasesize;
 
-       err = mtd_read(mtd, addr, mtd->erasesize, &read, iobuf);
-       /* Ignore corrected ECC errors */
-       if (mtd_is_bitflip(err))
-               err = 0;
-       if (err || read != mtd->erasesize) {
-               pr_err("error: read failed at %#llx\n", addr);
-               if (!err)
-                       err = -EINVAL;
-       }
-
-       return err;
+       return mtdtest_read(mtd, addr, mtd->erasesize, iobuf);
 }
 
 static int read_eraseblock_by_page(int ebnum)
 {
-       size_t read;
        int i, err = 0;
        loff_t addr = ebnum * mtd->erasesize;
        void *buf = iobuf;
 
        for (i = 0; i < pgcnt; i++) {
-               err = mtd_read(mtd, addr, pgsize, &read, buf);
-               /* Ignore corrected ECC errors */
-               if (mtd_is_bitflip(err))
-                       err = 0;
-               if (err || read != pgsize) {
-                       pr_err("error: read failed at %#llx\n",
-                              addr);
-                       if (!err)
-                               err = -EINVAL;
+               err = mtdtest_read(mtd, addr, pgsize, buf);
+               if (err)
                        break;
-               }
                addr += pgsize;
                buf += pgsize;
        }
@@ -237,53 +148,24 @@ static int read_eraseblock_by_page(int ebnum)
 
 static int read_eraseblock_by_2pages(int ebnum)
 {
-       size_t read, sz = pgsize * 2;
+       size_t sz = pgsize * 2;
        int i, n = pgcnt / 2, err = 0;
        loff_t addr = ebnum * mtd->erasesize;
        void *buf = iobuf;
 
        for (i = 0; i < n; i++) {
-               err = mtd_read(mtd, addr, sz, &read, buf);
-               /* Ignore corrected ECC errors */
-               if (mtd_is_bitflip(err))
-                       err = 0;
-               if (err || read != sz) {
-                       pr_err("error: read failed at %#llx\n",
-                              addr);
-                       if (!err)
-                               err = -EINVAL;
+               err = mtdtest_read(mtd, addr, sz, buf);
+               if (err)
                        return err;
-               }
                addr += sz;
                buf += sz;
        }
-       if (pgcnt % 2) {
-               err = mtd_read(mtd, addr, pgsize, &read, buf);
-               /* Ignore corrected ECC errors */
-               if (mtd_is_bitflip(err))
-                       err = 0;
-               if (err || read != pgsize) {
-                       pr_err("error: read failed at %#llx\n",
-                              addr);
-                       if (!err)
-                               err = -EINVAL;
-               }
-       }
+       if (pgcnt % 2)
+               err = mtdtest_read(mtd, addr, pgsize, buf);
 
        return err;
 }
 
-static int is_block_bad(int ebnum)
-{
-       loff_t addr = ebnum * mtd->erasesize;
-       int ret;
-
-       ret = mtd_block_isbad(mtd, addr);
-       if (ret)
-               pr_info("block %d is bad\n", ebnum);
-       return ret;
-}
-
 static inline void start_timing(void)
 {
        do_gettimeofday(&start);
@@ -308,32 +190,6 @@ static long calc_speed(void)
        return k;
 }
 
-static int scan_for_bad_eraseblocks(void)
-{
-       int i, bad = 0;
-
-       bbt = kzalloc(ebcnt, GFP_KERNEL);
-       if (!bbt) {
-               pr_err("error: cannot allocate memory\n");
-               return -ENOMEM;
-       }
-
-       if (!mtd_can_have_bb(mtd))
-               goto out;
-
-       pr_info("scanning for bad eraseblocks\n");
-       for (i = 0; i < ebcnt; ++i) {
-               bbt[i] = is_block_bad(i) ? 1 : 0;
-               if (bbt[i])
-                       bad += 1;
-               cond_resched();
-       }
-       pr_info("scanned %d eraseblocks, %d are bad\n", i, bad);
-out:
-       goodebcnt = ebcnt - bad;
-       return 0;
-}
-
 static int __init mtd_speedtest_init(void)
 {
        int err, i, blocks, j, k;
@@ -384,18 +240,23 @@ static int __init mtd_speedtest_init(void)
 
        err = -ENOMEM;
        iobuf = kmalloc(mtd->erasesize, GFP_KERNEL);
-       if (!iobuf) {
-               pr_err("error: cannot allocate memory\n");
+       if (!iobuf)
                goto out;
-       }
 
        prandom_bytes(iobuf, mtd->erasesize);
 
-       err = scan_for_bad_eraseblocks();
+       bbt = kzalloc(ebcnt, GFP_KERNEL);
+       if (!bbt)
+               goto out;
+       err = mtdtest_scan_for_bad_eraseblocks(mtd, bbt, 0, ebcnt);
        if (err)
                goto out;
+       for (i = 0; i < ebcnt; i++) {
+               if (!bbt[i])
+                       goodebcnt++;
+       }
 
-       err = erase_whole_device();
+       err = mtdtest_erase_good_eraseblocks(mtd, bbt, 0, ebcnt);
        if (err)
                goto out;
 
@@ -429,7 +290,7 @@ static int __init mtd_speedtest_init(void)
        speed = calc_speed();
        pr_info("eraseblock read speed is %ld KiB/s\n", speed);
 
-       err = erase_whole_device();
+       err = mtdtest_erase_good_eraseblocks(mtd, bbt, 0, ebcnt);
        if (err)
                goto out;
 
@@ -463,7 +324,7 @@ static int __init mtd_speedtest_init(void)
        speed = calc_speed();
        pr_info("page read speed is %ld KiB/s\n", speed);
 
-       err = erase_whole_device();
+       err = mtdtest_erase_good_eraseblocks(mtd, bbt, 0, ebcnt);
        if (err)
                goto out;
 
@@ -500,14 +361,9 @@ static int __init mtd_speedtest_init(void)
        /* Erase all eraseblocks */
        pr_info("Testing erase speed\n");
        start_timing();
-       for (i = 0; i < ebcnt; ++i) {
-               if (bbt[i])
-                       continue;
-               err = erase_eraseblock(i);
-               if (err)
-                       goto out;
-               cond_resched();
-       }
+       err = mtdtest_erase_good_eraseblocks(mtd, bbt, 0, ebcnt);
+       if (err)
+               goto out;
        stop_timing();
        speed = calc_speed();
        pr_info("erase speed is %ld KiB/s\n", speed);
similarity index 74%
rename from drivers/mtd/tests/mtd_stresstest.c
rename to drivers/mtd/tests/stresstest.c
index 787f539d16ca436099826f0626bb4c57efe6da6d..c9d42cc2df1b5303804217f7621af9539cffc28f 100644 (file)
@@ -31,6 +31,8 @@
 #include <linux/vmalloc.h>
 #include <linux/random.h>
 
+#include "mtd_test.h"
+
 static int dev = -EINVAL;
 module_param(dev, int, S_IRUGO);
 MODULE_PARM_DESC(dev, "MTD device number to use");
@@ -81,49 +83,11 @@ static int rand_len(int offs)
        return len;
 }
 
-static int erase_eraseblock(int ebnum)
-{
-       int err;
-       struct erase_info ei;
-       loff_t addr = ebnum * mtd->erasesize;
-
-       memset(&ei, 0, sizeof(struct erase_info));
-       ei.mtd  = mtd;
-       ei.addr = addr;
-       ei.len  = mtd->erasesize;
-
-       err = mtd_erase(mtd, &ei);
-       if (unlikely(err)) {
-               pr_err("error %d while erasing EB %d\n", err, ebnum);
-               return err;
-       }
-
-       if (unlikely(ei.state == MTD_ERASE_FAILED)) {
-               pr_err("some erase error occurred at EB %d\n",
-                      ebnum);
-               return -EIO;
-       }
-
-       return 0;
-}
-
-static int is_block_bad(int ebnum)
-{
-       loff_t addr = ebnum * mtd->erasesize;
-       int ret;
-
-       ret = mtd_block_isbad(mtd, addr);
-       if (ret)
-               pr_info("block %d is bad\n", ebnum);
-       return ret;
-}
-
 static int do_read(void)
 {
-       size_t read;
        int eb = rand_eb();
        int offs = rand_offs();
-       int len = rand_len(offs), err;
+       int len = rand_len(offs);
        loff_t addr;
 
        if (bbt[eb + 1]) {
@@ -133,28 +97,17 @@ static int do_read(void)
                        len = mtd->erasesize - offs;
        }
        addr = eb * mtd->erasesize + offs;
-       err = mtd_read(mtd, addr, len, &read, readbuf);
-       if (mtd_is_bitflip(err))
-               err = 0;
-       if (unlikely(err || read != len)) {
-               pr_err("error: read failed at 0x%llx\n",
-                      (long long)addr);
-               if (!err)
-                       err = -EINVAL;
-               return err;
-       }
-       return 0;
+       return mtdtest_read(mtd, addr, len, readbuf);
 }
 
 static int do_write(void)
 {
        int eb = rand_eb(), offs, err, len;
-       size_t written;
        loff_t addr;
 
        offs = offsets[eb];
        if (offs >= mtd->erasesize) {
-               err = erase_eraseblock(eb);
+               err = mtdtest_erase_eraseblock(mtd, eb);
                if (err)
                        return err;
                offs = offsets[eb] = 0;
@@ -165,21 +118,16 @@ static int do_write(void)
                if (bbt[eb + 1])
                        len = mtd->erasesize - offs;
                else {
-                       err = erase_eraseblock(eb + 1);
+                       err = mtdtest_erase_eraseblock(mtd, eb + 1);
                        if (err)
                                return err;
                        offsets[eb + 1] = 0;
                }
        }
        addr = eb * mtd->erasesize + offs;
-       err = mtd_write(mtd, addr, len, &written, writebuf);
-       if (unlikely(err || written != len)) {
-               pr_err("error: write failed at 0x%llx\n",
-                      (long long)addr);
-               if (!err)
-                       err = -EINVAL;
+       err = mtdtest_write(mtd, addr, len, writebuf);
+       if (unlikely(err))
                return err;
-       }
        offs += len;
        while (offs > mtd->erasesize) {
                offsets[eb++] = mtd->erasesize;
@@ -197,30 +145,6 @@ static int do_operation(void)
                return do_write();
 }
 
-static int scan_for_bad_eraseblocks(void)
-{
-       int i, bad = 0;
-
-       bbt = kzalloc(ebcnt, GFP_KERNEL);
-       if (!bbt) {
-               pr_err("error: cannot allocate memory\n");
-               return -ENOMEM;
-       }
-
-       if (!mtd_can_have_bb(mtd))
-               return 0;
-
-       pr_info("scanning for bad eraseblocks\n");
-       for (i = 0; i < ebcnt; ++i) {
-               bbt[i] = is_block_bad(i) ? 1 : 0;
-               if (bbt[i])
-                       bad += 1;
-               cond_resched();
-       }
-       pr_info("scanned %d eraseblocks, %d are bad\n", i, bad);
-       return 0;
-}
-
 static int __init mtd_stresstest_init(void)
 {
        int err;
@@ -276,15 +200,16 @@ static int __init mtd_stresstest_init(void)
        readbuf = vmalloc(bufsize);
        writebuf = vmalloc(bufsize);
        offsets = kmalloc(ebcnt * sizeof(int), GFP_KERNEL);
-       if (!readbuf || !writebuf || !offsets) {
-               pr_err("error: cannot allocate memory\n");
+       if (!readbuf || !writebuf || !offsets)
                goto out;
-       }
        for (i = 0; i < ebcnt; i++)
                offsets[i] = mtd->erasesize;
        prandom_bytes(writebuf, bufsize);
 
-       err = scan_for_bad_eraseblocks();
+       bbt = kzalloc(ebcnt, GFP_KERNEL);
+       if (!bbt)
+               goto out;
+       err = mtdtest_scan_for_bad_eraseblocks(mtd, bbt, 0, ebcnt);
        if (err)
                goto out;
 
similarity index 86%
rename from drivers/mtd/tests/mtd_subpagetest.c
rename to drivers/mtd/tests/subpagetest.c
index aade56f2794541f9d0545be12a0110f9ffe18ea1..e2c0adf24cfc35aa6692ff5267b93babc523c69c 100644 (file)
@@ -30,6 +30,8 @@
 #include <linux/sched.h>
 #include <linux/random.h>
 
+#include "mtd_test.h"
+
 static int dev = -EINVAL;
 module_param(dev, int, S_IRUGO);
 MODULE_PARM_DESC(dev, "MTD device number to use");
@@ -51,50 +53,6 @@ static inline void clear_data(unsigned char *buf, size_t len)
        memset(buf, 0, len);
 }
 
-static int erase_eraseblock(int ebnum)
-{
-       int err;
-       struct erase_info ei;
-       loff_t addr = ebnum * mtd->erasesize;
-
-       memset(&ei, 0, sizeof(struct erase_info));
-       ei.mtd  = mtd;
-       ei.addr = addr;
-       ei.len  = mtd->erasesize;
-
-       err = mtd_erase(mtd, &ei);
-       if (err) {
-               pr_err("error %d while erasing EB %d\n", err, ebnum);
-               return err;
-       }
-
-       if (ei.state == MTD_ERASE_FAILED) {
-               pr_err("some erase error occurred at EB %d\n",
-                      ebnum);
-               return -EIO;
-       }
-
-       return 0;
-}
-
-static int erase_whole_device(void)
-{
-       int err;
-       unsigned int i;
-
-       pr_info("erasing whole device\n");
-       for (i = 0; i < ebcnt; ++i) {
-               if (bbt[i])
-                       continue;
-               err = erase_eraseblock(i);
-               if (err)
-                       return err;
-               cond_resched();
-       }
-       pr_info("erased %u eraseblocks\n", i);
-       return 0;
-}
-
 static int write_eraseblock(int ebnum)
 {
        size_t written;
@@ -317,38 +275,6 @@ static int verify_all_eraseblocks_ff(void)
        return 0;
 }
 
-static int is_block_bad(int ebnum)
-{
-       loff_t addr = ebnum * mtd->erasesize;
-       int ret;
-
-       ret = mtd_block_isbad(mtd, addr);
-       if (ret)
-               pr_info("block %d is bad\n", ebnum);
-       return ret;
-}
-
-static int scan_for_bad_eraseblocks(void)
-{
-       int i, bad = 0;
-
-       bbt = kzalloc(ebcnt, GFP_KERNEL);
-       if (!bbt) {
-               pr_err("error: cannot allocate memory\n");
-               return -ENOMEM;
-       }
-
-       pr_info("scanning for bad eraseblocks\n");
-       for (i = 0; i < ebcnt; ++i) {
-               bbt[i] = is_block_bad(i) ? 1 : 0;
-               if (bbt[i])
-                       bad += 1;
-               cond_resched();
-       }
-       pr_info("scanned %d eraseblocks, %d are bad\n", i, bad);
-       return 0;
-}
-
 static int __init mtd_subpagetest_init(void)
 {
        int err = 0;
@@ -393,21 +319,20 @@ static int __init mtd_subpagetest_init(void)
        err = -ENOMEM;
        bufsize = subpgsize * 32;
        writebuf = kmalloc(bufsize, GFP_KERNEL);
-       if (!writebuf) {
-               pr_info("error: cannot allocate memory\n");
+       if (!writebuf)
                goto out;
-       }
        readbuf = kmalloc(bufsize, GFP_KERNEL);
-       if (!readbuf) {
-               pr_info("error: cannot allocate memory\n");
+       if (!readbuf)
+               goto out;
+       bbt = kzalloc(ebcnt, GFP_KERNEL);
+       if (!bbt)
                goto out;
-       }
 
-       err = scan_for_bad_eraseblocks();
+       err = mtdtest_scan_for_bad_eraseblocks(mtd, bbt, 0, ebcnt);
        if (err)
                goto out;
 
-       err = erase_whole_device();
+       err = mtdtest_erase_good_eraseblocks(mtd, bbt, 0, ebcnt);
        if (err)
                goto out;
 
@@ -439,7 +364,7 @@ static int __init mtd_subpagetest_init(void)
        }
        pr_info("verified %u eraseblocks\n", i);
 
-       err = erase_whole_device();
+       err = mtdtest_erase_good_eraseblocks(mtd, bbt, 0, ebcnt);
        if (err)
                goto out;
 
@@ -477,7 +402,7 @@ static int __init mtd_subpagetest_init(void)
        }
        pr_info("verified %u eraseblocks\n", i);
 
-       err = erase_whole_device();
+       err = mtdtest_erase_good_eraseblocks(mtd, bbt, 0, ebcnt);
        if (err)
                goto out;
 
similarity index 90%
rename from drivers/mtd/tests/mtd_torturetest.c
rename to drivers/mtd/tests/torturetest.c
index 3a9f6a6a79f99d4c5662ab3fc90051a23e2ffbed..eeab96973cf07e6dcab864c5ee4e23eff0f0a32c 100644 (file)
@@ -32,6 +32,7 @@
 #include <linux/mtd/mtd.h>
 #include <linux/slab.h>
 #include <linux/sched.h>
+#include "mtd_test.h"
 
 #define RETRIES 3
 
@@ -92,35 +93,6 @@ static inline void stop_timing(void)
        do_gettimeofday(&finish);
 }
 
-/*
- * Erase eraseblock number @ebnum.
- */
-static inline int erase_eraseblock(int ebnum)
-{
-       int err;
-       struct erase_info ei;
-       loff_t addr = ebnum * mtd->erasesize;
-
-       memset(&ei, 0, sizeof(struct erase_info));
-       ei.mtd  = mtd;
-       ei.addr = addr;
-       ei.len  = mtd->erasesize;
-
-       err = mtd_erase(mtd, &ei);
-       if (err) {
-               pr_err("error %d while erasing EB %d\n", err, ebnum);
-               return err;
-       }
-
-       if (ei.state == MTD_ERASE_FAILED) {
-               pr_err("some erase error occurred at EB %d\n",
-                      ebnum);
-               return -EIO;
-       }
-
-       return 0;
-}
-
 /*
  * Check that the contents of eraseblock number @enbum is equivalent to the
  * @buf buffer.
@@ -208,7 +180,7 @@ static inline int write_pattern(int ebnum, void *buf)
 static int __init tort_init(void)
 {
        int err = 0, i, infinite = !cycles_count;
-       int *bad_ebs;
+       unsigned char *bad_ebs;
 
        printk(KERN_INFO "\n");
        printk(KERN_INFO "=================================================\n");
@@ -265,7 +237,7 @@ static int __init tort_init(void)
        if (!check_buf)
                goto out_patt_FF;
 
-       bad_ebs = kcalloc(ebcnt, sizeof(*bad_ebs), GFP_KERNEL);
+       bad_ebs = kzalloc(ebcnt, GFP_KERNEL);
        if (!bad_ebs)
                goto out_check_buf;
 
@@ -283,40 +255,16 @@ static int __init tort_init(void)
                }
        }
 
-       /*
-        * Check if there is a bad eraseblock among those we are going to test.
-        */
-       if (mtd_can_have_bb(mtd)) {
-               for (i = eb; i < eb + ebcnt; i++) {
-                       err = mtd_block_isbad(mtd, (loff_t)i * mtd->erasesize);
-
-                       if (err < 0) {
-                               pr_info("block_isbad() returned %d "
-                                      "for EB %d\n", err, i);
-                               goto out;
-                       }
-
-                       if (err) {
-                               pr_err("EB %d is bad. Skip it.\n", i);
-                               bad_ebs[i - eb] = 1;
-                       }
-               }
-       }
+       err = mtdtest_scan_for_bad_eraseblocks(mtd, bad_ebs, eb, ebcnt);
+       if (err)
+               goto out;
 
        start_timing();
        while (1) {
                int i;
                void *patt;
 
-               /* Erase all eraseblocks */
-               for (i = eb; i < eb + ebcnt; i++) {
-                       if (bad_ebs[i - eb])
-                               continue;
-                       err = erase_eraseblock(i);
-                       if (err)
-                               goto out;
-                       cond_resched();
-               }
+               mtdtest_erase_good_eraseblocks(mtd, bad_ebs, eb, ebcnt);
 
                /* Check if the eraseblocks contain only 0xFF bytes */
                if (check) {
index 154275182b4b22d8943e4be1ba6d5eb119694e3a..f5aa4b02cfa676f3270dc9b9a87bd87ef031893d 100644 (file)
@@ -1343,7 +1343,7 @@ out:
 static int invalidate_fastmap(struct ubi_device *ubi,
                              struct ubi_fastmap_layout *fm)
 {
-       int ret, i;
+       int ret;
        struct ubi_vid_hdr *vh;
 
        ret = erase_block(ubi, fm->e[0]->pnum);
@@ -1360,9 +1360,6 @@ static int invalidate_fastmap(struct ubi_device *ubi,
        vh->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
        ret = ubi_io_write_vid_hdr(ubi, fm->e[0]->pnum, vh);
 
-       for (i = 0; i < fm->used_blocks; i++)
-               ubi_wl_put_fm_peb(ubi, fm->e[i], i, fm->to_be_tortured[i]);
-
        return ret;
 }
 
index 5df49d3cb5c7c05e7644bd9e534950b070336d5b..c95bfb183c62b185f2f6cc17b5d84245bdd2d13d 100644 (file)
@@ -1069,6 +1069,9 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
                if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) {
                        dbg_wl("no WL needed: min used EC %d, max free EC %d",
                               e1->ec, e2->ec);
+
+                       /* Give the unused PEB back */
+                       wl_tree_add(e2, &ubi->free);
                        goto out_cancel;
                }
                self_check_in_wl_tree(ubi, e1, &ubi->used);
index 390061d096934f83c04171a1a8a62bf9d8345ec5..90102652c82a9651a09a49741453fd76dbeb47a4 100644 (file)
@@ -143,10 +143,9 @@ static inline struct bonding *__get_bond_by_port(struct port *port)
  */
 static inline struct port *__get_first_port(struct bonding *bond)
 {
-       if (bond->slave_cnt == 0)
-               return NULL;
+       struct slave *first_slave = bond_first_slave(bond);
 
-       return &(SLAVE_AD_INFO(bond->first_slave).port);
+       return first_slave ? &(SLAVE_AD_INFO(first_slave).port) : NULL;
 }
 
 /**
@@ -159,13 +158,16 @@ static inline struct port *__get_first_port(struct bonding *bond)
 static inline struct port *__get_next_port(struct port *port)
 {
        struct bonding *bond = __get_bond_by_port(port);
-       struct slave *slave = port->slave;
+       struct slave *slave = port->slave, *slave_next;
 
        // If there's no bond for this port, or this is the last slave
-       if ((bond == NULL) || (slave->next == bond->first_slave))
+       if (bond == NULL)
+               return NULL;
+       slave_next = bond_next_slave(bond, slave);
+       if (!slave_next || bond_is_first_slave(bond, slave_next))
                return NULL;
 
-       return &(SLAVE_AD_INFO(slave->next).port);
+       return &(SLAVE_AD_INFO(slave_next).port);
 }
 
 /**
@@ -178,12 +180,14 @@ static inline struct port *__get_next_port(struct port *port)
 static inline struct aggregator *__get_first_agg(struct port *port)
 {
        struct bonding *bond = __get_bond_by_port(port);
+       struct slave *first_slave;
 
        // If there's no bond for this port, or bond has no slaves
-       if ((bond == NULL) || (bond->slave_cnt == 0))
+       if (bond == NULL)
                return NULL;
+       first_slave = bond_first_slave(bond);
 
-       return &(SLAVE_AD_INFO(bond->first_slave).aggregator);
+       return first_slave ? &(SLAVE_AD_INFO(first_slave).aggregator) : NULL;
 }
 
 /**
@@ -195,14 +199,17 @@ static inline struct aggregator *__get_first_agg(struct port *port)
  */
 static inline struct aggregator *__get_next_agg(struct aggregator *aggregator)
 {
-       struct slave *slave = aggregator->slave;
+       struct slave *slave = aggregator->slave, *slave_next;
        struct bonding *bond = bond_get_bond_by_slave(slave);
 
        // If there's no bond for this aggregator, or this is the last slave
-       if ((bond == NULL) || (slave->next == bond->first_slave))
+       if (bond == NULL)
+               return NULL;
+       slave_next = bond_next_slave(bond, slave);
+       if (!slave_next || bond_is_first_slave(bond, slave_next))
                return NULL;
 
-       return &(SLAVE_AD_INFO(slave->next).aggregator);
+       return &(SLAVE_AD_INFO(slave_next).aggregator);
 }
 
 /*
@@ -2110,7 +2117,7 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
        read_lock(&bond->lock);
 
        //check if there are any slaves
-       if (bond->slave_cnt == 0)
+       if (list_empty(&bond->slave_list))
                goto re_arm;
 
        // check if agg_select_timer timer after initialize is timed out
@@ -2336,8 +2343,12 @@ void bond_3ad_handle_link_change(struct slave *slave, char link)
 int bond_3ad_set_carrier(struct bonding *bond)
 {
        struct aggregator *active;
+       struct slave *first_slave;
 
-       active = __get_active_agg(&(SLAVE_AD_INFO(bond->first_slave).aggregator));
+       first_slave = bond_first_slave(bond);
+       if (!first_slave)
+               return 0;
+       active = __get_active_agg(&(SLAVE_AD_INFO(first_slave).aggregator));
        if (active) {
                /* are enough slaves available to consider link up? */
                if (active->num_of_ports < bond->params.min_links) {
@@ -2415,6 +2426,7 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
        struct ad_info ad_info;
        int res = 1;
 
+       read_lock(&bond->lock);
        if (__bond_3ad_get_active_agg_info(bond, &ad_info)) {
                pr_debug("%s: Error: __bond_3ad_get_active_agg_info failed\n",
                         dev->name);
@@ -2432,7 +2444,7 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
 
        slave_agg_no = bond->xmit_hash_policy(skb, slaves_in_agg);
 
-       bond_for_each_slave(bond, slave, i) {
+       bond_for_each_slave(bond, slave) {
                struct aggregator *agg = SLAVE_AD_INFO(slave).port.aggregator;
 
                if (agg && (agg->aggregator_identifier == agg_id)) {
@@ -2464,6 +2476,7 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
        }
 
 out:
+       read_unlock(&bond->lock);
        if (res) {
                /* no suitable interface, frame not sent */
                kfree_skb(skb);
@@ -2501,7 +2514,6 @@ int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding *bond,
  */
 void bond_3ad_update_lacp_rate(struct bonding *bond)
 {
-       int i;
        struct slave *slave;
        struct port *port = NULL;
        int lacp_fast;
@@ -2509,7 +2521,7 @@ void bond_3ad_update_lacp_rate(struct bonding *bond)
        write_lock_bh(&bond->lock);
        lacp_fast = bond->params.lacp_fast;
 
-       bond_for_each_slave(bond, slave, i) {
+       bond_for_each_slave(bond, slave) {
                port = &(SLAVE_AD_INFO(slave).port);
                if (port->slave == NULL)
                        continue;
index 4ea8ed150d469d55c741d63c09fa4fc7f7a29fa5..3a5db7b1df6845829c686bd8cece52e0467190e5 100644 (file)
@@ -224,13 +224,12 @@ static struct slave *tlb_get_least_loaded_slave(struct bonding *bond)
 {
        struct slave *slave, *least_loaded;
        long long max_gap;
-       int i;
 
        least_loaded = NULL;
        max_gap = LLONG_MIN;
 
        /* Find the slave with the largest gap */
-       bond_for_each_slave(bond, slave, i) {
+       bond_for_each_slave(bond, slave) {
                if (SLAVE_IS_OK(slave)) {
                        long long gap = compute_gap(slave);
 
@@ -386,11 +385,10 @@ static struct slave *rlb_next_rx_slave(struct bonding *bond)
        struct slave *rx_slave, *slave, *start_at;
        int i = 0;
 
-       if (bond_info->next_rx_slave) {
+       if (bond_info->next_rx_slave)
                start_at = bond_info->next_rx_slave;
-       } else {
-               start_at = bond->first_slave;
-       }
+       else
+               start_at = bond_first_slave(bond);
 
        rx_slave = NULL;
 
@@ -405,7 +403,8 @@ static struct slave *rlb_next_rx_slave(struct bonding *bond)
        }
 
        if (rx_slave) {
-               bond_info->next_rx_slave = rx_slave->next;
+               slave = bond_next_slave(bond, rx_slave);
+               bond_info->next_rx_slave = slave;
        }
 
        return rx_slave;
@@ -1173,9 +1172,8 @@ static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slav
 {
        struct slave *tmp_slave1, *free_mac_slave = NULL;
        struct slave *has_bond_addr = bond->curr_active_slave;
-       int i;
 
-       if (bond->slave_cnt == 0) {
+       if (list_empty(&bond->slave_list)) {
                /* this is the first slave */
                return 0;
        }
@@ -1196,7 +1194,7 @@ static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slav
        /* The slave's address is equal to the address of the bond.
         * Search for a spare address in the bond for this slave.
         */
-       bond_for_each_slave(bond, tmp_slave1, i) {
+       bond_for_each_slave(bond, tmp_slave1) {
                if (!bond_slave_has_mac(bond, tmp_slave1->perm_hwaddr)) {
                        /* no slave has tmp_slave1's perm addr
                         * as its curr addr
@@ -1246,17 +1244,15 @@ static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slav
  */
 static int alb_set_mac_address(struct bonding *bond, void *addr)
 {
-       struct sockaddr sa;
-       struct slave *slave, *stop_at;
        char tmp_addr[ETH_ALEN];
+       struct slave *slave;
+       struct sockaddr sa;
        int res;
-       int i;
 
-       if (bond->alb_info.rlb_enabled) {
+       if (bond->alb_info.rlb_enabled)
                return 0;
-       }
 
-       bond_for_each_slave(bond, slave, i) {
+       bond_for_each_slave(bond, slave) {
                /* save net_device's current hw address */
                memcpy(tmp_addr, slave->dev->dev_addr, ETH_ALEN);
 
@@ -1276,8 +1272,7 @@ unwind:
        sa.sa_family = bond->dev->type;
 
        /* unwind from head to the slave that failed */
-       stop_at = slave;
-       bond_for_each_slave_from_to(bond, slave, i, bond->first_slave, stop_at) {
+       bond_for_each_slave_continue_reverse(bond, slave) {
                memcpy(tmp_addr, slave->dev->dev_addr, ETH_ALEN);
                dev_set_mac_address(slave->dev, &sa);
                memcpy(slave->dev->dev_addr, tmp_addr, ETH_ALEN);
@@ -1342,6 +1337,7 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
 
        /* make sure that the curr_active_slave do not change during tx
         */
+       read_lock(&bond->lock);
        read_lock(&bond->curr_slave_lock);
 
        switch (ntohs(skb->protocol)) {
@@ -1446,11 +1442,12 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
        }
 
        read_unlock(&bond->curr_slave_lock);
-
+       read_unlock(&bond->lock);
        if (res) {
                /* no suitable interface, frame not sent */
                kfree_skb(skb);
        }
+
        return NETDEV_TX_OK;
 }
 
@@ -1460,11 +1457,10 @@ void bond_alb_monitor(struct work_struct *work)
                                            alb_work.work);
        struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
        struct slave *slave;
-       int i;
 
        read_lock(&bond->lock);
 
-       if (bond->slave_cnt == 0) {
+       if (list_empty(&bond->slave_list)) {
                bond_info->tx_rebalance_counter = 0;
                bond_info->lp_counter = 0;
                goto re_arm;
@@ -1482,9 +1478,8 @@ void bond_alb_monitor(struct work_struct *work)
                 */
                read_lock(&bond->curr_slave_lock);
 
-               bond_for_each_slave(bond, slave, i) {
+               bond_for_each_slave(bond, slave)
                        alb_send_learning_packets(slave, slave->dev->dev_addr);
-               }
 
                read_unlock(&bond->curr_slave_lock);
 
@@ -1496,7 +1491,7 @@ void bond_alb_monitor(struct work_struct *work)
 
                read_lock(&bond->curr_slave_lock);
 
-               bond_for_each_slave(bond, slave, i) {
+               bond_for_each_slave(bond, slave) {
                        tlb_clear_slave(bond, slave, 1);
                        if (slave == bond->curr_active_slave) {
                                SLAVE_TLB_INFO(slave).load =
@@ -1602,9 +1597,8 @@ int bond_alb_init_slave(struct bonding *bond, struct slave *slave)
  */
 void bond_alb_deinit_slave(struct bonding *bond, struct slave *slave)
 {
-       if (bond->slave_cnt > 1) {
+       if (!list_empty(&bond->slave_list))
                alb_change_hw_addr_on_detach(bond, slave);
-       }
 
        tlb_clear_slave(bond, slave, 0);
 
@@ -1661,9 +1655,8 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
 {
        struct slave *swap_slave;
 
-       if (bond->curr_active_slave == new_slave) {
+       if (bond->curr_active_slave == new_slave)
                return;
-       }
 
        if (bond->curr_active_slave && bond->alb_info.primary_is_promisc) {
                dev_set_promiscuity(bond->curr_active_slave->dev, -1);
@@ -1672,11 +1665,10 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
        }
 
        swap_slave = bond->curr_active_slave;
-       bond->curr_active_slave = new_slave;
+       rcu_assign_pointer(bond->curr_active_slave, new_slave);
 
-       if (!new_slave || (bond->slave_cnt == 0)) {
+       if (!new_slave || list_empty(&bond->slave_list))
                return;
-       }
 
        /* set the new curr_active_slave to the bonds mac address
         * i.e. swap mac addresses of old curr_active_slave and new curr_active_slave
@@ -1689,9 +1681,8 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
         * ignored so we can mess with their MAC addresses without
         * fear of interference from transmit activity.
         */
-       if (swap_slave) {
+       if (swap_slave)
                tlb_clear_slave(bond, swap_slave, 1);
-       }
        tlb_clear_slave(bond, new_slave, 1);
 
        write_unlock_bh(&bond->curr_slave_lock);
index e48cb339c0c6e5b71eb4ec24bef6d4f8238ea665..4264a7631cbab0ca581e2cb35235300655f79bd2 100644 (file)
@@ -77,6 +77,7 @@
 #include <net/net_namespace.h>
 #include <net/netns/generic.h>
 #include <net/pkt_sched.h>
+#include <linux/rculist.h>
 #include "bonding.h"
 #include "bond_3ad.h"
 #include "bond_alb.h"
@@ -106,7 +107,7 @@ static char *arp_ip_target[BOND_MAX_ARP_TARGETS];
 static char *arp_validate;
 static char *arp_all_targets;
 static char *fail_over_mac;
-static int all_slaves_active = 0;
+static int all_slaves_active;
 static struct bond_params bonding_defaults;
 static int resend_igmp = BOND_DEFAULT_RESEND_IGMP;
 
@@ -273,7 +274,7 @@ const char *bond_mode_name(int mode)
                [BOND_MODE_ALB] = "adaptive load balancing",
        };
 
-       if (mode < 0 || mode > BOND_MODE_ALB)
+       if (mode < BOND_MODE_ROUNDROBIN || mode > BOND_MODE_ALB)
                return "unknown";
 
        return names[mode];
@@ -441,10 +442,10 @@ static int bond_vlan_rx_add_vid(struct net_device *bond_dev,
                                __be16 proto, u16 vid)
 {
        struct bonding *bond = netdev_priv(bond_dev);
-       struct slave *slave, *stop_at;
-       int i, res;
+       struct slave *slave;
+       int res;
 
-       bond_for_each_slave(bond, slave, i) {
+       bond_for_each_slave(bond, slave) {
                res = vlan_vid_add(slave->dev, proto, vid);
                if (res)
                        goto unwind;
@@ -454,15 +455,14 @@ static int bond_vlan_rx_add_vid(struct net_device *bond_dev,
        if (res) {
                pr_err("%s: Error: Failed to add vlan id %d\n",
                       bond_dev->name, vid);
-               return res;
+               goto unwind;
        }
 
        return 0;
 
 unwind:
-       /* unwind from head to the slave that failed */
-       stop_at = slave;
-       bond_for_each_slave_from_to(bond, slave, i, bond->first_slave, stop_at)
+       /* unwind from the slave that failed */
+       bond_for_each_slave_continue_reverse(bond, slave)
                vlan_vid_del(slave->dev, proto, vid);
 
        return res;
@@ -478,9 +478,9 @@ static int bond_vlan_rx_kill_vid(struct net_device *bond_dev,
 {
        struct bonding *bond = netdev_priv(bond_dev);
        struct slave *slave;
-       int i, res;
+       int res;
 
-       bond_for_each_slave(bond, slave, i)
+       bond_for_each_slave(bond, slave)
                vlan_vid_del(slave->dev, proto, vid);
 
        res = bond_del_vlan(bond, vid);
@@ -493,33 +493,6 @@ static int bond_vlan_rx_kill_vid(struct net_device *bond_dev,
        return 0;
 }
 
-static void bond_add_vlans_on_slave(struct bonding *bond, struct net_device *slave_dev)
-{
-       struct vlan_entry *vlan;
-       int res;
-
-       list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
-               res = vlan_vid_add(slave_dev, htons(ETH_P_8021Q),
-                                  vlan->vlan_id);
-               if (res)
-                       pr_warning("%s: Failed to add vlan id %d to device %s\n",
-                                  bond->dev->name, vlan->vlan_id,
-                                  slave_dev->name);
-       }
-}
-
-static void bond_del_vlans_from_slave(struct bonding *bond,
-                                     struct net_device *slave_dev)
-{
-       struct vlan_entry *vlan;
-
-       list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
-               if (!vlan->vlan_id)
-                       continue;
-               vlan_vid_del(slave_dev, htons(ETH_P_8021Q), vlan->vlan_id);
-       }
-}
-
 /*------------------------------- Link status -------------------------------*/
 
 /*
@@ -532,15 +505,14 @@ static void bond_del_vlans_from_slave(struct bonding *bond,
 static int bond_set_carrier(struct bonding *bond)
 {
        struct slave *slave;
-       int i;
 
-       if (bond->slave_cnt == 0)
+       if (list_empty(&bond->slave_list))
                goto down;
 
        if (bond->params.mode == BOND_MODE_8023AD)
                return bond_3ad_set_carrier(bond);
 
-       bond_for_each_slave(bond, slave, i) {
+       bond_for_each_slave(bond, slave) {
                if (slave->link == BOND_LINK_UP) {
                        if (!netif_carrier_ok(bond->dev)) {
                                netif_carrier_on(bond->dev);
@@ -681,8 +653,8 @@ static int bond_set_promiscuity(struct bonding *bond, int inc)
                }
        } else {
                struct slave *slave;
-               int i;
-               bond_for_each_slave(bond, slave, i) {
+
+               bond_for_each_slave(bond, slave) {
                        err = dev_set_promiscuity(slave->dev, inc);
                        if (err)
                                return err;
@@ -705,8 +677,8 @@ static int bond_set_allmulti(struct bonding *bond, int inc)
                }
        } else {
                struct slave *slave;
-               int i;
-               bond_for_each_slave(bond, slave, i) {
+
+               bond_for_each_slave(bond, slave) {
                        err = dev_set_allmulti(slave->dev, inc);
                        if (err)
                                return err;
@@ -715,15 +687,6 @@ static int bond_set_allmulti(struct bonding *bond, int inc)
        return err;
 }
 
-static void __bond_resend_igmp_join_requests(struct net_device *dev)
-{
-       struct in_device *in_dev;
-
-       in_dev = __in_dev_get_rcu(dev);
-       if (in_dev)
-               ip_mc_rejoin_groups(in_dev);
-}
-
 /*
  * Retrieve the list of registered multicast addresses for the bonding
  * device and retransmit an IGMP JOIN request to the current active
@@ -731,33 +694,12 @@ static void __bond_resend_igmp_join_requests(struct net_device *dev)
  */
 static void bond_resend_igmp_join_requests(struct bonding *bond)
 {
-       struct net_device *bond_dev, *vlan_dev, *upper_dev;
-       struct vlan_entry *vlan;
-
-       read_lock(&bond->lock);
-       rcu_read_lock();
-
-       bond_dev = bond->dev;
-
-       /* rejoin all groups on bond device */
-       __bond_resend_igmp_join_requests(bond_dev);
-
-       /*
-        * if bond is enslaved to a bridge,
-        * then rejoin all groups on its master
-        */
-       upper_dev = netdev_master_upper_dev_get_rcu(bond_dev);
-       if (upper_dev && upper_dev->priv_flags & IFF_EBRIDGE)
-               __bond_resend_igmp_join_requests(upper_dev);
-
-       /* rejoin all groups on vlan devices */
-       list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
-               vlan_dev = __vlan_find_dev_deep(bond_dev, htons(ETH_P_8021Q),
-                                               vlan->vlan_id);
-               if (vlan_dev)
-                       __bond_resend_igmp_join_requests(vlan_dev);
+       if (!rtnl_trylock()) {
+               queue_delayed_work(bond->wq, &bond->mcast_work, 1);
+               return;
        }
-       rcu_read_unlock();
+       call_netdevice_notifiers(NETDEV_RESEND_IGMP, bond->dev);
+       rtnl_unlock();
 
        /* We use curr_slave_lock to protect against concurrent access to
         * igmp_retrans from multiple running instances of this function and
@@ -769,7 +711,6 @@ static void bond_resend_igmp_join_requests(struct bonding *bond)
                queue_delayed_work(bond->wq, &bond->mcast_work, HZ/5);
        }
        write_unlock_bh(&bond->curr_slave_lock);
-       read_unlock(&bond->lock);
 }
 
 static void bond_resend_igmp_join_requests_delayed(struct work_struct *work)
@@ -808,6 +749,8 @@ static void bond_hw_addr_flush(struct net_device *bond_dev,
 static void bond_hw_addr_swap(struct bonding *bond, struct slave *new_active,
                              struct slave *old_active)
 {
+       ASSERT_RTNL();
+
        if (old_active) {
                if (bond->dev->flags & IFF_PROMISC)
                        dev_set_promiscuity(old_active->dev, -1);
@@ -966,9 +909,8 @@ static struct slave *bond_find_best_slave(struct bonding *bond)
        new_active = bond->curr_active_slave;
 
        if (!new_active) { /* there were no active slaves left */
-               if (bond->slave_cnt > 0)   /* found one slave */
-                       new_active = bond->first_slave;
-               else
+               new_active = bond_first_slave(bond);
+               if (!new_active)
                        return NULL; /* still no slave, return NULL */
        }
 
@@ -1071,7 +1013,7 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
                if (new_active)
                        bond_set_slave_active_flags(new_active);
        } else {
-               bond->curr_active_slave = new_active;
+               rcu_assign_pointer(bond->curr_active_slave, new_active);
        }
 
        if (bond->params.mode == BOND_MODE_ACTIVEBACKUP) {
@@ -1115,7 +1057,7 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
            ((USES_PRIMARY(bond->params.mode) && new_active) ||
             bond->params.mode == BOND_MODE_ROUNDROBIN)) {
                bond->igmp_retrans = bond->params.resend_igmp;
-               queue_delayed_work(bond->wq, &bond->mcast_work, 0);
+               queue_delayed_work(bond->wq, &bond->mcast_work, 1);
        }
 }
 
@@ -1161,17 +1103,7 @@ void bond_select_active_slave(struct bonding *bond)
  */
 static void bond_attach_slave(struct bonding *bond, struct slave *new_slave)
 {
-       if (bond->first_slave == NULL) { /* attaching the first slave */
-               new_slave->next = new_slave;
-               new_slave->prev = new_slave;
-               bond->first_slave = new_slave;
-       } else {
-               new_slave->next = bond->first_slave;
-               new_slave->prev = bond->first_slave->prev;
-               new_slave->next->prev = new_slave;
-               new_slave->prev->next = new_slave;
-       }
-
+       list_add_tail_rcu(&new_slave->list, &bond->slave_list);
        bond->slave_cnt++;
 }
 
@@ -1187,22 +1119,7 @@ static void bond_attach_slave(struct bonding *bond, struct slave *new_slave)
  */
 static void bond_detach_slave(struct bonding *bond, struct slave *slave)
 {
-       if (slave->next)
-               slave->next->prev = slave->prev;
-
-       if (slave->prev)
-               slave->prev->next = slave->next;
-
-       if (bond->first_slave == slave) { /* slave is the first slave */
-               if (bond->slave_cnt > 1) { /* there are more slave */
-                       bond->first_slave = slave->next;
-               } else {
-                       bond->first_slave = NULL; /* slave was the last one */
-               }
-       }
-
-       slave->next = NULL;
-       slave->prev = NULL;
+       list_del_rcu(&slave->list);
        bond->slave_cnt--;
 }
 
@@ -1249,47 +1166,31 @@ static void bond_poll_controller(struct net_device *bond_dev)
 {
 }
 
-static void __bond_netpoll_cleanup(struct bonding *bond)
+static void bond_netpoll_cleanup(struct net_device *bond_dev)
 {
+       struct bonding *bond = netdev_priv(bond_dev);
        struct slave *slave;
-       int i;
 
-       bond_for_each_slave(bond, slave, i)
+       bond_for_each_slave(bond, slave)
                if (IS_UP(slave->dev))
                        slave_disable_netpoll(slave);
 }
-static void bond_netpoll_cleanup(struct net_device *bond_dev)
-{
-       struct bonding *bond = netdev_priv(bond_dev);
-
-       read_lock(&bond->lock);
-       __bond_netpoll_cleanup(bond);
-       read_unlock(&bond->lock);
-}
 
 static int bond_netpoll_setup(struct net_device *dev, struct netpoll_info *ni, gfp_t gfp)
 {
        struct bonding *bond = netdev_priv(dev);
        struct slave *slave;
-       int i, err = 0;
+       int err = 0;
 
-       read_lock(&bond->lock);
-       bond_for_each_slave(bond, slave, i) {
+       bond_for_each_slave(bond, slave) {
                err = slave_enable_netpoll(slave);
                if (err) {
-                       __bond_netpoll_cleanup(bond);
+                       bond_netpoll_cleanup(dev);
                        break;
                }
        }
-       read_unlock(&bond->lock);
        return err;
 }
-
-static struct netpoll_info *bond_netpoll_info(struct bonding *bond)
-{
-       return bond->dev->npinfo;
-}
-
 #else
 static inline int slave_enable_netpoll(struct slave *slave)
 {
@@ -1311,11 +1212,10 @@ static netdev_features_t bond_fix_features(struct net_device *dev,
        struct slave *slave;
        struct bonding *bond = netdev_priv(dev);
        netdev_features_t mask;
-       int i;
 
        read_lock(&bond->lock);
 
-       if (!bond->first_slave) {
+       if (list_empty(&bond->slave_list)) {
                /* Disable adding VLANs to empty bond. But why? --mq */
                features |= NETIF_F_VLAN_CHALLENGED;
                goto out;
@@ -1325,7 +1225,7 @@ static netdev_features_t bond_fix_features(struct net_device *dev,
        features &= ~NETIF_F_ONE_FOR_ALL;
        features |= NETIF_F_ALL_FOR_ALL;
 
-       bond_for_each_slave(bond, slave, i) {
+       bond_for_each_slave(bond, slave) {
                features = netdev_increment_features(features,
                                                     slave->dev->features,
                                                     mask);
@@ -1349,15 +1249,14 @@ static void bond_compute_features(struct bonding *bond)
        unsigned short max_hard_header_len = ETH_HLEN;
        unsigned int gso_max_size = GSO_MAX_SIZE;
        u16 gso_max_segs = GSO_MAX_SEGS;
-       int i;
        unsigned int flags, dst_release_flag = IFF_XMIT_DST_RELEASE;
 
        read_lock(&bond->lock);
 
-       if (!bond->first_slave)
+       if (list_empty(&bond->slave_list))
                goto done;
 
-       bond_for_each_slave(bond, slave, i) {
+       bond_for_each_slave(bond, slave) {
                vlan_features = netdev_increment_features(vlan_features,
                        slave->dev->vlan_features, BOND_VLAN_FEATURES);
 
@@ -1545,7 +1444,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
         * bond ether type mutual exclusion - don't allow slaves of dissimilar
         * ether type (eg ARPHRD_ETHER and ARPHRD_INFINIBAND) share the same bond
         */
-       if (bond->slave_cnt == 0) {
+       if (list_empty(&bond->slave_list)) {
                if (bond_dev->type != slave_dev->type) {
                        pr_debug("%s: change device type from %d to %d\n",
                                 bond_dev->name,
@@ -1584,7 +1483,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
        }
 
        if (slave_ops->ndo_set_mac_address == NULL) {
-               if (bond->slave_cnt == 0) {
+               if (list_empty(&bond->slave_list)) {
                        pr_warning("%s: Warning: The first slave device specified does not support setting the MAC address. Setting fail_over_mac to active.",
                                   bond_dev->name);
                        bond->params.fail_over_mac = BOND_FOM_ACTIVE;
@@ -1600,7 +1499,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 
        /* If this is the first slave, then we need to set the master's hardware
         * address to be the same as the slave's. */
-       if (!bond->slave_cnt && bond->dev->addr_assign_type == NET_ADDR_RANDOM)
+       if (list_empty(&bond->slave_list) &&
+           bond->dev->addr_assign_type == NET_ADDR_RANDOM)
                bond_set_dev_addr(bond->dev, slave_dev);
 
        new_slave = kzalloc(sizeof(struct slave), GFP_KERNEL);
@@ -1608,7 +1508,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
                res = -ENOMEM;
                goto err_undo_flags;
        }
-
+       INIT_LIST_HEAD(&new_slave->list);
        /*
         * Set the new_slave's queue_id to be zero.  Queue ID mapping
         * is set via sysfs or module option if desired.
@@ -1703,7 +1603,11 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
                dev_mc_add(slave_dev, lacpdu_multicast);
        }
 
-       bond_add_vlans_on_slave(bond, slave_dev);
+       if (vlan_vids_add_by_dev(slave_dev, bond_dev)) {
+               pr_err("%s: Error: Couldn't add bond vlan ids to %s\n",
+                      bond_dev->name, slave_dev->name);
+               goto err_close;
+       }
 
        write_lock_bh(&bond->lock);
 
@@ -1794,15 +1698,18 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
                 */
                bond_set_slave_inactive_flags(new_slave);
                /* if this is the first slave */
-               if (bond->slave_cnt == 1) {
+               if (bond_first_slave(bond) == new_slave) {
                        SLAVE_AD_INFO(new_slave).id = 1;
                        /* Initialize AD with the number of times that the AD timer is called in 1 second
                         * can be called only after the mac address of the bond is set
                         */
                        bond_3ad_initialize(bond, 1000/AD_TIMER_INTERVAL);
                } else {
+                       struct slave *prev_slave;
+
+                       prev_slave = bond_prev_slave(bond, new_slave);
                        SLAVE_AD_INFO(new_slave).id =
-                               SLAVE_AD_INFO(new_slave->prev).id + 1;
+                               SLAVE_AD_INFO(prev_slave).id + 1;
                }
 
                bond_3ad_bind_slave(new_slave);
@@ -1824,7 +1731,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
                 * so we can change it without calling change_active_interface()
                 */
                if (!bond->curr_active_slave && new_slave->link == BOND_LINK_UP)
-                       bond->curr_active_slave = new_slave;
+                       rcu_assign_pointer(bond->curr_active_slave, new_slave);
 
                break;
        } /* switch(bond_mode) */
@@ -1834,7 +1741,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
        bond_set_carrier(bond);
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
-       slave_dev->npinfo = bond_netpoll_info(bond);
+       slave_dev->npinfo = bond->dev->npinfo;
        if (slave_dev->npinfo) {
                if (slave_enable_netpoll(new_slave)) {
                        read_unlock(&bond->lock);
@@ -1876,7 +1783,7 @@ err_detach:
        if (!USES_PRIMARY(bond->params.mode))
                bond_hw_addr_flush(bond_dev, slave_dev);
 
-       bond_del_vlans_from_slave(bond, slave_dev);
+       vlan_vids_del_by_dev(slave_dev, bond_dev);
        write_lock_bh(&bond->lock);
        bond_detach_slave(bond, new_slave);
        if (bond->primary_slave == new_slave)
@@ -1921,7 +1828,7 @@ err_free:
 err_undo_flags:
        bond_compute_features(bond);
        /* Enslave of first slave has failed and we need to fix master's mac */
-       if (bond->slave_cnt == 0 &&
+       if (list_empty(&bond->slave_list) &&
            ether_addr_equal(bond_dev->dev_addr, slave_dev->dev_addr))
                eth_hw_addr_random(bond_dev);
 
@@ -1977,15 +1884,6 @@ static int __bond_release_one(struct net_device *bond_dev,
        netdev_rx_handler_unregister(slave_dev);
        write_lock_bh(&bond->lock);
 
-       if (!all && !bond->params.fail_over_mac) {
-               if (ether_addr_equal(bond_dev->dev_addr, slave->perm_hwaddr) &&
-                   bond->slave_cnt > 1)
-                       pr_warning("%s: Warning: the permanent HWaddr of %s - %pM - is still in use by %s. Set the HWaddr of %s to a different address to avoid conflicts.\n",
-                                  bond_dev->name, slave_dev->name,
-                                  slave->perm_hwaddr,
-                                  bond_dev->name, slave_dev->name);
-       }
-
        /* Inform AD package of unbinding of slave. */
        if (bond->params.mode == BOND_MODE_8023AD) {
                /* must be called before the slave is
@@ -2006,6 +1904,15 @@ static int __bond_release_one(struct net_device *bond_dev,
        /* release the slave from its bond */
        bond_detach_slave(bond, slave);
 
+       if (!all && !bond->params.fail_over_mac) {
+               if (ether_addr_equal(bond_dev->dev_addr, slave->perm_hwaddr) &&
+                   !list_empty(&bond->slave_list))
+                       pr_warn("%s: Warning: the permanent HWaddr of %s - %pM - is still in use by %s. Set the HWaddr of %s to a different address to avoid conflicts.\n",
+                                  bond_dev->name, slave_dev->name,
+                                  slave->perm_hwaddr,
+                                  bond_dev->name, slave_dev->name);
+       }
+
        if (bond->primary_slave == slave)
                bond->primary_slave = NULL;
 
@@ -2024,7 +1931,7 @@ static int __bond_release_one(struct net_device *bond_dev,
        }
 
        if (all) {
-               bond->curr_active_slave = NULL;
+               rcu_assign_pointer(bond->curr_active_slave, NULL);
        } else if (oldcurrent == slave) {
                /*
                 * Note that we hold RTNL over this sequence, so there
@@ -2042,7 +1949,7 @@ static int __bond_release_one(struct net_device *bond_dev,
                write_lock_bh(&bond->lock);
        }
 
-       if (bond->slave_cnt == 0) {
+       if (list_empty(&bond->slave_list)) {
                bond_set_carrier(bond);
                eth_hw_addr_random(bond_dev);
 
@@ -2056,8 +1963,9 @@ static int __bond_release_one(struct net_device *bond_dev,
 
        write_unlock_bh(&bond->lock);
        unblock_netpoll_tx();
+       synchronize_rcu();
 
-       if (bond->slave_cnt == 0) {
+       if (list_empty(&bond->slave_list)) {
                call_netdevice_notifiers(NETDEV_CHANGEADDR, bond->dev);
                call_netdevice_notifiers(NETDEV_RELEASE, bond->dev);
        }
@@ -2071,7 +1979,7 @@ static int __bond_release_one(struct net_device *bond_dev,
        /* must do this from outside any spinlocks */
        bond_destroy_slave_symlinks(bond_dev, slave_dev);
 
-       bond_del_vlans_from_slave(bond, slave_dev);
+       vlan_vids_del_by_dev(slave_dev, bond_dev);
 
        /* If the mode USES_PRIMARY, then this cases was handled above by
         * bond_change_active_slave(..., NULL)
@@ -2128,7 +2036,7 @@ static int  bond_release_and_destroy(struct net_device *bond_dev,
        int ret;
 
        ret = bond_release(bond_dev, slave_dev);
-       if ((ret == 0) && (bond->slave_cnt == 0)) {
+       if (ret == 0 && list_empty(&bond->slave_list)) {
                bond_dev->priv_flags |= IFF_DISABLE_NETPOLL;
                pr_info("%s: destroying bond %s.\n",
                        bond_dev->name, bond_dev->name);
@@ -2165,23 +2073,19 @@ static int bond_ioctl_change_active(struct net_device *bond_dev, struct net_devi
 
        read_lock(&bond->lock);
 
-       read_lock(&bond->curr_slave_lock);
        old_active = bond->curr_active_slave;
-       read_unlock(&bond->curr_slave_lock);
-
        new_active = bond_get_slave_by_dev(bond, slave_dev);
-
        /*
         * Changing to the current active: do nothing; return success.
         */
-       if (new_active && (new_active == old_active)) {
+       if (new_active && new_active == old_active) {
                read_unlock(&bond->lock);
                return 0;
        }
 
-       if ((new_active) &&
-           (old_active) &&
-           (new_active->link == BOND_LINK_UP) &&
+       if (new_active &&
+           old_active &&
+           new_active->link == BOND_LINK_UP &&
            IS_UP(new_active->dev)) {
                block_netpoll_tx();
                write_lock_bh(&bond->curr_slave_lock);
@@ -2213,13 +2117,12 @@ static int bond_info_query(struct net_device *bond_dev, struct ifbond *info)
 static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *info)
 {
        struct bonding *bond = netdev_priv(bond_dev);
+       int i = 0, res = -ENODEV;
        struct slave *slave;
-       int i, res = -ENODEV;
 
        read_lock(&bond->lock);
-
-       bond_for_each_slave(bond, slave, i) {
-               if (i == (int)info->slave_id) {
+       bond_for_each_slave(bond, slave) {
+               if (i++ == (int)info->slave_id) {
                        res = 0;
                        strcpy(info->slave_name, slave->dev->name);
                        info->link = slave->link;
@@ -2228,7 +2131,6 @@ static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *in
                        break;
                }
        }
-
        read_unlock(&bond->lock);
 
        return res;
@@ -2239,13 +2141,13 @@ static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *in
 
 static int bond_miimon_inspect(struct bonding *bond)
 {
+       int link_state, commit = 0;
        struct slave *slave;
-       int i, link_state, commit = 0;
        bool ignore_updelay;
 
        ignore_updelay = !bond->curr_active_slave ? true : false;
 
-       bond_for_each_slave(bond, slave, i) {
+       bond_for_each_slave(bond, slave) {
                slave->new_link = BOND_LINK_NOCHANGE;
 
                link_state = bond_check_dev_link(bond, slave->dev, 0);
@@ -2340,9 +2242,8 @@ static int bond_miimon_inspect(struct bonding *bond)
 static void bond_miimon_commit(struct bonding *bond)
 {
        struct slave *slave;
-       int i;
 
-       bond_for_each_slave(bond, slave, i) {
+       bond_for_each_slave(bond, slave) {
                switch (slave->new_link) {
                case BOND_LINK_NOCHANGE:
                        continue;
@@ -2447,7 +2348,7 @@ void bond_mii_monitor(struct work_struct *work)
 
        delay = msecs_to_jiffies(bond->params.miimon);
 
-       if (bond->slave_cnt == 0)
+       if (list_empty(&bond->slave_list))
                goto re_arm;
 
        should_notify_peers = bond_should_notify_peers(bond);
@@ -2713,6 +2614,20 @@ out_unlock:
        return RX_HANDLER_ANOTHER;
 }
 
+/* function to verify if we're in the arp_interval timeslice, returns true if
+ * (last_act - arp_interval) <= jiffies <= (last_act + mod * arp_interval +
+ * arp_interval/2) . the arp_interval/2 is needed for really fast networks.
+ */
+static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act,
+                                 int mod)
+{
+       int delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval);
+
+       return time_in_range(jiffies,
+                            last_act - delta_in_ticks,
+                            last_act + mod * delta_in_ticks + delta_in_ticks/2);
+}
+
 /*
  * this function is called regularly to monitor each slave's link
  * ensuring that traffic is being sent and received when arp monitoring
@@ -2726,21 +2641,13 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
                                            arp_work.work);
        struct slave *slave, *oldcurrent;
        int do_failover = 0;
-       int delta_in_ticks, extra_ticks;
-       int i;
 
        read_lock(&bond->lock);
 
-       delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval);
-       extra_ticks = delta_in_ticks / 2;
-
-       if (bond->slave_cnt == 0)
+       if (list_empty(&bond->slave_list))
                goto re_arm;
 
-       read_lock(&bond->curr_slave_lock);
        oldcurrent = bond->curr_active_slave;
-       read_unlock(&bond->curr_slave_lock);
-
        /* see if any of the previous devices are up now (i.e. they have
         * xmt and rcv traffic). the curr_active_slave does not come into
         * the picture unless it is null. also, slave->jiffies is not needed
@@ -2749,16 +2656,12 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
         * TODO: what about up/down delay in arp mode? it wasn't here before
         *       so it can wait
         */
-       bond_for_each_slave(bond, slave, i) {
+       bond_for_each_slave(bond, slave) {
                unsigned long trans_start = dev_trans_start(slave->dev);
 
                if (slave->link != BOND_LINK_UP) {
-                       if (time_in_range(jiffies,
-                               trans_start - delta_in_ticks,
-                               trans_start + delta_in_ticks + extra_ticks) &&
-                           time_in_range(jiffies,
-                               slave->dev->last_rx - delta_in_ticks,
-                               slave->dev->last_rx + delta_in_ticks + extra_ticks)) {
+                       if (bond_time_in_interval(bond, trans_start, 1) &&
+                           bond_time_in_interval(bond, slave->dev->last_rx, 1)) {
 
                                slave->link  = BOND_LINK_UP;
                                bond_set_active_slave(slave);
@@ -2786,12 +2689,8 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
                         * when the source ip is 0, so don't take the link down
                         * if we don't know our ip yet
                         */
-                       if (!time_in_range(jiffies,
-                               trans_start - delta_in_ticks,
-                               trans_start + 2 * delta_in_ticks + extra_ticks) ||
-                           !time_in_range(jiffies,
-                               slave->dev->last_rx - delta_in_ticks,
-                               slave->dev->last_rx + 2 * delta_in_ticks + extra_ticks)) {
+                       if (!bond_time_in_interval(bond, trans_start, 2) ||
+                           !bond_time_in_interval(bond, slave->dev->last_rx, 2)) {
 
                                slave->link  = BOND_LINK_DOWN;
                                bond_set_backup_slave(slave);
@@ -2831,7 +2730,8 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
 
 re_arm:
        if (bond->params.arp_interval)
-               queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks);
+               queue_delayed_work(bond->wq, &bond->arp_work,
+                                  msecs_to_jiffies(bond->params.arp_interval));
 
        read_unlock(&bond->lock);
 }
@@ -2844,32 +2744,21 @@ re_arm:
  *
  * Called with bond->lock held for read.
  */
-static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
+static int bond_ab_arp_inspect(struct bonding *bond)
 {
+       unsigned long trans_start, last_rx;
        struct slave *slave;
-       int i, commit = 0;
-       unsigned long trans_start;
-       int extra_ticks;
+       int commit = 0;
 
-       /* All the time comparisons below need some extra time. Otherwise, on
-        * fast networks the ARP probe/reply may arrive within the same jiffy
-        * as it was sent.  Then, the next time the ARP monitor is run, one
-        * arp_interval will already have passed in the comparisons.
-        */
-       extra_ticks = delta_in_ticks / 2;
-
-       bond_for_each_slave(bond, slave, i) {
+       bond_for_each_slave(bond, slave) {
                slave->new_link = BOND_LINK_NOCHANGE;
+               last_rx = slave_last_rx(bond, slave);
 
                if (slave->link != BOND_LINK_UP) {
-                       if (time_in_range(jiffies,
-                               slave_last_rx(bond, slave) - delta_in_ticks,
-                               slave_last_rx(bond, slave) + delta_in_ticks + extra_ticks)) {
-
+                       if (bond_time_in_interval(bond, last_rx, 1)) {
                                slave->new_link = BOND_LINK_UP;
                                commit++;
                        }
-
                        continue;
                }
 
@@ -2878,9 +2767,7 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
                 * active.  This avoids bouncing, as the last receive
                 * times need a full ARP monitor cycle to be updated.
                 */
-               if (time_in_range(jiffies,
-                                 slave->jiffies - delta_in_ticks,
-                                 slave->jiffies + 2 * delta_in_ticks + extra_ticks))
+               if (bond_time_in_interval(bond, slave->jiffies, 2))
                        continue;
 
                /*
@@ -2898,10 +2785,7 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
                 */
                if (!bond_is_active_slave(slave) &&
                    !bond->current_arp_slave &&
-                   !time_in_range(jiffies,
-                       slave_last_rx(bond, slave) - delta_in_ticks,
-                       slave_last_rx(bond, slave) + 3 * delta_in_ticks + extra_ticks)) {
-
+                   !bond_time_in_interval(bond, last_rx, 3)) {
                        slave->new_link = BOND_LINK_DOWN;
                        commit++;
                }
@@ -2914,13 +2798,8 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
                 */
                trans_start = dev_trans_start(slave->dev);
                if (bond_is_active_slave(slave) &&
-                   (!time_in_range(jiffies,
-                       trans_start - delta_in_ticks,
-                       trans_start + 2 * delta_in_ticks + extra_ticks) ||
-                    !time_in_range(jiffies,
-                       slave_last_rx(bond, slave) - delta_in_ticks,
-                       slave_last_rx(bond, slave) + 2 * delta_in_ticks + extra_ticks))) {
-
+                   (!bond_time_in_interval(bond, trans_start, 2) ||
+                    !bond_time_in_interval(bond, last_rx, 2))) {
                        slave->new_link = BOND_LINK_DOWN;
                        commit++;
                }
@@ -2935,24 +2814,21 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
  *
  * Called with RTNL and bond->lock for read.
  */
-static void bond_ab_arp_commit(struct bonding *bond, int delta_in_ticks)
+static void bond_ab_arp_commit(struct bonding *bond)
 {
-       struct slave *slave;
-       int i;
        unsigned long trans_start;
+       struct slave *slave;
 
-       bond_for_each_slave(bond, slave, i) {
+       bond_for_each_slave(bond, slave) {
                switch (slave->new_link) {
                case BOND_LINK_NOCHANGE:
                        continue;
 
                case BOND_LINK_UP:
                        trans_start = dev_trans_start(slave->dev);
-                       if ((!bond->curr_active_slave &&
-                            time_in_range(jiffies,
-                                          trans_start - delta_in_ticks,
-                                          trans_start + delta_in_ticks + delta_in_ticks / 2)) ||
-                           bond->curr_active_slave != slave) {
+                       if (bond->curr_active_slave != slave ||
+                           (!bond->curr_active_slave &&
+                            bond_time_in_interval(bond, trans_start, 1))) {
                                slave->link = BOND_LINK_UP;
                                if (bond->current_arp_slave) {
                                        bond_set_slave_inactive_flags(
@@ -3014,7 +2890,7 @@ do_failover:
  */
 static void bond_ab_arp_probe(struct bonding *bond)
 {
-       struct slave *slave;
+       struct slave *slave, *next_slave;
        int i;
 
        read_lock(&bond->curr_slave_lock);
@@ -3038,7 +2914,7 @@ static void bond_ab_arp_probe(struct bonding *bond)
         */
 
        if (!bond->current_arp_slave) {
-               bond->current_arp_slave = bond->first_slave;
+               bond->current_arp_slave = bond_first_slave(bond);
                if (!bond->current_arp_slave)
                        return;
        }
@@ -3046,7 +2922,8 @@ static void bond_ab_arp_probe(struct bonding *bond)
        bond_set_slave_inactive_flags(bond->current_arp_slave);
 
        /* search for next candidate */
-       bond_for_each_slave_from(bond, slave, i, bond->current_arp_slave->next) {
+       next_slave = bond_next_slave(bond, bond->current_arp_slave);
+       bond_for_each_slave_from(bond, slave, i, next_slave) {
                if (IS_UP(slave->dev)) {
                        slave->link = BOND_LINK_BACK;
                        bond_set_slave_active_flags(slave);
@@ -3087,12 +2964,12 @@ void bond_activebackup_arp_mon(struct work_struct *work)
 
        delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval);
 
-       if (bond->slave_cnt == 0)
+       if (list_empty(&bond->slave_list))
                goto re_arm;
 
        should_notify_peers = bond_should_notify_peers(bond);
 
-       if (bond_ab_arp_inspect(bond, delta_in_ticks)) {
+       if (bond_ab_arp_inspect(bond)) {
                read_unlock(&bond->lock);
 
                /* Race avoidance with bond_close flush of workqueue */
@@ -3105,7 +2982,7 @@ void bond_activebackup_arp_mon(struct work_struct *work)
 
                read_lock(&bond->lock);
 
-               bond_ab_arp_commit(bond, delta_in_ticks);
+               bond_ab_arp_commit(bond);
 
                read_unlock(&bond->lock);
                rtnl_unlock();
@@ -3234,6 +3111,10 @@ static int bond_slave_netdev_event(unsigned long event,
        case NETDEV_FEAT_CHANGE:
                bond_compute_features(bond);
                break;
+       case NETDEV_RESEND_IGMP:
+               /* Propagate to master device */
+               call_netdevice_notifiers(event, slave->bond->dev);
+               break;
        default:
                break;
        }
@@ -3403,13 +3284,12 @@ static int bond_open(struct net_device *bond_dev)
 {
        struct bonding *bond = netdev_priv(bond_dev);
        struct slave *slave;
-       int i;
 
        /* reset slave->backup and slave->inactive */
        read_lock(&bond->lock);
-       if (bond->slave_cnt > 0) {
+       if (!list_empty(&bond->slave_list)) {
                read_lock(&bond->curr_slave_lock);
-               bond_for_each_slave(bond, slave, i) {
+               bond_for_each_slave(bond, slave) {
                        if ((bond->params.mode == BOND_MODE_ACTIVEBACKUP)
                                && (slave != bond->curr_active_slave)) {
                                bond_set_slave_inactive_flags(slave);
@@ -3477,13 +3357,11 @@ static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev,
        struct bonding *bond = netdev_priv(bond_dev);
        struct rtnl_link_stats64 temp;
        struct slave *slave;
-       int i;
 
        memset(stats, 0, sizeof(*stats));
 
        read_lock_bh(&bond->lock);
-
-       bond_for_each_slave(bond, slave, i) {
+       bond_for_each_slave(bond, slave) {
                const struct rtnl_link_stats64 *sstats =
                        dev_get_stats(slave->dev, &temp);
 
@@ -3513,7 +3391,6 @@ static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev,
                stats->tx_heartbeat_errors += sstats->tx_heartbeat_errors;
                stats->tx_window_errors += sstats->tx_window_errors;
        }
-
        read_unlock_bh(&bond->lock);
 
        return stats;
@@ -3652,41 +3529,35 @@ static void bond_set_rx_mode(struct net_device *bond_dev)
 {
        struct bonding *bond = netdev_priv(bond_dev);
        struct slave *slave;
-       int i;
 
-       read_lock(&bond->lock);
+       ASSERT_RTNL();
 
        if (USES_PRIMARY(bond->params.mode)) {
-               read_lock(&bond->curr_slave_lock);
-               slave = bond->curr_active_slave;
+               slave = rtnl_dereference(bond->curr_active_slave);
                if (slave) {
                        dev_uc_sync(slave->dev, bond_dev);
                        dev_mc_sync(slave->dev, bond_dev);
                }
-               read_unlock(&bond->curr_slave_lock);
        } else {
-               bond_for_each_slave(bond, slave, i) {
+               bond_for_each_slave(bond, slave) {
                        dev_uc_sync_multiple(slave->dev, bond_dev);
                        dev_mc_sync_multiple(slave->dev, bond_dev);
                }
        }
-
-       read_unlock(&bond->lock);
 }
 
 static int bond_neigh_init(struct neighbour *n)
 {
        struct bonding *bond = netdev_priv(n->dev);
-       struct slave *slave = bond->first_slave;
        const struct net_device_ops *slave_ops;
        struct neigh_parms parms;
+       struct slave *slave;
        int ret;
 
+       slave = bond_first_slave(bond);
        if (!slave)
                return 0;
-
        slave_ops = slave->dev->netdev_ops;
-
        if (!slave_ops->ndo_neigh_setup)
                return 0;
 
@@ -3735,9 +3606,8 @@ static int bond_neigh_setup(struct net_device *dev,
 static int bond_change_mtu(struct net_device *bond_dev, int new_mtu)
 {
        struct bonding *bond = netdev_priv(bond_dev);
-       struct slave *slave, *stop_at;
+       struct slave *slave;
        int res = 0;
-       int i;
 
        pr_debug("bond=%p, name=%s, new_mtu=%d\n", bond,
                 (bond_dev ? bond_dev->name : "None"), new_mtu);
@@ -3757,10 +3627,10 @@ static int bond_change_mtu(struct net_device *bond_dev, int new_mtu)
         * call to the base driver.
         */
 
-       bond_for_each_slave(bond, slave, i) {
+       bond_for_each_slave(bond, slave) {
                pr_debug("s %p s->p %p c_m %p\n",
                         slave,
-                        slave->prev,
+                        bond_prev_slave(bond, slave),
                         slave->dev->netdev_ops->ndo_change_mtu);
 
                res = dev_set_mtu(slave->dev, new_mtu);
@@ -3785,8 +3655,7 @@ static int bond_change_mtu(struct net_device *bond_dev, int new_mtu)
 
 unwind:
        /* unwind from head to the slave that failed */
-       stop_at = slave;
-       bond_for_each_slave_from_to(bond, slave, i, bond->first_slave, stop_at) {
+       bond_for_each_slave_continue_reverse(bond, slave) {
                int tmp_res;
 
                tmp_res = dev_set_mtu(slave->dev, bond_dev->mtu);
@@ -3810,9 +3679,8 @@ static int bond_set_mac_address(struct net_device *bond_dev, void *addr)
 {
        struct bonding *bond = netdev_priv(bond_dev);
        struct sockaddr *sa = addr, tmp_sa;
-       struct slave *slave, *stop_at;
+       struct slave *slave;
        int res = 0;
-       int i;
 
        if (bond->params.mode == BOND_MODE_ALB)
                return bond_alb_set_mac_address(bond_dev, addr);
@@ -3845,7 +3713,7 @@ static int bond_set_mac_address(struct net_device *bond_dev, void *addr)
         * call to the base driver.
         */
 
-       bond_for_each_slave(bond, slave, i) {
+       bond_for_each_slave(bond, slave) {
                const struct net_device_ops *slave_ops = slave->dev->netdev_ops;
                pr_debug("slave %p %s\n", slave, slave->dev->name);
 
@@ -3877,8 +3745,7 @@ unwind:
        tmp_sa.sa_family = bond_dev->type;
 
        /* unwind from head to the slave that failed */
-       stop_at = slave;
-       bond_for_each_slave_from_to(bond, slave, i, bond->first_slave, stop_at) {
+       bond_for_each_slave_continue_reverse(bond, slave) {
                int tmp_res;
 
                tmp_res = dev_set_mac_address(slave->dev, &tmp_sa);
@@ -3891,12 +3758,50 @@ unwind:
        return res;
 }
 
+/**
+ * bond_xmit_slave_id - transmit skb through slave with slave_id
+ * @bond: bonding device that is transmitting
+ * @skb: buffer to transmit
+ * @slave_id: slave id up to slave_cnt-1 through which to transmit
+ *
+ * This function tries to transmit through slave with slave_id but in case
+ * it fails, it tries to find the first available slave for transmission.
+ * The skb is consumed in all cases, thus the function is void.
+ */
+void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int slave_id)
+{
+       struct slave *slave;
+       int i = slave_id;
+
+       /* Here we start from the slave with slave_id */
+       bond_for_each_slave_rcu(bond, slave) {
+               if (--i < 0) {
+                       if (slave_can_tx(slave)) {
+                               bond_dev_queue_xmit(bond, skb, slave->dev);
+                               return;
+                       }
+               }
+       }
+
+       /* Here we start from the first slave up to slave_id */
+       i = slave_id;
+       bond_for_each_slave_rcu(bond, slave) {
+               if (--i < 0)
+                       break;
+               if (slave_can_tx(slave)) {
+                       bond_dev_queue_xmit(bond, skb, slave->dev);
+                       return;
+               }
+       }
+       /* no slave that can tx has been found */
+       kfree_skb(skb);
+}
+
 static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev)
 {
        struct bonding *bond = netdev_priv(bond_dev);
-       struct slave *slave, *start_at;
-       int i, slave_no, res = 1;
        struct iphdr *iph = ip_hdr(skb);
+       struct slave *slave;
 
        /*
         * Start with the curr_active_slave that joined the bond as the
@@ -3905,50 +3810,20 @@ static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev
         * send the join/membership reports.  The curr_active_slave found
         * will send all of this type of traffic.
         */
-       if ((iph->protocol == IPPROTO_IGMP) &&
-           (skb->protocol == htons(ETH_P_IP))) {
-
-               read_lock(&bond->curr_slave_lock);
-               slave = bond->curr_active_slave;
-               read_unlock(&bond->curr_slave_lock);
-
-               if (!slave)
-                       goto out;
+       if (iph->protocol == IPPROTO_IGMP && skb->protocol == htons(ETH_P_IP)) {
+               slave = rcu_dereference(bond->curr_active_slave);
+               if (slave && slave_can_tx(slave))
+                       bond_dev_queue_xmit(bond, skb, slave->dev);
+               else
+                       bond_xmit_slave_id(bond, skb, 0);
        } else {
-               /*
-                * Concurrent TX may collide on rr_tx_counter; we accept
-                * that as being rare enough not to justify using an
-                * atomic op here.
-                */
-               slave_no = bond->rr_tx_counter++ % bond->slave_cnt;
-
-               bond_for_each_slave(bond, slave, i) {
-                       slave_no--;
-                       if (slave_no < 0)
-                               break;
-               }
-       }
-
-       start_at = slave;
-       bond_for_each_slave_from(bond, slave, i, start_at) {
-               if (IS_UP(slave->dev) &&
-                   (slave->link == BOND_LINK_UP) &&
-                   bond_is_active_slave(slave)) {
-                       res = bond_dev_queue_xmit(bond, skb, slave->dev);
-                       break;
-               }
-       }
-
-out:
-       if (res) {
-               /* no suitable interface, frame not sent */
-               kfree_skb(skb);
+               bond_xmit_slave_id(bond, skb,
+                                  bond->rr_tx_counter++ % bond->slave_cnt);
        }
 
        return NETDEV_TX_OK;
 }
 
-
 /*
  * in active-backup mode, we know that bond->curr_active_slave is always valid if
  * the bond has a usable interface.
@@ -3956,18 +3831,12 @@ out:
 static int bond_xmit_activebackup(struct sk_buff *skb, struct net_device *bond_dev)
 {
        struct bonding *bond = netdev_priv(bond_dev);
-       int res = 1;
-
-       read_lock(&bond->curr_slave_lock);
-
-       if (bond->curr_active_slave)
-               res = bond_dev_queue_xmit(bond, skb,
-                       bond->curr_active_slave->dev);
-
-       read_unlock(&bond->curr_slave_lock);
+       struct slave *slave;
 
-       if (res)
-               /* no suitable interface, frame not sent */
+       slave = rcu_dereference(bond->curr_active_slave);
+       if (slave)
+               bond_dev_queue_xmit(bond, skb, slave->dev);
+       else
                kfree_skb(skb);
 
        return NETDEV_TX_OK;
@@ -3981,87 +3850,39 @@ static int bond_xmit_activebackup(struct sk_buff *skb, struct net_device *bond_d
 static int bond_xmit_xor(struct sk_buff *skb, struct net_device *bond_dev)
 {
        struct bonding *bond = netdev_priv(bond_dev);
-       struct slave *slave, *start_at;
-       int slave_no;
-       int i;
-       int res = 1;
 
-       slave_no = bond->xmit_hash_policy(skb, bond->slave_cnt);
-
-       bond_for_each_slave(bond, slave, i) {
-               slave_no--;
-               if (slave_no < 0)
-                       break;
-       }
-
-       start_at = slave;
-
-       bond_for_each_slave_from(bond, slave, i, start_at) {
-               if (IS_UP(slave->dev) &&
-                   (slave->link == BOND_LINK_UP) &&
-                   bond_is_active_slave(slave)) {
-                       res = bond_dev_queue_xmit(bond, skb, slave->dev);
-                       break;
-               }
-       }
-
-       if (res) {
-               /* no suitable interface, frame not sent */
-               kfree_skb(skb);
-       }
+       bond_xmit_slave_id(bond, skb,
+                          bond->xmit_hash_policy(skb, bond->slave_cnt));
 
        return NETDEV_TX_OK;
 }
 
-/*
- * in broadcast mode, we send everything to all usable interfaces.
- */
+/* in broadcast mode, we send everything to all usable interfaces. */
 static int bond_xmit_broadcast(struct sk_buff *skb, struct net_device *bond_dev)
 {
        struct bonding *bond = netdev_priv(bond_dev);
-       struct slave *slave, *start_at;
-       struct net_device *tx_dev = NULL;
-       int i;
-       int res = 1;
-
-       read_lock(&bond->curr_slave_lock);
-       start_at = bond->curr_active_slave;
-       read_unlock(&bond->curr_slave_lock);
-
-       if (!start_at)
-               goto out;
+       struct slave *slave = NULL;
 
-       bond_for_each_slave_from(bond, slave, i, start_at) {
-               if (IS_UP(slave->dev) &&
-                   (slave->link == BOND_LINK_UP) &&
-                   bond_is_active_slave(slave)) {
-                       if (tx_dev) {
-                               struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
-                               if (!skb2) {
-                                       pr_err("%s: Error: bond_xmit_broadcast(): skb_clone() failed\n",
-                                              bond_dev->name);
-                                       continue;
-                               }
+       bond_for_each_slave_rcu(bond, slave) {
+               if (bond_is_last_slave(bond, slave))
+                       break;
+               if (IS_UP(slave->dev) && slave->link == BOND_LINK_UP) {
+                       struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
 
-                               res = bond_dev_queue_xmit(bond, skb2, tx_dev);
-                               if (res) {
-                                       kfree_skb(skb2);
-                                       continue;
-                               }
+                       if (!skb2) {
+                               pr_err("%s: Error: bond_xmit_broadcast(): skb_clone() failed\n",
+                                      bond_dev->name);
+                               continue;
                        }
-                       tx_dev = slave->dev;
+                       /* bond_dev_queue_xmit always returns 0 */
+                       bond_dev_queue_xmit(bond, skb2, slave->dev);
                }
        }
-
-       if (tx_dev)
-               res = bond_dev_queue_xmit(bond, skb, tx_dev);
-
-out:
-       if (res)
-               /* no suitable interface, frame not sent */
+       if (slave && IS_UP(slave->dev) && slave->link == BOND_LINK_UP)
+               bond_dev_queue_xmit(bond, skb, slave->dev);
+       else
                kfree_skb(skb);
 
-       /* frame sent to all suitable interfaces */
        return NETDEV_TX_OK;
 }
 
@@ -4089,15 +3910,15 @@ static void bond_set_xmit_hash_policy(struct bonding *bond)
 static inline int bond_slave_override(struct bonding *bond,
                                      struct sk_buff *skb)
 {
-       int i, res = 1;
        struct slave *slave = NULL;
        struct slave *check_slave;
+       int res = 1;
 
        if (!skb->queue_mapping)
                return 1;
 
        /* Find out if any slaves have the same mapping as this skb. */
-       bond_for_each_slave(bond, check_slave, i) {
+       bond_for_each_slave_rcu(bond, check_slave) {
                if (check_slave->queue_id == skb->queue_mapping) {
                        slave = check_slave;
                        break;
@@ -4182,14 +4003,12 @@ static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
        if (is_netpoll_tx_blocked(dev))
                return NETDEV_TX_BUSY;
 
-       read_lock(&bond->lock);
-
-       if (bond->slave_cnt)
+       rcu_read_lock();
+       if (!list_empty(&bond->slave_list))
                ret = __bond_start_xmit(skb, dev);
        else
                kfree_skb(skb);
-
-       read_unlock(&bond->lock);
+       rcu_read_unlock();
 
        return ret;
 }
@@ -4230,9 +4049,8 @@ static int bond_ethtool_get_settings(struct net_device *bond_dev,
                                     struct ethtool_cmd *ecmd)
 {
        struct bonding *bond = netdev_priv(bond_dev);
-       struct slave *slave;
-       int i;
        unsigned long speed = 0;
+       struct slave *slave;
 
        ecmd->duplex = DUPLEX_UNKNOWN;
        ecmd->port = PORT_OTHER;
@@ -4243,7 +4061,7 @@ static int bond_ethtool_get_settings(struct net_device *bond_dev,
         * this is an accurate maximum.
         */
        read_lock(&bond->lock);
-       bond_for_each_slave(bond, slave, i) {
+       bond_for_each_slave(bond, slave) {
                if (SLAVE_IS_OK(slave)) {
                        if (slave->speed != SPEED_UNKNOWN)
                                speed += slave->speed;
@@ -4254,6 +4072,7 @@ static int bond_ethtool_get_settings(struct net_device *bond_dev,
        }
        ethtool_cmd_speed_set(ecmd, speed ? : SPEED_UNKNOWN);
        read_unlock(&bond->lock);
+
        return 0;
 }
 
@@ -4317,7 +4136,7 @@ static void bond_setup(struct net_device *bond_dev)
        /* initialize rwlocks */
        rwlock_init(&bond->lock);
        rwlock_init(&bond->curr_slave_lock);
-
+       INIT_LIST_HEAD(&bond->slave_list);
        bond->params = bonding_defaults;
 
        /* Initialize pointers */
@@ -4374,13 +4193,14 @@ static void bond_setup(struct net_device *bond_dev)
 static void bond_uninit(struct net_device *bond_dev)
 {
        struct bonding *bond = netdev_priv(bond_dev);
+       struct slave *slave, *tmp_slave;
        struct vlan_entry *vlan, *tmp;
 
        bond_netpoll_cleanup(bond_dev);
 
        /* Release the bonded slaves */
-       while (bond->first_slave != NULL)
-               __bond_release_one(bond_dev, bond->first_slave->dev, true);
+       list_for_each_entry_safe(slave, tmp_slave, &bond->slave_list, list)
+               __bond_release_one(bond_dev, slave->dev, true);
        pr_info("%s: released all slaves\n", bond_dev->name);
 
        list_del(&bond->bond_list);
index 4060d41f0ee7b15bc228b6bda9488a9e6cc9c821..20a6ee25bb63e42cdf89c0273d8e1afa30234f26 100644 (file)
@@ -12,7 +12,6 @@ static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos)
        struct bonding *bond = seq->private;
        loff_t off = 0;
        struct slave *slave;
-       int i;
 
        /* make sure the bond won't be taken away */
        rcu_read_lock();
@@ -21,10 +20,9 @@ static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos)
        if (*pos == 0)
                return SEQ_START_TOKEN;
 
-       bond_for_each_slave(bond, slave, i) {
+       bond_for_each_slave(bond, slave)
                if (++off == *pos)
                        return slave;
-       }
 
        return NULL;
 }
@@ -36,11 +34,13 @@ static void *bond_info_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 
        ++*pos;
        if (v == SEQ_START_TOKEN)
-               return bond->first_slave;
+               return bond_first_slave(bond);
 
-       slave = slave->next;
+       if (bond_is_last_slave(bond, slave))
+               return NULL;
+       slave = bond_next_slave(bond, slave);
 
-       return (slave == bond->first_slave) ? NULL : slave;
+       return slave;
 }
 
 static void bond_info_seq_stop(struct seq_file *seq, void *v)
index dc36a3d7d9e983a15583260a572e6dac7451acb5..0f539de640dcb6f471378a5d44e47c0946a01d0c 100644 (file)
@@ -209,12 +209,12 @@ void bond_destroy_slave_symlinks(struct net_device *master,
 static ssize_t bonding_show_slaves(struct device *d,
                                   struct device_attribute *attr, char *buf)
 {
-       struct slave *slave;
-       int i, res = 0;
        struct bonding *bond = to_bond(d);
+       struct slave *slave;
+       int res = 0;
 
        read_lock(&bond->lock);
-       bond_for_each_slave(bond, slave, i) {
+       bond_for_each_slave(bond, slave) {
                if (res > (PAGE_SIZE - IFNAMSIZ)) {
                        /* not enough space for another interface name */
                        if ((PAGE_SIZE - res) > 10)
@@ -227,6 +227,7 @@ static ssize_t bonding_show_slaves(struct device *d,
        read_unlock(&bond->lock);
        if (res)
                buf[res-1] = '\n'; /* eat the leftover space */
+
        return res;
 }
 
@@ -325,7 +326,7 @@ static ssize_t bonding_store_mode(struct device *d,
                goto out;
        }
 
-       if (bond->slave_cnt > 0) {
+       if (!list_empty(&bond->slave_list)) {
                pr_err("unable to update mode of %s because it has slaves.\n",
                        bond->dev->name);
                ret = -EPERM;
@@ -501,20 +502,25 @@ static ssize_t bonding_store_fail_over_mac(struct device *d,
                                           struct device_attribute *attr,
                                           const char *buf, size_t count)
 {
-       int new_value;
+       int new_value, ret = count;
        struct bonding *bond = to_bond(d);
 
-       if (bond->slave_cnt != 0) {
+       if (!rtnl_trylock())
+               return restart_syscall();
+
+       if (!list_empty(&bond->slave_list)) {
                pr_err("%s: Can't alter fail_over_mac with slaves in bond.\n",
                       bond->dev->name);
-               return -EPERM;
+               ret = -EPERM;
+               goto out;
        }
 
        new_value = bond_parse_parm(buf, fail_over_mac_tbl);
        if (new_value < 0) {
                pr_err("%s: Ignoring invalid fail_over_mac value %s.\n",
                       bond->dev->name, buf);
-               return -EINVAL;
+               ret = -EINVAL;
+               goto out;
        }
 
        bond->params.fail_over_mac = new_value;
@@ -522,7 +528,9 @@ static ssize_t bonding_store_fail_over_mac(struct device *d,
                bond->dev->name, fail_over_mac_tbl[new_value].modename,
                new_value);
 
-       return count;
+out:
+       rtnl_unlock();
+       return ret;
 }
 
 static DEVICE_ATTR(fail_over_mac, S_IRUGO | S_IWUSR,
@@ -661,7 +669,7 @@ static ssize_t bonding_store_arp_targets(struct device *d,
                         &newtarget);
                /* not to race with bond_arp_rcv */
                write_lock_bh(&bond->lock);
-               bond_for_each_slave(bond, slave, i)
+               bond_for_each_slave(bond, slave)
                        slave->target_last_arp_rx[ind] = jiffies;
                targets[ind] = newtarget;
                write_unlock_bh(&bond->lock);
@@ -687,7 +695,7 @@ static ssize_t bonding_store_arp_targets(struct device *d,
                        &newtarget);
 
                write_lock_bh(&bond->lock);
-               bond_for_each_slave(bond, slave, i) {
+               bond_for_each_slave(bond, slave) {
                        targets_rx = slave->target_last_arp_rx;
                        j = ind;
                        for (; (j < BOND_MAX_ARP_TARGETS-1) && targets[j+1]; j++)
@@ -1078,10 +1086,9 @@ static ssize_t bonding_store_primary(struct device *d,
                                     struct device_attribute *attr,
                                     const char *buf, size_t count)
 {
-       int i;
-       struct slave *slave;
        struct bonding *bond = to_bond(d);
        char ifname[IFNAMSIZ];
+       struct slave *slave;
 
        if (!rtnl_trylock())
                return restart_syscall();
@@ -1107,7 +1114,7 @@ static ssize_t bonding_store_primary(struct device *d,
                goto out;
        }
 
-       bond_for_each_slave(bond, slave, i) {
+       bond_for_each_slave(bond, slave) {
                if (strncmp(slave->dev->name, ifname, IFNAMSIZ) == 0) {
                        pr_info("%s: Setting %s as primary slave.\n",
                                bond->dev->name, slave->dev->name);
@@ -1236,16 +1243,16 @@ static ssize_t bonding_show_active_slave(struct device *d,
                                         struct device_attribute *attr,
                                         char *buf)
 {
-       struct slave *curr;
        struct bonding *bond = to_bond(d);
+       struct slave *curr;
        int count = 0;
 
-       read_lock(&bond->curr_slave_lock);
-       curr = bond->curr_active_slave;
-       read_unlock(&bond->curr_slave_lock);
-
+       rcu_read_lock();
+       curr = rcu_dereference(bond->curr_active_slave);
        if (USES_PRIMARY(bond->params.mode) && curr)
                count = sprintf(buf, "%s\n", curr->dev->name);
+       rcu_read_unlock();
+
        return count;
 }
 
@@ -1253,16 +1260,14 @@ static ssize_t bonding_store_active_slave(struct device *d,
                                          struct device_attribute *attr,
                                          const char *buf, size_t count)
 {
-       int i;
-       struct slave *slave;
-       struct slave *old_active = NULL;
-       struct slave *new_active = NULL;
+       struct slave *slave, *old_active, *new_active;
        struct bonding *bond = to_bond(d);
        char ifname[IFNAMSIZ];
 
        if (!rtnl_trylock())
                return restart_syscall();
 
+       old_active = new_active = NULL;
        block_netpoll_tx();
        read_lock(&bond->lock);
        write_lock_bh(&bond->curr_slave_lock);
@@ -1279,12 +1284,12 @@ static ssize_t bonding_store_active_slave(struct device *d,
        if (!strlen(ifname) || buf[0] == '\n') {
                pr_info("%s: Clearing current active slave.\n",
                        bond->dev->name);
-               bond->curr_active_slave = NULL;
+               rcu_assign_pointer(bond->curr_active_slave, NULL);
                bond_select_active_slave(bond);
                goto out;
        }
 
-       bond_for_each_slave(bond, slave, i) {
+       bond_for_each_slave(bond, slave) {
                if (strncmp(slave->dev->name, ifname, IFNAMSIZ) == 0) {
                        old_active = bond->curr_active_slave;
                        new_active = slave;
@@ -1295,8 +1300,7 @@ static ssize_t bonding_store_active_slave(struct device *d,
                                        bond->dev->name,
                                        slave->dev->name);
                                goto out;
-                       }
-                       else {
+                       } else {
                                if ((new_active) &&
                                    (old_active) &&
                                    (new_active->link == BOND_LINK_UP) &&
@@ -1307,8 +1311,7 @@ static ssize_t bonding_store_active_slave(struct device *d,
                                                slave->dev->name);
                                        bond_change_active_slave(bond,
                                                                 new_active);
-                               }
-                               else {
+                               } else {
                                        pr_info("%s: Could not set %s as"
                                                " active slave; either %s is"
                                                " down or the link is down.\n",
@@ -1344,14 +1347,9 @@ static ssize_t bonding_show_mii_status(struct device *d,
                                       struct device_attribute *attr,
                                       char *buf)
 {
-       struct slave *curr;
        struct bonding *bond = to_bond(d);
 
-       read_lock(&bond->curr_slave_lock);
-       curr = bond->curr_active_slave;
-       read_unlock(&bond->curr_slave_lock);
-
-       return sprintf(buf, "%s\n", curr ? "up" : "down");
+       return sprintf(buf, "%s\n", bond->curr_active_slave ? "up" : "down");
 }
 static DEVICE_ATTR(mii_status, S_IRUGO, bonding_show_mii_status, NULL);
 
@@ -1470,15 +1468,15 @@ static ssize_t bonding_show_queue_id(struct device *d,
                                     struct device_attribute *attr,
                                     char *buf)
 {
-       struct slave *slave;
-       int i, res = 0;
        struct bonding *bond = to_bond(d);
+       struct slave *slave;
+       int res = 0;
 
        if (!rtnl_trylock())
                return restart_syscall();
 
        read_lock(&bond->lock);
-       bond_for_each_slave(bond, slave, i) {
+       bond_for_each_slave(bond, slave) {
                if (res > (PAGE_SIZE - IFNAMSIZ - 6)) {
                        /* not enough space for another interface_name:queue_id pair */
                        if ((PAGE_SIZE - res) > 10)
@@ -1493,6 +1491,7 @@ static ssize_t bonding_show_queue_id(struct device *d,
        if (res)
                buf[res-1] = '\n'; /* eat the leftover space */
        rtnl_unlock();
+
        return res;
 }
 
@@ -1507,7 +1506,7 @@ static ssize_t bonding_store_queue_id(struct device *d,
        struct slave *slave, *update_slave;
        struct bonding *bond = to_bond(d);
        u16 qid;
-       int i, ret = count;
+       int ret = count;
        char *delim;
        struct net_device *sdev = NULL;
 
@@ -1542,7 +1541,7 @@ static ssize_t bonding_store_queue_id(struct device *d,
 
        /* Search for thes slave and check for duplicate qids */
        update_slave = NULL;
-       bond_for_each_slave(bond, slave, i) {
+       bond_for_each_slave(bond, slave) {
                if (sdev == slave->dev)
                        /*
                         * We don't need to check the matching
@@ -1594,8 +1593,8 @@ static ssize_t bonding_store_slaves_active(struct device *d,
                                           struct device_attribute *attr,
                                           const char *buf, size_t count)
 {
-       int i, new_value, ret = count;
        struct bonding *bond = to_bond(d);
+       int new_value, ret = count;
        struct slave *slave;
 
        if (sscanf(buf, "%d", &new_value) != 1) {
@@ -1618,7 +1617,7 @@ static ssize_t bonding_store_slaves_active(struct device *d,
        }
 
        read_lock(&bond->lock);
-       bond_for_each_slave(bond, slave, i) {
+       bond_for_each_slave(bond, slave) {
                if (!bond_is_active_slave(slave)) {
                        if (new_value)
                                slave->inactive = 0;
index 42d1c6599cba944cd1854759f010b5c6776c438b..4bf52d5f637ef687ef5bd9f31e848212c2b3ff62 100644 (file)
        set_fs(fs);                     \
        res; })
 
+/* slave list primitives */
+#define bond_to_slave(ptr) list_entry(ptr, struct slave, list)
+
+/* IMPORTANT: bond_first/last_slave can return NULL in case of an empty list */
+#define bond_first_slave(bond) \
+       list_first_entry_or_null(&(bond)->slave_list, struct slave, list)
+#define bond_last_slave(bond) \
+       (list_empty(&(bond)->slave_list) ? NULL : \
+                                          bond_to_slave((bond)->slave_list.prev))
+
+#define bond_is_first_slave(bond, pos) ((pos)->list.prev == &(bond)->slave_list)
+#define bond_is_last_slave(bond, pos) ((pos)->list.next == &(bond)->slave_list)
+
+/* Since bond_first/last_slave can return NULL, these can return NULL too */
+#define bond_next_slave(bond, pos) \
+       (bond_is_last_slave(bond, pos) ? bond_first_slave(bond) : \
+                                        bond_to_slave((pos)->list.next))
+
+#define bond_prev_slave(bond, pos) \
+       (bond_is_first_slave(bond, pos) ? bond_last_slave(bond) : \
+                                         bond_to_slave((pos)->list.prev))
+
 /**
  * bond_for_each_slave_from - iterate the slaves list from a starting point
  * @bond:      the bond holding this list.
  *
  * Caller must hold bond->lock
  */
-#define bond_for_each_slave_from(bond, pos, cnt, start)        \
-       for (cnt = 0, pos = start;                              \
-            cnt < (bond)->slave_cnt;                           \
-             cnt++, pos = (pos)->next)
+#define bond_for_each_slave_from(bond, pos, cnt, start) \
+       for (cnt = 0, pos = start; pos && cnt < (bond)->slave_cnt; \
+            cnt++, pos = bond_next_slave(bond, pos))
 
 /**
- * bond_for_each_slave_from_to - iterate the slaves list from start point to stop point
- * @bond:      the bond holding this list.
- * @pos:       current slave.
- * @cnt:       counter for number max of moves
- * @start:     start point.
- * @stop:      stop point.
+ * bond_for_each_slave - iterate over all slaves
+ * @bond:      the bond holding this list
+ * @pos:       current slave
  *
  * Caller must hold bond->lock
  */
-#define bond_for_each_slave_from_to(bond, pos, cnt, start, stop)       \
-       for (cnt = 0, pos = start;                                      \
-            ((cnt < (bond)->slave_cnt) && (pos != (stop)->next));      \
-             cnt++, pos = (pos)->next)
+#define bond_for_each_slave(bond, pos) \
+       list_for_each_entry(pos, &(bond)->slave_list, list)
+
+/* Caller must have rcu_read_lock */
+#define bond_for_each_slave_rcu(bond, pos) \
+       list_for_each_entry_rcu(pos, &(bond)->slave_list, list)
 
 /**
- * bond_for_each_slave - iterate the slaves list from head
- * @bond:      the bond holding this list.
- * @pos:       current slave.
- * @cnt:       counter for max number of moves
+ * bond_for_each_slave_continue_reverse - iterate in reverse from a given position
+ * @bond:      the bond holding this list
+ * @pos:       slave to continue from
  *
  * Caller must hold bond->lock
  */
-#define bond_for_each_slave(bond, pos, cnt)    \
-               bond_for_each_slave_from(bond, pos, cnt, (bond)->first_slave)
-
+#define bond_for_each_slave_continue_reverse(bond, pos) \
+       list_for_each_entry_continue_reverse(pos, &(bond)->slave_list, list)
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
 extern atomic_t netpoll_block_tx;
@@ -174,8 +192,7 @@ struct vlan_entry {
 
 struct slave {
        struct net_device *dev; /* first - useful for panic debug */
-       struct slave *next;
-       struct slave *prev;
+       struct list_head list;
        struct bonding *bond; /* our master */
        int    delay;
        unsigned long jiffies;
@@ -215,7 +232,7 @@ struct slave {
  */
 struct bonding {
        struct   net_device *dev; /* first - useful for panic debug */
-       struct   slave *first_slave;
+       struct   list_head slave_list;
        struct   slave *curr_active_slave;
        struct   slave *current_arp_slave;
        struct   slave *primary_slave;
@@ -270,13 +287,10 @@ static inline struct slave *bond_get_slave_by_dev(struct bonding *bond,
                                                  struct net_device *slave_dev)
 {
        struct slave *slave = NULL;
-       int i;
 
-       bond_for_each_slave(bond, slave, i) {
-               if (slave->dev == slave_dev) {
+       bond_for_each_slave(bond, slave)
+               if (slave->dev == slave_dev)
                        return slave;
-               }
-       }
 
        return NULL;
 }
@@ -416,10 +430,20 @@ static inline __be32 bond_confirm_addr(struct net_device *dev, __be32 dst, __be3
        return addr;
 }
 
+static inline bool slave_can_tx(struct slave *slave)
+{
+       if (IS_UP(slave->dev) && slave->link == BOND_LINK_UP &&
+           bond_is_active_slave(slave))
+               return true;
+       else
+               return false;
+}
+
 struct bond_net;
 
 struct vlan_entry *bond_next_vlan(struct bonding *bond, struct vlan_entry *curr);
 int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, struct net_device *slave_dev);
+void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int slave_id);
 int bond_create(struct net *net, const char *name);
 int bond_create_sysfs(struct bond_net *net);
 void bond_destroy_sysfs(struct bond_net *net);
@@ -477,10 +501,9 @@ static inline void bond_destroy_proc_dir(struct bond_net *bn)
 static inline struct slave *bond_slave_has_mac(struct bonding *bond,
                                               const u8 *mac)
 {
-       int i = 0;
        struct slave *tmp;
 
-       bond_for_each_slave(bond, tmp, i)
+       bond_for_each_slave(bond, tmp)
                if (ether_addr_equal_64bits(mac, tmp->dev->dev_addr))
                        return tmp;
 
index 7b0be0910f4be5116901b1ff25668bb089a033df..c48174ed49ccdbd1d4da15e356036d0bc6ca6c9f 100644 (file)
@@ -850,12 +850,17 @@ static int flexcan_open(struct net_device *dev)
        struct flexcan_priv *priv = netdev_priv(dev);
        int err;
 
-       clk_prepare_enable(priv->clk_ipg);
-       clk_prepare_enable(priv->clk_per);
+       err = clk_prepare_enable(priv->clk_ipg);
+       if (err)
+               return err;
+
+       err = clk_prepare_enable(priv->clk_per);
+       if (err)
+               goto out_disable_ipg;
 
        err = open_candev(dev);
        if (err)
-               goto out;
+               goto out_disable_per;
 
        err = request_irq(dev->irq, flexcan_irq, IRQF_SHARED, dev->name, dev);
        if (err)
@@ -875,8 +880,9 @@ static int flexcan_open(struct net_device *dev)
 
  out_close:
        close_candev(dev);
- out:
+ out_disable_per:
        clk_disable_unprepare(priv->clk_per);
+ out_disable_ipg:
        clk_disable_unprepare(priv->clk_ipg);
 
        return err;
@@ -933,8 +939,13 @@ static int register_flexcandev(struct net_device *dev)
        struct flexcan_regs __iomem *regs = priv->base;
        u32 reg, err;
 
-       clk_prepare_enable(priv->clk_ipg);
-       clk_prepare_enable(priv->clk_per);
+       err = clk_prepare_enable(priv->clk_ipg);
+       if (err)
+               return err;
+
+       err = clk_prepare_enable(priv->clk_per);
+       if (err)
+               goto out_disable_ipg;
 
        /* select "bus clock", chip must be disabled */
        flexcan_chip_disable(priv);
@@ -959,15 +970,16 @@ static int register_flexcandev(struct net_device *dev)
        if (!(reg & FLEXCAN_MCR_FEN)) {
                netdev_err(dev, "Could not enable RX FIFO, unsupported core\n");
                err = -ENODEV;
-               goto out;
+               goto out_disable_per;
        }
 
        err = register_candev(dev);
 
- out:
+ out_disable_per:
        /* disable core and turn off clocks */
        flexcan_chip_disable(priv);
        clk_disable_unprepare(priv->clk_per);
+ out_disable_ipg:
        clk_disable_unprepare(priv->clk_ipg);
 
        return err;
@@ -1001,7 +1013,6 @@ static int flexcan_probe(struct platform_device *pdev)
        struct resource *mem;
        struct clk *clk_ipg = NULL, *clk_per = NULL;
        void __iomem *base;
-       resource_size_t mem_size;
        int err, irq;
        u32 clock_freq = 0;
 
@@ -1013,43 +1024,25 @@ static int flexcan_probe(struct platform_device *pdev)
                clk_ipg = devm_clk_get(&pdev->dev, "ipg");
                if (IS_ERR(clk_ipg)) {
                        dev_err(&pdev->dev, "no ipg clock defined\n");
-                       err = PTR_ERR(clk_ipg);
-                       goto failed_clock;
+                       return PTR_ERR(clk_ipg);
                }
                clock_freq = clk_get_rate(clk_ipg);
 
                clk_per = devm_clk_get(&pdev->dev, "per");
                if (IS_ERR(clk_per)) {
                        dev_err(&pdev->dev, "no per clock defined\n");
-                       err = PTR_ERR(clk_per);
-                       goto failed_clock;
+                       return PTR_ERR(clk_per);
                }
        }
 
        mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        irq = platform_get_irq(pdev, 0);
-       if (!mem || irq <= 0) {
-               err = -ENODEV;
-               goto failed_get;
-       }
+       if (irq <= 0)
+               return -ENODEV;
 
-       mem_size = resource_size(mem);
-       if (!request_mem_region(mem->start, mem_size, pdev->name)) {
-               err = -EBUSY;
-               goto failed_get;
-       }
-
-       base = ioremap(mem->start, mem_size);
-       if (!base) {
-               err = -ENOMEM;
-               goto failed_map;
-       }
-
-       dev = alloc_candev(sizeof(struct flexcan_priv), 1);
-       if (!dev) {
-               err = -ENOMEM;
-               goto failed_alloc;
-       }
+       base = devm_ioremap_resource(&pdev->dev, mem);
+       if (IS_ERR(base))
+               return PTR_ERR(base);
 
        of_id = of_match_device(flexcan_of_match, &pdev->dev);
        if (of_id) {
@@ -1058,10 +1051,13 @@ static int flexcan_probe(struct platform_device *pdev)
                devtype_data = (struct flexcan_devtype_data *)
                        pdev->id_entry->driver_data;
        } else {
-               err = -ENODEV;
-               goto failed_devtype;
+               return -ENODEV;
        }
 
+       dev = alloc_candev(sizeof(struct flexcan_priv), 1);
+       if (!dev)
+               return -ENOMEM;
+
        dev->netdev_ops = &flexcan_netdev_ops;
        dev->irq = irq;
        dev->flags |= IFF_ECHO;
@@ -1104,28 +1100,15 @@ static int flexcan_probe(struct platform_device *pdev)
        return 0;
 
  failed_register:
- failed_devtype:
        free_candev(dev);
- failed_alloc:
-       iounmap(base);
- failed_map:
-       release_mem_region(mem->start, mem_size);
- failed_get:
- failed_clock:
        return err;
 }
 
 static int flexcan_remove(struct platform_device *pdev)
 {
        struct net_device *dev = platform_get_drvdata(pdev);
-       struct flexcan_priv *priv = netdev_priv(dev);
-       struct resource *mem;
 
        unregister_flexcandev(dev);
-       iounmap(priv->base);
-
-       mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       release_mem_region(mem->start, resource_size(mem));
 
        free_candev(dev);
 
index e1d26433d61921b3e0460421212006bcb5e0e9ee..b7232a9b7756cb48f8fd143b4f5a31f45757151a 100644 (file)
@@ -707,7 +707,7 @@ static int ax_init_dev(struct net_device *dev)
 
 #ifdef CONFIG_AX88796_93CX6
        if (ax->plat->flags & AXFLG_HAS_93CX6) {
-               unsigned char mac_addr[6];
+               unsigned char mac_addr[ETH_ALEN];
                struct eeprom_93cx6 eeprom;
 
                eeprom.data = ei_local;
@@ -719,7 +719,7 @@ static int ax_init_dev(struct net_device *dev)
                                       (__le16 __force *)mac_addr,
                                       sizeof(mac_addr) >> 1);
 
-               memcpy(dev->dev_addr, mac_addr, 6);
+               memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
        }
 #endif
        if (ax->plat->wordlength == 2) {
index 2037080c504d67e6128797f4a920891fed5a6d0e..506b0248c4001b48382f3ef6be2ccbc47724c8fa 100644 (file)
@@ -90,6 +90,7 @@ source "drivers/net/ethernet/marvell/Kconfig"
 source "drivers/net/ethernet/mellanox/Kconfig"
 source "drivers/net/ethernet/micrel/Kconfig"
 source "drivers/net/ethernet/microchip/Kconfig"
+source "drivers/net/ethernet/moxa/Kconfig"
 source "drivers/net/ethernet/myricom/Kconfig"
 
 config FEALNX
index 390bd0bfaa2721b655a80f5c1f74b848f2665cd5..c0b8789952e711fb77e44fc214d06ee8cac5e8f3 100644 (file)
@@ -42,6 +42,7 @@ obj-$(CONFIG_NET_VENDOR_MARVELL) += marvell/
 obj-$(CONFIG_NET_VENDOR_MELLANOX) += mellanox/
 obj-$(CONFIG_NET_VENDOR_MICREL) += micrel/
 obj-$(CONFIG_NET_VENDOR_MICROCHIP) += microchip/
+obj-$(CONFIG_NET_VENDOR_MOXART) += moxa/
 obj-$(CONFIG_NET_VENDOR_MYRI) += myricom/
 obj-$(CONFIG_FEALNX) += fealnx.o
 obj-$(CONFIG_NET_VENDOR_NATSEMI) += natsemi/
index ed21307276430895fadc736b16f8539afe1713cf..2d8e28819779eed0388109791fe0126f5af763f8 100644 (file)
@@ -1521,7 +1521,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
        char *chipname;
        struct net_device *dev;
        const struct pcnet32_access *a = NULL;
-       u8 promaddr[6];
+       u8 promaddr[ETH_ALEN];
        int ret = -ENODEV;
 
        /* reset the chip */
@@ -1665,10 +1665,10 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
        }
 
        /* read PROM address and compare with CSR address */
-       for (i = 0; i < 6; i++)
+       for (i = 0; i < ETH_ALEN; i++)
                promaddr[i] = inb(ioaddr + i);
 
-       if (memcmp(promaddr, dev->dev_addr, 6) ||
+       if (memcmp(promaddr, dev->dev_addr, ETH_ALEN) ||
            !is_valid_ether_addr(dev->dev_addr)) {
                if (is_valid_ether_addr(promaddr)) {
                        if (pcnet32_debug & NETIF_MSG_PROBE) {
index 52c96036dcc41ca5174f97b52c633126c8aea3cf..2fa5b86f139db626f1839c7b3bc9087444179b28 100644 (file)
@@ -130,7 +130,7 @@ config BNX2X_SRIOV
 
 config BGMAC
        tristate "BCMA bus GBit core support"
-       depends on BCMA_HOST_SOC && HAS_DMA
+       depends on BCMA_HOST_SOC && HAS_DMA && BCM47XX
        select PHYLIB
        ---help---
          This driver supports GBit MAC and BCM4706 GBit MAC cores on BCMA bus.
index b1bcd4ba47444e0f36c1bc4f9d8a2ff446ba5882..89ff09ec7d288f9ec4a24fc42f6ba871868527b2 100644 (file)
@@ -1747,11 +1747,10 @@ static int bcm_enet_probe(struct platform_device *pdev)
        if (!bcm_enet_shared_base[0])
                return -ENODEV;
 
-       res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
        res_irq_rx = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
        res_irq_tx = platform_get_resource(pdev, IORESOURCE_IRQ, 2);
-       if (!res_mem || !res_irq || !res_irq_rx || !res_irq_tx)
+       if (!res_irq || !res_irq_rx || !res_irq_tx)
                return -ENODEV;
 
        ret = 0;
@@ -1767,9 +1766,10 @@ static int bcm_enet_probe(struct platform_device *pdev)
        if (ret)
                goto out;
 
-       priv->base = devm_request_and_ioremap(&pdev->dev, res_mem);
-       if (priv->base == NULL) {
-               ret = -ENOMEM;
+       res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       priv->base = devm_ioremap_resource(&pdev->dev, res_mem);
+       if (IS_ERR(priv->base)) {
+               ret = PTR_ERR(priv->base);
                goto out;
        }
 
index 6a2de1d79ff6c011188f2ccfb8e48c315d93825c..4148058cef8165e374f3eb0c9c85275b036d33b8 100644 (file)
@@ -1,6 +1,6 @@
 /* bnx2.c: Broadcom NX2 network driver.
  *
- * Copyright (c) 2004-2011 Broadcom Corporation
+ * Copyright (c) 2004-2013 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -58,8 +58,8 @@
 #include "bnx2_fw.h"
 
 #define DRV_MODULE_NAME                "bnx2"
-#define DRV_MODULE_VERSION     "2.2.3"
-#define DRV_MODULE_RELDATE     "June 27, 2012"
+#define DRV_MODULE_VERSION     "2.2.4"
+#define DRV_MODULE_RELDATE     "Aug 05, 2013"
 #define FW_MIPS_FILE_06                "bnx2/bnx2-mips-06-6.2.3.fw"
 #define FW_RV2P_FILE_06                "bnx2/bnx2-rv2p-06-6.0.15.fw"
 #define FW_MIPS_FILE_09                "bnx2/bnx2-mips-09-6.2.1b.fw"
@@ -3908,136 +3908,121 @@ init_cpu_err:
        return rc;
 }
 
-static int
-bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
+static void
+bnx2_setup_wol(struct bnx2 *bp)
 {
-       u16 pmcsr;
+       int i;
+       u32 val, wol_msg;
 
-       pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
+       if (bp->wol) {
+               u32 advertising;
+               u8 autoneg;
 
-       switch (state) {
-       case PCI_D0: {
-               u32 val;
+               autoneg = bp->autoneg;
+               advertising = bp->advertising;
 
-               pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
-                       (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
-                       PCI_PM_CTRL_PME_STATUS);
+               if (bp->phy_port == PORT_TP) {
+                       bp->autoneg = AUTONEG_SPEED;
+                       bp->advertising = ADVERTISED_10baseT_Half |
+                               ADVERTISED_10baseT_Full |
+                               ADVERTISED_100baseT_Half |
+                               ADVERTISED_100baseT_Full |
+                               ADVERTISED_Autoneg;
+               }
 
-               if (pmcsr & PCI_PM_CTRL_STATE_MASK)
-                       /* delay required during transition out of D3hot */
-                       msleep(20);
+               spin_lock_bh(&bp->phy_lock);
+               bnx2_setup_phy(bp, bp->phy_port);
+               spin_unlock_bh(&bp->phy_lock);
 
-               val = BNX2_RD(bp, BNX2_EMAC_MODE);
-               val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
-               val &= ~BNX2_EMAC_MODE_MPKT;
-               BNX2_WR(bp, BNX2_EMAC_MODE, val);
+               bp->autoneg = autoneg;
+               bp->advertising = advertising;
 
-               val = BNX2_RD(bp, BNX2_RPM_CONFIG);
-               val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
-               BNX2_WR(bp, BNX2_RPM_CONFIG, val);
-               break;
-       }
-       case PCI_D3hot: {
-               int i;
-               u32 val, wol_msg;
-
-               if (bp->wol) {
-                       u32 advertising;
-                       u8 autoneg;
-
-                       autoneg = bp->autoneg;
-                       advertising = bp->advertising;
-
-                       if (bp->phy_port == PORT_TP) {
-                               bp->autoneg = AUTONEG_SPEED;
-                               bp->advertising = ADVERTISED_10baseT_Half |
-                                       ADVERTISED_10baseT_Full |
-                                       ADVERTISED_100baseT_Half |
-                                       ADVERTISED_100baseT_Full |
-                                       ADVERTISED_Autoneg;
-                       }
+               bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
 
-                       spin_lock_bh(&bp->phy_lock);
-                       bnx2_setup_phy(bp, bp->phy_port);
-                       spin_unlock_bh(&bp->phy_lock);
+               val = BNX2_RD(bp, BNX2_EMAC_MODE);
 
-                       bp->autoneg = autoneg;
-                       bp->advertising = advertising;
+               /* Enable port mode. */
+               val &= ~BNX2_EMAC_MODE_PORT;
+               val |= BNX2_EMAC_MODE_MPKT_RCVD |
+                      BNX2_EMAC_MODE_ACPI_RCVD |
+                      BNX2_EMAC_MODE_MPKT;
+               if (bp->phy_port == PORT_TP) {
+                       val |= BNX2_EMAC_MODE_PORT_MII;
+               } else {
+                       val |= BNX2_EMAC_MODE_PORT_GMII;
+                       if (bp->line_speed == SPEED_2500)
+                               val |= BNX2_EMAC_MODE_25G_MODE;
+               }
 
-                       bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
+               BNX2_WR(bp, BNX2_EMAC_MODE, val);
 
-                       val = BNX2_RD(bp, BNX2_EMAC_MODE);
+               /* receive all multicast */
+               for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
+                       BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
+                               0xffffffff);
+               }
+               BNX2_WR(bp, BNX2_EMAC_RX_MODE, BNX2_EMAC_RX_MODE_SORT_MODE);
 
-                       /* Enable port mode. */
-                       val &= ~BNX2_EMAC_MODE_PORT;
-                       val |= BNX2_EMAC_MODE_MPKT_RCVD |
-                              BNX2_EMAC_MODE_ACPI_RCVD |
-                              BNX2_EMAC_MODE_MPKT;
-                       if (bp->phy_port == PORT_TP)
-                               val |= BNX2_EMAC_MODE_PORT_MII;
-                       else {
-                               val |= BNX2_EMAC_MODE_PORT_GMII;
-                               if (bp->line_speed == SPEED_2500)
-                                       val |= BNX2_EMAC_MODE_25G_MODE;
-                       }
+               val = 1 | BNX2_RPM_SORT_USER0_BC_EN | BNX2_RPM_SORT_USER0_MC_EN;
+               BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
+               BNX2_WR(bp, BNX2_RPM_SORT_USER0, val);
+               BNX2_WR(bp, BNX2_RPM_SORT_USER0, val | BNX2_RPM_SORT_USER0_ENA);
 
-                       BNX2_WR(bp, BNX2_EMAC_MODE, val);
+               /* Need to enable EMAC and RPM for WOL. */
+               BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
+                       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
+                       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
+                       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
 
-                       /* receive all multicast */
-                       for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
-                               BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
-                                       0xffffffff);
-                       }
-                       BNX2_WR(bp, BNX2_EMAC_RX_MODE,
-                               BNX2_EMAC_RX_MODE_SORT_MODE);
+               val = BNX2_RD(bp, BNX2_RPM_CONFIG);
+               val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
+               BNX2_WR(bp, BNX2_RPM_CONFIG, val);
 
-                       val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
-                             BNX2_RPM_SORT_USER0_MC_EN;
-                       BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
-                       BNX2_WR(bp, BNX2_RPM_SORT_USER0, val);
-                       BNX2_WR(bp, BNX2_RPM_SORT_USER0, val |
-                               BNX2_RPM_SORT_USER0_ENA);
+               wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
+       } else {
+                       wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
+       }
 
-                       /* Need to enable EMAC and RPM for WOL. */
-                       BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
-                               BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
-                               BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
-                               BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
+       if (!(bp->flags & BNX2_FLAG_NO_WOL))
+               bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 1, 0);
 
-                       val = BNX2_RD(bp, BNX2_RPM_CONFIG);
-                       val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
-                       BNX2_WR(bp, BNX2_RPM_CONFIG, val);
+}
 
-                       wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
-               }
-               else {
-                       wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
-               }
+static int
+bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
+{
+       switch (state) {
+       case PCI_D0: {
+               u32 val;
+
+               pci_enable_wake(bp->pdev, PCI_D0, false);
+               pci_set_power_state(bp->pdev, PCI_D0);
 
-               if (!(bp->flags & BNX2_FLAG_NO_WOL))
-                       bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg,
-                                    1, 0);
+               val = BNX2_RD(bp, BNX2_EMAC_MODE);
+               val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
+               val &= ~BNX2_EMAC_MODE_MPKT;
+               BNX2_WR(bp, BNX2_EMAC_MODE, val);
 
-               pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
+               val = BNX2_RD(bp, BNX2_RPM_CONFIG);
+               val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
+               BNX2_WR(bp, BNX2_RPM_CONFIG, val);
+               break;
+       }
+       case PCI_D3hot: {
+               bnx2_setup_wol(bp);
+               pci_wake_from_d3(bp->pdev, bp->wol);
                if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
                    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1)) {
 
                        if (bp->wol)
-                               pmcsr |= 3;
-               }
-               else {
-                       pmcsr |= 3;
-               }
-               if (bp->wol) {
-                       pmcsr |= PCI_PM_CTRL_PME_ENABLE;
+                               pci_set_power_state(bp->pdev, PCI_D3hot);
+               } else {
+                       pci_set_power_state(bp->pdev, PCI_D3hot);
                }
-               pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
-                                     pmcsr);
 
                /* No more memory access after this point until
                 * device is brought back to D0.
                 */
-               udelay(50);
                break;
        }
        default:
@@ -6317,7 +6302,6 @@ bnx2_open(struct net_device *dev)
 
        netif_carrier_off(dev);
 
-       bnx2_set_power_state(bp, PCI_D0);
        bnx2_disable_int(bp);
 
        rc = bnx2_setup_int_mode(bp, disable_msi);
@@ -6724,7 +6708,6 @@ bnx2_close(struct net_device *dev)
        bnx2_del_napi(bp);
        bp->link_up = 0;
        netif_carrier_off(bp->dev);
-       bnx2_set_power_state(bp, PCI_D3hot);
        return 0;
 }
 
@@ -7081,6 +7064,9 @@ bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
        else {
                bp->wol = 0;
        }
+
+       device_set_wakeup_enable(&bp->pdev->dev, bp->wol);
+
        return 0;
 }
 
@@ -7156,9 +7142,6 @@ bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
        struct bnx2 *bp = netdev_priv(dev);
        int rc;
 
-       if (!netif_running(dev))
-               return -EAGAIN;
-
        /* parameters already validated in ethtool_get_eeprom */
 
        rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
@@ -7173,9 +7156,6 @@ bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
        struct bnx2 *bp = netdev_priv(dev);
        int rc;
 
-       if (!netif_running(dev))
-               return -EAGAIN;
-
        /* parameters already validated in ethtool_set_eeprom */
 
        rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
@@ -7535,8 +7515,6 @@ bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
 {
        struct bnx2 *bp = netdev_priv(dev);
 
-       bnx2_set_power_state(bp, PCI_D0);
-
        memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
        if (etest->flags & ETH_TEST_FL_OFFLINE) {
                int i;
@@ -7585,8 +7563,6 @@ bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
                etest->flags |= ETH_TEST_FL_FAILED;
 
        }
-       if (!netif_running(bp->dev))
-               bnx2_set_power_state(bp, PCI_D3hot);
 }
 
 static void
@@ -7658,8 +7634,6 @@ bnx2_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state)
 
        switch (state) {
        case ETHTOOL_ID_ACTIVE:
-               bnx2_set_power_state(bp, PCI_D0);
-
                bp->leds_save = BNX2_RD(bp, BNX2_MISC_CFG);
                BNX2_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
                return 1;       /* cycle on/off once per second */
@@ -7680,9 +7654,6 @@ bnx2_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state)
        case ETHTOOL_ID_INACTIVE:
                BNX2_WR(bp, BNX2_EMAC_LED, 0);
                BNX2_WR(bp, BNX2_MISC_CFG, bp->leds_save);
-
-               if (!netif_running(dev))
-                       bnx2_set_power_state(bp, PCI_D3hot);
                break;
        }
 
@@ -8130,8 +8101,6 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
                goto err_out_release;
        }
 
-       bnx2_set_power_state(bp, PCI_D0);
-
        /* Configure byte swap and enable write to the reg_window registers.
         * Rely on CPU to do target byte swapping on big endian systems
         * The chip's target access swapping will not swap all accesses
@@ -8170,13 +8139,13 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
 
        if (BNX2_CHIP(bp) == BNX2_CHIP_5709 &&
            BNX2_CHIP_REV(bp) != BNX2_CHIP_REV_Ax) {
-               if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
+               if (pdev->msix_cap)
                        bp->flags |= BNX2_FLAG_MSIX_CAP;
        }
 
        if (BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A0 &&
            BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A1) {
-               if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
+               if (pdev->msi_cap)
                        bp->flags |= BNX2_FLAG_MSI_CAP;
        }
 
@@ -8369,6 +8338,11 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
                bp->wol = 0;
        }
 
+       if (bp->flags & BNX2_FLAG_NO_WOL)
+               device_set_wakeup_capable(&bp->pdev->dev, false);
+       else
+               device_set_wakeup_enable(&bp->pdev->dev, bp->wol);
+
        if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
                bp->tx_quick_cons_trip_int =
                        bp->tx_quick_cons_trip;
@@ -8609,46 +8583,52 @@ bnx2_remove_one(struct pci_dev *pdev)
 }
 
 static int
-bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
+bnx2_suspend(struct device *device)
 {
+       struct pci_dev *pdev = to_pci_dev(device);
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2 *bp = netdev_priv(dev);
 
-       /* PCI register 4 needs to be saved whether netif_running() or not.
-        * MSI address and data need to be saved if using MSI and
-        * netif_running().
-        */
-       pci_save_state(pdev);
-       if (!netif_running(dev))
-               return 0;
-
-       cancel_work_sync(&bp->reset_task);
-       bnx2_netif_stop(bp, true);
-       netif_device_detach(dev);
-       del_timer_sync(&bp->timer);
-       bnx2_shutdown_chip(bp);
-       bnx2_free_skbs(bp);
-       bnx2_set_power_state(bp, pci_choose_state(pdev, state));
+       if (netif_running(dev)) {
+               cancel_work_sync(&bp->reset_task);
+               bnx2_netif_stop(bp, true);
+               netif_device_detach(dev);
+               del_timer_sync(&bp->timer);
+               bnx2_shutdown_chip(bp);
+               __bnx2_free_irq(bp);
+               bnx2_free_skbs(bp);
+       }
+       bnx2_setup_wol(bp);
        return 0;
 }
 
 static int
-bnx2_resume(struct pci_dev *pdev)
+bnx2_resume(struct device *device)
 {
+       struct pci_dev *pdev = to_pci_dev(device);
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2 *bp = netdev_priv(dev);
 
-       pci_restore_state(pdev);
        if (!netif_running(dev))
                return 0;
 
        bnx2_set_power_state(bp, PCI_D0);
        netif_device_attach(dev);
+       bnx2_request_irq(bp);
        bnx2_init_nic(bp, 1);
        bnx2_netif_start(bp, true);
        return 0;
 }
 
+#ifdef CONFIG_PM_SLEEP
+static SIMPLE_DEV_PM_OPS(bnx2_pm_ops, bnx2_suspend, bnx2_resume);
+#define BNX2_PM_OPS (&bnx2_pm_ops)
+
+#else
+
+#define BNX2_PM_OPS NULL
+
+#endif /* CONFIG_PM_SLEEP */
 /**
  * bnx2_io_error_detected - called when PCI error is detected
  * @pdev: Pointer to PCI device
@@ -8694,24 +8674,28 @@ static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
 {
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2 *bp = netdev_priv(dev);
-       pci_ers_result_t result;
-       int err;
+       pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
+       int err = 0;
 
        rtnl_lock();
        if (pci_enable_device(pdev)) {
                dev_err(&pdev->dev,
                        "Cannot re-enable PCI device after reset\n");
-               result = PCI_ERS_RESULT_DISCONNECT;
        } else {
                pci_set_master(pdev);
                pci_restore_state(pdev);
                pci_save_state(pdev);
 
-               if (netif_running(dev)) {
-                       bnx2_set_power_state(bp, PCI_D0);
-                       bnx2_init_nic(bp, 1);
-               }
-               result = PCI_ERS_RESULT_RECOVERED;
+               if (netif_running(dev))
+                       err = bnx2_init_nic(bp, 1);
+
+               if (!err)
+                       result = PCI_ERS_RESULT_RECOVERED;
+       }
+
+       if (result != PCI_ERS_RESULT_RECOVERED && netif_running(dev)) {
+               bnx2_napi_enable(bp);
+               dev_close(dev);
        }
        rtnl_unlock();
 
@@ -8748,6 +8732,28 @@ static void bnx2_io_resume(struct pci_dev *pdev)
        rtnl_unlock();
 }
 
+static void bnx2_shutdown(struct pci_dev *pdev)
+{
+       struct net_device *dev = pci_get_drvdata(pdev);
+       struct bnx2 *bp;
+
+       if (!dev)
+               return;
+
+       bp = netdev_priv(dev);
+       if (!bp)
+               return;
+
+       rtnl_lock();
+       if (netif_running(dev))
+               dev_close(bp->dev);
+
+       if (system_state == SYSTEM_POWER_OFF)
+               bnx2_set_power_state(bp, PCI_D3hot);
+
+       rtnl_unlock();
+}
+
 static const struct pci_error_handlers bnx2_err_handler = {
        .error_detected = bnx2_io_error_detected,
        .slot_reset     = bnx2_io_slot_reset,
@@ -8759,9 +8765,9 @@ static struct pci_driver bnx2_pci_driver = {
        .id_table       = bnx2_pci_tbl,
        .probe          = bnx2_init_one,
        .remove         = bnx2_remove_one,
-       .suspend        = bnx2_suspend,
-       .resume         = bnx2_resume,
+       .driver.pm      = BNX2_PM_OPS,
        .err_handler    = &bnx2_err_handler,
+       .shutdown       = bnx2_shutdown,
 };
 
 module_pci_driver(bnx2_pci_driver);
index 172efbecfea2b502c26cf8043770575b229743d4..18cb2d23e56b344db90f8c7cef007f02f256f64e 100644 (file)
@@ -1,6 +1,6 @@
 /* bnx2.h: Broadcom NX2 network driver.
  *
- * Copyright (c) 2004-2011 Broadcom Corporation
+ * Copyright (c) 2004-2013 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
index ce9b387b5a1962949582354f7b0dcfecb621a08d..12202f81735cc944a7072f6d50a74580fce04eed 100644 (file)
@@ -1331,8 +1331,10 @@ enum {
        BNX2X_SP_RTNL_ENABLE_SRIOV,
        BNX2X_SP_RTNL_VFPF_MCAST,
        BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN,
-       BNX2X_SP_RTNL_VFPF_STORM_RX_MODE,
+       BNX2X_SP_RTNL_RX_MODE,
        BNX2X_SP_RTNL_HYPERVISOR_VLAN,
+       BNX2X_SP_RTNL_TX_STOP,
+       BNX2X_SP_RTNL_TX_RESUME,
 };
 
 struct bnx2x_prev_path_list {
index f2d1ff10054b28ccaa9b7baddfa408821c7409af..2e90868a9276edad8c754494f32d714ee1bc0620 100644 (file)
@@ -2060,7 +2060,11 @@ void bnx2x_squeeze_objects(struct bnx2x *bp)
        rparam.mcast_obj = &bp->mcast_obj;
        __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
 
-       /* Add a DEL command... */
+       /* Add a DEL command... - Since we're doing a driver cleanup only,
+        * we take a lock surrounding both the initial send and the CONTs,
+        * as we don't want a true completion to disrupt us in the middle.
+        */
+       netif_addr_lock_bh(bp->dev);
        rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
        if (rc < 0)
                BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
@@ -2072,11 +2076,13 @@ void bnx2x_squeeze_objects(struct bnx2x *bp)
                if (rc < 0) {
                        BNX2X_ERR("Failed to clean multi-cast object: %d\n",
                                  rc);
+                       netif_addr_unlock_bh(bp->dev);
                        return;
                }
 
                rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
        }
+       netif_addr_unlock_bh(bp->dev);
 }
 
 #ifndef BNX2X_STOP_ON_ERROR
@@ -2432,9 +2438,7 @@ int bnx2x_load_cnic(struct bnx2x *bp)
        }
 
        /* Initialize Rx filter. */
-       netif_addr_lock_bh(bp->dev);
-       bnx2x_set_rx_mode(bp->dev);
-       netif_addr_unlock_bh(bp->dev);
+       bnx2x_set_rx_mode_inner(bp);
 
        /* re-read iscsi info */
        bnx2x_get_iscsi_info(bp);
@@ -2704,9 +2708,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
        /* Start fast path */
 
        /* Initialize Rx filter. */
-       netif_addr_lock_bh(bp->dev);
-       bnx2x_set_rx_mode(bp->dev);
-       netif_addr_unlock_bh(bp->dev);
+       bnx2x_set_rx_mode_inner(bp);
 
        /* Start the Tx */
        switch (load_mode) {
index c07a6d054cfe970b031582400ef71212bd45780c..38be494ffa6eb65f6013127832c87d3b07491323 100644 (file)
@@ -418,6 +418,7 @@ int bnx2x_set_eth_mac(struct bnx2x *bp, bool set);
  * netif_addr_lock_bh()
  */
 void bnx2x_set_rx_mode(struct net_device *dev);
+void bnx2x_set_rx_mode_inner(struct bnx2x *bp);
 
 /**
  * bnx2x_set_storm_rx_mode - configure MAC filtering rules in a FW.
index f9122f2d6b657d0e674c7b036635b60e97586609..fcf2761d8828804d3edf7ca8e2ad245576d23685 100644 (file)
 #include "bnx2x_dcb.h"
 
 /* forward declarations of dcbx related functions */
-static int bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp);
 static void bnx2x_pfc_set_pfc(struct bnx2x *bp);
 static void bnx2x_dcbx_update_ets_params(struct bnx2x *bp);
-static int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp);
 static void bnx2x_dcbx_get_ets_pri_pg_tbl(struct bnx2x *bp,
                                          u32 *set_configuration_ets_pg,
                                          u32 *pri_pg_tbl);
@@ -425,30 +423,52 @@ static void bnx2x_pfc_set_pfc(struct bnx2x *bp)
                bnx2x_pfc_clear(bp);
 }
 
-static int bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp)
+int bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp)
 {
        struct bnx2x_func_state_params func_params = {NULL};
+       int rc;
 
        func_params.f_obj = &bp->func_obj;
        func_params.cmd = BNX2X_F_CMD_TX_STOP;
 
+       __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
+       __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
+
        DP(BNX2X_MSG_DCB, "STOP TRAFFIC\n");
-       return bnx2x_func_state_change(bp, &func_params);
+
+       rc = bnx2x_func_state_change(bp, &func_params);
+       if (rc) {
+               BNX2X_ERR("Unable to hold traffic for HW configuration\n");
+               bnx2x_panic();
+       }
+
+       return rc;
 }
 
-static int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp)
+int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp)
 {
        struct bnx2x_func_state_params func_params = {NULL};
        struct bnx2x_func_tx_start_params *tx_params =
                &func_params.params.tx_start;
+       int rc;
 
        func_params.f_obj = &bp->func_obj;
        func_params.cmd = BNX2X_F_CMD_TX_START;
 
+       __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
+       __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
+
        bnx2x_dcbx_fw_struct(bp, tx_params);
 
        DP(BNX2X_MSG_DCB, "START TRAFFIC\n");
-       return bnx2x_func_state_change(bp, &func_params);
+
+       rc = bnx2x_func_state_change(bp, &func_params);
+       if (rc) {
+               BNX2X_ERR("Unable to resume traffic after HW configuration\n");
+               bnx2x_panic();
+       }
+
+       return rc;
 }
 
 static void bnx2x_dcbx_2cos_limit_update_ets_config(struct bnx2x *bp)
@@ -744,7 +764,9 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
                        if (IS_MF(bp))
                                bnx2x_link_sync_notify(bp);
 
-                       bnx2x_dcbx_stop_hw_tx(bp);
+                       set_bit(BNX2X_SP_RTNL_TX_STOP, &bp->sp_rtnl_state);
+
+                       schedule_delayed_work(&bp->sp_rtnl_task, 0);
 
                        return;
                }
@@ -757,7 +779,9 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
                /* ets may affect cmng configuration: reinit it in hw */
                bnx2x_set_local_cmng(bp);
 
-               bnx2x_dcbx_resume_hw_tx(bp);
+               set_bit(BNX2X_SP_RTNL_TX_RESUME, &bp->sp_rtnl_state);
+
+               schedule_delayed_work(&bp->sp_rtnl_task, 0);
 
                return;
        case BNX2X_DCBX_STATE_TX_RELEASED:
@@ -2367,21 +2391,24 @@ static u8 bnx2x_dcbnl_get_featcfg(struct net_device *netdev, int featid,
                case DCB_FEATCFG_ATTR_PG:
                        if (bp->dcbx_local_feat.ets.enabled)
                                *flags |= DCB_FEATCFG_ENABLE;
-                       if (bp->dcbx_error & DCBX_LOCAL_ETS_ERROR)
+                       if (bp->dcbx_error & (DCBX_LOCAL_ETS_ERROR |
+                                             DCBX_REMOTE_MIB_ERROR))
                                *flags |= DCB_FEATCFG_ERROR;
                        break;
                case DCB_FEATCFG_ATTR_PFC:
                        if (bp->dcbx_local_feat.pfc.enabled)
                                *flags |= DCB_FEATCFG_ENABLE;
                        if (bp->dcbx_error & (DCBX_LOCAL_PFC_ERROR |
-                           DCBX_LOCAL_PFC_MISMATCH))
+                                             DCBX_LOCAL_PFC_MISMATCH |
+                                             DCBX_REMOTE_MIB_ERROR))
                                *flags |= DCB_FEATCFG_ERROR;
                        break;
                case DCB_FEATCFG_ATTR_APP:
                        if (bp->dcbx_local_feat.app.enabled)
                                *flags |= DCB_FEATCFG_ENABLE;
                        if (bp->dcbx_error & (DCBX_LOCAL_APP_ERROR |
-                           DCBX_LOCAL_APP_MISMATCH))
+                                             DCBX_LOCAL_APP_MISMATCH |
+                                             DCBX_REMOTE_MIB_ERROR))
                                *flags |= DCB_FEATCFG_ERROR;
                        break;
                default:
index 125bd1b6586ffc1f96b5fc946a4ee5a4613ce5a4..804b8f64463e80a1fcb45f51bda976b4d8544062 100644 (file)
@@ -199,4 +199,7 @@ extern const struct dcbnl_rtnl_ops bnx2x_dcbnl_ops;
 int bnx2x_dcbnl_update_applist(struct bnx2x *bp, bool delall);
 #endif /* BCM_DCBNL */
 
+int bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp);
+int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp);
+
 #endif /* BNX2X_DCB_H */
index 955d6cfd9cb7c48179b587a7bf5239a572b7e9e8..8a9351707396d421c2a81eab5819d72671d08caa 100644 (file)
@@ -2261,6 +2261,23 @@ static void bnx2x_set_requested_fc(struct bnx2x *bp)
                bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
 }
 
+static void bnx2x_init_dropless_fc(struct bnx2x *bp)
+{
+       u32 pause_enabled = 0;
+
+       if (!CHIP_IS_E1(bp) && bp->dropless_fc && bp->link_vars.link_up) {
+               if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
+                       pause_enabled = 1;
+
+               REG_WR(bp, BAR_USTRORM_INTMEM +
+                          USTORM_ETH_PAUSE_ENABLED_OFFSET(BP_PORT(bp)),
+                      pause_enabled);
+       }
+
+       DP(NETIF_MSG_IFUP | NETIF_MSG_LINK, "dropless_fc is %s\n",
+          pause_enabled ? "enabled" : "disabled");
+}
+
 int bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
 {
        int rc, cfx_idx = bnx2x_get_link_cfg_idx(bp);
@@ -2294,6 +2311,8 @@ int bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
 
                bnx2x_release_phy_lock(bp);
 
+               bnx2x_init_dropless_fc(bp);
+
                bnx2x_calc_fc_adv(bp);
 
                if (bp->link_vars.link_up) {
@@ -2315,6 +2334,8 @@ void bnx2x_link_set(struct bnx2x *bp)
                bnx2x_phy_init(&bp->link_params, &bp->link_vars);
                bnx2x_release_phy_lock(bp);
 
+               bnx2x_init_dropless_fc(bp);
+
                bnx2x_calc_fc_adv(bp);
        } else
                BNX2X_ERR("Bootcode is missing - can not set link\n");
@@ -2556,20 +2577,9 @@ static void bnx2x_link_attn(struct bnx2x *bp)
 
        bnx2x_link_update(&bp->link_params, &bp->link_vars);
 
-       if (bp->link_vars.link_up) {
-
-               /* dropless flow control */
-               if (!CHIP_IS_E1(bp) && bp->dropless_fc) {
-                       int port = BP_PORT(bp);
-                       u32 pause_enabled = 0;
-
-                       if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
-                               pause_enabled = 1;
+       bnx2x_init_dropless_fc(bp);
 
-                       REG_WR(bp, BAR_USTRORM_INTMEM +
-                              USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
-                              pause_enabled);
-               }
+       if (bp->link_vars.link_up) {
 
                if (bp->link_vars.mac_type != MAC_TYPE_EMAC) {
                        struct host_port_stats *pstats;
@@ -9634,17 +9644,21 @@ sp_rtnl_not_reset:
                }
        }
 
-       if (test_and_clear_bit(BNX2X_SP_RTNL_VFPF_STORM_RX_MODE,
-                              &bp->sp_rtnl_state)) {
-               DP(BNX2X_MSG_SP,
-                  "sending set storm rx mode vf pf channel message from rtnl sp-task\n");
-               bnx2x_vfpf_storm_rx_mode(bp);
+       if (test_and_clear_bit(BNX2X_SP_RTNL_RX_MODE, &bp->sp_rtnl_state)) {
+               DP(BNX2X_MSG_SP, "Handling Rx Mode setting\n");
+               bnx2x_set_rx_mode_inner(bp);
        }
 
        if (test_and_clear_bit(BNX2X_SP_RTNL_HYPERVISOR_VLAN,
                               &bp->sp_rtnl_state))
                bnx2x_pf_set_vfs_vlan(bp);
 
+       if (test_and_clear_bit(BNX2X_SP_RTNL_TX_STOP, &bp->sp_rtnl_state))
+               bnx2x_dcbx_stop_hw_tx(bp);
+
+       if (test_and_clear_bit(BNX2X_SP_RTNL_TX_RESUME, &bp->sp_rtnl_state))
+               bnx2x_dcbx_resume_hw_tx(bp);
+
        /* work which needs rtnl lock not-taken (as it takes the lock itself and
         * can be called from other contexts as well)
         */
@@ -9941,8 +9955,6 @@ static int bnx2x_prev_mark_path(struct bnx2x *bp, bool after_undi)
 
 static int bnx2x_do_flr(struct bnx2x *bp)
 {
-       int i;
-       u16 status;
        struct pci_dev *dev = bp->pdev;
 
        if (CHIP_IS_E1x(bp)) {
@@ -9957,20 +9969,8 @@ static int bnx2x_do_flr(struct bnx2x *bp)
                return -EINVAL;
        }
 
-       /* Wait for Transaction Pending bit clean */
-       for (i = 0; i < 4; i++) {
-               if (i)
-                       msleep((1 << (i - 1)) * 100);
-
-               pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status);
-               if (!(status & PCI_EXP_DEVSTA_TRPND))
-                       goto clear;
-       }
-
-       dev_err(&dev->dev,
-               "transaction is not cleared; proceeding with reset anyway\n");
-
-clear:
+       if (!pci_wait_for_pending_transaction(dev))
+               dev_err(&dev->dev, "transaction is not cleared; proceeding with reset anyway\n");
 
        BNX2X_DEV_INFO("Initiating FLR\n");
        bnx2x_fw_command(bp, DRV_MSG_CODE_INITIATE_FLR, 0);
@@ -11147,6 +11147,9 @@ static bool bnx2x_get_dropless_info(struct bnx2x *bp)
        int tmp;
        u32 cfg;
 
+       if (IS_VF(bp))
+               return 0;
+
        if (IS_MF(bp) && !CHIP_IS_E1x(bp)) {
                /* Take function: tmp = func */
                tmp = BP_ABS_FUNC(bp);
@@ -11860,34 +11863,48 @@ static int bnx2x_set_mc_list(struct bnx2x *bp)
 void bnx2x_set_rx_mode(struct net_device *dev)
 {
        struct bnx2x *bp = netdev_priv(dev);
-       u32 rx_mode = BNX2X_RX_MODE_NORMAL;
 
        if (bp->state != BNX2X_STATE_OPEN) {
                DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
                return;
+       } else {
+               /* Schedule an SP task to handle rest of change */
+               DP(NETIF_MSG_IFUP, "Scheduling an Rx mode change\n");
+               smp_mb__before_clear_bit();
+               set_bit(BNX2X_SP_RTNL_RX_MODE, &bp->sp_rtnl_state);
+               smp_mb__after_clear_bit();
+               schedule_delayed_work(&bp->sp_rtnl_task, 0);
        }
+}
+
+void bnx2x_set_rx_mode_inner(struct bnx2x *bp)
+{
+       u32 rx_mode = BNX2X_RX_MODE_NORMAL;
 
        DP(NETIF_MSG_IFUP, "dev->flags = %x\n", bp->dev->flags);
 
-       if (dev->flags & IFF_PROMISC)
+       netif_addr_lock_bh(bp->dev);
+
+       if (bp->dev->flags & IFF_PROMISC) {
                rx_mode = BNX2X_RX_MODE_PROMISC;
-       else if ((dev->flags & IFF_ALLMULTI) ||
-                ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
-                 CHIP_IS_E1(bp)))
+       } else if ((bp->dev->flags & IFF_ALLMULTI) ||
+                  ((netdev_mc_count(bp->dev) > BNX2X_MAX_MULTICAST) &&
+                   CHIP_IS_E1(bp))) {
                rx_mode = BNX2X_RX_MODE_ALLMULTI;
-       else {
+       } else {
                if (IS_PF(bp)) {
                        /* some multicasts */
                        if (bnx2x_set_mc_list(bp) < 0)
                                rx_mode = BNX2X_RX_MODE_ALLMULTI;
 
+                       /* release bh lock, as bnx2x_set_uc_list might sleep */
+                       netif_addr_unlock_bh(bp->dev);
                        if (bnx2x_set_uc_list(bp) < 0)
                                rx_mode = BNX2X_RX_MODE_PROMISC;
+                       netif_addr_lock_bh(bp->dev);
                } else {
                        /* configuring mcast to a vf involves sleeping (when we
-                        * wait for the pf's response). Since this function is
-                        * called from non sleepable context we must schedule
-                        * a work item for this purpose
+                        * wait for the pf's response).
                         */
                        smp_mb__before_clear_bit();
                        set_bit(BNX2X_SP_RTNL_VFPF_MCAST,
@@ -11905,22 +11922,20 @@ void bnx2x_set_rx_mode(struct net_device *dev)
        /* Schedule the rx_mode command */
        if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) {
                set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state);
+               netif_addr_unlock_bh(bp->dev);
                return;
        }
 
        if (IS_PF(bp)) {
                bnx2x_set_storm_rx_mode(bp);
+               netif_addr_unlock_bh(bp->dev);
        } else {
-               /* configuring rx mode to storms in a vf involves sleeping (when
-                * we wait for the pf's response). Since this function is
-                * called from non sleepable context we must schedule
-                * a work item for this purpose
+               /* VF will need to request the PF to make this change, and so
+                * the VF needs to release the bottom-half lock prior to the
+                * request (as it will likely require sleep on the VF side)
                 */
-               smp_mb__before_clear_bit();
-               set_bit(BNX2X_SP_RTNL_VFPF_STORM_RX_MODE,
-                       &bp->sp_rtnl_state);
-               smp_mb__after_clear_bit();
-               schedule_delayed_work(&bp->sp_rtnl_task, 0);
+               netif_addr_unlock_bh(bp->dev);
+               bnx2x_vfpf_storm_rx_mode(bp);
        }
 }
 
@@ -12545,16 +12560,14 @@ static int bnx2x_set_qm_cid_count(struct bnx2x *bp)
 static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev,
                                     int cnic_cnt, bool is_vf)
 {
-       int pos, index;
+       int index;
        u16 control = 0;
 
-       pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
-
        /*
         * If MSI-X is not supported - return number of SBs needed to support
         * one fast path queue: one FP queue + SB for CNIC
         */
-       if (!pos) {
+       if (!pdev->msix_cap) {
                dev_info(&pdev->dev, "no msix capability found\n");
                return 1 + cnic_cnt;
        }
@@ -12567,7 +12580,7 @@ static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev,
         * without the default SB.
         * For VFs there is no default SB, then we return (index+1).
         */
-       pci_read_config_word(pdev, pos  + PCI_MSI_FLAGS, &control);
+       pci_read_config_word(pdev, pdev->msix_cap + PCI_MSI_FLAGS, &control);
 
        index = control & PCI_MSIX_FLAGS_QSIZE;
 
index 8f03c984550f328c88764d83f04813b344445b1d..1d46b68fb7664d69ca0e26b66d6340d9d18cb233 100644 (file)
@@ -159,16 +159,6 @@ static inline void __bnx2x_exe_queue_reset_pending(
        }
 }
 
-static inline void bnx2x_exe_queue_reset_pending(struct bnx2x *bp,
-                                                struct bnx2x_exe_queue_obj *o)
-{
-       spin_lock_bh(&o->lock);
-
-       __bnx2x_exe_queue_reset_pending(bp, o);
-
-       spin_unlock_bh(&o->lock);
-}
-
 /**
  * bnx2x_exe_queue_step - execute one execution chunk atomically
  *
@@ -176,7 +166,7 @@ static inline void bnx2x_exe_queue_reset_pending(struct bnx2x *bp,
  * @o:                 queue
  * @ramrod_flags:      flags
  *
- * (Atomicity is ensured using the exe_queue->lock).
+ * (Should be called while holding the exe_queue->lock).
  */
 static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
                                       struct bnx2x_exe_queue_obj *o,
@@ -187,8 +177,6 @@ static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
 
        memset(&spacer, 0, sizeof(spacer));
 
-       spin_lock_bh(&o->lock);
-
        /* Next step should not be performed until the current is finished,
         * unless a DRV_CLEAR_ONLY bit is set. In this case we just want to
         * properly clear object internals without sending any command to the FW
@@ -200,7 +188,6 @@ static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
                        DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: resetting a pending_comp list\n");
                        __bnx2x_exe_queue_reset_pending(bp, o);
                } else {
-                       spin_unlock_bh(&o->lock);
                        return 1;
                }
        }
@@ -228,10 +215,8 @@ static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
        }
 
        /* Sanity check */
-       if (!cur_len) {
-               spin_unlock_bh(&o->lock);
+       if (!cur_len)
                return 0;
-       }
 
        rc = o->execute(bp, o->owner, &o->pending_comp, ramrod_flags);
        if (rc < 0)
@@ -245,7 +230,6 @@ static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
                 */
                __bnx2x_exe_queue_reset_pending(bp, o);
 
-       spin_unlock_bh(&o->lock);
        return rc;
 }
 
@@ -432,12 +416,219 @@ static bool bnx2x_put_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
        return true;
 }
 
+/**
+ * __bnx2x_vlan_mac_h_write_trylock - try getting the vlan mac writer lock
+ *
+ * @bp:                device handle
+ * @o:         vlan_mac object
+ *
+ * @details: Non-blocking implementation; should be called under execution
+ *           queue lock.
+ */
+static int __bnx2x_vlan_mac_h_write_trylock(struct bnx2x *bp,
+                                           struct bnx2x_vlan_mac_obj *o)
+{
+       if (o->head_reader) {
+               DP(BNX2X_MSG_SP, "vlan_mac_lock writer - There are readers; Busy\n");
+               return -EBUSY;
+       }
+
+       DP(BNX2X_MSG_SP, "vlan_mac_lock writer - Taken\n");
+       return 0;
+}
+
+/**
+ * __bnx2x_vlan_mac_h_exec_pending - execute step instead of a previous step
+ *
+ * @bp:                device handle
+ * @o:         vlan_mac object
+ *
+ * @details Should be called under execution queue lock; notice it might release
+ *          and reclaim it during its run.
+ */
+static void __bnx2x_vlan_mac_h_exec_pending(struct bnx2x *bp,
+                                           struct bnx2x_vlan_mac_obj *o)
+{
+       int rc;
+       unsigned long ramrod_flags = o->saved_ramrod_flags;
+
+       DP(BNX2X_MSG_SP, "vlan_mac_lock execute pending command with ramrod flags %lu\n",
+          ramrod_flags);
+       o->head_exe_request = false;
+       o->saved_ramrod_flags = 0;
+       rc = bnx2x_exe_queue_step(bp, &o->exe_queue, &ramrod_flags);
+       if (rc != 0) {
+               BNX2X_ERR("execution of pending commands failed with rc %d\n",
+                         rc);
+#ifdef BNX2X_STOP_ON_ERROR
+               bnx2x_panic();
+#endif
+       }
+}
+
+/**
+ * __bnx2x_vlan_mac_h_pend - Pend an execution step which couldn't run
+ *
+ * @bp:                        device handle
+ * @o:                 vlan_mac object
+ * @ramrod_flags:      ramrod flags of missed execution
+ *
+ * @details Should be called under execution queue lock.
+ */
+static void __bnx2x_vlan_mac_h_pend(struct bnx2x *bp,
+                                   struct bnx2x_vlan_mac_obj *o,
+                                   unsigned long ramrod_flags)
+{
+       o->head_exe_request = true;
+       o->saved_ramrod_flags = ramrod_flags;
+       DP(BNX2X_MSG_SP, "Placing pending execution with ramrod flags %lu\n",
+          ramrod_flags);
+}
+
+/**
+ * __bnx2x_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock
+ *
+ * @bp:                        device handle
+ * @o:                 vlan_mac object
+ *
+ * @details Should be called under execution queue lock. Notice if a pending
+ *          execution exists, it would perform it - possibly releasing and
+ *          reclaiming the execution queue lock.
+ */
+static void __bnx2x_vlan_mac_h_write_unlock(struct bnx2x *bp,
+                                           struct bnx2x_vlan_mac_obj *o)
+{
+       /* It's possible a new pending execution was added since this writer
+        * executed. If so, execute again. [Ad infinitum]
+        */
+       while (o->head_exe_request) {
+               DP(BNX2X_MSG_SP, "vlan_mac_lock - writer release encountered a pending request\n");
+               __bnx2x_vlan_mac_h_exec_pending(bp, o);
+       }
+}
+
+/**
+ * bnx2x_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock
+ *
+ * @bp:                        device handle
+ * @o:                 vlan_mac object
+ *
+ * @details Notice if a pending execution exists, it would perform it -
+ *          possibly releasing and reclaiming the execution queue lock.
+ */
+void bnx2x_vlan_mac_h_write_unlock(struct bnx2x *bp,
+                                  struct bnx2x_vlan_mac_obj *o)
+{
+       spin_lock_bh(&o->exe_queue.lock);
+       __bnx2x_vlan_mac_h_write_unlock(bp, o);
+       spin_unlock_bh(&o->exe_queue.lock);
+}
+
+/**
+ * __bnx2x_vlan_mac_h_read_lock - lock the vlan mac head list reader lock
+ *
+ * @bp:                        device handle
+ * @o:                 vlan_mac object
+ *
+ * @details Should be called under the execution queue lock. May sleep. May
+ *          release and reclaim execution queue lock during its run.
+ */
+static int __bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp,
+                                       struct bnx2x_vlan_mac_obj *o)
+{
+       /* If we got here, we're holding lock --> no WRITER exists */
+       o->head_reader++;
+       DP(BNX2X_MSG_SP, "vlan_mac_lock - locked reader - number %d\n",
+          o->head_reader);
+
+       return 0;
+}
+
+/**
+ * bnx2x_vlan_mac_h_read_lock - lock the vlan mac head list reader lock
+ *
+ * @bp:                        device handle
+ * @o:                 vlan_mac object
+ *
+ * @details May sleep. Claims and releases execution queue lock during its run.
+ */
+int bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp,
+                              struct bnx2x_vlan_mac_obj *o)
+{
+       int rc;
+
+       spin_lock_bh(&o->exe_queue.lock);
+       rc = __bnx2x_vlan_mac_h_read_lock(bp, o);
+       spin_unlock_bh(&o->exe_queue.lock);
+
+       return rc;
+}
+
+/**
+ * __bnx2x_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock
+ *
+ * @bp:                        device handle
+ * @o:                 vlan_mac object
+ *
+ * @details Should be called under execution queue lock. Notice if a pending
+ *          execution exists, it would be performed if this was the last
+ *          reader. possibly releasing and reclaiming the execution queue lock.
+ */
+static void __bnx2x_vlan_mac_h_read_unlock(struct bnx2x *bp,
+                                         struct bnx2x_vlan_mac_obj *o)
+{
+       if (!o->head_reader) {
+               BNX2X_ERR("Need to release vlan mac reader lock, but lock isn't taken\n");
+#ifdef BNX2X_STOP_ON_ERROR
+               bnx2x_panic();
+#endif
+       } else {
+               o->head_reader--;
+               DP(BNX2X_MSG_SP, "vlan_mac_lock - decreased readers to %d\n",
+                  o->head_reader);
+       }
+
+       /* It's possible a new pending execution was added, and that this reader
+        * was last - if so we need to execute the command.
+        */
+       if (!o->head_reader && o->head_exe_request) {
+               DP(BNX2X_MSG_SP, "vlan_mac_lock - reader release encountered a pending request\n");
+
+               /* Writer release will do the trick */
+               __bnx2x_vlan_mac_h_write_unlock(bp, o);
+       }
+}
+
+/**
+ * bnx2x_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock
+ *
+ * @bp:                        device handle
+ * @o:                 vlan_mac object
+ *
+ * @details Notice if a pending execution exists, it would be performed if this
+ *          was the last reader. Claims and releases the execution queue lock
+ *          during its run.
+ */
+void bnx2x_vlan_mac_h_read_unlock(struct bnx2x *bp,
+                                 struct bnx2x_vlan_mac_obj *o)
+{
+       spin_lock_bh(&o->exe_queue.lock);
+       __bnx2x_vlan_mac_h_read_unlock(bp, o);
+       spin_unlock_bh(&o->exe_queue.lock);
+}
+
 static int bnx2x_get_n_elements(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o,
                                int n, u8 *base, u8 stride, u8 size)
 {
        struct bnx2x_vlan_mac_registry_elem *pos;
        u8 *next = base;
        int counter = 0;
+       int read_lock;
+
+       DP(BNX2X_MSG_SP, "get_n_elements - taking vlan_mac_lock (reader)\n");
+       read_lock = bnx2x_vlan_mac_h_read_lock(bp, o);
+       if (read_lock != 0)
+               BNX2X_ERR("get_n_elements failed to get vlan mac reader lock; Access without lock\n");
 
        /* traverse list */
        list_for_each_entry(pos, &o->head, link) {
@@ -449,6 +640,12 @@ static int bnx2x_get_n_elements(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o,
                        next += stride + size;
                }
        }
+
+       if (read_lock == 0) {
+               DP(BNX2X_MSG_SP, "get_n_elements - releasing vlan_mac_lock (reader)\n");
+               bnx2x_vlan_mac_h_read_unlock(bp, o);
+       }
+
        return counter * ETH_ALEN;
 }
 
@@ -1397,6 +1594,32 @@ static int bnx2x_wait_vlan_mac(struct bnx2x *bp,
        return -EBUSY;
 }
 
+static int __bnx2x_vlan_mac_execute_step(struct bnx2x *bp,
+                                        struct bnx2x_vlan_mac_obj *o,
+                                        unsigned long *ramrod_flags)
+{
+       int rc = 0;
+
+       spin_lock_bh(&o->exe_queue.lock);
+
+       DP(BNX2X_MSG_SP, "vlan_mac_execute_step - trying to take writer lock\n");
+       rc = __bnx2x_vlan_mac_h_write_trylock(bp, o);
+
+       if (rc != 0) {
+               __bnx2x_vlan_mac_h_pend(bp, o, *ramrod_flags);
+
+               /* Calling function should not diffrentiate between this case
+                * and the case in which there is already a pending ramrod
+                */
+               rc = 1;
+       } else {
+               rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
+       }
+       spin_unlock_bh(&o->exe_queue.lock);
+
+       return rc;
+}
+
 /**
  * bnx2x_complete_vlan_mac - complete one VLAN-MAC ramrod
  *
@@ -1414,19 +1637,27 @@ static int bnx2x_complete_vlan_mac(struct bnx2x *bp,
        struct bnx2x_raw_obj *r = &o->raw;
        int rc;
 
+       /* Clearing the pending list & raw state should be made
+        * atomically (as execution flow assumes they represent the same).
+        */
+       spin_lock_bh(&o->exe_queue.lock);
+
        /* Reset pending list */
-       bnx2x_exe_queue_reset_pending(bp, &o->exe_queue);
+       __bnx2x_exe_queue_reset_pending(bp, &o->exe_queue);
 
        /* Clear pending */
        r->clear_pending(r);
 
+       spin_unlock_bh(&o->exe_queue.lock);
+
        /* If ramrod failed this is most likely a SW bug */
        if (cqe->message.error)
                return -EINVAL;
 
        /* Run the next bulk of pending commands if requested */
        if (test_bit(RAMROD_CONT, ramrod_flags)) {
-               rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
+               rc = __bnx2x_vlan_mac_execute_step(bp, o, ramrod_flags);
+
                if (rc < 0)
                        return rc;
        }
@@ -1719,9 +1950,8 @@ static inline int bnx2x_vlan_mac_push_new_cmd(
  * @p:
  *
  */
-int bnx2x_config_vlan_mac(
-       struct bnx2x *bp,
-       struct bnx2x_vlan_mac_ramrod_params *p)
+int bnx2x_config_vlan_mac(struct bnx2x *bp,
+                          struct bnx2x_vlan_mac_ramrod_params *p)
 {
        int rc = 0;
        struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
@@ -1752,7 +1982,8 @@ int bnx2x_config_vlan_mac(
        /* Execute commands if required */
        if (cont || test_bit(RAMROD_EXEC, ramrod_flags) ||
            test_bit(RAMROD_COMP_WAIT, ramrod_flags)) {
-               rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
+               rc = __bnx2x_vlan_mac_execute_step(bp, p->vlan_mac_obj,
+                                                  &p->ramrod_flags);
                if (rc < 0)
                        return rc;
        }
@@ -1775,8 +2006,9 @@ int bnx2x_config_vlan_mac(
                                return rc;
 
                        /* Make a next step */
-                       rc = bnx2x_exe_queue_step(bp, &o->exe_queue,
-                                                 ramrod_flags);
+                       rc = __bnx2x_vlan_mac_execute_step(bp,
+                                                          p->vlan_mac_obj,
+                                                          &p->ramrod_flags);
                        if (rc < 0)
                                return rc;
                }
@@ -1806,10 +2038,11 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
                                  unsigned long *ramrod_flags)
 {
        struct bnx2x_vlan_mac_registry_elem *pos = NULL;
-       int rc = 0;
        struct bnx2x_vlan_mac_ramrod_params p;
        struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
        struct bnx2x_exeq_elem *exeq_pos, *exeq_pos_n;
+       int read_lock;
+       int rc = 0;
 
        /* Clear pending commands first */
 
@@ -1844,6 +2077,11 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
        __clear_bit(RAMROD_EXEC, &p.ramrod_flags);
        __clear_bit(RAMROD_CONT, &p.ramrod_flags);
 
+       DP(BNX2X_MSG_SP, "vlan_mac_del_all -- taking vlan_mac_lock (reader)\n");
+       read_lock = bnx2x_vlan_mac_h_read_lock(bp, o);
+       if (read_lock != 0)
+               return read_lock;
+
        list_for_each_entry(pos, &o->head, link) {
                if (pos->vlan_mac_flags == *vlan_mac_flags) {
                        p.user_req.vlan_mac_flags = pos->vlan_mac_flags;
@@ -1851,11 +2089,15 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
                        rc = bnx2x_config_vlan_mac(bp, &p);
                        if (rc < 0) {
                                BNX2X_ERR("Failed to add a new DEL command\n");
+                               bnx2x_vlan_mac_h_read_unlock(bp, o);
                                return rc;
                        }
                }
        }
 
+       DP(BNX2X_MSG_SP, "vlan_mac_del_all -- releasing vlan_mac_lock (reader)\n");
+       bnx2x_vlan_mac_h_read_unlock(bp, o);
+
        p.ramrod_flags = *ramrod_flags;
        __set_bit(RAMROD_CONT, &p.ramrod_flags);
 
@@ -1887,6 +2129,9 @@ static inline void bnx2x_init_vlan_mac_common(struct bnx2x_vlan_mac_obj *o,
        struct bnx2x_credit_pool_obj *vlans_pool)
 {
        INIT_LIST_HEAD(&o->head);
+       o->head_reader = 0;
+       o->head_exe_request = false;
+       o->saved_ramrod_flags = 0;
 
        o->macs_pool = macs_pool;
        o->vlans_pool = vlans_pool;
index 798dfe9967336fedc4a5e07806c7216c83a955ed..533a3abd8c827b341ef0ca0ed9fc69262de1f646 100644 (file)
@@ -285,6 +285,12 @@ struct bnx2x_vlan_mac_obj {
         * entries.
         */
        struct list_head                head;
+       /* Implement a simple reader/writer lock on the head list.
+        * all these fields should only be accessed under the exe_queue lock
+        */
+       u8              head_reader; /* Num. of readers accessing head list */
+       bool            head_exe_request; /* Pending execution request. */
+       unsigned long   saved_ramrod_flags; /* Ramrods of pending execution */
 
        /* TODO: Add it's initialization in the init functions */
        struct bnx2x_exe_queue_obj      exe_queue;
@@ -1302,8 +1308,16 @@ void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
                             struct bnx2x_credit_pool_obj *macs_pool,
                             struct bnx2x_credit_pool_obj *vlans_pool);
 
+int bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp,
+                                       struct bnx2x_vlan_mac_obj *o);
+void bnx2x_vlan_mac_h_read_unlock(struct bnx2x *bp,
+                                 struct bnx2x_vlan_mac_obj *o);
+int bnx2x_vlan_mac_h_write_lock(struct bnx2x *bp,
+                               struct bnx2x_vlan_mac_obj *o);
+void bnx2x_vlan_mac_h_write_unlock(struct bnx2x *bp,
+                                         struct bnx2x_vlan_mac_obj *o);
 int bnx2x_config_vlan_mac(struct bnx2x *bp,
-                         struct bnx2x_vlan_mac_ramrod_params *p);
+                          struct bnx2x_vlan_mac_ramrod_params *p);
 
 int bnx2x_vlan_mac_move(struct bnx2x *bp,
                        struct bnx2x_vlan_mac_ramrod_params *p,
index 44104fb27947fba144575a33a0be3cf1343fe39d..fbc026c4cab2d7d3b100c635f95c6dc295cd44b3 100644 (file)
@@ -491,12 +491,20 @@ static inline void bnx2x_vfop_credit(struct bnx2x *bp,
         * and a valid credit counter
         */
        if (!vfop->rc && args->credit) {
-               int cnt = 0;
                struct list_head *pos;
+               int read_lock;
+               int cnt = 0;
+
+               read_lock = bnx2x_vlan_mac_h_read_lock(bp, obj);
+               if (read_lock)
+                       DP(BNX2X_MSG_SP, "Failed to take vlan mac read head; continuing anyway\n");
 
                list_for_each(pos, &obj->head)
                        cnt++;
 
+               if (!read_lock)
+                       bnx2x_vlan_mac_h_read_unlock(bp, obj);
+
                atomic_set(args->credit, cnt);
        }
 }
@@ -1747,11 +1755,8 @@ void bnx2x_iov_init_dq(struct bnx2x *bp)
 
 void bnx2x_iov_init_dmae(struct bnx2x *bp)
 {
-       DP(BNX2X_MSG_IOV, "SRIOV is %s\n", IS_SRIOV(bp) ? "ON" : "OFF");
-       if (!IS_SRIOV(bp))
-               return;
-
-       REG_WR(bp, DMAE_REG_BACKWARD_COMP_EN, 0);
+       if (pci_find_ext_capability(bp->pdev, PCI_EXT_CAP_ID_SRIOV))
+               REG_WR(bp, DMAE_REG_BACKWARD_COMP_EN, 0);
 }
 
 static int bnx2x_vf_bus(struct bnx2x *bp, int vfid)
@@ -3084,8 +3089,9 @@ void bnx2x_disable_sriov(struct bnx2x *bp)
        pci_disable_sriov(bp->pdev);
 }
 
-static int bnx2x_vf_ndo_sanity(struct bnx2x *bp, int vfidx,
-                              struct bnx2x_virtf *vf)
+static int bnx2x_vf_ndo_prep(struct bnx2x *bp, int vfidx,
+                            struct bnx2x_virtf **vf,
+                            struct pf_vf_bulletin_content **bulletin)
 {
        if (bp->state != BNX2X_STATE_OPEN) {
                BNX2X_ERR("vf ndo called though PF is down\n");
@@ -3103,12 +3109,22 @@ static int bnx2x_vf_ndo_sanity(struct bnx2x *bp, int vfidx,
                return -EINVAL;
        }
 
-       if (!vf) {
+       /* init members */
+       *vf = BP_VF(bp, vfidx);
+       *bulletin = BP_VF_BULLETIN(bp, vfidx);
+
+       if (!*vf) {
                BNX2X_ERR("vf ndo called but vf was null. vfidx was %d\n",
                          vfidx);
                return -EINVAL;
        }
 
+       if (!*bulletin) {
+               BNX2X_ERR("vf ndo called but Bulletin Board struct is null. vfidx was %d\n",
+                         vfidx);
+               return -EINVAL;
+       }
+
        return 0;
 }
 
@@ -3116,17 +3132,19 @@ int bnx2x_get_vf_config(struct net_device *dev, int vfidx,
                        struct ifla_vf_info *ivi)
 {
        struct bnx2x *bp = netdev_priv(dev);
-       struct bnx2x_virtf *vf = BP_VF(bp, vfidx);
-       struct bnx2x_vlan_mac_obj *mac_obj = &bnx2x_vfq(vf, 0, mac_obj);
-       struct bnx2x_vlan_mac_obj *vlan_obj = &bnx2x_vfq(vf, 0, vlan_obj);
-       struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vfidx);
+       struct bnx2x_virtf *vf = NULL;
+       struct pf_vf_bulletin_content *bulletin = NULL;
+       struct bnx2x_vlan_mac_obj *mac_obj;
+       struct bnx2x_vlan_mac_obj *vlan_obj;
        int rc;
 
-       /* sanity */
-       rc = bnx2x_vf_ndo_sanity(bp, vfidx, vf);
+       /* sanity and init */
+       rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin);
        if (rc)
                return rc;
-       if (!mac_obj || !vlan_obj || !bulletin) {
+       mac_obj = &bnx2x_vfq(vf, 0, mac_obj);
+       vlan_obj = &bnx2x_vfq(vf, 0, vlan_obj);
+       if (!mac_obj || !vlan_obj) {
                BNX2X_ERR("VF partially initialized\n");
                return -EINVAL;
        }
@@ -3183,11 +3201,11 @@ int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac)
 {
        struct bnx2x *bp = netdev_priv(dev);
        int rc, q_logical_state;
-       struct bnx2x_virtf *vf = BP_VF(bp, vfidx);
-       struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vfidx);
+       struct bnx2x_virtf *vf = NULL;
+       struct pf_vf_bulletin_content *bulletin = NULL;
 
-       /* sanity */
-       rc = bnx2x_vf_ndo_sanity(bp, vfidx, vf);
+       /* sanity and init */
+       rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin);
        if (rc)
                return rc;
        if (!is_valid_ether_addr(mac)) {
@@ -3249,11 +3267,11 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
 {
        struct bnx2x *bp = netdev_priv(dev);
        int rc, q_logical_state;
-       struct bnx2x_virtf *vf = BP_VF(bp, vfidx);
-       struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vfidx);
+       struct bnx2x_virtf *vf = NULL;
+       struct pf_vf_bulletin_content *bulletin = NULL;
 
-       /* sanity */
-       rc = bnx2x_vf_ndo_sanity(bp, vfidx, vf);
+       /* sanity and init */
+       rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin);
        if (rc)
                return rc;
 
index d78d4cf140ed6d20a0e513d3c974740cf6c145cd..4f8a5357cedc4b93e7dd592805f0ca30e40e456f 100644 (file)
@@ -1,6 +1,6 @@
 /* cnic.c: Broadcom CNIC core network driver.
  *
- * Copyright (c) 2006-2012 Broadcom Corporation
+ * Copyright (c) 2006-2013 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -1427,6 +1427,28 @@ static void cnic_reply_bnx2x_kcqes(struct cnic_dev *dev, int ulp_type,
        rcu_read_unlock();
 }
 
+static void cnic_bnx2x_set_tcp_options(struct cnic_dev *dev, int time_stamps,
+                                      int en_tcp_dack)
+{
+       struct cnic_local *cp = dev->cnic_priv;
+       struct bnx2x *bp = netdev_priv(dev->netdev);
+       u8 xstorm_flags = XSTORM_L5CM_TCP_FLAGS_WND_SCL_EN;
+       u16 tstorm_flags = 0;
+
+       if (time_stamps) {
+               xstorm_flags |= XSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
+               tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
+       }
+       if (en_tcp_dack)
+               tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_DELAYED_ACK_EN;
+
+       CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
+                XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->pfid), xstorm_flags);
+
+       CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
+                 TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->pfid), tstorm_flags);
+}
+
 static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
 {
        struct cnic_local *cp = dev->cnic_priv;
@@ -1506,6 +1528,10 @@ static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
        CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
                  hq_bds);
 
+       cnic_bnx2x_set_tcp_options(dev,
+                       req1->flags & ISCSI_KWQE_INIT1_TIME_STAMPS_ENABLE,
+                       req1->flags & ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE);
+
        return 0;
 }
 
@@ -2035,9 +2061,6 @@ static void cnic_init_storm_conn_bufs(struct cnic_dev *dev,
        xstorm_buf->pseudo_header_checksum =
                swab16(~csum_ipv6_magic(&src_ip, &dst_ip, 0, IPPROTO_TCP, 0));
 
-       if (!(kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK))
-               tstorm_buf->params |=
-                       L5CM_TSTORM_CONN_BUFFER_DELAYED_ACK_ENABLE;
        if (kwqe3->ka_timeout) {
                tstorm_buf->ka_enable = 1;
                tstorm_buf->ka_timeout = kwqe3->ka_timeout;
@@ -2084,25 +2107,6 @@ static void cnic_init_bnx2x_mac(struct cnic_dev *dev)
                 mac[0]);
 }
 
-static void cnic_bnx2x_set_tcp_timestamp(struct cnic_dev *dev, int tcp_ts)
-{
-       struct cnic_local *cp = dev->cnic_priv;
-       struct bnx2x *bp = netdev_priv(dev->netdev);
-       u8 xstorm_flags = XSTORM_L5CM_TCP_FLAGS_WND_SCL_EN;
-       u16 tstorm_flags = 0;
-
-       if (tcp_ts) {
-               xstorm_flags |= XSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
-               tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
-       }
-
-       CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
-                XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->pfid), xstorm_flags);
-
-       CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
-                 TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->pfid), tstorm_flags);
-}
-
 static int cnic_bnx2x_connect(struct cnic_dev *dev, struct kwqe *wqes[],
                              u32 num, int *work)
 {
@@ -2178,9 +2182,6 @@ static int cnic_bnx2x_connect(struct cnic_dev *dev, struct kwqe *wqes[],
        CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
                  XSTORM_ISCSI_LOCAL_VLAN_OFFSET(cp->pfid), csk->vlan_id);
 
-       cnic_bnx2x_set_tcp_timestamp(dev,
-               kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_TIME_STAMP);
-
        ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_TCP_CONNECT,
                        kwqe1->cid, ISCSI_CONNECTION_TYPE, &l5_data);
        if (!ret)
@@ -3603,6 +3604,7 @@ static int cnic_cm_create(struct cnic_dev *dev, int ulp_type, u32 cid,
        csk1->rcv_buf = DEF_RCV_BUF;
        csk1->snd_buf = DEF_SND_BUF;
        csk1->seed = DEF_SEED;
+       csk1->tcp_flags = 0;
 
        *csk = csk1;
        return 0;
@@ -4020,15 +4022,18 @@ static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
                cnic_cm_upcall(cp, csk, opcode);
                break;
 
-       case L5CM_RAMROD_CMD_ID_CLOSE:
-               if (l4kcqe->status != 0) {
-                       netdev_warn(dev->netdev, "RAMROD CLOSE compl with "
-                                   "status 0x%x\n", l4kcqe->status);
+       case L5CM_RAMROD_CMD_ID_CLOSE: {
+               struct iscsi_kcqe *l5kcqe = (struct iscsi_kcqe *) kcqe;
+
+               if (l4kcqe->status != 0 || l5kcqe->completion_status != 0) {
+                       netdev_warn(dev->netdev, "RAMROD CLOSE compl with status 0x%x completion status 0x%x\n",
+                                   l4kcqe->status, l5kcqe->completion_status);
                        opcode = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
                        /* Fall through */
                } else {
                        break;
                }
+       }
        case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
        case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
        case L4_KCQE_OPCODE_VALUE_RESET_COMP:
@@ -4219,7 +4224,7 @@ static int cnic_cm_init_bnx2x_hw(struct cnic_dev *dev)
        u32 port = CNIC_PORT(cp);
 
        cnic_init_bnx2x_mac(dev);
-       cnic_bnx2x_set_tcp_timestamp(dev, 1);
+       cnic_bnx2x_set_tcp_options(dev, 0, 1);
 
        CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
                  XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfid), 0);
@@ -5271,6 +5276,13 @@ static int cnic_register_netdev(struct cnic_dev *dev)
        if (err)
                netdev_err(dev->netdev, "register_cnic failed\n");
 
+       /* Read iSCSI config again.  On some bnx2x device, iSCSI config
+        * can change after firmware is downloaded.
+        */
+       dev->max_iscsi_conn = ethdev->max_iscsi_conn;
+       if (ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI)
+               dev->max_iscsi_conn = 0;
+
        return err;
 }
 
@@ -5628,7 +5640,7 @@ static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
 
        dev = cnic_from_netdev(netdev);
 
-       if (!dev && (event == NETDEV_REGISTER || netif_running(netdev))) {
+       if (!dev && event == NETDEV_REGISTER) {
                /* Check for the hot-plug device */
                dev = is_cnic_dev(netdev);
                if (dev) {
@@ -5644,7 +5656,7 @@ static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
                else if (event == NETDEV_UNREGISTER)
                        cnic_ulp_exit(dev);
 
-               if (event == NETDEV_UP || (new_dev && netif_running(netdev))) {
+               if (event == NETDEV_UP) {
                        if (cnic_register_netdev(dev) != 0) {
                                cnic_put(dev);
                                goto done;
@@ -5693,21 +5705,8 @@ static struct notifier_block cnic_netdev_notifier = {
 
 static void cnic_release(void)
 {
-       struct cnic_dev *dev;
        struct cnic_uio_dev *udev;
 
-       while (!list_empty(&cnic_dev_list)) {
-               dev = list_entry(cnic_dev_list.next, struct cnic_dev, list);
-               if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
-                       cnic_ulp_stop(dev);
-                       cnic_stop_hw(dev);
-               }
-
-               cnic_ulp_exit(dev);
-               cnic_unregister_netdev(dev);
-               list_del_init(&dev->list);
-               cnic_free_dev(dev);
-       }
        while (!list_empty(&cnic_udev_list)) {
                udev = list_entry(cnic_udev_list.next, struct cnic_uio_dev,
                                  list);
index 62c670619ae6600a631bad3a351ceb1abec08e5f..e7a247473596ebcbf10688935ab088ac9157a016 100644 (file)
@@ -1,6 +1,6 @@
 /* cnic.h: Broadcom CNIC core network driver.
  *
- * Copyright (c) 2006-2011 Broadcom Corporation
+ * Copyright (c) 2006-2013 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
index ede3db35d757e9c51a5f07cef320ecf323515295..95a8e4b11c9fcce45a0f24acea5bd1b86fa53ab9 100644 (file)
@@ -1,7 +1,7 @@
 
 /* cnic.c: Broadcom CNIC core network driver.
  *
- * Copyright (c) 2006-2012 Broadcom Corporation
+ * Copyright (c) 2006-2013 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -5400,8 +5400,8 @@ struct tstorm_l5cm_tcp_flags {
        u16 flags;
 #define TSTORM_L5CM_TCP_FLAGS_VLAN_ID (0xFFF<<0)
 #define TSTORM_L5CM_TCP_FLAGS_VLAN_ID_SHIFT 0
-#define TSTORM_L5CM_TCP_FLAGS_RSRV0 (0x1<<12)
-#define TSTORM_L5CM_TCP_FLAGS_RSRV0_SHIFT 12
+#define TSTORM_L5CM_TCP_FLAGS_DELAYED_ACK_EN (0x1<<12)
+#define TSTORM_L5CM_TCP_FLAGS_DELAYED_ACK_SHIFT 12
 #define TSTORM_L5CM_TCP_FLAGS_TS_ENABLED (0x1<<13)
 #define TSTORM_L5CM_TCP_FLAGS_TS_ENABLED_SHIFT 13
 #define TSTORM_L5CM_TCP_FLAGS_RSRV1 (0x3<<14)
index ec9bb9ad4bb35c852ef5f0ff62d86118ec6cbaa7..95aff7642b853c58c281e8fd78c3e9e86a8aa254 100644 (file)
@@ -1,6 +1,6 @@
 /* cnic_if.h: Broadcom CNIC core network driver.
  *
- * Copyright (c) 2006-2012 Broadcom Corporation
+ * Copyright (c) 2006-2013 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -14,8 +14,8 @@
 
 #include "bnx2x/bnx2x_mfw_req.h"
 
-#define CNIC_MODULE_VERSION    "2.5.16"
-#define CNIC_MODULE_RELDATE    "Dec 05, 2012"
+#define CNIC_MODULE_VERSION    "2.5.17"
+#define CNIC_MODULE_RELDATE    "July 28, 2013"
 
 #define CNIC_ULP_RDMA          0
 #define CNIC_ULP_ISCSI         1
@@ -238,8 +238,8 @@ struct cnic_sock {
        u16     src_port;
        u16     dst_port;
        u16     vlan_id;
-       unsigned char old_ha[6];
-       unsigned char ha[6];
+       unsigned char old_ha[ETH_ALEN];
+       unsigned char ha[ETH_ALEN];
        u32     mtu;
        u32     cid;
        u32     l5_cid;
@@ -308,7 +308,7 @@ struct cnic_dev {
 #define CNIC_F_BNX2_CLASS      3
 #define CNIC_F_BNX2X_CLASS     4
        atomic_t        ref_count;
-       u8              mac_addr[6];
+       u8              mac_addr[ETH_ALEN];
 
        int             max_iscsi_conn;
        int             max_fcoe_conn;
index 0da2214ef1b9e895903612809b21ed070cf32fa3..95b8995187d73958d048f9da89c8658c3b97ea39 100644 (file)
@@ -94,10 +94,10 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
 
 #define DRV_MODULE_NAME                "tg3"
 #define TG3_MAJ_NUM                    3
-#define TG3_MIN_NUM                    132
+#define TG3_MIN_NUM                    133
 #define DRV_MODULE_VERSION     \
        __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
-#define DRV_MODULE_RELDATE     "May 21, 2013"
+#define DRV_MODULE_RELDATE     "Jul 29, 2013"
 
 #define RESET_KIND_SHUTDOWN    0
 #define RESET_KIND_INIT                1
@@ -4226,8 +4226,6 @@ static int tg3_power_down_prepare(struct tg3 *tp)
 
 static void tg3_power_down(struct tg3 *tp)
 {
-       tg3_power_down_prepare(tp);
-
        pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
        pci_set_power_state(tp->pdev, PCI_D3hot);
 }
@@ -6095,10 +6093,12 @@ static u64 tg3_refclk_read(struct tg3 *tp)
 /* tp->lock must be held */
 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
 {
-       tw32(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_STOP);
+       u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
+
+       tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
        tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
        tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
-       tw32_f(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_RESUME);
+       tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
 }
 
 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
@@ -6214,6 +6214,59 @@ static int tg3_ptp_settime(struct ptp_clock_info *ptp,
 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
                          struct ptp_clock_request *rq, int on)
 {
+       struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
+       u32 clock_ctl;
+       int rval = 0;
+
+       switch (rq->type) {
+       case PTP_CLK_REQ_PEROUT:
+               if (rq->perout.index != 0)
+                       return -EINVAL;
+
+               tg3_full_lock(tp, 0);
+               clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
+               clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;
+
+               if (on) {
+                       u64 nsec;
+
+                       nsec = rq->perout.start.sec * 1000000000ULL +
+                              rq->perout.start.nsec;
+
+                       if (rq->perout.period.sec || rq->perout.period.nsec) {
+                               netdev_warn(tp->dev,
+                                           "Device supports only a one-shot timesync output, period must be 0\n");
+                               rval = -EINVAL;
+                               goto err_out;
+                       }
+
+                       if (nsec & (1ULL << 63)) {
+                               netdev_warn(tp->dev,
+                                           "Start value (nsec) is over limit. Maximum size of start is only 63 bits\n");
+                               rval = -EINVAL;
+                               goto err_out;
+                       }
+
+                       tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
+                       tw32(TG3_EAV_WATCHDOG0_MSB,
+                            TG3_EAV_WATCHDOG0_EN |
+                            ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));
+
+                       tw32(TG3_EAV_REF_CLCK_CTL,
+                            clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
+               } else {
+                       tw32(TG3_EAV_WATCHDOG0_MSB, 0);
+                       tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
+               }
+
+err_out:
+               tg3_full_unlock(tp);
+               return rval;
+
+       default:
+               break;
+       }
+
        return -EOPNOTSUPP;
 }
 
@@ -6223,7 +6276,7 @@ static const struct ptp_clock_info tg3_ptp_caps = {
        .max_adj        = 250000000,
        .n_alarm        = 0,
        .n_ext_ts       = 0,
-       .n_per_out      = 0,
+       .n_per_out      = 1,
        .pps            = 0,
        .adjfreq        = tg3_ptp_adjfreq,
        .adjtime        = tg3_ptp_adjtime,
@@ -10367,6 +10420,9 @@ static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
        if (tg3_flag(tp, 5755_PLUS))
                tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
 
+       if (tg3_asic_rev(tp) == ASIC_REV_5762)
+               tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;
+
        if (tg3_flag(tp, ENABLE_RSS))
                tp->rx_mode |= RX_MODE_RSS_ENABLE |
                               RX_MODE_RSS_ITBL_HASH_BITS_7 |
@@ -11502,7 +11558,7 @@ static int tg3_close(struct net_device *dev)
        memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
        memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
 
-       tg3_power_down(tp);
+       tg3_power_down_prepare(tp);
 
        tg3_carrier_off(tp);
 
@@ -11724,9 +11780,6 @@ static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
        if (tg3_flag(tp, NO_NVRAM))
                return -EINVAL;
 
-       if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
-               return -EAGAIN;
-
        offset = eeprom->offset;
        len = eeprom->len;
        eeprom->len = 0;
@@ -11784,9 +11837,6 @@ static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
        u8 *buf;
        __be32 start, end;
 
-       if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
-               return -EAGAIN;
-
        if (tg3_flag(tp, NO_NVRAM) ||
            eeprom->magic != TG3_EEPROM_MAGIC)
                return -EINVAL;
@@ -13515,7 +13565,7 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
                        tg3_phy_start(tp);
        }
        if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
-               tg3_power_down(tp);
+               tg3_power_down_prepare(tp);
 
 }
 
@@ -15917,7 +15967,7 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
         */
        if (tg3_flag(tp, 5780_CLASS)) {
                tg3_flag_set(tp, 40BIT_DMA_BUG);
-               tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
+               tp->msi_cap = tp->pdev->msi_cap;
        } else {
                struct pci_dev *bridge = NULL;
 
@@ -17547,11 +17597,6 @@ static int tg3_init_one(struct pci_dev *pdev,
            tg3_asic_rev(tp) == ASIC_REV_5762)
                tg3_flag_set(tp, PTP_CAPABLE);
 
-       if (tg3_flag(tp, 5717_PLUS)) {
-               /* Resume a low-power mode */
-               tg3_frob_aux_power(tp, false);
-       }
-
        tg3_timer_init(tp);
 
        tg3_carrier_off(tp);
@@ -17755,6 +17800,23 @@ out:
 
 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
 
+static void tg3_shutdown(struct pci_dev *pdev)
+{
+       struct net_device *dev = pci_get_drvdata(pdev);
+       struct tg3 *tp = netdev_priv(dev);
+
+       rtnl_lock();
+       netif_device_detach(dev);
+
+       if (netif_running(dev))
+               dev_close(dev);
+
+       if (system_state == SYSTEM_POWER_OFF)
+               tg3_power_down(tp);
+
+       rtnl_unlock();
+}
+
 /**
  * tg3_io_error_detected - called when PCI error is detected
  * @pdev: Pointer to PCI device
@@ -17914,6 +17976,7 @@ static struct pci_driver tg3_driver = {
        .remove         = tg3_remove_one,
        .err_handler    = &tg3_err_handler,
        .driver.pm      = &tg3_pm_ops,
+       .shutdown       = tg3_shutdown,
 };
 
 module_pci_driver(tg3_driver);
index cd63d1189aae9fdfbcceb5615d76b8d6d74704d2..ddb8be1298eab2b66eac319a4abce91a72da8896 100644 (file)
 #define  RX_MODE_RSS_ITBL_HASH_BITS_7   0x00700000
 #define  RX_MODE_RSS_ENABLE             0x00800000
 #define  RX_MODE_IPV6_CSUM_ENABLE       0x01000000
+#define  RX_MODE_IPV4_FRAG_FIX          0x02000000
 #define MAC_RX_STATUS                  0x0000046c
 #define  RX_STATUS_REMOTE_TX_XOFFED     0x00000001
 #define  RX_STATUS_XOFF_RCVD            0x00000002
 #define TG3_EAV_REF_CLCK_CTL           0x00006908
 #define  TG3_EAV_REF_CLCK_CTL_STOP      0x00000002
 #define  TG3_EAV_REF_CLCK_CTL_RESUME    0x00000004
+#define  TG3_EAV_CTL_TSYNC_GPIO_MASK    (0x3 << 16)
+#define  TG3_EAV_CTL_TSYNC_WDOG0        (1 << 17)
+
+#define TG3_EAV_WATCHDOG0_LSB          0x00006918
+#define TG3_EAV_WATCHDOG0_MSB          0x0000691c
+#define  TG3_EAV_WATCHDOG0_EN           (1 << 31)
+#define  TG3_EAV_WATCHDOG_MSB_MASK     0x7fffffff
+
 #define TG3_EAV_REF_CLK_CORRECT_CTL    0x00006928
 #define  TG3_EAV_REF_CLK_CORRECT_EN     (1 << 31)
 #define  TG3_EAV_REF_CLK_CORRECT_NEG    (1 << 30)
 
 #define TG3_EAV_REF_CLK_CORRECT_MASK   0xffffff
-/* 0x690c --> 0x7000 unused */
+
+/* 0x692c --> 0x7000 unused */
 
 /* NVRAM Control registers */
 #define NVRAM_CMD                      0x00007000
index 57cd1bff59f1cd056f5d7b68755612ef7e985426..3c07064b2bc44f2bf0d25c760e98f92a42a031ab 100644 (file)
@@ -1419,7 +1419,7 @@ static void bna_rx_sm_start_wait_entry(struct bna_rx *rx)
        bna_bfi_rx_enet_start(rx);
 }
 
-void
+static void
 bna_rx_sm_stop_wait_entry(struct bna_rx *rx)
 {
 }
@@ -1472,7 +1472,7 @@ static void bna_rx_sm_rxf_start_wait_entry(struct bna_rx *rx)
        bna_rxf_start(&rx->rxf);
 }
 
-void
+static void
 bna_rx_sm_rxf_stop_wait_entry(struct bna_rx *rx)
 {
 }
@@ -1528,7 +1528,7 @@ bna_rx_sm_start_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
        }
 }
 
-void
+static void
 bna_rx_sm_started_entry(struct bna_rx *rx)
 {
        struct bna_rxp *rxp;
@@ -1593,12 +1593,12 @@ static void bna_rx_sm_rxf_start_wait(struct bna_rx *rx,
        }
 }
 
-void
+static void
 bna_rx_sm_cleanup_wait_entry(struct bna_rx *rx)
 {
 }
 
-void
+static void
 bna_rx_sm_cleanup_wait(struct bna_rx *rx, enum bna_rx_event event)
 {
        switch (event) {
index 4058b856eb710779bf7f164ed1547749f6a26b94..76ae09999b5b86bd15c71da88368eb0a9dea63a7 100644 (file)
@@ -1157,7 +1157,7 @@ static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new,
  */
 void *cxgb_alloc_mem(unsigned long size)
 {
-       void *p = kzalloc(size, GFP_KERNEL);
+       void *p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
 
        if (!p)
                p = vzalloc(size);
index 2aafb809e067b7a204a3fb1a92412756d9ceb78f..dfd1e36f57531f0e9e9cfeed14048460d96f6e30 100644 (file)
@@ -576,6 +576,7 @@ struct adapter {
        struct l2t_data *l2t;
        void *uld_handle[CXGB4_ULD_MAX];
        struct list_head list_node;
+       struct list_head rcu_node;
 
        struct tid_info tids;
        void **tid_release_head;
index 5a3256b083f23f7e69d8013c4ca1ac0585ec33c4..0d0665ca6f1914f77fa3aa23b68254d42fcd0c86 100644 (file)
@@ -60,6 +60,7 @@
 #include <linux/workqueue.h>
 #include <net/neighbour.h>
 #include <net/netevent.h>
+#include <net/addrconf.h>
 #include <asm/uaccess.h>
 
 #include "cxgb4.h"
 #include "t4fw_api.h"
 #include "l2t.h"
 
+#include <../drivers/net/bonding/bonding.h>
+
+#ifdef DRV_VERSION
+#undef DRV_VERSION
+#endif
 #define DRV_VERSION "2.0.0-ko"
 #define DRV_DESC "Chelsio T4/T5 Network Driver"
 
@@ -400,6 +406,9 @@ static struct dentry *cxgb4_debugfs_root;
 
 static LIST_HEAD(adapter_list);
 static DEFINE_MUTEX(uld_mutex);
+/* Adapter list to be accessed from atomic context */
+static LIST_HEAD(adap_rcu_list);
+static DEFINE_SPINLOCK(adap_rcu_lock);
 static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX];
 static const char *uld_str[] = { "RDMA", "iSCSI" };
 
@@ -1133,7 +1142,7 @@ out:      release_firmware(fw);
  */
 void *t4_alloc_mem(size_t size)
 {
-       void *p = kzalloc(size, GFP_KERNEL);
+       void *p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
 
        if (!p)
                p = vzalloc(size);
@@ -3227,6 +3236,38 @@ static int tid_init(struct tid_info *t)
        return 0;
 }
 
+static int cxgb4_clip_get(const struct net_device *dev,
+                         const struct in6_addr *lip)
+{
+       struct adapter *adap;
+       struct fw_clip_cmd c;
+
+       adap = netdev2adap(dev);
+       memset(&c, 0, sizeof(c));
+       c.op_to_write = htonl(FW_CMD_OP(FW_CLIP_CMD) |
+                       FW_CMD_REQUEST | FW_CMD_WRITE);
+       c.alloc_to_len16 = htonl(F_FW_CLIP_CMD_ALLOC | FW_LEN16(c));
+       *(__be64 *)&c.ip_hi = *(__be64 *)(lip->s6_addr);
+       *(__be64 *)&c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
+       return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
+}
+
+static int cxgb4_clip_release(const struct net_device *dev,
+                             const struct in6_addr *lip)
+{
+       struct adapter *adap;
+       struct fw_clip_cmd c;
+
+       adap = netdev2adap(dev);
+       memset(&c, 0, sizeof(c));
+       c.op_to_write = htonl(FW_CMD_OP(FW_CLIP_CMD) |
+                       FW_CMD_REQUEST | FW_CMD_READ);
+       c.alloc_to_len16 = htonl(F_FW_CLIP_CMD_FREE | FW_LEN16(c));
+       *(__be64 *)&c.ip_hi = *(__be64 *)(lip->s6_addr);
+       *(__be64 *)&c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
+       return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
+}
+
 /**
  *     cxgb4_create_server - create an IP server
  *     @dev: the device
@@ -3246,6 +3287,7 @@ int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
        struct sk_buff *skb;
        struct adapter *adap;
        struct cpl_pass_open_req *req;
+       int ret;
 
        skb = alloc_skb(sizeof(*req), GFP_KERNEL);
        if (!skb)
@@ -3263,10 +3305,78 @@ int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
        req->opt0 = cpu_to_be64(TX_CHAN(chan));
        req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
                                SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
-       return t4_mgmt_tx(adap, skb);
+       ret = t4_mgmt_tx(adap, skb);
+       return net_xmit_eval(ret);
 }
 EXPORT_SYMBOL(cxgb4_create_server);
 
+/*     cxgb4_create_server6 - create an IPv6 server
+ *     @dev: the device
+ *     @stid: the server TID
+ *     @sip: local IPv6 address to bind server to
+ *     @sport: the server's TCP port
+ *     @queue: queue to direct messages from this server to
+ *
+ *     Create an IPv6 server for the given port and address.
+ *     Returns <0 on error and one of the %NET_XMIT_* values on success.
+ */
+int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
+                        const struct in6_addr *sip, __be16 sport,
+                        unsigned int queue)
+{
+       unsigned int chan;
+       struct sk_buff *skb;
+       struct adapter *adap;
+       struct cpl_pass_open_req6 *req;
+       int ret;
+
+       skb = alloc_skb(sizeof(*req), GFP_KERNEL);
+       if (!skb)
+               return -ENOMEM;
+
+       adap = netdev2adap(dev);
+       req = (struct cpl_pass_open_req6 *)__skb_put(skb, sizeof(*req));
+       INIT_TP_WR(req, 0);
+       OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6, stid));
+       req->local_port = sport;
+       req->peer_port = htons(0);
+       req->local_ip_hi = *(__be64 *)(sip->s6_addr);
+       req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8);
+       req->peer_ip_hi = cpu_to_be64(0);
+       req->peer_ip_lo = cpu_to_be64(0);
+       chan = rxq_to_chan(&adap->sge, queue);
+       req->opt0 = cpu_to_be64(TX_CHAN(chan));
+       req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
+                               SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
+       ret = t4_mgmt_tx(adap, skb);
+       return net_xmit_eval(ret);
+}
+EXPORT_SYMBOL(cxgb4_create_server6);
+
+int cxgb4_remove_server(const struct net_device *dev, unsigned int stid,
+                       unsigned int queue, bool ipv6)
+{
+       struct sk_buff *skb;
+       struct adapter *adap;
+       struct cpl_close_listsvr_req *req;
+       int ret;
+
+       adap = netdev2adap(dev);
+
+       skb = alloc_skb(sizeof(*req), GFP_KERNEL);
+       if (!skb)
+               return -ENOMEM;
+
+       req = (struct cpl_close_listsvr_req *)__skb_put(skb, sizeof(*req));
+       INIT_TP_WR(req, 0);
+       OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, stid));
+       req->reply_ctrl = htons(NO_REPLY(0) | (ipv6 ? LISTSVR_IPV6(1) :
+                               LISTSVR_IPV6(0)) | QUEUENO(queue));
+       ret = t4_mgmt_tx(adap, skb);
+       return net_xmit_eval(ret);
+}
+EXPORT_SYMBOL(cxgb4_remove_server);
+
 /**
  *     cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
  *     @mtus: the HW MTU table
@@ -3721,6 +3831,10 @@ static void attach_ulds(struct adapter *adap)
 {
        unsigned int i;
 
+       spin_lock(&adap_rcu_lock);
+       list_add_tail_rcu(&adap->rcu_node, &adap_rcu_list);
+       spin_unlock(&adap_rcu_lock);
+
        mutex_lock(&uld_mutex);
        list_add_tail(&adap->list_node, &adapter_list);
        for (i = 0; i < CXGB4_ULD_MAX; i++)
@@ -3746,6 +3860,10 @@ static void detach_ulds(struct adapter *adap)
                netevent_registered = false;
        }
        mutex_unlock(&uld_mutex);
+
+       spin_lock(&adap_rcu_lock);
+       list_del_rcu(&adap->rcu_node);
+       spin_unlock(&adap_rcu_lock);
 }
 
 static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
@@ -3809,6 +3927,168 @@ int cxgb4_unregister_uld(enum cxgb4_uld type)
 }
 EXPORT_SYMBOL(cxgb4_unregister_uld);
 
+/* Check if the netdev on which the event occurred belongs to us or not.
+ * Return success (1) if it belongs, otherwise failure (0).
+ */
+static int cxgb4_netdev(struct net_device *netdev)
+{
+       struct adapter *adap;
+       int i;
+
+       spin_lock(&adap_rcu_lock);
+       list_for_each_entry_rcu(adap, &adap_rcu_list, rcu_node)
+               for (i = 0; i < MAX_NPORTS; i++)
+                       if (adap->port[i] == netdev) {
+                               spin_unlock(&adap_rcu_lock);
+                               return 1;
+                       }
+       spin_unlock(&adap_rcu_lock);
+       return 0;
+}
+
+static int clip_add(struct net_device *event_dev, struct inet6_ifaddr *ifa,
+                   unsigned long event)
+{
+       int ret = NOTIFY_DONE;
+
+       rcu_read_lock();
+       if (cxgb4_netdev(event_dev)) {
+               switch (event) {
+               case NETDEV_UP:
+                       ret = cxgb4_clip_get(event_dev,
+                               (const struct in6_addr *)ifa->addr.s6_addr);
+                       if (ret < 0) {
+                               rcu_read_unlock();
+                               return ret;
+                       }
+                       ret = NOTIFY_OK;
+                       break;
+               case NETDEV_DOWN:
+                       cxgb4_clip_release(event_dev,
+                               (const struct in6_addr *)ifa->addr.s6_addr);
+                       ret = NOTIFY_OK;
+                       break;
+               default:
+                       break;
+               }
+       }
+       rcu_read_unlock();
+       return ret;
+}
+
+static int cxgb4_inet6addr_handler(struct notifier_block *this,
+               unsigned long event, void *data)
+{
+       struct inet6_ifaddr *ifa = data;
+       struct net_device *event_dev;
+       int ret = NOTIFY_DONE;
+       struct bonding *bond = netdev_priv(ifa->idev->dev);
+       struct slave *slave;
+       struct pci_dev *first_pdev = NULL;
+
+       if (ifa->idev->dev->priv_flags & IFF_802_1Q_VLAN) {
+               event_dev = vlan_dev_real_dev(ifa->idev->dev);
+               ret = clip_add(event_dev, ifa, event);
+       } else if (ifa->idev->dev->flags & IFF_MASTER) {
+               /* It is possible that two different adapters are bonded in one
+                * bond. We need to find such different adapters and add clip
+                * in all of them only once.
+                */
+               read_lock(&bond->lock);
+               bond_for_each_slave(bond, slave) {
+                       if (!first_pdev) {
+                               ret = clip_add(slave->dev, ifa, event);
+                               /* Only initialize first_pdev when clip_add
+                                * succeeds, since that means the slave is
+                                * one of our devices.
+                                */
+                               if (ret == NOTIFY_OK)
+                                       first_pdev = to_pci_dev(
+                                                       slave->dev->dev.parent);
+                       } else if (first_pdev !=
+                                  to_pci_dev(slave->dev->dev.parent))
+                                       ret = clip_add(slave->dev, ifa, event);
+               }
+               read_unlock(&bond->lock);
+       } else
+               ret = clip_add(ifa->idev->dev, ifa, event);
+
+       return ret;
+}
+
+static struct notifier_block cxgb4_inet6addr_notifier = {
+       .notifier_call = cxgb4_inet6addr_handler
+};
+
+/* Retrieves IPv6 addresses from a root device (bond, vlan) associated with
+ * a physical device.
+ * The physical device reference is needed to send the actual CLIP command.
+ */
+static int update_dev_clip(struct net_device *root_dev, struct net_device *dev)
+{
+       struct inet6_dev *idev = NULL;
+       struct inet6_ifaddr *ifa;
+       int ret = 0;
+
+       idev = __in6_dev_get(root_dev);
+       if (!idev)
+               return ret;
+
+       read_lock_bh(&idev->lock);
+       list_for_each_entry(ifa, &idev->addr_list, if_list) {
+               ret = cxgb4_clip_get(dev,
+                               (const struct in6_addr *)ifa->addr.s6_addr);
+               if (ret < 0)
+                       break;
+       }
+       read_unlock_bh(&idev->lock);
+
+       return ret;
+}
+
+static int update_root_dev_clip(struct net_device *dev)
+{
+       struct net_device *root_dev = NULL;
+       int i, ret = 0;
+
+       /* First populate the real net device's IPv6 addresses */
+       ret = update_dev_clip(dev, dev);
+       if (ret)
+               return ret;
+
+       /* Parse all bond and vlan devices layered on top of the physical dev */
+       for (i = 0; i < VLAN_N_VID; i++) {
+               root_dev = __vlan_find_dev_deep(dev, htons(ETH_P_8021Q), i);
+               if (!root_dev)
+                       continue;
+
+               ret = update_dev_clip(root_dev, dev);
+               if (ret)
+                       break;
+       }
+       return ret;
+}
+
+static void update_clip(const struct adapter *adap)
+{
+       int i;
+       struct net_device *dev;
+       int ret;
+
+       rcu_read_lock();
+
+       for (i = 0; i < MAX_NPORTS; i++) {
+               dev = adap->port[i];
+               ret = 0;
+
+               if (dev)
+                       ret = update_root_dev_clip(dev);
+
+               if (ret < 0)
+                       break;
+       }
+       rcu_read_unlock();
+}
+
 /**
  *     cxgb_up - enable the adapter
  *     @adap: adapter being enabled
@@ -3854,6 +4134,7 @@ static int cxgb_up(struct adapter *adap)
        t4_intr_enable(adap);
        adap->flags |= FULL_INIT_DONE;
        notify_ulds(adap, CXGB4_STATE_UP);
+       update_clip(adap);
  out:
        return err;
  irq_err:
@@ -5870,11 +6151,15 @@ static int __init cxgb4_init_module(void)
        ret = pci_register_driver(&cxgb4_driver);
        if (ret < 0)
                debugfs_remove(cxgb4_debugfs_root);
+
+       register_inet6addr_notifier(&cxgb4_inet6addr_notifier);
+
        return ret;
 }
 
 static void __exit cxgb4_cleanup_module(void)
 {
+       unregister_inet6addr_notifier(&cxgb4_inet6addr_notifier);
        pci_unregister_driver(&cxgb4_driver);
        debugfs_remove(cxgb4_debugfs_root);  /* NULL ok */
        flush_workqueue(workq);
index 4faf4d067ee71947c74c0a3e6e234208b315bee2..6f21f2451c3052a24ecd2e8d8d16ee9bdf9996fe 100644 (file)
@@ -154,6 +154,11 @@ struct in6_addr;
 int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
                        __be32 sip, __be16 sport, __be16 vlan,
                        unsigned int queue);
+int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
+                        const struct in6_addr *sip, __be16 sport,
+                        unsigned int queue);
+int cxgb4_remove_server(const struct net_device *dev, unsigned int stid,
+                       unsigned int queue, bool ipv6);
 int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
                               __be32 sip, __be16 sport, __be16 vlan,
                               unsigned int queue,
index 01d484441200d4675faff505338fb7b7a0789078..cd6874b571ee2585c3ef1c9f8c418c7c643b8d46 100644 (file)
@@ -320,6 +320,21 @@ struct cpl_act_open_req6 {
        __be32 opt2;
 };
 
+struct cpl_t5_act_open_req6 {
+       WR_HDR;
+       union opcode_tid ot;
+       __be16 local_port;
+       __be16 peer_port;
+       __be64 local_ip_hi;
+       __be64 local_ip_lo;
+       __be64 peer_ip_hi;
+       __be64 peer_ip_lo;
+       __be64 opt0;
+       __be32 rsvd;
+       __be32 opt2;
+       __be64 params;
+};
+
 struct cpl_act_open_rpl {
        union opcode_tid ot;
        __be32 atid_status;
@@ -405,7 +420,7 @@ struct cpl_close_listsvr_req {
        WR_HDR;
        union opcode_tid ot;
        __be16 reply_ctrl;
-#define LISTSVR_IPV6 (1 << 14)
+#define LISTSVR_IPV6(x) ((x) << 14)
        __be16 rsvd;
 };
 
index d1c755f78aaf63fb02f7a3a6451a08de16ce17bc..6f77ac487743edfbe899f470805f58cc38d9c61b 100644 (file)
@@ -616,6 +616,7 @@ enum fw_cmd_opcodes {
        FW_RSS_IND_TBL_CMD             = 0x20,
        FW_RSS_GLB_CONFIG_CMD          = 0x22,
        FW_RSS_VI_CONFIG_CMD           = 0x23,
+       FW_CLIP_CMD                    = 0x28,
        FW_LASTC2E_CMD                 = 0x40,
        FW_ERROR_CMD                   = 0x80,
        FW_DEBUG_CMD                   = 0x81,
@@ -2062,6 +2063,28 @@ struct fw_rss_vi_config_cmd {
        } u;
 };
 
+struct fw_clip_cmd {
+       __be32 op_to_write;
+       __be32 alloc_to_len16;
+       __be64 ip_hi;
+       __be64 ip_lo;
+       __be32 r4[2];
+};
+
+#define S_FW_CLIP_CMD_ALLOC     31
+#define M_FW_CLIP_CMD_ALLOC     0x1
+#define V_FW_CLIP_CMD_ALLOC(x)  ((x) << S_FW_CLIP_CMD_ALLOC)
+#define G_FW_CLIP_CMD_ALLOC(x)  \
+       (((x) >> S_FW_CLIP_CMD_ALLOC) & M_FW_CLIP_CMD_ALLOC)
+#define F_FW_CLIP_CMD_ALLOC     V_FW_CLIP_CMD_ALLOC(1U)
+
+#define S_FW_CLIP_CMD_FREE      30
+#define M_FW_CLIP_CMD_FREE      0x1
+#define V_FW_CLIP_CMD_FREE(x)   ((x) << S_FW_CLIP_CMD_FREE)
+#define G_FW_CLIP_CMD_FREE(x)   \
+       (((x) >> S_FW_CLIP_CMD_FREE) & M_FW_CLIP_CMD_FREE)
+#define F_FW_CLIP_CMD_FREE      V_FW_CLIP_CMD_FREE(1U)
+
 enum fw_error_type {
        FW_ERROR_TYPE_EXCEPTION         = 0x0,
        FW_ERROR_TYPE_HWMODULE          = 0x1,
index 9d4974bba247904162de48e382ae8989be8f1a7b..239e1e46545de438a2482c5c0a721de9800335cc 100644 (file)
@@ -1,5 +1,6 @@
 obj-$(CONFIG_ENIC) := enic.o
 
 enic-y := enic_main.o vnic_cq.o vnic_intr.o vnic_wq.o \
-       enic_res.o enic_dev.o enic_pp.o vnic_dev.o vnic_rq.o vnic_vic.o
+       enic_res.o enic_dev.o enic_pp.o vnic_dev.o vnic_rq.o vnic_vic.o \
+       enic_ethtool.o enic_api.o
 
index afe9b1662b8cef8c2ba09c73febdca3583f2642a..be167318015a47aa63c09164293cc64f0939c387 100644 (file)
@@ -32,8 +32,8 @@
 
 #define DRV_NAME               "enic"
 #define DRV_DESCRIPTION                "Cisco VIC Ethernet NIC Driver"
-#define DRV_VERSION            "2.1.1.39"
-#define DRV_COPYRIGHT          "Copyright 2008-2011 Cisco Systems, Inc"
+#define DRV_VERSION            "2.1.1.43"
+#define DRV_COPYRIGHT          "Copyright 2008-2013 Cisco Systems, Inc"
 
 #define ENIC_BARS_MAX          6
 
@@ -96,6 +96,7 @@ struct enic {
 #ifdef CONFIG_PCI_IOV
        u16 num_vfs;
 #endif
+       spinlock_t enic_api_lock;
        struct enic_port_profile *pp;
 
        /* work queue cache line section */
@@ -127,9 +128,57 @@ static inline struct device *enic_get_dev(struct enic *enic)
        return &(enic->pdev->dev);
 }
 
+static inline unsigned int enic_cq_rq(struct enic *enic, unsigned int rq)
+{
+       return rq;
+}
+
+static inline unsigned int enic_cq_wq(struct enic *enic, unsigned int wq)
+{
+       return enic->rq_count + wq;
+}
+
+static inline unsigned int enic_legacy_io_intr(void)
+{
+       return 0;
+}
+
+static inline unsigned int enic_legacy_err_intr(void)
+{
+       return 1;
+}
+
+static inline unsigned int enic_legacy_notify_intr(void)
+{
+       return 2;
+}
+
+static inline unsigned int enic_msix_rq_intr(struct enic *enic,
+       unsigned int rq)
+{
+       return enic->cq[enic_cq_rq(enic, rq)].interrupt_offset;
+}
+
+static inline unsigned int enic_msix_wq_intr(struct enic *enic,
+       unsigned int wq)
+{
+       return enic->cq[enic_cq_wq(enic, wq)].interrupt_offset;
+}
+
+static inline unsigned int enic_msix_err_intr(struct enic *enic)
+{
+       return enic->rq_count + enic->wq_count;
+}
+
+static inline unsigned int enic_msix_notify_intr(struct enic *enic)
+{
+       return enic->rq_count + enic->wq_count + 1;
+}
+
 void enic_reset_addr_lists(struct enic *enic);
 int enic_sriov_enabled(struct enic *enic);
 int enic_is_valid_vf(struct enic *enic, int vf);
 int enic_is_dynamic(struct enic *enic);
+void enic_set_ethtool_ops(struct net_device *netdev);
 
 #endif /* _ENIC_H_ */
diff --git a/drivers/net/ethernet/cisco/enic/enic_api.c b/drivers/net/ethernet/cisco/enic/enic_api.c
new file mode 100644 (file)
index 0000000..e13efbd
--- /dev/null
@@ -0,0 +1,48 @@
+/**
+ * Copyright 2013 Cisco Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/netdevice.h>
+#include <linux/spinlock.h>
+
+#include "vnic_dev.h"
+#include "vnic_devcmd.h"
+
+#include "enic_res.h"
+#include "enic.h"
+#include "enic_api.h"
+
+int enic_api_devcmd_proxy_by_index(struct net_device *netdev, int vf,
+       enum vnic_devcmd_cmd cmd, u64 *a0, u64 *a1, int wait)
+{
+       int err;
+       struct enic *enic = netdev_priv(netdev);
+       struct vnic_dev *vdev = enic->vdev;
+
+       spin_lock(&enic->enic_api_lock);
+       spin_lock(&enic->devcmd_lock);
+
+       vnic_dev_cmd_proxy_by_index_start(vdev, vf);
+       err = vnic_dev_cmd(vdev, cmd, a0, a1, wait);
+       vnic_dev_cmd_proxy_end(vdev);
+
+       spin_unlock(&enic->devcmd_lock);
+       spin_unlock(&enic->enic_api_lock);
+
+       return err;
+}
+EXPORT_SYMBOL(enic_api_devcmd_proxy_by_index);
diff --git a/drivers/net/ethernet/cisco/enic/enic_api.h b/drivers/net/ethernet/cisco/enic/enic_api.h
new file mode 100644 (file)
index 0000000..6b9f925
--- /dev/null
@@ -0,0 +1,30 @@
+/**
+ * Copyright 2013 Cisco Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef __ENIC_API_H__
+#define __ENIC_API_H__
+
+#include <linux/netdevice.h>
+
+#include "vnic_dev.h"
+#include "vnic_devcmd.h"
+
+int enic_api_devcmd_proxy_by_index(struct net_device *netdev, int vf,
+       enum vnic_devcmd_cmd cmd, u64 *a0, u64 *a1, int wait);
+
+#endif
index 08bded051b93c1c0b3436056f20655fc08691a24..129b14a4efb088ef4c83212090a4c5a4125bee1a 100644 (file)
@@ -20,6 +20,7 @@
 #define _ENIC_DEV_H_
 
 #include "vnic_dev.h"
+#include "vnic_vic.h"
 
 /*
  * Calls the devcmd function given by argument vnicdevcmdfn.
diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
new file mode 100644 (file)
index 0000000..47e3562
--- /dev/null
@@ -0,0 +1,257 @@
+/**
+ * Copyright 2013 Cisco Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+
+#include "enic_res.h"
+#include "enic.h"
+#include "enic_dev.h"
+
+struct enic_stat {
+       char name[ETH_GSTRING_LEN];
+       unsigned int index;
+};
+
+#define ENIC_TX_STAT(stat) { \
+       .name = #stat, \
+       .index = offsetof(struct vnic_tx_stats, stat) / sizeof(u64) \
+}
+
+#define ENIC_RX_STAT(stat) { \
+       .name = #stat, \
+       .index = offsetof(struct vnic_rx_stats, stat) / sizeof(u64) \
+}
+
+static const struct enic_stat enic_tx_stats[] = {
+       ENIC_TX_STAT(tx_frames_ok),
+       ENIC_TX_STAT(tx_unicast_frames_ok),
+       ENIC_TX_STAT(tx_multicast_frames_ok),
+       ENIC_TX_STAT(tx_broadcast_frames_ok),
+       ENIC_TX_STAT(tx_bytes_ok),
+       ENIC_TX_STAT(tx_unicast_bytes_ok),
+       ENIC_TX_STAT(tx_multicast_bytes_ok),
+       ENIC_TX_STAT(tx_broadcast_bytes_ok),
+       ENIC_TX_STAT(tx_drops),
+       ENIC_TX_STAT(tx_errors),
+       ENIC_TX_STAT(tx_tso),
+};
+
+static const struct enic_stat enic_rx_stats[] = {
+       ENIC_RX_STAT(rx_frames_ok),
+       ENIC_RX_STAT(rx_frames_total),
+       ENIC_RX_STAT(rx_unicast_frames_ok),
+       ENIC_RX_STAT(rx_multicast_frames_ok),
+       ENIC_RX_STAT(rx_broadcast_frames_ok),
+       ENIC_RX_STAT(rx_bytes_ok),
+       ENIC_RX_STAT(rx_unicast_bytes_ok),
+       ENIC_RX_STAT(rx_multicast_bytes_ok),
+       ENIC_RX_STAT(rx_broadcast_bytes_ok),
+       ENIC_RX_STAT(rx_drop),
+       ENIC_RX_STAT(rx_no_bufs),
+       ENIC_RX_STAT(rx_errors),
+       ENIC_RX_STAT(rx_rss),
+       ENIC_RX_STAT(rx_crc_errors),
+       ENIC_RX_STAT(rx_frames_64),
+       ENIC_RX_STAT(rx_frames_127),
+       ENIC_RX_STAT(rx_frames_255),
+       ENIC_RX_STAT(rx_frames_511),
+       ENIC_RX_STAT(rx_frames_1023),
+       ENIC_RX_STAT(rx_frames_1518),
+       ENIC_RX_STAT(rx_frames_to_max),
+};
+
+static const unsigned int enic_n_tx_stats = ARRAY_SIZE(enic_tx_stats);
+static const unsigned int enic_n_rx_stats = ARRAY_SIZE(enic_rx_stats);
+
+static int enic_get_settings(struct net_device *netdev,
+       struct ethtool_cmd *ecmd)
+{
+       struct enic *enic = netdev_priv(netdev);
+
+       ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
+       ecmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
+       ecmd->port = PORT_FIBRE;
+       ecmd->transceiver = XCVR_EXTERNAL;
+
+       if (netif_carrier_ok(netdev)) {
+               ethtool_cmd_speed_set(ecmd, vnic_dev_port_speed(enic->vdev));
+               ecmd->duplex = DUPLEX_FULL;
+       } else {
+               ethtool_cmd_speed_set(ecmd, -1);
+               ecmd->duplex = -1;
+       }
+
+       ecmd->autoneg = AUTONEG_DISABLE;
+
+       return 0;
+}
+
+static void enic_get_drvinfo(struct net_device *netdev,
+       struct ethtool_drvinfo *drvinfo)
+{
+       struct enic *enic = netdev_priv(netdev);
+       struct vnic_devcmd_fw_info *fw_info;
+
+       enic_dev_fw_info(enic, &fw_info);
+
+       strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+       strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
+       strlcpy(drvinfo->fw_version, fw_info->fw_version,
+               sizeof(drvinfo->fw_version));
+       strlcpy(drvinfo->bus_info, pci_name(enic->pdev),
+               sizeof(drvinfo->bus_info));
+}
+
+static void enic_get_strings(struct net_device *netdev, u32 stringset,
+       u8 *data)
+{
+       unsigned int i;
+
+       switch (stringset) {
+       case ETH_SS_STATS:
+               for (i = 0; i < enic_n_tx_stats; i++) {
+                       memcpy(data, enic_tx_stats[i].name, ETH_GSTRING_LEN);
+                       data += ETH_GSTRING_LEN;
+               }
+               for (i = 0; i < enic_n_rx_stats; i++) {
+                       memcpy(data, enic_rx_stats[i].name, ETH_GSTRING_LEN);
+                       data += ETH_GSTRING_LEN;
+               }
+               break;
+       }
+}
+
+static int enic_get_sset_count(struct net_device *netdev, int sset)
+{
+       switch (sset) {
+       case ETH_SS_STATS:
+               return enic_n_tx_stats + enic_n_rx_stats;
+       default:
+               return -EOPNOTSUPP;
+       }
+}
+
+static void enic_get_ethtool_stats(struct net_device *netdev,
+       struct ethtool_stats *stats, u64 *data)
+{
+       struct enic *enic = netdev_priv(netdev);
+       struct vnic_stats *vstats;
+       unsigned int i;
+
+       enic_dev_stats_dump(enic, &vstats);
+
+       for (i = 0; i < enic_n_tx_stats; i++)
+               *(data++) = ((u64 *)&vstats->tx)[enic_tx_stats[i].index];
+       for (i = 0; i < enic_n_rx_stats; i++)
+               *(data++) = ((u64 *)&vstats->rx)[enic_rx_stats[i].index];
+}
+
+static u32 enic_get_msglevel(struct net_device *netdev)
+{
+       struct enic *enic = netdev_priv(netdev);
+       return enic->msg_enable;
+}
+
+static void enic_set_msglevel(struct net_device *netdev, u32 value)
+{
+       struct enic *enic = netdev_priv(netdev);
+       enic->msg_enable = value;
+}
+
+static int enic_get_coalesce(struct net_device *netdev,
+       struct ethtool_coalesce *ecmd)
+{
+       struct enic *enic = netdev_priv(netdev);
+
+       ecmd->tx_coalesce_usecs = enic->tx_coalesce_usecs;
+       ecmd->rx_coalesce_usecs = enic->rx_coalesce_usecs;
+
+       return 0;
+}
+
+static int enic_set_coalesce(struct net_device *netdev,
+       struct ethtool_coalesce *ecmd)
+{
+       struct enic *enic = netdev_priv(netdev);
+       u32 tx_coalesce_usecs;
+       u32 rx_coalesce_usecs;
+       unsigned int i, intr;
+
+       tx_coalesce_usecs = min_t(u32, ecmd->tx_coalesce_usecs,
+               vnic_dev_get_intr_coal_timer_max(enic->vdev));
+       rx_coalesce_usecs = min_t(u32, ecmd->rx_coalesce_usecs,
+               vnic_dev_get_intr_coal_timer_max(enic->vdev));
+
+       switch (vnic_dev_get_intr_mode(enic->vdev)) {
+       case VNIC_DEV_INTR_MODE_INTX:
+               if (tx_coalesce_usecs != rx_coalesce_usecs)
+                       return -EINVAL;
+
+               intr = enic_legacy_io_intr();
+               vnic_intr_coalescing_timer_set(&enic->intr[intr],
+                       tx_coalesce_usecs);
+               break;
+       case VNIC_DEV_INTR_MODE_MSI:
+               if (tx_coalesce_usecs != rx_coalesce_usecs)
+                       return -EINVAL;
+
+               vnic_intr_coalescing_timer_set(&enic->intr[0],
+                       tx_coalesce_usecs);
+               break;
+       case VNIC_DEV_INTR_MODE_MSIX:
+               for (i = 0; i < enic->wq_count; i++) {
+                       intr = enic_msix_wq_intr(enic, i);
+                       vnic_intr_coalescing_timer_set(&enic->intr[intr],
+                               tx_coalesce_usecs);
+               }
+
+               for (i = 0; i < enic->rq_count; i++) {
+                       intr = enic_msix_rq_intr(enic, i);
+                       vnic_intr_coalescing_timer_set(&enic->intr[intr],
+                               rx_coalesce_usecs);
+               }
+
+               break;
+       default:
+               break;
+       }
+
+       enic->tx_coalesce_usecs = tx_coalesce_usecs;
+       enic->rx_coalesce_usecs = rx_coalesce_usecs;
+
+       return 0;
+}
+
+static const struct ethtool_ops enic_ethtool_ops = {
+       .get_settings = enic_get_settings,
+       .get_drvinfo = enic_get_drvinfo,
+       .get_msglevel = enic_get_msglevel,
+       .set_msglevel = enic_set_msglevel,
+       .get_link = ethtool_op_get_link,
+       .get_strings = enic_get_strings,
+       .get_sset_count = enic_get_sset_count,
+       .get_ethtool_stats = enic_get_ethtool_stats,
+       .get_coalesce = enic_get_coalesce,
+       .set_coalesce = enic_set_coalesce,
+};
+
+void enic_set_ethtool_ops(struct net_device *netdev)
+{
+       SET_ETHTOOL_OPS(netdev, &enic_ethtool_ops);
+}
index 992ec2ee64d919333ec1c28151a9a3acd52d25e0..bcf15b176f4147da05431b037354e2bfa9dc5734 100644 (file)
@@ -31,7 +31,6 @@
 #include <linux/if.h>
 #include <linux/if_ether.h>
 #include <linux/if_vlan.h>
-#include <linux/ethtool.h>
 #include <linux/in.h>
 #include <linux/ip.h>
 #include <linux/ipv6.h>
@@ -73,57 +72,6 @@ MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_VERSION);
 MODULE_DEVICE_TABLE(pci, enic_id_table);
 
-struct enic_stat {
-       char name[ETH_GSTRING_LEN];
-       unsigned int offset;
-};
-
-#define ENIC_TX_STAT(stat)     \
-       { .name = #stat, .offset = offsetof(struct vnic_tx_stats, stat) / 8 }
-#define ENIC_RX_STAT(stat)     \
-       { .name = #stat, .offset = offsetof(struct vnic_rx_stats, stat) / 8 }
-
-static const struct enic_stat enic_tx_stats[] = {
-       ENIC_TX_STAT(tx_frames_ok),
-       ENIC_TX_STAT(tx_unicast_frames_ok),
-       ENIC_TX_STAT(tx_multicast_frames_ok),
-       ENIC_TX_STAT(tx_broadcast_frames_ok),
-       ENIC_TX_STAT(tx_bytes_ok),
-       ENIC_TX_STAT(tx_unicast_bytes_ok),
-       ENIC_TX_STAT(tx_multicast_bytes_ok),
-       ENIC_TX_STAT(tx_broadcast_bytes_ok),
-       ENIC_TX_STAT(tx_drops),
-       ENIC_TX_STAT(tx_errors),
-       ENIC_TX_STAT(tx_tso),
-};
-
-static const struct enic_stat enic_rx_stats[] = {
-       ENIC_RX_STAT(rx_frames_ok),
-       ENIC_RX_STAT(rx_frames_total),
-       ENIC_RX_STAT(rx_unicast_frames_ok),
-       ENIC_RX_STAT(rx_multicast_frames_ok),
-       ENIC_RX_STAT(rx_broadcast_frames_ok),
-       ENIC_RX_STAT(rx_bytes_ok),
-       ENIC_RX_STAT(rx_unicast_bytes_ok),
-       ENIC_RX_STAT(rx_multicast_bytes_ok),
-       ENIC_RX_STAT(rx_broadcast_bytes_ok),
-       ENIC_RX_STAT(rx_drop),
-       ENIC_RX_STAT(rx_no_bufs),
-       ENIC_RX_STAT(rx_errors),
-       ENIC_RX_STAT(rx_rss),
-       ENIC_RX_STAT(rx_crc_errors),
-       ENIC_RX_STAT(rx_frames_64),
-       ENIC_RX_STAT(rx_frames_127),
-       ENIC_RX_STAT(rx_frames_255),
-       ENIC_RX_STAT(rx_frames_511),
-       ENIC_RX_STAT(rx_frames_1023),
-       ENIC_RX_STAT(rx_frames_1518),
-       ENIC_RX_STAT(rx_frames_to_max),
-};
-
-static const unsigned int enic_n_tx_stats = ARRAY_SIZE(enic_tx_stats);
-static const unsigned int enic_n_rx_stats = ARRAY_SIZE(enic_rx_stats);
-
 int enic_is_dynamic(struct enic *enic)
 {
        return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_DYN;
@@ -148,222 +96,6 @@ int enic_is_valid_vf(struct enic *enic, int vf)
 #endif
 }
 
-static inline unsigned int enic_cq_rq(struct enic *enic, unsigned int rq)
-{
-       return rq;
-}
-
-static inline unsigned int enic_cq_wq(struct enic *enic, unsigned int wq)
-{
-       return enic->rq_count + wq;
-}
-
-static inline unsigned int enic_legacy_io_intr(void)
-{
-       return 0;
-}
-
-static inline unsigned int enic_legacy_err_intr(void)
-{
-       return 1;
-}
-
-static inline unsigned int enic_legacy_notify_intr(void)
-{
-       return 2;
-}
-
-static inline unsigned int enic_msix_rq_intr(struct enic *enic, unsigned int rq)
-{
-       return enic->cq[enic_cq_rq(enic, rq)].interrupt_offset;
-}
-
-static inline unsigned int enic_msix_wq_intr(struct enic *enic, unsigned int wq)
-{
-       return enic->cq[enic_cq_wq(enic, wq)].interrupt_offset;
-}
-
-static inline unsigned int enic_msix_err_intr(struct enic *enic)
-{
-       return enic->rq_count + enic->wq_count;
-}
-
-static inline unsigned int enic_msix_notify_intr(struct enic *enic)
-{
-       return enic->rq_count + enic->wq_count + 1;
-}
-
-static int enic_get_settings(struct net_device *netdev,
-       struct ethtool_cmd *ecmd)
-{
-       struct enic *enic = netdev_priv(netdev);
-
-       ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
-       ecmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
-       ecmd->port = PORT_FIBRE;
-       ecmd->transceiver = XCVR_EXTERNAL;
-
-       if (netif_carrier_ok(netdev)) {
-               ethtool_cmd_speed_set(ecmd, vnic_dev_port_speed(enic->vdev));
-               ecmd->duplex = DUPLEX_FULL;
-       } else {
-               ethtool_cmd_speed_set(ecmd, -1);
-               ecmd->duplex = -1;
-       }
-
-       ecmd->autoneg = AUTONEG_DISABLE;
-
-       return 0;
-}
-
-static void enic_get_drvinfo(struct net_device *netdev,
-       struct ethtool_drvinfo *drvinfo)
-{
-       struct enic *enic = netdev_priv(netdev);
-       struct vnic_devcmd_fw_info *fw_info;
-
-       enic_dev_fw_info(enic, &fw_info);
-
-       strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
-       strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
-       strlcpy(drvinfo->fw_version, fw_info->fw_version,
-               sizeof(drvinfo->fw_version));
-       strlcpy(drvinfo->bus_info, pci_name(enic->pdev),
-               sizeof(drvinfo->bus_info));
-}
-
-static void enic_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
-{
-       unsigned int i;
-
-       switch (stringset) {
-       case ETH_SS_STATS:
-               for (i = 0; i < enic_n_tx_stats; i++) {
-                       memcpy(data, enic_tx_stats[i].name, ETH_GSTRING_LEN);
-                       data += ETH_GSTRING_LEN;
-               }
-               for (i = 0; i < enic_n_rx_stats; i++) {
-                       memcpy(data, enic_rx_stats[i].name, ETH_GSTRING_LEN);
-                       data += ETH_GSTRING_LEN;
-               }
-               break;
-       }
-}
-
-static int enic_get_sset_count(struct net_device *netdev, int sset)
-{
-       switch (sset) {
-       case ETH_SS_STATS:
-               return enic_n_tx_stats + enic_n_rx_stats;
-       default:
-               return -EOPNOTSUPP;
-       }
-}
-
-static void enic_get_ethtool_stats(struct net_device *netdev,
-       struct ethtool_stats *stats, u64 *data)
-{
-       struct enic *enic = netdev_priv(netdev);
-       struct vnic_stats *vstats;
-       unsigned int i;
-
-       enic_dev_stats_dump(enic, &vstats);
-
-       for (i = 0; i < enic_n_tx_stats; i++)
-               *(data++) = ((u64 *)&vstats->tx)[enic_tx_stats[i].offset];
-       for (i = 0; i < enic_n_rx_stats; i++)
-               *(data++) = ((u64 *)&vstats->rx)[enic_rx_stats[i].offset];
-}
-
-static u32 enic_get_msglevel(struct net_device *netdev)
-{
-       struct enic *enic = netdev_priv(netdev);
-       return enic->msg_enable;
-}
-
-static void enic_set_msglevel(struct net_device *netdev, u32 value)
-{
-       struct enic *enic = netdev_priv(netdev);
-       enic->msg_enable = value;
-}
-
-static int enic_get_coalesce(struct net_device *netdev,
-       struct ethtool_coalesce *ecmd)
-{
-       struct enic *enic = netdev_priv(netdev);
-
-       ecmd->tx_coalesce_usecs = enic->tx_coalesce_usecs;
-       ecmd->rx_coalesce_usecs = enic->rx_coalesce_usecs;
-
-       return 0;
-}
-
-static int enic_set_coalesce(struct net_device *netdev,
-       struct ethtool_coalesce *ecmd)
-{
-       struct enic *enic = netdev_priv(netdev);
-       u32 tx_coalesce_usecs;
-       u32 rx_coalesce_usecs;
-       unsigned int i, intr;
-
-       tx_coalesce_usecs = min_t(u32, ecmd->tx_coalesce_usecs,
-               vnic_dev_get_intr_coal_timer_max(enic->vdev));
-       rx_coalesce_usecs = min_t(u32, ecmd->rx_coalesce_usecs,
-               vnic_dev_get_intr_coal_timer_max(enic->vdev));
-
-       switch (vnic_dev_get_intr_mode(enic->vdev)) {
-       case VNIC_DEV_INTR_MODE_INTX:
-               if (tx_coalesce_usecs != rx_coalesce_usecs)
-                       return -EINVAL;
-
-               intr = enic_legacy_io_intr();
-               vnic_intr_coalescing_timer_set(&enic->intr[intr],
-                       tx_coalesce_usecs);
-               break;
-       case VNIC_DEV_INTR_MODE_MSI:
-               if (tx_coalesce_usecs != rx_coalesce_usecs)
-                       return -EINVAL;
-
-               vnic_intr_coalescing_timer_set(&enic->intr[0],
-                       tx_coalesce_usecs);
-               break;
-       case VNIC_DEV_INTR_MODE_MSIX:
-               for (i = 0; i < enic->wq_count; i++) {
-                       intr = enic_msix_wq_intr(enic, i);
-                       vnic_intr_coalescing_timer_set(&enic->intr[intr],
-                               tx_coalesce_usecs);
-               }
-
-               for (i = 0; i < enic->rq_count; i++) {
-                       intr = enic_msix_rq_intr(enic, i);
-                       vnic_intr_coalescing_timer_set(&enic->intr[intr],
-                               rx_coalesce_usecs);
-               }
-
-               break;
-       default:
-               break;
-       }
-
-       enic->tx_coalesce_usecs = tx_coalesce_usecs;
-       enic->rx_coalesce_usecs = rx_coalesce_usecs;
-
-       return 0;
-}
-
-static const struct ethtool_ops enic_ethtool_ops = {
-       .get_settings = enic_get_settings,
-       .get_drvinfo = enic_get_drvinfo,
-       .get_msglevel = enic_get_msglevel,
-       .set_msglevel = enic_set_msglevel,
-       .get_link = ethtool_op_get_link,
-       .get_strings = enic_get_strings,
-       .get_sset_count = enic_get_sset_count,
-       .get_ethtool_stats = enic_get_ethtool_stats,
-       .get_coalesce = enic_get_coalesce,
-       .set_coalesce = enic_set_coalesce,
-};
-
 static void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
 {
        struct enic *enic = vnic_dev_priv(wq->vdev);
@@ -2001,6 +1733,7 @@ static void enic_reset(struct work_struct *work)
 
        rtnl_lock();
 
+       spin_lock(&enic->enic_api_lock);
        enic_dev_hang_notify(enic);
        enic_stop(enic->netdev);
        enic_dev_hang_reset(enic);
@@ -2009,6 +1742,8 @@ static void enic_reset(struct work_struct *work)
        enic_set_rss_nic_cfg(enic);
        enic_dev_set_ig_vlan_rewrite_mode(enic);
        enic_open(enic->netdev);
+       spin_unlock(&enic->enic_api_lock);
+       call_netdevice_notifiers(NETDEV_REBOOT, enic->netdev);
 
        rtnl_unlock();
 }
@@ -2421,6 +2156,7 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
         */
 
        spin_lock_init(&enic->devcmd_lock);
+       spin_lock_init(&enic->enic_api_lock);
 
        /*
         * Set ingress vlan rewrite mode before vnic initialization
@@ -2496,7 +2232,7 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                netdev->netdev_ops = &enic_netdev_ops;
 
        netdev->watchdog_timeo = 2 * HZ;
-       netdev->ethtool_ops = &enic_ethtool_ops;
+       enic_set_ethtool_ops(netdev);
 
        netdev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
        if (ENIC_SETTING(enic, LOOP)) {
index 25be2734c3feba3cf0d3fa631f64da360248fa80..69f60afd6577ec2d7c5c41540f81fbc59b7049a6 100644 (file)
@@ -47,6 +47,9 @@ static inline void enic_queue_wq_desc_ex(struct vnic_wq *wq,
        int offload_mode, int cq_entry, int sop, int eop, int loopback)
 {
        struct wq_enet_desc *desc = vnic_wq_next_desc(wq);
+       u8 desc_skip_cnt = 1;
+       u8 compressed_send = 0;
+       u64 wrid = 0;
 
        wq_enet_desc_enc(desc,
                (u64)dma_addr | VNIC_PADDR_TARGET,
@@ -59,7 +62,8 @@ static inline void enic_queue_wq_desc_ex(struct vnic_wq *wq,
                (u16)vlan_tag,
                (u8)loopback);
 
-       vnic_wq_post(wq, os_buf, dma_addr, len, sop, eop);
+       vnic_wq_post(wq, os_buf, dma_addr, len, sop, eop, desc_skip_cnt,
+                       (u8)cq_entry, compressed_send, wrid);
 }
 
 static inline void enic_queue_wq_desc_cont(struct vnic_wq *wq,
@@ -120,6 +124,7 @@ static inline void enic_queue_rq_desc(struct vnic_rq *rq,
        dma_addr_t dma_addr, unsigned int len)
 {
        struct rq_enet_desc *desc = vnic_rq_next_desc(rq);
+       u64 wrid = 0;
        u8 type = os_buf_index ?
                RQ_ENET_TYPE_NOT_SOP : RQ_ENET_TYPE_ONLY_SOP;
 
@@ -127,7 +132,7 @@ static inline void enic_queue_rq_desc(struct vnic_rq *rq,
                (u64)dma_addr | VNIC_PADDR_TARGET,
                type, (u16)len);
 
-       vnic_rq_post(rq, os_buf, os_buf_index, dma_addr, len);
+       vnic_rq_post(rq, os_buf, os_buf_index, dma_addr, len, wrid);
 }
 
 struct enic;
index 23d555255cf8ed99ef3c949ceeb0c3b42822ff9b..b9a0d78fd6391698a15a169cf206ec27b098b2c0 100644 (file)
@@ -281,11 +281,25 @@ enum vnic_devcmd_cmd {
         *              0 if no VIF-CONFIG-INFO TLV was ever received. */
        CMD_CONFIG_INFO_GET     = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 44),
 
+       /* INT13 API: (u64)a0=paddr to vnic_int13_params struct
+        *            (u32)a1=INT13_CMD_xxx
+        */
+       CMD_INT13_ALL = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 45),
+
+       /* Set default vlan:
+        * in: (u16)a0=new default vlan
+        *     (u16)a1=zero for overriding vlan with param a0,
+        *                     non-zero for resetting vlan to the default
+        * out: (u16)a0=old default vlan
+        */
+       CMD_SET_DEFAULT_VLAN = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 46),
+
        /* init_prov_info2:
         * Variant of CMD_INIT_PROV_INFO, where it will not try to enable
         * the vnic until CMD_ENABLE2 is issued.
         *     (u64)a0=paddr of vnic_devcmd_provinfo
-        *     (u32)a1=sizeof provision info */
+        *     (u32)a1=sizeof provision info
+        */
        CMD_INIT_PROV_INFO2  = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 47),
 
        /* enable2:
@@ -339,16 +353,57 @@ enum vnic_devcmd_cmd {
        CMD_INTR_COAL_CONVERT = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 50),
 
        /*
-        * cmd_set_mac_addr
-        *      set mac address
+        * Set the predefined mac address as default
         * in:
         *   (u48)a0 = mac addr
-        *
         */
        CMD_SET_MAC_ADDR = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 55),
+
+       /* Update the provisioning info of the given VIF
+        *     (u64)a0=paddr of vnic_devcmd_provinfo
+        *     (u32)a1=sizeof provision info
+        */
+       CMD_PROV_INFO_UPDATE = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 56),
+
+       /* Add a filter.
+        * in: (u64) a0= filter address
+        *     (u32) a1= size of filter
+        * out: (u32) a0=filter identifier
+        */
+       CMD_ADD_FILTER = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ENET, 58),
+
+       /* Delete a filter.
+        * in: (u32) a0=filter identifier
+        */
+       CMD_DEL_FILTER = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 59),
+
+       /* Enable a Queue Pair in User space NIC
+        * in: (u32) a0=Queue Pair number
+        *     (u32) a1= command
+        */
+       CMD_QP_ENABLE = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 60),
+
+       /* Disable a Queue Pair in User space NIC
+        * in: (u32) a0=Queue Pair number
+        *     (u32) a1= command
+        */
+       CMD_QP_DISABLE = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 61),
+
+       /* Stats dump Queue Pair in User space NIC
+        * in: (u32) a0=Queue Pair number
+        *     (u64) a1=host buffer addr for status dump
+        *     (u32) a2=length of the buffer
+        */
+       CMD_QP_STATS_DUMP = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 62),
+
+       /* Clear stats for Queue Pair in User space NIC
+        * in: (u32) a0=Queue Pair number
+        */
+       CMD_QP_STATS_CLEAR = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 63),
 };
 
 /* CMD_ENABLE2 flags */
+#define CMD_ENABLE2_STANDBY 0x0
 #define CMD_ENABLE2_ACTIVE  0x1
 
 /* flags for CMD_OPEN */
@@ -364,6 +419,9 @@ enum vnic_devcmd_cmd {
 #define CMD_PFILTER_PROMISCUOUS                0x08
 #define CMD_PFILTER_ALL_MULTICAST      0x10
 
+/* Commands for CMD_QP_ENABLE/CM_QP_DISABLE */
+#define CMD_QP_RQWQ                     0x0
+
 /* rewrite modes for CMD_IG_VLAN_REWRITE_MODE */
 #define IG_VLAN_REWRITE_MODE_DEFAULT_TRUNK              0
 #define IG_VLAN_REWRITE_MODE_UNTAG_DEFAULT_VLAN         1
@@ -390,6 +448,7 @@ enum vnic_devcmd_error {
        ERR_EMAXRES = 10,
        ERR_ENOTSUPPORTED = 11,
        ERR_EINPROGRESS = 12,
+       ERR_MAX
 };
 
 /*
@@ -435,6 +494,115 @@ struct vnic_devcmd_provinfo {
        u8 data[0];
 };
 
+/* These are used in flags field of different filters to denote
+ * valid fields used.
+ */
+#define FILTER_FIELD_VALID(fld) (1 << (fld - 1))
+
+#define FILTER_FIELDS_USNIC ( \
+                       FILTER_FIELD_VALID(1) | \
+                       FILTER_FIELD_VALID(2) | \
+                       FILTER_FIELD_VALID(3) | \
+                       FILTER_FIELD_VALID(4))
+
+#define FILTER_FIELDS_IPV4_5TUPLE ( \
+                       FILTER_FIELD_VALID(1) | \
+                       FILTER_FIELD_VALID(2) | \
+                       FILTER_FIELD_VALID(3) | \
+                       FILTER_FIELD_VALID(4) | \
+                       FILTER_FIELD_VALID(5))
+
+#define FILTER_FIELDS_MAC_VLAN ( \
+                       FILTER_FIELD_VALID(1) | \
+                       FILTER_FIELD_VALID(2))
+
+#define FILTER_FIELD_USNIC_VLAN    FILTER_FIELD_VALID(1)
+#define FILTER_FIELD_USNIC_ETHTYPE FILTER_FIELD_VALID(2)
+#define FILTER_FIELD_USNIC_PROTO   FILTER_FIELD_VALID(3)
+#define FILTER_FIELD_USNIC_ID      FILTER_FIELD_VALID(4)
+
+struct filter_usnic_id {
+       u32 flags;
+       u16 vlan;
+       u16 ethtype;
+       u8 proto_version;
+       u32 usnic_id;
+} __packed;
+
+#define FILTER_FIELD_5TUP_PROTO  FILTER_FIELD_VALID(1)
+#define FILTER_FIELD_5TUP_SRC_AD FILTER_FIELD_VALID(2)
+#define FILTER_FIELD_5TUP_DST_AD FILTER_FIELD_VALID(3)
+#define FILTER_FIELD_5TUP_SRC_PT FILTER_FIELD_VALID(4)
+#define FILTER_FIELD_5TUP_DST_PT FILTER_FIELD_VALID(5)
+
+/* Enums for the protocol field. */
+enum protocol_e {
+       PROTO_UDP = 0,
+       PROTO_TCP = 1,
+};
+
+struct filter_ipv4_5tuple {
+       u32 flags;
+       u32 protocol;
+       u32 src_addr;
+       u32 dst_addr;
+       u16 src_port;
+       u16 dst_port;
+} __packed;
+
+#define FILTER_FIELD_VMQ_VLAN   FILTER_FIELD_VALID(1)
+#define FILTER_FIELD_VMQ_MAC    FILTER_FIELD_VALID(2)
+
+struct filter_mac_vlan {
+       u32 flags;
+       u16 vlan;
+       u8 mac_addr[6];
+} __packed;
+
+/* Specifies the filter_action type. */
+enum {
+       FILTER_ACTION_RQ_STEERING = 0,
+       FILTER_ACTION_MAX
+};
+
+struct filter_action {
+       u32 type;
+       union {
+               u32 rq_idx;
+       } u;
+} __packed;
+
+/* Specifies the filter type. */
+enum filter_type {
+       FILTER_USNIC_ID = 0,
+       FILTER_IPV4_5TUPLE = 1,
+       FILTER_MAC_VLAN = 2,
+       FILTER_MAX
+};
+
+struct filter {
+       u32 type;
+       union {
+               struct filter_usnic_id usnic;
+               struct filter_ipv4_5tuple ipv4;
+               struct filter_mac_vlan mac_vlan;
+       } u;
+} __packed;
+
+enum {
+       CLSF_TLV_FILTER = 0,
+       CLSF_TLV_ACTION = 1,
+};
+
+/* Maximum size of buffer to CMD_ADD_FILTER */
+#define FILTER_MAX_BUF_SIZE 100
+
+struct filter_tlv {
+       u_int32_t type;
+       u_int32_t length;
+       u_int32_t val[0];
+};
+
 /*
  * Writing cmd register causes STAT_BUSY to get set in status register.
  * When cmd completes, STAT_BUSY will be cleared.
index 7e1488fc8ab28bc3ab6c5f6565e9cf61c70f62d9..36a2ed606c911f21355360fad81eb39b18162c59 100644 (file)
 static int vnic_rq_alloc_bufs(struct vnic_rq *rq)
 {
        struct vnic_rq_buf *buf;
-       struct vnic_dev *vdev;
        unsigned int i, j, count = rq->ring.desc_count;
        unsigned int blks = VNIC_RQ_BUF_BLKS_NEEDED(count);
 
-       vdev = rq->vdev;
-
        for (i = 0; i < blks; i++) {
                rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ(count), GFP_ATOMIC);
                if (!rq->bufs[i])
@@ -141,7 +138,7 @@ void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
        unsigned int error_interrupt_enable,
        unsigned int error_interrupt_offset)
 {
-       u32 fetch_index;
+       u32 fetch_index = 0;
 
        /* Use current fetch_index as the ring starting point */
        fetch_index = ioread32(&rq->ctrl->fetch_index);
index 2056586f4d4b02641ec9609e3858b3c0368822ff..ee7bc95af278c691acebb358341a9885017863f3 100644 (file)
@@ -72,6 +72,7 @@ struct vnic_rq_buf {
        unsigned int len;
        unsigned int index;
        void *desc;
+       uint64_t wr_id;
 };
 
 struct vnic_rq {
@@ -110,7 +111,8 @@ static inline unsigned int vnic_rq_next_index(struct vnic_rq *rq)
 
 static inline void vnic_rq_post(struct vnic_rq *rq,
        void *os_buf, unsigned int os_buf_index,
-       dma_addr_t dma_addr, unsigned int len)
+       dma_addr_t dma_addr, unsigned int len,
+       uint64_t wrid)
 {
        struct vnic_rq_buf *buf = rq->to_use;
 
@@ -118,6 +120,7 @@ static inline void vnic_rq_post(struct vnic_rq *rq,
        buf->os_buf_index = os_buf_index;
        buf->dma_addr = dma_addr;
        buf->len = len;
+       buf->wr_id = wrid;
 
        buf = buf->next;
        rq->to_use = buf;
index 5e0d7a2be9bc4a95c32864ba7fc25d14b0b5f3c9..3e6b8d54dafcbac2e7c628b850378b89cb8ab7fe 100644 (file)
 static int vnic_wq_alloc_bufs(struct vnic_wq *wq)
 {
        struct vnic_wq_buf *buf;
-       struct vnic_dev *vdev;
        unsigned int i, j, count = wq->ring.desc_count;
        unsigned int blks = VNIC_WQ_BUF_BLKS_NEEDED(count);
 
-       vdev = wq->vdev;
-
        for (i = 0; i < blks; i++) {
                wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ(count), GFP_ATOMIC);
                if (!wq->bufs[i])
index 7dd937ac11c23e51fefdedf7ce72f70d7f2b76e9..2c6c70804a39edb475d4c97e1f3e617aad387ed4 100644 (file)
@@ -58,6 +58,10 @@ struct vnic_wq_buf {
        unsigned int index;
        int sop;
        void *desc;
+       uint64_t wr_id; /* Cookie */
+       uint8_t cq_entry; /* Gets completion event from hw */
+       uint8_t desc_skip_cnt; /* Num descs to occupy */
+       uint8_t compressed_send; /* Both hdr and payload in one desc */
 };
 
 /* Break the vnic_wq_buf allocations into blocks of 32/64 entries */
@@ -102,14 +106,20 @@ static inline void *vnic_wq_next_desc(struct vnic_wq *wq)
 
 static inline void vnic_wq_post(struct vnic_wq *wq,
        void *os_buf, dma_addr_t dma_addr,
-       unsigned int len, int sop, int eop)
+       unsigned int len, int sop, int eop,
+       uint8_t desc_skip_cnt, uint8_t cq_entry,
+       uint8_t compressed_send, uint64_t wrid)
 {
        struct vnic_wq_buf *buf = wq->to_use;
 
        buf->sop = sop;
+       buf->cq_entry = cq_entry;
+       buf->compressed_send = compressed_send;
+       buf->desc_skip_cnt = desc_skip_cnt;
        buf->os_buf = eop ? os_buf : NULL;
        buf->dma_addr = dma_addr;
        buf->len = len;
+       buf->wr_id = wrid;
 
        buf = buf->next;
        if (eop) {
@@ -123,7 +133,7 @@ static inline void vnic_wq_post(struct vnic_wq *wq,
        }
        wq->to_use = buf;
 
-       wq->ring.desc_avail--;
+       wq->ring.desc_avail -= desc_skip_cnt;
 }
 
 static inline void vnic_wq_service(struct vnic_wq *wq,
index c94152f1c6be5396b511902a28478bae1c00e442..4e8cfa2ac803abb13c9c692ef971618cc26d664b 100644 (file)
@@ -1304,7 +1304,9 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
        struct tulip_private *tp;
        /* See note below on the multiport cards. */
-       static unsigned char last_phys_addr[6] = {0x00, 'L', 'i', 'n', 'u', 'x'};
+       static unsigned char last_phys_addr[ETH_ALEN] = {
+               0x00, 'L', 'i', 'n', 'u', 'x'
+       };
        static int last_irq;
        static int multiport_cnt;       /* For four-port boards w/one EEPROM */
        int i, irq;
@@ -1627,8 +1629,8 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                dev->dev_addr[i] = last_phys_addr[i] + 1;
 #if defined(CONFIG_SPARC)
                addr = of_get_property(dp, "local-mac-address", &len);
-               if (addr && len == 6)
-                       memcpy(dev->dev_addr, addr, 6);
+               if (addr && len == ETH_ALEN)
+                       memcpy(dev->dev_addr, addr, ETH_ALEN);
 #endif
 #if defined(__i386__) || defined(__x86_64__)   /* Patch up x86 BIOS bug. */
                if (last_irq)
index 50d9c631593090e8d2b335fcb4d947346091fa42..bf3bf6f22c998b7c7ef2563be2041737a15959fe 100644 (file)
@@ -469,6 +469,17 @@ static void sundance_reset(struct net_device *dev, unsigned long reset_cmd)
        }
 }
 
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void sundance_poll_controller(struct net_device *dev)
+{
+       struct netdev_private *np = netdev_priv(dev);
+
+       disable_irq(np->pci_dev->irq);
+       intr_handler(np->pci_dev->irq, dev);
+       enable_irq(np->pci_dev->irq);
+}
+#endif
+
 static const struct net_device_ops netdev_ops = {
        .ndo_open               = netdev_open,
        .ndo_stop               = netdev_close,
@@ -480,6 +491,9 @@ static const struct net_device_ops netdev_ops = {
        .ndo_change_mtu         = change_mtu,
        .ndo_set_mac_address    = sundance_set_mac_addr,
        .ndo_validate_addr      = eth_validate_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+       .ndo_poll_controller    = sundance_poll_controller,
+#endif
 };
 
 static int sundance_probe1(struct pci_dev *pdev,
index c827b1b6b1ceb81c5d10bdc2db6fcea3192701a3..11c815db54422d80c748a2a999649ee84c664f31 100644 (file)
@@ -34,7 +34,7 @@
 #include "be_hw.h"
 #include "be_roce.h"
 
-#define DRV_VER                        "4.6.62.0u"
+#define DRV_VER                        "4.9.134.0u"
 #define DRV_NAME               "be2net"
 #define BE_NAME                        "Emulex BladeEngine2"
 #define BE3_NAME               "Emulex BladeEngine3"
index 8ec5d74ad44d75e4fb9d7c871b0ddd95654c74d1..85923e2d63b93e523c5a8b211e720a806146a168 100644 (file)
@@ -258,7 +258,8 @@ static void be_async_grp5_evt_process(struct be_adapter *adapter,
                (struct be_async_event_grp5_pvid_state *)evt);
        break;
        default:
-               dev_warn(&adapter->pdev->dev, "Unknown grp5 event!\n");
+               dev_warn(&adapter->pdev->dev, "Unknown grp5 event 0x%x!\n",
+                        event_type);
                break;
        }
 }
@@ -279,7 +280,8 @@ static void be_async_dbg_evt_process(struct be_adapter *adapter,
                adapter->flags |= BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
        break;
        default:
-               dev_warn(&adapter->pdev->dev, "Unknown debug event\n");
+               dev_warn(&adapter->pdev->dev, "Unknown debug event 0x%x!\n",
+                        event_type);
        break;
        }
 }
@@ -1010,9 +1012,9 @@ static u32 be_encoded_q_len(int q_len)
        return len_encoded;
 }
 
-int be_cmd_mccq_ext_create(struct be_adapter *adapter,
-                       struct be_queue_info *mccq,
-                       struct be_queue_info *cq)
+static int be_cmd_mccq_ext_create(struct be_adapter *adapter,
+                               struct be_queue_info *mccq,
+                               struct be_queue_info *cq)
 {
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_mcc_ext_create *req;
@@ -1068,9 +1070,9 @@ int be_cmd_mccq_ext_create(struct be_adapter *adapter,
        return status;
 }
 
-int be_cmd_mccq_org_create(struct be_adapter *adapter,
-                       struct be_queue_info *mccq,
-                       struct be_queue_info *cq)
+static int be_cmd_mccq_org_create(struct be_adapter *adapter,
+                               struct be_queue_info *mccq,
+                               struct be_queue_info *cq)
 {
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_mcc_create *req;
@@ -1339,6 +1341,10 @@ int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
        if (!status) {
                struct be_cmd_resp_if_create *resp = embedded_payload(wrb);
                *if_handle = le32_to_cpu(resp->interface_id);
+
+               /* Hack to retrieve VF's pmac-id on BE3 */
+               if (BE3_chip(adapter) && !be_physfn(adapter))
+                       adapter->pmac_id[0] = le32_to_cpu(resp->pmac_id);
        }
 
 err:
@@ -1460,6 +1466,12 @@ static int be_mac_to_link_speed(int mac_speed)
                return 1000;
        case PHY_LINK_SPEED_10GBPS:
                return 10000;
+       case PHY_LINK_SPEED_20GBPS:
+               return 20000;
+       case PHY_LINK_SPEED_25GBPS:
+               return 25000;
+       case PHY_LINK_SPEED_40GBPS:
+               return 40000;
        }
        return 0;
 }
@@ -1520,7 +1532,7 @@ int be_cmd_get_die_temperature(struct be_adapter *adapter)
 {
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_get_cntl_addnl_attribs *req;
-       int status;
+       int status = 0;
 
        spin_lock_bh(&adapter->mcc_lock);
 
@@ -2444,6 +2456,12 @@ int be_cmd_get_phy_info(struct be_adapter *adapter)
                        le16_to_cpu(resp_phy_info->fixed_speeds_supported);
                adapter->phy.misc_params =
                        le32_to_cpu(resp_phy_info->misc_params);
+
+               if (BE2_chip(adapter)) {
+                       adapter->phy.fixed_speeds_supported =
+                               BE_SUPPORTED_SPEED_10GBPS |
+                               BE_SUPPORTED_SPEED_1GBPS;
+               }
        }
        pci_free_consistent(adapter->pdev, cmd.size,
                                cmd.va, cmd.dma);
@@ -2606,9 +2624,44 @@ err:
        return status;
 }
 
-/* Uses synchronous MCCQ */
+/* Set privilege(s) for a function */
+int be_cmd_set_fn_privileges(struct be_adapter *adapter, u32 privileges,
+                            u32 domain)
+{
+       struct be_mcc_wrb *wrb;
+       struct be_cmd_req_set_fn_privileges *req;
+       int status;
+
+       spin_lock_bh(&adapter->mcc_lock);
+
+       wrb = wrb_from_mccq(adapter);
+       if (!wrb) {
+               status = -EBUSY;
+               goto err;
+       }
+
+       req = embedded_payload(wrb);
+       be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+                              OPCODE_COMMON_SET_FN_PRIVILEGES, sizeof(*req),
+                              wrb, NULL);
+       req->hdr.domain = domain;
+       if (lancer_chip(adapter))
+               req->privileges_lancer = cpu_to_le32(privileges);
+       else
+               req->privileges = cpu_to_le32(privileges);
+
+       status = be_mcc_notify_wait(adapter);
+err:
+       spin_unlock_bh(&adapter->mcc_lock);
+       return status;
+}
+
+/* pmac_id_valid: true => pmac_id is supplied and MAC address is requested.
+ * pmac_id_valid: false => pmac_id or MAC address is requested.
+ *               If pmac_id is returned, pmac_id_valid is returned as true
+ */
 int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
-                            bool *pmac_id_active, u32 *pmac_id, u8 domain)
+                            bool *pmac_id_valid, u32 *pmac_id, u8 domain)
 {
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_get_mac_list *req;
@@ -2644,12 +2697,25 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
                               get_mac_list_cmd.size, wrb, &get_mac_list_cmd);
        req->hdr.domain = domain;
        req->mac_type = MAC_ADDRESS_TYPE_NETWORK;
-       req->perm_override = 1;
+       if (*pmac_id_valid) {
+               req->mac_id = cpu_to_le32(*pmac_id);
+               req->iface_id = cpu_to_le16(adapter->if_handle);
+               req->perm_override = 0;
+       } else {
+               req->perm_override = 1;
+       }
 
        status = be_mcc_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_get_mac_list *resp =
                                                get_mac_list_cmd.va;
+
+               if (*pmac_id_valid) {
+                       memcpy(mac, resp->macid_macaddr.mac_addr_id.macaddr,
+                              ETH_ALEN);
+                       goto out;
+               }
+
                mac_count = resp->true_mac_count + resp->pseudo_mac_count;
                /* Mac list returned could contain one or more active mac_ids
                 * or one or more true or pseudo permanant mac addresses.
@@ -2667,14 +2733,14 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
                         * is 6 bytes
                         */
                        if (mac_addr_size == sizeof(u32)) {
-                               *pmac_id_active = true;
+                               *pmac_id_valid = true;
                                mac_id = mac_entry->mac_addr_id.s_mac_id.mac_id;
                                *pmac_id = le32_to_cpu(mac_id);
                                goto out;
                        }
                }
                /* If no active mac_id found, return first mac addr */
-               *pmac_id_active = false;
+               *pmac_id_valid = false;
                memcpy(mac, resp->macaddr_list[0].mac_addr_id.macaddr,
                                                                ETH_ALEN);
        }
@@ -2686,6 +2752,41 @@ out:
        return status;
 }
 
+int be_cmd_get_active_mac(struct be_adapter *adapter, u32 curr_pmac_id, u8 *mac)
+{
+       bool active = true;
+
+       if (BEx_chip(adapter))
+               return be_cmd_mac_addr_query(adapter, mac, false,
+                                            adapter->if_handle, curr_pmac_id);
+       else
+               /* Fetch the MAC address using pmac_id */
+               return be_cmd_get_mac_from_list(adapter, mac, &active,
+                                               &curr_pmac_id, 0);
+}
+
+int be_cmd_get_perm_mac(struct be_adapter *adapter, u8 *mac)
+{
+       int status;
+       bool pmac_valid = false;
+
+       memset(mac, 0, ETH_ALEN);
+
+       if (BEx_chip(adapter)) {
+               if (be_physfn(adapter))
+                       status = be_cmd_mac_addr_query(adapter, mac, true, 0,
+                                                      0);
+               else
+                       status = be_cmd_mac_addr_query(adapter, mac, false,
+                                                      adapter->if_handle, 0);
+       } else {
+               status = be_cmd_get_mac_from_list(adapter, mac, &pmac_valid,
+                                                 NULL, 0);
+       }
+
+       return status;
+}
+
 /* Uses synchronous MCCQ */
 int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
                        u8 mac_count, u32 domain)
@@ -2729,6 +2830,25 @@ err:
        return status;
 }
 
+/* Wrapper to delete any active MACs and provision the new mac.
+ * Changes to MAC_LIST are allowed iff none of the MAC addresses in the
+ * current list are active.
+ */
+int be_cmd_set_mac(struct be_adapter *adapter, u8 *mac, int if_id, u32 dom)
+{
+       bool active_mac = false;
+       u8 old_mac[ETH_ALEN];
+       u32 pmac_id;
+       int status;
+
+       status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
+                                         &pmac_id, dom);
+       if (!status && active_mac)
+               be_cmd_pmac_del(adapter, if_id, pmac_id, dom);
+
+       return be_cmd_set_mac_list(adapter, mac, mac ? 1 : 0, dom);
+}
+
 int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
                        u32 domain, u16 intf_id)
 {
@@ -3060,8 +3180,8 @@ err:
 }
 
 /* Uses mbox */
-int be_cmd_get_profile_config_mbox(struct be_adapter *adapter,
-                                  u8 domain, struct be_dma_mem *cmd)
+static int be_cmd_get_profile_config_mbox(struct be_adapter *adapter,
+                                       u8 domain, struct be_dma_mem *cmd)
 {
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_get_profile_config *req;
@@ -3088,8 +3208,8 @@ int be_cmd_get_profile_config_mbox(struct be_adapter *adapter,
 }
 
 /* Uses sync mcc */
-int be_cmd_get_profile_config_mccq(struct be_adapter *adapter,
-                                  u8 domain, struct be_dma_mem *cmd)
+static int be_cmd_get_profile_config_mccq(struct be_adapter *adapter,
+                                       u8 domain, struct be_dma_mem *cmd)
 {
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_get_profile_config *req;
index 1b3b9e886412ddd689a0abbee536e6ba0d4ad50c..6237192a55d1b7fabbfd5a4d0642e6f05b8072eb 100644 (file)
@@ -202,6 +202,7 @@ struct be_mcc_mailbox {
 #define OPCODE_COMMON_READ_TRANSRECV_DATA              73
 #define OPCODE_COMMON_GET_PORT_NAME                    77
 #define OPCODE_COMMON_SET_INTERRUPT_ENABLE             89
+#define OPCODE_COMMON_SET_FN_PRIVILEGES                        100
 #define OPCODE_COMMON_GET_PHY_DETAILS                  102
 #define OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP          103
 #define OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES   121
@@ -965,7 +966,10 @@ enum {
        PHY_LINK_SPEED_10MBPS = 0x1,
        PHY_LINK_SPEED_100MBPS = 0x2,
        PHY_LINK_SPEED_1GBPS = 0x3,
-       PHY_LINK_SPEED_10GBPS = 0x4
+       PHY_LINK_SPEED_10GBPS = 0x4,
+       PHY_LINK_SPEED_20GBPS = 0x5,
+       PHY_LINK_SPEED_25GBPS = 0x6,
+       PHY_LINK_SPEED_40GBPS = 0x7
 };
 
 struct be_cmd_resp_link_status {
@@ -1480,6 +1484,11 @@ struct be_cmd_resp_get_fn_privileges {
        u32 privilege_mask;
 };
 
+struct be_cmd_req_set_fn_privileges {
+       struct be_cmd_req_hdr hdr;
+       u32 privileges;         /* Used by BE3, SH-R */
+       u32 privileges_lancer;  /* Used by Lancer */
+};
 
 /******************** GET/SET_MACLIST  **************************/
 #define BE_MAX_MAC                     64
@@ -1927,11 +1936,18 @@ extern int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size);
 extern void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf);
 extern int be_cmd_get_fn_privileges(struct be_adapter *adapter,
                                    u32 *privilege, u32 domain);
+extern int be_cmd_set_fn_privileges(struct be_adapter *adapter,
+                                   u32 privileges, u32 vf_num);
 extern int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
                                    bool *pmac_id_active, u32 *pmac_id,
                                    u8 domain);
+extern int be_cmd_get_active_mac(struct be_adapter *adapter, u32 pmac_id,
+                                u8 *mac);
+extern int be_cmd_get_perm_mac(struct be_adapter *adapter, u8 *mac);
 extern int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
                                                u8 mac_count, u32 domain);
+extern int be_cmd_set_mac(struct be_adapter *adapter, u8 *mac, int if_id,
+                         u32 dom);
 extern int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
                        u32 domain, u16 intf_id);
 extern int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
index 181edb522450ada0c9ba72e9a736d312594cc183..ff2b40db38ba5ce04c36883ce66e3f3b880aed10 100644 (file)
@@ -247,54 +247,54 @@ void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
 static int be_mac_addr_set(struct net_device *netdev, void *p)
 {
        struct be_adapter *adapter = netdev_priv(netdev);
+       struct device *dev = &adapter->pdev->dev;
        struct sockaddr *addr = p;
-       int status = 0;
-       u8 current_mac[ETH_ALEN];
-       u32 pmac_id = adapter->pmac_id[0];
-       bool active_mac = true;
+       int status;
+       u8 mac[ETH_ALEN];
+       u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;
 
        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;
 
-       /* For BE VF, MAC address is already activated by PF.
-        * Hence only operation left is updating netdev->devaddr.
-        * Update it if user is passing the same MAC which was used
-        * during configuring VF MAC from PF(Hypervisor).
+       /* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
+        * privilege or if PF did not provision the new MAC address.
+        * On BE3, this cmd will always fail if the VF doesn't have the
+        * FILTMGMT privilege. This failure is OK, only if the PF programmed
+        * the MAC for the VF.
         */
-       if (!lancer_chip(adapter) && !be_physfn(adapter)) {
-               status = be_cmd_mac_addr_query(adapter, current_mac,
-                                              false, adapter->if_handle, 0);
-               if (!status && !memcmp(current_mac, addr->sa_data, ETH_ALEN))
-                       goto done;
-               else
-                       goto err;
-       }
+       status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
+                                adapter->if_handle, &adapter->pmac_id[0], 0);
+       if (!status) {
+               curr_pmac_id = adapter->pmac_id[0];
 
-       if (!memcmp(addr->sa_data, netdev->dev_addr, ETH_ALEN))
-               goto done;
+               /* Delete the old programmed MAC. This call may fail if the
+                * old MAC was already deleted by the PF driver.
+                */
+               if (adapter->pmac_id[0] != old_pmac_id)
+                       be_cmd_pmac_del(adapter, adapter->if_handle,
+                                       old_pmac_id, 0);
+       }
 
-       /* For Lancer check if any MAC is active.
-        * If active, get its mac id.
+       /* Decide if the new MAC is successfully activated only after
+        * querying the FW
         */
-       if (lancer_chip(adapter) && !be_physfn(adapter))
-               be_cmd_get_mac_from_list(adapter, current_mac, &active_mac,
-                                        &pmac_id, 0);
-
-       status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
-                                adapter->if_handle,
-                                &adapter->pmac_id[0], 0);
-
+       status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac);
        if (status)
                goto err;
 
-       if (active_mac)
-               be_cmd_pmac_del(adapter, adapter->if_handle,
-                               pmac_id, 0);
-done:
+       /* The MAC change did not happen, either due to lack of privilege
+        * or PF didn't pre-provision.
+        */
+       if (memcmp(addr->sa_data, mac, ETH_ALEN)) {
+               status = -EPERM;
+               goto err;
+       }
+
        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+       dev_info(dev, "MAC address changed to %pM\n", mac);
        return 0;
 err:
-       dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
+       dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
        return status;
 }
 
@@ -472,7 +472,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
        ACCESS_ONCE(*acc) = newacc;
 }
 
-void populate_erx_stats(struct be_adapter *adapter,
+static void populate_erx_stats(struct be_adapter *adapter,
                        struct be_rx_obj *rxo,
                        u32 erx_stat)
 {
@@ -1146,9 +1146,6 @@ static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
        int status;
-       bool active_mac = false;
-       u32 pmac_id;
-       u8 old_mac[ETH_ALEN];
 
        if (!sriov_enabled(adapter))
                return -EPERM;
@@ -1156,20 +1153,15 @@ static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
        if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
                return -EINVAL;
 
-       if (lancer_chip(adapter)) {
-               status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
-                                                 &pmac_id, vf + 1);
-               if (!status && active_mac)
-                       be_cmd_pmac_del(adapter, vf_cfg->if_handle,
-                                       pmac_id, vf + 1);
-
-               status = be_cmd_set_mac_list(adapter,  mac, 1, vf + 1);
-       } else {
-               status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
-                                        vf_cfg->pmac_id, vf + 1);
+       if (BEx_chip(adapter)) {
+               be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
+                               vf + 1);
 
                status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
                                         &vf_cfg->pmac_id, vf + 1);
+       } else {
+               status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
+                                       vf + 1);
        }
 
        if (status)
@@ -1490,8 +1482,9 @@ static void be_rx_compl_process(struct be_rx_obj *rxo,
 }
 
 /* Process the RX completion indicated by rxcp when GRO is enabled */
-void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
-                            struct be_rx_compl_info *rxcp)
+static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
+                                   struct napi_struct *napi,
+                                   struct be_rx_compl_info *rxcp)
 {
        struct be_adapter *adapter = rxo->adapter;
        struct be_rx_page_info *page_info;
@@ -2267,7 +2260,7 @@ static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
        return (work_done < budget); /* Done */
 }
 
-int be_poll(struct napi_struct *napi, int budget)
+static int be_poll(struct napi_struct *napi, int budget)
 {
        struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
        struct be_adapter *adapter = eqo->adapter;
@@ -2735,13 +2728,13 @@ static int be_vf_eth_addr_config(struct be_adapter *adapter)
        be_vf_eth_addr_generate(adapter, mac);
 
        for_all_vfs(adapter, vf_cfg, vf) {
-               if (lancer_chip(adapter)) {
-                       status = be_cmd_set_mac_list(adapter,  mac, 1, vf + 1);
-               } else {
+               if (BEx_chip(adapter))
                        status = be_cmd_pmac_add(adapter, mac,
                                                 vf_cfg->if_handle,
                                                 &vf_cfg->pmac_id, vf + 1);
-               }
+               else
+                       status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
+                                               vf + 1);
 
                if (status)
                        dev_err(&adapter->pdev->dev,
@@ -2759,7 +2752,7 @@ static int be_vfs_mac_query(struct be_adapter *adapter)
        int status, vf;
        u8 mac[ETH_ALEN];
        struct be_vf_cfg *vf_cfg;
-       bool active;
+       bool active = false;
 
        for_all_vfs(adapter, vf_cfg, vf) {
                be_cmd_get_mac_from_list(adapter, mac, &active,
@@ -2788,11 +2781,12 @@ static void be_vf_clear(struct be_adapter *adapter)
        pci_disable_sriov(adapter->pdev);
 
        for_all_vfs(adapter, vf_cfg, vf) {
-               if (lancer_chip(adapter))
-                       be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
-               else
+               if (BEx_chip(adapter))
                        be_cmd_pmac_del(adapter, vf_cfg->if_handle,
                                        vf_cfg->pmac_id, vf + 1);
+               else
+                       be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
+                                      vf + 1);
 
                be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
        }
@@ -2803,7 +2797,7 @@ done:
 
 static int be_clear(struct be_adapter *adapter)
 {
-       int i = 1;
+       int i;
 
        if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
                cancel_delayed_work_sync(&adapter->work);
@@ -2813,9 +2807,11 @@ static int be_clear(struct be_adapter *adapter)
        if (sriov_enabled(adapter))
                be_vf_clear(adapter);
 
-       for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
+       /* delete the primary mac along with the uc-mac list */
+       for (i = 0; i < (adapter->uc_macs + 1); i++)
                be_cmd_pmac_del(adapter, adapter->if_handle,
-                       adapter->pmac_id[i], 0);
+                               adapter->pmac_id[i], 0);
+       adapter->uc_macs = 0;
 
        be_cmd_if_destroy(adapter, adapter->if_handle,  0);
 
@@ -2880,6 +2876,7 @@ static int be_vf_setup(struct be_adapter *adapter)
        u16 def_vlan, lnk_speed;
        int status, old_vfs, vf;
        struct device *dev = &adapter->pdev->dev;
+       u32 privileges;
 
        old_vfs = pci_num_vf(adapter->pdev);
        if (old_vfs) {
@@ -2923,6 +2920,18 @@ static int be_vf_setup(struct be_adapter *adapter)
        }
 
        for_all_vfs(adapter, vf_cfg, vf) {
+               /* Allow VFs to programs MAC/VLAN filters */
+               status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
+               if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
+                       status = be_cmd_set_fn_privileges(adapter,
+                                                         privileges |
+                                                         BE_PRIV_FILTMGMT,
+                                                         vf + 1);
+                       if (!status)
+                               dev_info(dev, "VF%d has FILTMGMT privilege\n",
+                                        vf);
+               }
+
                /* BE3 FW, by default, caps VF TX-rate to 100mbps.
                 * Allow full available bandwidth
                 */
@@ -2971,41 +2980,6 @@ static void be_setup_init(struct be_adapter *adapter)
                adapter->cmd_privileges = MIN_PRIVILEGES;
 }
 
-static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
-                          bool *active_mac, u32 *pmac_id)
-{
-       int status = 0;
-
-       if (!is_zero_ether_addr(adapter->netdev->perm_addr)) {
-               memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
-               if (!lancer_chip(adapter) && !be_physfn(adapter))
-                       *active_mac = true;
-               else
-                       *active_mac = false;
-
-               return status;
-       }
-
-       if (lancer_chip(adapter)) {
-               status = be_cmd_get_mac_from_list(adapter, mac,
-                                                 active_mac, pmac_id, 0);
-               if (*active_mac) {
-                       status = be_cmd_mac_addr_query(adapter, mac, false,
-                                                      if_handle, *pmac_id);
-               }
-       } else if (be_physfn(adapter)) {
-               /* For BE3, for PF get permanent MAC */
-               status = be_cmd_mac_addr_query(adapter, mac, true, 0, 0);
-               *active_mac = false;
-       } else {
-               /* For BE3, for VF get soft MAC assigned by PF*/
-               status = be_cmd_mac_addr_query(adapter, mac, false,
-                                              if_handle, 0);
-               *active_mac = true;
-       }
-       return status;
-}
-
 static void be_get_resources(struct be_adapter *adapter)
 {
        u16 dev_num_vfs;
@@ -3022,13 +2996,6 @@ static void be_get_resources(struct be_adapter *adapter)
        }
 
        if (profile_present) {
-               /* Sanity fixes for Lancer */
-               adapter->max_pmac_cnt = min_t(u16, adapter->max_pmac_cnt,
-                                             BE_UC_PMAC_COUNT);
-               adapter->max_vlans = min_t(u16, adapter->max_vlans,
-                                          BE_NUM_VLANS_SUPPORTED);
-               adapter->max_mcast_mac = min_t(u16, adapter->max_mcast_mac,
-                                              BE_MAX_MC);
                adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
                                               MAX_TX_QS);
                adapter->max_rss_queues = min_t(u16, adapter->max_rss_queues,
@@ -3111,14 +3078,38 @@ err:
        return status;
 }
 
+static int be_mac_setup(struct be_adapter *adapter)
+{
+       u8 mac[ETH_ALEN];
+       int status;
+
+       if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
+               status = be_cmd_get_perm_mac(adapter, mac);
+               if (status)
+                       return status;
+
+               memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
+               memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
+       } else {
+               /* Maybe the HW was reset; dev_addr must be re-programmed */
+               memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
+       }
+
+       /* On BE3 VFs this cmd may fail due to lack of privilege.
+        * Ignore the failure as in this case pmac_id is fetched
+        * in the IFACE_CREATE cmd.
+        */
+       be_cmd_pmac_add(adapter, mac, adapter->if_handle,
+                       &adapter->pmac_id[0], 0);
+       return 0;
+}
+
 static int be_setup(struct be_adapter *adapter)
 {
        struct device *dev = &adapter->pdev->dev;
        u32 en_flags;
        u32 tx_fc, rx_fc;
        int status;
-       u8 mac[ETH_ALEN];
-       bool active_mac;
 
        be_setup_init(adapter);
 
@@ -3158,36 +3149,18 @@ static int be_setup(struct be_adapter *adapter)
 
        en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
                        BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
-
        if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
                en_flags |= BE_IF_FLAGS_RSS;
-
        en_flags = en_flags & adapter->if_cap_flags;
-
        status = be_cmd_if_create(adapter, adapter->if_cap_flags, en_flags,
                                  &adapter->if_handle, 0);
        if (status != 0)
                goto err;
 
-       memset(mac, 0, ETH_ALEN);
-       active_mac = false;
-       status = be_get_mac_addr(adapter, mac, adapter->if_handle,
-                                &active_mac, &adapter->pmac_id[0]);
-       if (status != 0)
+       status = be_mac_setup(adapter);
+       if (status)
                goto err;
 
-       if (!active_mac) {
-               status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
-                                        &adapter->pmac_id[0], 0);
-               if (status != 0)
-                       goto err;
-       }
-
-       if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
-               memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
-               memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
-       }
-
        status = be_tx_qs_create(adapter);
        if (status)
                goto err;
@@ -3241,7 +3214,7 @@ static void be_netpoll(struct net_device *netdev)
 #endif
 
 #define FW_FILE_HDR_SIGN       "ServerEngines Corp. "
-char flash_cookie[2][16] =      {"*** SE FLAS", "H DIRECTORY *** "};
+static char flash_cookie[2][16] =      {"*** SE FLAS", "H DIRECTORY *** "};
 
 static bool be_flash_redboot(struct be_adapter *adapter,
                        const u8 *p, u32 img_start, int image_size,
@@ -3298,7 +3271,7 @@ static bool is_comp_in_ufi(struct be_adapter *adapter,
 
 }
 
-struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
+static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
                                         int header_size,
                                         const struct firmware *fw)
 {
@@ -4164,7 +4137,8 @@ static void be_worker(struct work_struct *work)
                        be_cmd_get_stats(adapter, &adapter->stats_cmd);
        }
 
-       if (MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
+       if (be_physfn(adapter) &&
+           MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
                be_cmd_get_die_temperature(adapter);
 
        for_all_rx_queues(adapter, rxo, i) {
@@ -4253,7 +4227,7 @@ static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
 
        status = pci_enable_pcie_error_reporting(pdev);
        if (status)
-               dev_err(&pdev->dev, "Could not use PCIe error reporting\n");
+               dev_info(&pdev->dev, "Could not use PCIe error reporting\n");
 
        status = be_ctrl_init(adapter);
        if (status)
index f3d126dcc104fa88c8385cb235449fdbf0cdd99e..645e846daa5ccc22963aff64ebe511221e2acba4 100644 (file)
@@ -93,7 +93,7 @@ void be_roce_dev_add(struct be_adapter *adapter)
        }
 }
 
-void _be_roce_dev_remove(struct be_adapter *adapter)
+static void _be_roce_dev_remove(struct be_adapter *adapter)
 {
        if (ocrdma_drv && ocrdma_drv->remove && adapter->ocrdma_dev)
                ocrdma_drv->remove(adapter->ocrdma_dev);
@@ -110,7 +110,7 @@ void be_roce_dev_remove(struct be_adapter *adapter)
        }
 }
 
-void _be_roce_dev_open(struct be_adapter *adapter)
+static void _be_roce_dev_open(struct be_adapter *adapter)
 {
        if (ocrdma_drv && adapter->ocrdma_dev &&
            ocrdma_drv->state_change_handler)
@@ -126,7 +126,7 @@ void be_roce_dev_open(struct be_adapter *adapter)
        }
 }
 
-void _be_roce_dev_close(struct be_adapter *adapter)
+static void _be_roce_dev_close(struct be_adapter *adapter)
 {
        if (ocrdma_drv && adapter->ocrdma_dev &&
            ocrdma_drv->state_change_handler)
index 77ea0db0bbfc3e326d8137a26623de8fba1c4945..fdf9307ba9e6ee412e832266d5f05edcf9269334 100644 (file)
@@ -2056,10 +2056,6 @@ fec_probe(struct platform_device *pdev)
        if (of_id)
                pdev->id_entry = of_id->data;
 
-       r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!r)
-               return -ENXIO;
-
        /* Init network device */
        ndev = alloc_etherdev(sizeof(struct fec_enet_private));
        if (!ndev)
@@ -2077,6 +2073,7 @@ fec_probe(struct platform_device *pdev)
                fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG;
 #endif
 
+       r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        fep->hwp = devm_ioremap_resource(&pdev->dev, r);
        if (IS_ERR(fep->hwp)) {
                ret = PTR_ERR(fep->hwp);
@@ -2126,10 +2123,25 @@ fec_probe(struct platform_device *pdev)
                fep->bufdesc_ex = 0;
        }
 
-       clk_prepare_enable(fep->clk_ahb);
-       clk_prepare_enable(fep->clk_ipg);
-       clk_prepare_enable(fep->clk_enet_out);
-       clk_prepare_enable(fep->clk_ptp);
+       ret = clk_prepare_enable(fep->clk_ahb);
+       if (ret)
+               goto failed_clk;
+
+       ret = clk_prepare_enable(fep->clk_ipg);
+       if (ret)
+               goto failed_clk_ipg;
+
+       if (fep->clk_enet_out) {
+               ret = clk_prepare_enable(fep->clk_enet_out);
+               if (ret)
+                       goto failed_clk_enet_out;
+       }
+
+       if (fep->clk_ptp) {
+               ret = clk_prepare_enable(fep->clk_ptp);
+               if (ret)
+                       goto failed_clk_ptp;
+       }
 
        fep->reg_phy = devm_regulator_get(&pdev->dev, "phy");
        if (!IS_ERR(fep->reg_phy)) {
@@ -2160,14 +2172,10 @@ fec_probe(struct platform_device *pdev)
                        ret = irq;
                        goto failed_irq;
                }
-               ret = request_irq(irq, fec_enet_interrupt, IRQF_DISABLED, pdev->name, ndev);
-               if (ret) {
-                       while (--i >= 0) {
-                               irq = platform_get_irq(pdev, i);
-                               free_irq(irq, ndev);
-                       }
+               ret = devm_request_irq(&pdev->dev, irq, fec_enet_interrupt,
+                                      IRQF_DISABLED, pdev->name, ndev);
+               if (ret)
                        goto failed_irq;
-               }
        }
 
        ret = fec_enet_mii_init(pdev);
@@ -2191,19 +2199,19 @@ failed_register:
        fec_enet_mii_remove(fep);
 failed_mii_init:
 failed_irq:
-       for (i = 0; i < FEC_IRQ_NUM; i++) {
-               irq = platform_get_irq(pdev, i);
-               if (irq > 0)
-                       free_irq(irq, ndev);
-       }
 failed_init:
        if (fep->reg_phy)
                regulator_disable(fep->reg_phy);
 failed_regulator:
-       clk_disable_unprepare(fep->clk_ahb);
+       if (fep->clk_ptp)
+               clk_disable_unprepare(fep->clk_ptp);
+failed_clk_ptp:
+       if (fep->clk_enet_out)
+               clk_disable_unprepare(fep->clk_enet_out);
+failed_clk_enet_out:
        clk_disable_unprepare(fep->clk_ipg);
-       clk_disable_unprepare(fep->clk_enet_out);
-       clk_disable_unprepare(fep->clk_ptp);
+failed_clk_ipg:
+       clk_disable_unprepare(fep->clk_ahb);
 failed_clk:
 failed_ioremap:
        free_netdev(ndev);
@@ -2216,25 +2224,21 @@ fec_drv_remove(struct platform_device *pdev)
 {
        struct net_device *ndev = platform_get_drvdata(pdev);
        struct fec_enet_private *fep = netdev_priv(ndev);
-       int i;
 
        cancel_delayed_work_sync(&(fep->delay_work.delay_work));
        unregister_netdev(ndev);
        fec_enet_mii_remove(fep);
        del_timer_sync(&fep->time_keep);
-       for (i = 0; i < FEC_IRQ_NUM; i++) {
-               int irq = platform_get_irq(pdev, i);
-               if (irq > 0)
-                       free_irq(irq, ndev);
-       }
        if (fep->reg_phy)
                regulator_disable(fep->reg_phy);
-       clk_disable_unprepare(fep->clk_ptp);
+       if (fep->clk_ptp)
+               clk_disable_unprepare(fep->clk_ptp);
        if (fep->ptp_clock)
                ptp_clock_unregister(fep->ptp_clock);
-       clk_disable_unprepare(fep->clk_enet_out);
-       clk_disable_unprepare(fep->clk_ahb);
+       if (fep->clk_enet_out)
+               clk_disable_unprepare(fep->clk_enet_out);
        clk_disable_unprepare(fep->clk_ipg);
+       clk_disable_unprepare(fep->clk_ahb);
        free_netdev(ndev);
 
        return 0;
@@ -2251,9 +2255,12 @@ fec_suspend(struct device *dev)
                fec_stop(ndev);
                netif_device_detach(ndev);
        }
-       clk_disable_unprepare(fep->clk_enet_out);
-       clk_disable_unprepare(fep->clk_ahb);
+       if (fep->clk_ptp)
+               clk_disable_unprepare(fep->clk_ptp);
+       if (fep->clk_enet_out)
+               clk_disable_unprepare(fep->clk_enet_out);
        clk_disable_unprepare(fep->clk_ipg);
+       clk_disable_unprepare(fep->clk_ahb);
 
        if (fep->reg_phy)
                regulator_disable(fep->reg_phy);
@@ -2274,15 +2281,44 @@ fec_resume(struct device *dev)
                        return ret;
        }
 
-       clk_prepare_enable(fep->clk_enet_out);
-       clk_prepare_enable(fep->clk_ahb);
-       clk_prepare_enable(fep->clk_ipg);
+       ret = clk_prepare_enable(fep->clk_ahb);
+       if (ret)
+               goto failed_clk_ahb;
+
+       ret = clk_prepare_enable(fep->clk_ipg);
+       if (ret)
+               goto failed_clk_ipg;
+
+       if (fep->clk_enet_out) {
+               ret = clk_prepare_enable(fep->clk_enet_out);
+               if (ret)
+                       goto failed_clk_enet_out;
+       }
+
+       if (fep->clk_ptp) {
+               ret = clk_prepare_enable(fep->clk_ptp);
+               if (ret)
+                       goto failed_clk_ptp;
+       }
+
        if (netif_running(ndev)) {
                fec_restart(ndev, fep->full_duplex);
                netif_device_attach(ndev);
        }
 
        return 0;
+
+failed_clk_ptp:
+       if (fep->clk_enet_out)
+               clk_disable_unprepare(fep->clk_enet_out);
+failed_clk_enet_out:
+       clk_disable_unprepare(fep->clk_ipg);
+failed_clk_ipg:
+       clk_disable_unprepare(fep->clk_ahb);
+failed_clk_ahb:
+       if (fep->reg_phy)
+               regulator_disable(fep->reg_phy);
+       return ret;
 }
 #endif /* CONFIG_PM_SLEEP */
 
index 360a578c2bb7feffb7e36592a4750f577574fd52..e0528900db023cada5870329317996511e407e73 100644 (file)
@@ -123,12 +123,10 @@ static int mpc52xx_fec_mdio_probe(struct platform_device *of)
 
 static int mpc52xx_fec_mdio_remove(struct platform_device *of)
 {
-       struct device *dev = &of->dev;
-       struct mii_bus *bus = dev_get_drvdata(dev);
+       struct mii_bus *bus = platform_get_drvdata(of);
        struct mpc52xx_fec_mdio_priv *priv = bus->priv;
 
        mdiobus_unregister(bus);
-       dev_set_drvdata(dev, NULL);
        iounmap(priv->regs);
        kfree(priv);
        mdiobus_free(bus);
index c93a05654b46125b6eeda59cf6e0bbfc83c5f9ef..c4f65067cf7c30d94482b400ff86532da5546171 100644 (file)
@@ -409,7 +409,7 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev)
        priv->regs = priv->map + data->mii_offset;
 
        new_bus->parent = &pdev->dev;
-       dev_set_drvdata(&pdev->dev, new_bus);
+       platform_set_drvdata(pdev, new_bus);
 
        if (data->get_tbipa) {
                for_each_child_of_node(np, tbi) {
@@ -468,8 +468,6 @@ static int fsl_pq_mdio_remove(struct platform_device *pdev)
 
        mdiobus_unregister(bus);
 
-       dev_set_drvdata(device, NULL);
-
        iounmap(priv->map);
        mdiobus_free(bus);
 
index 8d2db7b808b7cb4321dbc6edeb12ebab94b7b0f0..b2c91dcd245f7985b66da22346a9b6f7ec5f3fb8 100644 (file)
@@ -593,7 +593,6 @@ static int gfar_parse_group(struct device_node *np,
                        return -EINVAL;
        }
 
-       grp->grp_id = priv->num_grps;
        grp->priv = priv;
        spin_lock_init(&grp->grplock);
        if (priv->mode == MQ_MG_MODE) {
@@ -1017,7 +1016,14 @@ static int gfar_probe(struct platform_device *ofdev)
        /* We need to delay at least 3 TX clocks */
        udelay(2);
 
-       tempval = (MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
+       tempval = 0;
+       if (!priv->pause_aneg_en && priv->tx_pause_en)
+               tempval |= MACCFG1_TX_FLOW;
+       if (!priv->pause_aneg_en && priv->rx_pause_en)
+               tempval |= MACCFG1_RX_FLOW;
+       /* the soft reset bit is not self-resetting, so we need to
+        * clear it before resuming normal operation
+        */
        gfar_write(&regs->maccfg1, tempval);
 
        /* Initialize MACCFG2. */
@@ -1461,7 +1467,7 @@ static int init_phy(struct net_device *dev)
        struct gfar_private *priv = netdev_priv(dev);
        uint gigabit_support =
                priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
-               SUPPORTED_1000baseT_Full : 0;
+               GFAR_SUPPORTED_GBIT : 0;
        phy_interface_t interface;
 
        priv->oldlink = 0;
@@ -2052,6 +2058,24 @@ static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
        return skip_txbd(bdp, 1, base, ring_size);
 }
 
+/* eTSEC12: csum generation not supported for some fcb offsets */
+static inline bool gfar_csum_errata_12(struct gfar_private *priv,
+                                      unsigned long fcb_addr)
+{
+       return (gfar_has_errata(priv, GFAR_ERRATA_12) &&
+              (fcb_addr % 0x20) > 0x18);
+}
+
+/* eTSEC76: csum generation for frames larger than 2500 may
+ * cause excess delays before start of transmission
+ */
+static inline bool gfar_csum_errata_76(struct gfar_private *priv,
+                                      unsigned int len)
+{
+       return (gfar_has_errata(priv, GFAR_ERRATA_76) &&
+              (len > 2500));
+}
+
 /* This is called by the kernel when a frame is ready for transmission.
  * It is pointed to by the dev->hard_start_xmit function pointer
  */
@@ -2064,23 +2088,11 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
        struct txfcb *fcb = NULL;
        struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL;
        u32 lstatus;
-       int i, rq = 0, do_tstamp = 0;
+       int i, rq = 0;
+       int do_tstamp, do_csum, do_vlan;
        u32 bufaddr;
        unsigned long flags;
-       unsigned int nr_frags, nr_txbds, length, fcb_length = GMAC_FCB_LEN;
-
-       /* TOE=1 frames larger than 2500 bytes may see excess delays
-        * before start of transmission.
-        */
-       if (unlikely(gfar_has_errata(priv, GFAR_ERRATA_76) &&
-                    skb->ip_summed == CHECKSUM_PARTIAL &&
-                    skb->len > 2500)) {
-               int ret;
-
-               ret = skb_checksum_help(skb);
-               if (ret)
-                       return ret;
-       }
+       unsigned int nr_frags, nr_txbds, length, fcb_len = 0;
 
        rq = skb->queue_mapping;
        tx_queue = priv->tx_queue[rq];
@@ -2088,21 +2100,23 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
        base = tx_queue->tx_bd_base;
        regs = tx_queue->grp->regs;
 
+       do_csum = (CHECKSUM_PARTIAL == skb->ip_summed);
+       do_vlan = vlan_tx_tag_present(skb);
+       do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+                   priv->hwts_tx_en;
+
+       if (do_csum || do_vlan)
+               fcb_len = GMAC_FCB_LEN;
+
        /* check if time stamp should be generated */
-       if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
-                    priv->hwts_tx_en)) {
-               do_tstamp = 1;
-               fcb_length = GMAC_FCB_LEN + GMAC_TXPAL_LEN;
-       }
+       if (unlikely(do_tstamp))
+               fcb_len = GMAC_FCB_LEN + GMAC_TXPAL_LEN;
 
        /* make space for additional header when fcb is needed */
-       if (((skb->ip_summed == CHECKSUM_PARTIAL) ||
-            vlan_tx_tag_present(skb) ||
-            unlikely(do_tstamp)) &&
-           (skb_headroom(skb) < fcb_length)) {
+       if (fcb_len && unlikely(skb_headroom(skb) < fcb_len)) {
                struct sk_buff *skb_new;
 
-               skb_new = skb_realloc_headroom(skb, fcb_length);
+               skb_new = skb_realloc_headroom(skb, fcb_len);
                if (!skb_new) {
                        dev->stats.tx_errors++;
                        kfree_skb(skb);
@@ -2185,36 +2199,38 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
                memset(skb->data, 0, GMAC_TXPAL_LEN);
        }
 
-       /* Set up checksumming */
-       if (CHECKSUM_PARTIAL == skb->ip_summed) {
+       /* Add TxFCB if required */
+       if (fcb_len) {
                fcb = gfar_add_fcb(skb);
-               /* as specified by errata */
-               if (unlikely(gfar_has_errata(priv, GFAR_ERRATA_12) &&
-                            ((unsigned long)fcb % 0x20) > 0x18)) {
+               lstatus |= BD_LFLAG(TXBD_TOE);
+       }
+
+       /* Set up checksumming */
+       if (do_csum) {
+               gfar_tx_checksum(skb, fcb, fcb_len);
+
+               if (unlikely(gfar_csum_errata_12(priv, (unsigned long)fcb)) ||
+                   unlikely(gfar_csum_errata_76(priv, skb->len))) {
                        __skb_pull(skb, GMAC_FCB_LEN);
                        skb_checksum_help(skb);
-               } else {
-                       lstatus |= BD_LFLAG(TXBD_TOE);
-                       gfar_tx_checksum(skb, fcb, fcb_length);
+                       if (do_vlan || do_tstamp) {
+                               /* put back a new fcb for vlan/tstamp TOE */
+                               fcb = gfar_add_fcb(skb);
+                       } else {
+                               /* Tx TOE not used */
+                               lstatus &= ~(BD_LFLAG(TXBD_TOE));
+                               fcb = NULL;
+                       }
                }
        }
 
-       if (vlan_tx_tag_present(skb)) {
-               if (unlikely(NULL == fcb)) {
-                       fcb = gfar_add_fcb(skb);
-                       lstatus |= BD_LFLAG(TXBD_TOE);
-               }
-
+       if (do_vlan)
                gfar_tx_vlan(skb, fcb);
-       }
 
        /* Setup tx hardware time stamping if requested */
        if (unlikely(do_tstamp)) {
                skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
-               if (fcb == NULL)
-                       fcb = gfar_add_fcb(skb);
                fcb->ptp = 1;
-               lstatus |= BD_LFLAG(TXBD_TOE);
        }
 
        txbdp_start->bufPtr = dma_map_single(priv->dev, skb->data,
@@ -2226,9 +2242,9 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
         * the full frame length.
         */
        if (unlikely(do_tstamp)) {
-               txbdp_tstamp->bufPtr = txbdp_start->bufPtr + fcb_length;
+               txbdp_tstamp->bufPtr = txbdp_start->bufPtr + fcb_len;
                txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_READY) |
-                                        (skb_headlen(skb) - fcb_length);
+                                        (skb_headlen(skb) - fcb_len);
                lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;
        } else {
                lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
@@ -3014,6 +3030,41 @@ static irqreturn_t gfar_interrupt(int irq, void *grp_id)
        return IRQ_HANDLED;
 }
 
+static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
+{
+       struct phy_device *phydev = priv->phydev;
+       u32 val = 0;
+
+       if (!phydev->duplex)
+               return val;
+
+       if (!priv->pause_aneg_en) {
+               if (priv->tx_pause_en)
+                       val |= MACCFG1_TX_FLOW;
+               if (priv->rx_pause_en)
+                       val |= MACCFG1_RX_FLOW;
+       } else {
+               u16 lcl_adv, rmt_adv;
+               u8 flowctrl;
+               /* get link partner capabilities */
+               rmt_adv = 0;
+               if (phydev->pause)
+                       rmt_adv = LPA_PAUSE_CAP;
+               if (phydev->asym_pause)
+                       rmt_adv |= LPA_PAUSE_ASYM;
+
+               lcl_adv = mii_advertise_flowctrl(phydev->advertising);
+
+               flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
+               if (flowctrl & FLOW_CTRL_TX)
+                       val |= MACCFG1_TX_FLOW;
+               if (flowctrl & FLOW_CTRL_RX)
+                       val |= MACCFG1_RX_FLOW;
+       }
+
+       return val;
+}
+
 /* Called every time the controller might need to be made
  * aware of new link state.  The PHY code conveys this
  * information through variables in the phydev structure, and this
@@ -3032,6 +3083,7 @@ static void adjust_link(struct net_device *dev)
        lock_tx_qs(priv);
 
        if (phydev->link) {
+               u32 tempval1 = gfar_read(&regs->maccfg1);
                u32 tempval = gfar_read(&regs->maccfg2);
                u32 ecntrl = gfar_read(&regs->ecntrl);
 
@@ -3080,6 +3132,10 @@ static void adjust_link(struct net_device *dev)
                        priv->oldspeed = phydev->speed;
                }
 
+               tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
+               tempval1 |= gfar_get_flowctrl_cfg(priv);
+
+               gfar_write(&regs->maccfg1, tempval1);
                gfar_write(&regs->maccfg2, tempval);
                gfar_write(&regs->ecntrl, ecntrl);
 
index 04b552cd419d5f2d8b1546c8371b5ebbe13bf8ff..46f56f36118f8c34e8fa818dc27e0906c386dfcc 100644 (file)
@@ -146,6 +146,10 @@ extern const char gfar_driver_version[];
                | SUPPORTED_Autoneg \
                | SUPPORTED_MII)
 
+#define GFAR_SUPPORTED_GBIT (SUPPORTED_1000baseT_Full \
+               | SUPPORTED_Pause \
+               | SUPPORTED_Asym_Pause)
+
 /* TBI register addresses */
 #define MII_TBICON             0x11
 
@@ -1009,7 +1013,6 @@ struct gfar_irqinfo {
  *     @napi: the napi poll function
  *     @priv: back pointer to the priv structure
  *     @regs: the ioremapped register space for this group
- *     @grp_id: group id for this group
  *     @irqinfo: TX/RX/ER irq data for this group
  */
 
@@ -1018,11 +1021,10 @@ struct gfar_priv_grp {
        struct  napi_struct napi;
        struct gfar_private *priv;
        struct gfar __iomem *regs;
-       unsigned int grp_id;
+       unsigned int rstat;
        unsigned long num_rx_queues;
        unsigned long rx_bit_map;
        /* cacheline 3 */
-       unsigned int rstat;
        unsigned int tstat;
        unsigned long num_tx_queues;
        unsigned long tx_bit_map;
@@ -1102,7 +1104,11 @@ struct gfar_private {
                /* Wake-on-LAN enabled */
                wol_en:1,
                /* Enable priorty based Tx scheduling in Hw */
-               prio_sched_en:1;
+               prio_sched_en:1,
+               /* Flow control flags */
+               pause_aneg_en:1,
+               tx_pause_en:1,
+               rx_pause_en:1;
 
        /* The total tx and rx ring size for the enabled queues */
        unsigned int total_tx_ring_size;
index 21cd88124ca96a2ad3342358531868175c958fc8..d3d7ede27ef14854f11b2f7c4a9648d34574dbeb 100644 (file)
@@ -535,6 +535,78 @@ static int gfar_sringparam(struct net_device *dev,
        return err;
 }
 
+static void gfar_gpauseparam(struct net_device *dev,
+                            struct ethtool_pauseparam *epause)
+{
+       struct gfar_private *priv = netdev_priv(dev);
+
+       epause->autoneg = !!priv->pause_aneg_en;
+       epause->rx_pause = !!priv->rx_pause_en;
+       epause->tx_pause = !!priv->tx_pause_en;
+}
+
+static int gfar_spauseparam(struct net_device *dev,
+                           struct ethtool_pauseparam *epause)
+{
+       struct gfar_private *priv = netdev_priv(dev);
+       struct phy_device *phydev = priv->phydev;
+       struct gfar __iomem *regs = priv->gfargrp[0].regs;
+       u32 oldadv, newadv;
+
+       if (!(phydev->supported & SUPPORTED_Pause) ||
+           (!(phydev->supported & SUPPORTED_Asym_Pause) &&
+            (epause->rx_pause != epause->tx_pause)))
+               return -EINVAL;
+
+       priv->rx_pause_en = priv->tx_pause_en = 0;
+       if (epause->rx_pause) {
+               priv->rx_pause_en = 1;
+
+               if (epause->tx_pause) {
+                       priv->tx_pause_en = 1;
+                       /* FLOW_CTRL_RX & TX */
+                       newadv = ADVERTISED_Pause;
+               } else  /* FLOW_CTLR_RX */
+                       newadv = ADVERTISED_Pause | ADVERTISED_Asym_Pause;
+       } else if (epause->tx_pause) {
+               priv->tx_pause_en = 1;
+               /* FLOW_CTLR_TX */
+               newadv = ADVERTISED_Asym_Pause;
+       } else
+               newadv = 0;
+
+       if (epause->autoneg)
+               priv->pause_aneg_en = 1;
+       else
+               priv->pause_aneg_en = 0;
+
+       oldadv = phydev->advertising &
+               (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
+       if (oldadv != newadv) {
+               phydev->advertising &=
+                       ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
+               phydev->advertising |= newadv;
+               if (phydev->autoneg)
+                       /* inform link partner of our
+                        * new flow ctrl settings
+                        */
+                       return phy_start_aneg(phydev);
+
+               if (!epause->autoneg) {
+                       u32 tempval;
+                       tempval = gfar_read(&regs->maccfg1);
+                       tempval &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
+                       if (priv->tx_pause_en)
+                               tempval |= MACCFG1_TX_FLOW;
+                       if (priv->rx_pause_en)
+                               tempval |= MACCFG1_RX_FLOW;
+                       gfar_write(&regs->maccfg1, tempval);
+               }
+       }
+
+       return 0;
+}
+
 int gfar_set_features(struct net_device *dev, netdev_features_t features)
 {
        struct gfar_private *priv = netdev_priv(dev);
@@ -1806,6 +1878,8 @@ const struct ethtool_ops gfar_ethtool_ops = {
        .set_coalesce = gfar_scoalesce,
        .get_ringparam = gfar_gringparam,
        .set_ringparam = gfar_sringparam,
+       .get_pauseparam = gfar_gpauseparam,
+       .set_pauseparam = gfar_spauseparam,
        .get_strings = gfar_gstrings,
        .get_sset_count = gfar_sset_count,
        .get_ethtool_stats = gfar_fill_stats,
index 3c43dac894ecddde910e2ce8e784c455944af915..5930c39672db25eee560dabd16cf38d1ca54636b 100644 (file)
@@ -3911,14 +3911,12 @@ static int ucc_geth_probe(struct platform_device* ofdev)
 
 static int ucc_geth_remove(struct platform_device* ofdev)
 {
-       struct device *device = &ofdev->dev;
-       struct net_device *dev = dev_get_drvdata(device);
+       struct net_device *dev = platform_get_drvdata(ofdev);
        struct ucc_geth_private *ugeth = netdev_priv(dev);
 
        unregister_netdev(dev);
        free_netdev(dev);
        ucc_geth_memclean(ugeth);
-       dev_set_drvdata(device, NULL);
 
        return 0;
 }
index 93346f00486ba78ddf719a23de6ab71fd9058ea7..79aef681ac85db206d65f0a31944c7ee1a6a3374 100644 (file)
@@ -133,8 +133,8 @@ struct rfd_struct
   unsigned char  last;         /* Bit15,Last Frame on List / Bit14,suspend */
   unsigned short next;         /* linkoffset to next RFD */
   unsigned short rbd_offset;   /* pointeroffset to RBD-buffer */
-  unsigned char  dest[6];      /* ethernet-address, destination */
-  unsigned char  source[6];    /* ethernet-address, source */
+  unsigned char  dest[ETH_ALEN];       /* ethernet-address, destination */
+  unsigned char  source[ETH_ALEN];     /* ethernet-address, source */
   unsigned short length;       /* 802.3 frame-length */
   unsigned short zero_dummy;   /* dummy */
 };
index d300a0c0eafc0521a4e86688abd7e54afd0ce58e..2d3b064d6924a90ee4e9715615faf83e50a53512 100644 (file)
@@ -2955,8 +2955,6 @@ static int emac_remove(struct platform_device *ofdev)
 
        DBG(dev, "remove" NL);
 
-       dev_set_drvdata(&ofdev->dev, NULL);
-
        unregister_netdev(dev->ndev);
 
        cancel_work_sync(&dev->reset_work);
index 5115ae76a5d1c02f0788138e68fac4acdb551767..ada6e210279f3750e26ea5720ca3bbe5057abf2c 100644 (file)
@@ -1175,15 +1175,12 @@ static int e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
                config->rx_discard_short_frames = 0x0;  /* 1=discard, 0=save */
        }
 
-       netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
-                    "[00-07]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
-                    c[0], c[1], c[2], c[3], c[4], c[5], c[6], c[7]);
-       netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
-                    "[08-15]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
-                    c[8], c[9], c[10], c[11], c[12], c[13], c[14], c[15]);
-       netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
-                    "[16-23]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
-                    c[16], c[17], c[18], c[19], c[20], c[21], c[22], c[23]);
+       netif_printk(nic, hw, KERN_DEBUG, nic->netdev, "[00-07]=%8ph\n",
+                    c + 0);
+       netif_printk(nic, hw, KERN_DEBUG, nic->netdev, "[08-15]=%8ph\n",
+                    c + 8);
+       netif_printk(nic, hw, KERN_DEBUG, nic->netdev, "[16-23]=%8ph\n",
+                    c + 16);
        return 0;
 }
 
index 4c303e2a7cb3f3aae1ed974d6a066168c7e82c4a..104fcec86af323dc6faab94d4581acb02d3e8729 100644 (file)
@@ -2057,6 +2057,7 @@ const struct e1000_info e1000_82583_info = {
                                  | FLAG_HAS_JUMBO_FRAMES
                                  | FLAG_HAS_CTRLEXT_ON_LOAD,
        .flags2                 = FLAG2_DISABLE_ASPM_L0S
+                                 | FLAG2_DISABLE_ASPM_L1
                                  | FLAG2_NO_DISABLE_RX,
        .pba                    = 32,
        .max_hw_frame_size      = DEFAULT_JUMBO,
index ffbc08f56c40cd9f55c2753b74d294b5caa540b4..ad0edd11015d7b40a14d79f06afdab8ecec76cef 100644 (file)
@@ -90,9 +90,6 @@ struct e1000_info;
 
 #define E1000_MNG_VLAN_NONE            (-1)
 
-/* Number of packet split data buffers (not including the header buffer) */
-#define PS_PAGE_BUFFERS                        (MAX_PS_BUFFERS - 1)
-
 #define DEFAULT_JUMBO                  9234
 
 /* Time to wait before putting the device into D3 if there's no link (in ms). */
index 59c22bf18701bf6505b13c0f16228a43694e2bb7..e4ebd7ddf5f2e42a7ea2e214022eabab8d95b114 100644 (file)
@@ -173,7 +173,7 @@ static int e1000_get_settings(struct net_device *netdev,
                        speed = adapter->link_speed;
                        ecmd->duplex = adapter->link_duplex - 1;
                }
-       } else {
+       } else if (!pm_runtime_suspended(netdev->dev.parent)) {
                u32 status = er32(STATUS);
                if (status & E1000_STATUS_LU) {
                        if (status & E1000_STATUS_SPEED_1000)
@@ -264,6 +264,9 @@ static int e1000_set_settings(struct net_device *netdev,
 {
        struct e1000_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
+       int ret_val = 0;
+
+       pm_runtime_get_sync(netdev->dev.parent);
 
        /* When SoL/IDER sessions are active, autoneg/speed/duplex
         * cannot be changed
@@ -271,7 +274,8 @@ static int e1000_set_settings(struct net_device *netdev,
        if (hw->phy.ops.check_reset_block &&
            hw->phy.ops.check_reset_block(hw)) {
                e_err("Cannot change link characteristics when SoL/IDER is active.\n");
-               return -EINVAL;
+               ret_val = -EINVAL;
+               goto out;
        }
 
        /* MDI setting is only allowed when autoneg enabled because
@@ -279,13 +283,16 @@ static int e1000_set_settings(struct net_device *netdev,
         * duplex is forced.
         */
        if (ecmd->eth_tp_mdix_ctrl) {
-               if (hw->phy.media_type != e1000_media_type_copper)
-                       return -EOPNOTSUPP;
+               if (hw->phy.media_type != e1000_media_type_copper) {
+                       ret_val = -EOPNOTSUPP;
+                       goto out;
+               }
 
                if ((ecmd->eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) &&
                    (ecmd->autoneg != AUTONEG_ENABLE)) {
                        e_err("forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n");
-                       return -EINVAL;
+                       ret_val = -EINVAL;
+                       goto out;
                }
        }
 
@@ -307,8 +314,8 @@ static int e1000_set_settings(struct net_device *netdev,
                u32 speed = ethtool_cmd_speed(ecmd);
                /* calling this overrides forced MDI setting */
                if (e1000_set_spd_dplx(adapter, speed, ecmd->duplex)) {
-                       clear_bit(__E1000_RESETTING, &adapter->state);
-                       return -EINVAL;
+                       ret_val = -EINVAL;
+                       goto out;
                }
        }
 
@@ -331,8 +338,10 @@ static int e1000_set_settings(struct net_device *netdev,
                e1000e_reset(adapter);
        }
 
+out:
+       pm_runtime_put_sync(netdev->dev.parent);
        clear_bit(__E1000_RESETTING, &adapter->state);
-       return 0;
+       return ret_val;
 }
 
 static void e1000_get_pauseparam(struct net_device *netdev,
@@ -366,6 +375,8 @@ static int e1000_set_pauseparam(struct net_device *netdev,
        while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
                usleep_range(1000, 2000);
 
+       pm_runtime_get_sync(netdev->dev.parent);
+
        if (adapter->fc_autoneg == AUTONEG_ENABLE) {
                hw->fc.requested_mode = e1000_fc_default;
                if (netif_running(adapter->netdev)) {
@@ -398,6 +409,7 @@ static int e1000_set_pauseparam(struct net_device *netdev,
        }
 
 out:
+       pm_runtime_put_sync(netdev->dev.parent);
        clear_bit(__E1000_RESETTING, &adapter->state);
        return retval;
 }
@@ -428,6 +440,8 @@ static void e1000_get_regs(struct net_device *netdev,
        u32 *regs_buff = p;
        u16 phy_data;
 
+       pm_runtime_get_sync(netdev->dev.parent);
+
        memset(p, 0, E1000_REGS_LEN * sizeof(u32));
 
        regs->version = (1 << 24) | (adapter->pdev->revision << 16) |
@@ -472,6 +486,8 @@ static void e1000_get_regs(struct net_device *netdev,
        e1e_rphy(hw, MII_STAT1000, &phy_data);
        regs_buff[24] = (u32)phy_data;  /* phy local receiver status */
        regs_buff[25] = regs_buff[24];  /* phy remote receiver status */
+
+       pm_runtime_put_sync(netdev->dev.parent);
 }
 
 static int e1000_get_eeprom_len(struct net_device *netdev)
@@ -504,6 +520,8 @@ static int e1000_get_eeprom(struct net_device *netdev,
        if (!eeprom_buff)
                return -ENOMEM;
 
+       pm_runtime_get_sync(netdev->dev.parent);
+
        if (hw->nvm.type == e1000_nvm_eeprom_spi) {
                ret_val = e1000_read_nvm(hw, first_word,
                                         last_word - first_word + 1,
@@ -517,6 +535,8 @@ static int e1000_get_eeprom(struct net_device *netdev,
                }
        }
 
+       pm_runtime_put_sync(netdev->dev.parent);
+
        if (ret_val) {
                /* a read error occurred, throw away the result */
                memset(eeprom_buff, 0xff, sizeof(u16) *
@@ -566,6 +586,8 @@ static int e1000_set_eeprom(struct net_device *netdev,
 
        ptr = (void *)eeprom_buff;
 
+       pm_runtime_get_sync(netdev->dev.parent);
+
        if (eeprom->offset & 1) {
                /* need read/modify/write of first changed EEPROM word */
                /* only the second byte of the word is being modified */
@@ -606,6 +628,7 @@ static int e1000_set_eeprom(struct net_device *netdev,
                ret_val = e1000e_update_nvm_checksum(hw);
 
 out:
+       pm_runtime_put_sync(netdev->dev.parent);
        kfree(eeprom_buff);
        return ret_val;
 }
@@ -701,6 +724,8 @@ static int e1000_set_ringparam(struct net_device *netdev,
                }
        }
 
+       pm_runtime_get_sync(netdev->dev.parent);
+
        e1000e_down(adapter);
 
        /* We can't just free everything and then setup again, because the
@@ -739,6 +764,7 @@ err_setup_rx:
                e1000e_free_tx_resources(temp_tx);
 err_setup:
        e1000e_up(adapter);
+       pm_runtime_put_sync(netdev->dev.parent);
 free_temp:
        vfree(temp_tx);
        vfree(temp_rx);
@@ -1732,6 +1758,8 @@ static void e1000_diag_test(struct net_device *netdev,
        u8 autoneg;
        bool if_running = netif_running(netdev);
 
+       pm_runtime_get_sync(netdev->dev.parent);
+
        set_bit(__E1000_TESTING, &adapter->state);
 
        if (!if_running) {
@@ -1817,6 +1845,8 @@ static void e1000_diag_test(struct net_device *netdev,
        }
 
        msleep_interruptible(4 * 1000);
+
+       pm_runtime_put_sync(netdev->dev.parent);
 }
 
 static void e1000_get_wol(struct net_device *netdev,
@@ -1891,6 +1921,8 @@ static int e1000_set_phys_id(struct net_device *netdev,
 
        switch (state) {
        case ETHTOOL_ID_ACTIVE:
+               pm_runtime_get_sync(netdev->dev.parent);
+
                if (!hw->mac.ops.blink_led)
                        return 2;       /* cycle on/off twice per second */
 
@@ -1902,6 +1934,7 @@ static int e1000_set_phys_id(struct net_device *netdev,
                        e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED, 0);
                hw->mac.ops.led_off(hw);
                hw->mac.ops.cleanup_led(hw);
+               pm_runtime_put_sync(netdev->dev.parent);
                break;
 
        case ETHTOOL_ID_ON:
@@ -1912,6 +1945,7 @@ static int e1000_set_phys_id(struct net_device *netdev,
                hw->mac.ops.led_off(hw);
                break;
        }
+
        return 0;
 }
 
@@ -1950,11 +1984,15 @@ static int e1000_set_coalesce(struct net_device *netdev,
                adapter->itr_setting = adapter->itr & ~3;
        }
 
+       pm_runtime_get_sync(netdev->dev.parent);
+
        if (adapter->itr_setting != 0)
                e1000e_write_itr(adapter, adapter->itr);
        else
                e1000e_write_itr(adapter, 0);
 
+       pm_runtime_put_sync(netdev->dev.parent);
+
        return 0;
 }
 
@@ -1968,7 +2006,9 @@ static int e1000_nway_reset(struct net_device *netdev)
        if (!adapter->hw.mac.autoneg)
                return -EINVAL;
 
+       pm_runtime_get_sync(netdev->dev.parent);
        e1000e_reinit_locked(adapter);
+       pm_runtime_put_sync(netdev->dev.parent);
 
        return 0;
 }
@@ -1982,7 +2022,12 @@ static void e1000_get_ethtool_stats(struct net_device *netdev,
        int i;
        char *p = NULL;
 
+       pm_runtime_get_sync(netdev->dev.parent);
+
        e1000e_get_stats64(netdev, &net_stats);
+
+       pm_runtime_put_sync(netdev->dev.parent);
+
        for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
                switch (e1000_gstrings_stats[i].type) {
                case NETDEV_STATS:
@@ -2033,7 +2078,11 @@ static int e1000_get_rxnfc(struct net_device *netdev,
        case ETHTOOL_GRXFH: {
                struct e1000_adapter *adapter = netdev_priv(netdev);
                struct e1000_hw *hw = &adapter->hw;
-               u32 mrqc = er32(MRQC);
+               u32 mrqc;
+
+               pm_runtime_get_sync(netdev->dev.parent);
+               mrqc = er32(MRQC);
+               pm_runtime_put_sync(netdev->dev.parent);
 
                if (!(mrqc & E1000_MRQC_RSS_FIELD_MASK))
                        return 0;
@@ -2096,9 +2145,13 @@ static int e1000e_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
                return -EOPNOTSUPP;
        }
 
+       pm_runtime_get_sync(netdev->dev.parent);
+
        ret_val = hw->phy.ops.acquire(hw);
-       if (ret_val)
+       if (ret_val) {
+               pm_runtime_put_sync(netdev->dev.parent);
                return -EBUSY;
+       }
 
        /* EEE Capability */
        ret_val = e1000_read_emi_reg_locked(hw, cap_addr, &phy_data);
@@ -2117,14 +2170,11 @@ static int e1000e_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
 
        /* EEE PCS Status */
        ret_val = e1000_read_emi_reg_locked(hw, pcs_stat_addr, &phy_data);
+       if (ret_val)
+               goto release;
        if (hw->phy.type == e1000_phy_82579)
                phy_data <<= 8;
 
-release:
-       hw->phy.ops.release(hw);
-       if (ret_val)
-               return -ENODATA;
-
        /* Result of the EEE auto negotiation - there is no register that
         * has the status of the EEE negotiation so do a best-guess based
         * on whether Tx or Rx LPI indications have been received.
@@ -2136,7 +2186,14 @@ release:
        edata->tx_lpi_enabled = true;
        edata->tx_lpi_timer = er32(LPIC) >> E1000_LPIC_LPIET_SHIFT;
 
-       return 0;
+release:
+       hw->phy.ops.release(hw);
+       if (ret_val)
+               ret_val = -ENODATA;
+
+       pm_runtime_put_sync(netdev->dev.parent);
+
+       return ret_val;
 }
 
 static int e1000e_set_eee(struct net_device *netdev, struct ethtool_eee *edata)
@@ -2169,12 +2226,16 @@ static int e1000e_set_eee(struct net_device *netdev, struct ethtool_eee *edata)
 
        hw->dev_spec.ich8lan.eee_disable = !edata->eee_enabled;
 
+       pm_runtime_get_sync(netdev->dev.parent);
+
        /* reset the link */
        if (netif_running(netdev))
                e1000e_reinit_locked(adapter);
        else
                e1000e_reset(adapter);
 
+       pm_runtime_put_sync(netdev->dev.parent);
+
        return 0;
 }
 
@@ -2212,19 +2273,7 @@ static int e1000e_get_ts_info(struct net_device *netdev,
        return 0;
 }
 
-static int e1000e_ethtool_begin(struct net_device *netdev)
-{
-       return pm_runtime_get_sync(netdev->dev.parent);
-}
-
-static void e1000e_ethtool_complete(struct net_device *netdev)
-{
-       pm_runtime_put_sync(netdev->dev.parent);
-}
-
 static const struct ethtool_ops e1000_ethtool_ops = {
-       .begin                  = e1000e_ethtool_begin,
-       .complete               = e1000e_ethtool_complete,
        .get_settings           = e1000_get_settings,
        .set_settings           = e1000_set_settings,
        .get_drvinfo            = e1000_get_drvinfo,
index a6f903a9b7731cd3460d5337b98d3ae79a4d3661..b799fd9b6aa9e163f7a705bc6c6c2e35fc94ac8d 100644 (file)
@@ -90,6 +90,10 @@ struct e1000_hw;
 #define E1000_DEV_ID_PCH_LPT_I217_V            0x153B
 #define E1000_DEV_ID_PCH_LPTLP_I218_LM         0x155A
 #define E1000_DEV_ID_PCH_LPTLP_I218_V          0x1559
+#define E1000_DEV_ID_PCH_I218_LM2              0x15A0
+#define E1000_DEV_ID_PCH_I218_V2               0x15A1
+#define E1000_DEV_ID_PCH_I218_LM3              0x15A2  /* Wildcat Point PCH */
+#define E1000_DEV_ID_PCH_I218_V3               0x15A3  /* Wildcat Point PCH */
 
 #define E1000_REVISION_4       4
 
@@ -227,6 +231,9 @@ union e1000_rx_desc_extended {
 };
 
 #define MAX_PS_BUFFERS 4
+
+/* Number of packet split data buffers (not including the header buffer) */
+#define PS_PAGE_BUFFERS                        (MAX_PS_BUFFERS - 1)
 /* Receive Descriptor - Packet Split */
 union e1000_rx_desc_packet_split {
        struct {
@@ -251,7 +258,8 @@ union e1000_rx_desc_packet_split {
                } middle;
                struct {
                        __le16 header_status;
-                       __le16 length[3];       /* length of buffers 1-3 */
+                       /* length of buffers 1-3 */
+                       __le16 length[PS_PAGE_BUFFERS];
                } upper;
                __le64 reserved;
        } wb; /* writeback */
index 9dde390f7e71c34f327f1e1bc213dce5338b917c..af08188d7e624471ed3e8b87df7d39fd24ba3815 100644 (file)
@@ -185,6 +185,7 @@ static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
        u32 phy_id = 0;
        s32 ret_val;
        u16 retry_count;
+       u32 mac_reg = 0;
 
        for (retry_count = 0; retry_count < 2; retry_count++) {
                ret_val = e1e_rphy_locked(hw, MII_PHYSID1, &phy_reg);
@@ -203,11 +204,11 @@ static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
 
        if (hw->phy.id) {
                if (hw->phy.id == phy_id)
-                       return true;
+                       goto out;
        } else if (phy_id) {
                hw->phy.id = phy_id;
                hw->phy.revision = (u32)(phy_reg & ~PHY_REVISION_MASK);
-               return true;
+               goto out;
        }
 
        /* In case the PHY needs to be in mdio slow mode,
@@ -219,7 +220,22 @@ static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
                ret_val = e1000e_get_phy_id(hw);
        hw->phy.ops.acquire(hw);
 
-       return !ret_val;
+       if (ret_val)
+               return false;
+out:
+       if (hw->mac.type == e1000_pch_lpt) {
+               /* Unforce SMBus mode in PHY */
+               e1e_rphy_locked(hw, CV_SMB_CTRL, &phy_reg);
+               phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
+               e1e_wphy_locked(hw, CV_SMB_CTRL, phy_reg);
+
+               /* Unforce SMBus mode in MAC */
+               mac_reg = er32(CTRL_EXT);
+               mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
+               ew32(CTRL_EXT, mac_reg);
+       }
+
+       return true;
 }
 
 /**
@@ -233,7 +249,6 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
 {
        u32 mac_reg, fwsm = er32(FWSM);
        s32 ret_val;
-       u16 phy_reg;
 
        /* Gate automatic PHY configuration by hardware on managed and
         * non-managed 82579 and newer adapters.
@@ -262,22 +277,16 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
                mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
                ew32(CTRL_EXT, mac_reg);
 
+               /* Wait 50 milliseconds for MAC to finish any retries
+                * that it might be trying to perform from previous
+                * attempts to acknowledge any phy read requests.
+                */
+               msleep(50);
+
                /* fall-through */
        case e1000_pch2lan:
-               if (e1000_phy_is_accessible_pchlan(hw)) {
-                       if (hw->mac.type == e1000_pch_lpt) {
-                               /* Unforce SMBus mode in PHY */
-                               e1e_rphy_locked(hw, CV_SMB_CTRL, &phy_reg);
-                               phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
-                               e1e_wphy_locked(hw, CV_SMB_CTRL, phy_reg);
-
-                               /* Unforce SMBus mode in MAC */
-                               mac_reg = er32(CTRL_EXT);
-                               mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
-                               ew32(CTRL_EXT, mac_reg);
-                       }
+               if (e1000_phy_is_accessible_pchlan(hw))
                        break;
-               }
 
                /* fall-through */
        case e1000_pchlan:
@@ -287,6 +296,7 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
 
                if (hw->phy.ops.check_reset_block(hw)) {
                        e_dbg("Required LANPHYPC toggle blocked by ME\n");
+                       ret_val = -E1000_ERR_PHY;
                        break;
                }
 
@@ -298,15 +308,6 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
                mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
                ew32(FEXTNVM3, mac_reg);
 
-               if (hw->mac.type == e1000_pch_lpt) {
-                       /* Toggling LANPHYPC brings the PHY out of SMBus mode
-                        * So ensure that the MAC is also out of SMBus mode
-                        */
-                       mac_reg = er32(CTRL_EXT);
-                       mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
-                       ew32(CTRL_EXT, mac_reg);
-               }
-
                /* Toggle LANPHYPC Value bit */
                mac_reg = er32(CTRL);
                mac_reg |= E1000_CTRL_LANPHYPC_OVERRIDE;
@@ -325,6 +326,21 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
                                usleep_range(5000, 10000);
                        } while (!(er32(CTRL_EXT) &
                                   E1000_CTRL_EXT_LPCD) && count--);
+                       usleep_range(30000, 60000);
+                       if (e1000_phy_is_accessible_pchlan(hw))
+                               break;
+
+                       /* Toggling LANPHYPC brings the PHY out of SMBus mode
+                        * so ensure that the MAC is also out of SMBus mode
+                        */
+                       mac_reg = er32(CTRL_EXT);
+                       mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
+                       ew32(CTRL_EXT, mac_reg);
+
+                       if (e1000_phy_is_accessible_pchlan(hw))
+                               break;
+
+                       ret_val = -E1000_ERR_PHY;
                }
                break;
        default:
@@ -332,13 +348,14 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
        }
 
        hw->phy.ops.release(hw);
-
-       /* Reset the PHY before any access to it.  Doing so, ensures
-        * that the PHY is in a known good state before we read/write
-        * PHY registers.  The generic reset is sufficient here,
-        * because we haven't determined the PHY type yet.
-        */
-       ret_val = e1000e_phy_hw_reset_generic(hw);
+       if (!ret_val) {
+               /* Reset the PHY before any access to it.  Doing so, ensures
+                * that the PHY is in a known good state before we read/write
+                * PHY registers.  The generic reset is sufficient here,
+                * because we haven't determined the PHY type yet.
+                */
+               ret_val = e1000e_phy_hw_reset_generic(hw);
+       }
 
 out:
        /* Ungate automatic PHY configuration on non-managed 82579 */
@@ -793,29 +810,31 @@ release:
  *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
  *  preventing further DMA write requests.  Workaround the issue by disabling
  *  the de-assertion of the clock request when in 1Gpbs mode.
+ *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
+ *  speeds in order to avoid Tx hangs.
  **/
 static s32 e1000_k1_workaround_lpt_lp(struct e1000_hw *hw, bool link)
 {
        u32 fextnvm6 = er32(FEXTNVM6);
+       u32 status = er32(STATUS);
        s32 ret_val = 0;
+       u16 reg;
 
-       if (link && (er32(STATUS) & E1000_STATUS_SPEED_1000)) {
-               u16 kmrn_reg;
-
+       if (link && (status & E1000_STATUS_SPEED_1000)) {
                ret_val = hw->phy.ops.acquire(hw);
                if (ret_val)
                        return ret_val;
 
                ret_val =
                    e1000e_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
-                                               &kmrn_reg);
+                                               &reg);
                if (ret_val)
                        goto release;
 
                ret_val =
                    e1000e_write_kmrn_reg_locked(hw,
                                                 E1000_KMRNCTRLSTA_K1_CONFIG,
-                                                kmrn_reg &
+                                                reg &
                                                 ~E1000_KMRNCTRLSTA_K1_ENABLE);
                if (ret_val)
                        goto release;
@@ -827,12 +846,45 @@ static s32 e1000_k1_workaround_lpt_lp(struct e1000_hw *hw, bool link)
                ret_val =
                    e1000e_write_kmrn_reg_locked(hw,
                                                 E1000_KMRNCTRLSTA_K1_CONFIG,
-                                                kmrn_reg);
+                                                reg);
 release:
                hw->phy.ops.release(hw);
        } else {
                /* clear FEXTNVM6 bit 8 on link down or 10/100 */
-               ew32(FEXTNVM6, fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK);
+               fextnvm6 &= ~E1000_FEXTNVM6_REQ_PLL_CLK;
+
+               if (!link || ((status & E1000_STATUS_SPEED_100) &&
+                             (status & E1000_STATUS_FD)))
+                       goto update_fextnvm6;
+
+               ret_val = e1e_rphy(hw, I217_INBAND_CTRL, &reg);
+               if (ret_val)
+                       return ret_val;
+
+               /* Clear link status transmit timeout */
+               reg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
+
+               if (status & E1000_STATUS_SPEED_100) {
+                       /* Set inband Tx timeout to 5x10us for 100Half */
+                       reg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
+
+                       /* Do not extend the K1 entry latency for 100Half */
+                       fextnvm6 &= ~E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
+               } else {
+                       /* Set inband Tx timeout to 50x10us for 10Full/Half */
+                       reg |= 50 <<
+                           I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
+
+                       /* Extend the K1 entry latency for 10 Mbps */
+                       fextnvm6 |= E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
+               }
+
+               ret_val = e1e_wphy(hw, I217_INBAND_CTRL, reg);
+               if (ret_val)
+                       return ret_val;
+
+update_fextnvm6:
+               ew32(FEXTNVM6, fextnvm6);
        }
 
        return ret_val;
@@ -993,7 +1045,9 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
 
        /* Work-around I218 hang issue */
        if ((hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
-           (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_V)) {
+           (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
+           (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_LM3) ||
+           (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_V3)) {
                ret_val = e1000_k1_workaround_lpt_lp(hw, link);
                if (ret_val)
                        return ret_val;
@@ -4168,7 +4222,9 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
                u16 phy_reg, device_id = hw->adapter->pdev->device;
 
                if ((device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
-                   (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V)) {
+                   (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
+                   (device_id == E1000_DEV_ID_PCH_I218_LM3) ||
+                   (device_id == E1000_DEV_ID_PCH_I218_V3)) {
                        u32 fextnvm6 = er32(FEXTNVM6);
 
                        ew32(FEXTNVM6, fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK);
index 80034a2b297c1c17f39f5f66d6dbacfab512e5eb..59865695b2826a388b721b2313253e25d86a7c94 100644 (file)
@@ -93,6 +93,7 @@
 #define E1000_FEXTNVM4_BEACON_DURATION_16USEC  0x3
 
 #define E1000_FEXTNVM6_REQ_PLL_CLK     0x00000100
+#define E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION       0x00000200
 
 #define PCIE_ICH8_SNOOP_ALL    PCIE_NO_SNOOP_ALL
 
 
 #define SW_FLAG_TIMEOUT                1000    /* SW Semaphore flag timeout in ms */
 
+/* Inband Control */
+#define I217_INBAND_CTRL                               PHY_REG(770, 18)
+#define I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK     0x3F00
+#define I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT    8
+
 /* PHY Low Power Idle Control */
 #define I82579_LPI_CTRL                                PHY_REG(772, 20)
 #define I82579_LPI_CTRL_100_ENABLE             0x2000
index 77f81cbb601a4b248f4018f76a3d6efbb94726bc..e6d2c0f8f76a8cf7f32fcbb9f0e7f8f126064bf4 100644 (file)
@@ -2979,17 +2979,10 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
        u32 pages = 0;
 
        /* Workaround Si errata on PCHx - configure jumbo frame flow */
-       if (hw->mac.type >= e1000_pch2lan) {
-               s32 ret_val;
-
-               if (adapter->netdev->mtu > ETH_DATA_LEN)
-                       ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true);
-               else
-                       ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false);
-
-               if (ret_val)
-                       e_dbg("failed to enable jumbo frame workaround mode\n");
-       }
+       if ((hw->mac.type >= e1000_pch2lan) &&
+           (adapter->netdev->mtu > ETH_DATA_LEN) &&
+           e1000_lv_jumbo_workaround_ich8lan(hw, true))
+               e_dbg("failed to enable jumbo frame workaround mode\n");
 
        /* Program MC offset vector base */
        rctl = er32(RCTL);
@@ -3826,6 +3819,8 @@ void e1000e_reset(struct e1000_adapter *adapter)
                        break;
                }
 
+               pba = 14;
+               ew32(PBA, pba);
                fc->high_water = ((pba << 10) * 9 / 10) & E1000_FCRTH_RTH;
                fc->low_water = ((pba << 10) * 8 / 10) & E1000_FCRTL_RTL;
                break;
@@ -4034,6 +4029,12 @@ void e1000e_down(struct e1000_adapter *adapter)
        adapter->link_speed = 0;
        adapter->link_duplex = 0;
 
+       /* Disable Si errata workaround on PCHx for jumbo frame flow */
+       if ((hw->mac.type >= e1000_pch2lan) &&
+           (adapter->netdev->mtu > ETH_DATA_LEN) &&
+           e1000_lv_jumbo_workaround_ich8lan(hw, false))
+               e_dbg("failed to disable jumbo frame workaround mode\n");
+
        if (!pci_channel_offline(adapter->pdev))
                e1000e_reset(adapter);
 
@@ -4683,11 +4684,11 @@ static void e1000_phy_read_status(struct e1000_adapter *adapter)
        struct e1000_hw *hw = &adapter->hw;
        struct e1000_phy_regs *phy = &adapter->phy_regs;
 
-       if ((er32(STATUS) & E1000_STATUS_LU) &&
+       if (!pm_runtime_suspended((&adapter->pdev->dev)->parent) &&
+           (er32(STATUS) & E1000_STATUS_LU) &&
            (adapter->hw.phy.media_type == e1000_media_type_copper)) {
                int ret_val;
 
-               pm_runtime_get_sync(&adapter->pdev->dev);
                ret_val = e1e_rphy(hw, MII_BMCR, &phy->bmcr);
                ret_val |= e1e_rphy(hw, MII_BMSR, &phy->bmsr);
                ret_val |= e1e_rphy(hw, MII_ADVERTISE, &phy->advertise);
@@ -4698,7 +4699,6 @@ static void e1000_phy_read_status(struct e1000_adapter *adapter)
                ret_val |= e1e_rphy(hw, MII_ESTATUS, &phy->estatus);
                if (ret_val)
                        e_warn("Error reading PHY register\n");
-               pm_runtime_put_sync(&adapter->pdev->dev);
        } else {
                /* Do not read PHY registers if link is not up
                 * Set values to typical power-on defaults
@@ -5995,6 +5995,8 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
         */
        e1000e_release_hw_control(adapter);
 
+       pci_clear_master(pdev);
+
        /* The pci-e switch on some quad port adapters will report a
         * correctable error when the MAC transitions from D0 to D3.  To
         * prevent this we need to mask off the correctable errors on the
@@ -6723,10 +6725,6 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        adapter->hw.fc.current_mode = e1000_fc_default;
        adapter->hw.phy.autoneg_advertised = 0x2f;
 
-       /* ring size defaults */
-       adapter->rx_ring->count = E1000_DEFAULT_RXD;
-       adapter->tx_ring->count = E1000_DEFAULT_TXD;
-
        /* Initial Wake on LAN setting - If APM wake is enabled in
         * the EEPROM, enable the ACPI Magic Packet filter
         */
@@ -6976,6 +6974,10 @@ static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPT_I217_V), board_pch_lpt },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPTLP_I218_LM), board_pch_lpt },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPTLP_I218_V), board_pch_lpt },
+       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_LM2), board_pch_lpt },
+       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_V2), board_pch_lpt },
+       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_LM3), board_pch_lpt },
+       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_V3), board_pch_lpt },
 
        { 0, 0, 0, 0, 0, 0, 0 } /* terminate list */
 };
index a6494e5daffe9d4b26171b570251b9a97d78991d..0ac6b11c6e4ec323aaa50db976e2a996b1b02d19 100644 (file)
@@ -618,9 +618,8 @@ struct ixgbe_adapter {
 #define IXGBE_FLAG2_FDIR_REQUIRES_REINIT        (u32)(1 << 7)
 #define IXGBE_FLAG2_RSS_FIELD_IPV4_UDP         (u32)(1 << 8)
 #define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP         (u32)(1 << 9)
-#define IXGBE_FLAG2_PTP_ENABLED                        (u32)(1 << 10)
-#define IXGBE_FLAG2_PTP_PPS_ENABLED            (u32)(1 << 11)
-#define IXGBE_FLAG2_BRIDGE_MODE_VEB            (u32)(1 << 12)
+#define IXGBE_FLAG2_PTP_PPS_ENABLED            (u32)(1 << 10)
+#define IXGBE_FLAG2_BRIDGE_MODE_VEB            (u32)(1 << 11)
 
        /* Tx fast path data */
        int num_tx_queues;
@@ -754,7 +753,7 @@ enum ixgbe_state_t {
        __IXGBE_DOWN,
        __IXGBE_SERVICE_SCHED,
        __IXGBE_IN_SFP_INIT,
-       __IXGBE_READ_I2C,
+       __IXGBE_PTP_RUNNING,
 };
 
 struct ixgbe_cb {
index 4a5bfb6b3af05b09e5d5895e0a2b82bf04e10d0b..a26f3fee4f359be56b4346b9a5d871efedc8ca1d 100644 (file)
@@ -1018,8 +1018,17 @@ static s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr,
        u16 sfp_addr = 0;
        u16 sfp_data = 0;
        u16 sfp_stat = 0;
+       u16 gssr;
        u32 i;
 
+       if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
+               gssr = IXGBE_GSSR_PHY1_SM;
+       else
+               gssr = IXGBE_GSSR_PHY0_SM;
+
+       if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != 0)
+               return IXGBE_ERR_SWFW_SYNC;
+
        if (hw->phy.type == ixgbe_phy_nl) {
                /*
                 * phy SDA/SCL registers are at addresses 0xC30A to
@@ -1028,17 +1037,17 @@ static s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr,
                 */
                sfp_addr = (dev_addr << 8) + byte_offset;
                sfp_addr = (sfp_addr | IXGBE_I2C_EEPROM_READ_MASK);
-               hw->phy.ops.write_reg(hw,
-                                     IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR,
-                                     MDIO_MMD_PMAPMD,
-                                     sfp_addr);
+               hw->phy.ops.write_reg_mdi(hw,
+                                         IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR,
+                                         MDIO_MMD_PMAPMD,
+                                         sfp_addr);
 
                /* Poll status */
                for (i = 0; i < 100; i++) {
-                       hw->phy.ops.read_reg(hw,
-                                            IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT,
-                                            MDIO_MMD_PMAPMD,
-                                            &sfp_stat);
+                       hw->phy.ops.read_reg_mdi(hw,
+                                               IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT,
+                                               MDIO_MMD_PMAPMD,
+                                               &sfp_stat);
                        sfp_stat = sfp_stat & IXGBE_I2C_EEPROM_STATUS_MASK;
                        if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS)
                                break;
@@ -1052,8 +1061,8 @@ static s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr,
                }
 
                /* Read data */
-               hw->phy.ops.read_reg(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA,
-                                    MDIO_MMD_PMAPMD, &sfp_data);
+               hw->phy.ops.read_reg_mdi(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA,
+                                       MDIO_MMD_PMAPMD, &sfp_data);
 
                *eeprom_data = (u8)(sfp_data >> 8);
        } else {
@@ -1061,6 +1070,7 @@ static s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr,
        }
 
 out:
+       hw->mac.ops.release_swfw_sync(hw, gssr);
        return status;
 }
 
@@ -1321,11 +1331,13 @@ static struct ixgbe_eeprom_operations eeprom_ops_82598 = {
 
 static struct ixgbe_phy_operations phy_ops_82598 = {
        .identify               = &ixgbe_identify_phy_generic,
-       .identify_sfp           = &ixgbe_identify_sfp_module_generic,
+       .identify_sfp           = &ixgbe_identify_module_generic,
        .init                   = &ixgbe_init_phy_ops_82598,
        .reset                  = &ixgbe_reset_phy_generic,
        .read_reg               = &ixgbe_read_phy_reg_generic,
        .write_reg              = &ixgbe_write_phy_reg_generic,
+       .read_reg_mdi           = &ixgbe_read_phy_reg_mdi,
+       .write_reg_mdi          = &ixgbe_write_phy_reg_mdi,
        .setup_link             = &ixgbe_setup_phy_link_generic,
        .setup_link_speed       = &ixgbe_setup_phy_link_speed_generic,
        .read_i2c_sff8472       = &ixgbe_read_i2c_sff8472_82598,
index 0b82d38bc97daf6e67c6cd7a0d65caafa70ccd11..207f68fbe3d3c3c01d9b842f06c377a891962a14 100644 (file)
@@ -58,6 +58,10 @@ static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
                                          ixgbe_link_speed speed,
                                          bool autoneg_wait_to_complete);
 static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
+static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
+                                    u8 dev_addr, u8 *data);
+static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
+                                     u8 dev_addr, u8 data);
 
 static bool ixgbe_mng_enabled(struct ixgbe_hw *hw)
 {
@@ -219,6 +223,25 @@ static s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
        struct ixgbe_mac_info *mac = &hw->mac;
        struct ixgbe_phy_info *phy = &hw->phy;
        s32 ret_val = 0;
+       u32 esdp;
+
+       if (hw->device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP) {
+               /* Store flag indicating I2C bus access control unit. */
+               hw->phy.qsfp_shared_i2c_bus = true;
+
+               /* Initialize access to QSFP+ I2C bus */
+               esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+               esdp |= IXGBE_ESDP_SDP0_DIR;
+               esdp &= ~IXGBE_ESDP_SDP1_DIR;
+               esdp &= ~IXGBE_ESDP_SDP0;
+               esdp &= ~IXGBE_ESDP_SDP0_NATIVE;
+               esdp &= ~IXGBE_ESDP_SDP1_NATIVE;
+               IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+               IXGBE_WRITE_FLUSH(hw);
+
+               phy->ops.read_i2c_byte = &ixgbe_read_i2c_byte_82599;
+               phy->ops.write_i2c_byte = &ixgbe_write_i2c_byte_82599;
+       }
 
        /* Identify the PHY or SFP module */
        ret_val = phy->ops.identify(hw);
@@ -397,6 +420,9 @@ static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
        case IXGBE_DEV_ID_82599_LS:
                media_type = ixgbe_media_type_fiber_lco;
                break;
+       case IXGBE_DEV_ID_82599_QSFP_SF_QP:
+               media_type = ixgbe_media_type_fiber_qsfp;
+               break;
        default:
                media_type = ixgbe_media_type_unknown;
                break;
@@ -526,6 +552,75 @@ static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
        }
 }
 
+/**
+ *  ixgbe_set_fiber_fixed_speed - Set module link speed for fixed fiber
+ *  @hw: pointer to hardware structure
+ *  @speed: link speed to set
+ *
+ *  We set the module speed differently for fixed fiber.  For other
+ *  multi-speed devices we don't have an error value so here if we
+ *  detect an error we just log it and exit.
+ */
+static void ixgbe_set_fiber_fixed_speed(struct ixgbe_hw *hw,
+                                       ixgbe_link_speed speed)
+{
+       s32 status;
+       u8 rs, eeprom_data;
+
+       switch (speed) {
+       case IXGBE_LINK_SPEED_10GB_FULL:
+               /* one bit mask same as setting on */
+               rs = IXGBE_SFF_SOFT_RS_SELECT_10G;
+               break;
+       case IXGBE_LINK_SPEED_1GB_FULL:
+               rs = IXGBE_SFF_SOFT_RS_SELECT_1G;
+               break;
+       default:
+               hw_dbg(hw, "Invalid fixed module speed\n");
+               return;
+       }
+
+       /* Set RS0 */
+       status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
+                                          IXGBE_I2C_EEPROM_DEV_ADDR2,
+                                          &eeprom_data);
+       if (status) {
+               hw_dbg(hw, "Failed to read Rx Rate Select RS0\n");
+               goto out;
+       }
+
+       eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) & rs;
+
+       status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
+                                           IXGBE_I2C_EEPROM_DEV_ADDR2,
+                                           eeprom_data);
+       if (status) {
+               hw_dbg(hw, "Failed to write Rx Rate Select RS0\n");
+               goto out;
+       }
+
+       /* Set RS1 */
+       status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
+                                          IXGBE_I2C_EEPROM_DEV_ADDR2,
+                                          &eeprom_data);
+       if (status) {
+               hw_dbg(hw, "Failed to read Rx Rate Select RS1\n");
+               goto out;
+       }
+
+       eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) & rs;
+
+       status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
+                                           IXGBE_I2C_EEPROM_DEV_ADDR2,
+                                           eeprom_data);
+       if (status) {
+               hw_dbg(hw, "Failed to write Rx Rate Select RS1\n");
+               goto out;
+       }
+out:
+       return;
+}
+
 /**
  *  ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed
  *  @hw: pointer to hardware structure
@@ -573,9 +668,14 @@ static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
                        goto out;
 
                /* Set the module link speed */
-               esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5);
-               IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
-               IXGBE_WRITE_FLUSH(hw);
+               if (hw->phy.media_type == ixgbe_media_type_fiber_fixed) {
+                       ixgbe_set_fiber_fixed_speed(hw,
+                                       IXGBE_LINK_SPEED_10GB_FULL);
+               } else {
+                       esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5);
+                       IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
+                       IXGBE_WRITE_FLUSH(hw);
+               }
 
                /* Allow module to change analog characteristics (1G->10G) */
                msleep(40);
@@ -625,10 +725,15 @@ static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
                        goto out;
 
                /* Set the module link speed */
-               esdp_reg &= ~IXGBE_ESDP_SDP5;
-               esdp_reg |= IXGBE_ESDP_SDP5_DIR;
-               IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
-               IXGBE_WRITE_FLUSH(hw);
+               if (hw->phy.media_type == ixgbe_media_type_fiber_fixed) {
+                       ixgbe_set_fiber_fixed_speed(hw,
+                                               IXGBE_LINK_SPEED_1GB_FULL);
+               } else {
+                       esdp_reg &= ~IXGBE_ESDP_SDP5;
+                       esdp_reg |= IXGBE_ESDP_SDP5_DIR;
+                       IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
+                       IXGBE_WRITE_FLUSH(hw);
+               }
 
                /* Allow module to change analog characteristics (10G->1G) */
                msleep(40);
@@ -1872,7 +1977,7 @@ static s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
                if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)
                        goto out;
                else
-                       status = ixgbe_identify_sfp_module_generic(hw);
+                       status = ixgbe_identify_module_generic(hw);
        }
 
        /* Set PHY type none if no PHY detected */
@@ -1978,10 +2083,12 @@ sfp_check:
        switch (hw->phy.type) {
        case ixgbe_phy_sfp_passive_tyco:
        case ixgbe_phy_sfp_passive_unknown:
+       case ixgbe_phy_qsfp_passive_unknown:
                physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
                break;
        case ixgbe_phy_sfp_ftl_active:
        case ixgbe_phy_sfp_active_unknown:
+       case ixgbe_phy_qsfp_active_unknown:
                physical_layer = IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA;
                break;
        case ixgbe_phy_sfp_avago:
@@ -1999,6 +2106,15 @@ sfp_check:
                else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE)
                        physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_T;
                break;
+       case ixgbe_phy_qsfp_intel:
+       case ixgbe_phy_qsfp_unknown:
+               hw->phy.ops.read_i2c_eeprom(hw,
+                       IXGBE_SFF_QSFP_10GBE_COMP, &comp_codes_10g);
+               if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
+                       physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
+               else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)
+                       physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
+               break;
        default:
                break;
        }
@@ -2236,6 +2352,112 @@ reset_pipeline_out:
        return ret_val;
 }
 
+/**
+ *  ixgbe_read_i2c_byte_82599 - Reads 8 bit word over I2C
+ *  @hw: pointer to hardware structure
+ *  @byte_offset: byte offset to read
+ *  @data: value read
+ *
+ *  Performs byte read operation to SFP module's EEPROM over I2C interface at
+ *  a specified device address.
+ **/
+static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
+                                    u8 dev_addr, u8 *data)
+{
+       u32 esdp;
+       s32 status;
+       s32 timeout = 200;
+
+       if (hw->phy.qsfp_shared_i2c_bus == true) {
+               /* Acquire I2C bus ownership. */
+               esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+               esdp |= IXGBE_ESDP_SDP0;
+               IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+               IXGBE_WRITE_FLUSH(hw);
+
+               while (timeout) {
+                       esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+                       if (esdp & IXGBE_ESDP_SDP1)
+                               break;
+
+                       usleep_range(5000, 10000);
+                       timeout--;
+               }
+
+               if (!timeout) {
+                       hw_dbg(hw, "Driver can't access resource, acquiring I2C bus timeout.\n");
+                       status = IXGBE_ERR_I2C;
+                       goto release_i2c_access;
+               }
+       }
+
+       status = ixgbe_read_i2c_byte_generic(hw, byte_offset, dev_addr, data);
+
+release_i2c_access:
+       if (hw->phy.qsfp_shared_i2c_bus == true) {
+               /* Release I2C bus ownership. */
+               esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+               esdp &= ~IXGBE_ESDP_SDP0;
+               IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+               IXGBE_WRITE_FLUSH(hw);
+       }
+
+       return status;
+}
+
+/**
+ *  ixgbe_write_i2c_byte_82599 - Writes 8 bit word over I2C
+ *  @hw: pointer to hardware structure
+ *  @byte_offset: byte offset to write
+ *  @data: value to write
+ *
+ *  Performs byte write operation to SFP module's EEPROM over I2C interface at
+ *  a specified device address.
+ **/
+static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
+                                     u8 dev_addr, u8 data)
+{
+       u32 esdp;
+       s32 status;
+       s32 timeout = 200;
+
+       if (hw->phy.qsfp_shared_i2c_bus == true) {
+               /* Acquire I2C bus ownership. */
+               esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+               esdp |= IXGBE_ESDP_SDP0;
+               IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+               IXGBE_WRITE_FLUSH(hw);
+
+               while (timeout) {
+                       esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+                       if (esdp & IXGBE_ESDP_SDP1)
+                               break;
+
+                       usleep_range(5000, 10000);
+                       timeout--;
+               }
+
+               if (!timeout) {
+                       hw_dbg(hw, "Driver can't access resource, acquiring I2C bus timeout.\n");
+                       status = IXGBE_ERR_I2C;
+                       goto release_i2c_access;
+               }
+       }
+
+       status = ixgbe_write_i2c_byte_generic(hw, byte_offset, dev_addr, data);
+
+release_i2c_access:
+       if (hw->phy.qsfp_shared_i2c_bus == true) {
+               /* Release I2C bus ownership. */
+               esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+               esdp &= ~IXGBE_ESDP_SDP0;
+               IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+               IXGBE_WRITE_FLUSH(hw);
+       }
+
+       return status;
+}
+
 static struct ixgbe_mac_operations mac_ops_82599 = {
        .init_hw                = &ixgbe_init_hw_generic,
        .reset_hw               = &ixgbe_reset_hw_82599,
@@ -2300,7 +2522,7 @@ static struct ixgbe_eeprom_operations eeprom_ops_82599 = {
 
 static struct ixgbe_phy_operations phy_ops_82599 = {
        .identify               = &ixgbe_identify_phy_82599,
-       .identify_sfp           = &ixgbe_identify_sfp_module_generic,
+       .identify_sfp           = &ixgbe_identify_module_generic,
        .init                   = &ixgbe_init_phy_ops_82599,
        .reset                  = &ixgbe_reset_phy_generic,
        .read_reg               = &ixgbe_read_phy_reg_generic,
index 9bcdeb89af5a0a532689cb7b91b2b3b64fca380a..50e62a2b1a65f905709c9ffffdff66fa8273a296 100644 (file)
@@ -65,17 +65,42 @@ static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw);
  *  function check the device id to see if the associated phy supports
  *  autoneg flow control.
  **/
-s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
+bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
 {
+       bool supported = false;
+       ixgbe_link_speed speed;
+       bool link_up;
 
-       switch (hw->device_id) {
-       case IXGBE_DEV_ID_X540T:
-       case IXGBE_DEV_ID_X540T1:
-       case IXGBE_DEV_ID_82599_T3_LOM:
-               return 0;
+       switch (hw->phy.media_type) {
+       case ixgbe_media_type_fiber_fixed:
+       case ixgbe_media_type_fiber:
+               hw->mac.ops.check_link(hw, &speed, &link_up, false);
+               /* if link is down, assume supported */
+               if (link_up)
+                       supported = speed == IXGBE_LINK_SPEED_1GB_FULL ?
+                               true : false;
+               else
+                       supported = true;
+               break;
+       case ixgbe_media_type_backplane:
+               supported = true;
+               break;
+       case ixgbe_media_type_copper:
+               /* only some copper devices support flow control autoneg */
+               switch (hw->device_id) {
+               case IXGBE_DEV_ID_82599_T3_LOM:
+               case IXGBE_DEV_ID_X540T:
+               case IXGBE_DEV_ID_X540T1:
+                       supported = true;
+                       break;
+               default:
+                       break;
+               }
        default:
-               return IXGBE_ERR_FC_NOT_SUPPORTED;
+               break;
        }
+
+       return supported;
 }
 
 /**
@@ -114,6 +139,7 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
         * we link at 10G, the 1G advertisement is harmless and vice versa.
         */
        switch (hw->phy.media_type) {
+       case ixgbe_media_type_fiber_fixed:
        case ixgbe_media_type_fiber:
        case ixgbe_media_type_backplane:
                reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
@@ -234,7 +260,7 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
                                                      IXGBE_GSSR_MAC_CSR_SM);
 
        } else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
-                   (ixgbe_device_supports_autoneg_fc(hw) == 0)) {
+                   ixgbe_device_supports_autoneg_fc(hw)) {
                hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE,
                                      MDIO_MMD_AN, reg_cu);
        }
@@ -2380,6 +2406,7 @@ void ixgbe_fc_autoneg(struct ixgbe_hw *hw)
 
        switch (hw->phy.media_type) {
        /* Autoneg flow control on fiber adapters */
+       case ixgbe_media_type_fiber_fixed:
        case ixgbe_media_type_fiber:
                if (speed == IXGBE_LINK_SPEED_1GB_FULL)
                        ret_val = ixgbe_fc_autoneg_fiber(hw);
@@ -2392,7 +2419,7 @@ void ixgbe_fc_autoneg(struct ixgbe_hw *hw)
 
        /* Autoneg flow control on copper adapters */
        case ixgbe_media_type_copper:
-               if (ixgbe_device_supports_autoneg_fc(hw) == 0)
+               if (ixgbe_device_supports_autoneg_fc(hw))
                        ret_val = ixgbe_fc_autoneg_copper(hw);
                break;
 
@@ -2479,42 +2506,39 @@ out:
  **/
 s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
 {
-       u32 gssr;
+       u32 gssr = 0;
        u32 swmask = mask;
        u32 fwmask = mask << 5;
-       s32 timeout = 200;
+       u32 timeout = 200;
+       u32 i;
 
-       while (timeout) {
+       for (i = 0; i < timeout; i++) {
                /*
-                * SW EEPROM semaphore bit is used for access to all
-                * SW_FW_SYNC/GSSR bits (not just EEPROM)
+                * SW NVM semaphore bit is used for access to all
+                * SW_FW_SYNC bits (not just NVM)
                 */
                if (ixgbe_get_eeprom_semaphore(hw))
                        return IXGBE_ERR_SWFW_SYNC;
 
                gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
-               if (!(gssr & (fwmask | swmask)))
-                       break;
-
-               /*
-                * Firmware currently using resource (fwmask) or other software
-                * thread currently using resource (swmask)
-                */
-               ixgbe_release_eeprom_semaphore(hw);
-               usleep_range(5000, 10000);
-               timeout--;
-       }
-
-       if (!timeout) {
-               hw_dbg(hw, "Driver can't access resource, SW_FW_SYNC timeout.\n");
-               return IXGBE_ERR_SWFW_SYNC;
+               if (!(gssr & (fwmask | swmask))) {
+                       gssr |= swmask;
+                       IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
+                       ixgbe_release_eeprom_semaphore(hw);
+                       return 0;
+               } else {
+                       /* Resource is currently in use by FW or SW */
+                       ixgbe_release_eeprom_semaphore(hw);
+                       usleep_range(5000, 10000);
+               }
        }
 
-       gssr |= swmask;
-       IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
+       /* If time expired clear the bits holding the lock and retry */
+       if (gssr & (fwmask | swmask))
+               ixgbe_release_swfw_sync(hw, gssr & (fwmask | swmask));
 
-       ixgbe_release_eeprom_semaphore(hw);
-       return 0;
+       usleep_range(5000, 10000);
+       return IXGBE_ERR_SWFW_SYNC;
 }
 
 /**
index 22eee38868f1aa2801137e3b26eb2131efe9ec56..1315b8ac7f586f9ef05b346aa070e847ac481f2a 100644 (file)
@@ -80,7 +80,7 @@ s32 ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw);
 s32 ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw);
 s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval);
 s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw);
-s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw);
+bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw);
 void ixgbe_fc_autoneg(struct ixgbe_hw *hw);
 
 s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask);
index 24e2e7aafda2d42201541ef9ee855ee26e1661cb..50c1e9b2fd806584b69e650d9bf551093f7445d8 100644 (file)
@@ -355,10 +355,11 @@ static void ixgbe_get_pauseparam(struct net_device *netdev,
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
 
-       if (hw->fc.disable_fc_autoneg)
-               pause->autoneg = 0;
-       else
+       if (ixgbe_device_supports_autoneg_fc(hw) &&
+           !hw->fc.disable_fc_autoneg)
                pause->autoneg = 1;
+       else
+               pause->autoneg = 0;
 
        if (hw->fc.current_mode == ixgbe_fc_rx_pause) {
                pause->rx_pause = 1;
@@ -384,7 +385,7 @@ static int ixgbe_set_pauseparam(struct net_device *netdev,
 
        /* some devices do not support autoneg of link flow control */
        if ((pause->autoneg == AUTONEG_ENABLE) &&
-           (ixgbe_device_supports_autoneg_fc(hw) != 0))
+           !ixgbe_device_supports_autoneg_fc(hw))
                return -EINVAL;
 
        fc.disable_fc_autoneg = (pause->autoneg != AUTONEG_ENABLE);
@@ -1140,11 +1141,11 @@ static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
                        sprintf(p, "tx_queue_%u_bytes", i);
                        p += ETH_GSTRING_LEN;
 #ifdef LL_EXTENDED_STATS
-                       sprintf(p, "tx_q_%u_napi_yield", i);
+                       sprintf(p, "tx_queue_%u_ll_napi_yield", i);
                        p += ETH_GSTRING_LEN;
-                       sprintf(p, "tx_q_%u_misses", i);
+                       sprintf(p, "tx_queue_%u_ll_misses", i);
                        p += ETH_GSTRING_LEN;
-                       sprintf(p, "tx_q_%u_cleaned", i);
+                       sprintf(p, "tx_queue_%u_ll_cleaned", i);
                        p += ETH_GSTRING_LEN;
 #endif /* LL_EXTENDED_STATS */
                }
@@ -1154,11 +1155,11 @@ static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
                        sprintf(p, "rx_queue_%u_bytes", i);
                        p += ETH_GSTRING_LEN;
 #ifdef LL_EXTENDED_STATS
-                       sprintf(p, "rx_q_%u_ll_poll_yield", i);
+                       sprintf(p, "rx_queue_%u_ll_poll_yield", i);
                        p += ETH_GSTRING_LEN;
-                       sprintf(p, "rx_q_%u_misses", i);
+                       sprintf(p, "rx_queue_%u_ll_misses", i);
                        p += ETH_GSTRING_LEN;
-                       sprintf(p, "rx_q_%u_cleaned", i);
+                       sprintf(p, "rx_queue_%u_ll_cleaned", i);
                        p += ETH_GSTRING_LEN;
 #endif /* LL_EXTENDED_STATS */
                }
@@ -2909,33 +2910,21 @@ static int ixgbe_get_module_info(struct net_device *dev,
        struct ixgbe_hw *hw = &adapter->hw;
        u32 status;
        u8 sff8472_rev, addr_mode;
-       int ret_val = 0;
        bool page_swap = false;
 
-       /* avoid concurent i2c reads */
-       while (test_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
-               msleep(100);
-
-       /* used by the service task */
-       set_bit(__IXGBE_READ_I2C, &adapter->state);
-
        /* Check whether we support SFF-8472 or not */
        status = hw->phy.ops.read_i2c_eeprom(hw,
                                             IXGBE_SFF_SFF_8472_COMP,
                                             &sff8472_rev);
-       if (status != 0) {
-               ret_val = -EIO;
-               goto err_out;
-       }
+       if (status != 0)
+               return -EIO;
 
        /* addressing mode is not supported */
        status = hw->phy.ops.read_i2c_eeprom(hw,
                                             IXGBE_SFF_SFF_8472_SWAP,
                                             &addr_mode);
-       if (status != 0) {
-               ret_val = -EIO;
-               goto err_out;
-       }
+       if (status != 0)
+               return -EIO;
 
        if (addr_mode & IXGBE_SFF_ADDRESSING_MODE) {
                e_err(drv, "Address change required to access page 0xA2, but not supported. Please report the module type to the driver maintainers.\n");
@@ -2952,9 +2941,7 @@ static int ixgbe_get_module_info(struct net_device *dev,
                modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
        }
 
-err_out:
-       clear_bit(__IXGBE_READ_I2C, &adapter->state);
-       return ret_val;
+       return 0;
 }
 
 static int ixgbe_get_module_eeprom(struct net_device *dev,
@@ -2968,48 +2955,25 @@ static int ixgbe_get_module_eeprom(struct net_device *dev,
        int i = 0;
        int ret_val = 0;
 
-       /* ixgbe_get_module_info is called before this function in all
-        * cases, so we do not need any checks we already do above,
-        * and can trust ee->len to be a known value.
-        */
+       if (ee->len == 0)
+               return -EINVAL;
+
+       for (i = ee->offset; i < ee->len; i++) {
+               /* I2C reads can take long time */
+               if (test_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
+                       return -EBUSY;
 
-       while (test_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
-               msleep(100);
-       set_bit(__IXGBE_READ_I2C, &adapter->state);
+               if (i < ETH_MODULE_SFF_8079_LEN)
+                       status  = hw->phy.ops.read_i2c_eeprom(hw, i, &databyte);
+               else
+                       status = hw->phy.ops.read_i2c_sff8472(hw, i, &databyte);
 
-       /* Read the first block, SFF-8079 */
-       for (i = 0; i < ETH_MODULE_SFF_8079_LEN; i++) {
-               status = hw->phy.ops.read_i2c_eeprom(hw, i, &databyte);
-               if (status != 0) {
-                       /* Error occured while reading module */
+               if (status != 0)
                        ret_val = -EIO;
-                       goto err_out;
-               }
-               data[i] = databyte;
-       }
 
-       /* If the second block is requested, check if SFF-8472 is supported. */
-       if (ee->len == ETH_MODULE_SFF_8472_LEN) {
-               if (data[IXGBE_SFF_SFF_8472_COMP] == IXGBE_SFF_SFF_8472_UNSUP)
-                       return -EOPNOTSUPP;
-
-               /* Read the second block, SFF-8472 */
-               for (i = ETH_MODULE_SFF_8079_LEN;
-                    i < ETH_MODULE_SFF_8472_LEN; i++) {
-                       status = hw->phy.ops.read_i2c_sff8472(hw,
-                               i - ETH_MODULE_SFF_8079_LEN, &databyte);
-                       if (status != 0) {
-                               /* Error occured while reading module */
-                               ret_val = -EIO;
-                               goto err_out;
-                       }
-                       data[i] = databyte;
-               }
+               data[i - ee->offset] = databyte;
        }
 
-err_out:
-       clear_bit(__IXGBE_READ_I2C, &adapter->state);
-
        return ret_val;
 }
 
index be4b1fb3d0d2c38b62bcabdb2dc80a8332634cba..128d6b88532630693acc9f92ce99387140b2aec8 100644 (file)
@@ -63,7 +63,7 @@ char ixgbe_default_device_descr[] =
 static char ixgbe_default_device_descr[] =
                              "Intel(R) 10 Gigabit Network Connection";
 #endif
-#define DRV_VERSION "3.13.10-k"
+#define DRV_VERSION "3.15.1-k"
 const char ixgbe_driver_version[] = DRV_VERSION;
 static const char ixgbe_copyright[] =
                                "Copyright (c) 1999-2013 Intel Corporation.";
@@ -109,6 +109,7 @@ static DEFINE_PCI_DEVICE_TABLE(ixgbe_pci_tbl) = {
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T), board_X540 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF2), board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_LS), board_82599 },
+       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_QSFP_SF_QP), board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599EN_SFP), board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF_QP), board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T1), board_X540 },
@@ -195,6 +196,86 @@ static s32 ixgbe_get_parent_bus_info(struct ixgbe_adapter *adapter)
        return 0;
 }
 
+/**
+ * ixgbe_check_from_parent - Determine whether PCIe info should come from parent
+ * @hw: hw specific details
+ *
+ * This function is used by probe to determine whether a device's PCI-Express
+ * bandwidth details should be gathered from the parent bus instead of from the
+ * device. Used to ensure that various locations all have the correct device ID
+ * checks.
+ */
+static inline bool ixgbe_pcie_from_parent(struct ixgbe_hw *hw)
+{
+       switch (hw->device_id) {
+       case IXGBE_DEV_ID_82599_SFP_SF_QP:
+       case IXGBE_DEV_ID_82599_QSFP_SF_QP:
+               return true;
+       default:
+               return false;
+       }
+}
+
+static void ixgbe_check_minimum_link(struct ixgbe_adapter *adapter,
+                                    int expected_gts)
+{
+       int max_gts = 0;
+       enum pci_bus_speed speed = PCI_SPEED_UNKNOWN;
+       enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN;
+       struct pci_dev *pdev;
+
+       /* determine whether to use the parent device
+        */
+       if (ixgbe_pcie_from_parent(&adapter->hw))
+               pdev = adapter->pdev->bus->parent->self;
+       else
+               pdev = adapter->pdev;
+
+       if (pcie_get_minimum_link(pdev, &speed, &width) ||
+           speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN) {
+               e_dev_warn("Unable to determine PCI Express bandwidth.\n");
+               return;
+       }
+
+       switch (speed) {
+       case PCIE_SPEED_2_5GT:
+               /* 8b/10b encoding reduces max throughput by 20% */
+               max_gts = 2 * width;
+               break;
+       case PCIE_SPEED_5_0GT:
+               /* 8b/10b encoding reduces max throughput by 20% */
+               max_gts = 4 * width;
+               break;
+       case PCIE_SPEED_8_0GT:
+               /* 128b/130b encoding only reduces throughput by 1% */
+               max_gts = 8 * width;
+               break;
+       default:
+               e_dev_warn("Unable to determine PCI Express bandwidth.\n");
+               return;
+       }
+
+       e_dev_info("PCI Express bandwidth of %dGT/s available\n",
+                  max_gts);
+       e_dev_info("(Speed:%s, Width: x%d, Encoding Loss:%s)\n",
+                  (speed == PCIE_SPEED_8_0GT ? "8.0GT/s" :
+                   speed == PCIE_SPEED_5_0GT ? "5.0GT/s" :
+                   speed == PCIE_SPEED_2_5GT ? "2.5GT/s" :
+                   "Unknown"),
+                  width,
+                  (speed == PCIE_SPEED_2_5GT ? "20%" :
+                   speed == PCIE_SPEED_5_0GT ? "20%" :
+                   speed == PCIE_SPEED_8_0GT ? "N/a" :
+                   "Unknown"));
+
+       if (max_gts < expected_gts) {
+               e_dev_warn("This is not sufficient for optimal performance of this card.\n");
+               e_dev_warn("For optimal performance, at least %dGT/s of bandwidth is required.\n",
+                       expected_gts);
+               e_dev_warn("A slot with more lanes and/or higher speed is suggested.\n");
+       }
+}
+
 static void ixgbe_service_event_schedule(struct ixgbe_adapter *adapter)
 {
        if (!test_bit(__IXGBE_DOWN, &adapter->state) &&
@@ -3724,8 +3805,15 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
                hw->addr_ctrl.user_set_promisc = true;
                fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
                vmolr |= (IXGBE_VMOLR_ROPE | IXGBE_VMOLR_MPE);
-               /* don't hardware filter vlans in promisc mode */
-               ixgbe_vlan_filter_disable(adapter);
+               /* Only disable hardware filter vlans in promiscuous mode
+                * if SR-IOV and VMDQ are disabled - otherwise ensure
+                * that hardware VLAN filters remain enabled.
+                */
+               if (!(adapter->flags & (IXGBE_FLAG_VMDQ_ENABLED |
+                                       IXGBE_FLAG_SRIOV_ENABLED)))
+                       ixgbe_vlan_filter_disable(adapter);
+               else
+                       ixgbe_vlan_filter_enable(adapter);
        } else {
                if (netdev->flags & IFF_ALLMULTI) {
                        fctrl |= IXGBE_FCTRL_MPE;
@@ -4352,7 +4440,7 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
        if (hw->mac.san_mac_rar_index)
                hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0));
 
-       if (adapter->flags2 & IXGBE_FLAG2_PTP_ENABLED)
+       if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state))
                ixgbe_ptp_reset(adapter);
 }
 
@@ -4714,8 +4802,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
        ixgbe_pbthresh_setup(adapter);
        hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
        hw->fc.send_xon = true;
-       hw->fc.disable_fc_autoneg =
-               (ixgbe_device_supports_autoneg_fc(hw) == 0) ? false : true;
+       hw->fc.disable_fc_autoneg = ixgbe_device_supports_autoneg_fc(hw);
 
 #ifdef CONFIG_PCI_IOV
        /* assign number of SR-IOV VFs */
@@ -5681,7 +5768,7 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
 
        adapter->last_rx_ptp_check = jiffies;
 
-       if (adapter->flags2 & IXGBE_FLAG2_PTP_ENABLED)
+       if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state))
                ixgbe_ptp_start_cyclecounter(adapter);
 
        e_info(drv, "NIC Link is Up %s, Flow Control: %s\n",
@@ -5727,7 +5814,7 @@ static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter)
        if (ixgbe_is_sfp(hw) && hw->mac.type == ixgbe_mac_82598EB)
                adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP;
 
-       if (adapter->flags2 & IXGBE_FLAG2_PTP_ENABLED)
+       if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state))
                ixgbe_ptp_start_cyclecounter(adapter);
 
        e_info(drv, "NIC Link is Down\n");
@@ -5826,10 +5913,6 @@ static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter)
            !(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET))
                return;
 
-       /* concurent i2c reads are not supported */
-       if (test_bit(__IXGBE_READ_I2C, &adapter->state))
-               return;
-
        /* someone else is in init, wait until next service event */
        if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
                return;
@@ -6038,7 +6121,7 @@ static void ixgbe_service_task(struct work_struct *work)
        ixgbe_fdir_reinit_subtask(adapter);
        ixgbe_check_hang_subtask(adapter);
 
-       if (adapter->flags2 & IXGBE_FLAG2_PTP_ENABLED) {
+       if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) {
                ixgbe_ptp_overflow_check(adapter);
                ixgbe_ptp_rx_hang(adapter);
        }
@@ -7246,6 +7329,42 @@ static const struct net_device_ops ixgbe_netdev_ops = {
        .ndo_bridge_getlink     = ixgbe_ndo_bridge_getlink,
 };
 
+/**
+ * ixgbe_enumerate_functions - Get the number of ports this device has
+ * @adapter: adapter structure
+ *
+ * This function enumerates the physical functions co-located on a single slot,
+ * in order to determine how many ports a device has. This is most useful in
+ * determining the required GT/s of PCIe bandwidth necessary for optimal
+ * performance.
+ **/
+static inline int ixgbe_enumerate_functions(struct ixgbe_adapter *adapter)
+{
+       struct ixgbe_hw *hw = &adapter->hw;
+       struct list_head *entry;
+       int physfns = 0;
+
+       /* Some cards can not use the generic count PCIe functions method, and
+        * so must be hardcoded to the correct value.
+        */
+       switch (hw->device_id) {
+       case IXGBE_DEV_ID_82599_SFP_SF_QP:
+       case IXGBE_DEV_ID_82599_QSFP_SF_QP:
+               physfns = 4;
+               break;
+       default:
+               list_for_each(entry, &adapter->pdev->bus_list) {
+                       struct pci_dev *pdev =
+                               list_entry(entry, struct pci_dev, bus_list);
+                       /* don't count virtual functions */
+                       if (!pdev->is_virtfn)
+                               physfns++;
+               }
+       }
+
+       return physfns;
+}
+
 /**
  * ixgbe_wol_supported - Check whether device supports WoL
  * @hw: hw specific details
@@ -7328,7 +7447,7 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        struct ixgbe_hw *hw;
        const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
        static int cards_found;
-       int i, err, pci_using_dac;
+       int i, err, pci_using_dac, expected_gts;
        unsigned int indices = MAX_TX_QUEUES;
        u8 part_str[IXGBE_PBANUM_LENGTH];
 #ifdef IXGBE_FCOE
@@ -7617,7 +7736,7 @@ skip_sriov:
 
        /* pick up the PCI bus settings for reporting later */
        hw->mac.ops.get_bus_info(hw);
-       if (hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP)
+       if (ixgbe_pcie_from_parent(hw))
                ixgbe_get_parent_bus_info(adapter);
 
        /* print bus type/speed/width info */
@@ -7643,12 +7762,20 @@ skip_sriov:
                e_dev_info("MAC: %d, PHY: %d, PBA No: %s\n",
                           hw->mac.type, hw->phy.type, part_str);
 
-       if (hw->bus.width <= ixgbe_bus_width_pcie_x4) {
-               e_dev_warn("PCI-Express bandwidth available for this card is "
-                          "not sufficient for optimal performance.\n");
-               e_dev_warn("For optimal performance a x8 PCI-Express slot "
-                          "is required.\n");
+       /* calculate the expected PCIe bandwidth required for optimal
+        * performance. Note that some older parts will never have enough
+        * bandwidth due to being older generation PCIe parts. We clamp these
+        * parts to ensure no warning is displayed if it can't be fixed.
+        */
+       switch (hw->mac.type) {
+       case ixgbe_mac_82598EB:
+               expected_gts = min(ixgbe_enumerate_functions(adapter) * 10, 16);
+               break;
+       default:
+               expected_gts = ixgbe_enumerate_functions(adapter) * 10;
+               break;
        }
+       ixgbe_check_minimum_link(adapter, expected_gts);
 
        /* reset the hardware with the new settings */
        err = hw->mac.ops.start_hw(hw);
index e5691ccbce9dd09eac8caa20120531b5e3ad1a2a..369eef526bc19f3d9e15809e9821ef13ae46afe9 100644 (file)
@@ -203,8 +203,84 @@ out:
        return status;
 }
 
+/**
+ *  ixgbe_read_phy_mdi - Reads a value from a specified PHY register without
+ *  the SWFW lock
+ *  @hw: pointer to hardware structure
+ *  @reg_addr: 32 bit address of PHY register to read
+ *  @phy_data: Pointer to read data from PHY register
+ **/
+s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
+                      u16 *phy_data)
+{
+       u32 i, data, command;
+
+       /* Setup and write the address cycle command */
+       command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT)  |
+                  (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
+                  (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) |
+                  (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));
+
+       IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
+
+       /* Check every 10 usec to see if the address cycle completed.
+        * The MDI Command bit will clear when the operation is
+        * complete
+        */
+       for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
+               udelay(10);
+
+               command = IXGBE_READ_REG(hw, IXGBE_MSCA);
+               if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
+                               break;
+       }
+
+
+       if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
+               hw_dbg(hw, "PHY address command did not complete.\n");
+               return IXGBE_ERR_PHY;
+       }
+
+       /* Address cycle complete, setup and write the read
+        * command
+        */
+       command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT)  |
+                  (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
+                  (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) |
+                  (IXGBE_MSCA_READ | IXGBE_MSCA_MDI_COMMAND));
+
+       IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
+
+       /* Check every 10 usec to see if the address cycle
+        * completed. The MDI Command bit will clear when the
+        * operation is complete
+        */
+       for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
+               udelay(10);
+
+               command = IXGBE_READ_REG(hw, IXGBE_MSCA);
+               if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
+                       break;
+       }
+
+       if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
+               hw_dbg(hw, "PHY read command didn't complete\n");
+               return IXGBE_ERR_PHY;
+       }
+
+       /* Read operation is complete.  Get the data
+        * from MSRWD
+        */
+       data = IXGBE_READ_REG(hw, IXGBE_MSRWD);
+       data >>= IXGBE_MSRWD_READ_DATA_SHIFT;
+       *phy_data = (u16)(data);
+
+       return 0;
+}
+
 /**
  *  ixgbe_read_phy_reg_generic - Reads a value from a specified PHY register
+ *  using the SWFW lock - this function is needed in most cases
  *  @hw: pointer to hardware structure
  *  @reg_addr: 32 bit address of PHY register to read
  *  @phy_data: Pointer to read data from PHY register
@@ -212,10 +288,7 @@ out:
 s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
                                u32 device_type, u16 *phy_data)
 {
-       u32 command;
-       u32 i;
-       u32 data;
-       s32 status = 0;
+       s32 status;
        u16 gssr;
 
        if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
@@ -223,86 +296,93 @@ s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
        else
                gssr = IXGBE_GSSR_PHY0_SM;
 
-       if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != 0)
+       if (hw->mac.ops.acquire_swfw_sync(hw, gssr) == 0) {
+               status = ixgbe_read_phy_reg_mdi(hw, reg_addr, device_type,
+                                               phy_data);
+               hw->mac.ops.release_swfw_sync(hw, gssr);
+       } else {
                status = IXGBE_ERR_SWFW_SYNC;
+       }
 
-       if (status == 0) {
-               /* Setup and write the address cycle command */
-               command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT)  |
-                          (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
-                          (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) |
-                          (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));
+       return status;
+}
 
-               IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
+/**
+ *  ixgbe_write_phy_reg_mdi - Writes a value to specified PHY register
+ *  without SWFW lock
+ *  @hw: pointer to hardware structure
+ *  @reg_addr: 32 bit PHY register to write
+ *  @device_type: 5 bit device type
+ *  @phy_data: Data to write to the PHY register
+ **/
+s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr,
+                               u32 device_type, u16 phy_data)
+{
+       u32 i, command;
 
-               /*
-                * Check every 10 usec to see if the address cycle completed.
-                * The MDI Command bit will clear when the operation is
-                * complete
-                */
-               for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
-                       udelay(10);
+       /* Put the data in the MDI single read and write data register*/
+       IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)phy_data);
 
-                       command = IXGBE_READ_REG(hw, IXGBE_MSCA);
+       /* Setup and write the address cycle command */
+       command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT)  |
+                  (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
+                  (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) |
+                  (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));
 
-                       if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
-                               break;
-               }
+       IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
 
-               if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
-                       hw_dbg(hw, "PHY address command did not complete.\n");
-                       status = IXGBE_ERR_PHY;
-               }
+       /*
+        * Check every 10 usec to see if the address cycle completed.
+        * The MDI Command bit will clear when the operation is
+        * complete
+        */
+       for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
+               udelay(10);
 
-               if (status == 0) {
-                       /*
-                        * Address cycle complete, setup and write the read
-                        * command
-                        */
-                       command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT)  |
-                                  (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
-                                  (hw->phy.mdio.prtad <<
-                                   IXGBE_MSCA_PHY_ADDR_SHIFT) |
-                                  (IXGBE_MSCA_READ | IXGBE_MSCA_MDI_COMMAND));
-
-                       IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
-
-                       /*
-                        * Check every 10 usec to see if the address cycle
-                        * completed. The MDI Command bit will clear when the
-                        * operation is complete
-                        */
-                       for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
-                               udelay(10);
-
-                               command = IXGBE_READ_REG(hw, IXGBE_MSCA);
-
-                               if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
-                                       break;
-                       }
+               command = IXGBE_READ_REG(hw, IXGBE_MSCA);
+               if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
+                       break;
+       }
 
-                       if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
-                               hw_dbg(hw, "PHY read command didn't complete\n");
-                               status = IXGBE_ERR_PHY;
-                       } else {
-                               /*
-                                * Read operation is complete.  Get the data
-                                * from MSRWD
-                                */
-                               data = IXGBE_READ_REG(hw, IXGBE_MSRWD);
-                               data >>= IXGBE_MSRWD_READ_DATA_SHIFT;
-                               *phy_data = (u16)(data);
-                       }
-               }
+       if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
+               hw_dbg(hw, "PHY address cmd didn't complete\n");
+               return IXGBE_ERR_PHY;
+       }
 
-               hw->mac.ops.release_swfw_sync(hw, gssr);
+       /*
+        * Address cycle complete, setup and write the write
+        * command
+        */
+       command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT)  |
+                  (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
+                  (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) |
+                  (IXGBE_MSCA_WRITE | IXGBE_MSCA_MDI_COMMAND));
+
+       IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
+
+       /* Check every 10 usec to see if the address cycle
+        * completed. The MDI Command bit will clear when the
+        * operation is complete
+        */
+       for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
+               udelay(10);
+
+               command = IXGBE_READ_REG(hw, IXGBE_MSCA);
+               if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
+                       break;
        }
 
-       return status;
+       if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
+               hw_dbg(hw, "PHY write cmd didn't complete\n");
+               return IXGBE_ERR_PHY;
+       }
+
+       return 0;
 }
 
 /**
  *  ixgbe_write_phy_reg_generic - Writes a value to specified PHY register
+ *  using SWFW lock- this function is needed in most cases
  *  @hw: pointer to hardware structure
  *  @reg_addr: 32 bit PHY register to write
  *  @device_type: 5 bit device type
@@ -311,9 +391,7 @@ s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
 s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
                                 u32 device_type, u16 phy_data)
 {
-       u32 command;
-       u32 i;
-       s32 status = 0;
+       s32 status;
        u16 gssr;
 
        if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
@@ -321,74 +399,12 @@ s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
        else
                gssr = IXGBE_GSSR_PHY0_SM;
 
-       if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != 0)
-               status = IXGBE_ERR_SWFW_SYNC;
-
-       if (status == 0) {
-               /* Put the data in the MDI single read and write data register*/
-               IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)phy_data);
-
-               /* Setup and write the address cycle command */
-               command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT)  |
-                          (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
-                          (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) |
-                          (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));
-
-               IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
-
-               /*
-                * Check every 10 usec to see if the address cycle completed.
-                * The MDI Command bit will clear when the operation is
-                * complete
-                */
-               for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
-                       udelay(10);
-
-                       command = IXGBE_READ_REG(hw, IXGBE_MSCA);
-
-                       if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
-                               break;
-               }
-
-               if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
-                       hw_dbg(hw, "PHY address cmd didn't complete\n");
-                       status = IXGBE_ERR_PHY;
-               }
-
-               if (status == 0) {
-                       /*
-                        * Address cycle complete, setup and write the write
-                        * command
-                        */
-                       command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT)  |
-                                  (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
-                                  (hw->phy.mdio.prtad <<
-                                   IXGBE_MSCA_PHY_ADDR_SHIFT) |
-                                  (IXGBE_MSCA_WRITE | IXGBE_MSCA_MDI_COMMAND));
-
-                       IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
-
-                       /*
-                        * Check every 10 usec to see if the address cycle
-                        * completed. The MDI Command bit will clear when the
-                        * operation is complete
-                        */
-                       for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
-                               udelay(10);
-
-                               command = IXGBE_READ_REG(hw, IXGBE_MSCA);
-
-                               if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
-                                       break;
-                       }
-
-                       if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
-                               hw_dbg(hw, "PHY address cmd didn't complete\n");
-                               status = IXGBE_ERR_PHY;
-                       }
-               }
-
+       if (hw->mac.ops.acquire_swfw_sync(hw, gssr) == 0) {
+               status = ixgbe_write_phy_reg_mdi(hw, reg_addr, device_type,
+                                                phy_data);
                hw->mac.ops.release_swfw_sync(hw, gssr);
+       } else {
+               status = IXGBE_ERR_SWFW_SYNC;
        }
 
        return status;
@@ -825,9 +841,35 @@ out:
 }
 
 /**
- *  ixgbe_identify_sfp_module_generic - Identifies SFP modules
+ *  ixgbe_identify_module_generic - Identifies module type
  *  @hw: pointer to hardware structure
  *
+ *  Determines HW type and calls appropriate function.
+ **/
+s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw)
+{
+       s32 status = IXGBE_ERR_SFP_NOT_PRESENT;
+
+       switch (hw->mac.ops.get_media_type(hw)) {
+       case ixgbe_media_type_fiber:
+               status = ixgbe_identify_sfp_module_generic(hw);
+               break;
+       case ixgbe_media_type_fiber_qsfp:
+               status = ixgbe_identify_qsfp_module_generic(hw);
+               break;
+       default:
+               hw->phy.sfp_type = ixgbe_sfp_type_not_present;
+               status = IXGBE_ERR_SFP_NOT_PRESENT;
+               break;
+       }
+
+       return status;
+}
+
+/**
+ *  ixgbe_identify_sfp_module_generic - Identifies SFP modules
+ *  @hw: pointer to hardware structure
+ *
  *  Searches for and identifies the SFP module and assigns appropriate PHY type.
  **/
 s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
@@ -1105,6 +1147,156 @@ err_read_i2c_eeprom:
        return IXGBE_ERR_SFP_NOT_PRESENT;
 }
 
+/**
+ * ixgbe_identify_qsfp_module_generic - Identifies QSFP modules
+ * @hw: pointer to hardware structure
+ *
+ * Searches for and identifies the QSFP module and assigns appropriate PHY type
+ **/
+s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw)
+{
+       struct ixgbe_adapter *adapter = hw->back;
+       s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
+       u32 vendor_oui = 0;
+       enum ixgbe_sfp_type stored_sfp_type = hw->phy.sfp_type;
+       u8 identifier = 0;
+       u8 comp_codes_1g = 0;
+       u8 comp_codes_10g = 0;
+       u8 oui_bytes[3] = {0, 0, 0};
+       u16 enforce_sfp = 0;
+
+       if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_fiber_qsfp) {
+               hw->phy.sfp_type = ixgbe_sfp_type_not_present;
+               status = IXGBE_ERR_SFP_NOT_PRESENT;
+               goto out;
+       }
+
+       status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_IDENTIFIER,
+                                            &identifier);
+
+       if (status != 0)
+               goto err_read_i2c_eeprom;
+
+       if (identifier != IXGBE_SFF_IDENTIFIER_QSFP_PLUS) {
+               hw->phy.type = ixgbe_phy_sfp_unsupported;
+               status = IXGBE_ERR_SFP_NOT_SUPPORTED;
+               goto out;
+       }
+
+       hw->phy.id = identifier;
+
+       /* LAN ID is needed for sfp_type determination */
+       hw->mac.ops.set_lan_id(hw);
+
+       status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_QSFP_10GBE_COMP,
+                                            &comp_codes_10g);
+
+       if (status != 0)
+               goto err_read_i2c_eeprom;
+
+       if (comp_codes_10g & IXGBE_SFF_QSFP_DA_PASSIVE_CABLE) {
+               hw->phy.type = ixgbe_phy_qsfp_passive_unknown;
+               if (hw->bus.lan_id == 0)
+                       hw->phy.sfp_type = ixgbe_sfp_type_da_cu_core0;
+               else
+                       hw->phy.sfp_type = ixgbe_sfp_type_da_cu_core1;
+       } else if (comp_codes_10g & IXGBE_SFF_QSFP_DA_ACTIVE_CABLE) {
+               hw->phy.type = ixgbe_phy_qsfp_active_unknown;
+               if (hw->bus.lan_id == 0)
+                       hw->phy.sfp_type = ixgbe_sfp_type_da_act_lmt_core0;
+               else
+                       hw->phy.sfp_type = ixgbe_sfp_type_da_act_lmt_core1;
+       } else if (comp_codes_10g & (IXGBE_SFF_10GBASESR_CAPABLE |
+                                    IXGBE_SFF_10GBASELR_CAPABLE)) {
+               if (hw->bus.lan_id == 0)
+                       hw->phy.sfp_type = ixgbe_sfp_type_srlr_core0;
+               else
+                       hw->phy.sfp_type = ixgbe_sfp_type_srlr_core1;
+       } else {
+               /* unsupported module type */
+               hw->phy.type = ixgbe_phy_sfp_unsupported;
+               status = IXGBE_ERR_SFP_NOT_SUPPORTED;
+               goto out;
+       }
+
+       if (hw->phy.sfp_type != stored_sfp_type)
+               hw->phy.sfp_setup_needed = true;
+
+       /* Determine if the QSFP+ PHY is dual speed or not. */
+       hw->phy.multispeed_fiber = false;
+       if (((comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) &&
+            (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)) ||
+           ((comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) &&
+            (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)))
+               hw->phy.multispeed_fiber = true;
+
+       /* Determine PHY vendor for optical modules */
+       if (comp_codes_10g & (IXGBE_SFF_10GBASESR_CAPABLE |
+                             IXGBE_SFF_10GBASELR_CAPABLE)) {
+               status = hw->phy.ops.read_i2c_eeprom(hw,
+                                       IXGBE_SFF_QSFP_VENDOR_OUI_BYTE0,
+                                       &oui_bytes[0]);
+
+               if (status != 0)
+                       goto err_read_i2c_eeprom;
+
+               status = hw->phy.ops.read_i2c_eeprom(hw,
+                                       IXGBE_SFF_QSFP_VENDOR_OUI_BYTE1,
+                                       &oui_bytes[1]);
+
+               if (status != 0)
+                       goto err_read_i2c_eeprom;
+
+               status = hw->phy.ops.read_i2c_eeprom(hw,
+                                       IXGBE_SFF_QSFP_VENDOR_OUI_BYTE2,
+                                       &oui_bytes[2]);
+
+               if (status != 0)
+                       goto err_read_i2c_eeprom;
+
+               vendor_oui =
+                       ((oui_bytes[0] << IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT) |
+                        (oui_bytes[1] << IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT) |
+                        (oui_bytes[2] << IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT));
+
+               if (vendor_oui == IXGBE_SFF_VENDOR_OUI_INTEL)
+                       hw->phy.type = ixgbe_phy_qsfp_intel;
+               else
+                       hw->phy.type = ixgbe_phy_qsfp_unknown;
+
+               hw->mac.ops.get_device_caps(hw, &enforce_sfp);
+               if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP)) {
+                       /* Make sure we're a supported PHY type */
+                       if (hw->phy.type == ixgbe_phy_qsfp_intel) {
+                               status = 0;
+                       } else {
+                               if (hw->allow_unsupported_sfp == true) {
+                                       e_warn(hw, "WARNING: Intel (R) Network Connections are quality tested using Intel (R) Ethernet Optics. Using untested modules is not supported and may cause unstable operation or damage to the module or the adapter. Intel Corporation is not responsible for any harm caused by using untested modules.\n");
+                                       status = 0;
+                               } else {
+                                       hw_dbg(hw,
+                                              "QSFP module not supported\n");
+                                       hw->phy.type =
+                                               ixgbe_phy_sfp_unsupported;
+                                       status = IXGBE_ERR_SFP_NOT_SUPPORTED;
+                               }
+                       }
+               } else {
+                       status = 0;
+               }
+       }
+
+out:
+       return status;
+
+err_read_i2c_eeprom:
+       hw->phy.sfp_type = ixgbe_sfp_type_not_present;
+       hw->phy.id = 0;
+       hw->phy.type = ixgbe_phy_unknown;
+
+       return IXGBE_ERR_SFP_NOT_PRESENT;
+}
+
 /**
  *  ixgbe_get_sfp_init_sequence_offsets - Provides offset of PHY init sequence
  *  @hw: pointer to hardware structure
index 886a3431cf5bc36e3c38a22b5964fb4488b8ed66..138dadd7cf336e94ae84254653052d351cb3258b 100644 (file)
 #define IXGBE_I2C_EEPROM_DEV_ADDR2   0xA2
 
 /* EEPROM byte offsets */
-#define IXGBE_SFF_IDENTIFIER         0x0
-#define IXGBE_SFF_IDENTIFIER_SFP     0x3
-#define IXGBE_SFF_VENDOR_OUI_BYTE0   0x25
-#define IXGBE_SFF_VENDOR_OUI_BYTE1   0x26
-#define IXGBE_SFF_VENDOR_OUI_BYTE2   0x27
-#define IXGBE_SFF_1GBE_COMP_CODES    0x6
-#define IXGBE_SFF_10GBE_COMP_CODES   0x3
-#define IXGBE_SFF_CABLE_TECHNOLOGY   0x8
-#define IXGBE_SFF_CABLE_SPEC_COMP    0x3C
-#define IXGBE_SFF_SFF_8472_SWAP      0x5C
-#define IXGBE_SFF_SFF_8472_COMP      0x5E
+#define IXGBE_SFF_IDENTIFIER           0x0
+#define IXGBE_SFF_IDENTIFIER_SFP       0x3
+#define IXGBE_SFF_VENDOR_OUI_BYTE0     0x25
+#define IXGBE_SFF_VENDOR_OUI_BYTE1     0x26
+#define IXGBE_SFF_VENDOR_OUI_BYTE2     0x27
+#define IXGBE_SFF_1GBE_COMP_CODES      0x6
+#define IXGBE_SFF_10GBE_COMP_CODES     0x3
+#define IXGBE_SFF_CABLE_TECHNOLOGY     0x8
+#define IXGBE_SFF_CABLE_SPEC_COMP      0x3C
+#define IXGBE_SFF_SFF_8472_SWAP                0x5C
+#define IXGBE_SFF_SFF_8472_COMP                0x5E
+#define IXGBE_SFF_SFF_8472_OSCB                0x6E
+#define IXGBE_SFF_SFF_8472_ESCB                0x76
+#define IXGBE_SFF_IDENTIFIER_QSFP_PLUS 0xD
+#define IXGBE_SFF_QSFP_VENDOR_OUI_BYTE0        0xA5
+#define IXGBE_SFF_QSFP_VENDOR_OUI_BYTE1        0xA6
+#define IXGBE_SFF_QSFP_VENDOR_OUI_BYTE2        0xA7
+#define IXGBE_SFF_QSFP_10GBE_COMP      0x83
+#define IXGBE_SFF_QSFP_1GBE_COMP       0x86
 
 /* Bitmasks */
 #define IXGBE_SFF_DA_PASSIVE_CABLE           0x4
 #define IXGBE_SFF_1GBASET_CAPABLE            0x8
 #define IXGBE_SFF_10GBASESR_CAPABLE          0x10
 #define IXGBE_SFF_10GBASELR_CAPABLE          0x20
+#define IXGBE_SFF_SOFT_RS_SELECT_MASK  0x8
+#define IXGBE_SFF_SOFT_RS_SELECT_10G   0x8
+#define IXGBE_SFF_SOFT_RS_SELECT_1G    0x0
 #define IXGBE_SFF_ADDRESSING_MODE           0x4
+#define IXGBE_SFF_QSFP_DA_ACTIVE_CABLE       0x1
+#define IXGBE_SFF_QSFP_DA_PASSIVE_CABLE      0x8
 #define IXGBE_I2C_EEPROM_READ_MASK           0x100
 #define IXGBE_I2C_EEPROM_STATUS_MASK         0x3
 #define IXGBE_I2C_EEPROM_STATUS_NO_OPERATION 0x0
@@ -102,6 +115,10 @@ s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
                                u32 device_type, u16 *phy_data);
 s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
                                 u32 device_type, u16 phy_data);
+s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr,
+                          u32 device_type, u16 *phy_data);
+s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr,
+                           u32 device_type, u16 phy_data);
 s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw);
 s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
                                        ixgbe_link_speed speed,
@@ -121,7 +138,9 @@ s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw,
                                            u16 *firmware_version);
 
 s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw);
+s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw);
 s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw);
+s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw);
 s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
                                         u16 *list_offset,
                                         u16 *data_offset);
index 331987d6815c91ef7f6296c1d3062821c3268a1b..5184e2a1a7d8249bc746ce6496e55f5789a0e949 100644 (file)
@@ -885,8 +885,8 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
 
        ixgbe_ptp_reset(adapter);
 
-       /* set the flag that PTP has been enabled */
-       adapter->flags2 |= IXGBE_FLAG2_PTP_ENABLED;
+       /* enter the IXGBE_PTP_RUNNING state */
+       set_bit(__IXGBE_PTP_RUNNING, &adapter->state);
 
        return;
 }
@@ -899,10 +899,12 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
  */
 void ixgbe_ptp_stop(struct ixgbe_adapter *adapter)
 {
-       /* stop the overflow check task */
-       adapter->flags2 &= ~(IXGBE_FLAG2_PTP_ENABLED |
-                            IXGBE_FLAG2_PTP_PPS_ENABLED);
+       /* Leave the IXGBE_PTP_RUNNING state. */
+       if (!test_and_clear_bit(__IXGBE_PTP_RUNNING, &adapter->state))
+               return;
 
+       /* stop the PPS signal */
+       adapter->flags2 &= ~IXGBE_FLAG2_PTP_PPS_ENABLED;
        ixgbe_ptp_setup_sdp(adapter);
 
        cancel_work_sync(&adapter->ptp_tx_work);
index 1e7d587c4e572f9efdd3f876b98e906cb3963561..73c8e73bb6e74754b36e2637c07ada9510e1fe4c 100644 (file)
@@ -173,39 +173,6 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter)
        ixgbe_disable_sriov(adapter);
 }
 
-static bool ixgbe_vfs_are_assigned(struct ixgbe_adapter *adapter)
-{
-       struct pci_dev *pdev = adapter->pdev;
-       struct pci_dev *vfdev;
-       int dev_id;
-
-       switch (adapter->hw.mac.type) {
-       case ixgbe_mac_82599EB:
-               dev_id = IXGBE_DEV_ID_82599_VF;
-               break;
-       case ixgbe_mac_X540:
-               dev_id = IXGBE_DEV_ID_X540_VF;
-               break;
-       default:
-               return false;
-       }
-
-       /* loop through all the VFs to see if we own any that are assigned */
-       vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, dev_id, NULL);
-       while (vfdev) {
-               /* if we don't own it we don't care */
-               if (vfdev->is_virtfn && vfdev->physfn == pdev) {
-                       /* if it is assigned we cannot release it */
-                       if (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
-                               return true;
-               }
-
-               vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, dev_id, vfdev);
-       }
-
-       return false;
-}
-
 #endif /* #ifdef CONFIG_PCI_IOV */
 int ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
 {
@@ -235,7 +202,7 @@ int ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
         * without causing issues, so just leave the hardware
         * available but disabled
         */
-       if (ixgbe_vfs_are_assigned(adapter)) {
+       if (pci_vfs_assigned(adapter->pdev)) {
                e_dev_warn("Unloading driver while VFs are assigned - VFs will not be deallocated\n");
                return -EPERM;
        }
@@ -768,6 +735,29 @@ static int ixgbe_set_vf_mac_addr(struct ixgbe_adapter *adapter,
        return ixgbe_set_vf_mac(adapter, vf, new_mac) < 0;
 }
 
+static int ixgbe_find_vlvf_entry(struct ixgbe_hw *hw, u32 vlan)
+{
+       u32 vlvf;
+       s32 regindex;
+
+       /* short cut the special case */
+       if (vlan == 0)
+               return 0;
+
+       /* Search for the vlan id in the VLVF entries */
+       for (regindex = 1; regindex < IXGBE_VLVF_ENTRIES; regindex++) {
+               vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex));
+               if ((vlvf & VLAN_VID_MASK) == vlan)
+                       break;
+       }
+
+       /* Return a negative value if not found */
+       if (regindex >= IXGBE_VLVF_ENTRIES)
+               regindex = -1;
+
+       return regindex;
+}
+
 static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter,
                                 u32 *msgbuf, u32 vf)
 {
@@ -775,6 +765,9 @@ static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter,
        int add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT;
        int vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK);
        int err;
+       s32 reg_ndx;
+       u32 vlvf;
+       u32 bits;
        u8 tcs = netdev_get_num_tc(adapter->netdev);
 
        if (adapter->vfinfo[vf].pf_vlan || tcs) {
@@ -790,10 +783,50 @@ static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter,
        else if (adapter->vfinfo[vf].vlan_count)
                adapter->vfinfo[vf].vlan_count--;
 
+       /* in case of promiscuous mode any VLAN filter set for a VF must
+        * also have the PF pool added to it.
+        */
+       if (add && adapter->netdev->flags & IFF_PROMISC)
+               err = ixgbe_set_vf_vlan(adapter, add, vid, VMDQ_P(0));
+
        err = ixgbe_set_vf_vlan(adapter, add, vid, vf);
        if (!err && adapter->vfinfo[vf].spoofchk_enabled)
                hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
 
+       /* Go through all the checks to see if the VLAN filter should
+        * be wiped completely.
+        */
+       if (!add && adapter->netdev->flags & IFF_PROMISC) {
+               reg_ndx = ixgbe_find_vlvf_entry(hw, vid);
+               if (reg_ndx < 0)
+                       goto out;
+               vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(reg_ndx));
+               /* See if any other pools are set for this VLAN filter
+                * entry other than the PF.
+                */
+               if (VMDQ_P(0) < 32) {
+                       bits = IXGBE_READ_REG(hw, IXGBE_VLVFB(reg_ndx * 2));
+                       bits &= ~(1 << VMDQ_P(0));
+                       bits |= IXGBE_READ_REG(hw,
+                                              IXGBE_VLVFB(reg_ndx * 2) + 1);
+               } else {
+                       bits = IXGBE_READ_REG(hw,
+                                             IXGBE_VLVFB(reg_ndx * 2) + 1);
+                       bits &= ~(1 << (VMDQ_P(0) - 32));
+                       bits |= IXGBE_READ_REG(hw, IXGBE_VLVFB(reg_ndx * 2));
+               }
+
+               /* If the filter was removed then ensure PF pool bit
+                * is cleared if the PF only added itself to the pool
+                * because the PF is in promiscuous mode.
+                */
+               if ((vlvf & VLAN_VID_MASK) == vid &&
+                   !test_bit(vid, adapter->active_vlans) && !bits)
+                       ixgbe_set_vf_vlan(adapter, add, vid, VMDQ_P(0));
+       }
+
+out:
+
        return err;
 }
 
index 70c6aa3d3f959fc6f86fa032fc874376f714efe9..161ff18be77550bd71386b2d05fd5ec16ee7c4f3 100644 (file)
@@ -69,6 +69,7 @@
 #define IXGBE_DEV_ID_82599_LS            0x154F
 #define IXGBE_DEV_ID_X540T               0x1528
 #define IXGBE_DEV_ID_82599_SFP_SF_QP     0x154A
+#define IXGBE_DEV_ID_82599_QSFP_SF_QP    0x1558
 #define IXGBE_DEV_ID_X540T1              0x1560
 
 /* VF Device IDs */
@@ -1520,9 +1521,11 @@ enum {
 #define IXGBE_ESDP_SDP5 0x00000020 /* SDP5 Data Value */
 #define IXGBE_ESDP_SDP6 0x00000040 /* SDP6 Data Value */
 #define IXGBE_ESDP_SDP0_DIR     0x00000100 /* SDP0 IO direction */
+#define IXGBE_ESDP_SDP1_DIR     0x00000200 /* SDP1 IO direction */
 #define IXGBE_ESDP_SDP4_DIR     0x00000004 /* SDP4 IO direction */
 #define IXGBE_ESDP_SDP5_DIR     0x00002000 /* SDP5 IO direction */
 #define IXGBE_ESDP_SDP0_NATIVE  0x00010000 /* SDP0 Native Function */
+#define IXGBE_ESDP_SDP1_NATIVE  0x00020000 /* SDP1 IO mode */
 
 /* LEDCTL Bit Masks */
 #define IXGBE_LED_IVRT_BASE      0x00000040
@@ -2582,6 +2585,10 @@ enum ixgbe_phy_type {
        ixgbe_phy_sfp_ftl_active,
        ixgbe_phy_sfp_unknown,
        ixgbe_phy_sfp_intel,
+       ixgbe_phy_qsfp_passive_unknown,
+       ixgbe_phy_qsfp_active_unknown,
+       ixgbe_phy_qsfp_intel,
+       ixgbe_phy_qsfp_unknown,
        ixgbe_phy_sfp_unsupported,
        ixgbe_phy_generic
 };
@@ -2622,6 +2629,8 @@ enum ixgbe_sfp_type {
 enum ixgbe_media_type {
        ixgbe_media_type_unknown = 0,
        ixgbe_media_type_fiber,
+       ixgbe_media_type_fiber_fixed,
+       ixgbe_media_type_fiber_qsfp,
        ixgbe_media_type_fiber_lco,
        ixgbe_media_type_copper,
        ixgbe_media_type_backplane,
@@ -2885,6 +2894,8 @@ struct ixgbe_phy_operations {
        s32 (*reset)(struct ixgbe_hw *);
        s32 (*read_reg)(struct ixgbe_hw *, u32, u32, u16 *);
        s32 (*write_reg)(struct ixgbe_hw *, u32, u32, u16);
+       s32 (*read_reg_mdi)(struct ixgbe_hw *, u32, u32, u16 *);
+       s32 (*write_reg_mdi)(struct ixgbe_hw *, u32, u32, u16);
        s32 (*setup_link)(struct ixgbe_hw *);
        s32 (*setup_link_speed)(struct ixgbe_hw *, ixgbe_link_speed, bool);
        s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *);
@@ -2953,6 +2964,7 @@ struct ixgbe_phy_info {
        bool                            smart_speed_active;
        bool                            multispeed_fiber;
        bool                            reset_if_overtemp;
+       bool                            qsfp_shared_i2c_bus;
 };
 
 #include "ixgbe_mbx.h"
index b017818bccae1a06bc0c85e88d29733f9a1365e6..2777c70c603b550676b77027fd96e7b9a79fbe6b 100644 (file)
 #define MVNETA_MAC_ADDR_HIGH                     0x2418
 #define MVNETA_SDMA_CONFIG                       0x241c
 #define      MVNETA_SDMA_BRST_SIZE_16            4
-#define      MVNETA_NO_DESC_SWAP                 0x0
 #define      MVNETA_RX_BRST_SZ_MASK(burst)       ((burst) << 1)
 #define      MVNETA_RX_NO_DATA_SWAP              BIT(4)
 #define      MVNETA_TX_NO_DATA_SWAP              BIT(5)
+#define      MVNETA_DESC_SWAP                    BIT(6)
 #define      MVNETA_TX_BRST_SZ_MASK(burst)       ((burst) << 22)
 #define MVNETA_PORT_STATUS                       0x2444
 #define      MVNETA_TX_IN_PRGRS                  BIT(1)
@@ -264,8 +264,7 @@ struct mvneta_port {
  * layout of the transmit and reception DMA descriptors, and their
  * layout is therefore defined by the hardware design
  */
-struct mvneta_tx_desc {
-       u32  command;           /* Options used by HW for packet transmitting.*/
+
 #define MVNETA_TX_L3_OFF_SHIFT 0
 #define MVNETA_TX_IP_HLEN_SHIFT        8
 #define MVNETA_TX_L4_UDP       BIT(16)
@@ -280,15 +279,6 @@ struct mvneta_tx_desc {
 #define MVNETA_TX_L4_CSUM_FULL BIT(30)
 #define MVNETA_TX_L4_CSUM_NOT  BIT(31)
 
-       u16  reserverd1;        /* csum_l4 (for future use)             */
-       u16  data_size;         /* Data size of transmitted packet in bytes */
-       u32  buf_phys_addr;     /* Physical addr of transmitted buffer  */
-       u32  reserved2;         /* hw_cmd - (for future use, PMT)       */
-       u32  reserved3[4];      /* Reserved - (for future use)          */
-};
-
-struct mvneta_rx_desc {
-       u32  status;            /* Info about received packet           */
 #define MVNETA_RXD_ERR_CRC             0x0
 #define MVNETA_RXD_ERR_SUMMARY         BIT(16)
 #define MVNETA_RXD_ERR_OVERRUN         BIT(17)
@@ -299,16 +289,57 @@ struct mvneta_rx_desc {
 #define MVNETA_RXD_FIRST_LAST_DESC     (BIT(26) | BIT(27))
 #define MVNETA_RXD_L4_CSUM_OK          BIT(30)
 
+#if defined(__LITTLE_ENDIAN)
+struct mvneta_tx_desc {
+       u32  command;           /* Options used by HW for packet transmitting.*/
+       u16  reserverd1;        /* csum_l4 (for future use)             */
+       u16  data_size;         /* Data size of transmitted packet in bytes */
+       u32  buf_phys_addr;     /* Physical addr of transmitted buffer  */
+       u32  reserved2;         /* hw_cmd - (for future use, PMT)       */
+       u32  reserved3[4];      /* Reserved - (for future use)          */
+};
+
+struct mvneta_rx_desc {
+       u32  status;            /* Info about received packet           */
        u16  reserved1;         /* pnc_info - (for future use, PnC)     */
        u16  data_size;         /* Size of received packet in bytes     */
+
        u32  buf_phys_addr;     /* Physical address of the buffer       */
        u32  reserved2;         /* pnc_flow_id  (for future use, PnC)   */
+
        u32  buf_cookie;        /* cookie for access to RX buffer in rx path */
        u16  reserved3;         /* prefetch_cmd, for future use         */
        u16  reserved4;         /* csum_l4 - (for future use, PnC)      */
+
+       u32  reserved5;         /* pnc_extra PnC (for future use, PnC)  */
+       u32  reserved6;         /* hw_cmd (for future use, PnC and HWF) */
+};
+#else
+struct mvneta_tx_desc {
+       u16  data_size;         /* Data size of transmitted packet in bytes */
+       u16  reserverd1;        /* csum_l4 (for future use)             */
+       u32  command;           /* Options used by HW for packet transmitting.*/
+       u32  reserved2;         /* hw_cmd - (for future use, PMT)       */
+       u32  buf_phys_addr;     /* Physical addr of transmitted buffer  */
+       u32  reserved3[4];      /* Reserved - (for future use)          */
+};
+
+struct mvneta_rx_desc {
+       u16  data_size;         /* Size of received packet in bytes     */
+       u16  reserved1;         /* pnc_info - (for future use, PnC)     */
+       u32  status;            /* Info about received packet           */
+
+       u32  reserved2;         /* pnc_flow_id  (for future use, PnC)   */
+       u32  buf_phys_addr;     /* Physical address of the buffer       */
+
+       u16  reserved4;         /* csum_l4 - (for future use, PnC)      */
+       u16  reserved3;         /* prefetch_cmd, for future use         */
+       u32  buf_cookie;        /* cookie for access to RX buffer in rx path */
+
        u32  reserved5;         /* pnc_extra PnC (for future use, PnC)  */
        u32  reserved6;         /* hw_cmd (for future use, PnC and HWF) */
 };
+#endif
 
 struct mvneta_tx_queue {
        /* Number of this TX queue, in the range 0-7 */
@@ -908,9 +939,11 @@ static void mvneta_defaults_set(struct mvneta_port *pp)
        /* Default burst size */
        val |= MVNETA_TX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
        val |= MVNETA_RX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
+       val |= MVNETA_RX_NO_DATA_SWAP | MVNETA_TX_NO_DATA_SWAP;
 
-       val |= (MVNETA_RX_NO_DATA_SWAP | MVNETA_TX_NO_DATA_SWAP |
-               MVNETA_NO_DESC_SWAP);
+#if defined(__BIG_ENDIAN)
+       val |= MVNETA_DESC_SWAP;
+#endif
 
        /* Assign port SDMA configuration */
        mvreg_write(pp, MVNETA_SDMA_CONFIG, val);
index 299d0184f983c95c1b68d043b1f5d28f0fe329f8..ea20182c6969245e53a3a9da563c76461555f7f5 100644 (file)
@@ -800,7 +800,16 @@ static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave,
                                    vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
 }
 
-int MLX4_CMD_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
+static int MLX4_CMD_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
+                    struct mlx4_vhcr *vhcr,
+                    struct mlx4_cmd_mailbox *inbox,
+                    struct mlx4_cmd_mailbox *outbox,
+                    struct mlx4_cmd_info *cmd)
+{
+       return -EPERM;
+}
+
+static int MLX4_CMD_GET_OP_REQ_wrapper(struct mlx4_dev *dev, int slave,
                     struct mlx4_vhcr *vhcr,
                     struct mlx4_cmd_mailbox *inbox,
                     struct mlx4_cmd_mailbox *outbox,
@@ -1251,6 +1260,15 @@ static struct mlx4_cmd_info cmd_info[] = {
                .verify = NULL,
                .wrapper = MLX4_CMD_UPDATE_QP_wrapper
        },
+       {
+               .opcode = MLX4_CMD_GET_OP_REQ,
+               .has_inbox = false,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = MLX4_CMD_GET_OP_REQ_wrapper,
+       },
        {
                .opcode = MLX4_CMD_CONF_SPECIAL_QP,
                .has_inbox = false,
@@ -1526,7 +1544,7 @@ static int calculate_transition(u16 oper_vlan, u16 admin_vlan)
        return (2 * (oper_vlan == MLX4_VGT) + (admin_vlan == MLX4_VGT));
 }
 
-int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
+static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
                                            int slave, int port)
 {
        struct mlx4_vport_oper_state *vp_oper;
index 9d4a1ea030d84a95055afbc07a08dd131d885f8d..b4881b6861590c16c55644fd2752abfcf569ad8e 100644 (file)
@@ -160,6 +160,7 @@ static int mlx4_en_dcbnl_ieee_setpfc(struct net_device *dev,
                struct ieee_pfc *pfc)
 {
        struct mlx4_en_priv *priv = netdev_priv(dev);
+       struct mlx4_en_port_profile *prof = priv->prof;
        struct mlx4_en_dev *mdev = priv->mdev;
        int err;
 
@@ -169,15 +170,17 @@ static int mlx4_en_dcbnl_ieee_setpfc(struct net_device *dev,
                        pfc->mbc,
                        pfc->delay);
 
-       priv->prof->rx_pause = priv->prof->tx_pause = !!pfc->pfc_en;
-       priv->prof->rx_ppp = priv->prof->tx_ppp = pfc->pfc_en;
+       prof->rx_pause = !pfc->pfc_en;
+       prof->tx_pause = !pfc->pfc_en;
+       prof->rx_ppp = pfc->pfc_en;
+       prof->tx_ppp = pfc->pfc_en;
 
        err = mlx4_SET_PORT_general(mdev->dev, priv->port,
                                    priv->rx_skb_size + ETH_FCS_LEN,
-                                   priv->prof->tx_pause,
-                                   priv->prof->tx_ppp,
-                                   priv->prof->rx_pause,
-                                   priv->prof->rx_ppp);
+                                   prof->tx_pause,
+                                   prof->tx_ppp,
+                                   prof->rx_pause,
+                                   prof->rx_ppp);
        if (err)
                en_err(priv, "Failed setting pause params\n");
 
index 7c492382da09937764e8c6bb9a835d1c05771021..0698c82d6ff1bf94598b4ed577b446289c4178be 100644 (file)
@@ -191,6 +191,39 @@ void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
                       MLX4_QP_STATE_RST, NULL, 0, 0, &ring->qp);
 }
 
+static void mlx4_en_stamp_wqe(struct mlx4_en_priv *priv,
+                             struct mlx4_en_tx_ring *ring, int index,
+                             u8 owner)
+{
+       __be32 stamp = cpu_to_be32(STAMP_VAL | (!!owner << STAMP_SHIFT));
+       struct mlx4_en_tx_desc *tx_desc = ring->buf + index * TXBB_SIZE;
+       struct mlx4_en_tx_info *tx_info = &ring->tx_info[index];
+       void *end = ring->buf + ring->buf_size;
+       __be32 *ptr = (__be32 *)tx_desc;
+       int i;
+
+       /* Optimize the common case when there are no wraparounds */
+       if (likely((void *)tx_desc + tx_info->nr_txbb * TXBB_SIZE <= end)) {
+               /* Stamp the freed descriptor */
+               for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE;
+                    i += STAMP_STRIDE) {
+                       *ptr = stamp;
+                       ptr += STAMP_DWORDS;
+               }
+       } else {
+               /* Stamp the freed descriptor */
+               for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE;
+                    i += STAMP_STRIDE) {
+                       *ptr = stamp;
+                       ptr += STAMP_DWORDS;
+                       if ((void *)ptr >= end) {
+                               ptr = ring->buf;
+                               stamp ^= cpu_to_be32(0x80000000);
+                       }
+               }
+       }
+}
+
 
 static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
                                struct mlx4_en_tx_ring *ring,
@@ -205,8 +238,6 @@ static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
        void *end = ring->buf + ring->buf_size;
        int frags = skb_shinfo(skb)->nr_frags;
        int i;
-       __be32 *ptr = (__be32 *)tx_desc;
-       __be32 stamp = cpu_to_be32(STAMP_VAL | (!!owner << STAMP_SHIFT));
        struct skb_shared_hwtstamps hwts;
 
        if (timestamp) {
@@ -232,12 +263,6 @@ static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
                                        skb_frag_size(frag), PCI_DMA_TODEVICE);
                        }
                }
-               /* Stamp the freed descriptor */
-               for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; i += STAMP_STRIDE) {
-                       *ptr = stamp;
-                       ptr += STAMP_DWORDS;
-               }
-
        } else {
                if (!tx_info->inl) {
                        if ((void *) data >= end) {
@@ -263,16 +288,6 @@ static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
                                ++data;
                        }
                }
-               /* Stamp the freed descriptor */
-               for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; i += STAMP_STRIDE) {
-                       *ptr = stamp;
-                       ptr += STAMP_DWORDS;
-                       if ((void *) ptr >= end) {
-                               ptr = ring->buf;
-                               stamp ^= cpu_to_be32(0x80000000);
-                       }
-               }
-
        }
        dev_kfree_skb_any(skb);
        return tx_info->nr_txbb;
@@ -318,8 +333,9 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
        struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
        struct mlx4_cqe *cqe;
        u16 index;
-       u16 new_index, ring_index;
+       u16 new_index, ring_index, stamp_index;
        u32 txbbs_skipped = 0;
+       u32 txbbs_stamp = 0;
        u32 cons_index = mcq->cons_index;
        int size = cq->size;
        u32 size_mask = ring->size_mask;
@@ -335,6 +351,7 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
        index = cons_index & size_mask;
        cqe = &buf[(index << factor) + factor];
        ring_index = ring->cons & size_mask;
+       stamp_index = ring_index;
 
        /* Process all completed CQEs */
        while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
@@ -345,6 +362,15 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
                 */
                rmb();
 
+               if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
+                            MLX4_CQE_OPCODE_ERROR)) {
+                       struct mlx4_err_cqe *cqe_err = (struct mlx4_err_cqe *)cqe;
+
+                       en_err(priv, "CQE error - vendor syndrome: 0x%x syndrome: 0x%x\n",
+                              cqe_err->vendor_err_syndrome,
+                              cqe_err->syndrome);
+               }
+
                /* Skip over last polled CQE */
                new_index = be16_to_cpu(cqe->wqe_index) & size_mask;
 
@@ -359,6 +385,12 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
                                        priv, ring, ring_index,
                                        !!((ring->cons + txbbs_skipped) &
                                        ring->size), timestamp);
+
+                       mlx4_en_stamp_wqe(priv, ring, stamp_index,
+                                         !!((ring->cons + txbbs_stamp) &
+                                               ring->size));
+                       stamp_index = ring_index;
+                       txbbs_stamp = txbbs_skipped;
                        packets++;
                        bytes += ring->tx_info[ring_index].nr_bytes;
                } while (ring_index != new_index);
@@ -556,17 +588,15 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = priv->mdev;
+       struct device *ddev = priv->ddev;
        struct mlx4_en_tx_ring *ring;
        struct mlx4_en_tx_desc *tx_desc;
        struct mlx4_wqe_data_seg *data;
-       struct skb_frag_struct *frag;
        struct mlx4_en_tx_info *tx_info;
-       struct ethhdr *ethh;
        int tx_ind = 0;
        int nr_txbb;
        int desc_size;
        int real_size;
-       dma_addr_t dma;
        u32 index, bf_index;
        __be32 op_own;
        u16 vlan_tag = 0;
@@ -642,6 +672,61 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
        tx_info->skb = skb;
        tx_info->nr_txbb = nr_txbb;
 
+       if (lso_header_size)
+               data = ((void *)&tx_desc->lso + ALIGN(lso_header_size + 4,
+                                                     DS_SIZE));
+       else
+               data = &tx_desc->data;
+
+       /* valid only for none inline segments */
+       tx_info->data_offset = (void *)data - (void *)tx_desc;
+
+       tx_info->linear = (lso_header_size < skb_headlen(skb) &&
+                          !is_inline(skb, NULL)) ? 1 : 0;
+
+       data += skb_shinfo(skb)->nr_frags + tx_info->linear - 1;
+
+       if (is_inline(skb, &fragptr)) {
+               tx_info->inl = 1;
+       } else {
+               /* Map fragments */
+               for (i = skb_shinfo(skb)->nr_frags - 1; i >= 0; i--) {
+                       struct skb_frag_struct *frag;
+                       dma_addr_t dma;
+
+                       frag = &skb_shinfo(skb)->frags[i];
+                       dma = skb_frag_dma_map(ddev, frag,
+                                              0, skb_frag_size(frag),
+                                              DMA_TO_DEVICE);
+                       if (dma_mapping_error(ddev, dma))
+                               goto tx_drop_unmap;
+
+                       data->addr = cpu_to_be64(dma);
+                       data->lkey = cpu_to_be32(mdev->mr.key);
+                       wmb();
+                       data->byte_count = cpu_to_be32(skb_frag_size(frag));
+                       --data;
+               }
+
+               /* Map linear part */
+               if (tx_info->linear) {
+                       u32 byte_count = skb_headlen(skb) - lso_header_size;
+                       dma_addr_t dma;
+
+                       dma = dma_map_single(ddev, skb->data +
+                                            lso_header_size, byte_count,
+                                            PCI_DMA_TODEVICE);
+                       if (dma_mapping_error(ddev, dma))
+                               goto tx_drop_unmap;
+
+                       data->addr = cpu_to_be64(dma);
+                       data->lkey = cpu_to_be32(mdev->mr.key);
+                       wmb();
+                       data->byte_count = cpu_to_be32(byte_count);
+               }
+               tx_info->inl = 0;
+       }
+
        /*
         * For timestamping add flag to skb_shinfo and
         * set flag for further reference
@@ -666,6 +751,8 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
        }
 
        if (priv->flags & MLX4_EN_FLAG_ENABLE_HW_LOOPBACK) {
+               struct ethhdr *ethh;
+
                /* Copy dst mac address to wqe. This allows loopback in eSwitch,
                 * so that VFs and PF can communicate with each other
                 */
@@ -688,8 +775,6 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
                /* Copy headers;
                 * note that we already verified that it is linear */
                memcpy(tx_desc->lso.header, skb->data, lso_header_size);
-               data = ((void *) &tx_desc->lso +
-                       ALIGN(lso_header_size + 4, DS_SIZE));
 
                priv->port_stats.tso_packets++;
                i = ((skb->len - lso_header_size) / skb_shinfo(skb)->gso_size) +
@@ -701,7 +786,6 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
                op_own = cpu_to_be32(MLX4_OPCODE_SEND) |
                        ((ring->prod & ring->size) ?
                         cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0);
-               data = &tx_desc->data;
                tx_info->nr_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
                ring->packets++;
 
@@ -710,38 +794,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
        netdev_tx_sent_queue(ring->tx_queue, tx_info->nr_bytes);
        AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, skb->len);
 
-
-       /* valid only for none inline segments */
-       tx_info->data_offset = (void *) data - (void *) tx_desc;
-
-       tx_info->linear = (lso_header_size < skb_headlen(skb) && !is_inline(skb, NULL)) ? 1 : 0;
-       data += skb_shinfo(skb)->nr_frags + tx_info->linear - 1;
-
-       if (!is_inline(skb, &fragptr)) {
-               /* Map fragments */
-               for (i = skb_shinfo(skb)->nr_frags - 1; i >= 0; i--) {
-                       frag = &skb_shinfo(skb)->frags[i];
-                       dma = skb_frag_dma_map(priv->ddev, frag,
-                                              0, skb_frag_size(frag),
-                                              DMA_TO_DEVICE);
-                       data->addr = cpu_to_be64(dma);
-                       data->lkey = cpu_to_be32(mdev->mr.key);
-                       wmb();
-                       data->byte_count = cpu_to_be32(skb_frag_size(frag));
-                       --data;
-               }
-
-               /* Map linear part */
-               if (tx_info->linear) {
-                       dma = dma_map_single(priv->ddev, skb->data + lso_header_size,
-                                            skb_headlen(skb) - lso_header_size, PCI_DMA_TODEVICE);
-                       data->addr = cpu_to_be64(dma);
-                       data->lkey = cpu_to_be32(mdev->mr.key);
-                       wmb();
-                       data->byte_count = cpu_to_be32(skb_headlen(skb) - lso_header_size);
-               }
-               tx_info->inl = 0;
-       } else {
+       if (tx_info->inl) {
                build_inline_wqe(tx_desc, skb, real_size, &vlan_tag, tx_ind, fragptr);
                tx_info->inl = 1;
        }
@@ -781,6 +834,16 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 
        return NETDEV_TX_OK;
 
+tx_drop_unmap:
+       en_err(priv, "DMA mapping error\n");
+
+       for (i++; i < skb_shinfo(skb)->nr_frags; i++) {
+               data++;
+               dma_unmap_page(ddev, (dma_addr_t) be64_to_cpu(data->addr),
+                              be32_to_cpu(data->byte_count),
+                              PCI_DMA_TODEVICE);
+       }
+
 tx_drop:
        dev_kfree_skb_any(skb);
        priv->stats.tx_dropped++;
index 7e042869ef0cd2d9c5884147edd7b1c7d582b868..0416c5b3b35cc721c9e312e6b7b2ca0c0917cbf7 100644 (file)
@@ -79,6 +79,7 @@ enum {
                               (1ull << MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE)    | \
                               (1ull << MLX4_EVENT_TYPE_SRQ_LIMIT)          | \
                               (1ull << MLX4_EVENT_TYPE_CMD)                | \
+                              (1ull << MLX4_EVENT_TYPE_OP_REQUIRED)        | \
                               (1ull << MLX4_EVENT_TYPE_COMM_CHANNEL)       | \
                               (1ull << MLX4_EVENT_TYPE_FLR_EVENT)          | \
                               (1ull << MLX4_EVENT_TYPE_FATAL_WARNING))
@@ -629,6 +630,14 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
                        mlx4_warn(dev, "EQ overrun on EQN %d\n", eq->eqn);
                        break;
 
+               case MLX4_EVENT_TYPE_OP_REQUIRED:
+                       atomic_inc(&priv->opreq_count);
+                       /* FW commands can't be executed from interrupt context
+                        * working in deferred task
+                        */
+                       queue_work(mlx4_wq, &priv->opreq_task);
+                       break;
+
                case MLX4_EVENT_TYPE_COMM_CHANNEL:
                        if (!mlx4_is_master(dev)) {
                                mlx4_warn(dev, "Received comm channel event "
index 6fc6dabc78d542a3fbe9c6b470913e6744e5d196..0d63daa2f422e082d85fd627a892c9fb5f5c1945 100644 (file)
@@ -1696,3 +1696,107 @@ int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port)
                        MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
 }
 EXPORT_SYMBOL_GPL(mlx4_wol_write);
+
+enum {
+       ADD_TO_MCG = 0x26,
+};
+
+
+void mlx4_opreq_action(struct work_struct *work)
+{
+       struct mlx4_priv *priv = container_of(work, struct mlx4_priv,
+                                             opreq_task);
+       struct mlx4_dev *dev = &priv->dev;
+       int num_tasks = atomic_read(&priv->opreq_count);
+       struct mlx4_cmd_mailbox *mailbox;
+       struct mlx4_mgm *mgm;
+       u32 *outbox;
+       u32 modifier;
+       u16 token;
+       u16 type_m;
+       u16 type;
+       int err;
+       u32 num_qps;
+       struct mlx4_qp qp;
+       int i;
+       u8 rem_mcg;
+       u8 prot;
+
+#define GET_OP_REQ_MODIFIER_OFFSET     0x08
+#define GET_OP_REQ_TOKEN_OFFSET                0x14
+#define GET_OP_REQ_TYPE_OFFSET         0x1a
+#define GET_OP_REQ_DATA_OFFSET         0x20
+
+       mailbox = mlx4_alloc_cmd_mailbox(dev);
+       if (IS_ERR(mailbox)) {
+               mlx4_err(dev, "Failed to allocate mailbox for GET_OP_REQ\n");
+               return;
+       }
+       outbox = mailbox->buf;
+
+       while (num_tasks) {
+               err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0,
+                                  MLX4_CMD_GET_OP_REQ, MLX4_CMD_TIME_CLASS_A,
+                                  MLX4_CMD_NATIVE);
+               if (err) {
+                       mlx4_err(dev, "Failed to retreive required operation: %d\n",
+                                err);
+                       return;
+               }
+               MLX4_GET(modifier, outbox, GET_OP_REQ_MODIFIER_OFFSET);
+               MLX4_GET(token, outbox, GET_OP_REQ_TOKEN_OFFSET);
+               MLX4_GET(type, outbox, GET_OP_REQ_TYPE_OFFSET);
+               type_m = type >> 12;
+               type &= 0xfff;
+
+               switch (type) {
+               case ADD_TO_MCG:
+                       if (dev->caps.steering_mode ==
+                           MLX4_STEERING_MODE_DEVICE_MANAGED) {
+                               mlx4_warn(dev, "ADD MCG operation is not supported in DEVICE_MANAGED steering mode\n");
+                               err = EPERM;
+                               break;
+                       }
+                       mgm = (struct mlx4_mgm *)((u8 *)(outbox) +
+                                                 GET_OP_REQ_DATA_OFFSET);
+                       num_qps = be32_to_cpu(mgm->members_count) &
+                                 MGM_QPN_MASK;
+                       rem_mcg = ((u8 *)(&mgm->members_count))[0] & 1;
+                       prot = ((u8 *)(&mgm->members_count))[0] >> 6;
+
+                       for (i = 0; i < num_qps; i++) {
+                               qp.qpn = be32_to_cpu(mgm->qp[i]);
+                               if (rem_mcg)
+                                       err = mlx4_multicast_detach(dev, &qp,
+                                                                   mgm->gid,
+                                                                   prot, 0);
+                               else
+                                       err = mlx4_multicast_attach(dev, &qp,
+                                                                   mgm->gid,
+                                                                   mgm->gid[5]
+                                                                   , 0, prot,
+                                                                   NULL);
+                               if (err)
+                                       break;
+                       }
+                       break;
+               default:
+                       mlx4_warn(dev, "Bad type for required operation\n");
+                       err = EINVAL;
+                       break;
+               }
+               err = mlx4_cmd(dev, 0, ((u32) err | cpu_to_be32(token) << 16),
+                              1, MLX4_CMD_GET_OP_REQ, MLX4_CMD_TIME_CLASS_A,
+                              MLX4_CMD_NATIVE);
+               if (err) {
+                       mlx4_err(dev, "Failed to acknowledge required request: %d\n",
+                                err);
+                       goto out;
+               }
+               memset(outbox, 0, 0xffc);
+               num_tasks = atomic_dec_return(&priv->opreq_count);
+       }
+
+out:
+       mlx4_free_cmd_mailbox(dev, mailbox);
+}
index fdf41665a05931d3574a2dd7f3d53467d0e4d3f6..a0a368b7c93996724f49f8c5ece01ff3d5a86bc3 100644 (file)
@@ -220,5 +220,6 @@ int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm);
 int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev);
 int mlx4_NOP(struct mlx4_dev *dev);
 int mlx4_MOD_STAT_CFG(struct mlx4_dev *dev, struct mlx4_mod_stat_cfg *cfg);
+void mlx4_opreq_action(struct work_struct *work);
 
 #endif /* MLX4_FW_H */
index 36be3208786a69ac5ae40ddd5111f79116db3f95..60c9f4f103fce1a2c7d815ccc303b7b94805068c 100644 (file)
@@ -1692,11 +1692,19 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
                goto err_xrcd_table_free;
        }
 
+       if (!mlx4_is_slave(dev)) {
+               err = mlx4_init_mcg_table(dev);
+               if (err) {
+                       mlx4_err(dev, "Failed to initialize multicast group table, aborting.\n");
+                       goto err_mr_table_free;
+               }
+       }
+
        err = mlx4_init_eq_table(dev);
        if (err) {
                mlx4_err(dev, "Failed to initialize "
                         "event queue table, aborting.\n");
-               goto err_mr_table_free;
+               goto err_mcg_table_free;
        }
 
        err = mlx4_cmd_use_events(dev);
@@ -1746,19 +1754,10 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
                goto err_srq_table_free;
        }
 
-       if (!mlx4_is_slave(dev)) {
-               err = mlx4_init_mcg_table(dev);
-               if (err) {
-                       mlx4_err(dev, "Failed to initialize "
-                                "multicast group table, aborting.\n");
-                       goto err_qp_table_free;
-               }
-       }
-
        err = mlx4_init_counters_table(dev);
        if (err && err != -ENOENT) {
                mlx4_err(dev, "Failed to initialize counters table, aborting.\n");
-               goto err_mcg_table_free;
+               goto err_qp_table_free;
        }
 
        if (!mlx4_is_slave(dev)) {
@@ -1803,9 +1802,6 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
 err_counters_table_free:
        mlx4_cleanup_counters_table(dev);
 
-err_mcg_table_free:
-       mlx4_cleanup_mcg_table(dev);
-
 err_qp_table_free:
        mlx4_cleanup_qp_table(dev);
 
@@ -1821,6 +1817,10 @@ err_cmd_poll:
 err_eq_table_free:
        mlx4_cleanup_eq_table(dev);
 
+err_mcg_table_free:
+       if (!mlx4_is_slave(dev))
+               mlx4_cleanup_mcg_table(dev);
+
 err_mr_table_free:
        mlx4_cleanup_mr_table(dev);
 
@@ -2197,6 +2197,9 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
                        }
                }
 
+               atomic_set(&priv->opreq_count, 0);
+               INIT_WORK(&priv->opreq_task, mlx4_opreq_action);
+
                /*
                 * Now reset the HCA before we touch the PCI capabilities or
                 * attempt a firmware command, since a boot ROM may have left
@@ -2315,12 +2318,12 @@ err_port:
                mlx4_cleanup_port_info(&priv->port[port]);
 
        mlx4_cleanup_counters_table(dev);
-       mlx4_cleanup_mcg_table(dev);
        mlx4_cleanup_qp_table(dev);
        mlx4_cleanup_srq_table(dev);
        mlx4_cleanup_cq_table(dev);
        mlx4_cmd_use_polling(dev);
        mlx4_cleanup_eq_table(dev);
+       mlx4_cleanup_mcg_table(dev);
        mlx4_cleanup_mr_table(dev);
        mlx4_cleanup_xrcd_table(dev);
        mlx4_cleanup_pd_table(dev);
@@ -2403,12 +2406,12 @@ static void mlx4_remove_one(struct pci_dev *pdev)
                                                   RES_TR_FREE_SLAVES_ONLY);
 
                mlx4_cleanup_counters_table(dev);
-               mlx4_cleanup_mcg_table(dev);
                mlx4_cleanup_qp_table(dev);
                mlx4_cleanup_srq_table(dev);
                mlx4_cleanup_cq_table(dev);
                mlx4_cmd_use_polling(dev);
                mlx4_cleanup_eq_table(dev);
+               mlx4_cleanup_mcg_table(dev);
                mlx4_cleanup_mr_table(dev);
                mlx4_cleanup_xrcd_table(dev);
                mlx4_cleanup_pd_table(dev);
index f3e804f2a35f0bd2a9be0e32b7b577bed5518cfa..55f6245efb6cd250f2efda4e6941bdb81b1fd9f3 100644 (file)
 
 #include "mlx4.h"
 
-#define MGM_QPN_MASK       0x00FFFFFF
-#define MGM_BLCK_LB_BIT    30
-
 static const u8 zero_gid[16];  /* automatically initialized to 0 */
 
-struct mlx4_mgm {
-       __be32                  next_gid_index;
-       __be32                  members_count;
-       u32                     reserved[2];
-       u8                      gid[16];
-       __be32                  qp[MLX4_MAX_QP_PER_MGM];
-};
-
 int mlx4_get_mgm_entry_size(struct mlx4_dev *dev)
 {
        return 1 << dev->oper_log_mgm_entry_size;
index 17d9277e33ef978f89828a83e44642986383e06a..348bb8c7d9a70bc58d1c51c4e7aca6ec495dcb8f 100644 (file)
@@ -554,6 +554,17 @@ struct mlx4_mfunc {
        struct mlx4_mfunc_master_ctx    master;
 };
 
+#define MGM_QPN_MASK       0x00FFFFFF
+#define MGM_BLCK_LB_BIT    30
+
+struct mlx4_mgm {
+       __be32                  next_gid_index;
+       __be32                  members_count;
+       u32                     reserved[2];
+       u8                      gid[16];
+       __be32                  qp[MLX4_MAX_QP_PER_MGM];
+};
+
 struct mlx4_cmd {
        struct pci_pool        *pool;
        void __iomem           *hcr;
@@ -802,6 +813,8 @@ struct mlx4_priv {
        u8 virt2phys_pkey[MLX4_MFUNC_MAX][MLX4_MAX_PORTS][MLX4_MAX_PORT_PKEYS];
        __be64                  slave_node_guids[MLX4_MFUNC_MAX];
 
+       atomic_t                opreq_count;
+       struct work_struct      opreq_task;
 };
 
 static inline struct mlx4_priv *mlx4_priv(struct mlx4_dev *dev)
index f984a89c27df1afa64a4cfbc10075479ccd76cf9..dd6876321116a0bbad26a5fbea55aca3731e024f 100644 (file)
@@ -1909,7 +1909,8 @@ static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
        int log_rq_stride = qpc->rq_size_stride & 7;
        int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
        int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
-       int xrc = (be32_to_cpu(qpc->local_qpn) >> 23) & 1;
+       u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
+       int xrc = (ts == MLX4_QP_ST_XRC) ? 1 : 0;
        int sq_size;
        int rq_size;
        int total_pages;
index e393d998be89dd2128fccaef65efd36a92c09d26..94b3bd6fb5fd15a079fe3e47b18cffd6fde4e89d 100644 (file)
@@ -705,7 +705,8 @@ static void ks8842_rx_frame(struct net_device *netdev,
        ks8842_enable_bits(adapter, 0, 1 << 12, REG_QRFCR);
 }
 
-void ks8842_handle_rx(struct net_device *netdev, struct ks8842_adapter *adapter)
+static void ks8842_handle_rx(struct net_device *netdev,
+       struct ks8842_adapter *adapter)
 {
        u16 rx_data = ks8842_read16(adapter, 16, REG_RXMIR) & 0x1fff;
        netdev_dbg(netdev, "%s Entry - rx_data: %d\n", __func__, rx_data);
@@ -715,7 +716,8 @@ void ks8842_handle_rx(struct net_device *netdev, struct ks8842_adapter *adapter)
        }
 }
 
-void ks8842_handle_tx(struct net_device *netdev, struct ks8842_adapter *adapter)
+static void ks8842_handle_tx(struct net_device *netdev,
+       struct ks8842_adapter *adapter)
 {
        u16 sr = ks8842_read16(adapter, 16, REG_TXSR);
        netdev_dbg(netdev, "%s - entry, sr: %x\n", __func__, sr);
@@ -724,7 +726,7 @@ void ks8842_handle_tx(struct net_device *netdev, struct ks8842_adapter *adapter)
                netif_wake_queue(netdev);
 }
 
-void ks8842_handle_rx_overrun(struct net_device *netdev,
+static void ks8842_handle_rx_overrun(struct net_device *netdev,
        struct ks8842_adapter *adapter)
 {
        netdev_dbg(netdev, "%s: entry\n", __func__);
@@ -732,7 +734,7 @@ void ks8842_handle_rx_overrun(struct net_device *netdev,
        netdev->stats.rx_fifo_errors++;
 }
 
-void ks8842_tasklet(unsigned long arg)
+static void ks8842_tasklet(unsigned long arg)
 {
        struct net_device *netdev = (struct net_device *)arg;
        struct ks8842_adapter *adapter = netdev_priv(netdev);
index ac20098b542a37697f8e78989428653d6d29ba67..9f3f5dbe1d303352e6213db9d43aea6e30e6573a 100644 (file)
@@ -688,7 +688,7 @@ static void ks_soft_reset(struct ks_net *ks, unsigned op)
 }
 
 
-void ks_enable_qmu(struct ks_net *ks)
+static void ks_enable_qmu(struct ks_net *ks)
 {
        u16 w;
 
diff --git a/drivers/net/ethernet/moxa/Kconfig b/drivers/net/ethernet/moxa/Kconfig
new file mode 100644 (file)
index 0000000..1731e05
--- /dev/null
@@ -0,0 +1,30 @@
+#
+# MOXART device configuration
+#
+
+config NET_VENDOR_MOXART
+       bool "MOXA ART devices"
+       default y
+       depends on (ARM && ARCH_MOXART)
+       ---help---
+         If you have a network (Ethernet) card belonging to this class, say Y
+         and read the Ethernet-HOWTO, available from
+         <http://www.tldp.org/docs.html#howto>.
+
+         Note that the answer to this question doesn't directly affect the
+         kernel: saying N will just cause the configurator to skip all
+         the questions about MOXA ART devices. If you say Y, you will be asked
+         for your specific card in the following questions.
+
+if NET_VENDOR_MOXART
+
+config ARM_MOXART_ETHER
+       tristate "MOXART Ethernet support"
+       depends on ARM && ARCH_MOXART
+       select NET_CORE
+       ---help---
+         If you wish to compile a kernel for a hardware with MOXA ART SoC and
+         want to use the internal ethernet then you should answer Y to this.
+
+
+endif # NET_VENDOR_MOXART
diff --git a/drivers/net/ethernet/moxa/Makefile b/drivers/net/ethernet/moxa/Makefile
new file mode 100644 (file)
index 0000000..aa3c73e
--- /dev/null
@@ -0,0 +1,5 @@
+#
+# Makefile for the MOXART network device drivers.
+#
+
+obj-$(CONFIG_ARM_MOXART_ETHER) += moxart_ether.o
diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
new file mode 100644 (file)
index 0000000..6eee686
--- /dev/null
@@ -0,0 +1,560 @@
+/* MOXA ART Ethernet (RTL8201CP) driver.
+ *
+ * Copyright (C) 2013 Jonas Jensen
+ *
+ * Jonas Jensen <jonas.jensen@gmail.com>
+ *
+ * Based on code from
+ * Moxa Technology Co., Ltd. <www.moxa.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/dma-mapping.h>
+#include <linux/ethtool.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/crc32.h>
+#include <linux/crc32c.h>
+#include <linux/dma-mapping.h>
+
+#include "moxart_ether.h"
+
+static inline void moxart_emac_write(struct net_device *ndev,
+                                    unsigned int reg, unsigned long value)
+{
+       struct moxart_mac_priv_t *priv = netdev_priv(ndev);
+
+       writel(value, priv->base + reg);
+}
+
+static void moxart_update_mac_address(struct net_device *ndev)
+{
+       moxart_emac_write(ndev, REG_MAC_MS_ADDRESS,
+                         ((ndev->dev_addr[0] << 8) | (ndev->dev_addr[1])));
+       moxart_emac_write(ndev, REG_MAC_MS_ADDRESS + 4,
+                         ((ndev->dev_addr[2] << 24) |
+                          (ndev->dev_addr[3] << 16) |
+                          (ndev->dev_addr[4] << 8) |
+                          (ndev->dev_addr[5])));
+}
+
+static int moxart_set_mac_address(struct net_device *ndev, void *addr)
+{
+       struct sockaddr *address = addr;
+
+       if (!is_valid_ether_addr(address->sa_data))
+               return -EADDRNOTAVAIL;
+
+       memcpy(ndev->dev_addr, address->sa_data, ndev->addr_len);
+       moxart_update_mac_address(ndev);
+
+       return 0;
+}
+
+static void moxart_mac_free_memory(struct net_device *ndev)
+{
+       struct moxart_mac_priv_t *priv = netdev_priv(ndev);
+       int i;
+
+       for (i = 0; i < RX_DESC_NUM; i++)
+               dma_unmap_single(&ndev->dev, priv->rx_mapping[i],
+                                priv->rx_buf_size, DMA_FROM_DEVICE);
+
+       if (priv->tx_desc_base)
+               dma_free_coherent(NULL, TX_REG_DESC_SIZE * TX_DESC_NUM,
+                                 priv->tx_desc_base, priv->tx_base);
+
+       if (priv->rx_desc_base)
+               dma_free_coherent(NULL, RX_REG_DESC_SIZE * RX_DESC_NUM,
+                                 priv->rx_desc_base, priv->rx_base);
+
+       kfree(priv->tx_buf_base);
+       kfree(priv->rx_buf_base);
+}
+
+static void moxart_mac_reset(struct net_device *ndev)
+{
+       struct moxart_mac_priv_t *priv = netdev_priv(ndev);
+
+       writel(SW_RST, priv->base + REG_MAC_CTRL);
+       while (readl(priv->base + REG_MAC_CTRL) & SW_RST)
+               mdelay(10);
+
+       writel(0, priv->base + REG_INTERRUPT_MASK);
+
+       priv->reg_maccr = RX_BROADPKT | FULLDUP | CRC_APD | RX_FTL;
+}
+
+static void moxart_mac_enable(struct net_device *ndev)
+{
+       struct moxart_mac_priv_t *priv = netdev_priv(ndev);
+
+       writel(0x00001010, priv->base + REG_INT_TIMER_CTRL);
+       writel(0x00000001, priv->base + REG_APOLL_TIMER_CTRL);
+       writel(0x00000390, priv->base + REG_DMA_BLEN_CTRL);
+
+       priv->reg_imr |= (RPKT_FINISH_M | XPKT_FINISH_M);
+       writel(priv->reg_imr, priv->base + REG_INTERRUPT_MASK);
+
+       priv->reg_maccr |= (RCV_EN | XMT_EN | RDMA_EN | XDMA_EN);
+       writel(priv->reg_maccr, priv->base + REG_MAC_CTRL);
+}
+
+static void moxart_mac_setup_desc_ring(struct net_device *ndev)
+{
+       struct moxart_mac_priv_t *priv = netdev_priv(ndev);
+       void __iomem *desc;
+       int i;
+
+       for (i = 0; i < TX_DESC_NUM; i++) {
+               desc = priv->tx_desc_base + i * TX_REG_DESC_SIZE;
+               memset(desc, 0, TX_REG_DESC_SIZE);
+
+               priv->tx_buf[i] = priv->tx_buf_base + priv->tx_buf_size * i;
+       }
+       writel(TX_DESC1_END, desc + TX_REG_OFFSET_DESC1);
+
+       priv->tx_head = 0;
+       priv->tx_tail = 0;
+
+       for (i = 0; i < RX_DESC_NUM; i++) {
+               desc = priv->rx_desc_base + i * RX_REG_DESC_SIZE;
+               memset(desc, 0, RX_REG_DESC_SIZE);
+               writel(RX_DESC0_DMA_OWN, desc + RX_REG_OFFSET_DESC0);
+               writel(RX_BUF_SIZE & RX_DESC1_BUF_SIZE_MASK,
+                      desc + RX_REG_OFFSET_DESC1);
+
+               priv->rx_buf[i] = priv->rx_buf_base + priv->rx_buf_size * i;
+               priv->rx_mapping[i] = dma_map_single(&ndev->dev,
+                                                    priv->rx_buf[i],
+                                                    priv->rx_buf_size,
+                                                    DMA_FROM_DEVICE);
+               if (dma_mapping_error(&ndev->dev, priv->rx_mapping[i]))
+                       netdev_err(ndev, "DMA mapping error\n");
+
+               writel(priv->rx_mapping[i],
+                      desc + RX_REG_OFFSET_DESC2 + RX_DESC2_ADDRESS_PHYS);
+               writel(priv->rx_buf[i],
+                      desc + RX_REG_OFFSET_DESC2 + RX_DESC2_ADDRESS_VIRT);
+       }
+       writel(RX_DESC1_END, desc + RX_REG_OFFSET_DESC1);
+
+       priv->rx_head = 0;
+
+       /* reset the MAC controler TX/RX desciptor base address */
+       writel(priv->tx_base, priv->base + REG_TXR_BASE_ADDRESS);
+       writel(priv->rx_base, priv->base + REG_RXR_BASE_ADDRESS);
+}
+
+static int moxart_mac_open(struct net_device *ndev)
+{
+       struct moxart_mac_priv_t *priv = netdev_priv(ndev);
+
+       if (!is_valid_ether_addr(ndev->dev_addr))
+               return -EADDRNOTAVAIL;
+
+       napi_enable(&priv->napi);
+
+       moxart_mac_reset(ndev);
+       moxart_update_mac_address(ndev);
+       moxart_mac_setup_desc_ring(ndev);
+       moxart_mac_enable(ndev);
+       netif_start_queue(ndev);
+
+       netdev_dbg(ndev, "%s: IMR=0x%x, MACCR=0x%x\n",
+                  __func__, readl(priv->base + REG_INTERRUPT_MASK),
+                  readl(priv->base + REG_MAC_CTRL));
+
+       return 0;
+}
+
+static int moxart_mac_stop(struct net_device *ndev)
+{
+       struct moxart_mac_priv_t *priv = netdev_priv(ndev);
+
+       napi_disable(&priv->napi);
+
+       netif_stop_queue(ndev);
+
+       /* disable all interrupts */
+       writel(0, priv->base + REG_INTERRUPT_MASK);
+
+       /* disable all functions */
+       writel(0, priv->base + REG_MAC_CTRL);
+
+       return 0;
+}
+
+static int moxart_rx_poll(struct napi_struct *napi, int budget)
+{
+       struct moxart_mac_priv_t *priv = container_of(napi,
+                                                     struct moxart_mac_priv_t,
+                                                     napi);
+       struct net_device *ndev = priv->ndev;
+       struct sk_buff *skb;
+       void __iomem *desc;
+       unsigned int desc0, len;
+       int rx_head = priv->rx_head;
+       int rx = 0;
+
+       while (1) {
+               desc = priv->rx_desc_base + (RX_REG_DESC_SIZE * rx_head);
+               desc0 = readl(desc + RX_REG_OFFSET_DESC0);
+
+               if (desc0 & RX_DESC0_DMA_OWN)
+                       break;
+
+               if (desc0 & (RX_DESC0_ERR | RX_DESC0_CRC_ERR | RX_DESC0_FTL |
+                            RX_DESC0_RUNT | RX_DESC0_ODD_NB)) {
+                       net_dbg_ratelimited("packet error\n");
+                       priv->stats.rx_dropped++;
+                       priv->stats.rx_errors++;
+                       continue;
+               }
+
+               len = desc0 & RX_DESC0_FRAME_LEN_MASK;
+
+               if (len > RX_BUF_SIZE)
+                       len = RX_BUF_SIZE;
+
+               skb = build_skb(priv->rx_buf[rx_head], priv->rx_buf_size);
+               if (unlikely(!skb)) {
+                       net_dbg_ratelimited("build_skb failed\n");
+                       priv->stats.rx_dropped++;
+                       priv->stats.rx_errors++;
+               }
+
+               skb_put(skb, len);
+               skb->protocol = eth_type_trans(skb, ndev);
+               napi_gro_receive(&priv->napi, skb);
+               rx++;
+
+               ndev->last_rx = jiffies;
+               priv->stats.rx_packets++;
+               priv->stats.rx_bytes += len;
+               if (desc0 & RX_DESC0_MULTICAST)
+                       priv->stats.multicast++;
+
+               writel(RX_DESC0_DMA_OWN, desc + RX_REG_OFFSET_DESC0);
+
+               rx_head = RX_NEXT(rx_head);
+               priv->rx_head = rx_head;
+
+               if (rx >= budget)
+                       break;
+       }
+
+       if (rx < budget) {
+               napi_gro_flush(napi, false);
+               __napi_complete(napi);
+       }
+
+       priv->reg_imr |= RPKT_FINISH_M;
+       writel(priv->reg_imr, priv->base + REG_INTERRUPT_MASK);
+
+       return rx;
+}
+
+static void moxart_tx_finished(struct net_device *ndev)
+{
+       struct moxart_mac_priv_t *priv = netdev_priv(ndev);
+       unsigned tx_head = priv->tx_head;
+       unsigned tx_tail = priv->tx_tail;
+
+       while (tx_tail != tx_head) {
+               dma_unmap_single(&ndev->dev, priv->tx_mapping[tx_tail],
+                                priv->tx_len[tx_tail], DMA_TO_DEVICE);
+
+               priv->stats.tx_packets++;
+               priv->stats.tx_bytes += priv->tx_skb[tx_tail]->len;
+
+               dev_kfree_skb_irq(priv->tx_skb[tx_tail]);
+               priv->tx_skb[tx_tail] = NULL;
+
+               tx_tail = TX_NEXT(tx_tail);
+       }
+       priv->tx_tail = tx_tail;
+}
+
+static irqreturn_t moxart_mac_interrupt(int irq, void *dev_id)
+{
+       struct net_device *ndev = (struct net_device *) dev_id;
+       struct moxart_mac_priv_t *priv = netdev_priv(ndev);
+       unsigned int ists = readl(priv->base + REG_INTERRUPT_STATUS);
+
+       if (ists & XPKT_OK_INT_STS)
+               moxart_tx_finished(ndev);
+
+       if (ists & RPKT_FINISH) {
+               if (napi_schedule_prep(&priv->napi)) {
+                       priv->reg_imr &= ~RPKT_FINISH_M;
+                       writel(priv->reg_imr, priv->base + REG_INTERRUPT_MASK);
+                       __napi_schedule(&priv->napi);
+               }
+       }
+
+       return IRQ_HANDLED;
+}
+
+static int moxart_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+       struct moxart_mac_priv_t *priv = netdev_priv(ndev);
+       void __iomem *desc;
+       unsigned int len;
+       unsigned int tx_head = priv->tx_head;
+       u32 txdes1;
+       int ret = NETDEV_TX_BUSY;
+
+       desc = priv->tx_desc_base + (TX_REG_DESC_SIZE * tx_head);
+
+       spin_lock_irq(&priv->txlock);
+       if (readl(desc + TX_REG_OFFSET_DESC0) & TX_DESC0_DMA_OWN) {
+               net_dbg_ratelimited("no TX space for packet\n");
+               priv->stats.tx_dropped++;
+               goto out_unlock;
+       }
+
+       len = skb->len > TX_BUF_SIZE ? TX_BUF_SIZE : skb->len;
+
+       priv->tx_mapping[tx_head] = dma_map_single(&ndev->dev, skb->data,
+                                                  len, DMA_TO_DEVICE);
+       if (dma_mapping_error(&ndev->dev, priv->tx_mapping[tx_head])) {
+               netdev_err(ndev, "DMA mapping error\n");
+               goto out_unlock;
+       }
+
+       priv->tx_len[tx_head] = len;
+       priv->tx_skb[tx_head] = skb;
+
+       writel(priv->tx_mapping[tx_head],
+              desc + TX_REG_OFFSET_DESC2 + TX_DESC2_ADDRESS_PHYS);
+       writel(skb->data,
+              desc + TX_REG_OFFSET_DESC2 + TX_DESC2_ADDRESS_VIRT);
+
+       if (skb->len < ETH_ZLEN) {
+               memset(&skb->data[skb->len],
+                      0, ETH_ZLEN - skb->len);
+               len = ETH_ZLEN;
+       }
+
+       txdes1 = readl(desc + TX_REG_OFFSET_DESC1);
+       txdes1 |= TX_DESC1_LTS | TX_DESC1_FTS;
+       txdes1 &= ~(TX_DESC1_FIFO_COMPLETE | TX_DESC1_INTR_COMPLETE);
+       txdes1 |= (len & TX_DESC1_BUF_SIZE_MASK);
+       writel(txdes1, desc + TX_REG_OFFSET_DESC1);
+       writel(TX_DESC0_DMA_OWN, desc + TX_REG_OFFSET_DESC0);
+
+       /* start to send packet */
+       writel(0xffffffff, priv->base + REG_TX_POLL_DEMAND);
+
+       priv->tx_head = TX_NEXT(tx_head);
+
+       ndev->trans_start = jiffies;
+       ret = NETDEV_TX_OK;
+out_unlock:
+       spin_unlock_irq(&priv->txlock);
+
+       return ret;
+}
+
+static struct net_device_stats *moxart_mac_get_stats(struct net_device *ndev)
+{
+       struct moxart_mac_priv_t *priv = netdev_priv(ndev);
+
+       return &priv->stats;
+}
+
+static void moxart_mac_setmulticast(struct net_device *ndev)
+{
+       struct moxart_mac_priv_t *priv = netdev_priv(ndev);
+       struct netdev_hw_addr *ha;
+       int crc_val;
+
+       netdev_for_each_mc_addr(ha, ndev) {
+               crc_val = crc32_le(~0, ha->addr, ETH_ALEN);
+               crc_val = (crc_val >> 26) & 0x3f;
+               if (crc_val >= 32) {
+                       writel(readl(priv->base + REG_MCAST_HASH_TABLE1) |
+                              (1UL << (crc_val - 32)),
+                              priv->base + REG_MCAST_HASH_TABLE1);
+               } else {
+                       writel(readl(priv->base + REG_MCAST_HASH_TABLE0) |
+                              (1UL << crc_val),
+                              priv->base + REG_MCAST_HASH_TABLE0);
+               }
+       }
+}
+
+static void moxart_mac_set_rx_mode(struct net_device *ndev)
+{
+       struct moxart_mac_priv_t *priv = netdev_priv(ndev);
+
+       spin_lock_irq(&priv->txlock);
+
+       (ndev->flags & IFF_PROMISC) ? (priv->reg_maccr |= RCV_ALL) :
+                                     (priv->reg_maccr &= ~RCV_ALL);
+
+       (ndev->flags & IFF_ALLMULTI) ? (priv->reg_maccr |= RX_MULTIPKT) :
+                                      (priv->reg_maccr &= ~RX_MULTIPKT);
+
+       if ((ndev->flags & IFF_MULTICAST) && netdev_mc_count(ndev)) {
+               priv->reg_maccr |= HT_MULTI_EN;
+               moxart_mac_setmulticast(ndev);
+       } else {
+               priv->reg_maccr &= ~HT_MULTI_EN;
+       }
+
+       writel(priv->reg_maccr, priv->base + REG_MAC_CTRL);
+
+       spin_unlock_irq(&priv->txlock);
+}
+
+static struct net_device_ops moxart_netdev_ops = {
+       .ndo_open               = moxart_mac_open,
+       .ndo_stop               = moxart_mac_stop,
+       .ndo_start_xmit         = moxart_mac_start_xmit,
+       .ndo_get_stats          = moxart_mac_get_stats,
+       .ndo_set_rx_mode        = moxart_mac_set_rx_mode,
+       .ndo_set_mac_address    = moxart_set_mac_address,
+       .ndo_validate_addr      = eth_validate_addr,
+       .ndo_change_mtu         = eth_change_mtu,
+};
+
+static int moxart_mac_probe(struct platform_device *pdev)
+{
+       struct device *p_dev = &pdev->dev;
+       struct device_node *node = p_dev->of_node;
+       struct net_device *ndev;
+       struct moxart_mac_priv_t *priv;
+       struct resource *res;
+       unsigned int irq;
+       int ret;
+
+       ndev = alloc_etherdev(sizeof(struct moxart_mac_priv_t));
+       if (!ndev)
+               return -ENOMEM;
+
+       irq = irq_of_parse_and_map(node, 0);
+       if (irq <= 0) {
+               netdev_err(ndev, "irq_of_parse_and_map failed\n");
+               return -EINVAL;
+       }
+
+       priv = netdev_priv(ndev);
+       priv->ndev = ndev;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       ndev->base_addr = res->start;
+       priv->base = devm_ioremap_resource(p_dev, res);
+       ret = IS_ERR(priv->base);
+       if (ret) {
+               dev_err(p_dev, "devm_ioremap_resource failed\n");
+               goto init_fail;
+       }
+
+       spin_lock_init(&priv->txlock);
+
+       priv->tx_buf_size = TX_BUF_SIZE;
+       priv->rx_buf_size = RX_BUF_SIZE +
+                           SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+       priv->tx_desc_base = dma_alloc_coherent(NULL, TX_REG_DESC_SIZE *
+                                               TX_DESC_NUM, &priv->tx_base,
+                                               GFP_DMA | GFP_KERNEL);
+       if (priv->tx_desc_base == NULL)
+               goto init_fail;
+
+       priv->rx_desc_base = dma_alloc_coherent(NULL, RX_REG_DESC_SIZE *
+                                               RX_DESC_NUM, &priv->rx_base,
+                                               GFP_DMA | GFP_KERNEL);
+       if (priv->rx_desc_base == NULL)
+               goto init_fail;
+
+       priv->tx_buf_base = kmalloc(priv->tx_buf_size * TX_DESC_NUM,
+                                   GFP_ATOMIC);
+       if (!priv->tx_buf_base)
+               goto init_fail;
+
+       priv->rx_buf_base = kmalloc(priv->rx_buf_size * RX_DESC_NUM,
+                                   GFP_ATOMIC);
+       if (!priv->rx_buf_base)
+               goto init_fail;
+
+       platform_set_drvdata(pdev, ndev);
+
+       ret = devm_request_irq(p_dev, irq, moxart_mac_interrupt, 0,
+                              pdev->name, ndev);
+       if (ret) {
+               netdev_err(ndev, "devm_request_irq failed\n");
+               goto init_fail;
+       }
+
+       ether_setup(ndev);
+       ndev->netdev_ops = &moxart_netdev_ops;
+       netif_napi_add(ndev, &priv->napi, moxart_rx_poll, RX_DESC_NUM);
+       ndev->priv_flags |= IFF_UNICAST_FLT;
+       ndev->irq = irq;
+
+       SET_NETDEV_DEV(ndev, &pdev->dev);
+
+       ret = register_netdev(ndev);
+       if (ret) {
+               free_netdev(ndev);
+               goto init_fail;
+       }
+
+       netdev_dbg(ndev, "%s: IRQ=%d address=%pM\n",
+                  __func__, ndev->irq, ndev->dev_addr);
+
+       return 0;
+
+init_fail:
+       netdev_err(ndev, "init failed\n");
+       moxart_mac_free_memory(ndev);
+
+       return ret;
+}
+
+static int moxart_remove(struct platform_device *pdev)
+{
+       struct net_device *ndev = platform_get_drvdata(pdev);
+
+       unregister_netdev(ndev);
+       free_irq(ndev->irq, ndev);
+       moxart_mac_free_memory(ndev);
+       platform_set_drvdata(pdev, NULL);
+       free_netdev(ndev);
+
+       return 0;
+}
+
+static const struct of_device_id moxart_mac_match[] = {
+       { .compatible = "moxa,moxart-mac" },
+       { }
+};
+
+struct __initdata platform_driver moxart_mac_driver = {
+       .probe  = moxart_mac_probe,
+       .remove = moxart_remove,
+       .driver = {
+               .name           = "moxart-ethernet",
+               .owner          = THIS_MODULE,
+               .of_match_table = moxart_mac_match,
+       },
+};
+module_platform_driver(moxart_mac_driver);
+
+MODULE_DESCRIPTION("MOXART RTL8201CP Ethernet driver");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Jonas Jensen <jonas.jensen@gmail.com>");
diff --git a/drivers/net/ethernet/moxa/moxart_ether.h b/drivers/net/ethernet/moxa/moxart_ether.h
new file mode 100644 (file)
index 0000000..2be9280
--- /dev/null
@@ -0,0 +1,330 @@
+/* MOXA ART Ethernet (RTL8201CP) driver.
+ *
+ * Copyright (C) 2013 Jonas Jensen
+ *
+ * Jonas Jensen <jonas.jensen@gmail.com>
+ *
+ * Based on code from
+ * Moxa Technology Co., Ltd. <www.moxa.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef _MOXART_ETHERNET_H
+#define _MOXART_ETHERNET_H
+
+#define TX_REG_OFFSET_DESC0    0
+#define TX_REG_OFFSET_DESC1    4
+#define TX_REG_OFFSET_DESC2    8
+#define TX_REG_DESC_SIZE       16
+
+#define RX_REG_OFFSET_DESC0    0
+#define RX_REG_OFFSET_DESC1    4
+#define RX_REG_OFFSET_DESC2    8
+#define RX_REG_DESC_SIZE       16
+
+#define TX_DESC0_PKT_LATE_COL  0x1             /* abort, late collision */
+#define TX_DESC0_RX_PKT_EXS_COL        0x2             /* abort, >16 collisions */
+#define TX_DESC0_DMA_OWN       0x80000000      /* owned by controller */
+#define TX_DESC1_BUF_SIZE_MASK 0x7ff
+#define TX_DESC1_LTS           0x8000000       /* last TX packet */
+#define TX_DESC1_FTS           0x10000000      /* first TX packet */
+#define TX_DESC1_FIFO_COMPLETE 0x20000000
+#define TX_DESC1_INTR_COMPLETE 0x40000000
+#define TX_DESC1_END           0x80000000
+#define TX_DESC2_ADDRESS_PHYS  0
+#define TX_DESC2_ADDRESS_VIRT  4
+
+#define RX_DESC0_FRAME_LEN     0
+#define RX_DESC0_FRAME_LEN_MASK        0x7FF
+#define RX_DESC0_MULTICAST     0x10000
+#define RX_DESC0_BROADCAST     0x20000
+#define RX_DESC0_ERR           0x40000
+#define RX_DESC0_CRC_ERR       0x80000
+#define RX_DESC0_FTL           0x100000
+#define RX_DESC0_RUNT          0x200000        /* packet less than 64 bytes */
+#define RX_DESC0_ODD_NB                0x400000        /* receive odd nibbles */
+#define RX_DESC0_LRS           0x10000000      /* last receive segment */
+#define RX_DESC0_FRS           0x20000000      /* first receive segment */
+#define RX_DESC0_DMA_OWN       0x80000000
+#define RX_DESC1_BUF_SIZE_MASK 0x7FF
+#define RX_DESC1_END           0x80000000
+#define RX_DESC2_ADDRESS_PHYS  0
+#define RX_DESC2_ADDRESS_VIRT  4
+
+#define TX_DESC_NUM            64
+#define TX_DESC_NUM_MASK       (TX_DESC_NUM-1)
+#define TX_NEXT(N)             (((N) + 1) & (TX_DESC_NUM_MASK))
+#define TX_BUF_SIZE            1600
+#define TX_BUF_SIZE_MAX                (TX_DESC1_BUF_SIZE_MASK+1)
+
+#define RX_DESC_NUM            64
+#define RX_DESC_NUM_MASK       (RX_DESC_NUM-1)
+#define RX_NEXT(N)             (((N) + 1) & (RX_DESC_NUM_MASK))
+#define RX_BUF_SIZE            1600
+#define RX_BUF_SIZE_MAX                (RX_DESC1_BUF_SIZE_MASK+1)
+
+#define REG_INTERRUPT_STATUS   0
+#define REG_INTERRUPT_MASK     4
+#define REG_MAC_MS_ADDRESS     8
+#define REG_MAC_LS_ADDRESS     12
+#define REG_MCAST_HASH_TABLE0  16
+#define REG_MCAST_HASH_TABLE1  20
+#define REG_TX_POLL_DEMAND     24
+#define REG_RX_POLL_DEMAND     28
+#define REG_TXR_BASE_ADDRESS   32
+#define REG_RXR_BASE_ADDRESS   36
+#define REG_INT_TIMER_CTRL     40
+#define REG_APOLL_TIMER_CTRL   44
+#define REG_DMA_BLEN_CTRL      48
+#define REG_RESERVED1          52
+#define REG_MAC_CTRL           136
+#define REG_MAC_STATUS         140
+#define REG_PHY_CTRL           144
+#define REG_PHY_WRITE_DATA     148
+#define REG_FLOW_CTRL          152
+#define REG_BACK_PRESSURE      156
+#define REG_RESERVED2          160
+#define REG_TEST_SEED          196
+#define REG_DMA_FIFO_STATE     200
+#define REG_TEST_MODE          204
+#define REG_RESERVED3          208
+#define REG_TX_COL_COUNTER     212
+#define REG_RPF_AEP_COUNTER    216
+#define REG_XM_PG_COUNTER      220
+#define REG_RUNT_TLC_COUNTER   224
+#define REG_CRC_FTL_COUNTER    228
+#define REG_RLC_RCC_COUNTER    232
+#define REG_BROC_COUNTER       236
+#define REG_MULCA_COUNTER      240
+#define REG_RP_COUNTER         244
+#define REG_XP_COUNTER         248
+
+#define REG_PHY_CTRL_OFFSET    0x0
+#define REG_PHY_STATUS         0x1
+#define REG_PHY_ID1            0x2
+#define REG_PHY_ID2            0x3
+#define REG_PHY_ANA            0x4
+#define REG_PHY_ANLPAR         0x5
+#define REG_PHY_ANE            0x6
+#define REG_PHY_ECTRL1         0x10
+#define REG_PHY_QPDS           0x11
+#define REG_PHY_10BOP          0x12
+#define REG_PHY_ECTRL2         0x13
+#define REG_PHY_FTMAC100_WRITE 0x8000000
+#define REG_PHY_FTMAC100_READ  0x4000000
+
+/* REG_INTERRUPT_STATUS */
+#define RPKT_FINISH            BIT(0)  /* DMA data received */
+#define NORXBUF                        BIT(1)  /* receive buffer unavailable */
+#define XPKT_FINISH            BIT(2)  /* DMA moved data to TX FIFO */
+#define NOTXBUF                        BIT(3)  /* transmit buffer unavailable */
+#define XPKT_OK_INT_STS                BIT(4)  /* transmit to ethernet success */
+#define XPKT_LOST_INT_STS      BIT(5)  /* transmit ethernet lost (collision) */
+#define RPKT_SAV               BIT(6)  /* FIFO receive success */
+#define RPKT_LOST_INT_STS      BIT(7)  /* FIFO full, receive failed */
+#define AHB_ERR                        BIT(8)  /* AHB error */
+#define PHYSTS_CHG             BIT(9)  /* PHY link status change */
+
+/* REG_INTERRUPT_MASK */
+#define RPKT_FINISH_M          BIT(0)
+#define NORXBUF_M              BIT(1)
+#define XPKT_FINISH_M          BIT(2)
+#define NOTXBUF_M              BIT(3)
+#define XPKT_OK_M              BIT(4)
+#define XPKT_LOST_M            BIT(5)
+#define RPKT_SAV_M             BIT(6)
+#define RPKT_LOST_M            BIT(7)
+#define AHB_ERR_M              BIT(8)
+#define PHYSTS_CHG_M           BIT(9)
+
+/* REG_MAC_MS_ADDRESS */
+#define MAC_MADR_MASK          0xffff  /* 2 MSB MAC address */
+
+/* REG_INT_TIMER_CTRL */
+#define TXINT_TIME_SEL         BIT(15) /* TX cycle time period */
+#define TXINT_THR_MASK         0x7000
+#define TXINT_CNT_MASK         0xf00
+#define RXINT_TIME_SEL         BIT(7)  /* RX cycle time period */
+#define RXINT_THR_MASK         0x70
+#define RXINT_CNT_MASK         0xF
+
+/* REG_APOLL_TIMER_CTRL */
+#define TXPOLL_TIME_SEL                BIT(12) /* TX poll time period */
+#define TXPOLL_CNT_MASK                0xf00
+#define TXPOLL_CNT_SHIFT_BIT   8
+#define RXPOLL_TIME_SEL                BIT(4)  /* RX poll time period */
+#define RXPOLL_CNT_MASK                0xF
+#define RXPOLL_CNT_SHIFT_BIT   0
+
+/* REG_DMA_BLEN_CTRL */
+#define RX_THR_EN              BIT(9)  /* RX FIFO threshold arbitration */
+#define RXFIFO_HTHR_MASK       0x1c0
+#define RXFIFO_LTHR_MASK       0x38
+#define INCR16_EN              BIT(2)  /* AHB bus INCR16 burst command */
+#define INCR8_EN               BIT(1)  /* AHB bus INCR8 burst command */
+#define INCR4_EN               BIT(0)  /* AHB bus INCR4 burst command */
+
+/* REG_MAC_CTRL */
+#define RX_BROADPKT            BIT(17) /* receive broadcast packets */
+#define RX_MULTIPKT            BIT(16) /* receive all multicast packets */
+#define FULLDUP                        BIT(15) /* full duplex */
+#define CRC_APD                        BIT(14) /* append CRC to transmitted packet */
+#define RCV_ALL                        BIT(12) /* ignore incoming packet destination */
+#define RX_FTL                 BIT(11) /* accept packets larger than 1518 B */
+#define RX_RUNT                        BIT(10) /* accept packets smaller than 64 B */
+#define HT_MULTI_EN            BIT(9)  /* accept on hash and mcast pass */
+#define RCV_EN                 BIT(8)  /* receiver enable */
+#define ENRX_IN_HALFTX         BIT(6)  /* enable receive in half duplex mode */
+#define XMT_EN                 BIT(5)  /* transmit enable */
+#define CRC_DIS                        BIT(4)  /* disable CRC check when receiving */
+#define LOOP_EN                        BIT(3)  /* internal loop-back */
+#define SW_RST                 BIT(2)  /* software reset, last 64 AHB clocks */
+#define RDMA_EN                        BIT(1)  /* enable receive DMA chan */
+#define XDMA_EN                        BIT(0)  /* enable transmit DMA chan */
+
+/* REG_MAC_STATUS */
+#define COL_EXCEED             BIT(11) /* more than 16 collisions */
+#define LATE_COL               BIT(10) /* transmit late collision detected */
+#define XPKT_LOST              BIT(9)  /* transmit to ethernet lost */
+#define XPKT_OK                        BIT(8)  /* transmit to ethernet success */
+#define RUNT_MAC_STS           BIT(7)  /* receive runt detected */
+#define FTL_MAC_STS            BIT(6)  /* receive frame too long detected */
+#define CRC_ERR_MAC_STS                BIT(5)
+#define RPKT_LOST              BIT(4)  /* RX FIFO full, receive failed */
+#define RPKT_SAVE              BIT(3)  /* RX FIFO receive success */
+#define COL                    BIT(2)  /* collision, incoming packet dropped */
+#define MCPU_BROADCAST         BIT(1)
+#define MCPU_MULTICAST         BIT(0)
+
+/* REG_PHY_CTRL */
+#define MIIWR                  BIT(27) /* init write sequence (auto cleared)*/
+#define MIIRD                  BIT(26)
+#define REGAD_MASK             0x3e00000
+#define PHYAD_MASK             0x1f0000
+#define MIIRDATA_MASK          0xffff
+
+/* REG_PHY_WRITE_DATA */
+#define MIIWDATA_MASK          0xffff
+
+/* REG_FLOW_CTRL */
+#define PAUSE_TIME_MASK                0xffff0000
+#define FC_HIGH_MASK           0xf000
+#define FC_LOW_MASK            0xf00
+#define RX_PAUSE               BIT(4)  /* receive pause frame */
+#define TX_PAUSED              BIT(3)  /* transmit pause due to receive */
+#define FCTHR_EN               BIT(2)  /* enable threshold mode. */
+#define TX_PAUSE               BIT(1)  /* transmit pause frame */
+#define FC_EN                  BIT(0)  /* flow control mode enable */
+
+/* REG_BACK_PRESSURE */
+#define BACKP_LOW_MASK         0xf00
+#define BACKP_JAM_LEN_MASK     0xf0
+#define BACKP_MODE             BIT(1)  /* address mode */
+#define BACKP_ENABLE           BIT(0)
+
+/* REG_TEST_SEED */
+#define TEST_SEED_MASK         0x3fff
+
+/* REG_DMA_FIFO_STATE */
+#define TX_DMA_REQUEST         BIT(31)
+#define RX_DMA_REQUEST         BIT(30)
+#define TX_DMA_GRANT           BIT(29)
+#define RX_DMA_GRANT           BIT(28)
+#define TX_FIFO_EMPTY          BIT(27)
+#define RX_FIFO_EMPTY          BIT(26)
+#define TX_DMA2_SM_MASK                0x7000
+#define TX_DMA1_SM_MASK                0xf00
+#define RX_DMA2_SM_MASK                0x70
+#define RX_DMA1_SM_MASK                0xF
+
+/* REG_TEST_MODE */
+#define SINGLE_PKT             BIT(26) /* single packet mode */
+#define PTIMER_TEST            BIT(25) /* automatic polling timer test mode */
+#define ITIMER_TEST            BIT(24) /* interrupt timer test mode */
+#define TEST_SEED_SELECT       BIT(22)
+#define SEED_SELECT            BIT(21)
+#define TEST_MODE              BIT(20)
+#define TEST_TIME_MASK         0xffc00
+#define TEST_EXCEL_MASK                0x3e0
+
+/* REG_TX_COL_COUNTER */
+#define TX_MCOL_MASK           0xffff0000
+#define TX_MCOL_SHIFT_BIT      16
+#define TX_SCOL_MASK           0xffff
+#define TX_SCOL_SHIFT_BIT      0
+
+/* REG_RPF_AEP_COUNTER */
+#define RPF_MASK               0xffff0000
+#define RPF_SHIFT_BIT          16
+#define AEP_MASK               0xffff
+#define AEP_SHIFT_BIT          0
+
+/* REG_XM_PG_COUNTER */
+#define XM_MASK                        0xffff0000
+#define XM_SHIFT_BIT           16
+#define PG_MASK                        0xffff
+#define PG_SHIFT_BIT           0
+
+/* REG_RUNT_TLC_COUNTER */
+#define RUNT_CNT_MASK          0xffff0000
+#define RUNT_CNT_SHIFT_BIT     16
+#define TLCC_MASK              0xffff
+#define TLCC_SHIFT_BIT         0
+
+/* REG_CRC_FTL_COUNTER */
+#define CRCER_CNT_MASK         0xffff0000
+#define CRCER_CNT_SHIFT_BIT    16
+#define FTL_CNT_MASK           0xffff
+#define FTL_CNT_SHIFT_BIT      0
+
+/* REG_RLC_RCC_COUNTER */
+#define RLC_MASK               0xffff0000
+#define RLC_SHIFT_BIT          16
+#define RCC_MASK               0xffff
+#define RCC_SHIFT_BIT          0
+
+/* REG_PHY_STATUS */
+#define AN_COMPLETE            0x20
+#define LINK_STATUS            0x4
+
+struct moxart_mac_priv_t {
+       void __iomem *base;
+       struct net_device_stats stats;
+       unsigned int reg_maccr;
+       unsigned int reg_imr;
+       struct napi_struct napi;
+       struct net_device *ndev;
+
+       dma_addr_t rx_base;
+       dma_addr_t rx_mapping[RX_DESC_NUM];
+       void __iomem *rx_desc_base;
+       unsigned char *rx_buf_base;
+       unsigned char *rx_buf[RX_DESC_NUM];
+       unsigned int rx_head;
+       unsigned int rx_buf_size;
+
+       dma_addr_t tx_base;
+       dma_addr_t tx_mapping[TX_DESC_NUM];
+       void __iomem *tx_desc_base;
+       unsigned char *tx_buf_base;
+       unsigned char *tx_buf[RX_DESC_NUM];
+       unsigned int tx_head;
+       unsigned int tx_buf_size;
+
+       spinlock_t txlock;
+       unsigned int tx_len[TX_DESC_NUM];
+       struct sk_buff *tx_skb[TX_DESC_NUM];
+       unsigned int tx_tail;
+};
+
+#if TX_BUF_SIZE >= TX_BUF_SIZE_MAX
+#error MOXA ART Ethernet device driver TX buffer is too large!
+#endif
+#if RX_BUF_SIZE >= RX_BUF_SIZE_MAX
+#error MOXA ART Ethernet device driver RX buffer is too large!
+#endif
+
+#endif
index 967bae8b85c5f6c9bb0a720d97b3c0c17c940430..50a1d4a04eb010259ca5f33d34d92e877cc25e1b 100644 (file)
@@ -74,6 +74,7 @@
 #ifdef CONFIG_MTRR
 #include <asm/mtrr.h>
 #endif
+#include <net/busy_poll.h>
 
 #include "myri10ge_mcp.h"
 #include "myri10ge_mcp_gen_header.h"
@@ -194,6 +195,21 @@ struct myri10ge_slice_state {
        int cpu;
        __be32 __iomem *dca_tag;
 #endif
+#ifdef CONFIG_NET_RX_BUSY_POLL
+       unsigned int state;
+#define SLICE_STATE_IDLE       0
+#define SLICE_STATE_NAPI       1       /* NAPI owns this slice */
+#define SLICE_STATE_POLL       2       /* poll owns this slice */
+#define SLICE_LOCKED (SLICE_STATE_NAPI | SLICE_STATE_POLL)
+#define SLICE_STATE_NAPI_YIELD 4       /* NAPI yielded this slice */
+#define SLICE_STATE_POLL_YIELD 8       /* poll yielded this slice */
+#define SLICE_USER_PEND (SLICE_STATE_POLL | SLICE_STATE_POLL_YIELD)
+       spinlock_t lock;
+       unsigned long lock_napi_yield;
+       unsigned long lock_poll_yield;
+       unsigned long busy_poll_miss;
+       unsigned long busy_poll_cnt;
+#endif /* CONFIG_NET_RX_BUSY_POLL */
        char irq_desc[32];
 };
 
@@ -244,7 +260,7 @@ struct myri10ge_priv {
        int fw_ver_minor;
        int fw_ver_tiny;
        int adopted_rx_filter_bug;
-       u8 mac_addr[6];         /* eeprom mac address */
+       u8 mac_addr[ETH_ALEN];          /* eeprom mac address */
        unsigned long serial_number;
        int vendor_specific_offset;
        int fw_multicast_support;
@@ -909,6 +925,92 @@ abort:
        return status;
 }
 
+#ifdef CONFIG_NET_RX_BUSY_POLL
+static inline void myri10ge_ss_init_lock(struct myri10ge_slice_state *ss)
+{
+       spin_lock_init(&ss->lock);
+       ss->state = SLICE_STATE_IDLE;
+}
+
+static inline bool myri10ge_ss_lock_napi(struct myri10ge_slice_state *ss)
+{
+       int rc = true;
+       spin_lock(&ss->lock);
+       if ((ss->state & SLICE_LOCKED)) {
+               WARN_ON((ss->state & SLICE_STATE_NAPI));
+               ss->state |= SLICE_STATE_NAPI_YIELD;
+               rc = false;
+               ss->lock_napi_yield++;
+       } else
+               ss->state = SLICE_STATE_NAPI;
+       spin_unlock(&ss->lock);
+       return rc;
+}
+
+static inline void myri10ge_ss_unlock_napi(struct myri10ge_slice_state *ss)
+{
+       spin_lock(&ss->lock);
+       WARN_ON((ss->state & (SLICE_STATE_POLL | SLICE_STATE_NAPI_YIELD)));
+       ss->state = SLICE_STATE_IDLE;
+       spin_unlock(&ss->lock);
+}
+
+static inline bool myri10ge_ss_lock_poll(struct myri10ge_slice_state *ss)
+{
+       int rc = true;
+       spin_lock_bh(&ss->lock);
+       if ((ss->state & SLICE_LOCKED)) {
+               ss->state |= SLICE_STATE_POLL_YIELD;
+               rc = false;
+               ss->lock_poll_yield++;
+       } else
+               ss->state |= SLICE_STATE_POLL;
+       spin_unlock_bh(&ss->lock);
+       return rc;
+}
+
+static inline void myri10ge_ss_unlock_poll(struct myri10ge_slice_state *ss)
+{
+       spin_lock_bh(&ss->lock);
+       WARN_ON((ss->state & SLICE_STATE_NAPI));
+       ss->state = SLICE_STATE_IDLE;
+       spin_unlock_bh(&ss->lock);
+}
+
+static inline bool myri10ge_ss_busy_polling(struct myri10ge_slice_state *ss)
+{
+       WARN_ON(!(ss->state & SLICE_LOCKED));
+       return (ss->state & SLICE_USER_PEND);
+}
+#else /* CONFIG_NET_RX_BUSY_POLL */
+static inline void myri10ge_ss_init_lock(struct myri10ge_slice_state *ss)
+{
+}
+
+static inline bool myri10ge_ss_lock_napi(struct myri10ge_slice_state *ss)
+{
+       return false;
+}
+
+static inline void myri10ge_ss_unlock_napi(struct myri10ge_slice_state *ss)
+{
+}
+
+static inline bool myri10ge_ss_lock_poll(struct myri10ge_slice_state *ss)
+{
+       return false;
+}
+
+static inline void myri10ge_ss_unlock_poll(struct myri10ge_slice_state *ss)
+{
+}
+
+static inline bool myri10ge_ss_busy_polling(struct myri10ge_slice_state *ss)
+{
+       return false;
+}
+#endif
+
 static int myri10ge_reset(struct myri10ge_priv *mgp)
 {
        struct myri10ge_cmd cmd;
@@ -1300,6 +1402,8 @@ myri10ge_vlan_rx(struct net_device *dev, void *addr, struct sk_buff *skb)
        }
 }
 
+#define MYRI10GE_HLEN 64 /* Bytes to copy from page to skb linear memory */
+
 static inline int
 myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum)
 {
@@ -1311,6 +1415,7 @@ myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum)
        struct pci_dev *pdev = mgp->pdev;
        struct net_device *dev = mgp->dev;
        u8 *va;
+       bool polling;
 
        if (len <= mgp->small_bytes) {
                rx = &ss->rx_small;
@@ -1325,7 +1430,15 @@ myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum)
        va = page_address(rx->info[idx].page) + rx->info[idx].page_offset;
        prefetch(va);
 
-       skb = napi_get_frags(&ss->napi);
+       /* When busy polling in user context, allocate skb and copy headers to
+        * skb's linear memory ourselves.  When not busy polling, use the napi
+        * gro api.
+        */
+       polling = myri10ge_ss_busy_polling(ss);
+       if (polling)
+               skb = netdev_alloc_skb(dev, MYRI10GE_HLEN + 16);
+       else
+               skb = napi_get_frags(&ss->napi);
        if (unlikely(skb == NULL)) {
                ss->stats.rx_dropped++;
                for (i = 0, remainder = len; remainder > 0; i++) {
@@ -1364,8 +1477,29 @@ myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum)
        }
        myri10ge_vlan_rx(mgp->dev, va, skb);
        skb_record_rx_queue(skb, ss - &mgp->ss[0]);
+       skb_mark_napi_id(skb, &ss->napi);
+
+       if (polling) {
+               int hlen;
+
+               /* myri10ge_vlan_rx might have moved the header, so compute
+                * length and address again.
+                */
+               hlen = MYRI10GE_HLEN > skb->len ? skb->len : MYRI10GE_HLEN;
+               va = page_address(skb_frag_page(&rx_frags[0])) +
+                       rx_frags[0].page_offset;
+               /* Copy header into the skb linear memory */
+               skb_copy_to_linear_data(skb, va, hlen);
+               rx_frags[0].page_offset += hlen;
+               rx_frags[0].size -= hlen;
+               skb->data_len -= hlen;
+               skb->tail += hlen;
+               skb->protocol = eth_type_trans(skb, dev);
+               netif_receive_skb(skb);
+       }
+       else
+               napi_gro_frags(&ss->napi);
 
-       napi_gro_frags(&ss->napi);
        return 1;
 }
 
@@ -1524,10 +1658,14 @@ static int myri10ge_poll(struct napi_struct *napi, int budget)
        if (ss->mgp->dca_enabled)
                myri10ge_update_dca(ss);
 #endif
+       /* Try later if the busy_poll handler is running. */
+       if (!myri10ge_ss_lock_napi(ss))
+               return budget;
 
        /* process as many rx events as NAPI will allow */
        work_done = myri10ge_clean_rx_done(ss, budget);
 
+       myri10ge_ss_unlock_napi(ss);
        if (work_done < budget) {
                napi_complete(napi);
                put_be32(htonl(3), ss->irq_claim);
@@ -1535,6 +1673,34 @@ static int myri10ge_poll(struct napi_struct *napi, int budget)
        return work_done;
 }
 
+#ifdef CONFIG_NET_RX_BUSY_POLL
+static int myri10ge_busy_poll(struct napi_struct *napi)
+{
+       struct myri10ge_slice_state *ss =
+           container_of(napi, struct myri10ge_slice_state, napi);
+       struct myri10ge_priv *mgp = ss->mgp;
+       int work_done;
+
+       /* Poll only when the link is up */
+       if (mgp->link_state != MXGEFW_LINK_UP)
+               return LL_FLUSH_FAILED;
+
+       if (!myri10ge_ss_lock_poll(ss))
+               return LL_FLUSH_BUSY;
+
+       /* Process a small number of packets */
+       work_done = myri10ge_clean_rx_done(ss, 4);
+       if (work_done)
+               ss->busy_poll_cnt += work_done;
+       else
+               ss->busy_poll_miss++;
+
+       myri10ge_ss_unlock_poll(ss);
+
+       return work_done;
+}
+#endif /* CONFIG_NET_RX_BUSY_POLL */
+
 static irqreturn_t myri10ge_intr(int irq, void *arg)
 {
        struct myri10ge_slice_state *ss = arg;
@@ -1742,6 +1908,10 @@ static const char myri10ge_gstrings_slice_stats[][ETH_GSTRING_LEN] = {
        "tx_pkt_start", "tx_pkt_done", "tx_req", "tx_done",
        "rx_small_cnt", "rx_big_cnt",
        "wake_queue", "stop_queue", "tx_linearized",
+#ifdef CONFIG_NET_RX_BUSY_POLL
+       "rx_lock_napi_yield", "rx_lock_poll_yield", "rx_busy_poll_miss",
+       "rx_busy_poll_cnt",
+#endif
 };
 
 #define MYRI10GE_NET_STATS_LEN      21
@@ -1842,6 +2012,12 @@ myri10ge_get_ethtool_stats(struct net_device *netdev,
                data[i++] = (unsigned int)ss->tx.wake_queue;
                data[i++] = (unsigned int)ss->tx.stop_queue;
                data[i++] = (unsigned int)ss->tx.linearized;
+#ifdef CONFIG_NET_RX_BUSY_POLL
+               data[i++] = ss->lock_napi_yield;
+               data[i++] = ss->lock_poll_yield;
+               data[i++] = ss->busy_poll_miss;
+               data[i++] = ss->busy_poll_cnt;
+#endif
        }
 }
 
@@ -2405,6 +2581,9 @@ static int myri10ge_open(struct net_device *dev)
                        goto abort_with_rings;
                }
 
+               /* Initialize the slice spinlock and state used for polling */
+               myri10ge_ss_init_lock(ss);
+
                /* must happen prior to any irq */
                napi_enable(&(ss)->napi);
        }
@@ -2481,9 +2660,19 @@ static int myri10ge_close(struct net_device *dev)
 
        del_timer_sync(&mgp->watchdog_timer);
        mgp->running = MYRI10GE_ETH_STOPPING;
+       local_bh_disable(); /* myri10ge_ss_lock_napi needs bh disabled */
        for (i = 0; i < mgp->num_slices; i++) {
                napi_disable(&mgp->ss[i].napi);
+               /* Lock the slice to prevent the busy_poll handler from
+                * accessing it.  Later when we bring the NIC up, myri10ge_open
+                * resets the slice including this lock.
+                */
+               while (!myri10ge_ss_lock_napi(&mgp->ss[i])) {
+                       pr_info("Slice %d locked\n", i);
+                       mdelay(1);
+               }
        }
+       local_bh_enable();
        netif_carrier_off(dev);
 
        netif_tx_stop_all_queues(dev);
@@ -3569,8 +3758,11 @@ static void myri10ge_free_slices(struct myri10ge_priv *mgp)
                                          ss->fw_stats, ss->fw_stats_bus);
                        ss->fw_stats = NULL;
                }
+               napi_hash_del(&ss->napi);
                netif_napi_del(&ss->napi);
        }
+       /* Wait till napi structs are no longer used, and then free ss. */
+       synchronize_rcu();
        kfree(mgp->ss);
        mgp->ss = NULL;
 }
@@ -3606,6 +3798,7 @@ static int myri10ge_alloc_slices(struct myri10ge_priv *mgp)
                ss->dev = mgp->dev;
                netif_napi_add(ss->dev, &ss->napi, myri10ge_poll,
                               myri10ge_napi_weight);
+               napi_hash_add(&ss->napi);
        }
        return 0;
 abort:
@@ -3625,13 +3818,12 @@ static void myri10ge_probe_slices(struct myri10ge_priv *mgp)
        struct pci_dev *pdev = mgp->pdev;
        char *old_fw;
        bool old_allocated;
-       int i, status, ncpus, msix_cap;
+       int i, status, ncpus;
 
        mgp->num_slices = 1;
-       msix_cap = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
        ncpus = netif_get_num_default_rss_queues();
 
-       if (myri10ge_max_slices == 1 || msix_cap == 0 ||
+       if (myri10ge_max_slices == 1 || !pdev->msix_cap ||
            (myri10ge_max_slices == -1 && ncpus < 2))
                return;
 
@@ -3749,6 +3941,9 @@ static const struct net_device_ops myri10ge_netdev_ops = {
        .ndo_change_mtu         = myri10ge_change_mtu,
        .ndo_set_rx_mode        = myri10ge_set_multicast_list,
        .ndo_set_mac_address    = myri10ge_set_mac_address,
+#ifdef CONFIG_NET_RX_BUSY_POLL
+       .ndo_busy_poll          = myri10ge_busy_poll,
+#endif
 };
 
 static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
index e88bdb1aa669c1f60c9b161f2b81fb10150c7777..dcfe58fa3b8a1abc9eb858a91a171db7a7bfcc3d 100644 (file)
@@ -922,7 +922,7 @@ static void __init get_mac_address(struct net_device *dev)
 {
        struct w90p910_ether *ether = netdev_priv(dev);
        struct platform_device *pdev;
-       char addr[6];
+       char addr[ETH_ALEN];
 
        pdev = ether->pdev;
 
@@ -934,7 +934,7 @@ static void __init get_mac_address(struct net_device *dev)
        addr[5] = 0xa8;
 
        if (is_valid_ether_addr(addr))
-               memcpy(dev->dev_addr, &addr, 0x06);
+               memcpy(dev->dev_addr, &addr, ETH_ALEN);
        else
                dev_err(&pdev->dev, "invalid mac address\n");
 }
index 7779036690cca239265ddbe27eeac8076a546933..6797b1075874ae26cca6756bf450559ed7c7e6be 100644 (file)
@@ -581,6 +581,19 @@ struct pch_gbe_hw_stats {
        u32 intr_tcpip_err_count;
 };
 
+/**
+ * struct pch_gbe_privdata - PCI Device ID driver data
+ * @phy_tx_clk_delay:          Bool, configure the PHY TX delay in software
+ * @phy_disable_hibernate:     Bool, disable PHY hibernation
+ * @platform_init:             Platform initialization callback, called from
+ *                             probe, prior to PHY initialization.
+ */
+struct pch_gbe_privdata {
+       bool phy_tx_clk_delay;
+       bool phy_disable_hibernate;
+       int (*platform_init)(struct pci_dev *pdev);
+};
+
 /**
  * struct pch_gbe_adapter - board specific private data structure
  * @stats_lock:        Spinlock structure for status
@@ -604,6 +617,7 @@ struct pch_gbe_hw_stats {
  * @rx_buffer_len:     Receive buffer length
  * @tx_queue_len:      Transmit queue length
  * @have_msi:          PCI MSI mode flag
+ * @pch_gbe_privdata:  PCI Device ID driver_data
  */
 
 struct pch_gbe_adapter {
@@ -631,6 +645,7 @@ struct pch_gbe_adapter {
        int hwts_tx_en;
        int hwts_rx_en;
        struct pci_dev *ptp_pdev;
+       struct pch_gbe_privdata *pdata;
 };
 
 #define pch_gbe_hw_to_adapter(hw)      container_of(hw, struct pch_gbe_adapter, hw)
index 1129db0cdf82886c90494e92d963b36a2e574dc2..f0ceb89af93110c874eee7ec3d23cce82871d8a9 100644 (file)
@@ -118,6 +118,7 @@ static int pch_gbe_set_settings(struct net_device *netdev,
         * filled by get_settings() on a down link, speed is -1: */
        if (speed == UINT_MAX) {
                speed = SPEED_1000;
+               ethtool_cmd_speed_set(ecmd, speed);
                ecmd->duplex = DUPLEX_FULL;
        }
        ret = mii_ethtool_sset(&adapter->mii, ecmd);
index ab1039a95bf9e3ed883862515bc59a6f921df946..e19f1be60d5e40fc8428a766825f6815f9ce7e29 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/module.h>
 #include <linux/net_tstamp.h>
 #include <linux/ptp_classify.h>
+#include <linux/gpio.h>
 
 #define DRV_VERSION     "1.01"
 const char pch_driver_version[] = DRV_VERSION;
@@ -111,6 +112,8 @@ const char pch_driver_version[] = DRV_VERSION;
 #define PTP_L4_MULTICAST_SA "01:00:5e:00:01:81"
 #define PTP_L2_MULTICAST_SA "01:1b:19:00:00:00"
 
+#define MINNOW_PHY_RESET_GPIO          13
+
 static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT;
 
 static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg);
@@ -682,7 +685,7 @@ static int pch_gbe_init_phy(struct pch_gbe_adapter *adapter)
        }
        adapter->hw.phy.addr = adapter->mii.phy_id;
        netdev_dbg(netdev, "phy_addr = %d\n", adapter->mii.phy_id);
-       if (addr == 32)
+       if (addr == PCH_GBE_PHY_REGS_LEN)
                return -EAGAIN;
        /* Selected the phy and isolate the rest */
        for (addr = 0; addr < PCH_GBE_PHY_REGS_LEN; addr++) {
@@ -2635,6 +2638,9 @@ static int pch_gbe_probe(struct pci_dev *pdev,
        adapter->pdev = pdev;
        adapter->hw.back = adapter;
        adapter->hw.reg = pcim_iomap_table(pdev)[PCH_GBE_PCI_BAR];
+       adapter->pdata = (struct pch_gbe_privdata *)pci_id->driver_data;
+       if (adapter->pdata && adapter->pdata->platform_init)
+               adapter->pdata->platform_init(pdev);
 
        adapter->ptp_pdev = pci_get_bus_and_slot(adapter->pdev->bus->number,
                                               PCI_DEVFN(12, 4));
@@ -2710,6 +2716,10 @@ static int pch_gbe_probe(struct pci_dev *pdev,
 
        dev_dbg(&pdev->dev, "PCH Network Connection\n");
 
+       /* Disable hibernation on certain platforms */
+       if (adapter->pdata && adapter->pdata->phy_disable_hibernate)
+               pch_gbe_phy_disable_hibernate(&adapter->hw);
+
        device_set_wakeup_enable(&pdev->dev, 1);
        return 0;
 
@@ -2720,7 +2730,46 @@ err_free_netdev:
        return ret;
 }
 
+/* The AR803X PHY on the MinnowBoard requires a physical pin to be toggled to
+ * ensure it is awake for probe and init. Request the line and reset the PHY.
+ */
+static int pch_gbe_minnow_platform_init(struct pci_dev *pdev)
+{
+       unsigned long flags = GPIOF_DIR_OUT | GPIOF_INIT_HIGH | GPIOF_EXPORT;
+       unsigned gpio = MINNOW_PHY_RESET_GPIO;
+       int ret;
+
+       ret = devm_gpio_request_one(&pdev->dev, gpio, flags,
+                                   "minnow_phy_reset");
+       if (ret) {
+               dev_err(&pdev->dev,
+                       "ERR: Can't request PHY reset GPIO line '%d'\n", gpio);
+               return ret;
+       }
+
+       gpio_set_value(gpio, 0);
+       usleep_range(1250, 1500);
+       gpio_set_value(gpio, 1);
+       usleep_range(1250, 1500);
+
+       return ret;
+}
+
+static struct pch_gbe_privdata pch_gbe_minnow_privdata = {
+       .phy_tx_clk_delay = true,
+       .phy_disable_hibernate = true,
+       .platform_init = pch_gbe_minnow_platform_init,
+};
+
 static DEFINE_PCI_DEVICE_TABLE(pch_gbe_pcidev_id) = {
+       {.vendor = PCI_VENDOR_ID_INTEL,
+        .device = PCI_DEVICE_ID_INTEL_IOH1_GBE,
+        .subvendor = PCI_VENDOR_ID_CIRCUITCO,
+        .subdevice = PCI_SUBSYSTEM_ID_CIRCUITCO_MINNOWBOARD,
+        .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
+        .class_mask = (0xFFFF00),
+        .driver_data = (kernel_ulong_t)&pch_gbe_minnow_privdata
+        },
        {.vendor = PCI_VENDOR_ID_INTEL,
         .device = PCI_DEVICE_ID_INTEL_IOH1_GBE,
         .subvendor = PCI_ANY_ID,
index da079073a6c63e18528f51e359793b43bd001af2..8b7ff75fc8e0d5201c94a014c64781b632a896cc 100644 (file)
 #define MII_SR_100X_FD_CAPS      0x4000        /* 100X  Full Duplex Capable */
 #define MII_SR_100T4_CAPS        0x8000        /* 100T4 Capable */
 
+/* AR8031 PHY Debug Registers */
+#define PHY_AR803X_ID           0x00001374
+#define PHY_AR8031_DBG_OFF      0x1D
+#define PHY_AR8031_DBG_DAT      0x1E
+#define PHY_AR8031_SERDES       0x05
+#define PHY_AR8031_HIBERNATE    0x0B
+#define PHY_AR8031_SERDES_TX_CLK_DLY   0x0100 /* TX clock delay of 2.0ns */
+#define PHY_AR8031_PS_HIB_EN           0x8000 /* Hibernate enable */
+
 /* Phy Id Register (word 2) */
 #define PHY_REVISION_MASK        0x000F
 
@@ -248,6 +257,51 @@ void pch_gbe_phy_set_rgmii(struct pch_gbe_hw *hw)
        pch_gbe_phy_sw_reset(hw);
 }
 
+/**
+ * pch_gbe_phy_tx_clk_delay - Setup TX clock delay via the PHY
+ * @hw:                    Pointer to the HW structure
+ * Returns
+ *     0:              Successful.
+ *     -EINVAL:        Invalid argument.
+ */
+static int pch_gbe_phy_tx_clk_delay(struct pch_gbe_hw *hw)
+{
+       /* The RGMII interface requires a ~2ns TX clock delay. This is typically
+        * done in layout with a longer trace or via PHY strapping, but can also
+        * be done via PHY configuration registers.
+        */
+       struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
+       u16 mii_reg;
+       int ret = 0;
+
+       switch (hw->phy.id) {
+       case PHY_AR803X_ID:
+               netdev_dbg(adapter->netdev,
+                          "Configuring AR803X PHY for 2ns TX clock delay\n");
+               pch_gbe_phy_read_reg_miic(hw, PHY_AR8031_DBG_OFF, &mii_reg);
+               ret = pch_gbe_phy_write_reg_miic(hw, PHY_AR8031_DBG_OFF,
+                                                PHY_AR8031_SERDES);
+               if (ret)
+                       break;
+
+               pch_gbe_phy_read_reg_miic(hw, PHY_AR8031_DBG_DAT, &mii_reg);
+               mii_reg |= PHY_AR8031_SERDES_TX_CLK_DLY;
+               ret = pch_gbe_phy_write_reg_miic(hw, PHY_AR8031_DBG_DAT,
+                                                mii_reg);
+               break;
+       default:
+               netdev_err(adapter->netdev,
+                          "Unknown PHY (%x), could not set TX clock delay\n",
+                          hw->phy.id);
+               return -EINVAL;
+       }
+
+       if (ret)
+               netdev_err(adapter->netdev,
+                          "Could not configure tx clock delay for PHY\n");
+       return ret;
+}
+
 /**
  * pch_gbe_phy_init_setting - PHY initial setting
  * @hw:                    Pointer to the HW structure
@@ -277,4 +331,48 @@ void pch_gbe_phy_init_setting(struct pch_gbe_hw *hw)
        pch_gbe_phy_read_reg_miic(hw, PHY_PHYSP_CONTROL, &mii_reg);
        mii_reg |= PHYSP_CTRL_ASSERT_CRS_TX;
        pch_gbe_phy_write_reg_miic(hw, PHY_PHYSP_CONTROL, mii_reg);
+
+       /* Setup a TX clock delay on certain platforms */
+       if (adapter->pdata && adapter->pdata->phy_tx_clk_delay)
+               pch_gbe_phy_tx_clk_delay(hw);
+}
+
+/**
+ * pch_gbe_phy_disable_hibernate - Disable the PHY low power state
+ * @hw:                    Pointer to the HW structure
+ * Returns
+ *     0:              Successful.
+ *     -EINVAL:        Invalid argument.
+ */
+int pch_gbe_phy_disable_hibernate(struct pch_gbe_hw *hw)
+{
+       struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
+       u16 mii_reg;
+       int ret = 0;
+
+       switch (hw->phy.id) {
+       case PHY_AR803X_ID:
+               netdev_dbg(adapter->netdev,
+                          "Disabling hibernation for AR803X PHY\n");
+               ret = pch_gbe_phy_write_reg_miic(hw, PHY_AR8031_DBG_OFF,
+                                                PHY_AR8031_HIBERNATE);
+               if (ret)
+                       break;
+
+               pch_gbe_phy_read_reg_miic(hw, PHY_AR8031_DBG_DAT, &mii_reg);
+               mii_reg &= ~PHY_AR8031_PS_HIB_EN;
+               ret = pch_gbe_phy_write_reg_miic(hw, PHY_AR8031_DBG_DAT,
+                                                mii_reg);
+               break;
+       default:
+               netdev_err(adapter->netdev,
+                          "Unknown PHY (%x), could not disable hibernation\n",
+                          hw->phy.id);
+               return -EINVAL;
+       }
+
+       if (ret)
+               netdev_err(adapter->netdev,
+                          "Could not disable PHY hibernation\n");
+       return ret;
 }
index 03264dc7b5ec8a294da8c4e662f348ca74230d32..0cbe69206e04db47eb77956d7466937e6c357144 100644 (file)
@@ -33,5 +33,6 @@ void pch_gbe_phy_power_up(struct pch_gbe_hw *hw);
 void pch_gbe_phy_power_down(struct pch_gbe_hw *hw);
 void pch_gbe_phy_set_rgmii(struct pch_gbe_hw *hw);
 void pch_gbe_phy_init_setting(struct pch_gbe_hw *hw);
+int pch_gbe_phy_disable_hibernate(struct pch_gbe_hw *hw);
 
 #endif /* _PCH_GBE_PHY_H_ */
index a5f0b5da614933c74ebbf005edba139b4bb74b88..f21ae7b6c766c3353e33faeab6fd32a14270e0cf 100644 (file)
@@ -191,7 +191,7 @@ static int pasemi_get_mac_addr(struct pasemi_mac *mac)
        struct device_node *dn = pci_device_to_OF_node(pdev);
        int len;
        const u8 *maddr;
-       u8 addr[6];
+       u8 addr[ETH_ALEN];
 
        if (!dn) {
                dev_dbg(&pdev->dev,
@@ -201,8 +201,8 @@ static int pasemi_get_mac_addr(struct pasemi_mac *mac)
 
        maddr = of_get_property(dn, "local-mac-address", &len);
 
-       if (maddr && len == 6) {
-               memcpy(mac->mac_addr, maddr, 6);
+       if (maddr && len == ETH_ALEN) {
+               memcpy(mac->mac_addr, maddr, ETH_ALEN);
                return 0;
        }
 
@@ -219,14 +219,15 @@ static int pasemi_get_mac_addr(struct pasemi_mac *mac)
                return -ENOENT;
        }
 
-       if (sscanf(maddr, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", &addr[0],
-                  &addr[1], &addr[2], &addr[3], &addr[4], &addr[5]) != 6) {
+       if (sscanf(maddr, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx",
+                  &addr[0], &addr[1], &addr[2], &addr[3], &addr[4], &addr[5])
+           != ETH_ALEN) {
                dev_warn(&pdev->dev,
                         "can't parse mac address, not configuring\n");
                return -EINVAL;
        }
 
-       memcpy(mac->mac_addr, addr, 6);
+       memcpy(mac->mac_addr, addr, ETH_ALEN);
 
        return 0;
 }
index e2f4efa8ad46a5d79384a2afd6e441a1552f02a0..f2749d46c125f738ce38e406958015fc37246640 100644 (file)
@@ -83,7 +83,7 @@ struct pasemi_mac {
 #define MAC_TYPE_GMAC  1
 #define MAC_TYPE_XAUI  2
 
-       u8              mac_addr[6];
+       u8              mac_addr[ETH_ALEN];
 
        struct net_lro_mgr      lro_mgr;
        struct net_lro_desc     lro_desc[MAX_LRO_DESCRIPTORS];
index 9fbb1cdbfa4764b48d010f5ca43a851b33234e20..8375cbde996976047475b9c5e6c16f33fd365df4 100644 (file)
@@ -536,10 +536,10 @@ static void netxen_p2_nic_set_multi(struct net_device *netdev)
 {
        struct netxen_adapter *adapter = netdev_priv(netdev);
        struct netdev_hw_addr *ha;
-       u8 null_addr[6];
+       u8 null_addr[ETH_ALEN];
        int i;
 
-       memset(null_addr, 0, 6);
+       memset(null_addr, 0, ETH_ALEN);
 
        if (netdev->flags & IFF_PROMISC) {
 
index c401b0b4353d94d543e357c16c184c8771eafe7a..1046e9461509a57d314bb78052d88f7b26e2d6aa 100644 (file)
@@ -459,16 +459,14 @@ static void netxen_pcie_strap_init(struct netxen_adapter *adapter)
 static void netxen_set_msix_bit(struct pci_dev *pdev, int enable)
 {
        u32 control;
-       int pos;
 
-       pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
-       if (pos) {
-               pci_read_config_dword(pdev, pos, &control);
+       if (pdev->msix_cap) {
+               pci_read_config_dword(pdev, pdev->msix_cap, &control);
                if (enable)
                        control |= PCI_MSIX_FLAGS_ENABLE;
                else
                        control = 0;
-               pci_write_config_dword(pdev, pos, control);
+               pci_write_config_dword(pdev, pdev->msix_cap, control);
        }
 }
 
index 221645e9f182052fc7641155e1db37ef5044cb2f..3f03856768a82f17614daaa78de5c96d81e01126 100644 (file)
@@ -20,7 +20,6 @@
 #include <linux/tcp.h>
 #include <linux/skbuff.h>
 #include <linux/firmware.h>
-
 #include <linux/ethtool.h>
 #include <linux/mii.h>
 #include <linux/timer.h>
@@ -37,9 +36,9 @@
 #include "qlcnic_83xx_hw.h"
 
 #define _QLCNIC_LINUX_MAJOR 5
-#define _QLCNIC_LINUX_MINOR 2
-#define _QLCNIC_LINUX_SUBVERSION 44
-#define QLCNIC_LINUX_VERSIONID  "5.2.44"
+#define _QLCNIC_LINUX_MINOR 3
+#define _QLCNIC_LINUX_SUBVERSION 48
+#define QLCNIC_LINUX_VERSIONID  "5.3.48"
 #define QLCNIC_DRV_IDC_VER  0x01
 #define QLCNIC_DRIVER_VERSION  ((_QLCNIC_LINUX_MAJOR << 16) |\
                 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
@@ -98,6 +97,9 @@
 #define TX_STOP_THRESH         ((MAX_SKB_FRAGS >> 2) + MAX_TSO_HEADER_DESC \
                                                        + MGMT_CMD_DESC_RESV)
 #define QLCNIC_MAX_TX_TIMEOUTS 2
+#define QLCNIC_MAX_TX_RINGS    8
+#define QLCNIC_MAX_SDS_RINGS   8
+
 /*
  * Following are the states of the Phantom. Phantom will set them and
  * Host will read to check if the fields are correct.
@@ -467,7 +469,9 @@ struct qlcnic_hardware_context {
        u32 *ext_reg_tbl;
        u32 mbox_aen[QLC_83XX_MBX_AEN_CNT];
        u32 mbox_reg[4];
-       spinlock_t mbx_lock;
+       struct qlcnic_mailbox *mailbox;
+       u8 extend_lb_time;
+       u8 phys_port_id[ETH_ALEN];
 };
 
 struct qlcnic_adapter_stats {
@@ -515,6 +519,7 @@ struct qlcnic_host_sds_ring {
        u32 num_desc;
        void __iomem *crb_sts_consumer;
 
+       struct qlcnic_host_tx_ring *tx_ring;
        struct status_desc *desc_head;
        struct qlcnic_adapter *adapter;
        struct napi_struct napi;
@@ -532,9 +537,17 @@ struct qlcnic_host_tx_ring {
        void __iomem *crb_intr_mask;
        char name[IFNAMSIZ + 12];
        u16 ctx_id;
+
+       u32 state;
        u32 producer;
        u32 sw_consumer;
        u32 num_desc;
+
+       u64 xmit_on;
+       u64 xmit_off;
+       u64 xmit_called;
+       u64 xmit_finished;
+
        void __iomem *crb_cmd_producer;
        struct cmd_desc_type0 *desc_head;
        struct qlcnic_adapter *adapter;
@@ -559,7 +572,6 @@ struct qlcnic_recv_context {
        u32 state;
        u16 context_id;
        u16 virt_port;
-
 };
 
 /* HW context creation */
@@ -604,6 +616,7 @@ struct qlcnic_recv_context {
 #define QLCNIC_CAP0_LRO_CONTIGUOUS     (1 << 8)
 #define QLCNIC_CAP0_VALIDOFF           (1 << 11)
 #define QLCNIC_CAP0_LRO_MSS            (1 << 21)
+#define QLCNIC_CAP0_TX_MULTI           (1 << 22)
 
 /*
  * Context state
@@ -631,7 +644,7 @@ struct qlcnic_hostrq_rds_ring {
 
 struct qlcnic_hostrq_rx_ctx {
        __le64 host_rsp_dma_addr;       /* Response dma'd here */
-       __le32 capabilities[4]; /* Flag bit vector */
+       __le32 capabilities[4];         /* Flag bit vector */
        __le32 host_int_crb_mode;       /* Interrupt crb usage */
        __le32 host_rds_crb_mode;       /* RDS crb usage */
        /* These ring offsets are relative to data[0] below */
@@ -814,6 +827,7 @@ struct qlcnic_mac_list_s {
 #define QLCNIC_FW_CAPABILITY_BDG               BIT_8
 #define QLCNIC_FW_CAPABILITY_FVLANTX           BIT_9
 #define QLCNIC_FW_CAPABILITY_HW_LRO            BIT_10
+#define QLCNIC_FW_CAPABILITY_2_MULTI_TX                BIT_4
 #define QLCNIC_FW_CAPABILITY_MULTI_LOOPBACK    BIT_27
 #define QLCNIC_FW_CAPABILITY_MORE_CAPS         BIT_31
 
@@ -913,6 +927,8 @@ struct qlcnic_ipaddr {
 #define QLCNIC_FW_LRO_MSS_CAP          0x8000
 #define QLCNIC_TX_INTR_SHARED          0x10000
 #define QLCNIC_APP_CHANGED_FLAGS       0x20000
+#define QLCNIC_HAS_PHYS_PORT_ID                0x40000
+
 #define QLCNIC_IS_MSI_FAMILY(adapter) \
        ((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED))
 #define QLCNIC_IS_TSO_CAPABLE(adapter)  \
@@ -922,6 +938,7 @@ struct qlcnic_ipaddr {
 #define QLCNIC_BEACON_DISABLE          0xD
 
 #define QLCNIC_DEF_NUM_STS_DESC_RINGS  4
+#define QLCNIC_DEF_NUM_TX_RINGS                4
 #define QLCNIC_MSIX_TBL_SPACE          8192
 #define QLCNIC_PCI_REG_MSIX_TBL        0x44
 #define QLCNIC_MSIX_TBL_PGSIZE         4096
@@ -937,6 +954,7 @@ struct qlcnic_ipaddr {
 #define __QLCNIC_DIAG_RES_ALLOC                6
 #define __QLCNIC_LED_ENABLE            7
 #define __QLCNIC_ELB_INPROGRESS                8
+#define __QLCNIC_MULTI_TX_UNIQUE       9
 #define __QLCNIC_SRIOV_ENABLE          10
 #define __QLCNIC_SRIOV_CAPABLE         11
 #define __QLCNIC_MBX_POLL_ENABLE       12
@@ -950,12 +968,6 @@ struct qlcnic_ipaddr {
 #define QLCNIC_READD_AGE       20
 #define QLCNIC_LB_MAX_FILTERS  64
 #define QLCNIC_LB_BUCKET_SIZE  32
-
-/* QLCNIC Driver Error Code */
-#define QLCNIC_FW_NOT_RESPOND          51
-#define QLCNIC_TEST_IN_PROGRESS                52
-#define QLCNIC_UNDEFINED_ERROR         53
-#define QLCNIC_LB_CABLE_NOT_CONN       54
 #define QLCNIC_ILB_MAX_RCV_LOOP        10
 
 struct qlcnic_filter {
@@ -972,6 +984,21 @@ struct qlcnic_filter_hash {
        u16 fbucket_size;
 };
 
+/* Mailbox specific data structures */
+struct qlcnic_mailbox {
+       struct workqueue_struct *work_q;
+       struct qlcnic_adapter   *adapter;
+       struct qlcnic_mbx_ops   *ops;
+       struct work_struct      work;
+       struct completion       completion;
+       struct list_head        cmd_q;
+       unsigned long           status;
+       spinlock_t              queue_lock;     /* Mailbox queue lock */
+       spinlock_t              aen_lock;       /* Mailbox response/AEN lock */
+       atomic_t                rsp_status;
+       u32                     num_cmds;
+};
+
 struct qlcnic_adapter {
        struct qlcnic_hardware_context *ahw;
        struct qlcnic_recv_context *recv_ctx;
@@ -1385,9 +1412,20 @@ struct _cdrp_cmd {
 };
 
 struct qlcnic_cmd_args {
-       struct _cdrp_cmd req;
-       struct _cdrp_cmd rsp;
-       int op_type;
+       struct completion       completion;
+       struct list_head        list;
+       struct _cdrp_cmd        req;
+       struct _cdrp_cmd        rsp;
+       atomic_t                rsp_status;
+       int                     pay_size;
+       u32                     rsp_opcode;
+       u32                     total_cmds;
+       u32                     op_type;
+       u32                     type;
+       u32                     cmd_op;
+       u32                     *hdr;   /* Back channel message header */
+       u32                     *pay;   /* Back channel message payload */
+       u8                      func_num;
 };
 
 int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter);
@@ -1462,7 +1500,8 @@ void qlcnic_fw_destroy_ctx(struct qlcnic_adapter *adapter);
 
 void qlcnic_reset_rx_buffers_list(struct qlcnic_adapter *adapter);
 void qlcnic_release_rx_buffers(struct qlcnic_adapter *adapter);
-void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter);
+void qlcnic_release_tx_buffers(struct qlcnic_adapter *,
+                              struct qlcnic_host_tx_ring *);
 
 int qlcnic_check_fw_status(struct qlcnic_adapter *adapter);
 void qlcnic_watchdog_task(struct work_struct *work);
@@ -1474,6 +1513,7 @@ void __qlcnic_set_multi(struct net_device *, u16);
 int qlcnic_nic_add_mac(struct qlcnic_adapter *, const u8 *, u16);
 int qlcnic_nic_del_mac(struct qlcnic_adapter *, const u8 *);
 void qlcnic_82xx_free_mac_list(struct qlcnic_adapter *adapter);
+int qlcnic_82xx_read_phys_port_id(struct qlcnic_adapter *);
 
 int qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu);
 int qlcnic_fw_cmd_set_drv_version(struct qlcnic_adapter *, u32);
@@ -1495,8 +1535,9 @@ int qlcnic_reset_context(struct qlcnic_adapter *);
 void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings);
 int qlcnic_diag_alloc_res(struct net_device *netdev, int test);
 netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
-int qlcnic_set_max_rss(struct qlcnic_adapter *, u8, size_t);
+int qlcnic_set_max_rss(struct qlcnic_adapter *, u8, int);
 int qlcnic_validate_max_rss(struct qlcnic_adapter *, __u32);
+int qlcnic_validate_max_tx_rings(struct qlcnic_adapter *, int);
 void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter);
 void qlcnic_82xx_set_mac_filter_count(struct qlcnic_adapter *);
 int qlcnic_enable_msix(struct qlcnic_adapter *, u32);
@@ -1523,6 +1564,7 @@ void qlcnic_free_sds_rings(struct qlcnic_recv_context *);
 void qlcnic_advert_link_change(struct qlcnic_adapter *, int);
 void qlcnic_free_tx_rings(struct qlcnic_adapter *);
 int qlcnic_alloc_tx_rings(struct qlcnic_adapter *, struct net_device *);
+void qlcnic_dump_mbx(struct qlcnic_adapter *, struct qlcnic_cmd_args *);
 
 void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter);
 void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter);
@@ -1585,6 +1627,26 @@ static inline u32 qlcnic_tx_avail(struct qlcnic_host_tx_ring *tx_ring)
                                tx_ring->producer;
 }
 
+static inline int qlcnic_set_real_num_queues(struct qlcnic_adapter *adapter,
+                                            struct net_device *netdev)
+{
+       int err, tx_q;
+
+       tx_q = adapter->max_drv_tx_rings;
+
+       netdev->num_tx_queues = tx_q;
+       netdev->real_num_tx_queues = tx_q;
+
+       err = netif_set_real_num_tx_queues(netdev, tx_q);
+       if (err)
+               dev_err(&adapter->pdev->dev, "failed to set %d Tx queues\n",
+                       tx_q);
+       else
+               dev_info(&adapter->pdev->dev, "set %d Tx queues\n", tx_q);
+
+       return err;
+}
+
 struct qlcnic_nic_template {
        int (*config_bridged_mode) (struct qlcnic_adapter *, u32);
        int (*config_led) (struct qlcnic_adapter *, u32, u32);
@@ -1600,6 +1662,20 @@ struct qlcnic_nic_template {
        int (*resume)(struct qlcnic_adapter *);
 };
 
+struct qlcnic_mbx_ops {
+       int (*enqueue_cmd) (struct qlcnic_adapter *,
+                           struct qlcnic_cmd_args *, unsigned long *);
+       void (*dequeue_cmd) (struct qlcnic_adapter *, struct qlcnic_cmd_args *);
+       void (*decode_resp) (struct qlcnic_adapter *, struct qlcnic_cmd_args *);
+       void (*encode_cmd) (struct qlcnic_adapter *, struct qlcnic_cmd_args *);
+       void (*nofity_fw) (struct qlcnic_adapter *, u8);
+};
+
+int qlcnic_83xx_init_mailbox_work(struct qlcnic_adapter *);
+void qlcnic_83xx_detach_mailbox_work(struct qlcnic_adapter *);
+void qlcnic_83xx_reinit_mbx_work(struct qlcnic_mailbox *mbx);
+void qlcnic_83xx_free_mailbox(struct qlcnic_mailbox *mbx);
+
 /* Adapter hardware abstraction */
 struct qlcnic_hardware_ops {
        void (*read_crb) (struct qlcnic_adapter *, char *, loff_t, size_t);
@@ -1607,8 +1683,8 @@ struct qlcnic_hardware_ops {
        int (*read_reg) (struct qlcnic_adapter *, ulong, int *);
        int (*write_reg) (struct qlcnic_adapter *, ulong, u32);
        void (*get_ocm_win) (struct qlcnic_hardware_context *);
-       int (*get_mac_address) (struct qlcnic_adapter *, u8 *);
-       int (*setup_intr) (struct qlcnic_adapter *, u8);
+       int (*get_mac_address) (struct qlcnic_adapter *, u8 *, u8);
+       int (*setup_intr) (struct qlcnic_adapter *, u8, int);
        int (*alloc_mbx_args)(struct qlcnic_cmd_args *,
                              struct qlcnic_adapter *, u32);
        int (*mbx_cmd) (struct qlcnic_adapter *, struct qlcnic_cmd_args *);
@@ -1641,6 +1717,7 @@ struct qlcnic_hardware_ops {
        int (*get_board_info) (struct qlcnic_adapter *);
        void (*set_mac_filter_count) (struct qlcnic_adapter *);
        void (*free_mac_list) (struct qlcnic_adapter *);
+       int (*read_phys_port_id) (struct qlcnic_adapter *);
 };
 
 extern struct qlcnic_nic_template qlcnic_vf_ops;
@@ -1669,14 +1746,15 @@ static inline int qlcnic_hw_write_wx_2M(struct qlcnic_adapter *adapter,
 }
 
 static inline int qlcnic_get_mac_address(struct qlcnic_adapter *adapter,
-                                        u8 *mac)
+                                        u8 *mac, u8 function)
 {
-       return adapter->ahw->hw_ops->get_mac_address(adapter, mac);
+       return adapter->ahw->hw_ops->get_mac_address(adapter, mac, function);
 }
 
-static inline int qlcnic_setup_intr(struct qlcnic_adapter *adapter, u8 num_intr)
+static inline int qlcnic_setup_intr(struct qlcnic_adapter *adapter,
+                                   u8 num_intr, int txq)
 {
-       return adapter->ahw->hw_ops->setup_intr(adapter, num_intr);
+       return adapter->ahw->hw_ops->setup_intr(adapter, num_intr, txq);
 }
 
 static inline int qlcnic_alloc_mbx_args(struct qlcnic_cmd_args *mbx,
@@ -1867,6 +1945,12 @@ static inline void qlcnic_set_mac_filter_count(struct qlcnic_adapter *adapter)
                adapter->ahw->hw_ops->set_mac_filter_count(adapter);
 }
 
+static inline void qlcnic_read_phys_port_id(struct qlcnic_adapter *adapter)
+{
+       if (adapter->ahw->hw_ops->read_phys_port_id)
+               adapter->ahw->hw_ops->read_phys_port_id(adapter);
+}
+
 static inline void qlcnic_dev_request_reset(struct qlcnic_adapter *adapter,
                                            u32 key)
 {
@@ -1898,16 +1982,45 @@ static inline void qlcnic_config_ipaddr(struct qlcnic_adapter *adapter,
        adapter->nic_ops->config_ipaddr(adapter, ip, cmd);
 }
 
+static inline bool qlcnic_check_multi_tx(struct qlcnic_adapter *adapter)
+{
+       return test_bit(__QLCNIC_MULTI_TX_UNIQUE, &adapter->state);
+}
+
+static inline void qlcnic_disable_multi_tx(struct qlcnic_adapter *adapter)
+{
+       test_and_clear_bit(__QLCNIC_MULTI_TX_UNIQUE, &adapter->state);
+       adapter->max_drv_tx_rings = 1;
+}
+
+/* When operating in multi tx mode, the driver needs to write 0x1
+ * to the src register (instead of 0x0) to disable receiving interrupts.
+ */
 static inline void qlcnic_disable_int(struct qlcnic_host_sds_ring *sds_ring)
 {
-       writel(0, sds_ring->crb_intr_mask);
+       struct qlcnic_adapter *adapter = sds_ring->adapter;
+
+       if (qlcnic_check_multi_tx(adapter) &&
+           !adapter->ahw->diag_test &&
+           (adapter->flags & QLCNIC_MSIX_ENABLED))
+               writel(0x1, sds_ring->crb_intr_mask);
+       else
+               writel(0, sds_ring->crb_intr_mask);
 }
 
+/* When operating in multi tx mode, the driver needs to write 0x0
+ * to the src register (instead of 0x1) to enable receiving interrupts.
+ */
 static inline void qlcnic_enable_int(struct qlcnic_host_sds_ring *sds_ring)
 {
        struct qlcnic_adapter *adapter = sds_ring->adapter;
 
-       writel(0x1, sds_ring->crb_intr_mask);
+       if (qlcnic_check_multi_tx(adapter) &&
+           !adapter->ahw->diag_test &&
+           (adapter->flags & QLCNIC_MSIX_ENABLED))
+               writel(0, sds_ring->crb_intr_mask);
+       else
+               writel(0x1, sds_ring->crb_intr_mask);
 
        if (!QLCNIC_IS_MSI_FAMILY(adapter))
                writel(0xfbff, adapter->tgt_mask_reg);
@@ -1939,9 +2052,11 @@ extern const struct ethtool_ops qlcnic_ethtool_failed_ops;
                        __func__, ##_args);             \
        } while (0)
 
-#define PCI_DEVICE_ID_QLOGIC_QLE834X    0x8030
+#define PCI_DEVICE_ID_QLOGIC_QLE824X           0x8020
+#define PCI_DEVICE_ID_QLOGIC_QLE834X           0x8030
 #define PCI_DEVICE_ID_QLOGIC_VF_QLE834X        0x8430
-#define PCI_DEVICE_ID_QLOGIC_QLE824X   0x8020
+#define PCI_DEVICE_ID_QLOGIC_QLE844X           0x8040
+#define PCI_DEVICE_ID_QLOGIC_VF_QLE844X        0x8440
 
 static inline bool qlcnic_82xx_check(struct qlcnic_adapter *adapter)
 {
@@ -1955,6 +2070,8 @@ static inline bool qlcnic_83xx_check(struct qlcnic_adapter *adapter)
        bool status;
 
        status = ((device == PCI_DEVICE_ID_QLOGIC_QLE834X) ||
+                 (device == PCI_DEVICE_ID_QLOGIC_QLE844X) ||
+                 (device == PCI_DEVICE_ID_QLOGIC_VF_QLE844X) ||
                  (device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X)) ? true : false;
 
        return status;
@@ -1968,7 +2085,11 @@ static inline bool qlcnic_sriov_pf_check(struct qlcnic_adapter *adapter)
 static inline bool qlcnic_sriov_vf_check(struct qlcnic_adapter *adapter)
 {
        unsigned short device = adapter->pdev->device;
+       bool status;
 
-       return (device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X) ? true : false;
+       status = ((device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X) ||
+                 (device == PCI_DEVICE_ID_QLOGIC_VF_QLE844X)) ? true : false;
+
+       return status;
 }
 #endif                         /* __QLCNIC_H_ */
index 9d4bb7f839041967eaed1ef6fd0fd51b40f6ec6d..81b694cc14de33ef559b1a7bc4337a34cde66fc3 100644 (file)
@@ -149,7 +149,7 @@ static struct qlcnic_hardware_ops qlcnic_83xx_hw_ops = {
        .get_mac_address                = qlcnic_83xx_get_mac_address,
        .setup_intr                     = qlcnic_83xx_setup_intr,
        .alloc_mbx_args                 = qlcnic_83xx_alloc_mbx_args,
-       .mbx_cmd                        = qlcnic_83xx_mbx_op,
+       .mbx_cmd                        = qlcnic_83xx_issue_cmd,
        .get_func_no                    = qlcnic_83xx_get_func_no,
        .api_lock                       = qlcnic_83xx_cam_lock,
        .api_unlock                     = qlcnic_83xx_cam_unlock,
@@ -261,7 +261,7 @@ int qlcnic_83xx_wrt_reg_indirect(struct qlcnic_adapter *adapter, ulong addr,
        }
 }
 
-int qlcnic_83xx_setup_intr(struct qlcnic_adapter *adapter, u8 num_intr)
+int qlcnic_83xx_setup_intr(struct qlcnic_adapter *adapter, u8 num_intr, int txq)
 {
        int err, i, num_msix;
        struct qlcnic_hardware_context *ahw = adapter->ahw;
@@ -362,6 +362,10 @@ static inline void qlcnic_83xx_get_mbx_data(struct qlcnic_adapter *adapter,
                                     struct qlcnic_cmd_args *cmd)
 {
        int i;
+
+       if (cmd->op_type == QLC_83XX_MBX_POST_BC_OP)
+               return;
+
        for (i = 0; i < cmd->rsp.num; i++)
                cmd->rsp.arg[i] = readl(QLCNIC_MBX_FW(adapter->ahw, i));
 }
@@ -398,24 +402,33 @@ irqreturn_t qlcnic_83xx_clear_legacy_intr(struct qlcnic_adapter *adapter)
        return IRQ_HANDLED;
 }
 
+static inline void qlcnic_83xx_notify_mbx_response(struct qlcnic_mailbox *mbx)
+{
+       atomic_set(&mbx->rsp_status, QLC_83XX_MBX_RESPONSE_ARRIVED);
+       complete(&mbx->completion);
+}
+
 static void qlcnic_83xx_poll_process_aen(struct qlcnic_adapter *adapter)
 {
-       u32 resp, event;
+       u32 resp, event, rsp_status = QLC_83XX_MBX_RESPONSE_ARRIVED;
+       struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
        unsigned long flags;
 
-       spin_lock_irqsave(&adapter->ahw->mbx_lock, flags);
-
+       spin_lock_irqsave(&mbx->aen_lock, flags);
        resp = QLCRDX(adapter->ahw, QLCNIC_FW_MBX_CTRL);
        if (!(resp & QLCNIC_SET_OWNER))
                goto out;
 
        event = readl(QLCNIC_MBX_FW(adapter->ahw, 0));
-       if (event &  QLCNIC_MBX_ASYNC_EVENT)
+       if (event &  QLCNIC_MBX_ASYNC_EVENT) {
                __qlcnic_83xx_process_aen(adapter);
-
+       } else {
+               if (atomic_read(&mbx->rsp_status) != rsp_status)
+                       qlcnic_83xx_notify_mbx_response(mbx);
+       }
 out:
        qlcnic_83xx_enable_legacy_msix_mbx_intr(adapter);
-       spin_unlock_irqrestore(&adapter->ahw->mbx_lock, flags);
+       spin_unlock_irqrestore(&mbx->aen_lock, flags);
 }
 
 irqreturn_t qlcnic_83xx_intr(int irq, void *data)
@@ -515,7 +528,7 @@ int qlcnic_83xx_setup_mbx_intr(struct qlcnic_adapter *adapter)
        }
 
        /* Enable mailbox interrupt */
-       qlcnic_83xx_enable_mbx_intrpt(adapter);
+       qlcnic_83xx_enable_mbx_interrupt(adapter);
 
        return err;
 }
@@ -628,7 +641,7 @@ void qlcnic_83xx_set_mac_filter_count(struct qlcnic_adapter *adapter)
        ahw->max_uc_count = count;
 }
 
-void qlcnic_83xx_enable_mbx_intrpt(struct qlcnic_adapter *adapter)
+void qlcnic_83xx_enable_mbx_interrupt(struct qlcnic_adapter *adapter)
 {
        u32 val;
 
@@ -682,11 +695,14 @@ static void qlcnic_83xx_handle_link_aen(struct qlcnic_adapter *adapter,
 static void qlcnic_83xx_handle_idc_comp_aen(struct qlcnic_adapter *adapter,
                                            u32 data[]);
 
-static void qlcnic_dump_mbx(struct qlcnic_adapter *adapter,
-                           struct qlcnic_cmd_args *cmd)
+void qlcnic_dump_mbx(struct qlcnic_adapter *adapter,
+                    struct qlcnic_cmd_args *cmd)
 {
        int i;
 
+       if (cmd->op_type == QLC_83XX_MBX_POST_BC_OP)
+               return;
+
        dev_info(&adapter->pdev->dev,
                 "Host MBX regs(%d)\n", cmd->req.num);
        for (i = 0; i < cmd->req.num; i++) {
@@ -705,120 +721,74 @@ static void qlcnic_dump_mbx(struct qlcnic_adapter *adapter,
        pr_info("\n");
 }
 
-/* Mailbox response for mac rcode */
-u32 qlcnic_83xx_mac_rcode(struct qlcnic_adapter *adapter)
+static inline void
+qlcnic_83xx_poll_for_mbx_completion(struct qlcnic_adapter *adapter,
+                                   struct qlcnic_cmd_args *cmd)
 {
-       u32 fw_data;
-       u8 mac_cmd_rcode;
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+       int opcode = LSW(cmd->req.arg[0]);
+       unsigned long max_loops;
 
-       fw_data = readl(QLCNIC_MBX_FW(adapter->ahw, 2));
-       mac_cmd_rcode = (u8)fw_data;
-       if (mac_cmd_rcode == QLC_83XX_NO_NIC_RESOURCE ||
-           mac_cmd_rcode == QLC_83XX_MAC_PRESENT ||
-           mac_cmd_rcode == QLC_83XX_MAC_ABSENT)
-               return QLCNIC_RCODE_SUCCESS;
-       return 1;
-}
+       max_loops = cmd->total_cmds * QLC_83XX_MBX_CMD_LOOP;
 
-u32 qlcnic_83xx_mbx_poll(struct qlcnic_adapter *adapter, u32 *wait_time)
-{
-       u32 data;
-       struct qlcnic_hardware_context *ahw = adapter->ahw;
-       /* wait for mailbox completion */
-       do {
-               data = QLCRDX(ahw, QLCNIC_FW_MBX_CTRL);
-               if (++(*wait_time) > QLCNIC_MBX_TIMEOUT) {
-                       data = QLCNIC_RCODE_TIMEOUT;
-                       break;
-               }
-               mdelay(1);
-       } while (!data);
-       return data;
+       for (; max_loops; max_loops--) {
+               if (atomic_read(&cmd->rsp_status) ==
+                   QLC_83XX_MBX_RESPONSE_ARRIVED)
+                       return;
+
+               udelay(1);
+       }
+
+       dev_err(&adapter->pdev->dev,
+               "%s: Mailbox command timed out, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n",
+               __func__, opcode, cmd->type, ahw->pci_func, ahw->op_mode);
+       flush_workqueue(ahw->mailbox->work_q);
+       return;
 }
 
-int qlcnic_83xx_mbx_op(struct qlcnic_adapter *adapter,
-                      struct qlcnic_cmd_args *cmd)
+int qlcnic_83xx_issue_cmd(struct qlcnic_adapter *adapter,
+                         struct qlcnic_cmd_args *cmd)
 {
-       int i;
-       u16 opcode;
-       u8 mbx_err_code;
-       unsigned long flags;
+       struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
        struct qlcnic_hardware_context *ahw = adapter->ahw;
-       u32 rsp, mbx_val, fw_data, rsp_num, mbx_cmd, wait_time = 0;
+       int cmd_type, err, opcode;
+       unsigned long timeout;
 
        opcode = LSW(cmd->req.arg[0]);
-       if (!test_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status)) {
-               dev_info(&adapter->pdev->dev,
-                        "Mailbox cmd attempted, 0x%x\n", opcode);
-               dev_info(&adapter->pdev->dev, "Mailbox detached\n");
-               return 0;
+       cmd_type = cmd->type;
+       err = mbx->ops->enqueue_cmd(adapter, cmd, &timeout);
+       if (err) {
+               dev_err(&adapter->pdev->dev,
+                       "%s: Mailbox not available, cmd_op=0x%x, cmd_context=0x%x, pci_func=0x%x, op_mode=0x%x\n",
+                       __func__, opcode, cmd->type, ahw->pci_func,
+                       ahw->op_mode);
+               return err;
        }
 
-       spin_lock_irqsave(&adapter->ahw->mbx_lock, flags);
-       mbx_val = QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL);
-
-       if (mbx_val) {
-               QLCDB(adapter, DRV,
-                     "Mailbox cmd attempted, 0x%x\n", opcode);
-               QLCDB(adapter, DRV,
-                     "Mailbox not available, 0x%x, collect FW dump\n",
-                     mbx_val);
-               cmd->rsp.arg[0] = QLCNIC_RCODE_TIMEOUT;
-               spin_unlock_irqrestore(&adapter->ahw->mbx_lock, flags);
-               return cmd->rsp.arg[0];
-       }
-
-       /* Fill in mailbox registers */
-       mbx_cmd = cmd->req.arg[0];
-       writel(mbx_cmd, QLCNIC_MBX_HOST(ahw, 0));
-       for (i = 1; i < cmd->req.num; i++)
-               writel(cmd->req.arg[i], QLCNIC_MBX_HOST(ahw, i));
-
-       /* Signal FW about the impending command */
-       QLCWRX(ahw, QLCNIC_HOST_MBX_CTRL, QLCNIC_SET_OWNER);
-poll:
-       rsp = qlcnic_83xx_mbx_poll(adapter, &wait_time);
-       if (rsp != QLCNIC_RCODE_TIMEOUT) {
-               /* Get the FW response data */
-               fw_data = readl(QLCNIC_MBX_FW(ahw, 0));
-               if (fw_data &  QLCNIC_MBX_ASYNC_EVENT) {
-                       __qlcnic_83xx_process_aen(adapter);
-                       goto poll;
-               }
-               mbx_err_code = QLCNIC_MBX_STATUS(fw_data);
-               rsp_num = QLCNIC_MBX_NUM_REGS(fw_data);
-               opcode = QLCNIC_MBX_RSP(fw_data);
-               qlcnic_83xx_get_mbx_data(adapter, cmd);
-
-               switch (mbx_err_code) {
-               case QLCNIC_MBX_RSP_OK:
-               case QLCNIC_MBX_PORT_RSP_OK:
-                       rsp = QLCNIC_RCODE_SUCCESS;
-                       break;
-               default:
-                       if (opcode == QLCNIC_CMD_CONFIG_MAC_VLAN) {
-                               rsp = qlcnic_83xx_mac_rcode(adapter);
-                               if (!rsp)
-                                       goto out;
-                       }
+       switch (cmd_type) {
+       case QLC_83XX_MBX_CMD_WAIT:
+               if (!wait_for_completion_timeout(&cmd->completion, timeout)) {
                        dev_err(&adapter->pdev->dev,
-                               "MBX command 0x%x failed with err:0x%x\n",
-                               opcode, mbx_err_code);
-                       rsp = mbx_err_code;
-                       qlcnic_dump_mbx(adapter, cmd);
-                       break;
+                               "%s: Mailbox command timed out, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n",
+                               __func__, opcode, cmd_type, ahw->pci_func,
+                               ahw->op_mode);
+                       flush_workqueue(mbx->work_q);
                }
-               goto out;
+               break;
+       case QLC_83XX_MBX_CMD_NO_WAIT:
+               return 0;
+       case QLC_83XX_MBX_CMD_BUSY_WAIT:
+               qlcnic_83xx_poll_for_mbx_completion(adapter, cmd);
+               break;
+       default:
+               dev_err(&adapter->pdev->dev,
+                       "%s: Invalid mailbox command, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n",
+                       __func__, opcode, cmd_type, ahw->pci_func,
+                       ahw->op_mode);
+               qlcnic_83xx_detach_mailbox_work(adapter);
        }
 
-       dev_err(&adapter->pdev->dev, "MBX command 0x%x timed out\n",
-               QLCNIC_MBX_RSP(mbx_cmd));
-       rsp = QLCNIC_RCODE_TIMEOUT;
-out:
-       /* clear fw mbx control register */
-       QLCWRX(ahw, QLCNIC_FW_MBX_CTRL, QLCNIC_CLR_OWNER);
-       spin_unlock_irqrestore(&adapter->ahw->mbx_lock, flags);
-       return rsp;
+       return cmd->rsp_opcode;
 }
 
 int qlcnic_83xx_alloc_mbx_args(struct qlcnic_cmd_args *mbx,
@@ -828,6 +798,7 @@ int qlcnic_83xx_alloc_mbx_args(struct qlcnic_cmd_args *mbx,
        u32 temp;
        const struct qlcnic_mailbox_metadata *mbx_tbl;
 
+       memset(mbx, 0, sizeof(struct qlcnic_cmd_args));
        mbx_tbl = qlcnic_83xx_mbx_tbl;
        size = ARRAY_SIZE(qlcnic_83xx_mbx_tbl);
        for (i = 0; i < size; i++) {
@@ -850,6 +821,7 @@ int qlcnic_83xx_alloc_mbx_args(struct qlcnic_cmd_args *mbx,
                        memset(mbx->rsp.arg, 0, sizeof(u32) * mbx->rsp.num);
                        temp = adapter->ahw->fw_hal_version << 29;
                        mbx->req.arg[0] = (type | (mbx->req.num << 16) | temp);
+                       mbx->cmd_op = type;
                        return 0;
                }
        }
@@ -888,9 +860,9 @@ static void qlcnic_83xx_handle_idc_comp_aen(struct qlcnic_adapter *adapter,
 
 void __qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter)
 {
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
        u32 event[QLC_83XX_MBX_AEN_CNT];
        int i;
-       struct qlcnic_hardware_context *ahw = adapter->ahw;
 
        for (i = 0; i < QLC_83XX_MBX_AEN_CNT; i++)
                event[i] = readl(QLCNIC_MBX_FW(ahw, i));
@@ -910,6 +882,7 @@ void __qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter)
                                   &adapter->idc_aen_work, 0);
                break;
        case QLCNIC_MBX_TIME_EXTEND_EVENT:
+               ahw->extend_lb_time = event[1] >> 8 & 0xf;
                break;
        case QLCNIC_MBX_BC_EVENT:
                qlcnic_sriov_handle_bc_event(adapter, event[1]);
@@ -933,20 +906,23 @@ void __qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter)
 
 static void qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter)
 {
+       u32 resp, event, rsp_status = QLC_83XX_MBX_RESPONSE_ARRIVED;
        struct qlcnic_hardware_context *ahw = adapter->ahw;
-       u32 resp, event;
+       struct qlcnic_mailbox *mbx = ahw->mailbox;
        unsigned long flags;
 
-       spin_lock_irqsave(&ahw->mbx_lock, flags);
-
+       spin_lock_irqsave(&mbx->aen_lock, flags);
        resp = QLCRDX(ahw, QLCNIC_FW_MBX_CTRL);
        if (resp & QLCNIC_SET_OWNER) {
                event = readl(QLCNIC_MBX_FW(ahw, 0));
-               if (event &  QLCNIC_MBX_ASYNC_EVENT)
+               if (event &  QLCNIC_MBX_ASYNC_EVENT) {
                        __qlcnic_83xx_process_aen(adapter);
+               } else {
+                       if (atomic_read(&mbx->rsp_status) != rsp_status)
+                               qlcnic_83xx_notify_mbx_response(mbx);
+               }
        }
-
-       spin_unlock_irqrestore(&ahw->mbx_lock, flags);
+       spin_unlock_irqrestore(&mbx->aen_lock, flags);
 }
 
 static void qlcnic_83xx_mbx_poll_work(struct work_struct *work)
@@ -969,6 +945,7 @@ void qlcnic_83xx_enable_mbx_poll(struct qlcnic_adapter *adapter)
                return;
 
        INIT_DELAYED_WORK(&adapter->mbx_poll_work, qlcnic_83xx_mbx_poll_work);
+       queue_delayed_work(adapter->qlcnic_wq, &adapter->mbx_poll_work, 0);
 }
 
 void qlcnic_83xx_disable_mbx_poll(struct qlcnic_adapter *adapter)
@@ -1355,8 +1332,10 @@ static int qlcnic_83xx_diag_alloc_res(struct net_device *netdev, int test,
 
        if (adapter->ahw->diag_test == QLCNIC_LOOPBACK_TEST) {
                /* disable and free mailbox interrupt */
-               if (!(adapter->flags & QLCNIC_MSIX_ENABLED))
+               if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) {
+                       qlcnic_83xx_enable_mbx_poll(adapter);
                        qlcnic_83xx_free_mbx_intr(adapter);
+               }
                adapter->ahw->loopback_state = 0;
                adapter->ahw->hw_ops->setup_link_event(adapter, 1);
        }
@@ -1377,6 +1356,8 @@ static void qlcnic_83xx_diag_free_res(struct net_device *netdev,
                for (ring = 0; ring < adapter->max_sds_rings; ring++) {
                        sds_ring = &adapter->recv_ctx->sds_rings[ring];
                        qlcnic_83xx_disable_intr(adapter, sds_ring);
+                       if (!(adapter->flags & QLCNIC_MSIX_ENABLED))
+                               qlcnic_83xx_enable_mbx_poll(adapter);
                }
        }
 
@@ -1386,6 +1367,7 @@ static void qlcnic_83xx_diag_free_res(struct net_device *netdev,
        if (adapter->ahw->diag_test == QLCNIC_LOOPBACK_TEST) {
                if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) {
                        err = qlcnic_83xx_setup_mbx_intr(adapter);
+                       qlcnic_83xx_disable_mbx_poll(adapter);
                        if (err) {
                                dev_err(&adapter->pdev->dev,
                                        "%s: failed to setup mbx interrupt\n",
@@ -1402,6 +1384,10 @@ static void qlcnic_83xx_diag_free_res(struct net_device *netdev,
 
        if (netif_running(netdev))
                __qlcnic_up(adapter, netdev);
+
+       if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST &&
+           !(adapter->flags & QLCNIC_MSIX_ENABLED))
+               qlcnic_83xx_disable_mbx_poll(adapter);
 out:
        netif_device_attach(netdev);
 }
@@ -1619,26 +1605,33 @@ static void qlcnic_83xx_set_interface_id_promisc(struct qlcnic_adapter *adapter,
 
 int qlcnic_83xx_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode)
 {
-       int err;
+       struct qlcnic_cmd_args *cmd = NULL;
        u32 temp = 0;
-       struct qlcnic_cmd_args cmd;
+       int err;
 
        if (adapter->recv_ctx->state == QLCNIC_HOST_CTX_STATE_FREED)
                return -EIO;
 
-       err = qlcnic_alloc_mbx_args(&cmd, adapter,
+       cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
+       if (!cmd)
+               return -ENOMEM;
+
+       err = qlcnic_alloc_mbx_args(cmd, adapter,
                                    QLCNIC_CMD_CONFIGURE_MAC_RX_MODE);
        if (err)
-               return err;
+               goto out;
 
+       cmd->type = QLC_83XX_MBX_CMD_NO_WAIT;
        qlcnic_83xx_set_interface_id_promisc(adapter, &temp);
-       cmd.req.arg[1] = (mode ? 1 : 0) | temp;
-       err = qlcnic_issue_cmd(adapter, &cmd);
-       if (err)
-               dev_info(&adapter->pdev->dev,
-                        "Promiscous mode config failed\n");
+       cmd->req.arg[1] = (mode ? 1 : 0) | temp;
+       err = qlcnic_issue_cmd(adapter, cmd);
+       if (!err)
+               return err;
 
-       qlcnic_free_mbx_args(&cmd);
+       qlcnic_free_mbx_args(cmd);
+
+out:
+       kfree(cmd);
        return err;
 }
 
@@ -1651,7 +1644,7 @@ int qlcnic_83xx_loopback_test(struct net_device *netdev, u8 mode)
        if (ahw->op_mode == QLCNIC_NON_PRIV_FUNC) {
                netdev_warn(netdev,
                            "Loopback test not supported in non privileged mode\n");
-               return ret;
+               return -ENOTSUPP;
        }
 
        if (test_bit(__QLCNIC_RESETTING, &adapter->state)) {
@@ -1679,19 +1672,17 @@ int qlcnic_83xx_loopback_test(struct net_device *netdev, u8 mode)
        /* Poll for link up event before running traffic */
        do {
                msleep(QLC_83XX_LB_MSLEEP_COUNT);
-               if (!(adapter->flags & QLCNIC_MSIX_ENABLED))
-                       qlcnic_83xx_process_aen(adapter);
 
                if (test_bit(__QLCNIC_RESETTING, &adapter->state)) {
                        netdev_info(netdev,
                                    "Device is resetting, free LB test resources\n");
-                       ret = -EIO;
+                       ret = -EBUSY;
                        goto free_diag_res;
                }
                if (loop++ > QLC_83XX_LB_WAIT_COUNT) {
                        netdev_info(netdev,
                                    "Firmware didn't sent link up event to loopback request\n");
-                       ret = -QLCNIC_FW_NOT_RESPOND;
+                       ret = -ETIMEDOUT;
                        qlcnic_83xx_clear_lb_mode(adapter, mode);
                        goto free_diag_res;
                }
@@ -1700,7 +1691,7 @@ int qlcnic_83xx_loopback_test(struct net_device *netdev, u8 mode)
        /* Make sure carrier is off and queue is stopped during loopback */
        if (netif_running(netdev)) {
                netif_carrier_off(netdev);
-               netif_stop_queue(netdev);
+               netif_tx_stop_all_queues(netdev);
        }
 
        ret = qlcnic_do_lb_test(adapter, mode);
@@ -1716,18 +1707,42 @@ fail_diag_alloc:
        return ret;
 }
 
+static void qlcnic_extend_lb_idc_cmpltn_wait(struct qlcnic_adapter *adapter,
+                                            u32 *max_wait_count)
+{
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+       int temp;
+
+       netdev_info(adapter->netdev, "Recieved loopback IDC time extend event for 0x%x seconds\n",
+                   ahw->extend_lb_time);
+       temp = ahw->extend_lb_time * 1000;
+       *max_wait_count += temp / QLC_83XX_LB_MSLEEP_COUNT;
+       ahw->extend_lb_time = 0;
+}
+
 int qlcnic_83xx_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
 {
        struct qlcnic_hardware_context *ahw = adapter->ahw;
        struct net_device *netdev = adapter->netdev;
+       u32 config, max_wait_count;
        int status = 0, loop = 0;
-       u32 config;
 
+       ahw->extend_lb_time = 0;
+       max_wait_count = QLC_83XX_LB_WAIT_COUNT;
        status = qlcnic_83xx_get_port_config(adapter);
        if (status)
                return status;
 
        config = ahw->port_config;
+
+       /* Check if port is already in loopback mode */
+       if ((config & QLC_83XX_CFG_LOOPBACK_HSS) ||
+           (config & QLC_83XX_CFG_LOOPBACK_EXT)) {
+               netdev_err(netdev,
+                          "Port already in Loopback mode.\n");
+               return -EINPROGRESS;
+       }
+
        set_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
 
        if (mode == QLCNIC_ILB_MODE)
@@ -1748,21 +1763,24 @@ int qlcnic_83xx_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
        /* Wait for Link and IDC Completion AEN */
        do {
                msleep(QLC_83XX_LB_MSLEEP_COUNT);
-               if (!(adapter->flags & QLCNIC_MSIX_ENABLED))
-                       qlcnic_83xx_process_aen(adapter);
 
                if (test_bit(__QLCNIC_RESETTING, &adapter->state)) {
                        netdev_info(netdev,
                                    "Device is resetting, free LB test resources\n");
                        clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
-                       return -EIO;
+                       return -EBUSY;
                }
-               if (loop++ > QLC_83XX_LB_WAIT_COUNT) {
-                       netdev_err(netdev,
-                                  "Did not receive IDC completion AEN\n");
+
+               if (ahw->extend_lb_time)
+                       qlcnic_extend_lb_idc_cmpltn_wait(adapter,
+                                                        &max_wait_count);
+
+               if (loop++ > max_wait_count) {
+                       netdev_err(netdev, "%s: Did not receive loopback IDC completion AEN\n",
+                                  __func__);
                        clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
                        qlcnic_83xx_clear_lb_mode(adapter, mode);
-                       return -EIO;
+                       return -ETIMEDOUT;
                }
        } while (test_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status));
 
@@ -1774,10 +1792,12 @@ int qlcnic_83xx_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
 int qlcnic_83xx_clear_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
 {
        struct qlcnic_hardware_context *ahw = adapter->ahw;
+       u32 config = ahw->port_config, max_wait_count;
        struct net_device *netdev = adapter->netdev;
        int status = 0, loop = 0;
-       u32 config = ahw->port_config;
 
+       ahw->extend_lb_time = 0;
+       max_wait_count = QLC_83XX_LB_WAIT_COUNT;
        set_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
        if (mode == QLCNIC_ILB_MODE)
                ahw->port_config &= ~QLC_83XX_CFG_LOOPBACK_HSS;
@@ -1797,21 +1817,23 @@ int qlcnic_83xx_clear_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
        /* Wait for Link and IDC Completion AEN */
        do {
                msleep(QLC_83XX_LB_MSLEEP_COUNT);
-               if (!(adapter->flags & QLCNIC_MSIX_ENABLED))
-                       qlcnic_83xx_process_aen(adapter);
 
                if (test_bit(__QLCNIC_RESETTING, &adapter->state)) {
                        netdev_info(netdev,
                                    "Device is resetting, free LB test resources\n");
                        clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
-                       return -EIO;
+                       return -EBUSY;
                }
 
-               if (loop++ > QLC_83XX_LB_WAIT_COUNT) {
-                       netdev_err(netdev,
-                                  "Did not receive IDC completion AEN\n");
+               if (ahw->extend_lb_time)
+                       qlcnic_extend_lb_idc_cmpltn_wait(adapter,
+                                                        &max_wait_count);
+
+               if (loop++ > max_wait_count) {
+                       netdev_err(netdev, "%s: Did not receive loopback IDC completion AEN\n",
+                                  __func__);
                        clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
-                       return -EIO;
+                       return -ETIMEDOUT;
                }
        } while (test_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status));
 
@@ -1950,25 +1972,31 @@ static void qlcnic_83xx_set_interface_id_macaddr(struct qlcnic_adapter *adapter,
 int qlcnic_83xx_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr,
                                   u16 vlan_id, u8 op)
 {
-       int err;
-       u32 *buf, temp = 0;
-       struct qlcnic_cmd_args cmd;
+       struct qlcnic_cmd_args *cmd = NULL;
        struct qlcnic_macvlan_mbx mv;
+       u32 *buf, temp = 0;
+       int err;
 
        if (adapter->recv_ctx->state == QLCNIC_HOST_CTX_STATE_FREED)
                return -EIO;
 
-       err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_MAC_VLAN);
+       cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
+       if (!cmd)
+               return -ENOMEM;
+
+       err = qlcnic_alloc_mbx_args(cmd, adapter, QLCNIC_CMD_CONFIG_MAC_VLAN);
        if (err)
-               return err;
+               goto out;
+
+       cmd->type = QLC_83XX_MBX_CMD_NO_WAIT;
 
        if (vlan_id)
                op = (op == QLCNIC_MAC_ADD || op == QLCNIC_MAC_VLAN_ADD) ?
                     QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_VLAN_DEL;
 
-       cmd.req.arg[1] = op | (1 << 8);
+       cmd->req.arg[1] = op | (1 << 8);
        qlcnic_83xx_set_interface_id_macaddr(adapter, &temp);
-       cmd.req.arg[1] |= temp;
+       cmd->req.arg[1] |= temp;
        mv.vlan = vlan_id;
        mv.mac_addr0 = addr[0];
        mv.mac_addr1 = addr[1];
@@ -1976,14 +2004,15 @@ int qlcnic_83xx_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr,
        mv.mac_addr3 = addr[3];
        mv.mac_addr4 = addr[4];
        mv.mac_addr5 = addr[5];
-       buf = &cmd.req.arg[2];
+       buf = &cmd->req.arg[2];
        memcpy(buf, &mv, sizeof(struct qlcnic_macvlan_mbx));
-       err = qlcnic_issue_cmd(adapter, &cmd);
-       if (err)
-               dev_err(&adapter->pdev->dev,
-                       "MAC-VLAN %s to CAM failed, err=%d.\n",
-                       ((op == 1) ? "add " : "delete "), err);
-       qlcnic_free_mbx_args(&cmd);
+       err = qlcnic_issue_cmd(adapter, cmd);
+       if (!err)
+               return err;
+
+       qlcnic_free_mbx_args(cmd);
+out:
+       kfree(cmd);
        return err;
 }
 
@@ -2008,12 +2037,14 @@ void qlcnic_83xx_configure_mac(struct qlcnic_adapter *adapter, u8 *mac,
        cmd->req.arg[1] = type;
 }
 
-int qlcnic_83xx_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac)
+int qlcnic_83xx_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac,
+                               u8 function)
 {
        int err, i;
        struct qlcnic_cmd_args cmd;
        u32 mac_low, mac_high;
 
+       function = 0;
        err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_MAC_ADDRESS);
        if (err)
                return err;
@@ -2099,10 +2130,12 @@ static void qlcnic_83xx_handle_link_aen(struct qlcnic_adapter *adapter,
 irqreturn_t qlcnic_83xx_handle_aen(int irq, void *data)
 {
        struct qlcnic_adapter *adapter = data;
-       unsigned long flags;
+       struct qlcnic_mailbox *mbx;
        u32 mask, resp, event;
+       unsigned long flags;
 
-       spin_lock_irqsave(&adapter->ahw->mbx_lock, flags);
+       mbx = adapter->ahw->mailbox;
+       spin_lock_irqsave(&mbx->aen_lock, flags);
        resp = QLCRDX(adapter->ahw, QLCNIC_FW_MBX_CTRL);
        if (!(resp & QLCNIC_SET_OWNER))
                goto out;
@@ -2110,11 +2143,13 @@ irqreturn_t qlcnic_83xx_handle_aen(int irq, void *data)
        event = readl(QLCNIC_MBX_FW(adapter->ahw, 0));
        if (event &  QLCNIC_MBX_ASYNC_EVENT)
                __qlcnic_83xx_process_aen(adapter);
+       else
+               qlcnic_83xx_notify_mbx_response(mbx);
+
 out:
        mask = QLCRDX(adapter->ahw, QLCNIC_DEF_INT_MASK);
        writel(0, adapter->ahw->pci_base0 + mask);
-       spin_unlock_irqrestore(&adapter->ahw->mbx_lock, flags);
-
+       spin_unlock_irqrestore(&mbx->aen_lock, flags);
        return IRQ_HANDLED;
 }
 
@@ -3104,7 +3139,7 @@ int qlcnic_83xx_set_settings(struct qlcnic_adapter *adapter,
        status = qlcnic_83xx_set_port_config(adapter);
        if (status) {
                dev_info(&adapter->pdev->dev,
-                        "Faild to Set Link Speed and autoneg.\n");
+                        "Failed to Set Link Speed and autoneg.\n");
                adapter->ahw->port_config = config;
        }
        return status;
@@ -3477,3 +3512,306 @@ int qlcnic_83xx_resume(struct qlcnic_adapter *adapter)
                             idc->delay);
        return err;
 }
+
+void qlcnic_83xx_reinit_mbx_work(struct qlcnic_mailbox *mbx)
+{
+       INIT_COMPLETION(mbx->completion);
+       set_bit(QLC_83XX_MBX_READY, &mbx->status);
+}
+
+void qlcnic_83xx_free_mailbox(struct qlcnic_mailbox *mbx)
+{
+       destroy_workqueue(mbx->work_q);
+       kfree(mbx);
+}
+
+static inline void
+qlcnic_83xx_notify_cmd_completion(struct qlcnic_adapter *adapter,
+                                 struct qlcnic_cmd_args *cmd)
+{
+       atomic_set(&cmd->rsp_status, QLC_83XX_MBX_RESPONSE_ARRIVED);
+
+       if (cmd->type == QLC_83XX_MBX_CMD_NO_WAIT) {
+               qlcnic_free_mbx_args(cmd);
+               kfree(cmd);
+               return;
+       }
+       complete(&cmd->completion);
+}
+
+static inline void qlcnic_83xx_flush_mbx_queue(struct qlcnic_adapter *adapter)
+{
+       struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
+       struct list_head *head = &mbx->cmd_q;
+       struct qlcnic_cmd_args *cmd = NULL;
+
+       spin_lock(&mbx->queue_lock);
+
+       while (!list_empty(head)) {
+               cmd = list_entry(head->next, struct qlcnic_cmd_args, list);
+               dev_info(&adapter->pdev->dev, "%s: Mailbox command 0x%x\n",
+                        __func__, cmd->cmd_op);
+               list_del(&cmd->list);
+               mbx->num_cmds--;
+               qlcnic_83xx_notify_cmd_completion(adapter, cmd);
+       }
+
+       spin_unlock(&mbx->queue_lock);
+}
+
+static inline int qlcnic_83xx_check_mbx_status(struct qlcnic_adapter *adapter)
+{
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+       struct qlcnic_mailbox *mbx = ahw->mailbox;
+       u32 host_mbx_ctrl;
+
+       if (!test_bit(QLC_83XX_MBX_READY, &mbx->status))
+               return -EBUSY;
+
+       host_mbx_ctrl = QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL);
+       if (host_mbx_ctrl) {
+               clear_bit(QLC_83XX_MBX_READY, &mbx->status);
+               ahw->idc.collect_dump = 1;
+               return -EIO;
+       }
+
+       return 0;
+}
+
+static inline void qlcnic_83xx_signal_mbx_cmd(struct qlcnic_adapter *adapter,
+                                             u8 issue_cmd)
+{
+       if (issue_cmd)
+               QLCWRX(adapter->ahw, QLCNIC_HOST_MBX_CTRL, QLCNIC_SET_OWNER);
+       else
+               QLCWRX(adapter->ahw, QLCNIC_FW_MBX_CTRL, QLCNIC_CLR_OWNER);
+}
+
+static inline void qlcnic_83xx_dequeue_mbx_cmd(struct qlcnic_adapter *adapter,
+                                              struct qlcnic_cmd_args *cmd)
+{
+       struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
+
+       spin_lock(&mbx->queue_lock);
+
+       list_del(&cmd->list);
+       mbx->num_cmds--;
+
+       spin_unlock(&mbx->queue_lock);
+
+       qlcnic_83xx_notify_cmd_completion(adapter, cmd);
+}
+
+static void qlcnic_83xx_encode_mbx_cmd(struct qlcnic_adapter *adapter,
+                                      struct qlcnic_cmd_args *cmd)
+{
+       u32 mbx_cmd, fw_hal_version, hdr_size, total_size, tmp;
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+       int i, j;
+
+       if (cmd->op_type != QLC_83XX_MBX_POST_BC_OP) {
+               mbx_cmd = cmd->req.arg[0];
+               writel(mbx_cmd, QLCNIC_MBX_HOST(ahw, 0));
+               for (i = 1; i < cmd->req.num; i++)
+                       writel(cmd->req.arg[i], QLCNIC_MBX_HOST(ahw, i));
+       } else {
+               fw_hal_version = ahw->fw_hal_version;
+               hdr_size = sizeof(struct qlcnic_bc_hdr) / sizeof(u32);
+               total_size = cmd->pay_size + hdr_size;
+               tmp = QLCNIC_CMD_BC_EVENT_SETUP | total_size << 16;
+               mbx_cmd = tmp | fw_hal_version << 29;
+               writel(mbx_cmd, QLCNIC_MBX_HOST(ahw, 0));
+
+               /* Back channel specific operations bits */
+               mbx_cmd = 0x1 | 1 << 4;
+
+               if (qlcnic_sriov_pf_check(adapter))
+                       mbx_cmd |= cmd->func_num << 5;
+
+               writel(mbx_cmd, QLCNIC_MBX_HOST(ahw, 1));
+
+               for (i = 2, j = 0; j < hdr_size; i++, j++)
+                       writel(*(cmd->hdr++), QLCNIC_MBX_HOST(ahw, i));
+               for (j = 0; j < cmd->pay_size; j++, i++)
+                       writel(*(cmd->pay++), QLCNIC_MBX_HOST(ahw, i));
+       }
+}
+
+void qlcnic_83xx_detach_mailbox_work(struct qlcnic_adapter *adapter)
+{
+       struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
+
+       clear_bit(QLC_83XX_MBX_READY, &mbx->status);
+       complete(&mbx->completion);
+       cancel_work_sync(&mbx->work);
+       flush_workqueue(mbx->work_q);
+       qlcnic_83xx_flush_mbx_queue(adapter);
+}
+
+static inline int qlcnic_83xx_enqueue_mbx_cmd(struct qlcnic_adapter *adapter,
+                                             struct qlcnic_cmd_args *cmd,
+                                             unsigned long *timeout)
+{
+       struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
+
+       if (test_bit(QLC_83XX_MBX_READY, &mbx->status)) {
+               atomic_set(&cmd->rsp_status, QLC_83XX_MBX_RESPONSE_WAIT);
+               init_completion(&cmd->completion);
+               cmd->rsp_opcode = QLC_83XX_MBX_RESPONSE_UNKNOWN;
+
+               spin_lock(&mbx->queue_lock);
+
+               list_add_tail(&cmd->list, &mbx->cmd_q);
+               mbx->num_cmds++;
+               cmd->total_cmds = mbx->num_cmds;
+               *timeout = cmd->total_cmds * QLC_83XX_MBX_TIMEOUT;
+               queue_work(mbx->work_q, &mbx->work);
+
+               spin_unlock(&mbx->queue_lock);
+
+               return 0;
+       }
+
+       return -EBUSY;
+}
+
+static inline int qlcnic_83xx_check_mac_rcode(struct qlcnic_adapter *adapter,
+                                             struct qlcnic_cmd_args *cmd)
+{
+       u8 mac_cmd_rcode;
+       u32 fw_data;
+
+       if (cmd->cmd_op == QLCNIC_CMD_CONFIG_MAC_VLAN) {
+               fw_data = readl(QLCNIC_MBX_FW(adapter->ahw, 2));
+               mac_cmd_rcode = (u8)fw_data;
+               if (mac_cmd_rcode == QLC_83XX_NO_NIC_RESOURCE ||
+                   mac_cmd_rcode == QLC_83XX_MAC_PRESENT ||
+                   mac_cmd_rcode == QLC_83XX_MAC_ABSENT) {
+                       cmd->rsp_opcode = QLCNIC_RCODE_SUCCESS;
+                       return QLCNIC_RCODE_SUCCESS;
+               }
+       }
+
+       return -EINVAL;
+}
+
+static void qlcnic_83xx_decode_mbx_rsp(struct qlcnic_adapter *adapter,
+                                      struct qlcnic_cmd_args *cmd)
+{
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+       struct device *dev = &adapter->pdev->dev;
+       u8 mbx_err_code;
+       u32 fw_data;
+
+       fw_data = readl(QLCNIC_MBX_FW(ahw, 0));
+       mbx_err_code = QLCNIC_MBX_STATUS(fw_data);
+       qlcnic_83xx_get_mbx_data(adapter, cmd);
+
+       switch (mbx_err_code) {
+       case QLCNIC_MBX_RSP_OK:
+       case QLCNIC_MBX_PORT_RSP_OK:
+               cmd->rsp_opcode = QLCNIC_RCODE_SUCCESS;
+               break;
+       default:
+               if (!qlcnic_83xx_check_mac_rcode(adapter, cmd))
+                       break;
+
+               dev_err(dev, "%s: Mailbox command failed, opcode=0x%x, cmd_type=0x%x, func=0x%x, op_mode=0x%x, error=0x%x\n",
+                       __func__, cmd->cmd_op, cmd->type, ahw->pci_func,
+                       ahw->op_mode, mbx_err_code);
+               cmd->rsp_opcode = QLC_83XX_MBX_RESPONSE_FAILED;
+               qlcnic_dump_mbx(adapter, cmd);
+       }
+
+       return;
+}
+
+static void qlcnic_83xx_mailbox_worker(struct work_struct *work)
+{
+       struct qlcnic_mailbox *mbx = container_of(work, struct qlcnic_mailbox,
+                                                 work);
+       struct qlcnic_adapter *adapter = mbx->adapter;
+       struct qlcnic_mbx_ops *mbx_ops = mbx->ops;
+       struct device *dev = &adapter->pdev->dev;
+       atomic_t *rsp_status = &mbx->rsp_status;
+       struct list_head *head = &mbx->cmd_q;
+       struct qlcnic_hardware_context *ahw;
+       struct qlcnic_cmd_args *cmd = NULL;
+
+       ahw = adapter->ahw;
+
+       while (true) {
+               if (qlcnic_83xx_check_mbx_status(adapter)) {
+                       qlcnic_83xx_flush_mbx_queue(adapter);
+                       return;
+               }
+
+               atomic_set(rsp_status, QLC_83XX_MBX_RESPONSE_WAIT);
+
+               spin_lock(&mbx->queue_lock);
+
+               if (list_empty(head)) {
+                       spin_unlock(&mbx->queue_lock);
+                       return;
+               }
+               cmd = list_entry(head->next, struct qlcnic_cmd_args, list);
+
+               spin_unlock(&mbx->queue_lock);
+
+               mbx_ops->encode_cmd(adapter, cmd);
+               mbx_ops->nofity_fw(adapter, QLC_83XX_MBX_REQUEST);
+
+               if (wait_for_completion_timeout(&mbx->completion,
+                                               QLC_83XX_MBX_TIMEOUT)) {
+                       mbx_ops->decode_resp(adapter, cmd);
+                       mbx_ops->nofity_fw(adapter, QLC_83XX_MBX_COMPLETION);
+               } else {
+                       dev_err(dev, "%s: Mailbox command timeout, opcode=0x%x, cmd_type=0x%x, func=0x%x, op_mode=0x%x\n",
+                               __func__, cmd->cmd_op, cmd->type, ahw->pci_func,
+                               ahw->op_mode);
+                       clear_bit(QLC_83XX_MBX_READY, &mbx->status);
+                       qlcnic_dump_mbx(adapter, cmd);
+                       qlcnic_83xx_idc_request_reset(adapter,
+                                                     QLCNIC_FORCE_FW_DUMP_KEY);
+                       cmd->rsp_opcode = QLCNIC_RCODE_TIMEOUT;
+               }
+               mbx_ops->dequeue_cmd(adapter, cmd);
+       }
+}
+
+static struct qlcnic_mbx_ops qlcnic_83xx_mbx_ops = {
+       .enqueue_cmd    = qlcnic_83xx_enqueue_mbx_cmd,
+       .dequeue_cmd    = qlcnic_83xx_dequeue_mbx_cmd,
+       .decode_resp    = qlcnic_83xx_decode_mbx_rsp,
+       .encode_cmd     = qlcnic_83xx_encode_mbx_cmd,
+       .nofity_fw      = qlcnic_83xx_signal_mbx_cmd,
+};
+
+int qlcnic_83xx_init_mailbox_work(struct qlcnic_adapter *adapter)
+{
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+       struct qlcnic_mailbox *mbx;
+
+       ahw->mailbox = kzalloc(sizeof(*mbx), GFP_KERNEL);
+       if (!ahw->mailbox)
+               return -ENOMEM;
+
+       mbx = ahw->mailbox;
+       mbx->ops = &qlcnic_83xx_mbx_ops;
+       mbx->adapter = adapter;
+
+       spin_lock_init(&mbx->queue_lock);
+       spin_lock_init(&mbx->aen_lock);
+       INIT_LIST_HEAD(&mbx->cmd_q);
+       init_completion(&mbx->completion);
+
+       mbx->work_q = create_singlethread_workqueue("qlcnic_mailbox");
+       if (mbx->work_q == NULL) {
+               kfree(mbx);
+               return -ENOMEM;
+       }
+
+       INIT_WORK(&mbx->work, qlcnic_83xx_mailbox_worker);
+       set_bit(QLC_83XX_MBX_READY, &mbx->status);
+       return 0;
+}
index 272f56a2e14b10b14ff97714c063b3f62ba4c7f8..0fc56160d5844b06f67da66f06fc18aed1dcf222 100644 (file)
 /* Firmware image definitions */
 #define QLC_83XX_BOOTLOADER_FLASH_ADDR 0x10000
 #define QLC_83XX_FW_FILE_NAME          "83xx_fw.bin"
+#define QLC_84XX_FW_FILE_NAME          "84xx_fw.bin"
 #define QLC_83XX_BOOT_FROM_FLASH       0
 #define QLC_83XX_BOOT_FROM_FILE                0x12345678
 
+#define QLC_FW_FILE_NAME_LEN           20
 #define QLC_83XX_MAX_RESET_SEQ_ENTRIES 16
 
+#define QLC_83XX_MBX_POST_BC_OP                0x1
+#define QLC_83XX_MBX_COMPLETION                0x0
+#define QLC_83XX_MBX_REQUEST           0x1
+
+#define QLC_83XX_MBX_TIMEOUT           (5 * HZ)
+#define QLC_83XX_MBX_CMD_LOOP          5000000
+
 /* status descriptor mailbox data
  * @phy_addr_{low|high}: physical address of buffer
  * @sds_ring_size: buffer size
@@ -397,6 +406,7 @@ enum qlcnic_83xx_states {
 #define QLC_83XX_MAX_MC_COUNT                  38
 #define QLC_83XX_MAX_UC_COUNT                  4096
 
+#define QLC_83XX_PVID_STRIP_CAPABILITY         BIT_22
 #define QLC_83XX_GET_FUNC_MODE_FROM_NPAR_INFO(val)     (val & 0x80000000)
 #define QLC_83XX_GET_LRO_CAPABILITY(val)               (val & 0x20)
 #define QLC_83XX_GET_LSO_CAPABILITY(val)               (val & 0x40)
@@ -449,6 +459,20 @@ enum qlcnic_83xx_states {
 #define QLC_83xx_FLASH_MAX_WAIT_USEC           100
 #define QLC_83XX_FLASH_LOCK_TIMEOUT            10000
 
+enum qlc_83xx_mbx_cmd_type {
+       QLC_83XX_MBX_CMD_WAIT = 0,
+       QLC_83XX_MBX_CMD_NO_WAIT,
+       QLC_83XX_MBX_CMD_BUSY_WAIT,
+};
+
+enum qlc_83xx_mbx_response_states {
+       QLC_83XX_MBX_RESPONSE_WAIT = 0,
+       QLC_83XX_MBX_RESPONSE_ARRIVED,
+};
+
+#define QLC_83XX_MBX_RESPONSE_FAILED   0x2
+#define QLC_83XX_MBX_RESPONSE_UNKNOWN  0x3
+
 /* Additional registers in 83xx */
 enum qlc_83xx_ext_regs {
        QLCNIC_GLOBAL_RESET = 0,
@@ -498,8 +522,8 @@ enum qlc_83xx_ext_regs {
 
 /* 83xx funcitons */
 int qlcnic_83xx_get_fw_version(struct qlcnic_adapter *);
-int qlcnic_83xx_mbx_op(struct qlcnic_adapter *, struct qlcnic_cmd_args *);
-int qlcnic_83xx_setup_intr(struct qlcnic_adapter *, u8);
+int qlcnic_83xx_issue_cmd(struct qlcnic_adapter *, struct qlcnic_cmd_args *);
+int qlcnic_83xx_setup_intr(struct qlcnic_adapter *, u8, int);
 void qlcnic_83xx_get_func_no(struct qlcnic_adapter *);
 int qlcnic_83xx_cam_lock(struct qlcnic_adapter *);
 void qlcnic_83xx_cam_unlock(struct qlcnic_adapter *);
@@ -540,7 +564,7 @@ int qlcnic_83xx_setup_link_event(struct qlcnic_adapter *, int);
 void qlcnic_83xx_process_rcv_ring_diag(struct qlcnic_host_sds_ring *);
 int qlcnic_83xx_config_intrpt(struct qlcnic_adapter *, bool);
 int qlcnic_83xx_sre_macaddr_change(struct qlcnic_adapter *, u8 *, u16, u8);
-int qlcnic_83xx_get_mac_address(struct qlcnic_adapter *, u8 *);
+int qlcnic_83xx_get_mac_address(struct qlcnic_adapter *, u8 *, u8);
 void qlcnic_83xx_configure_mac(struct qlcnic_adapter *, u8 *, u8,
                               struct qlcnic_cmd_args *);
 int qlcnic_83xx_alloc_mbx_args(struct qlcnic_cmd_args *,
@@ -551,7 +575,7 @@ void qlcnic_set_npar_data(struct qlcnic_adapter *, const struct qlcnic_info *,
 void qlcnic_83xx_config_intr_coal(struct qlcnic_adapter *);
 irqreturn_t qlcnic_83xx_handle_aen(int, void *);
 int qlcnic_83xx_get_port_info(struct qlcnic_adapter *);
-void qlcnic_83xx_enable_mbx_intrpt(struct qlcnic_adapter *);
+void qlcnic_83xx_enable_mbx_interrupt(struct qlcnic_adapter *);
 void qlcnic_83xx_disable_mbx_intr(struct qlcnic_adapter *);
 irqreturn_t qlcnic_83xx_clear_legacy_intr(struct qlcnic_adapter *);
 irqreturn_t qlcnic_83xx_intr(int, void *);
@@ -623,8 +647,6 @@ int qlcnic_83xx_set_led(struct net_device *, enum ethtool_phys_id_state);
 int qlcnic_83xx_flash_test(struct qlcnic_adapter *);
 int qlcnic_83xx_enable_flash_write(struct qlcnic_adapter *);
 int qlcnic_83xx_disable_flash_write(struct qlcnic_adapter *);
-u32 qlcnic_83xx_mac_rcode(struct qlcnic_adapter *);
-u32 qlcnic_83xx_mbx_poll(struct qlcnic_adapter *, u32 *);
 void qlcnic_83xx_enable_mbx_poll(struct qlcnic_adapter *);
 void qlcnic_83xx_disable_mbx_poll(struct qlcnic_adapter *);
 void qlcnic_83xx_set_mac_filter_count(struct qlcnic_adapter *);
index 345d987aede491b8d5daf0e1e268b0ed8a9f2a51..fb0ef36b529ba60c5747b53cbbebcb98e228dc4b 100644 (file)
@@ -399,6 +399,7 @@ static void qlcnic_83xx_idc_detach_driver(struct qlcnic_adapter *adapter)
        struct net_device *netdev = adapter->netdev;
 
        netif_device_detach(netdev);
+       qlcnic_83xx_detach_mailbox_work(adapter);
 
        /* Disable mailbox interrupt */
        qlcnic_83xx_disable_mbx_intr(adapter);
@@ -610,6 +611,9 @@ int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *adapter)
 {
        int err;
 
+       qlcnic_83xx_reinit_mbx_work(adapter->ahw->mailbox);
+       qlcnic_83xx_enable_mbx_interrupt(adapter);
+
        /* register for NIC IDC AEN Events */
        qlcnic_83xx_register_nic_idc_func(adapter, 1);
 
@@ -617,7 +621,7 @@ int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *adapter)
        if (err)
                return err;
 
-       qlcnic_83xx_enable_mbx_intrpt(adapter);
+       qlcnic_83xx_enable_mbx_interrupt(adapter);
 
        if (qlcnic_83xx_configure_opmode(adapter)) {
                qlcnic_83xx_idc_enter_failed_state(adapter, 1);
@@ -641,7 +645,6 @@ static void qlcnic_83xx_idc_update_idc_params(struct qlcnic_adapter *adapter)
        struct qlcnic_hardware_context *ahw = adapter->ahw;
 
        qlcnic_83xx_idc_update_drv_presence_reg(adapter, 1, 1);
-       set_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status);
        qlcnic_83xx_idc_update_audit_reg(adapter, 0, 1);
        set_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status);
 
@@ -811,9 +814,10 @@ static int qlcnic_83xx_idc_init_state(struct qlcnic_adapter *adapter)
  **/
 static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter)
 {
-       u32 val;
        struct qlcnic_hardware_context *ahw = adapter->ahw;
+       struct qlcnic_mailbox *mbx = ahw->mailbox;
        int ret = 0;
+       u32 val;
 
        /* Perform NIC configuration based ready state entry actions */
        if (ahw->idc.state_entry(adapter))
@@ -825,7 +829,7 @@ static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter)
                        dev_err(&adapter->pdev->dev,
                                "Error: device temperature %d above limits\n",
                                adapter->ahw->temp);
-                       clear_bit(QLC_83XX_MBX_READY, &ahw->idc.status);
+                       clear_bit(QLC_83XX_MBX_READY, &mbx->status);
                        set_bit(__QLCNIC_RESETTING, &adapter->state);
                        qlcnic_83xx_idc_detach_driver(adapter);
                        qlcnic_83xx_idc_enter_failed_state(adapter, 1);
@@ -838,7 +842,7 @@ static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter)
        if (ret) {
                adapter->flags |= QLCNIC_FW_HANG;
                if (!(val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY)) {
-                       clear_bit(QLC_83XX_MBX_READY, &ahw->idc.status);
+                       clear_bit(QLC_83XX_MBX_READY, &mbx->status);
                        set_bit(__QLCNIC_RESETTING, &adapter->state);
                        qlcnic_83xx_idc_enter_need_reset_state(adapter, 1);
                }
@@ -846,6 +850,8 @@ static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter)
        }
 
        if ((val & QLC_83XX_IDC_GRACEFULL_RESET) || ahw->idc.collect_dump) {
+               clear_bit(QLC_83XX_MBX_READY, &mbx->status);
+
                /* Move to need reset state and prepare for reset */
                qlcnic_83xx_idc_enter_need_reset_state(adapter, 1);
                return ret;
@@ -883,12 +889,13 @@ static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter)
  **/
 static int qlcnic_83xx_idc_need_reset_state(struct qlcnic_adapter *adapter)
 {
+       struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
        int ret = 0;
 
        if (adapter->ahw->idc.prev_state != QLC_83XX_IDC_DEV_NEED_RESET) {
                qlcnic_83xx_idc_update_audit_reg(adapter, 0, 1);
                set_bit(__QLCNIC_RESETTING, &adapter->state);
-               clear_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status);
+               clear_bit(QLC_83XX_MBX_READY, &mbx->status);
                if (adapter->ahw->nic_mode == QLC_83XX_VIRTUAL_NIC_MODE)
                        qlcnic_83xx_disable_vnic_mode(adapter, 1);
 
@@ -1080,7 +1087,6 @@ static void qlcnic_83xx_setup_idc_parameters(struct qlcnic_adapter *adapter)
        adapter->ahw->idc.name = (char **)qlc_83xx_idc_states;
 
        clear_bit(__QLCNIC_RESETTING, &adapter->state);
-       set_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status);
        set_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status);
 
        /* Check if reset recovery is disabled */
@@ -1191,6 +1197,9 @@ void qlcnic_83xx_idc_request_reset(struct qlcnic_adapter *adapter, u32 key)
 {
        u32 val;
 
+       if (qlcnic_sriov_vf_check(adapter))
+               return;
+
        if (qlcnic_83xx_lock_driver(adapter)) {
                dev_err(&adapter->pdev->dev,
                        "%s:failed, please retry\n", __func__);
@@ -1939,12 +1948,36 @@ static void qlcnic_83xx_init_hw(struct qlcnic_adapter *p_dev)
                dev_err(&p_dev->pdev->dev, "%s: failed\n", __func__);
 }
 
+static inline void qlcnic_83xx_get_fw_file_name(struct qlcnic_adapter *adapter,
+                                               char *file_name)
+{
+       struct pci_dev *pdev = adapter->pdev;
+
+       memset(file_name, 0, QLC_FW_FILE_NAME_LEN);
+
+       switch (pdev->device) {
+       case PCI_DEVICE_ID_QLOGIC_QLE834X:
+               strncpy(file_name, QLC_83XX_FW_FILE_NAME,
+                       QLC_FW_FILE_NAME_LEN);
+               break;
+       case PCI_DEVICE_ID_QLOGIC_QLE844X:
+               strncpy(file_name, QLC_84XX_FW_FILE_NAME,
+                       QLC_FW_FILE_NAME_LEN);
+               break;
+       default:
+               dev_err(&pdev->dev, "%s: Invalid device id\n",
+                       __func__);
+       }
+}
+
 static int qlcnic_83xx_load_fw_image_from_host(struct qlcnic_adapter *adapter)
 {
+       char fw_file_name[QLC_FW_FILE_NAME_LEN];
        int err = -EIO;
 
-       if (request_firmware(&adapter->ahw->fw_info.fw,
-                            QLC_83XX_FW_FILE_NAME, &(adapter->pdev->dev))) {
+       qlcnic_83xx_get_fw_file_name(adapter, fw_file_name);
+       if (request_firmware(&adapter->ahw->fw_info.fw, fw_file_name,
+                            &(adapter->pdev->dev))) {
                dev_err(&adapter->pdev->dev,
                        "No file FW image, loading flash FW image.\n");
                QLC_SHARED_REG_WR32(adapter, QLCNIC_FW_IMG_VALID,
@@ -2142,17 +2175,42 @@ static void qlcnic_83xx_clear_function_resources(struct qlcnic_adapter *adapter)
 int qlcnic_83xx_init(struct qlcnic_adapter *adapter, int pci_using_dac)
 {
        struct qlcnic_hardware_context *ahw = adapter->ahw;
+       int err = 0;
 
-       if (qlcnic_sriov_vf_check(adapter))
-               return qlcnic_sriov_vf_init(adapter, pci_using_dac);
+       ahw->msix_supported = !!qlcnic_use_msi_x;
+       err = qlcnic_83xx_init_mailbox_work(adapter);
+       if (err)
+               goto exit;
 
-       if (qlcnic_83xx_check_hw_status(adapter))
-               return -EIO;
+       if (qlcnic_sriov_vf_check(adapter)) {
+               err = qlcnic_sriov_vf_init(adapter, pci_using_dac);
+               if (err)
+                       goto detach_mbx;
+               else
+                       return err;
+       }
 
-       /* Initilaize 83xx mailbox spinlock */
-       spin_lock_init(&ahw->mbx_lock);
+       err = qlcnic_83xx_check_hw_status(adapter);
+       if (err)
+               goto detach_mbx;
+
+       if (!qlcnic_83xx_read_flash_descriptor_table(adapter))
+               qlcnic_83xx_read_flash_mfg_id(adapter);
+
+       err = qlcnic_83xx_idc_init(adapter);
+       if (err)
+               goto detach_mbx;
+
+       err = qlcnic_setup_intr(adapter, 0, 0);
+       if (err) {
+               dev_err(&adapter->pdev->dev, "Failed to setup interrupt\n");
+               goto disable_intr;
+       }
+
+       err = qlcnic_83xx_setup_mbx_intr(adapter);
+       if (err)
+               goto disable_mbx_intr;
 
-       set_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status);
        qlcnic_83xx_clear_function_resources(adapter);
 
        INIT_DELAYED_WORK(&adapter->idc_aen_work, qlcnic_83xx_idc_aen_work);
@@ -2160,22 +2218,29 @@ int qlcnic_83xx_init(struct qlcnic_adapter *adapter, int pci_using_dac)
        /* register for NIC IDC AEN Events */
        qlcnic_83xx_register_nic_idc_func(adapter, 1);
 
-       if (!qlcnic_83xx_read_flash_descriptor_table(adapter))
-               qlcnic_83xx_read_flash_mfg_id(adapter);
-
-       if (qlcnic_83xx_idc_init(adapter))
-               return -EIO;
-
        /* Configure default, SR-IOV or Virtual NIC mode of operation */
-       if (qlcnic_83xx_configure_opmode(adapter))
-               return -EIO;
+       err = qlcnic_83xx_configure_opmode(adapter);
+       if (err)
+               goto disable_mbx_intr;
 
        /* Perform operating mode specific initialization */
-       if (adapter->nic_ops->init_driver(adapter))
-               return -EIO;
+       err = adapter->nic_ops->init_driver(adapter);
+       if (err)
+               goto disable_mbx_intr;
 
        /* Periodically monitor device status */
        qlcnic_83xx_idc_poll_dev_state(&adapter->fw_work.work);
+       return 0;
+
+disable_mbx_intr:
+       qlcnic_83xx_free_mbx_intr(adapter);
 
-       return adapter->ahw->idc.err_code;
+disable_intr:
+       qlcnic_teardown_intr(adapter);
+
+detach_mbx:
+       qlcnic_83xx_detach_mailbox_work(adapter);
+       qlcnic_83xx_free_mailbox(ahw->mailbox);
+exit:
+       return err;
 }
index d09389b33474d7c9ae730385e6fa3478fd77c832..d4f0e9591644cb8f9b3e0e7848c9f25ac90c3a0a 100644 (file)
@@ -38,6 +38,7 @@ static const struct qlcnic_mailbox_metadata qlcnic_mbx_tbl[] = {
        {QLCNIC_CMD_GET_TEMP_HDR, 4, 1},
        {QLCNIC_CMD_82XX_SET_DRV_VER, 4, 1},
        {QLCNIC_CMD_GET_LED_STATUS, 4, 2},
+       {QLCNIC_CMD_MQ_TX_CONFIG_INTR, 2, 3},
 };
 
 static inline u32 qlcnic_get_cmd_signature(struct qlcnic_hardware_context *ahw)
@@ -171,6 +172,7 @@ int qlcnic_82xx_issue_cmd(struct qlcnic_adapter *adapter,
                        break;
                }
                dev_err(&pdev->dev, fmt, cmd->rsp.arg[0]);
+               qlcnic_dump_mbx(adapter, cmd);
        } else if (rsp == QLCNIC_CDRP_RSP_OK)
                cmd->rsp.arg[0] = QLCNIC_RCODE_SUCCESS;
 
@@ -243,40 +245,38 @@ qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu)
 
 int qlcnic_82xx_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
 {
-       void *addr;
-       struct qlcnic_hostrq_rx_ctx *prq;
-       struct qlcnic_cardrsp_rx_ctx *prsp;
-       struct qlcnic_hostrq_rds_ring *prq_rds;
-       struct qlcnic_hostrq_sds_ring *prq_sds;
+       struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+       dma_addr_t hostrq_phys_addr, cardrsp_phys_addr;
+       struct net_device *netdev = adapter->netdev;
+       u32 temp_intr_crb_mode, temp_rds_crb_mode;
        struct qlcnic_cardrsp_rds_ring *prsp_rds;
        struct qlcnic_cardrsp_sds_ring *prsp_sds;
+       struct qlcnic_hostrq_rds_ring *prq_rds;
+       struct qlcnic_hostrq_sds_ring *prq_sds;
        struct qlcnic_host_rds_ring *rds_ring;
        struct qlcnic_host_sds_ring *sds_ring;
-       struct qlcnic_cmd_args cmd;
-
-       dma_addr_t hostrq_phys_addr, cardrsp_phys_addr;
-       u64 phys_addr;
-
+       struct qlcnic_cardrsp_rx_ctx *prsp;
+       struct qlcnic_hostrq_rx_ctx *prq;
        u8 i, nrds_rings, nsds_rings;
-       u16 temp_u16;
+       struct qlcnic_cmd_args cmd;
        size_t rq_size, rsp_size;
        u32 cap, reg, val, reg2;
+       u64 phys_addr;
+       u16 temp_u16;
+       void *addr;
        int err;
 
-       struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
-
        nrds_rings = adapter->max_rds_rings;
        nsds_rings = adapter->max_sds_rings;
 
-       rq_size =
-               SIZEOF_HOSTRQ_RX(struct qlcnic_hostrq_rx_ctx, nrds_rings,
-                                               nsds_rings);
-       rsp_size =
-               SIZEOF_CARDRSP_RX(struct qlcnic_cardrsp_rx_ctx, nrds_rings,
-                                               nsds_rings);
+       rq_size = SIZEOF_HOSTRQ_RX(struct qlcnic_hostrq_rx_ctx, nrds_rings,
+                                  nsds_rings);
+       rsp_size = SIZEOF_CARDRSP_RX(struct qlcnic_cardrsp_rx_ctx, nrds_rings,
+                                    nsds_rings);
 
        addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size,
-                       &hostrq_phys_addr, GFP_KERNEL);
+                                 &hostrq_phys_addr, GFP_KERNEL);
        if (addr == NULL)
                return -ENOMEM;
        prq = addr;
@@ -295,15 +295,20 @@ int qlcnic_82xx_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
                                                | QLCNIC_CAP0_VALIDOFF);
        cap |= (QLCNIC_CAP0_JUMBO_CONTIGUOUS | QLCNIC_CAP0_LRO_CONTIGUOUS);
 
-       temp_u16 = offsetof(struct qlcnic_hostrq_rx_ctx, msix_handler);
-       prq->valid_field_offset = cpu_to_le16(temp_u16);
-       prq->txrx_sds_binding = nsds_rings - 1;
+       if (qlcnic_check_multi_tx(adapter) &&
+           !adapter->ahw->diag_test) {
+               cap |= QLCNIC_CAP0_TX_MULTI;
+       } else {
+               temp_u16 = offsetof(struct qlcnic_hostrq_rx_ctx, msix_handler);
+               prq->valid_field_offset = cpu_to_le16(temp_u16);
+               prq->txrx_sds_binding = nsds_rings - 1;
+               temp_intr_crb_mode = QLCNIC_HOST_INT_CRB_MODE_SHARED;
+               prq->host_int_crb_mode = cpu_to_le32(temp_intr_crb_mode);
+               temp_rds_crb_mode = QLCNIC_HOST_RDS_CRB_MODE_UNIQUE;
+               prq->host_rds_crb_mode = cpu_to_le32(temp_rds_crb_mode);
+       }
 
        prq->capabilities[0] = cpu_to_le32(cap);
-       prq->host_int_crb_mode =
-               cpu_to_le32(QLCNIC_HOST_INT_CRB_MODE_SHARED);
-       prq->host_rds_crb_mode =
-               cpu_to_le32(QLCNIC_HOST_RDS_CRB_MODE_UNIQUE);
 
        prq->num_rds_rings = cpu_to_le16(nrds_rings);
        prq->num_sds_rings = cpu_to_le16(nsds_rings);
@@ -317,10 +322,8 @@ int qlcnic_82xx_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
                        le32_to_cpu(prq->rds_ring_offset));
 
        for (i = 0; i < nrds_rings; i++) {
-
                rds_ring = &recv_ctx->rds_rings[i];
                rds_ring->producer = 0;
-
                prq_rds[i].host_phys_addr = cpu_to_le64(rds_ring->phys_addr);
                prq_rds[i].ring_size = cpu_to_le32(rds_ring->num_desc);
                prq_rds[i].ring_kind = cpu_to_le32(i);
@@ -331,14 +334,16 @@ int qlcnic_82xx_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
                        le32_to_cpu(prq->sds_ring_offset));
 
        for (i = 0; i < nsds_rings; i++) {
-
                sds_ring = &recv_ctx->sds_rings[i];
                sds_ring->consumer = 0;
                memset(sds_ring->desc_head, 0, STATUS_DESC_RINGSIZE(sds_ring));
-
                prq_sds[i].host_phys_addr = cpu_to_le64(sds_ring->phys_addr);
                prq_sds[i].ring_size = cpu_to_le32(sds_ring->num_desc);
-               prq_sds[i].msi_index = cpu_to_le16(i);
+               if (qlcnic_check_multi_tx(adapter) &&
+                   !adapter->ahw->diag_test)
+                       prq_sds[i].msi_index = cpu_to_le16(ahw->intr_tbl[i].id);
+               else
+                       prq_sds[i].msi_index = cpu_to_le16(i);
        }
 
        phys_addr = hostrq_phys_addr;
@@ -361,9 +366,8 @@ int qlcnic_82xx_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
 
        for (i = 0; i < le16_to_cpu(prsp->num_rds_rings); i++) {
                rds_ring = &recv_ctx->rds_rings[i];
-
                reg = le32_to_cpu(prsp_rds[i].host_producer_crb);
-               rds_ring->crb_rcv_producer = adapter->ahw->pci_base0 + reg;
+               rds_ring->crb_rcv_producer = ahw->pci_base0 + reg;
        }
 
        prsp_sds = ((struct qlcnic_cardrsp_sds_ring *)
@@ -371,24 +375,30 @@ int qlcnic_82xx_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
 
        for (i = 0; i < le16_to_cpu(prsp->num_sds_rings); i++) {
                sds_ring = &recv_ctx->sds_rings[i];
-
                reg = le32_to_cpu(prsp_sds[i].host_consumer_crb);
-               reg2 = le32_to_cpu(prsp_sds[i].interrupt_crb);
+               if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test)
+                       reg2 = ahw->intr_tbl[i].src;
+               else
+                       reg2 = le32_to_cpu(prsp_sds[i].interrupt_crb);
 
-               sds_ring->crb_sts_consumer = adapter->ahw->pci_base0 + reg;
-               sds_ring->crb_intr_mask = adapter->ahw->pci_base0 + reg2;
+               sds_ring->crb_intr_mask = ahw->pci_base0 + reg2;
+               sds_ring->crb_sts_consumer = ahw->pci_base0 + reg;
        }
 
        recv_ctx->state = le32_to_cpu(prsp->host_ctx_state);
        recv_ctx->context_id = le16_to_cpu(prsp->context_id);
        recv_ctx->virt_port = prsp->virt_port;
 
+       netdev_info(netdev, "Rx Context[%d] Created, state 0x%x\n",
+                   recv_ctx->context_id, recv_ctx->state);
        qlcnic_free_mbx_args(&cmd);
+
 out_free_rsp:
        dma_free_coherent(&adapter->pdev->dev, rsp_size, prsp,
                          cardrsp_phys_addr);
 out_free_rq:
        dma_free_coherent(&adapter->pdev->dev, rq_size, prq, hostrq_phys_addr);
+
        return err;
 }
 
@@ -416,16 +426,19 @@ int qlcnic_82xx_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter,
                                     struct qlcnic_host_tx_ring *tx_ring,
                                     int ring)
 {
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+       struct net_device *netdev = adapter->netdev;
        struct qlcnic_hostrq_tx_ctx     *prq;
        struct qlcnic_hostrq_cds_ring   *prq_cds;
        struct qlcnic_cardrsp_tx_ctx    *prsp;
-       void    *rq_addr, *rsp_addr;
-       size_t  rq_size, rsp_size;
-       u32     temp;
        struct qlcnic_cmd_args cmd;
-       int     err;
-       u64     phys_addr;
-       dma_addr_t      rq_phys_addr, rsp_phys_addr;
+       u32 temp, intr_mask, temp_int_crb_mode;
+       dma_addr_t rq_phys_addr, rsp_phys_addr;
+       int temp_nsds_rings, index, err;
+       void *rq_addr, *rsp_addr;
+       size_t rq_size, rsp_size;
+       u64 phys_addr;
+       u16 msix_id;
 
        /* reset host resources */
        tx_ring->producer = 0;
@@ -447,18 +460,28 @@ int qlcnic_82xx_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter,
        }
 
        prq = rq_addr;
-
        prsp = rsp_addr;
 
        prq->host_rsp_dma_addr = cpu_to_le64(rsp_phys_addr);
 
        temp = (QLCNIC_CAP0_LEGACY_CONTEXT | QLCNIC_CAP0_LEGACY_MN |
-                                       QLCNIC_CAP0_LSO);
+               QLCNIC_CAP0_LSO);
+       if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test)
+               temp |= QLCNIC_CAP0_TX_MULTI;
+
        prq->capabilities[0] = cpu_to_le32(temp);
 
-       prq->host_int_crb_mode =
-               cpu_to_le32(QLCNIC_HOST_INT_CRB_MODE_SHARED);
-       prq->msi_index = 0;
+       if (qlcnic_check_multi_tx(adapter) &&
+           !adapter->ahw->diag_test) {
+               temp_nsds_rings = adapter->max_sds_rings;
+               index = temp_nsds_rings + ring;
+               msix_id = ahw->intr_tbl[index].id;
+               prq->msi_index = cpu_to_le16(msix_id);
+       } else {
+               temp_int_crb_mode = QLCNIC_HOST_INT_CRB_MODE_SHARED;
+               prq->host_int_crb_mode = cpu_to_le32(temp_int_crb_mode);
+               prq->msi_index = 0;
+       }
 
        prq->interrupt_ctl = 0;
        prq->cmd_cons_dma_addr = cpu_to_le64(tx_ring->hw_cons_phys_addr);
@@ -480,15 +503,25 @@ int qlcnic_82xx_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter,
        err = qlcnic_issue_cmd(adapter, &cmd);
 
        if (err == QLCNIC_RCODE_SUCCESS) {
+               tx_ring->state = le32_to_cpu(prsp->host_ctx_state);
                temp = le32_to_cpu(prsp->cds_ring.host_producer_crb);
                tx_ring->crb_cmd_producer = adapter->ahw->pci_base0 + temp;
                tx_ring->ctx_id = le16_to_cpu(prsp->context_id);
+               if (qlcnic_check_multi_tx(adapter) &&
+                   !adapter->ahw->diag_test &&
+                   (adapter->flags & QLCNIC_MSIX_ENABLED)) {
+                       index = adapter->max_sds_rings + ring;
+                       intr_mask = ahw->intr_tbl[index].src;
+                       tx_ring->crb_intr_mask = ahw->pci_base0 + intr_mask;
+               }
+
+               netdev_info(netdev, "Tx Context[0x%x] Created, state 0x%x\n",
+                           tx_ring->ctx_id, tx_ring->state);
        } else {
-               dev_err(&adapter->pdev->dev,
-                       "Failed to create tx ctx in firmware%d\n", err);
+               netdev_err(netdev, "Failed to create tx ctx in firmware%d\n",
+                          err);
                err = -EIO;
        }
-
        qlcnic_free_mbx_args(&cmd);
 
 out_free_rsp:
@@ -618,6 +651,13 @@ int qlcnic_fw_create_ctx(struct qlcnic_adapter *dev)
                }
        }
 
+       if (qlcnic_82xx_check(dev) && (dev->flags & QLCNIC_MSIX_ENABLED) &&
+           qlcnic_check_multi_tx(dev) && !dev->ahw->diag_test) {
+               err = qlcnic_82xx_mq_intrpt(dev, 1);
+               if (err)
+                       return err;
+       }
+
        err = qlcnic_fw_cmd_create_rx_ctx(dev);
        if (err)
                goto err_out;
@@ -639,13 +679,19 @@ int qlcnic_fw_create_ctx(struct qlcnic_adapter *dev)
        }
 
        set_bit(__QLCNIC_FW_ATTACHED, &dev->state);
+
        return 0;
 
 err_out:
+       if (qlcnic_82xx_check(dev) && (dev->flags & QLCNIC_MSIX_ENABLED) &&
+           qlcnic_check_multi_tx(dev) && !dev->ahw->diag_test)
+                       qlcnic_82xx_config_intrpt(dev, 0);
+
        if (qlcnic_83xx_check(dev) && (dev->flags & QLCNIC_MSIX_ENABLED)) {
                if (dev->ahw->diag_test != QLCNIC_LOOPBACK_TEST)
                        qlcnic_83xx_config_intrpt(dev, 0);
        }
+
        return err;
 }
 
@@ -659,6 +705,12 @@ void qlcnic_fw_destroy_ctx(struct qlcnic_adapter *adapter)
                        qlcnic_fw_cmd_del_tx_ctx(adapter,
                                                 &adapter->tx_ring[ring]);
 
+               if (qlcnic_82xx_check(adapter) &&
+                   (adapter->flags & QLCNIC_MSIX_ENABLED) &&
+                   qlcnic_check_multi_tx(adapter) &&
+                   !adapter->ahw->diag_test)
+                               qlcnic_82xx_config_intrpt(adapter, 0);
+
                if (qlcnic_83xx_check(adapter) &&
                    (adapter->flags & QLCNIC_MSIX_ENABLED)) {
                        if (adapter->ahw->diag_test != QLCNIC_LOOPBACK_TEST)
@@ -723,8 +775,54 @@ void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter)
        }
 }
 
+int qlcnic_82xx_config_intrpt(struct qlcnic_adapter *adapter, u8 op_type)
+{
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+       struct net_device *netdev = adapter->netdev;
+       struct qlcnic_cmd_args cmd;
+       u32 type, val;
+       int i, err = 0;
+
+       for (i = 0; i < ahw->num_msix; i++) {
+               qlcnic_alloc_mbx_args(&cmd, adapter,
+                                     QLCNIC_CMD_MQ_TX_CONFIG_INTR);
+               type = op_type ? QLCNIC_INTRPT_ADD : QLCNIC_INTRPT_DEL;
+               val = type | (ahw->intr_tbl[i].type << 4);
+               if (ahw->intr_tbl[i].type == QLCNIC_INTRPT_MSIX)
+                       val |= (ahw->intr_tbl[i].id << 16);
+               cmd.req.arg[1] = val;
+               err = qlcnic_issue_cmd(adapter, &cmd);
+               if (err) {
+                       netdev_err(netdev, "Failed to %s interrupts %d\n",
+                                  op_type == QLCNIC_INTRPT_ADD ? "Add" :
+                                  "Delete", err);
+                       qlcnic_free_mbx_args(&cmd);
+                       return err;
+               }
+               val = cmd.rsp.arg[1];
+               if (LSB(val)) {
+                       netdev_info(netdev,
+                                   "failed to configure interrupt for %d\n",
+                                   ahw->intr_tbl[i].id);
+                       continue;
+               }
+               if (op_type) {
+                       ahw->intr_tbl[i].id = MSW(val);
+                       ahw->intr_tbl[i].enabled = 1;
+                       ahw->intr_tbl[i].src = cmd.rsp.arg[2];
+               } else {
+                       ahw->intr_tbl[i].id = i;
+                       ahw->intr_tbl[i].enabled = 0;
+                       ahw->intr_tbl[i].src = 0;
+               }
+               qlcnic_free_mbx_args(&cmd);
+       }
+
+       return err;
+}
 
-int qlcnic_82xx_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac)
+int qlcnic_82xx_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac,
+                               u8 function)
 {
        int err, i;
        struct qlcnic_cmd_args cmd;
@@ -734,7 +832,7 @@ int qlcnic_82xx_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac)
        if (err)
                return err;
 
-       cmd.req.arg[1] = adapter->ahw->pci_func | BIT_8;
+       cmd.req.arg[1] = function | BIT_8;
        err = qlcnic_issue_cmd(adapter, &cmd);
 
        if (err == QLCNIC_RCODE_SUCCESS) {
index 7aac23ab31d1bb26b9ac3c069580a114b5dff299..7b0c90efb365e27c7c35fec7068348e37f0d3974 100644 (file)
@@ -125,6 +125,14 @@ static const char qlcnic_83xx_mac_stats_strings[][ETH_GSTRING_LEN] = {
 };
 
 #define QLCNIC_STATS_LEN       ARRAY_SIZE(qlcnic_gstrings_stats)
+
+static const char qlcnic_tx_ring_stats_strings[][ETH_GSTRING_LEN] = {
+       "xmit_on",
+       "xmit_off",
+       "xmit_called",
+       "xmit_finished",
+};
+
 static const char qlcnic_83xx_rx_stats_strings[][ETH_GSTRING_LEN] = {
        "ctx_rx_bytes",
        "ctx_rx_pkts",
@@ -630,15 +638,15 @@ qlcnic_set_ringparam(struct net_device *dev,
 static void qlcnic_get_channels(struct net_device *dev,
                struct ethtool_channels *channel)
 {
-       int min;
        struct qlcnic_adapter *adapter = netdev_priv(dev);
+       int min;
 
        min = min_t(int, adapter->ahw->max_rx_ques, num_online_cpus());
        channel->max_rx = rounddown_pow_of_two(min);
-       channel->max_tx = adapter->ahw->max_tx_ques;
+       channel->max_tx = min_t(int, QLCNIC_MAX_TX_RINGS, num_online_cpus());
 
        channel->rx_count = adapter->max_sds_rings;
-       channel->tx_count = adapter->ahw->max_tx_ques;
+       channel->tx_count = adapter->max_drv_tx_rings;
 }
 
 static int qlcnic_set_channels(struct net_device *dev,
@@ -646,18 +654,27 @@ static int qlcnic_set_channels(struct net_device *dev,
 {
        struct qlcnic_adapter *adapter = netdev_priv(dev);
        int err;
+       int txq = 0;
 
-       if (channel->other_count || channel->combined_count ||
-           channel->tx_count != channel->max_tx)
+       if (channel->other_count || channel->combined_count)
                return -EINVAL;
 
-       err = qlcnic_validate_max_rss(adapter, channel->rx_count);
-       if (err)
-               return err;
+       if (channel->rx_count) {
+               err = qlcnic_validate_max_rss(adapter, channel->rx_count);
+               if (err)
+                       return err;
+       }
 
-       err = qlcnic_set_max_rss(adapter, channel->rx_count, 0);
-       netdev_info(dev, "allocated 0x%x sds rings\n",
-                                adapter->max_sds_rings);
+       if (channel->tx_count) {
+               err = qlcnic_validate_max_tx_rings(adapter, channel->tx_count);
+               if (err)
+                       return err;
+               txq = channel->tx_count;
+       }
+
+       err = qlcnic_set_max_rss(adapter, channel->rx_count, txq);
+       netdev_info(dev, "allocated 0x%x sds rings and  0x%x tx rings\n",
+                   adapter->max_sds_rings, adapter->max_drv_tx_rings);
        return err;
 }
 
@@ -893,6 +910,7 @@ free_diag_res:
 clear_diag_irq:
        adapter->max_sds_rings = max_sds_rings;
        clear_bit(__QLCNIC_RESETTING, &adapter->state);
+
        return ret;
 }
 
@@ -966,6 +984,7 @@ int qlcnic_do_lb_test(struct qlcnic_adapter *adapter, u8 mode)
 int qlcnic_loopback_test(struct net_device *netdev, u8 mode)
 {
        struct qlcnic_adapter *adapter = netdev_priv(netdev);
+       int max_drv_tx_rings = adapter->max_drv_tx_rings;
        int max_sds_rings = adapter->max_sds_rings;
        struct qlcnic_host_sds_ring *sds_ring;
        struct qlcnic_hardware_context *ahw = adapter->ahw;
@@ -1006,9 +1025,9 @@ int qlcnic_loopback_test(struct net_device *netdev, u8 mode)
                msleep(500);
                qlcnic_process_rcv_ring_diag(sds_ring);
                if (loop++ > QLCNIC_ILB_MAX_RCV_LOOP) {
-                       netdev_info(netdev, "firmware didnt respond to loopback"
-                               " configure request\n");
-                       ret = -QLCNIC_FW_NOT_RESPOND;
+                       netdev_info(netdev,
+                                   "Firmware didn't sent link up event to loopback request\n");
+                       ret = -ETIMEDOUT;
                        goto free_res;
                } else if (adapter->ahw->diag_cnt) {
                        ret = adapter->ahw->diag_cnt;
@@ -1025,6 +1044,7 @@ int qlcnic_loopback_test(struct net_device *netdev, u8 mode)
 
  clear_it:
        adapter->max_sds_rings = max_sds_rings;
+       adapter->max_drv_tx_rings = max_drv_tx_rings;
        clear_bit(__QLCNIC_RESETTING, &adapter->state);
        return ret;
 }
@@ -1077,11 +1097,21 @@ qlcnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
                       QLCNIC_TEST_LEN * ETH_GSTRING_LEN);
                break;
        case ETH_SS_STATS:
+               num_stats = ARRAY_SIZE(qlcnic_tx_ring_stats_strings);
+               for (i = 0; i < adapter->max_drv_tx_rings; i++) {
+                       for (index = 0; index < num_stats; index++) {
+                               sprintf(data, "tx_ring_%d %s", i,
+                                       qlcnic_tx_ring_stats_strings[index]);
+                               data += ETH_GSTRING_LEN;
+                       }
+               }
+
                for (index = 0; index < QLCNIC_STATS_LEN; index++) {
                        memcpy(data + index * ETH_GSTRING_LEN,
                               qlcnic_gstrings_stats[index].stat_string,
                               ETH_GSTRING_LEN);
                }
+
                if (qlcnic_83xx_check(adapter)) {
                        num_stats = ARRAY_SIZE(qlcnic_83xx_tx_stats_strings);
                        for (i = 0; i < num_stats; i++, index++)
@@ -1173,11 +1203,22 @@ static void qlcnic_get_ethtool_stats(struct net_device *dev,
                                     struct ethtool_stats *stats, u64 *data)
 {
        struct qlcnic_adapter *adapter = netdev_priv(dev);
+       struct qlcnic_host_tx_ring *tx_ring;
        struct qlcnic_esw_statistics port_stats;
        struct qlcnic_mac_statistics mac_stats;
-       int index, ret, length, size;
+       int index, ret, length, size, ring;
        char *p;
 
+       memset(data, 0, adapter->max_drv_tx_rings * 4 * sizeof(u64));
+       for (ring = 0, index = 0; ring < adapter->max_drv_tx_rings; ring++) {
+               if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
+                       tx_ring = &adapter->tx_ring[ring];
+                       *data++ = tx_ring->xmit_on;
+                       *data++ = tx_ring->xmit_off;
+                       *data++ = tx_ring->xmit_called;
+                       *data++ = tx_ring->xmit_finished;
+               }
+       }
        memset(data, 0, stats->n_stats * sizeof(u64));
        length = QLCNIC_STATS_LEN;
        for (index = 0; index < length; index++) {
index 4d5f59b2d153f55579bd3c3f2a64f3c96b9ca66e..f8adc7b01f1f5ef9e62899a9c68c5eea9d2e2cba 100644 (file)
@@ -387,7 +387,7 @@ qlcnic_send_cmd_descs(struct qlcnic_adapter *adapter,
        if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
                return -EIO;
 
-       tx_ring = adapter->tx_ring;
+       tx_ring = &adapter->tx_ring[0];
        __netif_tx_lock_bh(tx_ring->txq);
 
        producer = tx_ring->producer;
@@ -740,6 +740,22 @@ int qlcnic_82xx_clear_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
        return 0;
 }
 
+int qlcnic_82xx_read_phys_port_id(struct qlcnic_adapter *adapter)
+{
+       u8 mac[ETH_ALEN];
+       int ret;
+
+       ret = qlcnic_get_mac_address(adapter, mac,
+                                    adapter->ahw->physical_port);
+       if (ret)
+               return ret;
+
+       memcpy(adapter->ahw->phys_port_id, mac, ETH_ALEN);
+       adapter->flags |= QLCNIC_HAS_PHYS_PORT_ID;
+
+       return 0;
+}
+
 /*
  * Send the interrupt coalescing parameter set by ethtool to the card.
  */
index 4a71b28effcb041421d1332cf3dc810a3b9fe8ae..786366c64b060e3cb1cc703ef0de06d7d6ecd7f0 100644 (file)
@@ -87,6 +87,7 @@ enum qlcnic_regs {
 #define        QLCNIC_CMD_CONFIG_VPORT                 0x32
 #define QLCNIC_CMD_GET_MAC_STATS               0x37
 #define QLCNIC_CMD_82XX_SET_DRV_VER            0x38
+#define QLCNIC_CMD_MQ_TX_CONFIG_INTR           0x39
 #define QLCNIC_CMD_GET_LED_STATUS              0x3C
 #define QLCNIC_CMD_CONFIGURE_RSS               0x41
 #define QLCNIC_CMD_CONFIG_INTR_COAL            0x43
@@ -149,7 +150,6 @@ struct ethtool_stats;
 struct pci_device_id;
 struct qlcnic_host_sds_ring;
 struct qlcnic_host_tx_ring;
-struct qlcnic_host_tx_ring;
 struct qlcnic_hardware_context;
 struct qlcnic_adapter;
 
@@ -173,10 +173,12 @@ int qlcnic_82xx_set_lb_mode(struct qlcnic_adapter *, u8);
 void qlcnic_82xx_write_crb(struct qlcnic_adapter *, char *, loff_t, size_t);
 void qlcnic_82xx_read_crb(struct qlcnic_adapter *, char *, loff_t, size_t);
 void qlcnic_82xx_dev_request_reset(struct qlcnic_adapter *, u32);
-int qlcnic_82xx_setup_intr(struct qlcnic_adapter *, u8);
+int qlcnic_82xx_setup_intr(struct qlcnic_adapter *, u8, int);
 irqreturn_t qlcnic_82xx_clear_legacy_intr(struct qlcnic_adapter *);
 int qlcnic_82xx_issue_cmd(struct qlcnic_adapter *adapter,
                          struct qlcnic_cmd_args *);
+int qlcnic_82xx_mq_intrpt(struct qlcnic_adapter *, int);
+int qlcnic_82xx_config_intrpt(struct qlcnic_adapter *, u8);
 int qlcnic_82xx_fw_cmd_create_rx_ctx(struct qlcnic_adapter *);
 int qlcnic_82xx_fw_cmd_create_tx_ctx(struct qlcnic_adapter *,
                                     struct qlcnic_host_tx_ring *tx_ring, int);
@@ -184,7 +186,7 @@ void qlcnic_82xx_fw_cmd_del_rx_ctx(struct qlcnic_adapter *);
 void qlcnic_82xx_fw_cmd_del_tx_ctx(struct qlcnic_adapter *,
                                   struct qlcnic_host_tx_ring *);
 int qlcnic_82xx_sre_macaddr_change(struct qlcnic_adapter *, u8 *, u16, u8);
-int qlcnic_82xx_get_mac_address(struct qlcnic_adapter *, u8*);
+int qlcnic_82xx_get_mac_address(struct qlcnic_adapter *, u8*, u8);
 int qlcnic_82xx_get_nic_info(struct qlcnic_adapter *, struct qlcnic_info *, u8);
 int qlcnic_82xx_set_nic_info(struct qlcnic_adapter *, struct qlcnic_info *);
 int qlcnic_82xx_get_pci_info(struct qlcnic_adapter *, struct qlcnic_pci_info*);
index 974d62607e138c792fb44224cf5a06b5f0794130..66c26cf7a2b89290524a2d85005ee847f43e925b 100644 (file)
@@ -127,12 +127,12 @@ void qlcnic_reset_rx_buffers_list(struct qlcnic_adapter *adapter)
        }
 }
 
-void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter)
+void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter,
+                              struct qlcnic_host_tx_ring *tx_ring)
 {
        struct qlcnic_cmd_buffer *cmd_buf;
        struct qlcnic_skb_frag *buffrag;
        int i, j;
-       struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
 
        cmd_buf = tx_ring->cmd_buf_arr;
        for (i = 0; i < tx_ring->num_desc; i++) {
@@ -241,7 +241,13 @@ int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter)
                sds_ring->irq = adapter->msix_entries[ring].vector;
                sds_ring->adapter = adapter;
                sds_ring->num_desc = adapter->num_rxd;
-
+               if (qlcnic_82xx_check(adapter)) {
+                       if (qlcnic_check_multi_tx(adapter) &&
+                           !adapter->ahw->diag_test)
+                               sds_ring->tx_ring = &adapter->tx_ring[ring];
+                       else
+                               sds_ring->tx_ring = &adapter->tx_ring[0];
+               }
                for (i = 0; i < NUM_RCV_DESC_RINGS; i++)
                        INIT_LIST_HEAD(&sds_ring->free_list[i]);
        }
index 6946d354f44f76c1e6e990a092cc043a21cf7e09..774d5a7b2fdb71c7a166e2600801df8d26b448fa 100644 (file)
 struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *,
                                     struct qlcnic_host_rds_ring *, u16, u16);
 
+inline void qlcnic_enable_tx_intr(struct qlcnic_adapter *adapter,
+                                 struct qlcnic_host_tx_ring *tx_ring)
+{
+       if (qlcnic_check_multi_tx(adapter) &&
+           !adapter->ahw->diag_test)
+               writel(0x0, tx_ring->crb_intr_mask);
+}
+
+
+static inline void qlcnic_disable_tx_int(struct qlcnic_adapter *adapter,
+                                        struct qlcnic_host_tx_ring *tx_ring)
+{
+       if (qlcnic_check_multi_tx(adapter) &&
+           !adapter->ahw->diag_test)
+               writel(1, tx_ring->crb_intr_mask);
+}
+
 inline void qlcnic_83xx_enable_tx_intr(struct qlcnic_adapter *adapter,
                                       struct qlcnic_host_tx_ring *tx_ring)
 {
@@ -147,10 +164,7 @@ static inline u8 qlcnic_mac_hash(u64 mac)
 static inline u32 qlcnic_get_ref_handle(struct qlcnic_adapter *adapter,
                                        u16 handle, u8 ring_id)
 {
-       unsigned short device = adapter->pdev->device;
-
-       if ((device == PCI_DEVICE_ID_QLOGIC_QLE834X) ||
-           (device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X))
+       if (qlcnic_83xx_check(adapter))
                return handle | (ring_id << 15);
        else
                return handle;
@@ -357,14 +371,14 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
 }
 
 static int qlcnic_tx_pkt(struct qlcnic_adapter *adapter,
-                        struct cmd_desc_type0 *first_desc, struct sk_buff *skb)
+                        struct cmd_desc_type0 *first_desc, struct sk_buff *skb,
+                        struct qlcnic_host_tx_ring *tx_ring)
 {
        u8 l4proto, opcode = 0, hdr_len = 0;
        u16 flags = 0, vlan_tci = 0;
        int copied, offset, copy_len, size;
        struct cmd_desc_type0 *hwdesc;
        struct vlan_ethhdr *vh;
-       struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
        u16 protocol = ntohs(skb->protocol);
        u32 producer = tx_ring->producer;
 
@@ -547,7 +561,7 @@ static inline void qlcnic_clear_cmddesc(u64 *desc)
 netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 {
        struct qlcnic_adapter *adapter = netdev_priv(netdev);
-       struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
+       struct qlcnic_host_tx_ring *tx_ring;
        struct qlcnic_cmd_buffer *pbuf;
        struct qlcnic_skb_frag *buffrag;
        struct cmd_desc_type0 *hwdesc, *first_desc;
@@ -556,10 +570,8 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
        int i, k, frag_count, delta = 0;
        u32 producer, num_txd;
 
-       num_txd = tx_ring->num_desc;
-
        if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
-               netif_stop_queue(netdev);
+               netif_tx_stop_all_queues(netdev);
                return NETDEV_TX_BUSY;
        }
 
@@ -569,7 +581,14 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
                        goto drop_packet;
        }
 
+       if (qlcnic_check_multi_tx(adapter))
+               tx_ring = &adapter->tx_ring[skb_get_queue_mapping(skb)];
+       else
+               tx_ring = &adapter->tx_ring[0];
+       num_txd = tx_ring->num_desc;
+
        frag_count = skb_shinfo(skb)->nr_frags + 1;
+
        /* 14 frags supported for normal packet and
         * 32 frags supported for TSO packet
         */
@@ -584,11 +603,12 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
        }
 
        if (unlikely(qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH)) {
-               netif_stop_queue(netdev);
+               netif_tx_stop_queue(tx_ring->txq);
                if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
-                       netif_start_queue(netdev);
+                       netif_tx_start_queue(tx_ring->txq);
                } else {
                        adapter->stats.xmit_off++;
+                       tx_ring->xmit_off++;
                        return NETDEV_TX_BUSY;
                }
        }
@@ -643,7 +663,7 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
        tx_ring->producer = get_next_index(producer, num_txd);
        smp_mb();
 
-       if (unlikely(qlcnic_tx_pkt(adapter, first_desc, skb)))
+       if (unlikely(qlcnic_tx_pkt(adapter, first_desc, skb, tx_ring)))
                goto unwind_buff;
 
        if (adapter->drv_mac_learn)
@@ -651,6 +671,7 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 
        adapter->stats.txbytes += skb->len;
        adapter->stats.xmitcalled++;
+       tx_ring->xmit_called++;
 
        qlcnic_update_cmd_producer(tx_ring);
 
@@ -673,7 +694,7 @@ void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup)
                adapter->ahw->linkup = 0;
                if (netif_running(netdev)) {
                        netif_carrier_off(netdev);
-                       netif_stop_queue(netdev);
+                       netif_tx_stop_all_queues(netdev);
                }
        } else if (!adapter->ahw->linkup && linkup) {
                netdev_info(netdev, "NIC Link is up\n");
@@ -768,9 +789,6 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter,
        struct net_device *netdev = adapter->netdev;
        struct qlcnic_skb_frag *frag;
 
-       if (!spin_trylock(&adapter->tx_clean_lock))
-               return 1;
-
        sw_consumer = tx_ring->sw_consumer;
        hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
 
@@ -788,6 +806,7 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter,
                                frag->dma = 0ULL;
                        }
                        adapter->stats.xmitfinished++;
+                       tx_ring->xmit_finished++;
                        dev_kfree_skb_any(buffer->skb);
                        buffer->skb = NULL;
                }
@@ -800,10 +819,12 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter,
        if (count && netif_running(netdev)) {
                tx_ring->sw_consumer = sw_consumer;
                smp_mb();
-               if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) {
+               if (netif_tx_queue_stopped(tx_ring->txq) &&
+                   netif_carrier_ok(netdev)) {
                        if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
-                               netif_wake_queue(netdev);
+                               netif_tx_wake_queue(tx_ring->txq);
                                adapter->stats.xmit_on++;
+                               tx_ring->xmit_on++;
                        }
                }
                adapter->tx_timeo_cnt = 0;
@@ -823,7 +844,6 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter,
         */
        hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
        done = (sw_consumer == hw_consumer);
-       spin_unlock(&adapter->tx_clean_lock);
 
        return done;
 }
@@ -833,16 +853,40 @@ static int qlcnic_poll(struct napi_struct *napi, int budget)
        int tx_complete, work_done;
        struct qlcnic_host_sds_ring *sds_ring;
        struct qlcnic_adapter *adapter;
+       struct qlcnic_host_tx_ring *tx_ring;
 
        sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
        adapter = sds_ring->adapter;
-       tx_complete = qlcnic_process_cmd_ring(adapter, adapter->tx_ring,
+       tx_ring = sds_ring->tx_ring;
+
+       tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring,
                                              budget);
        work_done = qlcnic_process_rcv_ring(sds_ring, budget);
        if ((work_done < budget) && tx_complete) {
                napi_complete(&sds_ring->napi);
-               if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
+               if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
                        qlcnic_enable_int(sds_ring);
+                       qlcnic_enable_tx_intr(adapter, tx_ring);
+               }
+       }
+
+       return work_done;
+}
+
+static int qlcnic_tx_poll(struct napi_struct *napi, int budget)
+{
+       struct qlcnic_host_tx_ring *tx_ring;
+       struct qlcnic_adapter *adapter;
+       int work_done;
+
+       tx_ring = container_of(napi, struct qlcnic_host_tx_ring, napi);
+       adapter = tx_ring->adapter;
+
+       work_done = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
+       if (work_done) {
+               napi_complete(&tx_ring->napi);
+               if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
+                       qlcnic_enable_tx_intr(adapter, tx_ring);
        }
 
        return work_done;
@@ -952,17 +996,17 @@ static void qlcnic_handle_fw_message(int desc_cnt, int index,
                        break;
                case 1:
                        dev_info(dev, "loopback already in progress\n");
-                       adapter->ahw->diag_cnt = -QLCNIC_TEST_IN_PROGRESS;
+                       adapter->ahw->diag_cnt = -EINPROGRESS;
                        break;
                case 2:
                        dev_info(dev, "loopback cable is not connected\n");
-                       adapter->ahw->diag_cnt = -QLCNIC_LB_CABLE_NOT_CONN;
+                       adapter->ahw->diag_cnt = -ENODEV;
                        break;
                default:
                        dev_info(dev,
                                 "loopback configure request failed, err %x\n",
                                 ret);
-                       adapter->ahw->diag_cnt = -QLCNIC_UNDEFINED_ERROR;
+                       adapter->ahw->diag_cnt = -EIO;
                        break;
                }
                break;
@@ -1414,6 +1458,7 @@ int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter,
        int ring, max_sds_rings;
        struct qlcnic_host_sds_ring *sds_ring;
        struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+       struct qlcnic_host_tx_ring *tx_ring;
 
        if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
                return -ENOMEM;
@@ -1422,12 +1467,22 @@ int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter,
 
        for (ring = 0; ring < adapter->max_sds_rings; ring++) {
                sds_ring = &recv_ctx->sds_rings[ring];
-               if (ring == adapter->max_sds_rings - 1)
-                       netif_napi_add(netdev, &sds_ring->napi, qlcnic_poll,
-                                      QLCNIC_NETDEV_WEIGHT / max_sds_rings);
-               else
+               if (qlcnic_check_multi_tx(adapter) &&
+                   !adapter->ahw->diag_test &&
+                   (adapter->max_drv_tx_rings > 1)) {
                        netif_napi_add(netdev, &sds_ring->napi, qlcnic_rx_poll,
-                                      QLCNIC_NETDEV_WEIGHT*2);
+                                       QLCNIC_NETDEV_WEIGHT * 2);
+               } else {
+                       if (ring == (adapter->max_sds_rings - 1))
+                               netif_napi_add(netdev, &sds_ring->napi,
+                                              qlcnic_poll,
+                                              QLCNIC_NETDEV_WEIGHT /
+                                              max_sds_rings);
+                       else
+                               netif_napi_add(netdev, &sds_ring->napi,
+                                              qlcnic_rx_poll,
+                                              QLCNIC_NETDEV_WEIGHT * 2);
+               }
        }
 
        if (qlcnic_alloc_tx_rings(adapter, netdev)) {
@@ -1435,6 +1490,14 @@ int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter,
                return -ENOMEM;
        }
 
+       if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test) {
+               for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+                       tx_ring = &adapter->tx_ring[ring];
+                       netif_napi_add(netdev, &tx_ring->napi, qlcnic_tx_poll,
+                                      QLCNIC_NETDEV_WEIGHT);
+               }
+       }
+
        return 0;
 }
 
@@ -1443,6 +1506,7 @@ void qlcnic_82xx_napi_del(struct qlcnic_adapter *adapter)
        int ring;
        struct qlcnic_host_sds_ring *sds_ring;
        struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+       struct qlcnic_host_tx_ring *tx_ring;
 
        for (ring = 0; ring < adapter->max_sds_rings; ring++) {
                sds_ring = &recv_ctx->sds_rings[ring];
@@ -1450,6 +1514,14 @@ void qlcnic_82xx_napi_del(struct qlcnic_adapter *adapter)
        }
 
        qlcnic_free_sds_rings(adapter->recv_ctx);
+
+       if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test) {
+               for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+                       tx_ring = &adapter->tx_ring[ring];
+                       netif_napi_del(&tx_ring->napi);
+               }
+       }
+
        qlcnic_free_tx_rings(adapter);
 }
 
@@ -1457,6 +1529,7 @@ void qlcnic_82xx_napi_enable(struct qlcnic_adapter *adapter)
 {
        int ring;
        struct qlcnic_host_sds_ring *sds_ring;
+       struct qlcnic_host_tx_ring *tx_ring;
        struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
 
        if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
@@ -1467,12 +1540,24 @@ void qlcnic_82xx_napi_enable(struct qlcnic_adapter *adapter)
                napi_enable(&sds_ring->napi);
                qlcnic_enable_int(sds_ring);
        }
+
+       if (qlcnic_check_multi_tx(adapter) &&
+           (adapter->flags & QLCNIC_MSIX_ENABLED) &&
+           !adapter->ahw->diag_test &&
+           (adapter->max_drv_tx_rings > 1)) {
+               for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+                       tx_ring = &adapter->tx_ring[ring];
+                       napi_enable(&tx_ring->napi);
+                       qlcnic_enable_tx_intr(adapter, tx_ring);
+               }
+       }
 }
 
 void qlcnic_82xx_napi_disable(struct qlcnic_adapter *adapter)
 {
        int ring;
        struct qlcnic_host_sds_ring *sds_ring;
+       struct qlcnic_host_tx_ring *tx_ring;
        struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
 
        if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
@@ -1484,6 +1569,17 @@ void qlcnic_82xx_napi_disable(struct qlcnic_adapter *adapter)
                napi_synchronize(&sds_ring->napi);
                napi_disable(&sds_ring->napi);
        }
+
+       if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
+           !adapter->ahw->diag_test &&
+           qlcnic_check_multi_tx(adapter)) {
+               for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+                       tx_ring = &adapter->tx_ring[ring];
+                       qlcnic_disable_tx_int(adapter, tx_ring);
+                       napi_synchronize(&tx_ring->napi);
+                       napi_disable(&tx_ring->napi);
+               }
+       }
 }
 
 #define QLC_83XX_NORMAL_LB_PKT (1ULL << 36)
@@ -1685,7 +1781,7 @@ static int qlcnic_83xx_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring,
                        break;
                default:
                        dev_info(&adapter->pdev->dev,
-                                "Unkonwn opcode: 0x%x\n", opcode);
+                                "Unknown opcode: 0x%x\n", opcode);
                        goto skip;
                }
 
index bc05d016c85943834b687e0b2221495c88ea7190..8321d1a3f4b9f09d47d61c70b96a1fc1aba339cd 100644 (file)
@@ -100,6 +100,8 @@ static DEFINE_PCI_DEVICE_TABLE(qlcnic_pci_tbl) = {
        ENTRY(PCI_DEVICE_ID_QLOGIC_QLE824X),
        ENTRY(PCI_DEVICE_ID_QLOGIC_QLE834X),
        ENTRY(PCI_DEVICE_ID_QLOGIC_VF_QLE834X),
+       ENTRY(PCI_DEVICE_ID_QLOGIC_QLE844X),
+       ENTRY(PCI_DEVICE_ID_QLOGIC_VF_QLE844X),
        {0,}
 };
 
@@ -145,6 +147,11 @@ static const u32 qlcnic_reg_tbl[] = {
 };
 
 static const struct qlcnic_board_info qlcnic_boards[] = {
+       { PCI_VENDOR_ID_QLOGIC,
+         PCI_DEVICE_ID_QLOGIC_QLE844X,
+         0x0,
+         0x0,
+         "8400 series 10GbE Converged Network Adapter (TCP/IP Networking)" },
        { PCI_VENDOR_ID_QLOGIC,
          PCI_DEVICE_ID_QLOGIC_QLE834X,
          PCI_VENDOR_ID_QLOGIC,
@@ -254,7 +261,6 @@ static const struct qlcnic_board_info qlcnic_boards[] = {
 };
 
 #define NUM_SUPPORTED_BOARDS ARRAY_SIZE(qlcnic_boards)
-#define QLC_MAX_SDS_RINGS      8
 
 static const
 struct qlcnic_legacy_intr_set legacy_intr[] = QLCNIC_LEGACY_INTR_CONFIG;
@@ -278,12 +284,15 @@ void qlcnic_free_sds_rings(struct qlcnic_recv_context *recv_ctx)
 
 int qlcnic_read_mac_addr(struct qlcnic_adapter *adapter)
 {
-       u8 mac_addr[ETH_ALEN];
        struct net_device *netdev = adapter->netdev;
        struct pci_dev *pdev = adapter->pdev;
+       u8 mac_addr[ETH_ALEN];
+       int ret;
 
-       if (qlcnic_get_mac_address(adapter, mac_addr) != 0)
-               return -EIO;
+       ret = qlcnic_get_mac_address(adapter, mac_addr,
+                                    adapter->ahw->pci_func);
+       if (ret)
+               return ret;
 
        memcpy(netdev->dev_addr, mac_addr, ETH_ALEN);
        memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len);
@@ -425,6 +434,21 @@ static void qlcnic_82xx_cancel_idc_work(struct qlcnic_adapter *adapter)
        cancel_delayed_work_sync(&adapter->fw_work);
 }
 
+static int qlcnic_get_phys_port_id(struct net_device *netdev,
+                                  struct netdev_phys_port_id *ppid)
+{
+       struct qlcnic_adapter *adapter = netdev_priv(netdev);
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+       if (!(adapter->flags & QLCNIC_HAS_PHYS_PORT_ID))
+               return -EOPNOTSUPP;
+
+       ppid->id_len = sizeof(ahw->phys_port_id);
+       memcpy(ppid->id, ahw->phys_port_id, ppid->id_len);
+
+       return 0;
+}
+
 static const struct net_device_ops qlcnic_netdev_ops = {
        .ndo_open          = qlcnic_open,
        .ndo_stop          = qlcnic_close,
@@ -442,6 +466,7 @@ static const struct net_device_ops qlcnic_netdev_ops = {
        .ndo_fdb_add            = qlcnic_fdb_add,
        .ndo_fdb_del            = qlcnic_fdb_del,
        .ndo_fdb_dump           = qlcnic_fdb_dump,
+       .ndo_get_phys_port_id   = qlcnic_get_phys_port_id,
 #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller = qlcnic_poll_controller,
 #endif
@@ -514,13 +539,33 @@ static struct qlcnic_hardware_ops qlcnic_hw_ops = {
        .get_board_info                 = qlcnic_82xx_get_board_info,
        .set_mac_filter_count           = qlcnic_82xx_set_mac_filter_count,
        .free_mac_list                  = qlcnic_82xx_free_mac_list,
+       .read_phys_port_id              = qlcnic_82xx_read_phys_port_id,
 };
 
+static void qlcnic_get_multiq_capability(struct qlcnic_adapter *adapter)
+{
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+       int num_tx_q;
+
+       if (ahw->msix_supported &&
+           (ahw->extra_capability[0] & QLCNIC_FW_CAPABILITY_2_MULTI_TX)) {
+               num_tx_q = min_t(int, QLCNIC_DEF_NUM_TX_RINGS,
+                                num_online_cpus());
+               if (num_tx_q > 1) {
+                       test_and_set_bit(__QLCNIC_MULTI_TX_UNIQUE,
+                                        &adapter->state);
+                       adapter->max_drv_tx_rings = num_tx_q;
+               }
+       } else {
+               adapter->max_drv_tx_rings = 1;
+       }
+}
+
 int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
 {
        struct pci_dev *pdev = adapter->pdev;
+       int max_tx_rings, max_sds_rings, tx_vector;
        int err = -1, i;
-       int max_tx_rings, tx_vector;
 
        if (adapter->flags & QLCNIC_TX_INTR_SHARED) {
                max_tx_rings = 0;
@@ -554,7 +599,15 @@ int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
                                adapter->max_sds_rings = num_msix -
                                                         max_tx_rings - 1;
                        } else {
-                               adapter->max_sds_rings = num_msix;
+                               adapter->ahw->num_msix = num_msix;
+                               if (qlcnic_check_multi_tx(adapter) &&
+                                   !adapter->ahw->diag_test &&
+                                   (adapter->max_drv_tx_rings > 1))
+                                       max_sds_rings = num_msix - max_tx_rings;
+                               else
+                                       max_sds_rings = num_msix;
+
+                               adapter->max_sds_rings = max_sds_rings;
                        }
                        dev_info(&pdev->dev, "using msi-x interrupts\n");
                        return err;
@@ -570,6 +623,8 @@ int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
                                num_msix += (max_tx_rings + 1);
                        } else {
                                num_msix = rounddown_pow_of_two(err);
+                               if (qlcnic_check_multi_tx(adapter))
+                                       num_msix += max_tx_rings;
                        }
 
                        if (num_msix) {
@@ -605,6 +660,7 @@ static int qlcnic_enable_msi_legacy(struct qlcnic_adapter *adapter)
                adapter->msix_entries[0].vector = pdev->irq;
                return err;
        }
+
        if (qlcnic_use_msi || qlcnic_use_msi_x)
                return -EOPNOTSUPP;
 
@@ -621,28 +677,69 @@ static int qlcnic_enable_msi_legacy(struct qlcnic_adapter *adapter)
        return err;
 }
 
-int qlcnic_82xx_setup_intr(struct qlcnic_adapter *adapter, u8 num_intr)
+int qlcnic_82xx_setup_intr(struct qlcnic_adapter *adapter, u8 num_intr, int txq)
 {
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
        int num_msix, err = 0;
 
        if (!num_intr)
                num_intr = QLCNIC_DEF_NUM_STS_DESC_RINGS;
 
-       if (adapter->ahw->msix_supported)
+       if (ahw->msix_supported) {
                num_msix = rounddown_pow_of_two(min_t(int, num_online_cpus(),
                                                num_intr));
-       else
+               if (qlcnic_check_multi_tx(adapter)) {
+                       if (txq)
+                               adapter->max_drv_tx_rings = txq;
+                       num_msix += adapter->max_drv_tx_rings;
+               }
+       } else {
                num_msix = 1;
+       }
 
        err = qlcnic_enable_msix(adapter, num_msix);
-       if (err == -ENOMEM || !err)
+       if (err == -ENOMEM)
                return err;
 
-       err = qlcnic_enable_msi_legacy(adapter);
-       if (!err)
+       if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) {
+               qlcnic_disable_multi_tx(adapter);
+
+               err = qlcnic_enable_msi_legacy(adapter);
+               if (!err)
+                       return err;
+       }
+
+       return 0;
+}
+
+int qlcnic_82xx_mq_intrpt(struct qlcnic_adapter *adapter, int op_type)
+{
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+       int err, i;
+
+       if (qlcnic_check_multi_tx(adapter) &&
+           !ahw->diag_test &&
+           (adapter->flags & QLCNIC_MSIX_ENABLED)) {
+               ahw->intr_tbl = vzalloc(ahw->num_msix *
+                                       sizeof(struct qlcnic_intrpt_config));
+               if (!ahw->intr_tbl)
+                       return -ENOMEM;
+
+               for (i = 0; i < ahw->num_msix; i++) {
+                       ahw->intr_tbl[i].type = QLCNIC_INTRPT_MSIX;
+                       ahw->intr_tbl[i].id = i;
+                       ahw->intr_tbl[i].src = 0;
+               }
+
+               err = qlcnic_82xx_config_intrpt(adapter, 1);
+               if (err)
+                       dev_err(&adapter->pdev->dev,
+                               "Failed to configure Interrupt for %d vector\n",
+                               ahw->num_msix);
                return err;
+       }
 
-       return -EIO;
+       return 0;
 }
 
 void qlcnic_teardown_intr(struct qlcnic_adapter *adapter)
@@ -829,7 +926,9 @@ static void qlcnic_get_bar_length(u32 dev_id, ulong *bar)
                *bar = QLCNIC_82XX_BAR0_LENGTH;
                break;
        case PCI_DEVICE_ID_QLOGIC_QLE834X:
+       case PCI_DEVICE_ID_QLOGIC_QLE844X:
        case PCI_DEVICE_ID_QLOGIC_VF_QLE834X:
+       case PCI_DEVICE_ID_QLOGIC_VF_QLE844X:
                *bar = QLCNIC_83XX_BAR0_LENGTH;
                break;
        default:
@@ -1413,6 +1512,7 @@ qlcnic_request_irq(struct qlcnic_adapter *adapter)
                        for (ring = 0; ring < num_sds_rings; ring++) {
                                sds_ring = &recv_ctx->sds_rings[ring];
                                if (qlcnic_82xx_check(adapter) &&
+                                   !qlcnic_check_multi_tx(adapter) &&
                                    (ring == (num_sds_rings - 1))) {
                                        if (!(adapter->flags &
                                              QLCNIC_MSIX_ENABLED))
@@ -1436,9 +1536,11 @@ qlcnic_request_irq(struct qlcnic_adapter *adapter)
                                        return err;
                        }
                }
-               if (qlcnic_83xx_check(adapter) &&
-                   (adapter->flags & QLCNIC_MSIX_ENABLED) &&
-                   !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
+               if ((qlcnic_82xx_check(adapter) &&
+                    qlcnic_check_multi_tx(adapter)) ||
+                   (qlcnic_83xx_check(adapter) &&
+                    (adapter->flags & QLCNIC_MSIX_ENABLED) &&
+                    !(adapter->flags & QLCNIC_TX_INTR_SHARED))) {
                        handler = qlcnic_msix_tx_intr;
                        for (ring = 0; ring < adapter->max_drv_tx_rings;
                             ring++) {
@@ -1473,8 +1575,10 @@ qlcnic_free_irq(struct qlcnic_adapter *adapter)
                                free_irq(sds_ring->irq, sds_ring);
                        }
                }
-               if (qlcnic_83xx_check(adapter) &&
-                   !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
+               if ((qlcnic_83xx_check(adapter) &&
+                    !(adapter->flags & QLCNIC_TX_INTR_SHARED)) ||
+                   (qlcnic_82xx_check(adapter) &&
+                    qlcnic_check_multi_tx(adapter))) {
                        for (ring = 0; ring < adapter->max_drv_tx_rings;
                             ring++) {
                                tx_ring = &adapter->tx_ring[ring];
@@ -1510,8 +1614,10 @@ int __qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
 
        if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
                return 0;
+
        if (qlcnic_set_eswitch_port_config(adapter))
                return -EIO;
+
        qlcnic_get_lro_mss_capability(adapter);
 
        if (qlcnic_fw_create_ctx(adapter))
@@ -1558,6 +1664,8 @@ int qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
 
 void __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
 {
+       int ring;
+
        if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
                return;
 
@@ -1567,7 +1675,6 @@ void __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
        if (qlcnic_sriov_vf_check(adapter))
                qlcnic_sriov_cleanup_async_list(&adapter->ahw->sriov->bc);
        smp_mb();
-       spin_lock(&adapter->tx_clean_lock);
        netif_carrier_off(netdev);
        adapter->ahw->linkup = 0;
        netif_tx_disable(netdev);
@@ -1585,8 +1692,9 @@ void __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
        adapter->flags &= ~QLCNIC_FW_LRO_MSS_CAP;
 
        qlcnic_reset_rx_buffers_list(adapter);
-       qlcnic_release_tx_buffers(adapter);
-       spin_unlock(&adapter->tx_clean_lock);
+
+       for (ring = 0; ring < adapter->max_drv_tx_rings; ring++)
+               qlcnic_release_tx_buffers(adapter, &adapter->tx_ring[ring]);
 }
 
 /* Usage: During suspend and firmware recovery module */
@@ -1666,6 +1774,7 @@ void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings)
 {
        struct qlcnic_adapter *adapter = netdev_priv(netdev);
        struct qlcnic_host_sds_ring *sds_ring;
+       int max_tx_rings = adapter->max_drv_tx_rings;
        int ring;
 
        clear_bit(__QLCNIC_DEV_UP, &adapter->state);
@@ -1682,6 +1791,7 @@ void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings)
 
        adapter->ahw->diag_test = 0;
        adapter->max_sds_rings = max_sds_rings;
+       adapter->max_drv_tx_rings = max_tx_rings;
 
        if (qlcnic_attach(adapter))
                goto out;
@@ -1750,6 +1860,7 @@ int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
        adapter->max_sds_rings = 1;
        adapter->ahw->diag_test = test;
        adapter->ahw->linkup = 0;
+       adapter->max_drv_tx_rings = 1;
 
        ret = qlcnic_attach(adapter);
        if (ret) {
@@ -1907,6 +2018,10 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev,
        netdev->priv_flags |= IFF_UNICAST_FLT;
        netdev->irq = adapter->msix_entries[0].vector;
 
+       err = qlcnic_set_real_num_queues(adapter, netdev);
+       if (err)
+               return err;
+
        err = register_netdev(netdev);
        if (err) {
                dev_err(&pdev->dev, "failed to register net device\n");
@@ -1975,7 +2090,8 @@ int qlcnic_alloc_tx_rings(struct qlcnic_adapter *adapter,
                tx_ring->cmd_buf_arr = cmd_buf_arr;
        }
 
-       if (qlcnic_83xx_check(adapter)) {
+       if (qlcnic_83xx_check(adapter) ||
+           (qlcnic_82xx_check(adapter) && qlcnic_check_multi_tx(adapter))) {
                for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
                        tx_ring = &adapter->tx_ring[ring];
                        tx_ring->adapter = adapter;
@@ -1986,6 +2102,7 @@ int qlcnic_alloc_tx_rings(struct qlcnic_adapter *adapter,
                        }
                }
        }
+
        return 0;
 }
 
@@ -2048,9 +2165,11 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                ahw->reg_tbl = (u32 *) qlcnic_reg_tbl;
                break;
        case PCI_DEVICE_ID_QLOGIC_QLE834X:
+       case PCI_DEVICE_ID_QLOGIC_QLE844X:
                qlcnic_83xx_register_map(ahw);
                break;
        case PCI_DEVICE_ID_QLOGIC_VF_QLE834X:
+       case PCI_DEVICE_ID_QLOGIC_VF_QLE844X:
                qlcnic_sriov_vf_register_map(ahw);
                break;
        default:
@@ -2061,7 +2180,8 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (err)
                goto err_out_free_hw_res;
 
-       netdev = alloc_etherdev(sizeof(struct qlcnic_adapter));
+       netdev = alloc_etherdev_mq(sizeof(struct qlcnic_adapter),
+                                  QLCNIC_MAX_TX_RINGS);
        if (!netdev) {
                err = -ENOMEM;
                goto err_out_iounmap;
@@ -2091,12 +2211,10 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                adapter->fdb_mac_learn = true;
        else if (qlcnic_mac_learn == DRV_MAC_LEARN)
                adapter->drv_mac_learn = true;
-       adapter->max_drv_tx_rings = 1;
 
        rwlock_init(&adapter->ahw->crb_lock);
        mutex_init(&adapter->ahw->mem_lock);
 
-       spin_lock_init(&adapter->tx_clean_lock);
        INIT_LIST_HEAD(&adapter->mac_list);
 
        if (qlcnic_82xx_check(adapter)) {
@@ -2108,12 +2226,27 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                        goto err_out_free_hw;
                }
 
+               qlcnic_get_multiq_capability(adapter);
+
+               if ((adapter->ahw->act_pci_func > 2) &&
+                   qlcnic_check_multi_tx(adapter)) {
+                       adapter->max_drv_tx_rings = QLCNIC_DEF_NUM_TX_RINGS;
+                       dev_info(&adapter->pdev->dev,
+                                "vNIC mode enabled, Set max TX rings = %d\n",
+                                adapter->max_drv_tx_rings);
+               }
+
+               if (!qlcnic_check_multi_tx(adapter)) {
+                       clear_bit(__QLCNIC_MULTI_TX_UNIQUE, &adapter->state);
+                       adapter->max_drv_tx_rings = 1;
+               }
                err = qlcnic_setup_idc_param(adapter);
                if (err)
                        goto err_out_free_hw;
 
                adapter->flags |= QLCNIC_NEED_FLR;
        } else if (qlcnic_83xx_check(adapter)) {
+               adapter->max_drv_tx_rings = 1;
                qlcnic_83xx_check_vf(adapter, ent);
                adapter->portnum = adapter->ahw->pci_func;
                err = qlcnic_83xx_init(adapter, pci_using_dac);
@@ -2132,6 +2265,8 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (qlcnic_read_mac_addr(adapter))
                dev_warn(&pdev->dev, "failed to read mac addr\n");
 
+       qlcnic_read_phys_port_id(adapter);
+
        if (adapter->portnum == 0) {
                qlcnic_get_board_name(adapter, board_name);
 
@@ -2145,16 +2280,12 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                dev_warn(&pdev->dev,
                         "Device does not support MSI interrupts\n");
 
-       err = qlcnic_setup_intr(adapter, 0);
-       if (err) {
-               dev_err(&pdev->dev, "Failed to setup interrupt\n");
-               goto err_out_disable_msi;
-       }
-
-       if (qlcnic_83xx_check(adapter)) {
-               err = qlcnic_83xx_setup_mbx_intr(adapter);
-               if (err)
+       if (qlcnic_82xx_check(adapter)) {
+               err = qlcnic_setup_intr(adapter, 0, 0);
+               if (err) {
+                       dev_err(&pdev->dev, "Failed to setup interrupt\n");
                        goto err_out_disable_msi;
+               }
        }
 
        err = qlcnic_get_act_pci_func(adapter);
@@ -2242,9 +2373,11 @@ static void qlcnic_remove(struct pci_dev *pdev)
        qlcnic_sriov_cleanup(adapter);
 
        if (qlcnic_83xx_check(adapter)) {
-               qlcnic_83xx_free_mbx_intr(adapter);
                qlcnic_83xx_register_nic_idc_func(adapter, 0);
                cancel_delayed_work_sync(&adapter->idc_aen_work);
+               qlcnic_83xx_free_mbx_intr(adapter);
+               qlcnic_83xx_detach_mailbox_work(adapter);
+               qlcnic_83xx_free_mailbox(ahw->mailbox);
        }
 
        qlcnic_detach(adapter);
@@ -2336,7 +2469,7 @@ static int qlcnic_open(struct net_device *netdev)
        if (err)
                goto err_out;
 
-       netif_start_queue(netdev);
+       netif_tx_start_all_queues(netdev);
 
        return 0;
 
@@ -2468,6 +2601,8 @@ int qlcnic_check_temp(struct qlcnic_adapter *adapter)
 static void qlcnic_tx_timeout(struct net_device *netdev)
 {
        struct qlcnic_adapter *adapter = netdev_priv(netdev);
+       struct qlcnic_host_tx_ring *tx_ring;
+       int ring;
 
        if (test_bit(__QLCNIC_RESETTING, &adapter->state))
                return;
@@ -2481,6 +2616,25 @@ static void qlcnic_tx_timeout(struct net_device *netdev)
                                                      QLCNIC_FORCE_FW_DUMP_KEY);
        } else {
                netdev_info(netdev, "Tx timeout, reset adapter context.\n");
+               if (qlcnic_82xx_check(adapter)) {
+                       for (ring = 0; ring < adapter->max_drv_tx_rings;
+                            ring++) {
+                               tx_ring = &adapter->tx_ring[ring];
+                               dev_info(&netdev->dev, "ring=%d\n", ring);
+                               dev_info(&netdev->dev, "crb_intr_mask=%d\n",
+                                        readl(tx_ring->crb_intr_mask));
+                               dev_info(&netdev->dev, "producer=%d\n",
+                                        readl(tx_ring->crb_cmd_producer));
+                               dev_info(&netdev->dev, "sw_consumer = %d\n",
+                                        tx_ring->sw_consumer);
+                               dev_info(&netdev->dev, "hw_consumer = %d\n",
+                                        le32_to_cpu(*(tx_ring->hw_consumer)));
+                               dev_info(&netdev->dev, "xmit-on=%llu\n",
+                                        tx_ring->xmit_on);
+                               dev_info(&netdev->dev, "xmit-off=%llu\n",
+                                        tx_ring->xmit_off);
+                       }
+               }
                adapter->ahw->reset_context = 1;
        }
 }
@@ -3245,7 +3399,7 @@ static int qlcnic_attach_func(struct pci_dev *pdev)
        qlcnic_clr_drv_state(adapter);
        kfree(adapter->msix_entries);
        adapter->msix_entries = NULL;
-       err = qlcnic_setup_intr(adapter, 0);
+       err = qlcnic_setup_intr(adapter, 0, 0);
 
        if (err) {
                kfree(adapter->msix_entries);
@@ -3370,16 +3524,65 @@ qlcnicvf_start_firmware(struct qlcnic_adapter *adapter)
        return err;
 }
 
+int qlcnic_validate_max_tx_rings(struct qlcnic_adapter *adapter, int txq)
+{
+       struct net_device *netdev = adapter->netdev;
+       u8 max_hw = QLCNIC_MAX_TX_RINGS;
+       u32 max_allowed;
+
+       if (!qlcnic_82xx_check(adapter)) {
+               netdev_err(netdev, "No Multi TX-Q support\n");
+               return -EINVAL;
+       }
+
+       if (!qlcnic_use_msi_x && !qlcnic_use_msi) {
+               netdev_err(netdev, "No Multi TX-Q support in INT-x mode\n");
+               return -EINVAL;
+       }
+
+       if (!qlcnic_check_multi_tx(adapter)) {
+               netdev_err(netdev, "No Multi TX-Q support\n");
+               return -EINVAL;
+       }
+
+       if (txq > QLCNIC_MAX_TX_RINGS) {
+               netdev_err(netdev, "Invalid ring count\n");
+               return -EINVAL;
+       }
+
+       max_allowed = rounddown_pow_of_two(min_t(int, max_hw,
+                                                num_online_cpus()));
+       if ((txq > max_allowed) || !is_power_of_2(txq)) {
+               if (!is_power_of_2(txq))
+                       netdev_err(netdev,
+                                  "TX queue should be a power of 2\n");
+               if (txq > num_online_cpus())
+                       netdev_err(netdev,
+                                  "Tx queue should not be higher than [%u], number of online CPUs in the system\n",
+                                  num_online_cpus());
+               netdev_err(netdev, "Unable to configure %u Tx rings\n", txq);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
 int qlcnic_validate_max_rss(struct qlcnic_adapter *adapter,
-                           __u32 val)
+                               __u32 val)
 {
        struct net_device *netdev = adapter->netdev;
        u8 max_hw = adapter->ahw->max_rx_ques;
        u32 max_allowed;
 
-       if (val > QLC_MAX_SDS_RINGS) {
+       if (qlcnic_82xx_check(adapter) && !qlcnic_use_msi_x &&
+           !qlcnic_use_msi) {
+               netdev_err(netdev, "No RSS support in INT-x mode\n");
+               return -EINVAL;
+       }
+
+       if (val > QLCNIC_MAX_SDS_RINGS) {
                netdev_err(netdev, "RSS value should not be higher than %u\n",
-                          QLC_MAX_SDS_RINGS);
+                          QLCNIC_MAX_SDS_RINGS);
                return -EINVAL;
        }
 
@@ -3409,27 +3612,48 @@ int qlcnic_validate_max_rss(struct qlcnic_adapter *adapter,
        return 0;
 }
 
-int qlcnic_set_max_rss(struct qlcnic_adapter *adapter, u8 data, size_t len)
+int qlcnic_set_max_rss(struct qlcnic_adapter *adapter, u8 data, int txq)
 {
        int err;
        struct net_device *netdev = adapter->netdev;
+       int num_msix;
 
        if (test_bit(__QLCNIC_RESETTING, &adapter->state))
                return -EBUSY;
 
+       if (qlcnic_82xx_check(adapter) && !qlcnic_use_msi_x &&
+           !qlcnic_use_msi) {
+               netdev_err(netdev, "No RSS support in INT-x mode\n");
+               return -EINVAL;
+       }
+
        netif_device_detach(netdev);
        if (netif_running(netdev))
                __qlcnic_down(adapter, netdev);
 
        qlcnic_detach(adapter);
 
+       if (qlcnic_82xx_check(adapter)) {
+               if (txq != 0)
+                       adapter->max_drv_tx_rings = txq;
+
+               if (qlcnic_check_multi_tx(adapter) &&
+                   (txq > adapter->max_drv_tx_rings))
+                       num_msix = adapter->max_drv_tx_rings;
+               else
+                       num_msix = data;
+       }
+
        if (qlcnic_83xx_check(adapter)) {
                qlcnic_83xx_free_mbx_intr(adapter);
                qlcnic_83xx_enable_mbx_poll(adapter);
        }
 
+       netif_set_real_num_tx_queues(netdev, adapter->max_drv_tx_rings);
+
        qlcnic_teardown_intr(adapter);
-       err = qlcnic_setup_intr(adapter, data);
+
+       err = qlcnic_setup_intr(adapter, data, txq);
        if (err) {
                kfree(adapter->msix_entries);
                netdev_err(netdev, "failed to setup interrupt\n");
@@ -3457,8 +3681,7 @@ int qlcnic_set_max_rss(struct qlcnic_adapter *adapter, u8 data, size_t len)
                        goto done;
                qlcnic_restore_indev_addr(netdev, NETDEV_UP);
        }
-       err = len;
- done:
+done:
        netif_device_attach(netdev);
        clear_bit(__QLCNIC_RESETTING, &adapter->state);
        return err;
index 5d40045b3ceaa47038593dfffc52c986f18cef21..2f79ec5246dcf250e969c5e3810d7210079eafa1 100644 (file)
@@ -33,7 +33,7 @@ static int qlcnic_sriov_alloc_bc_mbx_args(struct qlcnic_cmd_args *, u32);
 static void qlcnic_sriov_vf_poll_dev_state(struct work_struct *);
 static void qlcnic_sriov_vf_cancel_fw_work(struct qlcnic_adapter *);
 static void qlcnic_sriov_cleanup_transaction(struct qlcnic_bc_trans *);
-static int qlcnic_sriov_vf_mbx_op(struct qlcnic_adapter *,
+static int qlcnic_sriov_issue_cmd(struct qlcnic_adapter *,
                                  struct qlcnic_cmd_args *);
 static void qlcnic_sriov_process_bc_cmd(struct work_struct *);
 
@@ -45,7 +45,7 @@ static struct qlcnic_hardware_ops qlcnic_sriov_vf_hw_ops = {
        .get_mac_address                = qlcnic_83xx_get_mac_address,
        .setup_intr                     = qlcnic_83xx_setup_intr,
        .alloc_mbx_args                 = qlcnic_83xx_alloc_mbx_args,
-       .mbx_cmd                        = qlcnic_sriov_vf_mbx_op,
+       .mbx_cmd                        = qlcnic_sriov_issue_cmd,
        .get_func_no                    = qlcnic_83xx_get_func_no,
        .api_lock                       = qlcnic_83xx_cam_lock,
        .api_unlock                     = qlcnic_83xx_cam_unlock,
@@ -286,96 +286,38 @@ void qlcnic_sriov_cleanup(struct qlcnic_adapter *adapter)
 static int qlcnic_sriov_post_bc_msg(struct qlcnic_adapter *adapter, u32 *hdr,
                                    u32 *pay, u8 pci_func, u8 size)
 {
-       u32 rsp, mbx_val, fw_data, rsp_num, mbx_cmd, val, wait_time = 0;
        struct qlcnic_hardware_context *ahw = adapter->ahw;
-       unsigned long flags;
-       u16 opcode;
-       u8 mbx_err_code;
-       int i, j;
-
-       opcode = ((struct qlcnic_bc_hdr *)hdr)->cmd_op;
-
-       if (!test_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status)) {
-               dev_info(&adapter->pdev->dev,
-                        "Mailbox cmd attempted, 0x%x\n", opcode);
-               dev_info(&adapter->pdev->dev, "Mailbox detached\n");
-               return 0;
-       }
-
-       spin_lock_irqsave(&ahw->mbx_lock, flags);
-
-       mbx_val = QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL);
-       if (mbx_val) {
-               QLCDB(adapter, DRV, "Mailbox cmd attempted, 0x%x\n", opcode);
-               spin_unlock_irqrestore(&ahw->mbx_lock, flags);
-               return QLCNIC_RCODE_TIMEOUT;
-       }
-       /* Fill in mailbox registers */
-       val = size + (sizeof(struct qlcnic_bc_hdr) / sizeof(u32));
-       mbx_cmd = 0x31 | (val << 16) | (adapter->ahw->fw_hal_version << 29);
-
-       writel(mbx_cmd, QLCNIC_MBX_HOST(ahw, 0));
-       mbx_cmd = 0x1 | (1 << 4);
+       struct qlcnic_mailbox *mbx = ahw->mailbox;
+       struct qlcnic_cmd_args cmd;
+       unsigned long timeout;
+       int err;
 
-       if (qlcnic_sriov_pf_check(adapter))
-               mbx_cmd |= (pci_func << 5);
+       memset(&cmd, 0, sizeof(struct qlcnic_cmd_args));
+       cmd.hdr = hdr;
+       cmd.pay = pay;
+       cmd.pay_size = size;
+       cmd.func_num = pci_func;
+       cmd.op_type = QLC_83XX_MBX_POST_BC_OP;
+       cmd.cmd_op = ((struct qlcnic_bc_hdr *)hdr)->cmd_op;
 
-       writel(mbx_cmd, QLCNIC_MBX_HOST(ahw, 1));
-       for (i = 2, j = 0; j < (sizeof(struct qlcnic_bc_hdr) / sizeof(u32));
-                       i++, j++) {
-               writel(*(hdr++), QLCNIC_MBX_HOST(ahw, i));
+       err = mbx->ops->enqueue_cmd(adapter, &cmd, &timeout);
+       if (err) {
+               dev_err(&adapter->pdev->dev,
+                       "%s: Mailbox not available, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n",
+                       __func__, cmd.cmd_op, cmd.type, ahw->pci_func,
+                       ahw->op_mode);
+               return err;
        }
-       for (j = 0; j < size; j++, i++)
-               writel(*(pay++), QLCNIC_MBX_HOST(ahw, i));
-
-       /* Signal FW about the impending command */
-       QLCWRX(ahw, QLCNIC_HOST_MBX_CTRL, QLCNIC_SET_OWNER);
 
-       /* Waiting for the mailbox cmd to complete and while waiting here
-        * some AEN might arrive. If more than 5 seconds expire we can
-        * assume something is wrong.
-        */
-poll:
-       rsp = qlcnic_83xx_mbx_poll(adapter, &wait_time);
-       if (rsp != QLCNIC_RCODE_TIMEOUT) {
-               /* Get the FW response data */
-               fw_data = readl(QLCNIC_MBX_FW(ahw, 0));
-               if (fw_data &  QLCNIC_MBX_ASYNC_EVENT) {
-                       __qlcnic_83xx_process_aen(adapter);
-                       goto poll;
-               }
-               mbx_err_code = QLCNIC_MBX_STATUS(fw_data);
-               rsp_num = QLCNIC_MBX_NUM_REGS(fw_data);
-               opcode = QLCNIC_MBX_RSP(fw_data);
-
-               switch (mbx_err_code) {
-               case QLCNIC_MBX_RSP_OK:
-               case QLCNIC_MBX_PORT_RSP_OK:
-                       rsp = QLCNIC_RCODE_SUCCESS;
-                       break;
-               default:
-                       if (opcode == QLCNIC_CMD_CONFIG_MAC_VLAN) {
-                               rsp = qlcnic_83xx_mac_rcode(adapter);
-                               if (!rsp)
-                                       goto out;
-                       }
-                       dev_err(&adapter->pdev->dev,
-                               "MBX command 0x%x failed with err:0x%x\n",
-                               opcode, mbx_err_code);
-                       rsp = mbx_err_code;
-                       break;
-               }
-               goto out;
+       if (!wait_for_completion_timeout(&cmd.completion, timeout)) {
+               dev_err(&adapter->pdev->dev,
+                       "%s: Mailbox command timed out, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n",
+                       __func__, cmd.cmd_op, cmd.type, ahw->pci_func,
+                       ahw->op_mode);
+               flush_workqueue(mbx->work_q);
        }
 
-       dev_err(&adapter->pdev->dev, "MBX command 0x%x timed out\n",
-               QLCNIC_MBX_RSP(mbx_cmd));
-       rsp = QLCNIC_RCODE_TIMEOUT;
-out:
-       /* clear fw mbx control register */
-       QLCWRX(ahw, QLCNIC_FW_MBX_CTRL, QLCNIC_CLR_OWNER);
-       spin_unlock_irqrestore(&adapter->ahw->mbx_lock, flags);
-       return rsp;
+       return cmd.rsp_opcode;
 }
 
 static void qlcnic_sriov_vf_cfg_buff_desc(struct qlcnic_adapter *adapter)
@@ -456,10 +398,14 @@ int qlcnic_sriov_get_vf_vport_info(struct qlcnic_adapter *adapter,
 }
 
 static int qlcnic_sriov_set_pvid_mode(struct qlcnic_adapter *adapter,
-                                     struct qlcnic_cmd_args *cmd)
+                                     struct qlcnic_cmd_args *cmd, u32 cap)
 {
-       adapter->rx_pvid = (cmd->rsp.arg[1] >> 16) & 0xffff;
-       adapter->flags &= ~QLCNIC_TAGGING_ENABLED;
+       if (cap & QLC_83XX_PVID_STRIP_CAPABILITY) {
+               adapter->rx_pvid = 0;
+       } else {
+               adapter->rx_pvid = (cmd->rsp.arg[1] >> 16) & 0xffff;
+               adapter->flags &= ~QLCNIC_TAGGING_ENABLED;
+       }
        return 0;
 }
 
@@ -490,12 +436,14 @@ static int qlcnic_sriov_set_guest_vlan_mode(struct qlcnic_adapter *adapter,
        return 0;
 }
 
-static int qlcnic_sriov_get_vf_acl(struct qlcnic_adapter *adapter)
+static int qlcnic_sriov_get_vf_acl(struct qlcnic_adapter *adapter,
+                                  struct qlcnic_info *info)
 {
        struct qlcnic_sriov *sriov = adapter->ahw->sriov;
        struct qlcnic_cmd_args cmd;
-       int ret;
+       int ret, cap;
 
+       cap = info->capabilities;
        ret = qlcnic_sriov_alloc_bc_mbx_args(&cmd, QLCNIC_BC_CMD_GET_ACL);
        if (ret)
                return ret;
@@ -511,7 +459,7 @@ static int qlcnic_sriov_get_vf_acl(struct qlcnic_adapter *adapter)
                        ret = qlcnic_sriov_set_guest_vlan_mode(adapter, &cmd);
                        break;
                case QLC_PVID_MODE:
-                       ret = qlcnic_sriov_set_pvid_mode(adapter, &cmd);
+                       ret = qlcnic_sriov_set_pvid_mode(adapter, &cmd, cap);
                        break;
                }
        }
@@ -522,8 +470,8 @@ static int qlcnic_sriov_get_vf_acl(struct qlcnic_adapter *adapter)
 
 static int qlcnic_sriov_vf_init_driver(struct qlcnic_adapter *adapter)
 {
-       struct qlcnic_info nic_info;
        struct qlcnic_hardware_context *ahw = adapter->ahw;
+       struct qlcnic_info nic_info;
        int err;
 
        err = qlcnic_sriov_get_vf_vport_info(adapter, &nic_info, 0);
@@ -534,7 +482,7 @@ static int qlcnic_sriov_vf_init_driver(struct qlcnic_adapter *adapter)
        if (err)
                return -EIO;
 
-       err = qlcnic_sriov_get_vf_acl(adapter);
+       err = qlcnic_sriov_get_vf_acl(adapter, &nic_info);
        if (err)
                return err;
 
@@ -564,7 +512,7 @@ static int qlcnic_sriov_setup_vf(struct qlcnic_adapter *adapter,
                dev_warn(&adapter->pdev->dev,
                         "Device does not support MSI interrupts\n");
 
-       err = qlcnic_setup_intr(adapter, 1);
+       err = qlcnic_setup_intr(adapter, 1, 0);
        if (err) {
                dev_err(&adapter->pdev->dev, "Failed to setup interrupt\n");
                goto err_out_disable_msi;
@@ -637,8 +585,6 @@ int qlcnic_sriov_vf_init(struct qlcnic_adapter *adapter, int pci_using_dac)
        struct qlcnic_hardware_context *ahw = adapter->ahw;
        int err;
 
-       spin_lock_init(&ahw->mbx_lock);
-       set_bit(QLC_83XX_MBX_READY, &ahw->idc.status);
        set_bit(QLC_83XX_MODULE_LOADED, &ahw->idc.status);
        ahw->idc.delay = QLC_83XX_IDC_FW_POLL_DELAY;
        ahw->reset_context = 0;
@@ -1085,6 +1031,7 @@ static void qlcnic_sriov_process_bc_cmd(struct work_struct *work)
        if (test_bit(QLC_BC_VF_FLR, &vf->state))
                return;
 
+       memset(&cmd, 0, sizeof(struct qlcnic_cmd_args));
        trans = list_first_entry(&vf->rcv_act.wait_list,
                                 struct qlcnic_bc_trans, list);
        adapter = vf->adapter;
@@ -1234,6 +1181,7 @@ static void qlcnic_sriov_handle_bc_cmd(struct qlcnic_sriov *sriov,
                return;
        }
 
+       memset(&cmd, 0, sizeof(struct qlcnic_cmd_args));
        cmd_op = hdr->cmd_op;
        if (qlcnic_sriov_alloc_bc_trans(&trans))
                return;
@@ -1359,7 +1307,7 @@ int qlcnic_sriov_cfg_bc_intr(struct qlcnic_adapter *adapter, u8 enable)
        if (enable)
                cmd.req.arg[1] = (1 << 4) | (1 << 5) | (1 << 6) | (1 << 7);
 
-       err = qlcnic_83xx_mbx_op(adapter, &cmd);
+       err = qlcnic_83xx_issue_cmd(adapter, &cmd);
 
        if (err != QLCNIC_RCODE_SUCCESS) {
                dev_err(&adapter->pdev->dev,
@@ -1391,10 +1339,11 @@ static int qlcnic_sriov_retry_bc_cmd(struct qlcnic_adapter *adapter,
        return -EIO;
 }
 
-static int qlcnic_sriov_vf_mbx_op(struct qlcnic_adapter *adapter,
+static int qlcnic_sriov_issue_cmd(struct qlcnic_adapter *adapter,
                                  struct qlcnic_cmd_args *cmd)
 {
        struct qlcnic_hardware_context *ahw = adapter->ahw;
+       struct qlcnic_mailbox *mbx = ahw->mailbox;
        struct device *dev = &adapter->pdev->dev;
        struct qlcnic_bc_trans *trans;
        int err;
@@ -1411,7 +1360,7 @@ static int qlcnic_sriov_vf_mbx_op(struct qlcnic_adapter *adapter,
                goto cleanup_transaction;
 
 retry:
-       if (!test_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status)) {
+       if (!test_bit(QLC_83XX_MBX_READY, &mbx->status)) {
                rsp = -EIO;
                QLCDB(adapter, DRV, "MBX not Ready!(cmd 0x%x) for VF 0x%x\n",
                      QLCNIC_MBX_RSP(cmd->req.arg[0]), func);
@@ -1454,7 +1403,7 @@ err_out:
        if (rsp == QLCNIC_RCODE_TIMEOUT) {
                ahw->reset_context = 1;
                adapter->need_fw_reset = 1;
-               clear_bit(QLC_83XX_MBX_READY, &ahw->idc.status);
+               clear_bit(QLC_83XX_MBX_READY, &mbx->status);
        }
 
 cleanup_transaction:
@@ -1613,8 +1562,8 @@ static int qlcnic_sriov_vf_reinit_driver(struct qlcnic_adapter *adapter)
 {
        int err;
 
-       set_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status);
-       qlcnic_83xx_enable_mbx_intrpt(adapter);
+       qlcnic_83xx_reinit_mbx_work(adapter->ahw->mailbox);
+       qlcnic_83xx_enable_mbx_interrupt(adapter);
 
        err = qlcnic_sriov_cfg_bc_intr(adapter, 1);
        if (err)
@@ -1657,8 +1606,10 @@ static void qlcnic_sriov_vf_detach(struct qlcnic_adapter *adapter)
        struct net_device *netdev = adapter->netdev;
        u8 i, max_ints = ahw->num_msix - 1;
 
-       qlcnic_83xx_disable_mbx_intr(adapter);
        netif_device_detach(netdev);
+       qlcnic_83xx_detach_mailbox_work(adapter);
+       qlcnic_83xx_disable_mbx_intr(adapter);
+
        if (netif_running(netdev))
                qlcnic_down(adapter, netdev);
 
@@ -1702,6 +1653,7 @@ static int qlcnic_sriov_vf_handle_dev_ready(struct qlcnic_adapter *adapter)
 static int qlcnic_sriov_vf_handle_context_reset(struct qlcnic_adapter *adapter)
 {
        struct qlcnic_hardware_context *ahw = adapter->ahw;
+       struct qlcnic_mailbox *mbx = ahw->mailbox;
        struct device *dev = &adapter->pdev->dev;
        struct qlc_83xx_idc *idc = &ahw->idc;
        u8 func = ahw->pci_func;
@@ -1712,7 +1664,7 @@ static int qlcnic_sriov_vf_handle_context_reset(struct qlcnic_adapter *adapter)
        /* Skip the context reset and check if FW is hung */
        if (adapter->reset_ctx_cnt < 3) {
                adapter->need_fw_reset = 1;
-               clear_bit(QLC_83XX_MBX_READY, &idc->status);
+               clear_bit(QLC_83XX_MBX_READY, &mbx->status);
                dev_info(dev,
                         "Resetting context, wait here to check if FW is in failed state\n");
                return 0;
@@ -1737,7 +1689,7 @@ static int qlcnic_sriov_vf_handle_context_reset(struct qlcnic_adapter *adapter)
                 __func__, adapter->reset_ctx_cnt, func);
        set_bit(__QLCNIC_RESETTING, &adapter->state);
        adapter->need_fw_reset = 1;
-       clear_bit(QLC_83XX_MBX_READY, &idc->status);
+       clear_bit(QLC_83XX_MBX_READY, &mbx->status);
        qlcnic_sriov_vf_detach(adapter);
        adapter->need_fw_reset = 0;
 
@@ -1787,6 +1739,7 @@ static int qlcnic_sriov_vf_idc_failed_state(struct qlcnic_adapter *adapter)
 static int
 qlcnic_sriov_vf_idc_need_quiescent_state(struct qlcnic_adapter *adapter)
 {
+       struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
        struct qlc_83xx_idc *idc = &adapter->ahw->idc;
 
        dev_info(&adapter->pdev->dev, "Device is in quiescent state\n");
@@ -1794,7 +1747,7 @@ qlcnic_sriov_vf_idc_need_quiescent_state(struct qlcnic_adapter *adapter)
                set_bit(__QLCNIC_RESETTING, &adapter->state);
                adapter->tx_timeo_cnt = 0;
                adapter->reset_ctx_cnt = 0;
-               clear_bit(QLC_83XX_MBX_READY, &idc->status);
+               clear_bit(QLC_83XX_MBX_READY, &mbx->status);
                qlcnic_sriov_vf_detach(adapter);
        }
 
@@ -1803,6 +1756,7 @@ qlcnic_sriov_vf_idc_need_quiescent_state(struct qlcnic_adapter *adapter)
 
 static int qlcnic_sriov_vf_idc_init_reset_state(struct qlcnic_adapter *adapter)
 {
+       struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
        struct qlc_83xx_idc *idc = &adapter->ahw->idc;
        u8 func = adapter->ahw->pci_func;
 
@@ -1812,7 +1766,7 @@ static int qlcnic_sriov_vf_idc_init_reset_state(struct qlcnic_adapter *adapter)
                set_bit(__QLCNIC_RESETTING, &adapter->state);
                adapter->tx_timeo_cnt = 0;
                adapter->reset_ctx_cnt = 0;
-               clear_bit(QLC_83XX_MBX_READY, &idc->status);
+               clear_bit(QLC_83XX_MBX_READY, &mbx->status);
                qlcnic_sriov_vf_detach(adapter);
        }
        return 0;
@@ -1990,7 +1944,7 @@ int qlcnic_sriov_vf_resume(struct qlcnic_adapter *adapter)
        int err;
 
        set_bit(QLC_83XX_MODULE_LOADED, &idc->status);
-       qlcnic_83xx_enable_mbx_intrpt(adapter);
+       qlcnic_83xx_enable_mbx_interrupt(adapter);
        err = qlcnic_sriov_cfg_bc_intr(adapter, 1);
        if (err)
                return err;
index 7e8d68263963aa5928f5ef1f145ad27624c14ce2..89943377846699e1e2719be8b2da16b07b144d09 100644 (file)
@@ -2149,7 +2149,7 @@ struct ql_adapter {
        struct timer_list timer;
        atomic_t lb_count;
        /* Keep local copy of current mac address. */
-       char current_mac_addr[6];
+       char current_mac_addr[ETH_ALEN];
 };
 
 /*
index b5eb4195fc9927a8b88493c7b1f783926928dc8c..95d245701c0ba9489985c4e562d52f15057609f0 100644 (file)
@@ -1898,9 +1898,6 @@ static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs,
 {
        struct rtl8169_private *tp = netdev_priv(dev);
 
-       if (regs->len > R8169_REGS_SIZE)
-               regs->len = R8169_REGS_SIZE;
-
        rtl_lock_work(tp);
        memcpy_fromio(p, tp->mmio_addr, regs->len);
        rtl_unlock_work(tp);
@@ -7088,7 +7085,7 @@ rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        RTL_W8(Cfg9346, Cfg9346_Unlock);
        RTL_W8(Config1, RTL_R8(Config1) | PMEnable);
-       RTL_W8(Config5, RTL_R8(Config5) & PMEStatus);
+       RTL_W8(Config5, RTL_R8(Config5) & (BWF | MWF | UWF | LanWake | PMEStatus));
        if ((RTL_R8(Config3) & (LinkUp | MagicPacket)) != 0)
                tp->features |= RTL_FEATURE_WOL;
        if ((RTL_R8(Config5) & (UWF | BWF | MWF)) != 0)
index 19a8a045e07728868cbe3a6e08b841865068e3e3..a30c4395b23220902d4b785333b31b53abfb24c7 100644 (file)
@@ -13,4 +13,4 @@ config SH_ETH
          Renesas SuperH Ethernet device driver.
          This driver supporting CPUs are:
                - SH7619, SH7710, SH7712, SH7724, SH7734, SH7763, SH7757,
-                 R8A7740 and R8A7779.
+                 R8A7740, R8A777x and R8A7790.
index a753928bab9c683d54036f92350e6db466eaa7b9..c3570764f58f9dcfb774a8534fdde38403701e62 100644 (file)
@@ -189,6 +189,7 @@ static const u16 sh_eth_offset_fast_rcar[SH_ETH_MAX_REGISTER_OFFSET] = {
        [RMCR]          = 0x0258,
        [TFUCR]         = 0x0264,
        [RFOCR]         = 0x0268,
+       [RMIIMODE]      = 0x026c,
        [FCFTR]         = 0x0270,
        [TRIMD]         = 0x027c,
 };
@@ -377,6 +378,8 @@ static struct sh_eth_cpu_data r8a777x_data = {
        .set_duplex     = sh_eth_set_duplex,
        .set_rate       = sh_eth_set_rate_r8a777x,
 
+       .register_type  = SH_ETH_REG_FAST_RCAR,
+
        .ecsr_value     = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
        .ecsipr_value   = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
        .eesipr_value   = 0x01ff009f,
@@ -392,6 +395,29 @@ static struct sh_eth_cpu_data r8a777x_data = {
        .hw_swap        = 1,
 };
 
+/* R8A7790 */
+static struct sh_eth_cpu_data r8a7790_data = {
+       .set_duplex     = sh_eth_set_duplex,
+       .set_rate       = sh_eth_set_rate_r8a777x,
+
+       .register_type  = SH_ETH_REG_FAST_RCAR,
+
+       .ecsr_value     = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
+       .ecsipr_value   = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
+       .eesipr_value   = 0x01ff009f,
+
+       .tx_check       = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
+       .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
+                         EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
+                         EESR_ECI,
+
+       .apr            = 1,
+       .mpr            = 1,
+       .tpauser        = 1,
+       .hw_swap        = 1,
+       .rmiimode       = 1,
+};
+
 static void sh_eth_set_rate_sh7724(struct net_device *ndev)
 {
        struct sh_eth_private *mdp = netdev_priv(ndev);
@@ -413,6 +439,8 @@ static struct sh_eth_cpu_data sh7724_data = {
        .set_duplex     = sh_eth_set_duplex,
        .set_rate       = sh_eth_set_rate_sh7724,
 
+       .register_type  = SH_ETH_REG_FAST_SH4,
+
        .ecsr_value     = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
        .ecsipr_value   = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
        .eesipr_value   = 0x01ff009f,
@@ -451,6 +479,8 @@ static struct sh_eth_cpu_data sh7757_data = {
        .set_duplex     = sh_eth_set_duplex,
        .set_rate       = sh_eth_set_rate_sh7757,
 
+       .register_type  = SH_ETH_REG_FAST_SH4,
+
        .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
        .rmcr_value     = 0x00000001,
 
@@ -519,6 +549,8 @@ static struct sh_eth_cpu_data sh7757_data_giga = {
        .set_duplex     = sh_eth_set_duplex,
        .set_rate       = sh_eth_set_rate_giga,
 
+       .register_type  = SH_ETH_REG_GIGABIT,
+
        .ecsr_value     = ECSR_ICD | ECSR_MPD,
        .ecsipr_value   = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
        .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
@@ -577,6 +609,8 @@ static struct sh_eth_cpu_data sh7734_data = {
        .set_duplex     = sh_eth_set_duplex,
        .set_rate       = sh_eth_set_rate_gether,
 
+       .register_type  = SH_ETH_REG_GIGABIT,
+
        .ecsr_value     = ECSR_ICD | ECSR_MPD,
        .ecsipr_value   = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
        .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
@@ -604,6 +638,8 @@ static struct sh_eth_cpu_data sh7763_data = {
        .set_duplex     = sh_eth_set_duplex,
        .set_rate       = sh_eth_set_rate_gether,
 
+       .register_type  = SH_ETH_REG_GIGABIT,
+
        .ecsr_value     = ECSR_ICD | ECSR_MPD,
        .ecsipr_value   = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
        .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
@@ -641,6 +677,8 @@ static struct sh_eth_cpu_data r8a7740_data = {
        .set_duplex     = sh_eth_set_duplex,
        .set_rate       = sh_eth_set_rate_gether,
 
+       .register_type  = SH_ETH_REG_GIGABIT,
+
        .ecsr_value     = ECSR_ICD | ECSR_MPD,
        .ecsipr_value   = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
        .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
@@ -663,6 +701,8 @@ static struct sh_eth_cpu_data r8a7740_data = {
 };
 
 static struct sh_eth_cpu_data sh7619_data = {
+       .register_type  = SH_ETH_REG_FAST_SH3_SH2,
+
        .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
 
        .apr            = 1,
@@ -672,6 +712,8 @@ static struct sh_eth_cpu_data sh7619_data = {
 };
 
 static struct sh_eth_cpu_data sh771x_data = {
+       .register_type  = SH_ETH_REG_FAST_SH3_SH2,
+
        .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
        .tsu            = 1,
 };
@@ -1124,6 +1166,9 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start)
        if (ret)
                goto out;
 
+       if (mdp->cd->rmiimode)
+               sh_eth_write(ndev, 0x1, RMIIMODE);
+
        /* Descriptor format */
        sh_eth_ring_format(ndev);
        if (mdp->cd->rpadir)
@@ -2618,10 +2663,10 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
        mdp->edmac_endian = pd->edmac_endian;
        mdp->no_ether_link = pd->no_ether_link;
        mdp->ether_link_active_low = pd->ether_link_active_low;
-       mdp->reg_offset = sh_eth_get_register_offset(pd->register_type);
 
        /* set cpu data */
        mdp->cd = (struct sh_eth_cpu_data *)id->driver_data;
+       mdp->reg_offset = sh_eth_get_register_offset(mdp->cd->register_type);
        sh_eth_set_default_cpu_data(mdp->cd);
 
        /* set function */
@@ -2749,6 +2794,7 @@ static struct platform_device_id sh_eth_id_table[] = {
        { "sh7763-gether", (kernel_ulong_t)&sh7763_data },
        { "r8a7740-gether", (kernel_ulong_t)&r8a7740_data },
        { "r8a777x-ether", (kernel_ulong_t)&r8a777x_data },
+       { "r8a7790-ether", (kernel_ulong_t)&r8a7790_data },
        { }
 };
 MODULE_DEVICE_TABLE(platform, sh_eth_id_table);
index 99995bf38c403a3eb641a9d1f79fe5fc54aa6b8b..a0db02c63b1157930ca0fca070dd733aa39b7c54 100644 (file)
@@ -60,6 +60,7 @@ enum {
        EDOCR,
        TFUCR,
        RFOCR,
+       RMIIMODE,
        FCFTR,
        RPADIR,
        TRIMD,
@@ -156,6 +157,13 @@ enum {
        SH_ETH_MAX_REGISTER_OFFSET,
 };
 
+enum {
+       SH_ETH_REG_GIGABIT,
+       SH_ETH_REG_FAST_RCAR,
+       SH_ETH_REG_FAST_SH4,
+       SH_ETH_REG_FAST_SH3_SH2
+};
+
 /* Driver's parameters */
 #if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
 #define SH4_SKB_RX_ALIGN       32
@@ -453,6 +461,7 @@ struct sh_eth_cpu_data {
        void (*set_rate)(struct net_device *ndev);
 
        /* mandatory initialize value */
+       int register_type;
        unsigned long eesipr_value;
 
        /* optional initialize value */
@@ -482,6 +491,7 @@ struct sh_eth_cpu_data {
        unsigned hw_crc:1;      /* E-DMAC have CSMR */
        unsigned select_mii:1;  /* EtherC have RMII_MII (MII select register) */
        unsigned shift_rd0:1;   /* shift Rx descriptor word 0 right by 16 */
+       unsigned rmiimode:1;    /* EtherC has RMIIMODE register */
 };
 
 struct sh_eth_private {
index 02df0894690d698c7234e8057a58714ad65a2915..ee18e6f7b4fe19d9b8e3bbec69fd5e2cd098cb81 100644 (file)
@@ -1770,9 +1770,6 @@ static void sis190_get_regs(struct net_device *dev, struct ethtool_regs *regs,
        struct sis190_private *tp = netdev_priv(dev);
        unsigned long flags;
 
-       if (regs->len > SIS190_REGS_SIZE)
-               regs->len = SIS190_REGS_SIZE;
-
        spin_lock_irqsave(&tp->lock, flags);
        memcpy_fromio(p, tp->mmio_addr, regs->len);
        spin_unlock_irqrestore(&tp->lock, flags);
index f5d7ad75e47903ce2156864c6dabd1444ae27baf..b7a39305472b156c05b21d0af850fd56fc3945dd 100644 (file)
@@ -1309,23 +1309,9 @@ static void sis900_timer(unsigned long data)
        struct sis900_private *sis_priv = netdev_priv(net_dev);
        struct mii_phy *mii_phy = sis_priv->mii;
        static const int next_tick = 5*HZ;
+       int speed = 0, duplex = 0;
        u16 status;
 
-       if (!sis_priv->autong_complete){
-               int uninitialized_var(speed), duplex = 0;
-
-               sis900_read_mode(net_dev, &speed, &duplex);
-               if (duplex){
-                       sis900_set_mode(sis_priv, speed, duplex);
-                       sis630_set_eq(net_dev, sis_priv->chipset_rev);
-                       netif_carrier_on(net_dev);
-               }
-
-               sis_priv->timer.expires = jiffies + HZ;
-               add_timer(&sis_priv->timer);
-               return;
-       }
-
        status = mdio_read(net_dev, sis_priv->cur_phy, MII_STATUS);
        status = mdio_read(net_dev, sis_priv->cur_phy, MII_STATUS);
 
@@ -1336,8 +1322,16 @@ static void sis900_timer(unsigned long data)
                status = sis900_default_phy(net_dev);
                mii_phy = sis_priv->mii;
 
-               if (status & MII_STAT_LINK)
-                       sis900_check_mode(net_dev, mii_phy);
+               if (status & MII_STAT_LINK) {
+                       WARN_ON(!(status & MII_STAT_AUTO_DONE));
+
+                       sis900_read_mode(net_dev, &speed, &duplex);
+                       if (duplex) {
+                               sis900_set_mode(sis_priv, speed, duplex);
+                               sis630_set_eq(net_dev, sis_priv->chipset_rev);
+                               netif_carrier_on(net_dev);
+                       }
+               }
        } else {
        /* Link ON -> OFF */
                 if (!(status & MII_STAT_LINK)){
index 03de76c7a177b6c9b54d571370e92d697c9a9ef6..da8be6e630961bac8478565741afe03b7b36a046 100644 (file)
@@ -109,9 +109,6 @@ static int stmmac_pltfr_probe(struct platform_device *pdev)
        const char *mac = NULL;
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res)
-               return -ENODEV;
-
        addr = devm_ioremap_resource(dev, res);
        if (IS_ERR(addr))
                return PTR_ERR(addr);
index 0d43fa9ff9801ccbc19a74510f9aef952e22a3f8..7217ee5d62730acbc43de559abc2fe555e043155 100644 (file)
@@ -1239,7 +1239,7 @@ static int bigmac_sbus_probe(struct platform_device *op)
 
 static int bigmac_sbus_remove(struct platform_device *op)
 {
-       struct bigmac *bp = dev_get_drvdata(&op->dev);
+       struct bigmac *bp = platform_get_drvdata(op);
        struct device *parent = op->dev.parent;
        struct net_device *net_dev = bp->dev;
        struct platform_device *qec_op;
@@ -1259,8 +1259,6 @@ static int bigmac_sbus_remove(struct platform_device *op)
 
        free_netdev(net_dev);
 
-       dev_set_drvdata(&op->dev, NULL);
-
        return 0;
 }
 
index 171f5b0809c4d1659cd26d8ba7255a915c1d8f70..c67e683a36e1e8edeac343e5b7fa56fa7547a926 100644 (file)
@@ -3231,7 +3231,7 @@ static int hme_sbus_probe(struct platform_device *op)
 
 static int hme_sbus_remove(struct platform_device *op)
 {
-       struct happy_meal *hp = dev_get_drvdata(&op->dev);
+       struct happy_meal *hp = platform_get_drvdata(op);
        struct net_device *net_dev = hp->dev;
 
        unregister_netdev(net_dev);
@@ -3250,8 +3250,6 @@ static int hme_sbus_remove(struct platform_device *op)
 
        free_netdev(net_dev);
 
-       dev_set_drvdata(&op->dev, NULL);
-
        return 0;
 }
 
index 22a7a43362113c60d251a0daa85b536bcebd3648..79974e31187ac19af63452a3a0c421a5ff2b7cf7 100644 (file)
@@ -34,9 +34,9 @@
 #include <linux/of_device.h>
 #include <linux/if_vlan.h>
 
-#include <linux/platform_data/cpsw.h>
 #include <linux/pinctrl/consumer.h>
 
+#include "cpsw.h"
 #include "cpsw_ale.h"
 #include "cpts.h"
 #include "davinci_cpdma.h"
@@ -82,6 +82,8 @@ do {                                                          \
 
 #define CPSW_VERSION_1         0x19010a
 #define CPSW_VERSION_2         0x19010c
+#define CPSW_VERSION_3         0x19010f
+#define CPSW_VERSION_4         0x190112
 
 #define HOST_PORT_NUM          0
 #define SLIVER_SIZE            0x40
@@ -91,6 +93,7 @@ do {                                                          \
 #define CPSW1_SLAVE_SIZE       0x040
 #define CPSW1_CPDMA_OFFSET     0x100
 #define CPSW1_STATERAM_OFFSET  0x200
+#define CPSW1_HW_STATS         0x400
 #define CPSW1_CPTS_OFFSET      0x500
 #define CPSW1_ALE_OFFSET       0x600
 #define CPSW1_SLIVER_OFFSET    0x700
@@ -99,6 +102,7 @@ do {                                                         \
 #define CPSW2_SLAVE_OFFSET     0x200
 #define CPSW2_SLAVE_SIZE       0x100
 #define CPSW2_CPDMA_OFFSET     0x800
+#define CPSW2_HW_STATS         0x900
 #define CPSW2_STATERAM_OFFSET  0xa00
 #define CPSW2_CPTS_OFFSET      0xc00
 #define CPSW2_ALE_OFFSET       0xd00
@@ -299,6 +303,44 @@ struct cpsw_sliver_regs {
        u32     rx_pri_map;
 };
 
+struct cpsw_hw_stats {
+       u32     rxgoodframes;
+       u32     rxbroadcastframes;
+       u32     rxmulticastframes;
+       u32     rxpauseframes;
+       u32     rxcrcerrors;
+       u32     rxaligncodeerrors;
+       u32     rxoversizedframes;
+       u32     rxjabberframes;
+       u32     rxundersizedframes;
+       u32     rxfragments;
+       u32     __pad_0[2];
+       u32     rxoctets;
+       u32     txgoodframes;
+       u32     txbroadcastframes;
+       u32     txmulticastframes;
+       u32     txpauseframes;
+       u32     txdeferredframes;
+       u32     txcollisionframes;
+       u32     txsinglecollframes;
+       u32     txmultcollframes;
+       u32     txexcessivecollisions;
+       u32     txlatecollisions;
+       u32     txunderrun;
+       u32     txcarriersenseerrors;
+       u32     txoctets;
+       u32     octetframes64;
+       u32     octetframes65t127;
+       u32     octetframes128t255;
+       u32     octetframes256t511;
+       u32     octetframes512t1023;
+       u32     octetframes1024tup;
+       u32     netoctets;
+       u32     rxsofoverruns;
+       u32     rxmofoverruns;
+       u32     rxdmaoverruns;
+};
+
 struct cpsw_slave {
        void __iomem                    *regs;
        struct cpsw_sliver_regs __iomem *sliver;
@@ -332,6 +374,7 @@ struct cpsw_priv {
        struct cpsw_platform_data       data;
        struct cpsw_ss_regs __iomem     *regs;
        struct cpsw_wr_regs __iomem     *wr_regs;
+       u8 __iomem                      *hw_stats;
        struct cpsw_host_regs __iomem   *host_port_regs;
        u32                             msg_enable;
        u32                             version;
@@ -354,6 +397,94 @@ struct cpsw_priv {
        u32 emac_port;
 };
 
+struct cpsw_stats {
+       char stat_string[ETH_GSTRING_LEN];
+       int type;
+       int sizeof_stat;
+       int stat_offset;
+};
+
+enum {
+       CPSW_STATS,
+       CPDMA_RX_STATS,
+       CPDMA_TX_STATS,
+};
+
+#define CPSW_STAT(m)           CPSW_STATS,                             \
+                               sizeof(((struct cpsw_hw_stats *)0)->m), \
+                               offsetof(struct cpsw_hw_stats, m)
+#define CPDMA_RX_STAT(m)       CPDMA_RX_STATS,                            \
+                               sizeof(((struct cpdma_chan_stats *)0)->m), \
+                               offsetof(struct cpdma_chan_stats, m)
+#define CPDMA_TX_STAT(m)       CPDMA_TX_STATS,                            \
+                               sizeof(((struct cpdma_chan_stats *)0)->m), \
+                               offsetof(struct cpdma_chan_stats, m)
+
+static const struct cpsw_stats cpsw_gstrings_stats[] = {
+       { "Good Rx Frames", CPSW_STAT(rxgoodframes) },
+       { "Broadcast Rx Frames", CPSW_STAT(rxbroadcastframes) },
+       { "Multicast Rx Frames", CPSW_STAT(rxmulticastframes) },
+       { "Pause Rx Frames", CPSW_STAT(rxpauseframes) },
+       { "Rx CRC Errors", CPSW_STAT(rxcrcerrors) },
+       { "Rx Align/Code Errors", CPSW_STAT(rxaligncodeerrors) },
+       { "Oversize Rx Frames", CPSW_STAT(rxoversizedframes) },
+       { "Rx Jabbers", CPSW_STAT(rxjabberframes) },
+       { "Undersize (Short) Rx Frames", CPSW_STAT(rxundersizedframes) },
+       { "Rx Fragments", CPSW_STAT(rxfragments) },
+       { "Rx Octets", CPSW_STAT(rxoctets) },
+       { "Good Tx Frames", CPSW_STAT(txgoodframes) },
+       { "Broadcast Tx Frames", CPSW_STAT(txbroadcastframes) },
+       { "Multicast Tx Frames", CPSW_STAT(txmulticastframes) },
+       { "Pause Tx Frames", CPSW_STAT(txpauseframes) },
+       { "Deferred Tx Frames", CPSW_STAT(txdeferredframes) },
+       { "Collisions", CPSW_STAT(txcollisionframes) },
+       { "Single Collision Tx Frames", CPSW_STAT(txsinglecollframes) },
+       { "Multiple Collision Tx Frames", CPSW_STAT(txmultcollframes) },
+       { "Excessive Collisions", CPSW_STAT(txexcessivecollisions) },
+       { "Late Collisions", CPSW_STAT(txlatecollisions) },
+       { "Tx Underrun", CPSW_STAT(txunderrun) },
+       { "Carrier Sense Errors", CPSW_STAT(txcarriersenseerrors) },
+       { "Tx Octets", CPSW_STAT(txoctets) },
+       { "Rx + Tx 64 Octet Frames", CPSW_STAT(octetframes64) },
+       { "Rx + Tx 65-127 Octet Frames", CPSW_STAT(octetframes65t127) },
+       { "Rx + Tx 128-255 Octet Frames", CPSW_STAT(octetframes128t255) },
+       { "Rx + Tx 256-511 Octet Frames", CPSW_STAT(octetframes256t511) },
+       { "Rx + Tx 512-1023 Octet Frames", CPSW_STAT(octetframes512t1023) },
+       { "Rx + Tx 1024-Up Octet Frames", CPSW_STAT(octetframes1024tup) },
+       { "Net Octets", CPSW_STAT(netoctets) },
+       { "Rx Start of Frame Overruns", CPSW_STAT(rxsofoverruns) },
+       { "Rx Middle of Frame Overruns", CPSW_STAT(rxmofoverruns) },
+       { "Rx DMA Overruns", CPSW_STAT(rxdmaoverruns) },
+       { "Rx DMA chan: head_enqueue", CPDMA_RX_STAT(head_enqueue) },
+       { "Rx DMA chan: tail_enqueue", CPDMA_RX_STAT(tail_enqueue) },
+       { "Rx DMA chan: pad_enqueue", CPDMA_RX_STAT(pad_enqueue) },
+       { "Rx DMA chan: misqueued", CPDMA_RX_STAT(misqueued) },
+       { "Rx DMA chan: desc_alloc_fail", CPDMA_RX_STAT(desc_alloc_fail) },
+       { "Rx DMA chan: pad_alloc_fail", CPDMA_RX_STAT(pad_alloc_fail) },
+       { "Rx DMA chan: runt_receive_buf", CPDMA_RX_STAT(runt_receive_buff) },
+       { "Rx DMA chan: runt_transmit_buf", CPDMA_RX_STAT(runt_transmit_buff) },
+       { "Rx DMA chan: empty_dequeue", CPDMA_RX_STAT(empty_dequeue) },
+       { "Rx DMA chan: busy_dequeue", CPDMA_RX_STAT(busy_dequeue) },
+       { "Rx DMA chan: good_dequeue", CPDMA_RX_STAT(good_dequeue) },
+       { "Rx DMA chan: requeue", CPDMA_RX_STAT(requeue) },
+       { "Rx DMA chan: teardown_dequeue", CPDMA_RX_STAT(teardown_dequeue) },
+       { "Tx DMA chan: head_enqueue", CPDMA_TX_STAT(head_enqueue) },
+       { "Tx DMA chan: tail_enqueue", CPDMA_TX_STAT(tail_enqueue) },
+       { "Tx DMA chan: pad_enqueue", CPDMA_TX_STAT(pad_enqueue) },
+       { "Tx DMA chan: misqueued", CPDMA_TX_STAT(misqueued) },
+       { "Tx DMA chan: desc_alloc_fail", CPDMA_TX_STAT(desc_alloc_fail) },
+       { "Tx DMA chan: pad_alloc_fail", CPDMA_TX_STAT(pad_alloc_fail) },
+       { "Tx DMA chan: runt_receive_buf", CPDMA_TX_STAT(runt_receive_buff) },
+       { "Tx DMA chan: runt_transmit_buf", CPDMA_TX_STAT(runt_transmit_buff) },
+       { "Tx DMA chan: empty_dequeue", CPDMA_TX_STAT(empty_dequeue) },
+       { "Tx DMA chan: busy_dequeue", CPDMA_TX_STAT(busy_dequeue) },
+       { "Tx DMA chan: good_dequeue", CPDMA_TX_STAT(good_dequeue) },
+       { "Tx DMA chan: requeue", CPDMA_TX_STAT(requeue) },
+       { "Tx DMA chan: teardown_dequeue", CPDMA_TX_STAT(teardown_dequeue) },
+};
+
+#define CPSW_STATS_LEN ARRAY_SIZE(cpsw_gstrings_stats)
+
 #define napi_to_priv(napi)     container_of(napi, struct cpsw_priv, napi)
 #define for_each_slave(priv, func, arg...)                             \
        do {                                                            \
@@ -723,6 +854,69 @@ static int cpsw_set_coalesce(struct net_device *ndev,
        return 0;
 }
 
+static int cpsw_get_sset_count(struct net_device *ndev, int sset)
+{
+       switch (sset) {
+       case ETH_SS_STATS:
+               return CPSW_STATS_LEN;
+       default:
+               return -EOPNOTSUPP;
+       }
+}
+
+static void cpsw_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
+{
+       u8 *p = data;
+       int i;
+
+       switch (stringset) {
+       case ETH_SS_STATS:
+               for (i = 0; i < CPSW_STATS_LEN; i++) {
+                       memcpy(p, cpsw_gstrings_stats[i].stat_string,
+                              ETH_GSTRING_LEN);
+                       p += ETH_GSTRING_LEN;
+               }
+               break;
+       }
+}
+
+static void cpsw_get_ethtool_stats(struct net_device *ndev,
+                                   struct ethtool_stats *stats, u64 *data)
+{
+       struct cpsw_priv *priv = netdev_priv(ndev);
+       struct cpdma_chan_stats rx_stats;
+       struct cpdma_chan_stats tx_stats;
+       u32 val;
+       u8 *p;
+       int i;
+
+       /* Collect Davinci CPDMA stats for Rx and Tx Channel */
+       cpdma_chan_get_stats(priv->rxch, &rx_stats);
+       cpdma_chan_get_stats(priv->txch, &tx_stats);
+
+       for (i = 0; i < CPSW_STATS_LEN; i++) {
+               switch (cpsw_gstrings_stats[i].type) {
+               case CPSW_STATS:
+                       val = readl(priv->hw_stats +
+                                   cpsw_gstrings_stats[i].stat_offset);
+                       data[i] = val;
+                       break;
+
+               case CPDMA_RX_STATS:
+                       p = (u8 *)&rx_stats +
+                               cpsw_gstrings_stats[i].stat_offset;
+                       data[i] = *(u32 *)p;
+                       break;
+
+               case CPDMA_TX_STATS:
+                       p = (u8 *)&tx_stats +
+                               cpsw_gstrings_stats[i].stat_offset;
+                       data[i] = *(u32 *)p;
+                       break;
+               }
+       }
+}
+
 static inline int __show_stat(char *buf, int maxlen, const char *name, u32 val)
 {
        static char *leader = "........................................";
@@ -799,6 +993,8 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
                slave_write(slave, TX_PRIORITY_MAPPING, CPSW1_TX_PRI_MAP);
                break;
        case CPSW_VERSION_2:
+       case CPSW_VERSION_3:
+       case CPSW_VERSION_4:
                slave_write(slave, TX_PRIORITY_MAPPING, CPSW2_TX_PRI_MAP);
                break;
        }
@@ -1232,6 +1428,33 @@ static void cpsw_ndo_tx_timeout(struct net_device *ndev)
 
 }
 
+static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p)
+{
+       struct cpsw_priv *priv = netdev_priv(ndev);
+       struct sockaddr *addr = (struct sockaddr *)p;
+       int flags = 0;
+       u16 vid = 0;
+
+       if (!is_valid_ether_addr(addr->sa_data))
+               return -EADDRNOTAVAIL;
+
+       if (priv->data.dual_emac) {
+               vid = priv->slaves[priv->emac_port].port_vlan;
+               flags = ALE_VLAN;
+       }
+
+       cpsw_ale_del_ucast(priv->ale, priv->mac_addr, priv->host_port,
+                          flags, vid);
+       cpsw_ale_add_ucast(priv->ale, addr->sa_data, priv->host_port,
+                          flags, vid);
+
+       memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN);
+       memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN);
+       for_each_slave(priv, cpsw_set_slave_mac, priv);
+
+       return 0;
+}
+
 static struct net_device_stats *cpsw_ndo_get_stats(struct net_device *ndev)
 {
        struct cpsw_priv *priv = netdev_priv(ndev);
@@ -1326,6 +1549,7 @@ static const struct net_device_ops cpsw_netdev_ops = {
        .ndo_stop               = cpsw_ndo_stop,
        .ndo_start_xmit         = cpsw_ndo_start_xmit,
        .ndo_change_rx_flags    = cpsw_ndo_change_rx_flags,
+       .ndo_set_mac_address    = cpsw_ndo_set_mac_address,
        .ndo_do_ioctl           = cpsw_ndo_ioctl,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_change_mtu         = eth_change_mtu,
@@ -1416,6 +1640,29 @@ static int cpsw_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
                return -EOPNOTSUPP;
 }
 
+static void cpsw_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
+{
+       struct cpsw_priv *priv = netdev_priv(ndev);
+       int slave_no = cpsw_slave_index(priv);
+
+       wol->supported = 0;
+       wol->wolopts = 0;
+
+       if (priv->slaves[slave_no].phy)
+               phy_ethtool_get_wol(priv->slaves[slave_no].phy, wol);
+}
+
+static int cpsw_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
+{
+       struct cpsw_priv *priv = netdev_priv(ndev);
+       int slave_no = cpsw_slave_index(priv);
+
+       if (priv->slaves[slave_no].phy)
+               return phy_ethtool_set_wol(priv->slaves[slave_no].phy, wol);
+       else
+               return -EOPNOTSUPP;
+}
+
 static const struct ethtool_ops cpsw_ethtool_ops = {
        .get_drvinfo    = cpsw_get_drvinfo,
        .get_msglevel   = cpsw_get_msglevel,
@@ -1426,6 +1673,11 @@ static const struct ethtool_ops cpsw_ethtool_ops = {
        .set_settings   = cpsw_set_settings,
        .get_coalesce   = cpsw_get_coalesce,
        .set_coalesce   = cpsw_set_coalesce,
+       .get_sset_count         = cpsw_get_sset_count,
+       .get_strings            = cpsw_get_strings,
+       .get_ethtool_stats      = cpsw_get_ethtool_stats,
+       .get_wol        = cpsw_get_wol,
+       .set_wol        = cpsw_set_wol,
 };
 
 static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv,
@@ -1623,6 +1875,7 @@ static int cpsw_probe_dual_emac(struct platform_device *pdev,
        priv_sl2->host_port = priv->host_port;
        priv_sl2->host_port_regs = priv->host_port_regs;
        priv_sl2->wr_regs = priv->wr_regs;
+       priv_sl2->hw_stats = priv->hw_stats;
        priv_sl2->dma = priv->dma;
        priv_sl2->txch = priv->txch;
        priv_sl2->rxch = priv->rxch;
@@ -1780,7 +2033,8 @@ static int cpsw_probe(struct platform_device *pdev)
        switch (priv->version) {
        case CPSW_VERSION_1:
                priv->host_port_regs = ss_regs + CPSW1_HOST_PORT_OFFSET;
-               priv->cpts->reg       = ss_regs + CPSW1_CPTS_OFFSET;
+               priv->cpts->reg      = ss_regs + CPSW1_CPTS_OFFSET;
+               priv->hw_stats       = ss_regs + CPSW1_HW_STATS;
                dma_params.dmaregs   = ss_regs + CPSW1_CPDMA_OFFSET;
                dma_params.txhdp     = ss_regs + CPSW1_STATERAM_OFFSET;
                ale_params.ale_regs  = ss_regs + CPSW1_ALE_OFFSET;
@@ -1790,8 +2044,11 @@ static int cpsw_probe(struct platform_device *pdev)
                dma_params.desc_mem_phys = 0;
                break;
        case CPSW_VERSION_2:
+       case CPSW_VERSION_3:
+       case CPSW_VERSION_4:
                priv->host_port_regs = ss_regs + CPSW2_HOST_PORT_OFFSET;
-               priv->cpts->reg       = ss_regs + CPSW2_CPTS_OFFSET;
+               priv->cpts->reg      = ss_regs + CPSW2_CPTS_OFFSET;
+               priv->hw_stats       = ss_regs + CPSW2_HW_STATS;
                dma_params.dmaregs   = ss_regs + CPSW2_CPDMA_OFFSET;
                dma_params.txhdp     = ss_regs + CPSW2_STATERAM_OFFSET;
                ale_params.ale_regs  = ss_regs + CPSW2_ALE_OFFSET;
similarity index 86%
rename from include/linux/platform_data/cpsw.h
rename to drivers/net/ethernet/ti/cpsw.h
index bb3cd58d71e3fbac50c9aed1afbf7ef0cbeaa108..eb3e101ec04878c87f8a3a6dee6243f9d6970b52 100644 (file)
@@ -1,11 +1,10 @@
-/*
- * Texas Instruments Ethernet Switch Driver
+/* Texas Instruments Ethernet Switch Driver
  *
- * Copyright (C) 2012 Texas Instruments
+ * Copyright (C) 2013 Texas Instruments
  *
  * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
  *
  * This program is distributed "as is" WITHOUT ANY WARRANTY of any
  * kind, whether express or implied; without even the implied warranty
@@ -22,14 +21,13 @@ struct cpsw_slave_data {
        int             phy_if;
        u8              mac_addr[ETH_ALEN];
        u16             dual_emac_res_vlan;     /* Reserved VLAN for DualEMAC */
-
 };
 
 struct cpsw_platform_data {
+       struct cpsw_slave_data  *slave_data;
        u32     ss_reg_ofs;     /* Subsystem control register offset */
        u32     channels;       /* number of cpdma channels (symmetric) */
        u32     slaves;         /* number of slave cpgmac ports */
-       struct cpsw_slave_data  *slave_data;
        u32     active_slave; /* time stamping, ethtool and SIOCGMIIPHY slave */
        u32     cpts_clock_mult;  /* convert input clock ticks to nanoseconds */
        u32     cpts_clock_shift; /* convert input clock ticks to nanoseconds */
index 16ddfc348062c4b3cf3b14d0aac1b7e2a2f70e6f..7f851438486379de0558e4dc5693d08fd2e1f0a9 100644 (file)
@@ -421,8 +421,7 @@ bail_out:
 
 static int davinci_mdio_remove(struct platform_device *pdev)
 {
-       struct device *dev = &pdev->dev;
-       struct davinci_mdio_data *data = dev_get_drvdata(dev);
+       struct davinci_mdio_data *data = platform_get_drvdata(pdev);
 
        if (data->bus) {
                mdiobus_unregister(data->bus);
@@ -434,8 +433,6 @@ static int davinci_mdio_remove(struct platform_device *pdev)
        pm_runtime_put_sync(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
 
-       dev_set_drvdata(dev, NULL);
-
        kfree(data);
 
        return 0;
index 098b1c42b39368faef868e50fdbb3174a6ccf8d9..4083ba8839e1f2e73d1a1b872c4e4149a6db419e 100644 (file)
@@ -15,3 +15,14 @@ config TILE_NET
 
          To compile this driver as a module, choose M here: the module
          will be called tile_net.
+
+config PTP_1588_CLOCK_TILEGX
+        tristate "Tilera TILE-Gx mPIPE as PTP clock"
+        select PTP_1588_CLOCK
+        depends on TILE_NET
+        depends on TILEGX
+        ---help---
+          This driver adds support for using the mPIPE as a PTP
+          clock. This clock is only useful if your PTP programs are
+          getting hardware time stamps on the PTP Ethernet packets
+          using the SO_TIMESTAMPING API.
index f3c2d034b32c817530f3767de710de41c1d71b90..5d2a719fc68824c3b77ee5519406f4f9df31d42f 100644 (file)
 #include <linux/io.h>
 #include <linux/ctype.h>
 #include <linux/ip.h>
+#include <linux/ipv6.h>
 #include <linux/tcp.h>
+#include <linux/net_tstamp.h>
+#include <linux/ptp_clock_kernel.h>
 
 #include <asm/checksum.h>
 #include <asm/homecache.h>
@@ -76,6 +79,9 @@
 
 #define MAX_FRAGS (MAX_SKB_FRAGS + 1)
 
+/* The "kinds" of buffer stacks (small/large/jumbo). */
+#define MAX_KINDS 3
+
 /* Size of completions data to allocate.
  * ISSUE: Probably more than needed since we don't use all the channels.
  */
@@ -130,29 +136,31 @@ struct tile_net_tx_wake {
 
 /* Info for a specific cpu. */
 struct tile_net_info {
-       /* The NAPI struct. */
-       struct napi_struct napi;
-       /* Packet queue. */
-       gxio_mpipe_iqueue_t iqueue;
        /* Our cpu. */
        int my_cpu;
-       /* True if iqueue is valid. */
-       bool has_iqueue;
-       /* NAPI flags. */
-       bool napi_added;
-       bool napi_enabled;
-       /* Number of small sk_buffs which must still be provided. */
-       unsigned int num_needed_small_buffers;
-       /* Number of large sk_buffs which must still be provided. */
-       unsigned int num_needed_large_buffers;
        /* A timer for handling egress completions. */
        struct hrtimer egress_timer;
        /* True if "egress_timer" is scheduled. */
        bool egress_timer_scheduled;
-       /* Comps for each egress channel. */
-       struct tile_net_comps *comps_for_echannel[TILE_NET_CHANNELS];
-       /* Transmit wake timer for each egress channel. */
-       struct tile_net_tx_wake tx_wake[TILE_NET_CHANNELS];
+       struct info_mpipe {
+               /* Packet queue. */
+               gxio_mpipe_iqueue_t iqueue;
+               /* The NAPI struct. */
+               struct napi_struct napi;
+               /* Number of buffers (by kind) which must still be provided. */
+               unsigned int num_needed_buffers[MAX_KINDS];
+               /* instance id. */
+               int instance;
+               /* True if iqueue is valid. */
+               bool has_iqueue;
+               /* NAPI flags. */
+               bool napi_added;
+               bool napi_enabled;
+               /* Comps for each egress channel. */
+               struct tile_net_comps *comps_for_echannel[TILE_NET_CHANNELS];
+               /* Transmit wake timer for each egress channel. */
+               struct tile_net_tx_wake tx_wake[TILE_NET_CHANNELS];
+       } mpipe[NR_MPIPE_MAX];
 };
 
 /* Info for egress on a particular egress channel. */
@@ -177,19 +185,67 @@ struct tile_net_priv {
        int loopify_channel;
        /* The egress channel (channel or loopify_channel). */
        int echannel;
-       /* Total stats. */
-       struct net_device_stats stats;
+       /* mPIPE instance, 0 or 1. */
+       int instance;
+#ifdef CONFIG_PTP_1588_CLOCK_TILEGX
+       /* The timestamp config. */
+       struct hwtstamp_config stamp_cfg;
+#endif
 };
 
-/* Egress info, indexed by "priv->echannel" (lazily created as needed). */
-static struct tile_net_egress egress_for_echannel[TILE_NET_CHANNELS];
+static struct mpipe_data {
+       /* The ingress irq. */
+       int ingress_irq;
 
-/* Devices currently associated with each channel.
- * NOTE: The array entry can become NULL after ifconfig down, but
- * we do not free the underlying net_device structures, so it is
- * safe to use a pointer after reading it from this array.
- */
-static struct net_device *tile_net_devs_for_channel[TILE_NET_CHANNELS];
+       /* The "context" for all devices. */
+       gxio_mpipe_context_t context;
+
+       /* Egress info, indexed by "priv->echannel"
+        * (lazily created as needed).
+        */
+       struct tile_net_egress
+       egress_for_echannel[TILE_NET_CHANNELS];
+
+       /* Devices currently associated with each channel.
+        * NOTE: The array entry can become NULL after ifconfig down, but
+        * we do not free the underlying net_device structures, so it is
+        * safe to use a pointer after reading it from this array.
+        */
+       struct net_device
+       *tile_net_devs_for_channel[TILE_NET_CHANNELS];
+
+       /* The actual memory allocated for the buffer stacks. */
+       void *buffer_stack_vas[MAX_KINDS];
+
+       /* The amount of memory allocated for each buffer stack. */
+       size_t buffer_stack_bytes[MAX_KINDS];
+
+       /* The first buffer stack index
+        * (small = +0, large = +1, jumbo = +2).
+        */
+       int first_buffer_stack;
+
+       /* The buckets. */
+       int first_bucket;
+       int num_buckets;
+
+#ifdef CONFIG_PTP_1588_CLOCK_TILEGX
+       /* PTP-specific data. */
+       struct ptp_clock *ptp_clock;
+       struct ptp_clock_info caps;
+
+       /* Lock for ptp accessors. */
+       struct mutex ptp_lock;
+#endif
+
+} mpipe_data[NR_MPIPE_MAX] = {
+       [0 ... (NR_MPIPE_MAX - 1)] {
+               .ingress_irq = -1,
+               .first_buffer_stack = -1,
+               .first_bucket = -1,
+               .num_buckets = 1
+       }
+};
 
 /* A mutex for "tile_net_devs_for_channel". */
 static DEFINE_MUTEX(tile_net_devs_for_channel_mutex);
@@ -197,34 +253,17 @@ static DEFINE_MUTEX(tile_net_devs_for_channel_mutex);
 /* The per-cpu info. */
 static DEFINE_PER_CPU(struct tile_net_info, per_cpu_info);
 
-/* The "context" for all devices. */
-static gxio_mpipe_context_t context;
 
-/* Buffer sizes and mpipe enum codes for buffer stacks.
+/* The buffer size enums for each buffer stack.
  * See arch/tile/include/gxio/mpipe.h for the set of possible values.
+ * We avoid the "10384" size because it can induce "false chaining"
+ * on "cut-through" jumbo packets.
  */
-#define BUFFER_SIZE_SMALL_ENUM GXIO_MPIPE_BUFFER_SIZE_128
-#define BUFFER_SIZE_SMALL 128
-#define BUFFER_SIZE_LARGE_ENUM GXIO_MPIPE_BUFFER_SIZE_1664
-#define BUFFER_SIZE_LARGE 1664
-
-/* The small/large "buffer stacks". */
-static int small_buffer_stack = -1;
-static int large_buffer_stack = -1;
-
-/* Amount of memory allocated for each buffer stack. */
-static size_t buffer_stack_size;
-
-/* The actual memory allocated for the buffer stacks. */
-static void *small_buffer_stack_va;
-static void *large_buffer_stack_va;
-
-/* The buckets. */
-static int first_bucket = -1;
-static int num_buckets = 1;
-
-/* The ingress irq. */
-static int ingress_irq = -1;
+static gxio_mpipe_buffer_size_enum_t buffer_size_enums[MAX_KINDS] = {
+       GXIO_MPIPE_BUFFER_SIZE_128,
+       GXIO_MPIPE_BUFFER_SIZE_1664,
+       GXIO_MPIPE_BUFFER_SIZE_16384
+};
 
 /* Text value of tile_net.cpus if passed as a module parameter. */
 static char *network_cpus_string;
@@ -232,11 +271,21 @@ static char *network_cpus_string;
 /* The actual cpus in "network_cpus". */
 static struct cpumask network_cpus_map;
 
-/* If "loopify=LINK" was specified, this is "LINK". */
+/* If "tile_net.loopify=LINK" was specified, this is "LINK". */
 static char *loopify_link_name;
 
-/* If "tile_net.custom" was specified, this is non-NULL. */
-static char *custom_str;
+/* If "tile_net.custom" was specified, this is true. */
+static bool custom_flag;
+
+/* If "tile_net.jumbo=NUM" was specified, this is "NUM". */
+static uint jumbo_num;
+
+/* Obtain mpipe instance from struct tile_net_priv given struct net_device. */
+static inline int mpipe_instance(struct net_device *dev)
+{
+       struct tile_net_priv *priv = netdev_priv(dev);
+       return priv->instance;
+}
 
 /* The "tile_net.cpus" argument specifies the cpus that are dedicated
  * to handle ingress packets.
@@ -289,9 +338,15 @@ MODULE_PARM_DESC(loopify, "name the device to use loop0/1 for ingress/egress");
 /* The "tile_net.custom" argument causes us to ignore the "conventional"
  * classifier metadata, in particular, the "l2_offset".
  */
-module_param_named(custom, custom_str, charp, 0444);
+module_param_named(custom, custom_flag, bool, 0444);
 MODULE_PARM_DESC(custom, "indicates a (heavily) customized classifier");
 
+/* The "tile_net.jumbo" argument causes us to support "jumbo" packets,
+ * and to allocate the given number of "jumbo" buffers.
+ */
+module_param_named(jumbo, jumbo_num, uint, 0444);
+MODULE_PARM_DESC(jumbo, "the number of buffers to support jumbo packets");
+
 /* Atomically update a statistics field.
  * Note that on TILE-Gx, this operation is fire-and-forget on the
  * issuing core (single-cycle dispatch) and takes only a few cycles
@@ -305,15 +360,16 @@ static void tile_net_stats_add(unsigned long value, unsigned long *field)
 }
 
 /* Allocate and push a buffer. */
-static bool tile_net_provide_buffer(bool small)
+static bool tile_net_provide_buffer(int instance, int kind)
 {
-       int stack = small ? small_buffer_stack : large_buffer_stack;
+       struct mpipe_data *md = &mpipe_data[instance];
+       gxio_mpipe_buffer_size_enum_t bse = buffer_size_enums[kind];
+       size_t bs = gxio_mpipe_buffer_size_enum_to_buffer_size(bse);
        const unsigned long buffer_alignment = 128;
        struct sk_buff *skb;
        int len;
 
-       len = sizeof(struct sk_buff **) + buffer_alignment;
-       len += (small ? BUFFER_SIZE_SMALL : BUFFER_SIZE_LARGE);
+       len = sizeof(struct sk_buff **) + buffer_alignment + bs;
        skb = dev_alloc_skb(len);
        if (skb == NULL)
                return false;
@@ -328,7 +384,7 @@ static bool tile_net_provide_buffer(bool small)
        /* Make sure "skb" and the back-pointer have been flushed. */
        wmb();
 
-       gxio_mpipe_push_buffer(&context, stack,
+       gxio_mpipe_push_buffer(&md->context, md->first_buffer_stack + kind,
                               (void *)va_to_tile_io_addr(skb->data));
 
        return true;
@@ -354,11 +410,14 @@ static struct sk_buff *mpipe_buf_to_skb(void *va)
        return skb;
 }
 
-static void tile_net_pop_all_buffers(int stack)
+static void tile_net_pop_all_buffers(int instance, int stack)
 {
+       struct mpipe_data *md = &mpipe_data[instance];
+
        for (;;) {
                tile_io_addr_t addr =
-                       (tile_io_addr_t)gxio_mpipe_pop_buffer(&context, stack);
+                       (tile_io_addr_t)gxio_mpipe_pop_buffer(&md->context,
+                                                             stack);
                if (addr == 0)
                        break;
                dev_kfree_skb_irq(mpipe_buf_to_skb(tile_io_addr_to_va(addr)));
@@ -369,24 +428,111 @@ static void tile_net_pop_all_buffers(int stack)
 static void tile_net_provide_needed_buffers(void)
 {
        struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+       int instance, kind;
+       for (instance = 0; instance < NR_MPIPE_MAX &&
+                    info->mpipe[instance].has_iqueue; instance++)      {
+               for (kind = 0; kind < MAX_KINDS; kind++) {
+                       while (info->mpipe[instance].num_needed_buffers[kind]
+                              != 0) {
+                               if (!tile_net_provide_buffer(instance, kind)) {
+                                       pr_notice("Tile %d still needs"
+                                                 " some buffers\n",
+                                                 info->my_cpu);
+                                       return;
+                               }
+                               info->mpipe[instance].
+                                       num_needed_buffers[kind]--;
+                       }
+               }
+       }
+}
 
-       while (info->num_needed_small_buffers != 0) {
-               if (!tile_net_provide_buffer(true))
-                       goto oops;
-               info->num_needed_small_buffers--;
+/* Get RX timestamp, and store it in the skb. */
+static void tile_rx_timestamp(struct tile_net_priv *priv, struct sk_buff *skb,
+                             gxio_mpipe_idesc_t *idesc)
+{
+#ifdef CONFIG_PTP_1588_CLOCK_TILEGX
+       if (unlikely(priv->stamp_cfg.rx_filter != HWTSTAMP_FILTER_NONE)) {
+               struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
+               memset(shhwtstamps, 0, sizeof(*shhwtstamps));
+               shhwtstamps->hwtstamp = ktime_set(idesc->time_stamp_sec,
+                                                 idesc->time_stamp_ns);
        }
+#endif
+}
 
-       while (info->num_needed_large_buffers != 0) {
-               if (!tile_net_provide_buffer(false))
-                       goto oops;
-               info->num_needed_large_buffers--;
+/* Get TX timestamp, and store it in the skb. */
+static void tile_tx_timestamp(struct sk_buff *skb, int instance)
+{
+#ifdef CONFIG_PTP_1588_CLOCK_TILEGX
+       struct skb_shared_info *shtx = skb_shinfo(skb);
+       if (unlikely((shtx->tx_flags & SKBTX_HW_TSTAMP) != 0)) {
+               struct mpipe_data *md = &mpipe_data[instance];
+               struct skb_shared_hwtstamps shhwtstamps;
+               struct timespec ts;
+
+               shtx->tx_flags |= SKBTX_IN_PROGRESS;
+               gxio_mpipe_get_timestamp(&md->context, &ts);
+               memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+               shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
+               skb_tstamp_tx(skb, &shhwtstamps);
        }
+#endif
+}
 
-       return;
+/* Use ioctl() to enable or disable TX or RX timestamping. */
+static int tile_hwtstamp_ioctl(struct net_device *dev, struct ifreq *rq,
+                              int cmd)
+{
+#ifdef CONFIG_PTP_1588_CLOCK_TILEGX
+       struct hwtstamp_config config;
+       struct tile_net_priv *priv = netdev_priv(dev);
 
-oops:
-       /* Add a description to the page allocation failure dump. */
-       pr_notice("Tile %d still needs some buffers\n", info->my_cpu);
+       if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
+               return -EFAULT;
+
+       if (config.flags)  /* reserved for future extensions */
+               return -EINVAL;
+
+       switch (config.tx_type) {
+       case HWTSTAMP_TX_OFF:
+       case HWTSTAMP_TX_ON:
+               break;
+       default:
+               return -ERANGE;
+       }
+
+       switch (config.rx_filter) {
+       case HWTSTAMP_FILTER_NONE:
+               break;
+       case HWTSTAMP_FILTER_ALL:
+       case HWTSTAMP_FILTER_SOME:
+       case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+       case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+       case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+       case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+       case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+       case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+       case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+       case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+       case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+       case HWTSTAMP_FILTER_PTP_V2_EVENT:
+       case HWTSTAMP_FILTER_PTP_V2_SYNC:
+       case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+               config.rx_filter = HWTSTAMP_FILTER_ALL;
+               break;
+       default:
+               return -ERANGE;
+       }
+
+       if (copy_to_user(rq->ifr_data, &config, sizeof(config)))
+               return -EFAULT;
+
+       priv->stamp_cfg = config;
+       return 0;
+#else
+       return -EOPNOTSUPP;
+#endif
 }
 
 static inline bool filter_packet(struct net_device *dev, void *buf)
@@ -409,6 +555,7 @@ static void tile_net_receive_skb(struct net_device *dev, struct sk_buff *skb,
 {
        struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
        struct tile_net_priv *priv = netdev_priv(dev);
+       int instance = priv->instance;
 
        /* Encode the actual packet length. */
        skb_put(skb, len);
@@ -419,47 +566,52 @@ static void tile_net_receive_skb(struct net_device *dev, struct sk_buff *skb,
        if (idesc->cs && idesc->csum_seed_val == 0xFFFF)
                skb->ip_summed = CHECKSUM_UNNECESSARY;
 
-       netif_receive_skb(skb);
+       /* Get RX timestamp from idesc. */
+       tile_rx_timestamp(priv, skb, idesc);
+
+       napi_gro_receive(&info->mpipe[instance].napi, skb);
 
        /* Update stats. */
-       tile_net_stats_add(1, &priv->stats.rx_packets);
-       tile_net_stats_add(len, &priv->stats.rx_bytes);
+       tile_net_stats_add(1, &dev->stats.rx_packets);
+       tile_net_stats_add(len, &dev->stats.rx_bytes);
 
        /* Need a new buffer. */
-       if (idesc->size == BUFFER_SIZE_SMALL_ENUM)
-               info->num_needed_small_buffers++;
+       if (idesc->size == buffer_size_enums[0])
+               info->mpipe[instance].num_needed_buffers[0]++;
+       else if (idesc->size == buffer_size_enums[1])
+               info->mpipe[instance].num_needed_buffers[1]++;
        else
-               info->num_needed_large_buffers++;
+               info->mpipe[instance].num_needed_buffers[2]++;
 }
 
 /* Handle a packet.  Return true if "processed", false if "filtered". */
-static bool tile_net_handle_packet(gxio_mpipe_idesc_t *idesc)
+static bool tile_net_handle_packet(int instance, gxio_mpipe_idesc_t *idesc)
 {
        struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
-       struct net_device *dev = tile_net_devs_for_channel[idesc->channel];
+       struct mpipe_data *md = &mpipe_data[instance];
+       struct net_device *dev = md->tile_net_devs_for_channel[idesc->channel];
        uint8_t l2_offset;
        void *va;
        void *buf;
        unsigned long len;
        bool filter;
 
-       /* Drop packets for which no buffer was available.
-        * NOTE: This happens under heavy load.
+       /* Drop packets for which no buffer was available (which can
+        * happen under heavy load), or for which the me/tr/ce flags
+        * are set (which can happen for jumbo cut-through packets,
+        * or with a customized classifier).
         */
-       if (idesc->be) {
-               struct tile_net_priv *priv = netdev_priv(dev);
-               tile_net_stats_add(1, &priv->stats.rx_dropped);
-               gxio_mpipe_iqueue_consume(&info->iqueue, idesc);
-               if (net_ratelimit())
-                       pr_info("Dropping packet (insufficient buffers).\n");
-               return false;
+       if (idesc->be || idesc->me || idesc->tr || idesc->ce) {
+               if (dev)
+                       tile_net_stats_add(1, &dev->stats.rx_errors);
+               goto drop;
        }
 
        /* Get the "l2_offset", if allowed. */
-       l2_offset = custom_str ? 0 : gxio_mpipe_idesc_get_l2_offset(idesc);
+       l2_offset = custom_flag ? 0 : gxio_mpipe_idesc_get_l2_offset(idesc);
 
-       /* Get the raw buffer VA (includes "headroom"). */
-       va = tile_io_addr_to_va((unsigned long)(long)idesc->va);
+       /* Get the VA (including NET_IP_ALIGN bytes of "headroom"). */
+       va = tile_io_addr_to_va((unsigned long)idesc->va);
 
        /* Get the actual packet start/length. */
        buf = va + l2_offset;
@@ -470,7 +622,10 @@ static bool tile_net_handle_packet(gxio_mpipe_idesc_t *idesc)
 
        filter = filter_packet(dev, buf);
        if (filter) {
-               gxio_mpipe_iqueue_drop(&info->iqueue, idesc);
+               if (dev)
+                       tile_net_stats_add(1, &dev->stats.rx_dropped);
+drop:
+               gxio_mpipe_iqueue_drop(&info->mpipe[instance].iqueue, idesc);
        } else {
                struct sk_buff *skb = mpipe_buf_to_skb(va);
 
@@ -480,7 +635,7 @@ static bool tile_net_handle_packet(gxio_mpipe_idesc_t *idesc)
                tile_net_receive_skb(dev, skb, idesc, len);
        }
 
-       gxio_mpipe_iqueue_consume(&info->iqueue, idesc);
+       gxio_mpipe_iqueue_consume(&info->mpipe[instance].iqueue, idesc);
        return !filter;
 }
 
@@ -501,14 +656,20 @@ static int tile_net_poll(struct napi_struct *napi, int budget)
        struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
        unsigned int work = 0;
        gxio_mpipe_idesc_t *idesc;
-       int i, n;
-
-       /* Process packets. */
-       while ((n = gxio_mpipe_iqueue_try_peek(&info->iqueue, &idesc)) > 0) {
+       int instance, i, n;
+       struct mpipe_data *md;
+       struct info_mpipe *info_mpipe =
+               container_of(napi, struct info_mpipe, napi);
+
+       instance = info_mpipe->instance;
+       while ((n = gxio_mpipe_iqueue_try_peek(
+                       &info_mpipe->iqueue,
+                       &idesc)) > 0) {
                for (i = 0; i < n; i++) {
                        if (i == TILE_NET_BATCH)
                                goto done;
-                       if (tile_net_handle_packet(idesc + i)) {
+                       if (tile_net_handle_packet(instance,
+                                                  idesc + i)) {
                                if (++work >= budget)
                                        goto done;
                        }
@@ -516,14 +677,16 @@ static int tile_net_poll(struct napi_struct *napi, int budget)
        }
 
        /* There are no packets left. */
-       napi_complete(&info->napi);
+       napi_complete(&info_mpipe->napi);
 
+       md = &mpipe_data[instance];
        /* Re-enable hypervisor interrupts. */
-       gxio_mpipe_enable_notif_ring_interrupt(&context, info->iqueue.ring);
+       gxio_mpipe_enable_notif_ring_interrupt(
+               &md->context, info->mpipe[instance].iqueue.ring);
 
        /* HACK: Avoid the "rotting packet" problem. */
-       if (gxio_mpipe_iqueue_try_peek(&info->iqueue, &idesc) > 0)
-               napi_schedule(&info->napi);
+       if (gxio_mpipe_iqueue_try_peek(&info_mpipe->iqueue, &idesc) > 0)
+               napi_schedule(&info_mpipe->napi);
 
        /* ISSUE: Handle completions? */
 
@@ -533,11 +696,11 @@ done:
        return work;
 }
 
-/* Handle an ingress interrupt on the current cpu. */
-static irqreturn_t tile_net_handle_ingress_irq(int irq, void *unused)
+/* Handle an ingress interrupt from an instance on the current cpu. */
+static irqreturn_t tile_net_handle_ingress_irq(int irq, void *id)
 {
        struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
-       napi_schedule(&info->napi);
+       napi_schedule(&info->mpipe[(uint64_t)id].napi);
        return IRQ_HANDLED;
 }
 
@@ -579,7 +742,9 @@ static void tile_net_schedule_tx_wake_timer(struct net_device *dev,
 {
        struct tile_net_info *info = &per_cpu(per_cpu_info, tx_queue_idx);
        struct tile_net_priv *priv = netdev_priv(dev);
-       struct tile_net_tx_wake *tx_wake = &info->tx_wake[priv->echannel];
+       int instance = priv->instance;
+       struct tile_net_tx_wake *tx_wake =
+               &info->mpipe[instance].tx_wake[priv->echannel];
 
        hrtimer_start(&tx_wake->timer,
                      ktime_set(0, TX_TIMER_DELAY_USEC * 1000UL),
@@ -617,7 +782,7 @@ static enum hrtimer_restart tile_net_handle_egress_timer(struct hrtimer *t)
        struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
        unsigned long irqflags;
        bool pending = false;
-       int i;
+       int i, instance;
 
        local_irq_save(irqflags);
 
@@ -625,13 +790,19 @@ static enum hrtimer_restart tile_net_handle_egress_timer(struct hrtimer *t)
        info->egress_timer_scheduled = false;
 
        /* Free all possible comps for this tile. */
-       for (i = 0; i < TILE_NET_CHANNELS; i++) {
-               struct tile_net_egress *egress = &egress_for_echannel[i];
-               struct tile_net_comps *comps = info->comps_for_echannel[i];
-               if (comps->comp_last >= comps->comp_next)
-                       continue;
-               tile_net_free_comps(egress->equeue, comps, -1, true);
-               pending = pending || (comps->comp_last < comps->comp_next);
+       for (instance = 0; instance < NR_MPIPE_MAX &&
+                    info->mpipe[instance].has_iqueue; instance++) {
+               for (i = 0; i < TILE_NET_CHANNELS; i++) {
+                       struct tile_net_egress *egress =
+                               &mpipe_data[instance].egress_for_echannel[i];
+                       struct tile_net_comps *comps =
+                               info->mpipe[instance].comps_for_echannel[i];
+                       if (!egress || comps->comp_last >= comps->comp_next)
+                               continue;
+                       tile_net_free_comps(egress->equeue, comps, -1, true);
+                       pending = pending ||
+                               (comps->comp_last < comps->comp_next);
+               }
        }
 
        /* Reschedule timer if needed. */
@@ -643,37 +814,112 @@ static enum hrtimer_restart tile_net_handle_egress_timer(struct hrtimer *t)
        return HRTIMER_NORESTART;
 }
 
-/* Helper function for "tile_net_update()".
- * "dev" (i.e. arg) is the device being brought up or down,
- * or NULL if all devices are now down.
- */
-static void tile_net_update_cpu(void *arg)
+#ifdef CONFIG_PTP_1588_CLOCK_TILEGX
+
+/* PTP clock operations. */
+
+static int ptp_mpipe_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
 {
-       struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
-       struct net_device *dev = arg;
+       int ret = 0;
+       struct mpipe_data *md = container_of(ptp, struct mpipe_data, caps);
+       mutex_lock(&md->ptp_lock);
+       if (gxio_mpipe_adjust_timestamp_freq(&md->context, ppb))
+               ret = -EINVAL;
+       mutex_unlock(&md->ptp_lock);
+       return ret;
+}
 
-       if (!info->has_iqueue)
-               return;
+static int ptp_mpipe_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+       int ret = 0;
+       struct mpipe_data *md = container_of(ptp, struct mpipe_data, caps);
+       mutex_lock(&md->ptp_lock);
+       if (gxio_mpipe_adjust_timestamp(&md->context, delta))
+               ret = -EBUSY;
+       mutex_unlock(&md->ptp_lock);
+       return ret;
+}
 
-       if (dev != NULL) {
-               if (!info->napi_added) {
-                       netif_napi_add(dev, &info->napi,
-                                      tile_net_poll, TILE_NET_WEIGHT);
-                       info->napi_added = true;
-               }
-               if (!info->napi_enabled) {
-                       napi_enable(&info->napi);
-                       info->napi_enabled = true;
-               }
-               enable_percpu_irq(ingress_irq, 0);
-       } else {
-               disable_percpu_irq(ingress_irq);
-               if (info->napi_enabled) {
-                       napi_disable(&info->napi);
-                       info->napi_enabled = false;
-               }
-               /* FIXME: Drain the iqueue. */
-       }
+static int ptp_mpipe_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
+{
+       int ret = 0;
+       struct mpipe_data *md = container_of(ptp, struct mpipe_data, caps);
+       mutex_lock(&md->ptp_lock);
+       if (gxio_mpipe_get_timestamp(&md->context, ts))
+               ret = -EBUSY;
+       mutex_unlock(&md->ptp_lock);
+       return ret;
+}
+
+static int ptp_mpipe_settime(struct ptp_clock_info *ptp,
+                            const struct timespec *ts)
+{
+       int ret = 0;
+       struct mpipe_data *md = container_of(ptp, struct mpipe_data, caps);
+       mutex_lock(&md->ptp_lock);
+       if (gxio_mpipe_set_timestamp(&md->context, ts))
+               ret = -EBUSY;
+       mutex_unlock(&md->ptp_lock);
+       return ret;
+}
+
+static int ptp_mpipe_enable(struct ptp_clock_info *ptp,
+                           struct ptp_clock_request *request, int on)
+{
+       return -EOPNOTSUPP;
+}
+
+static struct ptp_clock_info ptp_mpipe_caps = {
+       .owner          = THIS_MODULE,
+       .name           = "mPIPE clock",
+       .max_adj        = 999999999,
+       .n_ext_ts       = 0,
+       .pps            = 0,
+       .adjfreq        = ptp_mpipe_adjfreq,
+       .adjtime        = ptp_mpipe_adjtime,
+       .gettime        = ptp_mpipe_gettime,
+       .settime        = ptp_mpipe_settime,
+       .enable         = ptp_mpipe_enable,
+};
+
+#endif /* CONFIG_PTP_1588_CLOCK_TILEGX */
+
+/* Sync mPIPE's timestamp up with Linux system time and register PTP clock. */
+static void register_ptp_clock(struct net_device *dev, struct mpipe_data *md)
+{
+#ifdef CONFIG_PTP_1588_CLOCK_TILEGX
+       struct timespec ts;
+
+       getnstimeofday(&ts);
+       gxio_mpipe_set_timestamp(&md->context, &ts);
+
+       mutex_init(&md->ptp_lock);
+       md->caps = ptp_mpipe_caps;
+       md->ptp_clock = ptp_clock_register(&md->caps, NULL);
+       if (IS_ERR(md->ptp_clock))
+               netdev_err(dev, "ptp_clock_register failed %ld\n",
+                          PTR_ERR(md->ptp_clock));
+#endif
+}
+
+/* Initialize PTP fields in a new device. */
+static void init_ptp_dev(struct tile_net_priv *priv)
+{
+#ifdef CONFIG_PTP_1588_CLOCK_TILEGX
+       priv->stamp_cfg.rx_filter = HWTSTAMP_FILTER_NONE;
+       priv->stamp_cfg.tx_type = HWTSTAMP_TX_OFF;
+#endif
+}
+
+/* Helper functions for "tile_net_update()". */
+static void enable_ingress_irq(void *irq)
+{
+       enable_percpu_irq((long)irq, 0);
+}
+
+static void disable_ingress_irq(void *irq)
+{
+       disable_percpu_irq((long)irq);
 }
 
 /* Helper function for tile_net_open() and tile_net_stop().
@@ -683,19 +929,22 @@ static int tile_net_update(struct net_device *dev)
 {
        static gxio_mpipe_rules_t rules;  /* too big to fit on the stack */
        bool saw_channel = false;
+       int instance = mpipe_instance(dev);
+       struct mpipe_data *md = &mpipe_data[instance];
        int channel;
        int rc;
        int cpu;
 
-       gxio_mpipe_rules_init(&rules, &context);
+       saw_channel = false;
+       gxio_mpipe_rules_init(&rules, &md->context);
 
        for (channel = 0; channel < TILE_NET_CHANNELS; channel++) {
-               if (tile_net_devs_for_channel[channel] == NULL)
+               if (md->tile_net_devs_for_channel[channel] == NULL)
                        continue;
                if (!saw_channel) {
                        saw_channel = true;
-                       gxio_mpipe_rules_begin(&rules, first_bucket,
-                                              num_buckets, NULL);
+                       gxio_mpipe_rules_begin(&rules, md->first_bucket,
+                                              md->num_buckets, NULL);
                        gxio_mpipe_rules_set_headroom(&rules, NET_IP_ALIGN);
                }
                gxio_mpipe_rules_add_channel(&rules, channel);
@@ -706,102 +955,150 @@ static int tile_net_update(struct net_device *dev)
         */
        rc = gxio_mpipe_rules_commit(&rules);
        if (rc != 0) {
-               netdev_warn(dev, "gxio_mpipe_rules_commit failed: %d\n", rc);
+               netdev_warn(dev, "gxio_mpipe_rules_commit: mpipe[%d] %d\n",
+                           instance, rc);
                return -EIO;
        }
 
-       /* Update all cpus, sequentially (to protect "netif_napi_add()"). */
-       for_each_online_cpu(cpu)
-               smp_call_function_single(cpu, tile_net_update_cpu,
-                                        (saw_channel ? dev : NULL), 1);
+       /* Update all cpus, sequentially (to protect "netif_napi_add()").
+        * We use on_each_cpu to handle the IPI mask or unmask.
+        */
+       if (!saw_channel)
+               on_each_cpu(disable_ingress_irq,
+                           (void *)(long)(md->ingress_irq), 1);
+       for_each_online_cpu(cpu) {
+               struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
+
+               if (!info->mpipe[instance].has_iqueue)
+                       continue;
+               if (saw_channel) {
+                       if (!info->mpipe[instance].napi_added) {
+                               netif_napi_add(dev, &info->mpipe[instance].napi,
+                                              tile_net_poll, TILE_NET_WEIGHT);
+                               info->mpipe[instance].napi_added = true;
+                       }
+                       if (!info->mpipe[instance].napi_enabled) {
+                               napi_enable(&info->mpipe[instance].napi);
+                               info->mpipe[instance].napi_enabled = true;
+                       }
+               } else {
+                       if (info->mpipe[instance].napi_enabled) {
+                               napi_disable(&info->mpipe[instance].napi);
+                               info->mpipe[instance].napi_enabled = false;
+                       }
+                       /* FIXME: Drain the iqueue. */
+               }
+       }
+       if (saw_channel)
+               on_each_cpu(enable_ingress_irq,
+                           (void *)(long)(md->ingress_irq), 1);
 
        /* HACK: Allow packets to flow in the simulator. */
        if (saw_channel)
-               sim_enable_mpipe_links(0, -1);
+               sim_enable_mpipe_links(instance, -1);
 
        return 0;
 }
 
-/* Allocate and initialize mpipe buffer stacks, and register them in
- * the mPIPE TLBs, for both small and large packet sizes.
- * This routine supports tile_net_init_mpipe(), below.
- */
-static int init_buffer_stacks(struct net_device *dev, int num_buffers)
+/* Initialize a buffer stack. */
+static int create_buffer_stack(struct net_device *dev,
+                              int kind, size_t num_buffers)
 {
        pte_t hash_pte = pte_set_home((pte_t) { 0 }, PAGE_HOME_HASH);
-       int rc;
+       int instance = mpipe_instance(dev);
+       struct mpipe_data *md = &mpipe_data[instance];
+       size_t needed = gxio_mpipe_calc_buffer_stack_bytes(num_buffers);
+       int stack_idx = md->first_buffer_stack + kind;
+       void *va;
+       int i, rc;
 
-       /* Compute stack bytes; we round up to 64KB and then use
-        * alloc_pages() so we get the required 64KB alignment as well.
+       /* Round up to 64KB and then use alloc_pages() so we get the
+        * required 64KB alignment.
         */
-       buffer_stack_size =
-               ALIGN(gxio_mpipe_calc_buffer_stack_bytes(num_buffers),
-                     64 * 1024);
+       md->buffer_stack_bytes[kind] =
+               ALIGN(needed, 64 * 1024);
 
-       /* Allocate two buffer stack indices. */
-       rc = gxio_mpipe_alloc_buffer_stacks(&context, 2, 0, 0);
-       if (rc < 0) {
-               netdev_err(dev, "gxio_mpipe_alloc_buffer_stacks failed: %d\n",
-                          rc);
-               return rc;
-       }
-       small_buffer_stack = rc;
-       large_buffer_stack = rc + 1;
-
-       /* Allocate the small memory stack. */
-       small_buffer_stack_va =
-               alloc_pages_exact(buffer_stack_size, GFP_KERNEL);
-       if (small_buffer_stack_va == NULL) {
+       va = alloc_pages_exact(md->buffer_stack_bytes[kind], GFP_KERNEL);
+       if (va == NULL) {
                netdev_err(dev,
-                          "Could not alloc %zd bytes for buffer stacks\n",
-                          buffer_stack_size);
+                          "Could not alloc %zd bytes for buffer stack %d\n",
+                          md->buffer_stack_bytes[kind], kind);
                return -ENOMEM;
        }
-       rc = gxio_mpipe_init_buffer_stack(&context, small_buffer_stack,
-                                         BUFFER_SIZE_SMALL_ENUM,
-                                         small_buffer_stack_va,
-                                         buffer_stack_size, 0);
+
+       /* Initialize the buffer stack. */
+       rc = gxio_mpipe_init_buffer_stack(&md->context, stack_idx,
+                                         buffer_size_enums[kind],  va,
+                                         md->buffer_stack_bytes[kind], 0);
        if (rc != 0) {
-               netdev_err(dev, "gxio_mpipe_init_buffer_stack: %d\n", rc);
+               netdev_err(dev, "gxio_mpipe_init_buffer_stack: mpipe[%d] %d\n",
+                          instance, rc);
+               free_pages_exact(va, md->buffer_stack_bytes[kind]);
                return rc;
        }
-       rc = gxio_mpipe_register_client_memory(&context, small_buffer_stack,
+
+       md->buffer_stack_vas[kind] = va;
+
+       rc = gxio_mpipe_register_client_memory(&md->context, stack_idx,
                                               hash_pte, 0);
        if (rc != 0) {
                netdev_err(dev,
-                          "gxio_mpipe_register_buffer_memory failed: %d\n",
-                          rc);
+                          "gxio_mpipe_register_client_memory: mpipe[%d] %d\n",
+                          instance, rc);
                return rc;
        }
 
-       /* Allocate the large buffer stack. */
-       large_buffer_stack_va =
-               alloc_pages_exact(buffer_stack_size, GFP_KERNEL);
-       if (large_buffer_stack_va == NULL) {
-               netdev_err(dev,
-                          "Could not alloc %zd bytes for buffer stacks\n",
-                          buffer_stack_size);
-               return -ENOMEM;
-       }
-       rc = gxio_mpipe_init_buffer_stack(&context, large_buffer_stack,
-                                         BUFFER_SIZE_LARGE_ENUM,
-                                         large_buffer_stack_va,
-                                         buffer_stack_size, 0);
-       if (rc != 0) {
-               netdev_err(dev, "gxio_mpipe_init_buffer_stack failed: %d\n",
-                          rc);
-               return rc;
+       /* Provide initial buffers. */
+       for (i = 0; i < num_buffers; i++) {
+               if (!tile_net_provide_buffer(instance, kind)) {
+                       netdev_err(dev, "Cannot allocate initial sk_bufs!\n");
+                       return -ENOMEM;
+               }
        }
-       rc = gxio_mpipe_register_client_memory(&context, large_buffer_stack,
-                                              hash_pte, 0);
-       if (rc != 0) {
+
+       return 0;
+}
+
+/* Allocate and initialize mpipe buffer stacks, and register them in
+ * the mPIPE TLBs, for small, large, and (possibly) jumbo packet sizes.
+ * This routine supports tile_net_init_mpipe(), below.
+ */
+static int init_buffer_stacks(struct net_device *dev,
+                             int network_cpus_count)
+{
+       int num_kinds = MAX_KINDS - (jumbo_num == 0);
+       size_t num_buffers;
+       int rc;
+       int instance = mpipe_instance(dev);
+       struct mpipe_data *md = &mpipe_data[instance];
+
+       /* Allocate the buffer stacks. */
+       rc = gxio_mpipe_alloc_buffer_stacks(&md->context, num_kinds, 0, 0);
+       if (rc < 0) {
                netdev_err(dev,
-                          "gxio_mpipe_register_buffer_memory failed: %d\n",
-                          rc);
+                          "gxio_mpipe_alloc_buffer_stacks: mpipe[%d] %d\n",
+                          instance, rc);
                return rc;
        }
+       md->first_buffer_stack = rc;
 
-       return 0;
+       /* Enough small/large buffers to (normally) avoid buffer errors. */
+       num_buffers =
+               network_cpus_count * (IQUEUE_ENTRIES + TILE_NET_BATCH);
+
+       /* Allocate the small memory stack. */
+       if (rc >= 0)
+               rc = create_buffer_stack(dev, 0, num_buffers);
+
+       /* Allocate the large buffer stack. */
+       if (rc >= 0)
+               rc = create_buffer_stack(dev, 1, num_buffers);
+
+       /* Allocate the jumbo buffer stack if needed. */
+       if (rc >= 0 && jumbo_num != 0)
+               rc = create_buffer_stack(dev, 2, jumbo_num);
+
+       return rc;
 }
 
 /* Allocate per-cpu resources (memory for completions and idescs).
@@ -812,6 +1109,8 @@ static int alloc_percpu_mpipe_resources(struct net_device *dev,
 {
        struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
        int order, i, rc;
+       int instance = mpipe_instance(dev);
+       struct mpipe_data *md = &mpipe_data[instance];
        struct page *page;
        void *addr;
 
@@ -826,7 +1125,7 @@ static int alloc_percpu_mpipe_resources(struct net_device *dev,
        addr = pfn_to_kaddr(page_to_pfn(page));
        memset(addr, 0, COMPS_SIZE);
        for (i = 0; i < TILE_NET_CHANNELS; i++)
-               info->comps_for_echannel[i] =
+               info->mpipe[instance].comps_for_echannel[i] =
                        addr + i * sizeof(struct tile_net_comps);
 
        /* If this is a network cpu, create an iqueue. */
@@ -840,14 +1139,15 @@ static int alloc_percpu_mpipe_resources(struct net_device *dev,
                        return -ENOMEM;
                }
                addr = pfn_to_kaddr(page_to_pfn(page));
-               rc = gxio_mpipe_iqueue_init(&info->iqueue, &context, ring++,
-                                           addr, NOTIF_RING_SIZE, 0);
+               rc = gxio_mpipe_iqueue_init(&info->mpipe[instance].iqueue,
+                                           &md->context, ring++, addr,
+                                           NOTIF_RING_SIZE, 0);
                if (rc < 0) {
                        netdev_err(dev,
                                   "gxio_mpipe_iqueue_init failed: %d\n", rc);
                        return rc;
                }
-               info->has_iqueue = true;
+               info->mpipe[instance].has_iqueue = true;
        }
 
        return ring;
@@ -860,40 +1160,41 @@ static int init_notif_group_and_buckets(struct net_device *dev,
                                        int ring, int network_cpus_count)
 {
        int group, rc;
+       int instance = mpipe_instance(dev);
+       struct mpipe_data *md = &mpipe_data[instance];
 
        /* Allocate one NotifGroup. */
-       rc = gxio_mpipe_alloc_notif_groups(&context, 1, 0, 0);
+       rc = gxio_mpipe_alloc_notif_groups(&md->context, 1, 0, 0);
        if (rc < 0) {
-               netdev_err(dev, "gxio_mpipe_alloc_notif_groups failed: %d\n",
-                          rc);
+               netdev_err(dev, "gxio_mpipe_alloc_notif_groups: mpipe[%d] %d\n",
+                          instance, rc);
                return rc;
        }
        group = rc;
 
        /* Initialize global num_buckets value. */
        if (network_cpus_count > 4)
-               num_buckets = 256;
+               md->num_buckets = 256;
        else if (network_cpus_count > 1)
-               num_buckets = 16;
+               md->num_buckets = 16;
 
        /* Allocate some buckets, and set global first_bucket value. */
-       rc = gxio_mpipe_alloc_buckets(&context, num_buckets, 0, 0);
+       rc = gxio_mpipe_alloc_buckets(&md->context, md->num_buckets, 0, 0);
        if (rc < 0) {
-               netdev_err(dev, "gxio_mpipe_alloc_buckets failed: %d\n", rc);
+               netdev_err(dev, "gxio_mpipe_alloc_buckets: mpipe[%d] %d\n",
+                          instance, rc);
                return rc;
        }
-       first_bucket = rc;
+       md->first_bucket = rc;
 
        /* Init group and buckets. */
        rc = gxio_mpipe_init_notif_group_and_buckets(
-               &context, group, ring, network_cpus_count,
-               first_bucket, num_buckets,
+               &md->context, group, ring, network_cpus_count,
+               md->first_bucket, md->num_buckets,
                GXIO_MPIPE_BUCKET_STICKY_FLOW_LOCALITY);
        if (rc != 0) {
-               netdev_err(
-                       dev,
-                       "gxio_mpipe_init_notif_group_and_buckets failed: %d\n",
-                       rc);
+               netdev_err(dev, "gxio_mpipe_init_notif_group_and_buckets: "
+                          "mpipe[%d] %d\n", instance, rc);
                return rc;
        }
 
@@ -907,30 +1208,39 @@ static int init_notif_group_and_buckets(struct net_device *dev,
  */
 static int tile_net_setup_interrupts(struct net_device *dev)
 {
-       int cpu, rc;
+       int cpu, rc, irq;
+       int instance = mpipe_instance(dev);
+       struct mpipe_data *md = &mpipe_data[instance];
+
+       irq = md->ingress_irq;
+       if (irq < 0) {
+               irq = create_irq();
+               if (irq < 0) {
+                       netdev_err(dev,
+                                  "create_irq failed: mpipe[%d] %d\n",
+                                  instance, irq);
+                       return irq;
+               }
+               tile_irq_activate(irq, TILE_IRQ_PERCPU);
 
-       rc = create_irq();
-       if (rc < 0) {
-               netdev_err(dev, "create_irq failed: %d\n", rc);
-               return rc;
-       }
-       ingress_irq = rc;
-       tile_irq_activate(ingress_irq, TILE_IRQ_PERCPU);
-       rc = request_irq(ingress_irq, tile_net_handle_ingress_irq,
-                        0, "tile_net", NULL);
-       if (rc != 0) {
-               netdev_err(dev, "request_irq failed: %d\n", rc);
-               destroy_irq(ingress_irq);
-               ingress_irq = -1;
-               return rc;
+               rc = request_irq(irq, tile_net_handle_ingress_irq,
+                                0, "tile_net", (void *)((uint64_t)instance));
+
+               if (rc != 0) {
+                       netdev_err(dev, "request_irq failed: mpipe[%d] %d\n",
+                                  instance, rc);
+                       destroy_irq(irq);
+                       return rc;
+               }
+               md->ingress_irq = irq;
        }
 
        for_each_online_cpu(cpu) {
                struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
-               if (info->has_iqueue) {
-                       gxio_mpipe_request_notif_ring_interrupt(
-                               &context, cpu_x(cpu), cpu_y(cpu),
-                               KERNEL_PL, ingress_irq, info->iqueue.ring);
+               if (info->mpipe[instance].has_iqueue) {
+                       gxio_mpipe_request_notif_ring_interrupt(&md->context,
+                               cpu_x(cpu), cpu_y(cpu), KERNEL_PL, irq,
+                               info->mpipe[instance].iqueue.ring);
                }
        }
 
@@ -938,39 +1248,45 @@ static int tile_net_setup_interrupts(struct net_device *dev)
 }
 
 /* Undo any state set up partially by a failed call to tile_net_init_mpipe. */
-static void tile_net_init_mpipe_fail(void)
+static void tile_net_init_mpipe_fail(int instance)
 {
-       int cpu;
+       int kind, cpu;
+       struct mpipe_data *md = &mpipe_data[instance];
 
        /* Do cleanups that require the mpipe context first. */
-       if (small_buffer_stack >= 0)
-               tile_net_pop_all_buffers(small_buffer_stack);
-       if (large_buffer_stack >= 0)
-               tile_net_pop_all_buffers(large_buffer_stack);
+       for (kind = 0; kind < MAX_KINDS; kind++) {
+               if (md->buffer_stack_vas[kind] != NULL) {
+                       tile_net_pop_all_buffers(instance,
+                                                md->first_buffer_stack +
+                                                kind);
+               }
+       }
 
        /* Destroy mpipe context so the hardware no longer owns any memory. */
-       gxio_mpipe_destroy(&context);
+       gxio_mpipe_destroy(&md->context);
 
        for_each_online_cpu(cpu) {
                struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
-               free_pages((unsigned long)(info->comps_for_echannel[0]),
-                          get_order(COMPS_SIZE));
-               info->comps_for_echannel[0] = NULL;
-               free_pages((unsigned long)(info->iqueue.idescs),
+               free_pages(
+                       (unsigned long)(
+                               info->mpipe[instance].comps_for_echannel[0]),
+                       get_order(COMPS_SIZE));
+               info->mpipe[instance].comps_for_echannel[0] = NULL;
+               free_pages((unsigned long)(info->mpipe[instance].iqueue.idescs),
                           get_order(NOTIF_RING_SIZE));
-               info->iqueue.idescs = NULL;
+               info->mpipe[instance].iqueue.idescs = NULL;
        }
 
-       if (small_buffer_stack_va)
-               free_pages_exact(small_buffer_stack_va, buffer_stack_size);
-       if (large_buffer_stack_va)
-               free_pages_exact(large_buffer_stack_va, buffer_stack_size);
+       for (kind = 0; kind < MAX_KINDS; kind++) {
+               if (md->buffer_stack_vas[kind] != NULL) {
+                       free_pages_exact(md->buffer_stack_vas[kind],
+                                        md->buffer_stack_bytes[kind]);
+                       md->buffer_stack_vas[kind] = NULL;
+               }
+       }
 
-       small_buffer_stack_va = NULL;
-       large_buffer_stack_va = NULL;
-       large_buffer_stack = -1;
-       small_buffer_stack = -1;
-       first_bucket = -1;
+       md->first_buffer_stack = -1;
+       md->first_bucket = -1;
 }
 
 /* The first time any tilegx network device is opened, we initialize
@@ -984,9 +1300,11 @@ static void tile_net_init_mpipe_fail(void)
  */
 static int tile_net_init_mpipe(struct net_device *dev)
 {
-       int i, num_buffers, rc;
+       int rc;
        int cpu;
        int first_ring, ring;
+       int instance = mpipe_instance(dev);
+       struct mpipe_data *md = &mpipe_data[instance];
        int network_cpus_count = cpus_weight(network_cpus_map);
 
        if (!hash_default) {
@@ -994,36 +1312,21 @@ static int tile_net_init_mpipe(struct net_device *dev)
                return -EIO;
        }
 
-       rc = gxio_mpipe_init(&context, 0);
+       rc = gxio_mpipe_init(&md->context, instance);
        if (rc != 0) {
-               netdev_err(dev, "gxio_mpipe_init failed: %d\n", rc);
+               netdev_err(dev, "gxio_mpipe_init: mpipe[%d] %d\n",
+                          instance, rc);
                return -EIO;
        }
 
        /* Set up the buffer stacks. */
-       num_buffers =
-               network_cpus_count * (IQUEUE_ENTRIES + TILE_NET_BATCH);
-       rc = init_buffer_stacks(dev, num_buffers);
+       rc = init_buffer_stacks(dev, network_cpus_count);
        if (rc != 0)
                goto fail;
 
-       /* Provide initial buffers. */
-       rc = -ENOMEM;
-       for (i = 0; i < num_buffers; i++) {
-               if (!tile_net_provide_buffer(true)) {
-                       netdev_err(dev, "Cannot allocate initial sk_bufs!\n");
-                       goto fail;
-               }
-       }
-       for (i = 0; i < num_buffers; i++) {
-               if (!tile_net_provide_buffer(false)) {
-                       netdev_err(dev, "Cannot allocate initial sk_bufs!\n");
-                       goto fail;
-               }
-       }
-
        /* Allocate one NotifRing for each network cpu. */
-       rc = gxio_mpipe_alloc_notif_rings(&context, network_cpus_count, 0, 0);
+       rc = gxio_mpipe_alloc_notif_rings(&md->context,
+                                         network_cpus_count, 0, 0);
        if (rc < 0) {
                netdev_err(dev, "gxio_mpipe_alloc_notif_rings failed %d\n",
                           rc);
@@ -1050,10 +1353,13 @@ static int tile_net_init_mpipe(struct net_device *dev)
        if (rc != 0)
                goto fail;
 
+       /* Register PTP clock and set mPIPE timestamp, if configured. */
+       register_ptp_clock(dev, md);
+
        return 0;
 
 fail:
-       tile_net_init_mpipe_fail();
+       tile_net_init_mpipe_fail(instance);
        return rc;
 }
 
@@ -1063,17 +1369,19 @@ fail:
  */
 static int tile_net_init_egress(struct net_device *dev, int echannel)
 {
+       static int ering = -1;
        struct page *headers_page, *edescs_page, *equeue_page;
        gxio_mpipe_edesc_t *edescs;
        gxio_mpipe_equeue_t *equeue;
        unsigned char *headers;
        int headers_order, edescs_order, equeue_order;
        size_t edescs_size;
-       int edma;
        int rc = -ENOMEM;
+       int instance = mpipe_instance(dev);
+       struct mpipe_data *md = &mpipe_data[instance];
 
        /* Only initialize once. */
-       if (egress_for_echannel[echannel].equeue != NULL)
+       if (md->egress_for_echannel[echannel].equeue != NULL)
                return 0;
 
        /* Allocate memory for the "headers". */
@@ -1110,28 +1418,41 @@ static int tile_net_init_egress(struct net_device *dev, int echannel)
        }
        equeue = pfn_to_kaddr(page_to_pfn(equeue_page));
 
-       /* Allocate an edma ring.  Note that in practice this can't
-        * fail, which is good, because we will leak an edma ring if so.
-        */
-       rc = gxio_mpipe_alloc_edma_rings(&context, 1, 0, 0);
-       if (rc < 0) {
-               netdev_warn(dev, "gxio_mpipe_alloc_edma_rings failed: %d\n",
-                           rc);
-               goto fail_equeue;
+       /* Allocate an edma ring (using a one entry "free list"). */
+       if (ering < 0) {
+               rc = gxio_mpipe_alloc_edma_rings(&md->context, 1, 0, 0);
+               if (rc < 0) {
+                       netdev_warn(dev, "gxio_mpipe_alloc_edma_rings: "
+                                   "mpipe[%d] %d\n", instance, rc);
+                       goto fail_equeue;
+               }
+               ering = rc;
        }
-       edma = rc;
 
        /* Initialize the equeue. */
-       rc = gxio_mpipe_equeue_init(equeue, &context, edma, echannel,
+       rc = gxio_mpipe_equeue_init(equeue, &md->context, ering, echannel,
                                    edescs, edescs_size, 0);
        if (rc != 0) {
-               netdev_err(dev, "gxio_mpipe_equeue_init failed: %d\n", rc);
+               netdev_err(dev, "gxio_mpipe_equeue_init: mpipe[%d] %d\n",
+                          instance, rc);
                goto fail_equeue;
        }
 
+       /* Don't reuse the ering later. */
+       ering = -1;
+
+       if (jumbo_num != 0) {
+               /* Make sure "jumbo" packets can be egressed safely. */
+               if (gxio_mpipe_equeue_set_snf_size(equeue, 10368) < 0) {
+                       /* ISSUE: There is no "gxio_mpipe_equeue_destroy()". */
+                       netdev_warn(dev, "Jumbo packets may not be egressed"
+                                   " properly on channel %d\n", echannel);
+               }
+       }
+
        /* Done. */
-       egress_for_echannel[echannel].equeue = equeue;
-       egress_for_echannel[echannel].headers = headers;
+       md->egress_for_echannel[echannel].equeue = equeue;
+       md->egress_for_echannel[echannel].headers = headers;
        return 0;
 
 fail_equeue:
@@ -1151,11 +1472,25 @@ fail:
 static int tile_net_link_open(struct net_device *dev, gxio_mpipe_link_t *link,
                              const char *link_name)
 {
-       int rc = gxio_mpipe_link_open(link, &context, link_name, 0);
+       int instance = mpipe_instance(dev);
+       struct mpipe_data *md = &mpipe_data[instance];
+       int rc = gxio_mpipe_link_open(link, &md->context, link_name, 0);
        if (rc < 0) {
-               netdev_err(dev, "Failed to open '%s'\n", link_name);
+               netdev_err(dev, "Failed to open '%s', mpipe[%d], %d\n",
+                          link_name, instance, rc);
                return rc;
        }
+       if (jumbo_num != 0) {
+               u32 attr = GXIO_MPIPE_LINK_RECEIVE_JUMBO;
+               rc = gxio_mpipe_link_set_attr(link, attr, 1);
+               if (rc != 0) {
+                       netdev_err(dev,
+                                  "Cannot receive jumbo packets on '%s'\n",
+                                  link_name);
+                       gxio_mpipe_link_close(link);
+                       return rc;
+               }
+       }
        rc = gxio_mpipe_link_channel(link);
        if (rc < 0 || rc >= TILE_NET_CHANNELS) {
                netdev_err(dev, "gxio_mpipe_link_channel bad value: %d\n", rc);
@@ -1169,12 +1504,23 @@ static int tile_net_link_open(struct net_device *dev, gxio_mpipe_link_t *link,
 static int tile_net_open(struct net_device *dev)
 {
        struct tile_net_priv *priv = netdev_priv(dev);
-       int cpu, rc;
+       int cpu, rc, instance;
 
        mutex_lock(&tile_net_devs_for_channel_mutex);
 
-       /* Do one-time initialization the first time any device is opened. */
-       if (ingress_irq < 0) {
+       /* Get the instance info. */
+       rc = gxio_mpipe_link_instance(dev->name);
+       if (rc < 0 || rc >= NR_MPIPE_MAX) {
+               mutex_unlock(&tile_net_devs_for_channel_mutex);
+               return -EIO;
+       }
+
+       priv->instance = rc;
+       instance = rc;
+       if (!mpipe_data[rc].context.mmio_fast_base) {
+               /* Do one-time initialization per instance the first time
+                * any device is opened.
+                */
                rc = tile_net_init_mpipe(dev);
                if (rc != 0)
                        goto fail;
@@ -1205,7 +1551,7 @@ static int tile_net_open(struct net_device *dev)
        if (rc != 0)
                goto fail;
 
-       tile_net_devs_for_channel[priv->channel] = dev;
+       mpipe_data[instance].tile_net_devs_for_channel[priv->channel] = dev;
 
        rc = tile_net_update(dev);
        if (rc != 0)
@@ -1217,7 +1563,7 @@ static int tile_net_open(struct net_device *dev)
        for_each_online_cpu(cpu) {
                struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
                struct tile_net_tx_wake *tx_wake =
-                       &info->tx_wake[priv->echannel];
+                       &info->mpipe[instance].tx_wake[priv->echannel];
 
                hrtimer_init(&tx_wake->timer, CLOCK_MONOTONIC,
                             HRTIMER_MODE_REL);
@@ -1243,7 +1589,7 @@ fail:
                priv->channel = -1;
        }
        priv->echannel = -1;
-       tile_net_devs_for_channel[priv->channel] = NULL;
+       mpipe_data[instance].tile_net_devs_for_channel[priv->channel] = NULL;
        mutex_unlock(&tile_net_devs_for_channel_mutex);
 
        /* Don't return raw gxio error codes to generic Linux. */
@@ -1255,18 +1601,20 @@ static int tile_net_stop(struct net_device *dev)
 {
        struct tile_net_priv *priv = netdev_priv(dev);
        int cpu;
+       int instance = priv->instance;
+       struct mpipe_data *md = &mpipe_data[instance];
 
        for_each_online_cpu(cpu) {
                struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
                struct tile_net_tx_wake *tx_wake =
-                       &info->tx_wake[priv->echannel];
+                       &info->mpipe[instance].tx_wake[priv->echannel];
 
                hrtimer_cancel(&tx_wake->timer);
                netif_stop_subqueue(dev, cpu);
        }
 
        mutex_lock(&tile_net_devs_for_channel_mutex);
-       tile_net_devs_for_channel[priv->channel] = NULL;
+       md->tile_net_devs_for_channel[priv->channel] = NULL;
        (void)tile_net_update(dev);
        if (priv->loopify_channel >= 0) {
                if (gxio_mpipe_link_close(&priv->loopify_link) != 0)
@@ -1374,20 +1722,20 @@ static int tso_count_edescs(struct sk_buff *skb)
        return num_edescs;
 }
 
-/* Prepare modified copies of the skbuff headers.
- * FIXME: add support for IPv6.
- */
+/* Prepare modified copies of the skbuff headers. */
 static void tso_headers_prepare(struct sk_buff *skb, unsigned char *headers,
                                s64 slot)
 {
        struct skb_shared_info *sh = skb_shinfo(skb);
        struct iphdr *ih;
+       struct ipv6hdr *ih6;
        struct tcphdr *th;
        unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
        unsigned int data_len = skb->len - sh_len;
        unsigned char *data = skb->data;
        unsigned int ih_off, th_off, p_len;
        unsigned int isum_seed, tsum_seed, id, seq;
+       int is_ipv6;
        long f_id = -1;    /* id of the current fragment */
        long f_size = skb_headlen(skb) - sh_len;  /* current fragment size */
        long f_used = 0;  /* bytes used from the current fragment */
@@ -1395,18 +1743,24 @@ static void tso_headers_prepare(struct sk_buff *skb, unsigned char *headers,
        int segment;
 
        /* Locate original headers and compute various lengths. */
-       ih = ip_hdr(skb);
+       is_ipv6 = skb_is_gso_v6(skb);
+       if (is_ipv6) {
+               ih6 = ipv6_hdr(skb);
+               ih_off = skb_network_offset(skb);
+       } else {
+               ih = ip_hdr(skb);
+               ih_off = skb_network_offset(skb);
+               isum_seed = ((0xFFFF - ih->check) +
+                            (0xFFFF - ih->tot_len) +
+                            (0xFFFF - ih->id));
+               id = ntohs(ih->id);
+       }
+
        th = tcp_hdr(skb);
-       ih_off = skb_network_offset(skb);
        th_off = skb_transport_offset(skb);
        p_len = sh->gso_size;
 
-       /* Set up seed values for IP and TCP csum and initialize id and seq. */
-       isum_seed = ((0xFFFF - ih->check) +
-                    (0xFFFF - ih->tot_len) +
-                    (0xFFFF - ih->id));
        tsum_seed = th->check + (0xFFFF ^ htons(skb->len));
-       id = ntohs(ih->id);
        seq = ntohl(th->seq);
 
        /* Prepare all the headers. */
@@ -1420,11 +1774,17 @@ static void tso_headers_prepare(struct sk_buff *skb, unsigned char *headers,
                memcpy(buf, data, sh_len);
 
                /* Update copied ip header. */
-               ih = (struct iphdr *)(buf + ih_off);
-               ih->tot_len = htons(sh_len + p_len - ih_off);
-               ih->id = htons(id);
-               ih->check = csum_long(isum_seed + ih->tot_len +
-                                     ih->id) ^ 0xffff;
+               if (is_ipv6) {
+                       ih6 = (struct ipv6hdr *)(buf + ih_off);
+                       ih6->payload_len = htons(sh_len + p_len - ih_off -
+                                                sizeof(*ih6));
+               } else {
+                       ih = (struct iphdr *)(buf + ih_off);
+                       ih->tot_len = htons(sh_len + p_len - ih_off);
+                       ih->id = htons(id);
+                       ih->check = csum_long(isum_seed + ih->tot_len +
+                                             ih->id) ^ 0xffff;
+               }
 
                /* Update copied tcp header. */
                th = (struct tcphdr *)(buf + th_off);
@@ -1475,8 +1835,9 @@ static void tso_headers_prepare(struct sk_buff *skb, unsigned char *headers,
 static void tso_egress(struct net_device *dev, gxio_mpipe_equeue_t *equeue,
                       struct sk_buff *skb, unsigned char *headers, s64 slot)
 {
-       struct tile_net_priv *priv = netdev_priv(dev);
        struct skb_shared_info *sh = skb_shinfo(skb);
+       int instance = mpipe_instance(dev);
+       struct mpipe_data *md = &mpipe_data[instance];
        unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
        unsigned int data_len = skb->len - sh_len;
        unsigned int p_len = sh->gso_size;
@@ -1499,8 +1860,8 @@ static void tso_egress(struct net_device *dev, gxio_mpipe_equeue_t *equeue,
        edesc_head.xfer_size = sh_len;
 
        /* This is only used to specify the TLB. */
-       edesc_head.stack_idx = large_buffer_stack;
-       edesc_body.stack_idx = large_buffer_stack;
+       edesc_head.stack_idx = md->first_buffer_stack;
+       edesc_body.stack_idx = md->first_buffer_stack;
 
        /* Egress all the edescs. */
        for (segment = 0; segment < sh->gso_segs; segment++) {
@@ -1553,8 +1914,8 @@ static void tso_egress(struct net_device *dev, gxio_mpipe_equeue_t *equeue,
        }
 
        /* Update stats. */
-       tile_net_stats_add(tx_packets, &priv->stats.tx_packets);
-       tile_net_stats_add(tx_bytes, &priv->stats.tx_bytes);
+       tile_net_stats_add(tx_packets, &dev->stats.tx_packets);
+       tile_net_stats_add(tx_bytes, &dev->stats.tx_bytes);
 }
 
 /* Do "TSO" handling for egress.
@@ -1575,8 +1936,11 @@ static int tile_net_tx_tso(struct sk_buff *skb, struct net_device *dev)
        struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
        struct tile_net_priv *priv = netdev_priv(dev);
        int channel = priv->echannel;
-       struct tile_net_egress *egress = &egress_for_echannel[channel];
-       struct tile_net_comps *comps = info->comps_for_echannel[channel];
+       int instance = priv->instance;
+       struct mpipe_data *md = &mpipe_data[instance];
+       struct tile_net_egress *egress = &md->egress_for_echannel[channel];
+       struct tile_net_comps *comps =
+               info->mpipe[instance].comps_for_echannel[channel];
        gxio_mpipe_equeue_t *equeue = egress->equeue;
        unsigned long irqflags;
        int num_edescs;
@@ -1640,10 +2004,13 @@ static int tile_net_tx(struct sk_buff *skb, struct net_device *dev)
 {
        struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
        struct tile_net_priv *priv = netdev_priv(dev);
-       struct tile_net_egress *egress = &egress_for_echannel[priv->echannel];
+       int instance = priv->instance;
+       struct mpipe_data *md = &mpipe_data[instance];
+       struct tile_net_egress *egress =
+               &md->egress_for_echannel[priv->echannel];
        gxio_mpipe_equeue_t *equeue = egress->equeue;
        struct tile_net_comps *comps =
-               info->comps_for_echannel[priv->echannel];
+               info->mpipe[instance].comps_for_echannel[priv->echannel];
        unsigned int len = skb->len;
        unsigned char *data = skb->data;
        unsigned int num_edescs;
@@ -1660,7 +2027,7 @@ static int tile_net_tx(struct sk_buff *skb, struct net_device *dev)
        num_edescs = tile_net_tx_frags(frags, skb, data, skb_headlen(skb));
 
        /* This is only used to specify the TLB. */
-       edesc.stack_idx = large_buffer_stack;
+       edesc.stack_idx = md->first_buffer_stack;
 
        /* Prepare the edescs. */
        for (i = 0; i < num_edescs; i++) {
@@ -1693,13 +2060,16 @@ static int tile_net_tx(struct sk_buff *skb, struct net_device *dev)
        for (i = 0; i < num_edescs; i++)
                gxio_mpipe_equeue_put_at(equeue, edescs[i], slot++);
 
+       /* Store TX timestamp if needed. */
+       tile_tx_timestamp(skb, instance);
+
        /* Add a completion record. */
        add_comp(equeue, comps, slot - 1, skb);
 
        /* NOTE: Use ETH_ZLEN for short packets (e.g. 42 < 60). */
-       tile_net_stats_add(1, &priv->stats.tx_packets);
+       tile_net_stats_add(1, &dev->stats.tx_packets);
        tile_net_stats_add(max_t(unsigned int, len, ETH_ZLEN),
-                          &priv->stats.tx_bytes);
+                          &dev->stats.tx_bytes);
 
        local_irq_restore(irqflags);
 
@@ -1727,20 +2097,18 @@ static void tile_net_tx_timeout(struct net_device *dev)
 /* Ioctl commands. */
 static int tile_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 {
-       return -EOPNOTSUPP;
-}
+       if (cmd == SIOCSHWTSTAMP)
+               return tile_hwtstamp_ioctl(dev, rq, cmd);
 
-/* Get system network statistics for device. */
-static struct net_device_stats *tile_net_get_stats(struct net_device *dev)
-{
-       struct tile_net_priv *priv = netdev_priv(dev);
-       return &priv->stats;
+       return -EOPNOTSUPP;
 }
 
 /* Change the MTU. */
 static int tile_net_change_mtu(struct net_device *dev, int new_mtu)
 {
-       if ((new_mtu < 68) || (new_mtu > 1500))
+       if (new_mtu < 68)
+               return -EINVAL;
+       if (new_mtu > ((jumbo_num != 0) ? 9000 : 1500))
                return -EINVAL;
        dev->mtu = new_mtu;
        return 0;
@@ -1772,9 +2140,13 @@ static int tile_net_set_mac_address(struct net_device *dev, void *p)
  */
 static void tile_net_netpoll(struct net_device *dev)
 {
-       disable_percpu_irq(ingress_irq);
-       tile_net_handle_ingress_irq(ingress_irq, NULL);
-       enable_percpu_irq(ingress_irq, 0);
+       int instance = mpipe_instance(dev);
+       struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+       struct mpipe_data *md = &mpipe_data[instance];
+
+       disable_percpu_irq(md->ingress_irq);
+       napi_schedule(&info->mpipe[instance].napi);
+       enable_percpu_irq(md->ingress_irq, 0);
 }
 #endif
 
@@ -1784,7 +2156,6 @@ static const struct net_device_ops tile_net_ops = {
        .ndo_start_xmit = tile_net_tx,
        .ndo_select_queue = tile_net_select_queue,
        .ndo_do_ioctl = tile_net_ioctl,
-       .ndo_get_stats = tile_net_get_stats,
        .ndo_change_mtu = tile_net_change_mtu,
        .ndo_tx_timeout = tile_net_tx_timeout,
        .ndo_set_mac_address = tile_net_set_mac_address,
@@ -1800,14 +2171,21 @@ static const struct net_device_ops tile_net_ops = {
  */
 static void tile_net_setup(struct net_device *dev)
 {
+       netdev_features_t features = 0;
+
        ether_setup(dev);
        dev->netdev_ops = &tile_net_ops;
        dev->watchdog_timeo = TILE_NET_TIMEOUT;
-       dev->features |= NETIF_F_LLTX;
-       dev->features |= NETIF_F_HW_CSUM;
-       dev->features |= NETIF_F_SG;
-       dev->features |= NETIF_F_TSO;
        dev->mtu = 1500;
+
+       features |= NETIF_F_HW_CSUM;
+       features |= NETIF_F_SG;
+       features |= NETIF_F_TSO;
+       features |= NETIF_F_TSO6;
+
+       dev->hw_features   |= features;
+       dev->vlan_features |= features;
+       dev->features      |= features;
 }
 
 /* Allocate the device structure, register the device, and obtain the
@@ -1842,6 +2220,7 @@ static void tile_net_dev_init(const char *name, const uint8_t *mac)
        priv->channel = -1;
        priv->loopify_channel = -1;
        priv->echannel = -1;
+       init_ptp_dev(priv);
 
        /* Get the MAC address and set it in the device struct; this must
         * be done before the device is opened.  If the MAC is all zeroes,
@@ -1871,9 +2250,12 @@ static void tile_net_init_module_percpu(void *unused)
 {
        struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
        int my_cpu = smp_processor_id();
+       int instance;
 
-       info->has_iqueue = false;
-
+       for (instance = 0; instance < NR_MPIPE_MAX; instance++) {
+               info->mpipe[instance].has_iqueue = false;
+               info->mpipe[instance].instance = instance;
+       }
        info->my_cpu = my_cpu;
 
        /* Initialize the egress timer. */
@@ -1890,6 +2272,8 @@ static int __init tile_net_init_module(void)
 
        pr_info("Tilera Network Driver\n");
 
+       BUILD_BUG_ON(NR_MPIPE_MAX != 2);
+
        mutex_init(&tile_net_devs_for_channel_mutex);
 
        /* Initialize each CPU. */
index 36435499814b78d34020503caa41fdb00d2cd57f..106be47716e796105012efd7b8d644929faaccef 100644 (file)
@@ -31,6 +31,7 @@
 #include <linux/in6.h>
 #include <linux/timer.h>
 #include <linux/io.h>
+#include <linux/u64_stats_sync.h>
 #include <asm/checksum.h>
 #include <asm/homecache.h>
 
 /* ISSUE: This has not been thoroughly tested (except at 1500). */
 #define TILE_NET_MTU 1500
 
-/* HACK: Define to support GSO. */
-/* ISSUE: This may actually hurt performance of the TCP blaster. */
-/* #define TILE_NET_GSO */
-
-/* Define this to collapse "duplicate" acks. */
-/* #define IGNORE_DUP_ACKS */
-
 /* HACK: Define this to verify incoming packets. */
 /* #define TILE_NET_VERIFY_INGRESS */
 
@@ -156,10 +150,13 @@ struct tile_netio_queue {
  * Statistics counters for a specific cpu and device.
  */
 struct tile_net_stats_t {
-       u32 rx_packets;
-       u32 rx_bytes;
-       u32 tx_packets;
-       u32 tx_bytes;
+       struct u64_stats_sync syncp;
+       u64 rx_packets;         /* total packets received       */
+       u64 tx_packets;         /* total packets transmitted    */
+       u64 rx_bytes;           /* total bytes received         */
+       u64 tx_bytes;           /* total bytes transmitted      */
+       u64 rx_errors;          /* packets truncated or marked bad by hw */
+       u64 rx_dropped;         /* packets not for us or intf not up */
 };
 
 
@@ -218,8 +215,6 @@ struct tile_net_priv {
        int network_cpus_count;
        /* Credits per network cpu. */
        int network_cpus_credits;
-       /* Network stats. */
-       struct net_device_stats stats;
        /* For NetIO bringup retries. */
        struct delayed_work retry_work;
        /* Quick access to per cpu data. */
@@ -627,79 +622,6 @@ static void tile_net_handle_egress_timer(unsigned long arg)
 }
 
 
-#ifdef IGNORE_DUP_ACKS
-
-/*
- * Help detect "duplicate" ACKs.  These are sequential packets (for a
- * given flow) which are exactly 66 bytes long, sharing everything but
- * ID=2@0x12, Hsum=2@0x18, Ack=4@0x2a, WinSize=2@0x30, Csum=2@0x32,
- * Tstamps=10@0x38.  The ID's are +1, the Hsum's are -1, the Ack's are
- * +N, and the Tstamps are usually identical.
- *
- * NOTE: Apparently truly duplicate acks (with identical "ack" values),
- * should not be collapsed, as they are used for some kind of flow control.
- */
-static bool is_dup_ack(char *s1, char *s2, unsigned int len)
-{
-       int i;
-
-       unsigned long long ignorable = 0;
-
-       /* Identification. */
-       ignorable |= (1ULL << 0x12);
-       ignorable |= (1ULL << 0x13);
-
-       /* Header checksum. */
-       ignorable |= (1ULL << 0x18);
-       ignorable |= (1ULL << 0x19);
-
-       /* ACK. */
-       ignorable |= (1ULL << 0x2a);
-       ignorable |= (1ULL << 0x2b);
-       ignorable |= (1ULL << 0x2c);
-       ignorable |= (1ULL << 0x2d);
-
-       /* WinSize. */
-       ignorable |= (1ULL << 0x30);
-       ignorable |= (1ULL << 0x31);
-
-       /* Checksum. */
-       ignorable |= (1ULL << 0x32);
-       ignorable |= (1ULL << 0x33);
-
-       for (i = 0; i < len; i++, ignorable >>= 1) {
-
-               if ((ignorable & 1) || (s1[i] == s2[i]))
-                       continue;
-
-#ifdef TILE_NET_DEBUG
-               /* HACK: Mention non-timestamp diffs. */
-               if (i < 0x38 && i != 0x2f &&
-                   net_ratelimit())
-                       pr_info("Diff at 0x%x\n", i);
-#endif
-
-               return false;
-       }
-
-#ifdef TILE_NET_NO_SUPPRESS_DUP_ACKS
-       /* HACK: Do not suppress truly duplicate ACKs. */
-       /* ISSUE: Is this actually necessary or helpful? */
-       if (s1[0x2a] == s2[0x2a] &&
-           s1[0x2b] == s2[0x2b] &&
-           s1[0x2c] == s2[0x2c] &&
-           s1[0x2d] == s2[0x2d]) {
-               return false;
-       }
-#endif
-
-       return true;
-}
-
-#endif
-
-
-
 static void tile_net_discard_aux(struct tile_net_cpu *info, int index)
 {
        struct tile_netio_queue *queue = &info->queue;
@@ -774,6 +696,7 @@ static bool tile_net_poll_aux(struct tile_net_cpu *info, int index)
        netio_pkt_t *pkt = (netio_pkt_t *)((unsigned long) &qsp[1] + index);
 
        netio_pkt_metadata_t *metadata = NETIO_PKT_METADATA(pkt);
+       netio_pkt_status_t pkt_status = NETIO_PKT_STATUS_M(metadata, pkt);
 
        /* Extract the packet size.  FIXME: Shouldn't the second line */
        /* get subtracted?  Mostly moot, since it should be "zero". */
@@ -806,40 +729,25 @@ static bool tile_net_poll_aux(struct tile_net_cpu *info, int index)
 #endif /* TILE_NET_DUMP_PACKETS */
 
 #ifdef TILE_NET_VERIFY_INGRESS
-       if (!NETIO_PKT_L4_CSUM_CORRECT_M(metadata, pkt) &&
-           NETIO_PKT_L4_CSUM_CALCULATED_M(metadata, pkt)) {
-               /* Bug 6624: Includes UDP packets with a "zero" checksum. */
-               pr_warning("Bad L4 checksum on %d byte packet.\n", len);
-       }
-       if (!NETIO_PKT_L3_CSUM_CORRECT_M(metadata, pkt) &&
-           NETIO_PKT_L3_CSUM_CALCULATED_M(metadata, pkt)) {
+       if (pkt_status == NETIO_PKT_STATUS_OVERSIZE && len >= 64) {
                dump_packet(buf, len, "rx");
-               panic("Bad L3 checksum.");
-       }
-       switch (NETIO_PKT_STATUS_M(metadata, pkt)) {
-       case NETIO_PKT_STATUS_OVERSIZE:
-               if (len >= 64) {
-                       dump_packet(buf, len, "rx");
-                       panic("Unexpected OVERSIZE.");
-               }
-               break;
-       case NETIO_PKT_STATUS_BAD:
-               pr_warning("Unexpected BAD %ld byte packet.\n", len);
+               panic("Unexpected OVERSIZE.");
        }
 #endif
 
        filter = 0;
 
-       /* ISSUE: Filter TCP packets with "bad" checksums? */
-
-       if (!(dev->flags & IFF_UP)) {
+       if (pkt_status == NETIO_PKT_STATUS_BAD) {
+               /* Handle CRC error and hardware truncation. */
+               filter = 2;
+       } else if (!(dev->flags & IFF_UP)) {
                /* Filter packets received before we're up. */
                filter = 1;
-       } else if (NETIO_PKT_STATUS_M(metadata, pkt) == NETIO_PKT_STATUS_BAD) {
+       } else if (NETIO_PKT_ETHERTYPE_RECOGNIZED_M(metadata, pkt) &&
+                  pkt_status == NETIO_PKT_STATUS_UNDERSIZE) {
                /* Filter "truncated" packets. */
-               filter = 1;
+               filter = 2;
        } else if (!(dev->flags & IFF_PROMISC)) {
-               /* FIXME: Implement HW multicast filter. */
                if (!is_multicast_ether_addr(buf)) {
                        /* Filter packets not for our address. */
                        const u8 *mine = dev->dev_addr;
@@ -847,9 +755,14 @@ static bool tile_net_poll_aux(struct tile_net_cpu *info, int index)
                }
        }
 
-       if (filter) {
+       u64_stats_update_begin(&stats->syncp);
 
-               /* ISSUE: Update "drop" statistics? */
+       if (filter != 0) {
+
+               if (filter == 1)
+                       stats->rx_dropped++;
+               else
+                       stats->rx_errors++;
 
                tile_net_provide_linux_buffer(info, va, small);
 
@@ -881,6 +794,8 @@ static bool tile_net_poll_aux(struct tile_net_cpu *info, int index)
                stats->rx_bytes += len;
        }
 
+       u64_stats_update_end(&stats->syncp);
+
        /* ISSUE: It would be nice to defer this until the packet has */
        /* actually been processed. */
        tile_net_return_credit(info);
@@ -1907,8 +1822,10 @@ busy:
                kfree_skb(olds[i]);
 
        /* Update stats. */
+       u64_stats_update_begin(&stats->syncp);
        stats->tx_packets += num_segs;
        stats->tx_bytes += (num_segs * sh_len) + d_len;
+       u64_stats_update_end(&stats->syncp);
 
        /* Make sure the egress timer is scheduled. */
        tile_net_schedule_egress_timer(info);
@@ -1936,7 +1853,7 @@ static int tile_net_tx(struct sk_buff *skb, struct net_device *dev)
 
        unsigned int csum_start = skb_checksum_start_offset(skb);
 
-       lepp_frag_t frags[LEPP_MAX_FRAGS];
+       lepp_frag_t frags[1 + MAX_SKB_FRAGS];
 
        unsigned int num_frags;
 
@@ -1951,7 +1868,7 @@ static int tile_net_tx(struct sk_buff *skb, struct net_device *dev)
        unsigned int cmd_head, cmd_tail, cmd_next;
        unsigned int comp_tail;
 
-       lepp_cmd_t cmds[LEPP_MAX_FRAGS];
+       lepp_cmd_t cmds[1 + MAX_SKB_FRAGS];
 
 
        /*
@@ -2089,8 +2006,10 @@ busy:
                kfree_skb(olds[i]);
 
        /* HACK: Track "expanded" size for short packets (e.g. 42 < 60). */
+       u64_stats_update_begin(&stats->syncp);
        stats->tx_packets++;
        stats->tx_bytes += ((len >= ETH_ZLEN) ? len : ETH_ZLEN);
+       u64_stats_update_end(&stats->syncp);
 
        /* Make sure the egress timer is scheduled. */
        tile_net_schedule_egress_timer(info);
@@ -2127,30 +2046,51 @@ static int tile_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
  *
  * Returns the address of the device statistics structure.
  */
-static struct net_device_stats *tile_net_get_stats(struct net_device *dev)
+static struct rtnl_link_stats64 *tile_net_get_stats64(struct net_device *dev,
+               struct rtnl_link_stats64 *stats)
 {
        struct tile_net_priv *priv = netdev_priv(dev);
-       u32 rx_packets = 0;
-       u32 tx_packets = 0;
-       u32 rx_bytes = 0;
-       u32 tx_bytes = 0;
+       u64 rx_packets = 0, tx_packets = 0;
+       u64 rx_bytes = 0, tx_bytes = 0;
+       u64 rx_errors = 0, rx_dropped = 0;
        int i;
 
        for_each_online_cpu(i) {
-               if (priv->cpu[i]) {
-                       rx_packets += priv->cpu[i]->stats.rx_packets;
-                       rx_bytes += priv->cpu[i]->stats.rx_bytes;
-                       tx_packets += priv->cpu[i]->stats.tx_packets;
-                       tx_bytes += priv->cpu[i]->stats.tx_bytes;
-               }
+               struct tile_net_stats_t *cpu_stats;
+               u64 trx_packets, ttx_packets, trx_bytes, ttx_bytes;
+               u64 trx_errors, trx_dropped;
+               unsigned int start;
+
+               if (priv->cpu[i] == NULL)
+                       continue;
+               cpu_stats = &priv->cpu[i]->stats;
+
+               do {
+                       start = u64_stats_fetch_begin_bh(&cpu_stats->syncp);
+                       trx_packets = cpu_stats->rx_packets;
+                       ttx_packets = cpu_stats->tx_packets;
+                       trx_bytes   = cpu_stats->rx_bytes;
+                       ttx_bytes   = cpu_stats->tx_bytes;
+                       trx_errors  = cpu_stats->rx_errors;
+                       trx_dropped = cpu_stats->rx_dropped;
+               } while (u64_stats_fetch_retry_bh(&cpu_stats->syncp, start));
+
+               rx_packets += trx_packets;
+               tx_packets += ttx_packets;
+               rx_bytes   += trx_bytes;
+               tx_bytes   += ttx_bytes;
+               rx_errors  += trx_errors;
+               rx_dropped += trx_dropped;
        }
 
-       priv->stats.rx_packets = rx_packets;
-       priv->stats.rx_bytes = rx_bytes;
-       priv->stats.tx_packets = tx_packets;
-       priv->stats.tx_bytes = tx_bytes;
+       stats->rx_packets = rx_packets;
+       stats->tx_packets = tx_packets;
+       stats->rx_bytes   = rx_bytes;
+       stats->tx_bytes   = tx_bytes;
+       stats->rx_errors  = rx_errors;
+       stats->rx_dropped = rx_dropped;
 
-       return &priv->stats;
+       return stats;
 }
 
 
@@ -2287,7 +2227,7 @@ static const struct net_device_ops tile_net_ops = {
        .ndo_stop = tile_net_stop,
        .ndo_start_xmit = tile_net_tx,
        .ndo_do_ioctl = tile_net_ioctl,
-       .ndo_get_stats = tile_net_get_stats,
+       .ndo_get_stats64 = tile_net_get_stats64,
        .ndo_change_mtu = tile_net_change_mtu,
        .ndo_tx_timeout = tile_net_tx_timeout,
        .ndo_set_mac_address = tile_net_set_mac_address,
@@ -2305,39 +2245,30 @@ static const struct net_device_ops tile_net_ops = {
  */
 static void tile_net_setup(struct net_device *dev)
 {
-       PDEBUG("tile_net_setup()\n");
+       netdev_features_t features = 0;
 
        ether_setup(dev);
-
        dev->netdev_ops = &tile_net_ops;
-
        dev->watchdog_timeo = TILE_NET_TIMEOUT;
+       dev->tx_queue_len = TILE_NET_TX_QUEUE_LEN;
+       dev->mtu = TILE_NET_MTU;
 
-       /* We want lockless xmit. */
-       dev->features |= NETIF_F_LLTX;
-
-       /* We support hardware tx checksums. */
-       dev->features |= NETIF_F_HW_CSUM;
-
-       /* We support scatter/gather. */
-       dev->features |= NETIF_F_SG;
-
-       /* We support TSO. */
-       dev->features |= NETIF_F_TSO;
+       features |= NETIF_F_HW_CSUM;
+       features |= NETIF_F_SG;
 
-#ifdef TILE_NET_GSO
-       /* We support GSO. */
-       dev->features |= NETIF_F_GSO;
-#endif
+       /* We support TSO iff the HV supports sufficient frags. */
+       if (LEPP_MAX_FRAGS >= 1 + MAX_SKB_FRAGS)
+               features |= NETIF_F_TSO;
 
+       /* We can't support HIGHDMA without hash_default, since we need
+        * to be able to finv() with a VA if we don't have hash_default.
+        */
        if (hash_default)
-               dev->features |= NETIF_F_HIGHDMA;
-
-       /* ISSUE: We should support NETIF_F_UFO. */
+               features |= NETIF_F_HIGHDMA;
 
-       dev->tx_queue_len = TILE_NET_TX_QUEUE_LEN;
-
-       dev->mtu = TILE_NET_MTU;
+       dev->hw_features   |= features;
+       dev->vlan_features |= features;
+       dev->features      |= features;
 }
 
 
index b75eb9e0e867a88ac6531c2cd35f419d8553e88a..c8f088ab5fdfdbb6c9c757f5defd9c9b70588163 100644 (file)
@@ -2407,7 +2407,7 @@ static struct pci_driver rhine_driver = {
        .driver.pm      = RHINE_PM_OPS,
 };
 
-static struct dmi_system_id __initdata rhine_dmi_table[] = {
+static struct dmi_system_id rhine_dmi_table[] __initdata = {
        {
                .ident = "EPIA-M",
                .matches = {
index d01cacf8a7c279ee892b703715e84707214c186e..d022bf936572ea857cbcb9f0fded1899077d5a95 100644 (file)
@@ -2376,6 +2376,23 @@ out_0:
        return ret;
 }
 
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/**
+ *  velocity_poll_controller           -       Velocity Poll controller function
+ *  @dev: network device
+ *
+ *
+ *  Used by NETCONSOLE and other diagnostic tools to allow network I/P
+ *  with interrupts disabled.
+ */
+static void velocity_poll_controller(struct net_device *dev)
+{
+       disable_irq(dev->irq);
+       velocity_intr(dev->irq, dev);
+       enable_irq(dev->irq);
+}
+#endif
+
 /**
  *     velocity_mii_ioctl              -       MII ioctl handler
  *     @dev: network device
@@ -2641,6 +2658,9 @@ static const struct net_device_ops velocity_netdev_ops = {
        .ndo_do_ioctl           = velocity_ioctl,
        .ndo_vlan_rx_add_vid    = velocity_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid   = velocity_vlan_rx_kill_vid,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+       .ndo_poll_controller = velocity_poll_controller,
+#endif
 };
 
 /**
index fd4dbdae5331a3cf31732d9691a1356b7d1abf1c..4c619ea5189fc22308b8fe36851824e72eaad0ab 100644 (file)
@@ -1230,8 +1230,7 @@ error:
  */
 static int xemaclite_of_remove(struct platform_device *of_dev)
 {
-       struct device *dev = &of_dev->dev;
-       struct net_device *ndev = dev_get_drvdata(dev);
+       struct net_device *ndev = platform_get_drvdata(of_dev);
 
        struct net_local *lp = netdev_priv(ndev);
 
@@ -1250,7 +1249,6 @@ static int xemaclite_of_remove(struct platform_device *of_dev)
        lp->phy_node = NULL;
 
        xemaclite_remove_ndev(ndev, of_dev);
-       dev_set_drvdata(dev, NULL);
 
        return 0;
 }
index 51f2bc37610188b44de5b8546062d16142571505..2dcc60fb37f1dee50beea82b878e98248c1688a0 100644 (file)
@@ -210,8 +210,7 @@ static int via_init_one(struct pci_dev *pcidev, const struct pci_device_id *id)
                        pci_write_config_byte(pcidev,0x42,(bTmp | 0xf0));
                        pci_write_config_byte(pcidev,0x5a,0xc0);
                        WriteLPCReg(0x28, 0x70 );
-                       if (via_ircc_open(pcidev, &info, 0x3076) == 0)
-                               rc=0;
+                       rc = via_ircc_open(pcidev, &info, 0x3076);
                } else
                        rc = -ENODEV; //IR not turn on   
        } else { //Not VT1211
@@ -249,8 +248,7 @@ static int via_init_one(struct pci_dev *pcidev, const struct pci_device_id *id)
                        info.irq=FirIRQ;
                        info.dma=FirDRQ1;
                        info.dma2=FirDRQ0;
-                       if (via_ircc_open(pcidev, &info, 0x3096) == 0)
-                               rc=0;
+                       rc = via_ircc_open(pcidev, &info, 0x3096);
                } else
                        rc = -ENODEV; //IR not turn on !!!!!
        }//Not VT1211
index 16b43bf544b74dd3ee72599f56f429ca2dea8851..510a9b60fde1d2881d322a3278cc1613210bd54a 100644 (file)
@@ -600,6 +600,9 @@ static int macvlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
        if (!vlan->port->passthru)
                return -EOPNOTSUPP;
 
+       if (flags & NLM_F_REPLACE)
+               return -EOPNOTSUPP;
+
        if (is_unicast_ether_addr(addr))
                err = dev_uc_add_excl(dev, addr);
        else if (is_multicast_ether_addr(addr))
index b51db2abfe442cd95fdbcb97e17aec702ec0d2c5..9dccb1edfd2aba2070023f4ae874bac0cc432293 100644 (file)
@@ -68,6 +68,8 @@ static const struct proto_ops macvtap_socket_ops;
 #define TUN_OFFLOADS (NETIF_F_HW_CSUM | NETIF_F_TSO_ECN | NETIF_F_TSO | \
                      NETIF_F_TSO6 | NETIF_F_UFO)
 #define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO)
+#define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG)
+
 /*
  * RCU usage:
  * The macvtap_queue and the macvlan_dev are loosely coupled, the
@@ -278,7 +280,8 @@ static int macvtap_forward(struct net_device *dev, struct sk_buff *skb)
 {
        struct macvlan_dev *vlan = netdev_priv(dev);
        struct macvtap_queue *q = macvtap_get_queue(dev, skb);
-       netdev_features_t features;
+       netdev_features_t features = TAP_FEATURES;
+
        if (!q)
                goto drop;
 
@@ -287,9 +290,11 @@ static int macvtap_forward(struct net_device *dev, struct sk_buff *skb)
 
        skb->dev = dev;
        /* Apply the forward feature mask so that we perform segmentation
-        * according to users wishes.
+        * according to users wishes.  This only works if VNET_HDR is
+        * enabled.
         */
-       features = netif_skb_features(skb) & vlan->tap_features;
+       if (q->flags & IFF_VNET_HDR)
+               features |= vlan->tap_features;
        if (netif_needs_gso(skb, features)) {
                struct sk_buff *segs = __skb_gso_segment(skb, features, false);
 
@@ -524,7 +529,7 @@ static inline struct sk_buff *macvtap_alloc_skb(struct sock *sk, size_t prepad,
                linear = len;
 
        skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
-                                  err);
+                                  err, 0);
        if (!skb)
                return NULL;
 
@@ -536,86 +541,6 @@ static inline struct sk_buff *macvtap_alloc_skb(struct sock *sk, size_t prepad,
        return skb;
 }
 
-/* set skb frags from iovec, this can move to core network code for reuse */
-static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from,
-                                 int offset, size_t count)
-{
-       int len = iov_length(from, count) - offset;
-       int copy = skb_headlen(skb);
-       int size, offset1 = 0;
-       int i = 0;
-
-       /* Skip over from offset */
-       while (count && (offset >= from->iov_len)) {
-               offset -= from->iov_len;
-               ++from;
-               --count;
-       }
-
-       /* copy up to skb headlen */
-       while (count && (copy > 0)) {
-               size = min_t(unsigned int, copy, from->iov_len - offset);
-               if (copy_from_user(skb->data + offset1, from->iov_base + offset,
-                                  size))
-                       return -EFAULT;
-               if (copy > size) {
-                       ++from;
-                       --count;
-                       offset = 0;
-               } else
-                       offset += size;
-               copy -= size;
-               offset1 += size;
-       }
-
-       if (len == offset1)
-               return 0;
-
-       while (count--) {
-               struct page *page[MAX_SKB_FRAGS];
-               int num_pages;
-               unsigned long base;
-               unsigned long truesize;
-
-               len = from->iov_len - offset;
-               if (!len) {
-                       offset = 0;
-                       ++from;
-                       continue;
-               }
-               base = (unsigned long)from->iov_base + offset;
-               size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
-               if (i + size > MAX_SKB_FRAGS)
-                       return -EMSGSIZE;
-               num_pages = get_user_pages_fast(base, size, 0, &page[i]);
-               if (num_pages != size) {
-                       int j;
-
-                       for (j = 0; j < num_pages; j++)
-                               put_page(page[i + j]);
-                       return -EFAULT;
-               }
-               truesize = size * PAGE_SIZE;
-               skb->data_len += len;
-               skb->len += len;
-               skb->truesize += truesize;
-               atomic_add(truesize, &skb->sk->sk_wmem_alloc);
-               while (len) {
-                       int off = base & ~PAGE_MASK;
-                       int size = min_t(int, len, PAGE_SIZE - off);
-                       __skb_fill_page_desc(skb, i, page[i], off, size);
-                       skb_shinfo(skb)->nr_frags++;
-                       /* increase sk_wmem_alloc */
-                       base += size;
-                       len -= size;
-                       i++;
-               }
-               offset = 0;
-               ++from;
-       }
-       return 0;
-}
-
 /*
  * macvtap_skb_from_vnet_hdr and macvtap_skb_to_vnet_hdr should
  * be shared with the tun/tap driver.
@@ -698,29 +623,6 @@ static int macvtap_skb_to_vnet_hdr(const struct sk_buff *skb,
        return 0;
 }
 
-static unsigned long iov_pages(const struct iovec *iv, int offset,
-                              unsigned long nr_segs)
-{
-       unsigned long seg, base;
-       int pages = 0, len, size;
-
-       while (nr_segs && (offset >= iv->iov_len)) {
-               offset -= iv->iov_len;
-               ++iv;
-               --nr_segs;
-       }
-
-       for (seg = 0; seg < nr_segs; seg++) {
-               base = (unsigned long)iv[seg].iov_base + offset;
-               len = iv[seg].iov_len - offset;
-               size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
-               pages += size;
-               offset = 0;
-       }
-
-       return pages;
-}
-
 /* Get packet from user space buffer */
 static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
                                const struct iovec *iv, unsigned long total_len,
@@ -1064,8 +966,7 @@ static int set_offload(struct macvtap_queue *q, unsigned long arg)
        /* tap_features are the same as features on tun/tap and
         * reflect user expectations.
         */
-       vlan->tap_features = vlan->dev->features &
-                           (feature_mask | ~TUN_OFFLOADS);
+       vlan->tap_features = feature_mask;
        vlan->set_features = features;
        netdev_update_features(vlan->dev);
 
@@ -1161,10 +1062,6 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
                            TUN_F_TSO_ECN | TUN_F_UFO))
                        return -EINVAL;
 
-               /* TODO: only accept frames with the features that
-                        got enabled for forwarded frames */
-               if (!(q->flags & IFF_VNET_HDR))
-                       return  -EINVAL;
                rtnl_lock();
                ret = set_offload(q, arg);
                rtnl_unlock();
index 9733bd239a866d4a38552c8c41fb9b9864b6e50f..f8e305d8da761cfc3ffd2ba36ae4a19f53ff1e01 100644 (file)
@@ -48,7 +48,7 @@ static int mdio_mux_mmioreg_switch_fn(int current_child, int desired_child,
        struct mdio_mux_mmioreg_state *s = data;
 
        if (current_child ^ desired_child) {
-               void *p = ioremap(s->phys, 1);
+               void __iomem *p = ioremap(s->phys, 1);
                uint8_t x, y;
 
                if (!p)
index 2510435f34edfd5e10cc4252c6e7dcfa35e95e62..c31aad0004cb5ed93453089114e9f7dc31894ab2 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/module.h>
 #include <linux/phy.h>
 #include <linux/micrel_phy.h>
+#include <linux/of.h>
 
 /* Operation Mode Strap Override */
 #define MII_KSZPHY_OMSO                                0x16
 #define KS8737_CTRL_INT_ACTIVE_HIGH            (1 << 14)
 #define KSZ8051_RMII_50MHZ_CLK                 (1 << 7)
 
+/* Write/read to/from extended registers */
+#define MII_KSZPHY_EXTREG                       0x0b
+#define KSZPHY_EXTREG_WRITE                     0x8000
+
+#define MII_KSZPHY_EXTREG_WRITE                 0x0c
+#define MII_KSZPHY_EXTREG_READ                  0x0d
+
+/* Extended registers */
+#define MII_KSZPHY_CLK_CONTROL_PAD_SKEW         0x104
+#define MII_KSZPHY_RX_DATA_PAD_SKEW             0x105
+#define MII_KSZPHY_TX_DATA_PAD_SKEW             0x106
+
+#define PS_TO_REG                              200
+
 static int ksz_config_flags(struct phy_device *phydev)
 {
        int regval;
@@ -65,6 +80,20 @@ static int ksz_config_flags(struct phy_device *phydev)
        return 0;
 }
 
+static int kszphy_extended_write(struct phy_device *phydev,
+                                 u32 regnum, u16 val)
+{
+       phy_write(phydev, MII_KSZPHY_EXTREG, KSZPHY_EXTREG_WRITE | regnum);
+       return phy_write(phydev, MII_KSZPHY_EXTREG_WRITE, val);
+}
+
+static int kszphy_extended_read(struct phy_device *phydev,
+                                 u32 regnum)
+{
+       phy_write(phydev, MII_KSZPHY_EXTREG, regnum);
+       return phy_read(phydev, MII_KSZPHY_EXTREG_READ);
+}
+
 static int kszphy_ack_interrupt(struct phy_device *phydev)
 {
        /* bit[7..0] int status, which is a read and clear register. */
@@ -141,10 +170,82 @@ static int ks8051_config_init(struct phy_device *phydev)
        return rc < 0 ? rc : 0;
 }
 
+static int ksz9021_load_values_from_of(struct phy_device *phydev,
+                                      struct device_node *of_node, u16 reg,
+                                      char *field1, char *field2,
+                                      char *field3, char *field4)
+{
+       int val1 = -1;
+       int val2 = -2;
+       int val3 = -3;
+       int val4 = -4;
+       int newval;
+       int matches = 0;
+
+       if (!of_property_read_u32(of_node, field1, &val1))
+               matches++;
+
+       if (!of_property_read_u32(of_node, field2, &val2))
+               matches++;
+
+       if (!of_property_read_u32(of_node, field3, &val3))
+               matches++;
+
+       if (!of_property_read_u32(of_node, field4, &val4))
+               matches++;
+
+       if (!matches)
+               return 0;
+
+       if (matches < 4)
+               newval = kszphy_extended_read(phydev, reg);
+       else
+               newval = 0;
+
+       if (val1 != -1)
+               newval = ((newval & 0xfff0) | ((val1 / PS_TO_REG) & 0xf) << 0);
+
+       if (val2 != -1)
+               newval = ((newval & 0xff0f) | ((val2 / PS_TO_REG) & 0xf) << 4);
+
+       if (val3 != -1)
+               newval = ((newval & 0xf0ff) | ((val3 / PS_TO_REG) & 0xf) << 8);
+
+       if (val4 != -1)
+               newval = ((newval & 0x0fff) | ((val4 / PS_TO_REG) & 0xf) << 12);
+
+       return kszphy_extended_write(phydev, reg, newval);
+}
+
+static int ksz9021_config_init(struct phy_device *phydev)
+{
+       struct device *dev = &phydev->dev;
+       struct device_node *of_node = dev->of_node;
+
+       if (!of_node && dev->parent->of_node)
+               of_node = dev->parent->of_node;
+
+       if (of_node) {
+               ksz9021_load_values_from_of(phydev, of_node,
+                                   MII_KSZPHY_CLK_CONTROL_PAD_SKEW,
+                                   "txen-skew-ps", "txc-skew-ps",
+                                   "rxdv-skew-ps", "rxc-skew-ps");
+               ksz9021_load_values_from_of(phydev, of_node,
+                                   MII_KSZPHY_RX_DATA_PAD_SKEW,
+                                   "rxd0-skew-ps", "rxd1-skew-ps",
+                                   "rxd2-skew-ps", "rxd3-skew-ps");
+               ksz9021_load_values_from_of(phydev, of_node,
+                                   MII_KSZPHY_TX_DATA_PAD_SKEW,
+                                   "txd0-skew-ps", "txd1-skew-ps",
+                                   "txd2-skew-ps", "txd3-skew-ps");
+       }
+       return 0;
+}
+
 #define KSZ8873MLL_GLOBAL_CONTROL_4    0x06
 #define KSZ8873MLL_GLOBAL_CONTROL_4_DUPLEX     (1 << 6)
 #define KSZ8873MLL_GLOBAL_CONTROL_4_SPEED      (1 << 4)
-int ksz8873mll_read_status(struct phy_device *phydev)
+static int ksz8873mll_read_status(struct phy_device *phydev)
 {
        int regval;
 
@@ -281,7 +382,7 @@ static struct phy_driver ksphy_driver[] = {
        .name           = "Micrel KSZ9021 Gigabit PHY",
        .features       = (PHY_GBIT_FEATURES | SUPPORTED_Pause),
        .flags          = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
-       .config_init    = kszphy_config_init,
+       .config_init    = ksz9021_config_init,
        .config_aneg    = genphy_config_aneg,
        .read_status    = genphy_read_status,
        .ack_interrupt  = kszphy_ack_interrupt,
index 8e7af8354342c9ce6440e3131aad536fbe385c87..138de837977f1e5762ecb8ae6fecac59ade8c181 100644 (file)
@@ -23,7 +23,7 @@
 #define RTL821x_INER_INIT      0x6400
 #define RTL821x_INSR           0x13
 
-#define        RTL8211E_INER_LINK_STAT 0x10
+#define        RTL8211E_INER_LINK_STATUS       0x400
 
 MODULE_DESCRIPTION("Realtek PHY driver");
 MODULE_AUTHOR("Johnson Leung");
@@ -57,7 +57,7 @@ static int rtl8211e_config_intr(struct phy_device *phydev)
 
        if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
                err = phy_write(phydev, RTL821x_INER,
-                               RTL8211E_INER_LINK_STAT);
+                               RTL8211E_INER_LINK_STATUS);
        else
                err = phy_write(phydev, RTL821x_INER, 0);
 
index 162464fe86bf7634fb9ac6c21ecd76e25beb10b6..6fa5ae00039fd65b2c634d0d22241d9c7a9d13e2 100644 (file)
@@ -47,7 +47,7 @@
 #define MAX_CALLID 65535
 
 static DECLARE_BITMAP(callid_bitmap, MAX_CALLID + 1);
-static struct pppox_sock **callid_sock;
+static struct pppox_sock __rcu **callid_sock;
 
 static DEFINE_SPINLOCK(chan_lock);
 
@@ -83,11 +83,11 @@ static const struct proto_ops pptp_ops;
 struct pptp_gre_header {
        u8  flags;
        u8  ver;
-       u16 protocol;
-       u16 payload_len;
-       u16 call_id;
-       u32 seq;
-       u32 ack;
+       __be16 protocol;
+       __be16 payload_len;
+       __be16 call_id;
+       __be32 seq;
+       __be32 ack;
 } __packed;
 
 static struct pppox_sock *lookup_chan(u16 call_id, __be32 s_addr)
index bff7e0b0b4e70d10ccdf9a6de11cdbae056a64f0..9ccccd40c4101b07c0f5ecbaec7c25484b67ceeb 100644 (file)
@@ -622,6 +622,86 @@ static int team_change_mode(struct team *team, const char *kind)
 }
 
 
+/*********************
+ * Peers notification
+ *********************/
+
+static void team_notify_peers_work(struct work_struct *work)
+{
+       struct team *team;
+
+       team = container_of(work, struct team, notify_peers.dw.work);
+
+       if (!rtnl_trylock()) {
+               schedule_delayed_work(&team->notify_peers.dw, 0);
+               return;
+       }
+       call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, team->dev);
+       rtnl_unlock();
+       if (!atomic_dec_and_test(&team->notify_peers.count_pending))
+               schedule_delayed_work(&team->notify_peers.dw,
+                                     msecs_to_jiffies(team->notify_peers.interval));
+}
+
+static void team_notify_peers(struct team *team)
+{
+       if (!team->notify_peers.count || !netif_running(team->dev))
+               return;
+       atomic_set(&team->notify_peers.count_pending, team->notify_peers.count);
+       schedule_delayed_work(&team->notify_peers.dw, 0);
+}
+
+static void team_notify_peers_init(struct team *team)
+{
+       INIT_DELAYED_WORK(&team->notify_peers.dw, team_notify_peers_work);
+}
+
+static void team_notify_peers_fini(struct team *team)
+{
+       cancel_delayed_work_sync(&team->notify_peers.dw);
+}
+
+
+/*******************************
+ * Send multicast group rejoins
+ *******************************/
+
+static void team_mcast_rejoin_work(struct work_struct *work)
+{
+       struct team *team;
+
+       team = container_of(work, struct team, mcast_rejoin.dw.work);
+
+       if (!rtnl_trylock()) {
+               schedule_delayed_work(&team->mcast_rejoin.dw, 0);
+               return;
+       }
+       call_netdevice_notifiers(NETDEV_RESEND_IGMP, team->dev);
+       rtnl_unlock();
+       if (!atomic_dec_and_test(&team->mcast_rejoin.count_pending))
+               schedule_delayed_work(&team->mcast_rejoin.dw,
+                                     msecs_to_jiffies(team->mcast_rejoin.interval));
+}
+
+static void team_mcast_rejoin(struct team *team)
+{
+       if (!team->mcast_rejoin.count || !netif_running(team->dev))
+               return;
+       atomic_set(&team->mcast_rejoin.count_pending, team->mcast_rejoin.count);
+       schedule_delayed_work(&team->mcast_rejoin.dw, 0);
+}
+
+static void team_mcast_rejoin_init(struct team *team)
+{
+       INIT_DELAYED_WORK(&team->mcast_rejoin.dw, team_mcast_rejoin_work);
+}
+
+static void team_mcast_rejoin_fini(struct team *team)
+{
+       cancel_delayed_work_sync(&team->mcast_rejoin.dw);
+}
+
+
 /************************
  * Rx path frame handler
  ************************/
@@ -846,6 +926,8 @@ static void team_port_enable(struct team *team,
        team_queue_override_port_add(team, port);
        if (team->ops.port_enabled)
                team->ops.port_enabled(team, port);
+       team_notify_peers(team);
+       team_mcast_rejoin(team);
 }
 
 static void __reconstruct_port_hlist(struct team *team, int rm_index)
@@ -875,6 +957,8 @@ static void team_port_disable(struct team *team,
        team->en_port_count--;
        team_queue_override_port_del(team, port);
        team_adjust_ops(team);
+       team_notify_peers(team);
+       team_mcast_rejoin(team);
 }
 
 #define TEAM_VLAN_FEATURES (NETIF_F_ALL_CSUM | NETIF_F_SG | \
@@ -953,6 +1037,9 @@ static int team_port_enable_netpoll(struct team *team, struct team_port *port,
        struct netpoll *np;
        int err;
 
+       if (!team->dev->npinfo)
+               return 0;
+
        np = kzalloc(sizeof(*np), gfp);
        if (!np)
                return -ENOMEM;
@@ -979,12 +1066,6 @@ static void team_port_disable_netpoll(struct team_port *port)
        __netpoll_cleanup(np);
        kfree(np);
 }
-
-static struct netpoll_info *team_netpoll_info(struct team *team)
-{
-       return team->dev->npinfo;
-}
-
 #else
 static int team_port_enable_netpoll(struct team *team, struct team_port *port,
                                    gfp_t gfp)
@@ -994,10 +1075,6 @@ static int team_port_enable_netpoll(struct team *team, struct team_port *port,
 static void team_port_disable_netpoll(struct team_port *port)
 {
 }
-static struct netpoll_info *team_netpoll_info(struct team *team)
-{
-       return NULL;
-}
 #endif
 
 static void __team_port_change_port_added(struct team_port *port, bool linkup);
@@ -1079,13 +1156,11 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
                goto err_vids_add;
        }
 
-       if (team_netpoll_info(team)) {
-               err = team_port_enable_netpoll(team, port, GFP_KERNEL);
-               if (err) {
-                       netdev_err(dev, "Failed to enable netpoll on device %s\n",
-                                  portname);
-                       goto err_enable_netpoll;
-               }
+       err = team_port_enable_netpoll(team, port, GFP_KERNEL);
+       if (err) {
+               netdev_err(dev, "Failed to enable netpoll on device %s\n",
+                          portname);
+               goto err_enable_netpoll;
        }
 
        err = netdev_master_upper_dev_link(port_dev, dev);
@@ -1205,6 +1280,62 @@ static int team_mode_option_set(struct team *team, struct team_gsetter_ctx *ctx)
        return team_change_mode(team, ctx->data.str_val);
 }
 
+static int team_notify_peers_count_get(struct team *team,
+                                      struct team_gsetter_ctx *ctx)
+{
+       ctx->data.u32_val = team->notify_peers.count;
+       return 0;
+}
+
+static int team_notify_peers_count_set(struct team *team,
+                                      struct team_gsetter_ctx *ctx)
+{
+       team->notify_peers.count = ctx->data.u32_val;
+       return 0;
+}
+
+static int team_notify_peers_interval_get(struct team *team,
+                                         struct team_gsetter_ctx *ctx)
+{
+       ctx->data.u32_val = team->notify_peers.interval;
+       return 0;
+}
+
+static int team_notify_peers_interval_set(struct team *team,
+                                         struct team_gsetter_ctx *ctx)
+{
+       team->notify_peers.interval = ctx->data.u32_val;
+       return 0;
+}
+
+static int team_mcast_rejoin_count_get(struct team *team,
+                                      struct team_gsetter_ctx *ctx)
+{
+       ctx->data.u32_val = team->mcast_rejoin.count;
+       return 0;
+}
+
+static int team_mcast_rejoin_count_set(struct team *team,
+                                      struct team_gsetter_ctx *ctx)
+{
+       team->mcast_rejoin.count = ctx->data.u32_val;
+       return 0;
+}
+
+static int team_mcast_rejoin_interval_get(struct team *team,
+                                         struct team_gsetter_ctx *ctx)
+{
+       ctx->data.u32_val = team->mcast_rejoin.interval;
+       return 0;
+}
+
+static int team_mcast_rejoin_interval_set(struct team *team,
+                                         struct team_gsetter_ctx *ctx)
+{
+       team->mcast_rejoin.interval = ctx->data.u32_val;
+       return 0;
+}
+
 static int team_port_en_option_get(struct team *team,
                                   struct team_gsetter_ctx *ctx)
 {
@@ -1316,6 +1447,30 @@ static const struct team_option team_options[] = {
                .getter = team_mode_option_get,
                .setter = team_mode_option_set,
        },
+       {
+               .name = "notify_peers_count",
+               .type = TEAM_OPTION_TYPE_U32,
+               .getter = team_notify_peers_count_get,
+               .setter = team_notify_peers_count_set,
+       },
+       {
+               .name = "notify_peers_interval",
+               .type = TEAM_OPTION_TYPE_U32,
+               .getter = team_notify_peers_interval_get,
+               .setter = team_notify_peers_interval_set,
+       },
+       {
+               .name = "mcast_rejoin_count",
+               .type = TEAM_OPTION_TYPE_U32,
+               .getter = team_mcast_rejoin_count_get,
+               .setter = team_mcast_rejoin_count_set,
+       },
+       {
+               .name = "mcast_rejoin_interval",
+               .type = TEAM_OPTION_TYPE_U32,
+               .getter = team_mcast_rejoin_interval_get,
+               .setter = team_mcast_rejoin_interval_set,
+       },
        {
                .name = "enabled",
                .type = TEAM_OPTION_TYPE_BOOL,
@@ -1396,6 +1551,10 @@ static int team_init(struct net_device *dev)
 
        INIT_LIST_HEAD(&team->option_list);
        INIT_LIST_HEAD(&team->option_inst_list);
+
+       team_notify_peers_init(team);
+       team_mcast_rejoin_init(team);
+
        err = team_options_register(team, team_options, ARRAY_SIZE(team_options));
        if (err)
                goto err_options_register;
@@ -1406,6 +1565,8 @@ static int team_init(struct net_device *dev)
        return 0;
 
 err_options_register:
+       team_mcast_rejoin_fini(team);
+       team_notify_peers_fini(team);
        team_queue_override_fini(team);
 err_team_queue_override_init:
        free_percpu(team->pcpu_stats);
@@ -1425,6 +1586,8 @@ static void team_uninit(struct net_device *dev)
 
        __team_change_mode(team, NULL); /* cleanup */
        __team_options_unregister(team, team_options, ARRAY_SIZE(team_options));
+       team_mcast_rejoin_fini(team);
+       team_notify_peers_fini(team);
        team_queue_override_fini(team);
        mutex_unlock(&team->lock);
 }
@@ -2698,6 +2861,10 @@ static int team_device_event(struct notifier_block *unused,
        case NETDEV_PRE_TYPE_CHANGE:
                /* Forbid to change type of underlaying device */
                return NOTIFY_BAD;
+       case NETDEV_RESEND_IGMP:
+               /* Propagate to master device */
+               call_netdevice_notifiers(event, port->team->dev);
+               break;
        }
        return NOTIFY_DONE;
 }
index 71af122edf2d639c0a87099d83998a0706bfd433..60a1e93e9d351a3a9f6e49ac3db1f4da7a25ada8 100644 (file)
@@ -60,6 +60,7 @@
 #include <linux/if_arp.h>
 #include <linux/if_ether.h>
 #include <linux/if_tun.h>
+#include <linux/if_vlan.h>
 #include <linux/crc32.h>
 #include <linux/nsproxy.h>
 #include <linux/virtio_net.h>
@@ -137,7 +138,10 @@ struct tun_file {
        struct fasync_struct *fasync;
        /* only used for fasnyc */
        unsigned int flags;
-       u16 queue_index;
+       union {
+               u16 queue_index;
+               unsigned int ifindex;
+       };
        struct list_head next;
        struct tun_struct *detached;
 };
@@ -497,7 +501,7 @@ static void tun_detach_all(struct net_device *dev)
                module_put(THIS_MODULE);
 }
 
-static int tun_attach(struct tun_struct *tun, struct file *file)
+static int tun_attach(struct tun_struct *tun, struct file *file, bool skip_filter)
 {
        struct tun_file *tfile = file->private_data;
        int err;
@@ -522,7 +526,7 @@ static int tun_attach(struct tun_struct *tun, struct file *file)
        err = 0;
 
        /* Re-attach the filter to presist device */
-       if (tun->filter_attached == true) {
+       if (!skip_filter && (tun->filter_attached == true)) {
                err = sk_attach_filter(&tun->fprog, tfile->socket.sk);
                if (!err)
                        goto out;
@@ -739,6 +743,11 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
                          >= dev->tx_queue_len / tun->numqueues)
                goto drop;
 
+       if (skb->sk) {
+               sock_tx_timestamp(skb->sk, &skb_shinfo(skb)->tx_flags);
+               sw_tx_timestamp(skb);
+       }
+
        /* Orphan the skb - required as we might hang on to it
         * for indefinite time. */
        if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
@@ -943,7 +952,7 @@ static struct sk_buff *tun_alloc_skb(struct tun_file *tfile,
                linear = len;
 
        skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
-                                  &err);
+                                  &err, 0);
        if (!skb)
                return ERR_PTR(err);
 
@@ -955,109 +964,6 @@ static struct sk_buff *tun_alloc_skb(struct tun_file *tfile,
        return skb;
 }
 
-/* set skb frags from iovec, this can move to core network code for reuse */
-static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from,
-                                 int offset, size_t count)
-{
-       int len = iov_length(from, count) - offset;
-       int copy = skb_headlen(skb);
-       int size, offset1 = 0;
-       int i = 0;
-
-       /* Skip over from offset */
-       while (count && (offset >= from->iov_len)) {
-               offset -= from->iov_len;
-               ++from;
-               --count;
-       }
-
-       /* copy up to skb headlen */
-       while (count && (copy > 0)) {
-               size = min_t(unsigned int, copy, from->iov_len - offset);
-               if (copy_from_user(skb->data + offset1, from->iov_base + offset,
-                                  size))
-                       return -EFAULT;
-               if (copy > size) {
-                       ++from;
-                       --count;
-                       offset = 0;
-               } else
-                       offset += size;
-               copy -= size;
-               offset1 += size;
-       }
-
-       if (len == offset1)
-               return 0;
-
-       while (count--) {
-               struct page *page[MAX_SKB_FRAGS];
-               int num_pages;
-               unsigned long base;
-               unsigned long truesize;
-
-               len = from->iov_len - offset;
-               if (!len) {
-                       offset = 0;
-                       ++from;
-                       continue;
-               }
-               base = (unsigned long)from->iov_base + offset;
-               size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
-               if (i + size > MAX_SKB_FRAGS)
-                       return -EMSGSIZE;
-               num_pages = get_user_pages_fast(base, size, 0, &page[i]);
-               if (num_pages != size) {
-                       int j;
-
-                       for (j = 0; j < num_pages; j++)
-                               put_page(page[i + j]);
-                       return -EFAULT;
-               }
-               truesize = size * PAGE_SIZE;
-               skb->data_len += len;
-               skb->len += len;
-               skb->truesize += truesize;
-               atomic_add(truesize, &skb->sk->sk_wmem_alloc);
-               while (len) {
-                       int off = base & ~PAGE_MASK;
-                       int size = min_t(int, len, PAGE_SIZE - off);
-                       __skb_fill_page_desc(skb, i, page[i], off, size);
-                       skb_shinfo(skb)->nr_frags++;
-                       /* increase sk_wmem_alloc */
-                       base += size;
-                       len -= size;
-                       i++;
-               }
-               offset = 0;
-               ++from;
-       }
-       return 0;
-}
-
-static unsigned long iov_pages(const struct iovec *iv, int offset,
-                              unsigned long nr_segs)
-{
-       unsigned long seg, base;
-       int pages = 0, len, size;
-
-       while (nr_segs && (offset >= iv->iov_len)) {
-               offset -= iv->iov_len;
-               ++iv;
-               --nr_segs;
-       }
-
-       for (seg = 0; seg < nr_segs; seg++) {
-               base = (unsigned long)iv[seg].iov_base + offset;
-               len = iv[seg].iov_len - offset;
-               size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
-               pages += size;
-               offset = 0;
-       }
-
-       return pages;
-}
-
 /* Get packet from user space buffer */
 static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
                            void *msg_control, const struct iovec *iv,
@@ -1262,6 +1168,7 @@ static ssize_t tun_put_user(struct tun_struct *tun,
 {
        struct tun_pi pi = { 0, skb->protocol };
        ssize_t total = 0;
+       int vlan_offset = 0;
 
        if (!(tun->flags & TUN_NO_PI)) {
                if ((len -= sizeof(pi)) < 0)
@@ -1325,11 +1232,40 @@ static ssize_t tun_put_user(struct tun_struct *tun,
                total += tun->vnet_hdr_sz;
        }
 
-       len = min_t(int, skb->len, len);
+       if (!vlan_tx_tag_present(skb)) {
+               len = min_t(int, skb->len, len);
+       } else {
+               int copy, ret;
+               struct {
+                       __be16 h_vlan_proto;
+                       __be16 h_vlan_TCI;
+               } veth;
+
+               veth.h_vlan_proto = skb->vlan_proto;
+               veth.h_vlan_TCI = htons(vlan_tx_tag_get(skb));
+
+               vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
+               len = min_t(int, skb->len + VLAN_HLEN, len);
+
+               copy = min_t(int, vlan_offset, len);
+               ret = skb_copy_datagram_const_iovec(skb, 0, iv, total, copy);
+               len -= copy;
+               total += copy;
+               if (ret || !len)
+                       goto done;
+
+               copy = min_t(int, sizeof(veth), len);
+               ret = memcpy_toiovecend(iv, (void *)&veth, total, copy);
+               len -= copy;
+               total += copy;
+               if (ret || !len)
+                       goto done;
+       }
 
-       skb_copy_datagram_const_iovec(skb, 0, iv, total, len);
-       total += skb->len;
+       skb_copy_datagram_const_iovec(skb, vlan_offset, iv, total, len);
+       total += len;
 
+done:
        tun->dev->stats.tx_packets++;
        tun->dev->stats.tx_bytes += len;
 
@@ -1478,7 +1414,6 @@ static int tun_sendmsg(struct kiocb *iocb, struct socket *sock,
        return ret;
 }
 
-
 static int tun_recvmsg(struct kiocb *iocb, struct socket *sock,
                       struct msghdr *m, size_t total_len,
                       int flags)
@@ -1490,10 +1425,15 @@ static int tun_recvmsg(struct kiocb *iocb, struct socket *sock,
        if (!tun)
                return -EBADFD;
 
-       if (flags & ~(MSG_DONTWAIT|MSG_TRUNC)) {
+       if (flags & ~(MSG_DONTWAIT|MSG_TRUNC|MSG_ERRQUEUE)) {
                ret = -EINVAL;
                goto out;
        }
+       if (flags & MSG_ERRQUEUE) {
+               ret = sock_recv_errqueue(sock->sk, m, total_len,
+                                        SOL_PACKET, TUN_TX_TIMESTAMP);
+               goto out;
+       }
        ret = tun_do_read(tun, tfile, iocb, m->msg_iov, total_len,
                          flags & MSG_DONTWAIT);
        if (ret > total_len) {
@@ -1617,7 +1557,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
                if (err < 0)
                        return err;
 
-               err = tun_attach(tun, file);
+               err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER);
                if (err < 0)
                        return err;
 
@@ -1664,6 +1604,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
 
                dev_net_set(dev, net);
                dev->rtnl_link_ops = &tun_link_ops;
+               dev->ifindex = tfile->ifindex;
 
                tun = netdev_priv(dev);
                tun->dev = dev;
@@ -1684,12 +1625,13 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
                tun_flow_init(tun);
 
                dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST |
-                       TUN_USER_FEATURES;
+                                  TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
+                                  NETIF_F_HW_VLAN_STAG_TX;
                dev->features = dev->hw_features;
                dev->vlan_features = dev->features;
 
                INIT_LIST_HEAD(&tun->disabled);
-               err = tun_attach(tun, file);
+               err = tun_attach(tun, file, false);
                if (err < 0)
                        goto err_free_dev;
 
@@ -1853,7 +1795,7 @@ static int tun_set_queue(struct file *file, struct ifreq *ifr)
                ret = security_tun_dev_attach_queue(tun->security);
                if (ret < 0)
                        goto unlock;
-               ret = tun_attach(tun, file);
+               ret = tun_attach(tun, file, false);
        } else if (ifr->ifr_flags & IFF_DETACH_QUEUE) {
                tun = rtnl_dereference(tfile->tun);
                if (!tun || !(tun->flags & TUN_TAP_MQ) || tfile->detached)
@@ -1879,6 +1821,7 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
        kgid_t group;
        int sndbuf;
        int vnet_hdr_sz;
+       unsigned int ifindex;
        int ret;
 
        if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == 0x89) {
@@ -1913,6 +1856,19 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
                        ret = -EFAULT;
                goto unlock;
        }
+       if (cmd == TUNSETIFINDEX) {
+               ret = -EPERM;
+               if (tun)
+                       goto unlock;
+
+               ret = -EFAULT;
+               if (copy_from_user(&ifindex, argp, sizeof(ifindex)))
+                       goto unlock;
+
+               ret = 0;
+               tfile->ifindex = ifindex;
+               goto unlock;
+       }
 
        ret = -EBADFD;
        if (!tun)
@@ -1925,6 +1881,11 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
        case TUNGETIFF:
                tun_get_iff(current->nsproxy->net_ns, tun, &ifr);
 
+               if (tfile->detached)
+                       ifr.ifr_flags |= IFF_DETACH_QUEUE;
+               if (!tfile->socket.sk->sk_filter)
+                       ifr.ifr_flags |= IFF_NOFILTER;
+
                if (copy_to_user(argp, &ifr, ifreq_len))
                        ret = -EFAULT;
                break;
@@ -2081,6 +2042,16 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
                tun_detach_filter(tun, tun->numqueues);
                break;
 
+       case TUNGETFILTER:
+               ret = -EINVAL;
+               if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV)
+                       break;
+               ret = -EFAULT;
+               if (copy_to_user(argp, &tun->fprog, sizeof(tun->fprog)))
+                       break;
+               ret = 0;
+               break;
+
        default:
                ret = -EINVAL;
                break;
@@ -2161,6 +2132,7 @@ static int tun_chr_open(struct inode *inode, struct file * file)
        rcu_assign_pointer(tfile->tun, NULL);
        tfile->net = get_net(current->nsproxy->net_ns);
        tfile->flags = 0;
+       tfile->ifindex = 0;
 
        rcu_assign_pointer(tfile->socket.wq, &tfile->wq);
        init_waitqueue_head(&tfile->wq.wait);
@@ -2276,6 +2248,7 @@ static const struct ethtool_ops tun_ethtool_ops = {
        .get_msglevel   = tun_get_msglevel,
        .set_msglevel   = tun_set_msglevel,
        .get_link       = ethtool_op_get_link,
+       .get_ts_info    = ethtool_op_get_ts_info,
 };
 
 
index 346c032aa7955ba7ff09ca2a698cc1374e2ae630..bdaa12d07a127fae11a6209986d0473c32644f27 100644 (file)
@@ -178,6 +178,8 @@ struct asix_common_private {
        struct asix_rx_fixup_info rx_fixup_info;
 };
 
+extern const struct driver_info ax88172a_info;
+
 /* ASIX specific flags */
 #define FLAG_EEPROM_MAC                (1UL << 0)  /* init device MAC from eeprom */
 
index ad5d1e4384db7b3b6c3ab6f2406b5cd8075cd2d0..386a3df53678a23761308938221f4aa79b520538 100644 (file)
@@ -778,6 +778,9 @@ static int ax88178_change_mtu(struct net_device *net, int new_mtu)
        dev->hard_mtu = net->mtu + net->hard_header_len;
        ax88178_set_mfb(dev);
 
+       /* max qlen depend on hard_mtu and rx_urb_size */
+       usbnet_update_max_qlen(dev);
+
        return 0;
 }
 
@@ -943,8 +946,6 @@ static const struct driver_info hg20f9_info = {
        .data = FLAG_EEPROM_MAC,
 };
 
-extern const struct driver_info ax88172a_info;
-
 static const struct usb_device_id      products [] = {
 {
        // Linksys USB200M
index d012203b0f298dd1e1fc1a69192cda3d0c82c523..723b3879ecc2e195515037404f1bb4eae8a6f3f4 100644 (file)
@@ -161,7 +161,8 @@ static const struct net_device_ops ax88172a_netdev_ops = {
        .ndo_set_rx_mode        = asix_set_multicast,
 };
 
-int ax88172a_get_settings(struct net_device *net, struct ethtool_cmd *cmd)
+static int ax88172a_get_settings(struct net_device *net,
+                                struct ethtool_cmd *cmd)
 {
        if (!net->phydev)
                return -ENODEV;
@@ -169,7 +170,8 @@ int ax88172a_get_settings(struct net_device *net, struct ethtool_cmd *cmd)
        return phy_ethtool_gset(net->phydev, cmd);
 }
 
-int ax88172a_set_settings(struct net_device *net, struct ethtool_cmd *cmd)
+static int ax88172a_set_settings(struct net_device *net,
+                                struct ethtool_cmd *cmd)
 {
        if (!net->phydev)
                return -ENODEV;
@@ -177,7 +179,7 @@ int ax88172a_set_settings(struct net_device *net, struct ethtool_cmd *cmd)
        return phy_ethtool_sset(net->phydev, cmd);
 }
 
-int ax88172a_nway_reset(struct net_device *net)
+static int ax88172a_nway_reset(struct net_device *net)
 {
        if (!net->phydev)
                return -ENODEV;
index 2bc87e3a8141d259502bcfbfccc9e0a5b649800b..fb0caa289d0bfe53e8632305379afbc86abf2502 100644 (file)
@@ -688,6 +688,9 @@ static int ax88179_change_mtu(struct net_device *net, int new_mtu)
                                  2, 2, &tmp16);
        }
 
+       /* max qlen depend on hard_mtu and rx_urb_size */
+       usbnet_update_max_qlen(dev);
+
        return 0;
 }
 
@@ -1166,31 +1169,18 @@ ax88179_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
        int frame_size = dev->maxpacket;
        int mss = skb_shinfo(skb)->gso_size;
        int headroom;
-       int tailroom;
 
        tx_hdr1 = skb->len;
        tx_hdr2 = mss;
        if (((skb->len + 8) % frame_size) == 0)
                tx_hdr2 |= 0x80008000;  /* Enable padding */
 
-       headroom = skb_headroom(skb);
-       tailroom = skb_tailroom(skb);
-
-       if (!skb_header_cloned(skb) &&
-           !skb_cloned(skb) &&
-           (headroom + tailroom) >= 8) {
-               if (headroom < 8) {
-                       skb->data = memmove(skb->head + 8, skb->data, skb->len);
-                       skb_set_tail_pointer(skb, skb->len);
-               }
-       } else {
-               struct sk_buff *skb2;
+       headroom = skb_headroom(skb) - 8;
 
-               skb2 = skb_copy_expand(skb, 8, 0, flags);
+       if ((skb_header_cloned(skb) || headroom < 0) &&
+           pskb_expand_head(skb, headroom < 0 ? 8 : 0, 0, GFP_ATOMIC)) {
                dev_kfree_skb_any(skb);
-               skb = skb2;
-               if (!skb)
-                       return NULL;
+               return NULL;
        }
 
        skb_push(skb, 4);
index 11c51f275366a15999ad7684330bb644de76b557..f3fce412c0c1a38bb7a5522ebf0d2dd2139bbff5 100644 (file)
 #include <linux/crc32.h>
 #include <linux/if_vlan.h>
 #include <linux/uaccess.h>
+#include <linux/list.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
 
 /* Version Information */
-#define DRIVER_VERSION "v1.0.0 (2013/05/03)"
+#define DRIVER_VERSION "v1.01.0 (2013/08/12)"
 #define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
 #define DRIVER_DESC "Realtek RTL8152 Based USB 2.0 Ethernet Adapters"
 #define MODULENAME "r8152"
@@ -267,6 +270,12 @@ enum rtl_register_content {
        FULL_DUP        = 0x01,
 };
 
+#define RTL8152_MAX_TX         10
+#define RTL8152_MAX_RX         10
+#define INTBUFSIZE             2
+
+#define INTR_LINK              0x0004
+
 #define RTL8152_REQT_READ      0xc0
 #define RTL8152_REQT_WRITE     0x40
 #define RTL8152_REQ_GET_REGS   0x05
@@ -285,9 +294,9 @@ enum rtl_register_content {
 /* rtl8152 flags */
 enum rtl8152_flags {
        RTL8152_UNPLUG = 0,
-       RX_URB_FAIL,
        RTL8152_SET_RX_MODE,
-       WORK_ENABLE
+       WORK_ENABLE,
+       RTL8152_LINK_CHG,
 };
 
 /* Define these values to match your device */
@@ -311,21 +320,53 @@ struct tx_desc {
        u32 opts1;
 #define TX_FS                  (1 << 31) /* First segment of a packet */
 #define TX_LS                  (1 << 30) /* Final segment of a packet */
-#define TX_LEN_MASK            0xffff
+#define TX_LEN_MASK            0x3ffff
+
        u32 opts2;
+#define UDP_CS                 (1 << 31) /* Calculate UDP/IP checksum */
+#define TCP_CS                 (1 << 30) /* Calculate TCP/IP checksum */
+#define IPV4_CS                        (1 << 29) /* Calculate IPv4 checksum */
+#define IPV6_CS                        (1 << 28) /* Calculate IPv6 checksum */
+};
+
+struct r8152;
+
+struct rx_agg {
+       struct list_head list;
+       struct urb *urb;
+       struct r8152 *context;
+       void *buffer;
+       void *head;
+};
+
+struct tx_agg {
+       struct list_head list;
+       struct urb *urb;
+       struct r8152 *context;
+       void *buffer;
+       void *head;
+       u32 skb_num;
+       u32 skb_len;
 };
 
 struct r8152 {
        unsigned long flags;
        struct usb_device *udev;
        struct tasklet_struct tl;
+       struct usb_interface *intf;
        struct net_device *netdev;
-       struct urb *rx_urb, *tx_urb;
-       struct sk_buff *tx_skb, *rx_skb;
+       struct urb *intr_urb;
+       struct tx_agg tx_info[RTL8152_MAX_TX];
+       struct rx_agg rx_info[RTL8152_MAX_RX];
+       struct list_head rx_done, tx_free;
+       struct sk_buff_head tx_queue;
+       spinlock_t rx_lock, tx_lock;
        struct delayed_work schedule;
        struct mii_if_info mii;
+       int intr_interval;
        u32 msg_enable;
        u16 ocp_base;
+       u8 *intr_buff;
        u8 version;
        u8 speed;
 };
@@ -340,6 +381,7 @@ enum rtl_version {
  * The RTL chips use a 64 element hash table based on the Ethernet CRC.
  */
 static const int multicast_filter_limit = 32;
+static unsigned int rx_buf_sz = 16384;
 
 static
 int get_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data)
@@ -686,6 +728,9 @@ static void ocp_reg_write(struct r8152 *tp, u16 addr, u16 data)
        ocp_write_word(tp, MCU_TYPE_PLA, ocp_index, data);
 }
 
+static
+int r8152_submit_rx(struct r8152 *tp, struct rx_agg *agg, gfp_t mem_flags);
+
 static inline void set_ethernet_addr(struct r8152 *tp)
 {
        struct net_device *dev = tp->netdev;
@@ -716,26 +761,6 @@ static int rtl8152_set_mac_address(struct net_device *netdev, void *p)
        return 0;
 }
 
-static int alloc_all_urbs(struct r8152 *tp)
-{
-       tp->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
-       if (!tp->rx_urb)
-               return 0;
-       tp->tx_urb = usb_alloc_urb(0, GFP_KERNEL);
-       if (!tp->tx_urb) {
-               usb_free_urb(tp->rx_urb);
-               return 0;
-       }
-
-       return 1;
-}
-
-static void free_all_urbs(struct r8152 *tp)
-{
-       usb_free_urb(tp->rx_urb);
-       usb_free_urb(tp->tx_urb);
-}
-
 static struct net_device_stats *rtl8152_get_stats(struct net_device *dev)
 {
        return &dev->stats;
@@ -743,137 +768,574 @@ static struct net_device_stats *rtl8152_get_stats(struct net_device *dev)
 
 static void read_bulk_callback(struct urb *urb)
 {
-       struct r8152 *tp;
-       unsigned pkt_len;
-       struct sk_buff *skb;
        struct net_device *netdev;
-       struct net_device_stats *stats;
+       unsigned long flags;
        int status = urb->status;
+       struct rx_agg *agg;
+       struct r8152 *tp;
        int result;
-       struct rx_desc *rx_desc;
 
-       tp = urb->context;
+       agg = urb->context;
+       if (!agg)
+               return;
+
+       tp = agg->context;
        if (!tp)
                return;
+
        if (test_bit(RTL8152_UNPLUG, &tp->flags))
                return;
+
+       if (!test_bit(WORK_ENABLE, &tp->flags))
+               return;
+
        netdev = tp->netdev;
-       if (!netif_device_present(netdev))
+
+       /* When link down, the driver would cancel all bulks. */
+       /* This avoid the re-submitting bulk */
+       if (!netif_carrier_ok(netdev))
                return;
 
-       stats = rtl8152_get_stats(netdev);
        switch (status) {
        case 0:
-               break;
+               if (urb->actual_length < ETH_ZLEN)
+                       break;
+
+               spin_lock_irqsave(&tp->rx_lock, flags);
+               list_add_tail(&agg->list, &tp->rx_done);
+               spin_unlock_irqrestore(&tp->rx_lock, flags);
+               tasklet_schedule(&tp->tl);
+               return;
        case -ESHUTDOWN:
                set_bit(RTL8152_UNPLUG, &tp->flags);
                netif_device_detach(tp->netdev);
+               return;
        case -ENOENT:
                return; /* the urb is in unlink state */
        case -ETIME:
                pr_warn_ratelimited("may be reset is needed?..\n");
-               goto goon;
+               break;
        default:
                pr_warn_ratelimited("Rx status %d\n", status);
-               goto goon;
+               break;
        }
 
-       /* protect against short packets (tell me why we got some?!?) */
-       if (urb->actual_length < sizeof(*rx_desc))
-               goto goon;
-
-
-       rx_desc = (struct rx_desc *)urb->transfer_buffer;
-       pkt_len = le32_to_cpu(rx_desc->opts1) & RX_LEN_MASK;
-       if (urb->actual_length < sizeof(struct rx_desc) + pkt_len)
-               goto goon;
-
-       skb = netdev_alloc_skb_ip_align(netdev, pkt_len);
-       if (!skb)
-               goto goon;
-
-       memcpy(skb->data, tp->rx_skb->data + sizeof(struct rx_desc), pkt_len);
-       skb_put(skb, pkt_len);
-       skb->protocol = eth_type_trans(skb, netdev);
-       netif_rx(skb);
-       stats->rx_packets++;
-       stats->rx_bytes += pkt_len;
-goon:
-       usb_fill_bulk_urb(tp->rx_urb, tp->udev, usb_rcvbulkpipe(tp->udev, 1),
-                     tp->rx_skb->data, RTL8152_RMS + sizeof(struct rx_desc),
-                     (usb_complete_t)read_bulk_callback, tp);
-       result = usb_submit_urb(tp->rx_urb, GFP_ATOMIC);
+       result = r8152_submit_rx(tp, agg, GFP_ATOMIC);
        if (result == -ENODEV) {
                netif_device_detach(tp->netdev);
        } else if (result) {
-               set_bit(RX_URB_FAIL, &tp->flags);
-               goto resched;
+               spin_lock_irqsave(&tp->rx_lock, flags);
+               list_add_tail(&agg->list, &tp->rx_done);
+               spin_unlock_irqrestore(&tp->rx_lock, flags);
+               tasklet_schedule(&tp->tl);
+       }
+}
+
+static void write_bulk_callback(struct urb *urb)
+{
+       struct net_device_stats *stats;
+       unsigned long flags;
+       struct tx_agg *agg;
+       struct r8152 *tp;
+       int status = urb->status;
+
+       agg = urb->context;
+       if (!agg)
+               return;
+
+       tp = agg->context;
+       if (!tp)
+               return;
+
+       stats = rtl8152_get_stats(tp->netdev);
+       if (status) {
+               pr_warn_ratelimited("Tx status %d\n", status);
+               stats->tx_errors += agg->skb_num;
        } else {
-               clear_bit(RX_URB_FAIL, &tp->flags);
+               stats->tx_packets += agg->skb_num;
+               stats->tx_bytes += agg->skb_len;
        }
 
-       return;
-resched:
-       tasklet_schedule(&tp->tl);
+       spin_lock_irqsave(&tp->tx_lock, flags);
+       list_add_tail(&agg->list, &tp->tx_free);
+       spin_unlock_irqrestore(&tp->tx_lock, flags);
+
+       if (!netif_carrier_ok(tp->netdev))
+               return;
+
+       if (!test_bit(WORK_ENABLE, &tp->flags))
+               return;
+
+       if (test_bit(RTL8152_UNPLUG, &tp->flags))
+               return;
+
+       if (!skb_queue_empty(&tp->tx_queue))
+               tasklet_schedule(&tp->tl);
 }
 
-static void rx_fixup(unsigned long data)
+static void intr_callback(struct urb *urb)
 {
        struct r8152 *tp;
-       int status;
+       __u16 *d;
+       int status = urb->status;
+       int res;
+
+       tp = urb->context;
+       if (!tp)
+               return;
 
-       tp = (struct r8152 *)data;
        if (!test_bit(WORK_ENABLE, &tp->flags))
                return;
 
-       status = usb_submit_urb(tp->rx_urb, GFP_ATOMIC);
-       if (status == -ENODEV) {
+       if (test_bit(RTL8152_UNPLUG, &tp->flags))
+               return;
+
+       switch (status) {
+       case 0:                 /* success */
+               break;
+       case -ECONNRESET:       /* unlink */
+       case -ESHUTDOWN:
                netif_device_detach(tp->netdev);
-       } else if (status) {
-               set_bit(RX_URB_FAIL, &tp->flags);
-               goto tlsched;
+       case -ENOENT:
+               return;
+       case -EOVERFLOW:
+               netif_info(tp, intr, tp->netdev, "intr status -EOVERFLOW\n");
+               goto resubmit;
+       /* -EPIPE:  should clear the halt */
+       default:
+               netif_info(tp, intr, tp->netdev, "intr status %d\n", status);
+               goto resubmit;
+       }
+
+       d = urb->transfer_buffer;
+       if (INTR_LINK & __le16_to_cpu(d[0])) {
+               if (!(tp->speed & LINK_STATUS)) {
+                       set_bit(RTL8152_LINK_CHG, &tp->flags);
+                       schedule_delayed_work(&tp->schedule, 0);
+               }
        } else {
-               clear_bit(RX_URB_FAIL, &tp->flags);
+               if (tp->speed & LINK_STATUS) {
+                       set_bit(RTL8152_LINK_CHG, &tp->flags);
+                       schedule_delayed_work(&tp->schedule, 0);
+               }
        }
 
-       return;
-tlsched:
-       tasklet_schedule(&tp->tl);
+resubmit:
+       res = usb_submit_urb(urb, GFP_ATOMIC);
+       if (res == -ENODEV)
+               netif_device_detach(tp->netdev);
+       else if (res)
+               netif_err(tp, intr, tp->netdev,
+                       "can't resubmit intr, status %d\n", res);
 }
 
-static void write_bulk_callback(struct urb *urb)
+static inline void *rx_agg_align(void *data)
+{
+       return (void *)ALIGN((uintptr_t)data, 8);
+}
+
+static inline void *tx_agg_align(void *data)
+{
+       return (void *)ALIGN((uintptr_t)data, 4);
+}
+
+static void free_all_mem(struct r8152 *tp)
+{
+       int i;
+
+       for (i = 0; i < RTL8152_MAX_RX; i++) {
+               if (tp->rx_info[i].urb) {
+                       usb_free_urb(tp->rx_info[i].urb);
+                       tp->rx_info[i].urb = NULL;
+               }
+
+               if (tp->rx_info[i].buffer) {
+                       kfree(tp->rx_info[i].buffer);
+                       tp->rx_info[i].buffer = NULL;
+                       tp->rx_info[i].head = NULL;
+               }
+       }
+
+       for (i = 0; i < RTL8152_MAX_TX; i++) {
+               if (tp->tx_info[i].urb) {
+                       usb_free_urb(tp->tx_info[i].urb);
+                       tp->tx_info[i].urb = NULL;
+               }
+
+               if (tp->tx_info[i].buffer) {
+                       kfree(tp->tx_info[i].buffer);
+                       tp->tx_info[i].buffer = NULL;
+                       tp->tx_info[i].head = NULL;
+               }
+       }
+
+       if (tp->intr_urb) {
+               usb_free_urb(tp->intr_urb);
+               tp->intr_urb = NULL;
+       }
+
+       if (tp->intr_buff) {
+               kfree(tp->intr_buff);
+               tp->intr_buff = NULL;
+       }
+}
+
+static int alloc_all_mem(struct r8152 *tp)
+{
+       struct net_device *netdev = tp->netdev;
+       struct usb_interface *intf = tp->intf;
+       struct usb_host_interface *alt = intf->cur_altsetting;
+       struct usb_host_endpoint *ep_intr = alt->endpoint + 2;
+       struct urb *urb;
+       int node, i;
+       u8 *buf;
+
+       node = netdev->dev.parent ? dev_to_node(netdev->dev.parent) : -1;
+
+       spin_lock_init(&tp->rx_lock);
+       spin_lock_init(&tp->tx_lock);
+       INIT_LIST_HEAD(&tp->rx_done);
+       INIT_LIST_HEAD(&tp->tx_free);
+       skb_queue_head_init(&tp->tx_queue);
+
+       for (i = 0; i < RTL8152_MAX_RX; i++) {
+               buf = kmalloc_node(rx_buf_sz, GFP_KERNEL, node);
+               if (!buf)
+                       goto err1;
+
+               if (buf != rx_agg_align(buf)) {
+                       kfree(buf);
+                       buf = kmalloc_node(rx_buf_sz + 8, GFP_KERNEL, node);
+                       if (!buf)
+                               goto err1;
+               }
+
+               urb = usb_alloc_urb(0, GFP_KERNEL);
+               if (!urb) {
+                       kfree(buf);
+                       goto err1;
+               }
+
+               INIT_LIST_HEAD(&tp->rx_info[i].list);
+               tp->rx_info[i].context = tp;
+               tp->rx_info[i].urb = urb;
+               tp->rx_info[i].buffer = buf;
+               tp->rx_info[i].head = rx_agg_align(buf);
+       }
+
+       for (i = 0; i < RTL8152_MAX_TX; i++) {
+               buf = kmalloc_node(rx_buf_sz, GFP_KERNEL, node);
+               if (!buf)
+                       goto err1;
+
+               if (buf != tx_agg_align(buf)) {
+                       kfree(buf);
+                       buf = kmalloc_node(rx_buf_sz + 4, GFP_KERNEL, node);
+                       if (!buf)
+                               goto err1;
+               }
+
+               urb = usb_alloc_urb(0, GFP_KERNEL);
+               if (!urb) {
+                       kfree(buf);
+                       goto err1;
+               }
+
+               INIT_LIST_HEAD(&tp->tx_info[i].list);
+               tp->tx_info[i].context = tp;
+               tp->tx_info[i].urb = urb;
+               tp->tx_info[i].buffer = buf;
+               tp->tx_info[i].head = tx_agg_align(buf);
+
+               list_add_tail(&tp->tx_info[i].list, &tp->tx_free);
+       }
+
+       tp->intr_urb = usb_alloc_urb(0, GFP_KERNEL);
+       if (!tp->intr_urb)
+               goto err1;
+
+       tp->intr_buff = kmalloc(INTBUFSIZE, GFP_KERNEL);
+       if (!tp->intr_buff)
+               goto err1;
+
+       tp->intr_interval = (int)ep_intr->desc.bInterval;
+       usb_fill_int_urb(tp->intr_urb, tp->udev, usb_rcvintpipe(tp->udev, 3),
+                    tp->intr_buff, INTBUFSIZE, intr_callback,
+                    tp, tp->intr_interval);
+
+       return 0;
+
+err1:
+       free_all_mem(tp);
+       return -ENOMEM;
+}
+
+static struct tx_agg *r8152_get_tx_agg(struct r8152 *tp)
+{
+       struct tx_agg *agg = NULL;
+       unsigned long flags;
+
+       spin_lock_irqsave(&tp->tx_lock, flags);
+       if (!list_empty(&tp->tx_free)) {
+               struct list_head *cursor;
+
+               cursor = tp->tx_free.next;
+               list_del_init(cursor);
+               agg = list_entry(cursor, struct tx_agg, list);
+       }
+       spin_unlock_irqrestore(&tp->tx_lock, flags);
+
+       return agg;
+}
+
+/* Build the hardware TX descriptor for @skb.
+ *
+ * opts1 always carries the frame length plus the first/last-segment
+ * flags (TX_FS | TX_LS).  When the skb requests checksum offload
+ * (CHECKSUM_PARTIAL), opts2 is programmed for IPv4/IPv6 plus TCP/UDP
+ * checksum insertion; for TCP the transport-header offset is encoded
+ * into bits 17..31 of opts2.  An unrecognised L4 protocol triggers a
+ * one-shot WARN, as the stack should only request offload for TCP/UDP
+ * given the features this driver advertises.
+ */
+static void
+r8152_tx_csum(struct r8152 *tp, struct tx_desc *desc, struct sk_buff *skb)
+{
+       memset(desc, 0, sizeof(*desc));
+
+       desc->opts1 = cpu_to_le32((skb->len & TX_LEN_MASK) | TX_FS | TX_LS);
+
+       if (skb->ip_summed == CHECKSUM_PARTIAL) {
+               __be16 protocol;
+               u8 ip_protocol;
+               u32 opts2 = 0;
+
+               /* For 802.1Q frames, classify on the encapsulated
+                * protocol rather than the VLAN ethertype.
+                */
+               if (skb->protocol == htons(ETH_P_8021Q))
+                       protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
+               else
+                       protocol = skb->protocol;
+
+               switch (protocol) {
+               case htons(ETH_P_IP):
+                       opts2 |= IPV4_CS;
+                       ip_protocol = ip_hdr(skb)->protocol;
+                       break;
+
+               case htons(ETH_P_IPV6):
+                       opts2 |= IPV6_CS;
+                       ip_protocol = ipv6_hdr(skb)->nexthdr;
+                       break;
+
+               default:
+                       /* Unknown network protocol: fall through to the
+                        * WARN below via a protocol no offload matches.
+                        */
+                       ip_protocol = IPPROTO_RAW;
+                       break;
+               }
+
+               if (ip_protocol == IPPROTO_TCP) {
+                       opts2 |= TCP_CS;
+                       /* Transport-header offset, 15 bits, at bit 17. */
+                       opts2 |= (skb_transport_offset(skb) & 0x7fff) << 17;
+               } else if (ip_protocol == IPPROTO_UDP) {
+                       opts2 |= UDP_CS;
+               } else {
+                       WARN_ON_ONCE(1);
+               }
+
+               desc->opts2 = cpu_to_le32(opts2);
+       }
+}
+
+/* Pack as many queued skbs as fit into @agg's buffer -- each preceded
+ * by its own tx_desc and aligned by tx_agg_align() -- then submit the
+ * aggregate as a single bulk-out URB on endpoint 2.
+ *
+ * An skb that no longer fits is pushed back to the head of tx_queue so
+ * it is sent first on the next fill.  Returns the usb_submit_urb()
+ * result (0 on success).
+ *
+ * NOTE(review): the space budget uses rx_buf_sz -- presumably the TX
+ * aggregation buffers are allocated with the same size in
+ * alloc_all_mem() (not visible here); confirm.
+ */
+static int r8152_tx_agg_fill(struct r8152 *tp, struct tx_agg *agg)
+{
+       u32 remain;
+       u8 *tx_data;
+
+       tx_data = agg->head;
+       agg->skb_num = agg->skb_len = 0;
+       /* Reserve room for at least one descriptor up front. */
+       remain = rx_buf_sz - sizeof(struct tx_desc);
+
+       while (remain >= ETH_ZLEN) {
+               struct tx_desc *tx_desc;
+               struct sk_buff *skb;
+               unsigned int len;
+
+               skb = skb_dequeue(&tp->tx_queue);
+               if (!skb)
+                       break;
+
+               len = skb->len;
+               if (remain < len) {
+                       /* Doesn't fit: requeue at the head so ordering
+                        * is preserved for the next aggregate.
+                        */
+                       skb_queue_head(&tp->tx_queue, skb);
+                       break;
+               }
+
+               tx_desc = (struct tx_desc *)tx_data;
+               tx_data += sizeof(*tx_desc);
+
+               r8152_tx_csum(tp, tx_desc, skb);
+               memcpy(tx_data, skb->data, len);
+               agg->skb_num++;
+               agg->skb_len += len;
+               dev_kfree_skb_any(skb);
+
+               /* Align the next descriptor and recompute the space
+                * left after it.
+                */
+               tx_data = tx_agg_align(tx_data + len);
+               remain = rx_buf_sz - sizeof(*tx_desc) -
+                        (u32)((void *)tx_data - agg->head);
+       }
+
+       usb_fill_bulk_urb(agg->urb, tp->udev, usb_sndbulkpipe(tp->udev, 2),
+                         agg->head, (int)(tx_data - (u8 *)agg->head),
+                         (usb_complete_t)write_bulk_callback, agg);
+
+       return usb_submit_urb(agg->urb, GFP_ATOMIC);
+}
+
+/* Tasklet-context RX path: walk the list of completed RX aggregates
+ * (tp->rx_done), split each URB buffer into rx_desc-framed packets,
+ * hand them to the stack via netif_rx(), then resubmit the URB.
+ *
+ * NOTE(review): rx_lock is dropped while each aggregate is processed
+ * and re-taken at "submit"; 'next' was sampled under the earlier lock
+ * hold.  Presumably only the completion handler appends to rx_done, so
+ * 'next' stays valid across the window -- confirm against
+ * read_bulk_callback (not visible here).
+ */
+static void rx_bottom(struct r8152 *tp)
+{
+       unsigned long flags;
+       struct list_head *cursor, *next;
+
+       spin_lock_irqsave(&tp->rx_lock, flags);
+       list_for_each_safe(cursor, next, &tp->rx_done) {
+               struct rx_desc *rx_desc;
+               struct rx_agg *agg;
+               unsigned pkt_len;
+               int len_used = 0;
+               struct urb *urb;
+               u8 *rx_data;
+               int ret;
+
+               list_del_init(cursor);
+               spin_unlock_irqrestore(&tp->rx_lock, flags);
+
+               agg = list_entry(cursor, struct rx_agg, list);
+               urb = agg->urb;
+               /* Runt transfer: nothing useful, just resubmit. */
+               if (urb->actual_length < ETH_ZLEN)
+                       goto submit;
+
+               rx_desc = agg->head;
+               rx_data = agg->head;
+               pkt_len = le32_to_cpu(rx_desc->opts1) & RX_LEN_MASK;
+               len_used += sizeof(struct rx_desc) + pkt_len;
+
+               /* Consume packets while the descriptor claims fit
+                * within what the hardware actually transferred.
+                */
+               while (urb->actual_length >= len_used) {
+                       struct net_device *netdev = tp->netdev;
+                       struct net_device_stats *stats;
+                       struct sk_buff *skb;
+
+                       if (pkt_len < ETH_ZLEN)
+                               break;
+
+                       stats = rtl8152_get_stats(netdev);
+
+                       pkt_len -= 4; /* CRC */
+                       rx_data += sizeof(struct rx_desc);
+
+                       skb = netdev_alloc_skb_ip_align(netdev, pkt_len);
+                       if (!skb) {
+                               stats->rx_dropped++;
+                               break;
+                       }
+                       memcpy(skb->data, rx_data, pkt_len);
+                       skb_put(skb, pkt_len);
+                       skb->protocol = eth_type_trans(skb, netdev);
+                       netif_rx(skb);
+                       stats->rx_packets++;
+                       stats->rx_bytes += pkt_len;
+
+                       /* Advance past payload + CRC to the (aligned)
+                        * next descriptor and recompute usage.
+                        */
+                       rx_data = rx_agg_align(rx_data + pkt_len + 4);
+                       rx_desc = (struct rx_desc *)rx_data;
+                       pkt_len = le32_to_cpu(rx_desc->opts1) & RX_LEN_MASK;
+                       len_used = (int)(rx_data - (u8 *)agg->head);
+                       len_used += sizeof(struct rx_desc) + pkt_len;
+               }
+
+submit:
+               ret = r8152_submit_rx(tp, agg, GFP_ATOMIC);
+               spin_lock_irqsave(&tp->rx_lock, flags);
+               if (ret && ret != -ENODEV) {
+                       /* Resubmit failed (and device still present):
+                        * put the aggregate back where it was and retry
+                        * from the tasklet later.
+                        */
+                       list_add_tail(&agg->list, next);
+                       tasklet_schedule(&tp->tl);
+               }
+       }
+       spin_unlock_irqrestore(&tp->rx_lock, flags);
+}
+
+/* Tasklet-context TX path: while there are queued skbs and free TX
+ * aggregates, fill and submit aggregates via r8152_tx_agg_fill().
+ * Stops on the first submission error: -ENODEV detaches the netdev,
+ * any other error drops the aggregate's packets (counted in
+ * tx_dropped) and recycles the aggregate onto tx_free.
+ */
+static void tx_bottom(struct r8152 *tp)
+{
+       int res;
+
+       do {
+               struct tx_agg *agg;
+
+               if (skb_queue_empty(&tp->tx_queue))
+                       break;
+
+               agg = r8152_get_tx_agg(tp);
+               if (!agg)
+                       break;
+
+               res = r8152_tx_agg_fill(tp, agg);
+               if (res) {
+                       struct net_device_stats *stats;
+                       struct net_device *netdev;
+                       unsigned long flags;
+
+                       netdev = tp->netdev;
+                       stats = rtl8152_get_stats(netdev);
+
+                       if (res == -ENODEV) {
+                               netif_device_detach(netdev);
+                       } else {
+                               netif_warn(tp, tx_err, netdev,
+                                          "failed tx_urb %d\n", res);
+                               /* The skbs were already copied and
+                                * freed in r8152_tx_agg_fill(); all we
+                                * can do is account and recycle.
+                                */
+                               stats->tx_dropped += agg->skb_num;
+                               spin_lock_irqsave(&tp->tx_lock, flags);
+                               list_add_tail(&agg->list, &tp->tx_free);
+                               spin_unlock_irqrestore(&tp->tx_lock, flags);
+                       }
+               }
+       } while (res == 0);
+}
+
+static void bottom_half(unsigned long data)
 {
        struct r8152 *tp;
-       int status = urb->status;
 
-       tp = urb->context;
-       if (!tp)
+       tp = (struct r8152 *)data;
+
+       if (test_bit(RTL8152_UNPLUG, &tp->flags))
                return;
-       dev_kfree_skb_irq(tp->tx_skb);
-       if (!netif_device_present(tp->netdev))
+
+       if (!test_bit(WORK_ENABLE, &tp->flags))
                return;
-       if (status)
-               dev_info(&urb->dev->dev, "%s: Tx status %d\n",
-                        tp->netdev->name, status);
-       tp->netdev->trans_start = jiffies;
-       netif_wake_queue(tp->netdev);
+
+       /* When the link is down, the driver cancels all bulk transfers. */
+       /* This check avoids re-submitting them here.                    */
+       if (!netif_carrier_ok(tp->netdev))
+               return;
+
+       rx_bottom(tp);
+       tx_bottom(tp);
+}
+
+/* (Re)arm one RX aggregate: fill its URB for bulk-in endpoint 1 with
+ * the full rx_buf_sz buffer and submit it.  Returns the
+ * usb_submit_urb() result; callers decide how to handle failure.
+ */
+static
+int r8152_submit_rx(struct r8152 *tp, struct rx_agg *agg, gfp_t mem_flags)
+{
+       usb_fill_bulk_urb(agg->urb, tp->udev, usb_rcvbulkpipe(tp->udev, 1),
+                     agg->head, rx_buf_sz,
+                     (usb_complete_t)read_bulk_callback, agg);
+
+       return usb_submit_urb(agg->urb, mem_flags);
+}
 
 static void rtl8152_tx_timeout(struct net_device *netdev)
 {
        struct r8152 *tp = netdev_priv(netdev);
-       struct net_device_stats *stats = rtl8152_get_stats(netdev);
+       int i;
+
        netif_warn(tp, tx_err, netdev, "Tx timeout.\n");
-       usb_unlink_urb(tp->tx_urb);
-       stats->tx_errors++;
+       for (i = 0; i < RTL8152_MAX_TX; i++)
+               usb_unlink_urb(tp->tx_info[i].urb);
 }
 
 static void rtl8152_set_rx_mode(struct net_device *netdev)
 {
        struct r8152 *tp = netdev_priv(netdev);
 
-       if (tp->speed & LINK_STATUS)
+       if (tp->speed & LINK_STATUS) {
                set_bit(RTL8152_SET_RX_MODE, &tp->flags);
+               schedule_delayed_work(&tp->schedule, 0);
+       }
 }
 
 static void _rtl8152_set_rx_mode(struct net_device *netdev)
@@ -923,33 +1385,39 @@ static netdev_tx_t rtl8152_start_xmit(struct sk_buff *skb,
 {
        struct r8152 *tp = netdev_priv(netdev);
        struct net_device_stats *stats = rtl8152_get_stats(netdev);
+       unsigned long flags;
+       struct tx_agg *agg = NULL;
        struct tx_desc *tx_desc;
        unsigned int len;
+       u8 *tx_data;
        int res;
 
-       netif_stop_queue(netdev);
-       len = skb->len;
-       if (skb_header_cloned(skb) || skb_headroom(skb) < sizeof(*tx_desc)) {
-               struct sk_buff *tx_skb;
+       skb_tx_timestamp(skb);
 
-               tx_skb = skb_copy_expand(skb, sizeof(*tx_desc), 0, GFP_ATOMIC);
-               dev_kfree_skb_any(skb);
-               if (!tx_skb) {
-                       stats->tx_dropped++;
-                       netif_wake_queue(netdev);
-                       return NETDEV_TX_OK;
-               }
-               skb = tx_skb;
+       /* If tx_queue is not empty, at least one previous packet is still */
+       /* waiting to be sent. Don't send the current one before it.       */
+       if (skb_queue_empty(&tp->tx_queue))
+               agg = r8152_get_tx_agg(tp);
+
+       if (!agg) {
+               skb_queue_tail(&tp->tx_queue, skb);
+               return NETDEV_TX_OK;
        }
-       tx_desc = (struct tx_desc *)skb_push(skb, sizeof(*tx_desc));
-       memset(tx_desc, 0, sizeof(*tx_desc));
-       tx_desc->opts1 = cpu_to_le32((len & TX_LEN_MASK) | TX_FS | TX_LS);
-       tp->tx_skb = skb;
-       skb_tx_timestamp(skb);
-       usb_fill_bulk_urb(tp->tx_urb, tp->udev, usb_sndbulkpipe(tp->udev, 2),
-                         skb->data, skb->len,
-                         (usb_complete_t)write_bulk_callback, tp);
-       res = usb_submit_urb(tp->tx_urb, GFP_ATOMIC);
+
+       tx_desc = (struct tx_desc *)agg->head;
+       tx_data = agg->head + sizeof(*tx_desc);
+       agg->skb_num = agg->skb_len = 0;
+
+       len = skb->len;
+       r8152_tx_csum(tp, tx_desc, skb);
+       memcpy(tx_data, skb->data, len);
+       dev_kfree_skb_any(skb);
+       agg->skb_num++;
+       agg->skb_len += len;
+       usb_fill_bulk_urb(agg->urb, tp->udev, usb_sndbulkpipe(tp->udev, 2),
+                         agg->head, len + sizeof(*tx_desc),
+                         (usb_complete_t)write_bulk_callback, agg);
+       res = usb_submit_urb(agg->urb, GFP_ATOMIC);
        if (res) {
                /* Can we get/handle EPIPE here? */
                if (res == -ENODEV) {
@@ -957,12 +1425,11 @@ static netdev_tx_t rtl8152_start_xmit(struct sk_buff *skb,
                } else {
                        netif_warn(tp, tx_err, netdev,
                                   "failed tx_urb %d\n", res);
-                       stats->tx_errors++;
-                       netif_start_queue(netdev);
+                       stats->tx_dropped++;
+                       spin_lock_irqsave(&tp->tx_lock, flags);
+                       list_add_tail(&agg->list, &tp->tx_free);
+                       spin_unlock_irqrestore(&tp->tx_lock, flags);
                }
-       } else {
-               stats->tx_packets++;
-               stats->tx_bytes += skb->len;
        }
 
        return NETDEV_TX_OK;
@@ -999,17 +1466,18 @@ static inline u8 rtl8152_get_speed(struct r8152 *tp)
 
 static int rtl8152_enable(struct r8152 *tp)
 {
-       u32     ocp_data;
+       u32 ocp_data;
+       int i, ret;
        u8 speed;
 
        speed = rtl8152_get_speed(tp);
-       if (speed & _100bps) {
+       if (speed & _10bps) {
                ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEEP_CR);
-               ocp_data &= ~EEEP_CR_EEEP_TX;
+               ocp_data |= EEEP_CR_EEEP_TX;
                ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEEP_CR, ocp_data);
        } else {
                ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEEP_CR);
-               ocp_data |= EEEP_CR_EEEP_TX;
+               ocp_data &= ~EEEP_CR_EEEP_TX;
                ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEEP_CR, ocp_data);
        }
 
@@ -1023,23 +1491,34 @@ static int rtl8152_enable(struct r8152 *tp)
        ocp_data &= ~RXDY_GATED_EN;
        ocp_write_word(tp, MCU_TYPE_PLA, PLA_MISC_1, ocp_data);
 
-       usb_fill_bulk_urb(tp->rx_urb, tp->udev, usb_rcvbulkpipe(tp->udev, 1),
-                     tp->rx_skb->data, RTL8152_RMS + sizeof(struct rx_desc),
-                     (usb_complete_t)read_bulk_callback, tp);
+       INIT_LIST_HEAD(&tp->rx_done);
+       ret = 0;
+       for (i = 0; i < RTL8152_MAX_RX; i++) {
+               INIT_LIST_HEAD(&tp->rx_info[i].list);
+               ret |= r8152_submit_rx(tp, &tp->rx_info[i], GFP_KERNEL);
+       }
 
-       return usb_submit_urb(tp->rx_urb, GFP_KERNEL);
+       return ret;
 }
 
 static void rtl8152_disable(struct r8152 *tp)
 {
-       u32     ocp_data;
-       int     i;
+       struct net_device_stats *stats = rtl8152_get_stats(tp->netdev);
+       struct sk_buff *skb;
+       u32 ocp_data;
+       int i;
 
        ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR);
        ocp_data &= ~RCR_ACPT_ALL;
        ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data);
 
-       usb_kill_urb(tp->tx_urb);
+       while ((skb = skb_dequeue(&tp->tx_queue))) {
+               dev_kfree_skb(skb);
+               stats->tx_dropped++;
+       }
+
+       for (i = 0; i < RTL8152_MAX_TX; i++)
+               usb_kill_urb(tp->tx_info[i].urb);
 
        ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MISC_1);
        ocp_data |= RXDY_GATED_EN;
@@ -1058,7 +1537,8 @@ static void rtl8152_disable(struct r8152 *tp)
                mdelay(1);
        }
 
-       usb_kill_urb(tp->rx_urb);
+       for (i = 0; i < RTL8152_MAX_RX; i++)
+               usb_kill_urb(tp->rx_info[i].urb);
 
        rtl8152_nic_reset(tp);
 }
@@ -1269,7 +1749,6 @@ static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u16 speed, u8 duplex)
        r8152_mdio_write(tp, MII_BMCR, bmcr);
 
 out:
-       schedule_delayed_work(&tp->schedule, 5 * HZ);
 
        return ret;
 }
@@ -1292,6 +1771,7 @@ static void set_carrier(struct r8152 *tp)
        struct net_device *netdev = tp->netdev;
        u8 speed;
 
+       clear_bit(RTL8152_LINK_CHG, &tp->flags);
        speed = rtl8152_get_speed(tp);
 
        if (speed & LINK_STATUS) {
@@ -1303,7 +1783,9 @@ static void set_carrier(struct r8152 *tp)
        } else {
                if (tp->speed & LINK_STATUS) {
                        netif_carrier_off(netdev);
+                       tasklet_disable(&tp->tl);
                        rtl8152_disable(tp);
+                       tasklet_enable(&tp->tl);
                }
        }
        tp->speed = speed;
@@ -1319,13 +1801,12 @@ static void rtl_work_func_t(struct work_struct *work)
        if (test_bit(RTL8152_UNPLUG, &tp->flags))
                goto out1;
 
-       set_carrier(tp);
+       if (test_bit(RTL8152_LINK_CHG, &tp->flags))
+               set_carrier(tp);
 
        if (test_bit(RTL8152_SET_RX_MODE, &tp->flags))
                _rtl8152_set_rx_mode(tp->netdev);
 
-       schedule_delayed_work(&tp->schedule, HZ);
-
 out1:
        return;
 }
@@ -1335,28 +1816,20 @@ static int rtl8152_open(struct net_device *netdev)
        struct r8152 *tp = netdev_priv(netdev);
        int res = 0;
 
-       tp->speed = rtl8152_get_speed(tp);
-       if (tp->speed & LINK_STATUS) {
-               res = rtl8152_enable(tp);
-               if (res) {
-                       if (res == -ENODEV)
-                               netif_device_detach(tp->netdev);
-
-                       netif_err(tp, ifup, netdev,
-                                 "rtl8152_open failed: %d\n", res);
-                       return res;
-               }
-
-               netif_carrier_on(netdev);
-       } else {
-               netif_stop_queue(netdev);
-               netif_carrier_off(netdev);
+       res = usb_submit_urb(tp->intr_urb, GFP_KERNEL);
+       if (res) {
+               if (res == -ENODEV)
+                       netif_device_detach(tp->netdev);
+               netif_warn(tp, ifup, netdev,
+                       "intr_urb submit failed: %d\n", res);
+               return res;
        }
 
        rtl8152_set_speed(tp, AUTONEG_ENABLE, SPEED_100, DUPLEX_FULL);
+       tp->speed = 0;
+       netif_carrier_off(netdev);
        netif_start_queue(netdev);
        set_bit(WORK_ENABLE, &tp->flags);
-       schedule_delayed_work(&tp->schedule, 0);
 
        return res;
 }
@@ -1366,10 +1839,13 @@ static int rtl8152_close(struct net_device *netdev)
        struct r8152 *tp = netdev_priv(netdev);
        int res = 0;
 
+       usb_kill_urb(tp->intr_urb);
        clear_bit(WORK_ENABLE, &tp->flags);
        cancel_delayed_work_sync(&tp->schedule);
        netif_stop_queue(netdev);
+       tasklet_disable(&tp->tl);
        rtl8152_disable(tp);
+       tasklet_enable(&tp->tl);
 
        return res;
 }
@@ -1429,8 +1905,8 @@ static void r8152b_hw_phy_cfg(struct r8152 *tp)
 
 static void r8152b_init(struct r8152 *tp)
 {
-       u32     ocp_data;
-       int     i;
+       u32 ocp_data;
+       int i;
 
        rtl_clear_bp(tp);
 
@@ -1475,9 +1951,9 @@ static void r8152b_init(struct r8152 *tp)
                        break;
        }
 
-       /* disable rx aggregation */
+       /* enable rx aggregation */
        ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL);
-       ocp_data |= RX_AGG_DISABLE;
+       ocp_data &= ~RX_AGG_DISABLE;
        ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data);
 }
 
@@ -1489,7 +1965,9 @@ static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message)
 
        if (netif_running(tp->netdev)) {
                clear_bit(WORK_ENABLE, &tp->flags);
+               usb_kill_urb(tp->intr_urb);
                cancel_delayed_work_sync(&tp->schedule);
+               tasklet_disable(&tp->tl);
        }
 
        rtl8152_down(tp);
@@ -1504,10 +1982,12 @@ static int rtl8152_resume(struct usb_interface *intf)
        r8152b_init(tp);
        netif_device_attach(tp->netdev);
        if (netif_running(tp->netdev)) {
-               rtl8152_enable(tp);
+               rtl8152_set_speed(tp, AUTONEG_ENABLE, SPEED_100, DUPLEX_FULL);
+               tp->speed = 0;
+               netif_carrier_off(tp->netdev);
                set_bit(WORK_ENABLE, &tp->flags);
-               set_bit(RTL8152_SET_RX_MODE, &tp->flags);
-               schedule_delayed_work(&tp->schedule, 0);
+               usb_submit_urb(tp->intr_urb, GFP_KERNEL);
+               tasklet_enable(&tp->tl);
        }
 
        return 0;
@@ -1619,6 +2099,7 @@ static int rtl8152_probe(struct usb_interface *intf,
        struct usb_device *udev = interface_to_usbdev(intf);
        struct r8152 *tp;
        struct net_device *netdev;
+       int ret;
 
        if (udev->actconfig->desc.bConfigurationValue != 1) {
                usb_driver_set_configuration(udev, 1);
@@ -1631,19 +2112,22 @@ static int rtl8152_probe(struct usb_interface *intf,
                return -ENOMEM;
        }
 
+       SET_NETDEV_DEV(netdev, &intf->dev);
        tp = netdev_priv(netdev);
        tp->msg_enable = 0x7FFF;
 
-       tasklet_init(&tp->tl, rx_fixup, (unsigned long)tp);
+       tasklet_init(&tp->tl, bottom_half, (unsigned long)tp);
        INIT_DELAYED_WORK(&tp->schedule, rtl_work_func_t);
 
        tp->udev = udev;
        tp->netdev = netdev;
+       tp->intf = intf;
        netdev->netdev_ops = &rtl8152_netdev_ops;
        netdev->watchdog_timeo = RTL8152_TX_TIMEOUT;
-       netdev->features &= ~NETIF_F_IP_CSUM;
+
+       netdev->features |= NETIF_F_IP_CSUM;
+       netdev->hw_features = NETIF_F_IP_CSUM;
        SET_ETHTOOL_OPS(netdev, &ops);
-       tp->speed = 0;
 
        tp->mii.dev = netdev;
        tp->mii.mdio_read = read_mii_word;
@@ -1657,37 +2141,27 @@ static int rtl8152_probe(struct usb_interface *intf,
        r8152b_init(tp);
        set_ethernet_addr(tp);
 
-       if (!alloc_all_urbs(tp)) {
-               netif_err(tp, probe, netdev, "out of memory");
+       ret = alloc_all_mem(tp);
+       if (ret)
                goto out;
-       }
-
-       tp->rx_skb = netdev_alloc_skb(netdev,
-                       RTL8152_RMS + sizeof(struct rx_desc));
-       if (!tp->rx_skb)
-               goto out1;
 
        usb_set_intfdata(intf, tp);
-       SET_NETDEV_DEV(netdev, &intf->dev);
-
 
-       if (register_netdev(netdev) != 0) {
+       ret = register_netdev(netdev);
+       if (ret != 0) {
                netif_err(tp, probe, netdev, "couldn't register the device");
-               goto out2;
+               goto out1;
        }
 
        netif_info(tp, probe, netdev, "%s", DRIVER_VERSION);
 
        return 0;
 
-out2:
-       usb_set_intfdata(intf, NULL);
-       dev_kfree_skb(tp->rx_skb);
 out1:
-       free_all_urbs(tp);
+       usb_set_intfdata(intf, NULL);
 out:
        free_netdev(netdev);
-       return -EIO;
+       return ret;
 }
 
 static void rtl8152_unload(struct r8152 *tp)
@@ -1715,9 +2189,7 @@ static void rtl8152_disconnect(struct usb_interface *intf)
                tasklet_kill(&tp->tl);
                unregister_netdev(tp->netdev);
                rtl8152_unload(tp);
-               free_all_urbs(tp);
-               if (tp->rx_skb)
-                       dev_kfree_skb(tp->rx_skb);
+               free_all_mem(tp);
                free_netdev(tp->netdev);
        }
 }
@@ -1732,11 +2204,12 @@ MODULE_DEVICE_TABLE(usb, rtl8152_table);
 
 static struct usb_driver rtl8152_driver = {
        .name =         MODULENAME,
+       .id_table =     rtl8152_table,
        .probe =        rtl8152_probe,
        .disconnect =   rtl8152_disconnect,
-       .id_table =     rtl8152_table,
        .suspend =      rtl8152_suspend,
-       .resume =       rtl8152_resume
+       .resume =       rtl8152_resume,
+       .reset_resume = rtl8152_resume,
 };
 
 module_usb_driver(rtl8152_driver);
index 06ee82f557d45ba31b4847c187f57771ae2c73d2..e4811d7b5af1cb19aad389f231781cede61c3f81 100644 (file)
  * For high speed, each frame comfortably fits almost 36 max size
  * Ethernet packets (so queues should be bigger).
  *
- * REVISIT qlens should be members of 'struct usbnet'; the goal is to
- * let the USB host controller be busy for 5msec or more before an irq
- * is required, under load.  Jumbograms change the equation.
+ * The goal is to let the USB host controller be busy for 5msec or
+ * more before an irq is required, under load.  Jumbograms change
+ * the equation.
  */
-#define RX_MAX_QUEUE_MEMORY (60 * 1518)
-#define        RX_QLEN(dev) (((dev)->udev->speed == USB_SPEED_HIGH) ? \
-                       (RX_MAX_QUEUE_MEMORY/(dev)->rx_urb_size) : 4)
-#define        TX_QLEN(dev) (((dev)->udev->speed == USB_SPEED_HIGH) ? \
-                       (RX_MAX_QUEUE_MEMORY/(dev)->hard_mtu) : 4)
+#define        MAX_QUEUE_MEMORY        (60 * 1518)
+#define        RX_QLEN(dev)            ((dev)->rx_qlen)
+#define        TX_QLEN(dev)            ((dev)->tx_qlen)
 
 // reawaken network queue this soon after stopping; else watchdog barks
 #define TX_TIMEOUT_JIFFIES     (5*HZ)
@@ -347,6 +345,31 @@ void usbnet_skb_return (struct usbnet *dev, struct sk_buff *skb)
 }
 EXPORT_SYMBOL_GPL(usbnet_skb_return);
 
+/* Recompute the RX/TX queue-length limits from the device speed.
+ * Must be called whenever hard_mtu or rx_urb_size changes, since both
+ * feed into the per-queue memory budget.
+ */
+void usbnet_update_max_qlen(struct usbnet *dev)
+{
+       enum usb_device_speed speed = dev->udev->speed;
+
+       switch (speed) {
+       case USB_SPEED_HIGH:
+               dev->rx_qlen = MAX_QUEUE_MEMORY / dev->rx_urb_size;
+               dev->tx_qlen = MAX_QUEUE_MEMORY / dev->hard_mtu;
+               break;
+       case USB_SPEED_SUPER:
+               /*
+                * Don't use the default 5 ms queue depth for a
+                * super-speed HC, to save memory; iperf tests show a
+                * 2.5 ms qlen works well.
+                */
+               dev->rx_qlen = 5 * MAX_QUEUE_MEMORY / dev->rx_urb_size;
+               dev->tx_qlen = 5 * MAX_QUEUE_MEMORY / dev->hard_mtu;
+               break;
+       default:
+               /* Full/low speed (and anything unknown): small fixed
+                * queues.
+                */
+               dev->rx_qlen = dev->tx_qlen = 4;
+       }
+}
+EXPORT_SYMBOL_GPL(usbnet_update_max_qlen);
+
 \f
 /*-------------------------------------------------------------------------
  *
@@ -375,6 +398,9 @@ int usbnet_change_mtu (struct net_device *net, int new_mtu)
                        usbnet_unlink_rx_urbs(dev);
        }
 
+       /* max qlen depend on hard_mtu and rx_urb_size */
+       usbnet_update_max_qlen(dev);
+
        return 0;
 }
 EXPORT_SYMBOL_GPL(usbnet_change_mtu);
@@ -843,6 +869,9 @@ int usbnet_open (struct net_device *net)
                goto done;
        }
 
+       /* hard_mtu or rx_urb_size may change in reset() */
+       usbnet_update_max_qlen(dev);
+
        // insist peer be connected
        if (info->check_connect && (retval = info->check_connect (dev)) < 0) {
                netif_dbg(dev, ifup, dev->net, "can't open; %d\n", retval);
@@ -927,6 +956,9 @@ int usbnet_set_settings (struct net_device *net, struct ethtool_cmd *cmd)
        if (dev->driver_info->link_reset)
                dev->driver_info->link_reset(dev);
 
+       /* hard_mtu or rx_urb_size may change in link_reset() */
+       usbnet_update_max_qlen(dev);
+
        return retval;
 
 }
@@ -1020,6 +1052,9 @@ static void __handle_link_change(struct usbnet *dev)
                tasklet_schedule(&dev->bh);
        }
 
+       /* hard_mtu or rx_urb_size may change during link change */
+       usbnet_update_max_qlen(dev);
+
        clear_bit(EVENT_LINK_CHANGE, &dev->flags);
 }
 
@@ -1599,6 +1634,9 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
        if ((dev->driver_info->flags & FLAG_WWAN) != 0)
                SET_NETDEV_DEVTYPE(net, &wwan_type);
 
+       /* initialize max rx_qlen and tx_qlen */
+       usbnet_update_max_qlen(dev);
+
        status = register_netdev (net);
        if (status)
                goto out4;
index 3d2a90a626498d20c96ba7729beacec16b9b5040..f21600277583fac0b9e1a78065ccf588d8bc2f6d 100644 (file)
@@ -106,6 +106,9 @@ struct virtnet_info {
        /* Has control virtqueue */
        bool has_cvq;
 
+       /* Host can handle any s/g split between our header and packet data */
+       bool any_header_sg;
+
        /* enable config space updates */
        bool config_enable;
 
@@ -669,12 +672,28 @@ static void free_old_xmit_skbs(struct send_queue *sq)
 
 static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
 {
-       struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
+       struct skb_vnet_hdr *hdr;
        const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
        struct virtnet_info *vi = sq->vq->vdev->priv;
        unsigned num_sg;
+       unsigned hdr_len;
+       bool can_push;
 
        pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);
+       if (vi->mergeable_rx_bufs)
+               hdr_len = sizeof hdr->mhdr;
+       else
+               hdr_len = sizeof hdr->hdr;
+
+       can_push = vi->any_header_sg &&
+               !((unsigned long)skb->data & (__alignof__(*hdr) - 1)) &&
+               !skb_header_cloned(skb) && skb_headroom(skb) >= hdr_len;
+       /* Even if we can, don't push here yet as this would skew
+        * csum_start offset below. */
+       if (can_push)
+               hdr = (struct skb_vnet_hdr *)(skb->data - hdr_len);
+       else
+               hdr = skb_vnet_hdr(skb);
 
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                hdr->hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
@@ -703,15 +722,18 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
                hdr->hdr.gso_size = hdr->hdr.hdr_len = 0;
        }
 
-       hdr->mhdr.num_buffers = 0;
-
-       /* Encode metadata header at front. */
        if (vi->mergeable_rx_bufs)
-               sg_set_buf(sq->sg, &hdr->mhdr, sizeof hdr->mhdr);
-       else
-               sg_set_buf(sq->sg, &hdr->hdr, sizeof hdr->hdr);
+               hdr->mhdr.num_buffers = 0;
 
-       num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len) + 1;
+       if (can_push) {
+               __skb_push(skb, hdr_len);
+               num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len);
+               /* Pull header back to avoid skew in tx bytes calculations. */
+               __skb_pull(skb, hdr_len);
+       } else {
+               sg_set_buf(sq->sg, hdr, hdr_len);
+               num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len) + 1;
+       }
        return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC);
 }
 
@@ -1552,6 +1574,9 @@ static int virtnet_probe(struct virtio_device *vdev)
        if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
                vi->mergeable_rx_bufs = true;
 
+       if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT))
+               vi->any_header_sg = true;
+
        if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
                vi->has_cvq = true;
 
@@ -1727,6 +1752,7 @@ static unsigned int features[] = {
        VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN,
        VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ,
        VIRTIO_NET_F_CTRL_MAC_ADDR,
+       VIRTIO_F_ANY_LAYOUT,
 };
 
 static struct virtio_driver virtio_net_driver = {
index 767f7af3bd40385ae18823ab579fff1709f79d7b..3b21aca0c0c249c6a6e3392d78b47e03fd7d9006 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/igmp.h>
 #include <linux/etherdevice.h>
 #include <linux/if_ether.h>
+#include <linux/if_vlan.h>
 #include <linux/hash.h>
 #include <linux/ethtool.h>
 #include <net/arp.h>
@@ -41,6 +42,7 @@
 #include <net/inet_ecn.h>
 #include <net/net_namespace.h>
 #include <net/netns/generic.h>
+#include <net/vxlan.h>
 
 #define VXLAN_VERSION  "0.1"
 
@@ -57,6 +59,7 @@
 #define VXLAN_VID_MASK (VXLAN_N_VID - 1)
 /* IP header + UDP + VXLAN + Ethernet header */
 #define VXLAN_HEADROOM (20 + 8 + 8 + 14)
+#define VXLAN_HLEN (sizeof(struct udphdr) + sizeof(struct vxlanhdr))
 
 #define VXLAN_FLAGS 0x08000000 /* struct vxlanhdr.vx_flags required value. */
 
@@ -82,16 +85,6 @@ static int vxlan_net_id;
 
 static const u8 all_zeros_mac[ETH_ALEN];
 
-/* per UDP socket information */
-struct vxlan_sock {
-       struct hlist_node hlist;
-       struct rcu_head   rcu;
-       struct work_struct del_work;
-       atomic_t          refcnt;
-       struct socket     *sock;
-       struct hlist_head vni_list[VNI_HASH_SIZE];
-};
-
 /* per-network namespace private data for this module */
 struct vxlan_net {
        struct list_head  vxlan_list;
@@ -177,13 +170,18 @@ static inline struct hlist_head *vs_head(struct net *net, __be16 port)
 /* First remote destination for a forwarding entry.
  * Guaranteed to be non-NULL because remotes are never deleted.
  */
-static inline struct vxlan_rdst *first_remote(struct vxlan_fdb *fdb)
+static inline struct vxlan_rdst *first_remote_rcu(struct vxlan_fdb *fdb)
 {
-       return list_first_or_null_rcu(&fdb->remotes, struct vxlan_rdst, list);
+       return list_entry_rcu(fdb->remotes.next, struct vxlan_rdst, list);
+}
+
+/* First remote destination of an FDB entry, for callers that hold the
+ * RTNL lock (the _rtnl suffix suggests RTNL, versus the RCU variant
+ * above -- TODO confirm the locking contract at the call sites).
+ */
+static inline struct vxlan_rdst *first_remote_rtnl(struct vxlan_fdb *fdb)
+{
+       return list_first_entry(&fdb->remotes, struct vxlan_rdst, list);
+}
 
 /* Find VXLAN socket based on network namespace and UDP port */
-static struct vxlan_sock *vxlan_find_port(struct net *net, __be16 port)
+static struct vxlan_sock *vxlan_find_sock(struct net *net, __be16 port)
 {
        struct vxlan_sock *vs;
 
@@ -194,16 +192,10 @@ static struct vxlan_sock *vxlan_find_port(struct net *net, __be16 port)
        return NULL;
 }
 
-/* Look up VNI in a per net namespace table */
-static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id, __be16 port)
+static struct vxlan_dev *vxlan_vs_find_vni(struct vxlan_sock *vs, u32 id)
 {
-       struct vxlan_sock *vs;
        struct vxlan_dev *vxlan;
 
-       vs = vxlan_find_port(net, port);
-       if (!vs)
-               return NULL;
-
        hlist_for_each_entry_rcu(vxlan, vni_head(vs, id), hlist) {
                if (vxlan->default_dst.remote_vni == id)
                        return vxlan;
@@ -212,6 +204,18 @@ static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id, __be16 port)
        return NULL;
 }
 
+/* Look up VNI in a per net namespace table: first resolve the
+ * (net, port) pair to its vxlan_sock, then find the device with the
+ * matching VNI on that socket.  Returns NULL if either step fails.
+ */
+static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id, __be16 port)
+{
+       struct vxlan_sock *vs;
+
+       vs = vxlan_find_sock(net, port);
+       if (!vs)
+               return NULL;
+
+       return vxlan_vs_find_vni(vs, id);
+}
+
 /* Fill in neighbour message in skbuff. */
 static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
                          const struct vxlan_fdb *fdb,
@@ -297,7 +301,8 @@ static void vxlan_fdb_notify(struct vxlan_dev *vxlan,
        if (skb == NULL)
                goto errout;
 
-       err = vxlan_fdb_info(skb, vxlan, fdb, 0, 0, type, 0, first_remote(fdb));
+       err = vxlan_fdb_info(skb, vxlan, fdb, 0, 0, type, 0,
+                            first_remote_rtnl(fdb));
        if (err < 0) {
                /* -EMSGSIZE implies BUG in vxlan_nlmsg_size() */
                WARN_ON(err == -EMSGSIZE);
@@ -408,6 +413,26 @@ static struct vxlan_rdst *vxlan_fdb_find_rdst(struct vxlan_fdb *f,
        return NULL;
 }
 
+/* Replace destination of unicast mac */
+static int vxlan_fdb_replace(struct vxlan_fdb *f,
+                           __be32 ip, __be16 port, __u32 vni, __u32 ifindex)
+{
+       struct vxlan_rdst *rd;
+
+       rd = vxlan_fdb_find_rdst(f, ip, port, vni, ifindex);
+       if (rd)
+               return 0;
+
+       rd = list_first_entry_or_null(&f->remotes, struct vxlan_rdst, list);
+       if (!rd)
+               return 0;
+       rd->remote_ip = ip;
+       rd->remote_port = port;
+       rd->remote_vni = vni;
+       rd->remote_ifindex = ifindex;
+       return 1;
+}
+
 /* Add/update destinations for multicast */
 static int vxlan_fdb_append(struct vxlan_fdb *f,
                            __be32 ip, __be16 port, __u32 vni, __u32 ifindex)
@@ -458,6 +483,19 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan,
                        f->updated = jiffies;
                        notify = 1;
                }
+               if ((flags & NLM_F_REPLACE)) {
+                       /* Only change unicasts */
+                       if (!(is_multicast_ether_addr(f->eth_addr) ||
+                            is_zero_ether_addr(f->eth_addr))) {
+                               int rc = vxlan_fdb_replace(f, ip, port, vni,
+                                                          ifindex);
+
+                               if (rc < 0)
+                                       return rc;
+                               notify |= rc;
+                       } else
+                               return -EOPNOTSUPP;
+               }
                if ((flags & NLM_F_APPEND) &&
                    (is_multicast_ether_addr(f->eth_addr) ||
                     is_zero_ether_addr(f->eth_addr))) {
@@ -474,6 +512,11 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan,
                if (vxlan->addrmax && vxlan->addrcnt >= vxlan->addrmax)
                        return -ENOSPC;
 
+               /* Disallow replace to add a multicast entry */
+               if ((flags & NLM_F_REPLACE) &&
+                   (is_multicast_ether_addr(mac) || is_zero_ether_addr(mac)))
+                       return -EOPNOTSUPP;
+
                netdev_dbg(vxlan->dev, "add %pM -> %pI4\n", mac, &ip);
                f = kmalloc(sizeof(*f), GFP_ATOMIC);
                if (!f)
@@ -499,12 +542,6 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan,
        return 0;
 }
 
-static void vxlan_fdb_free_rdst(struct rcu_head *head)
-{
-       struct vxlan_rdst *rd = container_of(head, struct vxlan_rdst, rcu);
-       kfree(rd);
-}
-
 static void vxlan_fdb_free(struct rcu_head *head)
 {
        struct vxlan_fdb *f = container_of(head, struct vxlan_fdb, rcu);
@@ -644,7 +681,7 @@ static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
         */
        if (rd && !list_is_singular(&f->remotes)) {
                list_del_rcu(&rd->list);
-               call_rcu(&rd->rcu, vxlan_fdb_free_rdst);
+               kfree_rcu(rd, rcu);
                goto out;
        }
 
@@ -702,7 +739,7 @@ static bool vxlan_snoop(struct net_device *dev,
 
        f = vxlan_find_mac(vxlan, src_mac);
        if (likely(f)) {
-               struct vxlan_rdst *rdst = first_remote(f);
+               struct vxlan_rdst *rdst = first_remote_rcu(f);
 
                if (likely(rdst->remote_ip == src_ip))
                        return false;
@@ -758,8 +795,10 @@ static void vxlan_sock_hold(struct vxlan_sock *vs)
        atomic_inc(&vs->refcnt);
 }
 
-static void vxlan_sock_release(struct vxlan_net *vn, struct vxlan_sock *vs)
+void vxlan_sock_release(struct vxlan_sock *vs)
 {
+       struct vxlan_net *vn = net_generic(sock_net(vs->sock->sk), vxlan_net_id);
+
        if (!atomic_dec_and_test(&vs->refcnt))
                return;
 
@@ -769,6 +808,7 @@ static void vxlan_sock_release(struct vxlan_net *vn, struct vxlan_sock *vs)
 
        queue_work(vxlan_wq, &vs->del_work);
 }
+EXPORT_SYMBOL_GPL(vxlan_sock_release);
 
 /* Callback to update multicast group membership when first VNI on
  * multicast asddress is brought up
@@ -777,7 +817,6 @@ static void vxlan_sock_release(struct vxlan_net *vn, struct vxlan_sock *vs)
 static void vxlan_igmp_join(struct work_struct *work)
 {
        struct vxlan_dev *vxlan = container_of(work, struct vxlan_dev, igmp_join);
-       struct vxlan_net *vn = net_generic(dev_net(vxlan->dev), vxlan_net_id);
        struct vxlan_sock *vs = vxlan->vn_sock;
        struct sock *sk = vs->sock->sk;
        struct ip_mreqn mreq = {
@@ -789,7 +828,7 @@ static void vxlan_igmp_join(struct work_struct *work)
        ip_mc_join_group(sk, &mreq);
        release_sock(sk);
 
-       vxlan_sock_release(vn, vs);
+       vxlan_sock_release(vs);
        dev_put(vxlan->dev);
 }
 
@@ -797,7 +836,6 @@ static void vxlan_igmp_join(struct work_struct *work)
 static void vxlan_igmp_leave(struct work_struct *work)
 {
        struct vxlan_dev *vxlan = container_of(work, struct vxlan_dev, igmp_leave);
-       struct vxlan_net *vn = net_generic(dev_net(vxlan->dev), vxlan_net_id);
        struct vxlan_sock *vs = vxlan->vn_sock;
        struct sock *sk = vs->sock->sk;
        struct ip_mreqn mreq = {
@@ -809,30 +847,23 @@ static void vxlan_igmp_leave(struct work_struct *work)
        ip_mc_leave_group(sk, &mreq);
        release_sock(sk);
 
-       vxlan_sock_release(vn, vs);
+       vxlan_sock_release(vs);
        dev_put(vxlan->dev);
 }
 
 /* Callback from net/ipv4/udp.c to receive packets */
 static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
 {
-       struct iphdr *oip;
+       struct vxlan_sock *vs;
        struct vxlanhdr *vxh;
-       struct vxlan_dev *vxlan;
-       struct pcpu_tstats *stats;
        __be16 port;
-       __u32 vni;
-       int err;
-
-       /* pop off outer UDP header */
-       __skb_pull(skb, sizeof(struct udphdr));
 
        /* Need Vxlan and inner Ethernet header to be present */
-       if (!pskb_may_pull(skb, sizeof(struct vxlanhdr)))
+       if (!pskb_may_pull(skb, VXLAN_HLEN))
                goto error;
 
-       /* Drop packets with reserved bits set */
-       vxh = (struct vxlanhdr *) skb->data;
+       /* Return packets with reserved bits set */
+       vxh = (struct vxlanhdr *)(udp_hdr(skb) + 1);
        if (vxh->vx_flags != htonl(VXLAN_FLAGS) ||
            (vxh->vx_vni & htonl(0xff))) {
                netdev_dbg(skb->dev, "invalid vxlan flags=%#x vni=%#x\n",
@@ -840,28 +871,44 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
                goto error;
        }
 
-       __skb_pull(skb, sizeof(struct vxlanhdr));
+       if (iptunnel_pull_header(skb, VXLAN_HLEN, htons(ETH_P_TEB)))
+               goto drop;
 
-       /* Is this VNI defined? */
-       vni = ntohl(vxh->vx_vni) >> 8;
        port = inet_sk(sk)->inet_sport;
-       vxlan = vxlan_find_vni(sock_net(sk), vni, port);
-       if (!vxlan) {
-               netdev_dbg(skb->dev, "unknown vni %d port %u\n",
-                          vni, ntohs(port));
+
+       vs = vxlan_find_sock(sock_net(sk), port);
+       if (!vs)
                goto drop;
-       }
 
-       if (!pskb_may_pull(skb, ETH_HLEN)) {
-               vxlan->dev->stats.rx_length_errors++;
-               vxlan->dev->stats.rx_errors++;
+       vs->rcv(vs, skb, vxh->vx_vni);
+       return 0;
+
+drop:
+       /* Consume bad packet */
+       kfree_skb(skb);
+       return 0;
+
+error:
+       /* Return non vxlan pkt */
+       return 1;
+}
+
+static void vxlan_rcv(struct vxlan_sock *vs,
+                     struct sk_buff *skb, __be32 vx_vni)
+{
+       struct iphdr *oip;
+       struct vxlan_dev *vxlan;
+       struct pcpu_tstats *stats;
+       __u32 vni;
+       int err;
+
+       vni = ntohl(vx_vni) >> 8;
+       /* Is this VNI defined? */
+       vxlan = vxlan_vs_find_vni(vs, vni);
+       if (!vxlan)
                goto drop;
-       }
 
        skb_reset_mac_header(skb);
-
-       /* Re-examine inner Ethernet packet */
-       oip = ip_hdr(skb);
        skb->protocol = eth_type_trans(skb, vxlan->dev);
 
        /* Ignore packet loops (and multicast echo) */
@@ -869,11 +916,12 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
                               vxlan->dev->dev_addr) == 0)
                goto drop;
 
+       /* Re-examine inner Ethernet packet */
+       oip = ip_hdr(skb);
        if ((vxlan->flags & VXLAN_F_LEARN) &&
            vxlan_snoop(skb->dev, oip->saddr, eth_hdr(skb)->h_source))
                goto drop;
 
-       __skb_tunnel_rx(skb, vxlan->dev);
        skb_reset_network_header(skb);
 
        /* If the NIC driver gave us an encapsulated packet with
@@ -907,16 +955,10 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
 
        netif_rx(skb);
 
-       return 0;
-error:
-       /* Put UDP header back */
-       __skb_push(skb, sizeof(struct udphdr));
-
-       return 1;
+       return;
 drop:
        /* Consume bad packet */
        kfree_skb(skb);
-       return 0;
 }
 
 static int arp_reduce(struct net_device *dev, struct sk_buff *skb)
@@ -967,7 +1009,7 @@ static int arp_reduce(struct net_device *dev, struct sk_buff *skb)
                }
 
                f = vxlan_find_mac(vxlan, n->ha);
-               if (f && first_remote(f)->remote_ip == htonl(INADDR_ANY)) {
+               if (f && first_remote_rcu(f)->remote_ip == htonl(INADDR_ANY)) {
                        /* bridge-local neighbor */
                        neigh_release(n);
                        goto out;
@@ -1035,11 +1077,8 @@ static void vxlan_sock_put(struct sk_buff *skb)
 }
 
 /* On transmit, associate with the tunnel socket */
-static void vxlan_set_owner(struct net_device *dev, struct sk_buff *skb)
+static void vxlan_set_owner(struct sock *sk, struct sk_buff *skb)
 {
-       struct vxlan_dev *vxlan = netdev_priv(dev);
-       struct sock *sk = vxlan->vn_sock->sock->sk;
-
        skb_orphan(skb);
        sock_hold(sk);
        skb->sk = sk;
@@ -1051,9 +1090,9 @@ static void vxlan_set_owner(struct net_device *dev, struct sk_buff *skb)
  *     better and maybe available from hardware
  *   secondary choice is to use jhash on the Ethernet header
  */
-static __be16 vxlan_src_port(const struct vxlan_dev *vxlan, struct sk_buff *skb)
+__be16 vxlan_src_port(__u16 port_min, __u16 port_max, struct sk_buff *skb)
 {
-       unsigned int range = (vxlan->port_max - vxlan->port_min) + 1;
+       unsigned int range = (port_max - port_min) + 1;
        u32 hash;
 
        hash = skb_get_rxhash(skb);
@@ -1061,8 +1100,9 @@ static __be16 vxlan_src_port(const struct vxlan_dev *vxlan, struct sk_buff *skb)
                hash = jhash(skb->data, 2 * ETH_ALEN,
                             (__force u32) skb->protocol);
 
-       return htons((((u64) hash * range) >> 32) + vxlan->port_min);
+       return htons((((u64) hash * range) >> 32) + port_min);
 }
+EXPORT_SYMBOL_GPL(vxlan_src_port);
 
 static int handle_offloads(struct sk_buff *skb)
 {
@@ -1078,6 +1118,64 @@ static int handle_offloads(struct sk_buff *skb)
        return 0;
 }
 
+int vxlan_xmit_skb(struct net *net, struct vxlan_sock *vs,
+                  struct rtable *rt, struct sk_buff *skb,
+                  __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df,
+                  __be16 src_port, __be16 dst_port, __be32 vni)
+{
+       struct vxlanhdr *vxh;
+       struct udphdr *uh;
+       int min_headroom;
+       int err;
+
+       if (!skb->encapsulation) {
+               skb_reset_inner_headers(skb);
+               skb->encapsulation = 1;
+       }
+
+       min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
+                       + VXLAN_HLEN + sizeof(struct iphdr)
+                       + (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);
+
+       /* Need space for new headers (invalidates iph ptr) */
+       err = skb_cow_head(skb, min_headroom);
+       if (unlikely(err))
+               return err;
+
+       if (vlan_tx_tag_present(skb)) {
+               if (WARN_ON(!__vlan_put_tag(skb,
+                                           skb->vlan_proto,
+                                           vlan_tx_tag_get(skb))))
+                       return -ENOMEM;
+
+               skb->vlan_tci = 0;
+       }
+
+       vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
+       vxh->vx_flags = htonl(VXLAN_FLAGS);
+       vxh->vx_vni = vni;
+
+       __skb_push(skb, sizeof(*uh));
+       skb_reset_transport_header(skb);
+       uh = udp_hdr(skb);
+
+       uh->dest = dst_port;
+       uh->source = src_port;
+
+       uh->len = htons(skb->len);
+       uh->check = 0;
+
+       vxlan_set_owner(vs->sock->sk, skb);
+
+       err = handle_offloads(skb);
+       if (err)
+               return err;
+
+       return iptunnel_xmit(net, rt, skb, src, dst,
+                       IPPROTO_UDP, tos, ttl, df);
+}
+EXPORT_SYMBOL_GPL(vxlan_xmit_skb);
+
 /* Bypass encapsulation if the destination is local */
 static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
                               struct vxlan_dev *dst_vxlan)
@@ -1115,8 +1213,6 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
        struct vxlan_dev *vxlan = netdev_priv(dev);
        struct rtable *rt;
        const struct iphdr *old_iph;
-       struct vxlanhdr *vxh;
-       struct udphdr *uh;
        struct flowi4 fl4;
        __be32 dst;
        __be16 src_port, dst_port;
@@ -1138,15 +1234,6 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
                goto drop;
        }
 
-       if (!skb->encapsulation) {
-               skb_reset_inner_headers(skb);
-               skb->encapsulation = 1;
-       }
-
-       /* Need space for new headers (invalidates iph ptr) */
-       if (skb_cow_head(skb, VXLAN_HEADROOM))
-               goto drop;
-
        old_iph = ip_hdr(skb);
 
        ttl = vxlan->ttl;
@@ -1157,7 +1244,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
        if (tos == 1)
                tos = ip_tunnel_get_dsfield(old_iph, skb);
 
-       src_port = vxlan_src_port(vxlan, skb);
+       src_port = vxlan_src_port(vxlan->port_min, vxlan->port_max, skb);
 
        memset(&fl4, 0, sizeof(fl4));
        fl4.flowi4_oif = rdst->remote_ifindex;
@@ -1174,9 +1261,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
 
        if (rt->dst.dev == dev) {
                netdev_dbg(dev, "circular route to %pI4\n", &dst);
-               ip_rt_put(rt);
                dev->stats.collisions++;
-               goto tx_error;
+               goto rt_tx_error;
        }
 
        /* Bypass encapsulation if the destination is local */
@@ -1191,30 +1277,16 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
                vxlan_encap_bypass(skb, vxlan, dst_vxlan);
                return;
        }
-       vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
-       vxh->vx_flags = htonl(VXLAN_FLAGS);
-       vxh->vx_vni = htonl(vni << 8);
-
-       __skb_push(skb, sizeof(*uh));
-       skb_reset_transport_header(skb);
-       uh = udp_hdr(skb);
-
-       uh->dest = dst_port;
-       uh->source = src_port;
-
-       uh->len = htons(skb->len);
-       uh->check = 0;
-
-       vxlan_set_owner(dev, skb);
-
-       if (handle_offloads(skb))
-               goto drop;
 
        tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
        ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
 
-       err = iptunnel_xmit(dev_net(dev), rt, skb, fl4.saddr, dst,
-                           IPPROTO_UDP, tos, ttl, df);
+       err = vxlan_xmit_skb(dev_net(dev), vxlan->vn_sock, rt, skb,
+                            fl4.saddr, dst, tos, ttl, df,
+                            src_port, dst_port, htonl(vni << 8));
+
+       if (err < 0)
+               goto rt_tx_error;
        iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
 
        return;
@@ -1223,6 +1295,8 @@ drop:
        dev->stats.tx_dropped++;
        goto tx_free;
 
+rt_tx_error:
+       ip_rt_put(rt);
 tx_error:
        dev->stats.tx_errors++;
 tx_free:
@@ -1321,25 +1395,31 @@ static void vxlan_cleanup(unsigned long arg)
        mod_timer(&vxlan->age_timer, next_timer);
 }
 
+static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan)
+{
+       __u32 vni = vxlan->default_dst.remote_vni;
+
+       vxlan->vn_sock = vs;
+       hlist_add_head_rcu(&vxlan->hlist, vni_head(vs, vni));
+}
+
 /* Setup stats when device is created */
 static int vxlan_init(struct net_device *dev)
 {
        struct vxlan_dev *vxlan = netdev_priv(dev);
        struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
        struct vxlan_sock *vs;
-       __u32 vni = vxlan->default_dst.remote_vni;
 
        dev->tstats = alloc_percpu(struct pcpu_tstats);
        if (!dev->tstats)
                return -ENOMEM;
 
        spin_lock(&vn->sock_lock);
-       vs = vxlan_find_port(dev_net(dev), vxlan->dst_port);
+       vs = vxlan_find_sock(dev_net(dev), vxlan->dst_port);
        if (vs) {
                /* If we have a socket with same port already, reuse it */
                atomic_inc(&vs->refcnt);
-               vxlan->vn_sock = vs;
-               hlist_add_head_rcu(&vxlan->hlist, vni_head(vs, vni));
+               vxlan_vs_add_dev(vs, vxlan);
        } else {
                /* otherwise make new socket outside of RTNL */
                dev_hold(dev);
@@ -1364,13 +1444,12 @@ static void vxlan_fdb_delete_default(struct vxlan_dev *vxlan)
 static void vxlan_uninit(struct net_device *dev)
 {
        struct vxlan_dev *vxlan = netdev_priv(dev);
-       struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
        struct vxlan_sock *vs = vxlan->vn_sock;
 
        vxlan_fdb_delete_default(vxlan);
 
        if (vs)
-               vxlan_sock_release(vn, vs);
+               vxlan_sock_release(vs);
        free_percpu(dev->tstats);
 }
 
@@ -1486,8 +1565,11 @@ static void vxlan_setup(struct net_device *dev)
        dev->features   |= NETIF_F_RXCSUM;
        dev->features   |= NETIF_F_GSO_SOFTWARE;
 
+       dev->vlan_features = dev->features;
+       dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
        dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
        dev->hw_features |= NETIF_F_GSO_SOFTWARE;
+       dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
        dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
        dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
 
@@ -1587,8 +1669,10 @@ static void vxlan_del_work(struct work_struct *work)
        kfree_rcu(vs, rcu);
 }
 
-static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port)
+static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port,
+                                             vxlan_rcv_t *rcv, void *data)
 {
+       struct vxlan_net *vn = net_generic(net, vxlan_net_id);
        struct vxlan_sock *vs;
        struct sock *sk;
        struct sockaddr_in vxlan_addr = {
@@ -1600,8 +1684,10 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port)
        unsigned int h;
 
        vs = kmalloc(sizeof(*vs), GFP_KERNEL);
-       if (!vs)
+       if (!vs) {
+               pr_debug("memory alocation failure\n");
                return ERR_PTR(-ENOMEM);
+       }
 
        for (h = 0; h < VNI_HASH_SIZE; ++h)
                INIT_HLIST_HEAD(&vs->vni_list[h]);
@@ -1629,57 +1715,70 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port)
                kfree(vs);
                return ERR_PTR(rc);
        }
+       atomic_set(&vs->refcnt, 1);
+       vs->rcv = rcv;
+       vs->data = data;
 
        /* Disable multicast loopback */
        inet_sk(sk)->mc_loop = 0;
+       spin_lock(&vn->sock_lock);
+       hlist_add_head_rcu(&vs->hlist, vs_head(net, port));
+       spin_unlock(&vn->sock_lock);
 
        /* Mark socket as an encapsulation socket. */
        udp_sk(sk)->encap_type = 1;
        udp_sk(sk)->encap_rcv = vxlan_udp_encap_recv;
        udp_encap_enable();
-       atomic_set(&vs->refcnt, 1);
+       return vs;
+}
+
+struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
+                                 vxlan_rcv_t *rcv, void *data,
+                                 bool no_share)
+{
+       struct vxlan_net *vn = net_generic(net, vxlan_net_id);
+       struct vxlan_sock *vs;
+
+       vs = vxlan_socket_create(net, port, rcv, data);
+       if (!IS_ERR(vs))
+               return vs;
+
+       if (no_share)   /* Return error if sharing is not allowed. */
+               return vs;
+
+       spin_lock(&vn->sock_lock);
+       vs = vxlan_find_sock(net, port);
+       if (vs) {
+               if (vs->rcv == rcv)
+                       atomic_inc(&vs->refcnt);
+               else
+                       vs = ERR_PTR(-EBUSY);
+       }
+       spin_unlock(&vn->sock_lock);
+
+       if (!vs)
+               vs = ERR_PTR(-EINVAL);
 
        return vs;
 }
+EXPORT_SYMBOL_GPL(vxlan_sock_add);
 
 /* Scheduled at device creation to bind to a socket */
 static void vxlan_sock_work(struct work_struct *work)
 {
-       struct vxlan_dev *vxlan
-               = container_of(work, struct vxlan_dev, sock_work);
-       struct net_device *dev = vxlan->dev;
-       struct net *net = dev_net(dev);
-       __u32 vni = vxlan->default_dst.remote_vni;
-       __be16 port = vxlan->dst_port;
+       struct vxlan_dev *vxlan = container_of(work, struct vxlan_dev, sock_work);
+       struct net *net = dev_net(vxlan->dev);
        struct vxlan_net *vn = net_generic(net, vxlan_net_id);
-       struct vxlan_sock *nvs, *ovs;
-
-       nvs = vxlan_socket_create(net, port);
-       if (IS_ERR(nvs)) {
-               netdev_err(vxlan->dev, "Can not create UDP socket, %ld\n",
-                          PTR_ERR(nvs));
-               goto out;
-       }
+       __be16 port = vxlan->dst_port;
+       struct vxlan_sock *nvs;
 
+       nvs = vxlan_sock_add(net, port, vxlan_rcv, NULL, false);
        spin_lock(&vn->sock_lock);
-       /* Look again to see if can reuse socket */
-       ovs = vxlan_find_port(net, port);
-       if (ovs) {
-               atomic_inc(&ovs->refcnt);
-               vxlan->vn_sock = ovs;
-               hlist_add_head_rcu(&vxlan->hlist, vni_head(ovs, vni));
-               spin_unlock(&vn->sock_lock);
-
-               sk_release_kernel(nvs->sock->sk);
-               kfree(nvs);
-       } else {
-               vxlan->vn_sock = nvs;
-               hlist_add_head_rcu(&nvs->hlist, vs_head(net, port));
-               hlist_add_head_rcu(&vxlan->hlist, vni_head(nvs, vni));
-               spin_unlock(&vn->sock_lock);
-       }
-out:
-       dev_put(dev);
+       if (!IS_ERR(nvs))
+               vxlan_vs_add_dev(nvs, vxlan);
+       spin_unlock(&vn->sock_lock);
+
+       dev_put(vxlan->dev);
 }
 
 static int vxlan_newlink(struct net *net, struct net_device *dev,
@@ -1794,7 +1893,8 @@ static void vxlan_dellink(struct net_device *dev, struct list_head *head)
        struct vxlan_dev *vxlan = netdev_priv(dev);
 
        spin_lock(&vn->sock_lock);
-       hlist_del_rcu(&vxlan->hlist);
+       if (!hlist_unhashed(&vxlan->hlist))
+               hlist_del_rcu(&vxlan->hlist);
        spin_unlock(&vn->sock_lock);
 
        list_del(&vxlan->next);
index d43f4efd3e07abc6d62fff0343cfd46cec609097..5bbcb5e3ee0c0cf5e9fefff42fb6892541e14548 100644 (file)
@@ -176,7 +176,7 @@ static u32  mac[  SBNI_MAX_NUM_CARDS ] __initdata;
 
 #ifndef MODULE
 typedef u32  iarr[];
-static iarr __initdata *dest[5] = { &io, &irq, &baud, &rxl, &mac };
+static iarr *dest[5] __initdata = { &io, &irq, &baud, &rxl, &mac };
 #endif
 
 /* A zero-terminated list of I/O addresses to be probed on ISA bus */
index daeafeff186bb4f0ed313d8f6a7e8661fe0267ef..e0ba7cd14252bda97d47eca7ac40620a25239169 100644 (file)
@@ -159,7 +159,7 @@ struct ath_common {
 
        bool btcoex_enabled;
        bool disable_ani;
-       bool antenna_diversity;
+       bool bt_ant_diversity;
 };
 
 struct sk_buff *ath_rxbuf_alloc(struct ath_common *common,
index 1a2ef51b69d91f4335beff63e5b781136cd4ff95..744da6d1c405d91a645428d0f686c3c9cb4f00f0 100644 (file)
 #include "debug.h"
 #include "htc.h"
 
+void ath10k_bmi_start(struct ath10k *ar)
+{
+       ath10k_dbg(ATH10K_DBG_CORE, "BMI started\n");
+       ar->bmi.done_sent = false;
+}
+
 int ath10k_bmi_done(struct ath10k *ar)
 {
        struct bmi_cmd cmd;
@@ -105,7 +111,8 @@ int ath10k_bmi_read_memory(struct ath10k *ar,
                ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen,
                                                  &resp, &rxlen);
                if (ret) {
-                       ath10k_warn("unable to read from the device\n");
+                       ath10k_warn("unable to read from the device (%d)\n",
+                                   ret);
                        return ret;
                }
 
@@ -149,7 +156,8 @@ int ath10k_bmi_write_memory(struct ath10k *ar,
                ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, hdrlen + txlen,
                                                  NULL, NULL);
                if (ret) {
-                       ath10k_warn("unable to write to the device\n");
+                       ath10k_warn("unable to write to the device (%d)\n",
+                                   ret);
                        return ret;
                }
 
index 32c56aa33a5ebe4583d41e905d04561941b51f05..8d81ce1cec216c7b55fa1c0ab47b65cc41cafb0d 100644 (file)
@@ -184,6 +184,7 @@ struct bmi_target_info {
 #define BMI_CE_NUM_TO_TARG 0
 #define BMI_CE_NUM_TO_HOST 1
 
+void ath10k_bmi_start(struct ath10k *ar);
 int ath10k_bmi_done(struct ath10k *ar);
 int ath10k_bmi_get_target_info(struct ath10k *ar,
                               struct bmi_target_info *target_info);
index 61a8ac70d3cace13ab40c7c666b2a4c7200788d6..f8b969f518f84ecee55c4e38f5d5f8247d501cf0 100644 (file)
@@ -79,7 +79,7 @@ static inline void ath10k_ce_src_ring_write_index_set(struct ath10k *ar,
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        void __iomem *indicator_addr;
 
-       if (!test_bit(ATH10K_PCI_FEATURE_HW_1_0_WARKAROUND, ar_pci->features)) {
+       if (!test_bit(ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND, ar_pci->features)) {
                ath10k_pci_write32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS, n);
                return;
        }
@@ -637,6 +637,7 @@ static int ath10k_ce_completed_send_next_nolock(struct ce_state *ce_state,
                ath10k_pci_wake(ar);
                src_ring->hw_index =
                        ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
+               src_ring->hw_index &= nentries_mask;
                ath10k_pci_sleep(ar);
        }
        read_index = src_ring->hw_index;
@@ -950,10 +951,12 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
 
        ath10k_pci_wake(ar);
        src_ring->sw_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
+       src_ring->sw_index &= src_ring->nentries_mask;
        src_ring->hw_index = src_ring->sw_index;
 
        src_ring->write_index =
                ath10k_ce_src_ring_write_index_get(ar, ctrl_addr);
+       src_ring->write_index &= src_ring->nentries_mask;
        ath10k_pci_sleep(ar);
 
        src_ring->per_transfer_context = (void **)ptr;
@@ -1035,8 +1038,10 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar,
 
        ath10k_pci_wake(ar);
        dest_ring->sw_index = ath10k_ce_dest_ring_read_index_get(ar, ctrl_addr);
+       dest_ring->sw_index &= dest_ring->nentries_mask;
        dest_ring->write_index =
                ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr);
+       dest_ring->write_index &= dest_ring->nentries_mask;
        ath10k_pci_sleep(ar);
 
        dest_ring->per_transfer_context = (void **)ptr;
index 2b3426b1ff3f88fb0302e04a6fffe70ea9c93e5a..7226c23b956991165f5f1f7c5e7253d951400387 100644 (file)
@@ -100,7 +100,7 @@ static int ath10k_init_connect_htc(struct ath10k *ar)
                goto conn_fail;
 
        /* Start HTC */
-       status = ath10k_htc_start(ar->htc);
+       status = ath10k_htc_start(&ar->htc);
        if (status)
                goto conn_fail;
 
@@ -116,7 +116,7 @@ static int ath10k_init_connect_htc(struct ath10k *ar)
        return 0;
 
 timeout:
-       ath10k_htc_stop(ar->htc);
+       ath10k_htc_stop(&ar->htc);
 conn_fail:
        return status;
 }
@@ -247,19 +247,11 @@ static int ath10k_push_board_ext_data(struct ath10k *ar,
 
 static int ath10k_download_board_data(struct ath10k *ar)
 {
+       const struct firmware *fw = ar->board_data;
        u32 board_data_size = QCA988X_BOARD_DATA_SZ;
        u32 address;
-       const struct firmware *fw;
        int ret;
 
-       fw = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir,
-                                 ar->hw_params.fw.board);
-       if (IS_ERR(fw)) {
-               ath10k_err("could not fetch board data fw file (%ld)\n",
-                          PTR_ERR(fw));
-               return PTR_ERR(fw);
-       }
-
        ret = ath10k_push_board_ext_data(ar, fw);
        if (ret) {
                ath10k_err("could not push board ext data (%d)\n", ret);
@@ -286,32 +278,20 @@ static int ath10k_download_board_data(struct ath10k *ar)
        }
 
 exit:
-       release_firmware(fw);
        return ret;
 }
 
 static int ath10k_download_and_run_otp(struct ath10k *ar)
 {
-       const struct firmware *fw;
-       u32 address;
+       const struct firmware *fw = ar->otp;
+       u32 address = ar->hw_params.patch_load_addr;
        u32 exec_param;
        int ret;
 
        /* OTP is optional */
 
-       if (ar->hw_params.fw.otp == NULL) {
-               ath10k_info("otp file not defined\n");
-               return 0;
-       }
-
-       address = ar->hw_params.patch_load_addr;
-
-       fw = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir,
-                                 ar->hw_params.fw.otp);
-       if (IS_ERR(fw)) {
-               ath10k_warn("could not fetch otp (%ld)\n", PTR_ERR(fw));
+       if (!ar->otp)
                return 0;
-       }
 
        ret = ath10k_bmi_fast_download(ar, address, fw->data, fw->size);
        if (ret) {
@@ -327,28 +307,17 @@ static int ath10k_download_and_run_otp(struct ath10k *ar)
        }
 
 exit:
-       release_firmware(fw);
        return ret;
 }
 
 static int ath10k_download_fw(struct ath10k *ar)
 {
-       const struct firmware *fw;
+       const struct firmware *fw = ar->firmware;
        u32 address;
        int ret;
 
-       if (ar->hw_params.fw.fw == NULL)
-               return -EINVAL;
-
        address = ar->hw_params.patch_load_addr;
 
-       fw = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir,
-                                 ar->hw_params.fw.fw);
-       if (IS_ERR(fw)) {
-               ath10k_err("could not fetch fw (%ld)\n", PTR_ERR(fw));
-               return PTR_ERR(fw);
-       }
-
        ret = ath10k_bmi_fast_download(ar, address, fw->data, fw->size);
        if (ret) {
                ath10k_err("could not write fw (%d)\n", ret);
@@ -356,7 +325,74 @@ static int ath10k_download_fw(struct ath10k *ar)
        }
 
 exit:
-       release_firmware(fw);
+       return ret;
+}
+
+static void ath10k_core_free_firmware_files(struct ath10k *ar)
+{
+       if (ar->board_data && !IS_ERR(ar->board_data))
+               release_firmware(ar->board_data);
+
+       if (ar->otp && !IS_ERR(ar->otp))
+               release_firmware(ar->otp);
+
+       if (ar->firmware && !IS_ERR(ar->firmware))
+               release_firmware(ar->firmware);
+
+       ar->board_data = NULL;
+       ar->otp = NULL;
+       ar->firmware = NULL;
+}
+
+static int ath10k_core_fetch_firmware_files(struct ath10k *ar)
+{
+       int ret = 0;
+
+       if (ar->hw_params.fw.fw == NULL) {
+               ath10k_err("firmware file not defined\n");
+               return -EINVAL;
+       }
+
+       if (ar->hw_params.fw.board == NULL) {
+               ath10k_err("board data file not defined");
+               return -EINVAL;
+       }
+
+       ar->board_data = ath10k_fetch_fw_file(ar,
+                                             ar->hw_params.fw.dir,
+                                             ar->hw_params.fw.board);
+       if (IS_ERR(ar->board_data)) {
+               ret = PTR_ERR(ar->board_data);
+               ath10k_err("could not fetch board data (%d)\n", ret);
+               goto err;
+       }
+
+       ar->firmware = ath10k_fetch_fw_file(ar,
+                                           ar->hw_params.fw.dir,
+                                           ar->hw_params.fw.fw);
+       if (IS_ERR(ar->firmware)) {
+               ret = PTR_ERR(ar->firmware);
+               ath10k_err("could not fetch firmware (%d)\n", ret);
+               goto err;
+       }
+
+       /* OTP may be undefined. If so, don't fetch it at all */
+       if (ar->hw_params.fw.otp == NULL)
+               return 0;
+
+       ar->otp = ath10k_fetch_fw_file(ar,
+                                      ar->hw_params.fw.dir,
+                                      ar->hw_params.fw.otp);
+       if (IS_ERR(ar->otp)) {
+               ret = PTR_ERR(ar->otp);
+               ath10k_err("could not fetch otp (%d)\n", ret);
+               goto err;
+       }
+
+       return 0;
+
+err:
+       ath10k_core_free_firmware_files(ar);
        return ret;
 }
 
@@ -440,8 +476,35 @@ static int ath10k_init_hw_params(struct ath10k *ar)
        return 0;
 }
 
+static void ath10k_core_restart(struct work_struct *work)
+{
+       struct ath10k *ar = container_of(work, struct ath10k, restart_work);
+
+       mutex_lock(&ar->conf_mutex);
+
+       switch (ar->state) {
+       case ATH10K_STATE_ON:
+               ath10k_halt(ar);
+               ar->state = ATH10K_STATE_RESTARTING;
+               ieee80211_restart_hw(ar->hw);
+               break;
+       case ATH10K_STATE_OFF:
+               /* this can happen if driver is being unloaded */
+               ath10k_warn("cannot restart a device that hasn't been started\n");
+               break;
+       case ATH10K_STATE_RESTARTING:
+       case ATH10K_STATE_RESTARTED:
+               ar->state = ATH10K_STATE_WEDGED;
+               /* fall through */
+       case ATH10K_STATE_WEDGED:
+               ath10k_warn("device is wedged, will not restart\n");
+               break;
+       }
+
+       mutex_unlock(&ar->conf_mutex);
+}
+
 struct ath10k *ath10k_core_create(void *hif_priv, struct device *dev,
-                                 enum ath10k_bus bus,
                                  const struct ath10k_hif_ops *hif_ops)
 {
        struct ath10k *ar;
@@ -458,9 +521,6 @@ struct ath10k *ath10k_core_create(void *hif_priv, struct device *dev,
 
        ar->hif.priv = hif_priv;
        ar->hif.ops = hif_ops;
-       ar->hif.bus = bus;
-
-       ar->free_vdev_map = 0xFF; /* 8 vdevs */
 
        init_completion(&ar->scan.started);
        init_completion(&ar->scan.completed);
@@ -487,6 +547,8 @@ struct ath10k *ath10k_core_create(void *hif_priv, struct device *dev,
 
        init_waitqueue_head(&ar->event_queue);
 
+       INIT_WORK(&ar->restart_work, ath10k_core_restart);
+
        return ar;
 
 err_wq:
@@ -504,24 +566,11 @@ void ath10k_core_destroy(struct ath10k *ar)
 }
 EXPORT_SYMBOL(ath10k_core_destroy);
 
-
-int ath10k_core_register(struct ath10k *ar)
+int ath10k_core_start(struct ath10k *ar)
 {
-       struct ath10k_htc_ops htc_ops;
-       struct bmi_target_info target_info;
        int status;
 
-       memset(&target_info, 0, sizeof(target_info));
-       status = ath10k_bmi_get_target_info(ar, &target_info);
-       if (status)
-               goto err;
-
-       ar->target_version = target_info.version;
-       ar->hw->wiphy->hw_version = target_info.version;
-
-       status = ath10k_init_hw_params(ar);
-       if (status)
-               goto err;
+       ath10k_bmi_start(ar);
 
        if (ath10k_init_configure_target(ar)) {
                status = -EINVAL;
@@ -536,32 +585,32 @@ int ath10k_core_register(struct ath10k *ar)
        if (status)
                goto err;
 
-       htc_ops.target_send_suspend_complete = ath10k_send_suspend_complete;
+       ar->htc.htc_ops.target_send_suspend_complete =
+               ath10k_send_suspend_complete;
 
-       ar->htc = ath10k_htc_create(ar, &htc_ops);
-       if (IS_ERR(ar->htc)) {
-               status = PTR_ERR(ar->htc);
-               ath10k_err("could not create HTC (%d)\n", status);
+       status = ath10k_htc_init(ar);
+       if (status) {
+               ath10k_err("could not init HTC (%d)\n", status);
                goto err;
        }
 
        status = ath10k_bmi_done(ar);
        if (status)
-               goto err_htc_destroy;
+               goto err;
 
        status = ath10k_wmi_attach(ar);
        if (status) {
                ath10k_err("WMI attach failed: %d\n", status);
-               goto err_htc_destroy;
+               goto err;
        }
 
-       status = ath10k_htc_wait_target(ar->htc);
+       status = ath10k_htc_wait_target(&ar->htc);
        if (status)
                goto err_wmi_detach;
 
-       ar->htt = ath10k_htt_attach(ar);
-       if (!ar->htt) {
-               status = -ENOMEM;
+       status = ath10k_htt_attach(ar);
+       if (status) {
+               ath10k_err("could not attach htt (%d)\n", status);
                goto err_wmi_detach;
        }
 
@@ -588,77 +637,127 @@ int ath10k_core_register(struct ath10k *ar)
                goto err_disconnect_htc;
        }
 
-       status = ath10k_htt_attach_target(ar->htt);
-       if (status)
-               goto err_disconnect_htc;
-
-       status = ath10k_mac_register(ar);
+       status = ath10k_htt_attach_target(&ar->htt);
        if (status)
                goto err_disconnect_htc;
 
-       status = ath10k_debug_create(ar);
-       if (status) {
-               ath10k_err("unable to initialize debugfs\n");
-               goto err_unregister_mac;
-       }
+       ar->free_vdev_map = (1 << TARGET_NUM_VDEVS) - 1;
 
        return 0;
 
-err_unregister_mac:
-       ath10k_mac_unregister(ar);
 err_disconnect_htc:
-       ath10k_htc_stop(ar->htc);
+       ath10k_htc_stop(&ar->htc);
 err_htt_detach:
-       ath10k_htt_detach(ar->htt);
+       ath10k_htt_detach(&ar->htt);
 err_wmi_detach:
        ath10k_wmi_detach(ar);
-err_htc_destroy:
-       ath10k_htc_destroy(ar->htc);
 err:
        return status;
 }
-EXPORT_SYMBOL(ath10k_core_register);
+EXPORT_SYMBOL(ath10k_core_start);
 
-void ath10k_core_unregister(struct ath10k *ar)
+void ath10k_core_stop(struct ath10k *ar)
 {
-       /* We must unregister from mac80211 before we stop HTC and HIF.
-        * Otherwise we will fail to submit commands to FW and mac80211 will be
-        * unhappy about callback failures. */
-       ath10k_mac_unregister(ar);
-       ath10k_htc_stop(ar->htc);
-       ath10k_htt_detach(ar->htt);
+       ath10k_htc_stop(&ar->htc);
+       ath10k_htt_detach(&ar->htt);
        ath10k_wmi_detach(ar);
-       ath10k_htc_destroy(ar->htc);
 }
-EXPORT_SYMBOL(ath10k_core_unregister);
+EXPORT_SYMBOL(ath10k_core_stop);
 
-int ath10k_core_target_suspend(struct ath10k *ar)
+/* mac80211 manages fw/hw initialization through start/stop hooks. However in
+ * order to know what hw capabilities should be advertised to mac80211 it is
+ * necessary to load the firmware (and tear it down immediately since start
+ * hook will try to init it again) before registering */
+static int ath10k_core_probe_fw(struct ath10k *ar)
 {
-       int ret;
+       struct bmi_target_info target_info;
+       int ret = 0;
+
+       ret = ath10k_hif_power_up(ar);
+       if (ret) {
+               ath10k_err("could not start pci hif (%d)\n", ret);
+               return ret;
+       }
 
-       ath10k_dbg(ATH10K_DBG_CORE, "%s: called", __func__);
+       memset(&target_info, 0, sizeof(target_info));
+       ret = ath10k_bmi_get_target_info(ar, &target_info);
+       if (ret) {
+               ath10k_err("could not get target info (%d)\n", ret);
+               ath10k_hif_power_down(ar);
+               return ret;
+       }
 
-       ret = ath10k_wmi_pdev_suspend_target(ar);
-       if (ret)
-               ath10k_warn("could not suspend target (%d)\n", ret);
+       ar->target_version = target_info.version;
+       ar->hw->wiphy->hw_version = target_info.version;
 
-       return ret;
+       ret = ath10k_init_hw_params(ar);
+       if (ret) {
+               ath10k_err("could not get hw params (%d)\n", ret);
+               ath10k_hif_power_down(ar);
+               return ret;
+       }
+
+       ret = ath10k_core_fetch_firmware_files(ar);
+       if (ret) {
+               ath10k_err("could not fetch firmware files (%d)\n", ret);
+               ath10k_hif_power_down(ar);
+               return ret;
+       }
+
+       ret = ath10k_core_start(ar);
+       if (ret) {
+               ath10k_err("could not init core (%d)\n", ret);
+               ath10k_core_free_firmware_files(ar);
+               ath10k_hif_power_down(ar);
+               return ret;
+       }
+
+       ath10k_core_stop(ar);
+       ath10k_hif_power_down(ar);
+       return 0;
 }
-EXPORT_SYMBOL(ath10k_core_target_suspend);
 
-int ath10k_core_target_resume(struct ath10k *ar)
+int ath10k_core_register(struct ath10k *ar)
 {
-       int ret;
+       int status;
 
-       ath10k_dbg(ATH10K_DBG_CORE, "%s: called", __func__);
+       status = ath10k_core_probe_fw(ar);
+       if (status) {
+               ath10k_err("could not probe fw (%d)\n", status);
+               return status;
+       }
 
-       ret = ath10k_wmi_pdev_resume_target(ar);
-       if (ret)
-               ath10k_warn("could not resume target (%d)\n", ret);
+       status = ath10k_mac_register(ar);
+       if (status) {
+               ath10k_err("could not register to mac80211 (%d)\n", status);
+               goto err_release_fw;
+       }
 
-       return ret;
+       status = ath10k_debug_create(ar);
+       if (status) {
+               ath10k_err("unable to initialize debugfs\n");
+               goto err_unregister_mac;
+       }
+
+       return 0;
+
+err_unregister_mac:
+       ath10k_mac_unregister(ar);
+err_release_fw:
+       ath10k_core_free_firmware_files(ar);
+       return status;
+}
+EXPORT_SYMBOL(ath10k_core_register);
+
+void ath10k_core_unregister(struct ath10k *ar)
+{
+       /* We must unregister from mac80211 before we stop HTC and HIF.
+        * Otherwise we will fail to submit commands to FW and mac80211 will be
+        * unhappy about callback failures. */
+       ath10k_mac_unregister(ar);
+       ath10k_core_free_firmware_files(ar);
 }
-EXPORT_SYMBOL(ath10k_core_target_resume);
+EXPORT_SYMBOL(ath10k_core_unregister);
 
 MODULE_AUTHOR("Qualcomm Atheros");
 MODULE_DESCRIPTION("Core module for QCA988X PCIe devices.");
index 539336d1be4bec76e1bd0f0a42003051986c62fb..e4bba563ed4273613e58332e45c4146c04818516 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/types.h>
 #include <linux/pci.h>
 
+#include "htt.h"
 #include "htc.h"
 #include "hw.h"
 #include "targaddrs.h"
 #define ATH10K_SCAN_ID 0
 #define WMI_READY_TIMEOUT (5 * HZ)
 #define ATH10K_FLUSH_TIMEOUT_HZ (5*HZ)
+#define ATH10K_NUM_CHANS 38
 
 /* Antenna noise floor */
 #define ATH10K_DEFAULT_NOISE_FLOOR -95
 
 struct ath10k;
 
-enum ath10k_bus {
-       ATH10K_BUS_PCI,
-};
-
 struct ath10k_skb_cb {
        dma_addr_t paddr;
        bool is_mapped;
@@ -250,6 +248,28 @@ struct ath10k_debug {
        struct completion event_stats_compl;
 };
 
+enum ath10k_state {
+       ATH10K_STATE_OFF = 0,
+       ATH10K_STATE_ON,
+
+       /* When doing firmware recovery the device is first powered down.
+        * mac80211 is supposed to call in to start() hook later on. It is
+        * however possible that driver unloading and firmware crash overlap.
+        * mac80211 can wait on conf_mutex in stop() while the device is
+        * stopped in ath10k_core_restart() work holding conf_mutex. The state
+        * RESTARTED means that the device is up and mac80211 has started hw
+        * reconfiguration. Once mac80211 is done with the reconfiguration we
+        * set the state to STATE_ON in restart_complete(). */
+       ATH10K_STATE_RESTARTING,
+       ATH10K_STATE_RESTARTED,
+
+       /* The device has crashed while restarting hw. This state is like ON
+        * but commands are blocked in HTC and -ECOMM response is given. This
+        * prevents completion timeouts and makes the driver more responsive to
+        * userspace commands. This also prevents recursive recovery. */
+       ATH10K_STATE_WEDGED,
+};
+
 struct ath10k {
        struct ath_common ath_common;
        struct ieee80211_hw *hw;
@@ -266,6 +286,7 @@ struct ath10k {
        u32 hw_max_tx_power;
        u32 ht_cap_info;
        u32 vht_cap_info;
+       u32 num_rf_chains;
 
        struct targetdef *targetdef;
        struct hostdef *hostdef;
@@ -274,19 +295,16 @@ struct ath10k {
 
        struct {
                void *priv;
-               enum ath10k_bus bus;
                const struct ath10k_hif_ops *ops;
        } hif;
 
-       struct ath10k_wmi wmi;
-
        wait_queue_head_t event_queue;
        bool is_target_paused;
 
        struct ath10k_bmi bmi;
-
-       struct ath10k_htc *htc;
-       struct ath10k_htt *htt;
+       struct ath10k_wmi wmi;
+       struct ath10k_htc htc;
+       struct ath10k_htt htt;
 
        struct ath10k_hw_params {
                u32 id;
@@ -301,6 +319,10 @@ struct ath10k {
                } fw;
        } hw_params;
 
+       const struct firmware *board_data;
+       const struct firmware *otp;
+       const struct firmware *firmware;
+
        struct {
                struct completion started;
                struct completion completed;
@@ -350,20 +372,28 @@ struct ath10k {
        struct completion offchan_tx_completed;
        struct sk_buff *offchan_tx_skb;
 
+       enum ath10k_state state;
+
+       struct work_struct restart_work;
+
+       /* cycle count is reported twice for each visited channel during scan.
+        * access protected by data_lock */
+       u32 survey_last_rx_clear_count;
+       u32 survey_last_cycle_count;
+       struct survey_info survey[ATH10K_NUM_CHANS];
+
 #ifdef CONFIG_ATH10K_DEBUGFS
        struct ath10k_debug debug;
 #endif
 };
 
 struct ath10k *ath10k_core_create(void *hif_priv, struct device *dev,
-                                 enum ath10k_bus bus,
                                  const struct ath10k_hif_ops *hif_ops);
 void ath10k_core_destroy(struct ath10k *ar);
 
+int ath10k_core_start(struct ath10k *ar);
+void ath10k_core_stop(struct ath10k *ar);
 int ath10k_core_register(struct ath10k *ar);
 void ath10k_core_unregister(struct ath10k *ar);
 
-int ath10k_core_target_suspend(struct ath10k *ar);
-int ath10k_core_target_resume(struct ath10k *ar);
-
 #endif /* _CORE_H_ */
index 499034b873d1aac9dfeb9660db3cb0bda025e61a..3d65594fa098a40f638f73499d423e80d2a64a7e 100644 (file)
@@ -161,7 +161,7 @@ void ath10k_debug_read_target_stats(struct ath10k *ar,
        struct wmi_pdev_stats *ps;
        int i;
 
-       mutex_lock(&ar->conf_mutex);
+       spin_lock_bh(&ar->data_lock);
 
        stats = &ar->debug.target_stats;
 
@@ -259,6 +259,7 @@ void ath10k_debug_read_target_stats(struct ath10k *ar,
                }
        }
 
+       spin_unlock_bh(&ar->data_lock);
        mutex_unlock(&ar->conf_mutex);
        complete(&ar->debug.event_stats_compl);
 }
@@ -268,35 +269,35 @@ static ssize_t ath10k_read_fw_stats(struct file *file, char __user *user_buf,
 {
        struct ath10k *ar = file->private_data;
        struct ath10k_target_stats *fw_stats;
-       char *buf;
+       char *buf = NULL;
        unsigned int len = 0, buf_len = 2500;
-       ssize_t ret_cnt;
+       ssize_t ret_cnt = 0;
        long left;
        int i;
        int ret;
 
        fw_stats = &ar->debug.target_stats;
 
+       mutex_lock(&ar->conf_mutex);
+
+       if (ar->state != ATH10K_STATE_ON)
+               goto exit;
+
        buf = kzalloc(buf_len, GFP_KERNEL);
        if (!buf)
-               return -ENOMEM;
+               goto exit;
 
        ret = ath10k_wmi_request_stats(ar, WMI_REQUEST_PEER_STAT);
        if (ret) {
                ath10k_warn("could not request stats (%d)\n", ret);
-               kfree(buf);
-               return -EIO;
+               goto exit;
        }
 
        left = wait_for_completion_timeout(&ar->debug.event_stats_compl, 1*HZ);
+       if (left <= 0)
+               goto exit;
 
-       if (left <= 0) {
-               kfree(buf);
-               return -ETIMEDOUT;
-       }
-
-       mutex_lock(&ar->conf_mutex);
-
+       spin_lock_bh(&ar->data_lock);
        len += scnprintf(buf + len, buf_len - len, "\n");
        len += scnprintf(buf + len, buf_len - len, "%30s\n",
                         "ath10k PDEV stats");
@@ -424,14 +425,15 @@ static ssize_t ath10k_read_fw_stats(struct file *file, char __user *user_buf,
                                 fw_stats->peer_stat[i].peer_tx_rate);
                len += scnprintf(buf + len, buf_len - len, "\n");
        }
+       spin_unlock_bh(&ar->data_lock);
 
        if (len > buf_len)
                len = buf_len;
 
        ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len);
 
+exit:
        mutex_unlock(&ar->conf_mutex);
-
        kfree(buf);
        return ret_cnt;
 }
@@ -443,6 +445,60 @@ static const struct file_operations fops_fw_stats = {
        .llseek = default_llseek,
 };
 
+static ssize_t ath10k_read_simulate_fw_crash(struct file *file,
+                                            char __user *user_buf,
+                                            size_t count, loff_t *ppos)
+{
+       const char buf[] = "To simulate firmware crash write the keyword"
+                          " `crash` to this file.\nThis will force firmware"
+                          " to report a crash to the host system.\n";
+       return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf));
+}
+
+static ssize_t ath10k_write_simulate_fw_crash(struct file *file,
+                                             const char __user *user_buf,
+                                             size_t count, loff_t *ppos)
+{
+       struct ath10k *ar = file->private_data;
+       char buf[32] = {};
+       int ret;
+
+       mutex_lock(&ar->conf_mutex);
+
+       simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count);
+       if (strcmp(buf, "crash") && strcmp(buf, "crash\n")) {
+               ret = -EINVAL;
+               goto exit;
+       }
+
+       if (ar->state != ATH10K_STATE_ON &&
+           ar->state != ATH10K_STATE_RESTARTED) {
+               ret = -ENETDOWN;
+               goto exit;
+       }
+
+       ath10k_info("simulating firmware crash\n");
+
+       ret = ath10k_wmi_force_fw_hang(ar, WMI_FORCE_FW_HANG_ASSERT, 0);
+       if (ret)
+               ath10k_warn("failed to force fw hang (%d)\n", ret);
+
+       if (ret == 0)
+               ret = count;
+
+exit:
+       mutex_unlock(&ar->conf_mutex);
+       return ret;
+}
+
+static const struct file_operations fops_simulate_fw_crash = {
+       .read = ath10k_read_simulate_fw_crash,
+       .write = ath10k_write_simulate_fw_crash,
+       .open = simple_open,
+       .owner = THIS_MODULE,
+       .llseek = default_llseek,
+};
+
 int ath10k_debug_create(struct ath10k *ar)
 {
        ar->debug.debugfs_phy = debugfs_create_dir("ath10k",
@@ -459,6 +515,9 @@ int ath10k_debug_create(struct ath10k *ar)
        debugfs_create_file("wmi_services", S_IRUSR, ar->debug.debugfs_phy, ar,
                            &fops_wmi_services);
 
+       debugfs_create_file("simulate_fw_crash", S_IRUSR, ar->debug.debugfs_phy,
+                           ar, &fops_simulate_fw_crash);
+
        return 0;
 }
 #endif /* CONFIG_ATH10K_DEBUGFS */
index 73a24d44d1b4a9a1fdac3e43acf9ed1e9c1cfbb5..dcdea68bcc0a0ffd091eee16e0b74654ef3339b7 100644 (file)
@@ -46,8 +46,11 @@ struct ath10k_hif_ops {
                                void *request, u32 request_len,
                                void *response, u32 *response_len);
 
+       /* Post BMI phase, after FW is loaded. Starts regular operation */
        int (*start)(struct ath10k *ar);
 
+       /* Clean up what start() did. This does not revert to BMI phase. If
+        * so desired, call power_down() and power_up() */
        void (*stop)(struct ath10k *ar);
 
        int (*map_service_to_pipe)(struct ath10k *ar, u16 service_id,
@@ -66,10 +69,20 @@ struct ath10k_hif_ops {
         */
        void (*send_complete_check)(struct ath10k *ar, u8 pipe_id, int force);
 
-       void (*init)(struct ath10k *ar,
-                    struct ath10k_hif_cb *callbacks);
+       void (*set_callbacks)(struct ath10k *ar,
+                             struct ath10k_hif_cb *callbacks);
 
        u16 (*get_free_queue_number)(struct ath10k *ar, u8 pipe_id);
+
+       /* Power up the device and enter BMI transfer mode for FW download */
+       int (*power_up)(struct ath10k *ar);
+
+       /* Power down the device and free up resources. stop() must be called
+        * before this if start() was called earlier */
+       void (*power_down)(struct ath10k *ar);
+
+       int (*suspend)(struct ath10k *ar);
+       int (*resume)(struct ath10k *ar);
 };
 
 
@@ -122,10 +135,10 @@ static inline void ath10k_hif_send_complete_check(struct ath10k *ar,
        ar->hif.ops->send_complete_check(ar, pipe_id, force);
 }
 
-static inline void ath10k_hif_init(struct ath10k *ar,
-                                  struct ath10k_hif_cb *callbacks)
+static inline void ath10k_hif_set_callbacks(struct ath10k *ar,
+                                           struct ath10k_hif_cb *callbacks)
 {
-       ar->hif.ops->init(ar, callbacks);
+       ar->hif.ops->set_callbacks(ar, callbacks);
 }
 
 static inline u16 ath10k_hif_get_free_queue_number(struct ath10k *ar,
@@ -134,4 +147,30 @@ static inline u16 ath10k_hif_get_free_queue_number(struct ath10k *ar,
        return ar->hif.ops->get_free_queue_number(ar, pipe_id);
 }
 
+static inline int ath10k_hif_power_up(struct ath10k *ar)
+{
+       return ar->hif.ops->power_up(ar);
+}
+
+static inline void ath10k_hif_power_down(struct ath10k *ar)
+{
+       ar->hif.ops->power_down(ar);
+}
+
+static inline int ath10k_hif_suspend(struct ath10k *ar)
+{
+       if (!ar->hif.ops->suspend)
+               return -EOPNOTSUPP;
+
+       return ar->hif.ops->suspend(ar);
+}
+
+static inline int ath10k_hif_resume(struct ath10k *ar)
+{
+       if (!ar->hif.ops->resume)
+               return -EOPNOTSUPP;
+
+       return ar->hif.ops->resume(ar);
+}
+
 #endif /* _HIF_H_ */
index 74363c9493926229dbc37f2e23887106699399a9..ef3329ef52f369f0b65a7fd2e830ab07d99c573b 100644 (file)
@@ -246,15 +246,22 @@ int ath10k_htc_send(struct ath10k_htc *htc,
 {
        struct ath10k_htc_ep *ep = &htc->endpoint[eid];
 
+       if (htc->ar->state == ATH10K_STATE_WEDGED)
+               return -ECOMM;
+
        if (eid >= ATH10K_HTC_EP_COUNT) {
                ath10k_warn("Invalid endpoint id: %d\n", eid);
                return -ENOENT;
        }
 
-       skb_push(skb, sizeof(struct ath10k_htc_hdr));
-
        spin_lock_bh(&htc->tx_lock);
+       if (htc->stopped) {
+               spin_unlock_bh(&htc->tx_lock);
+               return -ESHUTDOWN;
+       }
+
        __skb_queue_tail(&ep->tx_queue, skb);
+       skb_push(skb, sizeof(struct ath10k_htc_hdr));
        spin_unlock_bh(&htc->tx_lock);
 
        queue_work(htc->ar->workqueue, &ep->send_work);
@@ -265,25 +272,19 @@ static int ath10k_htc_tx_completion_handler(struct ath10k *ar,
                                            struct sk_buff *skb,
                                            unsigned int eid)
 {
-       struct ath10k_htc *htc = ar->htc;
+       struct ath10k_htc *htc = &ar->htc;
        struct ath10k_htc_ep *ep = &htc->endpoint[eid];
-       bool stopping;
 
        ath10k_htc_notify_tx_completion(ep, skb);
        /* the skb now belongs to the completion handler */
 
+       /* note: when using TX credit flow, the re-checking of queues happens
+        * when credits flow back from the target.  in the non-TX credit case,
+        * we recheck after the packet completes */
        spin_lock_bh(&htc->tx_lock);
-       stopping = htc->stopping;
-       spin_unlock_bh(&htc->tx_lock);
-
-       if (!ep->tx_credit_flow_enabled && !stopping)
-               /*
-                * note: when using TX credit flow, the re-checking of
-                * queues happens when credits flow back from the target.
-                * in the non-TX credit case, we recheck after the packet
-                * completes
-                */
+       if (!ep->tx_credit_flow_enabled && !htc->stopped)
                queue_work(ar->workqueue, &ep->send_work);
+       spin_unlock_bh(&htc->tx_lock);
 
        return 0;
 }
@@ -414,7 +415,7 @@ static int ath10k_htc_rx_completion_handler(struct ath10k *ar,
                                            u8 pipe_id)
 {
        int status = 0;
-       struct ath10k_htc *htc = ar->htc;
+       struct ath10k_htc *htc = &ar->htc;
        struct ath10k_htc_hdr *hdr;
        struct ath10k_htc_ep *ep;
        u16 payload_len;
@@ -751,8 +752,9 @@ int ath10k_htc_connect_service(struct ath10k_htc *htc,
        tx_alloc = ath10k_htc_get_credit_allocation(htc,
                                                    conn_req->service_id);
        if (!tx_alloc)
-               ath10k_warn("HTC Service %s does not allocate target credits\n",
-                           htc_service_name(conn_req->service_id));
+               ath10k_dbg(ATH10K_DBG_HTC,
+                          "HTC Service %s does not allocate target credits\n",
+                          htc_service_name(conn_req->service_id));
 
        skb = ath10k_htc_build_tx_ctrl_skb(htc->ar);
        if (!skb) {
@@ -947,7 +949,7 @@ void ath10k_htc_stop(struct ath10k_htc *htc)
        struct ath10k_htc_ep *ep;
 
        spin_lock_bh(&htc->tx_lock);
-       htc->stopping = true;
+       htc->stopped = true;
        spin_unlock_bh(&htc->tx_lock);
 
        for (i = ATH10K_HTC_EP_0; i < ATH10K_HTC_EP_COUNT; i++) {
@@ -956,26 +958,18 @@ void ath10k_htc_stop(struct ath10k_htc *htc)
        }
 
        ath10k_hif_stop(htc->ar);
-       ath10k_htc_reset_endpoint_states(htc);
 }
 
 /* registered target arrival callback from the HIF layer */
-struct ath10k_htc *ath10k_htc_create(struct ath10k *ar,
-                                    struct ath10k_htc_ops *htc_ops)
+int ath10k_htc_init(struct ath10k *ar)
 {
        struct ath10k_hif_cb htc_callbacks;
        struct ath10k_htc_ep *ep = NULL;
-       struct ath10k_htc *htc = NULL;
-
-       /* FIXME: use struct ath10k instead */
-       htc = kzalloc(sizeof(struct ath10k_htc), GFP_KERNEL);
-       if (!htc)
-               return ERR_PTR(-ENOMEM);
+       struct ath10k_htc *htc = &ar->htc;
 
        spin_lock_init(&htc->tx_lock);
 
-       memcpy(&htc->htc_ops, htc_ops, sizeof(struct ath10k_htc_ops));
-
+       htc->stopped = false;
        ath10k_htc_reset_endpoint_states(htc);
 
        /* setup HIF layer callbacks */
@@ -986,15 +980,10 @@ struct ath10k_htc *ath10k_htc_create(struct ath10k *ar,
        /* Get HIF default pipe for HTC message exchange */
        ep = &htc->endpoint[ATH10K_HTC_EP_0];
 
-       ath10k_hif_init(ar, &htc_callbacks);
+       ath10k_hif_set_callbacks(ar, &htc_callbacks);
        ath10k_hif_get_default_pipe(ar, &ep->ul_pipe_id, &ep->dl_pipe_id);
 
        init_completion(&htc->ctl_resp);
 
-       return htc;
-}
-
-void ath10k_htc_destroy(struct ath10k_htc *htc)
-{
-       kfree(htc);
+       return 0;
 }
index fa45844b59fb38c04ad7fd82396be05fcd6d2289..e1dd8c761853d7d3197c173aeacb9499d4bd274a 100644 (file)
@@ -335,7 +335,7 @@ struct ath10k_htc {
        struct ath10k *ar;
        struct ath10k_htc_ep endpoint[ATH10K_HTC_EP_COUNT];
 
-       /* protects endpoint and stopping fields */
+       /* protects endpoint and stopped fields */
        spinlock_t tx_lock;
 
        struct ath10k_htc_ops htc_ops;
@@ -349,11 +349,10 @@ struct ath10k_htc {
        struct ath10k_htc_svc_tx_credits service_tx_alloc[ATH10K_HTC_EP_COUNT];
        int target_credit_size;
 
-       bool stopping;
+       bool stopped;
 };
 
-struct ath10k_htc *ath10k_htc_create(struct ath10k *ar,
-                                    struct ath10k_htc_ops *htc_ops);
+int ath10k_htc_init(struct ath10k *ar);
 int ath10k_htc_wait_target(struct ath10k_htc *htc);
 int ath10k_htc_start(struct ath10k_htc *htc);
 int ath10k_htc_connect_service(struct ath10k_htc *htc,
@@ -362,7 +361,6 @@ int ath10k_htc_connect_service(struct ath10k_htc *htc,
 int ath10k_htc_send(struct ath10k_htc *htc, enum ath10k_htc_ep_id eid,
                    struct sk_buff *packet);
 void ath10k_htc_stop(struct ath10k_htc *htc);
-void ath10k_htc_destroy(struct ath10k_htc *htc);
 struct sk_buff *ath10k_htc_alloc_skb(int size);
 
 #endif
index 185a5468a2f28377cbbc1cc468e39e67153358e9..39342c5cfcb270d8ded3d53fb8b0aaba011cc789 100644 (file)
@@ -16,6 +16,7 @@
  */
 
 #include <linux/slab.h>
+#include <linux/if_ether.h>
 
 #include "htt.h"
 #include "core.h"
@@ -36,7 +37,7 @@ static int ath10k_htt_htc_attach(struct ath10k_htt *htt)
        /* connect to control service */
        conn_req.service_id = ATH10K_HTC_SVC_ID_HTT_DATA_MSG;
 
-       status = ath10k_htc_connect_service(htt->ar->htc, &conn_req,
+       status = ath10k_htc_connect_service(&htt->ar->htc, &conn_req,
                                            &conn_resp);
 
        if (status)
@@ -47,15 +48,11 @@ static int ath10k_htt_htc_attach(struct ath10k_htt *htt)
        return 0;
 }
 
-struct ath10k_htt *ath10k_htt_attach(struct ath10k *ar)
+int ath10k_htt_attach(struct ath10k *ar)
 {
-       struct ath10k_htt *htt;
+       struct ath10k_htt *htt = &ar->htt;
        int ret;
 
-       htt = kzalloc(sizeof(*htt), GFP_KERNEL);
-       if (!htt)
-               return NULL;
-
        htt->ar = ar;
        htt->max_throughput_mbps = 800;
 
@@ -65,8 +62,11 @@ struct ath10k_htt *ath10k_htt_attach(struct ath10k *ar)
         * since ath10k_htt_rx_attach involves sending a rx ring configure
         * message to the target.
         */
-       if (ath10k_htt_htc_attach(htt))
+       ret = ath10k_htt_htc_attach(htt);
+       if (ret) {
+               ath10k_err("could not attach htt htc (%d)\n", ret);
                goto err_htc_attach;
+       }
 
        ret = ath10k_htt_tx_attach(htt);
        if (ret) {
@@ -74,8 +74,11 @@ struct ath10k_htt *ath10k_htt_attach(struct ath10k *ar)
                goto err_htc_attach;
        }
 
-       if (ath10k_htt_rx_attach(htt))
+       ret = ath10k_htt_rx_attach(htt);
+       if (ret) {
+               ath10k_err("could not attach htt rx (%d)\n", ret);
                goto err_rx_attach;
+       }
 
        /*
         * Prefetch enough data to satisfy target
@@ -89,13 +92,12 @@ struct ath10k_htt *ath10k_htt_attach(struct ath10k *ar)
                8 + /* llc snap */
                2; /* ip4 dscp or ip6 priority */
 
-       return htt;
+       return 0;
 
 err_rx_attach:
        ath10k_htt_tx_detach(htt);
 err_htc_attach:
-       kfree(htt);
-       return NULL;
+       return ret;
 }
 
 #define HTT_TARGET_VERSION_TIMEOUT_HZ (3*HZ)
@@ -148,5 +150,4 @@ void ath10k_htt_detach(struct ath10k_htt *htt)
 {
        ath10k_htt_rx_detach(htt);
        ath10k_htt_tx_detach(htt);
-       kfree(htt);
 }
index a7a7aa040536b0e00b2a8bce8a28251a559e0eed..318be4629cded3b19248fe13c17d6def30253b92 100644 (file)
@@ -20,7 +20,6 @@
 
 #include <linux/bug.h>
 
-#include "core.h"
 #include "htc.h"
 #include "rx_desc.h"
 
@@ -1317,7 +1316,7 @@ struct htt_rx_desc {
 #define HTT_LOG2_MAX_CACHE_LINE_SIZE 7 /* 2^7 = 128 */
 #define HTT_MAX_CACHE_LINE_SIZE_MASK ((1 << HTT_LOG2_MAX_CACHE_LINE_SIZE) - 1)
 
-struct ath10k_htt *ath10k_htt_attach(struct ath10k *ar);
+int ath10k_htt_attach(struct ath10k *ar);
 int ath10k_htt_attach_target(struct ath10k_htt *htt);
 void ath10k_htt_detach(struct ath10k_htt *htt);
 
index de058d7adca82f2022ab97658323f49e410e483a..e784c40b904b55165a112df250c61351c606979b 100644 (file)
@@ -15,6 +15,7 @@
  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */
 
+#include "core.h"
 #include "htc.h"
 #include "htt.h"
 #include "txrx.h"
@@ -803,6 +804,37 @@ static bool ath10k_htt_rx_has_fcs_err(struct sk_buff *skb)
        return false;
 }
 
+static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
+{
+       struct htt_rx_desc *rxd;
+       u32 flags, info;
+       bool is_ip4, is_ip6;
+       bool is_tcp, is_udp;
+       bool ip_csum_ok, tcpudp_csum_ok;
+
+       rxd = (void *)skb->data - sizeof(*rxd);
+       flags = __le32_to_cpu(rxd->attention.flags);
+       info = __le32_to_cpu(rxd->msdu_start.info1);
+
+       is_ip4 = !!(info & RX_MSDU_START_INFO1_IPV4_PROTO);
+       is_ip6 = !!(info & RX_MSDU_START_INFO1_IPV6_PROTO);
+       is_tcp = !!(info & RX_MSDU_START_INFO1_TCP_PROTO);
+       is_udp = !!(info & RX_MSDU_START_INFO1_UDP_PROTO);
+       ip_csum_ok = !(flags & RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL);
+       tcpudp_csum_ok = !(flags & RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL);
+
+       if (!is_ip4 && !is_ip6)
+               return CHECKSUM_NONE;
+       if (!is_tcp && !is_udp)
+               return CHECKSUM_NONE;
+       if (!ip_csum_ok)
+               return CHECKSUM_NONE;
+       if (!tcpudp_csum_ok)
+               return CHECKSUM_NONE;
+
+       return CHECKSUM_UNNECESSARY;
+}
+
 static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
                                  struct htt_rx_indication *rx)
 {
@@ -814,6 +846,7 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
        u8 *fw_desc;
        int i, j;
        int ret;
+       int ip_summed;
 
        memset(&info, 0, sizeof(info));
 
@@ -888,6 +921,11 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
                                continue;
                        }
 
+                       /* The skb is not yet processed and it may be
+                        * reallocated. Since the offload is in the original
+                        * skb extract the checksum now and assign it later */
+                       ip_summed = ath10k_htt_rx_get_csum_state(msdu_head);
+
                        info.skb     = msdu_head;
                        info.fcs_err = ath10k_htt_rx_has_fcs_err(msdu_head);
                        info.signal  = ATH10K_DEFAULT_NOISE_FLOOR;
@@ -913,6 +951,8 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
                        if (ath10k_htt_rx_hdr_is_amsdu((void *)info.skb->data))
                                ath10k_dbg(ATH10K_DBG_HTT, "htt mpdu is amsdu\n");
 
+                       info.skb->ip_summed = ip_summed;
+
                        ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt mpdu: ",
                                        info.skb->data, info.skb->len);
                        ath10k_process_rx(htt->ar, &info);
@@ -979,6 +1019,7 @@ static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
        info.status = HTT_RX_IND_MPDU_STATUS_OK;
        info.encrypt_type = MS(__le32_to_cpu(rxd->mpdu_start.info0),
                                RX_MPDU_START_INFO0_ENCRYPT_TYPE);
+       info.skb->ip_summed = ath10k_htt_rx_get_csum_state(info.skb);
 
        if (tkip_mic_err) {
                ath10k_warn("tkip mic error\n");
@@ -1036,7 +1077,7 @@ end:
 
 void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
 {
-       struct ath10k_htt *htt = ar->htt;
+       struct ath10k_htt *htt = &ar->htt;
        struct htt_resp *resp = (struct htt_resp *)skb->data;
 
        /* confirm alignment */
index ef79106db247daa86e8dda7e1b0b370f25a95aec..656c2546b2949825a38b1b9b05d266e5af8bc5f0 100644 (file)
@@ -92,7 +92,7 @@ int ath10k_htt_tx_attach(struct ath10k_htt *htt)
 
        /* At the beginning free queue number should hint us the maximum
         * queue length */
-       pipe = htt->ar->htc->endpoint[htt->eid].ul_pipe_id;
+       pipe = htt->ar->htc.endpoint[htt->eid].ul_pipe_id;
        htt->max_num_pending_tx = ath10k_hif_get_free_queue_number(htt->ar,
                                                                   pipe);
 
@@ -153,7 +153,7 @@ void ath10k_htt_tx_detach(struct ath10k_htt *htt)
 void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
 {
        struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
-       struct ath10k_htt *htt = ar->htt;
+       struct ath10k_htt *htt = &ar->htt;
 
        if (skb_cb->htt.is_conf) {
                dev_kfree_skb_any(skb);
@@ -194,7 +194,7 @@ int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt)
 
        ATH10K_SKB_CB(skb)->htt.is_conf = true;
 
-       ret = ath10k_htc_send(htt->ar->htc, htt->eid, skb);
+       ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
        if (ret) {
                dev_kfree_skb_any(skb);
                return ret;
@@ -281,7 +281,7 @@ int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt)
 
        ATH10K_SKB_CB(skb)->htt.is_conf = true;
 
-       ret = ath10k_htc_send(htt->ar->htc, htt->eid, skb);
+       ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
        if (ret) {
                dev_kfree_skb_any(skb);
                return ret;
@@ -346,7 +346,7 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
        skb_cb->htt.refcount = 2;
        skb_cb->htt.msdu = msdu;
 
-       res = ath10k_htc_send(htt->ar->htc, htt->eid, txdesc);
+       res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
        if (res)
                goto err;
 
@@ -465,6 +465,8 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
        flags1  = 0;
        flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
        flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
+       flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
+       flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
 
        frags_paddr = ATH10K_SKB_CB(txfrag)->paddr;
 
@@ -486,7 +488,7 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
        skb_cb->htt.txfrag = txfrag;
        skb_cb->htt.msdu = msdu;
 
-       res = ath10k_htc_send(htt->ar->htc, htt->eid, txdesc);
+       res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
        if (res)
                goto err;
 
index da5c333d0d4bc434a4ef04e0606b982428549430..cf2ba4d850c9bf0cedb8d123c0e05cccb821e6a2 100644 (file)
@@ -20,6 +20,7 @@
 #include <net/mac80211.h>
 #include <linux/etherdevice.h>
 
+#include "hif.h"
 #include "core.h"
 #include "debug.h"
 #include "wmi.h"
@@ -43,6 +44,8 @@ static int ath10k_send_key(struct ath10k_vif *arvif,
                .macaddr = macaddr,
        };
 
+       lockdep_assert_held(&arvif->ar->conf_mutex);
+
        if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
                arg.key_flags = WMI_KEY_PAIRWISE;
        else
@@ -87,6 +90,8 @@ static int ath10k_install_key(struct ath10k_vif *arvif,
        struct ath10k *ar = arvif->ar;
        int ret;
 
+       lockdep_assert_held(&ar->conf_mutex);
+
        INIT_COMPLETION(ar->install_key_done);
 
        ret = ath10k_send_key(arvif, key, cmd, macaddr);
@@ -327,6 +332,29 @@ static int ath10k_peer_create(struct ath10k *ar, u32 vdev_id, const u8 *addr)
        return 0;
 }
 
+static int  ath10k_mac_set_rts(struct ath10k_vif *arvif, u32 value)
+{
+       if (value != 0xFFFFFFFF)
+               value = min_t(u32, arvif->ar->hw->wiphy->rts_threshold,
+                             ATH10K_RTS_MAX);
+
+       return ath10k_wmi_vdev_set_param(arvif->ar, arvif->vdev_id,
+                                        WMI_VDEV_PARAM_RTS_THRESHOLD,
+                                        value);
+}
+
+static int ath10k_mac_set_frag(struct ath10k_vif *arvif, u32 value)
+{
+       if (value != 0xFFFFFFFF)
+               value = clamp_t(u32, arvif->ar->hw->wiphy->frag_threshold,
+                               ATH10K_FRAGMT_THRESHOLD_MIN,
+                               ATH10K_FRAGMT_THRESHOLD_MAX);
+
+       return ath10k_wmi_vdev_set_param(arvif->ar, arvif->vdev_id,
+                                        WMI_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
+                                        value);
+}
+
 static int ath10k_peer_delete(struct ath10k *ar, u32 vdev_id, const u8 *addr)
 {
        int ret;
@@ -364,6 +392,20 @@ static void ath10k_peer_cleanup(struct ath10k *ar, u32 vdev_id)
        spin_unlock_bh(&ar->data_lock);
 }
 
+static void ath10k_peer_cleanup_all(struct ath10k *ar)
+{
+       struct ath10k_peer *peer, *tmp;
+
+       lockdep_assert_held(&ar->conf_mutex);
+
+       spin_lock_bh(&ar->data_lock);
+       list_for_each_entry_safe(peer, tmp, &ar->peers, list) {
+               list_del(&peer->list);
+               kfree(peer);
+       }
+       spin_unlock_bh(&ar->data_lock);
+}
+
 /************************/
 /* Interface management */
 /************************/
@@ -372,6 +414,8 @@ static inline int ath10k_vdev_setup_sync(struct ath10k *ar)
 {
        int ret;
 
+       lockdep_assert_held(&ar->conf_mutex);
+
        ret = wait_for_completion_timeout(&ar->vdev_setup_done,
                                          ATH10K_VDEV_SETUP_TIMEOUT_HZ);
        if (ret == 0)
@@ -605,6 +649,8 @@ static void ath10k_control_beaconing(struct ath10k_vif *arvif,
 {
        int ret = 0;
 
+       lockdep_assert_held(&arvif->ar->conf_mutex);
+
        if (!info->enable_beacon) {
                ath10k_vdev_stop(arvif);
                return;
@@ -631,6 +677,8 @@ static void ath10k_control_ibss(struct ath10k_vif *arvif,
 {
        int ret = 0;
 
+       lockdep_assert_held(&arvif->ar->conf_mutex);
+
        if (!info->ibss_joined) {
                ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id, self_peer);
                if (ret)
@@ -680,6 +728,8 @@ static void ath10k_ps_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
        enum wmi_sta_ps_mode psmode;
        int ret;
 
+       lockdep_assert_held(&arvif->ar->conf_mutex);
+
        if (vif->type != NL80211_IFTYPE_STATION)
                return;
 
@@ -722,6 +772,8 @@ static void ath10k_peer_assoc_h_basic(struct ath10k *ar,
                                      struct ieee80211_bss_conf *bss_conf,
                                      struct wmi_peer_assoc_complete_arg *arg)
 {
+       lockdep_assert_held(&ar->conf_mutex);
+
        memcpy(arg->addr, sta->addr, ETH_ALEN);
        arg->vdev_id = arvif->vdev_id;
        arg->peer_aid = sta->aid;
@@ -764,6 +816,8 @@ static void ath10k_peer_assoc_h_crypto(struct ath10k *ar,
        const u8 *rsnie = NULL;
        const u8 *wpaie = NULL;
 
+       lockdep_assert_held(&ar->conf_mutex);
+
        bss = cfg80211_get_bss(ar->hw->wiphy, ar->hw->conf.chandef.chan,
                               info->bssid, NULL, 0, 0, 0);
        if (bss) {
@@ -804,6 +858,8 @@ static void ath10k_peer_assoc_h_rates(struct ath10k *ar,
        u32 ratemask;
        int i;
 
+       lockdep_assert_held(&ar->conf_mutex);
+
        sband = ar->hw->wiphy->bands[ar->hw->conf.chandef.chan->band];
        ratemask = sta->supp_rates[ar->hw->conf.chandef.chan->band];
        rates = sband->bitrates;
@@ -827,6 +883,8 @@ static void ath10k_peer_assoc_h_ht(struct ath10k *ar,
        int smps;
        int i, n;
 
+       lockdep_assert_held(&ar->conf_mutex);
+
        if (!ht_cap->ht_supported)
                return;
 
@@ -905,6 +963,8 @@ static void ath10k_peer_assoc_h_qos_ap(struct ath10k *ar,
        u32 uapsd = 0;
        u32 max_sp = 0;
 
+       lockdep_assert_held(&ar->conf_mutex);
+
        if (sta->wme)
                arg->peer_flags |= WMI_PEER_QOS;
 
@@ -1056,6 +1116,8 @@ static int ath10k_peer_assoc(struct ath10k *ar,
 {
        struct wmi_peer_assoc_complete_arg arg;
 
+       lockdep_assert_held(&ar->conf_mutex);
+
        memset(&arg, 0, sizeof(struct wmi_peer_assoc_complete_arg));
 
        ath10k_peer_assoc_h_basic(ar, arvif, sta, bss_conf, &arg);
@@ -1079,6 +1141,8 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
        struct ieee80211_sta *ap_sta;
        int ret;
 
+       lockdep_assert_held(&ar->conf_mutex);
+
        rcu_read_lock();
 
        ap_sta = ieee80211_find_sta(vif, bss_conf->bssid);
@@ -1119,6 +1183,8 @@ static void ath10k_bss_disassoc(struct ieee80211_hw *hw,
        struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
        int ret;
 
+       lockdep_assert_held(&ar->conf_mutex);
+
        /*
         * For some reason, calling VDEV-DOWN before VDEV-STOP
         * makes the FW to send frames via HTT after disassociation.
@@ -1152,6 +1218,8 @@ static int ath10k_station_assoc(struct ath10k *ar, struct ath10k_vif *arvif,
 {
        int ret = 0;
 
+       lockdep_assert_held(&ar->conf_mutex);
+
        ret = ath10k_peer_assoc(ar, arvif, sta, NULL);
        if (ret) {
                ath10k_warn("WMI peer assoc failed for %pM\n", sta->addr);
@@ -1172,6 +1240,8 @@ static int ath10k_station_disassoc(struct ath10k *ar, struct ath10k_vif *arvif,
 {
        int ret = 0;
 
+       lockdep_assert_held(&ar->conf_mutex);
+
        ret = ath10k_clear_peer_keys(arvif, sta->addr);
        if (ret) {
                ath10k_warn("could not clear all peer wep keys (%d)\n", ret);
@@ -1198,6 +1268,8 @@ static int ath10k_update_channel_list(struct ath10k *ar)
        int ret;
        int i;
 
+       lockdep_assert_held(&ar->conf_mutex);
+
        bands = hw->wiphy->bands;
        for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
                if (!bands[band])
@@ -1276,21 +1348,19 @@ static int ath10k_update_channel_list(struct ath10k *ar)
        return ret;
 }
 
-static void ath10k_reg_notifier(struct wiphy *wiphy,
-                               struct regulatory_request *request)
+static void ath10k_regd_update(struct ath10k *ar)
 {
-       struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
        struct reg_dmn_pair_mapping *regpair;
-       struct ath10k *ar = hw->priv;
        int ret;
 
-       ath_reg_notifier_apply(wiphy, request, &ar->ath_common.regulatory);
+       lockdep_assert_held(&ar->conf_mutex);
 
        ret = ath10k_update_channel_list(ar);
        if (ret)
                ath10k_warn("could not update channel list (%d)\n", ret);
 
        regpair = ar->ath_common.regulatory.regpair;
+
        /* Target allows setting up per-band regdomain but ath_common provides
         * a combined one only */
        ret = ath10k_wmi_pdev_set_regdomain(ar,
@@ -1303,6 +1373,20 @@ static void ath10k_reg_notifier(struct wiphy *wiphy,
                ath10k_warn("could not set pdev regdomain (%d)\n", ret);
 }
 
+static void ath10k_reg_notifier(struct wiphy *wiphy,
+                               struct regulatory_request *request)
+{
+       struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
+       struct ath10k *ar = hw->priv;
+
+       ath_reg_notifier_apply(wiphy, request, &ar->ath_common.regulatory);
+
+       mutex_lock(&ar->conf_mutex);
+       if (ar->state == ATH10K_STATE_ON)
+               ath10k_regd_update(ar);
+       mutex_unlock(&ar->conf_mutex);
+}
+
 /***************/
 /* TX handlers */
 /***************/
@@ -1322,9 +1406,9 @@ static void ath10k_tx_h_qos_workaround(struct ieee80211_hw *hw,
                return;
 
        qos_ctl = ieee80211_get_qos_ctl(hdr);
-       memmove(qos_ctl, qos_ctl + IEEE80211_QOS_CTL_LEN,
-               skb->len - ieee80211_hdrlen(hdr->frame_control));
-       skb_trim(skb, skb->len - IEEE80211_QOS_CTL_LEN);
+       memmove(skb->data + IEEE80211_QOS_CTL_LEN,
+               skb->data, (void *)qos_ctl - (void *)skb->data);
+       skb_pull(skb, IEEE80211_QOS_CTL_LEN);
 }
 
 static void ath10k_tx_h_update_wep_key(struct sk_buff *skb)
@@ -1397,15 +1481,15 @@ static void ath10k_tx_htt(struct ath10k *ar, struct sk_buff *skb)
        int ret;
 
        if (ieee80211_is_mgmt(hdr->frame_control))
-               ret = ath10k_htt_mgmt_tx(ar->htt, skb);
+               ret = ath10k_htt_mgmt_tx(&ar->htt, skb);
        else if (ieee80211_is_nullfunc(hdr->frame_control))
                /* FW does not report tx status properly for NullFunc frames
                 * unless they are sent through mgmt tx path. mac80211 sends
                 * those frames when it detects link/beacon loss and depends on
                 * the tx status to be correct. */
-               ret = ath10k_htt_mgmt_tx(ar->htt, skb);
+               ret = ath10k_htt_mgmt_tx(&ar->htt, skb);
        else
-               ret = ath10k_htt_tx(ar->htt, skb);
+               ret = ath10k_htt_tx(&ar->htt, skb);
 
        if (ret) {
                ath10k_warn("tx failed (%d). dropping packet.\n", ret);
@@ -1552,6 +1636,10 @@ static int ath10k_abort_scan(struct ath10k *ar)
        ret = ath10k_wmi_stop_scan(ar, &arg);
        if (ret) {
                ath10k_warn("could not submit wmi stop scan (%d)\n", ret);
+               spin_lock_bh(&ar->data_lock);
+               ar->scan.in_progress = false;
+               ath10k_offchan_tx_purge(ar);
+               spin_unlock_bh(&ar->data_lock);
                return -EIO;
        }
 
@@ -1645,10 +1733,14 @@ static void ath10k_tx(struct ieee80211_hw *hw,
                tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
        }
 
-       ath10k_tx_h_qos_workaround(hw, control, skb);
-       ath10k_tx_h_update_wep_key(skb);
-       ath10k_tx_h_add_p2p_noa_ie(ar, skb);
-       ath10k_tx_h_seq_no(skb);
+       /* it makes no sense to process injected frames like that */
+       if (info->control.vif &&
+           info->control.vif->type != NL80211_IFTYPE_MONITOR) {
+               ath10k_tx_h_qos_workaround(hw, control, skb);
+               ath10k_tx_h_update_wep_key(skb);
+               ath10k_tx_h_add_p2p_noa_ie(ar, skb);
+               ath10k_tx_h_seq_no(skb);
+       }
 
        memset(ATH10K_SKB_CB(skb), 0, sizeof(*ATH10K_SKB_CB(skb)));
        ATH10K_SKB_CB(skb)->htt.vdev_id = vdev_id;
@@ -1673,10 +1765,57 @@ static void ath10k_tx(struct ieee80211_hw *hw,
 /*
  * Initialize various parameters with default vaules.
  */
+void ath10k_halt(struct ath10k *ar)
+{
+       lockdep_assert_held(&ar->conf_mutex);
+
+       del_timer_sync(&ar->scan.timeout);
+       ath10k_offchan_tx_purge(ar);
+       ath10k_peer_cleanup_all(ar);
+       ath10k_core_stop(ar);
+       ath10k_hif_power_down(ar);
+
+       spin_lock_bh(&ar->data_lock);
+       if (ar->scan.in_progress) {
+               del_timer(&ar->scan.timeout);
+               ar->scan.in_progress = false;
+               ieee80211_scan_completed(ar->hw, true);
+       }
+       spin_unlock_bh(&ar->data_lock);
+}
+
 static int ath10k_start(struct ieee80211_hw *hw)
 {
        struct ath10k *ar = hw->priv;
-       int ret;
+       int ret = 0;
+
+       mutex_lock(&ar->conf_mutex);
+
+       if (ar->state != ATH10K_STATE_OFF &&
+           ar->state != ATH10K_STATE_RESTARTING) {
+               ret = -EINVAL;
+               goto exit;
+       }
+
+       ret = ath10k_hif_power_up(ar);
+       if (ret) {
+               ath10k_err("could not init hif (%d)\n", ret);
+               ar->state = ATH10K_STATE_OFF;
+               goto exit;
+       }
+
+       ret = ath10k_core_start(ar);
+       if (ret) {
+               ath10k_err("could not init core (%d)\n", ret);
+               ath10k_hif_power_down(ar);
+               ar->state = ATH10K_STATE_OFF;
+               goto exit;
+       }
+
+       if (ar->state == ATH10K_STATE_OFF)
+               ar->state = ATH10K_STATE_ON;
+       else if (ar->state == ATH10K_STATE_RESTARTING)
+               ar->state = ATH10K_STATE_RESTARTED;
 
        ret = ath10k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_PMF_QOS, 1);
        if (ret)
@@ -1688,6 +1827,10 @@ static int ath10k_start(struct ieee80211_hw *hw)
                ath10k_warn("could not init WMI_PDEV_PARAM_DYNAMIC_BW (%d)\n",
                            ret);
 
+       ath10k_regd_update(ar);
+
+exit:
+       mutex_unlock(&ar->conf_mutex);
        return 0;
 }
 
@@ -1695,18 +1838,48 @@ static void ath10k_stop(struct ieee80211_hw *hw)
 {
        struct ath10k *ar = hw->priv;
 
-       /* avoid leaks in case FW never confirms scan for offchannel */
+       mutex_lock(&ar->conf_mutex);
+       if (ar->state == ATH10K_STATE_ON ||
+           ar->state == ATH10K_STATE_RESTARTED ||
+           ar->state == ATH10K_STATE_WEDGED)
+               ath10k_halt(ar);
+
+       ar->state = ATH10K_STATE_OFF;
+       mutex_unlock(&ar->conf_mutex);
+
        cancel_work_sync(&ar->offchan_tx_work);
-       ath10k_offchan_tx_purge(ar);
+       cancel_work_sync(&ar->restart_work);
 }
 
-static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
+static void ath10k_config_ps(struct ath10k *ar)
 {
        struct ath10k_generic_iter ar_iter;
+
+       lockdep_assert_held(&ar->conf_mutex);
+
+       /* During HW reconfiguration mac80211 reports all interfaces that were
+        * running until reconfiguration was started. Since FW doesn't have any
+        * vdevs at this point we must not iterate over this interface list.
+        * This setting will be updated upon add_interface(). */
+       if (ar->state == ATH10K_STATE_RESTARTED)
+               return;
+
+       memset(&ar_iter, 0, sizeof(struct ath10k_generic_iter));
+       ar_iter.ar = ar;
+
+       ieee80211_iterate_active_interfaces_atomic(
+               ar->hw, IEEE80211_IFACE_ITER_NORMAL,
+               ath10k_ps_iter, &ar_iter);
+
+       if (ar_iter.ret)
+               ath10k_warn("failed to set ps config (%d)\n", ar_iter.ret);
+}
+
+static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
+{
        struct ath10k *ar = hw->priv;
        struct ieee80211_conf *conf = &hw->conf;
        int ret = 0;
-       u32 flags;
 
        mutex_lock(&ar->conf_mutex);
 
@@ -1718,18 +1891,8 @@ static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
                spin_unlock_bh(&ar->data_lock);
        }
 
-       if (changed & IEEE80211_CONF_CHANGE_PS) {
-               memset(&ar_iter, 0, sizeof(struct ath10k_generic_iter));
-               ar_iter.ar = ar;
-               flags = IEEE80211_IFACE_ITER_RESUME_ALL;
-
-               ieee80211_iterate_active_interfaces_atomic(hw,
-                                                          flags,
-                                                          ath10k_ps_iter,
-                                                          &ar_iter);
-
-               ret = ar_iter.ret;
-       }
+       if (changed & IEEE80211_CONF_CHANGE_PS)
+               ath10k_config_ps(ar);
 
        if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
                if (conf->flags & IEEE80211_CONF_MONITOR)
@@ -1738,6 +1901,7 @@ static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
                        ret = ath10k_monitor_destroy(ar);
        }
 
+       ath10k_wmi_flush_tx(ar);
        mutex_unlock(&ar->conf_mutex);
        return ret;
 }
@@ -1761,6 +1925,8 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
 
        mutex_lock(&ar->conf_mutex);
 
+       memset(arvif, 0, sizeof(*arvif));
+
        arvif->ar = ar;
        arvif->vif = vif;
 
@@ -1859,6 +2025,16 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
                        ath10k_warn("Failed to set PSPOLL count: %d\n", ret);
        }
 
+       ret = ath10k_mac_set_rts(arvif, ar->hw->wiphy->rts_threshold);
+       if (ret)
+               ath10k_warn("failed to set rts threshold for vdev %d (%d)\n",
+                           arvif->vdev_id, ret);
+
+       ret = ath10k_mac_set_frag(arvif, ar->hw->wiphy->frag_threshold);
+       if (ret)
+               ath10k_warn("failed to set frag threshold for vdev %d (%d)\n",
+                           arvif->vdev_id, ret);
+
        if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR)
                ar->monitor_present = true;
 
@@ -2164,6 +2340,8 @@ static int ath10k_hw_scan(struct ieee80211_hw *hw,
                        arg.ssids[i].len  = req->ssids[i].ssid_len;
                        arg.ssids[i].ssid = req->ssids[i].ssid;
                }
+       } else {
+               arg.scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE;
        }
 
        if (req->n_channels) {
@@ -2363,6 +2541,8 @@ static int ath10k_conf_tx_uapsd(struct ath10k *ar, struct ieee80211_vif *vif,
        u32 value = 0;
        int ret = 0;
 
+       lockdep_assert_held(&ar->conf_mutex);
+
        if (arvif->vdev_type != WMI_VDEV_TYPE_STA)
                return 0;
 
@@ -2558,11 +2738,16 @@ static void ath10k_set_rts_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
        struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
        u32 rts = ar_iter->ar->hw->wiphy->rts_threshold;
 
-       rts = min_t(u32, rts, ATH10K_RTS_MAX);
+       lockdep_assert_held(&arvif->ar->conf_mutex);
 
-       ar_iter->ret = ath10k_wmi_vdev_set_param(ar_iter->ar, arvif->vdev_id,
-                                                WMI_VDEV_PARAM_RTS_THRESHOLD,
-                                                rts);
+       /* During HW reconfiguration mac80211 reports all interfaces that were
+        * running until reconfiguration was started. Since FW doesn't have any
+        * vdevs at this point we must not iterate over this interface list.
+        * This setting will be updated upon add_interface(). */
+       if (ar_iter->ar->state == ATH10K_STATE_RESTARTED)
+               return;
+
+       ar_iter->ret = ath10k_mac_set_rts(arvif, rts);
        if (ar_iter->ret)
                ath10k_warn("Failed to set RTS threshold for VDEV: %d\n",
                            arvif->vdev_id);
@@ -2581,8 +2766,9 @@ static int ath10k_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
        ar_iter.ar = ar;
 
        mutex_lock(&ar->conf_mutex);
-       ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL,
-                                           ath10k_set_rts_iter, &ar_iter);
+       ieee80211_iterate_active_interfaces_atomic(
+               hw, IEEE80211_IFACE_ITER_NORMAL,
+               ath10k_set_rts_iter, &ar_iter);
        mutex_unlock(&ar->conf_mutex);
 
        return ar_iter.ret;
@@ -2593,17 +2779,17 @@ static void ath10k_set_frag_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
        struct ath10k_generic_iter *ar_iter = data;
        struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
        u32 frag = ar_iter->ar->hw->wiphy->frag_threshold;
-       int ret;
 
-       frag = clamp_t(u32, frag,
-                      ATH10K_FRAGMT_THRESHOLD_MIN,
-                      ATH10K_FRAGMT_THRESHOLD_MAX);
+       lockdep_assert_held(&arvif->ar->conf_mutex);
 
-       ret = ath10k_wmi_vdev_set_param(ar_iter->ar, arvif->vdev_id,
-                                       WMI_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
-                                       frag);
+       /* During HW reconfiguration mac80211 reports all interfaces that were
+        * running until reconfiguration was started. Since FW doesn't have any
+        * vdevs at this point we must not iterate over this interface list.
+        * This setting will be updated upon add_interface(). */
+       if (ar_iter->ar->state == ATH10K_STATE_RESTARTED)
+               return;
 
-       ar_iter->ret = ret;
+       ar_iter->ret = ath10k_mac_set_frag(arvif, frag);
        if (ar_iter->ret)
                ath10k_warn("Failed to set frag threshold for VDEV: %d\n",
                            arvif->vdev_id);
@@ -2622,8 +2808,9 @@ static int ath10k_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
        ar_iter.ar = ar;
 
        mutex_lock(&ar->conf_mutex);
-       ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL,
-                                           ath10k_set_frag_iter, &ar_iter);
+       ieee80211_iterate_active_interfaces_atomic(
+               hw, IEEE80211_IFACE_ITER_NORMAL,
+               ath10k_set_frag_iter, &ar_iter);
        mutex_unlock(&ar->conf_mutex);
 
        return ar_iter.ret;
@@ -2632,6 +2819,7 @@ static int ath10k_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
 static void ath10k_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
 {
        struct ath10k *ar = hw->priv;
+       bool skip;
        int ret;
 
        /* mac80211 doesn't care if we really xmit queued frames or not
@@ -2639,16 +2827,29 @@ static void ath10k_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
        if (drop)
                return;
 
-       ret = wait_event_timeout(ar->htt->empty_tx_wq, ({
+       mutex_lock(&ar->conf_mutex);
+
+       if (ar->state == ATH10K_STATE_WEDGED)
+               goto skip;
+
+       ret = wait_event_timeout(ar->htt.empty_tx_wq, ({
                        bool empty;
-                       spin_lock_bh(&ar->htt->tx_lock);
-                       empty = bitmap_empty(ar->htt->used_msdu_ids,
-                                            ar->htt->max_num_pending_tx);
-                       spin_unlock_bh(&ar->htt->tx_lock);
-                       (empty);
+
+                       spin_lock_bh(&ar->htt.tx_lock);
+                       empty = bitmap_empty(ar->htt.used_msdu_ids,
+                                            ar->htt.max_num_pending_tx);
+                       spin_unlock_bh(&ar->htt.tx_lock);
+
+                       skip = (ar->state == ATH10K_STATE_WEDGED);
+
+                       (empty || skip);
                }), ATH10K_FLUSH_TIMEOUT_HZ);
-       if (ret <= 0)
+
+       if (ret <= 0 || skip)
                ath10k_warn("tx not flushed\n");
+
+skip:
+       mutex_unlock(&ar->conf_mutex);
 }
 
 /* TODO: Implement this function properly
@@ -2660,6 +2861,118 @@ static int ath10k_tx_last_beacon(struct ieee80211_hw *hw)
        return 1;
 }
 
+#ifdef CONFIG_PM
+static int ath10k_suspend(struct ieee80211_hw *hw,
+                         struct cfg80211_wowlan *wowlan)
+{
+       struct ath10k *ar = hw->priv;
+       int ret;
+
+       ar->is_target_paused = false;
+
+       ret = ath10k_wmi_pdev_suspend_target(ar);
+       if (ret) {
+               ath10k_warn("could not suspend target (%d)\n", ret);
+               return 1;
+       }
+
+       ret = wait_event_interruptible_timeout(ar->event_queue,
+                                              ar->is_target_paused == true,
+                                              1 * HZ);
+       if (ret < 0) {
+               ath10k_warn("suspend interrupted (%d)\n", ret);
+               goto resume;
+       } else if (ret == 0) {
+               ath10k_warn("suspend timed out - target pause event never came\n");
+               goto resume;
+       }
+
+       ret = ath10k_hif_suspend(ar);
+       if (ret) {
+               ath10k_warn("could not suspend hif (%d)\n", ret);
+               goto resume;
+       }
+
+       return 0;
+resume:
+       ret = ath10k_wmi_pdev_resume_target(ar);
+       if (ret)
+               ath10k_warn("could not resume target (%d)\n", ret);
+       return 1;
+}
+
+static int ath10k_resume(struct ieee80211_hw *hw)
+{
+       struct ath10k *ar = hw->priv;
+       int ret;
+
+       ret = ath10k_hif_resume(ar);
+       if (ret) {
+               ath10k_warn("could not resume hif (%d)\n", ret);
+               return 1;
+       }
+
+       ret = ath10k_wmi_pdev_resume_target(ar);
+       if (ret) {
+               ath10k_warn("could not resume target (%d)\n", ret);
+               return 1;
+       }
+
+       return 0;
+}
+#endif
+
+static void ath10k_restart_complete(struct ieee80211_hw *hw)
+{
+       struct ath10k *ar = hw->priv;
+
+       mutex_lock(&ar->conf_mutex);
+
+       /* If device failed to restart it will be in a different state, e.g.
+        * ATH10K_STATE_WEDGED */
+       if (ar->state == ATH10K_STATE_RESTARTED) {
+               ath10k_info("device successfully recovered\n");
+               ar->state = ATH10K_STATE_ON;
+       }
+
+       mutex_unlock(&ar->conf_mutex);
+}
+
+static int ath10k_get_survey(struct ieee80211_hw *hw, int idx,
+                            struct survey_info *survey)
+{
+       struct ath10k *ar = hw->priv;
+       struct ieee80211_supported_band *sband;
+       struct survey_info *ar_survey = &ar->survey[idx];
+       int ret = 0;
+
+       mutex_lock(&ar->conf_mutex);
+
+       sband = hw->wiphy->bands[IEEE80211_BAND_2GHZ];
+       if (sband && idx >= sband->n_channels) {
+               idx -= sband->n_channels;
+               sband = NULL;
+       }
+
+       if (!sband)
+               sband = hw->wiphy->bands[IEEE80211_BAND_5GHZ];
+
+       if (!sband || idx >= sband->n_channels) {
+               ret = -ENOENT;
+               goto exit;
+       }
+
+       spin_lock_bh(&ar->data_lock);
+       memcpy(survey, ar_survey, sizeof(*survey));
+       spin_unlock_bh(&ar->data_lock);
+
+       survey->channel = &sband->channels[idx];
+
+exit:
+       mutex_unlock(&ar->conf_mutex);
+       return ret;
+}
+
 static const struct ieee80211_ops ath10k_ops = {
        .tx                             = ath10k_tx,
        .start                          = ath10k_start,
@@ -2680,6 +2993,12 @@ static const struct ieee80211_ops ath10k_ops = {
        .set_frag_threshold             = ath10k_set_frag_threshold,
        .flush                          = ath10k_flush,
        .tx_last_beacon                 = ath10k_tx_last_beacon,
+       .restart_complete               = ath10k_restart_complete,
+       .get_survey                     = ath10k_get_survey,
+#ifdef CONFIG_PM
+       .suspend                        = ath10k_suspend,
+       .resume                         = ath10k_resume,
+#endif
 };
 
 #define RATETAB_ENT(_rate, _rateid, _flags) { \
@@ -2797,9 +3116,15 @@ static const struct ieee80211_iface_limit ath10k_if_limits[] = {
        .max    = 8,
        .types  = BIT(NL80211_IFTYPE_STATION)
                | BIT(NL80211_IFTYPE_P2P_CLIENT)
-               | BIT(NL80211_IFTYPE_P2P_GO)
-               | BIT(NL80211_IFTYPE_AP)
-       }
+       },
+       {
+       .max    = 3,
+       .types  = BIT(NL80211_IFTYPE_P2P_GO)
+       },
+       {
+       .max    = 7,
+       .types  = BIT(NL80211_IFTYPE_AP)
+       },
 };
 
 static const struct ieee80211_iface_combination ath10k_if_comb = {
@@ -2814,19 +3139,18 @@ static struct ieee80211_sta_vht_cap ath10k_create_vht_cap(struct ath10k *ar)
 {
        struct ieee80211_sta_vht_cap vht_cap = {0};
        u16 mcs_map;
+       int i;
 
        vht_cap.vht_supported = 1;
        vht_cap.cap = ar->vht_cap_info;
 
-       /* FIXME: check dynamically how many streams board supports */
-       mcs_map = IEEE80211_VHT_MCS_SUPPORT_0_9 << 0 |
-               IEEE80211_VHT_MCS_SUPPORT_0_9 << 2 |
-               IEEE80211_VHT_MCS_SUPPORT_0_9 << 4 |
-               IEEE80211_VHT_MCS_NOT_SUPPORTED << 6 |
-               IEEE80211_VHT_MCS_NOT_SUPPORTED << 8 |
-               IEEE80211_VHT_MCS_NOT_SUPPORTED << 10 |
-               IEEE80211_VHT_MCS_NOT_SUPPORTED << 12 |
-               IEEE80211_VHT_MCS_NOT_SUPPORTED << 14;
+       mcs_map = 0;
+       for (i = 0; i < 8; i++) {
+               if (i < ar->num_rf_chains)
+                       mcs_map |= IEEE80211_VHT_MCS_SUPPORT_0_9 << (i*2);
+               else
+                       mcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << (i*2);
+       }
 
        vht_cap.vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map);
        vht_cap.vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
@@ -2889,7 +3213,7 @@ static struct ieee80211_sta_ht_cap ath10k_get_ht_cap(struct ath10k *ar)
        if (ar->vht_cap_info & WMI_VHT_CAP_MAX_MPDU_LEN_MASK)
                ht_cap.cap |= IEEE80211_HT_CAP_MAX_AMSDU;
 
-       for (i = 0; i < WMI_MAX_SPATIAL_STREAM; i++)
+       for (i = 0; i < ar->num_rf_chains; i++)
                ht_cap.mcs.rx_mask[i] = 0xFF;
 
        ht_cap.mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
@@ -2948,8 +3272,10 @@ int ath10k_mac_register(struct ath10k *ar)
                channels = kmemdup(ath10k_2ghz_channels,
                                   sizeof(ath10k_2ghz_channels),
                                   GFP_KERNEL);
-               if (!channels)
-                       return -ENOMEM;
+               if (!channels) {
+                       ret = -ENOMEM;
+                       goto err_free;
+               }
 
                band = &ar->mac.sbands[IEEE80211_BAND_2GHZ];
                band->n_channels = ARRAY_SIZE(ath10k_2ghz_channels);
@@ -2968,11 +3294,8 @@ int ath10k_mac_register(struct ath10k *ar)
                                   sizeof(ath10k_5ghz_channels),
                                   GFP_KERNEL);
                if (!channels) {
-                       if (ar->phy_capability & WHAL_WLAN_11G_CAPABILITY) {
-                               band = &ar->mac.sbands[IEEE80211_BAND_2GHZ];
-                               kfree(band->channels);
-                       }
-                       return -ENOMEM;
+                       ret = -ENOMEM;
+                       goto err_free;
                }
 
                band = &ar->mac.sbands[IEEE80211_BAND_5GHZ];
@@ -3032,29 +3355,36 @@ int ath10k_mac_register(struct ath10k *ar)
        ar->hw->wiphy->iface_combinations = &ath10k_if_comb;
        ar->hw->wiphy->n_iface_combinations = 1;
 
+       ar->hw->netdev_features = NETIF_F_HW_CSUM;
+
        ret = ath_regd_init(&ar->ath_common.regulatory, ar->hw->wiphy,
                            ath10k_reg_notifier);
        if (ret) {
                ath10k_err("Regulatory initialization failed\n");
-               return ret;
+               goto err_free;
        }
 
        ret = ieee80211_register_hw(ar->hw);
        if (ret) {
                ath10k_err("ieee80211 registration failed: %d\n", ret);
-               return ret;
+               goto err_free;
        }
 
        if (!ath_is_world_regd(&ar->ath_common.regulatory)) {
                ret = regulatory_hint(ar->hw->wiphy,
                                      ar->ath_common.regulatory.alpha2);
                if (ret)
-                       goto exit;
+                       goto err_unregister;
        }
 
        return 0;
-exit:
+
+err_unregister:
        ieee80211_unregister_hw(ar->hw);
+err_free:
+       kfree(ar->mac.sbands[IEEE80211_BAND_2GHZ].channels);
+       kfree(ar->mac.sbands[IEEE80211_BAND_5GHZ].channels);
+
        return ret;
 }
 
index 27fc92e58829c53aa385970ade282673aeb1fdb3..6fce9bfb19a5f2340d2e41fe2f668b7e8c9f4dce 100644 (file)
@@ -34,6 +34,7 @@ struct ath10k_vif *ath10k_get_arvif(struct ath10k *ar, u32 vdev_id);
 void ath10k_reset_scan(unsigned long ptr);
 void ath10k_offchan_tx_purge(struct ath10k *ar);
 void ath10k_offchan_tx_work(struct work_struct *work);
+void ath10k_halt(struct ath10k *ar);
 
 static inline struct ath10k_vif *ath10k_vif_to_arvif(struct ieee80211_vif *vif)
 {
index 33af4672c90916d38405870f67cf5773c5df5d43..e2f9ef50b1bd3999f6b7cc2989c24e407e9bd72a 100644 (file)
@@ -32,7 +32,7 @@
 #include "ce.h"
 #include "pci.h"
 
-unsigned int ath10k_target_ps;
+static unsigned int ath10k_target_ps;
 module_param(ath10k_target_ps, uint, 0644);
 MODULE_PARM_DESC(ath10k_target_ps, "Enable ath10k Target (SoC) PS option");
 
@@ -54,6 +54,10 @@ static int ath10k_pci_post_rx_pipe(struct hif_ce_pipe_info *pipe_info,
                                             int num);
 static void ath10k_pci_rx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info);
 static void ath10k_pci_stop_ce(struct ath10k *ar);
+static void ath10k_pci_device_reset(struct ath10k *ar);
+static int ath10k_pci_reset_target(struct ath10k *ar);
+static int ath10k_pci_start_intr(struct ath10k *ar);
+static void ath10k_pci_stop_intr(struct ath10k *ar);
 
 static const struct ce_attr host_ce_config_wlan[] = {
        /* host->target HTC control and raw streams */
@@ -718,6 +722,8 @@ static void ath10k_pci_hif_dump_area(struct ath10k *ar)
                           reg_dump_values[i + 1],
                           reg_dump_values[i + 2],
                           reg_dump_values[i + 3]);
+
+       ieee80211_queue_work(ar->hw, &ar->restart_work);
 }
 
 static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
@@ -744,8 +750,8 @@ static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
        ath10k_ce_per_engine_service(ar, pipe);
 }
 
-static void ath10k_pci_hif_post_init(struct ath10k *ar,
-                                    struct ath10k_hif_cb *callbacks)
+static void ath10k_pci_hif_set_callbacks(struct ath10k *ar,
+                                        struct ath10k_hif_cb *callbacks)
 {
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 
@@ -1250,10 +1256,25 @@ static void ath10k_pci_ce_deinit(struct ath10k *ar)
        }
 }
 
+static void ath10k_pci_disable_irqs(struct ath10k *ar)
+{
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       int i;
+
+       for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
+               disable_irq(ar_pci->pdev->irq + i);
+}
+
 static void ath10k_pci_hif_stop(struct ath10k *ar)
 {
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
        ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
 
+       /* Irqs are never explicitly re-enabled. They are implicitly re-enabled
+        * by ath10k_pci_start_intr(). */
+       ath10k_pci_disable_irqs(ar);
+
        ath10k_pci_stop_ce(ar);
 
        /* At this point, asynchronous threads are stopped, the target should
@@ -1263,7 +1284,8 @@ static void ath10k_pci_hif_stop(struct ath10k *ar)
        ath10k_pci_process_ce(ar);
        ath10k_pci_cleanup_ce(ar);
        ath10k_pci_buffer_cleanup(ar);
-       ath10k_pci_ce_deinit(ar);
+
+       ar_pci->started = 0;
 }
 
 static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
@@ -1735,6 +1757,124 @@ static void ath10k_pci_fw_interrupt_handler(struct ath10k *ar)
        ath10k_pci_sleep(ar);
 }
 
+static int ath10k_pci_hif_power_up(struct ath10k *ar)
+{
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       int ret;
+
+       ret = ath10k_pci_start_intr(ar);
+       if (ret) {
+               ath10k_err("could not start interrupt handling (%d)\n", ret);
+               goto err;
+       }
+
+       /*
+        * Bring the target up cleanly.
+        *
+        * The target may be in an undefined state with an AUX-powered Target
+        * and a Host in WoW mode. If the Host crashes, loses power, or is
+        * restarted (without unloading the driver) then the Target is left
+        * (aux) powered and running. On a subsequent driver load, the Target
+        * is in an unexpected state. We try to catch that here in order to
+        * reset the Target and retry the probe.
+        */
+       ath10k_pci_device_reset(ar);
+
+       ret = ath10k_pci_reset_target(ar);
+       if (ret)
+               goto err_irq;
+
+       if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
+               /* Force AWAKE forever */
+               ath10k_do_pci_wake(ar);
+
+       ret = ath10k_pci_ce_init(ar);
+       if (ret)
+               goto err_ps;
+
+       ret = ath10k_pci_init_config(ar);
+       if (ret)
+               goto err_ce;
+
+       ret = ath10k_pci_wake_target_cpu(ar);
+       if (ret) {
+               ath10k_err("could not wake up target CPU (%d)\n", ret);
+               goto err_ce;
+       }
+
+       return 0;
+
+err_ce:
+       ath10k_pci_ce_deinit(ar);
+err_ps:
+       if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
+               ath10k_do_pci_sleep(ar);
+err_irq:
+       ath10k_pci_stop_intr(ar);
+err:
+       return ret;
+}
+
+static void ath10k_pci_hif_power_down(struct ath10k *ar)
+{
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+       ath10k_pci_stop_intr(ar);
+
+       ath10k_pci_ce_deinit(ar);
+       if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
+               ath10k_do_pci_sleep(ar);
+}
+
+#ifdef CONFIG_PM
+
+#define ATH10K_PCI_PM_CONTROL 0x44
+
+static int ath10k_pci_hif_suspend(struct ath10k *ar)
+{
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       struct pci_dev *pdev = ar_pci->pdev;
+       u32 val;
+
+       pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
+
+       if ((val & 0x000000ff) != 0x3) {
+               pci_save_state(pdev);
+               pci_disable_device(pdev);
+               pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
+                                      (val & 0xffffff00) | 0x03);
+       }
+
+       return 0;
+}
+
+static int ath10k_pci_hif_resume(struct ath10k *ar)
+{
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       struct pci_dev *pdev = ar_pci->pdev;
+       u32 val;
+
+       pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
+
+       if ((val & 0x000000ff) != 0) {
+               pci_restore_state(pdev);
+               pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
+                                      val & 0xffffff00);
+               /*
+                * Suspend/Resume resets the PCI configuration space,
+                * so we have to re-disable the RETRY_TIMEOUT register (0x41)
+                * to keep PCI Tx retries from interfering with C3 CPU state
+                */
+               pci_read_config_dword(pdev, 0x40, &val);
+
+               if ((val & 0x0000ff00) != 0)
+                       pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
+       }
+
+       return 0;
+}
+#endif
+
 static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
        .send_head              = ath10k_pci_hif_send_head,
        .exchange_bmi_msg       = ath10k_pci_hif_exchange_bmi_msg,
@@ -1743,8 +1883,14 @@ static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
        .map_service_to_pipe    = ath10k_pci_hif_map_service_to_pipe,
        .get_default_pipe       = ath10k_pci_hif_get_default_pipe,
        .send_complete_check    = ath10k_pci_hif_send_complete_check,
-       .init                   = ath10k_pci_hif_post_init,
+       .set_callbacks          = ath10k_pci_hif_set_callbacks,
        .get_free_queue_number  = ath10k_pci_hif_get_free_queue_number,
+       .power_up               = ath10k_pci_hif_power_up,
+       .power_down             = ath10k_pci_hif_power_down,
+#ifdef CONFIG_PM
+       .suspend                = ath10k_pci_hif_suspend,
+       .resume                 = ath10k_pci_hif_resume,
+#endif
 };
 
 static void ath10k_pci_ce_tasklet(unsigned long ptr)
@@ -1872,8 +2018,13 @@ static int ath10k_pci_start_intr_msix(struct ath10k *ar, int num)
        ret = request_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW,
                          ath10k_pci_msi_fw_handler,
                          IRQF_SHARED, "ath10k_pci", ar);
-       if (ret)
+       if (ret) {
+               ath10k_warn("request_irq(%d) failed %d\n",
+                           ar_pci->pdev->irq + MSI_ASSIGN_FW, ret);
+
+               pci_disable_msi(ar_pci->pdev);
                return ret;
+       }
 
        for (i = MSI_ASSIGN_CE_INITIAL; i <= MSI_ASSIGN_CE_MAX; i++) {
                ret = request_irq(ar_pci->pdev->irq + i,
@@ -2059,9 +2210,9 @@ static int ath10k_pci_reset_target(struct ath10k *ar)
        return 0;
 }
 
-static void ath10k_pci_device_reset(struct ath10k_pci *ar_pci)
+static void ath10k_pci_device_reset(struct ath10k *ar)
 {
-       struct ath10k *ar = ar_pci->ar;
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        void __iomem *mem = ar_pci->mem;
        int i;
        u32 val;
@@ -2118,9 +2269,12 @@ static void ath10k_pci_dump_features(struct ath10k_pci *ar_pci)
                case ATH10K_PCI_FEATURE_MSI_X:
                        ath10k_dbg(ATH10K_DBG_PCI, "device supports MSI-X\n");
                        break;
-               case ATH10K_PCI_FEATURE_HW_1_0_WARKAROUND:
+               case ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND:
                        ath10k_dbg(ATH10K_DBG_PCI, "QCA988X_1.0 workaround enabled\n");
                        break;
+               case ATH10K_PCI_FEATURE_SOC_POWER_SAVE:
+                       ath10k_dbg(ATH10K_DBG_PCI, "QCA98XX SoC power save enabled\n");
+                       break;
                }
        }
 }
@@ -2145,7 +2299,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
 
        switch (pci_dev->device) {
        case QCA988X_1_0_DEVICE_ID:
-               set_bit(ATH10K_PCI_FEATURE_HW_1_0_WARKAROUND, ar_pci->features);
+               set_bit(ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND, ar_pci->features);
                break;
        case QCA988X_2_0_DEVICE_ID:
                set_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features);
@@ -2156,10 +2310,12 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
                goto err_ar_pci;
        }
 
+       if (ath10k_target_ps)
+               set_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features);
+
        ath10k_pci_dump_features(ar_pci);
 
-       ar = ath10k_core_create(ar_pci, ar_pci->dev, ATH10K_BUS_PCI,
-                               &ath10k_pci_hif_ops);
+       ar = ath10k_core_create(ar_pci, ar_pci->dev, &ath10k_pci_hif_ops);
        if (!ar) {
                ath10k_err("ath10k_core_create failed!\n");
                ret = -EINVAL;
@@ -2167,7 +2323,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
        }
 
        /* Enable QCA988X_1.0 HW workarounds */
-       if (test_bit(ATH10K_PCI_FEATURE_HW_1_0_WARKAROUND, ar_pci->features))
+       if (test_bit(ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND, ar_pci->features))
                spin_lock_init(&ar_pci->hw_v1_workaround_lock);
 
        ar_pci->ar = ar;
@@ -2241,62 +2397,14 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
 
        ar_pci->cacheline_sz = dma_get_cache_alignment();
 
-       ret = ath10k_pci_start_intr(ar);
-       if (ret) {
-               ath10k_err("could not start interrupt handling (%d)\n", ret);
-               goto err_iomap;
-       }
-
-       /*
-        * Bring the target up cleanly.
-        *
-        * The target may be in an undefined state with an AUX-powered Target
-        * and a Host in WoW mode. If the Host crashes, loses power, or is
-        * restarted (without unloading the driver) then the Target is left
-        * (aux) powered and running. On a subsequent driver load, the Target
-        * is in an unexpected state. We try to catch that here in order to
-        * reset the Target and retry the probe.
-        */
-       ath10k_pci_device_reset(ar_pci);
-
-       ret = ath10k_pci_reset_target(ar);
-       if (ret)
-               goto err_intr;
-
-       if (ath10k_target_ps) {
-               ath10k_dbg(ATH10K_DBG_PCI, "on-chip power save enabled\n");
-       } else {
-               /* Force AWAKE forever */
-               ath10k_dbg(ATH10K_DBG_PCI, "on-chip power save disabled\n");
-               ath10k_do_pci_wake(ar);
-       }
-
-       ret = ath10k_pci_ce_init(ar);
-       if (ret)
-               goto err_intr;
-
-       ret = ath10k_pci_init_config(ar);
-       if (ret)
-               goto err_ce;
-
-       ret = ath10k_pci_wake_target_cpu(ar);
-       if (ret) {
-               ath10k_err("could not wake up target CPU (%d)\n", ret);
-               goto err_ce;
-       }
-
        ret = ath10k_core_register(ar);
        if (ret) {
                ath10k_err("could not register driver core (%d)\n", ret);
-               goto err_ce;
+               goto err_iomap;
        }
 
        return 0;
 
-err_ce:
-       ath10k_pci_ce_deinit(ar);
-err_intr:
-       ath10k_pci_stop_intr(ar);
 err_iomap:
        pci_iounmap(pdev, mem);
 err_master:
@@ -2333,7 +2441,6 @@ static void ath10k_pci_remove(struct pci_dev *pdev)
        tasklet_kill(&ar_pci->msi_fw_err);
 
        ath10k_core_unregister(ar);
-       ath10k_pci_stop_intr(ar);
 
        pci_set_drvdata(pdev, NULL);
        pci_iounmap(pdev, ar_pci->mem);
@@ -2345,128 +2452,6 @@ static void ath10k_pci_remove(struct pci_dev *pdev)
        kfree(ar_pci);
 }
 
-#if defined(CONFIG_PM_SLEEP)
-
-#define ATH10K_PCI_PM_CONTROL 0x44
-
-static int ath10k_pci_suspend(struct device *device)
-{
-       struct pci_dev *pdev = to_pci_dev(device);
-       struct ath10k *ar = pci_get_drvdata(pdev);
-       struct ath10k_pci *ar_pci;
-       u32 val;
-       int ret, retval;
-
-       ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
-
-       if (!ar)
-               return -ENODEV;
-
-       ar_pci = ath10k_pci_priv(ar);
-       if (!ar_pci)
-               return -ENODEV;
-
-       if (ath10k_core_target_suspend(ar))
-               return -EBUSY;
-
-       ret = wait_event_interruptible_timeout(ar->event_queue,
-                                               ar->is_target_paused == true,
-                                               1 * HZ);
-       if (ret < 0) {
-               ath10k_warn("suspend interrupted (%d)\n", ret);
-               retval = ret;
-               goto resume;
-       } else if (ret == 0) {
-               ath10k_warn("suspend timed out - target pause event never came\n");
-               retval = EIO;
-               goto resume;
-       }
-
-       /*
-        * reset is_target_paused and host can check that in next time,
-        * or it will always be TRUE and host just skip the waiting
-        * condition, it causes target assert due to host already
-        * suspend
-        */
-       ar->is_target_paused = false;
-
-       pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
-
-       if ((val & 0x000000ff) != 0x3) {
-               pci_save_state(pdev);
-               pci_disable_device(pdev);
-               pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
-                                      (val & 0xffffff00) | 0x03);
-       }
-
-       return 0;
-resume:
-       ret = ath10k_core_target_resume(ar);
-       if (ret)
-               ath10k_warn("could not resume (%d)\n", ret);
-
-       return retval;
-}
-
-static int ath10k_pci_resume(struct device *device)
-{
-       struct pci_dev *pdev = to_pci_dev(device);
-       struct ath10k *ar = pci_get_drvdata(pdev);
-       struct ath10k_pci *ar_pci;
-       int ret;
-       u32 val;
-
-       ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
-
-       if (!ar)
-               return -ENODEV;
-       ar_pci = ath10k_pci_priv(ar);
-
-       if (!ar_pci)
-               return -ENODEV;
-
-       ret = pci_enable_device(pdev);
-       if (ret) {
-               ath10k_warn("cannot enable PCI device: %d\n", ret);
-               return ret;
-       }
-
-       pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
-
-       if ((val & 0x000000ff) != 0) {
-               pci_restore_state(pdev);
-               pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
-                                      val & 0xffffff00);
-               /*
-                * Suspend/Resume resets the PCI configuration space,
-                * so we have to re-disable the RETRY_TIMEOUT register (0x41)
-                * to keep PCI Tx retries from interfering with C3 CPU state
-                */
-               pci_read_config_dword(pdev, 0x40, &val);
-
-               if ((val & 0x0000ff00) != 0)
-                       pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
-       }
-
-       ret = ath10k_core_target_resume(ar);
-       if (ret)
-               ath10k_warn("target resume failed: %d\n", ret);
-
-       return ret;
-}
-
-static SIMPLE_DEV_PM_OPS(ath10k_dev_pm_ops,
-                        ath10k_pci_suspend,
-                        ath10k_pci_resume);
-
-#define ATH10K_PCI_PM_OPS (&ath10k_dev_pm_ops)
-
-#else
-
-#define ATH10K_PCI_PM_OPS NULL
-
-#endif /* CONFIG_PM_SLEEP */
-
 MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);
 
 static struct pci_driver ath10k_pci_driver = {
@@ -2474,7 +2459,6 @@ static struct pci_driver ath10k_pci_driver = {
        .id_table = ath10k_pci_id_table,
        .probe = ath10k_pci_probe,
        .remove = ath10k_pci_remove,
-       .driver.pm = ATH10K_PCI_PM_OPS,
 };
 
 static int __init ath10k_pci_init(void)
index d2a055a07dc66678fa8ce8f619fe07840042aeb1..871bb339d56dc3fbb19d475ed50ea938c59fefa2 100644 (file)
@@ -152,7 +152,8 @@ struct service_to_pipe {
 
 enum ath10k_pci_features {
        ATH10K_PCI_FEATURE_MSI_X                = 0,
-       ATH10K_PCI_FEATURE_HW_1_0_WARKAROUND    = 1,
+       ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND    = 1,
+       ATH10K_PCI_FEATURE_SOC_POWER_SAVE       = 2,
 
        /* keep last */
        ATH10K_PCI_FEATURE_COUNT
@@ -311,7 +312,7 @@ static inline void ath10k_pci_write32(struct ath10k *ar, u32 offset,
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        void __iomem *addr = ar_pci->mem;
 
-       if (test_bit(ATH10K_PCI_FEATURE_HW_1_0_WARKAROUND, ar_pci->features)) {
+       if (test_bit(ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND, ar_pci->features)) {
                unsigned long irq_flags;
 
                spin_lock_irqsave(&ar_pci->hw_v1_workaround_lock, irq_flags);
@@ -335,20 +336,22 @@ static inline u32 ath10k_pci_read32(struct ath10k *ar, u32 offset)
        return ioread32(ar_pci->mem + offset);
 }
 
-extern unsigned int ath10k_target_ps;
-
 void ath10k_do_pci_wake(struct ath10k *ar);
 void ath10k_do_pci_sleep(struct ath10k *ar);
 
 static inline void ath10k_pci_wake(struct ath10k *ar)
 {
-       if (ath10k_target_ps)
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+       if (test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
                ath10k_do_pci_wake(ar);
 }
 
 static inline void ath10k_pci_sleep(struct ath10k *ar)
 {
-       if (ath10k_target_ps)
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+       if (test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
                ath10k_do_pci_sleep(ar);
 }
 
index 7d4b7987422d7b1e19b6a87c0a68e2db8157992a..55f90c761868ddd9fa228510efb27465d360c00b 100644 (file)
@@ -27,6 +27,13 @@ void ath10k_wmi_flush_tx(struct ath10k *ar)
 {
        int ret;
 
+       lockdep_assert_held(&ar->conf_mutex);
+
+       if (ar->state == ATH10K_STATE_WEDGED) {
+               ath10k_warn("wmi flush skipped - device is wedged anyway\n");
+               return;
+       }
+
        ret = wait_event_timeout(ar->wmi.wq,
                                 atomic_read(&ar->wmi.pending_tx_count) == 0,
                                 5*HZ);
@@ -111,7 +118,7 @@ static int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb,
 
        trace_ath10k_wmi_cmd(cmd_id, skb->data, skb->len);
 
-       status = ath10k_htc_send(ar->htc, ar->wmi.eid, skb);
+       status = ath10k_htc_send(&ar->htc, ar->wmi.eid, skb);
        if (status) {
                dev_kfree_skb_any(skb);
                atomic_dec(&ar->wmi.pending_tx_count);
@@ -383,9 +390,82 @@ static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
        return 0;
 }
 
+static int freq_to_idx(struct ath10k *ar, int freq)
+{
+       struct ieee80211_supported_band *sband;
+       int band, ch, idx = 0;
+
+       for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) {
+               sband = ar->hw->wiphy->bands[band];
+               if (!sband)
+                       continue;
+
+               for (ch = 0; ch < sband->n_channels; ch++, idx++)
+                       if (sband->channels[ch].center_freq == freq)
+                               goto exit;
+       }
+
+exit:
+       return idx;
+}
+
 static void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb)
 {
-       ath10k_dbg(ATH10K_DBG_WMI, "WMI_CHAN_INFO_EVENTID\n");
+       struct wmi_chan_info_event *ev;
+       struct survey_info *survey;
+       u32 err_code, freq, cmd_flags, noise_floor, rx_clear_count, cycle_count;
+       int idx;
+
+       ev = (struct wmi_chan_info_event *)skb->data;
+
+       err_code = __le32_to_cpu(ev->err_code);
+       freq = __le32_to_cpu(ev->freq);
+       cmd_flags = __le32_to_cpu(ev->cmd_flags);
+       noise_floor = __le32_to_cpu(ev->noise_floor);
+       rx_clear_count = __le32_to_cpu(ev->rx_clear_count);
+       cycle_count = __le32_to_cpu(ev->cycle_count);
+
+       ath10k_dbg(ATH10K_DBG_WMI,
+                  "chan info err_code %d freq %d cmd_flags %d noise_floor %d rx_clear_count %d cycle_count %d\n",
+                  err_code, freq, cmd_flags, noise_floor, rx_clear_count,
+                  cycle_count);
+
+       spin_lock_bh(&ar->data_lock);
+
+       if (!ar->scan.in_progress) {
+               ath10k_warn("chan info event without a scan request?\n");
+               goto exit;
+       }
+
+       idx = freq_to_idx(ar, freq);
+       if (idx >= ARRAY_SIZE(ar->survey)) {
+               ath10k_warn("chan info: invalid frequency %d (idx %d out of bounds)\n",
+                           freq, idx);
+               goto exit;
+       }
+
+       if (cmd_flags & WMI_CHAN_INFO_FLAG_COMPLETE) {
+               /* During scanning chan info is reported twice for each
+                * visited channel. The reported cycle count is global
+                * and per-channel cycle count must be calculated */
+
+               cycle_count -= ar->survey_last_cycle_count;
+               rx_clear_count -= ar->survey_last_rx_clear_count;
+
+               survey = &ar->survey[idx];
+               survey->channel_time = WMI_CHAN_INFO_MSEC(cycle_count);
+               survey->channel_time_rx = WMI_CHAN_INFO_MSEC(rx_clear_count);
+               survey->noise = noise_floor;
+               survey->filled = SURVEY_INFO_CHANNEL_TIME |
+                                SURVEY_INFO_CHANNEL_TIME_RX |
+                                SURVEY_INFO_NOISE_DBM;
+       }
+
+       ar->survey_last_rx_clear_count = rx_clear_count;
+       ar->survey_last_cycle_count = cycle_count;
+
+exit:
+       spin_unlock_bh(&ar->data_lock);
 }
 
 static void ath10k_wmi_event_echo(struct ath10k *ar, struct sk_buff *skb)
@@ -501,8 +581,8 @@ static void ath10k_wmi_update_tim(struct ath10k *ar,
        ie = (u8 *)cfg80211_find_ie(WLAN_EID_TIM, ies,
                                    (u8 *)skb_tail_pointer(bcn) - ies);
        if (!ie) {
-               /* highly unlikely for mac80211 */
-               ath10k_warn("no tim ie found;\n");
+               if (arvif->vdev_type != WMI_VDEV_TYPE_IBSS)
+                       ath10k_warn("no tim ie found;\n");
                return;
        }
 
@@ -861,6 +941,13 @@ static void ath10k_wmi_service_ready_event_rx(struct ath10k *ar,
                (__le32_to_cpu(ev->sw_version_1) & 0xffff0000) >> 16;
        ar->fw_version_build = (__le32_to_cpu(ev->sw_version_1) & 0x0000ffff);
        ar->phy_capability = __le32_to_cpu(ev->phy_capability);
+       ar->num_rf_chains = __le32_to_cpu(ev->num_rf_chains);
+
+       if (ar->num_rf_chains > WMI_MAX_SPATIAL_STREAM) {
+               ath10k_warn("hardware advertises support for more spatial streams than it should (%d > %d)\n",
+                           ar->num_rf_chains, WMI_MAX_SPATIAL_STREAM);
+               ar->num_rf_chains = WMI_MAX_SPATIAL_STREAM;
+       }
 
        ar->ath_common.regulatory.current_rd =
                __le32_to_cpu(ev->hal_reg_capabilities.eeprom_rd);
@@ -885,7 +972,7 @@ static void ath10k_wmi_service_ready_event_rx(struct ath10k *ar,
        }
 
        ath10k_dbg(ATH10K_DBG_WMI,
-                  "wmi event service ready sw_ver 0x%08x sw_ver1 0x%08x abi_ver %u phy_cap 0x%08x ht_cap 0x%08x vht_cap 0x%08x vht_supp_msc 0x%08x sys_cap_info 0x%08x mem_reqs %u\n",
+                  "wmi event service ready sw_ver 0x%08x sw_ver1 0x%08x abi_ver %u phy_cap 0x%08x ht_cap 0x%08x vht_cap 0x%08x vht_supp_msc 0x%08x sys_cap_info 0x%08x mem_reqs %u num_rf_chains %u\n",
                   __le32_to_cpu(ev->sw_version),
                   __le32_to_cpu(ev->sw_version_1),
                   __le32_to_cpu(ev->abi_version),
@@ -894,7 +981,8 @@ static void ath10k_wmi_service_ready_event_rx(struct ath10k *ar,
                   __le32_to_cpu(ev->vht_cap_info),
                   __le32_to_cpu(ev->vht_supp_mcs),
                   __le32_to_cpu(ev->sys_cap_info),
-                  __le32_to_cpu(ev->num_mem_reqs));
+                  __le32_to_cpu(ev->num_mem_reqs),
+                  __le32_to_cpu(ev->num_rf_chains));
 
        complete(&ar->wmi.service_ready);
 }
@@ -1114,7 +1202,7 @@ int ath10k_wmi_connect_htc_service(struct ath10k *ar)
        /* connect to control service */
        conn_req.service_id = ATH10K_HTC_SVC_ID_WMI_CONTROL;
 
-       status = ath10k_htc_connect_service(ar->htc, &conn_req, &conn_resp);
+       status = ath10k_htc_connect_service(&ar->htc, &conn_req, &conn_resp);
        if (status) {
                ath10k_warn("failed to connect to WMI CONTROL service status: %d\n",
                            status);
@@ -1748,6 +1836,9 @@ int ath10k_wmi_vdev_install_key(struct ath10k *ar,
        if (arg->key_data)
                memcpy(cmd->key_data, arg->key_data, arg->key_len);
 
+       ath10k_dbg(ATH10K_DBG_WMI,
+                  "wmi vdev install key idx %d cipher %d len %d\n",
+                  arg->key_idx, arg->key_cipher, arg->key_len);
        return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_INSTALL_KEY_CMDID);
 }
 
@@ -2011,6 +2102,9 @@ int ath10k_wmi_peer_assoc(struct ath10k *ar,
        cmd->peer_vht_rates.tx_mcs_set =
                __cpu_to_le32(arg->peer_vht_rates.tx_mcs_set);
 
+       ath10k_dbg(ATH10K_DBG_WMI,
+                  "wmi peer assoc vdev %d addr %pM\n",
+                  arg->vdev_id, arg->addr);
        return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_ASSOC_CMDID);
 }
 
@@ -2079,3 +2173,22 @@ int ath10k_wmi_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id)
        ath10k_dbg(ATH10K_DBG_WMI, "wmi request stats %d\n", (int)stats_id);
        return ath10k_wmi_cmd_send(ar, skb, WMI_REQUEST_STATS_CMDID);
 }
+
+int ath10k_wmi_force_fw_hang(struct ath10k *ar,
+                            enum wmi_force_fw_hang_type type, u32 delay_ms)
+{
+       struct wmi_force_fw_hang_cmd *cmd;
+       struct sk_buff *skb;
+
+       skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
+       if (!skb)
+               return -ENOMEM;
+
+       cmd = (struct wmi_force_fw_hang_cmd *)skb->data;
+       cmd->type = __cpu_to_le32(type);
+       cmd->delay_ms = __cpu_to_le32(delay_ms);
+
+       ath10k_dbg(ATH10K_DBG_WMI, "wmi force fw hang %d delay %d\n",
+                  type, delay_ms);
+       return ath10k_wmi_cmd_send(ar, skb, WMI_FORCE_FW_HANG_CMDID);
+}
index 9555f5a0e041cd36579d2d37be7b6e2023525ca0..2c5a4f8daf2ee4b80e9a9e84391586e638389271 100644 (file)
@@ -416,6 +416,7 @@ enum wmi_cmd_id {
        WMI_PDEV_FTM_INTG_CMDID,
        WMI_VDEV_SET_KEEPALIVE_CMDID,
        WMI_VDEV_GET_KEEPALIVE_CMDID,
+       WMI_FORCE_FW_HANG_CMDID,
 
        /* GPIO Configuration */
        WMI_GPIO_CONFIG_CMDID = WMI_CMD_GRP(WMI_GRP_GPIO),
@@ -2930,6 +2931,11 @@ struct wmi_chan_info_event {
        __le32 cycle_count;
 } __packed;
 
+#define WMI_CHAN_INFO_FLAG_COMPLETE BIT(0)
+
+/* FIXME: empirically extrapolated */
+#define WMI_CHAN_INFO_MSEC(x) ((x) / 76595)
+
 /* Beacon filter wmi command info */
 #define BCN_FLT_MAX_SUPPORTED_IES      256
 #define BCN_FLT_MAX_ELEMS_IE_LIST      (BCN_FLT_MAX_SUPPORTED_IES / 32)
@@ -2972,6 +2978,22 @@ struct wmi_sta_keepalive_cmd {
        struct wmi_sta_keepalive_arp_resp arp_resp;
 } __packed;
 
+enum wmi_force_fw_hang_type {
+       WMI_FORCE_FW_HANG_ASSERT = 1,
+       WMI_FORCE_FW_HANG_NO_DETECT,
+       WMI_FORCE_FW_HANG_CTRL_EP_FULL,
+       WMI_FORCE_FW_HANG_EMPTY_POINT,
+       WMI_FORCE_FW_HANG_STACK_OVERFLOW,
+       WMI_FORCE_FW_HANG_INFINITE_LOOP,
+};
+
+#define WMI_FORCE_FW_HANG_RANDOM_TIME 0xFFFFFFFF
+
+struct wmi_force_fw_hang_cmd {
+       __le32 type;
+       __le32 delay_ms;
+} __packed;
+
 #define ATH10K_RTS_MAX         2347
 #define ATH10K_FRAGMT_THRESHOLD_MIN    540
 #define ATH10K_FRAGMT_THRESHOLD_MAX    2346
@@ -3048,5 +3070,7 @@ int ath10k_wmi_beacon_send(struct ath10k *ar, const struct wmi_bcn_tx_arg *arg);
 int ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
                        const struct wmi_pdev_set_wmm_params_arg *arg);
 int ath10k_wmi_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id);
+int ath10k_wmi_force_fw_hang(struct ath10k *ar,
+                            enum wmi_force_fw_hang_type type, u32 delay_ms);
 
 #endif /* _WMI_H_ */
index 2d691b8b95b94bb3e57aedd26af2972cc128f683..74bd54d6acebaeb2052d0184898a6b7e13f27a49 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/average.h>
 #include <linux/leds.h>
 #include <net/mac80211.h>
+#include <net/cfg80211.h>
 
 /* RX/TX descriptor hw structs
  * TODO: Driver part should only see sw structs */
index ce67ab791eae9d008ed4c3d318b8c8949c30f428..48161edec8de84769fd9f3db92fa1c4aa165d70b 100644 (file)
@@ -56,6 +56,7 @@
 #include <linux/etherdevice.h>
 #include <linux/nl80211.h>
 
+#include <net/cfg80211.h>
 #include <net/ieee80211_radiotap.h>
 
 #include <asm/unaligned.h>
@@ -165,28 +166,36 @@ static const struct ieee80211_rate ath5k_rates[] = {
          .flags = IEEE80211_RATE_SHORT_PREAMBLE },
        { .bitrate = 60,
          .hw_value = ATH5K_RATE_CODE_6M,
-         .flags = 0 },
+         .flags = IEEE80211_RATE_SUPPORTS_5MHZ |
+                  IEEE80211_RATE_SUPPORTS_10MHZ },
        { .bitrate = 90,
          .hw_value = ATH5K_RATE_CODE_9M,
-         .flags = 0 },
+         .flags = IEEE80211_RATE_SUPPORTS_5MHZ |
+                  IEEE80211_RATE_SUPPORTS_10MHZ },
        { .bitrate = 120,
          .hw_value = ATH5K_RATE_CODE_12M,
-         .flags = 0 },
+         .flags = IEEE80211_RATE_SUPPORTS_5MHZ |
+                  IEEE80211_RATE_SUPPORTS_10MHZ },
        { .bitrate = 180,
          .hw_value = ATH5K_RATE_CODE_18M,
-         .flags = 0 },
+         .flags = IEEE80211_RATE_SUPPORTS_5MHZ |
+                  IEEE80211_RATE_SUPPORTS_10MHZ },
        { .bitrate = 240,
          .hw_value = ATH5K_RATE_CODE_24M,
-         .flags = 0 },
+         .flags = IEEE80211_RATE_SUPPORTS_5MHZ |
+                  IEEE80211_RATE_SUPPORTS_10MHZ },
        { .bitrate = 360,
          .hw_value = ATH5K_RATE_CODE_36M,
-         .flags = 0 },
+         .flags = IEEE80211_RATE_SUPPORTS_5MHZ |
+                  IEEE80211_RATE_SUPPORTS_10MHZ },
        { .bitrate = 480,
          .hw_value = ATH5K_RATE_CODE_48M,
-         .flags = 0 },
+         .flags = IEEE80211_RATE_SUPPORTS_5MHZ |
+                  IEEE80211_RATE_SUPPORTS_10MHZ },
        { .bitrate = 540,
          .hw_value = ATH5K_RATE_CODE_54M,
-         .flags = 0 },
+         .flags = IEEE80211_RATE_SUPPORTS_5MHZ |
+                  IEEE80211_RATE_SUPPORTS_10MHZ },
 };
 
 static inline u64 ath5k_extend_tsf(struct ath5k_hw *ah, u32 rstamp)
@@ -435,11 +444,27 @@ ath5k_setup_bands(struct ieee80211_hw *hw)
  * Called with ah->lock.
  */
 int
-ath5k_chan_set(struct ath5k_hw *ah, struct ieee80211_channel *chan)
+ath5k_chan_set(struct ath5k_hw *ah, struct cfg80211_chan_def *chandef)
 {
        ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
                  "channel set, resetting (%u -> %u MHz)\n",
-                 ah->curchan->center_freq, chan->center_freq);
+                 ah->curchan->center_freq, chandef->chan->center_freq);
+
+       switch (chandef->width) {
+       case NL80211_CHAN_WIDTH_20:
+       case NL80211_CHAN_WIDTH_20_NOHT:
+               ah->ah_bwmode = AR5K_BWMODE_DEFAULT;
+               break;
+       case NL80211_CHAN_WIDTH_5:
+               ah->ah_bwmode = AR5K_BWMODE_5MHZ;
+               break;
+       case NL80211_CHAN_WIDTH_10:
+               ah->ah_bwmode = AR5K_BWMODE_10MHZ;
+               break;
+       default:
+               WARN_ON(1);
+               return -EINVAL;
+       }
 
        /*
         * To switch channels clear any pending DMA operations;
@@ -447,7 +472,7 @@ ath5k_chan_set(struct ath5k_hw *ah, struct ieee80211_channel *chan)
         * hardware at the new frequency, and then re-enable
         * the relevant bits of the h/w.
         */
-       return ath5k_reset(ah, chan, true);
+       return ath5k_reset(ah, chandef->chan, true);
 }
 
 void ath5k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
@@ -1400,6 +1425,16 @@ ath5k_receive_frame(struct ath5k_hw *ah, struct sk_buff *skb,
 
        rxs->rate_idx = ath5k_hw_to_driver_rix(ah, rs->rs_rate);
        rxs->flag |= ath5k_rx_decrypted(ah, skb, rs);
+       switch (ah->ah_bwmode) {
+       case AR5K_BWMODE_5MHZ:
+               rxs->flag |= RX_FLAG_5MHZ;
+               break;
+       case AR5K_BWMODE_10MHZ:
+               rxs->flag |= RX_FLAG_10MHZ;
+               break;
+       default:
+               break;
+       }
 
        if (rxs->rate_idx >= 0 && rs->rs_rate ==
            ah->sbands[ah->curchan->band].bitrates[rxs->rate_idx].hw_value_short)
@@ -2507,6 +2542,8 @@ ath5k_init_ah(struct ath5k_hw *ah, const struct ath_bus_ops *bus_ops)
        /* SW support for IBSS_RSN is provided by mac80211 */
        hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
 
+       hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_5_10_MHZ;
+
        /* both antennas can be configured as RX or TX */
        hw->wiphy->available_antennas_tx = 0x3;
        hw->wiphy->available_antennas_rx = 0x3;
index ca9a83ceeee1d430364a690c5b10703b14fc27d9..97469d0fbad7676ad9d68a81e7d5c86eff43074a 100644 (file)
@@ -101,7 +101,7 @@ void ath5k_set_beacon_filter(struct ieee80211_hw *hw, bool enable);
 
 void ath5k_update_bssid_mask_and_opmode(struct ath5k_hw *ah,
                                        struct ieee80211_vif *vif);
-int ath5k_chan_set(struct ath5k_hw *ah, struct ieee80211_channel *chan);
+int ath5k_chan_set(struct ath5k_hw *ah, struct cfg80211_chan_def *chandef);
 void ath5k_txbuf_free_skb(struct ath5k_hw *ah, struct ath5k_buf *bf);
 void ath5k_rxbuf_free_skb(struct ath5k_hw *ah, struct ath5k_buf *bf);
 void ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
index 40825d43322edb511862c90bd2587395745852e7..4ee01f654235478ea3510847d6f207593541573f 100644 (file)
@@ -202,7 +202,7 @@ ath5k_config(struct ieee80211_hw *hw, u32 changed)
        mutex_lock(&ah->lock);
 
        if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
-               ret = ath5k_chan_set(ah, conf->chandef.chan);
+               ret = ath5k_chan_set(ah, &conf->chandef);
                if (ret < 0)
                        goto unlock;
        }
index 1f16b4227d8f0b1cfc11af7a2cafdda6169f9720..c60d36aa13e214526f1f66d56cffd8498e10882d 100644 (file)
@@ -144,11 +144,13 @@ ath5k_hw_get_frame_duration(struct ath5k_hw *ah, enum ieee80211_band band,
                sifs = AR5K_INIT_SIFS_HALF_RATE;
                preamble *= 2;
                sym_time *= 2;
+               bitrate = DIV_ROUND_UP(bitrate, 2);
                break;
        case AR5K_BWMODE_5MHZ:
                sifs = AR5K_INIT_SIFS_QUARTER_RATE;
                preamble *= 4;
                sym_time *= 4;
+               bitrate = DIV_ROUND_UP(bitrate, 4);
                break;
        default:
                sifs = AR5K_INIT_SIFS_DEFAULT_BG;
index 65fe929529a8f9b303858a329b7892d9f9c1cc27..0583c69d26dbb4a95b99fa82704e8af14f27b9b8 100644 (file)
@@ -566,9 +566,11 @@ int ath5k_hw_set_ifs_intervals(struct ath5k_hw *ah, unsigned int slot_time)
 {
        struct ieee80211_channel *channel = ah->ah_current_channel;
        enum ieee80211_band band;
+       struct ieee80211_supported_band *sband;
        struct ieee80211_rate *rate;
        u32 ack_tx_time, eifs, eifs_clock, sifs, sifs_clock;
        u32 slot_time_clock = ath5k_hw_htoclock(ah, slot_time);
+       u32 rate_flags, i;
 
        if (slot_time < 6 || slot_time_clock > AR5K_SLOT_TIME_MAX)
                return -EINVAL;
@@ -605,7 +607,28 @@ int ath5k_hw_set_ifs_intervals(struct ath5k_hw *ah, unsigned int slot_time)
        else
                band = IEEE80211_BAND_2GHZ;
 
-       rate = &ah->sbands[band].bitrates[0];
+       switch (ah->ah_bwmode) {
+       case AR5K_BWMODE_5MHZ:
+               rate_flags = IEEE80211_RATE_SUPPORTS_5MHZ;
+               break;
+       case AR5K_BWMODE_10MHZ:
+               rate_flags = IEEE80211_RATE_SUPPORTS_10MHZ;
+               break;
+       default:
+               rate_flags = 0;
+               break;
+       }
+       sband = &ah->sbands[band];
+       rate = NULL;
+       for (i = 0; i < sband->n_bitrates; i++) {
+               if ((rate_flags & sband->bitrates[i].flags) != rate_flags)
+                       continue;
+               rate = &sband->bitrates[i];
+               break;
+       }
+       if (WARN_ON(!rate))
+               return -EINVAL;
+
        ack_tx_time = ath5k_hw_get_frame_duration(ah, band, 10, rate, false);
 
        /* ack_tx_time includes an SIFS already */
index 6a67881f94d60b2ff9880c0e8280cfbf4ca7edb1..4f316bdcbab58da3911c6a2365da1989ba412744 100644 (file)
@@ -1836,6 +1836,9 @@ void ath6kl_stop_txrx(struct ath6kl *ar)
 
        clear_bit(WMI_READY, &ar->flag);
 
+       if (ar->fw_recovery.enable)
+               del_timer_sync(&ar->fw_recovery.hb_timer);
+
        /*
         * After wmi_shudown all WMI events will be dropped. We
         * need to cleanup the buffers allocated in AP mode and
index d4fcfcad57d03df4d612708978286997c4c42a9e..5839fc23bdc789d5013f1c89e48f0f65eff37efe 100644 (file)
@@ -29,6 +29,9 @@ struct ath6kl_sta *ath6kl_find_sta(struct ath6kl_vif *vif, u8 *node_addr)
        struct ath6kl_sta *conn = NULL;
        u8 i, max_conn;
 
+       if (is_zero_ether_addr(node_addr))
+               return NULL;
+
        max_conn = (vif->nw_type == AP_NETWORK) ? AP_MAX_NUM_STA : 0;
 
        for (i = 0; i < max_conn; i++) {
index acc9aa832f764df7512165f4b1c4b4e9a3050416..d67170ea1038a1d35f3acc56261e68ed2051cad4 100644 (file)
@@ -66,7 +66,8 @@ nla_put_failure:
        ath6kl_warn("nla_put failed on testmode rx skb!\n");
 }
 
-int ath6kl_tm_cmd(struct wiphy *wiphy, void *data, int len)
+int ath6kl_tm_cmd(struct wiphy *wiphy, struct wireless_dev *wdev,
+                 void *data, int len)
 {
        struct ath6kl *ar = wiphy_priv(wiphy);
        struct nlattr *tb[ATH6KL_TM_ATTR_MAX + 1];
index fe651d6707df4ed36fc08b69a63b554565582fb5..9fbcdec3e208b6049f49598759f4e2f4ee7fb00a 100644 (file)
@@ -20,7 +20,8 @@
 #ifdef CONFIG_NL80211_TESTMODE
 
 void ath6kl_tm_rx_event(struct ath6kl *ar, void *buf, size_t buf_len);
-int ath6kl_tm_cmd(struct wiphy *wiphy, void *data, int len);
+int ath6kl_tm_cmd(struct wiphy *wiphy, struct wireless_dev *wdev,
+                 void *data, int len);
 
 #else
 
@@ -29,7 +30,9 @@ static inline void ath6kl_tm_rx_event(struct ath6kl *ar, void *buf,
 {
 }
 
-static inline int ath6kl_tm_cmd(struct wiphy *wiphy, void *data, int len)
+static inline int ath6kl_tm_cmd(struct wiphy *wiphy,
+                               struct wireless_dev *wdev,
+                               void *data, int len)
 {
        return 0;
 }
index d491a31789863fa08ce24a2282b2ab72b10bf949..c91bc6111c230afe64542d5d3a3e0a71a15d5a42 100644 (file)
@@ -96,6 +96,16 @@ config ATH9K_LEGACY_RATE_CONTROL
          has to be passed to mac80211 using the module parameter,
          ieee80211_default_rc_algo.
 
+config ATH9K_RFKILL
+       bool "Atheros ath9k rfkill support" if EXPERT
+       depends on ATH9K
+       depends on RFKILL=y || RFKILL=ATH9K
+       default y
+       help
+         Say Y to have ath9k poll the RF-Kill GPIO every couple of
+         seconds. Turn off to save power, but enable it if you have
+         a platform that can toggle the RF-Kill GPIO.
+
 config ATH9K_HTC
        tristate "Atheros HTC based wireless cards support"
        depends on USB && MAC80211
index 664844c5d3d51ae8752514bb3976fa2074ba0afe..dd1cc73d7946104d2353a777586c6da4bf3c6f67 100644 (file)
 
 #include "ath9k.h"
 
-static inline bool ath_is_alt_ant_ratio_better(int alt_ratio, int maxdelta,
+/*
+ * AR9285
+ * ======
+ *
+ * EEPROM has 2 4-bit fields containing the card configuration.
+ *
+ * antdiv_ctl1:
+ * ------------
+ * bb_enable_ant_div_lnadiv : 1
+ * bb_ant_div_alt_gaintb    : 1
+ * bb_ant_div_main_gaintb   : 1
+ * bb_enable_ant_fast_div   : 1
+ *
+ * antdiv_ctl2:
+ * -----------
+ * bb_ant_div_alt_lnaconf  : 2
+ * bb_ant_div_main_lnaconf : 2
+ *
+ * The EEPROM bits are used as follows:
+ * ------------------------------------
+ *
+ * bb_enable_ant_div_lnadiv      - Enable LNA path rx antenna diversity/combining.
+ *                                 Set in AR_PHY_MULTICHAIN_GAIN_CTL.
+ *
+ * bb_ant_div_[alt/main]_gaintb  - 0 -> Antenna config Alt/Main uses gaintable 0
+ *                                 1 -> Antenna config Alt/Main uses gaintable 1
+ *                                 Set in AR_PHY_MULTICHAIN_GAIN_CTL.
+ *
+ * bb_enable_ant_fast_div        - Enable fast antenna diversity.
+ *                                 Set in AR_PHY_CCK_DETECT.
+ *
+ * bb_ant_div_[alt/main]_lnaconf - Alt/Main LNA diversity/combining input config.
+ *                                 Set in AR_PHY_MULTICHAIN_GAIN_CTL.
+ *                                 10=LNA1
+ *                                 01=LNA2
+ *                                 11=LNA1+LNA2
+ *                                 00=LNA1-LNA2
+ *
+ * AR9485 / AR9565 / AR9331
+ * ========================
+ *
+ * The same bits are present in the EEPROM, but the location in the
+ * EEPROM is different (ant_div_control in ar9300_BaseExtension_1).
+ *
+ * ant_div_alt_lnaconf      ==> bit 0~1
+ * ant_div_main_lnaconf     ==> bit 2~3
+ * ant_div_alt_gaintb       ==> bit 4
+ * ant_div_main_gaintb      ==> bit 5
+ * enable_ant_div_lnadiv    ==> bit 6
+ * enable_ant_fast_div      ==> bit 7
+ */
+
+static inline bool ath_is_alt_ant_ratio_better(struct ath_ant_comb *antcomb,
+                                              int alt_ratio, int maxdelta,
                                               int mindelta, int main_rssi_avg,
                                               int alt_rssi_avg, int pkt_count)
 {
-       return (((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
-                (alt_rssi_avg > main_rssi_avg + maxdelta)) ||
-               (alt_rssi_avg > main_rssi_avg + mindelta)) && (pkt_count > 50);
+       if (pkt_count <= 50)
+               return false;
+
+       if (alt_rssi_avg > main_rssi_avg + mindelta)
+               return true;
+
+       if (alt_ratio >= antcomb->ant_ratio2 &&
+           alt_rssi_avg >= antcomb->low_rssi_thresh &&
+           (alt_rssi_avg > main_rssi_avg + maxdelta))
+               return true;
+
+       return false;
 }
 
-static inline bool ath_ant_div_comb_alt_check(u8 div_group, int alt_ratio,
-                                             int curr_main_set, int curr_alt_set,
-                                             int alt_rssi_avg, int main_rssi_avg)
+static inline bool ath_ant_div_comb_alt_check(struct ath_hw_antcomb_conf *conf,
+                                             struct ath_ant_comb *antcomb,
+                                             int alt_ratio, int alt_rssi_avg,
+                                             int main_rssi_avg)
 {
-       bool result = false;
-       switch (div_group) {
+       bool result, set1, set2;
+
+       result = set1 = set2 = false;
+
+       if (conf->main_lna_conf == ATH_ANT_DIV_COMB_LNA2 &&
+           conf->alt_lna_conf == ATH_ANT_DIV_COMB_LNA1)
+               set1 = true;
+
+       if (conf->main_lna_conf == ATH_ANT_DIV_COMB_LNA1 &&
+           conf->alt_lna_conf == ATH_ANT_DIV_COMB_LNA2)
+               set2 = true;
+
+       switch (conf->div_group) {
        case 0:
                if (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO)
                        result = true;
                break;
        case 1:
        case 2:
-               if ((((curr_main_set == ATH_ANT_DIV_COMB_LNA2) &&
-                     (curr_alt_set == ATH_ANT_DIV_COMB_LNA1) &&
-                     (alt_rssi_avg >= (main_rssi_avg - 5))) ||
-                    ((curr_main_set == ATH_ANT_DIV_COMB_LNA1) &&
-                     (curr_alt_set == ATH_ANT_DIV_COMB_LNA2) &&
-                     (alt_rssi_avg >= (main_rssi_avg - 2)))) &&
-                   (alt_rssi_avg >= 4))
+               if (alt_rssi_avg < 4 || alt_rssi_avg < antcomb->low_rssi_thresh)
+                       break;
+
+               if ((set1 && (alt_rssi_avg >= (main_rssi_avg - 5))) ||
+                   (set2 && (alt_rssi_avg >= (main_rssi_avg - 2))) ||
+                   (alt_ratio > antcomb->ant_ratio))
                        result = true;
-               else
-                       result = false;
+
+               break;
+       case 3:
+               if (alt_rssi_avg < 4 || alt_rssi_avg < antcomb->low_rssi_thresh)
+                       break;
+
+               if ((set1 && (alt_rssi_avg >= (main_rssi_avg - 3))) ||
+                   (set2 && (alt_rssi_avg >= (main_rssi_avg + 3))) ||
+                   (alt_ratio > antcomb->ant_ratio))
+                       result = true;
+
                break;
        }
 
@@ -108,6 +190,74 @@ static void ath_lnaconf_alt_good_scan(struct ath_ant_comb *antcomb,
        }
 }
 
+static void ath_ant_set_alt_ratio(struct ath_ant_comb *antcomb,
+                                 struct ath_hw_antcomb_conf *conf)
+{
+       /* set alt to the conf with maximun ratio */
+       if (antcomb->first_ratio && antcomb->second_ratio) {
+               if (antcomb->rssi_second > antcomb->rssi_third) {
+                       /* first alt*/
+                       if ((antcomb->first_quick_scan_conf == ATH_ANT_DIV_COMB_LNA1) ||
+                           (antcomb->first_quick_scan_conf == ATH_ANT_DIV_COMB_LNA2))
+                               /* Set alt LNA1 or LNA2*/
+                               if (conf->main_lna_conf == ATH_ANT_DIV_COMB_LNA2)
+                                       conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1;
+                               else
+                                       conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA2;
+                       else
+                               /* Set alt to A+B or A-B */
+                               conf->alt_lna_conf =
+                                       antcomb->first_quick_scan_conf;
+               } else if ((antcomb->second_quick_scan_conf == ATH_ANT_DIV_COMB_LNA1) ||
+                          (antcomb->second_quick_scan_conf == ATH_ANT_DIV_COMB_LNA2)) {
+                       /* Set alt LNA1 or LNA2 */
+                       if (conf->main_lna_conf == ATH_ANT_DIV_COMB_LNA2)
+                               conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1;
+                       else
+                               conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA2;
+               } else {
+                       /* Set alt to A+B or A-B */
+                       conf->alt_lna_conf = antcomb->second_quick_scan_conf;
+               }
+       } else if (antcomb->first_ratio) {
+               /* first alt */
+               if ((antcomb->first_quick_scan_conf == ATH_ANT_DIV_COMB_LNA1) ||
+                   (antcomb->first_quick_scan_conf == ATH_ANT_DIV_COMB_LNA2))
+                       /* Set alt LNA1 or LNA2 */
+                       if (conf->main_lna_conf == ATH_ANT_DIV_COMB_LNA2)
+                               conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1;
+                       else
+                               conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA2;
+               else
+                       /* Set alt to A+B or A-B */
+                       conf->alt_lna_conf = antcomb->first_quick_scan_conf;
+       } else if (antcomb->second_ratio) {
+               /* second alt */
+               if ((antcomb->second_quick_scan_conf == ATH_ANT_DIV_COMB_LNA1) ||
+                   (antcomb->second_quick_scan_conf == ATH_ANT_DIV_COMB_LNA2))
+                       /* Set alt LNA1 or LNA2 */
+                       if (conf->main_lna_conf == ATH_ANT_DIV_COMB_LNA2)
+                               conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1;
+                       else
+                               conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA2;
+               else
+                       /* Set alt to A+B or A-B */
+                       conf->alt_lna_conf = antcomb->second_quick_scan_conf;
+       } else {
+               /* main is largest */
+               if ((antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) ||
+                   (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2))
+                       /* Set alt LNA1 or LNA2 */
+                       if (conf->main_lna_conf == ATH_ANT_DIV_COMB_LNA2)
+                               conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1;
+                       else
+                               conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA2;
+               else
+                       /* Set alt to A+B or A-B */
+                       conf->alt_lna_conf = antcomb->main_conf;
+       }
+}
+
 static void ath_select_ant_div_from_quick_scan(struct ath_ant_comb *antcomb,
                                       struct ath_hw_antcomb_conf *div_ant_conf,
                                       int main_rssi_avg, int alt_rssi_avg,
@@ -129,7 +279,7 @@ static void ath_select_ant_div_from_quick_scan(struct ath_ant_comb *antcomb,
 
                if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) {
                        /* main is LNA1 */
-                       if (ath_is_alt_ant_ratio_better(alt_ratio,
+                       if (ath_is_alt_ant_ratio_better(antcomb, alt_ratio,
                                                ATH_ANT_DIV_COMB_LNA1_DELTA_HI,
                                                ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
                                                main_rssi_avg, alt_rssi_avg,
@@ -138,7 +288,7 @@ static void ath_select_ant_div_from_quick_scan(struct ath_ant_comb *antcomb,
                        else
                                antcomb->first_ratio = false;
                } else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2) {
-                       if (ath_is_alt_ant_ratio_better(alt_ratio,
+                       if (ath_is_alt_ant_ratio_better(antcomb, alt_ratio,
                                                ATH_ANT_DIV_COMB_LNA1_DELTA_MID,
                                                ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
                                                main_rssi_avg, alt_rssi_avg,
@@ -147,11 +297,11 @@ static void ath_select_ant_div_from_quick_scan(struct ath_ant_comb *antcomb,
                        else
                                antcomb->first_ratio = false;
                } else {
-                       if ((((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
-                             (alt_rssi_avg > main_rssi_avg +
-                              ATH_ANT_DIV_COMB_LNA1_DELTA_HI)) ||
-                            (alt_rssi_avg > main_rssi_avg)) &&
-                           (antcomb->total_pkt_count > 50))
+                       if (ath_is_alt_ant_ratio_better(antcomb, alt_ratio,
+                                               ATH_ANT_DIV_COMB_LNA1_DELTA_HI,
+                                               0,
+                                               main_rssi_avg, alt_rssi_avg,
+                                               antcomb->total_pkt_count))
                                antcomb->first_ratio = true;
                        else
                                antcomb->first_ratio = false;
@@ -164,17 +314,21 @@ static void ath_select_ant_div_from_quick_scan(struct ath_ant_comb *antcomb,
                antcomb->rssi_first = main_rssi_avg;
                antcomb->rssi_third = alt_rssi_avg;
 
-               if (antcomb->second_quick_scan_conf == ATH_ANT_DIV_COMB_LNA1)
+               switch(antcomb->second_quick_scan_conf) {
+               case ATH_ANT_DIV_COMB_LNA1:
                        antcomb->rssi_lna1 = alt_rssi_avg;
-               else if (antcomb->second_quick_scan_conf ==
-                        ATH_ANT_DIV_COMB_LNA2)
+                       break;
+               case ATH_ANT_DIV_COMB_LNA2:
                        antcomb->rssi_lna2 = alt_rssi_avg;
-               else if (antcomb->second_quick_scan_conf ==
-                        ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2) {
+                       break;
+               case ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2:
                        if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2)
                                antcomb->rssi_lna2 = main_rssi_avg;
                        else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1)
                                antcomb->rssi_lna1 = main_rssi_avg;
+                       break;
+               default:
+                       break;
                }
 
                if (antcomb->rssi_lna2 > antcomb->rssi_lna1 +
@@ -184,7 +338,7 @@ static void ath_select_ant_div_from_quick_scan(struct ath_ant_comb *antcomb,
                        div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA1;
 
                if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) {
-                       if (ath_is_alt_ant_ratio_better(alt_ratio,
+                       if (ath_is_alt_ant_ratio_better(antcomb, alt_ratio,
                                                ATH_ANT_DIV_COMB_LNA1_DELTA_HI,
                                                ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
                                                main_rssi_avg, alt_rssi_avg,
@@ -193,7 +347,7 @@ static void ath_select_ant_div_from_quick_scan(struct ath_ant_comb *antcomb,
                        else
                                antcomb->second_ratio = false;
                } else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2) {
-                       if (ath_is_alt_ant_ratio_better(alt_ratio,
+                       if (ath_is_alt_ant_ratio_better(antcomb, alt_ratio,
                                                ATH_ANT_DIV_COMB_LNA1_DELTA_MID,
                                                ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
                                                main_rssi_avg, alt_rssi_avg,
@@ -202,105 +356,18 @@ static void ath_select_ant_div_from_quick_scan(struct ath_ant_comb *antcomb,
                        else
                                antcomb->second_ratio = false;
                } else {
-                       if ((((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
-                             (alt_rssi_avg > main_rssi_avg +
-                              ATH_ANT_DIV_COMB_LNA1_DELTA_HI)) ||
-                            (alt_rssi_avg > main_rssi_avg)) &&
-                           (antcomb->total_pkt_count > 50))
+                       if (ath_is_alt_ant_ratio_better(antcomb, alt_ratio,
+                                               ATH_ANT_DIV_COMB_LNA1_DELTA_HI,
+                                               0,
+                                               main_rssi_avg, alt_rssi_avg,
+                                               antcomb->total_pkt_count))
                                antcomb->second_ratio = true;
                        else
                                antcomb->second_ratio = false;
                }
 
-               /* set alt to the conf with maximun ratio */
-               if (antcomb->first_ratio && antcomb->second_ratio) {
-                       if (antcomb->rssi_second > antcomb->rssi_third) {
-                               /* first alt*/
-                               if ((antcomb->first_quick_scan_conf ==
-                                   ATH_ANT_DIV_COMB_LNA1) ||
-                                   (antcomb->first_quick_scan_conf ==
-                                   ATH_ANT_DIV_COMB_LNA2))
-                                       /* Set alt LNA1 or LNA2*/
-                                       if (div_ant_conf->main_lna_conf ==
-                                           ATH_ANT_DIV_COMB_LNA2)
-                                               div_ant_conf->alt_lna_conf =
-                                                       ATH_ANT_DIV_COMB_LNA1;
-                                       else
-                                               div_ant_conf->alt_lna_conf =
-                                                       ATH_ANT_DIV_COMB_LNA2;
-                               else
-                                       /* Set alt to A+B or A-B */
-                                       div_ant_conf->alt_lna_conf =
-                                               antcomb->first_quick_scan_conf;
-                       } else if ((antcomb->second_quick_scan_conf ==
-                                  ATH_ANT_DIV_COMB_LNA1) ||
-                                  (antcomb->second_quick_scan_conf ==
-                                  ATH_ANT_DIV_COMB_LNA2)) {
-                               /* Set alt LNA1 or LNA2 */
-                               if (div_ant_conf->main_lna_conf ==
-                                   ATH_ANT_DIV_COMB_LNA2)
-                                       div_ant_conf->alt_lna_conf =
-                                               ATH_ANT_DIV_COMB_LNA1;
-                               else
-                                       div_ant_conf->alt_lna_conf =
-                                               ATH_ANT_DIV_COMB_LNA2;
-                       } else {
-                               /* Set alt to A+B or A-B */
-                               div_ant_conf->alt_lna_conf =
-                                       antcomb->second_quick_scan_conf;
-                       }
-               } else if (antcomb->first_ratio) {
-                       /* first alt */
-                       if ((antcomb->first_quick_scan_conf ==
-                           ATH_ANT_DIV_COMB_LNA1) ||
-                           (antcomb->first_quick_scan_conf ==
-                           ATH_ANT_DIV_COMB_LNA2))
-                                       /* Set alt LNA1 or LNA2 */
-                               if (div_ant_conf->main_lna_conf ==
-                                   ATH_ANT_DIV_COMB_LNA2)
-                                       div_ant_conf->alt_lna_conf =
-                                                       ATH_ANT_DIV_COMB_LNA1;
-                               else
-                                       div_ant_conf->alt_lna_conf =
-                                                       ATH_ANT_DIV_COMB_LNA2;
-                       else
-                               /* Set alt to A+B or A-B */
-                               div_ant_conf->alt_lna_conf =
-                                               antcomb->first_quick_scan_conf;
-               } else if (antcomb->second_ratio) {
-                               /* second alt */
-                       if ((antcomb->second_quick_scan_conf ==
-                           ATH_ANT_DIV_COMB_LNA1) ||
-                           (antcomb->second_quick_scan_conf ==
-                           ATH_ANT_DIV_COMB_LNA2))
-                               /* Set alt LNA1 or LNA2 */
-                               if (div_ant_conf->main_lna_conf ==
-                                   ATH_ANT_DIV_COMB_LNA2)
-                                       div_ant_conf->alt_lna_conf =
-                                               ATH_ANT_DIV_COMB_LNA1;
-                               else
-                                       div_ant_conf->alt_lna_conf =
-                                               ATH_ANT_DIV_COMB_LNA2;
-                       else
-                               /* Set alt to A+B or A-B */
-                               div_ant_conf->alt_lna_conf =
-                                               antcomb->second_quick_scan_conf;
-               } else {
-                       /* main is largest */
-                       if ((antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) ||
-                           (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2))
-                               /* Set alt LNA1 or LNA2 */
-                               if (div_ant_conf->main_lna_conf ==
-                                   ATH_ANT_DIV_COMB_LNA2)
-                                       div_ant_conf->alt_lna_conf =
-                                                       ATH_ANT_DIV_COMB_LNA1;
-                               else
-                                       div_ant_conf->alt_lna_conf =
-                                                       ATH_ANT_DIV_COMB_LNA2;
-                       else
-                               /* Set alt to A+B or A-B */
-                               div_ant_conf->alt_lna_conf = antcomb->main_conf;
-               }
+               ath_ant_set_alt_ratio(antcomb, div_ant_conf);
+
                break;
        default:
                break;
@@ -430,8 +497,7 @@ static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
                        ant_conf->fast_div_bias = 0x1;
                        break;
                case 0x10: /* LNA2 A-B */
-                       if (!(antcomb->scan) &&
-                               (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
+                       if (!antcomb->scan && (alt_ratio > antcomb->ant_ratio))
                                ant_conf->fast_div_bias = 0x1;
                        else
                                ant_conf->fast_div_bias = 0x2;
@@ -440,15 +506,13 @@ static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
                        ant_conf->fast_div_bias = 0x1;
                        break;
                case 0x13: /* LNA2 A+B */
-                       if (!(antcomb->scan) &&
-                               (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
+                       if (!antcomb->scan && (alt_ratio > antcomb->ant_ratio))
                                ant_conf->fast_div_bias = 0x1;
                        else
                                ant_conf->fast_div_bias = 0x2;
                        break;
                case 0x20: /* LNA1 A-B */
-                       if (!(antcomb->scan) &&
-                               (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
+                       if (!antcomb->scan && (alt_ratio > antcomb->ant_ratio))
                                ant_conf->fast_div_bias = 0x1;
                        else
                                ant_conf->fast_div_bias = 0x2;
@@ -457,8 +521,7 @@ static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
                        ant_conf->fast_div_bias = 0x1;
                        break;
                case 0x23: /* LNA1 A+B */
-                       if (!(antcomb->scan) &&
-                               (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
+                       if (!antcomb->scan && (alt_ratio > antcomb->ant_ratio))
                                ant_conf->fast_div_bias = 0x1;
                        else
                                ant_conf->fast_div_bias = 0x2;
@@ -475,6 +538,9 @@ static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
                default:
                        break;
                }
+
+               if (antcomb->fast_div_bias)
+                       ant_conf->fast_div_bias = antcomb->fast_div_bias;
        } else if (ant_conf->div_group == 3) {
                switch ((ant_conf->main_lna_conf << 4) |
                        ant_conf->alt_lna_conf) {
@@ -540,6 +606,138 @@ static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
        }
 }
 
+static void ath_ant_try_scan(struct ath_ant_comb *antcomb,
+                            struct ath_hw_antcomb_conf *conf,
+                            int curr_alt_set, int alt_rssi_avg,
+                            int main_rssi_avg)
+{
+       switch (curr_alt_set) {
+       case ATH_ANT_DIV_COMB_LNA2:
+               antcomb->rssi_lna2 = alt_rssi_avg;
+               antcomb->rssi_lna1 = main_rssi_avg;
+               antcomb->scan = true;
+               /* set to A+B */
+               conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA1;
+               conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
+               break;
+       case ATH_ANT_DIV_COMB_LNA1:
+               antcomb->rssi_lna1 = alt_rssi_avg;
+               antcomb->rssi_lna2 = main_rssi_avg;
+               antcomb->scan = true;
+               /* set to A+B */
+               conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
+               conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
+               break;
+       case ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2:
+               antcomb->rssi_add = alt_rssi_avg;
+               antcomb->scan = true;
+               /* set to A-B */
+               conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
+               break;
+       case ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2:
+               antcomb->rssi_sub = alt_rssi_avg;
+               antcomb->scan = false;
+               if (antcomb->rssi_lna2 >
+                   (antcomb->rssi_lna1 + ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA)) {
+                       /* use LNA2 as main LNA */
+                       if ((antcomb->rssi_add > antcomb->rssi_lna1) &&
+                           (antcomb->rssi_add > antcomb->rssi_sub)) {
+                               /* set to A+B */
+                               conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
+                               conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
+                       } else if (antcomb->rssi_sub >
+                                  antcomb->rssi_lna1) {
+                               /* set to A-B */
+                               conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
+                               conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
+                       } else {
+                               /* set to LNA1 */
+                               conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
+                               conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1;
+                       }
+               } else {
+                       /* use LNA1 as main LNA */
+                       if ((antcomb->rssi_add > antcomb->rssi_lna2) &&
+                           (antcomb->rssi_add > antcomb->rssi_sub)) {
+                               /* set to A+B */
+                               conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA1;
+                               conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
+                       } else if (antcomb->rssi_sub >
+                                  antcomb->rssi_lna1) {
+                               /* set to A-B */
+                               conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA1;
+                               conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
+                       } else {
+                               /* set to LNA2 */
+                               conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA1;
+                               conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA2;
+                       }
+               }
+               break;
+       default:
+               break;
+       }
+}
+
+static bool ath_ant_try_switch(struct ath_hw_antcomb_conf *div_ant_conf,
+                              struct ath_ant_comb *antcomb,
+                              int alt_ratio, int alt_rssi_avg,
+                              int main_rssi_avg, int curr_main_set,
+                              int curr_alt_set)
+{
+       bool ret = false;
+
+       if (ath_ant_div_comb_alt_check(div_ant_conf, antcomb, alt_ratio,
+                                      alt_rssi_avg, main_rssi_avg)) {
+               if (curr_alt_set == ATH_ANT_DIV_COMB_LNA2) {
+                       /*
+                        * Switch main and alt LNA.
+                        */
+                       div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
+                       div_ant_conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1;
+               } else if (curr_alt_set == ATH_ANT_DIV_COMB_LNA1) {
+                       div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA1;
+                       div_ant_conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA2;
+               }
+
+               ret = true;
+       } else if ((curr_alt_set != ATH_ANT_DIV_COMB_LNA1) &&
+                  (curr_alt_set != ATH_ANT_DIV_COMB_LNA2)) {
+               /*
+                * Set alt to another LNA.
+                */
+               if (curr_main_set == ATH_ANT_DIV_COMB_LNA2)
+                       div_ant_conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1;
+               else if (curr_main_set == ATH_ANT_DIV_COMB_LNA1)
+                       div_ant_conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA2;
+
+               ret = true;
+       }
+
+       return ret;
+}
+
+static bool ath_ant_short_scan_check(struct ath_ant_comb *antcomb)
+{
+       int alt_ratio;
+
+       if (!antcomb->scan || !antcomb->alt_good)
+               return false;
+
+       if (time_after(jiffies, antcomb->scan_start_time +
+                      msecs_to_jiffies(ATH_ANT_DIV_COMB_SHORT_SCAN_INTR)))
+               return true;
+
+       if (antcomb->total_pkt_count == ATH_ANT_DIV_COMB_SHORT_SCAN_PKTCOUNT) {
+               alt_ratio = ((antcomb->alt_recv_cnt * 100) /
+                            antcomb->total_pkt_count);
+               if (alt_ratio < antcomb->ant_ratio)
+                       return true;
+       }
+
+       return false;
+}
+
 void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs)
 {
        struct ath_hw_antcomb_conf div_ant_conf;
@@ -549,41 +747,46 @@ void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs)
        int main_rssi = rs->rs_rssi_ctl0;
        int alt_rssi = rs->rs_rssi_ctl1;
        int rx_ant_conf,  main_ant_conf;
-       bool short_scan = false;
+       bool short_scan = false, ret;
 
        rx_ant_conf = (rs->rs_rssi_ctl2 >> ATH_ANT_RX_CURRENT_SHIFT) &
                       ATH_ANT_RX_MASK;
        main_ant_conf = (rs->rs_rssi_ctl2 >> ATH_ANT_RX_MAIN_SHIFT) &
                         ATH_ANT_RX_MASK;
 
+       if (alt_rssi >= antcomb->low_rssi_thresh) {
+               antcomb->ant_ratio = ATH_ANT_DIV_COMB_ALT_ANT_RATIO;
+               antcomb->ant_ratio2 = ATH_ANT_DIV_COMB_ALT_ANT_RATIO2;
+       } else {
+               antcomb->ant_ratio = ATH_ANT_DIV_COMB_ALT_ANT_RATIO_LOW_RSSI;
+               antcomb->ant_ratio2 = ATH_ANT_DIV_COMB_ALT_ANT_RATIO2_LOW_RSSI;
+       }
+
        /* Record packet only when both main_rssi and  alt_rssi is positive */
        if (main_rssi > 0 && alt_rssi > 0) {
                antcomb->total_pkt_count++;
                antcomb->main_total_rssi += main_rssi;
                antcomb->alt_total_rssi  += alt_rssi;
+
                if (main_ant_conf == rx_ant_conf)
                        antcomb->main_recv_cnt++;
                else
                        antcomb->alt_recv_cnt++;
        }
 
-       /* Short scan check */
-       if (antcomb->scan && antcomb->alt_good) {
-               if (time_after(jiffies, antcomb->scan_start_time +
-                   msecs_to_jiffies(ATH_ANT_DIV_COMB_SHORT_SCAN_INTR)))
-                       short_scan = true;
-               else
-                       if (antcomb->total_pkt_count ==
-                           ATH_ANT_DIV_COMB_SHORT_SCAN_PKTCOUNT) {
-                               alt_ratio = ((antcomb->alt_recv_cnt * 100) /
-                                           antcomb->total_pkt_count);
-                               if (alt_ratio < ATH_ANT_DIV_COMB_ALT_ANT_RATIO)
-                                       short_scan = true;
-                       }
+       if (main_ant_conf == rx_ant_conf) {
+               ANT_STAT_INC(ANT_MAIN, recv_cnt);
+               ANT_LNA_INC(ANT_MAIN, rx_ant_conf);
+       } else {
+               ANT_STAT_INC(ANT_ALT, recv_cnt);
+               ANT_LNA_INC(ANT_ALT, rx_ant_conf);
        }
 
+       /* Short scan check */
+       short_scan = ath_ant_short_scan_check(antcomb);
+
        if (((antcomb->total_pkt_count < ATH_ANT_DIV_COMB_MAX_PKTCOUNT) ||
-           rs->rs_moreaggr) && !short_scan)
+            rs->rs_moreaggr) && !short_scan)
                return;
 
        if (antcomb->total_pkt_count) {
@@ -595,15 +798,13 @@ void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs)
                                 antcomb->total_pkt_count);
        }
 
-
        ath9k_hw_antdiv_comb_conf_get(sc->sc_ah, &div_ant_conf);
        curr_alt_set = div_ant_conf.alt_lna_conf;
        curr_main_set = div_ant_conf.main_lna_conf;
-
        antcomb->count++;
 
        if (antcomb->count == ATH_ANT_DIV_COMB_MAX_COUNT) {
-               if (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO) {
+               if (alt_ratio > antcomb->ant_ratio) {
                        ath_lnaconf_alt_good_scan(antcomb, div_ant_conf,
                                                  main_rssi_avg);
                        antcomb->alt_good = true;
@@ -617,153 +818,47 @@ void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs)
        }
 
        if (!antcomb->scan) {
-               if (ath_ant_div_comb_alt_check(div_ant_conf.div_group,
-                                       alt_ratio, curr_main_set, curr_alt_set,
-                                       alt_rssi_avg, main_rssi_avg)) {
-                       if (curr_alt_set == ATH_ANT_DIV_COMB_LNA2) {
-                               /* Switch main and alt LNA */
-                               div_ant_conf.main_lna_conf =
-                                               ATH_ANT_DIV_COMB_LNA2;
-                               div_ant_conf.alt_lna_conf  =
-                                               ATH_ANT_DIV_COMB_LNA1;
-                       } else if (curr_alt_set == ATH_ANT_DIV_COMB_LNA1) {
-                               div_ant_conf.main_lna_conf =
-                                               ATH_ANT_DIV_COMB_LNA1;
-                               div_ant_conf.alt_lna_conf  =
-                                               ATH_ANT_DIV_COMB_LNA2;
-                       }
-
-                       goto div_comb_done;
-               } else if ((curr_alt_set != ATH_ANT_DIV_COMB_LNA1) &&
-                          (curr_alt_set != ATH_ANT_DIV_COMB_LNA2)) {
-                       /* Set alt to another LNA */
-                       if (curr_main_set == ATH_ANT_DIV_COMB_LNA2)
-                               div_ant_conf.alt_lna_conf =
-                                               ATH_ANT_DIV_COMB_LNA1;
-                       else if (curr_main_set == ATH_ANT_DIV_COMB_LNA1)
-                               div_ant_conf.alt_lna_conf =
-                                               ATH_ANT_DIV_COMB_LNA2;
-
-                       goto div_comb_done;
-               }
-
-               if ((alt_rssi_avg < (main_rssi_avg +
-                                    div_ant_conf.lna1_lna2_delta)))
+               ret = ath_ant_try_switch(&div_ant_conf, antcomb, alt_ratio,
+                                        alt_rssi_avg, main_rssi_avg,
+                                        curr_main_set, curr_alt_set);
+               if (ret)
                        goto div_comb_done;
        }
 
+       if (!antcomb->scan &&
+           (alt_rssi_avg < (main_rssi_avg + div_ant_conf.lna1_lna2_delta)))
+               goto div_comb_done;
+
        if (!antcomb->scan_not_start) {
-               switch (curr_alt_set) {
-               case ATH_ANT_DIV_COMB_LNA2:
-                       antcomb->rssi_lna2 = alt_rssi_avg;
-                       antcomb->rssi_lna1 = main_rssi_avg;
-                       antcomb->scan = true;
-                       /* set to A+B */
-                       div_ant_conf.main_lna_conf =
-                               ATH_ANT_DIV_COMB_LNA1;
-                       div_ant_conf.alt_lna_conf  =
-                               ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
-                       break;
-               case ATH_ANT_DIV_COMB_LNA1:
-                       antcomb->rssi_lna1 = alt_rssi_avg;
-                       antcomb->rssi_lna2 = main_rssi_avg;
-                       antcomb->scan = true;
-                       /* set to A+B */
-                       div_ant_conf.main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
-                       div_ant_conf.alt_lna_conf  =
-                               ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
-                       break;
-               case ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2:
-                       antcomb->rssi_add = alt_rssi_avg;
-                       antcomb->scan = true;
-                       /* set to A-B */
-                       div_ant_conf.alt_lna_conf =
-                               ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
-                       break;
-               case ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2:
-                       antcomb->rssi_sub = alt_rssi_avg;
-                       antcomb->scan = false;
-                       if (antcomb->rssi_lna2 >
-                           (antcomb->rssi_lna1 +
-                           ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA)) {
-                               /* use LNA2 as main LNA */
-                               if ((antcomb->rssi_add > antcomb->rssi_lna1) &&
-                                   (antcomb->rssi_add > antcomb->rssi_sub)) {
-                                       /* set to A+B */
-                                       div_ant_conf.main_lna_conf =
-                                               ATH_ANT_DIV_COMB_LNA2;
-                                       div_ant_conf.alt_lna_conf  =
-                                               ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
-                               } else if (antcomb->rssi_sub >
-                                          antcomb->rssi_lna1) {
-                                       /* set to A-B */
-                                       div_ant_conf.main_lna_conf =
-                                               ATH_ANT_DIV_COMB_LNA2;
-                                       div_ant_conf.alt_lna_conf =
-                                               ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
-                               } else {
-                                       /* set to LNA1 */
-                                       div_ant_conf.main_lna_conf =
-                                               ATH_ANT_DIV_COMB_LNA2;
-                                       div_ant_conf.alt_lna_conf =
-                                               ATH_ANT_DIV_COMB_LNA1;
-                               }
-                       } else {
-                               /* use LNA1 as main LNA */
-                               if ((antcomb->rssi_add > antcomb->rssi_lna2) &&
-                                   (antcomb->rssi_add > antcomb->rssi_sub)) {
-                                       /* set to A+B */
-                                       div_ant_conf.main_lna_conf =
-                                               ATH_ANT_DIV_COMB_LNA1;
-                                       div_ant_conf.alt_lna_conf  =
-                                               ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
-                               } else if (antcomb->rssi_sub >
-                                          antcomb->rssi_lna1) {
-                                       /* set to A-B */
-                                       div_ant_conf.main_lna_conf =
-                                               ATH_ANT_DIV_COMB_LNA1;
-                                       div_ant_conf.alt_lna_conf =
-                                               ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
-                               } else {
-                                       /* set to LNA2 */
-                                       div_ant_conf.main_lna_conf =
-                                               ATH_ANT_DIV_COMB_LNA1;
-                                       div_ant_conf.alt_lna_conf =
-                                               ATH_ANT_DIV_COMB_LNA2;
-                               }
-                       }
-                       break;
-               default:
-                       break;
-               }
+               ath_ant_try_scan(antcomb, &div_ant_conf, curr_alt_set,
+                                alt_rssi_avg, main_rssi_avg);
        } else {
                if (!antcomb->alt_good) {
                        antcomb->scan_not_start = false;
                        /* Set alt to another LNA */
                        if (curr_main_set == ATH_ANT_DIV_COMB_LNA2) {
                                div_ant_conf.main_lna_conf =
-                                               ATH_ANT_DIV_COMB_LNA2;
+                                       ATH_ANT_DIV_COMB_LNA2;
                                div_ant_conf.alt_lna_conf =
-                                               ATH_ANT_DIV_COMB_LNA1;
+                                       ATH_ANT_DIV_COMB_LNA1;
                        } else if (curr_main_set == ATH_ANT_DIV_COMB_LNA1) {
                                div_ant_conf.main_lna_conf =
-                                               ATH_ANT_DIV_COMB_LNA1;
+                                       ATH_ANT_DIV_COMB_LNA1;
                                div_ant_conf.alt_lna_conf =
-                                               ATH_ANT_DIV_COMB_LNA2;
+                                       ATH_ANT_DIV_COMB_LNA2;
                        }
                        goto div_comb_done;
                }
+               ath_select_ant_div_from_quick_scan(antcomb, &div_ant_conf,
+                                                  main_rssi_avg, alt_rssi_avg,
+                                                  alt_ratio);
+               antcomb->quick_scan_cnt++;
        }
 
-       ath_select_ant_div_from_quick_scan(antcomb, &div_ant_conf,
-                                          main_rssi_avg, alt_rssi_avg,
-                                          alt_ratio);
-
-       antcomb->quick_scan_cnt++;
-
 div_comb_done:
        ath_ant_div_conf_fast_divbias(&div_ant_conf, antcomb, alt_ratio);
        ath9k_hw_antdiv_comb_conf_set(sc->sc_ah, &div_ant_conf);
+       ath9k_debug_stat_ant(sc, &div_ant_conf, main_rssi_avg, alt_rssi_avg);
 
        antcomb->scan_start_time = jiffies;
        antcomb->total_pkt_count = 0;
@@ -772,26 +867,3 @@ div_comb_done:
        antcomb->main_recv_cnt = 0;
        antcomb->alt_recv_cnt = 0;
 }
-
-void ath_ant_comb_update(struct ath_softc *sc)
-{
-       struct ath_hw *ah = sc->sc_ah;
-       struct ath_common *common = ath9k_hw_common(ah);
-       struct ath_hw_antcomb_conf div_ant_conf;
-       u8 lna_conf;
-
-       ath9k_hw_antdiv_comb_conf_get(ah, &div_ant_conf);
-
-       if (sc->ant_rx == 1)
-               lna_conf = ATH_ANT_DIV_COMB_LNA1;
-       else
-               lna_conf = ATH_ANT_DIV_COMB_LNA2;
-
-       div_ant_conf.main_lna_conf = lna_conf;
-       div_ant_conf.alt_lna_conf = lna_conf;
-
-       ath9k_hw_antdiv_comb_conf_set(ah, &div_ant_conf);
-
-       if (common->antenna_diversity)
-               ath9k_hw_antctrl_shared_chain_lnadiv(ah, true);
-}
index f4003512d8d57e223c59fc10477ad9f99d262ede..1fc1fa955d44fff8ddd60e7f231701f873df5473 100644 (file)
@@ -555,6 +555,69 @@ static void ar9002_hw_antdiv_comb_conf_set(struct ath_hw *ah,
        REG_WRITE(ah, AR_PHY_MULTICHAIN_GAIN_CTL, regval);
 }
 
+#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
+
+static void ar9002_hw_set_bt_ant_diversity(struct ath_hw *ah, bool enable)
+{
+       struct ath_btcoex_hw *btcoex = &ah->btcoex_hw;
+       u8 antdiv_ctrl1, antdiv_ctrl2;
+       u32 regval;
+
+       if (enable) {
+               antdiv_ctrl1 = ATH_BT_COEX_ANTDIV_CONTROL1_ENABLE;
+               antdiv_ctrl2 = ATH_BT_COEX_ANTDIV_CONTROL2_ENABLE;
+
+               /*
+                * Don't disable BT ant to allow BB to control SWCOM.
+                */
+               btcoex->bt_coex_mode2 &= (~(AR_BT_DISABLE_BT_ANT));
+               REG_WRITE(ah, AR_BT_COEX_MODE2, btcoex->bt_coex_mode2);
+
+               REG_WRITE(ah, AR_PHY_SWITCH_COM, ATH_BT_COEX_ANT_DIV_SWITCH_COM);
+               REG_RMW(ah, AR_PHY_SWITCH_CHAIN_0, 0, 0xf0000000);
+       } else {
+               /*
+                * Disable antenna diversity, use LNA1 only.
+                */
+               antdiv_ctrl1 = ATH_BT_COEX_ANTDIV_CONTROL1_FIXED_A;
+               antdiv_ctrl2 = ATH_BT_COEX_ANTDIV_CONTROL2_FIXED_A;
+
+               /*
+                * Disable BT Ant. to allow concurrent BT and WLAN receive.
+                */
+               btcoex->bt_coex_mode2 |= AR_BT_DISABLE_BT_ANT;
+               REG_WRITE(ah, AR_BT_COEX_MODE2, btcoex->bt_coex_mode2);
+
+               /*
+                * Program SWCOM table to make sure RF switch always parks
+                * at BT side.
+                */
+               REG_WRITE(ah, AR_PHY_SWITCH_COM, 0);
+               REG_RMW(ah, AR_PHY_SWITCH_CHAIN_0, 0, 0xf0000000);
+       }
+
+       regval = REG_READ(ah, AR_PHY_MULTICHAIN_GAIN_CTL);
+       regval &= (~(AR_PHY_9285_ANT_DIV_CTL_ALL));
+       /*
+        * Clear ant_fast_div_bias [14:9] since for WB195,
+        * the main LNA is always LNA1.
+        */
+       regval &= (~(AR_PHY_9285_FAST_DIV_BIAS));
+       regval |= SM(antdiv_ctrl1, AR_PHY_9285_ANT_DIV_CTL);
+       regval |= SM(antdiv_ctrl2, AR_PHY_9285_ANT_DIV_ALT_LNACONF);
+       regval |= SM((antdiv_ctrl2 >> 2), AR_PHY_9285_ANT_DIV_MAIN_LNACONF);
+       regval |= SM((antdiv_ctrl1 >> 1), AR_PHY_9285_ANT_DIV_ALT_GAINTB);
+       regval |= SM((antdiv_ctrl1 >> 2), AR_PHY_9285_ANT_DIV_MAIN_GAINTB);
+       REG_WRITE(ah, AR_PHY_MULTICHAIN_GAIN_CTL, regval);
+
+       regval = REG_READ(ah, AR_PHY_CCK_DETECT);
+       regval &= (~AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV);
+       regval |= SM((antdiv_ctrl1 >> 3), AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV);
+       REG_WRITE(ah, AR_PHY_CCK_DETECT, regval);
+}
+
+#endif
+
 static void ar9002_hw_spectral_scan_config(struct ath_hw *ah,
                                    struct ath_spec_scan *param)
 {
@@ -634,5 +697,9 @@ void ar9002_hw_attach_phy_ops(struct ath_hw *ah)
        ops->spectral_scan_trigger = ar9002_hw_spectral_scan_trigger;
        ops->spectral_scan_wait = ar9002_hw_spectral_scan_wait;
 
+#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
+       ops->set_bt_ant_diversity = ar9002_hw_set_bt_ant_diversity;
+#endif
+
        ar9002_hw_set_nf_limits(ah);
 }
index f9eb2c3571692f9517cf70cd7492ebcb1b526436..6314ae2e93e34d0523b763031230bed4943244b7 100644 (file)
 #define AR_PHY_9285_ANT_DIV_ALT_GAINTB_S    29
 #define AR_PHY_9285_ANT_DIV_MAIN_GAINTB     0x40000000
 #define AR_PHY_9285_ANT_DIV_MAIN_GAINTB_S   30
-#define AR_PHY_9285_ANT_DIV_LNA1            2
-#define AR_PHY_9285_ANT_DIV_LNA2            1
-#define AR_PHY_9285_ANT_DIV_LNA1_PLUS_LNA2  3
-#define AR_PHY_9285_ANT_DIV_LNA1_MINUS_LNA2 0
 #define AR_PHY_9285_ANT_DIV_GAINTB_0        0
 #define AR_PHY_9285_ANT_DIV_GAINTB_1        1
 
+#define ATH_BT_COEX_ANTDIV_CONTROL1_ENABLE  0x0b
+#define ATH_BT_COEX_ANTDIV_CONTROL2_ENABLE  0x09
+#define ATH_BT_COEX_ANTDIV_CONTROL1_FIXED_A 0x04
+#define ATH_BT_COEX_ANTDIV_CONTROL2_FIXED_A 0x09
+#define ATH_BT_COEX_ANT_DIV_SWITCH_COM      0x66666666
+
 #define AR_PHY_EXT_CCA0             0x99b8
 #define AR_PHY_EXT_CCA0_THRESH62    0x000000FF
 #define AR_PHY_EXT_CCA0_THRESH62_S  0
index d105e43d22e165bc632d9fd41f9edabd04afaa8b..abdc7ee874139b26e42ded779d42da8295d05775 100644 (file)
@@ -3541,13 +3541,12 @@ static u16 ar9003_switch_com_spdt_get(struct ath_hw *ah, bool is2ghz)
        return le16_to_cpu(ar9003_modal_header(ah, is2ghz)->switchcomspdt);
 }
 
-
-static u32 ar9003_hw_ant_ctrl_common_get(struct ath_hw *ah, bool is2ghz)
+u32 ar9003_hw_ant_ctrl_common_get(struct ath_hw *ah, bool is2ghz)
 {
        return le32_to_cpu(ar9003_modal_header(ah, is2ghz)->antCtrlCommon);
 }
 
-static u32 ar9003_hw_ant_ctrl_common_2_get(struct ath_hw *ah, bool is2ghz)
+u32 ar9003_hw_ant_ctrl_common_2_get(struct ath_hw *ah, bool is2ghz)
 {
        return le32_to_cpu(ar9003_modal_header(ah, is2ghz)->antCtrlCommon2);
 }
@@ -3561,6 +3560,7 @@ static u16 ar9003_hw_ant_ctrl_chain_get(struct ath_hw *ah, int chain,
 
 static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
 {
+       struct ath_common *common = ath9k_hw_common(ah);
        struct ath9k_hw_capabilities *pCap = &ah->caps;
        int chain;
        u32 regval, value, gpio;
@@ -3614,6 +3614,11 @@ static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
        }
 
        value = ar9003_hw_ant_ctrl_common_2_get(ah, is2ghz);
+       if (AR_SREV_9485(ah) && common->bt_ant_diversity) {
+               regval &= ~AR_SWITCH_TABLE_COM2_ALL;
+               regval |= ah->config.ant_ctrl_comm2g_switch_enable;
+
+       }
        REG_RMW_FIELD(ah, AR_PHY_SWITCH_COM_2, AR_SWITCH_TABLE_COM2_ALL, value);
 
        if ((AR_SREV_9462(ah)) && (ah->rxchainmask == 0x2)) {
@@ -3645,8 +3650,11 @@ static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
                regval &= (~AR_PHY_ANT_DIV_LNADIV);
                regval |= ((value >> 6) & 0x1) << AR_PHY_ANT_DIV_LNADIV_S;
 
+               if (AR_SREV_9485(ah) && common->bt_ant_diversity)
+                       regval |= AR_ANT_DIV_ENABLE;
+
                if (AR_SREV_9565(ah)) {
-                       if (ah->shared_chain_lnadiv) {
+                       if (common->bt_ant_diversity) {
                                regval |= (1 << AR_PHY_ANT_SW_RX_PROT_S);
                        } else {
                                regval &= ~(1 << AR_PHY_ANT_DIV_LNADIV_S);
@@ -3656,10 +3664,14 @@ static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
 
                REG_WRITE(ah, AR_PHY_MC_GAIN_CTRL, regval);
 
-               /*enable fast_div */
+               /* enable fast_div */
                regval = REG_READ(ah, AR_PHY_CCK_DETECT);
                regval &= (~AR_FAST_DIV_ENABLE);
                regval |= ((value >> 7) & 0x1) << AR_FAST_DIV_ENABLE_S;
+
+               if (AR_SREV_9485(ah) && common->bt_ant_diversity)
+                       regval |= AR_FAST_DIV_ENABLE;
+
                REG_WRITE(ah, AR_PHY_CCK_DETECT, regval);
 
                if (pCap->hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) {
@@ -3673,9 +3685,9 @@ static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
                                     AR_PHY_ANT_DIV_ALT_GAINTB |
                                     AR_PHY_ANT_DIV_MAIN_GAINTB));
                        /* by default use LNA1 for the main antenna */
-                       regval |= (AR_PHY_ANT_DIV_LNA1 <<
+                       regval |= (ATH_ANT_DIV_COMB_LNA1 <<
                                   AR_PHY_ANT_DIV_MAIN_LNACONF_S);
-                       regval |= (AR_PHY_ANT_DIV_LNA2 <<
+                       regval |= (ATH_ANT_DIV_COMB_LNA2 <<
                                   AR_PHY_ANT_DIV_ALT_LNACONF_S);
                        REG_WRITE(ah, AR_PHY_MC_GAIN_CTRL, regval);
                }
index 874f6570bd1cd7c266188dc7abe55295702f66d0..75d4fb41962f312567cd161c900b9d3578007f0d 100644 (file)
@@ -334,6 +334,8 @@ struct ar9300_eeprom {
 
 s32 ar9003_hw_get_tx_gain_idx(struct ath_hw *ah);
 s32 ar9003_hw_get_rx_gain_idx(struct ath_hw *ah);
+u32 ar9003_hw_ant_ctrl_common_get(struct ath_hw *ah, bool is2ghz);
+u32 ar9003_hw_ant_ctrl_common_2_get(struct ath_hw *ah, bool is2ghz);
 
 u8 *ar9003_get_spur_chan_ptr(struct ath_hw *ah, bool is_2ghz);
 
index 5163abd3937c85e3032b3ecd7719d72451e66f18..f6c5c1b50471ee2f876d24238b6912182c449f13 100644 (file)
@@ -491,6 +491,7 @@ int ath9k_hw_process_rxdesc_edma(struct ath_hw *ah, struct ath_rx_status *rxs,
        rxs->rs_rate = MS(rxsp->status1, AR_RxRate);
        rxs->rs_more = (rxsp->status2 & AR_RxMore) ? 1 : 0;
 
+       rxs->rs_firstaggr = (rxsp->status11 & AR_RxFirstAggr) ? 1 : 0;
        rxs->rs_isaggr = (rxsp->status11 & AR_RxAggr) ? 1 : 0;
        rxs->rs_moreaggr = (rxsp->status11 & AR_RxMoreAggr) ? 1 : 0;
        rxs->rs_antenna = (MS(rxsp->status4, AR_RxAntenna) & 0x7);
index 1f694ab3cc78ba2ede3f7ca4b192c2e7dded96ab..18a5aa4fe406670fe435f6aeefe4a9aab150b26d 100644 (file)
@@ -632,6 +632,22 @@ static void ar9003_hw_override_ini(struct ath_hw *ah)
 
        REG_SET_BIT(ah, AR_PHY_CCK_DETECT,
                    AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV);
+
+       if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
+               REG_WRITE(ah, AR_GLB_SWREG_DISCONT_MODE,
+                         AR_GLB_SWREG_DISCONT_EN_BT_WLAN);
+
+               if (REG_READ_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_0,
+                                  AR_PHY_TX_IQCAL_CONTROL_0_ENABLE_TXIQ_CAL))
+                       ah->enabled_cals |= TX_IQ_CAL;
+               else
+                       ah->enabled_cals &= ~TX_IQ_CAL;
+
+               if (REG_READ(ah, AR_PHY_CL_CAL_CTL) & AR_PHY_CL_CAL_ENABLE)
+                       ah->enabled_cals |= TX_CL_CAL;
+               else
+                       ah->enabled_cals &= ~TX_CL_CAL;
+       }
 }
 
 static void ar9003_hw_prog_ini(struct ath_hw *ah,
@@ -814,29 +830,12 @@ static int ar9003_hw_process_ini(struct ath_hw *ah,
        if (chan->channel == 2484)
                ar9003_hw_prog_ini(ah, &ah->iniCckfirJapan2484, 1);
 
-       if (AR_SREV_9462(ah) || AR_SREV_9565(ah))
-               REG_WRITE(ah, AR_GLB_SWREG_DISCONT_MODE,
-                         AR_GLB_SWREG_DISCONT_EN_BT_WLAN);
-
        ah->modes_index = modesIndex;
        ar9003_hw_override_ini(ah);
        ar9003_hw_set_channel_regs(ah, chan);
        ar9003_hw_set_chain_masks(ah, ah->rxchainmask, ah->txchainmask);
        ath9k_hw_apply_txpower(ah, chan, false);
 
-       if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
-               if (REG_READ_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_0,
-                                  AR_PHY_TX_IQCAL_CONTROL_0_ENABLE_TXIQ_CAL))
-                       ah->enabled_cals |= TX_IQ_CAL;
-               else
-                       ah->enabled_cals &= ~TX_IQ_CAL;
-
-               if (REG_READ(ah, AR_PHY_CL_CAL_CTL) & AR_PHY_CL_CAL_ENABLE)
-                       ah->enabled_cals |= TX_CL_CAL;
-               else
-                       ah->enabled_cals &= ~TX_CL_CAL;
-       }
-
        return 0;
 }
 
@@ -1173,6 +1172,10 @@ skip_ws_det:
                 * is_on == 0 means MRC CCK is OFF (more noise imm)
                 */
                bool is_on = param ? 1 : 0;
+
+               if (ah->caps.rx_chainmask == 1)
+                       break;
+
                REG_RMW_FIELD(ah, AR_PHY_MRC_CCK_CTRL,
                              AR_PHY_MRC_CCK_ENABLE, is_on);
                REG_RMW_FIELD(ah, AR_PHY_MRC_CCK_CTRL,
@@ -1413,65 +1416,111 @@ static void ar9003_hw_antdiv_comb_conf_set(struct ath_hw *ah,
        REG_WRITE(ah, AR_PHY_MC_GAIN_CTRL, regval);
 }
 
-static void ar9003_hw_antctrl_shared_chain_lnadiv(struct ath_hw *ah,
-                                                 bool enable)
+#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
+
+static void ar9003_hw_set_bt_ant_diversity(struct ath_hw *ah, bool enable)
 {
+       struct ath9k_hw_capabilities *pCap = &ah->caps;
        u8 ant_div_ctl1;
        u32 regval;
 
-       if (!AR_SREV_9565(ah))
+       if (!AR_SREV_9485(ah) && !AR_SREV_9565(ah))
                return;
 
-       ah->shared_chain_lnadiv = enable;
+       if (AR_SREV_9485(ah)) {
+               regval = ar9003_hw_ant_ctrl_common_2_get(ah,
+                                                IS_CHAN_2GHZ(ah->curchan));
+               if (enable) {
+                       regval &= ~AR_SWITCH_TABLE_COM2_ALL;
+                       regval |= ah->config.ant_ctrl_comm2g_switch_enable;
+               }
+               REG_RMW_FIELD(ah, AR_PHY_SWITCH_COM_2,
+                             AR_SWITCH_TABLE_COM2_ALL, regval);
+       }
+
        ant_div_ctl1 = ah->eep_ops->get_eeprom(ah, EEP_ANT_DIV_CTL1);
 
+       /*
+        * Set MAIN/ALT LNA conf.
+        * Set MAIN/ALT gain_tb.
+        */
        regval = REG_READ(ah, AR_PHY_MC_GAIN_CTRL);
        regval &= (~AR_ANT_DIV_CTRL_ALL);
        regval |= (ant_div_ctl1 & 0x3f) << AR_ANT_DIV_CTRL_ALL_S;
-       regval &= ~AR_PHY_ANT_DIV_LNADIV;
-       regval |= ((ant_div_ctl1 >> 6) & 0x1) << AR_PHY_ANT_DIV_LNADIV_S;
-
-       if (enable)
-               regval |= AR_ANT_DIV_ENABLE;
-
        REG_WRITE(ah, AR_PHY_MC_GAIN_CTRL, regval);
 
-       regval = REG_READ(ah, AR_PHY_CCK_DETECT);
-       regval &= ~AR_FAST_DIV_ENABLE;
-       regval |= ((ant_div_ctl1 >> 7) & 0x1) << AR_FAST_DIV_ENABLE_S;
-
-       if (enable)
-               regval |= AR_FAST_DIV_ENABLE;
-
-       REG_WRITE(ah, AR_PHY_CCK_DETECT, regval);
-
-       if (enable) {
-               REG_SET_BIT(ah, AR_PHY_MC_GAIN_CTRL,
-                           (1 << AR_PHY_ANT_SW_RX_PROT_S));
-               if (ah->curchan && IS_CHAN_2GHZ(ah->curchan))
-                       REG_SET_BIT(ah, AR_PHY_RESTART,
-                                   AR_PHY_RESTART_ENABLE_DIV_M2FLAG);
-               REG_SET_BIT(ah, AR_BTCOEX_WL_LNADIV,
-                           AR_BTCOEX_WL_LNADIV_FORCE_ON);
-       } else {
-               REG_CLR_BIT(ah, AR_PHY_MC_GAIN_CTRL, AR_ANT_DIV_ENABLE);
-               REG_CLR_BIT(ah, AR_PHY_MC_GAIN_CTRL,
-                           (1 << AR_PHY_ANT_SW_RX_PROT_S));
-               REG_CLR_BIT(ah, AR_PHY_CCK_DETECT, AR_FAST_DIV_ENABLE);
-               REG_CLR_BIT(ah, AR_BTCOEX_WL_LNADIV,
-                           AR_BTCOEX_WL_LNADIV_FORCE_ON);
-
+       if (AR_SREV_9485_11(ah)) {
+               /*
+                * Enable LNA diversity.
+                */
                regval = REG_READ(ah, AR_PHY_MC_GAIN_CTRL);
-               regval &= ~(AR_PHY_ANT_DIV_MAIN_LNACONF |
-                       AR_PHY_ANT_DIV_ALT_LNACONF |
-                       AR_PHY_ANT_DIV_MAIN_GAINTB |
-                       AR_PHY_ANT_DIV_ALT_GAINTB);
-               regval |= (AR_PHY_ANT_DIV_LNA1 << AR_PHY_ANT_DIV_MAIN_LNACONF_S);
-               regval |= (AR_PHY_ANT_DIV_LNA2 << AR_PHY_ANT_DIV_ALT_LNACONF_S);
+               regval &= ~AR_PHY_ANT_DIV_LNADIV;
+               regval |= ((ant_div_ctl1 >> 6) & 0x1) << AR_PHY_ANT_DIV_LNADIV_S;
+               if (enable)
+                       regval |= AR_ANT_DIV_ENABLE;
+
                REG_WRITE(ah, AR_PHY_MC_GAIN_CTRL, regval);
+
+               /*
+                * Enable fast antenna diversity.
+                */
+               regval = REG_READ(ah, AR_PHY_CCK_DETECT);
+               regval &= ~AR_FAST_DIV_ENABLE;
+               regval |= ((ant_div_ctl1 >> 7) & 0x1) << AR_FAST_DIV_ENABLE_S;
+               if (enable)
+                       regval |= AR_FAST_DIV_ENABLE;
+
+               REG_WRITE(ah, AR_PHY_CCK_DETECT, regval);
+
+               if (pCap->hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) {
+                       regval = REG_READ(ah, AR_PHY_MC_GAIN_CTRL);
+                       regval &= (~(AR_PHY_ANT_DIV_MAIN_LNACONF |
+                                    AR_PHY_ANT_DIV_ALT_LNACONF |
+                                    AR_PHY_ANT_DIV_ALT_GAINTB |
+                                    AR_PHY_ANT_DIV_MAIN_GAINTB));
+                       /*
+                        * Set MAIN to LNA1 and ALT to LNA2 at the
+                        * beginning.
+                        */
+                       regval |= (ATH_ANT_DIV_COMB_LNA1 <<
+                                  AR_PHY_ANT_DIV_MAIN_LNACONF_S);
+                       regval |= (ATH_ANT_DIV_COMB_LNA2 <<
+                                  AR_PHY_ANT_DIV_ALT_LNACONF_S);
+                       REG_WRITE(ah, AR_PHY_MC_GAIN_CTRL, regval);
+               }
+       } else if (AR_SREV_9565(ah)) {
+               if (enable) {
+                       REG_SET_BIT(ah, AR_PHY_MC_GAIN_CTRL,
+                                   (1 << AR_PHY_ANT_SW_RX_PROT_S));
+                       if (ah->curchan && IS_CHAN_2GHZ(ah->curchan))
+                               REG_SET_BIT(ah, AR_PHY_RESTART,
+                                           AR_PHY_RESTART_ENABLE_DIV_M2FLAG);
+                       REG_SET_BIT(ah, AR_BTCOEX_WL_LNADIV,
+                                   AR_BTCOEX_WL_LNADIV_FORCE_ON);
+               } else {
+                       REG_CLR_BIT(ah, AR_PHY_MC_GAIN_CTRL, AR_ANT_DIV_ENABLE);
+                       REG_CLR_BIT(ah, AR_PHY_MC_GAIN_CTRL,
+                                   (1 << AR_PHY_ANT_SW_RX_PROT_S));
+                       REG_CLR_BIT(ah, AR_PHY_CCK_DETECT, AR_FAST_DIV_ENABLE);
+                       REG_CLR_BIT(ah, AR_BTCOEX_WL_LNADIV,
+                                   AR_BTCOEX_WL_LNADIV_FORCE_ON);
+
+                       regval = REG_READ(ah, AR_PHY_MC_GAIN_CTRL);
+                       regval &= ~(AR_PHY_ANT_DIV_MAIN_LNACONF |
+                                   AR_PHY_ANT_DIV_ALT_LNACONF |
+                                   AR_PHY_ANT_DIV_MAIN_GAINTB |
+                                   AR_PHY_ANT_DIV_ALT_GAINTB);
+                       regval |= (ATH_ANT_DIV_COMB_LNA1 <<
+                                  AR_PHY_ANT_DIV_MAIN_LNACONF_S);
+                       regval |= (ATH_ANT_DIV_COMB_LNA2 <<
+                                  AR_PHY_ANT_DIV_ALT_LNACONF_S);
+                       REG_WRITE(ah, AR_PHY_MC_GAIN_CTRL, regval);
+               }
        }
 }
 
+#endif
+
 static int ar9003_hw_fast_chan_change(struct ath_hw *ah,
                                      struct ath9k_channel *chan,
                                      u8 *ini_reloaded)
@@ -1518,6 +1567,18 @@ static int ar9003_hw_fast_chan_change(struct ath_hw *ah,
 
        REG_WRITE_ARRAY(&ah->iniModesTxGain, modesIndex, regWrites);
 
+       if (AR_SREV_9462_20_OR_LATER(ah)) {
+               /*
+                * CUS217 mix LNA mode.
+                */
+               if (ar9003_hw_get_rx_gain_idx(ah) == 2) {
+                       REG_WRITE_ARRAY(&ah->ini_modes_rxgain_bb_core,
+                                       1, regWrites);
+                       REG_WRITE_ARRAY(&ah->ini_modes_rxgain_bb_postamble,
+                                       modesIndex, regWrites);
+               }
+       }
+
        /*
         * For 5GHz channels requiring Fast Clock, apply
         * different modal values.
@@ -1528,7 +1589,11 @@ static int ar9003_hw_fast_chan_change(struct ath_hw *ah,
        if (AR_SREV_9565(ah))
                REG_WRITE_ARRAY(&ah->iniModesFastClock, 1, regWrites);
 
-       REG_WRITE_ARRAY(&ah->iniAdditional, 1, regWrites);
+       /*
+        * JAPAN regulatory.
+        */
+       if (chan->channel == 2484)
+               ar9003_hw_prog_ini(ah, &ah->iniCckfirJapan2484, 1);
 
        ah->modes_index = modesIndex;
        *ini_reloaded = true;
@@ -1631,11 +1696,14 @@ void ar9003_hw_attach_phy_ops(struct ath_hw *ah)
 
        ops->antdiv_comb_conf_get = ar9003_hw_antdiv_comb_conf_get;
        ops->antdiv_comb_conf_set = ar9003_hw_antdiv_comb_conf_set;
-       ops->antctrl_shared_chain_lnadiv = ar9003_hw_antctrl_shared_chain_lnadiv;
        ops->spectral_scan_config = ar9003_hw_spectral_scan_config;
        ops->spectral_scan_trigger = ar9003_hw_spectral_scan_trigger;
        ops->spectral_scan_wait = ar9003_hw_spectral_scan_wait;
 
+#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
+       ops->set_bt_ant_diversity = ar9003_hw_set_bt_ant_diversity;
+#endif
+
        ar9003_hw_set_nf_limits(ah);
        ar9003_hw_set_radar_conf(ah);
        memcpy(ah->nf_regs, ar9300_cca_regs, sizeof(ah->nf_regs));
index d4d39f305a0b0177fbb7289a360f6d75849a58ea..23c019d0d9aa9f31e47e2ee50e23858130b1add8 100644 (file)
 #define AR_PHY_ANT_DIV_MAIN_GAINTB              0x40000000
 #define AR_PHY_ANT_DIV_MAIN_GAINTB_S            30
 
-#define AR_PHY_ANT_DIV_LNA1_MINUS_LNA2          0x0
-#define AR_PHY_ANT_DIV_LNA2                     0x1
-#define AR_PHY_ANT_DIV_LNA1                     0x2
-#define AR_PHY_ANT_DIV_LNA1_PLUS_LNA2           0x3
-
 #define AR_PHY_EXTCHN_PWRTHR1   (AR_AGC_BASE + 0x2c)
 #define AR_PHY_EXT_CHN_WIN      (AR_AGC_BASE + 0x30)
 #define AR_PHY_20_40_DET_THR    (AR_AGC_BASE + 0x34)
index c1224b5a257b86e6d9ab3ee8b7a38fa7d72fe7b3..8519e75a2e790f4c6616756536ce407b5ae79076 100644 (file)
@@ -72,17 +72,12 @@ struct ath_config {
 /*************************/
 
 #define ATH_TXBUF_RESET(_bf) do {                              \
-               (_bf)->bf_stale = false;                        \
                (_bf)->bf_lastbf = NULL;                        \
                (_bf)->bf_next = NULL;                          \
                memset(&((_bf)->bf_state), 0,                   \
                       sizeof(struct ath_buf_state));           \
        } while (0)
 
-#define ATH_RXBUF_RESET(_bf) do {              \
-               (_bf)->bf_stale = false;        \
-       } while (0)
-
 /**
  * enum buffer_type - Buffer type flags
  *
@@ -137,7 +132,8 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
 #define ATH_AGGR_ENCRYPTDELIM      10
 /* minimum h/w qdepth to be sustained to maximize aggregation */
 #define ATH_AGGR_MIN_QDEPTH        2
-#define ATH_AMPDU_SUBFRAME_DEFAULT 32
+/* minimum h/w qdepth for non-aggregated traffic */
+#define ATH_NON_AGGR_MIN_QDEPTH    8
 
 #define IEEE80211_SEQ_SEQ_SHIFT    4
 #define IEEE80211_SEQ_MAX          4096
@@ -174,12 +170,6 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
 
 #define ATH_TX_COMPLETE_POLL_INT       1000
 
-enum ATH_AGGR_STATUS {
-       ATH_AGGR_DONE,
-       ATH_AGGR_BAW_CLOSED,
-       ATH_AGGR_LIMITED,
-};
-
 #define ATH_TXFIFO_DEPTH 8
 struct ath_txq {
        int mac80211_qnum; /* mac80211 queue number, -1 means not mac80211 Q */
@@ -201,10 +191,10 @@ struct ath_txq {
 
 struct ath_atx_ac {
        struct ath_txq *txq;
-       int sched;
        struct list_head list;
        struct list_head tid_q;
        bool clear_ps_filter;
+       bool sched;
 };
 
 struct ath_frame_info {
@@ -212,14 +202,16 @@ struct ath_frame_info {
        int framelen;
        enum ath9k_key_type keytype;
        u8 keyix;
-       u8 retries;
        u8 rtscts_rate;
+       u8 retries : 7;
+       u8 baw_tracked : 1;
 };
 
 struct ath_buf_state {
        u8 bf_type;
        u8 bfs_paprd;
        u8 ndelim;
+       bool stale;
        u16 seqno;
        unsigned long bfs_paprd_timestamp;
 };
@@ -233,7 +225,6 @@ struct ath_buf {
        void *bf_desc;                  /* virtual addr of desc */
        dma_addr_t bf_daddr;            /* physical addr of desc */
        dma_addr_t bf_buf_addr; /* physical addr of data buffer, for DMA */
-       bool bf_stale;
        struct ieee80211_tx_rate rates[4];
        struct ath_buf_state bf_state;
 };
@@ -241,16 +232,18 @@ struct ath_buf {
 struct ath_atx_tid {
        struct list_head list;
        struct sk_buff_head buf_q;
+       struct sk_buff_head retry_q;
        struct ath_node *an;
        struct ath_atx_ac *ac;
        unsigned long tx_buf[BITS_TO_LONGS(ATH_TID_MAX_BUFS)];
-       int bar_index;
        u16 seq_start;
        u16 seq_next;
        u16 baw_size;
-       int tidno;
+       u8 tidno;
        int baw_head;   /* first un-acked tx buffer */
        int baw_tail;   /* next unused tx buffer slot */
+
+       s8 bar_index;
        bool sched;
        bool paused;
        bool active;
@@ -262,16 +255,13 @@ struct ath_node {
        struct ieee80211_vif *vif; /* interface with which we're associated */
        struct ath_atx_tid tid[IEEE80211_NUM_TIDS];
        struct ath_atx_ac ac[IEEE80211_NUM_ACS];
-       int ps_key;
 
        u16 maxampdu;
        u8 mpdudensity;
+       s8 ps_key;
 
        bool sleeping;
-
-#if defined(CONFIG_MAC80211_DEBUGFS) && defined(CONFIG_ATH9K_DEBUGFS)
-       struct dentry *node_stat;
-#endif
+       bool no_ps_filter;
 };
 
 struct ath_tx_control {
@@ -317,6 +307,7 @@ struct ath_rx {
        struct ath_descdma rxdma;
        struct ath_rx_edma rx_edma[ATH9K_RX_QUEUE_MAX];
 
+       struct ath_buf *buf_hold;
        struct sk_buff *frag;
 
        u32 ampdu_ref;
@@ -367,6 +358,7 @@ void ath9k_release_buffered_frames(struct ieee80211_hw *hw,
 /********/
 
 struct ath_vif {
+       struct ath_node mcast_node;
        int av_bslot;
        bool primary_sta_vif;
        __le64 tsf_adjust; /* TSF adjustment for staggered beacons */
@@ -428,6 +420,7 @@ void ath9k_beacon_assign_slot(struct ath_softc *sc, struct ieee80211_vif *vif);
 void ath9k_beacon_remove_slot(struct ath_softc *sc, struct ieee80211_vif *vif);
 void ath9k_set_tsfadjust(struct ath_softc *sc, struct ieee80211_vif *vif);
 void ath9k_set_beacon(struct ath_softc *sc);
+bool ath9k_csa_is_finished(struct ath_softc *sc);
 
 /*******************/
 /* Link Monitoring */
@@ -585,19 +578,14 @@ static inline void ath_fill_led_pin(struct ath_softc *sc)
 #define ATH_ANT_DIV_COMB_MAX_COUNT 100
 #define ATH_ANT_DIV_COMB_ALT_ANT_RATIO 30
 #define ATH_ANT_DIV_COMB_ALT_ANT_RATIO2 20
+#define ATH_ANT_DIV_COMB_ALT_ANT_RATIO_LOW_RSSI 50
+#define ATH_ANT_DIV_COMB_ALT_ANT_RATIO2_LOW_RSSI 50
 
 #define ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA -1
 #define ATH_ANT_DIV_COMB_LNA1_DELTA_HI -4
 #define ATH_ANT_DIV_COMB_LNA1_DELTA_MID -2
 #define ATH_ANT_DIV_COMB_LNA1_DELTA_LOW 2
 
-enum ath9k_ant_div_comb_lna_conf {
-       ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2,
-       ATH_ANT_DIV_COMB_LNA2,
-       ATH_ANT_DIV_COMB_LNA1,
-       ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2,
-};
-
 struct ath_ant_comb {
        u16 count;
        u16 total_pkt_count;
@@ -614,27 +602,35 @@ struct ath_ant_comb {
        int rssi_first;
        int rssi_second;
        int rssi_third;
+       int ant_ratio;
+       int ant_ratio2;
        bool alt_good;
        int quick_scan_cnt;
-       int main_conf;
+       enum ath9k_ant_div_comb_lna_conf main_conf;
        enum ath9k_ant_div_comb_lna_conf first_quick_scan_conf;
        enum ath9k_ant_div_comb_lna_conf second_quick_scan_conf;
        bool first_ratio;
        bool second_ratio;
        unsigned long scan_start_time;
+
+       /*
+        * Card-specific config values.
+        */
+       int low_rssi_thresh;
+       int fast_div_bias;
 };
 
 void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs);
-void ath_ant_comb_update(struct ath_softc *sc);
 
 /********************/
 /* Main driver core */
 /********************/
 
-#define ATH9K_PCI_CUS198 0x0001
-#define ATH9K_PCI_CUS230 0x0002
-#define ATH9K_PCI_CUS217 0x0004
-#define ATH9K_PCI_WOW    0x0008
+#define ATH9K_PCI_CUS198     0x0001
+#define ATH9K_PCI_CUS230     0x0002
+#define ATH9K_PCI_CUS217     0x0004
+#define ATH9K_PCI_WOW        0x0008
+#define ATH9K_PCI_BT_ANT_DIV 0x0010
 
 /*
  * Default cache line size, in bytes.
@@ -761,6 +757,7 @@ struct ath_softc {
 #endif
 
        struct ath_descdma txsdma;
+       struct ieee80211_vif *csa_vif;
 
        struct ath_ant_comb ant_comb;
        u8 ant_tx, ant_rx;
index 1a17732bb089ca4b58e7d02880f4817b5f95a093..b5c16b3a37b953133038d0450616d3c5b2be16e3 100644 (file)
@@ -291,6 +291,23 @@ void ath9k_set_tsfadjust(struct ath_softc *sc, struct ieee80211_vif *vif)
                (unsigned long long)tsfadjust, avp->av_bslot);
 }
 
+bool ath9k_csa_is_finished(struct ath_softc *sc)
+{
+       struct ieee80211_vif *vif;
+
+       vif = sc->csa_vif;
+       if (!vif || !vif->csa_active)
+               return false;
+
+       if (!ieee80211_csa_is_complete(vif))
+               return false;
+
+       ieee80211_csa_finish(vif);
+
+       sc->csa_vif = NULL;
+       return true;
+}
+
 void ath9k_beacon_tasklet(unsigned long data)
 {
        struct ath_softc *sc = (struct ath_softc *)data;
@@ -336,6 +353,10 @@ void ath9k_beacon_tasklet(unsigned long data)
                return;
        }
 
+       /* EDMA devices check that in the tx completion function. */
+       if (!edma && ath9k_csa_is_finished(sc))
+               return;
+
        slot = ath9k_beacon_choose_slot(sc);
        vif = sc->beacon.bslot[slot];
 
index 344fdde1d7a36f39823606c1bf64cda5adb87188..d3063c21e16c7efbd67d073c756de2198e5794e5 100644 (file)
@@ -49,37 +49,40 @@ int ath9k_cmn_get_hw_crypto_keytype(struct sk_buff *skb)
 }
 EXPORT_SYMBOL(ath9k_cmn_get_hw_crypto_keytype);
 
-static u32 ath9k_get_extchanmode(struct ieee80211_channel *chan,
-                                enum nl80211_channel_type channel_type)
+static u32 ath9k_get_extchanmode(struct cfg80211_chan_def *chandef)
 {
        u32 chanmode = 0;
 
-       switch (chan->band) {
+       switch (chandef->chan->band) {
        case IEEE80211_BAND_2GHZ:
-               switch (channel_type) {
-               case NL80211_CHAN_NO_HT:
-               case NL80211_CHAN_HT20:
+               switch (chandef->width) {
+               case NL80211_CHAN_WIDTH_20_NOHT:
+               case NL80211_CHAN_WIDTH_20:
                        chanmode = CHANNEL_G_HT20;
                        break;
-               case NL80211_CHAN_HT40PLUS:
-                       chanmode = CHANNEL_G_HT40PLUS;
+               case NL80211_CHAN_WIDTH_40:
+                       if (chandef->center_freq1 > chandef->chan->center_freq)
+                               chanmode = CHANNEL_G_HT40PLUS;
+                       else
+                               chanmode = CHANNEL_G_HT40MINUS;
                        break;
-               case NL80211_CHAN_HT40MINUS:
-                       chanmode = CHANNEL_G_HT40MINUS;
+               default:
                        break;
                }
                break;
        case IEEE80211_BAND_5GHZ:
-               switch (channel_type) {
-               case NL80211_CHAN_NO_HT:
-               case NL80211_CHAN_HT20:
+               switch (chandef->width) {
+               case NL80211_CHAN_WIDTH_20_NOHT:
+               case NL80211_CHAN_WIDTH_20:
                        chanmode = CHANNEL_A_HT20;
                        break;
-               case NL80211_CHAN_HT40PLUS:
-                       chanmode = CHANNEL_A_HT40PLUS;
+               case NL80211_CHAN_WIDTH_40:
+                       if (chandef->center_freq1 > chandef->chan->center_freq)
+                               chanmode = CHANNEL_A_HT40PLUS;
+                       else
+                               chanmode = CHANNEL_A_HT40MINUS;
                        break;
-               case NL80211_CHAN_HT40MINUS:
-                       chanmode = CHANNEL_A_HT40MINUS;
+               default:
                        break;
                }
                break;
@@ -94,13 +97,12 @@ static u32 ath9k_get_extchanmode(struct ieee80211_channel *chan,
  * Update internal channel flags.
  */
 void ath9k_cmn_update_ichannel(struct ath9k_channel *ichan,
-                              struct ieee80211_channel *chan,
-                              enum nl80211_channel_type channel_type)
+                              struct cfg80211_chan_def *chandef)
 {
-       ichan->channel = chan->center_freq;
-       ichan->chan = chan;
+       ichan->channel = chandef->chan->center_freq;
+       ichan->chan = chandef->chan;
 
-       if (chan->band == IEEE80211_BAND_2GHZ) {
+       if (chandef->chan->band == IEEE80211_BAND_2GHZ) {
                ichan->chanmode = CHANNEL_G;
                ichan->channelFlags = CHANNEL_2GHZ | CHANNEL_OFDM;
        } else {
@@ -108,8 +110,22 @@ void ath9k_cmn_update_ichannel(struct ath9k_channel *ichan,
                ichan->channelFlags = CHANNEL_5GHZ | CHANNEL_OFDM;
        }
 
-       if (channel_type != NL80211_CHAN_NO_HT)
-               ichan->chanmode = ath9k_get_extchanmode(chan, channel_type);
+       switch (chandef->width) {
+       case NL80211_CHAN_WIDTH_5:
+               ichan->channelFlags |= CHANNEL_QUARTER;
+               break;
+       case NL80211_CHAN_WIDTH_10:
+               ichan->channelFlags |= CHANNEL_HALF;
+               break;
+       case NL80211_CHAN_WIDTH_20_NOHT:
+               break;
+       case NL80211_CHAN_WIDTH_20:
+       case NL80211_CHAN_WIDTH_40:
+               ichan->chanmode = ath9k_get_extchanmode(chandef);
+               break;
+       default:
+               WARN_ON(1);
+       }
 }
 EXPORT_SYMBOL(ath9k_cmn_update_ichannel);
 
@@ -125,8 +141,7 @@ struct ath9k_channel *ath9k_cmn_get_curchannel(struct ieee80211_hw *hw,
 
        chan_idx = curchan->hw_value;
        channel = &ah->channels[chan_idx];
-       ath9k_cmn_update_ichannel(channel, curchan,
-                                 cfg80211_get_chandef_type(&hw->conf.chandef));
+       ath9k_cmn_update_ichannel(channel, &hw->conf.chandef);
 
        return channel;
 }
index 207d06995b15a9fd085de9e017f878c9cbd1b96d..e039bcbfbd7923b4f8013f1c4535915a83d4b1fc 100644 (file)
@@ -44,8 +44,7 @@
 
 int ath9k_cmn_get_hw_crypto_keytype(struct sk_buff *skb);
 void ath9k_cmn_update_ichannel(struct ath9k_channel *ichan,
-                              struct ieee80211_channel *chan,
-                              enum nl80211_channel_type channel_type);
+                              struct cfg80211_chan_def *chandef);
 struct ath9k_channel *ath9k_cmn_get_curchannel(struct ieee80211_hw *hw,
                                               struct ath_hw *ah);
 int ath9k_cmn_count_streams(unsigned int chainmask, int max);
index 87454f6c7b4f0b1af790d8cab084e108f44f1d8c..c088744a6bfb6ea924fb97b07fe47671b53f5e74 100644 (file)
@@ -88,90 +88,6 @@ static const struct file_operations fops_debug = {
 
 #define DMA_BUF_LEN 1024
 
-static ssize_t read_file_tx_chainmask(struct file *file, char __user *user_buf,
-                            size_t count, loff_t *ppos)
-{
-       struct ath_softc *sc = file->private_data;
-       struct ath_hw *ah = sc->sc_ah;
-       char buf[32];
-       unsigned int len;
-
-       len = sprintf(buf, "0x%08x\n", ah->txchainmask);
-       return simple_read_from_buffer(user_buf, count, ppos, buf, len);
-}
-
-static ssize_t write_file_tx_chainmask(struct file *file, const char __user *user_buf,
-                            size_t count, loff_t *ppos)
-{
-       struct ath_softc *sc = file->private_data;
-       struct ath_hw *ah = sc->sc_ah;
-       unsigned long mask;
-       char buf[32];
-       ssize_t len;
-
-       len = min(count, sizeof(buf) - 1);
-       if (copy_from_user(buf, user_buf, len))
-               return -EFAULT;
-
-       buf[len] = '\0';
-       if (kstrtoul(buf, 0, &mask))
-               return -EINVAL;
-
-       ah->txchainmask = mask;
-       ah->caps.tx_chainmask = mask;
-       return count;
-}
-
-static const struct file_operations fops_tx_chainmask = {
-       .read = read_file_tx_chainmask,
-       .write = write_file_tx_chainmask,
-       .open = simple_open,
-       .owner = THIS_MODULE,
-       .llseek = default_llseek,
-};
-
-
-static ssize_t read_file_rx_chainmask(struct file *file, char __user *user_buf,
-                            size_t count, loff_t *ppos)
-{
-       struct ath_softc *sc = file->private_data;
-       struct ath_hw *ah = sc->sc_ah;
-       char buf[32];
-       unsigned int len;
-
-       len = sprintf(buf, "0x%08x\n", ah->rxchainmask);
-       return simple_read_from_buffer(user_buf, count, ppos, buf, len);
-}
-
-static ssize_t write_file_rx_chainmask(struct file *file, const char __user *user_buf,
-                            size_t count, loff_t *ppos)
-{
-       struct ath_softc *sc = file->private_data;
-       struct ath_hw *ah = sc->sc_ah;
-       unsigned long mask;
-       char buf[32];
-       ssize_t len;
-
-       len = min(count, sizeof(buf) - 1);
-       if (copy_from_user(buf, user_buf, len))
-               return -EFAULT;
-
-       buf[len] = '\0';
-       if (kstrtoul(buf, 0, &mask))
-               return -EINVAL;
-
-       ah->rxchainmask = mask;
-       ah->caps.rx_chainmask = mask;
-       return count;
-}
-
-static const struct file_operations fops_rx_chainmask = {
-       .read = read_file_rx_chainmask,
-       .write = write_file_rx_chainmask,
-       .open = simple_open,
-       .owner = THIS_MODULE,
-       .llseek = default_llseek,
-};
 
 static ssize_t read_file_ani(struct file *file, char __user *user_buf,
                             size_t count, loff_t *ppos)
@@ -270,25 +186,29 @@ static const struct file_operations fops_ani = {
        .llseek = default_llseek,
 };
 
-static ssize_t read_file_ant_diversity(struct file *file, char __user *user_buf,
-                                      size_t count, loff_t *ppos)
+#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
+
+static ssize_t read_file_bt_ant_diversity(struct file *file,
+                                         char __user *user_buf,
+                                         size_t count, loff_t *ppos)
 {
        struct ath_softc *sc = file->private_data;
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        char buf[32];
        unsigned int len;
 
-       len = sprintf(buf, "%d\n", common->antenna_diversity);
+       len = sprintf(buf, "%d\n", common->bt_ant_diversity);
        return simple_read_from_buffer(user_buf, count, ppos, buf, len);
 }
 
-static ssize_t write_file_ant_diversity(struct file *file,
-                                       const char __user *user_buf,
-                                       size_t count, loff_t *ppos)
+static ssize_t write_file_bt_ant_diversity(struct file *file,
+                                          const char __user *user_buf,
+                                          size_t count, loff_t *ppos)
 {
        struct ath_softc *sc = file->private_data;
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
-       unsigned long antenna_diversity;
+       struct ath9k_hw_capabilities *pCap = &sc->sc_ah->caps;
+       unsigned long bt_ant_diversity;
        char buf[32];
        ssize_t len;
 
@@ -296,26 +216,147 @@ static ssize_t write_file_ant_diversity(struct file *file,
        if (copy_from_user(buf, user_buf, len))
                return -EFAULT;
 
-       if (!AR_SREV_9565(sc->sc_ah))
+       if (!(pCap->hw_caps & ATH9K_HW_CAP_BT_ANT_DIV))
                goto exit;
 
        buf[len] = '\0';
-       if (kstrtoul(buf, 0, &antenna_diversity))
+       if (kstrtoul(buf, 0, &bt_ant_diversity))
                return -EINVAL;
 
-       common->antenna_diversity = !!antenna_diversity;
+       common->bt_ant_diversity = !!bt_ant_diversity;
        ath9k_ps_wakeup(sc);
-       ath_ant_comb_update(sc);
-       ath_dbg(common, CONFIG, "Antenna diversity: %d\n",
-               common->antenna_diversity);
+       ath9k_hw_set_bt_ant_diversity(sc->sc_ah, common->bt_ant_diversity);
+       ath_dbg(common, CONFIG, "Enable WLAN/BT RX Antenna diversity: %d\n",
+               common->bt_ant_diversity);
        ath9k_ps_restore(sc);
 exit:
        return count;
 }
 
-static const struct file_operations fops_ant_diversity = {
-       .read = read_file_ant_diversity,
-       .write = write_file_ant_diversity,
+static const struct file_operations fops_bt_ant_diversity = {
+       .read = read_file_bt_ant_diversity,
+       .write = write_file_bt_ant_diversity,
+       .open = simple_open,
+       .owner = THIS_MODULE,
+       .llseek = default_llseek,
+};
+
+#endif
+
+void ath9k_debug_stat_ant(struct ath_softc *sc,
+                         struct ath_hw_antcomb_conf *div_ant_conf,
+                         int main_rssi_avg, int alt_rssi_avg)
+{
+       struct ath_antenna_stats *as_main = &sc->debug.stats.ant_stats[ANT_MAIN];
+       struct ath_antenna_stats *as_alt = &sc->debug.stats.ant_stats[ANT_ALT];
+
+       as_main->lna_attempt_cnt[div_ant_conf->main_lna_conf]++;
+       as_alt->lna_attempt_cnt[div_ant_conf->alt_lna_conf]++;
+
+       as_main->rssi_avg = main_rssi_avg;
+       as_alt->rssi_avg = alt_rssi_avg;
+}
+
+static ssize_t read_file_antenna_diversity(struct file *file,
+                                          char __user *user_buf,
+                                          size_t count, loff_t *ppos)
+{
+       struct ath_softc *sc = file->private_data;
+       struct ath_hw *ah = sc->sc_ah;
+       struct ath9k_hw_capabilities *pCap = &ah->caps;
+       struct ath_antenna_stats *as_main = &sc->debug.stats.ant_stats[ANT_MAIN];
+       struct ath_antenna_stats *as_alt = &sc->debug.stats.ant_stats[ANT_ALT];
+       struct ath_hw_antcomb_conf div_ant_conf;
+       unsigned int len = 0, size = 1024;
+       ssize_t retval = 0;
+       char *buf;
+       char *lna_conf_str[4] = {"LNA1_MINUS_LNA2",
+                                "LNA2",
+                                "LNA1",
+                                "LNA1_PLUS_LNA2"};
+
+       buf = kzalloc(size, GFP_KERNEL);
+       if (buf == NULL)
+               return -ENOMEM;
+
+       if (!(pCap->hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB)) {
+               len += snprintf(buf + len, size - len, "%s\n",
+                               "Antenna Diversity Combining is disabled");
+               goto exit;
+       }
+
+       ath9k_ps_wakeup(sc);
+       ath9k_hw_antdiv_comb_conf_get(ah, &div_ant_conf);
+       len += snprintf(buf + len, size - len, "Current MAIN config : %s\n",
+                       lna_conf_str[div_ant_conf.main_lna_conf]);
+       len += snprintf(buf + len, size - len, "Current ALT config  : %s\n",
+                       lna_conf_str[div_ant_conf.alt_lna_conf]);
+       len += snprintf(buf + len, size - len, "Average MAIN RSSI   : %d\n",
+                       as_main->rssi_avg);
+       len += snprintf(buf + len, size - len, "Average ALT RSSI    : %d\n\n",
+                       as_alt->rssi_avg);
+       ath9k_ps_restore(sc);
+
+       len += snprintf(buf + len, size - len, "Packet Receive Cnt:\n");
+       len += snprintf(buf + len, size - len, "-------------------\n");
+
+       len += snprintf(buf + len, size - len, "%30s%15s\n",
+                       "MAIN", "ALT");
+       len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
+                       "TOTAL COUNT",
+                       as_main->recv_cnt,
+                       as_alt->recv_cnt);
+       len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
+                       "LNA1",
+                       as_main->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1],
+                       as_alt->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1]);
+       len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
+                       "LNA2",
+                       as_main->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA2],
+                       as_alt->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA2]);
+       len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
+                       "LNA1 + LNA2",
+                       as_main->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2],
+                       as_alt->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2]);
+       len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
+                       "LNA1 - LNA2",
+                       as_main->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2],
+                       as_alt->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2]);
+
+       len += snprintf(buf + len, size - len, "\nLNA Config Attempts:\n");
+       len += snprintf(buf + len, size - len, "--------------------\n");
+
+       len += snprintf(buf + len, size - len, "%30s%15s\n",
+                       "MAIN", "ALT");
+       len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
+                       "LNA1",
+                       as_main->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1],
+                       as_alt->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1]);
+       len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
+                       "LNA2",
+                       as_main->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA2],
+                       as_alt->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA2]);
+       len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
+                       "LNA1 + LNA2",
+                       as_main->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2],
+                       as_alt->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2]);
+       len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
+                       "LNA1 - LNA2",
+                       as_main->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2],
+                       as_alt->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2]);
+
+exit:
+       if (len > size)
+               len = size;
+
+       retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+       kfree(buf);
+
+       return retval;
+}
+
+static const struct file_operations fops_antenna_diversity = {
+       .read = read_file_antenna_diversity,
        .open = simple_open,
        .owner = THIS_MODULE,
        .llseek = default_llseek,
@@ -607,6 +648,28 @@ static ssize_t read_file_xmit(struct file *file, char __user *user_buf,
        return retval;
 }
 
+static ssize_t print_queue(struct ath_softc *sc, struct ath_txq *txq,
+                          char *buf, ssize_t size)
+{
+       ssize_t len = 0;
+
+       ath_txq_lock(sc, txq);
+
+       len += snprintf(buf + len, size - len, "%s: %d ",
+                       "qnum", txq->axq_qnum);
+       len += snprintf(buf + len, size - len, "%s: %2d ",
+                       "qdepth", txq->axq_depth);
+       len += snprintf(buf + len, size - len, "%s: %2d ",
+                       "ampdu-depth", txq->axq_ampdu_depth);
+       len += snprintf(buf + len, size - len, "%s: %3d ",
+                       "pending", txq->pending_frames);
+       len += snprintf(buf + len, size - len, "%s: %d\n",
+                       "stopped", txq->stopped);
+
+       ath_txq_unlock(sc, txq);
+       return len;
+}
+
 static ssize_t read_file_queues(struct file *file, char __user *user_buf,
                                size_t count, loff_t *ppos)
 {
@@ -624,24 +687,13 @@ static ssize_t read_file_queues(struct file *file, char __user *user_buf,
 
        for (i = 0; i < IEEE80211_NUM_ACS; i++) {
                txq = sc->tx.txq_map[i];
-               len += snprintf(buf + len, size - len, "(%s): ", qname[i]);
-
-               ath_txq_lock(sc, txq);
-
-               len += snprintf(buf + len, size - len, "%s: %d ",
-                               "qnum", txq->axq_qnum);
-               len += snprintf(buf + len, size - len, "%s: %2d ",
-                               "qdepth", txq->axq_depth);
-               len += snprintf(buf + len, size - len, "%s: %2d ",
-                               "ampdu-depth", txq->axq_ampdu_depth);
-               len += snprintf(buf + len, size - len, "%s: %3d ",
-                               "pending", txq->pending_frames);
-               len += snprintf(buf + len, size - len, "%s: %d\n",
-                               "stopped", txq->stopped);
-
-               ath_txq_unlock(sc, txq);
+               len += snprintf(buf + len, size - len, "(%s):  ", qname[i]);
+               len += print_queue(sc, txq, buf + len, size - len);
        }
 
+       len += snprintf(buf + len, size - len, "(CAB): ");
+       len += print_queue(sc, sc->beacon.cabq, buf + len, size - len);
+
        if (len > size)
                len = size;
 
@@ -1589,17 +1641,7 @@ void ath9k_sta_add_debugfs(struct ieee80211_hw *hw,
                           struct dentry *dir)
 {
        struct ath_node *an = (struct ath_node *)sta->drv_priv;
-       an->node_stat = debugfs_create_file("node_stat", S_IRUGO,
-                                           dir, an, &fops_node_stat);
-}
-
-void ath9k_sta_remove_debugfs(struct ieee80211_hw *hw,
-                             struct ieee80211_vif *vif,
-                             struct ieee80211_sta *sta,
-                             struct dentry *dir)
-{
-       struct ath_node *an = (struct ath_node *)sta->drv_priv;
-       debugfs_remove(an->node_stat);
+       debugfs_create_file("node_stat", S_IRUGO, dir, an, &fops_node_stat);
 }
 
 /* Ethtool support for get-stats */
@@ -1770,10 +1812,10 @@ int ath9k_init_debug(struct ath_hw *ah)
                            &fops_reset);
        debugfs_create_file("recv", S_IRUSR, sc->debug.debugfs_phy, sc,
                            &fops_recv);
-       debugfs_create_file("rx_chainmask", S_IRUSR | S_IWUSR,
-                           sc->debug.debugfs_phy, sc, &fops_rx_chainmask);
-       debugfs_create_file("tx_chainmask", S_IRUSR | S_IWUSR,
-                           sc->debug.debugfs_phy, sc, &fops_tx_chainmask);
+       debugfs_create_u8("rx_chainmask", S_IRUSR, sc->debug.debugfs_phy,
+                         &ah->rxchainmask);
+       debugfs_create_u8("tx_chainmask", S_IRUSR, sc->debug.debugfs_phy,
+                         &ah->txchainmask);
        debugfs_create_file("ani", S_IRUSR | S_IWUSR,
                            sc->debug.debugfs_phy, sc, &fops_ani);
        debugfs_create_bool("paprd", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy,
@@ -1814,9 +1856,11 @@ int ath9k_init_debug(struct ath_hw *ah)
                           sc->debug.debugfs_phy, &sc->sc_ah->gpio_mask);
        debugfs_create_u32("gpio_val", S_IRUSR | S_IWUSR,
                           sc->debug.debugfs_phy, &sc->sc_ah->gpio_val);
-       debugfs_create_file("diversity", S_IRUSR | S_IWUSR,
-                           sc->debug.debugfs_phy, sc, &fops_ant_diversity);
+       debugfs_create_file("antenna_diversity", S_IRUSR,
+                           sc->debug.debugfs_phy, sc, &fops_antenna_diversity);
 #ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
+       debugfs_create_file("bt_ant_diversity", S_IRUSR | S_IWUSR,
+                           sc->debug.debugfs_phy, sc, &fops_bt_ant_diversity);
        debugfs_create_file("btcoex", S_IRUSR, sc->debug.debugfs_phy, sc,
                            &fops_btcoex);
 #endif
index fc679198a0f38049a385fb53ab25209b23945827..6e1556fa2f3e88713fa834cb56d3a4d9f6d87c48 100644 (file)
@@ -28,9 +28,13 @@ struct fft_sample_tlv;
 #ifdef CONFIG_ATH9K_DEBUGFS
 #define TX_STAT_INC(q, c) sc->debug.stats.txstats[q].c++
 #define RESET_STAT_INC(sc, type) sc->debug.stats.reset[type]++
+#define ANT_STAT_INC(i, c) sc->debug.stats.ant_stats[i].c++
+#define ANT_LNA_INC(i, c) sc->debug.stats.ant_stats[i].lna_recv_cnt[c]++;
 #else
 #define TX_STAT_INC(q, c) do { } while (0)
 #define RESET_STAT_INC(sc, type) do { } while (0)
+#define ANT_STAT_INC(i, c) do { } while (0)
+#define ANT_LNA_INC(i, c) do { } while (0)
 #endif
 
 enum ath_reset_type {
@@ -243,11 +247,22 @@ struct ath_rx_stats {
        u32 rx_spectral;
 };
 
+#define ANT_MAIN 0
+#define ANT_ALT  1
+
+struct ath_antenna_stats {
+       u32 recv_cnt;
+       u32 rssi_avg;
+       u32 lna_recv_cnt[4];
+       u32 lna_attempt_cnt[4];
+};
+
 struct ath_stats {
        struct ath_interrupt_stats istats;
        struct ath_tx_stats txstats[ATH9K_NUM_TX_QUEUES];
        struct ath_rx_stats rxstats;
        struct ath_dfs_stats dfs_stats;
+       struct ath_antenna_stats ant_stats[2];
        u32 reset[__RESET_TYPE_MAX];
 };
 
@@ -277,14 +292,11 @@ void ath9k_sta_add_debugfs(struct ieee80211_hw *hw,
                           struct ieee80211_vif *vif,
                           struct ieee80211_sta *sta,
                           struct dentry *dir);
-void ath9k_sta_remove_debugfs(struct ieee80211_hw *hw,
-                             struct ieee80211_vif *vif,
-                             struct ieee80211_sta *sta,
-                             struct dentry *dir);
-
 void ath_debug_send_fft_sample(struct ath_softc *sc,
                               struct fft_sample_tlv *fft_sample);
-
+void ath9k_debug_stat_ant(struct ath_softc *sc,
+                         struct ath_hw_antcomb_conf *div_ant_conf,
+                         int main_rssi_avg, int alt_rssi_avg);
 #else
 
 #define RX_STAT_INC(c) /* NOP */
@@ -297,12 +309,10 @@ static inline int ath9k_init_debug(struct ath_hw *ah)
 static inline void ath9k_deinit_debug(struct ath_softc *sc)
 {
 }
-
 static inline void ath_debug_stat_interrupt(struct ath_softc *sc,
                                            enum ath9k_int status)
 {
 }
-
 static inline void ath_debug_stat_tx(struct ath_softc *sc,
                                     struct ath_buf *bf,
                                     struct ath_tx_status *ts,
@@ -310,10 +320,15 @@ static inline void ath_debug_stat_tx(struct ath_softc *sc,
                                     unsigned int flags)
 {
 }
-
 static inline void ath_debug_stat_rx(struct ath_softc *sc,
                                     struct ath_rx_status *rs)
 {
+}
+static inline void ath9k_debug_stat_ant(struct ath_softc *sc,
+                                       struct ath_hw_antcomb_conf *div_ant_conf,
+                                       int main_rssi_avg, int alt_rssi_avg)
+{
+
 }
 
 #endif /* CONFIG_ATH9K_DEBUGFS */
index c2bfd748eed81a99a150743985e4d6b1eef45621..9ea8e4b779c97c99b329619616e1ed232a1f5044 100644 (file)
@@ -812,6 +812,7 @@ static void ath9k_hw_4k_set_gain(struct ath_hw *ah,
 static void ath9k_hw_4k_set_board_values(struct ath_hw *ah,
                                         struct ath9k_channel *chan)
 {
+       struct ath9k_hw_capabilities *pCap = &ah->caps;
        struct modal_eep_4k_header *pModal;
        struct ar5416_eeprom_4k *eep = &ah->eeprom.map4k;
        struct base_eep_header_4k *pBase = &eep->baseEepHeader;
@@ -858,6 +859,24 @@ static void ath9k_hw_4k_set_board_values(struct ath_hw *ah,
 
                REG_WRITE(ah, AR_PHY_CCK_DETECT, regVal);
                regVal = REG_READ(ah, AR_PHY_CCK_DETECT);
+
+               if (pCap->hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) {
+                       /*
+                        * If diversity combining is enabled,
+                        * set MAIN to LNA1 and ALT to LNA2 initially.
+                        */
+                       regVal = REG_READ(ah, AR_PHY_MULTICHAIN_GAIN_CTL);
+                       regVal &= (~(AR_PHY_9285_ANT_DIV_MAIN_LNACONF |
+                                    AR_PHY_9285_ANT_DIV_ALT_LNACONF));
+
+                       regVal |= (ATH_ANT_DIV_COMB_LNA1 <<
+                                  AR_PHY_9285_ANT_DIV_MAIN_LNACONF_S);
+                       regVal |= (ATH_ANT_DIV_COMB_LNA2 <<
+                                  AR_PHY_9285_ANT_DIV_ALT_LNACONF_S);
+                       regVal &= (~(AR_PHY_9285_FAST_DIV_BIAS));
+                       regVal |= (0 << AR_PHY_9285_FAST_DIV_BIAS_S);
+                       REG_WRITE(ah, AR_PHY_MULTICHAIN_GAIN_CTL, regVal);
+               }
        }
 
        if (pModal->version >= 2) {
index 5205a3625e849f3f6d3d775bb7b4b8f866aebca4..6d5d716adc1b85d24782b9e38b14c7ef2b0f504a 100644 (file)
@@ -115,10 +115,10 @@ static int hif_usb_send_regout(struct hif_device_usb *hif_dev,
        cmd->skb = skb;
        cmd->hif_dev = hif_dev;
 
-       usb_fill_bulk_urb(urb, hif_dev->udev,
-                        usb_sndbulkpipe(hif_dev->udev, USB_REG_OUT_PIPE),
+       usb_fill_int_urb(urb, hif_dev->udev,
+                        usb_sndintpipe(hif_dev->udev, USB_REG_OUT_PIPE),
                         skb->data, skb->len,
-                        hif_usb_regout_cb, cmd);
+                        hif_usb_regout_cb, cmd, 1);
 
        usb_anchor_urb(urb, &hif_dev->regout_submitted);
        ret = usb_submit_urb(urb, GFP_KERNEL);
@@ -723,11 +723,11 @@ static void ath9k_hif_usb_reg_in_cb(struct urb *urb)
                        return;
                }
 
-               usb_fill_bulk_urb(urb, hif_dev->udev,
-                                usb_rcvbulkpipe(hif_dev->udev,
+               usb_fill_int_urb(urb, hif_dev->udev,
+                                usb_rcvintpipe(hif_dev->udev,
                                                 USB_REG_IN_PIPE),
                                 nskb->data, MAX_REG_IN_BUF_SIZE,
-                                ath9k_hif_usb_reg_in_cb, nskb);
+                                ath9k_hif_usb_reg_in_cb, nskb, 1);
        }
 
 resubmit:
@@ -909,11 +909,11 @@ static int ath9k_hif_usb_alloc_reg_in_urbs(struct hif_device_usb *hif_dev)
                        goto err_skb;
                }
 
-               usb_fill_bulk_urb(urb, hif_dev->udev,
-                                 usb_rcvbulkpipe(hif_dev->udev,
+               usb_fill_int_urb(urb, hif_dev->udev,
+                                 usb_rcvintpipe(hif_dev->udev,
                                                  USB_REG_IN_PIPE),
                                  skb->data, MAX_REG_IN_BUF_SIZE,
-                                 ath9k_hif_usb_reg_in_cb, skb);
+                                 ath9k_hif_usb_reg_in_cb, skb, 1);
 
                /* Anchor URB */
                usb_anchor_urb(urb, &hif_dev->reg_in_submitted);
@@ -1031,9 +1031,7 @@ static int ath9k_hif_usb_download_fw(struct hif_device_usb *hif_dev)
 
 static int ath9k_hif_usb_dev_init(struct hif_device_usb *hif_dev)
 {
-       struct usb_host_interface *alt = &hif_dev->interface->altsetting[0];
-       struct usb_endpoint_descriptor *endp;
-       int ret, idx;
+       int ret;
 
        ret = ath9k_hif_usb_download_fw(hif_dev);
        if (ret) {
@@ -1043,20 +1041,6 @@ static int ath9k_hif_usb_dev_init(struct hif_device_usb *hif_dev)
                return ret;
        }
 
-       /* On downloading the firmware to the target, the USB descriptor of EP4
-        * is 'patched' to change the type of the endpoint to Bulk. This will
-        * bring down CPU usage during the scan period.
-        */
-       for (idx = 0; idx < alt->desc.bNumEndpoints; idx++) {
-               endp = &alt->endpoint[idx].desc;
-               if ((endp->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
-                               == USB_ENDPOINT_XFER_INT) {
-                       endp->bmAttributes &= ~USB_ENDPOINT_XFERTYPE_MASK;
-                       endp->bmAttributes |= USB_ENDPOINT_XFER_BULK;
-                       endp->bInterval = 0;
-               }
-       }
-
        /* Alloc URBs */
        ret = ath9k_hif_usb_alloc_urbs(hif_dev);
        if (ret) {
@@ -1268,7 +1252,7 @@ static void ath9k_hif_usb_reboot(struct usb_device *udev)
        if (!buf)
                return;
 
-       ret = usb_bulk_msg(udev, usb_sndbulkpipe(udev, USB_REG_OUT_PIPE),
+       ret = usb_interrupt_msg(udev, usb_sndintpipe(udev, USB_REG_OUT_PIPE),
                           buf, 4, NULL, HZ);
        if (ret)
                dev_err(&udev->dev, "ath9k_htc: USB reboot failed\n");
index 5c1bec18c9e3bcd51b501de76ee83e4e8ca2aa90..d44258172c0f640236719a7e8eb4e8480c9c682b 100644 (file)
@@ -1203,16 +1203,13 @@ static int ath9k_htc_config(struct ieee80211_hw *hw, u32 changed)
 
        if ((changed & IEEE80211_CONF_CHANGE_CHANNEL) || chip_reset) {
                struct ieee80211_channel *curchan = hw->conf.chandef.chan;
-               enum nl80211_channel_type channel_type =
-                       cfg80211_get_chandef_type(&hw->conf.chandef);
                int pos = curchan->hw_value;
 
                ath_dbg(common, CONFIG, "Set channel: %d MHz\n",
                        curchan->center_freq);
 
                ath9k_cmn_update_ichannel(&priv->ah->channels[pos],
-                                         hw->conf.chandef.chan,
-                                         channel_type);
+                                         &hw->conf.chandef);
 
                if (ath9k_htc_set_channel(priv, hw, &priv->ah->channels[pos]) < 0) {
                        ath_err(common, "Unable to set channel\n");
index 14b701140b49aa3b66c4eb1a280d51500a69f2db..83f4927aeacae1d07a2b18057ea313ad716b0cec 100644 (file)
@@ -78,13 +78,16 @@ static inline void ath9k_hw_antdiv_comb_conf_set(struct ath_hw *ah,
        ath9k_hw_ops(ah)->antdiv_comb_conf_set(ah, antconf);
 }
 
-static inline void ath9k_hw_antctrl_shared_chain_lnadiv(struct ath_hw *ah,
-                                                       bool enable)
+#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
+
+static inline void ath9k_hw_set_bt_ant_diversity(struct ath_hw *ah, bool enable)
 {
-       if (ath9k_hw_ops(ah)->antctrl_shared_chain_lnadiv)
-               ath9k_hw_ops(ah)->antctrl_shared_chain_lnadiv(ah, enable);
+       if (ath9k_hw_ops(ah)->set_bt_ant_diversity)
+               ath9k_hw_ops(ah)->set_bt_ant_diversity(ah, enable);
 }
 
+#endif
+
 /* Private hardware call ops */
 
 /* PHY ops */
index 4ca0cb06010609ca490bfe4404883bde2ac85d66..b3a6891fe3d707d025443e9e347fdb85bf47d4b9 100644 (file)
@@ -1069,7 +1069,7 @@ void ath9k_hw_init_global_settings(struct ath_hw *ah)
                if (IS_CHAN_A_FAST_CLOCK(ah, chan))
                    tx_lat += 11;
 
-               sifstime *= 2;
+               sifstime = 32;
                ack_offset = 16;
                slottime = 13;
        } else if (IS_CHAN_QUARTER_RATE(chan)) {
@@ -1079,7 +1079,7 @@ void ath9k_hw_init_global_settings(struct ath_hw *ah)
                if (IS_CHAN_A_FAST_CLOCK(ah, chan))
                    tx_lat += 22;
 
-               sifstime *= 4;
+               sifstime = 64;
                ack_offset = 32;
                slottime = 21;
        } else {
@@ -1116,7 +1116,6 @@ void ath9k_hw_init_global_settings(struct ath_hw *ah)
                ctstimeout += 48 - sifstime - ah->slottime;
        }
 
-
        ath9k_hw_set_sifs_time(ah, sifstime);
        ath9k_hw_setslottime(ah, slottime);
        ath9k_hw_set_ack_timeout(ah, acktimeout);
@@ -1496,16 +1495,18 @@ static bool ath9k_hw_channel_change(struct ath_hw *ah,
                                    struct ath9k_channel *chan)
 {
        struct ath_common *common = ath9k_hw_common(ah);
+       struct ath9k_hw_capabilities *pCap = &ah->caps;
+       bool band_switch = false, mode_diff = false;
+       u8 ini_reloaded = 0;
        u32 qnum;
        int r;
-       bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
-       bool band_switch, mode_diff;
-       u8 ini_reloaded;
 
-       band_switch = (chan->channelFlags & (CHANNEL_2GHZ | CHANNEL_5GHZ)) !=
-                     (ah->curchan->channelFlags & (CHANNEL_2GHZ |
-                                                   CHANNEL_5GHZ));
-       mode_diff = (chan->chanmode != ah->curchan->chanmode);
+       if (pCap->hw_caps & ATH9K_HW_CAP_FCC_BAND_SWITCH) {
+               u32 cur = ah->curchan->channelFlags & (CHANNEL_2GHZ | CHANNEL_5GHZ);
+               u32 new = chan->channelFlags & (CHANNEL_2GHZ | CHANNEL_5GHZ);
+               band_switch = (cur != new);
+               mode_diff = (chan->chanmode != ah->curchan->chanmode);
+       }
 
        for (qnum = 0; qnum < AR_NUM_QCU; qnum++) {
                if (ath9k_hw_numtxpending(ah, qnum)) {
@@ -1520,11 +1521,12 @@ static bool ath9k_hw_channel_change(struct ath_hw *ah,
                return false;
        }
 
-       if (edma && (band_switch || mode_diff)) {
+       if (band_switch || mode_diff) {
                ath9k_hw_mark_phy_inactive(ah);
                udelay(5);
 
-               ath9k_hw_init_pll(ah, NULL);
+               if (band_switch)
+                       ath9k_hw_init_pll(ah, chan);
 
                if (ath9k_hw_fast_chan_change(ah, chan, &ini_reloaded)) {
                        ath_err(common, "Failed to do fast channel change\n");
@@ -1541,22 +1543,21 @@ static bool ath9k_hw_channel_change(struct ath_hw *ah,
        }
        ath9k_hw_set_clockrate(ah);
        ath9k_hw_apply_txpower(ah, chan, false);
-       ath9k_hw_rfbus_done(ah);
 
        if (IS_CHAN_OFDM(chan) || IS_CHAN_HT(chan))
                ath9k_hw_set_delta_slope(ah, chan);
 
        ath9k_hw_spur_mitigate_freq(ah, chan);
 
-       if (edma && (band_switch || mode_diff)) {
-               ah->ah_flags |= AH_FASTCC;
-               if (band_switch || ini_reloaded)
-                       ah->eep_ops->set_board_values(ah, chan);
+       if (band_switch || ini_reloaded)
+               ah->eep_ops->set_board_values(ah, chan);
 
-               ath9k_hw_init_bb(ah, chan);
+       ath9k_hw_init_bb(ah, chan);
+       ath9k_hw_rfbus_done(ah);
 
-               if (band_switch || ini_reloaded)
-                       ath9k_hw_init_cal(ah, chan);
+       if (band_switch || ini_reloaded) {
+               ah->ah_flags |= AH_FASTCC;
+               ath9k_hw_init_cal(ah, chan);
                ah->ah_flags &= ~AH_FASTCC;
        }
 
@@ -1778,16 +1779,11 @@ static void ath9k_hw_init_desc(struct ath_hw *ah)
 /*
  * Fast channel change:
  * (Change synthesizer based on channel freq without resetting chip)
- *
- * Don't do FCC when
- *   - Flag is not set
- *   - Chip is just coming out of full sleep
- *   - Channel to be set is same as current channel
- *   - Channel flags are different, (eg.,moving from 2GHz to 5GHz channel)
  */
 static int ath9k_hw_do_fastcc(struct ath_hw *ah, struct ath9k_channel *chan)
 {
        struct ath_common *common = ath9k_hw_common(ah);
+       struct ath9k_hw_capabilities *pCap = &ah->caps;
        int ret;
 
        if (AR_SREV_9280(ah) && common->bus_ops->ath_bus_type == ATH_PCI)
@@ -1806,9 +1802,21 @@ static int ath9k_hw_do_fastcc(struct ath_hw *ah, struct ath9k_channel *chan)
            (CHANNEL_HALF | CHANNEL_QUARTER))
                goto fail;
 
-       if ((chan->channelFlags & CHANNEL_ALL) !=
-           (ah->curchan->channelFlags & CHANNEL_ALL))
-               goto fail;
+       /*
+        * If cross-band fcc is not supported, bail out if
+        * either channelFlags or chanmode differ.
+        *
+        * chanmode will be different if the HT operating mode
+        * changes because of CSA.
+        */
+       if (!(pCap->hw_caps & ATH9K_HW_CAP_FCC_BAND_SWITCH)) {
+               if ((chan->channelFlags & CHANNEL_ALL) !=
+                   (ah->curchan->channelFlags & CHANNEL_ALL))
+                       goto fail;
+
+               if (chan->chanmode != ah->curchan->chanmode)
+                       goto fail;
+       }
 
        if (!ath9k_hw_check_alive(ah))
                goto fail;
@@ -2047,7 +2055,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
 
        ath9k_hw_apply_gpio_override(ah);
 
-       if (AR_SREV_9565(ah) && ah->shared_chain_lnadiv)
+       if (AR_SREV_9565(ah) && common->bt_ant_diversity)
                REG_SET_BIT(ah, AR_BTCOEX_WL_LNADIV, AR_BTCOEX_WL_LNADIV_FORCE_ON);
 
        return 0;
@@ -2504,7 +2512,7 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
        else
                pCap->rts_aggr_limit = (8 * 1024);
 
-#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
+#ifdef CONFIG_ATH9K_RFKILL
        ah->rfsilent = ah->eep_ops->get_eeprom(ah, EEP_RF_SILENT);
        if (ah->rfsilent & EEP_RFSILENT_ENABLED) {
                ah->rfkill_gpio =
@@ -2550,34 +2558,28 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
        if (AR_SREV_9287_11_OR_LATER(ah) || AR_SREV_9271(ah))
                pCap->hw_caps |= ATH9K_HW_CAP_SGI_20;
 
-       if (AR_SREV_9285(ah))
+       if (AR_SREV_9285(ah)) {
                if (ah->eep_ops->get_eeprom(ah, EEP_MODAL_VER) >= 3) {
                        ant_div_ctl1 =
                                ah->eep_ops->get_eeprom(ah, EEP_ANT_DIV_CTL1);
-                       if ((ant_div_ctl1 & 0x1) && ((ant_div_ctl1 >> 3) & 0x1))
+                       if ((ant_div_ctl1 & 0x1) && ((ant_div_ctl1 >> 3) & 0x1)) {
                                pCap->hw_caps |= ATH9K_HW_CAP_ANT_DIV_COMB;
+                               ath_info(common, "Enable LNA combining\n");
+                       }
                }
+       }
+
        if (AR_SREV_9300_20_OR_LATER(ah)) {
                if (ah->eep_ops->get_eeprom(ah, EEP_CHAIN_MASK_REDUCE))
                        pCap->hw_caps |= ATH9K_HW_CAP_APM;
        }
 
-
        if (AR_SREV_9330(ah) || AR_SREV_9485(ah) || AR_SREV_9565(ah)) {
                ant_div_ctl1 = ah->eep_ops->get_eeprom(ah, EEP_ANT_DIV_CTL1);
-               /*
-                * enable the diversity-combining algorithm only when
-                * both enable_lna_div and enable_fast_div are set
-                *              Table for Diversity
-                * ant_div_alt_lnaconf          bit 0-1
-                * ant_div_main_lnaconf         bit 2-3
-                * ant_div_alt_gaintb           bit 4
-                * ant_div_main_gaintb          bit 5
-                * enable_ant_div_lnadiv        bit 6
-                * enable_ant_fast_div          bit 7
-                */
-               if ((ant_div_ctl1 >> 0x6) == 0x3)
+               if ((ant_div_ctl1 >> 0x6) == 0x3) {
                        pCap->hw_caps |= ATH9K_HW_CAP_ANT_DIV_COMB;
+                       ath_info(common, "Enable LNA combining\n");
+               }
        }
 
        if (ath9k_hw_dfs_tested(ah))
@@ -2610,6 +2612,13 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
            ah->eep_ops->get_eeprom(ah, EEP_PAPRD))
                        pCap->hw_caps |= ATH9K_HW_CAP_PAPRD;
 
+       /*
+        * Fast channel change across bands is available
+        * only for AR9462 and AR9565.
+        */
+       if (AR_SREV_9462(ah) || AR_SREV_9565(ah))
+               pCap->hw_caps |= ATH9K_HW_CAP_FCC_BAND_SWITCH;
+
        return 0;
 }
 
index cd74b3afef7db179fc4fcedc23eaa509722da52c..64ff8e61c243b06a3b724a16eb0f29a8a953dce1 100644 (file)
@@ -247,6 +247,8 @@ enum ath9k_hw_caps {
        ATH9K_HW_CAP_DFS                        = BIT(16),
        ATH9K_HW_WOW_DEVICE_CAPABLE             = BIT(17),
        ATH9K_HW_CAP_PAPRD                      = BIT(18),
+       ATH9K_HW_CAP_FCC_BAND_SWITCH            = BIT(19),
+       ATH9K_HW_CAP_BT_ANT_DIV                 = BIT(20),
 };
 
 /*
@@ -310,6 +312,7 @@ struct ath9k_ops_config {
 
        /* Platform specific config */
        u32 xlna_gpio;
+       u32 ant_ctrl_comm2g_switch_enable;
        bool xatten_margin_cfg;
 };
 
@@ -716,11 +719,14 @@ struct ath_hw_ops {
                        struct ath_hw_antcomb_conf *antconf);
        void (*antdiv_comb_conf_set)(struct ath_hw *ah,
                        struct ath_hw_antcomb_conf *antconf);
-       void (*antctrl_shared_chain_lnadiv)(struct ath_hw *hw, bool enable);
        void (*spectral_scan_config)(struct ath_hw *ah,
                                     struct ath_spec_scan *param);
        void (*spectral_scan_trigger)(struct ath_hw *ah);
        void (*spectral_scan_wait)(struct ath_hw *ah);
+
+#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
+       void (*set_bt_ant_diversity)(struct ath_hw *hw, bool enable);
+#endif
 };
 
 struct ath_nf_limits {
@@ -765,7 +771,6 @@ struct ath_hw {
        bool aspm_enabled;
        bool is_monitoring;
        bool need_an_top2_fixup;
-       bool shared_chain_lnadiv;
        u16 tx_trig_level;
 
        u32 nf_regs[6];
index 16f8b201642b71de28ba4920735d0c8a2f5ee8b0..abf1eb5d97ad9103fb06d1f3357ef61c76d6e9ba 100644 (file)
@@ -53,9 +53,9 @@ static int ath9k_btcoex_enable;
 module_param_named(btcoex_enable, ath9k_btcoex_enable, int, 0444);
 MODULE_PARM_DESC(btcoex_enable, "Enable wifi-BT coexistence");
 
-static int ath9k_enable_diversity;
-module_param_named(enable_diversity, ath9k_enable_diversity, int, 0444);
-MODULE_PARM_DESC(enable_diversity, "Enable Antenna diversity for AR9565");
+static int ath9k_bt_ant_diversity;
+module_param_named(bt_ant_diversity, ath9k_bt_ant_diversity, int, 0444);
+MODULE_PARM_DESC(bt_ant_diversity, "Enable WLAN/BT RX antenna diversity");
 
 bool is_ath9k_unloaded;
 /* We use the hw_value as an index into our private channel structure */
@@ -146,14 +146,22 @@ static struct ieee80211_rate ath9k_legacy_rates[] = {
        RATE(20, 0x1a, IEEE80211_RATE_SHORT_PREAMBLE),
        RATE(55, 0x19, IEEE80211_RATE_SHORT_PREAMBLE),
        RATE(110, 0x18, IEEE80211_RATE_SHORT_PREAMBLE),
-       RATE(60, 0x0b, 0),
-       RATE(90, 0x0f, 0),
-       RATE(120, 0x0a, 0),
-       RATE(180, 0x0e, 0),
-       RATE(240, 0x09, 0),
-       RATE(360, 0x0d, 0),
-       RATE(480, 0x08, 0),
-       RATE(540, 0x0c, 0),
+       RATE(60, 0x0b, (IEEE80211_RATE_SUPPORTS_5MHZ |
+                       IEEE80211_RATE_SUPPORTS_10MHZ)),
+       RATE(90, 0x0f, (IEEE80211_RATE_SUPPORTS_5MHZ |
+                       IEEE80211_RATE_SUPPORTS_10MHZ)),
+       RATE(120, 0x0a, (IEEE80211_RATE_SUPPORTS_5MHZ |
+                        IEEE80211_RATE_SUPPORTS_10MHZ)),
+       RATE(180, 0x0e, (IEEE80211_RATE_SUPPORTS_5MHZ |
+                        IEEE80211_RATE_SUPPORTS_10MHZ)),
+       RATE(240, 0x09, (IEEE80211_RATE_SUPPORTS_5MHZ |
+                        IEEE80211_RATE_SUPPORTS_10MHZ)),
+       RATE(360, 0x0d, (IEEE80211_RATE_SUPPORTS_5MHZ |
+                        IEEE80211_RATE_SUPPORTS_10MHZ)),
+       RATE(480, 0x08, (IEEE80211_RATE_SUPPORTS_5MHZ |
+                        IEEE80211_RATE_SUPPORTS_10MHZ)),
+       RATE(540, 0x0c, (IEEE80211_RATE_SUPPORTS_5MHZ |
+                        IEEE80211_RATE_SUPPORTS_10MHZ)),
 };
 
 #ifdef CONFIG_MAC80211_LEDS
@@ -516,6 +524,7 @@ static void ath9k_init_misc(struct ath_softc *sc)
 static void ath9k_init_platform(struct ath_softc *sc)
 {
        struct ath_hw *ah = sc->sc_ah;
+       struct ath9k_hw_capabilities *pCap = &ah->caps;
        struct ath_common *common = ath9k_hw_common(ah);
 
        if (common->bus_ops->ath_bus_type != ATH_PCI)
@@ -525,12 +534,21 @@ static void ath9k_init_platform(struct ath_softc *sc)
                               ATH9K_PCI_CUS230)) {
                ah->config.xlna_gpio = 9;
                ah->config.xatten_margin_cfg = true;
+               ah->config.ant_ctrl_comm2g_switch_enable = 0x000BBB88;
+               sc->ant_comb.low_rssi_thresh = 20;
+               sc->ant_comb.fast_div_bias = 3;
 
                ath_info(common, "Set parameters for %s\n",
                         (sc->driver_data & ATH9K_PCI_CUS198) ?
                         "CUS198" : "CUS230");
-       } else if (sc->driver_data & ATH9K_PCI_CUS217) {
+       }
+
+       if (sc->driver_data & ATH9K_PCI_CUS217)
                ath_info(common, "CUS217 card detected\n");
+
+       if (sc->driver_data & ATH9K_PCI_BT_ANT_DIV) {
+               pCap->hw_caps |= ATH9K_HW_CAP_BT_ANT_DIV;
+               ath_info(common, "Set BT/WLAN RX diversity capability\n");
        }
 }
 
@@ -584,6 +602,7 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
 {
        struct ath9k_platform_data *pdata = sc->dev->platform_data;
        struct ath_hw *ah = NULL;
+       struct ath9k_hw_capabilities *pCap;
        struct ath_common *common;
        int ret = 0, i;
        int csz = 0;
@@ -600,6 +619,7 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
        ah->reg_ops.rmw = ath9k_reg_rmw;
        atomic_set(&ah->intr_ref_cnt, -1);
        sc->sc_ah = ah;
+       pCap = &ah->caps;
 
        sc->dfs_detector = dfs_pattern_detector_init(ah, NL80211_DFS_UNSET);
 
@@ -631,11 +651,15 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
        ath9k_init_platform(sc);
 
        /*
-        * Enable Antenna diversity only when BTCOEX is disabled
-        * and the user manually requests the feature.
+        * Enable WLAN/BT RX Antenna diversity only when:
+        *
+        * - BTCOEX is disabled.
+        * - the user manually requests the feature.
+        * - the HW cap is set using the platform data.
         */
-       if (!common->btcoex_enabled && ath9k_enable_diversity)
-               common->antenna_diversity = 1;
+       if (!common->btcoex_enabled && ath9k_bt_ant_diversity &&
+           (pCap->hw_caps & ATH9K_HW_CAP_BT_ANT_DIV))
+               common->bt_ant_diversity = 1;
 
        spin_lock_init(&common->cc_lock);
 
@@ -710,13 +734,15 @@ static void ath9k_init_band_txpower(struct ath_softc *sc, int band)
        struct ieee80211_supported_band *sband;
        struct ieee80211_channel *chan;
        struct ath_hw *ah = sc->sc_ah;
+       struct cfg80211_chan_def chandef;
        int i;
 
        sband = &sc->sbands[band];
        for (i = 0; i < sband->n_channels; i++) {
                chan = &sband->channels[i];
                ah->curchan = &ah->channels[chan->hw_value];
-               ath9k_cmn_update_ichannel(ah->curchan, chan, NL80211_CHAN_HT20);
+               cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_HT20);
+               ath9k_cmn_update_ichannel(ah->curchan, &chandef);
                ath9k_hw_set_txpowerlimit(ah, MAX_RATE_POWER, true);
        }
 }
@@ -834,6 +860,8 @@ void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
        hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
        hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
        hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
+       hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_5_10_MHZ;
+       hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
 
 #ifdef CONFIG_PM_SLEEP
        if ((ah->caps.hw_caps & ATH9K_HW_WOW_DEVICE_CAPABLE) &&
index 2ef05ebffbcf4d7c2167ed46c26aa192060427ed..a3eff0986a3f95c1ea6b523fc71b02a1ac2b4eba 100644 (file)
@@ -583,9 +583,9 @@ int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
        rs->rs_rate = MS(ads.ds_rxstatus0, AR_RxRate);
        rs->rs_more = (ads.ds_rxstatus1 & AR_RxMore) ? 1 : 0;
 
+       rs->rs_firstaggr = (ads.ds_rxstatus8 & AR_RxFirstAggr) ? 1 : 0;
        rs->rs_isaggr = (ads.ds_rxstatus8 & AR_RxAggr) ? 1 : 0;
-       rs->rs_moreaggr =
-               (ads.ds_rxstatus8 & AR_RxMoreAggr) ? 1 : 0;
+       rs->rs_moreaggr = (ads.ds_rxstatus8 & AR_RxMoreAggr) ? 1 : 0;
        rs->rs_antenna = MS(ads.ds_rxstatus3, AR_RxAntenna);
 
        /* directly mapped flags for ieee80211_rx_status */
index b02dfce964b4d82f952aaba959e9fcac5fbfe19c..bfccaceed44ef82f34c7e9df6562975f4322bf5e 100644 (file)
@@ -140,6 +140,7 @@ struct ath_rx_status {
        int8_t rs_rssi_ext1;
        int8_t rs_rssi_ext2;
        u8 rs_isaggr;
+       u8 rs_firstaggr;
        u8 rs_moreaggr;
        u8 rs_num_delims;
        u8 rs_flags;
@@ -569,6 +570,7 @@ struct ar5416_desc {
 #define AR_RxAggr           0x00020000
 #define AR_PostDelimCRCErr  0x00040000
 #define AR_RxStatusRsvd71   0x3ff80000
+#define AR_RxFirstAggr      0x20000000
 #define AR_DecryptBusyErr   0x40000000
 #define AR_KeyMiss          0x80000000
 
index 1737a3e336859013e2b9aeb6d49d61da1b35a688..ac9f18fa072964f3c3f6e867b7f4e2bf01102527 100644 (file)
@@ -238,9 +238,6 @@ static bool ath_complete_reset(struct ath_softc *sc, bool start)
                ath_restart_work(sc);
        }
 
-       if ((ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) && sc->ant_rx != 3)
-               ath_ant_comb_update(sc);
-
        ieee80211_wake_queues(sc->hw);
 
        return true;
@@ -966,6 +963,8 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
        struct ath_softc *sc = hw->priv;
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
+       struct ath_vif *avp = (void *)vif->drv_priv;
+       struct ath_node *an = &avp->mcast_node;
 
        mutex_lock(&sc->mutex);
 
@@ -979,6 +978,12 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
        if (ath9k_uses_beacons(vif->type))
                ath9k_beacon_assign_slot(sc, vif);
 
+       an->sc = sc;
+       an->sta = NULL;
+       an->vif = vif;
+       an->no_ps_filter = true;
+       ath_tx_node_init(sc, an);
+
        mutex_unlock(&sc->mutex);
        return 0;
 }
@@ -1016,6 +1021,7 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
 {
        struct ath_softc *sc = hw->priv;
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+       struct ath_vif *avp = (void *)vif->drv_priv;
 
        ath_dbg(common, CONFIG, "Detach Interface\n");
 
@@ -1026,10 +1032,15 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
        if (ath9k_uses_beacons(vif->type))
                ath9k_beacon_remove_slot(sc, vif);
 
+       if (sc->csa_vif == vif)
+               sc->csa_vif = NULL;
+
        ath9k_ps_wakeup(sc);
        ath9k_calculate_summary_state(hw, NULL);
        ath9k_ps_restore(sc);
 
+       ath_tx_node_cleanup(sc, &avp->mcast_node);
+
        mutex_unlock(&sc->mutex);
 }
 
@@ -1193,8 +1204,6 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
 
        if ((changed & IEEE80211_CONF_CHANGE_CHANNEL) || reset_channel) {
                struct ieee80211_channel *curchan = hw->conf.chandef.chan;
-               enum nl80211_channel_type channel_type =
-                       cfg80211_get_chandef_type(&conf->chandef);
                int pos = curchan->hw_value;
                int old_pos = -1;
                unsigned long flags;
@@ -1202,8 +1211,8 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
                if (ah->curchan)
                        old_pos = ah->curchan - &ah->channels[0];
 
-               ath_dbg(common, CONFIG, "Set channel: %d MHz type: %d\n",
-                       curchan->center_freq, channel_type);
+               ath_dbg(common, CONFIG, "Set channel: %d MHz width: %d\n",
+                       curchan->center_freq, hw->conf.chandef.width);
 
                /* update survey stats for the old channel before switching */
                spin_lock_irqsave(&common->cc_lock, flags);
@@ -1211,7 +1220,7 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
                spin_unlock_irqrestore(&common->cc_lock, flags);
 
                ath9k_cmn_update_ichannel(&sc->sc_ah->channels[pos],
-                                         curchan, channel_type);
+                                         &conf->chandef);
 
                /*
                 * If the operating channel changes, change the survey in-use flags
@@ -1374,9 +1383,6 @@ static void ath9k_sta_notify(struct ieee80211_hw *hw,
        struct ath_softc *sc = hw->priv;
        struct ath_node *an = (struct ath_node *) sta->drv_priv;
 
-       if (!sta->ht_cap.ht_supported)
-               return;
-
        switch (cmd) {
        case STA_NOTIFY_SLEEP:
                an->sleeping = true;
@@ -2094,7 +2100,7 @@ static void ath9k_wow_add_pattern(struct ath_softc *sc,
 {
        struct ath_hw *ah = sc->sc_ah;
        struct ath9k_wow_pattern *wow_pattern = NULL;
-       struct cfg80211_wowlan_trig_pkt_pattern *patterns = wowlan->patterns;
+       struct cfg80211_pkt_pattern *patterns = wowlan->patterns;
        int mask_len;
        s8 i = 0;
 
@@ -2315,6 +2321,19 @@ static void ath9k_sw_scan_complete(struct ieee80211_hw *hw)
        clear_bit(SC_OP_SCANNING, &sc->sc_flags);
 }
 
+static void ath9k_channel_switch_beacon(struct ieee80211_hw *hw,
+                                       struct ieee80211_vif *vif,
+                                       struct cfg80211_chan_def *chandef)
+{
+       struct ath_softc *sc = hw->priv;
+
+       /* mac80211 does not support CSA in multi-if cases (yet) */
+       if (WARN_ON(sc->csa_vif))
+               return;
+
+       sc->csa_vif = vif;
+}
+
 struct ieee80211_ops ath9k_ops = {
        .tx                 = ath9k_tx,
        .start              = ath9k_start,
@@ -2359,8 +2378,8 @@ struct ieee80211_ops ath9k_ops = {
 
 #if defined(CONFIG_MAC80211_DEBUGFS) && defined(CONFIG_ATH9K_DEBUGFS)
        .sta_add_debugfs    = ath9k_sta_add_debugfs,
-       .sta_remove_debugfs = ath9k_sta_remove_debugfs,
 #endif
        .sw_scan_start      = ath9k_sw_scan_start,
        .sw_scan_complete   = ath9k_sw_scan_complete,
+       .channel_switch_beacon     = ath9k_channel_switch_beacon,
 };
index c585c9b359733ff377669ec9ac8f76859e492cda..76e8c359bbf85a7d547c8a00ee5148fef5d71aa1 100644 (file)
@@ -29,6 +29,14 @@ static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = {
        { PCI_VDEVICE(ATHEROS, 0x0027) }, /* PCI   */
        { PCI_VDEVICE(ATHEROS, 0x0029) }, /* PCI   */
        { PCI_VDEVICE(ATHEROS, 0x002A) }, /* PCI-E */
+
+       /* AR9285 card for Asus */
+       { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+                        0x002B,
+                        PCI_VENDOR_ID_AZWAVE,
+                        0x2C37),
+         .driver_data = ATH9K_PCI_BT_ANT_DIV },
+
        { PCI_VDEVICE(ATHEROS, 0x002B) }, /* PCI-E */
        { PCI_VDEVICE(ATHEROS, 0x002C) }, /* PCI-E 802.11n bonded out */
        { PCI_VDEVICE(ATHEROS, 0x002D) }, /* PCI   */
@@ -40,29 +48,101 @@ static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = {
                         0x0032,
                         PCI_VENDOR_ID_AZWAVE,
                         0x2086),
-         .driver_data = ATH9K_PCI_CUS198 },
+         .driver_data = ATH9K_PCI_CUS198 | ATH9K_PCI_BT_ANT_DIV },
        { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
                         0x0032,
                         PCI_VENDOR_ID_AZWAVE,
                         0x1237),
-         .driver_data = ATH9K_PCI_CUS198 },
+         .driver_data = ATH9K_PCI_CUS198 | ATH9K_PCI_BT_ANT_DIV },
        { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
                         0x0032,
                         PCI_VENDOR_ID_AZWAVE,
                         0x2126),
-         .driver_data = ATH9K_PCI_CUS198 },
+         .driver_data = ATH9K_PCI_CUS198 | ATH9K_PCI_BT_ANT_DIV },
 
        /* PCI-E CUS230 */
        { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
                         0x0032,
                         PCI_VENDOR_ID_AZWAVE,
                         0x2152),
-         .driver_data = ATH9K_PCI_CUS230 },
+         .driver_data = ATH9K_PCI_CUS230 | ATH9K_PCI_BT_ANT_DIV },
        { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
                         0x0032,
                         PCI_VENDOR_ID_FOXCONN,
                         0xE075),
-         .driver_data = ATH9K_PCI_CUS230 },
+         .driver_data = ATH9K_PCI_CUS230 | ATH9K_PCI_BT_ANT_DIV },
+
+       /* WB225 */
+       { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+                        0x0032,
+                        PCI_VENDOR_ID_ATHEROS,
+                        0x3119),
+         .driver_data = ATH9K_PCI_BT_ANT_DIV },
+       { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+                        0x0032,
+                        PCI_VENDOR_ID_ATHEROS,
+                        0x3122),
+         .driver_data = ATH9K_PCI_BT_ANT_DIV },
+       { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+                        0x0032,
+                        0x185F, /* WNC */
+                        0x3119),
+         .driver_data = ATH9K_PCI_BT_ANT_DIV },
+       { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+                        0x0032,
+                        0x185F, /* WNC */
+                        0x3027),
+         .driver_data = ATH9K_PCI_BT_ANT_DIV },
+       { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+                        0x0032,
+                        PCI_VENDOR_ID_SAMSUNG,
+                        0x4105),
+         .driver_data = ATH9K_PCI_BT_ANT_DIV },
+       { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+                        0x0032,
+                        PCI_VENDOR_ID_SAMSUNG,
+                        0x4106),
+         .driver_data = ATH9K_PCI_BT_ANT_DIV },
+       { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+                        0x0032,
+                        PCI_VENDOR_ID_SAMSUNG,
+                        0x410D),
+         .driver_data = ATH9K_PCI_BT_ANT_DIV },
+       { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+                        0x0032,
+                        PCI_VENDOR_ID_SAMSUNG,
+                        0x410E),
+         .driver_data = ATH9K_PCI_BT_ANT_DIV },
+       { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+                        0x0032,
+                        PCI_VENDOR_ID_SAMSUNG,
+                        0x410F),
+         .driver_data = ATH9K_PCI_BT_ANT_DIV },
+       { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+                        0x0032,
+                        PCI_VENDOR_ID_SAMSUNG,
+                        0xC706),
+         .driver_data = ATH9K_PCI_BT_ANT_DIV },
+       { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+                        0x0032,
+                        PCI_VENDOR_ID_SAMSUNG,
+                        0xC680),
+         .driver_data = ATH9K_PCI_BT_ANT_DIV },
+       { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+                        0x0032,
+                        PCI_VENDOR_ID_SAMSUNG,
+                        0xC708),
+         .driver_data = ATH9K_PCI_BT_ANT_DIV },
+       { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+                        0x0032,
+                        PCI_VENDOR_ID_LENOVO,
+                        0x3218),
+         .driver_data = ATH9K_PCI_BT_ANT_DIV },
+       { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+                        0x0032,
+                        PCI_VENDOR_ID_LENOVO,
+                        0x3219),
+         .driver_data = ATH9K_PCI_BT_ANT_DIV },
 
        { PCI_VDEVICE(ATHEROS, 0x0032) }, /* PCI-E  AR9485 */
        { PCI_VDEVICE(ATHEROS, 0x0033) }, /* PCI-E  AR9580 */
index 8b380305b0fc1acf8c2593acd474daa52f7eb6c9..4a1b99238ec225af3aef646f14021d94df3792c7 100644 (file)
 #define AR_PHY_PLL_CONTROL 0x16180
 #define AR_PHY_PLL_MODE 0x16184
 
+enum ath9k_ant_div_comb_lna_conf {
+       ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2,
+       ATH_ANT_DIV_COMB_LNA2,
+       ATH_ANT_DIV_COMB_LNA1,
+       ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2,
+};
+
 #endif
index 7eb1f4b458e4fb1f3c096fd297bb60b273d9c686..d3d7c51fa6c8bd65d7df5f803c99450aff980d95 100644 (file)
@@ -1275,15 +1275,21 @@ static void ath_tx_status(void *priv, struct ieee80211_supported_band *sband,
 }
 
 static void ath_rate_init(void *priv, struct ieee80211_supported_band *sband,
+                         struct cfg80211_chan_def *chandef,
                           struct ieee80211_sta *sta, void *priv_sta)
 {
        struct ath_softc *sc = priv;
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        struct ath_rate_priv *ath_rc_priv = priv_sta;
        int i, j = 0;
+       u32 rate_flags = ieee80211_chandef_rate_flags(&sc->hw->conf.chandef);
 
        for (i = 0; i < sband->n_bitrates; i++) {
                if (sta->supp_rates[sband->band] & BIT(i)) {
+                       if ((rate_flags & sband->bitrates[i].flags)
+                           != rate_flags)
+                               continue;
+
                        ath_rc_priv->neg_rates.rs_rates[j]
                                = (sband->bitrates[i].bitrate * 2) / 10;
                        j++;
@@ -1313,6 +1319,7 @@ static void ath_rate_init(void *priv, struct ieee80211_supported_band *sband,
 }
 
 static void ath_rate_update(void *priv, struct ieee80211_supported_band *sband,
+                           struct cfg80211_chan_def *chandef,
                            struct ieee80211_sta *sta, void *priv_sta,
                            u32 changed)
 {
@@ -1324,8 +1331,8 @@ static void ath_rate_update(void *priv, struct ieee80211_supported_band *sband,
                ath_rc_init(sc, priv_sta);
 
                ath_dbg(ath9k_hw_common(sc->sc_ah), CONFIG,
-                       "Operating HT Bandwidth changed to: %d\n",
-                       cfg80211_get_chandef_type(&sc->hw->conf.chandef));
+                       "Operating Bandwidth changed to: %d\n",
+                       sc->hw->conf.chandef.width);
        }
 }
 
index 865e043e8aa6408c92e2b512ffcc7ba69c4ed39f..4ee472a5a4e4ee6e81b1c0ffc820b6f685fb7ad1 100644 (file)
@@ -42,8 +42,6 @@ static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
        struct ath_desc *ds;
        struct sk_buff *skb;
 
-       ATH_RXBUF_RESET(bf);
-
        ds = bf->bf_desc;
        ds->ds_link = 0; /* link to null */
        ds->ds_data = bf->bf_buf_addr;
@@ -70,6 +68,14 @@ static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
        sc->rx.rxlink = &ds->ds_link;
 }
 
+static void ath_rx_buf_relink(struct ath_softc *sc, struct ath_buf *bf)
+{
+       if (sc->rx.buf_hold)
+               ath_rx_buf_link(sc, sc->rx.buf_hold);
+
+       sc->rx.buf_hold = bf;
+}
+
 static void ath_setdefantenna(struct ath_softc *sc, u32 antenna)
 {
        /* XXX block beacon interrupts */
@@ -117,7 +123,6 @@ static bool ath_rx_edma_buf_link(struct ath_softc *sc,
 
        skb = bf->bf_mpdu;
 
-       ATH_RXBUF_RESET(bf);
        memset(skb->data, 0, ah->caps.rx_status_len);
        dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
                                ah->caps.rx_status_len, DMA_TO_DEVICE);
@@ -185,7 +190,7 @@ static void ath_rx_edma_cleanup(struct ath_softc *sc)
 
 static void ath_rx_edma_init_queue(struct ath_rx_edma *rx_edma, int size)
 {
-       skb_queue_head_init(&rx_edma->rx_fifo);
+       __skb_queue_head_init(&rx_edma->rx_fifo);
        rx_edma->rx_fifo_hwsize = size;
 }
 
@@ -432,6 +437,7 @@ int ath_startrecv(struct ath_softc *sc)
        if (list_empty(&sc->rx.rxbuf))
                goto start_recv;
 
+       sc->rx.buf_hold = NULL;
        sc->rx.rxlink = NULL;
        list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list) {
                ath_rx_buf_link(sc, bf);
@@ -677,6 +683,9 @@ static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc,
        }
 
        bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
+       if (bf == sc->rx.buf_hold)
+               return NULL;
+
        ds = bf->bf_desc;
 
        /*
@@ -755,7 +764,6 @@ static bool ath9k_rx_accept(struct ath_common *common,
        bool is_mc, is_valid_tkip, strip_mic, mic_error;
        struct ath_hw *ah = common->ah;
        __le16 fc;
-       u8 rx_status_len = ah->caps.rx_status_len;
 
        fc = hdr->frame_control;
 
@@ -777,25 +785,6 @@ static bool ath9k_rx_accept(struct ath_common *common,
            !test_bit(rx_stats->rs_keyix, common->ccmp_keymap))
                rx_stats->rs_status &= ~ATH9K_RXERR_KEYMISS;
 
-       if (!rx_stats->rs_datalen) {
-               RX_STAT_INC(rx_len_err);
-               return false;
-       }
-
-        /*
-         * rs_status follows rs_datalen so if rs_datalen is too large
-         * we can take a hint that hardware corrupted it, so ignore
-         * those frames.
-         */
-       if (rx_stats->rs_datalen > (common->rx_bufsize - rx_status_len)) {
-               RX_STAT_INC(rx_len_err);
-               return false;
-       }
-
-       /* Only use error bits from the last fragment */
-       if (rx_stats->rs_more)
-               return true;
-
        mic_error = is_valid_tkip && !ieee80211_is_ctl(fc) &&
                !ieee80211_has_morefrags(fc) &&
                !(le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG) &&
@@ -814,8 +803,6 @@ static bool ath9k_rx_accept(struct ath_common *common,
                        rxs->flag |= RX_FLAG_FAILED_FCS_CRC;
                        mic_error = false;
                }
-               if (rx_stats->rs_status & ATH9K_RXERR_PHY)
-                       return false;
 
                if ((rx_stats->rs_status & ATH9K_RXERR_DECRYPT) ||
                    (!is_mc && (rx_stats->rs_status & ATH9K_RXERR_KEYMISS))) {
@@ -865,6 +852,17 @@ static int ath9k_process_rate(struct ath_common *common,
        band = hw->conf.chandef.chan->band;
        sband = hw->wiphy->bands[band];
 
+       switch (hw->conf.chandef.width) {
+       case NL80211_CHAN_WIDTH_5:
+               rxs->flag |= RX_FLAG_5MHZ;
+               break;
+       case NL80211_CHAN_WIDTH_10:
+               rxs->flag |= RX_FLAG_10MHZ;
+               break;
+       default:
+               break;
+       }
+
        if (rx_stats->rs_rate & 0x80) {
                /* HT rate */
                rxs->flag |= RX_FLAG_HT;
@@ -898,129 +896,65 @@ static int ath9k_process_rate(struct ath_common *common,
 
 static void ath9k_process_rssi(struct ath_common *common,
                               struct ieee80211_hw *hw,
-                              struct ieee80211_hdr *hdr,
-                              struct ath_rx_status *rx_stats)
+                              struct ath_rx_status *rx_stats,
+                              struct ieee80211_rx_status *rxs)
 {
        struct ath_softc *sc = hw->priv;
        struct ath_hw *ah = common->ah;
        int last_rssi;
        int rssi = rx_stats->rs_rssi;
 
-       if (!rx_stats->is_mybeacon ||
-           ((ah->opmode != NL80211_IFTYPE_STATION) &&
-            (ah->opmode != NL80211_IFTYPE_ADHOC)))
+       /*
+        * RSSI is not available for subframes in an A-MPDU.
+        */
+       if (rx_stats->rs_moreaggr) {
+               rxs->flag |= RX_FLAG_NO_SIGNAL_VAL;
                return;
-
-       if (rx_stats->rs_rssi != ATH9K_RSSI_BAD && !rx_stats->rs_moreaggr)
-               ATH_RSSI_LPF(sc->last_rssi, rx_stats->rs_rssi);
-
-       last_rssi = sc->last_rssi;
-       if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
-               rssi = ATH_EP_RND(last_rssi, ATH_RSSI_EP_MULTIPLIER);
-       if (rssi < 0)
-               rssi = 0;
-
-       /* Update Beacon RSSI, this is used by ANI. */
-       ah->stats.avgbrssi = rssi;
-}
-
-/*
- * For Decrypt or Demic errors, we only mark packet status here and always push
- * up the frame up to let mac80211 handle the actual error case, be it no
- * decryption key or real decryption error. This let us keep statistics there.
- */
-static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
-                                  struct ieee80211_hdr *hdr,
-                                  struct ath_rx_status *rx_stats,
-                                  struct ieee80211_rx_status *rx_status,
-                                  bool *decrypt_error)
-{
-       struct ieee80211_hw *hw = sc->hw;
-       struct ath_hw *ah = sc->sc_ah;
-       struct ath_common *common = ath9k_hw_common(ah);
-       bool discard_current = sc->rx.discard_next;
-
-       sc->rx.discard_next = rx_stats->rs_more;
-       if (discard_current)
-               return -EINVAL;
+       }
 
        /*
-        * everything but the rate is checked here, the rate check is done
-        * separately to avoid doing two lookups for a rate for each frame.
+        * Check if the RSSI for the last subframe in an A-MPDU
+        * or an unaggregated frame is valid.
         */
-       if (!ath9k_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error))
-               return -EINVAL;
-
-       /* Only use status info from the last fragment */
-       if (rx_stats->rs_more)
-               return 0;
+       if (rx_stats->rs_rssi == ATH9K_RSSI_BAD) {
+               rxs->flag |= RX_FLAG_NO_SIGNAL_VAL;
+               return;
+       }
 
-       if (ath9k_process_rate(common, hw, rx_stats, rx_status))
-               return -EINVAL;
+       /*
+        * Update Beacon RSSI, this is used by ANI.
+        */
+       if (rx_stats->is_mybeacon &&
+           ((ah->opmode == NL80211_IFTYPE_STATION) ||
+            (ah->opmode == NL80211_IFTYPE_ADHOC))) {
+               ATH_RSSI_LPF(sc->last_rssi, rx_stats->rs_rssi);
+               last_rssi = sc->last_rssi;
 
-       ath9k_process_rssi(common, hw, hdr, rx_stats);
+               if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
+                       rssi = ATH_EP_RND(last_rssi, ATH_RSSI_EP_MULTIPLIER);
+               if (rssi < 0)
+                       rssi = 0;
 
-       rx_status->band = hw->conf.chandef.chan->band;
-       rx_status->freq = hw->conf.chandef.chan->center_freq;
-       rx_status->signal = ah->noise + rx_stats->rs_rssi;
-       rx_status->antenna = rx_stats->rs_antenna;
-       rx_status->flag |= RX_FLAG_MACTIME_END;
-       if (rx_stats->rs_moreaggr)
-               rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;
+               ah->stats.avgbrssi = rssi;
+       }
 
-       sc->rx.discard_next = false;
-       return 0;
+       rxs->signal = ah->noise + rx_stats->rs_rssi;
 }
 
-static void ath9k_rx_skb_postprocess(struct ath_common *common,
-                                    struct sk_buff *skb,
-                                    struct ath_rx_status *rx_stats,
-                                    struct ieee80211_rx_status *rxs,
-                                    bool decrypt_error)
+static void ath9k_process_tsf(struct ath_rx_status *rs,
+                             struct ieee80211_rx_status *rxs,
+                             u64 tsf)
 {
-       struct ath_hw *ah = common->ah;
-       struct ieee80211_hdr *hdr;
-       int hdrlen, padpos, padsize;
-       u8 keyix;
-       __le16 fc;
-
-       /* see if any padding is done by the hw and remove it */
-       hdr = (struct ieee80211_hdr *) skb->data;
-       hdrlen = ieee80211_get_hdrlen_from_skb(skb);
-       fc = hdr->frame_control;
-       padpos = ieee80211_hdrlen(fc);
-
-       /* The MAC header is padded to have 32-bit boundary if the
-        * packet payload is non-zero. The general calculation for
-        * padsize would take into account odd header lengths:
-        * padsize = (4 - padpos % 4) % 4; However, since only
-        * even-length headers are used, padding can only be 0 or 2
-        * bytes and we can optimize this a bit. In addition, we must
-        * not try to remove padding from short control frames that do
-        * not have payload. */
-       padsize = padpos & 3;
-       if (padsize && skb->len>=padpos+padsize+FCS_LEN) {
-               memmove(skb->data + padsize, skb->data, padpos);
-               skb_pull(skb, padsize);
-       }
+       u32 tsf_lower = tsf & 0xffffffff;
 
-       keyix = rx_stats->rs_keyix;
+       rxs->mactime = (tsf & ~0xffffffffULL) | rs->rs_tstamp;
+       if (rs->rs_tstamp > tsf_lower &&
+           unlikely(rs->rs_tstamp - tsf_lower > 0x10000000))
+               rxs->mactime -= 0x100000000ULL;
 
-       if (!(keyix == ATH9K_RXKEYIX_INVALID) && !decrypt_error &&
-           ieee80211_has_protected(fc)) {
-               rxs->flag |= RX_FLAG_DECRYPTED;
-       } else if (ieee80211_has_protected(fc)
-                  && !decrypt_error && skb->len >= hdrlen + 4) {
-               keyix = skb->data[hdrlen + 3] >> 6;
-
-               if (test_bit(keyix, common->keymap))
-                       rxs->flag |= RX_FLAG_DECRYPTED;
-       }
-       if (ah->sw_mgmt_crypto &&
-           (rxs->flag & RX_FLAG_DECRYPTED) &&
-           ieee80211_is_mgmt(fc))
-               /* Use software decrypt for management frames. */
-               rxs->flag &= ~RX_FLAG_DECRYPTED;
+       if (rs->rs_tstamp < tsf_lower &&
+           unlikely(tsf_lower - rs->rs_tstamp > 0x10000000))
+               rxs->mactime += 0x100000000ULL;
 }
 
 #ifdef CONFIG_ATH9K_DEBUGFS
@@ -1133,6 +1067,234 @@ static int ath_process_fft(struct ath_softc *sc, struct ieee80211_hdr *hdr,
 #endif
 }
 
+static bool ath9k_is_mybeacon(struct ath_softc *sc, struct ieee80211_hdr *hdr)
+{
+       struct ath_hw *ah = sc->sc_ah;
+       struct ath_common *common = ath9k_hw_common(ah);
+
+       if (ieee80211_is_beacon(hdr->frame_control)) {
+               RX_STAT_INC(rx_beacons);
+               if (!is_zero_ether_addr(common->curbssid) &&
+                   ether_addr_equal(hdr->addr3, common->curbssid))
+                       return true;
+       }
+
+       return false;
+}
+
+/*
+ * For Decrypt or Demic errors, we only mark packet status here and always push
+ * up the frame up to let mac80211 handle the actual error case, be it no
+ * decryption key or real decryption error. This let us keep statistics there.
+ */
+static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
+                                  struct sk_buff *skb,
+                                  struct ath_rx_status *rx_stats,
+                                  struct ieee80211_rx_status *rx_status,
+                                  bool *decrypt_error, u64 tsf)
+{
+       struct ieee80211_hw *hw = sc->hw;
+       struct ath_hw *ah = sc->sc_ah;
+       struct ath_common *common = ath9k_hw_common(ah);
+       struct ieee80211_hdr *hdr;
+       bool discard_current = sc->rx.discard_next;
+       int ret = 0;
+
+       /*
+        * Discard corrupt descriptors which are marked in
+        * ath_get_next_rx_buf().
+        */
+       sc->rx.discard_next = rx_stats->rs_more;
+       if (discard_current)
+               return -EINVAL;
+
+       /*
+        * Discard zero-length packets.
+        */
+       if (!rx_stats->rs_datalen) {
+               RX_STAT_INC(rx_len_err);
+               return -EINVAL;
+       }
+
+        /*
+         * rs_status follows rs_datalen so if rs_datalen is too large
+         * we can take a hint that hardware corrupted it, so ignore
+         * those frames.
+         */
+       if (rx_stats->rs_datalen > (common->rx_bufsize - ah->caps.rx_status_len)) {
+               RX_STAT_INC(rx_len_err);
+               return -EINVAL;
+       }
+
+       /* Only use status info from the last fragment */
+       if (rx_stats->rs_more)
+               return 0;
+
+       /*
+        * Return immediately if the RX descriptor has been marked
+        * as corrupt based on the various error bits.
+        *
+        * This is different from the other corrupt descriptor
+        * condition handled above.
+        */
+       if (rx_stats->rs_status & ATH9K_RXERR_CORRUPT_DESC) {
+               ret = -EINVAL;
+               goto exit;
+       }
+
+       hdr = (struct ieee80211_hdr *) (skb->data + ah->caps.rx_status_len);
+
+       ath9k_process_tsf(rx_stats, rx_status, tsf);
+       ath_debug_stat_rx(sc, rx_stats);
+
+       /*
+        * Process PHY errors and return so that the packet
+        * can be dropped.
+        */
+       if (rx_stats->rs_status & ATH9K_RXERR_PHY) {
+               ath9k_dfs_process_phyerr(sc, hdr, rx_stats, rx_status->mactime);
+               if (ath_process_fft(sc, hdr, rx_stats, rx_status->mactime))
+                       RX_STAT_INC(rx_spectral);
+
+               ret = -EINVAL;
+               goto exit;
+       }
+
+       /*
+        * everything but the rate is checked here, the rate check is done
+        * separately to avoid doing two lookups for a rate for each frame.
+        */
+       if (!ath9k_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error)) {
+               ret = -EINVAL;
+               goto exit;
+       }
+
+       rx_stats->is_mybeacon = ath9k_is_mybeacon(sc, hdr);
+       if (rx_stats->is_mybeacon) {
+               sc->hw_busy_count = 0;
+               ath_start_rx_poll(sc, 3);
+       }
+
+       if (ath9k_process_rate(common, hw, rx_stats, rx_status)) {
+               ret =-EINVAL;
+               goto exit;
+       }
+
+       ath9k_process_rssi(common, hw, rx_stats, rx_status);
+
+       rx_status->band = hw->conf.chandef.chan->band;
+       rx_status->freq = hw->conf.chandef.chan->center_freq;
+       rx_status->antenna = rx_stats->rs_antenna;
+       rx_status->flag |= RX_FLAG_MACTIME_END;
+
+#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
+       if (ieee80211_is_data_present(hdr->frame_control) &&
+           !ieee80211_is_qos_nullfunc(hdr->frame_control))
+               sc->rx.num_pkts++;
+#endif
+
+exit:
+       sc->rx.discard_next = false;
+       return ret;
+}
+
+static void ath9k_rx_skb_postprocess(struct ath_common *common,
+                                    struct sk_buff *skb,
+                                    struct ath_rx_status *rx_stats,
+                                    struct ieee80211_rx_status *rxs,
+                                    bool decrypt_error)
+{
+       struct ath_hw *ah = common->ah;
+       struct ieee80211_hdr *hdr;
+       int hdrlen, padpos, padsize;
+       u8 keyix;
+       __le16 fc;
+
+       /* see if any padding is done by the hw and remove it */
+       hdr = (struct ieee80211_hdr *) skb->data;
+       hdrlen = ieee80211_get_hdrlen_from_skb(skb);
+       fc = hdr->frame_control;
+       padpos = ieee80211_hdrlen(fc);
+
+       /* The MAC header is padded to have 32-bit boundary if the
+        * packet payload is non-zero. The general calculation for
+        * padsize would take into account odd header lengths:
+        * padsize = (4 - padpos % 4) % 4; However, since only
+        * even-length headers are used, padding can only be 0 or 2
+        * bytes and we can optimize this a bit. In addition, we must
+        * not try to remove padding from short control frames that do
+        * not have payload. */
+       padsize = padpos & 3;
+       if (padsize && skb->len>=padpos+padsize+FCS_LEN) {
+               memmove(skb->data + padsize, skb->data, padpos);
+               skb_pull(skb, padsize);
+       }
+
+       keyix = rx_stats->rs_keyix;
+
+       if (!(keyix == ATH9K_RXKEYIX_INVALID) && !decrypt_error &&
+           ieee80211_has_protected(fc)) {
+               rxs->flag |= RX_FLAG_DECRYPTED;
+       } else if (ieee80211_has_protected(fc)
+                  && !decrypt_error && skb->len >= hdrlen + 4) {
+               keyix = skb->data[hdrlen + 3] >> 6;
+
+               if (test_bit(keyix, common->keymap))
+                       rxs->flag |= RX_FLAG_DECRYPTED;
+       }
+       if (ah->sw_mgmt_crypto &&
+           (rxs->flag & RX_FLAG_DECRYPTED) &&
+           ieee80211_is_mgmt(fc))
+               /* Use software decrypt for management frames. */
+               rxs->flag &= ~RX_FLAG_DECRYPTED;
+}
+
+/*
+ * Run the LNA combining algorithm only in these cases:
+ *
+ * Standalone WLAN cards with both LNA/Antenna diversity
+ * enabled in the EEPROM.
+ *
+ * WLAN+BT cards which are in the supported card list
+ * in ath_pci_id_table and the user has loaded the
+ * driver with "bt_ant_diversity" set to true.
+ */
+static void ath9k_antenna_check(struct ath_softc *sc,
+                               struct ath_rx_status *rs)
+{
+       struct ath_hw *ah = sc->sc_ah;
+       struct ath9k_hw_capabilities *pCap = &ah->caps;
+       struct ath_common *common = ath9k_hw_common(ah);
+
+       if (!(ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB))
+               return;
+
+       /*
+        * All MPDUs in an aggregate will use the same LNA
+        * as the first MPDU.
+        */
+       if (rs->rs_isaggr && !rs->rs_firstaggr)
+               return;
+
+       /*
+        * Change the default rx antenna if rx diversity
+        * chooses the other antenna 3 times in a row.
+        */
+       if (sc->rx.defant != rs->rs_antenna) {
+               if (++sc->rx.rxotherant >= 3)
+                       ath_setdefantenna(sc, rs->rs_antenna);
+       } else {
+               sc->rx.rxotherant = 0;
+       }
+
+       if (pCap->hw_caps & ATH9K_HW_CAP_BT_ANT_DIV) {
+               if (common->bt_ant_diversity)
+                       ath_ant_comb_scan(sc, rs);
+       } else {
+               ath_ant_comb_scan(sc, rs);
+       }
+}
+
 static void ath9k_apply_ampdu_details(struct ath_softc *sc,
        struct ath_rx_status *rs, struct ieee80211_rx_status *rxs)
 {
@@ -1159,15 +1321,12 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
        struct ieee80211_hw *hw = sc->hw;
-       struct ieee80211_hdr *hdr;
        int retval;
        struct ath_rx_status rs;
        enum ath9k_rx_qtype qtype;
        bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
        int dma_type;
-       u8 rx_status_len = ah->caps.rx_status_len;
        u64 tsf = 0;
-       u32 tsf_lower = 0;
        unsigned long flags;
        dma_addr_t new_buf_addr;
 
@@ -1179,7 +1338,6 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
        qtype = hp ? ATH9K_RX_QUEUE_HP : ATH9K_RX_QUEUE_LP;
 
        tsf = ath9k_hw_gettsf64(ah);
-       tsf_lower = tsf & 0xffffffff;
 
        do {
                bool decrypt_error = false;
@@ -1206,55 +1364,14 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
                else
                        hdr_skb = skb;
 
-               hdr = (struct ieee80211_hdr *) (hdr_skb->data + rx_status_len);
                rxs = IEEE80211_SKB_RXCB(hdr_skb);
-               if (ieee80211_is_beacon(hdr->frame_control)) {
-                       RX_STAT_INC(rx_beacons);
-                       if (!is_zero_ether_addr(common->curbssid) &&
-                           ether_addr_equal(hdr->addr3, common->curbssid))
-                               rs.is_mybeacon = true;
-                       else
-                               rs.is_mybeacon = false;
-               }
-               else
-                       rs.is_mybeacon = false;
-
-               if (ieee80211_is_data_present(hdr->frame_control) &&
-                   !ieee80211_is_qos_nullfunc(hdr->frame_control))
-                       sc->rx.num_pkts++;
-
-               ath_debug_stat_rx(sc, &rs);
-
                memset(rxs, 0, sizeof(struct ieee80211_rx_status));
 
-               rxs->mactime = (tsf & ~0xffffffffULL) | rs.rs_tstamp;
-               if (rs.rs_tstamp > tsf_lower &&
-                   unlikely(rs.rs_tstamp - tsf_lower > 0x10000000))
-                       rxs->mactime -= 0x100000000ULL;
-
-               if (rs.rs_tstamp < tsf_lower &&
-                   unlikely(tsf_lower - rs.rs_tstamp > 0x10000000))
-                       rxs->mactime += 0x100000000ULL;
-
-               if (rs.rs_phyerr == ATH9K_PHYERR_RADAR)
-                       ath9k_dfs_process_phyerr(sc, hdr, &rs, rxs->mactime);
-
-               if (rs.rs_status & ATH9K_RXERR_PHY) {
-                       if (ath_process_fft(sc, hdr, &rs, rxs->mactime)) {
-                               RX_STAT_INC(rx_spectral);
-                               goto requeue_drop_frag;
-                       }
-               }
-
-               retval = ath9k_rx_skb_preprocess(sc, hdr, &rs, rxs,
-                                                &decrypt_error);
+               retval = ath9k_rx_skb_preprocess(sc, hdr_skb, &rs, rxs,
+                                                &decrypt_error, tsf);
                if (retval)
                        goto requeue_drop_frag;
 
-               if (rs.is_mybeacon) {
-                       sc->hw_busy_count = 0;
-                       ath_start_rx_poll(sc, 3);
-               }
                /* Ensure we always have an skb to requeue once we are done
                 * processing the current buffer's skb */
                requeue_skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_ATOMIC);
@@ -1308,8 +1425,6 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
                        sc->rx.frag = skb;
                        goto requeue;
                }
-               if (rs.rs_status & ATH9K_RXERR_CORRUPT_DESC)
-                       goto requeue_drop_frag;
 
                if (sc->rx.frag) {
                        int space = skb->len - skb_tailroom(hdr_skb);
@@ -1328,22 +1443,6 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
                        skb = hdr_skb;
                }
 
-
-               if (ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) {
-
-                       /*
-                        * change the default rx antenna if rx diversity
-                        * chooses the other antenna 3 times in a row.
-                        */
-                       if (sc->rx.defant != rs.rs_antenna) {
-                               if (++sc->rx.rxotherant >= 3)
-                                       ath_setdefantenna(sc, rs.rs_antenna);
-                       } else {
-                               sc->rx.rxotherant = 0;
-                       }
-
-               }
-
                if (rxs->flag & RX_FLAG_MMIC_STRIPPED)
                        skb_trim(skb, skb->len - 8);
 
@@ -1355,8 +1454,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
                        ath_rx_ps(sc, skb, rs.is_mybeacon);
                spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
 
-               if ((ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) && sc->ant_rx == 3)
-                       ath_ant_comb_scan(sc, &rs);
+               ath9k_antenna_check(sc, &rs);
 
                ath9k_apply_ampdu_details(sc, &rs, rxs);
 
@@ -1375,7 +1473,7 @@ requeue:
                if (edma) {
                        ath_rx_edma_buf_link(sc, qtype);
                } else {
-                       ath_rx_buf_link(sc, bf);
+                       ath_rx_buf_relink(sc, bf);
                        ath9k_hw_rxena(ah);
                }
        } while (1);
index 9279927326203d02f421233b9456f074cb92e5ec..35b515fe3ffa41e00dc614b6590cc2eabead9de4 100644 (file)
@@ -135,6 +135,9 @@ static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
 
 static void ath_send_bar(struct ath_atx_tid *tid, u16 seqno)
 {
+       if (!tid->an->sta)
+               return;
+
        ieee80211_send_bar(tid->an->vif, tid->an->sta->addr, tid->tidno,
                           seqno << IEEE80211_SEQ_SEQ_SHIFT);
 }
@@ -168,6 +171,71 @@ static void ath_txq_skb_done(struct ath_softc *sc, struct ath_txq *txq,
        }
 }
 
+static struct ath_atx_tid *
+ath_get_skb_tid(struct ath_softc *sc, struct ath_node *an, struct sk_buff *skb)
+{
+       struct ieee80211_hdr *hdr;
+       u8 tidno = 0;
+
+       hdr = (struct ieee80211_hdr *) skb->data;
+       if (ieee80211_is_data_qos(hdr->frame_control))
+               tidno = ieee80211_get_qos_ctl(hdr)[0];
+
+       tidno &= IEEE80211_QOS_CTL_TID_MASK;
+       return ATH_AN_2_TID(an, tidno);
+}
+
+static bool ath_tid_has_buffered(struct ath_atx_tid *tid)
+{
+       return !skb_queue_empty(&tid->buf_q) || !skb_queue_empty(&tid->retry_q);
+}
+
+static struct sk_buff *ath_tid_dequeue(struct ath_atx_tid *tid)
+{
+       struct sk_buff *skb;
+
+       skb = __skb_dequeue(&tid->retry_q);
+       if (!skb)
+               skb = __skb_dequeue(&tid->buf_q);
+
+       return skb;
+}
+
+/*
+ * ath_tx_tid_change_state:
+ * - clears a-mpdu flag of previous session
+ * - force sequence number allocation to fix next BlockAck Window
+ */
+static void
+ath_tx_tid_change_state(struct ath_softc *sc, struct ath_atx_tid *tid)
+{
+       struct ath_txq *txq = tid->ac->txq;
+       struct ieee80211_tx_info *tx_info;
+       struct sk_buff *skb, *tskb;
+       struct ath_buf *bf;
+       struct ath_frame_info *fi;
+
+       skb_queue_walk_safe(&tid->buf_q, skb, tskb) {
+               fi = get_frame_info(skb);
+               bf = fi->bf;
+
+               tx_info = IEEE80211_SKB_CB(skb);
+               tx_info->flags &= ~IEEE80211_TX_CTL_AMPDU;
+
+               if (bf)
+                       continue;
+
+               bf = ath_tx_setup_buffer(sc, txq, tid, skb);
+               if (!bf) {
+                       __skb_unlink(skb, &tid->buf_q);
+                       ath_txq_skb_done(sc, txq, skb);
+                       ieee80211_free_txskb(sc->hw, skb);
+                       continue;
+               }
+       }
+
+}
+
 static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
 {
        struct ath_txq *txq = tid->ac->txq;
@@ -182,28 +250,22 @@ static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
 
        memset(&ts, 0, sizeof(ts));
 
-       while ((skb = __skb_dequeue(&tid->buf_q))) {
+       while ((skb = __skb_dequeue(&tid->retry_q))) {
                fi = get_frame_info(skb);
                bf = fi->bf;
-
                if (!bf) {
-                       bf = ath_tx_setup_buffer(sc, txq, tid, skb);
-                       if (!bf) {
-                               ath_txq_skb_done(sc, txq, skb);
-                               ieee80211_free_txskb(sc->hw, skb);
-                               continue;
-                       }
+                       ath_txq_skb_done(sc, txq, skb);
+                       ieee80211_free_txskb(sc->hw, skb);
+                       continue;
                }
 
-               if (fi->retries) {
-                       list_add_tail(&bf->list, &bf_head);
+               if (fi->baw_tracked) {
                        ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
-                       ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
                        sendbar = true;
-               } else {
-                       ath_set_rates(tid->an->vif, tid->an->sta, bf);
-                       ath_tx_send_normal(sc, txq, NULL, skb);
                }
+
+               list_add_tail(&bf->list, &bf_head);
+               ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
        }
 
        if (sendbar) {
@@ -232,13 +294,16 @@ static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
 }
 
 static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
-                            u16 seqno)
+                            struct ath_buf *bf)
 {
+       struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
+       u16 seqno = bf->bf_state.seqno;
        int index, cindex;
 
        index  = ATH_BA_INDEX(tid->seq_start, seqno);
        cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
        __set_bit(cindex, tid->tx_buf);
+       fi->baw_tracked = 1;
 
        if (index >= ((tid->baw_tail - tid->baw_head) &
                (ATH_TID_MAX_BUFS - 1))) {
@@ -247,12 +312,6 @@ static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
        }
 }
 
-/*
- * TODO: For frame(s) that are in the retry state, we will reuse the
- * sequence number(s) without setting the retry bit. The
- * alternative is to give up on these and BAR the receiver's window
- * forward.
- */
 static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
                          struct ath_atx_tid *tid)
 
@@ -266,7 +325,7 @@ static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
        memset(&ts, 0, sizeof(ts));
        INIT_LIST_HEAD(&bf_head);
 
-       while ((skb = __skb_dequeue(&tid->buf_q))) {
+       while ((skb = ath_tid_dequeue(tid))) {
                fi = get_frame_info(skb);
                bf = fi->bf;
 
@@ -276,14 +335,8 @@ static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
                }
 
                list_add_tail(&bf->list, &bf_head);
-
-               ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
                ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
        }
-
-       tid->seq_next = tid->seq_start;
-       tid->baw_tail = tid->baw_head;
-       tid->bar_index = -1;
 }
 
 static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
@@ -403,7 +456,6 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
        struct ieee80211_tx_rate rates[4];
        struct ath_frame_info *fi;
        int nframes;
-       u8 tidno;
        bool flush = !!(ts->ts_status & ATH9K_TX_FLUSH);
        int i, retries;
        int bar_index = -1;
@@ -429,7 +481,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
                while (bf) {
                        bf_next = bf->bf_next;
 
-                       if (!bf->bf_stale || bf_next != NULL)
+                       if (!bf->bf_state.stale || bf_next != NULL)
                                list_move_tail(&bf->list, &bf_head);
 
                        ath_tx_complete_buf(sc, bf, txq, &bf_head, ts, 0);
@@ -440,8 +492,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
        }
 
        an = (struct ath_node *)sta->drv_priv;
-       tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
-       tid = ATH_AN_2_TID(an, tidno);
+       tid = ath_get_skb_tid(sc, an, skb);
        seq_first = tid->seq_start;
        isba = ts->ts_flags & ATH9K_TX_BA;
 
@@ -453,7 +504,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
         * Only BlockAcks have a TID and therefore normal Acks cannot be
         * checked
         */
-       if (isba && tidno != ts->tid)
+       if (isba && tid->tidno != ts->tid)
                txok = false;
 
        isaggr = bf_isaggr(bf);
@@ -489,7 +540,8 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
                tx_info = IEEE80211_SKB_CB(skb);
                fi = get_frame_info(skb);
 
-               if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno)) {
+               if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno) ||
+                   !tid->active) {
                        /*
                         * Outside of the current BlockAck window,
                         * maybe part of a previous session
@@ -522,7 +574,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
                 * not a holding desc.
                 */
                INIT_LIST_HEAD(&bf_head);
-               if (bf_next != NULL || !bf_last->bf_stale)
+               if (bf_next != NULL || !bf_last->bf_state.stale)
                        list_move_tail(&bf->list, &bf_head);
 
                if (!txpending) {
@@ -546,7 +598,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
                                ieee80211_sta_eosp(sta);
                        }
                        /* retry the un-acked ones */
-                       if (bf->bf_next == NULL && bf_last->bf_stale) {
+                       if (bf->bf_next == NULL && bf_last->bf_state.stale) {
                                struct ath_buf *tbf;
 
                                tbf = ath_clone_txbuf(sc, bf_last);
@@ -583,7 +635,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
                if (an->sleeping)
                        ieee80211_sta_set_buffered(sta, tid->tidno, true);
 
-               skb_queue_splice(&bf_pending, &tid->buf_q);
+               skb_queue_splice_tail(&bf_pending, &tid->retry_q);
                if (!an->sleeping) {
                        ath_tx_queue_tid(txq, tid);
 
@@ -641,7 +693,7 @@ static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
        } else
                ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok);
 
-       if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) && !flush)
+       if (!flush)
                ath_txq_schedule(sc, txq);
 }
 
@@ -815,15 +867,20 @@ static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
 
 static struct ath_buf *
 ath_tx_get_tid_subframe(struct ath_softc *sc, struct ath_txq *txq,
-                       struct ath_atx_tid *tid)
+                       struct ath_atx_tid *tid, struct sk_buff_head **q)
 {
+       struct ieee80211_tx_info *tx_info;
        struct ath_frame_info *fi;
        struct sk_buff *skb;
        struct ath_buf *bf;
        u16 seqno;
 
        while (1) {
-               skb = skb_peek(&tid->buf_q);
+               *q = &tid->retry_q;
+               if (skb_queue_empty(*q))
+                       *q = &tid->buf_q;
+
+               skb = skb_peek(*q);
                if (!skb)
                        break;
 
@@ -831,14 +888,26 @@ ath_tx_get_tid_subframe(struct ath_softc *sc, struct ath_txq *txq,
                bf = fi->bf;
                if (!fi->bf)
                        bf = ath_tx_setup_buffer(sc, txq, tid, skb);
+               else
+                       bf->bf_state.stale = false;
 
                if (!bf) {
-                       __skb_unlink(skb, &tid->buf_q);
+                       __skb_unlink(skb, *q);
                        ath_txq_skb_done(sc, txq, skb);
                        ieee80211_free_txskb(sc->hw, skb);
                        continue;
                }
 
+               bf->bf_next = NULL;
+               bf->bf_lastbf = bf;
+
+               tx_info = IEEE80211_SKB_CB(skb);
+               tx_info->flags &= ~IEEE80211_TX_CTL_CLEAR_PS_FILT;
+               if (!(tx_info->flags & IEEE80211_TX_CTL_AMPDU)) {
+                       bf->bf_state.bf_type = 0;
+                       return bf;
+               }
+
                bf->bf_state.bf_type = BUF_AMPDU | BUF_AGGR;
                seqno = bf->bf_state.seqno;
 
@@ -852,73 +921,52 @@ ath_tx_get_tid_subframe(struct ath_softc *sc, struct ath_txq *txq,
 
                        INIT_LIST_HEAD(&bf_head);
                        list_add(&bf->list, &bf_head);
-                       __skb_unlink(skb, &tid->buf_q);
+                       __skb_unlink(skb, *q);
                        ath_tx_update_baw(sc, tid, seqno);
                        ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
                        continue;
                }
 
-               bf->bf_next = NULL;
-               bf->bf_lastbf = bf;
                return bf;
        }
 
        return NULL;
 }
 
-static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
-                                            struct ath_txq *txq,
-                                            struct ath_atx_tid *tid,
-                                            struct list_head *bf_q,
-                                            int *aggr_len)
+static bool
+ath_tx_form_aggr(struct ath_softc *sc, struct ath_txq *txq,
+                struct ath_atx_tid *tid, struct list_head *bf_q,
+                struct ath_buf *bf_first, struct sk_buff_head *tid_q,
+                int *aggr_len)
 {
 #define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
-       struct ath_buf *bf, *bf_first = NULL, *bf_prev = NULL;
-       int rl = 0, nframes = 0, ndelim, prev_al = 0;
+       struct ath_buf *bf = bf_first, *bf_prev = NULL;
+       int nframes = 0, ndelim;
        u16 aggr_limit = 0, al = 0, bpad = 0,
-               al_delta, h_baw = tid->baw_size / 2;
-       enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
+           al_delta, h_baw = tid->baw_size / 2;
        struct ieee80211_tx_info *tx_info;
        struct ath_frame_info *fi;
        struct sk_buff *skb;
+       bool closed = false;
 
-       do {
-               bf = ath_tx_get_tid_subframe(sc, txq, tid);
-               if (!bf) {
-                       status = ATH_AGGR_BAW_CLOSED;
-                       break;
-               }
+       bf = bf_first;
+       aggr_limit = ath_lookup_rate(sc, bf, tid);
 
+       do {
                skb = bf->bf_mpdu;
                fi = get_frame_info(skb);
 
-               if (!bf_first)
-                       bf_first = bf;
-
-               if (!rl) {
-                       ath_set_rates(tid->an->vif, tid->an->sta, bf);
-                       aggr_limit = ath_lookup_rate(sc, bf, tid);
-                       rl = 1;
-               }
-
                /* do not exceed aggregation limit */
                al_delta = ATH_AGGR_DELIM_SZ + fi->framelen;
+               if (nframes) {
+                       if (aggr_limit < al + bpad + al_delta ||
+                           ath_lookup_legacy(bf) || nframes >= h_baw)
+                               break;
 
-               if (nframes &&
-                   ((aggr_limit < (al + bpad + al_delta + prev_al)) ||
-                    ath_lookup_legacy(bf))) {
-                       status = ATH_AGGR_LIMITED;
-                       break;
-               }
-
-               tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
-               if (nframes && (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE))
-                       break;
-
-               /* do not exceed subframe limit */
-               if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
-                       status = ATH_AGGR_LIMITED;
-                       break;
+                       tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
+                       if ((tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) ||
+                           !(tx_info->flags & IEEE80211_TX_CTL_AMPDU))
+                               break;
                }
 
                /* add padding for previous frame to aggregation length */
@@ -936,22 +984,37 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
                bf->bf_next = NULL;
 
                /* link buffers of this frame to the aggregate */
-               if (!fi->retries)
-                       ath_tx_addto_baw(sc, tid, bf->bf_state.seqno);
+               if (!fi->baw_tracked)
+                       ath_tx_addto_baw(sc, tid, bf);
                bf->bf_state.ndelim = ndelim;
 
-               __skb_unlink(skb, &tid->buf_q);
+               __skb_unlink(skb, tid_q);
                list_add_tail(&bf->list, bf_q);
                if (bf_prev)
                        bf_prev->bf_next = bf;
 
                bf_prev = bf;
 
-       } while (!skb_queue_empty(&tid->buf_q));
+               bf = ath_tx_get_tid_subframe(sc, txq, tid, &tid_q);
+               if (!bf) {
+                       closed = true;
+                       break;
+               }
+       } while (ath_tid_has_buffered(tid));
+
+       bf = bf_first;
+       bf->bf_lastbf = bf_prev;
+
+       if (bf == bf_prev) {
+               al = get_frame_info(bf->bf_mpdu)->framelen;
+               bf->bf_state.bf_type = BUF_AMPDU;
+       } else {
+               TX_STAT_INC(txq->axq_qnum, a_aggr);
+       }
 
        *aggr_len = al;
 
-       return status;
+       return closed;
 #undef PADBYTES
 }
 
@@ -1023,7 +1086,7 @@ void ath_update_max_aggr_framelen(struct ath_softc *sc, int queue, int txop)
 }
 
 static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
-                            struct ath_tx_info *info, int len)
+                            struct ath_tx_info *info, int len, bool rts)
 {
        struct ath_hw *ah = sc->sc_ah;
        struct sk_buff *skb;
@@ -1032,6 +1095,7 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
        const struct ieee80211_rate *rate;
        struct ieee80211_hdr *hdr;
        struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
+       u32 rts_thresh = sc->hw->wiphy->rts_threshold;
        int i;
        u8 rix = 0;
 
@@ -1054,7 +1118,17 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
                rix = rates[i].idx;
                info->rates[i].Tries = rates[i].count;
 
-                   if (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
+               /*
+                * Handle RTS threshold for unaggregated HT frames.
+                */
+               if (bf_isampdu(bf) && !bf_isaggr(bf) &&
+                   (rates[i].flags & IEEE80211_TX_RC_MCS) &&
+                   unlikely(rts_thresh != (u32) -1)) {
+                       if (!rts_thresh || (len > rts_thresh))
+                               rts = true;
+               }
+
+               if (rts || rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
                        info->rates[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
                        info->flags |= ATH9K_TXDESC_RTSENA;
                } else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
@@ -1147,6 +1221,8 @@ static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf,
        struct ath_hw *ah = sc->sc_ah;
        struct ath_buf *bf_first = NULL;
        struct ath_tx_info info;
+       u32 rts_thresh = sc->hw->wiphy->rts_threshold;
+       bool rts = false;
 
        memset(&info, 0, sizeof(info));
        info.is_first = true;
@@ -1183,7 +1259,22 @@ static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf,
                                info.flags |= (u32) bf->bf_state.bfs_paprd <<
                                              ATH9K_TXDESC_PAPRD_S;
 
-                       ath_buf_set_rate(sc, bf, &info, len);
+                       /*
+                        * mac80211 doesn't handle RTS threshold for HT because
+                        * the decision has to be taken based on AMPDU length
+                        * and aggregation is done entirely inside ath9k.
+                        * Set the RTS/CTS flag for the first subframe based
+                        * on the threshold.
+                        */
+                       if (aggr && (bf == bf_first) &&
+                           unlikely(rts_thresh != (u32) -1)) {
+                               /*
+                                * "len" is the size of the entire AMPDU.
+                                */
+                               if (!rts_thresh || (len > rts_thresh))
+                                       rts = true;
+                       }
+                       ath_buf_set_rate(sc, bf, &info, len, rts);
                }
 
                info.buf_addr[0] = bf->bf_buf_addr;
@@ -1212,53 +1303,86 @@ static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf,
        }
 }
 
-static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
-                             struct ath_atx_tid *tid)
+static void
+ath_tx_form_burst(struct ath_softc *sc, struct ath_txq *txq,
+                 struct ath_atx_tid *tid, struct list_head *bf_q,
+                 struct ath_buf *bf_first, struct sk_buff_head *tid_q)
 {
-       struct ath_buf *bf;
-       enum ATH_AGGR_STATUS status;
-       struct ieee80211_tx_info *tx_info;
-       struct list_head bf_q;
-       int aggr_len;
+       struct ath_buf *bf = bf_first, *bf_prev = NULL;
+       struct sk_buff *skb;
+       int nframes = 0;
 
        do {
-               if (skb_queue_empty(&tid->buf_q))
-                       return;
+               struct ieee80211_tx_info *tx_info;
+               skb = bf->bf_mpdu;
 
-               INIT_LIST_HEAD(&bf_q);
+               nframes++;
+               __skb_unlink(skb, tid_q);
+               list_add_tail(&bf->list, bf_q);
+               if (bf_prev)
+                       bf_prev->bf_next = bf;
+               bf_prev = bf;
 
-               status = ath_tx_form_aggr(sc, txq, tid, &bf_q, &aggr_len);
+               if (nframes >= 2)
+                       break;
 
-               /*
-                * no frames picked up to be aggregated;
-                * block-ack window is not open.
-                */
-               if (list_empty(&bf_q))
+               bf = ath_tx_get_tid_subframe(sc, txq, tid, &tid_q);
+               if (!bf)
                        break;
 
-               bf = list_first_entry(&bf_q, struct ath_buf, list);
-               bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);
                tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
+               if (tx_info->flags & IEEE80211_TX_CTL_AMPDU)
+                       break;
 
-               if (tid->ac->clear_ps_filter) {
-                       tid->ac->clear_ps_filter = false;
-                       tx_info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
-               } else {
-                       tx_info->flags &= ~IEEE80211_TX_CTL_CLEAR_PS_FILT;
-               }
+               ath_set_rates(tid->an->vif, tid->an->sta, bf);
+       } while (1);
+}
 
-               /* if only one frame, send as non-aggregate */
-               if (bf == bf->bf_lastbf) {
-                       aggr_len = get_frame_info(bf->bf_mpdu)->framelen;
-                       bf->bf_state.bf_type = BUF_AMPDU;
-               } else {
-                       TX_STAT_INC(txq->axq_qnum, a_aggr);
-               }
+static bool ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
+                             struct ath_atx_tid *tid, bool *stop)
+{
+       struct ath_buf *bf;
+       struct ieee80211_tx_info *tx_info;
+       struct sk_buff_head *tid_q;
+       struct list_head bf_q;
+       int aggr_len = 0;
+       bool aggr, last = true;
+
+       if (!ath_tid_has_buffered(tid))
+               return false;
+
+       INIT_LIST_HEAD(&bf_q);
+
+       bf = ath_tx_get_tid_subframe(sc, txq, tid, &tid_q);
+       if (!bf)
+               return false;
+
+       tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
+       aggr = !!(tx_info->flags & IEEE80211_TX_CTL_AMPDU);
+       if ((aggr && txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) ||
+               (!aggr && txq->axq_depth >= ATH_NON_AGGR_MIN_QDEPTH)) {
+               *stop = true;
+               return false;
+       }
 
-               ath_tx_fill_desc(sc, bf, txq, aggr_len);
-               ath_tx_txqaddbuf(sc, txq, &bf_q, false);
-       } while (txq->axq_ampdu_depth < ATH_AGGR_MIN_QDEPTH &&
-                status != ATH_AGGR_BAW_CLOSED);
+       ath_set_rates(tid->an->vif, tid->an->sta, bf);
+       if (aggr)
+               last = ath_tx_form_aggr(sc, txq, tid, &bf_q, bf,
+                                       tid_q, &aggr_len);
+       else
+               ath_tx_form_burst(sc, txq, tid, &bf_q, bf, tid_q);
+
+       if (list_empty(&bf_q))
+               return false;
+
+       if (tid->ac->clear_ps_filter || tid->an->no_ps_filter) {
+               tid->ac->clear_ps_filter = false;
+               tx_info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
+       }
+
+       ath_tx_fill_desc(sc, bf, txq, aggr_len);
+       ath_tx_txqaddbuf(sc, txq, &bf_q, false);
+       return true;
 }
 
 int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
@@ -1282,6 +1406,9 @@ int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
                an->mpdudensity = density;
        }
 
+       /* force sequence number allocation for pending frames */
+       ath_tx_tid_change_state(sc, txtid);
+
        txtid->active = true;
        txtid->paused = true;
        *ssn = txtid->seq_start = txtid->seq_next;
@@ -1301,8 +1428,9 @@ void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
 
        ath_txq_lock(sc, txq);
        txtid->active = false;
-       txtid->paused = true;
+       txtid->paused = false;
        ath_tx_flush_tid(sc, txtid);
+       ath_tx_tid_change_state(sc, txtid);
        ath_txq_unlock_complete(sc, txq);
 }
 
@@ -1326,7 +1454,7 @@ void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc,
 
                ath_txq_lock(sc, txq);
 
-               buffered = !skb_queue_empty(&tid->buf_q);
+               buffered = ath_tid_has_buffered(tid);
 
                tid->sched = false;
                list_del(&tid->list);
@@ -1358,7 +1486,7 @@ void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
                ath_txq_lock(sc, txq);
                ac->clear_ps_filter = true;
 
-               if (!skb_queue_empty(&tid->buf_q) && !tid->paused) {
+               if (!tid->paused && ath_tid_has_buffered(tid)) {
                        ath_tx_queue_tid(txq, tid);
                        ath_txq_schedule(sc, txq);
                }
@@ -1383,7 +1511,7 @@ void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta,
        tid->baw_size = IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
        tid->paused = false;
 
-       if (!skb_queue_empty(&tid->buf_q)) {
+       if (ath_tid_has_buffered(tid)) {
                ath_tx_queue_tid(txq, tid);
                ath_txq_schedule(sc, txq);
        }
@@ -1403,6 +1531,7 @@ void ath9k_release_buffered_frames(struct ieee80211_hw *hw,
        struct ieee80211_tx_info *info;
        struct list_head bf_q;
        struct ath_buf *bf_tail = NULL, *bf;
+       struct sk_buff_head *tid_q;
        int sent = 0;
        int i;
 
@@ -1418,15 +1547,15 @@ void ath9k_release_buffered_frames(struct ieee80211_hw *hw,
                        continue;
 
                ath_txq_lock(sc, tid->ac->txq);
-               while (!skb_queue_empty(&tid->buf_q) && nframes > 0) {
-                       bf = ath_tx_get_tid_subframe(sc, sc->tx.uapsdq, tid);
+               while (nframes > 0) {
+                       bf = ath_tx_get_tid_subframe(sc, sc->tx.uapsdq, tid, &tid_q);
                        if (!bf)
                                break;
 
-                       __skb_unlink(bf->bf_mpdu, &tid->buf_q);
+                       __skb_unlink(bf->bf_mpdu, tid_q);
                        list_add_tail(&bf->list, &bf_q);
                        ath_set_rates(tid->an->vif, tid->an->sta, bf);
-                       ath_tx_addto_baw(sc, tid, bf->bf_state.seqno);
+                       ath_tx_addto_baw(sc, tid, bf);
                        bf->bf_state.bf_type &= ~BUF_AGGR;
                        if (bf_tail)
                                bf_tail->bf_next = bf;
@@ -1436,7 +1565,7 @@ void ath9k_release_buffered_frames(struct ieee80211_hw *hw,
                        sent++;
                        TX_STAT_INC(txq->axq_qnum, a_queued_hw);
 
-                       if (skb_queue_empty(&tid->buf_q))
+                       if (an->sta && !ath_tid_has_buffered(tid))
                                ieee80211_sta_set_buffered(an->sta, i, false);
                }
                ath_txq_unlock_complete(sc, tid->ac->txq);
@@ -1595,7 +1724,7 @@ static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq,
        while (!list_empty(list)) {
                bf = list_first_entry(list, struct ath_buf, list);
 
-               if (bf->bf_stale) {
+               if (bf->bf_state.stale) {
                        list_del(&bf->list);
 
                        ath_tx_return_buffer(sc, bf);
@@ -1689,25 +1818,27 @@ void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
  */
 void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
 {
-       struct ath_atx_ac *ac, *ac_tmp, *last_ac;
+       struct ath_atx_ac *ac, *last_ac;
        struct ath_atx_tid *tid, *last_tid;
+       bool sent = false;
 
        if (test_bit(SC_OP_HW_RESET, &sc->sc_flags) ||
-           list_empty(&txq->axq_acq) ||
-           txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
+           list_empty(&txq->axq_acq))
                return;
 
        rcu_read_lock();
 
-       ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
        last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list);
+       while (!list_empty(&txq->axq_acq)) {
+               bool stop = false;
 
-       list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
+               ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
                last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list);
                list_del(&ac->list);
                ac->sched = false;
 
                while (!list_empty(&ac->tid_q)) {
+
                        tid = list_first_entry(&ac->tid_q, struct ath_atx_tid,
                                               list);
                        list_del(&tid->list);
@@ -1716,17 +1847,17 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
                        if (tid->paused)
                                continue;
 
-                       ath_tx_sched_aggr(sc, txq, tid);
+                       if (ath_tx_sched_aggr(sc, txq, tid, &stop))
+                               sent = true;
 
                        /*
                         * add tid to round-robin queue if more frames
                         * are pending for the tid
                         */
-                       if (!skb_queue_empty(&tid->buf_q))
+                       if (ath_tid_has_buffered(tid))
                                ath_tx_queue_tid(txq, tid);
 
-                       if (tid == last_tid ||
-                           txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
+                       if (stop || tid == last_tid)
                                break;
                }
 
@@ -1735,9 +1866,17 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
                        list_add_tail(&ac->list, &txq->axq_acq);
                }
 
-               if (ac == last_ac ||
-                   txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
+               if (stop)
                        break;
+
+               if (ac == last_ac) {
+                       if (!sent)
+                               break;
+
+                       sent = false;
+                       last_ac = list_entry(txq->axq_acq.prev,
+                                            struct ath_atx_ac, list);
+               }
        }
 
        rcu_read_unlock();
@@ -1816,58 +1955,6 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
        }
 }
 
-static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_txq *txq,
-                             struct ath_atx_tid *tid, struct sk_buff *skb,
-                             struct ath_tx_control *txctl)
-{
-       struct ath_frame_info *fi = get_frame_info(skb);
-       struct list_head bf_head;
-       struct ath_buf *bf;
-
-       /*
-        * Do not queue to h/w when any of the following conditions is true:
-        * - there are pending frames in software queue
-        * - the TID is currently paused for ADDBA/BAR request
-        * - seqno is not within block-ack window
-        * - h/w queue depth exceeds low water mark
-        */
-       if ((!skb_queue_empty(&tid->buf_q) || tid->paused ||
-            !BAW_WITHIN(tid->seq_start, tid->baw_size, tid->seq_next) ||
-            txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) &&
-           txq != sc->tx.uapsdq) {
-               /*
-                * Add this frame to software queue for scheduling later
-                * for aggregation.
-                */
-               TX_STAT_INC(txq->axq_qnum, a_queued_sw);
-               __skb_queue_tail(&tid->buf_q, skb);
-               if (!txctl->an || !txctl->an->sleeping)
-                       ath_tx_queue_tid(txq, tid);
-               return;
-       }
-
-       bf = ath_tx_setup_buffer(sc, txq, tid, skb);
-       if (!bf) {
-               ath_txq_skb_done(sc, txq, skb);
-               ieee80211_free_txskb(sc->hw, skb);
-               return;
-       }
-
-       ath_set_rates(tid->an->vif, tid->an->sta, bf);
-       bf->bf_state.bf_type = BUF_AMPDU;
-       INIT_LIST_HEAD(&bf_head);
-       list_add(&bf->list, &bf_head);
-
-       /* Add sub-frame to BAW */
-       ath_tx_addto_baw(sc, tid, bf->bf_state.seqno);
-
-       /* Queue to h/w without aggregation */
-       TX_STAT_INC(txq->axq_qnum, a_queued_hw);
-       bf->bf_lastbf = bf;
-       ath_tx_fill_desc(sc, bf, txq, fi->framelen);
-       ath_tx_txqaddbuf(sc, txq, &bf_head, false);
-}
-
 static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
                               struct ath_atx_tid *tid, struct sk_buff *skb)
 {
@@ -2010,6 +2097,7 @@ static int ath_tx_prepare(struct ieee80211_hw *hw, struct sk_buff *skb,
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct ieee80211_sta *sta = txctl->sta;
        struct ieee80211_vif *vif = info->control.vif;
+       struct ath_vif *avp;
        struct ath_softc *sc = hw->priv;
        int frmlen = skb->len + FCS_LEN;
        int padpos, padsize;
@@ -2017,6 +2105,10 @@ static int ath_tx_prepare(struct ieee80211_hw *hw, struct sk_buff *skb,
        /* NOTE:  sta can be NULL according to net/mac80211.h */
        if (sta)
                txctl->an = (struct ath_node *)sta->drv_priv;
+       else if (vif && ieee80211_is_data(hdr->frame_control)) {
+               avp = (void *)vif->drv_priv;
+               txctl->an = &avp->mcast_node;
+       }
 
        if (info->control.hw_key)
                frmlen += info->control.hw_key->icv_len;
@@ -2066,7 +2158,6 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
        struct ath_txq *txq = txctl->txq;
        struct ath_atx_tid *tid = NULL;
        struct ath_buf *bf;
-       u8 tidno;
        int q;
        int ret;
 
@@ -2094,22 +2185,25 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
                ath_txq_unlock(sc, txq);
                txq = sc->tx.uapsdq;
                ath_txq_lock(sc, txq);
-       }
-
-       if (txctl->an && ieee80211_is_data_qos(hdr->frame_control)) {
-               tidno = ieee80211_get_qos_ctl(hdr)[0] &
-                       IEEE80211_QOS_CTL_TID_MASK;
-               tid = ATH_AN_2_TID(txctl->an, tidno);
+       } else if (txctl->an &&
+                  ieee80211_is_data_present(hdr->frame_control)) {
+               tid = ath_get_skb_tid(sc, txctl->an, skb);
 
                WARN_ON(tid->ac->txq != txctl->txq);
-       }
 
-       if ((info->flags & IEEE80211_TX_CTL_AMPDU) && tid) {
+               if (info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT)
+                       tid->ac->clear_ps_filter = true;
+
                /*
-                * Try aggregation if it's a unicast data frame
-                * and the destination is HT capable.
+                * Add this frame to software queue for scheduling later
+                * for aggregation.
                 */
-               ath_tx_send_ampdu(sc, txq, tid, skb, txctl);
+               TX_STAT_INC(txq->axq_qnum, a_queued_sw);
+               __skb_queue_tail(&tid->buf_q, skb);
+               if (!txctl->an->sleeping)
+                       ath_tx_queue_tid(txq, tid);
+
+               ath_txq_schedule(sc, txq);
                goto out;
        }
 
@@ -2168,7 +2262,7 @@ void ath_tx_cabq(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
 
                bf->bf_lastbf = bf;
                ath_set_rates(vif, NULL, bf);
-               ath_buf_set_rate(sc, bf, &info, fi->framelen);
+               ath_buf_set_rate(sc, bf, &info, fi->framelen, false);
                duration += info.rates[0].PktDuration;
                if (bf_tail)
                        bf_tail->bf_next = bf;
@@ -2372,8 +2466,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
 
                if (list_empty(&txq->axq_q)) {
                        txq->axq_link = NULL;
-                       if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT)
-                               ath_txq_schedule(sc, txq);
+                       ath_txq_schedule(sc, txq);
                        break;
                }
                bf = list_first_entry(&txq->axq_q, struct ath_buf, list);
@@ -2387,7 +2480,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
                 * it with the STALE flag.
                 */
                bf_held = NULL;
-               if (bf->bf_stale) {
+               if (bf->bf_state.stale) {
                        bf_held = bf;
                        if (list_is_last(&bf_held->list, &txq->axq_q))
                                break;
@@ -2411,7 +2504,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
                 * however leave the last descriptor back as the holding
                 * descriptor for hw.
                 */
-               lastbf->bf_stale = true;
+               lastbf->bf_state.stale = true;
                INIT_LIST_HEAD(&bf_head);
                if (!list_is_singular(&lastbf->list))
                        list_cut_position(&bf_head,
@@ -2466,6 +2559,8 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
                if (ts.qid == sc->beacon.beaconq) {
                        sc->beacon.tx_processed = true;
                        sc->beacon.tx_last = !(ts.ts_status & ATH9K_TXERR_MASK);
+
+                       ath9k_csa_is_finished(sc);
                        continue;
                }
 
@@ -2482,7 +2577,7 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
                }
 
                bf = list_first_entry(fifo_list, struct ath_buf, list);
-               if (bf->bf_stale) {
+               if (bf->bf_state.stale) {
                        list_del(&bf->list);
                        ath_tx_return_buffer(sc, bf);
                        bf = list_first_entry(fifo_list, struct ath_buf, list);
@@ -2504,7 +2599,7 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
                                ath_tx_txqaddbuf(sc, txq, &bf_q, true);
                        }
                } else {
-                       lastbf->bf_stale = true;
+                       lastbf->bf_state.stale = true;
                        if (bf != lastbf)
                                list_cut_position(&bf_head, fifo_list,
                                                  lastbf->list.prev);
@@ -2595,6 +2690,7 @@ void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
                tid->paused    = false;
                tid->active        = false;
                __skb_queue_head_init(&tid->buf_q);
+               __skb_queue_head_init(&tid->retry_q);
                acno = TID_TO_WME_AC(tidno);
                tid->ac = &an->ac[acno];
        }
@@ -2602,6 +2698,7 @@ void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
        for (acno = 0, ac = &an->ac[acno];
             acno < IEEE80211_NUM_ACS; acno++, ac++) {
                ac->sched    = false;
+               ac->clear_ps_filter = true;
                ac->txq = sc->tx.txq_map[acno];
                INIT_LIST_HEAD(&ac->tid_q);
        }
index f891d514d88175ba055588f85a9adf7507195c4e..990dd42ae79ed312652a336f46fc1ab4c9ad7ad8 100644 (file)
@@ -11,9 +11,6 @@ wil6210-y += txrx.o
 wil6210-y += debug.o
 wil6210-$(CONFIG_WIL6210_TRACING) += trace.o
 
-ifeq (, $(findstring -W,$(EXTRA_CFLAGS)))
-       subdir-ccflags-y += -Werror
-endif
 # for tracing framework to find trace.h
 CFLAGS_trace.o := -I$(src)
 
index ab636767fbde098ce41bd952aa7d950b6932cb28..1caa31992a7e1ecc25b03d32d78aaf12894fcd78 100644 (file)
@@ -51,7 +51,7 @@ static void wil_print_vring(struct seq_file *s, struct wil6210_priv *wil,
                        if ((i % 64) == 0 && (i != 0))
                                seq_printf(s, "\n");
                        seq_printf(s, "%s", (d->dma.status & BIT(0)) ?
-                                       "S" : (vring->ctx[i] ? "H" : "h"));
+                                       "S" : (vring->ctx[i].skb ? "H" : "h"));
                }
                seq_printf(s, "\n");
        }
@@ -406,7 +406,7 @@ static int wil_txdesc_debugfs_show(struct seq_file *s, void *data)
                volatile struct vring_tx_desc *d =
                                &(vring->va[dbg_txdesc_index].tx);
                volatile u32 *u = (volatile u32 *)d;
-               struct sk_buff *skb = vring->ctx[dbg_txdesc_index];
+               struct sk_buff *skb = vring->ctx[dbg_txdesc_index].skb;
 
                seq_printf(s, "Tx[%3d] = {\n", dbg_txdesc_index);
                seq_printf(s, "  MAC = 0x%08x 0x%08x 0x%08x 0x%08x\n",
index 29dd1e58cb170b20c36ff215ae64412a74e88266..717178f09aa8e0a2f9eb5196eb1c8654d2295cb2 100644 (file)
@@ -127,6 +127,8 @@ void *wil_if_alloc(struct device *dev, void __iomem *csr)
 
        ndev->netdev_ops = &wil_netdev_ops;
        ndev->ieee80211_ptr = wdev;
+       ndev->hw_features = NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
+       ndev->features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
        SET_NETDEV_DEV(ndev, wiphy_dev(wdev->wiphy));
        wdev->netdev = ndev;
 
index eff1239be53adc93d1e9dcbe2ac290951e902e12..e59239d22b9493cee2cb1bdc2f2d6066a86e94e2 100644 (file)
@@ -37,36 +37,40 @@ static inline void trace_ ## name(proto) {}
 #endif /* !CONFIG_WIL6210_TRACING || defined(__CHECKER__) */
 
 DECLARE_EVENT_CLASS(wil6210_wmi,
-       TP_PROTO(u16 id, void *buf, u16 buf_len),
+       TP_PROTO(struct wil6210_mbox_hdr_wmi *wmi, void *buf, u16 buf_len),
 
-       TP_ARGS(id, buf, buf_len),
+       TP_ARGS(wmi, buf, buf_len),
 
        TP_STRUCT__entry(
+               __field(u8, mid)
                __field(u16, id)
+               __field(u32, timestamp)
                __field(u16, buf_len)
                __dynamic_array(u8, buf, buf_len)
        ),
 
        TP_fast_assign(
-               __entry->id = id;
+               __entry->mid = wmi->mid;
+               __entry->id = le16_to_cpu(wmi->id);
+               __entry->timestamp = le32_to_cpu(wmi->timestamp);
                __entry->buf_len = buf_len;
                memcpy(__get_dynamic_array(buf), buf, buf_len);
        ),
 
        TP_printk(
-               "id 0x%04x len %d",
-               __entry->id, __entry->buf_len
+               "MID %d id 0x%04x len %d timestamp %d",
+               __entry->mid, __entry->id, __entry->buf_len, __entry->timestamp
        )
 );
 
 DEFINE_EVENT(wil6210_wmi, wil6210_wmi_cmd,
-       TP_PROTO(u16 id, void *buf, u16 buf_len),
-       TP_ARGS(id, buf, buf_len)
+       TP_PROTO(struct wil6210_mbox_hdr_wmi *wmi, void *buf, u16 buf_len),
+       TP_ARGS(wmi, buf, buf_len)
 );
 
 DEFINE_EVENT(wil6210_wmi, wil6210_wmi_event,
-       TP_PROTO(u16 id, void *buf, u16 buf_len),
-       TP_ARGS(id, buf, buf_len)
+       TP_PROTO(struct wil6210_mbox_hdr_wmi *wmi, void *buf, u16 buf_len),
+       TP_ARGS(wmi, buf, buf_len)
 );
 
 #define WIL6210_MSG_MAX (200)
index d240b24e1ccfafdd7d809c4b45951cbe2747a31a..d505b2676a736381366652e6815891286e6fe319 100644 (file)
@@ -18,6 +18,9 @@
 #include <net/ieee80211_radiotap.h>
 #include <linux/if_arp.h>
 #include <linux/moduleparam.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <net/ipv6.h>
 
 #include "wil6210.h"
 #include "wmi.h"
@@ -70,7 +73,7 @@ static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring)
 
        vring->swhead = 0;
        vring->swtail = 0;
-       vring->ctx = kzalloc(vring->size * sizeof(vring->ctx[0]), GFP_KERNEL);
+       vring->ctx = kcalloc(vring->size, sizeof(vring->ctx[0]), GFP_KERNEL);
        if (!vring->ctx) {
                vring->va = NULL;
                return -ENOMEM;
@@ -108,39 +111,39 @@ static void wil_vring_free(struct wil6210_priv *wil, struct vring *vring,
 
        while (!wil_vring_is_empty(vring)) {
                dma_addr_t pa;
-               struct sk_buff *skb;
                u16 dmalen;
+               struct wil_ctx *ctx;
 
                if (tx) {
                        struct vring_tx_desc dd, *d = &dd;
                        volatile struct vring_tx_desc *_d =
                                        &vring->va[vring->swtail].tx;
 
+                       ctx = &vring->ctx[vring->swtail];
                        *d = *_d;
                        pa = wil_desc_addr(&d->dma.addr);
                        dmalen = le16_to_cpu(d->dma.length);
-                       skb = vring->ctx[vring->swtail];
-                       if (skb) {
-                               dma_unmap_single(dev, pa, dmalen,
-                                                DMA_TO_DEVICE);
-                               dev_kfree_skb_any(skb);
-                               vring->ctx[vring->swtail] = NULL;
-                       } else {
+                       if (vring->ctx[vring->swtail].mapped_as_page) {
                                dma_unmap_page(dev, pa, dmalen,
                                               DMA_TO_DEVICE);
+                       } else {
+                               dma_unmap_single(dev, pa, dmalen,
+                                                DMA_TO_DEVICE);
                        }
+                       if (ctx->skb)
+                               dev_kfree_skb_any(ctx->skb);
                        vring->swtail = wil_vring_next_tail(vring);
                } else { /* rx */
                        struct vring_rx_desc dd, *d = &dd;
                        volatile struct vring_rx_desc *_d =
-                                       &vring->va[vring->swtail].rx;
+                                       &vring->va[vring->swhead].rx;
 
+                       ctx = &vring->ctx[vring->swhead];
                        *d = *_d;
                        pa = wil_desc_addr(&d->dma.addr);
                        dmalen = le16_to_cpu(d->dma.length);
-                       skb = vring->ctx[vring->swhead];
                        dma_unmap_single(dev, pa, dmalen, DMA_FROM_DEVICE);
-                       kfree_skb(skb);
+                       kfree_skb(ctx->skb);
                        wil_vring_advance_head(vring, 1);
                }
        }
@@ -187,7 +190,7 @@ static int wil_vring_alloc_skb(struct wil6210_priv *wil, struct vring *vring,
        d->dma.status = 0; /* BIT(0) should be 0 for HW_OWNED */
        d->dma.length = cpu_to_le16(sz);
        *_d = *d;
-       vring->ctx[i] = skb;
+       vring->ctx[i].skb = skb;
 
        return 0;
 }
@@ -352,11 +355,11 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
                return NULL;
        }
 
-       skb = vring->ctx[vring->swhead];
+       skb = vring->ctx[vring->swhead].skb;
        d = wil_skb_rxdesc(skb);
        *d = *_d;
        pa = wil_desc_addr(&d->dma.addr);
-       vring->ctx[vring->swhead] = NULL;
+       vring->ctx[vring->swhead].skb = NULL;
        wil_vring_advance_head(vring, 1);
 
        dma_unmap_single(dev, pa, sz, DMA_FROM_DEVICE);
@@ -407,6 +410,21 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
                return NULL;
        }
 
+       /* L4 IDENT is on when HW calculated checksum, check status
+        * and in case of error drop the packet
+        * higher stack layers will handle retransmission (if required)
+        */
+       if (d->dma.status & RX_DMA_STATUS_L4_IDENT) {
+               /* L4 protocol identified, csum calculated */
+               if ((d->dma.error & RX_DMA_ERROR_L4_ERR) == 0)
+                       skb->ip_summed = CHECKSUM_UNNECESSARY;
+               /* If HW reports bad checksum, let IP stack re-check it
+                * For example, HW doesn't understand the Microsoft IP stack,
+                * which mis-calculates the TCP checksum - if it should be 0x0,
+                * it writes 0xffff in violation of RFC 1624
+                */
+       }
+
        ds_bits = wil_rxdesc_ds_bits(d);
        if (ds_bits == 1) {
                /*
@@ -646,6 +664,53 @@ static int wil_tx_desc_map(struct vring_tx_desc *d, dma_addr_t pa, u32 len,
        return 0;
 }
 
+static int wil_tx_desc_offload_cksum_set(struct wil6210_priv *wil,
+                               struct vring_tx_desc *d,
+                               struct sk_buff *skb)
+{
+       int protocol;
+
+       if (skb->ip_summed != CHECKSUM_PARTIAL)
+               return 0;
+
+       switch (skb->protocol) {
+       case cpu_to_be16(ETH_P_IP):
+               protocol = ip_hdr(skb)->protocol;
+               break;
+       case cpu_to_be16(ETH_P_IPV6):
+               protocol = ipv6_hdr(skb)->nexthdr;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       switch (protocol) {
+       case IPPROTO_TCP:
+               d->dma.d0 |= (2 << DMA_CFG_DESC_TX_0_L4_TYPE_POS);
+               /* L4 header len: TCP header length */
+               d->dma.d0 |=
+               (tcp_hdrlen(skb) & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK);
+               break;
+       case IPPROTO_UDP:
+               /* L4 header len: UDP header length */
+               d->dma.d0 |=
+               (sizeof(struct udphdr) & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK);
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       d->dma.ip_length = skb_network_header_len(skb);
+       d->dma.b11 = ETH_HLEN; /* MAC header length */
+       d->dma.b11 |= BIT(DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_POS);
+       /* Enable TCP/UDP checksum */
+       d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_TCP_UDP_CHECKSUM_EN_POS);
+       /* Calculate pseudo-header */
+       d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_PSEUDO_HEADER_CALC_EN_POS);
+
+       return 0;
+}
+
 static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
                        struct sk_buff *skb)
 {
@@ -655,7 +720,7 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
        u32 swhead = vring->swhead;
        int avail = wil_vring_avail_tx(vring);
        int nr_frags = skb_shinfo(skb)->nr_frags;
-       uint f;
+       uint f = 0;
        int vring_index = vring - wil->vring_tx;
        uint i = swhead;
        dma_addr_t pa;
@@ -686,13 +751,20 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
                return -EINVAL;
        /* 1-st segment */
        wil_tx_desc_map(d, pa, skb_headlen(skb), vring_index);
+       /* Process TCP/UDP checksum offloading */
+       if (wil_tx_desc_offload_cksum_set(wil, d, skb)) {
+               wil_err(wil, "VRING #%d Failed to set cksum, drop packet\n",
+                       vring_index);
+               goto dma_error;
+       }
+
        d->mac.d[2] |= ((nr_frags + 1) <<
                       MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_POS);
        if (nr_frags)
                *_d = *d;
 
        /* middle segments */
-       for (f = 0; f < nr_frags; f++) {
+       for (; f < nr_frags; f++) {
                const struct skb_frag_struct *frag =
                                &skb_shinfo(skb)->frags[f];
                int len = skb_frag_size(frag);
@@ -703,7 +775,7 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
                if (unlikely(dma_mapping_error(dev, pa)))
                        goto dma_error;
                wil_tx_desc_map(d, pa, len, vring_index);
-               vring->ctx[i] = NULL;
+               vring->ctx[i].mapped_as_page = 1;
                *_d = *d;
        }
        /* for the last seg only */
@@ -712,6 +784,12 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
        d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS);
        *_d = *d;
 
+       /* hold reference to skb
+        * to prevent skb release before accounting
+        * in case of immediate "tx done"
+        */
+       vring->ctx[i].skb = skb_get(skb);
+
        wil_hex_dump_txrx("Tx ", DUMP_PREFIX_NONE, 32, 4,
                          (const void *)d, sizeof(*d), false);
 
@@ -720,29 +798,31 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
        wil_dbg_txrx(wil, "Tx swhead %d -> %d\n", swhead, vring->swhead);
        trace_wil6210_tx(vring_index, swhead, skb->len, nr_frags);
        iowrite32(vring->swhead, wil->csr + HOSTADDR(vring->hwtail));
-       /* hold reference to skb
-        * to prevent skb release before accounting
-        * in case of immediate "tx done"
-        */
-       vring->ctx[i] = skb_get(skb);
 
        return 0;
  dma_error:
        /* unmap what we have mapped */
-       /* Note: increment @f to operate with positive index */
-       for (f++; f > 0; f--) {
+       nr_frags = f + 1; /* frags mapped + one for skb head */
+       for (f = 0; f < nr_frags; f++) {
                u16 dmalen;
+               struct wil_ctx *ctx;
 
                i = (swhead + f) % vring->size;
+               ctx = &vring->ctx[i];
                _d = &(vring->va[i].tx);
                *d = *_d;
                _d->dma.status = TX_DMA_STATUS_DU;
                pa = wil_desc_addr(&d->dma.addr);
                dmalen = le16_to_cpu(d->dma.length);
-               if (vring->ctx[i])
-                       dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE);
-               else
+               if (ctx->mapped_as_page)
                        dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE);
+               else
+                       dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE);
+
+               if (ctx->skb)
+                       dev_kfree_skb_any(ctx->skb);
+
+               memset(ctx, 0, sizeof(*ctx));
        }
 
        return -EINVAL;
@@ -821,8 +901,9 @@ int wil_tx_complete(struct wil6210_priv *wil, int ringid)
                                              &vring->va[vring->swtail].tx;
                struct vring_tx_desc dd, *d = &dd;
                dma_addr_t pa;
-               struct sk_buff *skb;
                u16 dmalen;
+               struct wil_ctx *ctx = &vring->ctx[vring->swtail];
+               struct sk_buff *skb = ctx->skb;
 
                *d = *_d;
 
@@ -840,7 +921,11 @@ int wil_tx_complete(struct wil6210_priv *wil, int ringid)
                                  (const void *)d, sizeof(*d), false);
 
                pa = wil_desc_addr(&d->dma.addr);
-               skb = vring->ctx[vring->swtail];
+               if (ctx->mapped_as_page)
+                       dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE);
+               else
+                       dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE);
+
                if (skb) {
                        if (d->dma.error == 0) {
                                ndev->stats.tx_packets++;
@@ -849,16 +934,15 @@ int wil_tx_complete(struct wil6210_priv *wil, int ringid)
                                ndev->stats.tx_errors++;
                        }
 
-                       dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE);
                        dev_kfree_skb_any(skb);
-                       vring->ctx[vring->swtail] = NULL;
-               } else {
-                       dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE);
                }
-               d->dma.addr.addr_low = 0;
-               d->dma.addr.addr_high = 0;
-               d->dma.length = 0;
-               d->dma.status = TX_DMA_STATUS_DU;
+               memset(ctx, 0, sizeof(*ctx));
+               /*
+                * There is no need to touch HW descriptor:
+                *   status bit TX_DMA_STATUS_DU is set by design,
+                *   so hardware will not try to process this desc.,
+                * - rest of descriptor will be initialized on Tx.
+                */
                vring->swtail = wil_vring_next_tail(vring);
                done++;
        }
index 859aea68a1faa72169f3578a43bd4b1abb607dce..b3828279204c97d4057c7fc8253b5c54d43deed5 100644 (file)
@@ -235,7 +235,16 @@ struct vring_tx_mac {
 
 #define DMA_CFG_DESC_TX_0_L4_TYPE_POS 30
 #define DMA_CFG_DESC_TX_0_L4_TYPE_LEN 2
-#define DMA_CFG_DESC_TX_0_L4_TYPE_MSK 0xC0000000
+#define DMA_CFG_DESC_TX_0_L4_TYPE_MSK 0xC0000000 /* L4 type: 0-UDP, 2-TCP */
+
+
+#define DMA_CFG_DESC_TX_OFFLOAD_CFG_MAC_LEN_POS 0
+#define DMA_CFG_DESC_TX_OFFLOAD_CFG_MAC_LEN_LEN 7
+#define DMA_CFG_DESC_TX_OFFLOAD_CFG_MAC_LEN_MSK 0x7F /* MAC hdr len */
+
+#define DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_POS 7
+#define DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_LEN 1
+#define DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_MSK 0x80 /* 1-IPv4, 0-IPv6 */
 
 
 #define TX_DMA_STATUS_DU         BIT(0)
@@ -334,8 +343,17 @@ struct vring_rx_mac {
 
 #define RX_DMA_D0_CMD_DMA_IT     BIT(10)
 
+/* Error field, offload bits */
+#define RX_DMA_ERROR_L3_ERR   BIT(4)
+#define RX_DMA_ERROR_L4_ERR   BIT(5)
+
+
+/* Status field */
 #define RX_DMA_STATUS_DU         BIT(0)
 #define RX_DMA_STATUS_ERROR      BIT(2)
+
+#define RX_DMA_STATUS_L3_IDENT   BIT(4)
+#define RX_DMA_STATUS_L4_IDENT   BIT(5)
 #define RX_DMA_STATUS_PHY_INFO   BIT(6)
 
 struct vring_rx_dma {
index 44fdab51de7e52b9c9565c318c2b6eac2df3527e..c4a51638736a27ab4a41ffe1e3e5026400e4e249 100644 (file)
@@ -156,11 +156,22 @@ struct wil6210_mbox_hdr {
 /* max. value for wil6210_mbox_hdr.len */
 #define MAX_MBOXITEM_SIZE   (240)
 
+/**
+ * struct wil6210_mbox_hdr_wmi - WMI header
+ *
+ * @mid: MAC ID
+ *     00 - default, created by FW
+ *     01..0f - WiFi ports, driver to create
+ *     10..fe - debug
+ *     ff - broadcast
+ * @id: command/event ID
+ * @timestamp: FW fills for events, free-running msec timer
+ */
 struct wil6210_mbox_hdr_wmi {
-       u8 reserved0[2];
+       u8 mid;
+       u8 reserved;
        __le16 id;
-       __le16 info1; /* bits [0..3] - device_id, rest - unused */
-       u8 reserved1[2];
+       __le32 timestamp;
 } __packed;
 
 struct pending_wmi_event {
@@ -172,6 +183,14 @@ struct pending_wmi_event {
        } __packed event;
 };
 
+/**
+ * struct wil_ctx - software context for Vring descriptor
+ */
+struct wil_ctx {
+       struct sk_buff *skb;
+       u8 mapped_as_page:1;
+};
+
 union vring_desc;
 
 struct vring {
@@ -181,7 +200,7 @@ struct vring {
        u32 swtail;
        u32 swhead;
        u32 hwtail; /* write here to inform hw */
-       void **ctx; /* void *ctx[size] - software context */
+       struct wil_ctx *ctx; /* ctx[size] - software context */
 };
 
 enum { /* for wil6210_priv.status */
index dc8059ad4bab0d6d979d298d22d3bbf913211e85..5220f158b8f5d77bdf9d07db5b9261d1780551d1 100644 (file)
@@ -172,8 +172,8 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
                        .len = cpu_to_le16(sizeof(cmd.wmi) + len),
                },
                .wmi = {
+                       .mid = 0,
                        .id = cpu_to_le16(cmdid),
-                       .info1 = 0,
                },
        };
        struct wil6210_mbox_ring *r = &wil->mbox_ctl.tx;
@@ -248,7 +248,7 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
        iowrite32(r->head = next_head, wil->csr + HOST_MBOX +
                  offsetof(struct wil6210_mbox_ctl, tx.head));
 
-       trace_wil6210_wmi_cmd(cmdid, buf, len);
+       trace_wil6210_wmi_cmd(&cmd.wmi, buf, len);
 
        /* interrupt to FW */
        iowrite32(SW_INT_MBOX, wil->csr + HOST_SW_INT);
@@ -640,9 +640,13 @@ void wmi_recv_cmd(struct wil6210_priv *wil)
                            hdr.flags);
                if ((hdr.type == WIL_MBOX_HDR_TYPE_WMI) &&
                    (len >= sizeof(struct wil6210_mbox_hdr_wmi))) {
-                       u16 id = le16_to_cpu(evt->event.wmi.id);
-                       wil_dbg_wmi(wil, "WMI event 0x%04x\n", id);
-                       trace_wil6210_wmi_event(id, &evt->event.wmi, len);
+                       struct wil6210_mbox_hdr_wmi *wmi = &evt->event.wmi;
+                       u16 id = le16_to_cpu(wmi->id);
+                       u32 tstamp = le32_to_cpu(wmi->timestamp);
+                       wil_dbg_wmi(wil, "WMI event 0x%04x MID %d @%d msec\n",
+                                   id, wmi->mid, tstamp);
+                       trace_wil6210_wmi_event(wmi, &wmi[1],
+                                               len - sizeof(*wmi));
                }
                wil_hex_dump_wmi("evt ", DUMP_PREFIX_OFFSET, 16, 1,
                                 &evt->event.hdr, sizeof(hdr) + len, true);
@@ -920,6 +924,12 @@ int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring)
                cmd.sniffer_cfg.phy_support =
                        cpu_to_le32((wil->monitor_flags & MONITOR_FLAG_CONTROL)
                                    ? WMI_SNIFFER_CP : WMI_SNIFFER_DP);
+       } else {
+               /* Initialize offload (in non-sniffer mode).
+                * Linux IP stack always calculates IP checksum
+                * HW always calculate TCP/UDP checksum
+                */
+               cmd.l3_l4_ctrl |= (1 << L3_L4_CTRL_TCPIP_CHECKSUM_EN_POS);
        }
        /* typical time for secure PCP is 840ms */
        rc = wmi_call(wil, WMI_CFG_RX_CHAIN_CMDID, &cmd, sizeof(cmd),
index e3f3c48f86d4c4f6f0d42acc34fd8b480c3207b6..e13b1a65c65fe7469882b4eee83e6a08f567bce4 100644 (file)
@@ -592,6 +592,7 @@ brcmf_sdcard_send_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
                      uint flags, u8 *buf, uint nbytes)
 {
        struct sk_buff *mypkt;
+       struct sk_buff_head pktq;
        int err;
 
        mypkt = brcmu_pkt_buf_get_skb(nbytes);
@@ -602,7 +603,10 @@ brcmf_sdcard_send_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
        }
 
        memcpy(mypkt->data, buf, nbytes);
-       err = brcmf_sdcard_send_pkt(sdiodev, addr, fn, flags, mypkt);
+       __skb_queue_head_init(&pktq);
+       __skb_queue_tail(&pktq, mypkt);
+       err = brcmf_sdcard_send_pkt(sdiodev, addr, fn, flags, &pktq);
+       __skb_dequeue_tail(&pktq);
 
        brcmu_pkt_buf_free_skb(mypkt);
        return err;
@@ -611,22 +615,18 @@ brcmf_sdcard_send_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
 
 int
 brcmf_sdcard_send_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
-                     uint flags, struct sk_buff *pkt)
+                     uint flags, struct sk_buff_head *pktq)
 {
        uint width;
        int err = 0;
-       struct sk_buff_head pkt_list;
 
        brcmf_dbg(SDIO, "fun = %d, addr = 0x%x, size = %d\n",
-                 fn, addr, pkt->len);
+                 fn, addr, pktq->qlen);
 
        width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
        brcmf_sdio_addrprep(sdiodev, width, &addr);
 
-       skb_queue_head_init(&pkt_list);
-       skb_queue_tail(&pkt_list, pkt);
-       err = brcmf_sdio_buffrw(sdiodev, fn, true, addr, &pkt_list);
-       skb_dequeue_tail(&pkt_list);
+       err = brcmf_sdio_buffrw(sdiodev, fn, true, addr, pktq);
 
        return err;
 }
index 289e386f01f66facd7f7e62f4f618f55d8afeaf6..64f4a2bc8ddedf6c131d8b8d70408acb3ba17d9b 100644 (file)
@@ -350,7 +350,6 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
 
        sdiodev->bus_if = bus_if;
        bus_if->bus_priv.sdio = sdiodev;
-       bus_if->align = BRCMF_SDALIGN;
        dev_set_drvdata(&func->dev, bus_if);
        dev_set_drvdata(&sdiodev->func[1]->dev, bus_if);
        sdiodev->dev = &sdiodev->func[1]->dev;
index 86cbfe2c7c6cbbf404827a6cfff726b12499cf3f..2eb9e642c9bf80d604bea8d201d86a0bb5c1a524 100644 (file)
 #define BRCMF_E_IF_DEL                         2
 #define BRCMF_E_IF_CHANGE                      3
 
+#define BRCMF_E_IF_FLAG_NOIF                   1
+
 #define BRCMF_E_IF_ROLE_STA                    0
 #define BRCMF_E_IF_ROLE_AP                     1
 #define BRCMF_E_IF_ROLE_WDS                    2
 #define BRCMF_DCMD_MEDLEN      1536
 #define BRCMF_DCMD_MAXLEN      8192
 
+#define BRCMF_AMPDU_RX_REORDER_MAXFLOWS                256
+
 /* Pattern matching filter. Specifies an offset within received packets to
  * start matching, the pattern to match, the size of the pattern, and a bitmask
  * that indicates which bits within the pattern should be matched.
@@ -505,6 +509,25 @@ struct brcmf_dcmd {
        uint needed;            /* bytes needed (optional) */
 };
 
+/**
+ * struct brcmf_ampdu_rx_reorder - AMPDU receive reorder info
+ *
+ * @pktslots: dynamic allocated array for ordering AMPDU packets.
+ * @flow_id: AMPDU flow identifier.
+ * @cur_idx: last AMPDU index from firmware.
+ * @exp_idx: expected next AMPDU index.
+ * @max_idx: maximum amount of packets per AMPDU.
+ * @pend_pkts: number of packets currently in @pktslots.
+ */
+struct brcmf_ampdu_rx_reorder {
+       struct sk_buff **pktslots;
+       u8 flow_id;
+       u8 cur_idx;
+       u8 exp_idx;
+       u8 max_idx;
+       u8 pend_pkts;
+};
+
 /* Forward decls for struct brcmf_pub (see below) */
 struct brcmf_proto;    /* device communication protocol info */
 struct brcmf_cfg80211_dev; /* cfg80211 device info */
@@ -536,9 +559,10 @@ struct brcmf_pub {
 
        struct brcmf_fweh_info fweh;
 
-       bool fw_signals;
        struct brcmf_fws_info *fws;
-       spinlock_t fws_spinlock;
+
+       struct brcmf_ampdu_rx_reorder
+               *reorder_flows[BRCMF_AMPDU_RX_REORDER_MAXFLOWS];
 #ifdef DEBUG
        struct dentry *dbgfs_dir;
 #endif
@@ -604,6 +628,9 @@ struct brcmf_if {
        wait_queue_head_t pend_8021x_wait;
 };
 
+struct brcmf_skb_reorder_data {
+       u8 *reorder;
+};
 
 extern int brcmf_netdev_wait_pend8021x(struct net_device *ndev);
 
index 080395f49fa5922400419d821e1fda4fcc1b460e..f7c1985844e44c3ed0464a4e21588d538f693959 100644 (file)
@@ -36,7 +36,11 @@ struct brcmf_bus_dcmd {
  *
  * @init: prepare for communication with dongle.
  * @stop: clear pending frames, disable data flow.
- * @txdata: send a data frame to the dongle (callee disposes skb).
+ * @txdata: send a data frame to the dongle. When the data
+ *     has been transferred, the common driver must be
+ *     notified using brcmf_txcomplete(). The common
+ *     driver calls this function with interrupts
+ *     disabled.
  * @txctl: transmit a control request message to dongle.
  * @rxctl: receive a control response message from dongle.
  * @gettxq: obtain a reference of bus transmit queue (optional).
@@ -65,7 +69,6 @@ struct brcmf_bus_ops {
  * @maxctl: maximum size for rxctl request message.
  * @tx_realloc: number of tx packets realloced for headroom.
  * @dstats: dongle-based statistical data.
- * @align: alignment requirement for the bus.
  * @dcmd_list: bus/device specific dongle initialization commands.
  * @chip: device identifier of the dongle chip.
  * @chiprev: revision of the dongle chip.
@@ -80,7 +83,6 @@ struct brcmf_bus {
        enum brcmf_bus_state state;
        uint maxctl;
        unsigned long tx_realloc;
-       u8 align;
        u32 chip;
        u32 chiprev;
        struct list_head dcmd_list;
index 80099016d21f4a04cb22b5e3b98ccd84608e658a..e067aec1fbf113220d1a1054ca3dfceb027448a2 100644 (file)
@@ -38,6 +38,19 @@ MODULE_LICENSE("Dual BSD/GPL");
 
 #define MAX_WAIT_FOR_8021X_TX          50      /* msecs */
 
+/* AMPDU rx reordering definitions */
+#define BRCMF_RXREORDER_FLOWID_OFFSET          0
+#define BRCMF_RXREORDER_MAXIDX_OFFSET          2
+#define BRCMF_RXREORDER_FLAGS_OFFSET           4
+#define BRCMF_RXREORDER_CURIDX_OFFSET          6
+#define BRCMF_RXREORDER_EXPIDX_OFFSET          8
+
+#define BRCMF_RXREORDER_DEL_FLOW               0x01
+#define BRCMF_RXREORDER_FLUSH_ALL              0x02
+#define BRCMF_RXREORDER_CURIDX_VALID           0x04
+#define BRCMF_RXREORDER_EXPIDX_VALID           0x08
+#define BRCMF_RXREORDER_NEW_HOLE               0x10
+
 /* Error bits */
 int brcmf_msg_level;
 module_param_named(debug, brcmf_msg_level, int, S_IRUSR | S_IWUSR);
@@ -265,17 +278,234 @@ void brcmf_txflowblock(struct device *dev, bool state)
 {
        struct brcmf_bus *bus_if = dev_get_drvdata(dev);
        struct brcmf_pub *drvr = bus_if->drvr;
-       int i;
 
        brcmf_dbg(TRACE, "Enter\n");
 
-       if (brcmf_fws_fc_active(drvr->fws)) {
-               brcmf_fws_bus_blocked(drvr, state);
+       brcmf_fws_bus_blocked(drvr, state);
+}
+
+static void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb)
+{
+       skb->dev = ifp->ndev;
+       skb->protocol = eth_type_trans(skb, skb->dev);
+
+       if (skb->pkt_type == PACKET_MULTICAST)
+               ifp->stats.multicast++;
+
+       /* Process special event packets */
+       brcmf_fweh_process_skb(ifp->drvr, skb);
+
+       if (!(ifp->ndev->flags & IFF_UP)) {
+               brcmu_pkt_buf_free_skb(skb);
+               return;
+       }
+
+       ifp->stats.rx_bytes += skb->len;
+       ifp->stats.rx_packets++;
+
+       brcmf_dbg(DATA, "rx proto=0x%X\n", ntohs(skb->protocol));
+       if (in_interrupt())
+               netif_rx(skb);
+       else
+               /* If the receive is not processed inside an ISR,
+                * the softirqd must be woken explicitly to service
+                * the NET_RX_SOFTIRQ.  This is handled by netif_rx_ni().
+                */
+               netif_rx_ni(skb);
+}
+
+static void brcmf_rxreorder_get_skb_list(struct brcmf_ampdu_rx_reorder *rfi,
+                                        u8 start, u8 end,
+                                        struct sk_buff_head *skb_list)
+{
+       /* initialize return list */
+       __skb_queue_head_init(skb_list);
+
+       if (rfi->pend_pkts == 0) {
+               brcmf_dbg(INFO, "no packets in reorder queue\n");
+               return;
+       }
+
+       do {
+               if (rfi->pktslots[start]) {
+                       __skb_queue_tail(skb_list, rfi->pktslots[start]);
+                       rfi->pktslots[start] = NULL;
+               }
+               start++;
+               if (start > rfi->max_idx)
+                       start = 0;
+       } while (start != end);
+       rfi->pend_pkts -= skb_queue_len(skb_list);
+}
+
+static void brcmf_rxreorder_process_info(struct brcmf_if *ifp, u8 *reorder_data,
+                                        struct sk_buff *pkt)
+{
+       u8 flow_id, max_idx, cur_idx, exp_idx, end_idx;
+       struct brcmf_ampdu_rx_reorder *rfi;
+       struct sk_buff_head reorder_list;
+       struct sk_buff *pnext;
+       u8 flags;
+       u32 buf_size;
+
+       flow_id = reorder_data[BRCMF_RXREORDER_FLOWID_OFFSET];
+       flags = reorder_data[BRCMF_RXREORDER_FLAGS_OFFSET];
+
+       /* validate flags and flow id */
+       if (flags == 0xFF) {
+               brcmf_err("invalid flags...so ignore this packet\n");
+               brcmf_netif_rx(ifp, pkt);
+               return;
+       }
+
+       rfi = ifp->drvr->reorder_flows[flow_id];
+       if (flags & BRCMF_RXREORDER_DEL_FLOW) {
+               brcmf_dbg(INFO, "flow-%d: delete\n",
+                         flow_id);
+
+               if (rfi == NULL) {
+                       brcmf_dbg(INFO, "received flags to cleanup, but no flow (%d) yet\n",
+                                 flow_id);
+                       brcmf_netif_rx(ifp, pkt);
+                       return;
+               }
+
+               brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx, rfi->exp_idx,
+                                            &reorder_list);
+               /* add the last packet */
+               __skb_queue_tail(&reorder_list, pkt);
+               kfree(rfi);
+               ifp->drvr->reorder_flows[flow_id] = NULL;
+               goto netif_rx;
+       }
+       /* from here on we need a flow reorder instance */
+       if (rfi == NULL) {
+               buf_size = sizeof(*rfi);
+               max_idx = reorder_data[BRCMF_RXREORDER_MAXIDX_OFFSET];
+
+               buf_size += (max_idx + 1) * sizeof(pkt);
+
+               /* allocate space for flow reorder info */
+               brcmf_dbg(INFO, "flow-%d: start, maxidx %d\n",
+                         flow_id, max_idx);
+               rfi = kzalloc(buf_size, GFP_ATOMIC);
+               if (rfi == NULL) {
+                       brcmf_err("failed to alloc buffer\n");
+                       brcmf_netif_rx(ifp, pkt);
+                       return;
+               }
+
+               ifp->drvr->reorder_flows[flow_id] = rfi;
+               rfi->pktslots = (struct sk_buff **)(rfi+1);
+               rfi->max_idx = max_idx;
+       }
+       if (flags & BRCMF_RXREORDER_NEW_HOLE)  {
+               if (rfi->pend_pkts) {
+                       brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx,
+                                                    rfi->exp_idx,
+                                                    &reorder_list);
+                       WARN_ON(rfi->pend_pkts);
+               } else {
+                       __skb_queue_head_init(&reorder_list);
+               }
+               rfi->cur_idx = reorder_data[BRCMF_RXREORDER_CURIDX_OFFSET];
+               rfi->exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET];
+               rfi->max_idx = reorder_data[BRCMF_RXREORDER_MAXIDX_OFFSET];
+               rfi->pktslots[rfi->cur_idx] = pkt;
+               rfi->pend_pkts++;
+               brcmf_dbg(DATA, "flow-%d: new hole %d (%d), pending %d\n",
+                         flow_id, rfi->cur_idx, rfi->exp_idx, rfi->pend_pkts);
+       } else if (flags & BRCMF_RXREORDER_CURIDX_VALID) {
+               cur_idx = reorder_data[BRCMF_RXREORDER_CURIDX_OFFSET];
+               exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET];
+
+               if ((exp_idx == rfi->exp_idx) && (cur_idx != rfi->exp_idx)) {
+                       /* still in the current hole */
+                       /* enqueue the current on the buffer chain */
+                       if (rfi->pktslots[cur_idx] != NULL) {
+                               brcmf_dbg(INFO, "HOLE: ERROR buffer pending..free it\n");
+                               brcmu_pkt_buf_free_skb(rfi->pktslots[cur_idx]);
+                               rfi->pktslots[cur_idx] = NULL;
+                       }
+                       rfi->pktslots[cur_idx] = pkt;
+                       rfi->pend_pkts++;
+                       rfi->cur_idx = cur_idx;
+                       brcmf_dbg(DATA, "flow-%d: store pkt %d (%d), pending %d\n",
+                                 flow_id, cur_idx, exp_idx, rfi->pend_pkts);
+
+                       /* can return now as there is no reorder
+                        * list to process.
+                        */
+                       return;
+               }
+               if (rfi->exp_idx == cur_idx) {
+                       if (rfi->pktslots[cur_idx] != NULL) {
+                               brcmf_dbg(INFO, "error buffer pending..free it\n");
+                               brcmu_pkt_buf_free_skb(rfi->pktslots[cur_idx]);
+                               rfi->pktslots[cur_idx] = NULL;
+                       }
+                       rfi->pktslots[cur_idx] = pkt;
+                       rfi->pend_pkts++;
+
+                       /* got the expected one. flush from current to expected
+                        * and update expected
+                        */
+                       brcmf_dbg(DATA, "flow-%d: expected %d (%d), pending %d\n",
+                                 flow_id, cur_idx, exp_idx, rfi->pend_pkts);
+
+                       rfi->cur_idx = cur_idx;
+                       rfi->exp_idx = exp_idx;
+
+                       brcmf_rxreorder_get_skb_list(rfi, cur_idx, exp_idx,
+                                                    &reorder_list);
+                       brcmf_dbg(DATA, "flow-%d: freeing buffers %d, pending %d\n",
+                                 flow_id, skb_queue_len(&reorder_list),
+                                 rfi->pend_pkts);
+               } else {
+                       u8 end_idx;
+
+                       brcmf_dbg(DATA, "flow-%d (0x%x): both moved, old %d/%d, new %d/%d\n",
+                                 flow_id, flags, rfi->cur_idx, rfi->exp_idx,
+                                 cur_idx, exp_idx);
+                       if (flags & BRCMF_RXREORDER_FLUSH_ALL)
+                               end_idx = rfi->exp_idx;
+                       else
+                               end_idx = exp_idx;
+
+                       /* flush pkts first */
+                       brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx, end_idx,
+                                                    &reorder_list);
+
+                       if (exp_idx == ((cur_idx + 1) % (rfi->max_idx + 1))) {
+                               __skb_queue_tail(&reorder_list, pkt);
+                       } else {
+                               rfi->pktslots[cur_idx] = pkt;
+                               rfi->pend_pkts++;
+                       }
+                       rfi->exp_idx = exp_idx;
+                       rfi->cur_idx = cur_idx;
+               }
        } else {
-               for (i = 0; i < BRCMF_MAX_IFS; i++)
-                       brcmf_txflowblock_if(drvr->iflist[i],
-                                            BRCMF_NETIF_STOP_REASON_BLOCK_BUS,
-                                            state);
+               /* explicitly move the window, updating the expected index */
+               exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET];
+
+               brcmf_dbg(DATA, "flow-%d (0x%x): change expected: %d -> %d\n",
+                         flow_id, flags, rfi->exp_idx, exp_idx);
+               if (flags & BRCMF_RXREORDER_FLUSH_ALL)
+                       end_idx =  rfi->exp_idx;
+               else
+                       end_idx =  exp_idx;
+
+               brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx, end_idx,
+                                            &reorder_list);
+               __skb_queue_tail(&reorder_list, pkt);
+               /* set the new expected idx */
+               rfi->exp_idx = exp_idx;
+       }
+netif_rx:
+       skb_queue_walk_safe(&reorder_list, pkt, pnext) {
+               __skb_unlink(pkt, &reorder_list);
+               brcmf_netif_rx(ifp, pkt);
        }
 }
 
@@ -285,16 +515,18 @@ void brcmf_rx_frames(struct device *dev, struct sk_buff_head *skb_list)
        struct brcmf_if *ifp;
        struct brcmf_bus *bus_if = dev_get_drvdata(dev);
        struct brcmf_pub *drvr = bus_if->drvr;
+       struct brcmf_skb_reorder_data *rd;
        u8 ifidx;
        int ret;
 
-       brcmf_dbg(DATA, "Enter\n");
+       brcmf_dbg(DATA, "Enter: %s: count=%u\n", dev_name(dev),
+                 skb_queue_len(skb_list));
 
        skb_queue_walk_safe(skb_list, skb, pnext) {
                skb_unlink(skb, skb_list);
 
                /* process and remove protocol-specific header */
-               ret = brcmf_proto_hdrpull(drvr, drvr->fw_signals, &ifidx, skb);
+               ret = brcmf_proto_hdrpull(drvr, true, &ifidx, skb);
                ifp = drvr->iflist[ifidx];
 
                if (ret || !ifp || !ifp->ndev) {
@@ -304,31 +536,11 @@ void brcmf_rx_frames(struct device *dev, struct sk_buff_head *skb_list)
                        continue;
                }
 
-               skb->dev = ifp->ndev;
-               skb->protocol = eth_type_trans(skb, skb->dev);
-
-               if (skb->pkt_type == PACKET_MULTICAST)
-                       ifp->stats.multicast++;
-
-               /* Process special event packets */
-               brcmf_fweh_process_skb(drvr, skb);
-
-               if (!(ifp->ndev->flags & IFF_UP)) {
-                       brcmu_pkt_buf_free_skb(skb);
-                       continue;
-               }
-
-               ifp->stats.rx_bytes += skb->len;
-               ifp->stats.rx_packets++;
-
-               if (in_interrupt())
-                       netif_rx(skb);
+               rd = (struct brcmf_skb_reorder_data *)skb->cb;
+               if (rd->reorder)
+                       brcmf_rxreorder_process_info(ifp, rd->reorder, skb);
                else
-                       /* If the receive is not processed inside an ISR,
-                        * the softirqd must be woken explicitly to service the
-                        * NET_RX_SOFTIRQ. This is handled by netif_rx_ni().
-                        */
-                       netif_rx_ni(skb);
+                       brcmf_netif_rx(ifp, skb);
        }
 }
 
@@ -889,7 +1101,6 @@ int brcmf_bus_start(struct device *dev)
        if (ret < 0)
                goto fail;
 
-       drvr->fw_signals = true;
        ret = brcmf_fws_init(drvr);
        if (ret < 0)
                goto fail;
index 264111968320e19950880bc910bc90818169577d..1aa75d5951b82720f21f0ff486e701252837333b 100644 (file)
@@ -201,13 +201,6 @@ struct rte_console {
 #define SFC_CRC4WOOS   (1 << 2)        /* CRC error for write out of sync */
 #define SFC_ABORTALL   (1 << 3)        /* Abort all in-progress frames */
 
-/* HW frame tag */
-#define SDPCM_FRAMETAG_LEN     4       /* 2 bytes len, 2 bytes check val */
-
-/* Total length of frame header for dongle protocol */
-#define SDPCM_HDRLEN   (SDPCM_FRAMETAG_LEN + SDPCM_SWHEADER_LEN)
-#define SDPCM_RESERVE  (SDPCM_HDRLEN + BRCMF_SDALIGN)
-
 /*
  * Software allocation of To SB Mailbox resources
  */
@@ -250,38 +243,6 @@ struct rte_console {
 /* Current protocol version */
 #define SDPCM_PROT_VERSION     4
 
-/* SW frame header */
-#define SDPCM_PACKET_SEQUENCE(p)       (((u8 *)p)[0] & 0xff)
-
-#define SDPCM_CHANNEL_MASK             0x00000f00
-#define SDPCM_CHANNEL_SHIFT            8
-#define SDPCM_PACKET_CHANNEL(p)                (((u8 *)p)[1] & 0x0f)
-
-#define SDPCM_NEXTLEN_OFFSET           2
-
-/* Data Offset from SOF (HW Tag, SW Tag, Pad) */
-#define SDPCM_DOFFSET_OFFSET           3       /* Data Offset */
-#define SDPCM_DOFFSET_VALUE(p)         (((u8 *)p)[SDPCM_DOFFSET_OFFSET] & 0xff)
-#define SDPCM_DOFFSET_MASK             0xff000000
-#define SDPCM_DOFFSET_SHIFT            24
-#define SDPCM_FCMASK_OFFSET            4       /* Flow control */
-#define SDPCM_FCMASK_VALUE(p)          (((u8 *)p)[SDPCM_FCMASK_OFFSET] & 0xff)
-#define SDPCM_WINDOW_OFFSET            5       /* Credit based fc */
-#define SDPCM_WINDOW_VALUE(p)          (((u8 *)p)[SDPCM_WINDOW_OFFSET] & 0xff)
-
-#define SDPCM_SWHEADER_LEN     8       /* SW header is 64 bits */
-
-/* logical channel numbers */
-#define SDPCM_CONTROL_CHANNEL  0       /* Control channel Id */
-#define SDPCM_EVENT_CHANNEL    1       /* Asyc Event Indication Channel Id */
-#define SDPCM_DATA_CHANNEL     2       /* Data Xmit/Recv Channel Id */
-#define SDPCM_GLOM_CHANNEL     3       /* For coalesced packets */
-#define SDPCM_TEST_CHANNEL     15      /* Reserved for test/debug packets */
-
-#define SDPCM_SEQUENCE_WRAP    256     /* wrap-around val for 8bit frame seq */
-
-#define SDPCM_GLOMDESC(p)      (((u8 *)p)[1] & 0x80)
-
 /*
  * Shared structure between dongle and the host.
  * The structure contains pointers to trap or assert information.
@@ -396,8 +357,8 @@ struct sdpcm_shared_le {
        __le32 brpt_addr;
 };
 
-/* SDIO read frame info */
-struct brcmf_sdio_read {
+/* dongle SDIO bus specific header info */
+struct brcmf_sdio_hdrinfo {
        u8 seq_num;
        u8 channel;
        u16 len;
@@ -431,7 +392,7 @@ struct brcmf_sdio {
        u8 hdrbuf[MAX_HDR_READ + BRCMF_SDALIGN];
        u8 *rxhdr;              /* Header of current rx frame (in hdrbuf) */
        u8 rx_seq;              /* Receive sequence number (expected) */
-       struct brcmf_sdio_read cur_read;
+       struct brcmf_sdio_hdrinfo cur_read;
                                /* info of current read frame */
        bool rxskip;            /* Skip receive (awaiting NAK ACK) */
        bool rxpending;         /* Data frame pending in dongle */
@@ -500,6 +461,8 @@ struct brcmf_sdio {
        struct brcmf_sdio_count sdcnt;
        bool sr_enabled; /* SaveRestore enabled */
        bool sleeping; /* SDIO bus sleeping */
+
+       u8 tx_hdrlen;           /* sdio bus header length for tx packet */
 };
 
 /* clkstate */
@@ -510,7 +473,6 @@ struct brcmf_sdio {
 
 #ifdef DEBUG
 static int qcount[NUMPRIO];
-static int tx_packets[NUMPRIO];
 #endif                         /* DEBUG */
 
 #define DEFAULT_SDIO_DRIVE_STRENGTH    6       /* in milliamps */
@@ -1043,18 +1005,63 @@ static void brcmf_sdbrcm_free_glom(struct brcmf_sdio *bus)
        }
 }
 
-static int brcmf_sdio_hdparser(struct brcmf_sdio *bus, u8 *header,
-                              struct brcmf_sdio_read *rd,
-                              enum brcmf_sdio_frmtype type)
+/**
+ * brcmfmac sdio bus specific header
+ * This is the lowest layer header wrapped on the packets transmitted between
+ * host and WiFi dongle which contains information needed for SDIO core and
+ * firmware
+ *
+ * It consists of 2 parts: hw header and software header
+ * hardware header (frame tag) - 4 bytes
+ * Byte 0~1: Frame length
+ * Byte 2~3: Checksum, bit-wise inverse of frame length
+ * software header - 8 bytes
+ * Byte 0: Rx/Tx sequence number
+ * Byte 1: 4 MSB Channel number, 4 LSB arbitrary flag
+ * Byte 2: Length of next data frame, reserved for Tx
+ * Byte 3: Data offset
+ * Byte 4: Flow control bits, reserved for Tx
+ * Byte 5: Maximum sequence number allowed by firmware for Tx, reserved for Tx
+ * Byte 6~7: Reserved
+ */
+#define SDPCM_HWHDR_LEN                        4
+#define SDPCM_SWHDR_LEN                        8
+#define SDPCM_HDRLEN                   (SDPCM_HWHDR_LEN + SDPCM_SWHDR_LEN)
+/* software header */
+#define SDPCM_SEQ_MASK                 0x000000ff
+#define SDPCM_SEQ_WRAP                 256
+#define SDPCM_CHANNEL_MASK             0x00000f00
+#define SDPCM_CHANNEL_SHIFT            8
+#define SDPCM_CONTROL_CHANNEL          0       /* Control */
+#define SDPCM_EVENT_CHANNEL            1       /* Async Event Indication */
+#define SDPCM_DATA_CHANNEL             2       /* Data Xmit/Recv */
+#define SDPCM_GLOM_CHANNEL             3       /* Coalesced packets */
+#define SDPCM_TEST_CHANNEL             15      /* Test/debug packets */
+#define SDPCM_GLOMDESC(p)              (((u8 *)p)[1] & 0x80)
+#define SDPCM_NEXTLEN_MASK             0x00ff0000
+#define SDPCM_NEXTLEN_SHIFT            16
+#define SDPCM_DOFFSET_MASK             0xff000000
+#define SDPCM_DOFFSET_SHIFT            24
+#define SDPCM_FCMASK_MASK              0x000000ff
+#define SDPCM_WINDOW_MASK              0x0000ff00
+#define SDPCM_WINDOW_SHIFT             8
+
+static inline u8 brcmf_sdio_getdatoffset(u8 *swheader)
+{
+       u32 hdrvalue;
+       hdrvalue = *(u32 *)swheader;
+       return (u8)((hdrvalue & SDPCM_DOFFSET_MASK) >> SDPCM_DOFFSET_SHIFT);
+}
+
+static int brcmf_sdio_hdparse(struct brcmf_sdio *bus, u8 *header,
+                             struct brcmf_sdio_hdrinfo *rd,
+                             enum brcmf_sdio_frmtype type)
 {
        u16 len, checksum;
        u8 rx_seq, fc, tx_seq_max;
+       u32 swheader;
 
-       /*
-        * 4 bytes hardware header (frame tag)
-        * Byte 0~1: Frame length
-        * Byte 2~3: Checksum, bit-wise inverse of frame length
-        */
+       /* hw header */
        len = get_unaligned_le16(header);
        checksum = get_unaligned_le16(header + sizeof(u16));
        /* All zero means no more to read */
@@ -1083,24 +1090,16 @@ static int brcmf_sdio_hdparser(struct brcmf_sdio *bus, u8 *header,
        }
        rd->len = len;
 
-       /*
-        * 8 bytes hardware header
-        * Byte 0: Rx sequence number
-        * Byte 1: 4 MSB Channel number, 4 LSB arbitrary flag
-        * Byte 2: Length of next data frame
-        * Byte 3: Data offset
-        * Byte 4: Flow control bits
-        * Byte 5: Maximum Sequence number allow for Tx
-        * Byte 6~7: Reserved
-        */
-       if (type == BRCMF_SDIO_FT_SUPER &&
-           SDPCM_GLOMDESC(&header[SDPCM_FRAMETAG_LEN])) {
+       /* software header */
+       header += SDPCM_HWHDR_LEN;
+       swheader = le32_to_cpu(*(__le32 *)header);
+       if (type == BRCMF_SDIO_FT_SUPER && SDPCM_GLOMDESC(header)) {
                brcmf_err("Glom descriptor found in superframe head\n");
                rd->len = 0;
                return -EINVAL;
        }
-       rx_seq = SDPCM_PACKET_SEQUENCE(&header[SDPCM_FRAMETAG_LEN]);
-       rd->channel = SDPCM_PACKET_CHANNEL(&header[SDPCM_FRAMETAG_LEN]);
+       rx_seq = (u8)(swheader & SDPCM_SEQ_MASK);
+       rd->channel = (swheader & SDPCM_CHANNEL_MASK) >> SDPCM_CHANNEL_SHIFT;
        if (len > MAX_RX_DATASZ && rd->channel != SDPCM_CONTROL_CHANNEL &&
            type != BRCMF_SDIO_FT_SUPER) {
                brcmf_err("HW header length too long\n");
@@ -1120,7 +1119,7 @@ static int brcmf_sdio_hdparser(struct brcmf_sdio *bus, u8 *header,
                rd->len = 0;
                return -EINVAL;
        }
-       rd->dat_offset = SDPCM_DOFFSET_VALUE(&header[SDPCM_FRAMETAG_LEN]);
+       rd->dat_offset = brcmf_sdio_getdatoffset(header);
        if (rd->dat_offset < SDPCM_HDRLEN || rd->dat_offset > rd->len) {
                brcmf_err("seq %d: bad data offset\n", rx_seq);
                bus->sdcnt.rx_badhdr++;
@@ -1137,14 +1136,15 @@ static int brcmf_sdio_hdparser(struct brcmf_sdio *bus, u8 *header,
        /* no need to check the reset for subframe */
        if (type == BRCMF_SDIO_FT_SUB)
                return 0;
-       rd->len_nxtfrm = header[SDPCM_FRAMETAG_LEN + SDPCM_NEXTLEN_OFFSET];
+       rd->len_nxtfrm = (swheader & SDPCM_NEXTLEN_MASK) >> SDPCM_NEXTLEN_SHIFT;
        if (rd->len_nxtfrm << 4 > MAX_RX_DATASZ) {
+               /* only warn for NON glom packet */
                if (rd->channel != SDPCM_GLOM_CHANNEL)
                        brcmf_err("seq %d: next length error\n", rx_seq);
                rd->len_nxtfrm = 0;
        }
-       fc = SDPCM_FCMASK_VALUE(&header[SDPCM_FRAMETAG_LEN]);
+       swheader = le32_to_cpu(*(__le32 *)(header + 4));
+       fc = swheader & SDPCM_FCMASK_MASK;
        if (bus->flowcontrol != fc) {
                if (~bus->flowcontrol & fc)
                        bus->sdcnt.fc_xoff++;
@@ -1153,7 +1153,7 @@ static int brcmf_sdio_hdparser(struct brcmf_sdio *bus, u8 *header,
                bus->sdcnt.fc_rcvd++;
                bus->flowcontrol = fc;
        }
-       tx_seq_max = SDPCM_WINDOW_VALUE(&header[SDPCM_FRAMETAG_LEN]);
+       tx_seq_max = (swheader & SDPCM_WINDOW_MASK) >> SDPCM_WINDOW_SHIFT;
        if ((u8)(tx_seq_max - bus->tx_seq) > 0x40) {
                brcmf_err("seq %d: max tx seq number error\n", rx_seq);
                tx_seq_max = bus->tx_seq + 2;
@@ -1163,18 +1163,40 @@ static int brcmf_sdio_hdparser(struct brcmf_sdio *bus, u8 *header,
        return 0;
 }
 
+static inline void brcmf_sdio_update_hwhdr(u8 *header, u16 frm_length)
+{
+       *(__le16 *)header = cpu_to_le16(frm_length);
+       *(((__le16 *)header) + 1) = cpu_to_le16(~frm_length);
+}
+
+static void brcmf_sdio_hdpack(struct brcmf_sdio *bus, u8 *header,
+                             struct brcmf_sdio_hdrinfo *hd_info)
+{
+       u32 sw_header;
+
+       brcmf_sdio_update_hwhdr(header, hd_info->len);
+
+       sw_header = bus->tx_seq;
+       sw_header |= (hd_info->channel << SDPCM_CHANNEL_SHIFT) &
+                    SDPCM_CHANNEL_MASK;
+       sw_header |= (hd_info->dat_offset << SDPCM_DOFFSET_SHIFT) &
+                    SDPCM_DOFFSET_MASK;
+       *(((__le32 *)header) + 1) = cpu_to_le32(sw_header);
+       *(((__le32 *)header) + 2) = 0;
+}
+
 static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
 {
        u16 dlen, totlen;
        u8 *dptr, num = 0;
-
+       u32 align = 0;
        u16 sublen;
        struct sk_buff *pfirst, *pnext;
 
        int errcode;
        u8 doff, sfdoff;
 
-       struct brcmf_sdio_read rd_new;
+       struct brcmf_sdio_hdrinfo rd_new;
 
        /* If packets, issue read(s) and send up packet chain */
        /* Return sequence numbers consumed? */
@@ -1182,6 +1204,11 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
        brcmf_dbg(SDIO, "start: glomd %p glom %p\n",
                  bus->glomd, skb_peek(&bus->glom));
 
+       if (bus->sdiodev->pdata)
+               align = bus->sdiodev->pdata->sd_sgentry_align;
+       if (align < 4)
+               align = 4;
+
        /* If there's a descriptor, generate the packet chain */
        if (bus->glomd) {
                pfirst = pnext = NULL;
@@ -1205,9 +1232,9 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
                                pnext = NULL;
                                break;
                        }
-                       if (sublen % BRCMF_SDALIGN) {
+                       if (sublen % align) {
                                brcmf_err("sublen %d not multiple of %d\n",
-                                         sublen, BRCMF_SDALIGN);
+                                         sublen, align);
                        }
                        totlen += sublen;
 
@@ -1220,7 +1247,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
                        }
 
                        /* Allocate/chain packet for next subframe */
-                       pnext = brcmu_pkt_buf_get_skb(sublen + BRCMF_SDALIGN);
+                       pnext = brcmu_pkt_buf_get_skb(sublen + align);
                        if (pnext == NULL) {
                                brcmf_err("bcm_pkt_buf_get_skb failed, num %d len %d\n",
                                          num, sublen);
@@ -1229,7 +1256,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
                        skb_queue_tail(&bus->glom, pnext);
 
                        /* Adhere to start alignment requirements */
-                       pkt_align(pnext, sublen, BRCMF_SDALIGN);
+                       pkt_align(pnext, sublen, align);
                }
 
                /* If all allocations succeeded, save packet chain
@@ -1305,8 +1332,8 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
                rd_new.seq_num = rxseq;
                rd_new.len = dlen;
                sdio_claim_host(bus->sdiodev->func[1]);
-               errcode = brcmf_sdio_hdparser(bus, pfirst->data, &rd_new,
-                                             BRCMF_SDIO_FT_SUPER);
+               errcode = brcmf_sdio_hdparse(bus, pfirst->data, &rd_new,
+                                            BRCMF_SDIO_FT_SUPER);
                sdio_release_host(bus->sdiodev->func[1]);
                bus->cur_read.len = rd_new.len_nxtfrm << 4;
 
@@ -1324,8 +1351,8 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
                        rd_new.len = pnext->len;
                        rd_new.seq_num = rxseq++;
                        sdio_claim_host(bus->sdiodev->func[1]);
-                       errcode = brcmf_sdio_hdparser(bus, pnext->data, &rd_new,
-                                                     BRCMF_SDIO_FT_SUB);
+                       errcode = brcmf_sdio_hdparse(bus, pnext->data, &rd_new,
+                                                    BRCMF_SDIO_FT_SUB);
                        sdio_release_host(bus->sdiodev->func[1]);
                        brcmf_dbg_hex_dump(BRCMF_GLOM_ON(),
                                           pnext->data, 32, "subframe:\n");
@@ -1357,7 +1384,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
                skb_queue_walk_safe(&bus->glom, pfirst, pnext) {
                        dptr = (u8 *) (pfirst->data);
                        sublen = get_unaligned_le16(dptr);
-                       doff = SDPCM_DOFFSET_VALUE(&dptr[SDPCM_FRAMETAG_LEN]);
+                       doff = brcmf_sdio_getdatoffset(&dptr[SDPCM_HWHDR_LEN]);
 
                        brcmf_dbg_hex_dump(BRCMF_BYTES_ON() && BRCMF_DATA_ON(),
                                           dptr, pfirst->len,
@@ -1535,7 +1562,7 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
        uint rxleft = 0;        /* Remaining number of frames allowed */
        int ret;                /* Return code from calls */
        uint rxcount = 0;       /* Total frames read */
-       struct brcmf_sdio_read *rd = &bus->cur_read, rd_new;
+       struct brcmf_sdio_hdrinfo *rd = &bus->cur_read, rd_new;
        u8 head_read = 0;
 
        brcmf_dbg(TRACE, "Enter\n");
@@ -1583,8 +1610,8 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
                                           bus->rxhdr, SDPCM_HDRLEN,
                                           "RxHdr:\n");
 
-                       if (brcmf_sdio_hdparser(bus, bus->rxhdr, rd,
-                                               BRCMF_SDIO_FT_NORMAL)) {
+                       if (brcmf_sdio_hdparse(bus, bus->rxhdr, rd,
+                                              BRCMF_SDIO_FT_NORMAL)) {
                                sdio_release_host(bus->sdiodev->func[1]);
                                if (!bus->rxpending)
                                        break;
@@ -1648,8 +1675,8 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
                        memcpy(bus->rxhdr, pkt->data, SDPCM_HDRLEN);
                        rd_new.seq_num = rd->seq_num;
                        sdio_claim_host(bus->sdiodev->func[1]);
-                       if (brcmf_sdio_hdparser(bus, bus->rxhdr, &rd_new,
-                                               BRCMF_SDIO_FT_NORMAL)) {
+                       if (brcmf_sdio_hdparse(bus, bus->rxhdr, &rd_new,
+                                              BRCMF_SDIO_FT_NORMAL)) {
                                rd->len = 0;
                                brcmu_pkt_buf_free_skb(pkt);
                        }
@@ -1693,7 +1720,7 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
 
                /* Save superframe descriptor and allocate packet frame */
                if (rd->channel == SDPCM_GLOM_CHANNEL) {
-                       if (SDPCM_GLOMDESC(&bus->rxhdr[SDPCM_FRAMETAG_LEN])) {
+                       if (SDPCM_GLOMDESC(&bus->rxhdr[SDPCM_HWHDR_LEN])) {
                                brcmf_dbg(GLOM, "glom descriptor, %d bytes:\n",
                                          rd->len);
                                brcmf_dbg_hex_dump(BRCMF_GLOM_ON(),
@@ -1759,85 +1786,168 @@ brcmf_sdbrcm_wait_event_wakeup(struct brcmf_sdio *bus)
        return;
 }
 
+/* flag marking a dummy skb added for DMA alignment requirement */
+#define DUMMY_SKB_FLAG         0x10000
+/* bit mask of data length chopped from the previous packet */
+#define DUMMY_SKB_CHOP_LEN_MASK        0xffff
+/**
+ * brcmf_sdio_txpkt_prep - packet preparation for transmit
+ * @bus: brcmf_sdio structure pointer
+ * @pktq: packet list pointer
+ * @chan: virtual channel to transmit the packet
+ *
+ * Processes to be applied to the packet
+ *     - Align data buffer pointer
+ *     - Align data buffer length
+ *     - Prepare header
+ * Return: negative value if there is error
+ */
+static int
+brcmf_sdio_txpkt_prep(struct brcmf_sdio *bus, struct sk_buff_head *pktq,
+                     uint chan)
+{
+       u16 head_pad, tail_pad, tail_chop, head_align, sg_align;
+       int ntail;
+       struct sk_buff *pkt_next, *pkt_new;
+       u8 *dat_buf;
+       unsigned blksize = bus->sdiodev->func[SDIO_FUNC_2]->cur_blksize;
+       struct brcmf_sdio_hdrinfo hd_info = {0};
+
+       /* SDIO ADMA requires at least 32 bit alignment */
+       head_align = 4;
+       sg_align = 4;
+       if (bus->sdiodev->pdata) {
+               head_align = bus->sdiodev->pdata->sd_head_align > 4 ?
+                            bus->sdiodev->pdata->sd_head_align : 4;
+               sg_align = bus->sdiodev->pdata->sd_sgentry_align > 4 ?
+                          bus->sdiodev->pdata->sd_sgentry_align : 4;
+       }
+       /* sg entry alignment should be a divisor of block size */
+       WARN_ON(blksize % sg_align);
+
+       pkt_next = pktq->next;
+       dat_buf = (u8 *)(pkt_next->data);
+
+       /* Check head padding */
+       head_pad = ((unsigned long)dat_buf % head_align);
+       if (head_pad) {
+               if (skb_headroom(pkt_next) < head_pad) {
+                       bus->sdiodev->bus_if->tx_realloc++;
+                       head_pad = 0;
+                       if (skb_cow(pkt_next, head_pad))
+                               return -ENOMEM;
+               }
+               skb_push(pkt_next, head_pad);
+               dat_buf = (u8 *)(pkt_next->data);
+               memset(dat_buf, 0, head_pad + bus->tx_hdrlen);
+       }
+
+       /* Check tail padding */
+       pkt_new = NULL;
+       tail_chop = pkt_next->len % sg_align;
+       tail_pad = sg_align - tail_chop;
+       tail_pad += blksize - (pkt_next->len + tail_pad) % blksize;
+       if (skb_tailroom(pkt_next) < tail_pad && pkt_next->len > blksize) {
+               pkt_new = brcmu_pkt_buf_get_skb(tail_pad + tail_chop);
+               if (pkt_new == NULL)
+                       return -ENOMEM;
+               memcpy(pkt_new->data,
+                      pkt_next->data + pkt_next->len - tail_chop,
+                      tail_chop);
+               *(u32 *)(pkt_new->cb) = DUMMY_SKB_FLAG + tail_chop;
+               skb_trim(pkt_next, pkt_next->len - tail_chop);
+               __skb_queue_after(pktq, pkt_next, pkt_new);
+       } else {
+               ntail = pkt_next->data_len + tail_pad -
+                       (pkt_next->end - pkt_next->tail);
+               if (skb_cloned(pkt_next) || ntail > 0)
+                       if (pskb_expand_head(pkt_next, 0, ntail, GFP_ATOMIC))
+                               return -ENOMEM;
+               if (skb_linearize(pkt_next))
+                       return -ENOMEM;
+               dat_buf = (u8 *)(pkt_next->data);
+               __skb_put(pkt_next, tail_pad);
+       }
+
+       /* Now prep the header */
+       if (pkt_new)
+               hd_info.len = pkt_next->len + tail_chop;
+       else
+               hd_info.len = pkt_next->len - tail_pad;
+       hd_info.channel = chan;
+       hd_info.dat_offset = head_pad + bus->tx_hdrlen;
+       brcmf_sdio_hdpack(bus, dat_buf, &hd_info);
+
+       if (BRCMF_BYTES_ON() &&
+           ((BRCMF_CTL_ON() && chan == SDPCM_CONTROL_CHANNEL) ||
+            (BRCMF_DATA_ON() && chan != SDPCM_CONTROL_CHANNEL)))
+               brcmf_dbg_hex_dump(true, pkt_next, hd_info.len, "Tx Frame:\n");
+       else if (BRCMF_HDRS_ON())
+               brcmf_dbg_hex_dump(true, pkt_next, head_pad + bus->tx_hdrlen,
+                                  "Tx Header:\n");
+
+       return 0;
+}
+
+/**
+ * brcmf_sdio_txpkt_postp - packet post processing for transmit
+ * @bus: brcmf_sdio structure pointer
+ * @pktq: packet list pointer
+ *
+ * Processes to be applied to the packet
+ *     - Remove head padding
+ *     - Remove tail padding
+ */
+static void
+brcmf_sdio_txpkt_postp(struct brcmf_sdio *bus, struct sk_buff_head *pktq)
+{
+       u8 *hdr;
+       u32 dat_offset;
+       u32 dummy_flags, chop_len;
+       struct sk_buff *pkt_next, *tmp, *pkt_prev;
+
+       skb_queue_walk_safe(pktq, pkt_next, tmp) {
+               dummy_flags = *(u32 *)(pkt_next->cb);
+               if (dummy_flags & DUMMY_SKB_FLAG) {
+                       chop_len = dummy_flags & DUMMY_SKB_CHOP_LEN_MASK;
+                       if (chop_len) {
+                               pkt_prev = pkt_next->prev;
+                               memcpy(pkt_prev->data + pkt_prev->len,
+                                      pkt_next->data, chop_len);
+                               skb_put(pkt_prev, chop_len);
+                       }
+                       __skb_unlink(pkt_next, pktq);
+                       brcmu_pkt_buf_free_skb(pkt_next);
+               } else {
+                       hdr = pkt_next->data + SDPCM_HWHDR_LEN;
+                       dat_offset = le32_to_cpu(*(__le32 *)hdr);
+                       dat_offset = (dat_offset & SDPCM_DOFFSET_MASK) >>
+                                    SDPCM_DOFFSET_SHIFT;
+                       skb_pull(pkt_next, dat_offset);
+               }
+       }
+}
+
 /* Writes a HW/SW header into the packet and sends it. */
 /* Assumes: (a) header space already there, (b) caller holds lock */
 static int brcmf_sdbrcm_txpkt(struct brcmf_sdio *bus, struct sk_buff *pkt,
                              uint chan)
 {
        int ret;
-       u8 *frame;
-       u16 len, pad = 0;
-       u32 swheader;
        int i;
+       struct sk_buff_head localq;
 
        brcmf_dbg(TRACE, "Enter\n");
 
-       frame = (u8 *) (pkt->data);
-
-       /* Add alignment padding, allocate new packet if needed */
-       pad = ((unsigned long)frame % BRCMF_SDALIGN);
-       if (pad) {
-               if (skb_headroom(pkt) < pad) {
-                       brcmf_dbg(INFO, "insufficient headroom %d for %d pad\n",
-                                 skb_headroom(pkt), pad);
-                       bus->sdiodev->bus_if->tx_realloc++;
-                       ret = skb_cow(pkt, BRCMF_SDALIGN);
-                       if (ret)
-                               goto done;
-                       pad = ((unsigned long)frame % BRCMF_SDALIGN);
-               }
-               skb_push(pkt, pad);
-               frame = (u8 *) (pkt->data);
-               memset(frame, 0, pad + SDPCM_HDRLEN);
-       }
-       /* precondition: pad < BRCMF_SDALIGN */
-
-       /* Hardware tag: 2 byte len followed by 2 byte ~len check (all LE) */
-       len = (u16) (pkt->len);
-       *(__le16 *) frame = cpu_to_le16(len);
-       *(((__le16 *) frame) + 1) = cpu_to_le16(~len);
-
-       /* Software tag: channel, sequence number, data offset */
-       swheader =
-           ((chan << SDPCM_CHANNEL_SHIFT) & SDPCM_CHANNEL_MASK) | bus->tx_seq |
-           (((pad +
-              SDPCM_HDRLEN) << SDPCM_DOFFSET_SHIFT) & SDPCM_DOFFSET_MASK);
-
-       *(((__le32 *) frame) + 1) = cpu_to_le32(swheader);
-       *(((__le32 *) frame) + 2) = 0;
-
-#ifdef DEBUG
-       tx_packets[pkt->priority]++;
-#endif
-
-       brcmf_dbg_hex_dump(BRCMF_BYTES_ON() &&
-                          ((BRCMF_CTL_ON() && chan == SDPCM_CONTROL_CHANNEL) ||
-                           (BRCMF_DATA_ON() && chan != SDPCM_CONTROL_CHANNEL)),
-                          frame, len, "Tx Frame:\n");
-       brcmf_dbg_hex_dump(!(BRCMF_BYTES_ON() &&
-                            ((BRCMF_CTL_ON() &&
-                              chan == SDPCM_CONTROL_CHANNEL) ||
-                             (BRCMF_DATA_ON() &&
-                              chan != SDPCM_CONTROL_CHANNEL))) &&
-                          BRCMF_HDRS_ON(),
-                          frame, min_t(u16, len, 16), "TxHdr:\n");
-
-       /* Raise len to next SDIO block to eliminate tail command */
-       if (bus->roundup && bus->blocksize && (len > bus->blocksize)) {
-               u16 pad = bus->blocksize - (len % bus->blocksize);
-               if ((pad <= bus->roundup) && (pad < bus->blocksize))
-                               len += pad;
-       } else if (len % BRCMF_SDALIGN) {
-               len += BRCMF_SDALIGN - (len % BRCMF_SDALIGN);
-       }
-
-       /* Some controllers have trouble with odd bytes -- round to even */
-       if (len & (ALIGNMENT - 1))
-                       len = roundup(len, ALIGNMENT);
+       __skb_queue_head_init(&localq);
+       __skb_queue_tail(&localq, pkt);
+       ret = brcmf_sdio_txpkt_prep(bus, &localq, chan);
+       if (ret)
+               goto done;
 
        sdio_claim_host(bus->sdiodev->func[1]);
        ret = brcmf_sdcard_send_pkt(bus->sdiodev, bus->sdiodev->sbwad,
-                                   SDIO_FUNC_2, F2SYNC, pkt);
+                                   SDIO_FUNC_2, F2SYNC, &localq);
        bus->sdcnt.f2txdata++;
 
        if (ret < 0) {
@@ -1865,11 +1975,11 @@ static int brcmf_sdbrcm_txpkt(struct brcmf_sdio *bus, struct sk_buff *pkt,
        }
        sdio_release_host(bus->sdiodev->func[1]);
        if (ret == 0)
-               bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQUENCE_WRAP;
+               bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQ_WRAP;
 
 done:
-       /* restore pkt buffer pointer before calling tx complete routine */
-       skb_pull(pkt, SDPCM_HDRLEN + pad);
+       brcmf_sdio_txpkt_postp(bus, &localq);
+       __skb_dequeue_tail(&localq);
        brcmf_txcomplete(bus->sdiodev->dev, pkt, ret == 0);
        return ret;
 }
@@ -1880,7 +1990,6 @@ static uint brcmf_sdbrcm_sendfromq(struct brcmf_sdio *bus, uint maxframes)
        u32 intstatus = 0;
        int ret = 0, prec_out;
        uint cnt = 0;
-       uint datalen;
        u8 tx_prec_map;
 
        brcmf_dbg(TRACE, "Enter\n");
@@ -1896,7 +2005,6 @@ static uint brcmf_sdbrcm_sendfromq(struct brcmf_sdio *bus, uint maxframes)
                        break;
                }
                spin_unlock_bh(&bus->txqlock);
-               datalen = pkt->len - SDPCM_HDRLEN;
 
                ret = brcmf_sdbrcm_txpkt(bus, pkt, SDPCM_DATA_CHANNEL);
 
@@ -2221,7 +2329,7 @@ static void brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
                        }
 
                } else {
-                       bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQUENCE_WRAP;
+                       bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQ_WRAP;
                }
                sdio_release_host(bus->sdiodev->func[1]);
                bus->ctrl_frame_stat = false;
@@ -2276,13 +2384,14 @@ static int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *pkt)
        struct brcmf_bus *bus_if = dev_get_drvdata(dev);
        struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
        struct brcmf_sdio *bus = sdiodev->bus;
+       ulong flags;
 
        brcmf_dbg(TRACE, "Enter\n");
 
        datalen = pkt->len;
 
        /* Add space for the header */
-       skb_push(pkt, SDPCM_HDRLEN);
+       skb_push(pkt, bus->tx_hdrlen);
        /* precondition: IS_ALIGNED((unsigned long)(pkt->data), 2) */
 
        prec = prio2prec((pkt->priority & PRIOMASK));
@@ -2293,10 +2402,9 @@ static int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *pkt)
        bus->sdcnt.fcqueued++;
 
        /* Priority based enq */
-       spin_lock_bh(&bus->txqlock);
+       spin_lock_irqsave(&bus->txqlock, flags);
        if (!brcmf_c_prec_enq(bus->sdiodev->dev, &bus->txq, pkt, prec)) {
-               skb_pull(pkt, SDPCM_HDRLEN);
-               brcmf_txcomplete(bus->sdiodev->dev, pkt, false);
+               skb_pull(pkt, bus->tx_hdrlen);
                brcmf_err("out of bus->txq !!!\n");
                ret = -ENOSR;
        } else {
@@ -2307,7 +2415,7 @@ static int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *pkt)
                bus->txoff = true;
                brcmf_txflowblock(bus->sdiodev->dev, true);
        }
-       spin_unlock_bh(&bus->txqlock);
+       spin_unlock_irqrestore(&bus->txqlock, flags);
 
 #ifdef DEBUG
        if (pktq_plen(&bus->txq, prec) > qcount[prec])
@@ -2436,7 +2544,7 @@ static int brcmf_tx_frame(struct brcmf_sdio *bus, u8 *frame, u16 len)
                return ret;
        }
 
-       bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQUENCE_WRAP;
+       bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQ_WRAP;
 
        return ret;
 }
@@ -2446,19 +2554,19 @@ brcmf_sdbrcm_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
 {
        u8 *frame;
        u16 len;
-       u32 swheader;
        uint retries = 0;
        u8 doff = 0;
        int ret = -1;
        struct brcmf_bus *bus_if = dev_get_drvdata(dev);
        struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
        struct brcmf_sdio *bus = sdiodev->bus;
+       struct brcmf_sdio_hdrinfo hd_info = {0};
 
        brcmf_dbg(TRACE, "Enter\n");
 
        /* Back the pointer to make a room for bus header */
-       frame = msg - SDPCM_HDRLEN;
-       len = (msglen += SDPCM_HDRLEN);
+       frame = msg - bus->tx_hdrlen;
+       len = (msglen += bus->tx_hdrlen);
 
        /* Add alignment padding (optional for ctl frames) */
        doff = ((unsigned long)frame % BRCMF_SDALIGN);
@@ -2466,10 +2574,10 @@ brcmf_sdbrcm_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
                frame -= doff;
                len += doff;
                msglen += doff;
-               memset(frame, 0, doff + SDPCM_HDRLEN);
+               memset(frame, 0, doff + bus->tx_hdrlen);
        }
        /* precondition: doff < BRCMF_SDALIGN */
-       doff += SDPCM_HDRLEN;
+       doff += bus->tx_hdrlen;
 
        /* Round send length to next SDIO block */
        if (bus->roundup && bus->blocksize && (len > bus->blocksize)) {
@@ -2491,18 +2599,10 @@ brcmf_sdbrcm_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
        brcmf_sdbrcm_bus_sleep(bus, false, false);
        sdio_release_host(bus->sdiodev->func[1]);
 
-       /* Hardware tag: 2 byte len followed by 2 byte ~len check (all LE) */
-       *(__le16 *) frame = cpu_to_le16((u16) msglen);
-       *(((__le16 *) frame) + 1) = cpu_to_le16(~msglen);
-
-       /* Software tag: channel, sequence number, data offset */
-       swheader =
-           ((SDPCM_CONTROL_CHANNEL << SDPCM_CHANNEL_SHIFT) &
-            SDPCM_CHANNEL_MASK)
-           | bus->tx_seq | ((doff << SDPCM_DOFFSET_SHIFT) &
-                            SDPCM_DOFFSET_MASK);
-       put_unaligned_le32(swheader, frame + SDPCM_FRAMETAG_LEN);
-       put_unaligned_le32(0, frame + SDPCM_FRAMETAG_LEN + sizeof(swheader));
+       hd_info.len = (u16)msglen;
+       hd_info.channel = SDPCM_CONTROL_CHANNEL;
+       hd_info.dat_offset = doff;
+       brcmf_sdio_hdpack(bus, frame, &hd_info);
 
        if (!data_ok(bus)) {
                brcmf_dbg(INFO, "No bus credit bus->tx_max %d, bus->tx_seq %d\n",
@@ -3733,7 +3833,7 @@ void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev)
        struct brcmf_sdio *bus;
        struct brcmf_bus_dcmd *dlst;
        u32 dngl_txglom;
-       u32 dngl_txglomalign;
+       u32 txglomalign = 0;
        u8 idx;
 
        brcmf_dbg(TRACE, "Enter\n");
@@ -3752,7 +3852,7 @@ void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev)
        bus->txbound = BRCMF_TXBOUND;
        bus->rxbound = BRCMF_RXBOUND;
        bus->txminmax = BRCMF_TXMINMAX;
-       bus->tx_seq = SDPCM_SEQUENCE_WRAP - 1;
+       bus->tx_seq = SDPCM_SEQ_WRAP - 1;
 
        INIT_WORK(&bus->datawork, brcmf_sdio_dataworker);
        bus->brcmf_wq = create_singlethread_workqueue("brcmf_wq");
@@ -3794,8 +3894,11 @@ void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev)
        bus->sdiodev->bus_if->chip = bus->ci->chip;
        bus->sdiodev->bus_if->chiprev = bus->ci->chiprev;
 
-       /* Attach to the brcmf/OS/network interface */
-       ret = brcmf_attach(SDPCM_RESERVE, bus->sdiodev->dev);
+       /* default sdio bus header length for tx packet */
+       bus->tx_hdrlen = SDPCM_HWHDR_LEN + SDPCM_SWHDR_LEN;
+
+       /* Attach to the common layer, reserve hdr space */
+       ret = brcmf_attach(bus->tx_hdrlen, bus->sdiodev->dev);
        if (ret != 0) {
                brcmf_err("brcmf_attach failed\n");
                goto fail;
@@ -3827,9 +3930,13 @@ void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev)
                        dlst->param_len = sizeof(u32);
                } else {
                        /* otherwise, set txglomalign */
-                       dngl_txglomalign = bus->sdiodev->bus_if->align;
+                       if (sdiodev->pdata)
+                               txglomalign = sdiodev->pdata->sd_sgentry_align;
+                       /* SDIO ADMA requires at least 32 bit alignment */
+                       if (txglomalign < 4)
+                               txglomalign = 4;
                        dlst->name = "bus:txglomalign";
-                       dlst->param = (char *)&dngl_txglomalign;
+                       dlst->param = (char *)&txglomalign;
                        dlst->param_len = sizeof(u32);
                }
                list_add(&dlst->list, &bus->sdiodev->bus_if->dcmd_list);
index 83ee53a7c76eae14073ef3e78850663879d71371..fad77dd2a3a543f511de0371eaf94ed4a1fc34f1 100644 (file)
@@ -185,6 +185,10 @@ static void brcmf_fweh_handle_if_event(struct brcmf_pub *drvr,
                  ifevent->action, ifevent->ifidx, ifevent->bssidx,
                  ifevent->flags, ifevent->role);
 
+       if (ifevent->flags & BRCMF_E_IF_FLAG_NOIF) {
+               brcmf_dbg(EVENT, "event can be ignored\n");
+               return;
+       }
        if (ifevent->ifidx >= BRCMF_MAX_IFS) {
                brcmf_err("invalid interface index: %u\n",
                          ifevent->ifidx);
index 665ef69e974b43c05ca802677fd6c36ce677baf9..ecabb04f33c3059c603513511cfd2679d23d6bb0 100644 (file)
@@ -69,4 +69,25 @@ struct brcmf_fil_bss_enable_le {
        __le32 enable;
 };
 
+/**
+ * struct brcmf_tdls_iovar_le - common structure for TDLS iovars.
+ *
+ * @ea: ether address of peer station.
+ * @mode: mode value depending on specific tdls iovar.
+ * @chanspec: channel specification.
+ * @pad: unused (for future use).
+ */
+struct brcmf_tdls_iovar_le {
+       u8 ea[ETH_ALEN];                /* Station address */
+       u8 mode;                        /* mode: depends on iovar */
+       __le16 chanspec;
+       __le32 pad;                     /* future */
+};
+
+enum brcmf_tdls_manual_ep_ops {
+       BRCMF_TDLS_MANUAL_EP_CREATE = 1,
+       BRCMF_TDLS_MANUAL_EP_DELETE = 3,
+       BRCMF_TDLS_MANUAL_EP_DISCOVERY = 6
+};
+
 #endif /* FWIL_TYPES_H_ */
index 29b1f24c2d0f92c86a55dade5e0e61a943004608..82f9140f3d35481766b317f4a31204fa7b4da6b7 100644 (file)
@@ -422,9 +422,12 @@ struct brcmf_fws_macdesc_table {
 
 struct brcmf_fws_info {
        struct brcmf_pub *drvr;
+       spinlock_t spinlock;
+       ulong flags;
        struct brcmf_fws_stats stats;
        struct brcmf_fws_hanger hanger;
        enum brcmf_fws_fcmode fcmode;
+       bool fw_signals;
        bool bcmc_credit_check;
        struct brcmf_fws_macdesc_table desc;
        struct workqueue_struct *fws_wq;
@@ -483,6 +486,18 @@ static int brcmf_fws_get_tlv_len(struct brcmf_fws_info *fws,
 }
 #undef BRCMF_FWS_TLV_DEF
 
+static void brcmf_fws_lock(struct brcmf_fws_info *fws)
+               __acquires(&fws->spinlock)
+{
+       spin_lock_irqsave(&fws->spinlock, fws->flags);
+}
+
+static void brcmf_fws_unlock(struct brcmf_fws_info *fws)
+               __releases(&fws->spinlock)
+{
+       spin_unlock_irqrestore(&fws->spinlock, fws->flags);
+}
+
 static bool brcmf_fws_ifidx_match(struct sk_buff *skb, void *arg)
 {
        u32 ifidx = brcmf_skb_if_flags_get_field(skb, INDEX);
@@ -869,8 +884,11 @@ static bool brcmf_fws_tim_update(struct brcmf_fws_info *fws,
                skcb->state = BRCMF_FWS_SKBSTATE_TIM;
                bus = fws->drvr->bus_if;
                err = brcmf_fws_hdrpush(fws, skb);
-               if (err == 0)
+               if (err == 0) {
+                       brcmf_fws_unlock(fws);
                        err = brcmf_bus_txdata(bus, skb);
+                       brcmf_fws_lock(fws);
+               }
                if (err)
                        brcmu_pkt_buf_free_skb(skb);
                return true;
@@ -905,26 +923,10 @@ static int brcmf_fws_rssi_indicate(struct brcmf_fws_info *fws, s8 rssi)
        return 0;
 }
 
-/* using macro so sparse checking does not complain
- * about locking imbalance.
- */
-#define brcmf_fws_lock(drvr, flags)                            \
-do {                                                           \
-       flags = 0;                                              \
-       spin_lock_irqsave(&((drvr)->fws_spinlock), (flags));    \
-} while (0)
-
-/* using macro so sparse checking does not complain
- * about locking imbalance.
- */
-#define brcmf_fws_unlock(drvr, flags) \
-       spin_unlock_irqrestore(&((drvr)->fws_spinlock), (flags))
-
 static
 int brcmf_fws_macdesc_indicate(struct brcmf_fws_info *fws, u8 type, u8 *data)
 {
        struct brcmf_fws_mac_descriptor *entry, *existing;
-       ulong flags;
        u8 mac_handle;
        u8 ifidx;
        u8 *addr;
@@ -938,10 +940,10 @@ int brcmf_fws_macdesc_indicate(struct brcmf_fws_info *fws, u8 type, u8 *data)
                if (entry->occupied) {
                        brcmf_dbg(TRACE, "deleting %s mac %pM\n",
                                  entry->name, addr);
-                       brcmf_fws_lock(fws->drvr, flags);
+                       brcmf_fws_lock(fws);
                        brcmf_fws_macdesc_cleanup(fws, entry, -1);
                        brcmf_fws_macdesc_deinit(entry);
-                       brcmf_fws_unlock(fws->drvr, flags);
+                       brcmf_fws_unlock(fws);
                } else
                        fws->stats.mac_update_failed++;
                return 0;
@@ -950,13 +952,13 @@ int brcmf_fws_macdesc_indicate(struct brcmf_fws_info *fws, u8 type, u8 *data)
        existing = brcmf_fws_macdesc_lookup(fws, addr);
        if (IS_ERR(existing)) {
                if (!entry->occupied) {
-                       brcmf_fws_lock(fws->drvr, flags);
+                       brcmf_fws_lock(fws);
                        entry->mac_handle = mac_handle;
                        brcmf_fws_macdesc_init(entry, addr, ifidx);
                        brcmf_fws_macdesc_set_name(fws, entry);
                        brcmu_pktq_init(&entry->psq, BRCMF_FWS_PSQ_PREC_COUNT,
                                        BRCMF_FWS_PSQ_LEN);
-                       brcmf_fws_unlock(fws->drvr, flags);
+                       brcmf_fws_unlock(fws);
                        brcmf_dbg(TRACE, "add %s mac %pM\n", entry->name, addr);
                } else {
                        fws->stats.mac_update_failed++;
@@ -964,13 +966,13 @@ int brcmf_fws_macdesc_indicate(struct brcmf_fws_info *fws, u8 type, u8 *data)
        } else {
                if (entry != existing) {
                        brcmf_dbg(TRACE, "copy mac %s\n", existing->name);
-                       brcmf_fws_lock(fws->drvr, flags);
+                       brcmf_fws_lock(fws);
                        memcpy(entry, existing,
                               offsetof(struct brcmf_fws_mac_descriptor, psq));
                        entry->mac_handle = mac_handle;
                        brcmf_fws_macdesc_deinit(existing);
                        brcmf_fws_macdesc_set_name(fws, entry);
-                       brcmf_fws_unlock(fws->drvr, flags);
+                       brcmf_fws_unlock(fws);
                        brcmf_dbg(TRACE, "relocate %s mac %pM\n", entry->name,
                                  addr);
                } else {
@@ -986,7 +988,6 @@ static int brcmf_fws_macdesc_state_indicate(struct brcmf_fws_info *fws,
                                            u8 type, u8 *data)
 {
        struct brcmf_fws_mac_descriptor *entry;
-       ulong flags;
        u8 mac_handle;
        int ret;
 
@@ -996,7 +997,7 @@ static int brcmf_fws_macdesc_state_indicate(struct brcmf_fws_info *fws,
                fws->stats.mac_ps_update_failed++;
                return -ESRCH;
        }
-       brcmf_fws_lock(fws->drvr, flags);
+       brcmf_fws_lock(fws);
        /* a state update should wipe old credits */
        entry->requested_credit = 0;
        entry->requested_packet = 0;
@@ -1011,7 +1012,7 @@ static int brcmf_fws_macdesc_state_indicate(struct brcmf_fws_info *fws,
                brcmf_fws_tim_update(fws, entry, BRCMF_FWS_FIFO_AC_VO, true);
                ret = BRCMF_FWS_RET_OK_NOSCHEDULE;
        }
-       brcmf_fws_unlock(fws->drvr, flags);
+       brcmf_fws_unlock(fws);
        return ret;
 }
 
@@ -1019,7 +1020,6 @@ static int brcmf_fws_interface_state_indicate(struct brcmf_fws_info *fws,
                                              u8 type, u8 *data)
 {
        struct brcmf_fws_mac_descriptor *entry;
-       ulong flags;
        u8 ifidx;
        int ret;
 
@@ -1038,7 +1038,7 @@ static int brcmf_fws_interface_state_indicate(struct brcmf_fws_info *fws,
 
        brcmf_dbg(TRACE, "%s (%d): %s\n", brcmf_fws_get_tlv_name(type), type,
                  entry->name);
-       brcmf_fws_lock(fws->drvr, flags);
+       brcmf_fws_lock(fws);
        switch (type) {
        case BRCMF_FWS_TYPE_INTERFACE_OPEN:
                entry->state = BRCMF_FWS_STATE_OPEN;
@@ -1050,10 +1050,10 @@ static int brcmf_fws_interface_state_indicate(struct brcmf_fws_info *fws,
                break;
        default:
                ret = -EINVAL;
-               brcmf_fws_unlock(fws->drvr, flags);
+               brcmf_fws_unlock(fws);
                goto fail;
        }
-       brcmf_fws_unlock(fws->drvr, flags);
+       brcmf_fws_unlock(fws);
        return ret;
 
 fail:
@@ -1065,7 +1065,6 @@ static int brcmf_fws_request_indicate(struct brcmf_fws_info *fws, u8 type,
                                      u8 *data)
 {
        struct brcmf_fws_mac_descriptor *entry;
-       ulong flags;
 
        entry = &fws->desc.nodes[data[1] & 0x1F];
        if (!entry->occupied) {
@@ -1079,14 +1078,14 @@ static int brcmf_fws_request_indicate(struct brcmf_fws_info *fws, u8 type,
        brcmf_dbg(TRACE, "%s (%d): %s cnt %d bmp %d\n",
                  brcmf_fws_get_tlv_name(type), type, entry->name,
                  data[0], data[2]);
-       brcmf_fws_lock(fws->drvr, flags);
+       brcmf_fws_lock(fws);
        if (type == BRCMF_FWS_TYPE_MAC_REQUEST_CREDIT)
                entry->requested_credit = data[0];
        else
                entry->requested_packet = data[0];
 
        entry->ac_bitmap = data[2];
-       brcmf_fws_unlock(fws->drvr, flags);
+       brcmf_fws_unlock(fws);
        return BRCMF_FWS_RET_OK_SCHEDULE;
 }
 
@@ -1160,7 +1159,8 @@ static void brcmf_fws_return_credits(struct brcmf_fws_info *fws,
 static void brcmf_fws_schedule_deq(struct brcmf_fws_info *fws)
 {
        /* only schedule dequeue when there are credits for delayed traffic */
-       if (fws->fifo_credit_map & fws->fifo_delay_map)
+       if ((fws->fifo_credit_map & fws->fifo_delay_map) ||
+           (!brcmf_fws_fc_active(fws) && fws->fifo_delay_map))
                queue_work(fws->fws_wq, &fws->fws_dequeue_work);
 }
 
@@ -1383,7 +1383,6 @@ brcmf_fws_txs_process(struct brcmf_fws_info *fws, u8 flags, u32 hslot,
 static int brcmf_fws_fifocreditback_indicate(struct brcmf_fws_info *fws,
                                             u8 *data)
 {
-       ulong flags;
        int i;
 
        if (fws->fcmode != BRCMF_FWS_FCMODE_EXPLICIT_CREDIT) {
@@ -1392,19 +1391,18 @@ static int brcmf_fws_fifocreditback_indicate(struct brcmf_fws_info *fws,
        }
 
        brcmf_dbg(DATA, "enter: data %pM\n", data);
-       brcmf_fws_lock(fws->drvr, flags);
+       brcmf_fws_lock(fws);
        for (i = 0; i < BRCMF_FWS_FIFO_COUNT; i++)
                brcmf_fws_return_credits(fws, i, data[i]);
 
        brcmf_dbg(DATA, "map: credit %x delay %x\n", fws->fifo_credit_map,
                  fws->fifo_delay_map);
-       brcmf_fws_unlock(fws->drvr, flags);
+       brcmf_fws_unlock(fws);
        return BRCMF_FWS_RET_OK_SCHEDULE;
 }
 
 static int brcmf_fws_txstatus_indicate(struct brcmf_fws_info *fws, u8 *data)
 {
-       ulong lflags;
        __le32 status_le;
        u32 status;
        u32 hslot;
@@ -1418,9 +1416,9 @@ static int brcmf_fws_txstatus_indicate(struct brcmf_fws_info *fws, u8 *data)
        hslot = brcmf_txstatus_get_field(status, HSLOT);
        genbit = brcmf_txstatus_get_field(status, GENERATION);
 
-       brcmf_fws_lock(fws->drvr, lflags);
+       brcmf_fws_lock(fws);
        brcmf_fws_txs_process(fws, flags, hslot, genbit);
-       brcmf_fws_unlock(fws->drvr, lflags);
+       brcmf_fws_unlock(fws);
        return BRCMF_FWS_RET_OK_NOSCHEDULE;
 }
 
@@ -1440,7 +1438,6 @@ static int brcmf_fws_notify_credit_map(struct brcmf_if *ifp,
 {
        struct brcmf_fws_info *fws = ifp->drvr->fws;
        int i;
-       ulong flags;
        u8 *credits = data;
 
        if (e->datalen < BRCMF_FWS_FIFO_COUNT) {
@@ -1453,7 +1450,7 @@ static int brcmf_fws_notify_credit_map(struct brcmf_if *ifp,
        fws->creditmap_received = true;
 
        brcmf_dbg(TRACE, "enter: credits %pM\n", credits);
-       brcmf_fws_lock(ifp->drvr, flags);
+       brcmf_fws_lock(fws);
        for (i = 0; i < ARRAY_SIZE(fws->fifo_credit); i++) {
                if (*credits)
                        fws->fifo_credit_map |= 1 << i;
@@ -1462,7 +1459,7 @@ static int brcmf_fws_notify_credit_map(struct brcmf_if *ifp,
                fws->fifo_credit[i] = *credits++;
        }
        brcmf_fws_schedule_deq(fws);
-       brcmf_fws_unlock(ifp->drvr, flags);
+       brcmf_fws_unlock(fws);
        return 0;
 }
 
@@ -1471,18 +1468,18 @@ static int brcmf_fws_notify_bcmc_credit_support(struct brcmf_if *ifp,
                                                void *data)
 {
        struct brcmf_fws_info *fws = ifp->drvr->fws;
-       ulong flags;
 
-       brcmf_fws_lock(ifp->drvr, flags);
+       brcmf_fws_lock(fws);
        if (fws)
                fws->bcmc_credit_check = true;
-       brcmf_fws_unlock(ifp->drvr, flags);
+       brcmf_fws_unlock(fws);
        return 0;
 }
 
 int brcmf_fws_hdrpull(struct brcmf_pub *drvr, int ifidx, s16 signal_len,
                      struct sk_buff *skb)
 {
+       struct brcmf_skb_reorder_data *rd;
        struct brcmf_fws_info *fws = drvr->fws;
        u8 *signal_data;
        s16 data_len;
@@ -1497,8 +1494,10 @@ int brcmf_fws_hdrpull(struct brcmf_pub *drvr, int ifidx, s16 signal_len,
 
        WARN_ON(signal_len > skb->len);
 
+       if (!signal_len)
+               return 0;
        /* if flow control disabled, skip to packet data and leave */
-       if (!signal_len || !drvr->fw_signals) {
+       if (!fws->fw_signals) {
                skb_pull(skb, signal_len);
                return 0;
        }
@@ -1536,9 +1535,12 @@ int brcmf_fws_hdrpull(struct brcmf_pub *drvr, int ifidx, s16 signal_len,
 
                err = BRCMF_FWS_RET_OK_NOSCHEDULE;
                switch (type) {
-               case BRCMF_FWS_TYPE_HOST_REORDER_RXPKTS:
                case BRCMF_FWS_TYPE_COMP_TXSTATUS:
                        break;
+               case BRCMF_FWS_TYPE_HOST_REORDER_RXPKTS:
+                       rd = (struct brcmf_skb_reorder_data *)skb->cb;
+                       rd->reorder = data;
+                       break;
                case BRCMF_FWS_TYPE_MACDESC_ADD:
                case BRCMF_FWS_TYPE_MACDESC_DEL:
                        brcmf_fws_macdesc_indicate(fws, type, data);
@@ -1694,17 +1696,22 @@ static int brcmf_fws_commit_skb(struct brcmf_fws_info *fws, int fifo,
                return PTR_ERR(entry);
 
        brcmf_fws_precommit_skb(fws, fifo, skb);
+       entry->transit_count++;
+       if (entry->suppressed)
+               entry->suppr_transit_count++;
+       brcmf_fws_unlock(fws);
        rc = brcmf_bus_txdata(bus, skb);
+       brcmf_fws_lock(fws);
        brcmf_dbg(DATA, "%s flags %X htod %X bus_tx %d\n", entry->name,
                  skcb->if_flags, skcb->htod, rc);
        if (rc < 0) {
+               entry->transit_count--;
+               if (entry->suppressed)
+                       entry->suppr_transit_count--;
                brcmf_proto_hdrpull(fws->drvr, false, &ifidx, skb);
                goto rollback;
        }
 
-       entry->transit_count++;
-       if (entry->suppressed)
-               entry->suppr_transit_count++;
        fws->stats.pkt2bus++;
        fws->stats.send_pkts[fifo]++;
        if (brcmf_skb_if_flags_get_field(skb, REQUESTED))
@@ -1741,11 +1748,11 @@ int brcmf_fws_process_skb(struct brcmf_if *ifp, struct sk_buff *skb)
        struct brcmf_fws_info *fws = drvr->fws;
        struct brcmf_skbuff_cb *skcb = brcmf_skbcb(skb);
        struct ethhdr *eh = (struct ethhdr *)(skb->data);
-       ulong flags;
        int fifo = BRCMF_FWS_FIFO_BCMC;
        bool multicast = is_multicast_ether_addr(eh->h_dest);
        bool pae = eh->h_proto == htons(ETH_P_PAE);
 
+       brcmf_dbg(DATA, "tx proto=0x%X\n", ntohs(eh->h_proto));
        /* determine the priority */
        if (!skb->priority)
                skb->priority = cfg80211_classify8021d(skb);
@@ -1754,14 +1761,6 @@ int brcmf_fws_process_skb(struct brcmf_if *ifp, struct sk_buff *skb)
        if (pae)
                atomic_inc(&ifp->pend_8021x_cnt);
 
-       if (!brcmf_fws_fc_active(fws)) {
-               /* If the protocol uses a data header, apply it */
-               brcmf_proto_hdrpush(drvr, ifp->ifidx, 0, skb);
-
-               /* Use bus module to send data frame */
-               return brcmf_bus_txdata(drvr->bus_if, skb);
-       }
-
        /* set control buffer information */
        skcb->if_flags = 0;
        skcb->state = BRCMF_FWS_SKBSTATE_NEW;
@@ -1769,7 +1768,7 @@ int brcmf_fws_process_skb(struct brcmf_if *ifp, struct sk_buff *skb)
        if (!multicast)
                fifo = brcmf_fws_prio2fifo[skb->priority];
 
-       brcmf_fws_lock(drvr, flags);
+       brcmf_fws_lock(fws);
        if (fifo != BRCMF_FWS_FIFO_AC_BE && fifo < BRCMF_FWS_FIFO_BCMC)
                fws->borrow_defer_timestamp = jiffies +
                                              BRCMF_FWS_BORROW_DEFER_PERIOD;
@@ -1789,7 +1788,7 @@ int brcmf_fws_process_skb(struct brcmf_if *ifp, struct sk_buff *skb)
                }
                brcmu_pkt_buf_free_skb(skb);
        }
-       brcmf_fws_unlock(drvr, flags);
+       brcmf_fws_unlock(fws);
        return 0;
 }
 
@@ -1809,7 +1808,7 @@ void brcmf_fws_add_interface(struct brcmf_if *ifp)
        struct brcmf_fws_info *fws = ifp->drvr->fws;
        struct brcmf_fws_mac_descriptor *entry;
 
-       if (!ifp->ndev || !ifp->drvr->fw_signals)
+       if (!ifp->ndev)
                return;
 
        entry = &fws->desc.iface[ifp->ifidx];
@@ -1824,31 +1823,54 @@ void brcmf_fws_add_interface(struct brcmf_if *ifp)
 void brcmf_fws_del_interface(struct brcmf_if *ifp)
 {
        struct brcmf_fws_mac_descriptor *entry = ifp->fws_desc;
-       ulong flags;
 
        if (!entry)
                return;
 
-       brcmf_fws_lock(ifp->drvr, flags);
+       brcmf_fws_lock(ifp->drvr->fws);
        ifp->fws_desc = NULL;
        brcmf_dbg(TRACE, "deleting %s\n", entry->name);
        brcmf_fws_macdesc_deinit(entry);
        brcmf_fws_cleanup(ifp->drvr->fws, ifp->ifidx);
-       brcmf_fws_unlock(ifp->drvr, flags);
+       brcmf_fws_unlock(ifp->drvr->fws);
 }
 
 static void brcmf_fws_dequeue_worker(struct work_struct *worker)
 {
        struct brcmf_fws_info *fws;
+       struct brcmf_pub *drvr;
        struct sk_buff *skb;
-       ulong flags;
        int fifo;
+       u32 hslot;
+       u32 ifidx;
+       int ret;
 
        fws = container_of(worker, struct brcmf_fws_info, fws_dequeue_work);
+       drvr = fws->drvr;
 
-       brcmf_fws_lock(fws->drvr, flags);
+       brcmf_fws_lock(fws);
        for (fifo = BRCMF_FWS_FIFO_BCMC; fifo >= 0 && !fws->bus_flow_blocked;
             fifo--) {
+               if (!brcmf_fws_fc_active(fws)) {
+                       while ((skb = brcmf_fws_deq(fws, fifo)) != NULL) {
+                               hslot = brcmf_skb_htod_tag_get_field(skb,
+                                                                    HSLOT);
+                               brcmf_fws_hanger_poppkt(&fws->hanger, hslot,
+                                                       &skb, true);
+                               ifidx = brcmf_skb_if_flags_get_field(skb,
+                                                                    INDEX);
+                               brcmf_proto_hdrpush(drvr, ifidx, 0, skb);
+                               /* Use bus module to send data frame */
+                               brcmf_fws_unlock(fws);
+                               ret = brcmf_bus_txdata(drvr->bus_if, skb);
+                               brcmf_fws_lock(fws);
+                               if (ret < 0)
+                                       brcmf_txfinalize(drvr, skb, false);
+                               if (fws->bus_flow_blocked)
+                                       break;
+                       }
+                       continue;
+               }
                while ((fws->fifo_credit[fifo]) || ((!fws->bcmc_credit_check) &&
                       (fifo == BRCMF_FWS_FIFO_BCMC))) {
                        skb = brcmf_fws_deq(fws, fifo);
@@ -1876,42 +1898,43 @@ static void brcmf_fws_dequeue_worker(struct work_struct *worker)
                        }
                }
        }
-       brcmf_fws_unlock(fws->drvr, flags);
+       brcmf_fws_unlock(fws);
 }
 
 int brcmf_fws_init(struct brcmf_pub *drvr)
 {
+       struct brcmf_fws_info *fws;
        u32 tlv = BRCMF_FWS_FLAGS_RSSI_SIGNALS;
        int rc;
 
-       if (!drvr->fw_signals)
-               return 0;
-
-       spin_lock_init(&drvr->fws_spinlock);
-
        drvr->fws = kzalloc(sizeof(*(drvr->fws)), GFP_KERNEL);
        if (!drvr->fws) {
                rc = -ENOMEM;
                goto fail;
        }
 
+       fws = drvr->fws;
+
+       spin_lock_init(&fws->spinlock);
+
        /* set linkage back */
-       drvr->fws->drvr = drvr;
-       drvr->fws->fcmode = fcmode;
+       fws->drvr = drvr;
+       fws->fcmode = fcmode;
 
-       drvr->fws->fws_wq = create_singlethread_workqueue("brcmf_fws_wq");
-       if (drvr->fws->fws_wq == NULL) {
+       fws->fws_wq = create_singlethread_workqueue("brcmf_fws_wq");
+       if (fws->fws_wq == NULL) {
                brcmf_err("workqueue creation failed\n");
                rc = -EBADF;
                goto fail;
        }
-       INIT_WORK(&drvr->fws->fws_dequeue_work, brcmf_fws_dequeue_worker);
+       INIT_WORK(&fws->fws_dequeue_work, brcmf_fws_dequeue_worker);
 
        /* enable firmware signalling if fcmode active */
-       if (drvr->fws->fcmode != BRCMF_FWS_FCMODE_NONE)
+       if (fws->fcmode != BRCMF_FWS_FCMODE_NONE)
                tlv |= BRCMF_FWS_FLAGS_XONXOFF_SIGNALS |
                       BRCMF_FWS_FLAGS_CREDIT_STATUS_SIGNALS |
-                      BRCMF_FWS_FLAGS_HOST_PROPTXSTATUS_ACTIVE;
+                      BRCMF_FWS_FLAGS_HOST_PROPTXSTATUS_ACTIVE |
+                      BRCMF_FWS_FLAGS_HOST_RXREORDER_ACTIVE;
 
        rc = brcmf_fweh_register(drvr, BRCMF_E_FIFO_CREDIT_MAP,
                                 brcmf_fws_notify_credit_map);
@@ -1927,31 +1950,33 @@ int brcmf_fws_init(struct brcmf_pub *drvr)
                goto fail;
        }
 
-       /* setting the iovar may fail if feature is unsupported
+       /* Setting the iovar may fail if feature is unsupported
         * so leave the rc as is so driver initialization can
-        * continue.
+        * continue. Set mode back to none indicating not enabled.
         */
+       fws->fw_signals = true;
        if (brcmf_fil_iovar_int_set(drvr->iflist[0], "tlv", tlv)) {
                brcmf_err("failed to set bdcv2 tlv signaling\n");
-               goto fail_event;
+               fws->fcmode = BRCMF_FWS_FCMODE_NONE;
+               fws->fw_signals = false;
        }
 
-       brcmf_fws_hanger_init(&drvr->fws->hanger);
-       brcmf_fws_macdesc_init(&drvr->fws->desc.other, NULL, 0);
-       brcmf_fws_macdesc_set_name(drvr->fws, &drvr->fws->desc.other);
-       brcmu_pktq_init(&drvr->fws->desc.other.psq, BRCMF_FWS_PSQ_PREC_COUNT,
+       if (brcmf_fil_iovar_int_set(drvr->iflist[0], "ampdu_hostreorder", 1))
+               brcmf_dbg(INFO, "enabling AMPDU host-reorder failed\n");
+
+       brcmf_fws_hanger_init(&fws->hanger);
+       brcmf_fws_macdesc_init(&fws->desc.other, NULL, 0);
+       brcmf_fws_macdesc_set_name(fws, &fws->desc.other);
+       brcmu_pktq_init(&fws->desc.other.psq, BRCMF_FWS_PSQ_PREC_COUNT,
                        BRCMF_FWS_PSQ_LEN);
 
        /* create debugfs file for statistics */
-       brcmf_debugfs_create_fws_stats(drvr, &drvr->fws->stats);
+       brcmf_debugfs_create_fws_stats(drvr, &fws->stats);
 
        brcmf_dbg(INFO, "%s bdcv2 tlv signaling [%x]\n",
-                 drvr->fw_signals ? "enabled" : "disabled", tlv);
+                 fws->fw_signals ? "enabled" : "disabled", tlv);
        return 0;
 
-fail_event:
-       brcmf_fweh_unregister(drvr, BRCMF_E_BCMC_CREDIT_SUPPORT);
-       brcmf_fweh_unregister(drvr, BRCMF_E_FIFO_CREDIT_MAP);
 fail:
        brcmf_fws_deinit(drvr);
        return rc;
@@ -1960,24 +1985,18 @@ fail:
 void brcmf_fws_deinit(struct brcmf_pub *drvr)
 {
        struct brcmf_fws_info *fws = drvr->fws;
-       ulong flags;
 
        if (!fws)
                return;
 
-       /* disable firmware signalling entirely
-        * to avoid using the workqueue.
-        */
-       drvr->fw_signals = false;
-
        if (drvr->fws->fws_wq)
                destroy_workqueue(drvr->fws->fws_wq);
 
        /* cleanup */
-       brcmf_fws_lock(drvr, flags);
+       brcmf_fws_lock(fws);
        brcmf_fws_cleanup(fws, -1);
        drvr->fws = NULL;
-       brcmf_fws_unlock(drvr, flags);
+       brcmf_fws_unlock(fws);
 
        /* free top structure */
        kfree(fws);
@@ -1985,7 +2004,7 @@ void brcmf_fws_deinit(struct brcmf_pub *drvr)
 
 bool brcmf_fws_fc_active(struct brcmf_fws_info *fws)
 {
-       if (!fws)
+       if (!fws->creditmap_received)
                return false;
 
        return fws->fcmode != BRCMF_FWS_FCMODE_NONE;
@@ -1993,17 +2012,16 @@ bool brcmf_fws_fc_active(struct brcmf_fws_info *fws)
 
 void brcmf_fws_bustxfail(struct brcmf_fws_info *fws, struct sk_buff *skb)
 {
-       ulong flags;
        u32 hslot;
 
        if (brcmf_skbcb(skb)->state == BRCMF_FWS_SKBSTATE_TIM) {
                brcmu_pkt_buf_free_skb(skb);
                return;
        }
-       brcmf_fws_lock(fws->drvr, flags);
+       brcmf_fws_lock(fws);
        hslot = brcmf_skb_htod_tag_get_field(skb, HSLOT);
        brcmf_fws_txs_process(fws, BRCMF_FWS_TXSTATUS_HOST_TOSSED, hslot, 0);
-       brcmf_fws_unlock(fws->drvr, flags);
+       brcmf_fws_unlock(fws);
 }
 
 void brcmf_fws_bus_blocked(struct brcmf_pub *drvr, bool flow_blocked)
index 09786a539950ed9738f6e7d8c3eea1230bb119d5..2b5407f002e53bf90b8e320d0b54384b2a200f9b 100644 (file)
@@ -208,7 +208,7 @@ extern int brcmf_sdio_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
  */
 extern int
 brcmf_sdcard_send_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
-                     uint flags, struct sk_buff *pkt);
+                     uint flags, struct sk_buff_head *pktq);
 extern int
 brcmf_sdcard_send_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
                      uint flags, u8 *buf, uint nbytes);
index 322cadc51deddcbfe8d7c560dbc93669b167b574..39e01a7c8556f2ce022bd5b8c545c2e2a615af5d 100644 (file)
@@ -614,7 +614,6 @@ static int brcmf_usb_tx(struct device *dev, struct sk_buff *skb)
        return 0;
 
 fail:
-       brcmf_txcomplete(dev, skb, false);
        return ret;
 }
 
index 7fa71f73cfe89c5bfb5f2fca8e1e83c4d7124045..571f013cebbb0d0a80f32e2351975b659d7d53d1 100644 (file)
@@ -3155,7 +3155,9 @@ static int brcmf_cfg80211_sched_scan_stop(struct wiphy *wiphy,
 }
 
 #ifdef CONFIG_NL80211_TESTMODE
-static int brcmf_cfg80211_testmode(struct wiphy *wiphy, void *data, int len)
+static int brcmf_cfg80211_testmode(struct wiphy *wiphy,
+                                  struct wireless_dev *wdev,
+                                  void *data, int len)
 {
        struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
        struct net_device *ndev = cfg_to_ndev(cfg);
@@ -4126,6 +4128,53 @@ static void brcmf_cfg80211_crit_proto_stop(struct wiphy *wiphy,
        clear_bit(BRCMF_SCAN_STATUS_SUPPRESS, &cfg->scan_status);
 }
 
+static int brcmf_convert_nl80211_tdls_oper(enum nl80211_tdls_operation oper)
+{
+       int ret;
+
+       switch (oper) {
+       case NL80211_TDLS_DISCOVERY_REQ:
+               ret = BRCMF_TDLS_MANUAL_EP_DISCOVERY;
+               break;
+       case NL80211_TDLS_SETUP:
+               ret = BRCMF_TDLS_MANUAL_EP_CREATE;
+               break;
+       case NL80211_TDLS_TEARDOWN:
+               ret = BRCMF_TDLS_MANUAL_EP_DELETE;
+               break;
+       default:
+               brcmf_err("unsupported operation: %d\n", oper);
+               ret = -EOPNOTSUPP;
+       }
+       return ret;
+}
+
+static int brcmf_cfg80211_tdls_oper(struct wiphy *wiphy,
+                                   struct net_device *ndev, u8 *peer,
+                                   enum nl80211_tdls_operation oper)
+{
+       struct brcmf_if *ifp;
+       struct brcmf_tdls_iovar_le info;
+       int ret = 0;
+
+       ret = brcmf_convert_nl80211_tdls_oper(oper);
+       if (ret < 0)
+               return ret;
+
+       ifp = netdev_priv(ndev);
+       memset(&info, 0, sizeof(info));
+       info.mode = (u8)ret;
+       if (peer)
+               memcpy(info.ea, peer, ETH_ALEN);
+
+       ret = brcmf_fil_iovar_data_set(ifp, "tdls_endpoint",
+                                      &info, sizeof(info));
+       if (ret < 0)
+               brcmf_err("tdls_endpoint iovar failed: ret=%d\n", ret);
+
+       return ret;
+}
+
 static struct cfg80211_ops wl_cfg80211_ops = {
        .add_virtual_intf = brcmf_cfg80211_add_iface,
        .del_virtual_intf = brcmf_cfg80211_del_iface,
@@ -4164,9 +4213,8 @@ static struct cfg80211_ops wl_cfg80211_ops = {
        .stop_p2p_device = brcmf_p2p_stop_device,
        .crit_proto_start = brcmf_cfg80211_crit_proto_start,
        .crit_proto_stop = brcmf_cfg80211_crit_proto_stop,
-#ifdef CONFIG_NL80211_TESTMODE
-       .testmode_cmd = brcmf_cfg80211_testmode
-#endif
+       .tdls_oper = brcmf_cfg80211_tdls_oper,
+       CFG80211_TESTMODE_CMD(brcmf_cfg80211_testmode)
 };
 
 static s32 brcmf_nl80211_iftype_to_mode(enum nl80211_iftype type)
@@ -4287,7 +4335,8 @@ static struct wiphy *brcmf_setup_wiphy(struct device *phydev)
        wiphy->n_cipher_suites = ARRAY_SIZE(__wl_cipher_suites);
        wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT |
                        WIPHY_FLAG_OFFCHAN_TX |
-                       WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
+                       WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
+                       WIPHY_FLAG_SUPPORTS_TDLS;
        wiphy->mgmt_stypes = brcmf_txrx_stypes;
        wiphy->max_remain_on_channel_duration = 5000;
        brcmf_wiphy_pno_params(wiphy);
@@ -4908,6 +4957,12 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
                goto cfg80211_p2p_attach_out;
        }
 
+       err = brcmf_fil_iovar_int_set(ifp, "tdls_enable", 1);
+       if (err) {
+               brcmf_dbg(INFO, "TDLS not enabled (%d)\n", err);
+               wiphy->flags &= ~WIPHY_FLAG_SUPPORTS_TDLS;
+       }
+
        err = brcmf_fil_cmd_int_get(ifp, BRCMF_C_GET_VERSION,
                                    &io_type);
        if (err) {
index bd982856d3853b4c6415b43c137d76b3eb4eeabf..fa391e4eb09893019bdc515bf61f03eeb66179f0 100644 (file)
@@ -928,9 +928,9 @@ brcms_c_ampdu_dotxstatus_complete(struct ampdu_info *ampdu, struct scb *scb,
                        }
                } else if (txs->phyerr) {
                        update_rate = false;
-                       brcms_err(wlc->hw->d11core,
-                                 "%s: ampdu tx phy error (0x%x)\n",
-                                 __func__, txs->phyerr);
+                       brcms_dbg_ht(wlc->hw->d11core,
+                                    "%s: ampdu tx phy error (0x%x)\n",
+                                    __func__, txs->phyerr);
                }
        }
 
index 1860c572b3c476a88527418725848a62f7f9e45a..4fb9635d3919d29a5c927d55086946d1afe18269 100644 (file)
@@ -1015,9 +1015,10 @@ static bool dma64_txidle(struct dma_info *di)
 
 /*
  * post receive buffers
- *  return false is refill failed completely and ring is empty this will stall
- *  the rx dma and user might want to call rxfill again asap. This unlikely
- *  happens on memory-rich NIC, but often on memory-constrained dongle
+ *  Return false if refill failed completely or dma mapping failed. The ring
+ *  is empty, which will stall the rx dma and user might want to call rxfill
+ *  again asap. This is unlikely to happen on a memory-rich NIC, but often on
+ *  memory-constrained dongle.
  */
 bool dma_rxfill(struct dma_pub *pub)
 {
@@ -1078,6 +1079,8 @@ bool dma_rxfill(struct dma_pub *pub)
 
                pa = dma_map_single(di->dmadev, p->data, di->rxbufsize,
                                    DMA_FROM_DEVICE);
+               if (dma_mapping_error(di->dmadev, pa))
+                       return false;
 
                /* save the free packet pointer */
                di->rxp[rxout] = p;
@@ -1284,7 +1287,11 @@ static void dma_txenq(struct dma_info *di, struct sk_buff *p)
 
        /* get physical address of buffer start */
        pa = dma_map_single(di->dmadev, data, len, DMA_TO_DEVICE);
-
+       /* if mapping failed, free skb */
+       if (dma_mapping_error(di->dmadev, pa)) {
+               brcmu_pkt_buf_free_skb(p);
+               return;
+       }
        /* With a DMA segment list, Descriptor table is filled
         * using the segment list instead of looping over
         * buffers in multi-chain DMA. Therefore, EOF for SGLIST
index 9fd6f2fef11bfe546126c86ec857507d8fe8d9eb..7ca10bf4a4d3f81cee965a9045d725dd898e6c58 100644 (file)
@@ -882,8 +882,8 @@ brcms_c_dotxstatus(struct brcms_c_info *wlc, struct tx_status *txs)
        mcl = le16_to_cpu(txh->MacTxControlLow);
 
        if (txs->phyerr)
-               brcms_err(wlc->hw->d11core, "phyerr 0x%x, rate 0x%x\n",
-                         txs->phyerr, txh->MainRates);
+               brcms_dbg_tx(wlc->hw->d11core, "phyerr 0x%x, rate 0x%x\n",
+                            txs->phyerr, txh->MainRates);
 
        if (txs->frameid != le16_to_cpu(txh->TxFrameID)) {
                brcms_err(wlc->hw->d11core, "frameid != txh->TxFrameID\n");
index cbb74d7a9be537d5a88671e0ecd2602e11abbaa6..9e0ca3048657533df52055ac8c30e01006e95048 100644 (file)
@@ -1199,7 +1199,7 @@ bool wsm_flush_tx(struct cw1200_common *priv)
 
        if (priv->bh_error) {
                /* In case of failure do not wait for magic. */
-               pr_err("[WSM] Fatal error occured, will not flush TX.\n");
+               pr_err("[WSM] Fatal error occurred, will not flush TX.\n");
                return false;
        } else {
                /* Get a timestamp of "oldest" frame */
index 7afc613c37068cd23eced45680bfdfc3bc8635c0..48086e849515bfe43815971e1d4586c57e4c6ed6 100644 (file)
@@ -832,7 +832,7 @@ struct wsm_tx {
        /* the MSDU shall be terminated. Overrides the global */
        /* dot11MaxTransmitMsduLifeTime setting [optional] */
        /* Device will set the default value if this is 0. */
-       u32 expire_time;
+       __le32 expire_time;
 
        /* WSM_HT_TX_... */
        __le32 ht_tx_parameters;
index ac074731335a5ed1b7ef393dfd15ae6c87299d03..e5090309824e53c04d1961c0fd6993e80c5aa61e 100644 (file)
@@ -523,9 +523,9 @@ static int prism2_ioctl_giwaplist(struct net_device *dev,
 
        data->length = prism2_ap_get_sta_qual(local, addr, qual, IW_MAX_AP, 1);
 
-       memcpy(extra, &addr, sizeof(struct sockaddr) * data->length);
+       memcpy(extra, addr, sizeof(struct sockaddr) * data->length);
        data->flags = 1; /* has quality information */
-       memcpy(extra + sizeof(struct sockaddr) * data->length, &qual,
+       memcpy(extra + sizeof(struct sockaddr) * data->length, qual,
               sizeof(struct iw_quality) * data->length);
 
        kfree(addr);
index 15f0fad39add227550e3d0a49fedd24737da6aba..e4f56ad26cd8493db42612e846b94ad9c8368378 100644 (file)
@@ -667,7 +667,7 @@ static int prism2_open(struct net_device *dev)
        if (local->no_pri) {
                printk(KERN_DEBUG "%s: could not set interface UP - no PRI "
                       "f/w\n", dev->name);
-               return 1;
+               return -ENODEV;
        }
 
        if ((local->func->card_present && !local->func->card_present(local)) ||
@@ -682,7 +682,7 @@ static int prism2_open(struct net_device *dev)
                printk(KERN_WARNING "%s: could not enable MAC port\n",
                       dev->name);
                prism2_close(dev);
-               return 1;
+               return -ENODEV;
        }
        if (!local->dev_enabled)
                prism2_callback(local, PRISM2_CALLBACK_ENABLE);
index fe31590a51b2d81b41b40b2f862c96c5b2e10980..aea667b430c3ac7e70295562f33de2227813a198 100644 (file)
@@ -887,6 +887,7 @@ il3945_remove_debugfs(void *il, void *il_sta)
  */
 static void
 il3945_rs_rate_init_stub(void *il_r, struct ieee80211_supported_band *sband,
+                        struct cfg80211_chan_def *chandef,
                         struct ieee80211_sta *sta, void *il_sta)
 {
 }
index c092033945cc460022e1c8ea28e2622a9db0a79f..f09e257759d5a930cfdaa1c2a69484355677403f 100644 (file)
@@ -475,6 +475,8 @@ il3945_is_network_packet(struct il_priv *il, struct ieee80211_hdr *header)
        }
 }
 
+#define SMALL_PACKET_SIZE 256
+
 static void
 il3945_pass_packet_to_mac80211(struct il_priv *il, struct il_rx_buf *rxb,
                               struct ieee80211_rx_status *stats)
@@ -483,14 +485,13 @@ il3945_pass_packet_to_mac80211(struct il_priv *il, struct il_rx_buf *rxb,
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)IL_RX_DATA(pkt);
        struct il3945_rx_frame_hdr *rx_hdr = IL_RX_HDR(pkt);
        struct il3945_rx_frame_end *rx_end = IL_RX_END(pkt);
-       u16 len = le16_to_cpu(rx_hdr->len);
+       u32 len = le16_to_cpu(rx_hdr->len);
        struct sk_buff *skb;
        __le16 fc = hdr->frame_control;
+       u32 fraglen = PAGE_SIZE << il->hw_params.rx_page_order;
 
        /* We received data from the HW, so stop the watchdog */
-       if (unlikely
-           (len + IL39_RX_FRAME_SIZE >
-            PAGE_SIZE << il->hw_params.rx_page_order)) {
+       if (unlikely(len + IL39_RX_FRAME_SIZE > fraglen)) {
                D_DROP("Corruption detected!\n");
                return;
        }
@@ -506,26 +507,32 @@ il3945_pass_packet_to_mac80211(struct il_priv *il, struct il_rx_buf *rxb,
                D_INFO("Woke queues - frame received on passive channel\n");
        }
 
-       skb = dev_alloc_skb(128);
+       skb = dev_alloc_skb(SMALL_PACKET_SIZE);
        if (!skb) {
                IL_ERR("dev_alloc_skb failed\n");
                return;
        }
 
        if (!il3945_mod_params.sw_crypto)
-               il_set_decrypted_flag(il, (struct ieee80211_hdr *)rxb_addr(rxb),
+               il_set_decrypted_flag(il, (struct ieee80211_hdr *)pkt,
                                      le32_to_cpu(rx_end->status), stats);
 
-       skb_add_rx_frag(skb, 0, rxb->page,
-                       (void *)rx_hdr->payload - (void *)pkt, len,
-                       len);
-
+       /* If frame is small enough to fit into skb->head, copy it
+        * and do not consume a full page
+        */
+       if (len <= SMALL_PACKET_SIZE) {
+               memcpy(skb_put(skb, len), rx_hdr->payload, len);
+       } else {
+               skb_add_rx_frag(skb, 0, rxb->page,
+                               (void *)rx_hdr->payload - (void *)pkt, len,
+                               fraglen);
+               il->alloc_rxb_page--;
+               rxb->page = NULL;
+       }
        il_update_stats(il, false, fc, len);
        memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
 
        ieee80211_rx(il->hw, skb);
-       il->alloc_rxb_page--;
-       rxb->page = NULL;
 }
 
 #define IL_DELAY_NEXT_SCAN_AFTER_ASSOC (HZ*6)
index f2ed62e373408d3882a3e463af81b88f3a83b7b1..b411ab90528413cd63e5ccfdad52f7dbb04953ec 100644 (file)
@@ -574,9 +574,11 @@ il4965_translate_rx_status(struct il_priv *il, u32 decrypt_in)
        return decrypt_out;
 }
 
+#define SMALL_PACKET_SIZE 256
+
 static void
 il4965_pass_packet_to_mac80211(struct il_priv *il, struct ieee80211_hdr *hdr,
-                              u16 len, u32 ampdu_status, struct il_rx_buf *rxb,
+                              u32 len, u32 ampdu_status, struct il_rx_buf *rxb,
                               struct ieee80211_rx_status *stats)
 {
        struct sk_buff *skb;
@@ -598,21 +600,25 @@ il4965_pass_packet_to_mac80211(struct il_priv *il, struct ieee80211_hdr *hdr,
            il_set_decrypted_flag(il, hdr, ampdu_status, stats))
                return;
 
-       skb = dev_alloc_skb(128);
+       skb = dev_alloc_skb(SMALL_PACKET_SIZE);
        if (!skb) {
                IL_ERR("dev_alloc_skb failed\n");
                return;
        }
 
-       skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len,
-                       len);
+       if (len <= SMALL_PACKET_SIZE) {
+               memcpy(skb_put(skb, len), hdr, len);
+       } else {
+               skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb),
+                               len, PAGE_SIZE << il->hw_params.rx_page_order);
+               il->alloc_rxb_page--;
+               rxb->page = NULL;
+       }
 
        il_update_stats(il, false, fc, len);
        memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
 
        ieee80211_rx(il->hw, skb);
-       il->alloc_rxb_page--;
-       rxb->page = NULL;
 }
 
 /* Called for N_RX (legacy ABG frames), or
index ed3c42a63a4369678f2764049f0b7e257f535fb4..3ccbaf791b48bfea34d3053d4d7d3cce6e165b22 100644 (file)
@@ -2803,6 +2803,7 @@ il4965_rs_remove_debugfs(void *il, void *il_sta)
  */
 static void
 il4965_rs_rate_init_stub(void *il_r, struct ieee80211_supported_band *sband,
+                        struct cfg80211_chan_def *chandef,
                         struct ieee80211_sta *sta, void *il_sta)
 {
 }
index cbaa5c2c410f7f8b5cc5cc4c2807fd9fa95f657e..3eb2102ce2366e47fce1b149cc68fb14926e1e22 100644 (file)
@@ -22,6 +22,8 @@ config IWLWIFI
                Intel Wireless WiFi Link 6150BGN 2 Adapter
                Intel 100 Series Wi-Fi Adapters (100BGN and 130BGN)
                Intel 2000 Series Wi-Fi Adapters
+               Intel 7260 Wi-Fi Adapter
+               Intel 3160 Wi-Fi Adapter
 
 
          This driver uses the kernel's mac80211 subsystem.
@@ -46,17 +48,16 @@ config IWLDVM
        depends on IWLWIFI
        default IWLWIFI
        help
-         This is the driver supporting the DVM firmware which is
-         currently the only firmware available for existing devices.
+         This is the driver that supports the DVM firmware which is
+         used by most existing devices (with the exception of 7260
+         and 3160).
 
 config IWLMVM
        tristate "Intel Wireless WiFi MVM Firmware support"
        depends on IWLWIFI
        help
-         This is the driver supporting the MVM firmware which is
-         currently only available for 7000 series devices.
-
-         Say yes if you have such a device.
+         This is the driver that supports the MVM firmware which is
+         currently only available for 7260 and 3160 devices.
 
 # don't call it _MODULE -- will confuse Kconfig/fixdep/...
 config IWLWIFI_OPMODE_MODULAR
@@ -127,20 +128,3 @@ config IWLWIFI_DEVICE_TRACING
          If unsure, say Y so we can help you better when problems
          occur.
 endmenu
-
-config IWLWIFI_P2P
-       def_bool y
-       bool "iwlwifi experimental P2P support"
-       depends on IWLWIFI
-       help
-         This option enables experimental P2P support for some devices
-         based on microcode support. Since P2P support is still under
-         development, this option may even enable it for some devices
-         now that turn out to not support it in the future due to
-         microcode restrictions.
-
-         To determine if your microcode supports the experimental P2P
-         offered by this option, check if the driver advertises AP
-         support when it is loaded.
-
-         Say Y only if you want to experiment with P2P.
index 18355110deffcacf4f6f6074995cf749be7d1574..f2a86ffc3b4cf09440874c88b6f0cc3d804ec3a6 100644 (file)
@@ -106,7 +106,6 @@ extern const struct iwl_dvm_cfg iwl_dvm_6030_cfg;
 #define STATUS_CHANNEL_SWITCH_PENDING 11
 #define STATUS_SCAN_COMPLETE   12
 #define STATUS_POWER_PMI       13
-#define STATUS_SCAN_ROC_EXPIRED 14
 
 struct iwl_ucode_capabilities;
 
@@ -250,7 +249,6 @@ u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant_idx, u8 valid);
 
 /* scan */
 void iwlagn_post_scan(struct iwl_priv *priv);
-void iwlagn_disable_roc(struct iwl_priv *priv);
 int iwl_force_rf_reset(struct iwl_priv *priv, bool external);
 void iwl_init_scan_params(struct iwl_priv *priv);
 int iwl_scan_cancel(struct iwl_priv *priv);
@@ -265,10 +263,6 @@ int __must_check iwl_scan_initiate(struct iwl_priv *priv,
                                   enum iwl_scan_type scan_type,
                                   enum ieee80211_band band);
 
-void iwl_scan_roc_expired(struct iwl_priv *priv);
-void iwl_scan_offchannel_skb(struct iwl_priv *priv);
-void iwl_scan_offchannel_skb_status(struct iwl_priv *priv);
-
 /* For faster active scanning, scan will move to the next channel if fewer than
  * PLCP_QUIET_THRESH packets are heard on this channel within
  * ACTIVE_QUIET_TIME after sending probe request.  This shortens the dwell
index d5329489245a74ccb5cac363e3831e36648c1ce3..d94f8ab15004cb93f2e757f92a91fd3e669759cb 100644 (file)
 } while (0)
 
 /* file operation */
-#define DEBUGFS_READ_FUNC(name)                                         \
-static ssize_t iwl_dbgfs_##name##_read(struct file *file,               \
-                                       char __user *user_buf,          \
-                                       size_t count, loff_t *ppos);
-
-#define DEBUGFS_WRITE_FUNC(name)                                        \
-static ssize_t iwl_dbgfs_##name##_write(struct file *file,              \
-                                       const char __user *user_buf,    \
-                                       size_t count, loff_t *ppos);
-
-
 #define DEBUGFS_READ_FILE_OPS(name)                                     \
-       DEBUGFS_READ_FUNC(name);                                        \
 static const struct file_operations iwl_dbgfs_##name##_ops = {          \
        .read = iwl_dbgfs_##name##_read,                                \
        .open = simple_open,                                            \
@@ -89,7 +77,6 @@ static const struct file_operations iwl_dbgfs_##name##_ops = {          \
 };
 
 #define DEBUGFS_WRITE_FILE_OPS(name)                                    \
-       DEBUGFS_WRITE_FUNC(name);                                       \
 static const struct file_operations iwl_dbgfs_##name##_ops = {          \
        .write = iwl_dbgfs_##name##_write,                              \
        .open = simple_open,                                            \
@@ -98,8 +85,6 @@ static const struct file_operations iwl_dbgfs_##name##_ops = {          \
 
 
 #define DEBUGFS_READ_WRITE_FILE_OPS(name)                               \
-       DEBUGFS_READ_FUNC(name);                                        \
-       DEBUGFS_WRITE_FUNC(name);                                       \
 static const struct file_operations iwl_dbgfs_##name##_ops = {          \
        .write = iwl_dbgfs_##name##_write,                              \
        .read = iwl_dbgfs_##name##_read,                                \
index 60a4e0d15715bec71a5db1a7457ddb90ba5b59b9..a79fdd137f956ce5cb4de2ef7e1c0f239e76895a 100644 (file)
@@ -540,7 +540,6 @@ struct iwl_rxon_context {
 enum iwl_scan_type {
        IWL_SCAN_NORMAL,
        IWL_SCAN_RADIO_RESET,
-       IWL_SCAN_ROC,
 };
 
 /**
@@ -825,12 +824,6 @@ struct iwl_priv {
        struct reply_tx_error_statistics reply_tx_stats;
        struct reply_agg_tx_error_statistics reply_agg_tx_stats;
 
-       /* remain-on-channel offload support */
-       struct ieee80211_channel *hw_roc_channel;
-       struct delayed_work hw_roc_disable_work;
-       int hw_roc_duration;
-       bool hw_roc_setup, hw_roc_start_notified;
-
        /* bt coex */
        u8 bt_enable_flag;
        u8 bt_status;
index 822f1a00efbb7c5f40ed3edf7ca14f11103d49cc..cae4d3182e334f9451e38d0afada8a8468785195 100644 (file)
@@ -76,29 +76,6 @@ static const struct ieee80211_iface_limit iwlagn_2sta_limits[] = {
        },
 };
 
-static const struct ieee80211_iface_limit iwlagn_p2p_sta_go_limits[] = {
-       {
-               .max = 1,
-               .types = BIT(NL80211_IFTYPE_STATION),
-       },
-       {
-               .max = 1,
-               .types = BIT(NL80211_IFTYPE_P2P_GO) |
-                        BIT(NL80211_IFTYPE_AP),
-       },
-};
-
-static const struct ieee80211_iface_limit iwlagn_p2p_2sta_limits[] = {
-       {
-               .max = 2,
-               .types = BIT(NL80211_IFTYPE_STATION),
-       },
-       {
-               .max = 1,
-               .types = BIT(NL80211_IFTYPE_P2P_CLIENT),
-       },
-};
-
 static const struct ieee80211_iface_combination
 iwlagn_iface_combinations_dualmode[] = {
        { .num_different_channels = 1,
@@ -114,21 +91,6 @@ iwlagn_iface_combinations_dualmode[] = {
        },
 };
 
-static const struct ieee80211_iface_combination
-iwlagn_iface_combinations_p2p[] = {
-       { .num_different_channels = 1,
-         .max_interfaces = 2,
-         .beacon_int_infra_match = true,
-         .limits = iwlagn_p2p_sta_go_limits,
-         .n_limits = ARRAY_SIZE(iwlagn_p2p_sta_go_limits),
-       },
-       { .num_different_channels = 1,
-         .max_interfaces = 2,
-         .limits = iwlagn_p2p_2sta_limits,
-         .n_limits = ARRAY_SIZE(iwlagn_p2p_2sta_limits),
-       },
-};
-
 /*
  * Not a mac80211 entry point function, but it fits in with all the
  * other mac80211 functions grouped here.
@@ -186,19 +148,13 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
 
        BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
 
-       if (hw->wiphy->interface_modes & BIT(NL80211_IFTYPE_P2P_CLIENT)) {
-               hw->wiphy->iface_combinations = iwlagn_iface_combinations_p2p;
-               hw->wiphy->n_iface_combinations =
-                       ARRAY_SIZE(iwlagn_iface_combinations_p2p);
-       } else if (hw->wiphy->interface_modes & BIT(NL80211_IFTYPE_AP)) {
+       if (hw->wiphy->interface_modes & BIT(NL80211_IFTYPE_AP)) {
                hw->wiphy->iface_combinations =
                        iwlagn_iface_combinations_dualmode;
                hw->wiphy->n_iface_combinations =
                        ARRAY_SIZE(iwlagn_iface_combinations_dualmode);
        }
 
-       hw->wiphy->max_remain_on_channel_duration = 500;
-
        hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
                            WIPHY_FLAG_DISABLE_BEACON_HINTS |
                            WIPHY_FLAG_IBSS_RSN;
@@ -1068,7 +1024,10 @@ void iwl_chswitch_done(struct iwl_priv *priv, bool is_success)
        if (test_bit(STATUS_EXIT_PENDING, &priv->status))
                return;
 
-       if (test_and_clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
+       if (!test_and_clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
+               return;
+
+       if (ctx->vif)
                ieee80211_chswitch_done(ctx->vif, is_success);
 }
 
@@ -1156,126 +1115,6 @@ done:
        IWL_DEBUG_MAC80211(priv, "leave\n");
 }
 
-static int iwlagn_mac_remain_on_channel(struct ieee80211_hw *hw,
-                                    struct ieee80211_vif *vif,
-                                    struct ieee80211_channel *channel,
-                                    int duration,
-                                    enum ieee80211_roc_type type)
-{
-       struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
-       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_PAN];
-       int err = 0;
-
-       if (!(priv->valid_contexts & BIT(IWL_RXON_CTX_PAN)))
-               return -EOPNOTSUPP;
-
-       if (!(ctx->interface_modes & BIT(NL80211_IFTYPE_P2P_CLIENT)))
-               return -EOPNOTSUPP;
-
-       IWL_DEBUG_MAC80211(priv, "enter\n");
-       mutex_lock(&priv->mutex);
-
-       if (test_bit(STATUS_SCAN_HW, &priv->status)) {
-               /* mac80211 should not scan while ROC or ROC while scanning */
-               if (WARN_ON_ONCE(priv->scan_type != IWL_SCAN_RADIO_RESET)) {
-                       err = -EBUSY;
-                       goto out;
-               }
-
-               iwl_scan_cancel_timeout(priv, 100);
-
-               if (test_bit(STATUS_SCAN_HW, &priv->status)) {
-                       err = -EBUSY;
-                       goto out;
-               }
-       }
-
-       priv->hw_roc_channel = channel;
-       /* convert from ms to TU */
-       priv->hw_roc_duration = DIV_ROUND_UP(1000 * duration, 1024);
-       priv->hw_roc_start_notified = false;
-       cancel_delayed_work(&priv->hw_roc_disable_work);
-
-       if (!ctx->is_active) {
-               static const struct iwl_qos_info default_qos_data = {
-                       .def_qos_parm = {
-                               .ac[0] = {
-                                       .cw_min = cpu_to_le16(3),
-                                       .cw_max = cpu_to_le16(7),
-                                       .aifsn = 2,
-                                       .edca_txop = cpu_to_le16(1504),
-                               },
-                               .ac[1] = {
-                                       .cw_min = cpu_to_le16(7),
-                                       .cw_max = cpu_to_le16(15),
-                                       .aifsn = 2,
-                                       .edca_txop = cpu_to_le16(3008),
-                               },
-                               .ac[2] = {
-                                       .cw_min = cpu_to_le16(15),
-                                       .cw_max = cpu_to_le16(1023),
-                                       .aifsn = 3,
-                               },
-                               .ac[3] = {
-                                       .cw_min = cpu_to_le16(15),
-                                       .cw_max = cpu_to_le16(1023),
-                                       .aifsn = 7,
-                               },
-                       },
-               };
-
-               ctx->is_active = true;
-               ctx->qos_data = default_qos_data;
-               ctx->staging.dev_type = RXON_DEV_TYPE_P2P;
-               memcpy(ctx->staging.node_addr,
-                      priv->contexts[IWL_RXON_CTX_BSS].staging.node_addr,
-                      ETH_ALEN);
-               memcpy(ctx->staging.bssid_addr,
-                      priv->contexts[IWL_RXON_CTX_BSS].staging.node_addr,
-                      ETH_ALEN);
-               err = iwlagn_commit_rxon(priv, ctx);
-               if (err)
-                       goto out;
-               ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK |
-                                            RXON_FILTER_PROMISC_MSK |
-                                            RXON_FILTER_CTL2HOST_MSK;
-
-               err = iwlagn_commit_rxon(priv, ctx);
-               if (err) {
-                       iwlagn_disable_roc(priv);
-                       goto out;
-               }
-               priv->hw_roc_setup = true;
-       }
-
-       err = iwl_scan_initiate(priv, ctx->vif, IWL_SCAN_ROC, channel->band);
-       if (err)
-               iwlagn_disable_roc(priv);
-
- out:
-       mutex_unlock(&priv->mutex);
-       IWL_DEBUG_MAC80211(priv, "leave\n");
-
-       return err;
-}
-
-static int iwlagn_mac_cancel_remain_on_channel(struct ieee80211_hw *hw)
-{
-       struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
-
-       if (!(priv->valid_contexts & BIT(IWL_RXON_CTX_PAN)))
-               return -EOPNOTSUPP;
-
-       IWL_DEBUG_MAC80211(priv, "enter\n");
-       mutex_lock(&priv->mutex);
-       iwl_scan_cancel_timeout(priv, priv->hw_roc_duration);
-       iwlagn_disable_roc(priv);
-       mutex_unlock(&priv->mutex);
-       IWL_DEBUG_MAC80211(priv, "leave\n");
-
-       return 0;
-}
-
 static void iwlagn_mac_rssi_callback(struct ieee80211_hw *hw,
                                     struct ieee80211_vif *vif,
                                     enum ieee80211_rssi_event rssi_event)
@@ -1431,12 +1270,8 @@ static int iwlagn_mac_add_interface(struct ieee80211_hw *hw,
        IWL_DEBUG_MAC80211(priv, "enter: type %d, addr %pM\n",
                           viftype, vif->addr);
 
-       cancel_delayed_work_sync(&priv->hw_roc_disable_work);
-
        mutex_lock(&priv->mutex);
 
-       iwlagn_disable_roc(priv);
-
        if (!iwl_is_ready_rf(priv)) {
                IWL_WARN(priv, "Try to add interface when device not ready\n");
                err = -EINVAL;
@@ -1763,8 +1598,6 @@ struct ieee80211_ops iwlagn_hw_ops = {
        .channel_switch = iwlagn_mac_channel_switch,
        .flush = iwlagn_mac_flush,
        .tx_last_beacon = iwlagn_mac_tx_last_beacon,
-       .remain_on_channel = iwlagn_mac_remain_on_channel,
-       .cancel_remain_on_channel = iwlagn_mac_cancel_remain_on_channel,
        .rssi_callback = iwlagn_mac_rssi_callback,
        .set_tim = iwlagn_mac_set_tim,
 };
index 1531a4fc09601bd101fde1e20a528d7631f8baf8..7aad766865cf5d09fb5f0ab7b150189039bccd53 100644 (file)
@@ -587,11 +587,6 @@ static void iwl_init_context(struct iwl_priv *priv, u32 ucode_flags)
        priv->contexts[IWL_RXON_CTX_PAN].interface_modes =
                BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP);
 
-       if (ucode_flags & IWL_UCODE_TLV_FLAGS_P2P)
-               priv->contexts[IWL_RXON_CTX_PAN].interface_modes |=
-                       BIT(NL80211_IFTYPE_P2P_CLIENT) |
-                       BIT(NL80211_IFTYPE_P2P_GO);
-
        priv->contexts[IWL_RXON_CTX_PAN].ap_devtype = RXON_DEV_TYPE_CP;
        priv->contexts[IWL_RXON_CTX_PAN].station_devtype = RXON_DEV_TYPE_2STA;
        priv->contexts[IWL_RXON_CTX_PAN].unused_devtype = RXON_DEV_TYPE_P2P;
@@ -854,14 +849,6 @@ void iwl_down(struct iwl_priv *priv)
 
        iwl_scan_cancel_timeout(priv, 200);
 
-       /*
-        * If active, scanning won't cancel it, so say it expired.
-        * No race since we hold the mutex here and a new one
-        * can't come in at this time.
-        */
-       if (priv->ucode_loaded && priv->cur_ucode != IWL_UCODE_INIT)
-               ieee80211_remain_on_channel_expired(priv->hw);
-
        exit_pending =
                test_and_set_bit(STATUS_EXIT_PENDING, &priv->status);
 
@@ -1002,41 +989,6 @@ static void iwl_bg_restart(struct work_struct *data)
        }
 }
 
-
-
-
-void iwlagn_disable_roc(struct iwl_priv *priv)
-{
-       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_PAN];
-
-       lockdep_assert_held(&priv->mutex);
-
-       if (!priv->hw_roc_setup)
-               return;
-
-       ctx->staging.dev_type = RXON_DEV_TYPE_P2P;
-       ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
-
-       priv->hw_roc_channel = NULL;
-
-       memset(ctx->staging.node_addr, 0, ETH_ALEN);
-
-       iwlagn_commit_rxon(priv, ctx);
-
-       ctx->is_active = false;
-       priv->hw_roc_setup = false;
-}
-
-static void iwlagn_disable_roc_work(struct work_struct *work)
-{
-       struct iwl_priv *priv = container_of(work, struct iwl_priv,
-                                            hw_roc_disable_work.work);
-
-       mutex_lock(&priv->mutex);
-       iwlagn_disable_roc(priv);
-       mutex_unlock(&priv->mutex);
-}
-
 /*****************************************************************************
  *
  * driver setup and teardown
@@ -1053,8 +1005,6 @@ static void iwl_setup_deferred_work(struct iwl_priv *priv)
        INIT_WORK(&priv->tx_flush, iwl_bg_tx_flush);
        INIT_WORK(&priv->bt_full_concurrency, iwl_bg_bt_full_concurrency);
        INIT_WORK(&priv->bt_runtime_config, iwl_bg_bt_runtime_config);
-       INIT_DELAYED_WORK(&priv->hw_roc_disable_work,
-                         iwlagn_disable_roc_work);
 
        iwl_setup_scan_deferred_work(priv);
 
@@ -1082,7 +1032,6 @@ void iwl_cancel_deferred_work(struct iwl_priv *priv)
 
        cancel_work_sync(&priv->bt_full_concurrency);
        cancel_work_sync(&priv->bt_runtime_config);
-       cancel_delayed_work_sync(&priv->hw_roc_disable_work);
 
        del_timer_sync(&priv->statistics_periodic);
        del_timer_sync(&priv->ucode_trace);
@@ -1169,12 +1118,6 @@ static void iwl_option_config(struct iwl_priv *priv)
 #else
        IWL_INFO(priv, "CONFIG_IWLWIFI_DEVICE_TRACING disabled\n");
 #endif
-
-#ifdef CONFIG_IWLWIFI_P2P
-       IWL_INFO(priv, "CONFIG_IWLWIFI_P2P enabled\n");
-#else
-       IWL_INFO(priv, "CONFIG_IWLWIFI_P2P disabled\n");
-#endif
 }
 
 static int iwl_eeprom_init_hw_params(struct iwl_priv *priv)
@@ -1315,10 +1258,6 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
 
        ucode_flags = fw->ucode_capa.flags;
 
-#ifndef CONFIG_IWLWIFI_P2P
-       ucode_flags &= ~IWL_UCODE_TLV_FLAGS_P2P;
-#endif
-
        if (ucode_flags & IWL_UCODE_TLV_FLAGS_PAN) {
                priv->sta_key_max_num = STA_KEY_MAX_NUM_PAN;
                trans_cfg.cmd_queue = IWL_IPAN_CMD_QUEUE_NUM;
@@ -1413,7 +1352,6 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
                 * if not PAN, then don't support P2P -- might be a uCode
                 * packaging bug or due to the eeprom check above
                 */
-               ucode_flags &= ~IWL_UCODE_TLV_FLAGS_P2P;
                priv->sta_key_max_num = STA_KEY_MAX_NUM;
                trans_cfg.cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM;
 
index 1b693944123b5b0eda877f8d93a4c64775cc04ec..b647e506564cb78cd6117a70c0e09a3036c69d7b 100644 (file)
@@ -2826,9 +2826,6 @@ void iwl_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 sta_i
 
        lq_sta->flush_timer = 0;
        lq_sta->supp_rates = sta->supp_rates[sband->band];
-       for (j = 0; j < LQ_SIZE; j++)
-               for (i = 0; i < IWL_RATE_COUNT; i++)
-                       rs_rate_scale_clear_window(&lq_sta->lq_info[j].win[i]);
 
        IWL_DEBUG_RATE(priv, "LQ: *** rate scale station global init for station %d ***\n",
                       sta_id);
@@ -3319,7 +3316,8 @@ static void rs_remove_debugfs(void *priv, void *priv_sta)
  * station is added we ignore it.
  */
 static void rs_rate_init_stub(void *priv_r, struct ieee80211_supported_band *sband,
-                        struct ieee80211_sta *sta, void *priv_sta)
+                             struct cfg80211_chan_def *chandef,
+                             struct ieee80211_sta *sta, void *priv_sta)
 {
 }
 static struct rate_control_ops rs_ops = {
index cd1ad00191857e962f74a45c0bbcfe56e1591514..d7ce2f12a90724a4e4b046908311c3ff1a35a1f4 100644 (file)
@@ -564,11 +564,7 @@ int iwlagn_set_pan_params(struct iwl_priv *priv)
        cmd.slots[0].type = 0; /* BSS */
        cmd.slots[1].type = 1; /* PAN */
 
-       if (priv->hw_roc_setup) {
-               /* both contexts must be used for this to happen */
-               slot1 = IWL_MIN_SLOT_TIME;
-               slot0 = 3000;
-       } else if (ctx_bss->vif && ctx_pan->vif) {
+       if (ctx_bss->vif && ctx_pan->vif) {
                int bcnint = ctx_pan->beacon_int;
                int dtim = ctx_pan->vif->bss_conf.dtim_period ?: 1;
 
index 8c686a5b90ac6657ad3e5e4126c61fba67726654..35e0ee8b4e5b64162ecdb2287ae53ab3acd5ee4b 100644 (file)
@@ -100,9 +100,6 @@ static void iwl_complete_scan(struct iwl_priv *priv, bool aborted)
                ieee80211_scan_completed(priv->hw, aborted);
        }
 
-       if (priv->scan_type == IWL_SCAN_ROC)
-               iwl_scan_roc_expired(priv);
-
        priv->scan_type = IWL_SCAN_NORMAL;
        priv->scan_vif = NULL;
        priv->scan_request = NULL;
@@ -130,9 +127,6 @@ static void iwl_process_scan_complete(struct iwl_priv *priv)
                goto out_settings;
        }
 
-       if (priv->scan_type == IWL_SCAN_ROC)
-               iwl_scan_roc_expired(priv);
-
        if (priv->scan_type != IWL_SCAN_NORMAL && !aborted) {
                int err;
 
@@ -284,12 +278,6 @@ static int iwl_rx_scan_start_notif(struct iwl_priv *priv,
                       le32_to_cpu(notif->tsf_low),
                       notif->status, notif->beacon_timer);
 
-       if (priv->scan_type == IWL_SCAN_ROC &&
-           !priv->hw_roc_start_notified) {
-               ieee80211_ready_on_channel(priv->hw);
-               priv->hw_roc_start_notified = true;
-       }
-
        return 0;
 }
 
@@ -697,8 +685,7 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
        scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
        scan->quiet_time = IWL_ACTIVE_QUIET_TIME;
 
-       if (priv->scan_type != IWL_SCAN_ROC &&
-           iwl_is_any_associated(priv)) {
+       if (iwl_is_any_associated(priv)) {
                u16 interval = 0;
                u32 extra;
                u32 suspend_time = 100;
@@ -706,9 +693,6 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
 
                IWL_DEBUG_INFO(priv, "Scanning while associated...\n");
                switch (priv->scan_type) {
-               case IWL_SCAN_ROC:
-                       WARN_ON(1);
-                       break;
                case IWL_SCAN_RADIO_RESET:
                        interval = 0;
                        break;
@@ -728,11 +712,6 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
                scan->suspend_time = cpu_to_le32(scan_suspend_time);
                IWL_DEBUG_SCAN(priv, "suspend_time 0x%X beacon interval %d\n",
                               scan_suspend_time, interval);
-       } else if (priv->scan_type == IWL_SCAN_ROC) {
-               scan->suspend_time = 0;
-               scan->max_out_time = 0;
-               scan->quiet_time = 0;
-               scan->quiet_plcp_th = 0;
        }
 
        switch (priv->scan_type) {
@@ -774,9 +753,6 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
                } else
                        IWL_DEBUG_SCAN(priv, "Start passive scan.\n");
                break;
-       case IWL_SCAN_ROC:
-               IWL_DEBUG_SCAN(priv, "Start ROC scan.\n");
-               break;
        }
 
        scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
@@ -898,7 +874,6 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
                                        scan_cmd_size - sizeof(*scan));
                break;
        case IWL_SCAN_RADIO_RESET:
-       case IWL_SCAN_ROC:
                /* use bcast addr, will not be transmitted but must be valid */
                cmd_len = iwl_fill_probe_req(
                                        (struct ieee80211_mgmt *)scan->data,
@@ -926,46 +901,6 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
                                is_active, n_probes,
                                (void *)&scan->data[cmd_len]);
                break;
-       case IWL_SCAN_ROC: {
-               struct iwl_scan_channel *scan_ch;
-               int n_chan, i;
-               u16 dwell;
-
-               dwell = iwl_limit_dwell(priv, priv->hw_roc_duration);
-               n_chan = DIV_ROUND_UP(priv->hw_roc_duration, dwell);
-
-               scan->channel_count = n_chan;
-
-               scan_ch = (void *)&scan->data[cmd_len];
-
-               for (i = 0; i < n_chan; i++) {
-                       scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
-                       scan_ch->channel =
-                               cpu_to_le16(priv->hw_roc_channel->hw_value);
-
-                       if (i == n_chan - 1)
-                               dwell = priv->hw_roc_duration - i * dwell;
-
-                       scan_ch->active_dwell =
-                       scan_ch->passive_dwell = cpu_to_le16(dwell);
-
-                       /* Set txpower levels to defaults */
-                       scan_ch->dsp_atten = 110;
-
-                       /* NOTE: if we were doing 6Mb OFDM for scans we'd use
-                        * power level:
-                        * scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3;
-                        */
-                       if (priv->hw_roc_channel->band == IEEE80211_BAND_5GHZ)
-                               scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
-                       else
-                               scan_ch->tx_gain = ((1 << 5) | (5 << 3));
-
-                       scan_ch++;
-               }
-               }
-
-               break;
        }
 
        if (scan->channel_count == 0) {
@@ -1035,7 +970,6 @@ int __must_check iwl_scan_initiate(struct iwl_priv *priv,
 
        IWL_DEBUG_SCAN(priv, "Starting %sscan...\n",
                        scan_type == IWL_SCAN_NORMAL ? "" :
-                       scan_type == IWL_SCAN_ROC ? "remain-on-channel " :
                        "internal short ");
 
        set_bit(STATUS_SCANNING, &priv->status);
@@ -1149,40 +1083,3 @@ void iwl_cancel_scan_deferred_work(struct iwl_priv *priv)
                mutex_unlock(&priv->mutex);
        }
 }
-
-void iwl_scan_roc_expired(struct iwl_priv *priv)
-{
-       /*
-        * The status bit should be set here, to prevent a race
-        * where the atomic_read returns 1, but before the execution continues
-        * iwl_scan_offchannel_skb_status() checks if the status bit is set
-        */
-       set_bit(STATUS_SCAN_ROC_EXPIRED, &priv->status);
-
-       if (atomic_read(&priv->num_aux_in_flight) == 0) {
-               ieee80211_remain_on_channel_expired(priv->hw);
-               priv->hw_roc_channel = NULL;
-               schedule_delayed_work(&priv->hw_roc_disable_work,
-                                     10 * HZ);
-
-               clear_bit(STATUS_SCAN_ROC_EXPIRED, &priv->status);
-       } else {
-               IWL_DEBUG_SCAN(priv, "ROC done with %d frames in aux\n",
-                              atomic_read(&priv->num_aux_in_flight));
-       }
-}
-
-void iwl_scan_offchannel_skb(struct iwl_priv *priv)
-{
-       WARN_ON(!priv->hw_roc_start_notified);
-       atomic_inc(&priv->num_aux_in_flight);
-}
-
-void iwl_scan_offchannel_skb_status(struct iwl_priv *priv)
-{
-       if (atomic_dec_return(&priv->num_aux_in_flight) == 0 &&
-           test_bit(STATUS_SCAN_ROC_EXPIRED, &priv->status)) {
-               IWL_DEBUG_SCAN(priv, "0 aux frames. Calling ROC expired\n");
-               iwl_scan_roc_expired(priv);
-       }
-}
index 5ee983faa679d21d09ba1700821172d84501ea4d..da442b81370a769054b4b77a946daa4bbd856ed8 100644 (file)
@@ -87,7 +87,7 @@ static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv,
                 priv->lib->bt_params->advanced_bt_coexist &&
                 (ieee80211_is_auth(fc) || ieee80211_is_assoc_req(fc) ||
                 ieee80211_is_reassoc_req(fc) ||
-                skb->protocol == cpu_to_be16(ETH_P_PAE)))
+                info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO))
                tx_flags |= TX_CMD_FLG_IGNORE_BT;
 
 
@@ -478,9 +478,6 @@ int iwlagn_tx_skb(struct iwl_priv *priv,
        if (sta_priv && sta_priv->client && !is_agg)
                atomic_inc(&sta_priv->pending_frames);
 
-       if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)
-               iwl_scan_offchannel_skb(priv);
-
        return 0;
 
 drop_unlock_sta:
@@ -1158,7 +1155,6 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
        struct sk_buff *skb;
        struct iwl_rxon_context *ctx;
        bool is_agg = (txq_id >= IWLAGN_FIRST_AMPDU_QUEUE);
-       bool is_offchannel_skb;
 
        tid = (tx_resp->ra_tid & IWLAGN_TX_RES_TID_MSK) >>
                IWLAGN_TX_RES_TID_POS;
@@ -1178,8 +1174,6 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
 
        __skb_queue_head_init(&skbs);
 
-       is_offchannel_skb = false;
-
        if (tx_resp->frame_count == 1) {
                u16 next_reclaimed = le16_to_cpu(tx_resp->seq_ctl);
                next_reclaimed = IEEE80211_SEQ_TO_SN(next_reclaimed + 0x10);
@@ -1256,8 +1250,6 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
                        if (!is_agg)
                                iwlagn_non_agg_tx_status(priv, ctx, hdr->addr1);
 
-                       is_offchannel_skb =
-                               (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN);
                        freed++;
                }
 
@@ -1271,14 +1263,6 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
                if (!is_agg && freed != 1)
                        IWL_ERR(priv, "Q: %d, freed %d\n", txq_id, freed);
 
-               /*
-                * An offchannel frame can be send only on the AUX queue, where
-                * there is no aggregation (and reordering) so it only is single
-                * skb is expected to be processed.
-                */
-               if (is_offchannel_skb && freed != 1)
-                       IWL_ERR(priv, "OFFCHANNEL SKB freed %d\n", freed);
-
                IWL_DEBUG_TX_REPLY(priv, "TXQ %d status %s (0x%08x)\n", txq_id,
                                   iwl_get_tx_fail_reason(status), status);
 
@@ -1298,9 +1282,6 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
                ieee80211_tx_status_ni(priv->hw, skb);
        }
 
-       if (is_offchannel_skb)
-               iwl_scan_offchannel_skb_status(priv);
-
        return 0;
 }
 
index 22b7fa5b971af46bf0a3afbbb49e83bdcfc24568..76e14c046d9402cc0b7fbe39b0966c8195f0708a 100644 (file)
@@ -99,6 +99,7 @@ static const struct iwl_base_params iwl7000_base_params = {
        .wd_timeout = IWL_LONG_WD_TIMEOUT,
        .max_event_log_size = 512,
        .shadow_reg_enable = true,
+       .pcie_l1_allowed = true,
 };
 
 static const struct iwl_ht_params iwl7000_ht_params = {
@@ -126,6 +127,16 @@ const struct iwl_cfg iwl7260_2ac_cfg = {
        .nvm_calib_ver = IWL7260_TX_POWER_VERSION,
 };
 
+const struct iwl_cfg iwl7260_2ac_cfg_high_temp = {
+       .name = "Intel(R) Dual Band Wireless AC 7260",
+       .fw_name_pre = IWL7260_FW_PRE,
+       IWL_DEVICE_7000,
+       .ht_params = &iwl7000_ht_params,
+       .nvm_ver = IWL7260_NVM_VERSION,
+       .nvm_calib_ver = IWL7260_TX_POWER_VERSION,
+       .high_temp = true,
+};
+
 const struct iwl_cfg iwl7260_2n_cfg = {
        .name = "Intel(R) Dual Band Wireless N 7260",
        .fw_name_pre = IWL7260_FW_PRE,
index 83b9ff6ff3ad8771a2bbc02d16e58371c7c87fbd..e4d370bff30679fceacc1bc82907ccb6bc194f22 100644 (file)
@@ -152,6 +152,7 @@ struct iwl_base_params {
        unsigned int wd_timeout;
        u32 max_event_log_size;
        const bool shadow_reg_enable;
+       const bool pcie_l1_allowed;
 };
 
 /*
@@ -205,6 +206,7 @@ struct iwl_eeprom_params {
  * @led_mode: 0=blinking, 1=On(RF On)/Off(RF Off)
  * @rx_with_siso_diversity: 1x1 device with rx antenna diversity
  * @internal_wimax_coex: internal wifi/wimax combo device
+ * @high_temp: Is this NIC is designated to be in high temperature.
  *
  * We enable the driver to be backward compatible wrt. hardware features.
  * API differences in uCode shouldn't be handled here but through TLVs
@@ -233,6 +235,7 @@ struct iwl_cfg {
        enum iwl_led_mode led_mode;
        const bool rx_with_siso_diversity;
        const bool internal_wimax_coex;
+       bool high_temp;
 };
 
 /*
@@ -283,6 +286,7 @@ extern const struct iwl_cfg iwl135_bgn_cfg;
 #endif /* CONFIG_IWLDVM */
 #if IS_ENABLED(CONFIG_IWLMVM)
 extern const struct iwl_cfg iwl7260_2ac_cfg;
+extern const struct iwl_cfg iwl7260_2ac_cfg_high_temp;
 extern const struct iwl_cfg iwl7260_2n_cfg;
 extern const struct iwl_cfg iwl7260_n_cfg;
 extern const struct iwl_cfg iwl3160_2ac_cfg;
index 7edb8519c8a49eadc3f5798fa8fa08bfbd25fe04..b2bb32a781ddd9675efe77da6e4d164de45d0b60 100644 (file)
@@ -145,6 +145,7 @@ do {                                                                \
 #define IWL_DL_RX              0x01000000
 #define IWL_DL_ISR             0x02000000
 #define IWL_DL_HT              0x04000000
+#define IWL_DL_EXTERNAL                0x08000000
 /* 0xF0000000 - 0x10000000 */
 #define IWL_DL_11H             0x10000000
 #define IWL_DL_STATS           0x20000000
@@ -153,6 +154,7 @@ do {                                                                \
 
 #define IWL_DEBUG_INFO(p, f, a...)     IWL_DEBUG(p, IWL_DL_INFO, f, ## a)
 #define IWL_DEBUG_MAC80211(p, f, a...) IWL_DEBUG(p, IWL_DL_MAC80211, f, ## a)
+#define IWL_DEBUG_EXTERNAL(p, f, a...) IWL_DEBUG(p, IWL_DL_EXTERNAL, f, ## a)
 #define IWL_DEBUG_TEMP(p, f, a...)     IWL_DEBUG(p, IWL_DL_TEMP, f, ## a)
 #define IWL_DEBUG_SCAN(p, f, a...)     IWL_DEBUG(p, IWL_DL_SCAN, f, ## a)
 #define IWL_DEBUG_RX(p, f, a...)       IWL_DEBUG(p, IWL_DL_RX, f, ## a)
index 4491c1c72cc7d6e00ade51930342b16233172690..684c416d34936c5ecdb6451f2d3707babae6b180 100644 (file)
 static inline bool iwl_trace_data(struct sk_buff *skb)
 {
        struct ieee80211_hdr *hdr = (void *)skb->data;
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 
-       if (ieee80211_is_data(hdr->frame_control))
-               return skb->protocol != cpu_to_be16(ETH_P_PAE);
-       return false;
+       if (!ieee80211_is_data(hdr->frame_control))
+               return false;
+       return !(info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO);
 }
 
 static inline size_t iwl_rx_trace_len(const struct iwl_trans *trans,
index d0162d426f887b9949e97c1b3163490535f68482..99e1da3123c9a07649008e48d2a2a56dc685d820 100644 (file)
@@ -843,7 +843,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
        int i;
        bool load_module = false;
 
-       fw->ucode_capa.max_probe_length = 200;
+       fw->ucode_capa.max_probe_length = IWL_DEFAULT_MAX_PROBE_LENGTH;
        fw->ucode_capa.standard_phy_calibration_size =
                        IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE;
 
@@ -1032,8 +1032,10 @@ struct iwl_drv *iwl_drv_start(struct iwl_trans *trans,
        int ret;
 
        drv = kzalloc(sizeof(*drv), GFP_KERNEL);
-       if (!drv)
-               return NULL;
+       if (!drv) {
+               ret = -ENOMEM;
+               goto err;
+       }
 
        drv->trans = trans;
        drv->dev = trans->dev;
@@ -1078,7 +1080,7 @@ err_free_dbgfs:
 err_free_drv:
 #endif
        kfree(drv);
-
+err:
        return ERR_PTR(ret);
 }
 
index f844d5c748c09ef49418cf6c7999e0146a188a77..a1223680bc70d2f765e32180176ac7997884a3d7 100644 (file)
  * @IWL_UCODE_TLV_FLAGS_MFP: This uCode image supports MFP (802.11w).
  * @IWL_UCODE_TLV_FLAGS_P2P: This uCode image supports P2P.
  * @IWL_UCODE_TLV_FLAGS_DW_BC_TABLE: The SCD byte count table is in DWORDS
+ * @IWL_UCODE_TLV_FLAGS_UAPSD: This uCode image supports uAPSD
+ * @IWL_UCODE_TLV_FLAGS_RX_ENERGY_API: supports rx signal strength api
+ * @IWL_UCODE_TLV_FLAGS_TIME_EVENT_API_V2: using the new time event API.
+ * @IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS: D3 image supports up to six
+ *     (rather than two) IPv6 addresses
+ * @IWL_UCODE_TLV_FLAGS_BF_UPDATED: new beacon filtering API
  */
 enum iwl_ucode_tlv_flag {
-       IWL_UCODE_TLV_FLAGS_PAN         = BIT(0),
-       IWL_UCODE_TLV_FLAGS_NEWSCAN     = BIT(1),
-       IWL_UCODE_TLV_FLAGS_MFP         = BIT(2),
-       IWL_UCODE_TLV_FLAGS_P2P         = BIT(3),
-       IWL_UCODE_TLV_FLAGS_DW_BC_TABLE = BIT(4),
+       IWL_UCODE_TLV_FLAGS_PAN                 = BIT(0),
+       IWL_UCODE_TLV_FLAGS_NEWSCAN             = BIT(1),
+       IWL_UCODE_TLV_FLAGS_MFP                 = BIT(2),
+       IWL_UCODE_TLV_FLAGS_P2P                 = BIT(3),
+       IWL_UCODE_TLV_FLAGS_DW_BC_TABLE         = BIT(4),
+       IWL_UCODE_TLV_FLAGS_UAPSD               = BIT(6),
+       IWL_UCODE_TLV_FLAGS_RX_ENERGY_API       = BIT(8),
+       IWL_UCODE_TLV_FLAGS_TIME_EVENT_API_V2   = BIT(9),
+       IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS     = BIT(10),
+       IWL_UCODE_TLV_FLAGS_BF_UPDATED          = BIT(11),
 };
 
 /* The default calibrate table size if not specified by firmware file */
@@ -88,6 +99,9 @@ enum iwl_ucode_tlv_flag {
 #define IWL_MAX_STANDARD_PHY_CALIBRATE_TBL_SIZE                19
 #define IWL_MAX_PHY_CALIBRATE_TBL_SIZE                 253
 
+/* The default max probe length if not specified by the firmware file */
+#define IWL_DEFAULT_MAX_PROBE_LENGTH   200
+
 /**
  * enum iwl_ucode_type
  *
index 305c81f2c2b4af8245046c794a31174d33164e94..dfa4d2e3aaa28124c6074a497d7e003d72cd86a2 100644 (file)
@@ -33,6 +33,8 @@
 #include "iwl-io.h"
 #include "iwl-csr.h"
 #include "iwl-debug.h"
+#include "iwl-fh.h"
+#include "iwl-csr.h"
 
 #define IWL_POLL_INTERVAL 10   /* microseconds */
 
@@ -166,3 +168,68 @@ void iwl_clear_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask)
        }
 }
 IWL_EXPORT_SYMBOL(iwl_clear_bits_prph);
+
+static const char *get_fh_string(int cmd)
+{
+#define IWL_CMD(x) case x: return #x
+       switch (cmd) {
+       IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG);
+       IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG);
+       IWL_CMD(FH_RSCSR_CHNL0_WPTR);
+       IWL_CMD(FH_MEM_RCSR_CHNL0_CONFIG_REG);
+       IWL_CMD(FH_MEM_RSSR_SHARED_CTRL_REG);
+       IWL_CMD(FH_MEM_RSSR_RX_STATUS_REG);
+       IWL_CMD(FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV);
+       IWL_CMD(FH_TSSR_TX_STATUS_REG);
+       IWL_CMD(FH_TSSR_TX_ERROR_REG);
+       default:
+               return "UNKNOWN";
+       }
+#undef IWL_CMD
+}
+
+int iwl_dump_fh(struct iwl_trans *trans, char **buf)
+{
+       int i;
+       static const u32 fh_tbl[] = {
+               FH_RSCSR_CHNL0_STTS_WPTR_REG,
+               FH_RSCSR_CHNL0_RBDCB_BASE_REG,
+               FH_RSCSR_CHNL0_WPTR,
+               FH_MEM_RCSR_CHNL0_CONFIG_REG,
+               FH_MEM_RSSR_SHARED_CTRL_REG,
+               FH_MEM_RSSR_RX_STATUS_REG,
+               FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV,
+               FH_TSSR_TX_STATUS_REG,
+               FH_TSSR_TX_ERROR_REG
+       };
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+       if (buf) {
+               int pos = 0;
+               size_t bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
+
+               *buf = kmalloc(bufsz, GFP_KERNEL);
+               if (!*buf)
+                       return -ENOMEM;
+
+               pos += scnprintf(*buf + pos, bufsz - pos,
+                               "FH register values:\n");
+
+               for (i = 0; i < ARRAY_SIZE(fh_tbl); i++)
+                       pos += scnprintf(*buf + pos, bufsz - pos,
+                               "  %34s: 0X%08x\n",
+                               get_fh_string(fh_tbl[i]),
+                               iwl_read_direct32(trans, fh_tbl[i]));
+
+               return pos;
+       }
+#endif
+
+       IWL_ERR(trans, "FH register values:\n");
+       for (i = 0; i <  ARRAY_SIZE(fh_tbl); i++)
+               IWL_ERR(trans, "  %34s: 0X%08x\n",
+                       get_fh_string(fh_tbl[i]),
+                       iwl_read_direct32(trans, fh_tbl[i]));
+
+       return 0;
+}
index fd9f5b97fff3c937f0d5c177737e043f6256cd51..63d10ec08dbc2f894ea769169d17d0be2027a4c4 100644 (file)
@@ -77,4 +77,7 @@ void iwl_set_bits_mask_prph(struct iwl_trans *trans, u32 ofs,
                            u32 bits, u32 mask);
 void iwl_clear_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask);
 
+/* Error handling */
+int iwl_dump_fh(struct iwl_trans *trans, char **buf);
+
 #endif
index acd2665afb8cd9c942cee42527b62acc923a6749..b76a9a8fc0b3dbe831f74ce7d20a811e6cf17ada 100644 (file)
@@ -118,6 +118,7 @@ static const u8 iwl_nvm_channels[] = {
 #define LAST_2GHZ_HT_PLUS      9
 #define LAST_5GHZ_HT           161
 
+#define DEFAULT_MAX_TX_POWER 16
 
 /* rate data (static) */
 static struct ieee80211_rate iwl_cfg80211_rates[] = {
@@ -232,8 +233,11 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
 
                /* Initialize regulatory-based run-time data */
 
-               /* TODO: read the real value from the NVM */
-               channel->max_power = 0;
+               /*
+                * Default value - highest tx power value.  max_power
+                * is not used in mvm, and is used for backwards compatibility
+                */
+               channel->max_power = DEFAULT_MAX_TX_POWER;
                is_5ghz = channel->band == IEEE80211_BAND_5GHZ;
                IWL_DEBUG_EEPROM(dev,
                                 "Ch. %d [%sGHz] %s%s%s%s%s%s(0x%02x %ddBm): Ad-Hoc %ssupported\n",
index 98c7aa7346da1ebb78e6ab98dd4c6fffb25d296b..976448a57d02649a7263b499bcc4974a5a1db18c 100644 (file)
@@ -93,7 +93,7 @@ struct iwl_cfg;
  *     1) The driver layer (iwl-drv.c) chooses the op_mode based on the
  *        capabilities advertized by the fw file (in TLV format).
  *     2) The driver layer starts the op_mode (ops->start)
- *     3) The op_mode registers registers mac80211
+ *     3) The op_mode registers mac80211
  *     4) The op_mode is governed by mac80211
  *     5) The driver layer stops the op_mode
  */
@@ -112,7 +112,7 @@ struct iwl_cfg;
  * @stop: stop the op_mode. Must free all the memory allocated.
  *     May sleep
  * @rx: Rx notification to the op_mode. rxb is the Rx buffer itself. Cmd is the
- *     HCMD the this Rx responds to.
+ *     HCMD this Rx responds to.
  *     This callback may sleep, it is called from a threaded IRQ handler.
  * @queue_full: notifies that a HW queue is full.
  *     Must be atomic and called with BH disabled.
index a70c7b9d9bad897345fb1e1e89d5c421e0d8a3da..ff8cc75c189d4d842abf8611fb8c5e7c7a63bf38 100644 (file)
@@ -97,8 +97,6 @@
 
 #define APMG_PCIDEV_STT_VAL_L1_ACT_DIS         (0x00000800)
 
-#define APMG_RTC_INT_STT_RFKILL                (0x10000000)
-
 /* Device system time */
 #define DEVICE_SYSTEM_TIME_REG 0xA0206C
 
index 8d91422c598265c6b69b32874cf3d77fc7c9554c..dd57a36ecb1005d7571290a59925fd4d641b61aa 100644 (file)
@@ -180,7 +180,7 @@ struct iwl_rx_packet {
  * enum CMD_MODE - how to send the host commands ?
  *
  * @CMD_SYNC: The caller will be stalled until the fw responds to the command
- * @CMD_ASYNC: Return right away and don't want for the response
+ * @CMD_ASYNC: Return right away and don't wait for the response
  * @CMD_WANT_SKB: valid only with CMD_SYNC. The caller needs the buffer of the
  *     response. The caller needs to call iwl_free_resp when done.
  */
@@ -218,7 +218,7 @@ struct iwl_device_cmd {
  *
  * @IWL_HCMD_DFL_NOCOPY: By default, the command is copied to the host command's
  *     ring. The transport layer doesn't map the command's buffer to DMA, but
- *     rather copies it to an previously allocated DMA buffer. This flag tells
+ *     rather copies it to a previously allocated DMA buffer. This flag tells
  *     the transport layer not to copy the command, but to map the existing
  *     buffer (that is passed in) instead. This saves the memcpy and allows
  *     commands that are bigger than the fixed buffer to be submitted.
@@ -243,7 +243,7 @@ enum iwl_hcmd_dataflag {
  * @handler_status: return value of the handler of the command
  *     (put in setup_rx_handlers) - valid for SYNC mode only
  * @flags: can be CMD_*
- * @len: array of the lenths of the chunks in data
+ * @len: array of the lengths of the chunks in data
  * @dataflags: IWL_HCMD_DFL_*
  * @id: id of the host command
  */
@@ -396,8 +396,6 @@ struct iwl_trans;
  *     May sleep
  * @dbgfs_register: add the dbgfs files under this directory. Files will be
  *     automatically deleted.
- * @suspend: stop the device unless WoWLAN is configured
- * @resume: resume activity of the device
  * @write8: write a u8 to a register at offset ofs from the BAR
  * @write32: write a u32 to a register at offset ofs from the BAR
  * @read32: read a u32 register at offset ofs from the BAR
@@ -443,10 +441,7 @@ struct iwl_trans_ops {
 
        int (*dbgfs_register)(struct iwl_trans *trans, struct dentry* dir);
        int (*wait_tx_queue_empty)(struct iwl_trans *trans);
-#ifdef CONFIG_PM_SLEEP
-       int (*suspend)(struct iwl_trans *trans);
-       int (*resume)(struct iwl_trans *trans);
-#endif
+
        void (*write8)(struct iwl_trans *trans, u32 ofs, u8 val);
        void (*write32)(struct iwl_trans *trans, u32 ofs, u32 val);
        u32 (*read32)(struct iwl_trans *trans, u32 ofs);
@@ -700,18 +695,6 @@ static inline int iwl_trans_dbgfs_register(struct iwl_trans *trans,
        return trans->ops->dbgfs_register(trans, dir);
 }
 
-#ifdef CONFIG_PM_SLEEP
-static inline int iwl_trans_suspend(struct iwl_trans *trans)
-{
-       return trans->ops->suspend(trans);
-}
-
-static inline int iwl_trans_resume(struct iwl_trans *trans)
-{
-       return trans->ops->resume(trans);
-}
-#endif
-
 static inline void iwl_trans_write8(struct iwl_trans *trans, u32 ofs, u8 val)
 {
        trans->ops->write8(trans, ofs, val);
index ff856e543ae85689549fbd1fcae64320a2ca67fa..6d73817850ce6753c5a88481b78d1d1f0f1d3864 100644 (file)
@@ -2,7 +2,7 @@ obj-$(CONFIG_IWLMVM)   += iwlmvm.o
 iwlmvm-y += fw.o mac80211.o nvm.o ops.o phy-ctxt.o mac-ctxt.o
 iwlmvm-y += utils.o rx.o tx.o binding.o quota.o sta.o
 iwlmvm-y += scan.o time-event.o rs.o
-iwlmvm-y += power.o bt-coex.o
+iwlmvm-y += power.o power_legacy.o bt-coex.o
 iwlmvm-y += led.o tt.o
 iwlmvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o
 iwlmvm-$(CONFIG_PM_SLEEP) += d3.o
index dbd622a3929ce47c0c91ca4a5d78b82c2c38eaca..0fad98b85f60dd8dedca668296b27382b15ca0f6 100644 (file)
@@ -220,66 +220,87 @@ static const __le32 iwl_single_shared_ant_lookup[BT_COEX_LUT_SIZE] = {
 
 int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
 {
-       struct iwl_bt_coex_cmd cmd = {
-               .max_kill = 5,
-               .bt3_time_t7_value = 1,
-               .bt3_prio_sample_time = 2,
-               .bt3_timer_t2_value = 0xc,
+       struct iwl_bt_coex_cmd *bt_cmd;
+       struct iwl_host_cmd cmd = {
+               .id = BT_CONFIG,
+               .len = { sizeof(*bt_cmd), },
+               .dataflags = { IWL_HCMD_DFL_NOCOPY, },
+               .flags = CMD_SYNC,
        };
        int ret;
 
-       cmd.flags = iwlwifi_mod_params.bt_coex_active ?
+       /* go to CALIB state in internal BT-Coex state machine */
+       ret = iwl_send_bt_env(mvm, BT_COEX_ENV_OPEN,
+                             BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
+       if (ret)
+               return ret;
+
+       ret  = iwl_send_bt_env(mvm, BT_COEX_ENV_CLOSE,
+                              BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
+       if (ret)
+               return ret;
+
+       bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL);
+       if (!bt_cmd)
+               return -ENOMEM;
+       cmd.data[0] = bt_cmd;
+
+       bt_cmd->max_kill = 5;
+       bt_cmd->bt3_time_t7_value = 1;
+       bt_cmd->bt3_prio_sample_time = 2;
+       bt_cmd->bt3_timer_t2_value = 0xc;
+
+       bt_cmd->flags = iwlwifi_mod_params.bt_coex_active ?
                        BT_COEX_NW : BT_COEX_DISABLE;
-       cmd.flags |= BT_CH_PRIMARY_EN | BT_SYNC_2_BT_DISABLE;
+       bt_cmd->flags |= BT_CH_PRIMARY_EN | BT_SYNC_2_BT_DISABLE;
 
-       cmd.valid_bit_msk = cpu_to_le16(BT_VALID_ENABLE |
-                                       BT_VALID_BT_PRIO_BOOST |
-                                       BT_VALID_MAX_KILL |
-                                       BT_VALID_3W_TMRS |
-                                       BT_VALID_KILL_ACK |
-                                       BT_VALID_KILL_CTS |
-                                       BT_VALID_REDUCED_TX_POWER |
-                                       BT_VALID_LUT);
+       bt_cmd->valid_bit_msk = cpu_to_le16(BT_VALID_ENABLE |
+                                           BT_VALID_BT_PRIO_BOOST |
+                                           BT_VALID_MAX_KILL |
+                                           BT_VALID_3W_TMRS |
+                                           BT_VALID_KILL_ACK |
+                                           BT_VALID_KILL_CTS |
+                                           BT_VALID_REDUCED_TX_POWER |
+                                           BT_VALID_LUT);
 
        if (mvm->cfg->bt_shared_single_ant)
-               memcpy(&cmd.decision_lut, iwl_single_shared_ant_lookup,
+               memcpy(&bt_cmd->decision_lut, iwl_single_shared_ant_lookup,
                       sizeof(iwl_single_shared_ant_lookup));
        else if (is_loose_coex())
-               memcpy(&cmd.decision_lut, iwl_loose_lookup,
+               memcpy(&bt_cmd->decision_lut, iwl_loose_lookup,
                       sizeof(iwl_tight_lookup));
        else
-               memcpy(&cmd.decision_lut, iwl_tight_lookup,
+               memcpy(&bt_cmd->decision_lut, iwl_tight_lookup,
                       sizeof(iwl_tight_lookup));
 
-       cmd.bt_prio_boost = cpu_to_le32(IWL_BT_DEFAULT_BOOST);
-       cmd.kill_ack_msk =
+       bt_cmd->bt_prio_boost = cpu_to_le32(IWL_BT_DEFAULT_BOOST);
+       bt_cmd->kill_ack_msk =
                cpu_to_le32(iwl_bt_ack_kill_msk[BT_KILL_MSK_DEFAULT]);
-       cmd.kill_cts_msk =
+       bt_cmd->kill_cts_msk =
                cpu_to_le32(iwl_bt_cts_kill_msk[BT_KILL_MSK_DEFAULT]);
 
        memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif));
 
-       /* go to CALIB state in internal BT-Coex state machine */
-       ret = iwl_send_bt_env(mvm, BT_COEX_ENV_OPEN,
-                             BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
-       if (ret)
-               return ret;
-
-       ret  = iwl_send_bt_env(mvm, BT_COEX_ENV_CLOSE,
-                              BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
-       if (ret)
-               return ret;
+       ret = iwl_mvm_send_cmd(mvm, &cmd);
 
-       return iwl_mvm_send_cmd_pdu(mvm, BT_CONFIG, CMD_SYNC,
-                                   sizeof(cmd), &cmd);
+       kfree(bt_cmd);
+       return ret;
 }
 
 static int iwl_mvm_bt_udpate_ctrl_kill_msk(struct iwl_mvm *mvm,
                                           bool reduced_tx_power)
 {
        enum iwl_bt_kill_msk bt_kill_msk;
-       struct iwl_bt_coex_cmd cmd = {};
+       struct iwl_bt_coex_cmd *bt_cmd;
        struct iwl_bt_coex_profile_notif *notif = &mvm->last_bt_notif;
+       struct iwl_host_cmd cmd = {
+               .id = BT_CONFIG,
+               .data[0] = &bt_cmd,
+               .len = { sizeof(*bt_cmd), },
+               .dataflags = { IWL_HCMD_DFL_NOCOPY, },
+               .flags = CMD_SYNC,
+       };
+       int ret = 0;
 
        lockdep_assert_held(&mvm->mutex);
 
@@ -308,24 +329,40 @@ static int iwl_mvm_bt_udpate_ctrl_kill_msk(struct iwl_mvm *mvm,
                return 0;
 
        mvm->bt_kill_msk = bt_kill_msk;
-       cmd.kill_ack_msk = cpu_to_le32(iwl_bt_ack_kill_msk[bt_kill_msk]);
-       cmd.kill_cts_msk = cpu_to_le32(iwl_bt_cts_kill_msk[bt_kill_msk]);
-       cmd.valid_bit_msk = cpu_to_le16(BT_VALID_KILL_ACK | BT_VALID_KILL_CTS);
+
+       bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL);
+       if (!bt_cmd)
+               return -ENOMEM;
+       cmd.data[0] = bt_cmd;
+
+       bt_cmd->kill_ack_msk = cpu_to_le32(iwl_bt_ack_kill_msk[bt_kill_msk]);
+       bt_cmd->kill_cts_msk = cpu_to_le32(iwl_bt_cts_kill_msk[bt_kill_msk]);
+       bt_cmd->valid_bit_msk =
+               cpu_to_le16(BT_VALID_KILL_ACK | BT_VALID_KILL_CTS);
 
        IWL_DEBUG_COEX(mvm, "bt_kill_msk = %d\n", bt_kill_msk);
-       return iwl_mvm_send_cmd_pdu(mvm, BT_CONFIG, CMD_SYNC,
-                                   sizeof(cmd), &cmd);
+
+       ret = iwl_mvm_send_cmd(mvm, &cmd);
+
+       kfree(bt_cmd);
+       return ret;
 }
 
 static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id,
                                       bool enable)
 {
-       struct iwl_bt_coex_cmd cmd = {
-               .valid_bit_msk = cpu_to_le16(BT_VALID_REDUCED_TX_POWER),
-               .bt_reduced_tx_power = sta_id,
+       struct iwl_bt_coex_cmd *bt_cmd;
+       /* Send ASYNC since this can be sent from an atomic context */
+       struct iwl_host_cmd cmd = {
+               .id = BT_CONFIG,
+               .len = { sizeof(*bt_cmd), },
+               .dataflags = { IWL_HCMD_DFL_DUP, },
+               .flags = CMD_ASYNC,
        };
+
        struct ieee80211_sta *sta;
        struct iwl_mvm_sta *mvmsta;
+       int ret;
 
        /* This can happen if the station has been removed right now */
        if (sta_id == IWL_MVM_STATION_COUNT)
@@ -339,17 +376,26 @@ static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id,
        if (mvmsta->bt_reduced_txpower == enable)
                return 0;
 
+       bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_ATOMIC);
+       if (!bt_cmd)
+               return -ENOMEM;
+       cmd.data[0] = bt_cmd;
+
+       bt_cmd->valid_bit_msk = cpu_to_le16(BT_VALID_REDUCED_TX_POWER),
+       bt_cmd->bt_reduced_tx_power = sta_id;
+
        if (enable)
-               cmd.bt_reduced_tx_power |= BT_REDUCED_TX_POWER_BIT;
+               bt_cmd->bt_reduced_tx_power |= BT_REDUCED_TX_POWER_BIT;
 
        IWL_DEBUG_COEX(mvm, "%sable reduced Tx Power for sta %d\n",
                       enable ? "en" : "dis", sta_id);
 
        mvmsta->bt_reduced_txpower = enable;
 
-       /* Send ASYNC since this can be sent from an atomic context */
-       return iwl_mvm_send_cmd_pdu(mvm, BT_CONFIG, CMD_ASYNC,
-                                   sizeof(cmd), &cmd);
+       ret = iwl_mvm_send_cmd(mvm, &cmd);
+
+       kfree(bt_cmd);
+       return ret;
 }
 
 struct iwl_bt_iterator_data {
@@ -384,6 +430,10 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
 
        smps_mode = IEEE80211_SMPS_AUTOMATIC;
 
+       /* non associated BSSes aren't to be considered */
+       if (!vif->bss_conf.assoc)
+               return;
+
        if (band != IEEE80211_BAND_2GHZ) {
                iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX,
                                    smps_mode);
@@ -523,6 +573,8 @@ static void iwl_mvm_bt_rssi_iterator(void *_data, u8 *mac,
                                        lockdep_is_held(&mvm->mutex));
        mvmsta = (void *)sta->drv_priv;
 
+       data->num_bss_ifaces++;
+
        /*
         * This interface doesn't support reduced Tx power (because of low
         * RSSI probably), then set bt_kill_msk to default values.
@@ -588,23 +640,5 @@ void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 
 void iwl_mvm_bt_coex_vif_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 {
-       struct ieee80211_chanctx_conf *chanctx_conf;
-       enum ieee80211_band band;
-
-       rcu_read_lock();
-       chanctx_conf = rcu_dereference(vif->chanctx_conf);
-       if (chanctx_conf && chanctx_conf->def.chan)
-               band = chanctx_conf->def.chan->band;
-       else
-               band = -1;
-       rcu_read_unlock();
-
-       /* if we are in 2GHz we will get a notification from the fw */
-       if (band == IEEE80211_BAND_2GHZ)
-               return;
-
-       /* else, we can remove all the constraints */
-       memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif));
-
        iwl_mvm_bt_coex_notif_handle(mvm);
 }
diff --git a/drivers/net/wireless/iwlwifi/mvm/constants.h b/drivers/net/wireless/iwlwifi/mvm/constants.h
new file mode 100644 (file)
index 0000000..2bf29f7
--- /dev/null
@@ -0,0 +1,80 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#ifndef __MVM_CONSTANTS_H
+#define __MVM_CONSTANTS_H
+
+#define IWL_MVM_DEFAULT_PS_TX_DATA_TIMEOUT     (100 * USEC_PER_MSEC)
+#define IWL_MVM_DEFAULT_PS_RX_DATA_TIMEOUT     (100 * USEC_PER_MSEC)
+#define IWL_MVM_WOWLAN_PS_TX_DATA_TIMEOUT      (10 * USEC_PER_MSEC)
+#define IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT      (10 * USEC_PER_MSEC)
+#define IWL_MVM_UAPSD_RX_DATA_TIMEOUT          (50 * USEC_PER_MSEC)
+#define IWL_MVM_UAPSD_TX_DATA_TIMEOUT          (50 * USEC_PER_MSEC)
+#define IWL_MVM_PS_HEAVY_TX_THLD_PACKETS       20
+#define IWL_MVM_PS_HEAVY_RX_THLD_PACKETS       20
+#define IWL_MVM_PS_HEAVY_TX_THLD_PERCENT       50
+#define IWL_MVM_PS_HEAVY_RX_THLD_PERCENT       50
+#define IWL_MVM_PS_SNOOZE_INTERVAL             25
+#define IWL_MVM_PS_SNOOZE_WINDOW               50
+#define IWL_MVM_WOWLAN_PS_SNOOZE_WINDOW                25
+
+#endif /* __MVM_CONSTANTS_H */
index 83da884cf3032564df37a96be42c1601496fbc52..417639f77b01c8a9a166de79e4374cefa2313ba6 100644 (file)
@@ -105,7 +105,7 @@ void iwl_mvm_ipv6_addr_change(struct ieee80211_hw *hw,
        list_for_each_entry(ifa, &idev->addr_list, if_list) {
                mvmvif->target_ipv6_addrs[idx] = ifa->addr;
                idx++;
-               if (idx >= IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS)
+               if (idx >= IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX)
                        break;
        }
        read_unlock_bh(&idev->lock);
@@ -378,36 +378,68 @@ static int iwl_mvm_send_patterns(struct iwl_mvm *mvm,
 static int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
                                      struct ieee80211_vif *vif)
 {
-       struct iwl_proto_offload_cmd cmd = {};
+       union {
+               struct iwl_proto_offload_cmd_v1 v1;
+               struct iwl_proto_offload_cmd_v2 v2;
+       } cmd = {};
+       struct iwl_proto_offload_cmd_common *common;
+       u32 enabled = 0, size;
 #if IS_ENABLED(CONFIG_IPV6)
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        int i;
 
-       if (mvmvif->num_target_ipv6_addrs) {
-               cmd.enabled |= cpu_to_le32(IWL_D3_PROTO_OFFLOAD_NS);
-               memcpy(cmd.ndp_mac_addr, vif->addr, ETH_ALEN);
-       }
+       if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS) {
+               if (mvmvif->num_target_ipv6_addrs) {
+                       enabled |= IWL_D3_PROTO_OFFLOAD_NS;
+                       memcpy(cmd.v2.ndp_mac_addr, vif->addr, ETH_ALEN);
+               }
+
+               BUILD_BUG_ON(sizeof(cmd.v2.target_ipv6_addr[0]) !=
+                            sizeof(mvmvif->target_ipv6_addrs[0]));
 
-       BUILD_BUG_ON(sizeof(cmd.target_ipv6_addr[i]) !=
-                    sizeof(mvmvif->target_ipv6_addrs[i]));
+               for (i = 0; i < min(mvmvif->num_target_ipv6_addrs,
+                                   IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V2); i++)
+                       memcpy(cmd.v2.target_ipv6_addr[i],
+                              &mvmvif->target_ipv6_addrs[i],
+                              sizeof(cmd.v2.target_ipv6_addr[i]));
+       } else {
+               if (mvmvif->num_target_ipv6_addrs) {
+                       enabled |= IWL_D3_PROTO_OFFLOAD_NS;
+                       memcpy(cmd.v1.ndp_mac_addr, vif->addr, ETH_ALEN);
+               }
 
-       for (i = 0; i < mvmvif->num_target_ipv6_addrs; i++)
-               memcpy(cmd.target_ipv6_addr[i],
-                      &mvmvif->target_ipv6_addrs[i],
-                      sizeof(cmd.target_ipv6_addr[i]));
+               BUILD_BUG_ON(sizeof(cmd.v1.target_ipv6_addr[0]) !=
+                            sizeof(mvmvif->target_ipv6_addrs[0]));
+
+               for (i = 0; i < min(mvmvif->num_target_ipv6_addrs,
+                                   IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V1); i++)
+                       memcpy(cmd.v1.target_ipv6_addr[i],
+                              &mvmvif->target_ipv6_addrs[i],
+                              sizeof(cmd.v1.target_ipv6_addr[i]));
+       }
 #endif
 
+       if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS) {
+               common = &cmd.v2.common;
+               size = sizeof(cmd.v2);
+       } else {
+               common = &cmd.v1.common;
+               size = sizeof(cmd.v1);
+       }
+
        if (vif->bss_conf.arp_addr_cnt) {
-               cmd.enabled |= cpu_to_le32(IWL_D3_PROTO_OFFLOAD_ARP);
-               cmd.host_ipv4_addr = vif->bss_conf.arp_addr_list[0];
-               memcpy(cmd.arp_mac_addr, vif->addr, ETH_ALEN);
+               enabled |= IWL_D3_PROTO_OFFLOAD_ARP;
+               common->host_ipv4_addr = vif->bss_conf.arp_addr_list[0];
+               memcpy(common->arp_mac_addr, vif->addr, ETH_ALEN);
        }
 
-       if (!cmd.enabled)
+       if (!enabled)
                return 0;
 
+       common->enabled = cpu_to_le32(enabled);
+
        return iwl_mvm_send_cmd_pdu(mvm, PROT_OFFLOAD_CONFIG_CMD, CMD_SYNC,
-                                   sizeof(cmd), &cmd);
+                                   size, &cmd);
 }
 
 enum iwl_mvm_tcp_packet_type {
@@ -1077,73 +1109,16 @@ int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
        return __iwl_mvm_suspend(hw, wowlan, false);
 }
 
-static void iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
-                                        struct ieee80211_vif *vif)
+static void iwl_mvm_report_wakeup_reasons(struct iwl_mvm *mvm,
+                                         struct ieee80211_vif *vif,
+                                         struct iwl_wowlan_status *status)
 {
-       u32 base = mvm->error_event_table;
-       struct error_table_start {
-               /* cf. struct iwl_error_event_table */
-               u32 valid;
-               u32 error_id;
-       } err_info;
+       struct sk_buff *pkt = NULL;
        struct cfg80211_wowlan_wakeup wakeup = {
                .pattern_idx = -1,
        };
        struct cfg80211_wowlan_wakeup *wakeup_report = &wakeup;
-       struct iwl_host_cmd cmd = {
-               .id = WOWLAN_GET_STATUSES,
-               .flags = CMD_SYNC | CMD_WANT_SKB,
-       };
-       struct iwl_wowlan_status *status;
-       u32 reasons;
-       int ret, len;
-       struct sk_buff *pkt = NULL;
-
-       iwl_trans_read_mem_bytes(mvm->trans, base,
-                                &err_info, sizeof(err_info));
-
-       if (err_info.valid) {
-               IWL_INFO(mvm, "error table is valid (%d)\n",
-                        err_info.valid);
-               if (err_info.error_id == RF_KILL_INDICATOR_FOR_WOWLAN) {
-                       wakeup.rfkill_release = true;
-                       ieee80211_report_wowlan_wakeup(vif, &wakeup,
-                                                      GFP_KERNEL);
-               }
-               return;
-       }
-
-       /* only for tracing for now */
-       ret = iwl_mvm_send_cmd_pdu(mvm, OFFLOADS_QUERY_CMD, CMD_SYNC, 0, NULL);
-       if (ret)
-               IWL_ERR(mvm, "failed to query offload statistics (%d)\n", ret);
-
-       ret = iwl_mvm_send_cmd(mvm, &cmd);
-       if (ret) {
-               IWL_ERR(mvm, "failed to query status (%d)\n", ret);
-               return;
-       }
-
-       /* RF-kill already asserted again... */
-       if (!cmd.resp_pkt)
-               return;
-
-       len = le32_to_cpu(cmd.resp_pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
-       if (len - sizeof(struct iwl_cmd_header) < sizeof(*status)) {
-               IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
-               goto out;
-       }
-
-       status = (void *)cmd.resp_pkt->data;
-
-       if (len - sizeof(struct iwl_cmd_header) !=
-           sizeof(*status) +
-           ALIGN(le32_to_cpu(status->wake_packet_bufsize), 4)) {
-               IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
-               goto out;
-       }
-
-       reasons = le32_to_cpu(status->wakeup_reasons);
+       u32 reasons = le32_to_cpu(status->wakeup_reasons);
 
        if (reasons == IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS) {
                wakeup_report = NULL;
@@ -1206,6 +1181,12 @@ static void iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
                        pktsize -= hdrlen;
 
                        if (ieee80211_has_protected(hdr->frame_control)) {
+                               /*
+                                * This is unlocked and using gtk_i(c)vlen,
+                                * but since everything is under RTNL still
+                                * that's not really a problem - changing
+                                * it would be difficult.
+                                */
                                if (is_multicast_ether_addr(hdr->addr1)) {
                                        ivlen = mvm->gtk_ivlen;
                                        icvlen += mvm->gtk_icvlen;
@@ -1256,9 +1237,82 @@ static void iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
  report:
        ieee80211_report_wowlan_wakeup(vif, wakeup_report, GFP_KERNEL);
        kfree_skb(pkt);
+}
 
- out:
+/* releases the MVM mutex */
+static void iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
+                                        struct ieee80211_vif *vif)
+{
+       u32 base = mvm->error_event_table;
+       struct error_table_start {
+               /* cf. struct iwl_error_event_table */
+               u32 valid;
+               u32 error_id;
+       } err_info;
+       struct iwl_host_cmd cmd = {
+               .id = WOWLAN_GET_STATUSES,
+               .flags = CMD_SYNC | CMD_WANT_SKB,
+       };
+       struct iwl_wowlan_status *status;
+       int ret, len;
+
+       iwl_trans_read_mem_bytes(mvm->trans, base,
+                                &err_info, sizeof(err_info));
+
+       if (err_info.valid) {
+               IWL_INFO(mvm, "error table is valid (%d)\n",
+                        err_info.valid);
+               if (err_info.error_id == RF_KILL_INDICATOR_FOR_WOWLAN) {
+                       struct cfg80211_wowlan_wakeup wakeup = {
+                               .rfkill_release = true,
+                       };
+                       ieee80211_report_wowlan_wakeup(vif, &wakeup,
+                                                      GFP_KERNEL);
+               }
+               goto out_unlock;
+       }
+
+       /* only for tracing for now */
+       ret = iwl_mvm_send_cmd_pdu(mvm, OFFLOADS_QUERY_CMD, CMD_SYNC, 0, NULL);
+       if (ret)
+               IWL_ERR(mvm, "failed to query offload statistics (%d)\n", ret);
+
+       ret = iwl_mvm_send_cmd(mvm, &cmd);
+       if (ret) {
+               IWL_ERR(mvm, "failed to query status (%d)\n", ret);
+               goto out_unlock;
+       }
+
+       /* RF-kill already asserted again... */
+       if (!cmd.resp_pkt)
+               goto out_unlock;
+
+       len = le32_to_cpu(cmd.resp_pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
+       if (len - sizeof(struct iwl_cmd_header) < sizeof(*status)) {
+               IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
+               goto out_free_resp;
+       }
+
+       status = (void *)cmd.resp_pkt->data;
+
+       if (len - sizeof(struct iwl_cmd_header) !=
+           sizeof(*status) +
+           ALIGN(le32_to_cpu(status->wake_packet_bufsize), 4)) {
+               IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
+               goto out_free_resp;
+       }
+
+       /* now we have all the data we need, unlock to avoid mac80211 issues */
+       mutex_unlock(&mvm->mutex);
+
+       iwl_mvm_report_wakeup_reasons(mvm, vif, status);
+       iwl_free_resp(&cmd);
+       return;
+
+ out_free_resp:
        iwl_free_resp(&cmd);
+ out_unlock:
+       mutex_unlock(&mvm->mutex);
 }
 
 static void iwl_mvm_read_d3_sram(struct iwl_mvm *mvm)
@@ -1315,10 +1369,13 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
        iwl_mvm_read_d3_sram(mvm);
 
        iwl_mvm_query_wakeup_reasons(mvm, vif);
+       /* has unlocked the mutex, so skip that */
+       goto out;
 
  out_unlock:
        mutex_unlock(&mvm->mutex);
 
+ out:
        if (!test && vif)
                ieee80211_resume_disconnect(vif);
 
index c24a744910acd447c0b7376670e5e97b038cb09c..aac81b8984b05ec87f50fa09b32f7ab9a3dc36ed 100644 (file)
@@ -352,6 +352,10 @@ static void iwl_dbgfs_update_pm(struct iwl_mvm *mvm,
                IWL_DEBUG_POWER(mvm, "lprx_rssi_threshold=%d\n", val);
                dbgfs_pm->lprx_rssi_threshold = val;
                break;
+       case MVM_DEBUGFS_PM_SNOOZE_ENABLE:
+               IWL_DEBUG_POWER(mvm, "snooze_enable=%d\n", val);
+               dbgfs_pm->snooze_ena = val;
+               break;
        }
 }
 
@@ -405,6 +409,10 @@ static ssize_t iwl_dbgfs_pm_params_write(struct file *file,
                    POWER_LPRX_RSSI_THRESHOLD_MIN)
                        return -EINVAL;
                param = MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD;
+       } else if (!strncmp("snooze_enable=", buf, 14)) {
+               if (sscanf(buf + 14, "%d", &val) != 1)
+                       return -EINVAL;
+               param = MVM_DEBUGFS_PM_SNOOZE_ENABLE;
        } else {
                return -EINVAL;
        }
@@ -424,40 +432,11 @@ static ssize_t iwl_dbgfs_pm_params_read(struct file *file,
        struct ieee80211_vif *vif = file->private_data;
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        struct iwl_mvm *mvm = mvmvif->dbgfs_data;
-       struct iwl_powertable_cmd cmd = {};
-       char buf[256];
+       char buf[512];
        int bufsz = sizeof(buf);
-       int pos = 0;
+       int pos;
 
-       iwl_mvm_power_build_cmd(mvm, vif, &cmd);
-
-       pos += scnprintf(buf+pos, bufsz-pos, "disable_power_off = %d\n",
-                        (cmd.flags &
-                        cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK)) ?
-                        0 : 1);
-       pos += scnprintf(buf+pos, bufsz-pos, "skip_dtim_periods = %d\n",
-                        le32_to_cpu(cmd.skip_dtim_periods));
-       pos += scnprintf(buf+pos, bufsz-pos, "power_scheme = %d\n",
-                        iwlmvm_mod_params.power_scheme);
-       pos += scnprintf(buf+pos, bufsz-pos, "flags = 0x%x\n",
-                        le16_to_cpu(cmd.flags));
-       pos += scnprintf(buf+pos, bufsz-pos, "keep_alive = %d\n",
-                        cmd.keep_alive_seconds);
-
-       if (cmd.flags & cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK)) {
-               pos += scnprintf(buf+pos, bufsz-pos, "skip_over_dtim = %d\n",
-                                (cmd.flags &
-                                cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK)) ?
-                                1 : 0);
-               pos += scnprintf(buf+pos, bufsz-pos, "rx_data_timeout = %d\n",
-                                le32_to_cpu(cmd.rx_data_timeout));
-               pos += scnprintf(buf+pos, bufsz-pos, "tx_data_timeout = %d\n",
-                                le32_to_cpu(cmd.tx_data_timeout));
-               if (cmd.flags & cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK))
-                       pos += scnprintf(buf+pos, bufsz-pos,
-                                        "lprx_rssi_threshold = %d\n",
-                                        le32_to_cpu(cmd.lprx_rssi_threshold));
-       }
+       pos = iwl_mvm_power_dbgfs_read(mvm, vif, buf, bufsz);
 
        return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
 }
@@ -621,25 +600,160 @@ static ssize_t iwl_dbgfs_bt_notif_read(struct file *file, char __user *user_buf,
 }
 #undef BT_MBOX_PRINT
 
+#define PRINT_STATS_LE32(_str, _val)                                   \
+                        pos += scnprintf(buf + pos, bufsz - pos,       \
+                                         fmt_table, _str,              \
+                                         le32_to_cpu(_val))
+
+static ssize_t iwl_dbgfs_fw_rx_stats_read(struct file *file,
+                                         char __user *user_buf, size_t count,
+                                         loff_t *ppos)
+{
+       struct iwl_mvm *mvm = file->private_data;
+       static const char *fmt_table = "\t%-30s %10u\n";
+       static const char *fmt_header = "%-32s\n";
+       int pos = 0;
+       char *buf;
+       int ret;
+       int bufsz = sizeof(struct mvm_statistics_rx_phy) * 20 +
+                   sizeof(struct mvm_statistics_rx_non_phy) * 10 +
+                   sizeof(struct mvm_statistics_rx_ht_phy) * 10 + 200;
+       struct mvm_statistics_rx_phy *ofdm;
+       struct mvm_statistics_rx_phy *cck;
+       struct mvm_statistics_rx_non_phy *general;
+       struct mvm_statistics_rx_ht_phy *ht;
+
+       buf = kzalloc(bufsz, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
+       mutex_lock(&mvm->mutex);
+
+       ofdm = &mvm->rx_stats.ofdm;
+       cck = &mvm->rx_stats.cck;
+       general = &mvm->rx_stats.general;
+       ht = &mvm->rx_stats.ofdm_ht;
+
+       pos += scnprintf(buf + pos, bufsz - pos, fmt_header,
+                        "Statistics_Rx - OFDM");
+       PRINT_STATS_LE32("ina_cnt", ofdm->ina_cnt);
+       PRINT_STATS_LE32("fina_cnt", ofdm->fina_cnt);
+       PRINT_STATS_LE32("plcp_err", ofdm->plcp_err);
+       PRINT_STATS_LE32("crc32_err", ofdm->crc32_err);
+       PRINT_STATS_LE32("overrun_err", ofdm->overrun_err);
+       PRINT_STATS_LE32("early_overrun_err", ofdm->early_overrun_err);
+       PRINT_STATS_LE32("crc32_good", ofdm->crc32_good);
+       PRINT_STATS_LE32("false_alarm_cnt", ofdm->false_alarm_cnt);
+       PRINT_STATS_LE32("fina_sync_err_cnt", ofdm->fina_sync_err_cnt);
+       PRINT_STATS_LE32("sfd_timeout", ofdm->sfd_timeout);
+       PRINT_STATS_LE32("fina_timeout", ofdm->fina_timeout);
+       PRINT_STATS_LE32("unresponded_rts", ofdm->unresponded_rts);
+       PRINT_STATS_LE32("rxe_frame_lmt_overrun",
+                        ofdm->rxe_frame_limit_overrun);
+       PRINT_STATS_LE32("sent_ack_cnt", ofdm->sent_ack_cnt);
+       PRINT_STATS_LE32("sent_cts_cnt", ofdm->sent_cts_cnt);
+       PRINT_STATS_LE32("sent_ba_rsp_cnt", ofdm->sent_ba_rsp_cnt);
+       PRINT_STATS_LE32("dsp_self_kill", ofdm->dsp_self_kill);
+       PRINT_STATS_LE32("mh_format_err", ofdm->mh_format_err);
+       PRINT_STATS_LE32("re_acq_main_rssi_sum", ofdm->re_acq_main_rssi_sum);
+       PRINT_STATS_LE32("reserved", ofdm->reserved);
+
+       pos += scnprintf(buf + pos, bufsz - pos, fmt_header,
+                        "Statistics_Rx - CCK");
+       PRINT_STATS_LE32("ina_cnt", cck->ina_cnt);
+       PRINT_STATS_LE32("fina_cnt", cck->fina_cnt);
+       PRINT_STATS_LE32("plcp_err", cck->plcp_err);
+       PRINT_STATS_LE32("crc32_err", cck->crc32_err);
+       PRINT_STATS_LE32("overrun_err", cck->overrun_err);
+       PRINT_STATS_LE32("early_overrun_err", cck->early_overrun_err);
+       PRINT_STATS_LE32("crc32_good", cck->crc32_good);
+       PRINT_STATS_LE32("false_alarm_cnt", cck->false_alarm_cnt);
+       PRINT_STATS_LE32("fina_sync_err_cnt", cck->fina_sync_err_cnt);
+       PRINT_STATS_LE32("sfd_timeout", cck->sfd_timeout);
+       PRINT_STATS_LE32("fina_timeout", cck->fina_timeout);
+       PRINT_STATS_LE32("unresponded_rts", cck->unresponded_rts);
+       PRINT_STATS_LE32("rxe_frame_lmt_overrun",
+                        cck->rxe_frame_limit_overrun);
+       PRINT_STATS_LE32("sent_ack_cnt", cck->sent_ack_cnt);
+       PRINT_STATS_LE32("sent_cts_cnt", cck->sent_cts_cnt);
+       PRINT_STATS_LE32("sent_ba_rsp_cnt", cck->sent_ba_rsp_cnt);
+       PRINT_STATS_LE32("dsp_self_kill", cck->dsp_self_kill);
+       PRINT_STATS_LE32("mh_format_err", cck->mh_format_err);
+       PRINT_STATS_LE32("re_acq_main_rssi_sum", cck->re_acq_main_rssi_sum);
+       PRINT_STATS_LE32("reserved", cck->reserved);
+
+       pos += scnprintf(buf + pos, bufsz - pos, fmt_header,
+                        "Statistics_Rx - GENERAL");
+       PRINT_STATS_LE32("bogus_cts", general->bogus_cts);
+       PRINT_STATS_LE32("bogus_ack", general->bogus_ack);
+       PRINT_STATS_LE32("non_bssid_frames", general->non_bssid_frames);
+       PRINT_STATS_LE32("filtered_frames", general->filtered_frames);
+       PRINT_STATS_LE32("non_channel_beacons", general->non_channel_beacons);
+       PRINT_STATS_LE32("channel_beacons", general->channel_beacons);
+       PRINT_STATS_LE32("num_missed_bcon", general->num_missed_bcon);
+       PRINT_STATS_LE32("adc_rx_saturation_time",
+                        general->adc_rx_saturation_time);
+       PRINT_STATS_LE32("ina_detection_search_time",
+                        general->ina_detection_search_time);
+       PRINT_STATS_LE32("beacon_silence_rssi_a",
+                        general->beacon_silence_rssi_a);
+       PRINT_STATS_LE32("beacon_silence_rssi_b",
+                        general->beacon_silence_rssi_b);
+       PRINT_STATS_LE32("beacon_silence_rssi_c",
+                        general->beacon_silence_rssi_c);
+       PRINT_STATS_LE32("interference_data_flag",
+                        general->interference_data_flag);
+       PRINT_STATS_LE32("channel_load", general->channel_load);
+       PRINT_STATS_LE32("dsp_false_alarms", general->dsp_false_alarms);
+       PRINT_STATS_LE32("beacon_rssi_a", general->beacon_rssi_a);
+       PRINT_STATS_LE32("beacon_rssi_b", general->beacon_rssi_b);
+       PRINT_STATS_LE32("beacon_rssi_c", general->beacon_rssi_c);
+       PRINT_STATS_LE32("beacon_energy_a", general->beacon_energy_a);
+       PRINT_STATS_LE32("beacon_energy_b", general->beacon_energy_b);
+       PRINT_STATS_LE32("beacon_energy_c", general->beacon_energy_c);
+       PRINT_STATS_LE32("num_bt_kills", general->num_bt_kills);
+       PRINT_STATS_LE32("directed_data_mpdu", general->directed_data_mpdu);
+
+       pos += scnprintf(buf + pos, bufsz - pos, fmt_header,
+                        "Statistics_Rx - HT");
+       PRINT_STATS_LE32("plcp_err", ht->plcp_err);
+       PRINT_STATS_LE32("overrun_err", ht->overrun_err);
+       PRINT_STATS_LE32("early_overrun_err", ht->early_overrun_err);
+       PRINT_STATS_LE32("crc32_good", ht->crc32_good);
+       PRINT_STATS_LE32("crc32_err", ht->crc32_err);
+       PRINT_STATS_LE32("mh_format_err", ht->mh_format_err);
+       PRINT_STATS_LE32("agg_crc32_good", ht->agg_crc32_good);
+       PRINT_STATS_LE32("agg_mpdu_cnt", ht->agg_mpdu_cnt);
+       PRINT_STATS_LE32("agg_cnt", ht->agg_cnt);
+       PRINT_STATS_LE32("unsupport_mcs", ht->unsupport_mcs);
+
+       mutex_unlock(&mvm->mutex);
+
+       ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+       kfree(buf);
+
+       return ret;
+}
+#undef PRINT_STATS_LE32
+
 static ssize_t iwl_dbgfs_fw_restart_write(struct file *file,
                                          const char __user *user_buf,
                                          size_t count, loff_t *ppos)
 {
        struct iwl_mvm *mvm = file->private_data;
-       bool restart_fw = iwlwifi_mod_params.restart_fw;
        int ret;
 
-       iwlwifi_mod_params.restart_fw = true;
-
        mutex_lock(&mvm->mutex);
 
+       /* allow one more restart - the one we are provoking here */
+       if (mvm->restart_fw >= 0)
+               mvm->restart_fw++;
+
        /* take the return value to make compiler happy - it will fail anyway */
        ret = iwl_mvm_send_cmd_pdu(mvm, REPLY_ERROR, CMD_SYNC, 0, NULL);
 
        mutex_unlock(&mvm->mutex);
 
-       iwlwifi_mod_params.restart_fw = restart_fw;
-
        return count;
 }
 
@@ -661,8 +775,14 @@ static void iwl_dbgfs_update_bf(struct ieee80211_vif *vif,
        case MVM_DEBUGFS_BF_ROAMING_STATE:
                dbgfs_bf->bf_roaming_state = value;
                break;
-       case MVM_DEBUGFS_BF_TEMPERATURE_DELTA:
-               dbgfs_bf->bf_temperature_delta = value;
+       case MVM_DEBUGFS_BF_TEMP_THRESHOLD:
+               dbgfs_bf->bf_temp_threshold = value;
+               break;
+       case MVM_DEBUGFS_BF_TEMP_FAST_FILTER:
+               dbgfs_bf->bf_temp_fast_filter = value;
+               break;
+       case MVM_DEBUGFS_BF_TEMP_SLOW_FILTER:
+               dbgfs_bf->bf_temp_slow_filter = value;
                break;
        case MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER:
                dbgfs_bf->bf_enable_beacon_filter = value;
@@ -721,13 +841,27 @@ static ssize_t iwl_dbgfs_bf_params_write(struct file *file,
                    value > IWL_BF_ROAMING_STATE_MAX)
                        return -EINVAL;
                param = MVM_DEBUGFS_BF_ROAMING_STATE;
-       } else if (!strncmp("bf_temperature_delta=", buf, 21)) {
-               if (sscanf(buf+21, "%d", &value) != 1)
+       } else if (!strncmp("bf_temp_threshold=", buf, 18)) {
+               if (sscanf(buf+18, "%d", &value) != 1)
+                       return -EINVAL;
+               if (value < IWL_BF_TEMP_THRESHOLD_MIN ||
+                   value > IWL_BF_TEMP_THRESHOLD_MAX)
                        return -EINVAL;
-               if (value < IWL_BF_TEMPERATURE_DELTA_MIN ||
-                   value > IWL_BF_TEMPERATURE_DELTA_MAX)
+               param = MVM_DEBUGFS_BF_TEMP_THRESHOLD;
+       } else if (!strncmp("bf_temp_fast_filter=", buf, 20)) {
+               if (sscanf(buf+20, "%d", &value) != 1)
                        return -EINVAL;
-               param = MVM_DEBUGFS_BF_TEMPERATURE_DELTA;
+               if (value < IWL_BF_TEMP_FAST_FILTER_MIN ||
+                   value > IWL_BF_TEMP_FAST_FILTER_MAX)
+                       return -EINVAL;
+               param = MVM_DEBUGFS_BF_TEMP_FAST_FILTER;
+       } else if (!strncmp("bf_temp_slow_filter=", buf, 20)) {
+               if (sscanf(buf+20, "%d", &value) != 1)
+                       return -EINVAL;
+               if (value < IWL_BF_TEMP_SLOW_FILTER_MIN ||
+                   value > IWL_BF_TEMP_SLOW_FILTER_MAX)
+                       return -EINVAL;
+               param = MVM_DEBUGFS_BF_TEMP_SLOW_FILTER;
        } else if (!strncmp("bf_enable_beacon_filter=", buf, 24)) {
                if (sscanf(buf+24, "%d", &value) != 1)
                        return -EINVAL;
@@ -769,10 +903,7 @@ static ssize_t iwl_dbgfs_bf_params_write(struct file *file,
        if (param == MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER && !value) {
                ret = iwl_mvm_disable_beacon_filter(mvm, vif);
        } else {
-               if (mvmvif->bf_enabled)
-                       ret = iwl_mvm_enable_beacon_filter(mvm, vif);
-               else
-                       ret = iwl_mvm_disable_beacon_filter(mvm, vif);
+               ret = iwl_mvm_enable_beacon_filter(mvm, vif);
        }
        mutex_unlock(&mvm->mutex);
 
@@ -789,41 +920,41 @@ static ssize_t iwl_dbgfs_bf_params_read(struct file *file,
        int pos = 0;
        const size_t bufsz = sizeof(buf);
        struct iwl_beacon_filter_cmd cmd = {
-               .bf_energy_delta = IWL_BF_ENERGY_DELTA_DEFAULT,
-               .bf_roaming_energy_delta = IWL_BF_ROAMING_ENERGY_DELTA_DEFAULT,
-               .bf_roaming_state = IWL_BF_ROAMING_STATE_DEFAULT,
-               .bf_temperature_delta = IWL_BF_TEMPERATURE_DELTA_DEFAULT,
-               .bf_enable_beacon_filter = IWL_BF_ENABLE_BEACON_FILTER_DEFAULT,
-               .bf_debug_flag = IWL_BF_DEBUG_FLAG_DEFAULT,
-               .bf_escape_timer = cpu_to_le32(IWL_BF_ESCAPE_TIMER_DEFAULT),
-               .ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER_DEFAULT),
-               .ba_enable_beacon_abort = IWL_BA_ENABLE_BEACON_ABORT_DEFAULT,
+               IWL_BF_CMD_CONFIG_DEFAULTS,
+               .bf_enable_beacon_filter =
+                       cpu_to_le32(IWL_BF_ENABLE_BEACON_FILTER_DEFAULT),
+               .ba_enable_beacon_abort =
+                       cpu_to_le32(IWL_BA_ENABLE_BEACON_ABORT_DEFAULT),
        };
 
        iwl_mvm_beacon_filter_debugfs_parameters(vif, &cmd);
-       if (mvmvif->bf_enabled)
-               cmd.bf_enable_beacon_filter = 1;
+       if (mvmvif->bf_data.bf_enabled)
+               cmd.bf_enable_beacon_filter = cpu_to_le32(1);
        else
                cmd.bf_enable_beacon_filter = 0;
 
        pos += scnprintf(buf+pos, bufsz-pos, "bf_energy_delta = %d\n",
-                        cmd.bf_energy_delta);
+                        le32_to_cpu(cmd.bf_energy_delta));
        pos += scnprintf(buf+pos, bufsz-pos, "bf_roaming_energy_delta = %d\n",
-                        cmd.bf_roaming_energy_delta);
+                        le32_to_cpu(cmd.bf_roaming_energy_delta));
        pos += scnprintf(buf+pos, bufsz-pos, "bf_roaming_state = %d\n",
-                        cmd.bf_roaming_state);
-       pos += scnprintf(buf+pos, bufsz-pos, "bf_temperature_delta = %d\n",
-                        cmd.bf_temperature_delta);
+                        le32_to_cpu(cmd.bf_roaming_state));
+       pos += scnprintf(buf+pos, bufsz-pos, "bf_temp_threshold = %d\n",
+                        le32_to_cpu(cmd.bf_temp_threshold));
+       pos += scnprintf(buf+pos, bufsz-pos, "bf_temp_fast_filter = %d\n",
+                        le32_to_cpu(cmd.bf_temp_fast_filter));
+       pos += scnprintf(buf+pos, bufsz-pos, "bf_temp_slow_filter = %d\n",
+                        le32_to_cpu(cmd.bf_temp_slow_filter));
        pos += scnprintf(buf+pos, bufsz-pos, "bf_enable_beacon_filter = %d\n",
-                        cmd.bf_enable_beacon_filter);
+                        le32_to_cpu(cmd.bf_enable_beacon_filter));
        pos += scnprintf(buf+pos, bufsz-pos, "bf_debug_flag = %d\n",
-                        cmd.bf_debug_flag);
+                        le32_to_cpu(cmd.bf_debug_flag));
        pos += scnprintf(buf+pos, bufsz-pos, "bf_escape_timer = %d\n",
-                        cmd.bf_escape_timer);
+                        le32_to_cpu(cmd.bf_escape_timer));
        pos += scnprintf(buf+pos, bufsz-pos, "ba_escape_timer = %d\n",
-                        cmd.ba_escape_timer);
+                        le32_to_cpu(cmd.ba_escape_timer));
        pos += scnprintf(buf+pos, bufsz-pos, "ba_enable_beacon_abort = %d\n",
-                        cmd.ba_enable_beacon_abort);
+                        le32_to_cpu(cmd.ba_enable_beacon_abort));
 
        return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
 }
@@ -934,6 +1065,7 @@ MVM_DEBUGFS_READ_FILE_OPS(stations);
 MVM_DEBUGFS_READ_FILE_OPS(bt_notif);
 MVM_DEBUGFS_WRITE_FILE_OPS(power_down_allow);
 MVM_DEBUGFS_WRITE_FILE_OPS(power_down_d3_allow);
+MVM_DEBUGFS_READ_FILE_OPS(fw_rx_stats);
 MVM_DEBUGFS_WRITE_FILE_OPS(fw_restart);
 #ifdef CONFIG_PM_SLEEP
 MVM_DEBUGFS_READ_WRITE_FILE_OPS(d3_sram);
@@ -957,6 +1089,7 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
        MVM_DEBUGFS_ADD_FILE(bt_notif, dbgfs_dir, S_IRUSR);
        MVM_DEBUGFS_ADD_FILE(power_down_allow, mvm->debugfs_dir, S_IWUSR);
        MVM_DEBUGFS_ADD_FILE(power_down_d3_allow, mvm->debugfs_dir, S_IWUSR);
+       MVM_DEBUGFS_ADD_FILE(fw_rx_stats, mvm->debugfs_dir, S_IRUSR);
        MVM_DEBUGFS_ADD_FILE(fw_restart, mvm->debugfs_dir, S_IWUSR);
 #ifdef CONFIG_PM_SLEEP
        MVM_DEBUGFS_ADD_FILE(d3_sram, mvm->debugfs_dir, S_IRUSR | S_IWUSR);
index 6f8b2c16ae171965253c0d7e0e149ab3ff0d52f6..df72fcdf81705b54e9a36f51ede8a652f8bcfa2d 100644 (file)
@@ -98,34 +98,63 @@ enum iwl_proto_offloads {
        IWL_D3_PROTO_OFFLOAD_NS = BIT(1),
 };
 
-#define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS       2
+#define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V1    2
+#define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V2    6
+#define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX   6
 
 /**
- * struct iwl_proto_offload_cmd - ARP/NS offload configuration
+ * struct iwl_proto_offload_cmd_common - ARP/NS offload common part
  * @enabled: enable flags
  * @remote_ipv4_addr: remote address to answer to (or zero if all)
  * @host_ipv4_addr: our IPv4 address to respond to queries for
  * @arp_mac_addr: our MAC address for ARP responses
- * @remote_ipv6_addr: remote address to answer to (or zero if all)
- * @solicited_node_ipv6_addr: broken -- solicited node address exists
- *     for each target address
- * @target_ipv6_addr: our target addresses
- * @ndp_mac_addr: neighbor soliciation response MAC address
+ * @reserved: unused
  */
-struct iwl_proto_offload_cmd {
+struct iwl_proto_offload_cmd_common {
        __le32 enabled;
        __be32 remote_ipv4_addr;
        __be32 host_ipv4_addr;
        u8 arp_mac_addr[ETH_ALEN];
-       __le16 reserved1;
+       __le16 reserved;
+} __packed;
 
+/**
+ * struct iwl_proto_offload_cmd_v1 - ARP/NS offload configuration
+ * @common: common/IPv4 configuration
+ * @remote_ipv6_addr: remote address to answer to (or zero if all)
+ * @solicited_node_ipv6_addr: broken -- solicited node address exists
+ *     for each target address
+ * @target_ipv6_addr: our target addresses
+ * @ndp_mac_addr: neighbor solicitation response MAC address
+ */
+struct iwl_proto_offload_cmd_v1 {
+       struct iwl_proto_offload_cmd_common common;
        u8 remote_ipv6_addr[16];
        u8 solicited_node_ipv6_addr[16];
-       u8 target_ipv6_addr[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS][16];
+       u8 target_ipv6_addr[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V1][16];
        u8 ndp_mac_addr[ETH_ALEN];
        __le16 reserved2;
 } __packed; /* PROT_OFFLOAD_CONFIG_CMD_DB_S_VER_1 */
 
+/**
+ * struct iwl_proto_offload_cmd_v2 - ARP/NS offload configuration
+ * @common: common/IPv4 configuration
+ * @remote_ipv6_addr: remote address to answer to (or zero if all)
+ * @solicited_node_ipv6_addr: broken -- solicited node address exists
+ *     for each target address
+ * @target_ipv6_addr: our target addresses
+ * @ndp_mac_addr: neighbor solicitation response MAC address
+ */
+struct iwl_proto_offload_cmd_v2 {
+       struct iwl_proto_offload_cmd_common common;
+       u8 remote_ipv6_addr[16];
+       u8 solicited_node_ipv6_addr[16];
+       u8 target_ipv6_addr[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V2][16];
+       u8 ndp_mac_addr[ETH_ALEN];
+       u8 numValidIPv6Addresses;
+       u8 reserved2[3];
+} __packed; /* PROT_OFFLOAD_CONFIG_CMD_DB_S_VER_2 */
+
 
 /*
  * WOWLAN_PATTERNS
index a6da359a80c3e1125a7b0733b5ae9863d8740e12..8e7ab41079ca6ca51eba975a2e39c92cee5f002b 100644 (file)
  *             '1' Driver enables PM (use rest of parameters)
  * @POWER_FLAGS_SKIP_OVER_DTIM_MSK: '0' PM have to walk up every DTIM,
  *             '1' PM could sleep over DTIM till listen Interval.
+ * @POWER_FLAGS_SNOOZE_ENA_MSK: Enable snoozing only if uAPSD is enabled and all
+ *             access categories are both delivery and trigger enabled.
+ * @POWER_FLAGS_BT_SCO_ENA: Enable BT SCO coex only if uAPSD and
+ *             PBW Snoozing enabled
  * @POWER_FLAGS_ADVANCE_PM_ENA_MSK: Advanced PM (uAPSD) enable mask
  * @POWER_FLAGS_LPRX_ENA_MSK: Low Power RX enable.
 */
@@ -86,6 +90,8 @@ enum iwl_power_flags {
        POWER_FLAGS_POWER_SAVE_ENA_MSK          = BIT(0),
        POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK    = BIT(1),
        POWER_FLAGS_SKIP_OVER_DTIM_MSK          = BIT(2),
+       POWER_FLAGS_SNOOZE_ENA_MSK              = BIT(5),
+       POWER_FLAGS_BT_SCO_ENA                  = BIT(8),
        POWER_FLAGS_ADVANCE_PM_ENA_MSK          = BIT(9),
        POWER_FLAGS_LPRX_ENA_MSK                = BIT(11),
 };
@@ -93,7 +99,8 @@ enum iwl_power_flags {
 #define IWL_POWER_VEC_SIZE 5
 
 /**
- * struct iwl_powertable_cmd - Power Table Command
+ * struct iwl_powertable_cmd - legacy power command. Besides old API support,
+ *     this is also used with the new power API for device-wide power settings.
  * POWER_TABLE_CMD = 0x77 (command, has simple generic response)
  *
  * @flags:             Power table command flags from POWER_FLAGS_*
@@ -124,6 +131,76 @@ struct iwl_powertable_cmd {
        __le32 lprx_rssi_threshold;
 } __packed;
 
+/**
+ * struct iwl_mac_power_cmd - New power command containing uAPSD support
+ * MAC_PM_POWER_TABLE = 0xA9 (command, has simple generic response)
+ * @id_and_color:      MAC context identifier
+ * @flags:             Power table command flags from POWER_FLAGS_*
+ * @keep_alive_seconds:        Keep alive period in seconds. Default: 25 sec.
+ *                     Minimum allowed: 3 * DTIM. Keep alive period must be
+ *                     set regardless of power scheme or current power state.
+ *                     FW uses this value also when PM is disabled.
+ * @rx_data_timeout:    Minimum time (usec) from last Rx packet for AM to
+ *                     PSM transition - legacy PM
+ * @tx_data_timeout:    Minimum time (usec) from last Tx packet for AM to
+ *                     PSM transition - legacy PM
+ * @sleep_interval:    not in use
+ * @skip_dtim_periods: Number of DTIM periods to skip if Skip over DTIM flag
+ *                     is set. For example, if it is required to skip over
+ *                     one DTIM, this value need to be set to 2 (DTIM periods).
+ * @rx_data_timeout_uapsd: Minimum time (usec) from last Rx packet for AM to
+ *                     PSM transition - uAPSD
+ * @tx_data_timeout_uapsd: Minimum time (usec) from last Tx packet for AM to
+ *                     PSM transition - uAPSD
+ * @lprx_rssi_threshold: Signal strength up to which LP RX can be enabled.
+ *                     Default: 80dbm
+ * @num_skip_dtim:     Number of DTIMs to skip if Skip over DTIM flag is set
+ * @snooze_interval:   Maximum time between attempts to retrieve buffered data
+ *                     from the AP [msec]
+ * @snooze_window:     A window of time in which PBW snoozing ensures that all
+ *                     packets are received. It is also the minimum time from
+ *                     last received unicast RX packet, before client stops
+ *                     snoozing for data. [msec]
+ * @snooze_step:       TBD
+ * @qndp_tid:          TID client shall use for uAPSD QNDP triggers
+ * @uapsd_ac_flags:    Set trigger-enabled and delivery-enabled indication for
+ *                     each corresponding AC.
+ *                     Use IEEE80211_WMM_IE_STA_QOSINFO_AC* for correct values.
+ * @uapsd_max_sp:      Use IEEE80211_WMM_IE_STA_QOSINFO_SP_* for correct
+ *                     values.
+ * @heavy_tx_thld_packets:     TX threshold measured in number of packets
+ * @heavy_rx_thld_packets:     RX threshold measured in number of packets
+ * @heavy_tx_thld_percentage:  TX threshold measured in load's percentage
+ * @heavy_rx_thld_percentage:  RX threshold measured in load's percentage
+ * @limited_ps_threshold: TBD
+*/
+struct iwl_mac_power_cmd {
+       /* CONTEXT_DESC_API_T_VER_1 */
+       __le32 id_and_color;
+
+       /* CLIENT_PM_POWER_TABLE_S_VER_1 */
+       __le16 flags;
+       __le16 keep_alive_seconds;
+       __le32 rx_data_timeout;
+       __le32 tx_data_timeout;
+       __le32 rx_data_timeout_uapsd;
+       __le32 tx_data_timeout_uapsd;
+       u8 lprx_rssi_threshold;
+       u8 skip_dtim_periods;
+       __le16 snooze_interval;
+       __le16 snooze_window;
+       u8 snooze_step;
+       u8 qndp_tid;
+       u8 uapsd_ac_flags;
+       u8 uapsd_max_sp;
+       u8 heavy_tx_thld_packets;
+       u8 heavy_rx_thld_packets;
+       u8 heavy_tx_thld_percentage;
+       u8 heavy_rx_thld_percentage;
+       u8 limited_ps_threshold;
+       u8 reserved;
+} __packed;
+
 /**
  * struct iwl_beacon_filter_cmd
  * REPLY_BEACON_FILTERING_CMD = 0xd2 (command)
@@ -143,11 +220,21 @@ struct iwl_powertable_cmd {
  *      calculated for current beacon is less than the threshold, use
  *      Roaming Energy Delta Threshold, otherwise use normal Energy Delta
  *      Threshold. Typical energy threshold is -72dBm.
- * @bf_temperature_delta: Send Beacon to driver if delta in temperature values
- *      calculated for this and the last passed beacon is greater than  this
- *      threshold. Zero value means that the temperature changeis ignored for
+ * @bf_temp_threshold: This threshold determines the type of temperature
+ *     filtering (Slow or Fast) that is selected (Units are in Celsius):
+ *      If the current temperature is above this threshold - Fast filter
+ *     will be used, If the current temperature is below this threshold -
+ *     Slow filter will be used.
+ * @bf_temp_fast_filter: Send Beacon to driver if delta in temperature values
+ *      calculated for this and the last passed beacon is greater than this
+ *      threshold. Zero value means that the temperature change is ignored for
+ *      beacon filtering; beacons will not be forced to be sent to driver
+ *      regardless of whether its temperature has been changed.
+ * @bf_temp_slow_filter: Send Beacon to driver if delta in temperature values
+ *      calculated for this and the last passed beacon is greater than this
+ *      threshold. Zero value means that the temperature change is ignored for
+ *      beacon filtering; beacons will not be forced to be sent to driver
+ *      regardless of whether its temperature has been changed.
  * @bf_enable_beacon_filter: 1, beacon filtering is enabled; 0, disabled.
  * @bf_filter_escape_timer: Send beacons to to driver if no beacons were passed
  *      for a specific period of time. Units: Beacons.
@@ -156,17 +243,17 @@ struct iwl_powertable_cmd {
  * @ba_enable_beacon_abort: 1, beacon abort is enabled; 0, disabled.
  */
 struct iwl_beacon_filter_cmd {
-       u8 bf_energy_delta;
-       u8 bf_roaming_energy_delta;
-       u8 bf_roaming_state;
-       u8 bf_temperature_delta;
-       u8 bf_enable_beacon_filter;
-       u8 bf_debug_flag;
-       __le16 reserved1;
+       __le32 bf_energy_delta;
+       __le32 bf_roaming_energy_delta;
+       __le32 bf_roaming_state;
+       __le32 bf_temp_threshold;
+       __le32 bf_temp_fast_filter;
+       __le32 bf_temp_slow_filter;
+       __le32 bf_enable_beacon_filter;
+       __le32 bf_debug_flag;
        __le32 bf_escape_timer;
        __le32 ba_escape_timer;
-       u8 ba_enable_beacon_abort;
-       u8 reserved2[3];
+       __le32 ba_enable_beacon_abort;
 } __packed;
 
 /* Beacon filtering and beacon abort */
@@ -182,9 +269,17 @@ struct iwl_beacon_filter_cmd {
 #define IWL_BF_ROAMING_STATE_MAX 255
 #define IWL_BF_ROAMING_STATE_MIN 0
 
-#define IWL_BF_TEMPERATURE_DELTA_DEFAULT 5
-#define IWL_BF_TEMPERATURE_DELTA_MAX 255
-#define IWL_BF_TEMPERATURE_DELTA_MIN 0
+#define IWL_BF_TEMP_THRESHOLD_DEFAULT 112
+#define IWL_BF_TEMP_THRESHOLD_MAX 255
+#define IWL_BF_TEMP_THRESHOLD_MIN 0
+
+#define IWL_BF_TEMP_FAST_FILTER_DEFAULT 1
+#define IWL_BF_TEMP_FAST_FILTER_MAX 255
+#define IWL_BF_TEMP_FAST_FILTER_MIN 0
+
+#define IWL_BF_TEMP_SLOW_FILTER_DEFAULT 5
+#define IWL_BF_TEMP_SLOW_FILTER_MAX 255
+#define IWL_BF_TEMP_SLOW_FILTER_MIN 0
 
 #define IWL_BF_ENABLE_BEACON_FILTER_DEFAULT 1
 
@@ -194,19 +289,23 @@ struct iwl_beacon_filter_cmd {
 #define IWL_BF_ESCAPE_TIMER_MAX 1024
 #define IWL_BF_ESCAPE_TIMER_MIN 0
 
-#define IWL_BA_ESCAPE_TIMER_DEFAULT 3
+#define IWL_BA_ESCAPE_TIMER_DEFAULT 6
+#define IWL_BA_ESCAPE_TIMER_D3 6
 #define IWL_BA_ESCAPE_TIMER_MAX 1024
 #define IWL_BA_ESCAPE_TIMER_MIN 0
 
 #define IWL_BA_ENABLE_BEACON_ABORT_DEFAULT 1
 
-#define IWL_BF_CMD_CONFIG_DEFAULTS                                     \
-       .bf_energy_delta = IWL_BF_ENERGY_DELTA_DEFAULT,                 \
-       .bf_roaming_energy_delta = IWL_BF_ROAMING_ENERGY_DELTA_DEFAULT, \
-       .bf_roaming_state = IWL_BF_ROAMING_STATE_DEFAULT,               \
-       .bf_temperature_delta = IWL_BF_TEMPERATURE_DELTA_DEFAULT,       \
-       .bf_debug_flag = IWL_BF_DEBUG_FLAG_DEFAULT,                     \
-       .bf_escape_timer = cpu_to_le32(IWL_BF_ESCAPE_TIMER_DEFAULT),    \
+#define IWL_BF_CMD_CONFIG_DEFAULTS                                          \
+       .bf_energy_delta = cpu_to_le32(IWL_BF_ENERGY_DELTA_DEFAULT),         \
+       .bf_roaming_energy_delta =                                           \
+               cpu_to_le32(IWL_BF_ROAMING_ENERGY_DELTA_DEFAULT),            \
+       .bf_roaming_state = cpu_to_le32(IWL_BF_ROAMING_STATE_DEFAULT),       \
+       .bf_temp_threshold = cpu_to_le32(IWL_BF_TEMP_THRESHOLD_DEFAULT),     \
+       .bf_temp_fast_filter = cpu_to_le32(IWL_BF_TEMP_FAST_FILTER_DEFAULT), \
+       .bf_temp_slow_filter = cpu_to_le32(IWL_BF_TEMP_SLOW_FILTER_DEFAULT), \
+       .bf_debug_flag = cpu_to_le32(IWL_BF_DEBUG_FLAG_DEFAULT),             \
+       .bf_escape_timer = cpu_to_le32(IWL_BF_ESCAPE_TIMER_DEFAULT),         \
        .ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER_DEFAULT)
 
 #endif
index 365095a0c3b3101e3c87b76036dbd82b352b9deb..83cb9b992ea4622f39ff03a4c5bbcea36a7d128f 100644 (file)
@@ -137,6 +137,8 @@ struct iwl_ssid_ie {
  *@SCAN_FLAGS_DELAYED_SCAN_LOWBAND:
  *@SCAN_FLAGS_DELAYED_SCAN_HIGHBAND:
  *@SCAN_FLAGS_FRAGMENTED_SCAN:
+ *@SCAN_FLAGS_PASSIVE2ACTIVE: use active scan on channels that were active
+ *     in the past hour, even if they are marked as passive.
  */
 enum iwl_scan_flags {
        SCAN_FLAGS_PERIODIC_SCAN                = BIT(0),
@@ -144,6 +146,7 @@ enum iwl_scan_flags {
        SCAN_FLAGS_DELAYED_SCAN_LOWBAND         = BIT(2),
        SCAN_FLAGS_DELAYED_SCAN_HIGHBAND        = BIT(3),
        SCAN_FLAGS_FRAGMENTED_SCAN              = BIT(4),
+       SCAN_FLAGS_PASSIVE2ACTIVE               = BIT(5),
 };
 
 /**
@@ -178,7 +181,7 @@ enum iwl_scan_type {
  * @quiet_time: in msecs, dwell this time for active scan on quiet channels
  * @quiet_plcp_th: quiet PLCP threshold (channel is quiet if less than
  *     this number of packets were received (typically 1)
- * @passive2active: is auto switching from passive to active allowed (0 or 1)
+ * @passive2active: is auto switching from passive to active during scan allowed
  * @rxchain_sel_flags: RXON_RX_CHAIN_*
  * @max_out_time: in usecs, max out of serving channel time
  * @suspend_time: how long to pause scan when returning to service channel:
index 700cce731770ab800640a421b2c99b3d06b6fad1..d606197bde8f41aeba9c7580b9c9f4d8dc8dd959 100644 (file)
@@ -91,7 +91,6 @@
  * @TX_CMD_FLG_RESP_TO_DRV: zero this if the response should go only to FW
  * @TX_CMD_FLG_CCMP_AGG: this frame uses CCMP for aggregation acceleration
  * @TX_CMD_FLG_TKIP_MIC_DONE: FW already performed TKIP MIC calculation
- * @TX_CMD_FLG_CTS_ONLY: send CTS only, no data after that
  * @TX_CMD_FLG_DUR: disable duration overwriting used in PS-Poll Assoc-id
  * @TX_CMD_FLG_FW_DROP: FW should mark frame to be dropped
  * @TX_CMD_FLG_EXEC_PAPD: execute PAPD
@@ -120,7 +119,6 @@ enum iwl_tx_flags {
        TX_CMD_FLG_RESP_TO_DRV          = BIT(21),
        TX_CMD_FLG_CCMP_AGG             = BIT(22),
        TX_CMD_FLG_TKIP_MIC_DONE        = BIT(23),
-       TX_CMD_FLG_CTS_ONLY             = BIT(24),
        TX_CMD_FLG_DUR                  = BIT(25),
        TX_CMD_FLG_FW_DROP              = BIT(26),
        TX_CMD_FLG_EXEC_PAPD            = BIT(27),
index cbfb3beae7838740ca7f72e54705b197ec4e6b49..66264cc5a0168d296ddac86be6617cd56f1d84d5 100644 (file)
@@ -136,7 +136,7 @@ enum {
        CALIB_RES_NOTIF_PHY_DB = 0x6b,
        /* PHY_DB_CMD = 0x6c, */
 
-       /* Power */
+       /* Power - legacy power table command */
        POWER_TABLE_CMD = 0x77,
 
        /* Thermal Throttling*/
@@ -159,6 +159,7 @@ enum {
        TX_ANT_CONFIGURATION_CMD = 0x98,
        BT_CONFIG = 0x9b,
        STATISTICS_NOTIFICATION = 0x9d,
+       REDUCE_TX_POWER_CMD = 0x9f,
 
        /* RF-KILL commands and notifications */
        CARD_STATE_CMD = 0xa0,
@@ -166,6 +167,9 @@ enum {
 
        MISSED_BEACONS_NOTIFICATION = 0xa2,
 
+       /* Power - new power table command */
+       MAC_PM_POWER_TABLE = 0xa9,
+
        REPLY_RX_PHY_CMD = 0xc0,
        REPLY_RX_MPDU_CMD = 0xc1,
        BA_NOTIF = 0xc5,
@@ -223,6 +227,19 @@ struct iwl_tx_ant_cfg_cmd {
        __le32 valid;
 } __packed;
 
+/**
+ * struct iwl_reduce_tx_power_cmd - TX power reduction command
+ * REDUCE_TX_POWER_CMD = 0x9f
+ * @flags: (reserved for future implementation)
+ * @mac_context_id: id of the mac ctx for which we are reducing TX power.
+ * @pwr_restriction: TX power restriction in dBms.
+ */
+struct iwl_reduce_tx_power_cmd {
+       u8 flags;
+       u8 mac_context_id;
+       __le16 pwr_restriction;
+} __packed; /* TX_REDUCED_POWER_API_S_VER_1 */
+
 /*
  * Calibration control struct.
  * Sent as part of the phy configuration command.
@@ -482,71 +499,199 @@ enum iwl_time_event_type {
        TE_MAX
 }; /* MAC_EVENT_TYPE_API_E_VER_1 */
 
+
+
+/* Time event - defines for command API v1 */
+
+/*
+ * @TE_V1_FRAG_NONE: fragmentation of the time event is NOT allowed.
+ * @TE_V1_FRAG_SINGLE: fragmentation of the time event is allowed, but only
+ *     the first fragment is scheduled.
+ * @TE_V1_FRAG_DUAL: fragmentation of the time event is allowed, but only
+ *     the first 2 fragments are scheduled.
+ * @TE_V1_FRAG_ENDLESS: fragmentation of the time event is allowed, and any
+ *     number of fragments are valid.
+ *
+ * Other than the constant defined above, specifying a fragmentation value 'x'
+ * means that the event can be fragmented but only the first 'x' will be
+ * scheduled.
+ */
+enum {
+       TE_V1_FRAG_NONE = 0,
+       TE_V1_FRAG_SINGLE = 1,
+       TE_V1_FRAG_DUAL = 2,
+       TE_V1_FRAG_ENDLESS = 0xffffffff
+};
+
+/* If a Time Event can be fragmented, this is the max number of fragments */
+#define TE_V1_FRAG_MAX_MSK     0x0fffffff
+/* Repeat the time event endlessly (until removed) */
+#define TE_V1_REPEAT_ENDLESS   0xffffffff
+/* If a Time Event has bounded repetitions, this is the maximal value */
+#define TE_V1_REPEAT_MAX_MSK_V1        0x0fffffff
+
 /* Time Event dependencies: none, on another TE, or in a specific time */
 enum {
-       TE_INDEPENDENT          = 0,
-       TE_DEP_OTHER            = 1,
-       TE_DEP_TSF              = 2,
-       TE_EVENT_SOCIOPATHIC    = 4,
+       TE_V1_INDEPENDENT               = 0,
+       TE_V1_DEP_OTHER                 = BIT(0),
+       TE_V1_DEP_TSF                   = BIT(1),
+       TE_V1_EVENT_SOCIOPATHIC         = BIT(2),
 }; /* MAC_EVENT_DEPENDENCY_POLICY_API_E_VER_2 */
+
 /*
+ * @TE_V1_NOTIF_NONE: no notifications
+ * @TE_V1_NOTIF_HOST_EVENT_START: request/receive notification on event start
+ * @TE_V1_NOTIF_HOST_EVENT_END:request/receive notification on event end
+ * @TE_V1_NOTIF_INTERNAL_EVENT_START: internal FW use
+ * @TE_V1_NOTIF_INTERNAL_EVENT_END: internal FW use.
+ * @TE_V1_NOTIF_HOST_FRAG_START: request/receive notification on frag start
+ * @TE_V1_NOTIF_HOST_FRAG_END:request/receive notification on frag end
+ * @TE_V1_NOTIF_INTERNAL_FRAG_START: internal FW use.
+ * @TE_V1_NOTIF_INTERNAL_FRAG_END: internal FW use.
+ *
  * Supported Time event notifications configuration.
  * A notification (both event and fragment) includes a status indicating weather
  * the FW was able to schedule the event or not. For fragment start/end
  * notification the status is always success. There is no start/end fragment
  * notification for monolithic events.
- *
- * @TE_NOTIF_NONE: no notifications
- * @TE_NOTIF_HOST_EVENT_START: request/receive notification on event start
- * @TE_NOTIF_HOST_EVENT_END:request/receive notification on event end
- * @TE_NOTIF_INTERNAL_EVENT_START: internal FW use
- * @TE_NOTIF_INTERNAL_EVENT_END: internal FW use.
- * @TE_NOTIF_HOST_FRAG_START: request/receive notification on frag start
- * @TE_NOTIF_HOST_FRAG_END:request/receive notification on frag end
- * @TE_NOTIF_INTERNAL_FRAG_START: internal FW use.
- * @TE_NOTIF_INTERNAL_FRAG_END: internal FW use.
  */
 enum {
-       TE_NOTIF_NONE = 0,
-       TE_NOTIF_HOST_EVENT_START = 0x1,
-       TE_NOTIF_HOST_EVENT_END = 0x2,
-       TE_NOTIF_INTERNAL_EVENT_START = 0x4,
-       TE_NOTIF_INTERNAL_EVENT_END = 0x8,
-       TE_NOTIF_HOST_FRAG_START = 0x10,
-       TE_NOTIF_HOST_FRAG_END = 0x20,
-       TE_NOTIF_INTERNAL_FRAG_START = 0x40,
-       TE_NOTIF_INTERNAL_FRAG_END = 0x80
+       TE_V1_NOTIF_NONE = 0,
+       TE_V1_NOTIF_HOST_EVENT_START = BIT(0),
+       TE_V1_NOTIF_HOST_EVENT_END = BIT(1),
+       TE_V1_NOTIF_INTERNAL_EVENT_START = BIT(2),
+       TE_V1_NOTIF_INTERNAL_EVENT_END = BIT(3),
+       TE_V1_NOTIF_HOST_FRAG_START = BIT(4),
+       TE_V1_NOTIF_HOST_FRAG_END = BIT(5),
+       TE_V1_NOTIF_INTERNAL_FRAG_START = BIT(6),
+       TE_V1_NOTIF_INTERNAL_FRAG_END = BIT(7),
 }; /* MAC_EVENT_ACTION_API_E_VER_2 */
 
+
+/**
+ * struct iwl_time_event_cmd_api_v1 - configuring Time Events
+ * with struct MAC_TIME_EVENT_DATA_API_S_VER_1 (see also
+ * with version 2. determined by IWL_UCODE_TLV_FLAGS)
+ * ( TIME_EVENT_CMD = 0x29 )
+ * @id_and_color: ID and color of the relevant MAC
+ * @action: action to perform, one of FW_CTXT_ACTION_*
+ * @id: this field has two meanings, depending on the action:
+ *     If the action is ADD, then it means the type of event to add.
+ *     For all other actions it is the unique event ID assigned when the
+ *     event was added by the FW.
+ * @apply_time: When to start the Time Event (in GP2)
+ * @max_delay: maximum delay to event's start (apply time), in TU
+ * @depends_on: the unique ID of the event we depend on (if any)
+ * @interval: interval between repetitions, in TU
+ * @interval_reciprocal: 2^32 / interval
+ * @duration: duration of event in TU
+ * @repeat: how many repetitions to do, can be TE_REPEAT_ENDLESS
+ * @dep_policy: one of TE_V1_INDEPENDENT, TE_V1_DEP_OTHER, TE_V1_DEP_TSF
+ *     and TE_V1_EVENT_SOCIOPATHIC
+ * @is_present: 0 or 1, are we present or absent during the Time Event
+ * @max_frags: maximal number of fragments the Time Event can be divided to
+ * @notify: notifications using TE_V1_NOTIF_* (whom to notify when)
+ */
+struct iwl_time_event_cmd_v1 {
+       /* COMMON_INDEX_HDR_API_S_VER_1 */
+       __le32 id_and_color;
+       __le32 action;
+       __le32 id;
+       /* MAC_TIME_EVENT_DATA_API_S_VER_1 */
+       __le32 apply_time;
+       __le32 max_delay;
+       __le32 dep_policy;
+       __le32 depends_on;
+       __le32 is_present;
+       __le32 max_frags;
+       __le32 interval;
+       __le32 interval_reciprocal;
+       __le32 duration;
+       __le32 repeat;
+       __le32 notify;
+} __packed; /* MAC_TIME_EVENT_CMD_API_S_VER_1 */
+
+
+/* Time event - defines for command API v2 */
+
 /*
- * @TE_FRAG_NONE: fragmentation of the time event is NOT allowed.
- * @TE_FRAG_SINGLE: fragmentation of the time event is allowed, but only
+ * @TE_V2_FRAG_NONE: fragmentation of the time event is NOT allowed.
+ * @TE_V2_FRAG_SINGLE: fragmentation of the time event is allowed, but only
  *  the first fragment is scheduled.
- * @TE_FRAG_DUAL: fragmentation of the time event is allowed, but only
+ * @TE_V2_FRAG_DUAL: fragmentation of the time event is allowed, but only
  *  the first 2 fragments are scheduled.
- * @TE_FRAG_ENDLESS: fragmentation of the time event is allowed, and any number
- *  of fragments are valid.
+ * @TE_V2_FRAG_ENDLESS: fragmentation of the time event is allowed, and any
+ *  number of fragments are valid.
  *
  * Other than the constant defined above, specifying a fragmentation value 'x'
  * means that the event can be fragmented but only the first 'x' will be
  * scheduled.
  */
 enum {
-       TE_FRAG_NONE = 0,
-       TE_FRAG_SINGLE = 1,
-       TE_FRAG_DUAL = 2,
-       TE_FRAG_ENDLESS = 0xffffffff
+       TE_V2_FRAG_NONE = 0,
+       TE_V2_FRAG_SINGLE = 1,
+       TE_V2_FRAG_DUAL = 2,
+       TE_V2_FRAG_MAX = 0xfe,
+       TE_V2_FRAG_ENDLESS = 0xff
 };
 
 /* Repeat the time event endlessly (until removed) */
-#define TE_REPEAT_ENDLESS      (0xffffffff)
+#define TE_V2_REPEAT_ENDLESS   0xff
 /* If a Time Event has bounded repetitions, this is the maximal value */
-#define TE_REPEAT_MAX_MSK      (0x0fffffff)
-/* If a Time Event can be fragmented, this is the max number of fragments */
-#define TE_FRAG_MAX_MSK                (0x0fffffff)
+#define TE_V2_REPEAT_MAX       0xfe
+
+#define TE_V2_PLACEMENT_POS    12
+#define TE_V2_ABSENCE_POS      15
+
+/* Time event policy values (for time event cmd api v2)
+ * A notification (both event and fragment) includes a status indicating weather
+ * the FW was able to schedule the event or not. For fragment start/end
+ * notification the status is always success. There is no start/end fragment
+ * notification for monolithic events.
+ *
+ * @TE_V2_DEFAULT_POLICY: independent, social, present, unoticable
+ * @TE_V2_NOTIF_HOST_EVENT_START: request/receive notification on event start
+ * @TE_V2_NOTIF_HOST_EVENT_END:request/receive notification on event end
+ * @TE_V2_NOTIF_INTERNAL_EVENT_START: internal FW use
+ * @TE_V2_NOTIF_INTERNAL_EVENT_END: internal FW use.
+ * @TE_V2_NOTIF_HOST_FRAG_START: request/receive notification on frag start
+ * @TE_V2_NOTIF_HOST_FRAG_END:request/receive notification on frag end
+ * @TE_V2_NOTIF_INTERNAL_FRAG_START: internal FW use.
+ * @TE_V2_NOTIF_INTERNAL_FRAG_END: internal FW use.
+ * @TE_V2_DEP_OTHER: depends on another time event
+ * @TE_V2_DEP_TSF: depends on a specific time
+ * @TE_V2_EVENT_SOCIOPATHIC: can't co-exist with other events of tha same MAC
+ * @TE_V2_ABSENCE: are we present or absent during the Time Event.
+ */
+enum {
+       TE_V2_DEFAULT_POLICY = 0x0,
+
+       /* notifications (event start/stop, fragment start/stop) */
+       TE_V2_NOTIF_HOST_EVENT_START = BIT(0),
+       TE_V2_NOTIF_HOST_EVENT_END = BIT(1),
+       TE_V2_NOTIF_INTERNAL_EVENT_START = BIT(2),
+       TE_V2_NOTIF_INTERNAL_EVENT_END = BIT(3),
+
+       TE_V2_NOTIF_HOST_FRAG_START = BIT(4),
+       TE_V2_NOTIF_HOST_FRAG_END = BIT(5),
+       TE_V2_NOTIF_INTERNAL_FRAG_START = BIT(6),
+       TE_V2_NOTIF_INTERNAL_FRAG_END = BIT(7),
+
+       TE_V2_NOTIF_MSK = 0xff,
+
+       /* placement characteristics */
+       TE_V2_DEP_OTHER = BIT(TE_V2_PLACEMENT_POS),
+       TE_V2_DEP_TSF = BIT(TE_V2_PLACEMENT_POS + 1),
+       TE_V2_EVENT_SOCIOPATHIC = BIT(TE_V2_PLACEMENT_POS + 2),
+
+       /* are we present or absent during the Time Event. */
+       TE_V2_ABSENCE = BIT(TE_V2_ABSENCE_POS),
+};
 
 /**
- * struct iwl_time_event_cmd - configuring Time Events
+ * struct iwl_time_event_cmd_api_v2 - configuring Time Events
+ * with struct MAC_TIME_EVENT_DATA_API_S_VER_2 (see also
+ * with version 1. determined by IWL_UCODE_TLV_FLAGS)
  * ( TIME_EVENT_CMD = 0x29 )
  * @id_and_color: ID and color of the relevant MAC
  * @action: action to perform, one of FW_CTXT_ACTION_*
@@ -558,32 +703,30 @@ enum {
  * @max_delay: maximum delay to event's start (apply time), in TU
  * @depends_on: the unique ID of the event we depend on (if any)
  * @interval: interval between repetitions, in TU
- * @interval_reciprocal: 2^32 / interval
  * @duration: duration of event in TU
  * @repeat: how many repetitions to do, can be TE_REPEAT_ENDLESS
- * @dep_policy: one of TE_INDEPENDENT, TE_DEP_OTHER, TE_DEP_TSF
- * @is_present: 0 or 1, are we present or absent during the Time Event
  * @max_frags: maximal number of fragments the Time Event can be divided to
- * @notify: notifications using TE_NOTIF_* (whom to notify when)
+ * @policy: defines whether uCode shall notify the host or other uCode modules
+ *     on event and/or fragment start and/or end
+ *     using one of TE_INDEPENDENT, TE_DEP_OTHER, TE_DEP_TSF
+ *     TE_EVENT_SOCIOPATHIC
+ *     using TE_ABSENCE and using TE_NOTIF_*
  */
-struct iwl_time_event_cmd {
+struct iwl_time_event_cmd_v2 {
        /* COMMON_INDEX_HDR_API_S_VER_1 */
        __le32 id_and_color;
        __le32 action;
        __le32 id;
-       /* MAC_TIME_EVENT_DATA_API_S_VER_1 */
+       /* MAC_TIME_EVENT_DATA_API_S_VER_2 */
        __le32 apply_time;
        __le32 max_delay;
-       __le32 dep_policy;
        __le32 depends_on;
-       __le32 is_present;
-       __le32 max_frags;
        __le32 interval;
-       __le32 interval_reciprocal;
        __le32 duration;
-       __le32 repeat;
-       __le32 notify;
-} __packed; /* MAC_TIME_EVENT_CMD_API_S_VER_1 */
+       u8 repeat;
+       u8 max_frags;
+       __le16 policy;
+} __packed; /* MAC_TIME_EVENT_CMD_API_S_VER_2 */
 
 /**
  * struct iwl_time_event_resp - response structure to iwl_time_event_cmd
@@ -765,6 +908,14 @@ struct iwl_phy_context_cmd {
 } __packed; /* PHY_CONTEXT_CMD_API_VER_1 */
 
 #define IWL_RX_INFO_PHY_CNT 8
+#define IWL_RX_INFO_ENERGY_ANT_ABC_IDX 1
+#define IWL_RX_INFO_ENERGY_ANT_A_MSK 0x000000ff
+#define IWL_RX_INFO_ENERGY_ANT_B_MSK 0x0000ff00
+#define IWL_RX_INFO_ENERGY_ANT_C_MSK 0x00ff0000
+#define IWL_RX_INFO_ENERGY_ANT_A_POS 0
+#define IWL_RX_INFO_ENERGY_ANT_B_POS 8
+#define IWL_RX_INFO_ENERGY_ANT_C_POS 16
+
 #define IWL_RX_INFO_AGC_IDX 1
 #define IWL_RX_INFO_RSSI_AB_IDX 2
 #define IWL_OFDM_AGC_A_MSK 0x0000007f
@@ -1170,7 +1321,7 @@ struct mvm_statistics_general {
        struct mvm_statistics_general_common common;
        __le32 beacon_filtered;
        __le32 missed_beacons;
-       __s8 beacon_filter_everage_energy;
+       __s8 beacon_filter_average_energy;
        __s8 beacon_filter_reason;
        __s8 beacon_filter_current_energy;
        __s8 beacon_filter_reserved;
index cd7c0032cc583073bd5acaa7ceebbd19457aa1c4..c76299a3a1e0821708a91885ffab780b624fdbc1 100644 (file)
 
 #define UCODE_VALID_OK cpu_to_le32(0x1)
 
-/* Default calibration values for WkP - set to INIT image w/o running */
-static const u8 wkp_calib_values_rx_iq_skew[] = { 0x00, 0x00, 0x01, 0x00 };
-static const u8 wkp_calib_values_tx_iq_skew[] = { 0x01, 0x00, 0x00, 0x00 };
-
-struct iwl_calib_default_data {
-       u16 size;
-       void *data;
-};
-
-#define CALIB_SIZE_N_DATA(_buf) {.size = sizeof(_buf), .data = &_buf}
-
-static const struct iwl_calib_default_data wkp_calib_default_data[12] = {
-       [9] = CALIB_SIZE_N_DATA(wkp_calib_values_tx_iq_skew),
-       [11] = CALIB_SIZE_N_DATA(wkp_calib_values_rx_iq_skew),
-};
-
 struct iwl_mvm_alive_data {
        bool valid;
        u32 scd_base_addr;
@@ -248,40 +232,6 @@ static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
                                    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
 }
 
-static int iwl_set_default_calibrations(struct iwl_mvm *mvm)
-{
-       u8 cmd_raw[16]; /* holds the variable size commands */
-       struct iwl_set_calib_default_cmd *cmd =
-               (struct iwl_set_calib_default_cmd *)cmd_raw;
-       int ret, i;
-
-       /* Setting default values for calibrations we don't run */
-       for (i = 0; i < ARRAY_SIZE(wkp_calib_default_data); i++) {
-               u16 cmd_len;
-
-               if (wkp_calib_default_data[i].size == 0)
-                       continue;
-
-               memset(cmd_raw, 0, sizeof(cmd_raw));
-               cmd_len = wkp_calib_default_data[i].size + sizeof(cmd);
-               cmd->calib_index = cpu_to_le16(i);
-               cmd->length = cpu_to_le16(wkp_calib_default_data[i].size);
-               if (WARN_ONCE(cmd_len > sizeof(cmd_raw),
-                             "Need to enlarge cmd_raw to %d\n", cmd_len))
-                       break;
-               memcpy(cmd->data, wkp_calib_default_data[i].data,
-                      wkp_calib_default_data[i].size);
-               ret = iwl_mvm_send_cmd_pdu(mvm, SET_CALIB_DEFAULT_CMD, 0,
-                                          sizeof(*cmd) +
-                                          wkp_calib_default_data[i].size,
-                                          cmd);
-               if (ret)
-                       return ret;
-       }
-
-       return 0;
-}
-
 int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
 {
        struct iwl_notification_wait calib_wait;
@@ -342,11 +292,6 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
        if (ret)
                goto error;
 
-       /* need to set default values */
-       ret = iwl_set_default_calibrations(mvm);
-       if (ret)
-               goto error;
-
        /*
         * Send phy configurations command to init uCode
         * to start the 16.0 uCode init image internal calibrations.
index 94aae9c8562c41fadd4069c20d7ae3cbe8eb2683..5fe23a5ea9b66b662cd746c113b644b1318517e2 100644 (file)
@@ -264,7 +264,8 @@ static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm,
                return 0;
 
        /* Therefore, in recovery, we can't get here */
-       WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status));
+       if (WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)))
+               return -EBUSY;
 
        mvmvif->id = find_first_bit(data.available_mac_ids,
                                    NUM_MAC_INDEX_DRIVER);
index f19baf0dea6b9c84dac1a86d393c8ddb68aa5f53..9833cdf6177cd34d199de063cb0ff466042cfed0 100644 (file)
@@ -153,7 +153,10 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
                    IEEE80211_HW_SUPPORTS_DYNAMIC_PS |
                    IEEE80211_HW_AMPDU_AGGREGATION |
                    IEEE80211_HW_TIMING_BEACON_ONLY |
-                   IEEE80211_HW_CONNECTION_MONITOR;
+                   IEEE80211_HW_CONNECTION_MONITOR |
+                   IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
+                   IEEE80211_HW_SUPPORTS_STATIC_SMPS |
+                   IEEE80211_HW_SUPPORTS_UAPSD;
 
        hw->queues = IWL_MVM_FIRST_AGG_QUEUE;
        hw->offchannel_tx_hw_queue = IWL_MVM_OFFCHANNEL_QUEUE;
@@ -188,6 +191,8 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
 
        hw->wiphy->max_remain_on_channel_duration = 10000;
        hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;
+       hw->uapsd_queues = IWL_UAPSD_AC_INFO;
+       hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
 
        /* Extract MAC address */
        memcpy(mvm->addresses[0].addr, mvm->nvm_data->hw_addr, ETH_ALEN);
@@ -506,7 +511,7 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
 
        mutex_lock(&mvm->mutex);
 
-       /* Allocate resources for the MAC context, and add it the the fw  */
+       /* Allocate resources for the MAC context, and add it to the fw  */
        ret = iwl_mvm_mac_ctxt_init(mvm, vif);
        if (ret)
                goto out_unlock;
@@ -552,6 +557,7 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
                        goto out_release;
                }
 
+               iwl_mvm_vif_dbgfs_register(mvm, vif);
                goto out_unlock;
        }
 
@@ -566,16 +572,18 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
        iwl_mvm_power_update_mode(mvm, vif);
 
        /* beacon filtering */
+       ret = iwl_mvm_disable_beacon_filter(mvm, vif);
+       if (ret)
+               goto out_remove_mac;
+
        if (!mvm->bf_allowed_vif &&
-           vif->type == NL80211_IFTYPE_STATION && !vif->p2p){
+           vif->type == NL80211_IFTYPE_STATION && !vif->p2p &&
+           mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BF_UPDATED){
                mvm->bf_allowed_vif = mvmvif;
-               vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER;
+               vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
+                                    IEEE80211_VIF_SUPPORTS_CQM_RSSI;
        }
 
-       ret = iwl_mvm_disable_beacon_filter(mvm, vif);
-       if (ret)
-               goto out_release;
-
        /*
         * P2P_DEVICE interface does not have a channel context assigned to it,
         * so a dedicated PHY context is allocated to it and the corresponding
@@ -586,7 +594,7 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
                mvmvif->phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm);
                if (!mvmvif->phy_ctxt) {
                        ret = -ENOSPC;
-                       goto out_remove_mac;
+                       goto out_free_bf;
                }
 
                iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt);
@@ -610,6 +618,12 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
        iwl_mvm_binding_remove_vif(mvm, vif);
  out_unref_phy:
        iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt);
+ out_free_bf:
+       if (mvm->bf_allowed_vif == mvmvif) {
+               mvm->bf_allowed_vif = NULL;
+               vif->driver_flags &= ~(IEEE80211_VIF_BEACON_FILTER |
+                                      IEEE80211_VIF_SUPPORTS_CQM_RSSI);
+       }
  out_remove_mac:
        mvmvif->phy_ctxt = NULL;
        iwl_mvm_mac_ctxt_remove(mvm, vif);
@@ -674,7 +688,8 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
 
        if (mvm->bf_allowed_vif == mvmvif) {
                mvm->bf_allowed_vif = NULL;
-               vif->driver_flags &= ~IEEE80211_VIF_BEACON_FILTER;
+               vif->driver_flags &= ~(IEEE80211_VIF_BEACON_FILTER |
+                                      IEEE80211_VIF_SUPPORTS_CQM_RSSI);
        }
 
        iwl_mvm_vif_dbgfs_clean(mvm, vif);
@@ -719,6 +734,20 @@ out_release:
        mutex_unlock(&mvm->mutex);
 }
 
+static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+                               s8 tx_power)
+{
+       /* FW is in charge of regulatory enforcement */
+       struct iwl_reduce_tx_power_cmd reduce_txpwr_cmd = {
+               .mac_context_id = iwl_mvm_vif_from_mac80211(vif)->id,
+               .pwr_restriction = cpu_to_le16(tx_power),
+       };
+
+       return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, CMD_SYNC,
+                                   sizeof(reduce_txpwr_cmd),
+                                   &reduce_txpwr_cmd);
+}
+
 static int iwl_mvm_mac_config(struct ieee80211_hw *hw, u32 changed)
 {
        return 0;
@@ -766,7 +795,6 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
                                IWL_ERR(mvm, "failed to update quotas\n");
                                return;
                        }
-                       iwl_mvm_bt_coex_vif_assoc(mvm, vif);
                        iwl_mvm_configure_mcast_filter(mvm, vif);
                } else if (mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
                        /* remove AP station now that the MAC is unassoc */
@@ -779,9 +807,19 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
                        if (ret)
                                IWL_ERR(mvm, "failed to update quotas\n");
                }
-               ret = iwl_mvm_power_update_mode(mvm, vif);
-               if (ret)
-                       IWL_ERR(mvm, "failed to update power mode\n");
+
+               /* reset rssi values */
+               mvmvif->bf_data.ave_beacon_signal = 0;
+
+               if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD)) {
+                       /* Workaround for FW bug, otherwise FW disables device
+                        * power save upon disassociation
+                        */
+                       ret = iwl_mvm_power_update_mode(mvm, vif);
+                       if (ret)
+                               IWL_ERR(mvm, "failed to update power mode\n");
+               }
+               iwl_mvm_bt_coex_vif_assoc(mvm, vif);
        } else if (changes & BSS_CHANGED_BEACON_INFO) {
                /*
                 * We received a beacon _after_ association so
@@ -789,11 +827,25 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
                 */
                iwl_mvm_remove_time_event(mvm, mvmvif,
                                          &mvmvif->time_event_data);
-       } else if (changes & BSS_CHANGED_PS) {
+       } else if (changes & (BSS_CHANGED_PS | BSS_CHANGED_QOS)) {
                ret = iwl_mvm_power_update_mode(mvm, vif);
                if (ret)
                        IWL_ERR(mvm, "failed to update power mode\n");
        }
+       if (changes & BSS_CHANGED_TXPOWER) {
+               IWL_DEBUG_CALIB(mvm, "Changing TX Power to %d\n",
+                               bss_conf->txpower);
+               iwl_mvm_set_tx_power(mvm, vif, bss_conf->txpower);
+       }
+
+       if (changes & BSS_CHANGED_CQM) {
+               IWL_DEBUG_MAC80211(mvm, "cqm info_changed");
+               /* reset cqm events tracking */
+               mvmvif->bf_data.last_cqm_event = 0;
+               ret = iwl_mvm_update_beacon_filter(mvm, vif);
+               if (ret)
+                       IWL_ERR(mvm, "failed to update CQM thresholds\n");
+       }
 }
 
 static int iwl_mvm_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
index 420e82d379d9826690d297de690c3e84a2307f87..b0389279cc1ed774f9f90f20137d11b6691d8eae 100644 (file)
@@ -76,6 +76,7 @@
 #include "iwl-trans.h"
 #include "sta.h"
 #include "fw-api.h"
+#include "constants.h"
 
 #define IWL_INVALID_MAC80211_QUEUE     0xff
 #define IWL_MVM_MAX_ADDRESSES          5
@@ -91,6 +92,9 @@ enum iwl_mvm_tx_fifo {
 };
 
 extern struct ieee80211_ops iwl_mvm_hw_ops;
+extern const struct iwl_mvm_power_ops pm_legacy_ops;
+extern const struct iwl_mvm_power_ops pm_mac_ops;
+
 /**
  * struct iwl_mvm_mod_params - module parameters for iwlmvm
  * @init_dbg: if true, then the NIC won't be stopped if the INIT fw asserted.
@@ -149,6 +153,22 @@ enum iwl_power_scheme {
 };
 
 #define IWL_CONN_MAX_LISTEN_INTERVAL   70
+#define IWL_UAPSD_AC_INFO              (IEEE80211_WMM_IE_STA_QOSINFO_AC_VO |\
+                                        IEEE80211_WMM_IE_STA_QOSINFO_AC_VI |\
+                                        IEEE80211_WMM_IE_STA_QOSINFO_AC_BK |\
+                                        IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
+#define IWL_UAPSD_MAX_SP               IEEE80211_WMM_IE_STA_QOSINFO_SP_2
+
+struct iwl_mvm_power_ops {
+       int (*power_update_mode)(struct iwl_mvm *mvm,
+                                struct ieee80211_vif *vif);
+       int (*power_disable)(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+       int (*power_dbgfs_read)(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+                               char *buf, int bufsz);
+#endif
+};
+
 
 #ifdef CONFIG_IWLWIFI_DEBUGFS
 enum iwl_dbgfs_pm_mask {
@@ -160,10 +180,11 @@ enum iwl_dbgfs_pm_mask {
        MVM_DEBUGFS_PM_DISABLE_POWER_OFF = BIT(5),
        MVM_DEBUGFS_PM_LPRX_ENA = BIT(6),
        MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD = BIT(7),
+       MVM_DEBUGFS_PM_SNOOZE_ENABLE = BIT(8),
 };
 
 struct iwl_dbgfs_pm {
-       u8 keep_alive_seconds;
+       u16 keep_alive_seconds;
        u32 rx_data_timeout;
        u32 tx_data_timeout;
        bool skip_over_dtim;
@@ -171,6 +192,7 @@ struct iwl_dbgfs_pm {
        bool disable_power_off;
        bool lprx_ena;
        u32 lprx_rssi_threshold;
+       bool snooze_ena;
        int mask;
 };
 
@@ -180,24 +202,28 @@ enum iwl_dbgfs_bf_mask {
        MVM_DEBUGFS_BF_ENERGY_DELTA = BIT(0),
        MVM_DEBUGFS_BF_ROAMING_ENERGY_DELTA = BIT(1),
        MVM_DEBUGFS_BF_ROAMING_STATE = BIT(2),
-       MVM_DEBUGFS_BF_TEMPERATURE_DELTA = BIT(3),
-       MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER = BIT(4),
-       MVM_DEBUGFS_BF_DEBUG_FLAG = BIT(5),
-       MVM_DEBUGFS_BF_ESCAPE_TIMER = BIT(6),
-       MVM_DEBUGFS_BA_ESCAPE_TIMER = BIT(7),
-       MVM_DEBUGFS_BA_ENABLE_BEACON_ABORT = BIT(8),
+       MVM_DEBUGFS_BF_TEMP_THRESHOLD = BIT(3),
+       MVM_DEBUGFS_BF_TEMP_FAST_FILTER = BIT(4),
+       MVM_DEBUGFS_BF_TEMP_SLOW_FILTER = BIT(5),
+       MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER = BIT(6),
+       MVM_DEBUGFS_BF_DEBUG_FLAG = BIT(7),
+       MVM_DEBUGFS_BF_ESCAPE_TIMER = BIT(8),
+       MVM_DEBUGFS_BA_ESCAPE_TIMER = BIT(9),
+       MVM_DEBUGFS_BA_ENABLE_BEACON_ABORT = BIT(10),
 };
 
 struct iwl_dbgfs_bf {
-       u8 bf_energy_delta;
-       u8 bf_roaming_energy_delta;
-       u8 bf_roaming_state;
-       u8 bf_temperature_delta;
-       u8 bf_enable_beacon_filter;
-       u8 bf_debug_flag;
+       u32 bf_energy_delta;
+       u32 bf_roaming_energy_delta;
+       u32 bf_roaming_state;
+       u32 bf_temp_threshold;
+       u32 bf_temp_fast_filter;
+       u32 bf_temp_slow_filter;
+       u32 bf_enable_beacon_filter;
+       u32 bf_debug_flag;
        u32 bf_escape_timer;
        u32 ba_escape_timer;
-       u8 ba_enable_beacon_abort;
+       u32 ba_enable_beacon_abort;
        int mask;
 };
 #endif
@@ -208,6 +234,21 @@ enum iwl_mvm_smps_type_request {
        NUM_IWL_MVM_SMPS_REQ,
 };
 
+/**
+* struct iwl_mvm_vif_bf_data - beacon filtering related data
+* @bf_enabled: indicates if beacon filtering is enabled
+* @ba_enabled: indicated if beacon abort is enabled
+* @last_beacon_signal: last beacon rssi signal in dbm
+* @ave_beacon_signal: average beacon signal
+* @last_cqm_event: rssi of the last cqm event
+*/
+struct iwl_mvm_vif_bf_data {
+       bool bf_enabled;
+       bool ba_enabled;
+       s8 ave_beacon_signal;
+       s8 last_cqm_event;
+};
+
 /**
  * struct iwl_mvm_vif - data per Virtual Interface, it is a MAC context
  * @id: between 0 and 3
@@ -233,8 +274,7 @@ struct iwl_mvm_vif {
        bool uploaded;
        bool ap_active;
        bool monitor_active;
-       /* indicate whether beacon filtering is enabled */
-       bool bf_enabled;
+       struct iwl_mvm_vif_bf_data bf_data;
 
        u32 ap_beacon_time;
 
@@ -268,7 +308,7 @@ struct iwl_mvm_vif {
 
 #if IS_ENABLED(CONFIG_IPV6)
        /* IPv6 addresses for WoWLAN */
-       struct in6_addr target_ipv6_addrs[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS];
+       struct in6_addr target_ipv6_addrs[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX];
        int num_target_ipv6_addrs;
 #endif
 #endif
@@ -402,6 +442,8 @@ struct iwl_mvm {
 
        struct iwl_notif_wait_data notif_wait;
 
+       struct mvm_statistics_rx rx_stats;
+
        unsigned long transport_queue_stop;
        u8 queue_to_mac80211[IWL_MAX_HW_QUEUES];
        atomic_t queue_stop_count[IWL_MAX_HW_QUEUES];
@@ -459,6 +501,9 @@ struct iwl_mvm {
         */
        u8 vif_count;
 
+       /* -1 for always, 0 for never, >0 for that many times */
+       s8 restart_fw;
+
        struct led_classdev led;
 
        struct ieee80211_vif *p2p_device_vif;
@@ -482,6 +527,8 @@ struct iwl_mvm {
        /* Thermal Throttling and CTkill */
        struct iwl_mvm_tt_mgmt thermal_throttle;
        s32 temperature;        /* Celsius */
+
+       const struct iwl_mvm_power_ops *pm_ops;
 };
 
 /* Extract MVM priv from op_mode and _hw */
@@ -525,6 +572,7 @@ int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags,
                                        enum ieee80211_band band);
 u8 iwl_mvm_mac80211_idx_to_hwrate(int rate_idx);
 void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm);
+void iwl_mvm_dump_sram(struct iwl_mvm *mvm);
 u8 first_antenna(u8 mask);
 u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx);
 
@@ -660,10 +708,26 @@ int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq,
                        u8 flags, bool init);
 
 /* power managment */
-int iwl_mvm_power_update_mode(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
-int iwl_mvm_power_disable(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
-void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
-                            struct iwl_powertable_cmd *cmd);
+static inline int iwl_mvm_power_update_mode(struct iwl_mvm *mvm,
+                                           struct ieee80211_vif *vif)
+{
+       return mvm->pm_ops->power_update_mode(mvm, vif);
+}
+
+static inline int iwl_mvm_power_disable(struct iwl_mvm *mvm,
+                                       struct ieee80211_vif *vif)
+{
+       return mvm->pm_ops->power_disable(mvm, vif);
+}
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+static inline int iwl_mvm_power_dbgfs_read(struct iwl_mvm *mvm,
+                                           struct ieee80211_vif *vif,
+                                           char *buf, int bufsz)
+{
+       return mvm->pm_ops->power_dbgfs_read(mvm, vif, buf, bufsz);
+}
+#endif
 
 int iwl_mvm_leds_init(struct iwl_mvm *mvm);
 void iwl_mvm_leds_exit(struct iwl_mvm *mvm);
@@ -707,6 +771,12 @@ int iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
                                 struct ieee80211_vif *vif);
 int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
                                  struct ieee80211_vif *vif);
+int iwl_mvm_beacon_filter_send_cmd(struct iwl_mvm *mvm,
+                                  struct iwl_beacon_filter_cmd *cmd);
+int iwl_mvm_update_beacon_abort(struct iwl_mvm *mvm,
+                               struct ieee80211_vif *vif, bool enable);
+int iwl_mvm_update_beacon_filter(struct iwl_mvm *mvm,
+                                 struct ieee80211_vif *vif);
 
 /* SMPS */
 void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
index af79a14063a9bcffbb0a624d3b93f97160dbe057..2fcc8ef88a68d78fbc0f2225bbabc733d8d4e619 100644 (file)
@@ -275,6 +275,7 @@ static const char *iwl_mvm_cmd_strings[REPLY_MAX] = {
        CMD(BEACON_NOTIFICATION),
        CMD(BEACON_TEMPLATE_CMD),
        CMD(STATISTICS_NOTIFICATION),
+       CMD(REDUCE_TX_POWER_CMD),
        CMD(TX_ANT_CONFIGURATION_CMD),
        CMD(D3_CONFIG_CMD),
        CMD(PROT_OFFLOAD_CONFIG_CMD),
@@ -301,6 +302,7 @@ static const char *iwl_mvm_cmd_strings[REPLY_MAX] = {
        CMD(MCAST_FILTER_CMD),
        CMD(REPLY_BEACON_FILTERING_CMD),
        CMD(REPLY_THERMAL_MNG_BACKOFF),
+       CMD(MAC_PM_POWER_TABLE),
 };
 #undef CMD
 
@@ -340,6 +342,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
        mvm->fw = fw;
        mvm->hw = hw;
 
+       mvm->restart_fw = iwlwifi_mod_params.restart_fw ? -1 : 0;
+
        mutex_init(&mvm->mutex);
        spin_lock_init(&mvm->async_handlers_lock);
        INIT_LIST_HEAD(&mvm->time_event_list);
@@ -431,6 +435,13 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
        if (err)
                goto out_unregister;
 
+       if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD)
+               mvm->pm_ops = &pm_mac_ops;
+       else
+               mvm->pm_ops = &pm_legacy_ops;
+
+       memset(&mvm->rx_stats, 0, sizeof(struct mvm_statistics_rx));
+
        return op_mode;
 
  out_unregister:
@@ -638,6 +649,22 @@ static void iwl_mvm_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb)
        ieee80211_free_txskb(mvm->hw, skb);
 }
 
+struct iwl_mvm_reprobe {
+       struct device *dev;
+       struct work_struct work;
+};
+
+static void iwl_mvm_reprobe_wk(struct work_struct *wk)
+{
+       struct iwl_mvm_reprobe *reprobe;
+
+       reprobe = container_of(wk, struct iwl_mvm_reprobe, work);
+       if (device_reprobe(reprobe->dev))
+               dev_err(reprobe->dev, "reprobe failed!\n");
+       kfree(reprobe);
+       module_put(THIS_MODULE);
+}
+
 static void iwl_mvm_nic_restart(struct iwl_mvm *mvm)
 {
        iwl_abort_notification_waits(&mvm->notif_wait);
@@ -649,9 +676,30 @@ static void iwl_mvm_nic_restart(struct iwl_mvm *mvm)
         * can't recover this since we're already half suspended.
         */
        if (test_and_set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
-               IWL_ERR(mvm, "Firmware error during reconfiguration! Abort.\n");
-       } else if (mvm->cur_ucode == IWL_UCODE_REGULAR &&
-                  iwlwifi_mod_params.restart_fw) {
+               struct iwl_mvm_reprobe *reprobe;
+
+               IWL_ERR(mvm,
+                       "Firmware error during reconfiguration - reprobe!\n");
+
+               /*
+                * get a module reference to avoid doing this while unloading
+                * anyway and to avoid scheduling a work with code that's
+                * being removed.
+                */
+               if (!try_module_get(THIS_MODULE)) {
+                       IWL_ERR(mvm, "Module is being unloaded - abort\n");
+                       return;
+               }
+
+               reprobe = kzalloc(sizeof(*reprobe), GFP_ATOMIC);
+               if (!reprobe) {
+                       module_put(THIS_MODULE);
+                       return;
+               }
+               reprobe->dev = mvm->trans->dev;
+               INIT_WORK(&reprobe->work, iwl_mvm_reprobe_wk);
+               schedule_work(&reprobe->work);
+       } else if (mvm->cur_ucode == IWL_UCODE_REGULAR && mvm->restart_fw) {
                /*
                 * This is a bit racy, but worst case we tell mac80211 about
                 * a stopped/aborted (sched) scan when that was already done
@@ -669,6 +717,8 @@ static void iwl_mvm_nic_restart(struct iwl_mvm *mvm)
                        break;
                }
 
+               if (mvm->restart_fw > 0)
+                       mvm->restart_fw--;
                ieee80211_restart_hw(mvm->hw);
        }
 }
@@ -678,6 +728,8 @@ static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode)
        struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
 
        iwl_mvm_dump_nic_error_log(mvm);
+       if (!mvm->restart_fw)
+               iwl_mvm_dump_sram(mvm);
 
        iwl_mvm_nic_restart(mvm);
 }
index e7ca965a89b82a833366651d27d31f63501ff313..21407a353a3b0e623c87ae3627957ae45ae3b162 100644 (file)
@@ -75,8 +75,8 @@
 
 #define POWER_KEEP_ALIVE_PERIOD_SEC    25
 
-static int iwl_mvm_beacon_filter_send_cmd(struct iwl_mvm *mvm,
-                                         struct iwl_beacon_filter_cmd *cmd)
+int iwl_mvm_beacon_filter_send_cmd(struct iwl_mvm *mvm,
+                                  struct iwl_beacon_filter_cmd *cmd)
 {
        int ret;
 
@@ -85,69 +85,110 @@ static int iwl_mvm_beacon_filter_send_cmd(struct iwl_mvm *mvm,
 
        if (!ret) {
                IWL_DEBUG_POWER(mvm, "ba_enable_beacon_abort is: %d\n",
-                               cmd->ba_enable_beacon_abort);
+                               le32_to_cpu(cmd->ba_enable_beacon_abort));
                IWL_DEBUG_POWER(mvm, "ba_escape_timer is: %d\n",
-                               cmd->ba_escape_timer);
+                               le32_to_cpu(cmd->ba_escape_timer));
                IWL_DEBUG_POWER(mvm, "bf_debug_flag is: %d\n",
-                               cmd->bf_debug_flag);
+                               le32_to_cpu(cmd->bf_debug_flag));
                IWL_DEBUG_POWER(mvm, "bf_enable_beacon_filter is: %d\n",
-                               cmd->bf_enable_beacon_filter);
+                               le32_to_cpu(cmd->bf_enable_beacon_filter));
                IWL_DEBUG_POWER(mvm, "bf_energy_delta is: %d\n",
-                               cmd->bf_energy_delta);
+                               le32_to_cpu(cmd->bf_energy_delta));
                IWL_DEBUG_POWER(mvm, "bf_escape_timer is: %d\n",
-                               cmd->bf_escape_timer);
+                               le32_to_cpu(cmd->bf_escape_timer));
                IWL_DEBUG_POWER(mvm, "bf_roaming_energy_delta is: %d\n",
-                               cmd->bf_roaming_energy_delta);
+                               le32_to_cpu(cmd->bf_roaming_energy_delta));
                IWL_DEBUG_POWER(mvm, "bf_roaming_state is: %d\n",
-                               cmd->bf_roaming_state);
-               IWL_DEBUG_POWER(mvm, "bf_temperature_delta is: %d\n",
-                               cmd->bf_temperature_delta);
+                               le32_to_cpu(cmd->bf_roaming_state));
+               IWL_DEBUG_POWER(mvm, "bf_temp_threshold is: %d\n",
+                               le32_to_cpu(cmd->bf_temp_threshold));
+               IWL_DEBUG_POWER(mvm, "bf_temp_fast_filter is: %d\n",
+                               le32_to_cpu(cmd->bf_temp_fast_filter));
+               IWL_DEBUG_POWER(mvm, "bf_temp_slow_filter is: %d\n",
+                               le32_to_cpu(cmd->bf_temp_slow_filter));
        }
        return ret;
 }
 
-static int iwl_mvm_update_beacon_abort(struct iwl_mvm *mvm,
-                                      struct ieee80211_vif *vif, bool enable)
+static
+void iwl_mvm_beacon_filter_set_cqm_params(struct iwl_mvm *mvm,
+                                         struct ieee80211_vif *vif,
+                                         struct iwl_beacon_filter_cmd *cmd)
+{
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+       if (vif->bss_conf.cqm_rssi_thold) {
+               cmd->bf_energy_delta =
+                       cpu_to_le32(vif->bss_conf.cqm_rssi_hyst);
+               /* fw uses an absolute value for this */
+               cmd->bf_roaming_state =
+                       cpu_to_le32(-vif->bss_conf.cqm_rssi_thold);
+       }
+       cmd->ba_enable_beacon_abort = cpu_to_le32(mvmvif->bf_data.ba_enabled);
+}
+
+int iwl_mvm_update_beacon_abort(struct iwl_mvm *mvm,
+                               struct ieee80211_vif *vif, bool enable)
 {
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        struct iwl_beacon_filter_cmd cmd = {
                IWL_BF_CMD_CONFIG_DEFAULTS,
-               .bf_enable_beacon_filter = 1,
-               .ba_enable_beacon_abort = enable,
+               .bf_enable_beacon_filter = cpu_to_le32(1),
+               .ba_enable_beacon_abort = cpu_to_le32(enable),
        };
 
-       if (!mvmvif->bf_enabled)
+       if (!mvmvif->bf_data.bf_enabled)
                return 0;
 
+       if (mvm->cur_ucode == IWL_UCODE_WOWLAN)
+               cmd.ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER_D3);
+
+       mvmvif->bf_data.ba_enabled = enable;
+       iwl_mvm_beacon_filter_set_cqm_params(mvm, vif, &cmd);
        iwl_mvm_beacon_filter_debugfs_parameters(vif, &cmd);
        return iwl_mvm_beacon_filter_send_cmd(mvm, &cmd);
 }
 
 static void iwl_mvm_power_log(struct iwl_mvm *mvm,
-                             struct iwl_powertable_cmd *cmd)
+                             struct iwl_mac_power_cmd *cmd)
 {
        IWL_DEBUG_POWER(mvm,
-                       "Sending power table command for power level %d, flags = 0x%X\n",
-                       iwlmvm_mod_params.power_scheme,
+                       "Sending power table command on mac id 0x%X for power level %d, flags = 0x%X\n",
+                       cmd->id_and_color, iwlmvm_mod_params.power_scheme,
                        le16_to_cpu(cmd->flags));
-       IWL_DEBUG_POWER(mvm, "Keep alive = %u sec\n", cmd->keep_alive_seconds);
-
-       if (cmd->flags & cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK)) {
-               IWL_DEBUG_POWER(mvm, "Rx timeout = %u usec\n",
-                               le32_to_cpu(cmd->rx_data_timeout));
-               IWL_DEBUG_POWER(mvm, "Tx timeout = %u usec\n",
-                               le32_to_cpu(cmd->tx_data_timeout));
-               if (cmd->flags & cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK))
-                       IWL_DEBUG_POWER(mvm, "DTIM periods to skip = %u\n",
-                                       le32_to_cpu(cmd->skip_dtim_periods));
-               if (cmd->flags & cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK))
-                       IWL_DEBUG_POWER(mvm, "LP RX RSSI threshold = %u\n",
-                                       le32_to_cpu(cmd->lprx_rssi_threshold));
+       IWL_DEBUG_POWER(mvm, "Keep alive = %u sec\n",
+                       le16_to_cpu(cmd->keep_alive_seconds));
+
+       if (!(cmd->flags & cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK))) {
+               IWL_DEBUG_POWER(mvm, "Disable power management\n");
+               return;
+       }
+
+       IWL_DEBUG_POWER(mvm, "Rx timeout = %u usec\n",
+                       le32_to_cpu(cmd->rx_data_timeout));
+       IWL_DEBUG_POWER(mvm, "Tx timeout = %u usec\n",
+                       le32_to_cpu(cmd->tx_data_timeout));
+       if (cmd->flags & cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK))
+               IWL_DEBUG_POWER(mvm, "DTIM periods to skip = %u\n",
+                               cmd->skip_dtim_periods);
+       if (cmd->flags & cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK))
+               IWL_DEBUG_POWER(mvm, "LP RX RSSI threshold = %u\n",
+                               cmd->lprx_rssi_threshold);
+       if (cmd->flags & cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK)) {
+               IWL_DEBUG_POWER(mvm, "uAPSD enabled\n");
+               IWL_DEBUG_POWER(mvm, "Rx timeout (uAPSD) = %u usec\n",
+                               le32_to_cpu(cmd->rx_data_timeout_uapsd));
+               IWL_DEBUG_POWER(mvm, "Tx timeout (uAPSD) = %u usec\n",
+                               le32_to_cpu(cmd->tx_data_timeout_uapsd));
+               IWL_DEBUG_POWER(mvm, "QNDP TID = %d\n", cmd->qndp_tid);
+               IWL_DEBUG_POWER(mvm, "ACs flags = 0x%x\n", cmd->uapsd_ac_flags);
+               IWL_DEBUG_POWER(mvm, "Max SP = %d\n", cmd->uapsd_max_sp);
        }
 }
 
-void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
-                            struct iwl_powertable_cmd *cmd)
+static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
+                                   struct ieee80211_vif *vif,
+                                   struct iwl_mac_power_cmd *cmd)
 {
        struct ieee80211_hw *hw = mvm->hw;
        struct ieee80211_chanctx_conf *chanctx_conf;
@@ -157,20 +198,29 @@ void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
        bool radar_detect = false;
        struct iwl_mvm_vif *mvmvif __maybe_unused =
                iwl_mvm_vif_from_mac80211(vif);
+       enum ieee80211_ac_numbers ac;
+       bool tid_found = false;
+
+       cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
+                                                           mvmvif->color));
+       dtimper = hw->conf.ps_dtim_period ?: 1;
 
        /*
         * Regardless of power management state the driver must set
         * keep alive period. FW will use it for sending keep alive NDPs
-        * immediately after association.
+        * immediately after association. Check that keep alive period
+        * is at least 3 * DTIM
         */
-       cmd->keep_alive_seconds = POWER_KEEP_ALIVE_PERIOD_SEC;
+       dtimper_msec = dtimper * vif->bss_conf.beacon_int;
+       keep_alive = max_t(int, 3 * dtimper_msec,
+                          MSEC_PER_SEC * POWER_KEEP_ALIVE_PERIOD_SEC);
+       keep_alive = DIV_ROUND_UP(keep_alive, MSEC_PER_SEC);
+       cmd->keep_alive_seconds = cpu_to_le16(keep_alive);
 
        if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM)
                return;
 
        cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK);
-       if (!vif->bss_conf.assoc)
-               cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK);
 
 #ifdef CONFIG_IWLWIFI_DEBUGFS
        if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_DISABLE_POWER_OFF &&
@@ -186,12 +236,9 @@ void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
            (vif->bss_conf.beacon_rate->bitrate == 10 ||
             vif->bss_conf.beacon_rate->bitrate == 60)) {
                cmd->flags |= cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK);
-               cmd->lprx_rssi_threshold =
-                       cpu_to_le32(POWER_LPRX_RSSI_THRESHOLD);
+               cmd->lprx_rssi_threshold = POWER_LPRX_RSSI_THRESHOLD;
        }
 
-       dtimper = hw->conf.ps_dtim_period ?: 1;
-
        /* Check if radar detection is required on current channel */
        rcu_read_lock();
        chanctx_conf = rcu_dereference(vif->chanctx_conf);
@@ -207,27 +254,82 @@ void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
            (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_LP ||
             mvm->cur_ucode == IWL_UCODE_WOWLAN)) {
                cmd->flags |= cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
-               cmd->skip_dtim_periods = cpu_to_le32(3);
+               cmd->skip_dtim_periods = 3;
        }
 
-       /* Check that keep alive period is at least 3 * DTIM */
-       dtimper_msec = dtimper * vif->bss_conf.beacon_int;
-       keep_alive = max_t(int, 3 * dtimper_msec,
-                          MSEC_PER_SEC * cmd->keep_alive_seconds);
-       keep_alive = DIV_ROUND_UP(keep_alive, MSEC_PER_SEC);
-       cmd->keep_alive_seconds = keep_alive;
-
        if (mvm->cur_ucode != IWL_UCODE_WOWLAN) {
-               cmd->rx_data_timeout = cpu_to_le32(100 * USEC_PER_MSEC);
-               cmd->tx_data_timeout = cpu_to_le32(100 * USEC_PER_MSEC);
+               cmd->rx_data_timeout =
+                       cpu_to_le32(IWL_MVM_DEFAULT_PS_RX_DATA_TIMEOUT);
+               cmd->tx_data_timeout =
+                       cpu_to_le32(IWL_MVM_DEFAULT_PS_TX_DATA_TIMEOUT);
        } else {
-               cmd->rx_data_timeout = cpu_to_le32(10 * USEC_PER_MSEC);
-               cmd->tx_data_timeout = cpu_to_le32(10 * USEC_PER_MSEC);
+               cmd->rx_data_timeout =
+                       cpu_to_le32(IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT);
+               cmd->tx_data_timeout =
+                       cpu_to_le32(IWL_MVM_WOWLAN_PS_TX_DATA_TIMEOUT);
+       }
+
+       for (ac = IEEE80211_AC_VO; ac <= IEEE80211_AC_BK; ac++) {
+               if (!mvmvif->queue_params[ac].uapsd)
+                       continue;
+
+               cmd->flags |= cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK);
+               cmd->uapsd_ac_flags |= BIT(ac);
+
+               /* QNDP TID - the highest TID with no admission control */
+               if (!tid_found && !mvmvif->queue_params[ac].acm) {
+                       tid_found = true;
+                       switch (ac) {
+                       case IEEE80211_AC_VO:
+                               cmd->qndp_tid = 6;
+                               break;
+                       case IEEE80211_AC_VI:
+                               cmd->qndp_tid = 5;
+                               break;
+                       case IEEE80211_AC_BE:
+                               cmd->qndp_tid = 0;
+                               break;
+                       case IEEE80211_AC_BK:
+                               cmd->qndp_tid = 1;
+                               break;
+                       }
+               }
+       }
+
+       if (cmd->flags & cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK)) {
+               cmd->rx_data_timeout_uapsd =
+                       cpu_to_le32(IWL_MVM_UAPSD_RX_DATA_TIMEOUT);
+               cmd->tx_data_timeout_uapsd =
+                       cpu_to_le32(IWL_MVM_UAPSD_TX_DATA_TIMEOUT);
+
+               if (cmd->uapsd_ac_flags == (BIT(IEEE80211_AC_VO) |
+                                           BIT(IEEE80211_AC_VI) |
+                                           BIT(IEEE80211_AC_BE) |
+                                           BIT(IEEE80211_AC_BK))) {
+                       cmd->flags |= cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK);
+                       cmd->snooze_interval =
+                               cpu_to_le16(IWL_MVM_PS_SNOOZE_INTERVAL);
+                       cmd->snooze_window =
+                               (mvm->cur_ucode == IWL_UCODE_WOWLAN) ?
+                               cpu_to_le16(IWL_MVM_WOWLAN_PS_SNOOZE_WINDOW) :
+                               cpu_to_le16(IWL_MVM_PS_SNOOZE_WINDOW);
+               }
+
+               cmd->uapsd_max_sp = IWL_UAPSD_MAX_SP;
+               cmd->heavy_tx_thld_packets =
+                       IWL_MVM_PS_HEAVY_TX_THLD_PACKETS;
+               cmd->heavy_rx_thld_packets =
+                       IWL_MVM_PS_HEAVY_RX_THLD_PACKETS;
+               cmd->heavy_tx_thld_percentage =
+                       IWL_MVM_PS_HEAVY_TX_THLD_PERCENT;
+               cmd->heavy_rx_thld_percentage =
+                       IWL_MVM_PS_HEAVY_RX_THLD_PERCENT;
        }
 
 #ifdef CONFIG_IWLWIFI_DEBUGFS
        if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_KEEP_ALIVE)
-               cmd->keep_alive_seconds = mvmvif->dbgfs_pm.keep_alive_seconds;
+               cmd->keep_alive_seconds =
+                       cpu_to_le16(mvmvif->dbgfs_pm.keep_alive_seconds);
        if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_SKIP_OVER_DTIM) {
                if (mvmvif->dbgfs_pm.skip_over_dtim)
                        cmd->flags |=
@@ -243,8 +345,7 @@ void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                cmd->tx_data_timeout =
                        cpu_to_le32(mvmvif->dbgfs_pm.tx_data_timeout);
        if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS)
-               cmd->skip_dtim_periods =
-                       cpu_to_le32(mvmvif->dbgfs_pm.skip_dtim_periods);
+               cmd->skip_dtim_periods = mvmvif->dbgfs_pm.skip_dtim_periods;
        if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_LPRX_ENA) {
                if (mvmvif->dbgfs_pm.lprx_ena)
                        cmd->flags |= cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK);
@@ -252,16 +353,24 @@ void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                        cmd->flags &= cpu_to_le16(~POWER_FLAGS_LPRX_ENA_MSK);
        }
        if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD)
-               cmd->lprx_rssi_threshold =
-                       cpu_to_le32(mvmvif->dbgfs_pm.lprx_rssi_threshold);
+               cmd->lprx_rssi_threshold = mvmvif->dbgfs_pm.lprx_rssi_threshold;
+       if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_SNOOZE_ENABLE) {
+               if (mvmvif->dbgfs_pm.snooze_ena)
+                       cmd->flags |=
+                               cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK);
+               else
+                       cmd->flags &=
+                               cpu_to_le16(~POWER_FLAGS_SNOOZE_ENA_MSK);
+       }
 #endif /* CONFIG_IWLWIFI_DEBUGFS */
 }
 
-int iwl_mvm_power_update_mode(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+static int iwl_mvm_power_mac_update_mode(struct iwl_mvm *mvm,
+                                        struct ieee80211_vif *vif)
 {
        int ret;
        bool ba_enable;
-       struct iwl_powertable_cmd cmd = {};
+       struct iwl_mac_power_cmd cmd = {};
 
        if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
                return 0;
@@ -280,7 +389,7 @@ int iwl_mvm_power_update_mode(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
        iwl_mvm_power_build_cmd(mvm, vif, &cmd);
        iwl_mvm_power_log(mvm, &cmd);
 
-       ret = iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, CMD_SYNC,
+       ret = iwl_mvm_send_cmd_pdu(mvm, MAC_PM_POWER_TABLE, CMD_SYNC,
                                   sizeof(cmd), &cmd);
        if (ret)
                return ret;
@@ -291,15 +400,19 @@ int iwl_mvm_power_update_mode(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
        return iwl_mvm_update_beacon_abort(mvm, vif, ba_enable);
 }
 
-int iwl_mvm_power_disable(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+static int iwl_mvm_power_mac_disable(struct iwl_mvm *mvm,
+                                    struct ieee80211_vif *vif)
 {
-       struct iwl_powertable_cmd cmd = {};
+       struct iwl_mac_power_cmd cmd = {};
        struct iwl_mvm_vif *mvmvif __maybe_unused =
                iwl_mvm_vif_from_mac80211(vif);
 
        if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
                return 0;
 
+       cmd.id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
+                                                          mvmvif->color));
+
        if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM)
                cmd.flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK);
 
@@ -310,11 +423,98 @@ int iwl_mvm_power_disable(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 #endif
        iwl_mvm_power_log(mvm, &cmd);
 
-       return iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, CMD_ASYNC,
+       return iwl_mvm_send_cmd_pdu(mvm, MAC_PM_POWER_TABLE, CMD_ASYNC,
                                    sizeof(cmd), &cmd);
 }
 
 #ifdef CONFIG_IWLWIFI_DEBUGFS
+static int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm,
+                                       struct ieee80211_vif *vif, char *buf,
+                                       int bufsz)
+{
+       struct iwl_mac_power_cmd cmd = {};
+       int pos = 0;
+
+       iwl_mvm_power_build_cmd(mvm, vif, &cmd);
+
+       pos += scnprintf(buf+pos, bufsz-pos, "disable_power_off = %d\n",
+                        (cmd.flags &
+                        cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK)) ?
+                        0 : 1);
+       pos += scnprintf(buf+pos, bufsz-pos, "power_scheme = %d\n",
+                        iwlmvm_mod_params.power_scheme);
+       pos += scnprintf(buf+pos, bufsz-pos, "flags = 0x%x\n",
+                        le16_to_cpu(cmd.flags));
+       pos += scnprintf(buf+pos, bufsz-pos, "keep_alive = %d\n",
+                        le16_to_cpu(cmd.keep_alive_seconds));
+
+       if (cmd.flags & cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK)) {
+               pos += scnprintf(buf+pos, bufsz-pos, "skip_over_dtim = %d\n",
+                                (cmd.flags &
+                                cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK)) ?
+                                1 : 0);
+               pos += scnprintf(buf+pos, bufsz-pos, "skip_dtim_periods = %d\n",
+                                cmd.skip_dtim_periods);
+               if (!(cmd.flags &
+                     cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK))) {
+                       pos += scnprintf(buf+pos, bufsz-pos,
+                                        "rx_data_timeout = %d\n",
+                                        le32_to_cpu(cmd.rx_data_timeout));
+                       pos += scnprintf(buf+pos, bufsz-pos,
+                                        "tx_data_timeout = %d\n",
+                                        le32_to_cpu(cmd.tx_data_timeout));
+               }
+               if (cmd.flags & cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK))
+                       pos += scnprintf(buf+pos, bufsz-pos,
+                                        "lprx_rssi_threshold = %d\n",
+                                        cmd.lprx_rssi_threshold);
+               if (cmd.flags & cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK)) {
+                       pos +=
+                       scnprintf(buf+pos, bufsz-pos,
+                                 "rx_data_timeout_uapsd = %d\n",
+                                 le32_to_cpu(cmd.rx_data_timeout_uapsd));
+                       pos +=
+                       scnprintf(buf+pos, bufsz-pos,
+                                 "tx_data_timeout_uapsd = %d\n",
+                                 le32_to_cpu(cmd.tx_data_timeout_uapsd));
+                       pos += scnprintf(buf+pos, bufsz-pos, "qndp_tid = %d\n",
+                                        cmd.qndp_tid);
+                       pos += scnprintf(buf+pos, bufsz-pos,
+                                        "uapsd_ac_flags = 0x%x\n",
+                                        cmd.uapsd_ac_flags);
+                       pos += scnprintf(buf+pos, bufsz-pos,
+                                        "uapsd_max_sp = %d\n",
+                                        cmd.uapsd_max_sp);
+                       pos += scnprintf(buf+pos, bufsz-pos,
+                                        "heavy_tx_thld_packets = %d\n",
+                                        cmd.heavy_tx_thld_packets);
+                       pos += scnprintf(buf+pos, bufsz-pos,
+                                        "heavy_rx_thld_packets = %d\n",
+                                        cmd.heavy_rx_thld_packets);
+                       pos += scnprintf(buf+pos, bufsz-pos,
+                                        "heavy_tx_thld_percentage = %d\n",
+                                        cmd.heavy_tx_thld_percentage);
+                       pos += scnprintf(buf+pos, bufsz-pos,
+                                        "heavy_rx_thld_percentage = %d\n",
+                                        cmd.heavy_rx_thld_percentage);
+                       pos +=
+                       scnprintf(buf+pos, bufsz-pos, "snooze_enable = %d\n",
+                                 (cmd.flags &
+                                  cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)) ?
+                                 1 : 0);
+               }
+               if (cmd.flags & cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)) {
+                       pos += scnprintf(buf+pos, bufsz-pos,
+                                        "snooze_interval = %d\n",
+                                        cmd.snooze_interval);
+                       pos += scnprintf(buf+pos, bufsz-pos,
+                                        "snooze_window = %d\n",
+                                        cmd.snooze_window);
+               }
+       }
+       return pos;
+}
+
 void
 iwl_mvm_beacon_filter_debugfs_parameters(struct ieee80211_vif *vif,
                                         struct iwl_beacon_filter_cmd *cmd)
@@ -323,22 +523,30 @@ iwl_mvm_beacon_filter_debugfs_parameters(struct ieee80211_vif *vif,
        struct iwl_dbgfs_bf *dbgfs_bf = &mvmvif->dbgfs_bf;
 
        if (dbgfs_bf->mask & MVM_DEBUGFS_BF_ENERGY_DELTA)
-               cmd->bf_energy_delta = dbgfs_bf->bf_energy_delta;
+               cmd->bf_energy_delta = cpu_to_le32(dbgfs_bf->bf_energy_delta);
        if (dbgfs_bf->mask & MVM_DEBUGFS_BF_ROAMING_ENERGY_DELTA)
                cmd->bf_roaming_energy_delta =
-                                dbgfs_bf->bf_roaming_energy_delta;
+                               cpu_to_le32(dbgfs_bf->bf_roaming_energy_delta);
        if (dbgfs_bf->mask & MVM_DEBUGFS_BF_ROAMING_STATE)
-               cmd->bf_roaming_state = dbgfs_bf->bf_roaming_state;
-       if (dbgfs_bf->mask & MVM_DEBUGFS_BF_TEMPERATURE_DELTA)
-               cmd->bf_temperature_delta = dbgfs_bf->bf_temperature_delta;
+               cmd->bf_roaming_state = cpu_to_le32(dbgfs_bf->bf_roaming_state);
+       if (dbgfs_bf->mask & MVM_DEBUGFS_BF_TEMP_THRESHOLD)
+               cmd->bf_temp_threshold =
+                               cpu_to_le32(dbgfs_bf->bf_temp_threshold);
+       if (dbgfs_bf->mask & MVM_DEBUGFS_BF_TEMP_FAST_FILTER)
+               cmd->bf_temp_fast_filter =
+                               cpu_to_le32(dbgfs_bf->bf_temp_fast_filter);
+       if (dbgfs_bf->mask & MVM_DEBUGFS_BF_TEMP_SLOW_FILTER)
+               cmd->bf_temp_slow_filter =
+                               cpu_to_le32(dbgfs_bf->bf_temp_slow_filter);
        if (dbgfs_bf->mask & MVM_DEBUGFS_BF_DEBUG_FLAG)
-               cmd->bf_debug_flag = dbgfs_bf->bf_debug_flag;
+               cmd->bf_debug_flag = cpu_to_le32(dbgfs_bf->bf_debug_flag);
        if (dbgfs_bf->mask & MVM_DEBUGFS_BF_ESCAPE_TIMER)
                cmd->bf_escape_timer = cpu_to_le32(dbgfs_bf->bf_escape_timer);
        if (dbgfs_bf->mask & MVM_DEBUGFS_BA_ESCAPE_TIMER)
                cmd->ba_escape_timer = cpu_to_le32(dbgfs_bf->ba_escape_timer);
        if (dbgfs_bf->mask & MVM_DEBUGFS_BA_ENABLE_BEACON_ABORT)
-               cmd->ba_enable_beacon_abort = dbgfs_bf->ba_enable_beacon_abort;
+               cmd->ba_enable_beacon_abort =
+                               cpu_to_le32(dbgfs_bf->ba_enable_beacon_abort);
 }
 #endif
 
@@ -348,7 +556,7 @@ int iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        struct iwl_beacon_filter_cmd cmd = {
                IWL_BF_CMD_CONFIG_DEFAULTS,
-               .bf_enable_beacon_filter = 1,
+               .bf_enable_beacon_filter = cpu_to_le32(1),
        };
        int ret;
 
@@ -356,11 +564,12 @@ int iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
            vif->type != NL80211_IFTYPE_STATION || vif->p2p)
                return 0;
 
+       iwl_mvm_beacon_filter_set_cqm_params(mvm, vif, &cmd);
        iwl_mvm_beacon_filter_debugfs_parameters(vif, &cmd);
        ret = iwl_mvm_beacon_filter_send_cmd(mvm, &cmd);
 
        if (!ret)
-               mvmvif->bf_enabled = true;
+               mvmvif->bf_data.bf_enabled = true;
 
        return ret;
 }
@@ -372,13 +581,33 @@ int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        int ret;
 
-       if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
+       if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BF_UPDATED) ||
+           vif->type != NL80211_IFTYPE_STATION || vif->p2p)
                return 0;
 
        ret = iwl_mvm_beacon_filter_send_cmd(mvm, &cmd);
 
        if (!ret)
-               mvmvif->bf_enabled = false;
+               mvmvif->bf_data.bf_enabled = false;
 
        return ret;
 }
+
+int iwl_mvm_update_beacon_filter(struct iwl_mvm *mvm,
+                                struct ieee80211_vif *vif)
+{
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+       if (!mvmvif->bf_data.bf_enabled)
+               return 0;
+
+       return iwl_mvm_enable_beacon_filter(mvm, vif);
+}
+
+const struct iwl_mvm_power_ops pm_mac_ops = {
+       .power_update_mode = iwl_mvm_power_mac_update_mode,
+       .power_disable = iwl_mvm_power_mac_disable,
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+       .power_dbgfs_read = iwl_mvm_power_mac_dbgfs_read,
+#endif
+};
diff --git a/drivers/net/wireless/iwlwifi/mvm/power_legacy.c b/drivers/net/wireless/iwlwifi/mvm/power_legacy.c
new file mode 100644 (file)
index 0000000..2ce79ba
--- /dev/null
@@ -0,0 +1,319 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+
+#include <net/mac80211.h>
+
+#include "iwl-debug.h"
+#include "mvm.h"
+#include "iwl-modparams.h"
+#include "fw-api-power.h"
+
+#define POWER_KEEP_ALIVE_PERIOD_SEC    25
+
+static void iwl_mvm_power_log(struct iwl_mvm *mvm,
+                             struct iwl_powertable_cmd *cmd)
+{
+       IWL_DEBUG_POWER(mvm,
+                       "Sending power table command for power level %d, flags = 0x%X\n",
+                       iwlmvm_mod_params.power_scheme,
+                       le16_to_cpu(cmd->flags));
+       IWL_DEBUG_POWER(mvm, "Keep alive = %u sec\n", cmd->keep_alive_seconds);
+
+       if (cmd->flags & cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK)) {
+               IWL_DEBUG_POWER(mvm, "Rx timeout = %u usec\n",
+                               le32_to_cpu(cmd->rx_data_timeout));
+               IWL_DEBUG_POWER(mvm, "Tx timeout = %u usec\n",
+                               le32_to_cpu(cmd->tx_data_timeout));
+               if (cmd->flags & cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK))
+                       IWL_DEBUG_POWER(mvm, "DTIM periods to skip = %u\n",
+                                       le32_to_cpu(cmd->skip_dtim_periods));
+               if (cmd->flags & cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK))
+                       IWL_DEBUG_POWER(mvm, "LP RX RSSI threshold = %u\n",
+                                       le32_to_cpu(cmd->lprx_rssi_threshold));
+       }
+}
+
+static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
+                                   struct ieee80211_vif *vif,
+                                   struct iwl_powertable_cmd *cmd)
+{
+       struct ieee80211_hw *hw = mvm->hw;
+       struct ieee80211_chanctx_conf *chanctx_conf;
+       struct ieee80211_channel *chan;
+       int dtimper, dtimper_msec;
+       int keep_alive;
+       bool radar_detect = false;
+       struct iwl_mvm_vif *mvmvif __maybe_unused =
+               iwl_mvm_vif_from_mac80211(vif);
+
+       /*
+        * Regardless of power management state the driver must set
+        * keep alive period. FW will use it for sending keep alive NDPs
+        * immediately after association.
+        */
+       cmd->keep_alive_seconds = POWER_KEEP_ALIVE_PERIOD_SEC;
+
+       if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM)
+               return;
+
+       cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK);
+       if (!vif->bss_conf.assoc)
+               cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK);
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+       if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_DISABLE_POWER_OFF &&
+           mvmvif->dbgfs_pm.disable_power_off)
+               cmd->flags &= cpu_to_le16(~POWER_FLAGS_POWER_SAVE_ENA_MSK);
+#endif
+       if (!vif->bss_conf.ps)
+               return;
+
+       cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK);
+
+       if (vif->bss_conf.beacon_rate &&
+           (vif->bss_conf.beacon_rate->bitrate == 10 ||
+            vif->bss_conf.beacon_rate->bitrate == 60)) {
+               cmd->flags |= cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK);
+               cmd->lprx_rssi_threshold =
+                       cpu_to_le32(POWER_LPRX_RSSI_THRESHOLD);
+       }
+
+       dtimper = hw->conf.ps_dtim_period ?: 1;
+
+       /* Check if radar detection is required on current channel */
+       rcu_read_lock();
+       chanctx_conf = rcu_dereference(vif->chanctx_conf);
+       WARN_ON(!chanctx_conf);
+       if (chanctx_conf) {
+               chan = chanctx_conf->def.chan;
+               radar_detect = chan->flags & IEEE80211_CHAN_RADAR;
+       }
+       rcu_read_unlock();
+
+       /* Check skip over DTIM conditions */
+       if (!radar_detect && (dtimper <= 10) &&
+           (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_LP ||
+            mvm->cur_ucode == IWL_UCODE_WOWLAN)) {
+               cmd->flags |= cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
+               cmd->skip_dtim_periods = cpu_to_le32(3);
+       }
+
+       /* Check that keep alive period is at least 3 * DTIM */
+       dtimper_msec = dtimper * vif->bss_conf.beacon_int;
+       keep_alive = max_t(int, 3 * dtimper_msec,
+                          MSEC_PER_SEC * cmd->keep_alive_seconds);
+       keep_alive = DIV_ROUND_UP(keep_alive, MSEC_PER_SEC);
+       cmd->keep_alive_seconds = keep_alive;
+
+       if (mvm->cur_ucode != IWL_UCODE_WOWLAN) {
+               cmd->rx_data_timeout = cpu_to_le32(100 * USEC_PER_MSEC);
+               cmd->tx_data_timeout = cpu_to_le32(100 * USEC_PER_MSEC);
+       } else {
+               cmd->rx_data_timeout = cpu_to_le32(10 * USEC_PER_MSEC);
+               cmd->tx_data_timeout = cpu_to_le32(10 * USEC_PER_MSEC);
+       }
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+       if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_KEEP_ALIVE)
+               cmd->keep_alive_seconds = mvmvif->dbgfs_pm.keep_alive_seconds;
+       if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_SKIP_OVER_DTIM) {
+               if (mvmvif->dbgfs_pm.skip_over_dtim)
+                       cmd->flags |=
+                               cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
+               else
+                       cmd->flags &=
+                               cpu_to_le16(~POWER_FLAGS_SKIP_OVER_DTIM_MSK);
+       }
+       if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_RX_DATA_TIMEOUT)
+               cmd->rx_data_timeout =
+                       cpu_to_le32(mvmvif->dbgfs_pm.rx_data_timeout);
+       if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_TX_DATA_TIMEOUT)
+               cmd->tx_data_timeout =
+                       cpu_to_le32(mvmvif->dbgfs_pm.tx_data_timeout);
+       if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS)
+               cmd->skip_dtim_periods =
+                       cpu_to_le32(mvmvif->dbgfs_pm.skip_dtim_periods);
+       if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_LPRX_ENA) {
+               if (mvmvif->dbgfs_pm.lprx_ena)
+                       cmd->flags |= cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK);
+               else
+                       cmd->flags &= cpu_to_le16(~POWER_FLAGS_LPRX_ENA_MSK);
+       }
+       if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD)
+               cmd->lprx_rssi_threshold =
+                       cpu_to_le32(mvmvif->dbgfs_pm.lprx_rssi_threshold);
+#endif /* CONFIG_IWLWIFI_DEBUGFS */
+}
+
+static int iwl_mvm_power_legacy_update_mode(struct iwl_mvm *mvm,
+                                           struct ieee80211_vif *vif)
+{
+       int ret;
+       bool ba_enable;
+       struct iwl_powertable_cmd cmd = {};
+
+       if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
+               return 0;
+
+       /*
+        * TODO: The following vif_count verification is temporary condition.
+        * Avoid power mode update if more than one interface is currently
+        * active. Remove this condition when FW will support power management
+        * on multiple MACs.
+        */
+       IWL_DEBUG_POWER(mvm, "Currently %d interfaces active\n",
+                       mvm->vif_count);
+       if (mvm->vif_count > 1)
+               return 0;
+
+       iwl_mvm_power_build_cmd(mvm, vif, &cmd);
+       iwl_mvm_power_log(mvm, &cmd);
+
+       ret = iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, CMD_SYNC,
+                                  sizeof(cmd), &cmd);
+       if (ret)
+               return ret;
+
+       ba_enable = !!(cmd.flags &
+                      cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK));
+
+       return iwl_mvm_update_beacon_abort(mvm, vif, ba_enable);
+}
+
+static int iwl_mvm_power_legacy_disable(struct iwl_mvm *mvm,
+                                       struct ieee80211_vif *vif)
+{
+       struct iwl_powertable_cmd cmd = {};
+       struct iwl_mvm_vif *mvmvif __maybe_unused =
+               iwl_mvm_vif_from_mac80211(vif);
+
+       if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
+               return 0;
+
+       if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM)
+               cmd.flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK);
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+       if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_DISABLE_POWER_OFF &&
+           mvmvif->dbgfs_pm.disable_power_off)
+               cmd.flags &= cpu_to_le16(~POWER_FLAGS_POWER_SAVE_ENA_MSK);
+#endif
+       iwl_mvm_power_log(mvm, &cmd);
+
+       return iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, CMD_ASYNC,
+                                   sizeof(cmd), &cmd);
+}
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+static int iwl_mvm_power_legacy_dbgfs_read(struct iwl_mvm *mvm,
+                                          struct ieee80211_vif *vif, char *buf,
+                                          int bufsz)
+{
+       struct iwl_powertable_cmd cmd = {};
+       int pos = 0;
+
+       iwl_mvm_power_build_cmd(mvm, vif, &cmd);
+
+       pos += scnprintf(buf+pos, bufsz-pos, "disable_power_off = %d\n",
+                        (cmd.flags &
+                        cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK)) ?
+                        0 : 1);
+       pos += scnprintf(buf+pos, bufsz-pos, "skip_dtim_periods = %d\n",
+                        le32_to_cpu(cmd.skip_dtim_periods));
+       pos += scnprintf(buf+pos, bufsz-pos, "power_scheme = %d\n",
+                        iwlmvm_mod_params.power_scheme);
+       pos += scnprintf(buf+pos, bufsz-pos, "flags = 0x%x\n",
+                        le16_to_cpu(cmd.flags));
+       pos += scnprintf(buf+pos, bufsz-pos, "keep_alive = %d\n",
+                        cmd.keep_alive_seconds);
+
+       if (cmd.flags & cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK)) {
+               pos += scnprintf(buf+pos, bufsz-pos, "skip_over_dtim = %d\n",
+                                (cmd.flags &
+                                cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK)) ?
+                                1 : 0);
+               pos += scnprintf(buf+pos, bufsz-pos, "rx_data_timeout = %d\n",
+                                le32_to_cpu(cmd.rx_data_timeout));
+               pos += scnprintf(buf+pos, bufsz-pos, "tx_data_timeout = %d\n",
+                                le32_to_cpu(cmd.tx_data_timeout));
+               if (cmd.flags & cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK))
+                       pos += scnprintf(buf+pos, bufsz-pos,
+                                        "lprx_rssi_threshold = %d\n",
+                                        le32_to_cpu(cmd.lprx_rssi_threshold));
+       }
+       return pos;
+}
+#endif
+
+const struct iwl_mvm_power_ops pm_legacy_ops = {
+       .power_update_mode = iwl_mvm_power_legacy_update_mode,
+       .power_disable = iwl_mvm_power_legacy_disable,
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+       .power_dbgfs_read = iwl_mvm_power_legacy_dbgfs_read,
+#endif
+};
index 29d49cf0fdb207893091e30542d104e34e829d5f..5c6ae16ec52b934c16835a4fecdffb300a1df38b 100644 (file)
@@ -131,23 +131,22 @@ static void iwl_mvm_quota_iterator(void *_data, u8 *mac,
 
 int iwl_mvm_update_quotas(struct iwl_mvm *mvm, struct ieee80211_vif *newvif)
 {
-       struct iwl_time_quota_cmd cmd;
-       int i, idx, ret, num_active_bindings, quota, quota_rem;
+       struct iwl_time_quota_cmd cmd = {};
+       int i, idx, ret, num_active_macs, quota, quota_rem;
        struct iwl_mvm_quota_iterator_data data = {
                .n_interfaces = {},
                .colors = { -1, -1, -1, -1 },
                .new_vif = newvif,
        };
 
+       lockdep_assert_held(&mvm->mutex);
+
        /* update all upon completion */
        if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
                return 0;
 
-       BUILD_BUG_ON(data.colors[MAX_BINDINGS - 1] != -1);
-
-       lockdep_assert_held(&mvm->mutex);
-
-       memset(&cmd, 0, sizeof(cmd));
+       /* iterator data above must match */
+       BUILD_BUG_ON(MAX_BINDINGS != 4);
 
        ieee80211_iterate_active_interfaces_atomic(
                mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
@@ -162,18 +161,17 @@ int iwl_mvm_update_quotas(struct iwl_mvm *mvm, struct ieee80211_vif *newvif)
         * IWL_MVM_MAX_QUOTA fragments. Divide these fragments
         * equally between all the bindings that require quota
         */
-       num_active_bindings = 0;
+       num_active_macs = 0;
        for (i = 0; i < MAX_BINDINGS; i++) {
                cmd.quotas[i].id_and_color = cpu_to_le32(FW_CTXT_INVALID);
-               if (data.n_interfaces[i] > 0)
-                       num_active_bindings++;
+               num_active_macs += data.n_interfaces[i];
        }
 
        quota = 0;
        quota_rem = 0;
-       if (num_active_bindings) {
-               quota = IWL_MVM_MAX_QUOTA / num_active_bindings;
-               quota_rem = IWL_MVM_MAX_QUOTA % num_active_bindings;
+       if (num_active_macs) {
+               quota = IWL_MVM_MAX_QUOTA / num_active_macs;
+               quota_rem = IWL_MVM_MAX_QUOTA % num_active_macs;
        }
 
        for (idx = 0, i = 0; i < MAX_BINDINGS; i++) {
@@ -187,7 +185,8 @@ int iwl_mvm_update_quotas(struct iwl_mvm *mvm, struct ieee80211_vif *newvif)
                        cmd.quotas[idx].quota = cpu_to_le32(0);
                        cmd.quotas[idx].max_duration = cpu_to_le32(0);
                } else {
-                       cmd.quotas[idx].quota = cpu_to_le32(quota);
+                       cmd.quotas[idx].quota =
+                               cpu_to_le32(quota * data.n_interfaces[i]);
                        cmd.quotas[idx].max_duration =
                                cpu_to_le32(IWL_MVM_MAX_QUOTA);
                }
index b328a988c130ec6136f5198d78225870877aa1a4..4ffaa3fa153f78e7d14f2ad90223b01146562c50 100644 (file)
 #define IWL_RATE_SCALE_FLUSH_INTVL   (3*HZ)
 
 static u8 rs_ht_to_legacy[] = {
-       IWL_RATE_6M_INDEX, IWL_RATE_6M_INDEX,
-       IWL_RATE_6M_INDEX, IWL_RATE_6M_INDEX,
-       IWL_RATE_6M_INDEX,
-       IWL_RATE_6M_INDEX, IWL_RATE_9M_INDEX,
-       IWL_RATE_12M_INDEX, IWL_RATE_18M_INDEX,
-       IWL_RATE_24M_INDEX, IWL_RATE_36M_INDEX,
-       IWL_RATE_48M_INDEX, IWL_RATE_54M_INDEX
+       [IWL_RATE_1M_INDEX] = IWL_RATE_6M_INDEX,
+       [IWL_RATE_2M_INDEX] = IWL_RATE_6M_INDEX,
+       [IWL_RATE_5M_INDEX] = IWL_RATE_6M_INDEX,
+       [IWL_RATE_11M_INDEX] = IWL_RATE_6M_INDEX,
+       [IWL_RATE_6M_INDEX] = IWL_RATE_6M_INDEX,
+       [IWL_RATE_9M_INDEX] = IWL_RATE_6M_INDEX,
+       [IWL_RATE_12M_INDEX] = IWL_RATE_9M_INDEX,
+       [IWL_RATE_18M_INDEX] = IWL_RATE_12M_INDEX,
+       [IWL_RATE_24M_INDEX] = IWL_RATE_18M_INDEX,
+       [IWL_RATE_36M_INDEX] = IWL_RATE_24M_INDEX,
+       [IWL_RATE_48M_INDEX] = IWL_RATE_36M_INDEX,
+       [IWL_RATE_54M_INDEX] = IWL_RATE_48M_INDEX,
+       [IWL_RATE_60M_INDEX] = IWL_RATE_54M_INDEX,
 };
 
 static const u8 ant_toggle_lookup[] = {
-       /*ANT_NONE -> */ ANT_NONE,
-       /*ANT_A    -> */ ANT_B,
-       /*ANT_B    -> */ ANT_C,
-       /*ANT_AB   -> */ ANT_BC,
-       /*ANT_C    -> */ ANT_A,
-       /*ANT_AC   -> */ ANT_AB,
-       /*ANT_BC   -> */ ANT_AC,
-       /*ANT_ABC  -> */ ANT_ABC,
+       [ANT_NONE] = ANT_NONE,
+       [ANT_A] = ANT_B,
+       [ANT_B] = ANT_C,
+       [ANT_AB] = ANT_BC,
+       [ANT_C] = ANT_A,
+       [ANT_AC] = ANT_AB,
+       [ANT_BC] = ANT_AC,
+       [ANT_ABC] = ANT_ABC,
 };
 
-#define IWL_DECLARE_RATE_INFO(r, s, ip, in, rp, rn, pp, np)    \
+#define IWL_DECLARE_RATE_INFO(r, s, rp, rn)                   \
        [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP,      \
                                    IWL_RATE_SISO_##s##M_PLCP, \
                                    IWL_RATE_MIMO2_##s##M_PLCP,\
-                                   IWL_RATE_MIMO3_##s##M_PLCP,\
-                                   IWL_RATE_##r##M_IEEE,      \
-                                   IWL_RATE_##ip##M_INDEX,    \
-                                   IWL_RATE_##in##M_INDEX,    \
                                    IWL_RATE_##rp##M_INDEX,    \
-                                   IWL_RATE_##rn##M_INDEX,    \
-                                   IWL_RATE_##pp##M_INDEX,    \
-                                   IWL_RATE_##np##M_INDEX }
+                                   IWL_RATE_##rn##M_INDEX }
 
 /*
  * Parameter order:
- *   rate, ht rate, prev rate, next rate, prev tgg rate, next tgg rate
+ *   rate, ht rate, prev rate, next rate
  *
  * If there isn't a valid next or previous rate then INV is used which
  * maps to IWL_RATE_INVALID
  *
  */
 static const struct iwl_rs_rate_info iwl_rates[IWL_RATE_COUNT] = {
-       IWL_DECLARE_RATE_INFO(1, INV, INV, 2, INV, 2, INV, 2),    /*  1mbps */
-       IWL_DECLARE_RATE_INFO(2, INV, 1, 5, 1, 5, 1, 5),          /*  2mbps */
-       IWL_DECLARE_RATE_INFO(5, INV, 2, 6, 2, 11, 2, 11),        /*5.5mbps */
-       IWL_DECLARE_RATE_INFO(11, INV, 9, 12, 9, 12, 5, 18),      /* 11mbps */
-       IWL_DECLARE_RATE_INFO(6, 6, 5, 9, 5, 11, 5, 11),        /*  6mbps */
-       IWL_DECLARE_RATE_INFO(9, 6, 6, 11, 6, 11, 5, 11),       /*  9mbps */
-       IWL_DECLARE_RATE_INFO(12, 12, 11, 18, 11, 18, 11, 18),   /* 12mbps */
-       IWL_DECLARE_RATE_INFO(18, 18, 12, 24, 12, 24, 11, 24),   /* 18mbps */
-       IWL_DECLARE_RATE_INFO(24, 24, 18, 36, 18, 36, 18, 36),   /* 24mbps */
-       IWL_DECLARE_RATE_INFO(36, 36, 24, 48, 24, 48, 24, 48),   /* 36mbps */
-       IWL_DECLARE_RATE_INFO(48, 48, 36, 54, 36, 54, 36, 54),   /* 48mbps */
-       IWL_DECLARE_RATE_INFO(54, 54, 48, INV, 48, INV, 48, INV),/* 54mbps */
-       IWL_DECLARE_RATE_INFO(60, 60, 48, INV, 48, INV, 48, INV),/* 60mbps */
+       IWL_DECLARE_RATE_INFO(1, INV, INV, 2),   /*  1mbps */
+       IWL_DECLARE_RATE_INFO(2, INV, 1, 5),     /*  2mbps */
+       IWL_DECLARE_RATE_INFO(5, INV, 2, 11),    /*5.5mbps */
+       IWL_DECLARE_RATE_INFO(11, INV, 9, 12),   /* 11mbps */
+       IWL_DECLARE_RATE_INFO(6, 6, 5, 11),      /*  6mbps */
+       IWL_DECLARE_RATE_INFO(9, 6, 6, 11),      /*  9mbps */
+       IWL_DECLARE_RATE_INFO(12, 12, 11, 18),   /* 12mbps */
+       IWL_DECLARE_RATE_INFO(18, 18, 12, 24),   /* 18mbps */
+       IWL_DECLARE_RATE_INFO(24, 24, 18, 36),   /* 24mbps */
+       IWL_DECLARE_RATE_INFO(36, 36, 24, 48),   /* 36mbps */
+       IWL_DECLARE_RATE_INFO(48, 48, 36, 54),   /* 48mbps */
+       IWL_DECLARE_RATE_INFO(54, 54, 48, INV),  /* 54mbps */
+       IWL_DECLARE_RATE_INFO(60, 60, 48, INV),  /* 60mbps */
        /* FIXME:RS:          ^^    should be INV (legacy) */
 };
 
@@ -128,9 +128,8 @@ static int iwl_hwrate_to_plcp_idx(u32 rate_n_flags)
        if (rate_n_flags & RATE_MCS_HT_MSK) {
                idx = rs_extract_rate(rate_n_flags);
 
-               if (idx >= IWL_RATE_MIMO3_6M_PLCP)
-                       idx = idx - IWL_RATE_MIMO3_6M_PLCP;
-               else if (idx >= IWL_RATE_MIMO2_6M_PLCP)
+               WARN_ON_ONCE(idx >= IWL_RATE_MIMO3_6M_PLCP);
+               if (idx >= IWL_RATE_MIMO2_6M_PLCP)
                        idx = idx - IWL_RATE_MIMO2_6M_PLCP;
 
                idx += IWL_FIRST_OFDM_RATE;
@@ -162,10 +161,10 @@ static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search);
 
 #ifdef CONFIG_MAC80211_DEBUGFS
 static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
-                            u32 *rate_n_flags, int index);
+                            u32 *rate_n_flags);
 #else
 static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
-                            u32 *rate_n_flags, int index)
+                            u32 *rate_n_flags)
 {}
 #endif
 
@@ -212,20 +211,6 @@ static s32 expected_tpt_mimo2_40MHz[4][IWL_RATE_COUNT] = {
        {0, 0, 0, 0, 186, 0, 329, 439, 527, 667, 764, 803, 838}, /* AGG+SGI */
 };
 
-static s32 expected_tpt_mimo3_20MHz[4][IWL_RATE_COUNT] = {
-       {0, 0, 0, 0,  99, 0, 153, 186, 208, 239, 256, 263, 268}, /* Norm */
-       {0, 0, 0, 0, 106, 0, 162, 194, 215, 246, 262, 268, 273}, /* SGI */
-       {0, 0, 0, 0, 134, 0, 249, 346, 431, 574, 685, 732, 775}, /* AGG */
-       {0, 0, 0, 0, 148, 0, 272, 376, 465, 614, 727, 775, 818}, /* AGG+SGI */
-};
-
-static s32 expected_tpt_mimo3_40MHz[4][IWL_RATE_COUNT] = {
-       {0, 0, 0, 0, 152, 0, 211, 239, 255, 279,  290,  294,  297}, /* Norm */
-       {0, 0, 0, 0, 160, 0, 219, 245, 261, 284,  294,  297,  300}, /* SGI */
-       {0, 0, 0, 0, 254, 0, 443, 584, 695, 868,  984, 1030, 1070}, /* AGG */
-       {0, 0, 0, 0, 277, 0, 478, 624, 737, 911, 1026, 1070, 1109}, /* AGG+SGI */
-};
-
 /* mbps, mcs */
 static const struct iwl_rate_mcs_info iwl_rate_mcs[IWL_RATE_COUNT] = {
        {  "1", "BPSK DSSS"},
@@ -260,82 +245,6 @@ static inline u8 rs_is_valid_ant(u8 valid_antenna, u8 ant_type)
        return (ant_type & valid_antenna) == ant_type;
 }
 
-/*
- *     removes the old data from the statistics. All data that is older than
- *     TID_MAX_TIME_DIFF, will be deleted.
- */
-static void rs_tl_rm_old_stats(struct iwl_traffic_load *tl, u32 curr_time)
-{
-       /* The oldest age we want to keep */
-       u32 oldest_time = curr_time - TID_MAX_TIME_DIFF;
-
-       while (tl->queue_count &&
-              (tl->time_stamp < oldest_time)) {
-               tl->total -= tl->packet_count[tl->head];
-               tl->packet_count[tl->head] = 0;
-               tl->time_stamp += TID_QUEUE_CELL_SPACING;
-               tl->queue_count--;
-               tl->head++;
-               if (tl->head >= TID_QUEUE_MAX_SIZE)
-                       tl->head = 0;
-       }
-}
-
-/*
- *     increment traffic load value for tid and also remove
- *     any old values if passed the certain time period
- */
-static u8 rs_tl_add_packet(struct iwl_lq_sta *lq_data,
-                          struct ieee80211_hdr *hdr)
-{
-       u32 curr_time = jiffies_to_msecs(jiffies);
-       u32 time_diff;
-       s32 index;
-       struct iwl_traffic_load *tl = NULL;
-       u8 tid;
-
-       if (ieee80211_is_data_qos(hdr->frame_control)) {
-               u8 *qc = ieee80211_get_qos_ctl(hdr);
-               tid = qc[0] & 0xf;
-       } else {
-               return IWL_MAX_TID_COUNT;
-       }
-
-       if (unlikely(tid >= IWL_MAX_TID_COUNT))
-               return IWL_MAX_TID_COUNT;
-
-       tl = &lq_data->load[tid];
-
-       curr_time -= curr_time % TID_ROUND_VALUE;
-
-       /* Happens only for the first packet. Initialize the data */
-       if (!(tl->queue_count)) {
-               tl->total = 1;
-               tl->time_stamp = curr_time;
-               tl->queue_count = 1;
-               tl->head = 0;
-               tl->packet_count[0] = 1;
-               return IWL_MAX_TID_COUNT;
-       }
-
-       time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time);
-       index = time_diff / TID_QUEUE_CELL_SPACING;
-
-       /* The history is too long: remove data that is older than */
-       /* TID_MAX_TIME_DIFF */
-       if (index >= TID_QUEUE_MAX_SIZE)
-               rs_tl_rm_old_stats(tl, curr_time);
-
-       index = (tl->head + index) % TID_QUEUE_MAX_SIZE;
-       tl->packet_count[index] = tl->packet_count[index] + 1;
-       tl->total = tl->total + 1;
-
-       if ((index + 1) > tl->queue_count)
-               tl->queue_count = index + 1;
-
-       return tid;
-}
-
 #ifdef CONFIG_MAC80211_DEBUGFS
 /**
  * Program the device to use fixed rate for frame transmit
@@ -349,7 +258,6 @@ static void rs_program_fix_rate(struct iwl_mvm *mvm,
        lq_sta->active_legacy_rate = 0x0FFF;    /* 1 - 54 MBits, includes CCK */
        lq_sta->active_siso_rate   = 0x1FD0;    /* 6 - 60 MBits, no 9, no CCK */
        lq_sta->active_mimo2_rate  = 0x1FD0;    /* 6 - 60 MBits, no 9, no CCK */
-       lq_sta->active_mimo3_rate  = 0x1FD0;    /* 6 - 60 MBits, no 9, no CCK */
 
        IWL_DEBUG_RATE(mvm, "sta_id %d rate 0x%X\n",
                       lq_sta->lq.sta_id, lq_sta->dbg_fixed_rate);
@@ -361,45 +269,11 @@ static void rs_program_fix_rate(struct iwl_mvm *mvm,
 }
 #endif
 
-/*
-       get the traffic load value for tid
-*/
-static u32 rs_tl_get_load(struct iwl_lq_sta *lq_data, u8 tid)
-{
-       u32 curr_time = jiffies_to_msecs(jiffies);
-       u32 time_diff;
-       s32 index;
-       struct iwl_traffic_load *tl = NULL;
-
-       if (tid >= IWL_MAX_TID_COUNT)
-               return 0;
-
-       tl = &(lq_data->load[tid]);
-
-       curr_time -= curr_time % TID_ROUND_VALUE;
-
-       if (!(tl->queue_count))
-               return 0;
-
-       time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time);
-       index = time_diff / TID_QUEUE_CELL_SPACING;
-
-       /* The history is too long: remove data that is older than */
-       /* TID_MAX_TIME_DIFF */
-       if (index >= TID_QUEUE_MAX_SIZE)
-               rs_tl_rm_old_stats(tl, curr_time);
-
-       return tl->total;
-}
-
 static int rs_tl_turn_on_agg_for_tid(struct iwl_mvm *mvm,
                                      struct iwl_lq_sta *lq_data, u8 tid,
                                      struct ieee80211_sta *sta)
 {
        int ret = -EAGAIN;
-       u32 load;
-
-       load = rs_tl_get_load(lq_data, tid);
 
        /*
         * Don't create TX aggregation sessions when in high
@@ -563,7 +437,7 @@ static u32 rate_n_flags_from_tbl(struct iwl_mvm *mvm,
                else if (is_mimo2(tbl->lq_type))
                        rate_n_flags |= iwl_rates[index].plcp_mimo2;
                else
-                       rate_n_flags |= iwl_rates[index].plcp_mimo3;
+                       WARN_ON_ONCE(1);
        } else {
                IWL_ERR(mvm, "Invalid tbl->lq_type %d\n", tbl->lq_type);
        }
@@ -601,7 +475,7 @@ static int rs_get_tbl_info_from_mcs(const u32 rate_n_flags,
        u8 num_of_ant = get_num_of_ant_from_rate(rate_n_flags);
        u8 mcs;
 
-       memset(tbl, 0, sizeof(struct iwl_scale_tbl_info));
+       memset(tbl, 0, offsetof(struct iwl_scale_tbl_info, win));
        *rate_idx = iwl_hwrate_to_plcp_idx(rate_n_flags);
 
        if (*rate_idx  == IWL_RATE_INVALID) {
@@ -640,12 +514,8 @@ static int rs_get_tbl_info_from_mcs(const u32 rate_n_flags,
                } else if (mcs <= IWL_RATE_MIMO2_60M_PLCP) {
                        if (num_of_ant == 2)
                                tbl->lq_type = LQ_MIMO2;
-               /* MIMO3 */
                } else {
-                       if (num_of_ant == 3) {
-                               tbl->max_search = IWL_MAX_11N_MIMO3_SEARCH;
-                               tbl->lq_type = LQ_MIMO3;
-                       }
+                       WARN_ON_ONCE(num_of_ant == 3);
                }
        }
        return 0;
@@ -711,10 +581,10 @@ static u16 rs_get_supported_rates(struct iwl_lq_sta *lq_sta,
        } else {
                if (is_siso(rate_type))
                        return lq_sta->active_siso_rate;
-               else if (is_mimo2(rate_type))
+               else {
+                       WARN_ON_ONCE(!is_mimo2(rate_type));
                        return lq_sta->active_mimo2_rate;
-               else
-                       return lq_sta->active_mimo3_rate;
+               }
        }
 }
 
@@ -1089,7 +959,7 @@ static void rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta,
        }
 
        /* Choose among many HT tables depending on number of streams
-        * (SISO/MIMO2/MIMO3), channel width (20/40), SGI, and aggregation
+        * (SISO/MIMO2), channel width (20/40), SGI, and aggregation
         * status */
        if (is_siso(tbl->lq_type) && !tbl->is_ht40)
                ht_tbl_pointer = expected_tpt_siso20MHz;
@@ -1097,12 +967,10 @@ static void rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta,
                ht_tbl_pointer = expected_tpt_siso40MHz;
        else if (is_mimo2(tbl->lq_type) && !tbl->is_ht40)
                ht_tbl_pointer = expected_tpt_mimo2_20MHz;
-       else if (is_mimo2(tbl->lq_type))
+       else {
+               WARN_ON_ONCE(!is_mimo2(tbl->lq_type));
                ht_tbl_pointer = expected_tpt_mimo2_40MHz;
-       else if (is_mimo3(tbl->lq_type) && !tbl->is_ht40)
-               ht_tbl_pointer = expected_tpt_mimo3_20MHz;
-       else /* if (is_mimo3(tbl->lq_type)) <-- must be true */
-               ht_tbl_pointer = expected_tpt_mimo3_40MHz;
+       }
 
        if (!tbl->is_SGI && !lq_sta->is_agg)            /* Normal */
                tbl->expected_tpt = ht_tbl_pointer[0];
@@ -1273,58 +1141,6 @@ static int rs_switch_to_mimo2(struct iwl_mvm *mvm,
        return 0;
 }
 
-/*
- * Set up search table for MIMO3
- */
-static int rs_switch_to_mimo3(struct iwl_mvm *mvm,
-                            struct iwl_lq_sta *lq_sta,
-                            struct ieee80211_sta *sta,
-                            struct iwl_scale_tbl_info *tbl, int index)
-{
-       u16 rate_mask;
-       s32 rate;
-       s8 is_green = lq_sta->is_green;
-
-       if (!sta->ht_cap.ht_supported)
-               return -1;
-
-       if (sta->smps_mode == IEEE80211_SMPS_STATIC)
-               return -1;
-
-       /* Need both Tx chains/antennas to support MIMO */
-       if (num_of_ant(iwl_fw_valid_tx_ant(mvm->fw)) < 3)
-               return -1;
-
-       IWL_DEBUG_RATE(mvm, "LQ: try to switch to MIMO3\n");
-
-       tbl->lq_type = LQ_MIMO3;
-       tbl->action = 0;
-       tbl->max_search = IWL_MAX_11N_MIMO3_SEARCH;
-       rate_mask = lq_sta->active_mimo3_rate;
-
-       if (iwl_is_ht40_tx_allowed(sta))
-               tbl->is_ht40 = 1;
-       else
-               tbl->is_ht40 = 0;
-
-       rs_set_expected_tpt_table(lq_sta, tbl);
-
-       rate = rs_get_best_rate(mvm, lq_sta, tbl, rate_mask, index);
-
-       IWL_DEBUG_RATE(mvm, "LQ: MIMO3 best rate %d mask %X\n",
-                      rate, rate_mask);
-       if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) {
-               IWL_DEBUG_RATE(mvm, "Can't switch with index %d rate mask %x\n",
-                              rate, rate_mask);
-               return -1;
-       }
-       tbl->current_rate = rate_n_flags_from_tbl(mvm, tbl, rate, is_green);
-
-       IWL_DEBUG_RATE(mvm, "LQ: Switch to new mcs %X index is green %X\n",
-                      tbl->current_rate, is_green);
-       return 0;
-}
-
 /*
  * Set up search table for SISO
  */
@@ -1434,21 +1250,14 @@ static int rs_move_legacy_other(struct iwl_mvm *mvm,
                        }
 
                        break;
-               case IWL_LEGACY_SWITCH_MIMO2_AB:
-               case IWL_LEGACY_SWITCH_MIMO2_AC:
-               case IWL_LEGACY_SWITCH_MIMO2_BC:
+               case IWL_LEGACY_SWITCH_MIMO2:
                        IWL_DEBUG_RATE(mvm, "LQ: Legacy switch to MIMO2\n");
 
                        /* Set up search table to try MIMO */
                        memcpy(search_tbl, tbl, sz);
                        search_tbl->is_SGI = 0;
 
-                       if (tbl->action == IWL_LEGACY_SWITCH_MIMO2_AB)
-                               search_tbl->ant_type = ANT_AB;
-                       else if (tbl->action == IWL_LEGACY_SWITCH_MIMO2_AC)
-                               search_tbl->ant_type = ANT_AC;
-                       else
-                               search_tbl->ant_type = ANT_BC;
+                       search_tbl->ant_type = ANT_AB;
 
                        if (!rs_is_valid_ant(valid_tx_ant,
                                             search_tbl->ant_type))
@@ -1461,30 +1270,11 @@ static int rs_move_legacy_other(struct iwl_mvm *mvm,
                                goto out;
                        }
                        break;
-
-               case IWL_LEGACY_SWITCH_MIMO3_ABC:
-                       IWL_DEBUG_RATE(mvm, "LQ: Legacy switch to MIMO3\n");
-
-                       /* Set up search table to try MIMO3 */
-                       memcpy(search_tbl, tbl, sz);
-                       search_tbl->is_SGI = 0;
-
-                       search_tbl->ant_type = ANT_ABC;
-
-                       if (!rs_is_valid_ant(valid_tx_ant,
-                                            search_tbl->ant_type))
-                               break;
-
-                       ret = rs_switch_to_mimo3(mvm, lq_sta, sta,
-                                                search_tbl, index);
-                       if (!ret) {
-                               lq_sta->action_counter = 0;
-                               goto out;
-                       }
-                       break;
+               default:
+                       WARN_ON_ONCE(1);
                }
                tbl->action++;
-               if (tbl->action > IWL_LEGACY_SWITCH_MIMO3_ABC)
+               if (tbl->action > IWL_LEGACY_SWITCH_MIMO2)
                        tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
 
                if (tbl->action == start_action)
@@ -1496,7 +1286,7 @@ static int rs_move_legacy_other(struct iwl_mvm *mvm,
 out:
        lq_sta->search_better_tbl = 1;
        tbl->action++;
-       if (tbl->action > IWL_LEGACY_SWITCH_MIMO3_ABC)
+       if (tbl->action > IWL_LEGACY_SWITCH_MIMO2)
                tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
        if (update_search_tbl_counter)
                search_tbl->action = tbl->action;
@@ -1531,7 +1321,7 @@ static int rs_move_siso_to_other(struct iwl_mvm *mvm,
        case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
                /* avoid antenna B unless MIMO */
                if (tbl->action == IWL_SISO_SWITCH_ANTENNA2)
-                       tbl->action = IWL_SISO_SWITCH_MIMO2_AB;
+                       tbl->action = IWL_SISO_SWITCH_MIMO2;
                break;
        case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
        case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
@@ -1573,19 +1363,12 @@ static int rs_move_siso_to_other(struct iwl_mvm *mvm,
                                goto out;
                        }
                        break;
-               case IWL_SISO_SWITCH_MIMO2_AB:
-               case IWL_SISO_SWITCH_MIMO2_AC:
-               case IWL_SISO_SWITCH_MIMO2_BC:
+               case IWL_SISO_SWITCH_MIMO2:
                        IWL_DEBUG_RATE(mvm, "LQ: SISO switch to MIMO2\n");
                        memcpy(search_tbl, tbl, sz);
                        search_tbl->is_SGI = 0;
 
-                       if (tbl->action == IWL_SISO_SWITCH_MIMO2_AB)
-                               search_tbl->ant_type = ANT_AB;
-                       else if (tbl->action == IWL_SISO_SWITCH_MIMO2_AC)
-                               search_tbl->ant_type = ANT_AC;
-                       else
-                               search_tbl->ant_type = ANT_BC;
+                       search_tbl->ant_type = ANT_AB;
 
                        if (!rs_is_valid_ant(valid_tx_ant,
                                             search_tbl->ant_type))
@@ -1626,24 +1409,11 @@ static int rs_move_siso_to_other(struct iwl_mvm *mvm,
                                                      index, is_green);
                        update_search_tbl_counter = 1;
                        goto out;
-               case IWL_SISO_SWITCH_MIMO3_ABC:
-                       IWL_DEBUG_RATE(mvm, "LQ: SISO switch to MIMO3\n");
-                       memcpy(search_tbl, tbl, sz);
-                       search_tbl->is_SGI = 0;
-                       search_tbl->ant_type = ANT_ABC;
-
-                       if (!rs_is_valid_ant(valid_tx_ant,
-                                            search_tbl->ant_type))
-                               break;
-
-                       ret = rs_switch_to_mimo3(mvm, lq_sta, sta,
-                                                search_tbl, index);
-                       if (!ret)
-                               goto out;
-                       break;
+               default:
+                       WARN_ON_ONCE(1);
                }
                tbl->action++;
-               if (tbl->action > IWL_LEGACY_SWITCH_MIMO3_ABC)
+               if (tbl->action > IWL_SISO_SWITCH_GI)
                        tbl->action = IWL_SISO_SWITCH_ANTENNA1;
 
                if (tbl->action == start_action)
@@ -1655,7 +1425,7 @@ static int rs_move_siso_to_other(struct iwl_mvm *mvm,
  out:
        lq_sta->search_better_tbl = 1;
        tbl->action++;
-       if (tbl->action > IWL_SISO_SWITCH_MIMO3_ABC)
+       if (tbl->action > IWL_SISO_SWITCH_GI)
                tbl->action = IWL_SISO_SWITCH_ANTENNA1;
        if (update_search_tbl_counter)
                search_tbl->action = tbl->action;
@@ -1696,8 +1466,7 @@ static int rs_move_mimo2_to_other(struct iwl_mvm *mvm,
                break;
        case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
                /* avoid antenna B unless MIMO */
-               if (tbl->action == IWL_MIMO2_SWITCH_SISO_B ||
-                   tbl->action == IWL_MIMO2_SWITCH_SISO_C)
+               if (tbl->action == IWL_MIMO2_SWITCH_SISO_B)
                        tbl->action = IWL_MIMO2_SWITCH_SISO_A;
                break;
        default:
@@ -1730,7 +1499,6 @@ static int rs_move_mimo2_to_other(struct iwl_mvm *mvm,
                        break;
                case IWL_MIMO2_SWITCH_SISO_A:
                case IWL_MIMO2_SWITCH_SISO_B:
-               case IWL_MIMO2_SWITCH_SISO_C:
                        IWL_DEBUG_RATE(mvm, "LQ: MIMO2 switch to SISO\n");
 
                        /* Set up new search table for SISO */
@@ -1738,10 +1506,8 @@ static int rs_move_mimo2_to_other(struct iwl_mvm *mvm,
 
                        if (tbl->action == IWL_MIMO2_SWITCH_SISO_A)
                                search_tbl->ant_type = ANT_A;
-                       else if (tbl->action == IWL_MIMO2_SWITCH_SISO_B)
+                       else /* tbl->action == IWL_MIMO2_SWITCH_SISO_B */
                                search_tbl->ant_type = ANT_B;
-                       else
-                               search_tbl->ant_type = ANT_C;
 
                        if (!rs_is_valid_ant(valid_tx_ant,
                                             search_tbl->ant_type))
@@ -1784,26 +1550,11 @@ static int rs_move_mimo2_to_other(struct iwl_mvm *mvm,
                                                      index, is_green);
                        update_search_tbl_counter = 1;
                        goto out;
-
-               case IWL_MIMO2_SWITCH_MIMO3_ABC:
-                       IWL_DEBUG_RATE(mvm, "LQ: MIMO2 switch to MIMO3\n");
-                       memcpy(search_tbl, tbl, sz);
-                       search_tbl->is_SGI = 0;
-                       search_tbl->ant_type = ANT_ABC;
-
-                       if (!rs_is_valid_ant(valid_tx_ant,
-                                            search_tbl->ant_type))
-                               break;
-
-                       ret = rs_switch_to_mimo3(mvm, lq_sta, sta,
-                                                search_tbl, index);
-                       if (!ret)
-                               goto out;
-
-                       break;
+               default:
+                       WARN_ON_ONCE(1);
                }
                tbl->action++;
-               if (tbl->action > IWL_MIMO2_SWITCH_MIMO3_ABC)
+               if (tbl->action > IWL_MIMO2_SWITCH_GI)
                        tbl->action = IWL_MIMO2_SWITCH_ANTENNA1;
 
                if (tbl->action == start_action)
@@ -1814,7 +1565,7 @@ static int rs_move_mimo2_to_other(struct iwl_mvm *mvm,
  out:
        lq_sta->search_better_tbl = 1;
        tbl->action++;
-       if (tbl->action > IWL_MIMO2_SWITCH_MIMO3_ABC)
+       if (tbl->action > IWL_MIMO2_SWITCH_GI)
                tbl->action = IWL_MIMO2_SWITCH_ANTENNA1;
        if (update_search_tbl_counter)
                search_tbl->action = tbl->action;
@@ -1822,171 +1573,6 @@ static int rs_move_mimo2_to_other(struct iwl_mvm *mvm,
        return 0;
 }
 
-/*
- * Try to switch to new modulation mode from MIMO3
- */
-static int rs_move_mimo3_to_other(struct iwl_mvm *mvm,
-                                struct iwl_lq_sta *lq_sta,
-                                struct ieee80211_sta *sta, int index)
-{
-       s8 is_green = lq_sta->is_green;
-       struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
-       struct iwl_scale_tbl_info *search_tbl =
-                               &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
-       struct iwl_rate_scale_data *window = &(tbl->win[index]);
-       struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
-       u32 sz = (sizeof(struct iwl_scale_tbl_info) -
-                 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
-       u8 start_action;
-       u8 valid_tx_ant = iwl_fw_valid_tx_ant(mvm->fw);
-       u8 tx_chains_num = num_of_ant(valid_tx_ant);
-       int ret;
-       u8 update_search_tbl_counter = 0;
-
-       switch (BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD)) {
-       case IWL_BT_COEX_TRAFFIC_LOAD_NONE:
-               /* nothing */
-               break;
-       case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
-       case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
-               /* avoid antenna B and MIMO */
-               if (tbl->action != IWL_MIMO3_SWITCH_SISO_A)
-                       tbl->action = IWL_MIMO3_SWITCH_SISO_A;
-               break;
-       case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
-               /* avoid antenna B unless MIMO */
-               if (tbl->action == IWL_MIMO3_SWITCH_SISO_B ||
-                   tbl->action == IWL_MIMO3_SWITCH_SISO_C)
-                       tbl->action = IWL_MIMO3_SWITCH_SISO_A;
-               break;
-       default:
-               IWL_ERR(mvm, "Invalid BT load %d",
-                       BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD));
-               break;
-       }
-
-       start_action = tbl->action;
-       while (1) {
-               lq_sta->action_counter++;
-               switch (tbl->action) {
-               case IWL_MIMO3_SWITCH_ANTENNA1:
-               case IWL_MIMO3_SWITCH_ANTENNA2:
-                       IWL_DEBUG_RATE(mvm, "LQ: MIMO3 toggle Antennas\n");
-
-                       if (tx_chains_num <= 3)
-                               break;
-
-                       if (window->success_ratio >= IWL_RS_GOOD_RATIO)
-                               break;
-
-                       memcpy(search_tbl, tbl, sz);
-                       if (rs_toggle_antenna(valid_tx_ant,
-                                             &search_tbl->current_rate,
-                                             search_tbl))
-                               goto out;
-                       break;
-               case IWL_MIMO3_SWITCH_SISO_A:
-               case IWL_MIMO3_SWITCH_SISO_B:
-               case IWL_MIMO3_SWITCH_SISO_C:
-                       IWL_DEBUG_RATE(mvm, "LQ: MIMO3 switch to SISO\n");
-
-                       /* Set up new search table for SISO */
-                       memcpy(search_tbl, tbl, sz);
-
-                       if (tbl->action == IWL_MIMO3_SWITCH_SISO_A)
-                               search_tbl->ant_type = ANT_A;
-                       else if (tbl->action == IWL_MIMO3_SWITCH_SISO_B)
-                               search_tbl->ant_type = ANT_B;
-                       else
-                               search_tbl->ant_type = ANT_C;
-
-                       if (!rs_is_valid_ant(valid_tx_ant,
-                                            search_tbl->ant_type))
-                               break;
-
-                       ret = rs_switch_to_siso(mvm, lq_sta, sta,
-                                               search_tbl, index);
-                       if (!ret)
-                               goto out;
-
-                       break;
-
-               case IWL_MIMO3_SWITCH_MIMO2_AB:
-               case IWL_MIMO3_SWITCH_MIMO2_AC:
-               case IWL_MIMO3_SWITCH_MIMO2_BC:
-                       IWL_DEBUG_RATE(mvm, "LQ: MIMO3 switch to MIMO2\n");
-
-                       memcpy(search_tbl, tbl, sz);
-                       search_tbl->is_SGI = 0;
-                       if (tbl->action == IWL_MIMO3_SWITCH_MIMO2_AB)
-                               search_tbl->ant_type = ANT_AB;
-                       else if (tbl->action == IWL_MIMO3_SWITCH_MIMO2_AC)
-                               search_tbl->ant_type = ANT_AC;
-                       else
-                               search_tbl->ant_type = ANT_BC;
-
-                       if (!rs_is_valid_ant(valid_tx_ant,
-                                            search_tbl->ant_type))
-                               break;
-
-                       ret = rs_switch_to_mimo2(mvm, lq_sta, sta,
-                                                search_tbl, index);
-                       if (!ret)
-                               goto out;
-
-                       break;
-
-               case IWL_MIMO3_SWITCH_GI:
-                       if (!tbl->is_ht40 && !(ht_cap->cap &
-                                               IEEE80211_HT_CAP_SGI_20))
-                               break;
-                       if (tbl->is_ht40 && !(ht_cap->cap &
-                                               IEEE80211_HT_CAP_SGI_40))
-                               break;
-
-                       IWL_DEBUG_RATE(mvm, "LQ: MIMO3 toggle SGI/NGI\n");
-
-                       /* Set up new search table for MIMO */
-                       memcpy(search_tbl, tbl, sz);
-                       search_tbl->is_SGI = !tbl->is_SGI;
-                       rs_set_expected_tpt_table(lq_sta, search_tbl);
-                       /*
-                        * If active table already uses the fastest possible
-                        * modulation (dual stream with short guard interval),
-                        * and it's working well, there's no need to look
-                        * for a better type of modulation!
-                        */
-                       if (tbl->is_SGI) {
-                               s32 tpt = lq_sta->last_tpt / 100;
-                               if (tpt >= search_tbl->expected_tpt[index])
-                                       break;
-                       }
-                       search_tbl->current_rate =
-                               rate_n_flags_from_tbl(mvm, search_tbl,
-                                                     index, is_green);
-                       update_search_tbl_counter = 1;
-                       goto out;
-               }
-               tbl->action++;
-               if (tbl->action > IWL_MIMO3_SWITCH_GI)
-                       tbl->action = IWL_MIMO3_SWITCH_ANTENNA1;
-
-               if (tbl->action == start_action)
-                       break;
-       }
-       search_tbl->lq_type = LQ_NONE;
-       return 0;
- out:
-       lq_sta->search_better_tbl = 1;
-       tbl->action++;
-       if (tbl->action > IWL_MIMO3_SWITCH_GI)
-               tbl->action = IWL_MIMO3_SWITCH_ANTENNA1;
-       if (update_search_tbl_counter)
-               search_tbl->action = tbl->action;
-
-       return 0;
-}
-
 /*
  * Check whether we should continue using same modulation mode, or
  * begin search for a new mode, based on:
@@ -2086,6 +1672,22 @@ static void rs_update_rate_tbl(struct iwl_mvm *mvm,
        iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, CMD_ASYNC, false);
 }
 
+static u8 rs_get_tid(struct iwl_lq_sta *lq_data,
+                    struct ieee80211_hdr *hdr)
+{
+       u8 tid = IWL_MAX_TID_COUNT;
+
+       if (ieee80211_is_data_qos(hdr->frame_control)) {
+               u8 *qc = ieee80211_get_qos_ctl(hdr);
+               tid = qc[0] & 0xf;
+       }
+
+       if (unlikely(tid > IWL_MAX_TID_COUNT))
+               tid = IWL_MAX_TID_COUNT;
+
+       return tid;
+}
+
 /*
  * Do rate scaling and search for new modulation mode.
  */
@@ -2129,7 +1731,7 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
 
        lq_sta->supp_rates = sta->supp_rates[lq_sta->band];
 
-       tid = rs_tl_add_packet(lq_sta, hdr);
+       tid = rs_get_tid(lq_sta, hdr);
        if ((tid != IWL_MAX_TID_COUNT) &&
            (lq_sta->tx_agg_tid_en & (1 << tid))) {
                tid_data = &sta_priv->tid_data[tid];
@@ -2377,8 +1979,7 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
                scale_action = 0;
 
        if ((BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD) >=
-            IWL_BT_COEX_TRAFFIC_LOAD_HIGH) &&
-            (is_mimo2(tbl->lq_type) || is_mimo3(tbl->lq_type))) {
+            IWL_BT_COEX_TRAFFIC_LOAD_HIGH) && (is_mimo(tbl->lq_type))) {
                if (lq_sta->last_bt_traffic >
                    BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD)) {
                        /*
@@ -2395,8 +1996,7 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
                BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD);
 
        if ((BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD) >=
-            IWL_BT_COEX_TRAFFIC_LOAD_HIGH) &&
-            (is_mimo2(tbl->lq_type) || is_mimo3(tbl->lq_type))) {
+            IWL_BT_COEX_TRAFFIC_LOAD_HIGH) && is_mimo(tbl->lq_type)) {
                /* search for a new modulation */
                rs_stay_in_table(lq_sta, true);
                goto lq_update;
@@ -2456,7 +2056,7 @@ lq_update:
                else if (is_mimo2(tbl->lq_type))
                        rs_move_mimo2_to_other(mvm, lq_sta, sta, index);
                else
-                       rs_move_mimo3_to_other(mvm, lq_sta, sta, index);
+                       WARN_ON_ONCE(1);
 
                /* If new "search" mode was selected, set up in uCode table */
                if (lq_sta->search_better_tbl) {
@@ -2621,11 +2221,10 @@ static void rs_get_rate(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta,
                rate_idx -= IWL_FIRST_OFDM_RATE;
                /* 6M and 9M shared same MCS index */
                rate_idx = (rate_idx > 0) ? (rate_idx - 1) : 0;
+               WARN_ON_ONCE(rs_extract_rate(lq_sta->last_rate_n_flags) >=
+                            IWL_RATE_MIMO3_6M_PLCP);
                if (rs_extract_rate(lq_sta->last_rate_n_flags) >=
-                   IWL_RATE_MIMO3_6M_PLCP)
-                       rate_idx = rate_idx + (2 * MCS_INDEX_PER_STREAM);
-               else if (rs_extract_rate(lq_sta->last_rate_n_flags) >=
-                        IWL_RATE_MIMO2_6M_PLCP)
+                   IWL_RATE_MIMO2_6M_PLCP)
                        rate_idx = rate_idx + MCS_INDEX_PER_STREAM;
                info->control.rates[0].flags = IEEE80211_TX_RC_MCS;
                if (lq_sta->last_rate_n_flags & RATE_MCS_SGI_MSK)
@@ -2688,9 +2287,6 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
 
        lq_sta->flush_timer = 0;
        lq_sta->supp_rates = sta->supp_rates[sband->band];
-       for (j = 0; j < LQ_SIZE; j++)
-               for (i = 0; i < IWL_RATE_COUNT; i++)
-                       rs_rate_scale_clear_window(&lq_sta->lq_info[j].win[i]);
 
        IWL_DEBUG_RATE(mvm,
                       "LQ: *** rate scale station global init for station %d ***\n",
@@ -2727,16 +2323,10 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
        lq_sta->active_mimo2_rate &= ~((u16)0x2);
        lq_sta->active_mimo2_rate <<= IWL_FIRST_OFDM_RATE;
 
-       lq_sta->active_mimo3_rate = ht_cap->mcs.rx_mask[2] << 1;
-       lq_sta->active_mimo3_rate |= ht_cap->mcs.rx_mask[2] & 0x1;
-       lq_sta->active_mimo3_rate &= ~((u16)0x2);
-       lq_sta->active_mimo3_rate <<= IWL_FIRST_OFDM_RATE;
-
        IWL_DEBUG_RATE(mvm,
-                      "SISO-RATE=%X MIMO2-RATE=%X MIMO3-RATE=%X\n",
+                      "SISO-RATE=%X MIMO2-RATE=%X\n",
                       lq_sta->active_siso_rate,
-                      lq_sta->active_mimo2_rate,
-                      lq_sta->active_mimo3_rate);
+                      lq_sta->active_mimo2_rate);
 
        /* These values will be overridden later */
        lq_sta->lq.single_stream_ant_msk =
@@ -2780,7 +2370,7 @@ static void rs_fill_link_cmd(struct iwl_mvm *mvm,
        struct iwl_lq_cmd *lq_cmd = &lq_sta->lq;
 
        /* Override starting rate (index 0) if needed for debug purposes */
-       rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
+       rs_dbgfs_set_mcs(lq_sta, &new_rate);
 
        /* Interpret new_rate (rate_n_flags) */
        rs_get_tbl_info_from_mcs(new_rate, lq_sta->band,
@@ -2827,7 +2417,7 @@ static void rs_fill_link_cmd(struct iwl_mvm *mvm,
                        }
 
                        /* Override next rate if needed for debug purposes */
-                       rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
+                       rs_dbgfs_set_mcs(lq_sta, &new_rate);
 
                        /* Fill next table entry */
                        lq_cmd->rs_table[index] =
@@ -2869,7 +2459,7 @@ static void rs_fill_link_cmd(struct iwl_mvm *mvm,
                use_ht_possible = 0;
 
                /* Override next rate if needed for debug purposes */
-               rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
+               rs_dbgfs_set_mcs(lq_sta, &new_rate);
 
                /* Fill next table entry */
                lq_cmd->rs_table[index] = cpu_to_le32(new_rate);
@@ -2914,7 +2504,7 @@ static void rs_free_sta(void *mvm_r, struct ieee80211_sta *sta,
 
 #ifdef CONFIG_MAC80211_DEBUGFS
 static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
-                            u32 *rate_n_flags, int index)
+                            u32 *rate_n_flags)
 {
        struct iwl_mvm *mvm;
        u8 valid_tx_ant;
@@ -2999,8 +2589,7 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
           (is_legacy(tbl->lq_type)) ? "legacy" : "HT");
        if (is_Ht(tbl->lq_type)) {
                desc += sprintf(buff+desc, " %s",
-                  (is_siso(tbl->lq_type)) ? "SISO" :
-                  ((is_mimo2(tbl->lq_type)) ? "MIMO2" : "MIMO3"));
+                  (is_siso(tbl->lq_type)) ? "SISO" : "MIMO2");
                   desc += sprintf(buff+desc, " %s",
                   (tbl->is_ht40) ? "40MHz" : "20MHz");
                   desc += sprintf(buff+desc, " %s %s %s\n",
@@ -3100,32 +2689,6 @@ static const struct file_operations rs_sta_dbgfs_stats_table_ops = {
        .llseek = default_llseek,
 };
 
-static ssize_t rs_sta_dbgfs_rate_scale_data_read(struct file *file,
-                       char __user *user_buf, size_t count, loff_t *ppos)
-{
-       struct iwl_lq_sta *lq_sta = file->private_data;
-       struct iwl_scale_tbl_info *tbl = &lq_sta->lq_info[lq_sta->active_tbl];
-       char buff[120];
-       int desc = 0;
-
-       if (is_Ht(tbl->lq_type))
-               desc += sprintf(buff+desc,
-                               "Bit Rate= %d Mb/s\n",
-                               tbl->expected_tpt[lq_sta->last_txrate_idx]);
-       else
-               desc += sprintf(buff+desc,
-                               "Bit Rate= %d Mb/s\n",
-                               iwl_rates[lq_sta->last_txrate_idx].ieee >> 1);
-
-       return simple_read_from_buffer(user_buf, count, ppos, buff, desc);
-}
-
-static const struct file_operations rs_sta_dbgfs_rate_scale_data_ops = {
-       .read = rs_sta_dbgfs_rate_scale_data_read,
-       .open = simple_open,
-       .llseek = default_llseek,
-};
-
 static void rs_add_debugfs(void *mvm, void *mvm_sta, struct dentry *dir)
 {
        struct iwl_lq_sta *lq_sta = mvm_sta;
@@ -3135,9 +2698,6 @@ static void rs_add_debugfs(void *mvm, void *mvm_sta, struct dentry *dir)
        lq_sta->rs_sta_dbgfs_stats_table_file =
                debugfs_create_file("rate_stats_table", S_IRUSR, dir,
                                    lq_sta, &rs_sta_dbgfs_stats_table_ops);
-       lq_sta->rs_sta_dbgfs_rate_scale_data_file =
-               debugfs_create_file("rate_scale_data", S_IRUSR, dir,
-                                   lq_sta, &rs_sta_dbgfs_rate_scale_data_ops);
        lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file =
                debugfs_create_u8("tx_agg_tid_enable", S_IRUSR | S_IWUSR, dir,
                                  &lq_sta->tx_agg_tid_en);
@@ -3148,7 +2708,6 @@ static void rs_remove_debugfs(void *mvm, void *mvm_sta)
        struct iwl_lq_sta *lq_sta = mvm_sta;
        debugfs_remove(lq_sta->rs_sta_dbgfs_scale_table_file);
        debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file);
-       debugfs_remove(lq_sta->rs_sta_dbgfs_rate_scale_data_file);
        debugfs_remove(lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file);
 }
 #endif
@@ -3159,8 +2718,9 @@ static void rs_remove_debugfs(void *mvm, void *mvm_sta)
  * station is added we ignore it.
  */
 static void rs_rate_init_stub(void *mvm_r,
-                                struct ieee80211_supported_band *sband,
-                                struct ieee80211_sta *sta, void *mvm_sta)
+                             struct ieee80211_supported_band *sband,
+                             struct cfg80211_chan_def *chandef,
+                             struct ieee80211_sta *sta, void *mvm_sta)
 {
 }
 static struct rate_control_ops rs_mvm_ops = {
@@ -3193,13 +2753,14 @@ void iwl_mvm_rate_control_unregister(void)
  * iwl_mvm_tx_protection - Gets LQ command, change it to enable/disable
  * Tx protection, according to this rquest and previous requests,
  * and send the LQ command.
- * @lq: The LQ command
  * @mvmsta: The station
  * @enable: Enable Tx protection?
  */
-int iwl_mvm_tx_protection(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq,
-                         struct iwl_mvm_sta *mvmsta, bool enable)
+int iwl_mvm_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
+                         bool enable)
 {
+       struct iwl_lq_cmd *lq = &mvmsta->lq_sta.lq;
+
        lockdep_assert_held(&mvm->mutex);
 
        if (enable) {
index cff4f6da77338a4056c585ef9ec2368d4416c6a6..335cf16829023e6702ef1d5921c19cccb3196c84 100644 (file)
@@ -38,14 +38,8 @@ struct iwl_rs_rate_info {
        u8 plcp;        /* uCode API:  IWL_RATE_6M_PLCP, etc. */
        u8 plcp_siso;   /* uCode API:  IWL_RATE_SISO_6M_PLCP, etc. */
        u8 plcp_mimo2;  /* uCode API:  IWL_RATE_MIMO2_6M_PLCP, etc. */
-       u8 plcp_mimo3;  /* uCode API:  IWL_RATE_MIMO3_6M_PLCP, etc. */
-       u8 ieee;        /* MAC header:  IWL_RATE_6M_IEEE, etc. */
-       u8 prev_ieee;    /* previous rate in IEEE speeds */
-       u8 next_ieee;    /* next rate in IEEE speeds */
        u8 prev_rs;      /* previous rate used in rs algo */
        u8 next_rs;      /* next rate used in rs algo */
-       u8 prev_rs_tgg;  /* previous rate used in TGG rs algo */
-       u8 next_rs_tgg;  /* next rate used in TGG rs algo */
 };
 
 #define IWL_RATE_60M_PLCP 3
@@ -120,23 +114,6 @@ enum {
        IWL_RATE_MIMO3_INVM_PLCP = IWL_RATE_SISO_INVM_PLCP,
 };
 
-/* MAC header values for bit rates */
-enum {
-       IWL_RATE_6M_IEEE  = 12,
-       IWL_RATE_9M_IEEE  = 18,
-       IWL_RATE_12M_IEEE = 24,
-       IWL_RATE_18M_IEEE = 36,
-       IWL_RATE_24M_IEEE = 48,
-       IWL_RATE_36M_IEEE = 72,
-       IWL_RATE_48M_IEEE = 96,
-       IWL_RATE_54M_IEEE = 108,
-       IWL_RATE_60M_IEEE = 120,
-       IWL_RATE_1M_IEEE  = 2,
-       IWL_RATE_2M_IEEE  = 4,
-       IWL_RATE_5M_IEEE  = 11,
-       IWL_RATE_11M_IEEE = 22,
-};
-
 #define IWL_RATES_MASK ((1 << IWL_RATE_COUNT) - 1)
 
 #define IWL_INVALID_VALUE    -1
@@ -165,47 +142,22 @@ enum {
 #define IWL_LEGACY_SWITCH_ANTENNA1      0
 #define IWL_LEGACY_SWITCH_ANTENNA2      1
 #define IWL_LEGACY_SWITCH_SISO          2
-#define IWL_LEGACY_SWITCH_MIMO2_AB      3
-#define IWL_LEGACY_SWITCH_MIMO2_AC      4
-#define IWL_LEGACY_SWITCH_MIMO2_BC      5
-#define IWL_LEGACY_SWITCH_MIMO3_ABC     6
+#define IWL_LEGACY_SWITCH_MIMO2         3
 
 /* possible actions when in siso mode */
 #define IWL_SISO_SWITCH_ANTENNA1        0
 #define IWL_SISO_SWITCH_ANTENNA2        1
-#define IWL_SISO_SWITCH_MIMO2_AB        2
-#define IWL_SISO_SWITCH_MIMO2_AC        3
-#define IWL_SISO_SWITCH_MIMO2_BC        4
-#define IWL_SISO_SWITCH_GI              5
-#define IWL_SISO_SWITCH_MIMO3_ABC       6
-
+#define IWL_SISO_SWITCH_MIMO2           2
+#define IWL_SISO_SWITCH_GI              3
 
 /* possible actions when in mimo mode */
 #define IWL_MIMO2_SWITCH_ANTENNA1       0
 #define IWL_MIMO2_SWITCH_ANTENNA2       1
 #define IWL_MIMO2_SWITCH_SISO_A         2
 #define IWL_MIMO2_SWITCH_SISO_B         3
-#define IWL_MIMO2_SWITCH_SISO_C         4
-#define IWL_MIMO2_SWITCH_GI             5
-#define IWL_MIMO2_SWITCH_MIMO3_ABC      6
-
+#define IWL_MIMO2_SWITCH_GI             4
 
-/* possible actions when in mimo3 mode */
-#define IWL_MIMO3_SWITCH_ANTENNA1       0
-#define IWL_MIMO3_SWITCH_ANTENNA2       1
-#define IWL_MIMO3_SWITCH_SISO_A         2
-#define IWL_MIMO3_SWITCH_SISO_B         3
-#define IWL_MIMO3_SWITCH_SISO_C         4
-#define IWL_MIMO3_SWITCH_MIMO2_AB       5
-#define IWL_MIMO3_SWITCH_MIMO2_AC       6
-#define IWL_MIMO3_SWITCH_MIMO2_BC       7
-#define IWL_MIMO3_SWITCH_GI             8
-
-
-#define IWL_MAX_11N_MIMO3_SEARCH IWL_MIMO3_SWITCH_GI
-#define IWL_MAX_SEARCH IWL_MIMO2_SWITCH_MIMO3_ABC
-
-/*FIXME:RS:add possible actions for MIMO3*/
+#define IWL_MAX_SEARCH IWL_MIMO2_SWITCH_GI
 
 #define IWL_ACTION_LIMIT               3       /* # possible actions */
 
@@ -240,15 +192,13 @@ enum iwl_table_type {
        LQ_A,
        LQ_SISO,        /* high-throughput types */
        LQ_MIMO2,
-       LQ_MIMO3,
        LQ_MAX,
 };
 
 #define is_legacy(tbl) (((tbl) == LQ_G) || ((tbl) == LQ_A))
 #define is_siso(tbl) ((tbl) == LQ_SISO)
 #define is_mimo2(tbl) ((tbl) == LQ_MIMO2)
-#define is_mimo3(tbl) ((tbl) == LQ_MIMO3)
-#define is_mimo(tbl) (is_mimo2(tbl) || is_mimo3(tbl))
+#define is_mimo(tbl) is_mimo2(tbl)
 #define is_Ht(tbl) (is_siso(tbl) || is_mimo(tbl))
 #define is_a_band(tbl) ((tbl) == LQ_A)
 #define is_g_and(tbl) ((tbl) == LQ_G)
@@ -290,17 +240,6 @@ struct iwl_scale_tbl_info {
        struct iwl_rate_scale_data win[IWL_RATE_COUNT]; /* rate histories */
 };
 
-struct iwl_traffic_load {
-       unsigned long time_stamp;       /* age of the oldest statistics */
-       u32 packet_count[TID_QUEUE_MAX_SIZE];   /* packet count in this time
-                                                * slice */
-       u32 total;                      /* total num of packets during the
-                                        * last TID_MAX_TIME_DIFF */
-       u8 queue_count;                 /* number of queues that has
-                                        * been used since the last cleanup */
-       u8 head;                        /* start of the circular buffer */
-};
-
 /**
  * struct iwl_lq_sta -- driver's rate scaling private structure
  *
@@ -331,18 +270,15 @@ struct iwl_lq_sta {
        u16 active_legacy_rate;
        u16 active_siso_rate;
        u16 active_mimo2_rate;
-       u16 active_mimo3_rate;
        s8 max_rate_idx;     /* Max rate set by user */
        u8 missed_rate_counter;
 
        struct iwl_lq_cmd lq;
        struct iwl_scale_tbl_info lq_info[LQ_SIZE]; /* "active", "search" */
-       struct iwl_traffic_load load[IWL_MAX_TID_COUNT];
        u8 tx_agg_tid_en;
 #ifdef CONFIG_MAC80211_DEBUGFS
        struct dentry *rs_sta_dbgfs_scale_table_file;
        struct dentry *rs_sta_dbgfs_stats_table_file;
-       struct dentry *rs_sta_dbgfs_rate_scale_data_file;
        struct dentry *rs_sta_dbgfs_tx_agg_tid_en_file;
        u32 dbg_fixed_rate;
 #endif
@@ -404,7 +340,7 @@ extern void iwl_mvm_rate_control_unregister(void);
 
 struct iwl_mvm_sta;
 
-int iwl_mvm_tx_protection(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq,
-                         struct iwl_mvm_sta *mvmsta, bool enable);
+int iwl_mvm_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
+                         bool enable);
 
 #endif /* __rs__ */
index e4930d5027d228ff4e411ec2e8c6289e1c4366d2..2a8cb5a60535d161f410116c32967cbd9a983987 100644 (file)
@@ -124,24 +124,15 @@ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
        ieee80211_rx_ni(mvm->hw, skb);
 }
 
-/*
- * iwl_mvm_calc_rssi - calculate the rssi in dBm
- * @phy_info: the phy information for the coming packet
- */
-static int iwl_mvm_calc_rssi(struct iwl_mvm *mvm,
-                            struct iwl_rx_phy_info *phy_info)
+static void iwl_mvm_calc_rssi(struct iwl_mvm *mvm,
+                             struct iwl_rx_phy_info *phy_info,
+                             struct ieee80211_rx_status *rx_status)
 {
        int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm;
        int rssi_all_band_a, rssi_all_band_b;
        u32 agc_a, agc_b, max_agc;
        u32 val;
 
-       /* Find max rssi among 2 possible receivers.
-        * These values are measured by the Digital Signal Processor (DSP).
-        * They should stay fairly constant even as the signal strength varies,
-        * if the radio's Automatic Gain Control (AGC) is working right.
-        * AGC value (see below) will provide the "interesting" info.
-        */
        val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_AGC_IDX]);
        agc_a = (val & IWL_OFDM_AGC_A_MSK) >> IWL_OFDM_AGC_A_POS;
        agc_b = (val & IWL_OFDM_AGC_B_MSK) >> IWL_OFDM_AGC_B_POS;
@@ -166,7 +157,51 @@ static int iwl_mvm_calc_rssi(struct iwl_mvm *mvm,
        IWL_DEBUG_STATS(mvm, "Rssi In A %d B %d Max %d AGCA %d AGCB %d\n",
                        rssi_a_dbm, rssi_b_dbm, max_rssi_dbm, agc_a, agc_b);
 
-       return max_rssi_dbm;
+       rx_status->signal = max_rssi_dbm;
+       rx_status->chains = (le16_to_cpu(phy_info->phy_flags) &
+                               RX_RES_PHY_FLAGS_ANTENNA)
+                                       >> RX_RES_PHY_FLAGS_ANTENNA_POS;
+       rx_status->chain_signal[0] = rssi_a_dbm;
+       rx_status->chain_signal[1] = rssi_b_dbm;
+}
+
+/*
+ * iwl_mvm_get_signal_strength - use new rx PHY INFO API
+ * values are reported by the fw as positive values - need to negate
+ * to obtain their dBM.  Account for missing antennas by replacing 0
+ * values by -256dBm: practically 0 power and a non-feasible 8 bit value.
+ */
+static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm,
+                                       struct iwl_rx_phy_info *phy_info,
+                                       struct ieee80211_rx_status *rx_status)
+{
+       int energy_a, energy_b, energy_c, max_energy;
+       u32 val;
+
+       val =
+           le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_ENERGY_ANT_ABC_IDX]);
+       energy_a = (val & IWL_RX_INFO_ENERGY_ANT_A_MSK) >>
+                                               IWL_RX_INFO_ENERGY_ANT_A_POS;
+       energy_a = energy_a ? -energy_a : -256;
+       energy_b = (val & IWL_RX_INFO_ENERGY_ANT_B_MSK) >>
+                                               IWL_RX_INFO_ENERGY_ANT_B_POS;
+       energy_b = energy_b ? -energy_b : -256;
+       energy_c = (val & IWL_RX_INFO_ENERGY_ANT_C_MSK) >>
+                                               IWL_RX_INFO_ENERGY_ANT_C_POS;
+       energy_c = energy_c ? -energy_c : -256;
+       max_energy = max(energy_a, energy_b);
+       max_energy = max(max_energy, energy_c);
+
+       IWL_DEBUG_STATS(mvm, "energy In A %d B %d C %d , and max %d\n",
+                       energy_a, energy_b, energy_c, max_energy);
+
+       rx_status->signal = max_energy;
+       rx_status->chains = (le16_to_cpu(phy_info->phy_flags) &
+                               RX_RES_PHY_FLAGS_ANTENNA)
+                                       >> RX_RES_PHY_FLAGS_ANTENNA_POS;
+       rx_status->chain_signal[0] = energy_a;
+       rx_status->chain_signal[1] = energy_b;
+       rx_status->chain_signal[2] = energy_c;
 }
 
 /*
@@ -289,29 +324,14 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
         */
        /*rx_status.flag |= RX_FLAG_MACTIME_MPDU;*/
 
-       /* Find max signal strength (dBm) among 3 antenna/receiver chains */
-       rx_status.signal = iwl_mvm_calc_rssi(mvm, phy_info);
+       if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_RX_ENERGY_API)
+               iwl_mvm_get_signal_strength(mvm, phy_info, &rx_status);
+       else
+               iwl_mvm_calc_rssi(mvm, phy_info, &rx_status);
 
        IWL_DEBUG_STATS_LIMIT(mvm, "Rssi %d, TSF %llu\n", rx_status.signal,
                              (unsigned long long)rx_status.mactime);
 
-       /*
-        * "antenna number"
-        *
-        * It seems that the antenna field in the phy flags value
-        * is actually a bit field. This is undefined by radiotap,
-        * it wants an actual antenna number but I always get "7"
-        * for most legacy frames I receive indicating that the
-        * same frame was received on all three RX chains.
-        *
-        * I think this field should be removed in favor of a
-        * new 802.11n radiotap field "RX chains" that is defined
-        * as a bitmask.
-        */
-       rx_status.antenna = (le16_to_cpu(phy_info->phy_flags) &
-                               RX_RES_PHY_FLAGS_ANTENNA)
-                               >> RX_RES_PHY_FLAGS_ANTENNA_POS;
-
        /* set the preamble flag if appropriate */
        if (phy_info->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_SHORT_PREAMBLE))
                rx_status.flag |= RX_FLAG_SHORTPRE;
@@ -364,11 +384,74 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
        return 0;
 }
 
+static void iwl_mvm_update_rx_statistics(struct iwl_mvm *mvm,
+                                        struct iwl_notif_statistics *stats)
+{
+       /*
+        * NOTE FW aggregates the statistics - BUT the statistics are cleared
+        * when the driver issues REPLY_STATISTICS_CMD 0x9c with CLEAR_STATS
+        * bit set.
+        */
+       lockdep_assert_held(&mvm->mutex);
+       memcpy(&mvm->rx_stats, &stats->rx, sizeof(struct mvm_statistics_rx));
+}
+
+struct iwl_mvm_stat_data {
+       struct iwl_notif_statistics *stats;
+       struct iwl_mvm *mvm;
+};
+
+static void iwl_mvm_stat_iterator(void *_data, u8 *mac,
+                                 struct ieee80211_vif *vif)
+{
+       struct iwl_mvm_stat_data *data = _data;
+       struct iwl_notif_statistics *stats = data->stats;
+       struct iwl_mvm *mvm = data->mvm;
+       int sig = -stats->general.beacon_filter_average_energy;
+       int last_event;
+       int thold = vif->bss_conf.cqm_rssi_thold;
+       int hyst = vif->bss_conf.cqm_rssi_hyst;
+       u16 id = le32_to_cpu(stats->rx.general.mac_id);
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+       if (mvmvif->id != id)
+               return;
+
+       if (vif->type != NL80211_IFTYPE_STATION)
+               return;
+
+       mvmvif->bf_data.ave_beacon_signal = sig;
+
+       if (!(vif->driver_flags & IEEE80211_VIF_SUPPORTS_CQM_RSSI))
+               return;
+
+       /* CQM Notification */
+       last_event = mvmvif->bf_data.last_cqm_event;
+       if (thold && sig < thold && (last_event == 0 ||
+                                    sig < last_event - hyst)) {
+               mvmvif->bf_data.last_cqm_event = sig;
+               IWL_DEBUG_RX(mvm, "cqm_iterator cqm low %d\n",
+                            sig);
+               ieee80211_cqm_rssi_notify(
+                       vif,
+                       NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW,
+                       GFP_KERNEL);
+       } else if (sig > thold &&
+                  (last_event == 0 || sig > last_event + hyst)) {
+               mvmvif->bf_data.last_cqm_event = sig;
+               IWL_DEBUG_RX(mvm, "cqm_iterator cqm high %d\n",
+                            sig);
+               ieee80211_cqm_rssi_notify(
+                       vif,
+                       NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH,
+                       GFP_KERNEL);
+       }
+}
+
 /*
  * iwl_mvm_rx_statistics - STATISTICS_NOTIFICATION handler
  *
  * TODO: This handler is implemented partially.
- * It only gets the NIC's temperature.
  */
 int iwl_mvm_rx_statistics(struct iwl_mvm *mvm,
                          struct iwl_rx_cmd_buffer *rxb,
@@ -377,11 +460,20 @@ int iwl_mvm_rx_statistics(struct iwl_mvm *mvm,
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_notif_statistics *stats = (void *)&pkt->data;
        struct mvm_statistics_general_common *common = &stats->general.common;
+       struct iwl_mvm_stat_data data = {
+               .stats = stats,
+               .mvm = mvm,
+       };
 
        if (mvm->temperature != le32_to_cpu(common->temperature)) {
                mvm->temperature = le32_to_cpu(common->temperature);
                iwl_mvm_tt_handler(mvm);
        }
+       iwl_mvm_update_rx_statistics(mvm, stats);
 
+       ieee80211_iterate_active_interfaces(mvm->hw,
+                                           IEEE80211_IFACE_ITER_NORMAL,
+                                           iwl_mvm_stat_iterator,
+                                           &data);
        return 0;
 }
index acdff6b67e0460e669e5c25192af245df93ac4b5..9a7ab84953000234b463ac1636cabcf506b95433 100644 (file)
@@ -301,10 +301,12 @@ int iwl_mvm_scan_request(struct iwl_mvm *mvm,
         */
        if (req->n_ssids > 0) {
                cmd->passive2active = cpu_to_le16(1);
+               cmd->scan_flags |= SCAN_FLAGS_PASSIVE2ACTIVE;
                ssid = req->ssids[0].ssid;
                ssid_len = req->ssids[0].ssid_len;
        } else {
                cmd->passive2active = 0;
+               cmd->scan_flags &= ~SCAN_FLAGS_PASSIVE2ACTIVE;
        }
 
        iwl_mvm_scan_fill_ssids(cmd, req);
index 563f559b902da8560f7cbdd06726872d7849b34b..44add291531bb505a6d388fde3fdf961b7cc14e2 100644 (file)
@@ -826,8 +826,7 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                 * method for HT traffic
                 * this function also sends the LQ command
                 */
-               return iwl_mvm_tx_protection(mvm, &mvmsta->lq_sta.lq,
-                                            mvmsta, true);
+               return iwl_mvm_tx_protection(mvm, mvmsta, true);
                /*
                 * TODO: remove the TLC_RTS flag when we tear down the last
                 * AGG session (agg_tids_count in DVM)
index ad9bbca992133cc096ff0b90c5048ae248b635c9..76a3c177e100ab28660ac9433fe5c26edc5a79e8 100644 (file)
@@ -73,7 +73,6 @@
 #include "iwl-prph.h"
 
 /* A TimeUnit is 1024 microsecond */
-#define TU_TO_JIFFIES(_tu)     (usecs_to_jiffies((_tu) * 1024))
 #define MSEC_TO_TU(_msec)      (_msec*1000/1024)
 
 /*
@@ -138,6 +137,20 @@ static void iwl_mvm_roc_finished(struct iwl_mvm *mvm)
        schedule_work(&mvm->roc_done_wk);
 }
 
+static bool iwl_mvm_te_check_disconnect(struct iwl_mvm *mvm,
+                                       struct ieee80211_vif *vif,
+                                       const char *errmsg)
+{
+       if (vif->type != NL80211_IFTYPE_STATION)
+               return false;
+       if (vif->bss_conf.assoc && vif->bss_conf.dtim_period)
+               return false;
+       if (errmsg)
+               IWL_ERR(mvm, "%s\n", errmsg);
+       ieee80211_connection_loss(vif);
+       return true;
+}
+
 /*
  * Handles a FW notification for an event that is known to the driver.
  *
@@ -163,10 +176,15 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm,
         * P2P Device discoveribility, while there are other higher priority
         * events in the system).
         */
-       WARN_ONCE(!le32_to_cpu(notif->status),
-                 "Failed to schedule time event\n");
+       if (WARN_ONCE(!le32_to_cpu(notif->status),
+                     "Failed to schedule time event\n")) {
+               if (iwl_mvm_te_check_disconnect(mvm, te_data->vif, NULL)) {
+                       iwl_mvm_te_clear_data(mvm, te_data);
+                       return;
+               }
+       }
 
-       if (le32_to_cpu(notif->action) & TE_NOTIF_HOST_EVENT_END) {
+       if (le32_to_cpu(notif->action) & TE_V2_NOTIF_HOST_EVENT_END) {
                IWL_DEBUG_TE(mvm,
                             "TE ended - current time %lu, estimated end %lu\n",
                             jiffies, te_data->end_jiffies);
@@ -180,19 +198,12 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm,
                 * By now, we should have finished association
                 * and know the dtim period.
                 */
-               if (te_data->vif->type == NL80211_IFTYPE_STATION &&
-                   (!te_data->vif->bss_conf.assoc ||
-                    !te_data->vif->bss_conf.dtim_period)) {
-                       IWL_ERR(mvm,
-                               "No assocation and the time event is over already...\n");
-                       ieee80211_connection_loss(te_data->vif);
-               }
-
+               iwl_mvm_te_check_disconnect(mvm, te_data->vif,
+                       "No association and the time event is over already...");
                iwl_mvm_te_clear_data(mvm, te_data);
-       } else if (le32_to_cpu(notif->action) & TE_NOTIF_HOST_EVENT_START) {
+       } else if (le32_to_cpu(notif->action) & TE_V2_NOTIF_HOST_EVENT_START) {
                te_data->running = true;
-               te_data->end_jiffies = jiffies +
-                       TU_TO_JIFFIES(te_data->duration);
+               te_data->end_jiffies = TU_TO_EXP_TIME(te_data->duration);
 
                if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
                        set_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status);
@@ -257,10 +268,67 @@ static bool iwl_mvm_time_event_response(struct iwl_notif_wait_data *notif_wait,
        return true;
 }
 
+/* used to convert from time event API v2 to v1 */
+#define TE_V2_DEP_POLICY_MSK (TE_V2_DEP_OTHER | TE_V2_DEP_TSF |\
+                            TE_V2_EVENT_SOCIOPATHIC)
+static inline u16 te_v2_get_notify(__le16 policy)
+{
+       return le16_to_cpu(policy) & TE_V2_NOTIF_MSK;
+}
+
+static inline u16 te_v2_get_dep_policy(__le16 policy)
+{
+       return (le16_to_cpu(policy) & TE_V2_DEP_POLICY_MSK) >>
+               TE_V2_PLACEMENT_POS;
+}
+
+static inline u16 te_v2_get_absence(__le16 policy)
+{
+       return (le16_to_cpu(policy) & TE_V2_ABSENCE) >> TE_V2_ABSENCE_POS;
+}
+
+static void iwl_mvm_te_v2_to_v1(const struct iwl_time_event_cmd_v2 *cmd_v2,
+                               struct iwl_time_event_cmd_v1 *cmd_v1)
+{
+       cmd_v1->id_and_color = cmd_v2->id_and_color;
+       cmd_v1->action = cmd_v2->action;
+       cmd_v1->id = cmd_v2->id;
+       cmd_v1->apply_time = cmd_v2->apply_time;
+       cmd_v1->max_delay = cmd_v2->max_delay;
+       cmd_v1->depends_on = cmd_v2->depends_on;
+       cmd_v1->interval = cmd_v2->interval;
+       cmd_v1->duration = cmd_v2->duration;
+       if (cmd_v2->repeat == TE_V2_REPEAT_ENDLESS)
+               cmd_v1->repeat = cpu_to_le32(TE_V1_REPEAT_ENDLESS);
+       else
+               cmd_v1->repeat = cpu_to_le32(cmd_v2->repeat);
+       cmd_v1->max_frags = cpu_to_le32(cmd_v2->max_frags);
+       cmd_v1->interval_reciprocal = 0; /* unused */
+
+       cmd_v1->dep_policy = cpu_to_le32(te_v2_get_dep_policy(cmd_v2->policy));
+       cmd_v1->is_present = cpu_to_le32(!te_v2_get_absence(cmd_v2->policy));
+       cmd_v1->notify = cpu_to_le32(te_v2_get_notify(cmd_v2->policy));
+}
+
+static int iwl_mvm_send_time_event_cmd(struct iwl_mvm *mvm,
+                                      const struct iwl_time_event_cmd_v2 *cmd)
+{
+       struct iwl_time_event_cmd_v1 cmd_v1;
+
+       if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_TIME_EVENT_API_V2)
+               return iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, CMD_SYNC,
+                                           sizeof(*cmd), cmd);
+
+       iwl_mvm_te_v2_to_v1(cmd, &cmd_v1);
+       return iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, CMD_SYNC,
+                                   sizeof(cmd_v1), &cmd_v1);
+}
+
+
 static int iwl_mvm_time_event_send_add(struct iwl_mvm *mvm,
                                       struct ieee80211_vif *vif,
                                       struct iwl_mvm_time_event_data *te_data,
-                                      struct iwl_time_event_cmd *te_cmd)
+                                      struct iwl_time_event_cmd_v2 *te_cmd)
 {
        static const u8 time_event_response[] = { TIME_EVENT_CMD };
        struct iwl_notification_wait wait_time_event;
@@ -296,8 +364,7 @@ static int iwl_mvm_time_event_send_add(struct iwl_mvm *mvm,
                                   ARRAY_SIZE(time_event_response),
                                   iwl_mvm_time_event_response, te_data);
 
-       ret = iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, CMD_SYNC,
-                                  sizeof(*te_cmd), te_cmd);
+       ret = iwl_mvm_send_time_event_cmd(mvm, te_cmd);
        if (ret) {
                IWL_ERR(mvm, "Couldn't send TIME_EVENT_CMD: %d\n", ret);
                iwl_remove_notification(&mvm->notif_wait, &wait_time_event);
@@ -324,13 +391,12 @@ void iwl_mvm_protect_session(struct iwl_mvm *mvm,
 {
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
-       struct iwl_time_event_cmd time_cmd = {};
+       struct iwl_time_event_cmd_v2 time_cmd = {};
 
        lockdep_assert_held(&mvm->mutex);
 
        if (te_data->running &&
-           time_after(te_data->end_jiffies,
-                      jiffies + TU_TO_JIFFIES(min_duration))) {
+           time_after(te_data->end_jiffies, TU_TO_EXP_TIME(min_duration))) {
                IWL_DEBUG_TE(mvm, "We have enough time in the current TE: %u\n",
                             jiffies_to_msecs(te_data->end_jiffies - jiffies));
                return;
@@ -359,17 +425,14 @@ void iwl_mvm_protect_session(struct iwl_mvm *mvm,
        time_cmd.apply_time =
                cpu_to_le32(iwl_read_prph(mvm->trans, DEVICE_SYSTEM_TIME_REG));
 
-       time_cmd.dep_policy = TE_INDEPENDENT;
-       time_cmd.is_present = cpu_to_le32(1);
-       time_cmd.max_frags = cpu_to_le32(TE_FRAG_NONE);
+       time_cmd.max_frags = TE_V2_FRAG_NONE;
        time_cmd.max_delay = cpu_to_le32(500);
        /* TODO: why do we need to interval = bi if it is not periodic? */
        time_cmd.interval = cpu_to_le32(1);
-       time_cmd.interval_reciprocal = cpu_to_le32(iwl_mvm_reciprocal(1));
        time_cmd.duration = cpu_to_le32(duration);
-       time_cmd.repeat = cpu_to_le32(1);
-       time_cmd.notify = cpu_to_le32(TE_NOTIF_HOST_EVENT_START |
-                                     TE_NOTIF_HOST_EVENT_END);
+       time_cmd.repeat = 1;
+       time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
+                                     TE_V2_NOTIF_HOST_EVENT_END);
 
        iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
 }
@@ -383,7 +446,7 @@ void iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
                               struct iwl_mvm_vif *mvmvif,
                               struct iwl_mvm_time_event_data *te_data)
 {
-       struct iwl_time_event_cmd time_cmd = {};
+       struct iwl_time_event_cmd_v2 time_cmd = {};
        u32 id, uid;
        int ret;
 
@@ -420,8 +483,7 @@ void iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
                cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
 
        IWL_DEBUG_TE(mvm, "Removing TE 0x%x\n", le32_to_cpu(time_cmd.id));
-       ret = iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, CMD_SYNC,
-                                  sizeof(time_cmd), &time_cmd);
+       ret = iwl_mvm_send_time_event_cmd(mvm, &time_cmd);
        if (WARN_ON(ret))
                return;
 }
@@ -441,7 +503,7 @@ int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 {
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
-       struct iwl_time_event_cmd time_cmd = {};
+       struct iwl_time_event_cmd_v2 time_cmd = {};
 
        lockdep_assert_held(&mvm->mutex);
        if (te_data->running) {
@@ -472,8 +534,6 @@ int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
        }
 
        time_cmd.apply_time = cpu_to_le32(0);
-       time_cmd.dep_policy = cpu_to_le32(TE_INDEPENDENT);
-       time_cmd.is_present = cpu_to_le32(1);
        time_cmd.interval = cpu_to_le32(1);
 
        /*
@@ -482,12 +542,12 @@ int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
         * scheduled. To improve the chances of it being scheduled, allow them
         * to be fragmented, and in addition allow them to be delayed.
         */
-       time_cmd.max_frags = cpu_to_le32(MSEC_TO_TU(duration)/20);
+       time_cmd.max_frags = min(MSEC_TO_TU(duration)/50, TE_V2_FRAG_ENDLESS);
        time_cmd.max_delay = cpu_to_le32(MSEC_TO_TU(duration/2));
        time_cmd.duration = cpu_to_le32(MSEC_TO_TU(duration));
-       time_cmd.repeat = cpu_to_le32(1);
-       time_cmd.notify = cpu_to_le32(TE_NOTIF_HOST_EVENT_START |
-                                     TE_NOTIF_HOST_EVENT_END);
+       time_cmd.repeat = 1;
+       time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
+                                     TE_V2_NOTIF_HOST_EVENT_END);
 
        return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
 }
index d6ae7f16ac11932865a923b2a3bd68f70b3bbeb4..1f3282dff5136fa305c894c9c35ee5914dd63ddd 100644 (file)
@@ -391,8 +391,7 @@ static void iwl_mvm_tt_tx_protection(struct iwl_mvm *mvm, bool enable)
                mvmsta = (void *)sta->drv_priv;
                if (enable == mvmsta->tt_tx_protection)
                        continue;
-               err = iwl_mvm_tx_protection(mvm, &mvmsta->lq_sta.lq,
-                                           mvmsta, enable);
+               err = iwl_mvm_tx_protection(mvm, mvmsta, enable);
                if (err) {
                        IWL_ERR(mvm, "Failed to %s Tx protection\n",
                                enable ? "enable" : "disable");
@@ -513,12 +512,39 @@ static const struct iwl_tt_params iwl7000_tt_params = {
        .support_tx_backoff = true,
 };
 
+static const struct iwl_tt_params iwl7000_high_temp_tt_params = {
+       .ct_kill_entry = 118,
+       .ct_kill_exit = 96,
+       .ct_kill_duration = 5,
+       .dynamic_smps_entry = 114,
+       .dynamic_smps_exit = 110,
+       .tx_protection_entry = 114,
+       .tx_protection_exit = 108,
+       .tx_backoff = {
+               {.temperature = 112, .backoff = 300},
+               {.temperature = 113, .backoff = 800},
+               {.temperature = 114, .backoff = 1500},
+               {.temperature = 115, .backoff = 3000},
+               {.temperature = 116, .backoff = 5000},
+               {.temperature = 117, .backoff = 10000},
+       },
+       .support_ct_kill = true,
+       .support_dynamic_smps = true,
+       .support_tx_protection = true,
+       .support_tx_backoff = true,
+};
+
 void iwl_mvm_tt_initialize(struct iwl_mvm *mvm)
 {
        struct iwl_mvm_tt_mgmt *tt = &mvm->thermal_throttle;
 
        IWL_DEBUG_TEMP(mvm, "Initialize Thermal Throttling\n");
-       tt->params = &iwl7000_tt_params;
+
+       if (mvm->cfg->high_temp)
+               tt->params = &iwl7000_high_temp_tt_params;
+       else
+               tt->params = &iwl7000_tt_params;
+
        tt->throttle = false;
        INIT_DELAYED_WORK(&tt->ct_kill_exit, check_exit_ctkill);
 }
index f0e96a927407d3ae1473481f5e7d3eeb894a5a2b..e05440d90319b339f203a2f1ff9658a0d97f615e 100644 (file)
@@ -91,11 +91,10 @@ static void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
                tx_flags |= TX_CMD_FLG_ACK | TX_CMD_FLG_BAR;
 
        /* High prio packet (wrt. BT coex) if it is EAPOL, MCAST or MGMT */
-       if (info->band == IEEE80211_BAND_2GHZ        &&
-           (skb->protocol == cpu_to_be16(ETH_P_PAE)  ||
-            is_multicast_ether_addr(hdr->addr1)      ||
-            ieee80211_is_back_req(fc)                ||
-            ieee80211_is_mgmt(fc)))
+       if (info->band == IEEE80211_BAND_2GHZ &&
+           (info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO ||
+            is_multicast_ether_addr(hdr->addr1) ||
+            ieee80211_is_back_req(fc) || ieee80211_is_mgmt(fc)))
                tx_flags |= TX_CMD_FLG_BT_DIS;
 
        if (ieee80211_has_morefrags(fc))
@@ -123,6 +122,8 @@ static void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
                 * it
                 */
                WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_AMPDU);
+       } else if (skb->protocol == cpu_to_be16(ETH_P_PAE)) {
+               tx_cmd->pm_frame_timeout = cpu_to_le16(2);
        } else {
                tx_cmd->pm_frame_timeout = 0;
        }
@@ -171,7 +172,7 @@ static void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm,
        }
 
        /*
-        * for data packets, rate info comes from the table inside he fw. This
+        * for data packets, rate info comes from the table inside the fw. This
         * table is controlled by LINK_QUALITY commands
         */
 
index 1e1332839e4a745b2a57f9d837b112fff52d75bd..a9c3574914348ad3583fd19387a6c48211530f8d 100644 (file)
@@ -453,6 +453,29 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
        IWL_ERR(mvm, "0x%08X | flow_handler\n", table.flow_handler);
 }
 
+void iwl_mvm_dump_sram(struct iwl_mvm *mvm)
+{
+       const struct fw_img *img;
+       int ofs, len = 0;
+       u8 *buf;
+
+       if (!mvm->ucode_loaded)
+               return;
+
+       img = &mvm->fw->img[mvm->cur_ucode];
+       ofs = img->sec[IWL_UCODE_SECTION_DATA].offset;
+       len = img->sec[IWL_UCODE_SECTION_DATA].len;
+
+       buf = kzalloc(len, GFP_KERNEL);
+       if (!buf)
+               return;
+
+       iwl_trans_read_mem_bytes(mvm->trans, ofs, buf, len);
+       iwl_print_hex_error(mvm->trans, buf, len);
+
+       kfree(buf);
+}
+
 /**
  * iwl_mvm_send_lq_cmd() - Send link quality command
  * @init: This command is sent as part of station initialization right
index ff13458efc27096343f17e5c408465d9dce1253a..dc02cb9792afbbb48c23f01d748c262bee1828fd 100644 (file)
@@ -273,9 +273,9 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
        {IWL_PCI_DEVICE(0x08B1, 0x4462, iwl7260_n_cfg)},
        {IWL_PCI_DEVICE(0x08B1, 0x4870, iwl7260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x08B1, 0x486E, iwl7260_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x08B1, 0x4A70, iwl7260_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x08B1, 0x4A6E, iwl7260_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x08B1, 0x4A6C, iwl7260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x08B1, 0x4A70, iwl7260_2ac_cfg_high_temp)},
+       {IWL_PCI_DEVICE(0x08B1, 0x4A6E, iwl7260_2ac_cfg_high_temp)},
+       {IWL_PCI_DEVICE(0x08B1, 0x4A6C, iwl7260_2ac_cfg_high_temp)},
        {IWL_PCI_DEVICE(0x08B1, 0x4020, iwl7260_2n_cfg)},
        {IWL_PCI_DEVICE(0x08B2, 0x4220, iwl7260_2n_cfg)},
        {IWL_PCI_DEVICE(0x08B1, 0x4420, iwl7260_2n_cfg)},
@@ -325,15 +325,15 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        int ret;
 
        iwl_trans = iwl_trans_pcie_alloc(pdev, ent, cfg);
-       if (iwl_trans == NULL)
-               return -ENOMEM;
+       if (IS_ERR(iwl_trans))
+               return PTR_ERR(iwl_trans);
 
        pci_set_drvdata(pdev, iwl_trans);
 
        trans_pcie = IWL_TRANS_GET_PCIE_TRANS(iwl_trans);
        trans_pcie->drv = iwl_drv_start(iwl_trans, cfg);
 
-       if (IS_ERR_OR_NULL(trans_pcie->drv)) {
+       if (IS_ERR(trans_pcie->drv)) {
                ret = PTR_ERR(trans_pcie->drv);
                goto out_free_trans;
        }
@@ -368,21 +368,19 @@ static void iwl_pci_remove(struct pci_dev *pdev)
 
 static int iwl_pci_suspend(struct device *device)
 {
-       struct pci_dev *pdev = to_pci_dev(device);
-       struct iwl_trans *iwl_trans = pci_get_drvdata(pdev);
-
        /* Before you put code here, think about WoWLAN. You cannot check here
         * whether WoWLAN is enabled or not, and your code will run even if
         * WoWLAN is enabled - don't kill the NIC, someone may need it in Sx.
         */
 
-       return iwl_trans_suspend(iwl_trans);
+       return 0;
 }
 
 static int iwl_pci_resume(struct device *device)
 {
        struct pci_dev *pdev = to_pci_dev(device);
-       struct iwl_trans *iwl_trans = pci_get_drvdata(pdev);
+       struct iwl_trans *trans = pci_get_drvdata(pdev);
+       bool hw_rfkill;
 
        /* Before you put code here, think about WoWLAN. You cannot check here
         * whether WoWLAN is enabled or not, and your code will run even if
@@ -395,7 +393,15 @@ static int iwl_pci_resume(struct device *device)
         */
        pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
 
-       return iwl_trans_resume(iwl_trans);
+       if (!trans->op_mode)
+               return 0;
+
+       iwl_enable_rfkill_int(trans);
+
+       hw_rfkill = iwl_is_rfkill_set(trans);
+       iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
+
+       return 0;
 }
 
 static SIMPLE_DEV_PM_OPS(iwl_dev_pm_ops, iwl_pci_suspend, iwl_pci_resume);
index b654dcdd048a599232b3488fefcf852e4218f004..fa22639b63c947d68247698cf109070a823b792b 100644 (file)
@@ -392,7 +392,6 @@ void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);
 /*****************************************************
 * Error handling
 ******************************************************/
-int iwl_pcie_dump_fh(struct iwl_trans *trans, char **buf);
 void iwl_pcie_dump_csr(struct iwl_trans *trans);
 
 /*****************************************************
index f600e68a410a1abe454170bd65d29b5937df41a9..3f237b42eb36d3c94cc84f94d0ba86b6a7861e41 100644 (file)
  */
 static int iwl_rxq_space(const struct iwl_rxq *rxq)
 {
-       int s = rxq->read - rxq->write;
-
-       if (s <= 0)
-               s += RX_QUEUE_SIZE;
-       /* keep some buffer to not confuse full and empty queue */
-       s -= 2;
-       if (s < 0)
-               s = 0;
-       return s;
+       /* Make sure RX_QUEUE_SIZE is a power of 2 */
+       BUILD_BUG_ON(RX_QUEUE_SIZE & (RX_QUEUE_SIZE - 1));
+
+       /*
+        * There can be up to (RX_QUEUE_SIZE - 1) free slots, to avoid ambiguity
+        * between empty and completely full queues.
+        * The following is equivalent to modulo by RX_QUEUE_SIZE and is well
+        * defined for negative dividends.
+        */
+       return (rxq->read - rxq->write - 1) & (RX_QUEUE_SIZE - 1);
 }
 
 /*
@@ -793,7 +794,7 @@ static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
        }
 
        iwl_pcie_dump_csr(trans);
-       iwl_pcie_dump_fh(trans, NULL);
+       iwl_dump_fh(trans, NULL);
 
        set_bit(STATUS_FW_ERROR, &trans_pcie->status);
        clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
@@ -888,14 +889,6 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
 
                iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
                if (hw_rfkill) {
-                       /*
-                        * Clear the interrupt in APMG if the NIC is going down.
-                        * Note that when the NIC exits RFkill (else branch), we
-                        * can't access prph and the NIC will be reset in
-                        * start_hw anyway.
-                        */
-                       iwl_write_prph(trans, APMG_RTC_INT_STT_REG,
-                                      APMG_RTC_INT_STT_RFKILL);
                        set_bit(STATUS_RFKILL, &trans_pcie->status);
                        if (test_and_clear_bit(STATUS_HCMD_ACTIVE,
                                               &trans_pcie->status))
@@ -1128,6 +1121,7 @@ static irqreturn_t iwl_pcie_isr(int irq, void *data)
        struct iwl_trans *trans = data;
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        u32 inta, inta_mask;
+       irqreturn_t ret = IRQ_NONE;
 
        lockdep_assert_held(&trans_pcie->irq_lock);
 
@@ -1176,10 +1170,8 @@ static irqreturn_t iwl_pcie_isr(int irq, void *data)
        /* the thread will service interrupts and re-enable them */
        if (likely(inta))
                return IRQ_WAKE_THREAD;
-       else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
-                !trans_pcie->inta)
-               iwl_enable_interrupts(trans);
-       return IRQ_HANDLED;
+
+       ret = IRQ_HANDLED;
 
 none:
        /* re-enable interrupts here since we don't have anything to service. */
@@ -1188,7 +1180,7 @@ none:
            !trans_pcie->inta)
                iwl_enable_interrupts(trans);
 
-       return IRQ_NONE;
+       return ret;
 }
 
 /* interrupt handler using ict table, with this interrupt driver will
@@ -1207,6 +1199,7 @@ irqreturn_t iwl_pcie_isr_ict(int irq, void *data)
        u32 val = 0;
        u32 read;
        unsigned long flags;
+       irqreturn_t ret = IRQ_NONE;
 
        if (!trans)
                return IRQ_NONE;
@@ -1219,7 +1212,7 @@ irqreturn_t iwl_pcie_isr_ict(int irq, void *data)
         * use legacy interrupt.
         */
        if (unlikely(!trans_pcie->use_ict)) {
-               irqreturn_t ret = iwl_pcie_isr(irq, data);
+               ret = iwl_pcie_isr(irq, data);
                spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
                return ret;
        }
@@ -1288,17 +1281,9 @@ irqreturn_t iwl_pcie_isr_ict(int irq, void *data)
        if (likely(inta)) {
                spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
                return IRQ_WAKE_THREAD;
-       } else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
-                !trans_pcie->inta) {
-               /* Allow interrupt if was disabled by this handler and
-                * no tasklet was schedules, We should not enable interrupt,
-                * tasklet will enable it.
-                */
-               iwl_enable_interrupts(trans);
        }
 
-       spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
-       return IRQ_HANDLED;
+       ret = IRQ_HANDLED;
 
  none:
        /* re-enable interrupts here since we don't have anything to service.
@@ -1309,5 +1294,5 @@ irqreturn_t iwl_pcie_isr_ict(int irq, void *data)
                iwl_enable_interrupts(trans);
 
        spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
-       return IRQ_NONE;
+       return ret;
 }
index 96cfcdd390794060ee6c72c3951984f33da5ff38..c3f904d422b08b1074345fcba99a31390f663945 100644 (file)
@@ -820,25 +820,6 @@ static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state)
                clear_bit(STATUS_TPOWER_PMI, &trans_pcie->status);
 }
 
-#ifdef CONFIG_PM_SLEEP
-static int iwl_trans_pcie_suspend(struct iwl_trans *trans)
-{
-       return 0;
-}
-
-static int iwl_trans_pcie_resume(struct iwl_trans *trans)
-{
-       bool hw_rfkill;
-
-       iwl_enable_rfkill_int(trans);
-
-       hw_rfkill = iwl_is_rfkill_set(trans);
-       iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
-
-       return 0;
-}
-#endif /* CONFIG_PM_SLEEP */
-
 static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent,
                                                unsigned long *flags)
 {
@@ -1038,71 +1019,6 @@ static void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg,
        spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
 }
 
-static const char *get_fh_string(int cmd)
-{
-#define IWL_CMD(x) case x: return #x
-       switch (cmd) {
-       IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG);
-       IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG);
-       IWL_CMD(FH_RSCSR_CHNL0_WPTR);
-       IWL_CMD(FH_MEM_RCSR_CHNL0_CONFIG_REG);
-       IWL_CMD(FH_MEM_RSSR_SHARED_CTRL_REG);
-       IWL_CMD(FH_MEM_RSSR_RX_STATUS_REG);
-       IWL_CMD(FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV);
-       IWL_CMD(FH_TSSR_TX_STATUS_REG);
-       IWL_CMD(FH_TSSR_TX_ERROR_REG);
-       default:
-               return "UNKNOWN";
-       }
-#undef IWL_CMD
-}
-
-int iwl_pcie_dump_fh(struct iwl_trans *trans, char **buf)
-{
-       int i;
-       static const u32 fh_tbl[] = {
-               FH_RSCSR_CHNL0_STTS_WPTR_REG,
-               FH_RSCSR_CHNL0_RBDCB_BASE_REG,
-               FH_RSCSR_CHNL0_WPTR,
-               FH_MEM_RCSR_CHNL0_CONFIG_REG,
-               FH_MEM_RSSR_SHARED_CTRL_REG,
-               FH_MEM_RSSR_RX_STATUS_REG,
-               FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV,
-               FH_TSSR_TX_STATUS_REG,
-               FH_TSSR_TX_ERROR_REG
-       };
-
-#ifdef CONFIG_IWLWIFI_DEBUGFS
-       if (buf) {
-               int pos = 0;
-               size_t bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
-
-               *buf = kmalloc(bufsz, GFP_KERNEL);
-               if (!*buf)
-                       return -ENOMEM;
-
-               pos += scnprintf(*buf + pos, bufsz - pos,
-                               "FH register values:\n");
-
-               for (i = 0; i < ARRAY_SIZE(fh_tbl); i++)
-                       pos += scnprintf(*buf + pos, bufsz - pos,
-                               "  %34s: 0X%08x\n",
-                               get_fh_string(fh_tbl[i]),
-                               iwl_read_direct32(trans, fh_tbl[i]));
-
-               return pos;
-       }
-#endif
-
-       IWL_ERR(trans, "FH register values:\n");
-       for (i = 0; i <  ARRAY_SIZE(fh_tbl); i++)
-               IWL_ERR(trans, "  %34s: 0X%08x\n",
-                       get_fh_string(fh_tbl[i]),
-                       iwl_read_direct32(trans, fh_tbl[i]));
-
-       return 0;
-}
-
 static const char *get_csr_string(int cmd)
 {
 #define IWL_CMD(x) case x: return #x
@@ -1183,18 +1099,7 @@ void iwl_pcie_dump_csr(struct iwl_trans *trans)
 } while (0)
 
 /* file operation */
-#define DEBUGFS_READ_FUNC(name)                                         \
-static ssize_t iwl_dbgfs_##name##_read(struct file *file,               \
-                                       char __user *user_buf,          \
-                                       size_t count, loff_t *ppos);
-
-#define DEBUGFS_WRITE_FUNC(name)                                        \
-static ssize_t iwl_dbgfs_##name##_write(struct file *file,              \
-                                       const char __user *user_buf,    \
-                                       size_t count, loff_t *ppos);
-
 #define DEBUGFS_READ_FILE_OPS(name)                                    \
-       DEBUGFS_READ_FUNC(name);                                        \
 static const struct file_operations iwl_dbgfs_##name##_ops = {         \
        .read = iwl_dbgfs_##name##_read,                                \
        .open = simple_open,                                            \
@@ -1202,7 +1107,6 @@ static const struct file_operations iwl_dbgfs_##name##_ops = {            \
 };
 
 #define DEBUGFS_WRITE_FILE_OPS(name)                                    \
-       DEBUGFS_WRITE_FUNC(name);                                       \
 static const struct file_operations iwl_dbgfs_##name##_ops = {          \
        .write = iwl_dbgfs_##name##_write,                              \
        .open = simple_open,                                            \
@@ -1210,8 +1114,6 @@ static const struct file_operations iwl_dbgfs_##name##_ops = {          \
 };
 
 #define DEBUGFS_READ_WRITE_FILE_OPS(name)                              \
-       DEBUGFS_READ_FUNC(name);                                        \
-       DEBUGFS_WRITE_FUNC(name);                                       \
 static const struct file_operations iwl_dbgfs_##name##_ops = {         \
        .write = iwl_dbgfs_##name##_write,                              \
        .read = iwl_dbgfs_##name##_read,                                \
@@ -1395,7 +1297,7 @@ static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
        int pos = 0;
        ssize_t ret = -EFAULT;
 
-       ret = pos = iwl_pcie_dump_fh(trans, &buf);
+       ret = pos = iwl_dump_fh(trans, &buf);
        if (buf) {
                ret = simple_read_from_buffer(user_buf,
                                              count, ppos, buf, pos);
@@ -1459,10 +1361,6 @@ static const struct iwl_trans_ops trans_ops_pcie = {
 
        .wait_tx_queue_empty = iwl_trans_pcie_wait_txq_empty,
 
-#ifdef CONFIG_PM_SLEEP
-       .suspend = iwl_trans_pcie_suspend,
-       .resume = iwl_trans_pcie_resume,
-#endif
        .write8 = iwl_trans_pcie_write8,
        .write32 = iwl_trans_pcie_write32,
        .read32 = iwl_trans_pcie_read32,
@@ -1488,9 +1386,10 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 
        trans = kzalloc(sizeof(struct iwl_trans) +
                        sizeof(struct iwl_trans_pcie), GFP_KERNEL);
-
-       if (!trans)
-               return NULL;
+       if (!trans) {
+               err = -ENOMEM;
+               goto out;
+       }
 
        trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 
@@ -1502,14 +1401,19 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
        spin_lock_init(&trans_pcie->reg_lock);
        init_waitqueue_head(&trans_pcie->ucode_write_waitq);
 
-       /* W/A - seems to solve weird behavior. We need to remove this if we
-        * don't want to stay in L1 all the time. This wastes a lot of power */
-       pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
-                              PCIE_LINK_STATE_CLKPM);
-
-       if (pci_enable_device(pdev)) {
-               err = -ENODEV;
+       err = pci_enable_device(pdev);
+       if (err)
                goto out_no_pci;
+
+       if (!cfg->base_params->pcie_l1_allowed) {
+               /*
+                * W/A - seems to solve weird behavior. We need to remove this
+                * if we don't want to stay in L1 all the time. This wastes a
+                * lot of power.
+                */
+               pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
+                                      PCIE_LINK_STATE_L1 |
+                                      PCIE_LINK_STATE_CLKPM);
        }
 
        pci_set_master(pdev);
@@ -1579,17 +1483,20 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
                                  SLAB_HWCACHE_ALIGN,
                                  NULL);
 
-       if (!trans->dev_cmd_pool)
+       if (!trans->dev_cmd_pool) {
+               err = -ENOMEM;
                goto out_pci_disable_msi;
+       }
 
        trans_pcie->inta_mask = CSR_INI_SET_MASK;
 
        if (iwl_pcie_alloc_ict(trans))
                goto out_free_cmd_pool;
 
-       if (request_threaded_irq(pdev->irq, iwl_pcie_isr_ict,
-                                iwl_pcie_irq_handler,
-                                IRQF_SHARED, DRV_NAME, trans)) {
+       err = request_threaded_irq(pdev->irq, iwl_pcie_isr_ict,
+                                  iwl_pcie_irq_handler,
+                                  IRQF_SHARED, DRV_NAME, trans);
+       if (err) {
                IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq);
                goto out_free_ict;
        }
@@ -1608,5 +1515,6 @@ out_pci_disable_device:
        pci_disable_device(pdev);
 out_no_pci:
        kfree(trans);
-       return NULL;
+out:
+       return ERR_PTR(err);
 }
index c47c92165aba91d2e97631f1953bd4e480cabad7..f45eb29c2ede0b62cc1723f82ae7f30cc1f5a8ed 100644 (file)
  ***************************************************/
 static int iwl_queue_space(const struct iwl_queue *q)
 {
-       int s = q->read_ptr - q->write_ptr;
-
-       if (q->read_ptr > q->write_ptr)
-               s -= q->n_bd;
-
-       if (s <= 0)
-               s += q->n_window;
-       /* keep some reserve to not confuse empty and full situations */
-       s -= 2;
-       if (s < 0)
-               s = 0;
-       return s;
+       unsigned int max;
+       unsigned int used;
+
+       /*
+        * To avoid ambiguity between empty and completely full queues, there
+        * should always be less than q->n_bd elements in the queue.
+        * If q->n_window is smaller than q->n_bd, there is no need to reserve
+        * any queue entries for this purpose.
+        */
+       if (q->n_window < q->n_bd)
+               max = q->n_window;
+       else
+               max = q->n_bd - 1;
+
+       /*
+        * q->n_bd is a power of 2, so the following is equivalent to modulo by
+        * q->n_bd and is well defined for negative dividends.
+        */
+       used = (q->write_ptr - q->read_ptr) & (q->n_bd - 1);
+
+       if (WARN_ON(used > max))
+               return 0;
+
+       return max - used;
 }
 
 /*
@@ -451,13 +463,10 @@ static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
                return -EINVAL;
        }
 
-       if (WARN_ON(addr & ~DMA_BIT_MASK(36)))
+       if (WARN(addr & ~IWL_TX_DMA_MASK,
+                "Unaligned address = %llx\n", (unsigned long long)addr))
                return -EINVAL;
 
-       if (unlikely(addr & ~IWL_TX_DMA_MASK))
-               IWL_ERR(trans, "Unaligned address = %llx\n",
-                       (unsigned long long)addr);
-
        iwl_pcie_tfd_set_tb(tfd, num_tbs, addr, len);
 
        return 0;
@@ -829,7 +838,7 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
                                  sizeof(struct iwl_txq), GFP_KERNEL);
        if (!trans_pcie->txq) {
                IWL_ERR(trans, "Not enough memory for txq\n");
-               ret = ENOMEM;
+               ret = -ENOMEM;
                goto error;
        }
 
@@ -1153,10 +1162,10 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id)
 /*
  * iwl_pcie_enqueue_hcmd - enqueue a uCode command
  * @priv: device private data point
- * @cmd: a point to the ucode command structure
+ * @cmd: a pointer to the ucode command structure
  *
- * The function returns < 0 values to indicate the operation is
- * failed. On success, it turns the index (> 0) of command in the
+ * The function returns < 0 values to indicate the operation
+ * failed. On success, it returns the index (>= 0) of command in the
  * command queue.
  */
 static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
@@ -1619,10 +1628,9 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
        txq = &trans_pcie->txq[txq_id];
        q = &txq->q;
 
-       if (unlikely(!test_bit(txq_id, trans_pcie->queue_used))) {
-               WARN_ON_ONCE(1);
+       if (WARN_ONCE(!test_bit(txq_id, trans_pcie->queue_used),
+                     "TX on unused queue %d\n", txq_id))
                return -EINVAL;
-       }
 
        spin_lock(&txq->lock);
 
@@ -1632,7 +1640,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
         * Check here that the packets are in the right place on the ring.
         */
        wifi_seq = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
-       WARN_ONCE(trans_pcie->txq[txq_id].ampdu &&
+       WARN_ONCE(txq->ampdu &&
                  (wifi_seq & 0xff) != q->write_ptr,
                  "Q: %d WiFi Seq %d tfdNum %d",
                  txq_id, wifi_seq, q->write_ptr);
@@ -1664,7 +1672,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
         */
        len = sizeof(struct iwl_tx_cmd) + sizeof(struct iwl_cmd_header) +
              hdr_len - IWL_HCMD_SCRATCHBUF_SIZE;
-       tb1_len = (len + 3) & ~3;
+       tb1_len = ALIGN(len, 4);
 
        /* Tell NIC about any 2-byte padding after MAC header */
        if (tb1_len != len)
index cb34c7895f2a299b8c6b8f6c094f79cd9ab88c85..a0d2aacd5e09c9634a37a8e3026ae867952efd57 100644 (file)
@@ -867,7 +867,7 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw,
 
        if (WARN_ON(skb->len < 10)) {
                /* Should not happen; just a sanity check for addr1 use */
-               dev_kfree_skb(skb);
+               ieee80211_free_txskb(hw, skb);
                return;
        }
 
@@ -884,13 +884,13 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw,
        }
 
        if (WARN(!channel, "TX w/o channel - queue = %d\n", txi->hw_queue)) {
-               dev_kfree_skb(skb);
+               ieee80211_free_txskb(hw, skb);
                return;
        }
 
        if (data->idle && !data->tmp_chan) {
                wiphy_debug(hw->wiphy, "Trying to TX when idle - reject\n");
-               dev_kfree_skb(skb);
+               ieee80211_free_txskb(hw, skb);
                return;
        }
 
@@ -1364,6 +1364,7 @@ static const struct nla_policy hwsim_testmode_policy[HWSIM_TM_ATTR_MAX + 1] = {
 static int hwsim_fops_ps_write(void *dat, u64 val);
 
 static int mac80211_hwsim_testmode_cmd(struct ieee80211_hw *hw,
+                                      struct ieee80211_vif *vif,
                                       void *data, int len)
 {
        struct mac80211_hwsim_data *hwsim = hw->priv;
@@ -2309,7 +2310,9 @@ static int __init init_mac80211_hwsim(void)
                        hw->flags |= IEEE80211_HW_SUPPORTS_RC_TABLE;
 
                hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS |
-                                   WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
+                                   WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
+                                   WIPHY_FLAG_AP_UAPSD;
+               hw->wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR;
 
                /* ask mac80211 to reserve space for magic */
                hw->vif_data_size = sizeof(struct hwsim_vif_priv);
index a78e0651409c66e8f521bfe1842e2bd04f48f745..8f9f54231a1c416a3f47ee7346b0ca026c143aab 100644 (file)
@@ -189,7 +189,7 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
 
                skb_src = skb_dequeue(&pra_list->skb_head);
 
-               pra_list->total_pkts_size -= skb_src->len;
+               pra_list->total_pkt_count--;
 
                atomic_dec(&priv->wmm.tx_pkts_queued);
 
@@ -268,7 +268,7 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
 
                skb_queue_tail(&pra_list->skb_head, skb_aggr);
 
-               pra_list->total_pkts_size += skb_aggr->len;
+               pra_list->total_pkt_count++;
 
                atomic_inc(&priv->wmm.tx_pkts_queued);
 
index 89459db4c53b9f1468f832f4e3d0be0200b7ec22..ca149aea15175769386089c87c6d85035cd8c304 100644 (file)
@@ -25,7 +25,9 @@ module_param(reg_alpha2, charp, 0);
 
 static const struct ieee80211_iface_limit mwifiex_ap_sta_limits[] = {
        {
-               .max = 2, .types = BIT(NL80211_IFTYPE_STATION),
+               .max = 2, .types = BIT(NL80211_IFTYPE_STATION) |
+                                  BIT(NL80211_IFTYPE_P2P_GO) |
+                                  BIT(NL80211_IFTYPE_P2P_CLIENT),
        },
        {
                .max = 1, .types = BIT(NL80211_IFTYPE_AP),
@@ -189,6 +191,7 @@ mwifiex_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
        struct sk_buff *skb;
        u16 pkt_len;
        const struct ieee80211_mgmt *mgmt;
+       struct mwifiex_txinfo *tx_info;
        struct mwifiex_private *priv = mwifiex_netdev_get_priv(wdev->netdev);
 
        if (!buf || !len) {
@@ -216,6 +219,10 @@ mwifiex_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
                return -ENOMEM;
        }
 
+       tx_info = MWIFIEX_SKB_TXCB(skb);
+       tx_info->bss_num = priv->bss_num;
+       tx_info->bss_type = priv->bss_type;
+
        mwifiex_form_mgmt_frame(skb, buf, len);
        mwifiex_queue_tx_pkt(priv, skb);
 
@@ -235,16 +242,20 @@ mwifiex_cfg80211_mgmt_frame_register(struct wiphy *wiphy,
                                     u16 frame_type, bool reg)
 {
        struct mwifiex_private *priv = mwifiex_netdev_get_priv(wdev->netdev);
+       u32 mask;
 
        if (reg)
-               priv->mgmt_frame_mask |= BIT(frame_type >> 4);
+               mask = priv->mgmt_frame_mask | BIT(frame_type >> 4);
        else
-               priv->mgmt_frame_mask &= ~BIT(frame_type >> 4);
-
-       mwifiex_send_cmd_async(priv, HostCmd_CMD_MGMT_FRAME_REG,
-                              HostCmd_ACT_GEN_SET, 0, &priv->mgmt_frame_mask);
+               mask = priv->mgmt_frame_mask & ~BIT(frame_type >> 4);
 
-       wiphy_dbg(wiphy, "info: mgmt frame registered\n");
+       if (mask != priv->mgmt_frame_mask) {
+               priv->mgmt_frame_mask = mask;
+               mwifiex_send_cmd_async(priv, HostCmd_CMD_MGMT_FRAME_REG,
+                                      HostCmd_ACT_GEN_SET, 0,
+                                      &priv->mgmt_frame_mask);
+               wiphy_dbg(wiphy, "info: mgmt frame registered\n");
+       }
 }
 
 /*
@@ -2296,10 +2307,9 @@ int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev)
 }
 EXPORT_SYMBOL_GPL(mwifiex_del_virtual_intf);
 
-#ifdef CONFIG_PM
 static bool
-mwifiex_is_pattern_supported(struct cfg80211_wowlan_trig_pkt_pattern *pat,
-                            s8 *byte_seq)
+mwifiex_is_pattern_supported(struct cfg80211_pkt_pattern *pat, s8 *byte_seq,
+                            u8 max_byte_seq)
 {
        int j, k, valid_byte_cnt = 0;
        bool dont_care_byte = false;
@@ -2317,16 +2327,17 @@ mwifiex_is_pattern_supported(struct cfg80211_wowlan_trig_pkt_pattern *pat,
                                        dont_care_byte = true;
                        }
 
-                       if (valid_byte_cnt > MAX_BYTESEQ)
+                       if (valid_byte_cnt > max_byte_seq)
                                return false;
                }
        }
 
-       byte_seq[MAX_BYTESEQ] = valid_byte_cnt;
+       byte_seq[max_byte_seq] = valid_byte_cnt;
 
        return true;
 }
 
+#ifdef CONFIG_PM
 static int mwifiex_cfg80211_suspend(struct wiphy *wiphy,
                                    struct cfg80211_wowlan *wowlan)
 {
@@ -2335,7 +2346,7 @@ static int mwifiex_cfg80211_suspend(struct wiphy *wiphy,
        struct mwifiex_mef_entry *mef_entry;
        int i, filt_num = 0, ret;
        bool first_pat = true;
-       u8 byte_seq[MAX_BYTESEQ + 1];
+       u8 byte_seq[MWIFIEX_MEF_MAX_BYTESEQ + 1];
        const u8 ipv4_mc_mac[] = {0x33, 0x33};
        const u8 ipv6_mc_mac[] = {0x01, 0x00, 0x5e};
        struct mwifiex_private *priv =
@@ -2365,7 +2376,8 @@ static int mwifiex_cfg80211_suspend(struct wiphy *wiphy,
        for (i = 0; i < wowlan->n_patterns; i++) {
                memset(byte_seq, 0, sizeof(byte_seq));
                if (!mwifiex_is_pattern_supported(&wowlan->patterns[i],
-                                                 byte_seq)) {
+                                                 byte_seq,
+                                                 MWIFIEX_MEF_MAX_BYTESEQ)) {
                        wiphy_err(wiphy, "Pattern not supported\n");
                        kfree(mef_entry);
                        return -EOPNOTSUPP;
@@ -2373,16 +2385,16 @@ static int mwifiex_cfg80211_suspend(struct wiphy *wiphy,
 
                if (!wowlan->patterns[i].pkt_offset) {
                        if (!(byte_seq[0] & 0x01) &&
-                           (byte_seq[MAX_BYTESEQ] == 1)) {
+                           (byte_seq[MWIFIEX_MEF_MAX_BYTESEQ] == 1)) {
                                mef_cfg.criteria |= MWIFIEX_CRITERIA_UNICAST;
                                continue;
                        } else if (is_broadcast_ether_addr(byte_seq)) {
                                mef_cfg.criteria |= MWIFIEX_CRITERIA_BROADCAST;
                                continue;
                        } else if ((!memcmp(byte_seq, ipv4_mc_mac, 2) &&
-                                   (byte_seq[MAX_BYTESEQ] == 2)) ||
+                                   (byte_seq[MWIFIEX_MEF_MAX_BYTESEQ] == 2)) ||
                                   (!memcmp(byte_seq, ipv6_mc_mac, 3) &&
-                                   (byte_seq[MAX_BYTESEQ] == 3))) {
+                                   (byte_seq[MWIFIEX_MEF_MAX_BYTESEQ] == 3))) {
                                mef_cfg.criteria |= MWIFIEX_CRITERIA_MULTICAST;
                                continue;
                        }
@@ -2408,7 +2420,8 @@ static int mwifiex_cfg80211_suspend(struct wiphy *wiphy,
                mef_entry->filter[filt_num].repeat = 16;
                memcpy(mef_entry->filter[filt_num].byte_seq, priv->curr_addr,
                       ETH_ALEN);
-               mef_entry->filter[filt_num].byte_seq[MAX_BYTESEQ] = ETH_ALEN;
+               mef_entry->filter[filt_num].byte_seq[MWIFIEX_MEF_MAX_BYTESEQ] =
+                                                               ETH_ALEN;
                mef_entry->filter[filt_num].offset = 14;
                mef_entry->filter[filt_num].filt_type = TYPE_EQ;
                if (filt_num)
@@ -2442,6 +2455,119 @@ static void mwifiex_cfg80211_set_wakeup(struct wiphy *wiphy,
 }
 #endif
 
+static int mwifiex_get_coalesce_pkt_type(u8 *byte_seq)
+{
+       const u8 ipv4_mc_mac[] = {0x33, 0x33};
+       const u8 ipv6_mc_mac[] = {0x01, 0x00, 0x5e};
+       const u8 bc_mac[] = {0xff, 0xff, 0xff, 0xff};
+
+       if ((byte_seq[0] & 0x01) &&
+           (byte_seq[MWIFIEX_COALESCE_MAX_BYTESEQ] == 1))
+               return PACKET_TYPE_UNICAST;
+       else if (!memcmp(byte_seq, bc_mac, 4))
+               return PACKET_TYPE_BROADCAST;
+       else if ((!memcmp(byte_seq, ipv4_mc_mac, 2) &&
+                 byte_seq[MWIFIEX_COALESCE_MAX_BYTESEQ] == 2) ||
+                (!memcmp(byte_seq, ipv6_mc_mac, 3) &&
+                 byte_seq[MWIFIEX_COALESCE_MAX_BYTESEQ] == 3))
+               return PACKET_TYPE_MULTICAST;
+
+       return 0;
+}
+
+static int
+mwifiex_fill_coalesce_rule_info(struct mwifiex_private *priv,
+                               struct cfg80211_coalesce_rules *crule,
+                               struct mwifiex_coalesce_rule *mrule)
+{
+       u8 byte_seq[MWIFIEX_COALESCE_MAX_BYTESEQ + 1];
+       struct filt_field_param *param;
+       int i;
+
+       mrule->max_coalescing_delay = crule->delay;
+
+       param = mrule->params;
+
+       for (i = 0; i < crule->n_patterns; i++) {
+               memset(byte_seq, 0, sizeof(byte_seq));
+               if (!mwifiex_is_pattern_supported(&crule->patterns[i],
+                                                 byte_seq,
+                                               MWIFIEX_COALESCE_MAX_BYTESEQ)) {
+                       dev_err(priv->adapter->dev, "Pattern not supported\n");
+                       return -EOPNOTSUPP;
+               }
+
+               if (!crule->patterns[i].pkt_offset) {
+                       u8 pkt_type;
+
+                       pkt_type = mwifiex_get_coalesce_pkt_type(byte_seq);
+                       if (pkt_type && mrule->pkt_type) {
+                               dev_err(priv->adapter->dev,
+                                       "Multiple packet types not allowed\n");
+                               return -EOPNOTSUPP;
+                       } else if (pkt_type) {
+                               mrule->pkt_type = pkt_type;
+                               continue;
+                       }
+               }
+
+               if (crule->condition == NL80211_COALESCE_CONDITION_MATCH)
+                       param->operation = RECV_FILTER_MATCH_TYPE_EQ;
+               else
+                       param->operation = RECV_FILTER_MATCH_TYPE_NE;
+
+               param->operand_len = byte_seq[MWIFIEX_COALESCE_MAX_BYTESEQ];
+               memcpy(param->operand_byte_stream, byte_seq,
+                      param->operand_len);
+               param->offset = crule->patterns[i].pkt_offset;
+               param++;
+
+               mrule->num_of_fields++;
+       }
+
+       if (!mrule->pkt_type) {
+               dev_err(priv->adapter->dev,
+                       "Packet type can not be determined\n");
+               return -EOPNOTSUPP;
+       }
+
+       return 0;
+}
+
+static int mwifiex_cfg80211_set_coalesce(struct wiphy *wiphy,
+                                        struct cfg80211_coalesce *coalesce)
+{
+       struct mwifiex_adapter *adapter = mwifiex_cfg80211_get_adapter(wiphy);
+       int i, ret;
+       struct mwifiex_ds_coalesce_cfg coalesce_cfg;
+       struct mwifiex_private *priv =
+                       mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA);
+
+       memset(&coalesce_cfg, 0, sizeof(coalesce_cfg));
+       if (!coalesce) {
+               dev_dbg(adapter->dev,
+                       "Disable coalesce and reset all previous rules\n");
+               return mwifiex_send_cmd_sync(priv, HostCmd_CMD_COALESCE_CFG,
+                                            HostCmd_ACT_GEN_SET, 0,
+                                            &coalesce_cfg);
+       }
+
+       coalesce_cfg.num_of_rules = coalesce->n_rules;
+       for (i = 0; i < coalesce->n_rules; i++) {
+               ret = mwifiex_fill_coalesce_rule_info(priv, &coalesce->rules[i],
+                                                     &coalesce_cfg.rule[i]);
+               if (ret) {
+                       dev_err(priv->adapter->dev,
+                               "Recheck the patterns provided for rule %d\n",
+                               i + 1);
+                       return ret;
+               }
+       }
+
+       return mwifiex_send_cmd_sync(priv, HostCmd_CMD_COALESCE_CFG,
+                                    HostCmd_ACT_GEN_SET, 0, &coalesce_cfg);
+}
+
 /* station cfg80211 operations */
 static struct cfg80211_ops mwifiex_cfg80211_ops = {
        .add_virtual_intf = mwifiex_add_virtual_intf,
@@ -2476,12 +2602,13 @@ static struct cfg80211_ops mwifiex_cfg80211_ops = {
        .resume = mwifiex_cfg80211_resume,
        .set_wakeup = mwifiex_cfg80211_set_wakeup,
 #endif
+       .set_coalesce = mwifiex_cfg80211_set_coalesce,
 };
 
 #ifdef CONFIG_PM
 static const struct wiphy_wowlan_support mwifiex_wowlan_support = {
        .flags = WIPHY_WOWLAN_MAGIC_PKT,
-       .n_patterns = MWIFIEX_MAX_FILTERS,
+       .n_patterns = MWIFIEX_MEF_MAX_FILTERS,
        .pattern_min_len = 1,
        .pattern_max_len = MWIFIEX_MAX_PATTERN_LEN,
        .max_pkt_offset = MWIFIEX_MAX_OFFSET_LEN,
@@ -2499,6 +2626,15 @@ static bool mwifiex_is_valid_alpha2(const char *alpha2)
        return false;
 }
 
+static const struct wiphy_coalesce_support mwifiex_coalesce_support = {
+       .n_rules = MWIFIEX_COALESCE_MAX_RULES,
+       .max_delay = MWIFIEX_MAX_COALESCING_DELAY,
+       .n_patterns = MWIFIEX_COALESCE_MAX_FILTERS,
+       .pattern_min_len = 1,
+       .pattern_max_len = MWIFIEX_MAX_PATTERN_LEN,
+       .max_pkt_offset = MWIFIEX_MAX_OFFSET_LEN,
+};
+
 /*
  * This function registers the device with CFG802.11 subsystem.
  *
@@ -2560,6 +2696,8 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
        wiphy->wowlan = &mwifiex_wowlan_support;
 #endif
 
+       wiphy->coalesce = &mwifiex_coalesce_support;
+
        wiphy->probe_resp_offload = NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
                                    NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
                                    NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
index 5178c4630d89310e17a5db810977fa5aad8ca525..9eefacbc844bfab2eae14777c70719db377b3f7d 100644 (file)
@@ -404,11 +404,43 @@ mwifiex_is_rate_auto(struct mwifiex_private *priv)
                return false;
 }
 
-/*
- * This function gets the supported data rates.
- *
- * The function works in both Ad-Hoc and infra mode by printing the
- * band and returning the data rates.
+/* This function gets the supported data rates from bitmask inside
+ * cfg80211_scan_request.
+ */
+u32 mwifiex_get_rates_from_cfg80211(struct mwifiex_private *priv,
+                                   u8 *rates, u8 radio_type)
+{
+       struct wiphy *wiphy = priv->adapter->wiphy;
+       struct cfg80211_scan_request *request = priv->scan_request;
+       u32 num_rates, rate_mask;
+       struct ieee80211_supported_band *sband;
+       int i;
+
+       if (radio_type) {
+               sband = wiphy->bands[IEEE80211_BAND_5GHZ];
+               if (WARN_ON_ONCE(!sband))
+                       return 0;
+               rate_mask = request->rates[IEEE80211_BAND_5GHZ];
+       } else {
+               sband = wiphy->bands[IEEE80211_BAND_2GHZ];
+               if (WARN_ON_ONCE(!sband))
+                       return 0;
+               rate_mask = request->rates[IEEE80211_BAND_2GHZ];
+       }
+
+       num_rates = 0;
+       for (i = 0; i < sband->n_bitrates; i++) {
+               if ((BIT(i) & rate_mask) == 0)
+                       continue; /* skip rate */
+               rates[num_rates++] = (u8)(sband->bitrates[i].bitrate / 5);
+       }
+
+       return num_rates;
+}
+
+/* This function gets the supported data rates. The function works in
+ * both Ad-Hoc and infra mode by printing the band and returning the
+ * data rates.
  */
 u32 mwifiex_get_supported_rates(struct mwifiex_private *priv, u8 *rates)
 {
index 94cc09d48444600129620a247c3e227d10865e59..a5993475daef17b0aa09da324559bd8ea2789efa 100644 (file)
@@ -75,7 +75,8 @@
 #define MWIFIEX_BUF_FLAG_REQUEUED_PKT      BIT(0)
 #define MWIFIEX_BUF_FLAG_BRIDGED_PKT      BIT(1)
 
-#define MWIFIEX_BRIDGED_PKTS_THRESHOLD     1024
+#define MWIFIEX_BRIDGED_PKTS_THR_HIGH      1024
+#define MWIFIEX_BRIDGED_PKTS_THR_LOW        128
 
 enum mwifiex_bss_type {
        MWIFIEX_BSS_TYPE_STA = 0,
index 1b45aa5333008c9475595f91123c1bcb2f81e018..c9ad1c0d338d259071212f396d845d86f26d5904 100644 (file)
@@ -85,9 +85,6 @@ enum KEY_TYPE_ID {
 #define WAPI_KEY_LEN                   50
 
 #define MAX_POLL_TRIES                 100
-
-#define MAX_MULTI_INTERFACE_POLL_TRIES  1000
-
 #define MAX_FIRMWARE_POLL_TRIES                        100
 
 #define FIRMWARE_READY_SDIO                            0xfedc
@@ -156,6 +153,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
 #define TLV_TYPE_UAP_PS_AO_TIMER    (PROPRIETARY_TLV_BASE_ID + 123)
 #define TLV_TYPE_PWK_CIPHER         (PROPRIETARY_TLV_BASE_ID + 145)
 #define TLV_TYPE_GWK_CIPHER         (PROPRIETARY_TLV_BASE_ID + 146)
+#define TLV_TYPE_COALESCE_RULE      (PROPRIETARY_TLV_BASE_ID + 154)
 
 #define MWIFIEX_TX_DATA_BUF_SIZE_2K        2048
 
@@ -297,6 +295,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
 #define HostCmd_CMD_CAU_REG_ACCESS                    0x00ed
 #define HostCmd_CMD_SET_BSS_MODE                      0x00f7
 #define HostCmd_CMD_PCIE_DESC_DETAILS                 0x00fa
+#define HostCmd_CMD_COALESCE_CFG                      0x010a
 #define HostCmd_CMD_MGMT_FRAME_REG                    0x010c
 #define HostCmd_CMD_REMAIN_ON_CHAN                    0x010d
 #define HostCmd_CMD_11AC_CFG                         0x0112
@@ -453,7 +452,7 @@ enum P2P_MODES {
        (((event_cause) >> 24) & 0x00ff)
 
 #define MWIFIEX_MAX_PATTERN_LEN                20
-#define MWIFIEX_MAX_OFFSET_LEN         50
+#define MWIFIEX_MAX_OFFSET_LEN         100
 #define STACK_NBYTES                   100
 #define TYPE_DNUM                      1
 #define TYPE_BYTESEQ                   2
@@ -1369,11 +1368,6 @@ struct host_cmd_ds_802_11_eeprom_access {
        u8 value;
 } __packed;
 
-struct host_cmd_tlv {
-       __le16 type;
-       __le16 len;
-} __packed;
-
 struct mwifiex_assoc_event {
        u8 sta_addr[ETH_ALEN];
        __le16 type;
@@ -1399,99 +1393,99 @@ struct host_cmd_11ac_vht_cfg {
 } __packed;
 
 struct host_cmd_tlv_akmp {
-       struct host_cmd_tlv tlv;
+       struct mwifiex_ie_types_header header;
        __le16 key_mgmt;
        __le16 key_mgmt_operation;
 } __packed;
 
 struct host_cmd_tlv_pwk_cipher {
-       struct host_cmd_tlv tlv;
+       struct mwifiex_ie_types_header header;
        __le16 proto;
        u8 cipher;
        u8 reserved;
 } __packed;
 
 struct host_cmd_tlv_gwk_cipher {
-       struct host_cmd_tlv tlv;
+       struct mwifiex_ie_types_header header;
        u8 cipher;
        u8 reserved;
 } __packed;
 
 struct host_cmd_tlv_passphrase {
-       struct host_cmd_tlv tlv;
+       struct mwifiex_ie_types_header header;
        u8 passphrase[0];
 } __packed;
 
 struct host_cmd_tlv_wep_key {
-       struct host_cmd_tlv tlv;
+       struct mwifiex_ie_types_header header;
        u8 key_index;
        u8 is_default;
        u8 key[1];
 };
 
 struct host_cmd_tlv_auth_type {
-       struct host_cmd_tlv tlv;
+       struct mwifiex_ie_types_header header;
        u8 auth_type;
 } __packed;
 
 struct host_cmd_tlv_encrypt_protocol {
-       struct host_cmd_tlv tlv;
+       struct mwifiex_ie_types_header header;
        __le16 proto;
 } __packed;
 
 struct host_cmd_tlv_ssid {
-       struct host_cmd_tlv tlv;
+       struct mwifiex_ie_types_header header;
        u8 ssid[0];
 } __packed;
 
 struct host_cmd_tlv_rates {
-       struct host_cmd_tlv tlv;
+       struct mwifiex_ie_types_header header;
        u8 rates[0];
 } __packed;
 
 struct host_cmd_tlv_bcast_ssid {
-       struct host_cmd_tlv tlv;
+       struct mwifiex_ie_types_header header;
        u8 bcast_ctl;
 } __packed;
 
 struct host_cmd_tlv_beacon_period {
-       struct host_cmd_tlv tlv;
+       struct mwifiex_ie_types_header header;
        __le16 period;
 } __packed;
 
 struct host_cmd_tlv_dtim_period {
-       struct host_cmd_tlv tlv;
+       struct mwifiex_ie_types_header header;
        u8 period;
 } __packed;
 
 struct host_cmd_tlv_frag_threshold {
-       struct host_cmd_tlv tlv;
+       struct mwifiex_ie_types_header header;
        __le16 frag_thr;
 } __packed;
 
 struct host_cmd_tlv_rts_threshold {
-       struct host_cmd_tlv tlv;
+       struct mwifiex_ie_types_header header;
        __le16 rts_thr;
 } __packed;
 
 struct host_cmd_tlv_retry_limit {
-       struct host_cmd_tlv tlv;
+       struct mwifiex_ie_types_header header;
        u8 limit;
 } __packed;
 
 struct host_cmd_tlv_mac_addr {
-       struct host_cmd_tlv tlv;
+       struct mwifiex_ie_types_header header;
        u8 mac_addr[ETH_ALEN];
 } __packed;
 
 struct host_cmd_tlv_channel_band {
-       struct host_cmd_tlv tlv;
+       struct mwifiex_ie_types_header header;
        u8 band_config;
        u8 channel;
 } __packed;
 
 struct host_cmd_tlv_ageout_timer {
-       struct host_cmd_tlv tlv;
+       struct mwifiex_ie_types_header header;
        __le32 sta_ao_timer;
 } __packed;
 
@@ -1604,6 +1598,27 @@ struct host_cmd_ds_802_11_cfg_data {
        __le16 data_len;
 } __packed;
 
+struct coalesce_filt_field_param {
+       u8 operation;
+       u8 operand_len;
+       __le16 offset;
+       u8 operand_byte_stream[4];
+};
+
+struct coalesce_receive_filt_rule {
+       struct mwifiex_ie_types_header header;
+       u8 num_of_fields;
+       u8 pkt_type;
+       __le16 max_coalescing_delay;
+       struct coalesce_filt_field_param params[0];
+} __packed;
+
+struct host_cmd_ds_coalesce_cfg {
+       __le16 action;
+       __le16 num_of_rules;
+       struct coalesce_receive_filt_rule rule[0];
+} __packed;
+
 struct host_cmd_ds_command {
        __le16 command;
        __le16 size;
@@ -1664,6 +1679,7 @@ struct host_cmd_ds_command {
                struct host_cmd_ds_sta_deauth sta_deauth;
                struct host_cmd_11ac_vht_cfg vht_cfg;
                struct host_cmd_ds_802_11_cfg_data cfg_data;
+               struct host_cmd_ds_coalesce_cfg coalesce_cfg;
        } params;
 } __packed;
 
index e38342f86c515e6e574fbae572e7c51dbe7c8fa8..220af4fe0fc65b18b575c81025c436d303f01bbd 100644 (file)
@@ -87,7 +87,7 @@ mwifiex_update_autoindex_ies(struct mwifiex_private *priv,
        u8 *tmp;
 
        input_len = le16_to_cpu(ie_list->len);
-       travel_len = sizeof(struct host_cmd_tlv);
+       travel_len = sizeof(struct mwifiex_ie_types_header);
 
        ie_list->len = 0;
 
index 2cf8b964e966c5dec94b37ca9a826799d73f4dee..e021a581a143872d231d6c77de60dad877bcfa2a 100644 (file)
@@ -135,6 +135,7 @@ int mwifiex_init_priv(struct mwifiex_private *priv)
 
        priv->csa_chan = 0;
        priv->csa_expire_time = 0;
+       priv->del_list_idx = 0;
 
        return mwifiex_add_bss_prio_tbl(priv);
 }
@@ -377,18 +378,11 @@ static void mwifiex_free_lock_list(struct mwifiex_adapter *adapter)
 static void
 mwifiex_adapter_cleanup(struct mwifiex_adapter *adapter)
 {
-       int i;
-
        if (!adapter) {
                pr_err("%s: adapter is NULL\n", __func__);
                return;
        }
 
-       for (i = 0; i < adapter->priv_num; i++) {
-               if (adapter->priv[i])
-                       del_timer_sync(&adapter->priv[i]->scan_delay_timer);
-       }
-
        mwifiex_cancel_all_pending_cmd(adapter);
 
        /* Free lock variables */
@@ -398,13 +392,8 @@ mwifiex_adapter_cleanup(struct mwifiex_adapter *adapter)
        dev_dbg(adapter->dev, "info: free cmd buffer\n");
        mwifiex_free_cmd_buffer(adapter);
 
-       del_timer(&adapter->cmd_timer);
-
        dev_dbg(adapter->dev, "info: free scan table\n");
 
-       if (adapter->if_ops.cleanup_if)
-               adapter->if_ops.cleanup_if(adapter);
-
        if (adapter->sleep_cfm)
                dev_kfree_skb_any(adapter->sleep_cfm);
 }
@@ -702,7 +691,6 @@ int mwifiex_dnld_fw(struct mwifiex_adapter *adapter,
                if (!adapter->winner) {
                        dev_notice(adapter->dev,
                                   "FW already running! Skip FW dnld\n");
-                       poll_num = MAX_MULTI_INTERFACE_POLL_TRIES;
                        goto poll_fw;
                }
        }
index 7f27e45680b5ec9427727e716a737e394b176f30..00a95f4c6a6c1885eabab4652ff4cbe173d2b7ae 100644 (file)
@@ -362,13 +362,13 @@ struct mwifiex_ds_misc_subsc_evt {
        struct subsc_evt_cfg bcn_h_rssi_cfg;
 };
 
-#define MAX_BYTESEQ            6       /* non-adjustable */
-#define MWIFIEX_MAX_FILTERS    10
+#define MWIFIEX_MEF_MAX_BYTESEQ                6       /* non-adjustable */
+#define MWIFIEX_MEF_MAX_FILTERS                10
 
 struct mwifiex_mef_filter {
        u16 repeat;
        u16 offset;
-       s8 byte_seq[MAX_BYTESEQ + 1];
+       s8 byte_seq[MWIFIEX_MEF_MAX_BYTESEQ + 1];
        u8 filt_type;
        u8 filt_action;
 };
@@ -376,7 +376,7 @@ struct mwifiex_mef_filter {
 struct mwifiex_mef_entry {
        u8 mode;
        u8 action;
-       struct mwifiex_mef_filter filter[MWIFIEX_MAX_FILTERS];
+       struct mwifiex_mef_filter filter[MWIFIEX_MEF_MAX_FILTERS];
 };
 
 struct mwifiex_ds_mef_cfg {
@@ -397,4 +397,39 @@ enum {
        MWIFIEX_FUNC_SHUTDOWN,
 };
 
+enum COALESCE_OPERATION {
+       RECV_FILTER_MATCH_TYPE_EQ = 0x80,
+       RECV_FILTER_MATCH_TYPE_NE,
+};
+
+enum COALESCE_PACKET_TYPE {
+       PACKET_TYPE_UNICAST = 1,
+       PACKET_TYPE_MULTICAST = 2,
+       PACKET_TYPE_BROADCAST = 3
+};
+
+#define MWIFIEX_COALESCE_MAX_RULES     8
+#define MWIFIEX_COALESCE_MAX_BYTESEQ   4       /* non-adjustable */
+#define MWIFIEX_COALESCE_MAX_FILTERS   4
+#define MWIFIEX_MAX_COALESCING_DELAY   100     /* in msecs */
+
+struct filt_field_param {
+       u8 operation;
+       u8 operand_len;
+       u16 offset;
+       u8 operand_byte_stream[MWIFIEX_COALESCE_MAX_BYTESEQ];
+};
+
+struct mwifiex_coalesce_rule {
+       u16 max_coalescing_delay;
+       u8 num_of_fields;
+       u8 pkt_type;
+       struct filt_field_param params[MWIFIEX_COALESCE_MAX_FILTERS];
+};
+
+struct mwifiex_ds_coalesce_cfg {
+       u16 num_of_rules;
+       struct mwifiex_coalesce_rule rule[MWIFIEX_COALESCE_MAX_RULES];
+};
+
 #endif /* !_MWIFIEX_IOCTL_H_ */
index 12e778159ec58f5be70ab8c3e9d515beab28b48a..9d7c0e6c4fc7419facd68a3c61f251fbedb86143 100644 (file)
@@ -1427,6 +1427,7 @@ int mwifiex_deauthenticate(struct mwifiex_private *priv, u8 *mac)
 
        switch (priv->bss_mode) {
        case NL80211_IFTYPE_STATION:
+       case NL80211_IFTYPE_P2P_CLIENT:
                return mwifiex_deauthenticate_infra(priv, mac);
        case NL80211_IFTYPE_ADHOC:
                return mwifiex_send_cmd_sync(priv,
index 1753431de361b807890ec8bcb177760f457faa07..3402bffdd016ca21ff1ce424d98772d9f24ee3d5 100644 (file)
@@ -191,12 +191,16 @@ static int mwifiex_unregister(struct mwifiex_adapter *adapter)
 {
        s32 i;
 
+       if (adapter->if_ops.cleanup_if)
+               adapter->if_ops.cleanup_if(adapter);
+
        del_timer(&adapter->cmd_timer);
 
        /* Free private structures */
        for (i = 0; i < adapter->priv_num; i++) {
                if (adapter->priv[i]) {
                        mwifiex_free_curr_bcn(adapter->priv[i]);
+                       del_timer_sync(&adapter->priv[i]->scan_delay_timer);
                        kfree(adapter->priv[i]);
                }
        }
@@ -385,6 +389,17 @@ static void mwifiex_free_adapter(struct mwifiex_adapter *adapter)
        pr_debug("info: %s: free adapter\n", __func__);
 }
 
+/*
+ * This function cancels all works in the queue and destroys
+ * the main workqueue.
+ */
+static void mwifiex_terminate_workqueue(struct mwifiex_adapter *adapter)
+{
+       flush_workqueue(adapter->workqueue);
+       destroy_workqueue(adapter->workqueue);
+       adapter->workqueue = NULL;
+}
+
 /*
  * This function gets firmware and initializes it.
  *
@@ -394,16 +409,18 @@ static void mwifiex_free_adapter(struct mwifiex_adapter *adapter)
  */
 static void mwifiex_fw_dpc(const struct firmware *firmware, void *context)
 {
-       int ret;
+       int ret, i;
        char fmt[64];
        struct mwifiex_private *priv;
        struct mwifiex_adapter *adapter = context;
        struct mwifiex_fw_image fw;
+       struct semaphore *sem = adapter->card_sem;
+       bool init_failed = false;
 
        if (!firmware) {
                dev_err(adapter->dev,
                        "Failed to get firmware %s\n", adapter->fw_name);
-               goto done;
+               goto err_dnld_fw;
        }
 
        memset(&fw, 0, sizeof(struct mwifiex_fw_image));
@@ -416,7 +433,7 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context)
        else
                ret = mwifiex_dnld_fw(adapter, &fw);
        if (ret == -1)
-               goto done;
+               goto err_dnld_fw;
 
        dev_notice(adapter->dev, "WLAN FW is active\n");
 
@@ -428,13 +445,15 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context)
        }
 
        /* enable host interrupt after fw dnld is successful */
-       if (adapter->if_ops.enable_int)
-               adapter->if_ops.enable_int(adapter);
+       if (adapter->if_ops.enable_int) {
+               if (adapter->if_ops.enable_int(adapter))
+                       goto err_dnld_fw;
+       }
 
        adapter->init_wait_q_woken = false;
        ret = mwifiex_init_fw(adapter);
        if (ret == -1) {
-               goto done;
+               goto err_init_fw;
        } else if (!ret) {
                adapter->hw_status = MWIFIEX_HW_STATUS_READY;
                goto done;
@@ -443,12 +462,12 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context)
        wait_event_interruptible(adapter->init_wait_q,
                                 adapter->init_wait_q_woken);
        if (adapter->hw_status != MWIFIEX_HW_STATUS_READY)
-               goto done;
+               goto err_init_fw;
 
        priv = adapter->priv[MWIFIEX_BSS_ROLE_STA];
        if (mwifiex_register_cfg80211(adapter)) {
                dev_err(adapter->dev, "cannot register with cfg80211\n");
-               goto err_init_fw;
+               goto err_register_cfg80211;
        }
 
        rtnl_lock();
@@ -479,20 +498,52 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context)
        goto done;
 
 err_add_intf:
-       mwifiex_del_virtual_intf(adapter->wiphy, priv->wdev);
+       for (i = 0; i < adapter->priv_num; i++) {
+               priv = adapter->priv[i];
+
+               if (!priv)
+                       continue;
+
+               if (priv->wdev && priv->netdev)
+                       mwifiex_del_virtual_intf(adapter->wiphy, priv->wdev);
+       }
        rtnl_unlock();
+err_register_cfg80211:
+       wiphy_unregister(adapter->wiphy);
+       wiphy_free(adapter->wiphy);
 err_init_fw:
        if (adapter->if_ops.disable_int)
                adapter->if_ops.disable_int(adapter);
+err_dnld_fw:
        pr_debug("info: %s: unregister device\n", __func__);
-       adapter->if_ops.unregister_dev(adapter);
+       if (adapter->if_ops.unregister_dev)
+               adapter->if_ops.unregister_dev(adapter);
+
+       if ((adapter->hw_status == MWIFIEX_HW_STATUS_FW_READY) ||
+           (adapter->hw_status == MWIFIEX_HW_STATUS_READY)) {
+               pr_debug("info: %s: shutdown mwifiex\n", __func__);
+               adapter->init_wait_q_woken = false;
+
+               if (mwifiex_shutdown_drv(adapter) == -EINPROGRESS)
+                       wait_event_interruptible(adapter->init_wait_q,
+                                                adapter->init_wait_q_woken);
+       }
+       adapter->surprise_removed = true;
+       mwifiex_terminate_workqueue(adapter);
+       init_failed = true;
 done:
        if (adapter->cal_data) {
                release_firmware(adapter->cal_data);
                adapter->cal_data = NULL;
        }
-       release_firmware(adapter->firmware);
+       if (adapter->firmware) {
+               release_firmware(adapter->firmware);
+               adapter->firmware = NULL;
+       }
        complete(&adapter->fw_load);
+       if (init_failed)
+               mwifiex_free_adapter(adapter);
+       up(sem);
        return;
 }
 
@@ -802,18 +853,6 @@ static void mwifiex_main_work_queue(struct work_struct *work)
        mwifiex_main_process(adapter);
 }
 
-/*
- * This function cancels all works in the queue and destroys
- * the main workqueue.
- */
-static void
-mwifiex_terminate_workqueue(struct mwifiex_adapter *adapter)
-{
-       flush_workqueue(adapter->workqueue);
-       destroy_workqueue(adapter->workqueue);
-       adapter->workqueue = NULL;
-}
-
 /*
  * This function adds the card.
  *
@@ -842,6 +881,7 @@ mwifiex_add_card(void *card, struct semaphore *sem,
        }
 
        adapter->iface_type = iface_type;
+       adapter->card_sem = sem;
 
        adapter->hw_status = MWIFIEX_HW_STATUS_INITIALIZING;
        adapter->surprise_removed = false;
@@ -872,17 +912,12 @@ mwifiex_add_card(void *card, struct semaphore *sem,
                goto err_init_fw;
        }
 
-       up(sem);
        return 0;
 
 err_init_fw:
        pr_debug("info: %s: unregister device\n", __func__);
        if (adapter->if_ops.unregister_dev)
                adapter->if_ops.unregister_dev(adapter);
-err_registerdev:
-       adapter->surprise_removed = true;
-       mwifiex_terminate_workqueue(adapter);
-err_kmalloc:
        if ((adapter->hw_status == MWIFIEX_HW_STATUS_FW_READY) ||
            (adapter->hw_status == MWIFIEX_HW_STATUS_READY)) {
                pr_debug("info: %s: shutdown mwifiex\n", __func__);
@@ -892,7 +927,10 @@ err_kmalloc:
                        wait_event_interruptible(adapter->init_wait_q,
                                                 adapter->init_wait_q_woken);
        }
-
+err_registerdev:
+       adapter->surprise_removed = true;
+       mwifiex_terminate_workqueue(adapter);
+err_kmalloc:
        mwifiex_free_adapter(adapter);
 
 err_init_sw:
index 253e0bd38e25e22ce911e6e49b4d99261afe596d..d2e5ccd891da2eebd7697bd1ac31989ccdc792ad 100644 (file)
@@ -204,11 +204,11 @@ struct mwifiex_ra_list_tbl {
        struct list_head list;
        struct sk_buff_head skb_head;
        u8 ra[ETH_ALEN];
-       u32 total_pkts_size;
        u32 is_11n_enabled;
        u16 max_amsdu;
-       u16 pkt_count;
+       u16 ba_pkt_count;
        u8 ba_packet_thr;
+       u16 total_pkt_count;
 };
 
 struct mwifiex_tid_tbl {
@@ -515,6 +515,7 @@ struct mwifiex_private {
        bool scan_aborting;
        u8 csa_chan;
        unsigned long csa_expire_time;
+       u8 del_list_idx;
 };
 
 enum mwifiex_ba_status {
@@ -748,6 +749,7 @@ struct mwifiex_adapter {
 
        atomic_t is_tx_received;
        atomic_t pending_bridged_pkts;
+       struct semaphore *card_sem;
 };
 
 int mwifiex_init_lock_list(struct mwifiex_adapter *adapter);
@@ -900,6 +902,8 @@ int mwifiex_cmd_append_vsie_tlv(struct mwifiex_private *priv, u16 vsie_mask,
 u32 mwifiex_get_active_data_rates(struct mwifiex_private *priv,
                                    u8 *rates);
 u32 mwifiex_get_supported_rates(struct mwifiex_private *priv, u8 *rates);
+u32 mwifiex_get_rates_from_cfg80211(struct mwifiex_private *priv,
+                                   u8 *rates, u8 radio_type);
 u8 mwifiex_is_rate_auto(struct mwifiex_private *priv);
 extern u16 region_code_index[MWIFIEX_MAX_REGION_CODE];
 void mwifiex_save_curr_bcn(struct mwifiex_private *priv);
index 20c9c4c7b0b2eb64fa75bd4180fd85015d77b33e..52da8ee7599a041d7922180122c1744d19fe743e 100644 (file)
@@ -76,7 +76,7 @@ static bool mwifiex_pcie_ok_to_access_hw(struct mwifiex_adapter *adapter)
        return false;
 }
 
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 /*
  * Kernel needs to suspend all functions separately. Therefore all
  * registered functions must have drivers with suspend and resume
@@ -85,11 +85,12 @@ static bool mwifiex_pcie_ok_to_access_hw(struct mwifiex_adapter *adapter)
  * If already not suspended, this function allocates and sends a host
  * sleep activate request to the firmware and turns off the traffic.
  */
-static int mwifiex_pcie_suspend(struct pci_dev *pdev, pm_message_t state)
+static int mwifiex_pcie_suspend(struct device *dev)
 {
        struct mwifiex_adapter *adapter;
        struct pcie_service_card *card;
        int hs_actived;
+       struct pci_dev *pdev = to_pci_dev(dev);
 
        if (pdev) {
                card = (struct pcie_service_card *) pci_get_drvdata(pdev);
@@ -120,10 +121,11 @@ static int mwifiex_pcie_suspend(struct pci_dev *pdev, pm_message_t state)
  * If already not resumed, this function turns on the traffic and
  * sends a host sleep cancel request to the firmware.
  */
-static int mwifiex_pcie_resume(struct pci_dev *pdev)
+static int mwifiex_pcie_resume(struct device *dev)
 {
        struct mwifiex_adapter *adapter;
        struct pcie_service_card *card;
+       struct pci_dev *pdev = to_pci_dev(dev);
 
        if (pdev) {
                card = (struct pcie_service_card *) pci_get_drvdata(pdev);
@@ -211,9 +213,9 @@ static void mwifiex_pcie_remove(struct pci_dev *pdev)
        wait_for_completion(&adapter->fw_load);
 
        if (user_rmmod) {
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
                if (adapter->is_suspended)
-                       mwifiex_pcie_resume(pdev);
+                       mwifiex_pcie_resume(&pdev->dev);
 #endif
 
                for (i = 0; i < adapter->priv_num; i++)
@@ -233,6 +235,14 @@ static void mwifiex_pcie_remove(struct pci_dev *pdev)
        kfree(card);
 }
 
+static void mwifiex_pcie_shutdown(struct pci_dev *pdev)
+{
+       user_rmmod = 1;
+       mwifiex_pcie_remove(pdev);
+
+       return;
+}
+
 static DEFINE_PCI_DEVICE_TABLE(mwifiex_ids) = {
        {
                PCIE_VENDOR_ID_MARVELL, PCIE_DEVICE_ID_MARVELL_88W8766P,
@@ -249,17 +259,24 @@ static DEFINE_PCI_DEVICE_TABLE(mwifiex_ids) = {
 
 MODULE_DEVICE_TABLE(pci, mwifiex_ids);
 
+#ifdef CONFIG_PM_SLEEP
+/* Power Management Hooks */
+static SIMPLE_DEV_PM_OPS(mwifiex_pcie_pm_ops, mwifiex_pcie_suspend,
+                               mwifiex_pcie_resume);
+#endif
+
 /* PCI Device Driver */
 static struct pci_driver __refdata mwifiex_pcie = {
        .name     = "mwifiex_pcie",
        .id_table = mwifiex_ids,
        .probe    = mwifiex_pcie_probe,
        .remove   = mwifiex_pcie_remove,
-#ifdef CONFIG_PM
-       /* Power Management Hooks */
-       .suspend  = mwifiex_pcie_suspend,
-       .resume   = mwifiex_pcie_resume,
+#ifdef CONFIG_PM_SLEEP
+       .driver   = {
+               .pm = &mwifiex_pcie_pm_ops,
+       },
 #endif
+       .shutdown = mwifiex_pcie_shutdown,
 };
 
 /*
@@ -1925,7 +1942,7 @@ mwifiex_check_fw_status(struct mwifiex_adapter *adapter, u32 poll_num)
                        ret = 0;
                        break;
                } else {
-                       mdelay(100);
+                       msleep(100);
                        ret = -1;
                }
        }
@@ -1937,12 +1954,10 @@ mwifiex_check_fw_status(struct mwifiex_adapter *adapter, u32 poll_num)
                else if (!winner_status) {
                        dev_err(adapter->dev, "PCI-E is the winner\n");
                        adapter->winner = 1;
-                       ret = -1;
                } else {
                        dev_err(adapter->dev,
                                "PCI-E is not the winner <%#x,%d>, exit dnld\n",
                                ret, adapter->winner);
-                       ret = 0;
                }
        }
 
index c447d9bd1aa93746f5ad68b7c9205e1e85e20810..8cf7d50a7603121682c7f9a9684a64a978de85c9 100644 (file)
@@ -543,6 +543,37 @@ mwifiex_scan_create_channel_list(struct mwifiex_private *priv,
        return chan_idx;
 }
 
+/* This function appends rate TLV to scan config command. */
+static int
+mwifiex_append_rate_tlv(struct mwifiex_private *priv,
+                       struct mwifiex_scan_cmd_config *scan_cfg_out,
+                       u8 radio)
+{
+       struct mwifiex_ie_types_rates_param_set *rates_tlv;
+       u8 rates[MWIFIEX_SUPPORTED_RATES], *tlv_pos;
+       u32 rates_size;
+
+       memset(rates, 0, sizeof(rates));
+
+       tlv_pos = (u8 *)scan_cfg_out->tlv_buf + scan_cfg_out->tlv_buf_len;
+
+       if (priv->scan_request)
+               rates_size = mwifiex_get_rates_from_cfg80211(priv, rates,
+                                                            radio);
+       else
+               rates_size = mwifiex_get_supported_rates(priv, rates);
+
+       dev_dbg(priv->adapter->dev, "info: SCAN_CMD: Rates size = %d\n",
+               rates_size);
+       rates_tlv = (struct mwifiex_ie_types_rates_param_set *)tlv_pos;
+       rates_tlv->header.type = cpu_to_le16(WLAN_EID_SUPP_RATES);
+       rates_tlv->header.len = cpu_to_le16((u16) rates_size);
+       memcpy(rates_tlv->rates, rates, rates_size);
+       scan_cfg_out->tlv_buf_len += sizeof(rates_tlv->header) + rates_size;
+
+       return rates_size;
+}
+
 /*
  * This function constructs and sends multiple scan config commands to
  * the firmware.
@@ -564,9 +595,10 @@ mwifiex_scan_channel_list(struct mwifiex_private *priv,
        struct mwifiex_chan_scan_param_set *tmp_chan_list;
        struct mwifiex_chan_scan_param_set *start_chan;
 
-       u32 tlv_idx;
+       u32 tlv_idx, rates_size;
        u32 total_scan_time;
        u32 done_early;
+       u8 radio_type;
 
        if (!scan_cfg_out || !chan_tlv_out || !scan_chan_list) {
                dev_dbg(priv->adapter->dev,
@@ -591,6 +623,7 @@ mwifiex_scan_channel_list(struct mwifiex_private *priv,
 
                tlv_idx = 0;
                total_scan_time = 0;
+               radio_type = 0;
                chan_tlv_out->header.len = 0;
                start_chan = tmp_chan_list;
                done_early = false;
@@ -612,6 +645,7 @@ mwifiex_scan_channel_list(struct mwifiex_private *priv,
                                continue;
                        }
 
+                       radio_type = tmp_chan_list->radio_type;
                        dev_dbg(priv->adapter->dev,
                                "info: Scan: Chan(%3d), Radio(%d),"
                                " Mode(%d, %d), Dur(%d)\n",
@@ -692,6 +726,9 @@ mwifiex_scan_channel_list(struct mwifiex_private *priv,
                        break;
                }
 
+               rates_size = mwifiex_append_rate_tlv(priv, scan_cfg_out,
+                                                    radio_type);
+
                priv->adapter->scan_channels = start_chan;
 
                /* Send the scan command to the firmware with the specified
@@ -699,6 +736,14 @@ mwifiex_scan_channel_list(struct mwifiex_private *priv,
                ret = mwifiex_send_cmd_async(priv, HostCmd_CMD_802_11_SCAN,
                                             HostCmd_ACT_GEN_SET, 0,
                                             scan_cfg_out);
+
+               /* rate IE is updated per scan command but same starting
+                * pointer is used each time so that rate IE from earlier
+                * scan_cfg_out->buf is overwritten with new one.
+                */
+               scan_cfg_out->tlv_buf_len -=
+                           sizeof(struct mwifiex_ie_types_header) + rates_size;
+
                if (ret)
                        break;
        }
@@ -741,7 +786,6 @@ mwifiex_config_scan(struct mwifiex_private *priv,
        struct mwifiex_adapter *adapter = priv->adapter;
        struct mwifiex_ie_types_num_probes *num_probes_tlv;
        struct mwifiex_ie_types_wildcard_ssid_params *wildcard_ssid_tlv;
-       struct mwifiex_ie_types_rates_param_set *rates_tlv;
        u8 *tlv_pos;
        u32 num_probes;
        u32 ssid_len;
@@ -753,8 +797,6 @@ mwifiex_config_scan(struct mwifiex_private *priv,
        u8 radio_type;
        int i;
        u8 ssid_filter;
-       u8 rates[MWIFIEX_SUPPORTED_RATES];
-       u32 rates_size;
        struct mwifiex_ie_types_htcap *ht_cap;
 
        /* The tlv_buf_len is calculated for each scan command.  The TLVs added
@@ -889,19 +931,6 @@ mwifiex_config_scan(struct mwifiex_private *priv,
 
        }
 
-       /* Append rates tlv */
-       memset(rates, 0, sizeof(rates));
-
-       rates_size = mwifiex_get_supported_rates(priv, rates);
-
-       rates_tlv = (struct mwifiex_ie_types_rates_param_set *) tlv_pos;
-       rates_tlv->header.type = cpu_to_le16(WLAN_EID_SUPP_RATES);
-       rates_tlv->header.len = cpu_to_le16((u16) rates_size);
-       memcpy(rates_tlv->rates, rates, rates_size);
-       tlv_pos += sizeof(rates_tlv->header) + rates_size;
-
-       dev_dbg(adapter->dev, "info: SCAN_CMD: Rates size = %d\n", rates_size);
-
        if (ISSUPP_11NENABLED(priv->adapter->fw_cap_info) &&
            (priv->adapter->config_bands & BAND_GN ||
             priv->adapter->config_bands & BAND_AN)) {
index 09185c9632483b324d63953652ac6acc93a36752..0e2070f72fed37457c078390a7b263a8d88a6e7e 100644 (file)
@@ -50,9 +50,6 @@ static struct mwifiex_if_ops sdio_ops;
 
 static struct semaphore add_remove_card_sem;
 
-static int mwifiex_sdio_resume(struct device *dev);
-static void mwifiex_sdio_interrupt(struct sdio_func *func);
-
 /*
  * SDIO probe.
  *
@@ -112,6 +109,51 @@ mwifiex_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id)
        return ret;
 }
 
+/*
+ * SDIO resume.
+ *
+ * Kernel needs to suspend all functions separately. Therefore all
+ * registered functions must have drivers with suspend and resume
+ * methods. Failing that the kernel simply removes the whole card.
+ *
+ * If already not resumed, this function turns on the traffic and
+ * sends a host sleep cancel request to the firmware.
+ */
+static int mwifiex_sdio_resume(struct device *dev)
+{
+       struct sdio_func *func = dev_to_sdio_func(dev);
+       struct sdio_mmc_card *card;
+       struct mwifiex_adapter *adapter;
+       mmc_pm_flag_t pm_flag = 0;
+
+       if (func) {
+               pm_flag = sdio_get_host_pm_caps(func);
+               card = sdio_get_drvdata(func);
+               if (!card || !card->adapter) {
+                       pr_err("resume: invalid card or adapter\n");
+                       return 0;
+               }
+       } else {
+               pr_err("resume: sdio_func is not specified\n");
+               return 0;
+       }
+
+       adapter = card->adapter;
+
+       if (!adapter->is_suspended) {
+               dev_warn(adapter->dev, "device already resumed\n");
+               return 0;
+       }
+
+       adapter->is_suspended = false;
+
+       /* Disable Host Sleep */
+       mwifiex_cancel_hs(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA),
+                         MWIFIEX_ASYNC_CMD);
+
+       return 0;
+}
+
 /*
  * SDIO remove.
  *
@@ -212,51 +254,6 @@ static int mwifiex_sdio_suspend(struct device *dev)
        return ret;
 }
 
-/*
- * SDIO resume.
- *
- * Kernel needs to suspend all functions separately. Therefore all
- * registered functions must have drivers with suspend and resume
- * methods. Failing that the kernel simply removes the whole card.
- *
- * If already not resumed, this function turns on the traffic and
- * sends a host sleep cancel request to the firmware.
- */
-static int mwifiex_sdio_resume(struct device *dev)
-{
-       struct sdio_func *func = dev_to_sdio_func(dev);
-       struct sdio_mmc_card *card;
-       struct mwifiex_adapter *adapter;
-       mmc_pm_flag_t pm_flag = 0;
-
-       if (func) {
-               pm_flag = sdio_get_host_pm_caps(func);
-               card = sdio_get_drvdata(func);
-               if (!card || !card->adapter) {
-                       pr_err("resume: invalid card or adapter\n");
-                       return 0;
-               }
-       } else {
-               pr_err("resume: sdio_func is not specified\n");
-               return 0;
-       }
-
-       adapter = card->adapter;
-
-       if (!adapter->is_suspended) {
-               dev_warn(adapter->dev, "device already resumed\n");
-               return 0;
-       }
-
-       adapter->is_suspended = false;
-
-       /* Disable Host Sleep */
-       mwifiex_cancel_hs(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA),
-                         MWIFIEX_ASYNC_CMD);
-
-       return 0;
-}
-
 /* Device ID for SD8786 */
 #define SDIO_DEVICE_ID_MARVELL_8786   (0x9116)
 /* Device ID for SD8787 */
@@ -706,6 +703,65 @@ static void mwifiex_sdio_disable_host_int(struct mwifiex_adapter *adapter)
        sdio_release_host(func);
 }
 
+/*
+ * This function reads the interrupt status from card.
+ */
+static void mwifiex_interrupt_status(struct mwifiex_adapter *adapter)
+{
+       struct sdio_mmc_card *card = adapter->card;
+       u8 sdio_ireg;
+       unsigned long flags;
+
+       if (mwifiex_read_data_sync(adapter, card->mp_regs,
+                                  card->reg->max_mp_regs,
+                                  REG_PORT | MWIFIEX_SDIO_BYTE_MODE_MASK, 0)) {
+               dev_err(adapter->dev, "read mp_regs failed\n");
+               return;
+       }
+
+       sdio_ireg = card->mp_regs[HOST_INTSTATUS_REG];
+       if (sdio_ireg) {
+               /*
+                * DN_LD_HOST_INT_STATUS and/or UP_LD_HOST_INT_STATUS
+                * For SDIO new mode CMD port interrupts
+                *      DN_LD_CMD_PORT_HOST_INT_STATUS and/or
+                *      UP_LD_CMD_PORT_HOST_INT_STATUS
+                * Clear the interrupt status register
+                */
+               dev_dbg(adapter->dev, "int: sdio_ireg = %#x\n", sdio_ireg);
+               spin_lock_irqsave(&adapter->int_lock, flags);
+               adapter->int_status |= sdio_ireg;
+               spin_unlock_irqrestore(&adapter->int_lock, flags);
+       }
+}
+
+/*
+ * SDIO interrupt handler.
+ *
+ * This function reads the interrupt status from firmware and handles
+ * the interrupt in current thread (ksdioirqd) right away.
+ */
+static void
+mwifiex_sdio_interrupt(struct sdio_func *func)
+{
+       struct mwifiex_adapter *adapter;
+       struct sdio_mmc_card *card;
+
+       card = sdio_get_drvdata(func);
+       if (!card || !card->adapter) {
+               pr_debug("int: func=%p card=%p adapter=%p\n",
+                        func, card, card ? card->adapter : NULL);
+               return;
+       }
+       adapter = card->adapter;
+
+       if (!adapter->pps_uapsd_mode && adapter->ps_state == PS_STATE_SLEEP)
+               adapter->ps_state = PS_STATE_AWAKE;
+
+       mwifiex_interrupt_status(adapter);
+       mwifiex_main_process(adapter);
+}
+
 /*
  * This function enables the host interrupt.
  *
@@ -944,7 +1000,7 @@ static int mwifiex_check_fw_status(struct mwifiex_adapter *adapter,
                        ret = 0;
                        break;
                } else {
-                       mdelay(100);
+                       msleep(100);
                        ret = -1;
                }
        }
@@ -962,65 +1018,6 @@ static int mwifiex_check_fw_status(struct mwifiex_adapter *adapter,
        return ret;
 }
 
-/*
- * This function reads the interrupt status from card.
- */
-static void mwifiex_interrupt_status(struct mwifiex_adapter *adapter)
-{
-       struct sdio_mmc_card *card = adapter->card;
-       u8 sdio_ireg;
-       unsigned long flags;
-
-       if (mwifiex_read_data_sync(adapter, card->mp_regs,
-                                  card->reg->max_mp_regs,
-                                  REG_PORT | MWIFIEX_SDIO_BYTE_MODE_MASK, 0)) {
-               dev_err(adapter->dev, "read mp_regs failed\n");
-               return;
-       }
-
-       sdio_ireg = card->mp_regs[HOST_INTSTATUS_REG];
-       if (sdio_ireg) {
-               /*
-                * DN_LD_HOST_INT_STATUS and/or UP_LD_HOST_INT_STATUS
-                * For SDIO new mode CMD port interrupts
-                *      DN_LD_CMD_PORT_HOST_INT_STATUS and/or
-                *      UP_LD_CMD_PORT_HOST_INT_STATUS
-                * Clear the interrupt status register
-                */
-               dev_dbg(adapter->dev, "int: sdio_ireg = %#x\n", sdio_ireg);
-               spin_lock_irqsave(&adapter->int_lock, flags);
-               adapter->int_status |= sdio_ireg;
-               spin_unlock_irqrestore(&adapter->int_lock, flags);
-       }
-}
-
-/*
- * SDIO interrupt handler.
- *
- * This function reads the interrupt status from firmware and handles
- * the interrupt in current thread (ksdioirqd) right away.
- */
-static void
-mwifiex_sdio_interrupt(struct sdio_func *func)
-{
-       struct mwifiex_adapter *adapter;
-       struct sdio_mmc_card *card;
-
-       card = sdio_get_drvdata(func);
-       if (!card || !card->adapter) {
-               pr_debug("int: func=%p card=%p adapter=%p\n",
-                        func, card, card ? card->adapter : NULL);
-               return;
-       }
-       adapter = card->adapter;
-
-       if (!adapter->pps_uapsd_mode && adapter->ps_state == PS_STATE_SLEEP)
-               adapter->ps_state = PS_STATE_AWAKE;
-
-       mwifiex_interrupt_status(adapter);
-       mwifiex_main_process(adapter);
-}
-
 /*
  * This function decodes a received packet.
  *
index 8ece48580642b28e92ecf5d28629d8c2585dec05..c0268b5977480b384f04f452e3278a9443c93e85 100644 (file)
@@ -707,8 +707,9 @@ mwifiex_cmd_802_11_key_material(struct mwifiex_private *priv,
                if (priv->bss_type == MWIFIEX_BSS_TYPE_UAP) {
                        tlv_mac = (void *)((u8 *)&key_material->key_param_set +
                                           key_param_len);
-                       tlv_mac->tlv.type = cpu_to_le16(TLV_TYPE_STA_MAC_ADDR);
-                       tlv_mac->tlv.len = cpu_to_le16(ETH_ALEN);
+                       tlv_mac->header.type =
+                                       cpu_to_le16(TLV_TYPE_STA_MAC_ADDR);
+                       tlv_mac->header.len = cpu_to_le16(ETH_ALEN);
                        memcpy(tlv_mac->mac_addr, enc_key->mac_addr, ETH_ALEN);
                        cmd_size = key_param_len + S_DS_GEN +
                                   sizeof(key_material->action) +
@@ -1069,7 +1070,7 @@ mwifiex_cmd_append_rpn_expression(struct mwifiex_private *priv,
        int i, byte_len;
        u8 *stack_ptr = *buffer;
 
-       for (i = 0; i < MWIFIEX_MAX_FILTERS; i++) {
+       for (i = 0; i < MWIFIEX_MEF_MAX_FILTERS; i++) {
                filter = &mef_entry->filter[i];
                if (!filter->filt_type)
                        break;
@@ -1078,7 +1079,7 @@ mwifiex_cmd_append_rpn_expression(struct mwifiex_private *priv,
                *stack_ptr = TYPE_DNUM;
                stack_ptr += 1;
 
-               byte_len = filter->byte_seq[MAX_BYTESEQ];
+               byte_len = filter->byte_seq[MWIFIEX_MEF_MAX_BYTESEQ];
                memcpy(stack_ptr, filter->byte_seq, byte_len);
                stack_ptr += byte_len;
                *stack_ptr = byte_len;
@@ -1183,6 +1184,70 @@ static int mwifiex_cmd_cfg_data(struct mwifiex_private *priv,
        return 0;
 }
 
+static int
+mwifiex_cmd_coalesce_cfg(struct mwifiex_private *priv,
+                        struct host_cmd_ds_command *cmd,
+                        u16 cmd_action, void *data_buf)
+{
+       struct host_cmd_ds_coalesce_cfg *coalesce_cfg =
+                                               &cmd->params.coalesce_cfg;
+       struct mwifiex_ds_coalesce_cfg *cfg = data_buf;
+       struct coalesce_filt_field_param *param;
+       u16 cnt, idx, length;
+       struct coalesce_receive_filt_rule *rule;
+
+       cmd->command = cpu_to_le16(HostCmd_CMD_COALESCE_CFG);
+       cmd->size = cpu_to_le16(S_DS_GEN);
+
+       coalesce_cfg->action = cpu_to_le16(cmd_action);
+       coalesce_cfg->num_of_rules = cpu_to_le16(cfg->num_of_rules);
+       rule = coalesce_cfg->rule;
+
+       for (cnt = 0; cnt < cfg->num_of_rules; cnt++) {
+               rule->header.type = cpu_to_le16(TLV_TYPE_COALESCE_RULE);
+               rule->max_coalescing_delay =
+                       cpu_to_le16(cfg->rule[cnt].max_coalescing_delay);
+               rule->pkt_type = cfg->rule[cnt].pkt_type;
+               rule->num_of_fields = cfg->rule[cnt].num_of_fields;
+
+               length = 0;
+
+               param = rule->params;
+               for (idx = 0; idx < cfg->rule[cnt].num_of_fields; idx++) {
+                       param->operation = cfg->rule[cnt].params[idx].operation;
+                       param->operand_len =
+                                       cfg->rule[cnt].params[idx].operand_len;
+                       param->offset =
+                               cpu_to_le16(cfg->rule[cnt].params[idx].offset);
+                       memcpy(param->operand_byte_stream,
+                              cfg->rule[cnt].params[idx].operand_byte_stream,
+                              param->operand_len);
+
+                       length += sizeof(struct coalesce_filt_field_param);
+
+                       param++;
+               }
+
+               /* Total rule length is sizeof max_coalescing_delay(u16),
+                * num_of_fields(u8), pkt_type(u8) and total length of the all
+                * params
+                */
+               rule->header.len = cpu_to_le16(length + sizeof(u16) +
+                                              sizeof(u8) + sizeof(u8));
+
+               /* Add the rule length to the command size*/
+               le16_add_cpu(&cmd->size, le16_to_cpu(rule->header.len) +
+                            sizeof(struct mwifiex_ie_types_header));
+
+               rule = (void *)((u8 *)rule->params + length);
+       }
+
+       /* Add sizeof action, num_of_rules to total command length */
+       le16_add_cpu(&cmd->size, sizeof(u16) + sizeof(u16));
+
+       return 0;
+}
+
 /*
  * This function prepares the commands before sending them to the firmware.
  *
@@ -1406,6 +1471,10 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
        case HostCmd_CMD_MEF_CFG:
                ret = mwifiex_cmd_mef_cfg(priv, cmd_ptr, data_buf);
                break;
+       case HostCmd_CMD_COALESCE_CFG:
+               ret = mwifiex_cmd_coalesce_cfg(priv, cmd_ptr, cmd_action,
+                                              data_buf);
+               break;
        default:
                dev_err(priv->adapter->dev,
                        "PREP_CMD: unknown cmd- %#x\n", cmd_no);
index d85df158cc6c03cb9002ee300d9c2f774fcdd4fa..6a814eb2671a3a48a99d383cc725f2b69924ee26 100644 (file)
@@ -997,6 +997,8 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no,
                break;
        case HostCmd_CMD_MEF_CFG:
                break;
+       case HostCmd_CMD_COALESCE_CFG:
+               break;
        default:
                dev_err(adapter->dev, "CMD_RESP: unknown cmd response %#x\n",
                        resp->command);
index ea265ec0e522333837a13ab43a16d74ccaae7550..8b057524b252e535307644ddaa98e12df75a35cd 100644 (file)
@@ -201,6 +201,11 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
 
        case EVENT_DEAUTHENTICATED:
                dev_dbg(adapter->dev, "event: Deauthenticated\n");
+               if (priv->wps.session_enable) {
+                       dev_dbg(adapter->dev,
+                               "info: receive deauth event in wps session\n");
+                       break;
+               }
                adapter->dbg.num_event_deauth++;
                if (priv->media_connected) {
                        reason_code =
@@ -211,6 +216,11 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
 
        case EVENT_DISASSOCIATED:
                dev_dbg(adapter->dev, "event: Disassociated\n");
+               if (priv->wps.session_enable) {
+                       dev_dbg(adapter->dev,
+                               "info: receive disassoc event in wps session\n");
+                       break;
+               }
                adapter->dbg.num_event_disassoc++;
                if (priv->media_connected) {
                        reason_code =
index 8af97abf71088589b61cbc847fdc494a616de7c7..f084412eee0b7cdeced4f2b21b4140b01d1b141e 100644 (file)
@@ -797,15 +797,16 @@ static int mwifiex_set_wps_ie(struct mwifiex_private *priv,
                               u8 *ie_data_ptr, u16 ie_len)
 {
        if (ie_len) {
-               priv->wps_ie = kzalloc(MWIFIEX_MAX_VSIE_LEN, GFP_KERNEL);
-               if (!priv->wps_ie)
-                       return -ENOMEM;
-               if (ie_len > sizeof(priv->wps_ie)) {
+               if (ie_len > MWIFIEX_MAX_VSIE_LEN) {
                        dev_dbg(priv->adapter->dev,
                                "info: failed to copy WPS IE, too big\n");
-                       kfree(priv->wps_ie);
                        return -1;
                }
+
+               priv->wps_ie = kzalloc(MWIFIEX_MAX_VSIE_LEN, GFP_KERNEL);
+               if (!priv->wps_ie)
+                       return -ENOMEM;
+
                memcpy(priv->wps_ie, ie_data_ptr, ie_len);
                priv->wps_ie_len = ie_len;
                dev_dbg(priv->adapter->dev, "cmd: Set wps_ie_len=%d IE=%#x\n",
index 2de882dead0f879dee34831e0c61adc470d9dd70..64424c81b44f5bf55601e75a8a4d47e51b6bc9b9 100644 (file)
@@ -293,9 +293,9 @@ mwifiex_uap_bss_wpa(u8 **tlv_buf, void *cmd_buf, u16 *param_size)
        u8 *tlv = *tlv_buf;
 
        tlv_akmp = (struct host_cmd_tlv_akmp *)tlv;
-       tlv_akmp->tlv.type = cpu_to_le16(TLV_TYPE_UAP_AKMP);
-       tlv_akmp->tlv.len = cpu_to_le16(sizeof(struct host_cmd_tlv_akmp) -
-                                       sizeof(struct host_cmd_tlv));
+       tlv_akmp->header.type = cpu_to_le16(TLV_TYPE_UAP_AKMP);
+       tlv_akmp->header.len = cpu_to_le16(sizeof(struct host_cmd_tlv_akmp) -
+                                       sizeof(struct mwifiex_ie_types_header));
        tlv_akmp->key_mgmt_operation = cpu_to_le16(bss_cfg->key_mgmt_operation);
        tlv_akmp->key_mgmt = cpu_to_le16(bss_cfg->key_mgmt);
        cmd_size += sizeof(struct host_cmd_tlv_akmp);
@@ -303,10 +303,10 @@ mwifiex_uap_bss_wpa(u8 **tlv_buf, void *cmd_buf, u16 *param_size)
 
        if (bss_cfg->wpa_cfg.pairwise_cipher_wpa & VALID_CIPHER_BITMAP) {
                pwk_cipher = (struct host_cmd_tlv_pwk_cipher *)tlv;
-               pwk_cipher->tlv.type = cpu_to_le16(TLV_TYPE_PWK_CIPHER);
-               pwk_cipher->tlv.len =
+               pwk_cipher->header.type = cpu_to_le16(TLV_TYPE_PWK_CIPHER);
+               pwk_cipher->header.len =
                        cpu_to_le16(sizeof(struct host_cmd_tlv_pwk_cipher) -
-                                   sizeof(struct host_cmd_tlv));
+                                   sizeof(struct mwifiex_ie_types_header));
                pwk_cipher->proto = cpu_to_le16(PROTOCOL_WPA);
                pwk_cipher->cipher = bss_cfg->wpa_cfg.pairwise_cipher_wpa;
                cmd_size += sizeof(struct host_cmd_tlv_pwk_cipher);
@@ -315,10 +315,10 @@ mwifiex_uap_bss_wpa(u8 **tlv_buf, void *cmd_buf, u16 *param_size)
 
        if (bss_cfg->wpa_cfg.pairwise_cipher_wpa2 & VALID_CIPHER_BITMAP) {
                pwk_cipher = (struct host_cmd_tlv_pwk_cipher *)tlv;
-               pwk_cipher->tlv.type = cpu_to_le16(TLV_TYPE_PWK_CIPHER);
-               pwk_cipher->tlv.len =
+               pwk_cipher->header.type = cpu_to_le16(TLV_TYPE_PWK_CIPHER);
+               pwk_cipher->header.len =
                        cpu_to_le16(sizeof(struct host_cmd_tlv_pwk_cipher) -
-                                   sizeof(struct host_cmd_tlv));
+                                   sizeof(struct mwifiex_ie_types_header));
                pwk_cipher->proto = cpu_to_le16(PROTOCOL_WPA2);
                pwk_cipher->cipher = bss_cfg->wpa_cfg.pairwise_cipher_wpa2;
                cmd_size += sizeof(struct host_cmd_tlv_pwk_cipher);
@@ -327,10 +327,10 @@ mwifiex_uap_bss_wpa(u8 **tlv_buf, void *cmd_buf, u16 *param_size)
 
        if (bss_cfg->wpa_cfg.group_cipher & VALID_CIPHER_BITMAP) {
                gwk_cipher = (struct host_cmd_tlv_gwk_cipher *)tlv;
-               gwk_cipher->tlv.type = cpu_to_le16(TLV_TYPE_GWK_CIPHER);
-               gwk_cipher->tlv.len =
+               gwk_cipher->header.type = cpu_to_le16(TLV_TYPE_GWK_CIPHER);
+               gwk_cipher->header.len =
                        cpu_to_le16(sizeof(struct host_cmd_tlv_gwk_cipher) -
-                                   sizeof(struct host_cmd_tlv));
+                                   sizeof(struct mwifiex_ie_types_header));
                gwk_cipher->cipher = bss_cfg->wpa_cfg.group_cipher;
                cmd_size += sizeof(struct host_cmd_tlv_gwk_cipher);
                tlv += sizeof(struct host_cmd_tlv_gwk_cipher);
@@ -338,13 +338,15 @@ mwifiex_uap_bss_wpa(u8 **tlv_buf, void *cmd_buf, u16 *param_size)
 
        if (bss_cfg->wpa_cfg.length) {
                passphrase = (struct host_cmd_tlv_passphrase *)tlv;
-               passphrase->tlv.type = cpu_to_le16(TLV_TYPE_UAP_WPA_PASSPHRASE);
-               passphrase->tlv.len = cpu_to_le16(bss_cfg->wpa_cfg.length);
+               passphrase->header.type =
+                               cpu_to_le16(TLV_TYPE_UAP_WPA_PASSPHRASE);
+               passphrase->header.len = cpu_to_le16(bss_cfg->wpa_cfg.length);
                memcpy(passphrase->passphrase, bss_cfg->wpa_cfg.passphrase,
                       bss_cfg->wpa_cfg.length);
-               cmd_size += sizeof(struct host_cmd_tlv) +
+               cmd_size += sizeof(struct mwifiex_ie_types_header) +
                            bss_cfg->wpa_cfg.length;
-               tlv += sizeof(struct host_cmd_tlv) + bss_cfg->wpa_cfg.length;
+               tlv += sizeof(struct mwifiex_ie_types_header) +
+                               bss_cfg->wpa_cfg.length;
        }
 
        *param_size = cmd_size;
@@ -403,16 +405,17 @@ mwifiex_uap_bss_wep(u8 **tlv_buf, void *cmd_buf, u16 *param_size)
                    (bss_cfg->wep_cfg[i].length == WLAN_KEY_LEN_WEP40 ||
                     bss_cfg->wep_cfg[i].length == WLAN_KEY_LEN_WEP104)) {
                        wep_key = (struct host_cmd_tlv_wep_key *)tlv;
-                       wep_key->tlv.type = cpu_to_le16(TLV_TYPE_UAP_WEP_KEY);
-                       wep_key->tlv.len =
+                       wep_key->header.type =
+                               cpu_to_le16(TLV_TYPE_UAP_WEP_KEY);
+                       wep_key->header.len =
                                cpu_to_le16(bss_cfg->wep_cfg[i].length + 2);
                        wep_key->key_index = bss_cfg->wep_cfg[i].key_index;
                        wep_key->is_default = bss_cfg->wep_cfg[i].is_default;
                        memcpy(wep_key->key, bss_cfg->wep_cfg[i].key,
                               bss_cfg->wep_cfg[i].length);
-                       cmd_size += sizeof(struct host_cmd_tlv) + 2 +
+                       cmd_size += sizeof(struct mwifiex_ie_types_header) + 2 +
                                    bss_cfg->wep_cfg[i].length;
-                       tlv += sizeof(struct host_cmd_tlv) + 2 +
+                       tlv += sizeof(struct mwifiex_ie_types_header) + 2 +
                                    bss_cfg->wep_cfg[i].length;
                }
        }
@@ -449,16 +452,17 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size)
 
        if (bss_cfg->ssid.ssid_len) {
                ssid = (struct host_cmd_tlv_ssid *)tlv;
-               ssid->tlv.type = cpu_to_le16(TLV_TYPE_UAP_SSID);
-               ssid->tlv.len = cpu_to_le16((u16)bss_cfg->ssid.ssid_len);
+               ssid->header.type = cpu_to_le16(TLV_TYPE_UAP_SSID);
+               ssid->header.len = cpu_to_le16((u16)bss_cfg->ssid.ssid_len);
                memcpy(ssid->ssid, bss_cfg->ssid.ssid, bss_cfg->ssid.ssid_len);
-               cmd_size += sizeof(struct host_cmd_tlv) +
+               cmd_size += sizeof(struct mwifiex_ie_types_header) +
                            bss_cfg->ssid.ssid_len;
-               tlv += sizeof(struct host_cmd_tlv) + bss_cfg->ssid.ssid_len;
+               tlv += sizeof(struct mwifiex_ie_types_header) +
+                               bss_cfg->ssid.ssid_len;
 
                bcast_ssid = (struct host_cmd_tlv_bcast_ssid *)tlv;
-               bcast_ssid->tlv.type = cpu_to_le16(TLV_TYPE_UAP_BCAST_SSID);
-               bcast_ssid->tlv.len =
+               bcast_ssid->header.type = cpu_to_le16(TLV_TYPE_UAP_BCAST_SSID);
+               bcast_ssid->header.len =
                                cpu_to_le16(sizeof(bcast_ssid->bcast_ctl));
                bcast_ssid->bcast_ctl = bss_cfg->bcast_ssid_ctl;
                cmd_size += sizeof(struct host_cmd_tlv_bcast_ssid);
@@ -466,13 +470,13 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size)
        }
        if (bss_cfg->rates[0]) {
                tlv_rates = (struct host_cmd_tlv_rates *)tlv;
-               tlv_rates->tlv.type = cpu_to_le16(TLV_TYPE_UAP_RATES);
+               tlv_rates->header.type = cpu_to_le16(TLV_TYPE_UAP_RATES);
 
                for (i = 0; i < MWIFIEX_SUPPORTED_RATES && bss_cfg->rates[i];
                     i++)
                        tlv_rates->rates[i] = bss_cfg->rates[i];
 
-               tlv_rates->tlv.len = cpu_to_le16(i);
+               tlv_rates->header.len = cpu_to_le16(i);
                cmd_size += sizeof(struct host_cmd_tlv_rates) + i;
                tlv += sizeof(struct host_cmd_tlv_rates) + i;
        }
@@ -482,10 +486,10 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size)
            (bss_cfg->band_cfg == BAND_CONFIG_A &&
             bss_cfg->channel <= MAX_CHANNEL_BAND_A))) {
                chan_band = (struct host_cmd_tlv_channel_band *)tlv;
-               chan_band->tlv.type = cpu_to_le16(TLV_TYPE_CHANNELBANDLIST);
-               chan_band->tlv.len =
+               chan_band->header.type = cpu_to_le16(TLV_TYPE_CHANNELBANDLIST);
+               chan_band->header.len =
                        cpu_to_le16(sizeof(struct host_cmd_tlv_channel_band) -
-                                   sizeof(struct host_cmd_tlv));
+                                   sizeof(struct mwifiex_ie_types_header));
                chan_band->band_config = bss_cfg->band_cfg;
                chan_band->channel = bss_cfg->channel;
                cmd_size += sizeof(struct host_cmd_tlv_channel_band);
@@ -494,11 +498,11 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size)
        if (bss_cfg->beacon_period >= MIN_BEACON_PERIOD &&
            bss_cfg->beacon_period <= MAX_BEACON_PERIOD) {
                beacon_period = (struct host_cmd_tlv_beacon_period *)tlv;
-               beacon_period->tlv.type =
+               beacon_period->header.type =
                                        cpu_to_le16(TLV_TYPE_UAP_BEACON_PERIOD);
-               beacon_period->tlv.len =
+               beacon_period->header.len =
                        cpu_to_le16(sizeof(struct host_cmd_tlv_beacon_period) -
-                                   sizeof(struct host_cmd_tlv));
+                                   sizeof(struct mwifiex_ie_types_header));
                beacon_period->period = cpu_to_le16(bss_cfg->beacon_period);
                cmd_size += sizeof(struct host_cmd_tlv_beacon_period);
                tlv += sizeof(struct host_cmd_tlv_beacon_period);
@@ -506,21 +510,22 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size)
        if (bss_cfg->dtim_period >= MIN_DTIM_PERIOD &&
            bss_cfg->dtim_period <= MAX_DTIM_PERIOD) {
                dtim_period = (struct host_cmd_tlv_dtim_period *)tlv;
-               dtim_period->tlv.type = cpu_to_le16(TLV_TYPE_UAP_DTIM_PERIOD);
-               dtim_period->tlv.len =
+               dtim_period->header.type =
+                       cpu_to_le16(TLV_TYPE_UAP_DTIM_PERIOD);
+               dtim_period->header.len =
                        cpu_to_le16(sizeof(struct host_cmd_tlv_dtim_period) -
-                                   sizeof(struct host_cmd_tlv));
+                                   sizeof(struct mwifiex_ie_types_header));
                dtim_period->period = bss_cfg->dtim_period;
                cmd_size += sizeof(struct host_cmd_tlv_dtim_period);
                tlv += sizeof(struct host_cmd_tlv_dtim_period);
        }
        if (bss_cfg->rts_threshold <= MWIFIEX_RTS_MAX_VALUE) {
                rts_threshold = (struct host_cmd_tlv_rts_threshold *)tlv;
-               rts_threshold->tlv.type =
+               rts_threshold->header.type =
                                        cpu_to_le16(TLV_TYPE_UAP_RTS_THRESHOLD);
-               rts_threshold->tlv.len =
+               rts_threshold->header.len =
                        cpu_to_le16(sizeof(struct host_cmd_tlv_rts_threshold) -
-                                   sizeof(struct host_cmd_tlv));
+                                   sizeof(struct mwifiex_ie_types_header));
                rts_threshold->rts_thr = cpu_to_le16(bss_cfg->rts_threshold);
                cmd_size += sizeof(struct host_cmd_tlv_frag_threshold);
                tlv += sizeof(struct host_cmd_tlv_frag_threshold);
@@ -528,21 +533,22 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size)
        if ((bss_cfg->frag_threshold >= MWIFIEX_FRAG_MIN_VALUE) &&
            (bss_cfg->frag_threshold <= MWIFIEX_FRAG_MAX_VALUE)) {
                frag_threshold = (struct host_cmd_tlv_frag_threshold *)tlv;
-               frag_threshold->tlv.type =
+               frag_threshold->header.type =
                                cpu_to_le16(TLV_TYPE_UAP_FRAG_THRESHOLD);
-               frag_threshold->tlv.len =
+               frag_threshold->header.len =
                        cpu_to_le16(sizeof(struct host_cmd_tlv_frag_threshold) -
-                                   sizeof(struct host_cmd_tlv));
+                                   sizeof(struct mwifiex_ie_types_header));
                frag_threshold->frag_thr = cpu_to_le16(bss_cfg->frag_threshold);
                cmd_size += sizeof(struct host_cmd_tlv_frag_threshold);
                tlv += sizeof(struct host_cmd_tlv_frag_threshold);
        }
        if (bss_cfg->retry_limit <= MWIFIEX_RETRY_LIMIT) {
                retry_limit = (struct host_cmd_tlv_retry_limit *)tlv;
-               retry_limit->tlv.type = cpu_to_le16(TLV_TYPE_UAP_RETRY_LIMIT);
-               retry_limit->tlv.len =
+               retry_limit->header.type =
+                       cpu_to_le16(TLV_TYPE_UAP_RETRY_LIMIT);
+               retry_limit->header.len =
                        cpu_to_le16(sizeof(struct host_cmd_tlv_retry_limit) -
-                                   sizeof(struct host_cmd_tlv));
+                                   sizeof(struct mwifiex_ie_types_header));
                retry_limit->limit = (u8)bss_cfg->retry_limit;
                cmd_size += sizeof(struct host_cmd_tlv_retry_limit);
                tlv += sizeof(struct host_cmd_tlv_retry_limit);
@@ -557,21 +563,21 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size)
        if ((bss_cfg->auth_mode <= WLAN_AUTH_SHARED_KEY) ||
            (bss_cfg->auth_mode == MWIFIEX_AUTH_MODE_AUTO)) {
                auth_type = (struct host_cmd_tlv_auth_type *)tlv;
-               auth_type->tlv.type = cpu_to_le16(TLV_TYPE_AUTH_TYPE);
-               auth_type->tlv.len =
+               auth_type->header.type = cpu_to_le16(TLV_TYPE_AUTH_TYPE);
+               auth_type->header.len =
                        cpu_to_le16(sizeof(struct host_cmd_tlv_auth_type) -
-                       sizeof(struct host_cmd_tlv));
+                       sizeof(struct mwifiex_ie_types_header));
                auth_type->auth_type = (u8)bss_cfg->auth_mode;
                cmd_size += sizeof(struct host_cmd_tlv_auth_type);
                tlv += sizeof(struct host_cmd_tlv_auth_type);
        }
        if (bss_cfg->protocol) {
                encrypt_protocol = (struct host_cmd_tlv_encrypt_protocol *)tlv;
-               encrypt_protocol->tlv.type =
+               encrypt_protocol->header.type =
                        cpu_to_le16(TLV_TYPE_UAP_ENCRY_PROTOCOL);
-               encrypt_protocol->tlv.len =
+               encrypt_protocol->header.len =
                        cpu_to_le16(sizeof(struct host_cmd_tlv_encrypt_protocol)
-                       - sizeof(struct host_cmd_tlv));
+                       - sizeof(struct mwifiex_ie_types_header));
                encrypt_protocol->proto = cpu_to_le16(bss_cfg->protocol);
                cmd_size += sizeof(struct host_cmd_tlv_encrypt_protocol);
                tlv += sizeof(struct host_cmd_tlv_encrypt_protocol);
@@ -608,9 +614,9 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size)
 
        if (bss_cfg->sta_ao_timer) {
                ao_timer = (struct host_cmd_tlv_ageout_timer *)tlv;
-               ao_timer->tlv.type = cpu_to_le16(TLV_TYPE_UAP_AO_TIMER);
-               ao_timer->tlv.len = cpu_to_le16(sizeof(*ao_timer) -
-                                               sizeof(struct host_cmd_tlv));
+               ao_timer->header.type = cpu_to_le16(TLV_TYPE_UAP_AO_TIMER);
+               ao_timer->header.len = cpu_to_le16(sizeof(*ao_timer) -
+                                       sizeof(struct mwifiex_ie_types_header));
                ao_timer->sta_ao_timer = cpu_to_le32(bss_cfg->sta_ao_timer);
                cmd_size += sizeof(*ao_timer);
                tlv += sizeof(*ao_timer);
@@ -618,9 +624,10 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size)
 
        if (bss_cfg->ps_sta_ao_timer) {
                ps_ao_timer = (struct host_cmd_tlv_ageout_timer *)tlv;
-               ps_ao_timer->tlv.type = cpu_to_le16(TLV_TYPE_UAP_PS_AO_TIMER);
-               ps_ao_timer->tlv.len = cpu_to_le16(sizeof(*ps_ao_timer) -
-                                                  sizeof(struct host_cmd_tlv));
+               ps_ao_timer->header.type =
+                               cpu_to_le16(TLV_TYPE_UAP_PS_AO_TIMER);
+               ps_ao_timer->header.len = cpu_to_le16(sizeof(*ps_ao_timer) -
+                               sizeof(struct mwifiex_ie_types_header));
                ps_ao_timer->sta_ao_timer =
                                        cpu_to_le32(bss_cfg->ps_sta_ao_timer);
                cmd_size += sizeof(*ps_ao_timer);
@@ -636,16 +643,17 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size)
 static int mwifiex_uap_custom_ie_prepare(u8 *tlv, void *cmd_buf, u16 *ie_size)
 {
        struct mwifiex_ie_list *ap_ie = cmd_buf;
-       struct host_cmd_tlv *tlv_ie = (struct host_cmd_tlv *)tlv;
+       struct mwifiex_ie_types_header *tlv_ie = (void *)tlv;
 
        if (!ap_ie || !ap_ie->len || !ap_ie->ie_list)
                return -1;
 
-       *ie_size += le16_to_cpu(ap_ie->len) + sizeof(struct host_cmd_tlv);
+       *ie_size += le16_to_cpu(ap_ie->len) +
+                       sizeof(struct mwifiex_ie_types_header);
 
        tlv_ie->type = cpu_to_le16(TLV_TYPE_MGMT_IE);
        tlv_ie->len = ap_ie->len;
-       tlv += sizeof(struct host_cmd_tlv);
+       tlv += sizeof(struct mwifiex_ie_types_header);
 
        memcpy(tlv, ap_ie->ie_list, le16_to_cpu(ap_ie->len));
 
index a018e42d117eb49b9b89716e70910f299de2ef68..1cfe5a738c479e53e3dd5ba21f460096d2a16e53 100644 (file)
 #include "11n_aggr.h"
 #include "11n_rxreorder.h"
 
+/* This function checks if particular RA list has packets more than low bridge
+ * packet threshold and then deletes packet from this RA list.
+ * Function deletes packets from such RA list and returns true. If no such list
+ * is found, false is returned.
+ */
+static bool
+mwifiex_uap_del_tx_pkts_in_ralist(struct mwifiex_private *priv,
+                                 struct list_head *ra_list_head)
+{
+       struct mwifiex_ra_list_tbl *ra_list;
+       struct sk_buff *skb, *tmp;
+       bool pkt_deleted = false;
+       struct mwifiex_txinfo *tx_info;
+       struct mwifiex_adapter *adapter = priv->adapter;
+
+       list_for_each_entry(ra_list, ra_list_head, list) {
+               if (skb_queue_empty(&ra_list->skb_head))
+                       continue;
+
+               skb_queue_walk_safe(&ra_list->skb_head, skb, tmp) {
+                       tx_info = MWIFIEX_SKB_TXCB(skb);
+                       if (tx_info->flags & MWIFIEX_BUF_FLAG_BRIDGED_PKT) {
+                               __skb_unlink(skb, &ra_list->skb_head);
+                               mwifiex_write_data_complete(adapter, skb, 0,
+                                                           -1);
+                               atomic_dec(&priv->wmm.tx_pkts_queued);
+                               pkt_deleted = true;
+                       }
+                       if ((atomic_read(&adapter->pending_bridged_pkts) <=
+                                            MWIFIEX_BRIDGED_PKTS_THR_LOW))
+                               break;
+               }
+       }
+
+       return pkt_deleted;
+}
+
+/* This function deletes packets from particular RA List. RA list index
+ * from which packets are deleted is preserved so that packets from next RA
+ * list are deleted upon subsequent call thus maintaining fairness.
+ */
+static void mwifiex_uap_cleanup_tx_queues(struct mwifiex_private *priv)
+{
+       unsigned long flags;
+       struct list_head *ra_list;
+       int i;
+
+       spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
+
+       for (i = 0; i < MAX_NUM_TID; i++, priv->del_list_idx++) {
+               if (priv->del_list_idx == MAX_NUM_TID)
+                       priv->del_list_idx = 0;
+               ra_list = &priv->wmm.tid_tbl_ptr[priv->del_list_idx].ra_list;
+               if (mwifiex_uap_del_tx_pkts_in_ralist(priv, ra_list)) {
+                       priv->del_list_idx++;
+                       break;
+               }
+       }
+
+       spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
+}
+
+
 static void mwifiex_uap_queue_bridged_pkt(struct mwifiex_private *priv,
                                         struct sk_buff *skb)
 {
@@ -40,10 +103,11 @@ static void mwifiex_uap_queue_bridged_pkt(struct mwifiex_private *priv,
        rx_pkt_hdr = (void *)uap_rx_pd + le16_to_cpu(uap_rx_pd->rx_pkt_offset);
 
        if ((atomic_read(&adapter->pending_bridged_pkts) >=
-                                            MWIFIEX_BRIDGED_PKTS_THRESHOLD)) {
+                                            MWIFIEX_BRIDGED_PKTS_THR_HIGH)) {
                dev_err(priv->adapter->dev,
                        "Tx: Bridge packet limit reached. Drop packet!\n");
                kfree_skb(skb);
+               mwifiex_uap_cleanup_tx_queues(priv);
                return;
        }
 
@@ -95,10 +159,6 @@ static void mwifiex_uap_queue_bridged_pkt(struct mwifiex_private *priv,
        atomic_inc(&adapter->tx_pending);
        atomic_inc(&adapter->pending_bridged_pkts);
 
-       if ((atomic_read(&adapter->tx_pending) >= MAX_TX_PENDING)) {
-               mwifiex_set_trans_start(priv->netdev);
-               mwifiex_stop_net_dev_queue(priv->netdev, priv->adapter);
-       }
        return;
 }
 
index f90fe21e5bfda65d5bc2a0d6fc6638ac8765e72e..fca98b5d7de49d53ce21bd4cb68bdfcef8352bda 100644 (file)
@@ -786,6 +786,13 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
        return 0;
 }
 
+static void mwifiex_unregister_dev(struct mwifiex_adapter *adapter)
+{
+       struct usb_card_rec *card = (struct usb_card_rec *)adapter->card;
+
+       usb_set_intfdata(card->intf, NULL);
+}
+
 static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
                                    struct mwifiex_fw_image *fw)
 {
@@ -978,6 +985,7 @@ static int mwifiex_pm_wakeup_card(struct mwifiex_adapter *adapter)
 
 static struct mwifiex_if_ops usb_ops = {
        .register_dev =         mwifiex_register_dev,
+       .unregister_dev =       mwifiex_unregister_dev,
        .wakeup =               mwifiex_pm_wakeup_card,
        .wakeup_complete =      mwifiex_pm_wakeup_card_complete,
 
index 944e8846f6fc757e8f937f0ece42e243279e9a29..2e8f9cdea54d719cf0f25b4b29d816eb0709ff7a 100644 (file)
@@ -120,7 +120,7 @@ mwifiex_wmm_allocate_ralist_node(struct mwifiex_adapter *adapter, u8 *ra)
 
        memcpy(ra_list->ra, ra, ETH_ALEN);
 
-       ra_list->total_pkts_size = 0;
+       ra_list->total_pkt_count = 0;
 
        dev_dbg(adapter->dev, "info: allocated ra_list %p\n", ra_list);
 
@@ -188,7 +188,7 @@ mwifiex_ralist_add(struct mwifiex_private *priv, u8 *ra)
                        ra_list, ra_list->is_11n_enabled);
 
                if (ra_list->is_11n_enabled) {
-                       ra_list->pkt_count = 0;
+                       ra_list->ba_pkt_count = 0;
                        ra_list->ba_packet_thr =
                                              mwifiex_get_random_ba_threshold();
                }
@@ -679,8 +679,8 @@ mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
 
        skb_queue_tail(&ra_list->skb_head, skb);
 
-       ra_list->total_pkts_size += skb->len;
-       ra_list->pkt_count++;
+       ra_list->ba_pkt_count++;
+       ra_list->total_pkt_count++;
 
        if (atomic_read(&priv->wmm.highest_queued_prio) <
                                                tos_to_tid_inv[tid_down])
@@ -1037,7 +1037,7 @@ mwifiex_send_single_packet(struct mwifiex_private *priv,
        tx_info = MWIFIEX_SKB_TXCB(skb);
        dev_dbg(adapter->dev, "data: dequeuing the packet %p %p\n", ptr, skb);
 
-       ptr->total_pkts_size -= skb->len;
+       ptr->total_pkt_count--;
 
        if (!skb_queue_empty(&ptr->skb_head))
                skb_next = skb_peek(&ptr->skb_head);
@@ -1062,8 +1062,8 @@ mwifiex_send_single_packet(struct mwifiex_private *priv,
 
                skb_queue_tail(&ptr->skb_head, skb);
 
-               ptr->total_pkts_size += skb->len;
-               ptr->pkt_count++;
+               ptr->total_pkt_count++;
+               ptr->ba_pkt_count++;
                tx_info->flags |= MWIFIEX_BUF_FLAG_REQUEUED_PKT;
                spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
                                       ra_list_flags);
@@ -1224,7 +1224,7 @@ mwifiex_dequeue_tx_packet(struct mwifiex_adapter *adapter)
                   mwifiex_send_single_packet() */
        } else {
                if (mwifiex_is_ampdu_allowed(priv, tid) &&
-                   ptr->pkt_count > ptr->ba_packet_thr) {
+                   ptr->ba_pkt_count > ptr->ba_packet_thr) {
                        if (mwifiex_space_avail_for_new_ba_stream(adapter)) {
                                mwifiex_create_ba_tbl(priv, ptr->ra, tid,
                                                      BA_SETUP_INPROGRESS);
index 3e60a31582f8b3b15f359a5325a8031de96f4427..68dbbb9c6d1259eb3d83d7c1458126f6fc69a2f2 100644 (file)
@@ -166,6 +166,12 @@ config RT2800USB_RT35XX
          rt2800usb driver.
          Supported chips: RT3572
 
+config RT2800USB_RT3573
+       bool "rt2800usb - Include support for rt3573 devices (EXPERIMENTAL)"
+       ---help---
+         This enables support for RT3573 chipset based wireless USB devices
+         in the rt2800usb driver.
+
 config RT2800USB_RT53XX
        bool "rt2800usb - Include support for rt53xx devices (EXPERIMENTAL)"
        ---help---
index d78c495a86a0b2eed4f3910d1f911617d6e50877..a3132414ac9f4602454b1294d01c4f34dd01dbc3 100644 (file)
@@ -88,6 +88,7 @@
 #define REV_RT3071E                    0x0211
 #define REV_RT3090E                    0x0211
 #define REV_RT3390E                    0x0211
+#define REV_RT3593E                    0x0211
 #define REV_RT5390F                    0x0502
 #define REV_RT5390R                    0x1502
 #define REV_RT5592C                    0x0221
 #define TX_PWR_CFG_0_9MBS              FIELD32(0x00f00000)
 #define TX_PWR_CFG_0_12MBS             FIELD32(0x0f000000)
 #define TX_PWR_CFG_0_18MBS             FIELD32(0xf0000000)
+/* bits for 3T devices */
+#define TX_PWR_CFG_0_CCK1_CH0          FIELD32(0x0000000f)
+#define TX_PWR_CFG_0_CCK1_CH1          FIELD32(0x000000f0)
+#define TX_PWR_CFG_0_CCK5_CH0          FIELD32(0x00000f00)
+#define TX_PWR_CFG_0_CCK5_CH1          FIELD32(0x0000f000)
+#define TX_PWR_CFG_0_OFDM6_CH0         FIELD32(0x000f0000)
+#define TX_PWR_CFG_0_OFDM6_CH1         FIELD32(0x00f00000)
+#define TX_PWR_CFG_0_OFDM12_CH0                FIELD32(0x0f000000)
+#define TX_PWR_CFG_0_OFDM12_CH1                FIELD32(0xf0000000)
 
 /*
  * TX_PWR_CFG_1:
 #define TX_PWR_CFG_1_MCS1              FIELD32(0x00f00000)
 #define TX_PWR_CFG_1_MCS2              FIELD32(0x0f000000)
 #define TX_PWR_CFG_1_MCS3              FIELD32(0xf0000000)
+/* bits for 3T devices */
+#define TX_PWR_CFG_1_OFDM24_CH0                FIELD32(0x0000000f)
+#define TX_PWR_CFG_1_OFDM24_CH1                FIELD32(0x000000f0)
+#define TX_PWR_CFG_1_OFDM48_CH0                FIELD32(0x00000f00)
+#define TX_PWR_CFG_1_OFDM48_CH1                FIELD32(0x0000f000)
+#define TX_PWR_CFG_1_MCS0_CH0          FIELD32(0x000f0000)
+#define TX_PWR_CFG_1_MCS0_CH1          FIELD32(0x00f00000)
+#define TX_PWR_CFG_1_MCS2_CH0          FIELD32(0x0f000000)
+#define TX_PWR_CFG_1_MCS2_CH1          FIELD32(0xf0000000)
 
 /*
  * TX_PWR_CFG_2:
 #define TX_PWR_CFG_2_MCS9              FIELD32(0x00f00000)
 #define TX_PWR_CFG_2_MCS10             FIELD32(0x0f000000)
 #define TX_PWR_CFG_2_MCS11             FIELD32(0xf0000000)
+/* bits for 3T devices */
+#define TX_PWR_CFG_2_MCS4_CH0          FIELD32(0x0000000f)
+#define TX_PWR_CFG_2_MCS4_CH1          FIELD32(0x000000f0)
+#define TX_PWR_CFG_2_MCS6_CH0          FIELD32(0x00000f00)
+#define TX_PWR_CFG_2_MCS6_CH1          FIELD32(0x0000f000)
+#define TX_PWR_CFG_2_MCS8_CH0          FIELD32(0x000f0000)
+#define TX_PWR_CFG_2_MCS8_CH1          FIELD32(0x00f00000)
+#define TX_PWR_CFG_2_MCS10_CH0         FIELD32(0x0f000000)
+#define TX_PWR_CFG_2_MCS10_CH1         FIELD32(0xf0000000)
 
 /*
  * TX_PWR_CFG_3:
 #define TX_PWR_CFG_3_UKNOWN2           FIELD32(0x00f00000)
 #define TX_PWR_CFG_3_UKNOWN3           FIELD32(0x0f000000)
 #define TX_PWR_CFG_3_UKNOWN4           FIELD32(0xf0000000)
+/* bits for 3T devices */
+#define TX_PWR_CFG_3_MCS12_CH0         FIELD32(0x0000000f)
+#define TX_PWR_CFG_3_MCS12_CH1         FIELD32(0x000000f0)
+#define TX_PWR_CFG_3_MCS14_CH0         FIELD32(0x00000f00)
+#define TX_PWR_CFG_3_MCS14_CH1         FIELD32(0x0000f000)
+#define TX_PWR_CFG_3_STBC0_CH0         FIELD32(0x000f0000)
+#define TX_PWR_CFG_3_STBC0_CH1         FIELD32(0x00f00000)
+#define TX_PWR_CFG_3_STBC2_CH0         FIELD32(0x0f000000)
+#define TX_PWR_CFG_3_STBC2_CH1         FIELD32(0xf0000000)
 
 /*
  * TX_PWR_CFG_4:
 #define TX_PWR_CFG_4_UKNOWN6           FIELD32(0x000000f0)
 #define TX_PWR_CFG_4_UKNOWN7           FIELD32(0x00000f00)
 #define TX_PWR_CFG_4_UKNOWN8           FIELD32(0x0000f000)
+/* bits for 3T devices */
+#define TX_PWR_CFG_3_STBC4_CH0         FIELD32(0x0000000f)
+#define TX_PWR_CFG_3_STBC4_CH1         FIELD32(0x000000f0)
+#define TX_PWR_CFG_3_STBC6_CH0         FIELD32(0x00000f00)
+#define TX_PWR_CFG_3_STBC6_CH1         FIELD32(0x0000f000)
 
 /*
  * TX_PIN_CFG:
  */
 #define EXP_ACK_TIME                   0x1380
 
+/* TX_PWR_CFG_5 */
+#define TX_PWR_CFG_5                   0x1384
+#define TX_PWR_CFG_5_MCS16_CH0         FIELD32(0x0000000f)
+#define TX_PWR_CFG_5_MCS16_CH1         FIELD32(0x000000f0)
+#define TX_PWR_CFG_5_MCS16_CH2         FIELD32(0x00000f00)
+#define TX_PWR_CFG_5_MCS18_CH0         FIELD32(0x000f0000)
+#define TX_PWR_CFG_5_MCS18_CH1         FIELD32(0x00f00000)
+#define TX_PWR_CFG_5_MCS18_CH2         FIELD32(0x0f000000)
+
+/* TX_PWR_CFG_6 */
+#define TX_PWR_CFG_6                   0x1388
+#define TX_PWR_CFG_6_MCS20_CH0         FIELD32(0x0000000f)
+#define TX_PWR_CFG_6_MCS20_CH1         FIELD32(0x000000f0)
+#define TX_PWR_CFG_6_MCS20_CH2         FIELD32(0x00000f00)
+#define TX_PWR_CFG_6_MCS22_CH0         FIELD32(0x000f0000)
+#define TX_PWR_CFG_6_MCS22_CH1         FIELD32(0x00f00000)
+#define TX_PWR_CFG_6_MCS22_CH2         FIELD32(0x0f000000)
+
+/* TX_PWR_CFG_0_EXT */
+#define TX_PWR_CFG_0_EXT               0x1390
+#define TX_PWR_CFG_0_EXT_CCK1_CH2      FIELD32(0x0000000f)
+#define TX_PWR_CFG_0_EXT_CCK5_CH2      FIELD32(0x00000f00)
+#define TX_PWR_CFG_0_EXT_OFDM6_CH2     FIELD32(0x000f0000)
+#define TX_PWR_CFG_0_EXT_OFDM12_CH2    FIELD32(0x0f000000)
+
+/* TX_PWR_CFG_1_EXT */
+#define TX_PWR_CFG_1_EXT               0x1394
+#define TX_PWR_CFG_1_EXT_OFDM24_CH2    FIELD32(0x0000000f)
+#define TX_PWR_CFG_1_EXT_OFDM48_CH2    FIELD32(0x00000f00)
+#define TX_PWR_CFG_1_EXT_MCS0_CH2      FIELD32(0x000f0000)
+#define TX_PWR_CFG_1_EXT_MCS2_CH2      FIELD32(0x0f000000)
+
+/* TX_PWR_CFG_2_EXT */
+#define TX_PWR_CFG_2_EXT               0x1398
+#define TX_PWR_CFG_2_EXT_MCS4_CH2      FIELD32(0x0000000f)
+#define TX_PWR_CFG_2_EXT_MCS6_CH2      FIELD32(0x00000f00)
+#define TX_PWR_CFG_2_EXT_MCS8_CH2      FIELD32(0x000f0000)
+#define TX_PWR_CFG_2_EXT_MCS10_CH2     FIELD32(0x0f000000)
+
+/* TX_PWR_CFG_3_EXT */
+#define TX_PWR_CFG_3_EXT               0x139c
+#define TX_PWR_CFG_3_EXT_MCS12_CH2     FIELD32(0x0000000f)
+#define TX_PWR_CFG_3_EXT_MCS14_CH2     FIELD32(0x00000f00)
+#define TX_PWR_CFG_3_EXT_STBC0_CH2     FIELD32(0x000f0000)
+#define TX_PWR_CFG_3_EXT_STBC2_CH2     FIELD32(0x0f000000)
+
+/* TX_PWR_CFG_4_EXT */
+#define TX_PWR_CFG_4_EXT               0x13a0
+#define TX_PWR_CFG_4_EXT_STBC4_CH2     FIELD32(0x0000000f)
+#define TX_PWR_CFG_4_EXT_STBC6_CH2     FIELD32(0x00000f00)
+
+/* TX_PWR_CFG_7 */
+#define TX_PWR_CFG_7                   0x13d4
+#define TX_PWR_CFG_7_OFDM54_CH0                FIELD32(0x0000000f)
+#define TX_PWR_CFG_7_OFDM54_CH1                FIELD32(0x000000f0)
+#define TX_PWR_CFG_7_OFDM54_CH2                FIELD32(0x00000f00)
+#define TX_PWR_CFG_7_MCS7_CH0          FIELD32(0x000f0000)
+#define TX_PWR_CFG_7_MCS7_CH1          FIELD32(0x00f00000)
+#define TX_PWR_CFG_7_MCS7_CH2          FIELD32(0x0f000000)
+
+/* TX_PWR_CFG_8 */
+#define TX_PWR_CFG_8                   0x13d8
+#define TX_PWR_CFG_8_MCS15_CH0         FIELD32(0x0000000f)
+#define TX_PWR_CFG_8_MCS15_CH1         FIELD32(0x000000f0)
+#define TX_PWR_CFG_8_MCS15_CH2         FIELD32(0x00000f00)
+#define TX_PWR_CFG_8_MCS23_CH0         FIELD32(0x000f0000)
+#define TX_PWR_CFG_8_MCS23_CH1         FIELD32(0x00f00000)
+#define TX_PWR_CFG_8_MCS23_CH2         FIELD32(0x0f000000)
+
+/* TX_PWR_CFG_9 */
+#define TX_PWR_CFG_9                   0x13dc
+#define TX_PWR_CFG_9_STBC7_CH0         FIELD32(0x0000000f)
+#define TX_PWR_CFG_9_STBC7_CH1         FIELD32(0x000000f0)
+#define TX_PWR_CFG_9_STBC7_CH2         FIELD32(0x00000f00)
+
 /*
  * RX_FILTER_CFG: RX configuration register.
  */
@@ -1975,6 +2092,10 @@ struct mac_iveiv_entry {
 #define BBP109_TX0_POWER               FIELD8(0x0f)
 #define BBP109_TX1_POWER               FIELD8(0xf0)
 
+/* BBP 110 */
+#define BBP110_TX2_POWER               FIELD8(0x0f)
+
+
 /*
  * BBP 138: Unknown
  */
@@ -2024,6 +2145,12 @@ struct mac_iveiv_entry {
 #define RFCSR3_PA2_CASCODE_BIAS_CCKK   FIELD8(0x80)
 /* Bits for RF3290/RF5360/RF5370/RF5372/RF5390/RF5392 */
 #define RFCSR3_VCOCAL_EN               FIELD8(0x80)
+/* Bits for RF3050 */
+#define RFCSR3_BIT1                    FIELD8(0x02)
+#define RFCSR3_BIT2                    FIELD8(0x04)
+#define RFCSR3_BIT3                    FIELD8(0x08)
+#define RFCSR3_BIT4                    FIELD8(0x10)
+#define RFCSR3_BIT5                    FIELD8(0x20)
 
 /*
  * FRCSR 5:
@@ -2036,6 +2163,8 @@ struct mac_iveiv_entry {
 #define RFCSR6_R1                      FIELD8(0x03)
 #define RFCSR6_R2                      FIELD8(0x40)
 #define RFCSR6_TXDIV           FIELD8(0x0c)
+/* bits for RF3053 */
+#define RFCSR6_VCO_IC                  FIELD8(0xc0)
 
 /*
  * RFCSR 7:
@@ -2060,7 +2189,12 @@ struct mac_iveiv_entry {
  * RFCSR 11:
  */
 #define RFCSR11_R                      FIELD8(0x03)
+#define RFCSR11_PLL_MOD                        FIELD8(0x0c)
 #define RFCSR11_MOD                    FIELD8(0xc0)
+/* bits for RF3053 */
+/* TODO: verify RFCSR11_MOD usage on other chips */
+#define RFCSR11_PLL_IDOH               FIELD8(0x40)
+
 
 /*
  * RFCSR 12:
@@ -2092,6 +2226,10 @@ struct mac_iveiv_entry {
 #define RFCSR17_R                      FIELD8(0x20)
 #define RFCSR17_CODE                   FIELD8(0x7f)
 
+/* RFCSR 18 */
+#define RFCSR18_XO_TUNE_BYPASS         FIELD8(0x40)
+
+
 /*
  * RFCSR 20:
  */
@@ -2152,6 +2290,12 @@ struct mac_iveiv_entry {
 #define RFCSR31_RX_H20M                        FIELD8(0x20)
 #define RFCSR31_RX_CALIB               FIELD8(0x7f)
 
+/* RFCSR 32 bits for RF3053 */
+#define RFCSR32_TX_AGC_FC              FIELD8(0xf8)
+
+/* RFCSR 36 bits for RF3053 */
+#define RFCSR36_RF_BS                  FIELD8(0x80)
+
 /*
  * RFCSR 38:
  */
@@ -2160,6 +2304,7 @@ struct mac_iveiv_entry {
 /*
  * RFCSR 39:
  */
+#define RFCSR39_RX_DIV                 FIELD8(0x40)
 #define RFCSR39_RX_LO2_EN              FIELD8(0x80)
 
 /*
@@ -2167,12 +2312,36 @@ struct mac_iveiv_entry {
  */
 #define RFCSR49_TX                     FIELD8(0x3f)
 #define RFCSR49_EP                     FIELD8(0xc0)
+/* bits for RT3593 */
+#define RFCSR49_TX_LO1_IC              FIELD8(0x1c)
+#define RFCSR49_TX_DIV                 FIELD8(0x20)
 
 /*
  * RFCSR 50:
  */
 #define RFCSR50_TX                     FIELD8(0x3f)
 #define RFCSR50_EP                     FIELD8(0xc0)
+/* bits for RT3593 */
+#define RFCSR50_TX_LO1_EN              FIELD8(0x20)
+#define RFCSR50_TX_LO2_EN              FIELD8(0x10)
+
+/* RFCSR 51 */
+/* bits for RT3593 */
+#define RFCSR51_BITS01                 FIELD8(0x03)
+#define RFCSR51_BITS24                 FIELD8(0x1c)
+#define RFCSR51_BITS57                 FIELD8(0xe0)
+
+#define RFCSR53_TX_POWER               FIELD8(0x3f)
+#define RFCSR53_UNKNOWN                        FIELD8(0xc0)
+
+#define RFCSR54_TX_POWER               FIELD8(0x3f)
+#define RFCSR54_UNKNOWN                        FIELD8(0xc0)
+
+#define RFCSR55_TX_POWER               FIELD8(0x3f)
+#define RFCSR55_UNKNOWN                        FIELD8(0xc0)
+
+#define RFCSR57_DRV_CC                 FIELD8(0xfc)
+
 
 /*
  * RF registers
@@ -2206,28 +2375,67 @@ struct mac_iveiv_entry {
  * The wordsize of the EEPROM is 16 bits.
  */
 
-/*
- * Chip ID
- */
-#define EEPROM_CHIP_ID                 0x0000
+enum rt2800_eeprom_word {
+       EEPROM_CHIP_ID = 0,
+       EEPROM_VERSION,
+       EEPROM_MAC_ADDR_0,
+       EEPROM_MAC_ADDR_1,
+       EEPROM_MAC_ADDR_2,
+       EEPROM_NIC_CONF0,
+       EEPROM_NIC_CONF1,
+       EEPROM_FREQ,
+       EEPROM_LED_AG_CONF,
+       EEPROM_LED_ACT_CONF,
+       EEPROM_LED_POLARITY,
+       EEPROM_NIC_CONF2,
+       EEPROM_LNA,
+       EEPROM_RSSI_BG,
+       EEPROM_RSSI_BG2,
+       EEPROM_TXMIXER_GAIN_BG,
+       EEPROM_RSSI_A,
+       EEPROM_RSSI_A2,
+       EEPROM_TXMIXER_GAIN_A,
+       EEPROM_EIRP_MAX_TX_POWER,
+       EEPROM_TXPOWER_DELTA,
+       EEPROM_TXPOWER_BG1,
+       EEPROM_TXPOWER_BG2,
+       EEPROM_TSSI_BOUND_BG1,
+       EEPROM_TSSI_BOUND_BG2,
+       EEPROM_TSSI_BOUND_BG3,
+       EEPROM_TSSI_BOUND_BG4,
+       EEPROM_TSSI_BOUND_BG5,
+       EEPROM_TXPOWER_A1,
+       EEPROM_TXPOWER_A2,
+       EEPROM_TSSI_BOUND_A1,
+       EEPROM_TSSI_BOUND_A2,
+       EEPROM_TSSI_BOUND_A3,
+       EEPROM_TSSI_BOUND_A4,
+       EEPROM_TSSI_BOUND_A5,
+       EEPROM_TXPOWER_BYRATE,
+       EEPROM_BBP_START,
+
+       /* IDs for extended EEPROM format used by three-chain devices */
+       EEPROM_EXT_LNA2,
+       EEPROM_EXT_TXPOWER_BG3,
+       EEPROM_EXT_TXPOWER_A3,
+
+       /* New values must be added before this */
+       EEPROM_WORD_COUNT
+};
 
 /*
  * EEPROM Version
  */
-#define EEPROM_VERSION                 0x0001
 #define EEPROM_VERSION_FAE             FIELD16(0x00ff)
 #define EEPROM_VERSION_VERSION         FIELD16(0xff00)
 
 /*
  * HW MAC address.
  */
-#define EEPROM_MAC_ADDR_0              0x0002
 #define EEPROM_MAC_ADDR_BYTE0          FIELD16(0x00ff)
 #define EEPROM_MAC_ADDR_BYTE1          FIELD16(0xff00)
-#define EEPROM_MAC_ADDR_1              0x0003
 #define EEPROM_MAC_ADDR_BYTE2          FIELD16(0x00ff)
 #define EEPROM_MAC_ADDR_BYTE3          FIELD16(0xff00)
-#define EEPROM_MAC_ADDR_2              0x0004
 #define EEPROM_MAC_ADDR_BYTE4          FIELD16(0x00ff)
 #define EEPROM_MAC_ADDR_BYTE5          FIELD16(0xff00)
 
@@ -2237,7 +2445,6 @@ struct mac_iveiv_entry {
  * TXPATH: 1: 1T, 2: 2T, 3: 3T
  * RF_TYPE: RFIC type
  */
-#define        EEPROM_NIC_CONF0                0x001a
 #define EEPROM_NIC_CONF0_RXPATH                FIELD16(0x000f)
 #define EEPROM_NIC_CONF0_TXPATH                FIELD16(0x00f0)
 #define EEPROM_NIC_CONF0_RF_TYPE               FIELD16(0x0f00)
@@ -2261,7 +2468,6 @@ struct mac_iveiv_entry {
  * BT_COEXIST: 0: disable, 1: enable
  * DAC_TEST: 0: disable, 1: enable
  */
-#define        EEPROM_NIC_CONF1                0x001b
 #define EEPROM_NIC_CONF1_HW_RADIO              FIELD16(0x0001)
 #define EEPROM_NIC_CONF1_EXTERNAL_TX_ALC               FIELD16(0x0002)
 #define EEPROM_NIC_CONF1_EXTERNAL_LNA_2G               FIELD16(0x0004)
@@ -2281,7 +2487,6 @@ struct mac_iveiv_entry {
 /*
  * EEPROM frequency
  */
-#define        EEPROM_FREQ                     0x001d
 #define EEPROM_FREQ_OFFSET             FIELD16(0x00ff)
 #define EEPROM_FREQ_LED_MODE           FIELD16(0x7f00)
 #define EEPROM_FREQ_LED_POLARITY       FIELD16(0x1000)
@@ -2298,9 +2503,6 @@ struct mac_iveiv_entry {
  * POLARITY_GPIO_4: Polarity GPIO4 setting.
  * LED_MODE: Led mode.
  */
-#define EEPROM_LED_AG_CONF             0x001e
-#define EEPROM_LED_ACT_CONF            0x001f
-#define EEPROM_LED_POLARITY            0x0020
 #define EEPROM_LED_POLARITY_RDY_BG     FIELD16(0x0001)
 #define EEPROM_LED_POLARITY_RDY_A      FIELD16(0x0002)
 #define EEPROM_LED_POLARITY_ACT                FIELD16(0x0004)
@@ -2317,7 +2519,6 @@ struct mac_iveiv_entry {
  * TX_STREAM: 0: Reserved, 1: 1 Stream, 2: 2 Stream
  * CRYSTAL: 00: Reserved, 01: One crystal, 10: Two crystal, 11: Reserved
  */
-#define EEPROM_NIC_CONF2               0x0021
 #define EEPROM_NIC_CONF2_RX_STREAM             FIELD16(0x000f)
 #define EEPROM_NIC_CONF2_TX_STREAM             FIELD16(0x00f0)
 #define EEPROM_NIC_CONF2_CRYSTAL               FIELD16(0x0600)
@@ -2325,54 +2526,46 @@ struct mac_iveiv_entry {
 /*
  * EEPROM LNA
  */
-#define EEPROM_LNA                     0x0022
 #define EEPROM_LNA_BG                  FIELD16(0x00ff)
 #define EEPROM_LNA_A0                  FIELD16(0xff00)
 
 /*
  * EEPROM RSSI BG offset
  */
-#define EEPROM_RSSI_BG                 0x0023
 #define EEPROM_RSSI_BG_OFFSET0         FIELD16(0x00ff)
 #define EEPROM_RSSI_BG_OFFSET1         FIELD16(0xff00)
 
 /*
  * EEPROM RSSI BG2 offset
  */
-#define EEPROM_RSSI_BG2                        0x0024
 #define EEPROM_RSSI_BG2_OFFSET2                FIELD16(0x00ff)
 #define EEPROM_RSSI_BG2_LNA_A1         FIELD16(0xff00)
 
 /*
  * EEPROM TXMIXER GAIN BG offset (note overlaps with EEPROM RSSI BG2).
  */
-#define EEPROM_TXMIXER_GAIN_BG         0x0024
 #define EEPROM_TXMIXER_GAIN_BG_VAL     FIELD16(0x0007)
 
 /*
  * EEPROM RSSI A offset
  */
-#define EEPROM_RSSI_A                  0x0025
 #define EEPROM_RSSI_A_OFFSET0          FIELD16(0x00ff)
 #define EEPROM_RSSI_A_OFFSET1          FIELD16(0xff00)
 
 /*
  * EEPROM RSSI A2 offset
  */
-#define EEPROM_RSSI_A2                 0x0026
 #define EEPROM_RSSI_A2_OFFSET2         FIELD16(0x00ff)
 #define EEPROM_RSSI_A2_LNA_A2          FIELD16(0xff00)
 
 /*
  * EEPROM TXMIXER GAIN A offset (note overlaps with EEPROM RSSI A2).
  */
-#define EEPROM_TXMIXER_GAIN_A          0x0026
 #define EEPROM_TXMIXER_GAIN_A_VAL      FIELD16(0x0007)
 
 /*
  * EEPROM EIRP Maximum TX power values(unit: dbm)
  */
-#define EEPROM_EIRP_MAX_TX_POWER       0x0027
 #define EEPROM_EIRP_MAX_TX_POWER_2GHZ  FIELD16(0x00ff)
 #define EEPROM_EIRP_MAX_TX_POWER_5GHZ  FIELD16(0xff00)
 
@@ -2383,7 +2576,6 @@ struct mac_iveiv_entry {
  * TYPE: 1: Plus the delta value, 0: minus the delta value
  * ENABLE: enable tx power compensation for 40BW
  */
-#define EEPROM_TXPOWER_DELTA           0x0028
 #define EEPROM_TXPOWER_DELTA_VALUE_2G  FIELD16(0x003f)
 #define EEPROM_TXPOWER_DELTA_TYPE_2G   FIELD16(0x0040)
 #define EEPROM_TXPOWER_DELTA_ENABLE_2G FIELD16(0x0080)
@@ -2394,8 +2586,6 @@ struct mac_iveiv_entry {
 /*
  * EEPROM TXPOWER 802.11BG
  */
-#define        EEPROM_TXPOWER_BG1              0x0029
-#define        EEPROM_TXPOWER_BG2              0x0030
 #define EEPROM_TXPOWER_BG_SIZE         7
 #define EEPROM_TXPOWER_BG_1            FIELD16(0x00ff)
 #define EEPROM_TXPOWER_BG_2            FIELD16(0xff00)
@@ -2407,7 +2597,6 @@ struct mac_iveiv_entry {
  * MINUS3: If the actual TSSI is below this boundary, tx power needs to be
  *         reduced by (agc_step * -3)
  */
-#define EEPROM_TSSI_BOUND_BG1          0x0037
 #define EEPROM_TSSI_BOUND_BG1_MINUS4   FIELD16(0x00ff)
 #define EEPROM_TSSI_BOUND_BG1_MINUS3   FIELD16(0xff00)
 
@@ -2418,7 +2607,6 @@ struct mac_iveiv_entry {
  * MINUS1: If the actual TSSI is below this boundary, tx power needs to be
  *         reduced by (agc_step * -1)
  */
-#define EEPROM_TSSI_BOUND_BG2          0x0038
 #define EEPROM_TSSI_BOUND_BG2_MINUS2   FIELD16(0x00ff)
 #define EEPROM_TSSI_BOUND_BG2_MINUS1   FIELD16(0xff00)
 
@@ -2428,7 +2616,6 @@ struct mac_iveiv_entry {
  * PLUS1: If the actual TSSI is above this boundary, tx power needs to be
  *        increased by (agc_step * 1)
  */
-#define EEPROM_TSSI_BOUND_BG3          0x0039
 #define EEPROM_TSSI_BOUND_BG3_REF      FIELD16(0x00ff)
 #define EEPROM_TSSI_BOUND_BG3_PLUS1    FIELD16(0xff00)
 
@@ -2439,7 +2626,6 @@ struct mac_iveiv_entry {
  * PLUS3: If the actual TSSI is above this boundary, tx power needs to be
  *        increased by (agc_step * 3)
  */
-#define EEPROM_TSSI_BOUND_BG4          0x003a
 #define EEPROM_TSSI_BOUND_BG4_PLUS2    FIELD16(0x00ff)
 #define EEPROM_TSSI_BOUND_BG4_PLUS3    FIELD16(0xff00)
 
@@ -2449,19 +2635,20 @@ struct mac_iveiv_entry {
  *        increased by (agc_step * 4)
  * AGC_STEP: Temperature compensation step.
  */
-#define EEPROM_TSSI_BOUND_BG5          0x003b
 #define EEPROM_TSSI_BOUND_BG5_PLUS4    FIELD16(0x00ff)
 #define EEPROM_TSSI_BOUND_BG5_AGC_STEP FIELD16(0xff00)
 
 /*
  * EEPROM TXPOWER 802.11A
  */
-#define EEPROM_TXPOWER_A1              0x003c
-#define EEPROM_TXPOWER_A2              0x0053
 #define EEPROM_TXPOWER_A_SIZE          6
 #define EEPROM_TXPOWER_A_1             FIELD16(0x00ff)
 #define EEPROM_TXPOWER_A_2             FIELD16(0xff00)
 
+/* EEPROM_TXPOWER_{A,G} fields for RT3593 */
+#define EEPROM_TXPOWER_ALC             FIELD8(0x1f)
+#define EEPROM_TXPOWER_FINE_CTRL       FIELD8(0xe0)
+
 /*
  * EEPROM temperature compensation boundaries 802.11A
  * MINUS4: If the actual TSSI is below this boundary, tx power needs to be
@@ -2469,7 +2656,6 @@ struct mac_iveiv_entry {
  * MINUS3: If the actual TSSI is below this boundary, tx power needs to be
  *         reduced by (agc_step * -3)
  */
-#define EEPROM_TSSI_BOUND_A1           0x006a
 #define EEPROM_TSSI_BOUND_A1_MINUS4    FIELD16(0x00ff)
 #define EEPROM_TSSI_BOUND_A1_MINUS3    FIELD16(0xff00)
 
@@ -2480,7 +2666,6 @@ struct mac_iveiv_entry {
  * MINUS1: If the actual TSSI is below this boundary, tx power needs to be
  *         reduced by (agc_step * -1)
  */
-#define EEPROM_TSSI_BOUND_A2           0x006b
 #define EEPROM_TSSI_BOUND_A2_MINUS2    FIELD16(0x00ff)
 #define EEPROM_TSSI_BOUND_A2_MINUS1    FIELD16(0xff00)
 
@@ -2490,7 +2675,6 @@ struct mac_iveiv_entry {
  * PLUS1: If the actual TSSI is above this boundary, tx power needs to be
  *        increased by (agc_step * 1)
  */
-#define EEPROM_TSSI_BOUND_A3           0x006c
 #define EEPROM_TSSI_BOUND_A3_REF       FIELD16(0x00ff)
 #define EEPROM_TSSI_BOUND_A3_PLUS1     FIELD16(0xff00)
 
@@ -2501,7 +2685,6 @@ struct mac_iveiv_entry {
  * PLUS3: If the actual TSSI is above this boundary, tx power needs to be
  *        increased by (agc_step * 3)
  */
-#define EEPROM_TSSI_BOUND_A4           0x006d
 #define EEPROM_TSSI_BOUND_A4_PLUS2     FIELD16(0x00ff)
 #define EEPROM_TSSI_BOUND_A4_PLUS3     FIELD16(0xff00)
 
@@ -2511,14 +2694,12 @@ struct mac_iveiv_entry {
  *        increased by (agc_step * 4)
  * AGC_STEP: Temperature compensation step.
  */
-#define EEPROM_TSSI_BOUND_A5           0x006e
 #define EEPROM_TSSI_BOUND_A5_PLUS4     FIELD16(0x00ff)
 #define EEPROM_TSSI_BOUND_A5_AGC_STEP  FIELD16(0xff00)
 
 /*
  * EEPROM TXPOWER by rate: tx power per tx rate for HT20 mode
  */
-#define EEPROM_TXPOWER_BYRATE          0x006f
 #define EEPROM_TXPOWER_BYRATE_SIZE     9
 
 #define EEPROM_TXPOWER_BYRATE_RATE0    FIELD16(0x000f)
@@ -2529,11 +2710,14 @@ struct mac_iveiv_entry {
 /*
  * EEPROM BBP.
  */
-#define        EEPROM_BBP_START                0x0078
 #define EEPROM_BBP_SIZE                        16
 #define EEPROM_BBP_VALUE               FIELD16(0x00ff)
 #define EEPROM_BBP_REG_ID              FIELD16(0xff00)
 
+/* EEPROM_EXT_LNA2 */
+#define EEPROM_EXT_LNA2_A1             FIELD16(0x00ff)
+#define EEPROM_EXT_LNA2_A2             FIELD16(0xff00)
+
 /*
  * EEPROM IQ Calibration, unlike other entries those are byte addresses.
  */
@@ -2630,6 +2814,7 @@ struct mac_iveiv_entry {
 #define TXWI_DESC_SIZE_5WORDS          (5 * sizeof(__le32))
 
 #define RXWI_DESC_SIZE_4WORDS          (4 * sizeof(__le32))
+#define RXWI_DESC_SIZE_5WORDS          (5 * sizeof(__le32))
 #define RXWI_DESC_SIZE_6WORDS          (6 * sizeof(__le32))
 
 /*
@@ -2750,18 +2935,15 @@ struct mac_iveiv_entry {
 #define MAX_A_TXPOWER  15
 #define DEFAULT_TXPOWER        5
 
+#define MIN_A_TXPOWER_3593     0
+#define MAX_A_TXPOWER_3593     31
+
 #define TXPOWER_G_FROM_DEV(__txpower) \
        ((__txpower) > MAX_G_TXPOWER) ? DEFAULT_TXPOWER : (__txpower)
 
-#define TXPOWER_G_TO_DEV(__txpower) \
-       clamp_t(char, __txpower, MIN_G_TXPOWER, MAX_G_TXPOWER)
-
 #define TXPOWER_A_FROM_DEV(__txpower) \
        ((__txpower) > MAX_A_TXPOWER) ? DEFAULT_TXPOWER : (__txpower)
 
-#define TXPOWER_A_TO_DEV(__txpower) \
-       clamp_t(char, __txpower, MIN_A_TXPOWER, MAX_A_TXPOWER)
-
 /*
  *  Board's maximum TX power limitation
  */
index 1f80ea5e29dde51068a2ad0815bbd2e4a49b6fcb..313da6ac2ee4bc36fc32d1da4b1c84f208b3a626 100644 (file)
@@ -221,6 +221,157 @@ static void rt2800_rf_write(struct rt2x00_dev *rt2x00dev,
        mutex_unlock(&rt2x00dev->csr_mutex);
 }
 
/* EEPROM word offsets for the standard rt2800 chipsets.
 * Indexed by enum rt2800_eeprom_word.  An offset of 0 is only valid
 * for EEPROM_CHIP_ID; for any other word it means "not present on
 * this chipset" (see rt2800_eeprom_word_index()).
 */
static const unsigned int rt2800_eeprom_map[EEPROM_WORD_COUNT] = {
	[EEPROM_CHIP_ID]		= 0x0000,
	[EEPROM_VERSION]		= 0x0001,
	[EEPROM_MAC_ADDR_0]		= 0x0002,
	[EEPROM_MAC_ADDR_1]		= 0x0003,
	[EEPROM_MAC_ADDR_2]		= 0x0004,
	[EEPROM_NIC_CONF0]		= 0x001a,
	[EEPROM_NIC_CONF1]		= 0x001b,
	[EEPROM_FREQ]			= 0x001d,
	[EEPROM_LED_AG_CONF]		= 0x001e,
	[EEPROM_LED_ACT_CONF]		= 0x001f,
	[EEPROM_LED_POLARITY]		= 0x0020,
	[EEPROM_NIC_CONF2]		= 0x0021,
	[EEPROM_LNA]			= 0x0022,
	[EEPROM_RSSI_BG]		= 0x0023,
	[EEPROM_RSSI_BG2]		= 0x0024,
	[EEPROM_TXMIXER_GAIN_BG]	= 0x0024, /* overlaps with RSSI_BG2 */
	[EEPROM_RSSI_A]			= 0x0025,
	[EEPROM_RSSI_A2]		= 0x0026,
	[EEPROM_TXMIXER_GAIN_A]		= 0x0026, /* overlaps with RSSI_A2 */
	[EEPROM_EIRP_MAX_TX_POWER]	= 0x0027,
	[EEPROM_TXPOWER_DELTA]		= 0x0028,
	[EEPROM_TXPOWER_BG1]		= 0x0029,
	[EEPROM_TXPOWER_BG2]		= 0x0030,
	[EEPROM_TSSI_BOUND_BG1]		= 0x0037,
	[EEPROM_TSSI_BOUND_BG2]		= 0x0038,
	[EEPROM_TSSI_BOUND_BG3]		= 0x0039,
	[EEPROM_TSSI_BOUND_BG4]		= 0x003a,
	[EEPROM_TSSI_BOUND_BG5]		= 0x003b,
	[EEPROM_TXPOWER_A1]		= 0x003c,
	[EEPROM_TXPOWER_A2]		= 0x0053,
	[EEPROM_TSSI_BOUND_A1]		= 0x006a,
	[EEPROM_TSSI_BOUND_A2]		= 0x006b,
	[EEPROM_TSSI_BOUND_A3]		= 0x006c,
	[EEPROM_TSSI_BOUND_A4]		= 0x006d,
	[EEPROM_TSSI_BOUND_A5]		= 0x006e,
	[EEPROM_TXPOWER_BYRATE]		= 0x006f,
	[EEPROM_BBP_START]		= 0x0078,
};
+
/* EEPROM word offsets for the extended layout used by RT3593 devices.
 * Indexed by enum rt2800_eeprom_word, like rt2800_eeprom_map.  Words
 * left at 0 (other than EEPROM_CHIP_ID) do not exist in this layout.
 */
static const unsigned int rt2800_eeprom_map_ext[EEPROM_WORD_COUNT] = {
	[EEPROM_CHIP_ID]		= 0x0000,
	[EEPROM_VERSION]		= 0x0001,
	[EEPROM_MAC_ADDR_0]		= 0x0002,
	[EEPROM_MAC_ADDR_1]		= 0x0003,
	[EEPROM_MAC_ADDR_2]		= 0x0004,
	[EEPROM_NIC_CONF0]		= 0x001a,
	[EEPROM_NIC_CONF1]		= 0x001b,
	[EEPROM_NIC_CONF2]		= 0x001c,
	[EEPROM_EIRP_MAX_TX_POWER]	= 0x0020,
	[EEPROM_FREQ]			= 0x0022,
	[EEPROM_LED_AG_CONF]		= 0x0023,
	[EEPROM_LED_ACT_CONF]		= 0x0024,
	[EEPROM_LED_POLARITY]		= 0x0025,
	[EEPROM_LNA]			= 0x0026,
	[EEPROM_EXT_LNA2]		= 0x0027,
	[EEPROM_RSSI_BG]		= 0x0028,
	[EEPROM_TXPOWER_DELTA]		= 0x0028, /* Overlaps with RSSI_BG */
	[EEPROM_RSSI_BG2]		= 0x0029,
	[EEPROM_TXMIXER_GAIN_BG]	= 0x0029, /* Overlaps with RSSI_BG2 */
	[EEPROM_RSSI_A]			= 0x002a,
	[EEPROM_RSSI_A2]		= 0x002b,
	[EEPROM_TXMIXER_GAIN_A]		= 0x002b, /* Overlaps with RSSI_A2 */
	[EEPROM_TXPOWER_BG1]		= 0x0030,
	[EEPROM_TXPOWER_BG2]		= 0x0037,
	[EEPROM_EXT_TXPOWER_BG3]	= 0x003e,
	[EEPROM_TSSI_BOUND_BG1]		= 0x0045,
	[EEPROM_TSSI_BOUND_BG2]		= 0x0046,
	[EEPROM_TSSI_BOUND_BG3]		= 0x0047,
	[EEPROM_TSSI_BOUND_BG4]		= 0x0048,
	[EEPROM_TSSI_BOUND_BG5]		= 0x0049,
	[EEPROM_TXPOWER_A1]		= 0x004b,
	[EEPROM_TXPOWER_A2]		= 0x0065,
	[EEPROM_EXT_TXPOWER_A3]		= 0x007f,
	[EEPROM_TSSI_BOUND_A1]		= 0x009a,
	[EEPROM_TSSI_BOUND_A2]		= 0x009b,
	[EEPROM_TSSI_BOUND_A3]		= 0x009c,
	[EEPROM_TSSI_BOUND_A4]		= 0x009d,
	[EEPROM_TSSI_BOUND_A5]		= 0x009e,
	[EEPROM_TXPOWER_BYRATE]		= 0x00a0,
};
+
+static unsigned int rt2800_eeprom_word_index(struct rt2x00_dev *rt2x00dev,
+                                            const enum rt2800_eeprom_word word)
+{
+       const unsigned int *map;
+       unsigned int index;
+
+       if (WARN_ONCE(word >= EEPROM_WORD_COUNT,
+                     "%s: invalid EEPROM word %d\n",
+                     wiphy_name(rt2x00dev->hw->wiphy), word))
+               return 0;
+
+       if (rt2x00_rt(rt2x00dev, RT3593))
+               map = rt2800_eeprom_map_ext;
+       else
+               map = rt2800_eeprom_map;
+
+       index = map[word];
+
+       /* Index 0 is valid only for EEPROM_CHIP_ID.
+        * Otherwise it means that the offset of the
+        * given word is not initialized in the map,
+        * or that the field is not usable on the
+        * actual chipset.
+        */
+       WARN_ONCE(word != EEPROM_CHIP_ID && index == 0,
+                 "%s: invalid access of EEPROM word %d\n",
+                 wiphy_name(rt2x00dev->hw->wiphy), word);
+
+       return index;
+}
+
+static void *rt2800_eeprom_addr(struct rt2x00_dev *rt2x00dev,
+                               const enum rt2800_eeprom_word word)
+{
+       unsigned int index;
+
+       index = rt2800_eeprom_word_index(rt2x00dev, word);
+       return rt2x00_eeprom_addr(rt2x00dev, index);
+}
+
+static void rt2800_eeprom_read(struct rt2x00_dev *rt2x00dev,
+                              const enum rt2800_eeprom_word word, u16 *data)
+{
+       unsigned int index;
+
+       index = rt2800_eeprom_word_index(rt2x00dev, word);
+       rt2x00_eeprom_read(rt2x00dev, index, data);
+}
+
+static void rt2800_eeprom_write(struct rt2x00_dev *rt2x00dev,
+                               const enum rt2800_eeprom_word word, u16 data)
+{
+       unsigned int index;
+
+       index = rt2800_eeprom_word_index(rt2x00dev, word);
+       rt2x00_eeprom_write(rt2x00dev, index, data);
+}
+
+static void rt2800_eeprom_read_from_array(struct rt2x00_dev *rt2x00dev,
+                                         const enum rt2800_eeprom_word array,
+                                         unsigned int offset,
+                                         u16 *data)
+{
+       unsigned int index;
+
+       index = rt2800_eeprom_word_index(rt2x00dev, array);
+       rt2x00_eeprom_read(rt2x00dev, index + offset, data);
+}
+
 static int rt2800_enable_wlan_rt3290(struct rt2x00_dev *rt2x00dev)
 {
        u32 reg;
@@ -370,6 +521,29 @@ void rt2800_disable_wpdma(struct rt2x00_dev *rt2x00dev)
 }
 EXPORT_SYMBOL_GPL(rt2800_disable_wpdma);
 
+void rt2800_get_txwi_rxwi_size(struct rt2x00_dev *rt2x00dev,
+                              unsigned short *txwi_size,
+                              unsigned short *rxwi_size)
+{
+       switch (rt2x00dev->chip.rt) {
+       case RT3593:
+               *txwi_size = TXWI_DESC_SIZE_4WORDS;
+               *rxwi_size = RXWI_DESC_SIZE_5WORDS;
+               break;
+
+       case RT5592:
+               *txwi_size = TXWI_DESC_SIZE_5WORDS;
+               *rxwi_size = RXWI_DESC_SIZE_6WORDS;
+               break;
+
+       default:
+               *txwi_size = TXWI_DESC_SIZE_4WORDS;
+               *rxwi_size = RXWI_DESC_SIZE_4WORDS;
+               break;
+       }
+}
+EXPORT_SYMBOL_GPL(rt2800_get_txwi_rxwi_size);
+
 static bool rt2800_check_firmware_crc(const u8 *data, const size_t len)
 {
        u16 fw_crc;
@@ -609,16 +783,16 @@ static int rt2800_agc_to_rssi(struct rt2x00_dev *rt2x00dev, u32 rxwi_w2)
        u8 offset2;
 
        if (rt2x00dev->curr_band == IEEE80211_BAND_2GHZ) {
-               rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_BG, &eeprom);
+               rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_BG, &eeprom);
                offset0 = rt2x00_get_field16(eeprom, EEPROM_RSSI_BG_OFFSET0);
                offset1 = rt2x00_get_field16(eeprom, EEPROM_RSSI_BG_OFFSET1);
-               rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_BG2, &eeprom);
+               rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_BG2, &eeprom);
                offset2 = rt2x00_get_field16(eeprom, EEPROM_RSSI_BG2_OFFSET2);
        } else {
-               rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_A, &eeprom);
+               rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_A, &eeprom);
                offset0 = rt2x00_get_field16(eeprom, EEPROM_RSSI_A_OFFSET0);
                offset1 = rt2x00_get_field16(eeprom, EEPROM_RSSI_A_OFFSET1);
-               rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_A2, &eeprom);
+               rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_A2, &eeprom);
                offset2 = rt2x00_get_field16(eeprom, EEPROM_RSSI_A2_OFFSET2);
        }
 
@@ -890,6 +1064,9 @@ const struct rt2x00debug rt2800_rt2x00debug = {
                .word_count     = CSR_REG_SIZE / sizeof(u32),
        },
        .eeprom = {
+               /* NOTE: The local EEPROM access functions can't
+                * be used here, use the generic versions instead.
+                */
                .read           = rt2x00_eeprom_read,
                .write          = rt2x00_eeprom_write,
                .word_base      = EEPROM_BASE,
@@ -1547,7 +1724,7 @@ static void rt2800_config_3572bt_ant(struct rt2x00_dev *rt2x00dev)
        led_r_mode = rt2x00_get_field32(reg, LED_CFG_LED_POLAR) ? 0 : 3;
        if (led_g_mode != rt2x00_get_field32(reg, LED_CFG_G_LED_MODE) ||
            led_r_mode != rt2x00_get_field32(reg, LED_CFG_R_LED_MODE)) {
-               rt2x00_eeprom_read(rt2x00dev, EEPROM_FREQ, &eeprom);
+               rt2800_eeprom_read(rt2x00dev, EEPROM_FREQ, &eeprom);
                led_ctrl = rt2x00_get_field16(eeprom, EEPROM_FREQ_LED_MODE);
                if (led_ctrl == 0 || led_ctrl > 0x40) {
                        rt2x00_set_field32(&reg, LED_CFG_G_LED_MODE, led_g_mode);
@@ -1609,7 +1786,7 @@ void rt2800_config_ant(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant)
                        rt2x00_set_field8(&r1, BBP1_TX_ANTENNA, 2);
                break;
        case 3:
-               rt2x00_set_field8(&r1, BBP1_TX_ANTENNA, 0);
+               rt2x00_set_field8(&r1, BBP1_TX_ANTENNA, 2);
                break;
        }
 
@@ -1622,7 +1799,7 @@ void rt2800_config_ant(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant)
                    rt2x00_rt(rt2x00dev, RT3090) ||
                    rt2x00_rt(rt2x00dev, RT3352) ||
                    rt2x00_rt(rt2x00dev, RT3390)) {
-                       rt2x00_eeprom_read(rt2x00dev,
+                       rt2800_eeprom_read(rt2x00dev,
                                           EEPROM_NIC_CONF1, &eeprom);
                        if (rt2x00_get_field16(eeprom,
                                                EEPROM_NIC_CONF1_ANT_DIVERSITY))
@@ -1649,6 +1826,13 @@ void rt2800_config_ant(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant)
 
        rt2800_bbp_write(rt2x00dev, 3, r3);
        rt2800_bbp_write(rt2x00dev, 1, r1);
+
+       if (rt2x00_rt(rt2x00dev, RT3593)) {
+               if (ant->rx_chain_num == 1)
+                       rt2800_bbp_write(rt2x00dev, 86, 0x00);
+               else
+                       rt2800_bbp_write(rt2x00dev, 86, 0x46);
+       }
 }
 EXPORT_SYMBOL_GPL(rt2800_config_ant);
 
@@ -1659,17 +1843,31 @@ static void rt2800_config_lna_gain(struct rt2x00_dev *rt2x00dev,
        short lna_gain;
 
        if (libconf->rf.channel <= 14) {
-               rt2x00_eeprom_read(rt2x00dev, EEPROM_LNA, &eeprom);
+               rt2800_eeprom_read(rt2x00dev, EEPROM_LNA, &eeprom);
                lna_gain = rt2x00_get_field16(eeprom, EEPROM_LNA_BG);
        } else if (libconf->rf.channel <= 64) {
-               rt2x00_eeprom_read(rt2x00dev, EEPROM_LNA, &eeprom);
+               rt2800_eeprom_read(rt2x00dev, EEPROM_LNA, &eeprom);
                lna_gain = rt2x00_get_field16(eeprom, EEPROM_LNA_A0);
        } else if (libconf->rf.channel <= 128) {
-               rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_BG2, &eeprom);
-               lna_gain = rt2x00_get_field16(eeprom, EEPROM_RSSI_BG2_LNA_A1);
+               if (rt2x00_rt(rt2x00dev, RT3593)) {
+                       rt2800_eeprom_read(rt2x00dev, EEPROM_EXT_LNA2, &eeprom);
+                       lna_gain = rt2x00_get_field16(eeprom,
+                                                     EEPROM_EXT_LNA2_A1);
+               } else {
+                       rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_BG2, &eeprom);
+                       lna_gain = rt2x00_get_field16(eeprom,
+                                                     EEPROM_RSSI_BG2_LNA_A1);
+               }
        } else {
-               rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_A2, &eeprom);
-               lna_gain = rt2x00_get_field16(eeprom, EEPROM_RSSI_A2_LNA_A2);
+               if (rt2x00_rt(rt2x00dev, RT3593)) {
+                       rt2800_eeprom_read(rt2x00dev, EEPROM_EXT_LNA2, &eeprom);
+                       lna_gain = rt2x00_get_field16(eeprom,
+                                                     EEPROM_EXT_LNA2_A2);
+               } else {
+                       rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_A2, &eeprom);
+                       lna_gain = rt2x00_get_field16(eeprom,
+                                                     EEPROM_RSSI_A2_LNA_A2);
+               }
        }
 
        rt2x00dev->lna_gain = lna_gain;
@@ -1993,6 +2191,303 @@ static void rt2800_config_channel_rf3052(struct rt2x00_dev *rt2x00dev,
        rt2800_rfcsr_write(rt2x00dev, 7, rfcsr);
 }
 
+static void rt2800_config_channel_rf3053(struct rt2x00_dev *rt2x00dev,
+                                        struct ieee80211_conf *conf,
+                                        struct rf_channel *rf,
+                                        struct channel_info *info)
+{
+       struct rt2800_drv_data *drv_data = rt2x00dev->drv_data;
+       u8 txrx_agc_fc;
+       u8 txrx_h20m;
+       u8 rfcsr;
+       u8 bbp;
+       const bool txbf_enabled = false; /* TODO */
+
+       /* TODO: use TX{0,1,2}FinePowerControl values from EEPROM */
+       rt2800_bbp_read(rt2x00dev, 109, &bbp);
+       rt2x00_set_field8(&bbp, BBP109_TX0_POWER, 0);
+       rt2x00_set_field8(&bbp, BBP109_TX1_POWER, 0);
+       rt2800_bbp_write(rt2x00dev, 109, bbp);
+
+       rt2800_bbp_read(rt2x00dev, 110, &bbp);
+       rt2x00_set_field8(&bbp, BBP110_TX2_POWER, 0);
+       rt2800_bbp_write(rt2x00dev, 110, bbp);
+
+       if (rf->channel <= 14) {
+               /* Restore BBP 25 & 26 for 2.4 GHz */
+               rt2800_bbp_write(rt2x00dev, 25, drv_data->bbp25);
+               rt2800_bbp_write(rt2x00dev, 26, drv_data->bbp26);
+       } else {
+               /* Hard code BBP 25 & 26 for 5GHz */
+
+               /* Enable IQ Phase correction */
+               rt2800_bbp_write(rt2x00dev, 25, 0x09);
+               /* Setup IQ Phase correction value */
+               rt2800_bbp_write(rt2x00dev, 26, 0xff);
+       }
+
+       rt2800_rfcsr_write(rt2x00dev, 8, rf->rf1);
+       rt2800_rfcsr_write(rt2x00dev, 9, rf->rf3 & 0xf);
+
+       rt2800_rfcsr_read(rt2x00dev, 11, &rfcsr);
+       rt2x00_set_field8(&rfcsr, RFCSR11_R, (rf->rf2 & 0x3));
+       rt2800_rfcsr_write(rt2x00dev, 11, rfcsr);
+
+       rt2800_rfcsr_read(rt2x00dev, 11, &rfcsr);
+       rt2x00_set_field8(&rfcsr, RFCSR11_PLL_IDOH, 1);
+       if (rf->channel <= 14)
+               rt2x00_set_field8(&rfcsr, RFCSR11_PLL_MOD, 1);
+       else
+               rt2x00_set_field8(&rfcsr, RFCSR11_PLL_MOD, 2);
+       rt2800_rfcsr_write(rt2x00dev, 11, rfcsr);
+
+       rt2800_rfcsr_read(rt2x00dev, 53, &rfcsr);
+       if (rf->channel <= 14) {
+               rfcsr = 0;
+               rt2x00_set_field8(&rfcsr, RFCSR53_TX_POWER,
+                                 info->default_power1 & 0x1f);
+       } else {
+               if (rt2x00_is_usb(rt2x00dev))
+                       rfcsr = 0x40;
+
+               rt2x00_set_field8(&rfcsr, RFCSR53_TX_POWER,
+                                 ((info->default_power1 & 0x18) << 1) |
+                                 (info->default_power1 & 7));
+       }
+       rt2800_rfcsr_write(rt2x00dev, 53, rfcsr);
+
+       rt2800_rfcsr_read(rt2x00dev, 55, &rfcsr);
+       if (rf->channel <= 14) {
+               rfcsr = 0;
+               rt2x00_set_field8(&rfcsr, RFCSR55_TX_POWER,
+                                 info->default_power2 & 0x1f);
+       } else {
+               if (rt2x00_is_usb(rt2x00dev))
+                       rfcsr = 0x40;
+
+               rt2x00_set_field8(&rfcsr, RFCSR55_TX_POWER,
+                                 ((info->default_power2 & 0x18) << 1) |
+                                 (info->default_power2 & 7));
+       }
+       rt2800_rfcsr_write(rt2x00dev, 55, rfcsr);
+
+       rt2800_rfcsr_read(rt2x00dev, 54, &rfcsr);
+       if (rf->channel <= 14) {
+               rfcsr = 0;
+               rt2x00_set_field8(&rfcsr, RFCSR54_TX_POWER,
+                                 info->default_power3 & 0x1f);
+       } else {
+               if (rt2x00_is_usb(rt2x00dev))
+                       rfcsr = 0x40;
+
+               rt2x00_set_field8(&rfcsr, RFCSR54_TX_POWER,
+                                 ((info->default_power3 & 0x18) << 1) |
+                                 (info->default_power3 & 7));
+       }
+       rt2800_rfcsr_write(rt2x00dev, 54, rfcsr);
+
+       rt2800_rfcsr_read(rt2x00dev, 1, &rfcsr);
+       rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD, 0);
+       rt2x00_set_field8(&rfcsr, RFCSR1_TX0_PD, 0);
+       rt2x00_set_field8(&rfcsr, RFCSR1_RX1_PD, 0);
+       rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD, 0);
+       rt2x00_set_field8(&rfcsr, RFCSR1_RX2_PD, 0);
+       rt2x00_set_field8(&rfcsr, RFCSR1_TX2_PD, 0);
+       rt2x00_set_field8(&rfcsr, RFCSR1_RF_BLOCK_EN, 1);
+       rt2x00_set_field8(&rfcsr, RFCSR1_PLL_PD, 1);
+
+       switch (rt2x00dev->default_ant.tx_chain_num) {
+       case 3:
+               rt2x00_set_field8(&rfcsr, RFCSR1_TX2_PD, 1);
+               /* fallthrough */
+       case 2:
+               rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD, 1);
+               /* fallthrough */
+       case 1:
+               rt2x00_set_field8(&rfcsr, RFCSR1_TX0_PD, 1);
+               break;
+       }
+
+       switch (rt2x00dev->default_ant.rx_chain_num) {
+       case 3:
+               rt2x00_set_field8(&rfcsr, RFCSR1_RX2_PD, 1);
+               /* fallthrough */
+       case 2:
+               rt2x00_set_field8(&rfcsr, RFCSR1_RX1_PD, 1);
+               /* fallthrough */
+       case 1:
+               rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD, 1);
+               break;
+       }
+       rt2800_rfcsr_write(rt2x00dev, 1, rfcsr);
+
+       /* TODO: frequency calibration? */
+
+       if (conf_is_ht40(conf)) {
+               txrx_agc_fc = rt2x00_get_field8(drv_data->calibration_bw40,
+                                               RFCSR24_TX_AGC_FC);
+               txrx_h20m = rt2x00_get_field8(drv_data->calibration_bw40,
+                                             RFCSR24_TX_H20M);
+       } else {
+               txrx_agc_fc = rt2x00_get_field8(drv_data->calibration_bw20,
+                                               RFCSR24_TX_AGC_FC);
+               txrx_h20m = rt2x00_get_field8(drv_data->calibration_bw20,
+                                             RFCSR24_TX_H20M);
+       }
+
+       /* NOTE: the reference driver does not writes the new value
+        * back to RFCSR 32
+        */
+       rt2800_rfcsr_read(rt2x00dev, 32, &rfcsr);
+       rt2x00_set_field8(&rfcsr, RFCSR32_TX_AGC_FC, txrx_agc_fc);
+
+       if (rf->channel <= 14)
+               rfcsr = 0xa0;
+       else
+               rfcsr = 0x80;
+       rt2800_rfcsr_write(rt2x00dev, 31, rfcsr);
+
+       rt2800_rfcsr_read(rt2x00dev, 30, &rfcsr);
+       rt2x00_set_field8(&rfcsr, RFCSR30_TX_H20M, txrx_h20m);
+       rt2x00_set_field8(&rfcsr, RFCSR30_RX_H20M, txrx_h20m);
+       rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
+
+       /* Band selection */
+       rt2800_rfcsr_read(rt2x00dev, 36, &rfcsr);
+       if (rf->channel <= 14)
+               rt2x00_set_field8(&rfcsr, RFCSR36_RF_BS, 1);
+       else
+               rt2x00_set_field8(&rfcsr, RFCSR36_RF_BS, 0);
+       rt2800_rfcsr_write(rt2x00dev, 36, rfcsr);
+
+       rt2800_rfcsr_read(rt2x00dev, 34, &rfcsr);
+       if (rf->channel <= 14)
+               rfcsr = 0x3c;
+       else
+               rfcsr = 0x20;
+       rt2800_rfcsr_write(rt2x00dev, 34, rfcsr);
+
+       rt2800_rfcsr_read(rt2x00dev, 12, &rfcsr);
+       if (rf->channel <= 14)
+               rfcsr = 0x1a;
+       else
+               rfcsr = 0x12;
+       rt2800_rfcsr_write(rt2x00dev, 12, rfcsr);
+
+       rt2800_rfcsr_read(rt2x00dev, 6, &rfcsr);
+       if (rf->channel >= 1 && rf->channel <= 14)
+               rt2x00_set_field8(&rfcsr, RFCSR6_VCO_IC, 1);
+       else if (rf->channel >= 36 && rf->channel <= 64)
+               rt2x00_set_field8(&rfcsr, RFCSR6_VCO_IC, 2);
+       else if (rf->channel >= 100 && rf->channel <= 128)
+               rt2x00_set_field8(&rfcsr, RFCSR6_VCO_IC, 2);
+       else
+               rt2x00_set_field8(&rfcsr, RFCSR6_VCO_IC, 1);
+       rt2800_rfcsr_write(rt2x00dev, 6, rfcsr);
+
+       rt2800_rfcsr_read(rt2x00dev, 30, &rfcsr);
+       rt2x00_set_field8(&rfcsr, RFCSR30_RX_VCM, 2);
+       rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
+
+       rt2800_rfcsr_write(rt2x00dev, 46, 0x60);
+
+       if (rf->channel <= 14) {
+               rt2800_rfcsr_write(rt2x00dev, 10, 0xd3);
+               rt2800_rfcsr_write(rt2x00dev, 13, 0x12);
+       } else {
+               rt2800_rfcsr_write(rt2x00dev, 10, 0xd8);
+               rt2800_rfcsr_write(rt2x00dev, 13, 0x23);
+       }
+
+       rt2800_rfcsr_read(rt2x00dev, 51, &rfcsr);
+       rt2x00_set_field8(&rfcsr, RFCSR51_BITS01, 1);
+       rt2800_rfcsr_write(rt2x00dev, 51, rfcsr);
+
+       rt2800_rfcsr_read(rt2x00dev, 51, &rfcsr);
+       if (rf->channel <= 14) {
+               rt2x00_set_field8(&rfcsr, RFCSR51_BITS24, 5);
+               rt2x00_set_field8(&rfcsr, RFCSR51_BITS57, 3);
+       } else {
+               rt2x00_set_field8(&rfcsr, RFCSR51_BITS24, 4);
+               rt2x00_set_field8(&rfcsr, RFCSR51_BITS57, 2);
+       }
+       rt2800_rfcsr_write(rt2x00dev, 51, rfcsr);
+
+       rt2800_rfcsr_read(rt2x00dev, 49, &rfcsr);
+       if (rf->channel <= 14)
+               rt2x00_set_field8(&rfcsr, RFCSR49_TX_LO1_IC, 3);
+       else
+               rt2x00_set_field8(&rfcsr, RFCSR49_TX_LO1_IC, 2);
+
+       if (txbf_enabled)
+               rt2x00_set_field8(&rfcsr, RFCSR49_TX_DIV, 1);
+
+       rt2800_rfcsr_write(rt2x00dev, 49, rfcsr);
+
+       rt2800_rfcsr_read(rt2x00dev, 50, &rfcsr);
+       rt2x00_set_field8(&rfcsr, RFCSR50_TX_LO1_EN, 0);
+       rt2800_rfcsr_write(rt2x00dev, 50, rfcsr);
+
+       rt2800_rfcsr_read(rt2x00dev, 57, &rfcsr);
+       if (rf->channel <= 14)
+               rt2x00_set_field8(&rfcsr, RFCSR57_DRV_CC, 0x1b);
+       else
+               rt2x00_set_field8(&rfcsr, RFCSR57_DRV_CC, 0x0f);
+       rt2800_rfcsr_write(rt2x00dev, 57, rfcsr);
+
+       if (rf->channel <= 14) {
+               rt2800_rfcsr_write(rt2x00dev, 44, 0x93);
+               rt2800_rfcsr_write(rt2x00dev, 52, 0x45);
+       } else {
+               rt2800_rfcsr_write(rt2x00dev, 44, 0x9b);
+               rt2800_rfcsr_write(rt2x00dev, 52, 0x05);
+       }
+
+       /* Initiate VCO calibration */
+       rt2800_rfcsr_read(rt2x00dev, 3, &rfcsr);
+       if (rf->channel <= 14) {
+               rt2x00_set_field8(&rfcsr, RFCSR3_VCOCAL_EN, 1);
+       } else {
+               rt2x00_set_field8(&rfcsr, RFCSR3_BIT1, 1);
+               rt2x00_set_field8(&rfcsr, RFCSR3_BIT2, 1);
+               rt2x00_set_field8(&rfcsr, RFCSR3_BIT3, 1);
+               rt2x00_set_field8(&rfcsr, RFCSR3_BIT4, 1);
+               rt2x00_set_field8(&rfcsr, RFCSR3_BIT5, 1);
+               rt2x00_set_field8(&rfcsr, RFCSR3_VCOCAL_EN, 1);
+       }
+       rt2800_rfcsr_write(rt2x00dev, 3, rfcsr);
+
+       if (rf->channel >= 1 && rf->channel <= 14) {
+               rfcsr = 0x23;
+               if (txbf_enabled)
+                       rt2x00_set_field8(&rfcsr, RFCSR39_RX_DIV, 1);
+               rt2800_rfcsr_write(rt2x00dev, 39, rfcsr);
+
+               rt2800_rfcsr_write(rt2x00dev, 45, 0xbb);
+       } else if (rf->channel >= 36 && rf->channel <= 64) {
+               rfcsr = 0x36;
+               if (txbf_enabled)
+                       rt2x00_set_field8(&rfcsr, RFCSR39_RX_DIV, 1);
+               rt2800_rfcsr_write(rt2x00dev, 39, 0x36);
+
+               rt2800_rfcsr_write(rt2x00dev, 45, 0xeb);
+       } else if (rf->channel >= 100 && rf->channel <= 128) {
+               rfcsr = 0x32;
+               if (txbf_enabled)
+                       rt2x00_set_field8(&rfcsr, RFCSR39_RX_DIV, 1);
+               rt2800_rfcsr_write(rt2x00dev, 39, rfcsr);
+
+               rt2800_rfcsr_write(rt2x00dev, 45, 0xb3);
+       } else {
+               rfcsr = 0x30;
+               if (txbf_enabled)
+                       rt2x00_set_field8(&rfcsr, RFCSR39_RX_DIV, 1);
+               rt2800_rfcsr_write(rt2x00dev, 39, rfcsr);
+
+               rt2800_rfcsr_write(rt2x00dev, 45, 0x9b);
+       }
+}
+
 #define POWER_BOUND            0x27
 #define POWER_BOUND_5G         0x2b
 #define FREQ_OFFSET_BOUND      0x5f
@@ -2563,6 +3058,23 @@ static void rt2800_iq_calibrate(struct rt2x00_dev *rt2x00dev, int channel)
        rt2800_bbp_write(rt2x00dev, 159, cal != 0xff ? cal : 0);
 }
 
+static char rt2800_txpower_to_dev(struct rt2x00_dev *rt2x00dev,
+                                 unsigned int channel,
+                                 char txpower)
+{
+       if (rt2x00_rt(rt2x00dev, RT3593))
+               txpower = rt2x00_get_field8(txpower, EEPROM_TXPOWER_ALC);
+
+       if (channel <= 14)
+               return clamp_t(char, txpower, MIN_G_TXPOWER, MAX_G_TXPOWER);
+
+       if (rt2x00_rt(rt2x00dev, RT3593))
+               return clamp_t(char, txpower, MIN_A_TXPOWER_3593,
+                              MAX_A_TXPOWER_3593);
+       else
+               return clamp_t(char, txpower, MIN_A_TXPOWER, MAX_A_TXPOWER);
+}
+
 static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
                                  struct ieee80211_conf *conf,
                                  struct rf_channel *rf,
@@ -2572,13 +3084,14 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
        unsigned int tx_pin;
        u8 bbp, rfcsr;
 
-       if (rf->channel <= 14) {
-               info->default_power1 = TXPOWER_G_TO_DEV(info->default_power1);
-               info->default_power2 = TXPOWER_G_TO_DEV(info->default_power2);
-       } else {
-               info->default_power1 = TXPOWER_A_TO_DEV(info->default_power1);
-               info->default_power2 = TXPOWER_A_TO_DEV(info->default_power2);
-       }
+       info->default_power1 = rt2800_txpower_to_dev(rt2x00dev, rf->channel,
+                                                    info->default_power1);
+       info->default_power2 = rt2800_txpower_to_dev(rt2x00dev, rf->channel,
+                                                    info->default_power2);
+       if (rt2x00dev->default_ant.tx_chain_num > 2)
+               info->default_power3 =
+                       rt2800_txpower_to_dev(rt2x00dev, rf->channel,
+                                             info->default_power3);
 
        switch (rt2x00dev->chip.rf) {
        case RF2020:
@@ -2591,6 +3104,9 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
        case RF3052:
                rt2800_config_channel_rf3052(rt2x00dev, conf, rf, info);
                break;
+       case RF3053:
+               rt2800_config_channel_rf3053(rt2x00dev, conf, rf, info);
+               break;
        case RF3290:
                rt2800_config_channel_rf3290(rt2x00dev, conf, rf, info);
                break;
@@ -2636,6 +3152,23 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
                rt2800_bbp_write(rt2x00dev, 66, 0x26 + rt2x00dev->lna_gain);
                rt2800_bbp_write(rt2x00dev, 27, 0x20);
                rt2800_bbp_write(rt2x00dev, 66, 0x26 + rt2x00dev->lna_gain);
+       } else if (rt2x00_rt(rt2x00dev, RT3593)) {
+               if (rf->channel > 14) {
+                       /* Disable CCK Packet detection on 5GHz */
+                       rt2800_bbp_write(rt2x00dev, 70, 0x00);
+               } else {
+                       rt2800_bbp_write(rt2x00dev, 70, 0x0a);
+               }
+
+               if (conf_is_ht40(conf))
+                       rt2800_bbp_write(rt2x00dev, 105, 0x04);
+               else
+                       rt2800_bbp_write(rt2x00dev, 105, 0x34);
+
+               rt2800_bbp_write(rt2x00dev, 62, 0x37 - rt2x00dev->lna_gain);
+               rt2800_bbp_write(rt2x00dev, 63, 0x37 - rt2x00dev->lna_gain);
+               rt2800_bbp_write(rt2x00dev, 64, 0x37 - rt2x00dev->lna_gain);
+               rt2800_bbp_write(rt2x00dev, 77, 0x98);
        } else {
                rt2800_bbp_write(rt2x00dev, 62, 0x37 - rt2x00dev->lna_gain);
                rt2800_bbp_write(rt2x00dev, 63, 0x37 - rt2x00dev->lna_gain);
@@ -2651,16 +3184,27 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
                                rt2800_bbp_write(rt2x00dev, 82, 0x62);
                                rt2800_bbp_write(rt2x00dev, 75, 0x46);
                        } else {
-                               rt2800_bbp_write(rt2x00dev, 82, 0x84);
+                               if (rt2x00_rt(rt2x00dev, RT3593))
+                                       rt2800_bbp_write(rt2x00dev, 82, 0x62);
+                               else
+                                       rt2800_bbp_write(rt2x00dev, 82, 0x84);
                                rt2800_bbp_write(rt2x00dev, 75, 0x50);
                        }
+                       if (rt2x00_rt(rt2x00dev, RT3593))
+                               rt2800_bbp_write(rt2x00dev, 83, 0x8a);
                }
+
        } else {
                if (rt2x00_rt(rt2x00dev, RT3572))
                        rt2800_bbp_write(rt2x00dev, 82, 0x94);
+               else if (rt2x00_rt(rt2x00dev, RT3593))
+                       rt2800_bbp_write(rt2x00dev, 82, 0x82);
                else
                        rt2800_bbp_write(rt2x00dev, 82, 0xf2);
 
+               if (rt2x00_rt(rt2x00dev, RT3593))
+                       rt2800_bbp_write(rt2x00dev, 83, 0x9a);
+
                if (test_bit(CAPABILITY_EXTERNAL_LNA_A, &rt2x00dev->cap_flags))
                        rt2800_bbp_write(rt2x00dev, 75, 0x46);
                else
@@ -2731,6 +3275,41 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
        if (rt2x00_rt(rt2x00dev, RT3572))
                rt2800_rfcsr_write(rt2x00dev, 8, 0x80);
 
+       if (rt2x00_rt(rt2x00dev, RT3593)) {
+               if (rt2x00_is_usb(rt2x00dev)) {
+                       rt2800_register_read(rt2x00dev, GPIO_CTRL, &reg);
+
+                       /* Band selection. GPIO #8 controls all paths */
+                       rt2x00_set_field32(&reg, GPIO_CTRL_DIR8, 0);
+                       if (rf->channel <= 14)
+                               rt2x00_set_field32(&reg, GPIO_CTRL_VAL8, 1);
+                       else
+                               rt2x00_set_field32(&reg, GPIO_CTRL_VAL8, 0);
+
+                       rt2x00_set_field32(&reg, GPIO_CTRL_DIR4, 0);
+                       rt2x00_set_field32(&reg, GPIO_CTRL_DIR7, 0);
+
+                       /* LNA PE control.
+                        * GPIO #4 controls PE0 and PE1,
+                        * GPIO #7 controls PE2
+                        */
+                       rt2x00_set_field32(&reg, GPIO_CTRL_VAL4, 1);
+                       rt2x00_set_field32(&reg, GPIO_CTRL_VAL7, 1);
+
+                       rt2800_register_write(rt2x00dev, GPIO_CTRL, reg);
+               }
+
+               /* AGC init */
+               if (rf->channel <= 14)
+                       reg = 0x1c + 2 * rt2x00dev->lna_gain;
+               else
+                       reg = 0x22 + ((rt2x00dev->lna_gain * 5) / 3);
+
+               rt2800_bbp_write_with_rx_chain(rt2x00dev, 66, reg);
+
+               usleep_range(1000, 1500);
+       }
+
        if (rt2x00_rt(rt2x00dev, RT5592)) {
                rt2800_bbp_write(rt2x00dev, 195, 141);
                rt2800_bbp_write(rt2x00dev, 196, conf_is_ht40(conf) ? 0x10 : 0x1a);
@@ -2798,62 +3377,62 @@ static int rt2800_get_gain_calibration_delta(struct rt2x00_dev *rt2x00dev)
         * Example TSSI bounds  0xF0 0xD0 0xB5 0xA0 0x88 0x45 0x25 0x15 0x00
         */
        if (rt2x00dev->curr_band == IEEE80211_BAND_2GHZ) {
-               rt2x00_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_BG1, &eeprom);
+               rt2800_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_BG1, &eeprom);
                tssi_bounds[0] = rt2x00_get_field16(eeprom,
                                        EEPROM_TSSI_BOUND_BG1_MINUS4);
                tssi_bounds[1] = rt2x00_get_field16(eeprom,
                                        EEPROM_TSSI_BOUND_BG1_MINUS3);
 
-               rt2x00_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_BG2, &eeprom);
+               rt2800_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_BG2, &eeprom);
                tssi_bounds[2] = rt2x00_get_field16(eeprom,
                                        EEPROM_TSSI_BOUND_BG2_MINUS2);
                tssi_bounds[3] = rt2x00_get_field16(eeprom,
                                        EEPROM_TSSI_BOUND_BG2_MINUS1);
 
-               rt2x00_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_BG3, &eeprom);
+               rt2800_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_BG3, &eeprom);
                tssi_bounds[4] = rt2x00_get_field16(eeprom,
                                        EEPROM_TSSI_BOUND_BG3_REF);
                tssi_bounds[5] = rt2x00_get_field16(eeprom,
                                        EEPROM_TSSI_BOUND_BG3_PLUS1);
 
-               rt2x00_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_BG4, &eeprom);
+               rt2800_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_BG4, &eeprom);
                tssi_bounds[6] = rt2x00_get_field16(eeprom,
                                        EEPROM_TSSI_BOUND_BG4_PLUS2);
                tssi_bounds[7] = rt2x00_get_field16(eeprom,
                                        EEPROM_TSSI_BOUND_BG4_PLUS3);
 
-               rt2x00_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_BG5, &eeprom);
+               rt2800_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_BG5, &eeprom);
                tssi_bounds[8] = rt2x00_get_field16(eeprom,
                                        EEPROM_TSSI_BOUND_BG5_PLUS4);
 
                step = rt2x00_get_field16(eeprom,
                                          EEPROM_TSSI_BOUND_BG5_AGC_STEP);
        } else {
-               rt2x00_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_A1, &eeprom);
+               rt2800_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_A1, &eeprom);
                tssi_bounds[0] = rt2x00_get_field16(eeprom,
                                        EEPROM_TSSI_BOUND_A1_MINUS4);
                tssi_bounds[1] = rt2x00_get_field16(eeprom,
                                        EEPROM_TSSI_BOUND_A1_MINUS3);
 
-               rt2x00_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_A2, &eeprom);
+               rt2800_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_A2, &eeprom);
                tssi_bounds[2] = rt2x00_get_field16(eeprom,
                                        EEPROM_TSSI_BOUND_A2_MINUS2);
                tssi_bounds[3] = rt2x00_get_field16(eeprom,
                                        EEPROM_TSSI_BOUND_A2_MINUS1);
 
-               rt2x00_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_A3, &eeprom);
+               rt2800_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_A3, &eeprom);
                tssi_bounds[4] = rt2x00_get_field16(eeprom,
                                        EEPROM_TSSI_BOUND_A3_REF);
                tssi_bounds[5] = rt2x00_get_field16(eeprom,
                                        EEPROM_TSSI_BOUND_A3_PLUS1);
 
-               rt2x00_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_A4, &eeprom);
+               rt2800_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_A4, &eeprom);
                tssi_bounds[6] = rt2x00_get_field16(eeprom,
                                        EEPROM_TSSI_BOUND_A4_PLUS2);
                tssi_bounds[7] = rt2x00_get_field16(eeprom,
                                        EEPROM_TSSI_BOUND_A4_PLUS3);
 
-               rt2x00_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_A5, &eeprom);
+               rt2800_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_A5, &eeprom);
                tssi_bounds[8] = rt2x00_get_field16(eeprom,
                                        EEPROM_TSSI_BOUND_A5_PLUS4);
 
@@ -2899,7 +3478,7 @@ static int rt2800_get_txpower_bw_comp(struct rt2x00_dev *rt2x00dev,
        u8 comp_type;
        int comp_value = 0;
 
-       rt2x00_eeprom_read(rt2x00dev, EEPROM_TXPOWER_DELTA, &eeprom);
+       rt2800_eeprom_read(rt2x00dev, EEPROM_TXPOWER_DELTA, &eeprom);
 
        /*
         * HT40 compensation not required.
@@ -2966,6 +3545,9 @@ static u8 rt2800_compensate_txpower(struct rt2x00_dev *rt2x00dev, int is_rate_b,
        u8 eirp_txpower_criterion;
        u8 reg_limit;
 
+       if (rt2x00_rt(rt2x00dev, RT3593))
+               return min_t(u8, txpower, 0xc);
+
        if (test_bit(CAPABILITY_POWER_LIMIT, &rt2x00dev->cap_flags)) {
                /*
                 * Check if eirp txpower exceed txpower_limit.
@@ -2974,12 +3556,12 @@ static u8 rt2800_compensate_txpower(struct rt2x00_dev *rt2x00dev, int is_rate_b,
                 * .11b data rate need add additional 4dbm
                 * when calculating eirp txpower.
                 */
-               rt2x00_eeprom_read(rt2x00dev, EEPROM_TXPOWER_BYRATE + 1,
-                                  &eeprom);
+               rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_TXPOWER_BYRATE,
+                                             1, &eeprom);
                criterion = rt2x00_get_field16(eeprom,
                                               EEPROM_TXPOWER_BYRATE_RATE0);
 
-               rt2x00_eeprom_read(rt2x00dev, EEPROM_EIRP_MAX_TX_POWER,
+               rt2800_eeprom_read(rt2x00dev, EEPROM_EIRP_MAX_TX_POWER,
                                   &eeprom);
 
                if (band == IEEE80211_BAND_2GHZ)
@@ -3001,6 +3583,412 @@ static u8 rt2800_compensate_txpower(struct rt2x00_dev *rt2x00dev, int is_rate_b,
        return min_t(u8, txpower, 0xc);
 }
 
+
+enum {
+       TX_PWR_CFG_0_IDX,
+       TX_PWR_CFG_1_IDX,
+       TX_PWR_CFG_2_IDX,
+       TX_PWR_CFG_3_IDX,
+       TX_PWR_CFG_4_IDX,
+       TX_PWR_CFG_5_IDX,
+       TX_PWR_CFG_6_IDX,
+       TX_PWR_CFG_7_IDX,
+       TX_PWR_CFG_8_IDX,
+       TX_PWR_CFG_9_IDX,
+       TX_PWR_CFG_0_EXT_IDX,
+       TX_PWR_CFG_1_EXT_IDX,
+       TX_PWR_CFG_2_EXT_IDX,
+       TX_PWR_CFG_3_EXT_IDX,
+       TX_PWR_CFG_4_EXT_IDX,
+       TX_PWR_CFG_IDX_COUNT,
+};
+
+static void rt2800_config_txpower_rt3593(struct rt2x00_dev *rt2x00dev,
+                                        struct ieee80211_channel *chan,
+                                        int power_level)
+{
+       u8 txpower;
+       u16 eeprom;
+       u32 regs[TX_PWR_CFG_IDX_COUNT];
+       unsigned int offset;
+       enum ieee80211_band band = chan->band;
+       int delta;
+       int i;
+
+       memset(regs, '\0', sizeof(regs));
+
+       /* TODO: adapt TX power reduction from the rt28xx code */
+
+       /* calculate temperature compensation delta */
+       delta = rt2800_get_gain_calibration_delta(rt2x00dev);
+
+       if (band == IEEE80211_BAND_5GHZ)
+               offset = 16;
+       else
+               offset = 0;
+
+       if (test_bit(CONFIG_CHANNEL_HT40, &rt2x00dev->flags))
+               offset += 8;
+
+       /* read the next four txpower values */
+       rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_TXPOWER_BYRATE,
+                                     offset, &eeprom);
+
+       /* CCK 1MBS,2MBS */
+       txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE0);
+       txpower = rt2800_compensate_txpower(rt2x00dev, 1, band, power_level,
+                                           txpower, delta);
+       rt2x00_set_field32(&regs[TX_PWR_CFG_0_IDX],
+                          TX_PWR_CFG_0_CCK1_CH0, txpower);
+       rt2x00_set_field32(&regs[TX_PWR_CFG_0_IDX],
+                          TX_PWR_CFG_0_CCK1_CH1, txpower);
+       rt2x00_set_field32(&regs[TX_PWR_CFG_0_EXT_IDX],
+                          TX_PWR_CFG_0_EXT_CCK1_CH2, txpower);
+
+       /* CCK 5.5MBS,11MBS */
+       txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE1);
+       txpower = rt2800_compensate_txpower(rt2x00dev, 1, band, power_level,
+                                           txpower, delta);
+       rt2x00_set_field32(&regs[TX_PWR_CFG_0_IDX],
+                          TX_PWR_CFG_0_CCK5_CH0, txpower);
+       rt2x00_set_field32(&regs[TX_PWR_CFG_0_IDX],
+                          TX_PWR_CFG_0_CCK5_CH1, txpower);
+       rt2x00_set_field32(&regs[TX_PWR_CFG_0_EXT_IDX],
+                          TX_PWR_CFG_0_EXT_CCK5_CH2, txpower);
+
+       /* OFDM 6MBS,9MBS */
+       txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE2);
+       txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
+                                           txpower, delta);
+       rt2x00_set_field32(&regs[TX_PWR_CFG_0_IDX],
+                          TX_PWR_CFG_0_OFDM6_CH0, txpower);
+       rt2x00_set_field32(&regs[TX_PWR_CFG_0_IDX],
+                          TX_PWR_CFG_0_OFDM6_CH1, txpower);
+       rt2x00_set_field32(&regs[TX_PWR_CFG_0_EXT_IDX],
+                          TX_PWR_CFG_0_EXT_OFDM6_CH2, txpower);
+
+       /* OFDM 12MBS,18MBS */
+       txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE3);
+       txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
+                                           txpower, delta);
+       rt2x00_set_field32(&regs[TX_PWR_CFG_0_IDX],
+                          TX_PWR_CFG_0_OFDM12_CH0, txpower);
+       rt2x00_set_field32(&regs[TX_PWR_CFG_0_IDX],
+                          TX_PWR_CFG_0_OFDM12_CH1, txpower);
+       rt2x00_set_field32(&regs[TX_PWR_CFG_0_EXT_IDX],
+                          TX_PWR_CFG_0_EXT_OFDM12_CH2, txpower);
+
+       /* read the next four txpower values */
+       rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_TXPOWER_BYRATE,
+                                     offset + 1, &eeprom);
+
+       /* OFDM 24MBS,36MBS */
+       txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE0);
+       txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
+                                           txpower, delta);
+       rt2x00_set_field32(&regs[TX_PWR_CFG_1_IDX],
+                          TX_PWR_CFG_1_OFDM24_CH0, txpower);
+       rt2x00_set_field32(&regs[TX_PWR_CFG_1_IDX],
+                          TX_PWR_CFG_1_OFDM24_CH1, txpower);
+       rt2x00_set_field32(&regs[TX_PWR_CFG_1_EXT_IDX],
+                          TX_PWR_CFG_1_EXT_OFDM24_CH2, txpower);
+
+       /* OFDM 48MBS */
+       txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE1);
+       txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
+                                           txpower, delta);
+       rt2x00_set_field32(&regs[TX_PWR_CFG_1_IDX],
+                          TX_PWR_CFG_1_OFDM48_CH0, txpower);
+       rt2x00_set_field32(&regs[TX_PWR_CFG_1_IDX],
+                          TX_PWR_CFG_1_OFDM48_CH1, txpower);
+       rt2x00_set_field32(&regs[TX_PWR_CFG_1_EXT_IDX],
+                          TX_PWR_CFG_1_EXT_OFDM48_CH2, txpower);
+
+       /* OFDM 54MBS */
+       txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE2);
+       txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
+                                           txpower, delta);
+       rt2x00_set_field32(&regs[TX_PWR_CFG_7_IDX],
+                          TX_PWR_CFG_7_OFDM54_CH0, txpower);
+       rt2x00_set_field32(&regs[TX_PWR_CFG_7_IDX],
+                          TX_PWR_CFG_7_OFDM54_CH1, txpower);
+       rt2x00_set_field32(&regs[TX_PWR_CFG_7_IDX],
+                          TX_PWR_CFG_7_OFDM54_CH2, txpower);
+
+       /* read the next four txpower values */
+       rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_TXPOWER_BYRATE,
+                                     offset + 2, &eeprom);
+
+       /* MCS 0,1 */
+       txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE0);
+       txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
+                                           txpower, delta);
+       rt2x00_set_field32(&regs[TX_PWR_CFG_1_IDX],
+                          TX_PWR_CFG_1_MCS0_CH0, txpower);
+       rt2x00_set_field32(&regs[TX_PWR_CFG_1_IDX],
+                          TX_PWR_CFG_1_MCS0_CH1, txpower);
+       rt2x00_set_field32(&regs[TX_PWR_CFG_1_EXT_IDX],
+                          TX_PWR_CFG_1_EXT_MCS0_CH2, txpower);
+
+       /* MCS 2,3 */
+       txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE1);
+       txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
+                                           txpower, delta);
+       rt2x00_set_field32(&regs[TX_PWR_CFG_1_IDX],
+                          TX_PWR_CFG_1_MCS2_CH0, txpower);
+       rt2x00_set_field32(&regs[TX_PWR_CFG_1_IDX],
+                          TX_PWR_CFG_1_MCS2_CH1, txpower);
+       rt2x00_set_field32(&regs[TX_PWR_CFG_1_EXT_IDX],
+                          TX_PWR_CFG_1_EXT_MCS2_CH2, txpower);
+
+       /* MCS 4,5 */
+       txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE2);
+       txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
+                                           txpower, delta);
+       rt2x00_set_field32(&regs[TX_PWR_CFG_2_IDX],
+                          TX_PWR_CFG_2_MCS4_CH0, txpower);
+       rt2x00_set_field32(&regs[TX_PWR_CFG_2_IDX],
+                          TX_PWR_CFG_2_MCS4_CH1, txpower);
+       rt2x00_set_field32(&regs[TX_PWR_CFG_2_EXT_IDX],
+                          TX_PWR_CFG_2_EXT_MCS4_CH2, txpower);
+
+       /* MCS 6 */
+       txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE3);
+       txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
+                                           txpower, delta);
+       rt2x00_set_field32(&regs[TX_PWR_CFG_2_IDX],
+                          TX_PWR_CFG_2_MCS6_CH0, txpower);
+       rt2x00_set_field32(&regs[TX_PWR_CFG_2_IDX],
+                          TX_PWR_CFG_2_MCS6_CH1, txpower);
+       rt2x00_set_field32(&regs[TX_PWR_CFG_2_EXT_IDX],
+                          TX_PWR_CFG_2_EXT_MCS6_CH2, txpower);
+
+       /* read the next four txpower values */
+       rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_TXPOWER_BYRATE,
+                                     offset + 3, &eeprom);
+
+       /* MCS 7 */
+       txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE0);
+       txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
+                                           txpower, delta);
+       rt2x00_set_field32(&regs[TX_PWR_CFG_7_IDX],
+                          TX_PWR_CFG_7_MCS7_CH0, txpower);
+       rt2x00_set_field32(&regs[TX_PWR_CFG_7_IDX],
+                          TX_PWR_CFG_7_MCS7_CH1, txpower);
+       rt2x00_set_field32(&regs[TX_PWR_CFG_7_IDX],
+                          TX_PWR_CFG_7_MCS7_CH2, txpower);
+
+       /* MCS 8,9 */
+       txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE1);
+       txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
+                                           txpower, delta);
+       rt2x00_set_field32(&regs[TX_PWR_CFG_2_IDX],
+                          TX_PWR_CFG_2_MCS8_CH0, txpower);
+       rt2x00_set_field32(&regs[TX_PWR_CFG_2_IDX],
+                          TX_PWR_CFG_2_MCS8_CH1, txpower);
+       rt2x00_set_field32(&regs[TX_PWR_CFG_2_EXT_IDX],
+                          TX_PWR_CFG_2_EXT_MCS8_CH2, txpower);
+
+       /* MCS 10,11 */
+       txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE2);
+       txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
+                                           txpower, delta);
+       rt2x00_set_field32(&regs[TX_PWR_CFG_2_IDX],
+                          TX_PWR_CFG_2_MCS10_CH0, txpower);
+       rt2x00_set_field32(&regs[TX_PWR_CFG_2_IDX],
+                          TX_PWR_CFG_2_MCS10_CH1, txpower);
+       rt2x00_set_field32(&regs[TX_PWR_CFG_2_EXT_IDX],
+                          TX_PWR_CFG_2_EXT_MCS10_CH2, txpower);
+
+       /* MCS 12,13 */
+       txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE3);
+       txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
+                                           txpower, delta);
+       rt2x00_set_field32(&regs[TX_PWR_CFG_3_IDX],
+                          TX_PWR_CFG_3_MCS12_CH0, txpower);
+       rt2x00_set_field32(&regs[TX_PWR_CFG_3_IDX],
+                          TX_PWR_CFG_3_MCS12_CH1, txpower);
+       rt2x00_set_field32(&regs[TX_PWR_CFG_3_EXT_IDX],
+                          TX_PWR_CFG_3_EXT_MCS12_CH2, txpower);
+
+       /* read the next four txpower values */
+       rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_TXPOWER_BYRATE,
+                                     offset + 4, &eeprom);
+
+       /* MCS 14 */
+       txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE0);
+       txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
+                                           txpower, delta);
+       rt2x00_set_field32(&regs[TX_PWR_CFG_3_IDX],
+                          TX_PWR_CFG_3_MCS14_CH0, txpower);
+       rt2x00_set_field32(&regs[TX_PWR_CFG_3_IDX],
+                          TX_PWR_CFG_3_MCS14_CH1, txpower);
+       rt2x00_set_field32(&regs[TX_PWR_CFG_3_EXT_IDX],
+                          TX_PWR_CFG_3_EXT_MCS14_CH2, txpower);
+
+       /* MCS 15 */
+       txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE1);
+       txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
+                                           txpower, delta);
+       rt2x00_set_field32(&regs[TX_PWR_CFG_8_IDX],
+                          TX_PWR_CFG_8_MCS15_CH0, txpower);
+       rt2x00_set_field32(&regs[TX_PWR_CFG_8_IDX],
+                          TX_PWR_CFG_8_MCS15_CH1, txpower);
+       rt2x00_set_field32(&regs[TX_PWR_CFG_8_IDX],
+                          TX_PWR_CFG_8_MCS15_CH2, txpower);
+
+       /* MCS 16,17 */
+       txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE2);
+       txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
+                                           txpower, delta);
+       rt2x00_set_field32(&regs[TX_PWR_CFG_5_IDX],
+                          TX_PWR_CFG_5_MCS16_CH0, txpower);
+       rt2x00_set_field32(&regs[TX_PWR_CFG_5_IDX],
+                          TX_PWR_CFG_5_MCS16_CH1, txpower);
+       rt2x00_set_field32(&regs[TX_PWR_CFG_5_IDX],
+                          TX_PWR_CFG_5_MCS16_CH2, txpower);
+
+       /* MCS 18,19 */
+       txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE3);
+       txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
+                                           txpower, delta);
+       rt2x00_set_field32(&regs[TX_PWR_CFG_5_IDX],
+                          TX_PWR_CFG_5_MCS18_CH0, txpower);
+       rt2x00_set_field32(&regs[TX_PWR_CFG_5_IDX],
+                          TX_PWR_CFG_5_MCS18_CH1, txpower);
+       rt2x00_set_field32(&regs[TX_PWR_CFG_5_IDX],
+                          TX_PWR_CFG_5_MCS18_CH2, txpower);
+
+       /* read the next four txpower values */
+       rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_TXPOWER_BYRATE,
+                                     offset + 5, &eeprom);
+
+       /* MCS 20,21 */
+       txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE0);
+       txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
+                                           txpower, delta);
+       rt2x00_set_field32(&regs[TX_PWR_CFG_6_IDX],
+                          TX_PWR_CFG_6_MCS20_CH0, txpower);
+       rt2x00_set_field32(&regs[TX_PWR_CFG_6_IDX],
+                          TX_PWR_CFG_6_MCS20_CH1, txpower);
+       rt2x00_set_field32(&regs[TX_PWR_CFG_6_IDX],
+                          TX_PWR_CFG_6_MCS20_CH2, txpower);
+
+       /* MCS 22 */
+       txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE1);
+       txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
+                                           txpower, delta);
+       rt2x00_set_field32(&regs[TX_PWR_CFG_6_IDX],
+                          TX_PWR_CFG_6_MCS22_CH0, txpower);
+       rt2x00_set_field32(&regs[TX_PWR_CFG_6_IDX],
+                          TX_PWR_CFG_6_MCS22_CH1, txpower);
+       rt2x00_set_field32(&regs[TX_PWR_CFG_6_IDX],
+                          TX_PWR_CFG_6_MCS22_CH2, txpower);
+
+       /* MCS 23 */
+       txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE2);
+       txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
+                                           txpower, delta);
+       rt2x00_set_field32(&regs[TX_PWR_CFG_8_IDX],
+                          TX_PWR_CFG_8_MCS23_CH0, txpower);
+       rt2x00_set_field32(&regs[TX_PWR_CFG_8_IDX],
+                          TX_PWR_CFG_8_MCS23_CH1, txpower);
+       rt2x00_set_field32(&regs[TX_PWR_CFG_8_IDX],
+                          TX_PWR_CFG_8_MCS23_CH2, txpower);
+
+       /* read the next four txpower values */
+       rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_TXPOWER_BYRATE,
+                                     offset + 6, &eeprom);
+
+       /* STBC, MCS 0,1 */
+       txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE0);
+       txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
+                                           txpower, delta);
+       rt2x00_set_field32(&regs[TX_PWR_CFG_3_IDX],
+                          TX_PWR_CFG_3_STBC0_CH0, txpower);
+       rt2x00_set_field32(&regs[TX_PWR_CFG_3_IDX],
+                          TX_PWR_CFG_3_STBC0_CH1, txpower);
+       rt2x00_set_field32(&regs[TX_PWR_CFG_3_EXT_IDX],
+                          TX_PWR_CFG_3_EXT_STBC0_CH2, txpower);
+
+       /* STBC, MCS 2,3 */
+       txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE1);
+       txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
+                                           txpower, delta);
+       rt2x00_set_field32(&regs[TX_PWR_CFG_3_IDX],
+                          TX_PWR_CFG_3_STBC2_CH0, txpower);
+       rt2x00_set_field32(&regs[TX_PWR_CFG_3_IDX],
+                          TX_PWR_CFG_3_STBC2_CH1, txpower);
+       rt2x00_set_field32(&regs[TX_PWR_CFG_3_EXT_IDX],
+                          TX_PWR_CFG_3_EXT_STBC2_CH2, txpower);
+
+       /* STBC, MCS 4,5 */
+       txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE2);
+       txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
+                                           txpower, delta);
+       rt2x00_set_field32(&regs[TX_PWR_CFG_4_IDX], TX_PWR_CFG_RATE0, txpower);
+       rt2x00_set_field32(&regs[TX_PWR_CFG_4_IDX], TX_PWR_CFG_RATE1, txpower);
+       rt2x00_set_field32(&regs[TX_PWR_CFG_4_EXT_IDX], TX_PWR_CFG_RATE0,
+                          txpower);
+
+       /* STBC, MCS 6 */
+       txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE3);
+       txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
+                                           txpower, delta);
+       rt2x00_set_field32(&regs[TX_PWR_CFG_4_IDX], TX_PWR_CFG_RATE2, txpower);
+       rt2x00_set_field32(&regs[TX_PWR_CFG_4_IDX], TX_PWR_CFG_RATE3, txpower);
+       rt2x00_set_field32(&regs[TX_PWR_CFG_4_EXT_IDX], TX_PWR_CFG_RATE2,
+                          txpower);
+
+       /* read the next four txpower values */
+       rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_TXPOWER_BYRATE,
+                                     offset + 7, &eeprom);
+
+       /* STBC, MCS 7 */
+       txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE0);
+       txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
+                                           txpower, delta);
+       rt2x00_set_field32(&regs[TX_PWR_CFG_9_IDX],
+                          TX_PWR_CFG_9_STBC7_CH0, txpower);
+       rt2x00_set_field32(&regs[TX_PWR_CFG_9_IDX],
+                          TX_PWR_CFG_9_STBC7_CH1, txpower);
+       rt2x00_set_field32(&regs[TX_PWR_CFG_9_IDX],
+                          TX_PWR_CFG_9_STBC7_CH2, txpower);
+
+       rt2800_register_write(rt2x00dev, TX_PWR_CFG_0, regs[TX_PWR_CFG_0_IDX]);
+       rt2800_register_write(rt2x00dev, TX_PWR_CFG_1, regs[TX_PWR_CFG_1_IDX]);
+       rt2800_register_write(rt2x00dev, TX_PWR_CFG_2, regs[TX_PWR_CFG_2_IDX]);
+       rt2800_register_write(rt2x00dev, TX_PWR_CFG_3, regs[TX_PWR_CFG_3_IDX]);
+       rt2800_register_write(rt2x00dev, TX_PWR_CFG_4, regs[TX_PWR_CFG_4_IDX]);
+       rt2800_register_write(rt2x00dev, TX_PWR_CFG_5, regs[TX_PWR_CFG_5_IDX]);
+       rt2800_register_write(rt2x00dev, TX_PWR_CFG_6, regs[TX_PWR_CFG_6_IDX]);
+       rt2800_register_write(rt2x00dev, TX_PWR_CFG_7, regs[TX_PWR_CFG_7_IDX]);
+       rt2800_register_write(rt2x00dev, TX_PWR_CFG_8, regs[TX_PWR_CFG_8_IDX]);
+       rt2800_register_write(rt2x00dev, TX_PWR_CFG_9, regs[TX_PWR_CFG_9_IDX]);
+
+       rt2800_register_write(rt2x00dev, TX_PWR_CFG_0_EXT,
+                             regs[TX_PWR_CFG_0_EXT_IDX]);
+       rt2800_register_write(rt2x00dev, TX_PWR_CFG_1_EXT,
+                             regs[TX_PWR_CFG_1_EXT_IDX]);
+       rt2800_register_write(rt2x00dev, TX_PWR_CFG_2_EXT,
+                             regs[TX_PWR_CFG_2_EXT_IDX]);
+       rt2800_register_write(rt2x00dev, TX_PWR_CFG_3_EXT,
+                             regs[TX_PWR_CFG_3_EXT_IDX]);
+       rt2800_register_write(rt2x00dev, TX_PWR_CFG_4_EXT,
+                             regs[TX_PWR_CFG_4_EXT_IDX]);
+
+       for (i = 0; i < TX_PWR_CFG_IDX_COUNT; i++)
+               rt2x00_dbg(rt2x00dev,
+                          "band:%cGHz, BW:%c0MHz, TX_PWR_CFG_%d%s = %08lx\n",
+                          (band == IEEE80211_BAND_5GHZ) ? '5' : '2',
+                          (test_bit(CONFIG_CHANNEL_HT40, &rt2x00dev->flags)) ?
+                                                               '4' : '2',
+                          (i > TX_PWR_CFG_9_IDX) ?
+                                       (i - TX_PWR_CFG_9_IDX - 1) : i,
+                          (i > TX_PWR_CFG_9_IDX) ? "_EXT" : "",
+                          (unsigned long) regs[i]);
+}
+
 /*
  * We configure transmit power using MAC TX_PWR_CFG_{0,...,N} registers and
  * BBP R1 register. TX_PWR_CFG_X allow to configure per rate TX power values,
@@ -3010,9 +3998,9 @@ static u8 rt2800_compensate_txpower(struct rt2x00_dev *rt2x00dev, int is_rate_b,
  * EEPROM_TXPOWER_BYRATE offset. We adjust them and BBP R1 settings according to
  * current conditions (i.e. band, bandwidth, temperature, user settings).
  */
-static void rt2800_config_txpower(struct rt2x00_dev *rt2x00dev,
-                                 struct ieee80211_channel *chan,
-                                 int power_level)
+static void rt2800_config_txpower_rt28xx(struct rt2x00_dev *rt2x00dev,
+                                        struct ieee80211_channel *chan,
+                                        int power_level)
 {
        u8 txpower, r1;
        u16 eeprom;
@@ -3080,8 +4068,8 @@ static void rt2800_config_txpower(struct rt2x00_dev *rt2x00dev,
                rt2800_register_read(rt2x00dev, offset, &reg);
 
                /* read the next four txpower values */
-               rt2x00_eeprom_read(rt2x00dev, EEPROM_TXPOWER_BYRATE + i,
-                                  &eeprom);
+               rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_TXPOWER_BYRATE,
+                                             i, &eeprom);
 
                is_rate_b = i ? 0 : 1;
                /*
@@ -3129,8 +4117,8 @@ static void rt2800_config_txpower(struct rt2x00_dev *rt2x00dev,
                rt2x00_set_field32(&reg, TX_PWR_CFG_RATE3, txpower);
 
                /* read the next four txpower values */
-               rt2x00_eeprom_read(rt2x00dev, EEPROM_TXPOWER_BYRATE + i + 1,
-                                  &eeprom);
+               rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_TXPOWER_BYRATE,
+                                             i + 1, &eeprom);
 
                is_rate_b = 0;
                /*
@@ -3184,6 +4172,16 @@ static void rt2800_config_txpower(struct rt2x00_dev *rt2x00dev,
        }
 }
 
+static void rt2800_config_txpower(struct rt2x00_dev *rt2x00dev,
+                                 struct ieee80211_channel *chan,
+                                 int power_level)
+{
+       if (rt2x00_rt(rt2x00dev, RT3593))
+               rt2800_config_txpower_rt3593(rt2x00dev, chan, power_level);
+       else
+               rt2800_config_txpower_rt28xx(rt2x00dev, chan, power_level);
+}
+
 void rt2800_gain_calibration(struct rt2x00_dev *rt2x00dev)
 {
        rt2800_config_txpower(rt2x00dev, rt2x00dev->hw->conf.chandef.chan,
@@ -3219,6 +4217,7 @@ void rt2800_vco_calibration(struct rt2x00_dev *rt2x00dev)
                rt2x00_set_field8(&rfcsr, RFCSR7_RF_TUNING, 1);
                rt2800_rfcsr_write(rt2x00dev, 7, rfcsr);
                break;
+       case RF3053:
        case RF3290:
        case RF5360:
        case RF5370:
@@ -3528,7 +4527,8 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
                if (rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) ||
                    rt2x00_rt_rev_lt(rt2x00dev, RT3090, REV_RT3090E) ||
                    rt2x00_rt_rev_lt(rt2x00dev, RT3390, REV_RT3390E)) {
-                       rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
+                       rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1,
+                                          &eeprom);
                        if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_DAC_TEST))
                                rt2800_register_write(rt2x00dev, TX_SW_CFG2,
                                                      0x0000002c);
@@ -3559,6 +4559,23 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
        } else if (rt2x00_rt(rt2x00dev, RT3572)) {
                rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000400);
                rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606);
+       } else if (rt2x00_rt(rt2x00dev, RT3593)) {
+               rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000402);
+               rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00000000);
+               if (rt2x00_rt_rev_lt(rt2x00dev, RT3593, REV_RT3593E)) {
+                       rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1,
+                                          &eeprom);
+                       if (rt2x00_get_field16(eeprom,
+                                              EEPROM_NIC_CONF1_DAC_TEST))
+                               rt2800_register_write(rt2x00dev, TX_SW_CFG2,
+                                                     0x0000001f);
+                       else
+                               rt2800_register_write(rt2x00dev, TX_SW_CFG2,
+                                                     0x0000000f);
+               } else {
+                       rt2800_register_write(rt2x00dev, TX_SW_CFG2,
+                                             0x00000000);
+               }
        } else if (rt2x00_rt(rt2x00dev, RT5390) ||
                   rt2x00_rt(rt2x00dev, RT5392) ||
                   rt2x00_rt(rt2x00dev, RT5592)) {
@@ -3989,7 +5006,7 @@ static void rt2800_disable_unused_dac_adc(struct rt2x00_dev *rt2x00dev)
        u8 value;
 
        rt2800_bbp_read(rt2x00dev, 138, &value);
-       rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom);
+       rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom);
        if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_TXPATH) == 1)
                value |= 0x20;
        if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RXPATH) == 1)
@@ -4332,6 +5349,22 @@ static void rt2800_init_bbp_3572(struct rt2x00_dev *rt2x00dev)
        rt2800_disable_unused_dac_adc(rt2x00dev);
 }
 
+static void rt2800_init_bbp_3593(struct rt2x00_dev *rt2x00dev)
+{
+       rt2800_init_bbp_early(rt2x00dev);
+
+       rt2800_bbp_write(rt2x00dev, 79, 0x13);
+       rt2800_bbp_write(rt2x00dev, 80, 0x05);
+       rt2800_bbp_write(rt2x00dev, 81, 0x33);
+       rt2800_bbp_write(rt2x00dev, 137, 0x0f);
+
+       rt2800_bbp_write(rt2x00dev, 84, 0x19);
+
+       /* Enable DC filter */
+       if (rt2x00_rt_rev_gte(rt2x00dev, RT3593, REV_RT3593E))
+               rt2800_bbp_write(rt2x00dev, 103, 0xc0);
+}
+
 static void rt2800_init_bbp_53xx(struct rt2x00_dev *rt2x00dev)
 {
        int ant, div_mode;
@@ -4402,7 +5435,7 @@ static void rt2800_init_bbp_53xx(struct rt2x00_dev *rt2x00dev)
 
        rt2800_disable_unused_dac_adc(rt2x00dev);
 
-       rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
+       rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
        div_mode = rt2x00_get_field16(eeprom,
                                      EEPROM_NIC_CONF1_ANT_DIVERSITY);
        ant = (div_mode == 3) ? 1 : 0;
@@ -4488,7 +5521,7 @@ static void rt2800_init_bbp_5592(struct rt2x00_dev *rt2x00dev)
 
        rt2800_bbp4_mac_if_ctrl(rt2x00dev);
 
-       rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
+       rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
        div_mode = rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_ANT_DIVERSITY);
        ant = (div_mode == 3) ? 1 : 0;
        rt2800_bbp_read(rt2x00dev, 152, &value);
@@ -4547,6 +5580,9 @@ static void rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
        case RT3572:
                rt2800_init_bbp_3572(rt2x00dev);
                break;
+       case RT3593:
+               rt2800_init_bbp_3593(rt2x00dev);
+               return;
        case RT5390:
        case RT5392:
                rt2800_init_bbp_53xx(rt2x00dev);
@@ -4557,7 +5593,8 @@ static void rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
        }
 
        for (i = 0; i < EEPROM_BBP_SIZE; i++) {
-               rt2x00_eeprom_read(rt2x00dev, EEPROM_BBP_START + i, &eeprom);
+               rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_BBP_START, i,
+                                             &eeprom);
 
                if (eeprom != 0xffff && eeprom != 0x0000) {
                        reg_id = rt2x00_get_field16(eeprom, EEPROM_BBP_REG_ID);
@@ -4728,7 +5765,7 @@ static void rt2800_normal_mode_setup_3xxx(struct rt2x00_dev *rt2x00dev)
        if (rt2x00_rt(rt2x00dev, RT3090)) {
                /*  Turn off unused DAC1 and ADC1 to reduce power consumption */
                rt2800_bbp_read(rt2x00dev, 138, &bbp);
-               rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom);
+               rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom);
                if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RXPATH) == 1)
                        rt2x00_set_field8(&bbp, BBP138_RX_ADC1, 0);
                if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_TXPATH) == 1)
@@ -4771,6 +5808,42 @@ static void rt2800_normal_mode_setup_3xxx(struct rt2x00_dev *rt2x00dev)
        }
 }
 
+static void rt2800_normal_mode_setup_3593(struct rt2x00_dev *rt2x00dev)
+{
+       struct rt2800_drv_data *drv_data = rt2x00dev->drv_data;
+       u8 rfcsr;
+       u8 tx_gain;
+
+       rt2800_rfcsr_read(rt2x00dev, 50, &rfcsr);
+       rt2x00_set_field8(&rfcsr, RFCSR50_TX_LO2_EN, 0);
+       rt2800_rfcsr_write(rt2x00dev, 50, rfcsr);
+
+       rt2800_rfcsr_read(rt2x00dev, 51, &rfcsr);
+       tx_gain = rt2x00_get_field8(drv_data->txmixer_gain_24g,
+                                   RFCSR17_TXMIXER_GAIN);
+       rt2x00_set_field8(&rfcsr, RFCSR51_BITS24, tx_gain);
+       rt2800_rfcsr_write(rt2x00dev, 51, rfcsr);
+
+       rt2800_rfcsr_read(rt2x00dev, 38, &rfcsr);
+       rt2x00_set_field8(&rfcsr, RFCSR38_RX_LO1_EN, 0);
+       rt2800_rfcsr_write(rt2x00dev, 38, rfcsr);
+
+       rt2800_rfcsr_read(rt2x00dev, 39, &rfcsr);
+       rt2x00_set_field8(&rfcsr, RFCSR39_RX_LO2_EN, 0);
+       rt2800_rfcsr_write(rt2x00dev, 39, rfcsr);
+
+       rt2800_rfcsr_read(rt2x00dev, 1, &rfcsr);
+       rt2x00_set_field8(&rfcsr, RFCSR1_RF_BLOCK_EN, 1);
+       rt2x00_set_field8(&rfcsr, RFCSR1_PLL_PD, 1);
+       rt2800_rfcsr_write(rt2x00dev, 1, rfcsr);
+
+       rt2800_rfcsr_read(rt2x00dev, 30, &rfcsr);
+       rt2x00_set_field8(&rfcsr, RFCSR30_RX_VCM, 2);
+       rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
+
+       /* TODO: enable stream mode */
+}
+
 static void rt2800_normal_mode_setup_5xxx(struct rt2x00_dev *rt2x00dev)
 {
        u8 reg;
@@ -4778,7 +5851,7 @@ static void rt2800_normal_mode_setup_5xxx(struct rt2x00_dev *rt2x00dev)
 
        /*  Turn off unused DAC1 and ADC1 to reduce power consumption */
        rt2800_bbp_read(rt2x00dev, 138, &reg);
-       rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom);
+       rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom);
        if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RXPATH) == 1)
                rt2x00_set_field8(&reg, BBP138_RX_ADC1, 0);
        if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_TXPATH) == 1)
@@ -4884,7 +5957,8 @@ static void rt2800_init_rfcsr_30xx(struct rt2x00_dev *rt2x00dev)
                rt2x00_set_field32(&reg, LDO_CFG0_BGSEL, 1);
                if (rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) ||
                    rt2x00_rt_rev_lt(rt2x00dev, RT3090, REV_RT3090E)) {
-                       rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
+                       rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1,
+                                          &eeprom);
                        if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_DAC_TEST))
                                rt2x00_set_field32(&reg, LDO_CFG0_LDO_CORE_VLEVEL, 3);
                        else
@@ -5152,6 +6226,136 @@ static void rt2800_init_rfcsr_3572(struct rt2x00_dev *rt2x00dev)
        rt2800_normal_mode_setup_3xxx(rt2x00dev);
 }
 
+static void rt3593_post_bbp_init(struct rt2x00_dev *rt2x00dev)
+{
+       u8 bbp;
+       bool txbf_enabled = false; /* FIXME */
+
+       rt2800_bbp_read(rt2x00dev, 105, &bbp);
+       if (rt2x00dev->default_ant.rx_chain_num == 1)
+               rt2x00_set_field8(&bbp, BBP105_MLD, 0);
+       else
+               rt2x00_set_field8(&bbp, BBP105_MLD, 1);
+       rt2800_bbp_write(rt2x00dev, 105, bbp);
+
+       rt2800_bbp4_mac_if_ctrl(rt2x00dev);
+
+       rt2800_bbp_write(rt2x00dev, 92, 0x02);
+       rt2800_bbp_write(rt2x00dev, 82, 0x82);
+       rt2800_bbp_write(rt2x00dev, 106, 0x05);
+       rt2800_bbp_write(rt2x00dev, 104, 0x92);
+       rt2800_bbp_write(rt2x00dev, 88, 0x90);
+       rt2800_bbp_write(rt2x00dev, 148, 0xc8);
+       rt2800_bbp_write(rt2x00dev, 47, 0x48);
+       rt2800_bbp_write(rt2x00dev, 120, 0x50);
+
+       if (txbf_enabled)
+               rt2800_bbp_write(rt2x00dev, 163, 0xbd);
+       else
+               rt2800_bbp_write(rt2x00dev, 163, 0x9d);
+
+       /* SNR mapping */
+       rt2800_bbp_write(rt2x00dev, 142, 6);
+       rt2800_bbp_write(rt2x00dev, 143, 160);
+       rt2800_bbp_write(rt2x00dev, 142, 7);
+       rt2800_bbp_write(rt2x00dev, 143, 161);
+       rt2800_bbp_write(rt2x00dev, 142, 8);
+       rt2800_bbp_write(rt2x00dev, 143, 162);
+
+       /* ADC/DAC control */
+       rt2800_bbp_write(rt2x00dev, 31, 0x08);
+
+       /* RX AGC energy lower bound in log2 */
+       rt2800_bbp_write(rt2x00dev, 68, 0x0b);
+
+       /* FIXME: BBP 105 overwrite? */
+       rt2800_bbp_write(rt2x00dev, 105, 0x04);
+
+}
+
+static void rt2800_init_rfcsr_3593(struct rt2x00_dev *rt2x00dev)
+{
+       struct rt2800_drv_data *drv_data = rt2x00dev->drv_data;
+       u32 reg;
+       u8 rfcsr;
+
+       /* Disable GPIO #4 and #7 function for LAN PE control */
+       rt2800_register_read(rt2x00dev, GPIO_SWITCH, &reg);
+       rt2x00_set_field32(&reg, GPIO_SWITCH_4, 0);
+       rt2x00_set_field32(&reg, GPIO_SWITCH_7, 0);
+       rt2800_register_write(rt2x00dev, GPIO_SWITCH, reg);
+
+       /* Initialize default register values */
+       rt2800_rfcsr_write(rt2x00dev, 1, 0x03);
+       rt2800_rfcsr_write(rt2x00dev, 3, 0x80);
+       rt2800_rfcsr_write(rt2x00dev, 5, 0x00);
+       rt2800_rfcsr_write(rt2x00dev, 6, 0x40);
+       rt2800_rfcsr_write(rt2x00dev, 8, 0xf1);
+       rt2800_rfcsr_write(rt2x00dev, 9, 0x02);
+       rt2800_rfcsr_write(rt2x00dev, 10, 0xd3);
+       rt2800_rfcsr_write(rt2x00dev, 11, 0x40);
+       rt2800_rfcsr_write(rt2x00dev, 12, 0x4e);
+       rt2800_rfcsr_write(rt2x00dev, 13, 0x12);
+       rt2800_rfcsr_write(rt2x00dev, 18, 0x40);
+       rt2800_rfcsr_write(rt2x00dev, 22, 0x20);
+       rt2800_rfcsr_write(rt2x00dev, 30, 0x10);
+       rt2800_rfcsr_write(rt2x00dev, 31, 0x80);
+       rt2800_rfcsr_write(rt2x00dev, 32, 0x78);
+       rt2800_rfcsr_write(rt2x00dev, 33, 0x3b);
+       rt2800_rfcsr_write(rt2x00dev, 34, 0x3c);
+       rt2800_rfcsr_write(rt2x00dev, 35, 0xe0);
+       rt2800_rfcsr_write(rt2x00dev, 38, 0x86);
+       rt2800_rfcsr_write(rt2x00dev, 39, 0x23);
+       rt2800_rfcsr_write(rt2x00dev, 44, 0xd3);
+       rt2800_rfcsr_write(rt2x00dev, 45, 0xbb);
+       rt2800_rfcsr_write(rt2x00dev, 46, 0x60);
+       rt2800_rfcsr_write(rt2x00dev, 49, 0x8e);
+       rt2800_rfcsr_write(rt2x00dev, 50, 0x86);
+       rt2800_rfcsr_write(rt2x00dev, 51, 0x75);
+       rt2800_rfcsr_write(rt2x00dev, 52, 0x45);
+       rt2800_rfcsr_write(rt2x00dev, 53, 0x18);
+       rt2800_rfcsr_write(rt2x00dev, 54, 0x18);
+       rt2800_rfcsr_write(rt2x00dev, 55, 0x18);
+       rt2800_rfcsr_write(rt2x00dev, 56, 0xdb);
+       rt2800_rfcsr_write(rt2x00dev, 57, 0x6e);
+
+       /* Initiate calibration */
+       /* TODO: use rt2800_rf_init_calibration ? */
+       rt2800_rfcsr_read(rt2x00dev, 2, &rfcsr);
+       rt2x00_set_field8(&rfcsr, RFCSR2_RESCAL_EN, 1);
+       rt2800_rfcsr_write(rt2x00dev, 2, rfcsr);
+
+       rt2800_adjust_freq_offset(rt2x00dev);
+
+       rt2800_rfcsr_read(rt2x00dev, 18, &rfcsr);
+       rt2x00_set_field8(&rfcsr, RFCSR18_XO_TUNE_BYPASS, 1);
+       rt2800_rfcsr_write(rt2x00dev, 18, rfcsr);
+
+       rt2800_register_read(rt2x00dev, LDO_CFG0, &reg);
+       rt2x00_set_field32(&reg, LDO_CFG0_LDO_CORE_VLEVEL, 3);
+       rt2x00_set_field32(&reg, LDO_CFG0_BGSEL, 1);
+       rt2800_register_write(rt2x00dev, LDO_CFG0, reg);
+       usleep_range(1000, 1500);
+       rt2800_register_read(rt2x00dev, LDO_CFG0, &reg);
+       rt2x00_set_field32(&reg, LDO_CFG0_LDO_CORE_VLEVEL, 0);
+       rt2800_register_write(rt2x00dev, LDO_CFG0, reg);
+
+       /* Set initial values for RX filter calibration */
+       drv_data->calibration_bw20 = 0x1f;
+       drv_data->calibration_bw40 = 0x2f;
+
+       /* Save BBP 25 & 26 values for later use in channel switching */
+       rt2800_bbp_read(rt2x00dev, 25, &drv_data->bbp25);
+       rt2800_bbp_read(rt2x00dev, 26, &drv_data->bbp26);
+
+       rt2800_led_open_drain_enable(rt2x00dev);
+       rt2800_normal_mode_setup_3593(rt2x00dev);
+
+       rt3593_post_bbp_init(rt2x00dev);
+
+       /* TODO: enable stream mode support */
+}
+
 static void rt2800_init_rfcsr_5390(struct rt2x00_dev *rt2x00dev)
 {
        rt2800_rf_init_calibration(rt2x00dev, 2);
@@ -5380,6 +6584,9 @@ static void rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
        case RT3572:
                rt2800_init_rfcsr_3572(rt2x00dev);
                break;
+       case RT3593:
+               rt2800_init_rfcsr_3593(rt2x00dev);
+               break;
        case RT5390:
                rt2800_init_rfcsr_5390(rt2x00dev);
                break;
@@ -5456,15 +6663,15 @@ int rt2800_enable_radio(struct rt2x00_dev *rt2x00dev)
        /*
         * Initialize LED control
         */
-       rt2x00_eeprom_read(rt2x00dev, EEPROM_LED_AG_CONF, &word);
+       rt2800_eeprom_read(rt2x00dev, EEPROM_LED_AG_CONF, &word);
        rt2800_mcu_request(rt2x00dev, MCU_LED_AG_CONF, 0xff,
                           word & 0xff, (word >> 8) & 0xff);
 
-       rt2x00_eeprom_read(rt2x00dev, EEPROM_LED_ACT_CONF, &word);
+       rt2800_eeprom_read(rt2x00dev, EEPROM_LED_ACT_CONF, &word);
        rt2800_mcu_request(rt2x00dev, MCU_LED_ACT_CONF, 0xff,
                           word & 0xff, (word >> 8) & 0xff);
 
-       rt2x00_eeprom_read(rt2x00dev, EEPROM_LED_POLARITY, &word);
+       rt2800_eeprom_read(rt2x00dev, EEPROM_LED_POLARITY, &word);
        rt2800_mcu_request(rt2x00dev, MCU_LED_LED_POLARITY, 0xff,
                           word & 0xff, (word >> 8) & 0xff);
 
@@ -5560,6 +6767,34 @@ int rt2800_read_eeprom_efuse(struct rt2x00_dev *rt2x00dev)
 }
 EXPORT_SYMBOL_GPL(rt2800_read_eeprom_efuse);
 
+static u8 rt2800_get_txmixer_gain_24g(struct rt2x00_dev *rt2x00dev)
+{
+       u16 word;
+
+       if (rt2x00_rt(rt2x00dev, RT3593))
+               return 0;
+
+       rt2800_eeprom_read(rt2x00dev, EEPROM_TXMIXER_GAIN_BG, &word);
+       if ((word & 0x00ff) != 0x00ff)
+               return rt2x00_get_field16(word, EEPROM_TXMIXER_GAIN_BG_VAL);
+
+       return 0;
+}
+
+static u8 rt2800_get_txmixer_gain_5g(struct rt2x00_dev *rt2x00dev)
+{
+       u16 word;
+
+       if (rt2x00_rt(rt2x00dev, RT3593))
+               return 0;
+
+       rt2800_eeprom_read(rt2x00dev, EEPROM_TXMIXER_GAIN_A, &word);
+       if ((word & 0x00ff) != 0x00ff)
+               return rt2x00_get_field16(word, EEPROM_TXMIXER_GAIN_A_VAL);
+
+       return 0;
+}
+
 static int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev)
 {
        struct rt2800_drv_data *drv_data = rt2x00dev->drv_data;
@@ -5578,18 +6813,18 @@ static int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev)
        /*
         * Start validation of the data that has been read.
         */
-       mac = rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0);
+       mac = rt2800_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0);
        if (!is_valid_ether_addr(mac)) {
                eth_random_addr(mac);
                rt2x00_eeprom_dbg(rt2x00dev, "MAC: %pM\n", mac);
        }
 
-       rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &word);
+       rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &word);
        if (word == 0xffff) {
                rt2x00_set_field16(&word, EEPROM_NIC_CONF0_RXPATH, 2);
                rt2x00_set_field16(&word, EEPROM_NIC_CONF0_TXPATH, 1);
                rt2x00_set_field16(&word, EEPROM_NIC_CONF0_RF_TYPE, RF2820);
-               rt2x00_eeprom_write(rt2x00dev, EEPROM_NIC_CONF0, word);
+               rt2800_eeprom_write(rt2x00dev, EEPROM_NIC_CONF0, word);
                rt2x00_eeprom_dbg(rt2x00dev, "Antenna: 0x%04x\n", word);
        } else if (rt2x00_rt(rt2x00dev, RT2860) ||
                   rt2x00_rt(rt2x00dev, RT2872)) {
@@ -5598,10 +6833,10 @@ static int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev)
                 */
                if (rt2x00_get_field16(word, EEPROM_NIC_CONF0_RXPATH) > 2)
                        rt2x00_set_field16(&word, EEPROM_NIC_CONF0_RXPATH, 2);
-               rt2x00_eeprom_write(rt2x00dev, EEPROM_NIC_CONF0, word);
+               rt2800_eeprom_write(rt2x00dev, EEPROM_NIC_CONF0, word);
        }
 
-       rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &word);
+       rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &word);
        if (word == 0xffff) {
                rt2x00_set_field16(&word, EEPROM_NIC_CONF1_HW_RADIO, 0);
                rt2x00_set_field16(&word, EEPROM_NIC_CONF1_EXTERNAL_TX_ALC, 0);
@@ -5618,24 +6853,24 @@ static int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev)
                rt2x00_set_field16(&word, EEPROM_NIC_CONF1_INTERNAL_TX_ALC, 0);
                rt2x00_set_field16(&word, EEPROM_NIC_CONF1_BT_COEXIST, 0);
                rt2x00_set_field16(&word, EEPROM_NIC_CONF1_DAC_TEST, 0);
-               rt2x00_eeprom_write(rt2x00dev, EEPROM_NIC_CONF1, word);
+               rt2800_eeprom_write(rt2x00dev, EEPROM_NIC_CONF1, word);
                rt2x00_eeprom_dbg(rt2x00dev, "NIC: 0x%04x\n", word);
        }
 
-       rt2x00_eeprom_read(rt2x00dev, EEPROM_FREQ, &word);
+       rt2800_eeprom_read(rt2x00dev, EEPROM_FREQ, &word);
        if ((word & 0x00ff) == 0x00ff) {
                rt2x00_set_field16(&word, EEPROM_FREQ_OFFSET, 0);
-               rt2x00_eeprom_write(rt2x00dev, EEPROM_FREQ, word);
+               rt2800_eeprom_write(rt2x00dev, EEPROM_FREQ, word);
                rt2x00_eeprom_dbg(rt2x00dev, "Freq: 0x%04x\n", word);
        }
        if ((word & 0xff00) == 0xff00) {
                rt2x00_set_field16(&word, EEPROM_FREQ_LED_MODE,
                                   LED_MODE_TXRX_ACTIVITY);
                rt2x00_set_field16(&word, EEPROM_FREQ_LED_POLARITY, 0);
-               rt2x00_eeprom_write(rt2x00dev, EEPROM_FREQ, word);
-               rt2x00_eeprom_write(rt2x00dev, EEPROM_LED_AG_CONF, 0x5555);
-               rt2x00_eeprom_write(rt2x00dev, EEPROM_LED_ACT_CONF, 0x2221);
-               rt2x00_eeprom_write(rt2x00dev, EEPROM_LED_POLARITY, 0xa9f8);
+               rt2800_eeprom_write(rt2x00dev, EEPROM_FREQ, word);
+               rt2800_eeprom_write(rt2x00dev, EEPROM_LED_AG_CONF, 0x5555);
+               rt2800_eeprom_write(rt2x00dev, EEPROM_LED_ACT_CONF, 0x2221);
+               rt2800_eeprom_write(rt2x00dev, EEPROM_LED_POLARITY, 0xa9f8);
                rt2x00_eeprom_dbg(rt2x00dev, "Led Mode: 0x%04x\n", word);
        }
 
@@ -5644,56 +6879,61 @@ static int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev)
         * lna0 as correct value. Note that EEPROM_LNA
         * is never validated.
         */
-       rt2x00_eeprom_read(rt2x00dev, EEPROM_LNA, &word);
+       rt2800_eeprom_read(rt2x00dev, EEPROM_LNA, &word);
        default_lna_gain = rt2x00_get_field16(word, EEPROM_LNA_A0);
 
-       rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_BG, &word);
+       rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_BG, &word);
        if (abs(rt2x00_get_field16(word, EEPROM_RSSI_BG_OFFSET0)) > 10)
                rt2x00_set_field16(&word, EEPROM_RSSI_BG_OFFSET0, 0);
        if (abs(rt2x00_get_field16(word, EEPROM_RSSI_BG_OFFSET1)) > 10)
                rt2x00_set_field16(&word, EEPROM_RSSI_BG_OFFSET1, 0);
-       rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_BG, word);
+       rt2800_eeprom_write(rt2x00dev, EEPROM_RSSI_BG, word);
 
-       rt2x00_eeprom_read(rt2x00dev, EEPROM_TXMIXER_GAIN_BG, &word);
-       if ((word & 0x00ff) != 0x00ff) {
-               drv_data->txmixer_gain_24g =
-                       rt2x00_get_field16(word, EEPROM_TXMIXER_GAIN_BG_VAL);
-       } else {
-               drv_data->txmixer_gain_24g = 0;
-       }
+       drv_data->txmixer_gain_24g = rt2800_get_txmixer_gain_24g(rt2x00dev);
 
-       rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_BG2, &word);
+       rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_BG2, &word);
        if (abs(rt2x00_get_field16(word, EEPROM_RSSI_BG2_OFFSET2)) > 10)
                rt2x00_set_field16(&word, EEPROM_RSSI_BG2_OFFSET2, 0);
-       if (rt2x00_get_field16(word, EEPROM_RSSI_BG2_LNA_A1) == 0x00 ||
-           rt2x00_get_field16(word, EEPROM_RSSI_BG2_LNA_A1) == 0xff)
-               rt2x00_set_field16(&word, EEPROM_RSSI_BG2_LNA_A1,
-                                  default_lna_gain);
-       rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_BG2, word);
-
-       rt2x00_eeprom_read(rt2x00dev, EEPROM_TXMIXER_GAIN_A, &word);
-       if ((word & 0x00ff) != 0x00ff) {
-               drv_data->txmixer_gain_5g =
-                       rt2x00_get_field16(word, EEPROM_TXMIXER_GAIN_A_VAL);
-       } else {
-               drv_data->txmixer_gain_5g = 0;
+       if (!rt2x00_rt(rt2x00dev, RT3593)) {
+               if (rt2x00_get_field16(word, EEPROM_RSSI_BG2_LNA_A1) == 0x00 ||
+                   rt2x00_get_field16(word, EEPROM_RSSI_BG2_LNA_A1) == 0xff)
+                       rt2x00_set_field16(&word, EEPROM_RSSI_BG2_LNA_A1,
+                                          default_lna_gain);
        }
+       rt2800_eeprom_write(rt2x00dev, EEPROM_RSSI_BG2, word);
 
-       rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_A, &word);
+       drv_data->txmixer_gain_5g = rt2800_get_txmixer_gain_5g(rt2x00dev);
+
+       rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_A, &word);
        if (abs(rt2x00_get_field16(word, EEPROM_RSSI_A_OFFSET0)) > 10)
                rt2x00_set_field16(&word, EEPROM_RSSI_A_OFFSET0, 0);
        if (abs(rt2x00_get_field16(word, EEPROM_RSSI_A_OFFSET1)) > 10)
                rt2x00_set_field16(&word, EEPROM_RSSI_A_OFFSET1, 0);
-       rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_A, word);
+       rt2800_eeprom_write(rt2x00dev, EEPROM_RSSI_A, word);
 
-       rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_A2, &word);
+       rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_A2, &word);
        if (abs(rt2x00_get_field16(word, EEPROM_RSSI_A2_OFFSET2)) > 10)
                rt2x00_set_field16(&word, EEPROM_RSSI_A2_OFFSET2, 0);
-       if (rt2x00_get_field16(word, EEPROM_RSSI_A2_LNA_A2) == 0x00 ||
-           rt2x00_get_field16(word, EEPROM_RSSI_A2_LNA_A2) == 0xff)
-               rt2x00_set_field16(&word, EEPROM_RSSI_A2_LNA_A2,
-                                  default_lna_gain);
-       rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_A2, word);
+       if (!rt2x00_rt(rt2x00dev, RT3593)) {
+               if (rt2x00_get_field16(word, EEPROM_RSSI_A2_LNA_A2) == 0x00 ||
+                   rt2x00_get_field16(word, EEPROM_RSSI_A2_LNA_A2) == 0xff)
+                       rt2x00_set_field16(&word, EEPROM_RSSI_A2_LNA_A2,
+                                          default_lna_gain);
+       }
+       rt2800_eeprom_write(rt2x00dev, EEPROM_RSSI_A2, word);
+
+       if (rt2x00_rt(rt2x00dev, RT3593)) {
+               rt2800_eeprom_read(rt2x00dev, EEPROM_EXT_LNA2, &word);
+               if (rt2x00_get_field16(word, EEPROM_EXT_LNA2_A1) == 0x00 ||
+                   rt2x00_get_field16(word, EEPROM_EXT_LNA2_A1) == 0xff)
+                       rt2x00_set_field16(&word, EEPROM_EXT_LNA2_A1,
+                                          default_lna_gain);
+               if (rt2x00_get_field16(word, EEPROM_EXT_LNA2_A2) == 0x00 ||
+                   rt2x00_get_field16(word, EEPROM_EXT_LNA2_A2) == 0xff)
+                       rt2x00_set_field16(&word, EEPROM_EXT_LNA2_A2,
+                                          default_lna_gain);
+               rt2800_eeprom_write(rt2x00dev, EEPROM_EXT_LNA2, word);
+       }
 
        return 0;
 }
@@ -5707,7 +6947,7 @@ static int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
        /*
         * Read EEPROM word for configuration.
         */
-       rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom);
+       rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom);
 
        /*
         * Identify RF chipset by EEPROM value
@@ -5717,7 +6957,7 @@ static int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
        if (rt2x00_rt(rt2x00dev, RT3290) ||
            rt2x00_rt(rt2x00dev, RT5390) ||
            rt2x00_rt(rt2x00dev, RT5392))
-               rt2x00_eeprom_read(rt2x00dev, EEPROM_CHIP_ID, &rf);
+               rt2800_eeprom_read(rt2x00dev, EEPROM_CHIP_ID, &rf);
        else
                rf = rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RF_TYPE);
 
@@ -5731,6 +6971,7 @@ static int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
        case RF3021:
        case RF3022:
        case RF3052:
+       case RF3053:
        case RF3290:
        case RF3320:
        case RF3322:
@@ -5757,7 +6998,7 @@ static int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
        rt2x00dev->default_ant.rx_chain_num =
            rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RXPATH);
 
-       rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
+       rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
 
        if (rt2x00_rt(rt2x00dev, RT3070) ||
            rt2x00_rt(rt2x00dev, RT3090) ||
@@ -5810,7 +7051,7 @@ static int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
        /*
         * Read frequency offset and RF programming sequence.
         */
-       rt2x00_eeprom_read(rt2x00dev, EEPROM_FREQ, &eeprom);
+       rt2800_eeprom_read(rt2x00dev, EEPROM_FREQ, &eeprom);
        rt2x00dev->freq_offset = rt2x00_get_field16(eeprom, EEPROM_FREQ_OFFSET);
 
        /*
@@ -5827,7 +7068,7 @@ static int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
        /*
         * Check if support EIRP tx power limit feature.
         */
-       rt2x00_eeprom_read(rt2x00dev, EEPROM_EIRP_MAX_TX_POWER, &eeprom);
+       rt2800_eeprom_read(rt2x00dev, EEPROM_EIRP_MAX_TX_POWER, &eeprom);
 
        if (rt2x00_get_field16(eeprom, EEPROM_EIRP_MAX_TX_POWER_2GHZ) <
                                        EIRP_MAX_TX_POWER_LIMIT)
@@ -6109,12 +7350,79 @@ static const struct rf_channel rf_vals_5592_xtal40[] = {
        {196, 83, 0, 12, 1},
 };
 
+static const struct rf_channel rf_vals_3053[] = {
+       /* Channel, N, R, K */
+       {1, 241, 2, 2},
+       {2, 241, 2, 7},
+       {3, 242, 2, 2},
+       {4, 242, 2, 7},
+       {5, 243, 2, 2},
+       {6, 243, 2, 7},
+       {7, 244, 2, 2},
+       {8, 244, 2, 7},
+       {9, 245, 2, 2},
+       {10, 245, 2, 7},
+       {11, 246, 2, 2},
+       {12, 246, 2, 7},
+       {13, 247, 2, 2},
+       {14, 248, 2, 4},
+
+       {36, 0x56, 0, 4},
+       {38, 0x56, 0, 6},
+       {40, 0x56, 0, 8},
+       {44, 0x57, 0, 0},
+       {46, 0x57, 0, 2},
+       {48, 0x57, 0, 4},
+       {52, 0x57, 0, 8},
+       {54, 0x57, 0, 10},
+       {56, 0x58, 0, 0},
+       {60, 0x58, 0, 4},
+       {62, 0x58, 0, 6},
+       {64, 0x58, 0, 8},
+
+       {100, 0x5B, 0, 8},
+       {102, 0x5B, 0, 10},
+       {104, 0x5C, 0, 0},
+       {108, 0x5C, 0, 4},
+       {110, 0x5C, 0, 6},
+       {112, 0x5C, 0, 8},
+
+       /* NOTE: Channel 114 has been removed intentionally.
+        * The EEPROM contains no TX power values for that,
+        * and it is disabled in the vendor driver as well.
+        */
+
+       {116, 0x5D, 0, 0},
+       {118, 0x5D, 0, 2},
+       {120, 0x5D, 0, 4},
+       {124, 0x5D, 0, 8},
+       {126, 0x5D, 0, 10},
+       {128, 0x5E, 0, 0},
+       {132, 0x5E, 0, 4},
+       {134, 0x5E, 0, 6},
+       {136, 0x5E, 0, 8},
+       {140, 0x5F, 0, 0},
+
+       {149, 0x5F, 0, 9},
+       {151, 0x5F, 0, 11},
+       {153, 0x60, 0, 1},
+       {157, 0x60, 0, 5},
+       {159, 0x60, 0, 7},
+       {161, 0x60, 0, 9},
+       {165, 0x61, 0, 1},
+       {167, 0x61, 0, 3},
+       {169, 0x61, 0, 5},
+       {171, 0x61, 0, 7},
+       {173, 0x61, 0, 9},
+};
+
 static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
 {
        struct hw_mode_spec *spec = &rt2x00dev->spec;
        struct channel_info *info;
        char *default_power1;
        char *default_power2;
+       char *default_power3;
        unsigned int i;
        u16 eeprom;
        u32 reg;
@@ -6148,7 +7456,7 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
 
        SET_IEEE80211_DEV(rt2x00dev->hw, rt2x00dev->dev);
        SET_IEEE80211_PERM_ADDR(rt2x00dev->hw,
-                               rt2x00_eeprom_addr(rt2x00dev,
+                               rt2800_eeprom_addr(rt2x00dev,
                                                   EEPROM_MAC_ADDR_0));
 
        /*
@@ -6164,7 +7472,7 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
        rt2x00dev->hw->max_report_rates = 7;
        rt2x00dev->hw->max_rate_tries = 1;
 
-       rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom);
+       rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom);
 
        /*
         * Initialize hw_mode information.
@@ -6199,6 +7507,10 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
                spec->supported_bands |= SUPPORT_BAND_5GHZ;
                spec->num_channels = ARRAY_SIZE(rf_vals_3x);
                spec->channels = rf_vals_3x;
+       } else if (rt2x00_rf(rt2x00dev, RF3053)) {
+               spec->supported_bands |= SUPPORT_BAND_5GHZ;
+               spec->num_channels = ARRAY_SIZE(rf_vals_3053);
+               spec->channels = rf_vals_3053;
        } else if (rt2x00_rf(rt2x00dev, RF5592)) {
                spec->supported_bands |= SUPPORT_BAND_5GHZ;
 
@@ -6264,21 +7576,40 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
 
        spec->channels_info = info;
 
-       default_power1 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_BG1);
-       default_power2 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_BG2);
+       default_power1 = rt2800_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_BG1);
+       default_power2 = rt2800_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_BG2);
+
+       if (rt2x00dev->default_ant.tx_chain_num > 2)
+               default_power3 = rt2800_eeprom_addr(rt2x00dev,
+                                                   EEPROM_EXT_TXPOWER_BG3);
+       else
+               default_power3 = NULL;
 
        for (i = 0; i < 14; i++) {
                info[i].default_power1 = default_power1[i];
                info[i].default_power2 = default_power2[i];
+               if (default_power3)
+                       info[i].default_power3 = default_power3[i];
        }
 
        if (spec->num_channels > 14) {
-               default_power1 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A1);
-               default_power2 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A2);
+               default_power1 = rt2800_eeprom_addr(rt2x00dev,
+                                                   EEPROM_TXPOWER_A1);
+               default_power2 = rt2800_eeprom_addr(rt2x00dev,
+                                                   EEPROM_TXPOWER_A2);
+
+               if (rt2x00dev->default_ant.tx_chain_num > 2)
+                       default_power3 =
+                               rt2800_eeprom_addr(rt2x00dev,
+                                                  EEPROM_EXT_TXPOWER_A3);
+               else
+                       default_power3 = NULL;
 
                for (i = 14; i < spec->num_channels; i++) {
                        info[i].default_power1 = default_power1[i - 14];
                        info[i].default_power2 = default_power2[i - 14];
+                       if (default_power3)
+                               info[i].default_power3 = default_power3[i - 14];
                }
        }
 
@@ -6289,6 +7620,7 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
        case RF3022:
        case RF3320:
        case RF3052:
+       case RF3053:
        case RF3290:
        case RF5360:
        case RF5370:
@@ -6327,6 +7659,7 @@ static int rt2800_probe_rt(struct rt2x00_dev *rt2x00dev)
        case RT3352:
        case RT3390:
        case RT3572:
+       case RT3593:
        case RT5390:
        case RT5392:
        case RT5592:
index 6ec739466db46e681f86fc4f59a282949082cf38..a94ba447e63c93eb10744ba2adf8ff2aa00f1886 100644 (file)
@@ -226,4 +226,8 @@ int rt2800_get_survey(struct ieee80211_hw *hw, int idx,
                      struct survey_info *survey);
 void rt2800_disable_wpdma(struct rt2x00_dev *rt2x00dev);
 
+void rt2800_get_txwi_rxwi_size(struct rt2x00_dev *rt2x00dev,
+                              unsigned short *txwi_size,
+                              unsigned short *rxwi_size);
+
 #endif /* RT2800LIB_H */
index 00055627eb8de41282a8248f0ad855a87fe00c86..f8f2abbfbb6554f1f432c3e7a8b51facb5d9adec 100644 (file)
@@ -507,9 +507,13 @@ static int rt2800pci_init_registers(struct rt2x00_dev *rt2x00dev)
        rt2x00mmio_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e00);
 
        if (rt2x00_is_pcie(rt2x00dev) &&
-           (rt2x00_rt(rt2x00dev, RT3572) ||
+           (rt2x00_rt(rt2x00dev, RT3090) ||
+            rt2x00_rt(rt2x00dev, RT3390) ||
+            rt2x00_rt(rt2x00dev, RT3572) ||
+            rt2x00_rt(rt2x00dev, RT3593) ||
             rt2x00_rt(rt2x00dev, RT5390) ||
-            rt2x00_rt(rt2x00dev, RT5392))) {
+            rt2x00_rt(rt2x00dev, RT5392) ||
+            rt2x00_rt(rt2x00dev, RT5592))) {
                rt2x00mmio_register_read(rt2x00dev, AUX_CTRL, &reg);
                rt2x00_set_field32(&reg, AUX_CTRL_FORCE_PCIE_CLK, 1);
                rt2x00_set_field32(&reg, AUX_CTRL_WAKE_PCIE_EN, 1);
@@ -1189,12 +1193,17 @@ static const struct rt2x00lib_ops rt2800pci_rt2x00_ops = {
 
 static void rt2800pci_queue_init(struct data_queue *queue)
 {
+       struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
+       unsigned short txwi_size, rxwi_size;
+
+       rt2800_get_txwi_rxwi_size(rt2x00dev, &txwi_size, &rxwi_size);
+
        switch (queue->qid) {
        case QID_RX:
                queue->limit = 128;
                queue->data_size = AGGREGATION_SIZE;
                queue->desc_size = RXD_DESC_SIZE;
-               queue->winfo_size = RXWI_DESC_SIZE_4WORDS;
+               queue->winfo_size = rxwi_size;
                queue->priv_size = sizeof(struct queue_entry_priv_mmio);
                break;
 
@@ -1205,7 +1214,7 @@ static void rt2800pci_queue_init(struct data_queue *queue)
                queue->limit = 64;
                queue->data_size = AGGREGATION_SIZE;
                queue->desc_size = TXD_DESC_SIZE;
-               queue->winfo_size = TXWI_DESC_SIZE_4WORDS;
+               queue->winfo_size = txwi_size;
                queue->priv_size = sizeof(struct queue_entry_priv_mmio);
                break;
 
@@ -1213,7 +1222,7 @@ static void rt2800pci_queue_init(struct data_queue *queue)
                queue->limit = 8;
                queue->data_size = 0; /* No DMA required for beacons */
                queue->desc_size = TXD_DESC_SIZE;
-               queue->winfo_size = TXWI_DESC_SIZE_4WORDS;
+               queue->winfo_size = txwi_size;
                queue->priv_size = sizeof(struct queue_entry_priv_mmio);
                break;
 
index 840833b26bfaeeda520a5ce29fba81bf839af284..338034e18243af16f810eb4b7a1e9a3c8fc92e6a 100644 (file)
@@ -854,13 +854,7 @@ static void rt2800usb_queue_init(struct data_queue *queue)
        struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
        unsigned short txwi_size, rxwi_size;
 
-       if (rt2x00_rt(rt2x00dev, RT5592)) {
-               txwi_size = TXWI_DESC_SIZE_5WORDS;
-               rxwi_size = RXWI_DESC_SIZE_6WORDS;
-       } else {
-               txwi_size = TXWI_DESC_SIZE_4WORDS;
-               rxwi_size = RXWI_DESC_SIZE_4WORDS;
-       }
+       rt2800_get_txwi_rxwi_size(rt2x00dev, &txwi_size, &rxwi_size);
 
        switch (queue->qid) {
        case QID_RX:
@@ -1194,6 +1188,40 @@ static struct usb_device_id rt2800usb_device_table[] = {
        /* Zinwell */
        { USB_DEVICE(0x5a57, 0x0284) },
 #endif
+#ifdef CONFIG_RT2800USB_RT3573
+       /* AirLive */
+       { USB_DEVICE(0x1b75, 0x7733) },
+       /* ASUS */
+       { USB_DEVICE(0x0b05, 0x17bc) },
+       { USB_DEVICE(0x0b05, 0x17ad) },
+       /* Belkin */
+       { USB_DEVICE(0x050d, 0x1103) },
+       /* Cameo */
+       { USB_DEVICE(0x148f, 0xf301) },
+       /* Edimax */
+       { USB_DEVICE(0x7392, 0x7733) },
+       /* Hawking */
+       { USB_DEVICE(0x0e66, 0x0020) },
+       { USB_DEVICE(0x0e66, 0x0021) },
+       /* I-O DATA */
+       { USB_DEVICE(0x04bb, 0x094e) },
+       /* Linksys */
+       { USB_DEVICE(0x13b1, 0x003b) },
+       /* Logitec */
+       { USB_DEVICE(0x0789, 0x016b) },
+       /* NETGEAR */
+       { USB_DEVICE(0x0846, 0x9012) },
+       { USB_DEVICE(0x0846, 0x9019) },
+       /* Planex */
+       { USB_DEVICE(0x2019, 0xed19) },
+       /* Ralink */
+       { USB_DEVICE(0x148f, 0x3573) },
+       /* Sitecom */
+       { USB_DEVICE(0x0df6, 0x0067) },
+       { USB_DEVICE(0x0df6, 0x006a) },
+       /* ZyXEL */
+       { USB_DEVICE(0x0586, 0x3421) },
+#endif
 #ifdef CONFIG_RT2800USB_RT53XX
        /* Arcadyan */
        { USB_DEVICE(0x043e, 0x7a12) },
index ee3fc570b11d04b0a6531f182abb1113dcd9be93..fe4c572db52c2749317690b75ad4cc296aec5157 100644 (file)
@@ -211,6 +211,7 @@ struct channel_info {
        short max_power;
        short default_power1;
        short default_power2;
+       short default_power3;
 };
 
 /*
index aa95c6cf3545432bf93b0743af97900a91f55ec9..6c8a33b6ee225082d137df7e392306fa0101534a 100644 (file)
@@ -936,7 +936,7 @@ void rt2x00queue_index_inc(struct queue_entry *entry, enum queue_index index)
        spin_unlock_irqrestore(&queue->index_lock, irqflags);
 }
 
-void rt2x00queue_pause_queue_nocheck(struct data_queue *queue)
+static void rt2x00queue_pause_queue_nocheck(struct data_queue *queue)
 {
        switch (queue->qid) {
        case QID_AC_VO:
index 298b615964e861acb23c9ccd85e7b154854e9cda..f646b7585d9bf99ae35d8c7196d4e9f14c198438 100644 (file)
@@ -688,7 +688,7 @@ static void rtl_p2p_noa_ie(struct ieee80211_hw *hw, void *data,
        find_p2p_ie = true;
        /*to find noa ie*/
        while (ie + 1 < end) {
-               noa_len = READEF2BYTE(&ie[1]);
+               noa_len = READEF2BYTE((__le16 *)&ie[1]);
                if (ie + 3 + ie[1] > end)
                        return;
 
@@ -717,13 +717,13 @@ static void rtl_p2p_noa_ie(struct ieee80211_hw *hw, void *data,
                                                 READEF1BYTE(ie+index);
                                        index += 1;
                                        p2pinfo->noa_duration[i] =
-                                                READEF4BYTE(ie+index);
+                                                READEF4BYTE((__le32 *)ie+index);
                                        index += 4;
                                        p2pinfo->noa_interval[i] =
-                                                READEF4BYTE(ie+index);
+                                                READEF4BYTE((__le32 *)ie+index);
                                        index += 4;
                                        p2pinfo->noa_start_time[i] =
-                                                READEF4BYTE(ie+index);
+                                                READEF4BYTE((__le32 *)ie+index);
                                        index += 4;
                                }
 
@@ -780,7 +780,7 @@ static void rtl_p2p_action_ie(struct ieee80211_hw *hw, void *data,
        RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "action frame find P2P IE.\n");
        /*to find noa ie*/
        while (ie + 1 < end) {
-               noa_len = READEF2BYTE(&ie[1]);
+               noa_len = READEF2BYTE((__le16 *)&ie[1]);
                if (ie + 3 + ie[1] > end)
                        return;
 
@@ -809,13 +809,13 @@ static void rtl_p2p_action_ie(struct ieee80211_hw *hw, void *data,
                                                         READEF1BYTE(ie+index);
                                        index += 1;
                                        p2pinfo->noa_duration[i] =
-                                                        READEF4BYTE(ie+index);
+                                                        READEF4BYTE((__le32 *)ie+index);
                                        index += 4;
                                        p2pinfo->noa_interval[i] =
-                                                        READEF4BYTE(ie+index);
+                                                        READEF4BYTE((__le32 *)ie+index);
                                        index += 4;
                                        p2pinfo->noa_start_time[i] =
-                                                        READEF4BYTE(ie+index);
+                                                        READEF4BYTE((__le32 *)ie+index);
                                        index += 4;
                                }
 
index f9f059dadb734af694c992bb8634fe866d317a61..a98acefb8c06a3802290c9130de368b8effcab83 100644 (file)
@@ -218,6 +218,7 @@ static void rtl_tx_status(void *ppriv,
 
 static void rtl_rate_init(void *ppriv,
                          struct ieee80211_supported_band *sband,
+                         struct cfg80211_chan_def *chandef,
                          struct ieee80211_sta *sta, void *priv_sta)
 {
 }
index 57e4cc5833a992525207914f0b1688db5e4cabc3..557bc5b8327eef6d9b998fcc90e52270d84e6077 100644 (file)
@@ -341,7 +341,7 @@ static void _rtl88e_fill_h2c_command(struct ieee80211_hw *hw,
                        wait_h2c_limit--;
                        if (wait_h2c_limit == 0) {
                                RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
-                                        "Wating too long for FW read "
+                                        "Waiting too long for FW read "
                                         "clear HMEBox(%d)!\n", boxnum);
                                break;
                        }
@@ -351,7 +351,7 @@ static void _rtl88e_fill_h2c_command(struct ieee80211_hw *hw,
                        isfw_read = _rtl88e_check_fw_read_last_h2c(hw, boxnum);
                        u1b_tmp = rtl_read_byte(rtlpriv, 0x130);
                        RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
-                                "Wating for FW read clear HMEBox(%d)!!! "
+                                "Waiting for FW read clear HMEBox(%d)!!! "
                                 "0x130 = %2x\n", boxnum, u1b_tmp);
                }
 
index 8e3ec1e25644688c469a38f2e057bbf90a6558cb..0f7812e0c8aa0c75d5382c52b4872431447c8108 100644 (file)
@@ -109,5 +109,8 @@ void rtl92c_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus);
 void rtl92c_fill_h2c_cmd(struct ieee80211_hw *hw,
                         u8 element_id, u32 cmd_len, u8 *p_cmdbuffer);
 bool rtl92cu_phy_mac_config(struct ieee80211_hw *hw);
+void rtl92cu_update_hal_rate_tbl(struct ieee80211_hw *hw,
+                                struct ieee80211_sta *sta,
+                                u8 rssi_level);
 
 #endif
index 262e1e4c6e5b007065ed2e834e139446566bde6f..a1310abd0d54605e6bbd1a1cc9f9200bc3ecbe43 100644 (file)
@@ -49,8 +49,5 @@ bool rtl92cu_phy_set_rf_power_state(struct ieee80211_hw *hw,
 u32 rtl92cu_phy_query_rf_reg(struct ieee80211_hw *hw,
                            enum radio_path rfpath, u32 regaddr, u32 bitmask);
 void rtl92cu_phy_set_bw_mode_callback(struct ieee80211_hw *hw);
-void rtl92cu_update_hal_rate_tbl(struct ieee80211_hw *hw,
-                                struct ieee80211_sta *sta,
-                                u8 rssi_level);
 
 #endif
index dedfa1ed3e02e66fa1b2ef687592709356c1e724..ba1502b172a6a05f3fb34aa0e97c7ef6a7260ac1 100644 (file)
@@ -330,7 +330,7 @@ static void _rtl8723ae_fill_h2c_command(struct ieee80211_hw *hw,
                        wait_h2c_limmit--;
                        if (wait_h2c_limmit == 0) {
                                RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
-                                        "Wating too long for FW read clear HMEBox(%d)!\n",
+                                        "Waiting too long for FW read clear HMEBox(%d)!\n",
                                         boxnum);
                                break;
                        }
@@ -340,7 +340,7 @@ static void _rtl8723ae_fill_h2c_command(struct ieee80211_hw *hw,
                        isfw_rd = rtl8723ae_check_fw_read_last_h2c(hw, boxnum);
                        u1tmp = rtl_read_byte(rtlpriv, 0x1BF);
                        RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
-                                "Wating for FW read clear HMEBox(%d)!!! "
+                                "Waiting for FW read clear HMEBox(%d)!!! "
                                 "0x1BF = %2x\n", boxnum, u1tmp);
                }
 
index b8db55c868c7b0027e96bf28f7174a00a308eb0f..d1b19c38a907ddba1d5dfe4106e8caa2ce49315c 100644 (file)
@@ -1315,7 +1315,7 @@ static struct sk_buff *wl12xx_alloc_dummy_packet(struct wl1271 *wl)
 
 #ifdef CONFIG_PM
 static int
-wl1271_validate_wowlan_pattern(struct cfg80211_wowlan_trig_pkt_pattern *p)
+wl1271_validate_wowlan_pattern(struct cfg80211_pkt_pattern *p)
 {
        int num_fields = 0, in_field = 0, fields_size = 0;
        int i, pattern_len = 0;
@@ -1458,9 +1458,9 @@ void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter *filter,
  * Allocates an RX filter returned through f
  * which needs to be freed using rx_filter_free()
  */
-static int wl1271_convert_wowlan_pattern_to_rx_filter(
-       struct cfg80211_wowlan_trig_pkt_pattern *p,
-       struct wl12xx_rx_filter **f)
+static int
+wl1271_convert_wowlan_pattern_to_rx_filter(struct cfg80211_pkt_pattern *p,
+                                          struct wl12xx_rx_filter **f)
 {
        int i, j, ret = 0;
        struct wl12xx_rx_filter *filter;
@@ -1562,7 +1562,7 @@ static int wl1271_configure_wowlan(struct wl1271 *wl,
 
        /* Translate WoWLAN patterns into filters */
        for (i = 0; i < wow->n_patterns; i++) {
-               struct cfg80211_wowlan_trig_pkt_pattern *p;
+               struct cfg80211_pkt_pattern *p;
                struct wl12xx_rx_filter *filter = NULL;
 
                p = &wow->patterns[i];
index f3442762d884b04def7e2bb2e8b8bee603b3f0ae..527590f2adfbe66b088a99c48d5ce382ff030efd 100644 (file)
@@ -356,7 +356,8 @@ out:
        return ret;
 }
 
-int wl1271_tm_cmd(struct ieee80211_hw *hw, void *data, int len)
+int wl1271_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+                 void *data, int len)
 {
        struct wl1271 *wl = hw->priv;
        struct nlattr *tb[WL1271_TM_ATTR_MAX + 1];
index 8071654259eacd59eaa9aba46da40329f5c6aec2..61d8434d859ab37121e5b7decf7f2e11597da00f 100644 (file)
@@ -26,6 +26,7 @@
 
 #include <net/mac80211.h>
 
-int wl1271_tm_cmd(struct ieee80211_hw *hw, void *data, int len);
+int wl1271_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+                 void *data, int len);
 
 #endif /* __WL1271_TESTMODE_H__ */
index 4941f201d6c8dc5a4b62446a9153a4384a3cb4f4..b8ba1f925e75521a2886b42a8a3d95dac69c0aa9 100644 (file)
@@ -98,10 +98,12 @@ static int zd1201_fw_upload(struct usb_device *dev, int apfw)
                goto exit;
 
        err = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), 0x4,
-           USB_DIR_IN | 0x40, 0,0, &ret, sizeof(ret), ZD1201_FW_TIMEOUT);
+           USB_DIR_IN | 0x40, 0, 0, buf, sizeof(ret), ZD1201_FW_TIMEOUT);
        if (err < 0)
                goto exit;
 
+       memcpy(&ret, buf, sizeof(ret));
+
        if (ret & 0x80) {
                err = -EIO;
                goto exit;
index c5c30fb1d7bfe8510175fcbf156d385d036e012f..9a53f13c88df50a705c84b89e4fe7c081dbe3c88 100644 (file)
@@ -60,7 +60,7 @@ struct nfcsim {
 static struct nfcsim *dev0;
 static struct nfcsim *dev1;
 
-struct workqueue_struct *wq;
+static struct workqueue_struct *wq;
 
 static void nfcsim_cleanup_dev(struct nfcsim *dev, u8 shutdown)
 {
@@ -481,7 +481,7 @@ static void nfcsim_free_device(struct nfcsim *dev)
        kfree(dev);
 }
 
-int __init nfcsim_init(void)
+static int __init nfcsim_init(void)
 {
        int rc;
 
@@ -522,7 +522,7 @@ exit:
        return rc;
 }
 
-void __exit nfcsim_exit(void)
+static void __exit nfcsim_exit(void)
 {
        nfcsim_cleanup_dev(dev0, 1);
        nfcsim_cleanup_dev(dev1, 1);
index daf92ac209f898d87ab2b3e08df379248da7e7a0..5df730be88a388ba28f705fde82fdce04b1ff88e 100644 (file)
@@ -83,12 +83,20 @@ MODULE_DEVICE_TABLE(usb, pn533_table);
 
 /* How much time we spend listening for initiators */
 #define PN533_LISTEN_TIME 2
+/* Delay between each poll frame (ms) */
+#define PN533_POLL_INTERVAL 10
 
-/* Standard pn533 frame definitions */
+/* Standard pn533 frame definitions (standard and extended)*/
 #define PN533_STD_FRAME_HEADER_LEN (sizeof(struct pn533_std_frame) \
                                        + 2) /* data[0] TFI, data[1] CC */
 #define PN533_STD_FRAME_TAIL_LEN 2 /* data[len] DCS, data[len + 1] postamble*/
 
+#define PN533_EXT_FRAME_HEADER_LEN (sizeof(struct pn533_ext_frame) \
+                                       + 2) /* data[0] TFI, data[1] CC */
+
+#define PN533_CMD_DATAEXCH_DATA_MAXLEN 262
+#define PN533_CMD_DATAFRAME_MAXLEN     240     /* max data length (send) */
+
 /*
  * Max extended frame payload len, excluding TFI and CC
  * which are already in PN533_FRAME_HEADER_LEN.
@@ -99,6 +107,10 @@ MODULE_DEVICE_TABLE(usb, pn533_table);
                                  Postamble (1) */
 #define PN533_STD_FRAME_CHECKSUM(f) (f->data[f->datalen])
 #define PN533_STD_FRAME_POSTAMBLE(f) (f->data[f->datalen + 1])
+/* Half start code (3), LEN (4) should be 0xffff for extended frame */
+#define PN533_STD_IS_EXTENDED(hdr) ((hdr)->datalen == 0xFF \
+                                       && (hdr)->datalen_checksum == 0xFF)
+#define PN533_EXT_FRAME_CHECKSUM(f) (f->data[be16_to_cpu(f->datalen)])
 
 /* start of frame */
 #define PN533_STD_FRAME_SOF 0x00FF
@@ -124,7 +136,7 @@ MODULE_DEVICE_TABLE(usb, pn533_table);
 #define PN533_ACR122_RDR_TO_PC_ESCAPE 0x83
 
 /* PN533 Commands */
-#define PN533_STD_FRAME_CMD(f) (f->data[1])
+#define PN533_FRAME_CMD(f) (f->data[1])
 
 #define PN533_CMD_GET_FIRMWARE_VERSION 0x02
 #define PN533_CMD_RF_CONFIGURATION 0x32
@@ -168,8 +180,9 @@ struct pn533_fw_version {
 #define PN533_CFGITEM_MAX_RETRIES 0x05
 #define PN533_CFGITEM_PASORI      0x82
 
-#define PN533_CFGITEM_RF_FIELD_ON  0x1
-#define PN533_CFGITEM_RF_FIELD_OFF 0x0
+#define PN533_CFGITEM_RF_FIELD_AUTO_RFCA 0x2
+#define PN533_CFGITEM_RF_FIELD_ON        0x1
+#define PN533_CFGITEM_RF_FIELD_OFF       0x0
 
 #define PN533_CONFIG_TIMING_102 0xb
 #define PN533_CONFIG_TIMING_204 0xc
@@ -257,7 +270,7 @@ static const struct pn533_poll_modulations poll_mod[] = {
                        .initiator_data.felica = {
                                .opcode = PN533_FELICA_OPC_SENSF_REQ,
                                .sc = PN533_FELICA_SENSF_SC_ALL,
-                               .rc = PN533_FELICA_SENSF_RC_NO_SYSTEM_CODE,
+                               .rc = PN533_FELICA_SENSF_RC_SYSTEM_CODE,
                                .tsn = 0x03,
                        },
                },
@@ -270,7 +283,7 @@ static const struct pn533_poll_modulations poll_mod[] = {
                        .initiator_data.felica = {
                                .opcode = PN533_FELICA_OPC_SENSF_REQ,
                                .sc = PN533_FELICA_SENSF_SC_ALL,
-                               .rc = PN533_FELICA_SENSF_RC_NO_SYSTEM_CODE,
+                               .rc = PN533_FELICA_SENSF_RC_SYSTEM_CODE,
                                .tsn = 0x03,
                        },
                 },
@@ -352,13 +365,16 @@ struct pn533 {
        struct urb *in_urb;
 
        struct sk_buff_head resp_q;
+       struct sk_buff_head fragment_skb;
 
        struct workqueue_struct *wq;
        struct work_struct cmd_work;
        struct work_struct cmd_complete_work;
-       struct work_struct poll_work;
-       struct work_struct mi_work;
+       struct delayed_work poll_work;
+       struct work_struct mi_rx_work;
+       struct work_struct mi_tx_work;
        struct work_struct tg_work;
+       struct work_struct rf_work;
 
        struct list_head cmd_queue;
        struct pn533_cmd *cmd;
@@ -366,6 +382,7 @@ struct pn533 {
        struct mutex cmd_lock;  /* protects cmd queue */
 
        void *cmd_complete_mi_arg;
+       void *cmd_complete_dep_arg;
 
        struct pn533_poll_modulations *poll_mod_active[PN533_POLL_MOD_MAX + 1];
        u8 poll_mod_count;
@@ -404,6 +421,15 @@ struct pn533_std_frame {
        u8 data[];
 } __packed;
 
+struct pn533_ext_frame {       /* Extended Information frame */
+       u8 preamble;
+       __be16 start_frame;
+       __be16 eif_flag;        /* fixed to 0xFFFF */
+       __be16 datalen;
+       u8 datalen_checksum;
+       u8 data[];
+} __packed;
+
 struct pn533_frame_ops {
        void (*tx_frame_init)(void *frame, u8 cmd_code);
        void (*tx_frame_finish)(void *frame);
@@ -411,7 +437,7 @@ struct pn533_frame_ops {
        int tx_header_len;
        int tx_tail_len;
 
-       bool (*rx_is_frame_valid)(void *frame);
+       bool (*rx_is_frame_valid)(void *frame, struct pn533 *dev);
        int (*rx_frame_size)(void *frame);
        int rx_header_len;
        int rx_tail_len;
@@ -486,7 +512,7 @@ static void pn533_acr122_tx_update_payload_len(void *_frame, int len)
        frame->datalen += len;
 }
 
-static bool pn533_acr122_is_rx_frame_valid(void *_frame)
+static bool pn533_acr122_is_rx_frame_valid(void *_frame, struct pn533 *dev)
 {
        struct pn533_acr122_rx_frame *frame = _frame;
 
@@ -511,7 +537,7 @@ static u8 pn533_acr122_get_cmd_code(void *frame)
 {
        struct pn533_acr122_rx_frame *f = frame;
 
-       return PN533_STD_FRAME_CMD(f);
+       return PN533_FRAME_CMD(f);
 }
 
 static struct pn533_frame_ops pn533_acr122_frame_ops = {
@@ -530,6 +556,12 @@ static struct pn533_frame_ops pn533_acr122_frame_ops = {
        .get_cmd_code = pn533_acr122_get_cmd_code,
 };
 
+/* The rule: value(high byte) + value(low byte) + checksum = 0 */
+static inline u8 pn533_ext_checksum(u16 value)
+{
+       return ~(u8)(((value & 0xFF00) >> 8) + (u8)(value & 0xFF)) + 1;
+}
+
 /* The rule: value + checksum = 0 */
 static inline u8 pn533_std_checksum(u8 value)
 {
@@ -555,7 +587,7 @@ static void pn533_std_tx_frame_init(void *_frame, u8 cmd_code)
        frame->preamble = 0;
        frame->start_frame = cpu_to_be16(PN533_STD_FRAME_SOF);
        PN533_STD_FRAME_IDENTIFIER(frame) = PN533_STD_FRAME_DIR_OUT;
-       PN533_STD_FRAME_CMD(frame) = cmd_code;
+       PN533_FRAME_CMD(frame) = cmd_code;
        frame->datalen = 2;
 }
 
@@ -578,21 +610,41 @@ static void pn533_std_tx_update_payload_len(void *_frame, int len)
        frame->datalen += len;
 }
 
-static bool pn533_std_rx_frame_is_valid(void *_frame)
+static bool pn533_std_rx_frame_is_valid(void *_frame, struct pn533 *dev)
 {
        u8 checksum;
-       struct pn533_std_frame *frame = _frame;
+       struct pn533_std_frame *stdf = _frame;
 
-       if (frame->start_frame != cpu_to_be16(PN533_STD_FRAME_SOF))
+       if (stdf->start_frame != cpu_to_be16(PN533_STD_FRAME_SOF))
                return false;
 
-       checksum = pn533_std_checksum(frame->datalen);
-       if (checksum != frame->datalen_checksum)
-               return false;
+       if (likely(!PN533_STD_IS_EXTENDED(stdf))) {
+               /* Standard frame code */
+               dev->ops->rx_header_len = PN533_STD_FRAME_HEADER_LEN;
 
-       checksum = pn533_std_data_checksum(frame->data, frame->datalen);
-       if (checksum != PN533_STD_FRAME_CHECKSUM(frame))
-               return false;
+               checksum = pn533_std_checksum(stdf->datalen);
+               if (checksum != stdf->datalen_checksum)
+                       return false;
+
+               checksum = pn533_std_data_checksum(stdf->data, stdf->datalen);
+               if (checksum != PN533_STD_FRAME_CHECKSUM(stdf))
+                       return false;
+       } else {
+               /* Extended */
+               struct pn533_ext_frame *eif = _frame;
+
+               dev->ops->rx_header_len = PN533_EXT_FRAME_HEADER_LEN;
+
+               checksum = pn533_ext_checksum(be16_to_cpu(eif->datalen));
+               if (checksum != eif->datalen_checksum)
+                       return false;
+
+               /* check data checksum */
+               checksum = pn533_std_data_checksum(eif->data,
+                                                  be16_to_cpu(eif->datalen));
+               if (checksum != PN533_EXT_FRAME_CHECKSUM(eif))
+                       return false;
+       }
 
        return true;
 }
@@ -612,6 +664,14 @@ static inline int pn533_std_rx_frame_size(void *frame)
 {
        struct pn533_std_frame *f = frame;
 
+       /* check for Extended Information frame */
+       if (PN533_STD_IS_EXTENDED(f)) {
+               struct pn533_ext_frame *eif = frame;
+
+               return sizeof(struct pn533_ext_frame)
+                       + be16_to_cpu(eif->datalen) + PN533_STD_FRAME_TAIL_LEN;
+       }
+
        return sizeof(struct pn533_std_frame) + f->datalen +
               PN533_STD_FRAME_TAIL_LEN;
 }
@@ -619,8 +679,12 @@ static inline int pn533_std_rx_frame_size(void *frame)
 static u8 pn533_std_get_cmd_code(void *frame)
 {
        struct pn533_std_frame *f = frame;
+       struct pn533_ext_frame *eif = frame;
 
-       return PN533_STD_FRAME_CMD(f);
+       if (PN533_STD_IS_EXTENDED(f))
+               return PN533_FRAME_CMD(eif);
+       else
+               return PN533_FRAME_CMD(f);
 }
 
 static struct pn533_frame_ops pn533_std_frame_ops = {
@@ -675,7 +739,7 @@ static void pn533_recv_response(struct urb *urb)
        print_hex_dump_debug("PN533 RX: ", DUMP_PREFIX_NONE, 16, 1, in_frame,
                             dev->ops->rx_frame_size(in_frame), false);
 
-       if (!dev->ops->rx_is_frame_valid(in_frame)) {
+       if (!dev->ops->rx_is_frame_valid(in_frame, dev)) {
                nfc_dev_err(&dev->interface->dev, "Received an invalid frame");
                cmd->status = -EIO;
                goto sched_wq;
@@ -1657,7 +1721,56 @@ static void pn533_listen_mode_timer(unsigned long data)
 
        pn533_poll_next_mod(dev);
 
-       queue_work(dev->wq, &dev->poll_work);
+       queue_delayed_work(dev->wq, &dev->poll_work,
+                          msecs_to_jiffies(PN533_POLL_INTERVAL));
+}
+
+static int pn533_rf_complete(struct pn533 *dev, void *arg,
+                            struct sk_buff *resp)
+{
+       int rc = 0;
+
+       nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+
+       if (IS_ERR(resp)) {
+               rc = PTR_ERR(resp);
+
+               nfc_dev_err(&dev->interface->dev, "%s RF setting error %d",
+                           __func__, rc);
+
+               return rc;
+       }
+
+       queue_delayed_work(dev->wq, &dev->poll_work,
+                          msecs_to_jiffies(PN533_POLL_INTERVAL));
+
+       dev_kfree_skb(resp);
+       return rc;
+}
+
+static void pn533_wq_rf(struct work_struct *work)
+{
+       struct pn533 *dev = container_of(work, struct pn533, rf_work);
+       struct sk_buff *skb;
+       int rc;
+
+       nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+
+       skb = pn533_alloc_skb(dev, 2);
+       if (!skb)
+               return;
+
+       *skb_put(skb, 1) = PN533_CFGITEM_RF_FIELD;
+       *skb_put(skb, 1) = PN533_CFGITEM_RF_FIELD_AUTO_RFCA;
+
+       rc = pn533_send_cmd_async(dev, PN533_CMD_RF_CONFIGURATION, skb,
+                                 pn533_rf_complete, NULL);
+       if (rc < 0) {
+               dev_kfree_skb(skb);
+               nfc_dev_err(&dev->interface->dev, "RF setting error %d", rc);
+       }
+
+       return;
 }
 
 static int pn533_poll_complete(struct pn533 *dev, void *arg,
@@ -1705,7 +1818,8 @@ static int pn533_poll_complete(struct pn533 *dev, void *arg,
        }
 
        pn533_poll_next_mod(dev);
-       queue_work(dev->wq, &dev->poll_work);
+       /* Not target found, turn radio off */
+       queue_work(dev->wq, &dev->rf_work);
 
 done:
        dev_kfree_skb(resp);
@@ -1770,7 +1884,7 @@ static int pn533_send_poll_frame(struct pn533 *dev)
 
 static void pn533_wq_poll(struct work_struct *work)
 {
-       struct pn533 *dev = container_of(work, struct pn533, poll_work);
+       struct pn533 *dev = container_of(work, struct pn533, poll_work.work);
        struct pn533_poll_modulations *cur_mod;
        int rc;
 
@@ -1799,6 +1913,7 @@ static int pn533_start_poll(struct nfc_dev *nfc_dev,
                            u32 im_protocols, u32 tm_protocols)
 {
        struct pn533 *dev = nfc_get_drvdata(nfc_dev);
+       u8 rand_mod;
 
        nfc_dev_dbg(&dev->interface->dev,
                    "%s: im protocols 0x%x tm protocols 0x%x",
@@ -1822,11 +1937,15 @@ static int pn533_start_poll(struct nfc_dev *nfc_dev,
                        tm_protocols = 0;
        }
 
-       dev->poll_mod_curr = 0;
        pn533_poll_create_mod_list(dev, im_protocols, tm_protocols);
        dev->poll_protocols = im_protocols;
        dev->listen_protocols = tm_protocols;
 
+       /* Do not always start polling from the same modulation */
+       get_random_bytes(&rand_mod, sizeof(rand_mod));
+       rand_mod %= dev->poll_mod_count;
+       dev->poll_mod_curr = rand_mod;
+
        return pn533_send_poll_frame(dev);
 }
 
@@ -1845,6 +1964,7 @@ static void pn533_stop_poll(struct nfc_dev *nfc_dev)
        }
 
        pn533_abort_cmd(dev, GFP_KERNEL);
+       flush_delayed_work(&dev->poll_work);
        pn533_poll_reset_mod_list(dev);
 }
 
@@ -2037,28 +2157,15 @@ error:
        return rc;
 }
 
-static int pn533_mod_to_baud(struct pn533 *dev)
-{
-       switch (dev->poll_mod_curr) {
-       case PN533_POLL_MOD_106KBPS_A:
-               return 0;
-       case PN533_POLL_MOD_212KBPS_FELICA:
-               return 1;
-       case PN533_POLL_MOD_424KBPS_FELICA:
-               return 2;
-       default:
-               return -EINVAL;
-       }
-}
-
+static int pn533_rf_field(struct nfc_dev *nfc_dev, u8 rf);
 #define PASSIVE_DATA_LEN 5
 static int pn533_dep_link_up(struct nfc_dev *nfc_dev, struct nfc_target *target,
                             u8 comm_mode, u8 *gb, size_t gb_len)
 {
        struct pn533 *dev = nfc_get_drvdata(nfc_dev);
        struct sk_buff *skb;
-       int rc, baud, skb_len;
-       u8 *next, *arg;
+       int rc, skb_len;
+       u8 *next, *arg, nfcid3[NFC_NFCID3_MAXSIZE];
 
        u8 passive_data[PASSIVE_DATA_LEN] = {0x00, 0xff, 0xff, 0x00, 0x3};
 
@@ -2076,41 +2183,39 @@ static int pn533_dep_link_up(struct nfc_dev *nfc_dev, struct nfc_target *target,
                return -EBUSY;
        }
 
-       baud = pn533_mod_to_baud(dev);
-       if (baud < 0) {
-               nfc_dev_err(&dev->interface->dev,
-                           "Invalid curr modulation %d", dev->poll_mod_curr);
-               return baud;
-       }
-
        skb_len = 3 + gb_len; /* ActPass + BR + Next */
-       if (comm_mode == NFC_COMM_PASSIVE)
-               skb_len += PASSIVE_DATA_LEN;
+       skb_len += PASSIVE_DATA_LEN;
 
-       if (target && target->nfcid2_len)
-               skb_len += NFC_NFCID3_MAXSIZE;
+       /* NFCID3 */
+       skb_len += NFC_NFCID3_MAXSIZE;
+       if (target && !target->nfcid2_len) {
+               nfcid3[0] = 0x1;
+               nfcid3[1] = 0xfe;
+               get_random_bytes(nfcid3 + 2, 6);
+       }
 
        skb = pn533_alloc_skb(dev, skb_len);
        if (!skb)
                return -ENOMEM;
 
        *skb_put(skb, 1) = !comm_mode;  /* ActPass */
-       *skb_put(skb, 1) = baud;  /* Baud rate */
+       *skb_put(skb, 1) = 0x02;  /* 424 kbps */
 
        next = skb_put(skb, 1);  /* Next */
        *next = 0;
 
-       if (comm_mode == NFC_COMM_PASSIVE && baud > 0) {
-               memcpy(skb_put(skb, PASSIVE_DATA_LEN), passive_data,
-                      PASSIVE_DATA_LEN);
-               *next |= 1;
-       }
+       /* Copy passive data */
+       memcpy(skb_put(skb, PASSIVE_DATA_LEN), passive_data, PASSIVE_DATA_LEN);
+       *next |= 1;
 
-       if (target && target->nfcid2_len) {
+       /* Copy NFCID3 (which is NFCID2 from SENSF_RES) */
+       if (target && target->nfcid2_len)
                memcpy(skb_put(skb, NFC_NFCID3_MAXSIZE), target->nfcid2,
                       target->nfcid2_len);
-               *next |= 2;
-       }
+       else
+               memcpy(skb_put(skb, NFC_NFCID3_MAXSIZE), nfcid3,
+                      NFC_NFCID3_MAXSIZE);
+       *next |= 2;
 
        if (gb != NULL && gb_len > 0) {
                memcpy(skb_put(skb, gb_len), gb, gb_len);
@@ -2127,6 +2232,8 @@ static int pn533_dep_link_up(struct nfc_dev *nfc_dev, struct nfc_target *target,
 
        *arg = !comm_mode;
 
+       pn533_rf_field(dev->nfc_dev, 0);
+
        rc = pn533_send_cmd_async(dev, PN533_CMD_IN_JUMP_FOR_DEP, skb,
                                  pn533_in_dep_link_up_complete, arg);
 
@@ -2232,7 +2339,15 @@ static int pn533_data_exchange_complete(struct pn533 *dev, void *_arg,
 
        if (mi) {
                dev->cmd_complete_mi_arg = arg;
-               queue_work(dev->wq, &dev->mi_work);
+               queue_work(dev->wq, &dev->mi_rx_work);
+               return -EINPROGRESS;
+       }
+
+       /* Prepare for the next round */
+       if (skb_queue_len(&dev->fragment_skb) > 0) {
+               dev->cmd_complete_dep_arg = arg;
+               queue_work(dev->wq, &dev->mi_tx_work);
+
                return -EINPROGRESS;
        }
 
@@ -2253,6 +2368,50 @@ _error:
        return rc;
 }
 
+/* Split the Tx skb into small chunks */
+static int pn533_fill_fragment_skbs(struct pn533 *dev, struct sk_buff *skb)
+{
+       struct sk_buff *frag;
+       int  frag_size;
+
+       do {
+               /* Remaining size */
+               if (skb->len > PN533_CMD_DATAFRAME_MAXLEN)
+                       frag_size = PN533_CMD_DATAFRAME_MAXLEN;
+               else
+                       frag_size = skb->len;
+
+               /* Allocate and reserve */
+               frag = pn533_alloc_skb(dev, frag_size);
+               if (!frag) {
+                       skb_queue_purge(&dev->fragment_skb);
+                       break;
+               }
+
+               /* Reserve the TG/MI byte */
+               skb_reserve(frag, 1);
+
+               /* MI + TG */
+               if (frag_size  == PN533_CMD_DATAFRAME_MAXLEN)
+                       *skb_push(frag, sizeof(u8)) = (PN533_CMD_MI_MASK | 1);
+               else
+                       *skb_push(frag, sizeof(u8)) =  1; /* TG */
+
+               memcpy(skb_put(frag, frag_size), skb->data, frag_size);
+
+               /* Reduce the size of incoming buffer */
+               skb_pull(skb, frag_size);
+
+               /* Add this to skb_queue */
+               skb_queue_tail(&dev->fragment_skb, frag);
+
+       } while (skb->len > 0);
+
+       dev_kfree_skb(skb);
+
+       return skb_queue_len(&dev->fragment_skb);
+}
+
 static int pn533_transceive(struct nfc_dev *nfc_dev,
                            struct nfc_target *target, struct sk_buff *skb,
                            data_exchange_cb_t cb, void *cb_context)
@@ -2263,15 +2422,6 @@ static int pn533_transceive(struct nfc_dev *nfc_dev,
 
        nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
 
-       if (skb->len > PN533_CMD_DATAEXCH_DATA_MAXLEN) {
-               /* TODO: Implement support to multi-part data exchange */
-               nfc_dev_err(&dev->interface->dev,
-                           "Data length greater than the max allowed: %d",
-                           PN533_CMD_DATAEXCH_DATA_MAXLEN);
-               rc = -ENOSYS;
-               goto error;
-       }
-
        if (!dev->tgt_active_prot) {
                nfc_dev_err(&dev->interface->dev,
                            "Can't exchange data if there is no active target");
@@ -2299,7 +2449,20 @@ static int pn533_transceive(struct nfc_dev *nfc_dev,
                        break;
                }
        default:
-               *skb_push(skb, sizeof(u8)) =  1; /*TG*/
+               /* jumbo frame ? */
+               if (skb->len > PN533_CMD_DATAEXCH_DATA_MAXLEN) {
+                       rc = pn533_fill_fragment_skbs(dev, skb);
+                       if (rc <= 0)
+                               goto error;
+
+                       skb = skb_dequeue(&dev->fragment_skb);
+                       if (!skb) {
+                               rc = -EIO;
+                               goto error;
+                       }
+               } else {
+                       *skb_push(skb, sizeof(u8)) =  1; /* TG */
+               }
 
                rc = pn533_send_data_async(dev, PN533_CMD_IN_DATA_EXCHANGE,
                                           skb, pn533_data_exchange_complete,
@@ -2370,7 +2533,7 @@ static int pn533_tm_send(struct nfc_dev *nfc_dev, struct sk_buff *skb)
 
 static void pn533_wq_mi_recv(struct work_struct *work)
 {
-       struct pn533 *dev = container_of(work, struct pn533, mi_work);
+       struct pn533 *dev = container_of(work, struct pn533, mi_rx_work);
 
        struct sk_buff *skb;
        int rc;
@@ -2418,6 +2581,61 @@ error:
        queue_work(dev->wq, &dev->cmd_work);
 }
 
+static void pn533_wq_mi_send(struct work_struct *work)
+{
+       struct pn533 *dev = container_of(work, struct pn533, mi_tx_work);
+       struct sk_buff *skb;
+       int rc;
+
+       nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+
+       /* Grab the first skb in the queue */
+       skb = skb_dequeue(&dev->fragment_skb);
+
+       if (skb == NULL) {      /* No more data */
+               /* Reset the queue for future use */
+               skb_queue_head_init(&dev->fragment_skb);
+               goto error;
+       }
+
+       switch (dev->device_type) {
+       case PN533_DEVICE_PASORI:
+               if (dev->tgt_active_prot != NFC_PROTO_FELICA) {
+                       rc = -EIO;
+                       break;
+               }
+
+               rc = pn533_send_cmd_direct_async(dev, PN533_CMD_IN_COMM_THRU,
+                                                skb,
+                                                pn533_data_exchange_complete,
+                                                dev->cmd_complete_dep_arg);
+
+               break;
+
+       default:
+               /* Still some fragments? */
+               rc = pn533_send_cmd_direct_async(dev,PN533_CMD_IN_DATA_EXCHANGE,
+                                                skb,
+                                                pn533_data_exchange_complete,
+                                                dev->cmd_complete_dep_arg);
+
+               break;
+       }
+
+       if (rc == 0) /* success */
+               return;
+
+       nfc_dev_err(&dev->interface->dev,
+                   "Error %d when trying to perform data_exchange", rc);
+
+       dev_kfree_skb(skb);
+       kfree(dev->cmd_complete_dep_arg);
+
+error:
+       pn533_send_ack(dev, GFP_KERNEL);
+       queue_work(dev->wq, &dev->cmd_work);
+}
+
 static int pn533_set_configuration(struct pn533 *dev, u8 cfgitem, u8 *cfgdata,
                                                                u8 cfgdata_len)
 {
@@ -2562,6 +2780,8 @@ static int pn533_rf_field(struct nfc_dev *nfc_dev, u8 rf)
        u8 rf_field = !!rf;
        int rc;
 
+       rf_field |= PN533_CFGITEM_RF_FIELD_AUTO_RFCA;
+
        rc = pn533_set_configuration(dev, PN533_CFGITEM_RF_FIELD,
                                     (u8 *)&rf_field, 1);
        if (rc) {
@@ -2605,17 +2825,6 @@ static int pn533_setup(struct pn533 *dev)
 
        switch (dev->device_type) {
        case PN533_DEVICE_STD:
-               max_retries.mx_rty_atr = PN533_CONFIG_MAX_RETRIES_ENDLESS;
-               max_retries.mx_rty_psl = 2;
-               max_retries.mx_rty_passive_act =
-                       PN533_CONFIG_MAX_RETRIES_NO_RETRY;
-
-               timing.rfu = PN533_CONFIG_TIMING_102;
-               timing.atr_res_timeout = PN533_CONFIG_TIMING_204;
-               timing.dep_timeout = PN533_CONFIG_TIMING_409;
-
-               break;
-
        case PN533_DEVICE_PASORI:
        case PN533_DEVICE_ACR122U:
                max_retries.mx_rty_atr = 0x2;
@@ -2729,9 +2938,11 @@ static int pn533_probe(struct usb_interface *interface,
 
        INIT_WORK(&dev->cmd_work, pn533_wq_cmd);
        INIT_WORK(&dev->cmd_complete_work, pn533_wq_cmd_complete);
-       INIT_WORK(&dev->mi_work, pn533_wq_mi_recv);
+       INIT_WORK(&dev->mi_rx_work, pn533_wq_mi_recv);
+       INIT_WORK(&dev->mi_tx_work, pn533_wq_mi_send);
        INIT_WORK(&dev->tg_work, pn533_wq_tg_get_data);
-       INIT_WORK(&dev->poll_work, pn533_wq_poll);
+       INIT_DELAYED_WORK(&dev->poll_work, pn533_wq_poll);
+       INIT_WORK(&dev->rf_work, pn533_wq_rf);
        dev->wq = alloc_ordered_workqueue("pn533", 0);
        if (dev->wq == NULL)
                goto error;
@@ -2741,6 +2952,7 @@ static int pn533_probe(struct usb_interface *interface,
        dev->listen_timer.function = pn533_listen_mode_timer;
 
        skb_queue_head_init(&dev->resp_q);
+       skb_queue_head_init(&dev->fragment_skb);
 
        INIT_LIST_HEAD(&dev->cmd_queue);
 
@@ -2842,6 +3054,7 @@ static void pn533_disconnect(struct usb_interface *interface)
        usb_kill_urb(dev->in_urb);
        usb_kill_urb(dev->out_urb);
 
+       flush_delayed_work(&dev->poll_work);
        destroy_workqueue(dev->wq);
 
        skb_queue_purge(&dev->resp_q);
index 8cf64c19f0229c97952ecd9f9fb689b29c6c3240..01e27d4bdd0d7abcfdc4d91c7f6963974f3b4e48 100644 (file)
 #include <linux/miscdevice.h>
 #include <linux/interrupt.h>
 #include <linux/delay.h>
-
+#include <linux/nfc.h>
+#include <linux/firmware.h>
+#include <linux/unaligned/access_ok.h>
 #include <linux/platform_data/pn544.h>
 
 #include <net/nfc/hci.h>
 #include <net/nfc/llc.h>
+#include <net/nfc/nfc.h>
 
 #include "pn544.h"
 
@@ -55,6 +58,58 @@ MODULE_DEVICE_TABLE(i2c, pn544_hci_i2c_id_table);
 
 #define PN544_HCI_I2C_DRIVER_NAME "pn544_hci_i2c"
 
+#define PN544_FW_CMD_WRITE 0x08
+#define PN544_FW_CMD_CHECK 0x06
+
+struct pn544_i2c_fw_frame_write {
+       u8 cmd;
+       u16 be_length;
+       u8 be_dest_addr[3];
+       u16 be_datalen;
+       u8 data[];
+} __packed;
+
+struct pn544_i2c_fw_frame_check {
+       u8 cmd;
+       u16 be_length;
+       u8 be_start_addr[3];
+       u16 be_datalen;
+       u16 be_crc;
+} __packed;
+
+struct pn544_i2c_fw_frame_response {
+       u8 status;
+       u16 be_length;
+} __packed;
+
+struct pn544_i2c_fw_blob {
+       u32 be_size;
+       u32 be_destaddr;
+       u8 data[];
+};
+
+#define PN544_FW_CMD_RESULT_TIMEOUT 0x01
+#define PN544_FW_CMD_RESULT_BAD_CRC 0x02
+#define PN544_FW_CMD_RESULT_ACCESS_DENIED 0x08
+#define PN544_FW_CMD_RESULT_PROTOCOL_ERROR 0x0B
+#define PN544_FW_CMD_RESULT_INVALID_PARAMETER 0x11
+#define PN544_FW_CMD_RESULT_INVALID_LENGTH 0x18
+#define PN544_FW_CMD_RESULT_WRITE_FAILED 0x74
+
+#define MIN(X, Y) ((X) < (Y) ? (X) : (Y))
+
+#define PN544_FW_WRITE_BUFFER_MAX_LEN 0x9f7
+#define PN544_FW_I2C_MAX_PAYLOAD PN544_HCI_I2C_LLC_MAX_SIZE
+#define PN544_FW_I2C_WRITE_FRAME_HEADER_LEN 8
+#define PN544_FW_I2C_WRITE_DATA_MAX_LEN MIN((PN544_FW_I2C_MAX_PAYLOAD -\
+                                        PN544_FW_I2C_WRITE_FRAME_HEADER_LEN),\
+                                        PN544_FW_WRITE_BUFFER_MAX_LEN)
+
+#define FW_WORK_STATE_IDLE 1
+#define FW_WORK_STATE_START 2
+#define FW_WORK_STATE_WAIT_WRITE_ANSWER 3
+#define FW_WORK_STATE_WAIT_CHECK_ANSWER 4
+
 struct pn544_i2c_phy {
        struct i2c_client *i2c_dev;
        struct nfc_hci_dev *hdev;
@@ -64,7 +119,18 @@ struct pn544_i2c_phy {
        unsigned int gpio_fw;
        unsigned int en_polarity;
 
+       struct work_struct fw_work;
+       int fw_work_state;
+       char firmware_name[NFC_FIRMWARE_NAME_MAXSIZE + 1];
+       const struct firmware *fw;
+       u32 fw_blob_dest_addr;
+       size_t fw_blob_size;
+       const u8 *fw_blob_data;
+       size_t fw_written;
+       int fw_cmd_result;
+
        int powered;
+       int run_mode;
 
        int hard_fault;         /*
                                 * < 0 if hardware error occured (e.g. i2c err)
@@ -122,15 +188,22 @@ out:
        gpio_set_value(phy->gpio_en, !phy->en_polarity);
 }
 
+static void pn544_hci_i2c_enable_mode(struct pn544_i2c_phy *phy, int run_mode)
+{
+       gpio_set_value(phy->gpio_fw, run_mode == PN544_FW_MODE ? 1 : 0);
+       gpio_set_value(phy->gpio_en, phy->en_polarity);
+       usleep_range(10000, 15000);
+
+       phy->run_mode = run_mode;
+}
+
 static int pn544_hci_i2c_enable(void *phy_id)
 {
        struct pn544_i2c_phy *phy = phy_id;
 
        pr_info(DRIVER_DESC ": %s\n", __func__);
 
-       gpio_set_value(phy->gpio_fw, 0);
-       gpio_set_value(phy->gpio_en, phy->en_polarity);
-       usleep_range(10000, 15000);
+       pn544_hci_i2c_enable_mode(phy, PN544_HCI_MODE);
 
        phy->powered = 1;
 
@@ -305,6 +378,42 @@ flush:
        return r;
 }
 
+static int pn544_hci_i2c_fw_read_status(struct pn544_i2c_phy *phy)
+{
+       int r;
+       struct pn544_i2c_fw_frame_response response;
+       struct i2c_client *client = phy->i2c_dev;
+
+       r = i2c_master_recv(client, (char *) &response, sizeof(response));
+       if (r != sizeof(response)) {
+               dev_err(&client->dev, "cannot read fw status\n");
+               return -EIO;
+       }
+
+       usleep_range(3000, 6000);
+
+       switch (response.status) {
+       case 0:
+               return 0;
+       case PN544_FW_CMD_RESULT_TIMEOUT:
+               return -ETIMEDOUT;
+       case PN544_FW_CMD_RESULT_BAD_CRC:
+               return -ENODATA;
+       case PN544_FW_CMD_RESULT_ACCESS_DENIED:
+               return -EACCES;
+       case PN544_FW_CMD_RESULT_PROTOCOL_ERROR:
+               return -EPROTO;
+       case PN544_FW_CMD_RESULT_INVALID_PARAMETER:
+               return -EINVAL;
+       case PN544_FW_CMD_RESULT_INVALID_LENGTH:
+               return -EBADMSG;
+       case PN544_FW_CMD_RESULT_WRITE_FAILED:
+               return -EIO;
+       default:
+               return -EIO;
+       }
+}
+
 /*
  * Reads an shdlc frame from the chip. This is not as straightforward as it
  * seems. There are cases where we could loose the frame start synchronization.
@@ -339,19 +448,23 @@ static irqreturn_t pn544_hci_i2c_irq_thread_fn(int irq, void *phy_id)
        if (phy->hard_fault != 0)
                return IRQ_HANDLED;
 
-       r = pn544_hci_i2c_read(phy, &skb);
-       if (r == -EREMOTEIO) {
-               phy->hard_fault = r;
+       if (phy->run_mode == PN544_FW_MODE) {
+               phy->fw_cmd_result = pn544_hci_i2c_fw_read_status(phy);
+               schedule_work(&phy->fw_work);
+       } else {
+               r = pn544_hci_i2c_read(phy, &skb);
+               if (r == -EREMOTEIO) {
+                       phy->hard_fault = r;
 
-               nfc_hci_recv_frame(phy->hdev, NULL);
+                       nfc_hci_recv_frame(phy->hdev, NULL);
 
-               return IRQ_HANDLED;
-       } else if ((r == -ENOMEM) || (r == -EBADMSG)) {
-               return IRQ_HANDLED;
-       }
-
-       nfc_hci_recv_frame(phy->hdev, skb);
+                       return IRQ_HANDLED;
+               } else if ((r == -ENOMEM) || (r == -EBADMSG)) {
+                       return IRQ_HANDLED;
+               }
 
+               nfc_hci_recv_frame(phy->hdev, skb);
+       }
        return IRQ_HANDLED;
 }
 
@@ -361,6 +474,215 @@ static struct nfc_phy_ops i2c_phy_ops = {
        .disable = pn544_hci_i2c_disable,
 };
 
+static int pn544_hci_i2c_fw_download(void *phy_id, const char *firmware_name)
+{
+       struct pn544_i2c_phy *phy = phy_id;
+
+       pr_info(DRIVER_DESC ": Starting Firmware Download (%s)\n",
+               firmware_name);
+
+       strcpy(phy->firmware_name, firmware_name);
+
+       phy->fw_work_state = FW_WORK_STATE_START;
+
+       schedule_work(&phy->fw_work);
+
+       return 0;
+}
+
+static void pn544_hci_i2c_fw_work_complete(struct pn544_i2c_phy *phy,
+                                          int result)
+{
+       pr_info(DRIVER_DESC ": Firmware Download Complete, result=%d\n", result);
+
+       pn544_hci_i2c_disable(phy);
+
+       phy->fw_work_state = FW_WORK_STATE_IDLE;
+
+       if (phy->fw) {
+               release_firmware(phy->fw);
+               phy->fw = NULL;
+       }
+
+       nfc_fw_download_done(phy->hdev->ndev, phy->firmware_name, (u32) -result);
+}
+
+static int pn544_hci_i2c_fw_write_cmd(struct i2c_client *client, u32 dest_addr,
+                                     const u8 *data, u16 datalen)
+{
+       u8 frame[PN544_FW_I2C_MAX_PAYLOAD];
+       struct pn544_i2c_fw_frame_write *framep;
+       u16 params_len;
+       int framelen;
+       int r;
+
+       if (datalen > PN544_FW_I2C_WRITE_DATA_MAX_LEN)
+               datalen = PN544_FW_I2C_WRITE_DATA_MAX_LEN;
+
+       framep = (struct pn544_i2c_fw_frame_write *) frame;
+
+       params_len = sizeof(framep->be_dest_addr) +
+                    sizeof(framep->be_datalen) + datalen;
+       framelen = params_len + sizeof(framep->cmd) +
+                            sizeof(framep->be_length);
+
+       framep->cmd = PN544_FW_CMD_WRITE;
+
+       put_unaligned_be16(params_len, &framep->be_length);
+
+       framep->be_dest_addr[0] = (dest_addr & 0xff0000) >> 16;
+       framep->be_dest_addr[1] = (dest_addr & 0xff00) >> 8;
+       framep->be_dest_addr[2] = dest_addr & 0xff;
+
+       put_unaligned_be16(datalen, &framep->be_datalen);
+
+       memcpy(framep->data, data, datalen);
+
+       r = i2c_master_send(client, frame, framelen);
+
+       if (r == framelen)
+               return datalen;
+       else if (r < 0)
+               return r;
+       else
+               return -EIO;
+}
+
+static int pn544_hci_i2c_fw_check_cmd(struct i2c_client *client, u32 start_addr,
+                                     const u8 *data, u16 datalen)
+{
+       struct pn544_i2c_fw_frame_check frame;
+       int r;
+       u16 crc;
+
+       /* calculate local crc for the data we want to check */
+       crc = crc_ccitt(0xffff, data, datalen);
+
+       frame.cmd = PN544_FW_CMD_CHECK;
+
+       put_unaligned_be16(sizeof(frame.be_start_addr) +
+                          sizeof(frame.be_datalen) + sizeof(frame.be_crc),
+                          &frame.be_length);
+
+       /* tell the chip the memory region to which our crc applies */
+       frame.be_start_addr[0] = (start_addr & 0xff0000) >> 16;
+       frame.be_start_addr[1] = (start_addr & 0xff00) >> 8;
+       frame.be_start_addr[2] = start_addr & 0xff;
+
+       put_unaligned_be16(datalen, &frame.be_datalen);
+
+       /*
+        * and give our local crc. Chip will calculate its own crc for the
+        * region and compare with ours.
+        */
+       put_unaligned_be16(crc, &frame.be_crc);
+
+       r = i2c_master_send(client, (const char *) &frame, sizeof(frame));
+
+       if (r == sizeof(frame))
+               return 0;
+       else if (r < 0)
+               return r;
+       else
+               return -EIO;
+}
+
+static int pn544_hci_i2c_fw_write_chunk(struct pn544_i2c_phy *phy)
+{
+       int r;
+
+       r = pn544_hci_i2c_fw_write_cmd(phy->i2c_dev,
+                                      phy->fw_blob_dest_addr + phy->fw_written,
+                                      phy->fw_blob_data + phy->fw_written,
+                                      phy->fw_blob_size - phy->fw_written);
+       if (r < 0)
+               return r;
+
+       phy->fw_written += r;
+       phy->fw_work_state = FW_WORK_STATE_WAIT_WRITE_ANSWER;
+
+       return 0;
+}
+
+static void pn544_hci_i2c_fw_work(struct work_struct *work)
+{
+       struct pn544_i2c_phy *phy = container_of(work, struct pn544_i2c_phy,
+                                               fw_work);
+       int r;
+       struct pn544_i2c_fw_blob *blob;
+
+       switch (phy->fw_work_state) {
+       case FW_WORK_STATE_START:
+               pn544_hci_i2c_enable_mode(phy, PN544_FW_MODE);
+
+               r = request_firmware(&phy->fw, phy->firmware_name,
+                                    &phy->i2c_dev->dev);
+               if (r < 0)
+                       goto exit_state_start;
+
+               blob = (struct pn544_i2c_fw_blob *) phy->fw->data;
+               phy->fw_blob_size = get_unaligned_be32(&blob->be_size);
+               phy->fw_blob_dest_addr = get_unaligned_be32(&blob->be_destaddr);
+               phy->fw_blob_data = blob->data;
+
+               phy->fw_written = 0;
+               r = pn544_hci_i2c_fw_write_chunk(phy);
+
+exit_state_start:
+               if (r < 0)
+                       pn544_hci_i2c_fw_work_complete(phy, r);
+               break;
+
+       case FW_WORK_STATE_WAIT_WRITE_ANSWER:
+               r = phy->fw_cmd_result;
+               if (r < 0)
+                       goto exit_state_wait_write_answer;
+
+               if (phy->fw_written == phy->fw_blob_size) {
+                       r = pn544_hci_i2c_fw_check_cmd(phy->i2c_dev,
+                                                      phy->fw_blob_dest_addr,
+                                                      phy->fw_blob_data,
+                                                      phy->fw_blob_size);
+                       if (r < 0)
+                               goto exit_state_wait_write_answer;
+                       phy->fw_work_state = FW_WORK_STATE_WAIT_CHECK_ANSWER;
+                       break;
+               }
+
+               r = pn544_hci_i2c_fw_write_chunk(phy);
+
+exit_state_wait_write_answer:
+               if (r < 0)
+                       pn544_hci_i2c_fw_work_complete(phy, r);
+               break;
+
+       case FW_WORK_STATE_WAIT_CHECK_ANSWER:
+               r = phy->fw_cmd_result;
+               if (r < 0)
+                       goto exit_state_wait_check_answer;
+
+               blob = (struct pn544_i2c_fw_blob *) (phy->fw_blob_data +
+                      phy->fw_blob_size);
+               phy->fw_blob_size = get_unaligned_be32(&blob->be_size);
+               if (phy->fw_blob_size != 0) {
+                       phy->fw_blob_dest_addr =
+                                       get_unaligned_be32(&blob->be_destaddr);
+                       phy->fw_blob_data = blob->data;
+
+                       phy->fw_written = 0;
+                       r = pn544_hci_i2c_fw_write_chunk(phy);
+               }
+
+exit_state_wait_check_answer:
+               if (r < 0 || phy->fw_blob_size == 0)
+                       pn544_hci_i2c_fw_work_complete(phy, r);
+               break;
+
+       default:
+               break;
+       }
+}
+
 static int pn544_hci_i2c_probe(struct i2c_client *client,
                               const struct i2c_device_id *id)
 {
@@ -384,6 +706,9 @@ static int pn544_hci_i2c_probe(struct i2c_client *client,
                return -ENOMEM;
        }
 
+       INIT_WORK(&phy->fw_work, pn544_hci_i2c_fw_work);
+       phy->fw_work_state = FW_WORK_STATE_IDLE;
+
        phy->i2c_dev = client;
        i2c_set_clientdata(client, phy);
 
@@ -420,7 +745,8 @@ static int pn544_hci_i2c_probe(struct i2c_client *client,
 
        r = pn544_hci_probe(phy, &i2c_phy_ops, LLC_SHDLC_NAME,
                            PN544_I2C_FRAME_HEADROOM, PN544_I2C_FRAME_TAILROOM,
-                           PN544_HCI_I2C_LLC_MAX_PAYLOAD, &phy->hdev);
+                           PN544_HCI_I2C_LLC_MAX_PAYLOAD,
+                           pn544_hci_i2c_fw_download, &phy->hdev);
        if (r < 0)
                goto err_hci;
 
@@ -443,6 +769,10 @@ static int pn544_hci_i2c_remove(struct i2c_client *client)
 
        dev_dbg(&client->dev, "%s\n", __func__);
 
+       cancel_work_sync(&phy->fw_work);
+       if (phy->fw_work_state != FW_WORK_STATE_IDLE)
+               pn544_hci_i2c_fw_work_complete(phy, -ENODEV);
+
        pn544_hci_remove(phy->hdev);
 
        if (phy->powered)
index b5d3d18179eb1da92d35700a8f06fd91ad6a7666..ee67de50c36fc47d57551db072f6c9a474ae8afc 100644 (file)
@@ -45,7 +45,7 @@ static int pn544_mei_probe(struct mei_cl_device *device,
 
        r = pn544_hci_probe(phy, &mei_phy_ops, LLC_NOP_NAME,
                            MEI_NFC_HEADER_SIZE, 0, MEI_NFC_MAX_HCI_PAYLOAD,
-                           &phy->hdev);
+                           NULL, &phy->hdev);
        if (r < 0) {
                nfc_mei_phy_free(phy);
 
index 0d17da7675b75b245a26d79498fc58418054f71d..078e62feba1715e9c4cc3bfd246e6c947082735b 100644 (file)
@@ -31,9 +31,6 @@
 /* Timing restrictions (ms) */
 #define PN544_HCI_RESETVEN_TIME                30
 
-#define HCI_MODE 0
-#define FW_MODE 1
-
 enum pn544_state {
        PN544_ST_COLD,
        PN544_ST_FW_READY,
@@ -130,6 +127,8 @@ struct pn544_hci_info {
        int async_cb_type;
        data_exchange_cb_t async_cb;
        void *async_cb_context;
+
+       fw_download_t fw_download;
 };
 
 static int pn544_hci_open(struct nfc_hci_dev *hdev)
@@ -782,6 +781,17 @@ exit:
        return r;
 }
 
+static int pn544_hci_fw_download(struct nfc_hci_dev *hdev,
+                                const char *firmware_name)
+{
+       struct pn544_hci_info *info = nfc_hci_get_clientdata(hdev);
+
+       if (info->fw_download == NULL)
+               return -ENOTSUPP;
+
+       return info->fw_download(info->phy_id, firmware_name);
+}
+
 static struct nfc_hci_ops pn544_hci_ops = {
        .open = pn544_hci_open,
        .close = pn544_hci_close,
@@ -796,11 +806,12 @@ static struct nfc_hci_ops pn544_hci_ops = {
        .tm_send = pn544_hci_tm_send,
        .check_presence = pn544_hci_check_presence,
        .event_received = pn544_hci_event_received,
+       .fw_download = pn544_hci_fw_download,
 };
 
 int pn544_hci_probe(void *phy_id, struct nfc_phy_ops *phy_ops, char *llc_name,
                    int phy_headroom, int phy_tailroom, int phy_payload,
-                   struct nfc_hci_dev **hdev)
+                   fw_download_t fw_download, struct nfc_hci_dev **hdev)
 {
        struct pn544_hci_info *info;
        u32 protocols;
@@ -816,6 +827,7 @@ int pn544_hci_probe(void *phy_id, struct nfc_phy_ops *phy_ops, char *llc_name,
 
        info->phy_ops = phy_ops;
        info->phy_id = phy_id;
+       info->fw_download = fw_download;
        info->state = PN544_ST_COLD;
        mutex_init(&info->info_lock);
 
index f47c6454914b512a2da5acae0b5a40b06f747dbd..01020e5854437e29b5bb254e14b2e920eefb29fc 100644 (file)
 
 #define DRIVER_DESC "HCI NFC driver for PN544"
 
+#define PN544_HCI_MODE 0
+#define PN544_FW_MODE 1
+
+typedef int (*fw_download_t)(void *context, const char *firmware_name);
+
 int pn544_hci_probe(void *phy_id, struct nfc_phy_ops *phy_ops, char *llc_name,
                    int phy_headroom, int phy_tailroom, int phy_payload,
-                   struct nfc_hci_dev **hdev);
+                   fw_download_t fw_download, struct nfc_hci_dev **hdev);
 void pn544_hci_remove(struct nfc_hci_dev *hdev);
 
 #endif /* __LOCAL_PN544_H_ */
index 6bb7cf2de556b559d1f54f9d1c7c3ff297138a3a..3f473d158d79567447ae720dd8d4bb48cf1e53b6 100644 (file)
@@ -550,7 +550,8 @@ int __init of_flat_dt_match(unsigned long node, const char *const *compat)
  */
 void __init early_init_dt_check_for_initrd(unsigned long node)
 {
-       unsigned long start, end, len;
+       u64 start, end;
+       unsigned long len;
        __be32 *prop;
 
        pr_debug("Looking for initrd properties... ");
@@ -558,15 +559,16 @@ void __init early_init_dt_check_for_initrd(unsigned long node)
        prop = of_get_flat_dt_prop(node, "linux,initrd-start", &len);
        if (!prop)
                return;
-       start = of_read_ulong(prop, len/4);
+       start = of_read_number(prop, len/4);
 
        prop = of_get_flat_dt_prop(node, "linux,initrd-end", &len);
        if (!prop)
                return;
-       end = of_read_ulong(prop, len/4);
+       end = of_read_number(prop, len/4);
 
        early_init_dt_setup_initrd_arch(start, end);
-       pr_debug("initrd_start=0x%lx  initrd_end=0x%lx\n", start, end);
+       pr_debug("initrd_start=0x%llx  initrd_end=0x%llx\n",
+                (unsigned long long)start, (unsigned long long)end);
 }
 #else
 inline void early_init_dt_check_for_initrd(unsigned long node)
index e0a6514ab46c20eb902453c3b321774d1e787ba0..b0d1ff8b09917dbcd197ec1a50ac0bcd1f93fd4a 100644 (file)
@@ -196,7 +196,7 @@ EXPORT_SYMBOL(of_device_alloc);
  * Returns pointer to created platform device, or NULL if a device was not
  * registered.  Unavailable devices will not get registered.
  */
-struct platform_device *of_platform_device_create_pdata(
+static struct platform_device *of_platform_device_create_pdata(
                                        struct device_node *np,
                                        const char *bus_id,
                                        void *platform_data,
index 1184ff6fe864823fd427d35ff8c34aaa8fe585b4..e5ba4eb4e5b3f74c114cc2635b2e44cc9c16f74a 100644 (file)
@@ -4,6 +4,7 @@ menu "PCI host controller drivers"
 config PCI_MVEBU
        bool "Marvell EBU PCIe controller"
        depends on ARCH_MVEBU || ARCH_KIRKWOOD
+       depends on OF
 
 config PCIE_DW
        bool
index 086d8500e849d36db4684dac6d462d71af291f43..ab79ccb5bbff623da1ed2191e8c0b3c22a3f0536 100644 (file)
@@ -1,2 +1,3 @@
-obj-$(CONFIG_PCI_MVEBU) += pci-mvebu.o
 obj-$(CONFIG_PCIE_DW) += pcie-designware.o
+obj-$(CONFIG_PCI_EXYNOS) += pci-exynos.o
+obj-$(CONFIG_PCI_MVEBU) += pci-mvebu.o
diff --git a/drivers/pci/host/pci-exynos.c b/drivers/pci/host/pci-exynos.c
new file mode 100644 (file)
index 0000000..012ca8a
--- /dev/null
@@ -0,0 +1,530 @@
+/*
+ * PCIe host controller driver for Samsung EXYNOS SoCs
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ *             http://www.samsung.com
+ *
+ * Author: Jingoo Han <jg1.han@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_gpio.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/resource.h>
+#include <linux/signal.h>
+#include <linux/types.h>
+
+#include "pcie-designware.h"
+
+#define to_exynos_pcie(x)      container_of(x, struct exynos_pcie, pp)
+
+struct exynos_pcie {
+       void __iomem            *elbi_base;
+       void __iomem            *phy_base;
+       void __iomem            *block_base;
+       int                     reset_gpio;
+       struct clk              *clk;
+       struct clk              *bus_clk;
+       struct pcie_port        pp;
+};
+
+/* PCIe ELBI registers */
+#define PCIE_IRQ_PULSE                 0x000
+#define IRQ_INTA_ASSERT                        (0x1 << 0)
+#define IRQ_INTB_ASSERT                        (0x1 << 2)
+#define IRQ_INTC_ASSERT                        (0x1 << 4)
+#define IRQ_INTD_ASSERT                        (0x1 << 6)
+#define PCIE_IRQ_LEVEL                 0x004
+#define PCIE_IRQ_SPECIAL               0x008
+#define PCIE_IRQ_EN_PULSE              0x00c
+#define PCIE_IRQ_EN_LEVEL              0x010
+#define PCIE_IRQ_EN_SPECIAL            0x014
+#define PCIE_PWR_RESET                 0x018
+#define PCIE_CORE_RESET                        0x01c
+#define PCIE_CORE_RESET_ENABLE         (0x1 << 0)
+#define PCIE_STICKY_RESET              0x020
+#define PCIE_NONSTICKY_RESET           0x024
+#define PCIE_APP_INIT_RESET            0x028
+#define PCIE_APP_LTSSM_ENABLE          0x02c
+#define PCIE_ELBI_RDLH_LINKUP          0x064
+#define PCIE_ELBI_LTSSM_ENABLE         0x1
+#define PCIE_ELBI_SLV_AWMISC           0x11c
+#define PCIE_ELBI_SLV_ARMISC           0x120
+#define PCIE_ELBI_SLV_DBI_ENABLE       (0x1 << 21)
+
+/* PCIe Purple registers */
+#define PCIE_PHY_GLOBAL_RESET          0x000
+#define PCIE_PHY_COMMON_RESET          0x004
+#define PCIE_PHY_CMN_REG               0x008
+#define PCIE_PHY_MAC_RESET             0x00c
+#define PCIE_PHY_PLL_LOCKED            0x010
+#define PCIE_PHY_TRSVREG_RESET         0x020
+#define PCIE_PHY_TRSV_RESET            0x024
+
+/* PCIe PHY registers */
+#define PCIE_PHY_IMPEDANCE             0x004
+#define PCIE_PHY_PLL_DIV_0             0x008
+#define PCIE_PHY_PLL_BIAS              0x00c
+#define PCIE_PHY_DCC_FEEDBACK          0x014
+#define PCIE_PHY_PLL_DIV_1             0x05c
+#define PCIE_PHY_TRSV0_EMP_LVL         0x084
+#define PCIE_PHY_TRSV0_DRV_LVL         0x088
+#define PCIE_PHY_TRSV0_RXCDR           0x0ac
+#define PCIE_PHY_TRSV0_LVCC            0x0dc
+#define PCIE_PHY_TRSV1_EMP_LVL         0x144
+#define PCIE_PHY_TRSV1_RXCDR           0x16c
+#define PCIE_PHY_TRSV1_LVCC            0x19c
+#define PCIE_PHY_TRSV2_EMP_LVL         0x204
+#define PCIE_PHY_TRSV2_RXCDR           0x22c
+#define PCIE_PHY_TRSV2_LVCC            0x25c
+#define PCIE_PHY_TRSV3_EMP_LVL         0x2c4
+#define PCIE_PHY_TRSV3_RXCDR           0x2ec
+#define PCIE_PHY_TRSV3_LVCC            0x31c
+
+/* Enable/disable routing of DBI write accesses through the ELBI sideband */
+static void exynos_pcie_sideband_dbi_w_mode(struct pcie_port *pp, bool on)
+{
+       struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
+       void __iomem *reg = exynos_pcie->elbi_base + PCIE_ELBI_SLV_AWMISC;
+       u32 val = readl(reg);
+
+       if (on)
+               val |= PCIE_ELBI_SLV_DBI_ENABLE;
+       else
+               val &= ~PCIE_ELBI_SLV_DBI_ENABLE;
+       writel(val, reg);
+}
+
+/* Enable/disable routing of DBI read accesses through the ELBI sideband */
+static void exynos_pcie_sideband_dbi_r_mode(struct pcie_port *pp, bool on)
+{
+       struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
+       void __iomem *reg = exynos_pcie->elbi_base + PCIE_ELBI_SLV_ARMISC;
+       u32 val = readl(reg);
+
+       if (on)
+               val |= PCIE_ELBI_SLV_DBI_ENABLE;
+       else
+               val &= ~PCIE_ELBI_SLV_DBI_ENABLE;
+       writel(val, reg);
+}
+
+/*
+ * Hold the DesignWare core in reset: clear CORE_RESET_ENABLE and write 0 to
+ * the power / sticky / non-sticky reset lines in the ELBI glue.
+ * NOTE(review): reset-line polarity is SoC-glue specific; the "assert"
+ * naming assumes 0 = reset held - confirm against the Exynos5440 manual.
+ */
+static void exynos_pcie_assert_core_reset(struct pcie_port *pp)
+{
+       u32 val;
+       struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
+       void __iomem *elbi_base = exynos_pcie->elbi_base;
+
+       val = readl(elbi_base + PCIE_CORE_RESET);
+       val &= ~PCIE_CORE_RESET_ENABLE;
+       writel(val, elbi_base + PCIE_CORE_RESET);
+       writel(0, elbi_base + PCIE_PWR_RESET);
+       writel(0, elbi_base + PCIE_STICKY_RESET);
+       writel(0, elbi_base + PCIE_NONSTICKY_RESET);
+}
+
+/*
+ * Release the core from reset (set CORE_RESET_ENABLE, restore sticky /
+ * non-sticky lines), pulse the application-init reset, then release the
+ * PHY MAC reset in the block ("purple") register space.
+ */
+static void exynos_pcie_deassert_core_reset(struct pcie_port *pp)
+{
+       u32 val;
+       struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
+       void __iomem *elbi_base = exynos_pcie->elbi_base;
+       void __iomem *block_base = exynos_pcie->block_base;
+
+       val = readl(elbi_base + PCIE_CORE_RESET);
+       val |= PCIE_CORE_RESET_ENABLE;
+       writel(val, elbi_base + PCIE_CORE_RESET);
+       writel(1, elbi_base + PCIE_STICKY_RESET);
+       writel(1, elbi_base + PCIE_NONSTICKY_RESET);
+       /* 1-then-0 pulse of the app-init reset */
+       writel(1, elbi_base + PCIE_APP_INIT_RESET);
+       writel(0, elbi_base + PCIE_APP_INIT_RESET);
+       writel(1, block_base + PCIE_PHY_MAC_RESET);
+}
+
+/* Assert the PHY resets: drop the MAC reset and raise the global reset */
+static void exynos_pcie_assert_phy_reset(struct pcie_port *pp)
+{
+       struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
+       void __iomem *block_base = exynos_pcie->block_base;
+
+       writel(0, block_base + PCIE_PHY_MAC_RESET);
+       writel(1, block_base + PCIE_PHY_GLOBAL_RESET);
+}
+
+/*
+ * Release the PHY resets (global, common, trsv-reg, trsv) and raise the
+ * ELBI power reset so the PHY can be programmed by exynos_pcie_init_phy().
+ */
+static void exynos_pcie_deassert_phy_reset(struct pcie_port *pp)
+{
+       struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
+       void __iomem *elbi_base = exynos_pcie->elbi_base;
+       void __iomem *block_base = exynos_pcie->block_base;
+
+       writel(0, block_base + PCIE_PHY_GLOBAL_RESET);
+       writel(1, elbi_base + PCIE_PWR_RESET);
+       writel(0, block_base + PCIE_PHY_COMMON_RESET);
+       writel(0, block_base + PCIE_PHY_CMN_REG);
+       writel(0, block_base + PCIE_PHY_TRSVREG_RESET);
+       writel(0, block_base + PCIE_PHY_TRSV_RESET);
+}
+
+/*
+ * Program the PCIe PHY tuning registers.  The magic values are
+ * vendor-supplied settings for the Exynos5440 PHY; do not "clean up"
+ * without silicon documentation.  Note TRSV0_EMP_LVL is written twice:
+ * first to its documented minimum, then to the operating value after the
+ * CDR bandwidth is configured - presumably a required programming order;
+ * confirm against the PHY datasheet.
+ */
+static void exynos_pcie_init_phy(struct pcie_port *pp)
+{
+       struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
+       void __iomem *phy_base = exynos_pcie->phy_base;
+
+       /* DCC feedback control off */
+       writel(0x29, phy_base + PCIE_PHY_DCC_FEEDBACK);
+
+       /* set TX/RX impedance */
+       writel(0xd5, phy_base + PCIE_PHY_IMPEDANCE);
+
+       /* set 50Mhz PHY clock */
+       writel(0x14, phy_base + PCIE_PHY_PLL_DIV_0);
+       writel(0x12, phy_base + PCIE_PHY_PLL_DIV_1);
+
+       /* set TX Differential output for lane 0 */
+       writel(0x7f, phy_base + PCIE_PHY_TRSV0_DRV_LVL);
+
+       /* set TX Pre-emphasis Level Control for lane 0 to minimum */
+       writel(0x0, phy_base + PCIE_PHY_TRSV0_EMP_LVL);
+
+       /* set RX clock and data recovery bandwidth */
+       writel(0xe7, phy_base + PCIE_PHY_PLL_BIAS);
+       writel(0x82, phy_base + PCIE_PHY_TRSV0_RXCDR);
+       writel(0x82, phy_base + PCIE_PHY_TRSV1_RXCDR);
+       writel(0x82, phy_base + PCIE_PHY_TRSV2_RXCDR);
+       writel(0x82, phy_base + PCIE_PHY_TRSV3_RXCDR);
+
+       /* change TX Pre-emphasis Level Control for lanes */
+       writel(0x39, phy_base + PCIE_PHY_TRSV0_EMP_LVL);
+       writel(0x39, phy_base + PCIE_PHY_TRSV1_EMP_LVL);
+       writel(0x39, phy_base + PCIE_PHY_TRSV2_EMP_LVL);
+       writel(0x39, phy_base + PCIE_PHY_TRSV3_EMP_LVL);
+
+       /* set LVCC */
+       writel(0x20, phy_base + PCIE_PHY_TRSV0_LVCC);
+       writel(0xa0, phy_base + PCIE_PHY_TRSV1_LVCC);
+       writel(0xa0, phy_base + PCIE_PHY_TRSV2_LVCC);
+       writel(0xa0, phy_base + PCIE_PHY_TRSV3_LVCC);
+}
+
+/*
+ * Drive the optional PERST# GPIO high (releases the endpoint from reset).
+ * reset_gpio is negative when the DT "reset-gpio" property is absent.
+ * The original silently ignored devm_gpio_request_one() failures; log them
+ * so a mis-described GPIO is visible, but keep link bring-up best-effort.
+ */
+static void exynos_pcie_assert_reset(struct pcie_port *pp)
+{
+       struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
+       int ret;
+
+       if (exynos_pcie->reset_gpio >= 0) {
+               ret = devm_gpio_request_one(pp->dev, exynos_pcie->reset_gpio,
+                               GPIOF_OUT_INIT_HIGH, "RESET");
+               if (ret)
+                       dev_err(pp->dev, "cannot request reset gpio\n");
+       }
+}
+
+/*
+ * Full link bring-up: reset core and PHY, program the PHY, release the
+ * resets, configure the RC, release PERST#, then enable the LTSSM and
+ * poll (up to ~1s) for link-up.  Returns 0 on success, -EINVAL on timeout.
+ */
+static int exynos_pcie_establish_link(struct pcie_port *pp)
+{
+       u32 val;
+       int count = 0;
+       struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
+       void __iomem *elbi_base = exynos_pcie->elbi_base;
+       void __iomem *block_base = exynos_pcie->block_base;
+
+       if (dw_pcie_link_up(pp)) {
+               dev_err(pp->dev, "Link already up\n");
+               return 0;
+       }
+
+       /* assert reset signals */
+       exynos_pcie_assert_core_reset(pp);
+       exynos_pcie_assert_phy_reset(pp);
+
+       /* de-assert phy reset */
+       exynos_pcie_deassert_phy_reset(pp);
+
+       /* initialize phy */
+       exynos_pcie_init_phy(pp);
+
+       /* pulse for common reset */
+       writel(1, block_base + PCIE_PHY_COMMON_RESET);
+       udelay(500);
+       writel(0, block_base + PCIE_PHY_COMMON_RESET);
+
+       /* de-assert core reset */
+       exynos_pcie_deassert_core_reset(pp);
+
+       /* setup root complex */
+       dw_pcie_setup_rc(pp);
+
+       /* assert reset signal */
+       exynos_pcie_assert_reset(pp);
+
+       /* assert LTSSM enable */
+       writel(PCIE_ELBI_LTSSM_ENABLE, elbi_base + PCIE_APP_LTSSM_ENABLE);
+
+       /* check if the link is up or not */
+       while (!dw_pcie_link_up(pp)) {
+               mdelay(100);
+               count++;
+               if (count == 10) {
+                       /*
+                        * PCIE_PHY_PLL_LOCKED is a block ("purple") register.
+                        * The original polled phy_base here while printing
+                        * the block_base copy, so the loop condition never
+                        * tracked the value it reported; poll the block
+                        * space consistently.  Note this wait is unbounded
+                        * if the PLL never locks.
+                        */
+                       while (readl(block_base + PCIE_PHY_PLL_LOCKED) == 0) {
+                               val = readl(block_base + PCIE_PHY_PLL_LOCKED);
+                               dev_info(pp->dev, "PLL Locked: 0x%x\n", val);
+                       }
+                       dev_err(pp->dev, "PCIe Link Fail\n");
+                       return -EINVAL;
+               }
+       }
+
+       dev_info(pp->dev, "Link up\n");
+
+       return 0;
+}
+
+/* Acknowledge all pending pulse IRQs by writing back the pending mask */
+static void exynos_pcie_clear_irq_pulse(struct pcie_port *pp)
+{
+       struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
+       void __iomem *elbi_base = exynos_pcie->elbi_base;
+       u32 pending;
+
+       pending = readl(elbi_base + PCIE_IRQ_PULSE);
+       writel(pending, elbi_base + PCIE_IRQ_PULSE);
+}
+
+/* Unmask the INTA-INTD pulse interrupts in the ELBI glue */
+static void exynos_pcie_enable_irq_pulse(struct pcie_port *pp)
+{
+       u32 val;
+       struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
+       void __iomem *elbi_base = exynos_pcie->elbi_base;
+
+       /*
+        * enable INTX interrupt (the original terminated this assignment
+        * with a comma operator instead of a semicolon - a typo, fixed)
+        */
+       val = IRQ_INTA_ASSERT | IRQ_INTB_ASSERT |
+               IRQ_INTC_ASSERT | IRQ_INTD_ASSERT;
+       writel(val, elbi_base + PCIE_IRQ_EN_PULSE);
+}
+
+/* Top-half IRQ handler: ack the pulse interrupts so the line de-asserts */
+static irqreturn_t exynos_pcie_irq_handler(int irq, void *arg)
+{
+       struct pcie_port *pp = arg;
+
+       exynos_pcie_clear_irq_pulse(pp);
+       return IRQ_HANDLED;
+}
+
+/* Only the pulse (INTx) interrupts are used by this driver */
+static void exynos_pcie_enable_interrupts(struct pcie_port *pp)
+{
+       exynos_pcie_enable_irq_pulse(pp);
+}
+
+/* RC (DBI) register read, bracketed by the ELBI sideband read mode */
+static inline void exynos_pcie_readl_rc(struct pcie_port *pp,
+                                       void __iomem *dbi_base, u32 *val)
+{
+       exynos_pcie_sideband_dbi_r_mode(pp, true);
+       *val = readl(dbi_base);
+       exynos_pcie_sideband_dbi_r_mode(pp, false);
+}
+
+/* RC (DBI) register write, bracketed by the ELBI sideband write mode */
+static inline void exynos_pcie_writel_rc(struct pcie_port *pp,
+                                       u32 val, void __iomem *dbi_base)
+{
+       exynos_pcie_sideband_dbi_w_mode(pp, true);
+       writel(val, dbi_base);
+       exynos_pcie_sideband_dbi_w_mode(pp, false);
+}
+
+/*
+ * dw_pcie rd_own_conf hook: reads of the RC's own config space must go
+ * through the ELBI sideband, so wrap the generic cfg_read() in r-mode.
+ */
+static int exynos_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
+                               u32 *val)
+{
+       int ret;
+
+       exynos_pcie_sideband_dbi_r_mode(pp, true);
+       ret = cfg_read(pp->dbi_base + (where & ~0x3), where, size, val);
+       exynos_pcie_sideband_dbi_r_mode(pp, false);
+       return ret;
+}
+
+/*
+ * dw_pcie wr_own_conf hook: writes to the RC's own config space must go
+ * through the ELBI sideband, so wrap the generic cfg_write() in w-mode.
+ */
+static int exynos_pcie_wr_own_conf(struct pcie_port *pp, int where, int size,
+                               u32 val)
+{
+       int ret;
+
+       exynos_pcie_sideband_dbi_w_mode(pp, true);
+       ret = cfg_write(pp->dbi_base + (where & ~0x3), where, size, val);
+       exynos_pcie_sideband_dbi_w_mode(pp, false);
+       return ret;
+}
+
+/* dw_pcie link_up hook: RDLH_LINKUP reads back LTSSM_ENABLE once trained */
+static int exynos_pcie_link_up(struct pcie_port *pp)
+{
+       struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
+       u32 val = readl(exynos_pcie->elbi_base + PCIE_ELBI_RDLH_LINKUP);
+
+       return val == PCIE_ELBI_LTSSM_ENABLE;
+}
+
+/* dw_pcie host_init hook: bring the link up, then unmask INTx interrupts */
+static void exynos_pcie_host_init(struct pcie_port *pp)
+{
+       exynos_pcie_establish_link(pp);
+       exynos_pcie_enable_interrupts(pp);
+}
+
+static struct pcie_host_ops exynos_pcie_host_ops = {
+       .readl_rc = exynos_pcie_readl_rc,
+       .writel_rc = exynos_pcie_writel_rc,
+       .rd_own_conf = exynos_pcie_rd_own_conf,
+       .wr_own_conf = exynos_pcie_wr_own_conf,
+       .link_up = exynos_pcie_link_up,
+       .host_init = exynos_pcie_host_init,
+};
+
+/*
+ * Wire up the port IRQ and hand the port to the DesignWare core.
+ * Returns 0 on success or a negative errno.
+ */
+static int add_pcie_port(struct pcie_port *pp, struct platform_device *pdev)
+{
+       int ret;
+
+       /*
+        * platform_get_irq() returns a negative errno on failure; the
+        * original tested "!pp->irq", which only catches 0 and would pass
+        * an error value straight to devm_request_irq().
+        */
+       pp->irq = platform_get_irq(pdev, 1);
+       if (pp->irq < 0) {
+               dev_err(&pdev->dev, "failed to get irq\n");
+               return -ENODEV;
+       }
+       ret = devm_request_irq(&pdev->dev, pp->irq, exynos_pcie_irq_handler,
+                               IRQF_SHARED, "exynos-pcie", pp);
+       if (ret) {
+               dev_err(&pdev->dev, "failed to request irq\n");
+               return ret;
+       }
+
+       pp->root_bus_nr = -1;
+       pp->ops = &exynos_pcie_host_ops;
+
+       spin_lock_init(&pp->conf_lock);
+       ret = dw_pcie_host_init(pp);
+       if (ret) {
+               dev_err(&pdev->dev, "failed to initialize host\n");
+               return ret;
+       }
+
+       return 0;
+}
+
+/*
+ * Probe: allocate state, enable the controller clocks, map the three
+ * register spaces (ELBI, PHY, block) and register the port.
+ */
+static int __init exynos_pcie_probe(struct platform_device *pdev)
+{
+       struct exynos_pcie *exynos_pcie;
+       struct pcie_port *pp;
+       struct device_node *np = pdev->dev.of_node;
+       struct resource *elbi_base;
+       struct resource *phy_base;
+       struct resource *block_base;
+       int ret;
+
+       exynos_pcie = devm_kzalloc(&pdev->dev, sizeof(*exynos_pcie),
+                               GFP_KERNEL);
+       if (!exynos_pcie) {
+               dev_err(&pdev->dev, "no memory for exynos pcie\n");
+               return -ENOMEM;
+       }
+
+       pp = &exynos_pcie->pp;
+
+       pp->dev = &pdev->dev;
+
+       /* optional PERST# gpio; negative means "not provided" */
+       exynos_pcie->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0);
+
+       exynos_pcie->clk = devm_clk_get(&pdev->dev, "pcie");
+       if (IS_ERR(exynos_pcie->clk)) {
+               dev_err(&pdev->dev, "Failed to get pcie rc clock\n");
+               return PTR_ERR(exynos_pcie->clk);
+       }
+       ret = clk_prepare_enable(exynos_pcie->clk);
+       if (ret)
+               return ret;
+
+       exynos_pcie->bus_clk = devm_clk_get(&pdev->dev, "pcie_bus");
+       if (IS_ERR(exynos_pcie->bus_clk)) {
+               dev_err(&pdev->dev, "Failed to get pcie bus clock\n");
+               ret = PTR_ERR(exynos_pcie->bus_clk);
+               goto fail_clk;
+       }
+       ret = clk_prepare_enable(exynos_pcie->bus_clk);
+       if (ret)
+               goto fail_clk;
+
+       /*
+        * Both clocks are enabled from here on, so every error path must
+        * unwind through fail_bus_clk.  The original returned directly on
+        * devm_ioremap_resource() failure and leaked the enabled clocks.
+        */
+       elbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       exynos_pcie->elbi_base = devm_ioremap_resource(&pdev->dev, elbi_base);
+       if (IS_ERR(exynos_pcie->elbi_base)) {
+               ret = PTR_ERR(exynos_pcie->elbi_base);
+               goto fail_bus_clk;
+       }
+
+       phy_base = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+       exynos_pcie->phy_base = devm_ioremap_resource(&pdev->dev, phy_base);
+       if (IS_ERR(exynos_pcie->phy_base)) {
+               ret = PTR_ERR(exynos_pcie->phy_base);
+               goto fail_bus_clk;
+       }
+
+       block_base = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+       exynos_pcie->block_base = devm_ioremap_resource(&pdev->dev, block_base);
+       if (IS_ERR(exynos_pcie->block_base)) {
+               ret = PTR_ERR(exynos_pcie->block_base);
+               goto fail_bus_clk;
+       }
+
+       ret = add_pcie_port(pp, pdev);
+       if (ret < 0)
+               goto fail_bus_clk;
+
+       platform_set_drvdata(pdev, exynos_pcie);
+       return 0;
+
+fail_bus_clk:
+       clk_disable_unprepare(exynos_pcie->bus_clk);
+fail_clk:
+       clk_disable_unprepare(exynos_pcie->clk);
+       return ret;
+}
+
+/* Remove: only the clocks need explicit teardown; the rest is devm-managed */
+static int __exit exynos_pcie_remove(struct platform_device *pdev)
+{
+       struct exynos_pcie *exynos_pcie = platform_get_drvdata(pdev);
+
+       clk_disable_unprepare(exynos_pcie->bus_clk);
+       clk_disable_unprepare(exynos_pcie->clk);
+
+       return 0;
+}
+
+static const struct of_device_id exynos_pcie_of_match[] = {
+       { .compatible = "samsung,exynos5440-pcie", },
+       {},
+};
+MODULE_DEVICE_TABLE(of, exynos_pcie_of_match);
+
+static struct platform_driver exynos_pcie_driver = {
+       .remove         = __exit_p(exynos_pcie_remove),
+       .driver = {
+               .name   = "exynos-pcie",
+               .owner  = THIS_MODULE,
+               .of_match_table = of_match_ptr(exynos_pcie_of_match),
+       },
+};
+
+/* Exynos PCIe driver does not allow module unload */
+
+/*
+ * Registered at subsys_initcall so the root complex is up before device
+ * drivers probe; platform_driver_probe() keeps the __init probe legal.
+ */
+static int __init pcie_init(void)
+{
+       return platform_driver_probe(&exynos_pcie_driver, exynos_pcie_probe);
+}
+subsys_initcall(pcie_init);
+
+MODULE_AUTHOR("Jingoo Han <jg1.han@samsung.com>");
+MODULE_DESCRIPTION("Samsung PCIe host controller driver");
+MODULE_LICENSE("GPL v2");
index 7bf3926aecc0d765af64f166cbeb871ea66ac127..ce1543a584a30c1eca9391adad1dcf3e21e7a87b 100644 (file)
@@ -725,9 +725,9 @@ mvebu_pcie_map_registers(struct platform_device *pdev,
 
        ret = of_address_to_resource(np, 0, &regs);
        if (ret)
-               return NULL;
+               return ERR_PTR(ret);
 
-       return devm_request_and_ioremap(&pdev->dev, &regs);
+       return devm_ioremap_resource(&pdev->dev, &regs);
 }
 
 static int __init mvebu_pcie_probe(struct platform_device *pdev)
@@ -817,9 +817,10 @@ static int __init mvebu_pcie_probe(struct platform_device *pdev)
                        continue;
 
                port->base = mvebu_pcie_map_registers(pdev, child, port);
-               if (!port->base) {
+               if (IS_ERR(port->base)) {
                        dev_err(&pdev->dev, "PCIe%d.%d: cannot map registers\n",
                                port->port, port->lane);
+                       port->base = NULL;
                        continue;
                }
 
index 26bdbda8ff90347ff60897a80a3d7ae444a5556e..77b0c257f215f1e4913a2abdc349d16bfbc6b9a8 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * PCIe host controller driver for Samsung EXYNOS SoCs
+ * Synopsys Designware PCIe host controller driver
  *
  * Copyright (C) 2013 Samsung Electronics Co., Ltd.
  *             http://www.samsung.com
  * published by the Free Software Foundation.
  */
 
-#include <linux/clk.h>
-#include <linux/delay.h>
-#include <linux/gpio.h>
-#include <linux/interrupt.h>
 #include <linux/kernel.h>
-#include <linux/list.h>
 #include <linux/module.h>
-#include <linux/of.h>
 #include <linux/of_address.h>
-#include <linux/of_gpio.h>
-#include <linux/of_pci.h>
 #include <linux/pci.h>
 #include <linux/pci_regs.h>
-#include <linux/platform_device.h>
-#include <linux/resource.h>
-#include <linux/signal.h>
-#include <linux/slab.h>
 #include <linux/types.h>
 
-struct pcie_port_info {
-       u32             cfg0_size;
-       u32             cfg1_size;
-       u32             io_size;
-       u32             mem_size;
-       phys_addr_t     io_bus_addr;
-       phys_addr_t     mem_bus_addr;
-};
-
-struct pcie_port {
-       struct device           *dev;
-       u8                      controller;
-       u8                      root_bus_nr;
-       void __iomem            *dbi_base;
-       void __iomem            *elbi_base;
-       void __iomem            *phy_base;
-       void __iomem            *purple_base;
-       u64                     cfg0_base;
-       void __iomem            *va_cfg0_base;
-       u64                     cfg1_base;
-       void __iomem            *va_cfg1_base;
-       u64                     io_base;
-       u64                     mem_base;
-       spinlock_t              conf_lock;
-       struct resource         cfg;
-       struct resource         io;
-       struct resource         mem;
-       struct pcie_port_info   config;
-       struct clk              *clk;
-       struct clk              *bus_clk;
-       int                     irq;
-       int                     reset_gpio;
-};
-
-/*
- * Exynos PCIe IP consists of Synopsys specific part and Exynos
- * specific part. Only core block is a Synopsys designware part;
- * other parts are Exynos specific.
- */
+#include "pcie-designware.h"
 
 /* Synopsis specific PCIE configuration registers */
 #define PCIE_PORT_LINK_CONTROL         0x710
 #define PORT_LINK_MODE_MASK            (0x3f << 16)
+#define PORT_LINK_MODE_1_LANES         (0x1 << 16)
+#define PORT_LINK_MODE_2_LANES         (0x3 << 16)
 #define PORT_LINK_MODE_4_LANES         (0x7 << 16)
 
 #define PCIE_LINK_WIDTH_SPEED_CONTROL  0x80C
 #define PORT_LOGIC_SPEED_CHANGE                (0x1 << 17)
 #define PORT_LOGIC_LINK_WIDTH_MASK     (0x1ff << 8)
-#define PORT_LOGIC_LINK_WIDTH_4_LANES  (0x7 << 8)
+#define PORT_LOGIC_LINK_WIDTH_1_LANES  (0x1 << 8)
+#define PORT_LOGIC_LINK_WIDTH_2_LANES  (0x2 << 8)
+#define PORT_LOGIC_LINK_WIDTH_4_LANES  (0x4 << 8)
 
 #define PCIE_MSI_ADDR_LO               0x820
 #define PCIE_MSI_ADDR_HI               0x824
@@ -108,69 +62,16 @@ struct pcie_port {
 #define PCIE_ATU_FUNC(x)               (((x) & 0x7) << 16)
 #define PCIE_ATU_UPPER_TARGET          0x91C
 
-/* Exynos specific PCIE configuration registers */
-
-/* PCIe ELBI registers */
-#define PCIE_IRQ_PULSE                 0x000
-#define IRQ_INTA_ASSERT                        (0x1 << 0)
-#define IRQ_INTB_ASSERT                        (0x1 << 2)
-#define IRQ_INTC_ASSERT                        (0x1 << 4)
-#define IRQ_INTD_ASSERT                        (0x1 << 6)
-#define PCIE_IRQ_LEVEL                 0x004
-#define PCIE_IRQ_SPECIAL               0x008
-#define PCIE_IRQ_EN_PULSE              0x00c
-#define PCIE_IRQ_EN_LEVEL              0x010
-#define PCIE_IRQ_EN_SPECIAL            0x014
-#define PCIE_PWR_RESET                 0x018
-#define PCIE_CORE_RESET                        0x01c
-#define PCIE_CORE_RESET_ENABLE         (0x1 << 0)
-#define PCIE_STICKY_RESET              0x020
-#define PCIE_NONSTICKY_RESET           0x024
-#define PCIE_APP_INIT_RESET            0x028
-#define PCIE_APP_LTSSM_ENABLE          0x02c
-#define PCIE_ELBI_RDLH_LINKUP          0x064
-#define PCIE_ELBI_LTSSM_ENABLE         0x1
-#define PCIE_ELBI_SLV_AWMISC           0x11c
-#define PCIE_ELBI_SLV_ARMISC           0x120
-#define PCIE_ELBI_SLV_DBI_ENABLE       (0x1 << 21)
-
-/* PCIe Purple registers */
-#define PCIE_PHY_GLOBAL_RESET          0x000
-#define PCIE_PHY_COMMON_RESET          0x004
-#define PCIE_PHY_CMN_REG               0x008
-#define PCIE_PHY_MAC_RESET             0x00c
-#define PCIE_PHY_PLL_LOCKED            0x010
-#define PCIE_PHY_TRSVREG_RESET         0x020
-#define PCIE_PHY_TRSV_RESET            0x024
-
-/* PCIe PHY registers */
-#define PCIE_PHY_IMPEDANCE             0x004
-#define PCIE_PHY_PLL_DIV_0             0x008
-#define PCIE_PHY_PLL_BIAS              0x00c
-#define PCIE_PHY_DCC_FEEDBACK          0x014
-#define PCIE_PHY_PLL_DIV_1             0x05c
-#define PCIE_PHY_TRSV0_EMP_LVL         0x084
-#define PCIE_PHY_TRSV0_DRV_LVL         0x088
-#define PCIE_PHY_TRSV0_RXCDR           0x0ac
-#define PCIE_PHY_TRSV0_LVCC            0x0dc
-#define PCIE_PHY_TRSV1_EMP_LVL         0x144
-#define PCIE_PHY_TRSV1_RXCDR           0x16c
-#define PCIE_PHY_TRSV1_LVCC            0x19c
-#define PCIE_PHY_TRSV2_EMP_LVL         0x204
-#define PCIE_PHY_TRSV2_RXCDR           0x22c
-#define PCIE_PHY_TRSV2_LVCC            0x25c
-#define PCIE_PHY_TRSV3_EMP_LVL         0x2c4
-#define PCIE_PHY_TRSV3_RXCDR           0x2ec
-#define PCIE_PHY_TRSV3_LVCC            0x31c
-
-static struct hw_pci exynos_pci;
+static struct hw_pci dw_pci;
+
+unsigned long global_io_offset;
 
 static inline struct pcie_port *sys_to_pcie(struct pci_sys_data *sys)
 {
        return sys->private_data;
 }
 
-static inline int cfg_read(void *addr, int where, int size, u32 *val)
+int cfg_read(void __iomem *addr, int where, int size, u32 *val)
 {
        *val = readl(addr);
 
@@ -184,7 +85,7 @@ static inline int cfg_read(void *addr, int where, int size, u32 *val)
        return PCIBIOS_SUCCESSFUL;
 }
 
-static inline int cfg_write(void *addr, int where, int size, u32 val)
+int cfg_write(void __iomem *addr, int where, int size, u32 val)
 {
        if (size == 4)
                writel(val, addr);
@@ -198,155 +99,241 @@ static inline int cfg_write(void *addr, int where, int size, u32 val)
        return PCIBIOS_SUCCESSFUL;
 }
 
-static void exynos_pcie_sideband_dbi_w_mode(struct pcie_port *pp, bool on)
+static inline void dw_pcie_readl_rc(struct pcie_port *pp,
+                               void __iomem *dbi_addr, u32 *val)
 {
-       u32 val;
-
-       if (on) {
-               val = readl(pp->elbi_base + PCIE_ELBI_SLV_AWMISC);
-               val |= PCIE_ELBI_SLV_DBI_ENABLE;
-               writel(val, pp->elbi_base + PCIE_ELBI_SLV_AWMISC);
-       } else {
-               val = readl(pp->elbi_base + PCIE_ELBI_SLV_AWMISC);
-               val &= ~PCIE_ELBI_SLV_DBI_ENABLE;
-               writel(val, pp->elbi_base + PCIE_ELBI_SLV_AWMISC);
-       }
-}
-
-static void exynos_pcie_sideband_dbi_r_mode(struct pcie_port *pp, bool on)
-{
-       u32 val;
-
-       if (on) {
-               val = readl(pp->elbi_base + PCIE_ELBI_SLV_ARMISC);
-               val |= PCIE_ELBI_SLV_DBI_ENABLE;
-               writel(val, pp->elbi_base + PCIE_ELBI_SLV_ARMISC);
-       } else {
-               val = readl(pp->elbi_base + PCIE_ELBI_SLV_ARMISC);
-               val &= ~PCIE_ELBI_SLV_DBI_ENABLE;
-               writel(val, pp->elbi_base + PCIE_ELBI_SLV_ARMISC);
-       }
-}
-
-static inline void readl_rc(struct pcie_port *pp, void *dbi_base, u32 *val)
-{
-       exynos_pcie_sideband_dbi_r_mode(pp, true);
-       *val = readl(dbi_base);
-       exynos_pcie_sideband_dbi_r_mode(pp, false);
-       return;
+       if (pp->ops->readl_rc)
+               pp->ops->readl_rc(pp, dbi_addr, val);
+       else
+               *val = readl(dbi_addr);
 }
 
-static inline void writel_rc(struct pcie_port *pp, u32 val, void *dbi_base)
+static inline void dw_pcie_writel_rc(struct pcie_port *pp,
+                               u32 val, void __iomem *dbi_addr)
 {
-       exynos_pcie_sideband_dbi_w_mode(pp, true);
-       writel(val, dbi_base);
-       exynos_pcie_sideband_dbi_w_mode(pp, false);
-       return;
+       if (pp->ops->writel_rc)
+               pp->ops->writel_rc(pp, val, dbi_addr);
+       else
+               writel(val, dbi_addr);
 }
 
-static int exynos_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
+int dw_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
                                u32 *val)
 {
        int ret;
 
-       exynos_pcie_sideband_dbi_r_mode(pp, true);
-       ret = cfg_read(pp->dbi_base + (where & ~0x3), where, size, val);
-       exynos_pcie_sideband_dbi_r_mode(pp, false);
+       if (pp->ops->rd_own_conf)
+               ret = pp->ops->rd_own_conf(pp, where, size, val);
+       else
+               ret = cfg_read(pp->dbi_base + (where & ~0x3), where, size, val);
+
        return ret;
 }
 
-static int exynos_pcie_wr_own_conf(struct pcie_port *pp, int where, int size,
+int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size,
                                u32 val)
 {
        int ret;
 
-       exynos_pcie_sideband_dbi_w_mode(pp, true);
-       ret = cfg_write(pp->dbi_base + (where & ~0x3), where, size, val);
-       exynos_pcie_sideband_dbi_w_mode(pp, false);
+       if (pp->ops->wr_own_conf)
+               ret = pp->ops->wr_own_conf(pp, where, size, val);
+       else
+               ret = cfg_write(pp->dbi_base + (where & ~0x3), where, size,
+                               val);
+
        return ret;
 }
 
-static void exynos_pcie_prog_viewport_cfg0(struct pcie_port *pp, u32 busdev)
+int dw_pcie_link_up(struct pcie_port *pp)
+{
+       if (pp->ops->link_up)
+               return pp->ops->link_up(pp);
+       else
+               return 0;
+}
+
+int __init dw_pcie_host_init(struct pcie_port *pp)
+{
+       struct device_node *np = pp->dev->of_node;
+       struct of_pci_range range;
+       struct of_pci_range_parser parser;
+       u32 val;
+
+       if (of_pci_range_parser_init(&parser, np)) {
+               dev_err(pp->dev, "missing ranges property\n");
+               return -EINVAL;
+       }
+
+       /* Get the I/O and memory ranges from DT */
+       for_each_of_pci_range(&parser, &range) {
+               unsigned long restype = range.flags & IORESOURCE_TYPE_BITS;
+               if (restype == IORESOURCE_IO) {
+                       of_pci_range_to_resource(&range, np, &pp->io);
+                       pp->io.name = "I/O";
+                       pp->io.start = max_t(resource_size_t,
+                                            PCIBIOS_MIN_IO,
+                                            range.pci_addr + global_io_offset);
+                       pp->io.end = min_t(resource_size_t,
+                                          IO_SPACE_LIMIT,
+                                          range.pci_addr + range.size
+                                          + global_io_offset);
+                       pp->config.io_size = resource_size(&pp->io);
+                       pp->config.io_bus_addr = range.pci_addr;
+               }
+               if (restype == IORESOURCE_MEM) {
+                       of_pci_range_to_resource(&range, np, &pp->mem);
+                       pp->mem.name = "MEM";
+                       pp->config.mem_size = resource_size(&pp->mem);
+                       pp->config.mem_bus_addr = range.pci_addr;
+               }
+               if (restype == 0) {
+                       of_pci_range_to_resource(&range, np, &pp->cfg);
+                       pp->config.cfg0_size = resource_size(&pp->cfg)/2;
+                       pp->config.cfg1_size = resource_size(&pp->cfg)/2;
+               }
+       }
+
+       if (!pp->dbi_base) {
+               pp->dbi_base = devm_ioremap(pp->dev, pp->cfg.start,
+                                       resource_size(&pp->cfg));
+               if (!pp->dbi_base) {
+                       dev_err(pp->dev, "error with ioremap\n");
+                       return -ENOMEM;
+               }
+       }
+
+       pp->cfg0_base = pp->cfg.start;
+       pp->cfg1_base = pp->cfg.start + pp->config.cfg0_size;
+       pp->io_base = pp->io.start;
+       pp->mem_base = pp->mem.start;
+
+       pp->va_cfg0_base = devm_ioremap(pp->dev, pp->cfg0_base,
+                                       pp->config.cfg0_size);
+       if (!pp->va_cfg0_base) {
+               dev_err(pp->dev, "error with ioremap in function\n");
+               return -ENOMEM;
+       }
+       pp->va_cfg1_base = devm_ioremap(pp->dev, pp->cfg1_base,
+                                       pp->config.cfg1_size);
+       if (!pp->va_cfg1_base) {
+               dev_err(pp->dev, "error with ioremap\n");
+               return -ENOMEM;
+       }
+
+       if (of_property_read_u32(np, "num-lanes", &pp->lanes)) {
+               dev_err(pp->dev, "Failed to parse the number of lanes\n");
+               return -EINVAL;
+       }
+
+       if (pp->ops->host_init)
+               pp->ops->host_init(pp);
+
+       dw_pcie_wr_own_conf(pp, PCI_BASE_ADDRESS_0, 4, 0);
+
+       /* program correct class for RC */
+       dw_pcie_wr_own_conf(pp, PCI_CLASS_DEVICE, 2, PCI_CLASS_BRIDGE_PCI);
+
+       dw_pcie_rd_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, &val);
+       val |= PORT_LOGIC_SPEED_CHANGE;
+       dw_pcie_wr_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, val);
+
+       dw_pci.nr_controllers = 1;
+       dw_pci.private_data = (void **)&pp;
+
+       pci_common_init(&dw_pci);
+       pci_assign_unassigned_resources();
+#ifdef CONFIG_PCI_DOMAINS
+       dw_pci.domain++;
+#endif
+
+       return 0;
+}
+
+static void dw_pcie_prog_viewport_cfg0(struct pcie_port *pp, u32 busdev)
 {
        u32 val;
        void __iomem *dbi_base = pp->dbi_base;
 
        /* Program viewport 0 : OUTBOUND : CFG0 */
        val = PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX0;
-       writel_rc(pp, val, dbi_base + PCIE_ATU_VIEWPORT);
-       writel_rc(pp, pp->cfg0_base, dbi_base + PCIE_ATU_LOWER_BASE);
-       writel_rc(pp, (pp->cfg0_base >> 32), dbi_base + PCIE_ATU_UPPER_BASE);
-       writel_rc(pp, pp->cfg0_base + pp->config.cfg0_size - 1,
+       dw_pcie_writel_rc(pp, val, dbi_base + PCIE_ATU_VIEWPORT);
+       dw_pcie_writel_rc(pp, pp->cfg0_base, dbi_base + PCIE_ATU_LOWER_BASE);
+       dw_pcie_writel_rc(pp, (pp->cfg0_base >> 32),
+                       dbi_base + PCIE_ATU_UPPER_BASE);
+       dw_pcie_writel_rc(pp, pp->cfg0_base + pp->config.cfg0_size - 1,
                        dbi_base + PCIE_ATU_LIMIT);
-       writel_rc(pp, busdev, dbi_base + PCIE_ATU_LOWER_TARGET);
-       writel_rc(pp, 0, dbi_base + PCIE_ATU_UPPER_TARGET);
-       writel_rc(pp, PCIE_ATU_TYPE_CFG0, dbi_base + PCIE_ATU_CR1);
+       dw_pcie_writel_rc(pp, busdev, dbi_base + PCIE_ATU_LOWER_TARGET);
+       dw_pcie_writel_rc(pp, 0, dbi_base + PCIE_ATU_UPPER_TARGET);
+       dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_CFG0, dbi_base + PCIE_ATU_CR1);
        val = PCIE_ATU_ENABLE;
-       writel_rc(pp, val, dbi_base + PCIE_ATU_CR2);
+       dw_pcie_writel_rc(pp, val, dbi_base + PCIE_ATU_CR2);
 }
 
-static void exynos_pcie_prog_viewport_cfg1(struct pcie_port *pp, u32 busdev)
+static void dw_pcie_prog_viewport_cfg1(struct pcie_port *pp, u32 busdev)
 {
        u32 val;
        void __iomem *dbi_base = pp->dbi_base;
 
        /* Program viewport 1 : OUTBOUND : CFG1 */
        val = PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX1;
-       writel_rc(pp, val, dbi_base + PCIE_ATU_VIEWPORT);
-       writel_rc(pp, PCIE_ATU_TYPE_CFG1, dbi_base + PCIE_ATU_CR1);
+       dw_pcie_writel_rc(pp, val, dbi_base + PCIE_ATU_VIEWPORT);
+       dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_CFG1, dbi_base + PCIE_ATU_CR1);
        val = PCIE_ATU_ENABLE;
-       writel_rc(pp, val, dbi_base + PCIE_ATU_CR2);
-       writel_rc(pp, pp->cfg1_base, dbi_base + PCIE_ATU_LOWER_BASE);
-       writel_rc(pp, (pp->cfg1_base >> 32), dbi_base + PCIE_ATU_UPPER_BASE);
-       writel_rc(pp, pp->cfg1_base + pp->config.cfg1_size - 1,
+       dw_pcie_writel_rc(pp, val, dbi_base + PCIE_ATU_CR2);
+       dw_pcie_writel_rc(pp, pp->cfg1_base, dbi_base + PCIE_ATU_LOWER_BASE);
+       dw_pcie_writel_rc(pp, (pp->cfg1_base >> 32),
+                       dbi_base + PCIE_ATU_UPPER_BASE);
+       dw_pcie_writel_rc(pp, pp->cfg1_base + pp->config.cfg1_size - 1,
                        dbi_base + PCIE_ATU_LIMIT);
-       writel_rc(pp, busdev, dbi_base + PCIE_ATU_LOWER_TARGET);
-       writel_rc(pp, 0, dbi_base + PCIE_ATU_UPPER_TARGET);
+       dw_pcie_writel_rc(pp, busdev, dbi_base + PCIE_ATU_LOWER_TARGET);
+       dw_pcie_writel_rc(pp, 0, dbi_base + PCIE_ATU_UPPER_TARGET);
 }
 
-static void exynos_pcie_prog_viewport_mem_outbound(struct pcie_port *pp)
+static void dw_pcie_prog_viewport_mem_outbound(struct pcie_port *pp)
 {
        u32 val;
        void __iomem *dbi_base = pp->dbi_base;
 
        /* Program viewport 0 : OUTBOUND : MEM */
        val = PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX0;
-       writel_rc(pp, val, dbi_base + PCIE_ATU_VIEWPORT);
-       writel_rc(pp, PCIE_ATU_TYPE_MEM, dbi_base + PCIE_ATU_CR1);
+       dw_pcie_writel_rc(pp, val, dbi_base + PCIE_ATU_VIEWPORT);
+       dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_MEM, dbi_base + PCIE_ATU_CR1);
        val = PCIE_ATU_ENABLE;
-       writel_rc(pp, val, dbi_base + PCIE_ATU_CR2);
-       writel_rc(pp, pp->mem_base, dbi_base + PCIE_ATU_LOWER_BASE);
-       writel_rc(pp, (pp->mem_base >> 32), dbi_base + PCIE_ATU_UPPER_BASE);
-       writel_rc(pp, pp->mem_base + pp->config.mem_size - 1,
+       dw_pcie_writel_rc(pp, val, dbi_base + PCIE_ATU_CR2);
+       dw_pcie_writel_rc(pp, pp->mem_base, dbi_base + PCIE_ATU_LOWER_BASE);
+       dw_pcie_writel_rc(pp, (pp->mem_base >> 32),
+                       dbi_base + PCIE_ATU_UPPER_BASE);
+       dw_pcie_writel_rc(pp, pp->mem_base + pp->config.mem_size - 1,
                        dbi_base + PCIE_ATU_LIMIT);
-       writel_rc(pp, pp->config.mem_bus_addr,
+       dw_pcie_writel_rc(pp, pp->config.mem_bus_addr,
                        dbi_base + PCIE_ATU_LOWER_TARGET);
-       writel_rc(pp, upper_32_bits(pp->config.mem_bus_addr),
+       dw_pcie_writel_rc(pp, upper_32_bits(pp->config.mem_bus_addr),
                        dbi_base + PCIE_ATU_UPPER_TARGET);
 }
 
-static void exynos_pcie_prog_viewport_io_outbound(struct pcie_port *pp)
+static void dw_pcie_prog_viewport_io_outbound(struct pcie_port *pp)
 {
        u32 val;
        void __iomem *dbi_base = pp->dbi_base;
 
        /* Program viewport 1 : OUTBOUND : IO */
        val = PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX1;
-       writel_rc(pp, val, dbi_base + PCIE_ATU_VIEWPORT);
-       writel_rc(pp, PCIE_ATU_TYPE_IO, dbi_base + PCIE_ATU_CR1);
+       dw_pcie_writel_rc(pp, val, dbi_base + PCIE_ATU_VIEWPORT);
+       dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_IO, dbi_base + PCIE_ATU_CR1);
        val = PCIE_ATU_ENABLE;
-       writel_rc(pp, val, dbi_base + PCIE_ATU_CR2);
-       writel_rc(pp, pp->io_base, dbi_base + PCIE_ATU_LOWER_BASE);
-       writel_rc(pp, (pp->io_base >> 32), dbi_base + PCIE_ATU_UPPER_BASE);
-       writel_rc(pp, pp->io_base + pp->config.io_size - 1,
+       dw_pcie_writel_rc(pp, val, dbi_base + PCIE_ATU_CR2);
+       dw_pcie_writel_rc(pp, pp->io_base, dbi_base + PCIE_ATU_LOWER_BASE);
+       dw_pcie_writel_rc(pp, (pp->io_base >> 32),
+                       dbi_base + PCIE_ATU_UPPER_BASE);
+       dw_pcie_writel_rc(pp, pp->io_base + pp->config.io_size - 1,
                        dbi_base + PCIE_ATU_LIMIT);
-       writel_rc(pp, pp->config.io_bus_addr,
+       dw_pcie_writel_rc(pp, pp->config.io_bus_addr,
                        dbi_base + PCIE_ATU_LOWER_TARGET);
-       writel_rc(pp, upper_32_bits(pp->config.io_bus_addr),
+       dw_pcie_writel_rc(pp, upper_32_bits(pp->config.io_bus_addr),
                        dbi_base + PCIE_ATU_UPPER_TARGET);
 }
 
-static int exynos_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
+static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
                u32 devfn, int where, int size, u32 *val)
 {
        int ret = PCIBIOS_SUCCESSFUL;
@@ -357,19 +344,19 @@ static int exynos_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
        address = where & ~0x3;
 
        if (bus->parent->number == pp->root_bus_nr) {
-               exynos_pcie_prog_viewport_cfg0(pp, busdev);
+               dw_pcie_prog_viewport_cfg0(pp, busdev);
                ret = cfg_read(pp->va_cfg0_base + address, where, size, val);
-               exynos_pcie_prog_viewport_mem_outbound(pp);
+               dw_pcie_prog_viewport_mem_outbound(pp);
        } else {
-               exynos_pcie_prog_viewport_cfg1(pp, busdev);
+               dw_pcie_prog_viewport_cfg1(pp, busdev);
                ret = cfg_read(pp->va_cfg1_base + address, where, size, val);
-               exynos_pcie_prog_viewport_io_outbound(pp);
+               dw_pcie_prog_viewport_io_outbound(pp);
        }
 
        return ret;
 }
 
-static int exynos_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
+static int dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
                u32 devfn, int where, int size, u32 val)
 {
        int ret = PCIBIOS_SUCCESSFUL;
@@ -380,59 +367,25 @@ static int exynos_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
        address = where & ~0x3;
 
        if (bus->parent->number == pp->root_bus_nr) {
-               exynos_pcie_prog_viewport_cfg0(pp, busdev);
+               dw_pcie_prog_viewport_cfg0(pp, busdev);
                ret = cfg_write(pp->va_cfg0_base + address, where, size, val);
-               exynos_pcie_prog_viewport_mem_outbound(pp);
+               dw_pcie_prog_viewport_mem_outbound(pp);
        } else {
-               exynos_pcie_prog_viewport_cfg1(pp, busdev);
+               dw_pcie_prog_viewport_cfg1(pp, busdev);
                ret = cfg_write(pp->va_cfg1_base + address, where, size, val);
-               exynos_pcie_prog_viewport_io_outbound(pp);
+               dw_pcie_prog_viewport_io_outbound(pp);
        }
 
        return ret;
 }
 
-static unsigned long global_io_offset;
 
-static int exynos_pcie_setup(int nr, struct pci_sys_data *sys)
-{
-       struct pcie_port *pp;
-
-       pp = sys_to_pcie(sys);
-
-       if (!pp)
-               return 0;
-
-       if (global_io_offset < SZ_1M && pp->config.io_size > 0) {
-               sys->io_offset = global_io_offset - pp->config.io_bus_addr;
-               pci_ioremap_io(sys->io_offset, pp->io.start);
-               global_io_offset += SZ_64K;
-               pci_add_resource_offset(&sys->resources, &pp->io,
-                                       sys->io_offset);
-       }
-
-       sys->mem_offset = pp->mem.start - pp->config.mem_bus_addr;
-       pci_add_resource_offset(&sys->resources, &pp->mem, sys->mem_offset);
-
-       return 1;
-}
-
-static int exynos_pcie_link_up(struct pcie_port *pp)
-{
-       u32 val = readl(pp->elbi_base + PCIE_ELBI_RDLH_LINKUP);
-
-       if (val == PCIE_ELBI_LTSSM_ENABLE)
-               return 1;
-
-       return 0;
-}
-
-static int exynos_pcie_valid_config(struct pcie_port *pp,
+static int dw_pcie_valid_config(struct pcie_port *pp,
                                struct pci_bus *bus, int dev)
 {
        /* If there is no link, then there is no device */
        if (bus->number != pp->root_bus_nr) {
-               if (!exynos_pcie_link_up(pp))
+               if (!dw_pcie_link_up(pp))
                        return 0;
        }
 
@@ -450,7 +403,7 @@ static int exynos_pcie_valid_config(struct pcie_port *pp,
        return 1;
 }
 
-static int exynos_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
+static int dw_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
                        int size, u32 *val)
 {
        struct pcie_port *pp = sys_to_pcie(bus->sysdata);
@@ -462,23 +415,23 @@ static int exynos_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
                return -EINVAL;
        }
 
-       if (exynos_pcie_valid_config(pp, bus, PCI_SLOT(devfn)) == 0) {
+       if (dw_pcie_valid_config(pp, bus, PCI_SLOT(devfn)) == 0) {
                *val = 0xffffffff;
                return PCIBIOS_DEVICE_NOT_FOUND;
        }
 
        spin_lock_irqsave(&pp->conf_lock, flags);
        if (bus->number != pp->root_bus_nr)
-               ret = exynos_pcie_rd_other_conf(pp, bus, devfn,
+               ret = dw_pcie_rd_other_conf(pp, bus, devfn,
                                                where, size, val);
        else
-               ret = exynos_pcie_rd_own_conf(pp, where, size, val);
+               ret = dw_pcie_rd_own_conf(pp, where, size, val);
        spin_unlock_irqrestore(&pp->conf_lock, flags);
 
        return ret;
 }
 
-static int exynos_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
+static int dw_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
                        int where, int size, u32 val)
 {
        struct pcie_port *pp = sys_to_pcie(bus->sysdata);
@@ -490,34 +443,56 @@ static int exynos_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
                return -EINVAL;
        }
 
-       if (exynos_pcie_valid_config(pp, bus, PCI_SLOT(devfn)) == 0)
+       if (dw_pcie_valid_config(pp, bus, PCI_SLOT(devfn)) == 0)
                return PCIBIOS_DEVICE_NOT_FOUND;
 
        spin_lock_irqsave(&pp->conf_lock, flags);
        if (bus->number != pp->root_bus_nr)
-               ret = exynos_pcie_wr_other_conf(pp, bus, devfn,
+               ret = dw_pcie_wr_other_conf(pp, bus, devfn,
                                                where, size, val);
        else
-               ret = exynos_pcie_wr_own_conf(pp, where, size, val);
+               ret = dw_pcie_wr_own_conf(pp, where, size, val);
        spin_unlock_irqrestore(&pp->conf_lock, flags);
 
        return ret;
 }
 
-static struct pci_ops exynos_pcie_ops = {
-       .read = exynos_pcie_rd_conf,
-       .write = exynos_pcie_wr_conf,
+static struct pci_ops dw_pcie_ops = {
+       .read = dw_pcie_rd_conf,
+       .write = dw_pcie_wr_conf,
 };
 
-static struct pci_bus *exynos_pcie_scan_bus(int nr,
-                                       struct pci_sys_data *sys)
+int dw_pcie_setup(int nr, struct pci_sys_data *sys)
+{
+       struct pcie_port *pp;
+
+       pp = sys_to_pcie(sys);
+
+       if (!pp)
+               return 0;
+
+       if (global_io_offset < SZ_1M && pp->config.io_size > 0) {
+               sys->io_offset = global_io_offset - pp->config.io_bus_addr;
+               pci_ioremap_io(sys->io_offset, pp->io.start);
+               global_io_offset += SZ_64K;
+               pci_add_resource_offset(&sys->resources, &pp->io,
+                                       sys->io_offset);
+       }
+
+       sys->mem_offset = pp->mem.start - pp->config.mem_bus_addr;
+       pci_add_resource_offset(&sys->resources, &pp->mem, sys->mem_offset);
+
+       return 1;
+}
+
+struct pci_bus *dw_pcie_scan_bus(int nr, struct pci_sys_data *sys)
 {
        struct pci_bus *bus;
        struct pcie_port *pp = sys_to_pcie(sys);
 
        if (pp) {
                pp->root_bus_nr = sys->busnr;
-               bus = pci_scan_root_bus(NULL, sys->busnr, &exynos_pcie_ops,
+               bus = pci_scan_root_bus(NULL, sys->busnr, &dw_pcie_ops,
                                        sys, &sys->resources);
        } else {
                bus = NULL;
@@ -527,20 +502,20 @@ static struct pci_bus *exynos_pcie_scan_bus(int nr,
        return bus;
 }
 
-static int exynos_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+int dw_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 {
        struct pcie_port *pp = sys_to_pcie(dev->bus->sysdata);
 
        return pp->irq;
 }
 
-static struct hw_pci exynos_pci = {
-       .setup          = exynos_pcie_setup,
-       .scan           = exynos_pcie_scan_bus,
-       .map_irq        = exynos_pcie_map_irq,
+static struct hw_pci dw_pci = {
+       .setup          = dw_pcie_setup,
+       .scan           = dw_pcie_scan_bus,
+       .map_irq        = dw_pcie_map_irq,
 };
 
-static void exynos_pcie_setup_rc(struct pcie_port *pp)
+void dw_pcie_setup_rc(struct pcie_port *pp)
 {
        struct pcie_port_info *config = &pp->config;
        void __iomem *dbi_base = pp->dbi_base;
@@ -549,509 +524,67 @@ static void exynos_pcie_setup_rc(struct pcie_port *pp)
        u32 memlimit;
 
        /* set the number of lines as 4 */
-       readl_rc(pp, dbi_base + PCIE_PORT_LINK_CONTROL, &val);
+       dw_pcie_readl_rc(pp, dbi_base + PCIE_PORT_LINK_CONTROL, &val);
        val &= ~PORT_LINK_MODE_MASK;
-       val |= PORT_LINK_MODE_4_LANES;
-       writel_rc(pp, val, dbi_base + PCIE_PORT_LINK_CONTROL);
+       switch (pp->lanes) {
+       case 1:
+               val |= PORT_LINK_MODE_1_LANES;
+               break;
+       case 2:
+               val |= PORT_LINK_MODE_2_LANES;
+               break;
+       case 4:
+               val |= PORT_LINK_MODE_4_LANES;
+               break;
+       }
+       dw_pcie_writel_rc(pp, val, dbi_base + PCIE_PORT_LINK_CONTROL);
 
        /* set link width speed control register */
-       readl_rc(pp, dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL, &val);
+       dw_pcie_readl_rc(pp, dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL, &val);
        val &= ~PORT_LOGIC_LINK_WIDTH_MASK;
-       val |= PORT_LOGIC_LINK_WIDTH_4_LANES;
-       writel_rc(pp, val, dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL);
+       switch (pp->lanes) {
+       case 1:
+               val |= PORT_LOGIC_LINK_WIDTH_1_LANES;
+               break;
+       case 2:
+               val |= PORT_LOGIC_LINK_WIDTH_2_LANES;
+               break;
+       case 4:
+               val |= PORT_LOGIC_LINK_WIDTH_4_LANES;
+               break;
+       }
+       dw_pcie_writel_rc(pp, val, dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL);
 
        /* setup RC BARs */
-       writel_rc(pp, 0x00000004, dbi_base + PCI_BASE_ADDRESS_0);
-       writel_rc(pp, 0x00000004, dbi_base + PCI_BASE_ADDRESS_1);
+       dw_pcie_writel_rc(pp, 0x00000004, dbi_base + PCI_BASE_ADDRESS_0);
+       dw_pcie_writel_rc(pp, 0x00000004, dbi_base + PCI_BASE_ADDRESS_1);
 
        /* setup interrupt pins */
-       readl_rc(pp, dbi_base + PCI_INTERRUPT_LINE, &val);
+       dw_pcie_readl_rc(pp, dbi_base + PCI_INTERRUPT_LINE, &val);
        val &= 0xffff00ff;
        val |= 0x00000100;
-       writel_rc(pp, val, dbi_base + PCI_INTERRUPT_LINE);
+       dw_pcie_writel_rc(pp, val, dbi_base + PCI_INTERRUPT_LINE);
 
        /* setup bus numbers */
-       readl_rc(pp, dbi_base + PCI_PRIMARY_BUS, &val);
+       dw_pcie_readl_rc(pp, dbi_base + PCI_PRIMARY_BUS, &val);
        val &= 0xff000000;
        val |= 0x00010100;
-       writel_rc(pp, val, dbi_base + PCI_PRIMARY_BUS);
+       dw_pcie_writel_rc(pp, val, dbi_base + PCI_PRIMARY_BUS);
 
        /* setup memory base, memory limit */
        membase = ((u32)pp->mem_base & 0xfff00000) >> 16;
        memlimit = (config->mem_size + (u32)pp->mem_base) & 0xfff00000;
        val = memlimit | membase;
-       writel_rc(pp, val, dbi_base + PCI_MEMORY_BASE);
+       dw_pcie_writel_rc(pp, val, dbi_base + PCI_MEMORY_BASE);
 
        /* setup command register */
-       readl_rc(pp, dbi_base + PCI_COMMAND, &val);
+       dw_pcie_readl_rc(pp, dbi_base + PCI_COMMAND, &val);
        val &= 0xffff0000;
        val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
                PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
-       writel_rc(pp, val, dbi_base + PCI_COMMAND);
-}
-
-static void exynos_pcie_assert_core_reset(struct pcie_port *pp)
-{
-       u32 val;
-       void __iomem *elbi_base = pp->elbi_base;
-
-       val = readl(elbi_base + PCIE_CORE_RESET);
-       val &= ~PCIE_CORE_RESET_ENABLE;
-       writel(val, elbi_base + PCIE_CORE_RESET);
-       writel(0, elbi_base + PCIE_PWR_RESET);
-       writel(0, elbi_base + PCIE_STICKY_RESET);
-       writel(0, elbi_base + PCIE_NONSTICKY_RESET);
-}
-
-static void exynos_pcie_deassert_core_reset(struct pcie_port *pp)
-{
-       u32 val;
-       void __iomem *elbi_base = pp->elbi_base;
-       void __iomem *purple_base = pp->purple_base;
-
-       val = readl(elbi_base + PCIE_CORE_RESET);
-       val |= PCIE_CORE_RESET_ENABLE;
-       writel(val, elbi_base + PCIE_CORE_RESET);
-       writel(1, elbi_base + PCIE_STICKY_RESET);
-       writel(1, elbi_base + PCIE_NONSTICKY_RESET);
-       writel(1, elbi_base + PCIE_APP_INIT_RESET);
-       writel(0, elbi_base + PCIE_APP_INIT_RESET);
-       writel(1, purple_base + PCIE_PHY_MAC_RESET);
-}
-
-static void exynos_pcie_assert_phy_reset(struct pcie_port *pp)
-{
-       void __iomem *purple_base = pp->purple_base;
-
-       writel(0, purple_base + PCIE_PHY_MAC_RESET);
-       writel(1, purple_base + PCIE_PHY_GLOBAL_RESET);
-}
-
-static void exynos_pcie_deassert_phy_reset(struct pcie_port *pp)
-{
-       void __iomem *elbi_base = pp->elbi_base;
-       void __iomem *purple_base = pp->purple_base;
-
-       writel(0, purple_base + PCIE_PHY_GLOBAL_RESET);
-       writel(1, elbi_base + PCIE_PWR_RESET);
-       writel(0, purple_base + PCIE_PHY_COMMON_RESET);
-       writel(0, purple_base + PCIE_PHY_CMN_REG);
-       writel(0, purple_base + PCIE_PHY_TRSVREG_RESET);
-       writel(0, purple_base + PCIE_PHY_TRSV_RESET);
-}
-
-static void exynos_pcie_init_phy(struct pcie_port *pp)
-{
-       void __iomem *phy_base = pp->phy_base;
-
-       /* DCC feedback control off */
-       writel(0x29, phy_base + PCIE_PHY_DCC_FEEDBACK);
-
-       /* set TX/RX impedance */
-       writel(0xd5, phy_base + PCIE_PHY_IMPEDANCE);
-
-       /* set 50Mhz PHY clock */
-       writel(0x14, phy_base + PCIE_PHY_PLL_DIV_0);
-       writel(0x12, phy_base + PCIE_PHY_PLL_DIV_1);
-
-       /* set TX Differential output for lane 0 */
-       writel(0x7f, phy_base + PCIE_PHY_TRSV0_DRV_LVL);
-
-       /* set TX Pre-emphasis Level Control for lane 0 to minimum */
-       writel(0x0, phy_base + PCIE_PHY_TRSV0_EMP_LVL);
-
-       /* set RX clock and data recovery bandwidth */
-       writel(0xe7, phy_base + PCIE_PHY_PLL_BIAS);
-       writel(0x82, phy_base + PCIE_PHY_TRSV0_RXCDR);
-       writel(0x82, phy_base + PCIE_PHY_TRSV1_RXCDR);
-       writel(0x82, phy_base + PCIE_PHY_TRSV2_RXCDR);
-       writel(0x82, phy_base + PCIE_PHY_TRSV3_RXCDR);
-
-       /* change TX Pre-emphasis Level Control for lanes */
-       writel(0x39, phy_base + PCIE_PHY_TRSV0_EMP_LVL);
-       writel(0x39, phy_base + PCIE_PHY_TRSV1_EMP_LVL);
-       writel(0x39, phy_base + PCIE_PHY_TRSV2_EMP_LVL);
-       writel(0x39, phy_base + PCIE_PHY_TRSV3_EMP_LVL);
-
-       /* set LVCC */
-       writel(0x20, phy_base + PCIE_PHY_TRSV0_LVCC);
-       writel(0xa0, phy_base + PCIE_PHY_TRSV1_LVCC);
-       writel(0xa0, phy_base + PCIE_PHY_TRSV2_LVCC);
-       writel(0xa0, phy_base + PCIE_PHY_TRSV3_LVCC);
-}
-
-static void exynos_pcie_assert_reset(struct pcie_port *pp)
-{
-       if (pp->reset_gpio >= 0)
-               devm_gpio_request_one(pp->dev, pp->reset_gpio,
-                               GPIOF_OUT_INIT_HIGH, "RESET");
-       return;
-}
-
-static int exynos_pcie_establish_link(struct pcie_port *pp)
-{
-       u32 val;
-       int count = 0;
-       void __iomem *elbi_base = pp->elbi_base;
-       void __iomem *purple_base = pp->purple_base;
-       void __iomem *phy_base = pp->phy_base;
-
-       if (exynos_pcie_link_up(pp)) {
-               dev_err(pp->dev, "Link already up\n");
-               return 0;
-       }
-
-       /* assert reset signals */
-       exynos_pcie_assert_core_reset(pp);
-       exynos_pcie_assert_phy_reset(pp);
-
-       /* de-assert phy reset */
-       exynos_pcie_deassert_phy_reset(pp);
-
-       /* initialize phy */
-       exynos_pcie_init_phy(pp);
-
-       /* pulse for common reset */
-       writel(1, purple_base + PCIE_PHY_COMMON_RESET);
-       udelay(500);
-       writel(0, purple_base + PCIE_PHY_COMMON_RESET);
-
-       /* de-assert core reset */
-       exynos_pcie_deassert_core_reset(pp);
-
-       /* setup root complex */
-       exynos_pcie_setup_rc(pp);
-
-       /* assert reset signal */
-       exynos_pcie_assert_reset(pp);
-
-       /* assert LTSSM enable */
-       writel(PCIE_ELBI_LTSSM_ENABLE, elbi_base + PCIE_APP_LTSSM_ENABLE);
-
-       /* check if the link is up or not */
-       while (!exynos_pcie_link_up(pp)) {
-               mdelay(100);
-               count++;
-               if (count == 10) {
-                       while (readl(phy_base + PCIE_PHY_PLL_LOCKED) == 0) {
-                               val = readl(purple_base + PCIE_PHY_PLL_LOCKED);
-                               dev_info(pp->dev, "PLL Locked: 0x%x\n", val);
-                       }
-                       dev_err(pp->dev, "PCIe Link Fail\n");
-                       return -EINVAL;
-               }
-       }
-
-       dev_info(pp->dev, "Link up\n");
-
-       return 0;
-}
-
-static void exynos_pcie_clear_irq_pulse(struct pcie_port *pp)
-{
-       u32 val;
-       void __iomem *elbi_base = pp->elbi_base;
-
-       val = readl(elbi_base + PCIE_IRQ_PULSE);
-       writel(val, elbi_base + PCIE_IRQ_PULSE);
-       return;
-}
-
-static void exynos_pcie_enable_irq_pulse(struct pcie_port *pp)
-{
-       u32 val;
-       void __iomem *elbi_base = pp->elbi_base;
-
-       /* enable INTX interrupt */
-       val = IRQ_INTA_ASSERT | IRQ_INTB_ASSERT |
-               IRQ_INTC_ASSERT | IRQ_INTD_ASSERT,
-       writel(val, elbi_base + PCIE_IRQ_EN_PULSE);
-       return;
-}
-
-static irqreturn_t exynos_pcie_irq_handler(int irq, void *arg)
-{
-       struct pcie_port *pp = arg;
-
-       exynos_pcie_clear_irq_pulse(pp);
-       return IRQ_HANDLED;
-}
-
-static void exynos_pcie_enable_interrupts(struct pcie_port *pp)
-{
-       exynos_pcie_enable_irq_pulse(pp);
-       return;
-}
-
-static void exynos_pcie_host_init(struct pcie_port *pp)
-{
-       struct pcie_port_info *config = &pp->config;
-       u32 val;
-
-       /* Keep first 64K for IO */
-       pp->cfg0_base = pp->cfg.start;
-       pp->cfg1_base = pp->cfg.start + config->cfg0_size;
-       pp->io_base = pp->io.start;
-       pp->mem_base = pp->mem.start;
-
-       /* enable link */
-       exynos_pcie_establish_link(pp);
-
-       exynos_pcie_wr_own_conf(pp, PCI_BASE_ADDRESS_0, 4, 0);
-
-       /* program correct class for RC */
-       exynos_pcie_wr_own_conf(pp, PCI_CLASS_DEVICE, 2, PCI_CLASS_BRIDGE_PCI);
-
-       exynos_pcie_rd_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, &val);
-       val |= PORT_LOGIC_SPEED_CHANGE;
-       exynos_pcie_wr_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, val);
-
-       exynos_pcie_enable_interrupts(pp);
-}
-
-static int add_pcie_port(struct pcie_port *pp, struct platform_device *pdev)
-{
-       struct resource *elbi_base;
-       struct resource *phy_base;
-       struct resource *purple_base;
-       int ret;
-
-       elbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!elbi_base) {
-               dev_err(&pdev->dev, "couldn't get elbi base resource\n");
-               return -EINVAL;
-       }
-       pp->elbi_base = devm_ioremap_resource(&pdev->dev, elbi_base);
-       if (IS_ERR(pp->elbi_base))
-               return PTR_ERR(pp->elbi_base);
-
-       phy_base = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-       if (!phy_base) {
-               dev_err(&pdev->dev, "couldn't get phy base resource\n");
-               return -EINVAL;
-       }
-       pp->phy_base = devm_ioremap_resource(&pdev->dev, phy_base);
-       if (IS_ERR(pp->phy_base))
-               return PTR_ERR(pp->phy_base);
-
-       purple_base = platform_get_resource(pdev, IORESOURCE_MEM, 2);
-       if (!purple_base) {
-               dev_err(&pdev->dev, "couldn't get purple base resource\n");
-               return -EINVAL;
-       }
-       pp->purple_base = devm_ioremap_resource(&pdev->dev, purple_base);
-       if (IS_ERR(pp->purple_base))
-               return PTR_ERR(pp->purple_base);
-
-       pp->irq = platform_get_irq(pdev, 1);
-       if (!pp->irq) {
-               dev_err(&pdev->dev, "failed to get irq\n");
-               return -ENODEV;
-       }
-       ret = devm_request_irq(&pdev->dev, pp->irq, exynos_pcie_irq_handler,
-                               IRQF_SHARED, "exynos-pcie", pp);
-       if (ret) {
-               dev_err(&pdev->dev, "failed to request irq\n");
-               return ret;
-       }
-
-       pp->dbi_base = devm_ioremap(&pdev->dev, pp->cfg.start,
-                               resource_size(&pp->cfg));
-       if (!pp->dbi_base) {
-               dev_err(&pdev->dev, "error with ioremap\n");
-               return -ENOMEM;
-       }
-
-       pp->root_bus_nr = -1;
-
-       spin_lock_init(&pp->conf_lock);
-       exynos_pcie_host_init(pp);
-       pp->va_cfg0_base = devm_ioremap(&pdev->dev, pp->cfg0_base,
-                                       pp->config.cfg0_size);
-       if (!pp->va_cfg0_base) {
-               dev_err(pp->dev, "error with ioremap in function\n");
-               return -ENOMEM;
-       }
-       pp->va_cfg1_base = devm_ioremap(&pdev->dev, pp->cfg1_base,
-                                       pp->config.cfg1_size);
-       if (!pp->va_cfg1_base) {
-               dev_err(pp->dev, "error with ioremap\n");
-               return -ENOMEM;
-       }
-
-       return 0;
-}
-
-static int __init exynos_pcie_probe(struct platform_device *pdev)
-{
-       struct pcie_port *pp;
-       struct device_node *np = pdev->dev.of_node;
-       struct of_pci_range range;
-       struct of_pci_range_parser parser;
-       int ret;
-
-       pp = devm_kzalloc(&pdev->dev, sizeof(*pp), GFP_KERNEL);
-       if (!pp) {
-               dev_err(&pdev->dev, "no memory for pcie port\n");
-               return -ENOMEM;
-       }
-
-       pp->dev = &pdev->dev;
-
-       if (of_pci_range_parser_init(&parser, np)) {
-               dev_err(&pdev->dev, "missing ranges property\n");
-               return -EINVAL;
-       }
-
-       /* Get the I/O and memory ranges from DT */
-       for_each_of_pci_range(&parser, &range) {
-               unsigned long restype = range.flags & IORESOURCE_TYPE_BITS;
-               if (restype == IORESOURCE_IO) {
-                       of_pci_range_to_resource(&range, np, &pp->io);
-                       pp->io.name = "I/O";
-                       pp->io.start = max_t(resource_size_t,
-                                            PCIBIOS_MIN_IO,
-                                            range.pci_addr + global_io_offset);
-                       pp->io.end = min_t(resource_size_t,
-                                          IO_SPACE_LIMIT,
-                                          range.pci_addr + range.size
-                                          + global_io_offset);
-                       pp->config.io_size = resource_size(&pp->io);
-                       pp->config.io_bus_addr = range.pci_addr;
-               }
-               if (restype == IORESOURCE_MEM) {
-                       of_pci_range_to_resource(&range, np, &pp->mem);
-                       pp->mem.name = "MEM";
-                       pp->config.mem_size = resource_size(&pp->mem);
-                       pp->config.mem_bus_addr = range.pci_addr;
-               }
-               if (restype == 0) {
-                       of_pci_range_to_resource(&range, np, &pp->cfg);
-                       pp->config.cfg0_size = resource_size(&pp->cfg)/2;
-                       pp->config.cfg1_size = resource_size(&pp->cfg)/2;
-               }
-       }
-
-       pp->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0);
-
-       pp->clk = devm_clk_get(&pdev->dev, "pcie");
-       if (IS_ERR(pp->clk)) {
-               dev_err(&pdev->dev, "Failed to get pcie rc clock\n");
-               return PTR_ERR(pp->clk);
-       }
-       ret = clk_prepare_enable(pp->clk);
-       if (ret)
-               return ret;
-
-       pp->bus_clk = devm_clk_get(&pdev->dev, "pcie_bus");
-       if (IS_ERR(pp->bus_clk)) {
-               dev_err(&pdev->dev, "Failed to get pcie bus clock\n");
-               ret = PTR_ERR(pp->bus_clk);
-               goto fail_clk;
-       }
-       ret = clk_prepare_enable(pp->bus_clk);
-       if (ret)
-               goto fail_clk;
-
-       ret = add_pcie_port(pp, pdev);
-       if (ret < 0)
-               goto fail_bus_clk;
-
-       pp->controller = exynos_pci.nr_controllers;
-       exynos_pci.nr_controllers = 1;
-       exynos_pci.private_data = (void **)&pp;
-
-       pci_common_init(&exynos_pci);
-       pci_assign_unassigned_resources();
-#ifdef CONFIG_PCI_DOMAINS
-       exynos_pci.domain++;
-#endif
-
-       platform_set_drvdata(pdev, pp);
-       return 0;
-
-fail_bus_clk:
-       clk_disable_unprepare(pp->bus_clk);
-fail_clk:
-       clk_disable_unprepare(pp->clk);
-       return ret;
-}
-
-static int __exit exynos_pcie_remove(struct platform_device *pdev)
-{
-       struct pcie_port *pp = platform_get_drvdata(pdev);
-
-       clk_disable_unprepare(pp->bus_clk);
-       clk_disable_unprepare(pp->clk);
-
-       return 0;
-}
-
-static const struct of_device_id exynos_pcie_of_match[] = {
-       { .compatible = "samsung,exynos5440-pcie", },
-       {},
-};
-MODULE_DEVICE_TABLE(of, exynos_pcie_of_match);
-
-static struct platform_driver exynos_pcie_driver = {
-       .remove         = __exit_p(exynos_pcie_remove),
-       .driver = {
-               .name   = "exynos-pcie",
-               .owner  = THIS_MODULE,
-               .of_match_table = of_match_ptr(exynos_pcie_of_match),
-       },
-};
-
-static int exynos_pcie_abort(unsigned long addr, unsigned int fsr,
-                       struct pt_regs *regs)
-{
-       unsigned long pc = instruction_pointer(regs);
-       unsigned long instr = *(unsigned long *)pc;
-
-       WARN_ONCE(1, "pcie abort\n");
-
-       /*
-        * If the instruction being executed was a read,
-        * make it look like it read all-ones.
-        */
-       if ((instr & 0x0c100000) == 0x04100000) {
-               int reg = (instr >> 12) & 15;
-               unsigned long val;
-
-               if (instr & 0x00400000)
-                       val = 255;
-               else
-                       val = -1;
-
-               regs->uregs[reg] = val;
-               regs->ARM_pc += 4;
-               return 0;
-       }
-
-       if ((instr & 0x0e100090) == 0x00100090) {
-               int reg = (instr >> 12) & 15;
-
-               regs->uregs[reg] = -1;
-               regs->ARM_pc += 4;
-               return 0;
-       }
-
-       return 1;
-}
-
-/* Exynos PCIe driver does not allow module unload */
-
-static int __init pcie_init(void)
-{
-       hook_fault_code(16 + 6, exynos_pcie_abort, SIGBUS, 0,
-                       "imprecise external abort");
-
-       platform_driver_probe(&exynos_pcie_driver, exynos_pcie_probe);
-
-       return 0;
+       dw_pcie_writel_rc(pp, val, dbi_base + PCI_COMMAND);
 }
-subsys_initcall(pcie_init);
 
 MODULE_AUTHOR("Jingoo Han <jg1.han@samsung.com>");
-MODULE_DESCRIPTION("Samsung PCIe host controller driver");
+MODULE_DESCRIPTION("Designware PCIe host controller driver");
 MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/host/pcie-designware.h b/drivers/pci/host/pcie-designware.h
new file mode 100644 (file)
index 0000000..133820f
--- /dev/null
@@ -0,0 +1,65 @@
+/*
+ * Synopsys Designware PCIe host controller driver
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ *             http://www.samsung.com
+ *
+ * Author: Jingoo Han <jg1.han@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+struct pcie_port_info {
+       u32             cfg0_size;
+       u32             cfg1_size;
+       u32             io_size;
+       u32             mem_size;
+       phys_addr_t     io_bus_addr;
+       phys_addr_t     mem_bus_addr;
+};
+
+struct pcie_port {
+       struct device           *dev;
+       u8                      root_bus_nr;
+       void __iomem            *dbi_base;
+       u64                     cfg0_base;
+       void __iomem            *va_cfg0_base;
+       u64                     cfg1_base;
+       void __iomem            *va_cfg1_base;
+       u64                     io_base;
+       u64                     mem_base;
+       spinlock_t              conf_lock;
+       struct resource         cfg;
+       struct resource         io;
+       struct resource         mem;
+       struct pcie_port_info   config;
+       int                     irq;
+       u32                     lanes;
+       struct pcie_host_ops    *ops;
+};
+
+struct pcie_host_ops {
+       void (*readl_rc)(struct pcie_port *pp,
+                       void __iomem *dbi_base, u32 *val);
+       void (*writel_rc)(struct pcie_port *pp,
+                       u32 val, void __iomem *dbi_base);
+       int (*rd_own_conf)(struct pcie_port *pp, int where, int size, u32 *val);
+       int (*wr_own_conf)(struct pcie_port *pp, int where, int size, u32 val);
+       int (*link_up)(struct pcie_port *pp);
+       void (*host_init)(struct pcie_port *pp);
+};
+
+extern unsigned long global_io_offset;
+
+int cfg_read(void __iomem *addr, int where, int size, u32 *val);
+int cfg_write(void __iomem *addr, int where, int size, u32 val);
+int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size, u32 val);
+int dw_pcie_rd_own_conf(struct pcie_port *pp, int where, int size, u32 *val);
+int dw_pcie_link_up(struct pcie_port *pp);
+void dw_pcie_setup_rc(struct pcie_port *pp);
+int dw_pcie_host_init(struct pcie_port *pp);
+int dw_pcie_setup(int nr, struct pci_sys_data *sys);
+struct pci_bus *dw_pcie_scan_bus(int nr, struct pci_sys_data *sys);
+int dw_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin);
index 6fdd49c6f0b91760978139827511a02a7bb10e7d..f4e0289246672c0a9157927303945f0934e26d31 100644 (file)
@@ -49,6 +49,7 @@
 #define info(format, arg...) printk(KERN_INFO "%s: " format, MY_NAME , ## arg)
 #define warn(format, arg...) printk(KERN_WARNING "%s: " format, MY_NAME , ## arg)
 
+struct acpiphp_context;
 struct acpiphp_bridge;
 struct acpiphp_slot;
 
@@ -59,6 +60,7 @@ struct slot {
        struct hotplug_slot     *hotplug_slot;
        struct acpiphp_slot     *acpi_slot;
        struct hotplug_slot_info info;
+       unsigned int sun;       /* ACPI _SUN (Slot User Number) value */
 };
 
 static inline const char *slot_name(struct slot *slot)
@@ -75,15 +77,11 @@ struct acpiphp_bridge {
        struct list_head list;
        struct list_head slots;
        struct kref ref;
-       acpi_handle handle;
 
-       /* Ejectable PCI-to-PCI bridge (PCI bridge and PCI function) */
-       struct acpiphp_func *func;
+       struct acpiphp_context *context;
 
        int nr_slots;
 
-       u32 flags;
-
        /* This bus (host bridge) or Secondary bus (PCI-to-PCI bridge) */
        struct pci_bus *pci_bus;
 
@@ -99,15 +97,13 @@ struct acpiphp_bridge {
  */
 struct acpiphp_slot {
        struct list_head node;
-       struct acpiphp_bridge *bridge;  /* parent */
+       struct pci_bus *bus;
        struct list_head funcs;         /* one slot may have different
                                           objects (i.e. for each function) */
        struct slot *slot;
        struct mutex crit_sect;
 
        u8              device;         /* pci device# */
-
-       unsigned long long sun;         /* ACPI _SUN (slot unique number) */
        u32             flags;          /* see below */
 };
 
@@ -119,16 +115,32 @@ struct acpiphp_slot {
  * typically 8 objects per slot (i.e. for each PCI function)
  */
 struct acpiphp_func {
-       struct acpiphp_slot *slot;      /* parent */
+       struct acpiphp_bridge *parent;
+       struct acpiphp_slot *slot;
 
        struct list_head sibling;
-       struct notifier_block nb;
-       acpi_handle     handle;
 
        u8              function;       /* pci function# */
        u32             flags;          /* see below */
 };
 
+struct acpiphp_context {
+       acpi_handle handle;
+       struct acpiphp_func func;
+       struct acpiphp_bridge *bridge;
+       unsigned int refcount;
+};
+
+static inline struct acpiphp_context *func_to_context(struct acpiphp_func *func)
+{
+       return container_of(func, struct acpiphp_context, func);
+}
+
+static inline acpi_handle func_to_handle(struct acpiphp_func *func)
+{
+       return func_to_context(func)->handle;
+}
+
 /*
  * struct acpiphp_attention_info - device specific attention registration
  *
@@ -142,45 +154,32 @@ struct acpiphp_attention_info
        struct module *owner;
 };
 
-/* PCI bus bridge HID */
-#define ACPI_PCI_HOST_HID              "PNP0A03"
-
 /* ACPI _STA method value (ignore bit 4; battery present) */
 #define ACPI_STA_ALL                   (0x0000000f)
 
-/* bridge flags */
-#define BRIDGE_HAS_EJ0         (0x00000001)
-
 /* slot flags */
 
-#define SLOT_POWEREDON         (0x00000001)
-#define SLOT_ENABLED           (0x00000002)
-#define SLOT_MULTIFUNCTION     (0x00000004)
+#define SLOT_ENABLED           (0x00000001)
 
 /* function flags */
 
 #define FUNC_HAS_STA           (0x00000001)
 #define FUNC_HAS_EJ0           (0x00000002)
-#define FUNC_HAS_PS0           (0x00000010)
-#define FUNC_HAS_PS1           (0x00000020)
-#define FUNC_HAS_PS2           (0x00000040)
-#define FUNC_HAS_PS3           (0x00000080)
-#define FUNC_HAS_DCK            (0x00000100)
+#define FUNC_HAS_DCK            (0x00000004)
 
 /* function prototypes */
 
 /* acpiphp_core.c */
 int acpiphp_register_attention(struct acpiphp_attention_info*info);
 int acpiphp_unregister_attention(struct acpiphp_attention_info *info);
-int acpiphp_register_hotplug_slot(struct acpiphp_slot *slot);
+int acpiphp_register_hotplug_slot(struct acpiphp_slot *slot, unsigned int sun);
 void acpiphp_unregister_hotplug_slot(struct acpiphp_slot *slot);
 
 /* acpiphp_glue.c */
 typedef int (*acpiphp_callback)(struct acpiphp_slot *slot, void *data);
 
 int acpiphp_enable_slot(struct acpiphp_slot *slot);
-int acpiphp_disable_slot(struct acpiphp_slot *slot);
-int acpiphp_eject_slot(struct acpiphp_slot *slot);
+int acpiphp_disable_and_eject_slot(struct acpiphp_slot *slot);
 u8 acpiphp_get_power_status(struct acpiphp_slot *slot);
 u8 acpiphp_get_attention_status(struct acpiphp_slot *slot);
 u8 acpiphp_get_latch_status(struct acpiphp_slot *slot);
index ca8127950fcd49397027370d82937246204a0e9b..bf2203ef1308bfa13f3e8b2744db6bd0b2adbcb8 100644 (file)
@@ -155,15 +155,11 @@ static int enable_slot(struct hotplug_slot *hotplug_slot)
 static int disable_slot(struct hotplug_slot *hotplug_slot)
 {
        struct slot *slot = hotplug_slot->private;
-       int retval;
 
        dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
 
        /* disable the specified slot */
-       retval = acpiphp_disable_slot(slot->acpi_slot);
-       if (!retval)
-               retval = acpiphp_eject_slot(slot->acpi_slot);
-       return retval;
+       return acpiphp_disable_and_eject_slot(slot->acpi_slot);
 }
 
 
@@ -290,7 +286,8 @@ static void release_slot(struct hotplug_slot *hotplug_slot)
 }
 
 /* callback routine to initialize 'struct slot' for each slot */
-int acpiphp_register_hotplug_slot(struct acpiphp_slot *acpiphp_slot)
+int acpiphp_register_hotplug_slot(struct acpiphp_slot *acpiphp_slot,
+                                 unsigned int sun)
 {
        struct slot *slot;
        int retval = -ENOMEM;
@@ -317,12 +314,11 @@ int acpiphp_register_hotplug_slot(struct acpiphp_slot *acpiphp_slot)
        slot->hotplug_slot->info->adapter_status = acpiphp_get_adapter_status(slot->acpi_slot);
 
        acpiphp_slot->slot = slot;
-       snprintf(name, SLOT_NAME_SIZE, "%llu", slot->acpi_slot->sun);
+       slot->sun = sun;
+       snprintf(name, SLOT_NAME_SIZE, "%u", sun);
 
-       retval = pci_hp_register(slot->hotplug_slot,
-                                       acpiphp_slot->bridge->pci_bus,
-                                       acpiphp_slot->device,
-                                       name);
+       retval = pci_hp_register(slot->hotplug_slot, acpiphp_slot->bus,
+                                acpiphp_slot->device, name);
        if (retval == -EBUSY)
                goto error_hpslot;
        if (retval) {
index 59df8575a48ce834fb48ea002689ed97ed9ca51a..8054ddcdaed0a195f02e83d19513c61b8b284368 100644 (file)
@@ -46,6 +46,7 @@
 #include <linux/pci.h>
 #include <linux/pci_hotplug.h>
 #include <linux/pci-acpi.h>
+#include <linux/pm_runtime.h>
 #include <linux/mutex.h>
 #include <linux/slab.h>
 #include <linux/acpi.h>
 
 static LIST_HEAD(bridge_list);
 static DEFINE_MUTEX(bridge_mutex);
+static DEFINE_MUTEX(acpiphp_context_lock);
 
 #define MY_NAME "acpiphp_glue"
 
-static void handle_hotplug_event_bridge (acpi_handle, u32, void *);
+static void handle_hotplug_event(acpi_handle handle, u32 type, void *data);
 static void acpiphp_sanitize_bus(struct pci_bus *bus);
 static void acpiphp_set_hpp_values(struct pci_bus *bus);
-static void hotplug_event_func(acpi_handle handle, u32 type, void *context);
-static void handle_hotplug_event_func(acpi_handle handle, u32 type, void *context);
+static void hotplug_event(acpi_handle handle, u32 type, void *data);
 static void free_bridge(struct kref *kref);
 
-/* callback routine to check for the existence of a pci dock device */
-static acpi_status
-is_pci_dock_device(acpi_handle handle, u32 lvl, void *context, void **rv)
+static void acpiphp_context_handler(acpi_handle handle, void *context)
 {
-       int *count = (int *)context;
+       /* Intentionally empty. */
+}
 
-       if (is_dock_device(handle)) {
-               (*count)++;
-               return AE_CTRL_TERMINATE;
-       } else {
-               return AE_OK;
+/**
+ * acpiphp_init_context - Create hotplug context and grab a reference to it.
+ * @handle: ACPI object handle to create the context for.
+ *
+ * Call under acpiphp_context_lock.
+ */
+static struct acpiphp_context *acpiphp_init_context(acpi_handle handle)
+{
+       struct acpiphp_context *context;
+       acpi_status status;
+
+       context = kzalloc(sizeof(*context), GFP_KERNEL);
+       if (!context)
+               return NULL;
+
+       context->handle = handle;
+       context->refcount = 1;
+       status = acpi_attach_data(handle, acpiphp_context_handler, context);
+       if (ACPI_FAILURE(status)) {
+               kfree(context);
+               return NULL;
        }
+       return context;
+}
+
+/**
+ * acpiphp_get_context - Get hotplug context and grab a reference to it.
+ * @handle: ACPI object handle to get the context for.
+ *
+ * Call under acpiphp_context_lock.
+ */
+static struct acpiphp_context *acpiphp_get_context(acpi_handle handle)
+{
+       struct acpiphp_context *context = NULL;
+       acpi_status status;
+       void *data;
+
+       status = acpi_get_data(handle, acpiphp_context_handler, &data);
+       if (ACPI_SUCCESS(status)) {
+               context = data;
+               context->refcount++;
+       }
+       return context;
+}
+
+/**
+ * acpiphp_put_context - Drop a reference to ACPI hotplug context.
+ * @handle: ACPI object handle to put the context for.
+ *
+ * The context object is removed if there are no more references to it.
+ *
+ * Call under acpiphp_context_lock.
+ */
+static void acpiphp_put_context(struct acpiphp_context *context)
+{
+       if (--context->refcount)
+               return;
+
+       WARN_ON(context->bridge);
+       acpi_detach_data(context->handle, acpiphp_context_handler);
+       kfree(context);
 }
 
 static inline void get_bridge(struct acpiphp_bridge *bridge)
@@ -91,25 +146,36 @@ static inline void put_bridge(struct acpiphp_bridge *bridge)
 
 static void free_bridge(struct kref *kref)
 {
+       struct acpiphp_context *context;
        struct acpiphp_bridge *bridge;
        struct acpiphp_slot *slot, *next;
        struct acpiphp_func *func, *tmp;
 
+       mutex_lock(&acpiphp_context_lock);
+
        bridge = container_of(kref, struct acpiphp_bridge, ref);
 
        list_for_each_entry_safe(slot, next, &bridge->slots, node) {
-               list_for_each_entry_safe(func, tmp, &slot->funcs, sibling) {
-                       kfree(func);
-               }
+               list_for_each_entry_safe(func, tmp, &slot->funcs, sibling)
+                       acpiphp_put_context(func_to_context(func));
+
                kfree(slot);
        }
 
-       /* Release reference acquired by acpiphp_bridge_handle_to_function() */
-       if ((bridge->flags & BRIDGE_HAS_EJ0) && bridge->func)
-               put_bridge(bridge->func->slot->bridge);
+       context = bridge->context;
+       /* Root bridges will not have hotplug context. */
+       if (context) {
+               /* Release the reference taken by acpiphp_enumerate_slots(). */
+               put_bridge(context->func.parent);
+               context->bridge = NULL;
+               acpiphp_put_context(context);
+       }
+
        put_device(&bridge->pci_bus->dev);
        pci_dev_put(bridge->pci_dev);
        kfree(bridge);
+
+       mutex_unlock(&acpiphp_context_lock);
 }
 
 /*
@@ -119,15 +185,14 @@ static void free_bridge(struct kref *kref)
  * TBD - figure out a way to only call fixups for
  * systems that require them.
  */
-static int post_dock_fixups(struct notifier_block *nb, unsigned long val,
-       void *v)
+static void post_dock_fixups(acpi_handle not_used, u32 event, void *data)
 {
-       struct acpiphp_func *func = container_of(nb, struct acpiphp_func, nb);
-       struct pci_bus *bus = func->slot->bridge->pci_bus;
+       struct acpiphp_context *context = data;
+       struct pci_bus *bus = context->func.slot->bus;
        u32 buses;
 
        if (!bus->self)
-               return  NOTIFY_OK;
+               return;
 
        /* fixup bad _DCK function that rewrites
         * secondary bridge on slot
@@ -143,12 +208,12 @@ static int post_dock_fixups(struct notifier_block *nb, unsigned long val,
                        | ((unsigned int)(bus->busn_res.end) << 16);
                pci_write_config_dword(bus->self, PCI_PRIMARY_BUS, buses);
        }
-       return NOTIFY_OK;
 }
 
 
 static const struct acpi_dock_ops acpiphp_dock_ops = {
-       .handler = hotplug_event_func,
+       .fixup = post_dock_fixups,
+       .handler = hotplug_event,
 };
 
 /* Check whether the PCI device is managed by native PCIe hotplug driver */
@@ -182,129 +247,118 @@ static bool device_is_managed_by_native_pciehp(struct pci_dev *pdev)
 
 static void acpiphp_dock_init(void *data)
 {
-       struct acpiphp_func *func = data;
+       struct acpiphp_context *context = data;
 
-       get_bridge(func->slot->bridge);
+       get_bridge(context->func.parent);
 }
 
 static void acpiphp_dock_release(void *data)
 {
-       struct acpiphp_func *func = data;
+       struct acpiphp_context *context = data;
 
-       put_bridge(func->slot->bridge);
+       put_bridge(context->func.parent);
 }
 
 /* callback routine to register each ACPI PCI slot object */
-static acpi_status
-register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
+static acpi_status register_slot(acpi_handle handle, u32 lvl, void *data,
+                                void **rv)
 {
-       struct acpiphp_bridge *bridge = (struct acpiphp_bridge *)context;
+       struct acpiphp_bridge *bridge = data;
+       struct acpiphp_context *context;
        struct acpiphp_slot *slot;
        struct acpiphp_func *newfunc;
-       acpi_handle tmp;
        acpi_status status = AE_OK;
-       unsigned long long adr, sun;
-       int device, function, retval, found = 0;
+       unsigned long long adr;
+       int device, function;
        struct pci_bus *pbus = bridge->pci_bus;
-       struct pci_dev *pdev;
+       struct pci_dev *pdev = bridge->pci_dev;
        u32 val;
 
-       if (!acpi_pci_check_ejectable(pbus, handle) && !is_dock_device(handle))
+       if (pdev && device_is_managed_by_native_pciehp(pdev))
                return AE_OK;
 
        status = acpi_evaluate_integer(handle, "_ADR", NULL, &adr);
        if (ACPI_FAILURE(status)) {
-               warn("can't evaluate _ADR (%#x)\n", status);
+               acpi_handle_warn(handle, "can't evaluate _ADR (%#x)\n", status);
                return AE_OK;
        }
 
        device = (adr >> 16) & 0xffff;
        function = adr & 0xffff;
 
-       pdev = bridge->pci_dev;
-       if (pdev && device_is_managed_by_native_pciehp(pdev))
-               return AE_OK;
-
-       newfunc = kzalloc(sizeof(struct acpiphp_func), GFP_KERNEL);
-       if (!newfunc)
-               return AE_NO_MEMORY;
-
-       newfunc->handle = handle;
+       mutex_lock(&acpiphp_context_lock);
+       context = acpiphp_init_context(handle);
+       if (!context) {
+               mutex_unlock(&acpiphp_context_lock);
+               acpi_handle_err(handle, "No hotplug context\n");
+               return AE_NOT_EXIST;
+       }
+       newfunc = &context->func;
        newfunc->function = function;
+       newfunc->parent = bridge;
+       mutex_unlock(&acpiphp_context_lock);
 
-       if (ACPI_SUCCESS(acpi_get_handle(handle, "_EJ0", &tmp)))
+       if (acpi_has_method(handle, "_EJ0"))
                newfunc->flags = FUNC_HAS_EJ0;
 
-       if (ACPI_SUCCESS(acpi_get_handle(handle, "_STA", &tmp)))
+       if (acpi_has_method(handle, "_STA"))
                newfunc->flags |= FUNC_HAS_STA;
 
-       if (ACPI_SUCCESS(acpi_get_handle(handle, "_PS0", &tmp)))
-               newfunc->flags |= FUNC_HAS_PS0;
-
-       if (ACPI_SUCCESS(acpi_get_handle(handle, "_PS3", &tmp)))
-               newfunc->flags |= FUNC_HAS_PS3;
-
-       if (ACPI_SUCCESS(acpi_get_handle(handle, "_DCK", &tmp)))
+       if (acpi_has_method(handle, "_DCK"))
                newfunc->flags |= FUNC_HAS_DCK;
 
-       status = acpi_evaluate_integer(handle, "_SUN", NULL, &sun);
-       if (ACPI_FAILURE(status)) {
-               /*
-                * use the count of the number of slots we've found
-                * for the number of the slot
-                */
-               sun = bridge->nr_slots+1;
-       }
-
        /* search for objects that share the same slot */
        list_for_each_entry(slot, &bridge->slots, node)
-               if (slot->device == device) {
-                       if (slot->sun != sun)
-                               warn("sibling found, but _SUN doesn't match!\n");
-                       found = 1;
-                       break;
-               }
+               if (slot->device == device)
+                       goto slot_found;
 
-       if (!found) {
-               slot = kzalloc(sizeof(struct acpiphp_slot), GFP_KERNEL);
-               if (!slot) {
-                       kfree(newfunc);
-                       return AE_NO_MEMORY;
-               }
+       slot = kzalloc(sizeof(struct acpiphp_slot), GFP_KERNEL);
+       if (!slot) {
+               status = AE_NO_MEMORY;
+               goto err;
+       }
 
-               slot->bridge = bridge;
-               slot->device = device;
-               slot->sun = sun;
-               INIT_LIST_HEAD(&slot->funcs);
-               mutex_init(&slot->crit_sect);
+       slot->bus = bridge->pci_bus;
+       slot->device = device;
+       INIT_LIST_HEAD(&slot->funcs);
+       mutex_init(&slot->crit_sect);
+
+       list_add_tail(&slot->node, &bridge->slots);
+
+       /* Register slots for ejectable functions only. */
+       if (acpi_pci_check_ejectable(pbus, handle) || is_dock_device(handle)) {
+               unsigned long long sun;
+               int retval;
 
-               mutex_lock(&bridge_mutex);
-               list_add_tail(&slot->node, &bridge->slots);
-               mutex_unlock(&bridge_mutex);
                bridge->nr_slots++;
+               status = acpi_evaluate_integer(handle, "_SUN", NULL, &sun);
+               if (ACPI_FAILURE(status))
+                       sun = bridge->nr_slots;
 
                dbg("found ACPI PCI Hotplug slot %llu at PCI %04x:%02x:%02x\n",
-                   slot->sun, pci_domain_nr(pbus), pbus->number, device);
-               retval = acpiphp_register_hotplug_slot(slot);
+                   sun, pci_domain_nr(pbus), pbus->number, device);
+
+               retval = acpiphp_register_hotplug_slot(slot, sun);
                if (retval) {
+                       slot->slot = NULL;
+                       bridge->nr_slots--;
                        if (retval == -EBUSY)
                                warn("Slot %llu already registered by another "
-                                       "hotplug driver\n", slot->sun);
+                                       "hotplug driver\n", sun);
                        else
                                warn("acpiphp_register_hotplug_slot failed "
                                        "(err code = 0x%x)\n", retval);
-                       goto err_exit;
                }
+               /* Even if the slot registration fails, we can still use it. */
        }
 
+ slot_found:
        newfunc->slot = slot;
-       mutex_lock(&bridge_mutex);
        list_add_tail(&newfunc->sibling, &slot->funcs);
-       mutex_unlock(&bridge_mutex);
 
        if (pci_bus_read_dev_vendor_id(pbus, PCI_DEVFN(device, function),
                                       &val, 60*1000))
-               slot->flags |= (SLOT_ENABLED | SLOT_POWEREDON);
+               slot->flags |= SLOT_ENABLED;
 
        if (is_dock_device(handle)) {
                /* we don't want to call this device's _EJ0
@@ -313,136 +367,46 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
                 */
                newfunc->flags &= ~FUNC_HAS_EJ0;
                if (register_hotplug_dock_device(handle,
-                       &acpiphp_dock_ops, newfunc,
+                       &acpiphp_dock_ops, context,
                        acpiphp_dock_init, acpiphp_dock_release))
                        dbg("failed to register dock device\n");
-
-               /* we need to be notified when dock events happen
-                * outside of the hotplug operation, since we may
-                * need to do fixups before we can hotplug.
-                */
-               newfunc->nb.notifier_call = post_dock_fixups;
-               if (register_dock_notifier(&newfunc->nb))
-                       dbg("failed to register a dock notifier");
        }
 
        /* install notify handler */
        if (!(newfunc->flags & FUNC_HAS_DCK)) {
-               status = acpi_install_notify_handler(handle,
-                                            ACPI_SYSTEM_NOTIFY,
-                                            handle_hotplug_event_func,
-                                            newfunc);
-
+               status = acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
+                                                    handle_hotplug_event,
+                                                    context);
                if (ACPI_FAILURE(status))
-                       err("failed to register interrupt notify handler\n");
-       } else
-               status = AE_OK;
-
-       return status;
-
- err_exit:
-       bridge->nr_slots--;
-       mutex_lock(&bridge_mutex);
-       list_del(&slot->node);
-       mutex_unlock(&bridge_mutex);
-       kfree(slot);
-       kfree(newfunc);
-
-       return AE_OK;
-}
-
-
-/* see if it's worth looking at this bridge */
-static int detect_ejectable_slots(acpi_handle handle)
-{
-       int found = acpi_pci_detect_ejectable(handle);
-       if (!found) {
-               acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, (u32)1,
-                                   is_pci_dock_device, NULL, (void *)&found, NULL);
-       }
-       return found;
-}
-
-/* initialize miscellaneous stuff for both root and PCI-to-PCI bridge */
-static void init_bridge_misc(struct acpiphp_bridge *bridge)
-{
-       acpi_status status;
-
-       /* must be added to the list prior to calling register_slot */
-       mutex_lock(&bridge_mutex);
-       list_add(&bridge->list, &bridge_list);
-       mutex_unlock(&bridge_mutex);
-
-       /* register all slot objects under this bridge */
-       status = acpi_walk_namespace(ACPI_TYPE_DEVICE, bridge->handle, (u32)1,
-                                    register_slot, NULL, bridge, NULL);
-       if (ACPI_FAILURE(status)) {
-               mutex_lock(&bridge_mutex);
-               list_del(&bridge->list);
-               mutex_unlock(&bridge_mutex);
-               return;
+                       acpi_handle_err(handle,
+                                       "failed to install notify handler\n");
        }
 
-       /* install notify handler for P2P bridges */
-       if (!pci_is_root_bus(bridge->pci_bus)) {
-               if ((bridge->flags & BRIDGE_HAS_EJ0) && bridge->func) {
-                       status = acpi_remove_notify_handler(bridge->func->handle,
-                                               ACPI_SYSTEM_NOTIFY,
-                                               handle_hotplug_event_func);
-                       if (ACPI_FAILURE(status))
-                               err("failed to remove notify handler\n");
-               }
-               status = acpi_install_notify_handler(bridge->handle,
-                                            ACPI_SYSTEM_NOTIFY,
-                                            handle_hotplug_event_bridge,
-                                            bridge);
-
-               if (ACPI_FAILURE(status)) {
-                       err("failed to register interrupt notify handler\n");
-               }
-       }
-}
-
-
-/* find acpiphp_func from acpiphp_bridge */
-static struct acpiphp_func *acpiphp_bridge_handle_to_function(acpi_handle handle)
-{
-       struct acpiphp_bridge *bridge;
-       struct acpiphp_slot *slot;
-       struct acpiphp_func *func = NULL;
-
-       mutex_lock(&bridge_mutex);
-       list_for_each_entry(bridge, &bridge_list, list) {
-               list_for_each_entry(slot, &bridge->slots, node) {
-                       list_for_each_entry(func, &slot->funcs, sibling) {
-                               if (func->handle == handle) {
-                                       get_bridge(func->slot->bridge);
-                                       mutex_unlock(&bridge_mutex);
-                                       return func;
-                               }
-                       }
-               }
-       }
-       mutex_unlock(&bridge_mutex);
+       return AE_OK;
 
-       return NULL;
+ err:
+       mutex_lock(&acpiphp_context_lock);
+       acpiphp_put_context(context);
+       mutex_unlock(&acpiphp_context_lock);
+       return status;
 }
 
-
 static struct acpiphp_bridge *acpiphp_handle_to_bridge(acpi_handle handle)
 {
-       struct acpiphp_bridge *bridge;
-
-       mutex_lock(&bridge_mutex);
-       list_for_each_entry(bridge, &bridge_list, list)
-               if (bridge->handle == handle) {
+       struct acpiphp_context *context;
+       struct acpiphp_bridge *bridge = NULL;
+
+       mutex_lock(&acpiphp_context_lock);
+       context = acpiphp_get_context(handle);
+       if (context) {
+               bridge = context->bridge;
+               if (bridge)
                        get_bridge(bridge);
-                       mutex_unlock(&bridge_mutex);
-                       return bridge;
-               }
-       mutex_unlock(&bridge_mutex);
 
-       return NULL;
+               acpiphp_put_context(context);
+       }
+       mutex_unlock(&acpiphp_context_lock);
+       return bridge;
 }
 
 static void cleanup_bridge(struct acpiphp_bridge *bridge)
@@ -450,40 +414,24 @@ static void cleanup_bridge(struct acpiphp_bridge *bridge)
        struct acpiphp_slot *slot;
        struct acpiphp_func *func;
        acpi_status status;
-       acpi_handle handle = bridge->handle;
-
-       if (!pci_is_root_bus(bridge->pci_bus)) {
-               status = acpi_remove_notify_handler(handle,
-                                           ACPI_SYSTEM_NOTIFY,
-                                           handle_hotplug_event_bridge);
-               if (ACPI_FAILURE(status))
-                       err("failed to remove notify handler\n");
-       }
-
-       if ((bridge->flags & BRIDGE_HAS_EJ0) && bridge->func) {
-               status = acpi_install_notify_handler(bridge->func->handle,
-                                               ACPI_SYSTEM_NOTIFY,
-                                               handle_hotplug_event_func,
-                                               bridge->func);
-               if (ACPI_FAILURE(status))
-                       err("failed to install interrupt notify handler\n");
-       }
 
        list_for_each_entry(slot, &bridge->slots, node) {
                list_for_each_entry(func, &slot->funcs, sibling) {
-                       if (is_dock_device(func->handle)) {
-                               unregister_hotplug_dock_device(func->handle);
-                               unregister_dock_notifier(&func->nb);
-                       }
+                       acpi_handle handle = func_to_handle(func);
+
+                       if (is_dock_device(handle))
+                               unregister_hotplug_dock_device(handle);
+
                        if (!(func->flags & FUNC_HAS_DCK)) {
-                               status = acpi_remove_notify_handler(func->handle,
-                                               ACPI_SYSTEM_NOTIFY,
-                                               handle_hotplug_event_func);
+                               status = acpi_remove_notify_handler(handle,
+                                                       ACPI_SYSTEM_NOTIFY,
+                                                       handle_hotplug_event);
                                if (ACPI_FAILURE(status))
                                        err("failed to remove notify handler\n");
                        }
                }
-               acpiphp_unregister_hotplug_slot(slot);
+               if (slot->slot)
+                       acpiphp_unregister_hotplug_slot(slot);
        }
 
        mutex_lock(&bridge_mutex);
@@ -491,71 +439,6 @@ static void cleanup_bridge(struct acpiphp_bridge *bridge)
        mutex_unlock(&bridge_mutex);
 }
 
-static int power_on_slot(struct acpiphp_slot *slot)
-{
-       acpi_status status;
-       struct acpiphp_func *func;
-       int retval = 0;
-
-       /* if already enabled, just skip */
-       if (slot->flags & SLOT_POWEREDON)
-               goto err_exit;
-
-       list_for_each_entry(func, &slot->funcs, sibling) {
-               if (func->flags & FUNC_HAS_PS0) {
-                       dbg("%s: executing _PS0\n", __func__);
-                       status = acpi_evaluate_object(func->handle, "_PS0", NULL, NULL);
-                       if (ACPI_FAILURE(status)) {
-                               warn("%s: _PS0 failed\n", __func__);
-                               retval = -1;
-                               goto err_exit;
-                       } else
-                               break;
-               }
-       }
-
-       /* TBD: evaluate _STA to check if the slot is enabled */
-
-       slot->flags |= SLOT_POWEREDON;
-
- err_exit:
-       return retval;
-}
-
-
-static int power_off_slot(struct acpiphp_slot *slot)
-{
-       acpi_status status;
-       struct acpiphp_func *func;
-
-       int retval = 0;
-
-       /* if already disabled, just skip */
-       if ((slot->flags & SLOT_POWEREDON) == 0)
-               goto err_exit;
-
-       list_for_each_entry(func, &slot->funcs, sibling) {
-               if (func->flags & FUNC_HAS_PS3) {
-                       status = acpi_evaluate_object(func->handle, "_PS3", NULL, NULL);
-                       if (ACPI_FAILURE(status)) {
-                               warn("%s: _PS3 failed\n", __func__);
-                               retval = -1;
-                               goto err_exit;
-                       } else
-                               break;
-               }
-       }
-
-       /* TBD: evaluate _STA to check if the slot is disabled */
-
-       slot->flags &= (~SLOT_POWEREDON);
-
- err_exit:
-       return retval;
-}
-
-
-
 /**
  * acpiphp_max_busnr - return the highest reserved bus number under the given bus.
  * @bus: bus to start search with
@@ -583,52 +466,32 @@ static unsigned char acpiphp_max_busnr(struct pci_bus *bus)
        return max;
 }
 
-
 /**
- * acpiphp_bus_add - add a new bus to acpi subsystem
- * @func: acpiphp_func of the bridge
+ * acpiphp_bus_trim - Trim device objects in an ACPI namespace subtree.
+ * @handle: ACPI device object handle to start from.
  */
-static int acpiphp_bus_add(struct acpiphp_func *func)
+static void acpiphp_bus_trim(acpi_handle handle)
 {
-       struct acpi_device *device;
-       int ret_val;
-
-       if (!acpi_bus_get_device(func->handle, &device)) {
-               dbg("bus exists... trim\n");
-               /* this shouldn't be in here, so remove
-                * the bus then re-add it...
-                */
-               acpi_bus_trim(device);
-       }
-
-       ret_val = acpi_bus_scan(func->handle);
-       if (!ret_val)
-               ret_val = acpi_bus_get_device(func->handle, &device);
-
-       if (ret_val)
-               dbg("error adding bus, %x\n", -ret_val);
+       struct acpi_device *adev = NULL;
 
-       return ret_val;
+       acpi_bus_get_device(handle, &adev);
+       if (adev)
+               acpi_bus_trim(adev);
 }
 
-
 /**
- * acpiphp_bus_trim - trim a bus from acpi subsystem
- * @handle: handle to acpi namespace
+ * acpiphp_bus_add - Scan ACPI namespace subtree.
+ * @handle: ACPI object handle to start the scan from.
  */
-static int acpiphp_bus_trim(acpi_handle handle)
+static void acpiphp_bus_add(acpi_handle handle)
 {
-       struct acpi_device *device;
-       int retval;
-
-       retval = acpi_bus_get_device(handle, &device);
-       if (retval) {
-               dbg("acpi_device not found\n");
-               return retval;
-       }
+       struct acpi_device *adev = NULL;
 
-       acpi_bus_trim(device);
-       return 0;
+       acpiphp_bus_trim(handle);
+       acpi_bus_scan(handle);
+       acpi_bus_get_device(handle, &adev);
+       if (adev)
+               acpi_device_set_power(adev, ACPI_STATE_D0);
 }
 
 static void acpiphp_set_acpi_region(struct acpiphp_slot *slot)
@@ -645,7 +508,8 @@ static void acpiphp_set_acpi_region(struct acpiphp_slot *slot)
                params[1].type = ACPI_TYPE_INTEGER;
                params[1].integer.value = 1;
                /* _REG is optional, we don't care about if there is failure */
-               acpi_evaluate_object(func->handle, "_REG", &arg_list, NULL);
+               acpi_evaluate_object(func_to_handle(func), "_REG", &arg_list,
+                                    NULL);
        }
 }
 
@@ -653,59 +517,44 @@ static void check_hotplug_bridge(struct acpiphp_slot *slot, struct pci_dev *dev)
 {
        struct acpiphp_func *func;
 
-       if (!dev->subordinate)
-               return;
-
        /* quirk, or pcie could set it already */
        if (dev->is_hotplug_bridge)
                return;
 
-       if (PCI_SLOT(dev->devfn) != slot->device)
-               return;
-
        list_for_each_entry(func, &slot->funcs, sibling) {
                if (PCI_FUNC(dev->devfn) == func->function) {
-                       /* check if this bridge has ejectable slots */
-                       if ((detect_ejectable_slots(func->handle) > 0))
-                               dev->is_hotplug_bridge = 1;
+                       dev->is_hotplug_bridge = 1;
                        break;
                }
        }
 }
 
 /**
- * enable_device - enable, configure a slot
+ * enable_slot - enable, configure a slot
  * @slot: slot to be enabled
  *
  * This function should be called per *physical slot*,
  * not per each slot object in ACPI namespace.
  */
-static int __ref enable_device(struct acpiphp_slot *slot)
+static void __ref enable_slot(struct acpiphp_slot *slot)
 {
        struct pci_dev *dev;
-       struct pci_bus *bus = slot->bridge->pci_bus;
+       struct pci_bus *bus = slot->bus;
        struct acpiphp_func *func;
-       int num, max, pass;
+       int max, pass;
        LIST_HEAD(add_list);
 
-       if (slot->flags & SLOT_ENABLED)
-               goto err_exit;
-
        list_for_each_entry(func, &slot->funcs, sibling)
-               acpiphp_bus_add(func);
+               acpiphp_bus_add(func_to_handle(func));
 
-       num = pci_scan_slot(bus, PCI_DEVFN(slot->device, 0));
-       if (num == 0) {
-               /* Maybe only part of funcs are added. */
-               dbg("No new device found\n");
-               goto err_exit;
-       }
+       pci_scan_slot(bus, PCI_DEVFN(slot->device, 0));
 
        max = acpiphp_max_busnr(bus);
        for (pass = 0; pass < 2; pass++) {
                list_for_each_entry(dev, &bus->devices, bus_list) {
                        if (PCI_SLOT(dev->devfn) != slot->device)
                                continue;
+
                        if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
                            dev->hdr_type == PCI_HEADER_TYPE_CARDBUS) {
                                max = pci_scan_bridge(bus, dev, max, pass);
@@ -744,16 +593,12 @@ static int __ref enable_device(struct acpiphp_slot *slot)
                        continue;
                }
        }
-
-
- err_exit:
-       return 0;
 }
 
 /* return first device in slot, acquiring a reference on it */
 static struct pci_dev *dev_in_slot(struct acpiphp_slot *slot)
 {
-       struct pci_bus *bus = slot->bridge->pci_bus;
+       struct pci_bus *bus = slot->bus;
        struct pci_dev *dev;
        struct pci_dev *ret = NULL;
 
@@ -769,16 +614,16 @@ static struct pci_dev *dev_in_slot(struct acpiphp_slot *slot)
 }
 
 /**
- * disable_device - disable a slot
+ * disable_slot - disable a slot
  * @slot: ACPI PHP slot
  */
-static int disable_device(struct acpiphp_slot *slot)
+static void disable_slot(struct acpiphp_slot *slot)
 {
        struct acpiphp_func *func;
        struct pci_dev *pdev;
 
        /*
-        * enable_device() enumerates all functions in this device via
+        * enable_slot() enumerates all functions in this device via
         * pci_scan_slot(), whether they have associated ACPI hotplug
         * methods (_EJ0, etc.) or not.  Therefore, we remove all functions
         * here.
@@ -788,13 +633,10 @@ static int disable_device(struct acpiphp_slot *slot)
                pci_dev_put(pdev);
        }
 
-       list_for_each_entry(func, &slot->funcs, sibling) {
-               acpiphp_bus_trim(func->handle);
-       }
+       list_for_each_entry(func, &slot->funcs, sibling)
+               acpiphp_bus_trim(func_to_handle(func));
 
        slot->flags &= (~SLOT_ENABLED);
-
-       return 0;
 }
 
 
@@ -812,18 +654,21 @@ static int disable_device(struct acpiphp_slot *slot)
  */
 static unsigned int get_slot_status(struct acpiphp_slot *slot)
 {
-       acpi_status status;
        unsigned long long sta = 0;
-       u32 dvid;
        struct acpiphp_func *func;
 
        list_for_each_entry(func, &slot->funcs, sibling) {
                if (func->flags & FUNC_HAS_STA) {
-                       status = acpi_evaluate_integer(func->handle, "_STA", NULL, &sta);
+                       acpi_status status;
+
+                       status = acpi_evaluate_integer(func_to_handle(func),
+                                                      "_STA", NULL, &sta);
                        if (ACPI_SUCCESS(status) && sta)
                                break;
                } else {
-                       pci_bus_read_config_dword(slot->bridge->pci_bus,
+                       u32 dvid;
+
+                       pci_bus_read_config_dword(slot->bus,
                                                  PCI_DEVFN(slot->device,
                                                            func->function),
                                                  PCI_VENDOR_ID, &dvid);
@@ -838,34 +683,42 @@ static unsigned int get_slot_status(struct acpiphp_slot *slot)
 }
 
 /**
- * acpiphp_eject_slot - physically eject the slot
- * @slot: ACPI PHP slot
+ * trim_stale_devices - remove PCI devices that are not responding.
+ * @dev: PCI device to start walking the hierarchy from.
  */
-int acpiphp_eject_slot(struct acpiphp_slot *slot)
+static void trim_stale_devices(struct pci_dev *dev)
 {
-       acpi_status status;
-       struct acpiphp_func *func;
-       struct acpi_object_list arg_list;
-       union acpi_object arg;
+       acpi_handle handle = ACPI_HANDLE(&dev->dev);
+       struct pci_bus *bus = dev->subordinate;
+       bool alive = false;
 
-       list_for_each_entry(func, &slot->funcs, sibling) {
-               /* We don't want to call _EJ0 on non-existing functions. */
-               if ((func->flags & FUNC_HAS_EJ0)) {
-                       /* _EJ0 method take one argument */
-                       arg_list.count = 1;
-                       arg_list.pointer = &arg;
-                       arg.type = ACPI_TYPE_INTEGER;
-                       arg.integer.value = 1;
-
-                       status = acpi_evaluate_object(func->handle, "_EJ0", &arg_list, NULL);
-                       if (ACPI_FAILURE(status)) {
-                               warn("%s: _EJ0 failed\n", __func__);
-                               return -1;
-                       } else
-                               break;
-               }
+       if (handle) {
+               acpi_status status;
+               unsigned long long sta;
+
+               status = acpi_evaluate_integer(handle, "_STA", NULL, &sta);
+               alive = ACPI_SUCCESS(status) && sta == ACPI_STA_ALL;
+       }
+       if (!alive) {
+               u32 v;
+
+               /* Check if the device responds. */
+               alive = pci_bus_read_dev_vendor_id(dev->bus, dev->devfn, &v, 0);
+       }
+       if (!alive) {
+               pci_stop_and_remove_bus_device(dev);
+               if (handle)
+                       acpiphp_bus_trim(handle);
+       } else if (bus) {
+               struct pci_dev *child, *tmp;
+
+               /* The device is a bridge. so check the bus below it. */
+               pm_runtime_get_sync(&dev->dev);
+               list_for_each_entry_safe(child, tmp, &bus->devices, bus_list)
+                       trim_stale_devices(child);
+
+               pm_runtime_put(&dev->dev);
        }
-       return 0;
 }
 
 /**
@@ -875,43 +728,30 @@ int acpiphp_eject_slot(struct acpiphp_slot *slot)
  * Iterate over all slots under this bridge and make sure that if a
  * card is present they are enabled, and if not they are disabled.
  */
-static int acpiphp_check_bridge(struct acpiphp_bridge *bridge)
+static void acpiphp_check_bridge(struct acpiphp_bridge *bridge)
 {
        struct acpiphp_slot *slot;
-       int retval = 0;
-       int enabled, disabled;
-
-       enabled = disabled = 0;
 
        list_for_each_entry(slot, &bridge->slots, node) {
-               unsigned int status = get_slot_status(slot);
-               if (slot->flags & SLOT_ENABLED) {
-                       if (status == ACPI_STA_ALL)
-                               continue;
-                       retval = acpiphp_disable_slot(slot);
-                       if (retval) {
-                               err("Error occurred in disabling\n");
-                               goto err_exit;
-                       } else {
-                               acpiphp_eject_slot(slot);
-                       }
-                       disabled++;
+               struct pci_bus *bus = slot->bus;
+               struct pci_dev *dev, *tmp;
+
+               mutex_lock(&slot->crit_sect);
+               /* wake up all functions */
+               if (get_slot_status(slot) == ACPI_STA_ALL) {
+                       /* remove stale devices if any */
+                       list_for_each_entry_safe(dev, tmp, &bus->devices,
+                                                bus_list)
+                               if (PCI_SLOT(dev->devfn) == slot->device)
+                                       trim_stale_devices(dev);
+
+                       /* configure all functions */
+                       enable_slot(slot);
                } else {
-                       if (status != ACPI_STA_ALL)
-                               continue;
-                       retval = acpiphp_enable_slot(slot);
-                       if (retval) {
-                               err("Error occurred in enabling\n");
-                               goto err_exit;
-                       }
-                       enabled++;
+                       disable_slot(slot);
                }
+               mutex_unlock(&slot->crit_sect);
        }
-
-       dbg("%s: %d enabled, %d disabled\n", __func__, enabled, disabled);
-
- err_exit:
-       return retval;
 }
 
 static void acpiphp_set_hpp_values(struct pci_bus *bus)
@@ -950,25 +790,6 @@ static void acpiphp_sanitize_bus(struct pci_bus *bus)
  * ACPI event handlers
  */
 
-static acpi_status
-check_sub_bridges(acpi_handle handle, u32 lvl, void *context, void **rv)
-{
-       struct acpiphp_bridge *bridge;
-       char objname[64];
-       struct acpi_buffer buffer = { .length = sizeof(objname),
-                                     .pointer = objname };
-
-       bridge = acpiphp_handle_to_bridge(handle);
-       if (bridge) {
-               acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
-               dbg("%s: re-enumerating slots under %s\n",
-                       __func__, objname);
-               acpiphp_check_bridge(bridge);
-               put_bridge(bridge);
-       }
-       return AE_OK ;
-}
-
 void acpiphp_check_host_bridge(acpi_handle handle)
 {
        struct acpiphp_bridge *bridge;
@@ -978,27 +799,23 @@ void acpiphp_check_host_bridge(acpi_handle handle)
                acpiphp_check_bridge(bridge);
                put_bridge(bridge);
        }
-
-       acpi_walk_namespace(ACPI_TYPE_DEVICE, handle,
-               ACPI_UINT32_MAX, check_sub_bridges, NULL, NULL, NULL);
 }
 
-static void _handle_hotplug_event_bridge(struct work_struct *work)
+static void hotplug_event(acpi_handle handle, u32 type, void *data)
 {
+       struct acpiphp_context *context = data;
+       struct acpiphp_func *func = &context->func;
        struct acpiphp_bridge *bridge;
        char objname[64];
        struct acpi_buffer buffer = { .length = sizeof(objname),
                                      .pointer = objname };
-       struct acpi_hp_work *hp_work;
-       acpi_handle handle;
-       u32 type;
 
-       hp_work = container_of(work, struct acpi_hp_work, work);
-       handle = hp_work->handle;
-       type = hp_work->type;
-       bridge = (struct acpiphp_bridge *)hp_work->context;
+       mutex_lock(&acpiphp_context_lock);
+       bridge = context->bridge;
+       if (bridge)
+               get_bridge(bridge);
 
-       acpi_scan_lock_acquire();
+       mutex_unlock(&acpiphp_context_lock);
 
        acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
 
@@ -1007,188 +824,129 @@ static void _handle_hotplug_event_bridge(struct work_struct *work)
                /* bus re-enumerate */
                dbg("%s: Bus check notify on %s\n", __func__, objname);
                dbg("%s: re-enumerating slots under %s\n", __func__, objname);
-               acpiphp_check_bridge(bridge);
-               acpi_walk_namespace(ACPI_TYPE_DEVICE, handle,
-                       ACPI_UINT32_MAX, check_sub_bridges, NULL, NULL, NULL);
+               if (bridge) {
+                       acpiphp_check_bridge(bridge);
+               } else {
+                       struct acpiphp_slot *slot = func->slot;
+
+                       mutex_lock(&slot->crit_sect);
+                       enable_slot(slot);
+                       mutex_unlock(&slot->crit_sect);
+               }
                break;
 
        case ACPI_NOTIFY_DEVICE_CHECK:
                /* device check */
                dbg("%s: Device check notify on %s\n", __func__, objname);
-               acpiphp_check_bridge(bridge);
-               break;
+               if (bridge)
+                       acpiphp_check_bridge(bridge);
+               else
+                       acpiphp_check_bridge(func->parent);
 
-       case ACPI_NOTIFY_DEVICE_WAKE:
-               /* wake event */
-               dbg("%s: Device wake notify on %s\n", __func__, objname);
                break;
 
        case ACPI_NOTIFY_EJECT_REQUEST:
                /* request device eject */
                dbg("%s: Device eject notify on %s\n", __func__, objname);
-               if ((bridge->flags & BRIDGE_HAS_EJ0) && bridge->func) {
-                       struct acpiphp_slot *slot;
-                       slot = bridge->func->slot;
-                       if (!acpiphp_disable_slot(slot))
-                               acpiphp_eject_slot(slot);
-               }
+               acpiphp_disable_and_eject_slot(func->slot);
                break;
+       }
 
-       case ACPI_NOTIFY_FREQUENCY_MISMATCH:
-               printk(KERN_ERR "Device %s cannot be configured due"
-                               " to a frequency mismatch\n", objname);
-               break;
+       if (bridge)
+               put_bridge(bridge);
+}
 
-       case ACPI_NOTIFY_BUS_MODE_MISMATCH:
-               printk(KERN_ERR "Device %s cannot be configured due"
-                               " to a bus mode mismatch\n", objname);
-               break;
+static void hotplug_event_work(struct work_struct *work)
+{
+       struct acpiphp_context *context;
+       struct acpi_hp_work *hp_work;
 
-       case ACPI_NOTIFY_POWER_FAULT:
-               printk(KERN_ERR "Device %s has suffered a power fault\n",
-                               objname);
-               break;
+       hp_work = container_of(work, struct acpi_hp_work, work);
+       context = hp_work->context;
+       acpi_scan_lock_acquire();
 
-       default:
-               warn("notify_handler: unknown event type 0x%x for %s\n", type, objname);
-               break;
-       }
+       hotplug_event(hp_work->handle, hp_work->type, context);
 
        acpi_scan_lock_release();
-       kfree(hp_work); /* allocated in handle_hotplug_event_bridge */
-       put_bridge(bridge);
+       kfree(hp_work); /* allocated in handle_hotplug_event() */
+       put_bridge(context->func.parent);
 }
 
 /**
- * handle_hotplug_event_bridge - handle ACPI event on bridges
+ * handle_hotplug_event - handle ACPI hotplug event
  * @handle: Notify()'ed acpi_handle
  * @type: Notify code
- * @context: pointer to acpiphp_bridge structure
+ * @data: pointer to acpiphp_context structure
  *
- * Handles ACPI event notification on {host,p2p} bridges.
+ * Handles ACPI event notification on slots.
  */
-static void handle_hotplug_event_bridge(acpi_handle handle, u32 type,
-                                       void *context)
+static void handle_hotplug_event(acpi_handle handle, u32 type, void *data)
 {
-       struct acpiphp_bridge *bridge = context;
-
-       /*
-        * Currently the code adds all hotplug events to the kacpid_wq
-        * queue when it should add hotplug events to the kacpi_hotplug_wq.
-        * The proper way to fix this is to reorganize the code so that
-        * drivers (dock, etc.) do not call acpi_os_execute(), etc.
-        * For now just re-add this work to the kacpi_hotplug_wq so we
-        * don't deadlock on hotplug actions.
-        */
-       get_bridge(bridge);
-       alloc_acpi_hp_work(handle, type, context, _handle_hotplug_event_bridge);
-}
-
-static void hotplug_event_func(acpi_handle handle, u32 type, void *context)
-{
-       struct acpiphp_func *func = context;
-       char objname[64];
-       struct acpi_buffer buffer = { .length = sizeof(objname),
-                                     .pointer = objname };
-
-       acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
+       struct acpiphp_context *context;
 
        switch (type) {
        case ACPI_NOTIFY_BUS_CHECK:
-               /* bus re-enumerate */
-               dbg("%s: Bus check notify on %s\n", __func__, objname);
-               acpiphp_enable_slot(func->slot);
-               break;
-
        case ACPI_NOTIFY_DEVICE_CHECK:
-               /* device check : re-enumerate from parent bus */
-               dbg("%s: Device check notify on %s\n", __func__, objname);
-               acpiphp_check_bridge(func->slot->bridge);
-               break;
-
-       case ACPI_NOTIFY_DEVICE_WAKE:
-               /* wake event */
-               dbg("%s: Device wake notify on %s\n", __func__, objname);
-               break;
-
        case ACPI_NOTIFY_EJECT_REQUEST:
-               /* request device eject */
-               dbg("%s: Device eject notify on %s\n", __func__, objname);
-               if (!(acpiphp_disable_slot(func->slot)))
-                       acpiphp_eject_slot(func->slot);
                break;
 
-       default:
-               warn("notify_handler: unknown event type 0x%x for %s\n", type, objname);
-               break;
-       }
-}
-
-static void _handle_hotplug_event_func(struct work_struct *work)
-{
-       struct acpi_hp_work *hp_work;
-       struct acpiphp_func *func;
+       case ACPI_NOTIFY_DEVICE_WAKE:
+               return;
 
-       hp_work = container_of(work, struct acpi_hp_work, work);
-       func = hp_work->context;
-       acpi_scan_lock_acquire();
+       case ACPI_NOTIFY_FREQUENCY_MISMATCH:
+               acpi_handle_err(handle, "Device cannot be configured due "
+                               "to a frequency mismatch\n");
+               return;
 
-       hotplug_event_func(hp_work->handle, hp_work->type, func);
+       case ACPI_NOTIFY_BUS_MODE_MISMATCH:
+               acpi_handle_err(handle, "Device cannot be configured due "
+                               "to a bus mode mismatch\n");
+               return;
 
-       acpi_scan_lock_release();
-       kfree(hp_work); /* allocated in handle_hotplug_event_func */
-       put_bridge(func->slot->bridge);
-}
+       case ACPI_NOTIFY_POWER_FAULT:
+               acpi_handle_err(handle, "Device has suffered a power fault\n");
+               return;
 
-/**
- * handle_hotplug_event_func - handle ACPI event on functions (i.e. slots)
- * @handle: Notify()'ed acpi_handle
- * @type: Notify code
- * @context: pointer to acpiphp_func structure
- *
- * Handles ACPI event notification on slots.
- */
-static void handle_hotplug_event_func(acpi_handle handle, u32 type,
-                                     void *context)
-{
-       struct acpiphp_func *func = context;
+       default:
+               acpi_handle_warn(handle, "Unsupported event type 0x%x\n", type);
+               return;
+       }
 
-       /*
-        * Currently the code adds all hotplug events to the kacpid_wq
-        * queue when it should add hotplug events to the kacpi_hotplug_wq.
-        * The proper way to fix this is to reorganize the code so that
-        * drivers (dock, etc.) do not call acpi_os_execute(), etc.
-        * For now just re-add this work to the kacpi_hotplug_wq so we
-        * don't deadlock on hotplug actions.
-        */
-       get_bridge(func->slot->bridge);
-       alloc_acpi_hp_work(handle, type, context, _handle_hotplug_event_func);
+       mutex_lock(&acpiphp_context_lock);
+       context = acpiphp_get_context(handle);
+       if (context) {
+               get_bridge(context->func.parent);
+               acpiphp_put_context(context);
+               alloc_acpi_hp_work(handle, type, context, hotplug_event_work);
+       }
+       mutex_unlock(&acpiphp_context_lock);
 }
 
 /*
  * Create hotplug slots for the PCI bus.
  * It should always return 0 to avoid skipping following notifiers.
  */
-void acpiphp_enumerate_slots(struct pci_bus *bus, acpi_handle handle)
+void acpiphp_enumerate_slots(struct pci_bus *bus)
 {
-       acpi_handle dummy_handle;
        struct acpiphp_bridge *bridge;
+       acpi_handle handle;
+       acpi_status status;
 
        if (acpiphp_disabled)
                return;
 
-       if (detect_ejectable_slots(handle) <= 0)
+       handle = ACPI_HANDLE(bus->bridge);
+       if (!handle)
                return;
 
        bridge = kzalloc(sizeof(struct acpiphp_bridge), GFP_KERNEL);
-       if (bridge == NULL) {
-               err("out of memory\n");
+       if (!bridge) {
+               acpi_handle_err(handle, "No memory for bridge object\n");
                return;
        }
 
        INIT_LIST_HEAD(&bridge->slots);
        kref_init(&bridge->ref);
-       bridge->handle = handle;
        bridge->pci_dev = pci_dev_get(bus->self);
        bridge->pci_bus = bus;
 
@@ -1199,31 +957,62 @@ void acpiphp_enumerate_slots(struct pci_bus *bus, acpi_handle handle)
         */
        get_device(&bus->dev);
 
-       if (!pci_is_root_bus(bridge->pci_bus) &&
-           ACPI_SUCCESS(acpi_get_handle(bridge->handle,
-                                       "_EJ0", &dummy_handle))) {
-               dbg("found ejectable p2p bridge\n");
-               bridge->flags |= BRIDGE_HAS_EJ0;
-               bridge->func = acpiphp_bridge_handle_to_function(handle);
+       if (!pci_is_root_bus(bridge->pci_bus)) {
+               struct acpiphp_context *context;
+
+               /*
+                * This bridge should have been registered as a hotplug function
+                * under its parent, so the context has to be there.  If not, we
+                * are in deep goo.
+                */
+               mutex_lock(&acpiphp_context_lock);
+               context = acpiphp_get_context(handle);
+               if (WARN_ON(!context)) {
+                       mutex_unlock(&acpiphp_context_lock);
+                       put_device(&bus->dev);
+                       kfree(bridge);
+                       return;
+               }
+               bridge->context = context;
+               context->bridge = bridge;
+               /* Get a reference to the parent bridge. */
+               get_bridge(context->func.parent);
+               mutex_unlock(&acpiphp_context_lock);
        }
 
-       init_bridge_misc(bridge);
+       /* must be added to the list prior to calling register_slot */
+       mutex_lock(&bridge_mutex);
+       list_add(&bridge->list, &bridge_list);
+       mutex_unlock(&bridge_mutex);
+
+       /* register all slot objects under this bridge */
+       status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
+                                    register_slot, NULL, bridge, NULL);
+       if (ACPI_FAILURE(status)) {
+               acpi_handle_err(handle, "failed to register slots\n");
+               cleanup_bridge(bridge);
+               put_bridge(bridge);
+       }
 }
 
 /* Destroy hotplug slots associated with the PCI bus */
 void acpiphp_remove_slots(struct pci_bus *bus)
 {
-       struct acpiphp_bridge *bridge, *tmp;
+       struct acpiphp_bridge *bridge;
 
        if (acpiphp_disabled)
                return;
 
-       list_for_each_entry_safe(bridge, tmp, &bridge_list, list)
+       mutex_lock(&bridge_mutex);
+       list_for_each_entry(bridge, &bridge_list, list)
                if (bridge->pci_bus == bus) {
+                       mutex_unlock(&bridge_mutex);
                        cleanup_bridge(bridge);
                        put_bridge(bridge);
-                       break;
+                       return;
                }
+
+       mutex_unlock(&bridge_mutex);
 }
 
 /**
@@ -1232,51 +1021,39 @@ void acpiphp_remove_slots(struct pci_bus *bus)
  */
 int acpiphp_enable_slot(struct acpiphp_slot *slot)
 {
-       int retval;
-
        mutex_lock(&slot->crit_sect);
+       /* configure all functions */
+       if (!(slot->flags & SLOT_ENABLED))
+               enable_slot(slot);
 
-       /* wake up all functions */
-       retval = power_on_slot(slot);
-       if (retval)
-               goto err_exit;
-
-       if (get_slot_status(slot) == ACPI_STA_ALL) {
-               /* configure all functions */
-               retval = enable_device(slot);
-               if (retval)
-                       power_off_slot(slot);
-       } else {
-               dbg("%s: Slot status is not ACPI_STA_ALL\n", __func__);
-               power_off_slot(slot);
-       }
-
- err_exit:
        mutex_unlock(&slot->crit_sect);
-       return retval;
+       return 0;
 }
 
 /**
- * acpiphp_disable_slot - power off slot
+ * acpiphp_disable_and_eject_slot - power off and eject slot
  * @slot: ACPI PHP slot
  */
-int acpiphp_disable_slot(struct acpiphp_slot *slot)
+int acpiphp_disable_and_eject_slot(struct acpiphp_slot *slot)
 {
+       struct acpiphp_func *func;
        int retval = 0;
 
        mutex_lock(&slot->crit_sect);
 
        /* unconfigure all functions */
-       retval = disable_device(slot);
-       if (retval)
-               goto err_exit;
+       disable_slot(slot);
+
+       list_for_each_entry(func, &slot->funcs, sibling)
+               if (func->flags & FUNC_HAS_EJ0) {
+                       acpi_handle handle = func_to_handle(func);
 
-       /* power off all functions */
-       retval = power_off_slot(slot);
-       if (retval)
-               goto err_exit;
+                       if (ACPI_FAILURE(acpi_evaluate_ej0(handle)))
+                               acpi_handle_err(handle, "_EJ0 failed\n");
+
+                       break;
+               }
 
- err_exit:
        mutex_unlock(&slot->crit_sect);
        return retval;
 }
@@ -1288,7 +1065,7 @@ int acpiphp_disable_slot(struct acpiphp_slot *slot)
  */
 u8 acpiphp_get_power_status(struct acpiphp_slot *slot)
 {
-       return (slot->flags & SLOT_POWEREDON);
+       return (slot->flags & SLOT_ENABLED);
 }
 
 
@@ -1298,11 +1075,7 @@ u8 acpiphp_get_power_status(struct acpiphp_slot *slot)
  */
 u8 acpiphp_get_latch_status(struct acpiphp_slot *slot)
 {
-       unsigned int sta;
-
-       sta = get_slot_status(slot);
-
-       return (sta & ACPI_STA_DEVICE_UI) ? 0 : 1;
+       return !(get_slot_status(slot) & ACPI_STA_DEVICE_UI);
 }
 
 
@@ -1312,9 +1085,5 @@ u8 acpiphp_get_latch_status(struct acpiphp_slot *slot)
  */
 u8 acpiphp_get_adapter_status(struct acpiphp_slot *slot)
 {
-       unsigned int sta;
-
-       sta = get_slot_status(slot);
-
-       return (sta == 0) ? 0 : 1;
+       return !!get_slot_status(slot);
 }
index c35e8ad6db0142f0ddac2b4697cd4edef416cda7..2f5786c8522c2f170d8f57bc08c592e0ef78048a 100644 (file)
@@ -66,7 +66,7 @@ do {                                                  \
 #define IBM_HARDWARE_ID1 "IBM37D0"
 #define IBM_HARDWARE_ID2 "IBM37D4"
 
-#define hpslot_to_sun(A) (((struct slot *)((A)->private))->acpi_slot->sun)
+#define hpslot_to_sun(A) (((struct slot *)((A)->private))->sun)
 
 /* union apci_descriptor - allows access to the
  * various device descriptors that are embedded in the
@@ -270,7 +270,6 @@ static void ibm_handle_events(acpi_handle handle, u32 event, void *context)
 
        if (subevent == 0x80) {
                dbg("%s: generationg bus event\n", __func__);
-               acpi_bus_generate_proc_event(note->device, note->event, detail);
                acpi_bus_generate_netlink_event(note->device->pnp.device_class,
                                                  dev_name(&note->device->dev),
                                                  note->event, detail);
index 7fb326983ed69a1a1c0796ab32cbd05c9200100c..541bbe6d5343e9c99da97f9e5fd8c74c554b24d3 100644 (file)
@@ -155,6 +155,7 @@ void pciehp_green_led_off(struct slot *slot);
 void pciehp_green_led_blink(struct slot *slot);
 int pciehp_check_link_status(struct controller *ctrl);
 void pciehp_release_ctrl(struct controller *ctrl);
+int pciehp_reset_slot(struct slot *slot, int probe);
 
 static inline const char *slot_name(struct slot *slot)
 {
index 7d72c5e2eba90cc98cb233142ba6baadd778d9ae..f4a18f51a29cc3bc1c17121ab265ce9e199da0ec 100644 (file)
@@ -69,6 +69,7 @@ static int get_power_status   (struct hotplug_slot *slot, u8 *value);
 static int get_attention_status        (struct hotplug_slot *slot, u8 *value);
 static int get_latch_status    (struct hotplug_slot *slot, u8 *value);
 static int get_adapter_status  (struct hotplug_slot *slot, u8 *value);
+static int reset_slot          (struct hotplug_slot *slot, int probe);
 
 /**
  * release_slot - free up the memory used by a slot
@@ -111,6 +112,7 @@ static int init_slot(struct controller *ctrl)
        ops->disable_slot = disable_slot;
        ops->get_power_status = get_power_status;
        ops->get_adapter_status = get_adapter_status;
+       ops->reset_slot = reset_slot;
        if (MRL_SENS(ctrl))
                ops->get_latch_status = get_latch_status;
        if (ATTN_LED(ctrl)) {
@@ -223,6 +225,16 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
        return pciehp_get_adapter_status(slot, value);
 }
 
+static int reset_slot(struct hotplug_slot *hotplug_slot, int probe)
+{
+       struct slot *slot = hotplug_slot->private;
+
+       ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
+                __func__, slot_name(slot));
+
+       return pciehp_reset_slot(slot, probe);
+}
+
 static int pciehp_probe(struct pcie_device *dev)
 {
        int rc;
index b2255736ac81fa819e651f40deb17a6102c4240a..51f56ef4ab6f83a9382527f9061172800fff589a 100644 (file)
@@ -749,6 +749,37 @@ static void pcie_disable_notification(struct controller *ctrl)
                ctrl_warn(ctrl, "Cannot disable software notification\n");
 }
 
+/*
+ * pciehp has a 1:1 bus:slot relationship so we ultimately want a secondary
+ * bus reset of the bridge, but if the slot supports surprise removal we need
+ * to disable presence detection around the bus reset and clear any spurious
+ * events after.
+ */
+int pciehp_reset_slot(struct slot *slot, int probe)
+{
+       struct controller *ctrl = slot->ctrl;
+
+       if (probe)
+               return 0;
+
+       if (HP_SUPR_RM(ctrl)) {
+               pcie_write_cmd(ctrl, 0, PCI_EXP_SLTCTL_PDCE);
+               if (pciehp_poll_mode)
+                       del_timer_sync(&ctrl->poll_timer);
+       }
+
+       pci_reset_bridge_secondary_bus(ctrl->pcie->port);
+
+       if (HP_SUPR_RM(ctrl)) {
+               pciehp_writew(ctrl, PCI_EXP_SLTSTA, PCI_EXP_SLTSTA_PDC);
+               pcie_write_cmd(ctrl, PCI_EXP_SLTCTL_PDCE, PCI_EXP_SLTCTL_PDCE);
+               if (pciehp_poll_mode)
+                       int_poll_timeout(ctrl->poll_timer.data);
+       }
+
+       return 0;
+}
+
 int pcie_init_notification(struct controller *ctrl)
 {
        if (pciehp_request_irq(ctrl))
index de8ffacf9c9b27cd1f758065e5b3f8158bab89b9..21a7182dccd435865347d223ead22e73a02fe0db 100644 (file)
@@ -286,7 +286,6 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
            (!(iov->cap & PCI_SRIOV_CAP_VFM) && (nr_virtfn > initial)))
                return -EINVAL;
 
-       pci_write_config_word(dev, iov->pos + PCI_SRIOV_NUM_VF, nr_virtfn);
        pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_OFFSET, &offset);
        pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_STRIDE, &stride);
        if (!offset || (nr_virtfn > 1 && !stride))
@@ -324,7 +323,7 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
 
                if (!pdev->is_physfn) {
                        pci_dev_put(pdev);
-                       return -ENODEV;
+                       return -ENOSYS;
                }
 
                rc = sysfs_create_link(&dev->dev.kobj,
@@ -334,6 +333,7 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
                        return rc;
        }
 
+       pci_write_config_word(dev, iov->pos + PCI_SRIOV_NUM_VF, nr_virtfn);
        iov->ctrl |= PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE;
        pci_cfg_access_lock(dev);
        pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
@@ -368,6 +368,7 @@ failed:
        iov->ctrl &= ~(PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE);
        pci_cfg_access_lock(dev);
        pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
+       pci_write_config_word(dev, iov->pos + PCI_SRIOV_NUM_VF, 0);
        ssleep(1);
        pci_cfg_access_unlock(dev);
 
@@ -401,6 +402,7 @@ static void sriov_disable(struct pci_dev *dev)
                sysfs_remove_link(&dev->dev.kobj, "dep_link");
 
        iov->num_VFs = 0;
+       pci_write_config_word(dev, iov->pos + PCI_SRIOV_NUM_VF, 0);
 }
 
 static int sriov_init(struct pci_dev *dev, int pos)
@@ -662,7 +664,7 @@ int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn)
        might_sleep();
 
        if (!dev->is_physfn)
-               return -ENODEV;
+               return -ENOSYS;
 
        return sriov_enable(dev, nr_virtfn);
 }
@@ -722,7 +724,7 @@ EXPORT_SYMBOL_GPL(pci_num_vf);
  * @dev: the PCI device
  *
  * Returns number of VFs belonging to this device that are assigned to a guest.
- * If device is not a physical function returns -ENODEV.
+ * If device is not a physical function returns 0.
  */
 int pci_vfs_assigned(struct pci_dev *dev)
 {
@@ -767,12 +769,15 @@ EXPORT_SYMBOL_GPL(pci_vfs_assigned);
  * device's mutex held.
  *
  * Returns 0 if PF is an SRIOV-capable device and
- * value of numvfs valid. If not a PF with VFS, return -EINVAL;
+ * value of numvfs valid. If not a PF return -ENOSYS;
+ * if numvfs is invalid return -EINVAL;
  * if VFs already enabled, return -EBUSY.
  */
 int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs)
 {
-       if (!dev->is_physfn || (numvfs > dev->sriov->total_VFs))
+       if (!dev->is_physfn)
+               return -ENOSYS;
+       if (numvfs > dev->sriov->total_VFs)
                return -EINVAL;
 
        /* Shouldn't change if VFs already enabled */
@@ -786,17 +791,17 @@ int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs)
 EXPORT_SYMBOL_GPL(pci_sriov_set_totalvfs);
 
 /**
- * pci_sriov_get_totalvfs -- get total VFs supported on this devic3
+ * pci_sriov_get_totalvfs -- get total VFs supported on this device
  * @dev: the PCI PF device
  *
  * For a PCIe device with SRIOV support, return the PCIe
  * SRIOV capability value of TotalVFs or the value of driver_max_VFs
- * if the driver reduced it.  Otherwise, -EINVAL.
+ * if the driver reduced it.  Otherwise 0.
  */
 int pci_sriov_get_totalvfs(struct pci_dev *dev)
 {
        if (!dev->is_physfn)
-               return -EINVAL;
+               return 0;
 
        if (dev->sriov->driver_max_VFs)
                return dev->sriov->driver_max_VFs;
index 01e264fb50e0ebf09a1c4b9b49e1e5e0da3bc7ff..7c29ee4ed0ae5559077b7b4efd69eb7ff2fd871a 100644 (file)
@@ -210,7 +210,7 @@ static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state)
        }
 
        if (!error)
-               dev_info(&dev->dev, "power state changed by ACPI to %s\n",
+               dev_dbg(&dev->dev, "power state changed by ACPI to %s\n",
                         acpi_power_state_string(state_conv[state]));
 
        return error;
@@ -290,24 +290,16 @@ static struct pci_platform_pm_ops acpi_pci_platform_pm = {
 
 void acpi_pci_add_bus(struct pci_bus *bus)
 {
-       acpi_handle handle = NULL;
-
-       if (bus->bridge)
-               handle = ACPI_HANDLE(bus->bridge);
-       if (acpi_pci_disabled || handle == NULL)
+       if (acpi_pci_disabled || !bus->bridge)
                return;
 
-       acpi_pci_slot_enumerate(bus, handle);
-       acpiphp_enumerate_slots(bus, handle);
+       acpi_pci_slot_enumerate(bus);
+       acpiphp_enumerate_slots(bus);
 }
 
 void acpi_pci_remove_bus(struct pci_bus *bus)
 {
-       /*
-        * bus->bridge->acpi_node.handle has already been reset to NULL
-        * when acpi_pci_remove_bus() is called, so don't check ACPI handle.
-        */
-       if (acpi_pci_disabled)
+       if (acpi_pci_disabled || !bus->bridge)
                return;
 
        acpiphp_remove_slots(bus);
index c0dbe1f61362aa058db84ffb971671a6ce35e8cd..7128cfdd64aa9d31c8628be30e7c65b33396dbfb 100644 (file)
@@ -131,19 +131,19 @@ static ssize_t pci_bus_show_cpuaffinity(struct device *dev,
        return ret;
 }
 
-static inline ssize_t pci_bus_show_cpumaskaffinity(struct device *dev,
-                                       struct device_attribute *attr,
-                                       char *buf)
+static ssize_t cpuaffinity_show(struct device *dev,
+                               struct device_attribute *attr, char *buf)
 {
        return pci_bus_show_cpuaffinity(dev, 0, attr, buf);
 }
+static DEVICE_ATTR_RO(cpuaffinity);
 
-static inline ssize_t pci_bus_show_cpulistaffinity(struct device *dev,
-                                       struct device_attribute *attr,
-                                       char *buf)
+static ssize_t cpulistaffinity_show(struct device *dev,
+                                   struct device_attribute *attr, char *buf)
 {
        return pci_bus_show_cpuaffinity(dev, 1, attr, buf);
 }
+static DEVICE_ATTR_RO(cpulistaffinity);
 
 /* show resources */
 static ssize_t
@@ -379,6 +379,7 @@ dev_bus_rescan_store(struct device *dev, struct device_attribute *attr,
        }
        return count;
 }
+static DEVICE_ATTR(rescan, (S_IWUSR|S_IWGRP), NULL, dev_bus_rescan_store);
 
 #if defined(CONFIG_PM_RUNTIME) && defined(CONFIG_ACPI)
 static ssize_t d3cold_allowed_store(struct device *dev,
@@ -514,11 +515,20 @@ struct device_attribute pci_dev_attrs[] = {
        __ATTR_NULL,
 };
 
-struct device_attribute pcibus_dev_attrs[] = {
-       __ATTR(rescan, (S_IWUSR|S_IWGRP), NULL, dev_bus_rescan_store),
-       __ATTR(cpuaffinity, S_IRUGO, pci_bus_show_cpumaskaffinity, NULL),
-       __ATTR(cpulistaffinity, S_IRUGO, pci_bus_show_cpulistaffinity, NULL),
-       __ATTR_NULL,
+static struct attribute *pcibus_attrs[] = {
+       &dev_attr_rescan.attr,
+       &dev_attr_cpuaffinity.attr,
+       &dev_attr_cpulistaffinity.attr,
+       NULL,
+};
+
+static const struct attribute_group pcibus_group = {
+       .attrs = pcibus_attrs,
+};
+
+const struct attribute_group *pcibus_groups[] = {
+       &pcibus_group,
+       NULL,
 };
 
 static ssize_t
index e37fea6e178d2dab00d3334848d231117d525013..fd7aa476bc4080301e12c09757bf15fd37d1ad41 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/interrupt.h>
 #include <linux/device.h>
 #include <linux/pm_runtime.h>
+#include <linux/pci_hotplug.h>
 #include <asm-generic/pci-bridge.h>
 #include <asm/setup.h>
 #include "pci.h"
@@ -1992,7 +1993,7 @@ static void pci_add_saved_cap(struct pci_dev *pci_dev,
 }
 
 /**
- * pci_add_save_buffer - allocate buffer for saving given capability registers
+ * pci_add_cap_save_buffer - allocate buffer for saving given capability registers
  * @dev: the PCI device
  * @cap: the capability to allocate the buffer for
  * @size: requested size of the buffer
@@ -2359,6 +2360,27 @@ void pci_enable_acs(struct pci_dev *dev)
        pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
 }
 
+static bool pci_acs_flags_enabled(struct pci_dev *pdev, u16 acs_flags)
+{
+       int pos;
+       u16 cap, ctrl;
+
+       pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ACS);
+       if (!pos)
+               return false;
+
+       /*
+        * Except for egress control, capabilities are either required
+        * or only required if controllable.  Features missing from the
+        * capability field can therefore be assumed as hard-wired enabled.
+        */
+       pci_read_config_word(pdev, pos + PCI_ACS_CAP, &cap);
+       acs_flags &= (cap | PCI_ACS_EC);
+
+       pci_read_config_word(pdev, pos + PCI_ACS_CTRL, &ctrl);
+       return (ctrl & acs_flags) == acs_flags;
+}
+
 /**
  * pci_acs_enabled - test ACS against required flags for a given device
  * @pdev: device to test
@@ -2366,36 +2388,76 @@ void pci_enable_acs(struct pci_dev *dev)
  *
  * Return true if the device supports the provided flags.  Automatically
  * filters out flags that are not implemented on multifunction devices.
+ *
+ * Note that this interface checks the effective ACS capabilities of the
+ * device rather than the actual capabilities.  For instance, most single
+ * function endpoints are not required to support ACS because they have no
+ * opportunity for peer-to-peer access.  We therefore return 'true'
+ * regardless of whether the device exposes an ACS capability.  This makes
+ * it much easier for callers of this function to ignore the actual type
+ * or topology of the device when testing ACS support.
  */
 bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
 {
-       int pos, ret;
-       u16 ctrl;
+       int ret;
 
        ret = pci_dev_specific_acs_enabled(pdev, acs_flags);
        if (ret >= 0)
                return ret > 0;
 
+       /*
+        * Conventional PCI and PCI-X devices never support ACS, either
+        * effectively or actually.  The shared bus topology implies that
+        * any device on the bus can receive or snoop DMA.
+        */
        if (!pci_is_pcie(pdev))
                return false;
 
-       /* Filter out flags not applicable to multifunction */
-       if (pdev->multifunction)
-               acs_flags &= (PCI_ACS_RR | PCI_ACS_CR |
-                             PCI_ACS_EC | PCI_ACS_DT);
-
-       if (pci_pcie_type(pdev) == PCI_EXP_TYPE_DOWNSTREAM ||
-           pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT ||
-           pdev->multifunction) {
-               pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ACS);
-               if (!pos)
-                       return false;
+       switch (pci_pcie_type(pdev)) {
+       /*
+        * PCI/X-to-PCIe bridges are not specifically mentioned by the spec,
+        * but since their primary interface is PCI/X, we conservatively
+        * handle them as we would a non-PCIe device.
+        */
+       case PCI_EXP_TYPE_PCIE_BRIDGE:
+       /*
+        * PCIe 3.0, 6.12.1 excludes ACS on these devices.  "ACS is never
+        * applicable... must never implement an ACS Extended Capability...".
+        * This seems arbitrary, but we take a conservative interpretation
+        * of this statement.
+        */
+       case PCI_EXP_TYPE_PCI_BRIDGE:
+       case PCI_EXP_TYPE_RC_EC:
+               return false;
+       /*
+        * PCIe 3.0, 6.12.1.1 specifies that downstream and root ports should
+        * implement ACS in order to indicate their peer-to-peer capabilities,
+        * regardless of whether they are single- or multi-function devices.
+        */
+       case PCI_EXP_TYPE_DOWNSTREAM:
+       case PCI_EXP_TYPE_ROOT_PORT:
+               return pci_acs_flags_enabled(pdev, acs_flags);
+       /*
+        * PCIe 3.0, 6.12.1.2 specifies ACS capabilities that should be
+        * implemented by the remaining PCIe types to indicate peer-to-peer
+        * capabilities, but only when they are part of a multifunction
+        * device.  The footnote for section 6.12 indicates the specific
+        * PCIe types included here.
+        */
+       case PCI_EXP_TYPE_ENDPOINT:
+       case PCI_EXP_TYPE_UPSTREAM:
+       case PCI_EXP_TYPE_LEG_END:
+       case PCI_EXP_TYPE_RC_END:
+               if (!pdev->multifunction)
+                       break;
 
-               pci_read_config_word(pdev, pos + PCI_ACS_CTRL, &ctrl);
-               if ((ctrl & acs_flags) != acs_flags)
-                       return false;
+               return pci_acs_flags_enabled(pdev, acs_flags);
        }
 
+       /*
+        * PCIe 3.0, 6.12.1.3 specifies no ACS capabilities are applicable
+        * to single function devices with the exception of downstream ports.
+        */
        return true;
 }
 
@@ -3098,19 +3160,17 @@ int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask)
 }
 EXPORT_SYMBOL(pci_set_dma_seg_boundary);
 
-static int pcie_flr(struct pci_dev *dev, int probe)
+/**
+ * pci_wait_for_pending_transaction - waits for pending transaction
+ * @dev: the PCI device to operate on
+ *
+ * Return 0 if transaction is pending, 1 otherwise.
+ */
+int pci_wait_for_pending_transaction(struct pci_dev *dev)
 {
        int i;
-       u32 cap;
        u16 status;
 
-       pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &cap);
-       if (!(cap & PCI_EXP_DEVCAP_FLR))
-               return -ENOTTY;
-
-       if (probe)
-               return 0;
-
        /* Wait for Transaction Pending bit clean */
        for (i = 0; i < 4; i++) {
                if (i)
@@ -3118,13 +3178,27 @@ static int pcie_flr(struct pci_dev *dev, int probe)
 
                pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status);
                if (!(status & PCI_EXP_DEVSTA_TRPND))
-                       goto clear;
+                       return 1;
        }
 
-       dev_err(&dev->dev, "transaction is not cleared; "
-                       "proceeding with reset anyway\n");
+       return 0;
+}
+EXPORT_SYMBOL(pci_wait_for_pending_transaction);
+
+static int pcie_flr(struct pci_dev *dev, int probe)
+{
+       u32 cap;
+
+       pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &cap);
+       if (!(cap & PCI_EXP_DEVCAP_FLR))
+               return -ENOTTY;
+
+       if (probe)
+               return 0;
+
+       if (!pci_wait_for_pending_transaction(dev))
+               dev_err(&dev->dev, "transaction is not cleared; proceeding with reset anyway\n");
 
-clear:
        pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
 
        msleep(100);
@@ -3215,9 +3289,42 @@ static int pci_pm_reset(struct pci_dev *dev, int probe)
        return 0;
 }
 
-static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
+/**
+ * pci_reset_bridge_secondary_bus - Reset the secondary bus on a PCI bridge.
+ * @dev: Bridge device
+ *
+ * Use the bridge control register to assert reset on the secondary bus.
+ * Devices on the secondary bus are left in power-on state.
+ */
+void pci_reset_bridge_secondary_bus(struct pci_dev *dev)
 {
        u16 ctrl;
+
+       pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &ctrl);
+       ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
+       pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
+       /*
+        * PCI spec v3.0 7.6.4.2 requires minimum Trst of 1ms.  Double
+        * this to 2ms to ensure that we meet the minimum requirement.
+        */
+       msleep(2);
+
+       ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
+       pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
+
+       /*
+        * Trhfa for conventional PCI is 2^25 clock cycles.
+        * Assuming a minimum 33MHz clock this results in a 1s
+        * delay before we can consider subordinate devices to
+        * be re-initialized.  PCIe has some ways to shorten this,
+        * but we don't make use of them yet.
+        */
+       ssleep(1);
+}
+EXPORT_SYMBOL_GPL(pci_reset_bridge_secondary_bus);
+
+static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
+{
        struct pci_dev *pdev;
 
        if (pci_is_root_bus(dev->bus) || dev->subordinate || !dev->bus->self)
@@ -3230,18 +3337,40 @@ static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
        if (probe)
                return 0;
 
-       pci_read_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, &ctrl);
-       ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
-       pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl);
-       msleep(100);
-
-       ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
-       pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl);
-       msleep(100);
+       pci_reset_bridge_secondary_bus(dev->bus->self);
 
        return 0;
 }
 
+static int pci_reset_hotplug_slot(struct hotplug_slot *hotplug, int probe)
+{
+       int rc = -ENOTTY;
+
+       if (!hotplug || !try_module_get(hotplug->ops->owner))
+               return rc;
+
+       if (hotplug->ops->reset_slot)
+               rc = hotplug->ops->reset_slot(hotplug, probe);
+
+       module_put(hotplug->ops->owner);
+
+       return rc;
+}
+
+static int pci_dev_reset_slot_function(struct pci_dev *dev, int probe)
+{
+       struct pci_dev *pdev;
+
+       if (dev->subordinate || !dev->slot)
+               return -ENOTTY;
+
+       list_for_each_entry(pdev, &dev->bus->devices, bus_list)
+               if (pdev != dev && pdev->slot == dev->slot)
+                       return -ENOTTY;
+
+       return pci_reset_hotplug_slot(dev->slot->hotplug, probe);
+}
+
 static int __pci_dev_reset(struct pci_dev *dev, int probe)
 {
        int rc;
@@ -3264,27 +3393,65 @@ static int __pci_dev_reset(struct pci_dev *dev, int probe)
        if (rc != -ENOTTY)
                goto done;
 
+       rc = pci_dev_reset_slot_function(dev, probe);
+       if (rc != -ENOTTY)
+               goto done;
+
        rc = pci_parent_bus_reset(dev, probe);
 done:
        return rc;
 }
 
+static void pci_dev_lock(struct pci_dev *dev)
+{
+       pci_cfg_access_lock(dev);
+       /* block PM suspend, driver probe, etc. */
+       device_lock(&dev->dev);
+}
+
+static void pci_dev_unlock(struct pci_dev *dev)
+{
+       device_unlock(&dev->dev);
+       pci_cfg_access_unlock(dev);
+}
+
+static void pci_dev_save_and_disable(struct pci_dev *dev)
+{
+       /*
+        * Wake-up device prior to save.  PM registers default to D0 after
+        * reset and a simple register restore doesn't reliably return
+        * to a non-D0 state anyway.
+        */
+       pci_set_power_state(dev, PCI_D0);
+
+       pci_save_state(dev);
+       /*
+        * Disable the device by clearing the Command register, except for
+        * INTx-disable which is set.  This not only disables MMIO and I/O port
+        * BARs, but also prevents the device from being Bus Master, preventing
+        * DMA from the device including MSI/MSI-X interrupts.  For PCI 2.3
+        * compliant devices, INTx-disable prevents legacy interrupts.
+        */
+       pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
+}
+
+static void pci_dev_restore(struct pci_dev *dev)
+{
+       pci_restore_state(dev);
+}
+
 static int pci_dev_reset(struct pci_dev *dev, int probe)
 {
        int rc;
 
-       if (!probe) {
-               pci_cfg_access_lock(dev);
-               /* block PM suspend, driver probe, etc. */
-               device_lock(&dev->dev);
-       }
+       if (!probe)
+               pci_dev_lock(dev);
 
        rc = __pci_dev_reset(dev, probe);
 
-       if (!probe) {
-               device_unlock(&dev->dev);
-               pci_cfg_access_unlock(dev);
-       }
+       if (!probe)
+               pci_dev_unlock(dev);
+
        return rc;
 }
 /**
@@ -3375,22 +3542,249 @@ int pci_reset_function(struct pci_dev *dev)
        if (rc)
                return rc;
 
-       pci_save_state(dev);
-
-       /*
-        * both INTx and MSI are disabled after the Interrupt Disable bit
-        * is set and the Bus Master bit is cleared.
-        */
-       pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
+       pci_dev_save_and_disable(dev);
 
        rc = pci_dev_reset(dev, 0);
 
-       pci_restore_state(dev);
+       pci_dev_restore(dev);
 
        return rc;
 }
 EXPORT_SYMBOL_GPL(pci_reset_function);
 
+/* Lock devices from the top of the tree down */
+static void pci_bus_lock(struct pci_bus *bus)
+{
+       struct pci_dev *dev;
+
+       list_for_each_entry(dev, &bus->devices, bus_list) {
+               pci_dev_lock(dev);
+               if (dev->subordinate)
+                       pci_bus_lock(dev->subordinate);
+       }
+}
+
+/* Unlock devices from the bottom of the tree up */
+static void pci_bus_unlock(struct pci_bus *bus)
+{
+       struct pci_dev *dev;
+
+       list_for_each_entry(dev, &bus->devices, bus_list) {
+               if (dev->subordinate)
+                       pci_bus_unlock(dev->subordinate);
+               pci_dev_unlock(dev);
+       }
+}
+
+/* Lock devices from the top of the tree down */
+static void pci_slot_lock(struct pci_slot *slot)
+{
+       struct pci_dev *dev;
+
+       list_for_each_entry(dev, &slot->bus->devices, bus_list) {
+               if (!dev->slot || dev->slot != slot)
+                       continue;
+               pci_dev_lock(dev);
+               if (dev->subordinate)
+                       pci_bus_lock(dev->subordinate);
+       }
+}
+
+/* Unlock devices from the bottom of the tree up */
+static void pci_slot_unlock(struct pci_slot *slot)
+{
+       struct pci_dev *dev;
+
+       list_for_each_entry(dev, &slot->bus->devices, bus_list) {
+               if (!dev->slot || dev->slot != slot)
+                       continue;
+               if (dev->subordinate)
+                       pci_bus_unlock(dev->subordinate);
+               pci_dev_unlock(dev);
+       }
+}
+
+/* Save and disable devices from the top of the tree down */
+static void pci_bus_save_and_disable(struct pci_bus *bus)
+{
+       struct pci_dev *dev;
+
+       list_for_each_entry(dev, &bus->devices, bus_list) {
+               pci_dev_save_and_disable(dev);
+               if (dev->subordinate)
+                       pci_bus_save_and_disable(dev->subordinate);
+       }
+}
+
+/*
+ * Restore devices from top of the tree down - parent bridges need to be
+ * restored before we can get to subordinate devices.
+ */
+static void pci_bus_restore(struct pci_bus *bus)
+{
+       struct pci_dev *dev;
+
+       list_for_each_entry(dev, &bus->devices, bus_list) {
+               pci_dev_restore(dev);
+               if (dev->subordinate)
+                       pci_bus_restore(dev->subordinate);
+       }
+}
+
+/* Save and disable devices from the top of the tree down */
+static void pci_slot_save_and_disable(struct pci_slot *slot)
+{
+       struct pci_dev *dev;
+
+       list_for_each_entry(dev, &slot->bus->devices, bus_list) {
+               if (!dev->slot || dev->slot != slot)
+                       continue;
+               pci_dev_save_and_disable(dev);
+               if (dev->subordinate)
+                       pci_bus_save_and_disable(dev->subordinate);
+       }
+}
+
+/*
+ * Restore devices from top of the tree down - parent bridges need to be
+ * restored before we can get to subordinate devices.
+ */
+static void pci_slot_restore(struct pci_slot *slot)
+{
+       struct pci_dev *dev;
+
+       list_for_each_entry(dev, &slot->bus->devices, bus_list) {
+               if (!dev->slot || dev->slot != slot)
+                       continue;
+               pci_dev_restore(dev);
+               if (dev->subordinate)
+                       pci_bus_restore(dev->subordinate);
+       }
+}
+
+static int pci_slot_reset(struct pci_slot *slot, int probe)
+{
+       int rc;
+
+       if (!slot)
+               return -ENOTTY;
+
+       if (!probe)
+               pci_slot_lock(slot);
+
+       might_sleep();
+
+       rc = pci_reset_hotplug_slot(slot->hotplug, probe);
+
+       if (!probe)
+               pci_slot_unlock(slot);
+
+       return rc;
+}
+
+/**
+ * pci_probe_reset_slot - probe whether a PCI slot can be reset
+ * @slot: PCI slot to probe
+ *
+ * Return 0 if slot can be reset, negative if a slot reset is not supported.
+ */
+int pci_probe_reset_slot(struct pci_slot *slot)
+{
+       return pci_slot_reset(slot, 1);
+}
+EXPORT_SYMBOL_GPL(pci_probe_reset_slot);
+
+/**
+ * pci_reset_slot - reset a PCI slot
+ * @slot: PCI slot to reset
+ *
+ * A PCI bus may host multiple slots, each slot may support a reset mechanism
+ * independent of other slots.  For instance, some slots may support slot power
+ * control.  In the case of a 1:1 bus to slot architecture, this function may
+ * wrap the bus reset to avoid spurious slot related events such as hotplug.
+ * Generally a slot reset should be attempted before a bus reset.  All of the
+ * function of the slot and any subordinate buses behind the slot are reset
+ * through this function.  PCI config space of all devices in the slot and
+ * behind the slot is saved before and restored after reset.
+ *
+ * Return 0 on success, non-zero on error.
+ */
+int pci_reset_slot(struct pci_slot *slot)
+{
+       int rc;
+
+       rc = pci_slot_reset(slot, 1);
+       if (rc)
+               return rc;
+
+       pci_slot_save_and_disable(slot);
+
+       rc = pci_slot_reset(slot, 0);
+
+       pci_slot_restore(slot);
+
+       return rc;
+}
+EXPORT_SYMBOL_GPL(pci_reset_slot);
+
+static int pci_bus_reset(struct pci_bus *bus, int probe)
+{
+       if (!bus->self)
+               return -ENOTTY;
+
+       if (probe)
+               return 0;
+
+       pci_bus_lock(bus);
+
+       might_sleep();
+
+       pci_reset_bridge_secondary_bus(bus->self);
+
+       pci_bus_unlock(bus);
+
+       return 0;
+}
+
+/**
+ * pci_probe_reset_bus - probe whether a PCI bus can be reset
+ * @bus: PCI bus to probe
+ *
+ * Return 0 if bus can be reset, negative if a bus reset is not supported.
+ */
+int pci_probe_reset_bus(struct pci_bus *bus)
+{
+       return pci_bus_reset(bus, 1);
+}
+EXPORT_SYMBOL_GPL(pci_probe_reset_bus);
+
+/**
+ * pci_reset_bus - reset a PCI bus
+ * @bus: top level PCI bus to reset
+ *
+ * Do a bus reset on the given bus and any subordinate buses, saving
+ * and restoring state of all devices.
+ *
+ * Return 0 on success, non-zero on error.
+ */
+int pci_reset_bus(struct pci_bus *bus)
+{
+       int rc;
+
+       rc = pci_bus_reset(bus, 1);
+       if (rc)
+               return rc;
+
+       pci_bus_save_and_disable(bus);
+
+       rc = pci_bus_reset(bus, 0);
+
+       pci_bus_restore(bus);
+
+       return rc;
+}
+EXPORT_SYMBOL_GPL(pci_reset_bus);
+
 /**
  * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count
  * @dev: PCI device to query
@@ -3578,6 +3972,49 @@ int pcie_set_mps(struct pci_dev *dev, int mps)
                                                  PCI_EXP_DEVCTL_PAYLOAD, v);
 }
 
+/**
+ * pcie_get_minimum_link - determine minimum link settings of a PCI device
+ * @dev: PCI device to query
+ * @speed: storage for minimum speed
+ * @width: storage for minimum width
+ *
+ * This function will walk up the PCI device chain and determine the minimum
+ * link width and speed of the device.
+ */
+int pcie_get_minimum_link(struct pci_dev *dev, enum pci_bus_speed *speed,
+                         enum pcie_link_width *width)
+{
+       int ret;
+
+       *speed = PCI_SPEED_UNKNOWN;
+       *width = PCIE_LNK_WIDTH_UNKNOWN;
+
+       while (dev) {
+               u16 lnksta;
+               enum pci_bus_speed next_speed;
+               enum pcie_link_width next_width;
+
+               ret = pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
+               if (ret)
+                       return ret;
+
+               next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS];
+               next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >>
+                       PCI_EXP_LNKSTA_NLW_SHIFT;
+
+               if (next_speed < *speed)
+                       *speed = next_speed;
+
+               if (next_width < *width)
+                       *width = next_width;
+
+               dev = dev->bus->self;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL(pcie_get_minimum_link);
+
 /**
  * pci_select_bars - Make BAR mask from the type of resource
  * @dev: the PCI device for which BAR mask is made
index d1182c4a754e235b25fb2914d658010d05157756..8a00c063d7bc67af7a3256fdd4a81c3d4d87f80b 100644 (file)
@@ -6,6 +6,9 @@
 #define PCI_CFG_SPACE_SIZE     256
 #define PCI_CFG_SPACE_EXP_SIZE 4096
 
+extern const unsigned char pcix_bus_speed[];
+extern const unsigned char pcie_link_speed[];
+
 /* Functions internal to the PCI core code */
 
 int pci_create_sysfs_dev_files(struct pci_dev *pdev);
@@ -151,7 +154,7 @@ static inline int pci_no_d1d2(struct pci_dev *dev)
 
 }
 extern struct device_attribute pci_dev_attrs[];
-extern struct device_attribute pcibus_dev_attrs[];
+extern const struct attribute_group *pcibus_groups[];
 extern struct device_type pci_dev_type;
 extern struct bus_attribute pci_bus_attrs[];
 
index 3b94cfcfa03bee1dd1a1d319f9d038a46b07b834..7958e59d6077dcf341e4e76f8bfb902c3ef81e10 100644 (file)
@@ -2,7 +2,7 @@
 # PCI Express Port Bus Configuration
 #
 config PCIEPORTBUS
-       bool "PCI Express support"
+       bool "PCI Express Port Bus support"
        depends on PCI
        help
          This automatically enables PCI Express Port Bus support. Users can
index 76ef634caf6f169ec7ca506e7e3aec7434a9753e..0bf82a20a0fb479ccfbdb43479bf9b0cf6ecff6a 100644 (file)
@@ -352,7 +352,7 @@ static pci_ers_result_t aer_root_reset(struct pci_dev *dev)
        reg32 &= ~ROOT_PORT_INTR_ON_MESG_MASK;
        pci_write_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, reg32);
 
-       aer_do_secondary_bus_reset(dev);
+       pci_reset_bridge_secondary_bus(dev);
        dev_printk(KERN_DEBUG, &dev->dev, "Root Port link has been reset\n");
 
        /* Clear Root Error Status */
index 90ea3e88041f74c25f3339d77cf8d4929784edfb..84420b7c9456ecbb0e43a9e8ca539af5b2ee1a20 100644 (file)
@@ -106,7 +106,6 @@ static inline pci_ers_result_t merge_result(enum pci_ers_result orig,
 }
 
 extern struct bus_type pcie_port_bus_type;
-void aer_do_secondary_bus_reset(struct pci_dev *dev);
 int aer_init(struct pcie_device *dev);
 void aer_isr(struct work_struct *work);
 void aer_print_error(struct pci_dev *dev, struct aer_err_info *info);
index 8b68ae59b7b6446f6405291ac868df18fdccee7c..85ca36f2136d0008af72c4aa0ddbaa1b321855e7 100644 (file)
@@ -366,39 +366,6 @@ static pci_ers_result_t broadcast_error_message(struct pci_dev *dev,
        return result_data.result;
 }
 
-/**
- * aer_do_secondary_bus_reset - perform secondary bus reset
- * @dev: pointer to bridge's pci_dev data structure
- *
- * Invoked when performing link reset at Root Port or Downstream Port.
- */
-void aer_do_secondary_bus_reset(struct pci_dev *dev)
-{
-       u16 p2p_ctrl;
-
-       /* Assert Secondary Bus Reset */
-       pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &p2p_ctrl);
-       p2p_ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
-       pci_write_config_word(dev, PCI_BRIDGE_CONTROL, p2p_ctrl);
-
-       /*
-        * we should send hot reset message for 2ms to allow it time to
-        * propagate to all downstream ports
-        */
-       msleep(2);
-
-       /* De-assert Secondary Bus Reset */
-       p2p_ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
-       pci_write_config_word(dev, PCI_BRIDGE_CONTROL, p2p_ctrl);
-
-       /*
-        * System software must wait for at least 100ms from the end
-        * of a reset of one or more device before it is permitted
-        * to issue Configuration Requests to those devices.
-        */
-       msleep(200);
-}
-
 /**
  * default_reset_link - default reset function
  * @dev: pointer to pci_dev data structure
@@ -408,7 +375,7 @@ void aer_do_secondary_bus_reset(struct pci_dev *dev)
  */
 static pci_ers_result_t default_reset_link(struct pci_dev *dev)
 {
-       aer_do_secondary_bus_reset(dev);
+       pci_reset_bridge_secondary_bus(dev);
        dev_printk(KERN_DEBUG, &dev->dev, "downstream link has been reset\n");
        return PCI_ERS_RESULT_RECOVERED;
 }
index 46ada5c098ebed710f24a7deb426720149b219fd..bfd56f9ae57c08341fd993656f0f800b6e219858 100644 (file)
@@ -96,7 +96,7 @@ static void release_pcibus_dev(struct device *dev)
 static struct class pcibus_class = {
        .name           = "pci_bus",
        .dev_release    = &release_pcibus_dev,
-       .dev_attrs      = pcibus_dev_attrs,
+       .dev_groups     = pcibus_groups,
 };
 
 static int __init pcibus_class_init(void)
@@ -513,7 +513,7 @@ static struct pci_host_bridge *pci_alloc_host_bridge(struct pci_bus *b)
        return bridge;
 }
 
-static unsigned char pcix_bus_speed[] = {
+const unsigned char pcix_bus_speed[] = {
        PCI_SPEED_UNKNOWN,              /* 0 */
        PCI_SPEED_66MHz_PCIX,           /* 1 */
        PCI_SPEED_100MHz_PCIX,          /* 2 */
@@ -532,7 +532,7 @@ static unsigned char pcix_bus_speed[] = {
        PCI_SPEED_133MHz_PCIX_533       /* F */
 };
 
-static unsigned char pcie_link_speed[] = {
+const unsigned char pcie_link_speed[] = {
        PCI_SPEED_UNKNOWN,              /* 0 */
        PCIE_SPEED_2_5GT,               /* 1 */
        PCIE_SPEED_5_0GT,               /* 2 */
index e85d23044ae0dcd43b20d0b840a71e551286a310..f6c31fabf3af0bbe24f20b949cc0b6373b11ab97 100644 (file)
@@ -3126,9 +3126,6 @@ static int reset_intel_generic_dev(struct pci_dev *dev, int probe)
 
 static int reset_intel_82599_sfp_virtfn(struct pci_dev *dev, int probe)
 {
-       int i;
-       u16 status;
-
        /*
         * http://www.intel.com/content/dam/doc/datasheet/82599-10-gbe-controller-datasheet.pdf
         *
@@ -3140,20 +3137,9 @@ static int reset_intel_82599_sfp_virtfn(struct pci_dev *dev, int probe)
        if (probe)
                return 0;
 
-       /* Wait for Transaction Pending bit clean */
-       for (i = 0; i < 4; i++) {
-               if (i)
-                       msleep((1 << (i - 1)) * 100);
-
-               pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status);
-               if (!(status & PCI_EXP_DEVSTA_TRPND))
-                       goto clear;
-       }
-
-       dev_err(&dev->dev, "transaction is not cleared; "
-                       "proceeding with reset anyway\n");
+       if (!pci_wait_for_pending_transaction(dev))
+               dev_err(&dev->dev, "transaction is not cleared; proceeding with reset anyway\n");
 
-clear:
        pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
 
        msleep(100);
@@ -3208,6 +3194,83 @@ reset_complete:
        return 0;
 }
 
+/*
+ * Device-specific reset method for Chelsio T4-based adapters.
+ */
+static int reset_chelsio_generic_dev(struct pci_dev *dev, int probe)
+{
+       u16 old_command;
+       u16 msix_flags;
+
+       /*
+        * If this isn't a Chelsio T4-based device, return -ENOTTY indicating
+        * that we have no device-specific reset method.
+        */
+       if ((dev->device & 0xf000) != 0x4000)
+               return -ENOTTY;
+
+       /*
+        * If this is the "probe" phase, return 0 indicating that we can
+        * reset this device.
+        */
+       if (probe)
+               return 0;
+
+       /*
+        * T4 can wedge if there are DMAs in flight within the chip and Bus
+        * Master has been disabled.  We need to have it on till the Function
+        * Level Reset completes.  (BUS_MASTER is disabled in
+        * pci_reset_function()).
+        */
+       pci_read_config_word(dev, PCI_COMMAND, &old_command);
+       pci_write_config_word(dev, PCI_COMMAND,
+                             old_command | PCI_COMMAND_MASTER);
+
+       /*
+        * Perform the actual device function reset, saving and restoring
+        * configuration information around the reset.
+        */
+       pci_save_state(dev);
+
+       /*
+        * T4 also suffers a Head-Of-Line blocking problem if MSI-X interrupts
+        * are disabled when an MSI-X interrupt message needs to be delivered.
+        * So we briefly re-enable MSI-X interrupts for the duration of the
+        * FLR.  The pci_restore_state() below will restore the original
+        * MSI-X state.
+        */
+       pci_read_config_word(dev, dev->msix_cap+PCI_MSIX_FLAGS, &msix_flags);
+       if ((msix_flags & PCI_MSIX_FLAGS_ENABLE) == 0)
+               pci_write_config_word(dev, dev->msix_cap+PCI_MSIX_FLAGS,
+                                     msix_flags |
+                                     PCI_MSIX_FLAGS_ENABLE |
+                                     PCI_MSIX_FLAGS_MASKALL);
+
+       /*
+        * Start of pcie_flr() code sequence.  This reset code is a copy of
+        * the guts of pcie_flr() because that's not an exported function.
+        */
+
+       if (!pci_wait_for_pending_transaction(dev))
+               dev_err(&dev->dev, "transaction is not cleared; proceeding with reset anyway\n");
+
+       pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
+       msleep(100);
+
+       /*
+        * End of pcie_flr() code sequence.
+        */
+
+       /*
+        * Restore the configuration information (BAR values, etc.) including
+        * the original PCI Configuration Space Command word, and return
+        * success.
+        */
+       pci_restore_state(dev);
+       pci_write_config_word(dev, PCI_COMMAND, old_command);
+       return 0;
+}
+
 #define PCI_DEVICE_ID_INTEL_82599_SFP_VF   0x10ed
 #define PCI_DEVICE_ID_INTEL_IVB_M_VGA      0x0156
 #define PCI_DEVICE_ID_INTEL_IVB_M2_VGA     0x0166
@@ -3221,6 +3284,8 @@ static const struct pci_dev_reset_methods pci_dev_reset_methods[] = {
                reset_ivb_igd },
        { PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
                reset_intel_generic_dev },
+       { PCI_VENDOR_ID_CHELSIO, PCI_ANY_ID,
+               reset_chelsio_generic_dev },
        { 0 }
 };
 
@@ -3295,11 +3360,61 @@ struct pci_dev *pci_get_dma_source(struct pci_dev *dev)
        return pci_dev_get(dev);
 }
 
+/*
+ * AMD has indicated that the devices below do not support peer-to-peer
+ * in any system where they are found in the southbridge with an AMD
+ * IOMMU in the system.  Multifunction devices that do not support
+ * peer-to-peer between functions can claim to support a subset of ACS.
+ * Such devices effectively enable request redirect (RR) and completion
+ * redirect (CR) since all transactions are redirected to the upstream
+ * root complex.
+ *
+ * http://permalink.gmane.org/gmane.comp.emulators.kvm.devel/94086
+ * http://permalink.gmane.org/gmane.comp.emulators.kvm.devel/94102
+ * http://permalink.gmane.org/gmane.comp.emulators.kvm.devel/99402
+ *
+ * 1002:4385 SBx00 SMBus Controller
+ * 1002:439c SB7x0/SB8x0/SB9x0 IDE Controller
+ * 1002:4383 SBx00 Azalia (Intel HDA)
+ * 1002:439d SB7x0/SB8x0/SB9x0 LPC host controller
+ * 1002:4384 SBx00 PCI to PCI Bridge
+ * 1002:4399 SB7x0/SB8x0/SB9x0 USB OHCI2 Controller
+ */
+static int pci_quirk_amd_sb_acs(struct pci_dev *dev, u16 acs_flags)
+{
+#ifdef CONFIG_ACPI
+       struct acpi_table_header *header = NULL;
+       acpi_status status;
+
+       /* Targeting multifunction devices on the SB (appears on root bus) */
+       if (!dev->multifunction || !pci_is_root_bus(dev->bus))
+               return -ENODEV;
+
+       /* The IVRS table describes the AMD IOMMU */
+       status = acpi_get_table("IVRS", 0, &header);
+       if (ACPI_FAILURE(status))
+               return -ENODEV;
+
+       /* Filter out flags not applicable to multifunction */
+       acs_flags &= (PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC | PCI_ACS_DT);
+
+       return acs_flags & ~(PCI_ACS_RR | PCI_ACS_CR) ? 0 : 1;
+#else
+       return -ENODEV;
+#endif
+}
+
 static const struct pci_dev_acs_enabled {
        u16 vendor;
        u16 device;
        int (*acs_enabled)(struct pci_dev *dev, u16 acs_flags);
 } pci_dev_acs_enabled[] = {
+       { PCI_VENDOR_ID_ATI, 0x4385, pci_quirk_amd_sb_acs },
+       { PCI_VENDOR_ID_ATI, 0x439c, pci_quirk_amd_sb_acs },
+       { PCI_VENDOR_ID_ATI, 0x4383, pci_quirk_amd_sb_acs },
+       { PCI_VENDOR_ID_ATI, 0x439d, pci_quirk_amd_sb_acs },
+       { PCI_VENDOR_ID_ATI, 0x4384, pci_quirk_amd_sb_acs },
+       { PCI_VENDOR_ID_ATI, 0x4399, pci_quirk_amd_sb_acs },
        { 0 }
 };
 
index 64a7de22d9afe0edb548b3b20f96d7c288e53273..fc1aebd819da3612c0d0072b37952e01efa73c44 100644 (file)
@@ -814,14 +814,14 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
 {
        struct pci_dev *dev;
        struct resource *b_res = find_free_bus_resource(bus, IORESOURCE_IO);
-       unsigned long size = 0, size0 = 0, size1 = 0;
+       resource_size_t size = 0, size0 = 0, size1 = 0;
        resource_size_t children_add_size = 0;
-       resource_size_t min_align, io_align, align;
+       resource_size_t min_align, align;
 
        if (!b_res)
                return;
 
-       io_align = min_align = window_alignment(bus, IORESOURCE_IO);
+       min_align = window_alignment(bus, IORESOURCE_IO);
        list_for_each_entry(dev, &bus->devices, bus_list) {
                int i;
 
@@ -848,9 +848,6 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
                }
        }
 
-       if (min_align > io_align)
-               min_align = io_align;
-
        size0 = calculate_iosize(size, min_size, size1,
                        resource_size(b_res), min_align);
        if (children_add_size > add_size)
@@ -874,8 +871,9 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
                add_to_list(realloc_head, bus->self, b_res, size1-size0,
                            min_align);
                dev_printk(KERN_DEBUG, &bus->self->dev, "bridge window "
-                                "%pR to %pR add_size %lx\n", b_res,
-                                &bus->busn_res, size1-size0);
+                                "%pR to %pR add_size %llx\n", b_res,
+                                &bus->busn_res,
+                                (unsigned long long)size1-size0);
        }
 }
 
@@ -905,6 +903,8 @@ static inline resource_size_t calculate_mem_align(resource_size_t *aligns,
  * pbus_size_mem() - size the memory window of a given bus
  *
  * @bus : the bus
+ * @mask: mask the resource flag, then compare it with type
+ * @type: the type of free resource from bridge
  * @min_size : the minimum memory window that must to be allocated
  * @add_size : additional optional memory window
  * @realloc_head : track the additional memory window on this list
@@ -1367,15 +1367,12 @@ static void pci_bus_dump_resources(struct pci_bus *bus)
 static int __init pci_bus_get_depth(struct pci_bus *bus)
 {
        int depth = 0;
-       struct pci_dev *dev;
+       struct pci_bus *child_bus;
 
-       list_for_each_entry(dev, &bus->devices, bus_list) {
+       list_for_each_entry(child_bus, &bus->children, node){
                int ret;
-               struct pci_bus *b = dev->subordinate;
-               if (!b)
-                       continue;
 
-               ret = pci_bus_get_depth(b);
+               ret = pci_bus_get_depth(child_bus);
                if (ret + 1 > depth)
                        depth = ret + 1;
        }
index 8e268da6fdbd56f154c5cf1d12bc70a654bea1df..0e9c169b42f82a782b1166b2c05880ad4e1ad0da 100644 (file)
@@ -1543,7 +1543,6 @@ static void asus_acpi_notify(struct acpi_device *device, u32 event)
 
        /* TODO Find a better way to handle events count. */
        count = asus->event_count[event % 128]++;
-       acpi_bus_generate_proc_event(asus->device, event, count);
        acpi_bus_generate_netlink_event(asus->device->pnp.device_class,
                                        dev_name(&asus->device->dev), event,
                                        count);
index 5d26e70bed6c532d51c0fb326bc97f91e1a8a020..a6afd4108beb0592c15604efa9f779180d5349ef 100644 (file)
@@ -1269,7 +1269,6 @@ static void eeepc_acpi_notify(struct acpi_device *device, u32 event)
        if (event > ACPI_MAX_SYS_NOTIFY)
                return;
        count = eeepc->event_count[event % 128]++;
-       acpi_bus_generate_proc_event(device, event, count);
        acpi_bus_generate_netlink_event(device->pnp.device_class,
                                        dev_name(&device->dev), event,
                                        count);
index 1c9386e7c58ce723ca583714b862604cbcfd25e5..52b8a97efde150f52d393b4e7a6c0be9360bc9ef 100644 (file)
@@ -773,8 +773,6 @@ static void acpi_fujitsu_notify(struct acpi_device *device, u32 event)
                                else
                                        set_lcd_level(newb);
                        }
-                       acpi_bus_generate_proc_event(fujitsu->dev,
-                               ACPI_VIDEO_NOTIFY_INC_BRIGHTNESS, 0);
                        keycode = KEY_BRIGHTNESSUP;
                } else if (oldb > newb) {
                        if (disable_brightness_adjust != 1) {
@@ -783,8 +781,6 @@ static void acpi_fujitsu_notify(struct acpi_device *device, u32 event)
                                else
                                        set_lcd_level(newb);
                        }
-                       acpi_bus_generate_proc_event(fujitsu->dev,
-                               ACPI_VIDEO_NOTIFY_DEC_BRIGHTNESS, 0);
                        keycode = KEY_BRIGHTNESSDOWN;
                }
                break;
index 4add9a31bf601cf151c5f0b6ce938de73ba0aa96..984253da365d88426db4e398311e28a2489659e2 100644 (file)
@@ -464,9 +464,6 @@ static void acpi_pcc_generate_keyinput(struct pcc_acpi *pcc)
                                 "error getting hotkey status\n"));
                return;
        }
-
-       acpi_bus_generate_proc_event(pcc->device, HKEY_NOTIFY, result);
-
        if (!sparse_keymap_report_event(hotk_input_dev,
                                        result & 0xf, result & 0x80, false))
                ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
index 2ac045f27f10112aa467b867a9b97a9a1790c189..069821b1fc22a9b139c355a675ec51036856820c 100644 (file)
@@ -1275,9 +1275,6 @@ static void sony_nc_notify(struct acpi_device *device, u32 event)
                ev_type = HOTKEY;
                sony_laptop_report_input_event(real_ev);
        }
-
-       acpi_bus_generate_proc_event(sony_nc_acpi_device, ev_type, real_ev);
-
        acpi_bus_generate_netlink_event(sony_nc_acpi_device->pnp.device_class,
                        dev_name(&sony_nc_acpi_device->dev), ev_type, real_ev);
 }
@@ -4243,7 +4240,6 @@ static irqreturn_t sony_pic_irq(int irq, void *dev_id)
 
 found:
        sony_laptop_report_input_event(device_event);
-       acpi_bus_generate_proc_event(dev->acpi_dev, 1, device_event);
        sonypi_compat_report_event(device_event);
        return IRQ_HANDLED;
 }
index 54d31c0a9840b45e34e910af232137d4d349d711..be67e5e28d188e07766dd1a1e164c7c76c183693 100644 (file)
@@ -2022,8 +2022,6 @@ static u32 hotkey_driver_mask;            /* events needed by the driver */
 static u32 hotkey_user_mask;           /* events visible to userspace */
 static u32 hotkey_acpi_mask;           /* events enabled in firmware */
 
-static unsigned int hotkey_report_mode;
-
 static u16 *hotkey_keycode_map;
 
 static struct attribute_set *hotkey_dev_attributes;
@@ -2282,10 +2280,6 @@ static struct tp_acpi_drv_struct ibm_hotkey_acpidriver;
 static void tpacpi_hotkey_send_key(unsigned int scancode)
 {
        tpacpi_input_send_key_masked(scancode);
-       if (hotkey_report_mode < 2) {
-               acpi_bus_generate_proc_event(ibm_hotkey_acpidriver.device,
-                               0x80, TP_HKEY_EV_HOTKEY_BASE + scancode);
-       }
 }
 
 static void hotkey_read_nvram(struct tp_nvram_state *n, const u32 m)
@@ -2882,18 +2876,6 @@ static void hotkey_tablet_mode_notify_change(void)
                             "hotkey_tablet_mode");
 }
 
-/* sysfs hotkey report_mode -------------------------------------------- */
-static ssize_t hotkey_report_mode_show(struct device *dev,
-                          struct device_attribute *attr,
-                          char *buf)
-{
-       return snprintf(buf, PAGE_SIZE, "%d\n",
-               (hotkey_report_mode != 0) ? hotkey_report_mode : 1);
-}
-
-static struct device_attribute dev_attr_hotkey_report_mode =
-       __ATTR(hotkey_report_mode, S_IRUGO, hotkey_report_mode_show, NULL);
-
 /* sysfs wakeup reason (pollable) -------------------------------------- */
 static ssize_t hotkey_wakeup_reason_show(struct device *dev,
                           struct device_attribute *attr,
@@ -2935,7 +2917,6 @@ static struct attribute *hotkey_attributes[] __initdata = {
        &dev_attr_hotkey_enable.attr,
        &dev_attr_hotkey_bios_enabled.attr,
        &dev_attr_hotkey_bios_mask.attr,
-       &dev_attr_hotkey_report_mode.attr,
        &dev_attr_hotkey_wakeup_reason.attr,
        &dev_attr_hotkey_wakeup_hotunplug_complete.attr,
        &dev_attr_hotkey_mask.attr,
@@ -3439,11 +3420,6 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
                "initial masks: user=0x%08x, fw=0x%08x, poll=0x%08x\n",
                hotkey_user_mask, hotkey_acpi_mask, hotkey_source_mask);
 
-       dbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY,
-                       "legacy ibm/hotkey event reporting over procfs %s\n",
-                       (hotkey_report_mode < 2) ?
-                               "enabled" : "disabled");
-
        tpacpi_inputdev->open = &hotkey_inputdev_open;
        tpacpi_inputdev->close = &hotkey_inputdev_close;
 
@@ -3737,13 +3713,6 @@ static void hotkey_notify(struct ibm_struct *ibm, u32 event)
                                  "event happened to %s\n", TPACPI_MAIL);
                }
 
-               /* Legacy events */
-               if (!ignore_acpi_ev &&
-                   (send_acpi_ev || hotkey_report_mode < 2)) {
-                       acpi_bus_generate_proc_event(ibm->acpi->device,
-                                                    event, hkey);
-               }
-
                /* netlink events */
                if (!ignore_acpi_ev && send_acpi_ev) {
                        acpi_bus_generate_netlink_event(
@@ -8840,11 +8809,6 @@ module_param(brightness_enable, uint, 0444);
 MODULE_PARM_DESC(brightness_enable,
                 "Enables backlight control when 1, disables when 0");
 
-module_param(hotkey_report_mode, uint, 0444);
-MODULE_PARM_DESC(hotkey_report_mode,
-                "used for backwards compatibility with userspace, "
-                "see documentation");
-
 #ifdef CONFIG_THINKPAD_ACPI_ALSA_SUPPORT
 module_param_named(volume_mode, volume_mode, uint, 0444);
 MODULE_PARM_DESC(volume_mode,
@@ -8975,10 +8939,6 @@ static int __init thinkpad_acpi_module_init(void)
 
        tpacpi_lifecycle = TPACPI_LIFE_INIT;
 
-       /* Parameter checking */
-       if (hotkey_report_mode > 2)
-               return -EINVAL;
-
        /* Driver-level probe */
 
        ret = get_thinkpad_model_data(&thinkpad_id);
index 00e94032531a71c121b6a222efa14338e08ff6b3..12adb43a069317da31958ac4913418c385cedb77 100644 (file)
@@ -154,7 +154,7 @@ static int pnp_bus_match(struct device *dev, struct device_driver *drv)
        return 1;
 }
 
-static int pnp_bus_suspend(struct device *dev, pm_message_t state)
+static int __pnp_bus_suspend(struct device *dev, pm_message_t state)
 {
        struct pnp_dev *pnp_dev = to_pnp_dev(dev);
        struct pnp_driver *pnp_drv = pnp_dev->driver;
@@ -180,6 +180,16 @@ static int pnp_bus_suspend(struct device *dev, pm_message_t state)
        return 0;
 }
 
+static int pnp_bus_suspend(struct device *dev)
+{
+       return __pnp_bus_suspend(dev, PMSG_SUSPEND);
+}
+
+static int pnp_bus_freeze(struct device *dev)
+{
+       return __pnp_bus_suspend(dev, PMSG_FREEZE);
+}
+
 static int pnp_bus_resume(struct device *dev)
 {
        struct pnp_dev *pnp_dev = to_pnp_dev(dev);
@@ -210,14 +220,19 @@ static int pnp_bus_resume(struct device *dev)
        return 0;
 }
 
+static const struct dev_pm_ops pnp_bus_dev_pm_ops = {
+       .suspend = pnp_bus_suspend,
+       .freeze = pnp_bus_freeze,
+       .resume = pnp_bus_resume,
+};
+
 struct bus_type pnp_bus_type = {
        .name    = "pnp",
        .match   = pnp_bus_match,
        .probe   = pnp_device_probe,
        .remove  = pnp_device_remove,
        .shutdown = pnp_device_shutdown,
-       .suspend = pnp_bus_suspend,
-       .resume  = pnp_bus_resume,
+       .pm      = &pnp_bus_dev_pm_ops,
        .dev_attrs = pnp_interface_attrs,
 };
 
index 55cd459a39080d6d162074c74772e9850d88c8f1..34049b0b4c731e352ae48f62c7fd3edcfc71b517 100644 (file)
@@ -131,7 +131,7 @@ static int pnpacpi_disable_resources(struct pnp_dev *dev)
        /* acpi_unregister_gsi(pnp_irq(dev, 0)); */
        ret = 0;
        if (acpi_bus_power_manageable(handle))
-               acpi_bus_set_power(handle, ACPI_STATE_D3);
+               acpi_bus_set_power(handle, ACPI_STATE_D3_COLD);
                /* continue even if acpi_bus_set_power() fails */
        if (ACPI_FAILURE(acpi_evaluate_object(handle, "_DIS", NULL, NULL)))
                ret = -ENODEV;
@@ -174,10 +174,10 @@ static int pnpacpi_suspend(struct pnp_dev *dev, pm_message_t state)
 
        if (acpi_bus_power_manageable(handle)) {
                int power_state = acpi_pm_device_sleep_state(&dev->dev, NULL,
-                                                            ACPI_STATE_D3);
+                                                       ACPI_STATE_D3_COLD);
                if (power_state < 0)
                        power_state = (state.event == PM_EVENT_ON) ?
-                                       ACPI_STATE_D0 : ACPI_STATE_D3;
+                                       ACPI_STATE_D0 : ACPI_STATE_D3_COLD;
 
                /*
                 * acpi_bus_set_power() often fails (keyboard port can't be
index ffff66b1c1aa0089f0a0f9f51555503de7f1209d..de029bbc1cc151ec0c3e43ac2c7f1a56bcc32935 100644 (file)
@@ -554,7 +554,7 @@ static irqreturn_t pm860x_vchg_handler(int irq, void *data)
                                        OVTEMP_AUTORECOVER,
                                        OVTEMP_AUTORECOVER);
                        dev_dbg(info->dev,
-                               "%s, pm8606 over-temp occure\n", __func__);
+                               "%s, pm8606 over-temp occurred\n", __func__);
                }
        }
 
@@ -562,7 +562,7 @@ static irqreturn_t pm860x_vchg_handler(int irq, void *data)
                set_vchg_threshold(info, VCHG_OVP_LOW, 0);
                info->allowed = 0;
                dev_dbg(info->dev,
-                       "%s,pm8607 over-vchg occure,vchg = %dmv\n",
+                       "%s,pm8607 over-vchg occurred,vchg = %dmv\n",
                        __func__, vchg);
        } else if (vchg < VCHG_OVP_LOW) {
                set_vchg_threshold(info, VCHG_NORMAL_LOW,
index 7b8979c63f4882e6c3e5375b9f9c35c40e5a829c..dcc0d9e5817d929d62f7231ff91fb322ffe99ed1 100644 (file)
@@ -357,7 +357,7 @@ config AB8500_BM
 
 config BATTERY_GOLDFISH
        tristate "Goldfish battery driver"
-       depends on GENERIC_HARDIRQS
+       depends on GENERIC_HARDIRQS && (GOLDFISH || COMPILE_TEST)
        help
          Say Y to enable support for the battery and AC power in the
          Goldfish emulator.
index c58d0e31bdef51918934aac528882513962c3fbb..d02ae02a7590c63f3199c676657d66d19c7af64e 100644 (file)
@@ -287,7 +287,7 @@ static struct gpio collie_batt_gpios[] = {
 };
 
 #ifdef CONFIG_PM
-static int collie_bat_suspend(struct ucb1x00_dev *dev, pm_message_t state)
+static int collie_bat_suspend(struct ucb1x00_dev *dev)
 {
        /* flush all pending status updates */
        flush_work(&bat_work);
index 1c0bfcbae0623a235b189c7dc6b5cf5919f65bf9..ffa10ed83eb1559481c0f1252f37644013c0b9b1 100644 (file)
@@ -386,7 +386,7 @@ static int pm2_int_reg2(void *pm2_data, int val)
        if (val & (PM2XXX_INT3_ITCHPRECHARGEWD |
                                PM2XXX_INT3_ITCHCCWD | PM2XXX_INT3_ITCHCVWD)) {
                dev_dbg(pm2->dev,
-                       "Watchdog occured for precharge, CC and CV charge\n");
+                       "Watchdog occurred for precharge, CC and CV charge\n");
        }
 
        return ret;
index 29178f78d73cfb79868923bfee7ffbe30b393e1b..44420d1e9094cbbc559341b518fe698a7bd0b4fd 100644 (file)
@@ -118,7 +118,7 @@ static ssize_t power_supply_store_property(struct device *dev,
        long long_val;
 
        /* TODO: support other types than int */
-       ret = strict_strtol(buf, 10, &long_val);
+       ret = kstrtol(buf, 10, &long_val);
        if (ret < 0)
                return ret;
 
index ee039dcead04782a5389ff7827f271e2d3aa2192..5482280467e5b08765478ef4d12ca87f6579b9f7 100644 (file)
@@ -14,6 +14,12 @@ config POWER_RESET_GPIO
          If your board needs a GPIO high/low to power down, say Y and
          create a binding in your devicetree.
 
+config POWER_RESET_MSM
+       bool "Qualcomm MSM power-off driver"
+       depends on POWER_RESET && ARCH_MSM
+       help
+         Power off and restart support for Qualcomm boards.
+
 config POWER_RESET_QNAP
        bool "QNAP power-off driver"
        depends on OF_GPIO && POWER_RESET && PLAT_ORION
@@ -38,3 +44,10 @@ config POWER_RESET_VEXPRESS
        help
          Power off and reset support for the ARM Ltd. Versatile
          Express boards.
+
+config POWER_RESET_XGENE
+       bool "APM SoC X-Gene reset driver"
+       depends on ARM64
+       depends on POWER_RESET
+       help
+         Reboot support for the APM SoC X-Gene Eval boards.
index 372807fd83f78d32d352e1479721407293c24fce..3e6ed88725ac0889f32326f91c0450dceab13bc3 100644 (file)
@@ -1,4 +1,6 @@
 obj-$(CONFIG_POWER_RESET_GPIO) += gpio-poweroff.o
+obj-$(CONFIG_POWER_RESET_MSM) += msm-poweroff.o
 obj-$(CONFIG_POWER_RESET_QNAP) += qnap-poweroff.o
 obj-$(CONFIG_POWER_RESET_RESTART) += restart-poweroff.o
 obj-$(CONFIG_POWER_RESET_VEXPRESS) += vexpress-poweroff.o
+obj-$(CONFIG_POWER_RESET_XGENE) += xgene-reboot.o
diff --git a/drivers/power/reset/msm-poweroff.c b/drivers/power/reset/msm-poweroff.c
new file mode 100644 (file)
index 0000000..774f9a3
--- /dev/null
@@ -0,0 +1,73 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/reboot.h>
+
+#include <asm/system_misc.h>
+
+static void __iomem *msm_ps_hold;
+
+static void do_msm_restart(enum reboot_mode reboot_mode, const char *cmd)
+{
+       writel(0, msm_ps_hold);
+       mdelay(10000);
+}
+
+static void do_msm_poweroff(void)
+{
+       /* TODO: Add poweroff capability */
+       do_msm_restart(REBOOT_HARD, NULL);
+}
+
+static int msm_restart_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct resource *mem;
+
+       mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       msm_ps_hold = devm_ioremap_resource(dev, mem);
+       if (IS_ERR(msm_ps_hold))
+               return PTR_ERR(msm_ps_hold);
+
+       pm_power_off = do_msm_poweroff;
+       arm_pm_restart = do_msm_restart;
+       return 0;
+}
+
+static const struct of_device_id of_msm_restart_match[] = {
+       { .compatible = "qcom,pshold", },
+       {},
+};
+MODULE_DEVICE_TABLE(of, of_msm_restart_match);
+
+static struct platform_driver msm_restart_driver = {
+       .probe = msm_restart_probe,
+       .driver = {
+               .name = "msm-restart",
+               .of_match_table = of_match_ptr(of_msm_restart_match),
+       },
+};
+
+static int __init msm_restart_init(void)
+{
+       return platform_driver_register(&msm_restart_driver);
+}
+device_initcall(msm_restart_init);
diff --git a/drivers/power/reset/xgene-reboot.c b/drivers/power/reset/xgene-reboot.c
new file mode 100644 (file)
index 0000000..ecd55f8
--- /dev/null
@@ -0,0 +1,103 @@
+/*
+ * AppliedMicro X-Gene SoC Reboot Driver
+ *
+ * Copyright (c) 2013, Applied Micro Circuits Corporation
+ * Author: Feng Kan <fkan@apm.com>
+ * Author: Loc Ho <lho@apm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ *
+ * This driver provides system reboot functionality for APM X-Gene SoC.
+ * System shutdown, by contrast, is board specific. If a board designer
+ * implements GPIO shutdown, use the gpio-poweroff.c driver.
+ */
+#include <linux/io.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/stat.h>
+#include <linux/slab.h>
+#include <asm/system_misc.h>
+
+/* State captured at probe time and consumed by the restart hook. */
+struct xgene_reboot_context {
+       struct platform_device *pdev;   /* used only for dev_emerg() logging */
+       void __iomem *csr;              /* reboot CSR mapped via of_iomap();
+                                        * __iomem added: it is a writel() target */
+       u32 mask;                       /* value written to csr to reset */
+};
+
+static struct xgene_reboot_context *xgene_restart_ctx;
+
+/*
+ * arm_pm_restart hook: write the reset mask to the reboot CSR, then spin
+ * for up to one second.  Reaching the final message means the write did
+ * not reset the chip.
+ *
+ * Fixes vs. original: the prototype must match arm_pm_restart (it took a
+ * bogus "char str" first argument), and dev_emerg() dereferenced ctx
+ * outside the NULL guard — bail out early instead.
+ */
+static void xgene_restart(enum reboot_mode reboot_mode, const char *cmd)
+{
+       struct xgene_reboot_context *ctx = xgene_restart_ctx;
+       unsigned long timeout;
+
+       if (!ctx)
+               return;
+
+       /* Issue the reboot */
+       writel(ctx->mask, ctx->csr);
+
+       timeout = jiffies + HZ;
+       while (time_before(jiffies, timeout))
+               cpu_relax();
+
+       dev_emerg(&ctx->pdev->dev, "Unable to restart system\n");
+}
+
+/*
+ * Map the reboot CSR, read the optional "mask" DT property (defaulting
+ * to all-ones), and install the restart hook.
+ *
+ * Fixes vs. original: allocation failure now returns -ENOMEM (was
+ * -ENODEV), the dev_err on OOM is dropped (the allocator already
+ * warns), and the explicit devm_kfree() on the iomap error path is
+ * removed — devm frees the context automatically when probe fails.
+ */
+static int xgene_reboot_probe(struct platform_device *pdev)
+{
+       struct xgene_reboot_context *ctx;
+
+       ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
+       if (!ctx)
+               return -ENOMEM;
+
+       ctx->csr = of_iomap(pdev->dev.of_node, 0);
+       if (!ctx->csr) {
+               dev_err(&pdev->dev, "can not map resource\n");
+               return -ENODEV;
+       }
+
+       /* Without a DT "mask" property, reset by writing all-ones. */
+       if (of_property_read_u32(pdev->dev.of_node, "mask", &ctx->mask))
+               ctx->mask = 0xFFFFFFFF;
+
+       ctx->pdev = pdev;
+       arm_pm_restart = xgene_restart;
+       xgene_restart_ctx = ctx;
+
+       return 0;
+}
+
+/* DT binding table; const to match .of_match_table's expected type
+ * (and the msm driver's table in this same series). */
+static const struct of_device_id xgene_reboot_of_match[] = {
+       { .compatible = "apm,xgene-reboot" },
+       {}
+};
+
+static struct platform_driver xgene_reboot_driver = {
+       .probe = xgene_reboot_probe,
+       .driver = {
+               .name = "xgene-reboot",
+               .of_match_table = xgene_reboot_of_match,
+       },
+};
+
+/* Registered at device_initcall time; no exit path since the installed
+ * restart hook must remain valid for the lifetime of the system. */
+static int __init xgene_reboot_init(void)
+{
+       return platform_driver_register(&xgene_reboot_driver);
+}
+device_initcall(xgene_reboot_init);
index 0224de50c54017b0d60a0f9be53d13d21c44ff1d..f4d80df627c7097470a45638bf0392a688d50b93 100644 (file)
@@ -150,7 +150,7 @@ static void tosa_bat_external_power_changed(struct power_supply *psy)
 
 static irqreturn_t tosa_bat_gpio_isr(int irq, void *data)
 {
-       pr_info("tosa_bat_gpio irq: %d\n", gpio_get_value(irq_to_gpio(irq)));
+       pr_info("tosa_bat_gpio irq\n");
        schedule_work(&bat_work);
        return IRQ_HANDLED;
 }
diff --git a/drivers/regulator/88pm800.c b/drivers/regulator/88pm800.c
new file mode 100644 (file)
index 0000000..3459f60
--- /dev/null
@@ -0,0 +1,383 @@
+/*
+ * Regulators driver for Marvell 88PM800
+ *
+ * Copyright (C) 2012 Marvell International Ltd.
+ * Joseph(Yossi) Hanin <yhanin@marvell.com>
+ * Yi Zhang <yizhang@marvell.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/mfd/88pm80x.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/regulator/of_regulator.h>
+
+/* LDO1 with DVC[0..3] */
+#define PM800_LDO1_VOUT                (0x08) /* VOUT1 */
+#define PM800_LDO1_VOUT_2      (0x09)
+#define PM800_LDO1_VOUT_3      (0x0A)
+#define PM800_LDO2_VOUT                (0x0B)
+#define PM800_LDO3_VOUT                (0x0C)
+#define PM800_LDO4_VOUT                (0x0D)
+#define PM800_LDO5_VOUT                (0x0E)
+#define PM800_LDO6_VOUT                (0x0F)
+#define PM800_LDO7_VOUT                (0x10)
+#define PM800_LDO8_VOUT                (0x11)
+#define PM800_LDO9_VOUT                (0x12)
+#define PM800_LDO10_VOUT       (0x13)
+#define PM800_LDO11_VOUT       (0x14)
+#define PM800_LDO12_VOUT       (0x15)
+#define PM800_LDO13_VOUT       (0x16)
+#define PM800_LDO14_VOUT       (0x17)
+#define PM800_LDO15_VOUT       (0x18)
+#define PM800_LDO16_VOUT       (0x19)
+#define PM800_LDO17_VOUT       (0x1A)
+#define PM800_LDO18_VOUT       (0x1B)
+#define PM800_LDO19_VOUT       (0x1C)
+
+/* BUCK1 with DVC[0..3] */
+#define PM800_BUCK1            (0x3C)
+#define PM800_BUCK1_1          (0x3D)
+#define PM800_BUCK1_2          (0x3E)
+#define PM800_BUCK1_3          (0x3F)
+#define PM800_BUCK2            (0x40)
+#define PM800_BUCK3            (0x41)
+/* duplicate "#define PM800_BUCK3 (0x41)" removed */
+#define PM800_BUCK4            (0x42)
+#define PM800_BUCK4_1          (0x43)
+#define PM800_BUCK4_2          (0x44)
+#define PM800_BUCK4_3          (0x45)
+#define PM800_BUCK5            (0x46)
+
+#define PM800_BUCK_ENA         (0x50)
+#define PM800_LDO_ENA1_1       (0x51)
+#define PM800_LDO_ENA1_2       (0x52)
+#define PM800_LDO_ENA1_3       (0x53)
+
+#define PM800_LDO_ENA2_1       (0x56)
+#define PM800_LDO_ENA2_2       (0x57)
+#define PM800_LDO_ENA2_3       (0x58)
+
+#define PM800_BUCK1_MISC1      (0x78)
+#define PM800_BUCK3_MISC1      (0x7E)
+#define PM800_BUCK4_MISC1      (0x81)
+#define PM800_BUCK5_MISC1      (0x84)
+
+/* Static description of one regulator: the regmap-driven desc plus the
+ * fixed current limit reported by .get_current_limit. */
+struct pm800_regulator_info {
+       struct regulator_desc desc;
+       int max_ua;             /* maximum current, in uA */
+};
+
+/* Per-device state, stored as platform drvdata in probe. */
+struct pm800_regulators {
+       struct regulator_dev *regulators[PM800_ID_RG_MAX];
+       struct pm80x_chip *chip;
+       struct regmap *map;     /* power-page regmap of the parent chip */
+};
+
+/*
+ * vreg - the buck regs string.
+ * ereg - the string for the enable register.
+ * ebit - the bit number in the enable register.
+ * amax - the current
+ * Buck has 2 kinds of voltage steps. It is easy to find voltage by ranges,
+ * not the constant voltage table.
+ * n_volt - Number of available selectors
+ */
+#define PM800_BUCK(vreg, ereg, ebit, amax, volt_ranges, n_volt)                \
+{                                                                      \
+       .desc   = {                                                     \
+               .name   = #vreg,                                        \
+               .ops    = &pm800_volt_range_ops,                        \
+               .type   = REGULATOR_VOLTAGE,                            \
+               .id     = PM800_ID_##vreg,                              \
+               .owner  = THIS_MODULE,                                  \
+               .n_voltages             = n_volt,                       \
+               .linear_ranges          = volt_ranges,                  \
+               .n_linear_ranges        = ARRAY_SIZE(volt_ranges),      \
+               .vsel_reg               = PM800_##vreg,                 \
+               .vsel_mask              = 0x7f,                         \
+               .enable_reg             = PM800_##ereg,                 \
+               .enable_mask            = 1 << (ebit),                  \
+       },                                                              \
+       .max_ua         = (amax),                                       \
+}
+
+/*
+ * vreg - the LDO regs string
+ * ereg -  the string for the enable register.
+ * ebit - the bit number in the enable register.
+ * amax - the current
+ * volt_table - the LDO voltage table
+ * For all the LDOes, there are too many ranges. Using volt_table will be
+ * simpler and faster.
+ */
+#define PM800_LDO(vreg, ereg, ebit, amax, ldo_volt_table)              \
+{                                                                      \
+       .desc   = {                                                     \
+               .name   = #vreg,                                        \
+               .ops    = &pm800_volt_table_ops,                        \
+               .type   = REGULATOR_VOLTAGE,                            \
+               .id     = PM800_ID_##vreg,                              \
+               .owner  = THIS_MODULE,                                  \
+               .n_voltages = ARRAY_SIZE(ldo_volt_table),               \
+               .vsel_reg       = PM800_##vreg##_VOUT,                  \
+               .vsel_mask      = 0x1f,                                 \
+               .enable_reg     = PM800_##ereg,                         \
+               .enable_mask    = 1 << (ebit),                          \
+               .volt_table     = ldo_volt_table,                       \
+       },                                                              \
+       .max_ua         = (amax),                                       \
+}
+
+/* Ranges are sorted in ascending order. */
+static const struct regulator_linear_range buck1_volt_range[] = {
+       { .min_uV = 600000, .max_uV = 1587500, .min_sel = 0, .max_sel = 0x4f,
+         .uV_step = 12500 },
+       { .min_uV = 1600000, .max_uV = 1800000, .min_sel = 0x50,
+         .max_sel = 0x54, .uV_step = 50000 },
+};
+
+/* BUCK 2~5 have same ranges. */
+static const struct regulator_linear_range buck2_5_volt_range[] = {
+       { .min_uV = 600000, .max_uV = 1587500,  .min_sel = 0, .max_sel = 0x4f,
+         .uV_step = 12500 },
+       { .min_uV = 1600000, .max_uV = 3300000, .min_sel = 0x50,
+         .max_sel = 0x72, .uV_step = 50000 },
+};
+
+static const unsigned int ldo1_volt_table[] = {
+       600000,  650000,  700000,  750000,  800000,  850000,  900000,  950000,
+       1000000, 1050000, 1100000, 1150000, 1200000, 1300000, 1400000, 1500000,
+};
+
+static const unsigned int ldo2_volt_table[] = {
+       1700000, 1800000, 1900000, 2000000, 2100000, 2500000, 2700000, 2800000,
+};
+
+/* LDO 3~17 have same voltage table. */
+static const unsigned int ldo3_17_volt_table[] = {
+       1200000, 1250000, 1700000, 1800000, 1850000, 1900000, 2500000, 2600000,
+       2700000, 2750000, 2800000, 2850000, 2900000, 3000000, 3100000, 3300000,
+};
+
+/* LDO 18~19 have same voltage table. */
+static const unsigned int ldo18_19_volt_table[] = {
+       1700000, 1800000, 1900000, 2500000, 2800000, 2900000, 3100000, 3300000,
+};
+
+/* regulator_ops .get_current_limit: report the fixed per-regulator
+ * maximum current (uA) from the static info table (set via driver_data
+ * in the regulator_config at registration time). */
+static int pm800_get_current_limit(struct regulator_dev *rdev)
+{
+       struct pm800_regulator_info *info = rdev_get_drvdata(rdev);
+
+       return info->max_ua;
+}
+
+static struct regulator_ops pm800_volt_range_ops = {
+       .list_voltage = regulator_list_voltage_linear_range,
+       .map_voltage = regulator_map_voltage_linear_range,
+       .set_voltage_sel = regulator_set_voltage_sel_regmap,
+       .get_voltage_sel = regulator_get_voltage_sel_regmap,
+       .enable = regulator_enable_regmap,
+       .disable = regulator_disable_regmap,
+       .is_enabled = regulator_is_enabled_regmap,
+       .get_current_limit = pm800_get_current_limit,
+};
+
+static struct regulator_ops pm800_volt_table_ops = {
+       .list_voltage = regulator_list_voltage_table,
+       .map_voltage = regulator_map_voltage_iterate,
+       .set_voltage_sel = regulator_set_voltage_sel_regmap,
+       .get_voltage_sel = regulator_get_voltage_sel_regmap,
+       .enable = regulator_enable_regmap,
+       .disable = regulator_disable_regmap,
+       .is_enabled = regulator_is_enabled_regmap,
+       .get_current_limit = pm800_get_current_limit,
+};
+
+/* The array is indexed by id(PM800_ID_XXX) */
+static struct pm800_regulator_info pm800_regulator_info[] = {
+       PM800_BUCK(BUCK1, BUCK_ENA, 0, 3000000, buck1_volt_range, 0x55),
+       PM800_BUCK(BUCK2, BUCK_ENA, 1, 1200000, buck2_5_volt_range, 0x73),
+       PM800_BUCK(BUCK3, BUCK_ENA, 2, 1200000, buck2_5_volt_range, 0x73),
+       PM800_BUCK(BUCK4, BUCK_ENA, 3, 1200000, buck2_5_volt_range, 0x73),
+       PM800_BUCK(BUCK5, BUCK_ENA, 4, 1200000, buck2_5_volt_range, 0x73),
+
+       PM800_LDO(LDO1, LDO_ENA1_1, 0, 200000, ldo1_volt_table),
+       PM800_LDO(LDO2, LDO_ENA1_1, 1, 10000, ldo2_volt_table),
+       PM800_LDO(LDO3, LDO_ENA1_1, 2, 300000, ldo3_17_volt_table),
+       PM800_LDO(LDO4, LDO_ENA1_1, 3, 300000, ldo3_17_volt_table),
+       PM800_LDO(LDO5, LDO_ENA1_1, 4, 300000, ldo3_17_volt_table),
+       PM800_LDO(LDO6, LDO_ENA1_1, 5, 300000, ldo3_17_volt_table),
+       PM800_LDO(LDO7, LDO_ENA1_1, 6, 300000, ldo3_17_volt_table),
+       PM800_LDO(LDO8, LDO_ENA1_1, 7, 300000, ldo3_17_volt_table),
+       PM800_LDO(LDO9, LDO_ENA1_2, 0, 300000, ldo3_17_volt_table),
+       PM800_LDO(LDO10, LDO_ENA1_2, 1, 300000, ldo3_17_volt_table),
+       PM800_LDO(LDO11, LDO_ENA1_2, 2, 300000, ldo3_17_volt_table),
+       PM800_LDO(LDO12, LDO_ENA1_2, 3, 300000, ldo3_17_volt_table),
+       PM800_LDO(LDO13, LDO_ENA1_2, 4, 300000, ldo3_17_volt_table),
+       PM800_LDO(LDO14, LDO_ENA1_2, 5, 300000, ldo3_17_volt_table),
+       PM800_LDO(LDO15, LDO_ENA1_2, 6, 300000, ldo3_17_volt_table),
+       PM800_LDO(LDO16, LDO_ENA1_2, 7, 300000, ldo3_17_volt_table),
+       PM800_LDO(LDO17, LDO_ENA1_3, 0, 300000, ldo3_17_volt_table),
+       PM800_LDO(LDO18, LDO_ENA1_3, 1, 200000, ldo18_19_volt_table),
+       PM800_LDO(LDO19, LDO_ENA1_3, 2, 200000, ldo18_19_volt_table),
+};
+
+#define PM800_REGULATOR_OF_MATCH(_name, _id)                           \
+       [PM800_ID_##_id] = {                                            \
+               .name = #_name,                                         \
+               .driver_data = &pm800_regulator_info[PM800_ID_##_id],   \
+       }
+
+static struct of_regulator_match pm800_regulator_matches[] = {
+       PM800_REGULATOR_OF_MATCH(buck1, BUCK1),
+       PM800_REGULATOR_OF_MATCH(buck2, BUCK2),
+       PM800_REGULATOR_OF_MATCH(buck3, BUCK3),
+       PM800_REGULATOR_OF_MATCH(buck4, BUCK4),
+       PM800_REGULATOR_OF_MATCH(buck5, BUCK5),
+       PM800_REGULATOR_OF_MATCH(ldo1, LDO1),
+       PM800_REGULATOR_OF_MATCH(ldo2, LDO2),
+       PM800_REGULATOR_OF_MATCH(ldo3, LDO3),
+       PM800_REGULATOR_OF_MATCH(ldo4, LDO4),
+       PM800_REGULATOR_OF_MATCH(ldo5, LDO5),
+       PM800_REGULATOR_OF_MATCH(ldo6, LDO6),
+       PM800_REGULATOR_OF_MATCH(ldo7, LDO7),
+       PM800_REGULATOR_OF_MATCH(ldo8, LDO8),
+       PM800_REGULATOR_OF_MATCH(ldo9, LDO9),
+       PM800_REGULATOR_OF_MATCH(ldo10, LDO10),
+       PM800_REGULATOR_OF_MATCH(ldo11, LDO11),
+       PM800_REGULATOR_OF_MATCH(ldo12, LDO12),
+       PM800_REGULATOR_OF_MATCH(ldo13, LDO13),
+       PM800_REGULATOR_OF_MATCH(ldo14, LDO14),
+       PM800_REGULATOR_OF_MATCH(ldo15, LDO15),
+       PM800_REGULATOR_OF_MATCH(ldo16, LDO16),
+       PM800_REGULATOR_OF_MATCH(ldo17, LDO17),
+       PM800_REGULATOR_OF_MATCH(ldo18, LDO18),
+       PM800_REGULATOR_OF_MATCH(ldo19, LDO19),
+};
+
+/*
+ * Parse the device-tree regulator children into pm800_regulator_matches.
+ * Returns 0 on success or the negative errno from of_regulator_match().
+ */
+static int pm800_regulator_dt_init(struct platform_device *pdev)
+{
+       int matched;
+
+       matched = of_regulator_match(&pdev->dev, pdev->dev.of_node,
+                                    pm800_regulator_matches,
+                                    ARRAY_SIZE(pm800_regulator_matches));
+
+       return matched < 0 ? matched : 0;
+}
+
+/*
+ * Probe: validate platform data (or parse the device tree when no pdata
+ * is supplied), then register every regulator that has init_data.  On a
+ * registration failure, unregister the ones already registered and bail.
+ *
+ * Fix vs. original: the OOM error string said "pm800_regualtors".
+ */
+static int pm800_regulator_probe(struct platform_device *pdev)
+{
+       struct pm80x_chip *chip = dev_get_drvdata(pdev->dev.parent);
+       struct pm80x_platform_data *pdata = dev_get_platdata(pdev->dev.parent);
+       struct pm800_regulators *pm800_data;
+       struct pm800_regulator_info *info;
+       struct regulator_config config = { };
+       struct regulator_init_data *init_data;
+       int i, ret;
+
+       if (!pdata || pdata->num_regulators == 0) {
+               /* No usable platform data: fall back to device tree. */
+               if (IS_ENABLED(CONFIG_OF)) {
+                       ret = pm800_regulator_dt_init(pdev);
+                       if (ret)
+                               return ret;
+               } else {
+                       return -ENODEV;
+               }
+       } else if (pdata->num_regulators) {
+               unsigned int count = 0;
+
+               /* Check whether num_regulator is valid. */
+               for (i = 0; i < ARRAY_SIZE(pdata->regulators); i++) {
+                       if (pdata->regulators[i])
+                               count++;
+               }
+               if (count != pdata->num_regulators)
+                       return -EINVAL;
+       } else {
+               return -EINVAL;
+       }
+
+       pm800_data = devm_kzalloc(&pdev->dev, sizeof(*pm800_data),
+                                       GFP_KERNEL);
+       if (!pm800_data) {
+               dev_err(&pdev->dev, "Failed to allocate pm800_regulators");
+               return -ENOMEM;
+       }
+
+       pm800_data->map = chip->subchip->regmap_power;
+       pm800_data->chip = chip;
+
+       platform_set_drvdata(pdev, pm800_data);
+
+       for (i = 0; i < PM800_ID_RG_MAX; i++) {
+               /* init_data comes from DT matches or from platform data. */
+               if (!pdata || pdata->num_regulators == 0)
+                       init_data = pm800_regulator_matches[i].init_data;
+               else
+                       init_data = pdata->regulators[i];
+               if (!init_data)
+                       continue;
+               info = pm800_regulator_matches[i].driver_data;
+               config.dev = &pdev->dev;
+               config.init_data = init_data;
+               config.driver_data = info;
+               config.regmap = pm800_data->map;
+               config.of_node = pm800_regulator_matches[i].of_node;
+
+               pm800_data->regulators[i] =
+                               regulator_register(&info->desc, &config);
+               if (IS_ERR(pm800_data->regulators[i])) {
+                       ret = PTR_ERR(pm800_data->regulators[i]);
+                       dev_err(&pdev->dev, "Failed to register %s\n",
+                               info->desc.name);
+
+                       /* Roll back regulators registered so far.  NOTE(review):
+                        * slots skipped via "continue" are NULL here — confirm
+                        * regulator_unregister() tolerates NULL. */
+                       while (--i >= 0)
+                               regulator_unregister(pm800_data->regulators[i]);
+
+                       return ret;
+               }
+       }
+
+       return 0;
+}
+
+/*
+ * Unregister every regulator slot filled by probe.  NOTE(review): slots
+ * whose init_data was absent were never registered and remain NULL
+ * (drvdata was zero-allocated) — confirm regulator_unregister()
+ * tolerates NULL before relying on this loop shape.
+ */
+static int pm800_regulator_remove(struct platform_device *pdev)
+{
+       struct pm800_regulators *pm800_data = platform_get_drvdata(pdev);
+       int i;
+
+       for (i = 0; i < PM800_ID_RG_MAX; i++)
+               regulator_unregister(pm800_data->regulators[i]);
+
+       return 0;
+}
+
+static struct platform_driver pm800_regulator_driver = {
+       .driver         = {
+               .name   = "88pm80x-regulator",
+               .owner  = THIS_MODULE,
+       },
+       .probe          = pm800_regulator_probe,
+       .remove         = pm800_regulator_remove,
+};
+
+module_platform_driver(pm800_regulator_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Joseph(Yossi) Hanin <yhanin@marvell.com>");
+MODULE_DESCRIPTION("Regulator Driver for Marvell 88PM800 PMIC");
+MODULE_ALIAS("platform:88pm800-regulator");
index 8a7cb1f43046acf8c17572088d250c5396a9b070..70230974468cb97ae9c76fe9fdb69ebc04e13560 100644 (file)
@@ -346,7 +346,7 @@ static int pm8607_regulator_probe(struct platform_device *pdev)
 {
        struct pm860x_chip *chip = dev_get_drvdata(pdev->dev.parent);
        struct pm8607_regulator_info *info = NULL;
-       struct regulator_init_data *pdata = pdev->dev.platform_data;
+       struct regulator_init_data *pdata = dev_get_platdata(&pdev->dev);
        struct regulator_config config = { };
        struct resource *res;
        int i;
index f1e6ad98eebaa2c784a748e700785af479b80e96..a0f0bc4deca59d656065324418fb58386cd4cf5a 100644 (file)
@@ -64,15 +64,21 @@ config REGULATOR_USERSPACE_CONSUMER
 
          If unsure, say no.
 
-config REGULATOR_GPIO
-       tristate "GPIO regulator support"
-       depends on GPIOLIB
+config REGULATOR_88PM800
+       tristate "Marvell 88PM800 Power regulators"
+       depends on MFD_88PM800
        help
-         This driver provides support for regulators that can be
-         controlled via gpios.
-         It is capable of supporting current and voltage regulators
-         and the platform has to provide a mapping of GPIO-states
-         to target volts/amps.
+         This driver supports Marvell 88PM800 voltage regulator chips.
+         It delivers digitally programmable output,
+         the voltage is programmed via I2C interface.
+         It's suitable to support PXA988 chips to control VCC_MAIN and
+         various voltages.
+
+config REGULATOR_88PM8607
+       bool "Marvell 88PM8607 Power regulators"
+       depends on MFD_88PM860X=y
+       help
+         This driver supports 88PM8607 voltage regulator chips.
 
 config REGULATOR_AD5398
        tristate "Analog Devices AD5398/AD5821 regulators"
@@ -81,6 +87,14 @@ config REGULATOR_AD5398
          This driver supports AD5398 and AD5821 current regulator chips.
          If building into module, its name is ad5398.ko.
 
+config REGULATOR_ANATOP
+       tristate "Freescale i.MX on-chip ANATOP LDO regulators"
+       depends on MFD_SYSCON
+       help
+         Say y here to support Freescale i.MX on-chip ANATOP LDOs
+         regulators. It is recommended that this option be
+         enabled on i.MX6 platform.
+
 config REGULATOR_AAT2870
        tristate "AnalogicTech AAT2870 Regulators"
        depends on MFD_AAT2870_CORE
@@ -88,6 +102,22 @@ config REGULATOR_AAT2870
          If you have a AnalogicTech AAT2870 say Y to enable the
          regulator driver.
 
+config REGULATOR_AB3100
+       tristate "ST-Ericsson AB3100 Regulator functions"
+       depends on AB3100_CORE
+       default y if AB3100_CORE
+       help
+        These regulators correspond to functionality in the
+        AB3100 analog baseband dealing with power regulators
+        for the system.
+
+config REGULATOR_AB8500
+       bool "ST-Ericsson AB8500 Power Regulators"
+       depends on AB8500_CORE
+       help
+         This driver supports the regulators found on the ST-Ericsson mixed
+         signal AB8500 PMIC
+
 config REGULATOR_ARIZONA
        tristate "Wolfson Arizona class devices"
        depends on MFD_ARIZONA
@@ -96,6 +126,13 @@ config REGULATOR_ARIZONA
          Support for the regulators found on Wolfson Arizona class
          devices.
 
+config REGULATOR_AS3711
+       tristate "AS3711 PMIC"
+       depends on MFD_AS3711
+       help
+         This driver provides support for the voltage regulators on the
+         AS3711 PMIC
+
 config REGULATOR_DA903X
        tristate "Dialog Semiconductor DA9030/DA9034 regulators"
        depends on PMIC_DA903X
@@ -120,6 +157,27 @@ config REGULATOR_DA9055
          This driver can also be built as a module. If so, the module
          will be called da9055-regulator.
 
+config REGULATOR_DA9210
+       tristate "Dialog Semiconductor DA9210 regulator"
+       depends on I2C
+       select REGMAP_I2C
+       help
+         Say y here to support for the Dialog Semiconductor DA9210.
+         The DA9210 is a multi-phase synchronous step down
+         converter 12A DC-DC Buck controlled through an I2C
+         interface.
+
+config REGULATOR_DBX500_PRCMU
+       bool
+
+config REGULATOR_DB8500_PRCMU
+       bool "ST-Ericsson DB8500 Voltage Domain Regulators"
+       depends on MFD_DB8500_PRCMU
+       select REGULATOR_DBX500_PRCMU
+       help
+         This driver supports the voltage domain regulators controlled by the
+         DB8500 PRCMU
+
 config REGULATOR_FAN53555
        tristate "Fairchild FAN53555 Regulator"
        depends on I2C
@@ -131,44 +189,57 @@ config REGULATOR_FAN53555
          input voltage supply of 2.5V to 5.5V. The output voltage is
          programmed through an I2C interface.
 
-config REGULATOR_ANATOP
-       tristate "Freescale i.MX on-chip ANATOP LDO regulators"
-       depends on MFD_SYSCON
+config REGULATOR_GPIO
+       tristate "GPIO regulator support"
+       depends on GPIOLIB
        help
-         Say y here to support Freescale i.MX on-chip ANATOP LDOs
-         regulators. It is recommended that this option be
-         enabled on i.MX6 platform.
+         This driver provides support for regulators that can be
+         controlled via gpios.
+         It is capable of supporting current and voltage regulators
+         and the platform has to provide a mapping of GPIO-states
+         to target volts/amps.
 
-config REGULATOR_MC13XXX_CORE
-       tristate
+config REGULATOR_ISL6271A
+       tristate "Intersil ISL6271A Power regulator"
+       depends on I2C
+       help
+         This driver supports ISL6271A voltage regulator chip.
 
-config REGULATOR_MC13783
-       tristate "Freescale MC13783 regulator driver"
-       depends on MFD_MC13783
-       select REGULATOR_MC13XXX_CORE
+config REGULATOR_LP3971
+       tristate "National Semiconductors LP3971 PMIC regulator driver"
+       depends on I2C
        help
-         Say y here to support the regulators found on the Freescale MC13783
-         PMIC.
+        Say Y here to support the voltage regulators and convertors
+        on National Semiconductors LP3971 PMIC
 
-config REGULATOR_MC13892
-       tristate "Freescale MC13892 regulator driver"
-       depends on MFD_MC13XXX
-       select REGULATOR_MC13XXX_CORE
+config REGULATOR_LP3972
+       tristate "National Semiconductors LP3972 PMIC regulator driver"
+       depends on I2C
        help
-         Say y here to support the regulators found on the Freescale MC13892
-         PMIC.
+        Say Y here to support the voltage regulators and convertors
+        on National Semiconductors LP3972 PMIC
 
-config REGULATOR_ISL6271A
-       tristate "Intersil ISL6271A Power regulator"
+config REGULATOR_LP872X
+       bool "TI/National Semiconductor LP8720/LP8725 voltage regulators"
+       depends on I2C=y
+       select REGMAP_I2C
+       help
+         This driver supports LP8720/LP8725 PMIC
+
+config REGULATOR_LP8755
+       tristate "TI LP8755 High Performance PMU driver"
        depends on I2C
+       select REGMAP_I2C
        help
-         This driver supports ISL6271A voltage regulator chip.
+         This driver supports LP8755 High Performance PMU driver. This
+         chip contains six step-down DC/DC converters which can support
+         9 mode multiphase configuration.
 
-config REGULATOR_88PM8607
-       bool "Marvell 88PM8607 Power regulators"
-       depends on MFD_88PM860X=y
+config REGULATOR_LP8788
+       bool "TI LP8788 Power Regulators"
+       depends on MFD_LP8788
        help
-         This driver supports 88PM8607 voltage regulator chips.
+         This driver supports LP8788 voltage regulator chip.
 
 config REGULATOR_MAX1586
        tristate "Maxim 1586/1587 voltage regulator"
@@ -259,48 +330,43 @@ config REGULATOR_MAX77693
          and one current regulator 'CHARGER'. This is suitable for
          Exynos-4x12 chips.
 
-config REGULATOR_PCAP
-       tristate "Motorola PCAP2 regulator driver"
-       depends on EZX_PCAP
-       help
-        This driver provides support for the voltage regulators of the
-        PCAP2 PMIC.
+config REGULATOR_MC13XXX_CORE
+       tristate
 
-config REGULATOR_LP3971
-       tristate "National Semiconductors LP3971 PMIC regulator driver"
-       depends on I2C
+config REGULATOR_MC13783
+       tristate "Freescale MC13783 regulator driver"
+       depends on MFD_MC13783
+       select REGULATOR_MC13XXX_CORE
        help
-        Say Y here to support the voltage regulators and convertors
-        on National Semiconductors LP3971 PMIC
+         Say y here to support the regulators found on the Freescale MC13783
+         PMIC.
 
-config REGULATOR_LP3972
-       tristate "National Semiconductors LP3972 PMIC regulator driver"
-       depends on I2C
+config REGULATOR_MC13892
+       tristate "Freescale MC13892 regulator driver"
+       depends on MFD_MC13XXX
+       select REGULATOR_MC13XXX_CORE
        help
-        Say Y here to support the voltage regulators and convertors
-        on National Semiconductors LP3972 PMIC
+         Say y here to support the regulators found on the Freescale MC13892
+         PMIC.
 
-config REGULATOR_LP872X
-       bool "TI/National Semiconductor LP8720/LP8725 voltage regulators"
-       depends on I2C=y
-       select REGMAP_I2C
+config REGULATOR_PALMAS
+       tristate "TI Palmas PMIC Regulators"
+       depends on MFD_PALMAS
        help
-         This driver supports LP8720/LP8725 PMIC
+         If you wish to control the regulators on the Palmas series of
+         chips say Y here. This will enable support for all the software
+         controllable SMPS/LDO regulators.
 
-config REGULATOR_LP8755
-       tristate "TI LP8755 High Performance PMU driver"
-       depends on I2C
-       select REGMAP_I2C
-       help
-         This driver supports LP8755 High Performance PMU driver. This
-         chip contains six step-down DC/DC converters which can support
-         9 mode multiphase configuration.
+         The regulators available on Palmas series chips vary depending
+         on the muxing. This is handled automatically in the driver by
+         reading the mux info from OTP.
 
-config REGULATOR_LP8788
-       bool "TI LP8788 Power Regulators"
-       depends on MFD_LP8788
+config REGULATOR_PCAP
+       tristate "Motorola PCAP2 regulator driver"
+       depends on EZX_PCAP
        help
-         This driver supports LP8788 voltage regulator chip.
+        This driver provides support for the voltage regulators of the
+        PCAP2 PMIC.
 
 config REGULATOR_PCF50633
        tristate "NXP PCF50633 regulator driver"
@@ -309,6 +375,14 @@ config REGULATOR_PCF50633
         Say Y here to support the voltage regulators and convertors
         on PCF50633
 
+config REGULATOR_PFUZE100
+       tristate "Support regulators on Freescale PFUZE100 PMIC"
+       depends on I2C
+       select REGMAP_I2C
+       help
+         Say y here to support the regulators found on the Freescale PFUZE100
+         PMIC.
+
 config REGULATOR_RC5T583
        tristate "RICOH RC5T583 Power regulators"
        depends on MFD_RC5T583
@@ -335,44 +409,15 @@ config REGULATOR_S5M8767
         via I2C bus. S5M8767A have 9 Bucks and 28 LDOs output and
         supports DVS mode with 8bits of output voltage control.
 
-config REGULATOR_AB3100
-       tristate "ST-Ericsson AB3100 Regulator functions"
-       depends on AB3100_CORE
-       default y if AB3100_CORE
-       help
-        These regulators correspond to functionality in the
-        AB3100 analog baseband dealing with power regulators
-        for the system.
-
-config REGULATOR_AB8500
-       bool "ST-Ericsson AB8500 Power Regulators"
-       depends on AB8500_CORE
-       help
-         This driver supports the regulators found on the ST-Ericsson mixed
-         signal AB8500 PMIC
-
-config REGULATOR_DBX500_PRCMU
-       bool
-
-config REGULATOR_DB8500_PRCMU
-       bool "ST-Ericsson DB8500 Voltage Domain Regulators"
-       depends on MFD_DB8500_PRCMU
-       select REGULATOR_DBX500_PRCMU
-       help
-         This driver supports the voltage domain regulators controlled by the
-         DB8500 PRCMU
-
-config REGULATOR_PALMAS
-       tristate "TI Palmas PMIC Regulators"
-       depends on MFD_PALMAS
+config REGULATOR_TI_ABB
+       bool "TI Adaptive Body Bias on-chip LDO"
+       depends on ARCH_OMAP
        help
-         If you wish to control the regulators on the Palmas series of
-         chips say Y here. This will enable support for all the software
-         controllable SMPS/LDO regulators.
-
-         The regulators available on Palmas series chips vary depending
-         on the muxing. This is handled automatically in the driver by
-         reading the mux info from OTP.
+         Select this option to support Texas Instruments' on-chip Adaptive Body
+         Bias (ABB) LDO regulators. It is recommended that this option be
+         enabled on required TI SoC. Certain Operating Performance Points
+         on TI SoCs may be unstable without enabling this as it provides
+         device specific optimized bias to allow/optimize functionality.
 
 config REGULATOR_TPS51632
        tristate "TI TPS51632 Power Regulator"
@@ -481,16 +526,6 @@ config REGULATOR_TWL4030
          This driver supports the voltage regulators provided by
          this family of companion chips.
 
-config REGULATOR_TI_ABB
-       bool "TI Adaptive Body Bias on-chip LDO"
-       depends on ARCH_OMAP
-       help
-         Select this option to support Texas Instruments' on-chip Adaptive Body
-         Bias (ABB) LDO regulators. It is recommended that this option be
-         enabled on required TI SoC. Certain Operating Performance Points
-         on TI SoCs may be unstable without enabling this as it provides
-         device specific optimized bias to allow/optimize functionality.
-
 config REGULATOR_VEXPRESS
        tristate "Versatile Express regulators"
        depends on VEXPRESS_CONFIG
@@ -526,12 +561,5 @@ config REGULATOR_WM8994
          This driver provides support for the voltage regulators on the
          WM8994 CODEC.
 
-config REGULATOR_AS3711
-       tristate "AS3711 PMIC"
-       depends on MFD_AS3711
-       help
-         This driver provides support for the voltage regulators on the
-         AS3711 PMIC
-
 endif
 
index ba4a3cf3afec57eef79179abf72ae1f51d5b3073..6d141a9295a06283fd9fd7a2551d7c8aa388946f 100644 (file)
@@ -3,12 +3,13 @@
 #
 
 
-obj-$(CONFIG_REGULATOR) += core.o dummy.o fixed-helper.o
+obj-$(CONFIG_REGULATOR) += core.o dummy.o fixed-helper.o helpers.o
 obj-$(CONFIG_OF) += of_regulator.o
 obj-$(CONFIG_REGULATOR_FIXED_VOLTAGE) += fixed.o
 obj-$(CONFIG_REGULATOR_VIRTUAL_CONSUMER) += virtual.o
 obj-$(CONFIG_REGULATOR_USERSPACE_CONSUMER) += userspace-consumer.o
 
+obj-$(CONFIG_REGULATOR_88PM800) += 88pm800.o
 obj-$(CONFIG_REGULATOR_88PM8607) += 88pm8607.o
 obj-$(CONFIG_REGULATOR_AAT2870) += aat2870-regulator.o
 obj-$(CONFIG_REGULATOR_AB3100) += ab3100.o
@@ -20,6 +21,7 @@ obj-$(CONFIG_REGULATOR_AS3711) += as3711-regulator.o
 obj-$(CONFIG_REGULATOR_DA903X) += da903x.o
 obj-$(CONFIG_REGULATOR_DA9052) += da9052-regulator.o
 obj-$(CONFIG_REGULATOR_DA9055) += da9055-regulator.o
+obj-$(CONFIG_REGULATOR_DA9210) += da9210-regulator.o
 obj-$(CONFIG_REGULATOR_DBX500_PRCMU) += dbx500-prcmu.o
 obj-$(CONFIG_REGULATOR_DB8500_PRCMU) += db8500-prcmu.o
 obj-$(CONFIG_REGULATOR_FAN53555) += fan53555.o
@@ -46,12 +48,14 @@ obj-$(CONFIG_REGULATOR_MC13783) += mc13783-regulator.o
 obj-$(CONFIG_REGULATOR_MC13892) += mc13892-regulator.o
 obj-$(CONFIG_REGULATOR_MC13XXX_CORE) +=  mc13xxx-regulator-core.o
 obj-$(CONFIG_REGULATOR_PALMAS) += palmas-regulator.o
+obj-$(CONFIG_REGULATOR_PFUZE100) += pfuze100-regulator.o
 obj-$(CONFIG_REGULATOR_TPS51632) += tps51632-regulator.o
 obj-$(CONFIG_REGULATOR_PCAP) += pcap-regulator.o
 obj-$(CONFIG_REGULATOR_PCF50633) += pcf50633-regulator.o
 obj-$(CONFIG_REGULATOR_RC5T583)  += rc5t583-regulator.o
 obj-$(CONFIG_REGULATOR_S2MPS11) += s2mps11.o
 obj-$(CONFIG_REGULATOR_S5M8767) += s5m8767.o
+obj-$(CONFIG_REGULATOR_TI_ABB) += ti-abb-regulator.o
 obj-$(CONFIG_REGULATOR_TPS6105X) += tps6105x-regulator.o
 obj-$(CONFIG_REGULATOR_TPS62360) += tps62360-regulator.o
 obj-$(CONFIG_REGULATOR_TPS65023) += tps65023-regulator.o
@@ -64,7 +68,6 @@ obj-$(CONFIG_REGULATOR_TPS65910) += tps65910-regulator.o
 obj-$(CONFIG_REGULATOR_TPS65912) += tps65912-regulator.o
 obj-$(CONFIG_REGULATOR_TPS80031) += tps80031-regulator.o
 obj-$(CONFIG_REGULATOR_TWL4030) += twl-regulator.o
-obj-$(CONFIG_REGULATOR_TI_ABB) += ti-abb-regulator.o
 obj-$(CONFIG_REGULATOR_VEXPRESS) += vexpress.o
 obj-$(CONFIG_REGULATOR_WM831X) += wm831x-dcdc.o
 obj-$(CONFIG_REGULATOR_WM831X) += wm831x-isink.o
index 8b5876356db97da46cd5ac0dd59dccb4b2521361..881159dfcb5e70fc3532d7a9ccc20cad99354459 100644 (file)
@@ -174,7 +174,7 @@ static int aat2870_regulator_probe(struct platform_device *pdev)
 
        config.dev = &pdev->dev;
        config.driver_data = ri;
-       config.init_data = pdev->dev.platform_data;
+       config.init_data = dev_get_platdata(&pdev->dev);
 
        rdev = regulator_register(&ri->desc, &config);
        if (IS_ERR(rdev)) {
index 3be9e46594a134286b59010c3fa83511fa9c510e..7d5eaa874b2da9337e387b1271bd2e38a3d25d2e 100644 (file)
@@ -660,7 +660,7 @@ ab3100_regulator_of_probe(struct platform_device *pdev, struct device_node *np)
 
 static int ab3100_regulators_probe(struct platform_device *pdev)
 {
-       struct ab3100_platform_data *plfdata = pdev->dev.platform_data;
+       struct ab3100_platform_data *plfdata = dev_get_platdata(&pdev->dev);
        struct device_node *np = pdev->dev.of_node;
        int err = 0;
        u8 data;
index 6b981b5faa7015c53f10a463d7a53097edeab075..b2b203cb6b2f9373a0d09f5773f3d912922614dd 100644 (file)
@@ -214,7 +214,7 @@ MODULE_DEVICE_TABLE(i2c, ad5398_id);
 static int ad5398_probe(struct i2c_client *client,
                                const struct i2c_device_id *id)
 {
-       struct regulator_init_data *init_data = client->dev.platform_data;
+       struct regulator_init_data *init_data = dev_get_platdata(&client->dev);
        struct regulator_config config = { };
        struct ad5398_chip_info *chip;
        const struct ad5398_current_data_format *df =
index 3da6bd6950cf9cafe43b1bd296d13f5f61c7b577..8406cd745da29e6a60a76624d1199516d7d46121 100644 (file)
@@ -30,102 +30,6 @@ struct as3711_regulator {
        struct regulator_dev *rdev;
 };
 
-static int as3711_list_voltage_sd(struct regulator_dev *rdev,
-                                 unsigned int selector)
-{
-       if (selector >= rdev->desc->n_voltages)
-               return -EINVAL;
-
-       if (!selector)
-               return 0;
-       if (selector < 0x41)
-               return 600000 + selector * 12500;
-       if (selector < 0x71)
-               return 1400000 + (selector - 0x40) * 25000;
-       return 2600000 + (selector - 0x70) * 50000;
-}
-
-static int as3711_list_voltage_aldo(struct regulator_dev *rdev,
-                                   unsigned int selector)
-{
-       if (selector >= rdev->desc->n_voltages)
-               return -EINVAL;
-
-       if (selector < 0x10)
-               return 1200000 + selector * 50000;
-       return 1800000 + (selector - 0x10) * 100000;
-}
-
-static int as3711_list_voltage_dldo(struct regulator_dev *rdev,
-                                   unsigned int selector)
-{
-       if (selector >= rdev->desc->n_voltages ||
-           (selector > 0x10 && selector < 0x20))
-               return -EINVAL;
-
-       if (selector < 0x11)
-               return 900000 + selector * 50000;
-       return 1750000 + (selector - 0x20) * 50000;
-}
-
-static int as3711_bound_check(struct regulator_dev *rdev,
-                             int *min_uV, int *max_uV)
-{
-       struct as3711_regulator *reg = rdev_get_drvdata(rdev);
-       struct as3711_regulator_info *info = reg->reg_info;
-
-       dev_dbg(&rdev->dev, "%s(), %d, %d, %d\n", __func__,
-               *min_uV, rdev->desc->min_uV, info->max_uV);
-
-       if (*max_uV < *min_uV ||
-           *min_uV > info->max_uV || rdev->desc->min_uV > *max_uV)
-               return -EINVAL;
-
-       if (rdev->desc->n_voltages == 1)
-               return 0;
-
-       if (*max_uV > info->max_uV)
-               *max_uV = info->max_uV;
-
-       if (*min_uV < rdev->desc->min_uV)
-               *min_uV = rdev->desc->min_uV;
-
-       return *min_uV;
-}
-
-static int as3711_sel_check(int min, int max, int bottom, int step)
-{
-       int sel, voltage;
-
-       /* Round up min, when dividing: keeps us within the range */
-       sel = DIV_ROUND_UP(min - bottom, step);
-       voltage = sel * step + bottom;
-       pr_debug("%s(): select %d..%d in %d+N*%d: %d\n", __func__,
-              min, max, bottom, step, sel);
-       if (voltage > max)
-               return -EINVAL;
-
-       return sel;
-}
-
-static int as3711_map_voltage_sd(struct regulator_dev *rdev,
-                                int min_uV, int max_uV)
-{
-       int ret;
-
-       ret = as3711_bound_check(rdev, &min_uV, &max_uV);
-       if (ret <= 0)
-               return ret;
-
-       if (min_uV <= 1400000)
-               return as3711_sel_check(min_uV, max_uV, 600000, 12500);
-
-       if (min_uV <= 2600000)
-               return as3711_sel_check(min_uV, max_uV, 1400000, 25000) + 0x40;
-
-       return as3711_sel_check(min_uV, max_uV, 2600000, 50000) + 0x70;
-}
-
 /*
  * The regulator API supports 4 modes of operataion: FAST, NORMAL, IDLE and
  * STANDBY. We map them in the following way to AS3711 SD1-4 DCDC modes:
@@ -180,44 +84,14 @@ static unsigned int as3711_get_mode_sd(struct regulator_dev *rdev)
        return -EINVAL;
 }
 
-static int as3711_map_voltage_aldo(struct regulator_dev *rdev,
-                                 int min_uV, int max_uV)
-{
-       int ret;
-
-       ret = as3711_bound_check(rdev, &min_uV, &max_uV);
-       if (ret <= 0)
-               return ret;
-
-       if (min_uV <= 1800000)
-               return as3711_sel_check(min_uV, max_uV, 1200000, 50000);
-
-       return as3711_sel_check(min_uV, max_uV, 1800000, 100000) + 0x10;
-}
-
-static int as3711_map_voltage_dldo(struct regulator_dev *rdev,
-                                 int min_uV, int max_uV)
-{
-       int ret;
-
-       ret = as3711_bound_check(rdev, &min_uV, &max_uV);
-       if (ret <= 0)
-               return ret;
-
-       if (min_uV <= 1700000)
-               return as3711_sel_check(min_uV, max_uV, 900000, 50000);
-
-       return as3711_sel_check(min_uV, max_uV, 1750000, 50000) + 0x20;
-}
-
 static struct regulator_ops as3711_sd_ops = {
        .is_enabled             = regulator_is_enabled_regmap,
        .enable                 = regulator_enable_regmap,
        .disable                = regulator_disable_regmap,
        .get_voltage_sel        = regulator_get_voltage_sel_regmap,
        .set_voltage_sel        = regulator_set_voltage_sel_regmap,
-       .list_voltage           = as3711_list_voltage_sd,
-       .map_voltage            = as3711_map_voltage_sd,
+       .list_voltage           = regulator_list_voltage_linear_range,
+       .map_voltage            = regulator_map_voltage_linear_range,
        .get_mode               = as3711_get_mode_sd,
        .set_mode               = as3711_set_mode_sd,
 };
@@ -228,8 +102,8 @@ static struct regulator_ops as3711_aldo_ops = {
        .disable                = regulator_disable_regmap,
        .get_voltage_sel        = regulator_get_voltage_sel_regmap,
        .set_voltage_sel        = regulator_set_voltage_sel_regmap,
-       .list_voltage           = as3711_list_voltage_aldo,
-       .map_voltage            = as3711_map_voltage_aldo,
+       .list_voltage           = regulator_list_voltage_linear_range,
+       .map_voltage            = regulator_map_voltage_linear_range,
 };
 
 static struct regulator_ops as3711_dldo_ops = {
@@ -238,8 +112,31 @@ static struct regulator_ops as3711_dldo_ops = {
        .disable                = regulator_disable_regmap,
        .get_voltage_sel        = regulator_get_voltage_sel_regmap,
        .set_voltage_sel        = regulator_set_voltage_sel_regmap,
-       .list_voltage           = as3711_list_voltage_dldo,
-       .map_voltage            = as3711_map_voltage_dldo,
+       .list_voltage           = regulator_list_voltage_linear_range,
+       .map_voltage            = regulator_map_voltage_linear_range,
+};
+
+static const struct regulator_linear_range as3711_sd_ranges[] = {
+       { .min_uV = 612500, .max_uV = 1400000,
+         .min_sel = 0x1, .max_sel = 0x40, .uV_step = 12500 },
+       { .min_uV = 1425000, .max_uV = 2600000,
+         .min_sel = 0x41, .max_sel = 0x70, .uV_step = 25000 },
+       { .min_uV = 2650000, .max_uV = 3350000,
+         .min_sel = 0x71, .max_sel = 0x7f, .uV_step = 50000 },
+};
+
+static const struct regulator_linear_range as3711_aldo_ranges[] = {
+       { .min_uV = 1200000, .max_uV = 1950000,
+         .min_sel = 0, .max_sel = 0xf, .uV_step = 50000 },
+       { .min_uV = 1800000, .max_uV = 3300000,
+         .min_sel = 0x10, .max_sel = 0x1f, .uV_step = 100000 },
+};
+
+static const struct regulator_linear_range as3711_dldo_ranges[] = {
+       { .min_uV = 900000, .max_uV = 1700000,
+         .min_sel = 0, .max_sel = 0x10, .uV_step = 50000 },
+       { .min_uV = 1750000, .max_uV = 3300000,
+         .min_sel = 0x20, .max_sel = 0x3f, .uV_step = 50000 },
 };
 
 #define AS3711_REG(_id, _en_reg, _en_bit, _vmask, _vshift, _min_uV, _max_uV, _sfx)     \
@@ -256,6 +153,8 @@ static struct regulator_ops as3711_dldo_ops = {
                .enable_reg = AS3711_ ## _en_reg,                                       \
                .enable_mask = BIT(_en_bit),                                            \
                .min_uV = _min_uV,                                                      \
+               .linear_ranges = as3711_ ## _sfx ## _ranges,                            \
+               .n_linear_ranges = ARRAY_SIZE(as3711_ ## _sfx ## _ranges),              \
        },                                                                              \
        .max_uV = _max_uV,                                                              \
 }
index 288c75abc19034c06e62772ea6b6cc62242da473..b471706f875aae88c1e09b6a57741699d451ef92 100644 (file)
@@ -323,13 +323,14 @@ static ssize_t regulator_uA_show(struct device *dev,
 }
 static DEVICE_ATTR(microamps, 0444, regulator_uA_show, NULL);
 
-static ssize_t regulator_name_show(struct device *dev,
-                            struct device_attribute *attr, char *buf)
+static ssize_t name_show(struct device *dev, struct device_attribute *attr,
+                        char *buf)
 {
        struct regulator_dev *rdev = dev_get_drvdata(dev);
 
        return sprintf(buf, "%s\n", rdev_get_name(rdev));
 }
+static DEVICE_ATTR_RO(name);
 
 static ssize_t regulator_print_opmode(char *buf, int mode)
 {
@@ -489,15 +490,16 @@ static ssize_t regulator_total_uA_show(struct device *dev,
 }
 static DEVICE_ATTR(requested_microamps, 0444, regulator_total_uA_show, NULL);
 
-static ssize_t regulator_num_users_show(struct device *dev,
-                                     struct device_attribute *attr, char *buf)
+static ssize_t num_users_show(struct device *dev, struct device_attribute *attr,
+                             char *buf)
 {
        struct regulator_dev *rdev = dev_get_drvdata(dev);
        return sprintf(buf, "%d\n", rdev->use_count);
 }
+static DEVICE_ATTR_RO(num_users);
 
-static ssize_t regulator_type_show(struct device *dev,
-                                 struct device_attribute *attr, char *buf)
+static ssize_t type_show(struct device *dev, struct device_attribute *attr,
+                        char *buf)
 {
        struct regulator_dev *rdev = dev_get_drvdata(dev);
 
@@ -509,6 +511,7 @@ static ssize_t regulator_type_show(struct device *dev,
        }
        return sprintf(buf, "unknown\n");
 }
+static DEVICE_ATTR_RO(type);
 
 static ssize_t regulator_suspend_mem_uV_show(struct device *dev,
                                struct device_attribute *attr, char *buf)
@@ -632,12 +635,13 @@ static DEVICE_ATTR(bypass, 0444,
  * These are the only attributes are present for all regulators.
  * Other attributes are a function of regulator functionality.
  */
-static struct device_attribute regulator_dev_attrs[] = {
-       __ATTR(name, 0444, regulator_name_show, NULL),
-       __ATTR(num_users, 0444, regulator_num_users_show, NULL),
-       __ATTR(type, 0444, regulator_type_show, NULL),
-       __ATTR_NULL,
+static struct attribute *regulator_dev_attrs[] = {
+       &dev_attr_name.attr,
+       &dev_attr_num_users.attr,
+       &dev_attr_type.attr,
+       NULL,
 };
+ATTRIBUTE_GROUPS(regulator_dev);
 
 static void regulator_dev_release(struct device *dev)
 {
@@ -648,7 +652,7 @@ static void regulator_dev_release(struct device *dev)
 static struct class regulator_class = {
        .name = "regulator",
        .dev_release = regulator_dev_release,
-       .dev_attrs = regulator_dev_attrs,
+       .dev_groups = regulator_dev_groups,
 };
 
 /* Calculate the new optimum regulator operating mode based on the new total
@@ -984,7 +988,8 @@ static int set_machine_constraints(struct regulator_dev *rdev,
                }
        }
 
-       if (rdev->constraints->ramp_delay && ops->set_ramp_delay) {
+       if ((rdev->constraints->ramp_delay || rdev->constraints->ramp_disable)
+               && ops->set_ramp_delay) {
                ret = ops->set_ramp_delay(rdev, rdev->constraints->ramp_delay);
                if (ret < 0) {
                        rdev_err(rdev, "failed to set ramp_delay\n");
@@ -1238,7 +1243,7 @@ static struct regulator_dev *regulator_dev_lookup(struct device *dev,
 
 /* Internal regulator request function */
 static struct regulator *_regulator_get(struct device *dev, const char *id,
-                                       int exclusive)
+                                       bool exclusive)
 {
        struct regulator_dev *rdev;
        struct regulator *regulator = ERR_PTR(-EPROBE_DEFER);
@@ -1344,7 +1349,7 @@ out:
  */
 struct regulator *regulator_get(struct device *dev, const char *id)
 {
-       return _regulator_get(dev, id, 0);
+       return _regulator_get(dev, id, false);
 }
 EXPORT_SYMBOL_GPL(regulator_get);
 
@@ -1405,10 +1410,69 @@ EXPORT_SYMBOL_GPL(devm_regulator_get);
  */
 struct regulator *regulator_get_exclusive(struct device *dev, const char *id)
 {
-       return _regulator_get(dev, id, 1);
+       return _regulator_get(dev, id, true);
 }
 EXPORT_SYMBOL_GPL(regulator_get_exclusive);
 
+/**
+ * regulator_get_optional - obtain optional access to a regulator.
+ * @dev: device for regulator "consumer"
+ * @id: Supply name or regulator ID.
+ *
+ * Returns a struct regulator corresponding to the regulator producer,
+ * or IS_ERR() condition containing errno.  Other consumers will be
+ * unable to obtain this reference is held and the use count for the
+ * regulator will be initialised to reflect the current state of the
+ * regulator.
+ *
+ * This is intended for use by consumers for devices which can have
+ * some supplies unconnected in normal use, such as some MMC devices.
+ * It can allow the regulator core to provide stub supplies for other
+ * supplies requested using normal regulator_get() calls without
+ * disrupting the operation of drivers that can handle absent
+ * supplies.
+ *
+ * Use of supply names configured via regulator_set_device_supply() is
+ * strongly encouraged.  It is recommended that the supply name used
+ * should match the name used for the supply and/or the relevant
+ * device pins in the datasheet.
+ */
+struct regulator *regulator_get_optional(struct device *dev, const char *id)
+{
+       return _regulator_get(dev, id, 0);
+}
+EXPORT_SYMBOL_GPL(regulator_get_optional);
+
+/**
+ * devm_regulator_get_optional - Resource managed regulator_get_optional()
+ * @dev: device for regulator "consumer"
+ * @id: Supply name or regulator ID.
+ *
+ * Managed regulator_get_optional(). Regulators returned from this
+ * function are automatically regulator_put() on driver detach. See
+ * regulator_get_optional() for more information.
+ */
+struct regulator *devm_regulator_get_optional(struct device *dev,
+                                             const char *id)
+{
+       struct regulator **ptr, *regulator;
+
+       ptr = devres_alloc(devm_regulator_release, sizeof(*ptr), GFP_KERNEL);
+       if (!ptr)
+               return ERR_PTR(-ENOMEM);
+
+       regulator = regulator_get_optional(dev, id);
+       if (!IS_ERR(regulator)) {
+               *ptr = regulator;
+               devres_add(dev, ptr);
+       } else {
+               devres_free(ptr);
+       }
+
+       return regulator;
+}
+EXPORT_SYMBOL_GPL(devm_regulator_get_optional);
+
 /* Locks held by regulator_put() */
 static void _regulator_put(struct regulator *regulator)
 {
@@ -1890,8 +1954,9 @@ int regulator_disable_deferred(struct regulator *regulator, int ms)
        rdev->deferred_disables++;
        mutex_unlock(&rdev->mutex);
 
-       ret = schedule_delayed_work(&rdev->disable_work,
-                                   msecs_to_jiffies(ms));
+       ret = queue_delayed_work(system_power_efficient_wq,
+                                &rdev->disable_work,
+                                msecs_to_jiffies(ms));
        if (ret < 0)
                return ret;
        else
@@ -1899,77 +1964,6 @@ int regulator_disable_deferred(struct regulator *regulator, int ms)
 }
 EXPORT_SYMBOL_GPL(regulator_disable_deferred);
 
-/**
- * regulator_is_enabled_regmap - standard is_enabled() for regmap users
- *
- * @rdev: regulator to operate on
- *
- * Regulators that use regmap for their register I/O can set the
- * enable_reg and enable_mask fields in their descriptor and then use
- * this as their is_enabled operation, saving some code.
- */
-int regulator_is_enabled_regmap(struct regulator_dev *rdev)
-{
-       unsigned int val;
-       int ret;
-
-       ret = regmap_read(rdev->regmap, rdev->desc->enable_reg, &val);
-       if (ret != 0)
-               return ret;
-
-       if (rdev->desc->enable_is_inverted)
-               return (val & rdev->desc->enable_mask) == 0;
-       else
-               return (val & rdev->desc->enable_mask) != 0;
-}
-EXPORT_SYMBOL_GPL(regulator_is_enabled_regmap);
-
-/**
- * regulator_enable_regmap - standard enable() for regmap users
- *
- * @rdev: regulator to operate on
- *
- * Regulators that use regmap for their register I/O can set the
- * enable_reg and enable_mask fields in their descriptor and then use
- * this as their enable() operation, saving some code.
- */
-int regulator_enable_regmap(struct regulator_dev *rdev)
-{
-       unsigned int val;
-
-       if (rdev->desc->enable_is_inverted)
-               val = 0;
-       else
-               val = rdev->desc->enable_mask;
-
-       return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
-                                 rdev->desc->enable_mask, val);
-}
-EXPORT_SYMBOL_GPL(regulator_enable_regmap);
-
-/**
- * regulator_disable_regmap - standard disable() for regmap users
- *
- * @rdev: regulator to operate on
- *
- * Regulators that use regmap for their register I/O can set the
- * enable_reg and enable_mask fields in their descriptor and then use
- * this as their disable() operation, saving some code.
- */
-int regulator_disable_regmap(struct regulator_dev *rdev)
-{
-       unsigned int val;
-
-       if (rdev->desc->enable_is_inverted)
-               val = rdev->desc->enable_mask;
-       else
-               val = 0;
-
-       return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
-                                 rdev->desc->enable_mask, val);
-}
-EXPORT_SYMBOL_GPL(regulator_disable_regmap);
-
 static int _regulator_is_enabled(struct regulator_dev *rdev)
 {
        /* A GPIO control always takes precedence */
@@ -2054,55 +2048,6 @@ int regulator_count_voltages(struct regulator *regulator)
 }
 EXPORT_SYMBOL_GPL(regulator_count_voltages);
 
-/**
- * regulator_list_voltage_linear - List voltages with simple calculation
- *
- * @rdev: Regulator device
- * @selector: Selector to convert into a voltage
- *
- * Regulators with a simple linear mapping between voltages and
- * selectors can set min_uV and uV_step in the regulator descriptor
- * and then use this function as their list_voltage() operation,
- */
-int regulator_list_voltage_linear(struct regulator_dev *rdev,
-                                 unsigned int selector)
-{
-       if (selector >= rdev->desc->n_voltages)
-               return -EINVAL;
-       if (selector < rdev->desc->linear_min_sel)
-               return 0;
-
-       selector -= rdev->desc->linear_min_sel;
-
-       return rdev->desc->min_uV + (rdev->desc->uV_step * selector);
-}
-EXPORT_SYMBOL_GPL(regulator_list_voltage_linear);
-
-/**
- * regulator_list_voltage_table - List voltages with table based mapping
- *
- * @rdev: Regulator device
- * @selector: Selector to convert into a voltage
- *
- * Regulators with table based mapping between voltages and
- * selectors can set volt_table in the regulator descriptor
- * and then use this function as their list_voltage() operation.
- */
-int regulator_list_voltage_table(struct regulator_dev *rdev,
-                                unsigned int selector)
-{
-       if (!rdev->desc->volt_table) {
-               BUG_ON(!rdev->desc->volt_table);
-               return -EINVAL;
-       }
-
-       if (selector >= rdev->desc->n_voltages)
-               return -EINVAL;
-
-       return rdev->desc->volt_table[selector];
-}
-EXPORT_SYMBOL_GPL(regulator_list_voltage_table);
-
 /**
  * regulator_list_voltage - enumerate supported voltages
  * @regulator: regulator source
@@ -2197,177 +2142,6 @@ int regulator_is_supported_voltage(struct regulator *regulator,
 }
 EXPORT_SYMBOL_GPL(regulator_is_supported_voltage);
 
-/**
- * regulator_get_voltage_sel_regmap - standard get_voltage_sel for regmap users
- *
- * @rdev: regulator to operate on
- *
- * Regulators that use regmap for their register I/O can set the
- * vsel_reg and vsel_mask fields in their descriptor and then use this
- * as their get_voltage_vsel operation, saving some code.
- */
-int regulator_get_voltage_sel_regmap(struct regulator_dev *rdev)
-{
-       unsigned int val;
-       int ret;
-
-       ret = regmap_read(rdev->regmap, rdev->desc->vsel_reg, &val);
-       if (ret != 0)
-               return ret;
-
-       val &= rdev->desc->vsel_mask;
-       val >>= ffs(rdev->desc->vsel_mask) - 1;
-
-       return val;
-}
-EXPORT_SYMBOL_GPL(regulator_get_voltage_sel_regmap);
-
-/**
- * regulator_set_voltage_sel_regmap - standard set_voltage_sel for regmap users
- *
- * @rdev: regulator to operate on
- * @sel: Selector to set
- *
- * Regulators that use regmap for their register I/O can set the
- * vsel_reg and vsel_mask fields in their descriptor and then use this
- * as their set_voltage_vsel operation, saving some code.
- */
-int regulator_set_voltage_sel_regmap(struct regulator_dev *rdev, unsigned sel)
-{
-       int ret;
-
-       sel <<= ffs(rdev->desc->vsel_mask) - 1;
-
-       ret = regmap_update_bits(rdev->regmap, rdev->desc->vsel_reg,
-                                 rdev->desc->vsel_mask, sel);
-       if (ret)
-               return ret;
-
-       if (rdev->desc->apply_bit)
-               ret = regmap_update_bits(rdev->regmap, rdev->desc->apply_reg,
-                                        rdev->desc->apply_bit,
-                                        rdev->desc->apply_bit);
-       return ret;
-}
-EXPORT_SYMBOL_GPL(regulator_set_voltage_sel_regmap);
-
-/**
- * regulator_map_voltage_iterate - map_voltage() based on list_voltage()
- *
- * @rdev: Regulator to operate on
- * @min_uV: Lower bound for voltage
- * @max_uV: Upper bound for voltage
- *
- * Drivers implementing set_voltage_sel() and list_voltage() can use
- * this as their map_voltage() operation.  It will find a suitable
- * voltage by calling list_voltage() until it gets something in bounds
- * for the requested voltages.
- */
-int regulator_map_voltage_iterate(struct regulator_dev *rdev,
-                                 int min_uV, int max_uV)
-{
-       int best_val = INT_MAX;
-       int selector = 0;
-       int i, ret;
-
-       /* Find the smallest voltage that falls within the specified
-        * range.
-        */
-       for (i = 0; i < rdev->desc->n_voltages; i++) {
-               ret = rdev->desc->ops->list_voltage(rdev, i);
-               if (ret < 0)
-                       continue;
-
-               if (ret < best_val && ret >= min_uV && ret <= max_uV) {
-                       best_val = ret;
-                       selector = i;
-               }
-       }
-
-       if (best_val != INT_MAX)
-               return selector;
-       else
-               return -EINVAL;
-}
-EXPORT_SYMBOL_GPL(regulator_map_voltage_iterate);
-
-/**
- * regulator_map_voltage_ascend - map_voltage() for ascendant voltage list
- *
- * @rdev: Regulator to operate on
- * @min_uV: Lower bound for voltage
- * @max_uV: Upper bound for voltage
- *
- * Drivers that have ascendant voltage list can use this as their
- * map_voltage() operation.
- */
-int regulator_map_voltage_ascend(struct regulator_dev *rdev,
-                                int min_uV, int max_uV)
-{
-       int i, ret;
-
-       for (i = 0; i < rdev->desc->n_voltages; i++) {
-               ret = rdev->desc->ops->list_voltage(rdev, i);
-               if (ret < 0)
-                       continue;
-
-               if (ret > max_uV)
-                       break;
-
-               if (ret >= min_uV && ret <= max_uV)
-                       return i;
-       }
-
-       return -EINVAL;
-}
-EXPORT_SYMBOL_GPL(regulator_map_voltage_ascend);
-
-/**
- * regulator_map_voltage_linear - map_voltage() for simple linear mappings
- *
- * @rdev: Regulator to operate on
- * @min_uV: Lower bound for voltage
- * @max_uV: Upper bound for voltage
- *
- * Drivers providing min_uV and uV_step in their regulator_desc can
- * use this as their map_voltage() operation.
- */
-int regulator_map_voltage_linear(struct regulator_dev *rdev,
-                                int min_uV, int max_uV)
-{
-       int ret, voltage;
-
-       /* Allow uV_step to be 0 for fixed voltage */
-       if (rdev->desc->n_voltages == 1 && rdev->desc->uV_step == 0) {
-               if (min_uV <= rdev->desc->min_uV && rdev->desc->min_uV <= max_uV)
-                       return 0;
-               else
-                       return -EINVAL;
-       }
-
-       if (!rdev->desc->uV_step) {
-               BUG_ON(!rdev->desc->uV_step);
-               return -EINVAL;
-       }
-
-       if (min_uV < rdev->desc->min_uV)
-               min_uV = rdev->desc->min_uV;
-
-       ret = DIV_ROUND_UP(min_uV - rdev->desc->min_uV, rdev->desc->uV_step);
-       if (ret < 0)
-               return ret;
-
-       ret += rdev->desc->linear_min_sel;
-
-       /* Map back into a voltage to verify we're still in bounds */
-       voltage = rdev->desc->ops->list_voltage(rdev, ret);
-       if (voltage < min_uV || voltage > max_uV)
-               return -EINVAL;
-
-       return ret;
-}
-EXPORT_SYMBOL_GPL(regulator_map_voltage_linear);
-
 static int _regulator_do_set_voltage(struct regulator_dev *rdev,
                                     int min_uV, int max_uV)
 {
@@ -2438,8 +2212,8 @@ static int _regulator_do_set_voltage(struct regulator_dev *rdev,
        }
 
        /* Call set_voltage_time_sel if successfully obtained old_selector */
-       if (ret == 0 && _regulator_is_enabled(rdev) && old_selector >= 0 &&
-           old_selector != selector && rdev->desc->ops->set_voltage_time_sel) {
+       if (ret == 0 && !rdev->constraints->ramp_disable && old_selector >= 0
+               && old_selector != selector) {
 
                delay = rdev->desc->ops->set_voltage_time_sel(rdev,
                                                old_selector, selector);
@@ -2970,47 +2744,6 @@ out:
 }
 EXPORT_SYMBOL_GPL(regulator_set_optimum_mode);
 
-/**
- * regulator_set_bypass_regmap - Default set_bypass() using regmap
- *
- * @rdev: device to operate on.
- * @enable: state to set.
- */
-int regulator_set_bypass_regmap(struct regulator_dev *rdev, bool enable)
-{
-       unsigned int val;
-
-       if (enable)
-               val = rdev->desc->bypass_mask;
-       else
-               val = 0;
-
-       return regmap_update_bits(rdev->regmap, rdev->desc->bypass_reg,
-                                 rdev->desc->bypass_mask, val);
-}
-EXPORT_SYMBOL_GPL(regulator_set_bypass_regmap);
-
-/**
- * regulator_get_bypass_regmap - Default get_bypass() using regmap
- *
- * @rdev: device to operate on.
- * @enable: current state.
- */
-int regulator_get_bypass_regmap(struct regulator_dev *rdev, bool *enable)
-{
-       unsigned int val;
-       int ret;
-
-       ret = regmap_read(rdev->regmap, rdev->desc->bypass_reg, &val);
-       if (ret != 0)
-               return ret;
-
-       *enable = val & rdev->desc->bypass_mask;
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(regulator_get_bypass_regmap);
-
 /**
  * regulator_allow_bypass - allow the regulator to go into bypass mode
  *
@@ -3740,8 +3473,11 @@ void regulator_unregister(struct regulator_dev *rdev)
        if (rdev == NULL)
                return;
 
-       if (rdev->supply)
+       if (rdev->supply) {
+               while (rdev->use_count--)
+                       regulator_disable(rdev->supply);
                regulator_put(rdev->supply);
+       }
        mutex_lock(&regulator_list_mutex);
        debugfs_remove_recursive(rdev->debugfs);
        flush_work(&rdev->disable_work.work);
index 2afa5730f324ba1ef1bb068b5f63c79e41c11a8f..f06854cf8cf50739866386538668f4eb5acbc34a 100644 (file)
@@ -252,39 +252,12 @@ static int da9034_set_dvc_voltage_sel(struct regulator_dev *rdev,
        return ret;
 }
 
-static int da9034_map_ldo12_voltage(struct regulator_dev *rdev,
-                                   int min_uV, int max_uV)
-{
-       struct da903x_regulator_info *info = rdev_get_drvdata(rdev);
-       int sel;
-
-       if (check_range(info, min_uV, max_uV)) {
-               pr_err("invalid voltage range (%d, %d) uV\n", min_uV, max_uV);
-               return -EINVAL;
-       }
-
-       sel = DIV_ROUND_UP(min_uV - info->desc.min_uV, info->desc.uV_step);
-       sel = (sel >= 20) ? sel - 12 : ((sel > 7) ? 8 : sel);
-
-       return sel;
-}
-
-static int da9034_list_ldo12_voltage(struct regulator_dev *rdev,
-                                    unsigned selector)
-{
-       struct da903x_regulator_info *info = rdev_get_drvdata(rdev);
-       int volt;
-
-       if (selector >= 8)
-               volt = 2700000 + rdev->desc->uV_step * (selector - 8);
-       else
-               volt = rdev->desc->min_uV + rdev->desc->uV_step * selector;
-
-       if (volt > info->max_uV)
-               return -EINVAL;
-
-       return volt;
-}
+static const struct regulator_linear_range da9034_ldo12_ranges[] = {
+       { .min_uV = 1700000, .max_uV = 2050000, .min_sel =  0, .max_sel = 7,
+         .uV_step =  50000 },
+       { .min_uV = 2700000, .max_uV = 3050000, .min_sel =  8, .max_sel = 15,
+         .uV_step =  50000 },
+};
 
 static struct regulator_ops da903x_regulator_ldo_ops = {
        .set_voltage_sel = da903x_set_voltage_sel,
@@ -332,8 +305,8 @@ static struct regulator_ops da9034_regulator_dvc_ops = {
 static struct regulator_ops da9034_regulator_ldo12_ops = {
        .set_voltage_sel = da903x_set_voltage_sel,
        .get_voltage_sel = da903x_get_voltage_sel,
-       .list_voltage   = da9034_list_ldo12_voltage,
-       .map_voltage    = da9034_map_ldo12_voltage,
+       .list_voltage   = regulator_list_voltage_linear_range,
+       .map_voltage    = regulator_map_voltage_linear_range,
        .enable         = da903x_enable,
        .disable        = da903x_disable,
        .is_enabled     = da903x_is_enabled,
@@ -476,6 +449,8 @@ static int da903x_regulator_probe(struct platform_device *pdev)
        if (ri->desc.id == DA9034_ID_LDO12) {
                ri->desc.ops = &da9034_regulator_ldo12_ops;
                ri->desc.n_voltages = 16;
+               ri->desc.linear_ranges = da9034_ldo12_ranges;
+               ri->desc.n_linear_ranges = ARRAY_SIZE(da9034_ldo12_ranges);
        }
 
        if (ri->desc.id == DA9030_ID_LDO14)
@@ -485,7 +460,7 @@ static int da903x_regulator_probe(struct platform_device *pdev)
                ri->desc.ops = &da9030_regulator_ldo1_15_ops;
 
        config.dev = &pdev->dev;
-       config.init_data = pdev->dev.platform_data;
+       config.init_data = dev_get_platdata(&pdev->dev);
        config.driver_data = ri;
 
        rdev = regulator_register(&ri->desc, &config);
index 96b569abb46cfe055a50cdf35a4c362a15ba0a3a..1e4d483f616373a6fb19b339ce6325be7c8a3b33 100644 (file)
@@ -349,7 +349,7 @@ static int da9052_regulator_probe(struct platform_device *pdev)
                return -ENOMEM;
 
        da9052 = dev_get_drvdata(pdev->dev.parent);
-       pdata = da9052->dev->platform_data;
+       pdata = dev_get_platdata(da9052->dev);
        regulator->da9052 = da9052;
 
        regulator->info = find_regulator_info(regulator->da9052->chip_id,
index 30221099d09c4d09bc3f406c94e5fa089dadc5b8..77b53e5a231cabda9eaa915efa718ad9ce5e18f5 100644 (file)
@@ -535,7 +535,7 @@ static int da9055_regulator_probe(struct platform_device *pdev)
        struct regulator_config config = { };
        struct da9055_regulator *regulator;
        struct da9055 *da9055 = dev_get_drvdata(pdev->dev.parent);
-       struct da9055_pdata *pdata = da9055->dev->platform_data;
+       struct da9055_pdata *pdata = dev_get_platdata(da9055->dev);
        int ret, irq;
 
        if (pdata == NULL || pdata->regulators[pdev->id] == NULL)
diff --git a/drivers/regulator/da9210-regulator.c b/drivers/regulator/da9210-regulator.c
new file mode 100644 (file)
index 0000000..f0fe54b
--- /dev/null
@@ -0,0 +1,196 @@
+/*
+ * da9210-regulator.c - Regulator device driver for DA9210
+ * Copyright (C) 2013  Dialog Semiconductor Ltd.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+ * Boston, MA  02110-1301, USA.
+ */
+
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regmap.h>
+
+#include "da9210-regulator.h"
+
+struct da9210 {
+       struct regulator_dev *rdev;
+       struct regmap *regmap;
+};
+
+static const struct regmap_config da9210_regmap_config = {
+       .reg_bits = 8,
+       .val_bits = 8,
+};
+
+static int da9210_set_current_limit(struct regulator_dev *rdev, int min_uA,
+                                   int max_uA);
+static int da9210_get_current_limit(struct regulator_dev *rdev);
+
+static struct regulator_ops da9210_buck_ops = {
+       .enable = regulator_enable_regmap,
+       .disable = regulator_disable_regmap,
+       .is_enabled = regulator_is_enabled_regmap,
+       .set_voltage_sel = regulator_set_voltage_sel_regmap,
+       .get_voltage_sel = regulator_get_voltage_sel_regmap,
+       .list_voltage = regulator_list_voltage_linear,
+       .set_current_limit = da9210_set_current_limit,
+       .get_current_limit = da9210_get_current_limit,
+};
+
+/* Default limits measured in millivolts and milliamps */
+#define DA9210_MIN_MV          300
+#define DA9210_MAX_MV          1570
+#define DA9210_STEP_MV         10
+
+/* Current limits for buck (uA) indices corresponds with register values */
+static const int da9210_buck_limits[] = {
+       1600000, 1800000, 2000000, 2200000, 2400000, 2600000, 2800000, 3000000,
+       3200000, 3400000, 3600000, 3800000, 4000000, 4200000, 4400000, 4600000
+};
+
+static const struct regulator_desc da9210_reg = {
+       .name = "DA9210",
+       .id = 0,
+       .ops = &da9210_buck_ops,
+       .type = REGULATOR_VOLTAGE,
+       .n_voltages = ((DA9210_MAX_MV - DA9210_MIN_MV) / DA9210_STEP_MV) + 1,
+       .min_uV = (DA9210_MIN_MV * 1000),
+       .uV_step = (DA9210_STEP_MV * 1000),
+       .vsel_reg = DA9210_REG_VBUCK_A,
+       .vsel_mask = DA9210_VBUCK_MASK,
+       .enable_reg = DA9210_REG_BUCK_CONT,
+       .enable_mask = DA9210_BUCK_EN,
+       .owner = THIS_MODULE,
+};
+
+static int da9210_set_current_limit(struct regulator_dev *rdev, int min_uA,
+                                   int max_uA)
+{
+       struct da9210 *chip = rdev_get_drvdata(rdev);
+       unsigned int sel;
+       int i;
+
+       /* search for closest to maximum */
+       for (i = ARRAY_SIZE(da9210_buck_limits)-1; i >= 0; i--) {
+               if (min_uA <= da9210_buck_limits[i] &&
+                   max_uA >= da9210_buck_limits[i]) {
+                       sel = i;
+                       sel = sel << DA9210_BUCK_ILIM_SHIFT;
+                       return regmap_update_bits(chip->regmap,
+                                                 DA9210_REG_BUCK_ILIM,
+                                                 DA9210_BUCK_ILIM_MASK, sel);
+               }
+       }
+
+       return -EINVAL;
+}
+
+static int da9210_get_current_limit(struct regulator_dev *rdev)
+{
+       struct da9210 *chip = rdev_get_drvdata(rdev);
+       unsigned int data;
+       unsigned int sel;
+       int ret;
+
+       ret = regmap_read(chip->regmap, DA9210_REG_BUCK_ILIM, &data);
+       if (ret < 0)
+               return ret;
+
+       /* select one of 16 values: 0000 (1600mA) to 1111 (4600mA) */
+       sel = (data & DA9210_BUCK_ILIM_MASK) >> DA9210_BUCK_ILIM_SHIFT;
+
+       return da9210_buck_limits[sel];
+}
+
+/*
+ * I2C driver interface functions
+ */
+static int da9210_i2c_probe(struct i2c_client *i2c,
+                           const struct i2c_device_id *id)
+{
+       struct da9210 *chip;
+       struct da9210_pdata *pdata = i2c->dev.platform_data;
+       struct regulator_dev *rdev = NULL;
+       struct regulator_config config = { };
+       int error;
+
+       chip = devm_kzalloc(&i2c->dev, sizeof(struct da9210), GFP_KERNEL);
+       if (NULL == chip) {
+               dev_err(&i2c->dev,
+                       "Cannot kzalloc memory for regulator structure\n");
+               return -ENOMEM;
+       }
+
+       chip->regmap = devm_regmap_init_i2c(i2c, &da9210_regmap_config);
+       if (IS_ERR(chip->regmap)) {
+               error = PTR_ERR(chip->regmap);
+               dev_err(&i2c->dev, "Failed to allocate register map: %d\n",
+                       error);
+               return error;
+       }
+
+       config.dev = &i2c->dev;
+       if (pdata)
+               config.init_data = &pdata->da9210_constraints;
+       config.driver_data = chip;
+       config.regmap = chip->regmap;
+
+       rdev = regulator_register(&da9210_reg, &config);
+       if (IS_ERR(rdev)) {
+               dev_err(&i2c->dev, "Failed to register DA9210 regulator\n");
+               return PTR_ERR(rdev);
+       }
+
+       chip->rdev = rdev;
+
+       i2c_set_clientdata(i2c, chip);
+
+       return 0;
+}
+
+static int da9210_i2c_remove(struct i2c_client *i2c)
+{
+       struct da9210 *chip = i2c_get_clientdata(i2c);
+       regulator_unregister(chip->rdev);
+       return 0;
+}
+
+static const struct i2c_device_id da9210_i2c_id[] = {
+       {"da9210", 0},
+       {},
+};
+
+MODULE_DEVICE_TABLE(i2c, da9210_i2c_id);
+
+static struct i2c_driver da9210_regulator_driver = {
+       .driver = {
+               .name = "da9210",
+               .owner = THIS_MODULE,
+       },
+       .probe = da9210_i2c_probe,
+       .remove = da9210_i2c_remove,
+       .id_table = da9210_i2c_id,
+};
+
+module_i2c_driver(da9210_regulator_driver);
+
+MODULE_AUTHOR("S Twiss <stwiss.opensource@diasemi.com>");
+MODULE_DESCRIPTION("Regulator device driver for Dialog DA9210");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/regulator/da9210-regulator.h b/drivers/regulator/da9210-regulator.h
new file mode 100644 (file)
index 0000000..749c550
--- /dev/null
@@ -0,0 +1,288 @@
+
+/*
+ * da9210-regulator.h - Regulator definitions for DA9210
+ * Copyright (C) 2013  Dialog Semiconductor Ltd.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+ * Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __DA9210_REGISTERS_H__
+#define __DA9210_REGISTERS_H__
+
+struct da9210_pdata {
+       struct regulator_init_data da9210_constraints;
+};
+
+/* Page selection */
+#define        DA9210_REG_PAGE_CON                     0x00
+
+/* System Control and Event Registers */
+#define        DA9210_REG_STATUS_A                     0x50
+#define        DA9210_REG_STATUS_B                     0x51
+#define        DA9210_REG_EVENT_A                      0x52
+#define        DA9210_REG_EVENT_B                      0x53
+#define        DA9210_REG_MASK_A                       0x54
+#define        DA9210_REG_MASK_B                       0x55
+#define        DA9210_REG_CONTROL_A                    0x56
+
+/* GPIO Control Registers */
+#define        DA9210_REG_GPIO_0_1                     0x58
+#define        DA9210_REG_GPIO_2_3                     0x59
+#define        DA9210_REG_GPIO_4_5                     0x5A
+#define        DA9210_REG_GPIO_6                       0x5B
+
+/* Regulator Registers */
+#define        DA9210_REG_BUCK_CONT                    0x5D
+#define        DA9210_REG_BUCK_ILIM                    0xD0
+#define        DA9210_REG_BUCK_CONF1                   0xD1
+#define        DA9210_REG_BUCK_CONF2                   0xD2
+#define DA9210_REG_VBACK_AUTO                  0xD4
+#define DA9210_REG_VBACK_BASE                  0xD5
+#define DA9210_REG_VBACK_MAX_DVC_IF            0xD6
+#define DA9210_REG_VBACK_DVC                   0xD7
+#define        DA9210_REG_VBUCK_A                      0xD8
+#define        DA9210_REG_VBUCK_B                      0xD9
+
+/* I2C Interface Settings */
+#define DA9210_REG_INTERFACE                   0x105
+
+/* OTP */
+#define        DA9210_REG_OPT_COUNT                    0x140
+#define        DA9210_REG_OPT_ADDR                     0x141
+#define        DA9210_REG_OPT_DATA                     0x142
+
+/* Customer Trim and Configuration */
+#define        DA9210_REG_CONFIG_A                     0x143
+#define        DA9210_REG_CONFIG_B                     0x144
+#define        DA9210_REG_CONFIG_C                     0x145
+#define        DA9210_REG_CONFIG_D                     0x146
+#define        DA9210_REG_CONFIG_E                     0x147
+
+
+/*
+ * Registers bits
+ */
+/* DA9210_REG_PAGE_CON (addr=0x00) */
+#define        DA9210_PEG_PAGE_SHIFT                   0
+#define        DA9210_REG_PAGE_MASK                    0x0F
+/* On I2C registers 0x00 - 0xFF */
+#define        DA9210_REG_PAGE0                        0
+/* On I2C registers 0x100 - 0x1FF */
+#define        DA9210_REG_PAGE2                        2
+#define        DA9210_PAGE_WRITE_MODE                  0x00
+#define        DA9210_REPEAT_WRITE_MODE                0x40
+#define        DA9210_PAGE_REVERT                      0x80
+
+/* DA9210_REG_STATUS_A (addr=0x50) */
+#define        DA9210_GPI0                             0x01
+#define        DA9210_GPI1                             0x02
+#define        DA9210_GPI2                             0x04
+#define        DA9210_GPI3                             0x08
+#define        DA9210_GPI4                             0x10
+#define        DA9210_GPI5                             0x20
+#define        DA9210_GPI6                             0x40
+
+/* DA9210_REG_EVENT_A (addr=0x52) */
+#define        DA9210_E_GPI0                           0x01
+#define        DA9210_E_GPI1                           0x02
+#define        DA9210_E_GPI2                           0x04
+#define        DA9210_E_GPI3                           0x08
+#define        DA9210_E_GPI4                           0x10
+#define        DA9210_E_GPI5                           0x20
+#define        DA9210_E_GPI6                           0x40
+
+/* DA9210_REG_EVENT_B (addr=0x53) */
+#define        DA9210_E_OVCURR                         0x01
+#define        DA9210_E_NPWRGOOD                       0x02
+#define        DA9210_E_TEMP_WARN                      0x04
+#define        DA9210_E_TEMP_CRIT                      0x08
+#define        DA9210_E_VMAX                           0x10
+
+/* DA9210_REG_MASK_A (addr=0x54) */
+#define        DA9210_M_GPI0                           0x01
+#define        DA9210_M_GPI1                           0x02
+#define        DA9210_M_GPI2                           0x04
+#define        DA9210_M_GPI3                           0x08
+#define        DA9210_M_GPI4                           0x10
+#define        DA9210_M_GPI5                           0x20
+#define        DA9210_M_GPI6                           0x40
+
+/* DA9210_REG_MASK_B (addr=0x55) */
+#define        DA9210_M_OVCURR                         0x01
+#define        DA9210_M_NPWRGOOD                       0x02
+#define        DA9210_M_TEMP_WARN                      0x04
+#define        DA9210_M_TEMP_CRIT                      0x08
+#define        DA9210_M_VMAX                           0x10
+
+/* DA9210_REG_CONTROL_A (addr=0x56) */
+#define        DA9210_DEBOUNCING_SHIFT                 0
+#define        DA9210_DEBOUNCING_MASK                  0x07
+#define        DA9210_SLEW_RATE_SHIFT                  3
+#define        DA9210_SLEW_RATE_MASK                   0x18
+#define        DA9210_V_LOCK                           0x20
+
+/* DA9210_REG_GPIO_0_1 (addr=0x58) */
+#define        DA9210_GPIO0_PIN_SHIFT                  0
+#define        DA9210_GPIO0_PIN_MASK                   0x03
+#define                DA9210_GPIO0_PIN_GPI            0x00
+#define                DA9210_GPIO0_PIN_GPO_OD         0x02
+#define                DA9210_GPIO0_PIN_GPO            0x03
+#define        DA9210_GPIO0_TYPE                       0x04
+#define                DA9210_GPIO0_TYPE_GPI           0x00
+#define                DA9210_GPIO0_TYPE_GPO           0x04
+#define        DA9210_GPIO0_MODE                       0x08
+#define        DA9210_GPIO1_PIN_SHIFT                  4
+#define        DA9210_GPIO1_PIN_MASK                   0x30
+#define                DA9210_GPIO1_PIN_GPI            0x00
+#define                DA9210_GPIO1_PIN_VERROR         0x10
+#define                DA9210_GPIO1_PIN_GPO_OD         0x20
+#define                DA9210_GPIO1_PIN_GPO            0x30
+#define        DA9210_GPIO1_TYPE_SHIFT                 0x40
+#define                DA9210_GPIO1_TYPE_GPI           0x00
+#define                DA9210_GPIO1_TYPE_GPO           0x40
+#define        DA9210_GPIO1_MODE                       0x80
+
+/* DA9210_REG_GPIO_2_3 (addr=0x59) */
+#define        DA9210_GPIO2_PIN_SHIFT                  0
+#define        DA9210_GPIO2_PIN_MASK                   0x03
+#define                DA9210_GPIO2_PIN_GPI            0x00
+#define                DA9210_GPIO5_PIN_BUCK_CLK       0x10
+#define                DA9210_GPIO2_PIN_GPO_OD         0x02
+#define                DA9210_GPIO2_PIN_GPO            0x03
+#define        DA9210_GPIO2_TYPE                       0x04
+#define                DA9210_GPIO2_TYPE_GPI           0x00
+#define                DA9210_GPIO2_TYPE_GPO           0x04
+#define        DA9210_GPIO2_MODE                       0x08
+#define        DA9210_GPIO3_PIN_SHIFT                  4
+#define        DA9210_GPIO3_PIN_MASK                   0x30
+#define                DA9210_GPIO3_PIN_GPI            0x00
+#define                DA9210_GPIO3_PIN_IERROR         0x10
+#define                DA9210_GPIO3_PIN_GPO_OD         0x20
+#define                DA9210_GPIO3_PIN_GPO            0x30
+#define        DA9210_GPIO3_TYPE_SHIFT                 0x40
+#define                DA9210_GPIO3_TYPE_GPI           0x00
+#define                DA9210_GPIO3_TYPE_GPO           0x40
+#define        DA9210_GPIO3_MODE                       0x80
+
+/* DA9210_REG_GPIO_4_5 (addr=0x5A) */
+#define        DA9210_GPIO4_PIN_SHIFT                  0
+#define        DA9210_GPIO4_PIN_MASK                   0x03
+#define                DA9210_GPIO4_PIN_GPI            0x00
+#define                DA9210_GPIO4_PIN_GPO_OD         0x02
+#define                DA9210_GPIO4_PIN_GPO            0x03
+#define        DA9210_GPIO4_TYPE                       0x04
+#define                DA9210_GPIO4_TYPE_GPI           0x00
+#define                DA9210_GPIO4_TYPE_GPO           0x04
+#define        DA9210_GPIO4_MODE                       0x08
+#define        DA9210_GPIO5_PIN_SHIFT                  4
+#define        DA9210_GPIO5_PIN_MASK                   0x30
+#define                DA9210_GPIO5_PIN_GPI            0x00
+#define                DA9210_GPIO5_PIN_INTERFACE      0x01
+#define                DA9210_GPIO5_PIN_GPO_OD         0x20
+#define                DA9210_GPIO5_PIN_GPO            0x30
+#define        DA9210_GPIO5_TYPE_SHIFT                 0x40
+#define                DA9210_GPIO5_TYPE_GPI           0x00
+#define                DA9210_GPIO5_TYPE_GPO           0x40
+#define        DA9210_GPIO5_MODE                       0x80
+
+/* DA9210_REG_GPIO_6 (addr=0x5B) */
+#define        DA9210_GPIO6_PIN_SHIFT                  0
+#define        DA9210_GPIO6_PIN_MASK                   0x03
+#define                DA9210_GPIO6_PIN_GPI            0x00
+#define                DA9210_GPIO6_PIN_INTERFACE      0x01
+#define                DA9210_GPIO6_PIN_GPO_OD         0x02
+#define                DA9210_GPIO6_PIN_GPO            0x03
+#define        DA9210_GPIO6_TYPE                       0x04
+#define                DA9210_GPIO6_TYPE_GPI           0x00
+#define                DA9210_GPIO6_TYPE_GPO           0x04
+#define        DA9210_GPIO6_MODE                       0x08
+
+/* DA9210_REG_BUCK_CONT (addr=0x5D) */
+#define        DA9210_BUCK_EN                          0x01
+#define        DA9210_BUCK_GPI_SHIFT                   1
+#define DA9210_BUCK_GPI_MASK                   0x06
+#define                DA9210_BUCK_GPI_OFF             0x00
+#define                DA9210_BUCK_GPI_GPIO0           0x02
+#define                DA9210_BUCK_GPI_GPIO3           0x04
+#define                DA9210_BUCK_GPI_GPIO4           0x06
+#define        DA9210_BUCK_PD_DIS                      0x08
+#define        DA9210_VBUCK_SEL                        0x10
+#define                DA9210_VBUCK_SEL_A              0x00
+#define                DA9210_VBUCK_SEL_B              0x10
+#define        DA9210_VBUCK_GPI_SHIFT                  5
+#define        DA9210_VBUCK_GPI_MASK                   0x60
+#define                DA9210_VBUCK_GPI_OFF            0x00
+#define                DA9210_VBUCK_GPI_GPIO0          0x20
+#define                DA9210_VBUCK_GPI_GPIO3          0x40
+#define                DA9210_VBUCK_GPI_GPIO4          0x60
+#define        DA9210_DVC_CTRL_EN                      0x80
+
+/* DA9210_REG_BUCK_ILIM (addr=0xD0) */
+#define DA9210_BUCK_ILIM_SHIFT                 0
+#define DA9210_BUCK_ILIM_MASK                  0x0F
+#define DA9210_BUCK_IALARM                     0x10
+
+/* DA9210_REG_BUCK_CONF1 (addr=0xD1) */
+#define DA9210_BUCK_MODE_SHIFT                 0
+#define DA9210_BUCK_MODE_MASK                  0x03
+#define                DA9210_BUCK_MODE_MANUAL         0x00
+#define                DA9210_BUCK_MODE_SLEEP          0x01
+#define                DA9210_BUCK_MODE_SYNC           0x02
+#define                DA9210_BUCK_MODE_AUTO           0x03
+#define DA9210_STARTUP_CTRL_SHIFT              2
+#define DA9210_STARTUP_CTRL_MASK               0x1C
+#define DA9210_PWR_DOWN_CTRL_SHIFT             5
+#define DA9210_PWR_DOWN_CTRL_MASK              0xE0
+
+/* DA9210_REG_BUCK_CONF2 (addr=0xD2) */
+#define DA9210_PHASE_SEL_SHIFT                 0
+#define DA9210_PHASE_SEL_MASK                  0x03
+#define DA9210_FREQ_SEL                                0x40
+
+/* DA9210_REG_BUCK_AUTO (addr=0xD4) */
+#define DA9210_VBUCK_AUTO_SHIFT                        0
+#define DA9210_VBUCK_AUTO_MASK                 0x7F
+
+/* DA9210_REG_BUCK_BASE (addr=0xD5) */
+#define DA9210_VBUCK_BASE_SHIFT                        0
+#define DA9210_VBUCK_BASE_MASK                 0x7F
+
+/* DA9210_REG_VBUCK_MAX_DVC_IF (addr=0xD6) */
+#define DA9210_VBUCK_MAX_SHIFT                 0
+#define DA9210_VBUCK_MAX_MASK                  0x7F
+#define DA9210_DVC_STEP_SIZE                   0x80
+#define                DA9210_DVC_STEP_SIZE_10MV       0x00
+#define                DA9210_DVC_STEP_SIZE_20MV       0x80
+
+/* DA9210_REG_VBUCK_DVC (addr=0xD7) */
+#define DA9210_VBUCK_DVC_SHIFT                 0
+#define DA9210_VBUCK_DVC_MASK                  0x7F
+
+/* DA9210_REG_VBUCK_A/B (addr=0xD8/0xD9) */
+#define DA9210_VBUCK_SHIFT                     0
+#define DA9210_VBUCK_MASK                      0x7F
+#define DA9210_VBUCK_BIAS                      0
+#define DA9210_BUCK_SL                         0x80
+
+/* DA9210_REG_INTERFACE (addr=0x105) */
+#define DA9210_IF_BASE_ADDR_SHIFT              4
+#define DA9210_IF_BASE_ADDR_MASK               0xF0
+
+/* DA9210_REG_CONFIG_E (addr=0x147) */
+#define DA9210_STAND_ALONE                     0x01
+
+#endif /* __DA9210_REGISTERS_H__ */
+
index f0e1ae52bb05dd5369769d1b8d09f9c206008495..a32b44272a05dcc114ca8b160f1bf95a42589881 100644 (file)
@@ -237,7 +237,7 @@ static int fan53555_regulator_probe(struct i2c_client *client,
        unsigned int val;
        int ret;
 
-       pdata = client->dev.platform_data;
+       pdata = dev_get_platdata(&client->dev);
        if (!pdata || !pdata->regulator) {
                dev_err(&client->dev, "Platform data not found!\n");
                return -ENODEV;
index e5c03b534faefce8d2e8f30a5ebecd48b1a995dc..7610920014d789925368fd4316d35c1104fbf9ac 100644 (file)
@@ -146,7 +146,7 @@ static int reg_fixed_voltage_probe(struct platform_device *pdev)
                if (IS_ERR(config))
                        return PTR_ERR(config);
        } else {
-               config = pdev->dev.platform_data;
+               config = dev_get_platdata(&pdev->dev);
        }
 
        if (!config)
index 9d39eb4aafa3634762678dcf65d460d66b9de8a7..98a98ffa7fe07d2bcb23f2c9cad593b416aaf203 100644 (file)
@@ -219,7 +219,7 @@ static struct regulator_ops gpio_regulator_current_ops = {
 
 static int gpio_regulator_probe(struct platform_device *pdev)
 {
-       struct gpio_regulator_config *config = pdev->dev.platform_data;
+       struct gpio_regulator_config *config = dev_get_platdata(&pdev->dev);
        struct device_node *np = pdev->dev.of_node;
        struct gpio_regulator_data *drvdata;
        struct regulator_config cfg = { };
diff --git a/drivers/regulator/helpers.c b/drivers/regulator/helpers.c
new file mode 100644 (file)
index 0000000..6e30df1
--- /dev/null
@@ -0,0 +1,447 @@
+/*
+ * helpers.c  --  Voltage/Current Regulator framework helper functions.
+ *
+ * Copyright 2007, 2008 Wolfson Microelectronics PLC.
+ * Copyright 2008 SlimLogic Ltd.
+ *
+ *  This program is free software; you can redistribute  it and/or modify it
+ *  under  the terms of  the GNU General  Public License as published by the
+ *  Free Software Foundation;  either version 2 of the  License, or (at your
+ *  option) any later version.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/regulator/driver.h>
+#include <linux/module.h>
+
+/**
+ * regulator_is_enabled_regmap - standard is_enabled() for regmap users
+ *
+ * @rdev: regulator to operate on
+ *
+ * Regulators that use regmap for their register I/O can set the
+ * enable_reg and enable_mask fields in their descriptor and then use
+ * this as their is_enabled operation, saving some code.
+ */
+int regulator_is_enabled_regmap(struct regulator_dev *rdev)
+{
+       unsigned int val;
+       int ret;
+
+       ret = regmap_read(rdev->regmap, rdev->desc->enable_reg, &val);
+       if (ret != 0)
+               return ret;
+
+       if (rdev->desc->enable_is_inverted)
+               return (val & rdev->desc->enable_mask) == 0;
+       else
+               return (val & rdev->desc->enable_mask) != 0;
+}
+EXPORT_SYMBOL_GPL(regulator_is_enabled_regmap);
+
+/**
+ * regulator_enable_regmap - standard enable() for regmap users
+ *
+ * @rdev: regulator to operate on
+ *
+ * Regulators that use regmap for their register I/O can set the
+ * enable_reg and enable_mask fields in their descriptor and then use
+ * this as their enable() operation, saving some code.
+ */
+int regulator_enable_regmap(struct regulator_dev *rdev)
+{
+       unsigned int val;
+
+       if (rdev->desc->enable_is_inverted)
+               val = 0;
+       else
+               val = rdev->desc->enable_mask;
+
+       return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
+                                 rdev->desc->enable_mask, val);
+}
+EXPORT_SYMBOL_GPL(regulator_enable_regmap);
+
+/**
+ * regulator_disable_regmap - standard disable() for regmap users
+ *
+ * @rdev: regulator to operate on
+ *
+ * Regulators that use regmap for their register I/O can set the
+ * enable_reg and enable_mask fields in their descriptor and then use
+ * this as their disable() operation, saving some code.
+ */
+int regulator_disable_regmap(struct regulator_dev *rdev)
+{
+       unsigned int val;
+
+       if (rdev->desc->enable_is_inverted)
+               val = rdev->desc->enable_mask;
+       else
+               val = 0;
+
+       return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
+                                 rdev->desc->enable_mask, val);
+}
+EXPORT_SYMBOL_GPL(regulator_disable_regmap);
+
+/**
+ * regulator_get_voltage_sel_regmap - standard get_voltage_sel for regmap users
+ *
+ * @rdev: regulator to operate on
+ *
+ * Regulators that use regmap for their register I/O can set the
+ * vsel_reg and vsel_mask fields in their descriptor and then use this
+ * as their get_voltage_sel operation, saving some code.
+ */
+int regulator_get_voltage_sel_regmap(struct regulator_dev *rdev)
+{
+       unsigned int val;
+       int ret;
+
+       ret = regmap_read(rdev->regmap, rdev->desc->vsel_reg, &val);
+       if (ret != 0)
+               return ret;
+
+       val &= rdev->desc->vsel_mask;
+       val >>= ffs(rdev->desc->vsel_mask) - 1;
+
+       return val;
+}
+EXPORT_SYMBOL_GPL(regulator_get_voltage_sel_regmap);
+
+/**
+ * regulator_set_voltage_sel_regmap - standard set_voltage_sel for regmap users
+ *
+ * @rdev: regulator to operate on
+ * @sel: Selector to set
+ *
+ * Regulators that use regmap for their register I/O can set the
+ * vsel_reg and vsel_mask fields in their descriptor and then use this
+ * as their set_voltage_sel operation, saving some code.
+ */
+int regulator_set_voltage_sel_regmap(struct regulator_dev *rdev, unsigned sel)
+{
+       int ret;
+
+       sel <<= ffs(rdev->desc->vsel_mask) - 1;
+
+       ret = regmap_update_bits(rdev->regmap, rdev->desc->vsel_reg,
+                                 rdev->desc->vsel_mask, sel);
+       if (ret)
+               return ret;
+
+       if (rdev->desc->apply_bit)
+               ret = regmap_update_bits(rdev->regmap, rdev->desc->apply_reg,
+                                        rdev->desc->apply_bit,
+                                        rdev->desc->apply_bit);
+       return ret;
+}
+EXPORT_SYMBOL_GPL(regulator_set_voltage_sel_regmap);
+
+/**
+ * regulator_map_voltage_iterate - map_voltage() based on list_voltage()
+ *
+ * @rdev: Regulator to operate on
+ * @min_uV: Lower bound for voltage
+ * @max_uV: Upper bound for voltage
+ *
+ * Drivers implementing set_voltage_sel() and list_voltage() can use
+ * this as their map_voltage() operation.  It will find a suitable
+ * voltage by calling list_voltage() until it gets something in bounds
+ * for the requested voltages.
+ */
+int regulator_map_voltage_iterate(struct regulator_dev *rdev,
+                                 int min_uV, int max_uV)
+{
+       int best_val = INT_MAX;
+       int selector = 0;
+       int i, ret;
+
+       /* Find the smallest voltage that falls within the specified
+        * range.
+        */
+       for (i = 0; i < rdev->desc->n_voltages; i++) {
+               ret = rdev->desc->ops->list_voltage(rdev, i);
+               if (ret < 0)
+                       continue;
+
+               if (ret < best_val && ret >= min_uV && ret <= max_uV) {
+                       best_val = ret;
+                       selector = i;
+               }
+       }
+
+       if (best_val != INT_MAX)
+               return selector;
+       else
+               return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(regulator_map_voltage_iterate);
+
+/**
+ * regulator_map_voltage_ascend - map_voltage() for ascendant voltage list
+ *
+ * @rdev: Regulator to operate on
+ * @min_uV: Lower bound for voltage
+ * @max_uV: Upper bound for voltage
+ *
+ * Drivers that have ascendant voltage list can use this as their
+ * map_voltage() operation.
+ */
+int regulator_map_voltage_ascend(struct regulator_dev *rdev,
+                                int min_uV, int max_uV)
+{
+       int i, ret;
+
+       for (i = 0; i < rdev->desc->n_voltages; i++) {
+               ret = rdev->desc->ops->list_voltage(rdev, i);
+               if (ret < 0)
+                       continue;
+
+               if (ret > max_uV)
+                       break;
+
+               if (ret >= min_uV && ret <= max_uV)
+                       return i;
+       }
+
+       return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(regulator_map_voltage_ascend);
+
+/**
+ * regulator_map_voltage_linear - map_voltage() for simple linear mappings
+ *
+ * @rdev: Regulator to operate on
+ * @min_uV: Lower bound for voltage
+ * @max_uV: Upper bound for voltage
+ *
+ * Drivers providing min_uV and uV_step in their regulator_desc can
+ * use this as their map_voltage() operation.
+ */
+int regulator_map_voltage_linear(struct regulator_dev *rdev,
+                                int min_uV, int max_uV)
+{
+       int ret, voltage;
+
+       /* Allow uV_step to be 0 for fixed voltage */
+       if (rdev->desc->n_voltages == 1 && rdev->desc->uV_step == 0) {
+               if (min_uV <= rdev->desc->min_uV && rdev->desc->min_uV <= max_uV)
+                       return 0;
+               else
+                       return -EINVAL;
+       }
+
+       if (!rdev->desc->uV_step) {
+               BUG_ON(!rdev->desc->uV_step);
+               return -EINVAL;
+       }
+
+       if (min_uV < rdev->desc->min_uV)
+               min_uV = rdev->desc->min_uV;
+
+       ret = DIV_ROUND_UP(min_uV - rdev->desc->min_uV, rdev->desc->uV_step);
+       if (ret < 0)
+               return ret;
+
+       ret += rdev->desc->linear_min_sel;
+
+       /* Map back into a voltage to verify we're still in bounds */
+       voltage = rdev->desc->ops->list_voltage(rdev, ret);
+       if (voltage < min_uV || voltage > max_uV)
+               return -EINVAL;
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(regulator_map_voltage_linear);
+
+/**
+ * regulator_map_voltage_linear_range - map_voltage() for multiple linear ranges
+ *
+ * @rdev: Regulator to operate on
+ * @min_uV: Lower bound for voltage
+ * @max_uV: Upper bound for voltage
+ *
+ * Drivers providing linear_ranges in their descriptor can use this as
+ * their map_voltage() callback.
+ */
+int regulator_map_voltage_linear_range(struct regulator_dev *rdev,
+                                      int min_uV, int max_uV)
+{
+       const struct regulator_linear_range *range;
+       int ret = -EINVAL;
+       int voltage, i;
+
+       if (!rdev->desc->n_linear_ranges) {
+               BUG_ON(!rdev->desc->n_linear_ranges);
+               return -EINVAL;
+       }
+
+       for (i = 0; i < rdev->desc->n_linear_ranges; i++) {
+               range = &rdev->desc->linear_ranges[i];
+
+               if (!(min_uV <= range->max_uV && max_uV >= range->min_uV))
+                       continue;
+
+               if (min_uV <= range->min_uV)
+                       min_uV = range->min_uV;
+
+               /* range->uV_step == 0 means fixed voltage range */
+               if (range->uV_step == 0) {
+                       ret = 0;
+               } else {
+                       ret = DIV_ROUND_UP(min_uV - range->min_uV,
+                                          range->uV_step);
+                       if (ret < 0)
+                               return ret;
+               }
+
+               ret += range->min_sel;
+
+               break;
+       }
+
+       if (i == rdev->desc->n_linear_ranges)
+               return -EINVAL;
+
+       /* Map back into a voltage to verify we're still in bounds */
+       voltage = rdev->desc->ops->list_voltage(rdev, ret);
+       if (voltage < min_uV || voltage > max_uV)
+               return -EINVAL;
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(regulator_map_voltage_linear_range);
+
+/**
+ * regulator_list_voltage_linear - List voltages with simple calculation
+ *
+ * @rdev: Regulator device
+ * @selector: Selector to convert into a voltage
+ *
+ * Regulators with a simple linear mapping between voltages and
+ * selectors can set min_uV and uV_step in the regulator descriptor
+ * and then use this function as their list_voltage() operation,
+ */
+int regulator_list_voltage_linear(struct regulator_dev *rdev,
+                                 unsigned int selector)
+{
+       if (selector >= rdev->desc->n_voltages)
+               return -EINVAL;
+       if (selector < rdev->desc->linear_min_sel)
+               return 0;
+
+       selector -= rdev->desc->linear_min_sel;
+
+       return rdev->desc->min_uV + (rdev->desc->uV_step * selector);
+}
+EXPORT_SYMBOL_GPL(regulator_list_voltage_linear);
+
+/**
+ * regulator_list_voltage_linear_range - List voltages for linear ranges
+ *
+ * @rdev: Regulator device
+ * @selector: Selector to convert into a voltage
+ *
+ * Regulators with a series of simple linear mappings between voltages
+ * and selectors can set linear_ranges in the regulator descriptor and
+ * then use this function as their list_voltage() operation.
+ */
+int regulator_list_voltage_linear_range(struct regulator_dev *rdev,
+                                       unsigned int selector)
+{
+       const struct regulator_linear_range *range;
+       int i;
+
+       if (!rdev->desc->n_linear_ranges) {
+               BUG_ON(!rdev->desc->n_linear_ranges);
+               return -EINVAL;
+       }
+
+       for (i = 0; i < rdev->desc->n_linear_ranges; i++) {
+               range = &rdev->desc->linear_ranges[i];
+
+               if (!(selector >= range->min_sel &&
+                     selector <= range->max_sel))
+                       continue;
+
+               selector -= range->min_sel;
+
+               return range->min_uV + (range->uV_step * selector);
+       }
+
+       return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(regulator_list_voltage_linear_range);
+
+/**
+ * regulator_list_voltage_table - List voltages with table based mapping
+ *
+ * @rdev: Regulator device
+ * @selector: Selector to convert into a voltage
+ *
+ * Regulators with table based mapping between voltages and
+ * selectors can set volt_table in the regulator descriptor
+ * and then use this function as their list_voltage() operation.
+ */
+int regulator_list_voltage_table(struct regulator_dev *rdev,
+                                unsigned int selector)
+{
+       if (!rdev->desc->volt_table) {
+               BUG_ON(!rdev->desc->volt_table);
+               return -EINVAL;
+       }
+
+       if (selector >= rdev->desc->n_voltages)
+               return -EINVAL;
+
+       return rdev->desc->volt_table[selector];
+}
+EXPORT_SYMBOL_GPL(regulator_list_voltage_table);
+
+/**
+ * regulator_set_bypass_regmap - Default set_bypass() using regmap
+ *
+ * @rdev: device to operate on.
+ * @enable: state to set.
+ */
+int regulator_set_bypass_regmap(struct regulator_dev *rdev, bool enable)
+{
+       unsigned int val;
+
+       if (enable)
+               val = rdev->desc->bypass_mask;
+       else
+               val = 0;
+
+       return regmap_update_bits(rdev->regmap, rdev->desc->bypass_reg,
+                                 rdev->desc->bypass_mask, val);
+}
+EXPORT_SYMBOL_GPL(regulator_set_bypass_regmap);
+
+/**
+ * regulator_get_bypass_regmap - Default get_bypass() using regmap
+ *
+ * @rdev: device to operate on.
+ * @enable: current state.
+ */
+int regulator_get_bypass_regmap(struct regulator_dev *rdev, bool *enable)
+{
+       unsigned int val;
+       int ret;
+
+       ret = regmap_read(rdev->regmap, rdev->desc->bypass_reg, &val);
+       if (ret != 0)
+               return ret;
+
+       *enable = val & rdev->desc->bypass_mask;
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(regulator_get_bypass_regmap);
index b99c49b9aff0a0577009b57fe509f18269d0e9a4..88c1a3acf563ebad5aa6301636961eda75528365 100644 (file)
@@ -110,7 +110,7 @@ static int isl6271a_probe(struct i2c_client *i2c,
                                     const struct i2c_device_id *id)
 {
        struct regulator_config config = { };
-       struct regulator_init_data *init_data   = i2c->dev.platform_data;
+       struct regulator_init_data *init_data   = dev_get_platdata(&i2c->dev);
        struct isl_pmic *pmic;
        int err, i;
 
index 3809b43816060ca0a4e71fecc024b6c7e472b09d..5a4604ee5ea593d33fa0e3412b36ff9baf6bae40 100644 (file)
@@ -425,7 +425,7 @@ static int lp3971_i2c_probe(struct i2c_client *i2c,
                            const struct i2c_device_id *id)
 {
        struct lp3971 *lp3971;
-       struct lp3971_platform_data *pdata = i2c->dev.platform_data;
+       struct lp3971_platform_data *pdata = dev_get_platdata(&i2c->dev);
        int ret;
        u16 val;
 
index 573024039ca0c5751fbd5faf31ca2553d0ee91c1..093e6f44ff8a3e0d9f17eb6cd0001c36d5911b37 100644 (file)
@@ -519,7 +519,7 @@ static int lp3972_i2c_probe(struct i2c_client *i2c,
                            const struct i2c_device_id *id)
 {
        struct lp3972 *lp3972;
-       struct lp3972_platform_data *pdata = i2c->dev.platform_data;
+       struct lp3972_platform_data *pdata = dev_get_platdata(&i2c->dev);
        int ret;
        u16 val;
 
index b16336bcd4d46daa099c313fc8cb98b6488ea230..2b84b727a3c498c6e22f97fe9d297d06ee1470c8 100644 (file)
@@ -373,7 +373,7 @@ static int lp8725_buck_set_current_limit(struct regulator_dev *rdev,
                return -EINVAL;
        }
 
-       for (i = ARRAY_SIZE(lp8725_buck_uA) - 1 ; i >= 0; i--) {
+       for (i = ARRAY_SIZE(lp8725_buck_uA) - 1; i >= 0; i--) {
                if (lp8725_buck_uA[i] >= min_uA &&
                        lp8725_buck_uA[i] <= max_uA)
                        return lp872x_update_bits(lp, addr,
@@ -787,7 +787,7 @@ static int lp872x_regulator_register(struct lp872x *lp)
        struct regulator_dev *rdev;
        int i, ret;
 
-       for (i = 0 ; i < lp->num_regulators ; i++) {
+       for (i = 0; i < lp->num_regulators; i++) {
                desc = (lp->chipid == LP8720) ? &lp8720_regulator_desc[i] :
                                                &lp8725_regulator_desc[i];
 
@@ -820,7 +820,7 @@ static void lp872x_regulator_unregister(struct lp872x *lp)
        struct regulator_dev *rdev;
        int i;
 
-       for (i = 0 ; i < lp->num_regulators ; i++) {
+       for (i = 0; i < lp->num_regulators; i++) {
                rdev = *(lp->regulators + i);
                regulator_unregister(rdev);
        }
@@ -907,7 +907,8 @@ static struct lp872x_platform_data
                goto out;
 
        for (i = 0; i < num_matches; i++) {
-               pdata->regulator_data[i].id = (int)match[i].driver_data;
+               pdata->regulator_data[i].id =
+                               (enum lp872x_regulator_id)match[i].driver_data;
                pdata->regulator_data[i].init_data = match[i].init_data;
 
                /* Operation mode configuration for buck/buck1/buck2 */
@@ -961,7 +962,7 @@ static int lp872x_probe(struct i2c_client *cl, const struct i2c_device_id *id)
        }
 
        lp->dev = &cl->dev;
-       lp->pdata = cl->dev.platform_data;
+       lp->pdata = dev_get_platdata(&cl->dev);
        lp->chipid = id->driver_data;
        lp->num_regulators = num_regulators;
        i2c_set_clientdata(cl, lp);
index d9e38b4c2adcd45b39bceba1b7b7b80dff09234d..785a25e9a43744530c2608fea66b4721eb06562e 100644 (file)
@@ -228,6 +228,7 @@ err_i2c:
 }
 
 static struct regulator_ops lp8755_buck_ops = {
+       .map_voltage = regulator_map_voltage_linear,
        .list_voltage = regulator_list_voltage_linear,
        .set_voltage_sel = regulator_set_voltage_sel_regmap,
        .get_voltage_sel = regulator_get_voltage_sel_regmap,
@@ -449,7 +450,7 @@ static int lp8755_probe(struct i2c_client *client,
 {
        int ret, icnt;
        struct lp8755_chip *pchip;
-       struct lp8755_platform_data *pdata = client->dev.platform_data;
+       struct lp8755_platform_data *pdata = dev_get_platdata(&client->dev);
 
        if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
                dev_err(&client->dev, "i2c functionality check fail.\n");
index 54af6101581495c41c686d5c56a6630c6e5a375e..3a599ee0a456000b5058771e5a8325d76afca521 100644 (file)
@@ -163,7 +163,7 @@ static int max1586_pmic_probe(struct i2c_client *client,
                                        const struct i2c_device_id *i2c_id)
 {
        struct regulator_dev **rdev;
-       struct max1586_platform_data *pdata = client->dev.platform_data;
+       struct max1586_platform_data *pdata = dev_get_platdata(&client->dev);
        struct regulator_config config = { };
        struct max1586_data *max1586;
        int i, id, ret = -ENOMEM;
index db6c9be10f3f3470de157a0c0e0592645d5b3a83..19c6f08eafd5bddfe5a626696bafcea8bf5f36ec 100644 (file)
@@ -152,7 +152,7 @@ static struct regmap_config max8649_regmap_config = {
 static int max8649_regulator_probe(struct i2c_client *client,
                                             const struct i2c_device_id *id)
 {
-       struct max8649_platform_data *pdata = client->dev.platform_data;
+       struct max8649_platform_data *pdata = dev_get_platdata(&client->dev);
        struct max8649_regulator_info *info = NULL;
        struct regulator_config config = { };
        unsigned int val;
index d428ef9a626fefde45c5e8487f3ec3125abe05c8..144bcacd734dbe39e44801e411990e15a45d18eb 100644 (file)
@@ -44,6 +44,9 @@
 #include <linux/regulator/driver.h>
 #include <linux/slab.h>
 #include <linux/regulator/max8660.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/regulator/of_regulator.h>
 
 #define MAX8660_DCDC_MIN_UV     725000
 #define MAX8660_DCDC_MAX_UV    1800000
@@ -305,21 +308,105 @@ static const struct regulator_desc max8660_reg[] = {
        },
 };
 
+enum {
+       MAX8660 = 0,
+       MAX8661 = 1,
+};
+
+#ifdef CONFIG_OF
+static const struct of_device_id max8660_dt_ids[] = {
+       { .compatible = "maxim,max8660", .data = (void *) MAX8660 },
+       { .compatible = "maxim,max8661", .data = (void *) MAX8661 },
+       { }
+};
+MODULE_DEVICE_TABLE(of, max8660_dt_ids);
+
+static int max8660_pdata_from_dt(struct device *dev,
+                                struct device_node **of_node,
+                                struct max8660_platform_data *pdata)
+{
+       int matched, i;
+       struct device_node *np;
+       struct max8660_subdev_data *sub;
+       struct of_regulator_match rmatch[ARRAY_SIZE(max8660_reg)];
+
+       np = of_find_node_by_name(dev->of_node, "regulators");
+       if (!np) {
+               dev_err(dev, "missing 'regulators' subnode in DT\n");
+               return -EINVAL;
+       }
+
+       for (i = 0; i < ARRAY_SIZE(rmatch); i++)
+               rmatch[i].name = max8660_reg[i].name;
+
+       matched = of_regulator_match(dev, np, rmatch, ARRAY_SIZE(rmatch));
+       if (matched <= 0)
+               return matched;
+
+       pdata->subdevs = devm_kzalloc(dev, sizeof(struct max8660_subdev_data) *
+                                               matched, GFP_KERNEL);
+       if (!pdata->subdevs)
+               return -ENOMEM;
+
+       pdata->num_subdevs = matched;
+       sub = pdata->subdevs;
+
+       for (i = 0; i < matched; i++) {
+               sub->id = i;
+               sub->name = rmatch[i].name;
+               sub->platform_data = rmatch[i].init_data;
+               of_node[i] = rmatch[i].of_node;
+               sub++;
+       }
+
+       return 0;
+}
+#else
+static inline int max8660_pdata_from_dt(struct device *dev,
+                                       struct device_node **of_node,
+                                       struct max8660_platform_data *pdata)
+{
+       return 0;
+}
+#endif
+
 static int max8660_probe(struct i2c_client *client,
                                   const struct i2c_device_id *i2c_id)
 {
        struct regulator_dev **rdev;
-       struct max8660_platform_data *pdata = client->dev.platform_data;
+       struct device *dev = &client->dev;
+       struct max8660_platform_data *pdata = dev_get_platdata(dev);
        struct regulator_config config = { };
        struct max8660 *max8660;
        int boot_on, i, id, ret = -EINVAL;
+       struct device_node *of_node[MAX8660_V_END];
+       unsigned long type;
+
+       if (dev->of_node && !pdata) {
+               const struct of_device_id *id;
+               struct max8660_platform_data pdata_of;
+
+               id = of_match_device(of_match_ptr(max8660_dt_ids), dev);
+               if (!id)
+                       return -ENODEV;
+
+               ret = max8660_pdata_from_dt(dev, of_node, &pdata_of);
+               if (ret < 0)
+                       return ret;
+
+               pdata = &pdata_of;
+               type = (unsigned long) id->data;
+       } else {
+               type = i2c_id->driver_data;
+               memset(of_node, 0, sizeof(of_node));
+       }
 
        if (pdata->num_subdevs > MAX8660_V_END) {
-               dev_err(&client->dev, "Too many regulators found!\n");
+               dev_err(dev, "Too many regulators found!\n");
                return -EINVAL;
        }
 
-       max8660 = devm_kzalloc(&client->dev, sizeof(struct max8660) +
+       max8660 = devm_kzalloc(dev, sizeof(struct max8660) +
                        sizeof(struct regulator_dev *) * MAX8660_V_END,
                        GFP_KERNEL);
        if (!max8660)
@@ -376,8 +463,8 @@ static int max8660_probe(struct i2c_client *client,
                        break;
 
                case MAX8660_V7:
-                       if (!strcmp(i2c_id->name, "max8661")) {
-                               dev_err(&client->dev, "Regulator not on this chip!\n");
+                       if (type == MAX8661) {
+                               dev_err(dev, "Regulator not on this chip!\n");
                                goto err_out;
                        }
 
@@ -386,7 +473,7 @@ static int max8660_probe(struct i2c_client *client,
                        break;
 
                default:
-                       dev_err(&client->dev, "invalid regulator %s\n",
+                       dev_err(dev, "invalid regulator %s\n",
                                 pdata->subdevs[i].name);
                        goto err_out;
                }
@@ -397,14 +484,15 @@ static int max8660_probe(struct i2c_client *client,
 
                id = pdata->subdevs[i].id;
 
-               config.dev = &client->dev;
+               config.dev = dev;
                config.init_data = pdata->subdevs[i].platform_data;
+               config.of_node = of_node[i];
                config.driver_data = max8660;
 
                rdev[i] = regulator_register(&max8660_reg[id], &config);
                if (IS_ERR(rdev[i])) {
                        ret = PTR_ERR(rdev[i]);
-                       dev_err(&client->dev, "failed to register %s\n",
+                       dev_err(dev, "failed to register %s\n",
                                max8660_reg[id].name);
                        goto err_unregister;
                }
@@ -431,8 +519,8 @@ static int max8660_remove(struct i2c_client *client)
 }
 
 static const struct i2c_device_id max8660_id[] = {
-       { "max8660", 0 },
-       { "max8661", 0 },
+       { .name = "max8660", .driver_data = MAX8660 },
+       { .name = "max8661", .driver_data = MAX8661 },
        { }
 };
 MODULE_DEVICE_TABLE(i2c, max8660_id);
index e6d54a546d36d40c68aeb0b623c8d83fee79ef0f..d80b5fa758ae5012585acb0a2e2b2947005177ce 100644 (file)
@@ -277,7 +277,7 @@ static int max8925_regulator_dt_init(struct platform_device *pdev,
 static int max8925_regulator_probe(struct platform_device *pdev)
 {
        struct max8925_chip *chip = dev_get_drvdata(pdev->dev.parent);
-       struct regulator_init_data *pdata = pdev->dev.platform_data;
+       struct regulator_init_data *pdata = dev_get_platdata(&pdev->dev);
        struct regulator_config config = { };
        struct max8925_regulator_info *ri;
        struct resource *res;
index 5259c2fea90a78fa85431630e14d5bb149d52a3c..788e5ae2af1b51464b16dca99c4d190a067cc67a 100644 (file)
@@ -196,7 +196,7 @@ static int max8952_pmic_probe(struct i2c_client *client,
                const struct i2c_device_id *i2c_id)
 {
        struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
-       struct max8952_platform_data *pdata = client->dev.platform_data;
+       struct max8952_platform_data *pdata = dev_get_platdata(&client->dev);
        struct regulator_config config = { };
        struct max8952_data *max8952;
 
index 0c5195a842e2ee68b5568f8e1c9cd85081a9df17..5b77ab7762e43c6915dc039e4f7a935a0a4d1adf 100644 (file)
@@ -371,7 +371,7 @@ static int max8973_probe(struct i2c_client *client,
        struct max8973_chip *max;
        int ret;
 
-       pdata = client->dev.platform_data;
+       pdata = dev_get_platdata(&client->dev);
 
        if (!pdata && !client->dev.of_node) {
                dev_err(&client->dev, "No Platform data");
index f3c8f8f9dc39d48e8b586cb808cfdc568ae67d27..7827384680d64a7f62b5e248d687953e5ca8ae7b 100644 (file)
@@ -21,6 +21,7 @@ static void of_get_regulation_constraints(struct device_node *np,
 {
        const __be32 *min_uV, *max_uV, *uV_offset;
        const __be32 *min_uA, *max_uA, *ramp_delay;
+       struct property *prop;
        struct regulation_constraints *constraints = &(*init_data)->constraints;
 
        constraints->name = of_get_property(np, "regulator-name", NULL);
@@ -64,9 +65,14 @@ static void of_get_regulation_constraints(struct device_node *np,
        if (of_property_read_bool(np, "regulator-allow-bypass"))
                constraints->valid_ops_mask |= REGULATOR_CHANGE_BYPASS;
 
-       ramp_delay = of_get_property(np, "regulator-ramp-delay", NULL);
-       if (ramp_delay)
-               constraints->ramp_delay = be32_to_cpu(*ramp_delay);
+       prop = of_find_property(np, "regulator-ramp-delay", NULL);
+       if (prop && prop->value) {
+               ramp_delay = prop->value;
+               if (*ramp_delay)
+                       constraints->ramp_delay = be32_to_cpu(*ramp_delay);
+               else
+                       constraints->ramp_disable = true;
+       }
 }
 
 /**
index d0c87856dd25fdd7eefaf14f784e6f0ad58a48f2..488dfe7ce9a6c048a751029451df59d6f6f2c074 100644 (file)
@@ -97,10 +97,15 @@ static const struct regs_info palmas_regs_info[] = {
                .ctrl_addr      = PALMAS_SMPS9_CTRL,
        },
        {
-               .name           = "SMPS10",
+               .name           = "SMPS10_OUT2",
                .sname          = "smps10-in",
                .ctrl_addr      = PALMAS_SMPS10_CTRL,
        },
+       {
+               .name           = "SMPS10_OUT1",
+               .sname          = "smps10-out2",
+               .ctrl_addr      = PALMAS_SMPS10_CTRL,
+       },
        {
                .name           = "LDO1",
                .sname          = "ldo1-in",
@@ -487,6 +492,8 @@ static struct regulator_ops palmas_ops_smps10 = {
        .set_voltage_sel        = regulator_set_voltage_sel_regmap,
        .list_voltage           = regulator_list_voltage_linear,
        .map_voltage            = regulator_map_voltage_linear,
+       .set_bypass             = regulator_set_bypass_regmap,
+       .get_bypass             = regulator_get_bypass_regmap,
 };
 
 static int palmas_is_enabled_ldo(struct regulator_dev *dev)
@@ -538,7 +545,8 @@ static int palmas_smps_init(struct palmas *palmas, int id,
                return ret;
 
        switch (id) {
-       case PALMAS_REG_SMPS10:
+       case PALMAS_REG_SMPS10_OUT1:
+       case PALMAS_REG_SMPS10_OUT2:
                reg &= ~PALMAS_SMPS10_CTRL_MODE_SLEEP_MASK;
                if (reg_init->mode_sleep)
                        reg |= reg_init->mode_sleep <<
@@ -681,7 +689,8 @@ static struct of_regulator_match palmas_matches[] = {
        { .name = "smps7", },
        { .name = "smps8", },
        { .name = "smps9", },
-       { .name = "smps10", },
+       { .name = "smps10_out2", },
+       { .name = "smps10_out1", },
        { .name = "ldo1", },
        { .name = "ldo2", },
        { .name = "ldo3", },
@@ -765,7 +774,7 @@ static void palmas_dt_to_pdata(struct device *dev,
 static int palmas_regulators_probe(struct platform_device *pdev)
 {
        struct palmas *palmas = dev_get_drvdata(pdev->dev.parent);
-       struct palmas_pmic_platform_data *pdata = pdev->dev.platform_data;
+       struct palmas_pmic_platform_data *pdata = dev_get_platdata(&pdev->dev);
        struct device_node *node = pdev->dev.of_node;
        struct regulator_dev *rdev;
        struct regulator_config config = { };
@@ -838,7 +847,8 @@ static int palmas_regulators_probe(struct platform_device *pdev)
                                continue;
                        ramp_delay_support = true;
                        break;
-               case PALMAS_REG_SMPS10:
+               case PALMAS_REG_SMPS10_OUT1:
+               case PALMAS_REG_SMPS10_OUT2:
                        if (!PALMAS_PMIC_HAS(palmas, SMPS10_BOOST))
                                continue;
                }
@@ -872,7 +882,8 @@ static int palmas_regulators_probe(struct platform_device *pdev)
                pmic->desc[id].id = id;
 
                switch (id) {
-               case PALMAS_REG_SMPS10:
+               case PALMAS_REG_SMPS10_OUT1:
+               case PALMAS_REG_SMPS10_OUT2:
                        pmic->desc[id].n_voltages = PALMAS_SMPS10_NUM_VOLTAGES;
                        pmic->desc[id].ops = &palmas_ops_smps10;
                        pmic->desc[id].vsel_reg =
@@ -882,7 +893,14 @@ static int palmas_regulators_probe(struct platform_device *pdev)
                        pmic->desc[id].enable_reg =
                                        PALMAS_BASE_TO_REG(PALMAS_SMPS_BASE,
                                                        PALMAS_SMPS10_CTRL);
-                       pmic->desc[id].enable_mask = SMPS10_BOOST_EN;
+                       if (id == PALMAS_REG_SMPS10_OUT1)
+                               pmic->desc[id].enable_mask = SMPS10_SWITCH_EN;
+                       else
+                               pmic->desc[id].enable_mask = SMPS10_BOOST_EN;
+                       pmic->desc[id].bypass_reg =
+                                       PALMAS_BASE_TO_REG(PALMAS_SMPS_BASE,
+                                                       PALMAS_SMPS10_CTRL);
+                       pmic->desc[id].bypass_mask = SMPS10_BYPASS_EN;
                        pmic->desc[id].min_uV = 3750000;
                        pmic->desc[id].uV_step = 1250000;
                        break;
index 1a73a297fe730533cc6be23469eee0014f6dbe80..b49eaeedea849f5c380059417050bd9d75f2901e 100644 (file)
@@ -243,7 +243,7 @@ static int pcap_regulator_probe(struct platform_device *pdev)
        struct regulator_config config = { };
 
        config.dev = &pdev->dev;
-       config.init_data = pdev->dev.platform_data;
+       config.init_data = dev_get_platdata(&pdev->dev);
        config.driver_data = pcap;
 
        rdev = regulator_register(&pcap_regulators[pdev->id], &config);
index 54df9f7cb504b7da2f4b6c6b26c8389bcdd3f117..0f3576d48abf9733cb87a27231f471ca263cade9 100644 (file)
@@ -86,7 +86,7 @@ static int pcf50633_regulator_probe(struct platform_device *pdev)
        pcf = dev_to_pcf50633(pdev->dev.parent);
 
        config.dev = &pdev->dev;
-       config.init_data = pdev->dev.platform_data;
+       config.init_data = dev_get_platdata(&pdev->dev);
        config.driver_data = pcf;
        config.regmap = pcf->regmap;
 
diff --git a/drivers/regulator/pfuze100-regulator.c b/drivers/regulator/pfuze100-regulator.c
new file mode 100644 (file)
index 0000000..ba67b2c
--- /dev/null
@@ -0,0 +1,445 @@
+/*
+ * Copyright (C) 2011-2013 Freescale Semiconductor, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/pfuze100.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/regmap.h>
+
+#define PFUZE_NUMREGS          128
+#define PFUZE100_VOL_OFFSET    0
+#define PFUZE100_STANDBY_OFFSET        1
+#define PFUZE100_MODE_OFFSET   3
+#define PFUZE100_CONF_OFFSET   4
+
+#define PFUZE100_DEVICEID      0x0
+#define PFUZE100_REVID         0x3
+#define PFUZE100_FABID         0x3
+
+#define PFUZE100_SW1ABVOL      0x20
+#define PFUZE100_SW1CVOL       0x2e
+#define PFUZE100_SW2VOL                0x35
+#define PFUZE100_SW3AVOL       0x3c
+#define PFUZE100_SW3BVOL       0x43
+#define PFUZE100_SW4VOL                0x4a
+#define PFUZE100_SWBSTCON1     0x66
+#define PFUZE100_VREFDDRCON    0x6a
+#define PFUZE100_VSNVSVOL      0x6b
+#define PFUZE100_VGEN1VOL      0x6c
+#define PFUZE100_VGEN2VOL      0x6d
+#define PFUZE100_VGEN3VOL      0x6e
+#define PFUZE100_VGEN4VOL      0x6f
+#define PFUZE100_VGEN5VOL      0x70
+#define PFUZE100_VGEN6VOL      0x71
+
+struct pfuze_regulator {
+       struct regulator_desc desc;
+       unsigned char stby_reg;
+       unsigned char stby_mask;
+};
+
+struct pfuze_chip {
+       struct regmap *regmap;
+       struct device *dev;
+       struct pfuze_regulator regulator_descs[PFUZE100_MAX_REGULATOR];
+       struct regulator_dev *regulators[PFUZE100_MAX_REGULATOR];
+};
+
+static const int pfuze100_swbst[] = {
+       5000000, 5050000, 5100000, 5150000,
+};
+
+static const int pfuze100_vsnvs[] = {
+       1000000, 1100000, 1200000, 1300000, 1500000, 1800000, 3000000,
+};
+
+static const struct i2c_device_id pfuze_device_id[] = {
+       {.name = "pfuze100"},
+       {},
+};
+MODULE_DEVICE_TABLE(i2c, pfuze_device_id);
+
+static const struct of_device_id pfuze_dt_ids[] = {
+       { .compatible = "fsl,pfuze100" },
+       {},
+};
+MODULE_DEVICE_TABLE(of, pfuze_dt_ids);
+
+static int pfuze100_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay)
+{
+       struct pfuze_chip *pfuze100 = rdev_get_drvdata(rdev);
+       int id = rdev->desc->id;
+       unsigned int ramp_bits;
+       int ret;
+
+       if (id < PFUZE100_SWBST) {
+               ramp_delay = 12500 / ramp_delay;
+               ramp_bits = (ramp_delay >> 1) - (ramp_delay >> 3);
+               ret = regmap_update_bits(pfuze100->regmap,
+                                        rdev->desc->vsel_reg + 4,
+                                        0xc0, ramp_bits << 6);
+               if (ret < 0)
+                       dev_err(pfuze100->dev, "ramp failed, err %d\n", ret);
+       } else
+               ret = -EACCES;
+
+       return ret;
+}
+
+static struct regulator_ops pfuze100_ldo_regulator_ops = {
+       .enable = regulator_enable_regmap,
+       .disable = regulator_disable_regmap,
+       .is_enabled = regulator_is_enabled_regmap,
+       .list_voltage = regulator_list_voltage_linear,
+       .set_voltage_sel = regulator_set_voltage_sel_regmap,
+       .get_voltage_sel = regulator_get_voltage_sel_regmap,
+};
+
+static struct regulator_ops pfuze100_fixed_regulator_ops = {
+       .list_voltage = regulator_list_voltage_linear,
+};
+
+static struct regulator_ops pfuze100_sw_regulator_ops = {
+       .list_voltage = regulator_list_voltage_linear,
+       .set_voltage_sel = regulator_set_voltage_sel_regmap,
+       .get_voltage_sel = regulator_get_voltage_sel_regmap,
+       .set_voltage_time_sel = regulator_set_voltage_time_sel,
+       .set_ramp_delay = pfuze100_set_ramp_delay,
+};
+
+static struct regulator_ops pfuze100_swb_regulator_ops = {
+       .list_voltage = regulator_list_voltage_table,
+       .map_voltage = regulator_map_voltage_ascend,
+       .set_voltage_sel = regulator_set_voltage_sel_regmap,
+       .get_voltage_sel = regulator_get_voltage_sel_regmap,
+
+};
+
+#define PFUZE100_FIXED_REG(_name, base, voltage)       \
+       [PFUZE100_ ## _name] = {        \
+               .desc = {       \
+                       .name = #_name, \
+                       .n_voltages = 1,        \
+                       .ops = &pfuze100_fixed_regulator_ops,   \
+                       .type = REGULATOR_VOLTAGE,      \
+                       .id = PFUZE100_ ## _name,       \
+                       .owner = THIS_MODULE,   \
+                       .min_uV = (voltage),    \
+                       .enable_reg = (base),   \
+                       .enable_mask = 0x10,    \
+               },      \
+       }
+
+#define PFUZE100_SW_REG(_name, base, min, max, step)   \
+       [PFUZE100_ ## _name] = {        \
+               .desc = {       \
+                       .name = #_name,\
+                       .n_voltages = ((max) - (min)) / (step) + 1,     \
+                       .ops = &pfuze100_sw_regulator_ops,      \
+                       .type = REGULATOR_VOLTAGE,      \
+                       .id = PFUZE100_ ## _name,       \
+                       .owner = THIS_MODULE,   \
+                       .min_uV = (min),        \
+                       .uV_step = (step),      \
+                       .vsel_reg = (base) + PFUZE100_VOL_OFFSET,       \
+                       .vsel_mask = 0x3f,      \
+               },      \
+               .stby_reg = (base) + PFUZE100_STANDBY_OFFSET,   \
+               .stby_mask = 0x3f,      \
+       }
+
+#define PFUZE100_SWB_REG(_name, base, mask, voltages)  \
+       [PFUZE100_ ## _name] = {        \
+               .desc = {       \
+                       .name = #_name, \
+                       .n_voltages = ARRAY_SIZE(voltages),     \
+                       .ops = &pfuze100_swb_regulator_ops,     \
+                       .type = REGULATOR_VOLTAGE,      \
+                       .id = PFUZE100_ ## _name,       \
+                       .owner = THIS_MODULE,   \
+                       .volt_table = voltages, \
+                       .vsel_reg = (base),     \
+                       .vsel_mask = (mask),    \
+               },      \
+       }
+
+#define PFUZE100_VGEN_REG(_name, base, min, max, step) \
+       [PFUZE100_ ## _name] = {        \
+               .desc = {       \
+                       .name = #_name, \
+                       .n_voltages = ((max) - (min)) / (step) + 1,     \
+                       .ops = &pfuze100_ldo_regulator_ops,     \
+                       .type = REGULATOR_VOLTAGE,      \
+                       .id = PFUZE100_ ## _name,       \
+                       .owner = THIS_MODULE,   \
+                       .min_uV = (min),        \
+                       .uV_step = (step),      \
+                       .vsel_reg = (base),     \
+                       .vsel_mask = 0xf,       \
+                       .enable_reg = (base),   \
+                       .enable_mask = 0x10,    \
+               },      \
+               .stby_reg = (base),     \
+               .stby_mask = 0x20,      \
+       }
+
+static struct pfuze_regulator pfuze100_regulators[] = {
+       PFUZE100_SW_REG(SW1AB, PFUZE100_SW1ABVOL, 300000, 1875000, 25000),
+       PFUZE100_SW_REG(SW1C, PFUZE100_SW1CVOL, 300000, 1875000, 25000),
+       PFUZE100_SW_REG(SW2, PFUZE100_SW2VOL, 400000, 1975000, 25000),
+       PFUZE100_SW_REG(SW3A, PFUZE100_SW3AVOL, 400000, 1975000, 25000),
+       PFUZE100_SW_REG(SW3B, PFUZE100_SW3BVOL, 400000, 1975000, 25000),
+       PFUZE100_SW_REG(SW4, PFUZE100_SW4VOL, 400000, 1975000, 25000),
+       PFUZE100_SWB_REG(SWBST, PFUZE100_SWBSTCON1, 0x3 , pfuze100_swbst),
+       PFUZE100_SWB_REG(VSNVS, PFUZE100_VSNVSVOL, 0x7, pfuze100_vsnvs),
+       PFUZE100_FIXED_REG(VREFDDR, PFUZE100_VREFDDRCON, 750000),
+       PFUZE100_VGEN_REG(VGEN1, PFUZE100_VGEN1VOL, 800000, 1550000, 50000),
+       PFUZE100_VGEN_REG(VGEN2, PFUZE100_VGEN2VOL, 800000, 1550000, 50000),
+       PFUZE100_VGEN_REG(VGEN3, PFUZE100_VGEN3VOL, 1800000, 3300000, 100000),
+       PFUZE100_VGEN_REG(VGEN4, PFUZE100_VGEN4VOL, 1800000, 3300000, 100000),
+       PFUZE100_VGEN_REG(VGEN5, PFUZE100_VGEN5VOL, 1800000, 3300000, 100000),
+       PFUZE100_VGEN_REG(VGEN6, PFUZE100_VGEN6VOL, 1800000, 3300000, 100000),
+};
+
+#ifdef CONFIG_OF
+static struct of_regulator_match pfuze100_matches[] = {
+       { .name = "sw1ab",      },
+       { .name = "sw1c",       },
+       { .name = "sw2",        },
+       { .name = "sw3a",       },
+       { .name = "sw3b",       },
+       { .name = "sw4",        },
+       { .name = "swbst",      },
+       { .name = "vsnvs",      },
+       { .name = "vrefddr",    },
+       { .name = "vgen1",      },
+       { .name = "vgen2",      },
+       { .name = "vgen3",      },
+       { .name = "vgen4",      },
+       { .name = "vgen5",      },
+       { .name = "vgen6",      },
+};
+
+static int pfuze_parse_regulators_dt(struct pfuze_chip *chip)
+{
+       struct device *dev = chip->dev;
+       struct device_node *np, *parent;
+       int ret;
+
+       np = of_node_get(dev->parent->of_node);
+       if (!np)
+               return 0;
+
+       parent = of_find_node_by_name(np, "regulators");
+       if (!parent) {
+               dev_err(dev, "regulators node not found\n");
+               return -EINVAL;
+       }
+
+       ret = of_regulator_match(dev, parent, pfuze100_matches,
+                                ARRAY_SIZE(pfuze100_matches));
+
+       of_node_put(parent);
+       if (ret < 0) {
+               dev_err(dev, "Error parsing regulator init data: %d\n",
+                       ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+static inline struct regulator_init_data *match_init_data(int index)
+{
+       return pfuze100_matches[index].init_data;
+}
+
+static inline struct device_node *match_of_node(int index)
+{
+       return pfuze100_matches[index].of_node;
+}
+#else
+static int pfuze_parse_regulators_dt(struct pfuze_chip *chip)
+{
+       return 0;
+}
+
+static inline struct regulator_init_data *match_init_data(int index)
+{
+       return NULL;
+}
+
+static inline struct device_node *match_of_node(int index)
+{
+       return NULL;
+}
+#endif
+
+static int pfuze_identify(struct pfuze_chip *pfuze_chip)
+{
+       unsigned int value;
+       int ret;
+
+       ret = regmap_read(pfuze_chip->regmap, PFUZE100_DEVICEID, &value);
+       if (ret)
+               return ret;
+
+       if (value & 0x0f) {
+               dev_warn(pfuze_chip->dev, "Illegal ID: %x\n", value);
+               return -ENODEV;
+       }
+
+       ret = regmap_read(pfuze_chip->regmap, PFUZE100_REVID, &value);
+       if (ret)
+               return ret;
+       dev_info(pfuze_chip->dev,
+                "Full lay: %x, Metal lay: %x\n",
+                (value & 0xf0) >> 4, value & 0x0f);
+
+       ret = regmap_read(pfuze_chip->regmap, PFUZE100_FABID, &value);
+       if (ret)
+               return ret;
+       dev_info(pfuze_chip->dev, "FAB: %x, FIN: %x\n",
+                (value & 0xc) >> 2, value & 0x3);
+
+       return 0;
+}
+
+static const struct regmap_config pfuze_regmap_config = {
+       .reg_bits = 8,
+       .val_bits = 8,
+       .max_register = PFUZE_NUMREGS - 1,
+       .cache_type = REGCACHE_RBTREE,
+};
+
+static int pfuze100_regulator_probe(struct i2c_client *client,
+                                   const struct i2c_device_id *id)
+{
+       struct pfuze_chip *pfuze_chip;
+       struct pfuze_regulator_platform_data *pdata =
+           dev_get_platdata(&client->dev);
+       struct regulator_config config = { };
+       int i, ret;
+
+       pfuze_chip = devm_kzalloc(&client->dev, sizeof(*pfuze_chip),
+                       GFP_KERNEL);
+       if (!pfuze_chip)
+               return -ENOMEM;
+
+       i2c_set_clientdata(client, pfuze_chip);
+
+       memcpy(pfuze_chip->regulator_descs, pfuze100_regulators,
+               sizeof(pfuze_chip->regulator_descs));
+
+       pfuze_chip->dev = &client->dev;
+
+       pfuze_chip->regmap = devm_regmap_init_i2c(client, &pfuze_regmap_config);
+       if (IS_ERR(pfuze_chip->regmap)) {
+               ret = PTR_ERR(pfuze_chip->regmap);
+               dev_err(&client->dev,
+                       "regmap allocation failed with err %d\n", ret);
+               return ret;
+       }
+
+       ret = pfuze_identify(pfuze_chip);
+       if (ret) {
+               dev_err(&client->dev, "unrecognized pfuze chip ID!\n");
+               return ret;
+       }
+
+       ret = pfuze_parse_regulators_dt(pfuze_chip);
+       if (ret)
+               return ret;
+
+       for (i = 0; i < PFUZE100_MAX_REGULATOR; i++) {
+               struct regulator_init_data *init_data;
+               struct regulator_desc *desc;
+               int val;
+
+               desc = &pfuze_chip->regulator_descs[i].desc;
+
+               if (pdata)
+                       init_data = pdata->init_data[i];
+               else
+                       init_data = match_init_data(i);
+
+               /* SW2~SW4 high bit check and modify the voltage value table */
+               if (i > PFUZE100_SW1C && i < PFUZE100_SWBST) {
+                       regmap_read(pfuze_chip->regmap, desc->vsel_reg, &val);
+                       if (val & 0x40) {
+                               desc->min_uV = 800000;
+                               desc->uV_step = 50000;
+                               desc->n_voltages = 51;
+                       }
+               }
+
+               config.dev = &client->dev;
+               config.init_data = init_data;
+               config.driver_data = pfuze_chip;
+               config.of_node = match_of_node(i);
+
+               pfuze_chip->regulators[i] = regulator_register(desc, &config);
+               if (IS_ERR(pfuze_chip->regulators[i])) {
+                       dev_err(&client->dev, "register regulator%s failed\n",
+                               pfuze100_regulators[i].desc.name);
+                       ret = PTR_ERR(pfuze_chip->regulators[i]);
+                       while (--i >= 0)
+                               regulator_unregister(pfuze_chip->regulators[i]);
+                       return ret;
+               }
+       }
+
+       return 0;
+}
+
+static int pfuze100_regulator_remove(struct i2c_client *client)
+{
+       int i;
+       struct pfuze_chip *pfuze_chip = i2c_get_clientdata(client);
+
+       for (i = 0; i < PFUZE100_MAX_REGULATOR; i++)
+               regulator_unregister(pfuze_chip->regulators[i]);
+
+       return 0;
+}
+
+static struct i2c_driver pfuze_driver = {
+       .id_table = pfuze_device_id,
+       .driver = {
+               .name = "pfuze100-regulator",
+               .owner = THIS_MODULE,
+               .of_match_table = pfuze_dt_ids,
+       },
+       .probe = pfuze100_regulator_probe,
+       .remove = pfuze100_regulator_remove,
+};
+module_i2c_driver(pfuze_driver);
+
+MODULE_AUTHOR("Robin Gong <b38343@freescale.com>");
+MODULE_DESCRIPTION("Regulator Driver for Freescale PFUZE100 PMIC");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("i2c:pfuze100-regulator");
index 2f62564ca9362e06ae8e5479864af5c48f12342e..5eba2ff8c0e86eff58bf5c48e77e4ad352f8b00a 100644 (file)
 #include <linux/gpio.h>
 #include <linux/slab.h>
 #include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
 #include <linux/platform_device.h>
 #include <linux/regulator/driver.h>
 #include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
 #include <linux/mfd/samsung/core.h>
 #include <linux/mfd/samsung/s2mps11.h>
 
+#define S2MPS11_REGULATOR_CNT ARRAY_SIZE(regulators)
+
 struct s2mps11_info {
        struct regulator_dev *rdev[S2MPS11_REGULATOR_MAX];
 
@@ -31,11 +36,6 @@ struct s2mps11_info {
        int ramp_delay16;
        int ramp_delay7810;
        int ramp_delay9;
-
-       bool buck6_ramp;
-       bool buck2_ramp;
-       bool buck3_ramp;
-       bool buck4_ramp;
 };
 
 static int get_ramp_delay(int ramp_delay)
@@ -50,9 +50,171 @@ static int get_ramp_delay(int ramp_delay)
                        break;
                cnt++;
        }
+
+       if (cnt > 3)
+               cnt = 3;
+
        return cnt;
 }
 
+static int s2mps11_regulator_set_voltage_time_sel(struct regulator_dev *rdev,
+                                  unsigned int old_selector,
+                                  unsigned int new_selector)
+{
+       struct s2mps11_info *s2mps11 = rdev_get_drvdata(rdev);
+       unsigned int ramp_delay = 0;
+       int old_volt, new_volt;
+
+       switch (rdev->desc->id) {
+       case S2MPS11_BUCK2:
+               ramp_delay = s2mps11->ramp_delay2;
+               break;
+       case S2MPS11_BUCK3:
+               ramp_delay = s2mps11->ramp_delay34;
+               break;
+       case S2MPS11_BUCK4:
+               ramp_delay = s2mps11->ramp_delay34;
+               break;
+       case S2MPS11_BUCK5:
+               ramp_delay = s2mps11->ramp_delay5;
+               break;
+       case S2MPS11_BUCK6:
+       case S2MPS11_BUCK1:
+               ramp_delay = s2mps11->ramp_delay16;
+               break;
+       case S2MPS11_BUCK7:
+       case S2MPS11_BUCK8:
+       case S2MPS11_BUCK10:
+               ramp_delay = s2mps11->ramp_delay7810;
+               break;
+       case S2MPS11_BUCK9:
+               ramp_delay = s2mps11->ramp_delay9;
+       }
+
+       if (ramp_delay == 0)
+               ramp_delay = rdev->desc->ramp_delay;
+
+       old_volt = rdev->desc->min_uV + (rdev->desc->uV_step * old_selector);
+       new_volt = rdev->desc->min_uV + (rdev->desc->uV_step * new_selector);
+
+       return DIV_ROUND_UP(abs(new_volt - old_volt), ramp_delay);
+}
+
+static int s2mps11_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay)
+{
+       struct s2mps11_info *s2mps11 = rdev_get_drvdata(rdev);
+       unsigned int ramp_val, ramp_shift, ramp_reg = S2MPS11_REG_RAMP_BUCK;
+       unsigned int ramp_enable = 1, enable_shift = 0;
+       int ret;
+
+       switch (rdev->desc->id) {
+       case S2MPS11_BUCK1:
+               if (ramp_delay > s2mps11->ramp_delay16)
+                       s2mps11->ramp_delay16 = ramp_delay;
+               else
+                       ramp_delay = s2mps11->ramp_delay16;
+
+               ramp_shift = S2MPS11_BUCK16_RAMP_SHIFT;
+               break;
+       case S2MPS11_BUCK2:
+               enable_shift = S2MPS11_BUCK2_RAMP_EN_SHIFT;
+               if (!ramp_delay) {
+                       ramp_enable = 0;
+                       break;
+               }
+
+               s2mps11->ramp_delay2 = ramp_delay;
+               ramp_shift = S2MPS11_BUCK2_RAMP_SHIFT;
+               ramp_reg = S2MPS11_REG_RAMP;
+               break;
+       case S2MPS11_BUCK3:
+               enable_shift = S2MPS11_BUCK3_RAMP_EN_SHIFT;
+               if (!ramp_delay) {
+                       ramp_enable = 0;
+                       break;
+               }
+
+               if (ramp_delay > s2mps11->ramp_delay34)
+                       s2mps11->ramp_delay34 = ramp_delay;
+               else
+                       ramp_delay = s2mps11->ramp_delay34;
+
+               ramp_shift = S2MPS11_BUCK34_RAMP_SHIFT;
+               ramp_reg = S2MPS11_REG_RAMP;
+               break;
+       case S2MPS11_BUCK4:
+               enable_shift = S2MPS11_BUCK4_RAMP_EN_SHIFT;
+               if (!ramp_delay) {
+                       ramp_enable = 0;
+                       break;
+               }
+
+               if (ramp_delay > s2mps11->ramp_delay34)
+                       s2mps11->ramp_delay34 = ramp_delay;
+               else
+                       ramp_delay = s2mps11->ramp_delay34;
+
+               ramp_shift = S2MPS11_BUCK34_RAMP_SHIFT;
+               ramp_reg = S2MPS11_REG_RAMP;
+               break;
+       case S2MPS11_BUCK5:
+               s2mps11->ramp_delay5 = ramp_delay;
+               ramp_shift = S2MPS11_BUCK5_RAMP_SHIFT;
+               break;
+       case S2MPS11_BUCK6:
+               enable_shift = S2MPS11_BUCK6_RAMP_EN_SHIFT;
+               if (!ramp_delay) {
+                       ramp_enable = 0;
+                       break;
+               }
+
+               if (ramp_delay > s2mps11->ramp_delay16)
+                       s2mps11->ramp_delay16 = ramp_delay;
+               else
+                       ramp_delay = s2mps11->ramp_delay16;
+
+               ramp_shift = S2MPS11_BUCK16_RAMP_SHIFT;
+               break;
+       case S2MPS11_BUCK7:
+       case S2MPS11_BUCK8:
+       case S2MPS11_BUCK10:
+               if (ramp_delay > s2mps11->ramp_delay7810)
+                       s2mps11->ramp_delay7810 = ramp_delay;
+               else
+                       ramp_delay = s2mps11->ramp_delay7810;
+
+               ramp_shift = S2MPS11_BUCK7810_RAMP_SHIFT;
+               break;
+       case S2MPS11_BUCK9:
+               s2mps11->ramp_delay9 = ramp_delay;
+               ramp_shift = S2MPS11_BUCK9_RAMP_SHIFT;
+               break;
+       default:
+               return 0;
+       }
+
+       if (!ramp_enable)
+               goto ramp_disable;
+
+       if (enable_shift) {
+               ret = regmap_update_bits(rdev->regmap, S2MPS11_REG_RAMP,
+                                       1 << enable_shift, 1 << enable_shift);
+               if (ret) {
+                       dev_err(&rdev->dev, "failed to enable ramp rate\n");
+                       return ret;
+               }
+       }
+
+       ramp_val = get_ramp_delay(ramp_delay);
+
+       return regmap_update_bits(rdev->regmap, ramp_reg, 0x3 << ramp_shift,
+                                 ramp_val << ramp_shift);
+
+ramp_disable:
+       return regmap_update_bits(rdev->regmap, S2MPS11_REG_RAMP,
+                                 1 << enable_shift, 0);
+}
+
 static struct regulator_ops s2mps11_ldo_ops = {
        .list_voltage           = regulator_list_voltage_linear,
        .map_voltage            = regulator_map_voltage_linear,
@@ -72,7 +234,8 @@ static struct regulator_ops s2mps11_buck_ops = {
        .disable                = regulator_disable_regmap,
        .get_voltage_sel        = regulator_get_voltage_sel_regmap,
        .set_voltage_sel        = regulator_set_voltage_sel_regmap,
-       .set_voltage_time_sel   = regulator_set_voltage_time_sel,
+       .set_voltage_time_sel   = s2mps11_regulator_set_voltage_time_sel,
+       .set_ramp_delay         = s2mps11_set_ramp_delay,
 };
 
 #define regulator_desc_ldo1(num)       {               \
@@ -239,59 +402,51 @@ static int s2mps11_pmic_probe(struct platform_device *pdev)
 {
        struct sec_pmic_dev *iodev = dev_get_drvdata(pdev->dev.parent);
        struct sec_platform_data *pdata = dev_get_platdata(iodev->dev);
+       struct of_regulator_match rdata[S2MPS11_REGULATOR_MAX];
+       struct device_node *reg_np = NULL;
        struct regulator_config config = { };
        struct s2mps11_info *s2mps11;
        int i, ret;
-       unsigned char ramp_enable, ramp_reg = 0;
-
-       if (!pdata) {
-               dev_err(pdev->dev.parent, "Platform data not supplied\n");
-               return -ENODEV;
-       }
 
        s2mps11 = devm_kzalloc(&pdev->dev, sizeof(struct s2mps11_info),
                                GFP_KERNEL);
        if (!s2mps11)
                return -ENOMEM;
 
-       platform_set_drvdata(pdev, s2mps11);
+       if (!iodev->dev->of_node) {
+               if (pdata) {
+                       goto common_reg;
+               } else {
+                       dev_err(pdev->dev.parent,
+                               "Platform data or DT node not supplied\n");
+                       return -ENODEV;
+               }
+       }
 
-       s2mps11->ramp_delay2 = pdata->buck2_ramp_delay;
-       s2mps11->ramp_delay34 = pdata->buck34_ramp_delay;
-       s2mps11->ramp_delay5 = pdata->buck5_ramp_delay;
-       s2mps11->ramp_delay16 = pdata->buck16_ramp_delay;
-       s2mps11->ramp_delay7810 = pdata->buck7810_ramp_delay;
-       s2mps11->ramp_delay9 = pdata->buck9_ramp_delay;
-
-       s2mps11->buck6_ramp = pdata->buck6_ramp_enable;
-       s2mps11->buck2_ramp = pdata->buck2_ramp_enable;
-       s2mps11->buck3_ramp = pdata->buck3_ramp_enable;
-       s2mps11->buck4_ramp = pdata->buck4_ramp_enable;
-
-       ramp_enable = (s2mps11->buck2_ramp << 3) | (s2mps11->buck3_ramp << 2) |
-               (s2mps11->buck4_ramp << 1) | s2mps11->buck6_ramp ;
-
-       if (ramp_enable) {
-               if (s2mps11->buck2_ramp)
-                       ramp_reg |= get_ramp_delay(s2mps11->ramp_delay2) << 6;
-               if (s2mps11->buck3_ramp || s2mps11->buck4_ramp)
-                       ramp_reg |= get_ramp_delay(s2mps11->ramp_delay34) << 4;
-               sec_reg_write(iodev, S2MPS11_REG_RAMP, ramp_reg | ramp_enable);
+       for (i = 0; i < S2MPS11_REGULATOR_CNT; i++)
+               rdata[i].name = regulators[i].name;
+
+       reg_np = of_find_node_by_name(iodev->dev->of_node, "regulators");
+       if (!reg_np) {
+               dev_err(&pdev->dev, "could not find regulators sub-node\n");
+               return -EINVAL;
        }
 
-       ramp_reg &= 0x00;
-       ramp_reg |= get_ramp_delay(s2mps11->ramp_delay5) << 6;
-       ramp_reg |= get_ramp_delay(s2mps11->ramp_delay16) << 4;
-       ramp_reg |= get_ramp_delay(s2mps11->ramp_delay7810) << 2;
-       ramp_reg |= get_ramp_delay(s2mps11->ramp_delay9);
-       sec_reg_write(iodev, S2MPS11_REG_RAMP_BUCK, ramp_reg);
+       of_regulator_match(&pdev->dev, reg_np, rdata, S2MPS11_REGULATOR_MAX);
 
-       for (i = 0; i < S2MPS11_REGULATOR_MAX; i++) {
+common_reg:
+       platform_set_drvdata(pdev, s2mps11);
 
-               config.dev = &pdev->dev;
-               config.regmap = iodev->regmap;
-               config.init_data = pdata->regulators[i].initdata;
-               config.driver_data = s2mps11;
+       config.dev = &pdev->dev;
+       config.regmap = iodev->regmap;
+       config.driver_data = s2mps11;
+       for (i = 0; i < S2MPS11_REGULATOR_MAX; i++) {
+               if (!reg_np) {
+                       config.init_data = pdata->regulators[i].initdata;
+               } else {
+                       config.init_data = rdata[i].init_data;
+                       config.of_node = rdata[i].of_node;
+               }
 
                s2mps11->rdev[i] = regulator_register(&regulators[i], &config);
                if (IS_ERR(s2mps11->rdev[i])) {
index 6e67be75ea1b5fc63095980fd24d68591d7b5389..9392a7ca3d2dc6df9152cb1aeb13c776b856e253 100644 (file)
@@ -275,7 +275,7 @@ static int tps51632_probe(struct i2c_client *client,
                }
        }
 
-       pdata = client->dev.platform_data;
+       pdata = dev_get_platdata(&client->dev);
        if (!pdata && client->dev.of_node)
                pdata = of_get_tps51632_platform_data(&client->dev);
        if (!pdata) {
index a490d5b749b2c75efffe33c4249fdf06a83c4e33..0b7ebb1ebf859bb5d409c19114b4a89f40a094b1 100644 (file)
@@ -350,7 +350,7 @@ static int tps62360_probe(struct i2c_client *client,
        int i;
        int chip_id;
 
-       pdata = client->dev.platform_data;
+       pdata = dev_get_platdata(&client->dev);
 
        if (client->dev.of_node) {
                const struct of_device_id *match;
index 9d053e23e9ebd188778d4960409ee3c1baffa540..a15263d4bdff8a2fbd433c1315291def0bf66917 100644 (file)
@@ -218,7 +218,7 @@ static int tps_65023_probe(struct i2c_client *client,
         * init_data points to array of regulator_init structures
         * coming from the board-evm file.
         */
-       init_data = client->dev.platform_data;
+       init_data = dev_get_platdata(&client->dev);
        if (!init_data)
                return -EIO;
 
index 1094393155ed17927e3cafd9170cdaf1fb0bb602..62e8d28beabd7cc8ad51b2935e29b7d348e17438 100644 (file)
@@ -601,7 +601,7 @@ static int pmic_probe(struct spi_device *spi)
        struct regulator_config config = { };
        int ret = 0, i;
 
-       init_data = dev->platform_data;
+       init_data = dev_get_platdata(dev);
        if (!init_data) {
                dev_err(dev, "could not find regulator platform data\n");
                return -EINVAL;
index 17e994e47dc139c3117a8affe76eef64b5e092e1..281e52ac64ba0e3caf66c0322bd23ac91455a8a4 100644 (file)
@@ -118,6 +118,15 @@ struct tps65912_reg {
        int eco_reg;
 };
 
+static const struct regulator_linear_range tps65912_ldo_ranges[] = {
+       { .min_uV = 800000, .max_uV = 1600000, .min_sel =  0, .max_sel = 32,
+         .uV_step = 25000 },
+       { .min_uV = 1650000, .max_uV = 3000000, .min_sel = 33, .max_sel = 60,
+         .uV_step = 50000 },
+       { .min_uV = 3100000, .max_uV = 3300000, .min_sel = 61, .max_sel = 63,
+         .uV_step = 100000 },
+};
+
 static int tps65912_get_range(struct tps65912_reg *pmic, int id)
 {
        struct tps65912 *mfd = pmic->mfd;
@@ -184,20 +193,6 @@ static unsigned long tps65912_vsel_to_uv_range3(u8 vsel)
        return uv;
 }
 
-static unsigned long tps65912_vsel_to_uv_ldo(u8 vsel)
-{
-       unsigned long uv = 0;
-
-       if (vsel <= 32)
-               uv = ((vsel * 25000) + 800000);
-       else if (vsel > 32 && vsel <= 60)
-               uv = (((vsel - 32) * 50000) + 1600000);
-       else if (vsel > 60)
-               uv = (((vsel - 60) * 100000) + 3000000);
-
-       return uv;
-}
-
 static int tps65912_get_ctrl_register(int id)
 {
        if (id >= TPS65912_REG_DCDC1 && id <= TPS65912_REG_LDO4)
@@ -376,9 +371,6 @@ static int tps65912_list_voltage(struct regulator_dev *dev, unsigned selector)
        struct tps65912_reg *pmic = rdev_get_drvdata(dev);
        int range, voltage = 0, id = rdev_get_id(dev);
 
-       if (id >= TPS65912_REG_LDO1 && id <= TPS65912_REG_LDO10)
-               return tps65912_vsel_to_uv_ldo(selector);
-
        if (id > TPS65912_REG_DCDC4)
                return -EINVAL;
 
@@ -456,7 +448,8 @@ static struct regulator_ops tps65912_ops_ldo = {
        .disable = tps65912_reg_disable,
        .get_voltage_sel = tps65912_get_voltage_sel,
        .set_voltage_sel = tps65912_set_voltage_sel,
-       .list_voltage = tps65912_list_voltage,
+       .list_voltage = regulator_list_voltage_linear_range,
+       .map_voltage = regulator_map_voltage_linear_range,
 };
 
 static int tps65912_probe(struct platform_device *pdev)
@@ -495,8 +488,14 @@ static int tps65912_probe(struct platform_device *pdev)
                pmic->desc[i].name = info->name;
                pmic->desc[i].id = i;
                pmic->desc[i].n_voltages = 64;
-               pmic->desc[i].ops = (i > TPS65912_REG_DCDC4 ?
-                       &tps65912_ops_ldo : &tps65912_ops_dcdc);
+               if (i > TPS65912_REG_DCDC4) {
+                       pmic->desc[i].ops = &tps65912_ops_ldo;
+                       pmic->desc[i].linear_ranges = tps65912_ldo_ranges;
+                       pmic->desc[i].n_linear_ranges =
+                                       ARRAY_SIZE(tps65912_ldo_ranges);
+               } else {
+                       pmic->desc[i].ops = &tps65912_ops_dcdc;
+               }
                pmic->desc[i].type = REGULATOR_VOLTAGE;
                pmic->desc[i].owner = THIS_MODULE;
                range = tps65912_get_range(pmic, i);
index 93bc4f456da4c2d3d5566427851f5eceaa0082e1..78aae4cbb00424864fbad430fc55379a060065c5 100644 (file)
@@ -1108,7 +1108,7 @@ static int twlreg_probe(struct platform_device *pdev)
                drvdata = NULL;
        } else {
                id = pdev->id;
-               initdata = pdev->dev.platform_data;
+               initdata = dev_get_platdata(&pdev->dev);
                for (i = 0, template = NULL; i < ARRAY_SIZE(twl_of_match); i++) {
                        template = twl_of_match[i].data;
                        if (template && template->desc.id == id)
index a7c8deb5f28fc02f93955f1c2b8f1ddcf2354f9a..765acc11c9c83c151a60f005c770d4639352701c 100644 (file)
@@ -111,7 +111,7 @@ static int regulator_userspace_consumer_probe(struct platform_device *pdev)
        struct userspace_consumer_data *drvdata;
        int ret;
 
-       pdata = pdev->dev.platform_data;
+       pdata = dev_get_platdata(&pdev->dev);
        if (!pdata)
                return -EINVAL;
 
index a9d4284ea007cc83658db17724e976d34dc9aafb..f53e78b9a84eadf1c9b2cded2c1ac00f7b7f055c 100644 (file)
@@ -287,7 +287,7 @@ static const struct attribute_group regulator_virtual_attr_group = {
 
 static int regulator_virtual_probe(struct platform_device *pdev)
 {
-       char *reg_id = pdev->dev.platform_data;
+       char *reg_id = dev_get_platdata(&pdev->dev);
        struct virtual_consumer_data *drvdata;
        int ret;
 
index 46938cf162ad7821403dc97b70b2db3298c86e85..11861cb861df04394ff1de205eb7896c9ab98f89 100644 (file)
@@ -451,7 +451,7 @@ static void wm831x_buckv_dvs_init(struct wm831x_dcdc *dcdc,
 static int wm831x_buckv_probe(struct platform_device *pdev)
 {
        struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
-       struct wm831x_pdata *pdata = wm831x->dev->platform_data;
+       struct wm831x_pdata *pdata = dev_get_platdata(wm831x->dev);
        struct regulator_config config = { };
        int id;
        struct wm831x_dcdc *dcdc;
@@ -624,7 +624,7 @@ static struct regulator_ops wm831x_buckp_ops = {
 static int wm831x_buckp_probe(struct platform_device *pdev)
 {
        struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
-       struct wm831x_pdata *pdata = wm831x->dev->platform_data;
+       struct wm831x_pdata *pdata = dev_get_platdata(wm831x->dev);
        struct regulator_config config = { };
        int id;
        struct wm831x_dcdc *dcdc;
@@ -770,7 +770,7 @@ static struct regulator_ops wm831x_boostp_ops = {
 static int wm831x_boostp_probe(struct platform_device *pdev)
 {
        struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
-       struct wm831x_pdata *pdata = wm831x->dev->platform_data;
+       struct wm831x_pdata *pdata = dev_get_platdata(wm831x->dev);
        struct regulator_config config = { };
        int id = pdev->id % ARRAY_SIZE(pdata->dcdc);
        struct wm831x_dcdc *dcdc;
@@ -880,7 +880,7 @@ static struct regulator_ops wm831x_epe_ops = {
 static int wm831x_epe_probe(struct platform_device *pdev)
 {
        struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
-       struct wm831x_pdata *pdata = wm831x->dev->platform_data;
+       struct wm831x_pdata *pdata = dev_get_platdata(wm831x->dev);
        struct regulator_config config = { };
        int id = pdev->id % ARRAY_SIZE(pdata->epe);
        struct wm831x_dcdc *dcdc;
index 16ebdf94d0a044b6db0111c4d4f9a5c2f8feb5fe..4eb373de1facc63a4041ebc2c2804de358363287 100644 (file)
@@ -151,7 +151,7 @@ static irqreturn_t wm831x_isink_irq(int irq, void *data)
 static int wm831x_isink_probe(struct platform_device *pdev)
 {
        struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
-       struct wm831x_pdata *pdata = wm831x->dev->platform_data;
+       struct wm831x_pdata *pdata = dev_get_platdata(wm831x->dev);
        struct wm831x_isink *isink;
        int id = pdev->id % ARRAY_SIZE(pdata->isink);
        struct regulator_config config = { };
index 9ff883f80878447dc2902de4cae7de8e206b47fd..1432b26ef2e97b0830a2ecf973789ee20b7991cc 100644 (file)
@@ -62,41 +62,12 @@ static irqreturn_t wm831x_ldo_uv_irq(int irq, void *data)
  * General purpose LDOs
  */
 
-#define WM831X_GP_LDO_SELECTOR_LOW 0xe
-#define WM831X_GP_LDO_MAX_SELECTOR 0x1f
-
-static int wm831x_gp_ldo_list_voltage(struct regulator_dev *rdev,
-                                     unsigned int selector)
-{
-       /* 0.9-1.6V in 50mV steps */
-       if (selector <= WM831X_GP_LDO_SELECTOR_LOW)
-               return 900000 + (selector * 50000);
-       /* 1.7-3.3V in 100mV steps */
-       if (selector <= WM831X_GP_LDO_MAX_SELECTOR)
-               return 1600000 + ((selector - WM831X_GP_LDO_SELECTOR_LOW)
-                                 * 100000);
-       return -EINVAL;
-}
-
-static int wm831x_gp_ldo_map_voltage(struct regulator_dev *rdev,
-                                    int min_uV, int max_uV)
-{
-       int volt, vsel;
-
-       if (min_uV < 900000)
-               vsel = 0;
-       else if (min_uV < 1700000)
-               vsel = ((min_uV - 900000) / 50000);
-       else
-               vsel = ((min_uV - 1700000) / 100000)
-                       + WM831X_GP_LDO_SELECTOR_LOW + 1;
-
-       volt = wm831x_gp_ldo_list_voltage(rdev, vsel);
-       if (volt < min_uV || volt > max_uV)
-               return -EINVAL;
-
-       return vsel;
-}
+static const struct regulator_linear_range wm831x_gp_ldo_ranges[] = {
+       { .min_uV =  900000, .max_uV = 1650000, .min_sel =  0, .max_sel = 14,
+         .uV_step =  50000 },
+       { .min_uV = 1700000, .max_uV = 3300000, .min_sel = 15, .max_sel = 31,
+         .uV_step = 100000 },
+};
 
 static int wm831x_gp_ldo_set_suspend_voltage(struct regulator_dev *rdev,
                                             int uV)
@@ -105,7 +76,7 @@ static int wm831x_gp_ldo_set_suspend_voltage(struct regulator_dev *rdev,
        struct wm831x *wm831x = ldo->wm831x;
        int sel, reg = ldo->base + WM831X_LDO_SLEEP_CONTROL;
 
-       sel = wm831x_gp_ldo_map_voltage(rdev, uV, uV);
+       sel = regulator_map_voltage_linear_range(rdev, uV, uV);
        if (sel < 0)
                return sel;
 
@@ -230,8 +201,8 @@ static unsigned int wm831x_gp_ldo_get_optimum_mode(struct regulator_dev *rdev,
 
 
 static struct regulator_ops wm831x_gp_ldo_ops = {
-       .list_voltage = wm831x_gp_ldo_list_voltage,
-       .map_voltage = wm831x_gp_ldo_map_voltage,
+       .list_voltage = regulator_list_voltage_linear_range,
+       .map_voltage = regulator_map_voltage_linear_range,
        .get_voltage_sel = regulator_get_voltage_sel_regmap,
        .set_voltage_sel = regulator_set_voltage_sel_regmap,
        .set_suspend_voltage = wm831x_gp_ldo_set_suspend_voltage,
@@ -250,7 +221,7 @@ static struct regulator_ops wm831x_gp_ldo_ops = {
 static int wm831x_gp_ldo_probe(struct platform_device *pdev)
 {
        struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
-       struct wm831x_pdata *pdata = wm831x->dev->platform_data;
+       struct wm831x_pdata *pdata = dev_get_platdata(wm831x->dev);
        struct regulator_config config = { };
        int id;
        struct wm831x_ldo *ldo;
@@ -290,7 +261,7 @@ static int wm831x_gp_ldo_probe(struct platform_device *pdev)
 
        ldo->desc.id = id;
        ldo->desc.type = REGULATOR_VOLTAGE;
-       ldo->desc.n_voltages = WM831X_GP_LDO_MAX_SELECTOR + 1;
+       ldo->desc.n_voltages = 32;
        ldo->desc.ops = &wm831x_gp_ldo_ops;
        ldo->desc.owner = THIS_MODULE;
        ldo->desc.vsel_reg = ldo->base + WM831X_LDO_ON_CONTROL;
@@ -299,6 +270,8 @@ static int wm831x_gp_ldo_probe(struct platform_device *pdev)
        ldo->desc.enable_mask = 1 << id;
        ldo->desc.bypass_reg = ldo->base;
        ldo->desc.bypass_mask = WM831X_LDO1_SWI;
+       ldo->desc.linear_ranges = wm831x_gp_ldo_ranges;
+       ldo->desc.n_linear_ranges = ARRAY_SIZE(wm831x_gp_ldo_ranges);
 
        config.dev = pdev->dev.parent;
        if (pdata)
@@ -358,43 +331,12 @@ static struct platform_driver wm831x_gp_ldo_driver = {
  * Analogue LDOs
  */
 
-
-#define WM831X_ALDO_SELECTOR_LOW 0xc
-#define WM831X_ALDO_MAX_SELECTOR 0x1f
-
-static int wm831x_aldo_list_voltage(struct regulator_dev *rdev,
-                                     unsigned int selector)
-{
-       /* 1-1.6V in 50mV steps */
-       if (selector <= WM831X_ALDO_SELECTOR_LOW)
-               return 1000000 + (selector * 50000);
-       /* 1.7-3.5V in 100mV steps */
-       if (selector <= WM831X_ALDO_MAX_SELECTOR)
-               return 1600000 + ((selector - WM831X_ALDO_SELECTOR_LOW)
-                                 * 100000);
-       return -EINVAL;
-}
-
-static int wm831x_aldo_map_voltage(struct regulator_dev *rdev,
-                                  int min_uV, int max_uV)
-{
-       int volt, vsel;
-
-       if (min_uV < 1000000)
-               vsel = 0;
-       else if (min_uV < 1700000)
-               vsel = ((min_uV - 1000000) / 50000);
-       else
-               vsel = ((min_uV - 1700000) / 100000)
-                       + WM831X_ALDO_SELECTOR_LOW + 1;
-
-       volt = wm831x_aldo_list_voltage(rdev, vsel);
-       if (volt < min_uV || volt > max_uV)
-               return -EINVAL;
-
-       return vsel;
-
-}
+static const struct regulator_linear_range wm831x_aldo_ranges[] = {
+       { .min_uV = 1000000, .max_uV = 1650000, .min_sel =  0, .max_sel = 12,
+         .uV_step =  50000 },
+       { .min_uV = 1700000, .max_uV = 3500000, .min_sel = 13, .max_sel = 31,
+         .uV_step = 100000 },
+};
 
 static int wm831x_aldo_set_suspend_voltage(struct regulator_dev *rdev,
                                             int uV)
@@ -403,7 +345,7 @@ static int wm831x_aldo_set_suspend_voltage(struct regulator_dev *rdev,
        struct wm831x *wm831x = ldo->wm831x;
        int sel, reg = ldo->base + WM831X_LDO_SLEEP_CONTROL;
 
-       sel = wm831x_aldo_map_voltage(rdev, uV, uV);
+       sel = regulator_map_voltage_linear_range(rdev, uV, uV);
        if (sel < 0)
                return sel;
 
@@ -486,8 +428,8 @@ static int wm831x_aldo_get_status(struct regulator_dev *rdev)
 }
 
 static struct regulator_ops wm831x_aldo_ops = {
-       .list_voltage = wm831x_aldo_list_voltage,
-       .map_voltage = wm831x_aldo_map_voltage,
+       .list_voltage = regulator_list_voltage_linear_range,
+       .map_voltage = regulator_map_voltage_linear_range,
        .get_voltage_sel = regulator_get_voltage_sel_regmap,
        .set_voltage_sel = regulator_set_voltage_sel_regmap,
        .set_suspend_voltage = wm831x_aldo_set_suspend_voltage,
@@ -505,7 +447,7 @@ static struct regulator_ops wm831x_aldo_ops = {
 static int wm831x_aldo_probe(struct platform_device *pdev)
 {
        struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
-       struct wm831x_pdata *pdata = wm831x->dev->platform_data;
+       struct wm831x_pdata *pdata = dev_get_platdata(wm831x->dev);
        struct regulator_config config = { };
        int id;
        struct wm831x_ldo *ldo;
@@ -545,7 +487,9 @@ static int wm831x_aldo_probe(struct platform_device *pdev)
 
        ldo->desc.id = id;
        ldo->desc.type = REGULATOR_VOLTAGE;
-       ldo->desc.n_voltages = WM831X_ALDO_MAX_SELECTOR + 1;
+       ldo->desc.n_voltages = 32;
+       ldo->desc.linear_ranges = wm831x_aldo_ranges;
+       ldo->desc.n_linear_ranges = ARRAY_SIZE(wm831x_aldo_ranges);
        ldo->desc.ops = &wm831x_aldo_ops;
        ldo->desc.owner = THIS_MODULE;
        ldo->desc.vsel_reg = ldo->base + WM831X_LDO_ON_CONTROL;
@@ -661,7 +605,7 @@ static struct regulator_ops wm831x_alive_ldo_ops = {
 static int wm831x_alive_ldo_probe(struct platform_device *pdev)
 {
        struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
-       struct wm831x_pdata *pdata = wm831x->dev->platform_data;
+       struct wm831x_pdata *pdata = dev_get_platdata(wm831x->dev);
        struct regulator_config config = { };
        int id;
        struct wm831x_ldo *ldo;
index 7f0fa22ef2aab4e85e71693358737ee099b537e2..835b5f0f344ed2537115b1ed486d14860f88434e 100644 (file)
@@ -542,41 +542,12 @@ static int wm8350_dcdc_set_suspend_mode(struct regulator_dev *rdev,
        return 0;
 }
 
-static int wm8350_ldo_list_voltage(struct regulator_dev *rdev,
-                                   unsigned selector)
-{
-       if (selector > WM8350_LDO1_VSEL_MASK)
-               return -EINVAL;
-
-       if (selector < 16)
-               return (selector * 50000) + 900000;
-       else
-               return ((selector - 16) * 100000) + 1800000;
-}
-
-static int wm8350_ldo_map_voltage(struct regulator_dev *rdev, int min_uV,
-                                 int max_uV)
-{
-       int volt, sel;
-       int min_mV = min_uV / 1000;
-       int max_mV = max_uV / 1000;
-
-       if (min_mV < 900 || min_mV > 3300)
-               return -EINVAL;
-       if (max_mV < 900 || max_mV > 3300)
-               return -EINVAL;
-
-       if (min_mV < 1800) /* step size is 50mV < 1800mV */
-               sel = DIV_ROUND_UP(min_uV - 900, 50);
-       else /* step size is 100mV > 1800mV */
-               sel = DIV_ROUND_UP(min_uV - 1800, 100) + 16;
-
-       volt = wm8350_ldo_list_voltage(rdev, sel);
-       if (volt < min_uV || volt > max_uV)
-               return -EINVAL;
-
-       return sel;
-}
+static const struct regulator_linear_range wm8350_ldo_ranges[] = {
+       { .min_uV =  900000, .max_uV = 1750000, .min_sel =  0, .max_sel = 15,
+         .uV_step =  50000 },
+       { .min_uV = 1800000, .max_uV = 3300000, .min_sel = 16, .max_sel = 31,
+         .uV_step = 100000 },
+};
 
 static int wm8350_ldo_set_suspend_voltage(struct regulator_dev *rdev, int uV)
 {
@@ -603,7 +574,7 @@ static int wm8350_ldo_set_suspend_voltage(struct regulator_dev *rdev, int uV)
                return -EINVAL;
        }
 
-       sel = wm8350_ldo_map_voltage(rdev, uV, uV);
+       sel = regulator_map_voltage_linear_range(rdev, uV, uV);
        if (sel < 0)
                return -EINVAL;
 
@@ -998,10 +969,10 @@ static struct regulator_ops wm8350_dcdc2_5_ops = {
 };
 
 static struct regulator_ops wm8350_ldo_ops = {
-       .map_voltage = wm8350_ldo_map_voltage,
+       .map_voltage = regulator_map_voltage_linear_range,
        .set_voltage_sel = regulator_set_voltage_sel_regmap,
        .get_voltage_sel = regulator_get_voltage_sel_regmap,
-       .list_voltage = wm8350_ldo_list_voltage,
+       .list_voltage = regulator_list_voltage_linear_range,
        .enable = regulator_enable_regmap,
        .disable = regulator_disable_regmap,
        .is_enabled = regulator_is_enabled_regmap,
@@ -1108,6 +1079,8 @@ static const struct regulator_desc wm8350_reg[NUM_WM8350_REGULATORS] = {
                .irq = WM8350_IRQ_UV_LDO1,
                .type = REGULATOR_VOLTAGE,
                .n_voltages = WM8350_LDO1_VSEL_MASK + 1,
+               .linear_ranges = wm8350_ldo_ranges,
+               .n_linear_ranges = ARRAY_SIZE(wm8350_ldo_ranges),
                .vsel_reg = WM8350_LDO1_CONTROL,
                .vsel_mask = WM8350_LDO1_VSEL_MASK,
                .enable_reg = WM8350_DCDC_LDO_REQUESTED,
@@ -1121,6 +1094,8 @@ static const struct regulator_desc wm8350_reg[NUM_WM8350_REGULATORS] = {
                .irq = WM8350_IRQ_UV_LDO2,
                .type = REGULATOR_VOLTAGE,
                .n_voltages = WM8350_LDO2_VSEL_MASK + 1,
+               .linear_ranges = wm8350_ldo_ranges,
+               .n_linear_ranges = ARRAY_SIZE(wm8350_ldo_ranges),
                .vsel_reg = WM8350_LDO2_CONTROL,
                .vsel_mask = WM8350_LDO2_VSEL_MASK,
                .enable_reg = WM8350_DCDC_LDO_REQUESTED,
@@ -1134,6 +1109,8 @@ static const struct regulator_desc wm8350_reg[NUM_WM8350_REGULATORS] = {
                .irq = WM8350_IRQ_UV_LDO3,
                .type = REGULATOR_VOLTAGE,
                .n_voltages = WM8350_LDO3_VSEL_MASK + 1,
+               .linear_ranges = wm8350_ldo_ranges,
+               .n_linear_ranges = ARRAY_SIZE(wm8350_ldo_ranges),
                .vsel_reg = WM8350_LDO3_CONTROL,
                .vsel_mask = WM8350_LDO3_VSEL_MASK,
                .enable_reg = WM8350_DCDC_LDO_REQUESTED,
@@ -1147,6 +1124,8 @@ static const struct regulator_desc wm8350_reg[NUM_WM8350_REGULATORS] = {
                .irq = WM8350_IRQ_UV_LDO4,
                .type = REGULATOR_VOLTAGE,
                .n_voltages = WM8350_LDO4_VSEL_MASK + 1,
+               .linear_ranges = wm8350_ldo_ranges,
+               .n_linear_ranges = ARRAY_SIZE(wm8350_ldo_ranges),
                .vsel_reg = WM8350_LDO4_CONTROL,
                .vsel_mask = WM8350_LDO4_VSEL_MASK,
                .enable_reg = WM8350_DCDC_LDO_REQUESTED,
@@ -1222,7 +1201,7 @@ static int wm8350_regulator_probe(struct platform_device *pdev)
        }
 
        config.dev = &pdev->dev;
-       config.init_data = pdev->dev.platform_data;
+       config.init_data = dev_get_platdata(&pdev->dev);
        config.driver_data = dev_get_drvdata(&pdev->dev);
        config.regmap = wm8350->regmap;
 
index a09f03ee550621503dfbf95d457cc83de1a1cbbd..58f51bec13f25d2f47a488ca53fc1099bb683762 100644 (file)
 #include <linux/regulator/driver.h>
 #include <linux/mfd/wm8400-private.h>
 
-static int wm8400_ldo_list_voltage(struct regulator_dev *dev,
-                                  unsigned selector)
-{
-       if (selector > WM8400_LDO1_VSEL_MASK)
-               return -EINVAL;
-
-       if (selector < 15)
-               return 900000 + (selector * 50000);
-       else
-               return 1700000 + ((selector - 15) * 100000);
-}
-
-static int wm8400_ldo_map_voltage(struct regulator_dev *dev,
-                                 int min_uV, int max_uV)
-{
-       u16 val;
-       int volt;
-
-       if (min_uV < 900000 || min_uV > 3300000)
-               return -EINVAL;
-
-       if (min_uV < 1700000) /* Steps of 50mV from 900mV;  */
-               val = DIV_ROUND_UP(min_uV - 900000, 50000);
-       else /* Steps of 100mV from 1700mV */
-               val = DIV_ROUND_UP(min_uV - 1700000, 100000) + 15;
-
-       volt = wm8400_ldo_list_voltage(dev, val);
-       if (volt < min_uV || volt > max_uV)
-               return -EINVAL;
-
-       return val;
-}
+static const struct regulator_linear_range wm8400_ldo_ranges[] = {
+       { .min_uV =  900000, .max_uV = 1600000, .min_sel = 0, .max_sel = 14,
+         .uV_step =  50000 },
+       { .min_uV = 1700000, .max_uV = 3300000, .min_sel = 15, .max_sel = 31,
+         .uV_step = 100000 },
+};
 
 static struct regulator_ops wm8400_ldo_ops = {
        .is_enabled = regulator_is_enabled_regmap,
        .enable = regulator_enable_regmap,
        .disable = regulator_disable_regmap,
-       .list_voltage = wm8400_ldo_list_voltage,
+       .list_voltage = regulator_list_voltage_linear_range,
        .get_voltage_sel = regulator_get_voltage_sel_regmap,
        .set_voltage_sel = regulator_set_voltage_sel_regmap,
-       .map_voltage = wm8400_ldo_map_voltage,
+       .map_voltage = regulator_map_voltage_linear_range,
 };
 
 static unsigned int wm8400_dcdc_get_mode(struct regulator_dev *dev)
@@ -155,6 +129,8 @@ static struct regulator_desc regulators[] = {
                .enable_reg = WM8400_LDO1_CONTROL,
                .enable_mask = WM8400_LDO1_ENA,
                .n_voltages = WM8400_LDO1_VSEL_MASK + 1,
+               .linear_ranges = wm8400_ldo_ranges,
+               .n_linear_ranges = ARRAY_SIZE(wm8400_ldo_ranges),
                .vsel_reg = WM8400_LDO1_CONTROL,
                .vsel_mask = WM8400_LDO1_VSEL_MASK,
                .type = REGULATOR_VOLTAGE,
@@ -167,6 +143,8 @@ static struct regulator_desc regulators[] = {
                .enable_reg = WM8400_LDO2_CONTROL,
                .enable_mask = WM8400_LDO2_ENA,
                .n_voltages = WM8400_LDO2_VSEL_MASK + 1,
+               .linear_ranges = wm8400_ldo_ranges,
+               .n_linear_ranges = ARRAY_SIZE(wm8400_ldo_ranges),
                .type = REGULATOR_VOLTAGE,
                .vsel_reg = WM8400_LDO2_CONTROL,
                .vsel_mask = WM8400_LDO2_VSEL_MASK,
@@ -179,6 +157,8 @@ static struct regulator_desc regulators[] = {
                .enable_reg = WM8400_LDO3_CONTROL,
                .enable_mask = WM8400_LDO3_ENA,
                .n_voltages = WM8400_LDO3_VSEL_MASK + 1,
+               .linear_ranges = wm8400_ldo_ranges,
+               .n_linear_ranges = ARRAY_SIZE(wm8400_ldo_ranges),
                .vsel_reg = WM8400_LDO3_CONTROL,
                .vsel_mask = WM8400_LDO3_VSEL_MASK,
                .type = REGULATOR_VOLTAGE,
@@ -191,6 +171,8 @@ static struct regulator_desc regulators[] = {
                .enable_reg = WM8400_LDO4_CONTROL,
                .enable_mask = WM8400_LDO4_ENA,
                .n_voltages = WM8400_LDO4_VSEL_MASK + 1,
+               .linear_ranges = wm8400_ldo_ranges,
+               .n_linear_ranges = ARRAY_SIZE(wm8400_ldo_ranges),
                .vsel_reg = WM8400_LDO4_CONTROL,
                .vsel_mask = WM8400_LDO4_VSEL_MASK,
                .type = REGULATOR_VOLTAGE,
@@ -233,7 +215,7 @@ static int wm8400_regulator_probe(struct platform_device *pdev)
        struct regulator_dev *rdev;
 
        config.dev = &pdev->dev;
-       config.init_data = pdev->dev.platform_data;
+       config.init_data = dev_get_platdata(&pdev->dev);
        config.driver_data = wm8400;
        config.regmap = wm8400->regmap;
 
index 8f2a8a7a3f997f5475d305b87f508a47a6daf267..5ee2a208457c2b58e46fb1f1d7a79e8b180c4a35 100644 (file)
@@ -125,7 +125,7 @@ static const struct regulator_init_data wm8994_ldo_default[] = {
 static int wm8994_ldo_probe(struct platform_device *pdev)
 {
        struct wm8994 *wm8994 = dev_get_drvdata(pdev->dev.parent);
-       struct wm8994_pdata *pdata = wm8994->dev->platform_data;
+       struct wm8994_pdata *pdata = dev_get_platdata(wm8994->dev);
        int id = pdev->id % ARRAY_SIZE(pdata->ldo);
        struct regulator_config config = { };
        struct wm8994_ldo *ldo;
index 58bc6eb49de1da7ee1e3061a7ade5865f1c8d800..2ead7e78c4568ec7fe50ece74c3109d424efa852 100644 (file)
@@ -930,7 +930,7 @@ dasd_use_raw_store(struct device *dev, struct device_attribute *attr,
        if (IS_ERR(devmap))
                return PTR_ERR(devmap);
 
-       if ((strict_strtoul(buf, 10, &val) != 0) || val > 1)
+       if ((kstrtoul(buf, 10, &val) != 0) || val > 1)
                return -EINVAL;
 
        spin_lock(&dasd_devmap_lock);
@@ -1225,7 +1225,7 @@ dasd_expires_store(struct device *dev, struct device_attribute *attr,
        if (IS_ERR(device))
                return -ENODEV;
 
-       if ((strict_strtoul(buf, 10, &val) != 0) ||
+       if ((kstrtoul(buf, 10, &val) != 0) ||
            (val > DASD_EXPIRES_MAX) || val == 0) {
                dasd_put_device(device);
                return -EINVAL;
@@ -1265,7 +1265,7 @@ dasd_retries_store(struct device *dev, struct device_attribute *attr,
        if (IS_ERR(device))
                return -ENODEV;
 
-       if ((strict_strtoul(buf, 10, &val) != 0) ||
+       if ((kstrtoul(buf, 10, &val) != 0) ||
            (val > DASD_RETRIES_MAX)) {
                dasd_put_device(device);
                return -EINVAL;
@@ -1307,7 +1307,7 @@ dasd_timeout_store(struct device *dev, struct device_attribute *attr,
        if (IS_ERR(device) || !device->block)
                return -ENODEV;
 
-       if ((strict_strtoul(buf, 10, &val) != 0) ||
+       if ((kstrtoul(buf, 10, &val) != 0) ||
            val > UINT_MAX / HZ) {
                dasd_put_device(device);
                return -EINVAL;
index e61a6deea3c0fc6bc406c7ac2555b6eda1015dc3..5adb2042e824fc30815ea891c54b89526680b4eb 100644 (file)
@@ -85,6 +85,8 @@ MODULE_DEVICE_TABLE(ccw, dasd_eckd_ids);
 
 static struct ccw_driver dasd_eckd_driver; /* see below */
 
+static void *rawpadpage;
+
 #define INIT_CQR_OK 0
 #define INIT_CQR_UNFORMATTED 1
 #define INIT_CQR_ERROR 2
@@ -3237,18 +3239,26 @@ static struct dasd_ccw_req *dasd_raw_build_cp(struct dasd_device *startdev,
        unsigned int seg_len, len_to_track_end;
        unsigned int first_offs;
        unsigned int cidaw, cplength, datasize;
-       sector_t first_trk, last_trk;
+       sector_t first_trk, last_trk, sectors;
+       sector_t start_padding_sectors, end_sector_offset, end_padding_sectors;
        unsigned int pfx_datasize;
 
        /*
         * raw track access needs to be mutiple of 64k and on 64k boundary
+        * For read requests we can fix an incorrect alignment by padding
+        * the request with dummy pages.
         */
-       if ((blk_rq_pos(req) % DASD_RAW_SECTORS_PER_TRACK) != 0) {
-               cqr = ERR_PTR(-EINVAL);
-               goto out;
-       }
-       if (((blk_rq_pos(req) + blk_rq_sectors(req)) %
-            DASD_RAW_SECTORS_PER_TRACK) != 0) {
+       start_padding_sectors = blk_rq_pos(req) % DASD_RAW_SECTORS_PER_TRACK;
+       end_sector_offset = (blk_rq_pos(req) + blk_rq_sectors(req)) %
+               DASD_RAW_SECTORS_PER_TRACK;
+       end_padding_sectors = (DASD_RAW_SECTORS_PER_TRACK - end_sector_offset) %
+               DASD_RAW_SECTORS_PER_TRACK;
+       basedev = block->base;
+       if ((start_padding_sectors || end_padding_sectors) &&
+           (rq_data_dir(req) == WRITE)) {
+               DBF_DEV_EVENT(DBF_ERR, basedev,
+                             "raw write not track aligned (%lu,%lu) req %p",
+                             start_padding_sectors, end_padding_sectors, req);
                cqr = ERR_PTR(-EINVAL);
                goto out;
        }
@@ -3258,7 +3268,6 @@ static struct dasd_ccw_req *dasd_raw_build_cp(struct dasd_device *startdev,
                DASD_RAW_SECTORS_PER_TRACK;
        trkcount = last_trk - first_trk + 1;
        first_offs = 0;
-       basedev = block->base;
 
        if (rq_data_dir(req) == READ)
                cmd = DASD_ECKD_CCW_READ_TRACK;
@@ -3307,12 +3316,26 @@ static struct dasd_ccw_req *dasd_raw_build_cp(struct dasd_device *startdev,
        }
 
        idaws = (unsigned long *)(cqr->data + pfx_datasize);
-
        len_to_track_end = 0;
-
+       if (start_padding_sectors) {
+               ccw[-1].flags |= CCW_FLAG_CC;
+               ccw->cmd_code = cmd;
+               /* maximum 3390 track size */
+               ccw->count = 57326;
+               /* 64k map to one track */
+               len_to_track_end = 65536 - start_padding_sectors * 512;
+               ccw->cda = (__u32)(addr_t)idaws;
+               ccw->flags |= CCW_FLAG_IDA;
+               ccw->flags |= CCW_FLAG_SLI;
+               ccw++;
+               for (sectors = 0; sectors < start_padding_sectors; sectors += 8)
+                       idaws = idal_create_words(idaws, rawpadpage, PAGE_SIZE);
+       }
        rq_for_each_segment(bv, req, iter) {
                dst = page_address(bv->bv_page) + bv->bv_offset;
                seg_len = bv->bv_len;
+               if (cmd == DASD_ECKD_CCW_READ_TRACK)
+                       memset(dst, 0, seg_len);
                if (!len_to_track_end) {
                        ccw[-1].flags |= CCW_FLAG_CC;
                        ccw->cmd_code = cmd;
@@ -3328,7 +3351,8 @@ static struct dasd_ccw_req *dasd_raw_build_cp(struct dasd_device *startdev,
                len_to_track_end -= seg_len;
                idaws = idal_create_words(idaws, dst, seg_len);
        }
-
+       for (sectors = 0; sectors < end_padding_sectors; sectors += 8)
+               idaws = idal_create_words(idaws, rawpadpage, PAGE_SIZE);
        if (blk_noretry_request(req) ||
            block->base->features & DASD_FEATURE_FAILFAST)
                set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
@@ -4479,12 +4503,19 @@ dasd_eckd_init(void)
                kfree(dasd_reserve_req);
                return -ENOMEM;
        }
+       rawpadpage = (void *)__get_free_page(GFP_KERNEL);
+       if (!rawpadpage) {
+               kfree(path_verification_worker);
+               kfree(dasd_reserve_req);
+               return -ENOMEM;
+       }
        ret = ccw_driver_register(&dasd_eckd_driver);
        if (!ret)
                wait_for_device_probe();
        else {
                kfree(path_verification_worker);
                kfree(dasd_reserve_req);
+               free_page((unsigned long)rawpadpage);
        }
        return ret;
 }
@@ -4495,6 +4526,7 @@ dasd_eckd_cleanup(void)
        ccw_driver_unregister(&dasd_eckd_driver);
        kfree(path_verification_worker);
        kfree(dasd_reserve_req);
+       free_page((unsigned long)rawpadpage);
 }
 
 module_init(dasd_eckd_init);
index 8d11f773a75224a74745f20a03be731ea9dad267..ba99b64e2b3f0fea3b5c04c15b66bcd4f2d5ea3c 100644 (file)
@@ -160,11 +160,13 @@ dasd_log_sense(struct dasd_ccw_req *cqr, struct irb *irb)
 
        device = cqr->startdev;
        if (cqr->intrc == -ETIMEDOUT) {
-               dev_err(&device->cdev->dev, "cqr %p timeout error", cqr);
+               dev_err(&device->cdev->dev,
+                       "A timeout error occurred for cqr %p", cqr);
                return;
        }
        if (cqr->intrc == -ENOLINK) {
-               dev_err(&device->cdev->dev, "cqr %p transport error", cqr);
+               dev_err(&device->cdev->dev,
+                       "A transport error occurred for cqr %p", cqr);
                return;
        }
        /* dump sense data */
index 444d36183a251c38e2c72fda8084ac33f3bd358b..944156207477448ba0e48393212923fee2abfb16 100644 (file)
@@ -32,7 +32,7 @@ static void sclp_cpu_capability_notify(struct work_struct *work)
        struct device *dev;
 
        s390_adjust_jiffies();
-       pr_warning("cpu capability changed.\n");
+       pr_info("CPU capability may have changed\n");
        get_online_cpus();
        for_each_online_cpu(cpu) {
                dev = get_cpu_device(cpu);
index 91edbd7ee80640d7d624280722843cbfb69093a8..d028fd800c9c6afd7f5b5627475c4bfe6552f04b 100644 (file)
@@ -81,15 +81,185 @@ void unregister_adapter_interrupt(struct airq_struct *airq)
 }
 EXPORT_SYMBOL(unregister_adapter_interrupt);
 
-void do_adapter_IO(u8 isc)
+static irqreturn_t do_airq_interrupt(int irq, void *dummy)
 {
+       struct tpi_info *tpi_info;
        struct airq_struct *airq;
        struct hlist_head *head;
 
-       head = &airq_lists[isc];
+       __this_cpu_write(s390_idle.nohz_delay, 1);
+       tpi_info = (struct tpi_info *) &get_irq_regs()->int_code;
+       head = &airq_lists[tpi_info->isc];
        rcu_read_lock();
        hlist_for_each_entry_rcu(airq, head, list)
                if ((*airq->lsi_ptr & airq->lsi_mask) != 0)
                        airq->handler(airq);
        rcu_read_unlock();
+
+       return IRQ_HANDLED;
+}
+
+static struct irqaction airq_interrupt = {
+       .name    = "AIO",
+       .handler = do_airq_interrupt,
+};
+
+void __init init_airq_interrupts(void)
+{
+       irq_set_chip_and_handler(THIN_INTERRUPT,
+                                &dummy_irq_chip, handle_percpu_irq);
+       setup_irq(THIN_INTERRUPT, &airq_interrupt);
+}
+
+/**
+ * airq_iv_create - create an interrupt vector
+ * @bits: number of bits in the interrupt vector
+ * @flags: allocation flags
+ *
+ * Returns a pointer to an interrupt vector structure
+ */
+struct airq_iv *airq_iv_create(unsigned long bits, unsigned long flags)
+{
+       struct airq_iv *iv;
+       unsigned long size;
+
+       iv = kzalloc(sizeof(*iv), GFP_KERNEL);
+       if (!iv)
+               goto out;
+       iv->bits = bits;
+       size = BITS_TO_LONGS(bits) * sizeof(unsigned long);
+       iv->vector = kzalloc(size, GFP_KERNEL);
+       if (!iv->vector)
+               goto out_free;
+       if (flags & AIRQ_IV_ALLOC) {
+               iv->avail = kmalloc(size, GFP_KERNEL);
+               if (!iv->avail)
+                       goto out_free;
+               memset(iv->avail, 0xff, size);
+               iv->end = 0;
+       } else
+               iv->end = bits;
+       if (flags & AIRQ_IV_BITLOCK) {
+               iv->bitlock = kzalloc(size, GFP_KERNEL);
+               if (!iv->bitlock)
+                       goto out_free;
+       }
+       if (flags & AIRQ_IV_PTR) {
+               size = bits * sizeof(unsigned long);
+               iv->ptr = kzalloc(size, GFP_KERNEL);
+               if (!iv->ptr)
+                       goto out_free;
+       }
+       if (flags & AIRQ_IV_DATA) {
+               size = bits * sizeof(unsigned int);
+               iv->data = kzalloc(size, GFP_KERNEL);
+               if (!iv->data)
+                       goto out_free;
+       }
+       spin_lock_init(&iv->lock);
+       return iv;
+
+out_free:
+       kfree(iv->ptr);
+       kfree(iv->bitlock);
+       kfree(iv->avail);
+       kfree(iv->vector);
+       kfree(iv);
+out:
+       return NULL;
+}
+EXPORT_SYMBOL(airq_iv_create);
+
+/**
+ * airq_iv_release - release an interrupt vector
+ * @iv: pointer to interrupt vector structure
+ */
+void airq_iv_release(struct airq_iv *iv)
+{
+       kfree(iv->data);
+       kfree(iv->ptr);
+       kfree(iv->bitlock);
+       kfree(iv->vector);
+       kfree(iv->avail);
+       kfree(iv);
+}
+EXPORT_SYMBOL(airq_iv_release);
+
+/**
+ * airq_iv_alloc_bit - allocate an irq bit from an interrupt vector
+ * @iv: pointer to an interrupt vector structure
+ *
+ * Returns the bit number of the allocated irq, or -1UL if no bit
+ * is available or the AIRQ_IV_ALLOC flag has not been specified
+ */
+unsigned long airq_iv_alloc_bit(struct airq_iv *iv)
+{
+       const unsigned long be_to_le = BITS_PER_LONG - 1;
+       unsigned long bit;
+
+       if (!iv->avail)
+               return -1UL;
+       spin_lock(&iv->lock);
+       bit = find_first_bit_left(iv->avail, iv->bits);
+       if (bit < iv->bits) {
+               clear_bit(bit ^ be_to_le, iv->avail);
+               if (bit >= iv->end)
+                       iv->end = bit + 1;
+       } else
+               bit = -1UL;
+       spin_unlock(&iv->lock);
+       return bit;
+
+}
+EXPORT_SYMBOL(airq_iv_alloc_bit);
+
+/**
+ * airq_iv_free_bit - free an irq bit of an interrupt vector
+ * @iv: pointer to interrupt vector structure
+ * @bit: number of the irq bit to free
+ */
+void airq_iv_free_bit(struct airq_iv *iv, unsigned long bit)
+{
+       const unsigned long be_to_le = BITS_PER_LONG - 1;
+
+       if (!iv->avail)
+               return;
+       spin_lock(&iv->lock);
+       /* Clear (possibly left over) interrupt bit */
+       clear_bit(bit ^ be_to_le, iv->vector);
+       /* Make the bit position available again */
+       set_bit(bit ^ be_to_le, iv->avail);
+       if (bit == iv->end - 1) {
+               /* Find new end of bit-field */
+               while (--iv->end > 0)
+                       if (!test_bit((iv->end - 1) ^ be_to_le, iv->avail))
+                               break;
+       }
+       spin_unlock(&iv->lock);
+}
+EXPORT_SYMBOL(airq_iv_free_bit);
+
+/**
+ * airq_iv_scan - scan interrupt vector for non-zero bits
+ * @iv: pointer to interrupt vector structure
+ * @start: bit number to start the search
+ * @end: bit number to end the search
+ *
+ * Returns the bit number of the next non-zero interrupt bit, or
+ * -1UL if the scan completed without finding any more non-zero bits.
+ */
+unsigned long airq_iv_scan(struct airq_iv *iv, unsigned long start,
+                          unsigned long end)
+{
+       const unsigned long be_to_le = BITS_PER_LONG - 1;
+       unsigned long bit;
+
+       /* Find non-zero bit starting from bit number 'start'. */
+       bit = find_next_bit_left(iv->vector, end, start);
+       if (bit >= end)
+               return -1UL;
+       /* Clear interrupt bit (find left uses big-endian bit numbers) */
+       clear_bit(bit ^ be_to_le, iv->vector);
+       return bit;
 }
+EXPORT_SYMBOL(airq_iv_scan);
index 84846c2b96d34bdf3f0f1461038d76c9627e2704..959135a01847940a5ecee33ae902b83cf8ecfe44 100644 (file)
@@ -137,7 +137,7 @@ static ssize_t ccwgroup_online_store(struct device *dev,
        if (!try_module_get(gdrv->driver.owner))
                return -EINVAL;
 
-       ret = strict_strtoul(buf, 0, &value);
+       ret = kstrtoul(buf, 0, &value);
        if (ret)
                goto out;
 
index 4eeb4a6bf2074cd0f72a988ab6ee5f5ea23cb764..d7da67a31c77f606ef68445f9da446b521a4abf1 100644 (file)
@@ -561,37 +561,23 @@ out:
 }
 
 /*
- * do_IRQ() handles all normal I/O device IRQ's (the special
- *         SMP cross-CPU interrupts have their own specific
- *         handlers).
- *
+ * do_cio_interrupt() handles all normal I/O device IRQ's
  */
-void __irq_entry do_IRQ(struct pt_regs *regs)
+static irqreturn_t do_cio_interrupt(int irq, void *dummy)
 {
-       struct tpi_info *tpi_info = (struct tpi_info *) &regs->int_code;
+       struct tpi_info *tpi_info;
        struct subchannel *sch;
        struct irb *irb;
-       struct pt_regs *old_regs;
 
-       old_regs = set_irq_regs(regs);
-       irq_enter();
        __this_cpu_write(s390_idle.nohz_delay, 1);
-       if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator)
-               /* Serve timer interrupts first. */
-               clock_comparator_work();
-
-       kstat_incr_irqs_this_cpu(IO_INTERRUPT, NULL);
+       tpi_info = (struct tpi_info *) &get_irq_regs()->int_code;
        irb = (struct irb *) &S390_lowcore.irb;
-       if (tpi_info->adapter_IO) {
-               do_adapter_IO(tpi_info->isc);
-               goto out;
-       }
        sch = (struct subchannel *)(unsigned long) tpi_info->intparm;
        if (!sch) {
                /* Clear pending interrupt condition. */
                inc_irq_stat(IRQIO_CIO);
                tsch(tpi_info->schid, irb);
-               goto out;
+               return IRQ_HANDLED;
        }
        spin_lock(sch->lock);
        /* Store interrupt response block to lowcore. */
@@ -606,9 +592,23 @@ void __irq_entry do_IRQ(struct pt_regs *regs)
        } else
                inc_irq_stat(IRQIO_CIO);
        spin_unlock(sch->lock);
-out:
-       irq_exit();
-       set_irq_regs(old_regs);
+
+       return IRQ_HANDLED;
+}
+
+static struct irq_desc *irq_desc_io;
+
+static struct irqaction io_interrupt = {
+       .name    = "IO",
+       .handler = do_cio_interrupt,
+};
+
+void __init init_cio_interrupts(void)
+{
+       irq_set_chip_and_handler(IO_INTERRUPT,
+                                &dummy_irq_chip, handle_percpu_irq);
+       setup_irq(IO_INTERRUPT, &io_interrupt);
+       irq_desc_io = irq_to_desc(IO_INTERRUPT);
 }
 
 #ifdef CONFIG_CCW_CONSOLE
@@ -635,7 +635,7 @@ void cio_tsch(struct subchannel *sch)
                local_bh_disable();
                irq_enter();
        }
-       kstat_incr_irqs_this_cpu(IO_INTERRUPT, NULL);
+       kstat_incr_irqs_this_cpu(IO_INTERRUPT, irq_desc_io);
        if (sch->driver && sch->driver->irq)
                sch->driver->irq(sch);
        else
index d62f5e7f3cf100c217718614edabea5d5470a151..d42f67412bd895b7b5e7857bf1f168d47e3bd3db 100644 (file)
@@ -121,9 +121,6 @@ extern int cio_commit_config(struct subchannel *sch);
 int cio_tm_start_key(struct subchannel *sch, struct tcw *tcw, u8 lpm, u8 key);
 int cio_tm_intrg(struct subchannel *sch);
 
-void do_adapter_IO(u8 isc);
-void do_IRQ(struct pt_regs *);
-
 /* Use with care. */
 #ifdef CONFIG_CCW_CONSOLE
 extern struct subchannel *cio_probe_console(void);
index 4495e0627a40ad8168fe99c34d90c82adc960ee5..23054f8fa9fc2ef8735caed6bdf4d53dc557cf9e 100644 (file)
@@ -1182,7 +1182,7 @@ static ssize_t cmb_enable_store(struct device *dev,
        int ret;
        unsigned long val;
 
-       ret = strict_strtoul(buf, 16, &val);
+       ret = kstrtoul(buf, 16, &val);
        if (ret)
                return ret;
 
index 1ebe5d3ddebb28e7caddb0ec742bc76ce1307f78..4eb2a54e64f25c5f2e9221645e14a9d79d952b9b 100644 (file)
@@ -740,7 +740,7 @@ css_cm_enable_store(struct device *dev, struct device_attribute *attr,
        int ret;
        unsigned long val;
 
-       ret = strict_strtoul(buf, 16, &val);
+       ret = kstrtoul(buf, 16, &val);
        if (ret)
                return ret;
        mutex_lock(&css->mutex);
index 1ab5f6c36d9b4439db490797ca62aa239f04f7fa..e4a7ab2bb629f76358896e1389072c391fe3504c 100644 (file)
@@ -564,7 +564,7 @@ static ssize_t online_store (struct device *dev, struct device_attribute *attr,
                ret = 0;
        } else {
                force = 0;
-               ret = strict_strtoul(buf, 16, &i);
+               ret = kstrtoul(buf, 16, &i);
        }
        if (ret)
                goto out;
index d1c8025b0b037605c73f331b6ebd3178feed2767..adef5f5de118a7ed9b320b3cea07672bc511ec36 100644 (file)
@@ -208,7 +208,7 @@ static ssize_t qeth_l3_dev_sniffer_store(struct device *dev,
                goto out;
        }
 
-       rc = strict_strtoul(buf, 16, &i);
+       rc = kstrtoul(buf, 16, &i);
        if (rc) {
                rc = -EINVAL;
                goto out;
index f2db5fe7bdc2ba8f8a9c0cbb8458593494eef98a..62f00da09ad1b42f29f812f3ff8dc583ff850130 100644 (file)
@@ -581,8 +581,10 @@ struct iscsi_kwqe_init1 {
 #define ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE_SHIFT 4
 #define ISCSI_KWQE_INIT1_KEEP_ALIVE_ENABLE (0x1<<5)
 #define ISCSI_KWQE_INIT1_KEEP_ALIVE_ENABLE_SHIFT 5
-#define ISCSI_KWQE_INIT1_RESERVED1 (0x3<<6)
-#define ISCSI_KWQE_INIT1_RESERVED1_SHIFT 6
+#define ISCSI_KWQE_INIT1_TIME_STAMPS_ENABLE (0x1<<6)
+#define ISCSI_KWQE_INIT1_TIME_STAMPS_ENABLE_SHIFT 6
+#define ISCSI_KWQE_INIT1_RESERVED1 (0x1<<7)
+#define ISCSI_KWQE_INIT1_RESERVED1_SHIFT 7
        u16 cq_num_wqes;
 #elif defined(__LITTLE_ENDIAN)
        u16 cq_num_wqes;
@@ -593,8 +595,10 @@ struct iscsi_kwqe_init1 {
 #define ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE_SHIFT 4
 #define ISCSI_KWQE_INIT1_KEEP_ALIVE_ENABLE (0x1<<5)
 #define ISCSI_KWQE_INIT1_KEEP_ALIVE_ENABLE_SHIFT 5
-#define ISCSI_KWQE_INIT1_RESERVED1 (0x3<<6)
-#define ISCSI_KWQE_INIT1_RESERVED1_SHIFT 6
+#define ISCSI_KWQE_INIT1_TIME_STAMPS_ENABLE (0x1<<6)
+#define ISCSI_KWQE_INIT1_TIME_STAMPS_ENABLE_SHIFT 6
+#define ISCSI_KWQE_INIT1_RESERVED1 (0x1<<7)
+#define ISCSI_KWQE_INIT1_RESERVED1_SHIFT 7
        u8 cq_log_wqes_per_page;
 #endif
 #if defined(__BIG_ENDIAN)
index 50fef6963a811baa61b1fbec94e250a4e7457179..3104202902edb319ff02b92168226d4162fb9674 100644 (file)
@@ -172,16 +172,14 @@ void bnx2i_start(void *handle)
        struct bnx2i_hba *hba = handle;
        int i = HZ;
 
-       /*
-        * We should never register devices that don't support iSCSI
-        * (see bnx2i_init_one), so something is wrong if we try to
-        * start a iSCSI adapter on hardware with 0 supported iSCSI
-        * connections
+       /* On some bnx2x devices, it is possible that iSCSI is no
+        * longer supported after firmware is downloaded.  In that
+        * case, the iscsi_init_msg will return failure.
         */
-       BUG_ON(!hba->cnic->max_iscsi_conn);
 
        bnx2i_send_fw_iscsi_init_msg(hba);
-       while (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state) && i--)
+       while (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state) &&
+              !test_bit(ADAPTER_STATE_INIT_FAILED, &hba->adapter_state) && i--)
                msleep(BNX2I_INIT_POLL_TIME);
 }
 
index 80fa99b3d3842e1d4ac745d1b3602b8bdb5e04c4..8135f04671af3d679400a3d72396aebadd5dc96e 100644 (file)
@@ -658,11 +658,11 @@ static inline u32 cxgbi_tag_nonrsvd_bits(struct cxgbi_tag_format *tformat,
 static inline void *cxgbi_alloc_big_mem(unsigned int size,
                                        gfp_t gfp)
 {
-       void *p = kmalloc(size, gfp);
+       void *p = kzalloc(size, gfp | __GFP_NOWARN);
+
        if (!p)
-               p = vmalloc(size);
-       if (p)
-               memset(p, 0, size);
+               p = vzalloc(size);
+
        return p;
 }
 
index 34552bf1c023d6468af7aea32afcc70ab0f88953..55548dc5cec39da14ec4b23c33ae4d1a609d30e6 100644 (file)
@@ -530,7 +530,7 @@ static int esp_need_to_nego_sync(struct esp_target_data *tp)
 static int esp_alloc_lun_tag(struct esp_cmd_entry *ent,
                             struct esp_lun_data *lp)
 {
-       if (!ent->tag[0]) {
+       if (!ent->orig_tag[0]) {
                /* Non-tagged, slot already taken?  */
                if (lp->non_tagged_cmd)
                        return -EBUSY;
@@ -564,9 +564,9 @@ static int esp_alloc_lun_tag(struct esp_cmd_entry *ent,
                        return -EBUSY;
        }
 
-       BUG_ON(lp->tagged_cmds[ent->tag[1]]);
+       BUG_ON(lp->tagged_cmds[ent->orig_tag[1]]);
 
-       lp->tagged_cmds[ent->tag[1]] = ent;
+       lp->tagged_cmds[ent->orig_tag[1]] = ent;
        lp->num_tagged++;
 
        return 0;
@@ -575,9 +575,9 @@ static int esp_alloc_lun_tag(struct esp_cmd_entry *ent,
 static void esp_free_lun_tag(struct esp_cmd_entry *ent,
                             struct esp_lun_data *lp)
 {
-       if (ent->tag[0]) {
-               BUG_ON(lp->tagged_cmds[ent->tag[1]] != ent);
-               lp->tagged_cmds[ent->tag[1]] = NULL;
+       if (ent->orig_tag[0]) {
+               BUG_ON(lp->tagged_cmds[ent->orig_tag[1]] != ent);
+               lp->tagged_cmds[ent->orig_tag[1]] = NULL;
                lp->num_tagged--;
        } else {
                BUG_ON(lp->non_tagged_cmd != ent);
@@ -667,6 +667,8 @@ static struct esp_cmd_entry *find_and_prep_issuable_command(struct esp *esp)
                        ent->tag[0] = 0;
                        ent->tag[1] = 0;
                }
+               ent->orig_tag[0] = ent->tag[0];
+               ent->orig_tag[1] = ent->tag[1];
 
                if (esp_alloc_lun_tag(ent, lp) < 0)
                        continue;
index 28e22acf87ea94f9c703086689da850174495573..cd68805e8d787e1d2ca9acaedd667d87bc201eb7 100644 (file)
@@ -271,6 +271,7 @@ struct esp_cmd_entry {
 #define ESP_CMD_FLAG_AUTOSENSE 0x04 /* Doing automatic REQUEST_SENSE */
 
        u8                      tag[2];
+       u8                      orig_tag[2];
 
        u8                      status;
        u8                      message;
index 5cb08ae3e8c2eba3668cd92e6aa7dc6b10bf2d5b..f4360c5ea6a9f2f7a6782180d529c5d04bcce729 100644 (file)
@@ -818,7 +818,7 @@ lpfc_issue_reset(struct device *dev, struct device_attribute *attr,
  * the readyness after performing a firmware reset.
  *
  * Returns:
- * zero for success, -EPERM when port does not have privilage to perform the
+ * zero for success, -EPERM when port does not have privilege to perform the
  * reset, -EIO when port timeout from recovering from the reset.
  *
  * Note:
@@ -835,7 +835,7 @@ lpfc_sli4_pdev_status_reg_wait(struct lpfc_hba *phba)
        lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
                   &portstat_reg.word0);
 
-       /* verify if privilaged for the request operation */
+       /* verify if privileged for the request operation */
        if (!bf_get(lpfc_sliport_status_rn, &portstat_reg) &&
            !bf_get(lpfc_sliport_status_err, &portstat_reg))
                return -EPERM;
@@ -927,9 +927,9 @@ lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode)
        rc = lpfc_sli4_pdev_status_reg_wait(phba);
 
        if (rc == -EPERM) {
-               /* no privilage for reset */
+               /* no privilege for reset */
                lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
-                               "3150 No privilage to perform the requested "
+                               "3150 No privilege to perform the requested "
                                "access: x%x\n", reg_val);
        } else if (rc == -EIO) {
                /* reset failed, there is nothing more we can do */
index e6a1e0b38a19a8710eb30ca4bcfe615703224108..515c9629e9fedf0679257a762b553045858070c1 100644 (file)
@@ -549,7 +549,7 @@ out_probe_one:
 
 /**
  * megaraid_detach_one - release framework resources and call LLD release routine
- * @pdev       : handle for our PCI cofiguration space
+ * @pdev       : handle for our PCI configuration space
  *
  * This routine is called during driver unload. We free all the allocated
  * resources and call the corresponding LLD so that it can also release all
@@ -979,7 +979,7 @@ megaraid_fini_mbox(adapter_t *adapter)
  * @adapter            : soft state of the raid controller
  *
  * Allocate and align the shared mailbox. This maibox is used to issue
- * all the commands. For IO based controllers, the mailbox is also regsitered
+ * all the commands. For IO based controllers, the mailbox is also registered
  * with the FW. Allocate memory for all commands as well.
  * This is our big allocator.
  */
@@ -2027,7 +2027,7 @@ megaraid_mbox_prepare_pthru(adapter_t *adapter, scb_t *scb,
  * @scb                : scsi control block
  * @scp                : scsi command from the mid-layer
  *
- * Prepare a command for the scsi physical devices. This rountine prepares
+ * Prepare a command for the scsi physical devices. This routine prepares
  * commands for devices which can take extended CDBs (>10 bytes).
  */
 static void
@@ -2586,7 +2586,7 @@ megaraid_abort_handler(struct scsi_cmnd *scp)
 }
 
 /**
- * megaraid_reset_handler - device reset hadler for mailbox based driver
+ * megaraid_reset_handler - device reset handler for mailbox based driver
  * @scp                : reference command
  *
  * Reset handler for the mailbox based controller. First try to find out if
@@ -3446,7 +3446,7 @@ megaraid_mbox_display_scb(adapter_t *adapter, scb_t *scb)
  * megaraid_mbox_setup_device_map - manage device ids
  * @adapter    : Driver's soft state
  *
- * Manange the device ids to have an appropriate mapping between the kernel
+ * Manage the device ids to have an appropriate mapping between the kernel
  * scsi addresses and megaraid scsi and logical drive addresses. We export
  * scsi devices on their actual addresses, whereas the logical drives are
  * exported on a virtual scsi channel.
index 25506c7773812156f929715a3bba4bf4de634311..dfffd0f37916f18097eaafe21eb2589b033d9193 100644 (file)
@@ -896,7 +896,7 @@ hinfo_to_cinfo(mraid_hba_info_t *hinfo, mcontroller_t *cinfo)
 
 /**
  * mraid_mm_register_adp - Registration routine for low level drivers
- * @lld_adp    : Adapter objejct
+ * @lld_adp    : Adapter object
  */
 int
 mraid_mm_register_adp(mraid_mmadp_t *lld_adp)
index 12ff01cf6799c4cb85df90d0b0a1886388d8af19..4eb84011cb07f2470a29b7b6ee426f1d60d464f0 100644 (file)
@@ -88,7 +88,7 @@ enum MR_RAID_FLAGS_IO_SUB_TYPE {
 #define MEGASAS_FUSION_IN_RESET 0
 
 /*
- * Raid Context structure which describes MegaRAID specific IO Paramenters
+ * Raid Context structure which describes MegaRAID specific IO Parameters
  * This resides at offset 0x60 where the SGL normally starts in MPT IO Frames
  */
 
index d7993797f46e1339d4a36e05b88c6a49905c1d96..ab4be107cda139db1a11fbaad4942ac930efc9da 100644 (file)
@@ -1779,7 +1779,7 @@ qlafx00_fx_disc(scsi_qla_host_t *vha, fc_port_t *fcport, uint16_t fx_type)
                p_sysid = utsname();
                if (!p_sysid) {
                        ql_log(ql_log_warn, vha, 0x303c,
-                           "Not able to get the system informtion\n");
+                           "Not able to get the system information\n");
                        goto done_free_sp;
                }
                break;
index e3946e44e076e2aaf5bd0e7813554f65a9062745..8c11355dec233595a04a33877f832084af933168 100644 (file)
@@ -40,7 +40,7 @@
  * to glue code.  These bitbang setup() and cleanup() routines are always
  * used, though maybe they're called from controller-aware code.
  *
- * chipselect() and friends may use use spi_device->controller_data and
+ * chipselect() and friends may use spi_device->controller_data and
  * controller registers as appropriate.
  *
  *
index 36171fd2826bff5a113b43262c7c749ac87492e8..2cd9b0e44a41cede503dbb9e588020e7a949406b 100644 (file)
@@ -138,7 +138,7 @@ config SSB_DRIVER_MIPS
 
 config SSB_SFLASH
        bool "SSB serial flash support"
-       depends on SSB_DRIVER_MIPS && BROKEN
+       depends on SSB_DRIVER_MIPS
        default y
 
 # Assumption: We are on embedded, if we compile the MIPS core.
index e84cf04f441690e0d518e05a4b59976a80860662..50328de712fa6bb199fad05303bc28b05d46c6ed 100644 (file)
@@ -151,8 +151,8 @@ int ssb_sflash_init(struct ssb_chipcommon *cc)
        sflash->size = sflash->blocksize * sflash->numblocks;
        sflash->present = true;
 
-       pr_info("Found %s serial flash (blocksize: 0x%X, blocks: %d)\n",
-               e->name, e->blocksize, e->numblocks);
+       pr_info("Found %s serial flash (size: %dKiB, blocksize: 0x%X, blocks: %d)\n",
+               e->name, sflash->size / 1024, e->blocksize, e->numblocks);
 
        /* Prepare platform device, but don't register it yet. It's too early,
         * malloc (required by device_private_init) is not available yet. */
@@ -160,7 +160,5 @@ int ssb_sflash_init(struct ssb_chipcommon *cc)
                                         sflash->size;
        ssb_sflash_dev.dev.platform_data = sflash;
 
-       pr_err("Serial flash support is not implemented yet!\n");
-
-       return -ENOTSUPP;
+       return 0;
 }
index 9854a1daf606b9b713c63ec8114da1a163948df2..e826086ec30876d34016ff38e2625290550dc48c 100644 (file)
@@ -69,28 +69,20 @@ struct imx_drm_connector {
        struct module                           *owner;
 };
 
-static int imx_drm_driver_firstopen(struct drm_device *drm)
-{
-       if (!imx_drm_device_get())
-               return -EINVAL;
-
-       return 0;
-}
-
 static void imx_drm_driver_lastclose(struct drm_device *drm)
 {
        struct imx_drm_device *imxdrm = drm->dev_private;
 
        if (imxdrm->fbhelper)
                drm_fbdev_cma_restore_mode(imxdrm->fbhelper);
-
-       imx_drm_device_put();
 }
 
 static int imx_drm_driver_unload(struct drm_device *drm)
 {
        struct imx_drm_device *imxdrm = drm->dev_private;
 
+       imx_drm_device_put();
+
        drm_mode_config_cleanup(imxdrm->drm);
        drm_kms_helper_poll_fini(imxdrm->drm);
 
@@ -207,7 +199,6 @@ static const struct file_operations imx_drm_driver_fops = {
        .unlocked_ioctl = drm_ioctl,
        .mmap = drm_gem_cma_mmap,
        .poll = drm_poll,
-       .fasync = drm_fasync,
        .read = drm_read,
        .llseek = noop_llseek,
 };
@@ -226,8 +217,6 @@ struct drm_device *imx_drm_device_get(void)
        struct imx_drm_connector *con;
        struct imx_drm_crtc *crtc;
 
-       mutex_lock(&imxdrm->mutex);
-
        list_for_each_entry(enc, &imxdrm->encoder_list, list) {
                if (!try_module_get(enc->owner)) {
                        dev_err(imxdrm->dev, "could not get module %s\n",
@@ -254,8 +243,6 @@ struct drm_device *imx_drm_device_get(void)
 
        imxdrm->references++;
 
-       mutex_unlock(&imxdrm->mutex);
-
        return imxdrm->drm;
 
 unwind_crtc:
@@ -447,6 +434,9 @@ static int imx_drm_driver_load(struct drm_device *drm, unsigned long flags)
         */
        imxdrm->drm->vblank_disable_allowed = 1;
 
+       if (!imx_drm_device_get())
+               ret = -EINVAL;
+
        ret = 0;
 
 err_init:
@@ -783,7 +773,7 @@ int imx_drm_remove_connector(struct imx_drm_connector *imx_drm_connector)
 }
 EXPORT_SYMBOL_GPL(imx_drm_remove_connector);
 
-static struct drm_ioctl_desc imx_drm_ioctls[] = {
+static const struct drm_ioctl_desc imx_drm_ioctls[] = {
        /* none so far */
 };
 
@@ -791,13 +781,12 @@ static struct drm_driver imx_drm_driver = {
        .driver_features        = DRIVER_MODESET | DRIVER_GEM,
        .load                   = imx_drm_driver_load,
        .unload                 = imx_drm_driver_unload,
-       .firstopen              = imx_drm_driver_firstopen,
        .lastclose              = imx_drm_driver_lastclose,
        .gem_free_object        = drm_gem_cma_free_object,
        .gem_vm_ops             = &drm_gem_cma_vm_ops,
        .dumb_create            = drm_gem_cma_dumb_create,
        .dumb_map_offset        = drm_gem_cma_dumb_map_offset,
-       .dumb_destroy           = drm_gem_cma_dumb_destroy,
+       .dumb_destroy           = drm_gem_dumb_destroy,
 
        .get_vblank_counter     = drm_vblank_count,
        .enable_vblank          = imx_drm_enable_vblank,
index 2faa391006db68165def2bc6badb0be69743d380..28c8b0bcf5b2a2da86f3987389831670ccf574b6 100644 (file)
@@ -240,10 +240,6 @@ static int unregister_from_lirc(struct igorplug *ir)
        dprintk(DRIVER_NAME "[%d]: calling lirc_unregister_driver\n", devnum);
        lirc_unregister_driver(d->minor);
 
-       kfree(d);
-       ir->d = NULL;
-       kfree(ir);
-
        return devnum;
 }
 
@@ -377,20 +373,16 @@ static int igorplugusb_remote_poll(void *data, struct lirc_buffer *buf)
        return -ENODATA;
 }
 
-
-
 static int igorplugusb_remote_probe(struct usb_interface *intf,
                                    const struct usb_device_id *id)
 {
-       struct usb_device *dev = NULL;
+       struct usb_device *dev;
        struct usb_host_interface *idesc = NULL;
        struct usb_endpoint_descriptor *ep;
        struct igorplug *ir = NULL;
        struct lirc_driver *driver = NULL;
        int devnum, pipe, maxp;
-       int minor = 0;
        char buf[63], name[128] = "";
-       int mem_failure = 0;
        int ret;
 
        dprintk(DRIVER_NAME ": usb probe called.\n");
@@ -416,24 +408,18 @@ static int igorplugusb_remote_probe(struct usb_interface *intf,
        dprintk(DRIVER_NAME "[%d]: bytes_in_key=%zu maxp=%d\n",
                devnum, CODE_LENGTH, maxp);
 
-       mem_failure = 0;
-       ir = kzalloc(sizeof(struct igorplug), GFP_KERNEL);
-       if (!ir) {
-               mem_failure = 1;
-               goto mem_failure_switch;
-       }
-       driver = kzalloc(sizeof(struct lirc_driver), GFP_KERNEL);
-       if (!driver) {
-               mem_failure = 2;
-               goto mem_failure_switch;
-       }
+       ir = devm_kzalloc(&intf->dev, sizeof(*ir), GFP_KERNEL);
+       if (!ir)
+               return -ENOMEM;
+
+       driver = devm_kzalloc(&intf->dev, sizeof(*driver), GFP_KERNEL);
+       if (!driver)
+               return -ENOMEM;
 
        ir->buf_in = usb_alloc_coherent(dev, DEVICE_BUFLEN + DEVICE_HEADERLEN,
                                        GFP_ATOMIC, &ir->dma_in);
-       if (!ir->buf_in) {
-               mem_failure = 3;
-               goto mem_failure_switch;
-       }
+       if (!ir->buf_in)
+               return -ENOMEM;
 
        strcpy(driver->name, DRIVER_NAME " ");
        driver->minor = -1;
@@ -449,27 +435,14 @@ static int igorplugusb_remote_probe(struct usb_interface *intf,
        driver->dev = &intf->dev;
        driver->owner = THIS_MODULE;
 
-       minor = lirc_register_driver(driver);
-       if (minor < 0)
-               mem_failure = 9;
-
-mem_failure_switch:
-
-       switch (mem_failure) {
-       case 9:
+       ret = lirc_register_driver(driver);
+       if (ret < 0) {
                usb_free_coherent(dev, DEVICE_BUFLEN + DEVICE_HEADERLEN,
                        ir->buf_in, ir->dma_in);
-       case 3:
-               kfree(driver);
-       case 2:
-               kfree(ir);
-       case 1:
-               printk(DRIVER_NAME "[%d]: out of memory (code=%d)\n",
-                       devnum, mem_failure);
-               return -ENOMEM;
+               return ret;
        }
 
-       driver->minor = minor;
+       driver->minor = ret;
        ir->d = driver;
        ir->devnum = devnum;
        ir->usbdev = dev;
@@ -502,7 +475,6 @@ mem_failure_switch:
        return 0;
 }
 
-
 static void igorplugusb_remote_disconnect(struct usb_interface *intf)
 {
        struct usb_device *usbdev = interface_to_usbdev(intf);
index e988c81d763c7d4e3d96c32a9834bcb54a63ee25..ae18f025a7615fdd42a1f4838d0a83c2c338c668 100644 (file)
@@ -91,6 +91,17 @@ config THERMAL_EMULATION
          because userland can easily disable the thermal policy by simply
          flooding this sysfs node with low temperature values.
 
+config IMX_THERMAL
+       tristate "Temperature sensor driver for Freescale i.MX SoCs"
+       depends on CPU_THERMAL
+       depends on MFD_SYSCON
+       depends on OF
+       help
+         Support for Temperature Monitor (TEMPMON) found on Freescale i.MX SoCs.
+         It supports one critical trip point and one passive trip point.  The
+         cpufreq is used as the cooling device to throttle CPUs when the
+         passive trip is crossed.
+
 config SPEAR_THERMAL
        bool "SPEAr thermal sensor driver"
        depends on PLAT_SPEAR
@@ -114,14 +125,6 @@ config KIRKWOOD_THERMAL
          Support for the Kirkwood thermal sensor driver into the Linux thermal
          framework. Only kirkwood 88F6282 and 88F6283 have this sensor.
 
-config EXYNOS_THERMAL
-       tristate "Temperature sensor on Samsung EXYNOS"
-       depends on (ARCH_EXYNOS4 || ARCH_EXYNOS5)
-       depends on CPU_THERMAL
-       help
-         If you say yes here you get support for TMU (Thermal Management
-         Unit) on SAMSUNG EXYNOS series of SoC.
-
 config DOVE_THERMAL
        tristate "Temperature sensor on Marvell Dove SoCs"
        depends on ARCH_DOVE
@@ -184,4 +187,9 @@ menu "Texas Instruments thermal drivers"
 source "drivers/thermal/ti-soc-thermal/Kconfig"
 endmenu
 
+menu "Samsung thermal drivers"
+depends on PLAT_SAMSUNG
+source "drivers/thermal/samsung/Kconfig"
+endmenu
+
 endif
index 67184a293e3f615524225c166ec5b2ee2d00d831..c19df7ab26143cee1c9710e06f17c4315d2bacdd 100644 (file)
@@ -17,10 +17,11 @@ thermal_sys-$(CONFIG_CPU_THERMAL)   += cpu_cooling.o
 obj-$(CONFIG_SPEAR_THERMAL)    += spear_thermal.o
 obj-$(CONFIG_RCAR_THERMAL)     += rcar_thermal.o
 obj-$(CONFIG_KIRKWOOD_THERMAL)  += kirkwood_thermal.o
-obj-$(CONFIG_EXYNOS_THERMAL)   += exynos_thermal.o
+obj-y                          += samsung/
 obj-$(CONFIG_DOVE_THERMAL)     += dove_thermal.o
 obj-$(CONFIG_DB8500_THERMAL)   += db8500_thermal.o
 obj-$(CONFIG_ARMADA_THERMAL)   += armada_thermal.o
+obj-$(CONFIG_IMX_THERMAL)      += imx_thermal.o
 obj-$(CONFIG_DB8500_CPUFREQ_COOLING)   += db8500_cpufreq_cooling.o
 obj-$(CONFIG_INTEL_POWERCLAMP) += intel_powerclamp.o
 obj-$(CONFIG_X86_PKG_TEMP_THERMAL)     += x86_pkg_temp_thermal.o
index 82e15dbb3ac7a1b3b9063a1577c61f3b1025fb27..5b3744e7a95bb93348616c51ceaf00ae2326eee9 100644 (file)
@@ -322,6 +322,8 @@ static int cpufreq_thermal_notifier(struct notifier_block *nb,
 
        if (cpumask_test_cpu(policy->cpu, &notify_device->allowed_cpus))
                max_freq = notify_device->cpufreq_val;
+       else
+               return 0;
 
        /* Never exceed user_policy.max */
        if (max_freq > policy->user_policy.max)
diff --git a/drivers/thermal/exynos_thermal.c b/drivers/thermal/exynos_thermal.c
deleted file mode 100644 (file)
index 9af4b93..0000000
+++ /dev/null
@@ -1,1059 +0,0 @@
-/*
- * exynos_thermal.c - Samsung EXYNOS TMU (Thermal Management Unit)
- *
- *  Copyright (C) 2011 Samsung Electronics
- *  Donggeun Kim <dg77.kim@samsung.com>
- *  Amit Daniel Kachhap <amit.kachhap@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- *
- */
-
-#include <linux/module.h>
-#include <linux/err.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/platform_device.h>
-#include <linux/interrupt.h>
-#include <linux/clk.h>
-#include <linux/workqueue.h>
-#include <linux/sysfs.h>
-#include <linux/kobject.h>
-#include <linux/io.h>
-#include <linux/mutex.h>
-#include <linux/platform_data/exynos_thermal.h>
-#include <linux/thermal.h>
-#include <linux/cpufreq.h>
-#include <linux/cpu_cooling.h>
-#include <linux/of.h>
-
-/* Exynos generic registers */
-#define EXYNOS_TMU_REG_TRIMINFO                0x0
-#define EXYNOS_TMU_REG_CONTROL         0x20
-#define EXYNOS_TMU_REG_STATUS          0x28
-#define EXYNOS_TMU_REG_CURRENT_TEMP    0x40
-#define EXYNOS_TMU_REG_INTEN           0x70
-#define EXYNOS_TMU_REG_INTSTAT         0x74
-#define EXYNOS_TMU_REG_INTCLEAR                0x78
-
-#define EXYNOS_TMU_TRIM_TEMP_MASK      0xff
-#define EXYNOS_TMU_GAIN_SHIFT          8
-#define EXYNOS_TMU_REF_VOLTAGE_SHIFT   24
-#define EXYNOS_TMU_CORE_ON             3
-#define EXYNOS_TMU_CORE_OFF            2
-#define EXYNOS_TMU_DEF_CODE_TO_TEMP_OFFSET     50
-
-/* Exynos4210 specific registers */
-#define EXYNOS4210_TMU_REG_THRESHOLD_TEMP      0x44
-#define EXYNOS4210_TMU_REG_TRIG_LEVEL0 0x50
-#define EXYNOS4210_TMU_REG_TRIG_LEVEL1 0x54
-#define EXYNOS4210_TMU_REG_TRIG_LEVEL2 0x58
-#define EXYNOS4210_TMU_REG_TRIG_LEVEL3 0x5C
-#define EXYNOS4210_TMU_REG_PAST_TEMP0  0x60
-#define EXYNOS4210_TMU_REG_PAST_TEMP1  0x64
-#define EXYNOS4210_TMU_REG_PAST_TEMP2  0x68
-#define EXYNOS4210_TMU_REG_PAST_TEMP3  0x6C
-
-#define EXYNOS4210_TMU_TRIG_LEVEL0_MASK        0x1
-#define EXYNOS4210_TMU_TRIG_LEVEL1_MASK        0x10
-#define EXYNOS4210_TMU_TRIG_LEVEL2_MASK        0x100
-#define EXYNOS4210_TMU_TRIG_LEVEL3_MASK        0x1000
-#define EXYNOS4210_TMU_INTCLEAR_VAL    0x1111
-
-/* Exynos5250 and Exynos4412 specific registers */
-#define EXYNOS_TMU_TRIMINFO_CON        0x14
-#define EXYNOS_THD_TEMP_RISE           0x50
-#define EXYNOS_THD_TEMP_FALL           0x54
-#define EXYNOS_EMUL_CON                0x80
-
-#define EXYNOS_TRIMINFO_RELOAD         0x1
-#define EXYNOS_TMU_CLEAR_RISE_INT      0x111
-#define EXYNOS_TMU_CLEAR_FALL_INT      (0x111 << 12)
-#define EXYNOS_MUX_ADDR_VALUE          6
-#define EXYNOS_MUX_ADDR_SHIFT          20
-#define EXYNOS_TMU_TRIP_MODE_SHIFT     13
-
-#define EFUSE_MIN_VALUE 40
-#define EFUSE_MAX_VALUE 100
-
-/* In-kernel thermal framework related macros & definations */
-#define SENSOR_NAME_LEN        16
-#define MAX_TRIP_COUNT 8
-#define MAX_COOLING_DEVICE 4
-#define MAX_THRESHOLD_LEVS 4
-
-#define ACTIVE_INTERVAL 500
-#define IDLE_INTERVAL 10000
-#define MCELSIUS       1000
-
-#ifdef CONFIG_THERMAL_EMULATION
-#define EXYNOS_EMUL_TIME       0x57F0
-#define EXYNOS_EMUL_TIME_SHIFT 16
-#define EXYNOS_EMUL_DATA_SHIFT 8
-#define EXYNOS_EMUL_DATA_MASK  0xFF
-#define EXYNOS_EMUL_ENABLE     0x1
-#endif /* CONFIG_THERMAL_EMULATION */
-
-/* CPU Zone information */
-#define PANIC_ZONE      4
-#define WARN_ZONE       3
-#define MONITOR_ZONE    2
-#define SAFE_ZONE       1
-
-#define GET_ZONE(trip) (trip + 2)
-#define GET_TRIP(zone) (zone - 2)
-
-#define EXYNOS_ZONE_COUNT      3
-
-struct exynos_tmu_data {
-       struct exynos_tmu_platform_data *pdata;
-       struct resource *mem;
-       void __iomem *base;
-       int irq;
-       enum soc_type soc;
-       struct work_struct irq_work;
-       struct mutex lock;
-       struct clk *clk;
-       u8 temp_error1, temp_error2;
-};
-
-struct thermal_trip_point_conf {
-       int trip_val[MAX_TRIP_COUNT];
-       int trip_count;
-       u8 trigger_falling;
-};
-
-struct thermal_cooling_conf {
-       struct freq_clip_table freq_data[MAX_TRIP_COUNT];
-       int freq_clip_count;
-};
-
-struct thermal_sensor_conf {
-       char name[SENSOR_NAME_LEN];
-       int (*read_temperature)(void *data);
-       int (*write_emul_temp)(void *drv_data, unsigned long temp);
-       struct thermal_trip_point_conf trip_data;
-       struct thermal_cooling_conf cooling_data;
-       void *private_data;
-};
-
-struct exynos_thermal_zone {
-       enum thermal_device_mode mode;
-       struct thermal_zone_device *therm_dev;
-       struct thermal_cooling_device *cool_dev[MAX_COOLING_DEVICE];
-       unsigned int cool_dev_size;
-       struct platform_device *exynos4_dev;
-       struct thermal_sensor_conf *sensor_conf;
-       bool bind;
-};
-
-static struct exynos_thermal_zone *th_zone;
-static void exynos_unregister_thermal(void);
-static int exynos_register_thermal(struct thermal_sensor_conf *sensor_conf);
-
-/* Get mode callback functions for thermal zone */
-static int exynos_get_mode(struct thermal_zone_device *thermal,
-                       enum thermal_device_mode *mode)
-{
-       if (th_zone)
-               *mode = th_zone->mode;
-       return 0;
-}
-
-/* Set mode callback functions for thermal zone */
-static int exynos_set_mode(struct thermal_zone_device *thermal,
-                       enum thermal_device_mode mode)
-{
-       if (!th_zone->therm_dev) {
-               pr_notice("thermal zone not registered\n");
-               return 0;
-       }
-
-       mutex_lock(&th_zone->therm_dev->lock);
-
-       if (mode == THERMAL_DEVICE_ENABLED &&
-               !th_zone->sensor_conf->trip_data.trigger_falling)
-               th_zone->therm_dev->polling_delay = IDLE_INTERVAL;
-       else
-               th_zone->therm_dev->polling_delay = 0;
-
-       mutex_unlock(&th_zone->therm_dev->lock);
-
-       th_zone->mode = mode;
-       thermal_zone_device_update(th_zone->therm_dev);
-       pr_info("thermal polling set for duration=%d msec\n",
-                               th_zone->therm_dev->polling_delay);
-       return 0;
-}
-
-
-/* Get trip type callback functions for thermal zone */
-static int exynos_get_trip_type(struct thermal_zone_device *thermal, int trip,
-                                enum thermal_trip_type *type)
-{
-       switch (GET_ZONE(trip)) {
-       case MONITOR_ZONE:
-       case WARN_ZONE:
-               *type = THERMAL_TRIP_ACTIVE;
-               break;
-       case PANIC_ZONE:
-               *type = THERMAL_TRIP_CRITICAL;
-               break;
-       default:
-               return -EINVAL;
-       }
-       return 0;
-}
-
-/* Get trip temperature callback functions for thermal zone */
-static int exynos_get_trip_temp(struct thermal_zone_device *thermal, int trip,
-                               unsigned long *temp)
-{
-       if (trip < GET_TRIP(MONITOR_ZONE) || trip > GET_TRIP(PANIC_ZONE))
-               return -EINVAL;
-
-       *temp = th_zone->sensor_conf->trip_data.trip_val[trip];
-       /* convert the temperature into millicelsius */
-       *temp = *temp * MCELSIUS;
-
-       return 0;
-}
-
-/* Get critical temperature callback functions for thermal zone */
-static int exynos_get_crit_temp(struct thermal_zone_device *thermal,
-                               unsigned long *temp)
-{
-       int ret;
-       /* Panic zone */
-       ret = exynos_get_trip_temp(thermal, GET_TRIP(PANIC_ZONE), temp);
-       return ret;
-}
-
-/* Bind callback functions for thermal zone */
-static int exynos_bind(struct thermal_zone_device *thermal,
-                       struct thermal_cooling_device *cdev)
-{
-       int ret = 0, i, tab_size, level;
-       struct freq_clip_table *tab_ptr, *clip_data;
-       struct thermal_sensor_conf *data = th_zone->sensor_conf;
-
-       tab_ptr = (struct freq_clip_table *)data->cooling_data.freq_data;
-       tab_size = data->cooling_data.freq_clip_count;
-
-       if (tab_ptr == NULL || tab_size == 0)
-               return -EINVAL;
-
-       /* find the cooling device registered*/
-       for (i = 0; i < th_zone->cool_dev_size; i++)
-               if (cdev == th_zone->cool_dev[i])
-                       break;
-
-       /* No matching cooling device */
-       if (i == th_zone->cool_dev_size)
-               return 0;
-
-       /* Bind the thermal zone to the cpufreq cooling device */
-       for (i = 0; i < tab_size; i++) {
-               clip_data = (struct freq_clip_table *)&(tab_ptr[i]);
-               level = cpufreq_cooling_get_level(0, clip_data->freq_clip_max);
-               if (level == THERMAL_CSTATE_INVALID)
-                       return 0;
-               switch (GET_ZONE(i)) {
-               case MONITOR_ZONE:
-               case WARN_ZONE:
-                       if (thermal_zone_bind_cooling_device(thermal, i, cdev,
-                                                               level, 0)) {
-                               pr_err("error binding cdev inst %d\n", i);
-                               ret = -EINVAL;
-                       }
-                       th_zone->bind = true;
-                       break;
-               default:
-                       ret = -EINVAL;
-               }
-       }
-
-       return ret;
-}
-
-/* Unbind callback functions for thermal zone */
-static int exynos_unbind(struct thermal_zone_device *thermal,
-                       struct thermal_cooling_device *cdev)
-{
-       int ret = 0, i, tab_size;
-       struct thermal_sensor_conf *data = th_zone->sensor_conf;
-
-       if (th_zone->bind == false)
-               return 0;
-
-       tab_size = data->cooling_data.freq_clip_count;
-
-       if (tab_size == 0)
-               return -EINVAL;
-
-       /* find the cooling device registered*/
-       for (i = 0; i < th_zone->cool_dev_size; i++)
-               if (cdev == th_zone->cool_dev[i])
-                       break;
-
-       /* No matching cooling device */
-       if (i == th_zone->cool_dev_size)
-               return 0;
-
-       /* Bind the thermal zone to the cpufreq cooling device */
-       for (i = 0; i < tab_size; i++) {
-               switch (GET_ZONE(i)) {
-               case MONITOR_ZONE:
-               case WARN_ZONE:
-                       if (thermal_zone_unbind_cooling_device(thermal, i,
-                                                               cdev)) {
-                               pr_err("error unbinding cdev inst=%d\n", i);
-                               ret = -EINVAL;
-                       }
-                       th_zone->bind = false;
-                       break;
-               default:
-                       ret = -EINVAL;
-               }
-       }
-       return ret;
-}
-
-/* Get temperature callback functions for thermal zone */
-static int exynos_get_temp(struct thermal_zone_device *thermal,
-                       unsigned long *temp)
-{
-       void *data;
-
-       if (!th_zone->sensor_conf) {
-               pr_info("Temperature sensor not initialised\n");
-               return -EINVAL;
-       }
-       data = th_zone->sensor_conf->private_data;
-       *temp = th_zone->sensor_conf->read_temperature(data);
-       /* convert the temperature into millicelsius */
-       *temp = *temp * MCELSIUS;
-       return 0;
-}
-
-/* Get temperature callback functions for thermal zone */
-static int exynos_set_emul_temp(struct thermal_zone_device *thermal,
-                                               unsigned long temp)
-{
-       void *data;
-       int ret = -EINVAL;
-
-       if (!th_zone->sensor_conf) {
-               pr_info("Temperature sensor not initialised\n");
-               return -EINVAL;
-       }
-       data = th_zone->sensor_conf->private_data;
-       if (th_zone->sensor_conf->write_emul_temp)
-               ret = th_zone->sensor_conf->write_emul_temp(data, temp);
-       return ret;
-}
-
-/* Get the temperature trend */
-static int exynos_get_trend(struct thermal_zone_device *thermal,
-                       int trip, enum thermal_trend *trend)
-{
-       int ret;
-       unsigned long trip_temp;
-
-       ret = exynos_get_trip_temp(thermal, trip, &trip_temp);
-       if (ret < 0)
-               return ret;
-
-       if (thermal->temperature >= trip_temp)
-               *trend = THERMAL_TREND_RAISE_FULL;
-       else
-               *trend = THERMAL_TREND_DROP_FULL;
-
-       return 0;
-}
-/* Operation callback functions for thermal zone */
-static struct thermal_zone_device_ops const exynos_dev_ops = {
-       .bind = exynos_bind,
-       .unbind = exynos_unbind,
-       .get_temp = exynos_get_temp,
-       .set_emul_temp = exynos_set_emul_temp,
-       .get_trend = exynos_get_trend,
-       .get_mode = exynos_get_mode,
-       .set_mode = exynos_set_mode,
-       .get_trip_type = exynos_get_trip_type,
-       .get_trip_temp = exynos_get_trip_temp,
-       .get_crit_temp = exynos_get_crit_temp,
-};
-
-/*
- * This function may be called from interrupt based temperature sensor
- * when threshold is changed.
- */
-static void exynos_report_trigger(void)
-{
-       unsigned int i;
-       char data[10];
-       char *envp[] = { data, NULL };
-
-       if (!th_zone || !th_zone->therm_dev)
-               return;
-       if (th_zone->bind == false) {
-               for (i = 0; i < th_zone->cool_dev_size; i++) {
-                       if (!th_zone->cool_dev[i])
-                               continue;
-                       exynos_bind(th_zone->therm_dev,
-                                       th_zone->cool_dev[i]);
-               }
-       }
-
-       thermal_zone_device_update(th_zone->therm_dev);
-
-       mutex_lock(&th_zone->therm_dev->lock);
-       /* Find the level for which trip happened */
-       for (i = 0; i < th_zone->sensor_conf->trip_data.trip_count; i++) {
-               if (th_zone->therm_dev->last_temperature <
-                       th_zone->sensor_conf->trip_data.trip_val[i] * MCELSIUS)
-                       break;
-       }
-
-       if (th_zone->mode == THERMAL_DEVICE_ENABLED &&
-               !th_zone->sensor_conf->trip_data.trigger_falling) {
-               if (i > 0)
-                       th_zone->therm_dev->polling_delay = ACTIVE_INTERVAL;
-               else
-                       th_zone->therm_dev->polling_delay = IDLE_INTERVAL;
-       }
-
-       snprintf(data, sizeof(data), "%u", i);
-       kobject_uevent_env(&th_zone->therm_dev->device.kobj, KOBJ_CHANGE, envp);
-       mutex_unlock(&th_zone->therm_dev->lock);
-}
-
-/* Register with the in-kernel thermal management */
-static int exynos_register_thermal(struct thermal_sensor_conf *sensor_conf)
-{
-       int ret;
-       struct cpumask mask_val;
-
-       if (!sensor_conf || !sensor_conf->read_temperature) {
-               pr_err("Temperature sensor not initialised\n");
-               return -EINVAL;
-       }
-
-       th_zone = kzalloc(sizeof(struct exynos_thermal_zone), GFP_KERNEL);
-       if (!th_zone)
-               return -ENOMEM;
-
-       th_zone->sensor_conf = sensor_conf;
-       cpumask_set_cpu(0, &mask_val);
-       th_zone->cool_dev[0] = cpufreq_cooling_register(&mask_val);
-       if (IS_ERR(th_zone->cool_dev[0])) {
-               pr_err("Failed to register cpufreq cooling device\n");
-               ret = -EINVAL;
-               goto err_unregister;
-       }
-       th_zone->cool_dev_size++;
-
-       th_zone->therm_dev = thermal_zone_device_register(sensor_conf->name,
-                       EXYNOS_ZONE_COUNT, 0, NULL, &exynos_dev_ops, NULL, 0,
-                       sensor_conf->trip_data.trigger_falling ?
-                       0 : IDLE_INTERVAL);
-
-       if (IS_ERR(th_zone->therm_dev)) {
-               pr_err("Failed to register thermal zone device\n");
-               ret = PTR_ERR(th_zone->therm_dev);
-               goto err_unregister;
-       }
-       th_zone->mode = THERMAL_DEVICE_ENABLED;
-
-       pr_info("Exynos: Kernel Thermal management registered\n");
-
-       return 0;
-
-err_unregister:
-       exynos_unregister_thermal();
-       return ret;
-}
-
-/* Un-Register with the in-kernel thermal management */
-static void exynos_unregister_thermal(void)
-{
-       int i;
-
-       if (!th_zone)
-               return;
-
-       if (th_zone->therm_dev)
-               thermal_zone_device_unregister(th_zone->therm_dev);
-
-       for (i = 0; i < th_zone->cool_dev_size; i++) {
-               if (th_zone->cool_dev[i])
-                       cpufreq_cooling_unregister(th_zone->cool_dev[i]);
-       }
-
-       kfree(th_zone);
-       pr_info("Exynos: Kernel Thermal management unregistered\n");
-}
-
-/*
- * TMU treats temperature as a mapped temperature code.
- * The temperature is converted differently depending on the calibration type.
- */
-static int temp_to_code(struct exynos_tmu_data *data, u8 temp)
-{
-       struct exynos_tmu_platform_data *pdata = data->pdata;
-       int temp_code;
-
-       if (data->soc == SOC_ARCH_EXYNOS4210)
-               /* temp should range between 25 and 125 */
-               if (temp < 25 || temp > 125) {
-                       temp_code = -EINVAL;
-                       goto out;
-               }
-
-       switch (pdata->cal_type) {
-       case TYPE_TWO_POINT_TRIMMING:
-               temp_code = (temp - 25) *
-                   (data->temp_error2 - data->temp_error1) /
-                   (85 - 25) + data->temp_error1;
-               break;
-       case TYPE_ONE_POINT_TRIMMING:
-               temp_code = temp + data->temp_error1 - 25;
-               break;
-       default:
-               temp_code = temp + EXYNOS_TMU_DEF_CODE_TO_TEMP_OFFSET;
-               break;
-       }
-out:
-       return temp_code;
-}
-
-/*
- * Calculate a temperature value from a temperature code.
- * The unit of the temperature is degree Celsius.
- */
-static int code_to_temp(struct exynos_tmu_data *data, u8 temp_code)
-{
-       struct exynos_tmu_platform_data *pdata = data->pdata;
-       int temp;
-
-       if (data->soc == SOC_ARCH_EXYNOS4210)
-               /* temp_code should range between 75 and 175 */
-               if (temp_code < 75 || temp_code > 175) {
-                       temp = -ENODATA;
-                       goto out;
-               }
-
-       switch (pdata->cal_type) {
-       case TYPE_TWO_POINT_TRIMMING:
-               temp = (temp_code - data->temp_error1) * (85 - 25) /
-                   (data->temp_error2 - data->temp_error1) + 25;
-               break;
-       case TYPE_ONE_POINT_TRIMMING:
-               temp = temp_code - data->temp_error1 + 25;
-               break;
-       default:
-               temp = temp_code - EXYNOS_TMU_DEF_CODE_TO_TEMP_OFFSET;
-               break;
-       }
-out:
-       return temp;
-}
-
-static int exynos_tmu_initialize(struct platform_device *pdev)
-{
-       struct exynos_tmu_data *data = platform_get_drvdata(pdev);
-       struct exynos_tmu_platform_data *pdata = data->pdata;
-       unsigned int status, trim_info;
-       unsigned int rising_threshold = 0, falling_threshold = 0;
-       int ret = 0, threshold_code, i, trigger_levs = 0;
-
-       mutex_lock(&data->lock);
-       clk_enable(data->clk);
-
-       status = readb(data->base + EXYNOS_TMU_REG_STATUS);
-       if (!status) {
-               ret = -EBUSY;
-               goto out;
-       }
-
-       if (data->soc == SOC_ARCH_EXYNOS) {
-               __raw_writel(EXYNOS_TRIMINFO_RELOAD,
-                               data->base + EXYNOS_TMU_TRIMINFO_CON);
-       }
-       /* Save trimming info in order to perform calibration */
-       trim_info = readl(data->base + EXYNOS_TMU_REG_TRIMINFO);
-       data->temp_error1 = trim_info & EXYNOS_TMU_TRIM_TEMP_MASK;
-       data->temp_error2 = ((trim_info >> 8) & EXYNOS_TMU_TRIM_TEMP_MASK);
-
-       if ((EFUSE_MIN_VALUE > data->temp_error1) ||
-                       (data->temp_error1 > EFUSE_MAX_VALUE) ||
-                       (data->temp_error2 != 0))
-               data->temp_error1 = pdata->efuse_value;
-
-       /* Count trigger levels to be enabled */
-       for (i = 0; i < MAX_THRESHOLD_LEVS; i++)
-               if (pdata->trigger_levels[i])
-                       trigger_levs++;
-
-       if (data->soc == SOC_ARCH_EXYNOS4210) {
-               /* Write temperature code for threshold */
-               threshold_code = temp_to_code(data, pdata->threshold);
-               if (threshold_code < 0) {
-                       ret = threshold_code;
-                       goto out;
-               }
-               writeb(threshold_code,
-                       data->base + EXYNOS4210_TMU_REG_THRESHOLD_TEMP);
-               for (i = 0; i < trigger_levs; i++)
-                       writeb(pdata->trigger_levels[i],
-                       data->base + EXYNOS4210_TMU_REG_TRIG_LEVEL0 + i * 4);
-
-               writel(EXYNOS4210_TMU_INTCLEAR_VAL,
-                       data->base + EXYNOS_TMU_REG_INTCLEAR);
-       } else if (data->soc == SOC_ARCH_EXYNOS) {
-               /* Write temperature code for rising and falling threshold */
-               for (i = 0; i < trigger_levs; i++) {
-                       threshold_code = temp_to_code(data,
-                                               pdata->trigger_levels[i]);
-                       if (threshold_code < 0) {
-                               ret = threshold_code;
-                               goto out;
-                       }
-                       rising_threshold |= threshold_code << 8 * i;
-                       if (pdata->threshold_falling) {
-                               threshold_code = temp_to_code(data,
-                                               pdata->trigger_levels[i] -
-                                               pdata->threshold_falling);
-                               if (threshold_code > 0)
-                                       falling_threshold |=
-                                               threshold_code << 8 * i;
-                       }
-               }
-
-               writel(rising_threshold,
-                               data->base + EXYNOS_THD_TEMP_RISE);
-               writel(falling_threshold,
-                               data->base + EXYNOS_THD_TEMP_FALL);
-
-               writel(EXYNOS_TMU_CLEAR_RISE_INT | EXYNOS_TMU_CLEAR_FALL_INT,
-                               data->base + EXYNOS_TMU_REG_INTCLEAR);
-       }
-out:
-       clk_disable(data->clk);
-       mutex_unlock(&data->lock);
-
-       return ret;
-}
-
-static void exynos_tmu_control(struct platform_device *pdev, bool on)
-{
-       struct exynos_tmu_data *data = platform_get_drvdata(pdev);
-       struct exynos_tmu_platform_data *pdata = data->pdata;
-       unsigned int con, interrupt_en;
-
-       mutex_lock(&data->lock);
-       clk_enable(data->clk);
-
-       con = pdata->reference_voltage << EXYNOS_TMU_REF_VOLTAGE_SHIFT |
-               pdata->gain << EXYNOS_TMU_GAIN_SHIFT;
-
-       if (data->soc == SOC_ARCH_EXYNOS) {
-               con |= pdata->noise_cancel_mode << EXYNOS_TMU_TRIP_MODE_SHIFT;
-               con |= (EXYNOS_MUX_ADDR_VALUE << EXYNOS_MUX_ADDR_SHIFT);
-       }
-
-       if (on) {
-               con |= EXYNOS_TMU_CORE_ON;
-               interrupt_en = pdata->trigger_level3_en << 12 |
-                       pdata->trigger_level2_en << 8 |
-                       pdata->trigger_level1_en << 4 |
-                       pdata->trigger_level0_en;
-               if (pdata->threshold_falling)
-                       interrupt_en |= interrupt_en << 16;
-       } else {
-               con |= EXYNOS_TMU_CORE_OFF;
-               interrupt_en = 0; /* Disable all interrupts */
-       }
-       writel(interrupt_en, data->base + EXYNOS_TMU_REG_INTEN);
-       writel(con, data->base + EXYNOS_TMU_REG_CONTROL);
-
-       clk_disable(data->clk);
-       mutex_unlock(&data->lock);
-}
-
-static int exynos_tmu_read(struct exynos_tmu_data *data)
-{
-       u8 temp_code;
-       int temp;
-
-       mutex_lock(&data->lock);
-       clk_enable(data->clk);
-
-       temp_code = readb(data->base + EXYNOS_TMU_REG_CURRENT_TEMP);
-       temp = code_to_temp(data, temp_code);
-
-       clk_disable(data->clk);
-       mutex_unlock(&data->lock);
-
-       return temp;
-}
-
-#ifdef CONFIG_THERMAL_EMULATION
-static int exynos_tmu_set_emulation(void *drv_data, unsigned long temp)
-{
-       struct exynos_tmu_data *data = drv_data;
-       unsigned int reg;
-       int ret = -EINVAL;
-
-       if (data->soc == SOC_ARCH_EXYNOS4210)
-               goto out;
-
-       if (temp && temp < MCELSIUS)
-               goto out;
-
-       mutex_lock(&data->lock);
-       clk_enable(data->clk);
-
-       reg = readl(data->base + EXYNOS_EMUL_CON);
-
-       if (temp) {
-               temp /= MCELSIUS;
-
-               reg = (EXYNOS_EMUL_TIME << EXYNOS_EMUL_TIME_SHIFT) |
-                       (temp_to_code(data, temp)
-                        << EXYNOS_EMUL_DATA_SHIFT) | EXYNOS_EMUL_ENABLE;
-       } else {
-               reg &= ~EXYNOS_EMUL_ENABLE;
-       }
-
-       writel(reg, data->base + EXYNOS_EMUL_CON);
-
-       clk_disable(data->clk);
-       mutex_unlock(&data->lock);
-       return 0;
-out:
-       return ret;
-}
-#else
-static int exynos_tmu_set_emulation(void *drv_data,    unsigned long temp)
-       { return -EINVAL; }
-#endif/*CONFIG_THERMAL_EMULATION*/
-
-static void exynos_tmu_work(struct work_struct *work)
-{
-       struct exynos_tmu_data *data = container_of(work,
-                       struct exynos_tmu_data, irq_work);
-
-       exynos_report_trigger();
-       mutex_lock(&data->lock);
-       clk_enable(data->clk);
-       if (data->soc == SOC_ARCH_EXYNOS)
-               writel(EXYNOS_TMU_CLEAR_RISE_INT |
-                               EXYNOS_TMU_CLEAR_FALL_INT,
-                               data->base + EXYNOS_TMU_REG_INTCLEAR);
-       else
-               writel(EXYNOS4210_TMU_INTCLEAR_VAL,
-                               data->base + EXYNOS_TMU_REG_INTCLEAR);
-       clk_disable(data->clk);
-       mutex_unlock(&data->lock);
-
-       enable_irq(data->irq);
-}
-
-static irqreturn_t exynos_tmu_irq(int irq, void *id)
-{
-       struct exynos_tmu_data *data = id;
-
-       disable_irq_nosync(irq);
-       schedule_work(&data->irq_work);
-
-       return IRQ_HANDLED;
-}
-static struct thermal_sensor_conf exynos_sensor_conf = {
-       .name                   = "exynos-therm",
-       .read_temperature       = (int (*)(void *))exynos_tmu_read,
-       .write_emul_temp        = exynos_tmu_set_emulation,
-};
-
-#if defined(CONFIG_CPU_EXYNOS4210)
-static struct exynos_tmu_platform_data const exynos4210_default_tmu_data = {
-       .threshold = 80,
-       .trigger_levels[0] = 5,
-       .trigger_levels[1] = 20,
-       .trigger_levels[2] = 30,
-       .trigger_level0_en = 1,
-       .trigger_level1_en = 1,
-       .trigger_level2_en = 1,
-       .trigger_level3_en = 0,
-       .gain = 15,
-       .reference_voltage = 7,
-       .cal_type = TYPE_ONE_POINT_TRIMMING,
-       .freq_tab[0] = {
-               .freq_clip_max = 800 * 1000,
-               .temp_level = 85,
-       },
-       .freq_tab[1] = {
-               .freq_clip_max = 200 * 1000,
-               .temp_level = 100,
-       },
-       .freq_tab_count = 2,
-       .type = SOC_ARCH_EXYNOS4210,
-};
-#define EXYNOS4210_TMU_DRV_DATA (&exynos4210_default_tmu_data)
-#else
-#define EXYNOS4210_TMU_DRV_DATA (NULL)
-#endif
-
-#if defined(CONFIG_SOC_EXYNOS5250) || defined(CONFIG_SOC_EXYNOS4412) || \
-       defined(CONFIG_SOC_EXYNOS4212)
-static struct exynos_tmu_platform_data const exynos_default_tmu_data = {
-       .threshold_falling = 10,
-       .trigger_levels[0] = 85,
-       .trigger_levels[1] = 103,
-       .trigger_levels[2] = 110,
-       .trigger_level0_en = 1,
-       .trigger_level1_en = 1,
-       .trigger_level2_en = 1,
-       .trigger_level3_en = 0,
-       .gain = 8,
-       .reference_voltage = 16,
-       .noise_cancel_mode = 4,
-       .cal_type = TYPE_ONE_POINT_TRIMMING,
-       .efuse_value = 55,
-       .freq_tab[0] = {
-               .freq_clip_max = 800 * 1000,
-               .temp_level = 85,
-       },
-       .freq_tab[1] = {
-               .freq_clip_max = 200 * 1000,
-               .temp_level = 103,
-       },
-       .freq_tab_count = 2,
-       .type = SOC_ARCH_EXYNOS,
-};
-#define EXYNOS_TMU_DRV_DATA (&exynos_default_tmu_data)
-#else
-#define EXYNOS_TMU_DRV_DATA (NULL)
-#endif
-
-#ifdef CONFIG_OF
-static const struct of_device_id exynos_tmu_match[] = {
-       {
-               .compatible = "samsung,exynos4210-tmu",
-               .data = (void *)EXYNOS4210_TMU_DRV_DATA,
-       },
-       {
-               .compatible = "samsung,exynos4412-tmu",
-               .data = (void *)EXYNOS_TMU_DRV_DATA,
-       },
-       {
-               .compatible = "samsung,exynos5250-tmu",
-               .data = (void *)EXYNOS_TMU_DRV_DATA,
-       },
-       {},
-};
-MODULE_DEVICE_TABLE(of, exynos_tmu_match);
-#endif
-
-static struct platform_device_id exynos_tmu_driver_ids[] = {
-       {
-               .name           = "exynos4210-tmu",
-               .driver_data    = (kernel_ulong_t)EXYNOS4210_TMU_DRV_DATA,
-       },
-       {
-               .name           = "exynos5250-tmu",
-               .driver_data    = (kernel_ulong_t)EXYNOS_TMU_DRV_DATA,
-       },
-       { },
-};
-MODULE_DEVICE_TABLE(platform, exynos_tmu_driver_ids);
-
-static inline struct  exynos_tmu_platform_data *exynos_get_driver_data(
-                       struct platform_device *pdev)
-{
-#ifdef CONFIG_OF
-       if (pdev->dev.of_node) {
-               const struct of_device_id *match;
-               match = of_match_node(exynos_tmu_match, pdev->dev.of_node);
-               if (!match)
-                       return NULL;
-               return (struct exynos_tmu_platform_data *) match->data;
-       }
-#endif
-       return (struct exynos_tmu_platform_data *)
-                       platform_get_device_id(pdev)->driver_data;
-}
-
-static int exynos_tmu_probe(struct platform_device *pdev)
-{
-       struct exynos_tmu_data *data;
-       struct exynos_tmu_platform_data *pdata = pdev->dev.platform_data;
-       int ret, i;
-
-       if (!pdata)
-               pdata = exynos_get_driver_data(pdev);
-
-       if (!pdata) {
-               dev_err(&pdev->dev, "No platform init data supplied.\n");
-               return -ENODEV;
-       }
-       data = devm_kzalloc(&pdev->dev, sizeof(struct exynos_tmu_data),
-                                       GFP_KERNEL);
-       if (!data) {
-               dev_err(&pdev->dev, "Failed to allocate driver structure\n");
-               return -ENOMEM;
-       }
-
-       data->irq = platform_get_irq(pdev, 0);
-       if (data->irq < 0) {
-               dev_err(&pdev->dev, "Failed to get platform irq\n");
-               return data->irq;
-       }
-
-       INIT_WORK(&data->irq_work, exynos_tmu_work);
-
-       data->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       data->base = devm_ioremap_resource(&pdev->dev, data->mem);
-       if (IS_ERR(data->base))
-               return PTR_ERR(data->base);
-
-       ret = devm_request_irq(&pdev->dev, data->irq, exynos_tmu_irq,
-               IRQF_TRIGGER_RISING, "exynos-tmu", data);
-       if (ret) {
-               dev_err(&pdev->dev, "Failed to request irq: %d\n", data->irq);
-               return ret;
-       }
-
-       data->clk = devm_clk_get(&pdev->dev, "tmu_apbif");
-       if (IS_ERR(data->clk)) {
-               dev_err(&pdev->dev, "Failed to get clock\n");
-               return  PTR_ERR(data->clk);
-       }
-
-       ret = clk_prepare(data->clk);
-       if (ret)
-               return ret;
-
-       if (pdata->type == SOC_ARCH_EXYNOS ||
-                               pdata->type == SOC_ARCH_EXYNOS4210)
-               data->soc = pdata->type;
-       else {
-               ret = -EINVAL;
-               dev_err(&pdev->dev, "Platform not supported\n");
-               goto err_clk;
-       }
-
-       data->pdata = pdata;
-       platform_set_drvdata(pdev, data);
-       mutex_init(&data->lock);
-
-       ret = exynos_tmu_initialize(pdev);
-       if (ret) {
-               dev_err(&pdev->dev, "Failed to initialize TMU\n");
-               goto err_clk;
-       }
-
-       exynos_tmu_control(pdev, true);
-
-       /* Register the sensor with thermal management interface */
-       (&exynos_sensor_conf)->private_data = data;
-       exynos_sensor_conf.trip_data.trip_count = pdata->trigger_level0_en +
-                       pdata->trigger_level1_en + pdata->trigger_level2_en +
-                       pdata->trigger_level3_en;
-
-       for (i = 0; i < exynos_sensor_conf.trip_data.trip_count; i++)
-               exynos_sensor_conf.trip_data.trip_val[i] =
-                       pdata->threshold + pdata->trigger_levels[i];
-
-       exynos_sensor_conf.trip_data.trigger_falling = pdata->threshold_falling;
-
-       exynos_sensor_conf.cooling_data.freq_clip_count =
-                                               pdata->freq_tab_count;
-       for (i = 0; i < pdata->freq_tab_count; i++) {
-               exynos_sensor_conf.cooling_data.freq_data[i].freq_clip_max =
-                                       pdata->freq_tab[i].freq_clip_max;
-               exynos_sensor_conf.cooling_data.freq_data[i].temp_level =
-                                       pdata->freq_tab[i].temp_level;
-       }
-
-       ret = exynos_register_thermal(&exynos_sensor_conf);
-       if (ret) {
-               dev_err(&pdev->dev, "Failed to register thermal interface\n");
-               goto err_clk;
-       }
-
-       return 0;
-err_clk:
-       clk_unprepare(data->clk);
-       return ret;
-}
-
-static int exynos_tmu_remove(struct platform_device *pdev)
-{
-       struct exynos_tmu_data *data = platform_get_drvdata(pdev);
-
-       exynos_tmu_control(pdev, false);
-
-       exynos_unregister_thermal();
-
-       clk_unprepare(data->clk);
-
-       return 0;
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int exynos_tmu_suspend(struct device *dev)
-{
-       exynos_tmu_control(to_platform_device(dev), false);
-
-       return 0;
-}
-
-static int exynos_tmu_resume(struct device *dev)
-{
-       struct platform_device *pdev = to_platform_device(dev);
-
-       exynos_tmu_initialize(pdev);
-       exynos_tmu_control(pdev, true);
-
-       return 0;
-}
-
-static SIMPLE_DEV_PM_OPS(exynos_tmu_pm,
-                        exynos_tmu_suspend, exynos_tmu_resume);
-#define EXYNOS_TMU_PM  (&exynos_tmu_pm)
-#else
-#define EXYNOS_TMU_PM  NULL
-#endif
-
-static struct platform_driver exynos_tmu_driver = {
-       .driver = {
-               .name   = "exynos-tmu",
-               .owner  = THIS_MODULE,
-               .pm     = EXYNOS_TMU_PM,
-               .of_match_table = of_match_ptr(exynos_tmu_match),
-       },
-       .probe = exynos_tmu_probe,
-       .remove = exynos_tmu_remove,
-       .id_table = exynos_tmu_driver_ids,
-};
-
-module_platform_driver(exynos_tmu_driver);
-
-MODULE_DESCRIPTION("EXYNOS TMU Driver");
-MODULE_AUTHOR("Donggeun Kim <dg77.kim@samsung.com>");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:exynos-tmu");
diff --git a/drivers/thermal/imx_thermal.c b/drivers/thermal/imx_thermal.c
new file mode 100644 (file)
index 0000000..1d6c801
--- /dev/null
@@ -0,0 +1,541 @@
+/*
+ * Copyright 2013 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/cpu_cooling.h>
+#include <linux/cpufreq.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/thermal.h>
+#include <linux/types.h>
+
+#define REG_SET                0x4
+#define REG_CLR                0x8
+#define REG_TOG                0xc
+
+#define MISC0                          0x0150
+#define MISC0_REFTOP_SELBIASOFF                (1 << 3)
+
+#define TEMPSENSE0                     0x0180
+#define TEMPSENSE0_ALARM_VALUE_SHIFT   20
+#define TEMPSENSE0_ALARM_VALUE_MASK    (0xfff << TEMPSENSE0_ALARM_VALUE_SHIFT)
+#define TEMPSENSE0_TEMP_CNT_SHIFT      8
+#define TEMPSENSE0_TEMP_CNT_MASK       (0xfff << TEMPSENSE0_TEMP_CNT_SHIFT)
+#define TEMPSENSE0_FINISHED            (1 << 2)
+#define TEMPSENSE0_MEASURE_TEMP                (1 << 1)
+#define TEMPSENSE0_POWER_DOWN          (1 << 0)
+
+#define TEMPSENSE1                     0x0190
+#define TEMPSENSE1_MEASURE_FREQ                0xffff
+
+#define OCOTP_ANA1                     0x04e0
+
+/* The driver supports 1 passive trip point and 1 critical trip point */
+enum imx_thermal_trip {
+       IMX_TRIP_PASSIVE,
+       IMX_TRIP_CRITICAL,
+       IMX_TRIP_NUM,
+};
+
+/*
+ * It defines the temperature in millicelsius for passive trip point
+ * that will trigger cooling action when crossed.
+ */
+#define IMX_TEMP_PASSIVE               85000
+
+#define IMX_POLLING_DELAY              2000 /* millisecond */
+#define IMX_PASSIVE_DELAY              1000
+
+struct imx_thermal_data {
+       struct thermal_zone_device *tz;
+       struct thermal_cooling_device *cdev;
+       enum thermal_device_mode mode;
+       struct regmap *tempmon;
+       int c1, c2; /* See formula in imx_get_sensor_data() */
+       unsigned long temp_passive;
+       unsigned long temp_critical;
+       unsigned long alarm_temp;
+       unsigned long last_temp;
+       bool irq_enabled;
+       int irq;
+};
+
+static void imx_set_alarm_temp(struct imx_thermal_data *data,
+                              signed long alarm_temp)
+{
+       struct regmap *map = data->tempmon;
+       int alarm_value;
+
+       data->alarm_temp = alarm_temp;
+       alarm_value = (alarm_temp - data->c2) / data->c1;
+       regmap_write(map, TEMPSENSE0 + REG_CLR, TEMPSENSE0_ALARM_VALUE_MASK);
+       regmap_write(map, TEMPSENSE0 + REG_SET, alarm_value <<
+                       TEMPSENSE0_ALARM_VALUE_SHIFT);
+}
+
+static int imx_get_temp(struct thermal_zone_device *tz, unsigned long *temp)
+{
+       struct imx_thermal_data *data = tz->devdata;
+       struct regmap *map = data->tempmon;
+       unsigned int n_meas;
+       bool wait;
+       u32 val;
+
+       if (data->mode == THERMAL_DEVICE_ENABLED) {
+               /* Check if a measurement is currently in progress */
+               regmap_read(map, TEMPSENSE0, &val);
+               wait = !(val & TEMPSENSE0_FINISHED);
+       } else {
+               /*
+                * Every time we measure the temperature, we will power on the
+                * temperature sensor, enable measurements, take a reading,
+                * disable measurements, power off the temperature sensor.
+                */
+               regmap_write(map, TEMPSENSE0 + REG_CLR, TEMPSENSE0_POWER_DOWN);
+               regmap_write(map, TEMPSENSE0 + REG_SET, TEMPSENSE0_MEASURE_TEMP);
+
+               wait = true;
+       }
+
+       /*
+        * According to the temp sensor designers, it may require up to ~17us
+        * to complete a measurement.
+        */
+       if (wait)
+               usleep_range(20, 50);
+
+       regmap_read(map, TEMPSENSE0, &val);
+
+       if (data->mode != THERMAL_DEVICE_ENABLED) {
+               regmap_write(map, TEMPSENSE0 + REG_CLR, TEMPSENSE0_MEASURE_TEMP);
+               regmap_write(map, TEMPSENSE0 + REG_SET, TEMPSENSE0_POWER_DOWN);
+       }
+
+       if ((val & TEMPSENSE0_FINISHED) == 0) {
+               dev_dbg(&tz->device, "temp measurement never finished\n");
+               return -EAGAIN;
+       }
+
+       n_meas = (val & TEMPSENSE0_TEMP_CNT_MASK) >> TEMPSENSE0_TEMP_CNT_SHIFT;
+
+       /* See imx_get_sensor_data() for formula derivation */
+       *temp = data->c2 + data->c1 * n_meas;
+
+       /* Update alarm value to next higher trip point */
+       if (data->alarm_temp == data->temp_passive && *temp >= data->temp_passive)
+               imx_set_alarm_temp(data, data->temp_critical);
+       if (data->alarm_temp == data->temp_critical && *temp < data->temp_passive) {
+               imx_set_alarm_temp(data, data->temp_passive);
+               dev_dbg(&tz->device, "thermal alarm off: T < %lu\n",
+                       data->alarm_temp / 1000);
+       }
+
+       if (*temp != data->last_temp) {
+               dev_dbg(&tz->device, "millicelsius: %ld\n", *temp);
+               data->last_temp = *temp;
+       }
+
+       /* Reenable alarm IRQ if temperature below alarm temperature */
+       if (!data->irq_enabled && *temp < data->alarm_temp) {
+               data->irq_enabled = true;
+               enable_irq(data->irq);
+       }
+
+       return 0;
+}
+
+static int imx_get_mode(struct thermal_zone_device *tz,
+                       enum thermal_device_mode *mode)
+{
+       struct imx_thermal_data *data = tz->devdata;
+
+       *mode = data->mode;
+
+       return 0;
+}
+
+static int imx_set_mode(struct thermal_zone_device *tz,
+                       enum thermal_device_mode mode)
+{
+       struct imx_thermal_data *data = tz->devdata;
+       struct regmap *map = data->tempmon;
+
+       if (mode == THERMAL_DEVICE_ENABLED) {
+               tz->polling_delay = IMX_POLLING_DELAY;
+               tz->passive_delay = IMX_PASSIVE_DELAY;
+
+               regmap_write(map, TEMPSENSE0 + REG_CLR, TEMPSENSE0_POWER_DOWN);
+               regmap_write(map, TEMPSENSE0 + REG_SET, TEMPSENSE0_MEASURE_TEMP);
+
+               if (!data->irq_enabled) {
+                       data->irq_enabled = true;
+                       enable_irq(data->irq);
+               }
+       } else {
+               regmap_write(map, TEMPSENSE0 + REG_CLR, TEMPSENSE0_MEASURE_TEMP);
+               regmap_write(map, TEMPSENSE0 + REG_SET, TEMPSENSE0_POWER_DOWN);
+
+               tz->polling_delay = 0;
+               tz->passive_delay = 0;
+
+               if (data->irq_enabled) {
+                       disable_irq(data->irq);
+                       data->irq_enabled = false;
+               }
+       }
+
+       data->mode = mode;
+       thermal_zone_device_update(tz);
+
+       return 0;
+}
+
+static int imx_get_trip_type(struct thermal_zone_device *tz, int trip,
+                            enum thermal_trip_type *type)
+{
+       *type = (trip == IMX_TRIP_PASSIVE) ? THERMAL_TRIP_PASSIVE :
+                                            THERMAL_TRIP_CRITICAL;
+       return 0;
+}
+
+static int imx_get_crit_temp(struct thermal_zone_device *tz,
+                            unsigned long *temp)
+{
+       struct imx_thermal_data *data = tz->devdata;
+
+       *temp = data->temp_critical;
+       return 0;
+}
+
+static int imx_get_trip_temp(struct thermal_zone_device *tz, int trip,
+                            unsigned long *temp)
+{
+       struct imx_thermal_data *data = tz->devdata;
+
+       *temp = (trip == IMX_TRIP_PASSIVE) ? data->temp_passive :
+                                            data->temp_critical;
+       return 0;
+}
+
+static int imx_set_trip_temp(struct thermal_zone_device *tz, int trip,
+                            unsigned long temp)
+{
+       struct imx_thermal_data *data = tz->devdata;
+
+       if (trip == IMX_TRIP_CRITICAL)
+               return -EPERM;
+
+       if (temp > IMX_TEMP_PASSIVE)
+               return -EINVAL;
+
+       data->temp_passive = temp;
+
+       imx_set_alarm_temp(data, temp);
+
+       return 0;
+}
+
+static int imx_bind(struct thermal_zone_device *tz,
+                   struct thermal_cooling_device *cdev)
+{
+       int ret;
+
+       ret = thermal_zone_bind_cooling_device(tz, IMX_TRIP_PASSIVE, cdev,
+                                              THERMAL_NO_LIMIT,
+                                              THERMAL_NO_LIMIT);
+       if (ret) {
+               dev_err(&tz->device,
+                       "binding zone %s with cdev %s failed:%d\n",
+                       tz->type, cdev->type, ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+static int imx_unbind(struct thermal_zone_device *tz,
+                     struct thermal_cooling_device *cdev)
+{
+       int ret;
+
+       ret = thermal_zone_unbind_cooling_device(tz, IMX_TRIP_PASSIVE, cdev);
+       if (ret) {
+               dev_err(&tz->device,
+                       "unbinding zone %s with cdev %s failed:%d\n",
+                       tz->type, cdev->type, ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+static const struct thermal_zone_device_ops imx_tz_ops = {
+       .bind = imx_bind,
+       .unbind = imx_unbind,
+       .get_temp = imx_get_temp,
+       .get_mode = imx_get_mode,
+       .set_mode = imx_set_mode,
+       .get_trip_type = imx_get_trip_type,
+       .get_trip_temp = imx_get_trip_temp,
+       .get_crit_temp = imx_get_crit_temp,
+       .set_trip_temp = imx_set_trip_temp,
+};
+
+static int imx_get_sensor_data(struct platform_device *pdev)
+{
+       struct imx_thermal_data *data = platform_get_drvdata(pdev);
+       struct regmap *map;
+       int t1, t2, n1, n2;
+       int ret;
+       u32 val;
+
+       map = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+                                             "fsl,tempmon-data");
+       if (IS_ERR(map)) {
+               ret = PTR_ERR(map);
+               dev_err(&pdev->dev, "failed to get sensor regmap: %d\n", ret);
+               return ret;
+       }
+
+       ret = regmap_read(map, OCOTP_ANA1, &val);
+       if (ret) {
+               dev_err(&pdev->dev, "failed to read sensor data: %d\n", ret);
+               return ret;
+       }
+
+       if (val == 0 || val == ~0) {
+               dev_err(&pdev->dev, "invalid sensor calibration data\n");
+               return -EINVAL;
+       }
+
+       /*
+        * Sensor data layout:
+        *   [31:20] - sensor value @ 25C
+        *    [19:8] - sensor value of hot
+        *     [7:0] - hot temperature value
+        */
+       n1 = val >> 20;
+       n2 = (val & 0xfff00) >> 8;
+       t2 = val & 0xff;
+       t1 = 25; /* t1 always 25C */
+
+       /*
+        * Derived from linear interpolation,
+        * Tmeas = T2 + (Nmeas - N2) * (T1 - T2) / (N1 - N2)
+        * We want to reduce this down to the minimum computation necessary
+        * for each temperature read.  Also, we want Tmeas in millicelsius
+        * and we don't want to lose precision from integer division. So...
+        * milli_Tmeas = 1000 * T2 + 1000 * (Nmeas - N2) * (T1 - T2) / (N1 - N2)
+        * Let constant c1 = 1000 * (T1 - T2) / (N1 - N2)
+        * milli_Tmeas = (1000 * T2) + c1 * (Nmeas - N2)
+        * milli_Tmeas = (1000 * T2) + (c1 * Nmeas) - (c1 * N2)
+        * Let constant c2 = (1000 * T2) - (c1 * N2)
+        * milli_Tmeas = c2 + (c1 * Nmeas)
+        */
+       data->c1 = 1000 * (t1 - t2) / (n1 - n2);
+       data->c2 = 1000 * t2 - data->c1 * n2;
+
+       /*
+        * Set the default passive cooling trip point to 20 °C below the
+        * maximum die temperature. Can be changed from userspace.
+        */
+       data->temp_passive = 1000 * (t2 - 20);
+
+       /*
+        * The maximum die temperature is t2, let's give 5 °C cushion
+        * for noise and possible temperature rise between measurements.
+        */
+       data->temp_critical = 1000 * (t2 - 5);
+
+       return 0;
+}
+
+static irqreturn_t imx_thermal_alarm_irq(int irq, void *dev)
+{
+       struct imx_thermal_data *data = dev;
+
+       disable_irq_nosync(irq);
+       data->irq_enabled = false;
+
+       return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t imx_thermal_alarm_irq_thread(int irq, void *dev)
+{
+       struct imx_thermal_data *data = dev;
+
+       dev_dbg(&data->tz->device, "THERMAL ALARM: T > %lu\n",
+               data->alarm_temp / 1000);
+
+       thermal_zone_device_update(data->tz);
+
+       return IRQ_HANDLED;
+}
+
+static int imx_thermal_probe(struct platform_device *pdev)
+{
+       struct imx_thermal_data *data;
+       struct cpumask clip_cpus;
+       struct regmap *map;
+       int measure_freq;
+       int ret;
+
+       data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+       if (!data)
+               return -ENOMEM;
+
+       map = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, "fsl,tempmon");
+       if (IS_ERR(map)) {
+               ret = PTR_ERR(map);
+               dev_err(&pdev->dev, "failed to get tempmon regmap: %d\n", ret);
+               return ret;
+       }
+       data->tempmon = map;
+
+       data->irq = platform_get_irq(pdev, 0);
+       if (data->irq < 0)
+               return data->irq;
+
+       ret = devm_request_threaded_irq(&pdev->dev, data->irq,
+                       imx_thermal_alarm_irq, imx_thermal_alarm_irq_thread,
+                       0, "imx_thermal", data);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "failed to request alarm irq: %d\n", ret);
+               return ret;
+       }
+
+       platform_set_drvdata(pdev, data);
+
+       ret = imx_get_sensor_data(pdev);
+       if (ret) {
+               dev_err(&pdev->dev, "failed to get sensor data\n");
+               return ret;
+       }
+
+       /* Make sure sensor is in known good state for measurements */
+       regmap_write(map, TEMPSENSE0 + REG_CLR, TEMPSENSE0_POWER_DOWN);
+       regmap_write(map, TEMPSENSE0 + REG_CLR, TEMPSENSE0_MEASURE_TEMP);
+       regmap_write(map, TEMPSENSE1 + REG_CLR, TEMPSENSE1_MEASURE_FREQ);
+       regmap_write(map, MISC0 + REG_SET, MISC0_REFTOP_SELBIASOFF);
+       regmap_write(map, TEMPSENSE0 + REG_SET, TEMPSENSE0_POWER_DOWN);
+
+       cpumask_set_cpu(0, &clip_cpus);
+       data->cdev = cpufreq_cooling_register(&clip_cpus);
+       if (IS_ERR(data->cdev)) {
+               ret = PTR_ERR(data->cdev);
+               dev_err(&pdev->dev,
+                       "failed to register cpufreq cooling device: %d\n", ret);
+               return ret;
+       }
+
+       data->tz = thermal_zone_device_register("imx_thermal_zone",
+                                               IMX_TRIP_NUM,
+                                               BIT(IMX_TRIP_PASSIVE), data,
+                                               &imx_tz_ops, NULL,
+                                               IMX_PASSIVE_DELAY,
+                                               IMX_POLLING_DELAY);
+       if (IS_ERR(data->tz)) {
+               ret = PTR_ERR(data->tz);
+               dev_err(&pdev->dev,
+                       "failed to register thermal zone device %d\n", ret);
+               cpufreq_cooling_unregister(data->cdev);
+               return ret;
+       }
+
+       /* Enable measurements at ~ 10 Hz */
+       regmap_write(map, TEMPSENSE1 + REG_CLR, TEMPSENSE1_MEASURE_FREQ);
+       measure_freq = DIV_ROUND_UP(32768, 10); /* 10 Hz */
+       regmap_write(map, TEMPSENSE1 + REG_SET, measure_freq);
+       imx_set_alarm_temp(data, data->temp_passive);
+       regmap_write(map, TEMPSENSE0 + REG_CLR, TEMPSENSE0_POWER_DOWN);
+       regmap_write(map, TEMPSENSE0 + REG_SET, TEMPSENSE0_MEASURE_TEMP);
+
+       data->irq_enabled = true;
+       data->mode = THERMAL_DEVICE_ENABLED;
+
+       return 0;
+}
+
+static int imx_thermal_remove(struct platform_device *pdev)
+{
+       struct imx_thermal_data *data = platform_get_drvdata(pdev);
+       struct regmap *map = data->tempmon;
+
+       /* Disable measurements */
+       regmap_write(map, TEMPSENSE0 + REG_SET, TEMPSENSE0_POWER_DOWN);
+
+       thermal_zone_device_unregister(data->tz);
+       cpufreq_cooling_unregister(data->cdev);
+
+       return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int imx_thermal_suspend(struct device *dev)
+{
+       struct imx_thermal_data *data = dev_get_drvdata(dev);
+       struct regmap *map = data->tempmon;
+       u32 val;
+
+       regmap_read(map, TEMPSENSE0, &val);
+       if ((val & TEMPSENSE0_POWER_DOWN) == 0) {
+               /*
+                * If a measurement is taking place, wait for a long enough
+                * time for it to finish, and then check again.  If it still
+                * does not finish, something must have gone wrong.
+                */
+               udelay(50);
+               regmap_read(map, TEMPSENSE0, &val);
+               if ((val & TEMPSENSE0_POWER_DOWN) == 0)
+                       return -ETIMEDOUT;
+       }
+
+       return 0;
+}
+
+static int imx_thermal_resume(struct device *dev)
+{
+       /* Nothing to do for now */
+       return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(imx_thermal_pm_ops,
+                        imx_thermal_suspend, imx_thermal_resume);
+
+static const struct of_device_id of_imx_thermal_match[] = {
+       { .compatible = "fsl,imx6q-tempmon", },
+       { /* end */ }
+};
+
+static struct platform_driver imx_thermal = {
+       .driver = {
+               .name   = "imx_thermal",
+               .owner  = THIS_MODULE,
+               .pm     = &imx_thermal_pm_ops,
+               .of_match_table = of_imx_thermal_match,
+       },
+       .probe          = imx_thermal_probe,
+       .remove         = imx_thermal_remove,
+};
+module_platform_driver(imx_thermal);
+
+MODULE_AUTHOR("Freescale Semiconductor, Inc.");
+MODULE_DESCRIPTION("Thermal driver for Freescale i.MX SoCs");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:imx-thermal");
diff --git a/drivers/thermal/samsung/Kconfig b/drivers/thermal/samsung/Kconfig
new file mode 100644 (file)
index 0000000..b653f15
--- /dev/null
@@ -0,0 +1,18 @@
+config EXYNOS_THERMAL
+       tristate "Exynos thermal management unit driver"
+       depends on ARCH_HAS_BANDGAP
+       help
+         If you say yes here you get support for the TMU (Thermal Management
+         Unit) driver for the SAMSUNG EXYNOS series of SoCs. This driver
+         initialises the TMU, reports temperature and handles cooling action
+         if defined. This driver uses the Exynos core thermal APIs and TMU
+         configuration data from the supported SoCs.
+
+config EXYNOS_THERMAL_CORE
+       bool "Core thermal framework support for EXYNOS SOC's"
+       depends on EXYNOS_THERMAL
+       help
+         If you say yes here you get support for EXYNOS TMU
+         (Thermal Management Unit) common registration/unregistration
+         functions to the core thermal layer and also to use the generic
+         CPU cooling APIs.
diff --git a/drivers/thermal/samsung/Makefile b/drivers/thermal/samsung/Makefile
new file mode 100644 (file)
index 0000000..c09d830
--- /dev/null
@@ -0,0 +1,7 @@
+#
+# Samsung thermal specific Makefile
+#
+obj-$(CONFIG_EXYNOS_THERMAL)                   += exynos_thermal.o
+exynos_thermal-y                               := exynos_tmu.o
+exynos_thermal-y                               += exynos_tmu_data.o
+exynos_thermal-$(CONFIG_EXYNOS_THERMAL_CORE)   += exynos_thermal_common.o
diff --git a/drivers/thermal/samsung/exynos_thermal_common.c b/drivers/thermal/samsung/exynos_thermal_common.c
new file mode 100644 (file)
index 0000000..4d8e444
--- /dev/null
@@ -0,0 +1,432 @@
+/*
+ * exynos_thermal_common.c - Samsung EXYNOS common thermal file
+ *
+ *  Copyright (C) 2013 Samsung Electronics
+ *  Amit Daniel Kachhap <amit.daniel@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+
+#include <linux/cpu_cooling.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/thermal.h>
+
+#include "exynos_thermal_common.h"
+
+struct exynos_thermal_zone {
+       enum thermal_device_mode mode;
+       struct thermal_zone_device *therm_dev;
+       struct thermal_cooling_device *cool_dev[MAX_COOLING_DEVICE];
+       unsigned int cool_dev_size;
+       struct platform_device *exynos4_dev;
+       struct thermal_sensor_conf *sensor_conf;
+       bool bind;
+};
+
+/* Get mode callback functions for thermal zone */
+static int exynos_get_mode(struct thermal_zone_device *thermal,
+                       enum thermal_device_mode *mode)
+{
+       struct exynos_thermal_zone *th_zone = thermal->devdata;
+       if (th_zone)
+               *mode = th_zone->mode;
+       return 0;
+}
+
+/* Set mode callback functions for thermal zone */
+static int exynos_set_mode(struct thermal_zone_device *thermal,
+                       enum thermal_device_mode mode)
+{
+       struct exynos_thermal_zone *th_zone = thermal->devdata;
+       if (!th_zone) {
+               dev_err(th_zone->sensor_conf->dev,
+                       "thermal zone not registered\n");
+               return 0;
+       }
+
+       mutex_lock(&thermal->lock);
+
+       if (mode == THERMAL_DEVICE_ENABLED &&
+               !th_zone->sensor_conf->trip_data.trigger_falling)
+               thermal->polling_delay = IDLE_INTERVAL;
+       else
+               thermal->polling_delay = 0;
+
+       mutex_unlock(&thermal->lock);
+
+       th_zone->mode = mode;
+       thermal_zone_device_update(thermal);
+       dev_dbg(th_zone->sensor_conf->dev,
+               "thermal polling set for duration=%d msec\n",
+               thermal->polling_delay);
+       return 0;
+}
+
+
+/* Get trip type callback functions for thermal zone */
+static int exynos_get_trip_type(struct thermal_zone_device *thermal, int trip,
+                                enum thermal_trip_type *type)
+{
+       struct exynos_thermal_zone *th_zone = thermal->devdata;
+       int max_trip = th_zone->sensor_conf->trip_data.trip_count;
+       int trip_type;
+
+       if (trip < 0 || trip >= max_trip)
+               return -EINVAL;
+
+       trip_type = th_zone->sensor_conf->trip_data.trip_type[trip];
+
+       if (trip_type == SW_TRIP)
+               *type = THERMAL_TRIP_CRITICAL;
+       else if (trip_type == THROTTLE_ACTIVE)
+               *type = THERMAL_TRIP_ACTIVE;
+       else if (trip_type == THROTTLE_PASSIVE)
+               *type = THERMAL_TRIP_PASSIVE;
+       else
+               return -EINVAL;
+
+       return 0;
+}
+
+/* Get trip temperature callback functions for thermal zone */
+static int exynos_get_trip_temp(struct thermal_zone_device *thermal, int trip,
+                               unsigned long *temp)
+{
+       struct exynos_thermal_zone *th_zone = thermal->devdata;
+       int max_trip = th_zone->sensor_conf->trip_data.trip_count;
+
+       if (trip < 0 || trip >= max_trip)
+               return -EINVAL;
+
+       *temp = th_zone->sensor_conf->trip_data.trip_val[trip];
+       /* convert the temperature into millicelsius */
+       *temp = *temp * MCELSIUS;
+
+       return 0;
+}
+
+/* Get critical temperature callback functions for thermal zone */
+static int exynos_get_crit_temp(struct thermal_zone_device *thermal,
+                               unsigned long *temp)
+{
+       struct exynos_thermal_zone *th_zone = thermal->devdata;
+       int max_trip = th_zone->sensor_conf->trip_data.trip_count;
+       /* Get the temp of the highest trip */
+       return exynos_get_trip_temp(thermal, max_trip - 1, temp);
+}
+
+/* Bind callback functions for thermal zone */
+static int exynos_bind(struct thermal_zone_device *thermal,
+                       struct thermal_cooling_device *cdev)
+{
+       int ret = 0, i, tab_size, level;
+       struct freq_clip_table *tab_ptr, *clip_data;
+       struct exynos_thermal_zone *th_zone = thermal->devdata;
+       struct thermal_sensor_conf *data = th_zone->sensor_conf;
+
+       tab_ptr = (struct freq_clip_table *)data->cooling_data.freq_data;
+       tab_size = data->cooling_data.freq_clip_count;
+
+       if (tab_ptr == NULL || tab_size == 0)
+               return 0;
+
+       /* find the registered cooling device */
+       for (i = 0; i < th_zone->cool_dev_size; i++)
+               if (cdev == th_zone->cool_dev[i])
+                       break;
+
+       /* No matching cooling device */
+       if (i == th_zone->cool_dev_size)
+               return 0;
+
+       /* Bind the thermal zone to the cpufreq cooling device */
+       for (i = 0; i < tab_size; i++) {
+               clip_data = (struct freq_clip_table *)&(tab_ptr[i]);
+               level = cpufreq_cooling_get_level(0, clip_data->freq_clip_max);
+               if (level == THERMAL_CSTATE_INVALID)
+                       return 0;
+               switch (GET_ZONE(i)) {
+               case MONITOR_ZONE:
+               case WARN_ZONE:
+                       if (thermal_zone_bind_cooling_device(thermal, i, cdev,
+                                                               level, 0)) {
+                               dev_err(data->dev,
+                                       "error unbinding cdev inst=%d\n", i);
+                               ret = -EINVAL;
+                       }
+                       th_zone->bind = true;
+                       break;
+               default:
+                       ret = -EINVAL;
+               }
+       }
+
+       return ret;
+}
+
+/* Unbind callback functions for thermal zone */
+static int exynos_unbind(struct thermal_zone_device *thermal,
+                       struct thermal_cooling_device *cdev)
+{
+       int ret = 0, i, tab_size;
+       struct exynos_thermal_zone *th_zone = thermal->devdata;
+       struct thermal_sensor_conf *data = th_zone->sensor_conf;
+
+       if (th_zone->bind == false)
+               return 0;
+
+       tab_size = data->cooling_data.freq_clip_count;
+
+       if (tab_size == 0)
+               return 0;
+
+       /* find the registered cooling device */
+       for (i = 0; i < th_zone->cool_dev_size; i++)
+               if (cdev == th_zone->cool_dev[i])
+                       break;
+
+       /* No matching cooling device */
+       if (i == th_zone->cool_dev_size)
+               return 0;
+
+       /* Unbind the thermal zone from the cpufreq cooling device */
+       for (i = 0; i < tab_size; i++) {
+               switch (GET_ZONE(i)) {
+               case MONITOR_ZONE:
+               case WARN_ZONE:
+                       if (thermal_zone_unbind_cooling_device(thermal, i,
+                                                               cdev)) {
+                               dev_err(data->dev,
+                                       "error unbinding cdev inst=%d\n", i);
+                               ret = -EINVAL;
+                       }
+                       th_zone->bind = false;
+                       break;
+               default:
+                       ret = -EINVAL;
+               }
+       }
+       return ret;
+}
+
+/* Get temperature callback function for thermal zone */
+static int exynos_get_temp(struct thermal_zone_device *thermal,
+                       unsigned long *temp)
+{
+       struct exynos_thermal_zone *th_zone = thermal->devdata;
+       void *data;
+
+       if (!th_zone->sensor_conf) {
+               /*
+                * sensor_conf is NULL here, so it must not be dereferenced
+                * for dev_err(); log without a device instead.
+                */
+               pr_err("Temperature sensor not initialised\n");
+               return -EINVAL;
+       }
+       data = th_zone->sensor_conf->driver_data;
+       *temp = th_zone->sensor_conf->read_temperature(data);
+       /* convert the temperature into millicelsius */
+       *temp = *temp * MCELSIUS;
+       return 0;
+}
+
+/* Set emulated temperature callback function for thermal zone */
+static int exynos_set_emul_temp(struct thermal_zone_device *thermal,
+                                               unsigned long temp)
+{
+       void *data;
+       int ret = -EINVAL;
+       struct exynos_thermal_zone *th_zone = thermal->devdata;
+
+       if (!th_zone->sensor_conf) {
+               /*
+                * sensor_conf is NULL here, so it must not be dereferenced
+                * for dev_err(); log without a device instead.
+                */
+               pr_err("Temperature sensor not initialised\n");
+               return -EINVAL;
+       }
+       data = th_zone->sensor_conf->driver_data;
+       /* -EINVAL is kept when the sensor does not support emulation */
+       if (th_zone->sensor_conf->write_emul_temp)
+               ret = th_zone->sensor_conf->write_emul_temp(data, temp);
+       return ret;
+}
+
+/* Report the temperature trend relative to the given trip point */
+static int exynos_get_trend(struct thermal_zone_device *thermal,
+                       int trip, enum thermal_trend *trend)
+{
+       unsigned long trip_temp;
+       int ret;
+
+       ret = exynos_get_trip_temp(thermal, trip, &trip_temp);
+       if (ret < 0)
+               return ret;
+
+       /* At or above the trip temperature -> heading up, else cooling off */
+       *trend = (thermal->temperature >= trip_temp) ?
+                       THERMAL_TREND_RAISE_FULL : THERMAL_TREND_DROP_FULL;
+
+       return 0;
+}
+/*
+ * Operation callbacks handed to the core thermal framework when the zone
+ * is registered in exynos_register_thermal().
+ */
+static struct thermal_zone_device_ops const exynos_dev_ops = {
+       .bind = exynos_bind,
+       .unbind = exynos_unbind,
+       .get_temp = exynos_get_temp,
+       .set_emul_temp = exynos_set_emul_temp,
+       .get_trend = exynos_get_trend,
+       .get_mode = exynos_get_mode,
+       .set_mode = exynos_set_mode,
+       .get_trip_type = exynos_get_trip_type,
+       .get_trip_temp = exynos_get_trip_temp,
+       .get_crit_temp = exynos_get_crit_temp,
+};
+
+/*
+ * This function may be called from interrupt based temperature sensor
+ * when threshold is changed.
+ */
+void exynos_report_trigger(struct thermal_sensor_conf *conf)
+{
+       unsigned int i;
+       char data[10];
+       char *envp[] = { data, NULL };
+       struct exynos_thermal_zone *th_zone;
+
+       if (!conf || !conf->pzone_data) {
+               pr_err("Invalid temperature sensor configuration data\n");
+               return;
+       }
+
+       th_zone = conf->pzone_data;
+       /*
+        * Everything below dereferences th_zone->therm_dev, so bail out
+        * when no thermal zone device has been registered yet.  (The old
+        * test was inverted and returned exactly when a device existed.)
+        */
+       if (!th_zone->therm_dev)
+               return;
+
+       if (th_zone->bind == false) {
+               for (i = 0; i < th_zone->cool_dev_size; i++) {
+                       if (!th_zone->cool_dev[i])
+                               continue;
+                       exynos_bind(th_zone->therm_dev,
+                                       th_zone->cool_dev[i]);
+               }
+       }
+
+       thermal_zone_device_update(th_zone->therm_dev);
+
+       mutex_lock(&th_zone->therm_dev->lock);
+       /* Find the level for which trip happened */
+       for (i = 0; i < th_zone->sensor_conf->trip_data.trip_count; i++) {
+               if (th_zone->therm_dev->last_temperature <
+                       th_zone->sensor_conf->trip_data.trip_val[i] * MCELSIUS)
+                       break;
+       }
+
+       if (th_zone->mode == THERMAL_DEVICE_ENABLED &&
+               !th_zone->sensor_conf->trip_data.trigger_falling) {
+               if (i > 0)
+                       th_zone->therm_dev->polling_delay = ACTIVE_INTERVAL;
+               else
+                       th_zone->therm_dev->polling_delay = IDLE_INTERVAL;
+       }
+
+       /* Notify user space which trip level was crossed */
+       snprintf(data, sizeof(data), "%u", i);
+       kobject_uevent_env(&th_zone->therm_dev->device.kobj, KOBJ_CHANGE, envp);
+       mutex_unlock(&th_zone->therm_dev->lock);
+}
+
+/* Register with the in-kernel thermal management */
+int exynos_register_thermal(struct thermal_sensor_conf *sensor_conf)
+{
+       int ret;
+       struct cpumask mask_val;
+       struct exynos_thermal_zone *th_zone;
+
+       if (!sensor_conf || !sensor_conf->read_temperature) {
+               pr_err("Temperature sensor not initialised\n");
+               return -EINVAL;
+       }
+
+       th_zone = devm_kzalloc(sensor_conf->dev,
+                               sizeof(struct exynos_thermal_zone), GFP_KERNEL);
+       if (!th_zone)
+               return -ENOMEM;
+
+       th_zone->sensor_conf = sensor_conf;
+       /*
+        * TODO: 1) Handle multiple cooling devices in a thermal zone
+        *       2) Add a flag/name in cooling info to map to specific
+        *       sensor
+        */
+       if (sensor_conf->cooling_data.freq_clip_count > 0) {
+               /* mask_val is on the stack: clear it before setting cpu0 */
+               cpumask_clear(&mask_val);
+               cpumask_set_cpu(0, &mask_val);
+               th_zone->cool_dev[th_zone->cool_dev_size] =
+                                       cpufreq_cooling_register(&mask_val);
+               if (IS_ERR(th_zone->cool_dev[th_zone->cool_dev_size])) {
+                       dev_err(sensor_conf->dev,
+                               "Failed to register cpufreq cooling device\n");
+                       ret = -EINVAL;
+                       goto err_unregister;
+               }
+               th_zone->cool_dev_size++;
+       }
+
+       th_zone->therm_dev = thermal_zone_device_register(
+                       sensor_conf->name, sensor_conf->trip_data.trip_count,
+                       0, th_zone, &exynos_dev_ops, NULL, 0,
+                       sensor_conf->trip_data.trigger_falling ? 0 :
+                       IDLE_INTERVAL);
+
+       if (IS_ERR(th_zone->therm_dev)) {
+               dev_err(sensor_conf->dev,
+                       "Failed to register thermal zone device\n");
+               ret = PTR_ERR(th_zone->therm_dev);
+               goto err_unregister;
+       }
+       th_zone->mode = THERMAL_DEVICE_ENABLED;
+       sensor_conf->pzone_data = th_zone;
+
+       dev_info(sensor_conf->dev,
+               "Exynos: Thermal zone(%s) registered\n", sensor_conf->name);
+
+       return 0;
+
+err_unregister:
+       /*
+        * pzone_data is only set on success; publish it here so that
+        * exynos_unregister_thermal() actually cleans up instead of
+        * bailing out and leaking the cpufreq cooling device.
+        */
+       sensor_conf->pzone_data = th_zone;
+       exynos_unregister_thermal(sensor_conf);
+       return ret;
+}
+
+/* Un-Register with the in-kernel thermal management */
+void exynos_unregister_thermal(struct thermal_sensor_conf *sensor_conf)
+{
+       struct exynos_thermal_zone *th_zone;
+       int idx;
+
+       if (!sensor_conf || !sensor_conf->pzone_data) {
+               pr_err("Invalid temperature sensor configuration data\n");
+               return;
+       }
+
+       th_zone = sensor_conf->pzone_data;
+
+       /* Drop the thermal zone device first, then each cooling device */
+       if (th_zone->therm_dev)
+               thermal_zone_device_unregister(th_zone->therm_dev);
+
+       for (idx = 0; idx < th_zone->cool_dev_size; idx++)
+               if (th_zone->cool_dev[idx])
+                       cpufreq_cooling_unregister(th_zone->cool_dev[idx]);
+
+       dev_info(sensor_conf->dev,
+               "Exynos: Kernel Thermal management unregistered\n");
+}
diff --git a/drivers/thermal/samsung/exynos_thermal_common.h b/drivers/thermal/samsung/exynos_thermal_common.h
new file mode 100644 (file)
index 0000000..3eb2ed9
--- /dev/null
@@ -0,0 +1,107 @@
+/*
+ * exynos_thermal_common.h - Samsung EXYNOS common header file
+ *
+ *  Copyright (C) 2013 Samsung Electronics
+ *  Amit Daniel Kachhap <amit.daniel@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+
+#ifndef _EXYNOS_THERMAL_COMMON_H
+#define _EXYNOS_THERMAL_COMMON_H
+
+/* In-kernel thermal framework related macros & definitions */
+#define SENSOR_NAME_LEN        16
+#define MAX_TRIP_COUNT 8
+#define MAX_COOLING_DEVICE 4
+#define MAX_THRESHOLD_LEVS 5
+
+/* Polling intervals (ms) used when a trip is active vs. idle */
+#define ACTIVE_INTERVAL 500
+#define IDLE_INTERVAL 10000
+/* Scale factor: degrees Celsius -> millicelsius */
+#define MCELSIUS       1000
+
+/* CPU Zone information */
+#define PANIC_ZONE      4
+#define WARN_ZONE       3
+#define MONITOR_ZONE    2
+#define SAFE_ZONE       1
+
+/* Trip index <-> zone id mapping (trip 0 corresponds to MONITOR_ZONE) */
+#define GET_ZONE(trip) (trip + 2)
+#define GET_TRIP(zone) (zone - 2)
+
+/* Trip point trigger types supported by the TMU */
+enum trigger_type {
+       THROTTLE_ACTIVE = 1,
+       THROTTLE_PASSIVE,
+       SW_TRIP,
+       HW_TRIP,
+};
+
+/**
+ * struct freq_clip_table
+ * @freq_clip_max: maximum frequency allowed for this cooling state.
+ * @temp_level: Temperature level at which the temperature clipping will
+ *     happen.
+ * @mask_val: cpumask of the allowed cpu's where the clipping will take place.
+ *
+ * This structure is required to be filled and passed to the
+ * cpufreq_cooling_unregister function.
+ */
+struct freq_clip_table {
+       unsigned int freq_clip_max;
+       unsigned int temp_level;
+       const struct cpumask *mask_val;
+};
+
+/* Trip point configuration handed to the core thermal framework */
+struct thermal_trip_point_conf {
+       int trip_val[MAX_TRIP_COUNT];
+       int trip_type[MAX_TRIP_COUNT];
+       int trip_count;
+       unsigned char trigger_falling;
+};
+
+/* Cooling (cpufreq clipping) configuration for one sensor */
+struct thermal_cooling_conf {
+       struct freq_clip_table freq_data[MAX_TRIP_COUNT];
+       int freq_clip_count;
+};
+
+/* Per-sensor configuration registered with the common thermal layer */
+struct thermal_sensor_conf {
+       char name[SENSOR_NAME_LEN];
+       int (*read_temperature)(void *data);
+       int (*write_emul_temp)(void *drv_data, unsigned long temp);
+       struct thermal_trip_point_conf trip_data;
+       struct thermal_cooling_conf cooling_data;
+       void *driver_data;
+       void *pzone_data;
+       struct device *dev;
+};
+
+/* Functions exported for use by Exynos thermal sensor drivers */
+#ifdef CONFIG_EXYNOS_THERMAL_CORE
+void exynos_unregister_thermal(struct thermal_sensor_conf *sensor_conf);
+int exynos_register_thermal(struct thermal_sensor_conf *sensor_conf);
+void exynos_report_trigger(struct thermal_sensor_conf *sensor_conf);
+#else
+static inline void
+exynos_unregister_thermal(struct thermal_sensor_conf *sensor_conf) { return; }
+
+static inline int
+exynos_register_thermal(struct thermal_sensor_conf *sensor_conf) { return 0; }
+
+static inline void
+exynos_report_trigger(struct thermal_sensor_conf *sensor_conf) { return; }
+
+#endif /* CONFIG_EXYNOS_THERMAL_CORE */
+#endif /* _EXYNOS_THERMAL_COMMON_H */
diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c
new file mode 100644 (file)
index 0000000..a033dbb
--- /dev/null
@@ -0,0 +1,768 @@
+/*
+ * exynos_tmu.c - Samsung EXYNOS TMU (Thermal Management Unit)
+ *
+ *  Copyright (C) 2011 Samsung Electronics
+ *  Donggeun Kim <dg77.kim@samsung.com>
+ *  Amit Daniel Kachhap <amit.kachhap@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+
+#include "exynos_thermal_common.h"
+#include "exynos_tmu.h"
+#include "exynos_tmu_data.h"
+
+/**
+ * struct exynos_tmu_data : A structure to hold the private data of the TMU
+ *     driver
+ * @id: identifier of the one instance of the TMU controller.
+ * @pdata: pointer to the tmu platform/configuration data
+ * @base: base address of the single instance of the TMU controller.
+ * @base_common: base address of the common registers of the TMU controller.
+ * @irq: irq number of the TMU controller.
+ * @soc: id of the SOC type.
+ * @irq_work: pointer to the irq work structure.
+ * @lock: lock to implement synchronization.
+ * @clk: pointer to the clock structure.
+ * @temp_error1: fused value of the first point trim.
+ * @temp_error2: fused value of the second point trim.
+ * @regulator: pointer to the TMU regulator structure.
+ * @reg_conf: pointer to structure to register with core thermal.
+ */
+struct exynos_tmu_data {
+       int id;
+       struct exynos_tmu_platform_data *pdata;
+       void __iomem *base;
+       void __iomem *base_common;
+       int irq;
+       enum soc_type soc;
+       struct work_struct irq_work;
+       struct mutex lock;
+       struct clk *clk;
+       u8 temp_error1, temp_error2;
+       struct regulator *regulator;
+       struct thermal_sensor_conf *reg_conf;
+};
+
+/*
+ * TMU treats temperature as a mapped temperature code.
+ * The temperature is converted differently depending on the calibration type.
+ */
+static int temp_to_code(struct exynos_tmu_data *data, u8 temp)
+{
+       struct exynos_tmu_platform_data *pdata = data->pdata;
+
+       /* In HW calibration mode the controller takes plain degrees */
+       if (pdata->cal_mode == HW_MODE)
+               return temp;
+
+       /* On exynos4210 only temperatures in the 25..125 range convert */
+       if (data->soc == SOC_ARCH_EXYNOS4210 && (temp < 25 || temp > 125))
+               return -EINVAL;
+
+       switch (pdata->cal_type) {
+       case TYPE_TWO_POINT_TRIMMING:
+               return (temp - pdata->first_point_trim) *
+                       (data->temp_error2 - data->temp_error1) /
+                       (pdata->second_point_trim - pdata->first_point_trim) +
+                       data->temp_error1;
+       case TYPE_ONE_POINT_TRIMMING:
+               return temp + data->temp_error1 - pdata->first_point_trim;
+       default:
+               return temp + pdata->default_temp_offset;
+       }
+}
+
+/*
+ * Calculate a temperature value from a temperature code.
+ * The unit of the temperature is degree Celsius.
+ */
+static int code_to_temp(struct exynos_tmu_data *data, u8 temp_code)
+{
+       struct exynos_tmu_platform_data *pdata = data->pdata;
+
+       /* In HW calibration mode the raw code already is the temperature */
+       if (pdata->cal_mode == HW_MODE)
+               return temp_code;
+
+       /* On exynos4210 only codes in the 75..175 range are meaningful */
+       if (data->soc == SOC_ARCH_EXYNOS4210 &&
+           (temp_code < 75 || temp_code > 175))
+               return -ENODATA;
+
+       switch (pdata->cal_type) {
+       case TYPE_TWO_POINT_TRIMMING:
+               return (temp_code - data->temp_error1) *
+                       (pdata->second_point_trim - pdata->first_point_trim) /
+                       (data->temp_error2 - data->temp_error1) +
+                       pdata->first_point_trim;
+       case TYPE_ONE_POINT_TRIMMING:
+               return temp_code - data->temp_error1 +
+                       pdata->first_point_trim;
+       default:
+               return temp_code - pdata->default_temp_offset;
+       }
+}
+
+/*
+ * One-time hardware initialization: read the trim (calibration) fuses and
+ * program the rising/falling threshold registers from platform data.
+ * Returns 0 on success or a negative errno.
+ */
+static int exynos_tmu_initialize(struct platform_device *pdev)
+{
+       struct exynos_tmu_data *data = platform_get_drvdata(pdev);
+       struct exynos_tmu_platform_data *pdata = data->pdata;
+       const struct exynos_tmu_registers *reg = pdata->registers;
+       unsigned int status, trim_info = 0, con;
+       unsigned int rising_threshold = 0, falling_threshold = 0;
+       int ret = 0, threshold_code, i, trigger_levs = 0;
+
+       mutex_lock(&data->lock);
+       clk_enable(data->clk);
+
+       if (TMU_SUPPORTS(pdata, READY_STATUS)) {
+               status = readb(data->base + reg->tmu_status);
+               if (!status) {
+                       ret = -EBUSY;
+                       goto out;
+               }
+       }
+
+       if (TMU_SUPPORTS(pdata, TRIM_RELOAD))
+               __raw_writel(1, data->base + reg->triminfo_ctrl);
+
+       if (pdata->cal_mode == HW_MODE)
+               goto skip_calib_data;
+
+       /* Save trimming info in order to perform calibration */
+       if (data->soc == SOC_ARCH_EXYNOS5440) {
+               /*
+                * For exynos5440 soc triminfo value is swapped between TMU0 and
+                * TMU2, so the below logic is needed.
+                */
+               switch (data->id) {
+               case 0:
+                       trim_info = readl(data->base +
+                       EXYNOS5440_EFUSE_SWAP_OFFSET + reg->triminfo_data);
+                       break;
+               case 1:
+                       trim_info = readl(data->base + reg->triminfo_data);
+                       break;
+               case 2:
+                       trim_info = readl(data->base -
+                       EXYNOS5440_EFUSE_SWAP_OFFSET + reg->triminfo_data);
+               }
+       } else {
+               trim_info = readl(data->base + reg->triminfo_data);
+       }
+       data->temp_error1 = trim_info & EXYNOS_TMU_TEMP_MASK;
+       data->temp_error2 = ((trim_info >> reg->triminfo_85_shift) &
+                               EXYNOS_TMU_TEMP_MASK);
+
+       /* Fall back to the platform efuse value when the fuse is unusable */
+       if (!data->temp_error1 ||
+               (pdata->min_efuse_value > data->temp_error1) ||
+               (data->temp_error1 > pdata->max_efuse_value))
+               data->temp_error1 = pdata->efuse_value & EXYNOS_TMU_TEMP_MASK;
+
+       if (!data->temp_error2)
+               data->temp_error2 =
+                       (pdata->efuse_value >> reg->triminfo_85_shift) &
+                       EXYNOS_TMU_TEMP_MASK;
+
+skip_calib_data:
+       if (pdata->max_trigger_level > MAX_THRESHOLD_LEVS) {
+               dev_err(&pdev->dev, "Invalid max trigger level\n");
+               /* Was missing: without this the error path returned 0 */
+               ret = -EINVAL;
+               goto out;
+       }
+
+       for (i = 0; i < pdata->max_trigger_level; i++) {
+               if (!pdata->trigger_levels[i])
+                       continue;
+
+               if ((pdata->trigger_type[i] == HW_TRIP) &&
+               (!pdata->trigger_levels[pdata->max_trigger_level - 1])) {
+                       dev_err(&pdev->dev, "Invalid hw trigger level\n");
+                       ret = -EINVAL;
+                       goto out;
+               }
+
+               /* Count trigger levels except the HW trip*/
+               if (!(pdata->trigger_type[i] == HW_TRIP))
+                       trigger_levs++;
+       }
+
+       if (data->soc == SOC_ARCH_EXYNOS4210) {
+               /* Write temperature code for threshold */
+               threshold_code = temp_to_code(data, pdata->threshold);
+               if (threshold_code < 0) {
+                       ret = threshold_code;
+                       goto out;
+               }
+               writeb(threshold_code,
+                       data->base + reg->threshold_temp);
+               for (i = 0; i < trigger_levs; i++)
+                       writeb(pdata->trigger_levels[i], data->base +
+                       reg->threshold_th0 + i * sizeof(reg->threshold_th0));
+
+               writel(reg->inten_rise_mask, data->base + reg->tmu_intclear);
+       } else {
+               /* Write temperature code for rising and falling threshold */
+               for (i = 0;
+               i < trigger_levs && i < EXYNOS_MAX_TRIGGER_PER_REG; i++) {
+                       threshold_code = temp_to_code(data,
+                                               pdata->trigger_levels[i]);
+                       if (threshold_code < 0) {
+                               ret = threshold_code;
+                               goto out;
+                       }
+                       rising_threshold |= threshold_code << 8 * i;
+                       if (pdata->threshold_falling) {
+                               threshold_code = temp_to_code(data,
+                                               pdata->trigger_levels[i] -
+                                               pdata->threshold_falling);
+                               if (threshold_code > 0)
+                                       falling_threshold |=
+                                               threshold_code << 8 * i;
+                       }
+               }
+
+               writel(rising_threshold,
+                               data->base + reg->threshold_th0);
+               writel(falling_threshold,
+                               data->base + reg->threshold_th1);
+
+               writel((reg->inten_rise_mask << reg->inten_rise_shift) |
+                       (reg->inten_fall_mask << reg->inten_fall_shift),
+                               data->base + reg->tmu_intclear);
+
+               /* if last threshold limit is also present */
+               i = pdata->max_trigger_level - 1;
+               if (pdata->trigger_levels[i] &&
+                               (pdata->trigger_type[i] == HW_TRIP)) {
+                       threshold_code = temp_to_code(data,
+                                               pdata->trigger_levels[i]);
+                       if (threshold_code < 0) {
+                               ret = threshold_code;
+                               goto out;
+                       }
+                       if (i == EXYNOS_MAX_TRIGGER_PER_REG - 1) {
+                               /* 1-4 level to be assigned in th0 reg */
+                               rising_threshold |= threshold_code << 8 * i;
+                               writel(rising_threshold,
+                                       data->base + reg->threshold_th0);
+                       } else if (i == EXYNOS_MAX_TRIGGER_PER_REG) {
+                               /* 5th level to be assigned in th2 reg */
+                               rising_threshold =
+                               threshold_code << reg->threshold_th3_l0_shift;
+                               writel(rising_threshold,
+                                       data->base + reg->threshold_th2);
+                       }
+                       con = readl(data->base + reg->tmu_ctrl);
+                       con |= (1 << reg->therm_trip_en_shift);
+                       writel(con, data->base + reg->tmu_ctrl);
+               }
+       }
+       /*Clear the PMIN in the common TMU register*/
+       if (reg->tmu_pmin && !data->id)
+               writel(0, data->base_common + reg->tmu_pmin);
+out:
+       clk_disable(data->clk);
+       mutex_unlock(&data->lock);
+
+       return ret;
+}
+
+/*
+ * Enable or disable the TMU core and its trip interrupts.  Programs the
+ * reference voltage, gain, noise-cancel mode and (in HW calibration mode)
+ * the calibration select bits of the control register.
+ */
+static void exynos_tmu_control(struct platform_device *pdev, bool on)
+{
+       struct exynos_tmu_data *data = platform_get_drvdata(pdev);
+       struct exynos_tmu_platform_data *pdata = data->pdata;
+       const struct exynos_tmu_registers *reg = pdata->registers;
+       unsigned int con, interrupt_en, cal_val;
+
+       mutex_lock(&data->lock);
+       clk_enable(data->clk);
+
+       con = readl(data->base + reg->tmu_ctrl);
+
+       /* Each field is only touched when platform data provides a value */
+       if (pdata->reference_voltage) {
+               con &= ~(reg->buf_vref_sel_mask << reg->buf_vref_sel_shift);
+               con |= pdata->reference_voltage << reg->buf_vref_sel_shift;
+       }
+
+       if (pdata->gain) {
+               con &= ~(reg->buf_slope_sel_mask << reg->buf_slope_sel_shift);
+               con |= (pdata->gain << reg->buf_slope_sel_shift);
+       }
+
+       if (pdata->noise_cancel_mode) {
+               con &= ~(reg->therm_trip_mode_mask <<
+                                       reg->therm_trip_mode_shift);
+               con |= (pdata->noise_cancel_mode << reg->therm_trip_mode_shift);
+       }
+
+       if (pdata->cal_mode == HW_MODE) {
+               con &= ~(reg->calib_mode_mask << reg->calib_mode_shift);
+               /* Map the calibration type onto the hardware encoding */
+               cal_val = 0;
+               switch (pdata->cal_type) {
+               case TYPE_TWO_POINT_TRIMMING:
+                       cal_val = 3;
+                       break;
+               case TYPE_ONE_POINT_TRIMMING_85:
+                       cal_val = 2;
+                       break;
+               case TYPE_ONE_POINT_TRIMMING_25:
+                       cal_val = 1;
+                       break;
+               case TYPE_NONE:
+                       break;
+               default:
+                       dev_err(&pdev->dev, "Invalid calibration type, using none\n");
+               }
+               con |= cal_val << reg->calib_mode_shift;
+       }
+
+       if (on) {
+               con |= (1 << reg->core_en_shift);
+               interrupt_en =
+                       pdata->trigger_enable[3] << reg->inten_rise3_shift |
+                       pdata->trigger_enable[2] << reg->inten_rise2_shift |
+                       pdata->trigger_enable[1] << reg->inten_rise1_shift |
+                       pdata->trigger_enable[0] << reg->inten_rise0_shift;
+               /* Mirror the rising enables onto the falling-trip bits */
+               if (TMU_SUPPORTS(pdata, FALLING_TRIP))
+                       interrupt_en |=
+                               interrupt_en << reg->inten_fall0_shift;
+       } else {
+               con &= ~(1 << reg->core_en_shift);
+               interrupt_en = 0; /* Disable all interrupts */
+       }
+       writel(interrupt_en, data->base + reg->tmu_inten);
+       writel(con, data->base + reg->tmu_ctrl);
+
+       clk_disable(data->clk);
+       mutex_unlock(&data->lock);
+}
+
+/* Read the current temperature (degrees C) from the TMU hardware */
+static int exynos_tmu_read(struct exynos_tmu_data *data)
+{
+       const struct exynos_tmu_registers *reg = data->pdata->registers;
+       int temp;
+
+       mutex_lock(&data->lock);
+       clk_enable(data->clk);
+
+       temp = code_to_temp(data, readb(data->base + reg->tmu_cur_temp));
+
+       clk_disable(data->clk);
+       mutex_unlock(&data->lock);
+
+       return temp;
+}
+
+#ifdef CONFIG_THERMAL_EMULATION
+/*
+ * Program the emulation register so the TMU reports a fake temperature
+ * (temp in millicelsius); temp == 0 disables emulation.  Returns 0 on
+ * success or -EINVAL when emulation is unsupported or temp is invalid.
+ */
+static int exynos_tmu_set_emulation(void *drv_data, unsigned long temp)
+{
+       struct exynos_tmu_data *data = drv_data;
+       struct exynos_tmu_platform_data *pdata = data->pdata;
+       const struct exynos_tmu_registers *reg = pdata->registers;
+       unsigned int val;
+       int ret = -EINVAL;
+
+       if (!TMU_SUPPORTS(pdata, EMULATION))
+               goto out;
+
+       /* Non-zero values below 1 degree (1000 mC) are rejected */
+       if (temp && temp < MCELSIUS)
+               goto out;
+
+       mutex_lock(&data->lock);
+       clk_enable(data->clk);
+
+       val = readl(data->base + reg->emul_con);
+
+       if (temp) {
+               temp /= MCELSIUS;
+
+               if (TMU_SUPPORTS(pdata, EMUL_TIME)) {
+                       val &= ~(EXYNOS_EMUL_TIME_MASK << reg->emul_time_shift);
+                       val |= (EXYNOS_EMUL_TIME << reg->emul_time_shift);
+               }
+               val &= ~(EXYNOS_EMUL_DATA_MASK << reg->emul_temp_shift);
+               val |= (temp_to_code(data, temp) << reg->emul_temp_shift) |
+                       EXYNOS_EMUL_ENABLE;
+       } else {
+               val &= ~EXYNOS_EMUL_ENABLE;
+       }
+
+       writel(val, data->base + reg->emul_con);
+
+       clk_disable(data->clk);
+       mutex_unlock(&data->lock);
+       return 0;
+out:
+       return ret;
+}
+#else
+/* Emulation disabled in this configuration: always report failure */
+static int exynos_tmu_set_emulation(void *drv_data,    unsigned long temp)
+       { return -EINVAL; }
+#endif/*CONFIG_THERMAL_EMULATION*/
+
+/*
+ * Process-context half of the TMU interrupt: report the trip to the core
+ * thermal layer, clear the interrupt status and re-enable the irq line
+ * that exynos_tmu_irq() masked.
+ */
+static void exynos_tmu_work(struct work_struct *work)
+{
+       struct exynos_tmu_data *data = container_of(work,
+                       struct exynos_tmu_data, irq_work);
+       struct exynos_tmu_platform_data *pdata = data->pdata;
+       const struct exynos_tmu_registers *reg = pdata->registers;
+       unsigned int val_irq, val_type;
+
+       /* Find which sensor generated this interrupt */
+       if (reg->tmu_irqstatus) {
+               val_type = readl(data->base_common + reg->tmu_irqstatus);
+               if (!((val_type >> data->id) & 0x1))
+                       goto out;
+       }
+
+       exynos_report_trigger(data->reg_conf);
+       mutex_lock(&data->lock);
+       clk_enable(data->clk);
+
+       /* TODO: take action based on particular interrupt */
+       val_irq = readl(data->base + reg->tmu_intstat);
+       /* clear the interrupts */
+       writel(val_irq, data->base + reg->tmu_intclear);
+
+       clk_disable(data->clk);
+       mutex_unlock(&data->lock);
+out:
+       enable_irq(data->irq);
+}
+
+/* Hard irq handler: mask the line and defer the work to process context */
+static irqreturn_t exynos_tmu_irq(int irq, void *id)
+{
+       struct exynos_tmu_data *data = id;
+
+       /* The line stays masked until exynos_tmu_work() re-enables it */
+       disable_irq_nosync(irq);
+       schedule_work(&data->irq_work);
+
+       return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_OF
+/* Device-tree match table: maps compatibles to per-SoC TMU init data */
+static const struct of_device_id exynos_tmu_match[] = {
+       {
+               .compatible = "samsung,exynos4210-tmu",
+               .data = (void *)EXYNOS4210_TMU_DRV_DATA,
+       },
+       {
+               /* exynos4412 deliberately reuses the exynos5250 TMU data */
+               .compatible = "samsung,exynos4412-tmu",
+               .data = (void *)EXYNOS5250_TMU_DRV_DATA,
+       },
+       {
+               .compatible = "samsung,exynos5250-tmu",
+               .data = (void *)EXYNOS5250_TMU_DRV_DATA,
+       },
+       {
+               .compatible = "samsung,exynos5440-tmu",
+               .data = (void *)EXYNOS5440_TMU_DRV_DATA,
+       },
+       {},
+};
+MODULE_DEVICE_TABLE(of, exynos_tmu_match);
+#endif
+
+/*
+ * Look up the per-instance platform data for TMU controller @id from the
+ * device-tree match table; returns NULL when no usable entry exists.
+ */
+static inline struct  exynos_tmu_platform_data *exynos_get_driver_data(
+                       struct platform_device *pdev, int id)
+{
+#ifdef CONFIG_OF
+       if (pdev->dev.of_node) {
+               const struct of_device_id *match;
+               struct exynos_tmu_init_data *init_data;
+
+               match = of_match_node(exynos_tmu_match, pdev->dev.of_node);
+               if (!match)
+                       return NULL;
+               init_data = (struct exynos_tmu_init_data *) match->data;
+               if (!init_data || id >= init_data->tmu_count)
+                       return NULL;
+               return (struct exynos_tmu_platform_data *)
+                       (init_data->tmu_data + id);
+       }
+#endif
+       return NULL;
+}
+
+/*
+ * Gather everything the driver needs from the device tree: the optional
+ * vtmu regulator, the controller id/irq, the instance register window and
+ * (when the SoC shares registers) the common register window.
+ * Returns 0 on success or a negative errno.
+ */
+static int exynos_map_dt_data(struct platform_device *pdev)
+{
+       struct exynos_tmu_data *data = platform_get_drvdata(pdev);
+       struct exynos_tmu_platform_data *pdata;
+       struct resource res;
+       int ret;
+
+       if (!data)
+               return -ENODEV;
+
+       /*
+        * Try enabling the regulator if found
+        * TODO: Add regulator as an SOC feature, so that regulator enable
+        * is a compulsory call.
+        */
+       data->regulator = devm_regulator_get(&pdev->dev, "vtmu");
+       if (!IS_ERR(data->regulator)) {
+               ret = regulator_enable(data->regulator);
+               if (ret) {
+                       dev_err(&pdev->dev, "failed to enable vtmu\n");
+                       return ret;
+               }
+       } else {
+               dev_info(&pdev->dev, "Regulator node (vtmu) not found\n");
+       }
+
+       /* Controller instances are distinguished by the tmuctrl alias */
+       data->id = of_alias_get_id(pdev->dev.of_node, "tmuctrl");
+       if (data->id < 0)
+               data->id = 0;
+
+       data->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
+       if (data->irq <= 0) {
+               dev_err(&pdev->dev, "failed to get IRQ\n");
+               return -ENODEV;
+       }
+
+       if (of_address_to_resource(pdev->dev.of_node, 0, &res)) {
+               dev_err(&pdev->dev, "failed to get Resource 0\n");
+               return -ENODEV;
+       }
+
+       /* NOTE(review): returns -EADDRNOTAVAIL here but -ENOMEM for the
+        * identical failure below — looks inconsistent, confirm intent. */
+       data->base = devm_ioremap(&pdev->dev, res.start, resource_size(&res));
+       if (!data->base) {
+               dev_err(&pdev->dev, "Failed to ioremap memory\n");
+               return -EADDRNOTAVAIL;
+       }
+
+       pdata = exynos_get_driver_data(pdev, data->id);
+       if (!pdata) {
+               dev_err(&pdev->dev, "No platform init data supplied.\n");
+               return -ENODEV;
+       }
+       data->pdata = pdata;
+       /*
+        * Check if the TMU shares some registers and then try to map the
+        * memory of common registers.
+        */
+       if (!TMU_SUPPORTS(pdata, SHARED_MEMORY))
+               return 0;
+
+       if (of_address_to_resource(pdev->dev.of_node, 1, &res)) {
+               dev_err(&pdev->dev, "failed to get Resource 1\n");
+               return -ENODEV;
+       }
+
+       data->base_common = devm_ioremap(&pdev->dev, res.start,
+                                       resource_size(&res));
+       if (!data->base_common) {
+               dev_err(&pdev->dev, "Failed to ioremap memory\n");
+               return -ENOMEM;
+       }
+
+       return 0;
+}
+
+static int exynos_tmu_probe(struct platform_device *pdev) /* probe: map DT data, init the HW and register with the thermal core */
+{
+       struct exynos_tmu_data *data;
+       struct exynos_tmu_platform_data *pdata;
+       struct thermal_sensor_conf *sensor_conf;
+       int ret, i;
+
+       data = devm_kzalloc(&pdev->dev, sizeof(struct exynos_tmu_data),
+                                       GFP_KERNEL);
+       if (!data) {
+               dev_err(&pdev->dev, "Failed to allocate driver structure\n");
+               return -ENOMEM;
+       }
+
+       platform_set_drvdata(pdev, data); /* must precede exynos_map_dt_data(), which reads drvdata back */
+       mutex_init(&data->lock);
+
+       ret = exynos_map_dt_data(pdev);
+       if (ret)
+               return ret;
+
+       pdata = data->pdata;
+
+       INIT_WORK(&data->irq_work, exynos_tmu_work);
+
+       data->clk = devm_clk_get(&pdev->dev, "tmu_apbif");
+       if (IS_ERR(data->clk)) {
+               dev_err(&pdev->dev, "Failed to get clock\n");
+               return  PTR_ERR(data->clk);
+       }
+
+       ret = clk_prepare(data->clk);
+       if (ret)
+               return ret;
+
+       if (pdata->type == SOC_ARCH_EXYNOS ||
+               pdata->type == SOC_ARCH_EXYNOS4210 ||
+                               pdata->type == SOC_ARCH_EXYNOS5440)
+               data->soc = pdata->type;
+       else {
+               ret = -EINVAL;
+               dev_err(&pdev->dev, "Platform not supported\n");
+               goto err_clk;
+       }
+
+       ret = exynos_tmu_initialize(pdev);
+       if (ret) {
+               dev_err(&pdev->dev, "Failed to initialize TMU\n");
+               goto err_clk;
+       }
+
+       exynos_tmu_control(pdev, true); /* NOTE(review): later error paths only unprepare the clock and leave the TMU enabled -- confirm intended */
+
+       /* Allocate a structure to register with the exynos core thermal */
+       sensor_conf = devm_kzalloc(&pdev->dev,
+                               sizeof(struct thermal_sensor_conf), GFP_KERNEL);
+       if (!sensor_conf) {
+               dev_err(&pdev->dev, "Failed to allocate registration struct\n");
+               ret = -ENOMEM;
+               goto err_clk;
+       }
+       sprintf(sensor_conf->name, "therm_zone%d", data->id); /* assumes name[] can hold "therm_zoneN" -- TODO confirm field size */
+       sensor_conf->read_temperature = (int (*)(void *))exynos_tmu_read;
+       sensor_conf->write_emul_temp =
+               (int (*)(void *, unsigned long))exynos_tmu_set_emulation;
+       sensor_conf->driver_data = data;
+       sensor_conf->trip_data.trip_count = pdata->trigger_enable[0] +
+                       pdata->trigger_enable[1] + pdata->trigger_enable[2]+
+                       pdata->trigger_enable[3]; /* bool flags sum to the number of enabled trips */
+
+       for (i = 0; i < sensor_conf->trip_data.trip_count; i++) {
+               sensor_conf->trip_data.trip_val[i] =
+                       pdata->threshold + pdata->trigger_levels[i]; /* trip temps are offsets from the base threshold */
+               sensor_conf->trip_data.trip_type[i] =
+                                       pdata->trigger_type[i];
+       }
+
+       sensor_conf->trip_data.trigger_falling = pdata->threshold_falling;
+
+       sensor_conf->cooling_data.freq_clip_count = pdata->freq_tab_count;
+       for (i = 0; i < pdata->freq_tab_count; i++) {
+               sensor_conf->cooling_data.freq_data[i].freq_clip_max =
+                                       pdata->freq_tab[i].freq_clip_max;
+               sensor_conf->cooling_data.freq_data[i].temp_level =
+                                       pdata->freq_tab[i].temp_level;
+       }
+       sensor_conf->dev = &pdev->dev;
+       /* Register the sensor with thermal management interface */
+       ret = exynos_register_thermal(sensor_conf);
+       if (ret) {
+               dev_err(&pdev->dev, "Failed to register thermal interface\n");
+               goto err_clk;
+       }
+       data->reg_conf = sensor_conf;
+
+       ret = devm_request_irq(&pdev->dev, data->irq, exynos_tmu_irq,
+               IRQF_TRIGGER_RISING | IRQF_SHARED, dev_name(&pdev->dev), data); /* requested last: handler may run as soon as this returns */
+       if (ret) {
+               dev_err(&pdev->dev, "Failed to request irq: %d\n", data->irq);
+               goto err_clk;
+       }
+
+       return 0;
+err_clk:
+       clk_unprepare(data->clk);
+       return ret;
+}
+
+static int exynos_tmu_remove(struct platform_device *pdev) /* undo probe: stop the HW, unregister, release clock and regulator */
+{
+       struct exynos_tmu_data *data = platform_get_drvdata(pdev);
+
+       exynos_tmu_control(pdev, false);
+
+       exynos_unregister_thermal(data->reg_conf);
+
+       clk_unprepare(data->clk);
+
+       if (!IS_ERR(data->regulator)) /* regulator was optional in exynos_map_dt_data() */
+               regulator_disable(data->regulator);
+
+       return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int exynos_tmu_suspend(struct device *dev) /* stop the TMU core while the system sleeps */
+{
+       exynos_tmu_control(to_platform_device(dev), false);
+
+       return 0;
+}
+
+static int exynos_tmu_resume(struct device *dev) /* re-init registers (lost over suspend) and re-enable the TMU */
+{
+       struct platform_device *pdev = to_platform_device(dev);
+
+       exynos_tmu_initialize(pdev); /* NOTE(review): return value ignored -- confirm acceptable on resume */
+       exynos_tmu_control(pdev, true);
+
+       return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(exynos_tmu_pm,
+                        exynos_tmu_suspend, exynos_tmu_resume); /* wires suspend/resume into system-sleep callbacks only */
+#define EXYNOS_TMU_PM  (&exynos_tmu_pm)
+#else
+#define EXYNOS_TMU_PM  NULL /* no PM callbacks without CONFIG_PM_SLEEP */
+#endif
+
+static struct platform_driver exynos_tmu_driver = { /* platform driver glue for the "exynos-tmu" device */
+       .driver = {
+               .name   = "exynos-tmu",
+               .owner  = THIS_MODULE,
+               .pm     = EXYNOS_TMU_PM,
+               .of_match_table = of_match_ptr(exynos_tmu_match), /* NULL when !CONFIG_OF */
+       },
+       .probe = exynos_tmu_probe,
+       .remove = exynos_tmu_remove,
+};
+
+module_platform_driver(exynos_tmu_driver);
+
+MODULE_DESCRIPTION("EXYNOS TMU Driver");
+MODULE_AUTHOR("Donggeun Kim <dg77.kim@samsung.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:exynos-tmu");
diff --git a/drivers/thermal/samsung/exynos_tmu.h b/drivers/thermal/samsung/exynos_tmu.h
new file mode 100644 (file)
index 0000000..b364c9e
--- /dev/null
@@ -0,0 +1,311 @@
+/*
+ * exynos_tmu.h - Samsung EXYNOS TMU (Thermal Management Unit)
+ *
+ *  Copyright (C) 2011 Samsung Electronics
+ *  Donggeun Kim <dg77.kim@samsung.com>
+ *  Amit Daniel Kachhap <amit.daniel@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#ifndef _EXYNOS_TMU_H
+#define _EXYNOS_TMU_H
+#include <linux/cpu_cooling.h>
+
+#include "exynos_thermal_common.h"
+
+enum calibration_type { /* trimming scheme used to calibrate raw temp codes */
+       TYPE_ONE_POINT_TRIMMING,
+       TYPE_ONE_POINT_TRIMMING_25,
+       TYPE_ONE_POINT_TRIMMING_25, /* sic: see file as merged upstream */
+       TYPE_TWO_POINT_TRIMMING,
+       TYPE_NONE,
+};
+
+enum calibration_mode { /* whether calibration is applied in SW or by the HW */
+       SW_MODE,
+       HW_MODE,
+};
+
+enum soc_type { /* SoC families with distinct TMU register programming */
+       SOC_ARCH_EXYNOS4210 = 1,
+       SOC_ARCH_EXYNOS,
+       SOC_ARCH_EXYNOS5440,
+};
+
+/**
+ * EXYNOS TMU supported features.
+ * TMU_SUPPORT_EMULATION - This feature is used to set a user defined
+ *                     temperature to the TMU controller.
+ * TMU_SUPPORT_MULTI_INST - This feature denotes that the soc
+ *                     has many instances of TMU.
+ * TMU_SUPPORT_TRIM_RELOAD - This feature shows that trimming can
+ *                     be reloaded.
+ * TMU_SUPPORT_FALLING_TRIP - This feature shows that interrupt can
+ *                     be registered for falling trips also.
+ * TMU_SUPPORT_READY_STATUS - This feature tells that the TMU current
+ *                     state(active/idle) can be checked.
+ * TMU_SUPPORT_EMUL_TIME - This feature allows setting the next temp emulation
+ *                     sample time.
+ * TMU_SUPPORT_SHARED_MEMORY - This feature tells that the different TMU
+ *                     sensors shares some common registers.
+ * TMU_SUPPORT - macro to compare the above features with the supplied.
+ */
+#define TMU_SUPPORT_EMULATION                  BIT(0)
+#define TMU_SUPPORT_MULTI_INST                 BIT(1)
+#define TMU_SUPPORT_TRIM_RELOAD                        BIT(2)
+#define TMU_SUPPORT_FALLING_TRIP               BIT(3)
+#define TMU_SUPPORT_READY_STATUS               BIT(4)
+#define TMU_SUPPORT_EMUL_TIME                  BIT(5)
+#define TMU_SUPPORT_SHARED_MEMORY              BIT(6)
+
+#define TMU_SUPPORTS(a, b)     (a->features & TMU_SUPPORT_ ## b) /* 'b' is the feature suffix; NOTE(review): 'a' is unparenthesized */
+
+/**
+ * struct exynos_tmu_register - register descriptors to access registers and
+ * bitfields. The register validity, offsets and bitfield values may vary
+ * slightly across different exynos SOC's.
+ * @triminfo_data: register containing 2 point trimming data
+ * @triminfo_25_shift: shift bit of the 25 C trim value in triminfo_data reg.
+ * @triminfo_85_shift: shift bit of the 85 C trim value in triminfo_data reg.
+ * @triminfo_ctrl: trim info controller register.
+ * @triminfo_reload_shift: shift of triminfo reload enable bit in triminfo_ctrl
+       reg.
+ * @tmu_ctrl: TMU main controller register.
+ * @buf_vref_sel_shift: shift bits of reference voltage in tmu_ctrl register.
+ * @buf_vref_sel_mask: mask bits of reference voltage in tmu_ctrl register.
+ * @therm_trip_mode_shift: shift bits of tripping mode in tmu_ctrl register.
+ * @therm_trip_mode_mask: mask bits of tripping mode in tmu_ctrl register.
+ * @therm_trip_en_shift: shift bits of tripping enable in tmu_ctrl register.
+ * @buf_slope_sel_shift: shift bits of amplifier gain value in tmu_ctrl
+       register.
+ * @buf_slope_sel_mask: mask bits of amplifier gain value in tmu_ctrl register.
+ * @calib_mode_shift: shift bits of calibration mode value in tmu_ctrl
+       register.
+ * @calib_mode_mask: mask bits of calibration mode value in tmu_ctrl
+       register.
+ * @therm_trip_tq_en_shift: shift bits of thermal trip enable by TQ pin in
+       tmu_ctrl register.
+ * @core_en_shift: shift bits of TMU core enable bit in tmu_ctrl register.
+ * @tmu_status: register describing the TMU status.
+ * @tmu_cur_temp: register containing the current temperature of the TMU.
+ * @tmu_cur_temp_shift: shift bits of current temp value in tmu_cur_temp
+       register.
+ * @threshold_temp: register containing the base threshold level.
+ * @threshold_th0: Register containing first set of rising levels.
+ * @threshold_th0_l0_shift: shift bits of level0 threshold temperature.
+ * @threshold_th0_l1_shift: shift bits of level1 threshold temperature.
+ * @threshold_th0_l2_shift: shift bits of level2 threshold temperature.
+ * @threshold_th0_l3_shift: shift bits of level3 threshold temperature.
+ * @threshold_th1: Register containing second set of rising levels.
+ * @threshold_th1_l0_shift: shift bits of level0 threshold temperature.
+ * @threshold_th1_l1_shift: shift bits of level1 threshold temperature.
+ * @threshold_th1_l2_shift: shift bits of level2 threshold temperature.
+ * @threshold_th1_l3_shift: shift bits of level3 threshold temperature.
+ * @threshold_th2: Register containing third set of rising levels.
+ * @threshold_th2_l0_shift: shift bits of level0 threshold temperature.
+ * @threshold_th3: Register containing fourth set of rising levels.
+ * @threshold_th3_l0_shift: shift bits of level0 threshold temperature.
+ * @tmu_inten: register containing the different threshold interrupt
+       enable bits.
+ * @inten_rise_shift: shift bits of all rising interrupt bits.
+ * @inten_rise_mask: mask bits of all rising interrupt bits.
+ * @inten_fall_shift: shift bits of all falling interrupt bits.
+ * @inten_fall_mask: mask bits of all falling interrupt bits.
+ * @inten_rise0_shift: shift bits of rising 0 interrupt bits.
+ * @inten_rise1_shift: shift bits of rising 1 interrupt bits.
+ * @inten_rise2_shift: shift bits of rising 2 interrupt bits.
+ * @inten_rise3_shift: shift bits of rising 3 interrupt bits.
+ * @inten_fall0_shift: shift bits of falling 0 interrupt bits.
+ * @inten_fall1_shift: shift bits of falling 1 interrupt bits.
+ * @inten_fall2_shift: shift bits of falling 2 interrupt bits.
+ * @inten_fall3_shift: shift bits of falling 3 interrupt bits.
+ * @tmu_intstat: Register containing the interrupt status values.
+ * @tmu_intclear: Register for clearing the raised interrupt status.
+ * @emul_con: TMU emulation controller register.
+ * @emul_temp_shift: shift bits of emulation temperature.
+ * @emul_time_shift: shift bits of emulation time.
+ * @emul_time_mask: mask bits of emulation time.
+ * @tmu_irqstatus: register to find which TMU generated interrupts.
+ * @tmu_pmin: register to get/set the Pmin value.
+ */
+struct exynos_tmu_registers { /* per-SoC register offsets and bitfield positions; see kernel-doc above */
+       u32     triminfo_data;
+       u32     triminfo_25_shift;
+       u32     triminfo_85_shift;
+
+       u32     triminfo_ctrl;
+       u32     triminfo_reload_shift;
+
+       u32     tmu_ctrl; /* main control register and its bitfields */
+       u32     buf_vref_sel_shift;
+       u32     buf_vref_sel_mask;
+       u32     therm_trip_mode_shift;
+       u32     therm_trip_mode_mask;
+       u32     therm_trip_en_shift;
+       u32     buf_slope_sel_shift;
+       u32     buf_slope_sel_mask;
+       u32     calib_mode_shift;
+       u32     calib_mode_mask;
+       u32     therm_trip_tq_en_shift;
+       u32     core_en_shift;
+
+       u32     tmu_status;
+
+       u32     tmu_cur_temp;
+       u32     tmu_cur_temp_shift;
+
+       u32     threshold_temp;
+
+       u32     threshold_th0; /* rising trip level registers, four sets max */
+       u32     threshold_th0_l0_shift;
+       u32     threshold_th0_l1_shift;
+       u32     threshold_th0_l2_shift;
+       u32     threshold_th0_l3_shift;
+
+       u32     threshold_th1;
+       u32     threshold_th1_l0_shift;
+       u32     threshold_th1_l1_shift;
+       u32     threshold_th1_l2_shift;
+       u32     threshold_th1_l3_shift;
+
+       u32     threshold_th2;
+       u32     threshold_th2_l0_shift;
+
+       u32     threshold_th3;
+       u32     threshold_th3_l0_shift;
+
+       u32     tmu_inten; /* interrupt enable register and per-trip bit positions */
+       u32     inten_rise_shift;
+       u32     inten_rise_mask;
+       u32     inten_fall_shift;
+       u32     inten_fall_mask;
+       u32     inten_rise0_shift;
+       u32     inten_rise1_shift;
+       u32     inten_rise2_shift;
+       u32     inten_rise3_shift;
+       u32     inten_fall0_shift;
+       u32     inten_fall1_shift;
+       u32     inten_fall2_shift;
+       u32     inten_fall3_shift;
+
+       u32     tmu_intstat;
+
+       u32     tmu_intclear;
+
+       u32     emul_con; /* temperature emulation control */
+       u32     emul_temp_shift;
+       u32     emul_time_shift;
+       u32     emul_time_mask;
+
+       u32     tmu_irqstatus;
+       u32     tmu_pmin;
+};
+
+/**
+ * struct exynos_tmu_platform_data
+ * @threshold: basic temperature for generating interrupt
+ *            25 <= threshold <= 125 [unit: degree Celsius]
+ * @threshold_falling: differential value for setting threshold
+ *                    of temperature falling interrupt.
+ * @trigger_levels: array for each interrupt levels
+ *     [unit: degree Celsius]
+ *     0: temperature for trigger_level0 interrupt
+ *        condition for trigger_level0 interrupt:
+ *             current temperature > threshold + trigger_levels[0]
+ *     1: temperature for trigger_level1 interrupt
+ *        condition for trigger_level1 interrupt:
+ *             current temperature > threshold + trigger_levels[1]
+ *     2: temperature for trigger_level2 interrupt
+ *        condition for trigger_level2 interrupt:
+ *             current temperature > threshold + trigger_levels[2]
+ *     3: temperature for trigger_level3 interrupt
+ *        condition for trigger_level3 interrupt:
+ *             current temperature > threshold + trigger_levels[3]
+ * @trigger_type: defines the type of trigger. Possible values are,
+ *     THROTTLE_ACTIVE trigger type
+ *     THROTTLE_PASSIVE trigger type
+ *     SW_TRIP trigger type
+ *     HW_TRIP
+ * @trigger_enable[]: array to denote which trigger levels are enabled.
+ *     1 = enable trigger_level[] interrupt,
+ *     0 = disable trigger_level[] interrupt
+ * @max_trigger_level: max trigger level supported by the TMU
+ * @gain: gain of amplifier in the positive-TC generator block
+ *     0 <= gain <= 15
+ * @reference_voltage: reference voltage of amplifier
+ *     in the positive-TC generator block
+ *     0 <= reference_voltage <= 31
+ * @noise_cancel_mode: noise cancellation mode
+ *     000, 100, 101, 110 and 111 can be different modes
+ * @type: determines the type of SOC
+ * @efuse_value: platform defined fuse value
+ * @min_efuse_value: minimum valid trimming data
+ * @max_efuse_value: maximum valid trimming data
+ * @first_point_trim: temp value of the first point trimming
+ * @second_point_trim: temp value of the second point trimming
+ * @default_temp_offset: default temperature offset in case of no trimming
+ * @cal_type: calibration type for temperature
+ * @cal_mode: calibration mode for temperature
+ * @freq_tab: Table representing frequency reduction percentage.
+ * @freq_tab_count: Count of the above table as frequency reduction may
+ *     applicable to only some of the trigger levels.
+ * @registers: Pointer to structure containing all the TMU controller registers
+ *     and bitfields shifts and masks.
+ * @features: a bitfield value indicating the features supported in SOC like
+ *     emulation, multi instance etc
+ *
+ * This structure is required for configuration of exynos_tmu driver.
+ */
+struct exynos_tmu_platform_data { /* per-instance configuration; see kernel-doc above */
+       u8 threshold; /* base temp; trip temps are threshold + trigger_levels[i] */
+       u8 threshold_falling;
+       u8 trigger_levels[MAX_TRIP_COUNT];
+       enum trigger_type trigger_type[MAX_TRIP_COUNT];
+       bool trigger_enable[MAX_TRIP_COUNT];
+       u8 max_trigger_level;
+       u8 gain;
+       u8 reference_voltage;
+       u8 noise_cancel_mode;
+
+       u32 efuse_value; /* fallback/validity bounds for the fused trim data */
+       u32 min_efuse_value;
+       u32 max_efuse_value;
+       u8 first_point_trim;
+       u8 second_point_trim;
+       u8 default_temp_offset;
+
+       enum calibration_type cal_type;
+       enum calibration_mode cal_mode;
+       enum soc_type type;
+       struct freq_clip_table freq_tab[4];
+       unsigned int freq_tab_count;
+       const struct exynos_tmu_registers *registers;
+       unsigned int features; /* bitwise OR of TMU_SUPPORT_* flags */
+};
+
+/**
+ * struct exynos_tmu_init_data
+ * @tmu_count: number of TMU instances.
+ * @tmu_data: platform data of all TMU instances.
+ * This structure is required to store data for multi-instance exynos tmu
+ * driver.
+ */
+struct exynos_tmu_init_data {
+       int tmu_count;
+       struct exynos_tmu_platform_data tmu_data[]; /* flexible array; sized by each static table definition */
+};
+
+#endif /* _EXYNOS_TMU_H */
diff --git a/drivers/thermal/samsung/exynos_tmu_data.c b/drivers/thermal/samsung/exynos_tmu_data.c
new file mode 100644 (file)
index 0000000..9002499
--- /dev/null
@@ -0,0 +1,250 @@
+/*
+ * exynos_tmu_data.c - Samsung EXYNOS tmu data file
+ *
+ *  Copyright (C) 2013 Samsung Electronics
+ *  Amit Daniel Kachhap <amit.daniel@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+
+#include "exynos_thermal_common.h"
+#include "exynos_tmu.h"
+#include "exynos_tmu_data.h"
+
+#if defined(CONFIG_CPU_EXYNOS4210)
+static const struct exynos_tmu_registers exynos4210_tmu_registers = { /* Exynos4210: rising-trip-only register layout */
+       .triminfo_data = EXYNOS_TMU_REG_TRIMINFO,
+       .triminfo_25_shift = EXYNOS_TRIMINFO_25_SHIFT,
+       .triminfo_85_shift = EXYNOS_TRIMINFO_85_SHIFT,
+       .tmu_ctrl = EXYNOS_TMU_REG_CONTROL,
+       .buf_vref_sel_shift = EXYNOS_TMU_REF_VOLTAGE_SHIFT,
+       .buf_vref_sel_mask = EXYNOS_TMU_REF_VOLTAGE_MASK,
+       .buf_slope_sel_shift = EXYNOS_TMU_BUF_SLOPE_SEL_SHIFT,
+       .buf_slope_sel_mask = EXYNOS_TMU_BUF_SLOPE_SEL_MASK,
+       .core_en_shift = EXYNOS_TMU_CORE_EN_SHIFT,
+       .tmu_status = EXYNOS_TMU_REG_STATUS,
+       .tmu_cur_temp = EXYNOS_TMU_REG_CURRENT_TEMP,
+       .threshold_temp = EXYNOS4210_TMU_REG_THRESHOLD_TEMP,
+       .threshold_th0 = EXYNOS4210_TMU_REG_TRIG_LEVEL0,
+       .tmu_inten = EXYNOS_TMU_REG_INTEN,
+       .inten_rise_mask = EXYNOS4210_TMU_TRIG_LEVEL_MASK,
+       .inten_rise0_shift = EXYNOS_TMU_INTEN_RISE0_SHIFT,
+       .inten_rise1_shift = EXYNOS_TMU_INTEN_RISE1_SHIFT,
+       .inten_rise2_shift = EXYNOS_TMU_INTEN_RISE2_SHIFT,
+       .inten_rise3_shift = EXYNOS_TMU_INTEN_RISE3_SHIFT,
+       .tmu_intstat = EXYNOS_TMU_REG_INTSTAT,
+       .tmu_intclear = EXYNOS_TMU_REG_INTCLEAR,
+};
+
+struct exynos_tmu_init_data const exynos4210_default_tmu_data = { /* single TMU instance */
+       .tmu_data = {
+               {
+               .threshold = 80, /* trips fire at 85/100/110 C (threshold + level) */
+               .trigger_levels[0] = 5,
+               .trigger_levels[1] = 20,
+               .trigger_levels[2] = 30,
+               .trigger_enable[0] = true,
+               .trigger_enable[1] = true,
+               .trigger_enable[2] = true,
+               .trigger_enable[3] = false,
+               .trigger_type[0] = THROTTLE_ACTIVE,
+               .trigger_type[1] = THROTTLE_ACTIVE,
+               .trigger_type[2] = SW_TRIP,
+               .max_trigger_level = 4,
+               .gain = 15,
+               .reference_voltage = 7,
+               .cal_type = TYPE_ONE_POINT_TRIMMING,
+               .min_efuse_value = 40,
+               .max_efuse_value = 100,
+               .first_point_trim = 25,
+               .second_point_trim = 85,
+               .default_temp_offset = 50,
+               .freq_tab[0] = { /* cpufreq clipping steps in kHz */
+                       .freq_clip_max = 800 * 1000,
+                       .temp_level = 85,
+                       },
+               .freq_tab[1] = {
+                       .freq_clip_max = 200 * 1000,
+                       .temp_level = 100,
+               },
+               .freq_tab_count = 2,
+               .type = SOC_ARCH_EXYNOS4210,
+               .registers = &exynos4210_tmu_registers,
+               .features = TMU_SUPPORT_READY_STATUS,
+               },
+       },
+       .tmu_count = 1,
+};
+#endif
+
+#if defined(CONFIG_SOC_EXYNOS5250) || defined(CONFIG_SOC_EXYNOS4412)
+static const struct exynos_tmu_registers exynos5250_tmu_registers = { /* 5250/4412 layout: adds trim reload, falling irqs and emulation */
+       .triminfo_data = EXYNOS_TMU_REG_TRIMINFO,
+       .triminfo_25_shift = EXYNOS_TRIMINFO_25_SHIFT,
+       .triminfo_85_shift = EXYNOS_TRIMINFO_85_SHIFT,
+       .triminfo_ctrl = EXYNOS_TMU_TRIMINFO_CON,
+       .triminfo_reload_shift = EXYNOS_TRIMINFO_RELOAD_SHIFT,
+       .tmu_ctrl = EXYNOS_TMU_REG_CONTROL,
+       .buf_vref_sel_shift = EXYNOS_TMU_REF_VOLTAGE_SHIFT,
+       .buf_vref_sel_mask = EXYNOS_TMU_REF_VOLTAGE_MASK,
+       .therm_trip_mode_shift = EXYNOS_TMU_TRIP_MODE_SHIFT,
+       .therm_trip_mode_mask = EXYNOS_TMU_TRIP_MODE_MASK,
+       .therm_trip_en_shift = EXYNOS_TMU_THERM_TRIP_EN_SHIFT,
+       .buf_slope_sel_shift = EXYNOS_TMU_BUF_SLOPE_SEL_SHIFT,
+       .buf_slope_sel_mask = EXYNOS_TMU_BUF_SLOPE_SEL_MASK,
+       .core_en_shift = EXYNOS_TMU_CORE_EN_SHIFT,
+       .tmu_status = EXYNOS_TMU_REG_STATUS,
+       .tmu_cur_temp = EXYNOS_TMU_REG_CURRENT_TEMP,
+       .threshold_th0 = EXYNOS_THD_TEMP_RISE,
+       .threshold_th1 = EXYNOS_THD_TEMP_FALL,
+       .tmu_inten = EXYNOS_TMU_REG_INTEN,
+       .inten_rise_mask = EXYNOS_TMU_RISE_INT_MASK,
+       .inten_rise_shift = EXYNOS_TMU_RISE_INT_SHIFT,
+       .inten_fall_mask = EXYNOS_TMU_FALL_INT_MASK,
+       .inten_fall_shift = EXYNOS_TMU_FALL_INT_SHIFT,
+       .inten_rise0_shift = EXYNOS_TMU_INTEN_RISE0_SHIFT,
+       .inten_rise1_shift = EXYNOS_TMU_INTEN_RISE1_SHIFT,
+       .inten_rise2_shift = EXYNOS_TMU_INTEN_RISE2_SHIFT,
+       .inten_rise3_shift = EXYNOS_TMU_INTEN_RISE3_SHIFT,
+       .inten_fall0_shift = EXYNOS_TMU_INTEN_FALL0_SHIFT,
+       .tmu_intstat = EXYNOS_TMU_REG_INTSTAT,
+       .tmu_intclear = EXYNOS_TMU_REG_INTCLEAR,
+       .emul_con = EXYNOS_EMUL_CON,
+       .emul_temp_shift = EXYNOS_EMUL_DATA_SHIFT,
+       .emul_time_shift = EXYNOS_EMUL_TIME_SHIFT,
+       .emul_time_mask = EXYNOS_EMUL_TIME_MASK,
+};
+
+#define EXYNOS5250_TMU_DATA /* common per-instance pdata for Exynos5250/4412 */ \
+       .threshold_falling = 10, \
+       .trigger_levels[0] = 85, \
+       .trigger_levels[1] = 103, \
+       .trigger_levels[2] = 110, \
+       .trigger_levels[3] = 120, \
+       .trigger_enable[0] = true, \
+       .trigger_enable[1] = true, \
+       .trigger_enable[2] = true, \
+       .trigger_enable[3] = false, \
+       .trigger_type[0] = THROTTLE_ACTIVE, \
+       .trigger_type[1] = THROTTLE_ACTIVE, \
+       .trigger_type[2] = SW_TRIP, \
+       .trigger_type[3] = HW_TRIP, \
+       .max_trigger_level = 4, \
+       .gain = 8, \
+       .reference_voltage = 16, \
+       .noise_cancel_mode = 4, \
+       .cal_type = TYPE_ONE_POINT_TRIMMING, \
+       .efuse_value = 55, \
+       .min_efuse_value = 40, \
+       .max_efuse_value = 100, \
+       .first_point_trim = 25, \
+       .second_point_trim = 85, \
+       .default_temp_offset = 50, \
+       .freq_tab[0] = { /* cpufreq clipping steps in kHz */ \
+               .freq_clip_max = 800 * 1000, \
+               .temp_level = 85, \
+       }, \
+       .freq_tab[1] = { \
+               .freq_clip_max = 200 * 1000, \
+               .temp_level = 103, \
+       }, \
+       .freq_tab_count = 2, \
+       .type = SOC_ARCH_EXYNOS, \
+       .registers = &exynos5250_tmu_registers, \
+       .features = (TMU_SUPPORT_EMULATION | TMU_SUPPORT_TRIM_RELOAD | \
+                       TMU_SUPPORT_FALLING_TRIP | TMU_SUPPORT_READY_STATUS | \
+                       TMU_SUPPORT_EMUL_TIME)
+
+struct exynos_tmu_init_data const exynos5250_default_tmu_data = { /* single TMU instance */
+       .tmu_data = {
+               { EXYNOS5250_TMU_DATA },
+       },
+       .tmu_count = 1,
+};
+#endif
+
+#if defined(CONFIG_SOC_EXYNOS5440)
+static const struct exynos_tmu_registers exynos5440_tmu_registers = { /* 5440: multi-sensor layout with irq status and pmin registers */
+       .triminfo_data = EXYNOS5440_TMU_S0_7_TRIM,
+       .triminfo_25_shift = EXYNOS_TRIMINFO_25_SHIFT,
+       .triminfo_85_shift = EXYNOS_TRIMINFO_85_SHIFT,
+       .tmu_ctrl = EXYNOS5440_TMU_S0_7_CTRL,
+       .buf_vref_sel_shift = EXYNOS_TMU_REF_VOLTAGE_SHIFT,
+       .buf_vref_sel_mask = EXYNOS_TMU_REF_VOLTAGE_MASK,
+       .therm_trip_mode_shift = EXYNOS_TMU_TRIP_MODE_SHIFT,
+       .therm_trip_mode_mask = EXYNOS_TMU_TRIP_MODE_MASK,
+       .therm_trip_en_shift = EXYNOS_TMU_THERM_TRIP_EN_SHIFT,
+       .buf_slope_sel_shift = EXYNOS_TMU_BUF_SLOPE_SEL_SHIFT,
+       .buf_slope_sel_mask = EXYNOS_TMU_BUF_SLOPE_SEL_MASK,
+       .calib_mode_shift = EXYNOS_TMU_CALIB_MODE_SHIFT,
+       .calib_mode_mask = EXYNOS_TMU_CALIB_MODE_MASK,
+       .core_en_shift = EXYNOS_TMU_CORE_EN_SHIFT,
+       .tmu_status = EXYNOS5440_TMU_S0_7_STATUS,
+       .tmu_cur_temp = EXYNOS5440_TMU_S0_7_TEMP,
+       .threshold_th0 = EXYNOS5440_TMU_S0_7_TH0,
+       .threshold_th1 = EXYNOS5440_TMU_S0_7_TH1,
+       .threshold_th2 = EXYNOS5440_TMU_S0_7_TH2,
+       .threshold_th3_l0_shift = EXYNOS5440_TMU_TH_RISE4_SHIFT,
+       .tmu_inten = EXYNOS5440_TMU_S0_7_IRQEN,
+       .inten_rise_mask = EXYNOS5440_TMU_RISE_INT_MASK,
+       .inten_rise_shift = EXYNOS5440_TMU_RISE_INT_SHIFT,
+       .inten_fall_mask = EXYNOS5440_TMU_FALL_INT_MASK,
+       .inten_fall_shift = EXYNOS5440_TMU_FALL_INT_SHIFT,
+       .inten_rise0_shift = EXYNOS5440_TMU_INTEN_RISE0_SHIFT,
+       .inten_rise1_shift = EXYNOS5440_TMU_INTEN_RISE1_SHIFT,
+       .inten_rise2_shift = EXYNOS5440_TMU_INTEN_RISE2_SHIFT,
+       .inten_rise3_shift = EXYNOS5440_TMU_INTEN_RISE3_SHIFT,
+       .inten_fall0_shift = EXYNOS5440_TMU_INTEN_FALL0_SHIFT,
+       .tmu_intstat = EXYNOS5440_TMU_S0_7_IRQ,
+       .tmu_intclear = EXYNOS5440_TMU_S0_7_IRQ, /* same register clears and reports interrupts */
+       .tmu_irqstatus = EXYNOS5440_TMU_IRQ_STATUS,
+       .emul_con = EXYNOS5440_TMU_S0_7_DEBUG,
+       .emul_temp_shift = EXYNOS_EMUL_DATA_SHIFT,
+       .tmu_pmin = EXYNOS5440_TMU_PMIN,
+};
+
+#define EXYNOS5440_TMU_DATA /* per-sensor pdata; note the expansion ends with a trailing comma */ \
+       .trigger_levels[0] = 100, \
+       .trigger_levels[4] = 105, \
+       .trigger_enable[0] = 1, \
+       .trigger_type[0] = SW_TRIP, \
+       .trigger_type[4] = HW_TRIP, \
+       .max_trigger_level = 5, \
+       .gain = 5, \
+       .reference_voltage = 16, \
+       .noise_cancel_mode = 4, \
+       .cal_type = TYPE_ONE_POINT_TRIMMING, \
+       .cal_mode = 0, \
+       .efuse_value = 0x5b2d, \
+       .min_efuse_value = 16, \
+       .max_efuse_value = 76, \
+       .first_point_trim = 25, \
+       .second_point_trim = 70, \
+       .default_temp_offset = 25, \
+       .type = SOC_ARCH_EXYNOS5440, \
+       .registers = &exynos5440_tmu_registers, \
+       .features = (TMU_SUPPORT_EMULATION | TMU_SUPPORT_FALLING_TRIP | \
+                       TMU_SUPPORT_MULTI_INST | TMU_SUPPORT_SHARED_MEMORY),
+
+struct exynos_tmu_init_data const exynos5440_default_tmu_data = { /* three identical sensor instances share one pdata template */
+       .tmu_data = {
+               { EXYNOS5440_TMU_DATA } ,
+               { EXYNOS5440_TMU_DATA } ,
+               { EXYNOS5440_TMU_DATA } ,
+       },
+       .tmu_count = 3,
+};
+#endif
diff --git a/drivers/thermal/samsung/exynos_tmu_data.h b/drivers/thermal/samsung/exynos_tmu_data.h
new file mode 100644 (file)
index 0000000..dc7feb5
--- /dev/null
@@ -0,0 +1,155 @@
+/*
+ * exynos_tmu_data.h - Samsung EXYNOS tmu data header file
+ *
+ *  Copyright (C) 2013 Samsung Electronics
+ *  Amit Daniel Kachhap <amit.daniel@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+
+#ifndef _EXYNOS_TMU_DATA_H
+#define _EXYNOS_TMU_DATA_H
+
+/* Exynos generic registers */
+#define EXYNOS_TMU_REG_TRIMINFO                0x0
+#define EXYNOS_TMU_REG_CONTROL         0x20
+#define EXYNOS_TMU_REG_STATUS          0x28
+#define EXYNOS_TMU_REG_CURRENT_TEMP    0x40
+#define EXYNOS_TMU_REG_INTEN           0x70
+#define EXYNOS_TMU_REG_INTSTAT         0x74
+#define EXYNOS_TMU_REG_INTCLEAR                0x78
+
+#define EXYNOS_TMU_TEMP_MASK           0xff
+#define EXYNOS_TMU_REF_VOLTAGE_SHIFT   24
+#define EXYNOS_TMU_REF_VOLTAGE_MASK    0x1f
+#define EXYNOS_TMU_BUF_SLOPE_SEL_MASK  0xf
+#define EXYNOS_TMU_BUF_SLOPE_SEL_SHIFT 8
+#define EXYNOS_TMU_CORE_EN_SHIFT       0
+
+/* Exynos4210 specific registers */
+#define EXYNOS4210_TMU_REG_THRESHOLD_TEMP      0x44
+#define EXYNOS4210_TMU_REG_TRIG_LEVEL0 0x50
+#define EXYNOS4210_TMU_REG_TRIG_LEVEL1 0x54
+#define EXYNOS4210_TMU_REG_TRIG_LEVEL2 0x58
+#define EXYNOS4210_TMU_REG_TRIG_LEVEL3 0x5C
+#define EXYNOS4210_TMU_REG_PAST_TEMP0  0x60
+#define EXYNOS4210_TMU_REG_PAST_TEMP1  0x64
+#define EXYNOS4210_TMU_REG_PAST_TEMP2  0x68
+#define EXYNOS4210_TMU_REG_PAST_TEMP3  0x6C
+
+#define EXYNOS4210_TMU_TRIG_LEVEL0_MASK        0x1
+#define EXYNOS4210_TMU_TRIG_LEVEL1_MASK        0x10
+#define EXYNOS4210_TMU_TRIG_LEVEL2_MASK        0x100
+#define EXYNOS4210_TMU_TRIG_LEVEL3_MASK        0x1000
+#define EXYNOS4210_TMU_TRIG_LEVEL_MASK 0x1111
+#define EXYNOS4210_TMU_INTCLEAR_VAL    0x1111
+
+/* Exynos5250 and Exynos4412 specific registers */
+#define EXYNOS_TMU_TRIMINFO_CON        0x14
+#define EXYNOS_THD_TEMP_RISE           0x50
+#define EXYNOS_THD_TEMP_FALL           0x54
+#define EXYNOS_EMUL_CON                0x80
+
+#define EXYNOS_TRIMINFO_RELOAD_SHIFT   1
+#define EXYNOS_TRIMINFO_25_SHIFT       0
+#define EXYNOS_TRIMINFO_85_SHIFT       8
+#define EXYNOS_TMU_RISE_INT_MASK       0x111
+#define EXYNOS_TMU_RISE_INT_SHIFT      0
+#define EXYNOS_TMU_FALL_INT_MASK       0x111
+#define EXYNOS_TMU_FALL_INT_SHIFT      12
+#define EXYNOS_TMU_CLEAR_RISE_INT      0x111
+#define EXYNOS_TMU_CLEAR_FALL_INT      (0x111 << 12)
+#define EXYNOS_TMU_TRIP_MODE_SHIFT     13
+#define EXYNOS_TMU_TRIP_MODE_MASK      0x7
+#define EXYNOS_TMU_THERM_TRIP_EN_SHIFT 12
+#define EXYNOS_TMU_CALIB_MODE_SHIFT    4
+#define EXYNOS_TMU_CALIB_MODE_MASK     0x3
+
+#define EXYNOS_TMU_INTEN_RISE0_SHIFT   0
+#define EXYNOS_TMU_INTEN_RISE1_SHIFT   4
+#define EXYNOS_TMU_INTEN_RISE2_SHIFT   8
+#define EXYNOS_TMU_INTEN_RISE3_SHIFT   12
+#define EXYNOS_TMU_INTEN_FALL0_SHIFT   16
+#define EXYNOS_TMU_INTEN_FALL1_SHIFT   20
+#define EXYNOS_TMU_INTEN_FALL2_SHIFT   24
+
+#define EXYNOS_EMUL_TIME       0x57F0
+#define EXYNOS_EMUL_TIME_MASK  0xffff
+#define EXYNOS_EMUL_TIME_SHIFT 16
+#define EXYNOS_EMUL_DATA_SHIFT 8
+#define EXYNOS_EMUL_DATA_MASK  0xFF
+#define EXYNOS_EMUL_ENABLE     0x1
+
+#define EXYNOS_MAX_TRIGGER_PER_REG     4
+
+/*exynos5440 specific registers*/
+#define EXYNOS5440_TMU_S0_7_TRIM               0x000
+#define EXYNOS5440_TMU_S0_7_CTRL               0x020
+#define EXYNOS5440_TMU_S0_7_DEBUG              0x040
+#define EXYNOS5440_TMU_S0_7_STATUS             0x060
+#define EXYNOS5440_TMU_S0_7_TEMP               0x0f0
+#define EXYNOS5440_TMU_S0_7_TH0                        0x110
+#define EXYNOS5440_TMU_S0_7_TH1                        0x130
+#define EXYNOS5440_TMU_S0_7_TH2                        0x150
+#define EXYNOS5440_TMU_S0_7_EVTEN              0x1F0
+#define EXYNOS5440_TMU_S0_7_IRQEN              0x210
+#define EXYNOS5440_TMU_S0_7_IRQ                        0x230
+/* exynos5440 common registers */
+#define EXYNOS5440_TMU_IRQ_STATUS              0x000
+#define EXYNOS5440_TMU_PMIN                    0x004
+#define EXYNOS5440_TMU_TEMP                    0x008
+
+#define EXYNOS5440_TMU_RISE_INT_MASK           0xf
+#define EXYNOS5440_TMU_RISE_INT_SHIFT          0
+#define EXYNOS5440_TMU_FALL_INT_MASK           0xf
+#define EXYNOS5440_TMU_FALL_INT_SHIFT          4
+#define EXYNOS5440_TMU_INTEN_RISE0_SHIFT       0
+#define EXYNOS5440_TMU_INTEN_RISE1_SHIFT       1
+#define EXYNOS5440_TMU_INTEN_RISE2_SHIFT       2
+#define EXYNOS5440_TMU_INTEN_RISE3_SHIFT       3
+#define EXYNOS5440_TMU_INTEN_FALL0_SHIFT       4
+#define EXYNOS5440_TMU_INTEN_FALL1_SHIFT       5
+#define EXYNOS5440_TMU_INTEN_FALL2_SHIFT       6
+#define EXYNOS5440_TMU_INTEN_FALL3_SHIFT       7
+#define EXYNOS5440_TMU_TH_RISE0_SHIFT          0
+#define EXYNOS5440_TMU_TH_RISE1_SHIFT          8
+#define EXYNOS5440_TMU_TH_RISE2_SHIFT          16
+#define EXYNOS5440_TMU_TH_RISE3_SHIFT          24
+#define EXYNOS5440_TMU_TH_RISE4_SHIFT          24
+#define EXYNOS5440_EFUSE_SWAP_OFFSET           8
+
+#if defined(CONFIG_CPU_EXYNOS4210)
+extern struct exynos_tmu_init_data const exynos4210_default_tmu_data;
+#define EXYNOS4210_TMU_DRV_DATA (&exynos4210_default_tmu_data)
+#else
+#define EXYNOS4210_TMU_DRV_DATA (NULL)
+#endif
+
+#if (defined(CONFIG_SOC_EXYNOS5250) || defined(CONFIG_SOC_EXYNOS4412))
+extern struct exynos_tmu_init_data const exynos5250_default_tmu_data;
+#define EXYNOS5250_TMU_DRV_DATA (&exynos5250_default_tmu_data)
+#else
+#define EXYNOS5250_TMU_DRV_DATA (NULL)
+#endif
+
+#if defined(CONFIG_SOC_EXYNOS5440)
+extern struct exynos_tmu_init_data const exynos5440_default_tmu_data;
+#define EXYNOS5440_TMU_DRV_DATA (&exynos5440_default_tmu_data)
+#else
+#define EXYNOS5440_TMU_DRV_DATA (NULL)
+#endif
+
+#endif /*_EXYNOS_TMU_DATA_H*/
index 4d4ddae1a99183cee9705f24e7acdc91cde62cc6..d89e781b0a18d9a71fce0ff18c77683c7699dc10 100644 (file)
@@ -51,44 +51,51 @@ static unsigned long get_target_state(struct thermal_instance *instance,
 {
        struct thermal_cooling_device *cdev = instance->cdev;
        unsigned long cur_state;
+       unsigned long next_target;
 
+       /*
+        * We keep this instance the way it is by default.
+        * Otherwise, we use the current state of the
+        * cdev in use to determine the next_target.
+        */
        cdev->ops->get_cur_state(cdev, &cur_state);
+       next_target = instance->target;
 
        switch (trend) {
        case THERMAL_TREND_RAISING:
                if (throttle) {
-                       cur_state = cur_state < instance->upper ?
+                       next_target = cur_state < instance->upper ?
                                    (cur_state + 1) : instance->upper;
-                       if (cur_state < instance->lower)
-                               cur_state = instance->lower;
+                       if (next_target < instance->lower)
+                               next_target = instance->lower;
                }
                break;
        case THERMAL_TREND_RAISE_FULL:
                if (throttle)
-                       cur_state = instance->upper;
+                       next_target = instance->upper;
                break;
        case THERMAL_TREND_DROPPING:
                if (cur_state == instance->lower) {
                        if (!throttle)
-                               cur_state = -1;
+                               next_target = THERMAL_NO_TARGET;
                } else {
-                       cur_state -= 1;
-                       if (cur_state > instance->upper)
-                               cur_state = instance->upper;
+                       next_target = cur_state - 1;
+                       if (next_target > instance->upper)
+                               next_target = instance->upper;
                }
                break;
        case THERMAL_TREND_DROP_FULL:
                if (cur_state == instance->lower) {
                        if (!throttle)
-                               cur_state = -1;
+                               next_target = THERMAL_NO_TARGET;
                } else
-                       cur_state = instance->lower;
+                       next_target = instance->lower;
                break;
        default:
                break;
        }
 
-       return cur_state;
+       return next_target;
 }
 
 static void update_passive_instance(struct thermal_zone_device *tz,
@@ -133,6 +140,9 @@ static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip)
                old_target = instance->target;
                instance->target = get_target_state(instance, trend, throttle);
 
+               if (old_target == instance->target)
+                       continue;
+
                /* Activate a passive thermal instance */
                if (old_target == THERMAL_NO_TARGET &&
                        instance->target != THERMAL_NO_TARGET)
index 7a84a0595477512581b4247f8a0165d775d34aba..af8cdaa1dcb90b50d9ee1143e933d51d7fc6696d 100644 (file)
 #include <linux/delay.h>
 #include <linux/err.h>
 #include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
 #include <linux/moduleparam.h>
+#include <linux/platform_device.h>
 #include <linux/types.h>
 
+#include <asm/setup.h>
+#include <arch/sim_def.h>
+
 #include <hv/hypervisor.h>
 
 #include "hvc_console.h"
 
+static int use_sim_console;
+static int __init sim_console(char *str)
+{
+       use_sim_console = 1;
+       return 0;
+}
+early_param("sim_console", sim_console);
+
+int tile_console_write(const char *buf, int count)
+{
+       if (unlikely(use_sim_console)) {
+               int i;
+               for (i = 0; i < count; ++i)
+                       __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_PUTC |
+                                    (buf[i] << _SIM_CONTROL_OPERATOR_BITS));
+               __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_PUTC |
+                            (SIM_PUTC_FLUSH_BINARY <<
+                             _SIM_CONTROL_OPERATOR_BITS));
+               return 0;
+       } else {
+               return hv_console_write((HV_VirtAddr)buf, count);
+       }
+}
+
 static int hvc_tile_put_chars(uint32_t vt, const char *buf, int count)
 {
-       return hv_console_write((HV_VirtAddr)buf, count);
+       return tile_console_write(buf, count);
 }
 
 static int hvc_tile_get_chars(uint32_t vt, char *buf, int count)
@@ -44,25 +74,132 @@ static int hvc_tile_get_chars(uint32_t vt, char *buf, int count)
        return i;
 }
 
+#ifdef __tilegx__
+/*
+ * IRQ based callbacks.
+ */
+static int hvc_tile_notifier_add_irq(struct hvc_struct *hp, int irq)
+{
+       int rc;
+       int cpu = raw_smp_processor_id();  /* Choose an arbitrary cpu */
+       HV_Coord coord = { .x = cpu_x(cpu), .y = cpu_y(cpu) };
+
+       rc = notifier_add_irq(hp, irq);
+       if (rc)
+               return rc;
+
+       /*
+        * Request that the hypervisor start sending us interrupts.
+        * If the hypervisor returns an error, we still return 0, so that
+        * we can fall back to polling.
+        */
+       if (hv_console_set_ipi(KERNEL_PL, irq, coord) < 0)
+               notifier_del_irq(hp, irq);
+
+       return 0;
+}
+
+static void hvc_tile_notifier_del_irq(struct hvc_struct *hp, int irq)
+{
+       HV_Coord coord = { 0, 0 };
+
+       /* Tell the hypervisor to stop sending us interrupts. */
+       hv_console_set_ipi(KERNEL_PL, -1, coord);
+
+       notifier_del_irq(hp, irq);
+}
+
+static void hvc_tile_notifier_hangup_irq(struct hvc_struct *hp, int irq)
+{
+       hvc_tile_notifier_del_irq(hp, irq);
+}
+#endif
+
 static const struct hv_ops hvc_tile_get_put_ops = {
        .get_chars = hvc_tile_get_chars,
        .put_chars = hvc_tile_put_chars,
+#ifdef __tilegx__
+       .notifier_add = hvc_tile_notifier_add_irq,
+       .notifier_del = hvc_tile_notifier_del_irq,
+       .notifier_hangup = hvc_tile_notifier_hangup_irq,
+#endif
+};
+
+
+#ifdef __tilegx__
+static int hvc_tile_probe(struct platform_device *pdev)
+{
+       struct hvc_struct *hp;
+       int tile_hvc_irq;
+
+       /* Create our IRQ and register it. */
+       tile_hvc_irq = create_irq();
+       if (tile_hvc_irq < 0)
+               return -ENXIO;
+
+       tile_irq_activate(tile_hvc_irq, TILE_IRQ_PERCPU);
+       hp = hvc_alloc(0, tile_hvc_irq, &hvc_tile_get_put_ops, 128);
+       if (IS_ERR(hp)) {
+               destroy_irq(tile_hvc_irq);
+               return PTR_ERR(hp);
+       }
+       dev_set_drvdata(&pdev->dev, hp);
+
+       return 0;
+}
+
+static int hvc_tile_remove(struct platform_device *pdev)
+{
+       int rc;
+       struct hvc_struct *hp = dev_get_drvdata(&pdev->dev);
+
+       rc = hvc_remove(hp);
+       if (rc == 0)
+               destroy_irq(hp->data);
+
+       return rc;
+}
+
+static void hvc_tile_shutdown(struct platform_device *pdev)
+{
+       struct hvc_struct *hp = dev_get_drvdata(&pdev->dev);
+
+       hvc_tile_notifier_del_irq(hp, hp->data);
+}
+
+static struct platform_device hvc_tile_pdev = {
+       .name           = "hvc-tile",
+       .id             = 0,
+};
+
+static struct platform_driver hvc_tile_driver = {
+       .probe          = hvc_tile_probe,
+       .remove         = hvc_tile_remove,
+       .shutdown       = hvc_tile_shutdown,
+       .driver         = {
+               .name   = "hvc-tile",
+               .owner  = THIS_MODULE,
+       }
 };
+#endif
 
 static int __init hvc_tile_console_init(void)
 {
-       extern void disable_early_printk(void);
        hvc_instantiate(0, 0, &hvc_tile_get_put_ops);
        add_preferred_console("hvc", 0, NULL);
-       disable_early_printk();
        return 0;
 }
 console_initcall(hvc_tile_console_init);
 
 static int __init hvc_tile_init(void)
 {
-       struct hvc_struct *s;
-       s = hvc_alloc(0, 0, &hvc_tile_get_put_ops, 128);
-       return IS_ERR(s) ? PTR_ERR(s) : 0;
+#ifndef __tilegx__
+       struct hvc_struct *hp;
+       hp = hvc_alloc(0, 0, &hvc_tile_get_put_ops, 128);
+       return IS_ERR(hp) ? PTR_ERR(hp) : 0;
+#else
+       platform_device_register(&hvc_tile_pdev);
+       return platform_driver_register(&hvc_tile_driver);
+#endif
 }
 device_initcall(hvc_tile_init);
index 0c629807610e6576ff822f2b958d8894e4182dc7..c791b18cdd086a1fa22b470ba8b9904dd0fc01e3 100644 (file)
@@ -404,7 +404,7 @@ module_exit(hvc_vio_exit);
 void __init hvc_vio_init_early(void)
 {
        struct device_node *stdout_node;
-       const u32 *termno;
+       const __be32 *termno;
        const char *name;
        const struct hv_ops *ops;
 
@@ -429,7 +429,7 @@ void __init hvc_vio_init_early(void)
        termno = of_get_property(stdout_node, "reg", NULL);
        if (termno == NULL)
                goto out;
-       hvterm_priv0.termno = *termno;
+       hvterm_priv0.termno = of_read_number(termno, 1);
        spin_lock_init(&hvterm_priv0.buf_lock);
        hvterm_privs[0] = &hvterm_priv0;
 
index 1456673bcca09b58cc4d1e7c27fd31569b8439bf..330a4dbc0ac700e012255ae265d738b930df4cb6 100644 (file)
@@ -1436,6 +1436,15 @@ config SERIAL_EFM32_UART_CONSOLE
        depends on SERIAL_EFM32_UART=y
        select SERIAL_CORE_CONSOLE
 
+config SERIAL_TILEGX
+       tristate "TILE-Gx on-chip serial port support"
+       depends on TILEGX
+       select TILE_GXIO_UART
+       select SERIAL_CORE
+       ---help---
+         This device provides access to the on-chip UARTs on the TILE-Gx
+         processor.
+
 config SERIAL_ARC
        tristate "ARC UART driver support"
        select SERIAL_CORE
index cf650f0cd6e4175adb72d9139c278dc33ec7b4fa..3d0f097e82ff51b3e34c8cb80e467d70c07dfb34 100644 (file)
@@ -65,6 +65,7 @@ obj-$(CONFIG_SERIAL_KGDB_NMI) += kgdb_nmi.o
 obj-$(CONFIG_SERIAL_KS8695) += serial_ks8695.o
 obj-$(CONFIG_SERIAL_OMAP) += omap-serial.o
 obj-$(CONFIG_SERIAL_ALTERA_UART) += altera_uart.o
+obj-$(CONFIG_SERIAL_TILEGX) += tilegx.o
 obj-$(CONFIG_KGDB_SERIAL_CONSOLE) += kgdboc.o
 obj-$(CONFIG_SERIAL_QE) += ucc_uart.o
 obj-$(CONFIG_SERIAL_TIMBERDALE)        += timbuart.o
index e1280a20b7a2f4c840e400039d79a40196ae7e5a..5be1df39f9f5f6b8fe0671bba23ba8f566e685f4 100644 (file)
@@ -107,6 +107,8 @@ struct psc_ops {
        unsigned int    (*set_baudrate)(struct uart_port *port,
                                        struct ktermios *new,
                                        struct ktermios *old);
+       int             (*clock_alloc)(struct uart_port *port);
+       void            (*clock_relse)(struct uart_port *port);
        int             (*clock)(struct uart_port *port, int enable);
        int             (*fifoc_init)(void);
        void            (*fifoc_uninit)(void);
@@ -616,31 +618,73 @@ static irqreturn_t mpc512x_psc_handle_irq(struct uart_port *port)
        return IRQ_NONE;
 }
 
-static int mpc512x_psc_clock(struct uart_port *port, int enable)
+static struct clk *psc_mclk_clk[MPC52xx_PSC_MAXNUM];
+
+/* called from within the .request_port() callback (allocation) */
+static int mpc512x_psc_alloc_clock(struct uart_port *port)
 {
-       struct clk *psc_clk;
        int psc_num;
-       char clk_name[10];
+       char clk_name[16];
+       struct clk *clk;
+       int err;
+
+       psc_num = (port->mapbase & 0xf00) >> 8;
+       snprintf(clk_name, sizeof(clk_name), "psc%d_mclk", psc_num);
+       clk = devm_clk_get(port->dev, clk_name);
+       if (IS_ERR(clk)) {
+               dev_err(port->dev, "Failed to get MCLK!\n");
+               return PTR_ERR(clk);
+       }
+       err = clk_prepare_enable(clk);
+       if (err) {
+               dev_err(port->dev, "Failed to enable MCLK!\n");
+               return err;
+       }
+       psc_mclk_clk[psc_num] = clk;
+       return 0;
+}
+
+/* called from within the .release_port() callback (release) */
+static void mpc512x_psc_relse_clock(struct uart_port *port)
+{
+       int psc_num;
+       struct clk *clk;
+
+       psc_num = (port->mapbase & 0xf00) >> 8;
+       clk = psc_mclk_clk[psc_num];
+       if (clk) {
+               clk_disable_unprepare(clk);
+               psc_mclk_clk[psc_num] = NULL;
+       }
+}
+
+/* implementation of the .clock() callback (enable/disable) */
+static int mpc512x_psc_endis_clock(struct uart_port *port, int enable)
+{
+       int psc_num;
+       struct clk *psc_clk;
+       int ret;
 
        if (uart_console(port))
                return 0;
 
        psc_num = (port->mapbase & 0xf00) >> 8;
-       snprintf(clk_name, sizeof(clk_name), "psc%d_mclk", psc_num);
-       psc_clk = clk_get(port->dev, clk_name);
-       if (IS_ERR(psc_clk)) {
+       psc_clk = psc_mclk_clk[psc_num];
+       if (!psc_clk) {
                dev_err(port->dev, "Failed to get PSC clock entry!\n");
                return -ENODEV;
        }
 
-       dev_dbg(port->dev, "%s %sable\n", clk_name, enable ? "en" : "dis");
-
-       if (enable)
-               clk_enable(psc_clk);
-       else
+       dev_dbg(port->dev, "mclk %sable\n", enable ? "en" : "dis");
+       if (enable) {
+               ret = clk_enable(psc_clk);
+               if (ret)
+                       dev_err(port->dev, "Failed to enable MCLK!\n");
+               return ret;
+       } else {
                clk_disable(psc_clk);
-
-       return 0;
+               return 0;
+       }
 }
 
 static void mpc512x_psc_get_irq(struct uart_port *port, struct device_node *np)
@@ -873,7 +917,9 @@ static struct psc_ops mpc5125_psc_ops = {
        .cw_disable_ints = mpc5125_psc_cw_disable_ints,
        .cw_restore_ints = mpc5125_psc_cw_restore_ints,
        .set_baudrate = mpc5125_psc_set_baudrate,
-       .clock = mpc512x_psc_clock,
+       .clock_alloc = mpc512x_psc_alloc_clock,
+       .clock_relse = mpc512x_psc_relse_clock,
+       .clock = mpc512x_psc_endis_clock,
        .fifoc_init = mpc512x_psc_fifoc_init,
        .fifoc_uninit = mpc512x_psc_fifoc_uninit,
        .get_irq = mpc512x_psc_get_irq,
@@ -906,7 +952,9 @@ static struct psc_ops mpc512x_psc_ops = {
        .cw_disable_ints = mpc512x_psc_cw_disable_ints,
        .cw_restore_ints = mpc512x_psc_cw_restore_ints,
        .set_baudrate = mpc512x_psc_set_baudrate,
-       .clock = mpc512x_psc_clock,
+       .clock_alloc = mpc512x_psc_alloc_clock,
+       .clock_relse = mpc512x_psc_relse_clock,
+       .clock = mpc512x_psc_endis_clock,
        .fifoc_init = mpc512x_psc_fifoc_init,
        .fifoc_uninit = mpc512x_psc_fifoc_uninit,
        .get_irq = mpc512x_psc_get_irq,
@@ -1166,6 +1214,9 @@ mpc52xx_uart_type(struct uart_port *port)
 static void
 mpc52xx_uart_release_port(struct uart_port *port)
 {
+       if (psc_ops->clock_relse)
+               psc_ops->clock_relse(port);
+
        /* remapped by us ? */
        if (port->flags & UPF_IOREMAP) {
                iounmap(port->membase);
@@ -1190,11 +1241,24 @@ mpc52xx_uart_request_port(struct uart_port *port)
        err = request_mem_region(port->mapbase, sizeof(struct mpc52xx_psc),
                        "mpc52xx_psc_uart") != NULL ? 0 : -EBUSY;
 
-       if (err && (port->flags & UPF_IOREMAP)) {
+       if (err)
+               goto out_membase;
+
+       if (psc_ops->clock_alloc) {
+               err = psc_ops->clock_alloc(port);
+               if (err)
+                       goto out_mapregion;
+       }
+
+       return 0;
+
+out_mapregion:
+       release_mem_region(port->mapbase, sizeof(struct mpc52xx_psc));
+out_membase:
+       if (port->flags & UPF_IOREMAP) {
                iounmap(port->membase);
                port->membase = NULL;
        }
-
        return err;
 }
 
index 7477e0ea5cdb886607e023ae06ce8cc92bb2edde..5ef9300b04669d22a04263950dd51e25262b2d6a 100644 (file)
@@ -52,6 +52,7 @@
 #include <linux/scatterlist.h>
 #include <linux/slab.h>
 #include <linux/gpio.h>
+#include <linux/of.h>
 
 #ifdef CONFIG_SUPERH
 #include <asm/sh_bios.h>
@@ -2437,6 +2438,112 @@ static int sci_remove(struct platform_device *dev)
        return 0;
 }
 
+#ifdef CONFIG_OF
+static const struct of_device_id of_sci_match[] = {
+       { .compatible = "renesas,sci-SCI-uart",
+               .data = (void *)PORT_SCI },
+       { .compatible = "renesas,sci-SCIF-uart",
+               .data = (void *)PORT_SCIF },
+       { .compatible = "renesas,sci-IRDA-uart",
+               .data = (void *)PORT_IRDA },
+       { .compatible = "renesas,sci-SCIFA-uart",
+               .data = (void *)PORT_SCIFA },
+       { .compatible = "renesas,sci-SCIFB-uart",
+               .data = (void *)PORT_SCIFB },
+       {},
+};
+MODULE_DEVICE_TABLE(of, of_sci_match);
+
+static struct plat_sci_port *sci_parse_dt(struct platform_device *pdev,
+                                                               int *dev_id)
+{
+       struct plat_sci_port *p;
+       struct device_node *np = pdev->dev.of_node;
+       const struct of_device_id *match;
+       struct resource *res;
+       const __be32 *prop;
+       int i, irq, val;
+
+       match = of_match_node(of_sci_match, pdev->dev.of_node);
+       if (!match || !match->data) {
+               dev_err(&pdev->dev, "OF match error\n");
+               return NULL;
+       }
+
+       p = devm_kzalloc(&pdev->dev, sizeof(struct plat_sci_port), GFP_KERNEL);
+       if (!p) {
+               dev_err(&pdev->dev, "failed to allocate DT config data\n");
+               return NULL;
+       }
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!res) {
+               dev_err(&pdev->dev, "failed to get I/O memory\n");
+               return NULL;
+       }
+       p->mapbase = res->start;
+
+       for (i = 0; i < SCIx_NR_IRQS; i++) {
+               irq = platform_get_irq(pdev, i);
+               if (irq < 0) {
+                       dev_err(&pdev->dev, "failed to get irq data %d\n", i);
+                       return NULL;
+               }
+               p->irqs[i] = irq;
+       }
+
+       prop = of_get_property(np, "cell-index", NULL);
+       if (!prop) {
+               dev_err(&pdev->dev, "required DT prop cell-index missing\n");
+               return NULL;
+       }
+       *dev_id = be32_to_cpup(prop);
+
+       prop = of_get_property(np, "renesas,scscr", NULL);
+       if (!prop) {
+               dev_err(&pdev->dev, "required DT prop scscr missing\n");
+               return NULL;
+       }
+       p->scscr = be32_to_cpup(prop);
+
+       prop = of_get_property(np, "renesas,scbrr-algo-id", NULL);
+       if (!prop) {
+               dev_err(&pdev->dev, "required DT prop scbrr-algo-id missing\n");
+               return NULL;
+       }
+       val = be32_to_cpup(prop);
+       if (val <= SCBRR_ALGO_INVALID || val >= SCBRR_NR_ALGOS) {
+               dev_err(&pdev->dev, "DT prop scbrr-algo-id out of range\n");
+               return NULL;
+       }
+       p->scbrr_algo_id = val;
+
+       p->flags = UPF_IOREMAP;
+       if (of_get_property(np, "renesas,autoconf", NULL))
+               p->flags |= UPF_BOOT_AUTOCONF;
+
+       prop = of_get_property(np, "renesas,regtype", NULL);
+       if (prop) {
+               val = be32_to_cpup(prop);
+               if (val < SCIx_PROBE_REGTYPE || val >= SCIx_NR_REGTYPES) {
+                       dev_err(&pdev->dev, "DT prop regtype out of range\n");
+                       return NULL;
+               }
+               p->regtype = val;
+       }
+
+       p->type = (unsigned int)match->data;
+
+       return p;
+}
+#else
+static struct plat_sci_port *sci_parse_dt(struct platform_device *pdev,
+                                                               int *dev_id)
+{
+       return NULL;
+}
+#endif /* CONFIG_OF */
+
 static int sci_probe_single(struct platform_device *dev,
                                      unsigned int index,
                                      struct plat_sci_port *p,
@@ -2469,9 +2576,9 @@ static int sci_probe_single(struct platform_device *dev,
 
 static int sci_probe(struct platform_device *dev)
 {
-       struct plat_sci_port *p = dev->dev.platform_data;
-       struct sci_port *sp = &sci_ports[dev->id];
-       int ret;
+       struct plat_sci_port *p;
+       struct sci_port *sp;
+       int ret, dev_id = dev->id;
 
        /*
         * If we've come here via earlyprintk initialization, head off to
@@ -2481,9 +2588,20 @@ static int sci_probe(struct platform_device *dev)
        if (is_early_platform_device(dev))
                return sci_probe_earlyprintk(dev);
 
+       if (dev->dev.of_node)
+               p = sci_parse_dt(dev, &dev_id);
+       else
+               p = dev->dev.platform_data;
+
+       if (!p) {
+               dev_err(&dev->dev, "no setup data supplied\n");
+               return -EINVAL;
+       }
+
+       sp = &sci_ports[dev_id];
        platform_set_drvdata(dev, sp);
 
-       ret = sci_probe_single(dev, dev->id, p, sp);
+       ret = sci_probe_single(dev, dev_id, p, sp);
        if (ret)
                return ret;
 
@@ -2535,6 +2653,7 @@ static struct platform_driver sci_driver = {
                .name   = "sh-sci",
                .owner  = THIS_MODULE,
                .pm     = &sci_dev_pm_ops,
+               .of_match_table = of_match_ptr(of_sci_match),
        },
 };
 
diff --git a/drivers/tty/serial/tilegx.c b/drivers/tty/serial/tilegx.c
new file mode 100644 (file)
index 0000000..f92d7e6
--- /dev/null
@@ -0,0 +1,708 @@
+/*
+ * Copyright 2013 Tilera Corporation. All Rights Reserved.
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful, but
+ *   WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ *   NON INFRINGEMENT.  See the GNU General Public License for
+ *   more details.
+ *
+ * TILEGx UART driver.
+ */
+
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/serial_core.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+
+#include <gxio/common.h>
+#include <gxio/iorpc_globals.h>
+#include <gxio/iorpc_uart.h>
+#include <gxio/kiorpc.h>
+
+#include <hv/drv_uart_intf.h>
+
+/*
+ * Use device name ttyS, major 4, minor 64-65.
+ * This is the usual serial port name, 8250 conventional range.
+ */
+#define TILEGX_UART_MAJOR      TTY_MAJOR
+#define TILEGX_UART_MINOR      64
+#define TILEGX_UART_NAME       "ttyS"
+#define DRIVER_NAME_STRING     "TILEGx_Serial"
+#define TILEGX_UART_REF_CLK    125000000 /* REF_CLK is always 125 MHz. */
+
+/* Per-port driver state wrapping the serial-core uart_port. */
+struct tile_uart_port {
+       /* UART port. */
+       struct uart_port        uart;
+
+       /* GXIO device context. */
+       gxio_uart_context_t     context;
+
+       /* UART access mutex. */
+       struct mutex            mutex;
+
+       /* CPU receiving interrupts. */
+       int                     irq_cpu;
+};
+
+/* One entry per hypervisor UART; contexts start closed (fd == -1, see tilegx_init_ports). */
+static struct tile_uart_port tile_uart_ports[TILEGX_UART_NR];
+static struct uart_driver tilegx_uart_driver;
+
+
+/*
+ * Read UART rx fifo, and insert the chars into tty buffer.
+ */
+static void receive_chars(struct tile_uart_port *tile_uart,
+                         struct tty_struct *tty)
+{
+       UART_FIFO_COUNT_t count;
+       gxio_uart_context_t *context = &tile_uart->context;
+       struct tty_port *port = tty->port;
+       int remaining;
+
+       /* Snapshot the fill level once, then drain exactly that many chars. */
+       count.word = gxio_uart_read(context, UART_FIFO_COUNT);
+       for (remaining = count.rfifo_count; remaining > 0; remaining--) {
+               char c = (char)gxio_uart_read(context, UART_RECEIVE_DATA);
+
+               tty_insert_flip_char(port, c, TTY_NORMAL);
+       }
+}
+
+
+/*
+ * Drain the Rx FIFO, called by interrupt handler.
+ */
+static void handle_receive(struct tile_uart_port *tile_uart)
+{
+       struct tty_port *port = &tile_uart->uart.state->port;
+       struct tty_struct *tty = tty_port_tty_get(port);
+       gxio_uart_context_t *context = &tile_uart->context;
+
+       /* No tty attached: nobody to deliver to, just bail. */
+       if (!tty)
+               return;
+
+       /* First read UART rx fifo. */
+       receive_chars(tile_uart, tty);
+
+       /* Reset RFIFO_WE interrupt. */
+       gxio_uart_write(context, UART_INTERRUPT_STATUS,
+                       UART_INTERRUPT_MASK__RFIFO_WE_MASK);
+
+       /* Final read, if any chars comes between the first read and
+        * the interrupt reset.
+        */
+       receive_chars(tile_uart, tty);
+
+       /* Caller (tilegx_interrupt) holds uart.lock; drop it around the push. */
+       spin_unlock(&tile_uart->uart.lock);
+       tty_flip_buffer_push(port);
+       spin_lock(&tile_uart->uart.lock);
+       tty_kref_put(tty);
+}
+
+
+/*
+ * Push one char to UART Write FIFO.
+ * Return 0 on success, -1 if write fifo is full.
+ */
+static int tilegx_putchar(gxio_uart_context_t *context, char c)
+{
+       UART_FLAG_t flag;
+       flag.word = gxio_uart_read(context, UART_FLAG);
+       if (flag.wfifo_full)
+               return -1;
+
+       gxio_uart_write(context, UART_TRANSMIT_DATA, (unsigned long)c);
+       return 0;
+}
+
+
+/*
+ * Send chars to UART Write FIFO; called by interrupt handler.
+ */
+static void handle_transmit(struct tile_uart_port *tile_uart)
+{
+       unsigned char ch;
+       struct uart_port *port;
+       struct circ_buf *xmit;
+       gxio_uart_context_t *context = &tile_uart->context;
+
+       /* First reset WFIFO_RE interrupt. */
+       gxio_uart_write(context, UART_INTERRUPT_STATUS,
+                       UART_INTERRUPT_MASK__WFIFO_RE_MASK);
+
+       port = &tile_uart->uart;
+       xmit = &port->state->xmit;
+       /* High-priority char (XON/XOFF etc.) goes out first. */
+       if (port->x_char) {
+               if (tilegx_putchar(context, port->x_char))
+                       return;
+               port->x_char = 0;
+               port->icount.tx++;
+       }
+
+       if (uart_circ_empty(xmit) || uart_tx_stopped(port))
+               return;
+
+       /* Feed the write FIFO until it fills or the circ buffer empties. */
+       while (!uart_circ_empty(xmit)) {
+               ch = xmit->buf[xmit->tail];
+               if (tilegx_putchar(context, ch))
+                       break;
+               xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+               port->icount.tx++;
+       }
+
+       /* Reset WFIFO_RE interrupt. */
+       /* NOTE(review): status is cleared a second time here — presumably to
+        * ack a WFIFO_RE raised while we were filling the FIFO; confirm
+        * against the UART hardware spec.
+        */
+       gxio_uart_write(context, UART_INTERRUPT_STATUS,
+                       UART_INTERRUPT_MASK__WFIFO_RE_MASK);
+
+       if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+               uart_write_wakeup(port);
+}
+
+
+/*
+ * UART Interrupt handler.
+ */
+static irqreturn_t tilegx_interrupt(int irq, void *dev_id)
+{
+       unsigned long flags;
+       UART_INTERRUPT_STATUS_t intr_stat;
+       struct tile_uart_port *tile_uart;
+       gxio_uart_context_t *context;
+       struct uart_port *port = dev_id;
+       irqreturn_t ret = IRQ_NONE;
+
+       spin_lock_irqsave(&port->lock, flags);
+
+       tile_uart = container_of(port, struct tile_uart_port, uart);
+       context = &tile_uart->context;
+       intr_stat.word = gxio_uart_read(context, UART_INTERRUPT_STATUS);
+
+       /* Rx interrupt pending: drain the receive FIFO. */
+       if (intr_stat.rfifo_we) {
+               handle_receive(tile_uart);
+               ret = IRQ_HANDLED;
+       }
+       /* Tx interrupt pending: refill the write FIFO. */
+       if (intr_stat.wfifo_re) {
+               handle_transmit(tile_uart);
+               ret = IRQ_HANDLED;
+       }
+
+       spin_unlock_irqrestore(&port->lock, flags);
+       return ret;
+}
+
+
+/*
+ * Return TIOCSER_TEMT when transmitter FIFO is empty.
+ */
+static u_int tilegx_tx_empty(struct uart_port *port)
+{
+       int ret;
+       UART_FLAG_t flag;
+       struct tile_uart_port *tile_uart;
+       gxio_uart_context_t *context;
+
+       tile_uart = container_of(port, struct tile_uart_port, uart);
+       /* If the mutex is contended, conservatively report "not empty". */
+       if (!mutex_trylock(&tile_uart->mutex))
+               return 0;
+       context = &tile_uart->context;
+
+       flag.word = gxio_uart_read(context, UART_FLAG);
+       ret = (flag.wfifo_empty) ? TIOCSER_TEMT : 0;
+       mutex_unlock(&tile_uart->mutex);
+
+       return ret;
+}
+
+
+/*
+ * Set state of the modem control output lines.
+ * No modem control lines on this hardware interface, so this is a no-op.
+ */
+static void tilegx_set_mctrl(struct uart_port *port, u_int mctrl)
+{
+       /* N/A */
+}
+
+
+/*
+ * Get state of the modem control input lines.
+ * Always report CTS/DSR/carrier asserted, since none are wired up.
+ */
+static u_int tilegx_get_mctrl(struct uart_port *port)
+{
+       return TIOCM_CTS | TIOCM_DSR | TIOCM_CAR;
+}
+
+
+/*
+ * Stop transmitting.
+ */
+static void tilegx_stop_tx(struct uart_port *port)
+{
+       /* N/A */
+}
+
+
+/*
+ * Start transmitting.
+ */
+static void tilegx_start_tx(struct uart_port *port)
+{
+       unsigned char ch;
+       struct circ_buf *xmit;
+       struct tile_uart_port *tile_uart;
+       gxio_uart_context_t *context;
+
+       tile_uart = container_of(port, struct tile_uart_port, uart);
+       if (!mutex_trylock(&tile_uart->mutex))
+               return;
+       context = &tile_uart->context;
+       xmit = &port->state->xmit;
+       if (port->x_char) {
+               /*
+                * High-priority char: if the FIFO is full, give up now, but
+                * release the mutex first (the original early return here
+                * leaked the lock, wedging the port).
+                */
+               if (tilegx_putchar(context, port->x_char)) {
+                       mutex_unlock(&tile_uart->mutex);
+                       return;
+               }
+               port->x_char = 0;
+               port->icount.tx++;
+       }
+
+       if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
+               mutex_unlock(&tile_uart->mutex);
+               return;
+       }
+
+       /* Feed the write FIFO until it fills or we run out of data. */
+       while (!uart_circ_empty(xmit)) {
+               ch = xmit->buf[xmit->tail];
+               if (tilegx_putchar(context, ch))
+                       break;
+               xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+               port->icount.tx++;
+       }
+
+       if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+               uart_write_wakeup(port);
+
+       mutex_unlock(&tile_uart->mutex);
+}
+
+
+/*
+ * Stop receiving - port is in process of being closed.
+ */
+static void tilegx_stop_rx(struct uart_port *port)
+{
+       int err;
+       struct tile_uart_port *tile_uart;
+       gxio_uart_context_t *context;
+       int cpu;
+
+       tile_uart = container_of(port, struct tile_uart_port, uart);
+       if (!mutex_trylock(&tile_uart->mutex))
+               return;
+
+       /* Tell the hypervisor to stop routing this UART's interrupts to us. */
+       context = &tile_uart->context;
+       cpu = tile_uart->irq_cpu;
+       /* NOTE(review): err is never checked — confirm best-effort teardown
+        * is intended here.
+        */
+       err = gxio_uart_cfg_interrupt(context, cpu_x(cpu), cpu_y(cpu),
+                                     KERNEL_PL, -1);
+       mutex_unlock(&tile_uart->mutex);
+}
+
+
+/*
+ * Enable modem status interrupts.
+ * No modem status lines on this interface, so nothing to enable.
+ */
+static void tilegx_enable_ms(struct uart_port *port)
+{
+       /* N/A */
+}
+
+/*
+ * Control the transmission of a break signal.
+ * Break generation is not supported; serial core tolerates a no-op here.
+ */
+static void tilegx_break_ctl(struct uart_port *port, int break_state)
+{
+       /* N/A */
+}
+
+
+/*
+ * Perform initialization and enable port for reception.
+ */
+static int tilegx_startup(struct uart_port *port)
+{
+       struct tile_uart_port *tile_uart;
+       gxio_uart_context_t *context;
+       int ret = 0;
+       int cpu = raw_smp_processor_id();  /* pick an arbitrary cpu */
+
+       tile_uart = container_of(port, struct tile_uart_port, uart);
+       /* NOTE(review): returning -EBUSY when interrupted by a signal is
+        * unusual; -ERESTARTSYS is the conventional value — confirm intent.
+        */
+       if (mutex_lock_interruptible(&tile_uart->mutex))
+               return -EBUSY;
+       context = &tile_uart->context;
+
+       /* Now open the hypervisor device if we haven't already. */
+       if (context->fd < 0) {
+               UART_INTERRUPT_MASK_t intr_mask;
+
+               /* Initialize UART device. */
+               ret = gxio_uart_init(context, port->line);
+               if (ret) {
+                       ret = -ENXIO;
+                       goto err;
+               }
+
+               /* Create our IRQs. */
+               port->irq = create_irq();
+               if (port->irq < 0)
+                       goto err_uart_dest;
+               tile_irq_activate(port->irq, TILE_IRQ_PERCPU);
+
+               /* Register our IRQs. */
+               ret = request_irq(port->irq, tilegx_interrupt, 0,
+                                 tilegx_uart_driver.driver_name, port);
+               if (ret)
+                       goto err_dest_irq;
+
+               /* Request that the hardware start sending us interrupts. */
+               tile_uart->irq_cpu = cpu;
+               ret = gxio_uart_cfg_interrupt(context, cpu_x(cpu), cpu_y(cpu),
+                                             KERNEL_PL, port->irq);
+               if (ret)
+                       goto err_free_irq;
+
+               /* Enable UART Tx/Rx Interrupt. */
+               intr_mask.word = gxio_uart_read(context, UART_INTERRUPT_MASK);
+               intr_mask.wfifo_re = 0;
+               intr_mask.rfifo_we = 0;
+               gxio_uart_write(context, UART_INTERRUPT_MASK, intr_mask.word);
+
+               /* Reset the Tx/Rx interrupt in case it's set. */
+               gxio_uart_write(context, UART_INTERRUPT_STATUS,
+                               UART_INTERRUPT_MASK__WFIFO_RE_MASK |
+                               UART_INTERRUPT_MASK__RFIFO_WE_MASK);
+       }
+
+       mutex_unlock(&tile_uart->mutex);
+       return ret;
+
+/* Unwind in reverse acquisition order; all failures report -ENXIO. */
+err_free_irq:
+       free_irq(port->irq, port);
+err_dest_irq:
+       destroy_irq(port->irq);
+err_uart_dest:
+       gxio_uart_destroy(context);
+       ret = -ENXIO;
+err:
+       mutex_unlock(&tile_uart->mutex);
+       return ret;
+}
+
+
+/*
+ * Release kernel resources if it is the last close, disable the port,
+ * free IRQ and close the port.
+ */
+static void tilegx_shutdown(struct uart_port *port)
+{
+       int err;
+       UART_INTERRUPT_MASK_t intr_mask;
+       struct tile_uart_port *tile_uart;
+       gxio_uart_context_t *context;
+       int cpu;
+
+       tile_uart = container_of(port, struct tile_uart_port, uart);
+       if (mutex_lock_interruptible(&tile_uart->mutex))
+               return;
+       context = &tile_uart->context;
+
+       /* Disable UART Tx/Rx Interrupt. */
+       intr_mask.word = gxio_uart_read(context, UART_INTERRUPT_MASK);
+       intr_mask.wfifo_re = 1;
+       intr_mask.rfifo_we = 1;
+       gxio_uart_write(context, UART_INTERRUPT_MASK, intr_mask.word);
+
+       /* Request that the hardware stop sending us interrupts. */
+       cpu = tile_uart->irq_cpu;
+       /* NOTE(review): err is ignored — confirm best-effort teardown is OK. */
+       err = gxio_uart_cfg_interrupt(context, cpu_x(cpu), cpu_y(cpu),
+                                     KERNEL_PL, -1);
+
+       if (port->irq > 0) {
+               free_irq(port->irq, port);
+               destroy_irq(port->irq);
+               port->irq = 0;
+       }
+
+       gxio_uart_destroy(context);
+
+       mutex_unlock(&tile_uart->mutex);
+}
+
+
+/*
+ * Flush the buffer.
+ * No driver-side buffering beyond the circ buffer, so nothing to do.
+ */
+static void tilegx_flush_buffer(struct uart_port *port)
+{
+       /* N/A */
+}
+
+
+/*
+ * Change the port parameters.
+ */
+static void tilegx_set_termios(struct uart_port *port,
+                              struct ktermios *termios, struct ktermios *old)
+{
+       int err;
+       UART_DIVISOR_t divisor;
+       UART_TYPE_t type;
+       unsigned int baud;
+       struct tile_uart_port *tile_uart;
+       gxio_uart_context_t *context;
+
+       tile_uart = container_of(port, struct tile_uart_port, uart);
+       if (!mutex_trylock(&tile_uart->mutex))
+               return;
+       context = &tile_uart->context;
+
+       /* Open the hypervisor device if we haven't already. */
+       if (context->fd < 0) {
+               err = gxio_uart_init(context, port->line);
+               if (err) {
+                       mutex_unlock(&tile_uart->mutex);
+                       return;
+               }
+       }
+
+       /* Read-modify-write: start from the current hardware settings. */
+       divisor.word = gxio_uart_read(context, UART_DIVISOR);
+       type.word = gxio_uart_read(context, UART_TYPE);
+
+       /* Divisor. */
+       baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 16);
+       divisor.divisor = uart_get_divisor(port, baud);
+
+       /* Byte size. */
+       if ((termios->c_cflag & CSIZE) == CS7)
+               type.dbits = UART_TYPE__DBITS_VAL_SEVEN_DBITS;
+       else
+               type.dbits = UART_TYPE__DBITS_VAL_EIGHT_DBITS;
+
+       /* Parity. */
+       if (termios->c_cflag & PARENB) {
+               /* Mark or Space parity. */
+               /* (the trailing else below binds to the inner if (CMSPAR)) */
+               if (termios->c_cflag & CMSPAR)
+                       if (termios->c_cflag & PARODD)
+                               type.ptype = UART_TYPE__PTYPE_VAL_MARK;
+                       else
+                               type.ptype = UART_TYPE__PTYPE_VAL_SPACE;
+               else if (termios->c_cflag & PARODD)
+                       type.ptype = UART_TYPE__PTYPE_VAL_ODD;
+               else
+                       type.ptype = UART_TYPE__PTYPE_VAL_EVEN;
+       } else
+               type.ptype = UART_TYPE__PTYPE_VAL_NONE;
+
+       /* Stop bits. */
+       if (termios->c_cflag & CSTOPB)
+               type.sbits = UART_TYPE__SBITS_VAL_TWO_SBITS;
+       else
+               type.sbits = UART_TYPE__SBITS_VAL_ONE_SBITS;
+
+       /* Set the uart parameters. */
+       gxio_uart_write(context, UART_DIVISOR, divisor.word);
+       gxio_uart_write(context, UART_TYPE, type.word);
+
+       mutex_unlock(&tile_uart->mutex);
+}
+
+
+/*
+ * Return string describing the specified port.
+ */
+static const char *tilegx_type(struct uart_port *port)
+{
+       if (port->type == PORT_TILEGX)
+               return DRIVER_NAME_STRING;
+       return NULL;
+}
+
+
+/*
+ * Release the resources being used by 'port'.
+ * Ports are statically allocated; there is nothing to free.
+ */
+static void tilegx_release_port(struct uart_port *port)
+{
+       /* Nothing to release. */
+}
+
+
+/*
+ * Request the resources being used by 'port'.
+ * Ports are statically allocated, so this always succeeds.
+ */
+static int tilegx_request_port(struct uart_port *port)
+{
+       /* Always present. */
+       return 0;
+}
+
+
+/*
+ * Configure/autoconfigure the port.
+ */
+static void tilegx_config_port(struct uart_port *port, int flags)
+{
+       if (flags & UART_CONFIG_TYPE)
+               port->type = PORT_TILEGX;
+}
+
+
+/*
+ * Verify the new serial_struct (for TIOCSSERIAL).
+ * Only PORT_UNKNOWN and PORT_TILEGX are acceptable port types.
+ */
+static int tilegx_verify_port(struct uart_port *port,
+                             struct serial_struct *ser)
+{
+       switch (ser->type) {
+       case PORT_UNKNOWN:
+       case PORT_TILEGX:
+               return 0;
+       default:
+               return -EINVAL;
+       }
+}
+
+#ifdef CONFIG_CONSOLE_POLL
+
+/*
+ * Console polling routines for writing and reading from the uart while
+ * in an interrupt or debug context.
+ */
+
+static int tilegx_poll_get_char(struct uart_port *port)
+{
+       UART_FIFO_COUNT_t count;
+       gxio_uart_context_t *context;
+       struct tile_uart_port *tile_uart;
+
+       tile_uart = container_of(port, struct tile_uart_port, uart);
+       context = &tile_uart->context;
+       count.word = gxio_uart_read(context, UART_FIFO_COUNT);
+       if (count.rfifo_count == 0)
+               return NO_POLL_CHAR;
+       /*
+        * Return the byte as a non-negative int: a plain (char) cast would
+        * sign-extend values >= 0x80 into negative returns, which the
+        * polling console cannot distinguish from error/sentinel values.
+        */
+       return (unsigned char)gxio_uart_read(context, UART_RECEIVE_DATA);
+}
+
+/* Write one char for the polling console.
+ * NOTE(review): no FIFO-full check here (unlike tilegx_putchar) — confirm
+ * that dropped chars are acceptable in poll context.
+ */
+static void tilegx_poll_put_char(struct uart_port *port, unsigned char c)
+{
+       gxio_uart_context_t *context;
+       struct tile_uart_port *tile_uart;
+
+       tile_uart = container_of(port, struct tile_uart_port, uart);
+       context = &tile_uart->context;
+       gxio_uart_write(context, UART_TRANSMIT_DATA, (unsigned long)c);
+}
+
+#endif /* CONFIG_CONSOLE_POLL */
+
+
+/* UART operations vector handed to the serial core via each port's ->ops. */
+static const struct uart_ops tilegx_ops = {
+       .tx_empty       = tilegx_tx_empty,
+       .set_mctrl      = tilegx_set_mctrl,
+       .get_mctrl      = tilegx_get_mctrl,
+       .stop_tx        = tilegx_stop_tx,
+       .start_tx       = tilegx_start_tx,
+       .stop_rx        = tilegx_stop_rx,
+       .enable_ms      = tilegx_enable_ms,
+       .break_ctl      = tilegx_break_ctl,
+       .startup        = tilegx_startup,
+       .shutdown       = tilegx_shutdown,
+       .flush_buffer   = tilegx_flush_buffer,
+       .set_termios    = tilegx_set_termios,
+       .type           = tilegx_type,
+       .release_port   = tilegx_release_port,
+       .request_port   = tilegx_request_port,
+       .config_port    = tilegx_config_port,
+       .verify_port    = tilegx_verify_port,
+#ifdef CONFIG_CONSOLE_POLL
+       .poll_get_char  = tilegx_poll_get_char,
+       .poll_put_char  = tilegx_poll_put_char,
+#endif
+};
+
+
+/* Initialize the static port array before registering with the serial core. */
+static void tilegx_init_ports(void)
+{
+       int i;
+       struct uart_port *port;
+
+       for (i = 0; i < TILEGX_UART_NR; i++) {
+               port = &tile_uart_ports[i].uart;
+               port->ops = &tilegx_ops;
+               port->line = i;
+               port->type = PORT_TILEGX;
+               port->uartclk = TILEGX_UART_REF_CLK;
+               port->flags = UPF_BOOT_AUTOCONF;
+
+               /* fd == -1 marks the hypervisor device as not yet opened
+                * (opened lazily in tilegx_startup / tilegx_set_termios).
+                */
+               tile_uart_ports[i].context.fd = -1;
+               mutex_init(&tile_uart_ports[i].mutex);
+       }
+}
+
+
+/* Driver registration info: ttyS major/minor range, TILEGX_UART_NR ports. */
+static struct uart_driver tilegx_uart_driver = {
+       .owner          = THIS_MODULE,
+       .driver_name    = DRIVER_NAME_STRING,
+       .dev_name       = TILEGX_UART_NAME,
+       .major          = TILEGX_UART_MAJOR,
+       .minor          = TILEGX_UART_MINOR,
+       .nr             = TILEGX_UART_NR,
+};
+
+
+/*
+ * Module init: register the driver, set default termios (115200 8N1),
+ * then add each statically-allocated port.
+ */
+static int __init tilegx_init(void)
+{
+       int i;
+       int ret;
+       struct tty_driver *tty_drv;
+
+       ret = uart_register_driver(&tilegx_uart_driver);
+       if (ret)
+               return ret;
+       tty_drv = tilegx_uart_driver.tty_driver;
+       tty_drv->init_termios.c_cflag = B115200 | CS8 | CREAD | HUPCL | CLOCAL;
+       tty_drv->init_termios.c_ispeed = 115200;
+       tty_drv->init_termios.c_ospeed = 115200;
+
+       tilegx_init_ports();
+
+       for (i = 0; i < TILEGX_UART_NR; i++) {
+               struct uart_port *port = &tile_uart_ports[i].uart;
+               /*
+                * The original code silently discarded this return value;
+                * at least report a failed port instead of pretending all
+                * ports registered.
+                */
+               ret = uart_add_one_port(&tilegx_uart_driver, port);
+               if (ret)
+                       pr_err(DRIVER_NAME_STRING
+                              ": failed to add port %d (%d)\n", i, ret);
+       }
+
+       return 0;
+}
+
+
+/* Module exit: tear down every port, then unregister the driver. */
+static void __exit tilegx_exit(void)
+{
+       int i;
+
+       for (i = 0; i < TILEGX_UART_NR; i++)
+               uart_remove_one_port(&tilegx_uart_driver,
+                                    &tile_uart_ports[i].uart);
+
+       uart_unregister_driver(&tilegx_uart_driver);
+}
+
+
+module_init(tilegx_init);
+module_exit(tilegx_exit);
+
+MODULE_AUTHOR("Tilera Corporation");
+MODULE_DESCRIPTION("TILEGx serial port driver");
+MODULE_LICENSE("GPL");
index d5cc3acecfd382d820b9eb5d5c3fbebf0f105b71..40a9fe9d3b10f0170af31c4d8845d8cc609e6bb5 100644 (file)
@@ -45,6 +45,7 @@
 #include <linux/moduleparam.h>
 #include <linux/jiffies.h>
 #include <linux/syscalls.h>
+#include <linux/of.h>
 
 #include <asm/ptrace.h>
 #include <asm/irq_regs.h>
@@ -681,6 +682,40 @@ static void sysrq_detect_reset_sequence(struct sysrq_state *state,
        }
 }
 
+#ifdef CONFIG_OF
+/*
+ * Read an optional sysrq reset key sequence from the device tree node
+ * /chosen/linux,sysrq-reset-seq: a "keyset" u32 list of keycodes, plus
+ * an optional "timeout-ms" hold time.
+ */
+static void sysrq_of_get_keyreset_config(void)
+{
+       u32 key;
+       struct device_node *np;
+       struct property *prop;
+       const __be32 *p;
+
+       np = of_find_node_by_path("/chosen/linux,sysrq-reset-seq");
+       if (!np) {
+               pr_debug("No sysrq node found\n");
+               return;
+       }
+
+       /* Reset in case a __weak definition was present */
+       sysrq_reset_seq_len = 0;
+
+       of_property_for_each_u32(np, "keyset", prop, p, key) {
+               if (key == KEY_RESERVED || key > KEY_MAX ||
+                   sysrq_reset_seq_len == SYSRQ_KEY_RESET_MAX)
+                       break;
+
+               sysrq_reset_seq[sysrq_reset_seq_len++] = (unsigned short)key;
+       }
+
+       /* Get reset timeout if any. */
+       of_property_read_u32(np, "timeout-ms", &sysrq_reset_downtime_ms);
+
+       /* of_find_node_by_path() took a reference; drop it (was leaked). */
+       of_node_put(np);
+}
+#else
+static void sysrq_of_get_keyreset_config(void)
+{
+}
+#endif
+
 static void sysrq_reinject_alt_sysrq(struct work_struct *work)
 {
        struct sysrq_state *sysrq =
@@ -914,6 +949,7 @@ static inline void sysrq_register_handler(void)
        int error;
        int i;
 
+       /* First check if a __weak interface was instantiated. */
        for (i = 0; i < ARRAY_SIZE(sysrq_reset_seq); i++) {
                key = platform_sysrq_reset_seq[i];
                if (key == KEY_RESERVED || key > KEY_MAX)
@@ -922,6 +958,12 @@ static inline void sysrq_register_handler(void)
                sysrq_reset_seq[sysrq_reset_seq_len++] = key;
        }
 
+       /*
+        * DT configuration takes precedence over anything that would
+        * have been defined via the __weak interface.
+        */
+       sysrq_of_get_keyreset_config();
+
        error = input_register_handler(&sysrq_handler);
        if (error)
                pr_err("Failed to register input handler, error %d", error);
index dbce3a9074e6ebac353ec891ecc7c6726c5359b7..a801a005540e3c3e5958b577302f40ad07b2eb1e 100644 (file)
@@ -450,11 +450,11 @@ static int fsg_lun_open(struct fsg_lun *curlun, const char *filename)
         * If we can't read the file, it's no good.
         * If we can't write the file, use it read-only.
         */
-       if (!(filp->f_op->read || filp->f_op->aio_read)) {
+       if (!file_readable(filp)) {
                LINFO(curlun, "file not readable: %s\n", filename);
                goto out;
        }
-       if (!(filp->f_op->write || filp->f_op->aio_write))
+       if (!file_writable(filp))
                ro = 1;
 
        size = i_size_read(inode->i_mapping->host);
index 11e0b79ff9d52a8d703c3b2134b8960768123246..b8a18665b1805ad8609b97cbbce5e671cfbda202 100644 (file)
@@ -260,6 +260,7 @@ int fsl_usb2_mpc5121_init(struct platform_device *pdev)
 {
        struct fsl_usb2_platform_data *pdata = pdev->dev.platform_data;
        struct clk *clk;
+       int err;
        char clk_name[10];
        int base, clk_num;
 
@@ -272,13 +273,16 @@ int fsl_usb2_mpc5121_init(struct platform_device *pdev)
                return -ENODEV;
 
        snprintf(clk_name, sizeof(clk_name), "usb%d_clk", clk_num);
-       clk = clk_get(&pdev->dev, clk_name);
+       clk = devm_clk_get(&pdev->dev, clk_name);
        if (IS_ERR(clk)) {
                dev_err(&pdev->dev, "failed to get clk\n");
                return PTR_ERR(clk);
        }
-
-       clk_enable(clk);
+       err = clk_prepare_enable(clk);
+       if (err) {
+               dev_err(&pdev->dev, "failed to enable clk\n");
+               return err;
+       }
        pdata->clk = clk;
 
        if (pdata->phy_mode == FSL_USB2_PHY_UTMI_WIDE) {
@@ -302,10 +306,8 @@ static void fsl_usb2_mpc5121_exit(struct platform_device *pdev)
 
        pdata->regs = NULL;
 
-       if (pdata->clk) {
-               clk_disable(pdata->clk);
-               clk_put(pdata->clk);
-       }
+       if (pdata->clk)
+               clk_disable_unprepare(pdata->clk);
 }
 
 static struct fsl_usb2_platform_data fsl_usb2_mpc5121_pd = {
index 842f4507883e1b1d10c99d343e35e450abf56db0..d3cb34224369759d047e147e67e42db782bf476c 100644 (file)
@@ -1352,6 +1352,68 @@ static const struct file_operations vfio_device_fops = {
        .mmap           = vfio_device_fops_mmap,
 };
 
+/**
+ * External user API, exported by symbols to be linked dynamically.
+ *
+ * The protocol includes:
+ *  1. do normal VFIO init operation:
+ *     - opening a new container;
+ *     - attaching group(s) to it;
+ *     - setting an IOMMU driver for a container.
+ * When IOMMU is set for a container, all groups in it are
+ * considered ready to use by an external user.
+ *
+ * 2. User space passes a group fd to an external user.
+ * The external user calls vfio_group_get_external_user()
+ * to verify that:
+ *     - the group is initialized;
+ *     - IOMMU is set for it.
+ * If both checks passed, vfio_group_get_external_user()
+ * increments the container user counter to prevent
+ * the VFIO group from disposal before KVM exits.
+ *
+ * 3. The external user calls vfio_external_user_iommu_id()
+ * to know an IOMMU ID.
+ *
+ * 4. When the external KVM finishes, it calls
+ * vfio_group_put_external_user() to release the VFIO group.
+ * This call decrements the container user counter.
+ */
+struct vfio_group *vfio_group_get_external_user(struct file *filep)
+{
+       struct vfio_group *group = filep->private_data;
+
+       /* Only accept fds that really are VFIO group files. */
+       if (filep->f_op != &vfio_group_fops)
+               return ERR_PTR(-EINVAL);
+
+       /* Pin the container user count; fails if it already dropped to 0. */
+       if (!atomic_inc_not_zero(&group->container_users))
+               return ERR_PTR(-EINVAL);
+
+       /* Group must be viable and its container must have an IOMMU set. */
+       if (!group->container->iommu_driver ||
+                       !vfio_group_viable(group)) {
+               atomic_dec(&group->container_users);
+               return ERR_PTR(-EINVAL);
+       }
+
+       vfio_group_get(group);
+
+       return group;
+}
+EXPORT_SYMBOL_GPL(vfio_group_get_external_user);
+
+/* Release a group obtained via vfio_group_get_external_user(). */
+void vfio_group_put_external_user(struct vfio_group *group)
+{
+       /*
+        * Drop the container user count while we still hold a group
+        * reference: vfio_group_put() may free the group, so calling it
+        * before vfio_group_try_dissolve_container() (as the original did)
+        * risked a use-after-free.
+        */
+       vfio_group_try_dissolve_container(group);
+       vfio_group_put(group);
+}
+EXPORT_SYMBOL_GPL(vfio_group_put_external_user);
+
+/* Return the IOMMU group ID for an externally-held VFIO group. */
+int vfio_external_user_iommu_id(struct vfio_group *group)
+{
+       return iommu_group_id(group->iommu_group);
+}
+EXPORT_SYMBOL_GPL(vfio_external_user_iommu_id);
+
 /**
  * Module/class support
  */
index e58cf0001cee10bbe789fdbf7d7fe94260f4eaa9..448efe01f18a173d8d560fc68387d9393d415a62 100644 (file)
@@ -13,7 +13,7 @@
 
 #include <linux/eventfd.h>
 #include <linux/vhost.h>
-#include <linux/socket.h> /* memcpy_fromiovec */
+#include <linux/uio.h>
 #include <linux/mm.h>
 #include <linux/mmu_context.h>
 #include <linux/miscdevice.h>
index 4cf1e1dd562169e52426577534333e5669508c24..9b32a999b9471f91e49c6b3fcbda8074406cd239 100644 (file)
@@ -996,6 +996,8 @@ config FB_ATMEL
        select FB_CFB_FILLRECT
        select FB_CFB_COPYAREA
        select FB_CFB_IMAGEBLIT
+       select FB_MODE_HELPERS
+       select VIDEOMODE_HELPERS
        help
          This enables support for the AT91/AT32 LCD Controller.
 
@@ -2100,13 +2102,6 @@ config GPM1040A0_320X240
         bool "Giantplus Technology GPM1040A0 320x240 Color TFT LCD"
         depends on FB_NUC900
 
-config FB_NUC900_DEBUG
-        bool "NUC900 lcd debug messages"
-        depends on FB_NUC900
-        help
-          Turn on debugging messages. Note that you can set/unset at run time
-          through sysfs
-
 config FB_SM501
        tristate "Silicon Motion SM501 framebuffer support"
        depends on FB && MFD_SM501
index effdb373b8db0ffd55d4f56523c8c379f50e4cbc..ece49d53145a575cebd058c0a0259c7f57614d9f 100644 (file)
 #include <linux/gfp.h>
 #include <linux/module.h>
 #include <linux/platform_data/atmel.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <video/of_display_timing.h>
+#include <video/videomode.h>
 
 #include <mach/cpu.h>
 #include <asm/gpio.h>
 
 #include <video/atmel_lcdc.h>
 
+/* Per-variant feature flags (e.g. at91sam9261_config below). */
+struct atmel_lcdfb_config {
+       bool have_alt_pixclock;
+       bool have_hozval;
+       bool have_intensity_bit;
+};
+
+ /* LCD Controller info data structure, stored in device platform_data */
+struct atmel_lcdfb_info {
+       spinlock_t              lock;
+       struct fb_info          *info;
+       void __iomem            *mmio;
+       int                     irq_base;
+       struct work_struct      task;
+
+       unsigned int            smem_len;
+       struct platform_device  *pdev;
+       struct clk              *bus_clk;
+       struct clk              *lcdc_clk;
+
+       struct backlight_device *backlight;
+       u8                      bl_power;
+       u8                      saved_lcdcon;
+
+       u32                     pseudo_palette[16];
+       bool                    have_intensity_bit;
+
+       /* Board data (guard_time, wiring mode, power callback, ...). */
+       struct atmel_lcdfb_pdata pdata;
+
+       struct atmel_lcdfb_config *config;
+};
+
+/* GPIO descriptor for panel power control.
+ * NOTE(review): list-linked — presumably one entry per power GPIO; confirm
+ * against the probe code that populates it (not visible here).
+ */
+struct atmel_lcdfb_power_ctrl_gpio {
+       int gpio;
+       int active_low;
+
+       struct list_head list;
+};
+
 #define lcdc_readl(sinfo, reg)         __raw_readl((sinfo)->mmio+(reg))
 #define lcdc_writel(sinfo, reg, val)   __raw_writel((val), (sinfo)->mmio+(reg))
 
 #define ATMEL_LCDC_DMA_BURST_LEN       8       /* words */
 #define ATMEL_LCDC_FIFO_SIZE           512     /* words */
 
-struct atmel_lcdfb_config {
-       bool have_alt_pixclock;
-       bool have_hozval;
-       bool have_intensity_bit;
-};
-
 static struct atmel_lcdfb_config at91sam9261_config = {
        .have_hozval            = true,
        .have_intensity_bit     = true,
@@ -248,18 +285,27 @@ static void exit_backlight(struct atmel_lcdfb_info *sinfo)
 
 static void init_contrast(struct atmel_lcdfb_info *sinfo)
 {
+       struct atmel_lcdfb_pdata *pdata = &sinfo->pdata;
+
        /* contrast pwm can be 'inverted' */
-       if (sinfo->lcdcon_pol_negative)
+       if (pdata->lcdcon_pol_negative)
                        contrast_ctr &= ~(ATMEL_LCDC_POL_POSITIVE);
 
        /* have some default contrast/backlight settings */
        lcdc_writel(sinfo, ATMEL_LCDC_CONTRAST_CTR, contrast_ctr);
        lcdc_writel(sinfo, ATMEL_LCDC_CONTRAST_VAL, ATMEL_LCDC_CVAL_DEFAULT);
 
-       if (sinfo->lcdcon_is_backlight)
+       if (pdata->lcdcon_is_backlight)
                init_backlight(sinfo);
 }
 
+/* Invoke the board-supplied panel power callback, if one was provided. */
+static inline void atmel_lcdfb_power_control(struct atmel_lcdfb_info *sinfo, int on)
+{
+       struct atmel_lcdfb_pdata *pdata = &sinfo->pdata;
+
+       if (pdata->atmel_lcdfb_power_control)
+               pdata->atmel_lcdfb_power_control(pdata, on);
+}
 
 static struct fb_fix_screeninfo atmel_lcdfb_fix __initdata = {
        .type           = FB_TYPE_PACKED_PIXELS,
@@ -299,9 +345,11 @@ static unsigned long compute_hozval(struct atmel_lcdfb_info *sinfo,
 
 static void atmel_lcdfb_stop_nowait(struct atmel_lcdfb_info *sinfo)
 {
+       struct atmel_lcdfb_pdata *pdata = &sinfo->pdata;
+
        /* Turn off the LCD controller and the DMA controller */
        lcdc_writel(sinfo, ATMEL_LCDC_PWRCON,
-                       sinfo->guard_time << ATMEL_LCDC_GUARDT_OFFSET);
+                       pdata->guard_time << ATMEL_LCDC_GUARDT_OFFSET);
 
        /* Wait for the LCDC core to become idle */
        while (lcdc_readl(sinfo, ATMEL_LCDC_PWRCON) & ATMEL_LCDC_BUSY)
@@ -321,9 +369,11 @@ static void atmel_lcdfb_stop(struct atmel_lcdfb_info *sinfo)
 
 static void atmel_lcdfb_start(struct atmel_lcdfb_info *sinfo)
 {
-       lcdc_writel(sinfo, ATMEL_LCDC_DMACON, sinfo->default_dmacon);
+       struct atmel_lcdfb_pdata *pdata = &sinfo->pdata;
+
+       lcdc_writel(sinfo, ATMEL_LCDC_DMACON, pdata->default_dmacon);
        lcdc_writel(sinfo, ATMEL_LCDC_PWRCON,
-               (sinfo->guard_time << ATMEL_LCDC_GUARDT_OFFSET)
+               (pdata->guard_time << ATMEL_LCDC_GUARDT_OFFSET)
                | ATMEL_LCDC_PWR);
 }
 
@@ -424,6 +474,7 @@ static int atmel_lcdfb_check_var(struct fb_var_screeninfo *var,
 {
        struct device *dev = info->device;
        struct atmel_lcdfb_info *sinfo = info->par;
+       struct atmel_lcdfb_pdata *pdata = &sinfo->pdata;
        unsigned long clk_value_khz;
 
        clk_value_khz = clk_get_rate(sinfo->lcdc_clk) / 1000;
@@ -510,7 +561,7 @@ static int atmel_lcdfb_check_var(struct fb_var_screeninfo *var,
                else
                        var->green.length = 6;
 
-               if (sinfo->lcd_wiring_mode == ATMEL_LCDC_WIRING_RGB) {
+               if (pdata->lcd_wiring_mode == ATMEL_LCDC_WIRING_RGB) {
                        /* RGB:5X5 mode */
                        var->red.offset = var->green.length + 5;
                        var->blue.offset = 0;
@@ -527,7 +578,7 @@ static int atmel_lcdfb_check_var(struct fb_var_screeninfo *var,
                var->transp.length = 8;
                /* fall through */
        case 24:
-               if (sinfo->lcd_wiring_mode == ATMEL_LCDC_WIRING_RGB) {
+               if (pdata->lcd_wiring_mode == ATMEL_LCDC_WIRING_RGB) {
                        /* RGB:888 mode */
                        var->red.offset = 16;
                        var->blue.offset = 0;
@@ -576,6 +627,7 @@ static void atmel_lcdfb_reset(struct atmel_lcdfb_info *sinfo)
 static int atmel_lcdfb_set_par(struct fb_info *info)
 {
        struct atmel_lcdfb_info *sinfo = info->par;
+       struct atmel_lcdfb_pdata *pdata = &sinfo->pdata;
        unsigned long hozval_linesz;
        unsigned long value;
        unsigned long clk_value_khz;
@@ -637,7 +689,7 @@ static int atmel_lcdfb_set_par(struct fb_info *info)
 
 
        /* Initialize control register 2 */
-       value = sinfo->default_lcdcon2;
+       value = pdata->default_lcdcon2;
 
        if (!(info->var.sync & FB_SYNC_HOR_HIGH_ACT))
                value |= ATMEL_LCDC_INVLINE_INVERTED;
@@ -741,6 +793,7 @@ static int atmel_lcdfb_setcolreg(unsigned int regno, unsigned int red,
                             unsigned int transp, struct fb_info *info)
 {
        struct atmel_lcdfb_info *sinfo = info->par;
+       struct atmel_lcdfb_pdata *pdata = &sinfo->pdata;
        unsigned int val;
        u32 *pal;
        int ret = 1;
@@ -777,8 +830,7 @@ static int atmel_lcdfb_setcolreg(unsigned int regno, unsigned int red,
                                 */
                        } else {
                                /* new style BGR:565 / RGB:565 */
-                               if (sinfo->lcd_wiring_mode ==
-                                   ATMEL_LCDC_WIRING_RGB) {
+                               if (pdata->lcd_wiring_mode == ATMEL_LCDC_WIRING_RGB) {
                                        val  = ((blue >> 11) & 0x001f);
                                        val |= ((red  >>  0) & 0xf800);
                                } else {
@@ -912,16 +964,187 @@ static void atmel_lcdfb_stop_clock(struct atmel_lcdfb_info *sinfo)
        clk_disable(sinfo->lcdc_clk);
 }
 
+#ifdef CONFIG_OF
+static const struct of_device_id atmel_lcdfb_dt_ids[] = {
+       { .compatible = "atmel,at91sam9261-lcdc" , .data = &at91sam9261_config, },
+       { .compatible = "atmel,at91sam9263-lcdc" , .data = &at91sam9263_config, },
+       { .compatible = "atmel,at91sam9g10-lcdc" , .data = &at91sam9g10_config, },
+       { .compatible = "atmel,at91sam9g45-lcdc" , .data = &at91sam9g45_config, },
+       { .compatible = "atmel,at91sam9g45es-lcdc" , .data = &at91sam9g45es_config, },
+       { .compatible = "atmel,at91sam9rl-lcdc" , .data = &at91sam9rl_config, },
+       { .compatible = "atmel,at32ap-lcdc" , .data = &at32ap_config, },
+       { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, atmel_lcdfb_dt_ids);
+
+static const char *atmel_lcdfb_wiring_modes[] = {
+       [ATMEL_LCDC_WIRING_BGR] = "BRG",
+       [ATMEL_LCDC_WIRING_RGB] = "RGB",
+};
+
+const int atmel_lcdfb_get_of_wiring_modes(struct device_node *np)
+{
+       const char *mode;
+       int err, i;
+
+       err = of_property_read_string(np, "atmel,lcd-wiring-mode", &mode);
+       if (err < 0)
+               return ATMEL_LCDC_WIRING_BGR;
+
+       for (i = 0; i < ARRAY_SIZE(atmel_lcdfb_wiring_modes); i++)
+               if (!strcasecmp(mode, atmel_lcdfb_wiring_modes[i]))
+                       return i;
+
+       return -ENODEV;
+}
+
+static void atmel_lcdfb_power_control_gpio(struct atmel_lcdfb_pdata *pdata, int on)
+{
+       struct atmel_lcdfb_power_ctrl_gpio *og;
+
+       list_for_each_entry(og, &pdata->pwr_gpios, list)
+               gpio_set_value(og->gpio, on);
+}
+
+static int atmel_lcdfb_of_init(struct atmel_lcdfb_info *sinfo)
+{
+       struct fb_info *info = sinfo->info;
+       struct atmel_lcdfb_pdata *pdata = &sinfo->pdata;
+       struct fb_var_screeninfo *var = &info->var;
+       struct device *dev = &sinfo->pdev->dev;
+       struct device_node *np =dev->of_node;
+       struct device_node *display_np;
+       struct device_node *timings_np;
+       struct display_timings *timings;
+       enum of_gpio_flags flags;
+       struct atmel_lcdfb_power_ctrl_gpio *og;
+       bool is_gpio_power = false;
+       int ret = -ENOENT;
+       int i, gpio;
+
+       sinfo->config = (struct atmel_lcdfb_config*)
+               of_match_device(atmel_lcdfb_dt_ids, dev)->data;
+
+       display_np = of_parse_phandle(np, "display", 0);
+       if (!display_np) {
+               dev_err(dev, "failed to find display phandle\n");
+               return -ENOENT;
+       }
+
+       ret = of_property_read_u32(display_np, "bits-per-pixel", &var->bits_per_pixel);
+       if (ret < 0) {
+               dev_err(dev, "failed to get property bits-per-pixel\n");
+               goto put_display_node;
+       }
+
+       ret = of_property_read_u32(display_np, "atmel,guard-time", &pdata->guard_time);
+       if (ret < 0) {
+               dev_err(dev, "failed to get property atmel,guard-time\n");
+               goto put_display_node;
+       }
+
+       ret = of_property_read_u32(display_np, "atmel,lcdcon2", &pdata->default_lcdcon2);
+       if (ret < 0) {
+               dev_err(dev, "failed to get property atmel,lcdcon2\n");
+               goto put_display_node;
+       }
+
+       ret = of_property_read_u32(display_np, "atmel,dmacon", &pdata->default_dmacon);
+       if (ret < 0) {
+               dev_err(dev, "failed to get property bits-per-pixel\n");
+               goto put_display_node;
+       }
+
+       ret = -ENOMEM;
+       for (i = 0; i < of_gpio_named_count(display_np, "atmel,power-control-gpio"); i++) {
+               gpio = of_get_named_gpio_flags(display_np, "atmel,power-control-gpio",
+                                              i, &flags);
+               if (gpio < 0)
+                       continue;
+
+               og = devm_kzalloc(dev, sizeof(*og), GFP_KERNEL);
+               if (!og)
+                       goto put_display_node;
+
+               og->gpio = gpio;
+               og->active_low = flags & OF_GPIO_ACTIVE_LOW;
+               is_gpio_power = true;
+               ret = devm_gpio_request(dev, gpio, "lcd-power-control-gpio");
+               if (ret) {
+                       dev_err(dev, "request gpio %d failed\n", gpio);
+                       goto put_display_node;
+               }
+
+               ret = gpio_direction_output(gpio, og->active_low);
+               if (ret) {
+                       dev_err(dev, "set direction output gpio %d failed\n", gpio);
+                       goto put_display_node;
+               }
+       }
+
+       if (is_gpio_power)
+               pdata->atmel_lcdfb_power_control = atmel_lcdfb_power_control_gpio;
+
+       ret = atmel_lcdfb_get_of_wiring_modes(display_np);
+       if (ret < 0) {
+               dev_err(dev, "invalid atmel,lcd-wiring-mode\n");
+               goto put_display_node;
+       }
+       pdata->lcd_wiring_mode = ret;
+
+       pdata->lcdcon_is_backlight = of_property_read_bool(display_np, "atmel,lcdcon-backlight");
+
+       timings = of_get_display_timings(display_np);
+       if (!timings) {
+               dev_err(dev, "failed to get display timings\n");
+               goto put_display_node;
+       }
+
+       timings_np = of_find_node_by_name(display_np, "display-timings");
+       if (!timings_np) {
+               dev_err(dev, "failed to find display-timings node\n");
+               goto put_display_node;
+       }
+
+       for (i = 0; i < of_get_child_count(timings_np); i++) {
+               struct videomode vm;
+               struct fb_videomode fb_vm;
+
+               ret = videomode_from_timings(timings, &vm, i);
+               if (ret < 0)
+                       goto put_timings_node;
+               ret = fb_videomode_from_videomode(&vm, &fb_vm);
+               if (ret < 0)
+                       goto put_timings_node;
+
+               fb_add_videomode(&fb_vm, &info->modelist);
+       }
+
+       return 0;
+
+put_timings_node:
+       of_node_put(timings_np);
+put_display_node:
+       of_node_put(display_np);
+       return ret;
+}
+#else
+static int atmel_lcdfb_of_init(struct atmel_lcdfb_info *sinfo)
+{
+       return 0;
+}
+#endif
 
 static int __init atmel_lcdfb_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
        struct fb_info *info;
        struct atmel_lcdfb_info *sinfo;
-       struct atmel_lcdfb_info *pdata_sinfo;
-       struct fb_videomode fbmode;
+       struct atmel_lcdfb_pdata *pdata = NULL;
        struct resource *regs = NULL;
        struct resource *map = NULL;
+       struct fb_modelist *modelist;
        int ret;
 
        dev_dbg(dev, "%s BEGIN\n", __func__);
@@ -934,26 +1157,35 @@ static int __init atmel_lcdfb_probe(struct platform_device *pdev)
        }
 
        sinfo = info->par;
+       sinfo->pdev = pdev;
+       sinfo->info = info;
 
-       if (dev->platform_data) {
-               pdata_sinfo = (struct atmel_lcdfb_info *)dev->platform_data;
-               sinfo->default_bpp = pdata_sinfo->default_bpp;
-               sinfo->default_dmacon = pdata_sinfo->default_dmacon;
-               sinfo->default_lcdcon2 = pdata_sinfo->default_lcdcon2;
-               sinfo->default_monspecs = pdata_sinfo->default_monspecs;
-               sinfo->atmel_lcdfb_power_control = pdata_sinfo->atmel_lcdfb_power_control;
-               sinfo->guard_time = pdata_sinfo->guard_time;
-               sinfo->smem_len = pdata_sinfo->smem_len;
-               sinfo->lcdcon_is_backlight = pdata_sinfo->lcdcon_is_backlight;
-               sinfo->lcdcon_pol_negative = pdata_sinfo->lcdcon_pol_negative;
-               sinfo->lcd_wiring_mode = pdata_sinfo->lcd_wiring_mode;
+       INIT_LIST_HEAD(&info->modelist);
+
+       if (pdev->dev.of_node) {
+               ret = atmel_lcdfb_of_init(sinfo);
+               if (ret)
+                       goto free_info;
+       } else if (dev->platform_data) {
+               struct fb_monspecs *monspecs;
+               int i;
+
+               pdata = dev->platform_data;
+               monspecs = pdata->default_monspecs;
+               sinfo->pdata = *pdata;
+
+               for (i = 0; i < monspecs->modedb_len; i++)
+                       fb_add_videomode(&monspecs->modedb[i], &info->modelist);
+
+               sinfo->config = atmel_lcdfb_get_config(pdev);
+
+               info->var.bits_per_pixel = pdata->default_bpp ? pdata->default_bpp : 16;
+               memcpy(&info->monspecs, pdata->default_monspecs, sizeof(info->monspecs));
        } else {
                dev_err(dev, "cannot get default configuration\n");
                goto free_info;
        }
-       sinfo->info = info;
-       sinfo->pdev = pdev;
-       sinfo->config = atmel_lcdfb_get_config(pdev);
+
        if (!sinfo->config)
                goto free_info;
 
@@ -962,7 +1194,6 @@ static int __init atmel_lcdfb_probe(struct platform_device *pdev)
        info->pseudo_palette = sinfo->pseudo_palette;
        info->fbops = &atmel_lcdfb_ops;
 
-       memcpy(&info->monspecs, sinfo->default_monspecs, sizeof(info->monspecs));
        info->fix = atmel_lcdfb_fix;
 
        /* Enable LCDC Clocks */
@@ -978,14 +1209,11 @@ static int __init atmel_lcdfb_probe(struct platform_device *pdev)
        }
        atmel_lcdfb_start_clock(sinfo);
 
-       ret = fb_find_mode(&info->var, info, NULL, info->monspecs.modedb,
-                       info->monspecs.modedb_len, info->monspecs.modedb,
-                       sinfo->default_bpp);
-       if (!ret) {
-               dev_err(dev, "no suitable video mode found\n");
-               goto stop_clk;
-       }
+       modelist = list_first_entry(&info->modelist,
+                       struct fb_modelist, list);
+       fb_videomode_to_var(&info->var, &modelist->mode);
 
+       atmel_lcdfb_check_var(&info->var, info);
 
        regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!regs) {
@@ -1069,18 +1297,6 @@ static int __init atmel_lcdfb_probe(struct platform_device *pdev)
                goto unregister_irqs;
        }
 
-       /*
-        * This makes sure that our colour bitfield
-        * descriptors are correctly initialised.
-        */
-       atmel_lcdfb_check_var(&info->var, info);
-
-       ret = fb_set_var(info, &info->var);
-       if (ret) {
-               dev_warn(dev, "unable to set display parameters\n");
-               goto free_cmap;
-       }
-
        dev_set_drvdata(dev, info);
 
        /*
@@ -1092,13 +1308,8 @@ static int __init atmel_lcdfb_probe(struct platform_device *pdev)
                goto reset_drvdata;
        }
 
-       /* add selected videomode to modelist */
-       fb_var_to_videomode(&fbmode, &info->var);
-       fb_add_videomode(&fbmode, &info->modelist);
-
        /* Power up the LCDC screen */
-       if (sinfo->atmel_lcdfb_power_control)
-               sinfo->atmel_lcdfb_power_control(1);
+       atmel_lcdfb_power_control(sinfo, 1);
 
        dev_info(dev, "fb%d: Atmel LCDC at 0x%08lx (mapped at %p), irq %d\n",
                       info->node, info->fix.mmio_start, sinfo->mmio, sinfo->irq_base);
@@ -1107,7 +1318,6 @@ static int __init atmel_lcdfb_probe(struct platform_device *pdev)
 
 reset_drvdata:
        dev_set_drvdata(dev, NULL);
-free_cmap:
        fb_dealloc_cmap(&info->cmap);
 unregister_irqs:
        cancel_work_sync(&sinfo->task);
@@ -1143,15 +1353,16 @@ static int __exit atmel_lcdfb_remove(struct platform_device *pdev)
        struct device *dev = &pdev->dev;
        struct fb_info *info = dev_get_drvdata(dev);
        struct atmel_lcdfb_info *sinfo;
+       struct atmel_lcdfb_pdata *pdata;
 
        if (!info || !info->par)
                return 0;
        sinfo = info->par;
+       pdata = &sinfo->pdata;
 
        cancel_work_sync(&sinfo->task);
        exit_backlight(sinfo);
-       if (sinfo->atmel_lcdfb_power_control)
-               sinfo->atmel_lcdfb_power_control(0);
+       atmel_lcdfb_power_control(sinfo, 0);
        unregister_framebuffer(info);
        atmel_lcdfb_stop_clock(sinfo);
        clk_put(sinfo->lcdc_clk);
@@ -1188,9 +1399,7 @@ static int atmel_lcdfb_suspend(struct platform_device *pdev, pm_message_t mesg)
 
        sinfo->saved_lcdcon = lcdc_readl(sinfo, ATMEL_LCDC_CONTRAST_CTR);
        lcdc_writel(sinfo, ATMEL_LCDC_CONTRAST_CTR, 0);
-       if (sinfo->atmel_lcdfb_power_control)
-               sinfo->atmel_lcdfb_power_control(0);
-
+       atmel_lcdfb_power_control(sinfo, 0);
        atmel_lcdfb_stop(sinfo);
        atmel_lcdfb_stop_clock(sinfo);
 
@@ -1204,8 +1413,7 @@ static int atmel_lcdfb_resume(struct platform_device *pdev)
 
        atmel_lcdfb_start_clock(sinfo);
        atmel_lcdfb_start(sinfo);
-       if (sinfo->atmel_lcdfb_power_control)
-               sinfo->atmel_lcdfb_power_control(1);
+       atmel_lcdfb_power_control(sinfo, 1);
        lcdc_writel(sinfo, ATMEL_LCDC_CONTRAST_CTR, sinfo->saved_lcdcon);
 
        /* Enable FIFO & DMA errors */
@@ -1228,6 +1436,7 @@ static struct platform_driver atmel_lcdfb_driver = {
        .driver         = {
                .name   = "atmel_lcdfb",
                .owner  = THIS_MODULE,
+               .of_match_table = of_match_ptr(atmel_lcdfb_dt_ids),
        },
 };
 
index a0482b567bfe2cf9af2f58d9a8e4e6697686a6b7..c7af8c45ab8a448678181f22c3074cbc7be0e89f 100644 (file)
 #define HX8357_SET_POWER_NORMAL                0xd2
 #define HX8357_SET_PANEL_RELATED       0xe9
 
+#define HX8369_SET_DISPLAY_BRIGHTNESS          0x51
+#define HX8369_WRITE_CABC_DISPLAY_VALUE                0x53
+#define HX8369_WRITE_CABC_BRIGHT_CTRL          0x55
+#define HX8369_WRITE_CABC_MIN_BRIGHTNESS       0x5e
+#define HX8369_SET_POWER                       0xb1
+#define HX8369_SET_DISPLAY_MODE                        0xb2
+#define HX8369_SET_DISPLAY_WAVEFORM_CYC                0xb4
+#define HX8369_SET_VCOM                                0xb6
+#define HX8369_SET_EXTENSION_COMMAND           0xb9
+#define HX8369_SET_GIP                         0xd5
+#define HX8369_SET_GAMMA_CURVE_RELATED         0xe0
+
 struct hx8357_data {
        unsigned                im_pins[HX8357_NUM_IM_PINS];
        unsigned                reset;
        struct spi_device       *spi;
        int                     state;
+       bool                    use_im_pins;
 };
 
 static u8 hx8357_seq_power[] = {
@@ -143,6 +156,61 @@ static u8 hx8357_seq_display_mode[] = {
        HX8357_SET_DISPLAY_MODE_RGB_INTERFACE,
 };
 
+static u8 hx8369_seq_write_CABC_min_brightness[] = {
+       HX8369_WRITE_CABC_MIN_BRIGHTNESS, 0x00,
+};
+
+static u8 hx8369_seq_write_CABC_control[] = {
+       HX8369_WRITE_CABC_DISPLAY_VALUE, 0x24,
+};
+
+static u8 hx8369_seq_set_display_brightness[] = {
+       HX8369_SET_DISPLAY_BRIGHTNESS, 0xFF,
+};
+
+static u8 hx8369_seq_write_CABC_control_setting[] = {
+       HX8369_WRITE_CABC_BRIGHT_CTRL, 0x02,
+};
+
+static u8 hx8369_seq_extension_command[] = {
+       HX8369_SET_EXTENSION_COMMAND, 0xff, 0x83, 0x69,
+};
+
+static u8 hx8369_seq_display_related[] = {
+       HX8369_SET_DISPLAY_MODE, 0x00, 0x2b, 0x03, 0x03, 0x70, 0x00,
+       0xff, 0x00, 0x00, 0x00, 0x00, 0x03, 0x03, 0x00, 0x01,
+};
+
+static u8 hx8369_seq_panel_waveform_cycle[] = {
+       HX8369_SET_DISPLAY_WAVEFORM_CYC, 0x0a, 0x1d, 0x80, 0x06, 0x02,
+};
+
+static u8 hx8369_seq_set_address_mode[] = {
+       HX8357_SET_ADDRESS_MODE, 0x00,
+};
+
+static u8 hx8369_seq_vcom[] = {
+       HX8369_SET_VCOM, 0x3e, 0x3e,
+};
+
+static u8 hx8369_seq_gip[] = {
+       HX8369_SET_GIP, 0x00, 0x01, 0x03, 0x25, 0x01, 0x02, 0x28, 0x70,
+       0x11, 0x13, 0x00, 0x00, 0x40, 0x26, 0x51, 0x37, 0x00, 0x00, 0x71,
+       0x35, 0x60, 0x24, 0x07, 0x0f, 0x04, 0x04,
+};
+
+static u8 hx8369_seq_power[] = {
+       HX8369_SET_POWER, 0x01, 0x00, 0x34, 0x03, 0x00, 0x11, 0x11, 0x32,
+       0x2f, 0x3f, 0x3f, 0x01, 0x3a, 0x01, 0xe6, 0xe6, 0xe6, 0xe6, 0xe6,
+};
+
+static u8 hx8369_seq_gamma_curve_related[] = {
+       HX8369_SET_GAMMA_CURVE_RELATED, 0x00, 0x0d, 0x19, 0x2f, 0x3b, 0x3d,
+       0x2e, 0x4a, 0x08, 0x0e, 0x0f, 0x14, 0x16, 0x14, 0x14, 0x14, 0x1e,
+       0x00, 0x0d, 0x19, 0x2f, 0x3b, 0x3d, 0x2e, 0x4a, 0x08, 0x0e, 0x0f,
+       0x14, 0x16, 0x14, 0x14, 0x14, 0x1e,
+};
+
 static int hx8357_spi_write_then_read(struct lcd_device *lcdev,
                                u8 *txbuf, u16 txlen,
                                u8 *rxbuf, u16 rxlen)
@@ -219,6 +287,10 @@ static int hx8357_enter_standby(struct lcd_device *lcdev)
        if (ret < 0)
                return ret;
 
+       /*
+        * The controller needs 120ms when entering in sleep mode before we can
+        * send the command to go off sleep mode
+        */
        msleep(120);
 
        return 0;
@@ -232,6 +304,10 @@ static int hx8357_exit_standby(struct lcd_device *lcdev)
        if (ret < 0)
                return ret;
 
+       /*
+        * The controller needs 120ms when exiting from sleep mode before we
+        * can send the command to enter in sleep mode
+        */
        msleep(120);
 
        ret = hx8357_spi_write_byte(lcdev, HX8357_SET_DISPLAY_ON);
@@ -241,18 +317,9 @@ static int hx8357_exit_standby(struct lcd_device *lcdev)
        return 0;
 }
 
-static int hx8357_lcd_init(struct lcd_device *lcdev)
+static void hx8357_lcd_reset(struct lcd_device *lcdev)
 {
        struct hx8357_data *lcd = lcd_get_data(lcdev);
-       int ret;
-
-       /*
-        * Set the interface selection pins to SPI mode, with three
-        * wires
-        */
-       gpio_set_value_cansleep(lcd->im_pins[0], 1);
-       gpio_set_value_cansleep(lcd->im_pins[1], 0);
-       gpio_set_value_cansleep(lcd->im_pins[2], 1);
 
        /* Reset the screen */
        gpio_set_value(lcd->reset, 1);
@@ -260,7 +327,25 @@ static int hx8357_lcd_init(struct lcd_device *lcdev)
        gpio_set_value(lcd->reset, 0);
        usleep_range(10000, 12000);
        gpio_set_value(lcd->reset, 1);
+
+       /* The controller needs 120ms to recover from reset */
        msleep(120);
+}
+
+static int hx8357_lcd_init(struct lcd_device *lcdev)
+{
+       struct hx8357_data *lcd = lcd_get_data(lcdev);
+       int ret;
+
+       /*
+        * Set the interface selection pins to SPI mode, with three
+        * wires
+        */
+       if (lcd->use_im_pins) {
+               gpio_set_value_cansleep(lcd->im_pins[0], 1);
+               gpio_set_value_cansleep(lcd->im_pins[1], 0);
+               gpio_set_value_cansleep(lcd->im_pins[2], 1);
+       }
 
        ret = hx8357_spi_write_array(lcdev, hx8357_seq_power,
                                ARRAY_SIZE(hx8357_seq_power));
@@ -341,6 +426,9 @@ static int hx8357_lcd_init(struct lcd_device *lcdev)
        if (ret < 0)
                return ret;
 
+       /*
+        * The controller needs 120ms to fully recover from exiting sleep mode
+        */
        msleep(120);
 
        ret = hx8357_spi_write_byte(lcdev, HX8357_SET_DISPLAY_ON);
@@ -356,6 +444,96 @@ static int hx8357_lcd_init(struct lcd_device *lcdev)
        return 0;
 }
 
+static int hx8369_lcd_init(struct lcd_device *lcdev)
+{
+       int ret;
+
+       ret = hx8357_spi_write_array(lcdev, hx8369_seq_extension_command,
+                               ARRAY_SIZE(hx8369_seq_extension_command));
+       if (ret < 0)
+               return ret;
+       usleep_range(10000, 12000);
+
+       ret = hx8357_spi_write_array(lcdev, hx8369_seq_display_related,
+                               ARRAY_SIZE(hx8369_seq_display_related));
+       if (ret < 0)
+               return ret;
+
+       ret = hx8357_spi_write_array(lcdev, hx8369_seq_panel_waveform_cycle,
+                               ARRAY_SIZE(hx8369_seq_panel_waveform_cycle));
+       if (ret < 0)
+               return ret;
+
+       ret = hx8357_spi_write_array(lcdev, hx8369_seq_set_address_mode,
+                               ARRAY_SIZE(hx8369_seq_set_address_mode));
+       if (ret < 0)
+               return ret;
+
+       ret = hx8357_spi_write_array(lcdev, hx8369_seq_vcom,
+                               ARRAY_SIZE(hx8369_seq_vcom));
+       if (ret < 0)
+               return ret;
+
+       ret = hx8357_spi_write_array(lcdev, hx8369_seq_gip,
+                               ARRAY_SIZE(hx8369_seq_gip));
+       if (ret < 0)
+               return ret;
+
+       ret = hx8357_spi_write_array(lcdev, hx8369_seq_power,
+                               ARRAY_SIZE(hx8369_seq_power));
+       if (ret < 0)
+               return ret;
+
+       ret = hx8357_spi_write_byte(lcdev, HX8357_EXIT_SLEEP_MODE);
+       if (ret < 0)
+               return ret;
+
+       /*
+        * The controller needs 120ms to fully recover from exiting sleep mode
+        */
+       msleep(120);
+
+       ret = hx8357_spi_write_array(lcdev, hx8369_seq_gamma_curve_related,
+                               ARRAY_SIZE(hx8369_seq_gamma_curve_related));
+       if (ret < 0)
+               return ret;
+
+       ret = hx8357_spi_write_byte(lcdev, HX8357_EXIT_SLEEP_MODE);
+       if (ret < 0)
+               return ret;
+       usleep_range(1000, 1200);
+
+       ret = hx8357_spi_write_array(lcdev, hx8369_seq_write_CABC_control,
+                               ARRAY_SIZE(hx8369_seq_write_CABC_control));
+       if (ret < 0)
+               return ret;
+       usleep_range(10000, 12000);
+
+       ret = hx8357_spi_write_array(lcdev,
+                       hx8369_seq_write_CABC_control_setting,
+                       ARRAY_SIZE(hx8369_seq_write_CABC_control_setting));
+       if (ret < 0)
+               return ret;
+
+       ret = hx8357_spi_write_array(lcdev,
+                       hx8369_seq_write_CABC_min_brightness,
+                       ARRAY_SIZE(hx8369_seq_write_CABC_min_brightness));
+       if (ret < 0)
+               return ret;
+       usleep_range(10000, 12000);
+
+       ret = hx8357_spi_write_array(lcdev, hx8369_seq_set_display_brightness,
+                               ARRAY_SIZE(hx8369_seq_set_display_brightness));
+       if (ret < 0)
+               return ret;
+
+       ret = hx8357_spi_write_byte(lcdev, HX8357_SET_DISPLAY_ON);
+       if (ret < 0)
+               return ret;
+
+       return 0;
+}
+
 #define POWER_IS_ON(pwr)       ((pwr) <= FB_BLANK_NORMAL)
 
 static int hx8357_set_power(struct lcd_device *lcdev, int power)
@@ -388,10 +566,24 @@ static struct lcd_ops hx8357_ops = {
        .get_power      = hx8357_get_power,
 };
 
+static const struct of_device_id hx8357_dt_ids[] = {
+       {
+               .compatible = "himax,hx8357",
+               .data = hx8357_lcd_init,
+       },
+       {
+               .compatible = "himax,hx8369",
+               .data = hx8369_lcd_init,
+       },
+       {},
+};
+MODULE_DEVICE_TABLE(of, hx8357_dt_ids);
+
 static int hx8357_probe(struct spi_device *spi)
 {
        struct lcd_device *lcdev;
        struct hx8357_data *lcd;
+       const struct of_device_id *match;
        int i, ret;
 
        lcd = devm_kzalloc(&spi->dev, sizeof(*lcd), GFP_KERNEL);
@@ -408,6 +600,10 @@ static int hx8357_probe(struct spi_device *spi)
 
        lcd->spi = spi;
 
+       match = of_match_device(hx8357_dt_ids, &spi->dev);
+       if (!match || !match->data)
+               return -EINVAL;
+
        lcd->reset = of_get_named_gpio(spi->dev.of_node, "gpios-reset", 0);
        if (!gpio_is_valid(lcd->reset)) {
                dev_err(&spi->dev, "Missing dt property: gpios-reset\n");
@@ -424,25 +620,32 @@ static int hx8357_probe(struct spi_device *spi)
                return -EINVAL;
        }
 
-       for (i = 0; i < HX8357_NUM_IM_PINS; i++) {
-               lcd->im_pins[i] = of_get_named_gpio(spi->dev.of_node,
-                                               "im-gpios", i);
-               if (lcd->im_pins[i] == -EPROBE_DEFER) {
-                       dev_info(&spi->dev, "GPIO requested is not here yet, deferring the probe\n");
-                       return -EPROBE_DEFER;
-               }
-               if (!gpio_is_valid(lcd->im_pins[i])) {
-                       dev_err(&spi->dev, "Missing dt property: im-gpios\n");
-                       return -EINVAL;
-               }
-
-               ret = devm_gpio_request_one(&spi->dev, lcd->im_pins[i],
-                                       GPIOF_OUT_INIT_LOW, "im_pins");
-               if (ret) {
-                       dev_err(&spi->dev, "failed to request gpio %d: %d\n",
-                               lcd->im_pins[i], ret);
-                       return -EINVAL;
+       if (of_find_property(spi->dev.of_node, "im-gpios", NULL)) {
+               lcd->use_im_pins = 1;
+
+               for (i = 0; i < HX8357_NUM_IM_PINS; i++) {
+                       lcd->im_pins[i] = of_get_named_gpio(spi->dev.of_node,
+                                                           "im-gpios", i);
+                       if (lcd->im_pins[i] == -EPROBE_DEFER) {
+                               dev_info(&spi->dev, "GPIO requested is not here yet, deferring the probe\n");
+                               return -EPROBE_DEFER;
+                       }
+                       if (!gpio_is_valid(lcd->im_pins[i])) {
+                               dev_err(&spi->dev, "Missing dt property: im-gpios\n");
+                               return -EINVAL;
+                       }
+
+                       ret = devm_gpio_request_one(&spi->dev, lcd->im_pins[i],
+                                                   GPIOF_OUT_INIT_LOW,
+                                                   "im_pins");
+                       if (ret) {
+                               dev_err(&spi->dev, "failed to request gpio %d: %d\n",
+                                       lcd->im_pins[i], ret);
+                               return -EINVAL;
+                       }
                }
+       } else {
+               lcd->use_im_pins = 0;
        }
 
        lcdev = lcd_device_register("mxsfb", &spi->dev, lcd, &hx8357_ops);
@@ -452,7 +655,9 @@ static int hx8357_probe(struct spi_device *spi)
        }
        spi_set_drvdata(spi, lcdev);
 
-       ret = hx8357_lcd_init(lcdev);
+       hx8357_lcd_reset(lcdev);
+
+       ret = ((int (*)(struct lcd_device *))match->data)(lcdev);
        if (ret) {
                dev_err(&spi->dev, "Couldn't initialize panel\n");
                goto init_error;
@@ -475,12 +680,6 @@ static int hx8357_remove(struct spi_device *spi)
        return 0;
 }
 
-static const struct of_device_id hx8357_dt_ids[] = {
-       { .compatible = "himax,hx8357" },
-       {},
-};
-MODULE_DEVICE_TABLE(of, hx8357_dt_ids);
-
 static struct spi_driver hx8357_driver = {
        .probe  = hx8357_probe,
        .remove = hx8357_remove,
index a0e1e02bdc2e1f1b696bfef26ccdc8d794dd3e34..c0b41f13bd4aea01e0156ff11c52e3b585be628d 100644 (file)
@@ -246,7 +246,7 @@ static int lp855x_bl_update_status(struct backlight_device *bl)
 {
        struct lp855x *lp = bl_get_data(bl);
 
-       if (bl->props.state & BL_CORE_SUSPENDED)
+       if (bl->props.state & (BL_CORE_SUSPENDED | BL_CORE_FBBLANK))
                bl->props.brightness = 0;
 
        if (lp->mode == PWM_BASED) {
index 40178338b61994a024482db29107bd5efaa9454f..635d5690dd5a3a78b548e1ebd47dc5403f6b393c 100644 (file)
@@ -22,6 +22,7 @@
  */
 
 #include <linux/bitops.h>
+#include <linux/bug.h>
 #include <linux/errno.h>
 #include <linux/export.h>
 #include <linux/hdmi.h>
@@ -52,7 +53,7 @@ int hdmi_avi_infoframe_init(struct hdmi_avi_infoframe *frame)
 
        frame->type = HDMI_INFOFRAME_TYPE_AVI;
        frame->version = 2;
-       frame->length = 13;
+       frame->length = HDMI_AVI_INFOFRAME_SIZE;
 
        return 0;
 }
@@ -83,7 +84,7 @@ ssize_t hdmi_avi_infoframe_pack(struct hdmi_avi_infoframe *frame, void *buffer,
        if (size < length)
                return -ENOSPC;
 
-       memset(buffer, 0, length);
+       memset(buffer, 0, size);
 
        ptr[0] = frame->type;
        ptr[1] = frame->version;
@@ -151,7 +152,7 @@ int hdmi_spd_infoframe_init(struct hdmi_spd_infoframe *frame,
 
        frame->type = HDMI_INFOFRAME_TYPE_SPD;
        frame->version = 1;
-       frame->length = 25;
+       frame->length = HDMI_SPD_INFOFRAME_SIZE;
 
        strncpy(frame->vendor, vendor, sizeof(frame->vendor));
        strncpy(frame->product, product, sizeof(frame->product));
@@ -185,7 +186,7 @@ ssize_t hdmi_spd_infoframe_pack(struct hdmi_spd_infoframe *frame, void *buffer,
        if (size < length)
                return -ENOSPC;
 
-       memset(buffer, 0, length);
+       memset(buffer, 0, size);
 
        ptr[0] = frame->type;
        ptr[1] = frame->version;
@@ -218,7 +219,7 @@ int hdmi_audio_infoframe_init(struct hdmi_audio_infoframe *frame)
 
        frame->type = HDMI_INFOFRAME_TYPE_AUDIO;
        frame->version = 1;
-       frame->length = 10;
+       frame->length = HDMI_AUDIO_INFOFRAME_SIZE;
 
        return 0;
 }
@@ -250,7 +251,7 @@ ssize_t hdmi_audio_infoframe_pack(struct hdmi_audio_infoframe *frame,
        if (size < length)
                return -ENOSPC;
 
-       memset(buffer, 0, length);
+       memset(buffer, 0, size);
 
        if (frame->channels >= 2)
                channels = frame->channels - 1;
@@ -307,7 +308,7 @@ ssize_t hdmi_vendor_infoframe_pack(struct hdmi_vendor_infoframe *frame,
        if (size < length)
                return -ENOSPC;
 
-       memset(buffer, 0, length);
+       memset(buffer, 0, size);
 
        ptr[0] = frame->type;
        ptr[1] = frame->version;
@@ -321,3 +322,45 @@ ssize_t hdmi_vendor_infoframe_pack(struct hdmi_vendor_infoframe *frame,
        return length;
 }
 EXPORT_SYMBOL(hdmi_vendor_infoframe_pack);
+
+/**
+ * hdmi_infoframe_pack() - write a HDMI infoframe to binary buffer
+ * @frame: HDMI infoframe
+ * @buffer: destination buffer
+ * @size: size of buffer
+ *
+ * Packs the information contained in the @frame structure into a binary
+ * representation that can be written into the corresponding controller
+ * registers. Also computes the checksum as required by section 5.3.5 of
+ * the HDMI 1.4 specification.
+ *
+ * Returns the number of bytes packed into the binary buffer or a negative
+ * error code on failure.
+ */
+ssize_t
+hdmi_infoframe_pack(union hdmi_infoframe *frame, void *buffer, size_t size)
+{
+       ssize_t length;
+
+       switch (frame->any.type) {
+       case HDMI_INFOFRAME_TYPE_AVI:
+               length = hdmi_avi_infoframe_pack(&frame->avi, buffer, size);
+               break;
+       case HDMI_INFOFRAME_TYPE_SPD:
+               length = hdmi_spd_infoframe_pack(&frame->spd, buffer, size);
+               break;
+       case HDMI_INFOFRAME_TYPE_AUDIO:
+               length = hdmi_audio_infoframe_pack(&frame->audio, buffer, size);
+               break;
+       case HDMI_INFOFRAME_TYPE_VENDOR:
+               length = hdmi_vendor_infoframe_pack(&frame->vendor,
+                                                   buffer, size);
+               break;
+       default:
+               WARN(1, "Bad infoframe type %d\n", frame->any.type);
+               length = -EINVAL;
+       }
+
+       return length;
+}
+EXPORT_SYMBOL(hdmi_infoframe_pack);
index 401a56e250bd40e3ba484c403ff1975357e202e9..245652911650df88bf5a3d1c05e99599e0e420a2 100644 (file)
@@ -2029,10 +2029,9 @@ static int matroxfb_probe(struct pci_dev* pdev, const struct pci_device_id* dumm
                return -1;
        }
 
-       minfo = kmalloc(sizeof(*minfo), GFP_KERNEL);
+       minfo = kzalloc(sizeof(*minfo), GFP_KERNEL);
        if (!minfo)
                return -1;
-       memset(minfo, 0, sizeof(*minfo));
 
        minfo->pcidev = pdev;
        minfo->dead = 0;
index dc09ebe4aba53d078881b28e123bc072142f423c..c2d3514f5b197ece7cb8b38a66edca11cd38a17e 100644 (file)
@@ -46,7 +46,6 @@
 #include <linux/clk.h>
 #include <linux/dma-mapping.h>
 #include <linux/io.h>
-#include <linux/pinctrl/consumer.h>
 #include <linux/fb.h>
 #include <linux/regulator/consumer.h>
 #include <video/of_display_timing.h>
@@ -851,7 +850,6 @@ static int mxsfb_probe(struct platform_device *pdev)
        struct mxsfb_info *host;
        struct fb_info *fb_info;
        struct fb_modelist *modelist;
-       struct pinctrl *pinctrl;
        int ret;
 
        if (of_id)
@@ -882,12 +880,6 @@ static int mxsfb_probe(struct platform_device *pdev)
 
        host->devdata = &mxsfb_devdata[pdev->id_entry->driver_data];
 
-       pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
-       if (IS_ERR(pinctrl)) {
-               ret = PTR_ERR(pinctrl);
-               goto fb_release;
-       }
-
        host->clk = devm_clk_get(&host->pdev->dev, NULL);
        if (IS_ERR(host->clk)) {
                ret = PTR_ERR(host->clk);
index e242ed85cb07a6985eb1dd5ded7dbb064d924b40..3dfe00956a4f156b8c0261c03186efda84187c87 100644 (file)
@@ -779,16 +779,14 @@ void ti_hdmi_4xxx_basic_configure(struct hdmi_ip_data *ip_data)
        struct omap_video_timings video_timing;
        struct hdmi_video_format video_format;
        /* HDMI core */
-       struct hdmi_core_infoframe_avi avi_cfg = ip_data->avi_cfg;
+       struct hdmi_core_infoframe_avi *avi_cfg = &ip_data->avi_cfg;
        struct hdmi_core_video_config v_core_cfg;
        struct hdmi_core_packet_enable_repeat repeat_cfg;
        struct hdmi_config *cfg = &ip_data->cfg;
 
        hdmi_wp_init(&video_timing, &video_format);
 
-       hdmi_core_init(&v_core_cfg,
-               &avi_cfg,
-               &repeat_cfg);
+       hdmi_core_init(&v_core_cfg, avi_cfg, &repeat_cfg);
 
        hdmi_wp_video_init_format(&video_format, &video_timing, cfg);
 
@@ -822,24 +820,24 @@ void ti_hdmi_4xxx_basic_configure(struct hdmi_ip_data *ip_data)
         * configure packet
         * info frame video see doc CEA861-D page 65
         */
-       avi_cfg.db1_format = HDMI_INFOFRAME_AVI_DB1Y_RGB;
-       avi_cfg.db1_active_info =
-               HDMI_INFOFRAME_AVI_DB1A_ACTIVE_FORMAT_OFF;
-       avi_cfg.db1_bar_info_dv = HDMI_INFOFRAME_AVI_DB1B_NO;
-       avi_cfg.db1_scan_info = HDMI_INFOFRAME_AVI_DB1S_0;
-       avi_cfg.db2_colorimetry = HDMI_INFOFRAME_AVI_DB2C_NO;
-       avi_cfg.db2_aspect_ratio = HDMI_INFOFRAME_AVI_DB2M_NO;
-       avi_cfg.db2_active_fmt_ar = HDMI_INFOFRAME_AVI_DB2R_SAME;
-       avi_cfg.db3_itc = HDMI_INFOFRAME_AVI_DB3ITC_NO;
-       avi_cfg.db3_ec = HDMI_INFOFRAME_AVI_DB3EC_XVYUV601;
-       avi_cfg.db3_q_range = HDMI_INFOFRAME_AVI_DB3Q_DEFAULT;
-       avi_cfg.db3_nup_scaling = HDMI_INFOFRAME_AVI_DB3SC_NO;
-       avi_cfg.db4_videocode = cfg->cm.code;
-       avi_cfg.db5_pixel_repeat = HDMI_INFOFRAME_AVI_DB5PR_NO;
-       avi_cfg.db6_7_line_eoftop = 0;
-       avi_cfg.db8_9_line_sofbottom = 0;
-       avi_cfg.db10_11_pixel_eofleft = 0;
-       avi_cfg.db12_13_pixel_sofright = 0;
+       avi_cfg->db1_format = HDMI_INFOFRAME_AVI_DB1Y_RGB;
+       avi_cfg->db1_active_info =
+                       HDMI_INFOFRAME_AVI_DB1A_ACTIVE_FORMAT_OFF;
+       avi_cfg->db1_bar_info_dv = HDMI_INFOFRAME_AVI_DB1B_NO;
+       avi_cfg->db1_scan_info = HDMI_INFOFRAME_AVI_DB1S_0;
+       avi_cfg->db2_colorimetry = HDMI_INFOFRAME_AVI_DB2C_NO;
+       avi_cfg->db2_aspect_ratio = HDMI_INFOFRAME_AVI_DB2M_NO;
+       avi_cfg->db2_active_fmt_ar = HDMI_INFOFRAME_AVI_DB2R_SAME;
+       avi_cfg->db3_itc = HDMI_INFOFRAME_AVI_DB3ITC_NO;
+       avi_cfg->db3_ec = HDMI_INFOFRAME_AVI_DB3EC_XVYUV601;
+       avi_cfg->db3_q_range = HDMI_INFOFRAME_AVI_DB3Q_DEFAULT;
+       avi_cfg->db3_nup_scaling = HDMI_INFOFRAME_AVI_DB3SC_NO;
+       avi_cfg->db4_videocode = cfg->cm.code;
+       avi_cfg->db5_pixel_repeat = HDMI_INFOFRAME_AVI_DB5PR_NO;
+       avi_cfg->db6_7_line_eoftop = 0;
+       avi_cfg->db8_9_line_sofbottom = 0;
+       avi_cfg->db10_11_pixel_eofleft = 0;
+       avi_cfg->db12_13_pixel_sofright = 0;
 
        hdmi_core_aux_infoframe_avi_config(ip_data);
 
index 6285b97184510726cc3900b2092584aaae8fc930..1446c49fe6afff693797c08194e44bb377d6ae58 100644 (file)
@@ -32,8 +32,8 @@ MODULE_DESCRIPTION("Display Output Switcher Lowlevel Control Abstraction");
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Luming Yu <luming.yu@intel.com>");
 
-static ssize_t video_output_show_state(struct device *dev,
-                                      struct device_attribute *attr, char *buf)
+static ssize_t state_show(struct device *dev, struct device_attribute *attr,
+                         char *buf)
 {
        ssize_t ret_size = 0;
        struct output_device *od = to_output_device(dev);
@@ -42,9 +42,8 @@ static ssize_t video_output_show_state(struct device *dev,
        return ret_size;
 }
 
-static ssize_t video_output_store_state(struct device *dev,
-                                       struct device_attribute *attr,
-                                       const char *buf,size_t count)
+static ssize_t state_store(struct device *dev, struct device_attribute *attr,
+                          const char *buf,size_t count)
 {
        char *endp;
        struct output_device *od = to_output_device(dev);
@@ -62,6 +61,7 @@ static ssize_t video_output_store_state(struct device *dev,
        }
        return count;
 }
+static DEVICE_ATTR_RW(state);
 
 static void video_output_release(struct device *dev)
 {
@@ -69,16 +69,16 @@ static void video_output_release(struct device *dev)
        kfree(od);
 }
 
-static struct device_attribute video_output_attributes[] = {
-       __ATTR(state, 0644, video_output_show_state, video_output_store_state),
-       __ATTR_NULL,
+static struct attribute *video_output_attrs[] = {
+       &dev_attr_state.attr,
+       NULL,
 };
-
+ATTRIBUTE_GROUPS(video_output);
 
 static struct class video_output_class = {
        .name = "video_output",
        .dev_release = video_output_release,
-       .dev_attrs = video_output_attributes,
+       .dev_groups = video_output_groups,
 };
 
 struct output_device *video_output_register(const char *name,
index e2e9e3e61b72ad5dc72f0cd3cde1aeb1c36a2b2a..f0154824582b514c55f7bcc1951dd3523fba7bfb 100644 (file)
@@ -84,6 +84,7 @@ struct simplefb_format {
 
 static struct simplefb_format simplefb_formats[] = {
        { "r5g6b5", 16, {11, 5}, {5, 6}, {0, 5}, {0, 0} },
+       { "a8b8g8r8", 32, {0, 8}, {8, 8}, {16, 8}, {24, 8} },
 };
 
 struct simplefb_params {
index de7e4f497222ffc858d2df762c27003c08874722..03f2a46cb3f44cd800ebaec9446350973ca12755 100644 (file)
@@ -784,6 +784,13 @@ static int hpwdt_init_one(struct pci_dev *dev,
 {
        int retval;
 
+
+       /*
+        * Ignore all auxilary iLO devices with the following PCI ID
+        */
+       if (dev->subsystem_device == 0x1979)
+               return -ENODEV;
+
        /*
         * Check if we can do NMI decoding or not
         */
index 119d42a2bf57808135462d70606bd650ce525081..90307c0b630c14b10faf01eb1b764e6578cbc72e 100644 (file)
 #include <asm/xen/hypercall.h>
 #include <asm/xen/hypervisor.h>
 
-int xen_acpi_notify_hypervisor_state(u8 sleep_state,
-                                    u32 pm1a_cnt, u32 pm1b_cnt)
+static int xen_acpi_notify_hypervisor_state(u8 sleep_state,
+                                           u32 val_a, u32 val_b,
+                                           bool extended)
 {
+       unsigned int bits = extended ? 8 : 16;
+
        struct xen_platform_op op = {
                .cmd = XENPF_enter_acpi_sleep,
                .interface_version = XENPF_INTERFACE_VERSION,
-               .u = {
-                       .enter_acpi_sleep = {
-                               .pm1a_cnt_val = (u16)pm1a_cnt,
-                               .pm1b_cnt_val = (u16)pm1b_cnt,
-                               .sleep_state = sleep_state,
-                       },
+               .u.enter_acpi_sleep = {
+                       .val_a = (u16)val_a,
+                       .val_b = (u16)val_b,
+                       .sleep_state = sleep_state,
+                       .flags = extended ? XENPF_ACPI_SLEEP_EXTENDED : 0,
                },
        };
 
-       if ((pm1a_cnt & 0xffff0000) || (pm1b_cnt & 0xffff0000)) {
-               WARN(1, "Using more than 16bits of PM1A/B 0x%x/0x%x!"
-                    "Email xen-devel@lists.xensource.com  Thank you.\n", \
-                    pm1a_cnt, pm1b_cnt);
+       if (WARN((val_a & (~0 << bits)) || (val_b & (~0 << bits)),
+                "Using more than %u bits of sleep control values %#x/%#x!"
+                "Email xen-devel@lists.xen.org - Thank you.\n", \
+                bits, val_a, val_b))
                return -1;
-       }
 
        HYPERVISOR_dom0_op(&op);
        return 1;
 }
+
+int xen_acpi_notify_hypervisor_sleep(u8 sleep_state,
+                                    u32 pm1a_cnt, u32 pm1b_cnt)
+{
+       return xen_acpi_notify_hypervisor_state(sleep_state, pm1a_cnt,
+                                               pm1b_cnt, false);
+}
+
+int xen_acpi_notify_hypervisor_extended_sleep(u8 sleep_state,
+                                    u32 val_a, u32 val_b)
+{
+       return xen_acpi_notify_hypervisor_state(sleep_state, val_a,
+                                               val_b, true);
+}
index 9ff073f4090afee750e4058129427d4bd6f6117f..da0821bc05b9d2f56fb56e63c99a1b58ae57895d 100644 (file)
@@ -241,9 +241,8 @@ static int v9fs_launder_page(struct page *page)
  * v9fs_direct_IO - 9P address space operation for direct I/O
  * @rw: direction (read or write)
  * @iocb: target I/O control block
- * @iov: array of vectors that define I/O buffer
+ * @iter: array of vectors that define I/O buffer
  * @pos: offset in file to begin the operation
- * @nr_segs: size of iovec array
  *
  * The presence of v9fs_direct_IO() in the address space ops vector
  * allowes open() O_DIRECT flags which would have failed otherwise.
@@ -252,13 +251,12 @@ static int v9fs_launder_page(struct page *page)
  * the VFS gets them, so this method should never be called.
  *
  * Direct IO is not 'yet' supported in the cached mode. Hence when
- * this routine is called through generic_file_aio_read(), the read/write fails
- * with an error.
+ * this routine is called through generic_file_read_iter(), the read/write
+ * fails with an error.
  *
  */
 static ssize_t
-v9fs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
-              loff_t pos, unsigned long nr_segs)
+v9fs_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter, loff_t pos)
 {
        /*
         * FIXME
@@ -267,7 +265,7 @@ v9fs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
         */
        p9_debug(P9_DEBUG_VFS, "v9fs_direct_IO: v9fs_direct_IO (%s) off/no(%lld/%lu) EINVAL\n",
                 iocb->ki_filp->f_path.dentry->d_name.name,
-                (long long)pos, nr_segs);
+                (long long)pos, iter->nr_segs);
 
        return -EINVAL;
 }
index d384a8b77ee8a705fe9763de2257b3c43224b816..18d02934fce92288fd34a8c4a7f4fe87696ec5ff 100644 (file)
@@ -743,8 +743,8 @@ const struct file_operations v9fs_cached_file_operations = {
        .llseek = generic_file_llseek,
        .read = v9fs_cached_file_read,
        .write = v9fs_cached_file_write,
-       .aio_read = generic_file_aio_read,
-       .aio_write = generic_file_aio_write,
+       .read_iter = generic_file_read_iter,
+       .write_iter = generic_file_write_iter,
        .open = v9fs_file_open,
        .release = v9fs_dir_release,
        .lock = v9fs_file_lock,
@@ -756,8 +756,8 @@ const struct file_operations v9fs_cached_file_operations_dotl = {
        .llseek = generic_file_llseek,
        .read = v9fs_cached_file_read,
        .write = v9fs_cached_file_write,
-       .aio_read = generic_file_aio_read,
-       .aio_write = generic_file_aio_write,
+       .read_iter = generic_file_read_iter,
+       .write_iter = generic_file_write_iter,
        .open = v9fs_file_open,
        .release = v9fs_dir_release,
        .lock = v9fs_file_lock_dotl,
index 25b018efb8abd8bb82189daf711a9fa4372f3c54..94de6d1482e2e076c4b3d10451c39fc245cce6ef 100644 (file)
@@ -146,7 +146,7 @@ static umode_t p9mode2unixmode(struct v9fs_session_info *v9ses,
                char type = 0, ext[32];
                int major = -1, minor = -1;
 
-               strncpy(ext, stat->extension, sizeof(ext));
+               strlcpy(ext, stat->extension, sizeof(ext));
                sscanf(ext, "%c %u %u", &type, &major, &minor);
                switch (type) {
                case 'c':
@@ -1186,7 +1186,7 @@ v9fs_stat2inode(struct p9_wstat *stat, struct inode *inode,
                         * this even with .u extension. So check
                         * for non NULL stat->extension
                         */
-                       strncpy(ext, stat->extension, sizeof(ext));
+                       strlcpy(ext, stat->extension, sizeof(ext));
                        /* HARDLINKCOUNT %u */
                        sscanf(ext, "%13s %u", tag_name, &i_nlink);
                        if (!strncmp(tag_name, "HARDLINKCOUNT", 13))
index 4fe6df3ec28fe5392b680e853b0fbfef6313179a..1afa0e020082f8279ce3f8708131c4f74ba49941 100644 (file)
@@ -11,7 +11,7 @@ obj-y :=      open.o read_write.o file_table.o super.o \
                attr.o bad_inode.o file.o filesystems.o namespace.o \
                seq_file.o xattr.o libfs.o fs-writeback.o \
                pnode.o splice.o sync.o utimes.o \
-               stack.o fs_struct.o statfs.o
+               stack.o fs_struct.o statfs.o iov-iter.o
 
 ifeq ($(CONFIG_BLOCK),y)
 obj-y +=       buffer.o bio.o block_dev.o direct-io.o mpage.o ioprio.o
index a36da5382b40dc9c09ab6aa59762d9c1a7808b72..da1e02161ac3386da0489d32834ae932c46bac72 100644 (file)
 const struct file_operations adfs_file_operations = {
        .llseek         = generic_file_llseek,
        .read           = do_sync_read,
-       .aio_read       = generic_file_aio_read,
+       .read_iter      = generic_file_read_iter,
        .mmap           = generic_file_mmap,
        .fsync          = generic_file_fsync,
        .write          = do_sync_write,
-       .aio_write      = generic_file_aio_write,
+       .write_iter     = generic_file_write_iter,
        .splice_read    = generic_file_splice_read,
 };
 
index af3261b781021f0a51dda6b73b5fb5b3d051baa7..d09a2dbb5cd2b804ee6de2611a879f51a41e196f 100644 (file)
@@ -28,9 +28,9 @@ static int affs_file_release(struct inode *inode, struct file *filp);
 const struct file_operations affs_file_operations = {
        .llseek         = generic_file_llseek,
        .read           = do_sync_read,
-       .aio_read       = generic_file_aio_read,
+       .read_iter      = generic_file_read_iter,
        .write          = do_sync_write,
-       .aio_write      = generic_file_aio_write,
+       .write_iter     = generic_file_write_iter,
        .mmap           = generic_file_mmap,
        .open           = affs_file_open,
        .release        = affs_file_release,
index 66d50fe2ee459a887511381e8e375db72d2bf1f3..3b71622e40f44a7f8d98e4794a688dd224713029 100644 (file)
@@ -33,8 +33,8 @@ const struct file_operations afs_file_operations = {
        .llseek         = generic_file_llseek,
        .read           = do_sync_read,
        .write          = do_sync_write,
-       .aio_read       = generic_file_aio_read,
-       .aio_write      = afs_file_write,
+       .read_iter      = generic_file_read_iter,
+       .write_iter     = afs_file_write,
        .mmap           = generic_file_readonly_mmap,
        .splice_read    = generic_file_splice_read,
        .fsync          = afs_fsync,
index a306bb6d88d9937badc2a1df1462d3bb0f469829..9c048ffac900f0fc89ecdb045449ef38d907c875 100644 (file)
@@ -747,8 +747,7 @@ extern int afs_write_end(struct file *file, struct address_space *mapping,
 extern int afs_writepage(struct page *, struct writeback_control *);
 extern int afs_writepages(struct address_space *, struct writeback_control *);
 extern void afs_pages_written_back(struct afs_vnode *, struct afs_call *);
-extern ssize_t afs_file_write(struct kiocb *, const struct iovec *,
-                             unsigned long, loff_t);
+extern ssize_t afs_file_write(struct kiocb *, struct iov_iter *, loff_t);
 extern int afs_writeback_all(struct afs_vnode *);
 extern int afs_fsync(struct file *, loff_t, loff_t, int);
 
index a890db4b9898fc1d888c5e7285da55db85e4da54..9fa2f596430accaecd06970f83209dba6e80425e 100644 (file)
@@ -625,15 +625,14 @@ void afs_pages_written_back(struct afs_vnode *vnode, struct afs_call *call)
 /*
  * write to an AFS file
  */
-ssize_t afs_file_write(struct kiocb *iocb, const struct iovec *iov,
-                      unsigned long nr_segs, loff_t pos)
+ssize_t afs_file_write(struct kiocb *iocb, struct iov_iter *iter, loff_t pos)
 {
        struct afs_vnode *vnode = AFS_FS_I(file_inode(iocb->ki_filp));
        ssize_t result;
-       size_t count = iov_length(iov, nr_segs);
+       size_t count = iov_iter_count(iter);
 
        _enter("{%x.%u},{%zu},%lu,",
-              vnode->fid.vid, vnode->fid.vnode, count, nr_segs);
+              vnode->fid.vid, vnode->fid.vnode, count, iter->nr_segs);
 
        if (IS_SWAPFILE(&vnode->vfs_inode)) {
                printk(KERN_INFO
@@ -644,7 +643,7 @@ ssize_t afs_file_write(struct kiocb *iocb, const struct iovec *iov,
        if (!count)
                return 0;
 
-       result = generic_file_aio_write(iocb, iov, nr_segs, pos);
+       result = generic_file_write_iter(iocb, iter, pos);
        if (IS_ERR_VALUE(result)) {
                _leave(" = %zd", result);
                return result;
index 9b5ca113741948d95330d866e8e4e2cce113f3e5..8ef3157c7943ee6c7b863bd4513c8b0f18367275 100644 (file)
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -596,6 +596,10 @@ void aio_complete(struct kiocb *iocb, long res, long res2)
                atomic_set(&iocb->ki_users, 0);
                wake_up_process(iocb->ki_obj.tsk);
                return;
+       } else if (is_kernel_kiocb(iocb)) {
+               iocb->ki_obj.complete(iocb->ki_user_data, res);
+               aio_kernel_free(iocb);
+               return;
        }
 
        /*
@@ -987,6 +991,48 @@ static ssize_t aio_setup_single_vector(int rw, struct kiocb *kiocb)
        return 0;
 }
 
+static ssize_t aio_read_iter(struct kiocb *iocb, struct iov_iter *iter)
+{
+       struct file *file = iocb->ki_filp;
+       ssize_t ret;
+
+       if (unlikely(!is_kernel_kiocb(iocb)))
+               return -EINVAL;
+
+       if (unlikely(!(file->f_mode & FMODE_READ)))
+               return -EBADF;
+
+       ret = security_file_permission(file, MAY_READ);
+       if (unlikely(ret))
+               return ret;
+
+       if (!file->f_op->read_iter)
+               return -EINVAL;
+
+       return file->f_op->read_iter(iocb, iter, iocb->ki_pos);
+}
+
+static ssize_t aio_write_iter(struct kiocb *iocb, struct iov_iter *iter)
+{
+       struct file *file = iocb->ki_filp;
+       ssize_t ret;
+
+       if (unlikely(!is_kernel_kiocb(iocb)))
+               return -EINVAL;
+
+       if (unlikely(!(file->f_mode & FMODE_WRITE)))
+               return -EBADF;
+
+       ret = security_file_permission(file, MAY_WRITE);
+       if (unlikely(ret))
+               return ret;
+
+       if (!file->f_op->write_iter)
+               return -EINVAL;
+
+       return file->f_op->write_iter(iocb, iter, iocb->ki_pos);
+}
+
 /*
  * aio_setup_iocb:
  *     Performs the initial checks and aio retry method
@@ -1005,14 +1051,14 @@ static ssize_t aio_run_iocb(struct kiocb *req, bool compat)
        case IOCB_CMD_PREADV:
                mode    = FMODE_READ;
                rw      = READ;
-               rw_op   = file->f_op->aio_read;
+               rw_op   = do_aio_read;
                goto rw_common;
 
        case IOCB_CMD_PWRITE:
        case IOCB_CMD_PWRITEV:
                mode    = FMODE_WRITE;
                rw      = WRITE;
-               rw_op   = file->f_op->aio_write;
+               rw_op   = do_aio_write;
                goto rw_common;
 rw_common:
                if (unlikely(!(file->f_mode & mode)))
@@ -1038,6 +1084,14 @@ rw_common:
                ret = aio_rw_vect_retry(req, rw, rw_op);
                break;
 
+       case IOCB_CMD_READ_ITER:
+               ret = aio_read_iter(req, req->ki_iter);
+               break;
+
+       case IOCB_CMD_WRITE_ITER:
+               ret = aio_write_iter(req, req->ki_iter);
+               break;
+
        case IOCB_CMD_FDSYNC:
                if (!file->f_op->aio_fsync)
                        return -EINVAL;
@@ -1072,6 +1126,85 @@ rw_common:
        return 0;
 }
 
+/*
+ * This allocates an iocb that will be used to submit and track completion of
+ * an IO that is issued from kernel space.
+ *
+ * The caller is expected to call the appropriate aio_kernel_init_() functions
+ * and then call aio_kernel_submit().  From that point forward progress is
+ * guaranteed by the file system aio method.  Eventually the caller's
+ * completion callback will be called.
+ *
+ * These iocbs are special.  They don't have a context, we don't limit the
+ * number pending, and they can't be canceled.
+ */
+struct kiocb *aio_kernel_alloc(gfp_t gfp)
+{
+       return kzalloc(sizeof(struct kiocb), gfp);
+}
+EXPORT_SYMBOL_GPL(aio_kernel_alloc);
+
+void aio_kernel_free(struct kiocb *iocb)
+{
+       kfree(iocb);
+}
+EXPORT_SYMBOL_GPL(aio_kernel_free);
+
+/*
+ * ptr and count can be a buff and bytes or an iov and segs.
+ */
+void aio_kernel_init_rw(struct kiocb *iocb, struct file *filp,
+                       size_t nr, loff_t off)
+{
+       iocb->ki_filp = filp;
+       iocb->ki_left = nr;
+       iocb->ki_nbytes = nr;
+       iocb->ki_pos = off;
+       iocb->ki_ctx = (void *)-1;
+}
+EXPORT_SYMBOL_GPL(aio_kernel_init_rw);
+
+void aio_kernel_init_callback(struct kiocb *iocb,
+                             void (*complete)(u64 user_data, long res),
+                             u64 user_data)
+{
+       iocb->ki_obj.complete = complete;
+       iocb->ki_user_data = user_data;
+}
+EXPORT_SYMBOL_GPL(aio_kernel_init_callback);
+
+/*
+ * The iocb is our responsibility once this is called.  The caller must not
+ * reference it.
+ *
+ * Callers must be prepared for their iocb completion callback to be called the
+ * moment they enter this function.  The completion callback may be called from
+ * any context.
+ *
+ * Returns: 0: the iocb completion callback will be called with the op result
+ * negative errno: the operation was not submitted and the iocb was freed
+ */
+int aio_kernel_submit(struct kiocb *iocb, unsigned short op, void *ptr)
+{
+       int ret;
+
+       BUG_ON(!is_kernel_kiocb(iocb));
+       BUG_ON(!iocb->ki_obj.complete);
+       BUG_ON(!iocb->ki_filp);
+
+       iocb->ki_opcode = op;
+       iocb->ki_buf = (char __user *)(unsigned long)ptr;
+       iocb->ki_iter = ptr;
+
+       ret = aio_run_iocb(iocb, 0);
+
+       if (ret)
+               aio_kernel_free(iocb);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(aio_kernel_submit);
+
 static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
                         struct iocb *iocb, bool compat)
 {
index 7c93953030fbe5eda13d76b6a8c53d6f2a31902d..38651e5da183f80b60763c7423a1171e1dfc9d6f 100644 (file)
@@ -39,12 +39,24 @@ static ssize_t bad_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
        return -EIO;
 }
 
+static ssize_t bad_file_read_iter(struct kiocb *iocb, struct iov_iter *iter,
+                       loff_t pos)
+{
+       return -EIO;
+}
+
 static ssize_t bad_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
                        unsigned long nr_segs, loff_t pos)
 {
        return -EIO;
 }
 
+static ssize_t bad_file_write_iter(struct kiocb *iocb, struct iov_iter *iter,
+                       loff_t pos)
+{
+       return -EIO;
+}
+
 static int bad_file_readdir(struct file *file, struct dir_context *ctx)
 {
        return -EIO;
@@ -151,7 +163,9 @@ static const struct file_operations bad_file_ops =
        .read           = bad_file_read,
        .write          = bad_file_write,
        .aio_read       = bad_file_aio_read,
+       .read_iter      = bad_file_read_iter,
        .aio_write      = bad_file_aio_write,
+       .write_iter     = bad_file_write_iter,
        .iterate        = bad_file_readdir,
        .poll           = bad_file_poll,
        .unlocked_ioctl = bad_file_unlocked_ioctl,
index ad3ea1497cc3825d4fdf795cd6b5735618005e0e..3d14806d5393805df9a414942d52069465f4dfa8 100644 (file)
@@ -24,9 +24,9 @@
 const struct file_operations bfs_file_operations = {
        .llseek         = generic_file_llseek,
        .read           = do_sync_read,
-       .aio_read       = generic_file_aio_read,
+       .read_iter      = generic_file_read_iter,
        .write          = do_sync_write,
-       .aio_write      = generic_file_aio_write,
+       .write_iter     = generic_file_write_iter,
        .mmap           = generic_file_mmap,
        .splice_read    = generic_file_splice_read,
 };
index 89dec7f789a44335c500e8a2d89fc3f13dac6c5e..b3192da97cabec0e3feb69c64d0749ca05d25237 100644 (file)
@@ -45,7 +45,6 @@ static int load_aout_library(struct file*);
  */
 static int aout_core_dump(struct coredump_params *cprm)
 {
-       struct file *file = cprm->file;
        mm_segment_t fs;
        int has_dumped = 0;
        void __user *dump_start;
@@ -85,10 +84,10 @@ static int aout_core_dump(struct coredump_params *cprm)
 
        set_fs(KERNEL_DS);
 /* struct user */
-       if (!dump_write(file, &dump, sizeof(dump)))
+       if (!dump_emit(cprm, &dump, sizeof(dump)))
                goto end_coredump;
 /* Now dump all of the user data.  Include malloced stuff as well */
-       if (!dump_seek(cprm->file, PAGE_SIZE - sizeof(dump)))
+       if (!dump_align(cprm, PAGE_SIZE))
                goto end_coredump;
 /* now we start writing out the user space info */
        set_fs(USER_DS);
@@ -96,14 +95,14 @@ static int aout_core_dump(struct coredump_params *cprm)
        if (dump.u_dsize != 0) {
                dump_start = START_DATA(dump);
                dump_size = dump.u_dsize << PAGE_SHIFT;
-               if (!dump_write(file, dump_start, dump_size))
+               if (!dump_emit(cprm, dump_start, dump_size))
                        goto end_coredump;
        }
 /* Now prepare to dump the stack area */
        if (dump.u_ssize != 0) {
                dump_start = START_STACK(dump);
                dump_size = dump.u_ssize << PAGE_SHIFT;
-               if (!dump_write(file, dump_start, dump_size))
+               if (!dump_emit(cprm, dump_start, dump_size))
                        goto end_coredump;
        }
 end_coredump:
index 100edcc5e3122323eb8f4087305e623c666eceb0..7d863a4de5a51aed378cbb4172015789f491f7f7 100644 (file)
@@ -1225,35 +1225,32 @@ static int notesize(struct memelfnote *en)
        return sz;
 }
 
-#define DUMP_WRITE(addr, nr, foffset)  \
-       do { if (!dump_write(file, (addr), (nr))) return 0; *foffset += (nr); } while(0)
-
-static int alignfile(struct file *file, loff_t *foffset)
+static int alignfile(struct coredump_params *cprm)
 {
        static const char buf[4] = { 0, };
-       DUMP_WRITE(buf, roundup(*foffset, 4) - *foffset, foffset);
-       return 1;
+       return dump_emit(cprm, buf, roundup(cprm->written, 4) - cprm->written);
 }
 
-static int writenote(struct memelfnote *men, struct file *file,
-                       loff_t *foffset)
+static int writenote(struct memelfnote *men, struct coredump_params *cprm)
 {
        struct elf_note en;
        en.n_namesz = strlen(men->name) + 1;
        en.n_descsz = men->datasz;
        en.n_type = men->type;
 
-       DUMP_WRITE(&en, sizeof(en), foffset);
-       DUMP_WRITE(men->name, en.n_namesz, foffset);
-       if (!alignfile(file, foffset))
+       if (!dump_emit(cprm, &en, sizeof(en)))
+               return 0;
+       if (!dump_emit(cprm, men->name, en.n_namesz))
+               return 0;
+       if (!alignfile(cprm))
                return 0;
-       DUMP_WRITE(men->data, men->datasz, foffset);
-       if (!alignfile(file, foffset))
+       if (!dump_emit(cprm, men->data, men->datasz))
+               return 0;
+       if (!alignfile(cprm))
                return 0;
 
        return 1;
 }
-#undef DUMP_WRITE
 
 static void fill_elf_header(struct elfhdr *elf, int segs,
                            u16 machine, u32 flags)
@@ -1702,7 +1699,7 @@ static size_t get_note_info_size(struct elf_note_info *info)
  * process-wide notes are interleaved after the first thread-specific note.
  */
 static int write_note_info(struct elf_note_info *info,
-                          struct file *file, loff_t *foffset)
+                          struct coredump_params *cprm)
 {
        bool first = 1;
        struct elf_thread_core_info *t = info->thread;
@@ -1710,21 +1707,21 @@ static int write_note_info(struct elf_note_info *info,
        do {
                int i;
 
-               if (!writenote(&t->notes[0], file, foffset))
+               if (!writenote(&t->notes[0], cprm))
                        return 0;
 
-               if (first && !writenote(&info->psinfo, file, foffset))
+               if (first && !writenote(&info->psinfo, cprm))
                        return 0;
-               if (first && !writenote(&info->signote, file, foffset))
+               if (first && !writenote(&info->signote, cprm))
                        return 0;
-               if (first && !writenote(&info->auxv, file, foffset))
+               if (first && !writenote(&info->auxv, cprm))
                        return 0;
-               if (first && !writenote(&info->files, file, foffset))
+               if (first && !writenote(&info->files, cprm))
                        return 0;
 
                for (i = 1; i < info->thread_notes; ++i)
                        if (t->notes[i].data &&
-                           !writenote(&t->notes[i], file, foffset))
+                           !writenote(&t->notes[i], cprm))
                                return 0;
 
                first = 0;
@@ -1930,13 +1927,13 @@ static size_t get_note_info_size(struct elf_note_info *info)
 }
 
 static int write_note_info(struct elf_note_info *info,
-                          struct file *file, loff_t *foffset)
+                          struct coredump_params *cprm)
 {
        int i;
        struct list_head *t;
 
        for (i = 0; i < info->numnote; i++)
-               if (!writenote(info->notes + i, file, foffset))
+               if (!writenote(info->notes + i, cprm))
                        return 0;
 
        /* write out the thread status notes section */
@@ -1945,7 +1942,7 @@ static int write_note_info(struct elf_note_info *info,
                                list_entry(t, struct elf_thread_status, list);
 
                for (i = 0; i < tmp->num_notes; i++)
-                       if (!writenote(&tmp->notes[i], file, foffset))
+                       if (!writenote(&tmp->notes[i], cprm))
                                return 0;
        }
 
@@ -2040,10 +2037,9 @@ static int elf_core_dump(struct coredump_params *cprm)
        int has_dumped = 0;
        mm_segment_t fs;
        int segs;
-       size_t size = 0;
        struct vm_area_struct *vma, *gate_vma;
        struct elfhdr *elf = NULL;
-       loff_t offset = 0, dataoff, foffset;
+       loff_t offset = 0, dataoff;
        struct elf_note_info info;
        struct elf_phdr *phdr4note = NULL;
        struct elf_shdr *shdr4extnum = NULL;
@@ -2099,7 +2095,6 @@ static int elf_core_dump(struct coredump_params *cprm)
 
        offset += sizeof(*elf);                         /* Elf header */
        offset += segs * sizeof(struct elf_phdr);       /* Program headers */
-       foffset = offset;
 
        /* Write notes phdr entry */
        {
@@ -2130,13 +2125,10 @@ static int elf_core_dump(struct coredump_params *cprm)
 
        offset = dataoff;
 
-       size += sizeof(*elf);
-       if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
+       if (!dump_emit(cprm, elf, sizeof(*elf)))
                goto end_coredump;
 
-       size += sizeof(*phdr4note);
-       if (size > cprm->limit
-           || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
+       if (!dump_emit(cprm, phdr4note, sizeof(*phdr4note)))
                goto end_coredump;
 
        /* Write program headers for segments dump */
@@ -2158,24 +2150,22 @@ static int elf_core_dump(struct coredump_params *cprm)
                        phdr.p_flags |= PF_X;
                phdr.p_align = ELF_EXEC_PAGESIZE;
 
-               size += sizeof(phdr);
-               if (size > cprm->limit
-                   || !dump_write(cprm->file, &phdr, sizeof(phdr)))
+               if (!dump_emit(cprm, &phdr, sizeof(phdr)))
                        goto end_coredump;
        }
 
-       if (!elf_core_write_extra_phdrs(cprm->file, offset, &size, cprm->limit))
+       if (!elf_core_write_extra_phdrs(cprm, offset))
                goto end_coredump;
 
        /* write out the notes section */
-       if (!write_note_info(&info, cprm->file, &foffset))
+       if (!write_note_info(&info, cprm))
                goto end_coredump;
 
-       if (elf_coredump_extra_notes_write(cprm->file, &foffset))
+       if (elf_coredump_extra_notes_write(cprm))
                goto end_coredump;
 
        /* Align to page */
-       if (!dump_seek(cprm->file, dataoff - foffset))
+       if (!dump_align(cprm, ELF_EXEC_PAGESIZE))
                goto end_coredump;
 
        for (vma = first_vma(current, gate_vma); vma != NULL;
@@ -2192,28 +2182,22 @@ static int elf_core_dump(struct coredump_params *cprm)
                        page = get_dump_page(addr);
                        if (page) {
                                void *kaddr = kmap(page);
-                               stop = ((size += PAGE_SIZE) > cprm->limit) ||
-                                       !dump_write(cprm->file, kaddr,
-                                                   PAGE_SIZE);
+                               stop = !dump_emit(cprm, kaddr, PAGE_SIZE);
                                kunmap(page);
                                page_cache_release(page);
                        } else
-                               stop = !dump_seek(cprm->file, PAGE_SIZE);
+                               stop = !dump_skip(cprm, PAGE_SIZE);
                        if (stop)
                                goto end_coredump;
                }
        }
 
-       if (!elf_core_write_extra_data(cprm->file, &size, cprm->limit))
+       if (!elf_core_write_extra_data(cprm))
                goto end_coredump;
 
-       if (e_phnum == PN_XNUM) {
-               size += sizeof(*shdr4extnum);
-               if (size > cprm->limit
-                   || !dump_write(cprm->file, shdr4extnum,
-                                  sizeof(*shdr4extnum)))
+       if (e_phnum == PN_XNUM)
+               if (!dump_emit(cprm, shdr4extnum, sizeof(*shdr4extnum)))
                        goto end_coredump;
-       }
 
 end_coredump:
        set_fs(fs);
index c166f325a1839b1ecf83ae0b068f13e8a26495c2..85c23c4969998a619849dc4c39ccf5ec19ce747e 100644 (file)
@@ -1207,7 +1207,7 @@ static int maydump(struct vm_area_struct *vma, unsigned long mm_flags)
        }
 
        /* If we may not read the contents, don't allow us to dump
-        * them either. "dump_write()" can't handle it anyway.
+        * them either. "dump_emit()" can't handle it anyway.
         */
        if (!(vma->vm_flags & VM_READ)) {
                kdcore("%08lx: %08lx: no (!read)", vma->vm_start, vma->vm_flags);
@@ -1267,35 +1267,26 @@ static int notesize(struct memelfnote *en)
 
 /* #define DEBUG */
 
-#define DUMP_WRITE(addr, nr, foffset)  \
-       do { if (!dump_write(file, (addr), (nr))) return 0; *foffset += (nr); } while(0)
-
-static int alignfile(struct file *file, loff_t *foffset)
-{
-       static const char buf[4] = { 0, };
-       DUMP_WRITE(buf, roundup(*foffset, 4) - *foffset, foffset);
-       return 1;
-}
-
-static int writenote(struct memelfnote *men, struct file *file,
-                       loff_t *foffset)
+static int writenote(struct memelfnote *men, struct coredump_params *cprm)
 {
        struct elf_note en;
        en.n_namesz = strlen(men->name) + 1;
        en.n_descsz = men->datasz;
        en.n_type = men->type;
 
-       DUMP_WRITE(&en, sizeof(en), foffset);
-       DUMP_WRITE(men->name, en.n_namesz, foffset);
-       if (!alignfile(file, foffset))
+       if (!dump_emit(cprm, &en, sizeof(en)))
                return 0;
-       DUMP_WRITE(men->data, men->datasz, foffset);
-       if (!alignfile(file, foffset))
+       if (!dump_emit(cprm, men->name, en.n_namesz))
+               return 0;
+       if (!dump_align(cprm, 4))
+               return 0;
+       if (!dump_emit(cprm, men->data, men->datasz))
+               return 0;
+       if (!dump_align(cprm, 4))
                return 0;
 
        return 1;
 }
-#undef DUMP_WRITE
 
 static inline void fill_elf_fdpic_header(struct elfhdr *elf, int segs)
 {
@@ -1501,8 +1492,7 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
  * dump the segments for an MMU process
  */
 #ifdef CONFIG_MMU
-static int elf_fdpic_dump_segments(struct file *file, size_t *size,
-                          unsigned long *limit, unsigned long mm_flags)
+static int elf_fdpic_dump_segments(struct coredump_params *cprm)
 {
        struct vm_area_struct *vma;
        int err = 0;
@@ -1510,7 +1500,7 @@ static int elf_fdpic_dump_segments(struct file *file, size_t *size,
        for (vma = current->mm->mmap; vma; vma = vma->vm_next) {
                unsigned long addr;
 
-               if (!maydump(vma, mm_flags))
+               if (!maydump(vma, cprm->mm_flags))
                        continue;
 
                for (addr = vma->vm_start; addr < vma->vm_end;
@@ -1518,14 +1508,11 @@ static int elf_fdpic_dump_segments(struct file *file, size_t *size,
                        struct page *page = get_dump_page(addr);
                        if (page) {
                                void *kaddr = kmap(page);
-                               *size += PAGE_SIZE;
-                               if (*size > *limit)
-                                       err = -EFBIG;
-                               else if (!dump_write(file, kaddr, PAGE_SIZE))
+                               if (!dump_emit(cprm, kaddr, PAGE_SIZE))
                                        err = -EIO;
                                kunmap(page);
                                page_cache_release(page);
-                       } else if (!dump_seek(file, PAGE_SIZE))
+                       } else if (!dump_skip(cprm, PAGE_SIZE))
                                err = -EFBIG;
                        if (err)
                                goto out;
@@ -1540,19 +1527,15 @@ out:
  * dump the segments for a NOMMU process
  */
 #ifndef CONFIG_MMU
-static int elf_fdpic_dump_segments(struct file *file, size_t *size,
-                          unsigned long *limit, unsigned long mm_flags)
+static int elf_fdpic_dump_segments(struct coredump_params *cprm)
 {
        struct vm_area_struct *vma;
 
        for (vma = current->mm->mmap; vma; vma = vma->vm_next) {
-               if (!maydump(vma, mm_flags))
+               if (!maydump(vma, cprm->mm_flags))
                        continue;
 
-               if ((*size += PAGE_SIZE) > *limit)
-                       return -EFBIG;
-
-               if (!dump_write(file, (void *) vma->vm_start,
+               if (!dump_emit(cprm, (void *) vma->vm_start,
                                vma->vm_end - vma->vm_start))
                        return -EIO;
        }
@@ -1585,7 +1568,6 @@ static int elf_fdpic_core_dump(struct coredump_params *cprm)
        int has_dumped = 0;
        mm_segment_t fs;
        int segs;
-       size_t size = 0;
        int i;
        struct vm_area_struct *vma;
        struct elfhdr *elf = NULL;
@@ -1755,13 +1737,10 @@ static int elf_fdpic_core_dump(struct coredump_params *cprm)
 
        offset = dataoff;
 
-       size += sizeof(*elf);
-       if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
+       if (!dump_emit(cprm, elf, sizeof(*elf)))
                goto end_coredump;
 
-       size += sizeof(*phdr4note);
-       if (size > cprm->limit
-           || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
+       if (!dump_emit(cprm, phdr4note, sizeof(*phdr4note)))
                goto end_coredump;
 
        /* write program headers for segments dump */
@@ -1785,18 +1764,16 @@ static int elf_fdpic_core_dump(struct coredump_params *cprm)
                        phdr.p_flags |= PF_X;
                phdr.p_align = ELF_EXEC_PAGESIZE;
 
-               size += sizeof(phdr);
-               if (size > cprm->limit
-                   || !dump_write(cprm->file, &phdr, sizeof(phdr)))
+               if (!dump_emit(cprm, &phdr, sizeof(phdr)))
                        goto end_coredump;
        }
 
-       if (!elf_core_write_extra_phdrs(cprm->file, offset, &size, cprm->limit))
+       if (!elf_core_write_extra_phdrs(cprm, offset))
                goto end_coredump;
 
        /* write out the notes section */
        for (i = 0; i < numnote; i++)
-               if (!writenote(notes + i, cprm->file, &foffset))
+               if (!writenote(notes + i, cprm))
                        goto end_coredump;
 
        /* write out the thread status notes section */
@@ -1805,33 +1782,29 @@ static int elf_fdpic_core_dump(struct coredump_params *cprm)
                                list_entry(t, struct elf_thread_status, list);
 
                for (i = 0; i < tmp->num_notes; i++)
-                       if (!writenote(&tmp->notes[i], cprm->file, &foffset))
+                       if (!writenote(&tmp->notes[i], cprm))
                                goto end_coredump;
        }
 
-       if (!dump_seek(cprm->file, dataoff - foffset))
+       if (!dump_align(cprm, ELF_EXEC_PAGESIZE))
                goto end_coredump;
 
-       if (elf_fdpic_dump_segments(cprm->file, &size, &cprm->limit,
-                                   cprm->mm_flags) < 0)
+       if (elf_fdpic_dump_segments(cprm) < 0)
                goto end_coredump;
 
-       if (!elf_core_write_extra_data(cprm->file, &size, cprm->limit))
+       if (!elf_core_write_extra_data(cprm))
                goto end_coredump;
 
        if (e_phnum == PN_XNUM) {
-               size += sizeof(*shdr4extnum);
-               if (size > cprm->limit
-                   || !dump_write(cprm->file, shdr4extnum,
-                                  sizeof(*shdr4extnum)))
+               if (!dump_emit(cprm, shdr4extnum, sizeof(*shdr4extnum)))
                        goto end_coredump;
        }
 
-       if (cprm->file->f_pos != offset) {
+       if (cprm->written != offset) {
                /* Sanity check */
                printk(KERN_WARNING
-                      "elf_core_dump: file->f_pos (%lld) != offset (%lld)\n",
-                      cprm->file->f_pos, offset);
+                      "elf_core_dump: cprm->written (%lld) != offset (%lld)\n",
+                      (long long)cprm->written, offset);
        }
 
 end_coredump:
index 94bbc04dba77053bb47d3d8b793a3a8218f0a0d2..8e0348f6e5bdd2173b5bc18d71f9692bca944f1f 100644 (file)
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -1946,7 +1946,7 @@ int bio_associate_current(struct bio *bio)
 
        /* associate blkcg if exists */
        rcu_read_lock();
-       css = task_subsys_state(current, blkio_subsys_id);
+       css = task_css(current, blkio_subsys_id);
        if (css && css_tryget(css))
                bio->bi_css = css;
        rcu_read_unlock();
index c7bda5cd3da74daa998d1fda16c3e78240d7e872..89d8ec543e65a5e5987bdd95e8af9bb55ab5c9f7 100644 (file)
@@ -165,14 +165,14 @@ blkdev_get_block(struct inode *inode, sector_t iblock,
 }
 
 static ssize_t
-blkdev_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
-                       loff_t offset, unsigned long nr_segs)
+blkdev_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter,
+                       loff_t offset)
 {
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_mapping->host;
 
-       return __blockdev_direct_IO(rw, iocb, inode, I_BDEV(inode), iov, offset,
-                                   nr_segs, blkdev_get_block, NULL, NULL, 0);
+       return __blockdev_direct_IO(rw, iocb, inode, I_BDEV(inode), iter,
+                                   offset, blkdev_get_block, NULL, NULL, 0);
 }
 
 int __sync_blockdev(struct block_device *bdev, int wait)
@@ -1508,8 +1508,7 @@ static long block_ioctl(struct file *file, unsigned cmd, unsigned long arg)
  * Does not take i_mutex for the write and thus is not for general purpose
  * use.
  */
-ssize_t blkdev_aio_write(struct kiocb *iocb, const struct iovec *iov,
-                        unsigned long nr_segs, loff_t pos)
+ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *iter, loff_t pos)
 {
        struct file *file = iocb->ki_filp;
        struct blk_plug plug;
@@ -1518,7 +1517,7 @@ ssize_t blkdev_aio_write(struct kiocb *iocb, const struct iovec *iov,
        BUG_ON(iocb->ki_pos != pos);
 
        blk_start_plug(&plug);
-       ret = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos);
+       ret = __generic_file_write_iter(iocb, iter, &iocb->ki_pos);
        if (ret > 0 || ret == -EIOCBQUEUED) {
                ssize_t err;
 
@@ -1529,10 +1528,10 @@ ssize_t blkdev_aio_write(struct kiocb *iocb, const struct iovec *iov,
        blk_finish_plug(&plug);
        return ret;
 }
-EXPORT_SYMBOL_GPL(blkdev_aio_write);
+EXPORT_SYMBOL_GPL(blkdev_write_iter);
 
-static ssize_t blkdev_aio_read(struct kiocb *iocb, const struct iovec *iov,
-                        unsigned long nr_segs, loff_t pos)
+static ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *iter,
+                        loff_t pos)
 {
        struct file *file = iocb->ki_filp;
        struct inode *bd_inode = file->f_mapping->host;
@@ -1543,8 +1542,8 @@ static ssize_t blkdev_aio_read(struct kiocb *iocb, const struct iovec *iov,
 
        size -= pos;
        if (size < iocb->ki_left)
-               nr_segs = iov_shorten((struct iovec *)iov, nr_segs, size);
-       return generic_file_aio_read(iocb, iov, nr_segs, pos);
+               iov_iter_shorten(iter, size);
+       return generic_file_read_iter(iocb, iter, pos);
 }
 
 /*
@@ -1578,8 +1577,8 @@ const struct file_operations def_blk_fops = {
        .llseek         = block_llseek,
        .read           = do_sync_read,
        .write          = do_sync_write,
-       .aio_read       = blkdev_aio_read,
-       .aio_write      = blkdev_aio_write,
+       .read_iter      = blkdev_read_iter,
+       .write_iter     = blkdev_write_iter,
        .mmap           = generic_file_mmap,
        .fsync          = blkdev_fsync,
        .unlocked_ioctl = block_ioctl,
index 8e686a427ce2e8e5e824419321f1f8f2dfe19acb..5625cfcb5b7e45eb06be38f8a47ae094d0ce7fd6 100644 (file)
@@ -453,7 +453,7 @@ static noinline int btrfs_copy_from_user(loff_t pos, int num_pages,
                write_bytes -= copied;
                total_copied += copied;
 
-               /* Return to btrfs_file_aio_write to fault page */
+               /* Return to btrfs_file_write_iter to fault page */
                if (unlikely(copied == 0))
                        break;
 
@@ -1566,27 +1566,23 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
 }
 
 static ssize_t __btrfs_direct_write(struct kiocb *iocb,
-                                   const struct iovec *iov,
-                                   unsigned long nr_segs, loff_t pos,
-                                   loff_t *ppos, size_t count, size_t ocount)
+                                    struct iov_iter *iter, loff_t pos,
+                                   loff_t *ppos, size_t count)
 {
        struct file *file = iocb->ki_filp;
-       struct iov_iter i;
        ssize_t written;
        ssize_t written_buffered;
        loff_t endbyte;
        int err;
 
-       written = generic_file_direct_write(iocb, iov, &nr_segs, pos, ppos,
-                                           count, ocount);
+       written = generic_file_direct_write_iter(iocb, iter, pos, ppos, count);
 
        if (written < 0 || written == count)
                return written;
 
        pos += written;
        count -= written;
-       iov_iter_init(&i, iov, nr_segs, count, written);
-       written_buffered = __btrfs_buffered_write(file, &i, pos);
+       written_buffered = __btrfs_buffered_write(file, iter, pos);
        if (written_buffered < 0) {
                err = written_buffered;
                goto out;
@@ -1621,9 +1617,8 @@ static void update_time_for_write(struct inode *inode)
                inode_inc_iversion(inode);
 }
 
-static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
-                                   const struct iovec *iov,
-                                   unsigned long nr_segs, loff_t pos)
+static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
+                                    struct iov_iter *iter, loff_t pos)
 {
        struct file *file = iocb->ki_filp;
        struct inode *inode = file_inode(file);
@@ -1632,17 +1627,12 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
        u64 start_pos;
        ssize_t num_written = 0;
        ssize_t err = 0;
-       size_t count, ocount;
+       size_t count;
        bool sync = (file->f_flags & O_DSYNC) || IS_SYNC(file->f_mapping->host);
 
        mutex_lock(&inode->i_mutex);
 
-       err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ);
-       if (err) {
-               mutex_unlock(&inode->i_mutex);
-               goto out;
-       }
-       count = ocount;
+       count = iov_iter_count(iter);
 
        current->backing_dev_info = inode->i_mapping->backing_dev_info;
        err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
@@ -1695,14 +1685,10 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
                atomic_inc(&BTRFS_I(inode)->sync_writers);
 
        if (unlikely(file->f_flags & O_DIRECT)) {
-               num_written = __btrfs_direct_write(iocb, iov, nr_segs,
-                                                  pos, ppos, count, ocount);
+               num_written = __btrfs_direct_write(iocb, iter, pos, ppos,
+                                                  count);
        } else {
-               struct iov_iter i;
-
-               iov_iter_init(&i, iov, nr_segs, count, num_written);
-
-               num_written = __btrfs_buffered_write(file, &i, pos);
+               num_written = __btrfs_buffered_write(file, iter, pos);
                if (num_written > 0)
                        *ppos = pos + num_written;
        }
@@ -2561,9 +2547,9 @@ const struct file_operations btrfs_file_operations = {
        .llseek         = btrfs_file_llseek,
        .read           = do_sync_read,
        .write          = do_sync_write,
-       .aio_read       = generic_file_aio_read,
        .splice_read    = generic_file_splice_read,
-       .aio_write      = btrfs_file_aio_write,
+       .read_iter      = generic_file_read_iter,
+       .write_iter     = btrfs_file_write_iter,
        .mmap           = btrfs_file_mmap,
        .open           = generic_file_open,
        .release        = btrfs_release_file,
index 021694c081814828272ff03a8b9e1f64732a46cc..8a56501572e63818e63725dc392dd3adb29570a5 100644 (file)
@@ -7216,8 +7216,7 @@ free_ordered:
 }
 
 static ssize_t check_direct_IO(struct btrfs_root *root, int rw, struct kiocb *iocb,
-                       const struct iovec *iov, loff_t offset,
-                       unsigned long nr_segs)
+                       struct iov_iter *iter, loff_t offset)
 {
        int seg;
        int i;
@@ -7231,35 +7230,50 @@ static ssize_t check_direct_IO(struct btrfs_root *root, int rw, struct kiocb *io
                goto out;
 
        /* Check the memory alignment.  Blocks cannot straddle pages */
-       for (seg = 0; seg < nr_segs; seg++) {
-               addr = (unsigned long)iov[seg].iov_base;
-               size = iov[seg].iov_len;
-               end += size;
-               if ((addr & blocksize_mask) || (size & blocksize_mask))
-                       goto out;
+       if (iov_iter_has_iovec(iter)) {
+               const struct iovec *iov = iov_iter_iovec(iter);
+
+               for (seg = 0; seg < iter->nr_segs; seg++) {
+                       addr = (unsigned long)iov[seg].iov_base;
+                       size = iov[seg].iov_len;
+                       end += size;
+                       if ((addr & blocksize_mask) || (size & blocksize_mask))
+                               goto out;
 
-               /* If this is a write we don't need to check anymore */
-               if (rw & WRITE)
-                       continue;
+                       /* If this is a write we don't need to check anymore */
+                       if (rw & WRITE)
+                               continue;
 
-               /*
-                * Check to make sure we don't have duplicate iov_base's in this
-                * iovec, if so return EINVAL, otherwise we'll get csum errors
-                * when reading back.
-                */
-               for (i = seg + 1; i < nr_segs; i++) {
-                       if (iov[seg].iov_base == iov[i].iov_base)
+                       /*
+                       * Check to make sure we don't have duplicate iov_base's
+                       * in this iovec, if so return EINVAL, otherwise we'll
+                       * get csum errors when reading back.
+                       */
+                       for (i = seg + 1; i < iter->nr_segs; i++) {
+                               if (iov[seg].iov_base == iov[i].iov_base)
+                                       goto out;
+                       }
+               }
+       } else if (iov_iter_has_bvec(iter)) {
+               struct bio_vec *bvec = iov_iter_bvec(iter);
+
+               for (seg = 0; seg < iter->nr_segs; seg++) {
+                       addr = (unsigned long)bvec[seg].bv_offset;
+                       size = bvec[seg].bv_len;
+                       end += size;
+                       if ((addr & blocksize_mask) || (size & blocksize_mask))
                                goto out;
                }
-       }
+       } else
+               BUG();
+
        retval = 0;
 out:
        return retval;
 }
 
 static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
-                       const struct iovec *iov, loff_t offset,
-                       unsigned long nr_segs)
+                       struct iov_iter *iter, loff_t offset)
 {
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_mapping->host;
@@ -7269,8 +7283,7 @@ static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
        bool relock = false;
        ssize_t ret;
 
-       if (check_direct_IO(BTRFS_I(inode)->root, rw, iocb, iov,
-                           offset, nr_segs))
+       if (check_direct_IO(BTRFS_I(inode)->root, rw, iocb, iter, offset))
                return 0;
 
        atomic_inc(&inode->i_dio_count);
@@ -7282,7 +7295,7 @@ static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
         * call btrfs_wait_ordered_range to make absolutely sure that any
         * outstanding dirty pages are on disk.
         */
-       count = iov_length(iov, nr_segs);
+       count = iov_iter_count(iter);
        btrfs_wait_ordered_range(inode, offset, count);
 
        if (rw & WRITE) {
@@ -7307,7 +7320,7 @@ static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
 
        ret = __blockdev_direct_IO(rw, iocb, inode,
                        BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev,
-                       iov, offset, nr_segs, btrfs_get_blocks_direct, NULL,
+                       iter, offset, btrfs_get_blocks_direct, NULL,
                        btrfs_submit_direct, flags);
        if (rw & WRITE) {
                if (ret < 0 && ret != -EIOCBQUEUED)
index d3f3b43cae0bdef23c889f6c5939c49aea13eda6..2e14fd89a8b46e80622168ef30b9ac206b4b6a04 100644 (file)
@@ -219,7 +219,7 @@ static int fs_path_ensure_buf(struct fs_path *p, int len)
        len = PAGE_ALIGN(len);
 
        if (p->buf == p->inline_buf) {
-               tmp_buf = kmalloc(len, GFP_NOFS);
+               tmp_buf = kmalloc(len, GFP_NOFS | __GFP_NOWARN);
                if (!tmp_buf) {
                        tmp_buf = vmalloc(len);
                        if (!tmp_buf)
index 5318a3b704f6d6f908520a9c1fc18b4dadc9a509..2c1a9004be7a4703502788a4e8a3a28a3e549061 100644 (file)
@@ -150,10 +150,6 @@ static void ceph_invalidatepage(struct page *page, unsigned int offset,
        struct ceph_inode_info *ci;
        struct ceph_snap_context *snapc = page_snap_context(page);
 
-       BUG_ON(!PageLocked(page));
-       BUG_ON(!PagePrivate(page));
-       BUG_ON(!page->mapping);
-
        inode = page->mapping->host;
 
        /*
@@ -1169,8 +1165,7 @@ static int ceph_write_end(struct file *file, struct address_space *mapping,
  * never get called.
  */
 static ssize_t ceph_direct_io(int rw, struct kiocb *iocb,
-                             const struct iovec *iov,
-                             loff_t pos, unsigned long nr_segs)
+                             struct iov_iter *iter, loff_t pos)
 {
        WARN_ON(1);
        return -EINVAL;
index 25442b40c25a71761596e071612140f01279fb69..430121a795bdcb8e463ba2a03f7e7b1903bbdd14 100644 (file)
@@ -2333,6 +2333,38 @@ void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
                iput(inode);
 }
 
+/*
+ * Invalidate unlinked inode's aliases, so we can drop the inode ASAP.
+ */
+static void invalidate_aliases(struct inode *inode)
+{
+       struct dentry *dn, *prev = NULL;
+
+       dout("invalidate_aliases inode %p\n", inode);
+       d_prune_aliases(inode);
+       /*
+        * For non-directory inode, d_find_alias() only returns
+        * connected dentry. After calling d_delete(), the dentry
+        * become disconnected.
+        *
+        * For directory inode, d_find_alias() only can return
+        * disconnected dentry. But directory inode should have
+        * one alias at most.
+        */
+       while ((dn = d_find_alias(inode))) {
+               if (dn == prev) {
+                       dput(dn);
+                       break;
+               }
+               d_delete(dn);
+               if (prev)
+                       dput(prev);
+               prev = dn;
+       }
+       if (prev)
+               dput(prev);
+}
+
 /*
  * Handle a cap GRANT message from the MDS.  (Note that a GRANT may
  * actually be a revocation if it specifies a smaller cap set.)
@@ -2363,6 +2395,7 @@ static void handle_cap_grant(struct inode *inode, struct ceph_mds_caps *grant,
        int writeback = 0;
        int revoked_rdcache = 0;
        int queue_invalidate = 0;
+       int deleted_inode = 0;
 
        dout("handle_cap_grant inode %p cap %p mds%d seq %d %s\n",
             inode, cap, mds, seq, ceph_cap_string(newcaps));
@@ -2407,8 +2440,12 @@ static void handle_cap_grant(struct inode *inode, struct ceph_mds_caps *grant,
                     from_kgid(&init_user_ns, inode->i_gid));
        }
 
-       if ((issued & CEPH_CAP_LINK_EXCL) == 0)
+       if ((issued & CEPH_CAP_LINK_EXCL) == 0) {
                set_nlink(inode, le32_to_cpu(grant->nlink));
+               if (inode->i_nlink == 0 &&
+                   (newcaps & (CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL)))
+                       deleted_inode = 1;
+       }
 
        if ((issued & CEPH_CAP_XATTR_EXCL) == 0 && grant->xattr_len) {
                int len = le32_to_cpu(grant->xattr_len);
@@ -2517,6 +2554,8 @@ static void handle_cap_grant(struct inode *inode, struct ceph_mds_caps *grant,
                ceph_queue_writeback(inode);
        if (queue_invalidate)
                ceph_queue_invalidate(inode);
+       if (deleted_inode)
+               invalidate_aliases(inode);
        if (wake)
                wake_up_all(&ci->i_cap_wq);
 
index a40ceda47a3218ee53c2167d8844899c5de3e9cf..868b61d56cac77f3a8328d5ba4851ec7947fe827 100644 (file)
@@ -793,6 +793,8 @@ static int ceph_link(struct dentry *old_dentry, struct inode *dir,
        req->r_locked_dir = dir;
        req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
        req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
+       /* release LINK_SHARED on source inode (mds will lock it) */
+       req->r_old_inode_drop = CEPH_CAP_LINK_SHARED;
        err = ceph_mdsc_do_request(mdsc, dir, req);
        if (err) {
                d_drop(dentry);
index 2ddf061c1c4af730885365b07dcb9388d7af98f9..bc0735498d293e9c45a0bada42a9443e0f36e382 100644 (file)
@@ -313,9 +313,9 @@ static int striped_read(struct inode *inode,
 {
        struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
        struct ceph_inode_info *ci = ceph_inode(inode);
-       u64 pos, this_len;
+       u64 pos, this_len, left;
        int io_align, page_align;
-       int left, pages_left;
+       int pages_left;
        int read;
        struct page **page_pos;
        int ret;
@@ -346,7 +346,7 @@ more:
                ret = 0;
        hit_stripe = this_len < left;
        was_short = ret >= 0 && ret < this_len;
-       dout("striped_read %llu~%u (read %u) got %d%s%s\n", pos, left, read,
+       dout("striped_read %llu~%llu (read %u) got %d%s%s\n", pos, left, read,
             ret, hit_stripe ? " HITSTRIPE" : "", was_short ? " SHORT" : "");
 
        if (ret > 0) {
@@ -378,7 +378,7 @@ more:
                        if (pos + left > inode->i_size)
                                left = inode->i_size - pos;
 
-                       dout("zero tail %d\n", left);
+                       dout("zero tail %llu\n", left);
                        ceph_zero_page_vector_range(page_align + read, left,
                                                    pages);
                        read += left;
@@ -659,7 +659,6 @@ again:
 
        if ((got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0 ||
            (iocb->ki_filp->f_flags & O_DIRECT) ||
-           (inode->i_sb->s_flags & MS_SYNCHRONOUS) ||
            (fi->flags & CEPH_F_SYNC))
                /* hmm, this isn't really async... */
                ret = ceph_sync_read(filp, base, len, ppos, &checkeof);
@@ -711,13 +710,11 @@ static ssize_t ceph_aio_write(struct kiocb *iocb, const struct iovec *iov,
                &ceph_sb_to_client(inode->i_sb)->client->osdc;
        ssize_t count, written = 0;
        int err, want, got;
-       bool hold_mutex;
 
        if (ceph_snap(inode) != CEPH_NOSNAP)
                return -EROFS;
 
        mutex_lock(&inode->i_mutex);
-       hold_mutex = true;
 
        err = generic_segment_checks(iov, &nr_segs, &count, VERIFY_READ);
        if (err)
@@ -763,18 +760,24 @@ retry_snap:
 
        if ((got & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_LAZYIO)) == 0 ||
            (iocb->ki_filp->f_flags & O_DIRECT) ||
-           (inode->i_sb->s_flags & MS_SYNCHRONOUS) ||
            (fi->flags & CEPH_F_SYNC)) {
                mutex_unlock(&inode->i_mutex);
                written = ceph_sync_write(file, iov->iov_base, count,
                                          pos, &iocb->ki_pos);
+               if (written == -EOLDSNAPC) {
+                       dout("aio_write %p %llx.%llx %llu~%u"
+                               "got EOLDSNAPC, retrying\n",
+                               inode, ceph_vinop(inode),
+                               pos, (unsigned)iov->iov_len);
+                       mutex_lock(&inode->i_mutex);
+                       goto retry_snap;
+               }
        } else {
                written = generic_file_buffered_write(iocb, iov, nr_segs,
                                                      pos, &iocb->ki_pos,
                                                      count, 0);
                mutex_unlock(&inode->i_mutex);
        }
-       hold_mutex = false;
 
        if (written >= 0) {
                int dirty;
@@ -798,18 +801,12 @@ retry_snap:
                        written = err;
        }
 
-       if (written == -EOLDSNAPC) {
-               dout("aio_write %p %llx.%llx %llu~%u got EOLDSNAPC, retrying\n",
-                    inode, ceph_vinop(inode), pos, (unsigned)iov->iov_len);
-               mutex_lock(&inode->i_mutex);
-               hold_mutex = true;
-               goto retry_snap;
-       }
+       goto out_unlocked;
+
 out:
-       if (hold_mutex)
-               mutex_unlock(&inode->i_mutex);
+       mutex_unlock(&inode->i_mutex);
+out_unlocked:
        current->backing_dev_info = NULL;
-
        return written ? written : err;
 }
 
index f3a2abf28a77df362faf5c38dc471a64dcbfdffc..98b6e50bde04dfe6fc60cf381d068055b2a07db3 100644 (file)
@@ -61,6 +61,14 @@ struct inode *ceph_get_inode(struct super_block *sb, struct ceph_vino vino)
        return inode;
 }
 
+struct inode *ceph_lookup_inode(struct super_block *sb, struct ceph_vino vino)
+{
+       struct inode *inode;
+       ino_t t = ceph_vino_to_ino(vino);
+       inode = ilookup5_nowait(sb, t, ceph_ino_compare, &vino);
+       return inode;
+}
+
 /*
  * get/constuct snapdir inode for a given directory
  */
@@ -1465,7 +1473,14 @@ static void ceph_vmtruncate_work(struct work_struct *work)
        struct inode *inode = &ci->vfs_inode;
 
        dout("vmtruncate_work %p\n", inode);
-       mutex_lock(&inode->i_mutex);
+       if (!mutex_trylock(&inode->i_mutex)) {
+               /*
+                * the i_mutex can be hold by a writer who is waiting for
+                * caps. wake up waiters, they will do pending vmtruncate.
+                */
+               wake_up_all(&ci->i_cap_wq);
+               mutex_lock(&inode->i_mutex);
+       }
        __ceph_do_pending_vmtruncate(inode);
        mutex_unlock(&inode->i_mutex);
        iput(inode);
index e0b4ef31d3c870c9e73fecad303e9f9957542385..669622fd1ae3d52af418cc4c283a5f22513bca73 100644 (file)
@@ -196,8 +196,10 @@ static long ceph_ioctl_get_dataloc(struct file *file, void __user *arg)
        r = ceph_calc_file_object_mapping(&ci->i_layout, dl.file_offset, len,
                                          &dl.object_no, &dl.object_offset,
                                          &olen);
-       if (r < 0)
+       if (r < 0) {
+               up_read(&osdc->map_sem);
                return -EIO;
+       }
        dl.file_offset -= dl.object_offset;
        dl.object_size = ceph_file_layout_object_size(ci->i_layout);
        dl.block_size = ceph_file_layout_su(ci->i_layout);
@@ -209,8 +211,12 @@ static long ceph_ioctl_get_dataloc(struct file *file, void __user *arg)
        snprintf(dl.object_name, sizeof(dl.object_name), "%llx.%08llx",
                 ceph_ino(inode), dl.object_no);
 
-       ceph_calc_ceph_pg(&pgid, dl.object_name, osdc->osdmap,
-               ceph_file_layout_pg_pool(ci->i_layout));
+       r = ceph_calc_ceph_pg(&pgid, dl.object_name, osdc->osdmap,
+                               ceph_file_layout_pg_pool(ci->i_layout));
+       if (r < 0) {
+               up_read(&osdc->map_sem);
+               return r;
+       }
 
        dl.osd = ceph_calc_pg_primary(osdc->osdmap, pgid);
        if (dl.osd >= 0) {
index 187bf214444da8c8fc9c6a8603b699a258f773f8..603786b564bed08e2591761ae79aa5ef0c926849 100644 (file)
@@ -414,6 +414,9 @@ static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc,
 {
        struct ceph_mds_session *s;
 
+       if (mds >= mdsc->mdsmap->m_max_mds)
+               return ERR_PTR(-EINVAL);
+
        s = kzalloc(sizeof(*s), GFP_NOFS);
        if (!s)
                return ERR_PTR(-ENOMEM);
@@ -1028,6 +1031,37 @@ static void remove_session_caps(struct ceph_mds_session *session)
 {
        dout("remove_session_caps on %p\n", session);
        iterate_session_caps(session, remove_session_caps_cb, NULL);
+
+       spin_lock(&session->s_cap_lock);
+       if (session->s_nr_caps > 0) {
+               struct super_block *sb = session->s_mdsc->fsc->sb;
+               struct inode *inode;
+               struct ceph_cap *cap, *prev = NULL;
+               struct ceph_vino vino;
+               /*
+                * iterate_session_caps() skips inodes that are being
+                * deleted, we need to wait until deletions are complete.
+                * __wait_on_freeing_inode() is designed for the job,
+                * but it is not exported, so use lookup inode function
+                * to access it.
+                */
+               while (!list_empty(&session->s_caps)) {
+                       cap = list_entry(session->s_caps.next,
+                                        struct ceph_cap, session_caps);
+                       if (cap == prev)
+                               break;
+                       prev = cap;
+                       vino = cap->ci->i_vino;
+                       spin_unlock(&session->s_cap_lock);
+
+                       inode = ceph_lookup_inode(sb, vino);
+                       iput(inode);
+
+                       spin_lock(&session->s_cap_lock);
+               }
+       }
+       spin_unlock(&session->s_cap_lock);
+
        BUG_ON(session->s_nr_caps > 0);
        BUG_ON(!list_empty(&session->s_cap_flushing));
        cleanup_cap_releases(session);
index cbded572345e77a107e539aa4e433d6f6f7964c0..afcd62a68916e358676d4fa4e7b7abf3c94ecb43 100644 (file)
@@ -677,6 +677,8 @@ extern void ceph_destroy_inode(struct inode *inode);
 
 extern struct inode *ceph_get_inode(struct super_block *sb,
                                    struct ceph_vino vino);
+extern struct inode *ceph_lookup_inode(struct super_block *sb,
+                                      struct ceph_vino vino);
 extern struct inode *ceph_get_snapdir(struct inode *parent);
 extern int ceph_fill_file_size(struct inode *inode, int issued,
                               u32 truncate_seq, u64 truncate_size, u64 size);
index ea940b1db77bdb1330b4efc795ad08b023e9acd2..ca4a67a0bb1eb9d8deaa25717fd5b5ac77e5c2c7 100644 (file)
@@ -39,6 +39,7 @@ Shaggy (Dave Kleikamp) for innumerable small fs suggestions and some good cleanu
 Gunter Kukkukk (testing and suggestions for support of old servers)
 Igor Mammedov (DFS support)
 Jeff Layton (many, many fixes, as well as great work on the cifs Kerberos code)
+Scott Lovenberg
 
 Test case and Bug Report contributors
 -------------------------------------
index 85ea98d139fc5643b0606b67959bb1f320037d80..d70e551f0bfbfef801f1daea11ad6a4e9f81354f 100644 (file)
@@ -357,6 +357,18 @@ cifs_show_cache_flavor(struct seq_file *s, struct cifs_sb_info *cifs_sb)
                seq_printf(s, "loose");
 }
 
+static void
+cifs_show_nls(struct seq_file *s, struct nls_table *cur)
+{
+       struct nls_table *def;
+
+       /* Display iocharset= option if it's not default charset */
+       def = load_nls_default();
+       if (def != cur)
+               seq_printf(s, ",iocharset=%s", cur->charset);
+       unload_nls(def);
+}
+
 /*
  * cifs_show_options() is for displaying mount options in /proc/mounts.
  * Not all settable options are displayed but most of the important
@@ -418,6 +430,9 @@ cifs_show_options(struct seq_file *s, struct dentry *root)
                seq_printf(s, ",file_mode=0%ho,dir_mode=0%ho",
                                           cifs_sb->mnt_file_mode,
                                           cifs_sb->mnt_dir_mode);
+
+       cifs_show_nls(s, cifs_sb->local_nls);
+
        if (tcon->seal)
                seq_printf(s, ",seal");
        if (tcon->nocase)
index 52ca861ed35e4fe3fbf78ec387ff401fb0e8fb94..fb186f7bae492881cc248ce55d7366c33b92936c 100644 (file)
@@ -28,6 +28,7 @@
 #include "cifsacl.h"
 #include <crypto/internal/hash.h>
 #include <linux/scatterlist.h>
+#include <uapi/linux/cifs/cifs_mount.h>
 #ifdef CONFIG_CIFS_SMB2
 #include "smb2pdu.h"
 #endif
 #define MAX_SES_INFO 2
 #define MAX_TCON_INFO 4
 
-#define MAX_TREE_SIZE (2 + MAX_SERVER_SIZE + 1 + MAX_SHARE_SIZE + 1)
-#define MAX_SERVER_SIZE 15
-#define MAX_SHARE_SIZE 80
-#define CIFS_MAX_DOMAINNAME_LEN 256 /* max domain name length */
-#define MAX_USERNAME_SIZE 256  /* reasonable maximum for current servers */
-#define MAX_PASSWORD_SIZE 512  /* max for windows seems to be 256 wide chars */
+#define MAX_TREE_SIZE (2 + CIFS_NI_MAXHOST + 1 + CIFS_MAX_SHARE_LEN + 1)
 
 #define CIFS_MIN_RCV_POOL 4
 
@@ -308,6 +304,9 @@ struct smb_version_operations {
        int (*create_hardlink)(const unsigned int, struct cifs_tcon *,
                               const char *, const char *,
                               struct cifs_sb_info *);
+       /* query symlink target */
+       int (*query_symlink)(const unsigned int, struct cifs_tcon *,
+                            const char *, char **, struct cifs_sb_info *);
        /* open a file for non-posix mounts */
        int (*open)(const unsigned int, struct cifs_open_parms *,
                    __u32 *, FILE_ALL_INFO *);
index 11ca24a8e054ef11472ba634472e6e8622008305..948676db8e2ea5f65d276535caa087c86792b18c 100644 (file)
@@ -1495,11 +1495,12 @@ struct reparse_data {
        __u32   ReparseTag;
        __u16   ReparseDataLength;
        __u16   Reserved;
-       __u16   AltNameOffset;
-       __u16   AltNameLen;
-       __u16   TargetNameOffset;
-       __u16   TargetNameLen;
-       char    LinkNamesBuf[1];
+       __u16   SubstituteNameOffset;
+       __u16   SubstituteNameLength;
+       __u16   PrintNameOffset;
+       __u16   PrintNameLength;
+       __u32   Flags;
+       char    PathBuffer[0];
 } __attribute__((packed));
 
 struct cifs_quota_data {
index b29a012bed33a24b6ba45b17f95c70303e3e3014..5b05eb082a415ab0708179a644dc2c3733d1d0fd 100644 (file)
@@ -357,13 +357,9 @@ extern int CIFSSMBUnixQuerySymLink(const unsigned int xid,
                        struct cifs_tcon *tcon,
                        const unsigned char *searchName, char **syminfo,
                        const struct nls_table *nls_codepage);
-#ifdef CONFIG_CIFS_SYMLINK_EXPERIMENTAL
-extern int CIFSSMBQueryReparseLinkInfo(const unsigned int xid,
-                       struct cifs_tcon *tcon,
-                       const unsigned char *searchName,
-                       char *symlinkinfo, const int buflen, __u16 fid,
-                       const struct nls_table *nls_codepage);
-#endif /* temporarily unused until cifs_symlink fixed */
+extern int CIFSSMBQuerySymLink(const unsigned int xid, struct cifs_tcon *tcon,
+                              __u16 fid, char **symlinkinfo,
+                              const struct nls_table *nls_codepage);
 extern int CIFSSMBOpen(const unsigned int xid, struct cifs_tcon *tcon,
                        const char *fileName, const int disposition,
                        const int access_flags, const int omode,
index a89c4cb4e6cf64e8cbd92fe6f7ceb6b7e941eca3..a3d74fea16233ef9d4c32a0179b4b96cccb8fa2b 100644 (file)
@@ -3067,7 +3067,6 @@ querySymLinkRetry:
        return rc;
 }
 
-#ifdef CONFIG_CIFS_SYMLINK_EXPERIMENTAL
 /*
  *     Recent Windows versions now create symlinks more frequently
  *     and they use the "reparse point" mechanism below.  We can of course
@@ -3079,18 +3078,22 @@ querySymLinkRetry:
  *     it is not compiled in by default until callers fixed up and more tested.
  */
 int
-CIFSSMBQueryReparseLinkInfo(const unsigned int xid, struct cifs_tcon *tcon,
-                       const unsigned char *searchName,
-                       char *symlinkinfo, const int buflen, __u16 fid,
-                       const struct nls_table *nls_codepage)
+CIFSSMBQuerySymLink(const unsigned int xid, struct cifs_tcon *tcon,
+                   __u16 fid, char **symlinkinfo,
+                   const struct nls_table *nls_codepage)
 {
        int rc = 0;
        int bytes_returned;
        struct smb_com_transaction_ioctl_req *pSMB;
        struct smb_com_transaction_ioctl_rsp *pSMBr;
+       bool is_unicode;
+       unsigned int sub_len;
+       char *sub_start;
+       struct reparse_data *reparse_buf;
+       __u32 data_offset, data_count;
+       char *end_of_smb;
 
-       cifs_dbg(FYI, "In Windows reparse style QueryLink for path %s\n",
-                searchName);
+       cifs_dbg(FYI, "In Windows reparse style QueryLink for fid %u\n", fid);
        rc = smb_init(SMB_COM_NT_TRANSACT, 23, tcon, (void **) &pSMB,
                      (void **) &pSMBr);
        if (rc)
@@ -3119,66 +3122,55 @@ CIFSSMBQueryReparseLinkInfo(const unsigned int xid, struct cifs_tcon *tcon,
                         (struct smb_hdr *) pSMBr, &bytes_returned, 0);
        if (rc) {
                cifs_dbg(FYI, "Send error in QueryReparseLinkInfo = %d\n", rc);
-       } else {                /* decode response */
-               __u32 data_offset = le32_to_cpu(pSMBr->DataOffset);
-               __u32 data_count = le32_to_cpu(pSMBr->DataCount);
-               if (get_bcc(&pSMBr->hdr) < 2 || data_offset > 512) {
-                       /* BB also check enough total bytes returned */
-                       rc = -EIO;      /* bad smb */
-                       goto qreparse_out;
-               }
-               if (data_count && (data_count < 2048)) {
-                       char *end_of_smb = 2 /* sizeof byte count */ +
-                              get_bcc(&pSMBr->hdr) + (char *)&pSMBr->ByteCount;
-
-                       struct reparse_data *reparse_buf =
-                                               (struct reparse_data *)
-                                               ((char *)&pSMBr->hdr.Protocol
-                                                                + data_offset);
-                       if ((char *)reparse_buf >= end_of_smb) {
-                               rc = -EIO;
-                               goto qreparse_out;
-                       }
-                       if ((reparse_buf->LinkNamesBuf +
-                               reparse_buf->TargetNameOffset +
-                               reparse_buf->TargetNameLen) > end_of_smb) {
-                               cifs_dbg(FYI, "reparse buf beyond SMB\n");
-                               rc = -EIO;
-                               goto qreparse_out;
-                       }
+               goto qreparse_out;
+       }
 
-                       if (pSMBr->hdr.Flags2 & SMBFLG2_UNICODE) {
-                               cifs_from_ucs2(symlinkinfo, (__le16 *)
-                                               (reparse_buf->LinkNamesBuf +
-                                               reparse_buf->TargetNameOffset),
-                                               buflen,
-                                               reparse_buf->TargetNameLen,
-                                               nls_codepage, 0);
-                       } else { /* ASCII names */
-                               strncpy(symlinkinfo,
-                                       reparse_buf->LinkNamesBuf +
-                                       reparse_buf->TargetNameOffset,
-                                       min_t(const int, buflen,
-                                          reparse_buf->TargetNameLen));
-                       }
-               } else {
-                       rc = -EIO;
-                       cifs_dbg(FYI, "Invalid return data count on get reparse info ioctl\n");
-               }
-               symlinkinfo[buflen] = 0; /* just in case so the caller
-                                       does not go off the end of the buffer */
-               cifs_dbg(FYI, "readlink result - %s\n", symlinkinfo);
+       data_offset = le32_to_cpu(pSMBr->DataOffset);
+       data_count = le32_to_cpu(pSMBr->DataCount);
+       if (get_bcc(&pSMBr->hdr) < 2 || data_offset > 512) {
+               /* BB also check enough total bytes returned */
+               rc = -EIO;      /* bad smb */
+               goto qreparse_out;
+       }
+       if (!data_count || (data_count > 2048)) {
+               rc = -EIO;
+               cifs_dbg(FYI, "Invalid return data count on get reparse info ioctl\n");
+               goto qreparse_out;
+       }
+       end_of_smb = 2 + get_bcc(&pSMBr->hdr) + (char *)&pSMBr->ByteCount;
+       reparse_buf = (struct reparse_data *)
+                               ((char *)&pSMBr->hdr.Protocol + data_offset);
+       if ((char *)reparse_buf >= end_of_smb) {
+               rc = -EIO;
+               goto qreparse_out;
        }
+       if ((reparse_buf->PathBuffer + reparse_buf->PrintNameOffset +
+                               reparse_buf->PrintNameLength) > end_of_smb) {
+               cifs_dbg(FYI, "reparse buf beyond SMB\n");
+               rc = -EIO;
+               goto qreparse_out;
+       }
+       sub_start = reparse_buf->SubstituteNameOffset + reparse_buf->PathBuffer;
+       sub_len = reparse_buf->SubstituteNameLength;
+       if (pSMBr->hdr.Flags2 & SMBFLG2_UNICODE)
+               is_unicode = true;
+       else
+               is_unicode = false;
 
+       /* BB FIXME investigate remapping reserved chars here */
+       *symlinkinfo = cifs_strndup_from_utf16(sub_start, sub_len, is_unicode,
+                                              nls_codepage);
+       if (!*symlinkinfo)
+               rc = -ENOMEM;
 qreparse_out:
        cifs_buf_release(pSMB);
 
-       /* Note: On -EAGAIN error only caller can retry on handle based calls
-               since file handle passed in no longer valid */
-
+       /*
+        * Note: On -EAGAIN error only caller can retry on handle based calls
+        * since file handle passed in no longer valid.
+        */
        return rc;
 }
-#endif /* CIFS_SYMLINK_EXPERIMENTAL */ /* BB temporarily unused */
 
 #ifdef CONFIG_CIFS_POSIX
 
index d67c550c49806254da76ca6f7dd32d29144c3c16..20ac9d4ca2af1842eb2da4b02c03ddce46935f53 100644 (file)
@@ -1575,8 +1575,8 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
                        if (string == NULL)
                                goto out_nomem;
 
-                       if (strnlen(string, MAX_USERNAME_SIZE) >
-                                                       MAX_USERNAME_SIZE) {
+                       if (strnlen(string, CIFS_MAX_USERNAME_LEN) >
+                                                       CIFS_MAX_USERNAME_LEN) {
                                printk(KERN_WARNING "CIFS: username too long\n");
                                goto cifs_parse_mount_err;
                        }
@@ -2221,13 +2221,13 @@ static int match_session(struct cifs_ses *ses, struct smb_vol *vol)
                /* anything else takes username/password */
                if (strncmp(ses->user_name,
                            vol->username ? vol->username : "",
-                           MAX_USERNAME_SIZE))
+                           CIFS_MAX_USERNAME_LEN))
                        return 0;
                if (strlen(vol->username) != 0 &&
                    ses->password != NULL &&
                    strncmp(ses->password,
                            vol->password ? vol->password : "",
-                           MAX_PASSWORD_SIZE))
+                           CIFS_MAX_PASSWORD_LEN))
                        return 0;
        }
        return 1;
@@ -2352,7 +2352,7 @@ cifs_set_cifscreds(struct smb_vol *vol, struct cifs_ses *ses)
        }
 
        len = delim - payload;
-       if (len > MAX_USERNAME_SIZE || len <= 0) {
+       if (len > CIFS_MAX_USERNAME_LEN || len <= 0) {
                cifs_dbg(FYI, "Bad value from username search (len=%zd)\n",
                         len);
                rc = -EINVAL;
@@ -2369,7 +2369,7 @@ cifs_set_cifscreds(struct smb_vol *vol, struct cifs_ses *ses)
        cifs_dbg(FYI, "%s: username=%s\n", __func__, vol->username);
 
        len = key->datalen - (len + 1);
-       if (len > MAX_PASSWORD_SIZE || len <= 0) {
+       if (len > CIFS_MAX_PASSWORD_LEN || len <= 0) {
                cifs_dbg(FYI, "Bad len for password search (len=%zd)\n", len);
                rc = -EINVAL;
                kfree(vol->username);
index 7e36ae34e9479b2614d61c919d393291593ef28f..cdb9f608ec4e1d736c960d25fbc6bc343916cb1d 100644 (file)
@@ -2734,8 +2734,8 @@ cifs_readdata_to_iov(struct cifs_readdata *rdata, const struct iovec *iov,
                /* go while there's data to be copied and no errors */
                if (copy && !rc) {
                        pdata = kmap(page);
-                       rc = memcpy_toiovecend(ii.iov, pdata, ii.iov_offset,
-                                               (int)copy);
+                       rc = memcpy_toiovecend(iov_iter_iovec(&ii), pdata,
+                                              ii.iov_offset, (int)copy);
                        kunmap(page);
                        if (!rc) {
                                *copied += copy;
index 449b6cf09b09dbc15e90f311e09bc011a0157b44..ec0f3423cdac9ff7091296084388bcfc1a9c4076 100644 (file)
@@ -549,6 +549,10 @@ cifs_all_info_to_fattr(struct cifs_fattr *fattr, FILE_ALL_INFO *info,
                 * when Unix extensions are disabled - fake it.
                 */
                fattr->cf_nlink = 2;
+       } else if (fattr->cf_cifsattrs & ATTR_REPARSE) {
+               fattr->cf_mode = S_IFLNK;
+               fattr->cf_dtype = DT_LNK;
+               fattr->cf_nlink = le32_to_cpu(info->NumberOfLinks);
        } else {
                fattr->cf_mode = S_IFREG | cifs_sb->mnt_file_mode;
                fattr->cf_dtype = DT_REG;
index 562044f700e56bf27997bf7ef7490bddf6bc1c91..7e36ceba0c7a72d797a798de500847d4fa6ac66d 100644 (file)
@@ -509,6 +509,7 @@ cifs_follow_link(struct dentry *direntry, struct nameidata *nd)
        struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
        struct tcon_link *tlink = NULL;
        struct cifs_tcon *tcon;
+       struct TCP_Server_Info *server;
 
        xid = get_xid();
 
@@ -519,25 +520,7 @@ cifs_follow_link(struct dentry *direntry, struct nameidata *nd)
                goto out;
        }
        tcon = tlink_tcon(tlink);
-
-       /*
-        * For now, we just handle symlinks with unix extensions enabled.
-        * Eventually we should handle NTFS reparse points, and MacOS
-        * symlink support. For instance...
-        *
-        * rc = CIFSSMBQueryReparseLinkInfo(...)
-        *
-        * For now, just return -EACCES when the server doesn't support posix
-        * extensions. Note that we still allow querying symlinks when posix
-        * extensions are manually disabled. We could disable these as well
-        * but there doesn't seem to be any harm in allowing the client to
-        * read them.
-        */
-       if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS) &&
-           !cap_unix(tcon->ses)) {
-               rc = -EACCES;
-               goto out;
-       }
+       server = tcon->ses->server;
 
        full_path = build_path_from_dentry(direntry);
        if (!full_path)
@@ -559,6 +542,9 @@ cifs_follow_link(struct dentry *direntry, struct nameidata *nd)
        if ((rc != 0) && cap_unix(tcon->ses))
                rc = CIFSSMBUnixQuerySymLink(xid, tcon, full_path, &target_path,
                                             cifs_sb->local_nls);
+       else if (rc != 0 && server->ops->query_symlink)
+               rc = server->ops->query_symlink(xid, tcon, full_path,
+                                               &target_path, cifs_sb);
 
        kfree(full_path);
 out:
index 69d2c826a23badc552bb686b518beaea297b473c..42ef03be089f2bc9aaabd2e3f40d2b6d6ac207a8 100644 (file)
@@ -172,6 +172,9 @@ cifs_fill_common_info(struct cifs_fattr *fattr, struct cifs_sb_info *cifs_sb)
                if (cifs_dfs_is_possible(cifs_sb) &&
                    (fattr->cf_cifsattrs & ATTR_REPARSE))
                        fattr->cf_flags |= CIFS_FATTR_NEED_REVAL;
+       } else if (fattr->cf_cifsattrs & ATTR_REPARSE) {
+               fattr->cf_mode = S_IFLNK;
+               fattr->cf_dtype = DT_LNK;
        } else {
                fattr->cf_mode = S_IFREG | cifs_sb->mnt_file_mode;
                fattr->cf_dtype = DT_REG;
index 08dd37bb23aac8ea04fe979743f8c963990a80f7..a0a62db0f5750fc172e0eb3505fde310144a387b 100644 (file)
@@ -226,7 +226,7 @@ static void unicode_ssetup_strings(char **pbcc_area, struct cifs_ses *ses,
                *(bcc_ptr+1) = 0;
        } else {
                bytes_ret = cifs_strtoUTF16((__le16 *) bcc_ptr, ses->user_name,
-                                           MAX_USERNAME_SIZE, nls_cp);
+                                           CIFS_MAX_USERNAME_LEN, nls_cp);
        }
        bcc_ptr += 2 * bytes_ret;
        bcc_ptr += 2; /* account for null termination */
@@ -246,8 +246,8 @@ static void ascii_ssetup_strings(char **pbcc_area, struct cifs_ses *ses,
        /* BB what about null user mounts - check that we do this BB */
        /* copy user */
        if (ses->user_name != NULL) {
-               strncpy(bcc_ptr, ses->user_name, MAX_USERNAME_SIZE);
-               bcc_ptr += strnlen(ses->user_name, MAX_USERNAME_SIZE);
+               strncpy(bcc_ptr, ses->user_name, CIFS_MAX_USERNAME_LEN);
+               bcc_ptr += strnlen(ses->user_name, CIFS_MAX_USERNAME_LEN);
        }
        /* else null user mount */
        *bcc_ptr = 0;
@@ -501,7 +501,7 @@ int build_ntlmssp_auth_blob(unsigned char *pbuffer,
        } else {
                int len;
                len = cifs_strtoUTF16((__le16 *)tmp, ses->domainName,
-                                     MAX_USERNAME_SIZE, nls_cp);
+                                     CIFS_MAX_USERNAME_LEN, nls_cp);
                len *= 2; /* unicode is 2 bytes each */
                sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - pbuffer);
                sec_blob->DomainName.Length = cpu_to_le16(len);
@@ -517,7 +517,7 @@ int build_ntlmssp_auth_blob(unsigned char *pbuffer,
        } else {
                int len;
                len = cifs_strtoUTF16((__le16 *)tmp, ses->user_name,
-                                     MAX_USERNAME_SIZE, nls_cp);
+                                     CIFS_MAX_USERNAME_LEN, nls_cp);
                len *= 2; /* unicode is 2 bytes each */
                sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - pbuffer);
                sec_blob->UserName.Length = cpu_to_le16(len);
index 60943978aec35bb06360adb7e060802d5775bee1..8fe19c973ee4df30542eac1cf117bea6bb935a1e 100644 (file)
@@ -881,6 +881,37 @@ cifs_mand_lock(const unsigned int xid, struct cifsFileInfo *cfile, __u64 offset,
                           (__u8)type, wait, 0);
 }
 
+static int
+cifs_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
+                  const char *full_path, char **target_path,
+                  struct cifs_sb_info *cifs_sb)
+{
+       int rc;
+       int oplock = 0;
+       __u16 netfid;
+
+       cifs_dbg(FYI, "%s: path: %s\n", __func__, full_path);
+
+       rc = CIFSSMBOpen(xid, tcon, full_path, FILE_OPEN,
+                        FILE_READ_ATTRIBUTES, OPEN_REPARSE_POINT, &netfid,
+                        &oplock, NULL, cifs_sb->local_nls,
+                        cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
+       if (rc)
+               return rc;
+
+       rc = CIFSSMBQuerySymLink(xid, tcon, netfid, target_path,
+                                cifs_sb->local_nls);
+       if (rc) {
+               CIFSSMBClose(xid, tcon, netfid);
+               return rc;
+       }
+
+       convert_delimiter(*target_path, '/');
+       CIFSSMBClose(xid, tcon, netfid);
+       cifs_dbg(FYI, "%s: target path: %s\n", __func__, *target_path);
+       return rc;
+}
+
 struct smb_version_operations smb1_operations = {
        .send_cancel = send_nt_cancel,
        .compare_fids = cifs_compare_fids,
@@ -927,6 +958,7 @@ struct smb_version_operations smb1_operations = {
        .rename_pending_delete = cifs_rename_pending_delete,
        .rename = CIFSSMBRename,
        .create_hardlink = CIFSCreateHardLink,
+       .query_symlink = cifs_query_symlink,
        .open = cifs_open_file,
        .set_fid = cifs_set_fid,
        .close = cifs_close_file,
index 04a81a4142c3235f1bb1693b9be4c73580023c2b..020245d5c9a74c3592a8eacee9bf1baf47f9629b 100644 (file)
@@ -86,7 +86,7 @@ smb2_open_file(const unsigned int xid, struct cifs_open_parms *oparms,
        if (oparms->tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_LEASING)
                memcpy(smb2_oplock + 1, fid->lease_key, SMB2_LEASE_KEY_SIZE);
 
-       rc = SMB2_open(xid, oparms, smb2_path, smb2_oplock, smb2_data);
+       rc = SMB2_open(xid, oparms, smb2_path, smb2_oplock, smb2_data, NULL);
        if (rc)
                goto out;
 
index c6ec1633309abad6464995e4aad8352129f5fed6..78ff88c467b99b9a17c706032a9ed655f92c58b0 100644 (file)
@@ -60,7 +60,7 @@ smb2_open_op_close(const unsigned int xid, struct cifs_tcon *tcon,
        oparms.fid = &fid;
        oparms.reconnect = false;
 
-       rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL);
+       rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL);
        if (rc) {
                kfree(utf16_path);
                return rc;
@@ -136,7 +136,8 @@ smb2_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
                return -ENOMEM;
 
        rc = smb2_open_op_close(xid, tcon, cifs_sb, full_path,
-                               FILE_READ_ATTRIBUTES, FILE_OPEN, 0, smb2_data,
+                               FILE_READ_ATTRIBUTES, FILE_OPEN,
+                               OPEN_REPARSE_POINT, smb2_data,
                                SMB2_OP_QUERY_INFO);
        if (rc)
                goto out;
@@ -191,8 +192,8 @@ smb2_unlink(const unsigned int xid, struct cifs_tcon *tcon, const char *name,
            struct cifs_sb_info *cifs_sb)
 {
        return smb2_open_op_close(xid, tcon, cifs_sb, name, DELETE, FILE_OPEN,
-                                 CREATE_DELETE_ON_CLOSE, NULL,
-                                 SMB2_OP_DELETE);
+                                 CREATE_DELETE_ON_CLOSE | OPEN_REPARSE_POINT,
+                                 NULL, SMB2_OP_DELETE);
 }
 
 static int
index b0c43345cd981c082e3de5965e647a5432197605..6103359fb598b79f79523e4246c7e9874dcefab5 100644 (file)
@@ -171,6 +171,10 @@ smb2_check_message(char *buf, unsigned int length)
        if (4 + len != clc_len) {
                cifs_dbg(FYI, "Calculated size %u length %u mismatch mid %llu\n",
                         clc_len, 4 + len, mid);
+               /* create failed on symlink */
+               if (command == SMB2_CREATE_HE &&
+                   hdr->Status == STATUS_STOPPED_ON_SYMLINK)
+                       return 0;
                /* Windows 7 server returns 24 bytes more */
                if (clc_len + 20 == len && command == SMB2_OPLOCK_BREAK_HE)
                        return 0;
index f259e6cc835791f20e34acce582f83ca1b78102c..91b9e5422e9af9a8c1adcc1c2382060c1d33008d 100644 (file)
@@ -24,6 +24,7 @@
 #include "smb2proto.h"
 #include "cifsproto.h"
 #include "cifs_debug.h"
+#include "cifs_unicode.h"
 #include "smb2status.h"
 #include "smb2glob.h"
 
@@ -229,7 +230,7 @@ smb2_is_path_accessible(const unsigned int xid, struct cifs_tcon *tcon,
        oparms.fid = &fid;
        oparms.reconnect = false;
 
-       rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL);
+       rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL);
        if (rc) {
                kfree(utf16_path);
                return rc;
@@ -463,7 +464,7 @@ smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
        oparms.fid = fid;
        oparms.reconnect = false;
 
-       rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL);
+       rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL);
        kfree(utf16_path);
        if (rc) {
                cifs_dbg(VFS, "open dir failed\n");
@@ -550,7 +551,7 @@ smb2_queryfs(const unsigned int xid, struct cifs_tcon *tcon,
        oparms.fid = &fid;
        oparms.reconnect = false;
 
-       rc = SMB2_open(xid, &oparms, &srch_path, &oplock, NULL);
+       rc = SMB2_open(xid, &oparms, &srch_path, &oplock, NULL, NULL);
        if (rc)
                return rc;
        buf->f_type = SMB2_MAGIC_NUMBER;
@@ -596,6 +597,57 @@ smb2_new_lease_key(struct cifs_fid *fid)
        get_random_bytes(fid->lease_key, SMB2_LEASE_KEY_SIZE);
 }
 
+static int
+smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
+                  const char *full_path, char **target_path,
+                  struct cifs_sb_info *cifs_sb)
+{
+       int rc;
+       __le16 *utf16_path;
+       __u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
+       struct cifs_open_parms oparms;
+       struct cifs_fid fid;
+       struct smb2_err_rsp *err_buf = NULL;
+       struct smb2_symlink_err_rsp *symlink;
+       unsigned int sub_len, sub_offset;
+
+       cifs_dbg(FYI, "%s: path: %s\n", __func__, full_path);
+
+       utf16_path = cifs_convert_path_to_utf16(full_path, cifs_sb);
+       if (!utf16_path)
+               return -ENOMEM;
+
+       oparms.tcon = tcon;
+       oparms.desired_access = FILE_READ_ATTRIBUTES;
+       oparms.disposition = FILE_OPEN;
+       oparms.create_options = 0;
+       oparms.fid = &fid;
+       oparms.reconnect = false;
+
+       rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, &err_buf);
+
+       if (!rc || !err_buf) {
+               kfree(utf16_path);
+               return -ENOENT;
+       }
+       /* open must fail on symlink - reset rc */
+       rc = 0;
+       symlink = (struct smb2_symlink_err_rsp *)err_buf->ErrorData;
+       sub_len = le16_to_cpu(symlink->SubstituteNameLength);
+       sub_offset = le16_to_cpu(symlink->SubstituteNameOffset);
+       *target_path = cifs_strndup_from_utf16(
+                               (char *)symlink->PathBuffer + sub_offset,
+                               sub_len, true, cifs_sb->local_nls);
+       if (!(*target_path)) {
+               kfree(utf16_path);
+               return -ENOMEM;
+       }
+       convert_delimiter(*target_path, '/');
+       cifs_dbg(FYI, "%s: target path: %s\n", __func__, *target_path);
+       kfree(utf16_path);
+       return rc;
+}
+
 struct smb_version_operations smb21_operations = {
        .compare_fids = smb2_compare_fids,
        .setup_request = smb2_setup_request,
@@ -638,6 +690,7 @@ struct smb_version_operations smb21_operations = {
        .unlink = smb2_unlink,
        .rename = smb2_rename_path,
        .create_hardlink = smb2_create_hardlink,
+       .query_symlink = smb2_query_symlink,
        .open = smb2_open_file,
        .set_fid = smb2_set_fid,
        .close = smb2_close_file,
@@ -706,6 +759,7 @@ struct smb_version_operations smb30_operations = {
        .unlink = smb2_unlink,
        .rename = smb2_rename_path,
        .create_hardlink = smb2_create_hardlink,
+       .query_symlink = smb2_query_symlink,
        .open = smb2_open_file,
        .set_fid = smb2_set_fid,
        .close = smb2_close_file,
index abc9c2809b519c50623209d341733c31a752b2b3..5a49861633a62652461ada755e16b187bd32ea1b 100644 (file)
@@ -977,7 +977,8 @@ add_durable_context(struct kvec *iov, unsigned int *num_iovec,
 
 int
 SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
-         __u8 *oplock, struct smb2_file_all_info *buf)
+         __u8 *oplock, struct smb2_file_all_info *buf,
+         struct smb2_err_rsp **err_buf)
 {
        struct smb2_create_req *req;
        struct smb2_create_rsp *rsp;
@@ -1082,6 +1083,9 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
 
        if (rc != 0) {
                cifs_stats_fail_inc(tcon, SMB2_CREATE_HE);
+               if (err_buf)
+                       *err_buf = kmemdup(rsp, get_rfc1002_length(rsp) + 4,
+                                          GFP_KERNEL);
                goto creat_exit;
        }
 
index 36b0d37ea69b8d9f7bab7d1e1e3fcd167afe34e3..40baeae60b081aa8e94757bf992f1f612f26a927 100644 (file)
@@ -150,6 +150,20 @@ struct smb2_err_rsp {
        __u8   ErrorData[1];  /* variable length */
 } __packed;
 
+struct smb2_symlink_err_rsp {
+       __le32 SymLinkLength;
+       __le32 SymLinkErrorTag;
+       __le32 ReparseTag;
+       __le16 ReparseDataLength;
+       __le16 UnparsedPathLength;
+       __le16 SubstituteNameOffset;
+       __le16 SubstituteNameLength;
+       __le16 PrintNameOffset;
+       __le16 PrintNameLength;
+       __le32 Flags;
+       __u8  PathBuffer[0];
+} __packed;
+
 #define SMB2_CLIENT_GUID_SIZE 16
 
 extern __u8 cifs_client_guid[SMB2_CLIENT_GUID_SIZE];
index 1a5ecbed40edac9ae352a1baa2c162697712e9fd..1db89fda1392be62d8a5dd09e4f30bad54346f0d 100644 (file)
@@ -106,7 +106,8 @@ extern int SMB2_tcon(const unsigned int xid, struct cifs_ses *ses,
 extern int SMB2_tdis(const unsigned int xid, struct cifs_tcon *tcon);
 extern int SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms,
                     __le16 *path, __u8 *oplock,
-                    struct smb2_file_all_info *buf);
+                    struct smb2_file_all_info *buf,
+                    struct smb2_err_rsp **err_buf);
 extern int SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon,
                     u64 persistent_fid, u64 volatile_fid, u32 opcode,
                     bool is_fsctl, char *in_data, u32 indatalen,
index 72f816d6cad99d4d1f81433e928d42e540295188..fb4e2b7ea07b3555a6b32fb05eebd7ba6fd24a88 100644 (file)
@@ -675,45 +675,59 @@ fail:
        return;
 }
 
-/*
- * Core dumping helper functions.  These are the only things you should
- * do on a core-file: use only these functions to write out all the
- * necessary info.
- */
-int dump_write(struct file *file, const void *addr, int nr)
+bool dump_emit(struct coredump_params *cprm, const void *addr, size_t size)
 {
-       return !dump_interrupted() &&
-               access_ok(VERIFY_READ, addr, nr) &&
-               file->f_op->write(file, addr, nr, &file->f_pos) == nr;
+       struct file *file;
+       loff_t pos;
+       if (size > cprm->limit - cprm->written || dump_interrupted() ||
+           !access_ok(VERIFY_READ, addr, size))
+               return false;
+       file = cprm->file;
+       pos = file->f_pos;
+       if (file->f_op->write(file, addr, size, &pos) != size)
+               return false;
+       file->f_pos = pos;
+       cprm->written += size;
+       return true;
 }
-EXPORT_SYMBOL(dump_write);
+EXPORT_SYMBOL(dump_emit);
 
-int dump_seek(struct file *file, loff_t off)
+bool dump_skip(struct coredump_params *cprm, size_t off)
 {
-       int ret = 1;
+       loff_t new = cprm->written + off;
+       struct file *file = cprm->file;
+
+       if (!off)
+               return true;
+
+       if (new > cprm->limit)
+               return false;
 
        if (file->f_op->llseek && file->f_op->llseek != no_llseek) {
                if (dump_interrupted() ||
-                   file->f_op->llseek(file, off, SEEK_CUR) < 0)
-                       return 0;
+                   file->f_op->llseek(file, new, SEEK_SET) < 0)
+                       return false;
+               cprm->written = new;
        } else {
                char *buf = (char *)get_zeroed_page(GFP_KERNEL);
-
                if (!buf)
-                       return 0;
-               while (off > 0) {
+                       return false;
+               while (off) {
                        unsigned long n = off;
-
                        if (n > PAGE_SIZE)
                                n = PAGE_SIZE;
-                       if (!dump_write(file, buf, n)) {
-                               ret = 0;
-                               break;
-                       }
+                       if (!dump_emit(cprm, buf, n))
+                               return false;
                        off -= n;
                }
                free_page((unsigned long)buf);
        }
-       return ret;
+       return true;
+}
+EXPORT_SYMBOL(dump_skip);
+
+bool dump_align(struct coredump_params *cprm, int align)
+{
+       return dump_skip(cprm, roundup(cprm->written, align) - cprm->written);
 }
-EXPORT_SYMBOL(dump_seek);
+EXPORT_SYMBOL(dump_align);
index 7ab90f5081eebc4ab8b0de88bef8d0b6310ed113..75a39892c12262451af084b9505b3c26c8c55144 100644 (file)
@@ -127,6 +127,7 @@ struct dio {
        spinlock_t bio_lock;            /* protects BIO fields below */
        int page_errors;                /* errno from get_user_pages() */
        int is_async;                   /* is IO async ? */
+       int should_dirty;               /* should we mark read pages dirty? */
        int io_error;                   /* IO error in completion path */
        unsigned long refcount;         /* direct_io_worker() and bios */
        struct bio *bio_list;           /* singly linked via bi_private */
@@ -377,7 +378,7 @@ static inline void dio_bio_submit(struct dio *dio, struct dio_submit *sdio)
        dio->refcount++;
        spin_unlock_irqrestore(&dio->bio_lock, flags);
 
-       if (dio->is_async && dio->rw == READ)
+       if (dio->is_async && dio->rw == READ && dio->should_dirty)
                bio_set_pages_dirty(bio);
 
        if (sdio->submit_io)
@@ -448,13 +449,14 @@ static int dio_bio_complete(struct dio *dio, struct bio *bio)
        if (!uptodate)
                dio->io_error = -EIO;
 
-       if (dio->is_async && dio->rw == READ) {
+       if (dio->is_async && dio->rw == READ && dio->should_dirty) {
                bio_check_pages_dirty(bio);     /* transfers ownership */
        } else {
                bio_for_each_segment_all(bvec, bio, i) {
                        struct page *page = bvec->bv_page;
 
-                       if (dio->rw == READ && !PageCompound(page))
+                       if (dio->rw == READ && !PageCompound(page) &&
+                           dio->should_dirty)
                                set_page_dirty_lock(page);
                        page_cache_release(page);
                }
@@ -1016,6 +1018,101 @@ static inline int drop_refcount(struct dio *dio)
        return ret2;
 }
 
+static ssize_t direct_IO_iovec(const struct iovec *iov, unsigned long nr_segs,
+                              struct dio *dio, struct dio_submit *sdio,
+                              unsigned blkbits, struct buffer_head *map_bh)
+{
+       size_t bytes;
+       ssize_t retval = 0;
+       int seg;
+       unsigned long user_addr;
+
+       for (seg = 0; seg < nr_segs; seg++) {
+               user_addr = (unsigned long)iov[seg].iov_base;
+               sdio->pages_in_io +=
+                       ((user_addr + iov[seg].iov_len + PAGE_SIZE-1) /
+                               PAGE_SIZE - user_addr / PAGE_SIZE);
+       }
+
+       dio->should_dirty = 1;
+
+       for (seg = 0; seg < nr_segs; seg++) {
+               user_addr = (unsigned long)iov[seg].iov_base;
+               sdio->size += bytes = iov[seg].iov_len;
+
+               /* Index into the first page of the first block */
+               sdio->first_block_in_page = (user_addr & ~PAGE_MASK) >> blkbits;
+               sdio->final_block_in_request = sdio->block_in_file +
+                                               (bytes >> blkbits);
+               /* Page fetching state */
+               sdio->head = 0;
+               sdio->tail = 0;
+               sdio->curr_page = 0;
+
+               sdio->total_pages = 0;
+               if (user_addr & (PAGE_SIZE-1)) {
+                       sdio->total_pages++;
+                       bytes -= PAGE_SIZE - (user_addr & (PAGE_SIZE - 1));
+               }
+               sdio->total_pages += (bytes + PAGE_SIZE - 1) / PAGE_SIZE;
+               sdio->curr_user_address = user_addr;
+
+               retval = do_direct_IO(dio, sdio, map_bh);
+
+               dio->result += iov[seg].iov_len -
+                       ((sdio->final_block_in_request - sdio->block_in_file) <<
+                                       blkbits);
+
+               if (retval) {
+                       dio_cleanup(dio, sdio);
+                       break;
+               }
+       } /* end iovec loop */
+
+       return retval;
+}
+
+static ssize_t direct_IO_bvec(struct bio_vec *bvec, unsigned long nr_segs,
+                             struct dio *dio, struct dio_submit *sdio,
+                             unsigned blkbits, struct buffer_head *map_bh)
+{
+       ssize_t retval = 0;
+       int seg;
+
+       sdio->pages_in_io += nr_segs;
+
+       for (seg = 0; seg < nr_segs; seg++) {
+               sdio->size += bvec[seg].bv_len;
+
+               /* Index into the first page of the first block */
+               sdio->first_block_in_page = bvec[seg].bv_offset >> blkbits;
+               sdio->final_block_in_request = sdio->block_in_file +
+                                               (bvec[seg].bv_len  >> blkbits);
+               /* Page fetching state */
+               sdio->curr_page = 0;
+               page_cache_get(bvec[seg].bv_page);
+               dio->pages[0] = bvec[seg].bv_page;
+               sdio->head = 0;
+               sdio->tail = 1;
+
+               sdio->total_pages = 1;
+               sdio->curr_user_address = 0;
+
+               retval = do_direct_IO(dio, sdio, map_bh);
+
+               dio->result += bvec[seg].bv_len -
+                       ((sdio->final_block_in_request - sdio->block_in_file) <<
+                                       blkbits);
+
+               if (retval) {
+                       dio_cleanup(dio, sdio);
+                       break;
+               }
+       }
+
+       return retval;
+}
+
 /*
  * This is a library function for use by filesystem drivers.
  *
@@ -1043,9 +1140,9 @@ static inline int drop_refcount(struct dio *dio)
  */
 static inline ssize_t
 do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
-       struct block_device *bdev, const struct iovec *iov, loff_t offset, 
-       unsigned long nr_segs, get_block_t get_block, dio_iodone_t end_io,
-       dio_submit_t submit_io, int flags)
+       struct block_device *bdev, struct iov_iter *iter, loff_t offset,
+       get_block_t get_block, dio_iodone_t end_io, dio_submit_t submit_io,
+       int flags)
 {
        int seg;
        size_t size;
@@ -1057,10 +1154,9 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
        loff_t end = offset;
        struct dio *dio;
        struct dio_submit sdio = { 0, };
-       unsigned long user_addr;
-       size_t bytes;
        struct buffer_head map_bh = { 0, };
        struct blk_plug plug;
+       unsigned long nr_segs = iter->nr_segs;
 
        if (rw & WRITE)
                rw = WRITE_ODIRECT;
@@ -1079,20 +1175,49 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
        }
 
        /* Check the memory alignment.  Blocks cannot straddle pages */
-       for (seg = 0; seg < nr_segs; seg++) {
-               addr = (unsigned long)iov[seg].iov_base;
-               size = iov[seg].iov_len;
-               end += size;
-               if (unlikely((addr & blocksize_mask) ||
-                            (size & blocksize_mask))) {
-                       if (bdev)
-                               blkbits = blksize_bits(
-                                        bdev_logical_block_size(bdev));
-                       blocksize_mask = (1 << blkbits) - 1;
-                       if ((addr & blocksize_mask) || (size & blocksize_mask))
-                               goto out;
+       if (iov_iter_has_iovec(iter)) {
+               const struct iovec *iov = iov_iter_iovec(iter);
+
+               for (seg = 0; seg < nr_segs; seg++) {
+                       addr = (unsigned long)iov[seg].iov_base;
+                       size = iov[seg].iov_len;
+                       end += size;
+                       if (unlikely((addr & blocksize_mask) ||
+                                    (size & blocksize_mask))) {
+                               if (bdev)
+                                       blkbits = blksize_bits(
+                                                bdev_logical_block_size(bdev));
+                               blocksize_mask = (1 << blkbits) - 1;
+                               if ((addr & blocksize_mask) ||
+                                   (size & blocksize_mask))
+                                       goto out;
+                       }
                }
-       }
+       } else if (iov_iter_has_bvec(iter)) {
+               /*
+                * Is this necessary, or can we trust the in-kernel
+                * caller? Can we replace this with
+                *      end += iov_iter_count(iter); ?
+                */
+               struct bio_vec *bvec = iov_iter_bvec(iter);
+
+               for (seg = 0; seg < nr_segs; seg++) {
+                       addr = bvec[seg].bv_offset;
+                       size = bvec[seg].bv_len;
+                       end += size;
+                       if (unlikely((addr & blocksize_mask) ||
+                                    (size & blocksize_mask))) {
+                               if (bdev)
+                                       blkbits = blksize_bits(
+                                                bdev_logical_block_size(bdev));
+                               blocksize_mask = (1 << blkbits) - 1;
+                               if ((addr & blocksize_mask) ||
+                                   (size & blocksize_mask))
+                                       goto out;
+                       }
+               }
+       } else
+               BUG();
 
        /* watch out for a 0 len io from a tricksy fs */
        if (rw == READ && end == offset)
@@ -1169,47 +1294,14 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
        if (unlikely(sdio.blkfactor))
                sdio.pages_in_io = 2;
 
-       for (seg = 0; seg < nr_segs; seg++) {
-               user_addr = (unsigned long)iov[seg].iov_base;
-               sdio.pages_in_io +=
-                       ((user_addr + iov[seg].iov_len + PAGE_SIZE-1) /
-                               PAGE_SIZE - user_addr / PAGE_SIZE);
-       }
-
        blk_start_plug(&plug);
 
-       for (seg = 0; seg < nr_segs; seg++) {
-               user_addr = (unsigned long)iov[seg].iov_base;
-               sdio.size += bytes = iov[seg].iov_len;
-
-               /* Index into the first page of the first block */
-               sdio.first_block_in_page = (user_addr & ~PAGE_MASK) >> blkbits;
-               sdio.final_block_in_request = sdio.block_in_file +
-                                               (bytes >> blkbits);
-               /* Page fetching state */
-               sdio.head = 0;
-               sdio.tail = 0;
-               sdio.curr_page = 0;
-
-               sdio.total_pages = 0;
-               if (user_addr & (PAGE_SIZE-1)) {
-                       sdio.total_pages++;
-                       bytes -= PAGE_SIZE - (user_addr & (PAGE_SIZE - 1));
-               }
-               sdio.total_pages += (bytes + PAGE_SIZE - 1) / PAGE_SIZE;
-               sdio.curr_user_address = user_addr;
-
-               retval = do_direct_IO(dio, &sdio, &map_bh);
-
-               dio->result += iov[seg].iov_len -
-                       ((sdio.final_block_in_request - sdio.block_in_file) <<
-                                       blkbits);
-
-               if (retval) {
-                       dio_cleanup(dio, &sdio);
-                       break;
-               }
-       } /* end iovec loop */
+       if (iov_iter_has_iovec(iter))
+               retval = direct_IO_iovec(iov_iter_iovec(iter), nr_segs, dio,
+                                        &sdio, blkbits, &map_bh);
+       else
+               retval = direct_IO_bvec(iov_iter_bvec(iter), nr_segs, dio,
+                                       &sdio, blkbits, &map_bh);
 
        if (retval == -ENOTBLK) {
                /*
@@ -1279,9 +1371,9 @@ out:
 
 ssize_t
 __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
-       struct block_device *bdev, const struct iovec *iov, loff_t offset,
-       unsigned long nr_segs, get_block_t get_block, dio_iodone_t end_io,
-       dio_submit_t submit_io, int flags)
+       struct block_device *bdev, struct iov_iter *iter, loff_t offset,
+       get_block_t get_block, dio_iodone_t end_io, dio_submit_t submit_io,
+       int flags)
 {
        /*
         * The block device state is needed in the end to finally
@@ -1295,9 +1387,8 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
        prefetch(bdev->bd_queue);
        prefetch((char *)bdev->bd_queue + SMP_CACHE_BYTES);
 
-       return do_blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset,
-                                    nr_segs, get_block, end_io,
-                                    submit_io, flags);
+       return do_blockdev_direct_IO(rw, iocb, inode, bdev, iter, offset,
+                                    get_block, end_io, submit_io, flags);
 }
 
 EXPORT_SYMBOL(__blockdev_direct_IO);
index 27a6ba9aaeec7e410c1002b72cd81c344c3d76ef..0e90f0c91b931c74d2c0fdd51b9c54de3330e32f 100644 (file)
@@ -267,10 +267,7 @@ void dlm_callback_work(struct work_struct *work)
 int dlm_callback_start(struct dlm_ls *ls)
 {
        ls->ls_callback_wq = alloc_workqueue("dlm_callback",
-                                            WQ_UNBOUND |
-                                            WQ_MEM_RECLAIM |
-                                            WQ_NON_REENTRANT,
-                                            0);
+                                            WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
        if (!ls->ls_callback_wq) {
                log_print("can't start dlm_callback workqueue");
                return -ENOMEM;
index 812149119fa3c36291508af6fc33a7646c84155f..142e21655eed76e2479404d592f5596c4b745f37 100644 (file)
@@ -493,7 +493,6 @@ static ssize_t device_write(struct file *file, const char __user *buf,
 {
        struct dlm_user_proc *proc = file->private_data;
        struct dlm_write_request *kbuf;
-       sigset_t tmpsig, allsigs;
        int error;
 
 #ifdef CONFIG_COMPAT
@@ -557,9 +556,6 @@ static ssize_t device_write(struct file *file, const char __user *buf,
                goto out_free;
        }
 
-       sigfillset(&allsigs);
-       sigprocmask(SIG_BLOCK, &allsigs, &tmpsig);
-
        error = -EINVAL;
 
        switch (kbuf->cmd)
@@ -567,7 +563,7 @@ static ssize_t device_write(struct file *file, const char __user *buf,
        case DLM_USER_LOCK:
                if (!proc) {
                        log_print("no locking on control device");
-                       goto out_sig;
+                       goto out_free;
                }
                error = device_user_lock(proc, &kbuf->i.lock);
                break;
@@ -575,7 +571,7 @@ static ssize_t device_write(struct file *file, const char __user *buf,
        case DLM_USER_UNLOCK:
                if (!proc) {
                        log_print("no locking on control device");
-                       goto out_sig;
+                       goto out_free;
                }
                error = device_user_unlock(proc, &kbuf->i.lock);
                break;
@@ -583,7 +579,7 @@ static ssize_t device_write(struct file *file, const char __user *buf,
        case DLM_USER_DEADLOCK:
                if (!proc) {
                        log_print("no locking on control device");
-                       goto out_sig;
+                       goto out_free;
                }
                error = device_user_deadlock(proc, &kbuf->i.lock);
                break;
@@ -591,7 +587,7 @@ static ssize_t device_write(struct file *file, const char __user *buf,
        case DLM_USER_CREATE_LOCKSPACE:
                if (proc) {
                        log_print("create/remove only on control device");
-                       goto out_sig;
+                       goto out_free;
                }
                error = device_create_lockspace(&kbuf->i.lspace);
                break;
@@ -599,7 +595,7 @@ static ssize_t device_write(struct file *file, const char __user *buf,
        case DLM_USER_REMOVE_LOCKSPACE:
                if (proc) {
                        log_print("create/remove only on control device");
-                       goto out_sig;
+                       goto out_free;
                }
                error = device_remove_lockspace(&kbuf->i.lspace);
                break;
@@ -607,7 +603,7 @@ static ssize_t device_write(struct file *file, const char __user *buf,
        case DLM_USER_PURGE:
                if (!proc) {
                        log_print("no locking on control device");
-                       goto out_sig;
+                       goto out_free;
                }
                error = device_user_purge(proc, &kbuf->i.purge);
                break;
@@ -617,8 +613,6 @@ static ssize_t device_write(struct file *file, const char __user *buf,
                          kbuf->cmd);
        }
 
- out_sig:
-       sigprocmask(SIG_SETMASK, &tmpsig, NULL);
  out_free:
        kfree(kbuf);
        return error;
@@ -659,15 +653,11 @@ static int device_close(struct inode *inode, struct file *file)
 {
        struct dlm_user_proc *proc = file->private_data;
        struct dlm_ls *ls;
-       sigset_t tmpsig, allsigs;
 
        ls = dlm_find_lockspace_local(proc->lockspace);
        if (!ls)
                return -ENOENT;
 
-       sigfillset(&allsigs);
-       sigprocmask(SIG_BLOCK, &allsigs, &tmpsig);
-
        set_bit(DLM_PROC_FLAGS_CLOSING, &proc->flags);
 
        dlm_clear_proc_locks(ls, proc);
@@ -685,8 +675,6 @@ static int device_close(struct inode *inode, struct file *file)
        /* FIXME: AUTOFREE: if this ls is no longer used do
           device_remove_lockspace() */
 
-       sigprocmask(SIG_SETMASK, &tmpsig, NULL);
-
        return 0;
 }
 
index 992cf95830b5792a5d510f7d588049d27c139a94..3ed6e5f5bb4b73711792494b2a6edf4c415dd5a1 100644 (file)
 /**
  * ecryptfs_read_update_atime
  *
- * generic_file_read updates the atime of upper layer inode.  But, it
+ * generic_file_read_iter updates the atime of upper layer inode.  But, it
  * doesn't give us a chance to update the atime of the lower layer
- * inode.  This function is a wrapper to generic_file_read.  It
- * updates the atime of the lower level inode if generic_file_read
+ * inode.  This function is a wrapper to generic_file_read_iter.  It
+ * updates the atime of the lower level inode if generic_file_read_iter
  * returns without any errors. This is to be used only for file reads.
  * The function to be used for directory reads is ecryptfs_read.
  */
 static ssize_t ecryptfs_read_update_atime(struct kiocb *iocb,
-                               const struct iovec *iov,
-                               unsigned long nr_segs, loff_t pos)
+                               struct iov_iter *iter, loff_t pos)
 {
        ssize_t rc;
        struct path *path;
        struct file *file = iocb->ki_filp;
 
-       rc = generic_file_aio_read(iocb, iov, nr_segs, pos);
+       rc = generic_file_read_iter(iocb, iter, pos);
        /*
         * Even though this is a async interface, we need to wait
         * for IO to finish to update atime
@@ -357,9 +356,9 @@ const struct file_operations ecryptfs_dir_fops = {
 const struct file_operations ecryptfs_main_fops = {
        .llseek = generic_file_llseek,
        .read = do_sync_read,
-       .aio_read = ecryptfs_read_update_atime,
+       .read_iter = ecryptfs_read_update_atime,
        .write = do_sync_write,
-       .aio_write = generic_file_aio_write,
+       .write_iter = generic_file_write_iter,
        .iterate = ecryptfs_readdir,
        .unlocked_ioctl = ecryptfs_unlocked_ioctl,
 #ifdef CONFIG_COMPAT
index 491c6c078e7f5e0ac420646288452d93cca86ce2..20564f8a358a8e9809f5cb086c1900edeea9ffcf 100644 (file)
@@ -69,8 +69,8 @@ const struct file_operations exofs_file_operations = {
        .llseek         = generic_file_llseek,
        .read           = do_sync_read,
        .write          = do_sync_write,
-       .aio_read       = generic_file_aio_read,
-       .aio_write      = generic_file_aio_write,
+       .read_iter      = generic_file_read_iter,
+       .write_iter     = generic_file_write_iter,
        .mmap           = generic_file_mmap,
        .open           = generic_file_open,
        .release        = exofs_release_file,
index a5b3a5db31206f8f3c84781362f7c45e919ddf0e..6af043bab460b0225a2f08a3f9b9b6513c97d9f6 100644 (file)
@@ -64,8 +64,8 @@ const struct file_operations ext2_file_operations = {
        .llseek         = generic_file_llseek,
        .read           = do_sync_read,
        .write          = do_sync_write,
-       .aio_read       = generic_file_aio_read,
-       .aio_write      = generic_file_aio_write,
+       .read_iter      = generic_file_read_iter,
+       .write_iter     = generic_file_write_iter,
        .unlocked_ioctl = ext2_ioctl,
 #ifdef CONFIG_COMPAT
        .compat_ioctl   = ext2_compat_ioctl,
index 0a87bb10998dc00070bc6d05d6f536c21de44e3a..e3e8e3bd62933ca4e527739b925f69f30574d5e1 100644 (file)
@@ -848,18 +848,16 @@ static sector_t ext2_bmap(struct address_space *mapping, sector_t block)
 }
 
 static ssize_t
-ext2_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
-                       loff_t offset, unsigned long nr_segs)
+ext2_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter, loff_t offset)
 {
        struct file *file = iocb->ki_filp;
        struct address_space *mapping = file->f_mapping;
        struct inode *inode = mapping->host;
        ssize_t ret;
 
-       ret = blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
-                                ext2_get_block);
+       ret = blockdev_direct_IO(rw, iocb, inode, iter, offset, ext2_get_block);
        if (ret < 0 && (rw & WRITE))
-               ext2_write_failed(mapping, offset + iov_length(iov, nr_segs));
+               ext2_write_failed(mapping, offset + iov_iter_count(iter));
        return ret;
 }
 
index 25cb413277e906edb1f037ba625ece7aa92903bb..a79677188b54128121a04b4bf671f48b7dd90901 100644 (file)
@@ -52,8 +52,8 @@ const struct file_operations ext3_file_operations = {
        .llseek         = generic_file_llseek,
        .read           = do_sync_read,
        .write          = do_sync_write,
-       .aio_read       = generic_file_aio_read,
-       .aio_write      = generic_file_aio_write,
+       .read_iter      = generic_file_read_iter,
+       .write_iter     = generic_file_write_iter,
        .unlocked_ioctl = ext3_ioctl,
 #ifdef CONFIG_COMPAT
        .compat_ioctl   = ext3_compat_ioctl,
index 2bd85486b87974b390892a2280676a49d8eb274e..85bd13b8b7589b8522921ff7bd10bc89fcdfb270 100644 (file)
@@ -1862,8 +1862,7 @@ static int ext3_releasepage(struct page *page, gfp_t wait)
  * VFS code falls back into buffered path in that case so we are safe.
  */
 static ssize_t ext3_direct_IO(int rw, struct kiocb *iocb,
-                       const struct iovec *iov, loff_t offset,
-                       unsigned long nr_segs)
+                       struct iov_iter *iter, loff_t offset)
 {
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_mapping->host;
@@ -1871,10 +1870,10 @@ static ssize_t ext3_direct_IO(int rw, struct kiocb *iocb,
        handle_t *handle;
        ssize_t ret;
        int orphan = 0;
-       size_t count = iov_length(iov, nr_segs);
+       size_t count = iov_iter_count(iter);
        int retries = 0;
 
-       trace_ext3_direct_IO_enter(inode, offset, iov_length(iov, nr_segs), rw);
+       trace_ext3_direct_IO_enter(inode, offset, count, rw);
 
        if (rw == WRITE) {
                loff_t final_size = offset + count;
@@ -1898,15 +1897,14 @@ static ssize_t ext3_direct_IO(int rw, struct kiocb *iocb,
        }
 
 retry:
-       ret = blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
-                                ext3_get_block);
+       ret = blockdev_direct_IO(rw, iocb, inode, iter, offset, ext3_get_block);
        /*
         * In case of error extending write may have instantiated a few
         * blocks outside i_size. Trim these off again.
         */
        if (unlikely((rw & WRITE) && ret < 0)) {
                loff_t isize = i_size_read(inode);
-               loff_t end = offset + iov_length(iov, nr_segs);
+               loff_t end = offset + count;
 
                if (end > isize)
                        ext3_truncate_failed_direct_write(inode);
@@ -1949,8 +1947,7 @@ retry:
                        ret = err;
        }
 out:
-       trace_ext3_direct_IO_exit(inode, offset,
-                               iov_length(iov, nr_segs), rw, ret);
+       trace_ext3_direct_IO_exit(inode, offset, count, rw, ret);
        return ret;
 }
 
index c47f147507227fda66d6b1a59e6922ab60e36ff9..c50c76190373a06bdb1a7971a935e7a5d00d9888 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/seq_file.h>
 #include <linux/log2.h>
 #include <linux/cleancache.h>
+#include <linux/namei.h>
 
 #include <asm/uaccess.h>
 
@@ -819,6 +820,7 @@ enum {
        Opt_user_xattr, Opt_nouser_xattr, Opt_acl, Opt_noacl,
        Opt_reservation, Opt_noreservation, Opt_noload, Opt_nobh, Opt_bh,
        Opt_commit, Opt_journal_update, Opt_journal_inum, Opt_journal_dev,
+       Opt_journal_path,
        Opt_abort, Opt_data_journal, Opt_data_ordered, Opt_data_writeback,
        Opt_data_err_abort, Opt_data_err_ignore,
        Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota,
@@ -860,6 +862,7 @@ static const match_table_t tokens = {
        {Opt_journal_update, "journal=update"},
        {Opt_journal_inum, "journal=%u"},
        {Opt_journal_dev, "journal_dev=%u"},
+       {Opt_journal_path, "journal_path=%s"},
        {Opt_abort, "abort"},
        {Opt_data_journal, "data=journal"},
        {Opt_data_ordered, "data=ordered"},
@@ -975,6 +978,11 @@ static int parse_options (char *options, struct super_block *sb,
        int option;
        kuid_t uid;
        kgid_t gid;
+       char *journal_path;
+       struct inode *journal_inode;
+       struct path path;
+       int error;
+
 #ifdef CONFIG_QUOTA
        int qfmt;
 #endif
@@ -1129,6 +1137,41 @@ static int parse_options (char *options, struct super_block *sb,
                                return 0;
                        *journal_devnum = option;
                        break;
+               case Opt_journal_path:
+                       if (is_remount) {
+                               ext3_msg(sb, KERN_ERR, "error: cannot specify "
+                                      "journal on remount");
+                               return 0;
+                       }
+
+                       journal_path = match_strdup(&args[0]);
+                       if (!journal_path) {
+                               ext3_msg(sb, KERN_ERR, "error: could not dup "
+                                       "journal device string");
+                               return 0;
+                       }
+
+                       error = kern_path(journal_path, LOOKUP_FOLLOW, &path);
+                       if (error) {
+                               ext3_msg(sb, KERN_ERR, "error: could not find "
+                                       "journal device path: error %d", error);
+                               kfree(journal_path);
+                               return 0;
+                       }
+
+                       journal_inode = path.dentry->d_inode;
+                       if (!S_ISBLK(journal_inode->i_mode)) {
+                               ext3_msg(sb, KERN_ERR, "error: journal path %s "
+                                       "is not a block device", journal_path);
+                               path_put(&path);
+                               kfree(journal_path);
+                               return 0;
+                       }
+
+                       *journal_devnum = new_encode_dev(journal_inode->i_rdev);
+                       path_put(&path);
+                       kfree(journal_path);
+                       break;
                case Opt_noload:
                        set_opt (sbi->s_mount_opt, NOLOAD);
                        break;
index 0ab26fbf33808c565ae9ef4449d6836e4cb01fae..b4d4a7d8ca24b7b4d82d160046dbf03a4bdfa598 100644 (file)
@@ -560,6 +560,18 @@ enum {
        /* Do not put hole in extent cache */
 #define EXT4_GET_BLOCKS_NO_PUT_HOLE            0x0200
 
+/*
+ * The bit position of these flags must not overlap with any of the
+ * EXT4_GET_BLOCKS_*.  They are used by ext4_ext_find_extent(),
+ * read_extent_tree_block(), ext4_split_extent_at(),
+ * ext4_ext_insert_extent(), and ext4_ext_create_new_leaf().
+ * EXT4_EX_NOCACHE is used to indicate that the we shouldn't be
+ * caching the extents when reading from the extent tree while a
+ * truncate or punch hole operation is in progress.
+ */
+#define EXT4_EX_NOCACHE                                0x0400
+#define EXT4_EX_FORCE_CACHE                    0x0800
+
 /*
  * Flags used by ext4_free_blocks
  */
@@ -569,6 +581,7 @@ enum {
 #define EXT4_FREE_BLOCKS_NO_QUOT_UPDATE        0x0008
 #define EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER  0x0010
 #define EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER   0x0020
+#define EXT4_FREE_BLOCKS_RESERVE               0x0040
 
 /*
  * ioctl commands
@@ -590,6 +603,7 @@ enum {
 #define EXT4_IOC_MOVE_EXT              _IOWR('f', 15, struct move_extent)
 #define EXT4_IOC_RESIZE_FS             _IOW('f', 16, __u64)
 #define EXT4_IOC_SWAP_BOOT             _IO('f', 17)
+#define EXT4_IOC_PRECACHE_EXTENTS      _IO('f', 18)
 
 #if defined(__KERNEL__) && defined(CONFIG_COMPAT)
 /*
@@ -1375,6 +1389,7 @@ enum {
                                           nolocking */
        EXT4_STATE_MAY_INLINE_DATA,     /* may have in-inode data */
        EXT4_STATE_ORDERED_MODE,        /* data=ordered mode */
+       EXT4_STATE_EXT_PRECACHED,       /* extents have been precached */
 };
 
 #define EXT4_INODE_BIT_FNS(name, field, offset)                                \
@@ -2112,8 +2127,7 @@ extern void ext4_da_update_reserve_space(struct inode *inode,
 extern int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
                                struct ext4_map_blocks *map, int flags);
 extern ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb,
-                               const struct iovec *iov, loff_t offset,
-                               unsigned long nr_segs);
+                               struct iov_iter *iter, loff_t offset);
 extern int ext4_ind_calc_metadata_amount(struct inode *inode, sector_t lblock);
 extern int ext4_ind_trans_blocks(struct inode *inode, int nrblocks);
 extern void ext4_ind_truncate(handle_t *, struct inode *inode);
@@ -2417,16 +2431,32 @@ do {                                                            \
 #define EXT4_FREECLUSTERS_WATERMARK 0
 #endif
 
+/* Update i_disksize. Requires i_mutex to avoid races with truncate */
 static inline void ext4_update_i_disksize(struct inode *inode, loff_t newsize)
 {
-       /*
-        * XXX: replace with spinlock if seen contended -bzzz
-        */
+       WARN_ON_ONCE(S_ISREG(inode->i_mode) &&
+                    !mutex_is_locked(&inode->i_mutex));
+       down_write(&EXT4_I(inode)->i_data_sem);
+       if (newsize > EXT4_I(inode)->i_disksize)
+               EXT4_I(inode)->i_disksize = newsize;
+       up_write(&EXT4_I(inode)->i_data_sem);
+}
+
+/*
+ * Update i_disksize after writeback has been started. Races with truncate
+ * are avoided by checking i_size under i_data_sem.
+ */
+static inline void ext4_wb_update_i_disksize(struct inode *inode, loff_t newsize)
+{
+       loff_t i_size;
+
        down_write(&EXT4_I(inode)->i_data_sem);
+       i_size = i_size_read(inode);
+       if (newsize > i_size)
+               newsize = i_size;
        if (newsize > EXT4_I(inode)->i_disksize)
                EXT4_I(inode)->i_disksize = newsize;
        up_write(&EXT4_I(inode)->i_data_sem);
-       return ;
 }
 
 struct ext4_group_info {
@@ -2684,7 +2714,8 @@ extern int ext4_ext_insert_extent(handle_t *, struct inode *,
                                  struct ext4_ext_path *,
                                  struct ext4_extent *, int);
 extern struct ext4_ext_path *ext4_ext_find_extent(struct inode *, ext4_lblk_t,
-                                                 struct ext4_ext_path *);
+                                                 struct ext4_ext_path *,
+                                                 int flags);
 extern void ext4_ext_drop_refs(struct ext4_ext_path *);
 extern int ext4_ext_check_inode(struct inode *inode);
 extern int ext4_find_delalloc_range(struct inode *inode,
@@ -2693,7 +2724,7 @@ extern int ext4_find_delalloc_range(struct inode *inode,
 extern int ext4_find_delalloc_cluster(struct inode *inode, ext4_lblk_t lblk);
 extern int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
                        __u64 start, __u64 len);
-
+extern int ext4_ext_precache(struct inode *inode);
 
 /* move_extent.c */
 extern void ext4_double_down_write_data_sem(struct inode *first,
index 72ba4705d4fa40e12edd1097f0815381688df7e6..62b21ccea882f349e0d0746795733fae714b3cd2 100644 (file)
@@ -407,7 +407,7 @@ static int ext4_valid_extent_entries(struct inode *inode,
 
 static int __ext4_ext_check(const char *function, unsigned int line,
                            struct inode *inode, struct ext4_extent_header *eh,
-                           int depth)
+                           int depth, ext4_fsblk_t pblk)
 {
        const char *error_msg;
        int max = 0;
@@ -447,42 +447,149 @@ static int __ext4_ext_check(const char *function, unsigned int line,
 
 corrupted:
        ext4_error_inode(inode, function, line, 0,
-                       "bad header/extent: %s - magic %x, "
-                       "entries %u, max %u(%u), depth %u(%u)",
-                       error_msg, le16_to_cpu(eh->eh_magic),
-                       le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max),
-                       max, le16_to_cpu(eh->eh_depth), depth);
-
+                        "pblk %llu bad header/extent: %s - magic %x, "
+                        "entries %u, max %u(%u), depth %u(%u)",
+                        (unsigned long long) pblk, error_msg,
+                        le16_to_cpu(eh->eh_magic),
+                        le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max),
+                        max, le16_to_cpu(eh->eh_depth), depth);
        return -EIO;
 }
 
-#define ext4_ext_check(inode, eh, depth)       \
-       __ext4_ext_check(__func__, __LINE__, inode, eh, depth)
+#define ext4_ext_check(inode, eh, depth, pblk)                 \
+       __ext4_ext_check(__func__, __LINE__, (inode), (eh), (depth), (pblk))
 
 int ext4_ext_check_inode(struct inode *inode)
 {
-       return ext4_ext_check(inode, ext_inode_hdr(inode), ext_depth(inode));
+       return ext4_ext_check(inode, ext_inode_hdr(inode), ext_depth(inode), 0);
 }
 
-static int __ext4_ext_check_block(const char *function, unsigned int line,
-                                 struct inode *inode,
-                                 struct ext4_extent_header *eh,
-                                 int depth,
-                                 struct buffer_head *bh)
+static struct buffer_head *
+__read_extent_tree_block(const char *function, unsigned int line,
+                        struct inode *inode, ext4_fsblk_t pblk, int depth,
+                        int flags)
 {
-       int ret;
+       struct buffer_head              *bh;
+       int                             err;
 
-       if (buffer_verified(bh))
-               return 0;
-       ret = ext4_ext_check(inode, eh, depth);
-       if (ret)
-               return ret;
+       bh = sb_getblk(inode->i_sb, pblk);
+       if (unlikely(!bh))
+               return ERR_PTR(-ENOMEM);
+
+       if (!bh_uptodate_or_lock(bh)) {
+               trace_ext4_ext_load_extent(inode, pblk, _RET_IP_);
+               err = bh_submit_read(bh);
+               if (err < 0)
+                       goto errout;
+       }
+       if (buffer_verified(bh) && !(flags & EXT4_EX_FORCE_CACHE))
+               return bh;
+       err = __ext4_ext_check(function, line, inode,
+                              ext_block_hdr(bh), depth, pblk);
+       if (err)
+               goto errout;
        set_buffer_verified(bh);
-       return ret;
+       /*
+        * If this is a leaf block, cache all of its entries
+        */
+       if (!(flags & EXT4_EX_NOCACHE) && depth == 0) {
+               struct ext4_extent_header *eh = ext_block_hdr(bh);
+               struct ext4_extent *ex = EXT_FIRST_EXTENT(eh);
+               ext4_lblk_t prev = 0;
+               int i;
+
+               for (i = le16_to_cpu(eh->eh_entries); i > 0; i--, ex++) {
+                       unsigned int status = EXTENT_STATUS_WRITTEN;
+                       ext4_lblk_t lblk = le32_to_cpu(ex->ee_block);
+                       int len = ext4_ext_get_actual_len(ex);
+
+                       if (prev && (prev != lblk))
+                               ext4_es_cache_extent(inode, prev,
+                                                    lblk - prev, ~0,
+                                                    EXTENT_STATUS_HOLE);
+
+                       if (ext4_ext_is_uninitialized(ex))
+                               status = EXTENT_STATUS_UNWRITTEN;
+                       ext4_es_cache_extent(inode, lblk, len,
+                                            ext4_ext_pblock(ex), status);
+                       prev = lblk + len;
+               }
+       }
+       return bh;
+errout:
+       put_bh(bh);
+       return ERR_PTR(err);
+
 }
 
-#define ext4_ext_check_block(inode, eh, depth, bh)     \
-       __ext4_ext_check_block(__func__, __LINE__, inode, eh, depth, bh)
+#define read_extent_tree_block(inode, pblk, depth, flags)              \
+       __read_extent_tree_block(__func__, __LINE__, (inode), (pblk),   \
+                                (depth), (flags))
+
+/*
+ * This function is called to cache a file's extent information in the
+ * extent status tree
+ */
+int ext4_ext_precache(struct inode *inode)
+{
+       struct ext4_inode_info *ei = EXT4_I(inode);
+       struct ext4_ext_path *path = NULL;
+       struct buffer_head *bh;
+       int i = 0, depth, ret = 0;
+
+       if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
+               return 0;       /* not an extent-mapped inode */
+
+       down_read(&ei->i_data_sem);
+       depth = ext_depth(inode);
+
+       path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1),
+                      GFP_NOFS);
+       if (path == NULL) {
+               up_read(&ei->i_data_sem);
+               return -ENOMEM;
+       }
+
+       /* Don't cache anything if there are no external extent blocks */
+       if (depth == 0)
+               goto out;
+       path[0].p_hdr = ext_inode_hdr(inode);
+       ret = ext4_ext_check(inode, path[0].p_hdr, depth, 0);
+       if (ret)
+               goto out;
+       path[0].p_idx = EXT_FIRST_INDEX(path[0].p_hdr);
+       while (i >= 0) {
+               /*
+                * If this is a leaf block or we've reached the end of
+                * the index block, go up
+                */
+               if ((i == depth) ||
+                   path[i].p_idx > EXT_LAST_INDEX(path[i].p_hdr)) {
+                       brelse(path[i].p_bh);
+                       path[i].p_bh = NULL;
+                       i--;
+                       continue;
+               }
+               bh = read_extent_tree_block(inode,
+                                           ext4_idx_pblock(path[i].p_idx++),
+                                           depth - i - 1,
+                                           EXT4_EX_FORCE_CACHE);
+               if (IS_ERR(bh)) {
+                       ret = PTR_ERR(bh);
+                       break;
+               }
+               i++;
+               path[i].p_bh = bh;
+               path[i].p_hdr = ext_block_hdr(bh);
+               path[i].p_idx = EXT_FIRST_INDEX(path[i].p_hdr);
+       }
+       ext4_set_inode_state(inode, EXT4_STATE_EXT_PRECACHED);
+out:
+       up_read(&ei->i_data_sem);
+       ext4_ext_drop_refs(path);
+       kfree(path);
+       return ret;
+}
 
 #ifdef EXT_DEBUG
 static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path)
@@ -716,7 +823,7 @@ int ext4_ext_tree_init(handle_t *handle, struct inode *inode)
 
 struct ext4_ext_path *
 ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block,
-                                       struct ext4_ext_path *path)
+                    struct ext4_ext_path *path, int flags)
 {
        struct ext4_extent_header *eh;
        struct buffer_head *bh;
@@ -748,20 +855,13 @@ ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block,
                path[ppos].p_depth = i;
                path[ppos].p_ext = NULL;
 
-               bh = sb_getblk(inode->i_sb, path[ppos].p_block);
-               if (unlikely(!bh)) {
-                       ret = -ENOMEM;
+               bh = read_extent_tree_block(inode, path[ppos].p_block, --i,
+                                           flags);
+               if (IS_ERR(bh)) {
+                       ret = PTR_ERR(bh);
                        goto err;
                }
-               if (!bh_uptodate_or_lock(bh)) {
-                       trace_ext4_ext_load_extent(inode, block,
-                                               path[ppos].p_block);
-                       ret = bh_submit_read(bh);
-                       if (ret < 0) {
-                               put_bh(bh);
-                               goto err;
-                       }
-               }
+
                eh = ext_block_hdr(bh);
                ppos++;
                if (unlikely(ppos > depth)) {
@@ -773,11 +873,6 @@ ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block,
                }
                path[ppos].p_bh = bh;
                path[ppos].p_hdr = eh;
-               i--;
-
-               ret = ext4_ext_check_block(inode, eh, i, bh);
-               if (ret < 0)
-                       goto err;
        }
 
        path[ppos].p_depth = i;
@@ -1198,7 +1293,8 @@ out:
  * if no free index is found, then it requests in-depth growing.
  */
 static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
-                                   unsigned int flags,
+                                   unsigned int mb_flags,
+                                   unsigned int gb_flags,
                                    struct ext4_ext_path *path,
                                    struct ext4_extent *newext)
 {
@@ -1220,7 +1316,7 @@ repeat:
        if (EXT_HAS_FREE_INDEX(curp)) {
                /* if we found index with free entry, then use that
                 * entry: create all needed subtree and add new leaf */
-               err = ext4_ext_split(handle, inode, flags, path, newext, i);
+               err = ext4_ext_split(handle, inode, mb_flags, path, newext, i);
                if (err)
                        goto out;
 
@@ -1228,12 +1324,12 @@ repeat:
                ext4_ext_drop_refs(path);
                path = ext4_ext_find_extent(inode,
                                    (ext4_lblk_t)le32_to_cpu(newext->ee_block),
-                                   path);
+                                   path, gb_flags);
                if (IS_ERR(path))
                        err = PTR_ERR(path);
        } else {
                /* tree is full, time to grow in depth */
-               err = ext4_ext_grow_indepth(handle, inode, flags, newext);
+               err = ext4_ext_grow_indepth(handle, inode, mb_flags, newext);
                if (err)
                        goto out;
 
@@ -1241,7 +1337,7 @@ repeat:
                ext4_ext_drop_refs(path);
                path = ext4_ext_find_extent(inode,
                                   (ext4_lblk_t)le32_to_cpu(newext->ee_block),
-                                   path);
+                                   path, gb_flags);
                if (IS_ERR(path)) {
                        err = PTR_ERR(path);
                        goto out;
@@ -1412,29 +1508,21 @@ got_index:
        ix++;
        block = ext4_idx_pblock(ix);
        while (++depth < path->p_depth) {
-               bh = sb_bread(inode->i_sb, block);
-               if (bh == NULL)
-                       return -EIO;
-               eh = ext_block_hdr(bh);
                /* subtract from p_depth to get proper eh_depth */
-               if (ext4_ext_check_block(inode, eh,
-                                        path->p_depth - depth, bh)) {
-                       put_bh(bh);
-                       return -EIO;
-               }
+               bh = read_extent_tree_block(inode, block,
+                                           path->p_depth - depth, 0);
+               if (IS_ERR(bh))
+                       return PTR_ERR(bh);
+               eh = ext_block_hdr(bh);
                ix = EXT_FIRST_INDEX(eh);
                block = ext4_idx_pblock(ix);
                put_bh(bh);
        }
 
-       bh = sb_bread(inode->i_sb, block);
-       if (bh == NULL)
-               return -EIO;
+       bh = read_extent_tree_block(inode, block, path->p_depth - depth, 0);
+       if (IS_ERR(bh))
+               return PTR_ERR(bh);
        eh = ext_block_hdr(bh);
-       if (ext4_ext_check_block(inode, eh, path->p_depth - depth, bh)) {
-               put_bh(bh);
-               return -EIO;
-       }
        ex = EXT_FIRST_EXTENT(eh);
 found_extent:
        *logical = le32_to_cpu(ex->ee_block);
@@ -1705,7 +1793,8 @@ static void ext4_ext_try_to_merge_up(handle_t *handle,
 
        brelse(path[1].p_bh);
        ext4_free_blocks(handle, inode, NULL, blk, 1,
-                        EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
+                        EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET |
+                        EXT4_FREE_BLOCKS_RESERVE);
 }
 
 /*
@@ -1793,7 +1882,7 @@ out:
  */
 int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
                                struct ext4_ext_path *path,
-                               struct ext4_extent *newext, int flag)
+                               struct ext4_extent *newext, int gb_flags)
 {
        struct ext4_extent_header *eh;
        struct ext4_extent *ex, *fex;
@@ -1802,7 +1891,7 @@ int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
        int depth, len, err;
        ext4_lblk_t next;
        unsigned uninitialized = 0;
-       int flags = 0;
+       int mb_flags = 0;
 
        if (unlikely(ext4_ext_get_actual_len(newext) == 0)) {
                EXT4_ERROR_INODE(inode, "ext4_ext_get_actual_len(newext) == 0");
@@ -1817,7 +1906,7 @@ int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
        }
 
        /* try to insert block into found extent and return */
-       if (ex && !(flag & EXT4_GET_BLOCKS_PRE_IO)) {
+       if (ex && !(gb_flags & EXT4_GET_BLOCKS_PRE_IO)) {
 
                /*
                 * Try to see whether we should rather test the extent on
@@ -1920,7 +2009,7 @@ prepend:
        if (next != EXT_MAX_BLOCKS) {
                ext_debug("next leaf block - %u\n", next);
                BUG_ON(npath != NULL);
-               npath = ext4_ext_find_extent(inode, next, NULL);
+               npath = ext4_ext_find_extent(inode, next, NULL, 0);
                if (IS_ERR(npath))
                        return PTR_ERR(npath);
                BUG_ON(npath->p_depth != path->p_depth);
@@ -1939,9 +2028,10 @@ prepend:
         * There is no free space in the found leaf.
         * We're gonna add a new leaf in the tree.
         */
-       if (flag & EXT4_GET_BLOCKS_METADATA_NOFAIL)
-               flags = EXT4_MB_USE_RESERVED;
-       err = ext4_ext_create_new_leaf(handle, inode, flags, path, newext);
+       if (gb_flags & EXT4_GET_BLOCKS_METADATA_NOFAIL)
+               mb_flags = EXT4_MB_USE_RESERVED;
+       err = ext4_ext_create_new_leaf(handle, inode, mb_flags, gb_flags,
+                                      path, newext);
        if (err)
                goto cleanup;
        depth = ext_depth(inode);
@@ -2007,7 +2097,7 @@ has_space:
 
 merge:
        /* try to merge extents */
-       if (!(flag & EXT4_GET_BLOCKS_PRE_IO))
+       if (!(gb_flags & EXT4_GET_BLOCKS_PRE_IO))
                ext4_ext_try_to_merge(handle, inode, path, nearex);
 
 
@@ -2050,7 +2140,7 @@ static int ext4_fill_fiemap_extents(struct inode *inode,
                        path = NULL;
                }
 
-               path = ext4_ext_find_extent(inode, block, path);
+               path = ext4_ext_find_extent(inode, block, path, 0);
                if (IS_ERR(path)) {
                        up_read(&EXT4_I(inode)->i_data_sem);
                        err = PTR_ERR(path);
@@ -2712,7 +2802,7 @@ again:
                ext4_lblk_t ee_block;
 
                /* find extent for this block */
-               path = ext4_ext_find_extent(inode, end, NULL);
+               path = ext4_ext_find_extent(inode, end, NULL, EXT4_EX_NOCACHE);
                if (IS_ERR(path)) {
                        ext4_journal_stop(handle);
                        return PTR_ERR(path);
@@ -2754,6 +2844,7 @@ again:
                         */
                        err = ext4_split_extent_at(handle, inode, path,
                                        end + 1, split_flag,
+                                       EXT4_EX_NOCACHE |
                                        EXT4_GET_BLOCKS_PRE_IO |
                                        EXT4_GET_BLOCKS_METADATA_NOFAIL);
 
@@ -2782,7 +2873,7 @@ again:
                path[0].p_hdr = ext_inode_hdr(inode);
                i = 0;
 
-               if (ext4_ext_check(inode, path[0].p_hdr, depth)) {
+               if (ext4_ext_check(inode, path[0].p_hdr, depth, 0)) {
                        err = -EIO;
                        goto out;
                }
@@ -2829,10 +2920,12 @@ again:
                        ext_debug("move to level %d (block %llu)\n",
                                  i + 1, ext4_idx_pblock(path[i].p_idx));
                        memset(path + i + 1, 0, sizeof(*path));
-                       bh = sb_bread(sb, ext4_idx_pblock(path[i].p_idx));
-                       if (!bh) {
+                       bh = read_extent_tree_block(inode,
+                               ext4_idx_pblock(path[i].p_idx), depth - i - 1,
+                               EXT4_EX_NOCACHE);
+                       if (IS_ERR(bh)) {
                                /* should we reset i_size? */
-                               err = -EIO;
+                               err = PTR_ERR(bh);
                                break;
                        }
                        /* Yield here to deal with large extent trees.
@@ -2842,11 +2935,6 @@ again:
                                err = -EIO;
                                break;
                        }
-                       if (ext4_ext_check_block(inode, ext_block_hdr(bh),
-                                                       depth - i - 1, bh)) {
-                               err = -EIO;
-                               break;
-                       }
                        path[i + 1].p_bh = bh;
 
                        /* save actual number of indexes since this
@@ -3181,7 +3269,7 @@ static int ext4_split_extent(handle_t *handle,
         * result in split of original leaf or extent zeroout.
         */
        ext4_ext_drop_refs(path);
-       path = ext4_ext_find_extent(inode, map->m_lblk, path);
+       path = ext4_ext_find_extent(inode, map->m_lblk, path, 0);
        if (IS_ERR(path))
                return PTR_ERR(path);
        depth = ext_depth(inode);
@@ -3565,7 +3653,7 @@ static int ext4_convert_unwritten_extents_endio(handle_t *handle,
                if (err < 0)
                        goto out;
                ext4_ext_drop_refs(path);
-               path = ext4_ext_find_extent(inode, map->m_lblk, path);
+               path = ext4_ext_find_extent(inode, map->m_lblk, path, 0);
                if (IS_ERR(path)) {
                        err = PTR_ERR(path);
                        goto out;
@@ -4052,7 +4140,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
        trace_ext4_ext_map_blocks_enter(inode, map->m_lblk, map->m_len, flags);
 
        /* find extent for this block */
-       path = ext4_ext_find_extent(inode, map->m_lblk, NULL);
+       path = ext4_ext_find_extent(inode, map->m_lblk, NULL, 0);
        if (IS_ERR(path)) {
                err = PTR_ERR(path);
                path = NULL;
@@ -4744,6 +4832,12 @@ int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
                        return error;
        }
 
+       if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) {
+               error = ext4_ext_precache(inode);
+               if (error)
+                       return error;
+       }
+
        /* fallback to generic here if not in extents fmt */
        if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
                return generic_block_fiemap(inode, fieinfo, start, len,
@@ -4771,6 +4865,6 @@ int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
                error = ext4_fill_fiemap_extents(inode, start_blk,
                                                 len_blks, fieinfo);
        }
-
+       ext4_es_lru_add(inode);
        return error;
 }
index 91cb110da1b4fe6fe9e334073ddae337507cd5d6..0e88a367b535f0b1321e68c6a5d742d910a428e6 100644 (file)
@@ -263,7 +263,7 @@ void ext4_es_find_delayed_extent_range(struct inode *inode,
        if (tree->cache_es) {
                es1 = tree->cache_es;
                if (in_range(lblk, es1->es_lblk, es1->es_len)) {
-                       es_debug("%u cached by [%u/%u) %llu %llx\n",
+                       es_debug("%u cached by [%u/%u) %llu %x\n",
                                 lblk, es1->es_lblk, es1->es_len,
                                 ext4_es_pblock(es1), ext4_es_status(es1));
                        goto out;
@@ -419,7 +419,7 @@ static void ext4_es_insert_extent_ext_check(struct inode *inode,
        unsigned short ee_len;
        int depth, ee_status, es_status;
 
-       path = ext4_ext_find_extent(inode, es->es_lblk, NULL);
+       path = ext4_ext_find_extent(inode, es->es_lblk, NULL, EXT4_EX_NOCACHE);
        if (IS_ERR(path))
                return;
 
@@ -641,13 +641,13 @@ out:
  */
 int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
                          ext4_lblk_t len, ext4_fsblk_t pblk,
-                         unsigned long long status)
+                         unsigned int status)
 {
        struct extent_status newes;
        ext4_lblk_t end = lblk + len - 1;
        int err = 0;
 
-       es_debug("add [%u/%u) %llu %llx to extent status tree of inode %lu\n",
+       es_debug("add [%u/%u) %llu %x to extent status tree of inode %lu\n",
                 lblk, len, pblk, status, inode->i_ino);
 
        if (!len)
@@ -683,6 +683,38 @@ error:
        return err;
 }
 
+/*
+ * ext4_es_cache_extent() inserts information into the extent status
+ * tree if and only if there isn't information about the range in
+ * question already.
+ */
+void ext4_es_cache_extent(struct inode *inode, ext4_lblk_t lblk,
+                         ext4_lblk_t len, ext4_fsblk_t pblk,
+                         unsigned int status)
+{
+       struct extent_status *es;
+       struct extent_status newes;
+       ext4_lblk_t end = lblk + len - 1;
+
+       newes.es_lblk = lblk;
+       newes.es_len = len;
+       ext4_es_store_pblock(&newes, pblk);
+       ext4_es_store_status(&newes, status);
+       trace_ext4_es_cache_extent(inode, &newes);
+
+       if (!len)
+               return;
+
+       BUG_ON(end < lblk);
+
+       write_lock(&EXT4_I(inode)->i_es_lock);
+
+       es = __es_tree_search(&EXT4_I(inode)->i_es_tree.root, lblk);
+       if (!es || es->es_lblk > end)
+               __es_insert_extent(inode, &newes);
+       write_unlock(&EXT4_I(inode)->i_es_lock);
+}
+
 /*
  * ext4_es_lookup_extent() looks up an extent in extent status tree.
  *
@@ -895,6 +927,12 @@ static int ext4_inode_touch_time_cmp(void *priv, struct list_head *a,
        eia = list_entry(a, struct ext4_inode_info, i_es_lru);
        eib = list_entry(b, struct ext4_inode_info, i_es_lru);
 
+       if (ext4_test_inode_state(&eia->vfs_inode, EXT4_STATE_EXT_PRECACHED) &&
+           !ext4_test_inode_state(&eib->vfs_inode, EXT4_STATE_EXT_PRECACHED))
+               return 1;
+       if (!ext4_test_inode_state(&eia->vfs_inode, EXT4_STATE_EXT_PRECACHED) &&
+           ext4_test_inode_state(&eib->vfs_inode, EXT4_STATE_EXT_PRECACHED))
+               return -1;
        if (eia->i_touch_when == eib->i_touch_when)
                return 0;
        if (time_after(eia->i_touch_when, eib->i_touch_when))
@@ -908,21 +946,13 @@ static int __ext4_es_shrink(struct ext4_sb_info *sbi, int nr_to_scan,
 {
        struct ext4_inode_info *ei;
        struct list_head *cur, *tmp;
-       LIST_HEAD(skiped);
+       LIST_HEAD(skipped);
        int ret, nr_shrunk = 0;
+       int retried = 0, skip_precached = 1, nr_skipped = 0;
 
        spin_lock(&sbi->s_es_lru_lock);
 
-       /*
-        * If the inode that is at the head of LRU list is newer than
-        * last_sorted time, that means that we need to sort this list.
-        */
-       ei = list_first_entry(&sbi->s_es_lru, struct ext4_inode_info, i_es_lru);
-       if (sbi->s_es_last_sorted < ei->i_touch_when) {
-               list_sort(NULL, &sbi->s_es_lru, ext4_inode_touch_time_cmp);
-               sbi->s_es_last_sorted = jiffies;
-       }
-
+retry:
        list_for_each_safe(cur, tmp, &sbi->s_es_lru) {
                /*
                 * If we have already reclaimed all extents from extent
@@ -933,9 +963,16 @@ static int __ext4_es_shrink(struct ext4_sb_info *sbi, int nr_to_scan,
 
                ei = list_entry(cur, struct ext4_inode_info, i_es_lru);
 
-               /* Skip the inode that is newer than the last_sorted time */
-               if (sbi->s_es_last_sorted < ei->i_touch_when) {
-                       list_move_tail(cur, &skiped);
+               /*
+                * Skip the inode that is newer than the last_sorted
+                * time.  Normally we try hard to avoid shrinking
+                * precached inodes, but we will as a last resort.
+                */
+               if ((sbi->s_es_last_sorted < ei->i_touch_when) ||
+                   (skip_precached && ext4_test_inode_state(&ei->vfs_inode,
+                                               EXT4_STATE_EXT_PRECACHED))) {
+                       nr_skipped++;
+                       list_move_tail(cur, &skipped);
                        continue;
                }
 
@@ -955,11 +992,33 @@ static int __ext4_es_shrink(struct ext4_sb_info *sbi, int nr_to_scan,
        }
 
        /* Move the newer inodes into the tail of the LRU list. */
-       list_splice_tail(&skiped, &sbi->s_es_lru);
+       list_splice_tail(&skipped, &sbi->s_es_lru);
+       INIT_LIST_HEAD(&skipped);
+
+       /*
+        * If we skipped any inodes, and we weren't able to make any
+        * forward progress, sort the list and try again.
+        */
+       if ((nr_shrunk == 0) && nr_skipped && !retried) {
+               retried++;
+               list_sort(NULL, &sbi->s_es_lru, ext4_inode_touch_time_cmp);
+               sbi->s_es_last_sorted = jiffies;
+               ei = list_first_entry(&sbi->s_es_lru, struct ext4_inode_info,
+                                     i_es_lru);
+               /*
+                * If there are no non-precached inodes left on the
+                * list, start releasing precached extents.
+                */
+               if (ext4_test_inode_state(&ei->vfs_inode,
+                                         EXT4_STATE_EXT_PRECACHED))
+                       skip_precached = 0;
+               goto retry;
+       }
+
        spin_unlock(&sbi->s_es_lru_lock);
 
        if (locked_ei && nr_shrunk == 0)
-               nr_shrunk = __es_try_to_reclaim_extents(ei, nr_to_scan);
+               nr_shrunk = __es_try_to_reclaim_extents(locked_ei, nr_to_scan);
 
        return nr_shrunk;
 }
@@ -1034,10 +1093,16 @@ static int __es_try_to_reclaim_extents(struct ext4_inode_info *ei,
        struct rb_node *node;
        struct extent_status *es;
        int nr_shrunk = 0;
+       static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
+                                     DEFAULT_RATELIMIT_BURST);
 
        if (ei->i_es_lru_nr == 0)
                return 0;
 
+       if (ext4_test_inode_state(inode, EXT4_STATE_EXT_PRECACHED) &&
+           __ratelimit(&_rs))
+               ext4_warning(inode->i_sb, "forced shrink of precached extents");
+
        node = rb_first(&tree->root);
        while (node != NULL) {
                es = rb_entry(node, struct extent_status, rb_node);
index e936730cc5b029c45153d47a026028aab8a23283..3e83aef3653a03938a0b9f30533ffbd1566b2fe1 100644 (file)
 /*
  * These flags live in the high bits of extent_status.es_pblk
  */
-#define EXTENT_STATUS_WRITTEN  (1ULL << 63)
-#define EXTENT_STATUS_UNWRITTEN (1ULL << 62)
-#define EXTENT_STATUS_DELAYED  (1ULL << 61)
-#define EXTENT_STATUS_HOLE     (1ULL << 60)
+#define ES_SHIFT       60
+
+#define EXTENT_STATUS_WRITTEN  (1 << 3)
+#define EXTENT_STATUS_UNWRITTEN (1 << 2)
+#define EXTENT_STATUS_DELAYED  (1 << 1)
+#define EXTENT_STATUS_HOLE     (1 << 0)
 
 #define EXTENT_STATUS_FLAGS    (EXTENT_STATUS_WRITTEN | \
                                 EXTENT_STATUS_UNWRITTEN | \
                                 EXTENT_STATUS_DELAYED | \
                                 EXTENT_STATUS_HOLE)
 
+#define ES_WRITTEN             (1ULL << 63)
+#define ES_UNWRITTEN           (1ULL << 62)
+#define ES_DELAYED             (1ULL << 61)
+#define ES_HOLE                        (1ULL << 60)
+
+#define ES_MASK                        (ES_WRITTEN | ES_UNWRITTEN | \
+                                ES_DELAYED | ES_HOLE)
+
 struct ext4_sb_info;
 struct ext4_extent;
 
@@ -60,7 +70,10 @@ extern void ext4_es_init_tree(struct ext4_es_tree *tree);
 
 extern int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
                                 ext4_lblk_t len, ext4_fsblk_t pblk,
-                                unsigned long long status);
+                                unsigned int status);
+extern void ext4_es_cache_extent(struct inode *inode, ext4_lblk_t lblk,
+                                ext4_lblk_t len, ext4_fsblk_t pblk,
+                                unsigned int status);
 extern int ext4_es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
                                 ext4_lblk_t len);
 extern void ext4_es_find_delayed_extent_range(struct inode *inode,
@@ -72,32 +85,32 @@ extern int ext4_es_zeroout(struct inode *inode, struct ext4_extent *ex);
 
 static inline int ext4_es_is_written(struct extent_status *es)
 {
-       return (es->es_pblk & EXTENT_STATUS_WRITTEN) != 0;
+       return (es->es_pblk & ES_WRITTEN) != 0;
 }
 
 static inline int ext4_es_is_unwritten(struct extent_status *es)
 {
-       return (es->es_pblk & EXTENT_STATUS_UNWRITTEN) != 0;
+       return (es->es_pblk & ES_UNWRITTEN) != 0;
 }
 
 static inline int ext4_es_is_delayed(struct extent_status *es)
 {
-       return (es->es_pblk & EXTENT_STATUS_DELAYED) != 0;
+       return (es->es_pblk & ES_DELAYED) != 0;
 }
 
 static inline int ext4_es_is_hole(struct extent_status *es)
 {
-       return (es->es_pblk & EXTENT_STATUS_HOLE) != 0;
+       return (es->es_pblk & ES_HOLE) != 0;
 }
 
-static inline ext4_fsblk_t ext4_es_status(struct extent_status *es)
+static inline unsigned int ext4_es_status(struct extent_status *es)
 {
-       return (es->es_pblk & EXTENT_STATUS_FLAGS);
+       return es->es_pblk >> ES_SHIFT;
 }
 
 static inline ext4_fsblk_t ext4_es_pblock(struct extent_status *es)
 {
-       return (es->es_pblk & ~EXTENT_STATUS_FLAGS);
+       return es->es_pblk & ~ES_MASK;
 }
 
 static inline void ext4_es_store_pblock(struct extent_status *es,
@@ -105,19 +118,16 @@ static inline void ext4_es_store_pblock(struct extent_status *es,
 {
        ext4_fsblk_t block;
 
-       block = (pb & ~EXTENT_STATUS_FLAGS) |
-               (es->es_pblk & EXTENT_STATUS_FLAGS);
+       block = (pb & ~ES_MASK) | (es->es_pblk & ES_MASK);
        es->es_pblk = block;
 }
 
 static inline void ext4_es_store_status(struct extent_status *es,
-                                       unsigned long long status)
+                                       unsigned int status)
 {
-       ext4_fsblk_t block;
-
-       block = (status & EXTENT_STATUS_FLAGS) |
-               (es->es_pblk & ~EXTENT_STATUS_FLAGS);
-       es->es_pblk = block;
+       es->es_pblk = (((ext4_fsblk_t)
+                       (status & EXTENT_STATUS_FLAGS) << ES_SHIFT) |
+                      (es->es_pblk & ~ES_MASK));
 }
 
 extern void ext4_es_register_shrinker(struct ext4_sb_info *sbi);
index 319c9d26279a94fa7039dc2f8730747fe2404697..f79fc6eb86cbeba331bac4a3511dcbd7a79ec4e1 100644 (file)
@@ -74,12 +74,11 @@ void ext4_unwritten_wait(struct inode *inode)
  * or one thread will zero the other's data, causing corruption.
  */
 static int
-ext4_unaligned_aio(struct inode *inode, const struct iovec *iov,
-                  unsigned long nr_segs, loff_t pos)
+ext4_unaligned_aio(struct inode *inode, struct iov_iter *iter, loff_t pos)
 {
        struct super_block *sb = inode->i_sb;
        int blockmask = sb->s_blocksize - 1;
-       size_t count = iov_length(iov, nr_segs);
+       size_t count = iov_iter_count(iter);
        loff_t final_size = pos + count;
 
        if (pos >= inode->i_size)
@@ -92,8 +91,8 @@ ext4_unaligned_aio(struct inode *inode, const struct iovec *iov,
 }
 
 static ssize_t
-ext4_file_dio_write(struct kiocb *iocb, const struct iovec *iov,
-                   unsigned long nr_segs, loff_t pos)
+ext4_file_dio_write(struct kiocb *iocb, struct iov_iter *iter,
+                   loff_t pos)
 {
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_mapping->host;
@@ -101,11 +100,11 @@ ext4_file_dio_write(struct kiocb *iocb, const struct iovec *iov,
        int unaligned_aio = 0;
        ssize_t ret;
        int overwrite = 0;
-       size_t length = iov_length(iov, nr_segs);
+       size_t length = iov_iter_count(iter);
 
        if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) &&
            !is_sync_kiocb(iocb))
-               unaligned_aio = ext4_unaligned_aio(inode, iov, nr_segs, pos);
+               unaligned_aio = ext4_unaligned_aio(inode, iter, pos);
 
        /* Unaligned direct AIO must be serialized; see comment above */
        if (unaligned_aio) {
@@ -146,7 +145,7 @@ ext4_file_dio_write(struct kiocb *iocb, const struct iovec *iov,
                        overwrite = 1;
        }
 
-       ret = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos);
+       ret = __generic_file_write_iter(iocb, iter, &iocb->ki_pos);
        mutex_unlock(&inode->i_mutex);
 
        if (ret > 0 || ret == -EIOCBQUEUED) {
@@ -165,8 +164,7 @@ ext4_file_dio_write(struct kiocb *iocb, const struct iovec *iov,
 }
 
 static ssize_t
-ext4_file_write(struct kiocb *iocb, const struct iovec *iov,
-               unsigned long nr_segs, loff_t pos)
+ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *iter, loff_t pos)
 {
        struct inode *inode = file_inode(iocb->ki_filp);
        ssize_t ret;
@@ -178,22 +176,24 @@ ext4_file_write(struct kiocb *iocb, const struct iovec *iov,
 
        if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
                struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
-               size_t length = iov_length(iov, nr_segs);
+               size_t length = iov_iter_count(iter);
 
                if ((pos > sbi->s_bitmap_maxbytes ||
                    (pos == sbi->s_bitmap_maxbytes && length > 0)))
                        return -EFBIG;
 
                if (pos + length > sbi->s_bitmap_maxbytes) {
-                       nr_segs = iov_shorten((struct iovec *)iov, nr_segs,
-                                             sbi->s_bitmap_maxbytes - pos);
+                       ret = iov_iter_shorten(iter,
+                                              sbi->s_bitmap_maxbytes - pos);
+                       if (ret)
+                               return ret;
                }
        }
 
        if (unlikely(iocb->ki_filp->f_flags & O_DIRECT))
-               ret = ext4_file_dio_write(iocb, iov, nr_segs, pos);
+               ret = ext4_file_dio_write(iocb, iter, pos);
        else
-               ret = generic_file_aio_write(iocb, iov, nr_segs, pos);
+               ret = generic_file_write_iter(iocb, iter, pos);
 
        return ret;
 }
@@ -594,8 +594,8 @@ const struct file_operations ext4_file_operations = {
        .llseek         = ext4_llseek,
        .read           = do_sync_read,
        .write          = do_sync_write,
-       .aio_read       = generic_file_aio_read,
-       .aio_write      = ext4_file_write,
+       .read_iter      = generic_file_read_iter,
+       .write_iter     = ext4_file_write_iter,
        .unlocked_ioctl = ext4_ioctl,
 #ifdef CONFIG_COMPAT
        .compat_ioctl   = ext4_compat_ioctl,
index 8bf5999875ee454c67899dad4c49ed2ec9445ffc..666a5ed48bcc9d07bb8ab9272e536087010cc2d2 100644 (file)
@@ -624,6 +624,51 @@ static int find_group_other(struct super_block *sb, struct inode *parent,
        return -1;
 }
 
+/*
+ * In no journal mode, if an inode has recently been deleted, we want
+ * to avoid reusing it until we're reasonably sure the inode table
+ * block has been written back to disk.  (Yes, these values are
+ * somewhat arbitrary...)
+ */
+#define RECENTCY_MIN   5
+#define RECENTCY_DIRTY 30
+
+static int recently_deleted(struct super_block *sb, ext4_group_t group, int ino)
+{
+       struct ext4_group_desc  *gdp;
+       struct ext4_inode       *raw_inode;
+       struct buffer_head      *bh;
+       unsigned long           dtime, now;
+       int     inodes_per_block = EXT4_SB(sb)->s_inodes_per_block;
+       int     offset, ret = 0, recentcy = RECENTCY_MIN;
+
+       gdp = ext4_get_group_desc(sb, group, NULL);
+       if (unlikely(!gdp))
+               return 0;
+
+       bh = sb_getblk(sb, ext4_inode_table(sb, gdp) +
+                      (ino / inodes_per_block));
+       if (unlikely(!bh) || !buffer_uptodate(bh))
+               /*
+                * If the block is not in the buffer cache, then it
+                * must have been written out.
+                */
+               goto out;
+
+       offset = (ino % inodes_per_block) * EXT4_INODE_SIZE(sb);
+       raw_inode = (struct ext4_inode *) (bh->b_data + offset);
+       dtime = le32_to_cpu(raw_inode->i_dtime);
+       now = get_seconds();
+       if (buffer_dirty(bh))
+               recentcy += RECENTCY_DIRTY;
+
+       if (dtime && (dtime < now) && (now < dtime + recentcy))
+               ret = 1;
+out:
+       brelse(bh);
+       return ret;
+}
+
 /*
  * There are two policies for allocating an inode.  If the new inode is
  * a directory, then a forward search is made for a block group with both
@@ -741,6 +786,11 @@ repeat_in_this_group:
                                   "inode=%lu", ino + 1);
                        continue;
                }
+               if ((EXT4_SB(sb)->s_journal == NULL) &&
+                   recently_deleted(sb, group, ino)) {
+                       ino++;
+                       goto next_inode;
+               }
                if (!handle) {
                        BUG_ON(nblocks <= 0);
                        handle = __ext4_journal_start_sb(dir->i_sb, line_no,
@@ -764,6 +814,7 @@ repeat_in_this_group:
                ino++;          /* the inode bitmap is zero-based */
                if (!ret2)
                        goto got; /* we grabbed the inode! */
+next_inode:
                if (ino < EXT4_INODES_PER_GROUP(sb))
                        goto repeat_in_this_group;
 next_group:
index 87b30cd357e7f1962b55bf9d452f839376187302..b6eb453d0d00621704bf9b04af5edab03aa371a1 100644 (file)
@@ -640,8 +640,7 @@ out:
  * VFS code falls back into buffered path in that case so we are safe.
  */
 ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb,
-                          const struct iovec *iov, loff_t offset,
-                          unsigned long nr_segs)
+                          struct iov_iter *iter, loff_t offset)
 {
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_mapping->host;
@@ -649,7 +648,7 @@ ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb,
        handle_t *handle;
        ssize_t ret;
        int orphan = 0;
-       size_t count = iov_length(iov, nr_segs);
+       size_t count = iov_iter_count(iter);
        int retries = 0;
 
        if (rw == WRITE) {
@@ -688,18 +687,17 @@ retry:
                        goto locked;
                }
                ret = __blockdev_direct_IO(rw, iocb, inode,
-                                inode->i_sb->s_bdev, iov,
-                                offset, nr_segs,
-                                ext4_get_block, NULL, NULL, 0);
+                                inode->i_sb->s_bdev, iter,
+                                offset, ext4_get_block, NULL, NULL, 0);
                inode_dio_done(inode);
        } else {
 locked:
-               ret = blockdev_direct_IO(rw, iocb, inode, iov,
-                                offset, nr_segs, ext4_get_block);
+               ret = blockdev_direct_IO(rw, iocb, inode, iter,
+                                offset, ext4_get_block);
 
                if (unlikely((rw & WRITE) && ret < 0)) {
                        loff_t isize = i_size_read(inode);
-                       loff_t end = offset + iov_length(iov, nr_segs);
+                       loff_t end = offset + iov_iter_count(iter);
 
                        if (end > isize)
                                ext4_truncate_failed_write(inode);
index c2ca04e67a4fce6a40316550215e2b2fbe107d01..fc198cd8191768efbcc608d24c05a2df8fd9540d 100644 (file)
@@ -553,7 +553,7 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
        }
        if (retval > 0) {
                int ret;
-               unsigned long long status;
+               unsigned int status;
 
                if (unlikely(retval != map->m_len)) {
                        ext4_warning(inode->i_sb,
@@ -653,7 +653,7 @@ found:
 
        if (retval > 0) {
                int ret;
-               unsigned long long status;
+               unsigned int status;
 
                if (unlikely(retval != map->m_len)) {
                        ext4_warning(inode->i_sb,
@@ -1633,7 +1633,7 @@ add_delayed:
                set_buffer_delay(bh);
        } else if (retval > 0) {
                int ret;
-               unsigned long long status;
+               unsigned int status;
 
                if (unlikely(retval != map->m_len)) {
                        ext4_warning(inode->i_sb,
@@ -1890,6 +1890,26 @@ static int ext4_writepage(struct page *page,
        return ret;
 }
 
+static int mpage_submit_page(struct mpage_da_data *mpd, struct page *page)
+{
+       int len;
+       loff_t size = i_size_read(mpd->inode);
+       int err;
+
+       BUG_ON(page->index != mpd->first_page);
+       if (page->index == size >> PAGE_CACHE_SHIFT)
+               len = size & ~PAGE_CACHE_MASK;
+       else
+               len = PAGE_CACHE_SIZE;
+       clear_page_dirty_for_io(page);
+       err = ext4_bio_write_page(&mpd->io_submit, page, len, mpd->wbc);
+       if (!err)
+               mpd->wbc->nr_to_write--;
+       mpd->first_page++;
+
+       return err;
+}
+
 #define BH_FLAGS ((1 << BH_Unwritten) | (1 << BH_Delay))
 
 /*
@@ -1904,82 +1924,94 @@ static int ext4_writepage(struct page *page,
  *
  * @mpd - extent of blocks
  * @lblk - logical number of the block in the file
- * @b_state - b_state of the buffer head added
+ * @bh - buffer head we want to add to the extent
  *
- * the function is used to collect contig. blocks in same state
+ * The function is used to collect contig. blocks in the same state. If the
+ * buffer doesn't require mapping for writeback and we haven't started the
+ * extent of buffers to map yet, the function returns 'true' immediately - the
+ * caller can write the buffer right away. Otherwise the function returns true
+ * if the block has been added to the extent, false if the block couldn't be
+ * added.
  */
-static int mpage_add_bh_to_extent(struct mpage_da_data *mpd, ext4_lblk_t lblk,
-                                 unsigned long b_state)
+static bool mpage_add_bh_to_extent(struct mpage_da_data *mpd, ext4_lblk_t lblk,
+                                  struct buffer_head *bh)
 {
        struct ext4_map_blocks *map = &mpd->map;
 
-       /* Don't go larger than mballoc is willing to allocate */
-       if (map->m_len >= MAX_WRITEPAGES_EXTENT_LEN)
-               return 0;
+       /* Buffer that doesn't need mapping for writeback? */
+       if (!buffer_dirty(bh) || !buffer_mapped(bh) ||
+           (!buffer_delay(bh) && !buffer_unwritten(bh))) {
+               /* So far no extent to map => we write the buffer right away */
+               if (map->m_len == 0)
+                       return true;
+               return false;
+       }
 
        /* First block in the extent? */
        if (map->m_len == 0) {
                map->m_lblk = lblk;
                map->m_len = 1;
-               map->m_flags = b_state & BH_FLAGS;
-               return 1;
+               map->m_flags = bh->b_state & BH_FLAGS;
+               return true;
        }
 
+       /* Don't go larger than mballoc is willing to allocate */
+       if (map->m_len >= MAX_WRITEPAGES_EXTENT_LEN)
+               return false;
+
        /* Can we merge the block to our big extent? */
        if (lblk == map->m_lblk + map->m_len &&
-           (b_state & BH_FLAGS) == map->m_flags) {
+           (bh->b_state & BH_FLAGS) == map->m_flags) {
                map->m_len++;
-               return 1;
+               return true;
        }
-       return 0;
+       return false;
 }
 
-static bool add_page_bufs_to_extent(struct mpage_da_data *mpd,
-                                   struct buffer_head *head,
-                                   struct buffer_head *bh,
-                                   ext4_lblk_t lblk)
+/*
+ * mpage_process_page_bufs - submit page buffers for IO or add them to extent
+ *
+ * @mpd - extent of blocks for mapping
+ * @head - the first buffer in the page
+ * @bh - buffer we should start processing from
+ * @lblk - logical number of the block in the file corresponding to @bh
+ *
+ * Walk through page buffers from @bh upto @head (exclusive) and either submit
+ * the page for IO if all buffers in this page were mapped and there's no
+ * accumulated extent of buffers to map or add buffers in the page to the
+ * extent of buffers to map. The function returns 1 if the caller can continue
+ * by processing the next page, 0 if it should stop adding buffers to the
+ * extent to map because we cannot extend it anymore. It can also return value
+ * < 0 in case of error during IO submission.
+ */
+static int mpage_process_page_bufs(struct mpage_da_data *mpd,
+                                  struct buffer_head *head,
+                                  struct buffer_head *bh,
+                                  ext4_lblk_t lblk)
 {
        struct inode *inode = mpd->inode;
+       int err;
        ext4_lblk_t blocks = (i_size_read(inode) + (1 << inode->i_blkbits) - 1)
                                                        >> inode->i_blkbits;
 
        do {
                BUG_ON(buffer_locked(bh));
 
-               if (!buffer_dirty(bh) || !buffer_mapped(bh) ||
-                   (!buffer_delay(bh) && !buffer_unwritten(bh)) ||
-                   lblk >= blocks) {
+               if (lblk >= blocks || !mpage_add_bh_to_extent(mpd, lblk, bh)) {
                        /* Found extent to map? */
                        if (mpd->map.m_len)
-                               return false;
-                       if (lblk >= blocks)
-                               return true;
-                       continue;
+                               return 0;
+                       /* Everything mapped so far and we hit EOF */
+                       break;
                }
-               if (!mpage_add_bh_to_extent(mpd, lblk, bh->b_state))
-                       return false;
        } while (lblk++, (bh = bh->b_this_page) != head);
-       return true;
-}
-
-static int mpage_submit_page(struct mpage_da_data *mpd, struct page *page)
-{
-       int len;
-       loff_t size = i_size_read(mpd->inode);
-       int err;
-
-       BUG_ON(page->index != mpd->first_page);
-       if (page->index == size >> PAGE_CACHE_SHIFT)
-               len = size & ~PAGE_CACHE_MASK;
-       else
-               len = PAGE_CACHE_SIZE;
-       clear_page_dirty_for_io(page);
-       err = ext4_bio_write_page(&mpd->io_submit, page, len, mpd->wbc);
-       if (!err)
-               mpd->wbc->nr_to_write--;
-       mpd->first_page++;
-
-       return err;
+       /* So far everything mapped? Submit the page for IO. */
+       if (mpd->map.m_len == 0) {
+               err = mpage_submit_page(mpd, head->b_page);
+               if (err < 0)
+                       return err;
+       }
+       return lblk < blocks;
 }
 
 /*
@@ -2003,8 +2035,6 @@ static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd)
        struct inode *inode = mpd->inode;
        struct buffer_head *head, *bh;
        int bpp_bits = PAGE_CACHE_SHIFT - inode->i_blkbits;
-       ext4_lblk_t blocks = (i_size_read(inode) + (1 << inode->i_blkbits) - 1)
-                                                       >> inode->i_blkbits;
        pgoff_t start, end;
        ext4_lblk_t lblk;
        sector_t pblock;
@@ -2039,18 +2069,26 @@ static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd)
                                         */
                                        mpd->map.m_len = 0;
                                        mpd->map.m_flags = 0;
-                                       add_page_bufs_to_extent(mpd, head, bh,
-                                                               lblk);
+                                       /*
+                                        * FIXME: If dioread_nolock supports
+                                        * blocksize < pagesize, we need to make
+                                        * sure we add size mapped so far to
+                                        * io_end->size as the following call
+                                        * can submit the page for IO.
+                                        */
+                                       err = mpage_process_page_bufs(mpd, head,
+                                                                     bh, lblk);
                                        pagevec_release(&pvec);
-                                       return 0;
+                                       if (err > 0)
+                                               err = 0;
+                                       return err;
                                }
                                if (buffer_delay(bh)) {
                                        clear_buffer_delay(bh);
                                        bh->b_blocknr = pblock++;
                                }
                                clear_buffer_unwritten(bh);
-                       } while (++lblk < blocks &&
-                                (bh = bh->b_this_page) != head);
+                       } while (lblk++, (bh = bh->b_this_page) != head);
 
                        /*
                         * FIXME: This is going to break if dioread_nolock
@@ -2199,12 +2237,10 @@ static int mpage_map_and_submit_extent(handle_t *handle,
 
        /* Update on-disk size after IO is submitted */
        disksize = ((loff_t)mpd->first_page) << PAGE_CACHE_SHIFT;
-       if (disksize > i_size_read(inode))
-               disksize = i_size_read(inode);
        if (disksize > EXT4_I(inode)->i_disksize) {
                int err2;
 
-               ext4_update_i_disksize(inode, disksize);
+               ext4_wb_update_i_disksize(inode, disksize);
                err2 = ext4_mark_inode_dirty(handle, inode);
                if (err2)
                        ext4_error(inode->i_sb,
@@ -2319,14 +2355,10 @@ static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
                        lblk = ((ext4_lblk_t)page->index) <<
                                (PAGE_CACHE_SHIFT - blkbits);
                        head = page_buffers(page);
-                       if (!add_page_bufs_to_extent(mpd, head, head, lblk))
+                       err = mpage_process_page_bufs(mpd, head, head, lblk);
+                       if (err <= 0)
                                goto out;
-                       /* So far everything mapped? Submit the page for IO. */
-                       if (mpd->map.m_len == 0) {
-                               err = mpage_submit_page(mpd, page);
-                               if (err < 0)
-                                       goto out;
-                       }
+                       err = 0;
 
                        /*
                         * Accumulated enough dirty pages? This doesn't apply
@@ -3040,13 +3072,12 @@ static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
  *
  */
 static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
-                             const struct iovec *iov, loff_t offset,
-                             unsigned long nr_segs)
+                             struct iov_iter *iter, loff_t offset)
 {
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_mapping->host;
        ssize_t ret;
-       size_t count = iov_length(iov, nr_segs);
+       size_t count = iov_iter_count(iter);
        int overwrite = 0;
        get_block_t *get_block_func = NULL;
        int dio_flags = 0;
@@ -3055,7 +3086,7 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
 
        /* Use the old path for reads and writes beyond i_size. */
        if (rw != WRITE || final_size > inode->i_size)
-               return ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs);
+               return ext4_ind_direct_IO(rw, iocb, iter, offset);
 
        BUG_ON(iocb->private == NULL);
 
@@ -3123,8 +3154,8 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
                dio_flags = DIO_LOCKING;
        }
        ret = __blockdev_direct_IO(rw, iocb, inode,
-                                  inode->i_sb->s_bdev, iov,
-                                  offset, nr_segs,
+                                  inode->i_sb->s_bdev, iter,
+                                  offset,
                                   get_block_func,
                                   ext4_end_io_dio,
                                   NULL,
@@ -3185,8 +3216,7 @@ retake_lock:
 }
 
 static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
-                             const struct iovec *iov, loff_t offset,
-                             unsigned long nr_segs)
+                             struct iov_iter *iter, loff_t offset)
 {
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_mapping->host;
@@ -3202,13 +3232,12 @@ static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
        if (ext4_has_inline_data(inode))
                return 0;
 
-       trace_ext4_direct_IO_enter(inode, offset, iov_length(iov, nr_segs), rw);
+       trace_ext4_direct_IO_enter(inode, offset, iov_iter_count(iter), rw);
        if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
-               ret = ext4_ext_direct_IO(rw, iocb, iov, offset, nr_segs);
+               ret = ext4_ext_direct_IO(rw, iocb, iter, offset);
        else
-               ret = ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs);
-       trace_ext4_direct_IO_exit(inode, offset,
-                               iov_length(iov, nr_segs), rw, ret);
+               ret = ext4_ind_direct_IO(rw, iocb, iter, offset);
+       trace_ext4_direct_IO_exit(inode, offset, iov_iter_count(iter), rw, ret);
        return ret;
 }
 
@@ -4566,7 +4595,9 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
                ext4_journal_stop(handle);
        }
 
-       if (attr->ia_valid & ATTR_SIZE) {
+       if (attr->ia_valid & ATTR_SIZE && attr->ia_size != inode->i_size) {
+               handle_t *handle;
+               loff_t oldsize = inode->i_size;
 
                if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
                        struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
@@ -4574,73 +4605,69 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
                        if (attr->ia_size > sbi->s_bitmap_maxbytes)
                                return -EFBIG;
                }
-       }
-
-       if (S_ISREG(inode->i_mode) &&
-           attr->ia_valid & ATTR_SIZE &&
-           (attr->ia_size < inode->i_size)) {
-               handle_t *handle;
-
-               handle = ext4_journal_start(inode, EXT4_HT_INODE, 3);
-               if (IS_ERR(handle)) {
-                       error = PTR_ERR(handle);
-                       goto err_out;
-               }
-               if (ext4_handle_valid(handle)) {
-                       error = ext4_orphan_add(handle, inode);
-                       orphan = 1;
-               }
-               EXT4_I(inode)->i_disksize = attr->ia_size;
-               rc = ext4_mark_inode_dirty(handle, inode);
-               if (!error)
-                       error = rc;
-               ext4_journal_stop(handle);
-
-               if (ext4_should_order_data(inode)) {
-                       error = ext4_begin_ordered_truncate(inode,
+               if (S_ISREG(inode->i_mode) &&
+                   (attr->ia_size < inode->i_size)) {
+                       if (ext4_should_order_data(inode)) {
+                               error = ext4_begin_ordered_truncate(inode,
                                                            attr->ia_size);
-                       if (error) {
-                               /* Do as much error cleanup as possible */
-                               handle = ext4_journal_start(inode,
-                                                           EXT4_HT_INODE, 3);
-                               if (IS_ERR(handle)) {
-                                       ext4_orphan_del(NULL, inode);
+                               if (error)
                                        goto err_out;
-                               }
-                               ext4_orphan_del(handle, inode);
-                               orphan = 0;
-                               ext4_journal_stop(handle);
+                       }
+                       handle = ext4_journal_start(inode, EXT4_HT_INODE, 3);
+                       if (IS_ERR(handle)) {
+                               error = PTR_ERR(handle);
                                goto err_out;
                        }
-               }
-       }
-
-       if (attr->ia_valid & ATTR_SIZE) {
-               if (attr->ia_size != inode->i_size) {
-                       loff_t oldsize = inode->i_size;
-
-                       i_size_write(inode, attr->ia_size);
-                       /*
-                        * Blocks are going to be removed from the inode. Wait
-                        * for dio in flight.  Temporarily disable
-                        * dioread_nolock to prevent livelock.
-                        */
-                       if (orphan) {
-                               if (!ext4_should_journal_data(inode)) {
-                                       ext4_inode_block_unlocked_dio(inode);
-                                       inode_dio_wait(inode);
-                                       ext4_inode_resume_unlocked_dio(inode);
-                               } else
-                                       ext4_wait_for_tail_page_commit(inode);
+                       if (ext4_handle_valid(handle)) {
+                               error = ext4_orphan_add(handle, inode);
+                               orphan = 1;
                        }
+                       down_write(&EXT4_I(inode)->i_data_sem);
+                       EXT4_I(inode)->i_disksize = attr->ia_size;
+                       rc = ext4_mark_inode_dirty(handle, inode);
+                       if (!error)
+                               error = rc;
                        /*
-                        * Truncate pagecache after we've waited for commit
-                        * in data=journal mode to make pages freeable.
+                        * We have to update i_size under i_data_sem together
+                        * with i_disksize to avoid races with writeback code
+                        * running ext4_wb_update_i_disksize().
                         */
-                       truncate_pagecache(inode, oldsize, inode->i_size);
+                       if (!error)
+                               i_size_write(inode, attr->ia_size);
+                       up_write(&EXT4_I(inode)->i_data_sem);
+                       ext4_journal_stop(handle);
+                       if (error) {
+                               ext4_orphan_del(NULL, inode);
+                               goto err_out;
+                       }
+               } else
+                       i_size_write(inode, attr->ia_size);
+
+               /*
+                * Blocks are going to be removed from the inode. Wait
+                * for dio in flight.  Temporarily disable
+                * dioread_nolock to prevent livelock.
+                */
+               if (orphan) {
+                       if (!ext4_should_journal_data(inode)) {
+                               ext4_inode_block_unlocked_dio(inode);
+                               inode_dio_wait(inode);
+                               ext4_inode_resume_unlocked_dio(inode);
+                       } else
+                               ext4_wait_for_tail_page_commit(inode);
                }
-               ext4_truncate(inode);
+               /*
+                * Truncate pagecache after we've waited for commit
+                * in data=journal mode to make pages freeable.
+                */
+               truncate_pagecache(inode, oldsize, inode->i_size);
        }
+       /*
+        * We want to call ext4_truncate() even if attr->ia_size ==
+        * inode->i_size for cases like truncation of fallocated space
+        */
+       if (attr->ia_valid & ATTR_SIZE)
+               ext4_truncate(inode);
 
        if (!rc) {
                setattr_copy(inode, attr);
index c0427e2f66481a13f3018b4630901688fdd6646e..5498f75a16480b6f4b97e170c2f07d03cb0d6baf 100644 (file)
@@ -624,6 +624,8 @@ resizefs_out:
 
                return 0;
        }
+       case EXT4_IOC_PRECACHE_EXTENTS:
+               return ext4_ext_precache(inode);
 
        default:
                return -ENOTTY;
@@ -688,6 +690,7 @@ long ext4_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
        case EXT4_IOC_MOVE_EXT:
        case FITRIM:
        case EXT4_IOC_RESIZE_FS:
+       case EXT4_IOC_PRECACHE_EXTENTS:
                break;
        default:
                return -ENOIOCTLCMD;
index 4bbbf13bd7435fd2181aee091dfd6212347463da..aa7d058e9e486e855d53fb2573a5009a4a347dd7 100644 (file)
@@ -4585,6 +4585,7 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode,
        struct buffer_head *gd_bh;
        ext4_group_t block_group;
        struct ext4_sb_info *sbi;
+       struct ext4_inode_info *ei = EXT4_I(inode);
        struct ext4_buddy e4b;
        unsigned int count_clusters;
        int err = 0;
@@ -4784,7 +4785,6 @@ do_more:
        ext4_block_bitmap_csum_set(sb, block_group, gdp, bitmap_bh);
        ext4_group_desc_csum_set(sb, block_group, gdp);
        ext4_unlock_group(sb, block_group);
-       percpu_counter_add(&sbi->s_freeclusters_counter, count_clusters);
 
        if (sbi->s_log_groups_per_flex) {
                ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
@@ -4792,10 +4792,23 @@ do_more:
                             &sbi->s_flex_groups[flex_group].free_clusters);
        }
 
-       ext4_mb_unload_buddy(&e4b);
-
-       if (!(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE))
+       if (flags & EXT4_FREE_BLOCKS_RESERVE && ei->i_reserved_data_blocks) {
+               percpu_counter_add(&sbi->s_dirtyclusters_counter,
+                                  count_clusters);
+               spin_lock(&ei->i_block_reservation_lock);
+               if (flags & EXT4_FREE_BLOCKS_METADATA)
+                       ei->i_reserved_meta_blocks += count_clusters;
+               else
+                       ei->i_reserved_data_blocks += count_clusters;
+               spin_unlock(&ei->i_block_reservation_lock);
+               if (!(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE))
+                       dquot_reclaim_block(inode,
+                                       EXT4_C2B(sbi, count_clusters));
+       } else if (!(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE))
                dquot_free_block(inode, EXT4_C2B(sbi, count_clusters));
+       percpu_counter_add(&sbi->s_freeclusters_counter, count_clusters);
+
+       ext4_mb_unload_buddy(&e4b);
 
        /* We dirtied the bitmap block */
        BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
index 49e8bdff9163e830931570f4c2660cea201ab3fd..f99bdb8548b2bd91e9fec87b2955ef3457af0202 100644 (file)
@@ -39,7 +39,7 @@ static int finish_range(handle_t *handle, struct inode *inode,
        newext.ee_block = cpu_to_le32(lb->first_block);
        newext.ee_len   = cpu_to_le16(lb->last_block - lb->first_block + 1);
        ext4_ext_store_pblock(&newext, lb->first_pblock);
-       path = ext4_ext_find_extent(inode, lb->first_block, NULL);
+       path = ext4_ext_find_extent(inode, lb->first_block, NULL, 0);
 
        if (IS_ERR(path)) {
                retval = PTR_ERR(path);
index e86dddbd8296c138347f9ba1c5f6e52d51a4c91c..7fa4d855dbd5e65e373b8222800f2a5d6bc5520c 100644 (file)
@@ -37,7 +37,7 @@ get_ext_path(struct inode *inode, ext4_lblk_t lblock,
        int ret = 0;
        struct ext4_ext_path *path;
 
-       path = ext4_ext_find_extent(inode, lblock, *orig_path);
+       path = ext4_ext_find_extent(inode, lblock, *orig_path, EXT4_EX_NOCACHE);
        if (IS_ERR(path))
                ret = PTR_ERR(path);
        else if (path[ext_depth(inode)].p_ext == NULL)
index 35f55a0dbc4b0bb044571a647a8d1982e9533c4b..1bec5a5c1e45a29e9ead318987ec03d803104c5a 100644 (file)
@@ -3005,15 +3005,19 @@ static struct buffer_head *ext4_get_first_dir_block(handle_t *handle,
 /*
  * Anybody can rename anything with this: the permission checks are left to the
  * higher-level routines.
+ *
+ * n.b.  old_{dentry,inode) refers to the source dentry/inode
+ * while new_{dentry,inode) refers to the destination dentry/inode
+ * This comes from rename(const char *oldpath, const char *newpath)
  */
 static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
                       struct inode *new_dir, struct dentry *new_dentry)
 {
-       handle_t *handle;
+       handle_t *handle = NULL;
        struct inode *old_inode, *new_inode;
        struct buffer_head *old_bh, *new_bh, *dir_bh;
        struct ext4_dir_entry_2 *old_de, *new_de;
-       int retval, force_da_alloc = 0;
+       int retval;
        int inlined = 0, new_inlined = 0;
        struct ext4_dir_entry_2 *parent_de;
 
@@ -3026,14 +3030,6 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
         * in separate transaction */
        if (new_dentry->d_inode)
                dquot_initialize(new_dentry->d_inode);
-       handle = ext4_journal_start(old_dir, EXT4_HT_DIR,
-               (2 * EXT4_DATA_TRANS_BLOCKS(old_dir->i_sb) +
-                EXT4_INDEX_EXTRA_TRANS_BLOCKS + 2));
-       if (IS_ERR(handle))
-               return PTR_ERR(handle);
-
-       if (IS_DIRSYNC(old_dir) || IS_DIRSYNC(new_dir))
-               ext4_handle_sync(handle);
 
        old_bh = ext4_find_entry(old_dir, &old_dentry->d_name, &old_de, NULL);
        /*
@@ -3056,6 +3052,18 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
                        new_bh = NULL;
                }
        }
+       if (new_inode && !test_opt(new_dir->i_sb, NO_AUTO_DA_ALLOC))
+               ext4_alloc_da_blocks(old_inode);
+
+       handle = ext4_journal_start(old_dir, EXT4_HT_DIR,
+               (2 * EXT4_DATA_TRANS_BLOCKS(old_dir->i_sb) +
+                EXT4_INDEX_EXTRA_TRANS_BLOCKS + 2));
+       if (IS_ERR(handle))
+               return PTR_ERR(handle);
+
+       if (IS_DIRSYNC(old_dir) || IS_DIRSYNC(new_dir))
+               ext4_handle_sync(handle);
+
        if (S_ISDIR(old_inode->i_mode)) {
                if (new_inode) {
                        retval = -ENOTEMPTY;
@@ -3186,8 +3194,6 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
                ext4_mark_inode_dirty(handle, new_inode);
                if (!new_inode->i_nlink)
                        ext4_orphan_add(handle, new_inode);
-               if (!test_opt(new_dir->i_sb, NO_AUTO_DA_ALLOC))
-                       force_da_alloc = 1;
        }
        retval = 0;
 
@@ -3195,9 +3201,8 @@ end_rename:
        brelse(dir_bh);
        brelse(old_bh);
        brelse(new_bh);
-       ext4_journal_stop(handle);
-       if (retval == 0 && force_da_alloc)
-               ext4_alloc_da_blocks(old_inode);
+       if (handle)
+               ext4_journal_stop(handle);
        return retval;
 }
 
index b59373b625e9bcacaf3a63e409b21b1ef6212dfa..8d514649d60e99926cbd9c62b2017b1fb9a68bf3 100644 (file)
@@ -162,7 +162,7 @@ void *ext4_kvmalloc(size_t size, gfp_t flags)
 {
        void *ret;
 
-       ret = kmalloc(size, flags);
+       ret = kmalloc(size, flags | __GFP_NOWARN);
        if (!ret)
                ret = __vmalloc(size, flags, PAGE_KERNEL);
        return ret;
@@ -172,7 +172,7 @@ void *ext4_kvzalloc(size_t size, gfp_t flags)
 {
        void *ret;
 
-       ret = kzalloc(size, flags);
+       ret = kzalloc(size, flags | __GFP_NOWARN);
        if (!ret)
                ret = __vmalloc(size, flags | __GFP_ZERO, PAGE_KERNEL);
        return ret;
index 66a6b85a51d8ab1a1724f10858648876b09f8b4b..bb312201ca950114782f6a811725102da3baa718 100644 (file)
@@ -182,7 +182,7 @@ const struct address_space_operations f2fs_meta_aops = {
        .set_page_dirty = f2fs_set_meta_page_dirty,
 };
 
-int check_orphan_space(struct f2fs_sb_info *sbi)
+int acquire_orphan_inode(struct f2fs_sb_info *sbi)
 {
        unsigned int max_orphans;
        int err = 0;
@@ -197,10 +197,19 @@ int check_orphan_space(struct f2fs_sb_info *sbi)
        mutex_lock(&sbi->orphan_inode_mutex);
        if (sbi->n_orphans >= max_orphans)
                err = -ENOSPC;
+       else
+               sbi->n_orphans++;
        mutex_unlock(&sbi->orphan_inode_mutex);
        return err;
 }
 
+void release_orphan_inode(struct f2fs_sb_info *sbi)
+{
+       mutex_lock(&sbi->orphan_inode_mutex);
+       sbi->n_orphans--;
+       mutex_unlock(&sbi->orphan_inode_mutex);
+}
+
 void add_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
 {
        struct list_head *head, *this;
@@ -229,21 +238,18 @@ retry:
                list_add(&new->list, this->prev);
        else
                list_add_tail(&new->list, head);
-
-       sbi->n_orphans++;
 out:
        mutex_unlock(&sbi->orphan_inode_mutex);
 }
 
 void remove_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
 {
-       struct list_head *this, *next, *head;
+       struct list_head *head;
        struct orphan_inode_entry *orphan;
 
        mutex_lock(&sbi->orphan_inode_mutex);
        head = &sbi->orphan_inode_list;
-       list_for_each_safe(this, next, head) {
-               orphan = list_entry(this, struct orphan_inode_entry, list);
+       list_for_each_entry(orphan, head, list) {
                if (orphan->ino == ino) {
                        list_del(&orphan->list);
                        kmem_cache_free(orphan_entry_slab, orphan);
@@ -373,7 +379,7 @@ static struct page *validate_checkpoint(struct f2fs_sb_info *sbi,
        if (!f2fs_crc_valid(crc, cp_block, crc_offset))
                goto invalid_cp1;
 
-       pre_version = le64_to_cpu(cp_block->checkpoint_ver);
+       pre_version = cur_cp_version(cp_block);
 
        /* Read the 2nd cp block in this CP pack */
        cp_addr += le32_to_cpu(cp_block->cp_pack_total_block_count) - 1;
@@ -388,7 +394,7 @@ static struct page *validate_checkpoint(struct f2fs_sb_info *sbi,
        if (!f2fs_crc_valid(crc, cp_block, crc_offset))
                goto invalid_cp2;
 
-       cur_version = le64_to_cpu(cp_block->checkpoint_ver);
+       cur_version = cur_cp_version(cp_block);
 
        if (cur_version == pre_version) {
                *version = cur_version;
@@ -793,7 +799,7 @@ void write_checkpoint(struct f2fs_sb_info *sbi, bool is_umount)
         * Increase the version number so that
         * SIT entries and seg summaries are written at correct place
         */
-       ckpt_ver = le64_to_cpu(ckpt->checkpoint_ver);
+       ckpt_ver = cur_cp_version(ckpt);
        ckpt->checkpoint_ver = cpu_to_le64(++ckpt_ver);
 
        /* write cached NAT/SIT entries to NAT/SIT area */
index 035f9a345cdf23446abbdaa26e8ac28340b715a0..6ec0ef780ee0fa4983170f0002ebde2ec84705fd 100644 (file)
@@ -37,9 +37,9 @@ static void __set_data_blkaddr(struct dnode_of_data *dn, block_t new_addr)
        struct page *node_page = dn->node_page;
        unsigned int ofs_in_node = dn->ofs_in_node;
 
-       wait_on_page_writeback(node_page);
+       f2fs_wait_on_page_writeback(node_page, NODE, false);
 
-       rn = (struct f2fs_node *)page_address(node_page);
+       rn = F2FS_NODE(node_page);
 
        /* Get physical address of data block */
        addr_array = blkaddr_in_node(rn);
@@ -176,7 +176,6 @@ void update_extent_cache(block_t blk_addr, struct dnode_of_data *dn)
 end_update:
        write_unlock(&fi->ext.ext_lock);
        sync_inode_page(dn);
-       return;
 }
 
 struct page *find_data_page(struct inode *inode, pgoff_t index, bool sync)
@@ -365,7 +364,6 @@ static void read_end_io(struct bio *bio, int err)
                }
                unlock_page(page);
        } while (bvec >= bio->bi_io_vec);
-       kfree(bio->bi_private);
        bio_put(bio);
 }
 
@@ -391,7 +389,6 @@ int f2fs_readpage(struct f2fs_sb_info *sbi, struct page *page,
        bio->bi_end_io = read_end_io;
 
        if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
-               kfree(bio->bi_private);
                bio_put(bio);
                up_read(&sbi->bio_sem);
                f2fs_put_page(page, 1);
@@ -636,9 +633,6 @@ static int f2fs_write_begin(struct file *file, struct address_space *mapping,
        int err = 0;
        int ilock;
 
-       /* for nobh_write_end */
-       *fsdata = NULL;
-
        f2fs_balance_fs(sbi);
 repeat:
        page = grab_cache_page_write_begin(mapping, index, flags);
@@ -723,7 +717,7 @@ static int f2fs_write_end(struct file *file,
 }
 
 static ssize_t f2fs_direct_IO(int rw, struct kiocb *iocb,
-               const struct iovec *iov, loff_t offset, unsigned long nr_segs)
+               struct iov_iter *iter, loff_t offset)
 {
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_mapping->host;
@@ -732,7 +726,7 @@ static ssize_t f2fs_direct_IO(int rw, struct kiocb *iocb,
                return 0;
 
        /* Needs synchronization with the cleaner */
-       return blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
+       return blockdev_direct_IO(rw, iocb, inode, iter, offset,
                                                  get_data_block_ro);
 }
 
index 0d6c6aafb235b140b3ba44ab7245ba37212b2a27..a84b0a8e6854cd6e2faba8a84124d350378343f1 100644 (file)
@@ -29,7 +29,7 @@ static DEFINE_MUTEX(f2fs_stat_mutex);
 
 static void update_general_status(struct f2fs_sb_info *sbi)
 {
-       struct f2fs_stat_info *si = sbi->stat_info;
+       struct f2fs_stat_info *si = F2FS_STAT(sbi);
        int i;
 
        /* valid check of the segment numbers */
@@ -83,7 +83,7 @@ static void update_general_status(struct f2fs_sb_info *sbi)
  */
 static void update_sit_info(struct f2fs_sb_info *sbi)
 {
-       struct f2fs_stat_info *si = sbi->stat_info;
+       struct f2fs_stat_info *si = F2FS_STAT(sbi);
        unsigned int blks_per_sec, hblks_per_sec, total_vblocks, bimodal, dist;
        struct sit_info *sit_i = SIT_I(sbi);
        unsigned int segno, vblocks;
@@ -118,7 +118,7 @@ static void update_sit_info(struct f2fs_sb_info *sbi)
  */
 static void update_mem_info(struct f2fs_sb_info *sbi)
 {
-       struct f2fs_stat_info *si = sbi->stat_info;
+       struct f2fs_stat_info *si = F2FS_STAT(sbi);
        unsigned npages;
 
        if (si->base_mem)
@@ -253,21 +253,21 @@ static int stat_show(struct seq_file *s, void *v)
                           si->nats, NM_WOUT_THRESHOLD);
                seq_printf(s, "  - SITs: %5d\n  - free_nids: %5d\n",
                           si->sits, si->fnids);
-               seq_printf(s, "\nDistribution of User Blocks:");
-               seq_printf(s, " [ valid | invalid | free ]\n");
-               seq_printf(s, "  [");
+               seq_puts(s, "\nDistribution of User Blocks:");
+               seq_puts(s, " [ valid | invalid | free ]\n");
+               seq_puts(s, "  [");
 
                for (j = 0; j < si->util_valid; j++)
-                       seq_printf(s, "-");
-               seq_printf(s, "|");
+                       seq_putc(s, '-');
+               seq_putc(s, '|');
 
                for (j = 0; j < si->util_invalid; j++)
-                       seq_printf(s, "-");
-               seq_printf(s, "|");
+                       seq_putc(s, '-');
+               seq_putc(s, '|');
 
                for (j = 0; j < si->util_free; j++)
-                       seq_printf(s, "-");
-               seq_printf(s, "]\n\n");
+                       seq_putc(s, '-');
+               seq_puts(s, "]\n\n");
                seq_printf(s, "SSR: %u blocks in %u segments\n",
                           si->block_count[SSR], si->segment_count[SSR]);
                seq_printf(s, "LFS: %u blocks in %u segments\n",
@@ -305,11 +305,10 @@ int f2fs_build_stats(struct f2fs_sb_info *sbi)
        struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
        struct f2fs_stat_info *si;
 
-       sbi->stat_info = kzalloc(sizeof(struct f2fs_stat_info), GFP_KERNEL);
-       if (!sbi->stat_info)
+       si = kzalloc(sizeof(struct f2fs_stat_info), GFP_KERNEL);
+       if (!si)
                return -ENOMEM;
 
-       si = sbi->stat_info;
        si->all_area_segs = le32_to_cpu(raw_super->segment_count);
        si->sit_area_segs = le32_to_cpu(raw_super->segment_count_sit);
        si->nat_area_segs = le32_to_cpu(raw_super->segment_count_nat);
@@ -319,6 +318,7 @@ int f2fs_build_stats(struct f2fs_sb_info *sbi)
        si->main_area_zones = si->main_area_sections /
                                le32_to_cpu(raw_super->secs_per_zone);
        si->sbi = sbi;
+       sbi->stat_info = si;
 
        mutex_lock(&f2fs_stat_mutex);
        list_add_tail(&si->stat_list, &f2fs_stat_list);
@@ -329,13 +329,13 @@ int f2fs_build_stats(struct f2fs_sb_info *sbi)
 
 void f2fs_destroy_stats(struct f2fs_sb_info *sbi)
 {
-       struct f2fs_stat_info *si = sbi->stat_info;
+       struct f2fs_stat_info *si = F2FS_STAT(sbi);
 
        mutex_lock(&f2fs_stat_mutex);
        list_del(&si->stat_list);
        mutex_unlock(&f2fs_stat_mutex);
 
-       kfree(sbi->stat_info);
+       kfree(si);
 }
 
 void __init f2fs_create_root_stats(void)
index 62f0d5977c64f3526e0720653fe33039c85355e3..384c6daf9a89a668b25bf2a22a193ac9f23f6a8a 100644 (file)
@@ -270,12 +270,27 @@ static void init_dent_inode(const struct qstr *name, struct page *ipage)
        struct f2fs_node *rn;
 
        /* copy name info. to this inode page */
-       rn = (struct f2fs_node *)page_address(ipage);
+       rn = F2FS_NODE(ipage);
        rn->i.i_namelen = cpu_to_le32(name->len);
        memcpy(rn->i.i_name, name->name, name->len);
        set_page_dirty(ipage);
 }
 
+int update_dent_inode(struct inode *inode, const struct qstr *name)
+{
+       struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
+       struct page *page;
+
+       page = get_node_page(sbi, inode->i_ino);
+       if (IS_ERR(page))
+               return PTR_ERR(page);
+
+       init_dent_inode(name, page);
+       f2fs_put_page(page, 1);
+
+       return 0;
+}
+
 static int make_empty_dir(struct inode *inode,
                struct inode *parent, struct page *page)
 {
@@ -557,6 +572,8 @@ void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
 
                if (inode->i_nlink == 0)
                        add_orphan_inode(sbi, inode->i_ino);
+               else
+                       release_orphan_inode(sbi);
        }
 
        if (bit_pos == NR_DENTRY_IN_BLOCK) {
index 467d42d65c488a0ed3841b01e798841d0e48a395..5348b63adbe9b17a681352a6c582572bd76e7a44 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/slab.h>
 #include <linux/crc32.h>
 #include <linux/magic.h>
+#include <linux/kobject.h>
 
 /*
  * For mount options
@@ -134,11 +135,13 @@ static inline int update_sits_in_cursum(struct f2fs_summary_block *rs, int i)
 /*
  * For INODE and NODE manager
  */
-#define XATTR_NODE_OFFSET      (-1)    /*
-                                        * store xattrs to one node block per
-                                        * file keeping -1 as its node offset to
-                                        * distinguish from index node blocks.
-                                        */
+/*
+ * XATTR_NODE_OFFSET stores xattrs to one node block per file keeping -1
+ * as its node offset to distinguish from index node blocks.
+ * But some bits are used to mark the node block.
+ */
+#define XATTR_NODE_OFFSET      ((((unsigned int)-1) << OFFSET_BIT_SHIFT) \
+                               >> OFFSET_BIT_SHIFT)
 enum {
        ALLOC_NODE,                     /* allocate a new node page if needed */
        LOOKUP_NODE,                    /* look up a node without readahead */
@@ -178,6 +181,7 @@ struct f2fs_inode_info {
        f2fs_hash_t chash;              /* hash value of given file name */
        unsigned int clevel;            /* maximum level of given file name */
        nid_t i_xattr_nid;              /* node id that contains xattrs */
+       unsigned long long xattr_ver;   /* cp version of xattr modification */
        struct extent_info ext;         /* in-memory extent cache entry */
 };
 
@@ -350,6 +354,7 @@ enum page_type {
 
 struct f2fs_sb_info {
        struct super_block *sb;                 /* pointer to VFS super block */
+       struct proc_dir_entry *s_proc;          /* proc entry */
        struct buffer_head *raw_super_buf;      /* buffer head of raw sb */
        struct f2fs_super_block *raw_super;     /* raw super block pointer */
        int s_dirty;                            /* dirty flag for checkpoint */
@@ -429,6 +434,10 @@ struct f2fs_sb_info {
 #endif
        unsigned int last_victim[2];            /* last victim segment # */
        spinlock_t stat_lock;                   /* lock for stat operations */
+
+       /* For sysfs suppport */
+       struct kobject s_kobj;
+       struct completion s_kobj_unregister;
 };
 
 /*
@@ -454,6 +463,11 @@ static inline struct f2fs_checkpoint *F2FS_CKPT(struct f2fs_sb_info *sbi)
        return (struct f2fs_checkpoint *)(sbi->ckpt);
 }
 
+static inline struct f2fs_node *F2FS_NODE(struct page *page)
+{
+       return (struct f2fs_node *)page_address(page);
+}
+
 static inline struct f2fs_nm_info *NM_I(struct f2fs_sb_info *sbi)
 {
        return (struct f2fs_nm_info *)(sbi->nm_info);
@@ -489,6 +503,11 @@ static inline void F2FS_RESET_SB_DIRT(struct f2fs_sb_info *sbi)
        sbi->s_dirty = 0;
 }
 
+static inline unsigned long long cur_cp_version(struct f2fs_checkpoint *cp)
+{
+       return le64_to_cpu(cp->checkpoint_ver);
+}
+
 static inline bool is_set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
 {
        unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags);
@@ -677,7 +696,7 @@ static inline block_t __start_cp_addr(struct f2fs_sb_info *sbi)
 {
        block_t start_addr;
        struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
-       unsigned long long ckpt_version = le64_to_cpu(ckpt->checkpoint_ver);
+       unsigned long long ckpt_version = cur_cp_version(ckpt);
 
        start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr);
 
@@ -812,7 +831,7 @@ static inline struct kmem_cache *f2fs_kmem_cache_create(const char *name,
 
 static inline bool IS_INODE(struct page *page)
 {
-       struct f2fs_node *p = (struct f2fs_node *)page_address(page);
+       struct f2fs_node *p = F2FS_NODE(page);
        return RAW_IS_INODE(p);
 }
 
@@ -826,7 +845,7 @@ static inline block_t datablock_addr(struct page *node_page,
 {
        struct f2fs_node *raw_node;
        __le32 *addr_array;
-       raw_node = (struct f2fs_node *)page_address(node_page);
+       raw_node = F2FS_NODE(node_page);
        addr_array = blkaddr_in_node(raw_node);
        return le32_to_cpu(addr_array[offset]);
 }
@@ -947,6 +966,7 @@ struct f2fs_dir_entry *f2fs_parent_dir(struct inode *, struct page **);
 ino_t f2fs_inode_by_name(struct inode *, struct qstr *);
 void f2fs_set_link(struct inode *, struct f2fs_dir_entry *,
                                struct page *, struct inode *);
+int update_dent_inode(struct inode *, const struct qstr *);
 int __f2fs_add_link(struct inode *, const struct qstr *, struct inode *);
 void f2fs_delete_entry(struct f2fs_dir_entry *, struct page *, struct inode *);
 int f2fs_make_empty(struct inode *, struct inode *);
@@ -1012,7 +1032,8 @@ int npages_for_summary_flush(struct f2fs_sb_info *);
 void allocate_new_segments(struct f2fs_sb_info *);
 struct page *get_sum_page(struct f2fs_sb_info *, unsigned int);
 struct bio *f2fs_bio_alloc(struct block_device *, int);
-void f2fs_submit_bio(struct f2fs_sb_info *, enum page_type, bool sync);
+void f2fs_submit_bio(struct f2fs_sb_info *, enum page_type, bool);
+void f2fs_wait_on_page_writeback(struct page *, enum page_type, bool);
 void write_meta_page(struct f2fs_sb_info *, struct page *);
 void write_node_page(struct f2fs_sb_info *, struct page *, unsigned int,
                                        block_t, block_t *);
@@ -1037,7 +1058,8 @@ void destroy_segment_manager(struct f2fs_sb_info *);
 struct page *grab_meta_page(struct f2fs_sb_info *, pgoff_t);
 struct page *get_meta_page(struct f2fs_sb_info *, pgoff_t);
 long sync_meta_pages(struct f2fs_sb_info *, enum page_type, long);
-int check_orphan_space(struct f2fs_sb_info *);
+int acquire_orphan_inode(struct f2fs_sb_info *);
+void release_orphan_inode(struct f2fs_sb_info *);
 void add_orphan_inode(struct f2fs_sb_info *, nid_t);
 void remove_orphan_inode(struct f2fs_sb_info *, nid_t);
 int recover_orphan_inodes(struct f2fs_sb_info *);
@@ -1112,11 +1134,16 @@ struct f2fs_stat_info {
        unsigned base_mem, cache_mem;
 };
 
+static inline struct f2fs_stat_info *F2FS_STAT(struct f2fs_sb_info *sbi)
+{
+       return (struct f2fs_stat_info*)sbi->stat_info;
+}
+
 #define stat_inc_call_count(si)        ((si)->call_count++)
 
 #define stat_inc_seg_count(sbi, type)                                  \
        do {                                                            \
-               struct f2fs_stat_info *si = sbi->stat_info;             \
+               struct f2fs_stat_info *si = F2FS_STAT(sbi);             \
                (si)->tot_segs++;                                       \
                if (type == SUM_TYPE_DATA)                              \
                        si->data_segs++;                                \
@@ -1129,14 +1156,14 @@ struct f2fs_stat_info {
 
 #define stat_inc_data_blk_count(sbi, blks)                             \
        do {                                                            \
-               struct f2fs_stat_info *si = sbi->stat_info;             \
+               struct f2fs_stat_info *si = F2FS_STAT(sbi);             \
                stat_inc_tot_blk_count(si, blks);                       \
                si->data_blks += (blks);                                \
        } while (0)
 
 #define stat_inc_node_blk_count(sbi, blks)                             \
        do {                                                            \
-               struct f2fs_stat_info *si = sbi->stat_info;             \
+               struct f2fs_stat_info *si = F2FS_STAT(sbi);             \
                stat_inc_tot_blk_count(si, blks);                       \
                si->node_blks += (blks);                                \
        } while (0)
index d2d2b7dbdcc12b348ca89899f500748716c2104c..4f24855fa17f44f7459cfb466379bbd2fbe90cea 100644 (file)
@@ -112,11 +112,13 @@ static int get_parent_ino(struct inode *inode, nid_t *pino)
        if (!dentry)
                return 0;
 
-       inode = igrab(dentry->d_parent->d_inode);
-       dput(dentry);
+       if (update_dent_inode(inode, &dentry->d_name)) {
+               dput(dentry);
+               return 0;
+       }
 
-       *pino = inode->i_ino;
-       iput(inode);
+       *pino = parent_ino(dentry);
+       dput(dentry);
        return 1;
 }
 
@@ -147,9 +149,10 @@ int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
 
        mutex_lock(&inode->i_mutex);
 
-       if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
-               goto out;
-
+       /*
+        * Both fdatasync() and fsync() can be recovered after a
+        * sudden power-off.
+        */
        if (!S_ISREG(inode->i_mode) || inode->i_nlink != 1)
                need_cp = true;
        else if (file_wrong_pino(inode))
@@ -158,10 +161,14 @@ int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
                need_cp = true;
        else if (!is_checkpointed_node(sbi, F2FS_I(inode)->i_pino))
                need_cp = true;
+       else if (F2FS_I(inode)->xattr_ver == cur_cp_version(F2FS_CKPT(sbi)))
+               need_cp = true;
 
        if (need_cp) {
                nid_t pino;
 
+               F2FS_I(inode)->xattr_ver = 0;
+
                /* all the dirty node pages should be flushed for POR */
                ret = f2fs_sync_fs(inode->i_sb, 1);
                if (file_wrong_pino(inode) && inode->i_nlink == 1 &&
@@ -205,7 +212,7 @@ int truncate_data_blocks_range(struct dnode_of_data *dn, int count)
        struct f2fs_node *raw_node;
        __le32 *addr;
 
-       raw_node = page_address(dn->node_page);
+       raw_node = F2FS_NODE(dn->node_page);
        addr = blkaddr_in_node(raw_node) + ofs;
 
        for ( ; count > 0; count--, addr++, dn->ofs_in_node++) {
@@ -678,8 +685,8 @@ const struct file_operations f2fs_file_operations = {
        .llseek         = generic_file_llseek,
        .read           = do_sync_read,
        .write          = do_sync_write,
-       .aio_read       = generic_file_aio_read,
-       .aio_write      = generic_file_aio_write,
+       .read_iter      = generic_file_read_iter,
+       .write_iter     = generic_file_write_iter,
        .open           = generic_file_open,
        .mmap           = f2fs_file_mmap,
        .fsync          = f2fs_sync_file,
index 35f9b1a196aa15bd4208a33dfca2b6d85bd63150..e6b3ffd5ff6a15430d1f9ae0ebe9457fa14ca1c6 100644 (file)
@@ -29,10 +29,11 @@ static struct kmem_cache *winode_slab;
 static int gc_thread_func(void *data)
 {
        struct f2fs_sb_info *sbi = data;
+       struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
        wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head;
        long wait_ms;
 
-       wait_ms = GC_THREAD_MIN_SLEEP_TIME;
+       wait_ms = gc_th->min_sleep_time;
 
        do {
                if (try_to_freeze())
@@ -45,7 +46,7 @@ static int gc_thread_func(void *data)
                        break;
 
                if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) {
-                       wait_ms = GC_THREAD_MAX_SLEEP_TIME;
+                       wait_ms = increase_sleep_time(gc_th, wait_ms);
                        continue;
                }
 
@@ -66,15 +67,15 @@ static int gc_thread_func(void *data)
                        continue;
 
                if (!is_idle(sbi)) {
-                       wait_ms = increase_sleep_time(wait_ms);
+                       wait_ms = increase_sleep_time(gc_th, wait_ms);
                        mutex_unlock(&sbi->gc_mutex);
                        continue;
                }
 
                if (has_enough_invalid_blocks(sbi))
-                       wait_ms = decrease_sleep_time(wait_ms);
+                       wait_ms = decrease_sleep_time(gc_th, wait_ms);
                else
-                       wait_ms = increase_sleep_time(wait_ms);
+                       wait_ms = increase_sleep_time(gc_th, wait_ms);
 
 #ifdef CONFIG_F2FS_STAT_FS
                sbi->bg_gc++;
@@ -82,7 +83,7 @@ static int gc_thread_func(void *data)
 
                /* if return value is not zero, no victim was selected */
                if (f2fs_gc(sbi))
-                       wait_ms = GC_THREAD_NOGC_SLEEP_TIME;
+                       wait_ms = gc_th->no_gc_sleep_time;
        } while (!kthread_should_stop());
        return 0;
 }
@@ -101,6 +102,12 @@ int start_gc_thread(struct f2fs_sb_info *sbi)
                goto out;
        }
 
+       gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME;
+       gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME;
+       gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME;
+
+       gc_th->gc_idle = 0;
+
        sbi->gc_thread = gc_th;
        init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head);
        sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi,
@@ -125,9 +132,17 @@ void stop_gc_thread(struct f2fs_sb_info *sbi)
        sbi->gc_thread = NULL;
 }
 
-static int select_gc_type(int gc_type)
+static int select_gc_type(struct f2fs_gc_kthread *gc_th, int gc_type)
 {
-       return (gc_type == BG_GC) ? GC_CB : GC_GREEDY;
+       int gc_mode = (gc_type == BG_GC) ? GC_CB : GC_GREEDY;
+
+       if (gc_th && gc_th->gc_idle) {
+               if (gc_th->gc_idle == 1)
+                       gc_mode = GC_CB;
+               else if (gc_th->gc_idle == 2)
+                       gc_mode = GC_GREEDY;
+       }
+       return gc_mode;
 }
 
 static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
@@ -140,7 +155,7 @@ static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
                p->dirty_segmap = dirty_i->dirty_segmap[type];
                p->ofs_unit = 1;
        } else {
-               p->gc_mode = select_gc_type(gc_type);
+               p->gc_mode = select_gc_type(sbi->gc_thread, gc_type);
                p->dirty_segmap = dirty_i->dirty_segmap[DIRTY];
                p->ofs_unit = sbi->segs_per_sec;
        }
@@ -407,8 +422,7 @@ next_step:
 
                /* set page dirty and write it */
                if (gc_type == FG_GC) {
-                       f2fs_submit_bio(sbi, NODE, true);
-                       wait_on_page_writeback(node_page);
+                       f2fs_wait_on_page_writeback(node_page, NODE, true);
                        set_page_dirty(node_page);
                } else {
                        if (!PageWriteback(node_page))
@@ -508,10 +522,7 @@ static void move_data_page(struct inode *inode, struct page *page, int gc_type)
        } else {
                struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
 
-               if (PageWriteback(page)) {
-                       f2fs_submit_bio(sbi, DATA, true);
-                       wait_on_page_writeback(page);
-               }
+               f2fs_wait_on_page_writeback(page, DATA, true);
 
                if (clear_page_dirty_for_io(page) &&
                        S_ISDIR(inode->i_mode)) {
index 2c6a6bd0832244f4bb1e45b52cd41400e87fe363..c22dee9f1422f9847cc659795dcaaba0c0f56a66 100644 (file)
@@ -13,9 +13,9 @@
                                                 * whether IO subsystem is idle
                                                 * or not
                                                 */
-#define GC_THREAD_MIN_SLEEP_TIME       30000   /* milliseconds */
-#define GC_THREAD_MAX_SLEEP_TIME       60000
-#define GC_THREAD_NOGC_SLEEP_TIME      300000  /* wait 5 min */
+#define DEF_GC_THREAD_MIN_SLEEP_TIME   30000   /* milliseconds */
+#define DEF_GC_THREAD_MAX_SLEEP_TIME   60000
+#define DEF_GC_THREAD_NOGC_SLEEP_TIME  300000  /* wait 5 min */
 #define LIMIT_INVALID_BLOCK    40 /* percentage over total user space */
 #define LIMIT_FREE_BLOCK       40 /* percentage over invalid + free space */
 
 struct f2fs_gc_kthread {
        struct task_struct *f2fs_gc_task;
        wait_queue_head_t gc_wait_queue_head;
+
+       /* for gc sleep time */
+       unsigned int min_sleep_time;
+       unsigned int max_sleep_time;
+       unsigned int no_gc_sleep_time;
+
+       /* for changing gc mode */
+       unsigned int gc_idle;
 };
 
 struct inode_entry {
@@ -56,25 +64,25 @@ static inline block_t limit_free_user_blocks(struct f2fs_sb_info *sbi)
        return (long)(reclaimable_user_blocks * LIMIT_FREE_BLOCK) / 100;
 }
 
-static inline long increase_sleep_time(long wait)
+static inline long increase_sleep_time(struct f2fs_gc_kthread *gc_th, long wait)
 {
-       if (wait == GC_THREAD_NOGC_SLEEP_TIME)
+       if (wait == gc_th->no_gc_sleep_time)
                return wait;
 
-       wait += GC_THREAD_MIN_SLEEP_TIME;
-       if (wait > GC_THREAD_MAX_SLEEP_TIME)
-               wait = GC_THREAD_MAX_SLEEP_TIME;
+       wait += gc_th->min_sleep_time;
+       if (wait > gc_th->max_sleep_time)
+               wait = gc_th->max_sleep_time;
        return wait;
 }
 
-static inline long decrease_sleep_time(long wait)
+static inline long decrease_sleep_time(struct f2fs_gc_kthread *gc_th, long wait)
 {
-       if (wait == GC_THREAD_NOGC_SLEEP_TIME)
-               wait = GC_THREAD_MAX_SLEEP_TIME;
+       if (wait == gc_th->no_gc_sleep_time)
+               wait = gc_th->max_sleep_time;
 
-       wait -= GC_THREAD_MIN_SLEEP_TIME;
-       if (wait <= GC_THREAD_MIN_SLEEP_TIME)
-               wait = GC_THREAD_MIN_SLEEP_TIME;
+       wait -= gc_th->min_sleep_time;
+       if (wait <= gc_th->min_sleep_time)
+               wait = gc_th->min_sleep_time;
        return wait;
 }
 
index 2b2d45d19e3ea1e1101ff893a2ce9e0f19e607ea..7f8569bd875906bbbbbbb3fd7fef222d10243520 100644 (file)
@@ -56,7 +56,7 @@ static int do_read_inode(struct inode *inode)
        if (IS_ERR(node_page))
                return PTR_ERR(node_page);
 
-       rn = page_address(node_page);
+       rn = F2FS_NODE(node_page);
        ri = &(rn->i);
 
        inode->i_mode = le16_to_cpu(ri->i_mode);
@@ -151,9 +151,9 @@ void update_inode(struct inode *inode, struct page *node_page)
        struct f2fs_node *rn;
        struct f2fs_inode *ri;
 
-       wait_on_page_writeback(node_page);
+       f2fs_wait_on_page_writeback(node_page, NODE, false);
 
-       rn = page_address(node_page);
+       rn = F2FS_NODE(node_page);
        ri = &(rn->i);
 
        ri->i_mode = cpu_to_le16(inode->i_mode);
@@ -221,9 +221,6 @@ int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc)
        if (!is_inode_flag_set(F2FS_I(inode), FI_DIRTY_INODE))
                return 0;
 
-       if (wbc)
-               f2fs_balance_fs(sbi);
-
        /*
         * We need to lock here to prevent from producing dirty node pages
         * during the urgent cleaning time when runing out of free sections.
@@ -231,6 +228,10 @@ int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc)
        ilock = mutex_lock_op(sbi);
        ret = update_inode_page(inode);
        mutex_unlock_op(sbi, ilock);
+
+       if (wbc)
+               f2fs_balance_fs(sbi);
+
        return ret;
 }
 
index 64c07169df050b4b358186540031365bd67f4b56..4e475181280c4a8865513f89297a17ca14af07f0 100644 (file)
@@ -239,7 +239,7 @@ static int f2fs_unlink(struct inode *dir, struct dentry *dentry)
        if (!de)
                goto fail;
 
-       err = check_orphan_space(sbi);
+       err = acquire_orphan_inode(sbi);
        if (err) {
                kunmap(page);
                f2fs_put_page(page, 0);
@@ -393,7 +393,7 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
        struct inode *old_inode = old_dentry->d_inode;
        struct inode *new_inode = new_dentry->d_inode;
        struct page *old_dir_page;
-       struct page *old_page;
+       struct page *old_page, *new_page;
        struct f2fs_dir_entry *old_dir_entry = NULL;
        struct f2fs_dir_entry *old_entry;
        struct f2fs_dir_entry *new_entry;
@@ -415,7 +415,6 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
        ilock = mutex_lock_op(sbi);
 
        if (new_inode) {
-               struct page *new_page;
 
                err = -ENOTEMPTY;
                if (old_dir_entry && !f2fs_empty_dir(new_inode))
@@ -427,14 +426,27 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
                if (!new_entry)
                        goto out_dir;
 
+               err = acquire_orphan_inode(sbi);
+               if (err)
+                       goto put_out_dir;
+
+               if (update_dent_inode(old_inode, &new_dentry->d_name)) {
+                       release_orphan_inode(sbi);
+                       goto put_out_dir;
+               }
+
                f2fs_set_link(new_dir, new_entry, new_page, old_inode);
 
                new_inode->i_ctime = CURRENT_TIME;
                if (old_dir_entry)
                        drop_nlink(new_inode);
                drop_nlink(new_inode);
+
                if (!new_inode->i_nlink)
                        add_orphan_inode(sbi, new_inode->i_ino);
+               else
+                       release_orphan_inode(sbi);
+
                update_inode_page(new_inode);
        } else {
                err = f2fs_add_link(new_dentry, old_inode);
@@ -467,6 +479,8 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
        mutex_unlock_op(sbi, ilock);
        return 0;
 
+put_out_dir:
+       f2fs_put_page(new_page, 1);
 out_dir:
        if (old_dir_entry) {
                kunmap(old_dir_page);
index b418aee09573f6d4aff7c4f821ee4faf86d98b0b..818ff368de8199f02794db8c7f7da9069fe113e7 100644 (file)
@@ -565,7 +565,7 @@ static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
                return PTR_ERR(page);
        }
 
-       rn = (struct f2fs_node *)page_address(page);
+       rn = F2FS_NODE(page);
        if (depth < 3) {
                for (i = ofs; i < NIDS_PER_BLOCK; i++, freed++) {
                        child_nid = le32_to_cpu(rn->in.nid[i]);
@@ -698,7 +698,7 @@ restart:
        set_new_dnode(&dn, inode, page, NULL, 0);
        unlock_page(page);
 
-       rn = page_address(page);
+       rn = F2FS_NODE(page);
        switch (level) {
        case 0:
        case 1:
@@ -833,29 +833,32 @@ struct page *new_node_page(struct dnode_of_data *dn,
        if (!page)
                return ERR_PTR(-ENOMEM);
 
-       get_node_info(sbi, dn->nid, &old_ni);
+       if (!inc_valid_node_count(sbi, dn->inode, 1)) {
+               err = -ENOSPC;
+               goto fail;
+       }
 
-       SetPageUptodate(page);
-       fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true);
+       get_node_info(sbi, dn->nid, &old_ni);
 
        /* Reinitialize old_ni with new node page */
        BUG_ON(old_ni.blk_addr != NULL_ADDR);
        new_ni = old_ni;
        new_ni.ino = dn->inode->i_ino;
-
-       if (!inc_valid_node_count(sbi, dn->inode, 1)) {
-               err = -ENOSPC;
-               goto fail;
-       }
        set_node_addr(sbi, &new_ni, NEW_ADDR);
+
+       fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true);
        set_cold_node(dn->inode, page);
+       SetPageUptodate(page);
+       set_page_dirty(page);
+
+       if (ofs == XATTR_NODE_OFFSET)
+               F2FS_I(dn->inode)->i_xattr_nid = dn->nid;
 
        dn->node_page = page;
        if (ipage)
                update_inode(dn->inode, ipage);
        else
                sync_inode_page(dn);
-       set_page_dirty(page);
        if (ofs == 0)
                inc_valid_inode_count(sbi);
 
@@ -916,7 +919,6 @@ void ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
                f2fs_put_page(apage, 0);
        else if (err == LOCKED_PAGE)
                f2fs_put_page(apage, 1);
-       return;
 }
 
 struct page *get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
@@ -1484,8 +1486,8 @@ int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
        SetPageUptodate(ipage);
        fill_node_footer(ipage, ino, ino, 0, true);
 
-       src = (struct f2fs_node *)page_address(page);
-       dst = (struct f2fs_node *)page_address(ipage);
+       src = F2FS_NODE(page);
+       dst = F2FS_NODE(ipage);
 
        memcpy(dst, src, (unsigned long)&src->i.i_ext - (unsigned long)&src->i);
        dst->i.i_size = 0;
@@ -1515,8 +1517,8 @@ int restore_node_summary(struct f2fs_sb_info *sbi,
 
        /* alloc temporal page for read node */
        page = alloc_page(GFP_NOFS | __GFP_ZERO);
-       if (IS_ERR(page))
-               return PTR_ERR(page);
+       if (!page)
+               return -ENOMEM;
        lock_page(page);
 
        /* scan the node segment */
@@ -1535,7 +1537,7 @@ int restore_node_summary(struct f2fs_sb_info *sbi,
                        goto out;
 
                lock_page(page);
-               rn = (struct f2fs_node *)page_address(page);
+               rn = F2FS_NODE(page);
                sum_entry->nid = rn->footer.nid;
                sum_entry->version = 0;
                sum_entry->ofs_in_node = 0;
index c65fb4f4230f699ba7647c820b328942b43b8490..3496bb3e15dc37b7b6806dffc08a00abf5921766 100644 (file)
@@ -155,8 +155,7 @@ static inline void set_to_next_nat(struct f2fs_nm_info *nm_i, nid_t start_nid)
 static inline void fill_node_footer(struct page *page, nid_t nid,
                                nid_t ino, unsigned int ofs, bool reset)
 {
-       void *kaddr = page_address(page);
-       struct f2fs_node *rn = (struct f2fs_node *)kaddr;
+       struct f2fs_node *rn = F2FS_NODE(page);
        if (reset)
                memset(rn, 0, sizeof(*rn));
        rn->footer.nid = cpu_to_le32(nid);
@@ -166,10 +165,8 @@ static inline void fill_node_footer(struct page *page, nid_t nid,
 
 static inline void copy_node_footer(struct page *dst, struct page *src)
 {
-       void *src_addr = page_address(src);
-       void *dst_addr = page_address(dst);
-       struct f2fs_node *src_rn = (struct f2fs_node *)src_addr;
-       struct f2fs_node *dst_rn = (struct f2fs_node *)dst_addr;
+       struct f2fs_node *src_rn = F2FS_NODE(src);
+       struct f2fs_node *dst_rn = F2FS_NODE(dst);
        memcpy(&dst_rn->footer, &src_rn->footer, sizeof(struct node_footer));
 }
 
@@ -177,45 +174,40 @@ static inline void fill_node_footer_blkaddr(struct page *page, block_t blkaddr)
 {
        struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
        struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
-       void *kaddr = page_address(page);
-       struct f2fs_node *rn = (struct f2fs_node *)kaddr;
+       struct f2fs_node *rn = F2FS_NODE(page);
+
        rn->footer.cp_ver = ckpt->checkpoint_ver;
        rn->footer.next_blkaddr = cpu_to_le32(blkaddr);
 }
 
 static inline nid_t ino_of_node(struct page *node_page)
 {
-       void *kaddr = page_address(node_page);
-       struct f2fs_node *rn = (struct f2fs_node *)kaddr;
+       struct f2fs_node *rn = F2FS_NODE(node_page);
        return le32_to_cpu(rn->footer.ino);
 }
 
 static inline nid_t nid_of_node(struct page *node_page)
 {
-       void *kaddr = page_address(node_page);
-       struct f2fs_node *rn = (struct f2fs_node *)kaddr;
+       struct f2fs_node *rn = F2FS_NODE(node_page);
        return le32_to_cpu(rn->footer.nid);
 }
 
 static inline unsigned int ofs_of_node(struct page *node_page)
 {
-       void *kaddr = page_address(node_page);
-       struct f2fs_node *rn = (struct f2fs_node *)kaddr;
+       struct f2fs_node *rn = F2FS_NODE(node_page);
        unsigned flag = le32_to_cpu(rn->footer.flag);
        return flag >> OFFSET_BIT_SHIFT;
 }
 
 static inline unsigned long long cpver_of_node(struct page *node_page)
 {
-       void *kaddr = page_address(node_page);
-       struct f2fs_node *rn = (struct f2fs_node *)kaddr;
+       struct f2fs_node *rn = F2FS_NODE(node_page);
        return le64_to_cpu(rn->footer.cp_ver);
 }
 
 static inline block_t next_blkaddr_of_node(struct page *node_page)
 {
-       void *kaddr = page_address(node_page);
-       struct f2fs_node *rn = (struct f2fs_node *)kaddr;
+       struct f2fs_node *rn = F2FS_NODE(node_page);
        return le32_to_cpu(rn->footer.next_blkaddr);
 }
 
@@ -237,6 +229,10 @@ static inline block_t next_blkaddr_of_node(struct page *node_page)
 static inline bool IS_DNODE(struct page *node_page)
 {
        unsigned int ofs = ofs_of_node(node_page);
+
+       if (ofs == XATTR_NODE_OFFSET)
+               return false;
+
        if (ofs == 3 || ofs == 4 + NIDS_PER_BLOCK ||
                        ofs == 5 + 2 * NIDS_PER_BLOCK)
                return false;
@@ -250,7 +246,7 @@ static inline bool IS_DNODE(struct page *node_page)
 
 static inline void set_nid(struct page *p, int off, nid_t nid, bool i)
 {
-       struct f2fs_node *rn = (struct f2fs_node *)page_address(p);
+       struct f2fs_node *rn = F2FS_NODE(p);
 
        wait_on_page_writeback(p);
 
@@ -263,7 +259,8 @@ static inline void set_nid(struct page *p, int off, nid_t nid, bool i)
 
 static inline nid_t get_nid(struct page *p, int off, bool i)
 {
-       struct f2fs_node *rn = (struct f2fs_node *)page_address(p);
+       struct f2fs_node *rn = F2FS_NODE(p);
+
        if (i)
                return le32_to_cpu(rn->i.i_nid[off - NODE_DIR1_BLOCK]);
        return le32_to_cpu(rn->in.nid[off]);
@@ -314,8 +311,7 @@ static inline void clear_cold_data(struct page *page)
 
 static inline int is_node(struct page *page, int type)
 {
-       void *kaddr = page_address(page);
-       struct f2fs_node *rn = (struct f2fs_node *)kaddr;
+       struct f2fs_node *rn = F2FS_NODE(page);
        return le32_to_cpu(rn->footer.flag) & (1 << type);
 }
 
@@ -325,7 +321,7 @@ static inline int is_node(struct page *page, int type)
 
 static inline void set_cold_node(struct inode *inode, struct page *page)
 {
-       struct f2fs_node *rn = (struct f2fs_node *)page_address(page);
+       struct f2fs_node *rn = F2FS_NODE(page);
        unsigned int flag = le32_to_cpu(rn->footer.flag);
 
        if (S_ISDIR(inode->i_mode))
@@ -337,7 +333,7 @@ static inline void set_cold_node(struct inode *inode, struct page *page)
 
 static inline void set_mark(struct page *page, int mark, int type)
 {
-       struct f2fs_node *rn = (struct f2fs_node *)page_address(page);
+       struct f2fs_node *rn = F2FS_NODE(page);
        unsigned int flag = le32_to_cpu(rn->footer.flag);
        if (mark)
                flag |= (0x1 << type);
index d56d951c22537a14a9f103c679e93e64321d863f..fa493bb64167b50291e19d38fea2beebb3405b80 100644 (file)
@@ -40,8 +40,7 @@ static struct fsync_inode_entry *get_fsync_inode(struct list_head *head,
 
 static int recover_dentry(struct page *ipage, struct inode *inode)
 {
-       void *kaddr = page_address(ipage);
-       struct f2fs_node *raw_node = (struct f2fs_node *)kaddr;
+       struct f2fs_node *raw_node = F2FS_NODE(ipage);
        struct f2fs_inode *raw_inode = &(raw_node->i);
        nid_t pino = le32_to_cpu(raw_inode->i_pino);
        struct f2fs_dir_entry *de;
@@ -93,8 +92,7 @@ out:
 
 static int recover_inode(struct inode *inode, struct page *node_page)
 {
-       void *kaddr = page_address(node_page);
-       struct f2fs_node *raw_node = (struct f2fs_node *)kaddr;
+       struct f2fs_node *raw_node = F2FS_NODE(node_page);
        struct f2fs_inode *raw_inode = &(raw_node->i);
 
        if (!IS_INODE(node_page))
@@ -119,7 +117,7 @@ static int recover_inode(struct inode *inode, struct page *node_page)
 
 static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
 {
-       unsigned long long cp_ver = le64_to_cpu(sbi->ckpt->checkpoint_ver);
+       unsigned long long cp_ver = cur_cp_version(F2FS_CKPT(sbi));
        struct curseg_info *curseg;
        struct page *page;
        block_t blkaddr;
@@ -131,8 +129,8 @@ static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
 
        /* read node page */
        page = alloc_page(GFP_F2FS_ZERO);
-       if (IS_ERR(page))
-               return PTR_ERR(page);
+       if (!page)
+               return -ENOMEM;
        lock_page(page);
 
        while (1) {
@@ -357,7 +355,7 @@ err:
 static int recover_data(struct f2fs_sb_info *sbi,
                                struct list_head *head, int type)
 {
-       unsigned long long cp_ver = le64_to_cpu(sbi->ckpt->checkpoint_ver);
+       unsigned long long cp_ver = cur_cp_version(F2FS_CKPT(sbi));
        struct curseg_info *curseg;
        struct page *page;
        int err = 0;
@@ -369,7 +367,7 @@ static int recover_data(struct f2fs_sb_info *sbi,
 
        /* read node page */
        page = alloc_page(GFP_NOFS | __GFP_ZERO);
-       if (IS_ERR(page))
+       if (!page)
                return -ENOMEM;
 
        lock_page(page);
index a86d125a9885e274b11a421d4b117480a2744994..09af9c7b0f52673fff92f00be5a37a6030b72668 100644 (file)
@@ -117,7 +117,6 @@ static void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno)
        }
 
        mutex_unlock(&dirty_i->seglist_lock);
-       return;
 }
 
 /*
@@ -261,7 +260,6 @@ static void __add_sum_entry(struct f2fs_sb_info *sbi, int type,
        void *addr = curseg->sum_blk;
        addr += curseg->next_blkoff * sizeof(struct f2fs_summary);
        memcpy(addr, sum, sizeof(struct f2fs_summary));
-       return;
 }
 
 /*
@@ -542,12 +540,9 @@ static void allocate_segment_by_default(struct f2fs_sb_info *sbi,
 {
        struct curseg_info *curseg = CURSEG_I(sbi, type);
 
-       if (force) {
+       if (force)
                new_curseg(sbi, type, true);
-               goto out;
-       }
-
-       if (type == CURSEG_WARM_NODE)
+       else if (type == CURSEG_WARM_NODE)
                new_curseg(sbi, type, false);
        else if (curseg->alloc_type == LFS && is_next_segment_free(sbi, type))
                new_curseg(sbi, type, false);
@@ -555,11 +550,9 @@ static void allocate_segment_by_default(struct f2fs_sb_info *sbi,
                change_curseg(sbi, type, true);
        else
                new_curseg(sbi, type, false);
-out:
 #ifdef CONFIG_F2FS_STAT_FS
        sbi->segment_count[curseg->alloc_type]++;
 #endif
-       return;
 }
 
 void allocate_new_segments(struct f2fs_sb_info *sbi)
@@ -611,18 +604,12 @@ static void f2fs_end_io_write(struct bio *bio, int err)
 struct bio *f2fs_bio_alloc(struct block_device *bdev, int npages)
 {
        struct bio *bio;
-       struct bio_private *priv;
-retry:
-       priv = kmalloc(sizeof(struct bio_private), GFP_NOFS);
-       if (!priv) {
-               cond_resched();
-               goto retry;
-       }
 
        /* No failure on bio allocation */
        bio = bio_alloc(GFP_NOIO, npages);
        bio->bi_bdev = bdev;
-       bio->bi_private = priv;
+       bio->bi_private = NULL;
+
        return bio;
 }
 
@@ -681,8 +668,17 @@ static void submit_write_page(struct f2fs_sb_info *sbi, struct page *page,
                do_submit_bio(sbi, type, false);
 alloc_new:
        if (sbi->bio[type] == NULL) {
+               struct bio_private *priv;
+retry:
+               priv = kmalloc(sizeof(struct bio_private), GFP_NOFS);
+               if (!priv) {
+                       cond_resched();
+                       goto retry;
+               }
+
                sbi->bio[type] = f2fs_bio_alloc(bdev, max_hw_blocks(sbi));
                sbi->bio[type]->bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr);
+               sbi->bio[type]->bi_private = priv;
                /*
                 * The end_io will be assigned at the sumbission phase.
                 * Until then, let bio_add_page() merge consecutive IOs as much
@@ -702,6 +698,16 @@ alloc_new:
        trace_f2fs_submit_write_page(page, blk_addr, type);
 }
 
+void f2fs_wait_on_page_writeback(struct page *page,
+                               enum page_type type, bool sync)
+{
+       struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
+       if (PageWriteback(page)) {
+               f2fs_submit_bio(sbi, type, sync);
+               wait_on_page_writeback(page);
+       }
+}
+
 static bool __has_curseg_space(struct f2fs_sb_info *sbi, int type)
 {
        struct curseg_info *curseg = CURSEG_I(sbi, type);
@@ -1179,7 +1185,6 @@ void write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
 {
        if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_UMOUNT_FLAG))
                write_normal_summaries(sbi, start_blk, CURSEG_HOT_NODE);
-       return;
 }
 
 int lookup_journal_in_cursum(struct f2fs_summary_block *sum, int type,
index 75c7dc363e9267378d5cdf39f811c16a7adc8692..1d12e60c00d9301d07c8fef0bf6714f1d8d16c85 100644 (file)
 #include <linux/parser.h>
 #include <linux/mount.h>
 #include <linux/seq_file.h>
+#include <linux/proc_fs.h>
 #include <linux/random.h>
 #include <linux/exportfs.h>
 #include <linux/blkdev.h>
 #include <linux/f2fs_fs.h>
+#include <linux/sysfs.h>
 
 #include "f2fs.h"
 #include "node.h"
 #include "segment.h"
 #include "xattr.h"
+#include "gc.h"
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/f2fs.h>
 
+static struct proc_dir_entry *f2fs_proc_root;
 static struct kmem_cache *f2fs_inode_cachep;
+static struct kset *f2fs_kset;
 
 enum {
        Opt_gc_background,
@@ -57,6 +62,113 @@ static match_table_t f2fs_tokens = {
        {Opt_err, NULL},
 };
 
+/* Sysfs support for f2fs */
+struct f2fs_attr {
+       struct attribute attr;
+       ssize_t (*show)(struct f2fs_attr *, struct f2fs_sb_info *, char *);
+       ssize_t (*store)(struct f2fs_attr *, struct f2fs_sb_info *,
+                        const char *, size_t);
+       int offset;
+};
+
+static ssize_t f2fs_sbi_show(struct f2fs_attr *a,
+                       struct f2fs_sb_info *sbi, char *buf)
+{
+       struct f2fs_gc_kthread *gc_kth = sbi->gc_thread;
+       unsigned int *ui;
+
+       if (!gc_kth)
+               return -EINVAL;
+
+       ui = (unsigned int *)(((char *)gc_kth) + a->offset);
+
+       return snprintf(buf, PAGE_SIZE, "%u\n", *ui);
+}
+
+static ssize_t f2fs_sbi_store(struct f2fs_attr *a,
+                       struct f2fs_sb_info *sbi,
+                       const char *buf, size_t count)
+{
+       struct f2fs_gc_kthread *gc_kth = sbi->gc_thread;
+       unsigned long t;
+       unsigned int *ui;
+       ssize_t ret;
+
+       if (!gc_kth)
+               return -EINVAL;
+
+       ui = (unsigned int *)(((char *)gc_kth) + a->offset);
+
+       ret = kstrtoul(skip_spaces(buf), 0, &t);
+       if (ret < 0)
+               return ret;
+       *ui = t;
+       return count;
+}
+
+static ssize_t f2fs_attr_show(struct kobject *kobj,
+                               struct attribute *attr, char *buf)
+{
+       struct f2fs_sb_info *sbi = container_of(kobj, struct f2fs_sb_info,
+                                                               s_kobj);
+       struct f2fs_attr *a = container_of(attr, struct f2fs_attr, attr);
+
+       return a->show ? a->show(a, sbi, buf) : 0;
+}
+
+static ssize_t f2fs_attr_store(struct kobject *kobj, struct attribute *attr,
+                                               const char *buf, size_t len)
+{
+       struct f2fs_sb_info *sbi = container_of(kobj, struct f2fs_sb_info,
+                                                                       s_kobj);
+       struct f2fs_attr *a = container_of(attr, struct f2fs_attr, attr);
+
+       return a->store ? a->store(a, sbi, buf, len) : 0;
+}
+
+static void f2fs_sb_release(struct kobject *kobj)
+{
+       struct f2fs_sb_info *sbi = container_of(kobj, struct f2fs_sb_info,
+                                                               s_kobj);
+       complete(&sbi->s_kobj_unregister);
+}
+
+#define F2FS_ATTR_OFFSET(_name, _mode, _show, _store, _elname) \
+static struct f2fs_attr f2fs_attr_##_name = {                  \
+       .attr = {.name = __stringify(_name), .mode = _mode },   \
+       .show   = _show,                                        \
+       .store  = _store,                                       \
+       .offset = offsetof(struct f2fs_gc_kthread, _elname),    \
+}
+
+#define F2FS_RW_ATTR(name, elname)     \
+       F2FS_ATTR_OFFSET(name, 0644, f2fs_sbi_show, f2fs_sbi_store, elname)
+
+F2FS_RW_ATTR(gc_min_sleep_time, min_sleep_time);
+F2FS_RW_ATTR(gc_max_sleep_time, max_sleep_time);
+F2FS_RW_ATTR(gc_no_gc_sleep_time, no_gc_sleep_time);
+F2FS_RW_ATTR(gc_idle, gc_idle);
+
+#define ATTR_LIST(name) (&f2fs_attr_##name.attr)
+static struct attribute *f2fs_attrs[] = {
+       ATTR_LIST(gc_min_sleep_time),
+       ATTR_LIST(gc_max_sleep_time),
+       ATTR_LIST(gc_no_gc_sleep_time),
+       ATTR_LIST(gc_idle),
+       NULL,
+};
+
+static const struct sysfs_ops f2fs_attr_ops = {
+       .show   = f2fs_attr_show,
+       .store  = f2fs_attr_store,
+};
+
+static struct kobj_type f2fs_ktype = {
+       .default_attrs  = f2fs_attrs,
+       .sysfs_ops      = &f2fs_attr_ops,
+       .release        = f2fs_sb_release,
+};
+
 void f2fs_msg(struct super_block *sb, const char *level, const char *fmt, ...)
 {
        struct va_format vaf;
@@ -205,7 +317,6 @@ static int f2fs_drop_inode(struct inode *inode)
 static void f2fs_dirty_inode(struct inode *inode, int flags)
 {
        set_inode_flag(F2FS_I(inode), FI_DIRTY_INODE);
-       return;
 }
 
 static void f2fs_i_callback(struct rcu_head *head)
@@ -223,6 +334,12 @@ static void f2fs_put_super(struct super_block *sb)
 {
        struct f2fs_sb_info *sbi = F2FS_SB(sb);
 
+       if (sbi->s_proc) {
+               remove_proc_entry("segment_info", sbi->s_proc);
+               remove_proc_entry(sb->s_id, f2fs_proc_root);
+       }
+       kobject_del(&sbi->s_kobj);
+
        f2fs_destroy_stats(sbi);
        stop_gc_thread(sbi);
 
@@ -236,6 +353,8 @@ static void f2fs_put_super(struct super_block *sb)
        destroy_segment_manager(sbi);
 
        kfree(sbi->ckpt);
+       kobject_put(&sbi->s_kobj);
+       wait_for_completion(&sbi->s_kobj_unregister);
 
        sb->s_fs_info = NULL;
        brelse(sbi->raw_super_buf);
@@ -340,6 +459,36 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
        return 0;
 }
 
+static int segment_info_seq_show(struct seq_file *seq, void *offset)
+{
+       struct super_block *sb = seq->private;
+       struct f2fs_sb_info *sbi = F2FS_SB(sb);
+       unsigned int total_segs = le32_to_cpu(sbi->raw_super->segment_count_main);
+       int i;
+
+       for (i = 0; i < total_segs; i++) {
+               seq_printf(seq, "%u", get_valid_blocks(sbi, i, 1));
+               if (i != 0 && (i % 10) == 0)
+                       seq_puts(seq, "\n");
+               else
+                       seq_puts(seq, " ");
+       }
+       return 0;
+}
+
+static int segment_info_open_fs(struct inode *inode, struct file *file)
+{
+       return single_open(file, segment_info_seq_show, PDE_DATA(inode));
+}
+
+static const struct file_operations f2fs_seq_segment_info_fops = {
+       .owner = THIS_MODULE,
+       .open = segment_info_open_fs,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = single_release,
+};
+
 static int f2fs_remount(struct super_block *sb, int *flags, char *data)
 {
        struct f2fs_sb_info *sbi = F2FS_SB(sb);
@@ -766,6 +915,13 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
        if (err)
                goto fail;
 
+       if (f2fs_proc_root)
+               sbi->s_proc = proc_mkdir(sb->s_id, f2fs_proc_root);
+
+       if (sbi->s_proc)
+               proc_create_data("segment_info", S_IRUGO, sbi->s_proc,
+                                &f2fs_seq_segment_info_fops, sb);
+
        if (test_opt(sbi, DISCARD)) {
                struct request_queue *q = bdev_get_queue(sb->s_bdev);
                if (!blk_queue_discard(q))
@@ -774,6 +930,13 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
                                        "the device does not support discard");
        }
 
+       sbi->s_kobj.kset = f2fs_kset;
+       init_completion(&sbi->s_kobj_unregister);
+       err = kobject_init_and_add(&sbi->s_kobj, &f2fs_ktype, NULL,
+                                                       "%s", sb->s_id);
+       if (err)
+               goto fail;
+
        return 0;
 fail:
        stop_gc_thread(sbi);
@@ -848,22 +1011,28 @@ static int __init init_f2fs_fs(void)
        err = create_checkpoint_caches();
        if (err)
                goto fail;
+       f2fs_kset = kset_create_and_add("f2fs", NULL, fs_kobj);
+       if (!f2fs_kset)
+               goto fail;
        err = register_filesystem(&f2fs_fs_type);
        if (err)
                goto fail;
        f2fs_create_root_stats();
+       f2fs_proc_root = proc_mkdir("fs/f2fs", NULL);
 fail:
        return err;
 }
 
 static void __exit exit_f2fs_fs(void)
 {
+       remove_proc_entry("fs/f2fs", NULL);
        f2fs_destroy_root_stats();
        unregister_filesystem(&f2fs_fs_type);
        destroy_checkpoint_caches();
        destroy_gc_caches();
        destroy_node_manager_caches();
        destroy_inodecache();
+       kset_unregister(f2fs_kset);
 }
 
 module_init(init_f2fs_fs)
index 3ab07ecd86ca16b5aaaa4e89792e5fe0db22ffca..3bc307c22b70b923fedfffd88f9bb383adc1081b 100644 (file)
@@ -378,23 +378,23 @@ int f2fs_setxattr(struct inode *inode, int name_index, const char *name,
        if (!fi->i_xattr_nid) {
                /* Allocate new attribute block */
                struct dnode_of_data dn;
+               nid_t new_nid;
 
-               if (!alloc_nid(sbi, &fi->i_xattr_nid)) {
+               if (!alloc_nid(sbi, &new_nid)) {
                        error = -ENOSPC;
                        goto exit;
                }
-               set_new_dnode(&dn, inode, NULL, NULL, fi->i_xattr_nid);
+               set_new_dnode(&dn, inode, NULL, NULL, new_nid);
                mark_inode_dirty(inode);
 
                page = new_node_page(&dn, XATTR_NODE_OFFSET, ipage);
                if (IS_ERR(page)) {
-                       alloc_nid_failed(sbi, fi->i_xattr_nid);
-                       fi->i_xattr_nid = 0;
+                       alloc_nid_failed(sbi, new_nid);
                        error = PTR_ERR(page);
                        goto exit;
                }
 
-               alloc_nid_done(sbi, fi->i_xattr_nid);
+               alloc_nid_done(sbi, new_nid);
                base_addr = page_address(page);
                header = XATTR_HDR(base_addr);
                header->h_magic = cpu_to_le32(F2FS_XATTR_MAGIC);
@@ -486,6 +486,10 @@ int f2fs_setxattr(struct inode *inode, int name_index, const char *name,
                inode->i_ctime = CURRENT_TIME;
                clear_inode_flag(fi, FI_ACL_MODE);
        }
+
+       /* store checkpoint version for conducting checkpoint during fsync */
+       fi->xattr_ver = cur_cp_version(F2FS_CKPT(sbi));
+
        if (ipage)
                update_inode(inode, ipage);
        else
index 9b104f543056238016c683ef822046a784169f50..33711ff2b4a3e495a886ef09c229431fbead4598 100644 (file)
@@ -172,8 +172,8 @@ const struct file_operations fat_file_operations = {
        .llseek         = generic_file_llseek,
        .read           = do_sync_read,
        .write          = do_sync_write,
-       .aio_read       = generic_file_aio_read,
-       .aio_write      = generic_file_aio_write,
+       .read_iter      = generic_file_read_iter,
+       .write_iter     = generic_file_write_iter,
        .mmap           = generic_file_mmap,
        .release        = fat_file_release,
        .unlocked_ioctl = fat_generic_ioctl,
index 11b51bb55b42b03187fc8436125e986c0cdcc047..70a218da9b956e830b9b788c9ff5a4ee68398273 100644 (file)
@@ -185,8 +185,7 @@ static int fat_write_end(struct file *file, struct address_space *mapping,
 }
 
 static ssize_t fat_direct_IO(int rw, struct kiocb *iocb,
-                            const struct iovec *iov,
-                            loff_t offset, unsigned long nr_segs)
+                            struct iov_iter *iter, loff_t offset)
 {
        struct file *file = iocb->ki_filp;
        struct address_space *mapping = file->f_mapping;
@@ -203,7 +202,7 @@ static ssize_t fat_direct_IO(int rw, struct kiocb *iocb,
                 *
                 * Return 0, and fallback to normal buffered write.
                 */
-               loff_t size = offset + iov_length(iov, nr_segs);
+               loff_t size = offset + iov_iter_count(iter);
                if (MSDOS_I(inode)->mmu_private < size)
                        return 0;
        }
@@ -212,10 +211,9 @@ static ssize_t fat_direct_IO(int rw, struct kiocb *iocb,
         * FAT need to use the DIO_LOCKING for avoiding the race
         * condition of fat_get_block() and ->truncate().
         */
-       ret = blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
-                                fat_get_block);
+       ret = blockdev_direct_IO(rw, iocb, inode, iter, offset, fat_get_block);
        if (ret < 0 && (rw & WRITE))
-               fat_write_failed(mapping, offset + iov_length(iov, nr_segs));
+               fat_write_failed(mapping, offset + iov_iter_count(iter));
 
        return ret;
 }
index aef34b1e635e9a424659e721a8a891a3084b8bf4..014ccc542af819f128b0ce3140232478a7c09e97 100644 (file)
@@ -94,8 +94,11 @@ static ssize_t cuse_read(struct file *file, char __user *buf, size_t count,
        loff_t pos = 0;
        struct iovec iov = { .iov_base = buf, .iov_len = count };
        struct fuse_io_priv io = { .async = 0, .file = file };
+       struct iov_iter ii;
 
-       return fuse_direct_io(&io, &iov, 1, count, &pos, 0);
+       iov_iter_init(&ii, &iov, 1, count, 0);
+
+       return fuse_direct_io(&io, &ii, count, &pos, 0);
 }
 
 static ssize_t cuse_write(struct file *file, const char __user *buf,
@@ -104,12 +107,15 @@ static ssize_t cuse_write(struct file *file, const char __user *buf,
        loff_t pos = 0;
        struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = count };
        struct fuse_io_priv io = { .async = 0, .file = file };
+       struct iov_iter ii;
+
+       iov_iter_init(&ii, &iov, 1, count, 0);
 
        /*
         * No locking or generic_write_checks(), the server is
         * responsible for locking and sanity checks.
         */
-       return fuse_direct_io(&io, &iov, 1, count, &pos, 1);
+       return fuse_direct_io(&io, &ii, count, &pos, 1);
 }
 
 static int cuse_open(struct inode *inode, struct file *file)
index 5c121fe19c5f9b6122b687cc14bbcd7b1bbe0dc9..d429c017e80156f3d55e2111e542d0d44b41d6d1 100644 (file)
@@ -1172,9 +1172,10 @@ static inline void fuse_page_descs_length_init(struct fuse_req *req,
                        req->page_descs[i].offset;
 }
 
-static inline unsigned long fuse_get_user_addr(const struct iov_iter *ii)
+static inline unsigned long fuse_get_user_addr(struct iov_iter *ii)
 {
-       return (unsigned long)ii->iov->iov_base + ii->iov_offset;
+       struct iovec *iov = iov_iter_iovec(ii);
+       return (unsigned long)iov->iov_base + ii->iov_offset;
 }
 
 static inline size_t fuse_get_frag_size(const struct iov_iter *ii,
@@ -1263,9 +1264,8 @@ static inline int fuse_iter_npages(const struct iov_iter *ii_p)
        return min(npages, FUSE_MAX_PAGES_PER_REQ);
 }
 
-ssize_t fuse_direct_io(struct fuse_io_priv *io, const struct iovec *iov,
-                      unsigned long nr_segs, size_t count, loff_t *ppos,
-                      int write)
+ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *ii,
+                      size_t count, loff_t *ppos, int write)
 {
        struct file *file = io->file;
        struct fuse_file *ff = file->private_data;
@@ -1274,14 +1274,11 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, const struct iovec *iov,
        loff_t pos = *ppos;
        ssize_t res = 0;
        struct fuse_req *req;
-       struct iov_iter ii;
-
-       iov_iter_init(&ii, iov, nr_segs, count, 0);
 
        if (io->async)
-               req = fuse_get_req_for_background(fc, fuse_iter_npages(&ii));
+               req = fuse_get_req_for_background(fc, fuse_iter_npages(ii));
        else
-               req = fuse_get_req(fc, fuse_iter_npages(&ii));
+               req = fuse_get_req(fc, fuse_iter_npages(ii));
        if (IS_ERR(req))
                return PTR_ERR(req);
 
@@ -1289,7 +1286,7 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, const struct iovec *iov,
                size_t nres;
                fl_owner_t owner = current->files;
                size_t nbytes = min(count, nmax);
-               int err = fuse_get_user_pages(req, &ii, &nbytes, write);
+               int err = fuse_get_user_pages(req, ii, &nbytes, write);
                if (err) {
                        res = err;
                        break;
@@ -1319,9 +1316,9 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, const struct iovec *iov,
                        fuse_put_request(fc, req);
                        if (io->async)
                                req = fuse_get_req_for_background(fc,
-                                       fuse_iter_npages(&ii));
+                                       fuse_iter_npages(ii));
                        else
-                               req = fuse_get_req(fc, fuse_iter_npages(&ii));
+                               req = fuse_get_req(fc, fuse_iter_npages(ii));
                        if (IS_ERR(req))
                                break;
                }
@@ -1335,10 +1332,8 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, const struct iovec *iov,
 }
 EXPORT_SYMBOL_GPL(fuse_direct_io);
 
-static ssize_t __fuse_direct_read(struct fuse_io_priv *io,
-                                 const struct iovec *iov,
-                                 unsigned long nr_segs, loff_t *ppos,
-                                 size_t count)
+static ssize_t __fuse_direct_read(struct fuse_io_priv *io, struct iov_iter *ii,
+                                 loff_t *ppos, size_t count)
 {
        ssize_t res;
        struct file *file = io->file;
@@ -1347,7 +1342,7 @@ static ssize_t __fuse_direct_read(struct fuse_io_priv *io,
        if (is_bad_inode(inode))
                return -EIO;
 
-       res = fuse_direct_io(io, iov, nr_segs, count, ppos, 0);
+       res = fuse_direct_io(io, ii, count, ppos, 0);
 
        fuse_invalidate_attr(inode);
 
@@ -1359,21 +1354,24 @@ static ssize_t fuse_direct_read(struct file *file, char __user *buf,
 {
        struct fuse_io_priv io = { .async = 0, .file = file };
        struct iovec iov = { .iov_base = buf, .iov_len = count };
-       return __fuse_direct_read(&io, &iov, 1, ppos, count);
+       struct iov_iter ii;
+
+       iov_iter_init(&ii, &iov, 1, count, 0);
+
+       return __fuse_direct_read(&io, &ii, ppos, count);
 }
 
-static ssize_t __fuse_direct_write(struct fuse_io_priv *io,
-                                  const struct iovec *iov,
-                                  unsigned long nr_segs, loff_t *ppos)
+static ssize_t __fuse_direct_write(struct fuse_io_priv *io, struct iov_iter *ii,
+                                  loff_t *ppos)
 {
        struct file *file = io->file;
        struct inode *inode = file_inode(file);
-       size_t count = iov_length(iov, nr_segs);
+       size_t count = iov_iter_count(ii);
        ssize_t res;
 
        res = generic_write_checks(file, ppos, &count, 0);
        if (!res)
-               res = fuse_direct_io(io, iov, nr_segs, count, ppos, 1);
+               res = fuse_direct_io(io, ii, count, ppos, 1);
 
        fuse_invalidate_attr(inode);
 
@@ -1384,6 +1382,7 @@ static ssize_t fuse_direct_write(struct file *file, const char __user *buf,
                                 size_t count, loff_t *ppos)
 {
        struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = count };
+       struct iov_iter ii;
        struct inode *inode = file_inode(file);
        ssize_t res;
        struct fuse_io_priv io = { .async = 0, .file = file };
@@ -1391,9 +1390,11 @@ static ssize_t fuse_direct_write(struct file *file, const char __user *buf,
        if (is_bad_inode(inode))
                return -EIO;
 
+       iov_iter_init(&ii, &iov, 1, count, 0);
+
        /* Don't allow parallel writes to the same file */
        mutex_lock(&inode->i_mutex);
-       res = __fuse_direct_write(&io, &iov, 1, ppos);
+       res = __fuse_direct_write(&io, &ii, ppos);
        if (res > 0)
                fuse_write_update_size(inode, *ppos);
        mutex_unlock(&inode->i_mutex);
@@ -1861,30 +1862,17 @@ static int fuse_ioctl_copy_user(struct page **pages, struct iovec *iov,
        while (iov_iter_count(&ii)) {
                struct page *page = pages[page_idx++];
                size_t todo = min_t(size_t, PAGE_SIZE, iov_iter_count(&ii));
-               void *kaddr;
+               size_t left;
 
-               kaddr = kmap(page);
-
-               while (todo) {
-                       char __user *uaddr = ii.iov->iov_base + ii.iov_offset;
-                       size_t iov_len = ii.iov->iov_len - ii.iov_offset;
-                       size_t copy = min(todo, iov_len);
-                       size_t left;
-
-                       if (!to_user)
-                               left = copy_from_user(kaddr, uaddr, copy);
-                       else
-                               left = copy_to_user(uaddr, kaddr, copy);
-
-                       if (unlikely(left))
-                               return -EFAULT;
+               if (!to_user)
+                       left = iov_iter_copy_from_user(page, &ii, 0, todo);
+               else
+                       left = iov_iter_copy_to_user(page, &ii, 0, todo);
 
-                       iov_iter_advance(&ii, copy);
-                       todo -= copy;
-                       kaddr += copy;
-               }
+               if (unlikely(left))
+                       return -EFAULT;
 
-               kunmap(page);
+               iov_iter_advance(&ii, todo);
        }
 
        return 0;
@@ -2378,8 +2366,8 @@ static inline loff_t fuse_round_up(loff_t off)
 }
 
 static ssize_t
-fuse_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
-                       loff_t offset, unsigned long nr_segs)
+fuse_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *ii,
+                       loff_t offset)
 {
        ssize_t ret = 0;
        struct file *file = iocb->ki_filp;
@@ -2388,7 +2376,7 @@ fuse_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
        loff_t pos = 0;
        struct inode *inode;
        loff_t i_size;
-       size_t count = iov_length(iov, nr_segs);
+       size_t count = iov_iter_count(ii);
        struct fuse_io_priv *io;
 
        pos = offset;
@@ -2429,9 +2417,9 @@ fuse_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
                io->async = false;
 
        if (rw == WRITE)
-               ret = __fuse_direct_write(io, iov, nr_segs, &pos);
+               ret = __fuse_direct_write(io, ii, &pos);
        else
-               ret = __fuse_direct_read(io, iov, nr_segs, &pos, count);
+               ret = __fuse_direct_read(io, ii, &pos, count);
 
        if (io->async) {
                fuse_aio_complete(io, ret < 0 ? ret : 0, -1);
index fde7249a3a9608c8c6e49be4316a1d155b7cdfba..dacffcb41a34549ab1c7b5fed4767ee1c414b59e 100644 (file)
@@ -854,9 +854,8 @@ int fuse_reverse_inval_entry(struct super_block *sb, u64 parent_nodeid,
 
 int fuse_do_open(struct fuse_conn *fc, u64 nodeid, struct file *file,
                 bool isdir);
-ssize_t fuse_direct_io(struct fuse_io_priv *io, const struct iovec *iov,
-                      unsigned long nr_segs, size_t count, loff_t *ppos,
-                      int write);
+ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *ii,
+                      size_t count, loff_t *ppos, int write);
 long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg,
                   unsigned int flags);
 long fuse_ioctl_common(struct file *file, unsigned int cmd,
index ee48ad37d9c0109dfd81bcca983c13a29492f0cb..733e94af72a9ece8bbd771d1ffce8ae43e70a438 100644 (file)
@@ -1001,8 +1001,7 @@ static int gfs2_ok_for_dio(struct gfs2_inode *ip, int rw, loff_t offset)
 
 
 static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb,
-                             const struct iovec *iov, loff_t offset,
-                             unsigned long nr_segs)
+                             struct iov_iter *iter, loff_t offset)
 {
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_mapping->host;
@@ -1026,8 +1025,8 @@ static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb,
        if (rv != 1)
                goto out; /* dio not valid, fall back to buffered i/o */
 
-       rv = __blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
-                                 offset, nr_segs, gfs2_get_block_direct,
+       rv = __blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iter,
+                                 offset, gfs2_get_block_direct,
                                  NULL, NULL, 0);
 out:
        gfs2_glock_dq(&gh);
index 0cb4c1557f20c87a44f74b4ac7d29f9a700de05a..2e5fc268d3242678c8a8e348141679a8fc94b530 100644 (file)
@@ -1859,7 +1859,7 @@ static int leaf_dealloc(struct gfs2_inode *dip, u32 index, u32 len,
 
        memset(&rlist, 0, sizeof(struct gfs2_rgrp_list));
 
-       ht = kzalloc(size, GFP_NOFS);
+       ht = kzalloc(size, GFP_NOFS | __GFP_NOWARN);
        if (ht == NULL)
                ht = vzalloc(size);
        if (!ht)
index 72c3866a73205217b9fe57d7863ce2376ce8002b..23cbdd4dcde56623ccb6a6d08f22f51a1bcf94ab 100644 (file)
@@ -679,10 +679,9 @@ static int gfs2_fsync(struct file *file, loff_t start, loff_t end,
 }
 
 /**
- * gfs2_file_aio_write - Perform a write to a file
+ * gfs2_file_write_iter - Perform a write to a file
  * @iocb: The io context
- * @iov: The data to write
- * @nr_segs: Number of @iov segments
+ * @iter: The data to write
  * @pos: The file position
  *
  * We have to do a lock/unlock here to refresh the inode size for
@@ -692,11 +691,11 @@ static int gfs2_fsync(struct file *file, loff_t start, loff_t end,
  *
  */
 
-static ssize_t gfs2_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
-                                  unsigned long nr_segs, loff_t pos)
+static ssize_t gfs2_file_write_iter(struct kiocb *iocb, struct iov_iter *iter,
+                                   loff_t pos)
 {
        struct file *file = iocb->ki_filp;
-       size_t writesize = iov_length(iov, nr_segs);
+       size_t writesize = iov_iter_count(iter);
        struct gfs2_inode *ip = GFS2_I(file_inode(file));
        int ret;
 
@@ -715,7 +714,7 @@ static ssize_t gfs2_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
                gfs2_glock_dq_uninit(&gh);
        }
 
-       return generic_file_aio_write(iocb, iov, nr_segs, pos);
+       return generic_file_write_iter(iocb, iter, pos);
 }
 
 static int fallocate_chunk(struct inode *inode, loff_t offset, loff_t len,
@@ -1047,9 +1046,9 @@ static int gfs2_flock(struct file *file, int cmd, struct file_lock *fl)
 const struct file_operations gfs2_file_fops = {
        .llseek         = gfs2_llseek,
        .read           = do_sync_read,
-       .aio_read       = generic_file_aio_read,
+       .read_iter      = generic_file_read_iter,
        .write          = do_sync_write,
-       .aio_write      = gfs2_file_aio_write,
+       .write_iter     = gfs2_file_write_iter,
        .unlocked_ioctl = gfs2_ioctl,
        .mmap           = gfs2_mmap,
        .open           = gfs2_open,
@@ -1079,9 +1078,9 @@ const struct file_operations gfs2_dir_fops = {
 const struct file_operations gfs2_file_fops_nolock = {
        .llseek         = gfs2_llseek,
        .read           = do_sync_read,
-       .aio_read       = generic_file_aio_read,
+       .read_iter      = generic_file_read_iter,
        .write          = do_sync_write,
-       .aio_write      = gfs2_file_aio_write,
+       .write_iter     = gfs2_file_write_iter,
        .unlocked_ioctl = gfs2_ioctl,
        .mmap           = gfs2_mmap,
        .open           = gfs2_open,
index 9435384562a271cacd5219651269b36d5d2923c4..ce7078d5aa9787ac1a8ef9dd3798aef2db73de21 100644 (file)
@@ -1488,7 +1488,7 @@ static void examine_bucket(glock_examiner examiner, const struct gfs2_sbd *sdp,
 
        rcu_read_lock();
        hlist_bl_for_each_entry_rcu(gl, pos, head, gl_list) {
-               if ((gl->gl_sbd == sdp) && atomic_read(&gl->gl_ref))
+               if ((gl->gl_sbd == sdp) && atomic_inc_not_zero(&gl->gl_ref))
                        examiner(gl);
        }
        rcu_read_unlock();
@@ -1508,18 +1508,17 @@ static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp)
  * thaw_glock - thaw out a glock which has an unprocessed reply waiting
  * @gl: The glock to thaw
  *
- * N.B. When we freeze a glock, we leave a ref to the glock outstanding,
- * so this has to result in the ref count being dropped by one.
  */
 
 static void thaw_glock(struct gfs2_glock *gl)
 {
        if (!test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))
-               return;
+               goto out;
        set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
-       gfs2_glock_hold(gl);
-       if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
+       if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0) {
+out:
                gfs2_glock_put(gl);
+       }
 }
 
 /**
@@ -1536,7 +1535,6 @@ static void clear_glock(struct gfs2_glock *gl)
        if (gl->gl_state != LM_ST_UNLOCKED)
                handle_callback(gl, LM_ST_UNLOCKED, 0, false);
        spin_unlock(&gl->gl_spin);
-       gfs2_glock_hold(gl);
        if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
                gfs2_glock_put(gl);
 }
@@ -1838,14 +1836,14 @@ int __init gfs2_glock_init(void)
 
        glock_workqueue = alloc_workqueue("glock_workqueue", WQ_MEM_RECLAIM |
                                          WQ_HIGHPRI | WQ_FREEZABLE, 0);
-       if (IS_ERR(glock_workqueue))
-               return PTR_ERR(glock_workqueue);
+       if (!glock_workqueue)
+               return -ENOMEM;
        gfs2_delete_workqueue = alloc_workqueue("delete_workqueue",
                                                WQ_MEM_RECLAIM | WQ_FREEZABLE,
                                                0);
-       if (IS_ERR(gfs2_delete_workqueue)) {
+       if (!gfs2_delete_workqueue) {
                destroy_workqueue(glock_workqueue);
-               return PTR_ERR(gfs2_delete_workqueue);
+               return -ENOMEM;
        }
 
        register_shrinker(&glock_shrinker);
index 5f2e5224c51c9ae79e34a1eb0f405a7b304cd0be..e2e0a90396e7823da9aa47c44a727d5e75751cc6 100644 (file)
@@ -47,7 +47,8 @@ static void gfs2_ail_error(struct gfs2_glock *gl, const struct buffer_head *bh)
  * None of the buffers should be dirty, locked, or pinned.
  */
 
-static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
+static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync,
+                            unsigned int nr_revokes)
 {
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct list_head *head = &gl->gl_ail_list;
@@ -57,7 +58,9 @@ static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
 
        gfs2_log_lock(sdp);
        spin_lock(&sdp->sd_ail_lock);
-       list_for_each_entry_safe(bd, tmp, head, bd_ail_gl_list) {
+       list_for_each_entry_safe_reverse(bd, tmp, head, bd_ail_gl_list) {
+               if (nr_revokes == 0)
+                       break;
                bh = bd->bd_bh;
                if (bh->b_state & b_state) {
                        if (fsync)
@@ -65,6 +68,7 @@ static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
                        gfs2_ail_error(gl, bh);
                }
                gfs2_trans_add_revoke(sdp, bd);
+               nr_revokes--;
        }
        GLOCK_BUG_ON(gl, !fsync && atomic_read(&gl->gl_ail_count));
        spin_unlock(&sdp->sd_ail_lock);
@@ -91,7 +95,7 @@ static void gfs2_ail_empty_gl(struct gfs2_glock *gl)
        WARN_ON_ONCE(current->journal_info);
        current->journal_info = &tr;
 
-       __gfs2_ail_flush(gl, 0);
+       __gfs2_ail_flush(gl, 0, tr.tr_revokes);
 
        gfs2_trans_end(sdp);
        gfs2_log_flush(sdp, NULL);
@@ -101,15 +105,19 @@ void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
 {
        struct gfs2_sbd *sdp = gl->gl_sbd;
        unsigned int revokes = atomic_read(&gl->gl_ail_count);
+       unsigned int max_revokes = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / sizeof(u64);
        int ret;
 
        if (!revokes)
                return;
 
-       ret = gfs2_trans_begin(sdp, 0, revokes);
+       while (revokes > max_revokes)
+               max_revokes += (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header)) / sizeof(u64);
+
+       ret = gfs2_trans_begin(sdp, 0, max_revokes);
        if (ret)
                return;
-       __gfs2_ail_flush(gl, fsync);
+       __gfs2_ail_flush(gl, fsync, max_revokes);
        gfs2_trans_end(sdp);
        gfs2_log_flush(sdp, NULL);
 }
index bbb2715171cd0c983770cf86b4b57ed69e04c98d..64915eeae5a7112f59256185a00a1cc9f3b2a193 100644 (file)
@@ -594,7 +594,7 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
                }
                gfs2_glock_dq_uninit(ghs);
                if (IS_ERR(d))
-                       return PTR_RET(d);
+                       return PTR_ERR(d);
                return error;
        } else if (error != -ENOENT) {
                goto fail_gunlock;
@@ -1750,6 +1750,10 @@ static ssize_t gfs2_getxattr(struct dentry *dentry, const char *name,
        struct gfs2_holder gh;
        int ret;
 
+       /* For selinux during lookup */
+       if (gfs2_glock_is_locked_by_me(ip->i_gl))
+               return generic_getxattr(dentry, name, data, size);
+
        gfs2_holder_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &gh);
        ret = gfs2_glock_nq(&gh);
        if (ret == 0) {
index 17c5b5d7dc88c4b73e00c5918f97473df15b354f..010b9fb9fec6e781cb80a6982d746896ba6cffdb 100644 (file)
@@ -579,6 +579,24 @@ static int buf_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
        return error;
 }
 
+/**
+ * gfs2_meta_sync - Sync all buffers associated with a glock
+ * @gl: The glock
+ *
+ */
+
+static void gfs2_meta_sync(struct gfs2_glock *gl)
+{
+       struct address_space *mapping = gfs2_glock2aspace(gl);
+       int error;
+
+       filemap_fdatawrite(mapping);
+       error = filemap_fdatawait(mapping);
+
+       if (error)
+               gfs2_io_error(gl->gl_sbd);
+}
+
 static void buf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
 {
        struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
index e04d0e09ee7b59d5959d69e5df872a5871e9ae64..7b0f5043cf24c253612451787588d638da5483ce 100644 (file)
@@ -155,7 +155,7 @@ static int __init init_gfs2_fs(void)
                goto fail_wq;
 
        gfs2_control_wq = alloc_workqueue("gfs2_control",
-                              WQ_NON_REENTRANT | WQ_UNBOUND | WQ_FREEZABLE, 0);
+                                         WQ_UNBOUND | WQ_FREEZABLE, 0);
        if (!gfs2_control_wq)
                goto fail_recovery;
 
index 0da390686c08f458e12aeb44df92d7301a96d788..932415050540e2a1bdefc6d957e68ef7a0d82d01 100644 (file)
@@ -97,24 +97,6 @@ const struct address_space_operations gfs2_meta_aops = {
        .releasepage = gfs2_releasepage,
 };
 
-/**
- * gfs2_meta_sync - Sync all buffers associated with a glock
- * @gl: The glock
- *
- */
-
-void gfs2_meta_sync(struct gfs2_glock *gl)
-{
-       struct address_space *mapping = gfs2_glock2aspace(gl);
-       int error;
-
-       filemap_fdatawrite(mapping);
-       error = filemap_fdatawait(mapping);
-
-       if (error)
-               gfs2_io_error(gl->gl_sbd);
-}
-
 /**
  * gfs2_getbuf - Get a buffer with a given address space
  * @gl: the glock
index 0d4c843b6f8e59aec3f143a80417d0e4e6de43e0..4823b934208a2be6012a71ded8f4e950fca753fb 100644 (file)
@@ -48,21 +48,17 @@ static inline struct gfs2_sbd *gfs2_mapping2sbd(struct address_space *mapping)
                return inode->i_sb->s_fs_info;
 }
 
-void gfs2_meta_sync(struct gfs2_glock *gl);
-
-struct buffer_head *gfs2_meta_new(struct gfs2_glock *gl, u64 blkno);
-int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno,
-                  int flags, struct buffer_head **bhp);
-int gfs2_meta_wait(struct gfs2_sbd *sdp, struct buffer_head *bh);
-struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno, int create);
-
-void gfs2_remove_from_journal(struct buffer_head *bh, struct gfs2_trans *tr,
-                             int meta);
-
-void gfs2_meta_wipe(struct gfs2_inode *ip, u64 bstart, u32 blen);
-
-int gfs2_meta_indirect_buffer(struct gfs2_inode *ip, int height, u64 num,
-                             struct buffer_head **bhp);
+extern struct buffer_head *gfs2_meta_new(struct gfs2_glock *gl, u64 blkno);
+extern int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,
+                         struct buffer_head **bhp);
+extern int gfs2_meta_wait(struct gfs2_sbd *sdp, struct buffer_head *bh);
+extern struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno,
+                                      int create);
+extern void gfs2_remove_from_journal(struct buffer_head *bh,
+                                    struct gfs2_trans *tr, int meta);
+extern void gfs2_meta_wipe(struct gfs2_inode *ip, u64 bstart, u32 blen);
+extern int gfs2_meta_indirect_buffer(struct gfs2_inode *ip, int height, u64 num,
+                                    struct buffer_head **bhp);
 
 static inline int gfs2_meta_inode_buffer(struct gfs2_inode *ip,
                                         struct buffer_head **bhp)
index f9299d8a64e3a2af9f6ef2aadd009a097c74ccf6..f9242b8075da19e179c6c0f2b36a9a431a034d5b 100644 (file)
@@ -125,15 +125,14 @@ static int hfs_releasepage(struct page *page, gfp_t mask)
 }
 
 static ssize_t hfs_direct_IO(int rw, struct kiocb *iocb,
-               const struct iovec *iov, loff_t offset, unsigned long nr_segs)
+               struct iov_iter *iter, loff_t offset)
 {
        struct file *file = iocb->ki_filp;
        struct address_space *mapping = file->f_mapping;
        struct inode *inode = file_inode(file)->i_mapping->host;
        ssize_t ret;
 
-       ret = blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
-                                hfs_get_block);
+       ret = blockdev_direct_IO(rw, iocb, inode, iter, offset, hfs_get_block);
 
        /*
         * In case of error extending write may have instantiated a few
@@ -141,7 +140,7 @@ static ssize_t hfs_direct_IO(int rw, struct kiocb *iocb,
         */
        if (unlikely((rw & WRITE) && ret < 0)) {
                loff_t isize = i_size_read(inode);
-               loff_t end = offset + iov_length(iov, nr_segs);
+               loff_t end = offset + iov_iter_count(iter);
 
                if (end > isize)
                        hfs_write_failed(mapping, end);
@@ -675,9 +674,9 @@ static int hfs_file_fsync(struct file *filp, loff_t start, loff_t end,
 static const struct file_operations hfs_file_operations = {
        .llseek         = generic_file_llseek,
        .read           = do_sync_read,
-       .aio_read       = generic_file_aio_read,
+       .read_iter      = generic_file_read_iter,
        .write          = do_sync_write,
-       .aio_write      = generic_file_aio_write,
+       .write_iter     = generic_file_write_iter,
        .mmap           = generic_file_mmap,
        .splice_read    = generic_file_splice_read,
        .fsync          = hfs_file_fsync,
index f833d35630abbd4d98c4ca322e32704d792cf9e9..13813f6351e0b8cf06b69dc2ee5ae8356d3e4025 100644 (file)
@@ -122,14 +122,14 @@ static int hfsplus_releasepage(struct page *page, gfp_t mask)
 }
 
 static ssize_t hfsplus_direct_IO(int rw, struct kiocb *iocb,
-               const struct iovec *iov, loff_t offset, unsigned long nr_segs)
+               struct iov_iter *iter, loff_t offset)
 {
        struct file *file = iocb->ki_filp;
        struct address_space *mapping = file->f_mapping;
        struct inode *inode = file_inode(file)->i_mapping->host;
        ssize_t ret;
 
-       ret = blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
+       ret = blockdev_direct_IO(rw, iocb, inode, iter, offset,
                                 hfsplus_get_block);
 
        /*
@@ -138,7 +138,7 @@ static ssize_t hfsplus_direct_IO(int rw, struct kiocb *iocb,
         */
        if (unlikely((rw & WRITE) && ret < 0)) {
                loff_t isize = i_size_read(inode);
-               loff_t end = offset + iov_length(iov, nr_segs);
+               loff_t end = offset + iov_iter_count(iter);
 
                if (end > isize)
                        hfsplus_write_failed(mapping, end);
@@ -388,9 +388,9 @@ static const struct inode_operations hfsplus_file_inode_operations = {
 static const struct file_operations hfsplus_file_operations = {
        .llseek         = generic_file_llseek,
        .read           = do_sync_read,
-       .aio_read       = generic_file_aio_read,
+       .read_iter      = generic_file_read_iter,
        .write          = do_sync_write,
-       .aio_write      = generic_file_aio_write,
+       .write_iter     = generic_file_write_iter,
        .mmap           = generic_file_mmap,
        .splice_read    = generic_file_splice_read,
        .fsync          = hfsplus_file_fsync,
index cddb0521751278526dfc1b678acbf708a906a815..e3adc8ed77570e1e1dc91e083d55c3d4dac17843 100644 (file)
@@ -381,8 +381,8 @@ static const struct file_operations hostfs_file_fops = {
        .llseek         = generic_file_llseek,
        .read           = do_sync_read,
        .splice_read    = generic_file_splice_read,
-       .aio_read       = generic_file_aio_read,
-       .aio_write      = generic_file_aio_write,
+       .read_iter      = generic_file_read_iter,
+       .write_iter     = generic_file_write_iter,
        .write          = do_sync_write,
        .mmap           = generic_file_mmap,
        .open           = hostfs_file_open,
index 4e9dabcf1f4cc35f8c3abd6455818f6b15c03a99..2561eba66c9f5aab9fa9d287128fbee939976f3a 100644 (file)
@@ -198,9 +198,9 @@ const struct file_operations hpfs_file_ops =
 {
        .llseek         = generic_file_llseek,
        .read           = do_sync_read,
-       .aio_read       = generic_file_aio_read,
+       .read_iter      = generic_file_read_iter,
        .write          = do_sync_write,
-       .aio_write      = generic_file_aio_write,
+       .write_iter     = generic_file_write_iter,
        .mmap           = generic_file_mmap,
        .release        = hpfs_file_release,
        .fsync          = hpfs_file_fsync,
index 7c5f01cf619d689d76ab51cc7cc221bead874cf4..143a903deb3d6494932f6e61e0f419c2b095530c 100644 (file)
@@ -132,6 +132,10 @@ extern struct dentry *__d_alloc(struct super_block *, const struct qstr *);
  */
 extern ssize_t __kernel_write(struct file *, const char *, size_t, loff_t *);
 extern int rw_verify_area(int, struct file *, const loff_t *, size_t);
+extern ssize_t do_aio_read(struct kiocb *kiocb, const struct iovec *iov,
+                          unsigned long nr_segs, loff_t pos);
+extern ssize_t do_aio_write(struct kiocb *kiocb, const struct iovec *iov,
+                           unsigned long nr_segs, loff_t pos);
 
 /*
  * splice.c
diff --git a/fs/iov-iter.c b/fs/iov-iter.c
new file mode 100644 (file)
index 0000000..ec461c8
--- /dev/null
@@ -0,0 +1,411 @@
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/uio.h>
+#include <linux/hardirq.h>
+#include <linux/highmem.h>
+#include <linux/pagemap.h>
+#include <linux/bio.h>
+
+static size_t __iovec_copy_to_user(char *vaddr, const struct iovec *iov,
+                                  size_t base, size_t bytes, int atomic)
+{
+       size_t copied = 0, left = 0;
+
+       while (bytes) {
+               char __user *buf = iov->iov_base + base;
+               int copy = min(bytes, iov->iov_len - base);
+
+               base = 0;
+               if (atomic)
+                       left = __copy_to_user_inatomic(buf, vaddr, copy);
+               else
+                       left = __copy_to_user(buf, vaddr, copy);
+               copied += copy;
+               bytes -= copy;
+               vaddr += copy;
+               iov++;
+
+               if (unlikely(left))
+                       break;
+       }
+       return copied - left;
+}
+
+/*
+ * Copy as much as we can into the page and return the number of bytes which
+ * were successfully copied.  If a fault is encountered then return the number of
+ * bytes which were copied.
+ */
+static size_t ii_iovec_copy_to_user_atomic(struct page *page,
+               struct iov_iter *i, unsigned long offset, size_t bytes)
+{
+       struct iovec *iov = (struct iovec *)i->data;
+       char *kaddr;
+       size_t copied;
+
+       BUG_ON(!in_atomic());
+       kaddr = kmap_atomic(page);
+       if (likely(i->nr_segs == 1)) {
+               int left;
+               char __user *buf = iov->iov_base + i->iov_offset;
+               left = __copy_to_user_inatomic(buf, kaddr + offset, bytes);
+               copied = bytes - left;
+       } else {
+               copied = __iovec_copy_to_user(kaddr + offset, iov,
+                                             i->iov_offset, bytes, 1);
+       }
+       kunmap_atomic(kaddr);
+
+       return copied;
+}
+
+/*
+ * This has the same side effects and return value as
+ * ii_iovec_copy_to_user_atomic().
+ * The difference is that it attempts to resolve faults.
+ * Page must not be locked.
+ */
+static size_t ii_iovec_copy_to_user(struct page *page,
+               struct iov_iter *i, unsigned long offset, size_t bytes,
+               int check_access)
+{
+       struct iovec *iov = (struct iovec *)i->data;
+       char *kaddr;
+       size_t copied;
+
+       if (check_access) {
+               might_sleep();
+               if (generic_segment_checks(iov, &i->nr_segs, &bytes,
+                                          VERIFY_WRITE))
+                       return 0;
+       }
+
+       if (likely(i->nr_segs == 1)) {
+               int left;
+               char __user *buf = iov->iov_base + i->iov_offset;
+               /*
+                * Faults on the destination of a read are common, so do it
+                * before taking the kmap.
+                */
+               if (!fault_in_pages_writeable(buf, bytes)) {
+                       kaddr = kmap_atomic(page);
+                       left = __copy_to_user_inatomic(buf, kaddr + offset,
+                                                    bytes);
+                       kunmap_atomic(kaddr);
+                       if (left == 0)
+                               goto success;
+               }
+               kaddr = kmap(page);
+               left = copy_to_user(buf, kaddr + offset, bytes);
+               kunmap(page);
+success:
+               copied = bytes - left;
+       } else {
+               kaddr = kmap(page);
+               copied = __iovec_copy_to_user(kaddr + offset, iov,
+                                             i->iov_offset, bytes, 0);
+               kunmap(page);
+       }
+       return copied;
+}
+
+#ifdef CONFIG_BLOCK
+/*
+ * As an easily verifiable first pass, we implement all the methods that
+ * copy data to and from bvec pages with one function.  We implement it
+ * all with kmap_atomic().
+ */
+static size_t bvec_copy_tofrom_page(struct iov_iter *iter, struct page *page,
+                                   unsigned long page_offset, size_t bytes,
+                                   int topage)
+{
+       struct bio_vec *bvec = (struct bio_vec *)iter->data;
+       size_t bvec_offset = iter->iov_offset;
+       size_t remaining = bytes;
+       void *bvec_map;
+       void *page_map;
+       size_t copy;
+
+       page_map = kmap_atomic(page);
+
+       BUG_ON(bytes > iter->count);
+       while (remaining) {
+               BUG_ON(bvec->bv_len == 0);
+               BUG_ON(bvec_offset >= bvec->bv_len);
+               copy = min(remaining, bvec->bv_len - bvec_offset);
+               bvec_map = kmap_atomic(bvec->bv_page);
+               if (topage)
+                       memcpy(page_map + page_offset,
+                              bvec_map + bvec->bv_offset + bvec_offset,
+                              copy);
+               else
+                       memcpy(bvec_map + bvec->bv_offset + bvec_offset,
+                              page_map + page_offset,
+                              copy);
+               kunmap_atomic(bvec_map);
+               remaining -= copy;
+               bvec_offset += copy;
+               page_offset += copy;
+               if (bvec_offset == bvec->bv_len) {
+                       bvec_offset = 0;
+                       bvec++;
+               }
+       }
+
+       kunmap_atomic(page_map);
+
+       return bytes;
+}
+
+static size_t ii_bvec_copy_to_user_atomic(struct page *page, struct iov_iter *i,
+                                         unsigned long offset, size_t bytes)
+{
+       return bvec_copy_tofrom_page(i, page, offset, bytes, 0);
+}
+static size_t ii_bvec_copy_to_user(struct page *page, struct iov_iter *i,
+                                  unsigned long offset, size_t bytes,
+                                  int check_access)
+{
+       return bvec_copy_tofrom_page(i, page, offset, bytes, 0);
+}
+static size_t ii_bvec_copy_from_user_atomic(struct page *page,
+                                           struct iov_iter *i,
+                                           unsigned long offset, size_t bytes)
+{
+       return bvec_copy_tofrom_page(i, page, offset, bytes, 1);
+}
+static size_t ii_bvec_copy_from_user(struct page *page, struct iov_iter *i,
+                                    unsigned long offset, size_t bytes)
+{
+       return bvec_copy_tofrom_page(i, page, offset, bytes, 1);
+}
+
+/*
+ * bio_vecs have a stricter structure than iovecs that might have
+ * come from userspace.  There are no zero length bio_vec elements.
+ */
+static void ii_bvec_advance(struct iov_iter *i, size_t bytes)
+{
+       struct bio_vec *bvec = (struct bio_vec *)i->data;
+       size_t offset = i->iov_offset;
+       size_t delta;
+
+       BUG_ON(i->count < bytes);
+       while (bytes) {
+               BUG_ON(bvec->bv_len == 0);
+               BUG_ON(bvec->bv_len <= offset);
+               delta = min(bytes, bvec->bv_len - offset);
+               offset += delta;
+               i->count -= delta;
+               bytes -= delta;
+               if (offset == bvec->bv_len) {
+                       bvec++;
+                       offset = 0;
+               }
+       }
+
+       i->data = (unsigned long)bvec;
+       i->iov_offset = offset;
+}
+
+/*
+ * pages pointed to by bio_vecs are always pinned.
+ */
+static int ii_bvec_fault_in_readable(struct iov_iter *i, size_t bytes)
+{
+       return 0;
+}
+
+static size_t ii_bvec_single_seg_count(const struct iov_iter *i)
+{
+       const struct bio_vec *bvec = (struct bio_vec *)i->data;
+       if (i->nr_segs == 1)
+               return i->count;
+       else
+               return min(i->count, bvec->bv_len - i->iov_offset);
+}
+
+static int ii_bvec_shorten(struct iov_iter *i, size_t count)
+{
+       return -EINVAL;
+}
+
+struct iov_iter_ops ii_bvec_ops = {
+       .ii_copy_to_user_atomic = ii_bvec_copy_to_user_atomic,
+       .ii_copy_to_user = ii_bvec_copy_to_user,
+       .ii_copy_from_user_atomic = ii_bvec_copy_from_user_atomic,
+       .ii_copy_from_user = ii_bvec_copy_from_user,
+       .ii_advance = ii_bvec_advance,
+       .ii_fault_in_readable = ii_bvec_fault_in_readable,
+       .ii_single_seg_count = ii_bvec_single_seg_count,
+       .ii_shorten = ii_bvec_shorten,
+};
+EXPORT_SYMBOL(ii_bvec_ops);
+#endif /* CONFIG_BLOCK */
+
+static size_t __iovec_copy_from_user(char *vaddr, const struct iovec *iov,
+                                    size_t base, size_t bytes, int atomic)
+{
+       size_t copied = 0, left = 0;
+
+       while (bytes) {
+               char __user *buf = iov->iov_base + base;
+               int copy = min(bytes, iov->iov_len - base);
+
+               base = 0;
+               if (atomic)
+                       left = __copy_from_user_inatomic(vaddr, buf, copy);
+               else
+                       left = __copy_from_user(vaddr, buf, copy);
+               copied += copy;
+               bytes -= copy;
+               vaddr += copy;
+               iov++;
+
+               if (unlikely(left))
+                       break;
+       }
+       return copied - left;
+}
+
+/*
+ * Copy as much as we can into the page and return the number of bytes which
+ * were successfully copied.  If a fault is encountered then return the number
+ * of bytes which were copied.
+ */
+static size_t ii_iovec_copy_from_user_atomic(struct page *page,
+               struct iov_iter *i, unsigned long offset, size_t bytes)
+{
+       struct iovec *iov = (struct iovec *)i->data;
+       char *kaddr;
+       size_t copied;
+
+       BUG_ON(!in_atomic());
+       kaddr = kmap_atomic(page);
+       if (likely(i->nr_segs == 1)) {
+               int left;
+               char __user *buf = iov->iov_base + i->iov_offset;
+               left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
+               copied = bytes - left;
+       } else {
+               copied = __iovec_copy_from_user(kaddr + offset, iov,
+                                               i->iov_offset, bytes, 1);
+       }
+       kunmap_atomic(kaddr);
+
+       return copied;
+}
+EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
+
+/*
+ * This has the same side effects and return value as
+ * ii_iovec_copy_from_user_atomic().
+ * The difference is that it attempts to resolve faults.
+ * Page must not be locked.
+ */
+static size_t ii_iovec_copy_from_user(struct page *page,
+               struct iov_iter *i, unsigned long offset, size_t bytes)
+{
+       struct iovec *iov = (struct iovec *)i->data;
+       char *kaddr;
+       size_t copied;
+
+       kaddr = kmap(page);
+       if (likely(i->nr_segs == 1)) {
+               int left;
+               char __user *buf = iov->iov_base + i->iov_offset;
+               left = __copy_from_user(kaddr + offset, buf, bytes);
+               copied = bytes - left;
+       } else {
+               copied = __iovec_copy_from_user(kaddr + offset, iov,
+                                               i->iov_offset, bytes, 0);
+       }
+       kunmap(page);
+       return copied;
+}
+
+static void ii_iovec_advance(struct iov_iter *i, size_t bytes)
+{
+       BUG_ON(i->count < bytes);
+
+       if (likely(i->nr_segs == 1)) {
+               i->iov_offset += bytes;
+               i->count -= bytes;
+       } else {
+               struct iovec *iov = (struct iovec *)i->data;
+               size_t base = i->iov_offset;
+               unsigned long nr_segs = i->nr_segs;
+
+               /*
+                * The !iov->iov_len check ensures we skip over unlikely
+                * zero-length segments (without overrunning the iovec).
+                */
+               while (bytes || unlikely(i->count && !iov->iov_len)) {
+                       int copy;
+
+                       copy = min(bytes, iov->iov_len - base);
+                       BUG_ON(!i->count || i->count < copy);
+                       i->count -= copy;
+                       bytes -= copy;
+                       base += copy;
+                       if (iov->iov_len == base) {
+                               iov++;
+                               nr_segs--;
+                               base = 0;
+                       }
+               }
+               i->data = (unsigned long)iov;
+               i->iov_offset = base;
+               i->nr_segs = nr_segs;
+       }
+}
+
+/*
+ * Fault in the first iovec of the given iov_iter, to a maximum length
+ * of bytes. Returns 0 on success, or non-zero if the memory could not be
+ * accessed (ie. because it is an invalid address).
+ *
+ * writev-intensive code may want this to prefault several iovecs -- that
+ * would be possible (callers must not rely on the fact that _only_ the
+ * first iovec will be faulted with the current implementation).
+ */
+static int ii_iovec_fault_in_readable(struct iov_iter *i, size_t bytes)
+{
+       struct iovec *iov = (struct iovec *)i->data;
+       char __user *buf = iov->iov_base + i->iov_offset;
+       bytes = min(bytes, iov->iov_len - i->iov_offset);
+       return fault_in_pages_readable(buf, bytes);
+}
+
+/*
+ * Return the count of just the current iov_iter segment.
+ */
+static size_t ii_iovec_single_seg_count(const struct iov_iter *i)
+{
+       const struct iovec *iov = (struct iovec *)i->data;
+       if (i->nr_segs == 1)
+               return i->count;
+       else
+               return min(i->count, iov->iov_len - i->iov_offset);
+}
+
+static int ii_iovec_shorten(struct iov_iter *i, size_t count)
+{
+       struct iovec *iov = (struct iovec *)i->data;
+       i->nr_segs = iov_shorten(iov, i->nr_segs, count);
+       i->count = min(i->count, count);
+       return 0;
+}
+
+struct iov_iter_ops ii_iovec_ops = {
+       .ii_copy_to_user_atomic = ii_iovec_copy_to_user_atomic,
+       .ii_copy_to_user = ii_iovec_copy_to_user,
+       .ii_copy_from_user_atomic = ii_iovec_copy_from_user_atomic,
+       .ii_copy_from_user = ii_iovec_copy_from_user,
+       .ii_advance = ii_iovec_advance,
+       .ii_fault_in_readable = ii_iovec_fault_in_readable,
+       .ii_single_seg_count = ii_iovec_single_seg_count,
+       .ii_shorten = ii_iovec_shorten,
+};
+EXPORT_SYMBOL(ii_iovec_ops);
index c348d6d886240cb0354dbc71faf88fef2395db67..e5d408a7ea4a27c88af2a1dcae32259eab1f7083 100644 (file)
@@ -117,8 +117,8 @@ static void destroy_inodecache(void)
 
 static int isofs_remount(struct super_block *sb, int *flags, char *data)
 {
-       /* we probably want a lot more here */
-       *flags |= MS_RDONLY;
+       if (!(*flags & MS_RDONLY))
+               return -EROFS;
        return 0;
 }
 
@@ -763,15 +763,6 @@ root_found:
         */
        s->s_maxbytes = 0x80000000000LL;
 
-       /*
-        * The CDROM is read-only, has no nodes (devices) on it, and since
-        * all of the files appear to be owned by root, we really do not want
-        * to allow suid.  (suid or devices will not show up unless we have
-        * Rock Ridge extensions)
-        */
-
-       s->s_flags |= MS_RDONLY /* | MS_NODEV | MS_NOSUID */;
-
        /* Set this for reference. Its not currently used except on write
           which we don't have .. */
 
@@ -1530,6 +1521,9 @@ struct inode *isofs_iget(struct super_block *sb,
 static struct dentry *isofs_mount(struct file_system_type *fs_type,
        int flags, const char *dev_name, void *data)
 {
+       /* We don't support read-write mounts */
+       if (!(flags & MS_RDONLY))
+               return ERR_PTR(-EACCES);
        return mount_bdev(fs_type, flags, dev_name, data, isofs_fill_super);
 }
 
index 11bb11f48b3ad7aa70d80a39af28c65f1f2d2dd1..bb217dcb41affbcca6b42e7c5b018a72e0a3aed0 100644 (file)
@@ -340,13 +340,13 @@ void journal_commit_transaction(journal_t *journal)
        J_ASSERT(journal->j_committing_transaction == NULL);
 
        commit_transaction = journal->j_running_transaction;
-       J_ASSERT(commit_transaction->t_state == T_RUNNING);
 
        trace_jbd_start_commit(journal, commit_transaction);
        jbd_debug(1, "JBD: starting commit of transaction %d\n",
                        commit_transaction->t_tid);
 
        spin_lock(&journal->j_state_lock);
+       J_ASSERT(commit_transaction->t_state == T_RUNNING);
        commit_transaction->t_state = T_LOCKED;
 
        trace_jbd_commit_locking(journal, commit_transaction);
index 6510d6355729c021b79afe88adbff86c96a2112f..2d04f9afafd7ea5cf59624777d12c9196b61e97c 100644 (file)
@@ -90,6 +90,24 @@ static int journal_convert_superblock_v1(journal_t *, journal_superblock_t *);
 static void __journal_abort_soft (journal_t *journal, int errno);
 static const char *journal_dev_name(journal_t *journal, char *buffer);
 
+#ifdef CONFIG_JBD_DEBUG
+void __jbd_debug(int level, const char *file, const char *func,
+                unsigned int line, const char *fmt, ...)
+{
+       struct va_format vaf;
+       va_list args;
+
+       if (level > journal_enable_debug)
+               return;
+       va_start(args, fmt);
+       vaf.fmt = fmt;
+       vaf.va = &args;
+       printk(KERN_DEBUG "%s: (%s, %u): %pV\n", file, func, line, &vaf);
+       va_end(args);
+}
+EXPORT_SYMBOL(__jbd_debug);
+#endif
+
 /*
  * Helper function used to manage commit timeouts
  */
index 1506673c087e11ae820245baadc8ae74cb9f01b2..1d7ab8b7d41e4e19c2053ee5be00375a5d9ca3c4 100644 (file)
@@ -51,10 +51,10 @@ const struct file_operations jffs2_file_operations =
 {
        .llseek =       generic_file_llseek,
        .open =         generic_file_open,
-       .read =         do_sync_read,
-       .aio_read =     generic_file_aio_read,
-       .write =        do_sync_write,
-       .aio_write =    generic_file_aio_write,
+       .read =         do_sync_read,
+       .read_iter =    generic_file_read_iter,
+       .write =        do_sync_write,
+       .write_iter =   generic_file_write_iter,
        .unlocked_ioctl=jffs2_ioctl,
        .mmap =         generic_file_readonly_mmap,
        .fsync =        jffs2_fsync,
index dd7442c5835864b0e04bdff5befbd0f020232bfb..040b6c7725ad878d5347274ae1ca4cb76003a83c 100644 (file)
@@ -151,8 +151,8 @@ const struct file_operations jfs_file_operations = {
        .llseek         = generic_file_llseek,
        .write          = do_sync_write,
        .read           = do_sync_read,
-       .aio_read       = generic_file_aio_read,
-       .aio_write      = generic_file_aio_write,
+       .read_iter      = generic_file_read_iter,
+       .write_iter     = generic_file_write_iter,
        .mmap           = generic_file_mmap,
        .splice_read    = generic_file_splice_read,
        .splice_write   = generic_file_splice_write,
index 730f24e282a652029ca14b0f5032411512914beb..0a0453a6665925649cbc41edc87d003486e2465e 100644 (file)
@@ -331,15 +331,14 @@ static sector_t jfs_bmap(struct address_space *mapping, sector_t block)
 }
 
 static ssize_t jfs_direct_IO(int rw, struct kiocb *iocb,
-       const struct iovec *iov, loff_t offset, unsigned long nr_segs)
+                            struct iov_iter *iter, loff_t offset)
 {
        struct file *file = iocb->ki_filp;
        struct address_space *mapping = file->f_mapping;
        struct inode *inode = file->f_mapping->host;
        ssize_t ret;
 
-       ret = blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
-                                jfs_get_block);
+       ret = blockdev_direct_IO(rw, iocb, inode, iter, offset, jfs_get_block);
 
        /*
         * In case of error extending write may have instantiated a few
@@ -347,7 +346,7 @@ static ssize_t jfs_direct_IO(int rw, struct kiocb *iocb,
         */
        if (unlikely((rw & WRITE) && ret < 0)) {
                loff_t isize = i_size_read(inode);
-               loff_t end = offset + iov_length(iov, nr_segs);
+               loff_t end = offset + iov_iter_count(iter);
 
                if (end > isize)
                        jfs_write_failed(mapping, end);
index 8743ba9c6742f4c9b2457bd0ef87d58f8bc81051..984c2bbf4f6143356232c9ab6b5c123fa0da3e8b 100644 (file)
@@ -3047,6 +3047,14 @@ int jfs_readdir(struct file *file, struct dir_context *ctx)
 
                dir_index = (u32) ctx->pos;
 
+               /*
+                * NFSv4 reserves cookies 1 and 2 for . and .. so the value
+                * we return to the vfs is one greater than the one we use
+                * internally.
+                */
+               if (dir_index)
+                       dir_index--;
+
                if (dir_index > 1) {
                        struct dir_table_slot dirtab_slot;
 
@@ -3086,7 +3094,7 @@ int jfs_readdir(struct file *file, struct dir_context *ctx)
                        if (p->header.flag & BT_INTERNAL) {
                                jfs_err("jfs_readdir: bad index table");
                                DT_PUTPAGE(mp);
-                               ctx->pos = -1;
+                               ctx->pos = DIREND;
                                return 0;
                        }
                } else {
@@ -3094,14 +3102,14 @@ int jfs_readdir(struct file *file, struct dir_context *ctx)
                                /*
                                 * self "."
                                 */
-                               ctx->pos = 0;
+                               ctx->pos = 1;
                                if (!dir_emit(ctx, ".", 1, ip->i_ino, DT_DIR))
                                        return 0;
                        }
                        /*
                         * parent ".."
                         */
-                       ctx->pos = 1;
+                       ctx->pos = 2;
                        if (!dir_emit(ctx, "..", 2, PARENT(ip), DT_DIR))
                                return 0;
 
@@ -3122,22 +3130,23 @@ int jfs_readdir(struct file *file, struct dir_context *ctx)
                /*
                 * Legacy filesystem - OS/2 & Linux JFS < 0.3.6
                 *
-                * pn = index = 0:      First entry "."
-                * pn = 0; index = 1:   Second entry ".."
+                * pn = 0; index = 1:   First entry "."
+                * pn = 0; index = 2:   Second entry ".."
                 * pn > 0:              Real entries, pn=1 -> leftmost page
                 * pn = index = -1:     No more entries
                 */
                dtpos = ctx->pos;
-               if (dtpos == 0) {
+               if (dtpos < 2) {
                        /* build "." entry */
+                       ctx->pos = 1;
                        if (!dir_emit(ctx, ".", 1, ip->i_ino, DT_DIR))
                                return 0;
-                       dtoffset->index = 1;
+                       dtoffset->index = 2;
                        ctx->pos = dtpos;
                }
 
                if (dtoffset->pn == 0) {
-                       if (dtoffset->index == 1) {
+                       if (dtoffset->index == 2) {
                                /* build ".." entry */
                                if (!dir_emit(ctx, "..", 2, PARENT(ip), DT_DIR))
                                        return 0;
@@ -3228,6 +3237,12 @@ int jfs_readdir(struct file *file, struct dir_context *ctx)
                                        }
                                        jfs_dirent->position = unique_pos++;
                                }
+                               /*
+                                * We add 1 to the index because we may
+                                * use a value of 2 internally, and NFSv4
+                                * doesn't like that.
+                                */
+                               jfs_dirent->position++;
                        } else {
                                jfs_dirent->position = dtpos;
                                len = min(d_namleft, DTLHDRDATALEN_LEGACY);
index 9c501449450dc9be6891e5d9c1a035ca31b5687b..427bb73e298f197d4cfc73b17baeffdd1c848c5d 100644 (file)
@@ -245,8 +245,8 @@ static int logfs_mtd_can_write_buf(struct super_block *sb, u64 ofs)
                goto out;
        if (memchr_inv(buf, 0xff, super->s_writesize))
                err = -EIO;
-       kfree(buf);
 out:
+       kfree(buf);
        return err;
 }
 
index 57914fc32b62538f43909d35ffc031742b98a881..57f994e887b5344daa7a47f279c233d41188fba1 100644 (file)
@@ -264,8 +264,8 @@ const struct inode_operations logfs_reg_iops = {
 };
 
 const struct file_operations logfs_reg_fops = {
-       .aio_read       = generic_file_aio_read,
-       .aio_write      = generic_file_aio_write,
+       .read_iter      = generic_file_read_iter,
+       .write_iter     = generic_file_write_iter,
        .fsync          = logfs_fsync,
        .unlocked_ioctl = logfs_ioctl,
        .llseek         = generic_file_llseek,
index 54360293bcb5cd0680c3042e6f0b9b87e342c649..b256c0690e5b66f5b16a2b81be033cc3b345dec6 100644 (file)
@@ -287,14 +287,14 @@ static int logfs_make_writeable(struct super_block *sb)
        if (err)
                return err;
 
+       /* Do one GC pass before any data gets dirtied */
+       logfs_gc_pass(sb);
+
        /* Check areas for trailing unaccounted data */
        err = logfs_check_areas(sb);
        if (err)
                return err;
 
-       /* Do one GC pass before any data gets dirtied */
-       logfs_gc_pass(sb);
-
        /* after all initializations are done, replay the journal
         * for rw-mounts, if necessary */
        err = logfs_replay_journal(sb);
index adc6f5494231bc947f45d8a3c526db0b36f39bf3..346d8f37d342df53f5a7d9736431283e33eea431 100644 (file)
@@ -15,9 +15,9 @@
 const struct file_operations minix_file_operations = {
        .llseek         = generic_file_llseek,
        .read           = do_sync_read,
-       .aio_read       = generic_file_aio_read,
+       .read_iter      = generic_file_read_iter,
        .write          = do_sync_write,
-       .aio_write      = generic_file_aio_write,
+       .write_iter     = generic_file_write_iter,
        .mmap           = generic_file_mmap,
        .fsync          = generic_file_fsync,
        .splice_read    = generic_file_splice_read,
index 340b1eff02679ad3485f51f6c526ba3363b2ef53..2dceee4db07652fd449ef713037a8a268d864f00 100644 (file)
@@ -501,8 +501,7 @@ nfs_get_client(const struct nfs_client_initdata *cl_init,
                                        &nn->nfs_client_list);
                        spin_unlock(&nn->nfs_client_lock);
                        new->cl_flags = cl_init->init_flags;
-                       return rpc_ops->init_client(new, timeparms, ip_addr,
-                                                   authflavour);
+                       return rpc_ops->init_client(new, timeparms, ip_addr);
                }
 
                spin_unlock(&nn->nfs_client_lock);
@@ -694,13 +693,12 @@ EXPORT_SYMBOL_GPL(nfs_init_server_rpcclient);
  * @clp: nfs_client to initialise
  * @timeparms: timeout parameters for underlying RPC transport
  * @ip_addr: IP presentation address (not used)
- * @authflavor: authentication flavor for underlying RPC transport
  *
  * Returns pointer to an NFS client, or an ERR_PTR value.
  */
 struct nfs_client *nfs_init_client(struct nfs_client *clp,
                    const struct rpc_timeout *timeparms,
-                   const char *ip_addr, rpc_authflavor_t authflavour)
+                   const char *ip_addr)
 {
        int error;
 
index e474ca2b2bfea832d488536a807ae140073dd591..39e69d47cd0b042e38510af9e5991a286529071e 100644 (file)
@@ -1413,6 +1413,10 @@ int nfs_atomic_open(struct inode *dir, struct dentry *dentry,
        dfprintk(VFS, "NFS: atomic_open(%s/%ld), %s\n",
                        dir->i_sb->s_id, dir->i_ino, dentry->d_name.name);
 
+       err = nfs_check_flags(open_flags);
+       if (err)
+               return err;
+
        /* NFS only supports OPEN on regular files */
        if ((open_flags & O_DIRECTORY)) {
                if (!d_unhashed(dentry)) {
index 0bd7a55a5f073befd4d0ce97e87cca2dd0d42e37..239c2fe05ea5f2a5a16ccc10b3426e77a842e4d6 100644 (file)
@@ -90,6 +90,7 @@ struct nfs_direct_req {
        int                     flags;
 #define NFS_ODIRECT_DO_COMMIT          (1)     /* an unstable reply was received */
 #define NFS_ODIRECT_RESCHED_WRITES     (2)     /* write verification failed */
+#define NFS_ODIRECT_MARK_DIRTY         (4)     /* mark read pages dirty */
        struct nfs_writeverf    verf;           /* unstable write verifier */
 };
 
@@ -112,33 +113,23 @@ static inline int put_dreq(struct nfs_direct_req *dreq)
  * nfs_direct_IO - NFS address space operation for direct I/O
  * @rw: direction (read or write)
  * @iocb: target I/O control block
- * @iov: array of vectors that define I/O buffer
+ * @iter: array of vectors that define I/O buffer
  * @pos: offset in file to begin the operation
  * @nr_segs: size of iovec array
  *
  * The presence of this routine in the address space ops vector means
- * the NFS client supports direct I/O. However, for most direct IO, we
- * shunt off direct read and write requests before the VFS gets them,
- * so this method is only ever called for swap.
+ * the NFS client supports direct I/O. However, we shunt off direct
+ * read and write requests before the VFS gets them, so this method
+ * should never be called.
  */
-ssize_t nfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, loff_t pos, unsigned long nr_segs)
+ssize_t nfs_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter,
+                     loff_t pos)
 {
-#ifndef CONFIG_NFS_SWAP
        dprintk("NFS: nfs_direct_IO (%s) off/no(%Ld/%lu) EINVAL\n",
                        iocb->ki_filp->f_path.dentry->d_name.name,
-                       (long long) pos, nr_segs);
+                       (long long) pos, iter->nr_segs);
 
        return -EINVAL;
-#else
-       VM_BUG_ON(iocb->ki_left != PAGE_SIZE);
-       VM_BUG_ON(iocb->ki_nbytes != PAGE_SIZE);
-
-       if (rw == READ || rw == KERNEL_READ)
-               return nfs_file_direct_read(iocb, iov, nr_segs, pos,
-                               rw == READ ? true : false);
-       return nfs_file_direct_write(iocb, iov, nr_segs, pos,
-                               rw == WRITE ? true : false);
-#endif /* CONFIG_NFS_SWAP */
 }
 
 static void nfs_direct_release_pages(struct page **pages, unsigned int npages)
@@ -266,7 +257,8 @@ static void nfs_direct_read_completion(struct nfs_pgio_header *hdr)
                struct nfs_page *req = nfs_list_entry(hdr->pages.next);
                struct page *page = req->wb_page;
 
-               if (!PageCompound(page) && bytes < hdr->good_bytes)
+               if ((dreq->flags & NFS_ODIRECT_MARK_DIRTY) &&
+                   !PageCompound(page) && bytes < hdr->good_bytes)
                        set_page_dirty(page);
                bytes += req->wb_bytes;
                nfs_list_remove_request(req);
@@ -309,7 +301,7 @@ static const struct nfs_pgio_completion_ops nfs_direct_read_completion_ops = {
  */
 static ssize_t nfs_direct_read_schedule_segment(struct nfs_pageio_descriptor *desc,
                                                const struct iovec *iov,
-                                               loff_t pos, bool uio)
+                                               loff_t pos)
 {
        struct nfs_direct_req *dreq = desc->pg_dreq;
        struct nfs_open_context *ctx = dreq->ctx;
@@ -337,20 +329,12 @@ static ssize_t nfs_direct_read_schedule_segment(struct nfs_pageio_descriptor *de
                                          GFP_KERNEL);
                if (!pagevec)
                        break;
-               if (uio) {
-                       down_read(&current->mm->mmap_sem);
-                       result = get_user_pages(current, current->mm, user_addr,
+               down_read(&current->mm->mmap_sem);
+               result = get_user_pages(current, current->mm, user_addr,
                                        npages, 1, 0, pagevec, NULL);
-                       up_read(&current->mm->mmap_sem);
-                       if (result < 0)
-                               break;
-               } else {
-                       WARN_ON(npages != 1);
-                       result = get_kernel_page(user_addr, 1, pagevec);
-                       if (WARN_ON(result != 1))
-                               break;
-               }
-
+               up_read(&current->mm->mmap_sem);
+               if (result < 0)
+                       break;
                if ((unsigned)result < npages) {
                        bytes = result * PAGE_SIZE;
                        if (bytes <= pgbase) {
@@ -398,24 +382,17 @@ static ssize_t nfs_direct_read_schedule_segment(struct nfs_pageio_descriptor *de
        return result < 0 ? (ssize_t) result : -EFAULT;
 }
 
-static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
-                                             const struct iovec *iov,
-                                             unsigned long nr_segs,
-                                             loff_t pos, bool uio)
+static ssize_t nfs_direct_do_schedule_read_iovec(
+               struct nfs_pageio_descriptor *desc, const struct iovec *iov,
+               unsigned long nr_segs, loff_t pos)
 {
-       struct nfs_pageio_descriptor desc;
        ssize_t result = -EINVAL;
        size_t requested_bytes = 0;
        unsigned long seg;
 
-       NFS_PROTO(dreq->inode)->read_pageio_init(&desc, dreq->inode,
-                            &nfs_direct_read_completion_ops);
-       get_dreq(dreq);
-       desc.pg_dreq = dreq;
-
        for (seg = 0; seg < nr_segs; seg++) {
                const struct iovec *vec = &iov[seg];
-               result = nfs_direct_read_schedule_segment(&desc, vec, pos, uio);
+               result = nfs_direct_read_schedule_segment(desc, vec, pos);
                if (result < 0)
                        break;
                requested_bytes += result;
@@ -423,6 +400,75 @@ static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
                        break;
                pos += vec->iov_len;
        }
+       if (requested_bytes)
+               return requested_bytes;
+
+       return result < 0 ? result : -EIO;
+}
+
+#ifdef CONFIG_BLOCK
+static ssize_t nfs_direct_do_schedule_read_bvec(
+               struct nfs_pageio_descriptor *desc,
+               struct bio_vec *bvec, unsigned long nr_segs, loff_t pos)
+{
+       struct nfs_direct_req *dreq = desc->pg_dreq;
+       struct nfs_open_context *ctx = dreq->ctx;
+       struct inode *inode = ctx->dentry->d_inode;
+       ssize_t result = -EINVAL;
+       size_t requested_bytes = 0;
+       unsigned long seg;
+       struct nfs_page *req;
+       unsigned int req_len;
+
+       for (seg = 0; seg < nr_segs; seg++) {
+               result = -EIO;
+               req_len = bvec[seg].bv_len;
+               req = nfs_create_request(ctx, inode,
+                                        bvec[seg].bv_page,
+                                        bvec[seg].bv_offset, req_len);
+               if (IS_ERR(req)) {
+                       result = PTR_ERR(req);
+                       break;
+               }
+               req->wb_index = pos >> PAGE_SHIFT;
+               req->wb_offset = pos & ~PAGE_MASK;
+               if (!nfs_pageio_add_request(desc, req)) {
+                       result = desc->pg_error;
+                       nfs_release_request(req);
+                       break;
+               }
+               requested_bytes += req_len;
+               pos += req_len;
+       }
+
+       if (requested_bytes)
+               return requested_bytes;
+
+       return result < 0 ? result : -EIO;
+}
+#endif /* CONFIG_BLOCK */
+
+static ssize_t nfs_direct_read_schedule(struct nfs_direct_req *dreq,
+                                       struct iov_iter *iter, loff_t pos)
+{
+       struct nfs_pageio_descriptor desc;
+       ssize_t result;
+
+       NFS_PROTO(dreq->inode)->read_pageio_init(&desc, dreq->inode,
+                            &nfs_direct_read_completion_ops);
+       get_dreq(dreq);
+       desc.pg_dreq = dreq;
+
+       if (iov_iter_has_iovec(iter)) {
+               result = nfs_direct_do_schedule_read_iovec(&desc,
+                               iov_iter_iovec(iter), iter->nr_segs, pos);
+#ifdef CONFIG_BLOCK
+       } else if (iov_iter_has_bvec(iter)) {
+               result = nfs_direct_do_schedule_read_bvec(&desc,
+                               iov_iter_bvec(iter), iter->nr_segs, pos);
+#endif
+       } else
+               BUG();
 
        nfs_pageio_complete(&desc);
 
@@ -430,9 +476,9 @@ static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
         * If no bytes were started, return the error, and let the
         * generic layer handle the completion.
         */
-       if (requested_bytes == 0) {
+       if (result < 0) {
                nfs_direct_req_release(dreq);
-               return result < 0 ? result : -EIO;
+               return result;
        }
 
        if (put_dreq(dreq))
@@ -440,8 +486,8 @@ static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
        return 0;
 }
 
-static ssize_t nfs_direct_read(struct kiocb *iocb, const struct iovec *iov,
-                              unsigned long nr_segs, loff_t pos, bool uio)
+static ssize_t nfs_direct_read(struct kiocb *iocb, struct iov_iter *iter,
+                              loff_t pos)
 {
        ssize_t result = -ENOMEM;
        struct inode *inode = iocb->ki_filp->f_mapping->host;
@@ -453,7 +499,7 @@ static ssize_t nfs_direct_read(struct kiocb *iocb, const struct iovec *iov,
                goto out;
 
        dreq->inode = inode;
-       dreq->bytes_left = iov_length(iov, nr_segs);
+       dreq->bytes_left = iov_iter_count(iter);
        dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
        l_ctx = nfs_get_lock_context(dreq->ctx);
        if (IS_ERR(l_ctx)) {
@@ -464,8 +510,8 @@ static ssize_t nfs_direct_read(struct kiocb *iocb, const struct iovec *iov,
        if (!is_sync_kiocb(iocb))
                dreq->iocb = iocb;
 
-       NFS_I(inode)->read_io += iov_length(iov, nr_segs);
-       result = nfs_direct_read_schedule_iovec(dreq, iov, nr_segs, pos, uio);
+       NFS_I(inode)->read_io += iov_iter_count(iter);
+       result = nfs_direct_read_schedule(dreq, iter, pos);
        if (!result)
                result = nfs_direct_wait(dreq);
 out_release:
@@ -630,7 +676,7 @@ static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode
  */
 static ssize_t nfs_direct_write_schedule_segment(struct nfs_pageio_descriptor *desc,
                                                 const struct iovec *iov,
-                                                loff_t pos, bool uio)
+                                                loff_t pos)
 {
        struct nfs_direct_req *dreq = desc->pg_dreq;
        struct nfs_open_context *ctx = dreq->ctx;
@@ -658,19 +704,12 @@ static ssize_t nfs_direct_write_schedule_segment(struct nfs_pageio_descriptor *d
                if (!pagevec)
                        break;
 
-               if (uio) {
-                       down_read(&current->mm->mmap_sem);
-                       result = get_user_pages(current, current->mm, user_addr,
-                                               npages, 0, 0, pagevec, NULL);
-                       up_read(&current->mm->mmap_sem);
-                       if (result < 0)
-                               break;
-               } else {
-                       WARN_ON(npages != 1);
-                       result = get_kernel_page(user_addr, 0, pagevec);
-                       if (WARN_ON(result != 1))
-                               break;
-               }
+               down_read(&current->mm->mmap_sem);
+               result = get_user_pages(current, current->mm, user_addr,
+                                       npages, 0, 0, pagevec, NULL);
+               up_read(&current->mm->mmap_sem);
+               if (result < 0)
+                       break;
 
                if ((unsigned)result < npages) {
                        bytes = result * PAGE_SIZE;
@@ -799,27 +838,18 @@ static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops = {
        .completion = nfs_direct_write_completion,
 };
 
-static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
-                                              const struct iovec *iov,
-                                              unsigned long nr_segs,
-                                              loff_t pos, bool uio)
+static ssize_t nfs_direct_do_schedule_write_iovec(
+               struct nfs_pageio_descriptor *desc, const struct iovec *iov,
+               unsigned long nr_segs, loff_t pos)
 {
-       struct nfs_pageio_descriptor desc;
-       struct inode *inode = dreq->inode;
-       ssize_t result = 0;
+       ssize_t result = -EINVAL;
        size_t requested_bytes = 0;
        unsigned long seg;
 
-       NFS_PROTO(inode)->write_pageio_init(&desc, inode, FLUSH_COND_STABLE,
-                             &nfs_direct_write_completion_ops);
-       desc.pg_dreq = dreq;
-       get_dreq(dreq);
-       atomic_inc(&inode->i_dio_count);
-
-       NFS_I(dreq->inode)->write_io += iov_length(iov, nr_segs);
        for (seg = 0; seg < nr_segs; seg++) {
                const struct iovec *vec = &iov[seg];
-               result = nfs_direct_write_schedule_segment(&desc, vec, pos, uio);
+               result = nfs_direct_write_schedule_segment(desc, vec,
+                                                          pos);
                if (result < 0)
                        break;
                requested_bytes += result;
@@ -827,16 +857,91 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
                        break;
                pos += vec->iov_len;
        }
+
+       if (requested_bytes)
+               return requested_bytes;
+
+       return result < 0 ? result : -EIO;
+}
+
+#ifdef CONFIG_BLOCK
+static ssize_t nfs_direct_do_schedule_write_bvec(
+               struct nfs_pageio_descriptor *desc,
+               struct bio_vec *bvec, unsigned long nr_segs, loff_t pos)
+{
+       struct nfs_direct_req *dreq = desc->pg_dreq;
+       struct nfs_open_context *ctx = dreq->ctx;
+       struct inode *inode = dreq->inode;
+       ssize_t result = 0;
+       size_t requested_bytes = 0;
+       unsigned long seg;
+       struct nfs_page *req;
+       unsigned int req_len;
+
+       for (seg = 0; seg < nr_segs; seg++) {
+               req_len = bvec[seg].bv_len;
+
+               req = nfs_create_request(ctx, inode, bvec[seg].bv_page,
+                                        bvec[seg].bv_offset, req_len);
+               if (IS_ERR(req)) {
+                       result = PTR_ERR(req);
+                       break;
+               }
+               nfs_lock_request(req);
+               req->wb_index = pos >> PAGE_SHIFT;
+               req->wb_offset = pos & ~PAGE_MASK;
+               if (!nfs_pageio_add_request(desc, req)) {
+                       result = desc->pg_error;
+                       nfs_unlock_and_release_request(req);
+                       break;
+               }
+               requested_bytes += req_len;
+               pos += req_len;
+       }
+
+       if (requested_bytes)
+               return requested_bytes;
+
+       return result < 0 ? result : -EIO;
+}
+#endif /* CONFIG_BLOCK */
+
+static ssize_t nfs_direct_write_schedule(struct nfs_direct_req *dreq,
+                                        struct iov_iter *iter, loff_t pos)
+{
+       struct nfs_pageio_descriptor desc;
+       struct inode *inode = dreq->inode;
+       ssize_t result = 0;
+
+       NFS_PROTO(inode)->write_pageio_init(&desc, inode, FLUSH_COND_STABLE,
+                             &nfs_direct_write_completion_ops);
+       desc.pg_dreq = dreq;
+       get_dreq(dreq);
+       atomic_inc(&inode->i_dio_count);
+
+       NFS_I(dreq->inode)->write_io += iov_iter_count(iter);
+
+       if (iov_iter_has_iovec(iter)) {
+               result = nfs_direct_do_schedule_write_iovec(&desc,
+                               iov_iter_iovec(iter), iter->nr_segs, pos);
+#ifdef CONFIG_BLOCK
+       } else if (iov_iter_has_bvec(iter)) {
+               result = nfs_direct_do_schedule_write_bvec(&desc,
+                               iov_iter_bvec(iter), iter->nr_segs, pos);
+#endif
+       } else
+               BUG();
+
        nfs_pageio_complete(&desc);
 
        /*
         * If no bytes were started, return the error, and let the
         * generic layer handle the completion.
         */
-       if (requested_bytes == 0) {
+       if (result < 0) {
                inode_dio_done(inode);
                nfs_direct_req_release(dreq);
-               return result < 0 ? result : -EIO;
+               return result;
        }
 
        if (put_dreq(dreq))
@@ -844,9 +949,8 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
        return 0;
 }
 
-static ssize_t nfs_direct_write(struct kiocb *iocb, const struct iovec *iov,
-                               unsigned long nr_segs, loff_t pos,
-                               size_t count, bool uio)
+static ssize_t nfs_direct_write(struct kiocb *iocb, struct iov_iter *iter,
+                               loff_t pos)
 {
        ssize_t result = -ENOMEM;
        struct inode *inode = iocb->ki_filp->f_mapping->host;
@@ -858,7 +962,7 @@ static ssize_t nfs_direct_write(struct kiocb *iocb, const struct iovec *iov,
                goto out;
 
        dreq->inode = inode;
-       dreq->bytes_left = count;
+       dreq->bytes_left = iov_iter_count(iter);
        dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
        l_ctx = nfs_get_lock_context(dreq->ctx);
        if (IS_ERR(l_ctx)) {
@@ -869,7 +973,7 @@ static ssize_t nfs_direct_write(struct kiocb *iocb, const struct iovec *iov,
        if (!is_sync_kiocb(iocb))
                dreq->iocb = iocb;
 
-       result = nfs_direct_write_schedule_iovec(dreq, iov, nr_segs, pos, uio);
+       result = nfs_direct_write_schedule(dreq, iter, pos);
        if (!result)
                result = nfs_direct_wait(dreq);
 out_release:
@@ -881,12 +985,11 @@ out:
 /**
  * nfs_file_direct_read - file direct read operation for NFS files
  * @iocb: target I/O control block
- * @iov: vector of user buffers into which to read data
- * @nr_segs: size of iov vector
+ * @iter: vector of buffers into which to read data
  * @pos: byte offset in file where reading starts
  *
  * We use this function for direct reads instead of calling
- * generic_file_aio_read() in order to avoid gfar's check to see if
+ * generic_file_read_iter() in order to avoid gfar's check to see if
  * the request starts before the end of the file.  For that check
  * to work, we must generate a GETATTR before each direct read, and
  * even then there is a window between the GETATTR and the subsequent
@@ -899,15 +1002,15 @@ out:
  * client must read the updated atime from the server back into its
  * cache.
  */
-ssize_t nfs_file_direct_read(struct kiocb *iocb, const struct iovec *iov,
-                               unsigned long nr_segs, loff_t pos, bool uio)
+ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter,
+                            loff_t pos)
 {
        ssize_t retval = -EINVAL;
        struct file *file = iocb->ki_filp;
        struct address_space *mapping = file->f_mapping;
        size_t count;
 
-       count = iov_length(iov, nr_segs);
+       count = iov_iter_count(iter);
        nfs_add_stats(mapping->host, NFSIOS_DIRECTREADBYTES, count);
 
        dfprintk(FILE, "NFS: direct read(%s/%s, %zd@%Ld)\n",
@@ -925,7 +1028,7 @@ ssize_t nfs_file_direct_read(struct kiocb *iocb, const struct iovec *iov,
 
        task_io_account_read(count);
 
-       retval = nfs_direct_read(iocb, iov, nr_segs, pos, uio);
+       retval = nfs_direct_read(iocb, iter, pos);
        if (retval > 0)
                iocb->ki_pos = pos + retval;
 
@@ -936,12 +1039,11 @@ out:
 /**
  * nfs_file_direct_write - file direct write operation for NFS files
  * @iocb: target I/O control block
- * @iov: vector of user buffers from which to write data
- * @nr_segs: size of iov vector
+ * @iter: vector of buffers from which to write data
  * @pos: byte offset in file where writing starts
  *
  * We use this function for direct writes instead of calling
- * generic_file_aio_write() in order to avoid taking the inode
+ * generic_file_write_iter() in order to avoid taking the inode
  * semaphore and updating the i_size.  The NFS server will set
  * the new i_size and this client must read the updated size
  * back into its cache.  We let the server do generic write
@@ -955,15 +1057,15 @@ out:
  * Note that O_APPEND is not supported for NFS direct writes, as there
  * is no atomic O_APPEND write facility in the NFS protocol.
  */
-ssize_t nfs_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
-                               unsigned long nr_segs, loff_t pos, bool uio)
+ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter,
+                             loff_t pos)
 {
        ssize_t retval = -EINVAL;
        struct file *file = iocb->ki_filp;
        struct address_space *mapping = file->f_mapping;
        size_t count;
 
-       count = iov_length(iov, nr_segs);
+       count = iov_iter_count(iter);
        nfs_add_stats(mapping->host, NFSIOS_DIRECTWRITTENBYTES, count);
 
        dfprintk(FILE, "NFS: direct write(%s/%s, %zd@%Ld)\n",
@@ -988,7 +1090,7 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
 
        task_io_account_write(count);
 
-       retval = nfs_direct_write(iocb, iov, nr_segs, pos, count, uio);
+       retval = nfs_direct_write(iocb, iter, pos);
        if (retval > 0) {
                struct inode *inode = mapping->host;
 
index 94e94bd11aae6d0a6c32acf5c16491448edb93f8..3e210ca964263f9238cace8edf5a73c7aa568fd3 100644 (file)
@@ -172,29 +172,28 @@ nfs_file_flush(struct file *file, fl_owner_t id)
 EXPORT_SYMBOL_GPL(nfs_file_flush);
 
 ssize_t
-nfs_file_read(struct kiocb *iocb, const struct iovec *iov,
-               unsigned long nr_segs, loff_t pos)
+nfs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter, loff_t pos)
 {
        struct dentry * dentry = iocb->ki_filp->f_path.dentry;
        struct inode * inode = dentry->d_inode;
        ssize_t result;
 
        if (iocb->ki_filp->f_flags & O_DIRECT)
-               return nfs_file_direct_read(iocb, iov, nr_segs, pos, true);
+               return nfs_file_direct_read(iocb, iter, pos);
 
-       dprintk("NFS: read(%s/%s, %lu@%lu)\n",
+       dprintk("NFS: read_iter(%s/%s, %lu@%lu)\n",
                dentry->d_parent->d_name.name, dentry->d_name.name,
-               (unsigned long) iov_length(iov, nr_segs), (unsigned long) pos);
+               (unsigned long) iov_iter_count(iter), (unsigned long) pos);
 
        result = nfs_revalidate_mapping(inode, iocb->ki_filp->f_mapping);
        if (!result) {
-               result = generic_file_aio_read(iocb, iov, nr_segs, pos);
+               result = generic_file_read_iter(iocb, iter, pos);
                if (result > 0)
                        nfs_add_stats(inode, NFSIOS_NORMALREADBYTES, result);
        }
        return result;
 }
-EXPORT_SYMBOL_GPL(nfs_file_read);
+EXPORT_SYMBOL_GPL(nfs_file_read_iter);
 
 ssize_t
 nfs_file_splice_read(struct file *filp, loff_t *ppos,
@@ -250,7 +249,7 @@ EXPORT_SYMBOL_GPL(nfs_file_mmap);
  * disk, but it retrieves and clears ctx->error after synching, despite
  * the two being set at the same time in nfs_context_set_write_error().
  * This is because the former is used to notify the _next_ call to
- * nfs_file_write() that a write error occurred, and hence cause it to
+ * nfs_file_write_iter() that a write error occurred, and hence cause it to
  * fall back to doing a synchronous write.
  */
 int
@@ -642,19 +641,19 @@ static int nfs_need_sync_write(struct file *filp, struct inode *inode)
        return 0;
 }
 
-ssize_t nfs_file_write(struct kiocb *iocb, const struct iovec *iov,
-                      unsigned long nr_segs, loff_t pos)
+ssize_t nfs_file_write_iter(struct kiocb *iocb, struct iov_iter *iter,
+                                  loff_t pos)
 {
        struct dentry * dentry = iocb->ki_filp->f_path.dentry;
        struct inode * inode = dentry->d_inode;
        unsigned long written = 0;
        ssize_t result;
-       size_t count = iov_length(iov, nr_segs);
+       size_t count = iov_iter_count(iter);
 
        if (iocb->ki_filp->f_flags & O_DIRECT)
-               return nfs_file_direct_write(iocb, iov, nr_segs, pos, true);
+               return nfs_file_direct_write(iocb, iter, pos);
 
-       dprintk("NFS: write(%s/%s, %lu@%Ld)\n",
+       dprintk("NFS: write_iter(%s/%s, %lu@%lld)\n",
                dentry->d_parent->d_name.name, dentry->d_name.name,
                (unsigned long) count, (long long) pos);
 
@@ -674,7 +673,7 @@ ssize_t nfs_file_write(struct kiocb *iocb, const struct iovec *iov,
        if (!count)
                goto out;
 
-       result = generic_file_aio_write(iocb, iov, nr_segs, pos);
+       result = generic_file_write_iter(iocb, iter, pos);
        if (result > 0)
                written = result;
 
@@ -693,7 +692,7 @@ out_swapfile:
        printk(KERN_INFO "NFS: attempt to write to active swap file!\n");
        goto out;
 }
-EXPORT_SYMBOL_GPL(nfs_file_write);
+EXPORT_SYMBOL_GPL(nfs_file_write_iter);
 
 ssize_t nfs_file_splice_write(struct pipe_inode_info *pipe,
                              struct file *filp, loff_t *ppos,
@@ -953,8 +952,8 @@ const struct file_operations nfs_file_operations = {
        .llseek         = nfs_file_llseek,
        .read           = do_sync_read,
        .write          = do_sync_write,
-       .aio_read       = nfs_file_read,
-       .aio_write      = nfs_file_write,
+       .read_iter      = nfs_file_read_iter,
+       .write_iter     = nfs_file_write_iter,
        .mmap           = nfs_file_mmap,
        .open           = nfs_file_open,
        .flush          = nfs_file_flush,
index 3c8373f90ab3150f2530a795b977c1489a344771..7a7599b19ae58610b19a14735de66cdb49e67ce4 100644 (file)
@@ -267,7 +267,7 @@ extern struct rpc_procinfo nfs4_procedures[];
 void nfs_close_context(struct nfs_open_context *ctx, int is_sync);
 extern struct nfs_client *nfs_init_client(struct nfs_client *clp,
                           const struct rpc_timeout *timeparms,
-                          const char *ip_addr, rpc_authflavor_t authflavour);
+                          const char *ip_addr);
 
 /* dir.c */
 extern int nfs_access_cache_shrinker(struct shrinker *shrink,
@@ -286,11 +286,11 @@ int nfs_rename(struct inode *, struct dentry *, struct inode *, struct dentry *)
 int nfs_file_fsync_commit(struct file *, loff_t, loff_t, int);
 loff_t nfs_file_llseek(struct file *, loff_t, int);
 int nfs_file_flush(struct file *, fl_owner_t);
-ssize_t nfs_file_read(struct kiocb *, const struct iovec *, unsigned long, loff_t);
+ssize_t nfs_file_read_iter(struct kiocb *, struct iov_iter *, loff_t);
 ssize_t nfs_file_splice_read(struct file *, loff_t *, struct pipe_inode_info *,
                             size_t, unsigned int);
 int nfs_file_mmap(struct file *, struct vm_area_struct *);
-ssize_t nfs_file_write(struct kiocb *, const struct iovec *, unsigned long, loff_t);
+ssize_t nfs_file_write_iter(struct kiocb *, struct iov_iter *, loff_t);
 int nfs_file_release(struct inode *, struct file *);
 int nfs_lock(struct file *, int, struct file_lock *);
 int nfs_flock(struct file *, int, struct file_lock *);
@@ -451,8 +451,7 @@ extern ssize_t nfs_dreq_bytes_left(struct nfs_direct_req *dreq);
 extern void __nfs4_read_done_cb(struct nfs_read_data *);
 extern struct nfs_client *nfs4_init_client(struct nfs_client *clp,
                            const struct rpc_timeout *timeparms,
-                           const char *ip_addr,
-                           rpc_authflavor_t authflavour);
+                           const char *ip_addr);
 extern int nfs40_walk_client_list(struct nfs_client *clp,
                                struct nfs_client **result,
                                struct rpc_cred *cred);
index ee81e354bce7a9d7fbc36023c993fe312ef3e255..d7bb59d5dd9c63dbba3986cfc1d85afe4d293387 100644 (file)
@@ -193,7 +193,6 @@ struct nfs4_state_recovery_ops {
        int (*recover_open)(struct nfs4_state_owner *, struct nfs4_state *);
        int (*recover_lock)(struct nfs4_state *, struct file_lock *);
        int (*establish_clid)(struct nfs_client *, struct rpc_cred *);
-       struct rpc_cred * (*get_clid_cred)(struct nfs_client *);
        int (*reclaim_complete)(struct nfs_client *, struct rpc_cred *);
        int (*detect_trunking)(struct nfs_client *, struct nfs_client **,
                struct rpc_cred *);
@@ -319,7 +318,7 @@ extern void nfs4_kill_renewd(struct nfs_client *);
 extern void nfs4_renew_state(struct work_struct *);
 
 /* nfs4state.c */
-struct rpc_cred *nfs4_get_setclientid_cred(struct nfs_client *clp);
+struct rpc_cred *nfs4_get_clid_cred(struct nfs_client *clp);
 struct rpc_cred *nfs4_get_machine_cred_locked(struct nfs_client *clp);
 struct rpc_cred *nfs4_get_renew_cred_locked(struct nfs_client *clp);
 int nfs4_discover_server_trunking(struct nfs_client *clp,
@@ -327,7 +326,6 @@ int nfs4_discover_server_trunking(struct nfs_client *clp,
 int nfs40_discover_server_trunking(struct nfs_client *clp,
                        struct nfs_client **, struct rpc_cred *);
 #if defined(CONFIG_NFS_V4_1)
-struct rpc_cred *nfs4_get_exchange_id_cred(struct nfs_client *clp);
 int nfs41_discover_server_trunking(struct nfs_client *clp,
                        struct nfs_client **, struct rpc_cred *);
 extern void nfs4_schedule_session_recovery(struct nfs4_session *, int);
index 90dce91dd5b5c7aa61a7a17c34242619bbfc5b8d..767a5e37fe9731673a81ae284123979b4b72134d 100644 (file)
@@ -187,8 +187,7 @@ static int nfs4_init_client_minor_version(struct nfs_client *clp)
  */
 struct nfs_client *nfs4_init_client(struct nfs_client *clp,
                                    const struct rpc_timeout *timeparms,
-                                   const char *ip_addr,
-                                   rpc_authflavor_t authflavour)
+                                   const char *ip_addr)
 {
        char buf[INET6_ADDRSTRLEN + 1];
        struct nfs_client *old;
index e5b804dd944c16a8adf4de17ee6588562cec55e8..e13bb0281ebf0b858295bd9482f9c4a620f01e5d 100644 (file)
@@ -121,8 +121,8 @@ const struct file_operations nfs4_file_operations = {
        .llseek         = nfs_file_llseek,
        .read           = do_sync_read,
        .write          = do_sync_write,
-       .aio_read       = nfs_file_read,
-       .aio_write      = nfs_file_write,
+       .read_iter      = nfs_file_read_iter,
+       .write_iter     = nfs_file_write_iter,
        .mmap           = nfs_file_mmap,
        .open           = nfs4_file_open,
        .flush          = nfs_file_flush,
index 108a774095f7ef6a53fc04366aec3caa3bfea717..f672c340393df51cfe3cf20709cfdbdcaf8fc585 100644 (file)
@@ -2940,10 +2940,10 @@ nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
        
        /* Deal with open(O_TRUNC) */
        if (sattr->ia_valid & ATTR_OPEN)
-               sattr->ia_valid &= ~(ATTR_MTIME|ATTR_CTIME|ATTR_OPEN);
+               sattr->ia_valid &= ~(ATTR_MTIME|ATTR_CTIME);
 
        /* Optimization: if the end result is no change, don't RPC */
-       if ((sattr->ia_valid & ~(ATTR_FILE)) == 0)
+       if ((sattr->ia_valid & ~(ATTR_FILE|ATTR_OPEN)) == 0)
                return 0;
 
        /* Search for an existing open(O_WRITE) file */
@@ -4660,10 +4660,14 @@ static unsigned int
 nfs4_init_uniform_client_string(const struct nfs_client *clp,
                                char *buf, size_t len)
 {
-       char *nodename = clp->cl_rpcclient->cl_nodename;
+       const char *nodename = clp->cl_rpcclient->cl_nodename;
 
        if (nfs4_client_id_uniquifier[0] != '\0')
-               nodename = nfs4_client_id_uniquifier;
+               return scnprintf(buf, len, "Linux NFSv%u.%u %s/%s",
+                               clp->rpc_ops->version,
+                               clp->cl_minorversion,
+                               nfs4_client_id_uniquifier,
+                               nodename);
        return scnprintf(buf, len, "Linux NFSv%u.%u %s",
                                clp->rpc_ops->version, clp->cl_minorversion,
                                nodename);
@@ -5788,6 +5792,10 @@ int nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir,
        return err;
 }
 
+/**
+ * Use the state management nfs_client cl_rpcclient, which uses krb5i (if
+ * possible) as per RFC3530bis and RFC5661 Security Considerations sections
+ */
 static int _nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, struct nfs4_secinfo_flavors *flavors)
 {
        int status;
@@ -5803,9 +5811,10 @@ static int _nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, struct
                .rpc_argp = &args,
                .rpc_resp = &res,
        };
+       struct rpc_clnt *clnt = NFS_SERVER(dir)->nfs_client->cl_rpcclient;
 
        dprintk("NFS call  secinfo %s\n", name->name);
-       status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &msg, &args.seq_args, &res.seq_res, 0);
+       status = nfs4_call_sync(clnt, NFS_SERVER(dir), &msg, &args.seq_args, &res.seq_res, 0);
        dprintk("NFS reply  secinfo: %d\n", status);
        return status;
 }
@@ -6063,7 +6072,7 @@ int nfs4_destroy_clientid(struct nfs_client *clp)
                goto out;
        if (clp->cl_preserve_clid)
                goto out;
-       cred = nfs4_get_exchange_id_cred(clp);
+       cred = nfs4_get_clid_cred(clp);
        ret = nfs4_proc_destroy_clientid(clp, cred);
        if (cred)
                put_rpccred(cred);
@@ -6874,7 +6883,7 @@ int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp)
                .rpc_cred = lrp->cred,
        };
        struct rpc_task_setup task_setup_data = {
-               .rpc_client = lrp->clp->cl_rpcclient,
+               .rpc_client = NFS_SERVER(lrp->args.inode)->client,
                .rpc_message = &msg,
                .callback_ops = &nfs4_layoutreturn_call_ops,
                .callback_data = lrp,
@@ -7079,6 +7088,10 @@ out:
        return status;
 }
 
+/**
+ * Use the state management nfs_client cl_rpcclient, which uses krb5i (if
+ * possible) as per RFC3530bis and RFC5661 Security Considerations sections
+ */
 static int
 _nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
                    struct nfs_fsinfo *info, struct nfs4_secinfo_flavors *flavors)
@@ -7094,7 +7107,8 @@ _nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
                .rpc_argp = &args,
                .rpc_resp = &res,
        };
-       return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
+       return nfs4_call_sync(server->nfs_client->cl_rpcclient, server, &msg,
+                               &args.seq_args, &res.seq_res, 0);
 }
 
 static int
@@ -7357,7 +7371,6 @@ static const struct nfs4_state_recovery_ops nfs40_reboot_recovery_ops = {
        .recover_open   = nfs4_open_reclaim,
        .recover_lock   = nfs4_lock_reclaim,
        .establish_clid = nfs4_init_clientid,
-       .get_clid_cred  = nfs4_get_setclientid_cred,
        .detect_trunking = nfs40_discover_server_trunking,
 };
 
@@ -7368,7 +7381,6 @@ static const struct nfs4_state_recovery_ops nfs41_reboot_recovery_ops = {
        .recover_open   = nfs4_open_reclaim,
        .recover_lock   = nfs4_lock_reclaim,
        .establish_clid = nfs41_init_clientid,
-       .get_clid_cred  = nfs4_get_exchange_id_cred,
        .reclaim_complete = nfs41_proc_reclaim_complete,
        .detect_trunking = nfs41_discover_server_trunking,
 };
@@ -7380,7 +7392,6 @@ static const struct nfs4_state_recovery_ops nfs40_nograce_recovery_ops = {
        .recover_open   = nfs4_open_expired,
        .recover_lock   = nfs4_lock_expired,
        .establish_clid = nfs4_init_clientid,
-       .get_clid_cred  = nfs4_get_setclientid_cred,
 };
 
 #if defined(CONFIG_NFS_V4_1)
@@ -7390,7 +7401,6 @@ static const struct nfs4_state_recovery_ops nfs41_nograce_recovery_ops = {
        .recover_open   = nfs41_open_expired,
        .recover_lock   = nfs41_lock_expired,
        .establish_clid = nfs41_init_clientid,
-       .get_clid_cred  = nfs4_get_exchange_id_cred,
 };
 #endif /* CONFIG_NFS_V4_1 */
 
index 36e21cb29d65971dff3f1b104d5685a8cae27d83..202e3633d5554d4b3da2ce589c4925236b5a7710 100644 (file)
@@ -441,7 +441,7 @@ void nfs4_destroy_session(struct nfs4_session *session)
        struct rpc_xprt *xprt;
        struct rpc_cred *cred;
 
-       cred = nfs4_get_exchange_id_cred(session->clp);
+       cred = nfs4_get_clid_cred(session->clp);
        nfs4_proc_destroy_session(session, cred);
        if (cred)
                put_rpccred(cred);
index 3a153d82b90c638215b5d01c68b68116a454d515..86a066926f244426cde10843f7f357a315d8d09d 100644 (file)
@@ -8,7 +8,7 @@
 #define __LINUX_FS_NFS_NFS4SESSION_H
 
 /* maximum number of slots to use */
-#define NFS4_DEF_SLOT_TABLE_SIZE (16U)
+#define NFS4_DEF_SLOT_TABLE_SIZE (64U)
 #define NFS4_MAX_SLOT_TABLE (1024U)
 #define NFS4_NO_SLOT ((u32)-1)
 
index e22862f13564486ab535a437f4d46e6709a30ed9..6818964bb7c0e8fe1432cfc8da286d0b88a01979 100644 (file)
@@ -154,6 +154,19 @@ struct rpc_cred *nfs4_get_machine_cred_locked(struct nfs_client *clp)
        return cred;
 }
 
+static void nfs4_root_machine_cred(struct nfs_client *clp)
+{
+       struct rpc_cred *cred, *new;
+
+       new = rpc_lookup_machine_cred(NULL);
+       spin_lock(&clp->cl_lock);
+       cred = clp->cl_machine_cred;
+       clp->cl_machine_cred = new;
+       spin_unlock(&clp->cl_lock);
+       if (cred != NULL)
+               put_rpccred(cred);
+}
+
 static struct rpc_cred *
 nfs4_get_renew_cred_server_locked(struct nfs_server *server)
 {
@@ -339,62 +352,21 @@ int nfs41_discover_server_trunking(struct nfs_client *clp,
        return nfs41_walk_client_list(clp, result, cred);
 }
 
-struct rpc_cred *nfs4_get_exchange_id_cred(struct nfs_client *clp)
-{
-       struct rpc_cred *cred;
-
-       spin_lock(&clp->cl_lock);
-       cred = nfs4_get_machine_cred_locked(clp);
-       spin_unlock(&clp->cl_lock);
-       return cred;
-}
-
 #endif /* CONFIG_NFS_V4_1 */
 
-static struct rpc_cred *
-nfs4_get_setclientid_cred_server(struct nfs_server *server)
-{
-       struct nfs_client *clp = server->nfs_client;
-       struct rpc_cred *cred = NULL;
-       struct nfs4_state_owner *sp;
-       struct rb_node *pos;
-
-       spin_lock(&clp->cl_lock);
-       pos = rb_first(&server->state_owners);
-       if (pos != NULL) {
-               sp = rb_entry(pos, struct nfs4_state_owner, so_server_node);
-               cred = get_rpccred(sp->so_cred);
-       }
-       spin_unlock(&clp->cl_lock);
-       return cred;
-}
-
 /**
- * nfs4_get_setclientid_cred - Acquire credential for a setclientid operation
+ * nfs4_get_clid_cred - Acquire credential for a setclientid operation
  * @clp: client state handle
  *
  * Returns an rpc_cred with reference count bumped, or NULL.
  */
-struct rpc_cred *nfs4_get_setclientid_cred(struct nfs_client *clp)
+struct rpc_cred *nfs4_get_clid_cred(struct nfs_client *clp)
 {
-       struct nfs_server *server;
        struct rpc_cred *cred;
 
        spin_lock(&clp->cl_lock);
        cred = nfs4_get_machine_cred_locked(clp);
        spin_unlock(&clp->cl_lock);
-       if (cred != NULL)
-               goto out;
-
-       rcu_read_lock();
-       list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
-               cred = nfs4_get_setclientid_cred_server(server);
-               if (cred != NULL)
-                       break;
-       }
-       rcu_read_unlock();
-
-out:
        return cred;
 }
 
@@ -1618,7 +1590,7 @@ static void nfs4_state_end_reclaim_reboot(struct nfs_client *clp)
        if (!nfs4_state_clear_reclaim_reboot(clp))
                return;
        ops = clp->cl_mvops->reboot_recovery_ops;
-       cred = ops->get_clid_cred(clp);
+       cred = nfs4_get_clid_cred(clp);
        nfs4_reclaim_complete(clp, ops, cred);
        put_rpccred(cred);
 }
@@ -1732,7 +1704,7 @@ static int nfs4_check_lease(struct nfs_client *clp)
        cred = ops->get_state_renewal_cred_locked(clp);
        spin_unlock(&clp->cl_lock);
        if (cred == NULL) {
-               cred = nfs4_get_setclientid_cred(clp);
+               cred = nfs4_get_clid_cred(clp);
                status = -ENOKEY;
                if (cred == NULL)
                        goto out;
@@ -1804,7 +1776,7 @@ static int nfs4_establish_lease(struct nfs_client *clp)
                clp->cl_mvops->reboot_recovery_ops;
        int status;
 
-       cred = ops->get_clid_cred(clp);
+       cred = nfs4_get_clid_cred(clp);
        if (cred == NULL)
                return -ENOENT;
        status = ops->establish_clid(clp, cred);
@@ -1878,7 +1850,7 @@ int nfs4_discover_server_trunking(struct nfs_client *clp,
        mutex_lock(&nfs_clid_init_mutex);
 again:
        status  = -ENOENT;
-       cred = ops->get_clid_cred(clp);
+       cred = nfs4_get_clid_cred(clp);
        if (cred == NULL)
                goto out_unlock;
 
@@ -1896,7 +1868,11 @@ again:
                        __func__, status);
                goto again;
        case -EACCES:
-               if (i++)
+               if (i++ == 0) {
+                       nfs4_root_machine_cred(clp);
+                       goto again;
+               }
+               if (i > 2)
                        break;
        case -NFS4ERR_CLID_INUSE:
        case -NFS4ERR_WRONGSEC:
@@ -2052,7 +2028,7 @@ static int nfs4_reset_session(struct nfs_client *clp)
        if (!nfs4_has_session(clp))
                return 0;
        nfs4_begin_drain_session(clp);
-       cred = nfs4_get_exchange_id_cred(clp);
+       cred = nfs4_get_clid_cred(clp);
        status = nfs4_proc_destroy_session(clp->cl_session, cred);
        switch (status) {
        case 0:
@@ -2095,7 +2071,7 @@ static int nfs4_bind_conn_to_session(struct nfs_client *clp)
        if (!nfs4_has_session(clp))
                return 0;
        nfs4_begin_drain_session(clp);
-       cred = nfs4_get_exchange_id_cred(clp);
+       cred = nfs4_get_clid_cred(clp);
        ret = nfs4_proc_bind_conn_to_session(clp, cred);
        if (cred)
                put_rpccred(cred);
@@ -2116,7 +2092,7 @@ static int nfs4_bind_conn_to_session(struct nfs_client *clp)
 }
 #else /* CONFIG_NFS_V4_1 */
 static int nfs4_reset_session(struct nfs_client *clp) { return 0; }
-static int nfs4_end_drain_session(struct nfs_client *clp) { return 0; }
+static void nfs4_end_drain_session(struct nfs_client *clp) { }
 
 static int nfs4_bind_conn_to_session(struct nfs_client *clp)
 {
index 3850b018815f2d07e4740fdd3ff8200523b9fe92..1a4a3bd415ed6c6b29e7cedf7809f9c1970288d4 100644 (file)
@@ -997,12 +997,10 @@ static void encode_attrs(struct xdr_stream *xdr, const struct iattr *iap,
        int owner_namelen = 0;
        int owner_grouplen = 0;
        __be32 *p;
-       __be32 *q;
-       int len;
-       uint32_t bmval_len = 2;
-       uint32_t bmval0 = 0;
-       uint32_t bmval1 = 0;
-       uint32_t bmval2 = 0;
+       unsigned i;
+       uint32_t len = 0;
+       uint32_t bmval_len;
+       uint32_t bmval[3] = { 0 };
 
        /*
         * We reserve enough space to write the entire attribute buffer at once.
@@ -1011,13 +1009,14 @@ static void encode_attrs(struct xdr_stream *xdr, const struct iattr *iap,
         * = 40 bytes, plus any contribution from variable-length fields
         *            such as owner/group.
         */
-       len = 8;
-
-       /* Sigh */
-       if (iap->ia_valid & ATTR_SIZE)
+       if (iap->ia_valid & ATTR_SIZE) {
+               bmval[0] |= FATTR4_WORD0_SIZE;
                len += 8;
-       if (iap->ia_valid & ATTR_MODE)
+       }
+       if (iap->ia_valid & ATTR_MODE) {
+               bmval[1] |= FATTR4_WORD1_MODE;
                len += 4;
+       }
        if (iap->ia_valid & ATTR_UID) {
                owner_namelen = nfs_map_uid_to_name(server, iap->ia_uid, owner_name, IDMAP_NAMESZ);
                if (owner_namelen < 0) {
@@ -1028,6 +1027,7 @@ static void encode_attrs(struct xdr_stream *xdr, const struct iattr *iap,
                        owner_namelen = sizeof("nobody") - 1;
                        /* goto out; */
                }
+               bmval[1] |= FATTR4_WORD1_OWNER;
                len += 4 + (XDR_QUADLEN(owner_namelen) << 2);
        }
        if (iap->ia_valid & ATTR_GID) {
@@ -1039,92 +1039,73 @@ static void encode_attrs(struct xdr_stream *xdr, const struct iattr *iap,
                        owner_grouplen = sizeof("nobody") - 1;
                        /* goto out; */
                }
+               bmval[1] |= FATTR4_WORD1_OWNER_GROUP;
                len += 4 + (XDR_QUADLEN(owner_grouplen) << 2);
        }
-       if (iap->ia_valid & ATTR_ATIME_SET)
+       if (iap->ia_valid & ATTR_ATIME_SET) {
+               bmval[1] |= FATTR4_WORD1_TIME_ACCESS_SET;
                len += 16;
-       else if (iap->ia_valid & ATTR_ATIME)
+       } else if (iap->ia_valid & ATTR_ATIME) {
+               bmval[1] |= FATTR4_WORD1_TIME_ACCESS_SET;
                len += 4;
-       if (iap->ia_valid & ATTR_MTIME_SET)
+       }
+       if (iap->ia_valid & ATTR_MTIME_SET) {
+               bmval[1] |= FATTR4_WORD1_TIME_MODIFY_SET;
                len += 16;
-       else if (iap->ia_valid & ATTR_MTIME)
+       } else if (iap->ia_valid & ATTR_MTIME) {
+               bmval[1] |= FATTR4_WORD1_TIME_MODIFY_SET;
                len += 4;
+       }
        if (label) {
                len += 4 + 4 + 4 + (XDR_QUADLEN(label->len) << 2);
-               bmval_len = 3;
+               bmval[2] |= FATTR4_WORD2_SECURITY_LABEL;
        }
 
-       len += bmval_len << 2;
-       p = reserve_space(xdr, len);
+       if (bmval[2] != 0)
+               bmval_len = 3;
+       else if (bmval[1] != 0)
+               bmval_len = 2;
+       else
+               bmval_len = 1;
+
+       p = reserve_space(xdr, 4 + (bmval_len << 2) + 4 + len);
 
-       /*
-        * We write the bitmap length now, but leave the bitmap and the attribute
-        * buffer length to be backfilled at the end of this routine.
-        */
        *p++ = cpu_to_be32(bmval_len);
-       q = p;
-       /* Skip bitmap entries + attrlen */
-       p += bmval_len + 1;
+       for (i = 0; i < bmval_len; i++)
+               *p++ = cpu_to_be32(bmval[i]);
+       *p++ = cpu_to_be32(len);
 
-       if (iap->ia_valid & ATTR_SIZE) {
-               bmval0 |= FATTR4_WORD0_SIZE;
+       if (bmval[0] & FATTR4_WORD0_SIZE)
                p = xdr_encode_hyper(p, iap->ia_size);
-       }
-       if (iap->ia_valid & ATTR_MODE) {
-               bmval1 |= FATTR4_WORD1_MODE;
+       if (bmval[1] & FATTR4_WORD1_MODE)
                *p++ = cpu_to_be32(iap->ia_mode & S_IALLUGO);
-       }
-       if (iap->ia_valid & ATTR_UID) {
-               bmval1 |= FATTR4_WORD1_OWNER;
+       if (bmval[1] & FATTR4_WORD1_OWNER)
                p = xdr_encode_opaque(p, owner_name, owner_namelen);
-       }
-       if (iap->ia_valid & ATTR_GID) {
-               bmval1 |= FATTR4_WORD1_OWNER_GROUP;
+       if (bmval[1] & FATTR4_WORD1_OWNER_GROUP)
                p = xdr_encode_opaque(p, owner_group, owner_grouplen);
+       if (bmval[1] & FATTR4_WORD1_TIME_ACCESS_SET) {
+               if (iap->ia_valid & ATTR_ATIME_SET) {
+                       *p++ = cpu_to_be32(NFS4_SET_TO_CLIENT_TIME);
+                       p = xdr_encode_hyper(p, (s64)iap->ia_atime.tv_sec);
+                       *p++ = cpu_to_be32(iap->ia_atime.tv_nsec);
+               } else
+                       *p++ = cpu_to_be32(NFS4_SET_TO_SERVER_TIME);
        }
-       if (iap->ia_valid & ATTR_ATIME_SET) {
-               bmval1 |= FATTR4_WORD1_TIME_ACCESS_SET;
-               *p++ = cpu_to_be32(NFS4_SET_TO_CLIENT_TIME);
-               p = xdr_encode_hyper(p, (s64)iap->ia_atime.tv_sec);
-               *p++ = cpu_to_be32(iap->ia_atime.tv_nsec);
-       }
-       else if (iap->ia_valid & ATTR_ATIME) {
-               bmval1 |= FATTR4_WORD1_TIME_ACCESS_SET;
-               *p++ = cpu_to_be32(NFS4_SET_TO_SERVER_TIME);
-       }
-       if (iap->ia_valid & ATTR_MTIME_SET) {
-               bmval1 |= FATTR4_WORD1_TIME_MODIFY_SET;
-               *p++ = cpu_to_be32(NFS4_SET_TO_CLIENT_TIME);
-               p = xdr_encode_hyper(p, (s64)iap->ia_mtime.tv_sec);
-               *p++ = cpu_to_be32(iap->ia_mtime.tv_nsec);
-       }
-       else if (iap->ia_valid & ATTR_MTIME) {
-               bmval1 |= FATTR4_WORD1_TIME_MODIFY_SET;
-               *p++ = cpu_to_be32(NFS4_SET_TO_SERVER_TIME);
+       if (bmval[1] & FATTR4_WORD1_TIME_MODIFY_SET) {
+               if (iap->ia_valid & ATTR_MTIME_SET) {
+                       *p++ = cpu_to_be32(NFS4_SET_TO_CLIENT_TIME);
+                       p = xdr_encode_hyper(p, (s64)iap->ia_mtime.tv_sec);
+                       *p++ = cpu_to_be32(iap->ia_mtime.tv_nsec);
+               } else
+                       *p++ = cpu_to_be32(NFS4_SET_TO_SERVER_TIME);
        }
-       if (label) {
-               bmval2 |= FATTR4_WORD2_SECURITY_LABEL;
+       if (bmval[2] & FATTR4_WORD2_SECURITY_LABEL) {
                *p++ = cpu_to_be32(label->lfs);
                *p++ = cpu_to_be32(label->pi);
                *p++ = cpu_to_be32(label->len);
                p = xdr_encode_opaque_fixed(p, label->label, label->len);
        }
 
-       /*
-        * Now we backfill the bitmap and the attribute buffer length.
-        */
-       if (len != ((char *)p - (char *)q) + 4) {
-               printk(KERN_ERR "NFS: Attr length error, %u != %Zu\n",
-                               len, ((char *)p - (char *)q) + 4);
-               BUG();
-       }
-       *q++ = htonl(bmval0);
-       *q++ = htonl(bmval1);
-       if (bmval_len == 3)
-               *q++ = htonl(bmval2);
-       len = (char *)p - (char *)(q + 1);
-       *q = htonl(len);
-
 /* out: */
 }
 
index f6db66d8f647069a4cffde4e609bc7f23e3a2da0..d26bd63e93cec81db8baa7f43f669f1c5dbe6574 100644 (file)
@@ -2084,6 +2084,8 @@ static int nfs_validate_text_mount_data(void *options,
                max_namelen = NFS4_MAXNAMLEN;
                max_pathlen = NFS4_MAXPATHLEN;
                nfs_validate_transport_protocol(args);
+               if (args->nfs_server.protocol == XPRT_TRANSPORT_UDP)
+                       goto out_invalid_transport_udp;
                nfs4_validate_mount_flags(args);
 #else
                goto out_v4_not_compiled;
@@ -2106,6 +2108,10 @@ static int nfs_validate_text_mount_data(void *options,
 out_v4_not_compiled:
        dfprintk(MOUNT, "NFS: NFSv4 is not compiled into kernel\n");
        return -EPROTONOSUPPORT;
+#else
+out_invalid_transport_udp:
+       dfprintk(MOUNT, "NFSv4: Unsupported transport protocol udp\n");
+       return -EINVAL;
 #endif /* !CONFIG_NFS_V4 */
 
 out_no_address:
@@ -2715,6 +2721,8 @@ static int nfs4_validate_mount_data(void *options,
                args->acdirmax  = data->acdirmax;
                args->nfs_server.protocol = data->proto;
                nfs_validate_transport_protocol(args);
+               if (args->nfs_server.protocol == XPRT_TRANSPORT_UDP)
+                       goto out_invalid_transport_udp;
 
                break;
        default:
@@ -2735,6 +2743,10 @@ out_inval_auth:
 out_no_address:
        dfprintk(MOUNT, "NFS4: mount program didn't pass remote address\n");
        return -EINVAL;
+
+out_invalid_transport_udp:
+       dfprintk(MOUNT, "NFSv4: Unsupported transport protocol udp\n");
+       return -EINVAL;
 }
 
 /*
index 43f42290e5df096fe7eced0266759b45264a61a1..5e609b17ada4862ec18f1e109b2e6d7597031aca 100644 (file)
@@ -282,19 +282,14 @@ static unsigned int file_hashval(struct inode *ino)
 
 static struct hlist_head file_hashtbl[FILE_HASH_SIZE];
 
-static void __nfs4_file_get_access(struct nfs4_file *fp, int oflag)
-{
-       WARN_ON_ONCE(!(fp->fi_fds[oflag] || fp->fi_fds[O_RDWR]));
-       atomic_inc(&fp->fi_access[oflag]);
-}
-
 static void nfs4_file_get_access(struct nfs4_file *fp, int oflag)
 {
+       WARN_ON_ONCE(!fp->fi_fds[oflag]);
        if (oflag == O_RDWR) {
-               __nfs4_file_get_access(fp, O_RDONLY);
-               __nfs4_file_get_access(fp, O_WRONLY);
+               atomic_inc(&fp->fi_access[O_RDONLY]);
+               atomic_inc(&fp->fi_access[O_WRONLY]);
        } else
-               __nfs4_file_get_access(fp, oflag);
+               atomic_inc(&fp->fi_access[oflag]);
 }
 
 static void nfs4_file_put_fd(struct nfs4_file *fp, int oflag)
@@ -3035,7 +3030,7 @@ static int nfs4_setlease(struct nfs4_delegation *dp)
        if (status) {
                list_del_init(&dp->dl_perclnt);
                locks_free_lock(fl);
-               return -ENOMEM;
+               return status;
        }
        fp->fi_lease = fl;
        fp->fi_deleg_file = get_file(fl->fl_file);
index 08fdb77852acd4f2ca692c5c8eb010e55b00aaf5..7aeb8ee013050453d136fc86e80022616118a07f 100644 (file)
@@ -153,8 +153,8 @@ const struct file_operations nilfs_file_operations = {
        .llseek         = generic_file_llseek,
        .read           = do_sync_read,
        .write          = do_sync_write,
-       .aio_read       = generic_file_aio_read,
-       .aio_write      = generic_file_aio_write,
+       .read_iter      = generic_file_read_iter,
+       .write_iter     = generic_file_write_iter,
        .unlocked_ioctl = nilfs_ioctl,
 #ifdef CONFIG_COMPAT
        .compat_ioctl   = nilfs_compat_ioctl,
index b1a5277cfd182adcbfda860ad395e07a14f94db8..059b760f7f624d417f5aa5531fa525c8f11a8186 100644 (file)
@@ -298,8 +298,8 @@ static int nilfs_write_end(struct file *file, struct address_space *mapping,
 }
 
 static ssize_t
-nilfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
-               loff_t offset, unsigned long nr_segs)
+nilfs_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter,
+               loff_t offset)
 {
        struct file *file = iocb->ki_filp;
        struct address_space *mapping = file->f_mapping;
@@ -310,7 +310,7 @@ nilfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
                return 0;
 
        /* Needs synchronization with the cleaner */
-       size = blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
+       size = blockdev_direct_IO(rw, iocb, inode, iter, offset,
                                  nilfs_get_block);
 
        /*
@@ -319,7 +319,7 @@ nilfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
         */
        if (unlikely((rw & WRITE) && size < 0)) {
                loff_t isize = i_size_read(inode);
-               loff_t end = offset + iov_length(iov, nr_segs);
+               loff_t end = offset + iov_iter_count(iter);
 
                if (end > isize)
                        nilfs_write_failed(mapping, end);
index 2abf97b2a592b7dafaad4e2cfc0f9a6f7442f475..c1b3c1cf6e929cbf87203efe41af15aa1dd36df5 100644 (file)
@@ -622,9 +622,8 @@ static int ocfs2_releasepage(struct page *page, gfp_t wait)
 
 static ssize_t ocfs2_direct_IO(int rw,
                               struct kiocb *iocb,
-                              const struct iovec *iov,
-                              loff_t offset,
-                              unsigned long nr_segs)
+                              struct iov_iter *iter,
+                              loff_t offset)
 {
        struct file *file = iocb->ki_filp;
        struct inode *inode = file_inode(file)->i_mapping->host;
@@ -641,8 +640,7 @@ static ssize_t ocfs2_direct_IO(int rw,
                return 0;
 
        return __blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev,
-                                   iov, offset, nr_segs,
-                                   ocfs2_direct_IO_get_blocks,
+                                   iter, offset, ocfs2_direct_IO_get_blocks,
                                    ocfs2_dio_end_io, NULL, 0);
 }
 
index f671e49beb348b5c33dfba3e3c4b5f322c73e43b..573f41d1e45935ff67ea600d6c54edb3df0c0829 100644 (file)
@@ -74,7 +74,7 @@ static inline void ocfs2_iocb_set_rw_locked(struct kiocb *iocb, int level)
 /*
  * Using a named enum representing lock types in terms of #N bit stored in
  * iocb->private, which is going to be used for communication between
- * ocfs2_dio_end_io() and ocfs2_file_aio_write/read().
+ * ocfs2_dio_end_io() and ocfs2_file_write/read_iter().
  */
 enum ocfs2_iocb_lock_bits {
        OCFS2_IOCB_RW_LOCK = 0,
index 5c1c864e81cc0dbcb0cd2f358f52f3c62ae6907d..5b62c95a6f5d21449a2691cbf2ac7277f9510242 100644 (file)
@@ -35,6 +35,7 @@
 #include <linux/time.h>
 #include <linux/debugfs.h>
 #include <linux/slab.h>
+#include <linux/bitmap.h>
 
 #include "heartbeat.h"
 #include "tcp.h"
@@ -282,15 +283,6 @@ struct o2hb_bio_wait_ctxt {
        int               wc_error;
 };
 
-static int o2hb_pop_count(void *map, int count)
-{
-       int i = -1, pop = 0;
-
-       while ((i = find_next_bit(map, count, i + 1)) < count)
-               pop++;
-       return pop;
-}
-
 static void o2hb_write_timeout(struct work_struct *work)
 {
        int failed, quorum;
@@ -307,9 +299,9 @@ static void o2hb_write_timeout(struct work_struct *work)
                spin_lock_irqsave(&o2hb_live_lock, flags);
                if (test_bit(reg->hr_region_num, o2hb_quorum_region_bitmap))
                        set_bit(reg->hr_region_num, o2hb_failed_region_bitmap);
-               failed = o2hb_pop_count(&o2hb_failed_region_bitmap,
+               failed = bitmap_weight(o2hb_failed_region_bitmap,
                                        O2NM_MAX_REGIONS);
-               quorum = o2hb_pop_count(&o2hb_quorum_region_bitmap,
+               quorum = bitmap_weight(o2hb_quorum_region_bitmap,
                                        O2NM_MAX_REGIONS);
                spin_unlock_irqrestore(&o2hb_live_lock, flags);
 
@@ -771,7 +763,7 @@ static void o2hb_set_quorum_device(struct o2hb_region *reg)
         * If global heartbeat active, unpin all regions if the
         * region count > CUT_OFF
         */
-       if (o2hb_pop_count(&o2hb_quorum_region_bitmap,
+       if (bitmap_weight(o2hb_quorum_region_bitmap,
                           O2NM_MAX_REGIONS) > O2HB_PIN_CUT_OFF)
                o2hb_region_unpin(NULL);
 unlock:
@@ -956,23 +948,9 @@ out:
        return changed;
 }
 
-/* This could be faster if we just implmented a find_last_bit, but I
- * don't think the circumstances warrant it. */
-static int o2hb_highest_node(unsigned long *nodes,
-                            int numbits)
+static int o2hb_highest_node(unsigned long *nodes, int numbits)
 {
-       int highest, node;
-
-       highest = numbits;
-       node = -1;
-       while ((node = find_next_bit(nodes, numbits, node + 1)) != -1) {
-               if (node >= numbits)
-                       break;
-
-               highest = node;
-       }
-
-       return highest;
+       return find_last_bit(nodes, numbits);
 }
 
 static int o2hb_do_disk_heartbeat(struct o2hb_region *reg)
@@ -1831,7 +1809,7 @@ static ssize_t o2hb_region_dev_write(struct o2hb_region *reg,
        live_threshold = O2HB_LIVE_THRESHOLD;
        if (o2hb_global_heartbeat_active()) {
                spin_lock(&o2hb_live_lock);
-               if (o2hb_pop_count(&o2hb_region_bitmap, O2NM_MAX_REGIONS) == 1)
+               if (bitmap_weight(o2hb_region_bitmap, O2NM_MAX_REGIONS) == 1)
                        live_threshold <<= 1;
                spin_unlock(&o2hb_live_lock);
        }
@@ -2182,7 +2160,7 @@ static void o2hb_heartbeat_group_drop_item(struct config_group *group,
        if (!o2hb_dependent_users)
                goto unlock;
 
-       if (o2hb_pop_count(&o2hb_quorum_region_bitmap,
+       if (bitmap_weight(o2hb_quorum_region_bitmap,
                           O2NM_MAX_REGIONS) <= O2HB_PIN_CUT_OFF)
                o2hb_region_pin(NULL);
 
@@ -2482,7 +2460,7 @@ static int o2hb_region_inc_user(const char *region_uuid)
        if (o2hb_dependent_users > 1)
                goto unlock;
 
-       if (o2hb_pop_count(&o2hb_quorum_region_bitmap,
+       if (bitmap_weight(o2hb_quorum_region_bitmap,
                           O2NM_MAX_REGIONS) <= O2HB_PIN_CUT_OFF)
                ret = o2hb_region_pin(NULL);
 
index baa2b9ef7eef90094dbd3ef8d76f6a6e1df64224..2260fb9e650831fef349ce2c6f6dcd878ffa45d2 100644 (file)
@@ -199,7 +199,8 @@ extern struct mlog_bits mlog_and_bits, mlog_not_bits;
 #define mlog_errno(st) do {                                            \
        int _st = (st);                                                 \
        if (_st != -ERESTARTSYS && _st != -EINTR &&                     \
-           _st != AOP_TRUNCATED_PAGE && _st != -ENOSPC)                \
+           _st != AOP_TRUNCATED_PAGE && _st != -ENOSPC &&              \
+           _st != -EDQUOT)                                             \
                mlog(ML_ERROR, "status = %lld\n", (long long)_st);      \
 } while (0)
 
index 33ecbe0e6734a7deaf0c8712934b9eb78fb44197..3d09a940c015d4e0304aaf2d3c98baeb3c0b80f8 100644 (file)
@@ -1888,8 +1888,10 @@ ok:
                         * up nodes that this node contacted */
                        while ((nn = find_next_bit (mle->response_map, O2NM_MAX_NODES,
                                                    nn+1)) < O2NM_MAX_NODES) {
-                               if (nn != dlm->node_num && nn != assert->node_idx)
+                               if (nn != dlm->node_num && nn != assert->node_idx) {
                                        master_request = 1;
+                                       break;
+                               }
                        }
                }
                mle->master = assert->node_idx;
@@ -2357,6 +2359,10 @@ static int dlm_is_lockres_migrateable(struct dlm_ctxt *dlm,
 
        assert_spin_locked(&res->spinlock);
 
+       /* delay migration when the lockres is in MIGRATING state */
+       if (res->state & DLM_LOCK_RES_MIGRATING)
+               return 0;
+
        if (res->owner != dlm->node_num)
                return 0;
 
index 773bd32bfd8c8bb56bcf5f7ee7880cd65bbe8f26..317c0d4024d823d383318170195c556d86cb5542 100644 (file)
@@ -1882,6 +1882,13 @@ static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
 
                if (ml->type == LKM_NLMODE)
                        goto skip_lvb;
+
+               /*
+                * If the lock is in the blocked list it can't have a valid lvb,
+                * so skip it
+                */
+               if (ml->list == DLM_BLOCKED_LIST)
+                       goto skip_lvb;
 
                if (!dlm_lvb_is_empty(mres->lvb)) {
                        if (lksb->flags & DLM_LKSB_PUT_LVB) {
index 3261d71319eeb27d3a569aeccf4f3f6e27a95496..0a355f24b6e581cadf7384123250b9b084242973 100644 (file)
@@ -2220,15 +2220,13 @@ out:
        return ret;
 }
 
-static ssize_t ocfs2_file_aio_write(struct kiocb *iocb,
-                                   const struct iovec *iov,
-                                   unsigned long nr_segs,
-                                   loff_t pos)
+static ssize_t ocfs2_file_write_iter(struct kiocb *iocb,
+                                    struct iov_iter *iter,
+                                    loff_t pos)
 {
        int ret, direct_io, appending, rw_level, have_alloc_sem  = 0;
        int can_do_direct, has_refcount = 0;
        ssize_t written = 0;
-       size_t ocount;          /* original count */
        size_t count;           /* after file limit checks */
        loff_t old_size, *ppos = &iocb->ki_pos;
        u32 old_clusters;
@@ -2239,11 +2237,11 @@ static ssize_t ocfs2_file_aio_write(struct kiocb *iocb,
                               OCFS2_MOUNT_COHERENCY_BUFFERED);
        int unaligned_dio = 0;
 
-       trace_ocfs2_file_aio_write(inode, file, file->f_path.dentry,
+       trace_ocfs2_file_write_iter(inode, file, file->f_path.dentry,
                (unsigned long long)OCFS2_I(inode)->ip_blkno,
                file->f_path.dentry->d_name.len,
                file->f_path.dentry->d_name.name,
-               (unsigned int)nr_segs);
+               (unsigned long long)pos);
 
        if (iocb->ki_left == 0)
                return 0;
@@ -2343,28 +2341,24 @@ relock:
        /* communicate with ocfs2_dio_end_io */
        ocfs2_iocb_set_rw_locked(iocb, rw_level);
 
-       ret = generic_segment_checks(iov, &nr_segs, &ocount,
-                                    VERIFY_READ);
-       if (ret)
-               goto out_dio;
 
-       count = ocount;
+       count = iov_iter_count(iter);
        ret = generic_write_checks(file, ppos, &count,
                                   S_ISBLK(inode->i_mode));
        if (ret)
                goto out_dio;
 
        if (direct_io) {
-               written = generic_file_direct_write(iocb, iov, &nr_segs, *ppos,
-                                                   ppos, count, ocount);
+               written = generic_file_direct_write_iter(iocb, iter, *ppos,
+                                                   ppos, count);
                if (written < 0) {
                        ret = written;
                        goto out_dio;
                }
        } else {
                current->backing_dev_info = file->f_mapping->backing_dev_info;
-               written = generic_file_buffered_write(iocb, iov, nr_segs, *ppos,
-                                                     ppos, count, 0);
+               written = generic_file_buffered_write_iter(iocb, iter, *ppos,
+                                                          ppos, count, 0);
                current->backing_dev_info = NULL;
        }
 
@@ -2520,7 +2514,7 @@ static ssize_t ocfs2_file_splice_read(struct file *in,
                        in->f_path.dentry->d_name.name, len);
 
        /*
-        * See the comment in ocfs2_file_aio_read()
+        * See the comment in ocfs2_file_read_iter()
         */
        ret = ocfs2_inode_lock_atime(inode, in->f_path.mnt, &lock_level);
        if (ret < 0) {
@@ -2535,19 +2529,18 @@ bail:
        return ret;
 }
 
-static ssize_t ocfs2_file_aio_read(struct kiocb *iocb,
-                                  const struct iovec *iov,
-                                  unsigned long nr_segs,
-                                  loff_t pos)
+static ssize_t ocfs2_file_read_iter(struct kiocb *iocb,
+                                   struct iov_iter *iter,
+                                   loff_t pos)
 {
        int ret = 0, rw_level = -1, have_alloc_sem = 0, lock_level = 0;
        struct file *filp = iocb->ki_filp;
        struct inode *inode = file_inode(filp);
 
-       trace_ocfs2_file_aio_read(inode, filp, filp->f_path.dentry,
+       trace_ocfs2_file_read_iter(inode, filp, filp->f_path.dentry,
                        (unsigned long long)OCFS2_I(inode)->ip_blkno,
                        filp->f_path.dentry->d_name.len,
-                       filp->f_path.dentry->d_name.name, nr_segs);
+                       filp->f_path.dentry->d_name.name, pos);
 
 
        if (!inode) {
@@ -2583,7 +2576,7 @@ static ssize_t ocfs2_file_aio_read(struct kiocb *iocb,
         *
         * Take and drop the meta data lock to update inode fields
         * like i_size. This allows the checks down below
-        * generic_file_aio_read() a chance of actually working.
+        * generic_file_read_iter() a chance of actually working.
         */
        ret = ocfs2_inode_lock_atime(inode, filp->f_path.mnt, &lock_level);
        if (ret < 0) {
@@ -2592,13 +2585,13 @@ static ssize_t ocfs2_file_aio_read(struct kiocb *iocb,
        }
        ocfs2_inode_unlock(inode, lock_level);
 
-       ret = generic_file_aio_read(iocb, iov, nr_segs, iocb->ki_pos);
-       trace_generic_file_aio_read_ret(ret);
+       ret = generic_file_read_iter(iocb, iter, iocb->ki_pos);
+       trace_generic_file_read_iter_ret(ret);
 
        /* buffered aio wouldn't have proper lock coverage today */
        BUG_ON(ret == -EIOCBQUEUED && !(filp->f_flags & O_DIRECT));
 
-       /* see ocfs2_file_aio_write */
+       /* see ocfs2_file_write_iter */
        if (ret == -EIOCBQUEUED || !ocfs2_iocb_is_rw_locked(iocb)) {
                rw_level = -1;
                have_alloc_sem = 0;
@@ -2686,8 +2679,8 @@ const struct file_operations ocfs2_fops = {
        .fsync          = ocfs2_sync_file,
        .release        = ocfs2_file_release,
        .open           = ocfs2_file_open,
-       .aio_read       = ocfs2_file_aio_read,
-       .aio_write      = ocfs2_file_aio_write,
+       .read_iter      = ocfs2_file_read_iter,
+       .write_iter     = ocfs2_file_write_iter,
        .unlocked_ioctl = ocfs2_ioctl,
 #ifdef CONFIG_COMPAT
        .compat_ioctl   = ocfs2_compat_ioctl,
@@ -2734,8 +2727,8 @@ const struct file_operations ocfs2_fops_no_plocks = {
        .fsync          = ocfs2_sync_file,
        .release        = ocfs2_file_release,
        .open           = ocfs2_file_open,
-       .aio_read       = ocfs2_file_aio_read,
-       .aio_write      = ocfs2_file_aio_write,
+       .read_iter      = ocfs2_file_read_iter,
+       .write_iter     = ocfs2_file_write_iter,
        .unlocked_ioctl = ocfs2_ioctl,
 #ifdef CONFIG_COMPAT
        .compat_ioctl   = ocfs2_compat_ioctl,
index 3b481f490633af2f483afd1817fe8ad538908b6a..1c5018c310d3be039a3010529be3929ecb5d5ce2 100644 (file)
@@ -1310,13 +1310,13 @@ DEFINE_OCFS2_FILE_OPS(ocfs2_file_release);
 
 DEFINE_OCFS2_FILE_OPS(ocfs2_sync_file);
 
-DEFINE_OCFS2_FILE_OPS(ocfs2_file_aio_write);
+DEFINE_OCFS2_FILE_OPS(ocfs2_file_write_iter);
 
 DEFINE_OCFS2_FILE_OPS(ocfs2_file_splice_write);
 
 DEFINE_OCFS2_FILE_OPS(ocfs2_file_splice_read);
 
-DEFINE_OCFS2_FILE_OPS(ocfs2_file_aio_read);
+DEFINE_OCFS2_FILE_OPS(ocfs2_file_read_iter);
 
 DEFINE_OCFS2_ULL_ULL_ULL_EVENT(ocfs2_truncate_file);
 
@@ -1474,7 +1474,7 @@ TRACE_EVENT(ocfs2_prepare_inode_for_write,
                  __entry->direct_io, __entry->has_refcount)
 );
 
-DEFINE_OCFS2_INT_EVENT(generic_file_aio_read_ret);
+DEFINE_OCFS2_INT_EVENT(generic_file_read_iter_ret);
 
 /* End of trace events for fs/ocfs2/file.c. */
 
index e5c7f15465b49c2b744fe4fca30e107fa1d1ddae..19f134e896a9a8bdd69a387015819e4ebc3c2705 100644 (file)
@@ -32,7 +32,7 @@ enum ocfs2_xattr_type {
 
 struct ocfs2_security_xattr_info {
        int enable;
-       char *name;
+       const char *name;
        void *value;
        size_t value_len;
 };
index e0d9b3e722bd41f91cf4b58f76aa1f34e2684149..badafd8ef4c718877b775c84ca1d8342c1932681 100644 (file)
@@ -339,8 +339,8 @@ const struct file_operations omfs_file_operations = {
        .llseek = generic_file_llseek,
        .read = do_sync_read,
        .write = do_sync_write,
-       .aio_read = generic_file_aio_read,
-       .aio_write = generic_file_aio_write,
+       .read_iter = generic_file_read_iter,
+       .write_iter = generic_file_write_iter,
        .mmap = generic_file_mmap,
        .fsync = generic_file_fsync,
        .splice_read = generic_file_splice_read,
index 94441a407337bb02fb77e89fe93dfbbed169f827..737e15615b0490c40d002a315217033f5463d5b3 100644 (file)
@@ -271,7 +271,7 @@ int proc_readdir_de(struct proc_dir_entry *de, struct file *file,
                de = next;
        } while (de);
        spin_unlock(&proc_subdir_lock);
-       return 0;
+       return 1;
 }
 
 int proc_readdir(struct file *file, struct dir_context *ctx)
index 229e366598daecd4e905e8f51f13efaf0a44e773..e0a790da726d0f710a7585b0a8b6663e9902a0ac 100644 (file)
@@ -205,7 +205,9 @@ static struct dentry *proc_root_lookup(struct inode * dir, struct dentry * dentr
 static int proc_root_readdir(struct file *file, struct dir_context *ctx)
 {
        if (ctx->pos < FIRST_PROCESS_ENTRY) {
-               proc_readdir(file, ctx);
+               int error = proc_readdir(file, ctx);
+               if (unlikely(error <= 0))
+                       return error;
                ctx->pos = FIRST_PROCESS_ENTRY;
        }
 
index ca71db69da07a000814837c20cf76bac049b3ca1..983d9510becca65778e395817d563e793a266741 100644 (file)
@@ -1,6 +1,8 @@
 config PSTORE
        bool "Persistent store support"
        default n
+       select ZLIB_DEFLATE
+       select ZLIB_INFLATE
        help
           This option enables generic access to platform level
           persistent storage via "pstore" filesystem that can
index 71bf5f4ae84c9a9044be1056e83582b7c72fd74c..12823845d32490d220d795ee46e1a328491a1585 100644 (file)
@@ -275,8 +275,8 @@ int pstore_is_mounted(void)
  * Set the mtime & ctime to the date that this record was originally stored.
  */
 int pstore_mkfile(enum pstore_type_id type, char *psname, u64 id, int count,
-                 char *data, size_t size, struct timespec time,
-                 struct pstore_info *psi)
+                 char *data, bool compressed, size_t size,
+                 struct timespec time, struct pstore_info *psi)
 {
        struct dentry           *root = pstore_sb->s_root;
        struct dentry           *dentry;
@@ -315,7 +315,8 @@ int pstore_mkfile(enum pstore_type_id type, char *psname, u64 id, int count,
 
        switch (type) {
        case PSTORE_TYPE_DMESG:
-               sprintf(name, "dmesg-%s-%lld", psname, id);
+               sprintf(name, "dmesg-%s-%lld%s", psname, id,
+                                               compressed ? ".enc.z" : "");
                break;
        case PSTORE_TYPE_CONSOLE:
                sprintf(name, "console-%s", psname);
@@ -345,9 +346,8 @@ int pstore_mkfile(enum pstore_type_id type, char *psname, u64 id, int count,
 
        mutex_lock(&root->d_inode->i_mutex);
 
-       rc = -ENOSPC;
        dentry = d_alloc_name(root, name);
-       if (IS_ERR(dentry))
+       if (!dentry)
                goto fail_lockedalloc;
 
        memcpy(private->data, data, size);
index 937d820f273c2b17c5b47aa365f230970b19cc32..3b3d305277c44eabaa3eae3c7aa95b28a9826a61 100644 (file)
@@ -50,8 +50,9 @@ extern struct pstore_info *psinfo;
 extern void    pstore_set_kmsg_bytes(int);
 extern void    pstore_get_records(int);
 extern int     pstore_mkfile(enum pstore_type_id, char *psname, u64 id,
-                             int count, char *data, size_t size,
-                             struct timespec time, struct pstore_info *psi);
+                             int count, char *data, bool compressed,
+                             size_t size, struct timespec time,
+                             struct pstore_info *psi);
 extern int     pstore_is_mounted(void);
 
 #endif
index 422962ae9fc241ed05bddddd2a313150d5853f0b..4ffb7ab5e397ecfcaede608a39c5025cb4ecd858 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/console.h>
 #include <linux/module.h>
 #include <linux/pstore.h>
+#include <linux/zlib.h>
 #include <linux/string.h>
 #include <linux/timer.h>
 #include <linux/slab.h>
@@ -65,6 +66,15 @@ struct pstore_info *psinfo;
 
 static char *backend;
 
+/* Compression parameters */
+#define COMPR_LEVEL 6
+#define WINDOW_BITS 12
+#define MEM_LEVEL 4
+static struct z_stream_s stream;
+
+static char *big_oops_buf;
+static size_t big_oops_buf_sz;
+
 /* How much of the console log to snapshot */
 static unsigned long kmsg_bytes = 10240;
 
@@ -117,6 +127,121 @@ bool pstore_cannot_block_path(enum kmsg_dump_reason reason)
 }
 EXPORT_SYMBOL_GPL(pstore_cannot_block_path);
 
+/* Derived from logfs_compress() */
+static int pstore_compress(const void *in, void *out, size_t inlen,
+                                                       size_t outlen)
+{
+       int err, ret;
+
+       ret = -EIO;
+       err = zlib_deflateInit2(&stream, COMPR_LEVEL, Z_DEFLATED, WINDOW_BITS,
+                                               MEM_LEVEL, Z_DEFAULT_STRATEGY);
+       if (err != Z_OK)
+               goto error;
+
+       stream.next_in = in;
+       stream.avail_in = inlen;
+       stream.total_in = 0;
+       stream.next_out = out;
+       stream.avail_out = outlen;
+       stream.total_out = 0;
+
+       err = zlib_deflate(&stream, Z_FINISH);
+       if (err != Z_STREAM_END)
+               goto error;
+
+       err = zlib_deflateEnd(&stream);
+       if (err != Z_OK)
+               goto error;
+
+       if (stream.total_out >= stream.total_in)
+               goto error;
+
+       ret = stream.total_out;
+error:
+       return ret;
+}
+
+/* Derived from logfs_uncompress */
+static int pstore_decompress(void *in, void *out, size_t inlen, size_t outlen)
+{
+       int err, ret;
+
+       ret = -EIO;
+       err = zlib_inflateInit(&stream);
+       if (err != Z_OK)
+               goto error;
+
+       stream.next_in = in;
+       stream.avail_in = inlen;
+       stream.total_in = 0;
+       stream.next_out = out;
+       stream.avail_out = outlen;
+       stream.total_out = 0;
+
+       err = zlib_inflate(&stream, Z_FINISH);
+       if (err != Z_STREAM_END)
+               goto error;
+
+       err = zlib_inflateEnd(&stream);
+       if (err != Z_OK)
+               goto error;
+
+       ret = stream.total_out;
+error:
+       return ret;
+}
+
+static void allocate_buf_for_compression(void)
+{
+       size_t size;
+
+       big_oops_buf_sz = (psinfo->bufsize * 100) / 45;
+       big_oops_buf = kmalloc(big_oops_buf_sz, GFP_KERNEL);
+       if (big_oops_buf) {
+               size = max(zlib_deflate_workspacesize(WINDOW_BITS, MEM_LEVEL),
+                       zlib_inflate_workspacesize());
+               stream.workspace = kmalloc(size, GFP_KERNEL);
+               if (!stream.workspace) {
+                       pr_err("pstore: No memory for compression workspace; "
+                               "skipping compression\n");
+                       kfree(big_oops_buf);
+                       big_oops_buf = NULL;
+               }
+       } else {
+               pr_err("No memory for uncompressed data; "
+                       "skipping compression\n");
+               stream.workspace = NULL;
+       }
+
+}
+
+/*
+ * Called when compression fails. The printk buffer has
+ * already been fetched once for compression; fetching it
+ * again would advance the printk iterator and return stale
+ * contents. Instead, copy the most recent messages from
+ * big_oops_buf into psinfo->buf.
+ */
+static size_t copy_kmsg_to_buffer(int hsize, size_t len)
+{
+       size_t total_len;
+       size_t diff;
+
+       total_len = hsize + len;
+
+       if (total_len > psinfo->bufsize) {
+               diff = total_len - psinfo->bufsize + hsize;
+               memcpy(psinfo->buf, big_oops_buf, hsize);
+               memcpy(psinfo->buf + hsize, big_oops_buf + diff,
+                                       psinfo->bufsize - hsize);
+               total_len = psinfo->bufsize;
+       } else
+               memcpy(psinfo->buf, big_oops_buf, total_len);
+
+       return total_len;
+}
+
 /*
  * callback from kmsg_dump. (s2,l2) has the most recently
  * written bytes, older bytes are in (s1,l1). Save as much
@@ -148,22 +273,56 @@ static void pstore_dump(struct kmsg_dumper *dumper,
                char *dst;
                unsigned long size;
                int hsize;
+               int zipped_len = -1;
                size_t len;
+               bool compressed;
+               size_t total_len;
 
-               dst = psinfo->buf;
-               hsize = sprintf(dst, "%s#%d Part%d\n", why, oopscount, part);
-               size = psinfo->bufsize - hsize;
-               dst += hsize;
+               if (big_oops_buf) {
+                       dst = big_oops_buf;
+                       hsize = sprintf(dst, "%s#%d Part%d\n", why,
+                                                       oopscount, part);
+                       size = big_oops_buf_sz - hsize;
 
-               if (!kmsg_dump_get_buffer(dumper, true, dst, size, &len))
-                       break;
+                       if (!kmsg_dump_get_buffer(dumper, true, dst + hsize,
+                                                               size, &len))
+                               break;
+
+                       zipped_len = pstore_compress(dst, psinfo->buf,
+                                               hsize + len, psinfo->bufsize);
+
+                       if (zipped_len > 0) {
+                               compressed = true;
+                               total_len = zipped_len;
+                       } else {
+                               pr_err("pstore: compression failed for Part %d"
+                                       " returned %d\n", part, zipped_len);
+                               pr_err("pstore: Capture uncompressed"
+                                       " oops/panic report of Part %d\n", part);
+                               compressed = false;
+                               total_len = copy_kmsg_to_buffer(hsize, len);
+                       }
+               } else {
+                       dst = psinfo->buf;
+                       hsize = sprintf(dst, "%s#%d Part%d\n", why, oopscount,
+                                                                       part);
+                       size = psinfo->bufsize - hsize;
+                       dst += hsize;
+
+                       if (!kmsg_dump_get_buffer(dumper, true, dst,
+                                                               size, &len))
+                               break;
+
+                       compressed = false;
+                       total_len = hsize + len;
+               }
 
                ret = psinfo->write(PSTORE_TYPE_DMESG, reason, &id, part,
-                                   oopscount, hsize, hsize + len, psinfo);
+                                   oopscount, compressed, total_len, psinfo);
                if (ret == 0 && reason == KMSG_DUMP_OOPS && pstore_is_mounted())
                        pstore_new_entry = 1;
 
-               total += hsize + len;
+               total += total_len;
                part++;
        }
        if (pstore_cannot_block_path(reason)) {
@@ -221,10 +380,10 @@ static void pstore_register_console(void) {}
 static int pstore_write_compat(enum pstore_type_id type,
                               enum kmsg_dump_reason reason,
                               u64 *id, unsigned int part, int count,
-                              size_t hsize, size_t size,
+                              bool compressed, size_t size,
                               struct pstore_info *psi)
 {
-       return psi->write_buf(type, reason, id, part, psinfo->buf, hsize,
+       return psi->write_buf(type, reason, id, part, psinfo->buf, compressed,
                             size, psi);
 }
 
@@ -261,6 +420,8 @@ int pstore_register(struct pstore_info *psi)
                return -EINVAL;
        }
 
+       allocate_buf_for_compression();
+
        if (pstore_is_mounted())
                pstore_get_records(0);
 
@@ -297,6 +458,8 @@ void pstore_get_records(int quiet)
        enum pstore_type_id     type;
        struct timespec         time;
        int                     failed = 0, rc;
+       bool                    compressed;
+       int                     unzipped_len = -1;
 
        if (!psi)
                return;
@@ -305,11 +468,32 @@ void pstore_get_records(int quiet)
        if (psi->open && psi->open(psi))
                goto out;
 
-       while ((size = psi->read(&id, &type, &count, &time, &buf, psi)) > 0) {
+       while ((size = psi->read(&id, &type, &count, &time, &buf, &compressed,
+                               psi)) > 0) {
+               if (compressed && (type == PSTORE_TYPE_DMESG)) {
+                       if (big_oops_buf)
+                               unzipped_len = pstore_decompress(buf,
+                                                       big_oops_buf, size,
+                                                       big_oops_buf_sz);
+
+                       if (unzipped_len > 0) {
+                               buf = big_oops_buf;
+                               size = unzipped_len;
+                               compressed = false;
+                       } else {
+                               pr_err("pstore: decompression failed;"
+                                       "returned %d\n", unzipped_len);
+                               compressed = true;
+                       }
+               }
                rc = pstore_mkfile(type, psi->name, id, count, buf,
-                                 (size_t)size, time, psi);
-               kfree(buf);
-               buf = NULL;
+                                 compressed, (size_t)size, time, psi);
+               if (unzipped_len < 0) {
+                       /* Free buffer other than big oops */
+                       kfree(buf);
+                       buf = NULL;
+               } else
+                       unzipped_len = -1;
                if (rc && (rc != -EEXIST || !quiet))
                        failed++;
        }
index a6119f9469e20f26bbae18292d365100b63029d0..4027c2065842d488755e2cadbc6bbdf62b171e39 100644 (file)
@@ -131,9 +131,31 @@ ramoops_get_next_prz(struct persistent_ram_zone *przs[], uint *c, uint max,
        return prz;
 }
 
+static void ramoops_read_kmsg_hdr(char *buffer, struct timespec *time,
+                                 bool *compressed)
+{
+       char data_type;
+
+       if (sscanf(buffer, RAMOOPS_KERNMSG_HDR "%lu.%lu-%c\n",
+                       &time->tv_sec, &time->tv_nsec, &data_type) == 3) {
+               if (data_type == 'C')
+                       *compressed = true;
+               else
+                       *compressed = false;
+       } else if (sscanf(buffer, RAMOOPS_KERNMSG_HDR "%lu.%lu\n",
+                       &time->tv_sec, &time->tv_nsec) == 2) {
+                       *compressed = false;
+       } else {
+               time->tv_sec = 0;
+               time->tv_nsec = 0;
+               *compressed = false;
+       }
+}
+
 static ssize_t ramoops_pstore_read(u64 *id, enum pstore_type_id *type,
                                   int *count, struct timespec *time,
-                                  char **buf, struct pstore_info *psi)
+                                  char **buf, bool *compressed,
+                                  struct pstore_info *psi)
 {
        ssize_t size;
        ssize_t ecc_notice_size;
@@ -152,10 +174,6 @@ static ssize_t ramoops_pstore_read(u64 *id, enum pstore_type_id *type,
        if (!prz)
                return 0;
 
-       /* TODO(kees): Bogus time for the moment. */
-       time->tv_sec = 0;
-       time->tv_nsec = 0;
-
        size = persistent_ram_old_size(prz);
 
        /* ECC correction notice */
@@ -166,12 +184,14 @@ static ssize_t ramoops_pstore_read(u64 *id, enum pstore_type_id *type,
                return -ENOMEM;
 
        memcpy(*buf, persistent_ram_old(prz), size);
+       ramoops_read_kmsg_hdr(*buf, time, compressed);
        persistent_ram_ecc_string(prz, *buf + size, ecc_notice_size + 1);
 
        return size + ecc_notice_size;
 }
 
-static size_t ramoops_write_kmsg_hdr(struct persistent_ram_zone *prz)
+static size_t ramoops_write_kmsg_hdr(struct persistent_ram_zone *prz,
+                                    bool compressed)
 {
        char *hdr;
        struct timespec timestamp;
@@ -182,8 +202,9 @@ static size_t ramoops_write_kmsg_hdr(struct persistent_ram_zone *prz)
                timestamp.tv_sec = 0;
                timestamp.tv_nsec = 0;
        }
-       hdr = kasprintf(GFP_ATOMIC, RAMOOPS_KERNMSG_HDR "%lu.%lu\n",
-               (long)timestamp.tv_sec, (long)(timestamp.tv_nsec / 1000));
+       hdr = kasprintf(GFP_ATOMIC, RAMOOPS_KERNMSG_HDR "%lu.%lu-%c\n",
+               (long)timestamp.tv_sec, (long)(timestamp.tv_nsec / 1000),
+               compressed ? 'C' : 'D');
        WARN_ON_ONCE(!hdr);
        len = hdr ? strlen(hdr) : 0;
        persistent_ram_write(prz, hdr, len);
@@ -196,7 +217,7 @@ static int notrace ramoops_pstore_write_buf(enum pstore_type_id type,
                                            enum kmsg_dump_reason reason,
                                            u64 *id, unsigned int part,
                                            const char *buf,
-                                           size_t hsize, size_t size,
+                                           bool compressed, size_t size,
                                            struct pstore_info *psi)
 {
        struct ramoops_context *cxt = psi->data;
@@ -242,7 +263,7 @@ static int notrace ramoops_pstore_write_buf(enum pstore_type_id type,
 
        prz = cxt->przs[cxt->dump_write_cnt];
 
-       hlen = ramoops_write_kmsg_hdr(prz);
+       hlen = ramoops_write_kmsg_hdr(prz, compressed);
        if (size + hlen > prz->buffer_size)
                size = prz->buffer_size - hlen;
        persistent_ram_write(prz, buf, size);
index fbad622841f904ebd471ae0668f03018309a076e..9a702e1935383d1e8deda7678e529426262a8636 100644 (file)
@@ -1094,6 +1094,14 @@ static void dquot_claim_reserved_space(struct dquot *dquot, qsize_t number)
        dquot->dq_dqb.dqb_rsvspace -= number;
 }
 
+static void dquot_reclaim_reserved_space(struct dquot *dquot, qsize_t number)
+{
+       if (WARN_ON_ONCE(dquot->dq_dqb.dqb_curspace < number))
+               number = dquot->dq_dqb.dqb_curspace;
+       dquot->dq_dqb.dqb_rsvspace += number;
+       dquot->dq_dqb.dqb_curspace -= number;
+}
+
 static inline
 void dquot_free_reserved_space(struct dquot *dquot, qsize_t number)
 {
@@ -1528,6 +1536,15 @@ void inode_claim_rsv_space(struct inode *inode, qsize_t number)
 }
 EXPORT_SYMBOL(inode_claim_rsv_space);
 
+void inode_reclaim_rsv_space(struct inode *inode, qsize_t number)
+{
+       spin_lock(&inode->i_lock);
+       *inode_reserved_space(inode) += number;
+       __inode_sub_bytes(inode, number);
+       spin_unlock(&inode->i_lock);
+}
+EXPORT_SYMBOL(inode_reclaim_rsv_space);
+
 void inode_sub_rsv_space(struct inode *inode, qsize_t number)
 {
        spin_lock(&inode->i_lock);
@@ -1701,6 +1718,35 @@ int dquot_claim_space_nodirty(struct inode *inode, qsize_t number)
 }
 EXPORT_SYMBOL(dquot_claim_space_nodirty);
 
+/*
+ * Convert allocated space back to in-memory reserved quotas
+ */
+void dquot_reclaim_space_nodirty(struct inode *inode, qsize_t number)
+{
+       int cnt;
+
+       if (!dquot_active(inode)) {
+               inode_reclaim_rsv_space(inode, number);
+               return;
+       }
+
+       down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+       spin_lock(&dq_data_lock);
+       /* Claim reserved quotas to allocated quotas */
+       for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+               if (inode->i_dquot[cnt])
+                       dquot_reclaim_reserved_space(inode->i_dquot[cnt],
+                                                    number);
+       }
+       /* Update inode bytes */
+       inode_reclaim_rsv_space(inode, number);
+       spin_unlock(&dq_data_lock);
+       mark_all_dquot_dirty(inode->i_dquot);
+       up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+       return;
+}
+EXPORT_SYMBOL(dquot_reclaim_space_nodirty);
+
 /*
  * This operation can block, but only after everything is updated
  */
index c7314f1771f5824be95fffb4cd27695494c767ff..dea86e8967ee2c354d489384a83d1492b1dbcba3 100644 (file)
@@ -27,6 +27,7 @@ static int check_quotactl_permission(struct super_block *sb, int type, int cmd,
        case Q_SYNC:
        case Q_GETINFO:
        case Q_XGETQSTAT:
+       case Q_XGETQSTATV:
        case Q_XQUOTASYNC:
                break;
        /* allow to query information for dquots we "own" */
@@ -217,6 +218,31 @@ static int quota_getxstate(struct super_block *sb, void __user *addr)
        return ret;
 }
 
+static int quota_getxstatev(struct super_block *sb, void __user *addr)
+{
+       struct fs_quota_statv fqs;
+       int ret;
+
+       if (!sb->s_qcop->get_xstatev)
+               return -ENOSYS;
+
+       memset(&fqs, 0, sizeof(fqs));
+       if (copy_from_user(&fqs, addr, 1)) /* Just read qs_version */
+               return -EFAULT;
+
+       /* If this kernel doesn't support user specified version, fail */
+       switch (fqs.qs_version) {
+       case FS_QSTATV_VERSION1:
+               break;
+       default:
+               return -EINVAL;
+       }
+       ret = sb->s_qcop->get_xstatev(sb, &fqs);
+       if (!ret && copy_to_user(addr, &fqs, sizeof(fqs)))
+               return -EFAULT;
+       return ret;
+}
+
 static int quota_setxquota(struct super_block *sb, int type, qid_t id,
                           void __user *addr)
 {
@@ -293,6 +319,8 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id,
                return quota_setxstate(sb, cmd, addr);
        case Q_XGETQSTAT:
                return quota_getxstate(sb, addr);
+       case Q_XGETQSTATV:
+               return quota_getxstatev(sb, addr);
        case Q_XSETQLIM:
                return quota_setxquota(sb, type, id, addr);
        case Q_XGETQUOTA:
@@ -317,6 +345,7 @@ static int quotactl_cmd_write(int cmd)
        case Q_GETINFO:
        case Q_SYNC:
        case Q_XGETQSTAT:
+       case Q_XGETQSTATV:
        case Q_XGETQUOTA:
        case Q_XQUOTASYNC:
                return 0;
index 4884ac5ae9bea224517e384588847a44f4f8e462..c4d8572a37dfcf89be3e0f1d981a2186b96a5fc4 100644 (file)
@@ -39,9 +39,9 @@ const struct address_space_operations ramfs_aops = {
 
 const struct file_operations ramfs_file_operations = {
        .read           = do_sync_read,
-       .aio_read       = generic_file_aio_read,
+       .read_iter      = generic_file_read_iter,
        .write          = do_sync_write,
-       .aio_write      = generic_file_aio_write,
+       .write_iter     = generic_file_write_iter,
        .mmap           = generic_file_mmap,
        .fsync          = noop_fsync,
        .splice_read    = generic_file_splice_read,
index 8d5b438cc18859868fc0cf2206fa51d147ba9eb4..f2487c3cc3f33bd004075b633a4204d11a839a5b 100644 (file)
@@ -39,9 +39,9 @@ const struct file_operations ramfs_file_operations = {
        .mmap                   = ramfs_nommu_mmap,
        .get_unmapped_area      = ramfs_nommu_get_unmapped_area,
        .read                   = do_sync_read,
-       .aio_read               = generic_file_aio_read,
+       .read_iter              = generic_file_read_iter,
        .write                  = do_sync_write,
-       .aio_write              = generic_file_aio_write,
+       .write_iter             = generic_file_write_iter,
        .fsync                  = noop_fsync,
        .splice_read            = generic_file_splice_read,
        .splice_write           = generic_file_splice_write,
index 122a3846d9e14270a26952e92b25971257ebd82b..c3579a945a4dab7f438c31f89a8a2e8c7ed349c8 100644 (file)
@@ -29,7 +29,7 @@ typedef ssize_t (*iov_fn_t)(struct kiocb *, const struct iovec *,
 const struct file_operations generic_ro_fops = {
        .llseek         = generic_file_llseek,
        .read           = do_sync_read,
-       .aio_read       = generic_file_aio_read,
+       .read_iter      = generic_file_read_iter,
        .mmap           = generic_file_readonly_mmap,
        .splice_read    = generic_file_splice_read,
 };
@@ -359,6 +359,29 @@ int rw_verify_area(int read_write, struct file *file, const loff_t *ppos, size_t
        return count > MAX_RW_COUNT ? MAX_RW_COUNT : count;
 }
 
+ssize_t do_aio_read(struct kiocb *kiocb, const struct iovec *iov,
+                   unsigned long nr_segs, loff_t pos)
+{
+       struct file *file = kiocb->ki_filp;
+
+       if (file->f_op->read_iter) {
+               size_t count;
+               struct iov_iter iter;
+               int ret;
+
+               count = 0;
+               ret = generic_segment_checks(iov, &nr_segs, &count,
+                                            VERIFY_WRITE);
+               if (ret)
+                       return ret;
+
+               iov_iter_init(&iter, iov, nr_segs, count, 0);
+               return file->f_op->read_iter(kiocb, &iter, pos);
+       }
+
+       return file->f_op->aio_read(kiocb, iov, nr_segs, pos);
+}
+
 ssize_t do_sync_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos)
 {
        struct iovec iov = { .iov_base = buf, .iov_len = len };
@@ -370,7 +393,7 @@ ssize_t do_sync_read(struct file *filp, char __user *buf, size_t len, loff_t *pp
        kiocb.ki_left = len;
        kiocb.ki_nbytes = len;
 
-       ret = filp->f_op->aio_read(&kiocb, &iov, 1, kiocb.ki_pos);
+       ret = do_aio_read(&kiocb, &iov, 1, kiocb.ki_pos);
        if (-EIOCBQUEUED == ret)
                ret = wait_on_sync_kiocb(&kiocb);
        *ppos = kiocb.ki_pos;
@@ -385,7 +408,7 @@ ssize_t vfs_read(struct file *file, char __user *buf, size_t count, loff_t *pos)
 
        if (!(file->f_mode & FMODE_READ))
                return -EBADF;
-       if (!file->f_op || (!file->f_op->read && !file->f_op->aio_read))
+       if (!file_readable(file))
                return -EINVAL;
        if (unlikely(!access_ok(VERIFY_WRITE, buf, count)))
                return -EFAULT;
@@ -409,6 +432,29 @@ ssize_t vfs_read(struct file *file, char __user *buf, size_t count, loff_t *pos)
 
 EXPORT_SYMBOL(vfs_read);
 
+ssize_t do_aio_write(struct kiocb *kiocb, const struct iovec *iov,
+                    unsigned long nr_segs, loff_t pos)
+{
+       struct file *file = kiocb->ki_filp;
+
+       if (file->f_op->write_iter) {
+               size_t count;
+               struct iov_iter iter;
+               int ret;
+
+               count = 0;
+               ret = generic_segment_checks(iov, &nr_segs, &count,
+                                            VERIFY_READ);
+               if (ret)
+                       return ret;
+
+               iov_iter_init(&iter, iov, nr_segs, count, 0);
+               return file->f_op->write_iter(kiocb, &iter, pos);
+       }
+
+       return file->f_op->aio_write(kiocb, iov, nr_segs, pos);
+}
+
 ssize_t do_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos)
 {
        struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = len };
@@ -420,7 +466,7 @@ ssize_t do_sync_write(struct file *filp, const char __user *buf, size_t len, lof
        kiocb.ki_left = len;
        kiocb.ki_nbytes = len;
 
-       ret = filp->f_op->aio_write(&kiocb, &iov, 1, kiocb.ki_pos);
+       ret = do_aio_write(&kiocb, &iov, 1, kiocb.ki_pos);
        if (-EIOCBQUEUED == ret)
                ret = wait_on_sync_kiocb(&kiocb);
        *ppos = kiocb.ki_pos;
@@ -435,7 +481,7 @@ ssize_t __kernel_write(struct file *file, const char *buf, size_t count, loff_t
        const char __user *p;
        ssize_t ret;
 
-       if (!file->f_op || (!file->f_op->write && !file->f_op->aio_write))
+       if (!file_writable(file))
                return -EINVAL;
 
        old_fs = get_fs();
@@ -462,7 +508,7 @@ ssize_t vfs_write(struct file *file, const char __user *buf, size_t count, loff_
 
        if (!(file->f_mode & FMODE_WRITE))
                return -EBADF;
-       if (!file->f_op || (!file->f_op->write && !file->f_op->aio_write))
+       if (!file_writable(file))
                return -EINVAL;
        if (unlikely(!access_ok(VERIFY_READ, buf, count)))
                return -EFAULT;
@@ -748,10 +794,12 @@ static ssize_t do_readv_writev(int type, struct file *file,
        fnv = NULL;
        if (type == READ) {
                fn = file->f_op->read;
-               fnv = file->f_op->aio_read;
+               if (file->f_op->aio_read || file->f_op->read_iter)
+                       fnv = do_aio_read;
        } else {
                fn = (io_fn_t)file->f_op->write;
-               fnv = file->f_op->aio_write;
+               if (file->f_op->aio_write || file->f_op->write_iter)
+                       fnv = do_aio_write;
                file_start_write(file);
        }
 
@@ -781,7 +829,7 @@ ssize_t vfs_readv(struct file *file, const struct iovec __user *vec,
 {
        if (!(file->f_mode & FMODE_READ))
                return -EBADF;
-       if (!file->f_op || (!file->f_op->aio_read && !file->f_op->read))
+       if (!file_readable(file))
                return -EINVAL;
 
        return do_readv_writev(READ, file, vec, vlen, pos);
@@ -794,7 +842,7 @@ ssize_t vfs_writev(struct file *file, const struct iovec __user *vec,
 {
        if (!(file->f_mode & FMODE_WRITE))
                return -EBADF;
-       if (!file->f_op || (!file->f_op->aio_write && !file->f_op->write))
+       if (!file_writable(file))
                return -EINVAL;
 
        return do_readv_writev(WRITE, file, vec, vlen, pos);
@@ -930,10 +978,12 @@ static ssize_t compat_do_readv_writev(int type, struct file *file,
        fnv = NULL;
        if (type == READ) {
                fn = file->f_op->read;
-               fnv = file->f_op->aio_read;
+               if (file->f_op->aio_read || file->f_op->read_iter)
+                       fnv = do_aio_read;
        } else {
                fn = (io_fn_t)file->f_op->write;
-               fnv = file->f_op->aio_write;
+               if (file->f_op->aio_write || file->f_op->write_iter)
+                       fnv = do_aio_write;
                file_start_write(file);
        }
 
@@ -968,7 +1018,7 @@ static size_t compat_readv(struct file *file,
                goto out;
 
        ret = -EINVAL;
-       if (!file->f_op || (!file->f_op->aio_read && !file->f_op->read))
+       if (!file_readable(file))
                goto out;
 
        ret = compat_do_readv_writev(READ, file, vec, vlen, pos);
@@ -1035,7 +1085,7 @@ static size_t compat_writev(struct file *file,
                goto out;
 
        ret = -EINVAL;
-       if (!file->f_op || (!file->f_op->aio_write && !file->f_op->write))
+       if (!file_writable(file))
                goto out;
 
        ret = compat_do_readv_writev(WRITE, file, vec, vlen, pos);
index a98b7740a0fcade0b894920154e65ecbe7987509..dc9a6829f7c673ffa127115f7bce3cf355638264 100644 (file)
@@ -423,8 +423,11 @@ static void _reiserfs_free_block(struct reiserfs_transaction_handle *th,
        set_sb_free_blocks(rs, sb_free_blocks(rs) + 1);
 
        journal_mark_dirty(th, s, sbh);
-       if (for_unformatted)
+       if (for_unformatted) {
+               int depth = reiserfs_write_unlock_nested(s);
                dquot_free_block_nodirty(inode, 1);
+               reiserfs_write_lock_nested(s, depth);
+       }
 }
 
 void reiserfs_free_block(struct reiserfs_transaction_handle *th,
@@ -1128,6 +1131,7 @@ static inline int blocknrs_and_prealloc_arrays_from_search_start
        b_blocknr_t finish = SB_BLOCK_COUNT(s) - 1;
        int passno = 0;
        int nr_allocated = 0;
+       int depth;
 
        determine_prealloc_size(hint);
        if (!hint->formatted_node) {
@@ -1137,10 +1141,13 @@ static inline int blocknrs_and_prealloc_arrays_from_search_start
                               "reiserquota: allocating %d blocks id=%u",
                               amount_needed, hint->inode->i_uid);
 #endif
+               depth = reiserfs_write_unlock_nested(s);
                quota_ret =
                    dquot_alloc_block_nodirty(hint->inode, amount_needed);
-               if (quota_ret)  /* Quota exceeded? */
+               if (quota_ret) {        /* Quota exceeded? */
+                       reiserfs_write_lock_nested(s, depth);
                        return QUOTA_EXCEEDED;
+               }
                if (hint->preallocate && hint->prealloc_size) {
 #ifdef REISERQUOTA_DEBUG
                        reiserfs_debug(s, REISERFS_DEBUG_CODE,
@@ -1153,6 +1160,7 @@ static inline int blocknrs_and_prealloc_arrays_from_search_start
                                hint->preallocate = hint->prealloc_size = 0;
                }
                /* for unformatted nodes, force large allocations */
+               reiserfs_write_lock_nested(s, depth);
        }
 
        do {
@@ -1181,9 +1189,11 @@ static inline int blocknrs_and_prealloc_arrays_from_search_start
                                               hint->inode->i_uid);
 #endif
                                /* Free not allocated blocks */
+                               depth = reiserfs_write_unlock_nested(s);
                                dquot_free_block_nodirty(hint->inode,
                                        amount_needed + hint->prealloc_size -
                                        nr_allocated);
+                               reiserfs_write_lock_nested(s, depth);
                        }
                        while (nr_allocated--)
                                reiserfs_free_block(hint->th, hint->inode,
@@ -1214,10 +1224,13 @@ static inline int blocknrs_and_prealloc_arrays_from_search_start
                               REISERFS_I(hint->inode)->i_prealloc_count,
                               hint->inode->i_uid);
 #endif
+
+               depth = reiserfs_write_unlock_nested(s);
                dquot_free_block_nodirty(hint->inode, amount_needed +
                                         hint->prealloc_size - nr_allocated -
                                         REISERFS_I(hint->inode)->
                                         i_prealloc_count);
+               reiserfs_write_lock_nested(s, depth);
        }
 
        return CARRY_ON;
@@ -1340,10 +1353,11 @@ struct buffer_head *reiserfs_read_bitmap_block(struct super_block *sb,
                                 "reading failed", __func__, block);
        else {
                if (buffer_locked(bh)) {
+                       int depth;
                        PROC_INFO_INC(sb, scan_bitmap.wait);
-                       reiserfs_write_unlock(sb);
+                       depth = reiserfs_write_unlock_nested(sb);
                        __wait_on_buffer(bh);
-                       reiserfs_write_lock(sb);
+                       reiserfs_write_lock_nested(sb, depth);
                }
                BUG_ON(!buffer_uptodate(bh));
                BUG_ON(atomic_read(&bh->b_count) == 0);
index 03e4ca5624d6057f90c8230a2c6d89cac0fcd96f..1fd2051109a3547a47dbea91dc8e88ea5343d7c4 100644 (file)
@@ -71,6 +71,7 @@ int reiserfs_readdir_inode(struct inode *inode, struct dir_context *ctx)
        char small_buf[32];     /* avoid kmalloc if we can */
        struct reiserfs_dir_entry de;
        int ret = 0;
+       int depth;
 
        reiserfs_write_lock(inode->i_sb);
 
@@ -181,17 +182,17 @@ int reiserfs_readdir_inode(struct inode *inode, struct dir_context *ctx)
                                 * Since filldir might sleep, we can release
                                 * the write lock here for other waiters
                                 */
-                               reiserfs_write_unlock(inode->i_sb);
+                               depth = reiserfs_write_unlock_nested(inode->i_sb);
                                if (!dir_emit
                                    (ctx, local_buf, d_reclen, d_ino,
                                     DT_UNKNOWN)) {
-                                       reiserfs_write_lock(inode->i_sb);
+                                       reiserfs_write_lock_nested(inode->i_sb, depth);
                                        if (local_buf != small_buf) {
                                                kfree(local_buf);
                                        }
                                        goto end;
                                }
-                               reiserfs_write_lock(inode->i_sb);
+                               reiserfs_write_lock_nested(inode->i_sb, depth);
                                if (local_buf != small_buf) {
                                        kfree(local_buf);
                                }
index dcaafcfc23b007c845f490a4f293cc485c324b51..f98feb229ec4646613463a305f0a3db275ad1e8d 100644 (file)
@@ -245,8 +245,8 @@ const struct file_operations reiserfs_file_operations = {
        .open = reiserfs_file_open,
        .release = reiserfs_file_release,
        .fsync = reiserfs_sync_file,
-       .aio_read = generic_file_aio_read,
-       .aio_write = generic_file_aio_write,
+       .read_iter = generic_file_read_iter,
+       .write_iter = generic_file_write_iter,
        .splice_read = generic_file_splice_read,
        .splice_write = generic_file_splice_write,
        .llseek = generic_file_llseek,
index 430e0658704c3f1c19d903e78d03e80f4c305484..dc4d415303164934d21b8c0dd416adb52035f311 100644 (file)
@@ -1022,9 +1022,9 @@ static int get_far_parent(struct tree_balance *tb,
        if (buffer_locked(*pcom_father)) {
 
                /* Release the write lock while the buffer is busy */
-               reiserfs_write_unlock(tb->tb_sb);
+               int depth = reiserfs_write_unlock_nested(tb->tb_sb);
                __wait_on_buffer(*pcom_father);
-               reiserfs_write_lock(tb->tb_sb);
+               reiserfs_write_lock_nested(tb->tb_sb, depth);
                if (FILESYSTEM_CHANGED_TB(tb)) {
                        brelse(*pcom_father);
                        return REPEAT_SEARCH;
@@ -1929,9 +1929,9 @@ static int get_direct_parent(struct tree_balance *tb, int h)
                return REPEAT_SEARCH;
 
        if (buffer_locked(bh)) {
-               reiserfs_write_unlock(tb->tb_sb);
+               int depth = reiserfs_write_unlock_nested(tb->tb_sb);
                __wait_on_buffer(bh);
-               reiserfs_write_lock(tb->tb_sb);
+               reiserfs_write_lock_nested(tb->tb_sb, depth);
                if (FILESYSTEM_CHANGED_TB(tb))
                        return REPEAT_SEARCH;
        }
@@ -1952,6 +1952,7 @@ static int get_neighbors(struct tree_balance *tb, int h)
        unsigned long son_number;
        struct super_block *sb = tb->tb_sb;
        struct buffer_head *bh;
+       int depth;
 
        PROC_INFO_INC(sb, get_neighbors[h]);
 
@@ -1969,9 +1970,9 @@ static int get_neighbors(struct tree_balance *tb, int h)
                     tb->FL[h]) ? tb->lkey[h] : B_NR_ITEMS(tb->
                                                                       FL[h]);
                son_number = B_N_CHILD_NUM(tb->FL[h], child_position);
-               reiserfs_write_unlock(sb);
+               depth = reiserfs_write_unlock_nested(tb->tb_sb);
                bh = sb_bread(sb, son_number);
-               reiserfs_write_lock(sb);
+               reiserfs_write_lock_nested(tb->tb_sb, depth);
                if (!bh)
                        return IO_ERROR;
                if (FILESYSTEM_CHANGED_TB(tb)) {
@@ -2009,9 +2010,9 @@ static int get_neighbors(struct tree_balance *tb, int h)
                child_position =
                    (bh == tb->FR[h]) ? tb->rkey[h] + 1 : 0;
                son_number = B_N_CHILD_NUM(tb->FR[h], child_position);
-               reiserfs_write_unlock(sb);
+               depth = reiserfs_write_unlock_nested(tb->tb_sb);
                bh = sb_bread(sb, son_number);
-               reiserfs_write_lock(sb);
+               reiserfs_write_lock_nested(tb->tb_sb, depth);
                if (!bh)
                        return IO_ERROR;
                if (FILESYSTEM_CHANGED_TB(tb)) {
@@ -2272,6 +2273,7 @@ static int wait_tb_buffers_until_unlocked(struct tree_balance *tb)
                }
 
                if (locked) {
+                       int depth;
 #ifdef CONFIG_REISERFS_CHECK
                        repeat_counter++;
                        if ((repeat_counter % 10000) == 0) {
@@ -2286,9 +2288,9 @@ static int wait_tb_buffers_until_unlocked(struct tree_balance *tb)
                                    REPEAT_SEARCH : CARRY_ON;
                        }
 #endif
-                       reiserfs_write_unlock(tb->tb_sb);
+                       depth = reiserfs_write_unlock_nested(tb->tb_sb);
                        __wait_on_buffer(locked);
-                       reiserfs_write_lock(tb->tb_sb);
+                       reiserfs_write_lock_nested(tb->tb_sb, depth);
                        if (FILESYSTEM_CHANGED_TB(tb))
                                return REPEAT_SEARCH;
                }
@@ -2359,9 +2361,9 @@ int fix_nodes(int op_mode, struct tree_balance *tb,
 
        /* if it possible in indirect_to_direct conversion */
        if (buffer_locked(tbS0)) {
-               reiserfs_write_unlock(tb->tb_sb);
+               int depth = reiserfs_write_unlock_nested(tb->tb_sb);
                __wait_on_buffer(tbS0);
-               reiserfs_write_lock(tb->tb_sb);
+               reiserfs_write_lock_nested(tb->tb_sb, depth);
                if (FILESYSTEM_CHANGED_TB(tb))
                        return REPEAT_SEARCH;
        }
index 0048cc16a6a8c5e6d54ebd1d9dd16f29f1d310ea..6d652af02c5b2c7f8bfc449451c909e0ae1ec6cf 100644 (file)
@@ -30,7 +30,6 @@ void reiserfs_evict_inode(struct inode *inode)
            JOURNAL_PER_BALANCE_CNT * 2 +
            2 * REISERFS_QUOTA_INIT_BLOCKS(inode->i_sb);
        struct reiserfs_transaction_handle th;
-       int depth;
        int err;
 
        if (!inode->i_nlink && !is_bad_inode(inode))
@@ -40,12 +39,13 @@ void reiserfs_evict_inode(struct inode *inode)
        if (inode->i_nlink)
                goto no_delete;
 
-       depth = reiserfs_write_lock_once(inode->i_sb);
-
        /* The = 0 happens when we abort creating a new inode for some reason like lack of space.. */
        if (!(inode->i_state & I_NEW) && INODE_PKEY(inode)->k_objectid != 0) {  /* also handles bad_inode case */
+
                reiserfs_delete_xattrs(inode);
 
+               reiserfs_write_lock(inode->i_sb);
+
                if (journal_begin(&th, inode->i_sb, jbegin_count))
                        goto out;
                reiserfs_update_inode_transaction(inode);
@@ -57,8 +57,11 @@ void reiserfs_evict_inode(struct inode *inode)
                /* Do quota update inside a transaction for journaled quotas. We must do that
                 * after delete_object so that quota updates go into the same transaction as
                 * stat data deletion */
-               if (!err) 
+               if (!err) {
+                       int depth = reiserfs_write_unlock_nested(inode->i_sb);
                        dquot_free_inode(inode);
+                       reiserfs_write_lock_nested(inode->i_sb, depth);
+               }
 
                if (journal_end(&th, inode->i_sb, jbegin_count))
                        goto out;
@@ -72,12 +75,12 @@ void reiserfs_evict_inode(struct inode *inode)
                /* all items of file are deleted, so we can remove "save" link */
                remove_save_link(inode, 0 /* not truncate */ ); /* we can't do anything
                                                                 * about an error here */
+out:
+               reiserfs_write_unlock(inode->i_sb);
        } else {
                /* no object items are in the tree */
                ;
        }
-      out:
-       reiserfs_write_unlock_once(inode->i_sb, depth);
        clear_inode(inode);     /* note this must go after the journal_end to prevent deadlock */
        dquot_drop(inode);
        inode->i_blocks = 0;
@@ -610,7 +613,6 @@ int reiserfs_get_block(struct inode *inode, sector_t block,
        __le32 *item;
        int done;
        int fs_gen;
-       int lock_depth;
        struct reiserfs_transaction_handle *th = NULL;
        /* space reserved in transaction batch:
           . 3 balancings in direct->indirect conversion
@@ -626,11 +628,11 @@ int reiserfs_get_block(struct inode *inode, sector_t block,
        loff_t new_offset =
            (((loff_t) block) << inode->i_sb->s_blocksize_bits) + 1;
 
-       lock_depth = reiserfs_write_lock_once(inode->i_sb);
+       reiserfs_write_lock(inode->i_sb);
        version = get_inode_item_key_version(inode);
 
        if (!file_capable(inode, block)) {
-               reiserfs_write_unlock_once(inode->i_sb, lock_depth);
+               reiserfs_write_unlock(inode->i_sb);
                return -EFBIG;
        }
 
@@ -642,7 +644,7 @@ int reiserfs_get_block(struct inode *inode, sector_t block,
                /* find number of block-th logical block of the file */
                ret = _get_block_create_0(inode, block, bh_result,
                                          create | GET_BLOCK_READ_DIRECT);
-               reiserfs_write_unlock_once(inode->i_sb, lock_depth);
+               reiserfs_write_unlock(inode->i_sb);
                return ret;
        }
        /*
@@ -760,7 +762,7 @@ int reiserfs_get_block(struct inode *inode, sector_t block,
                if (!dangle && th)
                        retval = reiserfs_end_persistent_transaction(th);
 
-               reiserfs_write_unlock_once(inode->i_sb, lock_depth);
+               reiserfs_write_unlock(inode->i_sb);
 
                /* the item was found, so new blocks were not added to the file
                 ** there is no need to make sure the inode is updated with this
@@ -1011,11 +1013,7 @@ int reiserfs_get_block(struct inode *inode, sector_t block,
                 * long time.  reschedule if needed and also release the write
                 * lock for others.
                 */
-               if (need_resched()) {
-                       reiserfs_write_unlock_once(inode->i_sb, lock_depth);
-                       schedule();
-                       lock_depth = reiserfs_write_lock_once(inode->i_sb);
-               }
+               reiserfs_cond_resched(inode->i_sb);
 
                retval = search_for_position_by_key(inode->i_sb, &key, &path);
                if (retval == IO_ERROR) {
@@ -1050,7 +1048,7 @@ int reiserfs_get_block(struct inode *inode, sector_t block,
                        retval = err;
        }
 
-       reiserfs_write_unlock_once(inode->i_sb, lock_depth);
+       reiserfs_write_unlock(inode->i_sb);
        reiserfs_check_path(&path);
        return retval;
 }
@@ -1509,14 +1507,15 @@ struct inode *reiserfs_iget(struct super_block *s, const struct cpu_key *key)
 {
        struct inode *inode;
        struct reiserfs_iget_args args;
+       int depth;
 
        args.objectid = key->on_disk_key.k_objectid;
        args.dirid = key->on_disk_key.k_dir_id;
-       reiserfs_write_unlock(s);
+       depth = reiserfs_write_unlock_nested(s);
        inode = iget5_locked(s, key->on_disk_key.k_objectid,
                             reiserfs_find_actor, reiserfs_init_locked_inode,
                             (void *)(&args));
-       reiserfs_write_lock(s);
+       reiserfs_write_lock_nested(s, depth);
        if (!inode)
                return ERR_PTR(-ENOMEM);
 
@@ -1772,7 +1771,7 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
                       struct inode *inode,
                       struct reiserfs_security_handle *security)
 {
-       struct super_block *sb;
+       struct super_block *sb = dir->i_sb;
        struct reiserfs_iget_args args;
        INITIALIZE_PATH(path_to_key);
        struct cpu_key key;
@@ -1780,12 +1779,13 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
        struct stat_data sd;
        int retval;
        int err;
+       int depth;
 
        BUG_ON(!th->t_trans_id);
 
-       reiserfs_write_unlock(inode->i_sb);
+       depth = reiserfs_write_unlock_nested(sb);
        err = dquot_alloc_inode(inode);
-       reiserfs_write_lock(inode->i_sb);
+       reiserfs_write_lock_nested(sb, depth);
        if (err)
                goto out_end_trans;
        if (!dir->i_nlink) {
@@ -1793,8 +1793,6 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
                goto out_bad_inode;
        }
 
-       sb = dir->i_sb;
-
        /* item head of new item */
        ih.ih_key.k_dir_id = reiserfs_choose_packing(dir);
        ih.ih_key.k_objectid = cpu_to_le32(reiserfs_get_unused_objectid(th));
@@ -1812,10 +1810,10 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
        memcpy(INODE_PKEY(inode), &(ih.ih_key), KEY_SIZE);
        args.dirid = le32_to_cpu(ih.ih_key.k_dir_id);
 
-       reiserfs_write_unlock(inode->i_sb);
+       depth = reiserfs_write_unlock_nested(inode->i_sb);
        err = insert_inode_locked4(inode, args.objectid,
                             reiserfs_find_actor, &args);
-       reiserfs_write_lock(inode->i_sb);
+       reiserfs_write_lock_nested(inode->i_sb, depth);
        if (err) {
                err = -EINVAL;
                goto out_bad_inode;
@@ -1941,7 +1939,9 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
        }
 
        if (reiserfs_posixacl(inode->i_sb)) {
+               reiserfs_write_unlock(inode->i_sb);
                retval = reiserfs_inherit_default_acl(th, dir, dentry, inode);
+               reiserfs_write_lock(inode->i_sb);
                if (retval) {
                        err = retval;
                        reiserfs_check_path(&path_to_key);
@@ -1956,7 +1956,9 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
                inode->i_flags |= S_PRIVATE;
 
        if (security->name) {
+               reiserfs_write_unlock(inode->i_sb);
                retval = reiserfs_security_write(th, inode, security);
+               reiserfs_write_lock(inode->i_sb);
                if (retval) {
                        err = retval;
                        reiserfs_check_path(&path_to_key);
@@ -1982,14 +1984,16 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
        INODE_PKEY(inode)->k_objectid = 0;
 
        /* Quota change must be inside a transaction for journaling */
+       depth = reiserfs_write_unlock_nested(inode->i_sb);
        dquot_free_inode(inode);
+       reiserfs_write_lock_nested(inode->i_sb, depth);
 
       out_end_trans:
        journal_end(th, th->t_super, th->t_blocks_allocated);
-       reiserfs_write_unlock(inode->i_sb);
        /* Drop can be outside and it needs more credits so it's better to have it outside */
+       depth = reiserfs_write_unlock_nested(inode->i_sb);
        dquot_drop(inode);
-       reiserfs_write_lock(inode->i_sb);
+       reiserfs_write_lock_nested(inode->i_sb, depth);
        inode->i_flags |= S_NOQUOTA;
        make_bad_inode(inode);
 
@@ -2103,9 +2107,8 @@ int reiserfs_truncate_file(struct inode *inode, int update_timestamps)
        int error;
        struct buffer_head *bh = NULL;
        int err2;
-       int lock_depth;
 
-       lock_depth = reiserfs_write_lock_once(inode->i_sb);
+       reiserfs_write_lock(inode->i_sb);
 
        if (inode->i_size > 0) {
                error = grab_tail_page(inode, &page, &bh);
@@ -2174,7 +2177,7 @@ int reiserfs_truncate_file(struct inode *inode, int update_timestamps)
                page_cache_release(page);
        }
 
-       reiserfs_write_unlock_once(inode->i_sb, lock_depth);
+       reiserfs_write_unlock(inode->i_sb);
 
        return 0;
       out:
@@ -2183,7 +2186,7 @@ int reiserfs_truncate_file(struct inode *inode, int update_timestamps)
                page_cache_release(page);
        }
 
-       reiserfs_write_unlock_once(inode->i_sb, lock_depth);
+       reiserfs_write_unlock(inode->i_sb);
 
        return error;
 }
@@ -2648,10 +2651,11 @@ int __reiserfs_write_begin(struct page *page, unsigned from, unsigned len)
        struct inode *inode = page->mapping->host;
        int ret;
        int old_ref = 0;
+       int depth;
 
-       reiserfs_write_unlock(inode->i_sb);
+       depth = reiserfs_write_unlock_nested(inode->i_sb);
        reiserfs_wait_on_write_block(inode->i_sb);
-       reiserfs_write_lock(inode->i_sb);
+       reiserfs_write_lock_nested(inode->i_sb, depth);
 
        fix_tail_page_for_writing(page);
        if (reiserfs_transaction_running(inode->i_sb)) {
@@ -2708,7 +2712,6 @@ static int reiserfs_write_end(struct file *file, struct address_space *mapping,
        int update_sd = 0;
        struct reiserfs_transaction_handle *th;
        unsigned start;
-       int lock_depth = 0;
        bool locked = false;
 
        if ((unsigned long)fsdata & AOP_FLAG_CONT_EXPAND)
@@ -2737,7 +2740,7 @@ static int reiserfs_write_end(struct file *file, struct address_space *mapping,
         */
        if (pos + copied > inode->i_size) {
                struct reiserfs_transaction_handle myth;
-               lock_depth = reiserfs_write_lock_once(inode->i_sb);
+               reiserfs_write_lock(inode->i_sb);
                locked = true;
                /* If the file have grown beyond the border where it
                   can have a tail, unmark it as needing a tail
@@ -2768,7 +2771,7 @@ static int reiserfs_write_end(struct file *file, struct address_space *mapping,
        }
        if (th) {
                if (!locked) {
-                       lock_depth = reiserfs_write_lock_once(inode->i_sb);
+                       reiserfs_write_lock(inode->i_sb);
                        locked = true;
                }
                if (!update_sd)
@@ -2780,7 +2783,7 @@ static int reiserfs_write_end(struct file *file, struct address_space *mapping,
 
       out:
        if (locked)
-               reiserfs_write_unlock_once(inode->i_sb, lock_depth);
+               reiserfs_write_unlock(inode->i_sb);
        unlock_page(page);
        page_cache_release(page);
 
@@ -2790,7 +2793,7 @@ static int reiserfs_write_end(struct file *file, struct address_space *mapping,
        return ret == 0 ? copied : ret;
 
       journal_error:
-       reiserfs_write_unlock_once(inode->i_sb, lock_depth);
+       reiserfs_write_unlock(inode->i_sb);
        locked = false;
        if (th) {
                if (!update_sd)
@@ -2808,10 +2811,11 @@ int reiserfs_commit_write(struct file *f, struct page *page,
        int ret = 0;
        int update_sd = 0;
        struct reiserfs_transaction_handle *th = NULL;
+       int depth;
 
-       reiserfs_write_unlock(inode->i_sb);
+       depth = reiserfs_write_unlock_nested(inode->i_sb);
        reiserfs_wait_on_write_block(inode->i_sb);
-       reiserfs_write_lock(inode->i_sb);
+       reiserfs_write_lock_nested(inode->i_sb, depth);
 
        if (reiserfs_transaction_running(inode->i_sb)) {
                th = current->journal_info;
@@ -3079,14 +3083,13 @@ static int reiserfs_releasepage(struct page *page, gfp_t unused_gfp_flags)
 /* We thank Mingming Cao for helping us understand in great detail what
    to do in this section of the code. */
 static ssize_t reiserfs_direct_IO(int rw, struct kiocb *iocb,
-                                 const struct iovec *iov, loff_t offset,
-                                 unsigned long nr_segs)
+                                 struct iov_iter *iter, loff_t offset)
 {
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_mapping->host;
        ssize_t ret;
 
-       ret = blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
+       ret = blockdev_direct_IO(rw, iocb, inode, iter, offset,
                                  reiserfs_get_blocks_direct_io);
 
        /*
@@ -3095,7 +3098,7 @@ static ssize_t reiserfs_direct_IO(int rw, struct kiocb *iocb,
         */
        if (unlikely((rw & WRITE) && ret < 0)) {
                loff_t isize = i_size_read(inode);
-               loff_t end = offset + iov_length(iov, nr_segs);
+               loff_t end = offset + iov_iter_count(iter);
 
                if ((end > isize) && inode_newsize_ok(inode, isize) == 0) {
                        truncate_setsize(inode, isize);
@@ -3110,7 +3113,6 @@ int reiserfs_setattr(struct dentry *dentry, struct iattr *attr)
 {
        struct inode *inode = dentry->d_inode;
        unsigned int ia_valid;
-       int depth;
        int error;
 
        error = inode_change_ok(inode, attr);
@@ -3122,13 +3124,14 @@ int reiserfs_setattr(struct dentry *dentry, struct iattr *attr)
 
        if (is_quota_modification(inode, attr))
                dquot_initialize(inode);
-       depth = reiserfs_write_lock_once(inode->i_sb);
+       reiserfs_write_lock(inode->i_sb);
        if (attr->ia_valid & ATTR_SIZE) {
                /* version 2 items will be caught by the s_maxbytes check
                 ** done for us in vmtruncate
                 */
                if (get_inode_item_key_version(inode) == KEY_FORMAT_3_5 &&
                    attr->ia_size > MAX_NON_LFS) {
+                       reiserfs_write_unlock(inode->i_sb);
                        error = -EFBIG;
                        goto out;
                }
@@ -3150,8 +3153,10 @@ int reiserfs_setattr(struct dentry *dentry, struct iattr *attr)
                                if (err)
                                        error = err;
                        }
-                       if (error)
+                       if (error) {
+                               reiserfs_write_unlock(inode->i_sb);
                                goto out;
+                       }
                        /*
                         * file size is changed, ctime and mtime are
                         * to be updated
@@ -3159,6 +3164,7 @@ int reiserfs_setattr(struct dentry *dentry, struct iattr *attr)
                        attr->ia_valid |= (ATTR_MTIME | ATTR_CTIME);
                }
        }
+       reiserfs_write_unlock(inode->i_sb);
 
        if ((((attr->ia_valid & ATTR_UID) && (from_kuid(&init_user_ns, attr->ia_uid) & ~0xffff)) ||
             ((attr->ia_valid & ATTR_GID) && (from_kgid(&init_user_ns, attr->ia_gid) & ~0xffff))) &&
@@ -3183,14 +3189,16 @@ int reiserfs_setattr(struct dentry *dentry, struct iattr *attr)
                        return error;
 
                /* (user+group)*(old+new) structure - we count quota info and , inode write (sb, inode) */
+               reiserfs_write_lock(inode->i_sb);
                error = journal_begin(&th, inode->i_sb, jbegin_count);
+               reiserfs_write_unlock(inode->i_sb);
                if (error)
                        goto out;
-               reiserfs_write_unlock_once(inode->i_sb, depth);
                error = dquot_transfer(inode, attr);
-               depth = reiserfs_write_lock_once(inode->i_sb);
+               reiserfs_write_lock(inode->i_sb);
                if (error) {
                        journal_end(&th, inode->i_sb, jbegin_count);
+                       reiserfs_write_unlock(inode->i_sb);
                        goto out;
                }
 
@@ -3202,17 +3210,11 @@ int reiserfs_setattr(struct dentry *dentry, struct iattr *attr)
                        inode->i_gid = attr->ia_gid;
                mark_inode_dirty(inode);
                error = journal_end(&th, inode->i_sb, jbegin_count);
+               reiserfs_write_unlock(inode->i_sb);
                if (error)
                        goto out;
        }
 
-       /*
-        * Relax the lock here, as it might truncate the
-        * inode pages and wait for inode pages locks.
-        * To release such page lock, the owner needs the
-        * reiserfs lock
-        */
-       reiserfs_write_unlock_once(inode->i_sb, depth);
        if ((attr->ia_valid & ATTR_SIZE) &&
            attr->ia_size != i_size_read(inode)) {
                error = inode_newsize_ok(inode, attr->ia_size);
@@ -3226,16 +3228,13 @@ int reiserfs_setattr(struct dentry *dentry, struct iattr *attr)
                setattr_copy(inode, attr);
                mark_inode_dirty(inode);
        }
-       depth = reiserfs_write_lock_once(inode->i_sb);
 
        if (!error && reiserfs_posixacl(inode->i_sb)) {
                if (attr->ia_valid & ATTR_MODE)
                        error = reiserfs_acl_chmod(inode);
        }
 
-      out:
-       reiserfs_write_unlock_once(inode->i_sb, depth);
-
+out:
        return error;
 }
 
index 15cb5fe6b425bce7fdd680511e28b0cf4fad3968..946ccbf5b5a10d9a104d48c4dcb66a6a8ec3ffc0 100644 (file)
@@ -167,7 +167,6 @@ int reiserfs_commit_write(struct file *f, struct page *page,
 int reiserfs_unpack(struct inode *inode, struct file *filp)
 {
        int retval = 0;
-       int depth;
        int index;
        struct page *page;
        struct address_space *mapping;
@@ -183,11 +182,11 @@ int reiserfs_unpack(struct inode *inode, struct file *filp)
                return 0;
        }
 
-       depth = reiserfs_write_lock_once(inode->i_sb);
-
        /* we need to make sure nobody is changing the file size beneath us */
        reiserfs_mutex_lock_safe(&inode->i_mutex, inode->i_sb);
 
+       reiserfs_write_lock(inode->i_sb);
+
        write_from = inode->i_size & (blocksize - 1);
        /* if we are on a block boundary, we are already unpacked.  */
        if (write_from == 0) {
@@ -221,6 +220,6 @@ int reiserfs_unpack(struct inode *inode, struct file *filp)
 
       out:
        mutex_unlock(&inode->i_mutex);
-       reiserfs_write_unlock_once(inode->i_sb, depth);
+       reiserfs_write_unlock(inode->i_sb);
        return retval;
 }
index 742fdd4c209ae90500b49a78ce52d27689c03ba9..73feacc49b2ef3bc2b088f7d74ed05d4195a45ac 100644 (file)
@@ -947,9 +947,11 @@ static int reiserfs_async_progress_wait(struct super_block *s)
        struct reiserfs_journal *j = SB_JOURNAL(s);
 
        if (atomic_read(&j->j_async_throttle)) {
-               reiserfs_write_unlock(s);
+               int depth;
+
+               depth = reiserfs_write_unlock_nested(s);
                congestion_wait(BLK_RW_ASYNC, HZ / 10);
-               reiserfs_write_lock(s);
+               reiserfs_write_lock_nested(s, depth);
        }
 
        return 0;
@@ -972,6 +974,7 @@ static int flush_commit_list(struct super_block *s,
        struct reiserfs_journal *journal = SB_JOURNAL(s);
        int retval = 0;
        int write_len;
+       int depth;
 
        reiserfs_check_lock_depth(s, "flush_commit_list");
 
@@ -1018,12 +1021,12 @@ static int flush_commit_list(struct super_block *s,
                 * We might sleep in numerous places inside
                 * write_ordered_buffers. Relax the write lock.
                 */
-               reiserfs_write_unlock(s);
+               depth = reiserfs_write_unlock_nested(s);
                ret = write_ordered_buffers(&journal->j_dirty_buffers_lock,
                                            journal, jl, &jl->j_bh_list);
                if (ret < 0 && retval == 0)
                        retval = ret;
-               reiserfs_write_lock(s);
+               reiserfs_write_lock_nested(s, depth);
        }
        BUG_ON(!list_empty(&jl->j_bh_list));
        /*
@@ -1043,9 +1046,9 @@ static int flush_commit_list(struct super_block *s,
                tbh = journal_find_get_block(s, bn);
                if (tbh) {
                        if (buffer_dirty(tbh)) {
-                           reiserfs_write_unlock(s);
+                           depth = reiserfs_write_unlock_nested(s);
                            ll_rw_block(WRITE, 1, &tbh);
-                           reiserfs_write_lock(s);
+                           reiserfs_write_lock_nested(s, depth);
                        }
                        put_bh(tbh) ;
                }
@@ -1057,17 +1060,17 @@ static int flush_commit_list(struct super_block *s,
                    (jl->j_start + i) % SB_ONDISK_JOURNAL_SIZE(s);
                tbh = journal_find_get_block(s, bn);
 
-               reiserfs_write_unlock(s);
-               wait_on_buffer(tbh);
-               reiserfs_write_lock(s);
+               depth = reiserfs_write_unlock_nested(s);
+               __wait_on_buffer(tbh);
+               reiserfs_write_lock_nested(s, depth);
                // since we're using ll_rw_blk above, it might have skipped over
                // a locked buffer.  Double check here
                //
                /* redundant, sync_dirty_buffer() checks */
                if (buffer_dirty(tbh)) {
-                       reiserfs_write_unlock(s);
+                       depth = reiserfs_write_unlock_nested(s);
                        sync_dirty_buffer(tbh);
-                       reiserfs_write_lock(s);
+                       reiserfs_write_lock_nested(s, depth);
                }
                if (unlikely(!buffer_uptodate(tbh))) {
 #ifdef CONFIG_REISERFS_CHECK
@@ -1091,12 +1094,12 @@ static int flush_commit_list(struct super_block *s,
                if (buffer_dirty(jl->j_commit_bh))
                        BUG();
                mark_buffer_dirty(jl->j_commit_bh) ;
-               reiserfs_write_unlock(s);
+               depth = reiserfs_write_unlock_nested(s);
                if (reiserfs_barrier_flush(s))
                        __sync_dirty_buffer(jl->j_commit_bh, WRITE_FLUSH_FUA);
                else
                        sync_dirty_buffer(jl->j_commit_bh);
-               reiserfs_write_lock(s);
+               reiserfs_write_lock_nested(s, depth);
        }
 
        /* If there was a write error in the journal - we can't commit this
@@ -1228,15 +1231,16 @@ static int _update_journal_header_block(struct super_block *sb,
 {
        struct reiserfs_journal_header *jh;
        struct reiserfs_journal *journal = SB_JOURNAL(sb);
+       int depth;
 
        if (reiserfs_is_journal_aborted(journal))
                return -EIO;
 
        if (trans_id >= journal->j_last_flush_trans_id) {
                if (buffer_locked((journal->j_header_bh))) {
-                       reiserfs_write_unlock(sb);
-                       wait_on_buffer((journal->j_header_bh));
-                       reiserfs_write_lock(sb);
+                       depth = reiserfs_write_unlock_nested(sb);
+                       __wait_on_buffer(journal->j_header_bh);
+                       reiserfs_write_lock_nested(sb, depth);
                        if (unlikely(!buffer_uptodate(journal->j_header_bh))) {
 #ifdef CONFIG_REISERFS_CHECK
                                reiserfs_warning(sb, "journal-699",
@@ -1254,14 +1258,14 @@ static int _update_journal_header_block(struct super_block *sb,
                jh->j_mount_id = cpu_to_le32(journal->j_mount_id);
 
                set_buffer_dirty(journal->j_header_bh);
-               reiserfs_write_unlock(sb);
+               depth = reiserfs_write_unlock_nested(sb);
 
                if (reiserfs_barrier_flush(sb))
                        __sync_dirty_buffer(journal->j_header_bh, WRITE_FLUSH_FUA);
                else
                        sync_dirty_buffer(journal->j_header_bh);
 
-               reiserfs_write_lock(sb);
+               reiserfs_write_lock_nested(sb, depth);
                if (!buffer_uptodate(journal->j_header_bh)) {
                        reiserfs_warning(sb, "journal-837",
                                         "IO error during journal replay");
@@ -1341,6 +1345,7 @@ static int flush_journal_list(struct super_block *s,
        unsigned long j_len_saved = jl->j_len;
        struct reiserfs_journal *journal = SB_JOURNAL(s);
        int err = 0;
+       int depth;
 
        BUG_ON(j_len_saved <= 0);
 
@@ -1495,9 +1500,9 @@ static int flush_journal_list(struct super_block *s,
                                                       "cn->bh is NULL");
                                }
 
-                               reiserfs_write_unlock(s);
-                               wait_on_buffer(cn->bh);
-                               reiserfs_write_lock(s);
+                               depth = reiserfs_write_unlock_nested(s);
+                               __wait_on_buffer(cn->bh);
+                               reiserfs_write_lock_nested(s, depth);
 
                                if (!cn->bh) {
                                        reiserfs_panic(s, "journal-1012",
@@ -1974,6 +1979,7 @@ static int journal_compare_desc_commit(struct super_block *sb,
 /* returns 0 if it did not find a description block
 ** returns -1 if it found a corrupt commit block
 ** returns 1 if both desc and commit were valid
+** NOTE: only called during fs mount
 */
 static int journal_transaction_is_valid(struct super_block *sb,
                                        struct buffer_head *d_bh,
@@ -2073,8 +2079,9 @@ static void brelse_array(struct buffer_head **heads, int num)
 
 /*
 ** given the start, and values for the oldest acceptable transactions,
-** this either reads in a replays a transaction, or returns because the transaction
-** is invalid, or too old.
+** this either reads in a replays a transaction, or returns because the
+** transaction is invalid, or too old.
+** NOTE: only called during fs mount
 */
 static int journal_read_transaction(struct super_block *sb,
                                    unsigned long cur_dblock,
@@ -2208,10 +2215,7 @@ static int journal_read_transaction(struct super_block *sb,
        ll_rw_block(READ, get_desc_trans_len(desc), log_blocks);
        for (i = 0; i < get_desc_trans_len(desc); i++) {
 
-               reiserfs_write_unlock(sb);
                wait_on_buffer(log_blocks[i]);
-               reiserfs_write_lock(sb);
-
                if (!buffer_uptodate(log_blocks[i])) {
                        reiserfs_warning(sb, "journal-1212",
                                         "REPLAY FAILURE fsck required! "
@@ -2318,12 +2322,13 @@ static struct buffer_head *reiserfs_breada(struct block_device *dev,
 
 /*
 ** read and replay the log
-** on a clean unmount, the journal header's next unflushed pointer will be to an invalid
-** transaction.  This tests that before finding all the transactions in the log, which makes normal mount times fast.
-**
-** After a crash, this starts with the next unflushed transaction, and replays until it finds one too old, or invalid.
-**
+** on a clean unmount, the journal header's next unflushed pointer will
+** be to an invalid transaction.  This tests that before finding all the
+** transactions in the log, which makes normal mount times fast.
+** After a crash, this starts with the next unflushed transaction, and
+** replays until it finds one too old, or invalid.
 ** On exit, it sets things up so the first transaction will work correctly.
+** NOTE: only called during fs mount
 */
 static int journal_read(struct super_block *sb)
 {
@@ -2501,14 +2506,18 @@ static int journal_read(struct super_block *sb)
                              "replayed %d transactions in %lu seconds\n",
                              replay_count, get_seconds() - start);
        }
+       /* needed to satisfy the locking in _update_journal_header_block */
+       reiserfs_write_lock(sb);
        if (!bdev_read_only(sb->s_bdev) &&
            _update_journal_header_block(sb, journal->j_start,
                                         journal->j_last_flush_trans_id)) {
+               reiserfs_write_unlock(sb);
                /* replay failed, caller must call free_journal_ram and abort
                 ** the mount
                 */
                return -1;
        }
+       reiserfs_write_unlock(sb);
        return 0;
 }
 
@@ -2828,13 +2837,7 @@ int journal_init(struct super_block *sb, const char *j_dev_name,
                goto free_and_return;
        }
 
-       /*
-        * Journal_read needs to be inspected in order to push down
-        * the lock further inside (or even remove it).
-        */
-       reiserfs_write_lock(sb);
        ret = journal_read(sb);
-       reiserfs_write_unlock(sb);
        if (ret < 0) {
                reiserfs_warning(sb, "reiserfs-2006",
                                 "Replay Failure, unable to mount");
@@ -2923,9 +2926,9 @@ static void queue_log_writer(struct super_block *s)
        add_wait_queue(&journal->j_join_wait, &wait);
        set_current_state(TASK_UNINTERRUPTIBLE);
        if (test_bit(J_WRITERS_QUEUED, &journal->j_state)) {
-               reiserfs_write_unlock(s);
+               int depth = reiserfs_write_unlock_nested(s);
                schedule();
-               reiserfs_write_lock(s);
+               reiserfs_write_lock_nested(s, depth);
        }
        __set_current_state(TASK_RUNNING);
        remove_wait_queue(&journal->j_join_wait, &wait);
@@ -2943,9 +2946,12 @@ static void let_transaction_grow(struct super_block *sb, unsigned int trans_id)
        struct reiserfs_journal *journal = SB_JOURNAL(sb);
        unsigned long bcount = journal->j_bcount;
        while (1) {
-               reiserfs_write_unlock(sb);
+               int depth;
+
+               depth = reiserfs_write_unlock_nested(sb);
                schedule_timeout_uninterruptible(1);
-               reiserfs_write_lock(sb);
+               reiserfs_write_lock_nested(sb, depth);
+
                journal->j_current_jl->j_state |= LIST_COMMIT_PENDING;
                while ((atomic_read(&journal->j_wcount) > 0 ||
                        atomic_read(&journal->j_jlock)) &&
@@ -2976,6 +2982,7 @@ static int do_journal_begin_r(struct reiserfs_transaction_handle *th,
        struct reiserfs_transaction_handle myth;
        int sched_count = 0;
        int retval;
+       int depth;
 
        reiserfs_check_lock_depth(sb, "journal_begin");
        BUG_ON(nblocks > journal->j_trans_max);
@@ -2996,9 +3003,9 @@ static int do_journal_begin_r(struct reiserfs_transaction_handle *th,
 
        if (test_bit(J_WRITERS_BLOCKED, &journal->j_state)) {
                unlock_journal(sb);
-               reiserfs_write_unlock(sb);
+               depth = reiserfs_write_unlock_nested(sb);
                reiserfs_wait_on_write_block(sb);
-               reiserfs_write_lock(sb);
+               reiserfs_write_lock_nested(sb, depth);
                PROC_INFO_INC(sb, journal.journal_relock_writers);
                goto relock;
        }
@@ -3821,6 +3828,7 @@ void reiserfs_restore_prepared_buffer(struct super_block *sb,
        if (test_clear_buffer_journal_restore_dirty(bh) &&
            buffer_journal_dirty(bh)) {
                struct reiserfs_journal_cnode *cn;
+               reiserfs_write_lock(sb);
                cn = get_journal_hash_dev(sb,
                                          journal->j_list_hash_table,
                                          bh->b_blocknr);
@@ -3828,6 +3836,7 @@ void reiserfs_restore_prepared_buffer(struct super_block *sb,
                        set_buffer_journal_test(bh);
                        mark_buffer_dirty(bh);
                }
+               reiserfs_write_unlock(sb);
        }
        clear_buffer_journal_prepared(bh);
 }
@@ -3911,6 +3920,7 @@ static int do_journal_end(struct reiserfs_transaction_handle *th,
        unsigned long jindex;
        unsigned int commit_trans_id;
        int trans_half;
+       int depth;
 
        BUG_ON(th->t_refcount > 1);
        BUG_ON(!th->t_trans_id);
@@ -4116,9 +4126,7 @@ static int do_journal_end(struct reiserfs_transaction_handle *th,
                next = cn->next;
                free_cnode(sb, cn);
                cn = next;
-               reiserfs_write_unlock(sb);
-               cond_resched();
-               reiserfs_write_lock(sb);
+               reiserfs_cond_resched(sb);
        }
 
        /* we are done  with both the c_bh and d_bh, but
@@ -4165,10 +4173,10 @@ static int do_journal_end(struct reiserfs_transaction_handle *th,
         * is lost.
         */
        if (!list_empty(&jl->j_tail_bh_list)) {
-               reiserfs_write_unlock(sb);
+               depth = reiserfs_write_unlock_nested(sb);
                write_ordered_buffers(&journal->j_dirty_buffers_lock,
                                      journal, jl, &jl->j_tail_bh_list);
-               reiserfs_write_lock(sb);
+               reiserfs_write_lock_nested(sb, depth);
        }
        BUG_ON(!list_empty(&jl->j_tail_bh_list));
        mutex_unlock(&jl->j_commit_mutex);
index d735bc8470e344f8a27021bb46d0c324704ad7f5..045b83ef9fd9d42264fd55af61775abd62843807 100644 (file)
@@ -48,30 +48,35 @@ void reiserfs_write_unlock(struct super_block *s)
        }
 }
 
-/*
- * If we already own the lock, just exit and don't increase the depth.
- * Useful when we don't want to lock more than once.
- *
- * We always return the lock_depth we had before calling
- * this function.
- */
-int reiserfs_write_lock_once(struct super_block *s)
+int __must_check reiserfs_write_unlock_nested(struct super_block *s)
 {
        struct reiserfs_sb_info *sb_i = REISERFS_SB(s);
+       int depth;
 
-       if (sb_i->lock_owner != current) {
-               mutex_lock(&sb_i->lock);
-               sb_i->lock_owner = current;
-               return sb_i->lock_depth++;
-       }
+       /* this can happen when the lock isn't always held */
+       if (sb_i->lock_owner != current)
+               return -1;
+
+       depth = sb_i->lock_depth;
+
+       sb_i->lock_depth = -1;
+       sb_i->lock_owner = NULL;
+       mutex_unlock(&sb_i->lock);
 
-       return sb_i->lock_depth;
+       return depth;
 }
 
-void reiserfs_write_unlock_once(struct super_block *s, int lock_depth)
+void reiserfs_write_lock_nested(struct super_block *s, int depth)
 {
-       if (lock_depth == -1)
-               reiserfs_write_unlock(s);
+       struct reiserfs_sb_info *sb_i = REISERFS_SB(s);
+
+       /* this can happen when the lock isn't always held */
+       if (depth == -1)
+               return;
+
+       mutex_lock(&sb_i->lock);
+       sb_i->lock_owner = current;
+       sb_i->lock_depth = depth;
 }
 
 /*
@@ -82,9 +87,7 @@ void reiserfs_check_lock_depth(struct super_block *sb, char *caller)
 {
        struct reiserfs_sb_info *sb_i = REISERFS_SB(sb);
 
-       if (sb_i->lock_depth < 0)
-               reiserfs_panic(sb, "%s called without kernel lock held %d",
-                              caller);
+       WARN_ON(sb_i->lock_depth < 0);
 }
 
 #ifdef CONFIG_REISERFS_CHECK
index 8567fb847601ceac1e8a7a6b6048061d4797352d..dc5236f6de1be129e44caf8f78f16d010d7ad7fb 100644 (file)
@@ -325,7 +325,6 @@ static struct dentry *reiserfs_lookup(struct inode *dir, struct dentry *dentry,
                                      unsigned int flags)
 {
        int retval;
-       int lock_depth;
        struct inode *inode = NULL;
        struct reiserfs_dir_entry de;
        INITIALIZE_PATH(path_to_entry);
@@ -333,12 +332,7 @@ static struct dentry *reiserfs_lookup(struct inode *dir, struct dentry *dentry,
        if (REISERFS_MAX_NAME(dir->i_sb->s_blocksize) < dentry->d_name.len)
                return ERR_PTR(-ENAMETOOLONG);
 
-       /*
-        * Might be called with or without the write lock, must be careful
-        * to not recursively hold it in case we want to release the lock
-        * before rescheduling.
-        */
-       lock_depth = reiserfs_write_lock_once(dir->i_sb);
+       reiserfs_write_lock(dir->i_sb);
 
        de.de_gen_number_bit_string = NULL;
        retval =
@@ -349,7 +343,7 @@ static struct dentry *reiserfs_lookup(struct inode *dir, struct dentry *dentry,
                inode = reiserfs_iget(dir->i_sb,
                                      (struct cpu_key *)&(de.de_dir_id));
                if (!inode || IS_ERR(inode)) {
-                       reiserfs_write_unlock_once(dir->i_sb, lock_depth);
+                       reiserfs_write_unlock(dir->i_sb);
                        return ERR_PTR(-EACCES);
                }
 
@@ -358,7 +352,7 @@ static struct dentry *reiserfs_lookup(struct inode *dir, struct dentry *dentry,
                if (IS_PRIVATE(dir))
                        inode->i_flags |= S_PRIVATE;
        }
-       reiserfs_write_unlock_once(dir->i_sb, lock_depth);
+       reiserfs_write_unlock(dir->i_sb);
        if (retval == IO_ERROR) {
                return ERR_PTR(-EIO);
        }
@@ -727,7 +721,6 @@ static int reiserfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode
        struct inode *inode;
        struct reiserfs_transaction_handle th;
        struct reiserfs_security_handle security;
-       int lock_depth;
        /* We need blocks for transaction + (user+group)*(quotas for new inode + update of quota for directory owner) */
        int jbegin_count =
            JOURNAL_PER_BALANCE_CNT * 3 +
@@ -753,7 +746,7 @@ static int reiserfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode
                return retval;
        }
        jbegin_count += retval;
-       lock_depth = reiserfs_write_lock_once(dir->i_sb);
+       reiserfs_write_lock(dir->i_sb);
 
        retval = journal_begin(&th, dir->i_sb, jbegin_count);
        if (retval) {
@@ -804,7 +797,7 @@ static int reiserfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode
        d_instantiate(dentry, inode);
        retval = journal_end(&th, dir->i_sb, jbegin_count);
 out_failed:
-       reiserfs_write_unlock_once(dir->i_sb, lock_depth);
+       reiserfs_write_unlock(dir->i_sb);
        return retval;
 }
 
@@ -920,7 +913,6 @@ static int reiserfs_unlink(struct inode *dir, struct dentry *dentry)
        struct reiserfs_transaction_handle th;
        int jbegin_count;
        unsigned long savelink;
-       int depth;
 
        dquot_initialize(dir);
 
@@ -934,7 +926,7 @@ static int reiserfs_unlink(struct inode *dir, struct dentry *dentry)
            JOURNAL_PER_BALANCE_CNT * 2 + 2 +
            4 * REISERFS_QUOTA_TRANS_BLOCKS(dir->i_sb);
 
-       depth = reiserfs_write_lock_once(dir->i_sb);
+       reiserfs_write_lock(dir->i_sb);
        retval = journal_begin(&th, dir->i_sb, jbegin_count);
        if (retval)
                goto out_unlink;
@@ -995,7 +987,7 @@ static int reiserfs_unlink(struct inode *dir, struct dentry *dentry)
 
        retval = journal_end(&th, dir->i_sb, jbegin_count);
        reiserfs_check_path(&path);
-       reiserfs_write_unlock_once(dir->i_sb, depth);
+       reiserfs_write_unlock(dir->i_sb);
        return retval;
 
       end_unlink:
@@ -1005,7 +997,7 @@ static int reiserfs_unlink(struct inode *dir, struct dentry *dentry)
        if (err)
                retval = err;
       out_unlink:
-       reiserfs_write_unlock_once(dir->i_sb, depth);
+       reiserfs_write_unlock(dir->i_sb);
        return retval;
 }
 
index c0b1112ab7e3ad6b6f310172444ccf017ac09409..54944d5a4a6e170fb55f507e9373456226eb23db 100644 (file)
@@ -358,12 +358,13 @@ void __reiserfs_panic(struct super_block *sb, const char *id,
        dump_stack();
 #endif
        if (sb)
-               panic(KERN_WARNING "REISERFS panic (device %s): %s%s%s: %s\n",
+               printk(KERN_WARNING "REISERFS panic (device %s): %s%s%s: %s\n",
                      sb->s_id, id ? id : "", id ? " " : "",
                      function, error_buf);
        else
-               panic(KERN_WARNING "REISERFS panic: %s%s%s: %s\n",
+               printk(KERN_WARNING "REISERFS panic: %s%s%s: %s\n",
                      id ? id : "", id ? " " : "", function, error_buf);
+       BUG();
 }
 
 void __reiserfs_error(struct super_block *sb, const char *id,
index 3df5ce6c724d5bbafc27e655fc30870a5f35c6e1..f8adaee537c2a39283bc8c4c672f0949fd79d0f4 100644 (file)
@@ -630,8 +630,8 @@ static inline int __reiserfs_is_journal_aborted(struct reiserfs_journal
  */
 void reiserfs_write_lock(struct super_block *s);
 void reiserfs_write_unlock(struct super_block *s);
-int reiserfs_write_lock_once(struct super_block *s);
-void reiserfs_write_unlock_once(struct super_block *s, int lock_depth);
+int __must_check reiserfs_write_unlock_nested(struct super_block *s);
+void reiserfs_write_lock_nested(struct super_block *s, int depth);
 
 #ifdef CONFIG_REISERFS_CHECK
 void reiserfs_lock_check_recursive(struct super_block *s);
@@ -667,31 +667,33 @@ static inline void reiserfs_lock_check_recursive(struct super_block *s) { }
  * - The inode mutex
  */
 static inline void reiserfs_mutex_lock_safe(struct mutex *m,
-                              struct super_block *s)
+                                           struct super_block *s)
 {
-       reiserfs_lock_check_recursive(s);
-       reiserfs_write_unlock(s);
+       int depth;
+
+       depth = reiserfs_write_unlock_nested(s);
        mutex_lock(m);
-       reiserfs_write_lock(s);
+       reiserfs_write_lock_nested(s, depth);
 }
 
 static inline void
 reiserfs_mutex_lock_nested_safe(struct mutex *m, unsigned int subclass,
-                              struct super_block *s)
+                               struct super_block *s)
 {
-       reiserfs_lock_check_recursive(s);
-       reiserfs_write_unlock(s);
+       int depth;
+
+       depth = reiserfs_write_unlock_nested(s);
        mutex_lock_nested(m, subclass);
-       reiserfs_write_lock(s);
+       reiserfs_write_lock_nested(s, depth);
 }
 
 static inline void
 reiserfs_down_read_safe(struct rw_semaphore *sem, struct super_block *s)
 {
-       reiserfs_lock_check_recursive(s);
-       reiserfs_write_unlock(s);
-       down_read(sem);
-       reiserfs_write_lock(s);
+       int depth;
+       depth = reiserfs_write_unlock_nested(s);
+       down_read(sem);
+       reiserfs_write_lock_nested(s, depth);
 }
 
 /*
@@ -701,9 +703,11 @@ reiserfs_down_read_safe(struct rw_semaphore *sem, struct super_block *s)
 static inline void reiserfs_cond_resched(struct super_block *s)
 {
        if (need_resched()) {
-               reiserfs_write_unlock(s);
+               int depth;
+
+               depth = reiserfs_write_unlock_nested(s);
                schedule();
-               reiserfs_write_lock(s);
+               reiserfs_write_lock_nested(s, depth);
        }
 }
 
index 3ce02cff5e90bd1c26374e12e15a6f56ea8c8803..a4ef5cd606eb5a9612291c7f177047809ffefe74 100644 (file)
@@ -34,6 +34,7 @@ int reiserfs_resize(struct super_block *s, unsigned long block_count_new)
        unsigned long int block_count, free_blocks;
        int i;
        int copy_size;
+       int depth;
 
        sb = SB_DISK_SUPER_BLOCK(s);
 
@@ -43,7 +44,9 @@ int reiserfs_resize(struct super_block *s, unsigned long block_count_new)
        }
 
        /* check the device size */
+       depth = reiserfs_write_unlock_nested(s);
        bh = sb_bread(s, block_count_new - 1);
+       reiserfs_write_lock_nested(s, depth);
        if (!bh) {
                printk("reiserfs_resize: can\'t read last block\n");
                return -EINVAL;
@@ -125,9 +128,12 @@ int reiserfs_resize(struct super_block *s, unsigned long block_count_new)
                 * transaction begins, and the new bitmaps don't matter if the
                 * transaction fails. */
                for (i = bmap_nr; i < bmap_nr_new; i++) {
+                       int depth;
                        /* don't use read_bitmap_block since it will cache
                         * the uninitialized bitmap */
+                       depth = reiserfs_write_unlock_nested(s);
                        bh = sb_bread(s, i * s->s_blocksize * 8);
+                       reiserfs_write_lock_nested(s, depth);
                        if (!bh) {
                                vfree(bitmap);
                                return -EIO;
@@ -138,9 +144,9 @@ int reiserfs_resize(struct super_block *s, unsigned long block_count_new)
 
                        set_buffer_uptodate(bh);
                        mark_buffer_dirty(bh);
-                       reiserfs_write_unlock(s);
+                       depth = reiserfs_write_unlock_nested(s);
                        sync_dirty_buffer(bh);
-                       reiserfs_write_lock(s);
+                       reiserfs_write_lock_nested(s, depth);
                        // update bitmap_info stuff
                        bitmap[i].free_count = sb_blocksize(sb) * 8 - 1;
                        brelse(bh);
index 2f40a4c70a4d9667ca34e00ef2b31888255276c2..b14706a05d520d4b960681cc6dd8d5e6231e32d2 100644 (file)
@@ -524,14 +524,14 @@ static int is_tree_node(struct buffer_head *bh, int level)
  * the caller (search_by_key) will perform other schedule-unsafe
  * operations just after calling this function.
  *
- * @return true if we have unlocked
+ * @return depth of lock to be restored after read completes
  */
-static bool search_by_key_reada(struct super_block *s,
+static int search_by_key_reada(struct super_block *s,
                                struct buffer_head **bh,
                                b_blocknr_t *b, int num)
 {
        int i, j;
-       bool unlocked = false;
+       int depth = -1;
 
        for (i = 0; i < num; i++) {
                bh[i] = sb_getblk(s, b[i]);
@@ -549,15 +549,13 @@ static bool search_by_key_reada(struct super_block *s,
                 * you have to make sure the prepared bit isn't set on this buffer
                 */
                if (!buffer_uptodate(bh[j])) {
-                       if (!unlocked) {
-                               reiserfs_write_unlock(s);
-                               unlocked = true;
-                       }
+                       if (depth == -1)
+                               depth = reiserfs_write_unlock_nested(s);
                        ll_rw_block(READA, 1, bh + j);
                }
                brelse(bh[j]);
        }
-       return unlocked;
+       return depth;
 }
 
 /**************************************************************************
@@ -645,26 +643,26 @@ int search_by_key(struct super_block *sb, const struct cpu_key *key,      /* Key to s
                   have a pointer to it. */
                if ((bh = last_element->pe_buffer =
                     sb_getblk(sb, block_number))) {
-                       bool unlocked = false;
 
-                       if (!buffer_uptodate(bh) && reada_count > 1)
-                               /* may unlock the write lock */
-                               unlocked = search_by_key_reada(sb, reada_bh,
-                                                   reada_blocks, reada_count);
                        /*
-                        * If we haven't already unlocked the write lock,
-                        * then we need to do that here before reading
-                        * the current block
+                        * We'll need to drop the lock if we encounter any
+                        * buffers that need to be read. If all of them are
+                        * already up to date, we don't need to drop the lock.
                         */
-                       if (!buffer_uptodate(bh) && !unlocked) {
-                               reiserfs_write_unlock(sb);
-                               unlocked = true;
-                       }
+                       int depth = -1;
+
+                       if (!buffer_uptodate(bh) && reada_count > 1)
+                               depth = search_by_key_reada(sb, reada_bh,
+                                                   reada_blocks, reada_count);
+
+                       if (!buffer_uptodate(bh) && depth == -1)
+                               depth = reiserfs_write_unlock_nested(sb);
+
                        ll_rw_block(READ, 1, &bh);
                        wait_on_buffer(bh);
 
-                       if (unlocked)
-                               reiserfs_write_lock(sb);
+                       if (depth != -1)
+                               reiserfs_write_lock_nested(sb, depth);
                        if (!buffer_uptodate(bh))
                                goto io_error;
                } else {
@@ -1059,9 +1057,7 @@ static char prepare_for_delete_or_cut(struct reiserfs_transaction_handle *th, st
                        reiserfs_free_block(th, inode, block, 1);
                    }
 
-                   reiserfs_write_unlock(sb);
-                   cond_resched();
-                   reiserfs_write_lock(sb);
+                   reiserfs_cond_resched(sb);
 
                    if (item_moved (&s_ih, path))  {
                        need_re_search = 1;
@@ -1190,6 +1186,7 @@ int reiserfs_delete_item(struct reiserfs_transaction_handle *th,
        struct item_head *q_ih;
        int quota_cut_bytes;
        int ret_value, del_size, removed;
+       int depth;
 
 #ifdef CONFIG_REISERFS_CHECK
        char mode;
@@ -1299,7 +1296,9 @@ int reiserfs_delete_item(struct reiserfs_transaction_handle *th,
                       "reiserquota delete_item(): freeing %u, id=%u type=%c",
                       quota_cut_bytes, inode->i_uid, head2type(&s_ih));
 #endif
+       depth = reiserfs_write_unlock_nested(inode->i_sb);
        dquot_free_space_nodirty(inode, quota_cut_bytes);
+       reiserfs_write_lock_nested(inode->i_sb, depth);
 
        /* Return deleted body length */
        return ret_value;
@@ -1325,6 +1324,7 @@ int reiserfs_delete_item(struct reiserfs_transaction_handle *th,
 void reiserfs_delete_solid_item(struct reiserfs_transaction_handle *th,
                                struct inode *inode, struct reiserfs_key *key)
 {
+       struct super_block *sb = th->t_super;
        struct tree_balance tb;
        INITIALIZE_PATH(path);
        int item_len = 0;
@@ -1377,14 +1377,17 @@ void reiserfs_delete_solid_item(struct reiserfs_transaction_handle *th,
                if (retval == CARRY_ON) {
                        do_balance(&tb, NULL, NULL, M_DELETE);
                        if (inode) {    /* Should we count quota for item? (we don't count quotas for save-links) */
+                               int depth;
 #ifdef REISERQUOTA_DEBUG
                                reiserfs_debug(th->t_super, REISERFS_DEBUG_CODE,
                                               "reiserquota delete_solid_item(): freeing %u id=%u type=%c",
                                               quota_cut_bytes, inode->i_uid,
                                               key2type(key));
 #endif
+                               depth = reiserfs_write_unlock_nested(sb);
                                dquot_free_space_nodirty(inode,
                                                         quota_cut_bytes);
+                               reiserfs_write_lock_nested(sb, depth);
                        }
                        break;
                }
@@ -1561,6 +1564,7 @@ int reiserfs_cut_from_item(struct reiserfs_transaction_handle *th,
        int retval2 = -1;
        int quota_cut_bytes;
        loff_t tail_pos = 0;
+       int depth;
 
        BUG_ON(!th->t_trans_id);
 
@@ -1733,7 +1737,9 @@ int reiserfs_cut_from_item(struct reiserfs_transaction_handle *th,
                       "reiserquota cut_from_item(): freeing %u id=%u type=%c",
                       quota_cut_bytes, inode->i_uid, '?');
 #endif
+       depth = reiserfs_write_unlock_nested(sb);
        dquot_free_space_nodirty(inode, quota_cut_bytes);
+       reiserfs_write_lock_nested(sb, depth);
        return ret_value;
 }
 
@@ -1953,9 +1959,11 @@ int reiserfs_paste_into_item(struct reiserfs_transaction_handle *th, struct tree
                             const char *body,  /* Pointer to the bytes to paste.    */
                             int pasted_size)
 {                              /* Size of pasted bytes.             */
+       struct super_block *sb = inode->i_sb;
        struct tree_balance s_paste_balance;
        int retval;
        int fs_gen;
+       int depth;
 
        BUG_ON(!th->t_trans_id);
 
@@ -1968,9 +1976,9 @@ int reiserfs_paste_into_item(struct reiserfs_transaction_handle *th, struct tree
                       key2type(&(key->on_disk_key)));
 #endif
 
-       reiserfs_write_unlock(inode->i_sb);
+       depth = reiserfs_write_unlock_nested(sb);
        retval = dquot_alloc_space_nodirty(inode, pasted_size);
-       reiserfs_write_lock(inode->i_sb);
+       reiserfs_write_lock_nested(sb, depth);
        if (retval) {
                pathrelse(search_path);
                return retval;
@@ -2027,7 +2035,9 @@ int reiserfs_paste_into_item(struct reiserfs_transaction_handle *th, struct tree
                       pasted_size, inode->i_uid,
                       key2type(&(key->on_disk_key)));
 #endif
+       depth = reiserfs_write_unlock_nested(sb);
        dquot_free_space_nodirty(inode, pasted_size);
+       reiserfs_write_lock_nested(sb, depth);
        return retval;
 }
 
@@ -2050,6 +2060,7 @@ int reiserfs_insert_item(struct reiserfs_transaction_handle *th,
        BUG_ON(!th->t_trans_id);
 
        if (inode) {            /* Do we count quotas for item? */
+               int depth;
                fs_gen = get_generation(inode->i_sb);
                quota_bytes = ih_item_len(ih);
 
@@ -2063,11 +2074,11 @@ int reiserfs_insert_item(struct reiserfs_transaction_handle *th,
                               "reiserquota insert_item(): allocating %u id=%u type=%c",
                               quota_bytes, inode->i_uid, head2type(ih));
 #endif
-               reiserfs_write_unlock(inode->i_sb);
                /* We can't dirty inode here. It would be immediately written but
                 * appropriate stat item isn't inserted yet... */
+               depth = reiserfs_write_unlock_nested(inode->i_sb);
                retval = dquot_alloc_space_nodirty(inode, quota_bytes);
-               reiserfs_write_lock(inode->i_sb);
+               reiserfs_write_lock_nested(inode->i_sb, depth);
                if (retval) {
                        pathrelse(path);
                        return retval;
@@ -2118,7 +2129,10 @@ int reiserfs_insert_item(struct reiserfs_transaction_handle *th,
                       "reiserquota insert_item(): freeing %u id=%u type=%c",
                       quota_bytes, inode->i_uid, head2type(ih));
 #endif
-       if (inode)
+       if (inode) {
+               int depth = reiserfs_write_unlock_nested(inode->i_sb);
                dquot_free_space_nodirty(inode, quota_bytes);
+               reiserfs_write_lock_nested(inode->i_sb, depth);
+       }
        return retval;
 }
index e2e202a07b317cff5c84d8b51e3b66efafd689ea..3ead145dadc406646f742f519befe3ef9a2db6b9 100644 (file)
@@ -243,6 +243,7 @@ static int finish_unfinished(struct super_block *s)
        done = 0;
        REISERFS_SB(s)->s_is_unlinked_ok = 1;
        while (!retval) {
+               int depth;
                retval = search_item(s, &max_cpu_key, &path);
                if (retval != ITEM_NOT_FOUND) {
                        reiserfs_error(s, "vs-2140",
@@ -298,9 +299,9 @@ static int finish_unfinished(struct super_block *s)
                        retval = remove_save_link_only(s, &save_link_key, 0);
                        continue;
                }
-               reiserfs_write_unlock(s);
+               depth = reiserfs_write_unlock_nested(inode->i_sb);
                dquot_initialize(inode);
-               reiserfs_write_lock(s);
+               reiserfs_write_lock_nested(inode->i_sb, depth);
 
                if (truncate && S_ISDIR(inode->i_mode)) {
                        /* We got a truncate request for a dir which is impossible.
@@ -356,10 +357,12 @@ static int finish_unfinished(struct super_block *s)
 
 #ifdef CONFIG_QUOTA
        /* Turn quotas off */
+       reiserfs_write_unlock(s);
        for (i = 0; i < MAXQUOTAS; i++) {
                if (sb_dqopt(s)->files[i] && quota_enabled[i])
                        dquot_quota_off(s, i);
        }
+       reiserfs_write_lock(s);
        if (ms_active_set)
                /* Restore the flag back */
                s->s_flags &= ~MS_ACTIVE;
@@ -623,7 +626,6 @@ static void reiserfs_dirty_inode(struct inode *inode, int flags)
        struct reiserfs_transaction_handle th;
 
        int err = 0;
-       int lock_depth;
 
        if (inode->i_sb->s_flags & MS_RDONLY) {
                reiserfs_warning(inode->i_sb, "clm-6006",
@@ -631,7 +633,7 @@ static void reiserfs_dirty_inode(struct inode *inode, int flags)
                                 inode->i_ino);
                return;
        }
-       lock_depth = reiserfs_write_lock_once(inode->i_sb);
+       reiserfs_write_lock(inode->i_sb);
 
        /* this is really only used for atime updates, so they don't have
         ** to be included in O_SYNC or fsync
@@ -644,7 +646,7 @@ static void reiserfs_dirty_inode(struct inode *inode, int flags)
        journal_end(&th, inode->i_sb, 1);
 
 out:
-       reiserfs_write_unlock_once(inode->i_sb, lock_depth);
+       reiserfs_write_unlock(inode->i_sb);
 }
 
 static int reiserfs_show_options(struct seq_file *seq, struct dentry *root)
@@ -1334,7 +1336,7 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
                                kfree(qf_names[i]);
 #endif
                err = -EINVAL;
-               goto out_unlock;
+               goto out_err_unlock;
        }
 #ifdef CONFIG_QUOTA
        handle_quota_files(s, qf_names, &qfmt);
@@ -1378,35 +1380,32 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
        if (blocks) {
                err = reiserfs_resize(s, blocks);
                if (err != 0)
-                       goto out_unlock;
+                       goto out_err_unlock;
        }
 
        if (*mount_flags & MS_RDONLY) {
+               reiserfs_write_unlock(s);
                reiserfs_xattr_init(s, *mount_flags);
                /* remount read-only */
                if (s->s_flags & MS_RDONLY)
                        /* it is read-only already */
-                       goto out_ok;
+                       goto out_ok_unlocked;
 
-               /*
-                * Drop write lock. Quota will retake it when needed and lock
-                * ordering requires calling dquot_suspend() without it.
-                */
-               reiserfs_write_unlock(s);
                err = dquot_suspend(s, -1);
                if (err < 0)
                        goto out_err;
-               reiserfs_write_lock(s);
 
                /* try to remount file system with read-only permissions */
                if (sb_umount_state(rs) == REISERFS_VALID_FS
                    || REISERFS_SB(s)->s_mount_state != REISERFS_VALID_FS) {
-                       goto out_ok;
+                       goto out_ok_unlocked;
                }
 
+               reiserfs_write_lock(s);
+
                err = journal_begin(&th, s, 10);
                if (err)
-                       goto out_unlock;
+                       goto out_err_unlock;
 
                /* Mounting a rw partition read-only. */
                reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1);
@@ -1415,13 +1414,14 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
        } else {
                /* remount read-write */
                if (!(s->s_flags & MS_RDONLY)) {
+                       reiserfs_write_unlock(s);
                        reiserfs_xattr_init(s, *mount_flags);
-                       goto out_ok   /* We are read-write already */
+                       goto out_ok_unlocked;   /* We are read-write already */
                }
 
                if (reiserfs_is_journal_aborted(journal)) {
                        err = journal->j_errno;
-                       goto out_unlock;
+                       goto out_err_unlock;
                }
 
                handle_data_mode(s, mount_options);
@@ -1430,7 +1430,7 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
                s->s_flags &= ~MS_RDONLY;       /* now it is safe to call journal_begin */
                err = journal_begin(&th, s, 10);
                if (err)
-                       goto out_unlock;
+                       goto out_err_unlock;
 
                /* Mount a partition which is read-only, read-write */
                reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1);
@@ -1447,26 +1447,22 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
        SB_JOURNAL(s)->j_must_wait = 1;
        err = journal_end(&th, s, 10);
        if (err)
-               goto out_unlock;
+               goto out_err_unlock;
 
+       reiserfs_write_unlock(s);
        if (!(*mount_flags & MS_RDONLY)) {
-               /*
-                * Drop write lock. Quota will retake it when needed and lock
-                * ordering requires calling dquot_resume() without it.
-                */
-               reiserfs_write_unlock(s);
                dquot_resume(s, -1);
                reiserfs_write_lock(s);
                finish_unfinished(s);
+               reiserfs_write_unlock(s);
                reiserfs_xattr_init(s, *mount_flags);
        }
 
-out_ok:
+out_ok_unlocked:
        replace_mount_options(s, new_opts);
-       reiserfs_write_unlock(s);
        return 0;
 
-out_unlock:
+out_err_unlock:
        reiserfs_write_unlock(s);
 out_err:
        kfree(new_opts);
@@ -2013,12 +2009,14 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
                        goto error;
                }
 
+               reiserfs_write_unlock(s);
                if ((errval = reiserfs_lookup_privroot(s)) ||
                    (errval = reiserfs_xattr_init(s, s->s_flags))) {
                        dput(s->s_root);
                        s->s_root = NULL;
-                       goto error;
+                       goto error_unlocked;
                }
+               reiserfs_write_lock(s);
 
                /* look for files which were to be removed in previous session */
                finish_unfinished(s);
@@ -2027,12 +2025,14 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
                        reiserfs_info(s, "using 3.5.x disk format\n");
                }
 
+               reiserfs_write_unlock(s);
                if ((errval = reiserfs_lookup_privroot(s)) ||
                    (errval = reiserfs_xattr_init(s, s->s_flags))) {
                        dput(s->s_root);
                        s->s_root = NULL;
-                       goto error;
+                       goto error_unlocked;
                }
+               reiserfs_write_lock(s);
        }
        // mark hash in super block: it could be unset. overwrite should be ok
        set_sb_hash_function_code(rs, function2code(sbi->s_hash_function));
@@ -2100,6 +2100,7 @@ static int reiserfs_write_dquot(struct dquot *dquot)
 {
        struct reiserfs_transaction_handle th;
        int ret, err;
+       int depth;
 
        reiserfs_write_lock(dquot->dq_sb);
        ret =
@@ -2107,9 +2108,9 @@ static int reiserfs_write_dquot(struct dquot *dquot)
                          REISERFS_QUOTA_TRANS_BLOCKS(dquot->dq_sb));
        if (ret)
                goto out;
-       reiserfs_write_unlock(dquot->dq_sb);
+       depth = reiserfs_write_unlock_nested(dquot->dq_sb);
        ret = dquot_commit(dquot);
-       reiserfs_write_lock(dquot->dq_sb);
+       reiserfs_write_lock_nested(dquot->dq_sb, depth);
        err =
            journal_end(&th, dquot->dq_sb,
                        REISERFS_QUOTA_TRANS_BLOCKS(dquot->dq_sb));
@@ -2124,6 +2125,7 @@ static int reiserfs_acquire_dquot(struct dquot *dquot)
 {
        struct reiserfs_transaction_handle th;
        int ret, err;
+       int depth;
 
        reiserfs_write_lock(dquot->dq_sb);
        ret =
@@ -2131,9 +2133,9 @@ static int reiserfs_acquire_dquot(struct dquot *dquot)
                          REISERFS_QUOTA_INIT_BLOCKS(dquot->dq_sb));
        if (ret)
                goto out;
-       reiserfs_write_unlock(dquot->dq_sb);
+       depth = reiserfs_write_unlock_nested(dquot->dq_sb);
        ret = dquot_acquire(dquot);
-       reiserfs_write_lock(dquot->dq_sb);
+       reiserfs_write_lock_nested(dquot->dq_sb, depth);
        err =
            journal_end(&th, dquot->dq_sb,
                        REISERFS_QUOTA_INIT_BLOCKS(dquot->dq_sb));
@@ -2186,15 +2188,16 @@ static int reiserfs_write_info(struct super_block *sb, int type)
 {
        struct reiserfs_transaction_handle th;
        int ret, err;
+       int depth;
 
        /* Data block + inode block */
        reiserfs_write_lock(sb);
        ret = journal_begin(&th, sb, 2);
        if (ret)
                goto out;
-       reiserfs_write_unlock(sb);
+       depth = reiserfs_write_unlock_nested(sb);
        ret = dquot_commit_info(sb, type);
-       reiserfs_write_lock(sb);
+       reiserfs_write_lock_nested(sb, depth);
        err = journal_end(&th, sb, 2);
        if (!ret && err)
                ret = err;
index c69cdd749f09b98b702c103062262c38b92606e1..8a9e2dcfe004919da6fe48e4c292c7ddfeef8476 100644 (file)
@@ -81,8 +81,7 @@ static int xattr_unlink(struct inode *dir, struct dentry *dentry)
        int error;
        BUG_ON(!mutex_is_locked(&dir->i_mutex));
 
-       reiserfs_mutex_lock_nested_safe(&dentry->d_inode->i_mutex,
-                                       I_MUTEX_CHILD, dir->i_sb);
+       mutex_lock_nested(&dentry->d_inode->i_mutex, I_MUTEX_CHILD);
        error = dir->i_op->unlink(dir, dentry);
        mutex_unlock(&dentry->d_inode->i_mutex);
 
@@ -96,8 +95,7 @@ static int xattr_rmdir(struct inode *dir, struct dentry *dentry)
        int error;
        BUG_ON(!mutex_is_locked(&dir->i_mutex));
 
-       reiserfs_mutex_lock_nested_safe(&dentry->d_inode->i_mutex,
-                                       I_MUTEX_CHILD, dir->i_sb);
+       mutex_lock_nested(&dentry->d_inode->i_mutex, I_MUTEX_CHILD);
        error = dir->i_op->rmdir(dir, dentry);
        if (!error)
                dentry->d_inode->i_flags |= S_DEAD;
@@ -232,22 +230,17 @@ static int reiserfs_for_each_xattr(struct inode *inode,
        if (IS_PRIVATE(inode) || get_inode_sd_version(inode) == STAT_DATA_V1)
                return 0;
 
-       reiserfs_write_unlock(inode->i_sb);
        dir = open_xa_dir(inode, XATTR_REPLACE);
        if (IS_ERR(dir)) {
                err = PTR_ERR(dir);
-               reiserfs_write_lock(inode->i_sb);
                goto out;
        } else if (!dir->d_inode) {
                err = 0;
-               reiserfs_write_lock(inode->i_sb);
                goto out_dir;
        }
 
        mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_XATTR);
 
-       reiserfs_write_lock(inode->i_sb);
-
        buf.xadir = dir;
        while (1) {
                err = reiserfs_readdir_inode(dir->d_inode, &buf.ctx);
@@ -281,14 +274,17 @@ static int reiserfs_for_each_xattr(struct inode *inode,
                int blocks = JOURNAL_PER_BALANCE_CNT * 2 + 2 +
                             4 * REISERFS_QUOTA_TRANS_BLOCKS(inode->i_sb);
                struct reiserfs_transaction_handle th;
+               reiserfs_write_lock(inode->i_sb);
                err = journal_begin(&th, inode->i_sb, blocks);
+               reiserfs_write_unlock(inode->i_sb);
                if (!err) {
                        int jerror;
-                       reiserfs_mutex_lock_nested_safe(
-                                         &dir->d_parent->d_inode->i_mutex,
-                                         I_MUTEX_XATTR, inode->i_sb);
+                       mutex_lock_nested(&dir->d_parent->d_inode->i_mutex,
+                                         I_MUTEX_XATTR);
                        err = action(dir, data);
+                       reiserfs_write_lock(inode->i_sb);
                        jerror = journal_end(&th, inode->i_sb, blocks);
+                       reiserfs_write_unlock(inode->i_sb);
                        mutex_unlock(&dir->d_parent->d_inode->i_mutex);
                        err = jerror ?: err;
                }
@@ -455,9 +451,7 @@ static int lookup_and_delete_xattr(struct inode *inode, const char *name)
        }
 
        if (dentry->d_inode) {
-               reiserfs_write_lock(inode->i_sb);
                err = xattr_unlink(xadir->d_inode, dentry);
-               reiserfs_write_unlock(inode->i_sb);
                update_ctime(inode);
        }
 
@@ -491,24 +485,17 @@ reiserfs_xattr_set_handle(struct reiserfs_transaction_handle *th,
        if (get_inode_sd_version(inode) == STAT_DATA_V1)
                return -EOPNOTSUPP;
 
-       reiserfs_write_unlock(inode->i_sb);
-
        if (!buffer) {
                err = lookup_and_delete_xattr(inode, name);
-               reiserfs_write_lock(inode->i_sb);
                return err;
        }
 
        dentry = xattr_lookup(inode, name, flags);
-       if (IS_ERR(dentry)) {
-               reiserfs_write_lock(inode->i_sb);
+       if (IS_ERR(dentry))
                return PTR_ERR(dentry);
-       }
 
        down_write(&REISERFS_I(inode)->i_xattr_sem);
 
-       reiserfs_write_lock(inode->i_sb);
-
        xahash = xattr_hash(buffer, buffer_size);
        while (buffer_pos < buffer_size || buffer_pos == 0) {
                size_t chunk;
@@ -538,6 +525,7 @@ reiserfs_xattr_set_handle(struct reiserfs_transaction_handle *th,
                        rxh->h_hash = cpu_to_le32(xahash);
                }
 
+               reiserfs_write_lock(inode->i_sb);
                err = __reiserfs_write_begin(page, page_offset, chunk + skip);
                if (!err) {
                        if (buffer)
@@ -546,6 +534,7 @@ reiserfs_xattr_set_handle(struct reiserfs_transaction_handle *th,
                                                    page_offset + chunk +
                                                    skip);
                }
+               reiserfs_write_unlock(inode->i_sb);
                unlock_page(page);
                reiserfs_put_page(page);
                buffer_pos += chunk;
@@ -563,10 +552,8 @@ reiserfs_xattr_set_handle(struct reiserfs_transaction_handle *th,
                        .ia_valid = ATTR_SIZE | ATTR_CTIME,
                };
 
-               reiserfs_write_unlock(inode->i_sb);
                mutex_lock_nested(&dentry->d_inode->i_mutex, I_MUTEX_XATTR);
                inode_dio_wait(dentry->d_inode);
-               reiserfs_write_lock(inode->i_sb);
 
                err = reiserfs_setattr(dentry, &newattrs);
                mutex_unlock(&dentry->d_inode->i_mutex);
@@ -592,18 +579,19 @@ int reiserfs_xattr_set(struct inode *inode, const char *name,
 
        reiserfs_write_lock(inode->i_sb);
        error = journal_begin(&th, inode->i_sb, jbegin_count);
+       reiserfs_write_unlock(inode->i_sb);
        if (error) {
-               reiserfs_write_unlock(inode->i_sb);
                return error;
        }
 
        error = reiserfs_xattr_set_handle(&th, inode, name,
                                          buffer, buffer_size, flags);
 
+       reiserfs_write_lock(inode->i_sb);
        error2 = journal_end(&th, inode->i_sb, jbegin_count);
+       reiserfs_write_unlock(inode->i_sb);
        if (error == 0)
                error = error2;
-       reiserfs_write_unlock(inode->i_sb);
 
        return error;
 }
@@ -968,7 +956,7 @@ int reiserfs_lookup_privroot(struct super_block *s)
        int err = 0;
 
        /* If we don't have the privroot located yet - go find it */
-       reiserfs_mutex_lock_safe(&s->s_root->d_inode->i_mutex, s);
+       mutex_lock(&s->s_root->d_inode->i_mutex);
        dentry = lookup_one_len(PRIVROOT_NAME, s->s_root,
                                strlen(PRIVROOT_NAME));
        if (!IS_ERR(dentry)) {
@@ -996,14 +984,14 @@ int reiserfs_xattr_init(struct super_block *s, int mount_flags)
                goto error;
 
        if (!privroot->d_inode && !(mount_flags & MS_RDONLY)) {
-               reiserfs_mutex_lock_safe(&s->s_root->d_inode->i_mutex, s);
+               mutex_lock(&s->s_root->d_inode->i_mutex);
                err = create_privroot(REISERFS_SB(s)->priv_root);
                mutex_unlock(&s->s_root->d_inode->i_mutex);
        }
 
        if (privroot->d_inode) {
                s->s_xattr = reiserfs_xattr_handlers;
-               reiserfs_mutex_lock_safe(&privroot->d_inode->i_mutex, s);
+               mutex_lock(&privroot->d_inode->i_mutex);
                if (!REISERFS_SB(s)->xattr_root) {
                        struct dentry *dentry;
                        dentry = lookup_one_len(XAROOT_NAME, privroot,
index 6c8767fdfc6a287b0a9d201b4048d89b06fecaad..06c04f73da6529d012ee0c8b0d364cef939709ea 100644 (file)
@@ -49,13 +49,15 @@ posix_acl_set(struct dentry *dentry, const char *name, const void *value,
 
        reiserfs_write_lock(inode->i_sb);
        error = journal_begin(&th, inode->i_sb, jcreate_blocks);
+       reiserfs_write_unlock(inode->i_sb);
        if (error == 0) {
                error = reiserfs_set_acl(&th, inode, type, acl);
+               reiserfs_write_lock(inode->i_sb);
                error2 = journal_end(&th, inode->i_sb, jcreate_blocks);
+               reiserfs_write_unlock(inode->i_sb);
                if (error2)
                        error = error2;
        }
-       reiserfs_write_unlock(inode->i_sb);
 
       release_and_out:
        posix_acl_release(acl);
@@ -435,12 +437,14 @@ int reiserfs_cache_default_acl(struct inode *inode)
        return nblocks;
 }
 
+/*
+ * Called under i_mutex
+ */
 int reiserfs_acl_chmod(struct inode *inode)
 {
        struct reiserfs_transaction_handle th;
        struct posix_acl *acl;
        size_t size;
-       int depth;
        int error;
 
        if (IS_PRIVATE(inode))
@@ -454,9 +458,7 @@ int reiserfs_acl_chmod(struct inode *inode)
                return 0;
        }
 
-       reiserfs_write_unlock(inode->i_sb);
        acl = reiserfs_get_acl(inode, ACL_TYPE_ACCESS);
-       reiserfs_write_lock(inode->i_sb);
        if (!acl)
                return 0;
        if (IS_ERR(acl))
@@ -466,16 +468,18 @@ int reiserfs_acl_chmod(struct inode *inode)
                return error;
 
        size = reiserfs_xattr_nblocks(inode, reiserfs_acl_size(acl->a_count));
-       depth = reiserfs_write_lock_once(inode->i_sb);
+       reiserfs_write_lock(inode->i_sb);
        error = journal_begin(&th, inode->i_sb, size * 2);
+       reiserfs_write_unlock(inode->i_sb);
        if (!error) {
                int error2;
                error = reiserfs_set_acl(&th, inode, ACL_TYPE_ACCESS, acl);
+               reiserfs_write_lock(inode->i_sb);
                error2 = journal_end(&th, inode->i_sb, size * 2);
+               reiserfs_write_unlock(inode->i_sb);
                if (error2)
                        error = error2;
        }
-       reiserfs_write_unlock_once(inode->i_sb, depth);
        posix_acl_release(acl);
        return error;
 }
index f373bde8f545da481ba0a7caa873271dd599b30d..f8a9e2bf8d8bf3b04b9a68133cbbfb62e4d2f49e 100644 (file)
@@ -73,7 +73,7 @@ static int romfs_mmap(struct file *file, struct vm_area_struct *vma)
 const struct file_operations romfs_ro_fops = {
        .llseek                 = generic_file_llseek,
        .read                   = do_sync_read,
-       .aio_read               = generic_file_aio_read,
+       .read_iter              = generic_file_read_iter,
        .splice_read            = generic_file_splice_read,
        .mmap                   = romfs_mmap,
        .get_unmapped_area      = romfs_get_unmapped_area,
index 04ce1ac20d20b393d13bdfcd0e24c8ca4cde743e..d0ea7ef75e264ef2cccf2c100d4e3a8c7393121c 100644 (file)
--- a/fs/stat.c
+++ b/fs/stat.c
@@ -447,9 +447,8 @@ void inode_add_bytes(struct inode *inode, loff_t bytes)
 
 EXPORT_SYMBOL(inode_add_bytes);
 
-void inode_sub_bytes(struct inode *inode, loff_t bytes)
+void __inode_sub_bytes(struct inode *inode, loff_t bytes)
 {
-       spin_lock(&inode->i_lock);
        inode->i_blocks -= bytes >> 9;
        bytes &= 511;
        if (inode->i_bytes < bytes) {
@@ -457,6 +456,14 @@ void inode_sub_bytes(struct inode *inode, loff_t bytes)
                inode->i_bytes += 512;
        }
        inode->i_bytes -= bytes;
+}
+
+EXPORT_SYMBOL(__inode_sub_bytes);
+
+void inode_sub_bytes(struct inode *inode, loff_t bytes)
+{
+       spin_lock(&inode->i_lock);
+       __inode_sub_bytes(inode, bytes);
        spin_unlock(&inode->i_lock);
 }
 
index 9d4dc6831792a23270148c2d26b0423f656b505c..ff4b363ba5c95c78bbe92a4203dbd86e338c28de 100644 (file)
@@ -22,9 +22,9 @@
 const struct file_operations sysv_file_operations = {
        .llseek         = generic_file_llseek,
        .read           = do_sync_read,
-       .aio_read       = generic_file_aio_read,
+       .read_iter      = generic_file_read_iter,
        .write          = do_sync_write,
-       .aio_write      = generic_file_aio_write,
+       .write_iter     = generic_file_write_iter,
        .mmap           = generic_file_mmap,
        .fsync          = generic_file_fsync,
        .splice_read    = generic_file_splice_read,
index 7f60e900edff6ff758e84ddf95f0da693570832e..6e025e02ffde80c924bec1b90b4fe62b7d163121 100644 (file)
@@ -2587,10 +2587,11 @@ int dbg_leb_write(struct ubifs_info *c, int lnum, const void *buf,
                return -EROFS;
 
        failing = power_cut_emulated(c, lnum, 1);
-       if (failing)
+       if (failing) {
                len = corrupt_data(c, buf, len);
-       ubifs_warn("actually write %d bytes to LEB %d:%d (the buffer was corrupted)",
-                  len, lnum, offs);
+               ubifs_warn("actually write %d bytes to LEB %d:%d (the buffer was corrupted)",
+                          len, lnum, offs);
+       }
        err = ubi_leb_write(c->ubi, lnum, buf, offs, len);
        if (err)
                return err;
index 123c79b7261ef8092e57477bd1141d365a0a2a3f..22924e048ac04aab77dba789e096a093f118e50f 100644 (file)
@@ -44,7 +44,7 @@
  * 'ubifs_writepage()' we are only guaranteed that the page is locked.
  *
  * Similarly, @i_mutex is not always locked in 'ubifs_readpage()', e.g., the
- * read-ahead path does not lock it ("sys_read -> generic_file_aio_read ->
+ * read-ahead path does not lock it ("sys_read -> generic_file_read_iter ->
  * ondemand_readahead -> readpage"). In case of readahead, @I_SYNC flag is not
  * set as well. However, UBIFS disables readahead.
  */
@@ -1396,8 +1396,8 @@ static int update_mctime(struct ubifs_info *c, struct inode *inode)
        return 0;
 }
 
-static ssize_t ubifs_aio_write(struct kiocb *iocb, const struct iovec *iov,
-                              unsigned long nr_segs, loff_t pos)
+static ssize_t ubifs_write_iter(struct kiocb *iocb, struct iov_iter *iter,
+                               loff_t pos)
 {
        int err;
        struct inode *inode = iocb->ki_filp->f_mapping->host;
@@ -1407,7 +1407,7 @@ static ssize_t ubifs_aio_write(struct kiocb *iocb, const struct iovec *iov,
        if (err)
                return err;
 
-       return generic_file_aio_write(iocb, iov, nr_segs, pos);
+       return generic_file_write_iter(iocb, iter, pos);
 }
 
 static int ubifs_set_page_dirty(struct page *page)
@@ -1583,8 +1583,8 @@ const struct file_operations ubifs_file_operations = {
        .llseek         = generic_file_llseek,
        .read           = do_sync_read,
        .write          = do_sync_write,
-       .aio_read       = generic_file_aio_read,
-       .aio_write      = ubifs_aio_write,
+       .read_iter       = generic_file_read_iter,
+       .write_iter      = ubifs_write_iter,
        .mmap           = ubifs_file_mmap,
        .fsync          = ubifs_fsync,
        .unlocked_ioctl = ubifs_ioctl,
index 29569dd0816814f23a34d713a3f3178c4e822929..e392d60f3a82f2c52aa4ba366c0a5ac1508f73d7 100644 (file)
@@ -119,8 +119,7 @@ static int udf_adinicb_write_end(struct file *file,
 }
 
 static ssize_t udf_adinicb_direct_IO(int rw, struct kiocb *iocb,
-                                    const struct iovec *iov,
-                                    loff_t offset, unsigned long nr_segs)
+                                    struct iov_iter *iter, loff_t offset)
 {
        /* Fallback to buffered I/O. */
        return 0;
@@ -134,8 +133,8 @@ const struct address_space_operations udf_adinicb_aops = {
        .direct_IO      = udf_adinicb_direct_IO,
 };
 
-static ssize_t udf_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
-                                 unsigned long nr_segs, loff_t ppos)
+static ssize_t udf_file_write_iter(struct kiocb *iocb, struct iov_iter *iter,
+                                  loff_t ppos)
 {
        ssize_t retval;
        struct file *file = iocb->ki_filp;
@@ -169,7 +168,7 @@ static ssize_t udf_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
        } else
                up_write(&iinfo->i_data_sem);
 
-       retval = generic_file_aio_write(iocb, iov, nr_segs, ppos);
+       retval = generic_file_write_iter(iocb, iter, ppos);
        if (retval > 0)
                mark_inode_dirty(inode);
 
@@ -243,12 +242,12 @@ static int udf_release_file(struct inode *inode, struct file *filp)
 
 const struct file_operations udf_file_operations = {
        .read                   = do_sync_read,
-       .aio_read               = generic_file_aio_read,
+       .read_iter              = generic_file_read_iter,
        .unlocked_ioctl         = udf_ioctl,
        .open                   = generic_file_open,
        .mmap                   = generic_file_mmap,
        .write                  = do_sync_write,
-       .aio_write              = udf_file_aio_write,
+       .write_iter             = udf_file_write_iter,
        .release                = udf_release_file,
        .fsync                  = generic_file_fsync,
        .splice_read            = generic_file_splice_read,
index b6d15d349810fe5ca21649208bd86d220caf338c..fad32d5f62e5c5673fdd181d193c1b6a2b7b2670 100644 (file)
@@ -216,19 +216,17 @@ static int udf_write_begin(struct file *file, struct address_space *mapping,
        return ret;
 }
 
-static ssize_t udf_direct_IO(int rw, struct kiocb *iocb,
-                            const struct iovec *iov,
-                            loff_t offset, unsigned long nr_segs)
+static ssize_t udf_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter,
+                            loff_t offset)
 {
        struct file *file = iocb->ki_filp;
        struct address_space *mapping = file->f_mapping;
        struct inode *inode = mapping->host;
        ssize_t ret;
 
-       ret = blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
-                                 udf_get_block);
+       ret = blockdev_direct_IO(rw, iocb, inode, iter, offset, udf_get_block);
        if (unlikely(ret < 0 && (rw & WRITE)))
-               udf_write_failed(mapping, offset + iov_length(iov, nr_segs));
+               udf_write_failed(mapping, offset + iov_iter_count(iter));
        return ret;
 }
 
index 9ac4057a86c90f64a84ea2cee0b92e4f549ac01a..839a2bad7f45b693db4ed478598b997c42077712 100644 (file)
@@ -630,6 +630,12 @@ static int udf_remount_fs(struct super_block *sb, int *flags, char *options)
        struct udf_sb_info *sbi = UDF_SB(sb);
        int error = 0;
 
+       if (sbi->s_lvid_bh) {
+               int write_rev = le16_to_cpu(udf_sb_lvidiu(sbi)->minUDFWriteRev);
+               if (write_rev > UDF_MAX_WRITE_VERSION && !(*flags & MS_RDONLY))
+                       return -EACCES;
+       }
+
        uopt.flags = sbi->s_flags;
        uopt.uid   = sbi->s_uid;
        uopt.gid   = sbi->s_gid;
@@ -649,12 +655,6 @@ static int udf_remount_fs(struct super_block *sb, int *flags, char *options)
        sbi->s_dmode = uopt.dmode;
        write_unlock(&sbi->s_cred_lock);
 
-       if (sbi->s_lvid_bh) {
-               int write_rev = le16_to_cpu(udf_sb_lvidiu(sbi)->minUDFWriteRev);
-               if (write_rev > UDF_MAX_WRITE_VERSION)
-                       *flags |= MS_RDONLY;
-       }
-
        if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY))
                goto out_unlock;
 
@@ -843,27 +843,38 @@ static int udf_find_fileset(struct super_block *sb,
        return 1;
 }
 
+/*
+ * Load primary Volume Descriptor Sequence
+ *
+ * Return <0 on error, 0 on success. -EAGAIN is special meaning next sequence
+ * should be tried.
+ */
 static int udf_load_pvoldesc(struct super_block *sb, sector_t block)
 {
        struct primaryVolDesc *pvoldesc;
        struct ustr *instr, *outstr;
        struct buffer_head *bh;
        uint16_t ident;
-       int ret = 1;
+       int ret = -ENOMEM;
 
        instr = kmalloc(sizeof(struct ustr), GFP_NOFS);
        if (!instr)
-               return 1;
+               return -ENOMEM;
 
        outstr = kmalloc(sizeof(struct ustr), GFP_NOFS);
        if (!outstr)
                goto out1;
 
        bh = udf_read_tagged(sb, block, block, &ident);
-       if (!bh)
+       if (!bh) {
+               ret = -EAGAIN;
                goto out2;
+       }
 
-       BUG_ON(ident != TAG_IDENT_PVD);
+       if (ident != TAG_IDENT_PVD) {
+               ret = -EIO;
+               goto out_bh;
+       }
 
        pvoldesc = (struct primaryVolDesc *)bh->b_data;
 
@@ -889,8 +900,9 @@ static int udf_load_pvoldesc(struct super_block *sb, sector_t block)
                if (udf_CS0toUTF8(outstr, instr))
                        udf_debug("volSetIdent[] = '%s'\n", outstr->u_name);
 
-       brelse(bh);
        ret = 0;
+out_bh:
+       brelse(bh);
 out2:
        kfree(outstr);
 out1:
@@ -947,7 +959,7 @@ static int udf_load_metadata_files(struct super_block *sb, int partition)
 
                if (mdata->s_mirror_fe == NULL) {
                        udf_err(sb, "Both metadata and mirror metadata inode efe can not found\n");
-                       goto error_exit;
+                       return -EIO;
                }
        }
 
@@ -964,23 +976,18 @@ static int udf_load_metadata_files(struct super_block *sb, int partition)
                          addr.logicalBlockNum, addr.partitionReferenceNum);
 
                mdata->s_bitmap_fe = udf_iget(sb, &addr);
-
                if (mdata->s_bitmap_fe == NULL) {
                        if (sb->s_flags & MS_RDONLY)
                                udf_warn(sb, "bitmap inode efe not found but it's ok since the disc is mounted read-only\n");
                        else {
                                udf_err(sb, "bitmap inode efe not found and attempted read-write mount\n");
-                               goto error_exit;
+                               return -EIO;
                        }
                }
        }
 
        udf_debug("udf_load_metadata_files Ok\n");
-
        return 0;
-
-error_exit:
-       return 1;
 }
 
 static void udf_load_fileset(struct super_block *sb, struct buffer_head *bh,
@@ -1069,7 +1076,7 @@ static int udf_fill_partdesc_info(struct super_block *sb,
                if (!map->s_uspace.s_table) {
                        udf_debug("cannot load unallocSpaceTable (part %d)\n",
                                  p_index);
-                       return 1;
+                       return -EIO;
                }
                map->s_partition_flags |= UDF_PART_FLAG_UNALLOC_TABLE;
                udf_debug("unallocSpaceTable (part %d) @ %ld\n",
@@ -1079,7 +1086,7 @@ static int udf_fill_partdesc_info(struct super_block *sb,
        if (phd->unallocSpaceBitmap.extLength) {
                struct udf_bitmap *bitmap = udf_sb_alloc_bitmap(sb, p_index);
                if (!bitmap)
-                       return 1;
+                       return -ENOMEM;
                map->s_uspace.s_bitmap = bitmap;
                bitmap->s_extPosition = le32_to_cpu(
                                phd->unallocSpaceBitmap.extPosition);
@@ -1102,7 +1109,7 @@ static int udf_fill_partdesc_info(struct super_block *sb,
                if (!map->s_fspace.s_table) {
                        udf_debug("cannot load freedSpaceTable (part %d)\n",
                                  p_index);
-                       return 1;
+                       return -EIO;
                }
 
                map->s_partition_flags |= UDF_PART_FLAG_FREED_TABLE;
@@ -1113,7 +1120,7 @@ static int udf_fill_partdesc_info(struct super_block *sb,
        if (phd->freedSpaceBitmap.extLength) {
                struct udf_bitmap *bitmap = udf_sb_alloc_bitmap(sb, p_index);
                if (!bitmap)
-                       return 1;
+                       return -ENOMEM;
                map->s_fspace.s_bitmap = bitmap;
                bitmap->s_extPosition = le32_to_cpu(
                                phd->freedSpaceBitmap.extPosition);
@@ -1165,7 +1172,7 @@ static int udf_load_vat(struct super_block *sb, int p_index, int type1_index)
                udf_find_vat_block(sb, p_index, type1_index, blocks - 1);
        }
        if (!sbi->s_vat_inode)
-               return 1;
+               return -EIO;
 
        if (map->s_partition_type == UDF_VIRTUAL_MAP15) {
                map->s_type_specific.s_virtual.s_start_offset = 0;
@@ -1177,7 +1184,7 @@ static int udf_load_vat(struct super_block *sb, int p_index, int type1_index)
                        pos = udf_block_map(sbi->s_vat_inode, 0);
                        bh = sb_bread(sb, pos);
                        if (!bh)
-                               return 1;
+                               return -EIO;
                        vat20 = (struct virtualAllocationTable20 *)bh->b_data;
                } else {
                        vat20 = (struct virtualAllocationTable20 *)
@@ -1195,6 +1202,12 @@ static int udf_load_vat(struct super_block *sb, int p_index, int type1_index)
        return 0;
 }
 
+/*
+ * Load partition descriptor block
+ *
+ * Returns <0 on error, 0 on success, -EAGAIN is special - try next descriptor
+ * sequence.
+ */
 static int udf_load_partdesc(struct super_block *sb, sector_t block)
 {
        struct buffer_head *bh;
@@ -1204,13 +1217,15 @@ static int udf_load_partdesc(struct super_block *sb, sector_t block)
        int i, type1_idx;
        uint16_t partitionNumber;
        uint16_t ident;
-       int ret = 0;
+       int ret;
 
        bh = udf_read_tagged(sb, block, block, &ident);
        if (!bh)
-               return 1;
-       if (ident != TAG_IDENT_PD)
+               return -EAGAIN;
+       if (ident != TAG_IDENT_PD) {
+               ret = 0;
                goto out_bh;
+       }
 
        p = (struct partitionDesc *)bh->b_data;
        partitionNumber = le16_to_cpu(p->partitionNumber);
@@ -1229,10 +1244,13 @@ static int udf_load_partdesc(struct super_block *sb, sector_t block)
        if (i >= sbi->s_partitions) {
                udf_debug("Partition (%d) not found in partition map\n",
                          partitionNumber);
+               ret = 0;
                goto out_bh;
        }
 
        ret = udf_fill_partdesc_info(sb, p, i);
+       if (ret < 0)
+               goto out_bh;
 
        /*
         * Now rescan for VIRTUAL or METADATA partitions when SPARABLE and
@@ -1249,32 +1267,37 @@ static int udf_load_partdesc(struct super_block *sb, sector_t block)
                        break;
        }
 
-       if (i >= sbi->s_partitions)
+       if (i >= sbi->s_partitions) {
+               ret = 0;
                goto out_bh;
+       }
 
        ret = udf_fill_partdesc_info(sb, p, i);
-       if (ret)
+       if (ret < 0)
                goto out_bh;
 
        if (map->s_partition_type == UDF_METADATA_MAP25) {
                ret = udf_load_metadata_files(sb, i);
-               if (ret) {
+               if (ret < 0) {
                        udf_err(sb, "error loading MetaData partition map %d\n",
                                i);
                        goto out_bh;
                }
        } else {
-               ret = udf_load_vat(sb, i, type1_idx);
-               if (ret)
-                       goto out_bh;
                /*
-                * Mark filesystem read-only if we have a partition with
-                * virtual map since we don't handle writing to it (we
-                * overwrite blocks instead of relocating them).
+                * If we have a partition with virtual map, we don't handle
+                * writing to it (we overwrite blocks instead of relocating
+                * them).
                 */
-               sb->s_flags |= MS_RDONLY;
-               pr_notice("Filesystem marked read-only because writing to pseudooverwrite partition is not implemented\n");
+               if (!(sb->s_flags & MS_RDONLY)) {
+                       ret = -EACCES;
+                       goto out_bh;
+               }
+               ret = udf_load_vat(sb, i, type1_idx);
+               if (ret < 0)
+                       goto out_bh;
        }
+       ret = 0;
 out_bh:
        /* In case loading failed, we handle cleanup in udf_fill_super */
        brelse(bh);
@@ -1340,11 +1363,11 @@ static int udf_load_logicalvol(struct super_block *sb, sector_t block,
        uint16_t ident;
        struct buffer_head *bh;
        unsigned int table_len;
-       int ret = 0;
+       int ret;
 
        bh = udf_read_tagged(sb, block, block, &ident);
        if (!bh)
-               return 1;
+               return -EAGAIN;
        BUG_ON(ident != TAG_IDENT_LVD);
        lvd = (struct logicalVolDesc *)bh->b_data;
        table_len = le32_to_cpu(lvd->mapTableLength);
@@ -1352,7 +1375,7 @@ static int udf_load_logicalvol(struct super_block *sb, sector_t block,
                udf_err(sb, "error loading logical volume descriptor: "
                        "Partition table too long (%u > %lu)\n", table_len,
                        sb->s_blocksize - sizeof(*lvd));
-               ret = 1;
+               ret = -EIO;
                goto out_bh;
        }
 
@@ -1396,11 +1419,10 @@ static int udf_load_logicalvol(struct super_block *sb, sector_t block,
                        } else if (!strncmp(upm2->partIdent.ident,
                                                UDF_ID_SPARABLE,
                                                strlen(UDF_ID_SPARABLE))) {
-                               if (udf_load_sparable_map(sb, map,
-                                   (struct sparablePartitionMap *)gpm) < 0) {
-                                       ret = 1;
+                               ret = udf_load_sparable_map(sb, map,
+                                       (struct sparablePartitionMap *)gpm);
+                               if (ret < 0)
                                        goto out_bh;
-                               }
                        } else if (!strncmp(upm2->partIdent.ident,
                                                UDF_ID_METADATA,
                                                strlen(UDF_ID_METADATA))) {
@@ -1465,7 +1487,7 @@ static int udf_load_logicalvol(struct super_block *sb, sector_t block,
        }
        if (lvd->integritySeqExt.extLength)
                udf_load_logicalvolint(sb, leea_to_cpu(lvd->integritySeqExt));
-
+       ret = 0;
 out_bh:
        brelse(bh);
        return ret;
@@ -1503,22 +1525,18 @@ static void udf_load_logicalvolint(struct super_block *sb, struct kernel_extent_
 }
 
 /*
- * udf_process_sequence
- *
- * PURPOSE
- *     Process a main/reserve volume descriptor sequence.
- *
- * PRE-CONDITIONS
- *     sb                      Pointer to _locked_ superblock.
- *     block                   First block of first extent of the sequence.
- *     lastblock               Lastblock of first extent of the sequence.
+ * Process a main/reserve volume descriptor sequence.
+ *   @block            First block of first extent of the sequence.
+ *   @lastblock                Lastblock of first extent of the sequence.
+ *   @fileset          There we store extent containing root fileset
  *
- * HISTORY
- *     July 1, 1997 - Andrew E. Mileski
- *     Written, tested, and released.
+ * Returns <0 on error, 0 on success. -EAGAIN is special - try next descriptor
+ * sequence
  */
-static noinline int udf_process_sequence(struct super_block *sb, long block,
-                               long lastblock, struct kernel_lb_addr *fileset)
+static noinline int udf_process_sequence(
+               struct super_block *sb,
+               sector_t block, sector_t lastblock,
+               struct kernel_lb_addr *fileset)
 {
        struct buffer_head *bh = NULL;
        struct udf_vds_record vds[VDS_POS_LENGTH];
@@ -1529,6 +1547,7 @@ static noinline int udf_process_sequence(struct super_block *sb, long block,
        uint32_t vdsn;
        uint16_t ident;
        long next_s = 0, next_e = 0;
+       int ret;
 
        memset(vds, 0, sizeof(struct udf_vds_record) * VDS_POS_LENGTH);
 
@@ -1543,7 +1562,7 @@ static noinline int udf_process_sequence(struct super_block *sb, long block,
                        udf_err(sb,
                                "Block %llu of volume descriptor sequence is corrupted or we could not read it\n",
                                (unsigned long long)block);
-                       return 1;
+                       return -EAGAIN;
                }
 
                /* Process each descriptor (ISO 13346 3/8.3-8.4) */
@@ -1616,14 +1635,19 @@ static noinline int udf_process_sequence(struct super_block *sb, long block,
         */
        if (!vds[VDS_POS_PRIMARY_VOL_DESC].block) {
                udf_err(sb, "Primary Volume Descriptor not found!\n");
-               return 1;
+               return -EAGAIN;
+       }
+       ret = udf_load_pvoldesc(sb, vds[VDS_POS_PRIMARY_VOL_DESC].block);
+       if (ret < 0)
+               return ret;
+
+       if (vds[VDS_POS_LOGICAL_VOL_DESC].block) {
+               ret = udf_load_logicalvol(sb,
+                                         vds[VDS_POS_LOGICAL_VOL_DESC].block,
+                                         fileset);
+               if (ret < 0)
+                       return ret;
        }
-       if (udf_load_pvoldesc(sb, vds[VDS_POS_PRIMARY_VOL_DESC].block))
-               return 1;
-
-       if (vds[VDS_POS_LOGICAL_VOL_DESC].block && udf_load_logicalvol(sb,
-           vds[VDS_POS_LOGICAL_VOL_DESC].block, fileset))
-               return 1;
 
        if (vds[VDS_POS_PARTITION_DESC].block) {
                /*
@@ -1632,19 +1656,27 @@ static noinline int udf_process_sequence(struct super_block *sb, long block,
                 */
                for (block = vds[VDS_POS_PARTITION_DESC].block;
                     block < vds[VDS_POS_TERMINATING_DESC].block;
-                    block++)
-                       if (udf_load_partdesc(sb, block))
-                               return 1;
+                    block++) {
+                       ret = udf_load_partdesc(sb, block);
+                       if (ret < 0)
+                               return ret;
+               }
        }
 
        return 0;
 }
 
+/*
+ * Load Volume Descriptor Sequence described by anchor in bh
+ *
+ * Returns <0 on error, 0 on success
+ */
 static int udf_load_sequence(struct super_block *sb, struct buffer_head *bh,
                             struct kernel_lb_addr *fileset)
 {
        struct anchorVolDescPtr *anchor;
-       long main_s, main_e, reserve_s, reserve_e;
+       sector_t main_s, main_e, reserve_s, reserve_e;
+       int ret;
 
        anchor = (struct anchorVolDescPtr *)bh->b_data;
 
@@ -1662,18 +1694,26 @@ static int udf_load_sequence(struct super_block *sb, struct buffer_head *bh,
 
        /* Process the main & reserve sequences */
        /* responsible for finding the PartitionDesc(s) */
-       if (!udf_process_sequence(sb, main_s, main_e, fileset))
-               return 1;
-       udf_sb_free_partitions(sb);
-       if (!udf_process_sequence(sb, reserve_s, reserve_e, fileset))
-               return 1;
+       ret = udf_process_sequence(sb, main_s, main_e, fileset);
+       if (ret != -EAGAIN)
+               return ret;
        udf_sb_free_partitions(sb);
-       return 0;
+       ret = udf_process_sequence(sb, reserve_s, reserve_e, fileset);
+       if (ret < 0) {
+               udf_sb_free_partitions(sb);
+               /* No sequence was OK, return -EIO */
+               if (ret == -EAGAIN)
+                       ret = -EIO;
+       }
+       return ret;
 }
 
 /*
  * Check whether there is an anchor block in the given block and
  * load Volume Descriptor Sequence if so.
+ *
+ * Returns <0 on error, 0 on success, -EAGAIN is special - try next anchor
+ * block
  */
 static int udf_check_anchor_block(struct super_block *sb, sector_t block,
                                  struct kernel_lb_addr *fileset)
@@ -1685,33 +1725,40 @@ static int udf_check_anchor_block(struct super_block *sb, sector_t block,
        if (UDF_QUERY_FLAG(sb, UDF_FLAG_VARCONV) &&
            udf_fixed_to_variable(block) >=
            sb->s_bdev->bd_inode->i_size >> sb->s_blocksize_bits)
-               return 0;
+               return -EAGAIN;
 
        bh = udf_read_tagged(sb, block, block, &ident);
        if (!bh)
-               return 0;
+               return -EAGAIN;
        if (ident != TAG_IDENT_AVDP) {
                brelse(bh);
-               return 0;
+               return -EAGAIN;
        }
        ret = udf_load_sequence(sb, bh, fileset);
        brelse(bh);
        return ret;
 }
 
-/* Search for an anchor volume descriptor pointer */
-static sector_t udf_scan_anchors(struct super_block *sb, sector_t lastblock,
-                                struct kernel_lb_addr *fileset)
+/*
+ * Search for an anchor volume descriptor pointer.
+ *
+ * Returns < 0 on error, 0 on success. -EAGAIN is special - try next set
+ * of anchors.
+ */
+static int udf_scan_anchors(struct super_block *sb, sector_t *lastblock,
+                           struct kernel_lb_addr *fileset)
 {
        sector_t last[6];
        int i;
        struct udf_sb_info *sbi = UDF_SB(sb);
        int last_count = 0;
+       int ret;
 
        /* First try user provided anchor */
        if (sbi->s_anchor) {
-               if (udf_check_anchor_block(sb, sbi->s_anchor, fileset))
-                       return lastblock;
+               ret = udf_check_anchor_block(sb, sbi->s_anchor, fileset);
+               if (ret != -EAGAIN)
+                       return ret;
        }
        /*
         * according to spec, anchor is in either:
@@ -1720,39 +1767,46 @@ static sector_t udf_scan_anchors(struct super_block *sb, sector_t lastblock,
         *     lastblock
         *  however, if the disc isn't closed, it could be 512.
         */
-       if (udf_check_anchor_block(sb, sbi->s_session + 256, fileset))
-               return lastblock;
+       ret = udf_check_anchor_block(sb, sbi->s_session + 256, fileset);
+       if (ret != -EAGAIN)
+               return ret;
        /*
         * The trouble is which block is the last one. Drives often misreport
         * this so we try various possibilities.
         */
-       last[last_count++] = lastblock;
-       if (lastblock >= 1)
-               last[last_count++] = lastblock - 1;
-       last[last_count++] = lastblock + 1;
-       if (lastblock >= 2)
-               last[last_count++] = lastblock - 2;
-       if (lastblock >= 150)
-               last[last_count++] = lastblock - 150;
-       if (lastblock >= 152)
-               last[last_count++] = lastblock - 152;
+       last[last_count++] = *lastblock;
+       if (*lastblock >= 1)
+               last[last_count++] = *lastblock - 1;
+       last[last_count++] = *lastblock + 1;
+       if (*lastblock >= 2)
+               last[last_count++] = *lastblock - 2;
+       if (*lastblock >= 150)
+               last[last_count++] = *lastblock - 150;
+       if (*lastblock >= 152)
+               last[last_count++] = *lastblock - 152;
 
        for (i = 0; i < last_count; i++) {
                if (last[i] >= sb->s_bdev->bd_inode->i_size >>
                                sb->s_blocksize_bits)
                        continue;
-               if (udf_check_anchor_block(sb, last[i], fileset))
-                       return last[i];
+               ret = udf_check_anchor_block(sb, last[i], fileset);
+               if (ret != -EAGAIN) {
+                       if (!ret)
+                               *lastblock = last[i];
+                       return ret;
+               }
                if (last[i] < 256)
                        continue;
-               if (udf_check_anchor_block(sb, last[i] - 256, fileset))
-                       return last[i];
+               ret = udf_check_anchor_block(sb, last[i] - 256, fileset);
+               if (ret != -EAGAIN) {
+                       if (!ret)
+                               *lastblock = last[i];
+                       return ret;
+               }
        }
 
        /* Finally try block 512 in case media is open */
-       if (udf_check_anchor_block(sb, sbi->s_session + 512, fileset))
-               return last[0];
-       return 0;
+       return udf_check_anchor_block(sb, sbi->s_session + 512, fileset);
 }
 
 /*
@@ -1760,54 +1814,59 @@ static sector_t udf_scan_anchors(struct super_block *sb, sector_t lastblock,
  * area specified by it. The function expects sbi->s_lastblock to be the last
  * block on the media.
  *
- * Return 1 if ok, 0 if not found.
- *
+ * Return <0 on error, 0 if anchor found. -EAGAIN is special meaning anchor
+ * was not found.
  */
 static int udf_find_anchor(struct super_block *sb,
                           struct kernel_lb_addr *fileset)
 {
-       sector_t lastblock;
        struct udf_sb_info *sbi = UDF_SB(sb);
+       sector_t lastblock = sbi->s_last_block;
+       int ret;
 
-       lastblock = udf_scan_anchors(sb, sbi->s_last_block, fileset);
-       if (lastblock)
+       ret = udf_scan_anchors(sb, &lastblock, fileset);
+       if (ret != -EAGAIN)
                goto out;
 
        /* No anchor found? Try VARCONV conversion of block numbers */
        UDF_SET_FLAG(sb, UDF_FLAG_VARCONV);
+       lastblock = udf_variable_to_fixed(sbi->s_last_block);
        /* Firstly, we try to not convert number of the last block */
-       lastblock = udf_scan_anchors(sb,
-                               udf_variable_to_fixed(sbi->s_last_block),
-                               fileset);
-       if (lastblock)
+       ret = udf_scan_anchors(sb, &lastblock, fileset);
+       if (ret != -EAGAIN)
                goto out;
 
+       lastblock = sbi->s_last_block;
        /* Secondly, we try with converted number of the last block */
-       lastblock = udf_scan_anchors(sb, sbi->s_last_block, fileset);
-       if (!lastblock) {
+       ret = udf_scan_anchors(sb, &lastblock, fileset);
+       if (ret < 0) {
                /* VARCONV didn't help. Clear it. */
                UDF_CLEAR_FLAG(sb, UDF_FLAG_VARCONV);
-               return 0;
        }
 out:
-       sbi->s_last_block = lastblock;
-       return 1;
+       if (ret == 0)
+               sbi->s_last_block = lastblock;
+       return ret;
 }
 
 /*
  * Check Volume Structure Descriptor, find Anchor block and load Volume
- * Descriptor Sequence
+ * Descriptor Sequence.
+ *
+ * Returns < 0 on error, 0 on success. -EAGAIN is special meaning anchor
+ * block was not found.
  */
 static int udf_load_vrs(struct super_block *sb, struct udf_options *uopt,
                        int silent, struct kernel_lb_addr *fileset)
 {
        struct udf_sb_info *sbi = UDF_SB(sb);
        loff_t nsr_off;
+       int ret;
 
        if (!sb_set_blocksize(sb, uopt->blocksize)) {
                if (!silent)
                        udf_warn(sb, "Bad block size\n");
-               return 0;
+               return -EINVAL;
        }
        sbi->s_last_block = uopt->lastblock;
        if (!uopt->novrs) {
@@ -1828,12 +1887,13 @@ static int udf_load_vrs(struct super_block *sb, struct udf_options *uopt,
 
        /* Look for anchor block and load Volume Descriptor Sequence */
        sbi->s_anchor = uopt->anchor;
-       if (!udf_find_anchor(sb, fileset)) {
-               if (!silent)
+       ret = udf_find_anchor(sb, fileset);
+       if (ret < 0) {
+               if (!silent && ret == -EAGAIN)
                        udf_warn(sb, "No anchor found\n");
-               return 0;
+               return ret;
        }
-       return 1;
+       return 0;
 }
 
 static void udf_open_lvid(struct super_block *sb)
@@ -1939,7 +1999,7 @@ u64 lvid_get_unique_id(struct super_block *sb)
 
 static int udf_fill_super(struct super_block *sb, void *options, int silent)
 {
-       int ret;
+       int ret = -EINVAL;
        struct inode *inode = NULL;
        struct udf_options uopt;
        struct kernel_lb_addr rootdir, fileset;
@@ -2011,7 +2071,7 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
        } else {
                uopt.blocksize = bdev_logical_block_size(sb->s_bdev);
                ret = udf_load_vrs(sb, &uopt, silent, &fileset);
-               if (!ret && uopt.blocksize != UDF_DEFAULT_BLOCKSIZE) {
+               if (ret == -EAGAIN && uopt.blocksize != UDF_DEFAULT_BLOCKSIZE) {
                        if (!silent)
                                pr_notice("Rescanning with blocksize %d\n",
                                          UDF_DEFAULT_BLOCKSIZE);
@@ -2021,8 +2081,11 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
                        ret = udf_load_vrs(sb, &uopt, silent, &fileset);
                }
        }
-       if (!ret) {
-               udf_warn(sb, "No partition found (1)\n");
+       if (ret < 0) {
+               if (ret == -EAGAIN) {
+                       udf_warn(sb, "No partition found (1)\n");
+                       ret = -EINVAL;
+               }
                goto error_out;
        }
 
@@ -2040,9 +2103,13 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
                        udf_err(sb, "minUDFReadRev=%x (max is %x)\n",
                                le16_to_cpu(lvidiu->minUDFReadRev),
                                UDF_MAX_READ_VERSION);
+                       ret = -EINVAL;
+                       goto error_out;
+               } else if (minUDFWriteRev > UDF_MAX_WRITE_VERSION &&
+                          !(sb->s_flags & MS_RDONLY)) {
+                       ret = -EACCES;
                        goto error_out;
-               } else if (minUDFWriteRev > UDF_MAX_WRITE_VERSION)
-                       sb->s_flags |= MS_RDONLY;
+               }
 
                sbi->s_udfrev = minUDFWriteRev;
 
@@ -2054,17 +2121,20 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
 
        if (!sbi->s_partitions) {
                udf_warn(sb, "No partition found (2)\n");
+               ret = -EINVAL;
                goto error_out;
        }
 
        if (sbi->s_partmaps[sbi->s_partition].s_partition_flags &
-                       UDF_PART_FLAG_READ_ONLY) {
-               pr_notice("Partition marked readonly; forcing readonly mount\n");
-               sb->s_flags |= MS_RDONLY;
+                       UDF_PART_FLAG_READ_ONLY &&
+           !(sb->s_flags & MS_RDONLY)) {
+               ret = -EACCES;
+               goto error_out;
        }
 
        if (udf_find_fileset(sb, &fileset, &rootdir)) {
                udf_warn(sb, "No fileset found\n");
+               ret = -EINVAL;
                goto error_out;
        }
 
@@ -2086,6 +2156,7 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
        if (!inode) {
                udf_err(sb, "Error in udf_iget, block=%d, partition=%d\n",
                       rootdir.logicalBlockNum, rootdir.partitionReferenceNum);
+               ret = -EIO;
                goto error_out;
        }
 
@@ -2093,6 +2164,7 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
        sb->s_root = d_make_root(inode);
        if (!sb->s_root) {
                udf_err(sb, "Couldn't allocate root dentry\n");
+               ret = -ENOMEM;
                goto error_out;
        }
        sb->s_maxbytes = MAX_LFS_FILESIZE;
@@ -2113,7 +2185,7 @@ error_out:
        kfree(sbi);
        sb->s_fs_info = NULL;
 
-       return -EINVAL;
+       return ret;
 }
 
 void _udf_err(struct super_block *sb, const char *function,
index 33afa20d450982eafb4e1bcc77193cce152270d1..e155e4c4af879c46b3bbb682366b1cc99f981aed 100644 (file)
@@ -36,9 +36,9 @@
 const struct file_operations ufs_file_operations = {
        .llseek         = generic_file_llseek,
        .read           = do_sync_read,
-       .aio_read       = generic_file_aio_read,
+       .read_iter      = generic_file_read_iter,
        .write          = do_sync_write,
-       .aio_write      = generic_file_aio_write,
+       .write_iter     = generic_file_write_iter,
        .mmap           = generic_file_mmap,
        .open           = generic_file_open,
        .fsync          = generic_file_fsync,
index 4a4508023a3c15724a2edfd87204550f718c805b..0719e4db93f274de9af844f3ef66ce387b02a716 100644 (file)
@@ -27,9 +27,12 @@ xfs-y                                += xfs_trace.o
 
 # highlevel code
 xfs-y                          += xfs_aops.o \
+                                  xfs_attr_inactive.o \
+                                  xfs_attr_list.o \
                                   xfs_bit.o \
+                                  xfs_bmap_util.o \
                                   xfs_buf.o \
-                                  xfs_dfrag.o \
+                                  xfs_dir2_readdir.o \
                                   xfs_discard.o \
                                   xfs_error.o \
                                   xfs_export.o \
@@ -44,11 +47,11 @@ xfs-y                               += xfs_aops.o \
                                   xfs_iops.o \
                                   xfs_itable.o \
                                   xfs_message.o \
+                                  xfs_mount.o \
                                   xfs_mru_cache.o \
-                                  xfs_rename.o \
                                   xfs_super.o \
-                                  xfs_utils.o \
-                                  xfs_vnodeops.o \
+                                  xfs_symlink.o \
+                                  xfs_trans.o \
                                   xfs_xattr.o \
                                   kmem.o \
                                   uuid.o
@@ -73,10 +76,13 @@ xfs-y                               += xfs_alloc.o \
                                   xfs_ialloc_btree.o \
                                   xfs_icreate_item.o \
                                   xfs_inode.o \
+                                  xfs_inode_fork.o \
+                                  xfs_inode_buf.o \
                                   xfs_log_recover.o \
-                                  xfs_mount.o \
-                                  xfs_symlink.o \
-                                  xfs_trans.o
+                                  xfs_log_rlimit.o \
+                                  xfs_sb.o \
+                                  xfs_symlink_remote.o \
+                                  xfs_trans_resv.o
 
 # low-level transaction/log code
 xfs-y                          += xfs_log.o \
index 306d883d89bc7d6420ca4b5b8c5f848e573249bf..69518960b2ba17e888d71f75d30e4df57d0cca73 100644 (file)
  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
 #include "xfs.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_acl.h"
 #include "xfs_attr.h"
 #include "xfs_bmap_btree.h"
 #include "xfs_inode.h"
-#include "xfs_vnodeops.h"
+#include "xfs_ag.h"
 #include "xfs_sb.h"
 #include "xfs_mount.h"
 #include "xfs_trace.h"
@@ -68,14 +70,15 @@ xfs_acl_from_disk(
 
                switch (acl_e->e_tag) {
                case ACL_USER:
+                       acl_e->e_uid = xfs_uid_to_kuid(be32_to_cpu(ace->ae_id));
+                       break;
                case ACL_GROUP:
-                       acl_e->e_id = be32_to_cpu(ace->ae_id);
+                       acl_e->e_gid = xfs_gid_to_kgid(be32_to_cpu(ace->ae_id));
                        break;
                case ACL_USER_OBJ:
                case ACL_GROUP_OBJ:
                case ACL_MASK:
                case ACL_OTHER:
-                       acl_e->e_id = ACL_UNDEFINED_ID;
                        break;
                default:
                        goto fail;
@@ -101,7 +104,18 @@ xfs_acl_to_disk(struct xfs_acl *aclp, const struct posix_acl *acl)
                acl_e = &acl->a_entries[i];
 
                ace->ae_tag = cpu_to_be32(acl_e->e_tag);
-               ace->ae_id = cpu_to_be32(acl_e->e_id);
+               switch (acl_e->e_tag) {
+               case ACL_USER:
+                       ace->ae_id = cpu_to_be32(xfs_kuid_to_uid(acl_e->e_uid));
+                       break;
+               case ACL_GROUP:
+                       ace->ae_id = cpu_to_be32(xfs_kgid_to_gid(acl_e->e_gid));
+                       break;
+               default:
+                       ace->ae_id = cpu_to_be32(ACL_UNDEFINED_ID);
+                       break;
+               }
+
                ace->ae_perm = cpu_to_be16(acl_e->e_perm);
        }
 }
@@ -360,7 +374,7 @@ xfs_xattr_acl_set(struct dentry *dentry, const char *name,
                return -EINVAL;
        if (type == ACL_TYPE_DEFAULT && !S_ISDIR(inode->i_mode))
                return value ? -EACCES : 0;
-       if ((current_fsuid() != inode->i_uid) && !capable(CAP_FOWNER))
+       if (!inode_owner_or_capable(inode))
                return -EPERM;
 
        if (!value)
index 317aa86d96ea04925b35e995cc70e1eb024beb28..1cb740afd674e8fd3f5ce0a3f47bfc00f8b36471 100644 (file)
@@ -226,59 +226,6 @@ typedef struct xfs_agfl {
        __be32          agfl_bno[];     /* actually XFS_AGFL_SIZE(mp) */
 } xfs_agfl_t;
 
-/*
- * Per-ag incore structure, copies of information in agf and agi,
- * to improve the performance of allocation group selection.
- */
-#define XFS_PAGB_NUM_SLOTS     128
-
-typedef struct xfs_perag {
-       struct xfs_mount *pag_mount;    /* owner filesystem */
-       xfs_agnumber_t  pag_agno;       /* AG this structure belongs to */
-       atomic_t        pag_ref;        /* perag reference count */
-       char            pagf_init;      /* this agf's entry is initialized */
-       char            pagi_init;      /* this agi's entry is initialized */
-       char            pagf_metadata;  /* the agf is preferred to be metadata */
-       char            pagi_inodeok;   /* The agi is ok for inodes */
-       __uint8_t       pagf_levels[XFS_BTNUM_AGF];
-                                       /* # of levels in bno & cnt btree */
-       __uint32_t      pagf_flcount;   /* count of blocks in freelist */
-       xfs_extlen_t    pagf_freeblks;  /* total free blocks */
-       xfs_extlen_t    pagf_longest;   /* longest free space */
-       __uint32_t      pagf_btreeblks; /* # of blocks held in AGF btrees */
-       xfs_agino_t     pagi_freecount; /* number of free inodes */
-       xfs_agino_t     pagi_count;     /* number of allocated inodes */
-
-       /*
-        * Inode allocation search lookup optimisation.
-        * If the pagino matches, the search for new inodes
-        * doesn't need to search the near ones again straight away
-        */
-       xfs_agino_t     pagl_pagino;
-       xfs_agino_t     pagl_leftrec;
-       xfs_agino_t     pagl_rightrec;
-#ifdef __KERNEL__
-       spinlock_t      pagb_lock;      /* lock for pagb_tree */
-       struct rb_root  pagb_tree;      /* ordered tree of busy extents */
-
-       atomic_t        pagf_fstrms;    /* # of filestreams active in this AG */
-
-       spinlock_t      pag_ici_lock;   /* incore inode cache lock */
-       struct radix_tree_root pag_ici_root;    /* incore inode cache root */
-       int             pag_ici_reclaimable;    /* reclaimable inodes */
-       struct mutex    pag_ici_reclaim_lock;   /* serialisation point */
-       unsigned long   pag_ici_reclaim_cursor; /* reclaim restart point */
-
-       /* buffer cache index */
-       spinlock_t      pag_buf_lock;   /* lock for pag_buf_tree */
-       struct rb_root  pag_buf_tree;   /* ordered tree of active buffers */
-
-       /* for rcu-safe freeing */
-       struct rcu_head rcu_head;
-#endif
-       int             pagb_count;     /* pagb slots in use */
-} xfs_perag_t;
-
 /*
  * tags for inode radix tree
  */
index 71596e57283ae6b44702f8d6866405de6b911728..5a1393f5e020739a648612002f7ae9a3f704d032 100644 (file)
@@ -878,7 +878,7 @@ xfs_alloc_ag_vextent_near(
        xfs_agblock_t   ltnew;          /* useful start bno of left side */
        xfs_extlen_t    rlen;           /* length of returned extent */
        int             forced = 0;
-#if defined(DEBUG) && defined(__KERNEL__)
+#ifdef DEBUG
        /*
         * Randomly don't execute the first algorithm.
         */
@@ -938,8 +938,8 @@ restart:
                xfs_extlen_t    blen=0;
                xfs_agblock_t   bnew=0;
 
-#if defined(DEBUG) && defined(__KERNEL__)
-               if (!dofirst)
+#ifdef DEBUG
+               if (dofirst)
                        break;
 #endif
                /*
index 596ec71da00e8d4f005ab264142e927d03ac667d..5a2a38c677d8fab42d472e7c3e420eafd57f45c3 100644 (file)
@@ -28,9 +28,9 @@
 #include "xfs_alloc.h"
 #include "xfs_error.h"
 #include "xfs_iomap.h"
-#include "xfs_vnodeops.h"
 #include "xfs_trace.h"
 #include "xfs_bmap.h"
+#include "xfs_bmap_util.h"
 #include <linux/aio.h>
 #include <linux/gfp.h>
 #include <linux/mpage.h>
@@ -116,7 +116,7 @@ xfs_setfilesize_trans_alloc(
 
        tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);
 
-       error = xfs_trans_reserve(tp, 0, XFS_FSYNC_TS_LOG_RES(mp), 0, 0, 0);
+       error = xfs_trans_reserve(tp, &M_RES(mp)->tr_fsyncts, 0, 0);
        if (error) {
                xfs_trans_cancel(tp, 0);
                return error;
@@ -451,7 +451,7 @@ xfs_start_page_writeback(
                end_page_writeback(page);
 }
 
-static inline int bio_add_buffer(struct bio *bio, struct buffer_head *bh)
+static inline int xfs_bio_add_buffer(struct bio *bio, struct buffer_head *bh)
 {
        return bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
 }
@@ -525,7 +525,7 @@ xfs_submit_ioend(
                                goto retry;
                        }
 
-                       if (bio_add_buffer(bio, bh) != bh->b_size) {
+                       if (xfs_bio_add_buffer(bio, bh) != bh->b_size) {
                                xfs_submit_ioend_bio(wbc, ioend, bio);
                                goto retry;
                        }
@@ -1431,9 +1431,8 @@ STATIC ssize_t
 xfs_vm_direct_IO(
        int                     rw,
        struct kiocb            *iocb,
-       const struct iovec      *iov,
-       loff_t                  offset,
-       unsigned long           nr_segs)
+       struct iov_iter         *iter,
+       loff_t                  offset)
 {
        struct inode            *inode = iocb->ki_filp->f_mapping->host;
        struct block_device     *bdev = xfs_find_bdev_for_inode(inode);
@@ -1441,7 +1440,7 @@ xfs_vm_direct_IO(
        ssize_t                 ret;
 
        if (rw & WRITE) {
-               size_t size = iov_length(iov, nr_segs);
+               size_t size = iov_iter_count(iter);
 
                /*
                 * We cannot preallocate a size update transaction here as we
@@ -1453,15 +1452,13 @@ xfs_vm_direct_IO(
                if (offset + size > XFS_I(inode)->i_d.di_size)
                        ioend->io_isdirect = 1;
 
-               ret = __blockdev_direct_IO(rw, iocb, inode, bdev, iov,
-                                           offset, nr_segs,
+               ret = __blockdev_direct_IO(rw, iocb, inode, bdev, iter, offset,
                                            xfs_get_blocks_direct,
                                            xfs_end_io_direct_write, NULL, 0);
                if (ret != -EIOCBQUEUED && iocb->private)
                        goto out_destroy_ioend;
        } else {
-               ret = __blockdev_direct_IO(rw, iocb, inode, bdev, iov,
-                                           offset, nr_segs,
+               ret = __blockdev_direct_IO(rw, iocb, inode, bdev, iter, offset,
                                            xfs_get_blocks_direct,
                                            NULL, NULL, 0);
        }
@@ -1516,13 +1513,26 @@ xfs_vm_write_failed(
        loff_t                  pos,
        unsigned                len)
 {
-       loff_t                  block_offset = pos & PAGE_MASK;
+       loff_t                  block_offset;
        loff_t                  block_start;
        loff_t                  block_end;
        loff_t                  from = pos & (PAGE_CACHE_SIZE - 1);
        loff_t                  to = from + len;
        struct buffer_head      *bh, *head;
 
+       /*
+        * The request pos offset might be 32 or 64 bit, this is all fine
+        * on 64-bit platform.  However, for 64-bit pos request on 32-bit
+        * platform, the high 32-bit will be masked off if we evaluate the
+        * block_offset via (pos & PAGE_MASK) because the PAGE_MASK is
+        * 0xfffff000 as an unsigned long, hence the result is incorrect
+        * which could cause the following ASSERT failed in most cases.
+        * In order to avoid this, we can evaluate the block_offset of the
+        * start of the page by using shifts rather than masks the mismatch
+        * problem.
+        */
+       block_offset = (pos >> PAGE_CACHE_SHIFT) << PAGE_CACHE_SHIFT;
+
        ASSERT(block_offset + from == pos);
 
        head = page_buffers(page);
index 20fe3fe9d3417aabcd566ddad7719d3653ad4443..ddcf2267ffa6fdf1bcf33cf7439c6b379472c2b4 100644 (file)
  */
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_types.h"
+#include "xfs_format.h"
 #include "xfs_bit.h"
 #include "xfs_log.h"
 #include "xfs_trans.h"
+#include "xfs_trans_priv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
 #include "xfs_alloc.h"
 #include "xfs_inode_item.h"
 #include "xfs_bmap.h"
+#include "xfs_bmap_util.h"
 #include "xfs_attr.h"
 #include "xfs_attr_leaf.h"
 #include "xfs_attr_remote.h"
 #include "xfs_error.h"
 #include "xfs_quota.h"
 #include "xfs_trans_space.h"
-#include "xfs_vnodeops.h"
 #include "xfs_trace.h"
 
 /*
@@ -62,7 +63,6 @@ STATIC int xfs_attr_shortform_addname(xfs_da_args_t *args);
 STATIC int xfs_attr_leaf_get(xfs_da_args_t *args);
 STATIC int xfs_attr_leaf_addname(xfs_da_args_t *args);
 STATIC int xfs_attr_leaf_removename(xfs_da_args_t *args);
-STATIC int xfs_attr_leaf_list(xfs_attr_list_context_t *context);
 
 /*
  * Internal routines when attribute list is more than one block.
@@ -70,7 +70,6 @@ STATIC int xfs_attr_leaf_list(xfs_attr_list_context_t *context);
 STATIC int xfs_attr_node_get(xfs_da_args_t *args);
 STATIC int xfs_attr_node_addname(xfs_da_args_t *args);
 STATIC int xfs_attr_node_removename(xfs_da_args_t *args);
-STATIC int xfs_attr_node_list(xfs_attr_list_context_t *context);
 STATIC int xfs_attr_fillstate(xfs_da_state_t *state);
 STATIC int xfs_attr_refillstate(xfs_da_state_t *state);
 
@@ -90,7 +89,7 @@ xfs_attr_name_to_xname(
        return 0;
 }
 
-STATIC int
+int
 xfs_inode_hasattr(
        struct xfs_inode        *ip)
 {
@@ -227,13 +226,14 @@ xfs_attr_set_int(
        int             valuelen,
        int             flags)
 {
-       xfs_da_args_t   args;
-       xfs_fsblock_t   firstblock;
-       xfs_bmap_free_t flist;
-       int             error, err2, committed;
-       xfs_mount_t     *mp = dp->i_mount;
-       int             rsvd = (flags & ATTR_ROOT) != 0;
-       int             local;
+       xfs_da_args_t           args;
+       xfs_fsblock_t           firstblock;
+       xfs_bmap_free_t         flist;
+       int                     error, err2, committed;
+       struct xfs_mount        *mp = dp->i_mount;
+       struct xfs_trans_res    tres;
+       int                     rsvd = (flags & ATTR_ROOT) != 0;
+       int                     local;
 
        /*
         * Attach the dquots to the inode.
@@ -293,11 +293,11 @@ xfs_attr_set_int(
        if (rsvd)
                args.trans->t_flags |= XFS_TRANS_RESERVE;
 
-       error = xfs_trans_reserve(args.trans, args.total,
-                                 XFS_ATTRSETM_LOG_RES(mp) +
-                                 XFS_ATTRSETRT_LOG_RES(mp) * args.total,
-                                 0, XFS_TRANS_PERM_LOG_RES,
-                                 XFS_ATTRSET_LOG_COUNT);
+       tres.tr_logres = M_RES(mp)->tr_attrsetm.tr_logres +
+                        M_RES(mp)->tr_attrsetrt.tr_logres * args.total;
+       tres.tr_logcount = XFS_ATTRSET_LOG_COUNT;
+       tres.tr_logflags = XFS_TRANS_PERM_LOG_RES;
+       error = xfs_trans_reserve(args.trans, &tres, args.total, 0);
        if (error) {
                xfs_trans_cancel(args.trans, 0);
                return(error);
@@ -517,11 +517,9 @@ xfs_attr_remove_int(xfs_inode_t *dp, struct xfs_name *name, int flags)
        if (flags & ATTR_ROOT)
                args.trans->t_flags |= XFS_TRANS_RESERVE;
 
-       if ((error = xfs_trans_reserve(args.trans,
-                                     XFS_ATTRRM_SPACE_RES(mp),
-                                     XFS_ATTRRM_LOG_RES(mp),
-                                     0, XFS_TRANS_PERM_LOG_RES,
-                                     XFS_ATTRRM_LOG_COUNT))) {
+       error = xfs_trans_reserve(args.trans, &M_RES(mp)->tr_attrrm,
+                                 XFS_ATTRRM_SPACE_RES(mp), 0);
+       if (error) {
                xfs_trans_cancel(args.trans, 0);
                return(error);
        }
@@ -611,228 +609,6 @@ xfs_attr_remove(
        return xfs_attr_remove_int(dp, &xname, flags);
 }
 
-int
-xfs_attr_list_int(xfs_attr_list_context_t *context)
-{
-       int error;
-       xfs_inode_t *dp = context->dp;
-
-       XFS_STATS_INC(xs_attr_list);
-
-       if (XFS_FORCED_SHUTDOWN(dp->i_mount))
-               return EIO;
-
-       xfs_ilock(dp, XFS_ILOCK_SHARED);
-
-       /*
-        * Decide on what work routines to call based on the inode size.
-        */
-       if (!xfs_inode_hasattr(dp)) {
-               error = 0;
-       } else if (dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) {
-               error = xfs_attr_shortform_list(context);
-       } else if (xfs_bmap_one_block(dp, XFS_ATTR_FORK)) {
-               error = xfs_attr_leaf_list(context);
-       } else {
-               error = xfs_attr_node_list(context);
-       }
-
-       xfs_iunlock(dp, XFS_ILOCK_SHARED);
-
-       return error;
-}
-
-#define        ATTR_ENTBASESIZE                /* minimum bytes used by an attr */ \
-       (((struct attrlist_ent *) 0)->a_name - (char *) 0)
-#define        ATTR_ENTSIZE(namelen)           /* actual bytes used by an attr */ \
-       ((ATTR_ENTBASESIZE + (namelen) + 1 + sizeof(u_int32_t)-1) \
-        & ~(sizeof(u_int32_t)-1))
-
-/*
- * Format an attribute and copy it out to the user's buffer.
- * Take care to check values and protect against them changing later,
- * we may be reading them directly out of a user buffer.
- */
-/*ARGSUSED*/
-STATIC int
-xfs_attr_put_listent(
-       xfs_attr_list_context_t *context,
-       int             flags,
-       unsigned char   *name,
-       int             namelen,
-       int             valuelen,
-       unsigned char   *value)
-{
-       struct attrlist *alist = (struct attrlist *)context->alist;
-       attrlist_ent_t *aep;
-       int arraytop;
-
-       ASSERT(!(context->flags & ATTR_KERNOVAL));
-       ASSERT(context->count >= 0);
-       ASSERT(context->count < (ATTR_MAX_VALUELEN/8));
-       ASSERT(context->firstu >= sizeof(*alist));
-       ASSERT(context->firstu <= context->bufsize);
-
-       /*
-        * Only list entries in the right namespace.
-        */
-       if (((context->flags & ATTR_SECURE) == 0) !=
-           ((flags & XFS_ATTR_SECURE) == 0))
-               return 0;
-       if (((context->flags & ATTR_ROOT) == 0) !=
-           ((flags & XFS_ATTR_ROOT) == 0))
-               return 0;
-
-       arraytop = sizeof(*alist) +
-                       context->count * sizeof(alist->al_offset[0]);
-       context->firstu -= ATTR_ENTSIZE(namelen);
-       if (context->firstu < arraytop) {
-               trace_xfs_attr_list_full(context);
-               alist->al_more = 1;
-               context->seen_enough = 1;
-               return 1;
-       }
-
-       aep = (attrlist_ent_t *)&context->alist[context->firstu];
-       aep->a_valuelen = valuelen;
-       memcpy(aep->a_name, name, namelen);
-       aep->a_name[namelen] = 0;
-       alist->al_offset[context->count++] = context->firstu;
-       alist->al_count = context->count;
-       trace_xfs_attr_list_add(context);
-       return 0;
-}
-
-/*
- * Generate a list of extended attribute names and optionally
- * also value lengths.  Positive return value follows the XFS
- * convention of being an error, zero or negative return code
- * is the length of the buffer returned (negated), indicating
- * success.
- */
-int
-xfs_attr_list(
-       xfs_inode_t     *dp,
-       char            *buffer,
-       int             bufsize,
-       int             flags,
-       attrlist_cursor_kern_t *cursor)
-{
-       xfs_attr_list_context_t context;
-       struct attrlist *alist;
-       int error;
-
-       /*
-        * Validate the cursor.
-        */
-       if (cursor->pad1 || cursor->pad2)
-               return(XFS_ERROR(EINVAL));
-       if ((cursor->initted == 0) &&
-           (cursor->hashval || cursor->blkno || cursor->offset))
-               return XFS_ERROR(EINVAL);
-
-       /*
-        * Check for a properly aligned buffer.
-        */
-       if (((long)buffer) & (sizeof(int)-1))
-               return XFS_ERROR(EFAULT);
-       if (flags & ATTR_KERNOVAL)
-               bufsize = 0;
-
-       /*
-        * Initialize the output buffer.
-        */
-       memset(&context, 0, sizeof(context));
-       context.dp = dp;
-       context.cursor = cursor;
-       context.resynch = 1;
-       context.flags = flags;
-       context.alist = buffer;
-       context.bufsize = (bufsize & ~(sizeof(int)-1));  /* align */
-       context.firstu = context.bufsize;
-       context.put_listent = xfs_attr_put_listent;
-
-       alist = (struct attrlist *)context.alist;
-       alist->al_count = 0;
-       alist->al_more = 0;
-       alist->al_offset[0] = context.bufsize;
-
-       error = xfs_attr_list_int(&context);
-       ASSERT(error >= 0);
-       return error;
-}
-
-int                                                            /* error */
-xfs_attr_inactive(xfs_inode_t *dp)
-{
-       xfs_trans_t *trans;
-       xfs_mount_t *mp;
-       int error;
-
-       mp = dp->i_mount;
-       ASSERT(! XFS_NOT_DQATTACHED(mp, dp));
-
-       xfs_ilock(dp, XFS_ILOCK_SHARED);
-       if (!xfs_inode_hasattr(dp) ||
-           dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) {
-               xfs_iunlock(dp, XFS_ILOCK_SHARED);
-               return 0;
-       }
-       xfs_iunlock(dp, XFS_ILOCK_SHARED);
-
-       /*
-        * Start our first transaction of the day.
-        *
-        * All future transactions during this code must be "chained" off
-        * this one via the trans_dup() call.  All transactions will contain
-        * the inode, and the inode will always be marked with trans_ihold().
-        * Since the inode will be locked in all transactions, we must log
-        * the inode in every transaction to let it float upward through
-        * the log.
-        */
-       trans = xfs_trans_alloc(mp, XFS_TRANS_ATTRINVAL);
-       if ((error = xfs_trans_reserve(trans, 0, XFS_ATTRINVAL_LOG_RES(mp), 0,
-                                     XFS_TRANS_PERM_LOG_RES,
-                                     XFS_ATTRINVAL_LOG_COUNT))) {
-               xfs_trans_cancel(trans, 0);
-               return(error);
-       }
-       xfs_ilock(dp, XFS_ILOCK_EXCL);
-
-       /*
-        * No need to make quota reservations here. We expect to release some
-        * blocks, not allocate, in the common case.
-        */
-       xfs_trans_ijoin(trans, dp, 0);
-
-       /*
-        * Decide on what work routines to call based on the inode size.
-        */
-       if (!xfs_inode_hasattr(dp) ||
-           dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) {
-               error = 0;
-               goto out;
-       }
-       error = xfs_attr3_root_inactive(&trans, dp);
-       if (error)
-               goto out;
-
-       error = xfs_itruncate_extents(&trans, dp, XFS_ATTR_FORK, 0);
-       if (error)
-               goto out;
-
-       error = xfs_trans_commit(trans, XFS_TRANS_RELEASE_LOG_RES);
-       xfs_iunlock(dp, XFS_ILOCK_EXCL);
-
-       return(error);
-
-out:
-       xfs_trans_cancel(trans, XFS_TRANS_RELEASE_LOG_RES|XFS_TRANS_ABORT);
-       xfs_iunlock(dp, XFS_ILOCK_EXCL);
-       return(error);
-}
-
-
 
 /*========================================================================
  * External routines when attribute list is inside the inode
@@ -1166,28 +942,6 @@ xfs_attr_leaf_get(xfs_da_args_t *args)
        return error;
 }
 
-/*
- * Copy out attribute entries for attr_list(), for leaf attribute lists.
- */
-STATIC int
-xfs_attr_leaf_list(xfs_attr_list_context_t *context)
-{
-       int error;
-       struct xfs_buf *bp;
-
-       trace_xfs_attr_leaf_list(context);
-
-       context->cursor->blkno = 0;
-       error = xfs_attr3_leaf_read(NULL, context->dp, 0, -1, &bp);
-       if (error)
-               return XFS_ERROR(error);
-
-       error = xfs_attr3_leaf_list_int(bp, context);
-       xfs_trans_brelse(NULL, bp);
-       return XFS_ERROR(error);
-}
-
-
 /*========================================================================
  * External routines when attribute list size > XFS_LBSIZE(mp).
  *========================================================================*/
@@ -1260,6 +1014,7 @@ restart:
                         * have been a b-tree.
                         */
                        xfs_da_state_free(state);
+                       state = NULL;
                        xfs_bmap_init(args->flist, args->firstblock);
                        error = xfs_attr3_leaf_to_node(args);
                        if (!error) {
@@ -1780,143 +1535,3 @@ xfs_attr_node_get(xfs_da_args_t *args)
        xfs_da_state_free(state);
        return(retval);
 }
-
-STATIC int                                                     /* error */
-xfs_attr_node_list(xfs_attr_list_context_t *context)
-{
-       attrlist_cursor_kern_t *cursor;
-       xfs_attr_leafblock_t *leaf;
-       xfs_da_intnode_t *node;
-       struct xfs_attr3_icleaf_hdr leafhdr;
-       struct xfs_da3_icnode_hdr nodehdr;
-       struct xfs_da_node_entry *btree;
-       int error, i;
-       struct xfs_buf *bp;
-
-       trace_xfs_attr_node_list(context);
-
-       cursor = context->cursor;
-       cursor->initted = 1;
-
-       /*
-        * Do all sorts of validation on the passed-in cursor structure.
-        * If anything is amiss, ignore the cursor and look up the hashval
-        * starting from the btree root.
-        */
-       bp = NULL;
-       if (cursor->blkno > 0) {
-               error = xfs_da3_node_read(NULL, context->dp, cursor->blkno, -1,
-                                             &bp, XFS_ATTR_FORK);
-               if ((error != 0) && (error != EFSCORRUPTED))
-                       return(error);
-               if (bp) {
-                       struct xfs_attr_leaf_entry *entries;
-
-                       node = bp->b_addr;
-                       switch (be16_to_cpu(node->hdr.info.magic)) {
-                       case XFS_DA_NODE_MAGIC:
-                       case XFS_DA3_NODE_MAGIC:
-                               trace_xfs_attr_list_wrong_blk(context);
-                               xfs_trans_brelse(NULL, bp);
-                               bp = NULL;
-                               break;
-                       case XFS_ATTR_LEAF_MAGIC:
-                       case XFS_ATTR3_LEAF_MAGIC:
-                               leaf = bp->b_addr;
-                               xfs_attr3_leaf_hdr_from_disk(&leafhdr, leaf);
-                               entries = xfs_attr3_leaf_entryp(leaf);
-                               if (cursor->hashval > be32_to_cpu(
-                                               entries[leafhdr.count - 1].hashval)) {
-                                       trace_xfs_attr_list_wrong_blk(context);
-                                       xfs_trans_brelse(NULL, bp);
-                                       bp = NULL;
-                               } else if (cursor->hashval <= be32_to_cpu(
-                                               entries[0].hashval)) {
-                                       trace_xfs_attr_list_wrong_blk(context);
-                                       xfs_trans_brelse(NULL, bp);
-                                       bp = NULL;
-                               }
-                               break;
-                       default:
-                               trace_xfs_attr_list_wrong_blk(context);
-                               xfs_trans_brelse(NULL, bp);
-                               bp = NULL;
-                       }
-               }
-       }
-
-       /*
-        * We did not find what we expected given the cursor's contents,
-        * so we start from the top and work down based on the hash value.
-        * Note that start of node block is same as start of leaf block.
-        */
-       if (bp == NULL) {
-               cursor->blkno = 0;
-               for (;;) {
-                       __uint16_t magic;
-
-                       error = xfs_da3_node_read(NULL, context->dp,
-                                                     cursor->blkno, -1, &bp,
-                                                     XFS_ATTR_FORK);
-                       if (error)
-                               return(error);
-                       node = bp->b_addr;
-                       magic = be16_to_cpu(node->hdr.info.magic);
-                       if (magic == XFS_ATTR_LEAF_MAGIC ||
-                           magic == XFS_ATTR3_LEAF_MAGIC)
-                               break;
-                       if (magic != XFS_DA_NODE_MAGIC &&
-                           magic != XFS_DA3_NODE_MAGIC) {
-                               XFS_CORRUPTION_ERROR("xfs_attr_node_list(3)",
-                                                    XFS_ERRLEVEL_LOW,
-                                                    context->dp->i_mount,
-                                                    node);
-                               xfs_trans_brelse(NULL, bp);
-                               return XFS_ERROR(EFSCORRUPTED);
-                       }
-
-                       xfs_da3_node_hdr_from_disk(&nodehdr, node);
-                       btree = xfs_da3_node_tree_p(node);
-                       for (i = 0; i < nodehdr.count; btree++, i++) {
-                               if (cursor->hashval
-                                               <= be32_to_cpu(btree->hashval)) {
-                                       cursor->blkno = be32_to_cpu(btree->before);
-                                       trace_xfs_attr_list_node_descend(context,
-                                                                        btree);
-                                       break;
-                               }
-                       }
-                       if (i == nodehdr.count) {
-                               xfs_trans_brelse(NULL, bp);
-                               return 0;
-                       }
-                       xfs_trans_brelse(NULL, bp);
-               }
-       }
-       ASSERT(bp != NULL);
-
-       /*
-        * Roll upward through the blocks, processing each leaf block in
-        * order.  As long as there is space in the result buffer, keep
-        * adding the information.
-        */
-       for (;;) {
-               leaf = bp->b_addr;
-               error = xfs_attr3_leaf_list_int(bp, context);
-               if (error) {
-                       xfs_trans_brelse(NULL, bp);
-                       return error;
-               }
-               xfs_attr3_leaf_hdr_from_disk(&leafhdr, leaf);
-               if (context->seen_enough || leafhdr.forw == 0)
-                       break;
-               cursor->blkno = leafhdr.forw;
-               xfs_trans_brelse(NULL, bp);
-               error = xfs_attr3_leaf_read(NULL, context->dp, cursor->blkno, -1,
-                                          &bp);
-               if (error)
-                       return error;
-       }
-       xfs_trans_brelse(NULL, bp);
-       return 0;
-}
index de8dd58da46c28ec078dbd5e6d829ae725e973ab..dd4824589470eb106a2b5a764da6039d56121726 100644 (file)
@@ -141,5 +141,14 @@ typedef struct xfs_attr_list_context {
  */
 int xfs_attr_inactive(struct xfs_inode *dp);
 int xfs_attr_list_int(struct xfs_attr_list_context *);
+int xfs_inode_hasattr(struct xfs_inode *ip);
+int xfs_attr_get(struct xfs_inode *ip, const unsigned char *name,
+                unsigned char *value, int *valuelenp, int flags);
+int xfs_attr_set(struct xfs_inode *dp, const unsigned char *name,
+                unsigned char *value, int valuelen, int flags);
+int xfs_attr_remove(struct xfs_inode *dp, const unsigned char *name, int flags);
+int xfs_attr_list(struct xfs_inode *dp, char *buffer, int bufsize,
+                 int flags, struct attrlist_cursor_kern *cursor);
+
 
 #endif /* __XFS_ATTR_H__ */
diff --git a/fs/xfs/xfs_attr_inactive.c b/fs/xfs/xfs_attr_inactive.c
new file mode 100644 (file)
index 0000000..bb24b07
--- /dev/null
@@ -0,0 +1,453 @@
+/*
+ * Copyright (c) 2000-2005 Silicon Graphics, Inc.
+ * Copyright (c) 2013 Red Hat, Inc.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+#include "xfs.h"
+#include "xfs_fs.h"
+#include "xfs_format.h"
+#include "xfs_bit.h"
+#include "xfs_log.h"
+#include "xfs_trans.h"
+#include "xfs_sb.h"
+#include "xfs_ag.h"
+#include "xfs_mount.h"
+#include "xfs_da_btree.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_alloc_btree.h"
+#include "xfs_ialloc_btree.h"
+#include "xfs_alloc.h"
+#include "xfs_btree.h"
+#include "xfs_attr_remote.h"
+#include "xfs_dinode.h"
+#include "xfs_inode.h"
+#include "xfs_inode_item.h"
+#include "xfs_bmap.h"
+#include "xfs_attr.h"
+#include "xfs_attr_leaf.h"
+#include "xfs_error.h"
+#include "xfs_quota.h"
+#include "xfs_trace.h"
+#include "xfs_trans_priv.h"
+
+/*
+ * Look at all the extents for this logical region,
+ * invalidate any buffers that are incore/in transactions.
+ */
+STATIC int
+xfs_attr3_leaf_freextent(
+       struct xfs_trans        **trans,
+       struct xfs_inode        *dp,
+       xfs_dablk_t             blkno,
+       int                     blkcnt)
+{
+       struct xfs_bmbt_irec    map;
+       struct xfs_buf          *bp;
+       xfs_dablk_t             tblkno;
+       xfs_daddr_t             dblkno;
+       int                     tblkcnt;
+       int                     dblkcnt;
+       int                     nmap;
+       int                     error;
+
+       /*
+        * Roll through the "value", invalidating the attribute value's
+        * blocks.
+        */
+       tblkno = blkno;
+       tblkcnt = blkcnt;
+       while (tblkcnt > 0) {
+               /*
+                * Try to remember where we decided to put the value.
+                */
+               nmap = 1;
+               error = xfs_bmapi_read(dp, (xfs_fileoff_t)tblkno, tblkcnt,
+                                      &map, &nmap, XFS_BMAPI_ATTRFORK);
+               if (error) {
+                       return(error);
+               }
+               ASSERT(nmap == 1);
+               ASSERT(map.br_startblock != DELAYSTARTBLOCK);
+
+               /*
+                * If it's a hole, these are already unmapped
+                * so there's nothing to invalidate.
+                */
+               if (map.br_startblock != HOLESTARTBLOCK) {
+
+                       dblkno = XFS_FSB_TO_DADDR(dp->i_mount,
+                                                 map.br_startblock);
+                       dblkcnt = XFS_FSB_TO_BB(dp->i_mount,
+                                               map.br_blockcount);
+                       bp = xfs_trans_get_buf(*trans,
+                                       dp->i_mount->m_ddev_targp,
+                                       dblkno, dblkcnt, 0);
+                       if (!bp)
+                               return ENOMEM;
+                       xfs_trans_binval(*trans, bp);
+                       /*
+                        * Roll to next transaction.
+                        */
+                       error = xfs_trans_roll(trans, dp);
+                       if (error)
+                               return (error);
+               }
+
+               tblkno += map.br_blockcount;
+               tblkcnt -= map.br_blockcount;
+       }
+
+       return(0);
+}
+
+/*
+ * Invalidate all of the "remote" value regions pointed to by a particular
+ * leaf block.
+ * Note that we must release the lock on the buffer so that we are not
+ * caught holding something that the logging code wants to flush to disk.
+ */
+STATIC int
+xfs_attr3_leaf_inactive(
+       struct xfs_trans        **trans,
+       struct xfs_inode        *dp,
+       struct xfs_buf          *bp)
+{
+       struct xfs_attr_leafblock *leaf;
+       struct xfs_attr3_icleaf_hdr ichdr;
+       struct xfs_attr_leaf_entry *entry;
+       struct xfs_attr_leaf_name_remote *name_rmt;
+       struct xfs_attr_inactive_list *list;
+       struct xfs_attr_inactive_list *lp;
+       int                     error;
+       int                     count;
+       int                     size;
+       int                     tmp;
+       int                     i;
+
+       leaf = bp->b_addr;
+       xfs_attr3_leaf_hdr_from_disk(&ichdr, leaf);
+
+       /*
+        * Count the number of "remote" value extents.
+        */
+       count = 0;
+       entry = xfs_attr3_leaf_entryp(leaf);
+       for (i = 0; i < ichdr.count; entry++, i++) {
+               if (be16_to_cpu(entry->nameidx) &&
+                   ((entry->flags & XFS_ATTR_LOCAL) == 0)) {
+                       name_rmt = xfs_attr3_leaf_name_remote(leaf, i);
+                       if (name_rmt->valueblk)
+                               count++;
+               }
+       }
+
+       /*
+        * If there are no "remote" values, we're done.
+        */
+       if (count == 0) {
+               xfs_trans_brelse(*trans, bp);
+               return 0;
+       }
+
+       /*
+        * Allocate storage for a list of all the "remote" value extents.
+        */
+       size = count * sizeof(xfs_attr_inactive_list_t);
+       list = kmem_alloc(size, KM_SLEEP);
+
+       /*
+        * Identify each of the "remote" value extents.
+        */
+       lp = list;
+       entry = xfs_attr3_leaf_entryp(leaf);
+       for (i = 0; i < ichdr.count; entry++, i++) {
+               if (be16_to_cpu(entry->nameidx) &&
+                   ((entry->flags & XFS_ATTR_LOCAL) == 0)) {
+                       name_rmt = xfs_attr3_leaf_name_remote(leaf, i);
+                       if (name_rmt->valueblk) {
+                               lp->valueblk = be32_to_cpu(name_rmt->valueblk);
+                               lp->valuelen = xfs_attr3_rmt_blocks(dp->i_mount,
+                                                   be32_to_cpu(name_rmt->valuelen));
+                               lp++;
+                       }
+               }
+       }
+       xfs_trans_brelse(*trans, bp);   /* unlock for trans. in freextent() */
+
+       /*
+        * Invalidate each of the "remote" value extents.
+        */
+       error = 0;
+       for (lp = list, i = 0; i < count; i++, lp++) {
+               tmp = xfs_attr3_leaf_freextent(trans, dp,
+                               lp->valueblk, lp->valuelen);
+
+               if (error == 0)
+                       error = tmp;    /* save only the 1st errno */
+       }
+
+       kmem_free(list);
+       return error;
+}
+
+/*
+ * Recurse (gasp!) through the attribute nodes until we find leaves.
+ * We're doing a depth-first traversal in order to invalidate everything.
+ */
+STATIC int
+xfs_attr3_node_inactive(
+       struct xfs_trans **trans,
+       struct xfs_inode *dp,
+       struct xfs_buf  *bp,
+       int             level)
+{
+       xfs_da_blkinfo_t *info;
+       xfs_da_intnode_t *node;
+       xfs_dablk_t child_fsb;
+       xfs_daddr_t parent_blkno, child_blkno;
+       int error, i;
+       struct xfs_buf *child_bp;
+       struct xfs_da_node_entry *btree;
+       struct xfs_da3_icnode_hdr ichdr;
+
+       /*
+        * Since this code is recursive (gasp!) we must protect ourselves.
+        */
+       if (level > XFS_DA_NODE_MAXDEPTH) {
+               xfs_trans_brelse(*trans, bp);   /* no locks for later trans */
+               return XFS_ERROR(EIO);
+       }
+
+       node = bp->b_addr;
+       xfs_da3_node_hdr_from_disk(&ichdr, node);
+       parent_blkno = bp->b_bn;
+       if (!ichdr.count) {
+               xfs_trans_brelse(*trans, bp);
+               return 0;
+       }
+       btree = xfs_da3_node_tree_p(node);
+       child_fsb = be32_to_cpu(btree[0].before);
+       xfs_trans_brelse(*trans, bp);   /* no locks for later trans */
+
+       /*
+        * If this is the node level just above the leaves, simply loop
+        * over the leaves removing all of them.  If this is higher up
+        * in the tree, recurse downward.
+        */
+       for (i = 0; i < ichdr.count; i++) {
+               /*
+                * Read the subsidiary block to see what we have to work with.
+                * Don't do this in a transaction.  This is a depth-first
+                * traversal of the tree so we may deal with many blocks
+                * before we come back to this one.
+                */
+               error = xfs_da3_node_read(*trans, dp, child_fsb, -2, &child_bp,
+                                               XFS_ATTR_FORK);
+               if (error)
+                       return(error);
+               if (child_bp) {
+                                               /* save for re-read later */
+                       child_blkno = XFS_BUF_ADDR(child_bp);
+
+                       /*
+                        * Invalidate the subtree, however we have to.
+                        */
+                       info = child_bp->b_addr;
+                       switch (info->magic) {
+                       case cpu_to_be16(XFS_DA_NODE_MAGIC):
+                       case cpu_to_be16(XFS_DA3_NODE_MAGIC):
+                               error = xfs_attr3_node_inactive(trans, dp,
+                                                       child_bp, level + 1);
+                               break;
+                       case cpu_to_be16(XFS_ATTR_LEAF_MAGIC):
+                       case cpu_to_be16(XFS_ATTR3_LEAF_MAGIC):
+                               error = xfs_attr3_leaf_inactive(trans, dp,
+                                                       child_bp);
+                               break;
+                       default:
+                               error = XFS_ERROR(EIO);
+                               xfs_trans_brelse(*trans, child_bp);
+                               break;
+                       }
+                       if (error)
+                               return error;
+
+                       /*
+                        * Remove the subsidiary block from the cache
+                        * and from the log.
+                        */
+                       error = xfs_da_get_buf(*trans, dp, 0, child_blkno,
+                               &child_bp, XFS_ATTR_FORK);
+                       if (error)
+                               return error;
+                       xfs_trans_binval(*trans, child_bp);
+               }
+
+               /*
+                * If we're not done, re-read the parent to get the next
+                * child block number.
+                */
+               if (i + 1 < ichdr.count) {
+                       error = xfs_da3_node_read(*trans, dp, 0, parent_blkno,
+                                                &bp, XFS_ATTR_FORK);
+                       if (error)
+                               return error;
+                       child_fsb = be32_to_cpu(btree[i + 1].before);
+                       xfs_trans_brelse(*trans, bp);
+               }
+               /*
+                * Atomically commit the whole invalidate stuff.
+                */
+               error = xfs_trans_roll(trans, dp);
+               if (error)
+                       return  error;
+       }
+
+       return 0;
+}
+
+/*
+ * Indiscriminately delete the entire attribute fork
+ *
+ * Recurse (gasp!) through the attribute nodes until we find leaves.
+ * We're doing a depth-first traversal in order to invalidate everything.
+ */
+int
+xfs_attr3_root_inactive(
+       struct xfs_trans        **trans,
+       struct xfs_inode        *dp)
+{
+       struct xfs_da_blkinfo   *info;
+       struct xfs_buf          *bp;
+       xfs_daddr_t             blkno;
+       int                     error;
+
+       /*
+        * Read block 0 to see what we have to work with.
+        * We only get here if we have extents, since we remove
+        * the extents in reverse order the extent containing
+        * block 0 must still be there.
+        */
+       error = xfs_da3_node_read(*trans, dp, 0, -1, &bp, XFS_ATTR_FORK);
+       if (error)
+               return error;
+       blkno = bp->b_bn;
+
+       /*
+        * Invalidate the tree, even if the "tree" is only a single leaf block.
+        * This is a depth-first traversal!
+        */
+       info = bp->b_addr;
+       switch (info->magic) {
+       case cpu_to_be16(XFS_DA_NODE_MAGIC):
+       case cpu_to_be16(XFS_DA3_NODE_MAGIC):
+               error = xfs_attr3_node_inactive(trans, dp, bp, 1);
+               break;
+       case cpu_to_be16(XFS_ATTR_LEAF_MAGIC):
+       case cpu_to_be16(XFS_ATTR3_LEAF_MAGIC):
+               error = xfs_attr3_leaf_inactive(trans, dp, bp);
+               break;
+       default:
+               error = XFS_ERROR(EIO);
+               xfs_trans_brelse(*trans, bp);
+               break;
+       }
+       if (error)
+               return error;
+
+       /*
+        * Invalidate the incore copy of the root block.
+        */
+       error = xfs_da_get_buf(*trans, dp, 0, blkno, &bp, XFS_ATTR_FORK);
+       if (error)
+               return error;
+       xfs_trans_binval(*trans, bp);   /* remove from cache */
+       /*
+        * Commit the invalidate and start the next transaction.
+        */
+       error = xfs_trans_roll(trans, dp);
+
+       return error;
+}
+
+int
+xfs_attr_inactive(xfs_inode_t *dp)
+{
+       xfs_trans_t *trans;
+       xfs_mount_t *mp;
+       int error;
+
+       mp = dp->i_mount;
+       ASSERT(! XFS_NOT_DQATTACHED(mp, dp));
+
+       xfs_ilock(dp, XFS_ILOCK_SHARED);
+       if (!xfs_inode_hasattr(dp) ||
+           dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) {
+               xfs_iunlock(dp, XFS_ILOCK_SHARED);
+               return 0;
+       }
+       xfs_iunlock(dp, XFS_ILOCK_SHARED);
+
+       /*
+        * Start our first transaction of the day.
+        *
+        * All future transactions during this code must be "chained" off
+        * this one via the trans_dup() call.  All transactions will contain
+        * the inode, and the inode will always be marked with trans_ihold().
+        * Since the inode will be locked in all transactions, we must log
+        * the inode in every transaction to let it float upward through
+        * the log.
+        */
+       trans = xfs_trans_alloc(mp, XFS_TRANS_ATTRINVAL);
+       error = xfs_trans_reserve(trans, &M_RES(mp)->tr_attrinval, 0, 0);
+       if (error) {
+               xfs_trans_cancel(trans, 0);
+               return(error);
+       }
+       xfs_ilock(dp, XFS_ILOCK_EXCL);
+
+       /*
+        * No need to make quota reservations here. We expect to release some
+        * blocks, not allocate, in the common case.
+        */
+       xfs_trans_ijoin(trans, dp, 0);
+
+       /*
+        * Decide on what work routines to call based on the inode size.
+        */
+       if (!xfs_inode_hasattr(dp) ||
+           dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) {
+               error = 0;
+               goto out;
+       }
+       error = xfs_attr3_root_inactive(&trans, dp);
+       if (error)
+               goto out;
+
+       error = xfs_itruncate_extents(&trans, dp, XFS_ATTR_FORK, 0);
+       if (error)
+               goto out;
+
+       error = xfs_trans_commit(trans, XFS_TRANS_RELEASE_LOG_RES);
+       xfs_iunlock(dp, XFS_ILOCK_EXCL);
+
+       return(error);
+
+out:
+       xfs_trans_cancel(trans, XFS_TRANS_RELEASE_LOG_RES|XFS_TRANS_ABORT);
+       xfs_iunlock(dp, XFS_ILOCK_EXCL);
+       return(error);
+}
index b800fbcafc7f639f05a83fc97fc5e964f77422b0..4dba2f9455f40dcca8870e660e1eaeb3ddb6c024 100644 (file)
@@ -22,6 +22,7 @@
 #include "xfs_bit.h"
 #include "xfs_log.h"
 #include "xfs_trans.h"
+#include "xfs_trans_priv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
@@ -77,16 +78,6 @@ STATIC int xfs_attr3_leaf_figure_balance(xfs_da_state_t *state,
                        int *number_entries_in_blk1,
                        int *number_usedbytes_in_blk1);
 
-/*
- * Routines used for shrinking the Btree.
- */
-STATIC int xfs_attr3_node_inactive(xfs_trans_t **trans, xfs_inode_t *dp,
-                                 struct xfs_buf *bp, int level);
-STATIC int xfs_attr3_leaf_inactive(xfs_trans_t **trans, xfs_inode_t *dp,
-                                 struct xfs_buf *bp);
-STATIC int xfs_attr3_leaf_freextent(xfs_trans_t **trans, xfs_inode_t *dp,
-                                  xfs_dablk_t blkno, int blkcnt);
-
 /*
  * Utility routines.
  */
@@ -751,182 +742,6 @@ out:
        return(error);
 }
 
-STATIC int
-xfs_attr_shortform_compare(const void *a, const void *b)
-{
-       xfs_attr_sf_sort_t *sa, *sb;
-
-       sa = (xfs_attr_sf_sort_t *)a;
-       sb = (xfs_attr_sf_sort_t *)b;
-       if (sa->hash < sb->hash) {
-               return(-1);
-       } else if (sa->hash > sb->hash) {
-               return(1);
-       } else {
-               return(sa->entno - sb->entno);
-       }
-}
-
-
-#define XFS_ISRESET_CURSOR(cursor) \
-       (!((cursor)->initted) && !((cursor)->hashval) && \
-        !((cursor)->blkno) && !((cursor)->offset))
-/*
- * Copy out entries of shortform attribute lists for attr_list().
- * Shortform attribute lists are not stored in hashval sorted order.
- * If the output buffer is not large enough to hold them all, then we
- * we have to calculate each entries' hashvalue and sort them before
- * we can begin returning them to the user.
- */
-/*ARGSUSED*/
-int
-xfs_attr_shortform_list(xfs_attr_list_context_t *context)
-{
-       attrlist_cursor_kern_t *cursor;
-       xfs_attr_sf_sort_t *sbuf, *sbp;
-       xfs_attr_shortform_t *sf;
-       xfs_attr_sf_entry_t *sfe;
-       xfs_inode_t *dp;
-       int sbsize, nsbuf, count, i;
-       int error;
-
-       ASSERT(context != NULL);
-       dp = context->dp;
-       ASSERT(dp != NULL);
-       ASSERT(dp->i_afp != NULL);
-       sf = (xfs_attr_shortform_t *)dp->i_afp->if_u1.if_data;
-       ASSERT(sf != NULL);
-       if (!sf->hdr.count)
-               return(0);
-       cursor = context->cursor;
-       ASSERT(cursor != NULL);
-
-       trace_xfs_attr_list_sf(context);
-
-       /*
-        * If the buffer is large enough and the cursor is at the start,
-        * do not bother with sorting since we will return everything in
-        * one buffer and another call using the cursor won't need to be
-        * made.
-        * Note the generous fudge factor of 16 overhead bytes per entry.
-        * If bufsize is zero then put_listent must be a search function
-        * and can just scan through what we have.
-        */
-       if (context->bufsize == 0 ||
-           (XFS_ISRESET_CURSOR(cursor) &&
-             (dp->i_afp->if_bytes + sf->hdr.count * 16) < context->bufsize)) {
-               for (i = 0, sfe = &sf->list[0]; i < sf->hdr.count; i++) {
-                       error = context->put_listent(context,
-                                          sfe->flags,
-                                          sfe->nameval,
-                                          (int)sfe->namelen,
-                                          (int)sfe->valuelen,
-                                          &sfe->nameval[sfe->namelen]);
-
-                       /*
-                        * Either search callback finished early or
-                        * didn't fit it all in the buffer after all.
-                        */
-                       if (context->seen_enough)
-                               break;
-
-                       if (error)
-                               return error;
-                       sfe = XFS_ATTR_SF_NEXTENTRY(sfe);
-               }
-               trace_xfs_attr_list_sf_all(context);
-               return(0);
-       }
-
-       /* do no more for a search callback */
-       if (context->bufsize == 0)
-               return 0;
-
-       /*
-        * It didn't all fit, so we have to sort everything on hashval.
-        */
-       sbsize = sf->hdr.count * sizeof(*sbuf);
-       sbp = sbuf = kmem_alloc(sbsize, KM_SLEEP | KM_NOFS);
-
-       /*
-        * Scan the attribute list for the rest of the entries, storing
-        * the relevant info from only those that match into a buffer.
-        */
-       nsbuf = 0;
-       for (i = 0, sfe = &sf->list[0]; i < sf->hdr.count; i++) {
-               if (unlikely(
-                   ((char *)sfe < (char *)sf) ||
-                   ((char *)sfe >= ((char *)sf + dp->i_afp->if_bytes)))) {
-                       XFS_CORRUPTION_ERROR("xfs_attr_shortform_list",
-                                            XFS_ERRLEVEL_LOW,
-                                            context->dp->i_mount, sfe);
-                       kmem_free(sbuf);
-                       return XFS_ERROR(EFSCORRUPTED);
-               }
-
-               sbp->entno = i;
-               sbp->hash = xfs_da_hashname(sfe->nameval, sfe->namelen);
-               sbp->name = sfe->nameval;
-               sbp->namelen = sfe->namelen;
-               /* These are bytes, and both on-disk, don't endian-flip */
-               sbp->valuelen = sfe->valuelen;
-               sbp->flags = sfe->flags;
-               sfe = XFS_ATTR_SF_NEXTENTRY(sfe);
-               sbp++;
-               nsbuf++;
-       }
-
-       /*
-        * Sort the entries on hash then entno.
-        */
-       xfs_sort(sbuf, nsbuf, sizeof(*sbuf), xfs_attr_shortform_compare);
-
-       /*
-        * Re-find our place IN THE SORTED LIST.
-        */
-       count = 0;
-       cursor->initted = 1;
-       cursor->blkno = 0;
-       for (sbp = sbuf, i = 0; i < nsbuf; i++, sbp++) {
-               if (sbp->hash == cursor->hashval) {
-                       if (cursor->offset == count) {
-                               break;
-                       }
-                       count++;
-               } else if (sbp->hash > cursor->hashval) {
-                       break;
-               }
-       }
-       if (i == nsbuf) {
-               kmem_free(sbuf);
-               return(0);
-       }
-
-       /*
-        * Loop putting entries into the user buffer.
-        */
-       for ( ; i < nsbuf; i++, sbp++) {
-               if (cursor->hashval != sbp->hash) {
-                       cursor->hashval = sbp->hash;
-                       cursor->offset = 0;
-               }
-               error = context->put_listent(context,
-                                       sbp->flags,
-                                       sbp->name,
-                                       sbp->namelen,
-                                       sbp->valuelen,
-                                       &sbp->name[sbp->namelen]);
-               if (error)
-                       return error;
-               if (context->seen_enough)
-                       break;
-               cursor->offset++;
-       }
-
-       kmem_free(sbuf);
-       return(0);
-}
-
 /*
  * Check a leaf attribute block to see if all the entries would fit into
  * a shortform attribute list.
@@ -1121,7 +936,6 @@ out:
        return error;
 }
 
-
 /*========================================================================
  * Routines used for growing the Btree.
  *========================================================================*/
@@ -1482,7 +1296,6 @@ xfs_attr3_leaf_compact(
        ichdr_dst->freemap[0].size = ichdr_dst->firstused -
                                                ichdr_dst->freemap[0].base;
 
-
        /* write the header back to initialise the underlying buffer */
        xfs_attr3_leaf_hdr_to_disk(leaf_dst, ichdr_dst);
 
@@ -2643,130 +2456,6 @@ xfs_attr_leaf_newentsize(int namelen, int valuelen, int blocksize, int *local)
        return size;
 }
 
-/*
- * Copy out attribute list entries for attr_list(), for leaf attribute lists.
- */
-int
-xfs_attr3_leaf_list_int(
-       struct xfs_buf                  *bp,
-       struct xfs_attr_list_context    *context)
-{
-       struct attrlist_cursor_kern     *cursor;
-       struct xfs_attr_leafblock       *leaf;
-       struct xfs_attr3_icleaf_hdr     ichdr;
-       struct xfs_attr_leaf_entry      *entries;
-       struct xfs_attr_leaf_entry      *entry;
-       int                             retval;
-       int                             i;
-
-       trace_xfs_attr_list_leaf(context);
-
-       leaf = bp->b_addr;
-       xfs_attr3_leaf_hdr_from_disk(&ichdr, leaf);
-       entries = xfs_attr3_leaf_entryp(leaf);
-
-       cursor = context->cursor;
-       cursor->initted = 1;
-
-       /*
-        * Re-find our place in the leaf block if this is a new syscall.
-        */
-       if (context->resynch) {
-               entry = &entries[0];
-               for (i = 0; i < ichdr.count; entry++, i++) {
-                       if (be32_to_cpu(entry->hashval) == cursor->hashval) {
-                               if (cursor->offset == context->dupcnt) {
-                                       context->dupcnt = 0;
-                                       break;
-                               }
-                               context->dupcnt++;
-                       } else if (be32_to_cpu(entry->hashval) >
-                                       cursor->hashval) {
-                               context->dupcnt = 0;
-                               break;
-                       }
-               }
-               if (i == ichdr.count) {
-                       trace_xfs_attr_list_notfound(context);
-                       return 0;
-               }
-       } else {
-               entry = &entries[0];
-               i = 0;
-       }
-       context->resynch = 0;
-
-       /*
-        * We have found our place, start copying out the new attributes.
-        */
-       retval = 0;
-       for (; i < ichdr.count; entry++, i++) {
-               if (be32_to_cpu(entry->hashval) != cursor->hashval) {
-                       cursor->hashval = be32_to_cpu(entry->hashval);
-                       cursor->offset = 0;
-               }
-
-               if (entry->flags & XFS_ATTR_INCOMPLETE)
-                       continue;               /* skip incomplete entries */
-
-               if (entry->flags & XFS_ATTR_LOCAL) {
-                       xfs_attr_leaf_name_local_t *name_loc =
-                               xfs_attr3_leaf_name_local(leaf, i);
-
-                       retval = context->put_listent(context,
-                                               entry->flags,
-                                               name_loc->nameval,
-                                               (int)name_loc->namelen,
-                                               be16_to_cpu(name_loc->valuelen),
-                                               &name_loc->nameval[name_loc->namelen]);
-                       if (retval)
-                               return retval;
-               } else {
-                       xfs_attr_leaf_name_remote_t *name_rmt =
-                               xfs_attr3_leaf_name_remote(leaf, i);
-
-                       int valuelen = be32_to_cpu(name_rmt->valuelen);
-
-                       if (context->put_value) {
-                               xfs_da_args_t args;
-
-                               memset((char *)&args, 0, sizeof(args));
-                               args.dp = context->dp;
-                               args.whichfork = XFS_ATTR_FORK;
-                               args.valuelen = valuelen;
-                               args.value = kmem_alloc(valuelen, KM_SLEEP | KM_NOFS);
-                               args.rmtblkno = be32_to_cpu(name_rmt->valueblk);
-                               args.rmtblkcnt = xfs_attr3_rmt_blocks(
-                                                       args.dp->i_mount, valuelen);
-                               retval = xfs_attr_rmtval_get(&args);
-                               if (retval)
-                                       return retval;
-                               retval = context->put_listent(context,
-                                               entry->flags,
-                                               name_rmt->name,
-                                               (int)name_rmt->namelen,
-                                               valuelen,
-                                               args.value);
-                               kmem_free(args.value);
-                       } else {
-                               retval = context->put_listent(context,
-                                               entry->flags,
-                                               name_rmt->name,
-                                               (int)name_rmt->namelen,
-                                               valuelen,
-                                               NULL);
-                       }
-                       if (retval)
-                               return retval;
-               }
-               if (context->seen_enough)
-                       break;
-               cursor->offset++;
-       }
-       trace_xfs_attr_list_leaf_end(context);
-       return retval;
-}
-
 
 /*========================================================================
  * Manage the INCOMPLETE flag in a leaf entry
@@ -3011,345 +2700,3 @@ xfs_attr3_leaf_flipflags(
 
        return error;
 }
-
-/*========================================================================
- * Indiscriminately delete the entire attribute fork
- *========================================================================*/
-
-/*
- * Recurse (gasp!) through the attribute nodes until we find leaves.
- * We're doing a depth-first traversal in order to invalidate everything.
- */
-int
-xfs_attr3_root_inactive(
-       struct xfs_trans        **trans,
-       struct xfs_inode        *dp)
-{
-       struct xfs_da_blkinfo   *info;
-       struct xfs_buf          *bp;
-       xfs_daddr_t             blkno;
-       int                     error;
-
-       /*
-        * Read block 0 to see what we have to work with.
-        * We only get here if we have extents, since we remove
-        * the extents in reverse order the extent containing
-        * block 0 must still be there.
-        */
-       error = xfs_da3_node_read(*trans, dp, 0, -1, &bp, XFS_ATTR_FORK);
-       if (error)
-               return error;
-       blkno = bp->b_bn;
-
-       /*
-        * Invalidate the tree, even if the "tree" is only a single leaf block.
-        * This is a depth-first traversal!
-        */
-       info = bp->b_addr;
-       switch (info->magic) {
-       case cpu_to_be16(XFS_DA_NODE_MAGIC):
-       case cpu_to_be16(XFS_DA3_NODE_MAGIC):
-               error = xfs_attr3_node_inactive(trans, dp, bp, 1);
-               break;
-       case cpu_to_be16(XFS_ATTR_LEAF_MAGIC):
-       case cpu_to_be16(XFS_ATTR3_LEAF_MAGIC):
-               error = xfs_attr3_leaf_inactive(trans, dp, bp);
-               break;
-       default:
-               error = XFS_ERROR(EIO);
-               xfs_trans_brelse(*trans, bp);
-               break;
-       }
-       if (error)
-               return error;
-
-       /*
-        * Invalidate the incore copy of the root block.
-        */
-       error = xfs_da_get_buf(*trans, dp, 0, blkno, &bp, XFS_ATTR_FORK);
-       if (error)
-               return error;
-       xfs_trans_binval(*trans, bp);   /* remove from cache */
-       /*
-        * Commit the invalidate and start the next transaction.
-        */
-       error = xfs_trans_roll(trans, dp);
-
-       return error;
-}
-
-/*
- * Recurse (gasp!) through the attribute nodes until we find leaves.
- * We're doing a depth-first traversal in order to invalidate everything.
- */
-STATIC int
-xfs_attr3_node_inactive(
-       struct xfs_trans **trans,
-       struct xfs_inode *dp,
-       struct xfs_buf  *bp,
-       int             level)
-{
-       xfs_da_blkinfo_t *info;
-       xfs_da_intnode_t *node;
-       xfs_dablk_t child_fsb;
-       xfs_daddr_t parent_blkno, child_blkno;
-       int error, i;
-       struct xfs_buf *child_bp;
-       struct xfs_da_node_entry *btree;
-       struct xfs_da3_icnode_hdr ichdr;
-
-       /*
-        * Since this code is recursive (gasp!) we must protect ourselves.
-        */
-       if (level > XFS_DA_NODE_MAXDEPTH) {
-               xfs_trans_brelse(*trans, bp);   /* no locks for later trans */
-               return XFS_ERROR(EIO);
-       }
-
-       node = bp->b_addr;
-       xfs_da3_node_hdr_from_disk(&ichdr, node);
-       parent_blkno = bp->b_bn;
-       if (!ichdr.count) {
-               xfs_trans_brelse(*trans, bp);
-               return 0;
-       }
-       btree = xfs_da3_node_tree_p(node);
-       child_fsb = be32_to_cpu(btree[0].before);
-       xfs_trans_brelse(*trans, bp);   /* no locks for later trans */
-
-       /*
-        * If this is the node level just above the leaves, simply loop
-        * over the leaves removing all of them.  If this is higher up
-        * in the tree, recurse downward.
-        */
-       for (i = 0; i < ichdr.count; i++) {
-               /*
-                * Read the subsidiary block to see what we have to work with.
-                * Don't do this in a transaction.  This is a depth-first
-                * traversal of the tree so we may deal with many blocks
-                * before we come back to this one.
-                */
-               error = xfs_da3_node_read(*trans, dp, child_fsb, -2, &child_bp,
-                                               XFS_ATTR_FORK);
-               if (error)
-                       return(error);
-               if (child_bp) {
-                                               /* save for re-read later */
-                       child_blkno = XFS_BUF_ADDR(child_bp);
-
-                       /*
-                        * Invalidate the subtree, however we have to.
-                        */
-                       info = child_bp->b_addr;
-                       switch (info->magic) {
-                       case cpu_to_be16(XFS_DA_NODE_MAGIC):
-                       case cpu_to_be16(XFS_DA3_NODE_MAGIC):
-                               error = xfs_attr3_node_inactive(trans, dp,
-                                                       child_bp, level + 1);
-                               break;
-                       case cpu_to_be16(XFS_ATTR_LEAF_MAGIC):
-                       case cpu_to_be16(XFS_ATTR3_LEAF_MAGIC):
-                               error = xfs_attr3_leaf_inactive(trans, dp,
-                                                       child_bp);
-                               break;
-                       default:
-                               error = XFS_ERROR(EIO);
-                               xfs_trans_brelse(*trans, child_bp);
-                               break;
-                       }
-                       if (error)
-                               return error;
-
-                       /*
-                        * Remove the subsidiary block from the cache
-                        * and from the log.
-                        */
-                       error = xfs_da_get_buf(*trans, dp, 0, child_blkno,
-                               &child_bp, XFS_ATTR_FORK);
-                       if (error)
-                               return error;
-                       xfs_trans_binval(*trans, child_bp);
-               }
-
-               /*
-                * If we're not done, re-read the parent to get the next
-                * child block number.
-                */
-               if (i + 1 < ichdr.count) {
-                       error = xfs_da3_node_read(*trans, dp, 0, parent_blkno,
-                                                &bp, XFS_ATTR_FORK);
-                       if (error)
-                               return error;
-                       child_fsb = be32_to_cpu(btree[i + 1].before);
-                       xfs_trans_brelse(*trans, bp);
-               }
-               /*
-                * Atomically commit the whole invalidate stuff.
-                */
-               error = xfs_trans_roll(trans, dp);
-               if (error)
-                       return  error;
-       }
-
-       return 0;
-}
-
-/*
- * Invalidate all of the "remote" value regions pointed to by a particular
- * leaf block.
- * Note that we must release the lock on the buffer so that we are not
- * caught holding something that the logging code wants to flush to disk.
- */
-STATIC int
-xfs_attr3_leaf_inactive(
-       struct xfs_trans        **trans,
-       struct xfs_inode        *dp,
-       struct xfs_buf          *bp)
-{
-       struct xfs_attr_leafblock *leaf;
-       struct xfs_attr3_icleaf_hdr ichdr;
-       struct xfs_attr_leaf_entry *entry;
-       struct xfs_attr_leaf_name_remote *name_rmt;
-       struct xfs_attr_inactive_list *list;
-       struct xfs_attr_inactive_list *lp;
-       int                     error;
-       int                     count;
-       int                     size;
-       int                     tmp;
-       int                     i;
-
-       leaf = bp->b_addr;
-       xfs_attr3_leaf_hdr_from_disk(&ichdr, leaf);
-
-       /*
-        * Count the number of "remote" value extents.
-        */
-       count = 0;
-       entry = xfs_attr3_leaf_entryp(leaf);
-       for (i = 0; i < ichdr.count; entry++, i++) {
-               if (be16_to_cpu(entry->nameidx) &&
-                   ((entry->flags & XFS_ATTR_LOCAL) == 0)) {
-                       name_rmt = xfs_attr3_leaf_name_remote(leaf, i);
-                       if (name_rmt->valueblk)
-                               count++;
-               }
-       }
-
-       /*
-        * If there are no "remote" values, we're done.
-        */
-       if (count == 0) {
-               xfs_trans_brelse(*trans, bp);
-               return 0;
-       }
-
-       /*
-        * Allocate storage for a list of all the "remote" value extents.
-        */
-       size = count * sizeof(xfs_attr_inactive_list_t);
-       list = kmem_alloc(size, KM_SLEEP);
-
-       /*
-        * Identify each of the "remote" value extents.
-        */
-       lp = list;
-       entry = xfs_attr3_leaf_entryp(leaf);
-       for (i = 0; i < ichdr.count; entry++, i++) {
-               if (be16_to_cpu(entry->nameidx) &&
-                   ((entry->flags & XFS_ATTR_LOCAL) == 0)) {
-                       name_rmt = xfs_attr3_leaf_name_remote(leaf, i);
-                       if (name_rmt->valueblk) {
-                               lp->valueblk = be32_to_cpu(name_rmt->valueblk);
-                               lp->valuelen = xfs_attr3_rmt_blocks(dp->i_mount,
-                                                   be32_to_cpu(name_rmt->valuelen));
-                               lp++;
-                       }
-               }
-       }
-       xfs_trans_brelse(*trans, bp);   /* unlock for trans. in freextent() */
-
-       /*
-        * Invalidate each of the "remote" value extents.
-        */
-       error = 0;
-       for (lp = list, i = 0; i < count; i++, lp++) {
-               tmp = xfs_attr3_leaf_freextent(trans, dp,
-                               lp->valueblk, lp->valuelen);
-
-               if (error == 0)
-                       error = tmp;    /* save only the 1st errno */
-       }
-
-       kmem_free(list);
-       return error;
-}
-
-/*
- * Look at all the extents for this logical region,
- * invalidate any buffers that are incore/in transactions.
- */
-STATIC int
-xfs_attr3_leaf_freextent(
-       struct xfs_trans        **trans,
-       struct xfs_inode        *dp,
-       xfs_dablk_t             blkno,
-       int                     blkcnt)
-{
-       struct xfs_bmbt_irec    map;
-       struct xfs_buf          *bp;
-       xfs_dablk_t             tblkno;
-       xfs_daddr_t             dblkno;
-       int                     tblkcnt;
-       int                     dblkcnt;
-       int                     nmap;
-       int                     error;
-
-       /*
-        * Roll through the "value", invalidating the attribute value's
-        * blocks.
-        */
-       tblkno = blkno;
-       tblkcnt = blkcnt;
-       while (tblkcnt > 0) {
-               /*
-                * Try to remember where we decided to put the value.
-                */
-               nmap = 1;
-               error = xfs_bmapi_read(dp, (xfs_fileoff_t)tblkno, tblkcnt,
-                                      &map, &nmap, XFS_BMAPI_ATTRFORK);
-               if (error) {
-                       return(error);
-               }
-               ASSERT(nmap == 1);
-               ASSERT(map.br_startblock != DELAYSTARTBLOCK);
-
-               /*
-                * If it's a hole, these are already unmapped
-                * so there's nothing to invalidate.
-                */
-               if (map.br_startblock != HOLESTARTBLOCK) {
-
-                       dblkno = XFS_FSB_TO_DADDR(dp->i_mount,
-                                                 map.br_startblock);
-                       dblkcnt = XFS_FSB_TO_BB(dp->i_mount,
-                                               map.br_blockcount);
-                       bp = xfs_trans_get_buf(*trans,
-                                       dp->i_mount->m_ddev_targp,
-                                       dblkno, dblkcnt, 0);
-                       if (!bp)
-                               return ENOMEM;
-                       xfs_trans_binval(*trans, bp);
-                       /*
-                        * Roll to next transaction.
-                        */
-                       error = xfs_trans_roll(trans, dp);
-                       if (error)
-                               return (error);
-               }
-
-               tblkno += map.br_blockcount;
-               tblkcnt -= map.br_blockcount;
-       }
-
-       return(0);
-}
index 444a7704596c409f43f0ec495c9e6cc9838460c4..c1022138c7e6f3261819a0c52618b6bb1a9cf34f 100644 (file)
@@ -333,6 +333,8 @@ int xfs_attr3_leaf_read(struct xfs_trans *tp, struct xfs_inode *dp,
                        struct xfs_buf **bpp);
 void   xfs_attr3_leaf_hdr_from_disk(struct xfs_attr3_icleaf_hdr *to,
                                     struct xfs_attr_leafblock *from);
+void   xfs_attr3_leaf_hdr_to_disk(struct xfs_attr_leafblock *to,
+                                  struct xfs_attr3_icleaf_hdr *from);
 
 extern const struct xfs_buf_ops xfs_attr3_leaf_buf_ops;
 
diff --git a/fs/xfs/xfs_attr_list.c b/fs/xfs/xfs_attr_list.c
new file mode 100644 (file)
index 0000000..cbc80d4
--- /dev/null
@@ -0,0 +1,655 @@
+/*
+ * Copyright (c) 2000-2005 Silicon Graphics, Inc.
+ * Copyright (c) 2013 Red Hat, Inc.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+#include "xfs.h"
+#include "xfs_fs.h"
+#include "xfs_types.h"
+#include "xfs_bit.h"
+#include "xfs_log.h"
+#include "xfs_trans.h"
+#include "xfs_sb.h"
+#include "xfs_ag.h"
+#include "xfs_mount.h"
+#include "xfs_da_btree.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_alloc_btree.h"
+#include "xfs_ialloc_btree.h"
+#include "xfs_alloc.h"
+#include "xfs_btree.h"
+#include "xfs_attr_sf.h"
+#include "xfs_attr_remote.h"
+#include "xfs_dinode.h"
+#include "xfs_inode.h"
+#include "xfs_inode_item.h"
+#include "xfs_bmap.h"
+#include "xfs_attr.h"
+#include "xfs_attr_leaf.h"
+#include "xfs_error.h"
+#include "xfs_trace.h"
+#include "xfs_buf_item.h"
+#include "xfs_cksum.h"
+
+STATIC int
+xfs_attr_shortform_compare(const void *a, const void *b)
+{
+       xfs_attr_sf_sort_t *sa, *sb;
+
+       sa = (xfs_attr_sf_sort_t *)a;
+       sb = (xfs_attr_sf_sort_t *)b;
+       if (sa->hash < sb->hash) {
+               return(-1);
+       } else if (sa->hash > sb->hash) {
+               return(1);
+       } else {
+               return(sa->entno - sb->entno);
+       }
+}
+
+#define XFS_ISRESET_CURSOR(cursor) \
+       (!((cursor)->initted) && !((cursor)->hashval) && \
+        !((cursor)->blkno) && !((cursor)->offset))
+/*
+ * Copy out entries of shortform attribute lists for attr_list().
+ * Shortform attribute lists are not stored in hashval sorted order.
+ * If the output buffer is not large enough to hold them all, then we
+ * we have to calculate each entries' hashvalue and sort them before
+ * we can begin returning them to the user.
+ */
+int
+xfs_attr_shortform_list(xfs_attr_list_context_t *context)
+{
+       attrlist_cursor_kern_t *cursor;
+       xfs_attr_sf_sort_t *sbuf, *sbp;
+       xfs_attr_shortform_t *sf;
+       xfs_attr_sf_entry_t *sfe;
+       xfs_inode_t *dp;
+       int sbsize, nsbuf, count, i;
+       int error;
+
+       ASSERT(context != NULL);
+       dp = context->dp;
+       ASSERT(dp != NULL);
+       ASSERT(dp->i_afp != NULL);
+       sf = (xfs_attr_shortform_t *)dp->i_afp->if_u1.if_data;
+       ASSERT(sf != NULL);
+       if (!sf->hdr.count)
+               return(0);
+       cursor = context->cursor;
+       ASSERT(cursor != NULL);
+
+       trace_xfs_attr_list_sf(context);
+
+       /*
+        * If the buffer is large enough and the cursor is at the start,
+        * do not bother with sorting since we will return everything in
+        * one buffer and another call using the cursor won't need to be
+        * made.
+        * Note the generous fudge factor of 16 overhead bytes per entry.
+        * If bufsize is zero then put_listent must be a search function
+        * and can just scan through what we have.
+        */
+       if (context->bufsize == 0 ||
+           (XFS_ISRESET_CURSOR(cursor) &&
+             (dp->i_afp->if_bytes + sf->hdr.count * 16) < context->bufsize)) {
+               for (i = 0, sfe = &sf->list[0]; i < sf->hdr.count; i++) {
+                       error = context->put_listent(context,
+                                          sfe->flags,
+                                          sfe->nameval,
+                                          (int)sfe->namelen,
+                                          (int)sfe->valuelen,
+                                          &sfe->nameval[sfe->namelen]);
+
+                       /*
+                        * Either search callback finished early or
+                        * didn't fit it all in the buffer after all.
+                        */
+                       if (context->seen_enough)
+                               break;
+
+                       if (error)
+                               return error;
+                       sfe = XFS_ATTR_SF_NEXTENTRY(sfe);
+               }
+               trace_xfs_attr_list_sf_all(context);
+               return(0);
+       }
+
+       /* do no more for a search callback */
+       if (context->bufsize == 0)
+               return 0;
+
+       /*
+        * It didn't all fit, so we have to sort everything on hashval.
+        */
+       sbsize = sf->hdr.count * sizeof(*sbuf);
+       sbp = sbuf = kmem_alloc(sbsize, KM_SLEEP | KM_NOFS);
+
+       /*
+        * Scan the attribute list for the rest of the entries, storing
+        * the relevant info from only those that match into a buffer.
+        */
+       nsbuf = 0;
+       for (i = 0, sfe = &sf->list[0]; i < sf->hdr.count; i++) {
+               if (unlikely(
+                   ((char *)sfe < (char *)sf) ||
+                   ((char *)sfe >= ((char *)sf + dp->i_afp->if_bytes)))) {
+                       XFS_CORRUPTION_ERROR("xfs_attr_shortform_list",
+                                            XFS_ERRLEVEL_LOW,
+                                            context->dp->i_mount, sfe);
+                       kmem_free(sbuf);
+                       return XFS_ERROR(EFSCORRUPTED);
+               }
+
+               sbp->entno = i;
+               sbp->hash = xfs_da_hashname(sfe->nameval, sfe->namelen);
+               sbp->name = sfe->nameval;
+               sbp->namelen = sfe->namelen;
+               /* These are bytes, and both on-disk, don't endian-flip */
+               sbp->valuelen = sfe->valuelen;
+               sbp->flags = sfe->flags;
+               sfe = XFS_ATTR_SF_NEXTENTRY(sfe);
+               sbp++;
+               nsbuf++;
+       }
+
+       /*
+        * Sort the entries on hash then entno.
+        */
+       xfs_sort(sbuf, nsbuf, sizeof(*sbuf), xfs_attr_shortform_compare);
+
+       /*
+        * Re-find our place IN THE SORTED LIST.
+        */
+       count = 0;
+       cursor->initted = 1;
+       cursor->blkno = 0;
+       for (sbp = sbuf, i = 0; i < nsbuf; i++, sbp++) {
+               if (sbp->hash == cursor->hashval) {
+                       if (cursor->offset == count) {
+                               break;
+                       }
+                       count++;
+               } else if (sbp->hash > cursor->hashval) {
+                       break;
+               }
+       }
+       if (i == nsbuf) {
+               kmem_free(sbuf);
+               return(0);
+       }
+
+       /*
+        * Loop putting entries into the user buffer.
+        */
+       for ( ; i < nsbuf; i++, sbp++) {
+               if (cursor->hashval != sbp->hash) {
+                       cursor->hashval = sbp->hash;
+                       cursor->offset = 0;
+               }
+               error = context->put_listent(context,
+                                       sbp->flags,
+                                       sbp->name,
+                                       sbp->namelen,
+                                       sbp->valuelen,
+                                       &sbp->name[sbp->namelen]);
+               if (error)
+                       return error;
+               if (context->seen_enough)
+                       break;
+               cursor->offset++;
+       }
+
+       kmem_free(sbuf);
+       return(0);
+}
+
+STATIC int
+xfs_attr_node_list(xfs_attr_list_context_t *context)
+{
+       attrlist_cursor_kern_t *cursor;
+       xfs_attr_leafblock_t *leaf;
+       xfs_da_intnode_t *node;
+       struct xfs_attr3_icleaf_hdr leafhdr;
+       struct xfs_da3_icnode_hdr nodehdr;
+       struct xfs_da_node_entry *btree;
+       int error, i;
+       struct xfs_buf *bp;
+
+       trace_xfs_attr_node_list(context);
+
+       cursor = context->cursor;
+       cursor->initted = 1;
+
+       /*
+        * Do all sorts of validation on the passed-in cursor structure.
+        * If anything is amiss, ignore the cursor and look up the hashval
+        * starting from the btree root.
+        */
+       bp = NULL;
+       if (cursor->blkno > 0) {
+               error = xfs_da3_node_read(NULL, context->dp, cursor->blkno, -1,
+                                             &bp, XFS_ATTR_FORK);
+               if ((error != 0) && (error != EFSCORRUPTED))
+                       return(error);
+               if (bp) {
+                       struct xfs_attr_leaf_entry *entries;
+
+                       node = bp->b_addr;
+                       switch (be16_to_cpu(node->hdr.info.magic)) {
+                       case XFS_DA_NODE_MAGIC:
+                       case XFS_DA3_NODE_MAGIC:
+                               trace_xfs_attr_list_wrong_blk(context);
+                               xfs_trans_brelse(NULL, bp);
+                               bp = NULL;
+                               break;
+                       case XFS_ATTR_LEAF_MAGIC:
+                       case XFS_ATTR3_LEAF_MAGIC:
+                               leaf = bp->b_addr;
+                               xfs_attr3_leaf_hdr_from_disk(&leafhdr, leaf);
+                               entries = xfs_attr3_leaf_entryp(leaf);
+                               if (cursor->hashval > be32_to_cpu(
+                                               entries[leafhdr.count - 1].hashval)) {
+                                       trace_xfs_attr_list_wrong_blk(context);
+                                       xfs_trans_brelse(NULL, bp);
+                                       bp = NULL;
+                               } else if (cursor->hashval <= be32_to_cpu(
+                                               entries[0].hashval)) {
+                                       trace_xfs_attr_list_wrong_blk(context);
+                                       xfs_trans_brelse(NULL, bp);
+                                       bp = NULL;
+                               }
+                               break;
+                       default:
+                               trace_xfs_attr_list_wrong_blk(context);
+                               xfs_trans_brelse(NULL, bp);
+                               bp = NULL;
+                       }
+               }
+       }
+
+       /*
+        * We did not find what we expected given the cursor's contents,
+        * so we start from the top and work down based on the hash value.
+        * Note that start of node block is same as start of leaf block.
+        */
+       if (bp == NULL) {
+               cursor->blkno = 0;
+               for (;;) {
+                       __uint16_t magic;
+
+                       error = xfs_da3_node_read(NULL, context->dp,
+                                                     cursor->blkno, -1, &bp,
+                                                     XFS_ATTR_FORK);
+                       if (error)
+                               return(error);
+                       node = bp->b_addr;
+                       magic = be16_to_cpu(node->hdr.info.magic);
+                       if (magic == XFS_ATTR_LEAF_MAGIC ||
+                           magic == XFS_ATTR3_LEAF_MAGIC)
+                               break;
+                       if (magic != XFS_DA_NODE_MAGIC &&
+                           magic != XFS_DA3_NODE_MAGIC) {
+                               XFS_CORRUPTION_ERROR("xfs_attr_node_list(3)",
+                                                    XFS_ERRLEVEL_LOW,
+                                                    context->dp->i_mount,
+                                                    node);
+                               xfs_trans_brelse(NULL, bp);
+                               return XFS_ERROR(EFSCORRUPTED);
+                       }
+
+                       xfs_da3_node_hdr_from_disk(&nodehdr, node);
+                       btree = xfs_da3_node_tree_p(node);
+                       for (i = 0; i < nodehdr.count; btree++, i++) {
+                               if (cursor->hashval
+                                               <= be32_to_cpu(btree->hashval)) {
+                                       cursor->blkno = be32_to_cpu(btree->before);
+                                       trace_xfs_attr_list_node_descend(context,
+                                                                        btree);
+                                       break;
+                               }
+                       }
+                       if (i == nodehdr.count) {
+                               xfs_trans_brelse(NULL, bp);
+                               return 0;
+                       }
+                       xfs_trans_brelse(NULL, bp);
+               }
+       }
+       ASSERT(bp != NULL);
+
+       /*
+        * Roll upward through the blocks, processing each leaf block in
+        * order.  As long as there is space in the result buffer, keep
+        * adding the information.
+        */
+       for (;;) {
+               leaf = bp->b_addr;
+               error = xfs_attr3_leaf_list_int(bp, context);
+               if (error) {
+                       xfs_trans_brelse(NULL, bp);
+                       return error;
+               }
+               xfs_attr3_leaf_hdr_from_disk(&leafhdr, leaf);
+               if (context->seen_enough || leafhdr.forw == 0)
+                       break;
+               cursor->blkno = leafhdr.forw;
+               xfs_trans_brelse(NULL, bp);
+               error = xfs_attr3_leaf_read(NULL, context->dp, cursor->blkno, -1,
+                                          &bp);
+               if (error)
+                       return error;
+       }
+       xfs_trans_brelse(NULL, bp);
+       return 0;
+}
+
+/*
+ * Copy out attribute list entries for attr_list(), for leaf attribute lists.
+ */
+int
+xfs_attr3_leaf_list_int(
+       struct xfs_buf                  *bp,
+       struct xfs_attr_list_context    *context)
+{
+       struct attrlist_cursor_kern     *cursor;
+       struct xfs_attr_leafblock       *leaf;
+       struct xfs_attr3_icleaf_hdr     ichdr;
+       struct xfs_attr_leaf_entry      *entries;
+       struct xfs_attr_leaf_entry      *entry;
+       int                             retval;
+       int                             i;
+
+       trace_xfs_attr_list_leaf(context);
+
+       leaf = bp->b_addr;
+       xfs_attr3_leaf_hdr_from_disk(&ichdr, leaf);
+       entries = xfs_attr3_leaf_entryp(leaf);
+
+       cursor = context->cursor;
+       cursor->initted = 1;
+
+       /*
+        * Re-find our place in the leaf block if this is a new syscall.
+        */
+       if (context->resynch) {
+               entry = &entries[0];
+               for (i = 0; i < ichdr.count; entry++, i++) {
+                       if (be32_to_cpu(entry->hashval) == cursor->hashval) {
+                               if (cursor->offset == context->dupcnt) {
+                                       context->dupcnt = 0;
+                                       break;
+                               }
+                               context->dupcnt++;
+                       } else if (be32_to_cpu(entry->hashval) >
+                                       cursor->hashval) {
+                               context->dupcnt = 0;
+                               break;
+                       }
+               }
+               if (i == ichdr.count) {
+                       trace_xfs_attr_list_notfound(context);
+                       return 0;
+               }
+       } else {
+               entry = &entries[0];
+               i = 0;
+       }
+       context->resynch = 0;
+
+       /*
+        * We have found our place, start copying out the new attributes.
+        */
+       retval = 0;
+       for (; i < ichdr.count; entry++, i++) {
+               if (be32_to_cpu(entry->hashval) != cursor->hashval) {
+                       cursor->hashval = be32_to_cpu(entry->hashval);
+                       cursor->offset = 0;
+               }
+
+               if (entry->flags & XFS_ATTR_INCOMPLETE)
+                       continue;               /* skip incomplete entries */
+
+               if (entry->flags & XFS_ATTR_LOCAL) {
+                       xfs_attr_leaf_name_local_t *name_loc =
+                               xfs_attr3_leaf_name_local(leaf, i);
+
+                       retval = context->put_listent(context,
+                                               entry->flags,
+                                               name_loc->nameval,
+                                               (int)name_loc->namelen,
+                                               be16_to_cpu(name_loc->valuelen),
+                                               &name_loc->nameval[name_loc->namelen]);
+                       if (retval)
+                               return retval;
+               } else {
+                       xfs_attr_leaf_name_remote_t *name_rmt =
+                               xfs_attr3_leaf_name_remote(leaf, i);
+
+                       int valuelen = be32_to_cpu(name_rmt->valuelen);
+
+                       if (context->put_value) {
+                               xfs_da_args_t args;
+
+                               memset((char *)&args, 0, sizeof(args));
+                               args.dp = context->dp;
+                               args.whichfork = XFS_ATTR_FORK;
+                               args.valuelen = valuelen;
+                               args.value = kmem_alloc(valuelen, KM_SLEEP | KM_NOFS);
+                               args.rmtblkno = be32_to_cpu(name_rmt->valueblk);
+                               args.rmtblkcnt = xfs_attr3_rmt_blocks(
+                                                       args.dp->i_mount, valuelen);
+                               retval = xfs_attr_rmtval_get(&args);
+                               if (retval)
+                                       return retval;
+                               retval = context->put_listent(context,
+                                               entry->flags,
+                                               name_rmt->name,
+                                               (int)name_rmt->namelen,
+                                               valuelen,
+                                               args.value);
+                               kmem_free(args.value);
+                       } else {
+                               retval = context->put_listent(context,
+                                               entry->flags,
+                                               name_rmt->name,
+                                               (int)name_rmt->namelen,
+                                               valuelen,
+                                               NULL);
+                       }
+                       if (retval)
+                               return retval;
+               }
+               if (context->seen_enough)
+                       break;
+               cursor->offset++;
+       }
+       trace_xfs_attr_list_leaf_end(context);
+       return retval;
+}
+
+/*
+ * Copy out attribute entries for attr_list(), for leaf attribute lists.
+ */
+STATIC int
+xfs_attr_leaf_list(xfs_attr_list_context_t *context)
+{
+       int error;
+       struct xfs_buf *bp;
+
+       trace_xfs_attr_leaf_list(context);
+
+       context->cursor->blkno = 0;
+       error = xfs_attr3_leaf_read(NULL, context->dp, 0, -1, &bp);
+       if (error)
+               return XFS_ERROR(error);
+
+       error = xfs_attr3_leaf_list_int(bp, context);
+       xfs_trans_brelse(NULL, bp);
+       return XFS_ERROR(error);
+}
+
+int
+xfs_attr_list_int(
+       xfs_attr_list_context_t *context)
+{
+       int error;
+       xfs_inode_t *dp = context->dp;
+
+       XFS_STATS_INC(xs_attr_list);
+
+       if (XFS_FORCED_SHUTDOWN(dp->i_mount))
+               return EIO;
+
+       xfs_ilock(dp, XFS_ILOCK_SHARED);
+
+       /*
+        * Decide on what work routines to call based on the inode size.
+        */
+       if (!xfs_inode_hasattr(dp)) {
+               error = 0;
+       } else if (dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) {
+               error = xfs_attr_shortform_list(context);
+       } else if (xfs_bmap_one_block(dp, XFS_ATTR_FORK)) {
+               error = xfs_attr_leaf_list(context);
+       } else {
+               error = xfs_attr_node_list(context);
+       }
+
+       xfs_iunlock(dp, XFS_ILOCK_SHARED);
+
+       return error;
+}
+
+#define        ATTR_ENTBASESIZE                /* minimum bytes used by an attr */ \
+       (((struct attrlist_ent *) 0)->a_name - (char *) 0)
+#define        ATTR_ENTSIZE(namelen)           /* actual bytes used by an attr */ \
+       ((ATTR_ENTBASESIZE + (namelen) + 1 + sizeof(u_int32_t)-1) \
+        & ~(sizeof(u_int32_t)-1))
+
+/*
+ * Format an attribute and copy it out to the user's buffer.
+ * Take care to check values and protect against them changing later,
+ * we may be reading them directly out of a user buffer.
+ */
+STATIC int
+xfs_attr_put_listent(
+       xfs_attr_list_context_t *context,
+       int             flags,
+       unsigned char   *name,
+       int             namelen,
+       int             valuelen,
+       unsigned char   *value)
+{
+       struct attrlist *alist = (struct attrlist *)context->alist;
+       attrlist_ent_t *aep;
+       int arraytop;
+
+       ASSERT(!(context->flags & ATTR_KERNOVAL));
+       ASSERT(context->count >= 0);
+       ASSERT(context->count < (ATTR_MAX_VALUELEN/8));
+       ASSERT(context->firstu >= sizeof(*alist));
+       ASSERT(context->firstu <= context->bufsize);
+
+       /*
+        * Only list entries in the right namespace.
+        */
+       if (((context->flags & ATTR_SECURE) == 0) !=
+           ((flags & XFS_ATTR_SECURE) == 0))
+               return 0;
+       if (((context->flags & ATTR_ROOT) == 0) !=
+           ((flags & XFS_ATTR_ROOT) == 0))
+               return 0;
+
+       arraytop = sizeof(*alist) +
+                       context->count * sizeof(alist->al_offset[0]);
+       context->firstu -= ATTR_ENTSIZE(namelen);
+       if (context->firstu < arraytop) {
+               trace_xfs_attr_list_full(context);
+               alist->al_more = 1;
+               context->seen_enough = 1;
+               return 1;
+       }
+
+       aep = (attrlist_ent_t *)&context->alist[context->firstu];
+       aep->a_valuelen = valuelen;
+       memcpy(aep->a_name, name, namelen);
+       aep->a_name[namelen] = 0;
+       alist->al_offset[context->count++] = context->firstu;
+       alist->al_count = context->count;
+       trace_xfs_attr_list_add(context);
+       return 0;
+}
+
+/*
+ * Generate a list of extended attribute names and optionally
+ * also value lengths.  Positive return value follows the XFS
+ * convention of being an error, zero or negative return code
+ * is the length of the buffer returned (negated), indicating
+ * success.
+ */
+int
+xfs_attr_list(
+       xfs_inode_t     *dp,
+       char            *buffer,
+       int             bufsize,
+       int             flags,
+       attrlist_cursor_kern_t *cursor)
+{
+       xfs_attr_list_context_t context;
+       struct attrlist *alist;
+       int error;
+
+       /*
+        * Validate the cursor.
+        */
+       if (cursor->pad1 || cursor->pad2)
+               return(XFS_ERROR(EINVAL));
+       if ((cursor->initted == 0) &&
+           (cursor->hashval || cursor->blkno || cursor->offset))
+               return XFS_ERROR(EINVAL);
+
+       /*
+        * Check for a properly aligned buffer.
+        */
+       if (((long)buffer) & (sizeof(int)-1))
+               return XFS_ERROR(EFAULT);
+       if (flags & ATTR_KERNOVAL)
+               bufsize = 0;
+
+       /*
+        * Initialize the output buffer.
+        */
+       memset(&context, 0, sizeof(context));
+       context.dp = dp;
+       context.cursor = cursor;
+       context.resynch = 1;
+       context.flags = flags;
+       context.alist = buffer;
+       context.bufsize = (bufsize & ~(sizeof(int)-1));  /* align */
+       context.firstu = context.bufsize;
+       context.put_listent = xfs_attr_put_listent;
+
+       alist = (struct attrlist *)context.alist;
+       alist->al_count = 0;
+       alist->al_more = 0;
+       alist->al_offset[0] = context.bufsize;
+
+       error = xfs_attr_list_int(&context);
+       ASSERT(error >= 0);
+       return error;
+}
index ef6b0c124528f6bff8d59c0fee5fa31a1d5dcc8b..712a502de619b097df202f744ba481bd3471847d 100644 (file)
@@ -22,6 +22,7 @@
 #include "xfs_bit.h"
 #include "xfs_log.h"
 #include "xfs_trans.h"
+#include "xfs_trans_priv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
@@ -33,6 +34,7 @@
 #include "xfs_alloc.h"
 #include "xfs_inode_item.h"
 #include "xfs_bmap.h"
+#include "xfs_bmap_util.h"
 #include "xfs_attr.h"
 #include "xfs_attr_leaf.h"
 #include "xfs_attr_remote.h"
@@ -237,7 +239,7 @@ xfs_attr_rmtval_copyout(
        xfs_ino_t       ino,
        int             *offset,
        int             *valuelen,
-       char            **dst)
+       __uint8_t       **dst)
 {
        char            *src = bp->b_addr;
        xfs_daddr_t     bno = bp->b_bn;
@@ -249,7 +251,7 @@ xfs_attr_rmtval_copyout(
                int hdr_size = 0;
                int byte_cnt = XFS_ATTR3_RMT_BUF_SPACE(mp, XFS_LBSIZE(mp));
 
-               byte_cnt = min_t(int, *valuelen, byte_cnt);
+               byte_cnt = min(*valuelen, byte_cnt);
 
                if (xfs_sb_version_hascrc(&mp->m_sb)) {
                        if (!xfs_attr3_rmt_hdr_ok(mp, src, ino, *offset,
@@ -284,7 +286,7 @@ xfs_attr_rmtval_copyin(
        xfs_ino_t       ino,
        int             *offset,
        int             *valuelen,
-       char            **src)
+       __uint8_t       **src)
 {
        char            *dst = bp->b_addr;
        xfs_daddr_t     bno = bp->b_bn;
@@ -337,7 +339,7 @@ xfs_attr_rmtval_get(
        struct xfs_mount        *mp = args->dp->i_mount;
        struct xfs_buf          *bp;
        xfs_dablk_t             lblkno = args->rmtblkno;
-       char                    *dst = args->value;
+       __uint8_t               *dst = args->value;
        int                     valuelen = args->valuelen;
        int                     nmap;
        int                     error;
@@ -401,7 +403,7 @@ xfs_attr_rmtval_set(
        struct xfs_bmbt_irec    map;
        xfs_dablk_t             lblkno;
        xfs_fileoff_t           lfileoff = 0;
-       char                    *src = args->value;
+       __uint8_t               *src = args->value;
        int                     blkcnt;
        int                     valuelen;
        int                     nmap;
@@ -543,11 +545,6 @@ xfs_attr_rmtval_remove(
 
        /*
         * Roll through the "value", invalidating the attribute value's blocks.
-        * Note that args->rmtblkcnt is the minimum number of data blocks we'll
-        * see for a CRC enabled remote attribute. Each extent will have a
-        * header, and so we may have more blocks than we realise here.  If we
-        * fail to map the blocks correctly, we'll have problems with the buffer
-        * lookups.
         */
        lblkno = args->rmtblkno;
        blkcnt = args->rmtblkcnt;
@@ -628,4 +625,3 @@ xfs_attr_rmtval_remove(
        }
        return(0);
 }
-
index 05c698ccb238f4fa9eef9a1844683fe24f62bd13..92b830901d60bcf2b662315bb1625b2944964ddf 100644 (file)
  */
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_types.h"
+#include "xfs_format.h"
 #include "xfs_bit.h"
 #include "xfs_log.h"
 #include "xfs_inum.h"
 #include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
-#include "xfs_dir2.h"
 #include "xfs_mount.h"
 #include "xfs_da_btree.h"
+#include "xfs_dir2_format.h"
+#include "xfs_dir2.h"
 #include "xfs_bmap_btree.h"
 #include "xfs_alloc_btree.h"
 #include "xfs_ialloc_btree.h"
@@ -39,6 +40,7 @@
 #include "xfs_extfree_item.h"
 #include "xfs_alloc.h"
 #include "xfs_bmap.h"
+#include "xfs_bmap_util.h"
 #include "xfs_rtalloc.h"
 #include "xfs_error.h"
 #include "xfs_attr_leaf.h"
@@ -46,7 +48,6 @@
 #include "xfs_trans_space.h"
 #include "xfs_buf_item.h"
 #include "xfs_filestream.h"
-#include "xfs_vnodeops.h"
 #include "xfs_trace.h"
 #include "xfs_symlink.h"
 
@@ -108,19 +109,6 @@ xfs_bmap_compute_maxlevels(
        mp->m_bm_maxlevels[whichfork] = level;
 }
 
-/*
- * Convert the given file system block to a disk block.  We have to treat it
- * differently based on whether the file is a real time file or not, because the
- * bmap code does.
- */
-xfs_daddr_t
-xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb)
-{
-       return (XFS_IS_REALTIME_INODE(ip) ? \
-                (xfs_daddr_t)XFS_FSB_TO_BB((ip)->i_mount, (fsb)) : \
-                XFS_FSB_TO_DADDR((ip)->i_mount, (fsb)));
-}
-
 STATIC int                             /* error */
 xfs_bmbt_lookup_eq(
        struct xfs_btree_cur    *cur,
@@ -262,173 +250,6 @@ xfs_bmap_forkoff_reset(
        }
 }
 
-/*
- * Extent tree block counting routines.
- */
-
-/*
- * Count leaf blocks given a range of extent records.
- */
-STATIC void
-xfs_bmap_count_leaves(
-       xfs_ifork_t             *ifp,
-       xfs_extnum_t            idx,
-       int                     numrecs,
-       int                     *count)
-{
-       int             b;
-
-       for (b = 0; b < numrecs; b++) {
-               xfs_bmbt_rec_host_t *frp = xfs_iext_get_ext(ifp, idx + b);
-               *count += xfs_bmbt_get_blockcount(frp);
-       }
-}
-
-/*
- * Count leaf blocks given a range of extent records originally
- * in btree format.
- */
-STATIC void
-xfs_bmap_disk_count_leaves(
-       struct xfs_mount        *mp,
-       struct xfs_btree_block  *block,
-       int                     numrecs,
-       int                     *count)
-{
-       int             b;
-       xfs_bmbt_rec_t  *frp;
-
-       for (b = 1; b <= numrecs; b++) {
-               frp = XFS_BMBT_REC_ADDR(mp, block, b);
-               *count += xfs_bmbt_disk_get_blockcount(frp);
-       }
-}
-
-/*
- * Recursively walks each level of a btree
- * to count total fsblocks is use.
- */
-STATIC int                                     /* error */
-xfs_bmap_count_tree(
-       xfs_mount_t     *mp,            /* file system mount point */
-       xfs_trans_t     *tp,            /* transaction pointer */
-       xfs_ifork_t     *ifp,           /* inode fork pointer */
-       xfs_fsblock_t   blockno,        /* file system block number */
-       int             levelin,        /* level in btree */
-       int             *count)         /* Count of blocks */
-{
-       int                     error;
-       xfs_buf_t               *bp, *nbp;
-       int                     level = levelin;
-       __be64                  *pp;
-       xfs_fsblock_t           bno = blockno;
-       xfs_fsblock_t           nextbno;
-       struct xfs_btree_block  *block, *nextblock;
-       int                     numrecs;
-
-       error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp, XFS_BMAP_BTREE_REF,
-                                               &xfs_bmbt_buf_ops);
-       if (error)
-               return error;
-       *count += 1;
-       block = XFS_BUF_TO_BLOCK(bp);
-
-       if (--level) {
-               /* Not at node above leaves, count this level of nodes */
-               nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
-               while (nextbno != NULLFSBLOCK) {
-                       error = xfs_btree_read_bufl(mp, tp, nextbno, 0, &nbp,
-                                               XFS_BMAP_BTREE_REF,
-                                               &xfs_bmbt_buf_ops);
-                       if (error)
-                               return error;
-                       *count += 1;
-                       nextblock = XFS_BUF_TO_BLOCK(nbp);
-                       nextbno = be64_to_cpu(nextblock->bb_u.l.bb_rightsib);
-                       xfs_trans_brelse(tp, nbp);
-               }
-
-               /* Dive to the next level */
-               pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
-               bno = be64_to_cpu(*pp);
-               if (unlikely((error =
-                    xfs_bmap_count_tree(mp, tp, ifp, bno, level, count)) < 0)) {
-                       xfs_trans_brelse(tp, bp);
-                       XFS_ERROR_REPORT("xfs_bmap_count_tree(1)",
-                                        XFS_ERRLEVEL_LOW, mp);
-                       return XFS_ERROR(EFSCORRUPTED);
-               }
-               xfs_trans_brelse(tp, bp);
-       } else {
-               /* count all level 1 nodes and their leaves */
-               for (;;) {
-                       nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
-                       numrecs = be16_to_cpu(block->bb_numrecs);
-                       xfs_bmap_disk_count_leaves(mp, block, numrecs, count);
-                       xfs_trans_brelse(tp, bp);
-                       if (nextbno == NULLFSBLOCK)
-                               break;
-                       bno = nextbno;
-                       error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
-                                               XFS_BMAP_BTREE_REF,
-                                               &xfs_bmbt_buf_ops);
-                       if (error)
-                               return error;
-                       *count += 1;
-                       block = XFS_BUF_TO_BLOCK(bp);
-               }
-       }
-       return 0;
-}
-
-/*
- * Count fsblocks of the given fork.
- */
-int                                            /* error */
-xfs_bmap_count_blocks(
-       xfs_trans_t             *tp,            /* transaction pointer */
-       xfs_inode_t             *ip,            /* incore inode */
-       int                     whichfork,      /* data or attr fork */
-       int                     *count)         /* out: count of blocks */
-{
-       struct xfs_btree_block  *block; /* current btree block */
-       xfs_fsblock_t           bno;    /* block # of "block" */
-       xfs_ifork_t             *ifp;   /* fork structure */
-       int                     level;  /* btree level, for checking */
-       xfs_mount_t             *mp;    /* file system mount structure */
-       __be64                  *pp;    /* pointer to block address */
-
-       bno = NULLFSBLOCK;
-       mp = ip->i_mount;
-       ifp = XFS_IFORK_PTR(ip, whichfork);
-       if ( XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS ) {
-               xfs_bmap_count_leaves(ifp, 0,
-                       ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t),
-                       count);
-               return 0;
-       }
-
-       /*
-        * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
-        */
-       block = ifp->if_broot;
-       level = be16_to_cpu(block->bb_level);
-       ASSERT(level > 0);
-       pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
-       bno = be64_to_cpu(*pp);
-       ASSERT(bno != NULLDFSBNO);
-       ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
-       ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);
-
-       if (unlikely(xfs_bmap_count_tree(mp, tp, ifp, bno, level, count) < 0)) {
-               XFS_ERROR_REPORT("xfs_bmap_count_blocks(2)", XFS_ERRLEVEL_LOW,
-                                mp);
-               return XFS_ERROR(EFSCORRUPTED);
-       }
-
-       return 0;
-}
-
 /*
  * Debug/sanity checking code
  */
@@ -724,8 +545,8 @@ xfs_bmap_trace_exlist(
 
 /*
  * Validate that the bmbt_irecs being returned from bmapi are valid
- * given the callers original parameters.  Specifically check the
- * ranges of the returned irecs to ensure that they only extent beyond
+ * given the caller's original parameters.  Specifically check the
+ * ranges of the returned irecs to ensure that they only extend beyond
  * the given parameters if the XFS_BMAPI_ENTIRE flag was set.
  */
 STATIC void
@@ -823,7 +644,7 @@ xfs_bmap_add_free(
  * Remove the entry "free" from the free item list.  Prev points to the
  * previous entry, unless "free" is the head of the list.
  */
-STATIC void
+void
 xfs_bmap_del_free(
        xfs_bmap_free_t         *flist, /* free item list header */
        xfs_bmap_free_item_t    *prev,  /* previous item on list, if any */
@@ -837,92 +658,6 @@ xfs_bmap_del_free(
        kmem_zone_free(xfs_bmap_free_item_zone, free);
 }
 
-
-/*
- * Routine to be called at transaction's end by xfs_bmapi, xfs_bunmapi
- * caller.  Frees all the extents that need freeing, which must be done
- * last due to locking considerations.  We never free any extents in
- * the first transaction.
- *
- * Return 1 if the given transaction was committed and a new one
- * started, and 0 otherwise in the committed parameter.
- */
-int                                            /* error */
-xfs_bmap_finish(
-       xfs_trans_t             **tp,           /* transaction pointer addr */
-       xfs_bmap_free_t         *flist,         /* i/o: list extents to free */
-       int                     *committed)     /* xact committed or not */
-{
-       xfs_efd_log_item_t      *efd;           /* extent free data */
-       xfs_efi_log_item_t      *efi;           /* extent free intention */
-       int                     error;          /* error return value */
-       xfs_bmap_free_item_t    *free;          /* free extent item */
-       unsigned int            logres;         /* new log reservation */
-       unsigned int            logcount;       /* new log count */
-       xfs_mount_t             *mp;            /* filesystem mount structure */
-       xfs_bmap_free_item_t    *next;          /* next item on free list */
-       xfs_trans_t             *ntp;           /* new transaction pointer */
-
-       ASSERT((*tp)->t_flags & XFS_TRANS_PERM_LOG_RES);
-       if (flist->xbf_count == 0) {
-               *committed = 0;
-               return 0;
-       }
-       ntp = *tp;
-       efi = xfs_trans_get_efi(ntp, flist->xbf_count);
-       for (free = flist->xbf_first; free; free = free->xbfi_next)
-               xfs_trans_log_efi_extent(ntp, efi, free->xbfi_startblock,
-                       free->xbfi_blockcount);
-       logres = ntp->t_log_res;
-       logcount = ntp->t_log_count;
-       ntp = xfs_trans_dup(*tp);
-       error = xfs_trans_commit(*tp, 0);
-       *tp = ntp;
-       *committed = 1;
-       /*
-        * We have a new transaction, so we should return committed=1,
-        * even though we're returning an error.
-        */
-       if (error)
-               return error;
-
-       /*
-        * transaction commit worked ok so we can drop the extra ticket
-        * reference that we gained in xfs_trans_dup()
-        */
-       xfs_log_ticket_put(ntp->t_ticket);
-
-       if ((error = xfs_trans_reserve(ntp, 0, logres, 0, XFS_TRANS_PERM_LOG_RES,
-                       logcount)))
-               return error;
-       efd = xfs_trans_get_efd(ntp, efi, flist->xbf_count);
-       for (free = flist->xbf_first; free != NULL; free = next) {
-               next = free->xbfi_next;
-               if ((error = xfs_free_extent(ntp, free->xbfi_startblock,
-                               free->xbfi_blockcount))) {
-                       /*
-                        * The bmap free list will be cleaned up at a
-                        * higher level.  The EFI will be canceled when
-                        * this transaction is aborted.
-                        * Need to force shutdown here to make sure it
-                        * happens, since this transaction may not be
-                        * dirty yet.
-                        */
-                       mp = ntp->t_mountp;
-                       if (!XFS_FORCED_SHUTDOWN(mp))
-                               xfs_force_shutdown(mp,
-                                                  (error == EFSCORRUPTED) ?
-                                                  SHUTDOWN_CORRUPT_INCORE :
-                                                  SHUTDOWN_META_IO_ERROR);
-                       return error;
-               }
-               xfs_trans_log_efd_extent(ntp, efd, free->xbfi_startblock,
-                       free->xbfi_blockcount);
-               xfs_bmap_del_free(flist, NULL, free);
-       }
-       return 0;
-}
-
 /*
  * Free up any items left in the list.
  */
@@ -1413,8 +1148,8 @@ xfs_bmap_add_attrfork(
        blks = XFS_ADDAFORK_SPACE_RES(mp);
        if (rsvd)
                tp->t_flags |= XFS_TRANS_RESERVE;
-       if ((error = xfs_trans_reserve(tp, blks, XFS_ADDAFORK_LOG_RES(mp), 0,
-                       XFS_TRANS_PERM_LOG_RES, XFS_ADDAFORK_LOG_COUNT)))
+       error = xfs_trans_reserve(tp, &M_RES(mp)->tr_addafork, blks, 0);
+       if (error)
                goto error0;
        xfs_ilock(ip, XFS_ILOCK_EXCL);
        error = xfs_trans_reserve_quota_nblks(tp, ip, blks, 0, rsvd ?
@@ -1815,7 +1550,7 @@ xfs_bmap_first_unused(
 }
 
 /*
- * Returns the file-relative block number of the last block + 1 before
+ * Returns the file-relative block number of the last block - 1 before
  * last_block (input value) in the file.
  * This is not based on i_size, it is based on the extent records.
  * Returns 0 for local files, as they do not have extent records.
@@ -1863,7 +1598,7 @@ xfs_bmap_last_before(
        return 0;
 }
 
-STATIC int
+int
 xfs_bmap_last_extent(
        struct xfs_trans        *tp,
        struct xfs_inode        *ip,
@@ -1926,29 +1661,6 @@ xfs_bmap_isaeof(
        return 0;
 }
 
-/*
- * Check if the endoff is outside the last extent. If so the caller will grow
- * the allocation to a stripe unit boundary.  All offsets are considered outside
- * the end of file for an empty fork, so 1 is returned in *eof in that case.
- */
-int
-xfs_bmap_eof(
-       struct xfs_inode        *ip,
-       xfs_fileoff_t           endoff,
-       int                     whichfork,
-       int                     *eof)
-{
-       struct xfs_bmbt_irec    rec;
-       int                     error;
-
-       error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, eof);
-       if (error || *eof)
-               return error;
-
-       *eof = endoff >= rec.br_startoff + rec.br_blockcount;
-       return 0;
-}
-
 /*
  * Returns the file-relative block number of the first block past eof in
  * the file.  This is not based on i_size, it is based on the extent records.
@@ -3488,7 +3200,7 @@ done:
 /*
  * Adjust the size of the new extent based on di_extsize and rt extsize.
  */
-STATIC int
+int
 xfs_bmap_extsize_align(
        xfs_mount_t     *mp,
        xfs_bmbt_irec_t *gotp,          /* next extent pointer */
@@ -3650,9 +3362,9 @@ xfs_bmap_extsize_align(
 
 #define XFS_ALLOC_GAP_UNITS    4
 
-STATIC void
+void
 xfs_bmap_adjacent(
-       xfs_bmalloca_t  *ap)            /* bmap alloc argument struct */
+       struct xfs_bmalloca     *ap)    /* bmap alloc argument struct */
 {
        xfs_fsblock_t   adjust;         /* adjustment to block numbers */
        xfs_agnumber_t  fb_agno;        /* ag number of ap->firstblock */
@@ -3798,109 +3510,6 @@ xfs_bmap_adjacent(
 #undef ISVALID
 }
 
-STATIC int
-xfs_bmap_rtalloc(
-       xfs_bmalloca_t  *ap)            /* bmap alloc argument struct */
-{
-       xfs_alloctype_t atype = 0;      /* type for allocation routines */
-       int             error;          /* error return value */
-       xfs_mount_t     *mp;            /* mount point structure */
-       xfs_extlen_t    prod = 0;       /* product factor for allocators */
-       xfs_extlen_t    ralen = 0;      /* realtime allocation length */
-       xfs_extlen_t    align;          /* minimum allocation alignment */
-       xfs_rtblock_t   rtb;
-
-       mp = ap->ip->i_mount;
-       align = xfs_get_extsz_hint(ap->ip);
-       prod = align / mp->m_sb.sb_rextsize;
-       error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
-                                       align, 1, ap->eof, 0,
-                                       ap->conv, &ap->offset, &ap->length);
-       if (error)
-               return error;
-       ASSERT(ap->length);
-       ASSERT(ap->length % mp->m_sb.sb_rextsize == 0);
-
-       /*
-        * If the offset & length are not perfectly aligned
-        * then kill prod, it will just get us in trouble.
-        */
-       if (do_mod(ap->offset, align) || ap->length % align)
-               prod = 1;
-       /*
-        * Set ralen to be the actual requested length in rtextents.
-        */
-       ralen = ap->length / mp->m_sb.sb_rextsize;
-       /*
-        * If the old value was close enough to MAXEXTLEN that
-        * we rounded up to it, cut it back so it's valid again.
-        * Note that if it's a really large request (bigger than
-        * MAXEXTLEN), we don't hear about that number, and can't
-        * adjust the starting point to match it.
-        */
-       if (ralen * mp->m_sb.sb_rextsize >= MAXEXTLEN)
-               ralen = MAXEXTLEN / mp->m_sb.sb_rextsize;
-
-       /*
-        * Lock out other modifications to the RT bitmap inode.
-        */
-       xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL);
-       xfs_trans_ijoin(ap->tp, mp->m_rbmip, XFS_ILOCK_EXCL);
-
-       /*
-        * If it's an allocation to an empty file at offset 0,
-        * pick an extent that will space things out in the rt area.
-        */
-       if (ap->eof && ap->offset == 0) {
-               xfs_rtblock_t uninitialized_var(rtx); /* realtime extent no */
-
-               error = xfs_rtpick_extent(mp, ap->tp, ralen, &rtx);
-               if (error)
-                       return error;
-               ap->blkno = rtx * mp->m_sb.sb_rextsize;
-       } else {
-               ap->blkno = 0;
-       }
-
-       xfs_bmap_adjacent(ap);
-
-       /*
-        * Realtime allocation, done through xfs_rtallocate_extent.
-        */
-       atype = ap->blkno == 0 ?  XFS_ALLOCTYPE_ANY_AG : XFS_ALLOCTYPE_NEAR_BNO;
-       do_div(ap->blkno, mp->m_sb.sb_rextsize);
-       rtb = ap->blkno;
-       ap->length = ralen;
-       if ((error = xfs_rtallocate_extent(ap->tp, ap->blkno, 1, ap->length,
-                               &ralen, atype, ap->wasdel, prod, &rtb)))
-               return error;
-       if (rtb == NULLFSBLOCK && prod > 1 &&
-           (error = xfs_rtallocate_extent(ap->tp, ap->blkno, 1,
-                                          ap->length, &ralen, atype,
-                                          ap->wasdel, 1, &rtb)))
-               return error;
-       ap->blkno = rtb;
-       if (ap->blkno != NULLFSBLOCK) {
-               ap->blkno *= mp->m_sb.sb_rextsize;
-               ralen *= mp->m_sb.sb_rextsize;
-               ap->length = ralen;
-               ap->ip->i_d.di_nblocks += ralen;
-               xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
-               if (ap->wasdel)
-                       ap->ip->i_delayed_blks -= ralen;
-               /*
-                * Adjust the disk quota also. This was reserved
-                * earlier.
-                */
-               xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
-                       ap->wasdel ? XFS_TRANS_DQ_DELRTBCOUNT :
-                                       XFS_TRANS_DQ_RTBCOUNT, (long) ralen);
-       } else {
-               ap->length = 0;
-       }
-       return 0;
-}
-
 STATIC int
 xfs_bmap_btalloc_nullfb(
        struct xfs_bmalloca     *ap,
@@ -4018,7 +3627,7 @@ xfs_bmap_btalloc_nullfb(
 
 STATIC int
 xfs_bmap_btalloc(
-       xfs_bmalloca_t  *ap)            /* bmap alloc argument struct */
+       struct xfs_bmalloca     *ap)    /* bmap alloc argument struct */
 {
        xfs_mount_t     *mp;            /* mount point structure */
        xfs_alloctype_t atype = 0;      /* type for allocation routines */
@@ -4250,7 +3859,7 @@ xfs_bmap_btalloc(
  */
 STATIC int
 xfs_bmap_alloc(
-       xfs_bmalloca_t  *ap)            /* bmap alloc argument struct */
+       struct xfs_bmalloca     *ap)    /* bmap alloc argument struct */
 {
        if (XFS_IS_REALTIME_INODE(ap->ip) && ap->userdata)
                return xfs_bmap_rtalloc(ap);
@@ -4638,7 +4247,7 @@ xfs_bmapi_delay(
 }
 
 
-STATIC int
+int
 __xfs_bmapi_allocate(
        struct xfs_bmalloca     *bma)
 {
@@ -4648,12 +4257,9 @@ __xfs_bmapi_allocate(
        struct xfs_ifork        *ifp = XFS_IFORK_PTR(bma->ip, whichfork);
        int                     tmp_logflags = 0;
        int                     error;
-       int                     rt;
 
        ASSERT(bma->length > 0);
 
-       rt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(bma->ip);
-
        /*
         * For the wasdelay case, we could also just allocate the stuff asked
         * for in this bmap call but that wouldn't be as good.
@@ -4756,45 +4362,6 @@ __xfs_bmapi_allocate(
        return 0;
 }
 
-static void
-xfs_bmapi_allocate_worker(
-       struct work_struct      *work)
-{
-       struct xfs_bmalloca     *args = container_of(work,
-                                               struct xfs_bmalloca, work);
-       unsigned long           pflags;
-
-       /* we are in a transaction context here */
-       current_set_flags_nested(&pflags, PF_FSTRANS);
-
-       args->result = __xfs_bmapi_allocate(args);
-       complete(args->done);
-
-       current_restore_flags_nested(&pflags, PF_FSTRANS);
-}
-
-/*
- * Some allocation requests often come in with little stack to work on. Push
- * them off to a worker thread so there is lots of stack to use. Otherwise just
- * call directly to avoid the context switch overhead here.
- */
-int
-xfs_bmapi_allocate(
-       struct xfs_bmalloca     *args)
-{
-       DECLARE_COMPLETION_ONSTACK(done);
-
-       if (!args->stack_switch)
-               return __xfs_bmapi_allocate(args);
-
-
-       args->done = &done;
-       INIT_WORK_ONSTACK(&args->work, xfs_bmapi_allocate_worker);
-       queue_work(xfs_alloc_wq, &args->work);
-       wait_for_completion(&done);
-       return args->result;
-}
-
 STATIC int
 xfs_bmapi_convert_unwritten(
        struct xfs_bmalloca     *bma,
@@ -5789,359 +5356,3 @@ error0:
        }
        return error;
 }
-
-/*
- * returns 1 for success, 0 if we failed to map the extent.
- */
-STATIC int
-xfs_getbmapx_fix_eof_hole(
-       xfs_inode_t             *ip,            /* xfs incore inode pointer */
-       struct getbmapx         *out,           /* output structure */
-       int                     prealloced,     /* this is a file with
-                                                * preallocated data space */
-       __int64_t               end,            /* last block requested */
-       xfs_fsblock_t           startblock)
-{
-       __int64_t               fixlen;
-       xfs_mount_t             *mp;            /* file system mount point */
-       xfs_ifork_t             *ifp;           /* inode fork pointer */
-       xfs_extnum_t            lastx;          /* last extent pointer */
-       xfs_fileoff_t           fileblock;
-
-       if (startblock == HOLESTARTBLOCK) {
-               mp = ip->i_mount;
-               out->bmv_block = -1;
-               fixlen = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, XFS_ISIZE(ip)));
-               fixlen -= out->bmv_offset;
-               if (prealloced && out->bmv_offset + out->bmv_length == end) {
-                       /* Came to hole at EOF. Trim it. */
-                       if (fixlen <= 0)
-                               return 0;
-                       out->bmv_length = fixlen;
-               }
-       } else {
-               if (startblock == DELAYSTARTBLOCK)
-                       out->bmv_block = -2;
-               else
-                       out->bmv_block = xfs_fsb_to_db(ip, startblock);
-               fileblock = XFS_BB_TO_FSB(ip->i_mount, out->bmv_offset);
-               ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
-               if (xfs_iext_bno_to_ext(ifp, fileblock, &lastx) &&
-                  (lastx == (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t))-1))
-                       out->bmv_oflags |= BMV_OF_LAST;
-       }
-
-       return 1;
-}
-
-/*
- * Get inode's extents as described in bmv, and format for output.
- * Calls formatter to fill the user's buffer until all extents
- * are mapped, until the passed-in bmv->bmv_count slots have
- * been filled, or until the formatter short-circuits the loop,
- * if it is tracking filled-in extents on its own.
- */
-int                                            /* error code */
-xfs_getbmap(
-       xfs_inode_t             *ip,
-       struct getbmapx         *bmv,           /* user bmap structure */
-       xfs_bmap_format_t       formatter,      /* format to user */
-       void                    *arg)           /* formatter arg */
-{
-       __int64_t               bmvend;         /* last block requested */
-       int                     error = 0;      /* return value */
-       __int64_t               fixlen;         /* length for -1 case */
-       int                     i;              /* extent number */
-       int                     lock;           /* lock state */
-       xfs_bmbt_irec_t         *map;           /* buffer for user's data */
-       xfs_mount_t             *mp;            /* file system mount point */
-       int                     nex;            /* # of user extents can do */
-       int                     nexleft;        /* # of user extents left */
-       int                     subnex;         /* # of bmapi's can do */
-       int                     nmap;           /* number of map entries */
-       struct getbmapx         *out;           /* output structure */
-       int                     whichfork;      /* data or attr fork */
-       int                     prealloced;     /* this is a file with
-                                                * preallocated data space */
-       int                     iflags;         /* interface flags */
-       int                     bmapi_flags;    /* flags for xfs_bmapi */
-       int                     cur_ext = 0;
-
-       mp = ip->i_mount;
-       iflags = bmv->bmv_iflags;
-       whichfork = iflags & BMV_IF_ATTRFORK ? XFS_ATTR_FORK : XFS_DATA_FORK;
-
-       if (whichfork == XFS_ATTR_FORK) {
-               if (XFS_IFORK_Q(ip)) {
-                       if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS &&
-                           ip->i_d.di_aformat != XFS_DINODE_FMT_BTREE &&
-                           ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)
-                               return XFS_ERROR(EINVAL);
-               } else if (unlikely(
-                          ip->i_d.di_aformat != 0 &&
-                          ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS)) {
-                       XFS_ERROR_REPORT("xfs_getbmap", XFS_ERRLEVEL_LOW,
-                                        ip->i_mount);
-                       return XFS_ERROR(EFSCORRUPTED);
-               }
-
-               prealloced = 0;
-               fixlen = 1LL << 32;
-       } else {
-               if (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS &&
-                   ip->i_d.di_format != XFS_DINODE_FMT_BTREE &&
-                   ip->i_d.di_format != XFS_DINODE_FMT_LOCAL)
-                       return XFS_ERROR(EINVAL);
-
-               if (xfs_get_extsz_hint(ip) ||
-                   ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC|XFS_DIFLAG_APPEND)){
-                       prealloced = 1;
-                       fixlen = mp->m_super->s_maxbytes;
-               } else {
-                       prealloced = 0;
-                       fixlen = XFS_ISIZE(ip);
-               }
-       }
-
-       if (bmv->bmv_length == -1) {
-               fixlen = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, fixlen));
-               bmv->bmv_length =
-                       max_t(__int64_t, fixlen - bmv->bmv_offset, 0);
-       } else if (bmv->bmv_length == 0) {
-               bmv->bmv_entries = 0;
-               return 0;
-       } else if (bmv->bmv_length < 0) {
-               return XFS_ERROR(EINVAL);
-       }
-
-       nex = bmv->bmv_count - 1;
-       if (nex <= 0)
-               return XFS_ERROR(EINVAL);
-       bmvend = bmv->bmv_offset + bmv->bmv_length;
-
-
-       if (bmv->bmv_count > ULONG_MAX / sizeof(struct getbmapx))
-               return XFS_ERROR(ENOMEM);
-       out = kmem_zalloc(bmv->bmv_count * sizeof(struct getbmapx), KM_MAYFAIL);
-       if (!out) {
-               out = kmem_zalloc_large(bmv->bmv_count *
-                                       sizeof(struct getbmapx));
-               if (!out)
-                       return XFS_ERROR(ENOMEM);
-       }
-
-       xfs_ilock(ip, XFS_IOLOCK_SHARED);
-       if (whichfork == XFS_DATA_FORK && !(iflags & BMV_IF_DELALLOC)) {
-               if (ip->i_delayed_blks || XFS_ISIZE(ip) > ip->i_d.di_size) {
-                       error = -filemap_write_and_wait(VFS_I(ip)->i_mapping);
-                       if (error)
-                               goto out_unlock_iolock;
-               }
-               /*
-                * even after flushing the inode, there can still be delalloc
-                * blocks on the inode beyond EOF due to speculative
-                * preallocation. These are not removed until the release
-                * function is called or the inode is inactivated. Hence we
-                * cannot assert here that ip->i_delayed_blks == 0.
-                */
-       }
-
-       lock = xfs_ilock_map_shared(ip);
-
-       /*
-        * Don't let nex be bigger than the number of extents
-        * we can have assuming alternating holes and real extents.
-        */
-       if (nex > XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1)
-               nex = XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1;
-
-       bmapi_flags = xfs_bmapi_aflag(whichfork);
-       if (!(iflags & BMV_IF_PREALLOC))
-               bmapi_flags |= XFS_BMAPI_IGSTATE;
-
-       /*
-        * Allocate enough space to handle "subnex" maps at a time.
-        */
-       error = ENOMEM;
-       subnex = 16;
-       map = kmem_alloc(subnex * sizeof(*map), KM_MAYFAIL | KM_NOFS);
-       if (!map)
-               goto out_unlock_ilock;
-
-       bmv->bmv_entries = 0;
-
-       if (XFS_IFORK_NEXTENTS(ip, whichfork) == 0 &&
-           (whichfork == XFS_ATTR_FORK || !(iflags & BMV_IF_DELALLOC))) {
-               error = 0;
-               goto out_free_map;
-       }
-
-       nexleft = nex;
-
-       do {
-               nmap = (nexleft > subnex) ? subnex : nexleft;
-               error = xfs_bmapi_read(ip, XFS_BB_TO_FSBT(mp, bmv->bmv_offset),
-                                      XFS_BB_TO_FSB(mp, bmv->bmv_length),
-                                      map, &nmap, bmapi_flags);
-               if (error)
-                       goto out_free_map;
-               ASSERT(nmap <= subnex);
-
-               for (i = 0; i < nmap && nexleft && bmv->bmv_length; i++) {
-                       out[cur_ext].bmv_oflags = 0;
-                       if (map[i].br_state == XFS_EXT_UNWRITTEN)
-                               out[cur_ext].bmv_oflags |= BMV_OF_PREALLOC;
-                       else if (map[i].br_startblock == DELAYSTARTBLOCK)
-                               out[cur_ext].bmv_oflags |= BMV_OF_DELALLOC;
-                       out[cur_ext].bmv_offset =
-                               XFS_FSB_TO_BB(mp, map[i].br_startoff);
-                       out[cur_ext].bmv_length =
-                               XFS_FSB_TO_BB(mp, map[i].br_blockcount);
-                       out[cur_ext].bmv_unused1 = 0;
-                       out[cur_ext].bmv_unused2 = 0;
-
-                       /*
-                        * delayed allocation extents that start beyond EOF can
-                        * occur due to speculative EOF allocation when the
-                        * delalloc extent is larger than the largest freespace
-                        * extent at conversion time. These extents cannot be
-                        * converted by data writeback, so can exist here even
-                        * if we are not supposed to be finding delalloc
-                        * extents.
-                        */
-                       if (map[i].br_startblock == DELAYSTARTBLOCK &&
-                           map[i].br_startoff <= XFS_B_TO_FSB(mp, XFS_ISIZE(ip)))
-                               ASSERT((iflags & BMV_IF_DELALLOC) != 0);
-
-                        if (map[i].br_startblock == HOLESTARTBLOCK &&
-                           whichfork == XFS_ATTR_FORK) {
-                               /* came to the end of attribute fork */
-                               out[cur_ext].bmv_oflags |= BMV_OF_LAST;
-                               goto out_free_map;
-                       }
-
-                       if (!xfs_getbmapx_fix_eof_hole(ip, &out[cur_ext],
-                                       prealloced, bmvend,
-                                       map[i].br_startblock))
-                               goto out_free_map;
-
-                       bmv->bmv_offset =
-                               out[cur_ext].bmv_offset +
-                               out[cur_ext].bmv_length;
-                       bmv->bmv_length =
-                               max_t(__int64_t, 0, bmvend - bmv->bmv_offset);
-
-                       /*
-                        * In case we don't want to return the hole,
-                        * don't increase cur_ext so that we can reuse
-                        * it in the next loop.
-                        */
-                       if ((iflags & BMV_IF_NO_HOLES) &&
-                           map[i].br_startblock == HOLESTARTBLOCK) {
-                               memset(&out[cur_ext], 0, sizeof(out[cur_ext]));
-                               continue;
-                       }
-
-                       nexleft--;
-                       bmv->bmv_entries++;
-                       cur_ext++;
-               }
-       } while (nmap && nexleft && bmv->bmv_length);
-
- out_free_map:
-       kmem_free(map);
- out_unlock_ilock:
-       xfs_iunlock_map_shared(ip, lock);
- out_unlock_iolock:
-       xfs_iunlock(ip, XFS_IOLOCK_SHARED);
-
-       for (i = 0; i < cur_ext; i++) {
-               int full = 0;   /* user array is full */
-
-               /* format results & advance arg */
-               error = formatter(&arg, &out[i], &full);
-               if (error || full)
-                       break;
-       }
-
-       if (is_vmalloc_addr(out))
-               kmem_free_large(out);
-       else
-               kmem_free(out);
-       return error;
-}
-
-/*
- * dead simple method of punching delalyed allocation blocks from a range in
- * the inode. Walks a block at a time so will be slow, but is only executed in
- * rare error cases so the overhead is not critical. This will alays punch out
- * both the start and end blocks, even if the ranges only partially overlap
- * them, so it is up to the caller to ensure that partial blocks are not
- * passed in.
- */
-int
-xfs_bmap_punch_delalloc_range(
-       struct xfs_inode        *ip,
-       xfs_fileoff_t           start_fsb,
-       xfs_fileoff_t           length)
-{
-       xfs_fileoff_t           remaining = length;
-       int                     error = 0;
-
-       ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
-
-       do {
-               int             done;
-               xfs_bmbt_irec_t imap;
-               int             nimaps = 1;
-               xfs_fsblock_t   firstblock;
-               xfs_bmap_free_t flist;
-
-               /*
-                * Map the range first and check that it is a delalloc extent
-                * before trying to unmap the range. Otherwise we will be
-                * trying to remove a real extent (which requires a
-                * transaction) or a hole, which is probably a bad idea...
-                */
-               error = xfs_bmapi_read(ip, start_fsb, 1, &imap, &nimaps,
-                                      XFS_BMAPI_ENTIRE);
-
-               if (error) {
-                       /* something screwed, just bail */
-                       if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
-                               xfs_alert(ip->i_mount,
-                       "Failed delalloc mapping lookup ino %lld fsb %lld.",
-                                               ip->i_ino, start_fsb);
-                       }
-                       break;
-               }
-               if (!nimaps) {
-                       /* nothing there */
-                       goto next_block;
-               }
-               if (imap.br_startblock != DELAYSTARTBLOCK) {
-                       /* been converted, ignore */
-                       goto next_block;
-               }
-               WARN_ON(imap.br_blockcount == 0);
-
-               /*
-                * Note: while we initialise the firstblock/flist pair, they
-                * should never be used because blocks should never be
-                * allocated or freed for a delalloc extent and hence we need
-                * don't cancel or finish them after the xfs_bunmapi() call.
-                */
-               xfs_bmap_init(&flist, &firstblock);
-               error = xfs_bunmapi(NULL, ip, start_fsb, 1, 0, 1, &firstblock,
-                                       &flist, &done);
-               if (error)
-                       break;
-
-               ASSERT(!flist.xbf_count && !flist.xbf_first);
-next_block:
-               start_fsb++;
-               remaining--;
-       } while(remaining > 0);
-
-       return error;
-}
index 1cf1292d29b70cdbee6168c9a530d3a891547d7f..33b41f35122574e0b1cf7ad7a2a9ae23ecfadddb 100644 (file)
@@ -107,41 +107,6 @@ static inline void xfs_bmap_init(xfs_bmap_free_t *flp, xfs_fsblock_t *fbp)
                (flp)->xbf_low = 0, *(fbp) = NULLFSBLOCK);
 }
 
-/*
- * Argument structure for xfs_bmap_alloc.
- */
-typedef struct xfs_bmalloca {
-       xfs_fsblock_t           *firstblock; /* i/o first block allocated */
-       struct xfs_bmap_free    *flist; /* bmap freelist */
-       struct xfs_trans        *tp;    /* transaction pointer */
-       struct xfs_inode        *ip;    /* incore inode pointer */
-       struct xfs_bmbt_irec    prev;   /* extent before the new one */
-       struct xfs_bmbt_irec    got;    /* extent after, or delayed */
-
-       xfs_fileoff_t           offset; /* offset in file filling in */
-       xfs_extlen_t            length; /* i/o length asked/allocated */
-       xfs_fsblock_t           blkno;  /* starting block of new extent */
-
-       struct xfs_btree_cur    *cur;   /* btree cursor */
-       xfs_extnum_t            idx;    /* current extent index */
-       int                     nallocs;/* number of extents alloc'd */
-       int                     logflags;/* flags for transaction logging */
-
-       xfs_extlen_t            total;  /* total blocks needed for xaction */
-       xfs_extlen_t            minlen; /* minimum allocation size (blocks) */
-       xfs_extlen_t            minleft; /* amount must be left after alloc */
-       char                    eof;    /* set if allocating past last extent */
-       char                    wasdel; /* replacing a delayed allocation */
-       char                    userdata;/* set if is user data */
-       char                    aeof;   /* allocated space at eof */
-       char                    conv;   /* overwriting unwritten extents */
-       char                    stack_switch;
-       int                     flags;
-       struct completion       *done;
-       struct work_struct      work;
-       int                     result;
-} xfs_bmalloca_t;
-
 /*
  * Flags for xfs_bmap_add_extent*.
  */
@@ -162,7 +127,7 @@ typedef struct xfs_bmalloca {
        { BMAP_RIGHT_FILLING,   "RF" }, \
        { BMAP_ATTRFORK,        "ATTR" }
 
-#if defined(__KERNEL) && defined(DEBUG)
+#ifdef DEBUG
 void   xfs_bmap_trace_exlist(struct xfs_inode *ip, xfs_extnum_t cnt,
                int whichfork, unsigned long caller_ip);
 #define        XFS_BMAP_TRACE_EXLIST(ip,c,w)   \
@@ -205,23 +170,4 @@ int        xfs_check_nostate_extents(struct xfs_ifork *ifp, xfs_extnum_t idx,
                xfs_extnum_t num);
 uint   xfs_default_attroffset(struct xfs_inode *ip);
 
-#ifdef __KERNEL__
-/* bmap to userspace formatter - copy to user & advance pointer */
-typedef int (*xfs_bmap_format_t)(void **, struct getbmapx *, int *);
-
-int    xfs_bmap_finish(struct xfs_trans **tp, struct xfs_bmap_free *flist,
-               int *committed);
-int    xfs_getbmap(struct xfs_inode *ip, struct getbmapx *bmv,
-               xfs_bmap_format_t formatter, void *arg);
-int    xfs_bmap_eof(struct xfs_inode *ip, xfs_fileoff_t endoff,
-               int whichfork, int *eof);
-int    xfs_bmap_count_blocks(struct xfs_trans *tp, struct xfs_inode *ip,
-               int whichfork, int *count);
-int    xfs_bmap_punch_delalloc_range(struct xfs_inode *ip,
-               xfs_fileoff_t start_fsb, xfs_fileoff_t length);
-
-xfs_daddr_t xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb);
-
-#endif /* __KERNEL__ */
-
 #endif /* __XFS_BMAP_H__ */
index 0c61a22be6fd630668a16d0f92b3db625fa03173..cf3bc76710c3de6e021b37ccc275894458f8c931 100644 (file)
@@ -17,7 +17,7 @@
  */
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_types.h"
+#include "xfs_format.h"
 #include "xfs_bit.h"
 #include "xfs_log.h"
 #include "xfs_trans.h"
@@ -722,7 +722,7 @@ xfs_bmbt_key_diff(
                                      cur->bc_rec.b.br_startoff;
 }
 
-static int
+static bool
 xfs_bmbt_verify(
        struct xfs_buf          *bp)
 {
@@ -775,7 +775,6 @@ xfs_bmbt_verify(
                return false;
 
        return true;
-
 }
 
 static void
@@ -789,7 +788,6 @@ xfs_bmbt_read_verify(
                                     bp->b_target->bt_mount, bp->b_addr);
                xfs_buf_ioerror(bp, EFSCORRUPTED);
        }
-
 }
 
 static void
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
new file mode 100644 (file)
index 0000000..541d59f
--- /dev/null
@@ -0,0 +1,2026 @@
+/*
+ * Copyright (c) 2000-2006 Silicon Graphics, Inc.
+ * Copyright (c) 2012 Red Hat, Inc.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+#include "xfs.h"
+#include "xfs_fs.h"
+#include "xfs_format.h"
+#include "xfs_bit.h"
+#include "xfs_log.h"
+#include "xfs_inum.h"
+#include "xfs_trans.h"
+#include "xfs_sb.h"
+#include "xfs_ag.h"
+#include "xfs_mount.h"
+#include "xfs_da_btree.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_alloc_btree.h"
+#include "xfs_ialloc_btree.h"
+#include "xfs_dinode.h"
+#include "xfs_inode.h"
+#include "xfs_btree.h"
+#include "xfs_extfree_item.h"
+#include "xfs_alloc.h"
+#include "xfs_bmap.h"
+#include "xfs_bmap_util.h"
+#include "xfs_rtalloc.h"
+#include "xfs_error.h"
+#include "xfs_quota.h"
+#include "xfs_trans_space.h"
+#include "xfs_trace.h"
+#include "xfs_icache.h"
+
+/* Kernel only BMAP related definitions and functions */
+
+/*
+ * Convert the given file system block to a disk block.  We have to treat it
+ * differently based on whether the file is a real time file or not, because the
+ * bmap code does.
+ */
+xfs_daddr_t
+xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb)
+{
+       return (XFS_IS_REALTIME_INODE(ip) ? \
+                (xfs_daddr_t)XFS_FSB_TO_BB((ip)->i_mount, (fsb)) : \
+                XFS_FSB_TO_DADDR((ip)->i_mount, (fsb)));
+}
+
+/*
+ * Routine to be called at transaction's end by xfs_bmapi, xfs_bunmapi
+ * caller.  Frees all the extents that need freeing, which must be done
+ * last due to locking considerations.  We never free any extents in
+ * the first transaction.
+ *
+ * Return 1 if the given transaction was committed and a new one
+ * started, and 0 otherwise in the committed parameter.
+ */
+int                                            /* error */
+xfs_bmap_finish(
+       xfs_trans_t             **tp,           /* transaction pointer addr */
+       xfs_bmap_free_t         *flist,         /* i/o: list extents to free */
+       int                     *committed)     /* xact committed or not */
+{
+       xfs_efd_log_item_t      *efd;           /* extent free data */
+       xfs_efi_log_item_t      *efi;           /* extent free intention */
+       int                     error;          /* error return value */
+       xfs_bmap_free_item_t    *free;          /* free extent item */
+       struct xfs_trans_res    tres;           /* new log reservation */
+       xfs_mount_t             *mp;            /* filesystem mount structure */
+       xfs_bmap_free_item_t    *next;          /* next item on free list */
+       xfs_trans_t             *ntp;           /* new transaction pointer */
+
+       ASSERT((*tp)->t_flags & XFS_TRANS_PERM_LOG_RES);
+       if (flist->xbf_count == 0) {
+               *committed = 0;
+               return 0;
+       }
+       ntp = *tp;
+       efi = xfs_trans_get_efi(ntp, flist->xbf_count);
+       for (free = flist->xbf_first; free; free = free->xbfi_next)
+               xfs_trans_log_efi_extent(ntp, efi, free->xbfi_startblock,
+                       free->xbfi_blockcount);
+
+       tres.tr_logres = ntp->t_log_res;
+       tres.tr_logcount = ntp->t_log_count;
+       tres.tr_logflags = XFS_TRANS_PERM_LOG_RES;
+       ntp = xfs_trans_dup(*tp);
+       error = xfs_trans_commit(*tp, 0);
+       *tp = ntp;
+       *committed = 1;
+       /*
+        * We have a new transaction, so we should return committed=1,
+        * even though we're returning an error.
+        */
+       if (error)
+               return error;
+
+       /*
+        * transaction commit worked ok so we can drop the extra ticket
+        * reference that we gained in xfs_trans_dup()
+        */
+       xfs_log_ticket_put(ntp->t_ticket);
+
+       error = xfs_trans_reserve(ntp, &tres, 0, 0);
+       if (error)
+               return error;
+       efd = xfs_trans_get_efd(ntp, efi, flist->xbf_count);
+       for (free = flist->xbf_first; free != NULL; free = next) {
+               next = free->xbfi_next;
+               if ((error = xfs_free_extent(ntp, free->xbfi_startblock,
+                               free->xbfi_blockcount))) {
+                       /*
+                        * The bmap free list will be cleaned up at a
+                        * higher level.  The EFI will be canceled when
+                        * this transaction is aborted.
+                        * Need to force shutdown here to make sure it
+                        * happens, since this transaction may not be
+                        * dirty yet.
+                        */
+                       mp = ntp->t_mountp;
+                       if (!XFS_FORCED_SHUTDOWN(mp))
+                               xfs_force_shutdown(mp,
+                                                  (error == EFSCORRUPTED) ?
+                                                  SHUTDOWN_CORRUPT_INCORE :
+                                                  SHUTDOWN_META_IO_ERROR);
+                       return error;
+               }
+               xfs_trans_log_efd_extent(ntp, efd, free->xbfi_startblock,
+                       free->xbfi_blockcount);
+               xfs_bmap_del_free(flist, NULL, free);
+       }
+       return 0;
+}
+
+int
+xfs_bmap_rtalloc(
+       struct xfs_bmalloca     *ap)    /* bmap alloc argument struct */
+{
+       xfs_alloctype_t atype = 0;      /* type for allocation routines */
+       int             error;          /* error return value */
+       xfs_mount_t     *mp;            /* mount point structure */
+       xfs_extlen_t    prod = 0;       /* product factor for allocators */
+       xfs_extlen_t    ralen = 0;      /* realtime allocation length */
+       xfs_extlen_t    align;          /* minimum allocation alignment */
+       xfs_rtblock_t   rtb;
+
+       mp = ap->ip->i_mount;
+       align = xfs_get_extsz_hint(ap->ip);
+       prod = align / mp->m_sb.sb_rextsize;
+       error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
+                                       align, 1, ap->eof, 0,
+                                       ap->conv, &ap->offset, &ap->length);
+       if (error)
+               return error;
+       ASSERT(ap->length);
+       ASSERT(ap->length % mp->m_sb.sb_rextsize == 0);
+
+       /*
+        * If the offset & length are not perfectly aligned
+        * then kill prod, it will just get us in trouble.
+        */
+       if (do_mod(ap->offset, align) || ap->length % align)
+               prod = 1;
+       /*
+        * Set ralen to be the actual requested length in rtextents.
+        */
+       ralen = ap->length / mp->m_sb.sb_rextsize;
+       /*
+        * If the old value was close enough to MAXEXTLEN that
+        * we rounded up to it, cut it back so it's valid again.
+        * Note that if it's a really large request (bigger than
+        * MAXEXTLEN), we don't hear about that number, and can't
+        * adjust the starting point to match it.
+        */
+       if (ralen * mp->m_sb.sb_rextsize >= MAXEXTLEN)
+               ralen = MAXEXTLEN / mp->m_sb.sb_rextsize;
+
+       /*
+        * Lock out other modifications to the RT bitmap inode.
+        */
+       xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL);
+       xfs_trans_ijoin(ap->tp, mp->m_rbmip, XFS_ILOCK_EXCL);
+
+       /*
+        * If it's an allocation to an empty file at offset 0,
+        * pick an extent that will space things out in the rt area.
+        */
+       if (ap->eof && ap->offset == 0) {
+               xfs_rtblock_t uninitialized_var(rtx); /* realtime extent no */
+
+               error = xfs_rtpick_extent(mp, ap->tp, ralen, &rtx);
+               if (error)
+                       return error;
+               ap->blkno = rtx * mp->m_sb.sb_rextsize;
+       } else {
+               ap->blkno = 0;
+       }
+
+       xfs_bmap_adjacent(ap);
+
+       /*
+        * Realtime allocation, done through xfs_rtallocate_extent.
+        */
+       atype = ap->blkno == 0 ?  XFS_ALLOCTYPE_ANY_AG : XFS_ALLOCTYPE_NEAR_BNO;
+       do_div(ap->blkno, mp->m_sb.sb_rextsize);
+       rtb = ap->blkno;
+       ap->length = ralen;
+       if ((error = xfs_rtallocate_extent(ap->tp, ap->blkno, 1, ap->length,
+                               &ralen, atype, ap->wasdel, prod, &rtb)))
+               return error;
+       if (rtb == NULLFSBLOCK && prod > 1 &&
+           (error = xfs_rtallocate_extent(ap->tp, ap->blkno, 1,
+                                          ap->length, &ralen, atype,
+                                          ap->wasdel, 1, &rtb)))
+               return error;
+       ap->blkno = rtb;
+       if (ap->blkno != NULLFSBLOCK) {
+               ap->blkno *= mp->m_sb.sb_rextsize;
+               ralen *= mp->m_sb.sb_rextsize;
+               ap->length = ralen;
+               ap->ip->i_d.di_nblocks += ralen;
+               xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
+               if (ap->wasdel)
+                       ap->ip->i_delayed_blks -= ralen;
+               /*
+                * Adjust the disk quota also. This was reserved
+                * earlier.
+                */
+               xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
+                       ap->wasdel ? XFS_TRANS_DQ_DELRTBCOUNT :
+                                       XFS_TRANS_DQ_RTBCOUNT, (long) ralen);
+       } else {
+               ap->length = 0;
+       }
+       return 0;
+}
+
+/*
+ * Stack switching interfaces for allocation
+ */
+static void
+xfs_bmapi_allocate_worker(
+       struct work_struct      *work)
+{
+       struct xfs_bmalloca     *args = container_of(work,
+                                               struct xfs_bmalloca, work);
+       unsigned long           pflags;
+
+       /* we are in a transaction context here */
+       current_set_flags_nested(&pflags, PF_FSTRANS);
+
+       args->result = __xfs_bmapi_allocate(args);
+       complete(args->done);
+
+       current_restore_flags_nested(&pflags, PF_FSTRANS);
+}
+
+/*
+ * Some allocation requests often come in with little stack to work on. Push
+ * them off to a worker thread so there is lots of stack to use. Otherwise just
+ * call directly to avoid the context switch overhead here.
+ */
+int
+xfs_bmapi_allocate(
+       struct xfs_bmalloca     *args)
+{
+       DECLARE_COMPLETION_ONSTACK(done);
+
+       if (!args->stack_switch)
+               return __xfs_bmapi_allocate(args);
+
+
+       args->done = &done;
+       INIT_WORK_ONSTACK(&args->work, xfs_bmapi_allocate_worker);
+       queue_work(xfs_alloc_wq, &args->work);
+       wait_for_completion(&done);
+       return args->result;
+}
+
+/*
+ * Check if the endoff is outside the last extent. If so the caller will grow
+ * the allocation to a stripe unit boundary.  All offsets are considered outside
+ * the end of file for an empty fork, so 1 is returned in *eof in that case.
+ */
+int
+xfs_bmap_eof(
+       struct xfs_inode        *ip,
+       xfs_fileoff_t           endoff,
+       int                     whichfork,
+       int                     *eof)
+{
+       struct xfs_bmbt_irec    rec;
+       int                     error;
+
+       error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, eof);
+       if (error || *eof)
+               return error;
+
+       *eof = endoff >= rec.br_startoff + rec.br_blockcount;
+       return 0;
+}
+
+/*
+ * Extent tree block counting routines.
+ */
+
+/*
+ * Count leaf blocks given a range of extent records.
+ */
+STATIC void
+xfs_bmap_count_leaves(
+       xfs_ifork_t             *ifp,
+       xfs_extnum_t            idx,
+       int                     numrecs,
+       int                     *count)
+{
+       int             b;
+
+       for (b = 0; b < numrecs; b++) {
+               xfs_bmbt_rec_host_t *frp = xfs_iext_get_ext(ifp, idx + b);
+               *count += xfs_bmbt_get_blockcount(frp);
+       }
+}
+
+/*
+ * Count leaf blocks given a range of extent records originally
+ * in btree format.
+ */
+STATIC void
+xfs_bmap_disk_count_leaves(
+       struct xfs_mount        *mp,
+       struct xfs_btree_block  *block,
+       int                     numrecs,
+       int                     *count)
+{
+       int             b;
+       xfs_bmbt_rec_t  *frp;
+
+       for (b = 1; b <= numrecs; b++) {
+               frp = XFS_BMBT_REC_ADDR(mp, block, b);
+               *count += xfs_bmbt_disk_get_blockcount(frp);
+       }
+}
+
+/*
+ * Recursively walks each level of a btree
+ * to count total fsblocks in use.
+ */
+STATIC int                                     /* error */
+xfs_bmap_count_tree(
+       xfs_mount_t     *mp,            /* file system mount point */
+       xfs_trans_t     *tp,            /* transaction pointer */
+       xfs_ifork_t     *ifp,           /* inode fork pointer */
+       xfs_fsblock_t   blockno,        /* file system block number */
+       int             levelin,        /* level in btree */
+       int             *count)         /* Count of blocks */
+{
+       int                     error;
+       xfs_buf_t               *bp, *nbp;
+       int                     level = levelin;
+       __be64                  *pp;
+       xfs_fsblock_t           bno = blockno;
+       xfs_fsblock_t           nextbno;
+       struct xfs_btree_block  *block, *nextblock;
+       int                     numrecs;
+
+       error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp, XFS_BMAP_BTREE_REF,
+                                               &xfs_bmbt_buf_ops);
+       if (error)
+               return error;
+       *count += 1;
+       block = XFS_BUF_TO_BLOCK(bp);
+
+       if (--level) {
+               /* Not at node above leaves, count this level of nodes */
+               nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
+               while (nextbno != NULLFSBLOCK) {
+                       error = xfs_btree_read_bufl(mp, tp, nextbno, 0, &nbp,
+                                               XFS_BMAP_BTREE_REF,
+                                               &xfs_bmbt_buf_ops);
+                       if (error)
+                               return error;
+                       *count += 1;
+                       nextblock = XFS_BUF_TO_BLOCK(nbp);
+                       nextbno = be64_to_cpu(nextblock->bb_u.l.bb_rightsib);
+                       xfs_trans_brelse(tp, nbp);
+               }
+
+               /* Dive to the next level */
+               pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
+               bno = be64_to_cpu(*pp);
+               if (unlikely((error =
+                    xfs_bmap_count_tree(mp, tp, ifp, bno, level, count)) < 0)) {
+                       xfs_trans_brelse(tp, bp);
+                       XFS_ERROR_REPORT("xfs_bmap_count_tree(1)",
+                                        XFS_ERRLEVEL_LOW, mp);
+                       return XFS_ERROR(EFSCORRUPTED);
+               }
+               xfs_trans_brelse(tp, bp);
+       } else {
+               /* count all level 1 nodes and their leaves */
+               for (;;) {
+                       nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
+                       numrecs = be16_to_cpu(block->bb_numrecs);
+                       xfs_bmap_disk_count_leaves(mp, block, numrecs, count);
+                       xfs_trans_brelse(tp, bp);
+                       if (nextbno == NULLFSBLOCK)
+                               break;
+                       bno = nextbno;
+                       error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
+                                               XFS_BMAP_BTREE_REF,
+                                               &xfs_bmbt_buf_ops);
+                       if (error)
+                               return error;
+                       *count += 1;
+                       block = XFS_BUF_TO_BLOCK(bp);
+               }
+       }
+       return 0;
+}
+
+/*
+ * Count fsblocks of the given fork.
+ */
+int                                            /* error */
+xfs_bmap_count_blocks(
+       xfs_trans_t             *tp,            /* transaction pointer */
+       xfs_inode_t             *ip,            /* incore inode */
+       int                     whichfork,      /* data or attr fork */
+       int                     *count)         /* out: count of blocks */
+{
+       struct xfs_btree_block  *block; /* current btree block */
+       xfs_fsblock_t           bno;    /* block # of "block" */
+       xfs_ifork_t             *ifp;   /* fork structure */
+       int                     level;  /* btree level, for checking */
+       xfs_mount_t             *mp;    /* file system mount structure */
+       __be64                  *pp;    /* pointer to block address */
+
+       bno = NULLFSBLOCK;
+       mp = ip->i_mount;
+       ifp = XFS_IFORK_PTR(ip, whichfork);
+       if ( XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS ) {
+               xfs_bmap_count_leaves(ifp, 0,
+                       ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t),
+                       count);
+               return 0;
+       }
+
+       /*
+        * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
+        */
+       block = ifp->if_broot;
+       level = be16_to_cpu(block->bb_level);
+       ASSERT(level > 0);
+       pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
+       bno = be64_to_cpu(*pp);
+       ASSERT(bno != NULLDFSBNO);
+       ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
+       ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);
+
+       if (unlikely(xfs_bmap_count_tree(mp, tp, ifp, bno, level, count) < 0)) {
+               XFS_ERROR_REPORT("xfs_bmap_count_blocks(2)", XFS_ERRLEVEL_LOW,
+                                mp);
+               return XFS_ERROR(EFSCORRUPTED);
+       }
+
+       return 0;
+}
+
+/*
+ * returns 1 for success, 0 if we failed to map the extent.
+ */
+STATIC int
+xfs_getbmapx_fix_eof_hole(
+       xfs_inode_t             *ip,            /* xfs incore inode pointer */
+       struct getbmapx         *out,           /* output structure */
+       int                     prealloced,     /* this is a file with
+                                                * preallocated data space */
+       __int64_t               end,            /* last block requested */
+       xfs_fsblock_t           startblock)
+{
+       __int64_t               fixlen;
+       xfs_mount_t             *mp;            /* file system mount point */
+       xfs_ifork_t             *ifp;           /* inode fork pointer */
+       xfs_extnum_t            lastx;          /* last extent pointer */
+       xfs_fileoff_t           fileblock;
+
+       if (startblock == HOLESTARTBLOCK) {
+               mp = ip->i_mount;
+               out->bmv_block = -1;
+               fixlen = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, XFS_ISIZE(ip)));
+               fixlen -= out->bmv_offset;
+               if (prealloced && out->bmv_offset + out->bmv_length == end) {
+                       /* Came to hole at EOF. Trim it. */
+                       if (fixlen <= 0)
+                               return 0;
+                       out->bmv_length = fixlen;
+               }
+       } else {
+               if (startblock == DELAYSTARTBLOCK)
+                       out->bmv_block = -2;
+               else
+                       out->bmv_block = xfs_fsb_to_db(ip, startblock);
+               fileblock = XFS_BB_TO_FSB(ip->i_mount, out->bmv_offset);
+               ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
+               if (xfs_iext_bno_to_ext(ifp, fileblock, &lastx) &&
+                  (lastx == (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t))-1))
+                       out->bmv_oflags |= BMV_OF_LAST;
+       }
+
+       return 1;
+}
+
+/*
+ * Get inode's extents as described in bmv, and format for output.
+ * Calls formatter to fill the user's buffer until all extents
+ * are mapped, until the passed-in bmv->bmv_count slots have
+ * been filled, or until the formatter short-circuits the loop,
+ * if it is tracking filled-in extents on its own.
+ */
+int                                            /* error code */
+xfs_getbmap(
+       xfs_inode_t             *ip,
+       struct getbmapx         *bmv,           /* user bmap structure */
+       xfs_bmap_format_t       formatter,      /* format to user */
+       void                    *arg)           /* formatter arg */
+{
+       __int64_t               bmvend;         /* last block requested */
+       int                     error = 0;      /* return value */
+       __int64_t               fixlen;         /* length for -1 case */
+       int                     i;              /* extent number */
+       int                     lock;           /* lock state */
+       xfs_bmbt_irec_t         *map;           /* buffer for user's data */
+       xfs_mount_t             *mp;            /* file system mount point */
+       int                     nex;            /* # of user extents can do */
+       int                     nexleft;        /* # of user extents left */
+       int                     subnex;         /* # of bmapi's can do */
+       int                     nmap;           /* number of map entries */
+       struct getbmapx         *out;           /* output structure */
+       int                     whichfork;      /* data or attr fork */
+       int                     prealloced;     /* this is a file with
+                                                * preallocated data space */
+       int                     iflags;         /* interface flags */
+       int                     bmapi_flags;    /* flags for xfs_bmapi */
+       int                     cur_ext = 0;
+
+       mp = ip->i_mount;
+       iflags = bmv->bmv_iflags;
+       whichfork = iflags & BMV_IF_ATTRFORK ? XFS_ATTR_FORK : XFS_DATA_FORK;
+
+       if (whichfork == XFS_ATTR_FORK) {
+               if (XFS_IFORK_Q(ip)) {
+                       if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS &&
+                           ip->i_d.di_aformat != XFS_DINODE_FMT_BTREE &&
+                           ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)
+                               return XFS_ERROR(EINVAL);
+               } else if (unlikely(
+                          ip->i_d.di_aformat != 0 &&
+                          ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS)) {
+                       XFS_ERROR_REPORT("xfs_getbmap", XFS_ERRLEVEL_LOW,
+                                        ip->i_mount);
+                       return XFS_ERROR(EFSCORRUPTED);
+               }
+
+               prealloced = 0;
+               fixlen = 1LL << 32;
+       } else {
+               if (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS &&
+                   ip->i_d.di_format != XFS_DINODE_FMT_BTREE &&
+                   ip->i_d.di_format != XFS_DINODE_FMT_LOCAL)
+                       return XFS_ERROR(EINVAL);
+
+               if (xfs_get_extsz_hint(ip) ||
+                   ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC|XFS_DIFLAG_APPEND)){
+                       prealloced = 1;
+                       fixlen = mp->m_super->s_maxbytes;
+               } else {
+                       prealloced = 0;
+                       fixlen = XFS_ISIZE(ip);
+               }
+       }
+
+       if (bmv->bmv_length == -1) {
+               fixlen = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, fixlen));
+               bmv->bmv_length =
+                       max_t(__int64_t, fixlen - bmv->bmv_offset, 0);
+       } else if (bmv->bmv_length == 0) {
+               bmv->bmv_entries = 0;
+               return 0;
+       } else if (bmv->bmv_length < 0) {
+               return XFS_ERROR(EINVAL);
+       }
+
+       nex = bmv->bmv_count - 1;
+       if (nex <= 0)
+               return XFS_ERROR(EINVAL);
+       bmvend = bmv->bmv_offset + bmv->bmv_length;
+
+
+       if (bmv->bmv_count > ULONG_MAX / sizeof(struct getbmapx))
+               return XFS_ERROR(ENOMEM);
+       out = kmem_zalloc(bmv->bmv_count * sizeof(struct getbmapx), KM_MAYFAIL);
+       if (!out) {
+               out = kmem_zalloc_large(bmv->bmv_count *
+                                       sizeof(struct getbmapx));
+               if (!out)
+                       return XFS_ERROR(ENOMEM);
+       }
+
+       xfs_ilock(ip, XFS_IOLOCK_SHARED);
+       if (whichfork == XFS_DATA_FORK && !(iflags & BMV_IF_DELALLOC)) {
+               if (ip->i_delayed_blks || XFS_ISIZE(ip) > ip->i_d.di_size) {
+                       error = -filemap_write_and_wait(VFS_I(ip)->i_mapping);
+                       if (error)
+                               goto out_unlock_iolock;
+               }
+               /*
+                * even after flushing the inode, there can still be delalloc
+                * blocks on the inode beyond EOF due to speculative
+                * preallocation. These are not removed until the release
+                * function is called or the inode is inactivated. Hence we
+                * cannot assert here that ip->i_delayed_blks == 0.
+                */
+       }
+
+       lock = xfs_ilock_map_shared(ip);
+
+       /*
+        * Don't let nex be bigger than the number of extents
+        * we can have assuming alternating holes and real extents.
+        */
+       if (nex > XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1)
+               nex = XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1;
+
+       bmapi_flags = xfs_bmapi_aflag(whichfork);
+       if (!(iflags & BMV_IF_PREALLOC))
+               bmapi_flags |= XFS_BMAPI_IGSTATE;
+
+       /*
+        * Allocate enough space to handle "subnex" maps at a time.
+        */
+       error = ENOMEM;
+       subnex = 16;
+       map = kmem_alloc(subnex * sizeof(*map), KM_MAYFAIL | KM_NOFS);
+       if (!map)
+               goto out_unlock_ilock;
+
+       bmv->bmv_entries = 0;
+
+       if (XFS_IFORK_NEXTENTS(ip, whichfork) == 0 &&
+           (whichfork == XFS_ATTR_FORK || !(iflags & BMV_IF_DELALLOC))) {
+               error = 0;
+               goto out_free_map;
+       }
+
+       nexleft = nex;
+
+       do {
+               nmap = (nexleft > subnex) ? subnex : nexleft;
+               error = xfs_bmapi_read(ip, XFS_BB_TO_FSBT(mp, bmv->bmv_offset),
+                                      XFS_BB_TO_FSB(mp, bmv->bmv_length),
+                                      map, &nmap, bmapi_flags);
+               if (error)
+                       goto out_free_map;
+               ASSERT(nmap <= subnex);
+
+               for (i = 0; i < nmap && nexleft && bmv->bmv_length; i++) {
+                       out[cur_ext].bmv_oflags = 0;
+                       if (map[i].br_state == XFS_EXT_UNWRITTEN)
+                               out[cur_ext].bmv_oflags |= BMV_OF_PREALLOC;
+                       else if (map[i].br_startblock == DELAYSTARTBLOCK)
+                               out[cur_ext].bmv_oflags |= BMV_OF_DELALLOC;
+                       out[cur_ext].bmv_offset =
+                               XFS_FSB_TO_BB(mp, map[i].br_startoff);
+                       out[cur_ext].bmv_length =
+                               XFS_FSB_TO_BB(mp, map[i].br_blockcount);
+                       out[cur_ext].bmv_unused1 = 0;
+                       out[cur_ext].bmv_unused2 = 0;
+
+                       /*
+                        * delayed allocation extents that start beyond EOF can
+                        * occur due to speculative EOF allocation when the
+                        * delalloc extent is larger than the largest freespace
+                        * extent at conversion time. These extents cannot be
+                        * converted by data writeback, so can exist here even
+                        * if we are not supposed to be finding delalloc
+                        * extents.
+                        */
+                       if (map[i].br_startblock == DELAYSTARTBLOCK &&
+                           map[i].br_startoff <= XFS_B_TO_FSB(mp, XFS_ISIZE(ip)))
+                               ASSERT((iflags & BMV_IF_DELALLOC) != 0);
+
+                        if (map[i].br_startblock == HOLESTARTBLOCK &&
+                           whichfork == XFS_ATTR_FORK) {
+                               /* came to the end of attribute fork */
+                               out[cur_ext].bmv_oflags |= BMV_OF_LAST;
+                               goto out_free_map;
+                       }
+
+                       if (!xfs_getbmapx_fix_eof_hole(ip, &out[cur_ext],
+                                       prealloced, bmvend,
+                                       map[i].br_startblock))
+                               goto out_free_map;
+
+                       bmv->bmv_offset =
+                               out[cur_ext].bmv_offset +
+                               out[cur_ext].bmv_length;
+                       bmv->bmv_length =
+                               max_t(__int64_t, 0, bmvend - bmv->bmv_offset);
+
+                       /*
+                        * In case we don't want to return the hole,
+                        * don't increase cur_ext so that we can reuse
+                        * it in the next loop.
+                        */
+                       if ((iflags & BMV_IF_NO_HOLES) &&
+                           map[i].br_startblock == HOLESTARTBLOCK) {
+                               memset(&out[cur_ext], 0, sizeof(out[cur_ext]));
+                               continue;
+                       }
+
+                       nexleft--;
+                       bmv->bmv_entries++;
+                       cur_ext++;
+               }
+       } while (nmap && nexleft && bmv->bmv_length);
+
+ out_free_map:
+       kmem_free(map);
+ out_unlock_ilock:
+       xfs_iunlock_map_shared(ip, lock);
+ out_unlock_iolock:
+       xfs_iunlock(ip, XFS_IOLOCK_SHARED);
+
+       for (i = 0; i < cur_ext; i++) {
+               int full = 0;   /* user array is full */
+
+               /* format results & advance arg */
+               error = formatter(&arg, &out[i], &full);
+               if (error || full)
+                       break;
+       }
+
+       if (is_vmalloc_addr(out))
+               kmem_free_large(out);
+       else
+               kmem_free(out);
+       return error;
+}
+
+/*
+ * dead simple method of punching delayed allocation blocks from a range in
+ * the inode. Walks a block at a time so will be slow, but is only executed in
+ * rare error cases so the overhead is not critical. This will always punch out
+ * both the start and end blocks, even if the ranges only partially overlap
+ * them, so it is up to the caller to ensure that partial blocks are not
+ * passed in.
+ */
+int
+xfs_bmap_punch_delalloc_range(
+       struct xfs_inode        *ip,
+       xfs_fileoff_t           start_fsb,
+       xfs_fileoff_t           length)
+{
+       xfs_fileoff_t           remaining = length;
+       int                     error = 0;
+
+       ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
+
+       do {
+               int             done;
+               xfs_bmbt_irec_t imap;
+               int             nimaps = 1;
+               xfs_fsblock_t   firstblock;
+               xfs_bmap_free_t flist;
+
+               /*
+                * Map the range first and check that it is a delalloc extent
+                * before trying to unmap the range. Otherwise we will be
+                * trying to remove a real extent (which requires a
+                * transaction) or a hole, which is probably a bad idea...
+                */
+               error = xfs_bmapi_read(ip, start_fsb, 1, &imap, &nimaps,
+                                      XFS_BMAPI_ENTIRE);
+
+               if (error) {
+                       /* something screwed, just bail */
+                       if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
+                               xfs_alert(ip->i_mount,
+                       "Failed delalloc mapping lookup ino %lld fsb %lld.",
+                                               ip->i_ino, start_fsb);
+                       }
+                       break;
+               }
+               if (!nimaps) {
+                       /* nothing there */
+                       goto next_block;
+               }
+               if (imap.br_startblock != DELAYSTARTBLOCK) {
+                       /* been converted, ignore */
+                       goto next_block;
+               }
+               WARN_ON(imap.br_blockcount == 0);
+
+               /*
+                * Note: while we initialise the firstblock/flist pair, they
+                * should never be used because blocks should never be
+                * allocated or freed for a delalloc extent and hence we need
+                * don't cancel or finish them after the xfs_bunmapi() call.
+                */
+               xfs_bmap_init(&flist, &firstblock);
+               error = xfs_bunmapi(NULL, ip, start_fsb, 1, 0, 1, &firstblock,
+                                       &flist, &done);
+               if (error)
+                       break;
+
+               ASSERT(!flist.xbf_count && !flist.xbf_first);
+next_block:
+               start_fsb++;
+               remaining--;
+       } while(remaining > 0);
+
+       return error;
+}
+
+/*
+ * Decide whether an inode is a candidate for trimming post-EOF blocks.
+ * With @force set, regular files that are marked preallocated or
+ * append-only are also considered, provided they carry delalloc blocks.
+ */
+bool
+xfs_can_free_eofblocks(struct xfs_inode *ip, bool force)
+{
+       /* Only regular files carry speculative prealloc/delalloc blocks. */
+       if (!S_ISREG(ip->i_d.di_mode))
+               return false;
+
+       /*
+        * An empty file with no cached pages and no delayed allocations
+        * cannot have anything speculative left to remove.
+        */
+       if (VFS_I(ip)->i_size == 0 && VN_CACHED(VFS_I(ip)) == 0 &&
+           ip->i_delayed_blks == 0)
+               return false;
+
+       /* Don't pull the extent list into memory just for this check. */
+       if (!(ip->i_df.if_flags & XFS_IFEXTENTS))
+               return false;
+
+       /*
+        * Real preallocated or append-only files are left alone unless the
+        * caller forces the issue and there are delalloc blocks to drop.
+        */
+       if ((ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)) &&
+           (!force || ip->i_delayed_blks == 0))
+               return false;
+
+       return true;
+}
+
+/*
+ * This is called by xfs_inactive to free any blocks beyond eof
+ * when the link count isn't zero and by xfs_dm_punch_hole() when
+ * punching a hole to EOF.
+ *
+ * @mp:          mount the inode belongs to
+ * @ip:          inode whose post-EOF blocks are to be freed
+ * @need_iolock: when true, XFS_IOLOCK_EXCL is taken here with a trylock;
+ *               EAGAIN is returned if it cannot be acquired immediately
+ *
+ * Returns 0 on success, EAGAIN if the iolock was contended, or another
+ * errno on failure.
+ */
+int
+xfs_free_eofblocks(
+       xfs_mount_t     *mp,
+       xfs_inode_t     *ip,
+       bool            need_iolock)
+{
+       xfs_trans_t     *tp;
+       int             error;
+       xfs_fileoff_t   end_fsb;
+       xfs_fileoff_t   last_fsb;
+       xfs_filblks_t   map_len;
+       int             nimaps;
+       xfs_bmbt_irec_t imap;
+
+       /*
+        * Figure out if there are any blocks beyond the end
+        * of the file.  If not, then there is nothing to do.
+        */
+       end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_ISIZE(ip));
+       last_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
+       if (last_fsb <= end_fsb)
+               return 0;
+       map_len = last_fsb - end_fsb;
+
+       /* Probe the post-EOF range; a shared ilock suffices for a read map. */
+       nimaps = 1;
+       xfs_ilock(ip, XFS_ILOCK_SHARED);
+       error = xfs_bmapi_read(ip, end_fsb, map_len, &imap, &nimaps, 0);
+       xfs_iunlock(ip, XFS_ILOCK_SHARED);
+
+       /* Only proceed if something other than a hole lies past EOF. */
+       if (!error && (nimaps != 0) &&
+           (imap.br_startblock != HOLESTARTBLOCK ||
+            ip->i_delayed_blks)) {
+               /*
+                * Attach the dquots to the inode up front.
+                */
+               error = xfs_qm_dqattach(ip, 0);
+               if (error)
+                       return error;
+
+               /*
+                * There are blocks after the end of file.
+                * Free them up now by truncating the file to
+                * its current size.
+                */
+               tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE);
+
+               if (need_iolock) {
+                       /* Non-blocking attempt; bail and let the caller retry. */
+                       if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
+                               xfs_trans_cancel(tp, 0);
+                               /* positive errno, matching XFS_ERROR() usage here */
+                               return EAGAIN;
+                       }
+               }
+
+               error = xfs_trans_reserve(tp, &M_RES(mp)->tr_itruncate, 0, 0);
+               if (error) {
+                       ASSERT(XFS_FORCED_SHUTDOWN(mp));
+                       xfs_trans_cancel(tp, 0);
+                       if (need_iolock)
+                               xfs_iunlock(ip, XFS_IOLOCK_EXCL);
+                       return error;
+               }
+
+               xfs_ilock(ip, XFS_ILOCK_EXCL);
+               xfs_trans_ijoin(tp, ip, 0);
+
+               /*
+                * Do not update the on-disk file size.  If we update the
+                * on-disk file size and then the system crashes before the
+                * contents of the file are flushed to disk then the files
+                * may be full of holes (ie NULL files bug).
+                */
+               error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK,
+                                             XFS_ISIZE(ip));
+               if (error) {
+                       /*
+                        * If we get an error at this point we simply don't
+                        * bother truncating the file.
+                        */
+                       xfs_trans_cancel(tp,
+                                        (XFS_TRANS_RELEASE_LOG_RES |
+                                         XFS_TRANS_ABORT));
+               } else {
+                       error = xfs_trans_commit(tp,
+                                               XFS_TRANS_RELEASE_LOG_RES);
+                       if (!error)
+                               xfs_inode_clear_eofblocks_tag(ip);
+               }
+
+               xfs_iunlock(ip, XFS_ILOCK_EXCL);
+               if (need_iolock)
+                       xfs_iunlock(ip, XFS_IOLOCK_EXCL);
+       }
+       return error;
+}
+
+/*
+ * xfs_alloc_file_space()
+ *      This routine allocates disk space for the given file.
+ *
+ *     If alloc_type == 0, this request is for an ALLOCSP type
+ *     request which will change the file size.  In this case, no
+ *     DMAPI event will be generated by the call.  A TRUNCATE event
+ *     will be generated later by xfs_setattr.
+ *
+ *     If alloc_type != 0, this request is for a RESVSP type
+ *     request, and a DMAPI DM_EVENT_WRITE will be generated if the
+ *     lower block boundary byte address is less than the file's
+ *     length.
+ *
+ * @ip:         inode to allocate space for
+ * @offset:     byte offset at which allocation starts
+ * @len:        byte length to allocate (must be > 0)
+ * @alloc_type: bmapi flags controlling the allocation type
+ * @attr_flags: XFS_ATTR_* flags from the caller (unused here)
+ *
+ * RETURNS:
+ *       0 on success
+ *      errno on error
+ *
+ */
+STATIC int
+xfs_alloc_file_space(
+       xfs_inode_t             *ip,
+       xfs_off_t               offset,
+       xfs_off_t               len,
+       int                     alloc_type,
+       int                     attr_flags)
+{
+       xfs_mount_t             *mp = ip->i_mount;
+       xfs_off_t               count;
+       xfs_filblks_t           allocated_fsb;
+       xfs_filblks_t           allocatesize_fsb;
+       xfs_extlen_t            extsz, temp;
+       xfs_fileoff_t           startoffset_fsb;
+       xfs_fsblock_t           firstfsb;
+       int                     nimaps;
+       int                     quota_flag;
+       int                     rt;
+       xfs_trans_t             *tp;
+       xfs_bmbt_irec_t         imaps[1], *imapp;
+       xfs_bmap_free_t         free_list;
+       uint                    qblocks, resblks, resrtextents;
+       int                     committed;
+       int                     error;
+
+       trace_xfs_alloc_file_space(ip);
+
+       if (XFS_FORCED_SHUTDOWN(mp))
+               return XFS_ERROR(EIO);
+
+       error = xfs_qm_dqattach(ip, 0);
+       if (error)
+               return error;
+
+       if (len <= 0)
+               return XFS_ERROR(EINVAL);
+
+       rt = XFS_IS_REALTIME_INODE(ip);
+       extsz = xfs_get_extsz_hint(ip);
+
+       count = len;
+       imapp = &imaps[0];
+       nimaps = 1;
+       startoffset_fsb = XFS_B_TO_FSBT(mp, offset);
+       allocatesize_fsb = XFS_B_TO_FSB(mp, count);
+
+       /*
+        * Allocate file space until done or until there is an error
+        */
+       while (allocatesize_fsb && !error) {
+               xfs_fileoff_t   s, e;
+
+               /*
+                * Determine space reservations for data/realtime.
+                */
+               if (unlikely(extsz)) {
+                       s = startoffset_fsb;
+                       do_div(s, extsz);
+                       s *= extsz;
+                       e = startoffset_fsb + allocatesize_fsb;
+                       if ((temp = do_mod(startoffset_fsb, extsz)))
+                               e += temp;
+                       if ((temp = do_mod(e, extsz)))
+                               e += extsz - temp;
+               } else {
+                       s = 0;
+                       e = allocatesize_fsb;
+               }
+
+               /*
+                * The transaction reservation is limited to a 32-bit block
+                * count, hence we need to limit the number of blocks we are
+                * trying to reserve to avoid an overflow. We can't allocate
+                * more than @nimaps extents, and an extent is limited on disk
+                * to MAXEXTLEN (21 bits), so use that to enforce the limit.
+                */
+               resblks = min_t(xfs_fileoff_t, (e - s), (MAXEXTLEN * nimaps));
+               if (unlikely(rt)) {
+                       resrtextents = qblocks = resblks;
+                       resrtextents /= mp->m_sb.sb_rextsize;
+                       resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
+                       quota_flag = XFS_QMOPT_RES_RTBLKS;
+               } else {
+                       resrtextents = 0;
+                       resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resblks);
+                       quota_flag = XFS_QMOPT_RES_REGBLKS;
+               }
+
+               /*
+                * Allocate and setup the transaction.
+                */
+               tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
+               error = xfs_trans_reserve(tp, &M_RES(mp)->tr_write,
+                                         resblks, resrtextents);
+               /*
+                * Check for running out of space
+                */
+               if (error) {
+                       /*
+                        * Free the transaction structure.
+                        */
+                       ASSERT(error == ENOSPC || XFS_FORCED_SHUTDOWN(mp));
+                       xfs_trans_cancel(tp, 0);
+                       break;
+               }
+               xfs_ilock(ip, XFS_ILOCK_EXCL);
+               error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks,
+                                                     0, quota_flag);
+               if (error)
+                       goto error1;
+
+               xfs_trans_ijoin(tp, ip, 0);
+
+               xfs_bmap_init(&free_list, &firstfsb);
+               error = xfs_bmapi_write(tp, ip, startoffset_fsb,
+                                       allocatesize_fsb, alloc_type, &firstfsb,
+                                       0, imapp, &nimaps, &free_list);
+               if (error)
+                       goto error0;
+
+               /*
+                * Complete the transaction
+                */
+               error = xfs_bmap_finish(&tp, &free_list, &committed);
+               if (error)
+                       goto error0;
+
+               error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
+               xfs_iunlock(ip, XFS_ILOCK_EXCL);
+               if (error)
+                       break;
+
+               /*
+                * If the allocator returned no mappings, *imapp was never
+                * written.  Check for that before reading br_blockcount so
+                * we do not consume uninitialised stack data.
+                */
+               if (nimaps == 0) {
+                       error = XFS_ERROR(ENOSPC);
+                       break;
+               }
+
+               allocated_fsb = imapp->br_blockcount;
+
+               startoffset_fsb += allocated_fsb;
+               allocatesize_fsb -= allocated_fsb;
+       }
+
+       return error;
+
+error0:        /* Cancel bmap, unlock inode, unreserve quota blocks, cancel trans */
+       xfs_bmap_cancel(&free_list);
+       xfs_trans_unreserve_quota_nblks(tp, ip, (long)qblocks, 0, quota_flag);
+
+error1:        /* Just cancel transaction */
+       xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
+       xfs_iunlock(ip, XFS_ILOCK_EXCL);
+       return error;
+}
+
+/*
+ * Zero file bytes between startoff and endoff inclusive.
+ * The iolock is held exclusive and no blocks are buffered.
+ *
+ * This function is used by xfs_free_file_space() to zero
+ * partial blocks when the range to free is not block aligned.
+ * When unreserving space with boundaries that are not block
+ * aligned we round up the start and round down the end
+ * boundaries and then use this function to zero the parts of
+ * the blocks that got dropped during the rounding.
+ *
+ * @ip:       inode whose blocks are zeroed
+ * @startoff: first byte offset to zero (inclusive)
+ * @endoff:   last byte offset to zero (inclusive)
+ */
+STATIC int
+xfs_zero_remaining_bytes(
+       xfs_inode_t             *ip,
+       xfs_off_t               startoff,
+       xfs_off_t               endoff)
+{
+       xfs_bmbt_irec_t         imap;
+       xfs_fileoff_t           offset_fsb;
+       xfs_off_t               lastoffset;
+       xfs_off_t               offset;
+       xfs_buf_t               *bp;
+       xfs_mount_t             *mp = ip->i_mount;
+       int                     nimap;
+       int                     error = 0;
+
+       /*
+        * Avoid doing I/O beyond eof - it's not necessary
+        * since nothing can read beyond eof.  The space will
+        * be zeroed when the file is extended anyway.
+        */
+       if (startoff >= XFS_ISIZE(ip))
+               return 0;
+
+       if (endoff > XFS_ISIZE(ip))
+               endoff = XFS_ISIZE(ip);
+
+       /*
+        * One uncached, block-sized buffer is reused for the
+        * read-modify-write cycle on every partial block below.
+        */
+       bp = xfs_buf_get_uncached(XFS_IS_REALTIME_INODE(ip) ?
+                                       mp->m_rtdev_targp : mp->m_ddev_targp,
+                                 BTOBB(mp->m_sb.sb_blocksize), 0);
+       if (!bp)
+               return XFS_ERROR(ENOMEM);
+
+       xfs_buf_unlock(bp);
+
+       for (offset = startoff; offset <= endoff; offset = lastoffset + 1) {
+               offset_fsb = XFS_B_TO_FSBT(mp, offset);
+               nimap = 1;
+               error = xfs_bmapi_read(ip, offset_fsb, 1, &imap, &nimap, 0);
+               if (error || nimap < 1)
+                       break;
+               ASSERT(imap.br_blockcount >= 1);
+               ASSERT(imap.br_startoff == offset_fsb);
+               /* Zero at most to the end of this block, capped at endoff. */
+               lastoffset = XFS_FSB_TO_B(mp, imap.br_startoff + 1) - 1;
+               if (lastoffset > endoff)
+                       lastoffset = endoff;
+               /* Holes and unwritten extents already read back as zeroes. */
+               if (imap.br_startblock == HOLESTARTBLOCK)
+                       continue;
+               ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
+               if (imap.br_state == XFS_EXT_UNWRITTEN)
+                       continue;
+               /* Read the block in, zero the target range, write it back. */
+               XFS_BUF_UNDONE(bp);
+               XFS_BUF_UNWRITE(bp);
+               XFS_BUF_READ(bp);
+               XFS_BUF_SET_ADDR(bp, xfs_fsb_to_db(ip, imap.br_startblock));
+               xfsbdstrat(mp, bp);
+               error = xfs_buf_iowait(bp);
+               if (error) {
+                       xfs_buf_ioerror_alert(bp,
+                                       "xfs_zero_remaining_bytes(read)");
+                       break;
+               }
+               memset(bp->b_addr +
+                       (offset - XFS_FSB_TO_B(mp, imap.br_startoff)),
+                     0, lastoffset - offset + 1);
+               XFS_BUF_UNDONE(bp);
+               XFS_BUF_UNREAD(bp);
+               XFS_BUF_WRITE(bp);
+               xfsbdstrat(mp, bp);
+               error = xfs_buf_iowait(bp);
+               if (error) {
+                       xfs_buf_ioerror_alert(bp,
+                                       "xfs_zero_remaining_bytes(write)");
+                       break;
+               }
+       }
+       xfs_buf_free(bp);
+       return error;
+}
+
+/*
+ * xfs_free_file_space()
+ *      This routine frees disk space for the given file.
+ *
+ *     This routine is only called by xfs_change_file_space
+ *     for an UNRESVSP type call.
+ *
+ * @ip:         inode to free space from
+ * @offset:     byte offset at which freeing starts
+ * @len:        byte length to free
+ * @attr_flags: XFS_ATTR_NOLOCK suppresses taking the iolock here
+ *
+ * RETURNS:
+ *       0 on success
+ *      errno on error
+ *
+ */
+STATIC int
+xfs_free_file_space(
+       xfs_inode_t             *ip,
+       xfs_off_t               offset,
+       xfs_off_t               len,
+       int                     attr_flags)
+{
+       int                     committed;
+       int                     done;
+       xfs_fileoff_t           endoffset_fsb;
+       int                     error;
+       xfs_fsblock_t           firstfsb;
+       xfs_bmap_free_t         free_list;
+       xfs_bmbt_irec_t         imap;
+       xfs_off_t               ioffset;
+       xfs_extlen_t            mod=0;
+       xfs_mount_t             *mp;
+       int                     nimap;
+       uint                    resblks;
+       xfs_off_t               rounding;
+       int                     rt;
+       xfs_fileoff_t           startoffset_fsb;
+       xfs_trans_t             *tp;
+       int                     need_iolock = 1;
+
+       mp = ip->i_mount;
+
+       trace_xfs_free_file_space(ip);
+
+       error = xfs_qm_dqattach(ip, 0);
+       if (error)
+               return error;
+
+       error = 0;
+       if (len <= 0)   /* if nothing being freed */
+               return error;
+       rt = XFS_IS_REALTIME_INODE(ip);
+       /* Round the start up and the end down to whole blocks. */
+       startoffset_fsb = XFS_B_TO_FSB(mp, offset);
+       endoffset_fsb = XFS_B_TO_FSBT(mp, offset + len);
+
+       if (attr_flags & XFS_ATTR_NOLOCK)
+               need_iolock = 0;
+       if (need_iolock) {
+               xfs_ilock(ip, XFS_IOLOCK_EXCL);
+               /* wait for the completion of any pending DIOs */
+               inode_dio_wait(VFS_I(ip));
+       }
+
+       /*
+        * Flush and invalidate the page cache from the affected range to
+        * EOF so stale cached data cannot survive the punch.
+        */
+       rounding = max_t(xfs_off_t, 1 << mp->m_sb.sb_blocklog, PAGE_CACHE_SIZE);
+       ioffset = offset & ~(rounding - 1);
+       error = -filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
+                                             ioffset, -1);
+       if (error)
+               goto out_unlock_iolock;
+       truncate_pagecache_range(VFS_I(ip), ioffset, -1);
+
+       /*
+        * Need to zero the stuff we're not freeing, on disk.
+        * If it's a realtime file & can't use unwritten extents then we
+        * actually need to zero the extent edges.  Otherwise xfs_bunmapi
+        * will take care of it for us.
+        */
+       if (rt && !xfs_sb_version_hasextflgbit(&mp->m_sb)) {
+               /* Align the start up to a realtime extent boundary. */
+               nimap = 1;
+               error = xfs_bmapi_read(ip, startoffset_fsb, 1,
+                                       &imap, &nimap, 0);
+               if (error)
+                       goto out_unlock_iolock;
+               ASSERT(nimap == 0 || nimap == 1);
+               if (nimap && imap.br_startblock != HOLESTARTBLOCK) {
+                       xfs_daddr_t     block;
+
+                       ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
+                       block = imap.br_startblock;
+                       mod = do_div(block, mp->m_sb.sb_rextsize);
+                       if (mod)
+                               startoffset_fsb += mp->m_sb.sb_rextsize - mod;
+               }
+               /* Align the end down to a realtime extent boundary. */
+               nimap = 1;
+               error = xfs_bmapi_read(ip, endoffset_fsb - 1, 1,
+                                       &imap, &nimap, 0);
+               if (error)
+                       goto out_unlock_iolock;
+               ASSERT(nimap == 0 || nimap == 1);
+               if (nimap && imap.br_startblock != HOLESTARTBLOCK) {
+                       ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
+                       /*
+                        * NOTE(review): 'mod' here carries over whatever the
+                        * start-block check above left in it rather than being
+                        * recomputed from the end block -- verify that this is
+                        * intentional.
+                        */
+                       mod++;
+                       if (mod && (mod != mp->m_sb.sb_rextsize))
+                               endoffset_fsb -= mod;
+               }
+       }
+       if ((done = (endoffset_fsb <= startoffset_fsb)))
+               /*
+                * One contiguous piece to clear
+                */
+               error = xfs_zero_remaining_bytes(ip, offset, offset + len - 1);
+       else {
+               /*
+                * Some full blocks, possibly two pieces to clear
+                */
+               if (offset < XFS_FSB_TO_B(mp, startoffset_fsb))
+                       error = xfs_zero_remaining_bytes(ip, offset,
+                               XFS_FSB_TO_B(mp, startoffset_fsb) - 1);
+               if (!error &&
+                   XFS_FSB_TO_B(mp, endoffset_fsb) < offset + len)
+                       error = xfs_zero_remaining_bytes(ip,
+                               XFS_FSB_TO_B(mp, endoffset_fsb),
+                               offset + len - 1);
+       }
+
+       /*
+        * free file space until done or until there is an error
+        */
+       resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
+       while (!error && !done) {
+
+               /*
+                * allocate and setup the transaction. Allow this
+                * transaction to dip into the reserve blocks to ensure
+                * the freeing of the space succeeds at ENOSPC.
+                */
+               tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
+               tp->t_flags |= XFS_TRANS_RESERVE;
+               error = xfs_trans_reserve(tp, &M_RES(mp)->tr_write, resblks, 0);
+
+               /*
+                * check for running out of space
+                */
+               if (error) {
+                       /*
+                        * Free the transaction structure.
+                        */
+                       ASSERT(error == ENOSPC || XFS_FORCED_SHUTDOWN(mp));
+                       xfs_trans_cancel(tp, 0);
+                       break;
+               }
+               xfs_ilock(ip, XFS_ILOCK_EXCL);
+               error = xfs_trans_reserve_quota(tp, mp,
+                               ip->i_udquot, ip->i_gdquot, ip->i_pdquot,
+                               resblks, 0, XFS_QMOPT_RES_REGBLKS);
+               if (error)
+                       goto error1;
+
+               xfs_trans_ijoin(tp, ip, 0);
+
+               /*
+                * issue the bunmapi() call to free the blocks
+                */
+               xfs_bmap_init(&free_list, &firstfsb);
+               error = xfs_bunmapi(tp, ip, startoffset_fsb,
+                                 endoffset_fsb - startoffset_fsb,
+                                 0, 2, &firstfsb, &free_list, &done);
+               if (error) {
+                       goto error0;
+               }
+
+               /*
+                * complete the transaction
+                */
+               error = xfs_bmap_finish(&tp, &free_list, &committed);
+               if (error) {
+                       goto error0;
+               }
+
+               error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
+               xfs_iunlock(ip, XFS_ILOCK_EXCL);
+       }
+
+ out_unlock_iolock:
+       if (need_iolock)
+               xfs_iunlock(ip, XFS_IOLOCK_EXCL);
+       return error;
+
+ error0:
+       xfs_bmap_cancel(&free_list);
+ error1:
+       xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
+       xfs_iunlock(ip, need_iolock ? (XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL) :
+                   XFS_ILOCK_EXCL);
+       return error;
+}
+
+
+STATIC int
+xfs_zero_file_space(
+       struct xfs_inode        *ip,
+       xfs_off_t               offset,
+       xfs_off_t               len,
+       int                     attr_flags)
+{
+       struct xfs_mount        *mp = ip->i_mount;
+       uint                    granularity;
+       xfs_off_t               start_boundary;
+       xfs_off_t               end_boundary;
+       int                     error;
+
+       granularity = max_t(uint, 1 << mp->m_sb.sb_blocklog, PAGE_CACHE_SIZE);
+
+       /*
+        * Round the range of extents we are going to convert inwards.  If the
+        * offset is aligned, then it doesn't get changed so we zero from the
+        * start of the block offset points to.
+        */
+       start_boundary = round_up(offset, granularity);
+       end_boundary = round_down(offset + len, granularity);
+
+       ASSERT(start_boundary >= offset);
+       ASSERT(end_boundary <= offset + len);
+
+       if (!(attr_flags & XFS_ATTR_NOLOCK))
+               xfs_ilock(ip, XFS_IOLOCK_EXCL);
+
+       if (start_boundary < end_boundary - 1) {
+               /* punch out the page cache over the conversion range */
+               truncate_pagecache_range(VFS_I(ip), start_boundary,
+                                        end_boundary - 1);
+               /* convert the blocks */
+               error = xfs_alloc_file_space(ip, start_boundary,
+                                       end_boundary - start_boundary - 1,
+                                       XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT,
+                                       attr_flags);
+               if (error)
+                       goto out_unlock;
+
+               /* We've handled the interior of the range, now for the edges */
+               if (start_boundary != offset)
+                       error = xfs_iozero(ip, offset, start_boundary - offset);
+               if (error)
+                       goto out_unlock;
+
+               if (end_boundary != offset + len)
+                       error = xfs_iozero(ip, end_boundary,
+                                          offset + len - end_boundary);
+
+       } else {
+               /*
+                * It's either a sub-granularity range or the range spanned lies
+                * partially across two adjacent blocks.
+                */
+               error = xfs_iozero(ip, offset, len);
+       }
+
+out_unlock:
+       if (!(attr_flags & XFS_ATTR_NOLOCK))
+               xfs_iunlock(ip, XFS_IOLOCK_EXCL);
+       return error;
+
+}
+
+/*
+ * xfs_change_file_space()
+ *      This routine allocates or frees disk space for the given file.
+ *      The user specified parameters are checked for alignment and size
+ *      limitations.
+ *
+ * @ip:         inode to operate on (must be a regular file)
+ * @cmd:        XFS_IOC_* space ioctl command
+ * @bf:         user-supplied flock64 describing the range; l_whence and
+ *              l_start are normalized in place
+ * @offset:     current file position, used for SEEK_CUR ranges
+ * @attr_flags: XFS_ATTR_* flags (NOLOCK/DMI/SYNC)
+ *
+ * RETURNS:
+ *       0 on success
+ *      errno on error
+ *
+ */
+int
+xfs_change_file_space(
+       xfs_inode_t     *ip,
+       int             cmd,
+       xfs_flock64_t   *bf,
+       xfs_off_t       offset,
+       int             attr_flags)
+{
+       xfs_mount_t     *mp = ip->i_mount;
+       int             clrprealloc;
+       int             error;
+       xfs_fsize_t     fsize;
+       int             setprealloc;
+       xfs_off_t       startoffset;
+       xfs_trans_t     *tp;
+       struct iattr    iattr;
+
+       if (!S_ISREG(ip->i_d.di_mode))
+               return XFS_ERROR(EINVAL);
+
+       /* Normalize l_start to an absolute file offset. */
+       switch (bf->l_whence) {
+       case 0: /*SEEK_SET*/
+               break;
+       case 1: /*SEEK_CUR*/
+               bf->l_start += offset;
+               break;
+       case 2: /*SEEK_END*/
+               bf->l_start += XFS_ISIZE(ip);
+               break;
+       default:
+               return XFS_ERROR(EINVAL);
+       }
+
+       /*
+        * length of <= 0 for resv/unresv/zero is invalid.  length for
+        * alloc/free is ignored completely and we have no idea what userspace
+        * might have set it to, so set it to zero to allow range
+        * checks to pass.
+        */
+       switch (cmd) {
+       case XFS_IOC_ZERO_RANGE:
+       case XFS_IOC_RESVSP:
+       case XFS_IOC_RESVSP64:
+       case XFS_IOC_UNRESVSP:
+       case XFS_IOC_UNRESVSP64:
+               if (bf->l_len <= 0)
+                       return XFS_ERROR(EINVAL);
+               break;
+       default:
+               bf->l_len = 0;
+               break;
+       }
+
+       /* Reject ranges that are negative or extend past s_maxbytes. */
+       if (bf->l_start < 0 ||
+           bf->l_start > mp->m_super->s_maxbytes ||
+           bf->l_start + bf->l_len < 0 ||
+           bf->l_start + bf->l_len >= mp->m_super->s_maxbytes)
+               return XFS_ERROR(EINVAL);
+
+       bf->l_whence = 0;
+
+       startoffset = bf->l_start;
+       fsize = XFS_ISIZE(ip);
+
+       setprealloc = clrprealloc = 0;
+       switch (cmd) {
+       case XFS_IOC_ZERO_RANGE:
+               error = xfs_zero_file_space(ip, startoffset, bf->l_len,
+                                               attr_flags);
+               if (error)
+                       return error;
+               setprealloc = 1;
+               break;
+
+       case XFS_IOC_RESVSP:
+       case XFS_IOC_RESVSP64:
+               error = xfs_alloc_file_space(ip, startoffset, bf->l_len,
+                                               XFS_BMAPI_PREALLOC, attr_flags);
+               if (error)
+                       return error;
+               setprealloc = 1;
+               break;
+
+       case XFS_IOC_UNRESVSP:
+       case XFS_IOC_UNRESVSP64:
+               if ((error = xfs_free_file_space(ip, startoffset, bf->l_len,
+                                                               attr_flags)))
+                       return error;
+               break;
+
+       case XFS_IOC_ALLOCSP:
+       case XFS_IOC_ALLOCSP64:
+       case XFS_IOC_FREESP:
+       case XFS_IOC_FREESP64:
+               /*
+                * These operations actually do IO when extending the file, but
+                * the allocation is done separately to the zeroing that is
+                * done. This set of operations need to be serialised against
+                * other IO operations, such as truncate and buffered IO. We
+                * need to take the IOLOCK here to serialise the allocation and
+                * zeroing IO to prevent other IOLOCK holders (e.g. getbmap,
+                * truncate, direct IO) from racing against the transient
+                * allocated but not written state we can have here.
+                */
+               xfs_ilock(ip, XFS_IOLOCK_EXCL);
+               if (startoffset > fsize) {
+                       error = xfs_alloc_file_space(ip, fsize,
+                                       startoffset - fsize, 0,
+                                       attr_flags | XFS_ATTR_NOLOCK);
+                       if (error) {
+                               xfs_iunlock(ip, XFS_IOLOCK_EXCL);
+                               break;
+                       }
+               }
+
+               iattr.ia_valid = ATTR_SIZE;
+               iattr.ia_size = startoffset;
+
+               error = xfs_setattr_size(ip, &iattr,
+                                        attr_flags | XFS_ATTR_NOLOCK);
+               xfs_iunlock(ip, XFS_IOLOCK_EXCL);
+
+               if (error)
+                       return error;
+
+               clrprealloc = 1;
+               break;
+
+       default:
+               ASSERT(0);
+               return XFS_ERROR(EINVAL);
+       }
+
+       /*
+        * update the inode timestamp, mode, and prealloc flag bits
+        */
+       tp = xfs_trans_alloc(mp, XFS_TRANS_WRITEID);
+       error = xfs_trans_reserve(tp, &M_RES(mp)->tr_writeid, 0, 0);
+       if (error) {
+               xfs_trans_cancel(tp, 0);
+               return error;
+       }
+
+       xfs_ilock(ip, XFS_ILOCK_EXCL);
+       xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
+
+       if ((attr_flags & XFS_ATTR_DMI) == 0) {
+               ip->i_d.di_mode &= ~S_ISUID;
+
+               /*
+                * Note that we don't have to worry about mandatory
+                * file locking being disabled here because we only
+                * clear the S_ISGID bit if the Group execute bit is
+                * on, but if it was on then mandatory locking wouldn't
+                * have been enabled.
+                */
+               if (ip->i_d.di_mode & S_IXGRP)
+                       ip->i_d.di_mode &= ~S_ISGID;
+
+               xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
+       }
+       if (setprealloc)
+               ip->i_d.di_flags |= XFS_DIFLAG_PREALLOC;
+       else if (clrprealloc)
+               ip->i_d.di_flags &= ~XFS_DIFLAG_PREALLOC;
+
+       xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+       if (attr_flags & XFS_ATTR_SYNC)
+               xfs_trans_set_sync(tp);
+       return xfs_trans_commit(tp, 0);
+}
+
+/*
+ * We need to check that the format of the data fork in the temporary inode is
+ * valid for the target inode before doing the swap. This is not a problem with
+ * attr1 because of the fixed fork offset, but attr2 has a dynamically sized
+ * data fork depending on the space the attribute fork is taking so we can get
+ * invalid formats on the target inode.
+ *
+ * E.g. target has space for 7 extents in extent format, temp inode only has
+ * space for 6.  If we defragment down to 7 extents, then the tmp format is a
+ * btree, but when swapped it needs to be in extent format. Hence we can't just
+ * blindly swap data forks on attr2 filesystems.
+ *
+ * Note that we check the swap in both directions so that we don't end up with
+ * a corrupt temporary inode, either.
+ *
+ * Note that fixing the way xfs_fsr sets up the attribute fork in the source
+ * inode will prevent this situation from occurring, so all we do here is
+ * reject and log the attempt. Basically we are putting the responsibility on
+ * userspace to get this right.
+ *
+ * Returns 0 if the swap is safe in both directions, EINVAL otherwise.
+ */
+static int
+xfs_swap_extents_check_format(
+       xfs_inode_t     *ip,    /* target inode */
+       xfs_inode_t     *tip)   /* tmp inode */
+{
+
+       /* Should never get a local format */
+       if (ip->i_d.di_format == XFS_DINODE_FMT_LOCAL ||
+           tip->i_d.di_format == XFS_DINODE_FMT_LOCAL)
+               return EINVAL;
+
+       /*
+        * If the target inode has fewer extents than the temporary inode then
+        * why did userspace call us?
+        */
+       if (ip->i_d.di_nextents < tip->i_d.di_nextents)
+               return EINVAL;
+
+       /*
+        * If the target inode is in extent form and the temp inode is in btree
+        * form then we will end up with the target inode in the wrong format
+        * as we already know there are fewer extents in the temp inode.
+        */
+       if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
+           tip->i_d.di_format == XFS_DINODE_FMT_BTREE)
+               return EINVAL;
+
+       /* Check temp in extent form to max in target */
+       if (tip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
+           XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) >
+                       XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
+               return EINVAL;
+
+       /* Check target in extent form to max in temp */
+       if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
+           XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) >
+                       XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
+               return EINVAL;
+
+       /*
+        * If we are in a btree format, check that the temp root block will fit
+        * in the target and that it has enough extents to be in btree format
+        * in the target.
+        *
+        * Note that we have to be careful to allow btree->extent conversions
+        * (a common defrag case) which will occur when the temp inode is in
+        * extent format...
+        */
+       if (tip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
+               if (XFS_IFORK_BOFF(ip) &&
+                   XFS_BMAP_BMDR_SPACE(tip->i_df.if_broot) > XFS_IFORK_BOFF(ip))
+                       return EINVAL;
+               if (XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) <=
+                   XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
+                       return EINVAL;
+       }
+
+       /* Reciprocal target->temp btree format checks */
+       if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
+               if (XFS_IFORK_BOFF(tip) &&
+                   XFS_BMAP_BMDR_SPACE(ip->i_df.if_broot) > XFS_IFORK_BOFF(tip))
+                       return EINVAL;
+               if (XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) <=
+                   XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
+                       return EINVAL;
+       }
+
+       return 0;
+}
+
+/*
+ * Swap the data forks of the target inode (ip) and the temporary inode (tip),
+ * together with the corresponding on-disk inode fields (nblocks, nextents,
+ * format) and delayed-allocation accounting.  The swap is refused on
+ * CRC-enabled filesystems, on mismatched file types or realtime flags, on
+ * memory-mapped files, and when the caller-supplied bstat timestamps show the
+ * file changed underneath the caller.  Returns 0 or a positive error code.
+ */
+int
+xfs_swap_extents(
+       xfs_inode_t     *ip,    /* target inode */
+       xfs_inode_t     *tip,   /* tmp inode */
+       xfs_swapext_t   *sxp)
+{
+       xfs_mount_t     *mp = ip->i_mount;
+       xfs_trans_t     *tp;
+       xfs_bstat_t     *sbp = &sxp->sx_stat;
+       xfs_ifork_t     *tempifp, *ifp, *tifp;
+       int             src_log_flags, target_log_flags;
+       int             error = 0;
+       int             aforkblks = 0;
+       int             taforkblks = 0;
+       __uint64_t      tmp;
+
+       /*
+        * We have no way of updating owner information in the BMBT blocks for
+        * each inode on CRC enabled filesystems, so to avoid corrupting
+        * this metadata we simply don't allow extent swaps to occur.
+        */
+       if (xfs_sb_version_hascrc(&mp->m_sb))
+               return XFS_ERROR(EINVAL);
+
+       /* scratch fork used for the three-way struct swap further down */
+       tempifp = kmem_alloc(sizeof(xfs_ifork_t), KM_MAYFAIL);
+       if (!tempifp) {
+               error = XFS_ERROR(ENOMEM);
+               goto out;
+       }
+
+       /*
+        * we have to do two separate lock calls here to keep lockdep
+        * happy. If we try to get all the locks in one call, lock will
+        * report false positives when we drop the ILOCK and regain them
+        * below.
+        */
+       xfs_lock_two_inodes(ip, tip, XFS_IOLOCK_EXCL);
+       xfs_lock_two_inodes(ip, tip, XFS_ILOCK_EXCL);
+
+       /* Verify that both files have the same format */
+       if ((ip->i_d.di_mode & S_IFMT) != (tip->i_d.di_mode & S_IFMT)) {
+               error = XFS_ERROR(EINVAL);
+               goto out_unlock;
+       }
+
+       /* Verify both files are either real-time or non-realtime */
+       if (XFS_IS_REALTIME_INODE(ip) != XFS_IS_REALTIME_INODE(tip)) {
+               error = XFS_ERROR(EINVAL);
+               goto out_unlock;
+       }
+
+       /* flush and toss the temp file's page cache before inspecting it */
+       error = -filemap_write_and_wait(VFS_I(tip)->i_mapping);
+       if (error)
+               goto out_unlock;
+       truncate_pagecache_range(VFS_I(tip), 0, -1);
+
+       /* Verify O_DIRECT for ftmp */
+       if (VN_CACHED(VFS_I(tip)) != 0) {
+               error = XFS_ERROR(EINVAL);
+               goto out_unlock;
+       }
+
+       /* Verify all data are being swapped */
+       if (sxp->sx_offset != 0 ||
+           sxp->sx_length != ip->i_d.di_size ||
+           sxp->sx_length != tip->i_d.di_size) {
+               error = XFS_ERROR(EFAULT);
+               goto out_unlock;
+       }
+
+       trace_xfs_swap_extent_before(ip, 0);
+       trace_xfs_swap_extent_before(tip, 1);
+
+       /* check inode formats now that data is flushed */
+       error = xfs_swap_extents_check_format(ip, tip);
+       if (error) {
+               xfs_notice(mp,
+                   "%s: inode 0x%llx format is incompatible for exchanging.",
+                               __func__, ip->i_ino);
+               goto out_unlock;
+       }
+
+       /*
+        * Compare the current change & modify times with that
+        * passed in.  If they differ, we abort this swap.
+        * This is the mechanism used to ensure the calling
+        * process that the file was not changed out from
+        * under it.
+        */
+       if ((sbp->bs_ctime.tv_sec != VFS_I(ip)->i_ctime.tv_sec) ||
+           (sbp->bs_ctime.tv_nsec != VFS_I(ip)->i_ctime.tv_nsec) ||
+           (sbp->bs_mtime.tv_sec != VFS_I(ip)->i_mtime.tv_sec) ||
+           (sbp->bs_mtime.tv_nsec != VFS_I(ip)->i_mtime.tv_nsec)) {
+               error = XFS_ERROR(EBUSY);
+               goto out_unlock;
+       }
+
+       /* We need to fail if the file is memory mapped.  Once we have tossed
+        * all existing pages, the page fault will have no option
+        * but to go to the filesystem for pages. By making the page fault call
+        * vop_read (or write in the case of autogrow) they block on the iolock
+        * until we have switched the extents.
+        */
+       if (VN_MAPPED(VFS_I(ip))) {
+               error = XFS_ERROR(EBUSY);
+               goto out_unlock;
+       }
+
+       /* drop only the ILOCKs; the transaction reservation below may sleep */
+       xfs_iunlock(ip, XFS_ILOCK_EXCL);
+       xfs_iunlock(tip, XFS_ILOCK_EXCL);
+
+       /*
+        * There is a race condition here since we gave up the
+        * ilock.  However, the data fork will not change since
+        * we have the iolock (locked for truncation too) so we
+        * are safe.  We don't really care if non-io related
+        * fields change.
+        */
+       truncate_pagecache_range(VFS_I(ip), 0, -1);
+
+       tp = xfs_trans_alloc(mp, XFS_TRANS_SWAPEXT);
+       error = xfs_trans_reserve(tp, &M_RES(mp)->tr_ichange, 0, 0);
+       if (error) {
+               xfs_iunlock(ip,  XFS_IOLOCK_EXCL);
+               xfs_iunlock(tip, XFS_IOLOCK_EXCL);
+               xfs_trans_cancel(tp, 0);
+               goto out;
+       }
+       xfs_lock_two_inodes(ip, tip, XFS_ILOCK_EXCL);
+
+       /*
+        * Count the number of extended attribute blocks
+        */
+       if ( ((XFS_IFORK_Q(ip) != 0) && (ip->i_d.di_anextents > 0)) &&
+            (ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)) {
+               error = xfs_bmap_count_blocks(tp, ip, XFS_ATTR_FORK, &aforkblks);
+               if (error)
+                       goto out_trans_cancel;
+       }
+       if ( ((XFS_IFORK_Q(tip) != 0) && (tip->i_d.di_anextents > 0)) &&
+            (tip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)) {
+               error = xfs_bmap_count_blocks(tp, tip, XFS_ATTR_FORK,
+                       &taforkblks);
+               if (error)
+                       goto out_trans_cancel;
+       }
+
+       /*
+        * Swap the data forks of the inodes
+        */
+       ifp = &ip->i_df;
+       tifp = &tip->i_df;
+       *tempifp = *ifp;        /* struct copy */
+       *ifp = *tifp;           /* struct copy */
+       *tifp = *tempifp;       /* struct copy */
+
+       /*
+        * Fix the on-disk inode values.  nblocks counts both forks, so the
+        * attr fork block counts computed above are exchanged back out.
+        */
+       tmp = (__uint64_t)ip->i_d.di_nblocks;
+       ip->i_d.di_nblocks = tip->i_d.di_nblocks - taforkblks + aforkblks;
+       tip->i_d.di_nblocks = tmp + taforkblks - aforkblks;
+
+       tmp = (__uint64_t) ip->i_d.di_nextents;
+       ip->i_d.di_nextents = tip->i_d.di_nextents;
+       tip->i_d.di_nextents = tmp;
+
+       tmp = (__uint64_t) ip->i_d.di_format;
+       ip->i_d.di_format = tip->i_d.di_format;
+       tip->i_d.di_format = tmp;
+
+       /*
+        * The extents in the source inode could still contain speculative
+        * preallocation beyond EOF (e.g. the file is open but not modified
+        * while defrag is in progress). In that case, we need to copy over the
+        * number of delalloc blocks the data fork in the source inode is
+        * tracking beyond EOF so that when the fork is truncated away when the
+        * temporary inode is unlinked we don't underrun the i_delayed_blks
+        * counter on that inode.
+        */
+       ASSERT(tip->i_delayed_blks == 0);
+       tip->i_delayed_blks = ip->i_delayed_blks;
+       ip->i_delayed_blks = 0;
+
+       src_log_flags = XFS_ILOG_CORE;
+       switch (ip->i_d.di_format) {
+       case XFS_DINODE_FMT_EXTENTS:
+               /* If the extents fit in the inode, fix the
+                * pointer.  Otherwise it's already NULL or
+                * pointing to the extent.
+                */
+               if (ip->i_d.di_nextents <= XFS_INLINE_EXTS) {
+                       ifp->if_u1.if_extents =
+                               ifp->if_u2.if_inline_ext;
+               }
+               src_log_flags |= XFS_ILOG_DEXT;
+               break;
+       case XFS_DINODE_FMT_BTREE:
+               src_log_flags |= XFS_ILOG_DBROOT;
+               break;
+       }
+
+       target_log_flags = XFS_ILOG_CORE;
+       switch (tip->i_d.di_format) {
+       case XFS_DINODE_FMT_EXTENTS:
+               /* If the extents fit in the inode, fix the
+                * pointer.  Otherwise it's already NULL or
+                * pointing to the extent.
+                */
+               if (tip->i_d.di_nextents <= XFS_INLINE_EXTS) {
+                       tifp->if_u1.if_extents =
+                               tifp->if_u2.if_inline_ext;
+               }
+               target_log_flags |= XFS_ILOG_DEXT;
+               break;
+       case XFS_DINODE_FMT_BTREE:
+               target_log_flags |= XFS_ILOG_DBROOT;
+               break;
+       }
+
+
+       xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
+       xfs_trans_ijoin(tp, tip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
+
+       xfs_trans_log_inode(tp, ip,  src_log_flags);
+       xfs_trans_log_inode(tp, tip, target_log_flags);
+
+       /*
+        * If this is a synchronous mount, make sure that the
+        * transaction goes to disk before returning to the user.
+        */
+       if (mp->m_flags & XFS_MOUNT_WSYNC)
+               xfs_trans_set_sync(tp);
+
+       error = xfs_trans_commit(tp, 0);
+
+       trace_xfs_swap_extent_after(ip, 0);
+       trace_xfs_swap_extent_after(tip, 1);
+out:
+       kmem_free(tempifp);
+       return error;
+
+out_unlock:
+       xfs_iunlock(ip,  XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
+       xfs_iunlock(tip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
+       goto out;
+
+out_trans_cancel:
+       xfs_trans_cancel(tp, 0);
+       goto out_unlock;
+}
diff --git a/fs/xfs/xfs_bmap_util.h b/fs/xfs/xfs_bmap_util.h
new file mode 100644 (file)
index 0000000..0612609
--- /dev/null
@@ -0,0 +1,110 @@
+/*
+ * Copyright (c) 2000-2006 Silicon Graphics, Inc.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+#ifndef __XFS_BMAP_UTIL_H__
+#define        __XFS_BMAP_UTIL_H__
+
+/* Kernel only BMAP related definitions and functions */
+
+struct xfs_bmbt_irec;
+struct xfs_bmap_free_item;
+struct xfs_ifork;
+struct xfs_inode;
+struct xfs_mount;
+struct xfs_trans;
+
+/*
+ * Argument structure for xfs_bmap_alloc.
+ */
+struct xfs_bmalloca {
+       xfs_fsblock_t           *firstblock; /* i/o first block allocated */
+       struct xfs_bmap_free    *flist; /* bmap freelist */
+       struct xfs_trans        *tp;    /* transaction pointer */
+       struct xfs_inode        *ip;    /* incore inode pointer */
+       struct xfs_bmbt_irec    prev;   /* extent before the new one */
+       struct xfs_bmbt_irec    got;    /* extent after, or delayed */
+
+       xfs_fileoff_t           offset; /* offset in file filling in */
+       xfs_extlen_t            length; /* i/o length asked/allocated */
+       xfs_fsblock_t           blkno;  /* starting block of new extent */
+
+       struct xfs_btree_cur    *cur;   /* btree cursor */
+       xfs_extnum_t            idx;    /* current extent index */
+       int                     nallocs;/* number of extents alloc'd */
+       int                     logflags;/* flags for transaction logging */
+
+       xfs_extlen_t            total;  /* total blocks needed for xaction */
+       xfs_extlen_t            minlen; /* minimum allocation size (blocks) */
+       xfs_extlen_t            minleft; /* amount must be left after alloc */
+       char                    eof;    /* set if allocating past last extent */
+       char                    wasdel; /* replacing a delayed allocation */
+       char                    userdata;/* set if is user data */
+       char                    aeof;   /* allocated space at eof */
+       char                    conv;   /* overwriting unwritten extents */
+       /*
+        * NOTE(review): the fields below look like support for handing the
+        * allocation off to a worker (completion / work item / result code) —
+        * confirm against the xfs_bmapi_allocate() implementation.
+        */
+       char                    stack_switch;
+       int                     flags;
+       struct completion       *done;
+       struct work_struct      work;
+       int                     result;
+};
+
+int    xfs_bmap_finish(struct xfs_trans **tp, struct xfs_bmap_free *flist,
+                       int *committed);
+int    xfs_bmap_rtalloc(struct xfs_bmalloca *ap);
+int    xfs_bmapi_allocate(struct xfs_bmalloca *args);
+int    __xfs_bmapi_allocate(struct xfs_bmalloca *args);
+int    xfs_bmap_eof(struct xfs_inode *ip, xfs_fileoff_t endoff,
+                    int whichfork, int *eof);
+int    xfs_bmap_count_blocks(struct xfs_trans *tp, struct xfs_inode *ip,
+                             int whichfork, int *count);
+int    xfs_bmap_punch_delalloc_range(struct xfs_inode *ip,
+               xfs_fileoff_t start_fsb, xfs_fileoff_t length);
+
+/* bmap to userspace formatter - copy to user & advance pointer */
+typedef int (*xfs_bmap_format_t)(void **, struct getbmapx *, int *);
+int    xfs_getbmap(struct xfs_inode *ip, struct getbmapx *bmv,
+               xfs_bmap_format_t formatter, void *arg);
+
+/* functions in xfs_bmap.c that are only needed by xfs_bmap_util.c */
+void   xfs_bmap_del_free(struct xfs_bmap_free *flist,
+                         struct xfs_bmap_free_item *prev,
+                         struct xfs_bmap_free_item *free);
+int    xfs_bmap_extsize_align(struct xfs_mount *mp, struct xfs_bmbt_irec *gotp,
+                              struct xfs_bmbt_irec *prevp, xfs_extlen_t extsz,
+                              int rt, int eof, int delay, int convert,
+                              xfs_fileoff_t *offp, xfs_extlen_t *lenp);
+void   xfs_bmap_adjacent(struct xfs_bmalloca *ap);
+int    xfs_bmap_last_extent(struct xfs_trans *tp, struct xfs_inode *ip,
+                            int whichfork, struct xfs_bmbt_irec *rec,
+                            int *is_empty);
+
+/* preallocation and hole punch interface */
+int    xfs_change_file_space(struct xfs_inode *ip, int cmd,
+                             xfs_flock64_t *bf, xfs_off_t offset,
+                             int attr_flags);
+
+/* EOF block manipulation functions */
+bool   xfs_can_free_eofblocks(struct xfs_inode *ip, bool force);
+int    xfs_free_eofblocks(struct xfs_mount *mp, struct xfs_inode *ip,
+                          bool need_iolock);
+
+/* swap the data forks of a target and temporary inode (defragmentation) */
+int    xfs_swap_extents(struct xfs_inode *ip, struct xfs_inode *tip,
+                        struct xfs_swapext *sx);
+
+xfs_daddr_t xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb);
+
+#endif /* __XFS_BMAP_UTIL_H__ */
index 0903960410a255c171a7b663ffb3ba4af9137074..ae106f6dae4f480c28e0102aee579a9662177454 100644 (file)
@@ -510,7 +510,7 @@ xfs_btree_ptr_addr(
 }
 
 /*
- * Get the root block which is stored in the inode.
+ * Get the root block which is stored in the inode.
  *
  * For now this btree implementation assumes the btree root is always
  * stored in the if_broot field of an inode fork.
@@ -1684,7 +1684,7 @@ xfs_lookup_get_search_key(
 
 /*
  * Lookup the record.  The cursor is made to point to it, based on dir.
- * Return 0 if can't find any such record, 1 for success.
+ * stat is set to 0 if can't find any such record, 1 for success.
  */
 int                                    /* error */
 xfs_btree_lookup(
@@ -2756,7 +2756,6 @@ xfs_btree_make_block_unfull(
 
                if (numrecs < cur->bc_ops->get_dmaxrecs(cur, level)) {
                        /* A root block that can be made bigger. */
-
                        xfs_iroot_realloc(ip, 1, cur->bc_private.b.whichfork);
                } else {
                        /* A root block that needs replacing */
index 55e3c7cc3c3d3f22178fb1feb8aab44679821172..c8473c7ef45e4c764fd61eb1bf6419cb1d98f4ea 100644 (file)
@@ -88,13 +88,11 @@ struct xfs_btree_block {
 #define XFS_BTREE_SBLOCK_CRC_LEN       (XFS_BTREE_SBLOCK_LEN + 40)
 #define XFS_BTREE_LBLOCK_CRC_LEN       (XFS_BTREE_LBLOCK_LEN + 48)
 
-
 #define XFS_BTREE_SBLOCK_CRC_OFF \
        offsetof(struct xfs_btree_block, bb_u.s.bb_crc)
 #define XFS_BTREE_LBLOCK_CRC_OFF \
        offsetof(struct xfs_btree_block, bb_u.l.bb_crc)
 
-
 /*
  * Generic key, ptr and record wrapper structures.
  *
index 1b2472a46e46b96e31e0615f670120218ed7cf24..c06823fe10d3559c143c3608b04815efd6252631 100644 (file)
@@ -35,6 +35,7 @@
 #include <linux/freezer.h>
 
 #include "xfs_sb.h"
+#include "xfs_trans_resv.h"
 #include "xfs_log.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
@@ -303,7 +304,7 @@ _xfs_buf_free_pages(
  *     Releases the specified buffer.
  *
  *     The modification state of any associated pages is left unchanged.
- *     The buffer most not be on any hash - use xfs_buf_rele instead for
+ *     The buffer must not be on any hash - use xfs_buf_rele instead for
  *     hashed and refcounted buffers
  */
 void
@@ -1621,7 +1622,7 @@ xfs_setsize_buftarg_flags(
 /*
  *     When allocating the initial buffer target we have not yet
  *     read in the superblock, so don't know what sized sectors
- *     are being used is at this early stage.  Play safe.
+ *     are being used at this early stage.  Play safe.
  */
 STATIC int
 xfs_setsize_buftarg_early(
index bfc4e0c26fd3404fb36f007da344be79543aea4c..3a944b198e35a0fbfc758a84fd39a2e0674d360f 100644 (file)
@@ -39,6 +39,14 @@ static inline struct xfs_buf_log_item *BUF_ITEM(struct xfs_log_item *lip)
 
 STATIC void    xfs_buf_do_callbacks(struct xfs_buf *bp);
 
+/*
+ * Size in bytes of a buf log format structure as written to the log:
+ * the fixed header plus only the used portion (blf_map_size words) of
+ * the dirty-bitmap array.
+ */
+static inline int
+xfs_buf_log_format_size(
+       struct xfs_buf_log_format *blfp)
+{
+       return offsetof(struct xfs_buf_log_format, blf_data_map) +
+                       (blfp->blf_map_size * sizeof(blfp->blf_data_map[0]));
+}
+
 /*
  * This returns the number of log iovecs needed to log the
  * given buf log item.
@@ -49,25 +57,27 @@ STATIC void xfs_buf_do_callbacks(struct xfs_buf *bp);
  *
  * If the XFS_BLI_STALE flag has been set, then log nothing.
  */
-STATIC uint
+STATIC void
 xfs_buf_item_size_segment(
        struct xfs_buf_log_item *bip,
-       struct xfs_buf_log_format *blfp)
+       struct xfs_buf_log_format *blfp,
+       int                     *nvecs,
+       int                     *nbytes)
 {
        struct xfs_buf          *bp = bip->bli_buf;
-       uint                    nvecs;
        int                     next_bit;
        int                     last_bit;
 
        last_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size, 0);
        if (last_bit == -1)
-               return 0;
+               return;
 
        /*
         * initial count for a dirty buffer is 2 vectors - the format structure
         * and the first dirty region.
         */
-       nvecs = 2;
+       *nvecs += 2;
+       *nbytes += xfs_buf_log_format_size(blfp) + XFS_BLF_CHUNK;
 
        while (last_bit != -1) {
                /*
@@ -87,18 +97,17 @@ xfs_buf_item_size_segment(
                        break;
                } else if (next_bit != last_bit + 1) {
                        last_bit = next_bit;
-                       nvecs++;
+                       (*nvecs)++;
                } else if (xfs_buf_offset(bp, next_bit * XFS_BLF_CHUNK) !=
                           (xfs_buf_offset(bp, last_bit * XFS_BLF_CHUNK) +
                            XFS_BLF_CHUNK)) {
                        last_bit = next_bit;
-                       nvecs++;
+                       (*nvecs)++;
                } else {
                        last_bit++;
                }
+               *nbytes += XFS_BLF_CHUNK;
        }
-
-       return nvecs;
 }
 
 /*
@@ -118,12 +127,13 @@ xfs_buf_item_size_segment(
  * If the XFS_BLI_STALE flag has been set, then log nothing but the buf log
  * format structures.
  */
-STATIC uint
+STATIC void
 xfs_buf_item_size(
-       struct xfs_log_item     *lip)
+       struct xfs_log_item     *lip,
+       int                     *nvecs,
+       int                     *nbytes)
 {
        struct xfs_buf_log_item *bip = BUF_ITEM(lip);
-       uint                    nvecs;
        int                     i;
 
        ASSERT(atomic_read(&bip->bli_refcount) > 0);
@@ -135,7 +145,11 @@ xfs_buf_item_size(
                 */
                trace_xfs_buf_item_size_stale(bip);
                ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);
-               return bip->bli_format_count;
+               *nvecs += bip->bli_format_count;
+               for (i = 0; i < bip->bli_format_count; i++) {
+                       *nbytes += xfs_buf_log_format_size(&bip->bli_formats[i]);
+               }
+               return;
        }
 
        ASSERT(bip->bli_flags & XFS_BLI_LOGGED);
@@ -147,7 +161,8 @@ xfs_buf_item_size(
                 * commit, so no vectors are used at all.
                 */
                trace_xfs_buf_item_size_ordered(bip);
-               return XFS_LOG_VEC_ORDERED;
+               *nvecs = XFS_LOG_VEC_ORDERED;
+               return;
        }
 
        /*
@@ -159,13 +174,11 @@ xfs_buf_item_size(
         * count for the extra buf log format structure that will need to be
         * written.
         */
-       nvecs = 0;
        for (i = 0; i < bip->bli_format_count; i++) {
-               nvecs += xfs_buf_item_size_segment(bip, &bip->bli_formats[i]);
+               xfs_buf_item_size_segment(bip, &bip->bli_formats[i],
+                                         nvecs, nbytes);
        }
-
        trace_xfs_buf_item_size(bip);
-       return nvecs;
 }
 
 static struct xfs_log_iovec *
@@ -192,8 +205,7 @@ xfs_buf_item_format_segment(
         * the actual size of the dirty bitmap rather than the size of the in
         * memory structure.
         */
-       base_size = offsetof(struct xfs_buf_log_format, blf_data_map) +
-                       (blfp->blf_map_size * sizeof(blfp->blf_data_map[0]));
+       base_size = xfs_buf_log_format_size(blfp);
 
        nvecs = 0;
        first_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size, 0);
@@ -601,11 +613,9 @@ xfs_buf_item_unlock(
                        }
                }
        }
-       if (clean)
-               xfs_buf_item_relse(bp);
-       else if (aborted) {
+       if (clean || aborted) {
                if (atomic_dec_and_test(&bip->bli_refcount)) {
-                       ASSERT(XFS_FORCED_SHUTDOWN(lip->li_mountp));
+                       ASSERT(!aborted || XFS_FORCED_SHUTDOWN(lip->li_mountp));
                        xfs_buf_item_relse(bp);
                }
        } else
index 0f1c247dc680031fe06554a4bd41f6f962290a53..db6371087fe8ea9b786f82189b387c55979a2460 100644 (file)
 #ifndef        __XFS_BUF_ITEM_H__
 #define        __XFS_BUF_ITEM_H__
 
-extern kmem_zone_t     *xfs_buf_item_zone;
-
-/*
- * This flag indicates that the buffer contains on disk inodes
- * and requires special recovery handling.
- */
-#define        XFS_BLF_INODE_BUF       (1<<0)
-/*
- * This flag indicates that the buffer should not be replayed
- * during recovery because its blocks are being freed.
- */
-#define        XFS_BLF_CANCEL          (1<<1)
-
-/*
- * This flag indicates that the buffer contains on disk
- * user or group dquots and may require special recovery handling.
- */
-#define        XFS_BLF_UDQUOT_BUF      (1<<2)
-#define XFS_BLF_PDQUOT_BUF     (1<<3)
-#define        XFS_BLF_GDQUOT_BUF      (1<<4)
-
-#define        XFS_BLF_CHUNK           128
-#define        XFS_BLF_SHIFT           7
-#define        BIT_TO_WORD_SHIFT       5
-#define        NBWORD                  (NBBY * sizeof(unsigned int))
-
-/*
- * This is the structure used to lay out a buf log item in the
- * log.  The data map describes which 128 byte chunks of the buffer
- * have been logged.
- */
-#define XFS_BLF_DATAMAP_SIZE   ((XFS_MAX_BLOCKSIZE / XFS_BLF_CHUNK) / NBWORD)
+/* kernel only definitions */
 
-typedef struct xfs_buf_log_format {
-       unsigned short  blf_type;       /* buf log item type indicator */
-       unsigned short  blf_size;       /* size of this item */
-       ushort          blf_flags;      /* misc state */
-       ushort          blf_len;        /* number of blocks in this buf */
-       __int64_t       blf_blkno;      /* starting blkno of this buf */
-       unsigned int    blf_map_size;   /* used size of data bitmap in words */
-       unsigned int    blf_data_map[XFS_BLF_DATAMAP_SIZE]; /* dirty bitmap */
-} xfs_buf_log_format_t;
-
-/*
- * All buffers now need to tell recovery where the magic number
- * is so that it can verify and calculate the CRCs on the buffer correctly
- * once the changes have been replayed into the buffer.
- *
- * The type value is held in the upper 5 bits of the blf_flags field, which is
- * an unsigned 16 bit field. Hence we need to shift it 11 bits up and down.
- */
-#define XFS_BLFT_BITS  5
-#define XFS_BLFT_SHIFT 11
-#define XFS_BLFT_MASK  (((1 << XFS_BLFT_BITS) - 1) << XFS_BLFT_SHIFT)
-
-enum xfs_blft {
-       XFS_BLFT_UNKNOWN_BUF = 0,
-       XFS_BLFT_UDQUOT_BUF,
-       XFS_BLFT_PDQUOT_BUF,
-       XFS_BLFT_GDQUOT_BUF,
-       XFS_BLFT_BTREE_BUF,
-       XFS_BLFT_AGF_BUF,
-       XFS_BLFT_AGFL_BUF,
-       XFS_BLFT_AGI_BUF,
-       XFS_BLFT_DINO_BUF,
-       XFS_BLFT_SYMLINK_BUF,
-       XFS_BLFT_DIR_BLOCK_BUF,
-       XFS_BLFT_DIR_DATA_BUF,
-       XFS_BLFT_DIR_FREE_BUF,
-       XFS_BLFT_DIR_LEAF1_BUF,
-       XFS_BLFT_DIR_LEAFN_BUF,
-       XFS_BLFT_DA_NODE_BUF,
-       XFS_BLFT_ATTR_LEAF_BUF,
-       XFS_BLFT_ATTR_RMT_BUF,
-       XFS_BLFT_SB_BUF,
-       XFS_BLFT_MAX_BUF = (1 << XFS_BLFT_BITS),
-};
-
-static inline void
-xfs_blft_to_flags(struct xfs_buf_log_format *blf, enum xfs_blft type)
-{
-       ASSERT(type > XFS_BLFT_UNKNOWN_BUF && type < XFS_BLFT_MAX_BUF);
-       blf->blf_flags &= ~XFS_BLFT_MASK;
-       blf->blf_flags |= ((type << XFS_BLFT_SHIFT) & XFS_BLFT_MASK);
-}
-
-static inline __uint16_t
-xfs_blft_from_flags(struct xfs_buf_log_format *blf)
-{
-       return (blf->blf_flags & XFS_BLFT_MASK) >> XFS_BLFT_SHIFT;
-}
-
-/*
- * buf log item flags
- */
+/* buf log item flags */
 #define        XFS_BLI_HOLD            0x01
 #define        XFS_BLI_DIRTY           0x02
 #define        XFS_BLI_STALE           0x04
@@ -133,8 +41,6 @@ xfs_blft_from_flags(struct xfs_buf_log_format *blf)
        { XFS_BLI_ORDERED,      "ORDERED" }
 
 
-#ifdef __KERNEL__
-
 struct xfs_buf;
 struct xfs_mount;
 struct xfs_buf_log_item;
@@ -169,6 +75,6 @@ void xfs_trans_buf_set_type(struct xfs_trans *, struct xfs_buf *,
                               enum xfs_blft);
 void   xfs_trans_buf_copy_type(struct xfs_buf *dst_bp, struct xfs_buf *src_bp);
 
-#endif /* __KERNEL__ */
+extern kmem_zone_t     *xfs_buf_item_zone;
 
 #endif /* __XFS_BUF_ITEM_H__ */
index 0b8b2a13cd24debe493c8982679a2c565ebae5a1..d4e59a4ff59ff1600cf8c9a83ce4b36f47ddfcd0 100644 (file)
@@ -27,8 +27,8 @@
 #include "xfs_mount.h"
 #include "xfs_da_btree.h"
 #include "xfs_bmap_btree.h"
-#include "xfs_dir2.h"
 #include "xfs_dir2_format.h"
+#include "xfs_dir2.h"
 #include "xfs_dir2_priv.h"
 #include "xfs_dinode.h"
 #include "xfs_inode.h"
@@ -399,7 +399,7 @@ xfs_da3_split(
        struct xfs_da_intnode   *node;
        struct xfs_buf          *bp;
        int                     max;
-       int                     action;
+       int                     action = 0;
        int                     error;
        int                     i;
 
@@ -2454,9 +2454,9 @@ static int
 xfs_buf_map_from_irec(
        struct xfs_mount        *mp,
        struct xfs_buf_map      **mapp,
-       unsigned int            *nmaps,
+       int                     *nmaps,
        struct xfs_bmbt_irec    *irecs,
-       unsigned int            nirecs)
+       int                     nirecs)
 {
        struct xfs_buf_map      *map;
        int                     i;
diff --git a/fs/xfs/xfs_dfrag.c b/fs/xfs/xfs_dfrag.c
deleted file mode 100644 (file)
index e36445c..0000000
+++ /dev/null
@@ -1,459 +0,0 @@
-/*
- * Copyright (c) 2000-2006 Silicon Graphics, Inc.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- */
-#include "xfs.h"
-#include "xfs_fs.h"
-#include "xfs_types.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
-#include "xfs_sb.h"
-#include "xfs_ag.h"
-#include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_btree.h"
-#include "xfs_dinode.h"
-#include "xfs_inode.h"
-#include "xfs_inode_item.h"
-#include "xfs_bmap.h"
-#include "xfs_itable.h"
-#include "xfs_dfrag.h"
-#include "xfs_error.h"
-#include "xfs_vnodeops.h"
-#include "xfs_trace.h"
-
-
-static int xfs_swap_extents(
-       xfs_inode_t     *ip,    /* target inode */
-       xfs_inode_t     *tip,   /* tmp inode */
-       xfs_swapext_t   *sxp);
-
-/*
- * ioctl interface for swapext
- */
-int
-xfs_swapext(
-       xfs_swapext_t   *sxp)
-{
-       xfs_inode_t     *ip, *tip;
-       struct fd       f, tmp;
-       int             error = 0;
-
-       /* Pull information for the target fd */
-       f = fdget((int)sxp->sx_fdtarget);
-       if (!f.file) {
-               error = XFS_ERROR(EINVAL);
-               goto out;
-       }
-
-       if (!(f.file->f_mode & FMODE_WRITE) ||
-           !(f.file->f_mode & FMODE_READ) ||
-           (f.file->f_flags & O_APPEND)) {
-               error = XFS_ERROR(EBADF);
-               goto out_put_file;
-       }
-
-       tmp = fdget((int)sxp->sx_fdtmp);
-       if (!tmp.file) {
-               error = XFS_ERROR(EINVAL);
-               goto out_put_file;
-       }
-
-       if (!(tmp.file->f_mode & FMODE_WRITE) ||
-           !(tmp.file->f_mode & FMODE_READ) ||
-           (tmp.file->f_flags & O_APPEND)) {
-               error = XFS_ERROR(EBADF);
-               goto out_put_tmp_file;
-       }
-
-       if (IS_SWAPFILE(file_inode(f.file)) ||
-           IS_SWAPFILE(file_inode(tmp.file))) {
-               error = XFS_ERROR(EINVAL);
-               goto out_put_tmp_file;
-       }
-
-       ip = XFS_I(file_inode(f.file));
-       tip = XFS_I(file_inode(tmp.file));
-
-       if (ip->i_mount != tip->i_mount) {
-               error = XFS_ERROR(EINVAL);
-               goto out_put_tmp_file;
-       }
-
-       if (ip->i_ino == tip->i_ino) {
-               error = XFS_ERROR(EINVAL);
-               goto out_put_tmp_file;
-       }
-
-       if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
-               error = XFS_ERROR(EIO);
-               goto out_put_tmp_file;
-       }
-
-       error = xfs_swap_extents(ip, tip, sxp);
-
- out_put_tmp_file:
-       fdput(tmp);
- out_put_file:
-       fdput(f);
- out:
-       return error;
-}
-
-/*
- * We need to check that the format of the data fork in the temporary inode is
- * valid for the target inode before doing the swap. This is not a problem with
- * attr1 because of the fixed fork offset, but attr2 has a dynamically sized
- * data fork depending on the space the attribute fork is taking so we can get
- * invalid formats on the target inode.
- *
- * E.g. target has space for 7 extents in extent format, temp inode only has
- * space for 6.  If we defragment down to 7 extents, then the tmp format is a
- * btree, but when swapped it needs to be in extent format. Hence we can't just
- * blindly swap data forks on attr2 filesystems.
- *
- * Note that we check the swap in both directions so that we don't end up with
- * a corrupt temporary inode, either.
- *
- * Note that fixing the way xfs_fsr sets up the attribute fork in the source
- * inode will prevent this situation from occurring, so all we do here is
- * reject and log the attempt. basically we are putting the responsibility on
- * userspace to get this right.
- */
-static int
-xfs_swap_extents_check_format(
-       xfs_inode_t     *ip,    /* target inode */
-       xfs_inode_t     *tip)   /* tmp inode */
-{
-
-       /* Should never get a local format */
-       if (ip->i_d.di_format == XFS_DINODE_FMT_LOCAL ||
-           tip->i_d.di_format == XFS_DINODE_FMT_LOCAL)
-               return EINVAL;
-
-       /*
-        * if the target inode has less extents that then temporary inode then
-        * why did userspace call us?
-        */
-       if (ip->i_d.di_nextents < tip->i_d.di_nextents)
-               return EINVAL;
-
-       /*
-        * if the target inode is in extent form and the temp inode is in btree
-        * form then we will end up with the target inode in the wrong format
-        * as we already know there are less extents in the temp inode.
-        */
-       if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
-           tip->i_d.di_format == XFS_DINODE_FMT_BTREE)
-               return EINVAL;
-
-       /* Check temp in extent form to max in target */
-       if (tip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
-           XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) >
-                       XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
-               return EINVAL;
-
-       /* Check target in extent form to max in temp */
-       if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
-           XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) >
-                       XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
-               return EINVAL;
-
-       /*
-        * If we are in a btree format, check that the temp root block will fit
-        * in the target and that it has enough extents to be in btree format
-        * in the target.
-        *
-        * Note that we have to be careful to allow btree->extent conversions
-        * (a common defrag case) which will occur when the temp inode is in
-        * extent format...
-        */
-       if (tip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
-               if (XFS_IFORK_BOFF(ip) &&
-                   XFS_BMAP_BMDR_SPACE(tip->i_df.if_broot) > XFS_IFORK_BOFF(ip))
-                       return EINVAL;
-               if (XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) <=
-                   XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
-                       return EINVAL;
-       }
-
-       /* Reciprocal target->temp btree format checks */
-       if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
-               if (XFS_IFORK_BOFF(tip) &&
-                   XFS_BMAP_BMDR_SPACE(ip->i_df.if_broot) > XFS_IFORK_BOFF(tip))
-                       return EINVAL;
-               if (XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) <=
-                   XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
-                       return EINVAL;
-       }
-
-       return 0;
-}
-
-static int
-xfs_swap_extents(
-       xfs_inode_t     *ip,    /* target inode */
-       xfs_inode_t     *tip,   /* tmp inode */
-       xfs_swapext_t   *sxp)
-{
-       xfs_mount_t     *mp = ip->i_mount;
-       xfs_trans_t     *tp;
-       xfs_bstat_t     *sbp = &sxp->sx_stat;
-       xfs_ifork_t     *tempifp, *ifp, *tifp;
-       int             src_log_flags, target_log_flags;
-       int             error = 0;
-       int             aforkblks = 0;
-       int             taforkblks = 0;
-       __uint64_t      tmp;
-
-       /*
-        * We have no way of updating owner information in the BMBT blocks for
-        * each inode on CRC enabled filesystems, so to avoid corrupting the
-        * this metadata we simply don't allow extent swaps to occur.
-        */
-       if (xfs_sb_version_hascrc(&mp->m_sb))
-               return XFS_ERROR(EINVAL);
-
-       tempifp = kmem_alloc(sizeof(xfs_ifork_t), KM_MAYFAIL);
-       if (!tempifp) {
-               error = XFS_ERROR(ENOMEM);
-               goto out;
-       }
-
-       /*
-        * we have to do two separate lock calls here to keep lockdep
-        * happy. If we try to get all the locks in one call, lock will
-        * report false positives when we drop the ILOCK and regain them
-        * below.
-        */
-       xfs_lock_two_inodes(ip, tip, XFS_IOLOCK_EXCL);
-       xfs_lock_two_inodes(ip, tip, XFS_ILOCK_EXCL);
-
-       /* Verify that both files have the same format */
-       if ((ip->i_d.di_mode & S_IFMT) != (tip->i_d.di_mode & S_IFMT)) {
-               error = XFS_ERROR(EINVAL);
-               goto out_unlock;
-       }
-
-       /* Verify both files are either real-time or non-realtime */
-       if (XFS_IS_REALTIME_INODE(ip) != XFS_IS_REALTIME_INODE(tip)) {
-               error = XFS_ERROR(EINVAL);
-               goto out_unlock;
-       }
-
-       error = -filemap_write_and_wait(VFS_I(tip)->i_mapping);
-       if (error)
-               goto out_unlock;
-       truncate_pagecache_range(VFS_I(tip), 0, -1);
-
-       /* Verify O_DIRECT for ftmp */
-       if (VN_CACHED(VFS_I(tip)) != 0) {
-               error = XFS_ERROR(EINVAL);
-               goto out_unlock;
-       }
-
-       /* Verify all data are being swapped */
-       if (sxp->sx_offset != 0 ||
-           sxp->sx_length != ip->i_d.di_size ||
-           sxp->sx_length != tip->i_d.di_size) {
-               error = XFS_ERROR(EFAULT);
-               goto out_unlock;
-       }
-
-       trace_xfs_swap_extent_before(ip, 0);
-       trace_xfs_swap_extent_before(tip, 1);
-
-       /* check inode formats now that data is flushed */
-       error = xfs_swap_extents_check_format(ip, tip);
-       if (error) {
-               xfs_notice(mp,
-                   "%s: inode 0x%llx format is incompatible for exchanging.",
-                               __func__, ip->i_ino);
-               goto out_unlock;
-       }
-
-       /*
-        * Compare the current change & modify times with that
-        * passed in.  If they differ, we abort this swap.
-        * This is the mechanism used to ensure the calling
-        * process that the file was not changed out from
-        * under it.
-        */
-       if ((sbp->bs_ctime.tv_sec != VFS_I(ip)->i_ctime.tv_sec) ||
-           (sbp->bs_ctime.tv_nsec != VFS_I(ip)->i_ctime.tv_nsec) ||
-           (sbp->bs_mtime.tv_sec != VFS_I(ip)->i_mtime.tv_sec) ||
-           (sbp->bs_mtime.tv_nsec != VFS_I(ip)->i_mtime.tv_nsec)) {
-               error = XFS_ERROR(EBUSY);
-               goto out_unlock;
-       }
-
-       /* We need to fail if the file is memory mapped.  Once we have tossed
-        * all existing pages, the page fault will have no option
-        * but to go to the filesystem for pages. By making the page fault call
-        * vop_read (or write in the case of autogrow) they block on the iolock
-        * until we have switched the extents.
-        */
-       if (VN_MAPPED(VFS_I(ip))) {
-               error = XFS_ERROR(EBUSY);
-               goto out_unlock;
-       }
-
-       xfs_iunlock(ip, XFS_ILOCK_EXCL);
-       xfs_iunlock(tip, XFS_ILOCK_EXCL);
-
-       /*
-        * There is a race condition here since we gave up the
-        * ilock.  However, the data fork will not change since
-        * we have the iolock (locked for truncation too) so we
-        * are safe.  We don't really care if non-io related
-        * fields change.
-        */
-       truncate_pagecache_range(VFS_I(ip), 0, -1);
-
-       tp = xfs_trans_alloc(mp, XFS_TRANS_SWAPEXT);
-       if ((error = xfs_trans_reserve(tp, 0,
-                                    XFS_ICHANGE_LOG_RES(mp), 0,
-                                    0, 0))) {
-               xfs_iunlock(ip,  XFS_IOLOCK_EXCL);
-               xfs_iunlock(tip, XFS_IOLOCK_EXCL);
-               xfs_trans_cancel(tp, 0);
-               goto out;
-       }
-       xfs_lock_two_inodes(ip, tip, XFS_ILOCK_EXCL);
-
-       /*
-        * Count the number of extended attribute blocks
-        */
-       if ( ((XFS_IFORK_Q(ip) != 0) && (ip->i_d.di_anextents > 0)) &&
-            (ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)) {
-               error = xfs_bmap_count_blocks(tp, ip, XFS_ATTR_FORK, &aforkblks);
-               if (error)
-                       goto out_trans_cancel;
-       }
-       if ( ((XFS_IFORK_Q(tip) != 0) && (tip->i_d.di_anextents > 0)) &&
-            (tip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)) {
-               error = xfs_bmap_count_blocks(tp, tip, XFS_ATTR_FORK,
-                       &taforkblks);
-               if (error)
-                       goto out_trans_cancel;
-       }
-
-       /*
-        * Swap the data forks of the inodes
-        */
-       ifp = &ip->i_df;
-       tifp = &tip->i_df;
-       *tempifp = *ifp;        /* struct copy */
-       *ifp = *tifp;           /* struct copy */
-       *tifp = *tempifp;       /* struct copy */
-
-       /*
-        * Fix the on-disk inode values
-        */
-       tmp = (__uint64_t)ip->i_d.di_nblocks;
-       ip->i_d.di_nblocks = tip->i_d.di_nblocks - taforkblks + aforkblks;
-       tip->i_d.di_nblocks = tmp + taforkblks - aforkblks;
-
-       tmp = (__uint64_t) ip->i_d.di_nextents;
-       ip->i_d.di_nextents = tip->i_d.di_nextents;
-       tip->i_d.di_nextents = tmp;
-
-       tmp = (__uint64_t) ip->i_d.di_format;
-       ip->i_d.di_format = tip->i_d.di_format;
-       tip->i_d.di_format = tmp;
-
-       /*
-        * The extents in the source inode could still contain speculative
-        * preallocation beyond EOF (e.g. the file is open but not modified
-        * while defrag is in progress). In that case, we need to copy over the
-        * number of delalloc blocks the data fork in the source inode is
-        * tracking beyond EOF so that when the fork is truncated away when the
-        * temporary inode is unlinked we don't underrun the i_delayed_blks
-        * counter on that inode.
-        */
-       ASSERT(tip->i_delayed_blks == 0);
-       tip->i_delayed_blks = ip->i_delayed_blks;
-       ip->i_delayed_blks = 0;
-
-       src_log_flags = XFS_ILOG_CORE;
-       switch (ip->i_d.di_format) {
-       case XFS_DINODE_FMT_EXTENTS:
-               /* If the extents fit in the inode, fix the
-                * pointer.  Otherwise it's already NULL or
-                * pointing to the extent.
-                */
-               if (ip->i_d.di_nextents <= XFS_INLINE_EXTS) {
-                       ifp->if_u1.if_extents =
-                               ifp->if_u2.if_inline_ext;
-               }
-               src_log_flags |= XFS_ILOG_DEXT;
-               break;
-       case XFS_DINODE_FMT_BTREE:
-               src_log_flags |= XFS_ILOG_DBROOT;
-               break;
-       }
-
-       target_log_flags = XFS_ILOG_CORE;
-       switch (tip->i_d.di_format) {
-       case XFS_DINODE_FMT_EXTENTS:
-               /* If the extents fit in the inode, fix the
-                * pointer.  Otherwise it's already NULL or
-                * pointing to the extent.
-                */
-               if (tip->i_d.di_nextents <= XFS_INLINE_EXTS) {
-                       tifp->if_u1.if_extents =
-                               tifp->if_u2.if_inline_ext;
-               }
-               target_log_flags |= XFS_ILOG_DEXT;
-               break;
-       case XFS_DINODE_FMT_BTREE:
-               target_log_flags |= XFS_ILOG_DBROOT;
-               break;
-       }
-
-
-       xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
-       xfs_trans_ijoin(tp, tip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
-
-       xfs_trans_log_inode(tp, ip,  src_log_flags);
-       xfs_trans_log_inode(tp, tip, target_log_flags);
-
-       /*
-        * If this is a synchronous mount, make sure that the
-        * transaction goes to disk before returning to the user.
-        */
-       if (mp->m_flags & XFS_MOUNT_WSYNC)
-               xfs_trans_set_sync(tp);
-
-       error = xfs_trans_commit(tp, 0);
-
-       trace_xfs_swap_extent_after(ip, 0);
-       trace_xfs_swap_extent_after(tip, 1);
-out:
-       kmem_free(tempifp);
-       return error;
-
-out_unlock:
-       xfs_iunlock(ip,  XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
-       xfs_iunlock(tip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
-       goto out;
-
-out_trans_cancel:
-       xfs_trans_cancel(tp, 0);
-       goto out_unlock;
-}
diff --git a/fs/xfs/xfs_dfrag.h b/fs/xfs/xfs_dfrag.h
deleted file mode 100644 (file)
index 20bdd93..0000000
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Copyright (c) 2000,2005 Silicon Graphics, Inc.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- */
-#ifndef __XFS_DFRAG_H__
-#define        __XFS_DFRAG_H__
-
-/*
- * Structure passed to xfs_swapext
- */
-
-typedef struct xfs_swapext
-{
-       __int64_t       sx_version;     /* version */
-       __int64_t       sx_fdtarget;    /* fd of target file */
-       __int64_t       sx_fdtmp;       /* fd of tmp file */
-       xfs_off_t       sx_offset;      /* offset into file */
-       xfs_off_t       sx_length;      /* leng from offset */
-       char            sx_pad[16];     /* pad space, unused */
-       xfs_bstat_t     sx_stat;        /* stat of target b4 copy */
-} xfs_swapext_t;
-
-/*
- * Version flag
- */
-#define XFS_SX_VERSION         0
-
-#ifdef __KERNEL__
-/*
- * Prototypes for visible xfs_dfrag.c routines.
- */
-
-/*
- * Syscall interface for xfs_swapext
- */
-int    xfs_swapext(struct xfs_swapext *sx);
-
-#endif /* __KERNEL__ */
-
-#endif /* __XFS_DFRAG_H__ */
index 8f023dee404da0da9c2092ba15d5ad904588d5e4..841933c9e80f95e10038e5b40212f0760d166fb8 100644 (file)
 #include "xfs_inode.h"
 #include "xfs_inode_item.h"
 #include "xfs_bmap.h"
-#include "xfs_dir2.h"
 #include "xfs_dir2_format.h"
+#include "xfs_dir2.h"
 #include "xfs_dir2_priv.h"
 #include "xfs_error.h"
-#include "xfs_vnodeops.h"
 #include "xfs_trace.h"
 
 struct xfs_name xfs_name_dotdot = { (unsigned char *)"..", 2};
@@ -362,37 +361,6 @@ xfs_dir_removename(
        return rval;
 }
 
-/*
- * Read a directory.
- */
-int
-xfs_readdir(
-       xfs_inode_t     *dp,
-       struct dir_context *ctx,
-       size_t          bufsize)
-{
-       int             rval;           /* return value */
-       int             v;              /* type-checking value */
-
-       trace_xfs_readdir(dp);
-
-       if (XFS_FORCED_SHUTDOWN(dp->i_mount))
-               return XFS_ERROR(EIO);
-
-       ASSERT(S_ISDIR(dp->i_d.di_mode));
-       XFS_STATS_INC(xs_dir_getdents);
-
-       if (dp->i_d.di_format == XFS_DINODE_FMT_LOCAL)
-               rval = xfs_dir2_sf_getdents(dp, ctx);
-       else if ((rval = xfs_dir2_isblock(NULL, dp, &v)))
-               ;
-       else if (v)
-               rval = xfs_dir2_block_getdents(dp, ctx);
-       else
-               rval = xfs_dir2_leaf_getdents(dp, ctx, bufsize);
-       return rval;
-}
-
 /*
  * Replace the inode number of a directory entry.
  */
index e937d9991c1850c5427ecae7419011c8395d09c3..7fe2b8f0a9e389e762691873626c643ef3bd5b01 100644 (file)
@@ -23,6 +23,11 @@ struct xfs_da_args;
 struct xfs_inode;
 struct xfs_mount;
 struct xfs_trans;
+struct xfs_dir2_sf_hdr;
+struct xfs_dir2_sf_entry;
+struct xfs_dir2_data_hdr;
+struct xfs_dir2_data_entry;
+struct xfs_dir2_data_unused;
 
 extern struct xfs_name xfs_name_dotdot;
 
@@ -57,4 +62,44 @@ extern int xfs_dir_canenter(struct xfs_trans *tp, struct xfs_inode *dp,
  */
 extern int xfs_dir2_sf_to_block(struct xfs_da_args *args);
 
+/*
+ * Interface routines used by userspace utilities
+ */
+extern xfs_ino_t xfs_dir2_sf_get_parent_ino(struct xfs_dir2_sf_hdr *sfp);
+extern void xfs_dir2_sf_put_parent_ino(struct xfs_dir2_sf_hdr *sfp,
+               xfs_ino_t ino);
+extern xfs_ino_t xfs_dir2_sfe_get_ino(struct xfs_dir2_sf_hdr *sfp,
+               struct xfs_dir2_sf_entry *sfep);
+extern void xfs_dir2_sfe_put_ino( struct xfs_dir2_sf_hdr *,
+               struct xfs_dir2_sf_entry *sfep, xfs_ino_t ino);
+
+extern int xfs_dir2_isblock(struct xfs_trans *tp, struct xfs_inode *dp, int *r);
+extern int xfs_dir2_isleaf(struct xfs_trans *tp, struct xfs_inode *dp, int *r);
+extern int xfs_dir2_shrink_inode(struct xfs_da_args *args, xfs_dir2_db_t db,
+                               struct xfs_buf *bp);
+
+extern void xfs_dir2_data_freescan(struct xfs_mount *mp,
+               struct xfs_dir2_data_hdr *hdr, int *loghead);
+extern void xfs_dir2_data_log_entry(struct xfs_trans *tp, struct xfs_buf *bp,
+               struct xfs_dir2_data_entry *dep);
+extern void xfs_dir2_data_log_header(struct xfs_trans *tp,
+               struct xfs_buf *bp);
+extern void xfs_dir2_data_log_unused(struct xfs_trans *tp, struct xfs_buf *bp,
+               struct xfs_dir2_data_unused *dup);
+extern void xfs_dir2_data_make_free(struct xfs_trans *tp, struct xfs_buf *bp,
+               xfs_dir2_data_aoff_t offset, xfs_dir2_data_aoff_t len,
+               int *needlogp, int *needscanp);
+extern void xfs_dir2_data_use_free(struct xfs_trans *tp, struct xfs_buf *bp,
+               struct xfs_dir2_data_unused *dup, xfs_dir2_data_aoff_t offset,
+               xfs_dir2_data_aoff_t len, int *needlogp, int *needscanp);
+
+extern struct xfs_dir2_data_free *xfs_dir2_data_freefind(
+               struct xfs_dir2_data_hdr *hdr, struct xfs_dir2_data_unused *dup);
+
+extern const struct xfs_buf_ops xfs_dir3_block_buf_ops;
+extern const struct xfs_buf_ops xfs_dir3_leafn_buf_ops;
+extern const struct xfs_buf_ops xfs_dir3_leaf1_buf_ops;
+extern const struct xfs_buf_ops xfs_dir3_free_buf_ops;
+extern const struct xfs_buf_ops xfs_dir3_data_buf_ops;
+
 #endif /* __XFS_DIR2_H__ */
index 5e7fbd72cf5255c53a8494a991986c339ea5293a..becd69f6e4b8db3ea3b0ca79974a913b71ca3393 100644 (file)
@@ -31,8 +31,8 @@
 #include "xfs_inode_item.h"
 #include "xfs_bmap.h"
 #include "xfs_buf_item.h"
-#include "xfs_dir2.h"
 #include "xfs_dir2_format.h"
+#include "xfs_dir2.h"
 #include "xfs_dir2_priv.h"
 #include "xfs_error.h"
 #include "xfs_trace.h"
@@ -126,7 +126,7 @@ const struct xfs_buf_ops xfs_dir3_block_buf_ops = {
        .verify_write = xfs_dir3_block_write_verify,
 };
 
-static int
+int
 xfs_dir3_block_read(
        struct xfs_trans        *tp,
        struct xfs_inode        *dp,
@@ -564,101 +564,6 @@ xfs_dir2_block_addname(
        return 0;
 }
 
-/*
- * Readdir for block directories.
- */
-int                                            /* error */
-xfs_dir2_block_getdents(
-       xfs_inode_t             *dp,            /* incore inode */
-       struct dir_context      *ctx)
-{
-       xfs_dir2_data_hdr_t     *hdr;           /* block header */
-       struct xfs_buf          *bp;            /* buffer for block */
-       xfs_dir2_block_tail_t   *btp;           /* block tail */
-       xfs_dir2_data_entry_t   *dep;           /* block data entry */
-       xfs_dir2_data_unused_t  *dup;           /* block unused entry */
-       char                    *endptr;        /* end of the data entries */
-       int                     error;          /* error return value */
-       xfs_mount_t             *mp;            /* filesystem mount point */
-       char                    *ptr;           /* current data entry */
-       int                     wantoff;        /* starting block offset */
-       xfs_off_t               cook;
-
-       mp = dp->i_mount;
-       /*
-        * If the block number in the offset is out of range, we're done.
-        */
-       if (xfs_dir2_dataptr_to_db(mp, ctx->pos) > mp->m_dirdatablk)
-               return 0;
-
-       error = xfs_dir3_block_read(NULL, dp, &bp);
-       if (error)
-               return error;
-
-       /*
-        * Extract the byte offset we start at from the seek pointer.
-        * We'll skip entries before this.
-        */
-       wantoff = xfs_dir2_dataptr_to_off(mp, ctx->pos);
-       hdr = bp->b_addr;
-       xfs_dir3_data_check(dp, bp);
-       /*
-        * Set up values for the loop.
-        */
-       btp = xfs_dir2_block_tail_p(mp, hdr);
-       ptr = (char *)xfs_dir3_data_entry_p(hdr);
-       endptr = (char *)xfs_dir2_block_leaf_p(btp);
-
-       /*
-        * Loop over the data portion of the block.
-        * Each object is a real entry (dep) or an unused one (dup).
-        */
-       while (ptr < endptr) {
-               dup = (xfs_dir2_data_unused_t *)ptr;
-               /*
-                * Unused, skip it.
-                */
-               if (be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG) {
-                       ptr += be16_to_cpu(dup->length);
-                       continue;
-               }
-
-               dep = (xfs_dir2_data_entry_t *)ptr;
-
-               /*
-                * Bump pointer for the next iteration.
-                */
-               ptr += xfs_dir2_data_entsize(dep->namelen);
-               /*
-                * The entry is before the desired starting point, skip it.
-                */
-               if ((char *)dep - (char *)hdr < wantoff)
-                       continue;
-
-               cook = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk,
-                                           (char *)dep - (char *)hdr);
-
-               ctx->pos = cook & 0x7fffffff;
-               /*
-                * If it didn't fit, set the final offset to here & return.
-                */
-               if (!dir_emit(ctx, (char *)dep->name, dep->namelen,
-                           be64_to_cpu(dep->inumber), DT_UNKNOWN)) {
-                       xfs_trans_brelse(NULL, bp);
-                       return 0;
-               }
-       }
-
-       /*
-        * Reached the end of the block.
-        * Set the offset to a non-existent block 1 and return.
-        */
-       ctx->pos = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk + 1, 0) &
-                       0x7fffffff;
-       xfs_trans_brelse(NULL, bp);
-       return 0;
-}
-
 /*
  * Log leaf entries from the block.
  */
index c2930238005c6605c1e69e4dc455b9ed7267f3da..98c23faa701c62000a0a49447944ca0daa6a460e 100644 (file)
 #include "xfs_dinode.h"
 #include "xfs_inode.h"
 #include "xfs_dir2_format.h"
+#include "xfs_dir2.h"
 #include "xfs_dir2_priv.h"
 #include "xfs_error.h"
 #include "xfs_buf_item.h"
 #include "xfs_cksum.h"
 
-STATIC xfs_dir2_data_free_t *
-xfs_dir2_data_freefind(xfs_dir2_data_hdr_t *hdr, xfs_dir2_data_unused_t *dup);
-
 /*
  * Check the consistency of the data block.
  * The input can also be a block-format directory.
@@ -325,7 +323,7 @@ xfs_dir3_data_readahead(
  * Given a data block and an unused entry from that block,
  * return the bestfree entry if any that corresponds to it.
  */
-STATIC xfs_dir2_data_free_t *
+xfs_dir2_data_free_t *
 xfs_dir2_data_freefind(
        xfs_dir2_data_hdr_t     *hdr,           /* data block */
        xfs_dir2_data_unused_t  *dup)           /* data unused entry */
@@ -333,7 +331,7 @@ xfs_dir2_data_freefind(
        xfs_dir2_data_free_t    *dfp;           /* bestfree entry */
        xfs_dir2_data_aoff_t    off;            /* offset value needed */
        struct xfs_dir2_data_free *bf;
-#if defined(DEBUG) && defined(__KERNEL__)
+#ifdef DEBUG
        int                     matched;        /* matched the value */
        int                     seenzero;       /* saw a 0 bestfree entry */
 #endif
@@ -341,7 +339,7 @@ xfs_dir2_data_freefind(
        off = (xfs_dir2_data_aoff_t)((char *)dup - (char *)hdr);
        bf = xfs_dir3_data_bestfree_p(hdr);
 
-#if defined(DEBUG) && defined(__KERNEL__)
+#ifdef DEBUG
        /*
         * Validate some consistency in the bestfree table.
         * Check order, non-overlapping entries, and if we find the
index 7826782b8d789461eef5a5443ae1d29944887815..2095e17b75cb3b8576c80c7e0ad9ef18d199102d 100644 (file)
@@ -519,6 +519,9 @@ struct xfs_dir3_leaf {
 
 #define XFS_DIR3_LEAF_CRC_OFF  offsetof(struct xfs_dir3_leaf_hdr, info.crc)
 
+extern void xfs_dir3_leaf_hdr_from_disk(struct xfs_dir3_icleaf_hdr *to,
+                                       struct xfs_dir2_leaf *from);
+
 static inline int
 xfs_dir3_leaf_hdr_size(struct xfs_dir2_leaf *lp)
 {
index 2aed25cae04d9f265df00aba8e5d63a624350a0f..591eaf2359193635ade3bf44f8176362a132313c 100644 (file)
@@ -31,6 +31,7 @@
 #include "xfs_inode.h"
 #include "xfs_bmap.h"
 #include "xfs_dir2_format.h"
+#include "xfs_dir2.h"
 #include "xfs_dir2_priv.h"
 #include "xfs_error.h"
 #include "xfs_trace.h"
@@ -1083,396 +1084,6 @@ xfs_dir3_leaf_compact_x1(
        *highstalep = highstale;
 }
 
-struct xfs_dir2_leaf_map_info {
-       xfs_extlen_t    map_blocks;     /* number of fsbs in map */
-       xfs_dablk_t     map_off;        /* last mapped file offset */
-       int             map_size;       /* total entries in *map */
-       int             map_valid;      /* valid entries in *map */
-       int             nmap;           /* mappings to ask xfs_bmapi */
-       xfs_dir2_db_t   curdb;          /* db for current block */
-       int             ra_current;     /* number of read-ahead blks */
-       int             ra_index;       /* *map index for read-ahead */
-       int             ra_offset;      /* map entry offset for ra */
-       int             ra_want;        /* readahead count wanted */
-       struct xfs_bmbt_irec map[];     /* map vector for blocks */
-};
-
-STATIC int
-xfs_dir2_leaf_readbuf(
-       struct xfs_inode        *dp,
-       size_t                  bufsize,
-       struct xfs_dir2_leaf_map_info *mip,
-       xfs_dir2_off_t          *curoff,
-       struct xfs_buf          **bpp)
-{
-       struct xfs_mount        *mp = dp->i_mount;
-       struct xfs_buf          *bp = *bpp;
-       struct xfs_bmbt_irec    *map = mip->map;
-       struct blk_plug         plug;
-       int                     error = 0;
-       int                     length;
-       int                     i;
-       int                     j;
-
-       /*
-        * If we have a buffer, we need to release it and
-        * take it out of the mapping.
-        */
-
-       if (bp) {
-               xfs_trans_brelse(NULL, bp);
-               bp = NULL;
-               mip->map_blocks -= mp->m_dirblkfsbs;
-               /*
-                * Loop to get rid of the extents for the
-                * directory block.
-                */
-               for (i = mp->m_dirblkfsbs; i > 0; ) {
-                       j = min_t(int, map->br_blockcount, i);
-                       map->br_blockcount -= j;
-                       map->br_startblock += j;
-                       map->br_startoff += j;
-                       /*
-                        * If mapping is done, pitch it from
-                        * the table.
-                        */
-                       if (!map->br_blockcount && --mip->map_valid)
-                               memmove(&map[0], &map[1],
-                                       sizeof(map[0]) * mip->map_valid);
-                       i -= j;
-               }
-       }
-
-       /*
-        * Recalculate the readahead blocks wanted.
-        */
-       mip->ra_want = howmany(bufsize + mp->m_dirblksize,
-                              mp->m_sb.sb_blocksize) - 1;
-       ASSERT(mip->ra_want >= 0);
-
-       /*
-        * If we don't have as many as we want, and we haven't
-        * run out of data blocks, get some more mappings.
-        */
-       if (1 + mip->ra_want > mip->map_blocks &&
-           mip->map_off < xfs_dir2_byte_to_da(mp, XFS_DIR2_LEAF_OFFSET)) {
-               /*
-                * Get more bmaps, fill in after the ones
-                * we already have in the table.
-                */
-               mip->nmap = mip->map_size - mip->map_valid;
-               error = xfs_bmapi_read(dp, mip->map_off,
-                               xfs_dir2_byte_to_da(mp, XFS_DIR2_LEAF_OFFSET) -
-                                                               mip->map_off,
-                               &map[mip->map_valid], &mip->nmap, 0);
-
-               /*
-                * Don't know if we should ignore this or try to return an
-                * error.  The trouble with returning errors is that readdir
-                * will just stop without actually passing the error through.
-                */
-               if (error)
-                       goto out;       /* XXX */
-
-               /*
-                * If we got all the mappings we asked for, set the final map
-                * offset based on the last bmap value received.  Otherwise,
-                * we've reached the end.
-                */
-               if (mip->nmap == mip->map_size - mip->map_valid) {
-                       i = mip->map_valid + mip->nmap - 1;
-                       mip->map_off = map[i].br_startoff + map[i].br_blockcount;
-               } else
-                       mip->map_off = xfs_dir2_byte_to_da(mp,
-                                                       XFS_DIR2_LEAF_OFFSET);
-
-               /*
-                * Look for holes in the mapping, and eliminate them.  Count up
-                * the valid blocks.
-                */
-               for (i = mip->map_valid; i < mip->map_valid + mip->nmap; ) {
-                       if (map[i].br_startblock == HOLESTARTBLOCK) {
-                               mip->nmap--;
-                               length = mip->map_valid + mip->nmap - i;
-                               if (length)
-                                       memmove(&map[i], &map[i + 1],
-                                               sizeof(map[i]) * length);
-                       } else {
-                               mip->map_blocks += map[i].br_blockcount;
-                               i++;
-                       }
-               }
-               mip->map_valid += mip->nmap;
-       }
-
-       /*
-        * No valid mappings, so no more data blocks.
-        */
-       if (!mip->map_valid) {
-               *curoff = xfs_dir2_da_to_byte(mp, mip->map_off);
-               goto out;
-       }
-
-       /*
-        * Read the directory block starting at the first mapping.
-        */
-       mip->curdb = xfs_dir2_da_to_db(mp, map->br_startoff);
-       error = xfs_dir3_data_read(NULL, dp, map->br_startoff,
-                       map->br_blockcount >= mp->m_dirblkfsbs ?
-                           XFS_FSB_TO_DADDR(mp, map->br_startblock) : -1, &bp);
-
-       /*
-        * Should just skip over the data block instead of giving up.
-        */
-       if (error)
-               goto out;       /* XXX */
-
-       /*
-        * Adjust the current amount of read-ahead: we just read a block that
-        * was previously ra.
-        */
-       if (mip->ra_current)
-               mip->ra_current -= mp->m_dirblkfsbs;
-
-       /*
-        * Do we need more readahead?
-        */
-       blk_start_plug(&plug);
-       for (mip->ra_index = mip->ra_offset = i = 0;
-            mip->ra_want > mip->ra_current && i < mip->map_blocks;
-            i += mp->m_dirblkfsbs) {
-               ASSERT(mip->ra_index < mip->map_valid);
-               /*
-                * Read-ahead a contiguous directory block.
-                */
-               if (i > mip->ra_current &&
-                   map[mip->ra_index].br_blockcount >= mp->m_dirblkfsbs) {
-                       xfs_dir3_data_readahead(NULL, dp,
-                               map[mip->ra_index].br_startoff + mip->ra_offset,
-                               XFS_FSB_TO_DADDR(mp,
-                                       map[mip->ra_index].br_startblock +
-                                                       mip->ra_offset));
-                       mip->ra_current = i;
-               }
-
-               /*
-                * Read-ahead a non-contiguous directory block.  This doesn't
-                * use our mapping, but this is a very rare case.
-                */
-               else if (i > mip->ra_current) {
-                       xfs_dir3_data_readahead(NULL, dp,
-                                       map[mip->ra_index].br_startoff +
-                                                       mip->ra_offset, -1);
-                       mip->ra_current = i;
-               }
-
-               /*
-                * Advance offset through the mapping table.
-                */
-               for (j = 0; j < mp->m_dirblkfsbs; j++) {
-                       /*
-                        * The rest of this extent but not more than a dir
-                        * block.
-                        */
-                       length = min_t(int, mp->m_dirblkfsbs,
-                                       map[mip->ra_index].br_blockcount -
-                                                       mip->ra_offset);
-                       j += length;
-                       mip->ra_offset += length;
-
-                       /*
-                        * Advance to the next mapping if this one is used up.
-                        */
-                       if (mip->ra_offset == map[mip->ra_index].br_blockcount) {
-                               mip->ra_offset = 0;
-                               mip->ra_index++;
-                       }
-               }
-       }
-       blk_finish_plug(&plug);
-
-out:
-       *bpp = bp;
-       return error;
-}
-
-/*
- * Getdents (readdir) for leaf and node directories.
- * This reads the data blocks only, so is the same for both forms.
- */
-int                                            /* error */
-xfs_dir2_leaf_getdents(
-       xfs_inode_t             *dp,            /* incore directory inode */
-       struct dir_context      *ctx,
-       size_t                  bufsize)
-{
-       struct xfs_buf          *bp = NULL;     /* data block buffer */
-       xfs_dir2_data_hdr_t     *hdr;           /* data block header */
-       xfs_dir2_data_entry_t   *dep;           /* data entry */
-       xfs_dir2_data_unused_t  *dup;           /* unused entry */
-       int                     error = 0;      /* error return value */
-       int                     length;         /* temporary length value */
-       xfs_mount_t             *mp;            /* filesystem mount point */
-       int                     byteoff;        /* offset in current block */
-       xfs_dir2_off_t          curoff;         /* current overall offset */
-       xfs_dir2_off_t          newoff;         /* new curoff after new blk */
-       char                    *ptr = NULL;    /* pointer to current data */
-       struct xfs_dir2_leaf_map_info *map_info;
-
-       /*
-        * If the offset is at or past the largest allowed value,
-        * give up right away.
-        */
-       if (ctx->pos >= XFS_DIR2_MAX_DATAPTR)
-               return 0;
-
-       mp = dp->i_mount;
-
-       /*
-        * Set up to bmap a number of blocks based on the caller's
-        * buffer size, the directory block size, and the filesystem
-        * block size.
-        */
-       length = howmany(bufsize + mp->m_dirblksize,
-                                    mp->m_sb.sb_blocksize);
-       map_info = kmem_zalloc(offsetof(struct xfs_dir2_leaf_map_info, map) +
-                               (length * sizeof(struct xfs_bmbt_irec)),
-                              KM_SLEEP | KM_NOFS);
-       map_info->map_size = length;
-
-       /*
-        * Inside the loop we keep the main offset value as a byte offset
-        * in the directory file.
-        */
-       curoff = xfs_dir2_dataptr_to_byte(mp, ctx->pos);
-
-       /*
-        * Force this conversion through db so we truncate the offset
-        * down to get the start of the data block.
-        */
-       map_info->map_off = xfs_dir2_db_to_da(mp,
-                                             xfs_dir2_byte_to_db(mp, curoff));
-
-       /*
-        * Loop over directory entries until we reach the end offset.
-        * Get more blocks and readahead as necessary.
-        */
-       while (curoff < XFS_DIR2_LEAF_OFFSET) {
-               /*
-                * If we have no buffer, or we're off the end of the
-                * current buffer, need to get another one.
-                */
-               if (!bp || ptr >= (char *)bp->b_addr + mp->m_dirblksize) {
-
-                       error = xfs_dir2_leaf_readbuf(dp, bufsize, map_info,
-                                                     &curoff, &bp);
-                       if (error || !map_info->map_valid)
-                               break;
-
-                       /*
-                        * Having done a read, we need to set a new offset.
-                        */
-                       newoff = xfs_dir2_db_off_to_byte(mp, map_info->curdb, 0);
-                       /*
-                        * Start of the current block.
-                        */
-                       if (curoff < newoff)
-                               curoff = newoff;
-                       /*
-                        * Make sure we're in the right block.
-                        */
-                       else if (curoff > newoff)
-                               ASSERT(xfs_dir2_byte_to_db(mp, curoff) ==
-                                      map_info->curdb);
-                       hdr = bp->b_addr;
-                       xfs_dir3_data_check(dp, bp);
-                       /*
-                        * Find our position in the block.
-                        */
-                       ptr = (char *)xfs_dir3_data_entry_p(hdr);
-                       byteoff = xfs_dir2_byte_to_off(mp, curoff);
-                       /*
-                        * Skip past the header.
-                        */
-                       if (byteoff == 0)
-                               curoff += xfs_dir3_data_entry_offset(hdr);
-                       /*
-                        * Skip past entries until we reach our offset.
-                        */
-                       else {
-                               while ((char *)ptr - (char *)hdr < byteoff) {
-                                       dup = (xfs_dir2_data_unused_t *)ptr;
-
-                                       if (be16_to_cpu(dup->freetag)
-                                                 == XFS_DIR2_DATA_FREE_TAG) {
-
-                                               length = be16_to_cpu(dup->length);
-                                               ptr += length;
-                                               continue;
-                                       }
-                                       dep = (xfs_dir2_data_entry_t *)ptr;
-                                       length =
-                                          xfs_dir2_data_entsize(dep->namelen);
-                                       ptr += length;
-                               }
-                               /*
-                                * Now set our real offset.
-                                */
-                               curoff =
-                                       xfs_dir2_db_off_to_byte(mp,
-                                           xfs_dir2_byte_to_db(mp, curoff),
-                                           (char *)ptr - (char *)hdr);
-                               if (ptr >= (char *)hdr + mp->m_dirblksize) {
-                                       continue;
-                               }
-                       }
-               }
-               /*
-                * We have a pointer to an entry.
-                * Is it a live one?
-                */
-               dup = (xfs_dir2_data_unused_t *)ptr;
-               /*
-                * No, it's unused, skip over it.
-                */
-               if (be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG) {
-                       length = be16_to_cpu(dup->length);
-                       ptr += length;
-                       curoff += length;
-                       continue;
-               }
-
-               dep = (xfs_dir2_data_entry_t *)ptr;
-               length = xfs_dir2_data_entsize(dep->namelen);
-
-               ctx->pos = xfs_dir2_byte_to_dataptr(mp, curoff) & 0x7fffffff;
-               if (!dir_emit(ctx, (char *)dep->name, dep->namelen,
-                           be64_to_cpu(dep->inumber), DT_UNKNOWN))
-                       break;
-
-               /*
-                * Advance to next entry in the block.
-                */
-               ptr += length;
-               curoff += length;
-               /* bufsize may have just been a guess; don't go negative */
-               bufsize = bufsize > length ? bufsize - length : 0;
-       }
-
-       /*
-        * All done.  Set output offset value to current offset.
-        */
-       if (curoff > xfs_dir2_dataptr_to_byte(mp, XFS_DIR2_MAX_DATAPTR))
-               ctx->pos = XFS_DIR2_MAX_DATAPTR & 0x7fffffff;
-       else
-               ctx->pos = xfs_dir2_byte_to_dataptr(mp, curoff) & 0x7fffffff;
-       kmem_free(map_info);
-       if (bp)
-               xfs_trans_brelse(NULL, bp);
-       return error;
-}
-
-
 /*
  * Log the bests entries indicated from a leaf1 block.
  */
@@ -1975,10 +1586,6 @@ xfs_dir2_leaf_search_hash(
        ents = xfs_dir3_leaf_ents_p(leaf);
        xfs_dir3_leaf_hdr_from_disk(&leafhdr, leaf);
 
-#ifndef __KERNEL__
-       if (!leafhdr.count)
-               return 0;
-#endif
        /*
         * Note, the table cannot be empty, so we have to go through the loop.
         * Binary search the leaf entries looking for our hash value.
index 2226a00acd156118a2998ce37c2c95ae628503d9..18e287deee667527b816453c1dfd7f58fb802198 100644 (file)
@@ -30,6 +30,7 @@
 #include "xfs_inode.h"
 #include "xfs_bmap.h"
 #include "xfs_dir2_format.h"
+#include "xfs_dir2.h"
 #include "xfs_dir2_priv.h"
 #include "xfs_error.h"
 #include "xfs_trace.h"
@@ -312,11 +313,13 @@ xfs_dir2_free_log_header(
        struct xfs_trans        *tp,
        struct xfs_buf          *bp)
 {
+#ifdef DEBUG
        xfs_dir2_free_t         *free;          /* freespace structure */
 
        free = bp->b_addr;
        ASSERT(free->hdr.magic == cpu_to_be32(XFS_DIR2_FREE_MAGIC) ||
               free->hdr.magic == cpu_to_be32(XFS_DIR3_FREE_MAGIC));
+#endif
        xfs_trans_log_buf(tp, bp, 0, xfs_dir3_free_hdr_size(tp->t_mountp) - 1);
 }
 
index 0511cda4a712a480682b946829c5587cf37c4070..6d2a99c224b7ebb67d4eb38d904ee70425dc703e 100644 (file)
 
 /* xfs_dir2.c */
 extern int xfs_dir_ino_validate(struct xfs_mount *mp, xfs_ino_t ino);
-extern int xfs_dir2_isblock(struct xfs_trans *tp, struct xfs_inode *dp, int *r);
-extern int xfs_dir2_isleaf(struct xfs_trans *tp, struct xfs_inode *dp, int *r);
 extern int xfs_dir2_grow_inode(struct xfs_da_args *args, int space,
                                xfs_dir2_db_t *dbp);
-extern int xfs_dir2_shrink_inode(struct xfs_da_args *args, xfs_dir2_db_t db,
-                               struct xfs_buf *bp);
 extern int xfs_dir_cilookup_result(struct xfs_da_args *args,
                                const unsigned char *name, int len);
 
 /* xfs_dir2_block.c */
-extern const struct xfs_buf_ops xfs_dir3_block_buf_ops;
-
+extern int xfs_dir3_block_read(struct xfs_trans *tp, struct xfs_inode *dp,
+                              struct xfs_buf **bpp);
 extern int xfs_dir2_block_addname(struct xfs_da_args *args);
-extern int xfs_dir2_block_getdents(struct xfs_inode *dp,
-               struct dir_context *ctx);
 extern int xfs_dir2_block_lookup(struct xfs_da_args *args);
 extern int xfs_dir2_block_removename(struct xfs_da_args *args);
 extern int xfs_dir2_block_replace(struct xfs_da_args *args);
@@ -48,9 +42,6 @@ extern int xfs_dir2_leaf_to_block(struct xfs_da_args *args,
 #define        xfs_dir3_data_check(dp,bp)
 #endif
 
-extern const struct xfs_buf_ops xfs_dir3_data_buf_ops;
-extern const struct xfs_buf_ops xfs_dir3_free_buf_ops;
-
 extern int __xfs_dir3_data_check(struct xfs_inode *dp, struct xfs_buf *bp);
 extern int xfs_dir3_data_read(struct xfs_trans *tp, struct xfs_inode *dp,
                xfs_dablk_t bno, xfs_daddr_t mapped_bno, struct xfs_buf **bpp);
@@ -60,27 +51,10 @@ extern int xfs_dir3_data_readahead(struct xfs_trans *tp, struct xfs_inode *dp,
 extern struct xfs_dir2_data_free *
 xfs_dir2_data_freeinsert(struct xfs_dir2_data_hdr *hdr,
                struct xfs_dir2_data_unused *dup, int *loghead);
-extern void xfs_dir2_data_freescan(struct xfs_mount *mp,
-               struct xfs_dir2_data_hdr *hdr, int *loghead);
 extern int xfs_dir3_data_init(struct xfs_da_args *args, xfs_dir2_db_t blkno,
                struct xfs_buf **bpp);
-extern void xfs_dir2_data_log_entry(struct xfs_trans *tp, struct xfs_buf *bp,
-               struct xfs_dir2_data_entry *dep);
-extern void xfs_dir2_data_log_header(struct xfs_trans *tp,
-               struct xfs_buf *bp);
-extern void xfs_dir2_data_log_unused(struct xfs_trans *tp, struct xfs_buf *bp,
-               struct xfs_dir2_data_unused *dup);
-extern void xfs_dir2_data_make_free(struct xfs_trans *tp, struct xfs_buf *bp,
-               xfs_dir2_data_aoff_t offset, xfs_dir2_data_aoff_t len,
-               int *needlogp, int *needscanp);
-extern void xfs_dir2_data_use_free(struct xfs_trans *tp, struct xfs_buf *bp,
-               struct xfs_dir2_data_unused *dup, xfs_dir2_data_aoff_t offset,
-               xfs_dir2_data_aoff_t len, int *needlogp, int *needscanp);
 
 /* xfs_dir2_leaf.c */
-extern const struct xfs_buf_ops xfs_dir3_leaf1_buf_ops;
-extern const struct xfs_buf_ops xfs_dir3_leafn_buf_ops;
-
 extern int xfs_dir3_leafn_read(struct xfs_trans *tp, struct xfs_inode *dp,
                xfs_dablk_t fbno, xfs_daddr_t mappedbno, struct xfs_buf **bpp);
 extern int xfs_dir2_block_to_leaf(struct xfs_da_args *args,
@@ -91,8 +65,6 @@ extern void xfs_dir3_leaf_compact(struct xfs_da_args *args,
 extern void xfs_dir3_leaf_compact_x1(struct xfs_dir3_icleaf_hdr *leafhdr,
                struct xfs_dir2_leaf_entry *ents, int *indexp,
                int *lowstalep, int *highstalep, int *lowlogp, int *highlogp);
-extern int xfs_dir2_leaf_getdents(struct xfs_inode *dp, struct dir_context *ctx,
-               size_t bufsize);
 extern int xfs_dir3_leaf_get_buf(struct xfs_da_args *args, xfs_dir2_db_t bno,
                struct xfs_buf **bpp, __uint16_t magic);
 extern void xfs_dir3_leaf_log_ents(struct xfs_trans *tp, struct xfs_buf *bp,
@@ -144,18 +116,18 @@ extern int xfs_dir2_free_read(struct xfs_trans *tp, struct xfs_inode *dp,
                xfs_dablk_t fbno, struct xfs_buf **bpp);
 
 /* xfs_dir2_sf.c */
-extern xfs_ino_t xfs_dir2_sf_get_parent_ino(struct xfs_dir2_sf_hdr *sfp);
-extern xfs_ino_t xfs_dir2_sfe_get_ino(struct xfs_dir2_sf_hdr *sfp,
-               struct xfs_dir2_sf_entry *sfep);
 extern int xfs_dir2_block_sfsize(struct xfs_inode *dp,
                struct xfs_dir2_data_hdr *block, struct xfs_dir2_sf_hdr *sfhp);
 extern int xfs_dir2_block_to_sf(struct xfs_da_args *args, struct xfs_buf *bp,
                int size, xfs_dir2_sf_hdr_t *sfhp);
 extern int xfs_dir2_sf_addname(struct xfs_da_args *args);
 extern int xfs_dir2_sf_create(struct xfs_da_args *args, xfs_ino_t pino);
-extern int xfs_dir2_sf_getdents(struct xfs_inode *dp, struct dir_context *ctx);
 extern int xfs_dir2_sf_lookup(struct xfs_da_args *args);
 extern int xfs_dir2_sf_removename(struct xfs_da_args *args);
 extern int xfs_dir2_sf_replace(struct xfs_da_args *args);
 
+/* xfs_dir2_readdir.c */
+extern int xfs_readdir(struct xfs_inode *dp, struct dir_context *ctx,
+                      size_t bufsize);
+
 #endif /* __XFS_DIR2_PRIV_H__ */
diff --git a/fs/xfs/xfs_dir2_readdir.c b/fs/xfs/xfs_dir2_readdir.c
new file mode 100644 (file)
index 0000000..5f4f705
--- /dev/null
@@ -0,0 +1,646 @@
+/*
+ * Copyright (c) 2000-2005 Silicon Graphics, Inc.
+ * Copyright (c) 2013 Red Hat, Inc.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+#include "xfs.h"
+#include "xfs_fs.h"
+#include "xfs_types.h"
+#include "xfs_bit.h"
+#include "xfs_log.h"
+#include "xfs_trans.h"
+#include "xfs_sb.h"
+#include "xfs_ag.h"
+#include "xfs_mount.h"
+#include "xfs_da_btree.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_dinode.h"
+#include "xfs_inode.h"
+#include "xfs_dir2_format.h"
+#include "xfs_dir2.h"
+#include "xfs_dir2_priv.h"
+#include "xfs_error.h"
+#include "xfs_trace.h"
+#include "xfs_bmap.h"
+
+STATIC int
+xfs_dir2_sf_getdents(
+       xfs_inode_t             *dp,            /* incore directory inode */
+       struct dir_context      *ctx)
+{
+       int                     i;              /* shortform entry number */
+       xfs_mount_t             *mp;            /* filesystem mount point */
+       xfs_dir2_dataptr_t      off;            /* current entry's offset */
+       xfs_dir2_sf_entry_t     *sfep;          /* shortform directory entry */
+       xfs_dir2_sf_hdr_t       *sfp;           /* shortform structure */
+       xfs_dir2_dataptr_t      dot_offset;
+       xfs_dir2_dataptr_t      dotdot_offset;
+       xfs_ino_t               ino;
+
+       mp = dp->i_mount;
+
+       ASSERT(dp->i_df.if_flags & XFS_IFINLINE);
+       /*
+        * Give up if the directory is way too short.
+        */
+       if (dp->i_d.di_size < offsetof(xfs_dir2_sf_hdr_t, parent)) {
+               ASSERT(XFS_FORCED_SHUTDOWN(mp));
+               return XFS_ERROR(EIO);
+       }
+
+       ASSERT(dp->i_df.if_bytes == dp->i_d.di_size);
+       ASSERT(dp->i_df.if_u1.if_data != NULL);
+
+       sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;
+
+       ASSERT(dp->i_d.di_size >= xfs_dir2_sf_hdr_size(sfp->i8count));
+
+       /*
+        * If the block number in the offset is out of range, we're done.
+        */
+       if (xfs_dir2_dataptr_to_db(mp, ctx->pos) > mp->m_dirdatablk)
+               return 0;
+
+       /*
+        * Precalculate offsets for . and .. as we will always need them.
+        *
+        * XXX(hch): the second argument is sometimes 0 and sometimes
+        * mp->m_dirdatablk.
+        */
+       dot_offset = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk,
+                                            XFS_DIR3_DATA_DOT_OFFSET(mp));
+       dotdot_offset = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk,
+                                               XFS_DIR3_DATA_DOTDOT_OFFSET(mp));
+
+       /*
+        * Put . entry unless we're starting past it.
+        */
+       if (ctx->pos <= dot_offset) {
+               ctx->pos = dot_offset & 0x7fffffff;
+               if (!dir_emit(ctx, ".", 1, dp->i_ino, DT_DIR))
+                       return 0;
+       }
+
+       /*
+        * Put .. entry unless we're starting past it.
+        */
+       if (ctx->pos <= dotdot_offset) {
+               ino = xfs_dir2_sf_get_parent_ino(sfp);
+               ctx->pos = dotdot_offset & 0x7fffffff;
+               if (!dir_emit(ctx, "..", 2, ino, DT_DIR))
+                       return 0;
+       }
+
+       /*
+        * Loop while there are more entries and put'ing works.
+        */
+       sfep = xfs_dir2_sf_firstentry(sfp);
+       for (i = 0; i < sfp->count; i++) {
+               off = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk,
+                               xfs_dir2_sf_get_offset(sfep));
+
+               if (ctx->pos > off) {
+                       sfep = xfs_dir2_sf_nextentry(sfp, sfep);
+                       continue;
+               }
+
+               ino = xfs_dir2_sfe_get_ino(sfp, sfep);
+               ctx->pos = off & 0x7fffffff;
+               if (!dir_emit(ctx, (char *)sfep->name, sfep->namelen,
+                           ino, DT_UNKNOWN))
+                       return 0;
+               sfep = xfs_dir2_sf_nextentry(sfp, sfep);
+       }
+
+       ctx->pos = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk + 1, 0) &
+                       0x7fffffff;
+       return 0;
+}
+
+/*
+ * Readdir for block directories.
+ */
+STATIC int
+xfs_dir2_block_getdents(
+       xfs_inode_t             *dp,            /* incore inode */
+       struct dir_context      *ctx)
+{
+       xfs_dir2_data_hdr_t     *hdr;           /* block header */
+       struct xfs_buf          *bp;            /* buffer for block */
+       xfs_dir2_block_tail_t   *btp;           /* block tail */
+       xfs_dir2_data_entry_t   *dep;           /* block data entry */
+       xfs_dir2_data_unused_t  *dup;           /* block unused entry */
+       char                    *endptr;        /* end of the data entries */
+       int                     error;          /* error return value */
+       xfs_mount_t             *mp;            /* filesystem mount point */
+       char                    *ptr;           /* current data entry */
+       int                     wantoff;        /* starting block offset */
+       xfs_off_t               cook;
+
+       mp = dp->i_mount;
+       /*
+        * If the block number in the offset is out of range, we're done.
+        */
+       if (xfs_dir2_dataptr_to_db(mp, ctx->pos) > mp->m_dirdatablk)
+               return 0;
+
+       error = xfs_dir3_block_read(NULL, dp, &bp);
+       if (error)
+               return error;
+
+       /*
+        * Extract the byte offset we start at from the seek pointer.
+        * We'll skip entries before this.
+        */
+       wantoff = xfs_dir2_dataptr_to_off(mp, ctx->pos);
+       hdr = bp->b_addr;
+       xfs_dir3_data_check(dp, bp);
+       /*
+        * Set up values for the loop.
+        */
+       btp = xfs_dir2_block_tail_p(mp, hdr);
+       ptr = (char *)xfs_dir3_data_entry_p(hdr);
+       endptr = (char *)xfs_dir2_block_leaf_p(btp);
+
+       /*
+        * Loop over the data portion of the block.
+        * Each object is a real entry (dep) or an unused one (dup).
+        */
+       while (ptr < endptr) {
+               dup = (xfs_dir2_data_unused_t *)ptr;
+               /*
+                * Unused, skip it.
+                */
+               if (be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG) {
+                       ptr += be16_to_cpu(dup->length);
+                       continue;
+               }
+
+               dep = (xfs_dir2_data_entry_t *)ptr;
+
+               /*
+                * Bump pointer for the next iteration.
+                */
+               ptr += xfs_dir2_data_entsize(dep->namelen);
+               /*
+                * The entry is before the desired starting point, skip it.
+                */
+               if ((char *)dep - (char *)hdr < wantoff)
+                       continue;
+
+               cook = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk,
+                                           (char *)dep - (char *)hdr);
+
+               ctx->pos = cook & 0x7fffffff;
+               /*
+                * If it didn't fit, set the final offset to here & return.
+                */
+               if (!dir_emit(ctx, (char *)dep->name, dep->namelen,
+                           be64_to_cpu(dep->inumber), DT_UNKNOWN)) {
+                       xfs_trans_brelse(NULL, bp);
+                       return 0;
+               }
+       }
+
+       /*
+        * Reached the end of the block.
+        * Set the offset to a non-existent block 1 and return.
+        */
+       ctx->pos = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk + 1, 0) &
+                       0x7fffffff;
+       xfs_trans_brelse(NULL, bp);
+       return 0;
+}
+
+/*
+ * Bmap/readahead state carried across calls to xfs_dir2_leaf_readbuf()
+ * while walking a leaf/node directory's data blocks.  Allocated with a
+ * variable-length map[] tail sized by map_size (see the kmem_zalloc()
+ * in xfs_dir2_leaf_getdents()).
+ */
+struct xfs_dir2_leaf_map_info {
+       xfs_extlen_t    map_blocks;     /* number of fsbs in map */
+       xfs_dablk_t     map_off;        /* last mapped file offset */
+       int             map_size;       /* total entries in *map */
+       int             map_valid;      /* valid entries in *map */
+       int             nmap;           /* mappings to ask xfs_bmapi */
+       xfs_dir2_db_t   curdb;          /* db for current block */
+       int             ra_current;     /* number of read-ahead blks */
+       int             ra_index;       /* *map index for read-ahead */
+       int             ra_offset;      /* map entry offset for ra */
+       int             ra_want;        /* readahead count wanted */
+       struct xfs_bmbt_irec map[];     /* map vector for blocks */
+};
+
+/*
+ * Advance to and read the next directory data block for readdir.
+ *
+ * Releases the previous buffer (*bpp, if any) and trims its extent out of
+ * the mapping table, refills the table via xfs_bmapi_read() when more
+ * mappings are wanted, reads the data block at the first mapping, and
+ * issues plugged readahead for blocks beyond it.
+ *
+ * On success *bpp holds the new buffer and mip->curdb its directory block
+ * number.  If no valid mappings remain, *curoff is set from mip->map_off
+ * and *bpp comes back NULL with a 0 return.
+ */
+STATIC int
+xfs_dir2_leaf_readbuf(
+       struct xfs_inode        *dp,
+       size_t                  bufsize,
+       struct xfs_dir2_leaf_map_info *mip,
+       xfs_dir2_off_t          *curoff,
+       struct xfs_buf          **bpp)
+{
+       struct xfs_mount        *mp = dp->i_mount;
+       struct xfs_buf          *bp = *bpp;
+       struct xfs_bmbt_irec    *map = mip->map;
+       struct blk_plug         plug;
+       int                     error = 0;
+       int                     length;
+       int                     i;
+       int                     j;
+
+       /*
+        * If we have a buffer, we need to release it and
+        * take it out of the mapping.
+        */
+
+       if (bp) {
+               xfs_trans_brelse(NULL, bp);
+               bp = NULL;
+               mip->map_blocks -= mp->m_dirblkfsbs;
+               /*
+                * Loop to get rid of the extents for the
+                * directory block.
+                */
+               for (i = mp->m_dirblkfsbs; i > 0; ) {
+                       j = min_t(int, map->br_blockcount, i);
+                       map->br_blockcount -= j;
+                       map->br_startblock += j;
+                       map->br_startoff += j;
+                       /*
+                        * If mapping is done, pitch it from
+                        * the table.
+                        */
+                       if (!map->br_blockcount && --mip->map_valid)
+                               memmove(&map[0], &map[1],
+                                       sizeof(map[0]) * mip->map_valid);
+                       i -= j;
+               }
+       }
+
+       /*
+        * Recalculate the readahead blocks wanted.
+        */
+       mip->ra_want = howmany(bufsize + mp->m_dirblksize,
+                              mp->m_sb.sb_blocksize) - 1;
+       ASSERT(mip->ra_want >= 0);
+
+       /*
+        * If we don't have as many as we want, and we haven't
+        * run out of data blocks, get some more mappings.
+        */
+       if (1 + mip->ra_want > mip->map_blocks &&
+           mip->map_off < xfs_dir2_byte_to_da(mp, XFS_DIR2_LEAF_OFFSET)) {
+               /*
+                * Get more bmaps, fill in after the ones
+                * we already have in the table.
+                */
+               mip->nmap = mip->map_size - mip->map_valid;
+               error = xfs_bmapi_read(dp, mip->map_off,
+                               xfs_dir2_byte_to_da(mp, XFS_DIR2_LEAF_OFFSET) -
+                                                               mip->map_off,
+                               &map[mip->map_valid], &mip->nmap, 0);
+
+               /*
+                * Don't know if we should ignore this or try to return an
+                * error.  The trouble with returning errors is that readdir
+                * will just stop without actually passing the error through.
+                */
+               if (error)
+                       goto out;       /* XXX */
+
+               /*
+                * If we got all the mappings we asked for, set the final map
+                * offset based on the last bmap value received.  Otherwise,
+                * we've reached the end.
+                */
+               if (mip->nmap == mip->map_size - mip->map_valid) {
+                       i = mip->map_valid + mip->nmap - 1;
+                       mip->map_off = map[i].br_startoff + map[i].br_blockcount;
+               } else
+                       mip->map_off = xfs_dir2_byte_to_da(mp,
+                                                       XFS_DIR2_LEAF_OFFSET);
+
+               /*
+                * Look for holes in the mapping, and eliminate them.  Count up
+                * the valid blocks.
+                */
+               for (i = mip->map_valid; i < mip->map_valid + mip->nmap; ) {
+                       if (map[i].br_startblock == HOLESTARTBLOCK) {
+                               mip->nmap--;
+                               length = mip->map_valid + mip->nmap - i;
+                               if (length)
+                                       memmove(&map[i], &map[i + 1],
+                                               sizeof(map[i]) * length);
+                       } else {
+                               mip->map_blocks += map[i].br_blockcount;
+                               i++;
+                       }
+               }
+               mip->map_valid += mip->nmap;
+       }
+
+       /*
+        * No valid mappings, so no more data blocks.
+        */
+       if (!mip->map_valid) {
+               *curoff = xfs_dir2_da_to_byte(mp, mip->map_off);
+               goto out;
+       }
+
+       /*
+        * Read the directory block starting at the first mapping.
+        */
+       mip->curdb = xfs_dir2_da_to_db(mp, map->br_startoff);
+       error = xfs_dir3_data_read(NULL, dp, map->br_startoff,
+                       map->br_blockcount >= mp->m_dirblkfsbs ?
+                           XFS_FSB_TO_DADDR(mp, map->br_startblock) : -1, &bp);
+
+       /*
+        * Should just skip over the data block instead of giving up.
+        */
+       if (error)
+               goto out;       /* XXX */
+
+       /*
+        * Adjust the current amount of read-ahead: we just read a block that
+        * was previously ra.
+        */
+       if (mip->ra_current)
+               mip->ra_current -= mp->m_dirblkfsbs;
+
+       /*
+        * Do we need more readahead?
+        */
+       blk_start_plug(&plug);
+       for (mip->ra_index = mip->ra_offset = i = 0;
+            mip->ra_want > mip->ra_current && i < mip->map_blocks;
+            i += mp->m_dirblkfsbs) {
+               ASSERT(mip->ra_index < mip->map_valid);
+               /*
+                * Read-ahead a contiguous directory block.
+                */
+               if (i > mip->ra_current &&
+                   map[mip->ra_index].br_blockcount >= mp->m_dirblkfsbs) {
+                       xfs_dir3_data_readahead(NULL, dp,
+                               map[mip->ra_index].br_startoff + mip->ra_offset,
+                               XFS_FSB_TO_DADDR(mp,
+                                       map[mip->ra_index].br_startblock +
+                                                       mip->ra_offset));
+                       mip->ra_current = i;
+               }
+
+               /*
+                * Read-ahead a non-contiguous directory block.  This doesn't
+                * use our mapping, but this is a very rare case.
+                */
+               else if (i > mip->ra_current) {
+                       xfs_dir3_data_readahead(NULL, dp,
+                                       map[mip->ra_index].br_startoff +
+                                                       mip->ra_offset, -1);
+                       mip->ra_current = i;
+               }
+
+               /*
+                * Advance offset through the mapping table.
+                */
+               /*
+                * NOTE(review): j is bumped both by the loop's j++ and by
+                * the j += length below, so each pass advances length + 1
+                * fsbs -- looks like a double advance; confirm intended.
+                */
+               for (j = 0; j < mp->m_dirblkfsbs; j++) {
+                       /*
+                        * The rest of this extent but not more than a dir
+                        * block.
+                        */
+                       length = min_t(int, mp->m_dirblkfsbs,
+                                       map[mip->ra_index].br_blockcount -
+                                                       mip->ra_offset);
+                       j += length;
+                       mip->ra_offset += length;
+
+                       /*
+                        * Advance to the next mapping if this one is used up.
+                        */
+                       if (mip->ra_offset == map[mip->ra_index].br_blockcount) {
+                               mip->ra_offset = 0;
+                               mip->ra_index++;
+                       }
+               }
+       }
+       blk_finish_plug(&plug);
+
+out:
+       *bpp = bp;
+       return error;
+}
+
+/*
+ * Getdents (readdir) for leaf and node directories.
+ * This reads the data blocks only, so is the same for both forms.
+ *
+ * dp      - incore directory inode
+ * ctx     - VFS directory context; entries are emitted via dir_emit()
+ *           and ctx->pos carries the directory cookie between calls
+ * bufsize - caller's buffer size; sizes the bmap table and bounds how
+ *           far we keep emitting once entries stop fitting
+ */
+STATIC int
+xfs_dir2_leaf_getdents(
+       xfs_inode_t             *dp,            /* incore directory inode */
+       struct dir_context      *ctx,
+       size_t                  bufsize)
+{
+       struct xfs_buf          *bp = NULL;     /* data block buffer */
+       xfs_dir2_data_hdr_t     *hdr;           /* data block header */
+       xfs_dir2_data_entry_t   *dep;           /* data entry */
+       xfs_dir2_data_unused_t  *dup;           /* unused entry */
+       int                     error = 0;      /* error return value */
+       int                     length;         /* temporary length value */
+       xfs_mount_t             *mp;            /* filesystem mount point */
+       int                     byteoff;        /* offset in current block */
+       xfs_dir2_off_t          curoff;         /* current overall offset */
+       xfs_dir2_off_t          newoff;         /* new curoff after new blk */
+       char                    *ptr = NULL;    /* pointer to current data */
+       struct xfs_dir2_leaf_map_info *map_info;
+
+       /*
+        * If the offset is at or past the largest allowed value,
+        * give up right away.
+        */
+       if (ctx->pos >= XFS_DIR2_MAX_DATAPTR)
+               return 0;
+
+       mp = dp->i_mount;
+
+       /*
+        * Set up to bmap a number of blocks based on the caller's
+        * buffer size, the directory block size, and the filesystem
+        * block size.
+        */
+       length = howmany(bufsize + mp->m_dirblksize,
+                                    mp->m_sb.sb_blocksize);
+       map_info = kmem_zalloc(offsetof(struct xfs_dir2_leaf_map_info, map) +
+                               (length * sizeof(struct xfs_bmbt_irec)),
+                              KM_SLEEP | KM_NOFS);
+       map_info->map_size = length;
+
+       /*
+        * Inside the loop we keep the main offset value as a byte offset
+        * in the directory file.
+        */
+       curoff = xfs_dir2_dataptr_to_byte(mp, ctx->pos);
+
+       /*
+        * Force this conversion through db so we truncate the offset
+        * down to get the start of the data block.
+        */
+       map_info->map_off = xfs_dir2_db_to_da(mp,
+                                             xfs_dir2_byte_to_db(mp, curoff));
+
+       /*
+        * Loop over directory entries until we reach the end offset.
+        * Get more blocks and readahead as necessary.
+        */
+       while (curoff < XFS_DIR2_LEAF_OFFSET) {
+               /*
+                * If we have no buffer, or we're off the end of the
+                * current buffer, need to get another one.
+                */
+               if (!bp || ptr >= (char *)bp->b_addr + mp->m_dirblksize) {
+
+                       error = xfs_dir2_leaf_readbuf(dp, bufsize, map_info,
+                                                     &curoff, &bp);
+                       if (error || !map_info->map_valid)
+                               break;
+
+                       /*
+                        * Having done a read, we need to set a new offset.
+                        */
+                       newoff = xfs_dir2_db_off_to_byte(mp, map_info->curdb, 0);
+                       /*
+                        * Start of the current block.
+                        */
+                       if (curoff < newoff)
+                               curoff = newoff;
+                       /*
+                        * Make sure we're in the right block.
+                        */
+                       else if (curoff > newoff)
+                               ASSERT(xfs_dir2_byte_to_db(mp, curoff) ==
+                                      map_info->curdb);
+                       hdr = bp->b_addr;
+                       xfs_dir3_data_check(dp, bp);
+                       /*
+                        * Find our position in the block.
+                        */
+                       ptr = (char *)xfs_dir3_data_entry_p(hdr);
+                       byteoff = xfs_dir2_byte_to_off(mp, curoff);
+                       /*
+                        * Skip past the header.
+                        */
+                       if (byteoff == 0)
+                               curoff += xfs_dir3_data_entry_offset(hdr);
+                       /*
+                        * Skip past entries until we reach our offset.
+                        */
+                       else {
+                               while ((char *)ptr - (char *)hdr < byteoff) {
+                                       dup = (xfs_dir2_data_unused_t *)ptr;
+
+                                       if (be16_to_cpu(dup->freetag)
+                                                 == XFS_DIR2_DATA_FREE_TAG) {
+
+                                               length = be16_to_cpu(dup->length);
+                                               ptr += length;
+                                               continue;
+                                       }
+                                       dep = (xfs_dir2_data_entry_t *)ptr;
+                                       length =
+                                          xfs_dir2_data_entsize(dep->namelen);
+                                       ptr += length;
+                               }
+                               /*
+                                * Now set our real offset.
+                                */
+                               curoff =
+                                       xfs_dir2_db_off_to_byte(mp,
+                                           xfs_dir2_byte_to_db(mp, curoff),
+                                           (char *)ptr - (char *)hdr);
+                               if (ptr >= (char *)hdr + mp->m_dirblksize) {
+                                       continue;
+                               }
+                       }
+               }
+               /*
+                * We have a pointer to an entry.
+                * Is it a live one?
+                */
+               dup = (xfs_dir2_data_unused_t *)ptr;
+               /*
+                * No, it's unused, skip over it.
+                */
+               if (be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG) {
+                       length = be16_to_cpu(dup->length);
+                       ptr += length;
+                       curoff += length;
+                       continue;
+               }
+
+               dep = (xfs_dir2_data_entry_t *)ptr;
+               length = xfs_dir2_data_entsize(dep->namelen);
+
+               /*
+                * Cookie is masked to 31 bits -- presumably to keep it
+                * non-negative in signed offset contexts (TODO confirm);
+                * the same mask is applied at every ctx->pos store here.
+                */
+               ctx->pos = xfs_dir2_byte_to_dataptr(mp, curoff) & 0x7fffffff;
+               if (!dir_emit(ctx, (char *)dep->name, dep->namelen,
+                           be64_to_cpu(dep->inumber), DT_UNKNOWN))
+                       break;
+
+               /*
+                * Advance to next entry in the block.
+                */
+               ptr += length;
+               curoff += length;
+               /* bufsize may have just been a guess; don't go negative */
+               bufsize = bufsize > length ? bufsize - length : 0;
+       }
+
+       /*
+        * All done.  Set output offset value to current offset.
+        */
+       if (curoff > xfs_dir2_dataptr_to_byte(mp, XFS_DIR2_MAX_DATAPTR))
+               ctx->pos = XFS_DIR2_MAX_DATAPTR & 0x7fffffff;
+       else
+               ctx->pos = xfs_dir2_byte_to_dataptr(mp, curoff) & 0x7fffffff;
+       kmem_free(map_info);
+       if (bp)
+               xfs_trans_brelse(NULL, bp);
+       return error;
+}
+
+/*
+ * Read a directory.
+ *
+ * Dispatches on the directory's storage format: shortform (inline in the
+ * inode), single data block, or leaf/node.  Returns 0 or an XFS error code.
+ */
+int
+xfs_readdir(
+       xfs_inode_t     *dp,            /* incore directory inode */
+       struct dir_context *ctx,
+       size_t          bufsize)
+{
+       int             rval;           /* return value */
+       int             v;              /* type-checking value */
+
+       trace_xfs_readdir(dp);
+
+       if (XFS_FORCED_SHUTDOWN(dp->i_mount))
+               return XFS_ERROR(EIO);
+
+       ASSERT(S_ISDIR(dp->i_d.di_mode));
+       XFS_STATS_INC(xs_dir_getdents);
+
+       /* Shortform directories live entirely inside the inode. */
+       if (dp->i_d.di_format == XFS_DINODE_FMT_LOCAL)
+               return xfs_dir2_sf_getdents(dp, ctx);
+
+       /* Otherwise choose between single-block and leaf/node form. */
+       rval = xfs_dir2_isblock(NULL, dp, &v);
+       if (rval)
+               return rval;
+       if (v)
+               return xfs_dir2_block_getdents(dp, ctx);
+       return xfs_dir2_leaf_getdents(dp, ctx, bufsize);
+}
index 97676a347da166e5d843db8ab2052666390e018d..65b65c5f8c3cdbe933135a3200763811e34976e8 100644 (file)
@@ -29,8 +29,8 @@
 #include "xfs_inode.h"
 #include "xfs_inode_item.h"
 #include "xfs_error.h"
-#include "xfs_dir2.h"
 #include "xfs_dir2_format.h"
+#include "xfs_dir2.h"
 #include "xfs_dir2_priv.h"
 #include "xfs_trace.h"
 
@@ -95,7 +95,7 @@ xfs_dir2_sf_get_parent_ino(
        return xfs_dir2_sf_get_ino(hdr, &hdr->parent);
 }
 
-static void
+void
 xfs_dir2_sf_put_parent_ino(
        struct xfs_dir2_sf_hdr  *hdr,
        xfs_ino_t               ino)
@@ -123,7 +123,7 @@ xfs_dir2_sfe_get_ino(
        return xfs_dir2_sf_get_ino(hdr, xfs_dir2_sfe_inop(sfep));
 }
 
-static void
+void
 xfs_dir2_sfe_put_ino(
        struct xfs_dir2_sf_hdr  *hdr,
        struct xfs_dir2_sf_entry *sfep,
@@ -765,100 +765,6 @@ xfs_dir2_sf_create(
        return 0;
 }
 
-int                                            /* error */
-xfs_dir2_sf_getdents(
-       xfs_inode_t             *dp,            /* incore directory inode */
-       struct dir_context      *ctx)
-{
-       int                     i;              /* shortform entry number */
-       xfs_mount_t             *mp;            /* filesystem mount point */
-       xfs_dir2_dataptr_t      off;            /* current entry's offset */
-       xfs_dir2_sf_entry_t     *sfep;          /* shortform directory entry */
-       xfs_dir2_sf_hdr_t       *sfp;           /* shortform structure */
-       xfs_dir2_dataptr_t      dot_offset;
-       xfs_dir2_dataptr_t      dotdot_offset;
-       xfs_ino_t               ino;
-
-       mp = dp->i_mount;
-
-       ASSERT(dp->i_df.if_flags & XFS_IFINLINE);
-       /*
-        * Give up if the directory is way too short.
-        */
-       if (dp->i_d.di_size < offsetof(xfs_dir2_sf_hdr_t, parent)) {
-               ASSERT(XFS_FORCED_SHUTDOWN(mp));
-               return XFS_ERROR(EIO);
-       }
-
-       ASSERT(dp->i_df.if_bytes == dp->i_d.di_size);
-       ASSERT(dp->i_df.if_u1.if_data != NULL);
-
-       sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;
-
-       ASSERT(dp->i_d.di_size >= xfs_dir2_sf_hdr_size(sfp->i8count));
-
-       /*
-        * If the block number in the offset is out of range, we're done.
-        */
-       if (xfs_dir2_dataptr_to_db(mp, ctx->pos) > mp->m_dirdatablk)
-               return 0;
-
-       /*
-        * Precalculate offsets for . and .. as we will always need them.
-        *
-        * XXX(hch): the second argument is sometimes 0 and sometimes
-        * mp->m_dirdatablk.
-        */
-       dot_offset = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk,
-                                            XFS_DIR3_DATA_DOT_OFFSET(mp));
-       dotdot_offset = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk,
-                                               XFS_DIR3_DATA_DOTDOT_OFFSET(mp));
-
-       /*
-        * Put . entry unless we're starting past it.
-        */
-       if (ctx->pos <= dot_offset) {
-               ctx->pos = dot_offset & 0x7fffffff;
-               if (!dir_emit(ctx, ".", 1, dp->i_ino, DT_DIR))
-                       return 0;
-       }
-
-       /*
-        * Put .. entry unless we're starting past it.
-        */
-       if (ctx->pos <= dotdot_offset) {
-               ino = xfs_dir2_sf_get_parent_ino(sfp);
-               ctx->pos = dotdot_offset & 0x7fffffff;
-               if (!dir_emit(ctx, "..", 2, ino, DT_DIR))
-                       return 0;
-       }
-
-       /*
-        * Loop while there are more entries and put'ing works.
-        */
-       sfep = xfs_dir2_sf_firstentry(sfp);
-       for (i = 0; i < sfp->count; i++) {
-               off = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk,
-                               xfs_dir2_sf_get_offset(sfep));
-
-               if (ctx->pos > off) {
-                       sfep = xfs_dir2_sf_nextentry(sfp, sfep);
-                       continue;
-               }
-
-               ino = xfs_dir2_sfe_get_ino(sfp, sfep);
-               ctx->pos = off & 0x7fffffff;
-               if (!dir_emit(ctx, (char *)sfep->name, sfep->namelen,
-                           ino, DT_UNKNOWN))
-                       return 0;
-               sfep = xfs_dir2_sf_nextentry(sfp, sfep);
-       }
-
-       ctx->pos = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk + 1, 0) &
-                       0x7fffffff;
-       return 0;
-}
-
 /*
  * Lookup an entry in a shortform directory.
  * Returns EEXIST if found, ENOENT if not found.
index 69cf4fcde03e2d31266f70f6dfee1b73fe71b4a7..45560ee1a4ba8b1ccfdc36f9616cc5355e03558d 100644 (file)
  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
 #include "xfs.h"
-#include "xfs_sb.h"
+#include "xfs_format.h"
 #include "xfs_log.h"
+#include "xfs_trans.h"
+#include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
 #include "xfs_quota.h"
-#include "xfs_trans.h"
 #include "xfs_alloc_btree.h"
 #include "xfs_bmap_btree.h"
 #include "xfs_ialloc_btree.h"
index 0adf27ecf3f1cd4e98d8fcc06a732e4d476437db..251c66632e5e7d926e92942d764e49220237dd11 100644 (file)
@@ -17,6 +17,7 @@
  */
 #include "xfs.h"
 #include "xfs_fs.h"
+#include "xfs_format.h"
 #include "xfs_bit.h"
 #include "xfs_log.h"
 #include "xfs_trans.h"
@@ -28,6 +29,7 @@
 #include "xfs_bmap_btree.h"
 #include "xfs_inode.h"
 #include "xfs_bmap.h"
+#include "xfs_bmap_util.h"
 #include "xfs_rtalloc.h"
 #include "xfs_error.h"
 #include "xfs_itable.h"
@@ -710,10 +712,8 @@ xfs_qm_dqread(
 
        if (flags & XFS_QMOPT_DQALLOC) {
                tp = xfs_trans_alloc(mp, XFS_TRANS_QM_DQALLOC);
-               error = xfs_trans_reserve(tp, XFS_QM_DQALLOC_SPACE_RES(mp),
-                                         XFS_QM_DQALLOC_LOG_RES(mp), 0,
-                                         XFS_TRANS_PERM_LOG_RES,
-                                         XFS_WRITE_LOG_COUNT);
+               error = xfs_trans_reserve(tp, &M_RES(mp)->tr_attrsetm,
+                                         XFS_QM_DQALLOC_SPACE_RES(mp), 0);
                if (error)
                        goto error1;
                cancelflags = XFS_TRANS_RELEASE_LOG_RES;
index 57aa4b03720cb7440acef44a920d18f81955ba72..60c6e1f126952acc43e1bbe2a1d065f304ed484d 100644 (file)
@@ -17,6 +17,7 @@
  */
 #include "xfs.h"
 #include "xfs_fs.h"
+#include "xfs_format.h"
 #include "xfs_log.h"
 #include "xfs_trans.h"
 #include "xfs_sb.h"
@@ -43,14 +44,15 @@ static inline struct xfs_dq_logitem *DQUOT_ITEM(struct xfs_log_item *lip)
 /*
  * returns the number of iovecs needed to log the given dquot item.
  */
-STATIC uint
+STATIC void
 xfs_qm_dquot_logitem_size(
-       struct xfs_log_item     *lip)
+       struct xfs_log_item     *lip,
+       int                     *nvecs,
+       int                     *nbytes)
 {
-       /*
-        * we need only two iovecs, one for the format, one for the real thing
-        */
-       return 2;
+       *nvecs += 2;
+       *nbytes += sizeof(struct xfs_dq_logformat) +
+                  sizeof(struct xfs_disk_dquot);
 }
 
 /*
@@ -285,11 +287,14 @@ static inline struct xfs_qoff_logitem *QOFF_ITEM(struct xfs_log_item *lip)
  * We only need 1 iovec for an quotaoff item.  It just logs the
  * quotaoff_log_format structure.
  */
-STATIC uint
+STATIC void
 xfs_qm_qoff_logitem_size(
-       struct xfs_log_item     *lip)
+       struct xfs_log_item     *lip,
+       int                     *nvecs,
+       int                     *nbytes)
 {
-       return 1;
+       *nvecs += 1;
+       *nbytes += sizeof(struct xfs_qoff_logitem);
 }
 
 /*
index 35d3f5b041ddc0977f47981cb88991edee857245..1123d93ff79546efe3a9d962460ab1e44e2e1bac 100644 (file)
@@ -26,7 +26,6 @@
 #include "xfs_bmap_btree.h"
 #include "xfs_dinode.h"
 #include "xfs_inode.h"
-#include "xfs_utils.h"
 #include "xfs_error.h"
 
 #ifdef DEBUG
index c585bc646395e04c5497621eeb4ec34bd1f262e7..066df425c14ffca5b4dacb20f7b3c6fcdd139acb 100644 (file)
 #include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
-#include "xfs_dir2.h"
 #include "xfs_mount.h"
+#include "xfs_da_btree.h"
+#include "xfs_dir2_format.h"
+#include "xfs_dir2.h"
 #include "xfs_export.h"
-#include "xfs_vnodeops.h"
 #include "xfs_bmap_btree.h"
 #include "xfs_inode.h"
 #include "xfs_inode_item.h"
index 85e9f87a1a7ce7945da58e305a21818262542cfa..86f559f6e5d3c3f825df5aeffbf6967a7654f055 100644 (file)
@@ -147,7 +147,7 @@ xfs_extent_busy_search(
  * extent.  If the overlap covers the beginning, the end, or all of the busy
  * extent, the overlapping portion can be made unbusy and used for the
  * allocation.  We can't split a busy extent because we can't modify a
- * transaction/CIL context busy list, but we can update an entries block
+ * transaction/CIL context busy list, but we can update an entry's block
  * number or length.
  *
  * Returns true if the extent can safely be reused, or false if the search
index 452920a3f03fb2e4405ce52e34587e55acfb7abe..dc53e8febbbeaa54812b4e72dc25938718da69c1 100644 (file)
@@ -73,11 +73,22 @@ __xfs_efi_release(
  * We only need 1 iovec for an efi item.  It just logs the efi_log_format
  * structure.
  */
-STATIC uint
+static inline int
+xfs_efi_item_sizeof(
+       struct xfs_efi_log_item *efip)
+{
+       return sizeof(struct xfs_efi_log_format) +
+              (efip->efi_format.efi_nextents - 1) * sizeof(xfs_extent_t);
+}
+
+STATIC void
 xfs_efi_item_size(
-       struct xfs_log_item     *lip)
+       struct xfs_log_item     *lip,
+       int                     *nvecs,
+       int                     *nbytes)
 {
-       return 1;
+       *nvecs += 1;
+       *nbytes += xfs_efi_item_sizeof(EFI_ITEM(lip));
 }
 
 /*
@@ -93,21 +104,17 @@ xfs_efi_item_format(
        struct xfs_log_iovec    *log_vector)
 {
        struct xfs_efi_log_item *efip = EFI_ITEM(lip);
-       uint                    size;
 
        ASSERT(atomic_read(&efip->efi_next_extent) ==
                                efip->efi_format.efi_nextents);
 
        efip->efi_format.efi_type = XFS_LI_EFI;
-
-       size = sizeof(xfs_efi_log_format_t);
-       size += (efip->efi_format.efi_nextents - 1) * sizeof(xfs_extent_t);
        efip->efi_format.efi_size = 1;
 
        log_vector->i_addr = &efip->efi_format;
-       log_vector->i_len = size;
+       log_vector->i_len = xfs_efi_item_sizeof(efip);
        log_vector->i_type = XLOG_REG_TYPE_EFI_FORMAT;
-       ASSERT(size >= sizeof(xfs_efi_log_format_t));
+       ASSERT(log_vector->i_len >= sizeof(xfs_efi_log_format_t));
 }
 
 
@@ -333,11 +340,22 @@ xfs_efd_item_free(struct xfs_efd_log_item *efdp)
  * We only need 1 iovec for an efd item.  It just logs the efd_log_format
  * structure.
  */
-STATIC uint
+static inline int
+xfs_efd_item_sizeof(
+       struct xfs_efd_log_item *efdp)
+{
+       return sizeof(xfs_efd_log_format_t) +
+              (efdp->efd_format.efd_nextents - 1) * sizeof(xfs_extent_t);
+}
+
+STATIC void
 xfs_efd_item_size(
-       struct xfs_log_item     *lip)
+       struct xfs_log_item     *lip,
+       int                     *nvecs,
+       int                     *nbytes)
 {
-       return 1;
+       *nvecs += 1;
+       *nbytes += xfs_efd_item_sizeof(EFD_ITEM(lip));
 }
 
 /*
@@ -353,20 +371,16 @@ xfs_efd_item_format(
        struct xfs_log_iovec    *log_vector)
 {
        struct xfs_efd_log_item *efdp = EFD_ITEM(lip);
-       uint                    size;
 
        ASSERT(efdp->efd_next_extent == efdp->efd_format.efd_nextents);
 
        efdp->efd_format.efd_type = XFS_LI_EFD;
-
-       size = sizeof(xfs_efd_log_format_t);
-       size += (efdp->efd_format.efd_nextents - 1) * sizeof(xfs_extent_t);
        efdp->efd_format.efd_size = 1;
 
        log_vector->i_addr = &efdp->efd_format;
-       log_vector->i_len = size;
+       log_vector->i_len = xfs_efd_item_sizeof(efdp);
        log_vector->i_type = XLOG_REG_TYPE_EFD_FORMAT;
-       ASSERT(size >= sizeof(xfs_efd_log_format_t));
+       ASSERT(log_vector->i_len >= sizeof(xfs_efd_log_format_t));
 }
 
 /*
index 432222418c566f6d7f30bed9951289ea9c5f1d10..0ffbce32d5693e05e042de8983c8274942616196 100644 (file)
 #ifndef        __XFS_EXTFREE_ITEM_H__
 #define        __XFS_EXTFREE_ITEM_H__
 
+/* kernel only EFI/EFD definitions */
+
 struct xfs_mount;
 struct kmem_zone;
 
-typedef struct xfs_extent {
-       xfs_dfsbno_t    ext_start;
-       xfs_extlen_t    ext_len;
-} xfs_extent_t;
-
-/*
- * Since an xfs_extent_t has types (start:64, len: 32)
- * there are different alignments on 32 bit and 64 bit kernels.
- * So we provide the different variants for use by a
- * conversion routine.
- */
-
-typedef struct xfs_extent_32 {
-       __uint64_t      ext_start;
-       __uint32_t      ext_len;
-} __attribute__((packed)) xfs_extent_32_t;
-
-typedef struct xfs_extent_64 {
-       __uint64_t      ext_start;
-       __uint32_t      ext_len;
-       __uint32_t      ext_pad;
-} xfs_extent_64_t;
-
-/*
- * This is the structure used to lay out an efi log item in the
- * log.  The efi_extents field is a variable size array whose
- * size is given by efi_nextents.
- */
-typedef struct xfs_efi_log_format {
-       __uint16_t              efi_type;       /* efi log item type */
-       __uint16_t              efi_size;       /* size of this item */
-       __uint32_t              efi_nextents;   /* # extents to free */
-       __uint64_t              efi_id;         /* efi identifier */
-       xfs_extent_t            efi_extents[1]; /* array of extents to free */
-} xfs_efi_log_format_t;
-
-typedef struct xfs_efi_log_format_32 {
-       __uint16_t              efi_type;       /* efi log item type */
-       __uint16_t              efi_size;       /* size of this item */
-       __uint32_t              efi_nextents;   /* # extents to free */
-       __uint64_t              efi_id;         /* efi identifier */
-       xfs_extent_32_t         efi_extents[1]; /* array of extents to free */
-} __attribute__((packed)) xfs_efi_log_format_32_t;
-
-typedef struct xfs_efi_log_format_64 {
-       __uint16_t              efi_type;       /* efi log item type */
-       __uint16_t              efi_size;       /* size of this item */
-       __uint32_t              efi_nextents;   /* # extents to free */
-       __uint64_t              efi_id;         /* efi identifier */
-       xfs_extent_64_t         efi_extents[1]; /* array of extents to free */
-} xfs_efi_log_format_64_t;
-
-/*
- * This is the structure used to lay out an efd log item in the
- * log.  The efd_extents array is a variable size array whose
- * size is given by efd_nextents;
- */
-typedef struct xfs_efd_log_format {
-       __uint16_t              efd_type;       /* efd log item type */
-       __uint16_t              efd_size;       /* size of this item */
-       __uint32_t              efd_nextents;   /* # of extents freed */
-       __uint64_t              efd_efi_id;     /* id of corresponding efi */
-       xfs_extent_t            efd_extents[1]; /* array of extents freed */
-} xfs_efd_log_format_t;
-
-typedef struct xfs_efd_log_format_32 {
-       __uint16_t              efd_type;       /* efd log item type */
-       __uint16_t              efd_size;       /* size of this item */
-       __uint32_t              efd_nextents;   /* # of extents freed */
-       __uint64_t              efd_efi_id;     /* id of corresponding efi */
-       xfs_extent_32_t         efd_extents[1]; /* array of extents freed */
-} __attribute__((packed)) xfs_efd_log_format_32_t;
-
-typedef struct xfs_efd_log_format_64 {
-       __uint16_t              efd_type;       /* efd log item type */
-       __uint16_t              efd_size;       /* size of this item */
-       __uint32_t              efd_nextents;   /* # of extents freed */
-       __uint64_t              efd_efi_id;     /* id of corresponding efi */
-       xfs_extent_64_t         efd_extents[1]; /* array of extents freed */
-} xfs_efd_log_format_64_t;
-
-
-#ifdef __KERNEL__
-
 /*
  * Max number of extents in fast allocation path.
  */
@@ -160,6 +78,4 @@ int                  xfs_efi_copy_format(xfs_log_iovec_t *buf,
                                            xfs_efi_log_format_t *dst_efi_fmt);
 void                   xfs_efi_item_free(xfs_efi_log_item_t *);
 
-#endif /* __KERNEL__ */
-
 #endif /* __XFS_EXTFREE_ITEM_H__ */
index de3dc98f4e8f76067c1e7d0ee4631a8638d87988..818c6230bb35540f61e824c124f4bae4ed8207c2 100644 (file)
 #include "xfs_inode.h"
 #include "xfs_inode_item.h"
 #include "xfs_bmap.h"
+#include "xfs_bmap_util.h"
 #include "xfs_error.h"
-#include "xfs_vnodeops.h"
 #include "xfs_da_btree.h"
 #include "xfs_dir2_format.h"
+#include "xfs_dir2.h"
 #include "xfs_dir2_priv.h"
 #include "xfs_ioctl.h"
 #include "xfs_trace.h"
@@ -226,10 +227,9 @@ xfs_file_fsync(
 }
 
 STATIC ssize_t
-xfs_file_aio_read(
+xfs_file_read_iter(
        struct kiocb            *iocb,
-       const struct iovec      *iovp,
-       unsigned long           nr_segs,
+       struct iov_iter         *iter,
        loff_t                  pos)
 {
        struct file             *file = iocb->ki_filp;
@@ -250,9 +250,7 @@ xfs_file_aio_read(
        if (file->f_mode & FMODE_NOCMTIME)
                ioflags |= IO_INVIS;
 
-       ret = generic_segment_checks(iovp, &nr_segs, &size, VERIFY_WRITE);
-       if (ret < 0)
-               return ret;
+       size = iov_iter_count(iter);
 
        if (unlikely(ioflags & IO_ISDIRECT)) {
                xfs_buftarg_t   *target =
@@ -305,7 +303,7 @@ xfs_file_aio_read(
 
        trace_xfs_file_read(ip, size, pos, ioflags);
 
-       ret = generic_file_aio_read(iocb, iovp, nr_segs, pos);
+       ret = generic_file_read_iter(iocb, iter, pos);
        if (ret > 0)
                XFS_STATS_ADD(xs_read_bytes, ret);
 
@@ -621,10 +619,9 @@ restart:
 STATIC ssize_t
 xfs_file_dio_aio_write(
        struct kiocb            *iocb,
-       const struct iovec      *iovp,
-       unsigned long           nr_segs,
+       struct iov_iter         *iter,
        loff_t                  pos,
-       size_t                  ocount)
+       size_t                  count)
 {
        struct file             *file = iocb->ki_filp;
        struct address_space    *mapping = file->f_mapping;
@@ -632,7 +629,6 @@ xfs_file_dio_aio_write(
        struct xfs_inode        *ip = XFS_I(inode);
        struct xfs_mount        *mp = ip->i_mount;
        ssize_t                 ret = 0;
-       size_t                  count = ocount;
        int                     unaligned_io = 0;
        int                     iolock;
        struct xfs_buftarg      *target = XFS_IS_REALTIME_INODE(ip) ?
@@ -692,8 +688,8 @@ xfs_file_dio_aio_write(
        }
 
        trace_xfs_file_direct_write(ip, count, iocb->ki_pos, 0);
-       ret = generic_file_direct_write(iocb, iovp,
-                       &nr_segs, pos, &iocb->ki_pos, count, ocount);
+       ret = generic_file_direct_write_iter(iocb, iter,
+                       pos, &iocb->ki_pos, count);
 
 out:
        xfs_rw_iunlock(ip, iolock);
@@ -706,10 +702,9 @@ out:
 STATIC ssize_t
 xfs_file_buffered_aio_write(
        struct kiocb            *iocb,
-       const struct iovec      *iovp,
-       unsigned long           nr_segs,
+       struct iov_iter         *iter,
        loff_t                  pos,
-       size_t                  ocount)
+       size_t                  count)
 {
        struct file             *file = iocb->ki_filp;
        struct address_space    *mapping = file->f_mapping;
@@ -718,7 +713,6 @@ xfs_file_buffered_aio_write(
        ssize_t                 ret;
        int                     enospc = 0;
        int                     iolock = XFS_IOLOCK_EXCL;
-       size_t                  count = ocount;
 
        xfs_rw_ilock(ip, iolock);
 
@@ -731,7 +725,7 @@ xfs_file_buffered_aio_write(
 
 write_retry:
        trace_xfs_file_buffered_write(ip, count, iocb->ki_pos, 0);
-       ret = generic_file_buffered_write(iocb, iovp, nr_segs,
+       ret = generic_file_buffered_write_iter(iocb, iter,
                        pos, &iocb->ki_pos, count, 0);
 
        /*
@@ -752,10 +746,9 @@ out:
 }
 
 STATIC ssize_t
-xfs_file_aio_write(
+xfs_file_write_iter(
        struct kiocb            *iocb,
-       const struct iovec      *iovp,
-       unsigned long           nr_segs,
+       struct iov_iter         *iter,
        loff_t                  pos)
 {
        struct file             *file = iocb->ki_filp;
@@ -763,17 +756,15 @@ xfs_file_aio_write(
        struct inode            *inode = mapping->host;
        struct xfs_inode        *ip = XFS_I(inode);
        ssize_t                 ret;
-       size_t                  ocount = 0;
+       size_t                  count = 0;
 
        XFS_STATS_INC(xs_write_calls);
 
        BUG_ON(iocb->ki_pos != pos);
 
-       ret = generic_segment_checks(iovp, &nr_segs, &ocount, VERIFY_READ);
-       if (ret)
-               return ret;
+       count = iov_iter_count(iter);
 
-       if (ocount == 0)
+       if (count == 0)
                return 0;
 
        if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
@@ -782,10 +773,9 @@ xfs_file_aio_write(
        }
 
        if (unlikely(file->f_flags & O_DIRECT))
-               ret = xfs_file_dio_aio_write(iocb, iovp, nr_segs, pos, ocount);
+               ret = xfs_file_dio_aio_write(iocb, iter, pos, count);
        else
-               ret = xfs_file_buffered_aio_write(iocb, iovp, nr_segs, pos,
-                                                 ocount);
+               ret = xfs_file_buffered_aio_write(iocb, iter, pos, count);
 
        if (ret > 0) {
                ssize_t err;
@@ -1410,8 +1400,8 @@ const struct file_operations xfs_file_operations = {
        .llseek         = xfs_file_llseek,
        .read           = do_sync_read,
        .write          = do_sync_write,
-       .aio_read       = xfs_file_aio_read,
-       .aio_write      = xfs_file_aio_write,
+       .read_iter      = xfs_file_read_iter,
+       .write_iter     = xfs_file_write_iter,
        .splice_read    = xfs_file_splice_read,
        .splice_write   = xfs_file_splice_write,
        .unlocked_ioctl = xfs_file_ioctl,
index 5170306a1009e22e287c425b1968d97b7a885a82..ce78e654d37b73693aa4c637e021dda9154ad5d6 100644 (file)
  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
 #include "xfs.h"
+#include "xfs_log.h"
 #include "xfs_bmap_btree.h"
 #include "xfs_inum.h"
 #include "xfs_dinode.h"
 #include "xfs_inode.h"
 #include "xfs_ag.h"
-#include "xfs_log.h"
 #include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_mount.h"
 #include "xfs_bmap.h"
+#include "xfs_bmap_util.h"
 #include "xfs_alloc.h"
-#include "xfs_utils.h"
 #include "xfs_mru_cache.h"
 #include "xfs_filestream.h"
 #include "xfs_trace.h"
@@ -668,8 +668,8 @@ exit:
  */
 int
 xfs_filestream_new_ag(
-       xfs_bmalloca_t  *ap,
-       xfs_agnumber_t  *agp)
+       struct xfs_bmalloca     *ap,
+       xfs_agnumber_t          *agp)
 {
        int             flags, err;
        xfs_inode_t     *ip, *pip = NULL;
index 09dd9af454349b57fe2a8a7bec0f3c5c3c7e4e3e..6d61dbee8564b12cca254721bf78685975799a74 100644 (file)
@@ -18,8 +18,6 @@
 #ifndef __XFS_FILESTREAM_H__
 #define __XFS_FILESTREAM_H__
 
-#ifdef __KERNEL__
-
 struct xfs_mount;
 struct xfs_inode;
 struct xfs_perag;
@@ -69,6 +67,4 @@ xfs_inode_is_filestream(
                (ip->i_d.di_flags & XFS_DIFLAG_FILESTREAM);
 }
 
-#endif /* __KERNEL__ */
-
 #endif /* __XFS_FILESTREAM_H__ */
diff --git a/fs/xfs/xfs_format.h b/fs/xfs/xfs_format.h
new file mode 100644 (file)
index 0000000..35c08ff
--- /dev/null
@@ -0,0 +1,169 @@
+/*
+ * Copyright (c) 2000-2005 Silicon Graphics, Inc.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+#ifndef __XFS_FORMAT_H__
+#define __XFS_FORMAT_H__
+
+/*
+ * XFS On Disk Format Definitions
+ *
+ * This header file defines all the on-disk format definitions for 
+ * general XFS objects. Directory and attribute related objects are defined in
+ * xfs_da_format.h, which log and log item formats are defined in
+ * xfs_log_format.h. Everything else goes here.
+ */
+
+struct xfs_mount;
+struct xfs_trans;
+struct xfs_inode;
+struct xfs_buf;
+struct xfs_ifork;
+
+/*
+ * RealTime Device format definitions
+ */
+
+/* Min and max rt extent sizes, specified in bytes */
+#define        XFS_MAX_RTEXTSIZE       (1024 * 1024 * 1024)    /* 1GB */
+#define        XFS_DFL_RTEXTSIZE       (64 * 1024)             /* 64kB */
+#define        XFS_MIN_RTEXTSIZE       (4 * 1024)              /* 4kB */
+
+#define        XFS_BLOCKSIZE(mp)       ((mp)->m_sb.sb_blocksize)
+#define        XFS_BLOCKMASK(mp)       ((mp)->m_blockmask)
+#define        XFS_BLOCKWSIZE(mp)      ((mp)->m_blockwsize)
+#define        XFS_BLOCKWMASK(mp)      ((mp)->m_blockwmask)
+
+/*
+ * RT Summary and bit manipulation macros.
+ */
+#define        XFS_SUMOFFS(mp,ls,bb)   ((int)((ls) * (mp)->m_sb.sb_rbmblocks + (bb)))
+#define        XFS_SUMOFFSTOBLOCK(mp,s)        \
+       (((s) * (uint)sizeof(xfs_suminfo_t)) >> (mp)->m_sb.sb_blocklog)
+#define        XFS_SUMPTR(mp,bp,so)    \
+       ((xfs_suminfo_t *)((bp)->b_addr + \
+               (((so) * (uint)sizeof(xfs_suminfo_t)) & XFS_BLOCKMASK(mp))))
+
+#define        XFS_BITTOBLOCK(mp,bi)   ((bi) >> (mp)->m_blkbit_log)
+#define        XFS_BLOCKTOBIT(mp,bb)   ((bb) << (mp)->m_blkbit_log)
+#define        XFS_BITTOWORD(mp,bi)    \
+       ((int)(((bi) >> XFS_NBWORDLOG) & XFS_BLOCKWMASK(mp)))
+
+#define        XFS_RTMIN(a,b)  ((a) < (b) ? (a) : (b))
+#define        XFS_RTMAX(a,b)  ((a) > (b) ? (a) : (b))
+
+#define        XFS_RTLOBIT(w)  xfs_lowbit32(w)
+#define        XFS_RTHIBIT(w)  xfs_highbit32(w)
+
+#if XFS_BIG_BLKNOS
+#define        XFS_RTBLOCKLOG(b)       xfs_highbit64(b)
+#else
+#define        XFS_RTBLOCKLOG(b)       xfs_highbit32(b)
+#endif
+
+/*
+ * Dquot and dquot block format definitions
+ */
+#define XFS_DQUOT_MAGIC                0x4451          /* 'DQ' */
+#define XFS_DQUOT_VERSION      (u_int8_t)0x01  /* latest version number */
+
+/*
+ * This is the main portion of the on-disk representation of quota
+ * information for a user. This is the q_core of the xfs_dquot_t that
+ * is kept in kernel memory. We pad this with some more expansion room
+ * to construct the on disk structure.
+ */
+typedef struct xfs_disk_dquot {
+       __be16          d_magic;        /* dquot magic = XFS_DQUOT_MAGIC */
+       __u8            d_version;      /* dquot version */
+       __u8            d_flags;        /* XFS_DQ_USER/PROJ/GROUP */
+       __be32          d_id;           /* user,project,group id */
+       __be64          d_blk_hardlimit;/* absolute limit on disk blks */
+       __be64          d_blk_softlimit;/* preferred limit on disk blks */
+       __be64          d_ino_hardlimit;/* maximum # allocated inodes */
+       __be64          d_ino_softlimit;/* preferred inode limit */
+       __be64          d_bcount;       /* disk blocks owned by the user */
+       __be64          d_icount;       /* inodes owned by the user */
+       __be32          d_itimer;       /* zero if within inode limits if not,
+                                          this is when we refuse service */
+       __be32          d_btimer;       /* similar to above; for disk blocks */
+       __be16          d_iwarns;       /* warnings issued wrt num inodes */
+       __be16          d_bwarns;       /* warnings issued wrt disk blocks */
+       __be32          d_pad0;         /* 64 bit align */
+       __be64          d_rtb_hardlimit;/* absolute limit on realtime blks */
+       __be64          d_rtb_softlimit;/* preferred limit on RT disk blks */
+       __be64          d_rtbcount;     /* realtime blocks owned */
+       __be32          d_rtbtimer;     /* similar to above; for RT disk blocks */
+       __be16          d_rtbwarns;     /* warnings issued wrt RT disk blocks */
+       __be16          d_pad;
+} xfs_disk_dquot_t;
+
+/*
+ * This is what goes on disk. This is separated from the xfs_disk_dquot because
+ * carrying the unnecessary padding would be a waste of memory.
+ */
+typedef struct xfs_dqblk {
+       xfs_disk_dquot_t  dd_diskdq;    /* portion that lives incore as well */
+       char              dd_fill[4];   /* filling for posterity */
+
+       /*
+        * These two are only present on filesystems with the CRC bits set.
+        */
+       __be32            dd_crc;       /* checksum */
+       __be64            dd_lsn;       /* last modification in log */
+       uuid_t            dd_uuid;      /* location information */
+} xfs_dqblk_t;
+
+#define XFS_DQUOT_CRC_OFF      offsetof(struct xfs_dqblk, dd_crc)
+
+/*
+ * Remote symlink format and access functions.
+ */
+#define XFS_SYMLINK_MAGIC      0x58534c4d      /* XSLM */
+
+struct xfs_dsymlink_hdr {
+       __be32  sl_magic;
+       __be32  sl_offset;
+       __be32  sl_bytes;
+       __be32  sl_crc;
+       uuid_t  sl_uuid;
+       __be64  sl_owner;
+       __be64  sl_blkno;
+       __be64  sl_lsn;
+};
+
+/*
+ * The maximum pathlen is 1024 bytes. Since the minimum file system
+ * blocksize is 512 bytes, we can get a max of 3 extents back from
+ * bmapi when crc headers are taken into account.
+ */
+#define XFS_SYMLINK_MAPS 3
+
+#define XFS_SYMLINK_BUF_SPACE(mp, bufsize)     \
+       ((bufsize) - (xfs_sb_version_hascrc(&(mp)->m_sb) ? \
+                       sizeof(struct xfs_dsymlink_hdr) : 0))
+
+int xfs_symlink_blocks(struct xfs_mount *mp, int pathlen);
+int xfs_symlink_hdr_set(struct xfs_mount *mp, xfs_ino_t ino, uint32_t offset,
+                       uint32_t size, struct xfs_buf *bp);
+bool xfs_symlink_hdr_ok(struct xfs_mount *mp, xfs_ino_t ino, uint32_t offset,
+                       uint32_t size, struct xfs_buf *bp);
+void xfs_symlink_local_to_remote(struct xfs_trans *tp, struct xfs_buf *bp,
+                                struct xfs_inode *ip, struct xfs_ifork *ifp);
+
+extern const struct xfs_buf_ops xfs_symlink_buf_ops;
+
+#endif /* __XFS_FORMAT_H__ */
index d04695545397308a6596f5109a72f8923fd79419..1edb5cc3e5f495fdca3059054d1f2e7bd5d9fd58 100644 (file)
@@ -240,7 +240,9 @@ typedef struct xfs_fsop_resblks {
 
 
 /*
- * Minimum and maximum sizes need for growth checks
+ * Minimum and maximum sizes need for growth checks.
+ *
+ * Block counts are in units of filesystem blocks, not basic blocks.
  */
 #define XFS_MIN_AG_BLOCKS      64
 #define XFS_MIN_LOG_BLOCKS     512ULL
@@ -310,6 +312,17 @@ typedef struct xfs_bstat {
        __u16           bs_aextents;    /* attribute number of extents  */
 } xfs_bstat_t;
 
+/*
+ * Project quota id helpers (previously projid was 16bit only
+ * and using two 16bit values to hold new 32bit projid was choosen
+ * to retain compatibility with "old" filesystems).
+ */
+static inline __uint32_t
+bstat_get_projid(struct xfs_bstat *bs)
+{
+       return (__uint32_t)bs->bs_projid_hi << 16 | bs->bs_projid_lo;
+}
+
 /*
  * The user-level BulkStat Request interface structure.
  */
@@ -344,7 +357,7 @@ typedef struct xfs_error_injection {
  * Speculative preallocation trimming.
  */
 #define XFS_EOFBLOCKS_VERSION          1
-struct xfs_eofblocks {
+struct xfs_fs_eofblocks {
        __u32           eof_version;
        __u32           eof_flags;
        uid_t           eof_uid;
@@ -449,6 +462,21 @@ typedef struct xfs_handle {
                                 - (char *) &(handle))                    \
                                 + (handle).ha_fid.fid_len)
 
+/*
+ * Structure passed to XFS_IOC_SWAPEXT
+ */
+typedef struct xfs_swapext
+{
+       __int64_t       sx_version;     /* version */
+#define XFS_SX_VERSION         0
+       __int64_t       sx_fdtarget;    /* fd of target file */
+       __int64_t       sx_fdtmp;       /* fd of tmp file */
+       xfs_off_t       sx_offset;      /* offset into file */
+       xfs_off_t       sx_length;      /* leng from offset */
+       char            sx_pad[16];     /* pad space, unused */
+       xfs_bstat_t     sx_stat;        /* stat of target b4 copy */
+} xfs_swapext_t;
+
 /*
  * Flags for going down operation
  */
@@ -511,8 +539,14 @@ typedef struct xfs_handle {
 #define XFS_IOC_ERROR_INJECTION             _IOW ('X', 116, struct xfs_error_injection)
 #define XFS_IOC_ERROR_CLEARALL      _IOW ('X', 117, struct xfs_error_injection)
 /*     XFS_IOC_ATTRCTL_BY_HANDLE -- deprecated 118      */
+
 /*     XFS_IOC_FREEZE            -- FIFREEZE   119      */
 /*     XFS_IOC_THAW              -- FITHAW     120      */
+#ifndef FIFREEZE
+#define XFS_IOC_FREEZE              _IOWR('X', 119, int)
+#define XFS_IOC_THAW                _IOWR('X', 120, int)
+#endif
+
 #define XFS_IOC_FSSETDM_BY_HANDLE    _IOW ('X', 121, struct xfs_fsop_setdm_handlereq)
 #define XFS_IOC_ATTRLIST_BY_HANDLE   _IOW ('X', 122, struct xfs_fsop_attrlist_handlereq)
 #define XFS_IOC_ATTRMULTI_BY_HANDLE  _IOW ('X', 123, struct xfs_fsop_attrmulti_handlereq)
index 614eb0cc360860214ce08443ff84993afa531143..e64ee5288b86be2d0c0267b383d3f6f9297e60a1 100644 (file)
@@ -203,8 +203,9 @@ xfs_growfs_data_private(
 
        tp = xfs_trans_alloc(mp, XFS_TRANS_GROWFS);
        tp->t_flags |= XFS_TRANS_RESERVE;
-       if ((error = xfs_trans_reserve(tp, XFS_GROWFS_SPACE_RES(mp),
-                       XFS_GROWDATA_LOG_RES(mp), 0, 0, 0))) {
+       error = xfs_trans_reserve(tp, &M_RES(mp)->tr_growdata,
+                                 XFS_GROWFS_SPACE_RES(mp), 0);
+       if (error) {
                xfs_trans_cancel(tp, 0);
                return error;
        }
@@ -739,8 +740,7 @@ xfs_fs_log_dummy(
        int             error;
 
        tp = _xfs_trans_alloc(mp, XFS_TRANS_DUMMY1, KM_SLEEP);
-       error = xfs_trans_reserve(tp, 0, XFS_SB_LOG_RES(mp), 0, 0,
-                                 XFS_DEFAULT_LOG_COUNT);
+       error = xfs_trans_reserve(tp, &M_RES(mp)->tr_sb, 0, 0);
        if (error) {
                xfs_trans_cancel(tp, 0);
                return error;
index 7a0c17d7ec0974354cfd645695e9f4f7778704fa..6bee95d8a73edfe2232156512509a6de16758423 100644 (file)
@@ -39,6 +39,7 @@
 #include "xfs_cksum.h"
 #include "xfs_buf_item.h"
 #include "xfs_icreate_item.h"
+#include "xfs_icache.h"
 
 
 /*
@@ -506,7 +507,7 @@ xfs_ialloc_next_ag(
 
 /*
  * Select an allocation group to look for a free inode in, based on the parent
- * inode and then mode.  Return the allocation group buffer.
+ * inode and the mode.  Return the allocation group buffer.
  */
 STATIC xfs_agnumber_t
 xfs_ialloc_ag_select(
@@ -1341,7 +1342,7 @@ xfs_imap(
        xfs_agblock_t   cluster_agbno;  /* first block in inode cluster */
        int             error;  /* error code */
        int             offset; /* index of inode in its buffer */
-       int             offset_agbno;   /* blks from chunk start to inode */
+       xfs_agblock_t   offset_agbno;   /* blks from chunk start to inode */
 
        ASSERT(ino != NULLFSINO);
 
index 3f90e1ceb8d68c4655bb033da592f495b91e9772..16219b9c67909a6a483678fb761db701a8cb00e6 100644 (file)
@@ -17,6 +17,7 @@
  */
 #include "xfs.h"
 #include "xfs_fs.h"
+#include "xfs_format.h"
 #include "xfs_types.h"
 #include "xfs_log.h"
 #include "xfs_log_priv.h"
 #include "xfs_dinode.h"
 #include "xfs_error.h"
 #include "xfs_filestream.h"
-#include "xfs_vnodeops.h"
 #include "xfs_inode_item.h"
 #include "xfs_quota.h"
 #include "xfs_trace.h"
 #include "xfs_fsops.h"
 #include "xfs_icache.h"
+#include "xfs_bmap_util.h"
 
 #include <linux/kthread.h>
 #include <linux/freezer.h>
@@ -619,7 +620,7 @@ restart:
 
 /*
  * Background scanning to trim post-EOF preallocated space. This is queued
- * based on the 'background_prealloc_discard_period' tunable (5m by default).
+ * based on the 'speculative_prealloc_lifetime' tunable (5m by default).
  */
 STATIC void
 xfs_queue_eofblocks(
@@ -1203,15 +1204,15 @@ xfs_inode_match_id(
        struct xfs_inode        *ip,
        struct xfs_eofblocks    *eofb)
 {
-       if (eofb->eof_flags & XFS_EOF_FLAGS_UID &&
-           ip->i_d.di_uid != eofb->eof_uid)
+       if ((eofb->eof_flags & XFS_EOF_FLAGS_UID) &&
+           !uid_eq(VFS_I(ip)->i_uid, eofb->eof_uid))
                return 0;
 
-       if (eofb->eof_flags & XFS_EOF_FLAGS_GID &&
-           ip->i_d.di_gid != eofb->eof_gid)
+       if ((eofb->eof_flags & XFS_EOF_FLAGS_GID) &&
+           !gid_eq(VFS_I(ip)->i_gid, eofb->eof_gid))
                return 0;
 
-       if (eofb->eof_flags & XFS_EOF_FLAGS_PRID &&
+       if ((eofb->eof_flags & XFS_EOF_FLAGS_PRID) &&
            xfs_get_projid(ip) != eofb->eof_prid)
                return 0;
 
index a01afbb3909a465a6e94f23bdc819530a3f22140..8a89f7d791bd9df3184fd9f66a15467f0f30f8dc 100644 (file)
 struct xfs_mount;
 struct xfs_perag;
 
+struct xfs_eofblocks {
+       __u32           eof_flags;
+       kuid_t          eof_uid;
+       kgid_t          eof_gid;
+       prid_t          eof_prid;
+       __u64           eof_min_file_size;
+};
+
 #define SYNC_WAIT              0x0001  /* wait for i/o to complete */
 #define SYNC_TRYLOCK           0x0002  /* only try to lock inodes */
 
+/*
+ * Flags for xfs_iget()
+ */
+#define XFS_IGET_CREATE                0x1
+#define XFS_IGET_UNTRUSTED     0x2
+#define XFS_IGET_DONTCACHE     0x4
+
 int xfs_iget(struct xfs_mount *mp, struct xfs_trans *tp, xfs_ino_t ino,
             uint flags, uint lock_flags, xfs_inode_t **ipp);
 
@@ -49,4 +64,39 @@ int xfs_inode_ag_iterator_tag(struct xfs_mount *mp,
                int flags, void *args),
        int flags, void *args, int tag);
 
+static inline int
+xfs_fs_eofblocks_from_user(
+       struct xfs_fs_eofblocks         *src,
+       struct xfs_eofblocks            *dst)
+{
+       if (src->eof_version != XFS_EOFBLOCKS_VERSION)
+               return EINVAL;
+
+       if (src->eof_flags & ~XFS_EOF_FLAGS_VALID)
+               return EINVAL;
+
+       if (memchr_inv(&src->pad32, 0, sizeof(src->pad32)) ||
+           memchr_inv(src->pad64, 0, sizeof(src->pad64)))
+               return EINVAL;
+
+       dst->eof_flags = src->eof_flags;
+       dst->eof_prid = src->eof_prid;
+       dst->eof_min_file_size = src->eof_min_file_size;
+
+       dst->eof_uid = INVALID_UID;
+       if (src->eof_flags & XFS_EOF_FLAGS_UID) {
+               dst->eof_uid = make_kuid(current_user_ns(), src->eof_uid);
+               if (!uid_valid(dst->eof_uid))
+                       return EINVAL;
+       }
+
+       dst->eof_gid = INVALID_GID;
+       if (src->eof_flags & XFS_EOF_FLAGS_GID) {
+               dst->eof_gid = make_kgid(current_user_ns(), src->eof_gid);
+               if (!gid_valid(dst->eof_gid))
+                       return EINVAL;
+       }
+       return 0;
+}
+
 #endif
index 7716a4e7375e296e926ef402f8687169dcc295c3..5a5a593994d4196d3b18c2e959df90dc28c7588f 100644 (file)
 #include "xfs_types.h"
 #include "xfs_bit.h"
 #include "xfs_log.h"
-#include "xfs_inum.h"
 #include "xfs_trans.h"
-#include "xfs_buf_item.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
-#include "xfs_dir2.h"
 #include "xfs_mount.h"
 #include "xfs_trans_priv.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_attr_sf.h"
-#include "xfs_dinode.h"
-#include "xfs_inode.h"
-#include "xfs_inode_item.h"
-#include "xfs_btree.h"
-#include "xfs_ialloc.h"
 #include "xfs_error.h"
 #include "xfs_icreate_item.h"
 
@@ -52,11 +40,14 @@ static inline struct xfs_icreate_item *ICR_ITEM(struct xfs_log_item *lip)
  *
  * We only need one iovec for the icreate log structure.
  */
-STATIC uint
+STATIC void
 xfs_icreate_item_size(
-       struct xfs_log_item     *lip)
+       struct xfs_log_item     *lip,
+       int                     *nvecs,
+       int                     *nbytes)
 {
-       return 1;
+       *nvecs += 1;
+       *nbytes += sizeof(struct xfs_icreate_log);
 }
 
 /*
index 88ba8aa0bc41c0f3aa291da6b621dfea7445f56b..59e89f87c09b3fb8483326a30a9fcb879a4c4665 100644 (file)
 #ifndef XFS_ICREATE_ITEM_H
 #define XFS_ICREATE_ITEM_H     1
 
-/*
- * on disk log item structure
- *
- * Log recovery assumes the first two entries are the type and size and they fit
- * in 32 bits. Also in host order (ugh) so they have to be 32 bit aligned so
- * decoding can be done correctly.
- */
-struct xfs_icreate_log {
-       __uint16_t      icl_type;       /* type of log format structure */
-       __uint16_t      icl_size;       /* size of log format structure */
-       __be32          icl_ag;         /* ag being allocated in */
-       __be32          icl_agbno;      /* start block of inode range */
-       __be32          icl_count;      /* number of inodes to initialise */
-       __be32          icl_isize;      /* size of inodes */
-       __be32          icl_length;     /* length of extent to initialise */
-       __be32          icl_gen;        /* inode generation number to use */
-};
-
 /* in memory log item structure */
 struct xfs_icreate_item {
        struct xfs_log_item     ic_item;
index bb262c25c8de463276e9282d64b299c975eceb7b..e3d75385aa76a6e45b7711a65bb39c268c9f689b 100644 (file)
 
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_types.h"
+#include "xfs_format.h"
 #include "xfs_log.h"
 #include "xfs_inum.h"
 #include "xfs_trans.h"
+#include "xfs_trans_space.h"
 #include "xfs_trans_priv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
+#include "xfs_da_btree.h"
+#include "xfs_dir2_format.h"
+#include "xfs_dir2.h"
 #include "xfs_bmap_btree.h"
 #include "xfs_alloc_btree.h"
 #include "xfs_ialloc_btree.h"
 #include "xfs_attr_sf.h"
+#include "xfs_attr.h"
 #include "xfs_dinode.h"
 #include "xfs_inode.h"
 #include "xfs_buf_item.h"
 #include "xfs_alloc.h"
 #include "xfs_ialloc.h"
 #include "xfs_bmap.h"
+#include "xfs_bmap_util.h"
 #include "xfs_error.h"
-#include "xfs_utils.h"
 #include "xfs_quota.h"
 #include "xfs_filestream.h"
-#include "xfs_vnodeops.h"
 #include "xfs_cksum.h"
 #include "xfs_trace.h"
 #include "xfs_icache.h"
+#include "xfs_symlink.h"
 
-kmem_zone_t *xfs_ifork_zone;
 kmem_zone_t *xfs_inode_zone;
 
 /*
@@ -58,9 +62,6 @@ kmem_zone_t *xfs_inode_zone;
 #define        XFS_ITRUNC_MAX_EXTENTS  2
 
 STATIC int xfs_iflush_int(xfs_inode_t *, xfs_buf_t *);
-STATIC int xfs_iformat_local(xfs_inode_t *, xfs_dinode_t *, int, int);
-STATIC int xfs_iformat_extents(xfs_inode_t *, xfs_dinode_t *, int);
-STATIC int xfs_iformat_btree(xfs_inode_t *, xfs_dinode_t *, int);
 
 /*
  * helper function to extract extent size hint from inode
@@ -310,623 +311,202 @@ xfs_isilocked(
 }
 #endif
 
-void
-__xfs_iflock(
-       struct xfs_inode        *ip)
-{
-       wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IFLOCK_BIT);
-       DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IFLOCK_BIT);
-
-       do {
-               prepare_to_wait_exclusive(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
-               if (xfs_isiflocked(ip))
-                       io_schedule();
-       } while (!xfs_iflock_nowait(ip));
-
-       finish_wait(wq, &wait.wait);
-}
-
 #ifdef DEBUG
+int xfs_locked_n;
+int xfs_small_retries;
+int xfs_middle_retries;
+int xfs_lots_retries;
+int xfs_lock_delays;
+#endif
+
 /*
- * Make sure that the extents in the given memory buffer
- * are valid.
+ * Bump the subclass so xfs_lock_inodes() acquires each lock with
+ * a different value
  */
-STATIC void
-xfs_validate_extents(
-       xfs_ifork_t             *ifp,
-       int                     nrecs,
-       xfs_exntfmt_t           fmt)
+static inline int
+xfs_lock_inumorder(int lock_mode, int subclass)
 {
-       xfs_bmbt_irec_t         irec;
-       xfs_bmbt_rec_host_t     rec;
-       int                     i;
+       if (lock_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL))
+               lock_mode |= (subclass + XFS_LOCK_INUMORDER) << XFS_IOLOCK_SHIFT;
+       if (lock_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL))
+               lock_mode |= (subclass + XFS_LOCK_INUMORDER) << XFS_ILOCK_SHIFT;
 
-       for (i = 0; i < nrecs; i++) {
-               xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, i);
-               rec.l0 = get_unaligned(&ep->l0);
-               rec.l1 = get_unaligned(&ep->l1);
-               xfs_bmbt_get_all(&rec, &irec);
-               if (fmt == XFS_EXTFMT_NOSTATE)
-                       ASSERT(irec.br_state == XFS_EXT_NORM);
-       }
+       return lock_mode;
 }
-#else /* DEBUG */
-#define xfs_validate_extents(ifp, nrecs, fmt)
-#endif /* DEBUG */
 
 /*
- * Check that none of the inode's in the buffer have a next
- * unlinked field of 0.
+ * The following routine will lock n inodes in exclusive mode.
+ * We assume the caller calls us with the inodes in i_ino order.
+ *
+ * We need to detect deadlock where an inode that we lock
+ * is in the AIL and we start waiting for another inode that is locked
+ * by a thread in a long running transaction (such as truncate). This can
+ * result in deadlock since the long running trans might need to wait
+ * for the inode we just locked in order to push the tail and free space
+ * in the log.
  */
-#if defined(DEBUG)
 void
-xfs_inobp_check(
-       xfs_mount_t     *mp,
-       xfs_buf_t       *bp)
-{
-       int             i;
-       int             j;
-       xfs_dinode_t    *dip;
-
-       j = mp->m_inode_cluster_size >> mp->m_sb.sb_inodelog;
-
-       for (i = 0; i < j; i++) {
-               dip = (xfs_dinode_t *)xfs_buf_offset(bp,
-                                       i * mp->m_sb.sb_inodesize);
-               if (!dip->di_next_unlinked)  {
-                       xfs_alert(mp,
-       "Detected bogus zero next_unlinked field in incore inode buffer 0x%p.",
-                               bp);
-                       ASSERT(dip->di_next_unlinked);
-               }
-       }
-}
-#endif
-
-static void
-xfs_inode_buf_verify(
-       struct xfs_buf  *bp)
+xfs_lock_inodes(
+       xfs_inode_t     **ips,
+       int             inodes,
+       uint            lock_mode)
 {
-       struct xfs_mount *mp = bp->b_target->bt_mount;
-       int             i;
-       int             ni;
-
-       /*
-        * Validate the magic number and version of every inode in the buffer
-        */
-       ni = XFS_BB_TO_FSB(mp, bp->b_length) * mp->m_sb.sb_inopblock;
-       for (i = 0; i < ni; i++) {
-               int             di_ok;
-               xfs_dinode_t    *dip;
-
-               dip = (struct xfs_dinode *)xfs_buf_offset(bp,
-                                       (i << mp->m_sb.sb_inodelog));
-               di_ok = dip->di_magic == cpu_to_be16(XFS_DINODE_MAGIC) &&
-                           XFS_DINODE_GOOD_VERSION(dip->di_version);
-               if (unlikely(XFS_TEST_ERROR(!di_ok, mp,
-                                               XFS_ERRTAG_ITOBP_INOTOBP,
-                                               XFS_RANDOM_ITOBP_INOTOBP))) {
-                       xfs_buf_ioerror(bp, EFSCORRUPTED);
-                       XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_HIGH,
-                                            mp, dip);
-#ifdef DEBUG
-                       xfs_emerg(mp,
-                               "bad inode magic/vsn daddr %lld #%d (magic=%x)",
-                               (unsigned long long)bp->b_bn, i,
-                               be16_to_cpu(dip->di_magic));
-                       ASSERT(0);
-#endif
-               }
-       }
-       xfs_inobp_check(mp, bp);
-}
+       int             attempts = 0, i, j, try_lock;
+       xfs_log_item_t  *lp;
 
+       ASSERT(ips && (inodes >= 2)); /* we need at least two */
 
-static void
-xfs_inode_buf_read_verify(
-       struct xfs_buf  *bp)
-{
-       xfs_inode_buf_verify(bp);
-}
-
-static void
-xfs_inode_buf_write_verify(
-       struct xfs_buf  *bp)
-{
-       xfs_inode_buf_verify(bp);
-}
+       try_lock = 0;
+       i = 0;
 
-const struct xfs_buf_ops xfs_inode_buf_ops = {
-       .verify_read = xfs_inode_buf_read_verify,
-       .verify_write = xfs_inode_buf_write_verify,
-};
+again:
+       for (; i < inodes; i++) {
+               ASSERT(ips[i]);
 
+               if (i && (ips[i] == ips[i-1]))  /* Already locked */
+                       continue;
 
-/*
- * This routine is called to map an inode to the buffer containing the on-disk
- * version of the inode.  It returns a pointer to the buffer containing the
- * on-disk inode in the bpp parameter, and in the dipp parameter it returns a
- * pointer to the on-disk inode within that buffer.
- *
- * If a non-zero error is returned, then the contents of bpp and dipp are
- * undefined.
- */
-int
-xfs_imap_to_bp(
-       struct xfs_mount        *mp,
-       struct xfs_trans        *tp,
-       struct xfs_imap         *imap,
-       struct xfs_dinode       **dipp,
-       struct xfs_buf          **bpp,
-       uint                    buf_flags,
-       uint                    iget_flags)
-{
-       struct xfs_buf          *bp;
-       int                     error;
+               /*
+                * If try_lock is not set yet, make sure all locked inodes
+                * are not in the AIL.
+                * If any are, set try_lock to be used later.
+                */
 
-       buf_flags |= XBF_UNMAPPED;
-       error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, imap->im_blkno,
-                                  (int)imap->im_len, buf_flags, &bp,
-                                  &xfs_inode_buf_ops);
-       if (error) {
-               if (error == EAGAIN) {
-                       ASSERT(buf_flags & XBF_TRYLOCK);
-                       return error;
+               if (!try_lock) {
+                       for (j = (i - 1); j >= 0 && !try_lock; j--) {
+                               lp = (xfs_log_item_t *)ips[j]->i_itemp;
+                               if (lp && (lp->li_flags & XFS_LI_IN_AIL)) {
+                                       try_lock++;
+                               }
+                       }
                }
 
-               if (error == EFSCORRUPTED &&
-                   (iget_flags & XFS_IGET_UNTRUSTED))
-                       return XFS_ERROR(EINVAL);
-
-               xfs_warn(mp, "%s: xfs_trans_read_buf() returned error %d.",
-                       __func__, error);
-               return error;
-       }
-
-       *bpp = bp;
-       *dipp = (struct xfs_dinode *)xfs_buf_offset(bp, imap->im_boffset);
-       return 0;
-}
-
-/*
- * Move inode type and inode format specific information from the
- * on-disk inode to the in-core inode.  For fifos, devs, and sockets
- * this means set if_rdev to the proper value.  For files, directories,
- * and symlinks this means to bring in the in-line data or extent
- * pointers.  For a file in B-tree format, only the root is immediately
- * brought in-core.  The rest will be in-lined in if_extents when it
- * is first referenced (see xfs_iread_extents()).
- */
-STATIC int
-xfs_iformat(
-       xfs_inode_t             *ip,
-       xfs_dinode_t            *dip)
-{
-       xfs_attr_shortform_t    *atp;
-       int                     size;
-       int                     error = 0;
-       xfs_fsize_t             di_size;
-
-       if (unlikely(be32_to_cpu(dip->di_nextents) +
-                    be16_to_cpu(dip->di_anextents) >
-                    be64_to_cpu(dip->di_nblocks))) {
-               xfs_warn(ip->i_mount,
-                       "corrupt dinode %Lu, extent total = %d, nblocks = %Lu.",
-                       (unsigned long long)ip->i_ino,
-                       (int)(be32_to_cpu(dip->di_nextents) +
-                             be16_to_cpu(dip->di_anextents)),
-                       (unsigned long long)
-                               be64_to_cpu(dip->di_nblocks));
-               XFS_CORRUPTION_ERROR("xfs_iformat(1)", XFS_ERRLEVEL_LOW,
-                                    ip->i_mount, dip);
-               return XFS_ERROR(EFSCORRUPTED);
-       }
-
-       if (unlikely(dip->di_forkoff > ip->i_mount->m_sb.sb_inodesize)) {
-               xfs_warn(ip->i_mount, "corrupt dinode %Lu, forkoff = 0x%x.",
-                       (unsigned long long)ip->i_ino,
-                       dip->di_forkoff);
-               XFS_CORRUPTION_ERROR("xfs_iformat(2)", XFS_ERRLEVEL_LOW,
-                                    ip->i_mount, dip);
-               return XFS_ERROR(EFSCORRUPTED);
-       }
-
-       if (unlikely((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) &&
-                    !ip->i_mount->m_rtdev_targp)) {
-               xfs_warn(ip->i_mount,
-                       "corrupt dinode %Lu, has realtime flag set.",
-                       ip->i_ino);
-               XFS_CORRUPTION_ERROR("xfs_iformat(realtime)",
-                                    XFS_ERRLEVEL_LOW, ip->i_mount, dip);
-               return XFS_ERROR(EFSCORRUPTED);
-       }
-
-       switch (ip->i_d.di_mode & S_IFMT) {
-       case S_IFIFO:
-       case S_IFCHR:
-       case S_IFBLK:
-       case S_IFSOCK:
-               if (unlikely(dip->di_format != XFS_DINODE_FMT_DEV)) {
-                       XFS_CORRUPTION_ERROR("xfs_iformat(3)", XFS_ERRLEVEL_LOW,
-                                             ip->i_mount, dip);
-                       return XFS_ERROR(EFSCORRUPTED);
-               }
-               ip->i_d.di_size = 0;
-               ip->i_df.if_u2.if_rdev = xfs_dinode_get_rdev(dip);
-               break;
+               /*
+                * If any of the previous locks we have locked is in the AIL,
+                * we must TRY to get the second and subsequent locks. If
+                * we can't get any, we must release all we have
+                * and try again.
+                */
 
-       case S_IFREG:
-       case S_IFLNK:
-       case S_IFDIR:
-               switch (dip->di_format) {
-               case XFS_DINODE_FMT_LOCAL:
+               if (try_lock) {
+                       /* try_lock must be 0 if i is 0. */
                        /*
-                        * no local regular files yet
+                        * try_lock means we have an inode locked
+                        * that is in the AIL.
                         */
-                       if (unlikely(S_ISREG(be16_to_cpu(dip->di_mode)))) {
-                               xfs_warn(ip->i_mount,
-                       "corrupt inode %Lu (local format for regular file).",
-                                       (unsigned long long) ip->i_ino);
-                               XFS_CORRUPTION_ERROR("xfs_iformat(4)",
-                                                    XFS_ERRLEVEL_LOW,
-                                                    ip->i_mount, dip);
-                               return XFS_ERROR(EFSCORRUPTED);
-                       }
+                       ASSERT(i != 0);
+                       if (!xfs_ilock_nowait(ips[i], xfs_lock_inumorder(lock_mode, i))) {
+                               attempts++;
+
+                               /*
+                                * Unlock all previous guys and try again.
+                                * xfs_iunlock will try to push the tail
+                                * if the inode is in the AIL.
+                                */
+
+                               for(j = i - 1; j >= 0; j--) {
+
+                                       /*
+                                        * Check to see if we've already
+                                        * unlocked this one.
+                                        * Not the first one going back,
+                                        * and the inode ptr is the same.
+                                        */
+                                       if ((j != (i - 1)) && ips[j] ==
+                                                               ips[j+1])
+                                               continue;
+
+                                       xfs_iunlock(ips[j], lock_mode);
+                               }
 
-                       di_size = be64_to_cpu(dip->di_size);
-                       if (unlikely(di_size > XFS_DFORK_DSIZE(dip, ip->i_mount))) {
-                               xfs_warn(ip->i_mount,
-                       "corrupt inode %Lu (bad size %Ld for local inode).",
-                                       (unsigned long long) ip->i_ino,
-                                       (long long) di_size);
-                               XFS_CORRUPTION_ERROR("xfs_iformat(5)",
-                                                    XFS_ERRLEVEL_LOW,
-                                                    ip->i_mount, dip);
-                               return XFS_ERROR(EFSCORRUPTED);
+                               if ((attempts % 5) == 0) {
+                                       delay(1); /* Don't just spin the CPU */
+#ifdef DEBUG
+                                       xfs_lock_delays++;
+#endif
+                               }
+                               i = 0;
+                               try_lock = 0;
+                               goto again;
                        }
-
-                       size = (int)di_size;
-                       error = xfs_iformat_local(ip, dip, XFS_DATA_FORK, size);
-                       break;
-               case XFS_DINODE_FMT_EXTENTS:
-                       error = xfs_iformat_extents(ip, dip, XFS_DATA_FORK);
-                       break;
-               case XFS_DINODE_FMT_BTREE:
-                       error = xfs_iformat_btree(ip, dip, XFS_DATA_FORK);
-                       break;
-               default:
-                       XFS_ERROR_REPORT("xfs_iformat(6)", XFS_ERRLEVEL_LOW,
-                                        ip->i_mount);
-                       return XFS_ERROR(EFSCORRUPTED);
+               } else {
+                       xfs_ilock(ips[i], xfs_lock_inumorder(lock_mode, i));
                }
-               break;
-
-       default:
-               XFS_ERROR_REPORT("xfs_iformat(7)", XFS_ERRLEVEL_LOW, ip->i_mount);
-               return XFS_ERROR(EFSCORRUPTED);
-       }
-       if (error) {
-               return error;
        }
-       if (!XFS_DFORK_Q(dip))
-               return 0;
-
-       ASSERT(ip->i_afp == NULL);
-       ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP | KM_NOFS);
-
-       switch (dip->di_aformat) {
-       case XFS_DINODE_FMT_LOCAL:
-               atp = (xfs_attr_shortform_t *)XFS_DFORK_APTR(dip);
-               size = be16_to_cpu(atp->hdr.totsize);
-
-               if (unlikely(size < sizeof(struct xfs_attr_sf_hdr))) {
-                       xfs_warn(ip->i_mount,
-                               "corrupt inode %Lu (bad attr fork size %Ld).",
-                               (unsigned long long) ip->i_ino,
-                               (long long) size);
-                       XFS_CORRUPTION_ERROR("xfs_iformat(8)",
-                                            XFS_ERRLEVEL_LOW,
-                                            ip->i_mount, dip);
-                       return XFS_ERROR(EFSCORRUPTED);
-               }
 
-               error = xfs_iformat_local(ip, dip, XFS_ATTR_FORK, size);
-               break;
-       case XFS_DINODE_FMT_EXTENTS:
-               error = xfs_iformat_extents(ip, dip, XFS_ATTR_FORK);
-               break;
-       case XFS_DINODE_FMT_BTREE:
-               error = xfs_iformat_btree(ip, dip, XFS_ATTR_FORK);
-               break;
-       default:
-               error = XFS_ERROR(EFSCORRUPTED);
-               break;
-       }
-       if (error) {
-               kmem_zone_free(xfs_ifork_zone, ip->i_afp);
-               ip->i_afp = NULL;
-               xfs_idestroy_fork(ip, XFS_DATA_FORK);
+#ifdef DEBUG
+       if (attempts) {
+               if (attempts < 5) xfs_small_retries++;
+               else if (attempts < 100) xfs_middle_retries++;
+               else xfs_lots_retries++;
+       } else {
+               xfs_locked_n++;
        }
-       return error;
+#endif
 }
 
 /*
- * The file is in-lined in the on-disk inode.
- * If it fits into if_inline_data, then copy
- * it there, otherwise allocate a buffer for it
- * and copy the data there.  Either way, set
- * if_data to point at the data.
- * If we allocate a buffer for the data, make
- * sure that its size is a multiple of 4 and
- * record the real size in i_real_bytes.
+ * xfs_lock_two_inodes() can only be used to lock one type of lock
+ * at a time - the iolock or the ilock, but not both at once. If
+ * we lock both at once, lockdep will report false positives saying
+ * we have violated locking orders.
  */
-STATIC int
-xfs_iformat_local(
-       xfs_inode_t     *ip,
-       xfs_dinode_t    *dip,
-       int             whichfork,
-       int             size)
+void
+xfs_lock_two_inodes(
+       xfs_inode_t             *ip0,
+       xfs_inode_t             *ip1,
+       uint                    lock_mode)
 {
-       xfs_ifork_t     *ifp;
-       int             real_size;
-
-       /*
-        * If the size is unreasonable, then something
-        * is wrong and we just bail out rather than crash in
-        * kmem_alloc() or memcpy() below.
-        */
-       if (unlikely(size > XFS_DFORK_SIZE(dip, ip->i_mount, whichfork))) {
-               xfs_warn(ip->i_mount,
-       "corrupt inode %Lu (bad size %d for local fork, size = %d).",
-                       (unsigned long long) ip->i_ino, size,
-                       XFS_DFORK_SIZE(dip, ip->i_mount, whichfork));
-               XFS_CORRUPTION_ERROR("xfs_iformat_local", XFS_ERRLEVEL_LOW,
-                                    ip->i_mount, dip);
-               return XFS_ERROR(EFSCORRUPTED);
-       }
-       ifp = XFS_IFORK_PTR(ip, whichfork);
-       real_size = 0;
-       if (size == 0)
-               ifp->if_u1.if_data = NULL;
-       else if (size <= sizeof(ifp->if_u2.if_inline_data))
-               ifp->if_u1.if_data = ifp->if_u2.if_inline_data;
-       else {
-               real_size = roundup(size, 4);
-               ifp->if_u1.if_data = kmem_alloc(real_size, KM_SLEEP | KM_NOFS);
-       }
-       ifp->if_bytes = size;
-       ifp->if_real_bytes = real_size;
-       if (size)
-               memcpy(ifp->if_u1.if_data, XFS_DFORK_PTR(dip, whichfork), size);
-       ifp->if_flags &= ~XFS_IFEXTENTS;
-       ifp->if_flags |= XFS_IFINLINE;
-       return 0;
-}
+       xfs_inode_t             *temp;
+       int                     attempts = 0;
+       xfs_log_item_t          *lp;
 
-/*
- * The file consists of a set of extents all
- * of which fit into the on-disk inode.
- * If there are few enough extents to fit into
- * the if_inline_ext, then copy them there.
- * Otherwise allocate a buffer for them and copy
- * them into it.  Either way, set if_extents
- * to point at the extents.
- */
-STATIC int
-xfs_iformat_extents(
-       xfs_inode_t     *ip,
-       xfs_dinode_t    *dip,
-       int             whichfork)
-{
-       xfs_bmbt_rec_t  *dp;
-       xfs_ifork_t     *ifp;
-       int             nex;
-       int             size;
-       int             i;
-
-       ifp = XFS_IFORK_PTR(ip, whichfork);
-       nex = XFS_DFORK_NEXTENTS(dip, whichfork);
-       size = nex * (uint)sizeof(xfs_bmbt_rec_t);
-
-       /*
-        * If the number of extents is unreasonable, then something
-        * is wrong and we just bail out rather than crash in
-        * kmem_alloc() or memcpy() below.
-        */
-       if (unlikely(size < 0 || size > XFS_DFORK_SIZE(dip, ip->i_mount, whichfork))) {
-               xfs_warn(ip->i_mount, "corrupt inode %Lu ((a)extents = %d).",
-                       (unsigned long long) ip->i_ino, nex);
-               XFS_CORRUPTION_ERROR("xfs_iformat_extents(1)", XFS_ERRLEVEL_LOW,
-                                    ip->i_mount, dip);
-               return XFS_ERROR(EFSCORRUPTED);
-       }
-
-       ifp->if_real_bytes = 0;
-       if (nex == 0)
-               ifp->if_u1.if_extents = NULL;
-       else if (nex <= XFS_INLINE_EXTS)
-               ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext;
-       else
-               xfs_iext_add(ifp, 0, nex);
-
-       ifp->if_bytes = size;
-       if (size) {
-               dp = (xfs_bmbt_rec_t *) XFS_DFORK_PTR(dip, whichfork);
-               xfs_validate_extents(ifp, nex, XFS_EXTFMT_INODE(ip));
-               for (i = 0; i < nex; i++, dp++) {
-                       xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, i);
-                       ep->l0 = get_unaligned_be64(&dp->l0);
-                       ep->l1 = get_unaligned_be64(&dp->l1);
-               }
-               XFS_BMAP_TRACE_EXLIST(ip, nex, whichfork);
-               if (whichfork != XFS_DATA_FORK ||
-                       XFS_EXTFMT_INODE(ip) == XFS_EXTFMT_NOSTATE)
-                               if (unlikely(xfs_check_nostate_extents(
-                                   ifp, 0, nex))) {
-                                       XFS_ERROR_REPORT("xfs_iformat_extents(2)",
-                                                        XFS_ERRLEVEL_LOW,
-                                                        ip->i_mount);
-                                       return XFS_ERROR(EFSCORRUPTED);
-                               }
-       }
-       ifp->if_flags |= XFS_IFEXTENTS;
-       return 0;
-}
+       if (lock_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL))
+               ASSERT((lock_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)) == 0);
+       ASSERT(ip0->i_ino != ip1->i_ino);
 
-/*
- * The file has too many extents to fit into
- * the inode, so they are in B-tree format.
- * Allocate a buffer for the root of the B-tree
- * and copy the root into it.  The i_extents
- * field will remain NULL until all of the
- * extents are read in (when they are needed).
- */
-STATIC int
-xfs_iformat_btree(
-       xfs_inode_t             *ip,
-       xfs_dinode_t            *dip,
-       int                     whichfork)
-{
-       struct xfs_mount        *mp = ip->i_mount;
-       xfs_bmdr_block_t        *dfp;
-       xfs_ifork_t             *ifp;
-       /* REFERENCED */
-       int                     nrecs;
-       int                     size;
-
-       ifp = XFS_IFORK_PTR(ip, whichfork);
-       dfp = (xfs_bmdr_block_t *)XFS_DFORK_PTR(dip, whichfork);
-       size = XFS_BMAP_BROOT_SPACE(mp, dfp);
-       nrecs = be16_to_cpu(dfp->bb_numrecs);
-
-       /*
-        * blow out if -- fork has less extents than can fit in
-        * fork (fork shouldn't be a btree format), root btree
-        * block has more records than can fit into the fork,
-        * or the number of extents is greater than the number of
-        * blocks.
-        */
-       if (unlikely(XFS_IFORK_NEXTENTS(ip, whichfork) <=
-                                       XFS_IFORK_MAXEXT(ip, whichfork) ||
-                    XFS_BMDR_SPACE_CALC(nrecs) >
-                                       XFS_DFORK_SIZE(dip, mp, whichfork) ||
-                    XFS_IFORK_NEXTENTS(ip, whichfork) > ip->i_d.di_nblocks)) {
-               xfs_warn(mp, "corrupt inode %Lu (btree).",
-                                       (unsigned long long) ip->i_ino);
-               XFS_CORRUPTION_ERROR("xfs_iformat_btree", XFS_ERRLEVEL_LOW,
-                                        mp, dip);
-               return XFS_ERROR(EFSCORRUPTED);
-       }
-
-       ifp->if_broot_bytes = size;
-       ifp->if_broot = kmem_alloc(size, KM_SLEEP | KM_NOFS);
-       ASSERT(ifp->if_broot != NULL);
-       /*
-        * Copy and convert from the on-disk structure
-        * to the in-memory structure.
-        */
-       xfs_bmdr_to_bmbt(ip, dfp, XFS_DFORK_SIZE(dip, ip->i_mount, whichfork),
-                        ifp->if_broot, size);
-       ifp->if_flags &= ~XFS_IFEXTENTS;
-       ifp->if_flags |= XFS_IFBROOT;
+       if (ip0->i_ino > ip1->i_ino) {
+               temp = ip0;
+               ip0 = ip1;
+               ip1 = temp;
+       }
 
-       return 0;
-}
+ again:
+       xfs_ilock(ip0, xfs_lock_inumorder(lock_mode, 0));
 
-STATIC void
-xfs_dinode_from_disk(
-       xfs_icdinode_t          *to,
-       xfs_dinode_t            *from)
-{
-       to->di_magic = be16_to_cpu(from->di_magic);
-       to->di_mode = be16_to_cpu(from->di_mode);
-       to->di_version = from ->di_version;
-       to->di_format = from->di_format;
-       to->di_onlink = be16_to_cpu(from->di_onlink);
-       to->di_uid = be32_to_cpu(from->di_uid);
-       to->di_gid = be32_to_cpu(from->di_gid);
-       to->di_nlink = be32_to_cpu(from->di_nlink);
-       to->di_projid_lo = be16_to_cpu(from->di_projid_lo);
-       to->di_projid_hi = be16_to_cpu(from->di_projid_hi);
-       memcpy(to->di_pad, from->di_pad, sizeof(to->di_pad));
-       to->di_flushiter = be16_to_cpu(from->di_flushiter);
-       to->di_atime.t_sec = be32_to_cpu(from->di_atime.t_sec);
-       to->di_atime.t_nsec = be32_to_cpu(from->di_atime.t_nsec);
-       to->di_mtime.t_sec = be32_to_cpu(from->di_mtime.t_sec);
-       to->di_mtime.t_nsec = be32_to_cpu(from->di_mtime.t_nsec);
-       to->di_ctime.t_sec = be32_to_cpu(from->di_ctime.t_sec);
-       to->di_ctime.t_nsec = be32_to_cpu(from->di_ctime.t_nsec);
-       to->di_size = be64_to_cpu(from->di_size);
-       to->di_nblocks = be64_to_cpu(from->di_nblocks);
-       to->di_extsize = be32_to_cpu(from->di_extsize);
-       to->di_nextents = be32_to_cpu(from->di_nextents);
-       to->di_anextents = be16_to_cpu(from->di_anextents);
-       to->di_forkoff = from->di_forkoff;
-       to->di_aformat  = from->di_aformat;
-       to->di_dmevmask = be32_to_cpu(from->di_dmevmask);
-       to->di_dmstate  = be16_to_cpu(from->di_dmstate);
-       to->di_flags    = be16_to_cpu(from->di_flags);
-       to->di_gen      = be32_to_cpu(from->di_gen);
-
-       if (to->di_version == 3) {
-               to->di_changecount = be64_to_cpu(from->di_changecount);
-               to->di_crtime.t_sec = be32_to_cpu(from->di_crtime.t_sec);
-               to->di_crtime.t_nsec = be32_to_cpu(from->di_crtime.t_nsec);
-               to->di_flags2 = be64_to_cpu(from->di_flags2);
-               to->di_ino = be64_to_cpu(from->di_ino);
-               to->di_lsn = be64_to_cpu(from->di_lsn);
-               memcpy(to->di_pad2, from->di_pad2, sizeof(to->di_pad2));
-               uuid_copy(&to->di_uuid, &from->di_uuid);
+       /*
+        * If the first lock we have locked is in the AIL, we must TRY to get
+        * the second lock. If we can't get it, we must release the first one
+        * and try again.
+        */
+       lp = (xfs_log_item_t *)ip0->i_itemp;
+       if (lp && (lp->li_flags & XFS_LI_IN_AIL)) {
+               if (!xfs_ilock_nowait(ip1, xfs_lock_inumorder(lock_mode, 1))) {
+                       xfs_iunlock(ip0, lock_mode);
+                       if ((++attempts % 5) == 0)
+                               delay(1); /* Don't just spin the CPU */
+                       goto again;
+               }
+       } else {
+               xfs_ilock(ip1, xfs_lock_inumorder(lock_mode, 1));
        }
 }
 
+
 void
-xfs_dinode_to_disk(
-       xfs_dinode_t            *to,
-       xfs_icdinode_t          *from)
+__xfs_iflock(
+       struct xfs_inode        *ip)
 {
-       to->di_magic = cpu_to_be16(from->di_magic);
-       to->di_mode = cpu_to_be16(from->di_mode);
-       to->di_version = from ->di_version;
-       to->di_format = from->di_format;
-       to->di_onlink = cpu_to_be16(from->di_onlink);
-       to->di_uid = cpu_to_be32(from->di_uid);
-       to->di_gid = cpu_to_be32(from->di_gid);
-       to->di_nlink = cpu_to_be32(from->di_nlink);
-       to->di_projid_lo = cpu_to_be16(from->di_projid_lo);
-       to->di_projid_hi = cpu_to_be16(from->di_projid_hi);
-       memcpy(to->di_pad, from->di_pad, sizeof(to->di_pad));
-       to->di_atime.t_sec = cpu_to_be32(from->di_atime.t_sec);
-       to->di_atime.t_nsec = cpu_to_be32(from->di_atime.t_nsec);
-       to->di_mtime.t_sec = cpu_to_be32(from->di_mtime.t_sec);
-       to->di_mtime.t_nsec = cpu_to_be32(from->di_mtime.t_nsec);
-       to->di_ctime.t_sec = cpu_to_be32(from->di_ctime.t_sec);
-       to->di_ctime.t_nsec = cpu_to_be32(from->di_ctime.t_nsec);
-       to->di_size = cpu_to_be64(from->di_size);
-       to->di_nblocks = cpu_to_be64(from->di_nblocks);
-       to->di_extsize = cpu_to_be32(from->di_extsize);
-       to->di_nextents = cpu_to_be32(from->di_nextents);
-       to->di_anextents = cpu_to_be16(from->di_anextents);
-       to->di_forkoff = from->di_forkoff;
-       to->di_aformat = from->di_aformat;
-       to->di_dmevmask = cpu_to_be32(from->di_dmevmask);
-       to->di_dmstate = cpu_to_be16(from->di_dmstate);
-       to->di_flags = cpu_to_be16(from->di_flags);
-       to->di_gen = cpu_to_be32(from->di_gen);
-
-       if (from->di_version == 3) {
-               to->di_changecount = cpu_to_be64(from->di_changecount);
-               to->di_crtime.t_sec = cpu_to_be32(from->di_crtime.t_sec);
-               to->di_crtime.t_nsec = cpu_to_be32(from->di_crtime.t_nsec);
-               to->di_flags2 = cpu_to_be64(from->di_flags2);
-               to->di_ino = cpu_to_be64(from->di_ino);
-               to->di_lsn = cpu_to_be64(from->di_lsn);
-               memcpy(to->di_pad2, from->di_pad2, sizeof(to->di_pad2));
-               uuid_copy(&to->di_uuid, &from->di_uuid);
-               to->di_flushiter = 0;
-       } else {
-               to->di_flushiter = cpu_to_be16(from->di_flushiter);
-       }
+       wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IFLOCK_BIT);
+       DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IFLOCK_BIT);
+
+       do {
+               prepare_to_wait_exclusive(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
+               if (xfs_isiflocked(ip))
+                       io_schedule();
+       } while (!xfs_iflock_nowait(ip));
+
+       finish_wait(wq, &wait.wait);
 }
 
 STATIC uint
@@ -987,234 +567,49 @@ xfs_dic2xflags(
                                (XFS_DFORK_Q(dip) ? XFS_XFLAG_HASATTR : 0);
 }
 
-static bool
-xfs_dinode_verify(
-       struct xfs_mount        *mp,
-       struct xfs_inode        *ip,
-       struct xfs_dinode       *dip)
-{
-       if (dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC))
-               return false;
-
-       /* only version 3 or greater inodes are extensively verified here */
-       if (dip->di_version < 3)
-               return true;
-
-       if (!xfs_sb_version_hascrc(&mp->m_sb))
-               return false;
-       if (!xfs_verify_cksum((char *)dip, mp->m_sb.sb_inodesize,
-                             offsetof(struct xfs_dinode, di_crc)))
-               return false;
-       if (be64_to_cpu(dip->di_ino) != ip->i_ino)
-               return false;
-       if (!uuid_equal(&dip->di_uuid, &mp->m_sb.sb_uuid))
-               return false;
-       return true;
-}
-
-void
-xfs_dinode_calc_crc(
-       struct xfs_mount        *mp,
-       struct xfs_dinode       *dip)
-{
-       __uint32_t              crc;
-
-       if (dip->di_version < 3)
-               return;
-
-       ASSERT(xfs_sb_version_hascrc(&mp->m_sb));
-       crc = xfs_start_cksum((char *)dip, mp->m_sb.sb_inodesize,
-                             offsetof(struct xfs_dinode, di_crc));
-       dip->di_crc = xfs_end_cksum(crc);
-}
-
 /*
- * Read the disk inode attributes into the in-core inode structure.
- *
- * For version 5 superblocks, if we are initialising a new inode and we are not
- * utilising the XFS_MOUNT_IKEEP inode cluster mode, we can simple build the new
- * inode core with a random generation number. If we are keeping inodes around,
- * we need to read the inode cluster to get the existing generation number off
- * disk. Further, if we are using version 4 superblocks (i.e. v1/v2 inode
- * format) then log recovery is dependent on the di_flushiter field being
- * initialised from the current on-disk value and hence we must also read the
- * inode off disk.
+ * Lookups up an inode from "name". If ci_name is not NULL, then a CI match
+ * is allowed, otherwise it has to be an exact match. If a CI match is found,
+ * ci_name->name will point to a the actual name (caller must free) or
+ * will be set to NULL if an exact match is found.
  */
 int
-xfs_iread(
-       xfs_mount_t     *mp,
-       xfs_trans_t     *tp,
-       xfs_inode_t     *ip,
-       uint            iget_flags)
+xfs_lookup(
+       xfs_inode_t             *dp,
+       struct xfs_name         *name,
+       xfs_inode_t             **ipp,
+       struct xfs_name         *ci_name)
 {
-       xfs_buf_t       *bp;
-       xfs_dinode_t    *dip;
-       int             error;
-
-       /*
-        * Fill in the location information in the in-core inode.
-        */
-       error = xfs_imap(mp, tp, ip->i_ino, &ip->i_imap, iget_flags);
-       if (error)
-               return error;
-
-       /* shortcut IO on inode allocation if possible */
-       if ((iget_flags & XFS_IGET_CREATE) &&
-           xfs_sb_version_hascrc(&mp->m_sb) &&
-           !(mp->m_flags & XFS_MOUNT_IKEEP)) {
-               /* initialise the on-disk inode core */
-               memset(&ip->i_d, 0, sizeof(ip->i_d));
-               ip->i_d.di_magic = XFS_DINODE_MAGIC;
-               ip->i_d.di_gen = prandom_u32();
-               if (xfs_sb_version_hascrc(&mp->m_sb)) {
-                       ip->i_d.di_version = 3;
-                       ip->i_d.di_ino = ip->i_ino;
-                       uuid_copy(&ip->i_d.di_uuid, &mp->m_sb.sb_uuid);
-               } else
-                       ip->i_d.di_version = 2;
-               return 0;
-       }
-
-       /*
-        * Get pointers to the on-disk inode and the buffer containing it.
-        */
-       error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &bp, 0, iget_flags);
-       if (error)
-               return error;
-
-       /* even unallocated inodes are verified */
-       if (!xfs_dinode_verify(mp, ip, dip)) {
-               xfs_alert(mp, "%s: validation failed for inode %lld failed",
-                               __func__, ip->i_ino);
+       xfs_ino_t               inum;
+       int                     error;
+       uint                    lock_mode;
 
-               XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, dip);
-               error = XFS_ERROR(EFSCORRUPTED);
-               goto out_brelse;
-       }
+       trace_xfs_lookup(dp, name);
 
-       /*
-        * If the on-disk inode is already linked to a directory
-        * entry, copy all of the inode into the in-core inode.
-        * xfs_iformat() handles copying in the inode format
-        * specific information.
-        * Otherwise, just get the truly permanent information.
-        */
-       if (dip->di_mode) {
-               xfs_dinode_from_disk(&ip->i_d, dip);
-               error = xfs_iformat(ip, dip);
-               if (error)  {
-#ifdef DEBUG
-                       xfs_alert(mp, "%s: xfs_iformat() returned error %d",
-                               __func__, error);
-#endif /* DEBUG */
-                       goto out_brelse;
-               }
-       } else {
-               /*
-                * Partial initialisation of the in-core inode. Just the bits
-                * that xfs_ialloc won't overwrite or relies on being correct.
-                */
-               ip->i_d.di_magic = be16_to_cpu(dip->di_magic);
-               ip->i_d.di_version = dip->di_version;
-               ip->i_d.di_gen = be32_to_cpu(dip->di_gen);
-               ip->i_d.di_flushiter = be16_to_cpu(dip->di_flushiter);
-
-               if (dip->di_version == 3) {
-                       ip->i_d.di_ino = be64_to_cpu(dip->di_ino);
-                       uuid_copy(&ip->i_d.di_uuid, &dip->di_uuid);
-               }
+       if (XFS_FORCED_SHUTDOWN(dp->i_mount))
+               return XFS_ERROR(EIO);
 
-               /*
-                * Make sure to pull in the mode here as well in
-                * case the inode is released without being used.
-                * This ensures that xfs_inactive() will see that
-                * the inode is already free and not try to mess
-                * with the uninitialized part of it.
-                */
-               ip->i_d.di_mode = 0;
-       }
+       lock_mode = xfs_ilock_map_shared(dp);
+       error = xfs_dir_lookup(NULL, dp, name, &inum, ci_name);
+       xfs_iunlock_map_shared(dp, lock_mode);
 
-       /*
-        * The inode format changed when we moved the link count and
-        * made it 32 bits long.  If this is an old format inode,
-        * convert it in memory to look like a new one.  If it gets
-        * flushed to disk we will convert back before flushing or
-        * logging it.  We zero out the new projid field and the old link
-        * count field.  We'll handle clearing the pad field (the remains
-        * of the old uuid field) when we actually convert the inode to
-        * the new format. We don't change the version number so that we
-        * can distinguish this from a real new format inode.
-        */
-       if (ip->i_d.di_version == 1) {
-               ip->i_d.di_nlink = ip->i_d.di_onlink;
-               ip->i_d.di_onlink = 0;
-               xfs_set_projid(ip, 0);
-       }
+       if (error)
+               goto out;
 
-       ip->i_delayed_blks = 0;
+       error = xfs_iget(dp->i_mount, NULL, inum, 0, 0, ipp);
+       if (error)
+               goto out_free_name;
 
-       /*
-        * Mark the buffer containing the inode as something to keep
-        * around for a while.  This helps to keep recently accessed
-        * meta-data in-core longer.
-        */
-       xfs_buf_set_ref(bp, XFS_INO_REF);
+       return 0;
 
-       /*
-        * Use xfs_trans_brelse() to release the buffer containing the on-disk
-        * inode, because it was acquired with xfs_trans_read_buf() in
-        * xfs_imap_to_bp() above.  If tp is NULL, this is just a normal
-        * brelse().  If we're within a transaction, then xfs_trans_brelse()
-        * will only release the buffer if it is not dirty within the
-        * transaction.  It will be OK to release the buffer in this case,
-        * because inodes on disk are never destroyed and we will be locking the
-        * new in-core inode before putting it in the cache where other
-        * processes can find it.  Thus we don't have to worry about the inode
-        * being changed just because we released the buffer.
-        */
- out_brelse:
-       xfs_trans_brelse(tp, bp);
+out_free_name:
+       if (ci_name)
+               kmem_free(ci_name->name);
+out:
+       *ipp = NULL;
        return error;
 }
 
-/*
- * Read in extents from a btree-format inode.
- * Allocate and fill in if_extents.  Real work is done in xfs_bmap.c.
- */
-int
-xfs_iread_extents(
-       xfs_trans_t     *tp,
-       xfs_inode_t     *ip,
-       int             whichfork)
-{
-       int             error;
-       xfs_ifork_t     *ifp;
-       xfs_extnum_t    nextents;
-
-       if (unlikely(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) {
-               XFS_ERROR_REPORT("xfs_iread_extents", XFS_ERRLEVEL_LOW,
-                                ip->i_mount);
-               return XFS_ERROR(EFSCORRUPTED);
-       }
-       nextents = XFS_IFORK_NEXTENTS(ip, whichfork);
-       ifp = XFS_IFORK_PTR(ip, whichfork);
-
-       /*
-        * We know that the size is valid (it's checked in iformat_btree)
-        */
-       ifp->if_bytes = ifp->if_real_bytes = 0;
-       ifp->if_flags |= XFS_IFEXTENTS;
-       xfs_iext_add(ifp, 0, nextents);
-       error = xfs_bmap_read_extents(tp, ip, whichfork);
-       if (error) {
-               xfs_iext_destroy(ifp);
-               ifp->if_flags &= ~XFS_IFEXTENTS;
-               return error;
-       }
-       xfs_validate_extents(ifp, nextents, XFS_EXTFMT_INODE(ip));
-       return 0;
-}
-
 /*
  * Allocate an inode on disk and return a copy of its in-core version.
  * The in-core inode is locked exclusively.  Set mode, nlink, and rdev
@@ -1295,8 +690,8 @@ xfs_ialloc(
        ip->i_d.di_onlink = 0;
        ip->i_d.di_nlink = nlink;
        ASSERT(ip->i_d.di_nlink == nlink);
-       ip->i_d.di_uid = current_fsuid();
-       ip->i_d.di_gid = current_fsgid();
+       ip->i_d.di_uid = xfs_kuid_to_uid(current_fsuid());
+       ip->i_d.di_gid = xfs_kgid_to_gid(current_fsgid());
        xfs_set_projid(ip, prid);
        memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad));
 
@@ -1335,7 +730,7 @@ xfs_ialloc(
         */
        if ((irix_sgid_inherit) &&
            (ip->i_d.di_mode & S_ISGID) &&
-           (!in_group_p((gid_t)ip->i_d.di_gid))) {
+           (!in_group_p(xfs_gid_to_kgid(ip->i_d.di_gid)))) {
                ip->i_d.di_mode &= ~S_ISGID;
        }
 
@@ -1467,31 +862,608 @@ xfs_ialloc(
 }
 
 /*
- * Free up the underlying blocks past new_size.  The new size must be smaller
- * than the current size.  This routine can be used both for the attribute and
- * data fork, and does not modify the inode size, which is left to the caller.
+ * Allocates a new inode from disk and return a pointer to the
+ * incore copy. This routine will internally commit the current
+ * transaction and allocate a new one if the Space Manager needed
+ * to do an allocation to replenish the inode free-list.
  *
- * The transaction passed to this routine must have made a permanent log
- * reservation of at least XFS_ITRUNCATE_LOG_RES.  This routine may commit the
- * given transaction and start new ones, so make sure everything involved in
- * the transaction is tidy before calling here.  Some transaction will be
- * returned to the caller to be committed.  The incoming transaction must
- * already include the inode, and both inode locks must be held exclusively.
- * The inode must also be "held" within the transaction.  On return the inode
- * will be "held" within the returned transaction.  This routine does NOT
- * require any disk space to be reserved for it within the transaction.
+ * This routine is designed to be called from xfs_create and
+ * xfs_create_dir.
  *
- * If we get an error, we must return with the inode locked and linked into the
- * current transaction. This keeps things simple for the higher level code,
- * because it always knows that the inode is locked and held in the transaction
- * that returns to it whether errors occur or not.  We don't mark the inode
- * dirty on error so that transactions can be easily aborted if possible.
  */
 int
-xfs_itruncate_extents(
-       struct xfs_trans        **tpp,
-       struct xfs_inode        *ip,
-       int                     whichfork,
+xfs_dir_ialloc(
+       xfs_trans_t     **tpp,          /* input: current transaction;
+                                          output: may be a new transaction. */
+       xfs_inode_t     *dp,            /* directory within whose allocate
+                                          the inode. */
+       umode_t         mode,
+       xfs_nlink_t     nlink,
+       xfs_dev_t       rdev,
+       prid_t          prid,           /* project id */
+       int             okalloc,        /* ok to allocate new space */
+       xfs_inode_t     **ipp,          /* pointer to inode; it will be
+                                          locked. */
+       int             *committed)
+
+{
+       xfs_trans_t     *tp;
+       xfs_trans_t     *ntp;
+       xfs_inode_t     *ip;
+       xfs_buf_t       *ialloc_context = NULL;
+       int             code;
+       void            *dqinfo;
+       uint            tflags;
+
+       tp = *tpp;
+       ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
+
+       /*
+        * xfs_ialloc will return a pointer to an incore inode if
+        * the Space Manager has an available inode on the free
+        * list. Otherwise, it will do an allocation and replenish
+        * the freelist.  Since we can only do one allocation per
+        * transaction without deadlocks, we will need to commit the
+        * current transaction and start a new one.  We will then
+        * need to call xfs_ialloc again to get the inode.
+        *
+        * If xfs_ialloc did an allocation to replenish the freelist,
+        * it returns the bp containing the head of the freelist as
+        * ialloc_context. We will hold a lock on it across the
+        * transaction commit so that no other process can steal
+        * the inode(s) that we've just allocated.
+        */
+       code = xfs_ialloc(tp, dp, mode, nlink, rdev, prid, okalloc,
+                         &ialloc_context, &ip);
+
+       /*
+        * Return an error if we were unable to allocate a new inode.
+        * This should only happen if we run out of space on disk or
+        * encounter a disk error.
+        */
+       if (code) {
+               *ipp = NULL;
+               return code;
+       }
+       if (!ialloc_context && !ip) {
+               *ipp = NULL;
+               return XFS_ERROR(ENOSPC);
+       }
+
+       /*
+        * If the AGI buffer is non-NULL, then we were unable to get an
+        * inode in one operation.  We need to commit the current
+        * transaction and call xfs_ialloc() again.  It is guaranteed
+        * to succeed the second time.
+        */
+       if (ialloc_context) {
+               struct xfs_trans_res tres;
+
+               /*
+                * Normally, xfs_trans_commit releases all the locks.
+                * We call bhold to hang on to the ialloc_context across
+                * the commit.  Holding this buffer prevents any other
+                * processes from doing any allocations in this
+                * allocation group.
+                */
+               xfs_trans_bhold(tp, ialloc_context);
+               /*
+                * Save the log reservation so we can use
+                * them in the next transaction.
+                */
+               tres.tr_logres = xfs_trans_get_log_res(tp);
+               tres.tr_logcount = xfs_trans_get_log_count(tp);
+
+               /*
+                * We want the quota changes to be associated with the next
+                * transaction, NOT this one. So, detach the dqinfo from this
+                * and attach it to the next transaction.
+                */
+               dqinfo = NULL;
+               tflags = 0;
+               if (tp->t_dqinfo) {
+                       dqinfo = (void *)tp->t_dqinfo;
+                       tp->t_dqinfo = NULL;
+                       tflags = tp->t_flags & XFS_TRANS_DQ_DIRTY;
+                       tp->t_flags &= ~(XFS_TRANS_DQ_DIRTY);
+               }
+
+               ntp = xfs_trans_dup(tp);
+               code = xfs_trans_commit(tp, 0);
+               tp = ntp;
+               if (committed != NULL) {
+                       *committed = 1;
+               }
+               /*
+                * If we get an error during the commit processing,
+                * release the buffer that is still held and return
+                * to the caller.
+                */
+               if (code) {
+                       xfs_buf_relse(ialloc_context);
+                       if (dqinfo) {
+                               tp->t_dqinfo = dqinfo;
+                               xfs_trans_free_dqinfo(tp);
+                       }
+                       *tpp = ntp;
+                       *ipp = NULL;
+                       return code;
+               }
+
+               /*
+                * transaction commit worked ok so we can drop the extra ticket
+                * reference that we gained in xfs_trans_dup()
+                */
+               xfs_log_ticket_put(tp->t_ticket);
+               tres.tr_logflags = XFS_TRANS_PERM_LOG_RES;
+               code = xfs_trans_reserve(tp, &tres, 0, 0);
+
+               /*
+                * Re-attach the quota info that we detached from prev trx.
+                */
+               if (dqinfo) {
+                       tp->t_dqinfo = dqinfo;
+                       tp->t_flags |= tflags;
+               }
+
+               if (code) {
+                       xfs_buf_relse(ialloc_context);
+                       *tpp = ntp;
+                       *ipp = NULL;
+                       return code;
+               }
+               xfs_trans_bjoin(tp, ialloc_context);
+
+               /*
+                * Call ialloc again. Since we've locked out all
+                * other allocations in this allocation group,
+                * this call should always succeed.
+                */
+               code = xfs_ialloc(tp, dp, mode, nlink, rdev, prid,
+                                 okalloc, &ialloc_context, &ip);
+
+               /*
+                * If we get an error at this point, return to the caller
+                * so that the current transaction can be aborted.
+                */
+               if (code) {
+                       *tpp = tp;
+                       *ipp = NULL;
+                       return code;
+               }
+               ASSERT(!ialloc_context && ip);
+
+       } else {
+               if (committed != NULL)
+                       *committed = 0;
+       }
+
+       *ipp = ip;
+       *tpp = tp;
+
+       return 0;
+}
+
+/*
+ * Decrement the link count on an inode & log the change.
+ * If this causes the link count to go to zero, initiate the
+ * logging activity required to truncate a file.
+ */
+int                            /* error */
+xfs_droplink(
+       xfs_trans_t *tp,
+       xfs_inode_t *ip)
+{
+       int     error;
+
+       xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);
+
+       ASSERT (ip->i_d.di_nlink > 0);
+       ip->i_d.di_nlink--;
+       drop_nlink(VFS_I(ip));
+       xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+
+       error = 0;
+       if (ip->i_d.di_nlink == 0) {
+               /*
+                * We're dropping the last link to this file.
+                * Move the on-disk inode to the AGI unlinked list.
+                * From xfs_inactive() we will pull the inode from
+                * the list and free it.
+                */
+               error = xfs_iunlink(tp, ip);
+       }
+       return error;
+}
+
+/*
+ * This gets called when the inode's version needs to be changed from 1 to 2.
+ * Currently this happens when the nlink field overflows the old 16-bit value
+ * or when chproj is called to change the project for the first time.
+ * As a side effect the superblock version will also get rev'd
+ * to contain the NLINK bit.
+ */
+void
+xfs_bump_ino_vers2(
+       xfs_trans_t     *tp,
+       xfs_inode_t     *ip)
+{
+       xfs_mount_t     *mp;
+
+       ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
+       ASSERT(ip->i_d.di_version == 1);
+
+       ip->i_d.di_version = 2;
+       ip->i_d.di_onlink = 0;
+       memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad));
+       mp = tp->t_mountp;
+       if (!xfs_sb_version_hasnlink(&mp->m_sb)) {
+               spin_lock(&mp->m_sb_lock);
+               if (!xfs_sb_version_hasnlink(&mp->m_sb)) {
+                       xfs_sb_version_addnlink(&mp->m_sb);
+                       spin_unlock(&mp->m_sb_lock);
+                       xfs_mod_sb(tp, XFS_SB_VERSIONNUM);
+               } else {
+                       spin_unlock(&mp->m_sb_lock);
+               }
+       }
+       /* Caller must log the inode */
+}
+
+/*
+ * Increment the link count on an inode & log the change.
+ */
+int
+xfs_bumplink(
+       xfs_trans_t *tp,
+       xfs_inode_t *ip)
+{
+       xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);
+
+       ASSERT(ip->i_d.di_nlink > 0);
+       ip->i_d.di_nlink++;
+       inc_nlink(VFS_I(ip));
+       if ((ip->i_d.di_version == 1) &&
+           (ip->i_d.di_nlink > XFS_MAXLINK_1)) {
+               /*
+                * The inode has increased its number of links beyond
+                * what can fit in an old format inode.  It now needs
+                * to be converted to a version 2 inode with a 32 bit
+                * link count.  If this is the first inode in the file
+                * system to do this, then we need to bump the superblock
+                * version number as well.
+                */
+               xfs_bump_ino_vers2(tp, ip);
+       }
+
+       xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+       return 0;
+}
+
+int
+xfs_create(
+       xfs_inode_t             *dp,
+       struct xfs_name         *name,
+       umode_t                 mode,
+       xfs_dev_t               rdev,
+       xfs_inode_t             **ipp)
+{
+       int                     is_dir = S_ISDIR(mode);
+       struct xfs_mount        *mp = dp->i_mount;
+       struct xfs_inode        *ip = NULL;
+       struct xfs_trans        *tp = NULL;
+       int                     error;
+       xfs_bmap_free_t         free_list;
+       xfs_fsblock_t           first_block;
+       bool                    unlock_dp_on_error = false;
+       uint                    cancel_flags;
+       int                     committed;
+       prid_t                  prid;
+       struct xfs_dquot        *udqp = NULL;
+       struct xfs_dquot        *gdqp = NULL;
+       struct xfs_dquot        *pdqp = NULL;
+       struct xfs_trans_res    tres;
+       uint                    resblks;
+
+       trace_xfs_create(dp, name);
+
+       if (XFS_FORCED_SHUTDOWN(mp))
+               return XFS_ERROR(EIO);
+
+       if (dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT)
+               prid = xfs_get_projid(dp);
+       else
+               prid = XFS_PROJID_DEFAULT;
+
+       /*
+        * Make sure that we have allocated dquot(s) on disk.
+        */
+       error = xfs_qm_vop_dqalloc(dp, xfs_kuid_to_uid(current_fsuid()),
+                                       xfs_kgid_to_gid(current_fsgid()), prid,
+                                       XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
+                                       &udqp, &gdqp, &pdqp);
+       if (error)
+               return error;
+
+       if (is_dir) {
+               rdev = 0;
+               resblks = XFS_MKDIR_SPACE_RES(mp, name->len);
+               tres.tr_logres = M_RES(mp)->tr_mkdir.tr_logres;
+               tres.tr_logcount = XFS_MKDIR_LOG_COUNT;
+               tp = xfs_trans_alloc(mp, XFS_TRANS_MKDIR);
+       } else {
+               resblks = XFS_CREATE_SPACE_RES(mp, name->len);
+               tres.tr_logres = M_RES(mp)->tr_create.tr_logres;
+               tres.tr_logcount = XFS_CREATE_LOG_COUNT;
+               tp = xfs_trans_alloc(mp, XFS_TRANS_CREATE);
+       }
+
+       cancel_flags = XFS_TRANS_RELEASE_LOG_RES;
+
+       /*
+        * Initially assume that the file does not exist and
+        * reserve the resources for that case.  If that is not
+        * the case we'll drop the one we have and get a more
+        * appropriate transaction later.
+        */
+       tres.tr_logflags = XFS_TRANS_PERM_LOG_RES;
+       error = xfs_trans_reserve(tp, &tres, resblks, 0);
+       if (error == ENOSPC) {
+               /* flush outstanding delalloc blocks and retry */
+               xfs_flush_inodes(mp);
+               error = xfs_trans_reserve(tp, &tres, resblks, 0);
+       }
+       if (error == ENOSPC) {
+               /* No space at all so try a "no-allocation" reservation */
+               resblks = 0;
+               error = xfs_trans_reserve(tp, &tres, 0, 0);
+       }
+       if (error) {
+               cancel_flags = 0;
+               goto out_trans_cancel;
+       }
+
+       xfs_ilock(dp, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT);
+       unlock_dp_on_error = true;
+
+       xfs_bmap_init(&free_list, &first_block);
+
+       /*
+        * Reserve disk quota and the inode.
+        */
+       error = xfs_trans_reserve_quota(tp, mp, udqp, gdqp,
+                                               pdqp, resblks, 1, 0);
+       if (error)
+               goto out_trans_cancel;
+
+       error = xfs_dir_canenter(tp, dp, name, resblks);
+       if (error)
+               goto out_trans_cancel;
+
+       /*
+        * A newly created regular or special file just has one directory
+        * entry pointing to them, but a directory also the "." entry
+        * pointing to itself.
+        */
+       error = xfs_dir_ialloc(&tp, dp, mode, is_dir ? 2 : 1, rdev,
+                              prid, resblks > 0, &ip, &committed);
+       if (error) {
+               if (error == ENOSPC)
+                       goto out_trans_cancel;
+               goto out_trans_abort;
+       }
+
+       /*
+        * Now we join the directory inode to the transaction.  We do not do it
+        * earlier because xfs_dir_ialloc might commit the previous transaction
+        * (and release all the locks).  An error from here on will result in
+        * the transaction cancel unlocking dp so don't do it explicitly in the
+        * error path.
+        */
+       xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
+       unlock_dp_on_error = false;
+
+       error = xfs_dir_createname(tp, dp, name, ip->i_ino,
+                                       &first_block, &free_list, resblks ?
+                                       resblks - XFS_IALLOC_SPACE_RES(mp) : 0);
+       if (error) {
+               ASSERT(error != ENOSPC);
+               goto out_trans_abort;
+       }
+       xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
+       xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
+
+       if (is_dir) {
+               error = xfs_dir_init(tp, ip, dp);
+               if (error)
+                       goto out_bmap_cancel;
+
+               error = xfs_bumplink(tp, dp);
+               if (error)
+                       goto out_bmap_cancel;
+       }
+
+       /*
+        * If this is a synchronous mount, make sure that the
+        * create transaction goes to disk before returning to
+        * the user.
+        */
+       if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
+               xfs_trans_set_sync(tp);
+
+       /*
+        * Attach the dquot(s) to the inodes and modify them incore.
+        * These ids of the inode couldn't have changed since the new
+        * inode has been locked ever since it was created.
+        */
+       xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);
+
+       error = xfs_bmap_finish(&tp, &free_list, &committed);
+       if (error)
+               goto out_bmap_cancel;
+
+       error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
+       if (error)
+               goto out_release_inode;
+
+       xfs_qm_dqrele(udqp);
+       xfs_qm_dqrele(gdqp);
+       xfs_qm_dqrele(pdqp);
+
+       *ipp = ip;
+       return 0;
+
+ out_bmap_cancel:
+       xfs_bmap_cancel(&free_list);
+ out_trans_abort:
+       cancel_flags |= XFS_TRANS_ABORT;
+ out_trans_cancel:
+       xfs_trans_cancel(tp, cancel_flags);
+ out_release_inode:
+       /*
+        * Wait until after the current transaction is aborted to
+        * release the inode.  This prevents recursive transactions
+        * and deadlocks from xfs_inactive.
+        */
+       if (ip)
+               IRELE(ip);
+
+       xfs_qm_dqrele(udqp);
+       xfs_qm_dqrele(gdqp);
+       xfs_qm_dqrele(pdqp);
+
+       if (unlock_dp_on_error)
+               xfs_iunlock(dp, XFS_ILOCK_EXCL);
+       return error;
+}
+
+int
+xfs_link(
+       xfs_inode_t             *tdp,
+       xfs_inode_t             *sip,
+       struct xfs_name         *target_name)
+{
+       xfs_mount_t             *mp = tdp->i_mount;
+       xfs_trans_t             *tp;
+       int                     error;
+       xfs_bmap_free_t         free_list;
+       xfs_fsblock_t           first_block;
+       int                     cancel_flags;
+       int                     committed;
+       int                     resblks;
+
+       trace_xfs_link(tdp, target_name);
+
+       ASSERT(!S_ISDIR(sip->i_d.di_mode));
+
+       if (XFS_FORCED_SHUTDOWN(mp))
+               return XFS_ERROR(EIO);
+
+       error = xfs_qm_dqattach(sip, 0);
+       if (error)
+               goto std_return;
+
+       error = xfs_qm_dqattach(tdp, 0);
+       if (error)
+               goto std_return;
+
+       tp = xfs_trans_alloc(mp, XFS_TRANS_LINK);
+       cancel_flags = XFS_TRANS_RELEASE_LOG_RES;
+       resblks = XFS_LINK_SPACE_RES(mp, target_name->len);
+       error = xfs_trans_reserve(tp, &M_RES(mp)->tr_link, resblks, 0);
+       if (error == ENOSPC) {
+               resblks = 0;
+               error = xfs_trans_reserve(tp, &M_RES(mp)->tr_link, 0, 0);
+       }
+       if (error) {
+               cancel_flags = 0;
+               goto error_return;
+       }
+
+       xfs_lock_two_inodes(sip, tdp, XFS_ILOCK_EXCL);
+
+       xfs_trans_ijoin(tp, sip, XFS_ILOCK_EXCL);
+       xfs_trans_ijoin(tp, tdp, XFS_ILOCK_EXCL);
+
+       /*
+        * If we are using project inheritance, we only allow hard link
+        * creation in our tree when the project IDs are the same; else
+        * the tree quota mechanism could be circumvented.
+        */
+       if (unlikely((tdp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
+                    (xfs_get_projid(tdp) != xfs_get_projid(sip)))) {
+               error = XFS_ERROR(EXDEV);
+               goto error_return;
+       }
+
+       error = xfs_dir_canenter(tp, tdp, target_name, resblks);
+       if (error)
+               goto error_return;
+
+       xfs_bmap_init(&free_list, &first_block);
+
+       error = xfs_dir_createname(tp, tdp, target_name, sip->i_ino,
+                                       &first_block, &free_list, resblks);
+       if (error)
+               goto abort_return;
+       xfs_trans_ichgtime(tp, tdp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
+       xfs_trans_log_inode(tp, tdp, XFS_ILOG_CORE);
+
+       error = xfs_bumplink(tp, sip);
+       if (error)
+               goto abort_return;
+
+       /*
+        * If this is a synchronous mount, make sure that the
+        * link transaction goes to disk before returning to
+        * the user.
+        */
+       if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC)) {
+               xfs_trans_set_sync(tp);
+       }
+
+       error = xfs_bmap_finish (&tp, &free_list, &committed);
+       if (error) {
+               xfs_bmap_cancel(&free_list);
+               goto abort_return;
+       }
+
+       return xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
+
+ abort_return:
+       cancel_flags |= XFS_TRANS_ABORT;
+ error_return:
+       xfs_trans_cancel(tp, cancel_flags);
+ std_return:
+       return error;
+}
+
+/*
+ * Free up the underlying blocks past new_size.  The new size must be smaller
+ * than the current size.  This routine can be used both for the attribute and
+ * data fork, and does not modify the inode size, which is left to the caller.
+ *
+ * The transaction passed to this routine must have made a permanent log
+ * reservation of at least XFS_ITRUNCATE_LOG_RES.  This routine may commit the
+ * given transaction and start new ones, so make sure everything involved in
+ * the transaction is tidy before calling here.  Some transaction will be
+ * returned to the caller to be committed.  The incoming transaction must
+ * already include the inode, and both inode locks must be held exclusively.
+ * The inode must also be "held" within the transaction.  On return the inode
+ * will be "held" within the returned transaction.  This routine does NOT
+ * require any disk space to be reserved for it within the transaction.
+ *
+ * If we get an error, we must return with the inode locked and linked into the
+ * current transaction. This keeps things simple for the higher level code,
+ * because it always knows that the inode is locked and held in the transaction
+ * that returns to it whether errors occur or not.  We don't mark the inode
+ * dirty on error so that transactions can be easily aborted if possible.
+ */
+int
+xfs_itruncate_extents(
+       struct xfs_trans        **tpp,
+       struct xfs_inode        *ip,
+       int                     whichfork,
        xfs_fsize_t             new_size)
 {
        struct xfs_mount        *mp = ip->i_mount;
@@ -1572,37 +1544,299 @@ xfs_itruncate_extents(
                        goto out;
 
                /*
-                * Transaction commit worked ok so we can drop the extra ticket
-                * reference that we gained in xfs_trans_dup()
+                * Transaction commit worked ok so we can drop the extra ticket
+                * reference that we gained in xfs_trans_dup()
+                */
+               xfs_log_ticket_put(tp->t_ticket);
+               error = xfs_trans_reserve(tp, &M_RES(mp)->tr_itruncate, 0, 0);
+               if (error)
+                       goto out;
+       }
+
+       /*
+        * Always re-log the inode so that our permanent transaction can keep
+        * on rolling it forward in the log.
+        */
+       xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+
+       trace_xfs_itruncate_extents_end(ip, new_size);
+
+out:
+       *tpp = tp;
+       return error;
+out_bmap_cancel:
+       /*
+        * If the bunmapi call encounters an error, return to the caller where
+        * the transaction can be properly aborted.  We just need to make sure
+        * we're not holding any resources that we were not when we came in.
+        */
+       xfs_bmap_cancel(&free_list);
+       goto out;
+}
+
+int
+xfs_release(
+       xfs_inode_t     *ip)
+{
+       xfs_mount_t     *mp = ip->i_mount;
+       int             error;
+
+       if (!S_ISREG(ip->i_d.di_mode) || (ip->i_d.di_mode == 0))
+               return 0;
+
+       /* If this is a read-only mount, don't do this (would generate I/O) */
+       if (mp->m_flags & XFS_MOUNT_RDONLY)
+               return 0;
+
+       if (!XFS_FORCED_SHUTDOWN(mp)) {
+               int truncated;
+
+               /*
+                * If we are using filestreams, and we have an unlinked
+                * file that we are processing the last close on, then nothing
+                * will be able to reopen and write to this file. Purge this
+                * inode from the filestreams cache so that it doesn't delay
+                * teardown of the inode.
+                */
+               if ((ip->i_d.di_nlink == 0) && xfs_inode_is_filestream(ip))
+                       xfs_filestream_deassociate(ip);
+
+               /*
+                * If we previously truncated this file and removed old data
+                * in the process, we want to initiate "early" writeout on
+                * the last close.  This is an attempt to combat the notorious
+                * NULL files problem which is particularly noticeable from a
+                * truncate down, buffered (re-)write (delalloc), followed by
+                * a crash.  What we are effectively doing here is
+                * significantly reducing the time window where we'd otherwise
+                * be exposed to that problem.
+                */
+               truncated = xfs_iflags_test_and_clear(ip, XFS_ITRUNCATED);
+               if (truncated) {
+                       xfs_iflags_clear(ip, XFS_IDIRTY_RELEASE);
+                       if (VN_DIRTY(VFS_I(ip)) && ip->i_delayed_blks > 0) {
+                               error = -filemap_flush(VFS_I(ip)->i_mapping);
+                               if (error)
+                                       return error;
+                       }
+               }
+       }
+
+       if (ip->i_d.di_nlink == 0)
+               return 0;
+
+       if (xfs_can_free_eofblocks(ip, false)) {
+
+               /*
+                * If we can't get the iolock just skip truncating the blocks
+                * past EOF because we could deadlock with the mmap_sem
+                * otherwise.  We'll get another chance to drop them once the
+                * last reference to the inode is dropped, so we'll never leak
+                * blocks permanently.
+                *
+                * Further, check if the inode is being opened, written and
+                * closed frequently and we have delayed allocation blocks
+                * outstanding (e.g. streaming writes from the NFS server),
+                * truncating the blocks past EOF will cause fragmentation to
+                * occur.
+                *
+                * In this case don't do the truncation, either, but we have to
+                * be careful how we detect this case. Blocks beyond EOF show
+                * up as i_delayed_blks even when the inode is clean, so we
+                * need to truncate them away first before checking for a dirty
+                * release. Hence on the first dirty close we will still remove
+                * the speculative allocation, but after that we will leave it
+                * in place.
+                */
+               if (xfs_iflags_test(ip, XFS_IDIRTY_RELEASE))
+                       return 0;
+
+               error = xfs_free_eofblocks(mp, ip, true);
+               if (error && error != EAGAIN)
+                       return error;
+
+               /* delalloc blocks after truncation means it really is dirty */
+               if (ip->i_delayed_blks)
+                       xfs_iflags_set(ip, XFS_IDIRTY_RELEASE);
+       }
+       return 0;
+}
+
+/*
+ * xfs_inactive
+ *
+ * This is called when the vnode reference count for the vnode
+ * goes to zero.  If the file has been unlinked, then it must
+ * now be truncated.  Also, we clear all of the read-ahead state
+ * kept for the inode here since the file is now closed.
+ */
+int
+xfs_inactive(
+       xfs_inode_t     *ip)
+{
+       xfs_bmap_free_t         free_list;
+       xfs_fsblock_t           first_block;
+       int                     committed;
+       struct xfs_trans        *tp;
+       struct xfs_mount        *mp;
+       struct xfs_trans_res    *resp;
+       int                     error;
+       int                     truncate = 0;
+
+       /*
+        * If the inode is already free, then there can be nothing
+        * to clean up here.
+        */
+       if (ip->i_d.di_mode == 0 || is_bad_inode(VFS_I(ip))) {
+               ASSERT(ip->i_df.if_real_bytes == 0);
+               ASSERT(ip->i_df.if_broot_bytes == 0);
+               return VN_INACTIVE_CACHE;
+       }
+
+       mp = ip->i_mount;
+
+       error = 0;
+
+       /* If this is a read-only mount, don't do this (would generate I/O) */
+       if (mp->m_flags & XFS_MOUNT_RDONLY)
+               goto out;
+
+       if (ip->i_d.di_nlink != 0) {
+               /*
+                * force is true because we are evicting an inode from the
+                * cache. Post-eof blocks must be freed, lest we end up with
+                * broken free space accounting.
+                */
+               if (xfs_can_free_eofblocks(ip, true)) {
+                       error = xfs_free_eofblocks(mp, ip, false);
+                       if (error)
+                               return VN_INACTIVE_CACHE;
+               }
+               goto out;
+       }
+
+       if (S_ISREG(ip->i_d.di_mode) &&
+           (ip->i_d.di_size != 0 || XFS_ISIZE(ip) != 0 ||
+            ip->i_d.di_nextents > 0 || ip->i_delayed_blks > 0))
+               truncate = 1;
+
+       error = xfs_qm_dqattach(ip, 0);
+       if (error)
+               return VN_INACTIVE_CACHE;
+
+       tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE);
+       resp = (truncate || S_ISLNK(ip->i_d.di_mode)) ?
+               &M_RES(mp)->tr_itruncate : &M_RES(mp)->tr_ifree;
+
+       error = xfs_trans_reserve(tp, resp, 0, 0);
+       if (error) {
+               ASSERT(XFS_FORCED_SHUTDOWN(mp));
+               xfs_trans_cancel(tp, 0);
+               return VN_INACTIVE_CACHE;
+       }
+
+       xfs_ilock(ip, XFS_ILOCK_EXCL);
+       xfs_trans_ijoin(tp, ip, 0);
+
+       if (S_ISLNK(ip->i_d.di_mode)) {
+               error = xfs_inactive_symlink(ip, &tp);
+               if (error)
+                       goto out_cancel;
+       } else if (truncate) {
+               ip->i_d.di_size = 0;
+               xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+
+               error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0);
+               if (error)
+                       goto out_cancel;
+
+               ASSERT(ip->i_d.di_nextents == 0);
+       }
+
+       /*
+        * If there are attributes associated with the file then blow them away
+        * now.  The code calls a routine that recursively deconstructs the
+        * attribute fork.  We need to just commit the current transaction
+        * because we can't use it for xfs_attr_inactive().
+        */
+       if (ip->i_d.di_anextents > 0) {
+               ASSERT(ip->i_d.di_forkoff != 0);
+
+               error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
+               if (error)
+                       goto out_unlock;
+
+               xfs_iunlock(ip, XFS_ILOCK_EXCL);
+
+               error = xfs_attr_inactive(ip);
+               if (error)
+                       goto out;
+
+               tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE);
+               error = xfs_trans_reserve(tp, &M_RES(mp)->tr_ifree, 0, 0);
+               if (error) {
+                       xfs_trans_cancel(tp, 0);
+                       goto out;
+               }
+
+               xfs_ilock(ip, XFS_ILOCK_EXCL);
+               xfs_trans_ijoin(tp, ip, 0);
+       }
+
+       if (ip->i_afp)
+               xfs_idestroy_fork(ip, XFS_ATTR_FORK);
+
+       ASSERT(ip->i_d.di_anextents == 0);
+
+       /*
+        * Free the inode.
+        */
+       xfs_bmap_init(&free_list, &first_block);
+       error = xfs_ifree(tp, ip, &free_list);
+       if (error) {
+               /*
+                * If we fail to free the inode, shut down.  The cancel
+                * might do that, we need to make sure.  Otherwise the
+                * inode might be lost for a long time or forever.
+                */
+               if (!XFS_FORCED_SHUTDOWN(mp)) {
+                       xfs_notice(mp, "%s: xfs_ifree returned error %d",
+                               __func__, error);
+                       xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
+               }
+               xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES|XFS_TRANS_ABORT);
+       } else {
+               /*
+                * Credit the quota account(s). The inode is gone.
+                */
+               xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_ICOUNT, -1);
+
+               /*
+                * Just ignore errors at this point.  There is nothing we can
+                * do except to try to keep going. Make sure it's not a silent
+                * error.
                 */
-               xfs_log_ticket_put(tp->t_ticket);
-               error = xfs_trans_reserve(tp, 0,
-                                       XFS_ITRUNCATE_LOG_RES(mp), 0,
-                                       XFS_TRANS_PERM_LOG_RES,
-                                       XFS_ITRUNCATE_LOG_COUNT);
+               error = xfs_bmap_finish(&tp,  &free_list, &committed);
                if (error)
-                       goto out;
+                       xfs_notice(mp, "%s: xfs_bmap_finish returned error %d",
+                               __func__, error);
+               error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
+               if (error)
+                       xfs_notice(mp, "%s: xfs_trans_commit returned error %d",
+                               __func__, error);
        }
 
        /*
-        * Always re-log the inode so that our permanent transaction can keep
-        * on rolling it forward in the log.
+        * Release the dquots held by inode, if any.
         */
-       xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
-
-       trace_xfs_itruncate_extents_end(ip, new_size);
-
+       xfs_qm_dqdetach(ip);
+out_unlock:
+       xfs_iunlock(ip, XFS_ILOCK_EXCL);
 out:
-       *tpp = tp;
-       return error;
-out_bmap_cancel:
-       /*
-        * If the bunmapi call encounters an error, return to the caller where
-        * the transaction can be properly aborted.  We just need to make sure
-        * we're not holding any resources that we were not when we came in.
-        */
-       xfs_bmap_cancel(&free_list);
-       goto out;
+       return VN_INACTIVE_CACHE;
+out_cancel:
+       xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
+       goto out_unlock;
 }
 
 /*
@@ -1861,7 +2095,7 @@ xfs_iunlink_remove(
 }
 
 /*
- * A big issue when freeing the inode cluster is is that we _cannot_ skip any
+ * A big issue when freeing the inode cluster is that we _cannot_ skip any
  * inodes that are in memory - they all must be marked stale and attached to
  * the cluster buffer.
  */
@@ -2093,272 +2327,6 @@ xfs_ifree(
        return error;
 }
 
-/*
- * Reallocate the space for if_broot based on the number of records
- * being added or deleted as indicated in rec_diff.  Move the records
- * and pointers in if_broot to fit the new size.  When shrinking this
- * will eliminate holes between the records and pointers created by
- * the caller.  When growing this will create holes to be filled in
- * by the caller.
- *
- * The caller must not request to add more records than would fit in
- * the on-disk inode root.  If the if_broot is currently NULL, then
- * if we adding records one will be allocated.  The caller must also
- * not request that the number of records go below zero, although
- * it can go to zero.
- *
- * ip -- the inode whose if_broot area is changing
- * ext_diff -- the change in the number of records, positive or negative,
- *      requested for the if_broot array.
- */
-void
-xfs_iroot_realloc(
-       xfs_inode_t             *ip,
-       int                     rec_diff,
-       int                     whichfork)
-{
-       struct xfs_mount        *mp = ip->i_mount;
-       int                     cur_max;
-       xfs_ifork_t             *ifp;
-       struct xfs_btree_block  *new_broot;
-       int                     new_max;
-       size_t                  new_size;
-       char                    *np;
-       char                    *op;
-
-       /*
-        * Handle the degenerate case quietly.
-        */
-       if (rec_diff == 0) {
-               return;
-       }
-
-       ifp = XFS_IFORK_PTR(ip, whichfork);
-       if (rec_diff > 0) {
-               /*
-                * If there wasn't any memory allocated before, just
-                * allocate it now and get out.
-                */
-               if (ifp->if_broot_bytes == 0) {
-                       new_size = XFS_BMAP_BROOT_SPACE_CALC(mp, rec_diff);
-                       ifp->if_broot = kmem_alloc(new_size, KM_SLEEP | KM_NOFS);
-                       ifp->if_broot_bytes = (int)new_size;
-                       return;
-               }
-
-               /*
-                * If there is already an existing if_broot, then we need
-                * to realloc() it and shift the pointers to their new
-                * location.  The records don't change location because
-                * they are kept butted up against the btree block header.
-                */
-               cur_max = xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0);
-               new_max = cur_max + rec_diff;
-               new_size = XFS_BMAP_BROOT_SPACE_CALC(mp, new_max);
-               ifp->if_broot = kmem_realloc(ifp->if_broot, new_size,
-                               XFS_BMAP_BROOT_SPACE_CALC(mp, cur_max),
-                               KM_SLEEP | KM_NOFS);
-               op = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, ifp->if_broot, 1,
-                                                    ifp->if_broot_bytes);
-               np = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, ifp->if_broot, 1,
-                                                    (int)new_size);
-               ifp->if_broot_bytes = (int)new_size;
-               ASSERT(XFS_BMAP_BMDR_SPACE(ifp->if_broot) <=
-                       XFS_IFORK_SIZE(ip, whichfork));
-               memmove(np, op, cur_max * (uint)sizeof(xfs_dfsbno_t));
-               return;
-       }
-
-       /*
-        * rec_diff is less than 0.  In this case, we are shrinking the
-        * if_broot buffer.  It must already exist.  If we go to zero
-        * records, just get rid of the root and clear the status bit.
-        */
-       ASSERT((ifp->if_broot != NULL) && (ifp->if_broot_bytes > 0));
-       cur_max = xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0);
-       new_max = cur_max + rec_diff;
-       ASSERT(new_max >= 0);
-       if (new_max > 0)
-               new_size = XFS_BMAP_BROOT_SPACE_CALC(mp, new_max);
-       else
-               new_size = 0;
-       if (new_size > 0) {
-               new_broot = kmem_alloc(new_size, KM_SLEEP | KM_NOFS);
-               /*
-                * First copy over the btree block header.
-                */
-               memcpy(new_broot, ifp->if_broot,
-                       XFS_BMBT_BLOCK_LEN(ip->i_mount));
-       } else {
-               new_broot = NULL;
-               ifp->if_flags &= ~XFS_IFBROOT;
-       }
-
-       /*
-        * Only copy the records and pointers if there are any.
-        */
-       if (new_max > 0) {
-               /*
-                * First copy the records.
-                */
-               op = (char *)XFS_BMBT_REC_ADDR(mp, ifp->if_broot, 1);
-               np = (char *)XFS_BMBT_REC_ADDR(mp, new_broot, 1);
-               memcpy(np, op, new_max * (uint)sizeof(xfs_bmbt_rec_t));
-
-               /*
-                * Then copy the pointers.
-                */
-               op = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, ifp->if_broot, 1,
-                                                    ifp->if_broot_bytes);
-               np = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, new_broot, 1,
-                                                    (int)new_size);
-               memcpy(np, op, new_max * (uint)sizeof(xfs_dfsbno_t));
-       }
-       kmem_free(ifp->if_broot);
-       ifp->if_broot = new_broot;
-       ifp->if_broot_bytes = (int)new_size;
-       if (ifp->if_broot)
-               ASSERT(XFS_BMAP_BMDR_SPACE(ifp->if_broot) <=
-                       XFS_IFORK_SIZE(ip, whichfork));
-       return;
-}
-
-
-/*
- * This is called when the amount of space needed for if_data
- * is increased or decreased.  The change in size is indicated by
- * the number of bytes that need to be added or deleted in the
- * byte_diff parameter.
- *
- * If the amount of space needed has decreased below the size of the
- * inline buffer, then switch to using the inline buffer.  Otherwise,
- * use kmem_realloc() or kmem_alloc() to adjust the size of the buffer
- * to what is needed.
- *
- * ip -- the inode whose if_data area is changing
- * byte_diff -- the change in the number of bytes, positive or negative,
- *      requested for the if_data array.
- */
-void
-xfs_idata_realloc(
-       xfs_inode_t     *ip,
-       int             byte_diff,
-       int             whichfork)
-{
-       xfs_ifork_t     *ifp;
-       int             new_size;
-       int             real_size;
-
-       if (byte_diff == 0) {
-               return;
-       }
-
-       ifp = XFS_IFORK_PTR(ip, whichfork);
-       new_size = (int)ifp->if_bytes + byte_diff;
-       ASSERT(new_size >= 0);
-
-       if (new_size == 0) {
-               if (ifp->if_u1.if_data != ifp->if_u2.if_inline_data) {
-                       kmem_free(ifp->if_u1.if_data);
-               }
-               ifp->if_u1.if_data = NULL;
-               real_size = 0;
-       } else if (new_size <= sizeof(ifp->if_u2.if_inline_data)) {
-               /*
-                * If the valid extents/data can fit in if_inline_ext/data,
-                * copy them from the malloc'd vector and free it.
-                */
-               if (ifp->if_u1.if_data == NULL) {
-                       ifp->if_u1.if_data = ifp->if_u2.if_inline_data;
-               } else if (ifp->if_u1.if_data != ifp->if_u2.if_inline_data) {
-                       ASSERT(ifp->if_real_bytes != 0);
-                       memcpy(ifp->if_u2.if_inline_data, ifp->if_u1.if_data,
-                             new_size);
-                       kmem_free(ifp->if_u1.if_data);
-                       ifp->if_u1.if_data = ifp->if_u2.if_inline_data;
-               }
-               real_size = 0;
-       } else {
-               /*
-                * Stuck with malloc/realloc.
-                * For inline data, the underlying buffer must be
-                * a multiple of 4 bytes in size so that it can be
-                * logged and stay on word boundaries.  We enforce
-                * that here.
-                */
-               real_size = roundup(new_size, 4);
-               if (ifp->if_u1.if_data == NULL) {
-                       ASSERT(ifp->if_real_bytes == 0);
-                       ifp->if_u1.if_data = kmem_alloc(real_size,
-                                                       KM_SLEEP | KM_NOFS);
-               } else if (ifp->if_u1.if_data != ifp->if_u2.if_inline_data) {
-                       /*
-                        * Only do the realloc if the underlying size
-                        * is really changing.
-                        */
-                       if (ifp->if_real_bytes != real_size) {
-                               ifp->if_u1.if_data =
-                                       kmem_realloc(ifp->if_u1.if_data,
-                                                       real_size,
-                                                       ifp->if_real_bytes,
-                                                       KM_SLEEP | KM_NOFS);
-                       }
-               } else {
-                       ASSERT(ifp->if_real_bytes == 0);
-                       ifp->if_u1.if_data = kmem_alloc(real_size,
-                                                       KM_SLEEP | KM_NOFS);
-                       memcpy(ifp->if_u1.if_data, ifp->if_u2.if_inline_data,
-                               ifp->if_bytes);
-               }
-       }
-       ifp->if_real_bytes = real_size;
-       ifp->if_bytes = new_size;
-       ASSERT(ifp->if_bytes <= XFS_IFORK_SIZE(ip, whichfork));
-}
-
-void
-xfs_idestroy_fork(
-       xfs_inode_t     *ip,
-       int             whichfork)
-{
-       xfs_ifork_t     *ifp;
-
-       ifp = XFS_IFORK_PTR(ip, whichfork);
-       if (ifp->if_broot != NULL) {
-               kmem_free(ifp->if_broot);
-               ifp->if_broot = NULL;
-       }
-
-       /*
-        * If the format is local, then we can't have an extents
-        * array so just look for an inline data array.  If we're
-        * not local then we may or may not have an extents list,
-        * so check and free it up if we do.
-        */
-       if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
-               if ((ifp->if_u1.if_data != ifp->if_u2.if_inline_data) &&
-                   (ifp->if_u1.if_data != NULL)) {
-                       ASSERT(ifp->if_real_bytes != 0);
-                       kmem_free(ifp->if_u1.if_data);
-                       ifp->if_u1.if_data = NULL;
-                       ifp->if_real_bytes = 0;
-               }
-       } else if ((ifp->if_flags & XFS_IFEXTENTS) &&
-                  ((ifp->if_flags & XFS_IFEXTIREC) ||
-                   ((ifp->if_u1.if_extents != NULL) &&
-                    (ifp->if_u1.if_extents != ifp->if_u2.if_inline_ext)))) {
-               ASSERT(ifp->if_real_bytes != 0);
-               xfs_iext_destroy(ifp);
-       }
-       ASSERT(ifp->if_u1.if_extents == NULL ||
-              ifp->if_u1.if_extents == ifp->if_u2.if_inline_ext);
-       ASSERT(ifp->if_real_bytes == 0);
-       if (whichfork == XFS_ATTR_FORK) {
-               kmem_zone_free(xfs_ifork_zone, ip->i_afp);
-               ip->i_afp = NULL;
-       }
-}
-
 /*
  * This is called to unpin an inode.  The caller must have the inode locked
  * in at least shared mode so that the buffer cannot be subsequently pinned
@@ -2402,162 +2370,471 @@ xfs_iunpin_wait(
                __xfs_iunpin_wait(ip);
 }
 
-/*
- * xfs_iextents_copy()
- *
- * This is called to copy the REAL extents (as opposed to the delayed
- * allocation extents) from the inode into the given buffer.  It
- * returns the number of bytes copied into the buffer.
- *
- * If there are no delayed allocation extents, then we can just
- * memcpy() the extents into the buffer.  Otherwise, we need to
- * examine each extent in turn and skip those which are delayed.
- */
 int
-xfs_iextents_copy(
-       xfs_inode_t             *ip,
-       xfs_bmbt_rec_t          *dp,
-       int                     whichfork)
+xfs_remove(
+       xfs_inode_t             *dp,
+       struct xfs_name         *name,
+       xfs_inode_t             *ip)
 {
-       int                     copied;
-       int                     i;
-       xfs_ifork_t             *ifp;
-       int                     nrecs;
-       xfs_fsblock_t           start_block;
+       xfs_mount_t             *mp = dp->i_mount;
+       xfs_trans_t             *tp = NULL;
+       int                     is_dir = S_ISDIR(ip->i_d.di_mode);
+       int                     error = 0;
+       xfs_bmap_free_t         free_list;
+       xfs_fsblock_t           first_block;
+       int                     cancel_flags;
+       int                     committed;
+       int                     link_zero;
+       uint                    resblks;
+       uint                    log_count;
 
-       ifp = XFS_IFORK_PTR(ip, whichfork);
-       ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
-       ASSERT(ifp->if_bytes > 0);
+       trace_xfs_remove(dp, name);
+
+       if (XFS_FORCED_SHUTDOWN(mp))
+               return XFS_ERROR(EIO);
+
+       error = xfs_qm_dqattach(dp, 0);
+       if (error)
+               goto std_return;
 
-       nrecs = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
-       XFS_BMAP_TRACE_EXLIST(ip, nrecs, whichfork);
-       ASSERT(nrecs > 0);
+       error = xfs_qm_dqattach(ip, 0);
+       if (error)
+               goto std_return;
+
+       if (is_dir) {
+               tp = xfs_trans_alloc(mp, XFS_TRANS_RMDIR);
+               log_count = XFS_DEFAULT_LOG_COUNT;
+       } else {
+               tp = xfs_trans_alloc(mp, XFS_TRANS_REMOVE);
+               log_count = XFS_REMOVE_LOG_COUNT;
+       }
+       cancel_flags = XFS_TRANS_RELEASE_LOG_RES;
 
        /*
-        * There are some delayed allocation extents in the
-        * inode, so copy the extents one at a time and skip
-        * the delayed ones.  There must be at least one
-        * non-delayed extent.
+        * We try to get the real space reservation first,
+        * allowing for directory btree deletion(s) implying
+        * possible bmap insert(s).  If we can't get the space
+        * reservation then we use 0 instead, and avoid the bmap
+        * btree insert(s) in the directory code by, if the bmap
+        * insert tries to happen, instead trimming the LAST
+        * block from the directory.
         */
-       copied = 0;
-       for (i = 0; i < nrecs; i++) {
-               xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, i);
-               start_block = xfs_bmbt_get_startblock(ep);
-               if (isnullstartblock(start_block)) {
-                       /*
-                        * It's a delayed allocation extent, so skip it.
-                        */
-                       continue;
+       resblks = XFS_REMOVE_SPACE_RES(mp);
+       error = xfs_trans_reserve(tp, &M_RES(mp)->tr_remove, resblks, 0);
+       if (error == ENOSPC) {
+               resblks = 0;
+               error = xfs_trans_reserve(tp, &M_RES(mp)->tr_remove, 0, 0);
+       }
+       if (error) {
+               ASSERT(error != ENOSPC);
+               cancel_flags = 0;
+               goto out_trans_cancel;
+       }
+
+       xfs_lock_two_inodes(dp, ip, XFS_ILOCK_EXCL);
+
+       xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
+       xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
+
+       /*
+        * If we're removing a directory perform some additional validation.
+        */
+       if (is_dir) {
+               ASSERT(ip->i_d.di_nlink >= 2);
+               if (ip->i_d.di_nlink != 2) {
+                       error = XFS_ERROR(ENOTEMPTY);
+                       goto out_trans_cancel;
                }
+               if (!xfs_dir_isempty(ip)) {
+                       error = XFS_ERROR(ENOTEMPTY);
+                       goto out_trans_cancel;
+               }
+       }
+
+       xfs_bmap_init(&free_list, &first_block);
+       error = xfs_dir_removename(tp, dp, name, ip->i_ino,
+                                       &first_block, &free_list, resblks);
+       if (error) {
+               ASSERT(error != ENOENT);
+               goto out_bmap_cancel;
+       }
+       xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
+
+       if (is_dir) {
+               /*
+                * Drop the link from ip's "..".
+                */
+               error = xfs_droplink(tp, dp);
+               if (error)
+                       goto out_bmap_cancel;
 
-               /* Translate to on disk format */
-               put_unaligned(cpu_to_be64(ep->l0), &dp->l0);
-               put_unaligned(cpu_to_be64(ep->l1), &dp->l1);
-               dp++;
-               copied++;
+               /*
+                * Drop the "." link from ip to self.
+                */
+               error = xfs_droplink(tp, ip);
+               if (error)
+                       goto out_bmap_cancel;
+       } else {
+               /*
+                * When removing a non-directory we need to log the parent
+                * inode here.  For a directory this is done implicitly
+                * by the xfs_droplink call for the ".." entry.
+                */
+               xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
        }
-       ASSERT(copied != 0);
-       xfs_validate_extents(ifp, copied, XFS_EXTFMT_INODE(ip));
 
-       return (copied * (uint)sizeof(xfs_bmbt_rec_t));
+       /*
+        * Drop the link from dp to ip.
+        */
+       error = xfs_droplink(tp, ip);
+       if (error)
+               goto out_bmap_cancel;
+
+       /*
+        * Determine if this is the last link while
+        * we are in the transaction.
+        */
+       link_zero = (ip->i_d.di_nlink == 0);
+
+       /*
+        * If this is a synchronous mount, make sure that the
+        * remove transaction goes to disk before returning to
+        * the user.
+        */
+       if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
+               xfs_trans_set_sync(tp);
+
+       error = xfs_bmap_finish(&tp, &free_list, &committed);
+       if (error)
+               goto out_bmap_cancel;
+
+       error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
+       if (error)
+               goto std_return;
+
+       /*
+        * If we are using filestreams, kill the stream association.
+        * If the file is still open it may get a new one but that
+        * will get killed on last close in xfs_close() so we don't
+        * have to worry about that.
+        */
+       if (!is_dir && link_zero && xfs_inode_is_filestream(ip))
+               xfs_filestream_deassociate(ip);
+
+       return 0;
+
+ out_bmap_cancel:
+       xfs_bmap_cancel(&free_list);
+       cancel_flags |= XFS_TRANS_ABORT;
+ out_trans_cancel:
+       xfs_trans_cancel(tp, cancel_flags);
+ std_return:
+       return error;
 }
 
 /*
- * Each of the following cases stores data into the same region
- * of the on-disk inode, so only one of them can be valid at
- * any given time. While it is possible to have conflicting formats
- * and log flags, e.g. having XFS_ILOG_?DATA set when the fork is
- * in EXTENTS format, this can only happen when the fork has
- * changed formats after being modified but before being flushed.
- * In these cases, the format always takes precedence, because the
- * format indicates the current state of the fork.
+ * Enter all inodes for a rename transaction into a sorted array.
  */
-/*ARGSUSED*/
 STATIC void
-xfs_iflush_fork(
-       xfs_inode_t             *ip,
-       xfs_dinode_t            *dip,
-       xfs_inode_log_item_t    *iip,
-       int                     whichfork,
-       xfs_buf_t               *bp)
-{
-       char                    *cp;
-       xfs_ifork_t             *ifp;
-       xfs_mount_t             *mp;
-       static const short      brootflag[2] =
-               { XFS_ILOG_DBROOT, XFS_ILOG_ABROOT };
-       static const short      dataflag[2] =
-               { XFS_ILOG_DDATA, XFS_ILOG_ADATA };
-       static const short      extflag[2] =
-               { XFS_ILOG_DEXT, XFS_ILOG_AEXT };
-
-       if (!iip)
-               return;
-       ifp = XFS_IFORK_PTR(ip, whichfork);
-       /*
-        * This can happen if we gave up in iformat in an error path,
-        * for the attribute fork.
-        */
-       if (!ifp) {
-               ASSERT(whichfork == XFS_ATTR_FORK);
-               return;
-       }
-       cp = XFS_DFORK_PTR(dip, whichfork);
-       mp = ip->i_mount;
-       switch (XFS_IFORK_FORMAT(ip, whichfork)) {
-       case XFS_DINODE_FMT_LOCAL:
-               if ((iip->ili_fields & dataflag[whichfork]) &&
-                   (ifp->if_bytes > 0)) {
-                       ASSERT(ifp->if_u1.if_data != NULL);
-                       ASSERT(ifp->if_bytes <= XFS_IFORK_SIZE(ip, whichfork));
-                       memcpy(cp, ifp->if_u1.if_data, ifp->if_bytes);
+xfs_sort_for_rename(
+       xfs_inode_t     *dp1,   /* in: old (source) directory inode */
+       xfs_inode_t     *dp2,   /* in: new (target) directory inode */
+       xfs_inode_t     *ip1,   /* in: inode of old entry */
+       xfs_inode_t     *ip2,   /* in: inode of new entry, if it
+                                  already exists, NULL otherwise. */
+       xfs_inode_t     **i_tab,/* out: array of inode returned, sorted */
+       int             *num_inodes)  /* out: number of inodes in array */
+{
+       xfs_inode_t             *temp;
+       int                     i, j;
+
+       /*
+        * i_tab contains a list of pointers to inodes.  We initialize
+        * the table here & we'll sort it.  We will then use it to
+        * order the acquisition of the inode locks.
+        *
+        * Note that the table may contain duplicates.  e.g., dp1 == dp2.
+        */
+       i_tab[0] = dp1;
+       i_tab[1] = dp2;
+       i_tab[2] = ip1;
+       if (ip2) {
+               *num_inodes = 4;
+               i_tab[3] = ip2;
+       } else {
+               *num_inodes = 3;
+               i_tab[3] = NULL;
+       }
+
+       /*
+        * Sort the elements via bubble sort.  (Remember, there are at
+        * most 4 elements to sort, so this is adequate.)
+        */
+       for (i = 0; i < *num_inodes; i++) {
+               for (j = 1; j < *num_inodes; j++) {
+                       if (i_tab[j]->i_ino < i_tab[j-1]->i_ino) {
+                               temp = i_tab[j];
+                               i_tab[j] = i_tab[j-1];
+                               i_tab[j-1] = temp;
+                       }
                }
-               break;
+       }
+}
+
+/*
+ * xfs_rename
+ */
+int
+xfs_rename(
+       xfs_inode_t     *src_dp,
+       struct xfs_name *src_name,
+       xfs_inode_t     *src_ip,
+       xfs_inode_t     *target_dp,
+       struct xfs_name *target_name,
+       xfs_inode_t     *target_ip)
+{
+       xfs_trans_t     *tp = NULL;
+       xfs_mount_t     *mp = src_dp->i_mount;
+       int             new_parent;             /* moving to a new dir */
+       int             src_is_directory;       /* src_name is a directory */
+       int             error;
+       xfs_bmap_free_t free_list;
+       xfs_fsblock_t   first_block;
+       int             cancel_flags;
+       int             committed;
+       xfs_inode_t     *inodes[4];
+       int             spaceres;
+       int             num_inodes;
+
+       trace_xfs_rename(src_dp, target_dp, src_name, target_name);
+
+       new_parent = (src_dp != target_dp);
+       src_is_directory = S_ISDIR(src_ip->i_d.di_mode);
+
+       xfs_sort_for_rename(src_dp, target_dp, src_ip, target_ip,
+                               inodes, &num_inodes);
+
+       xfs_bmap_init(&free_list, &first_block);
+       tp = xfs_trans_alloc(mp, XFS_TRANS_RENAME);
+       cancel_flags = XFS_TRANS_RELEASE_LOG_RES;
+       spaceres = XFS_RENAME_SPACE_RES(mp, target_name->len);
+       error = xfs_trans_reserve(tp, &M_RES(mp)->tr_rename, spaceres, 0);
+       if (error == ENOSPC) {
+               spaceres = 0;
+               error = xfs_trans_reserve(tp, &M_RES(mp)->tr_rename, 0, 0);
+       }
+       if (error) {
+               xfs_trans_cancel(tp, 0);
+               goto std_return;
+       }
+
+       /*
+        * Attach the dquots to the inodes
+        */
+       error = xfs_qm_vop_rename_dqattach(inodes);
+       if (error) {
+               xfs_trans_cancel(tp, cancel_flags);
+               goto std_return;
+       }
+
+       /*
+        * Lock all the participating inodes. Depending upon whether
+        * the target_name exists in the target directory, and
+        * whether the target directory is the same as the source
+        * directory, we can lock from 2 to 4 inodes.
+        */
+       xfs_lock_inodes(inodes, num_inodes, XFS_ILOCK_EXCL);
+
+       /*
+        * Join all the inodes to the transaction. From this point on,
+        * we can rely on either trans_commit or trans_cancel to unlock
+        * them.
+        */
+       xfs_trans_ijoin(tp, src_dp, XFS_ILOCK_EXCL);
+       if (new_parent)
+               xfs_trans_ijoin(tp, target_dp, XFS_ILOCK_EXCL);
+       xfs_trans_ijoin(tp, src_ip, XFS_ILOCK_EXCL);
+       if (target_ip)
+               xfs_trans_ijoin(tp, target_ip, XFS_ILOCK_EXCL);
+
+       /*
+        * If we are using project inheritance, we only allow renames
+        * into our tree when the project IDs are the same; else the
+        * tree quota mechanism would be circumvented.
+        */
+       if (unlikely((target_dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
+                    (xfs_get_projid(target_dp) != xfs_get_projid(src_ip)))) {
+               error = XFS_ERROR(EXDEV);
+               goto error_return;
+       }
+
+       /*
+        * Set up the target.
+        */
+       if (target_ip == NULL) {
+               /*
+                * If there's no space reservation, check the entry will
+                * fit before actually inserting it.
+                */
+               error = xfs_dir_canenter(tp, target_dp, target_name, spaceres);
+               if (error)
+                       goto error_return;
+               /*
+                * If target does not exist and the rename crosses
+                * directories, adjust the target directory link count
+                * to account for the ".." reference from the new entry.
+                */
+               error = xfs_dir_createname(tp, target_dp, target_name,
+                                               src_ip->i_ino, &first_block,
+                                               &free_list, spaceres);
+               if (error == ENOSPC)
+                       goto error_return;
+               if (error)
+                       goto abort_return;
+
+               xfs_trans_ichgtime(tp, target_dp,
+                                       XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
 
-       case XFS_DINODE_FMT_EXTENTS:
-               ASSERT((ifp->if_flags & XFS_IFEXTENTS) ||
-                      !(iip->ili_fields & extflag[whichfork]));
-               if ((iip->ili_fields & extflag[whichfork]) &&
-                   (ifp->if_bytes > 0)) {
-                       ASSERT(xfs_iext_get_ext(ifp, 0));
-                       ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) > 0);
-                       (void)xfs_iextents_copy(ip, (xfs_bmbt_rec_t *)cp,
-                               whichfork);
+               if (new_parent && src_is_directory) {
+                       error = xfs_bumplink(tp, target_dp);
+                       if (error)
+                               goto abort_return;
                }
-               break;
+       } else { /* target_ip != NULL */
+               /*
+                * If target exists and it's a directory, check that both
+                * target and source are directories and that target can be
+                * destroyed, or that neither is a directory.
+                */
+               if (S_ISDIR(target_ip->i_d.di_mode)) {
+                       /*
+                        * Make sure target dir is empty.
+                        */
+                       if (!(xfs_dir_isempty(target_ip)) ||
+                           (target_ip->i_d.di_nlink > 2)) {
+                               error = XFS_ERROR(EEXIST);
+                               goto error_return;
+                       }
+               }
+
+               /*
+                * Link the source inode under the target name.
+                * If the source inode is a directory and we are moving
+                * it across directories, its ".." entry will be
+                * inconsistent until we replace that down below.
+                *
+                * In case there is already an entry with the same
+                * name at the destination directory, remove it first.
+                */
+               error = xfs_dir_replace(tp, target_dp, target_name,
+                                       src_ip->i_ino,
+                                       &first_block, &free_list, spaceres);
+               if (error)
+                       goto abort_return;
+
+               xfs_trans_ichgtime(tp, target_dp,
+                                       XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
+
+               /*
+                * Decrement the link count on the target since the target
+                * dir no longer points to it.
+                */
+               error = xfs_droplink(tp, target_ip);
+               if (error)
+                       goto abort_return;
+
+               if (src_is_directory) {
+                       /*
+                        * Drop the link from the old "." entry.
+                        */
+                       error = xfs_droplink(tp, target_ip);
+                       if (error)
+                               goto abort_return;
+               }
+       } /* target_ip != NULL */
+
+       /*
+        * Remove the source.
+        */
+       if (new_parent && src_is_directory) {
+               /*
+                * Rewrite the ".." entry to point to the new
+                * directory.
+                */
+               error = xfs_dir_replace(tp, src_ip, &xfs_name_dotdot,
+                                       target_dp->i_ino,
+                                       &first_block, &free_list, spaceres);
+               ASSERT(error != EEXIST);
+               if (error)
+                       goto abort_return;
+       }
+
+       /*
+        * We always want to hit the ctime on the source inode.
+        *
+        * This isn't strictly required by the standards since the source
+        * inode isn't really being changed, but old unix file systems did
+        * it and some incremental backup programs won't work without it.
+        */
+       xfs_trans_ichgtime(tp, src_ip, XFS_ICHGTIME_CHG);
+       xfs_trans_log_inode(tp, src_ip, XFS_ILOG_CORE);
+
+       /*
+        * Adjust the link count on src_dp.  This is necessary when
+        * renaming a directory, either within one parent when
+        * the target existed, or across two parent directories.
+        */
+       if (src_is_directory && (new_parent || target_ip != NULL)) {
+
+               /*
+                * Decrement link count on src_directory since the
+                * entry that's moved no longer points to it.
+                */
+               error = xfs_droplink(tp, src_dp);
+               if (error)
+                       goto abort_return;
+       }
 
-       case XFS_DINODE_FMT_BTREE:
-               if ((iip->ili_fields & brootflag[whichfork]) &&
-                   (ifp->if_broot_bytes > 0)) {
-                       ASSERT(ifp->if_broot != NULL);
-                       ASSERT(XFS_BMAP_BMDR_SPACE(ifp->if_broot) <=
-                               XFS_IFORK_SIZE(ip, whichfork));
-                       xfs_bmbt_to_bmdr(mp, ifp->if_broot, ifp->if_broot_bytes,
-                               (xfs_bmdr_block_t *)cp,
-                               XFS_DFORK_SIZE(dip, mp, whichfork));
-               }
-               break;
+       error = xfs_dir_removename(tp, src_dp, src_name, src_ip->i_ino,
+                                       &first_block, &free_list, spaceres);
+       if (error)
+               goto abort_return;
 
-       case XFS_DINODE_FMT_DEV:
-               if (iip->ili_fields & XFS_ILOG_DEV) {
-                       ASSERT(whichfork == XFS_DATA_FORK);
-                       xfs_dinode_put_rdev(dip, ip->i_df.if_u2.if_rdev);
-               }
-               break;
+       xfs_trans_ichgtime(tp, src_dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
+       xfs_trans_log_inode(tp, src_dp, XFS_ILOG_CORE);
+       if (new_parent)
+               xfs_trans_log_inode(tp, target_dp, XFS_ILOG_CORE);
 
-       case XFS_DINODE_FMT_UUID:
-               if (iip->ili_fields & XFS_ILOG_UUID) {
-                       ASSERT(whichfork == XFS_DATA_FORK);
-                       memcpy(XFS_DFORK_DPTR(dip),
-                              &ip->i_df.if_u2.if_uuid,
-                              sizeof(uuid_t));
-               }
-               break;
+       /*
+        * If this is a synchronous mount, make sure that the
+        * rename transaction goes to disk before returning to
+        * the user.
+        */
+       if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC)) {
+               xfs_trans_set_sync(tp);
+       }
 
-       default:
-               ASSERT(0);
-               break;
+       error = xfs_bmap_finish(&tp, &free_list, &committed);
+       if (error) {
+               xfs_bmap_cancel(&free_list);
+               xfs_trans_cancel(tp, (XFS_TRANS_RELEASE_LOG_RES |
+                                XFS_TRANS_ABORT));
+               goto std_return;
        }
+
+       /*
+        * trans_commit will unlock src_ip, target_ip & decrement
+        * the vnode references.
+        */
+       return xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
+
+ abort_return:
+       cancel_flags |= XFS_TRANS_ABORT;
+ error_return:
+       xfs_bmap_cancel(&free_list);
+       xfs_trans_cancel(tp, cancel_flags);
+ std_return:
+       return error;
 }
 
 STATIC int
@@ -2816,7 +3093,6 @@ abort_out:
        return error;
 }
 
-
 STATIC int
 xfs_iflush_int(
        struct xfs_inode        *ip,
@@ -3004,1072 +3280,3 @@ xfs_iflush_int(
 corrupt_out:
        return XFS_ERROR(EFSCORRUPTED);
 }
-
-/*
- * Return a pointer to the extent record at file index idx.
- */
-xfs_bmbt_rec_host_t *
-xfs_iext_get_ext(
-       xfs_ifork_t     *ifp,           /* inode fork pointer */
-       xfs_extnum_t    idx)            /* index of target extent */
-{
-       ASSERT(idx >= 0);
-       ASSERT(idx < ifp->if_bytes / sizeof(xfs_bmbt_rec_t));
-
-       if ((ifp->if_flags & XFS_IFEXTIREC) && (idx == 0)) {
-               return ifp->if_u1.if_ext_irec->er_extbuf;
-       } else if (ifp->if_flags & XFS_IFEXTIREC) {
-               xfs_ext_irec_t  *erp;           /* irec pointer */
-               int             erp_idx = 0;    /* irec index */
-               xfs_extnum_t    page_idx = idx; /* ext index in target list */
-
-               erp = xfs_iext_idx_to_irec(ifp, &page_idx, &erp_idx, 0);
-               return &erp->er_extbuf[page_idx];
-       } else if (ifp->if_bytes) {
-               return &ifp->if_u1.if_extents[idx];
-       } else {
-               return NULL;
-       }
-}
-
-/*
- * Insert new item(s) into the extent records for incore inode
- * fork 'ifp'.  'count' new items are inserted at index 'idx'.
- */
-void
-xfs_iext_insert(
-       xfs_inode_t     *ip,            /* incore inode pointer */
-       xfs_extnum_t    idx,            /* starting index of new items */
-       xfs_extnum_t    count,          /* number of inserted items */
-       xfs_bmbt_irec_t *new,           /* items to insert */
-       int             state)          /* type of extent conversion */
-{
-       xfs_ifork_t     *ifp = (state & BMAP_ATTRFORK) ? ip->i_afp : &ip->i_df;
-       xfs_extnum_t    i;              /* extent record index */
-
-       trace_xfs_iext_insert(ip, idx, new, state, _RET_IP_);
-
-       ASSERT(ifp->if_flags & XFS_IFEXTENTS);
-       xfs_iext_add(ifp, idx, count);
-       for (i = idx; i < idx + count; i++, new++)
-               xfs_bmbt_set_all(xfs_iext_get_ext(ifp, i), new);
-}
-
-/*
- * This is called when the amount of space required for incore file
- * extents needs to be increased. The ext_diff parameter stores the
- * number of new extents being added and the idx parameter contains
- * the extent index where the new extents will be added. If the new
- * extents are being appended, then we just need to (re)allocate and
- * initialize the space. Otherwise, if the new extents are being
- * inserted into the middle of the existing entries, a bit more work
- * is required to make room for the new extents to be inserted. The
- * caller is responsible for filling in the new extent entries upon
- * return.
- */
-void
-xfs_iext_add(
-       xfs_ifork_t     *ifp,           /* inode fork pointer */
-       xfs_extnum_t    idx,            /* index to begin adding exts */
-       int             ext_diff)       /* number of extents to add */
-{
-       int             byte_diff;      /* new bytes being added */
-       int             new_size;       /* size of extents after adding */
-       xfs_extnum_t    nextents;       /* number of extents in file */
-
-       nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
-       ASSERT((idx >= 0) && (idx <= nextents));
-       byte_diff = ext_diff * sizeof(xfs_bmbt_rec_t);
-       new_size = ifp->if_bytes + byte_diff;
-       /*
-        * If the new number of extents (nextents + ext_diff)
-        * fits inside the inode, then continue to use the inline
-        * extent buffer.
-        */
-       if (nextents + ext_diff <= XFS_INLINE_EXTS) {
-               if (idx < nextents) {
-                       memmove(&ifp->if_u2.if_inline_ext[idx + ext_diff],
-                               &ifp->if_u2.if_inline_ext[idx],
-                               (nextents - idx) * sizeof(xfs_bmbt_rec_t));
-                       memset(&ifp->if_u2.if_inline_ext[idx], 0, byte_diff);
-               }
-               ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext;
-               ifp->if_real_bytes = 0;
-       }
-       /*
-        * Otherwise use a linear (direct) extent list.
-        * If the extents are currently inside the inode,
-        * xfs_iext_realloc_direct will switch us from
-        * inline to direct extent allocation mode.
-        */
-       else if (nextents + ext_diff <= XFS_LINEAR_EXTS) {
-               xfs_iext_realloc_direct(ifp, new_size);
-               if (idx < nextents) {
-                       memmove(&ifp->if_u1.if_extents[idx + ext_diff],
-                               &ifp->if_u1.if_extents[idx],
-                               (nextents - idx) * sizeof(xfs_bmbt_rec_t));
-                       memset(&ifp->if_u1.if_extents[idx], 0, byte_diff);
-               }
-       }
-       /* Indirection array */
-       else {
-               xfs_ext_irec_t  *erp;
-               int             erp_idx = 0;
-               int             page_idx = idx;
-
-               ASSERT(nextents + ext_diff > XFS_LINEAR_EXTS);
-               if (ifp->if_flags & XFS_IFEXTIREC) {
-                       erp = xfs_iext_idx_to_irec(ifp, &page_idx, &erp_idx, 1);
-               } else {
-                       xfs_iext_irec_init(ifp);
-                       ASSERT(ifp->if_flags & XFS_IFEXTIREC);
-                       erp = ifp->if_u1.if_ext_irec;
-               }
-               /* Extents fit in target extent page */
-               if (erp && erp->er_extcount + ext_diff <= XFS_LINEAR_EXTS) {
-                       if (page_idx < erp->er_extcount) {
-                               memmove(&erp->er_extbuf[page_idx + ext_diff],
-                                       &erp->er_extbuf[page_idx],
-                                       (erp->er_extcount - page_idx) *
-                                       sizeof(xfs_bmbt_rec_t));
-                               memset(&erp->er_extbuf[page_idx], 0, byte_diff);
-                       }
-                       erp->er_extcount += ext_diff;
-                       xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, ext_diff);
-               }
-               /* Insert a new extent page */
-               else if (erp) {
-                       xfs_iext_add_indirect_multi(ifp,
-                               erp_idx, page_idx, ext_diff);
-               }
-               /*
-                * If extent(s) are being appended to the last page in
-                * the indirection array and the new extent(s) don't fit
-                * in the page, then erp is NULL and erp_idx is set to
-                * the next index needed in the indirection array.
-                */
-               else {
-                       int     count = ext_diff;
-
-                       while (count) {
-                               erp = xfs_iext_irec_new(ifp, erp_idx);
-                               erp->er_extcount = count;
-                               count -= MIN(count, (int)XFS_LINEAR_EXTS);
-                               if (count) {
-                                       erp_idx++;
-                               }
-                       }
-               }
-       }
-       ifp->if_bytes = new_size;
-}
-
-/*
- * This is called when incore extents are being added to the indirection
- * array and the new extents do not fit in the target extent list. The
- * erp_idx parameter contains the irec index for the target extent list
- * in the indirection array, and the idx parameter contains the extent
- * index within the list. The number of extents being added is stored
- * in the count parameter.
- *
- *    |-------|   |-------|
- *    |       |   |       |    idx - number of extents before idx
- *    |  idx  |   | count |
- *    |       |   |       |    count - number of extents being inserted at idx
- *    |-------|   |-------|
- *    | count |   | nex2  |    nex2 - number of extents after idx + count
- *    |-------|   |-------|
- */
-void
-xfs_iext_add_indirect_multi(
-       xfs_ifork_t     *ifp,                   /* inode fork pointer */
-       int             erp_idx,                /* target extent irec index */
-       xfs_extnum_t    idx,                    /* index within target list */
-       int             count)                  /* new extents being added */
-{
-       int             byte_diff;              /* new bytes being added */
-       xfs_ext_irec_t  *erp;                   /* pointer to irec entry */
-       xfs_extnum_t    ext_diff;               /* number of extents to add */
-       xfs_extnum_t    ext_cnt;                /* new extents still needed */
-       xfs_extnum_t    nex2;                   /* extents after idx + count */
-       xfs_bmbt_rec_t  *nex2_ep = NULL;        /* temp list for nex2 extents */
-       int             nlists;                 /* number of irec's (lists) */
-
-       ASSERT(ifp->if_flags & XFS_IFEXTIREC);
-       erp = &ifp->if_u1.if_ext_irec[erp_idx];
-       nex2 = erp->er_extcount - idx;
-       nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
-
-       /*
-        * Save second part of target extent list
-        * (all extents past */
-       if (nex2) {
-               byte_diff = nex2 * sizeof(xfs_bmbt_rec_t);
-               nex2_ep = (xfs_bmbt_rec_t *) kmem_alloc(byte_diff, KM_NOFS);
-               memmove(nex2_ep, &erp->er_extbuf[idx], byte_diff);
-               erp->er_extcount -= nex2;
-               xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, -nex2);
-               memset(&erp->er_extbuf[idx], 0, byte_diff);
-       }
-
-       /*
-        * Add the new extents to the end of the target
-        * list, then allocate new irec record(s) and
-        * extent buffer(s) as needed to store the rest
-        * of the new extents.
-        */
-       ext_cnt = count;
-       ext_diff = MIN(ext_cnt, (int)XFS_LINEAR_EXTS - erp->er_extcount);
-       if (ext_diff) {
-               erp->er_extcount += ext_diff;
-               xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, ext_diff);
-               ext_cnt -= ext_diff;
-       }
-       while (ext_cnt) {
-               erp_idx++;
-               erp = xfs_iext_irec_new(ifp, erp_idx);
-               ext_diff = MIN(ext_cnt, (int)XFS_LINEAR_EXTS);
-               erp->er_extcount = ext_diff;
-               xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, ext_diff);
-               ext_cnt -= ext_diff;
-       }
-
-       /* Add nex2 extents back to indirection array */
-       if (nex2) {
-               xfs_extnum_t    ext_avail;
-               int             i;
-
-               byte_diff = nex2 * sizeof(xfs_bmbt_rec_t);
-               ext_avail = XFS_LINEAR_EXTS - erp->er_extcount;
-               i = 0;
-               /*
-                * If nex2 extents fit in the current page, append
-                * nex2_ep after the new extents.
-                */
-               if (nex2 <= ext_avail) {
-                       i = erp->er_extcount;
-               }
-               /*
-                * Otherwise, check if space is available in the
-                * next page.
-                */
-               else if ((erp_idx < nlists - 1) &&
-                        (nex2 <= (ext_avail = XFS_LINEAR_EXTS -
-                         ifp->if_u1.if_ext_irec[erp_idx+1].er_extcount))) {
-                       erp_idx++;
-                       erp++;
-                       /* Create a hole for nex2 extents */
-                       memmove(&erp->er_extbuf[nex2], erp->er_extbuf,
-                               erp->er_extcount * sizeof(xfs_bmbt_rec_t));
-               }
-               /*
-                * Final choice, create a new extent page for
-                * nex2 extents.
-                */
-               else {
-                       erp_idx++;
-                       erp = xfs_iext_irec_new(ifp, erp_idx);
-               }
-               memmove(&erp->er_extbuf[i], nex2_ep, byte_diff);
-               kmem_free(nex2_ep);
-               erp->er_extcount += nex2;
-               xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, nex2);
-       }
-}
-
-/*
- * This is called when the amount of space required for incore file
- * extents needs to be decreased. The ext_diff parameter stores the
- * number of extents to be removed and the idx parameter contains
- * the extent index where the extents will be removed from.
- *
- * If the amount of space needed has decreased below the linear
- * limit, XFS_IEXT_BUFSZ, then switch to using the contiguous
- * extent array.  Otherwise, use kmem_realloc() to adjust the
- * size to what is needed.
- */
-void
-xfs_iext_remove(
-       xfs_inode_t     *ip,            /* incore inode pointer */
-       xfs_extnum_t    idx,            /* index to begin removing exts */
-       int             ext_diff,       /* number of extents to remove */
-       int             state)          /* type of extent conversion */
-{
-       xfs_ifork_t     *ifp = (state & BMAP_ATTRFORK) ? ip->i_afp : &ip->i_df;
-       xfs_extnum_t    nextents;       /* number of extents in file */
-       int             new_size;       /* size of extents after removal */
-
-       trace_xfs_iext_remove(ip, idx, state, _RET_IP_);
-
-       ASSERT(ext_diff > 0);
-       nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
-       new_size = (nextents - ext_diff) * sizeof(xfs_bmbt_rec_t);
-
-       if (new_size == 0) {
-               xfs_iext_destroy(ifp);
-       } else if (ifp->if_flags & XFS_IFEXTIREC) {
-               xfs_iext_remove_indirect(ifp, idx, ext_diff);
-       } else if (ifp->if_real_bytes) {
-               xfs_iext_remove_direct(ifp, idx, ext_diff);
-       } else {
-               xfs_iext_remove_inline(ifp, idx, ext_diff);
-       }
-       ifp->if_bytes = new_size;
-}
-
-/*
- * This removes ext_diff extents from the inline buffer, beginning
- * at extent index idx.
- */
-void
-xfs_iext_remove_inline(
-       xfs_ifork_t     *ifp,           /* inode fork pointer */
-       xfs_extnum_t    idx,            /* index to begin removing exts */
-       int             ext_diff)       /* number of extents to remove */
-{
-       int             nextents;       /* number of extents in file */
-
-       ASSERT(!(ifp->if_flags & XFS_IFEXTIREC));
-       ASSERT(idx < XFS_INLINE_EXTS);
-       nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
-       ASSERT(((nextents - ext_diff) > 0) &&
-               (nextents - ext_diff) < XFS_INLINE_EXTS);
-
-       if (idx + ext_diff < nextents) {
-               memmove(&ifp->if_u2.if_inline_ext[idx],
-                       &ifp->if_u2.if_inline_ext[idx + ext_diff],
-                       (nextents - (idx + ext_diff)) *
-                        sizeof(xfs_bmbt_rec_t));
-               memset(&ifp->if_u2.if_inline_ext[nextents - ext_diff],
-                       0, ext_diff * sizeof(xfs_bmbt_rec_t));
-       } else {
-               memset(&ifp->if_u2.if_inline_ext[idx], 0,
-                       ext_diff * sizeof(xfs_bmbt_rec_t));
-       }
-}
-
-/*
- * This removes ext_diff extents from a linear (direct) extent list,
- * beginning at extent index idx. If the extents are being removed
- * from the end of the list (ie. truncate) then we just need to re-
- * allocate the list to remove the extra space. Otherwise, if the
- * extents are being removed from the middle of the existing extent
- * entries, then we first need to move the extent records beginning
- * at idx + ext_diff up in the list to overwrite the records being
- * removed, then remove the extra space via kmem_realloc.
- */
-void
-xfs_iext_remove_direct(
-       xfs_ifork_t     *ifp,           /* inode fork pointer */
-       xfs_extnum_t    idx,            /* index to begin removing exts */
-       int             ext_diff)       /* number of extents to remove */
-{
-       xfs_extnum_t    nextents;       /* number of extents in file */
-       int             new_size;       /* size of extents after removal */
-
-       ASSERT(!(ifp->if_flags & XFS_IFEXTIREC));
-       new_size = ifp->if_bytes -
-               (ext_diff * sizeof(xfs_bmbt_rec_t));
-       nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
-
-       if (new_size == 0) {
-               xfs_iext_destroy(ifp);
-               return;
-       }
-       /* Move extents up in the list (if needed) */
-       if (idx + ext_diff < nextents) {
-               memmove(&ifp->if_u1.if_extents[idx],
-                       &ifp->if_u1.if_extents[idx + ext_diff],
-                       (nextents - (idx + ext_diff)) *
-                        sizeof(xfs_bmbt_rec_t));
-       }
-       memset(&ifp->if_u1.if_extents[nextents - ext_diff],
-               0, ext_diff * sizeof(xfs_bmbt_rec_t));
-       /*
-        * Reallocate the direct extent list. If the extents
-        * will fit inside the inode then xfs_iext_realloc_direct
-        * will switch from direct to inline extent allocation
-        * mode for us.
-        */
-       xfs_iext_realloc_direct(ifp, new_size);
-       ifp->if_bytes = new_size;
-}
-
-/*
- * This is called when incore extents are being removed from the
- * indirection array and the extents being removed span multiple extent
- * buffers. The idx parameter contains the file extent index where we
- * want to begin removing extents, and the count parameter contains
- * how many extents need to be removed.
- *
- *    |-------|   |-------|
- *    | nex1  |   |       |    nex1 - number of extents before idx
- *    |-------|   | count |
- *    |       |   |       |    count - number of extents being removed at idx
- *    | count |   |-------|
- *    |       |   | nex2  |    nex2 - number of extents after idx + count
- *    |-------|   |-------|
- */
-void
-xfs_iext_remove_indirect(
-       xfs_ifork_t     *ifp,           /* inode fork pointer */
-       xfs_extnum_t    idx,            /* index to begin removing extents */
-       int             count)          /* number of extents to remove */
-{
-       xfs_ext_irec_t  *erp;           /* indirection array pointer */
-       int             erp_idx = 0;    /* indirection array index */
-       xfs_extnum_t    ext_cnt;        /* extents left to remove */
-       xfs_extnum_t    ext_diff;       /* extents to remove in current list */
-       xfs_extnum_t    nex1;           /* number of extents before idx */
-       xfs_extnum_t    nex2;           /* extents after idx + count */
-       int             page_idx = idx; /* index in target extent list */
-
-       ASSERT(ifp->if_flags & XFS_IFEXTIREC);
-       erp = xfs_iext_idx_to_irec(ifp,  &page_idx, &erp_idx, 0);
-       ASSERT(erp != NULL);
-       nex1 = page_idx;
-       ext_cnt = count;
-       while (ext_cnt) {
-               nex2 = MAX((erp->er_extcount - (nex1 + ext_cnt)), 0);
-               ext_diff = MIN(ext_cnt, (erp->er_extcount - nex1));
-               /*
-                * Check for deletion of entire list;
-                * xfs_iext_irec_remove() updates extent offsets.
-                */
-               if (ext_diff == erp->er_extcount) {
-                       xfs_iext_irec_remove(ifp, erp_idx);
-                       ext_cnt -= ext_diff;
-                       nex1 = 0;
-                       if (ext_cnt) {
-                               ASSERT(erp_idx < ifp->if_real_bytes /
-                                       XFS_IEXT_BUFSZ);
-                               erp = &ifp->if_u1.if_ext_irec[erp_idx];
-                               nex1 = 0;
-                               continue;
-                       } else {
-                               break;
-                       }
-               }
-               /* Move extents up (if needed) */
-               if (nex2) {
-                       memmove(&erp->er_extbuf[nex1],
-                               &erp->er_extbuf[nex1 + ext_diff],
-                               nex2 * sizeof(xfs_bmbt_rec_t));
-               }
-               /* Zero out rest of page */
-               memset(&erp->er_extbuf[nex1 + nex2], 0, (XFS_IEXT_BUFSZ -
-                       ((nex1 + nex2) * sizeof(xfs_bmbt_rec_t))));
-               /* Update remaining counters */
-               erp->er_extcount -= ext_diff;
-               xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, -ext_diff);
-               ext_cnt -= ext_diff;
-               nex1 = 0;
-               erp_idx++;
-               erp++;
-       }
-       ifp->if_bytes -= count * sizeof(xfs_bmbt_rec_t);
-       xfs_iext_irec_compact(ifp);
-}
-
-/*
- * Create, destroy, or resize a linear (direct) block of extents.
- */
-void
-xfs_iext_realloc_direct(
-       xfs_ifork_t     *ifp,           /* inode fork pointer */
-       int             new_size)       /* new size of extents */
-{
-       int             rnew_size;      /* real new size of extents */
-
-       rnew_size = new_size;
-
-       ASSERT(!(ifp->if_flags & XFS_IFEXTIREC) ||
-               ((new_size >= 0) && (new_size <= XFS_IEXT_BUFSZ) &&
-                (new_size != ifp->if_real_bytes)));
-
-       /* Free extent records */
-       if (new_size == 0) {
-               xfs_iext_destroy(ifp);
-       }
-       /* Resize direct extent list and zero any new bytes */
-       else if (ifp->if_real_bytes) {
-               /* Check if extents will fit inside the inode */
-               if (new_size <= XFS_INLINE_EXTS * sizeof(xfs_bmbt_rec_t)) {
-                       xfs_iext_direct_to_inline(ifp, new_size /
-                               (uint)sizeof(xfs_bmbt_rec_t));
-                       ifp->if_bytes = new_size;
-                       return;
-               }
-               if (!is_power_of_2(new_size)){
-                       rnew_size = roundup_pow_of_two(new_size);
-               }
-               if (rnew_size != ifp->if_real_bytes) {
-                       ifp->if_u1.if_extents =
-                               kmem_realloc(ifp->if_u1.if_extents,
-                                               rnew_size,
-                                               ifp->if_real_bytes, KM_NOFS);
-               }
-               if (rnew_size > ifp->if_real_bytes) {
-                       memset(&ifp->if_u1.if_extents[ifp->if_bytes /
-                               (uint)sizeof(xfs_bmbt_rec_t)], 0,
-                               rnew_size - ifp->if_real_bytes);
-               }
-       }
-       /*
-        * Switch from the inline extent buffer to a direct
-        * extent list. Be sure to include the inline extent
-        * bytes in new_size.
-        */
-       else {
-               new_size += ifp->if_bytes;
-               if (!is_power_of_2(new_size)) {
-                       rnew_size = roundup_pow_of_two(new_size);
-               }
-               xfs_iext_inline_to_direct(ifp, rnew_size);
-       }
-       ifp->if_real_bytes = rnew_size;
-       ifp->if_bytes = new_size;
-}
-
-/*
- * Switch from linear (direct) extent records to inline buffer.
- */
-void
-xfs_iext_direct_to_inline(
-       xfs_ifork_t     *ifp,           /* inode fork pointer */
-       xfs_extnum_t    nextents)       /* number of extents in file */
-{
-       ASSERT(ifp->if_flags & XFS_IFEXTENTS);
-       ASSERT(nextents <= XFS_INLINE_EXTS);
-       /*
-        * The inline buffer was zeroed when we switched
-        * from inline to direct extent allocation mode,
-        * so we don't need to clear it here.
-        */
-       memcpy(ifp->if_u2.if_inline_ext, ifp->if_u1.if_extents,
-               nextents * sizeof(xfs_bmbt_rec_t));
-       kmem_free(ifp->if_u1.if_extents);
-       ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext;
-       ifp->if_real_bytes = 0;
-}
-
-/*
- * Switch from inline buffer to linear (direct) extent records.
- * new_size should already be rounded up to the next power of 2
- * by the caller (when appropriate), so use new_size as it is.
- * However, since new_size may be rounded up, we can't update
- * if_bytes here. It is the caller's responsibility to update
- * if_bytes upon return.
- */
-void
-xfs_iext_inline_to_direct(
-       xfs_ifork_t     *ifp,           /* inode fork pointer */
-       int             new_size)       /* number of extents in file */
-{
-       ifp->if_u1.if_extents = kmem_alloc(new_size, KM_NOFS);
-       memset(ifp->if_u1.if_extents, 0, new_size);
-       if (ifp->if_bytes) {
-               memcpy(ifp->if_u1.if_extents, ifp->if_u2.if_inline_ext,
-                       ifp->if_bytes);
-               memset(ifp->if_u2.if_inline_ext, 0, XFS_INLINE_EXTS *
-                       sizeof(xfs_bmbt_rec_t));
-       }
-       ifp->if_real_bytes = new_size;
-}
-
-/*
- * Resize an extent indirection array to new_size bytes.
- */
-STATIC void
-xfs_iext_realloc_indirect(
-       xfs_ifork_t     *ifp,           /* inode fork pointer */
-       int             new_size)       /* new indirection array size */
-{
-       int             nlists;         /* number of irec's (ex lists) */
-       int             size;           /* current indirection array size */
-
-       ASSERT(ifp->if_flags & XFS_IFEXTIREC);
-       nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
-       size = nlists * sizeof(xfs_ext_irec_t);
-       ASSERT(ifp->if_real_bytes);
-       ASSERT((new_size >= 0) && (new_size != size));
-       if (new_size == 0) {
-               xfs_iext_destroy(ifp);
-       } else {
-               ifp->if_u1.if_ext_irec = (xfs_ext_irec_t *)
-                       kmem_realloc(ifp->if_u1.if_ext_irec,
-                               new_size, size, KM_NOFS);
-       }
-}
-
-/*
- * Switch from indirection array to linear (direct) extent allocations.
- */
-STATIC void
-xfs_iext_indirect_to_direct(
-        xfs_ifork_t    *ifp)           /* inode fork pointer */
-{
-       xfs_bmbt_rec_host_t *ep;        /* extent record pointer */
-       xfs_extnum_t    nextents;       /* number of extents in file */
-       int             size;           /* size of file extents */
-
-       ASSERT(ifp->if_flags & XFS_IFEXTIREC);
-       nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
-       ASSERT(nextents <= XFS_LINEAR_EXTS);
-       size = nextents * sizeof(xfs_bmbt_rec_t);
-
-       xfs_iext_irec_compact_pages(ifp);
-       ASSERT(ifp->if_real_bytes == XFS_IEXT_BUFSZ);
-
-       ep = ifp->if_u1.if_ext_irec->er_extbuf;
-       kmem_free(ifp->if_u1.if_ext_irec);
-       ifp->if_flags &= ~XFS_IFEXTIREC;
-       ifp->if_u1.if_extents = ep;
-       ifp->if_bytes = size;
-       if (nextents < XFS_LINEAR_EXTS) {
-               xfs_iext_realloc_direct(ifp, size);
-       }
-}
-
-/*
- * Free incore file extents.
- */
-void
-xfs_iext_destroy(
-       xfs_ifork_t     *ifp)           /* inode fork pointer */
-{
-       if (ifp->if_flags & XFS_IFEXTIREC) {
-               int     erp_idx;
-               int     nlists;
-
-               nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
-               for (erp_idx = nlists - 1; erp_idx >= 0 ; erp_idx--) {
-                       xfs_iext_irec_remove(ifp, erp_idx);
-               }
-               ifp->if_flags &= ~XFS_IFEXTIREC;
-       } else if (ifp->if_real_bytes) {
-               kmem_free(ifp->if_u1.if_extents);
-       } else if (ifp->if_bytes) {
-               memset(ifp->if_u2.if_inline_ext, 0, XFS_INLINE_EXTS *
-                       sizeof(xfs_bmbt_rec_t));
-       }
-       ifp->if_u1.if_extents = NULL;
-       ifp->if_real_bytes = 0;
-       ifp->if_bytes = 0;
-}
-
-/*
- * Return a pointer to the extent record for file system block bno.
- */
-xfs_bmbt_rec_host_t *                  /* pointer to found extent record */
-xfs_iext_bno_to_ext(
-       xfs_ifork_t     *ifp,           /* inode fork pointer */
-       xfs_fileoff_t   bno,            /* block number to search for */
-       xfs_extnum_t    *idxp)          /* index of target extent */
-{
-       xfs_bmbt_rec_host_t *base;      /* pointer to first extent */
-       xfs_filblks_t   blockcount = 0; /* number of blocks in extent */
-       xfs_bmbt_rec_host_t *ep = NULL; /* pointer to target extent */
-       xfs_ext_irec_t  *erp = NULL;    /* indirection array pointer */
-       int             high;           /* upper boundary in search */
-       xfs_extnum_t    idx = 0;        /* index of target extent */
-       int             low;            /* lower boundary in search */
-       xfs_extnum_t    nextents;       /* number of file extents */
-       xfs_fileoff_t   startoff = 0;   /* start offset of extent */
-
-       nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
-       if (nextents == 0) {
-               *idxp = 0;
-               return NULL;
-       }
-       low = 0;
-       if (ifp->if_flags & XFS_IFEXTIREC) {
-               /* Find target extent list */
-               int     erp_idx = 0;
-               erp = xfs_iext_bno_to_irec(ifp, bno, &erp_idx);
-               base = erp->er_extbuf;
-               high = erp->er_extcount - 1;
-       } else {
-               base = ifp->if_u1.if_extents;
-               high = nextents - 1;
-       }
-       /* Binary search extent records */
-       while (low <= high) {
-               idx = (low + high) >> 1;
-               ep = base + idx;
-               startoff = xfs_bmbt_get_startoff(ep);
-               blockcount = xfs_bmbt_get_blockcount(ep);
-               if (bno < startoff) {
-                       high = idx - 1;
-               } else if (bno >= startoff + blockcount) {
-                       low = idx + 1;
-               } else {
-                       /* Convert back to file-based extent index */
-                       if (ifp->if_flags & XFS_IFEXTIREC) {
-                               idx += erp->er_extoff;
-                       }
-                       *idxp = idx;
-                       return ep;
-               }
-       }
-       /* Convert back to file-based extent index */
-       if (ifp->if_flags & XFS_IFEXTIREC) {
-               idx += erp->er_extoff;
-       }
-       if (bno >= startoff + blockcount) {
-               if (++idx == nextents) {
-                       ep = NULL;
-               } else {
-                       ep = xfs_iext_get_ext(ifp, idx);
-               }
-       }
-       *idxp = idx;
-       return ep;
-}
-
-/*
- * Return a pointer to the indirection array entry containing the
- * extent record for filesystem block bno. Store the index of the
- * target irec in *erp_idxp.
- */
-xfs_ext_irec_t *                       /* pointer to found extent record */
-xfs_iext_bno_to_irec(
-       xfs_ifork_t     *ifp,           /* inode fork pointer */
-       xfs_fileoff_t   bno,            /* block number to search for */
-       int             *erp_idxp)      /* irec index of target ext list */
-{
-       xfs_ext_irec_t  *erp = NULL;    /* indirection array pointer */
-       xfs_ext_irec_t  *erp_next;      /* next indirection array entry */
-       int             erp_idx;        /* indirection array index */
-       int             nlists;         /* number of extent irec's (lists) */
-       int             high;           /* binary search upper limit */
-       int             low;            /* binary search lower limit */
-
-       ASSERT(ifp->if_flags & XFS_IFEXTIREC);
-       nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
-       erp_idx = 0;
-       low = 0;
-       high = nlists - 1;
-       while (low <= high) {
-               erp_idx = (low + high) >> 1;
-               erp = &ifp->if_u1.if_ext_irec[erp_idx];
-               erp_next = erp_idx < nlists - 1 ? erp + 1 : NULL;
-               if (bno < xfs_bmbt_get_startoff(erp->er_extbuf)) {
-                       high = erp_idx - 1;
-               } else if (erp_next && bno >=
-                          xfs_bmbt_get_startoff(erp_next->er_extbuf)) {
-                       low = erp_idx + 1;
-               } else {
-                       break;
-               }
-       }
-       *erp_idxp = erp_idx;
-       return erp;
-}
-
-/*
- * Return a pointer to the indirection array entry containing the
- * extent record at file extent index *idxp. Store the index of the
- * target irec in *erp_idxp and store the page index of the target
- * extent record in *idxp.
- */
-xfs_ext_irec_t *
-xfs_iext_idx_to_irec(
-       xfs_ifork_t     *ifp,           /* inode fork pointer */
-       xfs_extnum_t    *idxp,          /* extent index (file -> page) */
-       int             *erp_idxp,      /* pointer to target irec */
-       int             realloc)        /* new bytes were just added */
-{
-       xfs_ext_irec_t  *prev;          /* pointer to previous irec */
-       xfs_ext_irec_t  *erp = NULL;    /* pointer to current irec */
-       int             erp_idx;        /* indirection array index */
-       int             nlists;         /* number of irec's (ex lists) */
-       int             high;           /* binary search upper limit */
-       int             low;            /* binary search lower limit */
-       xfs_extnum_t    page_idx = *idxp; /* extent index in target list */
-
-       ASSERT(ifp->if_flags & XFS_IFEXTIREC);
-       ASSERT(page_idx >= 0);
-       ASSERT(page_idx <= ifp->if_bytes / sizeof(xfs_bmbt_rec_t));
-       ASSERT(page_idx < ifp->if_bytes / sizeof(xfs_bmbt_rec_t) || realloc);
-
-       nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
-       erp_idx = 0;
-       low = 0;
-       high = nlists - 1;
-
-       /* Binary search extent irec's */
-       while (low <= high) {
-               erp_idx = (low + high) >> 1;
-               erp = &ifp->if_u1.if_ext_irec[erp_idx];
-               prev = erp_idx > 0 ? erp - 1 : NULL;
-               if (page_idx < erp->er_extoff || (page_idx == erp->er_extoff &&
-                    realloc && prev && prev->er_extcount < XFS_LINEAR_EXTS)) {
-                       high = erp_idx - 1;
-               } else if (page_idx > erp->er_extoff + erp->er_extcount ||
-                          (page_idx == erp->er_extoff + erp->er_extcount &&
-                           !realloc)) {
-                       low = erp_idx + 1;
-               } else if (page_idx == erp->er_extoff + erp->er_extcount &&
-                          erp->er_extcount == XFS_LINEAR_EXTS) {
-                       ASSERT(realloc);
-                       page_idx = 0;
-                       erp_idx++;
-                       erp = erp_idx < nlists ? erp + 1 : NULL;
-                       break;
-               } else {
-                       page_idx -= erp->er_extoff;
-                       break;
-               }
-       }
-       *idxp = page_idx;
-       *erp_idxp = erp_idx;
-       return(erp);
-}
-
-/*
- * Allocate and initialize an indirection array once the space needed
- * for incore extents increases above XFS_IEXT_BUFSZ.
- */
-void
-xfs_iext_irec_init(
-       xfs_ifork_t     *ifp)           /* inode fork pointer */
-{
-       xfs_ext_irec_t  *erp;           /* indirection array pointer */
-       xfs_extnum_t    nextents;       /* number of extents in file */
-
-       ASSERT(!(ifp->if_flags & XFS_IFEXTIREC));
-       nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
-       ASSERT(nextents <= XFS_LINEAR_EXTS);
-
-       erp = kmem_alloc(sizeof(xfs_ext_irec_t), KM_NOFS);
-
-       if (nextents == 0) {
-               ifp->if_u1.if_extents = kmem_alloc(XFS_IEXT_BUFSZ, KM_NOFS);
-       } else if (!ifp->if_real_bytes) {
-               xfs_iext_inline_to_direct(ifp, XFS_IEXT_BUFSZ);
-       } else if (ifp->if_real_bytes < XFS_IEXT_BUFSZ) {
-               xfs_iext_realloc_direct(ifp, XFS_IEXT_BUFSZ);
-       }
-       erp->er_extbuf = ifp->if_u1.if_extents;
-       erp->er_extcount = nextents;
-       erp->er_extoff = 0;
-
-       ifp->if_flags |= XFS_IFEXTIREC;
-       ifp->if_real_bytes = XFS_IEXT_BUFSZ;
-       ifp->if_bytes = nextents * sizeof(xfs_bmbt_rec_t);
-       ifp->if_u1.if_ext_irec = erp;
-
-       return;
-}
-
-/*
- * Allocate and initialize a new entry in the indirection array.
- */
-xfs_ext_irec_t *
-xfs_iext_irec_new(
-       xfs_ifork_t     *ifp,           /* inode fork pointer */
-       int             erp_idx)        /* index for new irec */
-{
-       xfs_ext_irec_t  *erp;           /* indirection array pointer */
-       int             i;              /* loop counter */
-       int             nlists;         /* number of irec's (ex lists) */
-
-       ASSERT(ifp->if_flags & XFS_IFEXTIREC);
-       nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
-
-       /* Resize indirection array */
-       xfs_iext_realloc_indirect(ifp, ++nlists *
-                                 sizeof(xfs_ext_irec_t));
-       /*
-        * Move records down in the array so the
-        * new page can use erp_idx.
-        */
-       erp = ifp->if_u1.if_ext_irec;
-       for (i = nlists - 1; i > erp_idx; i--) {
-               memmove(&erp[i], &erp[i-1], sizeof(xfs_ext_irec_t));
-       }
-       ASSERT(i == erp_idx);
-
-       /* Initialize new extent record */
-       erp = ifp->if_u1.if_ext_irec;
-       erp[erp_idx].er_extbuf = kmem_alloc(XFS_IEXT_BUFSZ, KM_NOFS);
-       ifp->if_real_bytes = nlists * XFS_IEXT_BUFSZ;
-       memset(erp[erp_idx].er_extbuf, 0, XFS_IEXT_BUFSZ);
-       erp[erp_idx].er_extcount = 0;
-       erp[erp_idx].er_extoff = erp_idx > 0 ?
-               erp[erp_idx-1].er_extoff + erp[erp_idx-1].er_extcount : 0;
-       return (&erp[erp_idx]);
-}
-
-/*
- * Remove a record from the indirection array.
- */
-void
-xfs_iext_irec_remove(
-       xfs_ifork_t     *ifp,           /* inode fork pointer */
-       int             erp_idx)        /* irec index to remove */
-{
-       xfs_ext_irec_t  *erp;           /* indirection array pointer */
-       int             i;              /* loop counter */
-       int             nlists;         /* number of irec's (ex lists) */
-
-       ASSERT(ifp->if_flags & XFS_IFEXTIREC);
-       nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
-       erp = &ifp->if_u1.if_ext_irec[erp_idx];
-       if (erp->er_extbuf) {
-               xfs_iext_irec_update_extoffs(ifp, erp_idx + 1,
-                       -erp->er_extcount);
-               kmem_free(erp->er_extbuf);
-       }
-       /* Compact extent records */
-       erp = ifp->if_u1.if_ext_irec;
-       for (i = erp_idx; i < nlists - 1; i++) {
-               memmove(&erp[i], &erp[i+1], sizeof(xfs_ext_irec_t));
-       }
-       /*
-        * Manually free the last extent record from the indirection
-        * array.  A call to xfs_iext_realloc_indirect() with a size
-        * of zero would result in a call to xfs_iext_destroy() which
-        * would in turn call this function again, creating a nasty
-        * infinite loop.
-        */
-       if (--nlists) {
-               xfs_iext_realloc_indirect(ifp,
-                       nlists * sizeof(xfs_ext_irec_t));
-       } else {
-               kmem_free(ifp->if_u1.if_ext_irec);
-       }
-       ifp->if_real_bytes = nlists * XFS_IEXT_BUFSZ;
-}
-
-/*
- * This is called to clean up large amounts of unused memory allocated
- * by the indirection array.  Before compacting anything though, verify
- * that the indirection array is still needed and switch back to the
- * linear extent list (or even the inline buffer) if possible.  The
- * compaction policy is as follows:
- *
- *    Full Compaction: Extents fit into a single page (or inline buffer)
- * Partial Compaction: Extents occupy less than 50% of allocated space
- *      No Compaction: Extents occupy at least 50% of allocated space
- */
-void
-xfs_iext_irec_compact(
-       xfs_ifork_t     *ifp)           /* inode fork pointer */
-{
-       xfs_extnum_t    nextents;       /* number of extents in file */
-       int             nlists;         /* number of irec's (ex lists) */
-
-       ASSERT(ifp->if_flags & XFS_IFEXTIREC);
-       nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
-       nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
-
-       if (nextents == 0) {
-               xfs_iext_destroy(ifp);
-       } else if (nextents <= XFS_INLINE_EXTS) {
-               xfs_iext_indirect_to_direct(ifp);
-               xfs_iext_direct_to_inline(ifp, nextents);
-       } else if (nextents <= XFS_LINEAR_EXTS) {
-               xfs_iext_indirect_to_direct(ifp);
-       } else if (nextents < (nlists * XFS_LINEAR_EXTS) >> 1) {
-               xfs_iext_irec_compact_pages(ifp);
-       }
-}
-
-/*
- * Combine extents from neighboring extent pages.
- */
-void
-xfs_iext_irec_compact_pages(
-       xfs_ifork_t     *ifp)           /* inode fork pointer */
-{
-       xfs_ext_irec_t  *erp, *erp_next;/* pointers to irec entries */
-       int             erp_idx = 0;    /* indirection array index */
-       int             nlists;         /* number of irec's (ex lists) */
-
-       ASSERT(ifp->if_flags & XFS_IFEXTIREC);
-       nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
-       while (erp_idx < nlists - 1) {
-               erp = &ifp->if_u1.if_ext_irec[erp_idx];
-               erp_next = erp + 1;
-               if (erp_next->er_extcount <=
-                   (XFS_LINEAR_EXTS - erp->er_extcount)) {
-                       memcpy(&erp->er_extbuf[erp->er_extcount],
-                               erp_next->er_extbuf, erp_next->er_extcount *
-                               sizeof(xfs_bmbt_rec_t));
-                       erp->er_extcount += erp_next->er_extcount;
-                       /*
-                        * Free page before removing extent record
-                        * so er_extoffs don't get modified in
-                        * xfs_iext_irec_remove.
-                        */
-                       kmem_free(erp_next->er_extbuf);
-                       erp_next->er_extbuf = NULL;
-                       xfs_iext_irec_remove(ifp, erp_idx + 1);
-                       nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
-               } else {
-                       erp_idx++;
-               }
-       }
-}
-
-/*
- * This is called to update the er_extoff field in the indirection
- * array when extents have been added or removed from one of the
- * extent lists. erp_idx contains the irec index to begin updating
- * at and ext_diff contains the number of extents that were added
- * or removed.
- */
-void
-xfs_iext_irec_update_extoffs(
-       xfs_ifork_t     *ifp,           /* inode fork pointer */
-       int             erp_idx,        /* irec index to update */
-       int             ext_diff)       /* number of new extents */
-{
-       int             i;              /* loop counter */
-       int             nlists;         /* number of irec's (ex lists */
-
-       ASSERT(ifp->if_flags & XFS_IFEXTIREC);
-       nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
-       for (i = erp_idx; i < nlists; i++) {
-               ifp->if_u1.if_ext_irec[i].er_extoff += ext_diff;
-       }
-}
-
-/*
- * Test whether it is appropriate to check an inode for and free post EOF
- * blocks. The 'force' parameter determines whether we should also consider
- * regular files that are marked preallocated or append-only.
- */
-bool
-xfs_can_free_eofblocks(struct xfs_inode *ip, bool force)
-{
-       /* prealloc/delalloc exists only on regular files */
-       if (!S_ISREG(ip->i_d.di_mode))
-               return false;
-
-       /*
-        * Zero sized files with no cached pages and delalloc blocks will not
-        * have speculative prealloc/delalloc blocks to remove.
-        */
-       if (VFS_I(ip)->i_size == 0 &&
-           VN_CACHED(VFS_I(ip)) == 0 &&
-           ip->i_delayed_blks == 0)
-               return false;
-
-       /* If we haven't read in the extent list, then don't do it now. */
-       if (!(ip->i_df.if_flags & XFS_IFEXTENTS))
-               return false;
-
-       /*
-        * Do not free real preallocated or append-only files unless the file
-        * has delalloc blocks and we are forced to remove them.
-        */
-       if (ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND))
-               if (!force || ip->i_delayed_blks == 0)
-                       return false;
-
-       return true;
-}
-
index b55fd347ab5b9b9ff51fd555115c854d7cfd8084..4a91358c1470b9ac029d451dd38c3fa77daf6fc0 100644 (file)
 #ifndef        __XFS_INODE_H__
 #define        __XFS_INODE_H__
 
-struct posix_acl;
-struct xfs_dinode;
-struct xfs_inode;
-
-/*
- * Fork identifiers.
- */
-#define        XFS_DATA_FORK   0
-#define        XFS_ATTR_FORK   1
-
-/*
- * The following xfs_ext_irec_t struct introduces a second (top) level
- * to the in-core extent allocation scheme. These structs are allocated
- * in a contiguous block, creating an indirection array where each entry
- * (irec) contains a pointer to a buffer of in-core extent records which
- * it manages. Each extent buffer is 4k in size, since 4k is the system
- * page size on Linux i386 and systems with larger page sizes don't seem
- * to gain much, if anything, by using their native page size as the
- * extent buffer size. Also, using 4k extent buffers everywhere provides
- * a consistent interface for CXFS across different platforms.
- *
- * There is currently no limit on the number of irec's (extent lists)
- * allowed, so heavily fragmented files may require an indirection array
- * which spans multiple system pages of memory. The number of extents
- * which would require this amount of contiguous memory is very large
- * and should not cause problems in the foreseeable future. However,
- * if the memory needed for the contiguous array ever becomes a problem,
- * it is possible that a third level of indirection may be required.
- */
-typedef struct xfs_ext_irec {
-       xfs_bmbt_rec_host_t *er_extbuf; /* block of extent records */
-       xfs_extnum_t    er_extoff;      /* extent offset in file */
-       xfs_extnum_t    er_extcount;    /* number of extents in page/block */
-} xfs_ext_irec_t;
+#include "xfs_inode_buf.h"
+#include "xfs_inode_fork.h"
 
 /*
- * File incore extent information, present for each of data & attr forks.
+ * Kernel only inode definitions
  */
-#define        XFS_IEXT_BUFSZ          4096
-#define        XFS_LINEAR_EXTS         (XFS_IEXT_BUFSZ / (uint)sizeof(xfs_bmbt_rec_t))
-#define        XFS_INLINE_EXTS         2
-#define        XFS_INLINE_DATA         32
-typedef struct xfs_ifork {
-       int                     if_bytes;       /* bytes in if_u1 */
-       int                     if_real_bytes;  /* bytes allocated in if_u1 */
-       struct xfs_btree_block  *if_broot;      /* file's incore btree root */
-       short                   if_broot_bytes; /* bytes allocated for root */
-       unsigned char           if_flags;       /* per-fork flags */
-       union {
-               xfs_bmbt_rec_host_t *if_extents;/* linear map file exts */
-               xfs_ext_irec_t  *if_ext_irec;   /* irec map file exts */
-               char            *if_data;       /* inline file data */
-       } if_u1;
-       union {
-               xfs_bmbt_rec_host_t if_inline_ext[XFS_INLINE_EXTS];
-                                               /* very small file extents */
-               char            if_inline_data[XFS_INLINE_DATA];
-                                               /* very small file data */
-               xfs_dev_t       if_rdev;        /* dev number if special */
-               uuid_t          if_uuid;        /* mount point value */
-       } if_u2;
-} xfs_ifork_t;
-
-/*
- * Inode location information.  Stored in the inode and passed to
- * xfs_imap_to_bp() to get a buffer and dinode for a given inode.
- */
-struct xfs_imap {
-       xfs_daddr_t     im_blkno;       /* starting BB of inode chunk */
-       ushort          im_len;         /* length in BBs of inode chunk */
-       ushort          im_boffset;     /* inode offset in block in bytes */
-};
-
-/*
- * This is the xfs in-core inode structure.
- * Most of the on-disk inode is embedded in the i_d field.
- *
- * The extent pointers/inline file space, however, are managed
- * separately.  The memory for this information is pointed to by
- * the if_u1 unions depending on the type of the data.
- * This is used to linearize the array of extents for fast in-core
- * access.  This is used until the file's number of extents
- * surpasses XFS_MAX_INCORE_EXTENTS, at which point all extent pointers
- * are accessed through the buffer cache.
- *
- * Other state kept in the in-core inode is used for identification,
- * locking, transactional updating, etc of the inode.
- *
- * Generally, we do not want to hold the i_rlock while holding the
- * i_ilock. Hierarchy is i_iolock followed by i_rlock.
- *
- * xfs_iptr_t contains all the inode fields up to and including the
- * i_mnext and i_mprev fields, it is used as a marker in the inode
- * chain off the mount structure by xfs_sync calls.
- */
-
-typedef struct xfs_ictimestamp {
-       __int32_t       t_sec;          /* timestamp seconds */
-       __int32_t       t_nsec;         /* timestamp nanoseconds */
-} xfs_ictimestamp_t;
-
-/*
- * NOTE:  This structure must be kept identical to struct xfs_dinode
- *       in xfs_dinode.h except for the endianness annotations.
- */
-typedef struct xfs_icdinode {
-       __uint16_t      di_magic;       /* inode magic # = XFS_DINODE_MAGIC */
-       __uint16_t      di_mode;        /* mode and type of file */
-       __int8_t        di_version;     /* inode version */
-       __int8_t        di_format;      /* format of di_c data */
-       __uint16_t      di_onlink;      /* old number of links to file */
-       __uint32_t      di_uid;         /* owner's user id */
-       __uint32_t      di_gid;         /* owner's group id */
-       __uint32_t      di_nlink;       /* number of links to file */
-       __uint16_t      di_projid_lo;   /* lower part of owner's project id */
-       __uint16_t      di_projid_hi;   /* higher part of owner's project id */
-       __uint8_t       di_pad[6];      /* unused, zeroed space */
-       __uint16_t      di_flushiter;   /* incremented on flush */
-       xfs_ictimestamp_t di_atime;     /* time last accessed */
-       xfs_ictimestamp_t di_mtime;     /* time last modified */
-       xfs_ictimestamp_t di_ctime;     /* time created/inode modified */
-       xfs_fsize_t     di_size;        /* number of bytes in file */
-       xfs_drfsbno_t   di_nblocks;     /* # of direct & btree blocks used */
-       xfs_extlen_t    di_extsize;     /* basic/minimum extent size for file */
-       xfs_extnum_t    di_nextents;    /* number of extents in data fork */
-       xfs_aextnum_t   di_anextents;   /* number of extents in attribute fork*/
-       __uint8_t       di_forkoff;     /* attr fork offs, <<3 for 64b align */
-       __int8_t        di_aformat;     /* format of attr fork's data */
-       __uint32_t      di_dmevmask;    /* DMIG event mask */
-       __uint16_t      di_dmstate;     /* DMIG state info */
-       __uint16_t      di_flags;       /* random flags, XFS_DIFLAG_... */
-       __uint32_t      di_gen;         /* generation number */
-
-       /* di_next_unlinked is the only non-core field in the old dinode */
-       xfs_agino_t     di_next_unlinked;/* agi unlinked list ptr */
-
-       /* start of the extended dinode, writable fields */
-       __uint32_t      di_crc;         /* CRC of the inode */
-       __uint64_t      di_changecount; /* number of attribute changes */
-       xfs_lsn_t       di_lsn;         /* flush sequence */
-       __uint64_t      di_flags2;      /* more random flags */
-       __uint8_t       di_pad2[16];    /* more padding for future expansion */
-
-       /* fields only written to during inode creation */
-       xfs_ictimestamp_t di_crtime;    /* time created */
-       xfs_ino_t       di_ino;         /* inode number */
-       uuid_t          di_uuid;        /* UUID of the filesystem */
-
-       /* structure must be padded to 64 bit alignment */
-} xfs_icdinode_t;
-
-static inline uint xfs_icdinode_size(int version)
-{
-       if (version == 3)
-               return sizeof(struct xfs_icdinode);
-       return offsetof(struct xfs_icdinode, di_next_unlinked);
-}
-
-/*
- * Flags for xfs_ichgtime().
- */
-#define        XFS_ICHGTIME_MOD        0x1     /* data fork modification timestamp */
-#define        XFS_ICHGTIME_CHG        0x2     /* inode field change timestamp */
-#define        XFS_ICHGTIME_CREATE     0x4     /* inode create timestamp */
-
-/*
- * Per-fork incore inode flags.
- */
-#define        XFS_IFINLINE    0x01    /* Inline data is read in */
-#define        XFS_IFEXTENTS   0x02    /* All extent pointers are read in */
-#define        XFS_IFBROOT     0x04    /* i_broot points to the bmap b-tree root */
-#define        XFS_IFEXTIREC   0x08    /* Indirection array of extent blocks */
-
-/*
- * Fork handling.
- */
-
-#define XFS_IFORK_Q(ip)                        ((ip)->i_d.di_forkoff != 0)
-#define XFS_IFORK_BOFF(ip)             ((int)((ip)->i_d.di_forkoff << 3))
-
-#define XFS_IFORK_PTR(ip,w)            \
-       ((w) == XFS_DATA_FORK ? \
-               &(ip)->i_df : \
-               (ip)->i_afp)
-#define XFS_IFORK_DSIZE(ip) \
-       (XFS_IFORK_Q(ip) ? \
-               XFS_IFORK_BOFF(ip) : \
-               XFS_LITINO((ip)->i_mount, (ip)->i_d.di_version))
-#define XFS_IFORK_ASIZE(ip) \
-       (XFS_IFORK_Q(ip) ? \
-               XFS_LITINO((ip)->i_mount, (ip)->i_d.di_version) - \
-                       XFS_IFORK_BOFF(ip) : \
-               0)
-#define XFS_IFORK_SIZE(ip,w) \
-       ((w) == XFS_DATA_FORK ? \
-               XFS_IFORK_DSIZE(ip) : \
-               XFS_IFORK_ASIZE(ip))
-#define XFS_IFORK_FORMAT(ip,w) \
-       ((w) == XFS_DATA_FORK ? \
-               (ip)->i_d.di_format : \
-               (ip)->i_d.di_aformat)
-#define XFS_IFORK_FMT_SET(ip,w,n) \
-       ((w) == XFS_DATA_FORK ? \
-               ((ip)->i_d.di_format = (n)) : \
-               ((ip)->i_d.di_aformat = (n)))
-#define XFS_IFORK_NEXTENTS(ip,w) \
-       ((w) == XFS_DATA_FORK ? \
-               (ip)->i_d.di_nextents : \
-               (ip)->i_d.di_anextents)
-#define XFS_IFORK_NEXT_SET(ip,w,n) \
-       ((w) == XFS_DATA_FORK ? \
-               ((ip)->i_d.di_nextents = (n)) : \
-               ((ip)->i_d.di_anextents = (n)))
-#define XFS_IFORK_MAXEXT(ip, w) \
-       (XFS_IFORK_SIZE(ip, w) / sizeof(xfs_bmbt_rec_t))
-
-
-#ifdef __KERNEL__
 
+struct xfs_dinode;
+struct xfs_inode;
 struct xfs_buf;
 struct xfs_bmap_free;
 struct xfs_bmbt_irec;
@@ -525,9 +315,21 @@ static inline int xfs_isiflocked(struct xfs_inode *ip)
         ((pip)->i_d.di_mode & S_ISGID))
 
 
-/*
- * xfs_inode.c prototypes.
- */
+int            xfs_release(struct xfs_inode *ip);
+int            xfs_inactive(struct xfs_inode *ip);
+int            xfs_lookup(struct xfs_inode *dp, struct xfs_name *name,
+                          struct xfs_inode **ipp, struct xfs_name *ci_name);
+int            xfs_create(struct xfs_inode *dp, struct xfs_name *name,
+                          umode_t mode, xfs_dev_t rdev, struct xfs_inode **ipp);
+int            xfs_remove(struct xfs_inode *dp, struct xfs_name *name,
+                          struct xfs_inode *ip);
+int            xfs_link(struct xfs_inode *tdp, struct xfs_inode *sip,
+                        struct xfs_name *target_name);
+int            xfs_rename(struct xfs_inode *src_dp, struct xfs_name *src_name,
+                          struct xfs_inode *src_ip, struct xfs_inode *target_dp,
+                          struct xfs_name *target_name,
+                          struct xfs_inode *target_ip);
+
 void           xfs_ilock(xfs_inode_t *, uint);
 int            xfs_ilock_nowait(xfs_inode_t *, uint);
 void           xfs_iunlock(xfs_inode_t *, uint);
@@ -548,13 +350,28 @@ int               xfs_itruncate_extents(struct xfs_trans **, struct xfs_inode *,
 int            xfs_iunlink(struct xfs_trans *, xfs_inode_t *);
 
 void           xfs_iext_realloc(xfs_inode_t *, int, int);
+
 void           xfs_iunpin_wait(xfs_inode_t *);
+#define xfs_ipincount(ip)      ((unsigned int) atomic_read(&ip->i_pincount))
+
 int            xfs_iflush(struct xfs_inode *, struct xfs_buf **);
 void           xfs_lock_inodes(xfs_inode_t **, int, uint);
 void           xfs_lock_two_inodes(xfs_inode_t *, xfs_inode_t *, uint);
 
 xfs_extlen_t   xfs_get_extsz_hint(struct xfs_inode *ip);
 
+int            xfs_dir_ialloc(struct xfs_trans **, struct xfs_inode *, umode_t,
+                              xfs_nlink_t, xfs_dev_t, prid_t, int,
+                              struct xfs_inode **, int *);
+int            xfs_droplink(struct xfs_trans *, struct xfs_inode *);
+int            xfs_bumplink(struct xfs_trans *, struct xfs_inode *);
+void           xfs_bump_ino_vers2(struct xfs_trans *, struct xfs_inode *);
+
+/* from xfs_file.c */
+int            xfs_zero_eof(struct xfs_inode *, xfs_off_t, xfs_fsize_t);
+int            xfs_iozero(struct xfs_inode *, loff_t, size_t);
+
+
 #define IHOLD(ip) \
 do { \
        ASSERT(atomic_read(&VFS_I(ip)->i_count) > 0) ; \
@@ -568,65 +385,6 @@ do { \
        iput(VFS_I(ip)); \
 } while (0)
 
-#endif /* __KERNEL__ */
-
-/*
- * Flags for xfs_iget()
- */
-#define XFS_IGET_CREATE                0x1
-#define XFS_IGET_UNTRUSTED     0x2
-#define XFS_IGET_DONTCACHE     0x4
-
-int            xfs_imap_to_bp(struct xfs_mount *, struct xfs_trans *,
-                              struct xfs_imap *, struct xfs_dinode **,
-                              struct xfs_buf **, uint, uint);
-int            xfs_iread(struct xfs_mount *, struct xfs_trans *,
-                         struct xfs_inode *, uint);
-void           xfs_dinode_calc_crc(struct xfs_mount *, struct xfs_dinode *);
-void           xfs_dinode_to_disk(struct xfs_dinode *,
-                                  struct xfs_icdinode *);
-void           xfs_idestroy_fork(struct xfs_inode *, int);
-void           xfs_idata_realloc(struct xfs_inode *, int, int);
-void           xfs_iroot_realloc(struct xfs_inode *, int, int);
-int            xfs_iread_extents(struct xfs_trans *, struct xfs_inode *, int);
-int            xfs_iextents_copy(struct xfs_inode *, xfs_bmbt_rec_t *, int);
-
-xfs_bmbt_rec_host_t *xfs_iext_get_ext(xfs_ifork_t *, xfs_extnum_t);
-void           xfs_iext_insert(xfs_inode_t *, xfs_extnum_t, xfs_extnum_t,
-                               xfs_bmbt_irec_t *, int);
-void           xfs_iext_add(xfs_ifork_t *, xfs_extnum_t, int);
-void           xfs_iext_add_indirect_multi(xfs_ifork_t *, int, xfs_extnum_t, int);
-void           xfs_iext_remove(xfs_inode_t *, xfs_extnum_t, int, int);
-void           xfs_iext_remove_inline(xfs_ifork_t *, xfs_extnum_t, int);
-void           xfs_iext_remove_direct(xfs_ifork_t *, xfs_extnum_t, int);
-void           xfs_iext_remove_indirect(xfs_ifork_t *, xfs_extnum_t, int);
-void           xfs_iext_realloc_direct(xfs_ifork_t *, int);
-void           xfs_iext_direct_to_inline(xfs_ifork_t *, xfs_extnum_t);
-void           xfs_iext_inline_to_direct(xfs_ifork_t *, int);
-void           xfs_iext_destroy(xfs_ifork_t *);
-xfs_bmbt_rec_host_t *xfs_iext_bno_to_ext(xfs_ifork_t *, xfs_fileoff_t, int *);
-xfs_ext_irec_t *xfs_iext_bno_to_irec(xfs_ifork_t *, xfs_fileoff_t, int *);
-xfs_ext_irec_t *xfs_iext_idx_to_irec(xfs_ifork_t *, xfs_extnum_t *, int *, int);
-void           xfs_iext_irec_init(xfs_ifork_t *);
-xfs_ext_irec_t *xfs_iext_irec_new(xfs_ifork_t *, int);
-void           xfs_iext_irec_remove(xfs_ifork_t *, int);
-void           xfs_iext_irec_compact(xfs_ifork_t *);
-void           xfs_iext_irec_compact_pages(xfs_ifork_t *);
-void           xfs_iext_irec_compact_full(xfs_ifork_t *);
-void           xfs_iext_irec_update_extoffs(xfs_ifork_t *, int, int);
-bool           xfs_can_free_eofblocks(struct xfs_inode *, bool);
-
-#define xfs_ipincount(ip)      ((unsigned int) atomic_read(&ip->i_pincount))
-
-#if defined(DEBUG)
-void           xfs_inobp_check(struct xfs_mount *, struct xfs_buf *);
-#else
-#define        xfs_inobp_check(mp, bp)
-#endif /* DEBUG */
-
-extern struct kmem_zone        *xfs_ifork_zone;
 extern struct kmem_zone        *xfs_inode_zone;
-extern struct kmem_zone        *xfs_ili_zone;
-extern const struct xfs_buf_ops xfs_inode_buf_ops;
 
 #endif /* __XFS_INODE_H__ */
diff --git a/fs/xfs/xfs_inode_buf.c b/fs/xfs/xfs_inode_buf.c
new file mode 100644 (file)
index 0000000..38fe509
--- /dev/null
@@ -0,0 +1,453 @@
+/*
+ * Copyright (c) 2000-2006 Silicon Graphics, Inc.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+#include "xfs.h"
+#include "xfs_fs.h"
+#include "xfs_format.h"
+#include "xfs_log.h"
+#include "xfs_trans.h"
+#include "xfs_sb.h"
+#include "xfs_ag.h"
+#include "xfs_mount.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_ialloc_btree.h"
+#include "xfs_dinode.h"
+#include "xfs_inode.h"
+#include "xfs_error.h"
+#include "xfs_cksum.h"
+#include "xfs_icache.h"
+#include "xfs_ialloc.h"
+
+/*
+ * Check that none of the inode's in the buffer have a next
+ * unlinked field of 0.
+ */
+#if defined(DEBUG)
+void
+xfs_inobp_check(
+       xfs_mount_t     *mp,
+       xfs_buf_t       *bp)
+{
+       int             i;
+       int             j;
+       xfs_dinode_t    *dip;
+
+       j = mp->m_inode_cluster_size >> mp->m_sb.sb_inodelog;
+
+       for (i = 0; i < j; i++) {
+               dip = (xfs_dinode_t *)xfs_buf_offset(bp,
+                                       i * mp->m_sb.sb_inodesize);
+               if (!dip->di_next_unlinked)  {
+                       xfs_alert(mp,
+       "Detected bogus zero next_unlinked field in incore inode buffer 0x%p.",
+                               bp);
+                       ASSERT(dip->di_next_unlinked);
+               }
+       }
+}
+#endif
+
+static void
+xfs_inode_buf_verify(
+       struct xfs_buf  *bp)
+{
+       struct xfs_mount *mp = bp->b_target->bt_mount;
+       int             i;
+       int             ni;
+
+       /*
+        * Validate the magic number and version of every inode in the buffer
+        */
+       ni = XFS_BB_TO_FSB(mp, bp->b_length) * mp->m_sb.sb_inopblock;
+       for (i = 0; i < ni; i++) {
+               int             di_ok;
+               xfs_dinode_t    *dip;
+
+               dip = (struct xfs_dinode *)xfs_buf_offset(bp,
+                                       (i << mp->m_sb.sb_inodelog));
+               di_ok = dip->di_magic == cpu_to_be16(XFS_DINODE_MAGIC) &&
+                           XFS_DINODE_GOOD_VERSION(dip->di_version);
+               if (unlikely(XFS_TEST_ERROR(!di_ok, mp,
+                                               XFS_ERRTAG_ITOBP_INOTOBP,
+                                               XFS_RANDOM_ITOBP_INOTOBP))) {
+                       xfs_buf_ioerror(bp, EFSCORRUPTED);
+                       XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_HIGH,
+                                            mp, dip);
+#ifdef DEBUG
+                       xfs_emerg(mp,
+                               "bad inode magic/vsn daddr %lld #%d (magic=%x)",
+                               (unsigned long long)bp->b_bn, i,
+                               be16_to_cpu(dip->di_magic));
+                       ASSERT(0);
+#endif
+               }
+       }
+       xfs_inobp_check(mp, bp);
+}
+
+
+static void
+xfs_inode_buf_read_verify(
+       struct xfs_buf  *bp)
+{
+       xfs_inode_buf_verify(bp);
+}
+
+static void
+xfs_inode_buf_write_verify(
+       struct xfs_buf  *bp)
+{
+       xfs_inode_buf_verify(bp);
+}
+
+const struct xfs_buf_ops xfs_inode_buf_ops = {
+       .verify_read = xfs_inode_buf_read_verify,
+       .verify_write = xfs_inode_buf_write_verify,
+};
+
+
+/*
+ * This routine is called to map an inode to the buffer containing the on-disk
+ * version of the inode.  It returns a pointer to the buffer containing the
+ * on-disk inode in the bpp parameter, and in the dipp parameter it returns a
+ * pointer to the on-disk inode within that buffer.
+ *
+ * If a non-zero error is returned, then the contents of bpp and dipp are
+ * undefined.
+ */
+int
+xfs_imap_to_bp(
+       struct xfs_mount        *mp,
+       struct xfs_trans        *tp,
+       struct xfs_imap         *imap,
+       struct xfs_dinode       **dipp,
+       struct xfs_buf          **bpp,
+       uint                    buf_flags,
+       uint                    iget_flags)
+{
+       struct xfs_buf          *bp;
+       int                     error;
+
+       buf_flags |= XBF_UNMAPPED;
+       error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, imap->im_blkno,
+                                  (int)imap->im_len, buf_flags, &bp,
+                                  &xfs_inode_buf_ops);
+       if (error) {
+               if (error == EAGAIN) {
+                       ASSERT(buf_flags & XBF_TRYLOCK);
+                       return error;
+               }
+
+               if (error == EFSCORRUPTED &&
+                   (iget_flags & XFS_IGET_UNTRUSTED))
+                       return XFS_ERROR(EINVAL);
+
+               xfs_warn(mp, "%s: xfs_trans_read_buf() returned error %d.",
+                       __func__, error);
+               return error;
+       }
+
+       *bpp = bp;
+       *dipp = (struct xfs_dinode *)xfs_buf_offset(bp, imap->im_boffset);
+       return 0;
+}
+
+STATIC void
+xfs_dinode_from_disk(
+       xfs_icdinode_t          *to,
+       xfs_dinode_t            *from)
+{
+       to->di_magic = be16_to_cpu(from->di_magic);
+       to->di_mode = be16_to_cpu(from->di_mode);
+       to->di_version = from ->di_version;
+       to->di_format = from->di_format;
+       to->di_onlink = be16_to_cpu(from->di_onlink);
+       to->di_uid = be32_to_cpu(from->di_uid);
+       to->di_gid = be32_to_cpu(from->di_gid);
+       to->di_nlink = be32_to_cpu(from->di_nlink);
+       to->di_projid_lo = be16_to_cpu(from->di_projid_lo);
+       to->di_projid_hi = be16_to_cpu(from->di_projid_hi);
+       memcpy(to->di_pad, from->di_pad, sizeof(to->di_pad));
+       to->di_flushiter = be16_to_cpu(from->di_flushiter);
+       to->di_atime.t_sec = be32_to_cpu(from->di_atime.t_sec);
+       to->di_atime.t_nsec = be32_to_cpu(from->di_atime.t_nsec);
+       to->di_mtime.t_sec = be32_to_cpu(from->di_mtime.t_sec);
+       to->di_mtime.t_nsec = be32_to_cpu(from->di_mtime.t_nsec);
+       to->di_ctime.t_sec = be32_to_cpu(from->di_ctime.t_sec);
+       to->di_ctime.t_nsec = be32_to_cpu(from->di_ctime.t_nsec);
+       to->di_size = be64_to_cpu(from->di_size);
+       to->di_nblocks = be64_to_cpu(from->di_nblocks);
+       to->di_extsize = be32_to_cpu(from->di_extsize);
+       to->di_nextents = be32_to_cpu(from->di_nextents);
+       to->di_anextents = be16_to_cpu(from->di_anextents);
+       to->di_forkoff = from->di_forkoff;
+       to->di_aformat  = from->di_aformat;
+       to->di_dmevmask = be32_to_cpu(from->di_dmevmask);
+       to->di_dmstate  = be16_to_cpu(from->di_dmstate);
+       to->di_flags    = be16_to_cpu(from->di_flags);
+       to->di_gen      = be32_to_cpu(from->di_gen);
+
+       if (to->di_version == 3) {
+               to->di_changecount = be64_to_cpu(from->di_changecount);
+               to->di_crtime.t_sec = be32_to_cpu(from->di_crtime.t_sec);
+               to->di_crtime.t_nsec = be32_to_cpu(from->di_crtime.t_nsec);
+               to->di_flags2 = be64_to_cpu(from->di_flags2);
+               to->di_ino = be64_to_cpu(from->di_ino);
+               to->di_lsn = be64_to_cpu(from->di_lsn);
+               memcpy(to->di_pad2, from->di_pad2, sizeof(to->di_pad2));
+               uuid_copy(&to->di_uuid, &from->di_uuid);
+       }
+}
+
+void
+xfs_dinode_to_disk(
+       xfs_dinode_t            *to,
+       xfs_icdinode_t          *from)
+{
+       to->di_magic = cpu_to_be16(from->di_magic);
+       to->di_mode = cpu_to_be16(from->di_mode);
+       to->di_version = from ->di_version;
+       to->di_format = from->di_format;
+       to->di_onlink = cpu_to_be16(from->di_onlink);
+       to->di_uid = cpu_to_be32(from->di_uid);
+       to->di_gid = cpu_to_be32(from->di_gid);
+       to->di_nlink = cpu_to_be32(from->di_nlink);
+       to->di_projid_lo = cpu_to_be16(from->di_projid_lo);
+       to->di_projid_hi = cpu_to_be16(from->di_projid_hi);
+       memcpy(to->di_pad, from->di_pad, sizeof(to->di_pad));
+       to->di_atime.t_sec = cpu_to_be32(from->di_atime.t_sec);
+       to->di_atime.t_nsec = cpu_to_be32(from->di_atime.t_nsec);
+       to->di_mtime.t_sec = cpu_to_be32(from->di_mtime.t_sec);
+       to->di_mtime.t_nsec = cpu_to_be32(from->di_mtime.t_nsec);
+       to->di_ctime.t_sec = cpu_to_be32(from->di_ctime.t_sec);
+       to->di_ctime.t_nsec = cpu_to_be32(from->di_ctime.t_nsec);
+       to->di_size = cpu_to_be64(from->di_size);
+       to->di_nblocks = cpu_to_be64(from->di_nblocks);
+       to->di_extsize = cpu_to_be32(from->di_extsize);
+       to->di_nextents = cpu_to_be32(from->di_nextents);
+       to->di_anextents = cpu_to_be16(from->di_anextents);
+       to->di_forkoff = from->di_forkoff;
+       to->di_aformat = from->di_aformat;
+       to->di_dmevmask = cpu_to_be32(from->di_dmevmask);
+       to->di_dmstate = cpu_to_be16(from->di_dmstate);
+       to->di_flags = cpu_to_be16(from->di_flags);
+       to->di_gen = cpu_to_be32(from->di_gen);
+
+       if (from->di_version == 3) {
+               to->di_changecount = cpu_to_be64(from->di_changecount);
+               to->di_crtime.t_sec = cpu_to_be32(from->di_crtime.t_sec);
+               to->di_crtime.t_nsec = cpu_to_be32(from->di_crtime.t_nsec);
+               to->di_flags2 = cpu_to_be64(from->di_flags2);
+               to->di_ino = cpu_to_be64(from->di_ino);
+               to->di_lsn = cpu_to_be64(from->di_lsn);
+               memcpy(to->di_pad2, from->di_pad2, sizeof(to->di_pad2));
+               uuid_copy(&to->di_uuid, &from->di_uuid);
+               to->di_flushiter = 0;
+       } else {
+               to->di_flushiter = cpu_to_be16(from->di_flushiter);
+       }
+}
+
+static bool
+xfs_dinode_verify(
+       struct xfs_mount        *mp,
+       struct xfs_inode        *ip,
+       struct xfs_dinode       *dip)
+{
+       if (dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC))
+               return false;
+
+       /* only version 3 or greater inodes are extensively verified here */
+       if (dip->di_version < 3)
+               return true;
+
+       if (!xfs_sb_version_hascrc(&mp->m_sb))
+               return false;
+       if (!xfs_verify_cksum((char *)dip, mp->m_sb.sb_inodesize,
+                             offsetof(struct xfs_dinode, di_crc)))
+               return false;
+       if (be64_to_cpu(dip->di_ino) != ip->i_ino)
+               return false;
+       if (!uuid_equal(&dip->di_uuid, &mp->m_sb.sb_uuid))
+               return false;
+       return true;
+}
+
+void
+xfs_dinode_calc_crc(
+       struct xfs_mount        *mp,
+       struct xfs_dinode       *dip)
+{
+       __uint32_t              crc;
+
+       if (dip->di_version < 3)
+               return;
+
+       ASSERT(xfs_sb_version_hascrc(&mp->m_sb));
+       crc = xfs_start_cksum((char *)dip, mp->m_sb.sb_inodesize,
+                             offsetof(struct xfs_dinode, di_crc));
+       dip->di_crc = xfs_end_cksum(crc);
+}
+
+/*
+ * Read the disk inode attributes into the in-core inode structure.
+ *
+ * For version 5 superblocks, if we are initialising a new inode and we are not
+ * utilising the XFS_MOUNT_IKEEP inode cluster mode, we can simple build the new
+ * inode core with a random generation number. If we are keeping inodes around,
+ * we need to read the inode cluster to get the existing generation number off
+ * disk. Further, if we are using version 4 superblocks (i.e. v1/v2 inode
+ * format) then log recovery is dependent on the di_flushiter field being
+ * initialised from the current on-disk value and hence we must also read the
+ * inode off disk.
+ */
+int
+xfs_iread(
+       xfs_mount_t     *mp,
+       xfs_trans_t     *tp,
+       xfs_inode_t     *ip,
+       uint            iget_flags)
+{
+       xfs_buf_t       *bp;
+       xfs_dinode_t    *dip;
+       int             error;
+
+       /*
+        * Fill in the location information in the in-core inode.
+        */
+       error = xfs_imap(mp, tp, ip->i_ino, &ip->i_imap, iget_flags);
+       if (error)
+               return error;
+
+       /* shortcut IO on inode allocation if possible */
+       if ((iget_flags & XFS_IGET_CREATE) &&
+           xfs_sb_version_hascrc(&mp->m_sb) &&
+           !(mp->m_flags & XFS_MOUNT_IKEEP)) {
+               /* initialise the on-disk inode core */
+               memset(&ip->i_d, 0, sizeof(ip->i_d));
+               ip->i_d.di_magic = XFS_DINODE_MAGIC;
+               ip->i_d.di_gen = prandom_u32();
+               if (xfs_sb_version_hascrc(&mp->m_sb)) {
+                       ip->i_d.di_version = 3;
+                       ip->i_d.di_ino = ip->i_ino;
+                       uuid_copy(&ip->i_d.di_uuid, &mp->m_sb.sb_uuid);
+               } else
+                       ip->i_d.di_version = 2;
+               return 0;
+       }
+
+       /*
+        * Get pointers to the on-disk inode and the buffer containing it.
+        */
+       error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &bp, 0, iget_flags);
+       if (error)
+               return error;
+
+       /* even unallocated inodes are verified */
+       if (!xfs_dinode_verify(mp, ip, dip)) {
+               xfs_alert(mp, "%s: validation failed for inode %lld failed",
+                               __func__, ip->i_ino);
+
+               XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, dip);
+               error = XFS_ERROR(EFSCORRUPTED);
+               goto out_brelse;
+       }
+
+       /*
+        * If the on-disk inode is already linked to a directory
+        * entry, copy all of the inode into the in-core inode.
+        * xfs_iformat_fork() handles copying in the inode format
+        * specific information.
+        * Otherwise, just get the truly permanent information.
+        */
+       if (dip->di_mode) {
+               xfs_dinode_from_disk(&ip->i_d, dip);
+               error = xfs_iformat_fork(ip, dip);
+               if (error)  {
+#ifdef DEBUG
+                       xfs_alert(mp, "%s: xfs_iformat() returned error %d",
+                               __func__, error);
+#endif /* DEBUG */
+                       goto out_brelse;
+               }
+       } else {
+               /*
+                * Partial initialisation of the in-core inode. Just the bits
+                * that xfs_ialloc won't overwrite or relies on being correct.
+                */
+               ip->i_d.di_magic = be16_to_cpu(dip->di_magic);
+               ip->i_d.di_version = dip->di_version;
+               ip->i_d.di_gen = be32_to_cpu(dip->di_gen);
+               ip->i_d.di_flushiter = be16_to_cpu(dip->di_flushiter);
+
+               if (dip->di_version == 3) {
+                       ip->i_d.di_ino = be64_to_cpu(dip->di_ino);
+                       uuid_copy(&ip->i_d.di_uuid, &dip->di_uuid);
+               }
+
+               /*
+                * Make sure to pull in the mode here as well in
+                * case the inode is released without being used.
+                * This ensures that xfs_inactive() will see that
+                * the inode is already free and not try to mess
+                * with the uninitialized part of it.
+                */
+               ip->i_d.di_mode = 0;
+       }
+
+       /*
+        * The inode format changed when we moved the link count and
+        * made it 32 bits long.  If this is an old format inode,
+        * convert it in memory to look like a new one.  If it gets
+        * flushed to disk we will convert back before flushing or
+        * logging it.  We zero out the new projid field and the old link
+        * count field.  We'll handle clearing the pad field (the remains
+        * of the old uuid field) when we actually convert the inode to
+        * the new format. We don't change the version number so that we
+        * can distinguish this from a real new format inode.
+        */
+       if (ip->i_d.di_version == 1) {
+               ip->i_d.di_nlink = ip->i_d.di_onlink;
+               ip->i_d.di_onlink = 0;
+               xfs_set_projid(ip, 0);
+       }
+
+       ip->i_delayed_blks = 0;
+
+       /*
+        * Mark the buffer containing the inode as something to keep
+        * around for a while.  This helps to keep recently accessed
+        * meta-data in-core longer.
+        */
+       xfs_buf_set_ref(bp, XFS_INO_REF);
+
+       /*
+        * Use xfs_trans_brelse() to release the buffer containing the on-disk
+        * inode, because it was acquired with xfs_trans_read_buf() in
+        * xfs_imap_to_bp() above.  If tp is NULL, this is just a normal
+        * brelse().  If we're within a transaction, then xfs_trans_brelse()
+        * will only release the buffer if it is not dirty within the
+        * transaction.  It will be OK to release the buffer in this case,
+        * because inodes on disk are never destroyed and we will be locking the
+        * new in-core inode before putting it in the cache where other
+        * processes can find it.  Thus we don't have to worry about the inode
+        * being changed just because we released the buffer.
+        */
+ out_brelse:
+       xfs_trans_brelse(tp, bp);
+       return error;
+}
diff --git a/fs/xfs/xfs_inode_buf.h b/fs/xfs/xfs_inode_buf.h
new file mode 100644 (file)
index 0000000..aae9fc4
--- /dev/null
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+#ifndef        __XFS_INODE_BUF_H__
+#define        __XFS_INODE_BUF_H__
+
+struct xfs_inode;
+struct xfs_dinode;
+struct xfs_icdinode;
+
+/*
+ * Inode location information.  Stored in the inode and passed to
+ * xfs_imap_to_bp() to get a buffer and dinode for a given inode.
+ */
+struct xfs_imap {
+       xfs_daddr_t     im_blkno;       /* starting BB of inode chunk */
+       ushort          im_len;         /* length in BBs of inode chunk */
+       ushort          im_boffset;     /* inode offset in block in bytes */
+};
+
+int            xfs_imap_to_bp(struct xfs_mount *, struct xfs_trans *,
+                              struct xfs_imap *, struct xfs_dinode **,
+                              struct xfs_buf **, uint, uint);
+int            xfs_iread(struct xfs_mount *, struct xfs_trans *,
+                         struct xfs_inode *, uint);
+void           xfs_dinode_calc_crc(struct xfs_mount *, struct xfs_dinode *);
+void           xfs_dinode_to_disk(struct xfs_dinode *,
+                                  struct xfs_icdinode *);
+
+#if defined(DEBUG)
+void           xfs_inobp_check(struct xfs_mount *, struct xfs_buf *);
+#else
+#define        xfs_inobp_check(mp, bp)
+#endif /* DEBUG */
+
+extern const struct xfs_buf_ops xfs_inode_buf_ops;
+
+#endif /* __XFS_INODE_BUF_H__ */
diff --git a/fs/xfs/xfs_inode_fork.c b/fs/xfs/xfs_inode_fork.c
new file mode 100644 (file)
index 0000000..2b60a5a
--- /dev/null
@@ -0,0 +1,1919 @@
+/*
+ * Copyright (c) 2000-2006 Silicon Graphics, Inc.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+#include <linux/log2.h>
+
+#include "xfs.h"
+#include "xfs_fs.h"
+#include "xfs_format.h"
+#include "xfs_log.h"
+#include "xfs_inum.h"
+#include "xfs_trans.h"
+#include "xfs_trans_priv.h"
+#include "xfs_sb.h"
+#include "xfs_ag.h"
+#include "xfs_mount.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_alloc_btree.h"
+#include "xfs_ialloc_btree.h"
+#include "xfs_attr_sf.h"
+#include "xfs_dinode.h"
+#include "xfs_inode.h"
+#include "xfs_buf_item.h"
+#include "xfs_inode_item.h"
+#include "xfs_btree.h"
+#include "xfs_alloc.h"
+#include "xfs_ialloc.h"
+#include "xfs_bmap.h"
+#include "xfs_error.h"
+#include "xfs_quota.h"
+#include "xfs_filestream.h"
+#include "xfs_cksum.h"
+#include "xfs_trace.h"
+#include "xfs_icache.h"
+
+/* Slab zone used to allocate in-core fork structures (e.g. ip->i_afp). */
+kmem_zone_t *xfs_ifork_zone;
+
+/* Per-format fork initializers, dispatched from xfs_iformat_fork(). */
+STATIC int xfs_iformat_local(xfs_inode_t *, xfs_dinode_t *, int, int);
+STATIC int xfs_iformat_extents(xfs_inode_t *, xfs_dinode_t *, int);
+STATIC int xfs_iformat_btree(xfs_inode_t *, xfs_dinode_t *, int);
+
+#ifdef DEBUG
+/*
+ * Make sure that the extents in the given memory buffer
+ * are valid.
+ *
+ * DEBUG-only sanity check: decode each in-core extent record and, for
+ * NOSTATE-format forks, assert that no extent carries unwritten state.
+ */
+void
+xfs_validate_extents(
+       xfs_ifork_t             *ifp,
+       int                     nrecs,
+       xfs_exntfmt_t           fmt)
+{
+       xfs_bmbt_irec_t         irec;
+       xfs_bmbt_rec_host_t     rec;
+       int                     i;
+
+       for (i = 0; i < nrecs; i++) {
+               xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, i);
+               /* get_unaligned(): records may not be naturally aligned */
+               rec.l0 = get_unaligned(&ep->l0);
+               rec.l1 = get_unaligned(&ep->l1);
+               xfs_bmbt_get_all(&rec, &irec);
+               if (fmt == XFS_EXTFMT_NOSTATE)
+                       ASSERT(irec.br_state == XFS_EXT_NORM);
+       }
+}
+#else /* DEBUG */
+/* Compiles away entirely in non-DEBUG builds. */
+#define xfs_validate_extents(ifp, nrecs, fmt)
+#endif /* DEBUG */
+
+
+/*
+ * Move inode type and inode format specific information from the
+ * on-disk inode to the in-core inode.  For fifos, devs, and sockets
+ * this means set if_rdev to the proper value.  For files, directories,
+ * and symlinks this means to bring in the in-line data or extent
+ * pointers.  For a file in B-tree format, only the root is immediately
+ * brought in-core.  The rest will be in-lined in if_extents when it
+ * is first referenced (see xfs_iread_extents()).
+ *
+ * Returns 0 on success or EFSCORRUPTED if any of the on-disk sanity
+ * checks fail.  On attr-fork failure, both forks are torn down again.
+ */
+int
+xfs_iformat_fork(
+       xfs_inode_t             *ip,
+       xfs_dinode_t            *dip)
+{
+       xfs_attr_shortform_t    *atp;
+       int                     size;
+       int                     error = 0;
+       xfs_fsize_t             di_size;
+
+       /* Total extent count can never exceed the block count. */
+       if (unlikely(be32_to_cpu(dip->di_nextents) +
+                    be16_to_cpu(dip->di_anextents) >
+                    be64_to_cpu(dip->di_nblocks))) {
+               xfs_warn(ip->i_mount,
+                       "corrupt dinode %Lu, extent total = %d, nblocks = %Lu.",
+                       (unsigned long long)ip->i_ino,
+                       (int)(be32_to_cpu(dip->di_nextents) +
+                             be16_to_cpu(dip->di_anextents)),
+                       (unsigned long long)
+                               be64_to_cpu(dip->di_nblocks));
+               XFS_CORRUPTION_ERROR("xfs_iformat(1)", XFS_ERRLEVEL_LOW,
+                                    ip->i_mount, dip);
+               return XFS_ERROR(EFSCORRUPTED);
+       }
+
+       /* The attr fork offset must lie within the inode itself. */
+       if (unlikely(dip->di_forkoff > ip->i_mount->m_sb.sb_inodesize)) {
+               xfs_warn(ip->i_mount, "corrupt dinode %Lu, forkoff = 0x%x.",
+                       (unsigned long long)ip->i_ino,
+                       dip->di_forkoff);
+               XFS_CORRUPTION_ERROR("xfs_iformat(2)", XFS_ERRLEVEL_LOW,
+                                    ip->i_mount, dip);
+               return XFS_ERROR(EFSCORRUPTED);
+       }
+
+       /* A realtime inode on a mount without a realtime device is bogus. */
+       if (unlikely((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) &&
+                    !ip->i_mount->m_rtdev_targp)) {
+               xfs_warn(ip->i_mount,
+                       "corrupt dinode %Lu, has realtime flag set.",
+                       ip->i_ino);
+               XFS_CORRUPTION_ERROR("xfs_iformat(realtime)",
+                                    XFS_ERRLEVEL_LOW, ip->i_mount, dip);
+               return XFS_ERROR(EFSCORRUPTED);
+       }
+
+       switch (ip->i_d.di_mode & S_IFMT) {
+       case S_IFIFO:
+       case S_IFCHR:
+       case S_IFBLK:
+       case S_IFSOCK:
+               /* Special files carry only a device number, no data fork. */
+               if (unlikely(dip->di_format != XFS_DINODE_FMT_DEV)) {
+                       XFS_CORRUPTION_ERROR("xfs_iformat(3)", XFS_ERRLEVEL_LOW,
+                                             ip->i_mount, dip);
+                       return XFS_ERROR(EFSCORRUPTED);
+               }
+               ip->i_d.di_size = 0;
+               ip->i_df.if_u2.if_rdev = xfs_dinode_get_rdev(dip);
+               break;
+
+       case S_IFREG:
+       case S_IFLNK:
+       case S_IFDIR:
+               switch (dip->di_format) {
+               case XFS_DINODE_FMT_LOCAL:
+                       /*
+                        * no local regular files yet
+                        */
+                       if (unlikely(S_ISREG(be16_to_cpu(dip->di_mode)))) {
+                               xfs_warn(ip->i_mount,
+                       "corrupt inode %Lu (local format for regular file).",
+                                       (unsigned long long) ip->i_ino);
+                               XFS_CORRUPTION_ERROR("xfs_iformat(4)",
+                                                    XFS_ERRLEVEL_LOW,
+                                                    ip->i_mount, dip);
+                               return XFS_ERROR(EFSCORRUPTED);
+                       }
+
+                       /* Local data must fit within the data fork space. */
+                       di_size = be64_to_cpu(dip->di_size);
+                       if (unlikely(di_size > XFS_DFORK_DSIZE(dip, ip->i_mount))) {
+                               xfs_warn(ip->i_mount,
+                       "corrupt inode %Lu (bad size %Ld for local inode).",
+                                       (unsigned long long) ip->i_ino,
+                                       (long long) di_size);
+                               XFS_CORRUPTION_ERROR("xfs_iformat(5)",
+                                                    XFS_ERRLEVEL_LOW,
+                                                    ip->i_mount, dip);
+                               return XFS_ERROR(EFSCORRUPTED);
+                       }
+
+                       size = (int)di_size;
+                       error = xfs_iformat_local(ip, dip, XFS_DATA_FORK, size);
+                       break;
+               case XFS_DINODE_FMT_EXTENTS:
+                       error = xfs_iformat_extents(ip, dip, XFS_DATA_FORK);
+                       break;
+               case XFS_DINODE_FMT_BTREE:
+                       error = xfs_iformat_btree(ip, dip, XFS_DATA_FORK);
+                       break;
+               default:
+                       XFS_ERROR_REPORT("xfs_iformat(6)", XFS_ERRLEVEL_LOW,
+                                        ip->i_mount);
+                       return XFS_ERROR(EFSCORRUPTED);
+               }
+               break;
+
+       default:
+               XFS_ERROR_REPORT("xfs_iformat(7)", XFS_ERRLEVEL_LOW, ip->i_mount);
+               return XFS_ERROR(EFSCORRUPTED);
+       }
+       if (error) {
+               return error;
+       }
+       /* Done if there is no attribute fork on disk. */
+       if (!XFS_DFORK_Q(dip))
+               return 0;
+
+       ASSERT(ip->i_afp == NULL);
+       ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP | KM_NOFS);
+
+       /* Now bring in the attribute fork, by its own on-disk format. */
+       switch (dip->di_aformat) {
+       case XFS_DINODE_FMT_LOCAL:
+               atp = (xfs_attr_shortform_t *)XFS_DFORK_APTR(dip);
+               size = be16_to_cpu(atp->hdr.totsize);
+
+               if (unlikely(size < sizeof(struct xfs_attr_sf_hdr))) {
+                       xfs_warn(ip->i_mount,
+                               "corrupt inode %Lu (bad attr fork size %Ld).",
+                               (unsigned long long) ip->i_ino,
+                               (long long) size);
+                       XFS_CORRUPTION_ERROR("xfs_iformat(8)",
+                                            XFS_ERRLEVEL_LOW,
+                                            ip->i_mount, dip);
+                       return XFS_ERROR(EFSCORRUPTED);
+               }
+
+               error = xfs_iformat_local(ip, dip, XFS_ATTR_FORK, size);
+               break;
+       case XFS_DINODE_FMT_EXTENTS:
+               error = xfs_iformat_extents(ip, dip, XFS_ATTR_FORK);
+               break;
+       case XFS_DINODE_FMT_BTREE:
+               error = xfs_iformat_btree(ip, dip, XFS_ATTR_FORK);
+               break;
+       default:
+               error = XFS_ERROR(EFSCORRUPTED);
+               break;
+       }
+       if (error) {
+               /* Undo both forks so the inode is left in a clean state. */
+               kmem_zone_free(xfs_ifork_zone, ip->i_afp);
+               ip->i_afp = NULL;
+               xfs_idestroy_fork(ip, XFS_DATA_FORK);
+       }
+       return error;
+}
+
+/*
+ * The file is in-lined in the on-disk inode.
+ * If it fits into if_inline_data, then copy
+ * it there, otherwise allocate a buffer for it
+ * and copy the data there.  Either way, set
+ * if_data to point at the data.
+ * If we allocate a buffer for the data, make
+ * sure that its size is a multiple of 4 and
+ * record the real size in i_real_bytes.
+ *
+ * Returns 0 on success or EFSCORRUPTED if the on-disk size exceeds the
+ * space available to the fork.
+ */
+STATIC int
+xfs_iformat_local(
+       xfs_inode_t     *ip,
+       xfs_dinode_t    *dip,
+       int             whichfork,
+       int             size)
+{
+       xfs_ifork_t     *ifp;
+       int             real_size;
+
+       /*
+        * If the size is unreasonable, then something
+        * is wrong and we just bail out rather than crash in
+        * kmem_alloc() or memcpy() below.
+        */
+       if (unlikely(size > XFS_DFORK_SIZE(dip, ip->i_mount, whichfork))) {
+               xfs_warn(ip->i_mount,
+       "corrupt inode %Lu (bad size %d for local fork, size = %d).",
+                       (unsigned long long) ip->i_ino, size,
+                       XFS_DFORK_SIZE(dip, ip->i_mount, whichfork));
+               XFS_CORRUPTION_ERROR("xfs_iformat_local", XFS_ERRLEVEL_LOW,
+                                    ip->i_mount, dip);
+               return XFS_ERROR(EFSCORRUPTED);
+       }
+       ifp = XFS_IFORK_PTR(ip, whichfork);
+       real_size = 0;
+       if (size == 0)
+               ifp->if_u1.if_data = NULL;
+       else if (size <= sizeof(ifp->if_u2.if_inline_data))
+               /* Small enough: use the inline buffer inside the fork. */
+               ifp->if_u1.if_data = ifp->if_u2.if_inline_data;
+       else {
+               /* Round up to 4 bytes so the buffer can be logged cleanly. */
+               real_size = roundup(size, 4);
+               ifp->if_u1.if_data = kmem_alloc(real_size, KM_SLEEP | KM_NOFS);
+       }
+       ifp->if_bytes = size;
+       ifp->if_real_bytes = real_size;
+       if (size)
+               memcpy(ifp->if_u1.if_data, XFS_DFORK_PTR(dip, whichfork), size);
+       ifp->if_flags &= ~XFS_IFEXTENTS;
+       ifp->if_flags |= XFS_IFINLINE;
+       return 0;
+}
+
+/*
+ * The file consists of a set of extents all
+ * of which fit into the on-disk inode.
+ * If there are few enough extents to fit into
+ * the if_inline_ext, then copy them there.
+ * Otherwise allocate a buffer for them and copy
+ * them into it.  Either way, set if_extents
+ * to point at the extents.
+ *
+ * Returns 0 on success or EFSCORRUPTED when the extent count is out of
+ * range or a NOSTATE fork contains unwritten extents.
+ */
+STATIC int
+xfs_iformat_extents(
+       xfs_inode_t     *ip,
+       xfs_dinode_t    *dip,
+       int             whichfork)
+{
+       xfs_bmbt_rec_t  *dp;
+       xfs_ifork_t     *ifp;
+       int             nex;
+       int             size;
+       int             i;
+
+       ifp = XFS_IFORK_PTR(ip, whichfork);
+       nex = XFS_DFORK_NEXTENTS(dip, whichfork);
+       size = nex * (uint)sizeof(xfs_bmbt_rec_t);
+
+       /*
+        * If the number of extents is unreasonable, then something
+        * is wrong and we just bail out rather than crash in
+        * kmem_alloc() or memcpy() below.
+        *
+        * size < 0 also catches multiplication overflow above.
+        */
+       if (unlikely(size < 0 || size > XFS_DFORK_SIZE(dip, ip->i_mount, whichfork))) {
+               xfs_warn(ip->i_mount, "corrupt inode %Lu ((a)extents = %d).",
+                       (unsigned long long) ip->i_ino, nex);
+               XFS_CORRUPTION_ERROR("xfs_iformat_extents(1)", XFS_ERRLEVEL_LOW,
+                                    ip->i_mount, dip);
+               return XFS_ERROR(EFSCORRUPTED);
+       }
+
+       ifp->if_real_bytes = 0;
+       if (nex == 0)
+               ifp->if_u1.if_extents = NULL;
+       else if (nex <= XFS_INLINE_EXTS)
+               ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext;
+       else
+               /* Too many for the inline array: grow the extent list. */
+               xfs_iext_add(ifp, 0, nex);
+
+       ifp->if_bytes = size;
+       if (size) {
+               dp = (xfs_bmbt_rec_t *) XFS_DFORK_PTR(dip, whichfork);
+               xfs_validate_extents(ifp, nex, XFS_EXTFMT_INODE(ip));
+               /* Copy records, converting from big-endian disk format. */
+               for (i = 0; i < nex; i++, dp++) {
+                       xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, i);
+                       ep->l0 = get_unaligned_be64(&dp->l0);
+                       ep->l1 = get_unaligned_be64(&dp->l1);
+               }
+               XFS_BMAP_TRACE_EXLIST(ip, nex, whichfork);
+               if (whichfork != XFS_DATA_FORK ||
+                       XFS_EXTFMT_INODE(ip) == XFS_EXTFMT_NOSTATE)
+                               if (unlikely(xfs_check_nostate_extents(
+                                   ifp, 0, nex))) {
+                                       XFS_ERROR_REPORT("xfs_iformat_extents(2)",
+                                                        XFS_ERRLEVEL_LOW,
+                                                        ip->i_mount);
+                                       return XFS_ERROR(EFSCORRUPTED);
+                               }
+       }
+       ifp->if_flags |= XFS_IFEXTENTS;
+       return 0;
+}
+
+/*
+ * The file has too many extents to fit into
+ * the inode, so they are in B-tree format.
+ * Allocate a buffer for the root of the B-tree
+ * and copy the root into it.  The i_extents
+ * field will remain NULL until all of the
+ * extents are read in (when they are needed).
+ *
+ * Returns 0 on success or EFSCORRUPTED if the root block fails the
+ * sanity checks below.
+ */
+STATIC int
+xfs_iformat_btree(
+       xfs_inode_t             *ip,
+       xfs_dinode_t            *dip,
+       int                     whichfork)
+{
+       struct xfs_mount        *mp = ip->i_mount;
+       xfs_bmdr_block_t        *dfp;
+       xfs_ifork_t             *ifp;
+       /* REFERENCED */
+       int                     nrecs;
+       int                     size;
+
+       ifp = XFS_IFORK_PTR(ip, whichfork);
+       dfp = (xfs_bmdr_block_t *)XFS_DFORK_PTR(dip, whichfork);
+       size = XFS_BMAP_BROOT_SPACE(mp, dfp);
+       nrecs = be16_to_cpu(dfp->bb_numrecs);
+
+       /*
+        * blow out if -- fork has less extents than can fit in
+        * fork (fork shouldn't be a btree format), root btree
+        * block has more records than can fit into the fork,
+        * or the number of extents is greater than the number of
+        * blocks.
+        */
+       if (unlikely(XFS_IFORK_NEXTENTS(ip, whichfork) <=
+                                       XFS_IFORK_MAXEXT(ip, whichfork) ||
+                    XFS_BMDR_SPACE_CALC(nrecs) >
+                                       XFS_DFORK_SIZE(dip, mp, whichfork) ||
+                    XFS_IFORK_NEXTENTS(ip, whichfork) > ip->i_d.di_nblocks)) {
+               xfs_warn(mp, "corrupt inode %Lu (btree).",
+                                       (unsigned long long) ip->i_ino);
+               XFS_CORRUPTION_ERROR("xfs_iformat_btree", XFS_ERRLEVEL_LOW,
+                                        mp, dip);
+               return XFS_ERROR(EFSCORRUPTED);
+       }
+
+       ifp->if_broot_bytes = size;
+       /* KM_SLEEP means this allocation cannot fail with NULL. */
+       ifp->if_broot = kmem_alloc(size, KM_SLEEP | KM_NOFS);
+       ASSERT(ifp->if_broot != NULL);
+       /*
+        * Copy and convert from the on-disk structure
+        * to the in-memory structure.
+        */
+       xfs_bmdr_to_bmbt(ip, dfp, XFS_DFORK_SIZE(dip, ip->i_mount, whichfork),
+                        ifp->if_broot, size);
+       ifp->if_flags &= ~XFS_IFEXTENTS;
+       ifp->if_flags |= XFS_IFBROOT;
+
+       return 0;
+}
+
+/*
+ * Read in extents from a btree-format inode.
+ * Allocate and fill in if_extents.  Real work is done in xfs_bmap.c.
+ *
+ * Returns 0 on success, EFSCORRUPTED if the fork is not in btree
+ * format, or the error from xfs_bmap_read_extents() (after tearing the
+ * partially-built extent list back down).
+ */
+int
+xfs_iread_extents(
+       xfs_trans_t     *tp,
+       xfs_inode_t     *ip,
+       int             whichfork)
+{
+       int             error;
+       xfs_ifork_t     *ifp;
+       xfs_extnum_t    nextents;
+
+       if (unlikely(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) {
+               XFS_ERROR_REPORT("xfs_iread_extents", XFS_ERRLEVEL_LOW,
+                                ip->i_mount);
+               return XFS_ERROR(EFSCORRUPTED);
+       }
+       nextents = XFS_IFORK_NEXTENTS(ip, whichfork);
+       ifp = XFS_IFORK_PTR(ip, whichfork);
+
+       /*
+        * We know that the size is valid (it's checked in iformat_btree)
+        */
+       ifp->if_bytes = ifp->if_real_bytes = 0;
+       ifp->if_flags |= XFS_IFEXTENTS;
+       xfs_iext_add(ifp, 0, nextents);
+       error = xfs_bmap_read_extents(tp, ip, whichfork);
+       if (error) {
+               /* Roll back: free the list and clear the in-core flag. */
+               xfs_iext_destroy(ifp);
+               ifp->if_flags &= ~XFS_IFEXTENTS;
+               return error;
+       }
+       xfs_validate_extents(ifp, nextents, XFS_EXTFMT_INODE(ip));
+       return 0;
+}
+/*
+ * Reallocate the space for if_broot based on the number of records
+ * being added or deleted as indicated in rec_diff.  Move the records
+ * and pointers in if_broot to fit the new size.  When shrinking this
+ * will eliminate holes between the records and pointers created by
+ * the caller.  When growing this will create holes to be filled in
+ * by the caller.
+ *
+ * The caller must not request to add more records than would fit in
+ * the on-disk inode root.  If the if_broot is currently NULL, then
+ * if we are adding records, one will be allocated.  The caller must also
+ * not request that the number of records go below zero, although
+ * it can go to zero.
+ *
+ * ip -- the inode whose if_broot area is changing
+ * ext_diff -- the change in the number of records, positive or negative,
+ *      requested for the if_broot array.
+ */
+void
+xfs_iroot_realloc(
+       xfs_inode_t             *ip,
+       int                     rec_diff,
+       int                     whichfork)
+{
+       struct xfs_mount        *mp = ip->i_mount;
+       int                     cur_max;
+       xfs_ifork_t             *ifp;
+       struct xfs_btree_block  *new_broot;
+       int                     new_max;
+       size_t                  new_size;
+       char                    *np;
+       char                    *op;
+
+       /*
+        * Handle the degenerate case quietly.
+        */
+       if (rec_diff == 0) {
+               return;
+       }
+
+       ifp = XFS_IFORK_PTR(ip, whichfork);
+       if (rec_diff > 0) {
+               /*
+                * If there wasn't any memory allocated before, just
+                * allocate it now and get out.
+                */
+               if (ifp->if_broot_bytes == 0) {
+                       new_size = XFS_BMAP_BROOT_SPACE_CALC(mp, rec_diff);
+                       ifp->if_broot = kmem_alloc(new_size, KM_SLEEP | KM_NOFS);
+                       ifp->if_broot_bytes = (int)new_size;
+                       return;
+               }
+
+               /*
+                * If there is already an existing if_broot, then we need
+                * to realloc() it and shift the pointers to their new
+                * location.  The records don't change location because
+                * they are kept butted up against the btree block header.
+                */
+               cur_max = xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0);
+               new_max = cur_max + rec_diff;
+               new_size = XFS_BMAP_BROOT_SPACE_CALC(mp, new_max);
+               ifp->if_broot = kmem_realloc(ifp->if_broot, new_size,
+                               XFS_BMAP_BROOT_SPACE_CALC(mp, cur_max),
+                               KM_SLEEP | KM_NOFS);
+               /* op/np: old and new locations of the pointer array. */
+               op = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, ifp->if_broot, 1,
+                                                    ifp->if_broot_bytes);
+               np = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, ifp->if_broot, 1,
+                                                    (int)new_size);
+               ifp->if_broot_bytes = (int)new_size;
+               ASSERT(XFS_BMAP_BMDR_SPACE(ifp->if_broot) <=
+                       XFS_IFORK_SIZE(ip, whichfork));
+               /* memmove: old and new pointer arrays may overlap. */
+               memmove(np, op, cur_max * (uint)sizeof(xfs_dfsbno_t));
+               return;
+       }
+
+       /*
+        * rec_diff is less than 0.  In this case, we are shrinking the
+        * if_broot buffer.  It must already exist.  If we go to zero
+        * records, just get rid of the root and clear the status bit.
+        */
+       ASSERT((ifp->if_broot != NULL) && (ifp->if_broot_bytes > 0));
+       cur_max = xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0);
+       new_max = cur_max + rec_diff;
+       ASSERT(new_max >= 0);
+       if (new_max > 0)
+               new_size = XFS_BMAP_BROOT_SPACE_CALC(mp, new_max);
+       else
+               new_size = 0;
+       if (new_size > 0) {
+               /* Shrink by copying into a fresh, smaller buffer. */
+               new_broot = kmem_alloc(new_size, KM_SLEEP | KM_NOFS);
+               /*
+                * First copy over the btree block header.
+                */
+               memcpy(new_broot, ifp->if_broot,
+                       XFS_BMBT_BLOCK_LEN(ip->i_mount));
+       } else {
+               new_broot = NULL;
+               ifp->if_flags &= ~XFS_IFBROOT;
+       }
+
+       /*
+        * Only copy the records and pointers if there are any.
+        */
+       if (new_max > 0) {
+               /*
+                * First copy the records.
+                */
+               op = (char *)XFS_BMBT_REC_ADDR(mp, ifp->if_broot, 1);
+               np = (char *)XFS_BMBT_REC_ADDR(mp, new_broot, 1);
+               memcpy(np, op, new_max * (uint)sizeof(xfs_bmbt_rec_t));
+
+               /*
+                * Then copy the pointers.
+                */
+               op = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, ifp->if_broot, 1,
+                                                    ifp->if_broot_bytes);
+               np = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, new_broot, 1,
+                                                    (int)new_size);
+               memcpy(np, op, new_max * (uint)sizeof(xfs_dfsbno_t));
+       }
+       kmem_free(ifp->if_broot);
+       ifp->if_broot = new_broot;
+       ifp->if_broot_bytes = (int)new_size;
+       if (ifp->if_broot)
+               ASSERT(XFS_BMAP_BMDR_SPACE(ifp->if_broot) <=
+                       XFS_IFORK_SIZE(ip, whichfork));
+       return;
+}
+
+
+/*
+ * This is called when the amount of space needed for if_data
+ * is increased or decreased.  The change in size is indicated by
+ * the number of bytes that need to be added or deleted in the
+ * byte_diff parameter.
+ *
+ * If the amount of space needed has decreased below the size of the
+ * inline buffer, then switch to using the inline buffer.  Otherwise,
+ * use kmem_realloc() or kmem_alloc() to adjust the size of the buffer
+ * to what is needed.
+ *
+ * ip -- the inode whose if_data area is changing
+ * byte_diff -- the change in the number of bytes, positive or negative,
+ *      requested for the if_data array.
+ */
+void
+xfs_idata_realloc(
+       xfs_inode_t     *ip,
+       int             byte_diff,
+       int             whichfork)
+{
+       xfs_ifork_t     *ifp;
+       int             new_size;
+       int             real_size;      /* allocated size; 0 means inline/none */
+
+       if (byte_diff == 0) {
+               return;
+       }
+
+       ifp = XFS_IFORK_PTR(ip, whichfork);
+       new_size = (int)ifp->if_bytes + byte_diff;
+       ASSERT(new_size >= 0);
+
+       if (new_size == 0) {
+               /* Going to zero bytes: drop any heap buffer entirely. */
+               if (ifp->if_u1.if_data != ifp->if_u2.if_inline_data) {
+                       kmem_free(ifp->if_u1.if_data);
+               }
+               ifp->if_u1.if_data = NULL;
+               real_size = 0;
+       } else if (new_size <= sizeof(ifp->if_u2.if_inline_data)) {
+               /*
+                * If the valid extents/data can fit in if_inline_ext/data,
+                * copy them from the malloc'd vector and free it.
+                */
+               if (ifp->if_u1.if_data == NULL) {
+                       ifp->if_u1.if_data = ifp->if_u2.if_inline_data;
+               } else if (ifp->if_u1.if_data != ifp->if_u2.if_inline_data) {
+                       ASSERT(ifp->if_real_bytes != 0);
+                       memcpy(ifp->if_u2.if_inline_data, ifp->if_u1.if_data,
+                             new_size);
+                       kmem_free(ifp->if_u1.if_data);
+                       ifp->if_u1.if_data = ifp->if_u2.if_inline_data;
+               }
+               real_size = 0;
+       } else {
+               /*
+                * Stuck with malloc/realloc.
+                * For inline data, the underlying buffer must be
+                * a multiple of 4 bytes in size so that it can be
+                * logged and stay on word boundaries.  We enforce
+                * that here.
+                */
+               real_size = roundup(new_size, 4);
+               if (ifp->if_u1.if_data == NULL) {
+                       /* No buffer yet: fresh allocation. */
+                       ASSERT(ifp->if_real_bytes == 0);
+                       ifp->if_u1.if_data = kmem_alloc(real_size,
+                                                       KM_SLEEP | KM_NOFS);
+               } else if (ifp->if_u1.if_data != ifp->if_u2.if_inline_data) {
+                       /*
+                        * Only do the realloc if the underlying size
+                        * is really changing.
+                        */
+                       if (ifp->if_real_bytes != real_size) {
+                               ifp->if_u1.if_data =
+                                       kmem_realloc(ifp->if_u1.if_data,
+                                                       real_size,
+                                                       ifp->if_real_bytes,
+                                                       KM_SLEEP | KM_NOFS);
+                       }
+               } else {
+                       /* Growing out of the inline buffer: copy it over. */
+                       ASSERT(ifp->if_real_bytes == 0);
+                       ifp->if_u1.if_data = kmem_alloc(real_size,
+                                                       KM_SLEEP | KM_NOFS);
+                       memcpy(ifp->if_u1.if_data, ifp->if_u2.if_inline_data,
+                               ifp->if_bytes);
+               }
+       }
+       ifp->if_real_bytes = real_size;
+       ifp->if_bytes = new_size;
+       ASSERT(ifp->if_bytes <= XFS_IFORK_SIZE(ip, whichfork));
+}
+
+/*
+ * Tear down the in-core state of one inode fork: free the btree root,
+ * any heap-allocated local data or extent list, and (for the attr
+ * fork) the fork structure itself.
+ */
+void
+xfs_idestroy_fork(
+       xfs_inode_t     *ip,
+       int             whichfork)
+{
+       xfs_ifork_t     *ifp;
+
+       ifp = XFS_IFORK_PTR(ip, whichfork);
+       if (ifp->if_broot != NULL) {
+               kmem_free(ifp->if_broot);
+               ifp->if_broot = NULL;
+       }
+
+       /*
+        * If the format is local, then we can't have an extents
+        * array so just look for an inline data array.  If we're
+        * not local then we may or may not have an extents list,
+        * so check and free it up if we do.
+        */
+       if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
+               /* Only free if_data when it points at a heap buffer. */
+               if ((ifp->if_u1.if_data != ifp->if_u2.if_inline_data) &&
+                   (ifp->if_u1.if_data != NULL)) {
+                       ASSERT(ifp->if_real_bytes != 0);
+                       kmem_free(ifp->if_u1.if_data);
+                       ifp->if_u1.if_data = NULL;
+                       ifp->if_real_bytes = 0;
+               }
+       } else if ((ifp->if_flags & XFS_IFEXTENTS) &&
+                  ((ifp->if_flags & XFS_IFEXTIREC) ||
+                   ((ifp->if_u1.if_extents != NULL) &&
+                    (ifp->if_u1.if_extents != ifp->if_u2.if_inline_ext)))) {
+               ASSERT(ifp->if_real_bytes != 0);
+               xfs_iext_destroy(ifp);
+       }
+       /* By now nothing heap-allocated should remain on the fork. */
+       ASSERT(ifp->if_u1.if_extents == NULL ||
+              ifp->if_u1.if_extents == ifp->if_u2.if_inline_ext);
+       ASSERT(ifp->if_real_bytes == 0);
+       if (whichfork == XFS_ATTR_FORK) {
+               kmem_zone_free(xfs_ifork_zone, ip->i_afp);
+               ip->i_afp = NULL;
+       }
+}
+
+/*
+ * xfs_iextents_copy()
+ *
+ * This is called to copy the REAL extents (as opposed to the delayed
+ * allocation extents) from the inode into the given buffer.  It
+ * returns the number of bytes copied into the buffer.
+ *
+ * If there are no delayed allocation extents, then we can just
+ * memcpy() the extents into the buffer.  Otherwise, we need to
+ * examine each extent in turn and skip those which are delayed.
+ *
+ * Caller must hold the inode lock (asserted below) and dp must have
+ * room for every record in the fork.
+ */
+int
+xfs_iextents_copy(
+       xfs_inode_t             *ip,
+       xfs_bmbt_rec_t          *dp,
+       int                     whichfork)
+{
+       int                     copied;
+       int                     i;
+       xfs_ifork_t             *ifp;
+       int                     nrecs;
+       xfs_fsblock_t           start_block;
+
+       ifp = XFS_IFORK_PTR(ip, whichfork);
+       ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
+       ASSERT(ifp->if_bytes > 0);
+
+       nrecs = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
+       XFS_BMAP_TRACE_EXLIST(ip, nrecs, whichfork);
+       ASSERT(nrecs > 0);
+
+       /*
+        * There are some delayed allocation extents in the
+        * inode, so copy the extents one at a time and skip
+        * the delayed ones.  There must be at least one
+        * non-delayed extent.
+        */
+       copied = 0;
+       for (i = 0; i < nrecs; i++) {
+               xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, i);
+               start_block = xfs_bmbt_get_startblock(ep);
+               if (isnullstartblock(start_block)) {
+                       /*
+                        * It's a delayed allocation extent, so skip it.
+                        */
+                       continue;
+               }
+
+               /* Translate to on disk format */
+               put_unaligned_be64(ep->l0, &dp->l0);
+               put_unaligned_be64(ep->l1, &dp->l1);
+               dp++;
+               copied++;
+       }
+       ASSERT(copied != 0);
+       xfs_validate_extents(ifp, copied, XFS_EXTFMT_INODE(ip));
+
+       return (copied * (uint)sizeof(xfs_bmbt_rec_t));
+}
+
+/*
+ * Write an incore inode fork into the on-disk inode area of buffer bp.
+ *
+ * Each of the following cases stores data into the same region
+ * of the on-disk inode, so only one of them can be valid at
+ * any given time. While it is possible to have conflicting formats
+ * and log flags, e.g. having XFS_ILOG_?DATA set when the fork is
+ * in EXTENTS format, this can only happen when the fork has
+ * changed formats after being modified but before being flushed.
+ * In these cases, the format always takes precedence, because the
+ * format indicates the current state of the fork.
+ */
+void
+xfs_iflush_fork(
+       xfs_inode_t             *ip,
+       xfs_dinode_t            *dip,
+       xfs_inode_log_item_t    *iip,
+       int                     whichfork,
+       xfs_buf_t               *bp)
+{
+       char                    *cp;
+       xfs_ifork_t             *ifp;
+       xfs_mount_t             *mp;
+       /* per-fork (data/attr) logging flags, indexed by whichfork */
+       static const short      brootflag[2] =
+               { XFS_ILOG_DBROOT, XFS_ILOG_ABROOT };
+       static const short      dataflag[2] =
+               { XFS_ILOG_DDATA, XFS_ILOG_ADATA };
+       static const short      extflag[2] =
+               { XFS_ILOG_DEXT, XFS_ILOG_AEXT };
+
+       /* No inode log item means nothing was logged; nothing to flush. */
+       if (!iip)
+               return;
+       ifp = XFS_IFORK_PTR(ip, whichfork);
+       /*
+        * This can happen if we gave up in iformat in an error path,
+        * for the attribute fork.
+        */
+       if (!ifp) {
+               ASSERT(whichfork == XFS_ATTR_FORK);
+               return;
+       }
+       cp = XFS_DFORK_PTR(dip, whichfork);
+       mp = ip->i_mount;
+       switch (XFS_IFORK_FORMAT(ip, whichfork)) {
+       case XFS_DINODE_FMT_LOCAL:
+               /* inline data: copy the fork bytes straight into the dinode */
+               if ((iip->ili_fields & dataflag[whichfork]) &&
+                   (ifp->if_bytes > 0)) {
+                       ASSERT(ifp->if_u1.if_data != NULL);
+                       ASSERT(ifp->if_bytes <= XFS_IFORK_SIZE(ip, whichfork));
+                       memcpy(cp, ifp->if_u1.if_data, ifp->if_bytes);
+               }
+               break;
+
+       case XFS_DINODE_FMT_EXTENTS:
+               ASSERT((ifp->if_flags & XFS_IFEXTENTS) ||
+                      !(iip->ili_fields & extflag[whichfork]));
+               if ((iip->ili_fields & extflag[whichfork]) &&
+                   (ifp->if_bytes > 0)) {
+                       ASSERT(xfs_iext_get_ext(ifp, 0));
+                       ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) > 0);
+                       /* copies real (non-delayed) extents to disk format */
+                       (void)xfs_iextents_copy(ip, (xfs_bmbt_rec_t *)cp,
+                               whichfork);
+               }
+               break;
+
+       case XFS_DINODE_FMT_BTREE:
+               if ((iip->ili_fields & brootflag[whichfork]) &&
+                   (ifp->if_broot_bytes > 0)) {
+                       ASSERT(ifp->if_broot != NULL);
+                       ASSERT(XFS_BMAP_BMDR_SPACE(ifp->if_broot) <=
+                               XFS_IFORK_SIZE(ip, whichfork));
+                       /* convert incore btree root to on-disk (bmdr) form */
+                       xfs_bmbt_to_bmdr(mp, ifp->if_broot, ifp->if_broot_bytes,
+                               (xfs_bmdr_block_t *)cp,
+                               XFS_DFORK_SIZE(dip, mp, whichfork));
+               }
+               break;
+
+       case XFS_DINODE_FMT_DEV:
+               /* device nodes only ever use the data fork */
+               if (iip->ili_fields & XFS_ILOG_DEV) {
+                       ASSERT(whichfork == XFS_DATA_FORK);
+                       xfs_dinode_put_rdev(dip, ip->i_df.if_u2.if_rdev);
+               }
+               break;
+
+       case XFS_DINODE_FMT_UUID:
+               if (iip->ili_fields & XFS_ILOG_UUID) {
+                       ASSERT(whichfork == XFS_DATA_FORK);
+                       memcpy(XFS_DFORK_DPTR(dip),
+                              &ip->i_df.if_u2.if_uuid,
+                              sizeof(uuid_t));
+               }
+               break;
+
+       default:
+               ASSERT(0);
+               break;
+       }
+}
+
+/*
+ * Return a pointer to the extent record at file index idx.
+ * Works for all three incore representations (indirection array,
+ * direct list, inline buffer); returns NULL if the fork holds no
+ * extent bytes at all.
+ */
+xfs_bmbt_rec_host_t *
+xfs_iext_get_ext(
+       xfs_ifork_t     *ifp,           /* inode fork pointer */
+       xfs_extnum_t    idx)            /* index of target extent */
+{
+       ASSERT(idx >= 0);
+       ASSERT(idx < ifp->if_bytes / sizeof(xfs_bmbt_rec_t));
+
+       if ((ifp->if_flags & XFS_IFEXTIREC) && (idx == 0)) {
+               /* fast path: extent 0 is the first record of the first irec */
+               return ifp->if_u1.if_ext_irec->er_extbuf;
+       } else if (ifp->if_flags & XFS_IFEXTIREC) {
+               xfs_ext_irec_t  *erp;           /* irec pointer */
+               int             erp_idx = 0;    /* irec index */
+               xfs_extnum_t    page_idx = idx; /* ext index in target list */
+
+               /* map the file-wide index to (irec, offset-within-irec) */
+               erp = xfs_iext_idx_to_irec(ifp, &page_idx, &erp_idx, 0);
+               return &erp->er_extbuf[page_idx];
+       } else if (ifp->if_bytes) {
+               /* direct and inline lists are both reachable via if_extents */
+               return &ifp->if_u1.if_extents[idx];
+       } else {
+               return NULL;
+       }
+}
+
+/*
+ * Insert new item(s) into the extent records for incore inode
+ * fork 'ifp'.  'count' new items are inserted at index 'idx'.
+ * xfs_iext_add() makes the room; the loop below fills it in.
+ */
+void
+xfs_iext_insert(
+       xfs_inode_t     *ip,            /* incore inode pointer */
+       xfs_extnum_t    idx,            /* starting index of new items */
+       xfs_extnum_t    count,          /* number of inserted items */
+       xfs_bmbt_irec_t *new,           /* items to insert */
+       int             state)          /* type of extent conversion */
+{
+       /* BMAP_ATTRFORK selects the attribute fork, else the data fork */
+       xfs_ifork_t     *ifp = (state & BMAP_ATTRFORK) ? ip->i_afp : &ip->i_df;
+       xfs_extnum_t    i;              /* extent record index */
+
+       trace_xfs_iext_insert(ip, idx, new, state, _RET_IP_);
+
+       ASSERT(ifp->if_flags & XFS_IFEXTENTS);
+       xfs_iext_add(ifp, idx, count);
+       for (i = idx; i < idx + count; i++, new++)
+               xfs_bmbt_set_all(xfs_iext_get_ext(ifp, i), new);
+}
+
+/*
+ * This is called when the amount of space required for incore file
+ * extents needs to be increased. The ext_diff parameter stores the
+ * number of new extents being added and the idx parameter contains
+ * the extent index where the new extents will be added. If the new
+ * extents are being appended, then we just need to (re)allocate and
+ * initialize the space. Otherwise, if the new extents are being
+ * inserted into the middle of the existing entries, a bit more work
+ * is required to make room for the new extents to be inserted. The
+ * caller is responsible for filling in the new extent entries upon
+ * return.
+ */
+void
+xfs_iext_add(
+       xfs_ifork_t     *ifp,           /* inode fork pointer */
+       xfs_extnum_t    idx,            /* index to begin adding exts */
+       int             ext_diff)       /* number of extents to add */
+{
+       int             byte_diff;      /* new bytes being added */
+       int             new_size;       /* size of extents after adding */
+       xfs_extnum_t    nextents;       /* number of extents in file */
+
+       nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
+       ASSERT((idx >= 0) && (idx <= nextents));
+       byte_diff = ext_diff * sizeof(xfs_bmbt_rec_t);
+       new_size = ifp->if_bytes + byte_diff;
+       /*
+        * If the new number of extents (nextents + ext_diff)
+        * fits inside the inode, then continue to use the inline
+        * extent buffer.
+        */
+       if (nextents + ext_diff <= XFS_INLINE_EXTS) {
+               if (idx < nextents) {
+                       /* open a zeroed hole at idx for the new records */
+                       memmove(&ifp->if_u2.if_inline_ext[idx + ext_diff],
+                               &ifp->if_u2.if_inline_ext[idx],
+                               (nextents - idx) * sizeof(xfs_bmbt_rec_t));
+                       memset(&ifp->if_u2.if_inline_ext[idx], 0, byte_diff);
+               }
+               ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext;
+               /* if_real_bytes == 0 marks the inline representation */
+               ifp->if_real_bytes = 0;
+       }
+       /*
+        * Otherwise use a linear (direct) extent list.
+        * If the extents are currently inside the inode,
+        * xfs_iext_realloc_direct will switch us from
+        * inline to direct extent allocation mode.
+        */
+       else if (nextents + ext_diff <= XFS_LINEAR_EXTS) {
+               xfs_iext_realloc_direct(ifp, new_size);
+               if (idx < nextents) {
+                       memmove(&ifp->if_u1.if_extents[idx + ext_diff],
+                               &ifp->if_u1.if_extents[idx],
+                               (nextents - idx) * sizeof(xfs_bmbt_rec_t));
+                       memset(&ifp->if_u1.if_extents[idx], 0, byte_diff);
+               }
+       }
+       /* Indirection array */
+       else {
+               xfs_ext_irec_t  *erp;
+               int             erp_idx = 0;
+               int             page_idx = idx;
+
+               ASSERT(nextents + ext_diff > XFS_LINEAR_EXTS);
+               if (ifp->if_flags & XFS_IFEXTIREC) {
+                       erp = xfs_iext_idx_to_irec(ifp, &page_idx, &erp_idx, 1);
+               } else {
+                       /* first time over the linear limit: build the array */
+                       xfs_iext_irec_init(ifp);
+                       ASSERT(ifp->if_flags & XFS_IFEXTIREC);
+                       erp = ifp->if_u1.if_ext_irec;
+               }
+               /* Extents fit in target extent page */
+               if (erp && erp->er_extcount + ext_diff <= XFS_LINEAR_EXTS) {
+                       if (page_idx < erp->er_extcount) {
+                               memmove(&erp->er_extbuf[page_idx + ext_diff],
+                                       &erp->er_extbuf[page_idx],
+                                       (erp->er_extcount - page_idx) *
+                                       sizeof(xfs_bmbt_rec_t));
+                               memset(&erp->er_extbuf[page_idx], 0, byte_diff);
+                       }
+                       erp->er_extcount += ext_diff;
+                       xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, ext_diff);
+               }
+               /* Insert a new extent page */
+               else if (erp) {
+                       xfs_iext_add_indirect_multi(ifp,
+                               erp_idx, page_idx, ext_diff);
+               }
+               /*
+                * If extent(s) are being appended to the last page in
+                * the indirection array and the new extent(s) don't fit
+                * in the page, then erp is NULL and erp_idx is set to
+                * the next index needed in the indirection array.
+                */
+               else {
+                       int     count = ext_diff;
+
+                       /* append whole new pages until all extents fit */
+                       while (count) {
+                               erp = xfs_iext_irec_new(ifp, erp_idx);
+                               erp->er_extcount = count;
+                               count -= MIN(count, (int)XFS_LINEAR_EXTS);
+                               if (count) {
+                                       erp_idx++;
+                               }
+                       }
+               }
+       }
+       /* common exit for all three representations */
+       ifp->if_bytes = new_size;
+}
+
+/*
+ * This is called when incore extents are being added to the indirection
+ * array and the new extents do not fit in the target extent list. The
+ * erp_idx parameter contains the irec index for the target extent list
+ * in the indirection array, and the idx parameter contains the extent
+ * index within the list. The number of extents being added is stored
+ * in the count parameter.
+ *
+ *    |-------|   |-------|
+ *    |       |   |       |    idx - number of extents before idx
+ *    |  idx  |   | count |
+ *    |       |   |       |    count - number of extents being inserted at idx
+ *    |-------|   |-------|
+ *    | count |   | nex2  |    nex2 - number of extents after idx + count
+ *    |-------|   |-------|
+ */
+void
+xfs_iext_add_indirect_multi(
+       xfs_ifork_t     *ifp,                   /* inode fork pointer */
+       int             erp_idx,                /* target extent irec index */
+       xfs_extnum_t    idx,                    /* index within target list */
+       int             count)                  /* new extents being added */
+{
+       int             byte_diff;              /* new bytes being added */
+       xfs_ext_irec_t  *erp;                   /* pointer to irec entry */
+       xfs_extnum_t    ext_diff;               /* number of extents to add */
+       xfs_extnum_t    ext_cnt;                /* new extents still needed */
+       xfs_extnum_t    nex2;                   /* extents after idx + count */
+       xfs_bmbt_rec_t  *nex2_ep = NULL;        /* temp list for nex2 extents */
+       int             nlists;                 /* number of irec's (lists) */
+
+       ASSERT(ifp->if_flags & XFS_IFEXTIREC);
+       erp = &ifp->if_u1.if_ext_irec[erp_idx];
+       nex2 = erp->er_extcount - idx;
+       nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
+
+       /*
+        * Save second part of target extent list (all extents past
+        * idx) in a temporary buffer so it can be re-inserted after
+        * the new extents below.
+        */
+       if (nex2) {
+               byte_diff = nex2 * sizeof(xfs_bmbt_rec_t);
+               nex2_ep = (xfs_bmbt_rec_t *) kmem_alloc(byte_diff, KM_NOFS);
+               memmove(nex2_ep, &erp->er_extbuf[idx], byte_diff);
+               erp->er_extcount -= nex2;
+               xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, -nex2);
+               memset(&erp->er_extbuf[idx], 0, byte_diff);
+       }
+
+       /*
+        * Add the new extents to the end of the target
+        * list, then allocate new irec record(s) and
+        * extent buffer(s) as needed to store the rest
+        * of the new extents.
+        */
+       ext_cnt = count;
+       ext_diff = MIN(ext_cnt, (int)XFS_LINEAR_EXTS - erp->er_extcount);
+       if (ext_diff) {
+               erp->er_extcount += ext_diff;
+               xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, ext_diff);
+               ext_cnt -= ext_diff;
+       }
+       while (ext_cnt) {
+               erp_idx++;
+               erp = xfs_iext_irec_new(ifp, erp_idx);
+               ext_diff = MIN(ext_cnt, (int)XFS_LINEAR_EXTS);
+               erp->er_extcount = ext_diff;
+               xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, ext_diff);
+               ext_cnt -= ext_diff;
+       }
+
+       /* Add nex2 extents back to indirection array */
+       if (nex2) {
+               xfs_extnum_t    ext_avail;
+               int             i;
+
+               byte_diff = nex2 * sizeof(xfs_bmbt_rec_t);
+               ext_avail = XFS_LINEAR_EXTS - erp->er_extcount;
+               i = 0;
+               /*
+                * If nex2 extents fit in the current page, append
+                * nex2_ep after the new extents.
+                */
+               if (nex2 <= ext_avail) {
+                       i = erp->er_extcount;
+               }
+               /*
+                * Otherwise, check if space is available in the
+                * next page.
+                */
+               else if ((erp_idx < nlists - 1) &&
+                        (nex2 <= (ext_avail = XFS_LINEAR_EXTS -
+                         ifp->if_u1.if_ext_irec[erp_idx+1].er_extcount))) {
+                       erp_idx++;
+                       erp++;
+                       /* Create a hole for nex2 extents */
+                       memmove(&erp->er_extbuf[nex2], erp->er_extbuf,
+                               erp->er_extcount * sizeof(xfs_bmbt_rec_t));
+                       /* note: i stays 0 here -- nex2 goes at the front */
+               }
+               /*
+                * Final choice, create a new extent page for
+                * nex2 extents.
+                */
+               else {
+                       erp_idx++;
+                       erp = xfs_iext_irec_new(ifp, erp_idx);
+               }
+               memmove(&erp->er_extbuf[i], nex2_ep, byte_diff);
+               kmem_free(nex2_ep);
+               erp->er_extcount += nex2;
+               xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, nex2);
+       }
+}
+
+/*
+ * This is called when the amount of space required for incore file
+ * extents needs to be decreased. The ext_diff parameter stores the
+ * number of extents to be removed and the idx parameter contains
+ * the extent index where the extents will be removed from.
+ *
+ * If the amount of space needed has decreased below the linear
+ * limit, XFS_IEXT_BUFSZ, then switch to using the contiguous
+ * extent array.  Otherwise, use kmem_realloc() to adjust the
+ * size to what is needed.
+ */
+void
+xfs_iext_remove(
+       xfs_inode_t     *ip,            /* incore inode pointer */
+       xfs_extnum_t    idx,            /* index to begin removing exts */
+       int             ext_diff,       /* number of extents to remove */
+       int             state)          /* type of extent conversion */
+{
+       /* BMAP_ATTRFORK selects the attribute fork, else the data fork */
+       xfs_ifork_t     *ifp = (state & BMAP_ATTRFORK) ? ip->i_afp : &ip->i_df;
+       xfs_extnum_t    nextents;       /* number of extents in file */
+       int             new_size;       /* size of extents after removal */
+
+       trace_xfs_iext_remove(ip, idx, state, _RET_IP_);
+
+       ASSERT(ext_diff > 0);
+       nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
+       new_size = (nextents - ext_diff) * sizeof(xfs_bmbt_rec_t);
+
+       /* dispatch on the current representation of the extent list */
+       if (new_size == 0) {
+               xfs_iext_destroy(ifp);
+       } else if (ifp->if_flags & XFS_IFEXTIREC) {
+               xfs_iext_remove_indirect(ifp, idx, ext_diff);
+       } else if (ifp->if_real_bytes) {
+               xfs_iext_remove_direct(ifp, idx, ext_diff);
+       } else {
+               xfs_iext_remove_inline(ifp, idx, ext_diff);
+       }
+       ifp->if_bytes = new_size;
+}
+
+/*
+ * This removes ext_diff extents from the inline buffer, beginning
+ * at extent index idx.  Surviving extents are shifted down and the
+ * vacated tail of the buffer is zeroed.
+ */
+void
+xfs_iext_remove_inline(
+       xfs_ifork_t     *ifp,           /* inode fork pointer */
+       xfs_extnum_t    idx,            /* index to begin removing exts */
+       int             ext_diff)       /* number of extents to remove */
+{
+       int             nextents;       /* number of extents in file */
+
+       ASSERT(!(ifp->if_flags & XFS_IFEXTIREC));
+       ASSERT(idx < XFS_INLINE_EXTS);
+       nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
+       ASSERT(((nextents - ext_diff) > 0) &&
+               (nextents - ext_diff) < XFS_INLINE_EXTS);
+
+       if (idx + ext_diff < nextents) {
+               /* removal from the middle: close the gap, zero the tail */
+               memmove(&ifp->if_u2.if_inline_ext[idx],
+                       &ifp->if_u2.if_inline_ext[idx + ext_diff],
+                       (nextents - (idx + ext_diff)) *
+                        sizeof(xfs_bmbt_rec_t));
+               memset(&ifp->if_u2.if_inline_ext[nextents - ext_diff],
+                       0, ext_diff * sizeof(xfs_bmbt_rec_t));
+       } else {
+               /* removal at the end (truncate): just zero the records */
+               memset(&ifp->if_u2.if_inline_ext[idx], 0,
+                       ext_diff * sizeof(xfs_bmbt_rec_t));
+       }
+}
+
+/*
+ * This removes ext_diff extents from a linear (direct) extent list,
+ * beginning at extent index idx. If the extents are being removed
+ * from the end of the list (ie. truncate) then we just need to re-
+ * allocate the list to remove the extra space. Otherwise, if the
+ * extents are being removed from the middle of the existing extent
+ * entries, then we first need to move the extent records beginning
+ * at idx + ext_diff up in the list to overwrite the records being
+ * removed, then remove the extra space via kmem_realloc.
+ */
+void
+xfs_iext_remove_direct(
+       xfs_ifork_t     *ifp,           /* inode fork pointer */
+       xfs_extnum_t    idx,            /* index to begin removing exts */
+       int             ext_diff)       /* number of extents to remove */
+{
+       xfs_extnum_t    nextents;       /* number of extents in file */
+       int             new_size;       /* size of extents after removal */
+
+       ASSERT(!(ifp->if_flags & XFS_IFEXTIREC));
+       new_size = ifp->if_bytes -
+               (ext_diff * sizeof(xfs_bmbt_rec_t));
+       nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
+
+       if (new_size == 0) {
+               xfs_iext_destroy(ifp);
+               return;
+       }
+       /* Move extents up in the list (if needed) */
+       if (idx + ext_diff < nextents) {
+               memmove(&ifp->if_u1.if_extents[idx],
+                       &ifp->if_u1.if_extents[idx + ext_diff],
+                       (nextents - (idx + ext_diff)) *
+                        sizeof(xfs_bmbt_rec_t));
+       }
+       /* zero the now-unused tail before shrinking the allocation */
+       memset(&ifp->if_u1.if_extents[nextents - ext_diff],
+               0, ext_diff * sizeof(xfs_bmbt_rec_t));
+       /*
+        * Reallocate the direct extent list. If the extents
+        * will fit inside the inode then xfs_iext_realloc_direct
+        * will switch from direct to inline extent allocation
+        * mode for us.
+        */
+       xfs_iext_realloc_direct(ifp, new_size);
+       ifp->if_bytes = new_size;
+}
+
+/*
+ * This is called when incore extents are being removed from the
+ * indirection array and the extents being removed span multiple extent
+ * buffers. The idx parameter contains the file extent index where we
+ * want to begin removing extents, and the count parameter contains
+ * how many extents need to be removed.
+ *
+ *    |-------|   |-------|
+ *    | nex1  |   |       |    nex1 - number of extents before idx
+ *    |-------|   | count |
+ *    |       |   |       |    count - number of extents being removed at idx
+ *    | count |   |-------|
+ *    |       |   | nex2  |    nex2 - number of extents after idx + count
+ *    |-------|   |-------|
+ */
+void
+xfs_iext_remove_indirect(
+       xfs_ifork_t     *ifp,           /* inode fork pointer */
+       xfs_extnum_t    idx,            /* index to begin removing extents */
+       int             count)          /* number of extents to remove */
+{
+       xfs_ext_irec_t  *erp;           /* indirection array pointer */
+       int             erp_idx = 0;    /* indirection array index */
+       xfs_extnum_t    ext_cnt;        /* extents left to remove */
+       xfs_extnum_t    ext_diff;       /* extents to remove in current list */
+       xfs_extnum_t    nex1;           /* number of extents before idx */
+       xfs_extnum_t    nex2;           /* extents after idx + count */
+       int             page_idx = idx; /* index in target extent list */
+
+       ASSERT(ifp->if_flags & XFS_IFEXTIREC);
+       erp = xfs_iext_idx_to_irec(ifp,  &page_idx, &erp_idx, 0);
+       ASSERT(erp != NULL);
+       nex1 = page_idx;
+       ext_cnt = count;
+       /* walk irec pages, removing up to a page's worth per iteration */
+       while (ext_cnt) {
+               nex2 = MAX((erp->er_extcount - (nex1 + ext_cnt)), 0);
+               ext_diff = MIN(ext_cnt, (erp->er_extcount - nex1));
+               /*
+                * Check for deletion of entire list;
+                * xfs_iext_irec_remove() updates extent offsets.
+                */
+               if (ext_diff == erp->er_extcount) {
+                       xfs_iext_irec_remove(ifp, erp_idx);
+                       ext_cnt -= ext_diff;
+                       nex1 = 0;
+                       if (ext_cnt) {
+                               ASSERT(erp_idx < ifp->if_real_bytes /
+                                       XFS_IEXT_BUFSZ);
+                               /* same erp_idx: the array shifted down on remove */
+                               erp = &ifp->if_u1.if_ext_irec[erp_idx];
+                               nex1 = 0;
+                               continue;
+                       } else {
+                               break;
+                       }
+               }
+               /* Move extents up (if needed) */
+               if (nex2) {
+                       memmove(&erp->er_extbuf[nex1],
+                               &erp->er_extbuf[nex1 + ext_diff],
+                               nex2 * sizeof(xfs_bmbt_rec_t));
+               }
+               /* Zero out rest of page */
+               memset(&erp->er_extbuf[nex1 + nex2], 0, (XFS_IEXT_BUFSZ -
+                       ((nex1 + nex2) * sizeof(xfs_bmbt_rec_t))));
+               /* Update remaining counters */
+               erp->er_extcount -= ext_diff;
+               xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, -ext_diff);
+               ext_cnt -= ext_diff;
+               nex1 = 0;
+               erp_idx++;
+               erp++;
+       }
+       ifp->if_bytes -= count * sizeof(xfs_bmbt_rec_t);
+       /* merge sparsely-populated pages after the removal */
+       xfs_iext_irec_compact(ifp);
+}
+
+/*
+ * Create, destroy, or resize a linear (direct) block of extents.
+ * Allocations are rounded up to a power of two; switches to/from
+ * the inline buffer happen here when new_size crosses the inline
+ * threshold.
+ */
+void
+xfs_iext_realloc_direct(
+       xfs_ifork_t     *ifp,           /* inode fork pointer */
+       int             new_size)       /* new size of extents */
+{
+       int             rnew_size;      /* real new size of extents */
+
+       rnew_size = new_size;
+
+       ASSERT(!(ifp->if_flags & XFS_IFEXTIREC) ||
+               ((new_size >= 0) && (new_size <= XFS_IEXT_BUFSZ) &&
+                (new_size != ifp->if_real_bytes)));
+
+       /* Free extent records */
+       if (new_size == 0) {
+               xfs_iext_destroy(ifp);
+       }
+       /* Resize direct extent list and zero any new bytes */
+       else if (ifp->if_real_bytes) {
+               /* Check if extents will fit inside the inode */
+               if (new_size <= XFS_INLINE_EXTS * sizeof(xfs_bmbt_rec_t)) {
+                       xfs_iext_direct_to_inline(ifp, new_size /
+                               (uint)sizeof(xfs_bmbt_rec_t));
+                       ifp->if_bytes = new_size;
+                       return;
+               }
+               /* round allocation up to a power of two to limit reallocs */
+               if (!is_power_of_2(new_size)){
+                       rnew_size = roundup_pow_of_two(new_size);
+               }
+               if (rnew_size != ifp->if_real_bytes) {
+                       ifp->if_u1.if_extents =
+                               kmem_realloc(ifp->if_u1.if_extents,
+                                               rnew_size,
+                                               ifp->if_real_bytes, KM_NOFS);
+               }
+               if (rnew_size > ifp->if_real_bytes) {
+                       /* zero the bytes the realloc grew us by */
+                       memset(&ifp->if_u1.if_extents[ifp->if_bytes /
+                               (uint)sizeof(xfs_bmbt_rec_t)], 0,
+                               rnew_size - ifp->if_real_bytes);
+               }
+       }
+       /*
+        * Switch from the inline extent buffer to a direct
+        * extent list. Be sure to include the inline extent
+        * bytes in new_size.
+        */
+       else {
+               new_size += ifp->if_bytes;
+               if (!is_power_of_2(new_size)) {
+                       rnew_size = roundup_pow_of_two(new_size);
+               }
+               xfs_iext_inline_to_direct(ifp, rnew_size);
+       }
+       ifp->if_real_bytes = rnew_size;
+       ifp->if_bytes = new_size;
+}
+
+/*
+ * Switch from linear (direct) extent records to inline buffer.
+ * Copies the records into the inode's inline area, frees the
+ * direct allocation, and marks the fork inline (if_real_bytes = 0).
+ */
+void
+xfs_iext_direct_to_inline(
+       xfs_ifork_t     *ifp,           /* inode fork pointer */
+       xfs_extnum_t    nextents)       /* number of extents in file */
+{
+       ASSERT(ifp->if_flags & XFS_IFEXTENTS);
+       ASSERT(nextents <= XFS_INLINE_EXTS);
+       /*
+        * The inline buffer was zeroed when we switched
+        * from inline to direct extent allocation mode,
+        * so we don't need to clear it here.
+        */
+       memcpy(ifp->if_u2.if_inline_ext, ifp->if_u1.if_extents,
+               nextents * sizeof(xfs_bmbt_rec_t));
+       kmem_free(ifp->if_u1.if_extents);
+       /* point the generic extent pointer at the inline buffer */
+       ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext;
+       ifp->if_real_bytes = 0;
+}
+
+/*
+ * Switch from inline buffer to linear (direct) extent records.
+ * new_size should already be rounded up to the next power of 2
+ * by the caller (when appropriate), so use new_size as it is.
+ * However, since new_size may be rounded up, we can't update
+ * if_bytes here. It is the caller's responsibility to update
+ * if_bytes upon return.
+ */
+void
+xfs_iext_inline_to_direct(
+       xfs_ifork_t     *ifp,           /* inode fork pointer */
+       int             new_size)       /* number of extents in file */
+{
+       ifp->if_u1.if_extents = kmem_alloc(new_size, KM_NOFS);
+       memset(ifp->if_u1.if_extents, 0, new_size);
+       if (ifp->if_bytes) {
+               /* copy existing inline records out, then clear the inline area */
+               memcpy(ifp->if_u1.if_extents, ifp->if_u2.if_inline_ext,
+                       ifp->if_bytes);
+               memset(ifp->if_u2.if_inline_ext, 0, XFS_INLINE_EXTS *
+                       sizeof(xfs_bmbt_rec_t));
+       }
+       /* non-zero if_real_bytes marks the direct representation */
+       ifp->if_real_bytes = new_size;
+}
+
+/*
+ * Resize an extent indirection array to new_size bytes.
+ * new_size == 0 tears the whole extent list down.
+ */
+STATIC void
+xfs_iext_realloc_indirect(
+       xfs_ifork_t     *ifp,           /* inode fork pointer */
+       int             new_size)       /* new indirection array size */
+{
+       int             nlists;         /* number of irec's (ex lists) */
+       int             size;           /* current indirection array size */
+
+       ASSERT(ifp->if_flags & XFS_IFEXTIREC);
+       nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
+       size = nlists * sizeof(xfs_ext_irec_t);
+       ASSERT(ifp->if_real_bytes);
+       ASSERT((new_size >= 0) && (new_size != size));
+       if (new_size == 0) {
+               xfs_iext_destroy(ifp);
+       } else {
+               ifp->if_u1.if_ext_irec = (xfs_ext_irec_t *)
+                       kmem_realloc(ifp->if_u1.if_ext_irec,
+                               new_size, size, KM_NOFS);
+       }
+}
+
+/*
+ * Switch from indirection array to linear (direct) extent allocations.
+ * Compacts everything into the single remaining irec page, then
+ * adopts that page's buffer as the direct extent list.
+ */
+STATIC void
+xfs_iext_indirect_to_direct(
+        xfs_ifork_t    *ifp)           /* inode fork pointer */
+{
+       xfs_bmbt_rec_host_t *ep;        /* extent record pointer */
+       xfs_extnum_t    nextents;       /* number of extents in file */
+       int             size;           /* size of file extents */
+
+       ASSERT(ifp->if_flags & XFS_IFEXTIREC);
+       nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
+       ASSERT(nextents <= XFS_LINEAR_EXTS);
+       size = nextents * sizeof(xfs_bmbt_rec_t);
+
+       /* squeeze all extents into one page so we can steal its buffer */
+       xfs_iext_irec_compact_pages(ifp);
+       ASSERT(ifp->if_real_bytes == XFS_IEXT_BUFSZ);
+
+       ep = ifp->if_u1.if_ext_irec->er_extbuf;
+       kmem_free(ifp->if_u1.if_ext_irec);
+       ifp->if_flags &= ~XFS_IFEXTIREC;
+       ifp->if_u1.if_extents = ep;
+       ifp->if_bytes = size;
+       if (nextents < XFS_LINEAR_EXTS) {
+               /* trim the full-page buffer down to what we actually need */
+               xfs_iext_realloc_direct(ifp, size);
+       }
+}
+
+/*
+ * Free incore file extents.  Handles all three representations and
+ * leaves the fork in the empty state (no extents, inline mode).
+ */
+void
+xfs_iext_destroy(
+       xfs_ifork_t     *ifp)           /* inode fork pointer */
+{
+       if (ifp->if_flags & XFS_IFEXTIREC) {
+               int     erp_idx;
+               int     nlists;
+
+               /* tear the irec pages down back-to-front */
+               nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
+               for (erp_idx = nlists - 1; erp_idx >= 0 ; erp_idx--) {
+                       xfs_iext_irec_remove(ifp, erp_idx);
+               }
+               ifp->if_flags &= ~XFS_IFEXTIREC;
+       } else if (ifp->if_real_bytes) {
+               /* direct list: one allocation to free */
+               kmem_free(ifp->if_u1.if_extents);
+       } else if (ifp->if_bytes) {
+               /* inline buffer: just clear it */
+               memset(ifp->if_u2.if_inline_ext, 0, XFS_INLINE_EXTS *
+                       sizeof(xfs_bmbt_rec_t));
+       }
+       ifp->if_u1.if_extents = NULL;
+       ifp->if_real_bytes = 0;
+       ifp->if_bytes = 0;
+}
+
+/*
+ * Return a pointer to the extent record for file system block bno.
+ * On an exact hit, *idxp is the extent's file-based index.  On a
+ * miss past the closest extent, returns the next extent (or NULL
+ * when bno lies beyond the last one).
+ */
+xfs_bmbt_rec_host_t *                  /* pointer to found extent record */
+xfs_iext_bno_to_ext(
+       xfs_ifork_t     *ifp,           /* inode fork pointer */
+       xfs_fileoff_t   bno,            /* block number to search for */
+       xfs_extnum_t    *idxp)          /* index of target extent */
+{
+       xfs_bmbt_rec_host_t *base;      /* pointer to first extent */
+       xfs_filblks_t   blockcount = 0; /* number of blocks in extent */
+       xfs_bmbt_rec_host_t *ep = NULL; /* pointer to target extent */
+       xfs_ext_irec_t  *erp = NULL;    /* indirection array pointer */
+       int             high;           /* upper boundary in search */
+       xfs_extnum_t    idx = 0;        /* index of target extent */
+       int             low;            /* lower boundary in search */
+       xfs_extnum_t    nextents;       /* number of file extents */
+       xfs_fileoff_t   startoff = 0;   /* start offset of extent */
+
+       nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
+       if (nextents == 0) {
+               *idxp = 0;
+               return NULL;
+       }
+       low = 0;
+       if (ifp->if_flags & XFS_IFEXTIREC) {
+               /* Find target extent list */
+               int     erp_idx = 0;
+               erp = xfs_iext_bno_to_irec(ifp, bno, &erp_idx);
+               base = erp->er_extbuf;
+               high = erp->er_extcount - 1;
+       } else {
+               base = ifp->if_u1.if_extents;
+               high = nextents - 1;
+       }
+       /* Binary search extent records */
+       while (low <= high) {
+               idx = (low + high) >> 1;
+               ep = base + idx;
+               startoff = xfs_bmbt_get_startoff(ep);
+               blockcount = xfs_bmbt_get_blockcount(ep);
+               if (bno < startoff) {
+                       high = idx - 1;
+               } else if (bno >= startoff + blockcount) {
+                       low = idx + 1;
+               } else {
+                       /* hit: bno falls within [startoff, startoff+blockcount) */
+                       /* Convert back to file-based extent index */
+                       if (ifp->if_flags & XFS_IFEXTIREC) {
+                               idx += erp->er_extoff;
+                       }
+                       *idxp = idx;
+                       return ep;
+               }
+       }
+       /* miss: ep/idx refer to the last extent probed */
+       /* Convert back to file-based extent index */
+       if (ifp->if_flags & XFS_IFEXTIREC) {
+               idx += erp->er_extoff;
+       }
+       if (bno >= startoff + blockcount) {
+               /* bno is past the probed extent: advance to the next one */
+               if (++idx == nextents) {
+                       ep = NULL;
+               } else {
+                       ep = xfs_iext_get_ext(ifp, idx);
+               }
+       }
+       *idxp = idx;
+       return ep;
+}
+
+/*
+ * Return a pointer to the indirection array entry containing the
+ * extent record for filesystem block bno. Store the index of the
+ * target irec in *erp_idxp.
+ */
+xfs_ext_irec_t *                       /* pointer to found extent record */
+xfs_iext_bno_to_irec(
+       xfs_ifork_t     *ifp,           /* inode fork pointer */
+       xfs_fileoff_t   bno,            /* block number to search for */
+       int             *erp_idxp)      /* irec index of target ext list */
+{
+       xfs_ext_irec_t  *erp = NULL;    /* indirection array pointer */
+       xfs_ext_irec_t  *erp_next;      /* next indirection array entry */
+       int             erp_idx;        /* indirection array index */
+       int             nlists;         /* number of extent irec's (lists) */
+       int             high;           /* binary search upper limit */
+       int             low;            /* binary search lower limit */
+
+       ASSERT(ifp->if_flags & XFS_IFEXTIREC);
+       nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
+       erp_idx = 0;
+       low = 0;
+       high = nlists - 1;
+       /*
+        * Binary search on the start offset of each irec's first extent:
+        * the target irec is the one whose covered range brackets bno,
+        * i.e. bno is at or after this irec's first extent and before the
+        * next irec's first extent (or there is no next irec).
+        */
+       while (low <= high) {
+               erp_idx = (low + high) >> 1;
+               erp = &ifp->if_u1.if_ext_irec[erp_idx];
+               erp_next = erp_idx < nlists - 1 ? erp + 1 : NULL;
+               if (bno < xfs_bmbt_get_startoff(erp->er_extbuf)) {
+                       high = erp_idx - 1;
+               } else if (erp_next && bno >=
+                          xfs_bmbt_get_startoff(erp_next->er_extbuf)) {
+                       low = erp_idx + 1;
+               } else {
+                       break;  /* bno falls within this irec's range */
+               }
+       }
+       *erp_idxp = erp_idx;
+       return erp;
+}
+
+/*
+ * Return a pointer to the indirection array entry containing the
+ * extent record at file extent index *idxp. Store the index of the
+ * target irec in *erp_idxp and store the page index of the target
+ * extent record in *idxp.
+ */
+xfs_ext_irec_t *
+xfs_iext_idx_to_irec(
+       xfs_ifork_t     *ifp,           /* inode fork pointer */
+       xfs_extnum_t    *idxp,          /* extent index (file -> page) */
+       int             *erp_idxp,      /* pointer to target irec */
+       int             realloc)        /* new bytes were just added */
+{
+       xfs_ext_irec_t  *prev;          /* pointer to previous irec */
+       xfs_ext_irec_t  *erp = NULL;    /* pointer to current irec */
+       int             erp_idx;        /* indirection array index */
+       int             nlists;         /* number of irec's (ex lists) */
+       int             high;           /* binary search upper limit */
+       int             low;            /* binary search lower limit */
+       xfs_extnum_t    page_idx = *idxp; /* extent index in target list */
+
+       ASSERT(ifp->if_flags & XFS_IFEXTIREC);
+       ASSERT(page_idx >= 0);
+       /* With realloc set, one past the last extent is a legal index. */
+       ASSERT(page_idx <= ifp->if_bytes / sizeof(xfs_bmbt_rec_t));
+       ASSERT(page_idx < ifp->if_bytes / sizeof(xfs_bmbt_rec_t) || realloc);
+
+       nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
+       erp_idx = 0;
+       low = 0;
+       high = nlists - 1;
+
+       /* Binary search extent irec's */
+       while (low <= high) {
+               erp_idx = (low + high) >> 1;
+               erp = &ifp->if_u1.if_ext_irec[erp_idx];
+               prev = erp_idx > 0 ? erp - 1 : NULL;
+               /*
+                * On an insert (realloc) exactly at an irec boundary,
+                * prefer the previous irec if it still has free slots.
+                */
+               if (page_idx < erp->er_extoff || (page_idx == erp->er_extoff &&
+                    realloc && prev && prev->er_extcount < XFS_LINEAR_EXTS)) {
+                       high = erp_idx - 1;
+               } else if (page_idx > erp->er_extoff + erp->er_extcount ||
+                          (page_idx == erp->er_extoff + erp->er_extcount &&
+                           !realloc)) {
+                       low = erp_idx + 1;
+               } else if (page_idx == erp->er_extoff + erp->er_extcount &&
+                          erp->er_extcount == XFS_LINEAR_EXTS) {
+                       /*
+                        * Insert lands one past a full page: target the
+                        * start of the next irec, or NULL when no next
+                        * irec exists yet and the caller must create it.
+                        */
+                       ASSERT(realloc);
+                       page_idx = 0;
+                       erp_idx++;
+                       erp = erp_idx < nlists ? erp + 1 : NULL;
+                       break;
+               } else {
+                       /* Hit: convert file index to page-local index. */
+                       page_idx -= erp->er_extoff;
+                       break;
+               }
+       }
+       *idxp = page_idx;
+       *erp_idxp = erp_idx;
+       return(erp);
+}
+
+/*
+ * Allocate and initialize an indirection array once the space needed
+ * for incore extents increases above XFS_IEXT_BUFSZ.
+ */
+void
+xfs_iext_irec_init(
+       xfs_ifork_t     *ifp)           /* inode fork pointer */
+{
+       xfs_ext_irec_t  *erp;           /* indirection array pointer */
+       xfs_extnum_t    nextents;       /* number of extents in file */
+
+       ASSERT(!(ifp->if_flags & XFS_IFEXTIREC));
+       nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
+       ASSERT(nextents <= XFS_LINEAR_EXTS);
+
+       /* The indirection array starts with a single irec entry. */
+       erp = kmem_alloc(sizeof(xfs_ext_irec_t), KM_NOFS);
+
+       /*
+        * Make sure the existing extents sit in a full-size
+        * (XFS_IEXT_BUFSZ) direct buffer that the irec can adopt.
+        */
+       if (nextents == 0) {
+               ifp->if_u1.if_extents = kmem_alloc(XFS_IEXT_BUFSZ, KM_NOFS);
+       } else if (!ifp->if_real_bytes) {
+               xfs_iext_inline_to_direct(ifp, XFS_IEXT_BUFSZ);
+       } else if (ifp->if_real_bytes < XFS_IEXT_BUFSZ) {
+               xfs_iext_realloc_direct(ifp, XFS_IEXT_BUFSZ);
+       }
+       /* The irec takes over the direct extent buffer as its page. */
+       erp->er_extbuf = ifp->if_u1.if_extents;
+       erp->er_extcount = nextents;
+       erp->er_extoff = 0;
+
+       /* Switch the fork's union over to indirection-array mode. */
+       ifp->if_flags |= XFS_IFEXTIREC;
+       ifp->if_real_bytes = XFS_IEXT_BUFSZ;
+       ifp->if_bytes = nextents * sizeof(xfs_bmbt_rec_t);
+       ifp->if_u1.if_ext_irec = erp;
+
+       return;
+}
+
+/*
+ * Allocate and initialize a new entry in the indirection array.
+ */
+xfs_ext_irec_t *
+xfs_iext_irec_new(
+       xfs_ifork_t     *ifp,           /* inode fork pointer */
+       int             erp_idx)        /* index for new irec */
+{
+       xfs_ext_irec_t  *erp;           /* indirection array pointer */
+       int             i;              /* loop counter */
+       int             nlists;         /* number of irec's (ex lists) */
+
+       ASSERT(ifp->if_flags & XFS_IFEXTIREC);
+       nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
+
+       /* Resize indirection array */
+       xfs_iext_realloc_indirect(ifp, ++nlists *
+                                 sizeof(xfs_ext_irec_t));
+       /*
+        * Move records down in the array so the
+        * new page can use erp_idx.
+        */
+       erp = ifp->if_u1.if_ext_irec;
+       for (i = nlists - 1; i > erp_idx; i--) {
+               memmove(&erp[i], &erp[i-1], sizeof(xfs_ext_irec_t));
+       }
+       ASSERT(i == erp_idx);
+
+       /* Initialize new extent record */
+       erp = ifp->if_u1.if_ext_irec;
+       erp[erp_idx].er_extbuf = kmem_alloc(XFS_IEXT_BUFSZ, KM_NOFS);
+       ifp->if_real_bytes = nlists * XFS_IEXT_BUFSZ;
+       memset(erp[erp_idx].er_extbuf, 0, XFS_IEXT_BUFSZ);
+       erp[erp_idx].er_extcount = 0;
+       /* The new page's extents start where the previous irec ends. */
+       erp[erp_idx].er_extoff = erp_idx > 0 ?
+               erp[erp_idx-1].er_extoff + erp[erp_idx-1].er_extcount : 0;
+       return (&erp[erp_idx]);
+}
+
+/*
+ * Remove a record from the indirection array.
+ */
+void
+xfs_iext_irec_remove(
+       xfs_ifork_t     *ifp,           /* inode fork pointer */
+       int             erp_idx)        /* irec index to remove */
+{
+       xfs_ext_irec_t  *erp;           /* indirection array pointer */
+       int             i;              /* loop counter */
+       int             nlists;         /* number of irec's (ex lists) */
+
+       ASSERT(ifp->if_flags & XFS_IFEXTIREC);
+       nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
+       erp = &ifp->if_u1.if_ext_irec[erp_idx];
+       /*
+        * er_extbuf may already be NULL when the caller freed the page
+        * itself beforehand; in that case the caller has also accounted
+        * for the extents, so skip the extoff adjustment and free here.
+        */
+       if (erp->er_extbuf) {
+               xfs_iext_irec_update_extoffs(ifp, erp_idx + 1,
+                       -erp->er_extcount);
+               kmem_free(erp->er_extbuf);
+       }
+       /* Compact extent records */
+       erp = ifp->if_u1.if_ext_irec;
+       for (i = erp_idx; i < nlists - 1; i++) {
+               memmove(&erp[i], &erp[i+1], sizeof(xfs_ext_irec_t));
+       }
+       /*
+        * Manually free the last extent record from the indirection
+        * array.  A call to xfs_iext_realloc_indirect() with a size
+        * of zero would result in a call to xfs_iext_destroy() which
+        * would in turn call this function again, creating a nasty
+        * infinite loop.
+        */
+       if (--nlists) {
+               xfs_iext_realloc_indirect(ifp,
+                       nlists * sizeof(xfs_ext_irec_t));
+       } else {
+               kmem_free(ifp->if_u1.if_ext_irec);
+       }
+       ifp->if_real_bytes = nlists * XFS_IEXT_BUFSZ;
+}
+
+/*
+ * This is called to clean up large amounts of unused memory allocated
+ * by the indirection array.  Before compacting anything though, verify
+ * that the indirection array is still needed and switch back to the
+ * linear extent list (or even the inline buffer) if possible.  The
+ * compaction policy is as follows:
+ *
+ *    Full Compaction: Extents fit into a single page (or inline buffer)
+ * Partial Compaction: Extents occupy less than 50% of allocated space
+ *      No Compaction: Extents occupy at least 50% of allocated space
+ */
+void
+xfs_iext_irec_compact(
+       xfs_ifork_t     *ifp)           /* inode fork pointer */
+{
+       xfs_extnum_t    nextents;       /* number of extents in file */
+       int             nlists;         /* number of irec's (ex lists) */
+
+       ASSERT(ifp->if_flags & XFS_IFEXTIREC);
+       nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
+       nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
+
+       if (nextents == 0) {
+               /* No extents left: tear everything down. */
+               xfs_iext_destroy(ifp);
+       } else if (nextents <= XFS_INLINE_EXTS) {
+               /* Full compaction all the way back to the inline buffer. */
+               xfs_iext_indirect_to_direct(ifp);
+               xfs_iext_direct_to_inline(ifp, nextents);
+       } else if (nextents <= XFS_LINEAR_EXTS) {
+               /* Full compaction to a single direct extent list. */
+               xfs_iext_indirect_to_direct(ifp);
+       } else if (nextents < (nlists * XFS_LINEAR_EXTS) >> 1) {
+               /* Partial compaction: merge neighboring extent pages. */
+               xfs_iext_irec_compact_pages(ifp);
+       }
+}
+
+/*
+ * Combine extents from neighboring extent pages.
+ */
+void
+xfs_iext_irec_compact_pages(
+       xfs_ifork_t     *ifp)           /* inode fork pointer */
+{
+       xfs_ext_irec_t  *erp, *erp_next;/* pointers to irec entries */
+       int             erp_idx = 0;    /* indirection array index */
+       int             nlists;         /* number of irec's (ex lists) */
+
+       ASSERT(ifp->if_flags & XFS_IFEXTIREC);
+       nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
+       while (erp_idx < nlists - 1) {
+               erp = &ifp->if_u1.if_ext_irec[erp_idx];
+               erp_next = erp + 1;
+               /* Merge the next page into this one if it all fits. */
+               if (erp_next->er_extcount <=
+                   (XFS_LINEAR_EXTS - erp->er_extcount)) {
+                       /* Distinct buffers, so memcpy cannot overlap. */
+                       memcpy(&erp->er_extbuf[erp->er_extcount],
+                               erp_next->er_extbuf, erp_next->er_extcount *
+                               sizeof(xfs_bmbt_rec_t));
+                       erp->er_extcount += erp_next->er_extcount;
+                       /*
+                        * Free page before removing extent record
+                        * so er_extoffs don't get modified in
+                        * xfs_iext_irec_remove.
+                        */
+                       kmem_free(erp_next->er_extbuf);
+                       erp_next->er_extbuf = NULL;
+                       xfs_iext_irec_remove(ifp, erp_idx + 1);
+                       /* The array shrank; refresh the list count. */
+                       nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
+               } else {
+                       erp_idx++;
+               }
+       }
+}
+
+/*
+ * This is called to update the er_extoff field in the indirection
+ * array when extents have been added or removed from one of the
+ * extent lists. erp_idx contains the irec index to begin updating
+ * at and ext_diff contains the number of extents that were added
+ * or removed.
+ */
+void
+xfs_iext_irec_update_extoffs(
+       xfs_ifork_t     *ifp,           /* inode fork pointer */
+       int             erp_idx,        /* irec index to update */
+       int             ext_diff)       /* number of new extents */
+{
+       int             i;              /* loop counter */
+       int             nlists;         /* number of irec's (ex lists) */
+
+       ASSERT(ifp->if_flags & XFS_IFEXTIREC);
+       nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
+       /* Shift the start offset of every irec from erp_idx onward. */
+       for (i = erp_idx; i < nlists; i++) {
+               ifp->if_u1.if_ext_irec[i].er_extoff += ext_diff;
+       }
+}
diff --git a/fs/xfs/xfs_inode_fork.h b/fs/xfs/xfs_inode_fork.h
new file mode 100644 (file)
index 0000000..28661a0
--- /dev/null
@@ -0,0 +1,171 @@
+/*
+ * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+#ifndef        __XFS_INODE_FORK_H__
+#define        __XFS_INODE_FORK_H__
+
+struct xfs_inode_log_item;
+
+/*
+ * The following xfs_ext_irec_t struct introduces a second (top) level
+ * to the in-core extent allocation scheme. These structs are allocated
+ * in a contiguous block, creating an indirection array where each entry
+ * (irec) contains a pointer to a buffer of in-core extent records which
+ * it manages. Each extent buffer is 4k in size, since 4k is the system
+ * page size on Linux i386 and systems with larger page sizes don't seem
+ * to gain much, if anything, by using their native page size as the
+ * extent buffer size. Also, using 4k extent buffers everywhere provides
+ * a consistent interface for CXFS across different platforms.
+ *
+ * There is currently no limit on the number of irec's (extent lists)
+ * allowed, so heavily fragmented files may require an indirection array
+ * which spans multiple system pages of memory. The number of extents
+ * which would require this amount of contiguous memory is very large
+ * and should not cause problems in the foreseeable future. However,
+ * if the memory needed for the contiguous array ever becomes a problem,
+ * it is possible that a third level of indirection may be required.
+ */
+typedef struct xfs_ext_irec {
+       xfs_bmbt_rec_host_t *er_extbuf; /* block of extent records */
+       xfs_extnum_t    er_extoff;      /* extent offset in file */
+       xfs_extnum_t    er_extcount;    /* number of extents in page/block */
+} xfs_ext_irec_t;
+
+/*
+ * File incore extent information, present for each of data & attr forks.
+ */
+#define        XFS_IEXT_BUFSZ          4096
+#define        XFS_LINEAR_EXTS         (XFS_IEXT_BUFSZ / (uint)sizeof(xfs_bmbt_rec_t))
+#define        XFS_INLINE_EXTS         2
+#define        XFS_INLINE_DATA         32
+typedef struct xfs_ifork {
+       int                     if_bytes;       /* bytes in if_u1 */
+       int                     if_real_bytes;  /* bytes allocated in if_u1 */
+       struct xfs_btree_block  *if_broot;      /* file's incore btree root */
+       short                   if_broot_bytes; /* bytes allocated for root */
+       unsigned char           if_flags;       /* per-fork flags */
+       union {
+               xfs_bmbt_rec_host_t *if_extents;/* linear map file exts */
+               xfs_ext_irec_t  *if_ext_irec;   /* irec map file exts */
+               char            *if_data;       /* inline file data */
+       } if_u1;
+       union {
+               xfs_bmbt_rec_host_t if_inline_ext[XFS_INLINE_EXTS];
+                                               /* very small file extents */
+               char            if_inline_data[XFS_INLINE_DATA];
+                                               /* very small file data */
+               xfs_dev_t       if_rdev;        /* dev number if special */
+               uuid_t          if_uuid;        /* mount point value */
+       } if_u2;
+} xfs_ifork_t;
+
+/*
+ * Per-fork incore inode flags.
+ */
+#define        XFS_IFINLINE    0x01    /* Inline data is read in */
+#define        XFS_IFEXTENTS   0x02    /* All extent pointers are read in */
+#define        XFS_IFBROOT     0x04    /* i_broot points to the bmap b-tree root */
+#define        XFS_IFEXTIREC   0x08    /* Indirection array of extent blocks */
+
+/*
+ * Fork handling.
+ */
+
+#define XFS_IFORK_Q(ip)                        ((ip)->i_d.di_forkoff != 0)
+#define XFS_IFORK_BOFF(ip)             ((int)((ip)->i_d.di_forkoff << 3))
+
+#define XFS_IFORK_PTR(ip,w)            \
+       ((w) == XFS_DATA_FORK ? \
+               &(ip)->i_df : \
+               (ip)->i_afp)
+#define XFS_IFORK_DSIZE(ip) \
+       (XFS_IFORK_Q(ip) ? \
+               XFS_IFORK_BOFF(ip) : \
+               XFS_LITINO((ip)->i_mount, (ip)->i_d.di_version))
+#define XFS_IFORK_ASIZE(ip) \
+       (XFS_IFORK_Q(ip) ? \
+               XFS_LITINO((ip)->i_mount, (ip)->i_d.di_version) - \
+                       XFS_IFORK_BOFF(ip) : \
+               0)
+#define XFS_IFORK_SIZE(ip,w) \
+       ((w) == XFS_DATA_FORK ? \
+               XFS_IFORK_DSIZE(ip) : \
+               XFS_IFORK_ASIZE(ip))
+#define XFS_IFORK_FORMAT(ip,w) \
+       ((w) == XFS_DATA_FORK ? \
+               (ip)->i_d.di_format : \
+               (ip)->i_d.di_aformat)
+#define XFS_IFORK_FMT_SET(ip,w,n) \
+       ((w) == XFS_DATA_FORK ? \
+               ((ip)->i_d.di_format = (n)) : \
+               ((ip)->i_d.di_aformat = (n)))
+#define XFS_IFORK_NEXTENTS(ip,w) \
+       ((w) == XFS_DATA_FORK ? \
+               (ip)->i_d.di_nextents : \
+               (ip)->i_d.di_anextents)
+#define XFS_IFORK_NEXT_SET(ip,w,n) \
+       ((w) == XFS_DATA_FORK ? \
+               ((ip)->i_d.di_nextents = (n)) : \
+               ((ip)->i_d.di_anextents = (n)))
+#define XFS_IFORK_MAXEXT(ip, w) \
+       (XFS_IFORK_SIZE(ip, w) / sizeof(xfs_bmbt_rec_t))
+
+int            xfs_iformat_fork(struct xfs_inode *, struct xfs_dinode *);
+void           xfs_iflush_fork(struct xfs_inode *, struct xfs_dinode *,
+                               struct xfs_inode_log_item *, int,
+                               struct xfs_buf *);
+void           xfs_idestroy_fork(struct xfs_inode *, int);
+void           xfs_idata_realloc(struct xfs_inode *, int, int);
+void           xfs_iroot_realloc(struct xfs_inode *, int, int);
+int            xfs_iread_extents(struct xfs_trans *, struct xfs_inode *, int);
+int            xfs_iextents_copy(struct xfs_inode *, struct xfs_bmbt_rec *,
+                                 int);
+
+struct xfs_bmbt_rec_host *
+               xfs_iext_get_ext(struct xfs_ifork *, xfs_extnum_t);
+void           xfs_iext_insert(struct xfs_inode *, xfs_extnum_t, xfs_extnum_t,
+                               struct xfs_bmbt_irec *, int);
+void           xfs_iext_add(struct xfs_ifork *, xfs_extnum_t, int);
+void           xfs_iext_add_indirect_multi(struct xfs_ifork *, int,
+                                           xfs_extnum_t, int);
+void           xfs_iext_remove(struct xfs_inode *, xfs_extnum_t, int, int);
+void           xfs_iext_remove_inline(struct xfs_ifork *, xfs_extnum_t, int);
+void           xfs_iext_remove_direct(struct xfs_ifork *, xfs_extnum_t, int);
+void           xfs_iext_remove_indirect(struct xfs_ifork *, xfs_extnum_t, int);
+void           xfs_iext_realloc_direct(struct xfs_ifork *, int);
+void           xfs_iext_direct_to_inline(struct xfs_ifork *, xfs_extnum_t);
+void           xfs_iext_inline_to_direct(struct xfs_ifork *, int);
+void           xfs_iext_destroy(struct xfs_ifork *);
+struct xfs_bmbt_rec_host *
+               xfs_iext_bno_to_ext(struct xfs_ifork *, xfs_fileoff_t, int *);
+struct xfs_ext_irec *
+               xfs_iext_bno_to_irec(struct xfs_ifork *, xfs_fileoff_t, int *);
+struct xfs_ext_irec *
+               xfs_iext_idx_to_irec(struct xfs_ifork *, xfs_extnum_t *, int *,
+                                    int);
+void           xfs_iext_irec_init(struct xfs_ifork *);
+struct xfs_ext_irec *
+               xfs_iext_irec_new(struct xfs_ifork *, int);
+void           xfs_iext_irec_remove(struct xfs_ifork *, int);
+void           xfs_iext_irec_compact(struct xfs_ifork *);
+void           xfs_iext_irec_compact_pages(struct xfs_ifork *);
+void           xfs_iext_irec_compact_full(struct xfs_ifork *);
+void           xfs_iext_irec_update_extoffs(struct xfs_ifork *, int, int);
+
+extern struct kmem_zone        *xfs_ifork_zone;
+
+#endif /* __XFS_INODE_FORK_H__ */
index f76ff52e43c0a4f5536163a61230f1ac89f77358..378081109844b09b2bbfd07dcb4214027fefe2c2 100644 (file)
@@ -47,32 +47,44 @@ static inline struct xfs_inode_log_item *INODE_ITEM(struct xfs_log_item *lip)
  * inode core, and possibly one for the inode data/extents/b-tree root
  * and one for the inode attribute data/extents/b-tree root.
  */
-STATIC uint
+STATIC void
 xfs_inode_item_size(
-       struct xfs_log_item     *lip)
+       struct xfs_log_item     *lip,
+       int                     *nvecs,
+       int                     *nbytes)
 {
        struct xfs_inode_log_item *iip = INODE_ITEM(lip);
        struct xfs_inode        *ip = iip->ili_inode;
-       uint                    nvecs = 2;
+
+       *nvecs += 2;
+       *nbytes += sizeof(struct xfs_inode_log_format) +
+                  xfs_icdinode_size(ip->i_d.di_version);
 
        switch (ip->i_d.di_format) {
        case XFS_DINODE_FMT_EXTENTS:
                if ((iip->ili_fields & XFS_ILOG_DEXT) &&
                    ip->i_d.di_nextents > 0 &&
-                   ip->i_df.if_bytes > 0)
-                       nvecs++;
+                   ip->i_df.if_bytes > 0) {
+                       /* worst case, doesn't subtract delalloc extents */
+                       *nbytes += XFS_IFORK_DSIZE(ip);
+                       *nvecs += 1;
+               }
                break;
 
        case XFS_DINODE_FMT_BTREE:
                if ((iip->ili_fields & XFS_ILOG_DBROOT) &&
-                   ip->i_df.if_broot_bytes > 0)
-                       nvecs++;
+                   ip->i_df.if_broot_bytes > 0) {
+                       *nbytes += ip->i_df.if_broot_bytes;
+                       *nvecs += 1;
+               }
                break;
 
        case XFS_DINODE_FMT_LOCAL:
                if ((iip->ili_fields & XFS_ILOG_DDATA) &&
-                   ip->i_df.if_bytes > 0)
-                       nvecs++;
+                   ip->i_df.if_bytes > 0) {
+                       *nbytes += roundup(ip->i_df.if_bytes, 4);
+                       *nvecs += 1;
+               }
                break;
 
        case XFS_DINODE_FMT_DEV:
@@ -85,7 +97,7 @@ xfs_inode_item_size(
        }
 
        if (!XFS_IFORK_Q(ip))
-               return nvecs;
+               return;
 
 
        /*
@@ -95,28 +107,33 @@ xfs_inode_item_size(
        case XFS_DINODE_FMT_EXTENTS:
                if ((iip->ili_fields & XFS_ILOG_AEXT) &&
                    ip->i_d.di_anextents > 0 &&
-                   ip->i_afp->if_bytes > 0)
-                       nvecs++;
+                   ip->i_afp->if_bytes > 0) {
+                       /* worst case, doesn't subtract unused space */
+                       *nbytes += XFS_IFORK_ASIZE(ip);
+                       *nvecs += 1;
+               }
                break;
 
        case XFS_DINODE_FMT_BTREE:
                if ((iip->ili_fields & XFS_ILOG_ABROOT) &&
-                   ip->i_afp->if_broot_bytes > 0)
-                       nvecs++;
+                   ip->i_afp->if_broot_bytes > 0) {
+                       *nbytes += ip->i_afp->if_broot_bytes;
+                       *nvecs += 1;
+               }
                break;
 
        case XFS_DINODE_FMT_LOCAL:
                if ((iip->ili_fields & XFS_ILOG_ADATA) &&
-                   ip->i_afp->if_bytes > 0)
-                       nvecs++;
+                   ip->i_afp->if_bytes > 0) {
+                       *nbytes += roundup(ip->i_afp->if_bytes, 4);
+                       *nvecs += 1;
+               }
                break;
 
        default:
                ASSERT(0);
                break;
        }
-
-       return nvecs;
 }
 
 /*
index 779812fb3d80b27c94d97f288fea9195db8be4fb..dce4d656768c32888521dc0c9007c6c2961b7c4e 100644 (file)
 #ifndef        __XFS_INODE_ITEM_H__
 #define        __XFS_INODE_ITEM_H__
 
-/*
- * This is the structure used to lay out an inode log item in the
- * log.  The size of the inline data/extents/b-tree root to be logged
- * (if any) is indicated in the ilf_dsize field.  Changes to this structure
- * must be added on to the end.
- */
-typedef struct xfs_inode_log_format {
-       __uint16_t              ilf_type;       /* inode log item type */
-       __uint16_t              ilf_size;       /* size of this item */
-       __uint32_t              ilf_fields;     /* flags for fields logged */
-       __uint16_t              ilf_asize;      /* size of attr d/ext/root */
-       __uint16_t              ilf_dsize;      /* size of data/ext/root */
-       __uint64_t              ilf_ino;        /* inode number */
-       union {
-               __uint32_t      ilfu_rdev;      /* rdev value for dev inode*/
-               uuid_t          ilfu_uuid;      /* mount point value */
-       } ilf_u;
-       __int64_t               ilf_blkno;      /* blkno of inode buffer */
-       __int32_t               ilf_len;        /* len of inode buffer */
-       __int32_t               ilf_boffset;    /* off of inode in buffer */
-} xfs_inode_log_format_t;
-
-typedef struct xfs_inode_log_format_32 {
-       __uint16_t              ilf_type;       /* inode log item type */
-       __uint16_t              ilf_size;       /* size of this item */
-       __uint32_t              ilf_fields;     /* flags for fields logged */
-       __uint16_t              ilf_asize;      /* size of attr d/ext/root */
-       __uint16_t              ilf_dsize;      /* size of data/ext/root */
-       __uint64_t              ilf_ino;        /* inode number */
-       union {
-               __uint32_t      ilfu_rdev;      /* rdev value for dev inode*/
-               uuid_t          ilfu_uuid;      /* mount point value */
-       } ilf_u;
-       __int64_t               ilf_blkno;      /* blkno of inode buffer */
-       __int32_t               ilf_len;        /* len of inode buffer */
-       __int32_t               ilf_boffset;    /* off of inode in buffer */
-} __attribute__((packed)) xfs_inode_log_format_32_t;
-
-typedef struct xfs_inode_log_format_64 {
-       __uint16_t              ilf_type;       /* inode log item type */
-       __uint16_t              ilf_size;       /* size of this item */
-       __uint32_t              ilf_fields;     /* flags for fields logged */
-       __uint16_t              ilf_asize;      /* size of attr d/ext/root */
-       __uint16_t              ilf_dsize;      /* size of data/ext/root */
-       __uint32_t              ilf_pad;        /* pad for 64 bit boundary */
-       __uint64_t              ilf_ino;        /* inode number */
-       union {
-               __uint32_t      ilfu_rdev;      /* rdev value for dev inode*/
-               uuid_t          ilfu_uuid;      /* mount point value */
-       } ilf_u;
-       __int64_t               ilf_blkno;      /* blkno of inode buffer */
-       __int32_t               ilf_len;        /* len of inode buffer */
-       __int32_t               ilf_boffset;    /* off of inode in buffer */
-} xfs_inode_log_format_64_t;
-
-/*
- * Flags for xfs_trans_log_inode flags field.
- */
-#define        XFS_ILOG_CORE   0x001   /* log standard inode fields */
-#define        XFS_ILOG_DDATA  0x002   /* log i_df.if_data */
-#define        XFS_ILOG_DEXT   0x004   /* log i_df.if_extents */
-#define        XFS_ILOG_DBROOT 0x008   /* log i_df.i_broot */
-#define        XFS_ILOG_DEV    0x010   /* log the dev field */
-#define        XFS_ILOG_UUID   0x020   /* log the uuid field */
-#define        XFS_ILOG_ADATA  0x040   /* log i_af.if_data */
-#define        XFS_ILOG_AEXT   0x080   /* log i_af.if_extents */
-#define        XFS_ILOG_ABROOT 0x100   /* log i_af.i_broot */
-
-
-/*
- * The timestamps are dirty, but not necessarily anything else in the inode
- * core.  Unlike the other fields above this one must never make it to disk
- * in the ilf_fields of the inode_log_format, but is purely store in-memory in
- * ili_fields in the inode_log_item.
- */
-#define XFS_ILOG_TIMESTAMP     0x4000
-
-#define        XFS_ILOG_NONCORE        (XFS_ILOG_DDATA | XFS_ILOG_DEXT | \
-                                XFS_ILOG_DBROOT | XFS_ILOG_DEV | \
-                                XFS_ILOG_UUID | XFS_ILOG_ADATA | \
-                                XFS_ILOG_AEXT | XFS_ILOG_ABROOT)
-
-#define        XFS_ILOG_DFORK          (XFS_ILOG_DDATA | XFS_ILOG_DEXT | \
-                                XFS_ILOG_DBROOT)
-
-#define        XFS_ILOG_AFORK          (XFS_ILOG_ADATA | XFS_ILOG_AEXT | \
-                                XFS_ILOG_ABROOT)
-
-#define        XFS_ILOG_ALL            (XFS_ILOG_CORE | XFS_ILOG_DDATA | \
-                                XFS_ILOG_DEXT | XFS_ILOG_DBROOT | \
-                                XFS_ILOG_DEV | XFS_ILOG_UUID | \
-                                XFS_ILOG_ADATA | XFS_ILOG_AEXT | \
-                                XFS_ILOG_ABROOT | XFS_ILOG_TIMESTAMP)
-
-static inline int xfs_ilog_fbroot(int w)
-{
-       return (w == XFS_DATA_FORK ? XFS_ILOG_DBROOT : XFS_ILOG_ABROOT);
-}
-
-static inline int xfs_ilog_fext(int w)
-{
-       return (w == XFS_DATA_FORK ? XFS_ILOG_DEXT : XFS_ILOG_AEXT);
-}
-
-static inline int xfs_ilog_fdata(int w)
-{
-       return (w == XFS_DATA_FORK ? XFS_ILOG_DDATA : XFS_ILOG_ADATA);
-}
-
-#ifdef __KERNEL__
+/* kernel only definitions */
 
 struct xfs_buf;
 struct xfs_bmbt_rec;
 struct xfs_inode;
 struct xfs_mount;
 
-
 typedef struct xfs_inode_log_item {
        xfs_log_item_t          ili_item;          /* common portion */
        struct xfs_inode        *ili_inode;        /* inode ptr */
@@ -151,7 +41,6 @@ typedef struct xfs_inode_log_item {
        xfs_inode_log_format_t  ili_format;        /* logged structure */
 } xfs_inode_log_item_t;
 
-
 static inline int xfs_inode_clean(xfs_inode_t *ip)
 {
        return !ip->i_itemp || !(ip->i_itemp->ili_fields & XFS_ILOG_ALL);
@@ -165,6 +54,6 @@ extern void xfs_iflush_abort(struct xfs_inode *, bool);
 extern int xfs_inode_item_format_convert(xfs_log_iovec_t *,
                                         xfs_inode_log_format_t *);
 
-#endif /* __KERNEL__ */
+extern struct kmem_zone        *xfs_ili_zone;
 
 #endif /* __XFS_INODE_ITEM_H__ */
index 6e2bca5d44d67acb52a58115a6b9482fc04a5bc1..bdebc21078d7e83bac4347ad13a5f569ed4d6a0f 100644 (file)
@@ -17,6 +17,7 @@
  */
 #include "xfs.h"
 #include "xfs_fs.h"
+#include "xfs_format.h"
 #include "xfs_log.h"
 #include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_error.h"
 #include "xfs_attr.h"
 #include "xfs_bmap.h"
+#include "xfs_bmap_util.h"
 #include "xfs_buf_item.h"
-#include "xfs_utils.h"
-#include "xfs_dfrag.h"
 #include "xfs_fsops.h"
-#include "xfs_vnodeops.h"
 #include "xfs_discard.h"
 #include "xfs_quota.h"
 #include "xfs_inode_item.h"
 #include "xfs_export.h"
 #include "xfs_trace.h"
 #include "xfs_icache.h"
+#include "xfs_symlink.h"
 
 #include <linux/capability.h>
 #include <linux/dcache.h>
@@ -350,6 +350,40 @@ xfs_readlink_by_handle(
        return error;
 }
 
+int
+xfs_set_dmattrs(
+       xfs_inode_t     *ip,
+       u_int           evmask,
+       u_int16_t       state)
+{
+       xfs_mount_t     *mp = ip->i_mount;
+       xfs_trans_t     *tp;
+       int             error;
+
+       if (!capable(CAP_SYS_ADMIN))
+               return XFS_ERROR(EPERM);
+
+       if (XFS_FORCED_SHUTDOWN(mp))
+               return XFS_ERROR(EIO);
+
+       tp = xfs_trans_alloc(mp, XFS_TRANS_SET_DMATTRS);
+       error = xfs_trans_reserve(tp, &M_RES(mp)->tr_ichange, 0, 0);
+       if (error) {
+               xfs_trans_cancel(tp, 0);
+               return error;
+       }
+       xfs_ilock(ip, XFS_ILOCK_EXCL);
+       xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
+
+       ip->i_d.di_dmevmask = evmask;
+       ip->i_d.di_dmstate  = state;
+
+       xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+       error = xfs_trans_commit(tp, 0);
+
+       return error;
+}
+
 STATIC int
 xfs_fssetdm_by_handle(
        struct file             *parfilp,
@@ -967,7 +1001,7 @@ xfs_ioctl_setattr(
         * first do an error checking pass.
         */
        tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_NOT_SIZE);
-       code = xfs_trans_reserve(tp, 0, XFS_ICHANGE_LOG_RES(mp), 0, 0, 0);
+       code = xfs_trans_reserve(tp, &M_RES(mp)->tr_ichange, 0, 0);
        if (code)
                goto error_return;
 
@@ -981,15 +1015,22 @@ xfs_ioctl_setattr(
         * to the file owner ID, except in cases where the
         * CAP_FSETID capability is applicable.
         */
-       if (current_fsuid() != ip->i_d.di_uid && !capable(CAP_FOWNER)) {
+       if (!inode_owner_or_capable(VFS_I(ip))) {
                code = XFS_ERROR(EPERM);
                goto error_return;
        }
 
        /*
         * Do a quota reservation only if projid is actually going to change.
+        * Only allow changing of projid from init_user_ns since it is a
+        * non user namespace aware identifier.
         */
        if (mask & FSX_PROJID) {
+               if (current_user_ns() != &init_user_ns) {
+                       code = XFS_ERROR(EINVAL);
+                       goto error_return;
+               }
+
                if (XFS_IS_QUOTA_RUNNING(mp) &&
                    XFS_IS_PQUOTA_ON(mp) &&
                    xfs_get_projid(ip) != fa->fsx_projid) {
@@ -1103,7 +1144,7 @@ xfs_ioctl_setattr(
                 * cleared upon successful return from chown()
                 */
                if ((ip->i_d.di_mode & (S_ISUID|S_ISGID)) &&
-                   !capable(CAP_FSETID))
+                   !inode_capable(VFS_I(ip), CAP_FSETID))
                        ip->i_d.di_mode &= ~(S_ISUID|S_ISGID);
 
                /*
@@ -1328,6 +1369,75 @@ xfs_ioc_getbmapx(
        return 0;
 }
 
+int
+xfs_ioc_swapext(
+       xfs_swapext_t   *sxp)
+{
+       xfs_inode_t     *ip, *tip;
+       struct fd       f, tmp;
+       int             error = 0;
+
+       /* Pull information for the target fd */
+       f = fdget((int)sxp->sx_fdtarget);
+       if (!f.file) {
+               error = XFS_ERROR(EINVAL);
+               goto out;
+       }
+
+       if (!(f.file->f_mode & FMODE_WRITE) ||
+           !(f.file->f_mode & FMODE_READ) ||
+           (f.file->f_flags & O_APPEND)) {
+               error = XFS_ERROR(EBADF);
+               goto out_put_file;
+       }
+
+       tmp = fdget((int)sxp->sx_fdtmp);
+       if (!tmp.file) {
+               error = XFS_ERROR(EINVAL);
+               goto out_put_file;
+       }
+
+       if (!(tmp.file->f_mode & FMODE_WRITE) ||
+           !(tmp.file->f_mode & FMODE_READ) ||
+           (tmp.file->f_flags & O_APPEND)) {
+               error = XFS_ERROR(EBADF);
+               goto out_put_tmp_file;
+       }
+
+       if (IS_SWAPFILE(file_inode(f.file)) ||
+           IS_SWAPFILE(file_inode(tmp.file))) {
+               error = XFS_ERROR(EINVAL);
+               goto out_put_tmp_file;
+       }
+
+       ip = XFS_I(file_inode(f.file));
+       tip = XFS_I(file_inode(tmp.file));
+
+       if (ip->i_mount != tip->i_mount) {
+               error = XFS_ERROR(EINVAL);
+               goto out_put_tmp_file;
+       }
+
+       if (ip->i_ino == tip->i_ino) {
+               error = XFS_ERROR(EINVAL);
+               goto out_put_tmp_file;
+       }
+
+       if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
+               error = XFS_ERROR(EIO);
+               goto out_put_tmp_file;
+       }
+
+       error = xfs_swap_extents(ip, tip, sxp);
+
+ out_put_tmp_file:
+       fdput(tmp);
+ out_put_file:
+       fdput(f);
+ out:
+       return error;
+}
+
 /*
  * Note: some of the ioctl's return positive numbers as a
  * byte count indicating success, such as readlink_by_handle.
@@ -1472,7 +1582,7 @@ xfs_file_ioctl(
                error = mnt_want_write_file(filp);
                if (error)
                        return error;
-               error = xfs_swapext(&sxp);
+               error = xfs_ioc_swapext(&sxp);
                mnt_drop_write_file(filp);
                return -error;
        }
@@ -1610,23 +1720,23 @@ xfs_file_ioctl(
                return -error;
 
        case XFS_IOC_FREE_EOFBLOCKS: {
-               struct xfs_eofblocks eofb;
+               struct xfs_fs_eofblocks eofb;
+               struct xfs_eofblocks keofb;
 
-               if (copy_from_user(&eofb, arg, sizeof(eofb)))
-                       return -XFS_ERROR(EFAULT);
+               if (!capable(CAP_SYS_ADMIN))
+                       return -EPERM;
 
-               if (eofb.eof_version != XFS_EOFBLOCKS_VERSION)
-                       return -XFS_ERROR(EINVAL);
+               if (mp->m_flags & XFS_MOUNT_RDONLY)
+                       return -XFS_ERROR(EROFS);
 
-               if (eofb.eof_flags & ~XFS_EOF_FLAGS_VALID)
-                       return -XFS_ERROR(EINVAL);
+               if (copy_from_user(&eofb, arg, sizeof(eofb)))
+                       return -XFS_ERROR(EFAULT);
 
-               if (memchr_inv(&eofb.pad32, 0, sizeof(eofb.pad32)) ||
-                   memchr_inv(eofb.pad64, 0, sizeof(eofb.pad64)))
-                       return -XFS_ERROR(EINVAL);
+               error = xfs_fs_eofblocks_from_user(&eofb, &keofb);
+               if (error)
+                       return -error;
 
-               error = xfs_icache_free_eofblocks(mp, &eofb);
-               return -error;
+               return -xfs_icache_free_eofblocks(mp, &keofb);
        }
 
        default:
index d56173b34a2a55662575f79a4ba5426662b647c9..77c02c7900b6eb8d78276abdd7d0b6ec03c12e29 100644 (file)
@@ -27,6 +27,10 @@ xfs_ioc_space(
        unsigned int            cmd,
        xfs_flock64_t           *bf);
 
+int
+xfs_ioc_swapext(
+       xfs_swapext_t   *sxp);
+
 extern int
 xfs_find_handle(
        unsigned int            cmd,
@@ -82,4 +86,10 @@ xfs_file_compat_ioctl(
        unsigned int            cmd,
        unsigned long           arg);
 
+extern int
+xfs_set_dmattrs(
+       struct xfs_inode        *ip,
+       u_int                   evmask,
+       u_int16_t               state);
+
 #endif
index c0c66259cc913d3a19df61efeab3bb86cb028aaf..d3ab9534307fcaa7e863be8965f80311090ce499 100644 (file)
@@ -33,8 +33,6 @@
 #include "xfs_inode.h"
 #include "xfs_itable.h"
 #include "xfs_error.h"
-#include "xfs_dfrag.h"
-#include "xfs_vnodeops.h"
 #include "xfs_fsops.h"
 #include "xfs_alloc.h"
 #include "xfs_rtalloc.h"
@@ -644,7 +642,7 @@ xfs_file_compat_ioctl(
                error = mnt_want_write_file(filp);
                if (error)
                        return error;
-               error = xfs_swapext(&sxp);
+               error = xfs_ioc_swapext(&sxp);
                mnt_drop_write_file(filp);
                return -error;
        }
index 6a7096422295d1d821f1e0bab397041c42f53de1..8d4d49b6fbf347b3add01ed4675b489a653a6dfb 100644 (file)
@@ -17,6 +17,7 @@
  */
 #include "xfs.h"
 #include "xfs_fs.h"
+#include "xfs_format.h"
 #include "xfs_log.h"
 #include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_inode_item.h"
 #include "xfs_btree.h"
 #include "xfs_bmap.h"
+#include "xfs_bmap_util.h"
 #include "xfs_rtalloc.h"
 #include "xfs_error.h"
 #include "xfs_itable.h"
 #include "xfs_attr.h"
 #include "xfs_buf_item.h"
 #include "xfs_trans_space.h"
-#include "xfs_utils.h"
 #include "xfs_iomap.h"
 #include "xfs_trace.h"
 #include "xfs_icache.h"
@@ -187,10 +188,8 @@ xfs_iomap_write_direct(
         * Allocate and setup the transaction
         */
        tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
-       error = xfs_trans_reserve(tp, resblks,
-                       XFS_WRITE_LOG_RES(mp), resrtextents,
-                       XFS_TRANS_PERM_LOG_RES,
-                       XFS_WRITE_LOG_COUNT);
+       error = xfs_trans_reserve(tp, &M_RES(mp)->tr_write,
+                                 resblks, resrtextents);
        /*
         * Check for running out of space, note: need lock to return
         */
@@ -698,10 +697,8 @@ xfs_iomap_write_allocate(
                        tp = xfs_trans_alloc(mp, XFS_TRANS_STRAT_WRITE);
                        tp->t_flags |= XFS_TRANS_RESERVE;
                        nres = XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK);
-                       error = xfs_trans_reserve(tp, nres,
-                                       XFS_WRITE_LOG_RES(mp),
-                                       0, XFS_TRANS_PERM_LOG_RES,
-                                       XFS_WRITE_LOG_COUNT);
+                       error = xfs_trans_reserve(tp, &M_RES(mp)->tr_write,
+                                                 nres, 0);
                        if (error) {
                                xfs_trans_cancel(tp, 0);
                                return XFS_ERROR(error);
@@ -864,10 +861,8 @@ xfs_iomap_write_unwritten(
                sb_start_intwrite(mp->m_super);
                tp = _xfs_trans_alloc(mp, XFS_TRANS_STRAT_WRITE, KM_NOFS);
                tp->t_flags |= XFS_TRANS_RESERVE | XFS_TRANS_FREEZE_PROT;
-               error = xfs_trans_reserve(tp, resblks,
-                               XFS_WRITE_LOG_RES(mp), 0,
-                               XFS_TRANS_PERM_LOG_RES,
-                               XFS_WRITE_LOG_COUNT);
+               error = xfs_trans_reserve(tp, &M_RES(mp)->tr_write,
+                                         resblks, 0);
                if (error) {
                        xfs_trans_cancel(tp, 0);
                        return XFS_ERROR(error);
index 96dda62d497b7e04a68a1a6ddfc579aececf8374..6d7e9e2d76518092506f2f1faae0f0aa0ad6e1f3 100644 (file)
@@ -17,6 +17,7 @@
  */
 #include "xfs.h"
 #include "xfs_fs.h"
+#include "xfs_format.h"
 #include "xfs_acl.h"
 #include "xfs_log.h"
 #include "xfs_trans.h"
 #include "xfs_dinode.h"
 #include "xfs_inode.h"
 #include "xfs_bmap.h"
+#include "xfs_bmap_util.h"
 #include "xfs_rtalloc.h"
 #include "xfs_error.h"
 #include "xfs_itable.h"
 #include "xfs_attr.h"
 #include "xfs_buf_item.h"
-#include "xfs_utils.h"
-#include "xfs_vnodeops.h"
 #include "xfs_inode_item.h"
 #include "xfs_trace.h"
 #include "xfs_icache.h"
+#include "xfs_symlink.h"
 
 #include <linux/capability.h>
 #include <linux/xattr.h>
@@ -420,8 +421,8 @@ xfs_vn_getattr(
        stat->dev = inode->i_sb->s_dev;
        stat->mode = ip->i_d.di_mode;
        stat->nlink = ip->i_d.di_nlink;
-       stat->uid = ip->i_d.di_uid;
-       stat->gid = ip->i_d.di_gid;
+       stat->uid = inode->i_uid;
+       stat->gid = inode->i_gid;
        stat->ino = ip->i_ino;
        stat->atime = inode->i_atime;
        stat->mtime = inode->i_mtime;
@@ -485,8 +486,8 @@ xfs_setattr_nonsize(
        int                     mask = iattr->ia_valid;
        xfs_trans_t             *tp;
        int                     error;
-       uid_t                   uid = 0, iuid = 0;
-       gid_t                   gid = 0, igid = 0;
+       kuid_t                  uid = GLOBAL_ROOT_UID, iuid = GLOBAL_ROOT_UID;
+       kgid_t                  gid = GLOBAL_ROOT_GID, igid = GLOBAL_ROOT_GID;
        struct xfs_dquot        *udqp = NULL, *gdqp = NULL;
        struct xfs_dquot        *olddquot1 = NULL, *olddquot2 = NULL;
 
@@ -522,13 +523,13 @@ xfs_setattr_nonsize(
                        uid = iattr->ia_uid;
                        qflags |= XFS_QMOPT_UQUOTA;
                } else {
-                       uid = ip->i_d.di_uid;
+                       uid = inode->i_uid;
                }
                if ((mask & ATTR_GID) && XFS_IS_GQUOTA_ON(mp)) {
                        gid = iattr->ia_gid;
                        qflags |= XFS_QMOPT_GQUOTA;
                }  else {
-                       gid = ip->i_d.di_gid;
+                       gid = inode->i_gid;
                }
 
                /*
@@ -538,14 +539,16 @@ xfs_setattr_nonsize(
                 */
                ASSERT(udqp == NULL);
                ASSERT(gdqp == NULL);
-               error = xfs_qm_vop_dqalloc(ip, uid, gid, xfs_get_projid(ip),
-                                        qflags, &udqp, &gdqp, NULL);
+               error = xfs_qm_vop_dqalloc(ip, xfs_kuid_to_uid(uid),
+                                          xfs_kgid_to_gid(gid),
+                                          xfs_get_projid(ip),
+                                          qflags, &udqp, &gdqp, NULL);
                if (error)
                        return error;
        }
 
        tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_NOT_SIZE);
-       error = xfs_trans_reserve(tp, 0, XFS_ICHANGE_LOG_RES(mp), 0, 0, 0);
+       error = xfs_trans_reserve(tp, &M_RES(mp)->tr_ichange, 0, 0);
        if (error)
                goto out_dqrele;
 
@@ -561,8 +564,8 @@ xfs_setattr_nonsize(
                 * while we didn't have the inode locked, inode's dquot(s)
                 * would have changed also.
                 */
-               iuid = ip->i_d.di_uid;
-               igid = ip->i_d.di_gid;
+               iuid = inode->i_uid;
+               igid = inode->i_gid;
                gid = (mask & ATTR_GID) ? iattr->ia_gid : igid;
                uid = (mask & ATTR_UID) ? iattr->ia_uid : iuid;
 
@@ -571,8 +574,8 @@ xfs_setattr_nonsize(
                 * going to change.
                 */
                if (XFS_IS_QUOTA_RUNNING(mp) &&
-                   ((XFS_IS_UQUOTA_ON(mp) && iuid != uid) ||
-                    (XFS_IS_GQUOTA_ON(mp) && igid != gid))) {
+                   ((XFS_IS_UQUOTA_ON(mp) && !uid_eq(iuid, uid)) ||
+                    (XFS_IS_GQUOTA_ON(mp) && !gid_eq(igid, gid)))) {
                        ASSERT(tp);
                        error = xfs_qm_vop_chown_reserve(tp, ip, udqp, gdqp,
                                                NULL, capable(CAP_FOWNER) ?
@@ -602,17 +605,17 @@ xfs_setattr_nonsize(
                 * Change the ownerships and register quota modifications
                 * in the transaction.
                 */
-               if (iuid != uid) {
+               if (!uid_eq(iuid, uid)) {
                        if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_UQUOTA_ON(mp)) {
                                ASSERT(mask & ATTR_UID);
                                ASSERT(udqp);
                                olddquot1 = xfs_qm_vop_chown(tp, ip,
                                                        &ip->i_udquot, udqp);
                        }
-                       ip->i_d.di_uid = uid;
+                       ip->i_d.di_uid = xfs_kuid_to_uid(uid);
                        inode->i_uid = uid;
                }
-               if (igid != gid) {
+               if (!gid_eq(igid, gid)) {
                        if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_GQUOTA_ON(mp)) {
                                ASSERT(!XFS_IS_PQUOTA_ON(mp));
                                ASSERT(mask & ATTR_GID);
@@ -620,7 +623,7 @@ xfs_setattr_nonsize(
                                olddquot2 = xfs_qm_vop_chown(tp, ip,
                                                        &ip->i_gdquot, gdqp);
                        }
-                       ip->i_d.di_gid = gid;
+                       ip->i_d.di_gid = xfs_kgid_to_gid(gid);
                        inode->i_gid = gid;
                }
        }
@@ -807,9 +810,7 @@ xfs_setattr_size(
                goto out_unlock;
 
        tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_SIZE);
-       error = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0,
-                                XFS_TRANS_PERM_LOG_RES,
-                                XFS_ITRUNCATE_LOG_COUNT);
+       error = xfs_trans_reserve(tp, &M_RES(mp)->tr_itruncate, 0, 0);
        if (error)
                goto out_trans_cancel;
 
@@ -932,7 +933,7 @@ xfs_vn_update_time(
        trace_xfs_update_time(ip);
 
        tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);
-       error = xfs_trans_reserve(tp, 0, XFS_FSYNC_TS_LOG_RES(mp), 0, 0, 0);
+       error = xfs_trans_reserve(tp, &M_RES(mp)->tr_fsyncts, 0, 0);
        if (error) {
                xfs_trans_cancel(tp, 0);
                return -error;
@@ -1173,8 +1174,8 @@ xfs_setup_inode(
 
        inode->i_mode   = ip->i_d.di_mode;
        set_nlink(inode, ip->i_d.di_nlink);
-       inode->i_uid    = ip->i_d.di_uid;
-       inode->i_gid    = ip->i_d.di_gid;
+       inode->i_uid    = xfs_uid_to_kuid(ip->i_d.di_uid);
+       inode->i_gid    = xfs_gid_to_kgid(ip->i_d.di_gid);
 
        switch (inode->i_mode & S_IFMT) {
        case S_IFBLK:
index ef41c92ce66e9e8159a2c3b1319774e589877ce6..d81fb41205ec97b9a00ecc0789e99763adc5af71 100644 (file)
@@ -27,4 +27,17 @@ extern ssize_t xfs_vn_listxattr(struct dentry *, char *data, size_t size);
 
 extern void xfs_setup_inode(struct xfs_inode *);
 
+/*
+ * Internal setattr interfaces.
+ */
+#define        XFS_ATTR_DMI            0x01    /* invocation from a DMI function */
+#define        XFS_ATTR_NONBLOCK       0x02    /* return EAGAIN if op would block */
+#define XFS_ATTR_NOLOCK                0x04    /* Don't grab any conflicting locks */
+#define XFS_ATTR_NOACL         0x08    /* Don't call xfs_acl_chmod */
+#define XFS_ATTR_SYNC          0x10    /* synchronous operation required */
+
+extern int xfs_setattr_nonsize(struct xfs_inode *ip, struct iattr *vap,
+                              int flags);
+extern int xfs_setattr_size(struct xfs_inode *ip, struct iattr *vap, int flags);
+
 #endif /* __XFS_IOPS_H__ */
index 800f896a6cc48cdffc19f85d6a9dd4c30ef897d5..f9bb590acc0ebfd38a4aaaee3baaa5b0bf6ec505 100644 (file)
 # define XFS_BIG_INUMS 0
 #endif
 
+/*
+ * Kernel specific type declarations for XFS
+ */
+typedef signed char            __int8_t;
+typedef unsigned char          __uint8_t;
+typedef signed short int       __int16_t;
+typedef unsigned short int     __uint16_t;
+typedef signed int             __int32_t;
+typedef unsigned int           __uint32_t;
+typedef signed long long int   __int64_t;
+typedef unsigned long long int __uint64_t;
+
+typedef __uint32_t             inst_t;         /* an instruction */
+
+typedef __s64                  xfs_off_t;      /* <file offset> type */
+typedef unsigned long long     xfs_ino_t;      /* <inode> type */
+typedef __s64                  xfs_daddr_t;    /* <disk address> type */
+typedef char *                 xfs_caddr_t;    /* <core address> type */
+typedef __u32                  xfs_dev_t;
+typedef __u32                  xfs_nlink_t;
+
+/* __psint_t is the same size as a pointer */
+#if (BITS_PER_LONG == 32)
+typedef __int32_t __psint_t;
+typedef __uint32_t __psunsigned_t;
+#elif (BITS_PER_LONG == 64)
+typedef __int64_t __psint_t;
+typedef __uint64_t __psunsigned_t;
+#else
+#error BITS_PER_LONG must be 32 or 64
+#endif
+
 #include "xfs_types.h"
 
 #include "kmem.h"
 #define xfs_inherit_sync       xfs_params.inherit_sync.val
 #define xfs_inherit_nodump     xfs_params.inherit_nodump.val
 #define xfs_inherit_noatime    xfs_params.inherit_noatim.val
-#define xfs_buf_timer_centisecs        xfs_params.xfs_buf_timer.val
-#define xfs_buf_age_centisecs  xfs_params.xfs_buf_age.val
 #define xfs_inherit_nosymlinks xfs_params.inherit_nosym.val
 #define xfs_rotorstep          xfs_params.rotorstep.val
 #define xfs_inherit_nodefrag   xfs_params.inherit_nodfrg.val
 #define MAX(a,b)       (max(a,b))
 #define howmany(x, y)  (((x)+((y)-1))/(y))
 
+/* Kernel uid/gid conversion. These are used to convert to/from the on disk
+ * uid_t/gid_t types to the kuid_t/kgid_t types that the kernel uses internally.
+ * The conversion here is type only, the value will remain the same since we
+ * are converting to the init_user_ns. The uid is later mapped to a particular
+ * user namespace value when crossing the kernel/user boundary.
+ */
+static inline __uint32_t xfs_kuid_to_uid(kuid_t uid)
+{
+       return from_kuid(&init_user_ns, uid);
+}
+
+static inline kuid_t xfs_uid_to_kuid(__uint32_t uid)
+{
+       return make_kuid(&init_user_ns, uid);
+}
+
+static inline __uint32_t xfs_kgid_to_gid(kgid_t gid)
+{
+       return from_kgid(&init_user_ns, gid);
+}
+
+static inline kgid_t xfs_gid_to_kgid(__uint32_t gid)
+{
+       return make_kgid(&init_user_ns, gid);
+}
+
 /*
  * Various platform dependent calls that don't fit anywhere else
  */
index d852a2b3e1fdfae0c4fb5bf18452ec79fd03ab01..5372d58ef93a26220f0d916763e7f37de63d0050 100644 (file)
@@ -614,7 +614,8 @@ xfs_log_mount(
        xfs_daddr_t     blk_offset,
        int             num_bblks)
 {
-       int             error;
+       int             error = 0;
+       int             min_logfsbs;
 
        if (!(mp->m_flags & XFS_MOUNT_NORECOVERY))
                xfs_notice(mp, "Mounting Filesystem");
@@ -630,6 +631,50 @@ xfs_log_mount(
                goto out;
        }
 
+       /*
+        * Validate the given log space and drop a critical message via syslog
+        * if the log size is too small that would lead to some unexpected
+        * situations in transaction log space reservation stage.
+        *
+        * Note: we can't just reject the mount if the validation fails.  This
+        * would mean that people would have to downgrade their kernel just to
+        * remedy the situation as there is no way to grow the log (short of
+        * black magic surgery with xfs_db).
+        *
+        * We can, however, reject mounts for CRC format filesystems, as the
+        * mkfs binary being used to make the filesystem should never create a
+        * filesystem with a log that is too small.
+        */
+       min_logfsbs = xfs_log_calc_minimum_size(mp);
+
+       if (mp->m_sb.sb_logblocks < min_logfsbs) {
+               xfs_warn(mp,
+               "Log size %d blocks too small, minimum size is %d blocks",
+                        mp->m_sb.sb_logblocks, min_logfsbs);
+               error = EINVAL;
+       } else if (mp->m_sb.sb_logblocks > XFS_MAX_LOG_BLOCKS) {
+               xfs_warn(mp,
+               "Log size %d blocks too large, maximum size is %lld blocks",
+                        mp->m_sb.sb_logblocks, XFS_MAX_LOG_BLOCKS);
+               error = EINVAL;
+       } else if (XFS_FSB_TO_B(mp, mp->m_sb.sb_logblocks) > XFS_MAX_LOG_BYTES) {
+               xfs_warn(mp,
+               "log size %lld bytes too large, maximum size is %lld bytes",
+                        XFS_FSB_TO_B(mp, mp->m_sb.sb_logblocks),
+                        XFS_MAX_LOG_BYTES);
+               error = EINVAL;
+       }
+       if (error) {
+               if (xfs_sb_version_hascrc(&mp->m_sb)) {
+                       xfs_crit(mp, "AAIEEE! Log failed size checks. Abort!");
+                       ASSERT(0);
+                       goto out_free_log;
+               }
+               xfs_crit(mp,
+"Log size out of supported range. Continuing onwards, but if log hangs are\n"
+"experienced then please report this message in the bug report.");
+       }
+
        /*
         * Initialize the AIL now we have a log.
         */
@@ -720,7 +765,7 @@ xfs_log_mount_finish(xfs_mount_t *mp)
  * Unmount record used to have a string "Unmount filesystem--" in the
  * data section where the "Un" was really a magic number (XLOG_UNMOUNT_TYPE).
  * We just write the magic number now since that particular field isn't
- * currently architecture converted and "nUmount" is a bit foo.
+ * currently architecture converted and "Unmount" is a bit foo.
  * As far as I know, there weren't any dependencies on the old behaviour.
  */
 
@@ -1941,7 +1986,7 @@ xlog_print_tic_res(
 
        xfs_alert_tag(mp, XFS_PTAG_LOGRES,
                "xlog_write: reservation ran out. Need to up reservation");
-       xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
+       xfs_force_shutdown(mp, SHUTDOWN_LOG_IO_ERROR);
 }
 
 /*
@@ -2044,7 +2089,7 @@ xlog_write_setup_ophdr(
  * Set up the parameters of the region copy into the log. This has
  * to handle region write split across multiple log buffers - this
  * state is kept external to this function so that this code can
- * can be written in an obvious, self documenting manner.
+ * be written in an obvious, self documenting manner.
  */
 static int
 xlog_write_setup_copy(
@@ -3391,24 +3436,17 @@ xfs_log_ticket_get(
 }
 
 /*
- * Allocate and initialise a new log ticket.
+ * Figure out the total log space unit (in bytes) that would be
+ * required for a log ticket.
  */
-struct xlog_ticket *
-xlog_ticket_alloc(
-       struct xlog     *log,
-       int             unit_bytes,
-       int             cnt,
-       char            client,
-       bool            permanent,
-       xfs_km_flags_t  alloc_flags)
+int
+xfs_log_calc_unit_res(
+       struct xfs_mount        *mp,
+       int                     unit_bytes)
 {
-       struct xlog_ticket *tic;
-       uint            num_headers;
-       int             iclog_space;
-
-       tic = kmem_zone_zalloc(xfs_log_ticket_zone, alloc_flags);
-       if (!tic)
-               return NULL;
+       struct xlog             *log = mp->m_log;
+       int                     iclog_space;
+       uint                    num_headers;
 
        /*
         * Permanent reservations have up to 'cnt'-1 active log operations
@@ -3483,20 +3521,43 @@ xlog_ticket_alloc(
        unit_bytes += log->l_iclog_hsize;
 
        /* for roundoff padding for transaction data and one for commit record */
-       if (xfs_sb_version_haslogv2(&log->l_mp->m_sb) &&
-           log->l_mp->m_sb.sb_logsunit > 1) {
+       if (xfs_sb_version_haslogv2(&mp->m_sb) && mp->m_sb.sb_logsunit > 1) {
                /* log su roundoff */
-               unit_bytes += 2*log->l_mp->m_sb.sb_logsunit;
+               unit_bytes += 2 * mp->m_sb.sb_logsunit;
        } else {
                /* BB roundoff */
-               unit_bytes += 2*BBSIZE;
+               unit_bytes += 2 * BBSIZE;
         }
 
+       return unit_bytes;
+}
+
+/*
+ * Allocate and initialise a new log ticket.
+ */
+struct xlog_ticket *
+xlog_ticket_alloc(
+       struct xlog             *log,
+       int                     unit_bytes,
+       int                     cnt,
+       char                    client,
+       bool                    permanent,
+       xfs_km_flags_t          alloc_flags)
+{
+       struct xlog_ticket      *tic;
+       int                     unit_res;
+
+       tic = kmem_zone_zalloc(xfs_log_ticket_zone, alloc_flags);
+       if (!tic)
+               return NULL;
+
+       unit_res = xfs_log_calc_unit_res(log->l_mp, unit_bytes);
+
        atomic_set(&tic->t_ref, 1);
        tic->t_task             = current;
        INIT_LIST_HEAD(&tic->t_queue);
-       tic->t_unit_res         = unit_bytes;
-       tic->t_curr_res         = unit_bytes;
+       tic->t_unit_res         = unit_res;
+       tic->t_curr_res         = unit_res;
        tic->t_cnt              = cnt;
        tic->t_ocnt             = cnt;
        tic->t_tid              = prandom_u32();
index fb630e496c12406c558b7cc53854bf0cd123ccaf..1c458487f000a42306cb44f14509d80fea0c2f02 100644 (file)
 #ifndef        __XFS_LOG_H__
 #define __XFS_LOG_H__
 
-/* get lsn fields */
-#define CYCLE_LSN(lsn) ((uint)((lsn)>>32))
-#define BLOCK_LSN(lsn) ((uint)(lsn))
+#include "xfs_log_format.h"
 
-/* this is used in a spot where we might otherwise double-endian-flip */
-#define CYCLE_LSN_DISK(lsn) (((__be32 *)&(lsn))[0])
+struct xfs_log_vec {
+       struct xfs_log_vec      *lv_next;       /* next lv in build list */
+       int                     lv_niovecs;     /* number of iovecs in lv */
+       struct xfs_log_iovec    *lv_iovecp;     /* iovec array */
+       struct xfs_log_item     *lv_item;       /* owner */
+       char                    *lv_buf;        /* formatted buffer */
+       int                     lv_buf_len;     /* size of formatted buffer */
+       int                     lv_size;        /* size of allocated lv */
+};
+
+#define XFS_LOG_VEC_ORDERED    (-1)
+
+/*
+ * Structure used to pass callback function and the function's argument
+ * to the log manager.
+ */
+typedef struct xfs_log_callback {
+       struct xfs_log_callback *cb_next;
+       void                    (*cb_func)(void *, int);
+       void                    *cb_arg;
+} xfs_log_callback_t;
 
-#ifdef __KERNEL__
 /*
  * By comparing each component, we don't have to worry about extra
  * endian issues in treating two 32 bit numbers as one 64 bit number
@@ -59,67 +75,6 @@ static inline xfs_lsn_t      _lsn_cmp(xfs_lsn_t lsn1, xfs_lsn_t lsn2)
  */
 #define XFS_LOG_SYNC           0x1
 
-#endif /* __KERNEL__ */
-
-
-/* Log Clients */
-#define XFS_TRANSACTION                0x69
-#define XFS_VOLUME             0x2
-#define XFS_LOG                        0xaa
-
-
-/* Region types for iovec's i_type */
-#define XLOG_REG_TYPE_BFORMAT          1
-#define XLOG_REG_TYPE_BCHUNK           2
-#define XLOG_REG_TYPE_EFI_FORMAT       3
-#define XLOG_REG_TYPE_EFD_FORMAT       4
-#define XLOG_REG_TYPE_IFORMAT          5
-#define XLOG_REG_TYPE_ICORE            6
-#define XLOG_REG_TYPE_IEXT             7
-#define XLOG_REG_TYPE_IBROOT           8
-#define XLOG_REG_TYPE_ILOCAL           9
-#define XLOG_REG_TYPE_IATTR_EXT                10
-#define XLOG_REG_TYPE_IATTR_BROOT      11
-#define XLOG_REG_TYPE_IATTR_LOCAL      12
-#define XLOG_REG_TYPE_QFORMAT          13
-#define XLOG_REG_TYPE_DQUOT            14
-#define XLOG_REG_TYPE_QUOTAOFF         15
-#define XLOG_REG_TYPE_LRHEADER         16
-#define XLOG_REG_TYPE_UNMOUNT          17
-#define XLOG_REG_TYPE_COMMIT           18
-#define XLOG_REG_TYPE_TRANSHDR         19
-#define XLOG_REG_TYPE_ICREATE          20
-#define XLOG_REG_TYPE_MAX              20
-
-typedef struct xfs_log_iovec {
-       void            *i_addr;        /* beginning address of region */
-       int             i_len;          /* length in bytes of region */
-       uint            i_type;         /* type of region */
-} xfs_log_iovec_t;
-
-struct xfs_log_vec {
-       struct xfs_log_vec      *lv_next;       /* next lv in build list */
-       int                     lv_niovecs;     /* number of iovecs in lv */
-       struct xfs_log_iovec    *lv_iovecp;     /* iovec array */
-       struct xfs_log_item     *lv_item;       /* owner */
-       char                    *lv_buf;        /* formatted buffer */
-       int                     lv_buf_len;     /* size of formatted buffer */
-};
-
-#define XFS_LOG_VEC_ORDERED    (-1)
-
-/*
- * Structure used to pass callback function and the function's argument
- * to the log manager.
- */
-typedef struct xfs_log_callback {
-       struct xfs_log_callback *cb_next;
-       void                    (*cb_func)(void *, int);
-       void                    *cb_arg;
-} xfs_log_callback_t;
-
-
-#ifdef __KERNEL__
 /* Log manager interfaces */
 struct xfs_mount;
 struct xlog_in_core;
@@ -188,5 +143,4 @@ void        xfs_log_work_queue(struct xfs_mount *mp);
 void   xfs_log_worker(struct work_struct *work);
 void   xfs_log_quiesce(struct xfs_mount *mp);
 
-#endif
 #endif /* __XFS_LOG_H__ */
index 02b9cf3f8252baeade5d4e99b3e88853a7b50b98..cfe97973ba36d1d586c3704b536aebce2e391af1 100644 (file)
@@ -80,6 +80,83 @@ xlog_cil_init_post_recovery(
                                                                log->l_curr_block);
 }
 
+STATIC int
+xlog_cil_lv_item_format(
+       struct xfs_log_item     *lip,
+       struct xfs_log_vec      *lv)
+{
+       int     index;
+       char    *ptr;
+
+       /* format new vectors into array */
+       lip->li_ops->iop_format(lip, lv->lv_iovecp);
+
+       /* copy data into existing array */
+       ptr = lv->lv_buf;
+       for (index = 0; index < lv->lv_niovecs; index++) {
+               struct xfs_log_iovec *vec = &lv->lv_iovecp[index];
+
+               memcpy(ptr, vec->i_addr, vec->i_len);
+               vec->i_addr = ptr;
+               ptr += vec->i_len;
+       }
+
+       /*
+        * some size calculations for log vectors over-estimate, so the caller
+        * doesn't know the amount of space actually used by the item. Return
+        * the byte count to the caller so they can check and store it
+        * appropriately.
+        */
+       return ptr - lv->lv_buf;
+}
+
+/*
+ * Prepare the log item for insertion into the CIL. Calculate the difference in
+ * log space and vectors it will consume, and if it is a new item pin it as
+ * well.
+ */
+STATIC void
+xfs_cil_prepare_item(
+       struct xlog             *log,
+       struct xfs_log_vec      *lv,
+       struct xfs_log_vec      *old_lv,
+       int                     *diff_len,
+       int                     *diff_iovecs)
+{
+       /* Account for the new LV being passed in */
+       if (lv->lv_buf_len != XFS_LOG_VEC_ORDERED) {
+               *diff_len += lv->lv_buf_len;
+               *diff_iovecs += lv->lv_niovecs;
+       }
+
+       /*
+        * If there is no old LV, this is the first time we've seen the item in
+        * this CIL context and so we need to pin it. If we are replacing the
+        * old_lv, then remove the space it accounts for and free it.
+        */
+       if (!old_lv)
+               lv->lv_item->li_ops->iop_pin(lv->lv_item);
+       else if (old_lv != lv) {
+               ASSERT(lv->lv_buf_len != XFS_LOG_VEC_ORDERED);
+
+               *diff_len -= old_lv->lv_buf_len;
+               *diff_iovecs -= old_lv->lv_niovecs;
+               kmem_free(old_lv);
+       }
+
+       /* attach new log vector to log item */
+       lv->lv_item->li_lv = lv;
+
+       /*
+        * If this is the first time the item is being committed to the
+        * CIL, store the sequence number on the log item so we can
+        * tell in future commits whether this is the first checkpoint
+        * the item is being committed into.
+        */
+       if (!lv->lv_item->li_seq)
+               lv->lv_item->li_seq = log->l_cilp->xc_ctx->sequence;
+}
+
 /*
  * Format log item into a flat buffers
  *
@@ -106,35 +183,39 @@ xlog_cil_init_post_recovery(
  * format the regions into the iclog as though they are being formatted
  * directly out of the objects themselves.
  */
-static struct xfs_log_vec *
-xlog_cil_prepare_log_vecs(
-       struct xfs_trans        *tp)
+static void
+xlog_cil_insert_format_items(
+       struct xlog             *log,
+       struct xfs_trans        *tp,
+       int                     *diff_len,
+       int                     *diff_iovecs)
 {
        struct xfs_log_item_desc *lidp;
-       struct xfs_log_vec      *lv = NULL;
-       struct xfs_log_vec      *ret_lv = NULL;
 
 
        /* Bail out if we didn't find a log item.  */
        if (list_empty(&tp->t_items)) {
                ASSERT(0);
-               return NULL;
+               return;
        }
 
        list_for_each_entry(lidp, &tp->t_items, lid_trans) {
-               struct xfs_log_vec *new_lv;
-               void    *ptr;
-               int     index;
-               int     len = 0;
-               uint    niovecs;
+               struct xfs_log_item *lip = lidp->lid_item;
+               struct xfs_log_vec *lv;
+               struct xfs_log_vec *old_lv;
+               int     niovecs = 0;
+               int     nbytes = 0;
+               int     buf_size;
                bool    ordered = false;
 
                /* Skip items which aren't dirty in this transaction. */
                if (!(lidp->lid_flags & XFS_LID_DIRTY))
                        continue;
 
+               /* get number of vecs and size of data to be stored */
+               lip->li_ops->iop_size(lip, &niovecs, &nbytes);
+
                /* Skip items that do not have any vectors for writing */
-               niovecs = IOP_SIZE(lidp->lid_item);
                if (!niovecs)
                        continue;
 
@@ -146,109 +227,63 @@ xlog_cil_prepare_log_vecs(
                if (niovecs == XFS_LOG_VEC_ORDERED) {
                        ordered = true;
                        niovecs = 0;
+                       nbytes = 0;
                }
 
-               new_lv = kmem_zalloc(sizeof(*new_lv) +
-                               niovecs * sizeof(struct xfs_log_iovec),
-                               KM_SLEEP|KM_NOFS);
-
-               new_lv->lv_item = lidp->lid_item;
-               new_lv->lv_niovecs = niovecs;
-               if (ordered) {
-                       /* track as an ordered logvec */
-                       new_lv->lv_buf_len = XFS_LOG_VEC_ORDERED;
-                       goto next;
-               }
-
-               /* The allocated iovec region lies beyond the log vector. */
-               new_lv->lv_iovecp = (struct xfs_log_iovec *)&new_lv[1];
+               /* grab the old item if it exists for reservation accounting */
+               old_lv = lip->li_lv;
 
-               /* build the vector array and calculate it's length */
-               IOP_FORMAT(new_lv->lv_item, new_lv->lv_iovecp);
-               for (index = 0; index < new_lv->lv_niovecs; index++)
-                       len += new_lv->lv_iovecp[index].i_len;
+               /* calc buffer size */
+               buf_size = sizeof(struct xfs_log_vec) + nbytes +
+                               niovecs * sizeof(struct xfs_log_iovec);
 
-               new_lv->lv_buf_len = len;
-               new_lv->lv_buf = kmem_alloc(new_lv->lv_buf_len,
-                               KM_SLEEP|KM_NOFS);
-               ptr = new_lv->lv_buf;
+               /* compare to existing item size */
+               if (lip->li_lv && buf_size <= lip->li_lv->lv_size) {
+                       /* same or smaller, optimise common overwrite case */
+                       lv = lip->li_lv;
+                       lv->lv_next = NULL;
 
-               for (index = 0; index < new_lv->lv_niovecs; index++) {
-                       struct xfs_log_iovec *vec = &new_lv->lv_iovecp[index];
+                       if (ordered)
+                               goto insert;
 
-                       memcpy(ptr, vec->i_addr, vec->i_len);
-                       vec->i_addr = ptr;
-                       ptr += vec->i_len;
-               }
-               ASSERT(ptr == new_lv->lv_buf + new_lv->lv_buf_len);
-
-next:
-               if (!ret_lv)
-                       ret_lv = new_lv;
-               else
-                       lv->lv_next = new_lv;
-               lv = new_lv;
-       }
-
-       return ret_lv;
-}
-
-/*
- * Prepare the log item for insertion into the CIL. Calculate the difference in
- * log space and vectors it will consume, and if it is a new item pin it as
- * well.
- */
-STATIC void
-xfs_cil_prepare_item(
-       struct xlog             *log,
-       struct xfs_log_vec      *lv,
-       int                     *len,
-       int                     *diff_iovecs)
-{
-       struct xfs_log_vec      *old = lv->lv_item->li_lv;
+                       /*
+                        * set the item up as though it is a new insertion so
+                        * that the space reservation accounting is correct.
+                        */
+                       *diff_iovecs -= lv->lv_niovecs;
+                       *diff_len -= lv->lv_buf_len;
 
-       if (old) {
-               /* existing lv on log item, space used is a delta */
-               ASSERT((old->lv_buf && old->lv_buf_len && old->lv_niovecs) ||
-                       old->lv_buf_len == XFS_LOG_VEC_ORDERED);
+                       /* Ensure the lv is set up according to ->iop_size */
+                       lv->lv_niovecs = niovecs;
+                       lv->lv_buf = (char *)lv + buf_size - nbytes;
 
-               /*
-                * If the new item is ordered, keep the old one that is already
-                * tracking dirty or ordered regions
-                */
-               if (lv->lv_buf_len == XFS_LOG_VEC_ORDERED) {
-                       ASSERT(!lv->lv_buf);
-                       kmem_free(lv);
-                       return;
+                       lv->lv_buf_len = xlog_cil_lv_item_format(lip, lv);
+                       goto insert;
                }
 
-               *len += lv->lv_buf_len - old->lv_buf_len;
-               *diff_iovecs += lv->lv_niovecs - old->lv_niovecs;
-               kmem_free(old->lv_buf);
-               kmem_free(old);
-       } else {
-               /* new lv, must pin the log item */
-               ASSERT(!lv->lv_item->li_lv);
-
-               if (lv->lv_buf_len != XFS_LOG_VEC_ORDERED) {
-                       *len += lv->lv_buf_len;
-                       *diff_iovecs += lv->lv_niovecs;
+               /* allocate new data chunk */
+               lv = kmem_zalloc(buf_size, KM_SLEEP|KM_NOFS);
+               lv->lv_item = lip;
+               lv->lv_size = buf_size;
+               lv->lv_niovecs = niovecs;
+               if (ordered) {
+                       /* track as an ordered logvec */
+                       ASSERT(lip->li_lv == NULL);
+                       lv->lv_buf_len = XFS_LOG_VEC_ORDERED;
+                       goto insert;
                }
-               IOP_PIN(lv->lv_item);
 
-       }
+               /* The allocated iovec region lies beyond the log vector. */
+               lv->lv_iovecp = (struct xfs_log_iovec *)&lv[1];
 
-       /* attach new log vector to log item */
-       lv->lv_item->li_lv = lv;
+               /* The allocated data region lies beyond the iovec region */
+               lv->lv_buf = (char *)lv + buf_size - nbytes;
 
-       /*
-        * If this is the first time the item is being committed to the
-        * CIL, store the sequence number on the log item so we can
-        * tell in future commits whether this is the first checkpoint
-        * the item is being committed into.
-        */
-       if (!lv->lv_item->li_seq)
-               lv->lv_item->li_seq = log->l_cilp->xc_ctx->sequence;
+               lv->lv_buf_len = xlog_cil_lv_item_format(lip, lv);
+insert:
+               ASSERT(lv->lv_buf_len <= nbytes);
+               xfs_cil_prepare_item(log, lv, old_lv, diff_len, diff_iovecs);
+       }
 }
 
 /*
@@ -261,53 +296,47 @@ xfs_cil_prepare_item(
 static void
 xlog_cil_insert_items(
        struct xlog             *log,
-       struct xfs_log_vec      *log_vector,
-       struct xlog_ticket      *ticket)
+       struct xfs_trans        *tp)
 {
        struct xfs_cil          *cil = log->l_cilp;
        struct xfs_cil_ctx      *ctx = cil->xc_ctx;
-       struct xfs_log_vec      *lv;
+       struct xfs_log_item_desc *lidp;
        int                     len = 0;
        int                     diff_iovecs = 0;
        int                     iclog_space;
 
-       ASSERT(log_vector);
+       ASSERT(tp);
 
        /*
-        * Do all the accounting aggregation and switching of log vectors
-        * around in a separate loop to the insertion of items into the CIL.
-        * Then we can do a separate loop to update the CIL within a single
-        * lock/unlock pair. This reduces the number of round trips on the CIL
-        * lock from O(nr_logvectors) to O(1) and greatly reduces the overall
-        * hold time for the transaction commit.
-        *
-        * If this is the first time the item is being placed into the CIL in
-        * this context, pin it so it can't be written to disk until the CIL is
-        * flushed to the iclog and the iclog written to disk.
-        *
         * We can do this safely because the context can't checkpoint until we
         * are done so it doesn't matter exactly how we update the CIL.
         */
+       xlog_cil_insert_format_items(log, tp, &len, &diff_iovecs);
+
+       /*
+        * Now (re-)position everything modified at the tail of the CIL.
+        * We do this here so we only need to take the CIL lock once during
+        * the transaction commit.
+        */
        spin_lock(&cil->xc_cil_lock);
-       for (lv = log_vector; lv; ) {
-               struct xfs_log_vec *next = lv->lv_next;
+       list_for_each_entry(lidp, &tp->t_items, lid_trans) {
+               struct xfs_log_item     *lip = lidp->lid_item;
 
-               ASSERT(lv->lv_item->li_lv || list_empty(&lv->lv_item->li_cil));
-               lv->lv_next = NULL;
+               /* Skip items which aren't dirty in this transaction. */
+               if (!(lidp->lid_flags & XFS_LID_DIRTY))
+                       continue;
 
-               /*
-                * xfs_cil_prepare_item() may free the lv, so move the item on
-                * the CIL first.
-                */
-               list_move_tail(&lv->lv_item->li_cil, &cil->xc_cil);
-               xfs_cil_prepare_item(log, lv, &len, &diff_iovecs);
-               lv = next;
+               list_move_tail(&lip->li_cil, &cil->xc_cil);
        }
 
        /* account for space used by new iovec headers  */
        len += diff_iovecs * sizeof(xlog_op_header_t);
        ctx->nvecs += diff_iovecs;
 
+       /* attach the transaction to the CIL if it has any busy extents */
+       if (!list_empty(&tp->t_busy))
+               list_splice_init(&tp->t_busy, &ctx->busy_extents);
+
        /*
         * Now transfer enough transaction reservation to the context ticket
         * for the checkpoint. The context ticket is special - the unit
@@ -316,10 +345,8 @@ xlog_cil_insert_items(
         * during the transaction commit.
         */
        if (ctx->ticket->t_curr_res == 0) {
-               /* first commit in checkpoint, steal the header reservation */
-               ASSERT(ticket->t_curr_res >= ctx->ticket->t_unit_res + len);
                ctx->ticket->t_curr_res = ctx->ticket->t_unit_res;
-               ticket->t_curr_res -= ctx->ticket->t_unit_res;
+               tp->t_ticket->t_curr_res -= ctx->ticket->t_unit_res;
        }
 
        /* do we need space for more log record headers? */
@@ -333,10 +360,10 @@ xlog_cil_insert_items(
                hdrs *= log->l_iclog_hsize + sizeof(struct xlog_op_header);
                ctx->ticket->t_unit_res += hdrs;
                ctx->ticket->t_curr_res += hdrs;
-               ticket->t_curr_res -= hdrs;
-               ASSERT(ticket->t_curr_res >= len);
+               tp->t_ticket->t_curr_res -= hdrs;
+               ASSERT(tp->t_ticket->t_curr_res >= len);
        }
-       ticket->t_curr_res -= len;
+       tp->t_ticket->t_curr_res -= len;
        ctx->space_used += len;
 
        spin_unlock(&cil->xc_cil_lock);
@@ -350,7 +377,6 @@ xlog_cil_free_logvec(
 
        for (lv = log_vector; lv; ) {
                struct xfs_log_vec *next = lv->lv_next;
-               kmem_free(lv->lv_buf);
                kmem_free(lv);
                lv = next;
        }
@@ -376,9 +402,9 @@ xlog_cil_committed(
        xfs_extent_busy_clear(mp, &ctx->busy_extents,
                             (mp->m_flags & XFS_MOUNT_DISCARD) && !abort);
 
-       spin_lock(&ctx->cil->xc_cil_lock);
+       spin_lock(&ctx->cil->xc_push_lock);
        list_del(&ctx->committing);
-       spin_unlock(&ctx->cil->xc_cil_lock);
+       spin_unlock(&ctx->cil->xc_push_lock);
 
        xlog_cil_free_logvec(ctx->lv_chain);
 
@@ -433,7 +459,7 @@ xlog_cil_push(
        down_write(&cil->xc_ctx_lock);
        ctx = cil->xc_ctx;
 
-       spin_lock(&cil->xc_cil_lock);
+       spin_lock(&cil->xc_push_lock);
        push_seq = cil->xc_push_seq;
        ASSERT(push_seq <= ctx->sequence);
 
@@ -444,10 +470,10 @@ xlog_cil_push(
         */
        if (list_empty(&cil->xc_cil)) {
                cil->xc_push_seq = 0;
-               spin_unlock(&cil->xc_cil_lock);
+               spin_unlock(&cil->xc_push_lock);
                goto out_skip;
        }
-       spin_unlock(&cil->xc_cil_lock);
+       spin_unlock(&cil->xc_push_lock);
 
 
        /* check for a previously pushed seqeunce */
@@ -515,9 +541,9 @@ xlog_cil_push(
         * that higher sequences will wait for us to write out a commit record
         * before they do.
         */
-       spin_lock(&cil->xc_cil_lock);
+       spin_lock(&cil->xc_push_lock);
        list_add(&ctx->committing, &cil->xc_committing);
-       spin_unlock(&cil->xc_cil_lock);
+       spin_unlock(&cil->xc_push_lock);
        up_write(&cil->xc_ctx_lock);
 
        /*
@@ -552,7 +578,7 @@ xlog_cil_push(
         * order the commit records so replay will get them in the right order.
         */
 restart:
-       spin_lock(&cil->xc_cil_lock);
+       spin_lock(&cil->xc_push_lock);
        list_for_each_entry(new_ctx, &cil->xc_committing, committing) {
                /*
                 * Higher sequences will wait for this one so skip them.
@@ -565,11 +591,11 @@ restart:
                         * It is still being pushed! Wait for the push to
                         * complete, then start again from the beginning.
                         */
-                       xlog_wait(&cil->xc_commit_wait, &cil->xc_cil_lock);
+                       xlog_wait(&cil->xc_commit_wait, &cil->xc_push_lock);
                        goto restart;
                }
        }
-       spin_unlock(&cil->xc_cil_lock);
+       spin_unlock(&cil->xc_push_lock);
 
        /* xfs_log_done always frees the ticket on error. */
        commit_lsn = xfs_log_done(log->l_mp, tic, &commit_iclog, 0);
@@ -588,10 +614,10 @@ restart:
         * callbacks to the iclog we can assign the commit LSN to the context
         * and wake up anyone who is waiting for the commit to complete.
         */
-       spin_lock(&cil->xc_cil_lock);
+       spin_lock(&cil->xc_push_lock);
        ctx->commit_lsn = commit_lsn;
        wake_up_all(&cil->xc_commit_wait);
-       spin_unlock(&cil->xc_cil_lock);
+       spin_unlock(&cil->xc_push_lock);
 
        /* release the hounds! */
        return xfs_log_release_iclog(log->l_mp, commit_iclog);
@@ -644,12 +670,12 @@ xlog_cil_push_background(
        if (cil->xc_ctx->space_used < XLOG_CIL_SPACE_LIMIT(log))
                return;
 
-       spin_lock(&cil->xc_cil_lock);
+       spin_lock(&cil->xc_push_lock);
        if (cil->xc_push_seq < cil->xc_current_sequence) {
                cil->xc_push_seq = cil->xc_current_sequence;
                queue_work(log->l_mp->m_cil_workqueue, &cil->xc_push_work);
        }
-       spin_unlock(&cil->xc_cil_lock);
+       spin_unlock(&cil->xc_push_lock);
 
 }
 
@@ -672,14 +698,14 @@ xlog_cil_push_foreground(
         * If the CIL is empty or we've already pushed the sequence then
         * there's no work we need to do.
         */
-       spin_lock(&cil->xc_cil_lock);
+       spin_lock(&cil->xc_push_lock);
        if (list_empty(&cil->xc_cil) || push_seq <= cil->xc_push_seq) {
-               spin_unlock(&cil->xc_cil_lock);
+               spin_unlock(&cil->xc_push_lock);
                return;
        }
 
        cil->xc_push_seq = push_seq;
-       spin_unlock(&cil->xc_cil_lock);
+       spin_unlock(&cil->xc_push_lock);
 
        /* do the push now */
        xlog_cil_push(log);
@@ -706,43 +732,25 @@ xfs_log_commit_cil(
        int                     flags)
 {
        struct xlog             *log = mp->m_log;
+       struct xfs_cil          *cil = log->l_cilp;
        int                     log_flags = 0;
-       struct xfs_log_vec      *log_vector;
 
        if (flags & XFS_TRANS_RELEASE_LOG_RES)
                log_flags = XFS_LOG_REL_PERM_RESERV;
 
-       /*
-        * Do all the hard work of formatting items (including memory
-        * allocation) outside the CIL context lock. This prevents stalling CIL
-        * pushes when we are low on memory and a transaction commit spends a
-        * lot of time in memory reclaim.
-        */
-       log_vector = xlog_cil_prepare_log_vecs(tp);
-       if (!log_vector)
-               return ENOMEM;
-
        /* lock out background commit */
-       down_read(&log->l_cilp->xc_ctx_lock);
-       if (commit_lsn)
-               *commit_lsn = log->l_cilp->xc_ctx->sequence;
+       down_read(&cil->xc_ctx_lock);
 
-       /* xlog_cil_insert_items() destroys log_vector list */
-       xlog_cil_insert_items(log, log_vector, tp->t_ticket);
+       xlog_cil_insert_items(log, tp);
 
        /* check we didn't blow the reservation */
        if (tp->t_ticket->t_curr_res < 0)
-               xlog_print_tic_res(log->l_mp, tp->t_ticket);
+               xlog_print_tic_res(mp, tp->t_ticket);
 
-       /* attach the transaction to the CIL if it has any busy extents */
-       if (!list_empty(&tp->t_busy)) {
-               spin_lock(&log->l_cilp->xc_cil_lock);
-               list_splice_init(&tp->t_busy,
-                                       &log->l_cilp->xc_ctx->busy_extents);
-               spin_unlock(&log->l_cilp->xc_cil_lock);
-       }
+       tp->t_commit_lsn = cil->xc_ctx->sequence;
+       if (commit_lsn)
+               *commit_lsn = tp->t_commit_lsn;
 
-       tp->t_commit_lsn = *commit_lsn;
        xfs_log_done(mp, tp->t_ticket, NULL, log_flags);
        xfs_trans_unreserve_and_mod_sb(tp);
 
@@ -757,11 +765,11 @@ xfs_log_commit_cil(
         * the log items. This affects (at least) processing of stale buffers,
         * inodes and EFIs.
         */
-       xfs_trans_free_items(tp, *commit_lsn, 0);
+       xfs_trans_free_items(tp, tp->t_commit_lsn, 0);
 
        xlog_cil_push_background(log);
 
-       up_read(&log->l_cilp->xc_ctx_lock);
+       up_read(&cil->xc_ctx_lock);
        return 0;
 }
 
@@ -800,7 +808,7 @@ xlog_cil_force_lsn(
         * on commits for those as well.
         */
 restart:
-       spin_lock(&cil->xc_cil_lock);
+       spin_lock(&cil->xc_push_lock);
        list_for_each_entry(ctx, &cil->xc_committing, committing) {
                if (ctx->sequence > sequence)
                        continue;
@@ -809,7 +817,7 @@ restart:
                         * It is still being pushed! Wait for the push to
                         * complete, then start again from the beginning.
                         */
-                       xlog_wait(&cil->xc_commit_wait, &cil->xc_cil_lock);
+                       xlog_wait(&cil->xc_commit_wait, &cil->xc_push_lock);
                        goto restart;
                }
                if (ctx->sequence != sequence)
@@ -817,7 +825,7 @@ restart:
                /* found it! */
                commit_lsn = ctx->commit_lsn;
        }
-       spin_unlock(&cil->xc_cil_lock);
+       spin_unlock(&cil->xc_push_lock);
        return commit_lsn;
 }
 
@@ -875,6 +883,7 @@ xlog_cil_init(
        INIT_LIST_HEAD(&cil->xc_cil);
        INIT_LIST_HEAD(&cil->xc_committing);
        spin_lock_init(&cil->xc_cil_lock);
+       spin_lock_init(&cil->xc_push_lock);
        init_rwsem(&cil->xc_ctx_lock);
        init_waitqueue_head(&cil->xc_commit_wait);
 
diff --git a/fs/xfs/xfs_log_format.h b/fs/xfs/xfs_log_format.h
new file mode 100644 (file)
index 0000000..a49ab2c
--- /dev/null
@@ -0,0 +1,846 @@
+/*
+ * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+#ifndef        __XFS_LOG_FORMAT_H__
+#define __XFS_LOG_FORMAT_H__
+
+struct xfs_mount;
+struct xfs_trans_res;
+
+/*
+ * On-disk Log Format definitions.
+ *
+ * This file contains all the on-disk format definitions used within the log. It
+ * includes the physical log structure itself, as well as all the log item
+ * format structures that are written into the log and intepreted by log
+ * recovery. We start with the physical log format definitions, and then work
+ * through all the log items definitions and everything they encode into the
+ * log.
+ */
+typedef __uint32_t xlog_tid_t;
+
+#define XLOG_MIN_ICLOGS                2
+#define XLOG_MAX_ICLOGS                8
+#define XLOG_HEADER_MAGIC_NUM  0xFEEDbabe      /* Invalid cycle number */
+#define XLOG_VERSION_1         1
+#define XLOG_VERSION_2         2               /* Large IClogs, Log sunit */
+#define XLOG_VERSION_OKBITS    (XLOG_VERSION_1 | XLOG_VERSION_2)
+#define XLOG_MIN_RECORD_BSIZE  (16*1024)       /* eventually 32k */
+#define XLOG_BIG_RECORD_BSIZE  (32*1024)       /* 32k buffers */
+#define XLOG_MAX_RECORD_BSIZE  (256*1024)
+#define XLOG_HEADER_CYCLE_SIZE (32*1024)       /* cycle data in header */
+#define XLOG_MIN_RECORD_BSHIFT 14              /* 16384 == 1 << 14 */
+#define XLOG_BIG_RECORD_BSHIFT 15              /* 32k == 1 << 15 */
+#define XLOG_MAX_RECORD_BSHIFT 18              /* 256k == 1 << 18 */
+#define XLOG_BTOLSUNIT(log, b)  (((b)+(log)->l_mp->m_sb.sb_logsunit-1) / \
+                                 (log)->l_mp->m_sb.sb_logsunit)
+#define XLOG_LSUNITTOB(log, su) ((su) * (log)->l_mp->m_sb.sb_logsunit)
+
+#define XLOG_HEADER_SIZE       512
+
+/* Minimum number of transactions that must fit in the log (defined by mkfs) */
+#define XFS_MIN_LOG_FACTOR     3
+
+#define XLOG_REC_SHIFT(log) \
+       BTOBB(1 << (xfs_sb_version_haslogv2(&log->l_mp->m_sb) ? \
+        XLOG_MAX_RECORD_BSHIFT : XLOG_BIG_RECORD_BSHIFT))
+#define XLOG_TOTAL_REC_SHIFT(log) \
+       BTOBB(XLOG_MAX_ICLOGS << (xfs_sb_version_haslogv2(&log->l_mp->m_sb) ? \
+        XLOG_MAX_RECORD_BSHIFT : XLOG_BIG_RECORD_BSHIFT))
+
+/* get lsn fields */
+#define CYCLE_LSN(lsn) ((uint)((lsn)>>32))
+#define BLOCK_LSN(lsn) ((uint)(lsn))
+
+/* this is used in a spot where we might otherwise double-endian-flip */
+#define CYCLE_LSN_DISK(lsn) (((__be32 *)&(lsn))[0])
+
+static inline xfs_lsn_t xlog_assign_lsn(uint cycle, uint block)
+{
+       return ((xfs_lsn_t)cycle << 32) | block;
+}
+
+static inline uint xlog_get_cycle(char *ptr)
+{
+       if (be32_to_cpu(*(__be32 *)ptr) == XLOG_HEADER_MAGIC_NUM)
+               return be32_to_cpu(*((__be32 *)ptr + 1));
+       else
+               return be32_to_cpu(*(__be32 *)ptr);
+}
+
+/* Log Clients */
+#define XFS_TRANSACTION                0x69
+#define XFS_VOLUME             0x2
+#define XFS_LOG                        0xaa
+
+#define XLOG_UNMOUNT_TYPE      0x556e  /* Un for Unmount */
+
+/* Region types for iovec's i_type */
+#define XLOG_REG_TYPE_BFORMAT          1
+#define XLOG_REG_TYPE_BCHUNK           2
+#define XLOG_REG_TYPE_EFI_FORMAT       3
+#define XLOG_REG_TYPE_EFD_FORMAT       4
+#define XLOG_REG_TYPE_IFORMAT          5
+#define XLOG_REG_TYPE_ICORE            6
+#define XLOG_REG_TYPE_IEXT             7
+#define XLOG_REG_TYPE_IBROOT           8
+#define XLOG_REG_TYPE_ILOCAL           9
+#define XLOG_REG_TYPE_IATTR_EXT                10
+#define XLOG_REG_TYPE_IATTR_BROOT      11
+#define XLOG_REG_TYPE_IATTR_LOCAL      12
+#define XLOG_REG_TYPE_QFORMAT          13
+#define XLOG_REG_TYPE_DQUOT            14
+#define XLOG_REG_TYPE_QUOTAOFF         15
+#define XLOG_REG_TYPE_LRHEADER         16
+#define XLOG_REG_TYPE_UNMOUNT          17
+#define XLOG_REG_TYPE_COMMIT           18
+#define XLOG_REG_TYPE_TRANSHDR         19
+#define XLOG_REG_TYPE_ICREATE          20
+#define XLOG_REG_TYPE_MAX              20
+
+/*
+ * Flags to log operation header
+ *
+ * The first write of a new transaction will be preceded with a start
+ * record, XLOG_START_TRANS.  Once a transaction is committed, a commit
+ * record is written, XLOG_COMMIT_TRANS.  If a single region can not fit into
+ * the remainder of the current active in-core log, it is split up into
+ * multiple regions.  Each partial region will be marked with a
+ * XLOG_CONTINUE_TRANS until the last one, which gets marked with XLOG_END_TRANS.
+ *
+ */
+#define XLOG_START_TRANS       0x01    /* Start a new transaction */
+#define XLOG_COMMIT_TRANS      0x02    /* Commit this transaction */
+#define XLOG_CONTINUE_TRANS    0x04    /* Cont this trans into new region */
+#define XLOG_WAS_CONT_TRANS    0x08    /* Cont this trans into new region */
+#define XLOG_END_TRANS         0x10    /* End a continued transaction */
+#define XLOG_UNMOUNT_TRANS     0x20    /* Unmount a filesystem transaction */
+
+
+typedef struct xlog_op_header {
+       __be32     oh_tid;      /* transaction id of operation  :  4 b */
+       __be32     oh_len;      /* bytes in data region         :  4 b */
+       __u8       oh_clientid; /* who sent me this             :  1 b */
+       __u8       oh_flags;    /*                              :  1 b */
+       __u16      oh_res2;     /* 32 bit align                 :  2 b */
+} xlog_op_header_t;
+
+/* valid values for h_fmt */
+#define XLOG_FMT_UNKNOWN  0
+#define XLOG_FMT_LINUX_LE 1
+#define XLOG_FMT_LINUX_BE 2
+#define XLOG_FMT_IRIX_BE  3
+
+/* our fmt */
+#ifdef XFS_NATIVE_HOST
+#define XLOG_FMT XLOG_FMT_LINUX_BE
+#else
+#define XLOG_FMT XLOG_FMT_LINUX_LE
+#endif
+
+typedef struct xlog_rec_header {
+       __be32    h_magicno;    /* log record (LR) identifier           :  4 */
+       __be32    h_cycle;      /* write cycle of log                   :  4 */
+       __be32    h_version;    /* LR version                           :  4 */
+       __be32    h_len;        /* len in bytes; should be 64-bit aligned: 4 */
+       __be64    h_lsn;        /* lsn of this LR                       :  8 */
+       __be64    h_tail_lsn;   /* lsn of 1st LR w/ buffers not committed: 8 */
+       __le32    h_crc;        /* crc of log record                    :  4 */
+       __be32    h_prev_block; /* block number to previous LR          :  4 */
+       __be32    h_num_logops; /* number of log operations in this LR  :  4 */
+       __be32    h_cycle_data[XLOG_HEADER_CYCLE_SIZE / BBSIZE];
+       /* new fields */
+       __be32    h_fmt;        /* format of log record                 :  4 */
+       uuid_t    h_fs_uuid;    /* uuid of FS                           : 16 */
+       __be32    h_size;       /* iclog size                           :  4 */
+} xlog_rec_header_t;
+
+typedef struct xlog_rec_ext_header {
+       __be32    xh_cycle;     /* write cycle of log                   : 4 */
+       __be32    xh_cycle_data[XLOG_HEADER_CYCLE_SIZE / BBSIZE]; /*    : 256 */
+} xlog_rec_ext_header_t;
+
+/*
+ * Quite misnamed, because this union lays out the actual on-disk log buffer.
+ */
+typedef union xlog_in_core2 {
+       xlog_rec_header_t       hic_header;
+       xlog_rec_ext_header_t   hic_xheader;
+       char                    hic_sector[XLOG_HEADER_SIZE];
+} xlog_in_core_2_t;
+
+/* not an on-disk structure, but needed by log recovery in userspace */
+typedef struct xfs_log_iovec {
+       void            *i_addr;        /* beginning address of region */
+       int             i_len;          /* length in bytes of region */
+       uint            i_type;         /* type of region */
+} xfs_log_iovec_t;
+
+
+/*
+ * Transaction Header definitions.
+ *
+ * This is the structure written in the log at the head of every transaction. It
+ * identifies the type and id of the transaction, and contains the number of
+ * items logged by the transaction so we know how many to expect during
+ * recovery.
+ *
+ * Do not change the below structure without redoing the code in
+ * xlog_recover_add_to_trans() and xlog_recover_add_to_cont_trans().
+ */
+typedef struct xfs_trans_header {
+       uint            th_magic;               /* magic number */
+       uint            th_type;                /* transaction type */
+       __int32_t       th_tid;                 /* transaction id (unused) */
+       uint            th_num_items;           /* num items logged by trans */
+} xfs_trans_header_t;
+
+#define        XFS_TRANS_HEADER_MAGIC  0x5452414e      /* TRAN */
+
+/*
+ * Log item types.
+ */
+#define        XFS_LI_EFI              0x1236
+#define        XFS_LI_EFD              0x1237
+#define        XFS_LI_IUNLINK          0x1238
+#define        XFS_LI_INODE            0x123b  /* aligned ino chunks, var-size ibufs */
+#define        XFS_LI_BUF              0x123c  /* v2 bufs, variable sized inode bufs */
+#define        XFS_LI_DQUOT            0x123d
+#define        XFS_LI_QUOTAOFF         0x123e
+#define        XFS_LI_ICREATE          0x123f
+
+#define XFS_LI_TYPE_DESC \
+       { XFS_LI_EFI,           "XFS_LI_EFI" }, \
+       { XFS_LI_EFD,           "XFS_LI_EFD" }, \
+       { XFS_LI_IUNLINK,       "XFS_LI_IUNLINK" }, \
+       { XFS_LI_INODE,         "XFS_LI_INODE" }, \
+       { XFS_LI_BUF,           "XFS_LI_BUF" }, \
+       { XFS_LI_DQUOT,         "XFS_LI_DQUOT" }, \
+       { XFS_LI_QUOTAOFF,      "XFS_LI_QUOTAOFF" }, \
+       { XFS_LI_ICREATE,       "XFS_LI_ICREATE" }
+
+/*
+ * Transaction types.  Used to distinguish types of buffers.
+ */
+#define XFS_TRANS_SETATTR_NOT_SIZE     1
+#define XFS_TRANS_SETATTR_SIZE         2
+#define XFS_TRANS_INACTIVE             3
+#define XFS_TRANS_CREATE               4
+#define XFS_TRANS_CREATE_TRUNC         5
+#define XFS_TRANS_TRUNCATE_FILE                6
+#define XFS_TRANS_REMOVE               7
+#define XFS_TRANS_LINK                 8
+#define XFS_TRANS_RENAME               9
+#define XFS_TRANS_MKDIR                        10
+#define XFS_TRANS_RMDIR                        11
+#define XFS_TRANS_SYMLINK              12
+#define XFS_TRANS_SET_DMATTRS          13
+#define XFS_TRANS_GROWFS               14
+#define XFS_TRANS_STRAT_WRITE          15
+#define XFS_TRANS_DIOSTRAT             16
+/* 17 was XFS_TRANS_WRITE_SYNC */
+#define        XFS_TRANS_WRITEID               18
+#define        XFS_TRANS_ADDAFORK              19
+#define        XFS_TRANS_ATTRINVAL             20
+#define        XFS_TRANS_ATRUNCATE             21
+#define        XFS_TRANS_ATTR_SET              22
+#define        XFS_TRANS_ATTR_RM               23
+#define        XFS_TRANS_ATTR_FLAG             24
+#define        XFS_TRANS_CLEAR_AGI_BUCKET      25
+#define XFS_TRANS_QM_SBCHANGE          26
+/*
+ * Dummy entries since we use the transaction type to index into the
+ * trans_type[] in xlog_recover_print_trans_head()
+ */
+#define XFS_TRANS_DUMMY1               27
+#define XFS_TRANS_DUMMY2               28
+#define XFS_TRANS_QM_QUOTAOFF          29
+#define XFS_TRANS_QM_DQALLOC           30
+#define XFS_TRANS_QM_SETQLIM           31
+#define XFS_TRANS_QM_DQCLUSTER         32
+#define XFS_TRANS_QM_QINOCREATE                33
+#define XFS_TRANS_QM_QUOTAOFF_END      34
+#define XFS_TRANS_SB_UNIT              35
+#define XFS_TRANS_FSYNC_TS             36
+#define        XFS_TRANS_GROWFSRT_ALLOC        37
+#define        XFS_TRANS_GROWFSRT_ZERO         38
+#define        XFS_TRANS_GROWFSRT_FREE         39
+#define        XFS_TRANS_SWAPEXT               40
+#define        XFS_TRANS_SB_COUNT              41
+#define        XFS_TRANS_CHECKPOINT            42
+#define        XFS_TRANS_ICREATE               43
+#define        XFS_TRANS_TYPE_MAX              43
+/* new transaction types need to be reflected in xfs_logprint(8) */
+
+#define XFS_TRANS_TYPES \
+       { XFS_TRANS_SETATTR_NOT_SIZE,   "SETATTR_NOT_SIZE" }, \
+       { XFS_TRANS_SETATTR_SIZE,       "SETATTR_SIZE" }, \
+       { XFS_TRANS_INACTIVE,           "INACTIVE" }, \
+       { XFS_TRANS_CREATE,             "CREATE" }, \
+       { XFS_TRANS_CREATE_TRUNC,       "CREATE_TRUNC" }, \
+       { XFS_TRANS_TRUNCATE_FILE,      "TRUNCATE_FILE" }, \
+       { XFS_TRANS_REMOVE,             "REMOVE" }, \
+       { XFS_TRANS_LINK,               "LINK" }, \
+       { XFS_TRANS_RENAME,             "RENAME" }, \
+       { XFS_TRANS_MKDIR,              "MKDIR" }, \
+       { XFS_TRANS_RMDIR,              "RMDIR" }, \
+       { XFS_TRANS_SYMLINK,            "SYMLINK" }, \
+       { XFS_TRANS_SET_DMATTRS,        "SET_DMATTRS" }, \
+       { XFS_TRANS_GROWFS,             "GROWFS" }, \
+       { XFS_TRANS_STRAT_WRITE,        "STRAT_WRITE" }, \
+       { XFS_TRANS_DIOSTRAT,           "DIOSTRAT" }, \
+       { XFS_TRANS_WRITEID,            "WRITEID" }, \
+       { XFS_TRANS_ADDAFORK,           "ADDAFORK" }, \
+       { XFS_TRANS_ATTRINVAL,          "ATTRINVAL" }, \
+       { XFS_TRANS_ATRUNCATE,          "ATRUNCATE" }, \
+       { XFS_TRANS_ATTR_SET,           "ATTR_SET" }, \
+       { XFS_TRANS_ATTR_RM,            "ATTR_RM" }, \
+       { XFS_TRANS_ATTR_FLAG,          "ATTR_FLAG" }, \
+       { XFS_TRANS_CLEAR_AGI_BUCKET,   "CLEAR_AGI_BUCKET" }, \
+       { XFS_TRANS_QM_SBCHANGE,        "QM_SBCHANGE" }, \
+       { XFS_TRANS_QM_QUOTAOFF,        "QM_QUOTAOFF" }, \
+       { XFS_TRANS_QM_DQALLOC,         "QM_DQALLOC" }, \
+       { XFS_TRANS_QM_SETQLIM,         "QM_SETQLIM" }, \
+       { XFS_TRANS_QM_DQCLUSTER,       "QM_DQCLUSTER" }, \
+       { XFS_TRANS_QM_QINOCREATE,      "QM_QINOCREATE" }, \
+       { XFS_TRANS_QM_QUOTAOFF_END,    "QM_QOFF_END" }, \
+       { XFS_TRANS_SB_UNIT,            "SB_UNIT" }, \
+       { XFS_TRANS_FSYNC_TS,           "FSYNC_TS" }, \
+       { XFS_TRANS_GROWFSRT_ALLOC,     "GROWFSRT_ALLOC" }, \
+       { XFS_TRANS_GROWFSRT_ZERO,      "GROWFSRT_ZERO" }, \
+       { XFS_TRANS_GROWFSRT_FREE,      "GROWFSRT_FREE" }, \
+       { XFS_TRANS_SWAPEXT,            "SWAPEXT" }, \
+       { XFS_TRANS_SB_COUNT,           "SB_COUNT" }, \
+       { XFS_TRANS_CHECKPOINT,         "CHECKPOINT" }, \
+       { XFS_TRANS_DUMMY1,             "DUMMY1" }, \
+       { XFS_TRANS_DUMMY2,             "DUMMY2" }, \
+       { XLOG_UNMOUNT_REC_TYPE,        "UNMOUNT" }
+
+/*
+ * This structure is used to track log items associated with
+ * a transaction.  It points to the log item and keeps some
+ * flags to track the state of the log item.  It also tracks
+ * the amount of space needed to log the item it describes
+ * once we get to commit processing (see xfs_trans_commit()).
+ */
+struct xfs_log_item_desc {
+       struct xfs_log_item     *lid_item;
+       struct list_head        lid_trans;
+       unsigned char           lid_flags;
+};
+
+#define XFS_LID_DIRTY          0x1
+
+/*
+ * Values for t_flags.
+ */
+#define        XFS_TRANS_DIRTY         0x01    /* something needs to be logged */
+#define        XFS_TRANS_SB_DIRTY      0x02    /* superblock is modified */
+#define        XFS_TRANS_PERM_LOG_RES  0x04    /* xact took a permanent log res */
+#define        XFS_TRANS_SYNC          0x08    /* make commit synchronous */
+#define XFS_TRANS_DQ_DIRTY     0x10    /* at least one dquot in trx dirty */
+#define XFS_TRANS_RESERVE      0x20    /* OK to use reserved data blocks */
+#define XFS_TRANS_FREEZE_PROT  0x40    /* Transaction has elevated writer
+                                          count in superblock */
+
+/*
+ * Values for call flags parameter.
+ */
+#define        XFS_TRANS_RELEASE_LOG_RES       0x4
+#define        XFS_TRANS_ABORT                 0x8
+
+/*
+ * Field values for xfs_trans_mod_sb.
+ */
+#define        XFS_TRANS_SB_ICOUNT             0x00000001
+#define        XFS_TRANS_SB_IFREE              0x00000002
+#define        XFS_TRANS_SB_FDBLOCKS           0x00000004
+#define        XFS_TRANS_SB_RES_FDBLOCKS       0x00000008
+#define        XFS_TRANS_SB_FREXTENTS          0x00000010
+#define        XFS_TRANS_SB_RES_FREXTENTS      0x00000020
+#define        XFS_TRANS_SB_DBLOCKS            0x00000040
+#define        XFS_TRANS_SB_AGCOUNT            0x00000080
+#define        XFS_TRANS_SB_IMAXPCT            0x00000100
+#define        XFS_TRANS_SB_REXTSIZE           0x00000200
+#define        XFS_TRANS_SB_RBMBLOCKS          0x00000400
+#define        XFS_TRANS_SB_RBLOCKS            0x00000800
+#define        XFS_TRANS_SB_REXTENTS           0x00001000
+#define        XFS_TRANS_SB_REXTSLOG           0x00002000
+
+/*
+ * Here we centralize the specification of XFS meta-data buffer
+ * reference count values.  This determine how hard the buffer
+ * cache tries to hold onto the buffer.
+ */
+#define        XFS_AGF_REF             4
+#define        XFS_AGI_REF             4
+#define        XFS_AGFL_REF            3
+#define        XFS_INO_BTREE_REF       3
+#define        XFS_ALLOC_BTREE_REF     2
+#define        XFS_BMAP_BTREE_REF      2
+#define        XFS_DIR_BTREE_REF       2
+#define        XFS_INO_REF             2
+#define        XFS_ATTR_BTREE_REF      1
+#define        XFS_DQUOT_REF           1
+
+/*
+ * Flags for xfs_trans_ichgtime().
+ */
+#define        XFS_ICHGTIME_MOD        0x1     /* data fork modification timestamp */
+#define        XFS_ICHGTIME_CHG        0x2     /* inode field change timestamp */
+#define        XFS_ICHGTIME_CREATE     0x4     /* inode create timestamp */
+
+
+/*
+ * Inode Log Item Format definitions.
+ *
+ * This is the structure used to lay out an inode log item in the
+ * log.  The size of the inline data/extents/b-tree root to be logged
+ * (if any) is indicated in the ilf_dsize field.  Changes to this structure
+ * must be added on to the end.
+ */
+typedef struct xfs_inode_log_format {
+       __uint16_t              ilf_type;       /* inode log item type */
+       __uint16_t              ilf_size;       /* size of this item */
+       __uint32_t              ilf_fields;     /* flags for fields logged */
+       __uint16_t              ilf_asize;      /* size of attr d/ext/root */
+       __uint16_t              ilf_dsize;      /* size of data/ext/root */
+       __uint64_t              ilf_ino;        /* inode number */
+       union {
+               __uint32_t      ilfu_rdev;      /* rdev value for dev inode*/
+               uuid_t          ilfu_uuid;      /* mount point value */
+       } ilf_u;
+       __int64_t               ilf_blkno;      /* blkno of inode buffer */
+       __int32_t               ilf_len;        /* len of inode buffer */
+       __int32_t               ilf_boffset;    /* off of inode in buffer */
+} xfs_inode_log_format_t;
+
+typedef struct xfs_inode_log_format_32 {
+       __uint16_t              ilf_type;       /* inode log item type */
+       __uint16_t              ilf_size;       /* size of this item */
+       __uint32_t              ilf_fields;     /* flags for fields logged */
+       __uint16_t              ilf_asize;      /* size of attr d/ext/root */
+       __uint16_t              ilf_dsize;      /* size of data/ext/root */
+       __uint64_t              ilf_ino;        /* inode number */
+       union {
+               __uint32_t      ilfu_rdev;      /* rdev value for dev inode*/
+               uuid_t          ilfu_uuid;      /* mount point value */
+       } ilf_u;
+       __int64_t               ilf_blkno;      /* blkno of inode buffer */
+       __int32_t               ilf_len;        /* len of inode buffer */
+       __int32_t               ilf_boffset;    /* off of inode in buffer */
+} __attribute__((packed)) xfs_inode_log_format_32_t;
+
+typedef struct xfs_inode_log_format_64 {
+       __uint16_t              ilf_type;       /* inode log item type */
+       __uint16_t              ilf_size;       /* size of this item */
+       __uint32_t              ilf_fields;     /* flags for fields logged */
+       __uint16_t              ilf_asize;      /* size of attr d/ext/root */
+       __uint16_t              ilf_dsize;      /* size of data/ext/root */
+       __uint32_t              ilf_pad;        /* pad for 64 bit boundary */
+       __uint64_t              ilf_ino;        /* inode number */
+       union {
+               __uint32_t      ilfu_rdev;      /* rdev value for dev inode*/
+               uuid_t          ilfu_uuid;      /* mount point value */
+       } ilf_u;
+       __int64_t               ilf_blkno;      /* blkno of inode buffer */
+       __int32_t               ilf_len;        /* len of inode buffer */
+       __int32_t               ilf_boffset;    /* off of inode in buffer */
+} xfs_inode_log_format_64_t;
+
+/*
+ * Flags for xfs_trans_log_inode flags field.
+ */
+#define        XFS_ILOG_CORE   0x001   /* log standard inode fields */
+#define        XFS_ILOG_DDATA  0x002   /* log i_df.if_data */
+#define        XFS_ILOG_DEXT   0x004   /* log i_df.if_extents */
+#define        XFS_ILOG_DBROOT 0x008   /* log i_df.i_broot */
+#define        XFS_ILOG_DEV    0x010   /* log the dev field */
+#define        XFS_ILOG_UUID   0x020   /* log the uuid field */
+#define        XFS_ILOG_ADATA  0x040   /* log i_af.if_data */
+#define        XFS_ILOG_AEXT   0x080   /* log i_af.if_extents */
+#define        XFS_ILOG_ABROOT 0x100   /* log i_af.i_broot */
+
+
+/*
+ * The timestamps are dirty, but not necessarily anything else in the inode
+ * core.  Unlike the other fields above this one must never make it to disk
+ * in the ilf_fields of the inode_log_format, but is purely store in-memory in
+ * ili_fields in the inode_log_item.
+ */
+#define XFS_ILOG_TIMESTAMP     0x4000
+
+#define        XFS_ILOG_NONCORE        (XFS_ILOG_DDATA | XFS_ILOG_DEXT | \
+                                XFS_ILOG_DBROOT | XFS_ILOG_DEV | \
+                                XFS_ILOG_UUID | XFS_ILOG_ADATA | \
+                                XFS_ILOG_AEXT | XFS_ILOG_ABROOT)
+
+#define        XFS_ILOG_DFORK          (XFS_ILOG_DDATA | XFS_ILOG_DEXT | \
+                                XFS_ILOG_DBROOT)
+
+#define        XFS_ILOG_AFORK          (XFS_ILOG_ADATA | XFS_ILOG_AEXT | \
+                                XFS_ILOG_ABROOT)
+
+#define        XFS_ILOG_ALL            (XFS_ILOG_CORE | XFS_ILOG_DDATA | \
+                                XFS_ILOG_DEXT | XFS_ILOG_DBROOT | \
+                                XFS_ILOG_DEV | XFS_ILOG_UUID | \
+                                XFS_ILOG_ADATA | XFS_ILOG_AEXT | \
+                                XFS_ILOG_ABROOT | XFS_ILOG_TIMESTAMP)
+
+static inline int xfs_ilog_fbroot(int w)
+{
+       return (w == XFS_DATA_FORK ? XFS_ILOG_DBROOT : XFS_ILOG_ABROOT);
+}
+
+static inline int xfs_ilog_fext(int w)
+{
+       return (w == XFS_DATA_FORK ? XFS_ILOG_DEXT : XFS_ILOG_AEXT);
+}
+
+static inline int xfs_ilog_fdata(int w)
+{
+       return (w == XFS_DATA_FORK ? XFS_ILOG_DDATA : XFS_ILOG_ADATA);
+}
+
+/*
+ * Incore version of the on-disk inode core structures. We log this directly
+ * into the journal in host CPU format (for better or worse) and as such
+ * directly mirrors the xfs_dinode structure as it must contain all the same
+ * information.
+ */
+typedef struct xfs_ictimestamp {
+       __int32_t       t_sec;          /* timestamp seconds */
+       __int32_t       t_nsec;         /* timestamp nanoseconds */
+} xfs_ictimestamp_t;
+
+/*
+ * NOTE:  This structure must be kept identical to struct xfs_dinode
+ *       in xfs_dinode.h except for the endianness annotations.
+ */
+typedef struct xfs_icdinode {
+       __uint16_t      di_magic;       /* inode magic # = XFS_DINODE_MAGIC */
+       __uint16_t      di_mode;        /* mode and type of file */
+       __int8_t        di_version;     /* inode version */
+       __int8_t        di_format;      /* format of di_c data */
+       __uint16_t      di_onlink;      /* old number of links to file */
+       __uint32_t      di_uid;         /* owner's user id */
+       __uint32_t      di_gid;         /* owner's group id */
+       __uint32_t      di_nlink;       /* number of links to file */
+       __uint16_t      di_projid_lo;   /* lower part of owner's project id */
+       __uint16_t      di_projid_hi;   /* higher part of owner's project id */
+       __uint8_t       di_pad[6];      /* unused, zeroed space */
+       __uint16_t      di_flushiter;   /* incremented on flush */
+       xfs_ictimestamp_t di_atime;     /* time last accessed */
+       xfs_ictimestamp_t di_mtime;     /* time last modified */
+       xfs_ictimestamp_t di_ctime;     /* time created/inode modified */
+       xfs_fsize_t     di_size;        /* number of bytes in file */
+       xfs_drfsbno_t   di_nblocks;     /* # of direct & btree blocks used */
+       xfs_extlen_t    di_extsize;     /* basic/minimum extent size for file */
+       xfs_extnum_t    di_nextents;    /* number of extents in data fork */
+       xfs_aextnum_t   di_anextents;   /* number of extents in attribute fork*/
+       __uint8_t       di_forkoff;     /* attr fork offs, <<3 for 64b align */
+       __int8_t        di_aformat;     /* format of attr fork's data */
+       __uint32_t      di_dmevmask;    /* DMIG event mask */
+       __uint16_t      di_dmstate;     /* DMIG state info */
+       __uint16_t      di_flags;       /* random flags, XFS_DIFLAG_... */
+       __uint32_t      di_gen;         /* generation number */
+
+       /* di_next_unlinked is the only non-core field in the old dinode */
+       xfs_agino_t     di_next_unlinked;/* agi unlinked list ptr */
+
+       /* start of the extended dinode, writable fields */
+       __uint32_t      di_crc;         /* CRC of the inode */
+       __uint64_t      di_changecount; /* number of attribute changes */
+       xfs_lsn_t       di_lsn;         /* flush sequence */
+       __uint64_t      di_flags2;      /* more random flags */
+       __uint8_t       di_pad2[16];    /* more padding for future expansion */
+
+       /* fields only written to during inode creation */
+       xfs_ictimestamp_t di_crtime;    /* time created */
+       xfs_ino_t       di_ino;         /* inode number */
+       uuid_t          di_uuid;        /* UUID of the filesystem */
+
+       /* structure must be padded to 64 bit alignment */
+} xfs_icdinode_t;
+
+static inline uint xfs_icdinode_size(int version)
+{
+       if (version == 3)
+               return sizeof(struct xfs_icdinode);
+       return offsetof(struct xfs_icdinode, di_next_unlinked);
+}
+
+/*
+ * Buffer Log Format defintions
+ *
+ * These are the physical dirty bitmap defintions for the log format structure.
+ */
+#define        XFS_BLF_CHUNK           128
+#define        XFS_BLF_SHIFT           7
+#define        BIT_TO_WORD_SHIFT       5
+#define        NBWORD                  (NBBY * sizeof(unsigned int))
+
+/*
+ * This flag indicates that the buffer contains on disk inodes
+ * and requires special recovery handling.
+ */
+#define        XFS_BLF_INODE_BUF       (1<<0)
+
+/*
+ * This flag indicates that the buffer should not be replayed
+ * during recovery because its blocks are being freed.
+ */
+#define        XFS_BLF_CANCEL          (1<<1)
+
+/*
+ * This flag indicates that the buffer contains on disk
+ * user or group dquots and may require special recovery handling.
+ */
+#define        XFS_BLF_UDQUOT_BUF      (1<<2)
+#define XFS_BLF_PDQUOT_BUF     (1<<3)
+#define        XFS_BLF_GDQUOT_BUF      (1<<4)
+
+/*
+ * This is the structure used to lay out a buf log item in the
+ * log.  The data map describes which 128 byte chunks of the buffer
+ * have been logged.
+ */
+#define XFS_BLF_DATAMAP_SIZE   ((XFS_MAX_BLOCKSIZE / XFS_BLF_CHUNK) / NBWORD)
+
+typedef struct xfs_buf_log_format {
+       unsigned short  blf_type;       /* buf log item type indicator */
+       unsigned short  blf_size;       /* size of this item */
+       ushort          blf_flags;      /* misc state */
+       ushort          blf_len;        /* number of blocks in this buf */
+       __int64_t       blf_blkno;      /* starting blkno of this buf */
+       unsigned int    blf_map_size;   /* used size of data bitmap in words */
+       unsigned int    blf_data_map[XFS_BLF_DATAMAP_SIZE]; /* dirty bitmap */
+} xfs_buf_log_format_t;
+
+/*
+ * All buffers now need to tell recovery where the magic number
+ * is so that it can verify and calculate the CRCs on the buffer correctly
+ * once the changes have been replayed into the buffer.
+ *
+ * The type value is held in the upper 5 bits of the blf_flags field, which is
+ * an unsigned 16 bit field. Hence we need to shift it 11 bits up and down.
+ */
+#define XFS_BLFT_BITS  5
+#define XFS_BLFT_SHIFT 11
+#define XFS_BLFT_MASK  (((1 << XFS_BLFT_BITS) - 1) << XFS_BLFT_SHIFT)
+
+enum xfs_blft {
+       XFS_BLFT_UNKNOWN_BUF = 0,
+       XFS_BLFT_UDQUOT_BUF,
+       XFS_BLFT_PDQUOT_BUF,
+       XFS_BLFT_GDQUOT_BUF,
+       XFS_BLFT_BTREE_BUF,
+       XFS_BLFT_AGF_BUF,
+       XFS_BLFT_AGFL_BUF,
+       XFS_BLFT_AGI_BUF,
+       XFS_BLFT_DINO_BUF,
+       XFS_BLFT_SYMLINK_BUF,
+       XFS_BLFT_DIR_BLOCK_BUF,
+       XFS_BLFT_DIR_DATA_BUF,
+       XFS_BLFT_DIR_FREE_BUF,
+       XFS_BLFT_DIR_LEAF1_BUF,
+       XFS_BLFT_DIR_LEAFN_BUF,
+       XFS_BLFT_DA_NODE_BUF,
+       XFS_BLFT_ATTR_LEAF_BUF,
+       XFS_BLFT_ATTR_RMT_BUF,
+       XFS_BLFT_SB_BUF,
+       XFS_BLFT_MAX_BUF = (1 << XFS_BLFT_BITS),
+};
+
+static inline void
+xfs_blft_to_flags(struct xfs_buf_log_format *blf, enum xfs_blft type)
+{
+       ASSERT(type > XFS_BLFT_UNKNOWN_BUF && type < XFS_BLFT_MAX_BUF);
+       blf->blf_flags &= ~XFS_BLFT_MASK;
+       blf->blf_flags |= ((type << XFS_BLFT_SHIFT) & XFS_BLFT_MASK);
+}
+
+static inline __uint16_t
+xfs_blft_from_flags(struct xfs_buf_log_format *blf)
+{
+       return (blf->blf_flags & XFS_BLFT_MASK) >> XFS_BLFT_SHIFT;
+}
+
+/*
+ * EFI/EFD log format definitions
+ */
+typedef struct xfs_extent {
+       xfs_dfsbno_t    ext_start;
+       xfs_extlen_t    ext_len;
+} xfs_extent_t;
+
+/*
+ * Since an xfs_extent_t has types (start:64, len: 32)
+ * there are different alignments on 32 bit and 64 bit kernels.
+ * So we provide the different variants for use by a
+ * conversion routine.
+ */
+typedef struct xfs_extent_32 {
+       __uint64_t      ext_start;
+       __uint32_t      ext_len;
+} __attribute__((packed)) xfs_extent_32_t;
+
+typedef struct xfs_extent_64 {
+       __uint64_t      ext_start;
+       __uint32_t      ext_len;
+       __uint32_t      ext_pad;
+} xfs_extent_64_t;
+
+/*
+ * This is the structure used to lay out an efi log item in the
+ * log.  The efi_extents field is a variable size array whose
+ * size is given by efi_nextents.
+ */
+typedef struct xfs_efi_log_format {
+       __uint16_t              efi_type;       /* efi log item type */
+       __uint16_t              efi_size;       /* size of this item */
+       __uint32_t              efi_nextents;   /* # extents to free */
+       __uint64_t              efi_id;         /* efi identifier */
+       xfs_extent_t            efi_extents[1]; /* array of extents to free */
+} xfs_efi_log_format_t;
+
+typedef struct xfs_efi_log_format_32 {
+       __uint16_t              efi_type;       /* efi log item type */
+       __uint16_t              efi_size;       /* size of this item */
+       __uint32_t              efi_nextents;   /* # extents to free */
+       __uint64_t              efi_id;         /* efi identifier */
+       xfs_extent_32_t         efi_extents[1]; /* array of extents to free */
+} __attribute__((packed)) xfs_efi_log_format_32_t;
+
+typedef struct xfs_efi_log_format_64 {
+       __uint16_t              efi_type;       /* efi log item type */
+       __uint16_t              efi_size;       /* size of this item */
+       __uint32_t              efi_nextents;   /* # extents to free */
+       __uint64_t              efi_id;         /* efi identifier */
+       xfs_extent_64_t         efi_extents[1]; /* array of extents to free */
+} xfs_efi_log_format_64_t;
+
+/*
+ * This is the structure used to lay out an efd log item in the
+ * log.  The efd_extents array is a variable size array whose
+ * size is given by efd_nextents;
+ */
+typedef struct xfs_efd_log_format {
+       __uint16_t              efd_type;       /* efd log item type */
+       __uint16_t              efd_size;       /* size of this item */
+       __uint32_t              efd_nextents;   /* # of extents freed */
+       __uint64_t              efd_efi_id;     /* id of corresponding efi */
+       xfs_extent_t            efd_extents[1]; /* array of extents freed */
+} xfs_efd_log_format_t;
+
+typedef struct xfs_efd_log_format_32 {
+       __uint16_t              efd_type;       /* efd log item type */
+       __uint16_t              efd_size;       /* size of this item */
+       __uint32_t              efd_nextents;   /* # of extents freed */
+       __uint64_t              efd_efi_id;     /* id of corresponding efi */
+       xfs_extent_32_t         efd_extents[1]; /* array of extents freed */
+} __attribute__((packed)) xfs_efd_log_format_32_t;
+
+typedef struct xfs_efd_log_format_64 {
+       __uint16_t              efd_type;       /* efd log item type */
+       __uint16_t              efd_size;       /* size of this item */
+       __uint32_t              efd_nextents;   /* # of extents freed */
+       __uint64_t              efd_efi_id;     /* id of corresponding efi */
+       xfs_extent_64_t         efd_extents[1]; /* array of extents freed */
+} xfs_efd_log_format_64_t;
+
+/*
+ * Dquot Log format definitions.
+ *
+ * The first two fields must be the type and size fitting into
+ * 32 bits : log_recovery code assumes that.
+ */
+typedef struct xfs_dq_logformat {
+       __uint16_t              qlf_type;      /* dquot log item type */
+       __uint16_t              qlf_size;      /* size of this item */
+       xfs_dqid_t              qlf_id;        /* usr/grp/proj id : 32 bits */
+       __int64_t               qlf_blkno;     /* blkno of dquot buffer */
+       __int32_t               qlf_len;       /* len of dquot buffer */
+       __uint32_t              qlf_boffset;   /* off of dquot in buffer */
+} xfs_dq_logformat_t;
+
+/*
+ * log format struct for QUOTAOFF records.
+ * The first two fields must be the type and size fitting into
+ * 32 bits : log_recovery code assumes that.
+ * We write two LI_QUOTAOFF logitems per quotaoff, the last one keeps a pointer
+ * to the first and ensures that the first logitem is taken out of the AIL
+ * only when the last one is securely committed.
+ */
+typedef struct xfs_qoff_logformat {
+       unsigned short          qf_type;        /* quotaoff log item type */
+       unsigned short          qf_size;        /* size of this item */
+       unsigned int            qf_flags;       /* USR and/or GRP */
+       char                    qf_pad[12];     /* padding for future */
+} xfs_qoff_logformat_t;
+
+
+/*
+ * Disk quotas status in m_qflags, and also sb_qflags. 16 bits.
+ */
+#define XFS_UQUOTA_ACCT        0x0001  /* user quota accounting ON */
+#define XFS_UQUOTA_ENFD        0x0002  /* user quota limits enforced */
+#define XFS_UQUOTA_CHKD        0x0004  /* quotacheck run on usr quotas */
+#define XFS_PQUOTA_ACCT        0x0008  /* project quota accounting ON */
+#define XFS_OQUOTA_ENFD        0x0010  /* other (grp/prj) quota limits enforced */
+#define XFS_OQUOTA_CHKD        0x0020  /* quotacheck run on other (grp/prj) quotas */
+#define XFS_GQUOTA_ACCT        0x0040  /* group quota accounting ON */
+
+/*
+ * Conversion to and from the combined OQUOTA flag (if necessary)
+ * is done only in xfs_sb_qflags_to_disk() and xfs_sb_qflags_from_disk()
+ */
+#define XFS_GQUOTA_ENFD        0x0080  /* group quota limits enforced */
+#define XFS_GQUOTA_CHKD        0x0100  /* quotacheck run on group quotas */
+#define XFS_PQUOTA_ENFD        0x0200  /* project quota limits enforced */
+#define XFS_PQUOTA_CHKD        0x0400  /* quotacheck run on project quotas */
+
+#define XFS_ALL_QUOTA_ACCT     \
+               (XFS_UQUOTA_ACCT | XFS_GQUOTA_ACCT | XFS_PQUOTA_ACCT)
+#define XFS_ALL_QUOTA_ENFD     \
+               (XFS_UQUOTA_ENFD | XFS_GQUOTA_ENFD | XFS_PQUOTA_ENFD)
+#define XFS_ALL_QUOTA_CHKD     \
+               (XFS_UQUOTA_CHKD | XFS_GQUOTA_CHKD | XFS_PQUOTA_CHKD)
+
+/*
+ * Inode create log item structure
+ *
+ * Log recovery assumes the first two entries are the type and size and they fit
+ * in 32 bits. Also in host order (ugh) so they have to be 32 bit aligned so
+ * decoding can be done correctly.
+ */
+struct xfs_icreate_log {
+       __uint16_t      icl_type;       /* type of log format structure */
+       __uint16_t      icl_size;       /* size of log format structure */
+       __be32          icl_ag;         /* ag being allocated in */
+       __be32          icl_agbno;      /* start block of inode range */
+       __be32          icl_count;      /* number of inodes to initialise */
+       __be32          icl_isize;      /* size of inodes */
+       __be32          icl_length;     /* length of extent to initialise */
+       __be32          icl_gen;        /* inode generation number to use */
+};
+
+int    xfs_log_calc_unit_res(struct xfs_mount *mp, int unit_bytes);
+int    xfs_log_calc_minimum_size(struct xfs_mount *);
+
+
+#endif /* __XFS_LOG_FORMAT_H__ */
index b9ea262dd1c2c7575fee738fba6e71575180924a..136654b9400df9b28a40415b1fbca3c9b7d6a958 100644 (file)
@@ -24,51 +24,13 @@ struct xlog_ticket;
 struct xfs_mount;
 
 /*
- * Macros, structures, prototypes for internal log manager use.
+ * Flags for log structure
  */
-
-#define XLOG_MIN_ICLOGS                2
-#define XLOG_MAX_ICLOGS                8
-#define XLOG_HEADER_MAGIC_NUM  0xFEEDbabe      /* Invalid cycle number */
-#define XLOG_VERSION_1         1
-#define XLOG_VERSION_2         2               /* Large IClogs, Log sunit */
-#define XLOG_VERSION_OKBITS    (XLOG_VERSION_1 | XLOG_VERSION_2)
-#define XLOG_MIN_RECORD_BSIZE  (16*1024)       /* eventually 32k */
-#define XLOG_BIG_RECORD_BSIZE  (32*1024)       /* 32k buffers */
-#define XLOG_MAX_RECORD_BSIZE  (256*1024)
-#define XLOG_HEADER_CYCLE_SIZE (32*1024)       /* cycle data in header */
-#define XLOG_MIN_RECORD_BSHIFT 14              /* 16384 == 1 << 14 */
-#define XLOG_BIG_RECORD_BSHIFT 15              /* 32k == 1 << 15 */
-#define XLOG_MAX_RECORD_BSHIFT 18              /* 256k == 1 << 18 */
-#define XLOG_BTOLSUNIT(log, b)  (((b)+(log)->l_mp->m_sb.sb_logsunit-1) / \
-                                 (log)->l_mp->m_sb.sb_logsunit)
-#define XLOG_LSUNITTOB(log, su) ((su) * (log)->l_mp->m_sb.sb_logsunit)
-
-#define XLOG_HEADER_SIZE       512
-
-#define XLOG_REC_SHIFT(log) \
-       BTOBB(1 << (xfs_sb_version_haslogv2(&log->l_mp->m_sb) ? \
-        XLOG_MAX_RECORD_BSHIFT : XLOG_BIG_RECORD_BSHIFT))
-#define XLOG_TOTAL_REC_SHIFT(log) \
-       BTOBB(XLOG_MAX_ICLOGS << (xfs_sb_version_haslogv2(&log->l_mp->m_sb) ? \
-        XLOG_MAX_RECORD_BSHIFT : XLOG_BIG_RECORD_BSHIFT))
-
-static inline xfs_lsn_t xlog_assign_lsn(uint cycle, uint block)
-{
-       return ((xfs_lsn_t)cycle << 32) | block;
-}
-
-static inline uint xlog_get_cycle(char *ptr)
-{
-       if (be32_to_cpu(*(__be32 *)ptr) == XLOG_HEADER_MAGIC_NUM)
-               return be32_to_cpu(*((__be32 *)ptr + 1));
-       else
-               return be32_to_cpu(*(__be32 *)ptr);
-}
-
-#define BLK_AVG(blk1, blk2)    ((blk1+blk2) >> 1)
-
-#ifdef __KERNEL__
+#define XLOG_ACTIVE_RECOVERY   0x2     /* in the middle of recovery */
+#define        XLOG_RECOVERY_NEEDED    0x4     /* log was recovered */
+#define XLOG_IO_ERROR          0x8     /* log hit an I/O error, and being
+                                          shutdown */
+#define XLOG_TAIL_WARN         0x10    /* log tail verify warning issued */
 
 /*
  * get client id from packed copy.
@@ -101,27 +63,7 @@ static inline uint xlog_get_client_id(__be32 i)
 #define XLOG_STATE_IOERROR   0x0080 /* IO error happened in sync'ing log */
 #define XLOG_STATE_ALL      0x7FFF /* All possible valid flags */
 #define XLOG_STATE_NOTUSED   0x8000 /* This IC log not being used */
-#endif /* __KERNEL__ */
 
-/*
- * Flags to log operation header
- *
- * The first write of a new transaction will be preceded with a start
- * record, XLOG_START_TRANS.  Once a transaction is committed, a commit
- * record is written, XLOG_COMMIT_TRANS.  If a single region can not fit into
- * the remainder of the current active in-core log, it is split up into
- * multiple regions.  Each partial region will be marked with a
- * XLOG_CONTINUE_TRANS until the last one, which gets marked with XLOG_END_TRANS.
- *
- */
-#define XLOG_START_TRANS       0x01    /* Start a new transaction */
-#define XLOG_COMMIT_TRANS      0x02    /* Commit this transaction */
-#define XLOG_CONTINUE_TRANS    0x04    /* Cont this trans into new region */
-#define XLOG_WAS_CONT_TRANS    0x08    /* Cont this trans into new region */
-#define XLOG_END_TRANS         0x10    /* End a continued transaction */
-#define XLOG_UNMOUNT_TRANS     0x20    /* Unmount a filesystem transaction */
-
-#ifdef __KERNEL__
 /*
  * Flags to log ticket
  */
@@ -132,22 +74,6 @@ static inline uint xlog_get_client_id(__be32 i)
        { XLOG_TIC_INITED,      "XLOG_TIC_INITED" }, \
        { XLOG_TIC_PERM_RESERV, "XLOG_TIC_PERM_RESERV" }
 
-#endif /* __KERNEL__ */
-
-#define XLOG_UNMOUNT_TYPE      0x556e  /* Un for Unmount */
-
-/*
- * Flags for log structure
- */
-#define XLOG_ACTIVE_RECOVERY   0x2     /* in the middle of recovery */
-#define        XLOG_RECOVERY_NEEDED    0x4     /* log was recovered */
-#define XLOG_IO_ERROR          0x8     /* log hit an I/O error, and being
-                                          shutdown */
-#define XLOG_TAIL_WARN         0x10    /* log tail verify warning issued */
-
-typedef __uint32_t xlog_tid_t;
-
-#ifdef __KERNEL__
 /*
  * Below are states for covering allocation transactions.
  * By covering, we mean changing the h_tail_lsn in the last on-disk
@@ -223,7 +149,6 @@ typedef __uint32_t xlog_tid_t;
 
 #define XLOG_COVER_OPS         5
 
-
 /* Ticket reservation region accounting */ 
 #define XLOG_TIC_LEN_MAX       15
 
@@ -258,64 +183,6 @@ typedef struct xlog_ticket {
        xlog_res_t         t_res_arr[XLOG_TIC_LEN_MAX];  /* array of res : 8 * 15 */ 
 } xlog_ticket_t;
 
-#endif
-
-
-typedef struct xlog_op_header {
-       __be32     oh_tid;      /* transaction id of operation  :  4 b */
-       __be32     oh_len;      /* bytes in data region         :  4 b */
-       __u8       oh_clientid; /* who sent me this             :  1 b */
-       __u8       oh_flags;    /*                              :  1 b */
-       __u16      oh_res2;     /* 32 bit align                 :  2 b */
-} xlog_op_header_t;
-
-
-/* valid values for h_fmt */
-#define XLOG_FMT_UNKNOWN  0
-#define XLOG_FMT_LINUX_LE 1
-#define XLOG_FMT_LINUX_BE 2
-#define XLOG_FMT_IRIX_BE  3
-
-/* our fmt */
-#ifdef XFS_NATIVE_HOST
-#define XLOG_FMT XLOG_FMT_LINUX_BE
-#else
-#define XLOG_FMT XLOG_FMT_LINUX_LE
-#endif
-
-typedef struct xlog_rec_header {
-       __be32    h_magicno;    /* log record (LR) identifier           :  4 */
-       __be32    h_cycle;      /* write cycle of log                   :  4 */
-       __be32    h_version;    /* LR version                           :  4 */
-       __be32    h_len;        /* len in bytes; should be 64-bit aligned: 4 */
-       __be64    h_lsn;        /* lsn of this LR                       :  8 */
-       __be64    h_tail_lsn;   /* lsn of 1st LR w/ buffers not committed: 8 */
-       __le32    h_crc;        /* crc of log record                    :  4 */
-       __be32    h_prev_block; /* block number to previous LR          :  4 */
-       __be32    h_num_logops; /* number of log operations in this LR  :  4 */
-       __be32    h_cycle_data[XLOG_HEADER_CYCLE_SIZE / BBSIZE];
-       /* new fields */
-       __be32    h_fmt;        /* format of log record                 :  4 */
-       uuid_t    h_fs_uuid;    /* uuid of FS                           : 16 */
-       __be32    h_size;       /* iclog size                           :  4 */
-} xlog_rec_header_t;
-
-typedef struct xlog_rec_ext_header {
-       __be32    xh_cycle;     /* write cycle of log                   : 4 */
-       __be32    xh_cycle_data[XLOG_HEADER_CYCLE_SIZE / BBSIZE]; /*    : 256 */
-} xlog_rec_ext_header_t;
-
-#ifdef __KERNEL__
-
-/*
- * Quite misnamed, because this union lays out the actual on-disk log buffer.
- */
-typedef union xlog_in_core2 {
-       xlog_rec_header_t       hic_header;
-       xlog_rec_ext_header_t   hic_xheader;
-       char                    hic_sector[XLOG_HEADER_SIZE];
-} xlog_in_core_2_t;
-
 /*
  * - A log record header is 512 bytes.  There is plenty of room to grow the
  *     xlog_rec_header_t into the reserved space.
@@ -411,14 +278,17 @@ struct xfs_cil {
        struct xlog             *xc_log;
        struct list_head        xc_cil;
        spinlock_t              xc_cil_lock;
+
+       struct rw_semaphore     xc_ctx_lock ____cacheline_aligned_in_smp;
        struct xfs_cil_ctx      *xc_ctx;
-       struct rw_semaphore     xc_ctx_lock;
+
+       spinlock_t              xc_push_lock ____cacheline_aligned_in_smp;
+       xfs_lsn_t               xc_push_seq;
        struct list_head        xc_committing;
        wait_queue_head_t       xc_commit_wait;
        xfs_lsn_t               xc_current_sequence;
        struct work_struct      xc_push_work;
-       xfs_lsn_t               xc_push_seq;
-};
+} ____cacheline_aligned_in_smp;
 
 /*
  * The amount of log space we allow the CIL to aggregate is difficult to size.
@@ -686,6 +556,5 @@ static inline void xlog_wait(wait_queue_head_t *wq, spinlock_t *lock)
        schedule();
        remove_wait_queue(wq, &wait);
 }
-#endif /* __KERNEL__ */
 
 #endif /* __XFS_LOG_PRIV_H__ */
index 7681b19aa5dc565a9807005ff3f1d6428a22f016..82f99b8ce07b75c2b4e3142a9fd017555442a9ce 100644 (file)
@@ -17,7 +17,7 @@
  */
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_types.h"
+#include "xfs_format.h"
 #include "xfs_bit.h"
 #include "xfs_log.h"
 #include "xfs_inum.h"
@@ -41,7 +41,6 @@
 #include "xfs_extfree_item.h"
 #include "xfs_trans_priv.h"
 #include "xfs_quota.h"
-#include "xfs_utils.h"
 #include "xfs_cksum.h"
 #include "xfs_trace.h"
 #include "xfs_icache.h"
 #include "xfs_symlink.h"
 #include "xfs_da_btree.h"
 #include "xfs_dir2_format.h"
-#include "xfs_dir2_priv.h"
+#include "xfs_dir2.h"
 #include "xfs_attr_leaf.h"
 #include "xfs_attr_remote.h"
 
+#define BLK_AVG(blk1, blk2)    ((blk1+blk2) >> 1)
+
 STATIC int
 xlog_find_zeroed(
        struct xlog     *,
@@ -607,7 +608,7 @@ out:
 
 /*
  * Head is defined to be the point of the log where the next log write
- * write could go.  This means that incomplete LR writes at the end are
+ * could go.  This means that incomplete LR writes at the end are
  * eliminated when calculating the head.  We aren't guaranteed that previous
  * LR have complete transactions.  We only know that a cycle number of
  * current cycle number -1 won't be present in the log if we start writing
@@ -963,6 +964,7 @@ xlog_find_tail(
        }
        if (!found) {
                xfs_warn(log->l_mp, "%s: couldn't find sync record", __func__);
+               xlog_put_bp(bp);
                ASSERT(0);
                return XFS_ERROR(EIO);
        }
@@ -1144,7 +1146,8 @@ xlog_find_zeroed(
                 */
                xfs_warn(log->l_mp,
                        "Log inconsistent or not a log (last==0, first!=1)");
-               return XFS_ERROR(EINVAL);
+               error = XFS_ERROR(EINVAL);
+               goto bp_err;
        }
 
        /* we have a partially zeroed log */
@@ -2366,7 +2369,7 @@ xfs_qm_dqcheck(
 
 /*
  * Perform a dquot buffer recovery.
- * Simple algorithm: if we have found a QUOTAOFF logitem of the same type
+ * Simple algorithm: if we have found a QUOTAOFF log item of the same type
  * (ie. USR or GRP), then just toss this buffer away; don't recover it.
  * Else, treat it as a regular buffer and do recovery.
  */
@@ -2425,7 +2428,7 @@ xlog_recover_do_dquot_buffer(
  * over the log during recovery.  During the first we build a table of
  * those buffers which have been cancelled, and during the second we
  * only replay those buffers which do not have corresponding cancel
- * records in the table.  See xlog_recover_do_buffer_pass[1,2] above
+ * records in the table.  See xlog_recover_buffer_pass[1,2] above
  * for more details on the implementation of the table of cancel records.
  */
 STATIC int
@@ -3376,7 +3379,7 @@ xlog_recover_process_efi(
        }
 
        tp = xfs_trans_alloc(mp, 0);
-       error = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0, 0, 0);
+       error = xfs_trans_reserve(tp, &M_RES(mp)->tr_itruncate, 0, 0);
        if (error)
                goto abort_error;
        efdp = xfs_trans_get_efd(tp, efip, efip->efi_format.efi_nextents);
@@ -3482,8 +3485,7 @@ xlog_recover_clear_agi_bucket(
        int             error;
 
        tp = xfs_trans_alloc(mp, XFS_TRANS_CLEAR_AGI_BUCKET);
-       error = xfs_trans_reserve(tp, 0, XFS_CLEAR_AGI_BUCKET_LOG_RES(mp),
-                                 0, 0, 0);
+       error = xfs_trans_reserve(tp, &M_RES(mp)->tr_clearagi, 0, 0);
        if (error)
                goto out_abort;
 
diff --git a/fs/xfs/xfs_log_rlimit.c b/fs/xfs/xfs_log_rlimit.c
new file mode 100644 (file)
index 0000000..bbcec0b
--- /dev/null
@@ -0,0 +1,147 @@
+/*
+ * Copyright (c) 2013 Jie Liu.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+#include "xfs.h"
+#include "xfs_fs.h"
+#include "xfs_log.h"
+#include "xfs_trans.h"
+#include "xfs_ag.h"
+#include "xfs_sb.h"
+#include "xfs_mount.h"
+#include "xfs_trans_space.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_inode.h"
+#include "xfs_da_btree.h"
+#include "xfs_attr_leaf.h"
+
+/*
+ * Calculate the maximum length in bytes that would be required for a local
+ * attribute value as large attributes out of line are not logged.
+ */
+STATIC int
+xfs_log_calc_max_attrsetm_res(
+       struct xfs_mount        *mp)
+{
+       int                     size;
+       int                     nblks;
+
+       size = xfs_attr_leaf_entsize_local_max(mp->m_sb.sb_blocksize) -
+              MAXNAMELEN - 1;
+       nblks = XFS_DAENTER_SPACE_RES(mp, XFS_ATTR_FORK);
+       nblks += XFS_B_TO_FSB(mp, size);
+       nblks += XFS_NEXTENTADD_SPACE_RES(mp, size, XFS_ATTR_FORK);
+
+       return  M_RES(mp)->tr_attrsetm.tr_logres +
+               M_RES(mp)->tr_attrsetrt.tr_logres * nblks;
+}
+
+/*
+ * Iterate over the log space reservation table to figure out and return
+ * the maximum one in terms of the pre-calculated values which were done
+ * at mount time.
+ */
+STATIC void
+xfs_log_get_max_trans_res(
+       struct xfs_mount        *mp,
+       struct xfs_trans_res    *max_resp)
+{
+       struct xfs_trans_res    *resp;
+       struct xfs_trans_res    *end_resp;
+       int                     log_space = 0;
+       int                     attr_space;
+
+       attr_space = xfs_log_calc_max_attrsetm_res(mp);
+
+       resp = (struct xfs_trans_res *)M_RES(mp);
+       end_resp = (struct xfs_trans_res *)(M_RES(mp) + 1);
+       for (; resp < end_resp; resp++) {
+               int             tmp = resp->tr_logcount > 1 ?
+                                     resp->tr_logres * resp->tr_logcount :
+                                     resp->tr_logres;
+               if (log_space < tmp) {
+                       log_space = tmp;
+                       *max_resp = *resp;              /* struct copy */
+               }
+       }
+
+       if (attr_space > log_space) {
+               *max_resp = M_RES(mp)->tr_attrsetm;     /* struct copy */
+               max_resp->tr_logres = attr_space;
+       }
+}
+
+/*
+ * Calculate the minimum valid log size for the given superblock configuration.
+ * Used to calculate the minimum log size at mkfs time, and to determine if
+ * the log is large enough or not at mount time. Returns the minimum size in
+ * filesystem block size units.
+ */
+int
+xfs_log_calc_minimum_size(
+       struct xfs_mount        *mp)
+{
+       struct xfs_trans_res    tres = {0};
+       int                     max_logres;
+       int                     min_logblks = 0;
+       int                     lsunit = 0;
+
+       xfs_log_get_max_trans_res(mp, &tres);
+
+       max_logres = xfs_log_calc_unit_res(mp, tres.tr_logres);
+       if (tres.tr_logcount > 1)
+               max_logres *= tres.tr_logcount;
+
+       if (xfs_sb_version_haslogv2(&mp->m_sb) && mp->m_sb.sb_logsunit > 1)
+               lsunit = BTOBB(mp->m_sb.sb_logsunit);
+
+       /*
+        * Two factors should be taken into account for calculating the minimum
+        * log space.
+        * 1) The fundamental limitation is that no single transaction can be
+        *    larger than half size of the log.
+        *
+        *    From mkfs.xfs, this is considered by the XFS_MIN_LOG_FACTOR
+        *    define, which is set to 3. That means we can definitely fit
+        *    maximally sized 2 transactions in the log. We'll use this same
+        *    value here.
+        *
+        * 2) If the lsunit option is specified, a transaction requires 2 LSU
+        *    for the reservation because there are two log writes that can
+        *    require padding - the transaction data and the commit record which
+        *    are written separately and both can require padding to the LSU.
+        *    Consider that we can have an active CIL reservation holding 2*LSU,
+        *    but the CIL is not over a push threshold, in this case, if we
+        *    don't have enough log space for at one new transaction, which
+        *    includes another 2*LSU in the reservation, we will run into dead
+        *    loop situation in log space grant procedure. i.e.
+        *    xlog_grant_head_wait().
+        *
+        *    Hence the log size needs to be able to contain two maximally sized
+        *    and padded transactions, which is (2 * (2 * LSU + maxlres)).
+        *
+        * Also, the log size should be a multiple of the log stripe unit, round
+        * it up to lsunit boundary if lsunit is specified.
+        */
+       if (lsunit) {
+               min_logblks = roundup_64(BTOBB(max_logres), lsunit) +
+                             2 * lsunit;
+       } else
+               min_logblks = BTOBB(max_logres) + 2 * BBSIZE;
+       min_logblks *= XFS_MIN_LOG_FACTOR;
+
+       return XFS_BB_TO_FSB(mp, min_logblks);
+}
index 2b0ba358165619b87523315f1ca3940b4e2606c0..38bcbbc21e9269286a0e4b2814ab37c8954ad3d6 100644 (file)
@@ -17,7 +17,7 @@
  */
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_types.h"
+#include "xfs_format.h"
 #include "xfs_bit.h"
 #include "xfs_log.h"
 #include "xfs_inum.h"
 #include "xfs_trans_priv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
-#include "xfs_dir2.h"
 #include "xfs_mount.h"
+#include "xfs_da_btree.h"
+#include "xfs_dir2_format.h"
+#include "xfs_dir2.h"
 #include "xfs_bmap_btree.h"
 #include "xfs_alloc_btree.h"
 #include "xfs_ialloc_btree.h"
@@ -40,7 +42,6 @@
 #include "xfs_error.h"
 #include "xfs_quota.h"
 #include "xfs_fsops.h"
-#include "xfs_utils.h"
 #include "xfs_trace.h"
 #include "xfs_icache.h"
 #include "xfs_cksum.h"
@@ -59,69 +60,6 @@ STATIC void  xfs_icsb_disable_counter(xfs_mount_t *, xfs_sb_field_t);
 #define xfs_icsb_balance_counter_locked(mp, a, b)      do { } while (0)
 #endif
 
-static const struct {
-       short offset;
-       short type;     /* 0 = integer
-                        * 1 = binary / string (no translation)
-                        */
-} xfs_sb_info[] = {
-    { offsetof(xfs_sb_t, sb_magicnum),   0 },
-    { offsetof(xfs_sb_t, sb_blocksize),  0 },
-    { offsetof(xfs_sb_t, sb_dblocks),    0 },
-    { offsetof(xfs_sb_t, sb_rblocks),    0 },
-    { offsetof(xfs_sb_t, sb_rextents),   0 },
-    { offsetof(xfs_sb_t, sb_uuid),       1 },
-    { offsetof(xfs_sb_t, sb_logstart),   0 },
-    { offsetof(xfs_sb_t, sb_rootino),    0 },
-    { offsetof(xfs_sb_t, sb_rbmino),     0 },
-    { offsetof(xfs_sb_t, sb_rsumino),    0 },
-    { offsetof(xfs_sb_t, sb_rextsize),   0 },
-    { offsetof(xfs_sb_t, sb_agblocks),   0 },
-    { offsetof(xfs_sb_t, sb_agcount),    0 },
-    { offsetof(xfs_sb_t, sb_rbmblocks),  0 },
-    { offsetof(xfs_sb_t, sb_logblocks),  0 },
-    { offsetof(xfs_sb_t, sb_versionnum), 0 },
-    { offsetof(xfs_sb_t, sb_sectsize),   0 },
-    { offsetof(xfs_sb_t, sb_inodesize),  0 },
-    { offsetof(xfs_sb_t, sb_inopblock),  0 },
-    { offsetof(xfs_sb_t, sb_fname[0]),   1 },
-    { offsetof(xfs_sb_t, sb_blocklog),   0 },
-    { offsetof(xfs_sb_t, sb_sectlog),    0 },
-    { offsetof(xfs_sb_t, sb_inodelog),   0 },
-    { offsetof(xfs_sb_t, sb_inopblog),   0 },
-    { offsetof(xfs_sb_t, sb_agblklog),   0 },
-    { offsetof(xfs_sb_t, sb_rextslog),   0 },
-    { offsetof(xfs_sb_t, sb_inprogress), 0 },
-    { offsetof(xfs_sb_t, sb_imax_pct),   0 },
-    { offsetof(xfs_sb_t, sb_icount),     0 },
-    { offsetof(xfs_sb_t, sb_ifree),      0 },
-    { offsetof(xfs_sb_t, sb_fdblocks),   0 },
-    { offsetof(xfs_sb_t, sb_frextents),  0 },
-    { offsetof(xfs_sb_t, sb_uquotino),   0 },
-    { offsetof(xfs_sb_t, sb_gquotino),   0 },
-    { offsetof(xfs_sb_t, sb_qflags),     0 },
-    { offsetof(xfs_sb_t, sb_flags),      0 },
-    { offsetof(xfs_sb_t, sb_shared_vn),  0 },
-    { offsetof(xfs_sb_t, sb_inoalignmt), 0 },
-    { offsetof(xfs_sb_t, sb_unit),      0 },
-    { offsetof(xfs_sb_t, sb_width),     0 },
-    { offsetof(xfs_sb_t, sb_dirblklog),         0 },
-    { offsetof(xfs_sb_t, sb_logsectlog), 0 },
-    { offsetof(xfs_sb_t, sb_logsectsize),0 },
-    { offsetof(xfs_sb_t, sb_logsunit),  0 },
-    { offsetof(xfs_sb_t, sb_features2),         0 },
-    { offsetof(xfs_sb_t, sb_bad_features2), 0 },
-    { offsetof(xfs_sb_t, sb_features_compat), 0 },
-    { offsetof(xfs_sb_t, sb_features_ro_compat), 0 },
-    { offsetof(xfs_sb_t, sb_features_incompat), 0 },
-    { offsetof(xfs_sb_t, sb_features_log_incompat), 0 },
-    { offsetof(xfs_sb_t, sb_crc),       0 },
-    { offsetof(xfs_sb_t, sb_pad),       0 },
-    { offsetof(xfs_sb_t, sb_pquotino),  0 },
-    { offsetof(xfs_sb_t, sb_lsn),       0 },
-    { sizeof(xfs_sb_t),                         0 }
-};
-
 static DEFINE_MUTEX(xfs_uuid_table_mutex);
 static int xfs_uuid_table_size;
 static uuid_t *xfs_uuid_table;
@@ -197,64 +135,6 @@ xfs_uuid_unmount(
 }
 
 
-/*
- * Reference counting access wrappers to the perag structures.
- * Because we never free per-ag structures, the only thing we
- * have to protect against changes is the tree structure itself.
- */
-struct xfs_perag *
-xfs_perag_get(struct xfs_mount *mp, xfs_agnumber_t agno)
-{
-       struct xfs_perag        *pag;
-       int                     ref = 0;
-
-       rcu_read_lock();
-       pag = radix_tree_lookup(&mp->m_perag_tree, agno);
-       if (pag) {
-               ASSERT(atomic_read(&pag->pag_ref) >= 0);
-               ref = atomic_inc_return(&pag->pag_ref);
-       }
-       rcu_read_unlock();
-       trace_xfs_perag_get(mp, agno, ref, _RET_IP_);
-       return pag;
-}
-
-/*
- * search from @first to find the next perag with the given tag set.
- */
-struct xfs_perag *
-xfs_perag_get_tag(
-       struct xfs_mount        *mp,
-       xfs_agnumber_t          first,
-       int                     tag)
-{
-       struct xfs_perag        *pag;
-       int                     found;
-       int                     ref;
-
-       rcu_read_lock();
-       found = radix_tree_gang_lookup_tag(&mp->m_perag_tree,
-                                       (void **)&pag, first, 1, tag);
-       if (found <= 0) {
-               rcu_read_unlock();
-               return NULL;
-       }
-       ref = atomic_inc_return(&pag->pag_ref);
-       rcu_read_unlock();
-       trace_xfs_perag_get_tag(mp, pag->pag_agno, ref, _RET_IP_);
-       return pag;
-}
-
-void
-xfs_perag_put(struct xfs_perag *pag)
-{
-       int     ref;
-
-       ASSERT(atomic_read(&pag->pag_ref) > 0);
-       ref = atomic_dec_return(&pag->pag_ref);
-       trace_xfs_perag_put(pag->pag_mount, pag->pag_agno, ref, _RET_IP_);
-}
-
 STATIC void
 __xfs_free_perag(
        struct rcu_head *head)
@@ -307,184 +187,6 @@ xfs_sb_validate_fsb_count(
        return 0;
 }
 
-/*
- * Check the validity of the SB found.
- */
-STATIC int
-xfs_mount_validate_sb(
-       xfs_mount_t     *mp,
-       xfs_sb_t        *sbp,
-       bool            check_inprogress,
-       bool            check_version)
-{
-
-       /*
-        * If the log device and data device have the
-        * same device number, the log is internal.
-        * Consequently, the sb_logstart should be non-zero.  If
-        * we have a zero sb_logstart in this case, we may be trying to mount
-        * a volume filesystem in a non-volume manner.
-        */
-       if (sbp->sb_magicnum != XFS_SB_MAGIC) {
-               xfs_warn(mp, "bad magic number");
-               return XFS_ERROR(EWRONGFS);
-       }
-
-
-       if (!xfs_sb_good_version(sbp)) {
-               xfs_warn(mp, "bad version");
-               return XFS_ERROR(EWRONGFS);
-       }
-
-       if ((sbp->sb_qflags & (XFS_OQUOTA_ENFD | XFS_OQUOTA_CHKD)) &&
-                       (sbp->sb_qflags & (XFS_PQUOTA_ENFD | XFS_GQUOTA_ENFD |
-                               XFS_PQUOTA_CHKD | XFS_GQUOTA_CHKD))) {
-               xfs_notice(mp,
-"Super block has XFS_OQUOTA bits along with XFS_PQUOTA and/or XFS_GQUOTA bits.\n");
-               return XFS_ERROR(EFSCORRUPTED);
-       }
-
-       /*
-        * Version 5 superblock feature mask validation. Reject combinations the
-        * kernel cannot support up front before checking anything else. For
-        * write validation, we don't need to check feature masks.
-        */
-       if (check_version && XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5) {
-               xfs_alert(mp,
-"Version 5 superblock detected. This kernel has EXPERIMENTAL support enabled!\n"
-"Use of these features in this kernel is at your own risk!");
-
-               if (xfs_sb_has_compat_feature(sbp,
-                                       XFS_SB_FEAT_COMPAT_UNKNOWN)) {
-                       xfs_warn(mp,
-"Superblock has unknown compatible features (0x%x) enabled.\n"
-"Using a more recent kernel is recommended.",
-                               (sbp->sb_features_compat &
-                                               XFS_SB_FEAT_COMPAT_UNKNOWN));
-               }
-
-               if (xfs_sb_has_ro_compat_feature(sbp,
-                                       XFS_SB_FEAT_RO_COMPAT_UNKNOWN)) {
-                       xfs_alert(mp,
-"Superblock has unknown read-only compatible features (0x%x) enabled.",
-                               (sbp->sb_features_ro_compat &
-                                               XFS_SB_FEAT_RO_COMPAT_UNKNOWN));
-                       if (!(mp->m_flags & XFS_MOUNT_RDONLY)) {
-                               xfs_warn(mp,
-"Attempted to mount read-only compatible filesystem read-write.\n"
-"Filesystem can only be safely mounted read only.");
-                               return XFS_ERROR(EINVAL);
-                       }
-               }
-               if (xfs_sb_has_incompat_feature(sbp,
-                                       XFS_SB_FEAT_INCOMPAT_UNKNOWN)) {
-                       xfs_warn(mp,
-"Superblock has unknown incompatible features (0x%x) enabled.\n"
-"Filesystem can not be safely mounted by this kernel.",
-                               (sbp->sb_features_incompat &
-                                               XFS_SB_FEAT_INCOMPAT_UNKNOWN));
-                       return XFS_ERROR(EINVAL);
-               }
-       }
-
-       if (unlikely(
-           sbp->sb_logstart == 0 && mp->m_logdev_targp == mp->m_ddev_targp)) {
-               xfs_warn(mp,
-               "filesystem is marked as having an external log; "
-               "specify logdev on the mount command line.");
-               return XFS_ERROR(EINVAL);
-       }
-
-       if (unlikely(
-           sbp->sb_logstart != 0 && mp->m_logdev_targp != mp->m_ddev_targp)) {
-               xfs_warn(mp,
-               "filesystem is marked as having an internal log; "
-               "do not specify logdev on the mount command line.");
-               return XFS_ERROR(EINVAL);
-       }
-
-       /*
-        * More sanity checking.  Most of these were stolen directly from
-        * xfs_repair.
-        */
-       if (unlikely(
-           sbp->sb_agcount <= 0                                        ||
-           sbp->sb_sectsize < XFS_MIN_SECTORSIZE                       ||
-           sbp->sb_sectsize > XFS_MAX_SECTORSIZE                       ||
-           sbp->sb_sectlog < XFS_MIN_SECTORSIZE_LOG                    ||
-           sbp->sb_sectlog > XFS_MAX_SECTORSIZE_LOG                    ||
-           sbp->sb_sectsize != (1 << sbp->sb_sectlog)                  ||
-           sbp->sb_blocksize < XFS_MIN_BLOCKSIZE                       ||
-           sbp->sb_blocksize > XFS_MAX_BLOCKSIZE                       ||
-           sbp->sb_blocklog < XFS_MIN_BLOCKSIZE_LOG                    ||
-           sbp->sb_blocklog > XFS_MAX_BLOCKSIZE_LOG                    ||
-           sbp->sb_blocksize != (1 << sbp->sb_blocklog)                ||
-           sbp->sb_inodesize < XFS_DINODE_MIN_SIZE                     ||
-           sbp->sb_inodesize > XFS_DINODE_MAX_SIZE                     ||
-           sbp->sb_inodelog < XFS_DINODE_MIN_LOG                       ||
-           sbp->sb_inodelog > XFS_DINODE_MAX_LOG                       ||
-           sbp->sb_inodesize != (1 << sbp->sb_inodelog)                ||
-           (sbp->sb_blocklog - sbp->sb_inodelog != sbp->sb_inopblog)   ||
-           (sbp->sb_rextsize * sbp->sb_blocksize > XFS_MAX_RTEXTSIZE)  ||
-           (sbp->sb_rextsize * sbp->sb_blocksize < XFS_MIN_RTEXTSIZE)  ||
-           (sbp->sb_imax_pct > 100 /* zero sb_imax_pct is valid */)    ||
-           sbp->sb_dblocks == 0                                        ||
-           sbp->sb_dblocks > XFS_MAX_DBLOCKS(sbp)                      ||
-           sbp->sb_dblocks < XFS_MIN_DBLOCKS(sbp))) {
-               XFS_CORRUPTION_ERROR("SB sanity check failed",
-                               XFS_ERRLEVEL_LOW, mp, sbp);
-               return XFS_ERROR(EFSCORRUPTED);
-       }
-
-       /*
-        * Until this is fixed only page-sized or smaller data blocks work.
-        */
-       if (unlikely(sbp->sb_blocksize > PAGE_SIZE)) {
-               xfs_warn(mp,
-               "File system with blocksize %d bytes. "
-               "Only pagesize (%ld) or less will currently work.",
-                               sbp->sb_blocksize, PAGE_SIZE);
-               return XFS_ERROR(ENOSYS);
-       }
-
-       /*
-        * Currently only very few inode sizes are supported.
-        */
-       switch (sbp->sb_inodesize) {
-       case 256:
-       case 512:
-       case 1024:
-       case 2048:
-               break;
-       default:
-               xfs_warn(mp, "inode size of %d bytes not supported",
-                               sbp->sb_inodesize);
-               return XFS_ERROR(ENOSYS);
-       }
-
-       if (xfs_sb_validate_fsb_count(sbp, sbp->sb_dblocks) ||
-           xfs_sb_validate_fsb_count(sbp, sbp->sb_rblocks)) {
-               xfs_warn(mp,
-               "file system too large to be mounted on this system.");
-               return XFS_ERROR(EFBIG);
-       }
-
-       if (check_inprogress && sbp->sb_inprogress) {
-               xfs_warn(mp, "Offline file system operation in progress!");
-               return XFS_ERROR(EFSCORRUPTED);
-       }
-
-       /*
-        * Version 1 directory format has never worked on Linux.
-        */
-       if (unlikely(!xfs_sb_version_hasdirv2(sbp))) {
-               xfs_warn(mp, "file system using version 1 directory format");
-               return XFS_ERROR(ENOSYS);
-       }
-
-       return 0;
-}
-
 int
 xfs_initialize_perag(
        xfs_mount_t     *mp,
@@ -569,283 +271,15 @@ out_unwind:
        return error;
 }
 
-static void
-xfs_sb_quota_from_disk(struct xfs_sb *sbp)
-{
-       if (sbp->sb_qflags & XFS_OQUOTA_ENFD)
-               sbp->sb_qflags |= (sbp->sb_qflags & XFS_PQUOTA_ACCT) ?
-                                       XFS_PQUOTA_ENFD : XFS_GQUOTA_ENFD;
-       if (sbp->sb_qflags & XFS_OQUOTA_CHKD)
-               sbp->sb_qflags |= (sbp->sb_qflags & XFS_PQUOTA_ACCT) ?
-                                       XFS_PQUOTA_CHKD : XFS_GQUOTA_CHKD;
-       sbp->sb_qflags &= ~(XFS_OQUOTA_ENFD | XFS_OQUOTA_CHKD);
-}
-
-void
-xfs_sb_from_disk(
-       struct xfs_sb   *to,
-       xfs_dsb_t       *from)
-{
-       to->sb_magicnum = be32_to_cpu(from->sb_magicnum);
-       to->sb_blocksize = be32_to_cpu(from->sb_blocksize);
-       to->sb_dblocks = be64_to_cpu(from->sb_dblocks);
-       to->sb_rblocks = be64_to_cpu(from->sb_rblocks);
-       to->sb_rextents = be64_to_cpu(from->sb_rextents);
-       memcpy(&to->sb_uuid, &from->sb_uuid, sizeof(to->sb_uuid));
-       to->sb_logstart = be64_to_cpu(from->sb_logstart);
-       to->sb_rootino = be64_to_cpu(from->sb_rootino);
-       to->sb_rbmino = be64_to_cpu(from->sb_rbmino);
-       to->sb_rsumino = be64_to_cpu(from->sb_rsumino);
-       to->sb_rextsize = be32_to_cpu(from->sb_rextsize);
-       to->sb_agblocks = be32_to_cpu(from->sb_agblocks);
-       to->sb_agcount = be32_to_cpu(from->sb_agcount);
-       to->sb_rbmblocks = be32_to_cpu(from->sb_rbmblocks);
-       to->sb_logblocks = be32_to_cpu(from->sb_logblocks);
-       to->sb_versionnum = be16_to_cpu(from->sb_versionnum);
-       to->sb_sectsize = be16_to_cpu(from->sb_sectsize);
-       to->sb_inodesize = be16_to_cpu(from->sb_inodesize);
-       to->sb_inopblock = be16_to_cpu(from->sb_inopblock);
-       memcpy(&to->sb_fname, &from->sb_fname, sizeof(to->sb_fname));
-       to->sb_blocklog = from->sb_blocklog;
-       to->sb_sectlog = from->sb_sectlog;
-       to->sb_inodelog = from->sb_inodelog;
-       to->sb_inopblog = from->sb_inopblog;
-       to->sb_agblklog = from->sb_agblklog;
-       to->sb_rextslog = from->sb_rextslog;
-       to->sb_inprogress = from->sb_inprogress;
-       to->sb_imax_pct = from->sb_imax_pct;
-       to->sb_icount = be64_to_cpu(from->sb_icount);
-       to->sb_ifree = be64_to_cpu(from->sb_ifree);
-       to->sb_fdblocks = be64_to_cpu(from->sb_fdblocks);
-       to->sb_frextents = be64_to_cpu(from->sb_frextents);
-       to->sb_uquotino = be64_to_cpu(from->sb_uquotino);
-       to->sb_gquotino = be64_to_cpu(from->sb_gquotino);
-       to->sb_qflags = be16_to_cpu(from->sb_qflags);
-       to->sb_flags = from->sb_flags;
-       to->sb_shared_vn = from->sb_shared_vn;
-       to->sb_inoalignmt = be32_to_cpu(from->sb_inoalignmt);
-       to->sb_unit = be32_to_cpu(from->sb_unit);
-       to->sb_width = be32_to_cpu(from->sb_width);
-       to->sb_dirblklog = from->sb_dirblklog;
-       to->sb_logsectlog = from->sb_logsectlog;
-       to->sb_logsectsize = be16_to_cpu(from->sb_logsectsize);
-       to->sb_logsunit = be32_to_cpu(from->sb_logsunit);
-       to->sb_features2 = be32_to_cpu(from->sb_features2);
-       to->sb_bad_features2 = be32_to_cpu(from->sb_bad_features2);
-       to->sb_features_compat = be32_to_cpu(from->sb_features_compat);
-       to->sb_features_ro_compat = be32_to_cpu(from->sb_features_ro_compat);
-       to->sb_features_incompat = be32_to_cpu(from->sb_features_incompat);
-       to->sb_features_log_incompat =
-                               be32_to_cpu(from->sb_features_log_incompat);
-       to->sb_pad = 0;
-       to->sb_pquotino = be64_to_cpu(from->sb_pquotino);
-       to->sb_lsn = be64_to_cpu(from->sb_lsn);
-}
-
-static inline void
-xfs_sb_quota_to_disk(
-       xfs_dsb_t       *to,
-       xfs_sb_t        *from,
-       __int64_t       *fields)
-{
-       __uint16_t      qflags = from->sb_qflags;
-
-       if (*fields & XFS_SB_QFLAGS) {
-               /*
-                * The in-core version of sb_qflags do not have
-                * XFS_OQUOTA_* flags, whereas the on-disk version
-                * does.  So, convert incore XFS_{PG}QUOTA_* flags
-                * to on-disk XFS_OQUOTA_* flags.
-                */
-               qflags &= ~(XFS_PQUOTA_ENFD | XFS_PQUOTA_CHKD |
-                               XFS_GQUOTA_ENFD | XFS_GQUOTA_CHKD);
-
-               if (from->sb_qflags &
-                               (XFS_PQUOTA_ENFD | XFS_GQUOTA_ENFD))
-                       qflags |= XFS_OQUOTA_ENFD;
-               if (from->sb_qflags &
-                               (XFS_PQUOTA_CHKD | XFS_GQUOTA_CHKD))
-                       qflags |= XFS_OQUOTA_CHKD;
-               to->sb_qflags = cpu_to_be16(qflags);
-               *fields &= ~XFS_SB_QFLAGS;
-       }
-}
-
-/*
- * Copy in core superblock to ondisk one.
- *
- * The fields argument is mask of superblock fields to copy.
- */
-void
-xfs_sb_to_disk(
-       xfs_dsb_t       *to,
-       xfs_sb_t        *from,
-       __int64_t       fields)
-{
-       xfs_caddr_t     to_ptr = (xfs_caddr_t)to;
-       xfs_caddr_t     from_ptr = (xfs_caddr_t)from;
-       xfs_sb_field_t  f;
-       int             first;
-       int             size;
-
-       ASSERT(fields);
-       if (!fields)
-               return;
-
-       xfs_sb_quota_to_disk(to, from, &fields);
-       while (fields) {
-               f = (xfs_sb_field_t)xfs_lowbit64((__uint64_t)fields);
-               first = xfs_sb_info[f].offset;
-               size = xfs_sb_info[f + 1].offset - first;
-
-               ASSERT(xfs_sb_info[f].type == 0 || xfs_sb_info[f].type == 1);
-
-               if (size == 1 || xfs_sb_info[f].type == 1) {
-                       memcpy(to_ptr + first, from_ptr + first, size);
-               } else {
-                       switch (size) {
-                       case 2:
-                               *(__be16 *)(to_ptr + first) =
-                                       cpu_to_be16(*(__u16 *)(from_ptr + first));
-                               break;
-                       case 4:
-                               *(__be32 *)(to_ptr + first) =
-                                       cpu_to_be32(*(__u32 *)(from_ptr + first));
-                               break;
-                       case 8:
-                               *(__be64 *)(to_ptr + first) =
-                                       cpu_to_be64(*(__u64 *)(from_ptr + first));
-                               break;
-                       default:
-                               ASSERT(0);
-                       }
-               }
-
-               fields &= ~(1LL << f);
-       }
-}
-
-static int
-xfs_sb_verify(
-       struct xfs_buf  *bp,
-       bool            check_version)
-{
-       struct xfs_mount *mp = bp->b_target->bt_mount;
-       struct xfs_sb   sb;
-
-       xfs_sb_from_disk(&sb, XFS_BUF_TO_SBP(bp));
-
-       /*
-        * Only check the in progress field for the primary superblock as
-        * mkfs.xfs doesn't clear it from secondary superblocks.
-        */
-       return xfs_mount_validate_sb(mp, &sb, bp->b_bn == XFS_SB_DADDR,
-                                    check_version);
-}
-
-/*
- * If the superblock has the CRC feature bit set or the CRC field is non-null,
- * check that the CRC is valid.  We check the CRC field is non-null because a
- * single bit error could clear the feature bit and unused parts of the
- * superblock are supposed to be zero. Hence a non-null crc field indicates that
- * we've potentially lost a feature bit and we should check it anyway.
- */
-static void
-xfs_sb_read_verify(
-       struct xfs_buf  *bp)
-{
-       struct xfs_mount *mp = bp->b_target->bt_mount;
-       struct xfs_dsb  *dsb = XFS_BUF_TO_SBP(bp);
-       int             error;
-
-       /*
-        * open code the version check to avoid needing to convert the entire
-        * superblock from disk order just to check the version number
-        */
-       if (dsb->sb_magicnum == cpu_to_be32(XFS_SB_MAGIC) &&
-           (((be16_to_cpu(dsb->sb_versionnum) & XFS_SB_VERSION_NUMBITS) ==
-                                               XFS_SB_VERSION_5) ||
-            dsb->sb_crc != 0)) {
-
-               if (!xfs_verify_cksum(bp->b_addr, be16_to_cpu(dsb->sb_sectsize),
-                                     offsetof(struct xfs_sb, sb_crc))) {
-                       error = EFSCORRUPTED;
-                       goto out_error;
-               }
-       }
-       error = xfs_sb_verify(bp, true);
-
-out_error:
-       if (error) {
-               XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, bp->b_addr);
-               xfs_buf_ioerror(bp, error);
-       }
-}
-
-/*
- * We may be probed for a filesystem match, so we may not want to emit
- * messages when the superblock buffer is not actually an XFS superblock.
- * If we find an XFS superblock, the run a normal, noisy mount because we are
- * really going to mount it and want to know about errors.
- */
-static void
-xfs_sb_quiet_read_verify(
-       struct xfs_buf  *bp)
-{
-       struct xfs_dsb  *dsb = XFS_BUF_TO_SBP(bp);
-
-
-       if (dsb->sb_magicnum == cpu_to_be32(XFS_SB_MAGIC)) {
-               /* XFS filesystem, verify noisily! */
-               xfs_sb_read_verify(bp);
-               return;
-       }
-       /* quietly fail */
-       xfs_buf_ioerror(bp, EWRONGFS);
-}
-
-static void
-xfs_sb_write_verify(
-       struct xfs_buf          *bp)
-{
-       struct xfs_mount        *mp = bp->b_target->bt_mount;
-       struct xfs_buf_log_item *bip = bp->b_fspriv;
-       int                     error;
-
-       error = xfs_sb_verify(bp, false);
-       if (error) {
-               XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, bp->b_addr);
-               xfs_buf_ioerror(bp, error);
-               return;
-       }
-
-       if (!xfs_sb_version_hascrc(&mp->m_sb))
-               return;
-
-       if (bip)
-               XFS_BUF_TO_SBP(bp)->sb_lsn = cpu_to_be64(bip->bli_item.li_lsn);
-
-       xfs_update_cksum(bp->b_addr, BBTOB(bp->b_length),
-                        offsetof(struct xfs_sb, sb_crc));
-}
-
-const struct xfs_buf_ops xfs_sb_buf_ops = {
-       .verify_read = xfs_sb_read_verify,
-       .verify_write = xfs_sb_write_verify,
-};
-
-static const struct xfs_buf_ops xfs_sb_quiet_buf_ops = {
-       .verify_read = xfs_sb_quiet_read_verify,
-       .verify_write = xfs_sb_write_verify,
-};
-
 /*
  * xfs_readsb
  *
  * Does the initial read of the superblock.
  */
 int
-xfs_readsb(xfs_mount_t *mp, int flags)
+xfs_readsb(
+       struct xfs_mount *mp,
+       int             flags)
 {
        unsigned int    sector_size;
        struct xfs_buf  *bp;
@@ -884,8 +318,8 @@ reread:
         * Initialize the mount structure from the superblock.
         */
        xfs_sb_from_disk(&mp->m_sb, XFS_BUF_TO_SBP(bp));
-
        xfs_sb_quota_from_disk(&mp->m_sb);
+
        /*
         * We must be able to do sector-sized and sector-aligned IO.
         */
@@ -922,107 +356,6 @@ release_buf:
        return error;
 }
 
-
-/*
- * xfs_mount_common
- *
- * Mount initialization code establishing various mount
- * fields from the superblock associated with the given
- * mount structure
- */
-STATIC void
-xfs_mount_common(xfs_mount_t *mp, xfs_sb_t *sbp)
-{
-       mp->m_agfrotor = mp->m_agirotor = 0;
-       spin_lock_init(&mp->m_agirotor_lock);
-       mp->m_maxagi = mp->m_sb.sb_agcount;
-       mp->m_blkbit_log = sbp->sb_blocklog + XFS_NBBYLOG;
-       mp->m_blkbb_log = sbp->sb_blocklog - BBSHIFT;
-       mp->m_sectbb_log = sbp->sb_sectlog - BBSHIFT;
-       mp->m_agno_log = xfs_highbit32(sbp->sb_agcount - 1) + 1;
-       mp->m_agino_log = sbp->sb_inopblog + sbp->sb_agblklog;
-       mp->m_blockmask = sbp->sb_blocksize - 1;
-       mp->m_blockwsize = sbp->sb_blocksize >> XFS_WORDLOG;
-       mp->m_blockwmask = mp->m_blockwsize - 1;
-
-       mp->m_alloc_mxr[0] = xfs_allocbt_maxrecs(mp, sbp->sb_blocksize, 1);
-       mp->m_alloc_mxr[1] = xfs_allocbt_maxrecs(mp, sbp->sb_blocksize, 0);
-       mp->m_alloc_mnr[0] = mp->m_alloc_mxr[0] / 2;
-       mp->m_alloc_mnr[1] = mp->m_alloc_mxr[1] / 2;
-
-       mp->m_inobt_mxr[0] = xfs_inobt_maxrecs(mp, sbp->sb_blocksize, 1);
-       mp->m_inobt_mxr[1] = xfs_inobt_maxrecs(mp, sbp->sb_blocksize, 0);
-       mp->m_inobt_mnr[0] = mp->m_inobt_mxr[0] / 2;
-       mp->m_inobt_mnr[1] = mp->m_inobt_mxr[1] / 2;
-
-       mp->m_bmap_dmxr[0] = xfs_bmbt_maxrecs(mp, sbp->sb_blocksize, 1);
-       mp->m_bmap_dmxr[1] = xfs_bmbt_maxrecs(mp, sbp->sb_blocksize, 0);
-       mp->m_bmap_dmnr[0] = mp->m_bmap_dmxr[0] / 2;
-       mp->m_bmap_dmnr[1] = mp->m_bmap_dmxr[1] / 2;
-
-       mp->m_bsize = XFS_FSB_TO_BB(mp, 1);
-       mp->m_ialloc_inos = (int)MAX((__uint16_t)XFS_INODES_PER_CHUNK,
-                                       sbp->sb_inopblock);
-       mp->m_ialloc_blks = mp->m_ialloc_inos >> sbp->sb_inopblog;
-}
-
-/*
- * xfs_initialize_perag_data
- *
- * Read in each per-ag structure so we can count up the number of
- * allocated inodes, free inodes and used filesystem blocks as this
- * information is no longer persistent in the superblock. Once we have
- * this information, write it into the in-core superblock structure.
- */
-STATIC int
-xfs_initialize_perag_data(xfs_mount_t *mp, xfs_agnumber_t agcount)
-{
-       xfs_agnumber_t  index;
-       xfs_perag_t     *pag;
-       xfs_sb_t        *sbp = &mp->m_sb;
-       uint64_t        ifree = 0;
-       uint64_t        ialloc = 0;
-       uint64_t        bfree = 0;
-       uint64_t        bfreelst = 0;
-       uint64_t        btree = 0;
-       int             error;
-
-       for (index = 0; index < agcount; index++) {
-               /*
-                * read the agf, then the agi. This gets us
-                * all the information we need and populates the
-                * per-ag structures for us.
-                */
-               error = xfs_alloc_pagf_init(mp, NULL, index, 0);
-               if (error)
-                       return error;
-
-               error = xfs_ialloc_pagi_init(mp, NULL, index);
-               if (error)
-                       return error;
-               pag = xfs_perag_get(mp, index);
-               ifree += pag->pagi_freecount;
-               ialloc += pag->pagi_count;
-               bfree += pag->pagf_freeblks;
-               bfreelst += pag->pagf_flcount;
-               btree += pag->pagf_btreeblks;
-               xfs_perag_put(pag);
-       }
-       /*
-        * Overwrite incore superblock counters with just-read data
-        */
-       spin_lock(&mp->m_sb_lock);
-       sbp->sb_ifree = ifree;
-       sbp->sb_icount = ialloc;
-       sbp->sb_fdblocks = bfree + bfreelst + btree;
-       spin_unlock(&mp->m_sb_lock);
-
-       /* Fixup the per-cpu counters as well. */
-       xfs_icsb_reinit_counters(mp);
-
-       return 0;
-}
-
 /*
  * Update alignment values based on mount options and sb values
  */
@@ -1194,7 +527,7 @@ xfs_set_inoalignment(xfs_mount_t *mp)
 }
 
 /*
- * Check that the data (and log if separate) are an ok size.
+ * Check that the data (and log if separate) is an ok size.
  */
 STATIC int
 xfs_check_sizes(xfs_mount_t *mp)
@@ -1264,8 +597,7 @@ xfs_mount_reset_sbqflags(
                return 0;
 
        tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SBCHANGE);
-       error = xfs_trans_reserve(tp, 0, XFS_QM_SBCHANGE_LOG_RES(mp),
-                                 0, 0, XFS_DEFAULT_LOG_COUNT);
+       error = xfs_trans_reserve(tp, &M_RES(mp)->tr_qm_sbchange, 0, 0);
        if (error) {
                xfs_trans_cancel(tp, 0);
                xfs_alert(mp, "%s: Superblock update failed!", __func__);
@@ -1315,7 +647,7 @@ xfs_mountfs(
        uint            quotaflags = 0;
        int             error = 0;
 
-       xfs_mount_common(mp, sbp);
+       xfs_sb_mount_common(mp, sbp);
 
        /*
         * Check for a mismatched features2 values.  Older kernels
@@ -1400,7 +732,7 @@ xfs_mountfs(
        xfs_set_inoalignment(mp);
 
        /*
-        * Check that the data (and log if separate) are an ok size.
+        * Check that the data (and log if separate) is an ok size.
         */
        error = xfs_check_sizes(mp);
        if (error)
@@ -1738,8 +1070,7 @@ xfs_log_sbcount(xfs_mount_t *mp)
                return 0;
 
        tp = _xfs_trans_alloc(mp, XFS_TRANS_SB_COUNT, KM_SLEEP);
-       error = xfs_trans_reserve(tp, 0, XFS_SB_LOG_RES(mp), 0, 0,
-                                 XFS_DEFAULT_LOG_COUNT);
+       error = xfs_trans_reserve(tp, &M_RES(mp)->tr_sb, 0, 0);
        if (error) {
                xfs_trans_cancel(tp, 0);
                return error;
@@ -1752,49 +1083,7 @@ xfs_log_sbcount(xfs_mount_t *mp)
 }
 
 /*
- * xfs_mod_sb() can be used to copy arbitrary changes to the
- * in-core superblock into the superblock buffer to be logged.
- * It does not provide the higher level of locking that is
- * needed to protect the in-core superblock from concurrent
- * access.
- */
-void
-xfs_mod_sb(xfs_trans_t *tp, __int64_t fields)
-{
-       xfs_buf_t       *bp;
-       int             first;
-       int             last;
-       xfs_mount_t     *mp;
-       xfs_sb_field_t  f;
-
-       ASSERT(fields);
-       if (!fields)
-               return;
-       mp = tp->t_mountp;
-       bp = xfs_trans_getsb(tp, mp, 0);
-       first = sizeof(xfs_sb_t);
-       last = 0;
-
-       /* translate/copy */
-
-       xfs_sb_to_disk(XFS_BUF_TO_SBP(bp), &mp->m_sb, fields);
-
-       /* find modified range */
-       f = (xfs_sb_field_t)xfs_highbit64((__uint64_t)fields);
-       ASSERT((1LL << f) & XFS_SB_MOD_BITS);
-       last = xfs_sb_info[f + 1].offset - 1;
-
-       f = (xfs_sb_field_t)xfs_lowbit64((__uint64_t)fields);
-       ASSERT((1LL << f) & XFS_SB_MOD_BITS);
-       first = xfs_sb_info[f].offset;
-
-       xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SB_BUF);
-       xfs_trans_log_buf(tp, bp, first, last);
-}
-
-
-/*
- * xfs_mod_incore_sb_unlocked() is a utility routine common used to apply
+ * xfs_mod_incore_sb_unlocked() is a utility routine commonly used to apply
  * a delta to a specified field in the in-core superblock.  Simply
  * switch on the field indicated and apply the delta to that field.
  * Fields are not allowed to dip below zero, so if the delta would
@@ -2101,8 +1390,7 @@ xfs_mount_log_sb(
                         XFS_SB_VERSIONNUM));
 
        tp = xfs_trans_alloc(mp, XFS_TRANS_SB_UNIT);
-       error = xfs_trans_reserve(tp, 0, XFS_SB_LOG_RES(mp), 0, 0,
-                                 XFS_DEFAULT_LOG_COUNT);
+       error = xfs_trans_reserve(tp, &M_RES(mp)->tr_sb, 0, 0);
        if (error) {
                xfs_trans_cancel(tp, 0);
                return error;
index 4e374d4a9189622bccb2b56aa331c7218567d1f2..1fa0584b5627830c77e4bf0fa02db9c55c066a2d 100644 (file)
 #ifndef __XFS_MOUNT_H__
 #define        __XFS_MOUNT_H__
 
-typedef struct xfs_trans_reservations {
-       uint    tr_write;       /* extent alloc trans */
-       uint    tr_itruncate;   /* truncate trans */
-       uint    tr_rename;      /* rename trans */
-       uint    tr_link;        /* link trans */
-       uint    tr_remove;      /* unlink trans */
-       uint    tr_symlink;     /* symlink trans */
-       uint    tr_create;      /* create trans */
-       uint    tr_mkdir;       /* mkdir trans */
-       uint    tr_ifree;       /* inode free trans */
-       uint    tr_ichange;     /* inode update trans */
-       uint    tr_growdata;    /* fs data section grow trans */
-       uint    tr_swrite;      /* sync write inode trans */
-       uint    tr_addafork;    /* cvt inode to attributed trans */
-       uint    tr_writeid;     /* write setuid/setgid file */
-       uint    tr_attrinval;   /* attr fork buffer invalidation */
-       uint    tr_attrsetm;    /* set/create an attribute at mount time */
-       uint    tr_attrsetrt;   /* set/create an attribute at runtime */
-       uint    tr_attrrm;      /* remove an attribute */
-       uint    tr_clearagi;    /* clear bad agi unlinked ino bucket */
-       uint    tr_growrtalloc; /* grow realtime allocations */
-       uint    tr_growrtzero;  /* grow realtime zeroing */
-       uint    tr_growrtfree;  /* grow realtime freeing */
-       uint    tr_qm_sbchange; /* change quota flags */
-       uint    tr_qm_setqlim;  /* adjust quota limits */
-       uint    tr_qm_dqalloc;  /* allocate quota on disk */
-       uint    tr_qm_quotaoff; /* turn quota off */
-       uint    tr_qm_equotaoff;/* end of turn quota off */
-       uint    tr_sb;          /* modify superblock */
-} xfs_trans_reservations_t;
-
-#ifndef __KERNEL__
-
-#define xfs_daddr_to_agno(mp,d) \
-       ((xfs_agnumber_t)(XFS_BB_TO_FSBT(mp, d) / (mp)->m_sb.sb_agblocks))
-#define xfs_daddr_to_agbno(mp,d) \
-       ((xfs_agblock_t)(XFS_BB_TO_FSBT(mp, d) % (mp)->m_sb.sb_agblocks))
-
-#else /* __KERNEL__ */
+#ifdef __KERNEL__
 
 struct xlog;
 struct xfs_inode;
@@ -174,7 +136,7 @@ typedef struct xfs_mount {
        int                     m_ialloc_blks;  /* blocks in inode allocation */
        int                     m_inoalign_mask;/* mask sb_inoalignmt if used */
        uint                    m_qflags;       /* quota status flags */
-       xfs_trans_reservations_t m_reservations;/* precomputed res values */
+       struct xfs_trans_resv   m_resv;         /* precomputed res values */
        __uint64_t              m_maxicount;    /* maximum inode count */
        __uint64_t              m_resblks;      /* total reserved blocks */
        __uint64_t              m_resblks_avail;/* available reserved blocks */
@@ -329,14 +291,6 @@ xfs_daddr_to_agbno(struct xfs_mount *mp, xfs_daddr_t d)
        return (xfs_agblock_t) do_div(ld, mp->m_sb.sb_agblocks);
 }
 
-/*
- * perag get/put wrappers for ref counting
- */
-struct xfs_perag *xfs_perag_get(struct xfs_mount *mp, xfs_agnumber_t agno);
-struct xfs_perag *xfs_perag_get_tag(struct xfs_mount *mp, xfs_agnumber_t agno,
-                                       int tag);
-void   xfs_perag_put(struct xfs_perag *pag);
-
 /*
  * Per-cpu superblock locking functions
  */
@@ -366,9 +320,63 @@ typedef struct xfs_mod_sb {
        int64_t         msb_delta;      /* Change to make to specified field */
 } xfs_mod_sb_t;
 
+/*
+ * Per-ag incore structure, copies of information in agf and agi, to improve the
+ * performance of allocation group selection. This is defined for the kernel
+ * only, and hence is defined here instead of in xfs_ag.h. You need the struct
+ * xfs_mount to be defined to look up a xfs_perag anyway (via mp->m_perag_tree),
+ * so this doesn't introduce any strange header file dependencies.
+ */
+typedef struct xfs_perag {
+       struct xfs_mount *pag_mount;    /* owner filesystem */
+       xfs_agnumber_t  pag_agno;       /* AG this structure belongs to */
+       atomic_t        pag_ref;        /* perag reference count */
+       char            pagf_init;      /* this agf's entry is initialized */
+       char            pagi_init;      /* this agi's entry is initialized */
+       char            pagf_metadata;  /* the agf is preferred to be metadata */
+       char            pagi_inodeok;   /* The agi is ok for inodes */
+       __uint8_t       pagf_levels[XFS_BTNUM_AGF];
+                                       /* # of levels in bno & cnt btree */
+       __uint32_t      pagf_flcount;   /* count of blocks in freelist */
+       xfs_extlen_t    pagf_freeblks;  /* total free blocks */
+       xfs_extlen_t    pagf_longest;   /* longest free space */
+       __uint32_t      pagf_btreeblks; /* # of blocks held in AGF btrees */
+       xfs_agino_t     pagi_freecount; /* number of free inodes */
+       xfs_agino_t     pagi_count;     /* number of allocated inodes */
+
+       /*
+        * Inode allocation search lookup optimisation.
+        * If the pagino matches, the search for new inodes
+        * doesn't need to search the near ones again straight away
+        */
+       xfs_agino_t     pagl_pagino;
+       xfs_agino_t     pagl_leftrec;
+       xfs_agino_t     pagl_rightrec;
+       spinlock_t      pagb_lock;      /* lock for pagb_tree */
+       struct rb_root  pagb_tree;      /* ordered tree of busy extents */
+
+       atomic_t        pagf_fstrms;    /* # of filestreams active in this AG */
+
+       spinlock_t      pag_ici_lock;   /* incore inode cache lock */
+       struct radix_tree_root pag_ici_root;    /* incore inode cache root */
+       int             pag_ici_reclaimable;    /* reclaimable inodes */
+       struct mutex    pag_ici_reclaim_lock;   /* serialisation point */
+       unsigned long   pag_ici_reclaim_cursor; /* reclaim restart point */
+
+       /* buffer cache index */
+       spinlock_t      pag_buf_lock;   /* lock for pag_buf_tree */
+       struct rb_root  pag_buf_tree;   /* ordered tree of active buffers */
+
+       /* for rcu-safe freeing */
+       struct rcu_head rcu_head;
+       int             pagb_count;     /* pagb slots in use */
+} xfs_perag_t;
+
 extern int     xfs_log_sbcount(xfs_mount_t *);
 extern __uint64_t xfs_default_resblks(xfs_mount_t *mp);
 extern int     xfs_mountfs(xfs_mount_t *mp);
+extern int     xfs_initialize_perag(xfs_mount_t *mp, xfs_agnumber_t agcount,
+                                    xfs_agnumber_t *maxagi);
 
 extern void    xfs_unmountfs(xfs_mount_t *);
 extern int     xfs_mod_incore_sb(xfs_mount_t *, xfs_sb_field_t, int64_t, int);
@@ -387,13 +395,4 @@ extern void        xfs_set_low_space_thresholds(struct xfs_mount *);
 
 #endif /* __KERNEL__ */
 
-extern void    xfs_sb_calc_crc(struct xfs_buf  *);
-extern void    xfs_mod_sb(struct xfs_trans *, __int64_t);
-extern int     xfs_initialize_perag(struct xfs_mount *, xfs_agnumber_t,
-                                       xfs_agnumber_t *);
-extern void    xfs_sb_from_disk(struct xfs_sb *, struct xfs_dsb *);
-extern void    xfs_sb_to_disk(struct xfs_dsb *, struct xfs_sb *, __int64_t);
-
-extern const struct xfs_buf_ops xfs_sb_buf_ops;
-
 #endif /* __XFS_MOUNT_H__ */
index d320794d03ce233d93f7ccfcd0a6c2c3f186e9c2..6218a0aeeeea88449c4a1e29e54c0297ae8de6fb 100644 (file)
@@ -17,6 +17,7 @@
  */
 #include "xfs.h"
 #include "xfs_fs.h"
+#include "xfs_format.h"
 #include "xfs_bit.h"
 #include "xfs_log.h"
 #include "xfs_trans.h"
@@ -37,7 +38,6 @@
 #include "xfs_attr.h"
 #include "xfs_buf_item.h"
 #include "xfs_trans_space.h"
-#include "xfs_utils.h"
 #include "xfs_qm.h"
 #include "xfs_trace.h"
 #include "xfs_icache.h"
@@ -834,21 +834,52 @@ xfs_qm_qino_alloc(
        int             error;
        int             committed;
 
+       *ip = NULL;
+       /*
+        * With superblock that doesn't have separate pquotino, we
+        * share an inode between gquota and pquota. If the on-disk
+        * superblock has GQUOTA and the filesystem is now mounted
+        * with PQUOTA, just use sb_gquotino for sb_pquotino and
+        * vice-versa.
+        */
+       if (!xfs_sb_version_has_pquotino(&mp->m_sb) &&
+                       (flags & (XFS_QMOPT_PQUOTA|XFS_QMOPT_GQUOTA))) {
+               xfs_ino_t ino = NULLFSINO;
+
+               if ((flags & XFS_QMOPT_PQUOTA) &&
+                            (mp->m_sb.sb_gquotino != NULLFSINO)) {
+                       ino = mp->m_sb.sb_gquotino;
+                       ASSERT(mp->m_sb.sb_pquotino == NULLFSINO);
+               } else if ((flags & XFS_QMOPT_GQUOTA) &&
+                            (mp->m_sb.sb_pquotino != NULLFSINO)) {
+                       ino = mp->m_sb.sb_pquotino;
+                       ASSERT(mp->m_sb.sb_gquotino == NULLFSINO);
+               }
+               if (ino != NULLFSINO) {
+                       error = xfs_iget(mp, NULL, ino, 0, 0, ip);
+                       if (error)
+                               return error;
+                       mp->m_sb.sb_gquotino = NULLFSINO;
+                       mp->m_sb.sb_pquotino = NULLFSINO;
+               }
+       }
+
        tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QINOCREATE);
-       if ((error = xfs_trans_reserve(tp,
-                                     XFS_QM_QINOCREATE_SPACE_RES(mp),
-                                     XFS_CREATE_LOG_RES(mp), 0,
-                                     XFS_TRANS_PERM_LOG_RES,
-                                     XFS_CREATE_LOG_COUNT))) {
+       error = xfs_trans_reserve(tp, &M_RES(mp)->tr_create,
+                                 XFS_QM_QINOCREATE_SPACE_RES(mp), 0);
+       if (error) {
                xfs_trans_cancel(tp, 0);
                return error;
        }
 
-       error = xfs_dir_ialloc(&tp, NULL, S_IFREG, 1, 0, 0, 1, ip, &committed);
-       if (error) {
-               xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES |
-                                XFS_TRANS_ABORT);
-               return error;
+       if (!*ip) {
+               error = xfs_dir_ialloc(&tp, NULL, S_IFREG, 1, 0, 0, 1, ip,
+                                                               &committed);
+               if (error) {
+                       xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES |
+                                        XFS_TRANS_ABORT);
+                       return error;
+               }
        }
 
        /*
@@ -860,21 +891,25 @@ xfs_qm_qino_alloc(
        if (flags & XFS_QMOPT_SBVERSION) {
                ASSERT(!xfs_sb_version_hasquota(&mp->m_sb));
                ASSERT((sbfields & (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
-                                  XFS_SB_GQUOTINO | XFS_SB_QFLAGS)) ==
-                      (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
-                       XFS_SB_GQUOTINO | XFS_SB_QFLAGS));
+                       XFS_SB_GQUOTINO | XFS_SB_PQUOTINO | XFS_SB_QFLAGS)) ==
+                               (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
+                                XFS_SB_GQUOTINO | XFS_SB_PQUOTINO |
+                                XFS_SB_QFLAGS));
 
                xfs_sb_version_addquota(&mp->m_sb);
                mp->m_sb.sb_uquotino = NULLFSINO;
                mp->m_sb.sb_gquotino = NULLFSINO;
+               mp->m_sb.sb_pquotino = NULLFSINO;
 
-               /* qflags will get updated _after_ quotacheck */
-               mp->m_sb.sb_qflags = 0;
+               /* qflags will get updated fully _after_ quotacheck */
+               mp->m_sb.sb_qflags = mp->m_qflags & XFS_ALL_QUOTA_ACCT;
        }
        if (flags & XFS_QMOPT_UQUOTA)
                mp->m_sb.sb_uquotino = (*ip)->i_ino;
-       else
+       else if (flags & XFS_QMOPT_GQUOTA)
                mp->m_sb.sb_gquotino = (*ip)->i_ino;
+       else
+               mp->m_sb.sb_pquotino = (*ip)->i_ino;
        spin_unlock(&mp->m_sb_lock);
        xfs_mod_sb(tp, sbfields);
 
@@ -1484,11 +1519,10 @@ xfs_qm_init_quotainos(
                        if (error)
                                goto error_rele;
                }
-               /* XXX: Use gquotino for now */
                if (XFS_IS_PQUOTA_ON(mp) &&
-                   mp->m_sb.sb_gquotino != NULLFSINO) {
-                       ASSERT(mp->m_sb.sb_gquotino > 0);
-                       error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
+                   mp->m_sb.sb_pquotino != NULLFSINO) {
+                       ASSERT(mp->m_sb.sb_pquotino > 0);
+                       error = xfs_iget(mp, NULL, mp->m_sb.sb_pquotino,
                                             0, 0, &pip);
                        if (error)
                                goto error_rele;
@@ -1496,7 +1530,8 @@ xfs_qm_init_quotainos(
        } else {
                flags |= XFS_QMOPT_SBVERSION;
                sbflags |= (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
-                           XFS_SB_GQUOTINO | XFS_SB_QFLAGS);
+                           XFS_SB_GQUOTINO | XFS_SB_PQUOTINO |
+                           XFS_SB_QFLAGS);
        }
 
        /*
@@ -1524,9 +1559,8 @@ xfs_qm_init_quotainos(
                flags &= ~XFS_QMOPT_SBVERSION;
        }
        if (XFS_IS_PQUOTA_ON(mp) && pip == NULL) {
-               /* XXX: Use XFS_SB_GQUOTINO for now */
                error = xfs_qm_qino_alloc(mp, &pip,
-                                         sbflags | XFS_SB_GQUOTINO,
+                                         sbflags | XFS_SB_PQUOTINO,
                                          flags | XFS_QMOPT_PQUOTA);
                if (error)
                        goto error_rele;
@@ -1704,8 +1738,7 @@ xfs_qm_write_sb_changes(
        int             error;
 
        tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SBCHANGE);
-       error = xfs_trans_reserve(tp, 0, XFS_QM_SBCHANGE_LOG_RES(mp),
-                                 0, 0, XFS_DEFAULT_LOG_COUNT);
+       error = xfs_trans_reserve(tp, &M_RES(mp)->tr_qm_sbchange, 0, 0);
        if (error) {
                xfs_trans_cancel(tp, 0);
                return error;
@@ -1734,8 +1767,8 @@ xfs_qm_write_sb_changes(
 int
 xfs_qm_vop_dqalloc(
        struct xfs_inode        *ip,
-       uid_t                   uid,
-       gid_t                   gid,
+       xfs_dqid_t              uid,
+       xfs_dqid_t              gid,
        prid_t                  prid,
        uint                    flags,
        struct xfs_dquot        **O_udqpp,
@@ -1782,7 +1815,7 @@ xfs_qm_vop_dqalloc(
                         * holding ilock.
                         */
                        xfs_iunlock(ip, lockflags);
-                       error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t) uid,
+                       error = xfs_qm_dqget(mp, NULL, uid,
                                                 XFS_DQ_USER,
                                                 XFS_QMOPT_DQALLOC |
                                                 XFS_QMOPT_DOWARN,
@@ -1809,7 +1842,7 @@ xfs_qm_vop_dqalloc(
        if ((flags & XFS_QMOPT_GQUOTA) && XFS_IS_GQUOTA_ON(mp)) {
                if (ip->i_d.di_gid != gid) {
                        xfs_iunlock(ip, lockflags);
-                       error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t)gid,
+                       error = xfs_qm_dqget(mp, NULL, gid,
                                                 XFS_DQ_GROUP,
                                                 XFS_QMOPT_DQALLOC |
                                                 XFS_QMOPT_DOWARN,
@@ -1943,7 +1976,7 @@ xfs_qm_vop_chown_reserve(
                        XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS;
 
        if (XFS_IS_UQUOTA_ON(mp) && udqp &&
-           ip->i_d.di_uid != (uid_t)be32_to_cpu(udqp->q_core.d_id)) {
+           ip->i_d.di_uid != be32_to_cpu(udqp->q_core.d_id)) {
                udq_delblks = udqp;
                /*
                 * If there are delayed allocation blocks, then we have to
index 579d6a02a5b6ec5fd2e21c503f4ada8b6dfa54d6..670cd44640704eb4899f960045cb14f8f582acce 100644 (file)
@@ -160,6 +160,8 @@ extern int          xfs_qm_scall_setqlim(struct xfs_mount *, xfs_dqid_t, uint,
                                        struct fs_disk_quota *);
 extern int             xfs_qm_scall_getqstat(struct xfs_mount *,
                                        struct fs_quota_stat *);
+extern int             xfs_qm_scall_getqstatv(struct xfs_mount *,
+                                       struct fs_quota_statv *);
 extern int             xfs_qm_scall_quotaon(struct xfs_mount *, uint);
 extern int             xfs_qm_scall_quotaoff(struct xfs_mount *, uint);
 
index 437a52d91f6d91ce9f48acbfc5a9928a9ee366ed..3af50ccdfac1a10da858ef26e80b040ad4a474f1 100644 (file)
@@ -17,6 +17,7 @@
  */
 #include "xfs.h"
 #include "xfs_fs.h"
+#include "xfs_format.h"
 #include "xfs_log.h"
 #include "xfs_trans.h"
 #include "xfs_sb.h"
index e4f8b2d6f38ba1960beefe35cbd15b0066f530cf..8174aad0b38813ec836ea044d9a1b7b4dc06f300 100644 (file)
@@ -20,6 +20,7 @@
 
 #include "xfs.h"
 #include "xfs_fs.h"
+#include "xfs_format.h"
 #include "xfs_bit.h"
 #include "xfs_log.h"
 #include "xfs_trans.h"
@@ -37,7 +38,6 @@
 #include "xfs_error.h"
 #include "xfs_attr.h"
 #include "xfs_buf_item.h"
-#include "xfs_utils.h"
 #include "xfs_qm.h"
 #include "xfs_trace.h"
 #include "xfs_icache.h"
@@ -247,9 +247,7 @@ xfs_qm_scall_trunc_qfile(
        xfs_ilock(ip, XFS_IOLOCK_EXCL);
 
        tp = xfs_trans_alloc(mp, XFS_TRANS_TRUNCATE_FILE);
-       error = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0,
-                                 XFS_TRANS_PERM_LOG_RES,
-                                 XFS_ITRUNCATE_LOG_COUNT);
+       error = xfs_trans_reserve(tp, &M_RES(mp)->tr_itruncate, 0, 0);
        if (error) {
                xfs_trans_cancel(tp, 0);
                xfs_iunlock(ip, XFS_IOLOCK_EXCL);
@@ -296,8 +294,10 @@ xfs_qm_scall_trunc_qfiles(
 
        if (flags & XFS_DQ_USER)
                error = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_uquotino);
-       if (flags & (XFS_DQ_GROUP|XFS_DQ_PROJ))
+       if (flags & XFS_DQ_GROUP)
                error2 = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_gquotino);
+       if (flags & XFS_DQ_PROJ)
+               error2 = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_pquotino);
 
        return error ? error : error2;
 }
@@ -404,6 +404,7 @@ xfs_qm_scall_quotaon(
 
 /*
  * Return quota status information, such as uquota-off, enforcements, etc.
+ * for Q_XGETQSTAT command.
  */
 int
 xfs_qm_scall_getqstat(
@@ -413,8 +414,10 @@ xfs_qm_scall_getqstat(
        struct xfs_quotainfo    *q = mp->m_quotainfo;
        struct xfs_inode        *uip = NULL;
        struct xfs_inode        *gip = NULL;
+       struct xfs_inode        *pip = NULL;
        bool                    tempuqip = false;
        bool                    tempgqip = false;
+       bool                    temppqip = false;
 
        memset(out, 0, sizeof(fs_quota_stat_t));
 
@@ -424,16 +427,106 @@ xfs_qm_scall_getqstat(
                out->qs_gquota.qfs_ino = NULLFSINO;
                return (0);
        }
+
+       out->qs_flags = (__uint16_t) xfs_qm_export_flags(mp->m_qflags &
+                                                       (XFS_ALL_QUOTA_ACCT|
+                                                        XFS_ALL_QUOTA_ENFD));
+       if (q) {
+               uip = q->qi_uquotaip;
+               gip = q->qi_gquotaip;
+               pip = q->qi_pquotaip;
+       }
+       if (!uip && mp->m_sb.sb_uquotino != NULLFSINO) {
+               if (xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
+                                       0, 0, &uip) == 0)
+                       tempuqip = true;
+       }
+       if (!gip && mp->m_sb.sb_gquotino != NULLFSINO) {
+               if (xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
+                                       0, 0, &gip) == 0)
+                       tempgqip = true;
+       }
+       /*
+        * Q_XGETQSTAT doesn't have room for both group and project quotas.
+        * So, allow the project quota values to be copied out only if
+        * there is no group quota information available.
+        */
+       if (!gip) {
+               if (!pip && mp->m_sb.sb_pquotino != NULLFSINO) {
+                       if (xfs_iget(mp, NULL, mp->m_sb.sb_pquotino,
+                                               0, 0, &pip) == 0)
+                               temppqip = true;
+               }
+       } else
+               pip = NULL;
+       if (uip) {
+               out->qs_uquota.qfs_ino = mp->m_sb.sb_uquotino;
+               out->qs_uquota.qfs_nblks = uip->i_d.di_nblocks;
+               out->qs_uquota.qfs_nextents = uip->i_d.di_nextents;
+               if (tempuqip)
+                       IRELE(uip);
+       }
+
+       if (gip) {
+               out->qs_gquota.qfs_ino = mp->m_sb.sb_gquotino;
+               out->qs_gquota.qfs_nblks = gip->i_d.di_nblocks;
+               out->qs_gquota.qfs_nextents = gip->i_d.di_nextents;
+               if (tempgqip)
+                       IRELE(gip);
+       }
+       if (pip) {
+               out->qs_gquota.qfs_ino = mp->m_sb.sb_gquotino;
+               out->qs_gquota.qfs_nblks = pip->i_d.di_nblocks;
+               out->qs_gquota.qfs_nextents = pip->i_d.di_nextents;
+               if (temppqip)
+                       IRELE(pip);
+       }
+       if (q) {
+               out->qs_incoredqs = q->qi_dquots;
+               out->qs_btimelimit = q->qi_btimelimit;
+               out->qs_itimelimit = q->qi_itimelimit;
+               out->qs_rtbtimelimit = q->qi_rtbtimelimit;
+               out->qs_bwarnlimit = q->qi_bwarnlimit;
+               out->qs_iwarnlimit = q->qi_iwarnlimit;
+       }
+       return 0;
+}
+
+/*
+ * Return quota status information, such as uquota-off, enforcements, etc.
+ * for Q_XGETQSTATV command, to support separate project quota field.
+ */
+int
+xfs_qm_scall_getqstatv(
+       struct xfs_mount        *mp,
+       struct fs_quota_statv   *out)
+{
+       struct xfs_quotainfo    *q = mp->m_quotainfo;
+       struct xfs_inode        *uip = NULL;
+       struct xfs_inode        *gip = NULL;
+       struct xfs_inode        *pip = NULL;
+       bool                    tempuqip = false;
+       bool                    tempgqip = false;
+       bool                    temppqip = false;
+
+       if (!xfs_sb_version_hasquota(&mp->m_sb)) {
+               out->qs_uquota.qfs_ino = NULLFSINO;
+               out->qs_gquota.qfs_ino = NULLFSINO;
+               out->qs_pquota.qfs_ino = NULLFSINO;
+               return (0);
+       }
+
        out->qs_flags = (__uint16_t) xfs_qm_export_flags(mp->m_qflags &
                                                        (XFS_ALL_QUOTA_ACCT|
                                                         XFS_ALL_QUOTA_ENFD));
-       out->qs_pad = 0;
        out->qs_uquota.qfs_ino = mp->m_sb.sb_uquotino;
        out->qs_gquota.qfs_ino = mp->m_sb.sb_gquotino;
+       out->qs_pquota.qfs_ino = mp->m_sb.sb_pquotino;
 
        if (q) {
                uip = q->qi_uquotaip;
                gip = q->qi_gquotaip;
+               pip = q->qi_pquotaip;
        }
        if (!uip && mp->m_sb.sb_uquotino != NULLFSINO) {
                if (xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
@@ -445,18 +538,30 @@ xfs_qm_scall_getqstat(
                                        0, 0, &gip) == 0)
                        tempgqip = true;
        }
+       if (!pip && mp->m_sb.sb_pquotino != NULLFSINO) {
+               if (xfs_iget(mp, NULL, mp->m_sb.sb_pquotino,
+                                       0, 0, &pip) == 0)
+                       temppqip = true;
+       }
        if (uip) {
                out->qs_uquota.qfs_nblks = uip->i_d.di_nblocks;
                out->qs_uquota.qfs_nextents = uip->i_d.di_nextents;
                if (tempuqip)
                        IRELE(uip);
        }
+
        if (gip) {
                out->qs_gquota.qfs_nblks = gip->i_d.di_nblocks;
                out->qs_gquota.qfs_nextents = gip->i_d.di_nextents;
                if (tempgqip)
                        IRELE(gip);
        }
+       if (pip) {
+               out->qs_pquota.qfs_nblks = pip->i_d.di_nblocks;
+               out->qs_pquota.qfs_nextents = pip->i_d.di_nextents;
+               if (temppqip)
+                       IRELE(pip);
+       }
        if (q) {
                out->qs_incoredqs = q->qi_dquots;
                out->qs_btimelimit = q->qi_btimelimit;
@@ -515,8 +620,7 @@ xfs_qm_scall_setqlim(
        xfs_dqunlock(dqp);
 
        tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SETQLIM);
-       error = xfs_trans_reserve(tp, 0, XFS_QM_SETQLIM_LOG_RES(mp),
-                                 0, 0, XFS_DEFAULT_LOG_COUNT);
+       error = xfs_trans_reserve(tp, &M_RES(mp)->tr_qm_setqlim, 0, 0);
        if (error) {
                xfs_trans_cancel(tp, 0);
                goto out_rele;
@@ -650,8 +754,7 @@ xfs_qm_log_quotaoff_end(
 
        tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QUOTAOFF_END);
 
-       error = xfs_trans_reserve(tp, 0, XFS_QM_QUOTAOFF_END_LOG_RES(mp),
-                                 0, 0, XFS_DEFAULT_LOG_COUNT);
+       error = xfs_trans_reserve(tp, &M_RES(mp)->tr_qm_equotaoff, 0, 0);
        if (error) {
                xfs_trans_cancel(tp, 0);
                return (error);
@@ -684,8 +787,7 @@ xfs_qm_log_quotaoff(
        uint                    oldsbqflag=0;
 
        tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QUOTAOFF);
-       error = xfs_trans_reserve(tp, 0, XFS_QM_QUOTAOFF_LOG_RES(mp),
-                                 0, 0, XFS_DEFAULT_LOG_COUNT);
+       error = xfs_trans_reserve(tp, &M_RES(mp)->tr_qm_quotaoff, 0, 0);
        if (error)
                goto error0;
 
index b14f42c714b609b95eab2b993805a7e4e4e73c24..66522da04d6e82f46c04800ec968ad2453e37349 100644 (file)
 #ifndef __XFS_QUOTA_H__
 #define __XFS_QUOTA_H__
 
-struct xfs_trans;
-
-/*
- * The ondisk form of a dquot structure.
- */
-#define XFS_DQUOT_MAGIC                0x4451          /* 'DQ' */
-#define XFS_DQUOT_VERSION      (u_int8_t)0x01  /* latest version number */
-
-/*
- * uid_t and gid_t are hard-coded to 32 bits in the inode.
- * Hence, an 'id' in a dquot is 32 bits..
- */
-typedef __uint32_t     xfs_dqid_t;
-
-/*
- * Even though users may not have quota limits occupying all 64-bits,
- * they may need 64-bit accounting. Hence, 64-bit quota-counters,
- * and quota-limits. This is a waste in the common case, but hey ...
- */
-typedef __uint64_t     xfs_qcnt_t;
-typedef __uint16_t     xfs_qwarncnt_t;
-
-/*
- * This is the main portion of the on-disk representation of quota
- * information for a user. This is the q_core of the xfs_dquot_t that
- * is kept in kernel memory. We pad this with some more expansion room
- * to construct the on disk structure.
- */
-typedef struct xfs_disk_dquot {
-       __be16          d_magic;        /* dquot magic = XFS_DQUOT_MAGIC */
-       __u8            d_version;      /* dquot version */
-       __u8            d_flags;        /* XFS_DQ_USER/PROJ/GROUP */
-       __be32          d_id;           /* user,project,group id */
-       __be64          d_blk_hardlimit;/* absolute limit on disk blks */
-       __be64          d_blk_softlimit;/* preferred limit on disk blks */
-       __be64          d_ino_hardlimit;/* maximum # allocated inodes */
-       __be64          d_ino_softlimit;/* preferred inode limit */
-       __be64          d_bcount;       /* disk blocks owned by the user */
-       __be64          d_icount;       /* inodes owned by the user */
-       __be32          d_itimer;       /* zero if within inode limits if not,
-                                          this is when we refuse service */
-       __be32          d_btimer;       /* similar to above; for disk blocks */
-       __be16          d_iwarns;       /* warnings issued wrt num inodes */
-       __be16          d_bwarns;       /* warnings issued wrt disk blocks */
-       __be32          d_pad0;         /* 64 bit align */
-       __be64          d_rtb_hardlimit;/* absolute limit on realtime blks */
-       __be64          d_rtb_softlimit;/* preferred limit on RT disk blks */
-       __be64          d_rtbcount;     /* realtime blocks owned */
-       __be32          d_rtbtimer;     /* similar to above; for RT disk blocks */
-       __be16          d_rtbwarns;     /* warnings issued wrt RT disk blocks */
-       __be16          d_pad;
-} xfs_disk_dquot_t;
-
-/*
- * This is what goes on disk. This is separated from the xfs_disk_dquot because
- * carrying the unnecessary padding would be a waste of memory.
- */
-typedef struct xfs_dqblk {
-       xfs_disk_dquot_t  dd_diskdq;    /* portion that lives incore as well */
-       char              dd_fill[4];   /* filling for posterity */
-
-       /*
-        * These two are only present on filesystems with the CRC bits set.
-        */
-       __be32            dd_crc;       /* checksum */
-       __be64            dd_lsn;       /* last modification in log */
-       uuid_t            dd_uuid;      /* location information */
-} xfs_dqblk_t;
-
-#define XFS_DQUOT_CRC_OFF      offsetof(struct xfs_dqblk, dd_crc)
-
-/*
- * flags for q_flags field in the dquot.
- */
-#define XFS_DQ_USER            0x0001          /* a user quota */
-#define XFS_DQ_PROJ            0x0002          /* project quota */
-#define XFS_DQ_GROUP           0x0004          /* a group quota */
-#define XFS_DQ_DIRTY           0x0008          /* dquot is dirty */
-#define XFS_DQ_FREEING         0x0010          /* dquot is beeing torn down */
-
-#define XFS_DQ_ALLTYPES                (XFS_DQ_USER|XFS_DQ_PROJ|XFS_DQ_GROUP)
-
-#define XFS_DQ_FLAGS \
-       { XFS_DQ_USER,          "USER" }, \
-       { XFS_DQ_PROJ,          "PROJ" }, \
-       { XFS_DQ_GROUP,         "GROUP" }, \
-       { XFS_DQ_DIRTY,         "DIRTY" }, \
-       { XFS_DQ_FREEING,       "FREEING" }
-
-/*
- * We have the possibility of all three quota types being active at once, and
- * hence free space modification requires modification of all three current
- * dquots in a single transaction. For this case we need to have a reservation
- * of at least 3 dquots.
- *
- * However, a chmod operation can change both UID and GID in a single
- * transaction, resulting in requiring {old, new} x {uid, gid} dquots to be
- * modified. Hence for this case we need to reserve space for at least 4 dquots.
- *
- * And in the worst case, there's a rename operation that can be modifying up to
- * 4 inodes with dquots attached to them. In reality, the only inodes that can
- * have their dquots modified are the source and destination directory inodes
- * due to directory name creation and removal. That can require space allocation
- * and/or freeing on both directory inodes, and hence all three dquots on each
- * inode can be modified. And if the directories are world writeable, all the
- * dquots can be unique and so 6 dquots can be modified....
- *
- * And, of course, we also need to take into account the dquot log format item
- * used to describe each dquot.
- */
-#define XFS_DQUOT_LOGRES(mp)   \
-       ((sizeof(struct xfs_dq_logformat) + sizeof(struct xfs_disk_dquot)) * 6)
-
-/*
- * These are the structures used to lay out dquots and quotaoff
- * records on the log. Quite similar to those of inodes.
- */
-
-/*
- * log format struct for dquots.
- * The first two fields must be the type and size fitting into
- * 32 bits : log_recovery code assumes that.
- */
-typedef struct xfs_dq_logformat {
-       __uint16_t              qlf_type;      /* dquot log item type */
-       __uint16_t              qlf_size;      /* size of this item */
-       xfs_dqid_t              qlf_id;        /* usr/grp/proj id : 32 bits */
-       __int64_t               qlf_blkno;     /* blkno of dquot buffer */
-       __int32_t               qlf_len;       /* len of dquot buffer */
-       __uint32_t              qlf_boffset;   /* off of dquot in buffer */
-} xfs_dq_logformat_t;
-
-/*
- * log format struct for QUOTAOFF records.
- * The first two fields must be the type and size fitting into
- * 32 bits : log_recovery code assumes that.
- * We write two LI_QUOTAOFF logitems per quotaoff, the last one keeps a pointer
- * to the first and ensures that the first logitem is taken out of the AIL
- * only when the last one is securely committed.
- */
-typedef struct xfs_qoff_logformat {
-       unsigned short          qf_type;        /* quotaoff log item type */
-       unsigned short          qf_size;        /* size of this item */
-       unsigned int            qf_flags;       /* USR and/or GRP */
-       char                    qf_pad[12];     /* padding for future */
-} xfs_qoff_logformat_t;
-
-
-/*
- * Disk quotas status in m_qflags, and also sb_qflags. 16 bits.
- */
-#define XFS_UQUOTA_ACCT        0x0001  /* user quota accounting ON */
-#define XFS_UQUOTA_ENFD        0x0002  /* user quota limits enforced */
-#define XFS_UQUOTA_CHKD        0x0004  /* quotacheck run on usr quotas */
-#define XFS_PQUOTA_ACCT        0x0008  /* project quota accounting ON */
-#define XFS_OQUOTA_ENFD        0x0010  /* other (grp/prj) quota limits enforced */
-#define XFS_OQUOTA_CHKD        0x0020  /* quotacheck run on other (grp/prj) quotas */
-#define XFS_GQUOTA_ACCT        0x0040  /* group quota accounting ON */
+#include "xfs_quota_defs.h"
 
 /*
- * Conversion to and from the combined OQUOTA flag (if necessary)
- * is done only in xfs_sb_qflags_to_disk() and xfs_sb_qflags_from_disk()
+ * Kernel only quota definitions and functions
  */
-#define XFS_GQUOTA_ENFD        0x0080  /* group quota limits enforced */
-#define XFS_GQUOTA_CHKD        0x0100  /* quotacheck run on group quotas */
-#define XFS_PQUOTA_ENFD        0x0200  /* project quota limits enforced */
-#define XFS_PQUOTA_CHKD        0x0400  /* quotacheck run on project quotas */
 
-/*
- * Quota Accounting/Enforcement flags
- */
-#define XFS_ALL_QUOTA_ACCT     \
-               (XFS_UQUOTA_ACCT | XFS_GQUOTA_ACCT | XFS_PQUOTA_ACCT)
-#define XFS_ALL_QUOTA_ENFD     \
-               (XFS_UQUOTA_ENFD | XFS_GQUOTA_ENFD | XFS_PQUOTA_ENFD)
-#define XFS_ALL_QUOTA_CHKD     \
-               (XFS_UQUOTA_CHKD | XFS_GQUOTA_CHKD | XFS_PQUOTA_CHKD)
-
-#define XFS_IS_QUOTA_RUNNING(mp)       ((mp)->m_qflags & XFS_ALL_QUOTA_ACCT)
-#define XFS_IS_UQUOTA_RUNNING(mp)      ((mp)->m_qflags & XFS_UQUOTA_ACCT)
-#define XFS_IS_PQUOTA_RUNNING(mp)      ((mp)->m_qflags & XFS_PQUOTA_ACCT)
-#define XFS_IS_GQUOTA_RUNNING(mp)      ((mp)->m_qflags & XFS_GQUOTA_ACCT)
-#define XFS_IS_UQUOTA_ENFORCED(mp)     ((mp)->m_qflags & XFS_UQUOTA_ENFD)
-#define XFS_IS_GQUOTA_ENFORCED(mp)     ((mp)->m_qflags & XFS_GQUOTA_ENFD)
-#define XFS_IS_PQUOTA_ENFORCED(mp)     ((mp)->m_qflags & XFS_PQUOTA_ENFD)
-
-/*
- * Incore only flags for quotaoff - these bits get cleared when quota(s)
- * are in the process of getting turned off. These flags are in m_qflags but
- * never in sb_qflags.
- */
-#define XFS_UQUOTA_ACTIVE      0x1000  /* uquotas are being turned off */
-#define XFS_GQUOTA_ACTIVE      0x2000  /* gquotas are being turned off */
-#define XFS_PQUOTA_ACTIVE      0x4000  /* pquotas are being turned off */
-#define XFS_ALL_QUOTA_ACTIVE   \
-       (XFS_UQUOTA_ACTIVE | XFS_GQUOTA_ACTIVE | XFS_PQUOTA_ACTIVE)
-
-/*
- * Checking XFS_IS_*QUOTA_ON() while holding any inode lock guarantees
- * quota will be not be switched off as long as that inode lock is held.
- */
-#define XFS_IS_QUOTA_ON(mp)    ((mp)->m_qflags & (XFS_UQUOTA_ACTIVE | \
-                                                  XFS_GQUOTA_ACTIVE | \
-                                                  XFS_PQUOTA_ACTIVE))
-#define XFS_IS_OQUOTA_ON(mp)   ((mp)->m_qflags & (XFS_GQUOTA_ACTIVE | \
-                                                  XFS_PQUOTA_ACTIVE))
-#define XFS_IS_UQUOTA_ON(mp)   ((mp)->m_qflags & XFS_UQUOTA_ACTIVE)
-#define XFS_IS_GQUOTA_ON(mp)   ((mp)->m_qflags & XFS_GQUOTA_ACTIVE)
-#define XFS_IS_PQUOTA_ON(mp)   ((mp)->m_qflags & XFS_PQUOTA_ACTIVE)
-
-/*
- * Flags to tell various functions what to do. Not all of these are meaningful
- * to a single function. None of these XFS_QMOPT_* flags are meant to have
- * persistent values (ie. their values can and will change between versions)
- */
-#define XFS_QMOPT_DQALLOC      0x0000002 /* alloc dquot ondisk if needed */
-#define XFS_QMOPT_UQUOTA       0x0000004 /* user dquot requested */
-#define XFS_QMOPT_PQUOTA       0x0000008 /* project dquot requested */
-#define XFS_QMOPT_FORCE_RES    0x0000010 /* ignore quota limits */
-#define XFS_QMOPT_SBVERSION    0x0000040 /* change superblock version num */
-#define XFS_QMOPT_DOWARN        0x0000400 /* increase warning cnt if needed */
-#define XFS_QMOPT_DQREPAIR     0x0001000 /* repair dquot if damaged */
-#define XFS_QMOPT_GQUOTA       0x0002000 /* group dquot requested */
-#define XFS_QMOPT_ENOSPC       0x0004000 /* enospc instead of edquot (prj) */
-
-/*
- * flags to xfs_trans_mod_dquot to indicate which field needs to be
- * modified.
- */
-#define XFS_QMOPT_RES_REGBLKS  0x0010000
-#define XFS_QMOPT_RES_RTBLKS   0x0020000
-#define XFS_QMOPT_BCOUNT       0x0040000
-#define XFS_QMOPT_ICOUNT       0x0080000
-#define XFS_QMOPT_RTBCOUNT     0x0100000
-#define XFS_QMOPT_DELBCOUNT    0x0200000
-#define XFS_QMOPT_DELRTBCOUNT  0x0400000
-#define XFS_QMOPT_RES_INOS     0x0800000
-
-/*
- * flags for dqalloc.
- */
-#define XFS_QMOPT_INHERIT      0x1000000
-
-/*
- * flags to xfs_trans_mod_dquot.
- */
-#define XFS_TRANS_DQ_RES_BLKS  XFS_QMOPT_RES_REGBLKS
-#define XFS_TRANS_DQ_RES_RTBLKS        XFS_QMOPT_RES_RTBLKS
-#define XFS_TRANS_DQ_RES_INOS  XFS_QMOPT_RES_INOS
-#define XFS_TRANS_DQ_BCOUNT    XFS_QMOPT_BCOUNT
-#define XFS_TRANS_DQ_DELBCOUNT XFS_QMOPT_DELBCOUNT
-#define XFS_TRANS_DQ_ICOUNT    XFS_QMOPT_ICOUNT
-#define XFS_TRANS_DQ_RTBCOUNT  XFS_QMOPT_RTBCOUNT
-#define XFS_TRANS_DQ_DELRTBCOUNT XFS_QMOPT_DELRTBCOUNT
-
-
-#define XFS_QMOPT_QUOTALL      \
-               (XFS_QMOPT_UQUOTA | XFS_QMOPT_PQUOTA | XFS_QMOPT_GQUOTA)
-#define XFS_QMOPT_RESBLK_MASK  (XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_RES_RTBLKS)
+struct xfs_trans;
 
-#ifdef __KERNEL__
 /*
  * This check is done typically without holding the inode lock;
  * that may seem racy, but it is harmless in the context that it is used.
@@ -340,8 +87,9 @@ extern int xfs_trans_reserve_quota_bydquots(struct xfs_trans *,
                struct xfs_mount *, struct xfs_dquot *,
                struct xfs_dquot *, struct xfs_dquot *, long, long, uint);
 
-extern int xfs_qm_vop_dqalloc(struct xfs_inode *, uid_t, gid_t, prid_t, uint,
-               struct xfs_dquot **, struct xfs_dquot **, struct xfs_dquot **);
+extern int xfs_qm_vop_dqalloc(struct xfs_inode *, xfs_dqid_t, xfs_dqid_t,
+               prid_t, uint, struct xfs_dquot **, struct xfs_dquot **,
+               struct xfs_dquot **);
 extern void xfs_qm_vop_create_dqattach(struct xfs_trans *, struct xfs_inode *,
                struct xfs_dquot *, struct xfs_dquot *, struct xfs_dquot *);
 extern int xfs_qm_vop_rename_dqattach(struct xfs_inode **);
@@ -362,9 +110,9 @@ extern void xfs_qm_unmount_quotas(struct xfs_mount *);
 
 #else
 static inline int
-xfs_qm_vop_dqalloc(struct xfs_inode *ip, uid_t uid, gid_t gid, prid_t prid,
-               uint flags, struct xfs_dquot **udqp, struct xfs_dquot **gdqp,
-               struct xfs_dquot **pdqp)
+xfs_qm_vop_dqalloc(struct xfs_inode *ip, xfs_dqid_t uid, xfs_dqid_t gid,
+               prid_t prid, uint flags, struct xfs_dquot **udqp,
+               struct xfs_dquot **gdqp, struct xfs_dquot **pdqp)
 {
        *udqp = NULL;
        *gdqp = NULL;
@@ -415,5 +163,4 @@ extern int xfs_mount_reset_sbqflags(struct xfs_mount *);
 
 extern const struct xfs_buf_ops xfs_dquot_buf_ops;
 
-#endif /* __KERNEL__ */
 #endif /* __XFS_QUOTA_H__ */
diff --git a/fs/xfs/xfs_quota_defs.h b/fs/xfs/xfs_quota_defs.h
new file mode 100644 (file)
index 0000000..e6b0d6e
--- /dev/null
@@ -0,0 +1,157 @@
+/*
+ * Copyright (c) 2000-2005 Silicon Graphics, Inc.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+#ifndef __XFS_QUOTA_DEFS_H__
+#define __XFS_QUOTA_DEFS_H__
+
+/*
+ * Quota definitions shared between user and kernel source trees.
+ */
+
+/*
+ * Even though users may not have quota limits occupying all 64-bits,
+ * they may need 64-bit accounting. Hence, 64-bit quota-counters,
+ * and quota-limits. This is a waste in the common case, but hey ...
+ */
+typedef __uint64_t     xfs_qcnt_t;
+typedef __uint16_t     xfs_qwarncnt_t;
+
+/*
+ * flags for q_flags field in the dquot.
+ */
+#define XFS_DQ_USER            0x0001          /* a user quota */
+#define XFS_DQ_PROJ            0x0002          /* project quota */
+#define XFS_DQ_GROUP           0x0004          /* a group quota */
+#define XFS_DQ_DIRTY           0x0008          /* dquot is dirty */
+#define XFS_DQ_FREEING         0x0010          /* dquot is beeing torn down */
+
+#define XFS_DQ_ALLTYPES                (XFS_DQ_USER|XFS_DQ_PROJ|XFS_DQ_GROUP)
+
+#define XFS_DQ_FLAGS \
+       { XFS_DQ_USER,          "USER" }, \
+       { XFS_DQ_PROJ,          "PROJ" }, \
+       { XFS_DQ_GROUP,         "GROUP" }, \
+       { XFS_DQ_DIRTY,         "DIRTY" }, \
+       { XFS_DQ_FREEING,       "FREEING" }
+
+/*
+ * We have the possibility of all three quota types being active at once, and
+ * hence free space modification requires modification of all three current
+ * dquots in a single transaction. For this case we need to have a reservation
+ * of at least 3 dquots.
+ *
+ * However, a chmod operation can change both UID and GID in a single
+ * transaction, resulting in requiring {old, new} x {uid, gid} dquots to be
+ * modified. Hence for this case we need to reserve space for at least 4 dquots.
+ *
+ * And in the worst case, there's a rename operation that can be modifying up to
+ * 4 inodes with dquots attached to them. In reality, the only inodes that can
+ * have their dquots modified are the source and destination directory inodes
+ * due to directory name creation and removal. That can require space allocation
+ * and/or freeing on both directory inodes, and hence all three dquots on each
+ * inode can be modified. And if the directories are world writeable, all the
+ * dquots can be unique and so 6 dquots can be modified....
+ *
+ * And, of course, we also need to take into account the dquot log format item
+ * used to describe each dquot.
+ */
+#define XFS_DQUOT_LOGRES(mp)   \
+       ((sizeof(struct xfs_dq_logformat) + sizeof(struct xfs_disk_dquot)) * 6)
+
+#define XFS_IS_QUOTA_RUNNING(mp)       ((mp)->m_qflags & XFS_ALL_QUOTA_ACCT)
+#define XFS_IS_UQUOTA_RUNNING(mp)      ((mp)->m_qflags & XFS_UQUOTA_ACCT)
+#define XFS_IS_PQUOTA_RUNNING(mp)      ((mp)->m_qflags & XFS_PQUOTA_ACCT)
+#define XFS_IS_GQUOTA_RUNNING(mp)      ((mp)->m_qflags & XFS_GQUOTA_ACCT)
+#define XFS_IS_UQUOTA_ENFORCED(mp)     ((mp)->m_qflags & XFS_UQUOTA_ENFD)
+#define XFS_IS_GQUOTA_ENFORCED(mp)     ((mp)->m_qflags & XFS_GQUOTA_ENFD)
+#define XFS_IS_PQUOTA_ENFORCED(mp)     ((mp)->m_qflags & XFS_PQUOTA_ENFD)
+
+/*
+ * Incore only flags for quotaoff - these bits get cleared when quota(s)
+ * are in the process of getting turned off. These flags are in m_qflags but
+ * never in sb_qflags.
+ */
+#define XFS_UQUOTA_ACTIVE      0x1000  /* uquotas are being turned off */
+#define XFS_GQUOTA_ACTIVE      0x2000  /* gquotas are being turned off */
+#define XFS_PQUOTA_ACTIVE      0x4000  /* pquotas are being turned off */
+#define XFS_ALL_QUOTA_ACTIVE   \
+       (XFS_UQUOTA_ACTIVE | XFS_GQUOTA_ACTIVE | XFS_PQUOTA_ACTIVE)
+
+/*
+ * Checking XFS_IS_*QUOTA_ON() while holding any inode lock guarantees
+ * quota will be not be switched off as long as that inode lock is held.
+ */
+#define XFS_IS_QUOTA_ON(mp)    ((mp)->m_qflags & (XFS_UQUOTA_ACTIVE | \
+                                                  XFS_GQUOTA_ACTIVE | \
+                                                  XFS_PQUOTA_ACTIVE))
+#define XFS_IS_OQUOTA_ON(mp)   ((mp)->m_qflags & (XFS_GQUOTA_ACTIVE | \
+                                                  XFS_PQUOTA_ACTIVE))
+#define XFS_IS_UQUOTA_ON(mp)   ((mp)->m_qflags & XFS_UQUOTA_ACTIVE)
+#define XFS_IS_GQUOTA_ON(mp)   ((mp)->m_qflags & XFS_GQUOTA_ACTIVE)
+#define XFS_IS_PQUOTA_ON(mp)   ((mp)->m_qflags & XFS_PQUOTA_ACTIVE)
+
+/*
+ * Flags to tell various functions what to do. Not all of these are meaningful
+ * to a single function. None of these XFS_QMOPT_* flags are meant to have
+ * persistent values (ie. their values can and will change between versions)
+ */
+#define XFS_QMOPT_DQALLOC      0x0000002 /* alloc dquot ondisk if needed */
+#define XFS_QMOPT_UQUOTA       0x0000004 /* user dquot requested */
+#define XFS_QMOPT_PQUOTA       0x0000008 /* project dquot requested */
+#define XFS_QMOPT_FORCE_RES    0x0000010 /* ignore quota limits */
+#define XFS_QMOPT_SBVERSION    0x0000040 /* change superblock version num */
+#define XFS_QMOPT_DOWARN        0x0000400 /* increase warning cnt if needed */
+#define XFS_QMOPT_DQREPAIR     0x0001000 /* repair dquot if damaged */
+#define XFS_QMOPT_GQUOTA       0x0002000 /* group dquot requested */
+#define XFS_QMOPT_ENOSPC       0x0004000 /* enospc instead of edquot (prj) */
+
+/*
+ * flags to xfs_trans_mod_dquot to indicate which field needs to be
+ * modified.
+ */
+#define XFS_QMOPT_RES_REGBLKS  0x0010000
+#define XFS_QMOPT_RES_RTBLKS   0x0020000
+#define XFS_QMOPT_BCOUNT       0x0040000
+#define XFS_QMOPT_ICOUNT       0x0080000
+#define XFS_QMOPT_RTBCOUNT     0x0100000
+#define XFS_QMOPT_DELBCOUNT    0x0200000
+#define XFS_QMOPT_DELRTBCOUNT  0x0400000
+#define XFS_QMOPT_RES_INOS     0x0800000
+
+/*
+ * flags for dqalloc.
+ */
+#define XFS_QMOPT_INHERIT      0x1000000
+
+/*
+ * flags to xfs_trans_mod_dquot.
+ */
+#define XFS_TRANS_DQ_RES_BLKS  XFS_QMOPT_RES_REGBLKS
+#define XFS_TRANS_DQ_RES_RTBLKS        XFS_QMOPT_RES_RTBLKS
+#define XFS_TRANS_DQ_RES_INOS  XFS_QMOPT_RES_INOS
+#define XFS_TRANS_DQ_BCOUNT    XFS_QMOPT_BCOUNT
+#define XFS_TRANS_DQ_DELBCOUNT XFS_QMOPT_DELBCOUNT
+#define XFS_TRANS_DQ_ICOUNT    XFS_QMOPT_ICOUNT
+#define XFS_TRANS_DQ_RTBCOUNT  XFS_QMOPT_RTBCOUNT
+#define XFS_TRANS_DQ_DELRTBCOUNT XFS_QMOPT_DELRTBCOUNT
+
+
+#define XFS_QMOPT_QUOTALL      \
+               (XFS_QMOPT_UQUOTA | XFS_QMOPT_PQUOTA | XFS_QMOPT_GQUOTA)
+#define XFS_QMOPT_RESBLK_MASK  (XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_RES_RTBLKS)
+
+#endif /* __XFS_QUOTA_H__ */
index 20e30f93b0c7dab8b548527c46634c49486e02cd..1326d81596c2920b27f45021b67b76979e5ef387 100644 (file)
  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
 #include "xfs.h"
-#include "xfs_sb.h"
+#include "xfs_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_log.h"
+#include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
 #include "xfs_quota.h"
@@ -53,6 +55,18 @@ xfs_fs_get_xstate(
        return -xfs_qm_scall_getqstat(mp, fqs);
 }
 
+STATIC int
+xfs_fs_get_xstatev(
+       struct super_block      *sb,
+       struct fs_quota_statv   *fqs)
+{
+       struct xfs_mount        *mp = XFS_M(sb);
+
+       if (!XFS_IS_QUOTA_RUNNING(mp))
+               return -ENOSYS;
+       return -xfs_qm_scall_getqstatv(mp, fqs);
+}
+
 STATIC int
 xfs_fs_set_xstate(
        struct super_block      *sb,
@@ -133,6 +147,7 @@ xfs_fs_set_dqblk(
 }
 
 const struct quotactl_ops xfs_quotactl_operations = {
+       .get_xstatev            = xfs_fs_get_xstatev,
        .get_xstate             = xfs_fs_get_xstate,
        .set_xstate             = xfs_fs_set_xstate,
        .get_dqblk              = xfs_fs_get_dqblk,
diff --git a/fs/xfs/xfs_rename.c b/fs/xfs/xfs_rename.c
deleted file mode 100644 (file)
index 30ff5f4..0000000
+++ /dev/null
@@ -1,346 +0,0 @@
-/*
- * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- */
-#include "xfs.h"
-#include "xfs_fs.h"
-#include "xfs_types.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
-#include "xfs_sb.h"
-#include "xfs_ag.h"
-#include "xfs_dir2.h"
-#include "xfs_mount.h"
-#include "xfs_da_btree.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_dinode.h"
-#include "xfs_inode.h"
-#include "xfs_inode_item.h"
-#include "xfs_bmap.h"
-#include "xfs_error.h"
-#include "xfs_quota.h"
-#include "xfs_utils.h"
-#include "xfs_trans_space.h"
-#include "xfs_vnodeops.h"
-#include "xfs_trace.h"
-
-
-/*
- * Enter all inodes for a rename transaction into a sorted array.
- */
-STATIC void
-xfs_sort_for_rename(
-       xfs_inode_t     *dp1,   /* in: old (source) directory inode */
-       xfs_inode_t     *dp2,   /* in: new (target) directory inode */
-       xfs_inode_t     *ip1,   /* in: inode of old entry */
-       xfs_inode_t     *ip2,   /* in: inode of new entry, if it
-                                  already exists, NULL otherwise. */
-       xfs_inode_t     **i_tab,/* out: array of inode returned, sorted */
-       int             *num_inodes)  /* out: number of inodes in array */
-{
-       xfs_inode_t             *temp;
-       int                     i, j;
-
-       /*
-        * i_tab contains a list of pointers to inodes.  We initialize
-        * the table here & we'll sort it.  We will then use it to
-        * order the acquisition of the inode locks.
-        *
-        * Note that the table may contain duplicates.  e.g., dp1 == dp2.
-        */
-       i_tab[0] = dp1;
-       i_tab[1] = dp2;
-       i_tab[2] = ip1;
-       if (ip2) {
-               *num_inodes = 4;
-               i_tab[3] = ip2;
-       } else {
-               *num_inodes = 3;
-               i_tab[3] = NULL;
-       }
-
-       /*
-        * Sort the elements via bubble sort.  (Remember, there are at
-        * most 4 elements to sort, so this is adequate.)
-        */
-       for (i = 0; i < *num_inodes; i++) {
-               for (j = 1; j < *num_inodes; j++) {
-                       if (i_tab[j]->i_ino < i_tab[j-1]->i_ino) {
-                               temp = i_tab[j];
-                               i_tab[j] = i_tab[j-1];
-                               i_tab[j-1] = temp;
-                       }
-               }
-       }
-}
-
-/*
- * xfs_rename
- */
-int
-xfs_rename(
-       xfs_inode_t     *src_dp,
-       struct xfs_name *src_name,
-       xfs_inode_t     *src_ip,
-       xfs_inode_t     *target_dp,
-       struct xfs_name *target_name,
-       xfs_inode_t     *target_ip)
-{
-       xfs_trans_t     *tp = NULL;
-       xfs_mount_t     *mp = src_dp->i_mount;
-       int             new_parent;             /* moving to a new dir */
-       int             src_is_directory;       /* src_name is a directory */
-       int             error;
-       xfs_bmap_free_t free_list;
-       xfs_fsblock_t   first_block;
-       int             cancel_flags;
-       int             committed;
-       xfs_inode_t     *inodes[4];
-       int             spaceres;
-       int             num_inodes;
-
-       trace_xfs_rename(src_dp, target_dp, src_name, target_name);
-
-       new_parent = (src_dp != target_dp);
-       src_is_directory = S_ISDIR(src_ip->i_d.di_mode);
-
-       xfs_sort_for_rename(src_dp, target_dp, src_ip, target_ip,
-                               inodes, &num_inodes);
-
-       xfs_bmap_init(&free_list, &first_block);
-       tp = xfs_trans_alloc(mp, XFS_TRANS_RENAME);
-       cancel_flags = XFS_TRANS_RELEASE_LOG_RES;
-       spaceres = XFS_RENAME_SPACE_RES(mp, target_name->len);
-       error = xfs_trans_reserve(tp, spaceres, XFS_RENAME_LOG_RES(mp), 0,
-                       XFS_TRANS_PERM_LOG_RES, XFS_RENAME_LOG_COUNT);
-       if (error == ENOSPC) {
-               spaceres = 0;
-               error = xfs_trans_reserve(tp, 0, XFS_RENAME_LOG_RES(mp), 0,
-                               XFS_TRANS_PERM_LOG_RES, XFS_RENAME_LOG_COUNT);
-       }
-       if (error) {
-               xfs_trans_cancel(tp, 0);
-               goto std_return;
-       }
-
-       /*
-        * Attach the dquots to the inodes
-        */
-       error = xfs_qm_vop_rename_dqattach(inodes);
-       if (error) {
-               xfs_trans_cancel(tp, cancel_flags);
-               goto std_return;
-       }
-
-       /*
-        * Lock all the participating inodes. Depending upon whether
-        * the target_name exists in the target directory, and
-        * whether the target directory is the same as the source
-        * directory, we can lock from 2 to 4 inodes.
-        */
-       xfs_lock_inodes(inodes, num_inodes, XFS_ILOCK_EXCL);
-
-       /*
-        * Join all the inodes to the transaction. From this point on,
-        * we can rely on either trans_commit or trans_cancel to unlock
-        * them.
-        */
-       xfs_trans_ijoin(tp, src_dp, XFS_ILOCK_EXCL);
-       if (new_parent)
-               xfs_trans_ijoin(tp, target_dp, XFS_ILOCK_EXCL);
-       xfs_trans_ijoin(tp, src_ip, XFS_ILOCK_EXCL);
-       if (target_ip)
-               xfs_trans_ijoin(tp, target_ip, XFS_ILOCK_EXCL);
-
-       /*
-        * If we are using project inheritance, we only allow renames
-        * into our tree when the project IDs are the same; else the
-        * tree quota mechanism would be circumvented.
-        */
-       if (unlikely((target_dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
-                    (xfs_get_projid(target_dp) != xfs_get_projid(src_ip)))) {
-               error = XFS_ERROR(EXDEV);
-               goto error_return;
-       }
-
-       /*
-        * Set up the target.
-        */
-       if (target_ip == NULL) {
-               /*
-                * If there's no space reservation, check the entry will
-                * fit before actually inserting it.
-                */
-               error = xfs_dir_canenter(tp, target_dp, target_name, spaceres);
-               if (error)
-                       goto error_return;
-               /*
-                * If target does not exist and the rename crosses
-                * directories, adjust the target directory link count
-                * to account for the ".." reference from the new entry.
-                */
-               error = xfs_dir_createname(tp, target_dp, target_name,
-                                               src_ip->i_ino, &first_block,
-                                               &free_list, spaceres);
-               if (error == ENOSPC)
-                       goto error_return;
-               if (error)
-                       goto abort_return;
-
-               xfs_trans_ichgtime(tp, target_dp,
-                                       XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
-
-               if (new_parent && src_is_directory) {
-                       error = xfs_bumplink(tp, target_dp);
-                       if (error)
-                               goto abort_return;
-               }
-       } else { /* target_ip != NULL */
-               /*
-                * If target exists and it's a directory, check that both
-                * target and source are directories and that target can be
-                * destroyed, or that neither is a directory.
-                */
-               if (S_ISDIR(target_ip->i_d.di_mode)) {
-                       /*
-                        * Make sure target dir is empty.
-                        */
-                       if (!(xfs_dir_isempty(target_ip)) ||
-                           (target_ip->i_d.di_nlink > 2)) {
-                               error = XFS_ERROR(EEXIST);
-                               goto error_return;
-                       }
-               }
-
-               /*
-                * Link the source inode under the target name.
-                * If the source inode is a directory and we are moving
-                * it across directories, its ".." entry will be
-                * inconsistent until we replace that down below.
-                *
-                * In case there is already an entry with the same
-                * name at the destination directory, remove it first.
-                */
-               error = xfs_dir_replace(tp, target_dp, target_name,
-                                       src_ip->i_ino,
-                                       &first_block, &free_list, spaceres);
-               if (error)
-                       goto abort_return;
-
-               xfs_trans_ichgtime(tp, target_dp,
-                                       XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
-
-               /*
-                * Decrement the link count on the target since the target
-                * dir no longer points to it.
-                */
-               error = xfs_droplink(tp, target_ip);
-               if (error)
-                       goto abort_return;
-
-               if (src_is_directory) {
-                       /*
-                        * Drop the link from the old "." entry.
-                        */
-                       error = xfs_droplink(tp, target_ip);
-                       if (error)
-                               goto abort_return;
-               }
-       } /* target_ip != NULL */
-
-       /*
-        * Remove the source.
-        */
-       if (new_parent && src_is_directory) {
-               /*
-                * Rewrite the ".." entry to point to the new
-                * directory.
-                */
-               error = xfs_dir_replace(tp, src_ip, &xfs_name_dotdot,
-                                       target_dp->i_ino,
-                                       &first_block, &free_list, spaceres);
-               ASSERT(error != EEXIST);
-               if (error)
-                       goto abort_return;
-       }
-
-       /*
-        * We always want to hit the ctime on the source inode.
-        *
-        * This isn't strictly required by the standards since the source
-        * inode isn't really being changed, but old unix file systems did
-        * it and some incremental backup programs won't work without it.
-        */
-       xfs_trans_ichgtime(tp, src_ip, XFS_ICHGTIME_CHG);
-       xfs_trans_log_inode(tp, src_ip, XFS_ILOG_CORE);
-
-       /*
-        * Adjust the link count on src_dp.  This is necessary when
-        * renaming a directory, either within one parent when
-        * the target existed, or across two parent directories.
-        */
-       if (src_is_directory && (new_parent || target_ip != NULL)) {
-
-               /*
-                * Decrement link count on src_directory since the
-                * entry that's moved no longer points to it.
-                */
-               error = xfs_droplink(tp, src_dp);
-               if (error)
-                       goto abort_return;
-       }
-
-       error = xfs_dir_removename(tp, src_dp, src_name, src_ip->i_ino,
-                                       &first_block, &free_list, spaceres);
-       if (error)
-               goto abort_return;
-
-       xfs_trans_ichgtime(tp, src_dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
-       xfs_trans_log_inode(tp, src_dp, XFS_ILOG_CORE);
-       if (new_parent)
-               xfs_trans_log_inode(tp, target_dp, XFS_ILOG_CORE);
-
-       /*
-        * If this is a synchronous mount, make sure that the
-        * rename transaction goes to disk before returning to
-        * the user.
-        */
-       if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC)) {
-               xfs_trans_set_sync(tp);
-       }
-
-       error = xfs_bmap_finish(&tp, &free_list, &committed);
-       if (error) {
-               xfs_bmap_cancel(&free_list);
-               xfs_trans_cancel(tp, (XFS_TRANS_RELEASE_LOG_RES |
-                                XFS_TRANS_ABORT));
-               goto std_return;
-       }
-
-       /*
-        * trans_commit will unlock src_ip, target_ip & decrement
-        * the vnode references.
-        */
-       return xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
-
- abort_return:
-       cancel_flags |= XFS_TRANS_ABORT;
- error_return:
-       xfs_bmap_cancel(&free_list);
-       xfs_trans_cancel(tp, cancel_flags);
- std_return:
-       return error;
-}
index 98dc670d3ee04182da47b27e7db1695b71807434..6f9e63c9fc2617ab89966447083527f1d0c94257 100644 (file)
  */
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_types.h"
+#include "xfs_format.h"
 #include "xfs_bit.h"
 #include "xfs_log.h"
 #include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
-#include "xfs_dir2.h"
 #include "xfs_mount.h"
 #include "xfs_bmap_btree.h"
 #include "xfs_dinode.h"
 #include "xfs_inode.h"
 #include "xfs_alloc.h"
 #include "xfs_bmap.h"
+#include "xfs_bmap_util.h"
 #include "xfs_rtalloc.h"
 #include "xfs_fsops.h"
 #include "xfs_error.h"
 #include "xfs_inode_item.h"
 #include "xfs_trans_space.h"
-#include "xfs_utils.h"
 #include "xfs_trace.h"
 #include "xfs_buf.h"
 #include "xfs_icache.h"
@@ -101,10 +100,9 @@ xfs_growfs_rt_alloc(
                /*
                 * Reserve space & log for one extent added to the file.
                 */
-               if ((error = xfs_trans_reserve(tp, resblks,
-                               XFS_GROWRTALLOC_LOG_RES(mp), 0,
-                               XFS_TRANS_PERM_LOG_RES,
-                               XFS_DEFAULT_PERM_LOG_COUNT)))
+               error = xfs_trans_reserve(tp, &M_RES(mp)->tr_growdata,
+                                         resblks, 0);
+               if (error)
                        goto error_cancel;
                cancelflags = XFS_TRANS_RELEASE_LOG_RES;
                /*
@@ -147,8 +145,9 @@ xfs_growfs_rt_alloc(
                        /*
                         * Reserve log for one block zeroing.
                         */
-                       if ((error = xfs_trans_reserve(tp, 0,
-                                       XFS_GROWRTZERO_LOG_RES(mp), 0, 0, 0)))
+                       error = xfs_trans_reserve(tp, &M_RES(mp)->tr_growrtzero,
+                                                 0, 0);
+                       if (error)
                                goto error_cancel;
                        /*
                         * Lock the bitmap inode.
@@ -736,8 +735,8 @@ xfs_rtallocate_range(
 {
        xfs_rtblock_t   end;            /* end of the allocated extent */
        int             error;          /* error value */
-       xfs_rtblock_t   postblock;      /* first block allocated > end */
-       xfs_rtblock_t   preblock;       /* first block allocated < start */
+       xfs_rtblock_t   postblock = 0;  /* first block allocated > end */
+       xfs_rtblock_t   preblock = 0;   /* first block allocated < start */
 
        end = start + len - 1;
        /*
@@ -1958,8 +1957,9 @@ xfs_growfs_rt(
                 * Start a transaction, get the log reservation.
                 */
                tp = xfs_trans_alloc(mp, XFS_TRANS_GROWFSRT_FREE);
-               if ((error = xfs_trans_reserve(tp, 0,
-                               XFS_GROWRTFREE_LOG_RES(nmp), 0, 0, 0)))
+               error = xfs_trans_reserve(tp, &M_RES(mp)->tr_growrtfree,
+                                         0, 0);
+               if (error)
                        goto error_cancel;
                /*
                 * Lock out other callers by grabbing the bitmap inode lock.
@@ -2148,7 +2148,7 @@ xfs_rtfree_extent(
        ASSERT(mp->m_rbmip->i_itemp != NULL);
        ASSERT(xfs_isilocked(mp->m_rbmip, XFS_ILOCK_EXCL));
 
-#if defined(__KERNEL__) && defined(DEBUG)
+#ifdef DEBUG
        /*
         * Check to see that this whole range is currently allocated.
         */
index f7f3a359c1c5a238afd4884b19f4a74363e36287..b2a1a24c0e2f3d8037cdd03f2b8deffc298d38c9 100644 (file)
 #ifndef __XFS_RTALLOC_H__
 #define        __XFS_RTALLOC_H__
 
+/* kernel only definitions and functions */
+
 struct xfs_mount;
 struct xfs_trans;
 
-/* Min and max rt extent sizes, specified in bytes */
-#define        XFS_MAX_RTEXTSIZE       (1024 * 1024 * 1024)    /* 1GB */
-#define        XFS_DFL_RTEXTSIZE       (64 * 1024)             /* 64kB */
-#define        XFS_MIN_RTEXTSIZE       (4 * 1024)              /* 4kB */
-
-/*
- * Constants for bit manipulations.
- */
-#define        XFS_NBBYLOG     3               /* log2(NBBY) */
-#define        XFS_WORDLOG     2               /* log2(sizeof(xfs_rtword_t)) */
-#define        XFS_NBWORDLOG   (XFS_NBBYLOG + XFS_WORDLOG)
-#define        XFS_NBWORD      (1 << XFS_NBWORDLOG)
-#define        XFS_WORDMASK    ((1 << XFS_WORDLOG) - 1)
-
-#define        XFS_BLOCKSIZE(mp)       ((mp)->m_sb.sb_blocksize)
-#define        XFS_BLOCKMASK(mp)       ((mp)->m_blockmask)
-#define        XFS_BLOCKWSIZE(mp)      ((mp)->m_blockwsize)
-#define        XFS_BLOCKWMASK(mp)      ((mp)->m_blockwmask)
-
-/*
- * Summary and bit manipulation macros.
- */
-#define        XFS_SUMOFFS(mp,ls,bb)   ((int)((ls) * (mp)->m_sb.sb_rbmblocks + (bb)))
-#define        XFS_SUMOFFSTOBLOCK(mp,s)        \
-       (((s) * (uint)sizeof(xfs_suminfo_t)) >> (mp)->m_sb.sb_blocklog)
-#define        XFS_SUMPTR(mp,bp,so)    \
-       ((xfs_suminfo_t *)((bp)->b_addr + \
-               (((so) * (uint)sizeof(xfs_suminfo_t)) & XFS_BLOCKMASK(mp))))
-
-#define        XFS_BITTOBLOCK(mp,bi)   ((bi) >> (mp)->m_blkbit_log)
-#define        XFS_BLOCKTOBIT(mp,bb)   ((bb) << (mp)->m_blkbit_log)
-#define        XFS_BITTOWORD(mp,bi)    \
-       ((int)(((bi) >> XFS_NBWORDLOG) & XFS_BLOCKWMASK(mp)))
-
-#define        XFS_RTMIN(a,b)  ((a) < (b) ? (a) : (b))
-#define        XFS_RTMAX(a,b)  ((a) > (b) ? (a) : (b))
-
-#define        XFS_RTLOBIT(w)  xfs_lowbit32(w)
-#define        XFS_RTHIBIT(w)  xfs_highbit32(w)
-
-#if XFS_BIG_BLKNOS
-#define        XFS_RTBLOCKLOG(b)       xfs_highbit64(b)
-#else
-#define        XFS_RTBLOCKLOG(b)       xfs_highbit32(b)
-#endif
-
-
-#ifdef __KERNEL__
-
 #ifdef CONFIG_XFS_RT
 /*
  * Function prototypes for exported functions.
@@ -161,6 +114,4 @@ xfs_rtmount_init(
 # define xfs_rtunmount_inodes(m)
 #endif /* CONFIG_XFS_RT */
 
-#endif /* __KERNEL__ */
-
 #endif /* __XFS_RTALLOC_H__ */
diff --git a/fs/xfs/xfs_sb.c b/fs/xfs/xfs_sb.c
new file mode 100644 (file)
index 0000000..a5b59d9
--- /dev/null
@@ -0,0 +1,834 @@
+/*
+ * Copyright (c) 2000-2005 Silicon Graphics, Inc.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+#include "xfs.h"
+#include "xfs_fs.h"
+#include "xfs_format.h"
+#include "xfs_bit.h"
+#include "xfs_log.h"
+#include "xfs_inum.h"
+#include "xfs_trans.h"
+#include "xfs_trans_priv.h"
+#include "xfs_sb.h"
+#include "xfs_ag.h"
+#include "xfs_mount.h"
+#include "xfs_da_btree.h"
+#include "xfs_dir2_format.h"
+#include "xfs_dir2.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_alloc_btree.h"
+#include "xfs_ialloc_btree.h"
+#include "xfs_dinode.h"
+#include "xfs_inode.h"
+#include "xfs_btree.h"
+#include "xfs_ialloc.h"
+#include "xfs_alloc.h"
+#include "xfs_rtalloc.h"
+#include "xfs_bmap.h"
+#include "xfs_error.h"
+#include "xfs_quota.h"
+#include "xfs_fsops.h"
+#include "xfs_trace.h"
+#include "xfs_cksum.h"
+#include "xfs_buf_item.h"
+
+/*
+ * Physical superblock buffer manipulations. Shared with libxfs in userspace.
+ */
+
+static const struct {
+       short offset;
+       short type;     /* 0 = integer
+                        * 1 = binary / string (no translation)
+                        */
+} xfs_sb_info[] = {
+       { offsetof(xfs_sb_t, sb_magicnum),      0 },
+       { offsetof(xfs_sb_t, sb_blocksize),     0 },
+       { offsetof(xfs_sb_t, sb_dblocks),       0 },
+       { offsetof(xfs_sb_t, sb_rblocks),       0 },
+       { offsetof(xfs_sb_t, sb_rextents),      0 },
+       { offsetof(xfs_sb_t, sb_uuid),          1 },
+       { offsetof(xfs_sb_t, sb_logstart),      0 },
+       { offsetof(xfs_sb_t, sb_rootino),       0 },
+       { offsetof(xfs_sb_t, sb_rbmino),        0 },
+       { offsetof(xfs_sb_t, sb_rsumino),       0 },
+       { offsetof(xfs_sb_t, sb_rextsize),      0 },
+       { offsetof(xfs_sb_t, sb_agblocks),      0 },
+       { offsetof(xfs_sb_t, sb_agcount),       0 },
+       { offsetof(xfs_sb_t, sb_rbmblocks),     0 },
+       { offsetof(xfs_sb_t, sb_logblocks),     0 },
+       { offsetof(xfs_sb_t, sb_versionnum),    0 },
+       { offsetof(xfs_sb_t, sb_sectsize),      0 },
+       { offsetof(xfs_sb_t, sb_inodesize),     0 },
+       { offsetof(xfs_sb_t, sb_inopblock),     0 },
+       { offsetof(xfs_sb_t, sb_fname[0]),      1 },
+       { offsetof(xfs_sb_t, sb_blocklog),      0 },
+       { offsetof(xfs_sb_t, sb_sectlog),       0 },
+       { offsetof(xfs_sb_t, sb_inodelog),      0 },
+       { offsetof(xfs_sb_t, sb_inopblog),      0 },
+       { offsetof(xfs_sb_t, sb_agblklog),      0 },
+       { offsetof(xfs_sb_t, sb_rextslog),      0 },
+       { offsetof(xfs_sb_t, sb_inprogress),    0 },
+       { offsetof(xfs_sb_t, sb_imax_pct),      0 },
+       { offsetof(xfs_sb_t, sb_icount),        0 },
+       { offsetof(xfs_sb_t, sb_ifree),         0 },
+       { offsetof(xfs_sb_t, sb_fdblocks),      0 },
+       { offsetof(xfs_sb_t, sb_frextents),     0 },
+       { offsetof(xfs_sb_t, sb_uquotino),      0 },
+       { offsetof(xfs_sb_t, sb_gquotino),      0 },
+       { offsetof(xfs_sb_t, sb_qflags),        0 },
+       { offsetof(xfs_sb_t, sb_flags),         0 },
+       { offsetof(xfs_sb_t, sb_shared_vn),     0 },
+       { offsetof(xfs_sb_t, sb_inoalignmt),    0 },
+       { offsetof(xfs_sb_t, sb_unit),          0 },
+       { offsetof(xfs_sb_t, sb_width),         0 },
+       { offsetof(xfs_sb_t, sb_dirblklog),     0 },
+       { offsetof(xfs_sb_t, sb_logsectlog),    0 },
+       { offsetof(xfs_sb_t, sb_logsectsize),   0 },
+       { offsetof(xfs_sb_t, sb_logsunit),      0 },
+       { offsetof(xfs_sb_t, sb_features2),     0 },
+       { offsetof(xfs_sb_t, sb_bad_features2), 0 },
+       { offsetof(xfs_sb_t, sb_features_compat),       0 },
+       { offsetof(xfs_sb_t, sb_features_ro_compat),    0 },
+       { offsetof(xfs_sb_t, sb_features_incompat),     0 },
+       { offsetof(xfs_sb_t, sb_features_log_incompat), 0 },
+       { offsetof(xfs_sb_t, sb_crc),           0 },
+       { offsetof(xfs_sb_t, sb_pad),           0 },
+       { offsetof(xfs_sb_t, sb_pquotino),      0 },
+       { offsetof(xfs_sb_t, sb_lsn),           0 },
+       { sizeof(xfs_sb_t),                     0 }
+};
+
+/*
+ * Reference counting access wrappers to the perag structures.
+ * Because we never free per-ag structures, the only thing we
+ * have to protect against changes is the tree structure itself.
+ */
+struct xfs_perag *
+xfs_perag_get(
+       struct xfs_mount        *mp,
+       xfs_agnumber_t          agno)
+{
+       struct xfs_perag        *pag;
+       int                     ref = 0;
+
+       rcu_read_lock();
+       pag = radix_tree_lookup(&mp->m_perag_tree, agno);
+       if (pag) {
+               ASSERT(atomic_read(&pag->pag_ref) >= 0);
+               ref = atomic_inc_return(&pag->pag_ref);
+       }
+       rcu_read_unlock();
+       trace_xfs_perag_get(mp, agno, ref, _RET_IP_);
+       return pag;
+}
+
+/*
+ * search from @first to find the next perag with the given tag set.
+ */
+struct xfs_perag *
+xfs_perag_get_tag(
+       struct xfs_mount        *mp,
+       xfs_agnumber_t          first,
+       int                     tag)
+{
+       struct xfs_perag        *pag;
+       int                     found;
+       int                     ref;
+
+       rcu_read_lock();
+       found = radix_tree_gang_lookup_tag(&mp->m_perag_tree,
+                                       (void **)&pag, first, 1, tag);
+       if (found <= 0) {
+               rcu_read_unlock();
+               return NULL;
+       }
+       ref = atomic_inc_return(&pag->pag_ref);
+       rcu_read_unlock();
+       trace_xfs_perag_get_tag(mp, pag->pag_agno, ref, _RET_IP_);
+       return pag;
+}
+
+void
+xfs_perag_put(
+       struct xfs_perag        *pag)
+{
+       int     ref;
+
+       ASSERT(atomic_read(&pag->pag_ref) > 0);
+       ref = atomic_dec_return(&pag->pag_ref);
+       trace_xfs_perag_put(pag->pag_mount, pag->pag_agno, ref, _RET_IP_);
+}
+
+/*
+ * Check the validity of the SB found.
+ */
+STATIC int
+xfs_mount_validate_sb(
+       xfs_mount_t     *mp,
+       xfs_sb_t        *sbp,
+       bool            check_inprogress,
+       bool            check_version)
+{
+
+       /*
+        * If the log device and data device have the
+        * same device number, the log is internal.
+        * Consequently, the sb_logstart should be non-zero.  If
+        * we have a zero sb_logstart in this case, we may be trying to mount
+        * a volume filesystem in a non-volume manner.
+        */
+       if (sbp->sb_magicnum != XFS_SB_MAGIC) {
+               xfs_warn(mp, "bad magic number");
+               return XFS_ERROR(EWRONGFS);
+       }
+
+
+       if (!xfs_sb_good_version(sbp)) {
+               xfs_warn(mp, "bad version");
+               return XFS_ERROR(EWRONGFS);
+       }
+
+       /*
+        * Version 5 superblock feature mask validation. Reject combinations the
+        * kernel cannot support up front before checking anything else. For
+        * write validation, we don't need to check feature masks.
+        */
+       if (check_version && XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5) {
+               xfs_alert(mp,
+"Version 5 superblock detected. This kernel has EXPERIMENTAL support enabled!\n"
+"Use of these features in this kernel is at your own risk!");
+
+               if (xfs_sb_has_compat_feature(sbp,
+                                       XFS_SB_FEAT_COMPAT_UNKNOWN)) {
+                       xfs_warn(mp,
+"Superblock has unknown compatible features (0x%x) enabled.\n"
+"Using a more recent kernel is recommended.",
+                               (sbp->sb_features_compat &
+                                               XFS_SB_FEAT_COMPAT_UNKNOWN));
+               }
+
+               if (xfs_sb_has_ro_compat_feature(sbp,
+                                       XFS_SB_FEAT_RO_COMPAT_UNKNOWN)) {
+                       xfs_alert(mp,
+"Superblock has unknown read-only compatible features (0x%x) enabled.",
+                               (sbp->sb_features_ro_compat &
+                                               XFS_SB_FEAT_RO_COMPAT_UNKNOWN));
+                       if (!(mp->m_flags & XFS_MOUNT_RDONLY)) {
+                               xfs_warn(mp,
+"Attempted to mount read-only compatible filesystem read-write.\n"
+"Filesystem can only be safely mounted read only.");
+                               return XFS_ERROR(EINVAL);
+                       }
+               }
+               if (xfs_sb_has_incompat_feature(sbp,
+                                       XFS_SB_FEAT_INCOMPAT_UNKNOWN)) {
+                       xfs_warn(mp,
+"Superblock has unknown incompatible features (0x%x) enabled.\n"
+"Filesystem can not be safely mounted by this kernel.",
+                               (sbp->sb_features_incompat &
+                                               XFS_SB_FEAT_INCOMPAT_UNKNOWN));
+                       return XFS_ERROR(EINVAL);
+               }
+       }
+
+       if (xfs_sb_version_has_pquotino(sbp)) {
+               if (sbp->sb_qflags & (XFS_OQUOTA_ENFD | XFS_OQUOTA_CHKD)) {
+                       xfs_notice(mp,
+                          "Version 5 of Super block has XFS_OQUOTA bits.\n");
+                       return XFS_ERROR(EFSCORRUPTED);
+               }
+       } else if (sbp->sb_qflags & (XFS_PQUOTA_ENFD | XFS_GQUOTA_ENFD |
+                               XFS_PQUOTA_CHKD | XFS_GQUOTA_CHKD)) {
+                       xfs_notice(mp,
+"Superblock earlier than Version 5 has XFS_[PQ]UOTA_{ENFD|CHKD} bits.\n");
+                       return XFS_ERROR(EFSCORRUPTED);
+       }
+
+       if (unlikely(
+           sbp->sb_logstart == 0 && mp->m_logdev_targp == mp->m_ddev_targp)) {
+               xfs_warn(mp,
+               "filesystem is marked as having an external log; "
+               "specify logdev on the mount command line.");
+               return XFS_ERROR(EINVAL);
+       }
+
+       if (unlikely(
+           sbp->sb_logstart != 0 && mp->m_logdev_targp != mp->m_ddev_targp)) {
+               xfs_warn(mp,
+               "filesystem is marked as having an internal log; "
+               "do not specify logdev on the mount command line.");
+               return XFS_ERROR(EINVAL);
+       }
+
+       /*
+        * More sanity checking.  Most of these were stolen directly from
+        * xfs_repair.
+        */
+       if (unlikely(
+           sbp->sb_agcount <= 0                                        ||
+           sbp->sb_sectsize < XFS_MIN_SECTORSIZE                       ||
+           sbp->sb_sectsize > XFS_MAX_SECTORSIZE                       ||
+           sbp->sb_sectlog < XFS_MIN_SECTORSIZE_LOG                    ||
+           sbp->sb_sectlog > XFS_MAX_SECTORSIZE_LOG                    ||
+           sbp->sb_sectsize != (1 << sbp->sb_sectlog)                  ||
+           sbp->sb_blocksize < XFS_MIN_BLOCKSIZE                       ||
+           sbp->sb_blocksize > XFS_MAX_BLOCKSIZE                       ||
+           sbp->sb_blocklog < XFS_MIN_BLOCKSIZE_LOG                    ||
+           sbp->sb_blocklog > XFS_MAX_BLOCKSIZE_LOG                    ||
+           sbp->sb_blocksize != (1 << sbp->sb_blocklog)                ||
+           sbp->sb_inodesize < XFS_DINODE_MIN_SIZE                     ||
+           sbp->sb_inodesize > XFS_DINODE_MAX_SIZE                     ||
+           sbp->sb_inodelog < XFS_DINODE_MIN_LOG                       ||
+           sbp->sb_inodelog > XFS_DINODE_MAX_LOG                       ||
+           sbp->sb_inodesize != (1 << sbp->sb_inodelog)                ||
+           (sbp->sb_blocklog - sbp->sb_inodelog != sbp->sb_inopblog)   ||
+           (sbp->sb_rextsize * sbp->sb_blocksize > XFS_MAX_RTEXTSIZE)  ||
+           (sbp->sb_rextsize * sbp->sb_blocksize < XFS_MIN_RTEXTSIZE)  ||
+           (sbp->sb_imax_pct > 100 /* zero sb_imax_pct is valid */)    ||
+           sbp->sb_dblocks == 0                                        ||
+           sbp->sb_dblocks > XFS_MAX_DBLOCKS(sbp)                      ||
+           sbp->sb_dblocks < XFS_MIN_DBLOCKS(sbp))) {
+               XFS_CORRUPTION_ERROR("SB sanity check failed",
+                               XFS_ERRLEVEL_LOW, mp, sbp);
+               return XFS_ERROR(EFSCORRUPTED);
+       }
+
+       /*
+        * Until this is fixed only page-sized or smaller data blocks work.
+        */
+       if (unlikely(sbp->sb_blocksize > PAGE_SIZE)) {
+               xfs_warn(mp,
+               "File system with blocksize %d bytes. "
+               "Only pagesize (%ld) or less will currently work.",
+                               sbp->sb_blocksize, PAGE_SIZE);
+               return XFS_ERROR(ENOSYS);
+       }
+
+       /*
+        * Currently only very few inode sizes are supported.
+        */
+       switch (sbp->sb_inodesize) {
+       case 256:
+       case 512:
+       case 1024:
+       case 2048:
+               break;
+       default:
+               xfs_warn(mp, "inode size of %d bytes not supported",
+                               sbp->sb_inodesize);
+               return XFS_ERROR(ENOSYS);
+       }
+
+       if (xfs_sb_validate_fsb_count(sbp, sbp->sb_dblocks) ||
+           xfs_sb_validate_fsb_count(sbp, sbp->sb_rblocks)) {
+               xfs_warn(mp,
+               "file system too large to be mounted on this system.");
+               return XFS_ERROR(EFBIG);
+       }
+
+       if (check_inprogress && sbp->sb_inprogress) {
+               xfs_warn(mp, "Offline file system operation in progress!");
+               return XFS_ERROR(EFSCORRUPTED);
+       }
+
+       /*
+        * Version 1 directory format has never worked on Linux.
+        */
+       if (unlikely(!xfs_sb_version_hasdirv2(sbp))) {
+               xfs_warn(mp, "file system using version 1 directory format");
+               return XFS_ERROR(ENOSYS);
+       }
+
+       return 0;
+}
+
+/*
+ * Normalize the quota fields of a just-read superblock into their
+ * in-core representation.
+ */
+void
+xfs_sb_quota_from_disk(struct xfs_sb *sbp)
+{
+       /*
+        * older mkfs doesn't initialize quota inodes to NULLFSINO. This
+        * leads to in-core values having two different values for a quota
+        * inode to be invalid: 0 and NULLFSINO. Change it to a single value
+        * NULLFSINO.
+        *
+        * Note that this change affects only the in-core values. These
+        * values are not written back to disk unless any quota information
+        * is written to the disk. Even in that case, sb_pquotino field is
+        * not written to disk unless the superblock supports pquotino.
+        */
+       if (sbp->sb_uquotino == 0)
+               sbp->sb_uquotino = NULLFSINO;
+       if (sbp->sb_gquotino == 0)
+               sbp->sb_gquotino = NULLFSINO;
+       if (sbp->sb_pquotino == 0)
+               sbp->sb_pquotino = NULLFSINO;
+
+       /*
+        * We need to do these manipulations only if we are working
+        * with an older version of on-disk superblock.
+        */
+       if (xfs_sb_version_has_pquotino(sbp))
+               return;
+
+       /*
+        * Translate the legacy on-disk OQUOTA enforcement/checked bits into
+        * the separate in-core project/group quota bits, then clear the
+        * on-disk-only OQUOTA bits.
+        */
+       if (sbp->sb_qflags & XFS_OQUOTA_ENFD)
+               sbp->sb_qflags |= (sbp->sb_qflags & XFS_PQUOTA_ACCT) ?
+                                       XFS_PQUOTA_ENFD : XFS_GQUOTA_ENFD;
+       if (sbp->sb_qflags & XFS_OQUOTA_CHKD)
+               sbp->sb_qflags |= (sbp->sb_qflags & XFS_PQUOTA_ACCT) ?
+                                       XFS_PQUOTA_CHKD : XFS_GQUOTA_CHKD;
+       sbp->sb_qflags &= ~(XFS_OQUOTA_ENFD | XFS_OQUOTA_CHKD);
+
+       if (sbp->sb_qflags & XFS_PQUOTA_ACCT)  {
+               /*
+                * In older version of superblock, on-disk superblock only
+                * has sb_gquotino, and in-core superblock has both sb_gquotino
+                * and sb_pquotino. But, only one of them is supported at any
+                * point of time. So, if PQUOTA is set in disk superblock,
+                * copy over sb_gquotino to sb_pquotino.
+                */
+               sbp->sb_pquotino = sbp->sb_gquotino;
+               sbp->sb_gquotino = NULLFSINO;
+       }
+}
+
+/*
+ * Translate the on-disk (big-endian) superblock into the in-core,
+ * CPU-endian xfs_sb structure.  Multi-byte fields are byteswapped;
+ * single-byte and opaque fields (uuid, fname) are copied directly.
+ */
+void
+xfs_sb_from_disk(
+       struct xfs_sb   *to,
+       xfs_dsb_t       *from)
+{
+       to->sb_magicnum = be32_to_cpu(from->sb_magicnum);
+       to->sb_blocksize = be32_to_cpu(from->sb_blocksize);
+       to->sb_dblocks = be64_to_cpu(from->sb_dblocks);
+       to->sb_rblocks = be64_to_cpu(from->sb_rblocks);
+       to->sb_rextents = be64_to_cpu(from->sb_rextents);
+       memcpy(&to->sb_uuid, &from->sb_uuid, sizeof(to->sb_uuid));
+       to->sb_logstart = be64_to_cpu(from->sb_logstart);
+       to->sb_rootino = be64_to_cpu(from->sb_rootino);
+       to->sb_rbmino = be64_to_cpu(from->sb_rbmino);
+       to->sb_rsumino = be64_to_cpu(from->sb_rsumino);
+       to->sb_rextsize = be32_to_cpu(from->sb_rextsize);
+       to->sb_agblocks = be32_to_cpu(from->sb_agblocks);
+       to->sb_agcount = be32_to_cpu(from->sb_agcount);
+       to->sb_rbmblocks = be32_to_cpu(from->sb_rbmblocks);
+       to->sb_logblocks = be32_to_cpu(from->sb_logblocks);
+       to->sb_versionnum = be16_to_cpu(from->sb_versionnum);
+       to->sb_sectsize = be16_to_cpu(from->sb_sectsize);
+       to->sb_inodesize = be16_to_cpu(from->sb_inodesize);
+       to->sb_inopblock = be16_to_cpu(from->sb_inopblock);
+       memcpy(&to->sb_fname, &from->sb_fname, sizeof(to->sb_fname));
+       to->sb_blocklog = from->sb_blocklog;
+       to->sb_sectlog = from->sb_sectlog;
+       to->sb_inodelog = from->sb_inodelog;
+       to->sb_inopblog = from->sb_inopblog;
+       to->sb_agblklog = from->sb_agblklog;
+       to->sb_rextslog = from->sb_rextslog;
+       to->sb_inprogress = from->sb_inprogress;
+       to->sb_imax_pct = from->sb_imax_pct;
+       to->sb_icount = be64_to_cpu(from->sb_icount);
+       to->sb_ifree = be64_to_cpu(from->sb_ifree);
+       to->sb_fdblocks = be64_to_cpu(from->sb_fdblocks);
+       to->sb_frextents = be64_to_cpu(from->sb_frextents);
+       to->sb_uquotino = be64_to_cpu(from->sb_uquotino);
+       to->sb_gquotino = be64_to_cpu(from->sb_gquotino);
+       to->sb_qflags = be16_to_cpu(from->sb_qflags);
+       to->sb_flags = from->sb_flags;
+       to->sb_shared_vn = from->sb_shared_vn;
+       to->sb_inoalignmt = be32_to_cpu(from->sb_inoalignmt);
+       to->sb_unit = be32_to_cpu(from->sb_unit);
+       to->sb_width = be32_to_cpu(from->sb_width);
+       to->sb_dirblklog = from->sb_dirblklog;
+       to->sb_logsectlog = from->sb_logsectlog;
+       to->sb_logsectsize = be16_to_cpu(from->sb_logsectsize);
+       to->sb_logsunit = be32_to_cpu(from->sb_logsunit);
+       to->sb_features2 = be32_to_cpu(from->sb_features2);
+       to->sb_bad_features2 = be32_to_cpu(from->sb_bad_features2);
+       to->sb_features_compat = be32_to_cpu(from->sb_features_compat);
+       to->sb_features_ro_compat = be32_to_cpu(from->sb_features_ro_compat);
+       to->sb_features_incompat = be32_to_cpu(from->sb_features_incompat);
+       to->sb_features_log_incompat =
+                               be32_to_cpu(from->sb_features_log_incompat);
+       to->sb_pad = 0;         /* always zeroed in core, not read from disk */
+       to->sb_pquotino = be64_to_cpu(from->sb_pquotino);
+       to->sb_lsn = be64_to_cpu(from->sb_lsn);
+}
+
+/*
+ * Convert in-core quota state back to the on-disk format for superblock
+ * versions that predate the separate pquotino field.  Bits in *fields
+ * that have been fully handled here are cleared so the generic copy
+ * loop in xfs_sb_to_disk() does not overwrite the translated values.
+ */
+static inline void
+xfs_sb_quota_to_disk(
+       xfs_dsb_t       *to,
+       xfs_sb_t        *from,
+       __int64_t       *fields)
+{
+       __uint16_t      qflags = from->sb_qflags;
+
+       /*
+        * We need to do these manipulations only if we are working
+        * with an older version of on-disk superblock.
+        */
+       if (xfs_sb_version_has_pquotino(from))
+               return;
+
+       if (*fields & XFS_SB_QFLAGS) {
+               /*
+                * The in-core version of sb_qflags do not have
+                * XFS_OQUOTA_* flags, whereas the on-disk version
+                * does.  So, convert incore XFS_{PG}QUOTA_* flags
+                * to on-disk XFS_OQUOTA_* flags.
+                */
+               qflags &= ~(XFS_PQUOTA_ENFD | XFS_PQUOTA_CHKD |
+                               XFS_GQUOTA_ENFD | XFS_GQUOTA_CHKD);
+
+               if (from->sb_qflags &
+                               (XFS_PQUOTA_ENFD | XFS_GQUOTA_ENFD))
+                       qflags |= XFS_OQUOTA_ENFD;
+               if (from->sb_qflags &
+                               (XFS_PQUOTA_CHKD | XFS_GQUOTA_CHKD))
+                       qflags |= XFS_OQUOTA_CHKD;
+               to->sb_qflags = cpu_to_be16(qflags);
+               *fields &= ~XFS_SB_QFLAGS;
+       }
+
+       /*
+        * GQUOTINO and PQUOTINO cannot be used together in versions
+        * of superblock that do not have pquotino. from->sb_qflags
+        * tells us which quota is active and should be copied to
+        * disk.
+        */
+       if ((*fields & XFS_SB_GQUOTINO) &&
+                               (from->sb_qflags & XFS_GQUOTA_ACCT))
+               to->sb_gquotino = cpu_to_be64(from->sb_gquotino);
+       else if ((*fields & XFS_SB_PQUOTINO) &&
+                               (from->sb_qflags & XFS_PQUOTA_ACCT))
+               to->sb_gquotino = cpu_to_be64(from->sb_pquotino);
+
+       *fields &= ~(XFS_SB_PQUOTINO | XFS_SB_GQUOTINO);
+}
+
+/*
+ * Copy in core superblock to ondisk one.
+ *
+ * The fields argument is a bitmask of superblock fields to copy; each
+ * set bit indexes the xfs_sb_info[] table, which supplies the field's
+ * byte offset (the next entry's offset gives its size) and whether the
+ * field is copied verbatim or byteswapped.
+ */
+void
+xfs_sb_to_disk(
+       xfs_dsb_t       *to,
+       xfs_sb_t        *from,
+       __int64_t       fields)
+{
+       xfs_caddr_t     to_ptr = (xfs_caddr_t)to;
+       xfs_caddr_t     from_ptr = (xfs_caddr_t)from;
+       xfs_sb_field_t  f;
+       int             first;
+       int             size;
+
+       ASSERT(fields);
+       if (!fields)
+               return;
+
+       /* legacy quota translation may clear bits in the fields mask */
+       xfs_sb_quota_to_disk(to, from, &fields);
+       while (fields) {
+               f = (xfs_sb_field_t)xfs_lowbit64((__uint64_t)fields);
+               first = xfs_sb_info[f].offset;
+               size = xfs_sb_info[f + 1].offset - first;
+
+               ASSERT(xfs_sb_info[f].type == 0 || xfs_sb_info[f].type == 1);
+
+               if (size == 1 || xfs_sb_info[f].type == 1) {
+                       /* single-byte and type-1 fields need no byteswap */
+                       memcpy(to_ptr + first, from_ptr + first, size);
+               } else {
+                       switch (size) {
+                       case 2:
+                               *(__be16 *)(to_ptr + first) =
+                                     cpu_to_be16(*(__u16 *)(from_ptr + first));
+                               break;
+                       case 4:
+                               *(__be32 *)(to_ptr + first) =
+                                     cpu_to_be32(*(__u32 *)(from_ptr + first));
+                               break;
+                       case 8:
+                               *(__be64 *)(to_ptr + first) =
+                                     cpu_to_be64(*(__u64 *)(from_ptr + first));
+                               break;
+                       default:
+                               ASSERT(0);
+                       }
+               }
+
+               fields &= ~(1LL << f);
+       }
+}
+
+/*
+ * Unpack the on-disk superblock in @bp into a stack copy and run the
+ * mount-time sanity checks on it.  @check_version is passed through to
+ * xfs_mount_validate_sb().
+ */
+static int
+xfs_sb_verify(
+       struct xfs_buf  *bp,
+       bool            check_version)
+{
+       struct xfs_mount *mp = bp->b_target->bt_mount;
+       struct xfs_sb   sb;
+
+       xfs_sb_from_disk(&sb, XFS_BUF_TO_SBP(bp));
+
+       /*
+        * Only check the in progress field for the primary superblock as
+        * mkfs.xfs doesn't clear it from secondary superblocks.
+        */
+       return xfs_mount_validate_sb(mp, &sb, bp->b_bn == XFS_SB_DADDR,
+                                    check_version);
+}
+
+/*
+ * If the superblock has the CRC feature bit set or the CRC field is non-null,
+ * check that the CRC is valid.  We check the CRC field is non-null because a
+ * single bit error could clear the feature bit and unused parts of the
+ * superblock are supposed to be zero. Hence a non-null crc field indicates that
+ * we've potentially lost a feature bit and we should check it anyway.
+ */
+static void
+xfs_sb_read_verify(
+       struct xfs_buf  *bp)
+{
+       struct xfs_mount *mp = bp->b_target->bt_mount;
+       struct xfs_dsb  *dsb = XFS_BUF_TO_SBP(bp);
+       int             error;
+
+       /*
+        * open code the version check to avoid needing to convert the entire
+        * superblock from disk order just to check the version number
+        */
+       if (dsb->sb_magicnum == cpu_to_be32(XFS_SB_MAGIC) &&
+           (((be16_to_cpu(dsb->sb_versionnum) & XFS_SB_VERSION_NUMBITS) ==
+                                               XFS_SB_VERSION_5) ||
+            dsb->sb_crc != 0)) {
+
+               /*
+                * Checksum the first sb_sectsize bytes of the buffer, with
+                * the stored CRC located at offsetof(sb_crc).
+                */
+               if (!xfs_verify_cksum(bp->b_addr, be16_to_cpu(dsb->sb_sectsize),
+                                     offsetof(struct xfs_sb, sb_crc))) {
+                       error = EFSCORRUPTED;
+                       goto out_error;
+               }
+       }
+       /* CRC ok (or not applicable): run the full structural checks */
+       error = xfs_sb_verify(bp, true);
+
+out_error:
+       if (error) {
+               XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW,
+                                    mp, bp->b_addr);
+               xfs_buf_ioerror(bp, error);
+       }
+}
+
+/*
+ * We may be probed for a filesystem match, so we may not want to emit
+ * messages when the superblock buffer is not actually an XFS superblock.
+ * If we find an XFS superblock, then run a normal, noisy mount because we are
+ * really going to mount it and want to know about errors.
+ */
+static void
+xfs_sb_quiet_read_verify(
+       struct xfs_buf  *bp)
+{
+       struct xfs_dsb  *dsb = XFS_BUF_TO_SBP(bp);
+
+
+       if (dsb->sb_magicnum == cpu_to_be32(XFS_SB_MAGIC)) {
+               /* XFS filesystem, verify noisily! */
+               xfs_sb_read_verify(bp);
+               return;
+       }
+       /* quietly fail: likely a mount-time probe of a non-XFS device */
+       xfs_buf_ioerror(bp, EWRONGFS);
+}
+
+/*
+ * Write verifier: validate the superblock, and for CRC-enabled (v5)
+ * filesystems stamp the LSN from the buffer log item (if attached) and
+ * recompute the CRC over the buffer before it goes to disk.
+ */
+static void
+xfs_sb_write_verify(
+       struct xfs_buf          *bp)
+{
+       struct xfs_mount        *mp = bp->b_target->bt_mount;
+       struct xfs_buf_log_item *bip = bp->b_fspriv;
+       int                     error;
+
+       error = xfs_sb_verify(bp, false);
+       if (error) {
+               XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW,
+                                    mp, bp->b_addr);
+               xfs_buf_ioerror(bp, error);
+               return;
+       }
+
+       /* no CRC to maintain on pre-v5 superblocks */
+       if (!xfs_sb_version_hascrc(&mp->m_sb))
+               return;
+
+       if (bip)
+               XFS_BUF_TO_SBP(bp)->sb_lsn = cpu_to_be64(bip->bli_item.li_lsn);
+
+       xfs_update_cksum(bp->b_addr, BBTOB(bp->b_length),
+                        offsetof(struct xfs_sb, sb_crc));
+}
+
+/* verifier ops for superblock buffers */
+const struct xfs_buf_ops xfs_sb_buf_ops = {
+       .verify_read = xfs_sb_read_verify,
+       .verify_write = xfs_sb_write_verify,
+};
+
+/* as above, but the read side fails silently on non-XFS content */
+const struct xfs_buf_ops xfs_sb_quiet_buf_ops = {
+       .verify_read = xfs_sb_quiet_read_verify,
+       .verify_write = xfs_sb_write_verify,
+};
+
+/*
+ * xfs_sb_mount_common
+ *
+ * Mount initialization code establishing various mount
+ * fields from the superblock associated with the given
+ * mount structure.
+ */
+void
+xfs_sb_mount_common(
+       struct xfs_mount *mp,
+       struct xfs_sb   *sbp)
+{
+       mp->m_agfrotor = mp->m_agirotor = 0;
+       spin_lock_init(&mp->m_agirotor_lock);
+       mp->m_maxagi = mp->m_sb.sb_agcount;
+       mp->m_blkbit_log = sbp->sb_blocklog + XFS_NBBYLOG;
+       mp->m_blkbb_log = sbp->sb_blocklog - BBSHIFT;
+       mp->m_sectbb_log = sbp->sb_sectlog - BBSHIFT;
+       mp->m_agno_log = xfs_highbit32(sbp->sb_agcount - 1) + 1;
+       mp->m_agino_log = sbp->sb_inopblog + sbp->sb_agblklog;
+       mp->m_blockmask = sbp->sb_blocksize - 1;
+       mp->m_blockwsize = sbp->sb_blocksize >> XFS_WORDLOG;
+       mp->m_blockwmask = mp->m_blockwsize - 1;
+
+       /* btree max records per block; minimums are half the maximums */
+       mp->m_alloc_mxr[0] = xfs_allocbt_maxrecs(mp, sbp->sb_blocksize, 1);
+       mp->m_alloc_mxr[1] = xfs_allocbt_maxrecs(mp, sbp->sb_blocksize, 0);
+       mp->m_alloc_mnr[0] = mp->m_alloc_mxr[0] / 2;
+       mp->m_alloc_mnr[1] = mp->m_alloc_mxr[1] / 2;
+
+       mp->m_inobt_mxr[0] = xfs_inobt_maxrecs(mp, sbp->sb_blocksize, 1);
+       mp->m_inobt_mxr[1] = xfs_inobt_maxrecs(mp, sbp->sb_blocksize, 0);
+       mp->m_inobt_mnr[0] = mp->m_inobt_mxr[0] / 2;
+       mp->m_inobt_mnr[1] = mp->m_inobt_mxr[1] / 2;
+
+       mp->m_bmap_dmxr[0] = xfs_bmbt_maxrecs(mp, sbp->sb_blocksize, 1);
+       mp->m_bmap_dmxr[1] = xfs_bmbt_maxrecs(mp, sbp->sb_blocksize, 0);
+       mp->m_bmap_dmnr[0] = mp->m_bmap_dmxr[0] / 2;
+       mp->m_bmap_dmnr[1] = mp->m_bmap_dmxr[1] / 2;
+
+       mp->m_bsize = XFS_FSB_TO_BB(mp, 1);
+       mp->m_ialloc_inos = (int)MAX((__uint16_t)XFS_INODES_PER_CHUNK,
+                                       sbp->sb_inopblock);
+       mp->m_ialloc_blks = mp->m_ialloc_inos >> sbp->sb_inopblog;
+}
+
+/*
+ * xfs_initialize_perag_data
+ *
+ * Read in each per-ag structure so we can count up the number of
+ * allocated inodes, free inodes and used filesystem blocks as this
+ * information is no longer persistent in the superblock. Once we have
+ * this information, write it into the in-core superblock structure.
+ */
+int
+xfs_initialize_perag_data(
+       struct xfs_mount *mp,
+       xfs_agnumber_t  agcount)
+{
+       xfs_agnumber_t  index;
+       xfs_perag_t     *pag;
+       xfs_sb_t        *sbp = &mp->m_sb;
+       uint64_t        ifree = 0;      /* free inodes, summed over AGs */
+       uint64_t        ialloc = 0;     /* allocated inodes */
+       uint64_t        bfree = 0;      /* free blocks */
+       uint64_t        bfreelst = 0;   /* blocks on the AG free lists */
+       uint64_t        btree = 0;      /* blocks held by AGF btrees */
+       int             error;
+
+       for (index = 0; index < agcount; index++) {
+               /*
+                * read the agf, then the agi. This gets us
+                * all the information we need and populates the
+                * per-ag structures for us.
+                */
+               error = xfs_alloc_pagf_init(mp, NULL, index, 0);
+               if (error)
+                       return error;
+
+               error = xfs_ialloc_pagi_init(mp, NULL, index);
+               if (error)
+                       return error;
+               pag = xfs_perag_get(mp, index);
+               ifree += pag->pagi_freecount;
+               ialloc += pag->pagi_count;
+               bfree += pag->pagf_freeblks;
+               bfreelst += pag->pagf_flcount;
+               btree += pag->pagf_btreeblks;
+               xfs_perag_put(pag);
+       }
+       /*
+        * Overwrite incore superblock counters with just-read data
+        */
+       spin_lock(&mp->m_sb_lock);
+       sbp->sb_ifree = ifree;
+       sbp->sb_icount = ialloc;
+       sbp->sb_fdblocks = bfree + bfreelst + btree;
+       spin_unlock(&mp->m_sb_lock);
+
+       /* Fixup the per-cpu counters as well. */
+       xfs_icsb_reinit_counters(mp);
+
+       return 0;
+}
+
+/*
+ * xfs_mod_sb() can be used to copy arbitrary changes to the
+ * in-core superblock into the superblock buffer to be logged.
+ * It does not provide the higher level of locking that is
+ * needed to protect the in-core superblock from concurrent
+ * access.
+ */
+void
+xfs_mod_sb(xfs_trans_t *tp, __int64_t fields)
+{
+       xfs_buf_t       *bp;
+       int             first;
+       int             last;
+       xfs_mount_t     *mp;
+       xfs_sb_field_t  f;
+
+       ASSERT(fields);
+       if (!fields)
+               return;
+       mp = tp->t_mountp;
+       bp = xfs_trans_getsb(tp, mp, 0);
+       first = sizeof(xfs_sb_t);
+       last = 0;
+
+       /* translate/copy */
+
+       xfs_sb_to_disk(XFS_BUF_TO_SBP(bp), &mp->m_sb, fields);
+
+       /*
+        * find modified range: the highest set field bit bounds the end of
+        * the logged byte range, the lowest bounds its start, so only the
+        * contiguous span covering all modified fields is logged.
+        */
+       f = (xfs_sb_field_t)xfs_highbit64((__uint64_t)fields);
+       ASSERT((1LL << f) & XFS_SB_MOD_BITS);
+       last = xfs_sb_info[f + 1].offset - 1;
+
+       f = (xfs_sb_field_t)xfs_lowbit64((__uint64_t)fields);
+       ASSERT((1LL << f) & XFS_SB_MOD_BITS);
+       first = xfs_sb_info[f].offset;
+
+       xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SB_BUF);
+       xfs_trans_log_buf(tp, bp, first, last);
+}
index 78f9e70b80c7da8a64b92d528a9f7aa2ba49401e..db7593f4bc7e3159d98c4edf760107b843702496 100644 (file)
@@ -26,6 +26,7 @@
 
 struct xfs_buf;
 struct xfs_mount;
+struct xfs_trans;
 
 #define        XFS_SB_MAGIC            0x58465342      /* 'XFSB' */
 #define        XFS_SB_VERSION_1        1               /* 5.3, 6.0.1, 6.1 */
@@ -354,15 +355,8 @@ static inline int xfs_sb_good_version(xfs_sb_t *sbp)
                     (sbp->sb_features2 & ~XFS_SB_VERSION2_OKREALBITS)))
                        return 0;
 
-#ifdef __KERNEL__
                if (sbp->sb_shared_vn > XFS_SB_MAX_SHARED_VN)
                        return 0;
-#else
-               if ((sbp->sb_versionnum & XFS_SB_VERSION_SHAREDBIT) &&
-                   sbp->sb_shared_vn > XFS_SB_MAX_SHARED_VN)
-                       return 0;
-#endif
-
                return 1;
        }
        if (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5)
@@ -554,6 +548,13 @@ static inline int xfs_sb_version_hasprojid32bit(xfs_sb_t *sbp)
                (sbp->sb_features2 & XFS_SB_VERSION2_PROJID32BIT));
 }
 
+/*
+ * Turn on the 32-bit project id feature.  Setting it requires the
+ * MOREBITS version bit; the feature bit is set in both sb_features2
+ * and sb_bad_features2 so the two copies stay in agreement.
+ */
+static inline void xfs_sb_version_addprojid32bit(xfs_sb_t *sbp)
+{
+       sbp->sb_versionnum |= XFS_SB_VERSION_MOREBITSBIT;
+       sbp->sb_features2 |= XFS_SB_VERSION2_PROJID32BIT;
+       sbp->sb_bad_features2 |= XFS_SB_VERSION2_PROJID32BIT;
+}
+
 static inline int xfs_sb_version_hascrc(xfs_sb_t *sbp)
 {
        return XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5;
@@ -618,16 +619,23 @@ xfs_sb_has_incompat_log_feature(
        return (sbp->sb_features_log_incompat & feature) != 0;
 }
 
-static inline bool
-xfs_is_quota_inode(struct xfs_sb *sbp, xfs_ino_t ino)
+static inline int xfs_sb_version_has_pquotino(xfs_sb_t *sbp)
 {
-       return (ino == sbp->sb_uquotino || ino == sbp->sb_gquotino);
+       return XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5;
 }
 
 /*
  * end of superblock version macros
  */
 
+/* True if @ino is any of the superblock's three quota inodes. */
+static inline bool
+xfs_is_quota_inode(struct xfs_sb *sbp, xfs_ino_t ino)
+{
+       return (ino == sbp->sb_uquotino ||
+               ino == sbp->sb_gquotino ||
+               ino == sbp->sb_pquotino);
+}
+
 #define XFS_SB_DADDR           ((xfs_daddr_t)0) /* daddr in filesystem/ag */
 #define        XFS_SB_BLOCK(mp)        XFS_HDR_BLOCK(mp, XFS_SB_DADDR)
 #define XFS_BUF_TO_SBP(bp)     ((xfs_dsb_t *)((bp)->b_addr))
@@ -660,4 +668,23 @@ xfs_is_quota_inode(struct xfs_sb *sbp, xfs_ino_t ino)
 #define XFS_B_TO_FSBT(mp,b)    (((__uint64_t)(b)) >> (mp)->m_sb.sb_blocklog)
 #define XFS_B_FSB_OFFSET(mp,b) ((b) & (mp)->m_blockmask)
 
+/*
+ * perag get/put wrappers for ref counting
+ */
+extern struct xfs_perag *xfs_perag_get(struct xfs_mount *, xfs_agnumber_t);
+extern struct xfs_perag *xfs_perag_get_tag(struct xfs_mount *, xfs_agnumber_t,
+                                          int tag);
+extern void    xfs_perag_put(struct xfs_perag *pag);
+extern int     xfs_initialize_perag_data(struct xfs_mount *, xfs_agnumber_t);
+
+extern void    xfs_sb_calc_crc(struct xfs_buf  *);
+extern void    xfs_mod_sb(struct xfs_trans *, __int64_t);
+extern void    xfs_sb_mount_common(struct xfs_mount *, struct xfs_sb *);
+extern void    xfs_sb_from_disk(struct xfs_sb *, struct xfs_dsb *);
+extern void    xfs_sb_to_disk(struct xfs_dsb *, struct xfs_sb *, __int64_t);
+extern void    xfs_sb_quota_from_disk(struct xfs_sb *sbp);
+
+extern const struct xfs_buf_ops xfs_sb_buf_ops;
+extern const struct xfs_buf_ops xfs_sb_quiet_buf_ops;
+
 #endif /* __XFS_SB_H__ */
index 1d68ffcdeaa7f555ab77ee05f5c13571c6ee3b4d..979a77d4b87d1142c12b4b1cabcf41f75777041a 100644 (file)
  */
 
 #include "xfs.h"
+#include "xfs_format.h"
 #include "xfs_log.h"
 #include "xfs_inum.h"
 #include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
-#include "xfs_dir2.h"
 #include "xfs_alloc.h"
 #include "xfs_quota.h"
 #include "xfs_mount.h"
 #include "xfs_fsops.h"
 #include "xfs_attr.h"
 #include "xfs_buf_item.h"
-#include "xfs_utils.h"
-#include "xfs_vnodeops.h"
 #include "xfs_log_priv.h"
 #include "xfs_trans_priv.h"
 #include "xfs_filestream.h"
 #include "xfs_da_btree.h"
+#include "xfs_dir2_format.h"
+#include "xfs_dir2.h"
 #include "xfs_extfree_item.h"
 #include "xfs_mru_cache.h"
 #include "xfs_inode_item.h"
@@ -421,12 +421,6 @@ xfs_parseargs(
        }
 #endif
 
-       if ((mp->m_qflags & (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE)) &&
-           (mp->m_qflags & (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE))) {
-               xfs_warn(mp, "cannot mount with both project and group quota");
-               return EINVAL;
-       }
-
        if ((dsunit && !dswidth) || (!dsunit && dswidth)) {
                xfs_warn(mp, "sunit and swidth must be specified together");
                return EINVAL;
@@ -556,14 +550,13 @@ xfs_showargs(
        else if (mp->m_qflags & XFS_UQUOTA_ACCT)
                seq_puts(m, "," MNTOPT_UQUOTANOENF);
 
-       /* Either project or group quotas can be active, not both */
-
        if (mp->m_qflags & XFS_PQUOTA_ACCT) {
                if (mp->m_qflags & XFS_PQUOTA_ENFD)
                        seq_puts(m, "," MNTOPT_PRJQUOTA);
                else
                        seq_puts(m, "," MNTOPT_PQUOTANOENF);
-       } else if (mp->m_qflags & XFS_GQUOTA_ACCT) {
+       }
+       if (mp->m_qflags & XFS_GQUOTA_ACCT) {
                if (mp->m_qflags & XFS_GQUOTA_ENFD)
                        seq_puts(m, "," MNTOPT_GRPQUOTA);
                else
@@ -870,17 +863,17 @@ xfs_init_mount_workqueues(
                goto out_destroy_unwritten;
 
        mp->m_reclaim_workqueue = alloc_workqueue("xfs-reclaim/%s",
-                       WQ_NON_REENTRANT, 0, mp->m_fsname);
+                       0, 0, mp->m_fsname);
        if (!mp->m_reclaim_workqueue)
                goto out_destroy_cil;
 
        mp->m_log_workqueue = alloc_workqueue("xfs-log/%s",
-                       WQ_NON_REENTRANT, 0, mp->m_fsname);
+                       0, 0, mp->m_fsname);
        if (!mp->m_log_workqueue)
                goto out_destroy_reclaim;
 
        mp->m_eofblocks_workqueue = alloc_workqueue("xfs-eofblocks/%s",
-                       WQ_NON_REENTRANT, 0, mp->m_fsname);
+                       0, 0, mp->m_fsname);
        if (!mp->m_eofblocks_workqueue)
                goto out_destroy_log;
 
@@ -1396,6 +1389,14 @@ xfs_finish_flags(
                return XFS_ERROR(EROFS);
        }
 
+       if ((mp->m_qflags & (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE)) &&
+           (mp->m_qflags & (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE)) &&
+           !xfs_sb_version_has_pquotino(&mp->m_sb)) {
+               xfs_warn(mp,
+                 "Super block does not support project and group quota together");
+               return XFS_ERROR(EINVAL);
+       }
+
        return 0;
 }
 
index f4895b662fcb549706881a4dd65a8b048d23d03e..2f2a7c005be2d32219fd9c580bb2050f2f4e0050 100644 (file)
  */
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_types.h"
+#include "xfs_format.h"
 #include "xfs_bit.h"
 #include "xfs_log.h"
 #include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
-#include "xfs_dir2.h"
 #include "xfs_mount.h"
 #include "xfs_da_btree.h"
+#include "xfs_dir2_format.h"
+#include "xfs_dir2.h"
 #include "xfs_bmap_btree.h"
 #include "xfs_ialloc_btree.h"
 #include "xfs_dinode.h"
 #include "xfs_inode.h"
-#include "xfs_inode_item.h"
-#include "xfs_itable.h"
 #include "xfs_ialloc.h"
 #include "xfs_alloc.h"
 #include "xfs_bmap.h"
+#include "xfs_bmap_util.h"
 #include "xfs_error.h"
 #include "xfs_quota.h"
-#include "xfs_utils.h"
 #include "xfs_trans_space.h"
-#include "xfs_log_priv.h"
 #include "xfs_trace.h"
 #include "xfs_symlink.h"
-#include "xfs_cksum.h"
-#include "xfs_buf_item.h"
-
-
-/*
- * Each contiguous block has a header, so it is not just a simple pathlen
- * to FSB conversion.
- */
-int
-xfs_symlink_blocks(
-       struct xfs_mount *mp,
-       int             pathlen)
-{
-       int buflen = XFS_SYMLINK_BUF_SPACE(mp, mp->m_sb.sb_blocksize);
-
-       return (pathlen + buflen - 1) / buflen;
-}
-
-static int
-xfs_symlink_hdr_set(
-       struct xfs_mount        *mp,
-       xfs_ino_t               ino,
-       uint32_t                offset,
-       uint32_t                size,
-       struct xfs_buf          *bp)
-{
-       struct xfs_dsymlink_hdr *dsl = bp->b_addr;
-
-       if (!xfs_sb_version_hascrc(&mp->m_sb))
-               return 0;
-
-       dsl->sl_magic = cpu_to_be32(XFS_SYMLINK_MAGIC);
-       dsl->sl_offset = cpu_to_be32(offset);
-       dsl->sl_bytes = cpu_to_be32(size);
-       uuid_copy(&dsl->sl_uuid, &mp->m_sb.sb_uuid);
-       dsl->sl_owner = cpu_to_be64(ino);
-       dsl->sl_blkno = cpu_to_be64(bp->b_bn);
-       bp->b_ops = &xfs_symlink_buf_ops;
-
-       return sizeof(struct xfs_dsymlink_hdr);
-}
-
-/*
- * Checking of the symlink header is split into two parts. the verifier does
- * CRC, location and bounds checking, the unpacking function checks the path
- * parameters and owner.
- */
-bool
-xfs_symlink_hdr_ok(
-       struct xfs_mount        *mp,
-       xfs_ino_t               ino,
-       uint32_t                offset,
-       uint32_t                size,
-       struct xfs_buf          *bp)
-{
-       struct xfs_dsymlink_hdr *dsl = bp->b_addr;
-
-       if (offset != be32_to_cpu(dsl->sl_offset))
-               return false;
-       if (size != be32_to_cpu(dsl->sl_bytes))
-               return false;
-       if (ino != be64_to_cpu(dsl->sl_owner))
-               return false;
-
-       /* ok */
-       return true;
-}
-
-static bool
-xfs_symlink_verify(
-       struct xfs_buf          *bp)
-{
-       struct xfs_mount        *mp = bp->b_target->bt_mount;
-       struct xfs_dsymlink_hdr *dsl = bp->b_addr;
-
-       if (!xfs_sb_version_hascrc(&mp->m_sb))
-               return false;
-       if (dsl->sl_magic != cpu_to_be32(XFS_SYMLINK_MAGIC))
-               return false;
-       if (!uuid_equal(&dsl->sl_uuid, &mp->m_sb.sb_uuid))
-               return false;
-       if (bp->b_bn != be64_to_cpu(dsl->sl_blkno))
-               return false;
-       if (be32_to_cpu(dsl->sl_offset) +
-                               be32_to_cpu(dsl->sl_bytes) >= MAXPATHLEN)
-               return false;
-       if (dsl->sl_owner == 0)
-               return false;
-
-       return true;
-}
-
-static void
-xfs_symlink_read_verify(
-       struct xfs_buf  *bp)
-{
-       struct xfs_mount *mp = bp->b_target->bt_mount;
-
-       /* no verification of non-crc buffers */
-       if (!xfs_sb_version_hascrc(&mp->m_sb))
-               return;
-
-       if (!xfs_verify_cksum(bp->b_addr, BBTOB(bp->b_length),
-                                 offsetof(struct xfs_dsymlink_hdr, sl_crc)) ||
-           !xfs_symlink_verify(bp)) {
-               XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, bp->b_addr);
-               xfs_buf_ioerror(bp, EFSCORRUPTED);
-       }
-}
-
-static void
-xfs_symlink_write_verify(
-       struct xfs_buf  *bp)
-{
-       struct xfs_mount *mp = bp->b_target->bt_mount;
-       struct xfs_buf_log_item *bip = bp->b_fspriv;
-
-       /* no verification of non-crc buffers */
-       if (!xfs_sb_version_hascrc(&mp->m_sb))
-               return;
-
-       if (!xfs_symlink_verify(bp)) {
-               XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, bp->b_addr);
-               xfs_buf_ioerror(bp, EFSCORRUPTED);
-               return;
-       }
-
-       if (bip) {
-               struct xfs_dsymlink_hdr *dsl = bp->b_addr;
-               dsl->sl_lsn = cpu_to_be64(bip->bli_item.li_lsn);
-       }
-       xfs_update_cksum(bp->b_addr, BBTOB(bp->b_length),
-                        offsetof(struct xfs_dsymlink_hdr, sl_crc));
-}
-
-const struct xfs_buf_ops xfs_symlink_buf_ops = {
-       .verify_read = xfs_symlink_read_verify,
-       .verify_write = xfs_symlink_write_verify,
-};
-
-void
-xfs_symlink_local_to_remote(
-       struct xfs_trans        *tp,
-       struct xfs_buf          *bp,
-       struct xfs_inode        *ip,
-       struct xfs_ifork        *ifp)
-{
-       struct xfs_mount        *mp = ip->i_mount;
-       char                    *buf;
-
-       if (!xfs_sb_version_hascrc(&mp->m_sb)) {
-               bp->b_ops = NULL;
-               memcpy(bp->b_addr, ifp->if_u1.if_data, ifp->if_bytes);
-               return;
-       }
-
-       /*
-        * As this symlink fits in an inode literal area, it must also fit in
-        * the smallest buffer the filesystem supports.
-        */
-       ASSERT(BBTOB(bp->b_length) >=
-                       ifp->if_bytes + sizeof(struct xfs_dsymlink_hdr));
-
-       bp->b_ops = &xfs_symlink_buf_ops;
-
-       buf = bp->b_addr;
-       buf += xfs_symlink_hdr_set(mp, ip->i_ino, 0, ifp->if_bytes, bp);
-       memcpy(buf, ifp->if_u1.if_data, ifp->if_bytes);
-}
 
 /* ----- Kernel only functions below ----- */
 STATIC int
@@ -386,8 +215,11 @@ xfs_symlink(
        /*
         * Make sure that we have allocated dquot(s) on disk.
         */
-       error = xfs_qm_vop_dqalloc(dp, current_fsuid(), current_fsgid(), prid,
-               XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT, &udqp, &gdqp, &pdqp);
+       error = xfs_qm_vop_dqalloc(dp,
+                       xfs_kuid_to_uid(current_fsuid()),
+                       xfs_kgid_to_gid(current_fsgid()), prid,
+                       XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
+                       &udqp, &gdqp, &pdqp);
        if (error)
                goto std_return;
 
@@ -402,12 +234,10 @@ xfs_symlink(
        else
                fs_blocks = xfs_symlink_blocks(mp, pathlen);
        resblks = XFS_SYMLINK_SPACE_RES(mp, link_name->len, fs_blocks);
-       error = xfs_trans_reserve(tp, resblks, XFS_SYMLINK_LOG_RES(mp), 0,
-                       XFS_TRANS_PERM_LOG_RES, XFS_SYMLINK_LOG_COUNT);
+       error = xfs_trans_reserve(tp, &M_RES(mp)->tr_symlink, resblks, 0);
        if (error == ENOSPC && fs_blocks == 0) {
                resblks = 0;
-               error = xfs_trans_reserve(tp, 0, XFS_SYMLINK_LOG_RES(mp), 0,
-                               XFS_TRANS_PERM_LOG_RES, XFS_SYMLINK_LOG_COUNT);
+               error = xfs_trans_reserve(tp, &M_RES(mp)->tr_symlink, 0, 0);
        }
        if (error) {
                cancel_flags = 0;
@@ -710,8 +540,8 @@ xfs_inactive_symlink_rmt(
         * Put an itruncate log reservation in the new transaction
         * for our caller.
         */
-       if ((error = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0,
-                       XFS_TRANS_PERM_LOG_RES, XFS_ITRUNCATE_LOG_COUNT))) {
+       error = xfs_trans_reserve(tp, &M_RES(mp)->tr_itruncate, 0, 0);
+       if (error) {
                ASSERT(XFS_FORCED_SHUTDOWN(mp));
                goto error0;
        }
index 374394880c01e4d8db8dc36d6468e748295c6f29..99338ba666ac68c11350fb1b24906364c7a6de01 100644 (file)
 #ifndef __XFS_SYMLINK_H
 #define __XFS_SYMLINK_H 1
 
-struct xfs_mount;
-struct xfs_trans;
-struct xfs_inode;
-struct xfs_buf;
-struct xfs_ifork;
-struct xfs_name;
-
-#define XFS_SYMLINK_MAGIC      0x58534c4d      /* XSLM */
-
-struct xfs_dsymlink_hdr {
-       __be32  sl_magic;
-       __be32  sl_offset;
-       __be32  sl_bytes;
-       __be32  sl_crc;
-       uuid_t  sl_uuid;
-       __be64  sl_owner;
-       __be64  sl_blkno;
-       __be64  sl_lsn;
-};
-
-/*
- * The maximum pathlen is 1024 bytes. Since the minimum file system
- * blocksize is 512 bytes, we can get a max of 3 extents back from
- * bmapi when crc headers are taken into account.
- */
-#define XFS_SYMLINK_MAPS 3
-
-#define XFS_SYMLINK_BUF_SPACE(mp, bufsize)     \
-       ((bufsize) - (xfs_sb_version_hascrc(&(mp)->m_sb) ? \
-                       sizeof(struct xfs_dsymlink_hdr) : 0))
-
-int xfs_symlink_blocks(struct xfs_mount *mp, int pathlen);
-
-void xfs_symlink_local_to_remote(struct xfs_trans *tp, struct xfs_buf *bp,
-                                struct xfs_inode *ip, struct xfs_ifork *ifp);
-
-extern const struct xfs_buf_ops xfs_symlink_buf_ops;
-
-#ifdef __KERNEL__
+/* Kernel only symlink defintions */
 
 int xfs_symlink(struct xfs_inode *dp, struct xfs_name *link_name,
                const char *target_path, umode_t mode, struct xfs_inode **ipp);
 int xfs_readlink(struct xfs_inode *ip, char *link);
 int xfs_inactive_symlink(struct xfs_inode *ip, struct xfs_trans **tpp);
 
-#endif /* __KERNEL__ */
 #endif /* __XFS_SYMLINK_H */
diff --git a/fs/xfs/xfs_symlink_remote.c b/fs/xfs/xfs_symlink_remote.c
new file mode 100644 (file)
index 0000000..01c85e3
--- /dev/null
@@ -0,0 +1,200 @@
+/*
+ * Copyright (c) 2000-2006 Silicon Graphics, Inc.
+ * Copyright (c) 2012-2013 Red Hat, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+#include "xfs.h"
+#include "xfs_fs.h"
+#include "xfs_format.h"
+#include "xfs_log.h"
+#include "xfs_trans.h"
+#include "xfs_ag.h"
+#include "xfs_sb.h"
+#include "xfs_mount.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_inode.h"
+#include "xfs_error.h"
+#include "xfs_trace.h"
+#include "xfs_symlink.h"
+#include "xfs_cksum.h"
+#include "xfs_buf_item.h"
+
+
+/*
+ * Each contiguous block has a header, so it is not just a simple pathlen
+ * to FSB conversion.
+ */
+int
+xfs_symlink_blocks(
+       struct xfs_mount *mp,
+       int             pathlen)
+{
+       int buflen = XFS_SYMLINK_BUF_SPACE(mp, mp->m_sb.sb_blocksize);
+
+       return (pathlen + buflen - 1) / buflen;
+}
+
+int
+xfs_symlink_hdr_set(
+       struct xfs_mount        *mp,
+       xfs_ino_t               ino,
+       uint32_t                offset,
+       uint32_t                size,
+       struct xfs_buf          *bp)
+{
+       struct xfs_dsymlink_hdr *dsl = bp->b_addr;
+
+       if (!xfs_sb_version_hascrc(&mp->m_sb))
+               return 0;
+
+       dsl->sl_magic = cpu_to_be32(XFS_SYMLINK_MAGIC);
+       dsl->sl_offset = cpu_to_be32(offset);
+       dsl->sl_bytes = cpu_to_be32(size);
+       uuid_copy(&dsl->sl_uuid, &mp->m_sb.sb_uuid);
+       dsl->sl_owner = cpu_to_be64(ino);
+       dsl->sl_blkno = cpu_to_be64(bp->b_bn);
+       bp->b_ops = &xfs_symlink_buf_ops;
+
+       return sizeof(struct xfs_dsymlink_hdr);
+}
+
+/*
+ * Checking of the symlink header is split into two parts. the verifier does
+ * CRC, location and bounds checking, the unpacking function checks the path
+ * parameters and owner.
+ */
+bool
+xfs_symlink_hdr_ok(
+       struct xfs_mount        *mp,
+       xfs_ino_t               ino,
+       uint32_t                offset,
+       uint32_t                size,
+       struct xfs_buf          *bp)
+{
+       struct xfs_dsymlink_hdr *dsl = bp->b_addr;
+
+       if (offset != be32_to_cpu(dsl->sl_offset))
+               return false;
+       if (size != be32_to_cpu(dsl->sl_bytes))
+               return false;
+       if (ino != be64_to_cpu(dsl->sl_owner))
+               return false;
+
+       /* ok */
+       return true;
+}
+
+static bool
+xfs_symlink_verify(
+       struct xfs_buf          *bp)
+{
+       struct xfs_mount        *mp = bp->b_target->bt_mount;
+       struct xfs_dsymlink_hdr *dsl = bp->b_addr;
+
+       if (!xfs_sb_version_hascrc(&mp->m_sb))
+               return false;
+       if (dsl->sl_magic != cpu_to_be32(XFS_SYMLINK_MAGIC))
+               return false;
+       if (!uuid_equal(&dsl->sl_uuid, &mp->m_sb.sb_uuid))
+               return false;
+       if (bp->b_bn != be64_to_cpu(dsl->sl_blkno))
+               return false;
+       if (be32_to_cpu(dsl->sl_offset) +
+                               be32_to_cpu(dsl->sl_bytes) >= MAXPATHLEN)
+               return false;
+       if (dsl->sl_owner == 0)
+               return false;
+
+       return true;
+}
+
+static void
+xfs_symlink_read_verify(
+       struct xfs_buf  *bp)
+{
+       struct xfs_mount *mp = bp->b_target->bt_mount;
+
+       /* no verification of non-crc buffers */
+       if (!xfs_sb_version_hascrc(&mp->m_sb))
+               return;
+
+       if (!xfs_verify_cksum(bp->b_addr, BBTOB(bp->b_length),
+                                 offsetof(struct xfs_dsymlink_hdr, sl_crc)) ||
+           !xfs_symlink_verify(bp)) {
+               XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, bp->b_addr);
+               xfs_buf_ioerror(bp, EFSCORRUPTED);
+       }
+}
+
+static void
+xfs_symlink_write_verify(
+       struct xfs_buf  *bp)
+{
+       struct xfs_mount *mp = bp->b_target->bt_mount;
+       struct xfs_buf_log_item *bip = bp->b_fspriv;
+
+       /* no verification of non-crc buffers */
+       if (!xfs_sb_version_hascrc(&mp->m_sb))
+               return;
+
+       if (!xfs_symlink_verify(bp)) {
+               XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, bp->b_addr);
+               xfs_buf_ioerror(bp, EFSCORRUPTED);
+               return;
+       }
+
+       if (bip) {
+               struct xfs_dsymlink_hdr *dsl = bp->b_addr;
+               dsl->sl_lsn = cpu_to_be64(bip->bli_item.li_lsn);
+       }
+       xfs_update_cksum(bp->b_addr, BBTOB(bp->b_length),
+                        offsetof(struct xfs_dsymlink_hdr, sl_crc));
+}
+
+const struct xfs_buf_ops xfs_symlink_buf_ops = {
+       .verify_read = xfs_symlink_read_verify,
+       .verify_write = xfs_symlink_write_verify,
+};
+
+void
+xfs_symlink_local_to_remote(
+       struct xfs_trans        *tp,
+       struct xfs_buf          *bp,
+       struct xfs_inode        *ip,
+       struct xfs_ifork        *ifp)
+{
+       struct xfs_mount        *mp = ip->i_mount;
+       char                    *buf;
+
+       if (!xfs_sb_version_hascrc(&mp->m_sb)) {
+               bp->b_ops = NULL;
+               memcpy(bp->b_addr, ifp->if_u1.if_data, ifp->if_bytes);
+               return;
+       }
+
+       /*
+        * As this symlink fits in an inode literal area, it must also fit in
+        * the smallest buffer the filesystem supports.
+        */
+       ASSERT(BBTOB(bp->b_length) >=
+                       ifp->if_bytes + sizeof(struct xfs_dsymlink_hdr));
+
+       bp->b_ops = &xfs_symlink_buf_ops;
+
+       buf = bp->b_addr;
+       buf += xfs_symlink_hdr_set(mp, ip->i_ino, 0, ifp->if_bytes, bp);
+       memcpy(buf, ifp->if_u1.if_data, ifp->if_bytes);
+}
index b6e3897c1d9f0bbeb34f3b1e610e58217ee005f4..5d7b3e40705ffe4a96c75493d09ac74d9ae265cd 100644 (file)
@@ -18,6 +18,7 @@
 #include "xfs.h"
 #include "xfs_fs.h"
 #include "xfs_types.h"
+#include "xfs_format.h"
 #include "xfs_log.h"
 #include "xfs_trans.h"
 #include "xfs_sb.h"
index 35a229981354159add4b143aea86682c22966a7f..b986400ea728cb6255df040e9cfe3cab97aa6ffb 100644 (file)
@@ -18,7 +18,7 @@
  */
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_types.h"
+#include "xfs_format.h"
 #include "xfs_log.h"
 #include "xfs_trans.h"
 #include "xfs_sb.h"
 kmem_zone_t    *xfs_trans_zone;
 kmem_zone_t    *xfs_log_item_desc_zone;
 
-/*
- * A buffer has a format structure overhead in the log in addition
- * to the data, so we need to take this into account when reserving
- * space in a transaction for a buffer.  Round the space required up
- * to a multiple of 128 bytes so that we don't change the historical
- * reservation that has been used for this overhead.
- */
-STATIC uint
-xfs_buf_log_overhead(void)
-{
-       return round_up(sizeof(struct xlog_op_header) +
-                       sizeof(struct xfs_buf_log_format), 128);
-}
-
-/*
- * Calculate out transaction log reservation per item in bytes.
- *
- * The nbufs argument is used to indicate the number of items that
- * will be changed in a transaction.  size is used to tell how many
- * bytes should be reserved per item.
- */
-STATIC uint
-xfs_calc_buf_res(
-       uint            nbufs,
-       uint            size)
-{
-       return nbufs * (size + xfs_buf_log_overhead());
-}
-
-/*
- * Various log reservation values.
- *
- * These are based on the size of the file system block because that is what
- * most transactions manipulate.  Each adds in an additional 128 bytes per
- * item logged to try to account for the overhead of the transaction mechanism.
- *
- * Note:  Most of the reservations underestimate the number of allocation
- * groups into which they could free extents in the xfs_bmap_finish() call.
- * This is because the number in the worst case is quite high and quite
- * unusual.  In order to fix this we need to change xfs_bmap_finish() to free
- * extents in only a single AG at a time.  This will require changes to the
- * EFI code as well, however, so that the EFI for the extents not freed is
- * logged again in each transaction.  See SGI PV #261917.
- *
- * Reservation functions here avoid a huge stack in xfs_trans_init due to
- * register overflow from temporaries in the calculations.
- */
-
-
-/*
- * In a write transaction we can allocate a maximum of 2
- * extents.  This gives:
- *    the inode getting the new extents: inode size
- *    the inode's bmap btree: max depth * block size
- *    the agfs of the ags from which the extents are allocated: 2 * sector
- *    the superblock free block counter: sector size
- *    the allocation btrees: 2 exts * 2 trees * (2 * max depth - 1) * block size
- * And the bmap_finish transaction can free bmap blocks in a join:
- *    the agfs of the ags containing the blocks: 2 * sector size
- *    the agfls of the ags containing the blocks: 2 * sector size
- *    the super block free block counter: sector size
- *    the allocation btrees: 2 exts * 2 trees * (2 * max depth - 1) * block size
- */
-STATIC uint
-xfs_calc_write_reservation(
-       struct xfs_mount        *mp)
-{
-       return XFS_DQUOT_LOGRES(mp) +
-               MAX((xfs_calc_buf_res(1, mp->m_sb.sb_inodesize) +
-                    xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK),
-                                     XFS_FSB_TO_B(mp, 1)) +
-                    xfs_calc_buf_res(3, mp->m_sb.sb_sectsize) +
-                    xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 2),
-                                     XFS_FSB_TO_B(mp, 1))),
-                   (xfs_calc_buf_res(5, mp->m_sb.sb_sectsize) +
-                    xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 2),
-                                     XFS_FSB_TO_B(mp, 1))));
-}
-
-/*
- * In truncating a file we free up to two extents at once.  We can modify:
- *    the inode being truncated: inode size
- *    the inode's bmap btree: (max depth + 1) * block size
- * And the bmap_finish transaction can free the blocks and bmap blocks:
- *    the agf for each of the ags: 4 * sector size
- *    the agfl for each of the ags: 4 * sector size
- *    the super block to reflect the freed blocks: sector size
- *    worst case split in allocation btrees per extent assuming 4 extents:
- *             4 exts * 2 trees * (2 * max depth - 1) * block size
- *    the inode btree: max depth * blocksize
- *    the allocation btrees: 2 trees * (max depth - 1) * block size
- */
-STATIC uint
-xfs_calc_itruncate_reservation(
-       struct xfs_mount        *mp)
-{
-       return XFS_DQUOT_LOGRES(mp) +
-               MAX((xfs_calc_buf_res(1, mp->m_sb.sb_inodesize) +
-                    xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) + 1,
-                                     XFS_FSB_TO_B(mp, 1))),
-                   (xfs_calc_buf_res(9, mp->m_sb.sb_sectsize) +
-                    xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 4),
-                                     XFS_FSB_TO_B(mp, 1)) +
-                   xfs_calc_buf_res(5, 0) +
-                   xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 1),
-                                    XFS_FSB_TO_B(mp, 1)) +
-                   xfs_calc_buf_res(2 + XFS_IALLOC_BLOCKS(mp) +
-                                    mp->m_in_maxlevels, 0)));
-}
-
-/*
- * In renaming a files we can modify:
- *    the four inodes involved: 4 * inode size
- *    the two directory btrees: 2 * (max depth + v2) * dir block size
- *    the two directory bmap btrees: 2 * max depth * block size
- * And the bmap_finish transaction can free dir and bmap blocks (two sets
- *     of bmap blocks) giving:
- *    the agf for the ags in which the blocks live: 3 * sector size
- *    the agfl for the ags in which the blocks live: 3 * sector size
- *    the superblock for the free block count: sector size
- *    the allocation btrees: 3 exts * 2 trees * (2 * max depth - 1) * block size
- */
-STATIC uint
-xfs_calc_rename_reservation(
-       struct xfs_mount        *mp)
-{
-       return XFS_DQUOT_LOGRES(mp) +
-               MAX((xfs_calc_buf_res(4, mp->m_sb.sb_inodesize) +
-                    xfs_calc_buf_res(2 * XFS_DIROP_LOG_COUNT(mp),
-                                     XFS_FSB_TO_B(mp, 1))),
-                   (xfs_calc_buf_res(7, mp->m_sb.sb_sectsize) +
-                    xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 3),
-                                     XFS_FSB_TO_B(mp, 1))));
-}
-
-/*
- * For creating a link to an inode:
- *    the parent directory inode: inode size
- *    the linked inode: inode size
- *    the directory btree could split: (max depth + v2) * dir block size
- *    the directory bmap btree could join or split: (max depth + v2) * blocksize
- * And the bmap_finish transaction can free some bmap blocks giving:
- *    the agf for the ag in which the blocks live: sector size
- *    the agfl for the ag in which the blocks live: sector size
- *    the superblock for the free block count: sector size
- *    the allocation btrees: 2 trees * (2 * max depth - 1) * block size
- */
-STATIC uint
-xfs_calc_link_reservation(
-       struct xfs_mount        *mp)
-{
-       return XFS_DQUOT_LOGRES(mp) +
-               MAX((xfs_calc_buf_res(2, mp->m_sb.sb_inodesize) +
-                    xfs_calc_buf_res(XFS_DIROP_LOG_COUNT(mp),
-                                     XFS_FSB_TO_B(mp, 1))),
-                   (xfs_calc_buf_res(3, mp->m_sb.sb_sectsize) +
-                    xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 1),
-                                     XFS_FSB_TO_B(mp, 1))));
-}
-
-/*
- * For removing a directory entry we can modify:
- *    the parent directory inode: inode size
- *    the removed inode: inode size
- *    the directory btree could join: (max depth + v2) * dir block size
- *    the directory bmap btree could join or split: (max depth + v2) * blocksize
- * And the bmap_finish transaction can free the dir and bmap blocks giving:
- *    the agf for the ag in which the blocks live: 2 * sector size
- *    the agfl for the ag in which the blocks live: 2 * sector size
- *    the superblock for the free block count: sector size
- *    the allocation btrees: 2 exts * 2 trees * (2 * max depth - 1) * block size
- */
-STATIC uint
-xfs_calc_remove_reservation(
-       struct xfs_mount        *mp)
-{
-       return XFS_DQUOT_LOGRES(mp) +
-               MAX((xfs_calc_buf_res(2, mp->m_sb.sb_inodesize) +
-                    xfs_calc_buf_res(XFS_DIROP_LOG_COUNT(mp),
-                                     XFS_FSB_TO_B(mp, 1))),
-                   (xfs_calc_buf_res(5, mp->m_sb.sb_sectsize) +
-                    xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 2),
-                                     XFS_FSB_TO_B(mp, 1))));
-}
-
-/*
- * For create, break it in to the two cases that the transaction
- * covers. We start with the modify case - allocation done by modification
- * of the state of existing inodes - and the allocation case.
- */
-
-/*
- * For create we can modify:
- *    the parent directory inode: inode size
- *    the new inode: inode size
- *    the inode btree entry: block size
- *    the superblock for the nlink flag: sector size
- *    the directory btree: (max depth + v2) * dir block size
- *    the directory inode's bmap btree: (max depth + v2) * block size
- */
-STATIC uint
-xfs_calc_create_resv_modify(
-       struct xfs_mount        *mp)
-{
-       return xfs_calc_buf_res(2, mp->m_sb.sb_inodesize) +
-               xfs_calc_buf_res(1, mp->m_sb.sb_sectsize) +
-               (uint)XFS_FSB_TO_B(mp, 1) +
-               xfs_calc_buf_res(XFS_DIROP_LOG_COUNT(mp), XFS_FSB_TO_B(mp, 1));
-}
-
-/*
- * For create we can allocate some inodes giving:
- *    the agi and agf of the ag getting the new inodes: 2 * sectorsize
- *    the superblock for the nlink flag: sector size
- *    the inode blocks allocated: XFS_IALLOC_BLOCKS * blocksize
- *    the inode btree: max depth * blocksize
- *    the allocation btrees: 2 trees * (max depth - 1) * block size
- */
-STATIC uint
-xfs_calc_create_resv_alloc(
-       struct xfs_mount        *mp)
-{
-       return xfs_calc_buf_res(2, mp->m_sb.sb_sectsize) +
-               mp->m_sb.sb_sectsize +
-               xfs_calc_buf_res(XFS_IALLOC_BLOCKS(mp), XFS_FSB_TO_B(mp, 1)) +
-               xfs_calc_buf_res(mp->m_in_maxlevels, XFS_FSB_TO_B(mp, 1)) +
-               xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 1),
-                                XFS_FSB_TO_B(mp, 1));
-}
-
-STATIC uint
-__xfs_calc_create_reservation(
-       struct xfs_mount        *mp)
-{
-       return XFS_DQUOT_LOGRES(mp) +
-               MAX(xfs_calc_create_resv_alloc(mp),
-                   xfs_calc_create_resv_modify(mp));
-}
-
-/*
- * For icreate we can allocate some inodes giving:
- *    the agi and agf of the ag getting the new inodes: 2 * sectorsize
- *    the superblock for the nlink flag: sector size
- *    the inode btree: max depth * blocksize
- *    the allocation btrees: 2 trees * (max depth - 1) * block size
- */
-STATIC uint
-xfs_calc_icreate_resv_alloc(
-       struct xfs_mount        *mp)
-{
-       return xfs_calc_buf_res(2, mp->m_sb.sb_sectsize) +
-               mp->m_sb.sb_sectsize +
-               xfs_calc_buf_res(mp->m_in_maxlevels, XFS_FSB_TO_B(mp, 1)) +
-               xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 1),
-                                XFS_FSB_TO_B(mp, 1));
-}
-
-STATIC uint
-xfs_calc_icreate_reservation(xfs_mount_t *mp)
-{
-       return XFS_DQUOT_LOGRES(mp) +
-               MAX(xfs_calc_icreate_resv_alloc(mp),
-                   xfs_calc_create_resv_modify(mp));
-}
-
-STATIC uint
-xfs_calc_create_reservation(
-       struct xfs_mount        *mp)
-{
-       if (xfs_sb_version_hascrc(&mp->m_sb))
-               return xfs_calc_icreate_reservation(mp);
-       return __xfs_calc_create_reservation(mp);
-
-}
-
-/*
- * Making a new directory is the same as creating a new file.
- */
-STATIC uint
-xfs_calc_mkdir_reservation(
-       struct xfs_mount        *mp)
-{
-       return xfs_calc_create_reservation(mp);
-}
-
-
-/*
- * Making a new symplink is the same as creating a new file, but
- * with the added blocks for remote symlink data which can be up to 1kB in
- * length (MAXPATHLEN).
- */
-STATIC uint
-xfs_calc_symlink_reservation(
-       struct xfs_mount        *mp)
-{
-       return xfs_calc_create_reservation(mp) +
-              xfs_calc_buf_res(1, MAXPATHLEN);
-}
-
-/*
- * In freeing an inode we can modify:
- *    the inode being freed: inode size
- *    the super block free inode counter: sector size
- *    the agi hash list and counters: sector size
- *    the inode btree entry: block size
- *    the on disk inode before ours in the agi hash list: inode cluster size
- *    the inode btree: max depth * blocksize
- *    the allocation btrees: 2 trees * (max depth - 1) * block size
- */
-STATIC uint
-xfs_calc_ifree_reservation(
-       struct xfs_mount        *mp)
-{
-       return XFS_DQUOT_LOGRES(mp) +
-               xfs_calc_buf_res(1, mp->m_sb.sb_inodesize) +
-               xfs_calc_buf_res(2, mp->m_sb.sb_sectsize) +
-               xfs_calc_buf_res(1, XFS_FSB_TO_B(mp, 1)) +
-               MAX((__uint16_t)XFS_FSB_TO_B(mp, 1),
-                   XFS_INODE_CLUSTER_SIZE(mp)) +
-               xfs_calc_buf_res(1, 0) +
-               xfs_calc_buf_res(2 + XFS_IALLOC_BLOCKS(mp) +
-                                mp->m_in_maxlevels, 0) +
-               xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 1),
-                                XFS_FSB_TO_B(mp, 1));
-}
-
-/*
- * When only changing the inode we log the inode and possibly the superblock
- * We also add a bit of slop for the transaction stuff.
- */
-STATIC uint
-xfs_calc_ichange_reservation(
-       struct xfs_mount        *mp)
-{
-       return XFS_DQUOT_LOGRES(mp) +
-               mp->m_sb.sb_inodesize +
-               mp->m_sb.sb_sectsize +
-               512;
-
-}
-
-/*
- * Growing the data section of the filesystem.
- *     superblock
- *     agi and agf
- *     allocation btrees
- */
-STATIC uint
-xfs_calc_growdata_reservation(
-       struct xfs_mount        *mp)
-{
-       return xfs_calc_buf_res(3, mp->m_sb.sb_sectsize) +
-               xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 1),
-                                XFS_FSB_TO_B(mp, 1));
-}
-
-/*
- * Growing the rt section of the filesystem.
- * In the first set of transactions (ALLOC) we allocate space to the
- * bitmap or summary files.
- *     superblock: sector size
- *     agf of the ag from which the extent is allocated: sector size
- *     bmap btree for bitmap/summary inode: max depth * blocksize
- *     bitmap/summary inode: inode size
- *     allocation btrees for 1 block alloc: 2 * (2 * maxdepth - 1) * blocksize
- */
-STATIC uint
-xfs_calc_growrtalloc_reservation(
-       struct xfs_mount        *mp)
-{
-       return xfs_calc_buf_res(2, mp->m_sb.sb_sectsize) +
-               xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK),
-                                XFS_FSB_TO_B(mp, 1)) +
-               xfs_calc_buf_res(1, mp->m_sb.sb_inodesize) +
-               xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 1),
-                                XFS_FSB_TO_B(mp, 1));
-}
-
-/*
- * Growing the rt section of the filesystem.
- * In the second set of transactions (ZERO) we zero the new metadata blocks.
- *     one bitmap/summary block: blocksize
- */
-STATIC uint
-xfs_calc_growrtzero_reservation(
-       struct xfs_mount        *mp)
-{
-       return xfs_calc_buf_res(1, mp->m_sb.sb_blocksize);
-}
-
-/*
- * Growing the rt section of the filesystem.
- * In the third set of transactions (FREE) we update metadata without
- * allocating any new blocks.
- *     superblock: sector size
- *     bitmap inode: inode size
- *     summary inode: inode size
- *     one bitmap block: blocksize
- *     summary blocks: new summary size
- */
-STATIC uint
-xfs_calc_growrtfree_reservation(
-       struct xfs_mount        *mp)
-{
-       return xfs_calc_buf_res(1, mp->m_sb.sb_sectsize) +
-               xfs_calc_buf_res(2, mp->m_sb.sb_inodesize) +
-               xfs_calc_buf_res(1, mp->m_sb.sb_blocksize) +
-               xfs_calc_buf_res(1, mp->m_rsumsize);
-}
-
-/*
- * Logging the inode modification timestamp on a synchronous write.
- *     inode
- */
-STATIC uint
-xfs_calc_swrite_reservation(
-       struct xfs_mount        *mp)
-{
-       return xfs_calc_buf_res(1, mp->m_sb.sb_inodesize);
-}
-
-/*
- * Logging the inode mode bits when writing a setuid/setgid file
- *     inode
- */
-STATIC uint
-xfs_calc_writeid_reservation(xfs_mount_t *mp)
-{
-       return xfs_calc_buf_res(1, mp->m_sb.sb_inodesize);
-}
-
-/*
- * Converting the inode from non-attributed to attributed.
- *     the inode being converted: inode size
- *     agf block and superblock (for block allocation)
- *     the new block (directory sized)
- *     bmap blocks for the new directory block
- *     allocation btrees
- */
-STATIC uint
-xfs_calc_addafork_reservation(
-       struct xfs_mount        *mp)
-{
-       return XFS_DQUOT_LOGRES(mp) +
-               xfs_calc_buf_res(1, mp->m_sb.sb_inodesize) +
-               xfs_calc_buf_res(2, mp->m_sb.sb_sectsize) +
-               xfs_calc_buf_res(1, mp->m_dirblksize) +
-               xfs_calc_buf_res(XFS_DAENTER_BMAP1B(mp, XFS_DATA_FORK) + 1,
-                                XFS_FSB_TO_B(mp, 1)) +
-               xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 1),
-                                XFS_FSB_TO_B(mp, 1));
-}
-
-/*
- * Removing the attribute fork of a file
- *    the inode being truncated: inode size
- *    the inode's bmap btree: max depth * block size
- * And the bmap_finish transaction can free the blocks and bmap blocks:
- *    the agf for each of the ags: 4 * sector size
- *    the agfl for each of the ags: 4 * sector size
- *    the super block to reflect the freed blocks: sector size
- *    worst case split in allocation btrees per extent assuming 4 extents:
- *             4 exts * 2 trees * (2 * max depth - 1) * block size
- */
-STATIC uint
-xfs_calc_attrinval_reservation(
-       struct xfs_mount        *mp)
-{
-       return MAX((xfs_calc_buf_res(1, mp->m_sb.sb_inodesize) +
-                   xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_ATTR_FORK),
-                                    XFS_FSB_TO_B(mp, 1))),
-                  (xfs_calc_buf_res(9, mp->m_sb.sb_sectsize) +
-                   xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 4),
-                                    XFS_FSB_TO_B(mp, 1))));
-}
-
-/*
- * Setting an attribute at mount time.
- *     the inode getting the attribute
- *     the superblock for allocations
- *     the agfs extents are allocated from
- *     the attribute btree * max depth
- *     the inode allocation btree
- * Since attribute transaction space is dependent on the size of the attribute,
- * the calculation is done partially at mount time and partially at runtime(see
- * below).
- */
-STATIC uint
-xfs_calc_attrsetm_reservation(
-       struct xfs_mount        *mp)
-{
-       return XFS_DQUOT_LOGRES(mp) +
-               xfs_calc_buf_res(1, mp->m_sb.sb_inodesize) +
-               xfs_calc_buf_res(1, mp->m_sb.sb_sectsize) +
-               xfs_calc_buf_res(XFS_DA_NODE_MAXDEPTH, XFS_FSB_TO_B(mp, 1));
-}
-
-/*
- * Setting an attribute at runtime, transaction space unit per block.
- *     the superblock for allocations: sector size
- *     the inode bmap btree could join or split: max depth * block size
- * Since the runtime attribute transaction space is dependent on the total
- * blocks needed for the 1st bmap, here we calculate out the space unit for
- * one block so that the caller could figure out the total space according
- * to the attibute extent length in blocks by: ext * XFS_ATTRSETRT_LOG_RES(mp).
- */
-STATIC uint
-xfs_calc_attrsetrt_reservation(
-       struct xfs_mount        *mp)
-{
-       return xfs_calc_buf_res(1, mp->m_sb.sb_sectsize) +
-               xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_ATTR_FORK),
-                                XFS_FSB_TO_B(mp, 1));
-}
-
-/*
- * Removing an attribute.
- *    the inode: inode size
- *    the attribute btree could join: max depth * block size
- *    the inode bmap btree could join or split: max depth * block size
- * And the bmap_finish transaction can free the attr blocks freed giving:
- *    the agf for the ag in which the blocks live: 2 * sector size
- *    the agfl for the ag in which the blocks live: 2 * sector size
- *    the superblock for the free block count: sector size
- *    the allocation btrees: 2 exts * 2 trees * (2 * max depth - 1) * block size
- */
-STATIC uint
-xfs_calc_attrrm_reservation(
-       struct xfs_mount        *mp)
-{
-       return XFS_DQUOT_LOGRES(mp) +
-               MAX((xfs_calc_buf_res(1, mp->m_sb.sb_inodesize) +
-                    xfs_calc_buf_res(XFS_DA_NODE_MAXDEPTH,
-                                     XFS_FSB_TO_B(mp, 1)) +
-                    (uint)XFS_FSB_TO_B(mp,
-                                       XFS_BM_MAXLEVELS(mp, XFS_ATTR_FORK)) +
-                    xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK), 0)),
-                   (xfs_calc_buf_res(5, mp->m_sb.sb_sectsize) +
-                    xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 2),
-                                     XFS_FSB_TO_B(mp, 1))));
-}
-
-/*
- * Clearing a bad agino number in an agi hash bucket.
- */
-STATIC uint
-xfs_calc_clear_agi_bucket_reservation(
-       struct xfs_mount        *mp)
-{
-       return xfs_calc_buf_res(1, mp->m_sb.sb_sectsize);
-}
-
-/*
- * Clearing the quotaflags in the superblock.
- *     the super block for changing quota flags: sector size
- */
-STATIC uint
-xfs_calc_qm_sbchange_reservation(
-       struct xfs_mount        *mp)
-{
-       return xfs_calc_buf_res(1, mp->m_sb.sb_sectsize);
-}
-
-/*
- * Adjusting quota limits.
- *    the xfs_disk_dquot_t: sizeof(struct xfs_disk_dquot)
- */
-STATIC uint
-xfs_calc_qm_setqlim_reservation(
-       struct xfs_mount        *mp)
-{
-       return xfs_calc_buf_res(1, sizeof(struct xfs_disk_dquot));
-}
-
-/*
- * Allocating quota on disk if needed.
- *     the write transaction log space: XFS_WRITE_LOG_RES(mp)
- *     the unit of quota allocation: one system block size
- */
-STATIC uint
-xfs_calc_qm_dqalloc_reservation(
-       struct xfs_mount        *mp)
-{
-       return XFS_WRITE_LOG_RES(mp) +
-               xfs_calc_buf_res(1,
-                       XFS_FSB_TO_B(mp, XFS_DQUOT_CLUSTER_SIZE_FSB) - 1);
-}
-
-/*
- * Turning off quotas.
- *    the xfs_qoff_logitem_t: sizeof(struct xfs_qoff_logitem) * 2
- *    the superblock for the quota flags: sector size
- */
-STATIC uint
-xfs_calc_qm_quotaoff_reservation(
-       struct xfs_mount        *mp)
-{
-       return sizeof(struct xfs_qoff_logitem) * 2 +
-               xfs_calc_buf_res(1, mp->m_sb.sb_sectsize);
-}
-
-/*
- * End of turning off quotas.
- *    the xfs_qoff_logitem_t: sizeof(struct xfs_qoff_logitem) * 2
- */
-STATIC uint
-xfs_calc_qm_quotaoff_end_reservation(
-       struct xfs_mount        *mp)
-{
-       return sizeof(struct xfs_qoff_logitem) * 2;
-}
-
-/*
- * Syncing the incore super block changes to disk.
- *     the super block to reflect the changes: sector size
- */
-STATIC uint
-xfs_calc_sb_reservation(
-       struct xfs_mount        *mp)
-{
-       return xfs_calc_buf_res(1, mp->m_sb.sb_sectsize);
-}
-
 /*
  * Initialize the precomputed transaction reservation values
  * in the mount structure.
@@ -679,36 +56,7 @@ void
 xfs_trans_init(
        struct xfs_mount        *mp)
 {
-       struct xfs_trans_reservations *resp = &mp->m_reservations;
-
-       resp->tr_write = xfs_calc_write_reservation(mp);
-       resp->tr_itruncate = xfs_calc_itruncate_reservation(mp);
-       resp->tr_rename = xfs_calc_rename_reservation(mp);
-       resp->tr_link = xfs_calc_link_reservation(mp);
-       resp->tr_remove = xfs_calc_remove_reservation(mp);
-       resp->tr_symlink = xfs_calc_symlink_reservation(mp);
-       resp->tr_create = xfs_calc_create_reservation(mp);
-       resp->tr_mkdir = xfs_calc_mkdir_reservation(mp);
-       resp->tr_ifree = xfs_calc_ifree_reservation(mp);
-       resp->tr_ichange = xfs_calc_ichange_reservation(mp);
-       resp->tr_growdata = xfs_calc_growdata_reservation(mp);
-       resp->tr_swrite = xfs_calc_swrite_reservation(mp);
-       resp->tr_writeid = xfs_calc_writeid_reservation(mp);
-       resp->tr_addafork = xfs_calc_addafork_reservation(mp);
-       resp->tr_attrinval = xfs_calc_attrinval_reservation(mp);
-       resp->tr_attrsetm = xfs_calc_attrsetm_reservation(mp);
-       resp->tr_attrsetrt = xfs_calc_attrsetrt_reservation(mp);
-       resp->tr_attrrm = xfs_calc_attrrm_reservation(mp);
-       resp->tr_clearagi = xfs_calc_clear_agi_bucket_reservation(mp);
-       resp->tr_growrtalloc = xfs_calc_growrtalloc_reservation(mp);
-       resp->tr_growrtzero = xfs_calc_growrtzero_reservation(mp);
-       resp->tr_growrtfree = xfs_calc_growrtfree_reservation(mp);
-       resp->tr_qm_sbchange = xfs_calc_qm_sbchange_reservation(mp);
-       resp->tr_qm_setqlim = xfs_calc_qm_setqlim_reservation(mp);
-       resp->tr_qm_dqalloc = xfs_calc_qm_dqalloc_reservation(mp);
-       resp->tr_qm_quotaoff = xfs_calc_qm_quotaoff_reservation(mp);
-       resp->tr_qm_equotaoff = xfs_calc_qm_quotaoff_end_reservation(mp);
-       resp->tr_sb = xfs_calc_sb_reservation(mp);
+       xfs_trans_resv_calc(mp, M_RES(mp));
 }
 
 /*
@@ -744,7 +92,7 @@ _xfs_trans_alloc(
        atomic_inc(&mp->m_active_trans);
 
        tp = kmem_zone_zalloc(xfs_trans_zone, memflags);
-       tp->t_magic = XFS_TRANS_MAGIC;
+       tp->t_magic = XFS_TRANS_HEADER_MAGIC;
        tp->t_type = type;
        tp->t_mountp = mp;
        INIT_LIST_HEAD(&tp->t_items);
@@ -789,7 +137,7 @@ xfs_trans_dup(
        /*
         * Initialize the new transaction structure.
         */
-       ntp->t_magic = XFS_TRANS_MAGIC;
+       ntp->t_magic = XFS_TRANS_HEADER_MAGIC;
        ntp->t_type = tp->t_type;
        ntp->t_mountp = tp->t_mountp;
        INIT_LIST_HEAD(&ntp->t_items);
@@ -832,12 +180,10 @@ xfs_trans_dup(
  */
 int
 xfs_trans_reserve(
-       xfs_trans_t     *tp,
-       uint            blocks,
-       uint            logspace,
-       uint            rtextents,
-       uint            flags,
-       uint            logcount)
+       struct xfs_trans        *tp,
+       struct xfs_trans_res    *resp,
+       uint                    blocks,
+       uint                    rtextents)
 {
        int             error = 0;
        int             rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;
@@ -863,13 +209,15 @@ xfs_trans_reserve(
        /*
         * Reserve the log space needed for this transaction.
         */
-       if (logspace > 0) {
+       if (resp->tr_logres > 0) {
                bool    permanent = false;
 
-               ASSERT(tp->t_log_res == 0 || tp->t_log_res == logspace);
-               ASSERT(tp->t_log_count == 0 || tp->t_log_count == logcount);
+               ASSERT(tp->t_log_res == 0 ||
+                      tp->t_log_res == resp->tr_logres);
+               ASSERT(tp->t_log_count == 0 ||
+                      tp->t_log_count == resp->tr_logcount);
 
-               if (flags & XFS_TRANS_PERM_LOG_RES) {
+               if (resp->tr_logflags & XFS_TRANS_PERM_LOG_RES) {
                        tp->t_flags |= XFS_TRANS_PERM_LOG_RES;
                        permanent = true;
                } else {
@@ -878,20 +226,21 @@ xfs_trans_reserve(
                }
 
                if (tp->t_ticket != NULL) {
-                       ASSERT(flags & XFS_TRANS_PERM_LOG_RES);
+                       ASSERT(resp->tr_logflags & XFS_TRANS_PERM_LOG_RES);
                        error = xfs_log_regrant(tp->t_mountp, tp->t_ticket);
                } else {
-                       error = xfs_log_reserve(tp->t_mountp, logspace,
-                                               logcount, &tp->t_ticket,
-                                               XFS_TRANSACTION, permanent,
-                                               tp->t_type);
+                       error = xfs_log_reserve(tp->t_mountp,
+                                               resp->tr_logres,
+                                               resp->tr_logcount,
+                                               &tp->t_ticket, XFS_TRANSACTION,
+                                               permanent, tp->t_type);
                }
 
                if (error)
                        goto undo_blocks;
 
-               tp->t_log_res = logspace;
-               tp->t_log_count = logcount;
+               tp->t_log_res = resp->tr_logres;
+               tp->t_log_count = resp->tr_logcount;
        }
 
        /*
@@ -916,10 +265,10 @@ xfs_trans_reserve(
         * reservations which have already been performed.
         */
 undo_log:
-       if (logspace > 0) {
+       if (resp->tr_logres > 0) {
                int             log_flags;
 
-               if (flags & XFS_TRANS_PERM_LOG_RES) {
+               if (resp->tr_logflags & XFS_TRANS_PERM_LOG_RES) {
                        log_flags = XFS_LOG_REL_PERM_RESERV;
                } else {
                        log_flags = 0;
@@ -1666,7 +1015,7 @@ xfs_trans_roll(
        struct xfs_inode        *dp)
 {
        struct xfs_trans        *trans;
-       unsigned int            logres, count;
+       struct xfs_trans_res    tres;
        int                     error;
 
        /*
@@ -1678,8 +1027,8 @@ xfs_trans_roll(
        /*
         * Copy the critical parameters from one trans to the next.
         */
-       logres = trans->t_log_res;
-       count = trans->t_log_count;
+       tres.tr_logres = trans->t_log_res;
+       tres.tr_logcount = trans->t_log_count;
        *tpp = xfs_trans_dup(trans);
 
        /*
@@ -1710,8 +1059,8 @@ xfs_trans_roll(
         * across this call, or that anything that is locked be logged in
         * the prior and the next transactions.
         */
-       error = xfs_trans_reserve(trans, 0, logres, 0,
-                                 XFS_TRANS_PERM_LOG_RES, count);
+       tres.tr_logflags = XFS_TRANS_PERM_LOG_RES;
+       error = xfs_trans_reserve(trans, &tres, 0, 0);
        /*
         *  Ensure that the inode is in the new transaction and locked.
         */
index 2b4946393e30f56655e55c782813778760846f79..4786170baeb0b891227ec8597cc7f1876aa17310 100644 (file)
 
 struct xfs_log_item;
 
-/*
- * This is the structure written in the log at the head of
- * every transaction. It identifies the type and id of the
- * transaction, and contains the number of items logged by
- * the transaction so we know how many to expect during recovery.
- *
- * Do not change the below structure without redoing the code in
- * xlog_recover_add_to_trans() and xlog_recover_add_to_cont_trans().
- */
-typedef struct xfs_trans_header {
-       uint            th_magic;               /* magic number */
-       uint            th_type;                /* transaction type */
-       __int32_t       th_tid;                 /* transaction id (unused) */
-       uint            th_num_items;           /* num items logged by trans */
-} xfs_trans_header_t;
-
-#define        XFS_TRANS_HEADER_MAGIC  0x5452414e      /* TRAN */
-
-/*
- * Log item types.
- */
-#define        XFS_LI_EFI              0x1236
-#define        XFS_LI_EFD              0x1237
-#define        XFS_LI_IUNLINK          0x1238
-#define        XFS_LI_INODE            0x123b  /* aligned ino chunks, var-size ibufs */
-#define        XFS_LI_BUF              0x123c  /* v2 bufs, variable sized inode bufs */
-#define        XFS_LI_DQUOT            0x123d
-#define        XFS_LI_QUOTAOFF         0x123e
-#define        XFS_LI_ICREATE          0x123f
-
-#define XFS_LI_TYPE_DESC \
-       { XFS_LI_EFI,           "XFS_LI_EFI" }, \
-       { XFS_LI_EFD,           "XFS_LI_EFD" }, \
-       { XFS_LI_IUNLINK,       "XFS_LI_IUNLINK" }, \
-       { XFS_LI_INODE,         "XFS_LI_INODE" }, \
-       { XFS_LI_BUF,           "XFS_LI_BUF" }, \
-       { XFS_LI_DQUOT,         "XFS_LI_DQUOT" }, \
-       { XFS_LI_QUOTAOFF,      "XFS_LI_QUOTAOFF" }
-
-/*
- * Transaction types.  Used to distinguish types of buffers.
- */
-#define XFS_TRANS_SETATTR_NOT_SIZE     1
-#define XFS_TRANS_SETATTR_SIZE         2
-#define XFS_TRANS_INACTIVE             3
-#define XFS_TRANS_CREATE               4
-#define XFS_TRANS_CREATE_TRUNC         5
-#define XFS_TRANS_TRUNCATE_FILE                6
-#define XFS_TRANS_REMOVE               7
-#define XFS_TRANS_LINK                 8
-#define XFS_TRANS_RENAME               9
-#define XFS_TRANS_MKDIR                        10
-#define XFS_TRANS_RMDIR                        11
-#define XFS_TRANS_SYMLINK              12
-#define XFS_TRANS_SET_DMATTRS          13
-#define XFS_TRANS_GROWFS               14
-#define XFS_TRANS_STRAT_WRITE          15
-#define XFS_TRANS_DIOSTRAT             16
-/* 17 was XFS_TRANS_WRITE_SYNC */
-#define        XFS_TRANS_WRITEID               18
-#define        XFS_TRANS_ADDAFORK              19
-#define        XFS_TRANS_ATTRINVAL             20
-#define        XFS_TRANS_ATRUNCATE             21
-#define        XFS_TRANS_ATTR_SET              22
-#define        XFS_TRANS_ATTR_RM               23
-#define        XFS_TRANS_ATTR_FLAG             24
-#define        XFS_TRANS_CLEAR_AGI_BUCKET      25
-#define XFS_TRANS_QM_SBCHANGE          26
-/*
- * Dummy entries since we use the transaction type to index into the
- * trans_type[] in xlog_recover_print_trans_head()
- */
-#define XFS_TRANS_DUMMY1               27
-#define XFS_TRANS_DUMMY2               28
-#define XFS_TRANS_QM_QUOTAOFF          29
-#define XFS_TRANS_QM_DQALLOC           30
-#define XFS_TRANS_QM_SETQLIM           31
-#define XFS_TRANS_QM_DQCLUSTER         32
-#define XFS_TRANS_QM_QINOCREATE                33
-#define XFS_TRANS_QM_QUOTAOFF_END      34
-#define XFS_TRANS_SB_UNIT              35
-#define XFS_TRANS_FSYNC_TS             36
-#define        XFS_TRANS_GROWFSRT_ALLOC        37
-#define        XFS_TRANS_GROWFSRT_ZERO         38
-#define        XFS_TRANS_GROWFSRT_FREE         39
-#define        XFS_TRANS_SWAPEXT               40
-#define        XFS_TRANS_SB_COUNT              41
-#define        XFS_TRANS_CHECKPOINT            42
-#define        XFS_TRANS_ICREATE               43
-#define        XFS_TRANS_TYPE_MAX              43
-/* new transaction types need to be reflected in xfs_logprint(8) */
-
-#define XFS_TRANS_TYPES \
-       { XFS_TRANS_SETATTR_NOT_SIZE,   "SETATTR_NOT_SIZE" }, \
-       { XFS_TRANS_SETATTR_SIZE,       "SETATTR_SIZE" }, \
-       { XFS_TRANS_INACTIVE,           "INACTIVE" }, \
-       { XFS_TRANS_CREATE,             "CREATE" }, \
-       { XFS_TRANS_CREATE_TRUNC,       "CREATE_TRUNC" }, \
-       { XFS_TRANS_TRUNCATE_FILE,      "TRUNCATE_FILE" }, \
-       { XFS_TRANS_REMOVE,             "REMOVE" }, \
-       { XFS_TRANS_LINK,               "LINK" }, \
-       { XFS_TRANS_RENAME,             "RENAME" }, \
-       { XFS_TRANS_MKDIR,              "MKDIR" }, \
-       { XFS_TRANS_RMDIR,              "RMDIR" }, \
-       { XFS_TRANS_SYMLINK,            "SYMLINK" }, \
-       { XFS_TRANS_SET_DMATTRS,        "SET_DMATTRS" }, \
-       { XFS_TRANS_GROWFS,             "GROWFS" }, \
-       { XFS_TRANS_STRAT_WRITE,        "STRAT_WRITE" }, \
-       { XFS_TRANS_DIOSTRAT,           "DIOSTRAT" }, \
-       { XFS_TRANS_WRITEID,            "WRITEID" }, \
-       { XFS_TRANS_ADDAFORK,           "ADDAFORK" }, \
-       { XFS_TRANS_ATTRINVAL,          "ATTRINVAL" }, \
-       { XFS_TRANS_ATRUNCATE,          "ATRUNCATE" }, \
-       { XFS_TRANS_ATTR_SET,           "ATTR_SET" }, \
-       { XFS_TRANS_ATTR_RM,            "ATTR_RM" }, \
-       { XFS_TRANS_ATTR_FLAG,          "ATTR_FLAG" }, \
-       { XFS_TRANS_CLEAR_AGI_BUCKET,   "CLEAR_AGI_BUCKET" }, \
-       { XFS_TRANS_QM_SBCHANGE,        "QM_SBCHANGE" }, \
-       { XFS_TRANS_QM_QUOTAOFF,        "QM_QUOTAOFF" }, \
-       { XFS_TRANS_QM_DQALLOC,         "QM_DQALLOC" }, \
-       { XFS_TRANS_QM_SETQLIM,         "QM_SETQLIM" }, \
-       { XFS_TRANS_QM_DQCLUSTER,       "QM_DQCLUSTER" }, \
-       { XFS_TRANS_QM_QINOCREATE,      "QM_QINOCREATE" }, \
-       { XFS_TRANS_QM_QUOTAOFF_END,    "QM_QOFF_END" }, \
-       { XFS_TRANS_SB_UNIT,            "SB_UNIT" }, \
-       { XFS_TRANS_FSYNC_TS,           "FSYNC_TS" }, \
-       { XFS_TRANS_GROWFSRT_ALLOC,     "GROWFSRT_ALLOC" }, \
-       { XFS_TRANS_GROWFSRT_ZERO,      "GROWFSRT_ZERO" }, \
-       { XFS_TRANS_GROWFSRT_FREE,      "GROWFSRT_FREE" }, \
-       { XFS_TRANS_SWAPEXT,            "SWAPEXT" }, \
-       { XFS_TRANS_SB_COUNT,           "SB_COUNT" }, \
-       { XFS_TRANS_CHECKPOINT,         "CHECKPOINT" }, \
-       { XFS_TRANS_DUMMY1,             "DUMMY1" }, \
-       { XFS_TRANS_DUMMY2,             "DUMMY2" }, \
-       { XLOG_UNMOUNT_REC_TYPE,        "UNMOUNT" }
-
-/*
- * This structure is used to track log items associated with
- * a transaction.  It points to the log item and keeps some
- * flags to track the state of the log item.  It also tracks
- * the amount of space needed to log the item it describes
- * once we get to commit processing (see xfs_trans_commit()).
- */
-struct xfs_log_item_desc {
-       struct xfs_log_item     *lid_item;
-       struct list_head        lid_trans;
-       unsigned char           lid_flags;
-};
+#include "xfs_trans_resv.h"
 
-#define XFS_LID_DIRTY          0x1
-
-#define        XFS_TRANS_MAGIC         0x5452414E      /* 'TRAN' */
-/*
- * Values for t_flags.
- */
-#define        XFS_TRANS_DIRTY         0x01    /* something needs to be logged */
-#define        XFS_TRANS_SB_DIRTY      0x02    /* superblock is modified */
-#define        XFS_TRANS_PERM_LOG_RES  0x04    /* xact took a permanent log res */
-#define        XFS_TRANS_SYNC          0x08    /* make commit synchronous */
-#define XFS_TRANS_DQ_DIRTY     0x10    /* at least one dquot in trx dirty */
-#define XFS_TRANS_RESERVE      0x20    /* OK to use reserved data blocks */
-#define XFS_TRANS_FREEZE_PROT  0x40    /* Transaction has elevated writer
-                                          count in superblock */
-
-/*
- * Values for call flags parameter.
- */
-#define        XFS_TRANS_RELEASE_LOG_RES       0x4
-#define        XFS_TRANS_ABORT                 0x8
-
-/*
- * Field values for xfs_trans_mod_sb.
- */
-#define        XFS_TRANS_SB_ICOUNT             0x00000001
-#define        XFS_TRANS_SB_IFREE              0x00000002
-#define        XFS_TRANS_SB_FDBLOCKS           0x00000004
-#define        XFS_TRANS_SB_RES_FDBLOCKS       0x00000008
-#define        XFS_TRANS_SB_FREXTENTS          0x00000010
-#define        XFS_TRANS_SB_RES_FREXTENTS      0x00000020
-#define        XFS_TRANS_SB_DBLOCKS            0x00000040
-#define        XFS_TRANS_SB_AGCOUNT            0x00000080
-#define        XFS_TRANS_SB_IMAXPCT            0x00000100
-#define        XFS_TRANS_SB_REXTSIZE           0x00000200
-#define        XFS_TRANS_SB_RBMBLOCKS          0x00000400
-#define        XFS_TRANS_SB_RBLOCKS            0x00000800
-#define        XFS_TRANS_SB_REXTENTS           0x00001000
-#define        XFS_TRANS_SB_REXTSLOG           0x00002000
-
-
-/*
- * Per-extent log reservation for the allocation btree changes
- * involved in freeing or allocating an extent.
- * 2 trees * (2 blocks/level * max depth - 1)
- */
-#define        XFS_ALLOCFREE_LOG_COUNT(mp,nx) \
-       ((nx) * (2 * (2 * XFS_AG_MAXLEVELS(mp) - 1)))
-
-/*
- * Per-directory log reservation for any directory change.
- * dir blocks: (1 btree block per level + data block + free block)
- * bmap btree: (levels + 2) * max depth
- * v2 directory blocks can be fragmented below the dirblksize down to the fsb
- * size, so account for that in the DAENTER macros.
- */
-#define        XFS_DIROP_LOG_COUNT(mp) \
-       (XFS_DAENTER_BLOCKS(mp, XFS_DATA_FORK) + \
-        XFS_DAENTER_BMAPS(mp, XFS_DATA_FORK) + 1)
-
-
-#define        XFS_WRITE_LOG_RES(mp)   ((mp)->m_reservations.tr_write)
-#define        XFS_ITRUNCATE_LOG_RES(mp)   ((mp)->m_reservations.tr_itruncate)
-#define        XFS_RENAME_LOG_RES(mp)  ((mp)->m_reservations.tr_rename)
-#define        XFS_LINK_LOG_RES(mp)    ((mp)->m_reservations.tr_link)
-#define        XFS_REMOVE_LOG_RES(mp)  ((mp)->m_reservations.tr_remove)
-#define        XFS_SYMLINK_LOG_RES(mp) ((mp)->m_reservations.tr_symlink)
-#define        XFS_CREATE_LOG_RES(mp)  ((mp)->m_reservations.tr_create)
-#define        XFS_MKDIR_LOG_RES(mp)   ((mp)->m_reservations.tr_mkdir)
-#define        XFS_IFREE_LOG_RES(mp)   ((mp)->m_reservations.tr_ifree)
-#define        XFS_ICHANGE_LOG_RES(mp) ((mp)->m_reservations.tr_ichange)
-#define        XFS_GROWDATA_LOG_RES(mp)    ((mp)->m_reservations.tr_growdata)
-#define        XFS_GROWRTALLOC_LOG_RES(mp)     ((mp)->m_reservations.tr_growrtalloc)
-#define        XFS_GROWRTZERO_LOG_RES(mp)      ((mp)->m_reservations.tr_growrtzero)
-#define        XFS_GROWRTFREE_LOG_RES(mp)      ((mp)->m_reservations.tr_growrtfree)
-#define        XFS_SWRITE_LOG_RES(mp)  ((mp)->m_reservations.tr_swrite)
-/*
- * Logging the inode timestamps on an fsync -- same as SWRITE
- * as long as SWRITE logs the entire inode core
- */
-#define XFS_FSYNC_TS_LOG_RES(mp)        ((mp)->m_reservations.tr_swrite)
-#define        XFS_WRITEID_LOG_RES(mp)         ((mp)->m_reservations.tr_swrite)
-#define        XFS_ADDAFORK_LOG_RES(mp)        ((mp)->m_reservations.tr_addafork)
-#define        XFS_ATTRINVAL_LOG_RES(mp)       ((mp)->m_reservations.tr_attrinval)
-#define        XFS_ATTRSETM_LOG_RES(mp)        ((mp)->m_reservations.tr_attrsetm)
-#define XFS_ATTRSETRT_LOG_RES(mp)      ((mp)->m_reservations.tr_attrsetrt)
-#define        XFS_ATTRRM_LOG_RES(mp)          ((mp)->m_reservations.tr_attrrm)
-#define        XFS_CLEAR_AGI_BUCKET_LOG_RES(mp)  ((mp)->m_reservations.tr_clearagi)
-#define XFS_QM_SBCHANGE_LOG_RES(mp)    ((mp)->m_reservations.tr_qm_sbchange)
-#define XFS_QM_SETQLIM_LOG_RES(mp)     ((mp)->m_reservations.tr_qm_setqlim)
-#define XFS_QM_DQALLOC_LOG_RES(mp)     ((mp)->m_reservations.tr_qm_dqalloc)
-#define XFS_QM_QUOTAOFF_LOG_RES(mp)    ((mp)->m_reservations.tr_qm_quotaoff)
-#define XFS_QM_QUOTAOFF_END_LOG_RES(mp)        ((mp)->m_reservations.tr_qm_equotaoff)
-#define XFS_SB_LOG_RES(mp)             ((mp)->m_reservations.tr_sb)
-
-/*
- * Various log count values.
- */
-#define        XFS_DEFAULT_LOG_COUNT           1
-#define        XFS_DEFAULT_PERM_LOG_COUNT      2
-#define        XFS_ITRUNCATE_LOG_COUNT         2
-#define XFS_INACTIVE_LOG_COUNT         2
-#define        XFS_CREATE_LOG_COUNT            2
-#define        XFS_MKDIR_LOG_COUNT             3
-#define        XFS_SYMLINK_LOG_COUNT           3
-#define        XFS_REMOVE_LOG_COUNT            2
-#define        XFS_LINK_LOG_COUNT              2
-#define        XFS_RENAME_LOG_COUNT            2
-#define        XFS_WRITE_LOG_COUNT             2
-#define        XFS_ADDAFORK_LOG_COUNT          2
-#define        XFS_ATTRINVAL_LOG_COUNT         1
-#define        XFS_ATTRSET_LOG_COUNT           3
-#define        XFS_ATTRRM_LOG_COUNT            3
-
-/*
- * Here we centralize the specification of XFS meta-data buffer
- * reference count values.  This determine how hard the buffer
- * cache tries to hold onto the buffer.
- */
-#define        XFS_AGF_REF             4
-#define        XFS_AGI_REF             4
-#define        XFS_AGFL_REF            3
-#define        XFS_INO_BTREE_REF       3
-#define        XFS_ALLOC_BTREE_REF     2
-#define        XFS_BMAP_BTREE_REF      2
-#define        XFS_DIR_BTREE_REF       2
-#define        XFS_INO_REF             2
-#define        XFS_ATTR_BTREE_REF      1
-#define        XFS_DQUOT_REF           1
-
-#ifdef __KERNEL__
+/* kernel only transaction subsystem defines */
 
 struct xfs_buf;
 struct xfs_buftarg;
@@ -310,6 +34,7 @@ struct xfs_log_iovec;
 struct xfs_log_item_desc;
 struct xfs_mount;
 struct xfs_trans;
+struct xfs_trans_res;
 struct xfs_dquot_acct;
 struct xfs_busy_extent;
 
@@ -342,7 +67,7 @@ typedef struct xfs_log_item {
        { XFS_LI_ABORTED,       "ABORTED" }
 
 struct xfs_item_ops {
-       uint (*iop_size)(xfs_log_item_t *);
+       void (*iop_size)(xfs_log_item_t *, int *, int *);
        void (*iop_format)(xfs_log_item_t *, struct xfs_log_iovec *);
        void (*iop_pin)(xfs_log_item_t *);
        void (*iop_unpin)(xfs_log_item_t *, int remove);
@@ -352,9 +77,6 @@ struct xfs_item_ops {
        void (*iop_committing)(xfs_log_item_t *, xfs_lsn_t);
 };
 
-#define IOP_SIZE(ip)           (*(ip)->li_ops->iop_size)(ip)
-#define IOP_FORMAT(ip,vp)      (*(ip)->li_ops->iop_format)(ip, vp)
-#define IOP_PIN(ip)            (*(ip)->li_ops->iop_pin)(ip)
 #define IOP_UNPIN(ip, remove)  (*(ip)->li_ops->iop_unpin)(ip, remove)
 #define IOP_PUSH(ip, list)     (*(ip)->li_ops->iop_push)(ip, list)
 #define IOP_UNLOCK(ip)         (*(ip)->li_ops->iop_unlock)(ip)
@@ -446,7 +168,7 @@ typedef struct xfs_trans {
 xfs_trans_t    *xfs_trans_alloc(struct xfs_mount *, uint);
 xfs_trans_t    *_xfs_trans_alloc(struct xfs_mount *, uint, xfs_km_flags_t);
 xfs_trans_t    *xfs_trans_dup(xfs_trans_t *);
-int            xfs_trans_reserve(xfs_trans_t *, uint, uint, uint,
+int            xfs_trans_reserve(struct xfs_trans *, struct xfs_trans_res *,
                                  uint, uint);
 void           xfs_trans_mod_sb(xfs_trans_t *, uint, int64_t);
 
@@ -528,9 +250,4 @@ void                xfs_trans_ail_destroy(struct xfs_mount *);
 extern kmem_zone_t     *xfs_trans_zone;
 extern kmem_zone_t     *xfs_log_item_desc_zone;
 
-#endif /* __KERNEL__ */
-
-void           xfs_trans_init(struct xfs_mount *);
-int            xfs_trans_roll(struct xfs_trans **, struct xfs_inode *);
-
 #endif /* __XFS_TRANS_H__ */
index 61407a847b869a6bb0faec7bf2c3279d66aaeb83..54ee3c5dee76093b6a6136a5fde759a8be309ccd 100644 (file)
@@ -17,6 +17,7 @@
  */
 #include "xfs.h"
 #include "xfs_fs.h"
+#include "xfs_format.h"
 #include "xfs_log.h"
 #include "xfs_trans.h"
 #include "xfs_sb.h"
index 53b7c9b0f8f7a6fa3c96a3f73298c2862eb3d107..d43b13004f7c8f039b32613ea059942ff5d5c312 100644 (file)
@@ -25,6 +25,9 @@ struct xfs_trans;
 struct xfs_ail;
 struct xfs_log_vec;
 
+
+void   xfs_trans_init(struct xfs_mount *);
+int    xfs_trans_roll(struct xfs_trans **, struct xfs_inode *);
 void   xfs_trans_add_item(struct xfs_trans *, struct xfs_log_item *);
 void   xfs_trans_del_item(struct xfs_log_item *);
 void   xfs_trans_free_items(struct xfs_trans *tp, xfs_lsn_t commit_lsn,
diff --git a/fs/xfs/xfs_trans_resv.c b/fs/xfs/xfs_trans_resv.c
new file mode 100644 (file)
index 0000000..24110f3
--- /dev/null
@@ -0,0 +1,769 @@
+/*
+ * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
+ * Copyright (C) 2010 Red Hat, Inc.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+#include "xfs.h"
+#include "xfs_fs.h"
+#include "xfs_format.h"
+#include "xfs_log.h"
+#include "xfs_trans_resv.h"
+#include "xfs_trans.h"
+#include "xfs_sb.h"
+#include "xfs_ag.h"
+#include "xfs_mount.h"
+#include "xfs_error.h"
+#include "xfs_da_btree.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_alloc_btree.h"
+#include "xfs_ialloc_btree.h"
+#include "xfs_dinode.h"
+#include "xfs_inode.h"
+#include "xfs_btree.h"
+#include "xfs_ialloc.h"
+#include "xfs_alloc.h"
+#include "xfs_extent_busy.h"
+#include "xfs_bmap.h"
+#include "xfs_bmap_util.h"
+#include "xfs_quota.h"
+#include "xfs_qm.h"
+#include "xfs_trans_space.h"
+#include "xfs_trace.h"
+
+/*
+ * A buffer has a format structure overhead in the log in addition
+ * to the data, so we need to take this into account when reserving
+ * space in a transaction for a buffer.  Round the space required up
+ * to a multiple of 128 bytes so that we don't change the historical
+ * reservation that has been used for this overhead.
+ */
+STATIC uint
+xfs_buf_log_overhead(void)
+{
+       return round_up(sizeof(struct xlog_op_header) +
+                       sizeof(struct xfs_buf_log_format), 128);
+}
+
+/*
+ * Calculate out transaction log reservation per item in bytes.
+ *
+ * The nbufs argument is used to indicate the number of items that
+ * will be changed in a transaction.  size is used to tell how many
+ * bytes should be reserved per item.
+ */
+STATIC uint
+xfs_calc_buf_res(
+       uint            nbufs,
+       uint            size)
+{
+       return nbufs * (size + xfs_buf_log_overhead());
+}
+
+/*
+ * Various log reservation values.
+ *
+ * These are based on the size of the file system block because that is what
+ * most transactions manipulate.  Each adds in an additional 128 bytes per
+ * item logged to try to account for the overhead of the transaction mechanism.
+ *
+ * Note:  Most of the reservations underestimate the number of allocation
+ * groups into which they could free extents in the xfs_bmap_finish() call.
+ * This is because the number in the worst case is quite high and quite
+ * unusual.  In order to fix this we need to change xfs_bmap_finish() to free
+ * extents in only a single AG at a time.  This will require changes to the
+ * EFI code as well, however, so that the EFI for the extents not freed is
+ * logged again in each transaction.  See SGI PV #261917.
+ *
+ * Reservation functions here avoid a huge stack in xfs_trans_init due to
+ * register overflow from temporaries in the calculations.
+ */
+
+
+/*
+ * In a write transaction we can allocate a maximum of 2
+ * extents.  This gives:
+ *    the inode getting the new extents: inode size
+ *    the inode's bmap btree: max depth * block size
+ *    the agfs of the ags from which the extents are allocated: 2 * sector
+ *    the superblock free block counter: sector size
+ *    the allocation btrees: 2 exts * 2 trees * (2 * max depth - 1) * block size
+ * And the bmap_finish transaction can free bmap blocks in a join:
+ *    the agfs of the ags containing the blocks: 2 * sector size
+ *    the agfls of the ags containing the blocks: 2 * sector size
+ *    the super block free block counter: sector size
+ *    the allocation btrees: 2 exts * 2 trees * (2 * max depth - 1) * block size
+ */
+STATIC uint
+xfs_calc_write_reservation(
+       struct xfs_mount        *mp)
+{
+       return XFS_DQUOT_LOGRES(mp) +
+               MAX((xfs_calc_buf_res(1, mp->m_sb.sb_inodesize) +
+                    xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK),
+                                     XFS_FSB_TO_B(mp, 1)) +
+                    xfs_calc_buf_res(3, mp->m_sb.sb_sectsize) +
+                    xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 2),
+                                     XFS_FSB_TO_B(mp, 1))),
+                   (xfs_calc_buf_res(5, mp->m_sb.sb_sectsize) +
+                    xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 2),
+                                     XFS_FSB_TO_B(mp, 1))));
+}
+
+/*
+ * In truncating a file we free up to two extents at once.  We can modify:
+ *    the inode being truncated: inode size
+ *    the inode's bmap btree: (max depth + 1) * block size
+ * And the bmap_finish transaction can free the blocks and bmap blocks:
+ *    the agf for each of the ags: 4 * sector size
+ *    the agfl for each of the ags: 4 * sector size
+ *    the super block to reflect the freed blocks: sector size
+ *    worst case split in allocation btrees per extent assuming 4 extents:
+ *             4 exts * 2 trees * (2 * max depth - 1) * block size
+ *    the inode btree: max depth * blocksize
+ *    the allocation btrees: 2 trees * (max depth - 1) * block size
+ */
+STATIC uint
+xfs_calc_itruncate_reservation(
+       struct xfs_mount        *mp)
+{
+       return XFS_DQUOT_LOGRES(mp) +
+               MAX((xfs_calc_buf_res(1, mp->m_sb.sb_inodesize) +
+                    xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) + 1,
+                                     XFS_FSB_TO_B(mp, 1))),
+                   (xfs_calc_buf_res(9, mp->m_sb.sb_sectsize) +
+                    xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 4),
+                                     XFS_FSB_TO_B(mp, 1)) +
+                   xfs_calc_buf_res(5, 0) +
+                   xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 1),
+                                    XFS_FSB_TO_B(mp, 1)) +
+                   xfs_calc_buf_res(2 + XFS_IALLOC_BLOCKS(mp) +
+                                    mp->m_in_maxlevels, 0)));
+}
+
+/*
+ * In renaming a files we can modify:
+ *    the four inodes involved: 4 * inode size
+ *    the two directory btrees: 2 * (max depth + v2) * dir block size
+ *    the two directory bmap btrees: 2 * max depth * block size
+ * And the bmap_finish transaction can free dir and bmap blocks (two sets
+ *     of bmap blocks) giving:
+ *    the agf for the ags in which the blocks live: 3 * sector size
+ *    the agfl for the ags in which the blocks live: 3 * sector size
+ *    the superblock for the free block count: sector size
+ *    the allocation btrees: 3 exts * 2 trees * (2 * max depth - 1) * block size
+ */
+STATIC uint
+xfs_calc_rename_reservation(
+       struct xfs_mount        *mp)
+{
+       return XFS_DQUOT_LOGRES(mp) +
+               MAX((xfs_calc_buf_res(4, mp->m_sb.sb_inodesize) +
+                    xfs_calc_buf_res(2 * XFS_DIROP_LOG_COUNT(mp),
+                                     XFS_FSB_TO_B(mp, 1))),
+                   (xfs_calc_buf_res(7, mp->m_sb.sb_sectsize) +
+                    xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 3),
+                                     XFS_FSB_TO_B(mp, 1))));
+}
+
+/*
+ * For creating a link to an inode:
+ *    the parent directory inode: inode size
+ *    the linked inode: inode size
+ *    the directory btree could split: (max depth + v2) * dir block size
+ *    the directory bmap btree could join or split: (max depth + v2) * blocksize
+ * And the bmap_finish transaction can free some bmap blocks giving:
+ *    the agf for the ag in which the blocks live: sector size
+ *    the agfl for the ag in which the blocks live: sector size
+ *    the superblock for the free block count: sector size
+ *    the allocation btrees: 2 trees * (2 * max depth - 1) * block size
+ */
+STATIC uint
+xfs_calc_link_reservation(
+       struct xfs_mount        *mp)
+{
+       return XFS_DQUOT_LOGRES(mp) +
+               MAX((xfs_calc_buf_res(2, mp->m_sb.sb_inodesize) +
+                    xfs_calc_buf_res(XFS_DIROP_LOG_COUNT(mp),
+                                     XFS_FSB_TO_B(mp, 1))),
+                   (xfs_calc_buf_res(3, mp->m_sb.sb_sectsize) +
+                    xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 1),
+                                     XFS_FSB_TO_B(mp, 1))));
+}
+
+/*
+ * For removing a directory entry we can modify:
+ *    the parent directory inode: inode size
+ *    the removed inode: inode size
+ *    the directory btree could join: (max depth + v2) * dir block size
+ *    the directory bmap btree could join or split: (max depth + v2) * blocksize
+ * And the bmap_finish transaction can free the dir and bmap blocks giving:
+ *    the agf for the ag in which the blocks live: 2 * sector size
+ *    the agfl for the ag in which the blocks live: 2 * sector size
+ *    the superblock for the free block count: sector size
+ *    the allocation btrees: 2 exts * 2 trees * (2 * max depth - 1) * block size
+ */
+STATIC uint
+xfs_calc_remove_reservation(
+       struct xfs_mount        *mp)
+{
+       return XFS_DQUOT_LOGRES(mp) +
+               MAX((xfs_calc_buf_res(2, mp->m_sb.sb_inodesize) +
+                    xfs_calc_buf_res(XFS_DIROP_LOG_COUNT(mp),
+                                     XFS_FSB_TO_B(mp, 1))),
+                   (xfs_calc_buf_res(5, mp->m_sb.sb_sectsize) +
+                    xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 2),
+                                     XFS_FSB_TO_B(mp, 1))));
+}
+
+/*
+ * For create, break it in to the two cases that the transaction
+ * covers. We start with the modify case - allocation done by modification
+ * of the state of existing inodes - and the allocation case.
+ */
+
+/*
+ * For create we can modify:
+ *    the parent directory inode: inode size
+ *    the new inode: inode size
+ *    the inode btree entry: block size
+ *    the superblock for the nlink flag: sector size
+ *    the directory btree: (max depth + v2) * dir block size
+ *    the directory inode's bmap btree: (max depth + v2) * block size
+ */
+STATIC uint
+xfs_calc_create_resv_modify(
+       struct xfs_mount        *mp)
+{
+       return xfs_calc_buf_res(2, mp->m_sb.sb_inodesize) +
+               xfs_calc_buf_res(1, mp->m_sb.sb_sectsize) +
+               (uint)XFS_FSB_TO_B(mp, 1) +
+               xfs_calc_buf_res(XFS_DIROP_LOG_COUNT(mp), XFS_FSB_TO_B(mp, 1));
+}
+
+/*
+ * For create we can allocate some inodes giving:
+ *    the agi and agf of the ag getting the new inodes: 2 * sectorsize
+ *    the superblock for the nlink flag: sector size
+ *    the inode blocks allocated: XFS_IALLOC_BLOCKS * blocksize
+ *    the inode btree: max depth * blocksize
+ *    the allocation btrees: 2 trees * (max depth - 1) * block size
+ */
+STATIC uint
+xfs_calc_create_resv_alloc(
+       struct xfs_mount        *mp)
+{
+       return xfs_calc_buf_res(2, mp->m_sb.sb_sectsize) +
+               mp->m_sb.sb_sectsize +
+               xfs_calc_buf_res(XFS_IALLOC_BLOCKS(mp), XFS_FSB_TO_B(mp, 1)) +
+               xfs_calc_buf_res(mp->m_in_maxlevels, XFS_FSB_TO_B(mp, 1)) +
+               xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 1),
+                                XFS_FSB_TO_B(mp, 1));
+}
+
+STATIC uint
+__xfs_calc_create_reservation(
+       struct xfs_mount        *mp)
+{
+       return XFS_DQUOT_LOGRES(mp) +
+               MAX(xfs_calc_create_resv_alloc(mp),
+                   xfs_calc_create_resv_modify(mp));
+}
+
+/*
+ * For icreate we can allocate some inodes giving:
+ *    the agi and agf of the ag getting the new inodes: 2 * sectorsize
+ *    the superblock for the nlink flag: sector size
+ *    the inode btree: max depth * blocksize
+ *    the allocation btrees: 2 trees * (max depth - 1) * block size
+ */
+STATIC uint
+xfs_calc_icreate_resv_alloc(
+       struct xfs_mount        *mp)
+{
+       return xfs_calc_buf_res(2, mp->m_sb.sb_sectsize) +
+               mp->m_sb.sb_sectsize +
+               xfs_calc_buf_res(mp->m_in_maxlevels, XFS_FSB_TO_B(mp, 1)) +
+               xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 1),
+                                XFS_FSB_TO_B(mp, 1));
+}
+
+STATIC uint
+xfs_calc_icreate_reservation(xfs_mount_t *mp)
+{
+       return XFS_DQUOT_LOGRES(mp) +
+               MAX(xfs_calc_icreate_resv_alloc(mp),
+                   xfs_calc_create_resv_modify(mp));
+}
+
+STATIC uint
+xfs_calc_create_reservation(
+       struct xfs_mount        *mp)
+{
+       if (xfs_sb_version_hascrc(&mp->m_sb))
+               return xfs_calc_icreate_reservation(mp);
+       return __xfs_calc_create_reservation(mp);
+
+}
+
+/*
+ * Making a new directory is the same as creating a new file.
+ */
+STATIC uint
+xfs_calc_mkdir_reservation(
+       struct xfs_mount        *mp)
+{
+       return xfs_calc_create_reservation(mp);
+}
+
+
+/*
+ * Making a new symlink is the same as creating a new file, but
+ * with the added blocks for remote symlink data which can be up to 1kB in
+ * length (MAXPATHLEN).
+ */
+STATIC uint
+xfs_calc_symlink_reservation(
+       struct xfs_mount        *mp)
+{
+       return xfs_calc_create_reservation(mp) +
+              xfs_calc_buf_res(1, MAXPATHLEN);
+}
+
+/*
+ * In freeing an inode we can modify:
+ *    the inode being freed: inode size
+ *    the super block free inode counter: sector size
+ *    the agi hash list and counters: sector size
+ *    the inode btree entry: block size
+ *    the on disk inode before ours in the agi hash list: inode cluster size
+ *    the inode btree: max depth * blocksize
+ *    the allocation btrees: 2 trees * (max depth - 1) * block size
+ */
+STATIC uint
+xfs_calc_ifree_reservation(
+       struct xfs_mount        *mp)
+{
+       return XFS_DQUOT_LOGRES(mp) +
+               xfs_calc_buf_res(1, mp->m_sb.sb_inodesize) +
+               xfs_calc_buf_res(2, mp->m_sb.sb_sectsize) +
+               xfs_calc_buf_res(1, XFS_FSB_TO_B(mp, 1)) +
+               MAX((__uint16_t)XFS_FSB_TO_B(mp, 1),
+                   XFS_INODE_CLUSTER_SIZE(mp)) +
+               xfs_calc_buf_res(1, 0) +
+               xfs_calc_buf_res(2 + XFS_IALLOC_BLOCKS(mp) +
+                                mp->m_in_maxlevels, 0) +
+               xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 1),
+                                XFS_FSB_TO_B(mp, 1));
+}
+
+/*
+ * When only changing the inode we log the inode and possibly the superblock
+ * We also add a bit of slop for the transaction stuff.
+ */
+STATIC uint
+xfs_calc_ichange_reservation(
+       struct xfs_mount        *mp)
+{
+       return XFS_DQUOT_LOGRES(mp) +
+               mp->m_sb.sb_inodesize +
+               mp->m_sb.sb_sectsize +
+               512;
+
+}
+
+/*
+ * Growing the data section of the filesystem.
+ *     superblock
+ *     agi and agf
+ *     allocation btrees
+ */
+STATIC uint
+xfs_calc_growdata_reservation(
+       struct xfs_mount        *mp)
+{
+       return xfs_calc_buf_res(3, mp->m_sb.sb_sectsize) +
+               xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 1),
+                                XFS_FSB_TO_B(mp, 1));
+}
+
+/*
+ * Growing the rt section of the filesystem.
+ * In the first set of transactions (ALLOC) we allocate space to the
+ * bitmap or summary files.
+ *     superblock: sector size
+ *     agf of the ag from which the extent is allocated: sector size
+ *     bmap btree for bitmap/summary inode: max depth * blocksize
+ *     bitmap/summary inode: inode size
+ *     allocation btrees for 1 block alloc: 2 * (2 * maxdepth - 1) * blocksize
+ */
+STATIC uint
+xfs_calc_growrtalloc_reservation(
+       struct xfs_mount        *mp)
+{
+       return xfs_calc_buf_res(2, mp->m_sb.sb_sectsize) +
+               xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK),
+                                XFS_FSB_TO_B(mp, 1)) +
+               xfs_calc_buf_res(1, mp->m_sb.sb_inodesize) +
+               xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 1),
+                                XFS_FSB_TO_B(mp, 1));
+}
+
+/*
+ * Growing the rt section of the filesystem.
+ * In the second set of transactions (ZERO) we zero the new metadata blocks.
+ *     one bitmap/summary block: blocksize
+ */
+STATIC uint
+xfs_calc_growrtzero_reservation(
+       struct xfs_mount        *mp)
+{
+       return xfs_calc_buf_res(1, mp->m_sb.sb_blocksize);
+}
+
+/*
+ * Growing the rt section of the filesystem.
+ * In the third set of transactions (FREE) we update metadata without
+ * allocating any new blocks.
+ *     superblock: sector size
+ *     bitmap inode: inode size
+ *     summary inode: inode size
+ *     one bitmap block: blocksize
+ *     summary blocks: new summary size
+ */
+STATIC uint
+xfs_calc_growrtfree_reservation(
+       struct xfs_mount        *mp)
+{
+       return xfs_calc_buf_res(1, mp->m_sb.sb_sectsize) +
+               xfs_calc_buf_res(2, mp->m_sb.sb_inodesize) +
+               xfs_calc_buf_res(1, mp->m_sb.sb_blocksize) +
+               xfs_calc_buf_res(1, mp->m_rsumsize);
+}
+
+/*
+ * Logging the inode modification timestamp on a synchronous write.
+ *     inode
+ */
+STATIC uint
+xfs_calc_swrite_reservation(
+       struct xfs_mount        *mp)
+{
+       return xfs_calc_buf_res(1, mp->m_sb.sb_inodesize);
+}
+
+/*
+ * Logging the inode mode bits when writing a setuid/setgid file
+ *     inode
+ */
+STATIC uint
+xfs_calc_writeid_reservation(xfs_mount_t *mp)
+{
+       return xfs_calc_buf_res(1, mp->m_sb.sb_inodesize);
+}
+
+/*
+ * Converting the inode from non-attributed to attributed.
+ *     the inode being converted: inode size
+ *     agf block and superblock (for block allocation)
+ *     the new block (directory sized)
+ *     bmap blocks for the new directory block
+ *     allocation btrees
+ */
+STATIC uint
+xfs_calc_addafork_reservation(
+       struct xfs_mount        *mp)
+{
+       return XFS_DQUOT_LOGRES(mp) +
+               xfs_calc_buf_res(1, mp->m_sb.sb_inodesize) +
+               xfs_calc_buf_res(2, mp->m_sb.sb_sectsize) +
+               xfs_calc_buf_res(1, mp->m_dirblksize) +
+               xfs_calc_buf_res(XFS_DAENTER_BMAP1B(mp, XFS_DATA_FORK) + 1,
+                                XFS_FSB_TO_B(mp, 1)) +
+               xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 1),
+                                XFS_FSB_TO_B(mp, 1));
+}
+
+/*
+ * Removing the attribute fork of a file
+ *    the inode being truncated: inode size
+ *    the inode's bmap btree: max depth * block size
+ * And the bmap_finish transaction can free the blocks and bmap blocks:
+ *    the agf for each of the ags: 4 * sector size
+ *    the agfl for each of the ags: 4 * sector size
+ *    the super block to reflect the freed blocks: sector size
+ *    worst case split in allocation btrees per extent assuming 4 extents:
+ *             4 exts * 2 trees * (2 * max depth - 1) * block size
+ */
+STATIC uint
+xfs_calc_attrinval_reservation(
+       struct xfs_mount        *mp)
+{
+       return MAX((xfs_calc_buf_res(1, mp->m_sb.sb_inodesize) +
+                   xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_ATTR_FORK),
+                                    XFS_FSB_TO_B(mp, 1))),
+                  (xfs_calc_buf_res(9, mp->m_sb.sb_sectsize) +
+                   xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 4),
+                                    XFS_FSB_TO_B(mp, 1))));
+}
+
+/*
+ * Setting an attribute at mount time.
+ *     the inode getting the attribute
+ *     the superblock for allocations
+ *     the agfs extents are allocated from
+ *     the attribute btree * max depth
+ *     the inode allocation btree
+ * Since attribute transaction space is dependent on the size of the attribute,
+ * the calculation is done partially at mount time and partially at runtime (see
+ * below).
+ */
+STATIC uint
+xfs_calc_attrsetm_reservation(
+       struct xfs_mount        *mp)
+{
+       return XFS_DQUOT_LOGRES(mp) +
+               xfs_calc_buf_res(1, mp->m_sb.sb_inodesize) +
+               xfs_calc_buf_res(1, mp->m_sb.sb_sectsize) +
+               xfs_calc_buf_res(XFS_DA_NODE_MAXDEPTH, XFS_FSB_TO_B(mp, 1));
+}
+
+/*
+ * Setting an attribute at runtime, transaction space unit per block.
+ *     the superblock for allocations: sector size
+ *     the inode bmap btree could join or split: max depth * block size
+ * Since the runtime attribute transaction space is dependent on the total
+ * blocks needed for the 1st bmap, here we calculate out the space unit for
+ * one block so that the caller could figure out the total space according
+ * to the attribute extent length in blocks by:
+ *     ext * M_RES(mp)->tr_attrsetrt.tr_logres
+ */
+STATIC uint
+xfs_calc_attrsetrt_reservation(
+       struct xfs_mount        *mp)
+{
+       return xfs_calc_buf_res(1, mp->m_sb.sb_sectsize) +
+               xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_ATTR_FORK),
+                                XFS_FSB_TO_B(mp, 1));
+}
+
+/*
+ * Removing an attribute.
+ *    the inode: inode size
+ *    the attribute btree could join: max depth * block size
+ *    the inode bmap btree could join or split: max depth * block size
+ * And the bmap_finish transaction can free the attr blocks freed giving:
+ *    the agf for the ag in which the blocks live: 2 * sector size
+ *    the agfl for the ag in which the blocks live: 2 * sector size
+ *    the superblock for the free block count: sector size
+ *    the allocation btrees: 2 exts * 2 trees * (2 * max depth - 1) * block size
+ */
+STATIC uint
+xfs_calc_attrrm_reservation(
+       struct xfs_mount        *mp)
+{
+       return XFS_DQUOT_LOGRES(mp) +
+               MAX((xfs_calc_buf_res(1, mp->m_sb.sb_inodesize) +
+                    xfs_calc_buf_res(XFS_DA_NODE_MAXDEPTH,
+                                     XFS_FSB_TO_B(mp, 1)) +
+                    (uint)XFS_FSB_TO_B(mp,
+                                       XFS_BM_MAXLEVELS(mp, XFS_ATTR_FORK)) +
+                    xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK), 0)),
+                   (xfs_calc_buf_res(5, mp->m_sb.sb_sectsize) +
+                    xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 2),
+                                     XFS_FSB_TO_B(mp, 1))));
+}
+
+/*
+ * Clearing a bad agino number in an agi hash bucket.
+ */
+STATIC uint
+xfs_calc_clear_agi_bucket_reservation(
+       struct xfs_mount        *mp)
+{
+       return xfs_calc_buf_res(1, mp->m_sb.sb_sectsize);
+}
+
+/*
+ * Clearing the quotaflags in the superblock.
+ *     the super block for changing quota flags: sector size
+ */
+STATIC uint
+xfs_calc_qm_sbchange_reservation(
+       struct xfs_mount        *mp)
+{
+       return xfs_calc_buf_res(1, mp->m_sb.sb_sectsize);
+}
+
+/*
+ * Adjusting quota limits.
+ *    the xfs_disk_dquot_t: sizeof(struct xfs_disk_dquot)
+ */
+STATIC uint
+xfs_calc_qm_setqlim_reservation(
+       struct xfs_mount        *mp)
+{
+       return xfs_calc_buf_res(1, sizeof(struct xfs_disk_dquot));
+}
+
+/*
+ * Allocating quota on disk if needed.
+ *     the write transaction log space: M_RES(mp)->tr_write.tr_logres
+ *     the unit of quota allocation: one system block size
+ */
+STATIC uint
+xfs_calc_qm_dqalloc_reservation(
+       struct xfs_mount        *mp)
+{
+       return M_RES(mp)->tr_write.tr_logres +
+               xfs_calc_buf_res(1,
+                       XFS_FSB_TO_B(mp, XFS_DQUOT_CLUSTER_SIZE_FSB) - 1);
+}
+
+/*
+ * Turning off quotas.
+ *    the xfs_qoff_logitem_t: sizeof(struct xfs_qoff_logitem) * 2
+ *    the superblock for the quota flags: sector size
+ */
+STATIC uint
+xfs_calc_qm_quotaoff_reservation(
+       struct xfs_mount        *mp)
+{
+       return sizeof(struct xfs_qoff_logitem) * 2 +
+               xfs_calc_buf_res(1, mp->m_sb.sb_sectsize);
+}
+
+/*
+ * End of turning off quotas.
+ *    the xfs_qoff_logitem_t: sizeof(struct xfs_qoff_logitem) * 2
+ */
+STATIC uint
+xfs_calc_qm_quotaoff_end_reservation(
+       struct xfs_mount        *mp)
+{
+       return sizeof(struct xfs_qoff_logitem) * 2;
+}
+
+/*
+ * Syncing the incore super block changes to disk.
+ *     the super block to reflect the changes: sector size
+ */
+STATIC uint
+xfs_calc_sb_reservation(
+       struct xfs_mount        *mp)
+{
+       return xfs_calc_buf_res(1, mp->m_sb.sb_sectsize);
+}
+
+void
+xfs_trans_resv_calc(
+       struct xfs_mount        *mp,
+       struct xfs_trans_resv   *resp)
+{
+       /*
+        * The following transactions are logged in physical format and
+        * require a permanent reservation on space.
+        */
+       resp->tr_write.tr_logres = xfs_calc_write_reservation(mp);
+       resp->tr_write.tr_logcount = XFS_WRITE_LOG_COUNT;
+       resp->tr_write.tr_logflags |= XFS_TRANS_PERM_LOG_RES;
+
+       resp->tr_itruncate.tr_logres = xfs_calc_itruncate_reservation(mp);
+       resp->tr_itruncate.tr_logcount = XFS_ITRUNCATE_LOG_COUNT;
+       resp->tr_itruncate.tr_logflags |= XFS_TRANS_PERM_LOG_RES;
+
+       resp->tr_rename.tr_logres = xfs_calc_rename_reservation(mp);
+       resp->tr_rename.tr_logcount = XFS_RENAME_LOG_COUNT;
+       resp->tr_rename.tr_logflags |= XFS_TRANS_PERM_LOG_RES;
+
+       resp->tr_link.tr_logres = xfs_calc_link_reservation(mp);
+       resp->tr_link.tr_logcount = XFS_LINK_LOG_COUNT;
+       resp->tr_link.tr_logflags |= XFS_TRANS_PERM_LOG_RES;
+
+       resp->tr_remove.tr_logres = xfs_calc_remove_reservation(mp);
+       resp->tr_remove.tr_logcount = XFS_REMOVE_LOG_COUNT;
+       resp->tr_remove.tr_logflags |= XFS_TRANS_PERM_LOG_RES;
+
+       resp->tr_symlink.tr_logres = xfs_calc_symlink_reservation(mp);
+       resp->tr_symlink.tr_logcount = XFS_SYMLINK_LOG_COUNT;
+       resp->tr_symlink.tr_logflags |= XFS_TRANS_PERM_LOG_RES;
+
+       resp->tr_create.tr_logres = xfs_calc_create_reservation(mp);
+       resp->tr_create.tr_logcount = XFS_CREATE_LOG_COUNT;
+       resp->tr_create.tr_logflags |= XFS_TRANS_PERM_LOG_RES;
+
+       resp->tr_mkdir.tr_logres = xfs_calc_mkdir_reservation(mp);
+       resp->tr_mkdir.tr_logcount = XFS_MKDIR_LOG_COUNT;
+       resp->tr_mkdir.tr_logflags |= XFS_TRANS_PERM_LOG_RES;
+
+       resp->tr_ifree.tr_logres = xfs_calc_ifree_reservation(mp);
+       resp->tr_ifree.tr_logcount = XFS_INACTIVE_LOG_COUNT;
+       resp->tr_ifree.tr_logflags |= XFS_TRANS_PERM_LOG_RES;
+
+       resp->tr_addafork.tr_logres = xfs_calc_addafork_reservation(mp);
+       resp->tr_addafork.tr_logcount = XFS_ADDAFORK_LOG_COUNT;
+       resp->tr_addafork.tr_logflags |= XFS_TRANS_PERM_LOG_RES;
+
+       resp->tr_attrinval.tr_logres = xfs_calc_attrinval_reservation(mp);
+       resp->tr_attrinval.tr_logcount = XFS_ATTRINVAL_LOG_COUNT;
+       resp->tr_attrinval.tr_logflags |= XFS_TRANS_PERM_LOG_RES;
+
+       resp->tr_attrsetm.tr_logres = xfs_calc_attrsetm_reservation(mp);
+       resp->tr_attrsetm.tr_logcount = XFS_ATTRSET_LOG_COUNT;
+       resp->tr_attrsetm.tr_logflags |= XFS_TRANS_PERM_LOG_RES;
+
+       resp->tr_attrrm.tr_logres = xfs_calc_attrrm_reservation(mp);
+       resp->tr_attrrm.tr_logcount = XFS_ATTRRM_LOG_COUNT;
+       resp->tr_attrrm.tr_logflags |= XFS_TRANS_PERM_LOG_RES;
+
+       resp->tr_growrtalloc.tr_logres = xfs_calc_growrtalloc_reservation(mp);
+       resp->tr_growrtalloc.tr_logcount = XFS_DEFAULT_PERM_LOG_COUNT;
+       resp->tr_growrtalloc.tr_logflags |= XFS_TRANS_PERM_LOG_RES;
+
+       resp->tr_qm_dqalloc.tr_logres = xfs_calc_qm_dqalloc_reservation(mp);
+       resp->tr_qm_dqalloc.tr_logcount = XFS_WRITE_LOG_COUNT;
+       resp->tr_qm_dqalloc.tr_logflags |= XFS_TRANS_PERM_LOG_RES;
+
+       /*
+        * The following transactions are logged in logical format with
+        * a default log count.
+        */
+       resp->tr_qm_sbchange.tr_logres = xfs_calc_qm_sbchange_reservation(mp);
+       resp->tr_qm_sbchange.tr_logcount = XFS_DEFAULT_LOG_COUNT;
+
+       resp->tr_qm_setqlim.tr_logres = xfs_calc_qm_setqlim_reservation(mp);
+       resp->tr_qm_setqlim.tr_logcount = XFS_DEFAULT_LOG_COUNT;
+
+       resp->tr_qm_quotaoff.tr_logres = xfs_calc_qm_quotaoff_reservation(mp);
+       resp->tr_qm_quotaoff.tr_logcount = XFS_DEFAULT_LOG_COUNT;
+
+       resp->tr_qm_equotaoff.tr_logres =
+               xfs_calc_qm_quotaoff_end_reservation(mp);
+       resp->tr_qm_equotaoff.tr_logcount = XFS_DEFAULT_LOG_COUNT;
+
+       resp->tr_sb.tr_logres = xfs_calc_sb_reservation(mp);
+       resp->tr_sb.tr_logcount = XFS_DEFAULT_LOG_COUNT;
+
+       /* The following transaction are logged in logical format */
+       resp->tr_ichange.tr_logres = xfs_calc_ichange_reservation(mp);
+       resp->tr_growdata.tr_logres = xfs_calc_growdata_reservation(mp);
+       resp->tr_swrite.tr_logres = xfs_calc_swrite_reservation(mp);
+       resp->tr_fsyncts.tr_logres = xfs_calc_swrite_reservation(mp);
+       resp->tr_writeid.tr_logres = xfs_calc_writeid_reservation(mp);
+       resp->tr_attrsetrt.tr_logres = xfs_calc_attrsetrt_reservation(mp);
+       resp->tr_clearagi.tr_logres = xfs_calc_clear_agi_bucket_reservation(mp);
+       resp->tr_growrtzero.tr_logres = xfs_calc_growrtzero_reservation(mp);
+       resp->tr_growrtfree.tr_logres = xfs_calc_growrtfree_reservation(mp);
+}
diff --git a/fs/xfs/xfs_trans_resv.h b/fs/xfs/xfs_trans_resv.h
new file mode 100644 (file)
index 0000000..de7de9a
--- /dev/null
@@ -0,0 +1,116 @@
+/*
+ * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+#ifndef        __XFS_TRANS_RESV_H__
+#define        __XFS_TRANS_RESV_H__
+
+struct xfs_mount;
+
+/*
+ * structure for maintaining pre-calculated transaction reservations.
+ */
+struct xfs_trans_res {
+       uint    tr_logres;      /* log space unit in bytes per log ticket */
+       int     tr_logcount;    /* number of log operations per log ticket */
+       int     tr_logflags;    /* log flags, currently only used for indicating
+                                * a reservation request is permanent or not */
+};
+
+struct xfs_trans_resv {
+       struct xfs_trans_res    tr_write;       /* extent alloc trans */
+       struct xfs_trans_res    tr_itruncate;   /* truncate trans */
+       struct xfs_trans_res    tr_rename;      /* rename trans */
+       struct xfs_trans_res    tr_link;        /* link trans */
+       struct xfs_trans_res    tr_remove;      /* unlink trans */
+       struct xfs_trans_res    tr_symlink;     /* symlink trans */
+       struct xfs_trans_res    tr_create;      /* create trans */
+       struct xfs_trans_res    tr_mkdir;       /* mkdir trans */
+       struct xfs_trans_res    tr_ifree;       /* inode free trans */
+       struct xfs_trans_res    tr_ichange;     /* inode update trans */
+       struct xfs_trans_res    tr_growdata;    /* fs data section grow trans */
+       struct xfs_trans_res    tr_swrite;      /* sync write inode trans */
+       struct xfs_trans_res    tr_addafork;    /* add inode attr fork trans */
+       struct xfs_trans_res    tr_writeid;     /* write setuid/setgid file */
+       struct xfs_trans_res    tr_attrinval;   /* attr fork buffer
+                                                * invalidation */
+       struct xfs_trans_res    tr_attrsetm;    /* set/create an attribute at
+                                                * mount time */
+       struct xfs_trans_res    tr_attrsetrt;   /* set/create an attribute at
+                                                * runtime */
+       struct xfs_trans_res    tr_attrrm;      /* remove an attribute */
+       struct xfs_trans_res    tr_clearagi;    /* clear agi unlinked bucket */
+       struct xfs_trans_res    tr_growrtalloc; /* grow realtime allocations */
+       struct xfs_trans_res    tr_growrtzero;  /* grow realtime zeroing */
+       struct xfs_trans_res    tr_growrtfree;  /* grow realtime freeing */
+       struct xfs_trans_res    tr_qm_sbchange; /* change quota flags */
+       struct xfs_trans_res    tr_qm_setqlim;  /* adjust quota limits */
+       struct xfs_trans_res    tr_qm_dqalloc;  /* allocate quota on disk */
+       struct xfs_trans_res    tr_qm_quotaoff; /* turn quota off */
+       struct xfs_trans_res    tr_qm_equotaoff;/* end of turn quota off */
+       struct xfs_trans_res    tr_sb;          /* modify superblock */
+       struct xfs_trans_res    tr_fsyncts;     /* update timestamps on fsync */
+};
+
+/* shorthand way of accessing reservation structure */
+#define M_RES(mp)      (&(mp)->m_resv)
+
+/*
+ * Per-extent log reservation for the allocation btree changes
+ * involved in freeing or allocating an extent.
+ * 2 trees * (2 blocks/level * max depth - 1) * block size
+ */
+#define        XFS_ALLOCFREE_LOG_RES(mp,nx) \
+       ((nx) * (2 * XFS_FSB_TO_B((mp), 2 * XFS_AG_MAXLEVELS(mp) - 1)))
+#define        XFS_ALLOCFREE_LOG_COUNT(mp,nx) \
+       ((nx) * (2 * (2 * XFS_AG_MAXLEVELS(mp) - 1)))
+
+/*
+ * Per-directory log reservation for any directory change.
+ * dir blocks: (1 btree block per level + data block + free block) * dblock size
+ * bmap btree: (levels + 2) * max depth * block size
+ * v2 directory blocks can be fragmented below the dirblksize down to the fsb
+ * size, so account for that in the DAENTER macros.
+ */
+#define        XFS_DIROP_LOG_RES(mp)   \
+       (XFS_FSB_TO_B(mp, XFS_DAENTER_BLOCKS(mp, XFS_DATA_FORK)) + \
+        (XFS_FSB_TO_B(mp, XFS_DAENTER_BMAPS(mp, XFS_DATA_FORK) + 1)))
+#define        XFS_DIROP_LOG_COUNT(mp) \
+       (XFS_DAENTER_BLOCKS(mp, XFS_DATA_FORK) + \
+        XFS_DAENTER_BMAPS(mp, XFS_DATA_FORK) + 1)
+
+/*
+ * Various log count values.
+ */
+#define        XFS_DEFAULT_LOG_COUNT           1
+#define        XFS_DEFAULT_PERM_LOG_COUNT      2
+#define        XFS_ITRUNCATE_LOG_COUNT         2
+#define XFS_INACTIVE_LOG_COUNT         2
+#define        XFS_CREATE_LOG_COUNT            2
+#define        XFS_MKDIR_LOG_COUNT             3
+#define        XFS_SYMLINK_LOG_COUNT           3
+#define        XFS_REMOVE_LOG_COUNT            2
+#define        XFS_LINK_LOG_COUNT              2
+#define        XFS_RENAME_LOG_COUNT            2
+#define        XFS_WRITE_LOG_COUNT             2
+#define        XFS_ADDAFORK_LOG_COUNT          2
+#define        XFS_ATTRINVAL_LOG_COUNT         1
+#define        XFS_ATTRSET_LOG_COUNT           3
+#define        XFS_ATTRRM_LOG_COUNT            3
+
+void xfs_trans_resv_calc(struct xfs_mount *mp, struct xfs_trans_resv *resp);
+
+#endif /* __XFS_TRANS_RESV_H__ */
index 61ba1cfa974c7e3e32c493c1317236e2cb397730..ce44b182821f1a25941822035ccf9c210d73270e 100644 (file)
 #ifndef __XFS_TYPES_H__
 #define        __XFS_TYPES_H__
 
-#ifdef __KERNEL__
-
-/*
- * Additional type declarations for XFS
- */
-typedef signed char            __int8_t;
-typedef unsigned char          __uint8_t;
-typedef signed short int       __int16_t;
-typedef unsigned short int     __uint16_t;
-typedef signed int             __int32_t;
-typedef unsigned int           __uint32_t;
-typedef signed long long int   __int64_t;
-typedef unsigned long long int __uint64_t;
-
-typedef __uint32_t             prid_t;         /* project ID */
-typedef __uint32_t             inst_t;         /* an instruction */
-
-typedef __s64                  xfs_off_t;      /* <file offset> type */
-typedef unsigned long long     xfs_ino_t;      /* <inode> type */
-typedef __s64                  xfs_daddr_t;    /* <disk address> type */
-typedef char *                 xfs_caddr_t;    /* <core address> type */
-typedef __u32                  xfs_dev_t;
-typedef __u32                  xfs_nlink_t;
-
-/* __psint_t is the same size as a pointer */
-#if (BITS_PER_LONG == 32)
-typedef __int32_t __psint_t;
-typedef __uint32_t __psunsigned_t;
-#elif (BITS_PER_LONG == 64)
-typedef __int64_t __psint_t;
-typedef __uint64_t __psunsigned_t;
-#else
-#error BITS_PER_LONG must be 32 or 64
-#endif
-
-#endif /* __KERNEL__ */
+typedef __uint32_t     prid_t;         /* project ID */
 
 typedef __uint32_t     xfs_agblock_t;  /* blockno in alloc. group */
 typedef        __uint32_t      xfs_agino_t;    /* inode # within allocation grp */
@@ -145,6 +110,12 @@ typedef __uint64_t xfs_filblks_t;  /* number of blocks in a file */
 #define XFS_MIN_SECTORSIZE     (1 << XFS_MIN_SECTORSIZE_LOG)
 #define XFS_MAX_SECTORSIZE     (1 << XFS_MAX_SECTORSIZE_LOG)
 
+/*
+ * Inode fork identifiers.
+ */
+#define        XFS_DATA_FORK   0
+#define        XFS_ATTR_FORK   1
+
 /*
  * Min numbers of data/attr fork btree root pointers.
  */
@@ -171,4 +142,20 @@ struct xfs_name {
        int                     len;
 };
 
+/*
+ * uid_t and gid_t are hard-coded to 32 bits in the inode.
+ * Hence, an 'id' in a dquot is 32 bits..
+ */
+typedef __uint32_t     xfs_dqid_t;
+
+/*
+ * Constants for bit manipulations.
+ */
+#define        XFS_NBBYLOG     3               /* log2(NBBY) */
+#define        XFS_WORDLOG     2               /* log2(sizeof(xfs_rtword_t)) */
+#define        XFS_NBWORDLOG   (XFS_NBBYLOG + XFS_WORDLOG)
+#define        XFS_NBWORD      (1 << XFS_NBWORDLOG)
+#define        XFS_WORDMASK    ((1 << XFS_WORDLOG) - 1)
+
+
 #endif /* __XFS_TYPES_H__ */
diff --git a/fs/xfs/xfs_utils.c b/fs/xfs/xfs_utils.c
deleted file mode 100644 (file)
index 0025c78..0000000
+++ /dev/null
@@ -1,314 +0,0 @@
-/*
- * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- */
-#include "xfs.h"
-#include "xfs_fs.h"
-#include "xfs_types.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
-#include "xfs_sb.h"
-#include "xfs_ag.h"
-#include "xfs_dir2.h"
-#include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_dinode.h"
-#include "xfs_inode.h"
-#include "xfs_inode_item.h"
-#include "xfs_bmap.h"
-#include "xfs_error.h"
-#include "xfs_quota.h"
-#include "xfs_itable.h"
-#include "xfs_utils.h"
-
-
-/*
- * Allocates a new inode from disk and return a pointer to the
- * incore copy. This routine will internally commit the current
- * transaction and allocate a new one if the Space Manager needed
- * to do an allocation to replenish the inode free-list.
- *
- * This routine is designed to be called from xfs_create and
- * xfs_create_dir.
- *
- */
-int
-xfs_dir_ialloc(
-       xfs_trans_t     **tpp,          /* input: current transaction;
-                                          output: may be a new transaction. */
-       xfs_inode_t     *dp,            /* directory within whose allocate
-                                          the inode. */
-       umode_t         mode,
-       xfs_nlink_t     nlink,
-       xfs_dev_t       rdev,
-       prid_t          prid,           /* project id */
-       int             okalloc,        /* ok to allocate new space */
-       xfs_inode_t     **ipp,          /* pointer to inode; it will be
-                                          locked. */
-       int             *committed)
-
-{
-       xfs_trans_t     *tp;
-       xfs_trans_t     *ntp;
-       xfs_inode_t     *ip;
-       xfs_buf_t       *ialloc_context = NULL;
-       int             code;
-       uint            log_res;
-       uint            log_count;
-       void            *dqinfo;
-       uint            tflags;
-
-       tp = *tpp;
-       ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
-
-       /*
-        * xfs_ialloc will return a pointer to an incore inode if
-        * the Space Manager has an available inode on the free
-        * list. Otherwise, it will do an allocation and replenish
-        * the freelist.  Since we can only do one allocation per
-        * transaction without deadlocks, we will need to commit the
-        * current transaction and start a new one.  We will then
-        * need to call xfs_ialloc again to get the inode.
-        *
-        * If xfs_ialloc did an allocation to replenish the freelist,
-        * it returns the bp containing the head of the freelist as
-        * ialloc_context. We will hold a lock on it across the
-        * transaction commit so that no other process can steal
-        * the inode(s) that we've just allocated.
-        */
-       code = xfs_ialloc(tp, dp, mode, nlink, rdev, prid, okalloc,
-                         &ialloc_context, &ip);
-
-       /*
-        * Return an error if we were unable to allocate a new inode.
-        * This should only happen if we run out of space on disk or
-        * encounter a disk error.
-        */
-       if (code) {
-               *ipp = NULL;
-               return code;
-       }
-       if (!ialloc_context && !ip) {
-               *ipp = NULL;
-               return XFS_ERROR(ENOSPC);
-       }
-
-       /*
-        * If the AGI buffer is non-NULL, then we were unable to get an
-        * inode in one operation.  We need to commit the current
-        * transaction and call xfs_ialloc() again.  It is guaranteed
-        * to succeed the second time.
-        */
-       if (ialloc_context) {
-               /*
-                * Normally, xfs_trans_commit releases all the locks.
-                * We call bhold to hang on to the ialloc_context across
-                * the commit.  Holding this buffer prevents any other
-                * processes from doing any allocations in this
-                * allocation group.
-                */
-               xfs_trans_bhold(tp, ialloc_context);
-               /*
-                * Save the log reservation so we can use
-                * them in the next transaction.
-                */
-               log_res = xfs_trans_get_log_res(tp);
-               log_count = xfs_trans_get_log_count(tp);
-
-               /*
-                * We want the quota changes to be associated with the next
-                * transaction, NOT this one. So, detach the dqinfo from this
-                * and attach it to the next transaction.
-                */
-               dqinfo = NULL;
-               tflags = 0;
-               if (tp->t_dqinfo) {
-                       dqinfo = (void *)tp->t_dqinfo;
-                       tp->t_dqinfo = NULL;
-                       tflags = tp->t_flags & XFS_TRANS_DQ_DIRTY;
-                       tp->t_flags &= ~(XFS_TRANS_DQ_DIRTY);
-               }
-
-               ntp = xfs_trans_dup(tp);
-               code = xfs_trans_commit(tp, 0);
-               tp = ntp;
-               if (committed != NULL) {
-                       *committed = 1;
-               }
-               /*
-                * If we get an error during the commit processing,
-                * release the buffer that is still held and return
-                * to the caller.
-                */
-               if (code) {
-                       xfs_buf_relse(ialloc_context);
-                       if (dqinfo) {
-                               tp->t_dqinfo = dqinfo;
-                               xfs_trans_free_dqinfo(tp);
-                       }
-                       *tpp = ntp;
-                       *ipp = NULL;
-                       return code;
-               }
-
-               /*
-                * transaction commit worked ok so we can drop the extra ticket
-                * reference that we gained in xfs_trans_dup()
-                */
-               xfs_log_ticket_put(tp->t_ticket);
-               code = xfs_trans_reserve(tp, 0, log_res, 0,
-                                        XFS_TRANS_PERM_LOG_RES, log_count);
-               /*
-                * Re-attach the quota info that we detached from prev trx.
-                */
-               if (dqinfo) {
-                       tp->t_dqinfo = dqinfo;
-                       tp->t_flags |= tflags;
-               }
-
-               if (code) {
-                       xfs_buf_relse(ialloc_context);
-                       *tpp = ntp;
-                       *ipp = NULL;
-                       return code;
-               }
-               xfs_trans_bjoin(tp, ialloc_context);
-
-               /*
-                * Call ialloc again. Since we've locked out all
-                * other allocations in this allocation group,
-                * this call should always succeed.
-                */
-               code = xfs_ialloc(tp, dp, mode, nlink, rdev, prid,
-                                 okalloc, &ialloc_context, &ip);
-
-               /*
-                * If we get an error at this point, return to the caller
-                * so that the current transaction can be aborted.
-                */
-               if (code) {
-                       *tpp = tp;
-                       *ipp = NULL;
-                       return code;
-               }
-               ASSERT(!ialloc_context && ip);
-
-       } else {
-               if (committed != NULL)
-                       *committed = 0;
-       }
-
-       *ipp = ip;
-       *tpp = tp;
-
-       return 0;
-}
-
-/*
- * Decrement the link count on an inode & log the change.
- * If this causes the link count to go to zero, initiate the
- * logging activity required to truncate a file.
- */
-int                            /* error */
-xfs_droplink(
-       xfs_trans_t *tp,
-       xfs_inode_t *ip)
-{
-       int     error;
-
-       xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);
-
-       ASSERT (ip->i_d.di_nlink > 0);
-       ip->i_d.di_nlink--;
-       drop_nlink(VFS_I(ip));
-       xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
-
-       error = 0;
-       if (ip->i_d.di_nlink == 0) {
-               /*
-                * We're dropping the last link to this file.
-                * Move the on-disk inode to the AGI unlinked list.
-                * From xfs_inactive() we will pull the inode from
-                * the list and free it.
-                */
-               error = xfs_iunlink(tp, ip);
-       }
-       return error;
-}
-
-/*
- * This gets called when the inode's version needs to be changed from 1 to 2.
- * Currently this happens when the nlink field overflows the old 16-bit value
- * or when chproj is called to change the project for the first time.
- * As a side effect the superblock version will also get rev'd
- * to contain the NLINK bit.
- */
-void
-xfs_bump_ino_vers2(
-       xfs_trans_t     *tp,
-       xfs_inode_t     *ip)
-{
-       xfs_mount_t     *mp;
-
-       ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
-       ASSERT(ip->i_d.di_version == 1);
-
-       ip->i_d.di_version = 2;
-       ip->i_d.di_onlink = 0;
-       memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad));
-       mp = tp->t_mountp;
-       if (!xfs_sb_version_hasnlink(&mp->m_sb)) {
-               spin_lock(&mp->m_sb_lock);
-               if (!xfs_sb_version_hasnlink(&mp->m_sb)) {
-                       xfs_sb_version_addnlink(&mp->m_sb);
-                       spin_unlock(&mp->m_sb_lock);
-                       xfs_mod_sb(tp, XFS_SB_VERSIONNUM);
-               } else {
-                       spin_unlock(&mp->m_sb_lock);
-               }
-       }
-       /* Caller must log the inode */
-}
-
-/*
- * Increment the link count on an inode & log the change.
- */
-int
-xfs_bumplink(
-       xfs_trans_t *tp,
-       xfs_inode_t *ip)
-{
-       xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);
-
-       ASSERT(ip->i_d.di_nlink > 0);
-       ip->i_d.di_nlink++;
-       inc_nlink(VFS_I(ip));
-       if ((ip->i_d.di_version == 1) &&
-           (ip->i_d.di_nlink > XFS_MAXLINK_1)) {
-               /*
-                * The inode has increased its number of links beyond
-                * what can fit in an old format inode.  It now needs
-                * to be converted to a version 2 inode with a 32 bit
-                * link count.  If this is the first inode in the file
-                * system to do this, then we need to bump the superblock
-                * version number as well.
-                */
-               xfs_bump_ino_vers2(tp, ip);
-       }
-
-       xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
-       return 0;
-}
diff --git a/fs/xfs/xfs_utils.h b/fs/xfs/xfs_utils.h
deleted file mode 100644 (file)
index 5eeab46..0000000
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- */
-#ifndef __XFS_UTILS_H__
-#define __XFS_UTILS_H__
-
-extern int xfs_dir_ialloc(xfs_trans_t **, xfs_inode_t *, umode_t, xfs_nlink_t,
-                               xfs_dev_t, prid_t, int, xfs_inode_t **, int *);
-extern int xfs_droplink(xfs_trans_t *, xfs_inode_t *);
-extern int xfs_bumplink(xfs_trans_t *, xfs_inode_t *);
-extern void xfs_bump_ino_vers2(xfs_trans_t *, xfs_inode_t *);
-
-#endif /* __XFS_UTILS_H__ */
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
deleted file mode 100644 (file)
index dc730ac..0000000
+++ /dev/null
@@ -1,1870 +0,0 @@
-/*
- * Copyright (c) 2000-2006 Silicon Graphics, Inc.
- * Copyright (c) 2012 Red Hat, Inc.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- */
-
-#include "xfs.h"
-#include "xfs_fs.h"
-#include "xfs_types.h"
-#include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
-#include "xfs_sb.h"
-#include "xfs_ag.h"
-#include "xfs_dir2.h"
-#include "xfs_mount.h"
-#include "xfs_da_btree.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dinode.h"
-#include "xfs_inode.h"
-#include "xfs_inode_item.h"
-#include "xfs_itable.h"
-#include "xfs_ialloc.h"
-#include "xfs_alloc.h"
-#include "xfs_bmap.h"
-#include "xfs_acl.h"
-#include "xfs_attr.h"
-#include "xfs_error.h"
-#include "xfs_quota.h"
-#include "xfs_utils.h"
-#include "xfs_rtalloc.h"
-#include "xfs_trans_space.h"
-#include "xfs_log_priv.h"
-#include "xfs_filestream.h"
-#include "xfs_vnodeops.h"
-#include "xfs_trace.h"
-#include "xfs_icache.h"
-#include "xfs_symlink.h"
-
-
-/*
- * This is called by xfs_inactive to free any blocks beyond eof
- * when the link count isn't zero and by xfs_dm_punch_hole() when
- * punching a hole to EOF.
- */
-int
-xfs_free_eofblocks(
-       xfs_mount_t     *mp,
-       xfs_inode_t     *ip,
-       bool            need_iolock)
-{
-       xfs_trans_t     *tp;
-       int             error;
-       xfs_fileoff_t   end_fsb;
-       xfs_fileoff_t   last_fsb;
-       xfs_filblks_t   map_len;
-       int             nimaps;
-       xfs_bmbt_irec_t imap;
-
-       /*
-        * Figure out if there are any blocks beyond the end
-        * of the file.  If not, then there is nothing to do.
-        */
-       end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_ISIZE(ip));
-       last_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
-       if (last_fsb <= end_fsb)
-               return 0;
-       map_len = last_fsb - end_fsb;
-
-       nimaps = 1;
-       xfs_ilock(ip, XFS_ILOCK_SHARED);
-       error = xfs_bmapi_read(ip, end_fsb, map_len, &imap, &nimaps, 0);
-       xfs_iunlock(ip, XFS_ILOCK_SHARED);
-
-       if (!error && (nimaps != 0) &&
-           (imap.br_startblock != HOLESTARTBLOCK ||
-            ip->i_delayed_blks)) {
-               /*
-                * Attach the dquots to the inode up front.
-                */
-               error = xfs_qm_dqattach(ip, 0);
-               if (error)
-                       return error;
-
-               /*
-                * There are blocks after the end of file.
-                * Free them up now by truncating the file to
-                * its current size.
-                */
-               tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE);
-
-               if (need_iolock) {
-                       if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
-                               xfs_trans_cancel(tp, 0);
-                               return EAGAIN;
-                       }
-               }
-
-               error = xfs_trans_reserve(tp, 0,
-                                         XFS_ITRUNCATE_LOG_RES(mp),
-                                         0, XFS_TRANS_PERM_LOG_RES,
-                                         XFS_ITRUNCATE_LOG_COUNT);
-               if (error) {
-                       ASSERT(XFS_FORCED_SHUTDOWN(mp));
-                       xfs_trans_cancel(tp, 0);
-                       if (need_iolock)
-                               xfs_iunlock(ip, XFS_IOLOCK_EXCL);
-                       return error;
-               }
-
-               xfs_ilock(ip, XFS_ILOCK_EXCL);
-               xfs_trans_ijoin(tp, ip, 0);
-
-               /*
-                * Do not update the on-disk file size.  If we update the
-                * on-disk file size and then the system crashes before the
-                * contents of the file are flushed to disk then the files
-                * may be full of holes (ie NULL files bug).
-                */
-               error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK,
-                                             XFS_ISIZE(ip));
-               if (error) {
-                       /*
-                        * If we get an error at this point we simply don't
-                        * bother truncating the file.
-                        */
-                       xfs_trans_cancel(tp,
-                                        (XFS_TRANS_RELEASE_LOG_RES |
-                                         XFS_TRANS_ABORT));
-               } else {
-                       error = xfs_trans_commit(tp,
-                                               XFS_TRANS_RELEASE_LOG_RES);
-                       if (!error)
-                               xfs_inode_clear_eofblocks_tag(ip);
-               }
-
-               xfs_iunlock(ip, XFS_ILOCK_EXCL);
-               if (need_iolock)
-                       xfs_iunlock(ip, XFS_IOLOCK_EXCL);
-       }
-       return error;
-}
-
-int
-xfs_release(
-       xfs_inode_t     *ip)
-{
-       xfs_mount_t     *mp = ip->i_mount;
-       int             error;
-
-       if (!S_ISREG(ip->i_d.di_mode) || (ip->i_d.di_mode == 0))
-               return 0;
-
-       /* If this is a read-only mount, don't do this (would generate I/O) */
-       if (mp->m_flags & XFS_MOUNT_RDONLY)
-               return 0;
-
-       if (!XFS_FORCED_SHUTDOWN(mp)) {
-               int truncated;
-
-               /*
-                * If we are using filestreams, and we have an unlinked
-                * file that we are processing the last close on, then nothing
-                * will be able to reopen and write to this file. Purge this
-                * inode from the filestreams cache so that it doesn't delay
-                * teardown of the inode.
-                */
-               if ((ip->i_d.di_nlink == 0) && xfs_inode_is_filestream(ip))
-                       xfs_filestream_deassociate(ip);
-
-               /*
-                * If we previously truncated this file and removed old data
-                * in the process, we want to initiate "early" writeout on
-                * the last close.  This is an attempt to combat the notorious
-                * NULL files problem which is particularly noticeable from a
-                * truncate down, buffered (re-)write (delalloc), followed by
-                * a crash.  What we are effectively doing here is
-                * significantly reducing the time window where we'd otherwise
-                * be exposed to that problem.
-                */
-               truncated = xfs_iflags_test_and_clear(ip, XFS_ITRUNCATED);
-               if (truncated) {
-                       xfs_iflags_clear(ip, XFS_IDIRTY_RELEASE);
-                       if (VN_DIRTY(VFS_I(ip)) && ip->i_delayed_blks > 0) {
-                               error = -filemap_flush(VFS_I(ip)->i_mapping);
-                               if (error)
-                                       return error;
-                       }
-               }
-       }
-
-       if (ip->i_d.di_nlink == 0)
-               return 0;
-
-       if (xfs_can_free_eofblocks(ip, false)) {
-
-               /*
-                * If we can't get the iolock just skip truncating the blocks
-                * past EOF because we could deadlock with the mmap_sem
-                * otherwise.  We'll get another chance to drop them once the
-                * last reference to the inode is dropped, so we'll never leak
-                * blocks permanently.
-                *
-                * Further, check if the inode is being opened, written and
-                * closed frequently and we have delayed allocation blocks
-                * outstanding (e.g. streaming writes from the NFS server),
-                * truncating the blocks past EOF will cause fragmentation to
-                * occur.
-                *
-                * In this case don't do the truncation, either, but we have to
-                * be careful how we detect this case. Blocks beyond EOF show
-                * up as i_delayed_blks even when the inode is clean, so we
-                * need to truncate them away first before checking for a dirty
-                * release. Hence on the first dirty close we will still remove
-                * the speculative allocation, but after that we will leave it
-                * in place.
-                */
-               if (xfs_iflags_test(ip, XFS_IDIRTY_RELEASE))
-                       return 0;
-
-               error = xfs_free_eofblocks(mp, ip, true);
-               if (error && error != EAGAIN)
-                       return error;
-
-               /* delalloc blocks after truncation means it really is dirty */
-               if (ip->i_delayed_blks)
-                       xfs_iflags_set(ip, XFS_IDIRTY_RELEASE);
-       }
-       return 0;
-}
-
-/*
- * xfs_inactive
- *
- * This is called when the vnode reference count for the vnode
- * goes to zero.  If the file has been unlinked, then it must
- * now be truncated.  Also, we clear all of the read-ahead state
- * kept for the inode here since the file is now closed.
- */
-int
-xfs_inactive(
-       xfs_inode_t     *ip)
-{
-       xfs_bmap_free_t free_list;
-       xfs_fsblock_t   first_block;
-       int             committed;
-       xfs_trans_t     *tp;
-       xfs_mount_t     *mp;
-       int             error;
-       int             truncate = 0;
-
-       /*
-        * If the inode is already free, then there can be nothing
-        * to clean up here.
-        */
-       if (ip->i_d.di_mode == 0 || is_bad_inode(VFS_I(ip))) {
-               ASSERT(ip->i_df.if_real_bytes == 0);
-               ASSERT(ip->i_df.if_broot_bytes == 0);
-               return VN_INACTIVE_CACHE;
-       }
-
-       mp = ip->i_mount;
-
-       error = 0;
-
-       /* If this is a read-only mount, don't do this (would generate I/O) */
-       if (mp->m_flags & XFS_MOUNT_RDONLY)
-               goto out;
-
-       if (ip->i_d.di_nlink != 0) {
-               /*
-                * force is true because we are evicting an inode from the
-                * cache. Post-eof blocks must be freed, lest we end up with
-                * broken free space accounting.
-                */
-               if (xfs_can_free_eofblocks(ip, true)) {
-                       error = xfs_free_eofblocks(mp, ip, false);
-                       if (error)
-                               return VN_INACTIVE_CACHE;
-               }
-               goto out;
-       }
-
-       if (S_ISREG(ip->i_d.di_mode) &&
-           (ip->i_d.di_size != 0 || XFS_ISIZE(ip) != 0 ||
-            ip->i_d.di_nextents > 0 || ip->i_delayed_blks > 0))
-               truncate = 1;
-
-       error = xfs_qm_dqattach(ip, 0);
-       if (error)
-               return VN_INACTIVE_CACHE;
-
-       tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE);
-       error = xfs_trans_reserve(tp, 0,
-                       (truncate || S_ISLNK(ip->i_d.di_mode)) ?
-                               XFS_ITRUNCATE_LOG_RES(mp) :
-                               XFS_IFREE_LOG_RES(mp),
-                       0,
-                       XFS_TRANS_PERM_LOG_RES,
-                       XFS_ITRUNCATE_LOG_COUNT);
-       if (error) {
-               ASSERT(XFS_FORCED_SHUTDOWN(mp));
-               xfs_trans_cancel(tp, 0);
-               return VN_INACTIVE_CACHE;
-       }
-
-       xfs_ilock(ip, XFS_ILOCK_EXCL);
-       xfs_trans_ijoin(tp, ip, 0);
-
-       if (S_ISLNK(ip->i_d.di_mode)) {
-               error = xfs_inactive_symlink(ip, &tp);
-               if (error)
-                       goto out_cancel;
-       } else if (truncate) {
-               ip->i_d.di_size = 0;
-               xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
-
-               error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0);
-               if (error)
-                       goto out_cancel;
-
-               ASSERT(ip->i_d.di_nextents == 0);
-       }
-
-       /*
-        * If there are attributes associated with the file then blow them away
-        * now.  The code calls a routine that recursively deconstructs the
-        * attribute fork.  We need to just commit the current transaction
-        * because we can't use it for xfs_attr_inactive().
-        */
-       if (ip->i_d.di_anextents > 0) {
-               ASSERT(ip->i_d.di_forkoff != 0);
-
-               error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
-               if (error)
-                       goto out_unlock;
-
-               xfs_iunlock(ip, XFS_ILOCK_EXCL);
-
-               error = xfs_attr_inactive(ip);
-               if (error)
-                       goto out;
-
-               tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE);
-               error = xfs_trans_reserve(tp, 0,
-                                         XFS_IFREE_LOG_RES(mp),
-                                         0, XFS_TRANS_PERM_LOG_RES,
-                                         XFS_INACTIVE_LOG_COUNT);
-               if (error) {
-                       xfs_trans_cancel(tp, 0);
-                       goto out;
-               }
-
-               xfs_ilock(ip, XFS_ILOCK_EXCL);
-               xfs_trans_ijoin(tp, ip, 0);
-       }
-
-       if (ip->i_afp)
-               xfs_idestroy_fork(ip, XFS_ATTR_FORK);
-
-       ASSERT(ip->i_d.di_anextents == 0);
-
-       /*
-        * Free the inode.
-        */
-       xfs_bmap_init(&free_list, &first_block);
-       error = xfs_ifree(tp, ip, &free_list);
-       if (error) {
-               /*
-                * If we fail to free the inode, shut down.  The cancel
-                * might do that, we need to make sure.  Otherwise the
-                * inode might be lost for a long time or forever.
-                */
-               if (!XFS_FORCED_SHUTDOWN(mp)) {
-                       xfs_notice(mp, "%s: xfs_ifree returned error %d",
-                               __func__, error);
-                       xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
-               }
-               xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES|XFS_TRANS_ABORT);
-       } else {
-               /*
-                * Credit the quota account(s). The inode is gone.
-                */
-               xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_ICOUNT, -1);
-
-               /*
-                * Just ignore errors at this point.  There is nothing we can
-                * do except to try to keep going. Make sure it's not a silent
-                * error.
-                */
-               error = xfs_bmap_finish(&tp,  &free_list, &committed);
-               if (error)
-                       xfs_notice(mp, "%s: xfs_bmap_finish returned error %d",
-                               __func__, error);
-               error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
-               if (error)
-                       xfs_notice(mp, "%s: xfs_trans_commit returned error %d",
-                               __func__, error);
-       }
-
-       /*
-        * Release the dquots held by inode, if any.
-        */
-       xfs_qm_dqdetach(ip);
-out_unlock:
-       xfs_iunlock(ip, XFS_ILOCK_EXCL);
-out:
-       return VN_INACTIVE_CACHE;
-out_cancel:
-       xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
-       goto out_unlock;
-}
-
-/*
- * Lookups up an inode from "name". If ci_name is not NULL, then a CI match
- * is allowed, otherwise it has to be an exact match. If a CI match is found,
- * ci_name->name will point to a the actual name (caller must free) or
- * will be set to NULL if an exact match is found.
- */
-int
-xfs_lookup(
-       xfs_inode_t             *dp,
-       struct xfs_name         *name,
-       xfs_inode_t             **ipp,
-       struct xfs_name         *ci_name)
-{
-       xfs_ino_t               inum;
-       int                     error;
-       uint                    lock_mode;
-
-       trace_xfs_lookup(dp, name);
-
-       if (XFS_FORCED_SHUTDOWN(dp->i_mount))
-               return XFS_ERROR(EIO);
-
-       lock_mode = xfs_ilock_map_shared(dp);
-       error = xfs_dir_lookup(NULL, dp, name, &inum, ci_name);
-       xfs_iunlock_map_shared(dp, lock_mode);
-
-       if (error)
-               goto out;
-
-       error = xfs_iget(dp->i_mount, NULL, inum, 0, 0, ipp);
-       if (error)
-               goto out_free_name;
-
-       return 0;
-
-out_free_name:
-       if (ci_name)
-               kmem_free(ci_name->name);
-out:
-       *ipp = NULL;
-       return error;
-}
-
-int
-xfs_create(
-       xfs_inode_t             *dp,
-       struct xfs_name         *name,
-       umode_t                 mode,
-       xfs_dev_t               rdev,
-       xfs_inode_t             **ipp)
-{
-       int                     is_dir = S_ISDIR(mode);
-       struct xfs_mount        *mp = dp->i_mount;
-       struct xfs_inode        *ip = NULL;
-       struct xfs_trans        *tp = NULL;
-       int                     error;
-       xfs_bmap_free_t         free_list;
-       xfs_fsblock_t           first_block;
-       bool                    unlock_dp_on_error = false;
-       uint                    cancel_flags;
-       int                     committed;
-       prid_t                  prid;
-       struct xfs_dquot        *udqp = NULL;
-       struct xfs_dquot        *gdqp = NULL;
-       struct xfs_dquot        *pdqp = NULL;
-       uint                    resblks;
-       uint                    log_res;
-       uint                    log_count;
-
-       trace_xfs_create(dp, name);
-
-       if (XFS_FORCED_SHUTDOWN(mp))
-               return XFS_ERROR(EIO);
-
-       if (dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT)
-               prid = xfs_get_projid(dp);
-       else
-               prid = XFS_PROJID_DEFAULT;
-
-       /*
-        * Make sure that we have allocated dquot(s) on disk.
-        */
-       error = xfs_qm_vop_dqalloc(dp, current_fsuid(), current_fsgid(), prid,
-                                       XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
-                                       &udqp, &gdqp, &pdqp);
-       if (error)
-               return error;
-
-       if (is_dir) {
-               rdev = 0;
-               resblks = XFS_MKDIR_SPACE_RES(mp, name->len);
-               log_res = XFS_MKDIR_LOG_RES(mp);
-               log_count = XFS_MKDIR_LOG_COUNT;
-               tp = xfs_trans_alloc(mp, XFS_TRANS_MKDIR);
-       } else {
-               resblks = XFS_CREATE_SPACE_RES(mp, name->len);
-               log_res = XFS_CREATE_LOG_RES(mp);
-               log_count = XFS_CREATE_LOG_COUNT;
-               tp = xfs_trans_alloc(mp, XFS_TRANS_CREATE);
-       }
-
-       cancel_flags = XFS_TRANS_RELEASE_LOG_RES;
-
-       /*
-        * Initially assume that the file does not exist and
-        * reserve the resources for that case.  If that is not
-        * the case we'll drop the one we have and get a more
-        * appropriate transaction later.
-        */
-       error = xfs_trans_reserve(tp, resblks, log_res, 0,
-                       XFS_TRANS_PERM_LOG_RES, log_count);
-       if (error == ENOSPC) {
-               /* flush outstanding delalloc blocks and retry */
-               xfs_flush_inodes(mp);
-               error = xfs_trans_reserve(tp, resblks, log_res, 0,
-                               XFS_TRANS_PERM_LOG_RES, log_count);
-       }
-       if (error == ENOSPC) {
-               /* No space at all so try a "no-allocation" reservation */
-               resblks = 0;
-               error = xfs_trans_reserve(tp, 0, log_res, 0,
-                               XFS_TRANS_PERM_LOG_RES, log_count);
-       }
-       if (error) {
-               cancel_flags = 0;
-               goto out_trans_cancel;
-       }
-
-       xfs_ilock(dp, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT);
-       unlock_dp_on_error = true;
-
-       xfs_bmap_init(&free_list, &first_block);
-
-       /*
-        * Reserve disk quota and the inode.
-        */
-       error = xfs_trans_reserve_quota(tp, mp, udqp, gdqp,
-                                               pdqp, resblks, 1, 0);
-       if (error)
-               goto out_trans_cancel;
-
-       error = xfs_dir_canenter(tp, dp, name, resblks);
-       if (error)
-               goto out_trans_cancel;
-
-       /*
-        * A newly created regular or special file just has one directory
-        * entry pointing to them, but a directory also the "." entry
-        * pointing to itself.
-        */
-       error = xfs_dir_ialloc(&tp, dp, mode, is_dir ? 2 : 1, rdev,
-                              prid, resblks > 0, &ip, &committed);
-       if (error) {
-               if (error == ENOSPC)
-                       goto out_trans_cancel;
-               goto out_trans_abort;
-       }
-
-       /*
-        * Now we join the directory inode to the transaction.  We do not do it
-        * earlier because xfs_dir_ialloc might commit the previous transaction
-        * (and release all the locks).  An error from here on will result in
-        * the transaction cancel unlocking dp so don't do it explicitly in the
-        * error path.
-        */
-       xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
-       unlock_dp_on_error = false;
-
-       error = xfs_dir_createname(tp, dp, name, ip->i_ino,
-                                       &first_block, &free_list, resblks ?
-                                       resblks - XFS_IALLOC_SPACE_RES(mp) : 0);
-       if (error) {
-               ASSERT(error != ENOSPC);
-               goto out_trans_abort;
-       }
-       xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
-       xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
-
-       if (is_dir) {
-               error = xfs_dir_init(tp, ip, dp);
-               if (error)
-                       goto out_bmap_cancel;
-
-               error = xfs_bumplink(tp, dp);
-               if (error)
-                       goto out_bmap_cancel;
-       }
-
-       /*
-        * If this is a synchronous mount, make sure that the
-        * create transaction goes to disk before returning to
-        * the user.
-        */
-       if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
-               xfs_trans_set_sync(tp);
-
-       /*
-        * Attach the dquot(s) to the inodes and modify them incore.
-        * These ids of the inode couldn't have changed since the new
-        * inode has been locked ever since it was created.
-        */
-       xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);
-
-       error = xfs_bmap_finish(&tp, &free_list, &committed);
-       if (error)
-               goto out_bmap_cancel;
-
-       error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
-       if (error)
-               goto out_release_inode;
-
-       xfs_qm_dqrele(udqp);
-       xfs_qm_dqrele(gdqp);
-       xfs_qm_dqrele(pdqp);
-
-       *ipp = ip;
-       return 0;
-
- out_bmap_cancel:
-       xfs_bmap_cancel(&free_list);
- out_trans_abort:
-       cancel_flags |= XFS_TRANS_ABORT;
- out_trans_cancel:
-       xfs_trans_cancel(tp, cancel_flags);
- out_release_inode:
-       /*
-        * Wait until after the current transaction is aborted to
-        * release the inode.  This prevents recursive transactions
-        * and deadlocks from xfs_inactive.
-        */
-       if (ip)
-               IRELE(ip);
-
-       xfs_qm_dqrele(udqp);
-       xfs_qm_dqrele(gdqp);
-       xfs_qm_dqrele(pdqp);
-
-       if (unlock_dp_on_error)
-               xfs_iunlock(dp, XFS_ILOCK_EXCL);
-       return error;
-}
-
-#ifdef DEBUG
-int xfs_locked_n;
-int xfs_small_retries;
-int xfs_middle_retries;
-int xfs_lots_retries;
-int xfs_lock_delays;
-#endif
-
-/*
- * Bump the subclass so xfs_lock_inodes() acquires each lock with
- * a different value
- */
-static inline int
-xfs_lock_inumorder(int lock_mode, int subclass)
-{
-       if (lock_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL))
-               lock_mode |= (subclass + XFS_LOCK_INUMORDER) << XFS_IOLOCK_SHIFT;
-       if (lock_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL))
-               lock_mode |= (subclass + XFS_LOCK_INUMORDER) << XFS_ILOCK_SHIFT;
-
-       return lock_mode;
-}
-
-/*
- * The following routine will lock n inodes in exclusive mode.
- * We assume the caller calls us with the inodes in i_ino order.
- *
- * We need to detect deadlock where an inode that we lock
- * is in the AIL and we start waiting for another inode that is locked
- * by a thread in a long running transaction (such as truncate). This can
- * result in deadlock since the long running trans might need to wait
- * for the inode we just locked in order to push the tail and free space
- * in the log.
- */
-void
-xfs_lock_inodes(
-       xfs_inode_t     **ips,
-       int             inodes,
-       uint            lock_mode)
-{
-       int             attempts = 0, i, j, try_lock;
-       xfs_log_item_t  *lp;
-
-       ASSERT(ips && (inodes >= 2)); /* we need at least two */
-
-       try_lock = 0;
-       i = 0;
-
-again:
-       for (; i < inodes; i++) {
-               ASSERT(ips[i]);
-
-               if (i && (ips[i] == ips[i-1]))  /* Already locked */
-                       continue;
-
-               /*
-                * If try_lock is not set yet, make sure all locked inodes
-                * are not in the AIL.
-                * If any are, set try_lock to be used later.
-                */
-
-               if (!try_lock) {
-                       for (j = (i - 1); j >= 0 && !try_lock; j--) {
-                               lp = (xfs_log_item_t *)ips[j]->i_itemp;
-                               if (lp && (lp->li_flags & XFS_LI_IN_AIL)) {
-                                       try_lock++;
-                               }
-                       }
-               }
-
-               /*
-                * If any of the previous locks we have locked is in the AIL,
-                * we must TRY to get the second and subsequent locks. If
-                * we can't get any, we must release all we have
-                * and try again.
-                */
-
-               if (try_lock) {
-                       /* try_lock must be 0 if i is 0. */
-                       /*
-                        * try_lock means we have an inode locked
-                        * that is in the AIL.
-                        */
-                       ASSERT(i != 0);
-                       if (!xfs_ilock_nowait(ips[i], xfs_lock_inumorder(lock_mode, i))) {
-                               attempts++;
-
-                               /*
-                                * Unlock all previous guys and try again.
-                                * xfs_iunlock will try to push the tail
-                                * if the inode is in the AIL.
-                                */
-
-                               for(j = i - 1; j >= 0; j--) {
-
-                                       /*
-                                        * Check to see if we've already
-                                        * unlocked this one.
-                                        * Not the first one going back,
-                                        * and the inode ptr is the same.
-                                        */
-                                       if ((j != (i - 1)) && ips[j] ==
-                                                               ips[j+1])
-                                               continue;
-
-                                       xfs_iunlock(ips[j], lock_mode);
-                               }
-
-                               if ((attempts % 5) == 0) {
-                                       delay(1); /* Don't just spin the CPU */
-#ifdef DEBUG
-                                       xfs_lock_delays++;
-#endif
-                               }
-                               i = 0;
-                               try_lock = 0;
-                               goto again;
-                       }
-               } else {
-                       xfs_ilock(ips[i], xfs_lock_inumorder(lock_mode, i));
-               }
-       }
-
-#ifdef DEBUG
-       if (attempts) {
-               if (attempts < 5) xfs_small_retries++;
-               else if (attempts < 100) xfs_middle_retries++;
-               else xfs_lots_retries++;
-       } else {
-               xfs_locked_n++;
-       }
-#endif
-}
-
-/*
- * xfs_lock_two_inodes() can only be used to lock one type of lock
- * at a time - the iolock or the ilock, but not both at once. If
- * we lock both at once, lockdep will report false positives saying
- * we have violated locking orders.
- */
-void
-xfs_lock_two_inodes(
-       xfs_inode_t             *ip0,
-       xfs_inode_t             *ip1,
-       uint                    lock_mode)
-{
-       xfs_inode_t             *temp;
-       int                     attempts = 0;
-       xfs_log_item_t          *lp;
-
-       if (lock_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL))
-               ASSERT((lock_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)) == 0);
-       ASSERT(ip0->i_ino != ip1->i_ino);
-
-       if (ip0->i_ino > ip1->i_ino) {
-               temp = ip0;
-               ip0 = ip1;
-               ip1 = temp;
-       }
-
- again:
-       xfs_ilock(ip0, xfs_lock_inumorder(lock_mode, 0));
-
-       /*
-        * If the first lock we have locked is in the AIL, we must TRY to get
-        * the second lock. If we can't get it, we must release the first one
-        * and try again.
-        */
-       lp = (xfs_log_item_t *)ip0->i_itemp;
-       if (lp && (lp->li_flags & XFS_LI_IN_AIL)) {
-               if (!xfs_ilock_nowait(ip1, xfs_lock_inumorder(lock_mode, 1))) {
-                       xfs_iunlock(ip0, lock_mode);
-                       if ((++attempts % 5) == 0)
-                               delay(1); /* Don't just spin the CPU */
-                       goto again;
-               }
-       } else {
-               xfs_ilock(ip1, xfs_lock_inumorder(lock_mode, 1));
-       }
-}
-
-int
-xfs_remove(
-       xfs_inode_t             *dp,
-       struct xfs_name         *name,
-       xfs_inode_t             *ip)
-{
-       xfs_mount_t             *mp = dp->i_mount;
-       xfs_trans_t             *tp = NULL;
-       int                     is_dir = S_ISDIR(ip->i_d.di_mode);
-       int                     error = 0;
-       xfs_bmap_free_t         free_list;
-       xfs_fsblock_t           first_block;
-       int                     cancel_flags;
-       int                     committed;
-       int                     link_zero;
-       uint                    resblks;
-       uint                    log_count;
-
-       trace_xfs_remove(dp, name);
-
-       if (XFS_FORCED_SHUTDOWN(mp))
-               return XFS_ERROR(EIO);
-
-       error = xfs_qm_dqattach(dp, 0);
-       if (error)
-               goto std_return;
-
-       error = xfs_qm_dqattach(ip, 0);
-       if (error)
-               goto std_return;
-
-       if (is_dir) {
-               tp = xfs_trans_alloc(mp, XFS_TRANS_RMDIR);
-               log_count = XFS_DEFAULT_LOG_COUNT;
-       } else {
-               tp = xfs_trans_alloc(mp, XFS_TRANS_REMOVE);
-               log_count = XFS_REMOVE_LOG_COUNT;
-       }
-       cancel_flags = XFS_TRANS_RELEASE_LOG_RES;
-
-       /*
-        * We try to get the real space reservation first,
-        * allowing for directory btree deletion(s) implying
-        * possible bmap insert(s).  If we can't get the space
-        * reservation then we use 0 instead, and avoid the bmap
-        * btree insert(s) in the directory code by, if the bmap
-        * insert tries to happen, instead trimming the LAST
-        * block from the directory.
-        */
-       resblks = XFS_REMOVE_SPACE_RES(mp);
-       error = xfs_trans_reserve(tp, resblks, XFS_REMOVE_LOG_RES(mp), 0,
-                                 XFS_TRANS_PERM_LOG_RES, log_count);
-       if (error == ENOSPC) {
-               resblks = 0;
-               error = xfs_trans_reserve(tp, 0, XFS_REMOVE_LOG_RES(mp), 0,
-                                         XFS_TRANS_PERM_LOG_RES, log_count);
-       }
-       if (error) {
-               ASSERT(error != ENOSPC);
-               cancel_flags = 0;
-               goto out_trans_cancel;
-       }
-
-       xfs_lock_two_inodes(dp, ip, XFS_ILOCK_EXCL);
-
-       xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
-       xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
-
-       /*
-        * If we're removing a directory perform some additional validation.
-        */
-       if (is_dir) {
-               ASSERT(ip->i_d.di_nlink >= 2);
-               if (ip->i_d.di_nlink != 2) {
-                       error = XFS_ERROR(ENOTEMPTY);
-                       goto out_trans_cancel;
-               }
-               if (!xfs_dir_isempty(ip)) {
-                       error = XFS_ERROR(ENOTEMPTY);
-                       goto out_trans_cancel;
-               }
-       }
-
-       xfs_bmap_init(&free_list, &first_block);
-       error = xfs_dir_removename(tp, dp, name, ip->i_ino,
-                                       &first_block, &free_list, resblks);
-       if (error) {
-               ASSERT(error != ENOENT);
-               goto out_bmap_cancel;
-       }
-       xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
-
-       if (is_dir) {
-               /*
-                * Drop the link from ip's "..".
-                */
-               error = xfs_droplink(tp, dp);
-               if (error)
-                       goto out_bmap_cancel;
-
-               /*
-                * Drop the "." link from ip to self.
-                */
-               error = xfs_droplink(tp, ip);
-               if (error)
-                       goto out_bmap_cancel;
-       } else {
-               /*
-                * When removing a non-directory we need to log the parent
-                * inode here.  For a directory this is done implicitly
-                * by the xfs_droplink call for the ".." entry.
-                */
-               xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
-       }
-
-       /*
-        * Drop the link from dp to ip.
-        */
-       error = xfs_droplink(tp, ip);
-       if (error)
-               goto out_bmap_cancel;
-
-       /*
-        * Determine if this is the last link while
-        * we are in the transaction.
-        */
-       link_zero = (ip->i_d.di_nlink == 0);
-
-       /*
-        * If this is a synchronous mount, make sure that the
-        * remove transaction goes to disk before returning to
-        * the user.
-        */
-       if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
-               xfs_trans_set_sync(tp);
-
-       error = xfs_bmap_finish(&tp, &free_list, &committed);
-       if (error)
-               goto out_bmap_cancel;
-
-       error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
-       if (error)
-               goto std_return;
-
-       /*
-        * If we are using filestreams, kill the stream association.
-        * If the file is still open it may get a new one but that
-        * will get killed on last close in xfs_close() so we don't
-        * have to worry about that.
-        */
-       if (!is_dir && link_zero && xfs_inode_is_filestream(ip))
-               xfs_filestream_deassociate(ip);
-
-       return 0;
-
- out_bmap_cancel:
-       xfs_bmap_cancel(&free_list);
-       cancel_flags |= XFS_TRANS_ABORT;
- out_trans_cancel:
-       xfs_trans_cancel(tp, cancel_flags);
- std_return:
-       return error;
-}
-
-int
-xfs_link(
-       xfs_inode_t             *tdp,
-       xfs_inode_t             *sip,
-       struct xfs_name         *target_name)
-{
-       xfs_mount_t             *mp = tdp->i_mount;
-       xfs_trans_t             *tp;
-       int                     error;
-       xfs_bmap_free_t         free_list;
-       xfs_fsblock_t           first_block;
-       int                     cancel_flags;
-       int                     committed;
-       int                     resblks;
-
-       trace_xfs_link(tdp, target_name);
-
-       ASSERT(!S_ISDIR(sip->i_d.di_mode));
-
-       if (XFS_FORCED_SHUTDOWN(mp))
-               return XFS_ERROR(EIO);
-
-       error = xfs_qm_dqattach(sip, 0);
-       if (error)
-               goto std_return;
-
-       error = xfs_qm_dqattach(tdp, 0);
-       if (error)
-               goto std_return;
-
-       tp = xfs_trans_alloc(mp, XFS_TRANS_LINK);
-       cancel_flags = XFS_TRANS_RELEASE_LOG_RES;
-       resblks = XFS_LINK_SPACE_RES(mp, target_name->len);
-       error = xfs_trans_reserve(tp, resblks, XFS_LINK_LOG_RES(mp), 0,
-                       XFS_TRANS_PERM_LOG_RES, XFS_LINK_LOG_COUNT);
-       if (error == ENOSPC) {
-               resblks = 0;
-               error = xfs_trans_reserve(tp, 0, XFS_LINK_LOG_RES(mp), 0,
-                               XFS_TRANS_PERM_LOG_RES, XFS_LINK_LOG_COUNT);
-       }
-       if (error) {
-               cancel_flags = 0;
-               goto error_return;
-       }
-
-       xfs_lock_two_inodes(sip, tdp, XFS_ILOCK_EXCL);
-
-       xfs_trans_ijoin(tp, sip, XFS_ILOCK_EXCL);
-       xfs_trans_ijoin(tp, tdp, XFS_ILOCK_EXCL);
-
-       /*
-        * If we are using project inheritance, we only allow hard link
-        * creation in our tree when the project IDs are the same; else
-        * the tree quota mechanism could be circumvented.
-        */
-       if (unlikely((tdp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
-                    (xfs_get_projid(tdp) != xfs_get_projid(sip)))) {
-               error = XFS_ERROR(EXDEV);
-               goto error_return;
-       }
-
-       error = xfs_dir_canenter(tp, tdp, target_name, resblks);
-       if (error)
-               goto error_return;
-
-       xfs_bmap_init(&free_list, &first_block);
-
-       error = xfs_dir_createname(tp, tdp, target_name, sip->i_ino,
-                                       &first_block, &free_list, resblks);
-       if (error)
-               goto abort_return;
-       xfs_trans_ichgtime(tp, tdp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
-       xfs_trans_log_inode(tp, tdp, XFS_ILOG_CORE);
-
-       error = xfs_bumplink(tp, sip);
-       if (error)
-               goto abort_return;
-
-       /*
-        * If this is a synchronous mount, make sure that the
-        * link transaction goes to disk before returning to
-        * the user.
-        */
-       if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC)) {
-               xfs_trans_set_sync(tp);
-       }
-
-       error = xfs_bmap_finish (&tp, &free_list, &committed);
-       if (error) {
-               xfs_bmap_cancel(&free_list);
-               goto abort_return;
-       }
-
-       return xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
-
- abort_return:
-       cancel_flags |= XFS_TRANS_ABORT;
- error_return:
-       xfs_trans_cancel(tp, cancel_flags);
- std_return:
-       return error;
-}
-
-int
-xfs_set_dmattrs(
-       xfs_inode_t     *ip,
-       u_int           evmask,
-       u_int16_t       state)
-{
-       xfs_mount_t     *mp = ip->i_mount;
-       xfs_trans_t     *tp;
-       int             error;
-
-       if (!capable(CAP_SYS_ADMIN))
-               return XFS_ERROR(EPERM);
-
-       if (XFS_FORCED_SHUTDOWN(mp))
-               return XFS_ERROR(EIO);
-
-       tp = xfs_trans_alloc(mp, XFS_TRANS_SET_DMATTRS);
-       error = xfs_trans_reserve(tp, 0, XFS_ICHANGE_LOG_RES (mp), 0, 0, 0);
-       if (error) {
-               xfs_trans_cancel(tp, 0);
-               return error;
-       }
-       xfs_ilock(ip, XFS_ILOCK_EXCL);
-       xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
-
-       ip->i_d.di_dmevmask = evmask;
-       ip->i_d.di_dmstate  = state;
-
-       xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
-       error = xfs_trans_commit(tp, 0);
-
-       return error;
-}
-
-/*
- * xfs_alloc_file_space()
- *      This routine allocates disk space for the given file.
- *
- *     If alloc_type == 0, this request is for an ALLOCSP type
- *     request which will change the file size.  In this case, no
- *     DMAPI event will be generated by the call.  A TRUNCATE event
- *     will be generated later by xfs_setattr.
- *
- *     If alloc_type != 0, this request is for a RESVSP type
- *     request, and a DMAPI DM_EVENT_WRITE will be generated if the
- *     lower block boundary byte address is less than the file's
- *     length.
- *
- * RETURNS:
- *       0 on success
- *      errno on error
- *
- */
-STATIC int
-xfs_alloc_file_space(
-       xfs_inode_t             *ip,
-       xfs_off_t               offset,
-       xfs_off_t               len,
-       int                     alloc_type,
-       int                     attr_flags)
-{
-       xfs_mount_t             *mp = ip->i_mount;
-       xfs_off_t               count;
-       xfs_filblks_t           allocated_fsb;
-       xfs_filblks_t           allocatesize_fsb;
-       xfs_extlen_t            extsz, temp;
-       xfs_fileoff_t           startoffset_fsb;
-       xfs_fsblock_t           firstfsb;
-       int                     nimaps;
-       int                     quota_flag;
-       int                     rt;
-       xfs_trans_t             *tp;
-       xfs_bmbt_irec_t         imaps[1], *imapp;
-       xfs_bmap_free_t         free_list;
-       uint                    qblocks, resblks, resrtextents;
-       int                     committed;
-       int                     error;
-
-       trace_xfs_alloc_file_space(ip);
-
-       if (XFS_FORCED_SHUTDOWN(mp))
-               return XFS_ERROR(EIO);
-
-       error = xfs_qm_dqattach(ip, 0);
-       if (error)
-               return error;
-
-       if (len <= 0)
-               return XFS_ERROR(EINVAL);
-
-       rt = XFS_IS_REALTIME_INODE(ip);
-       extsz = xfs_get_extsz_hint(ip);
-
-       count = len;
-       imapp = &imaps[0];
-       nimaps = 1;
-       startoffset_fsb = XFS_B_TO_FSBT(mp, offset);
-       allocatesize_fsb = XFS_B_TO_FSB(mp, count);
-
-       /*
-        * Allocate file space until done or until there is an error
-        */
-       while (allocatesize_fsb && !error) {
-               xfs_fileoff_t   s, e;
-
-               /*
-                * Determine space reservations for data/realtime.
-                */
-               if (unlikely(extsz)) {
-                       s = startoffset_fsb;
-                       do_div(s, extsz);
-                       s *= extsz;
-                       e = startoffset_fsb + allocatesize_fsb;
-                       if ((temp = do_mod(startoffset_fsb, extsz)))
-                               e += temp;
-                       if ((temp = do_mod(e, extsz)))
-                               e += extsz - temp;
-               } else {
-                       s = 0;
-                       e = allocatesize_fsb;
-               }
-
-               /*
-                * The transaction reservation is limited to a 32-bit block
-                * count, hence we need to limit the number of blocks we are
-                * trying to reserve to avoid an overflow. We can't allocate
-                * more than @nimaps extents, and an extent is limited on disk
-                * to MAXEXTLEN (21 bits), so use that to enforce the limit.
-                */
-               resblks = min_t(xfs_fileoff_t, (e - s), (MAXEXTLEN * nimaps));
-               if (unlikely(rt)) {
-                       resrtextents = qblocks = resblks;
-                       resrtextents /= mp->m_sb.sb_rextsize;
-                       resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
-                       quota_flag = XFS_QMOPT_RES_RTBLKS;
-               } else {
-                       resrtextents = 0;
-                       resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resblks);
-                       quota_flag = XFS_QMOPT_RES_REGBLKS;
-               }
-
-               /*
-                * Allocate and setup the transaction.
-                */
-               tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
-               error = xfs_trans_reserve(tp, resblks,
-                                         XFS_WRITE_LOG_RES(mp), resrtextents,
-                                         XFS_TRANS_PERM_LOG_RES,
-                                         XFS_WRITE_LOG_COUNT);
-               /*
-                * Check for running out of space
-                */
-               if (error) {
-                       /*
-                        * Free the transaction structure.
-                        */
-                       ASSERT(error == ENOSPC || XFS_FORCED_SHUTDOWN(mp));
-                       xfs_trans_cancel(tp, 0);
-                       break;
-               }
-               xfs_ilock(ip, XFS_ILOCK_EXCL);
-               error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks,
-                                                     0, quota_flag);
-               if (error)
-                       goto error1;
-
-               xfs_trans_ijoin(tp, ip, 0);
-
-               xfs_bmap_init(&free_list, &firstfsb);
-               error = xfs_bmapi_write(tp, ip, startoffset_fsb,
-                                       allocatesize_fsb, alloc_type, &firstfsb,
-                                       0, imapp, &nimaps, &free_list);
-               if (error) {
-                       goto error0;
-               }
-
-               /*
-                * Complete the transaction
-                */
-               error = xfs_bmap_finish(&tp, &free_list, &committed);
-               if (error) {
-                       goto error0;
-               }
-
-               error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
-               xfs_iunlock(ip, XFS_ILOCK_EXCL);
-               if (error) {
-                       break;
-               }
-
-               allocated_fsb = imapp->br_blockcount;
-
-               if (nimaps == 0) {
-                       error = XFS_ERROR(ENOSPC);
-                       break;
-               }
-
-               startoffset_fsb += allocated_fsb;
-               allocatesize_fsb -= allocated_fsb;
-       }
-
-       return error;
-
-error0:        /* Cancel bmap, unlock inode, unreserve quota blocks, cancel trans */
-       xfs_bmap_cancel(&free_list);
-       xfs_trans_unreserve_quota_nblks(tp, ip, (long)qblocks, 0, quota_flag);
-
-error1:        /* Just cancel transaction */
-       xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
-       xfs_iunlock(ip, XFS_ILOCK_EXCL);
-       return error;
-}
-
-/*
- * Zero file bytes between startoff and endoff inclusive.
- * The iolock is held exclusive and no blocks are buffered.
- *
- * This function is used by xfs_free_file_space() to zero
- * partial blocks when the range to free is not block aligned.
- * When unreserving space with boundaries that are not block
- * aligned we round up the start and round down the end
- * boundaries and then use this function to zero the parts of
- * the blocks that got dropped during the rounding.
- */
-STATIC int
-xfs_zero_remaining_bytes(
-       xfs_inode_t             *ip,
-       xfs_off_t               startoff,
-       xfs_off_t               endoff)
-{
-       xfs_bmbt_irec_t         imap;
-       xfs_fileoff_t           offset_fsb;
-       xfs_off_t               lastoffset;
-       xfs_off_t               offset;
-       xfs_buf_t               *bp;
-       xfs_mount_t             *mp = ip->i_mount;
-       int                     nimap;
-       int                     error = 0;
-
-       /*
-        * Avoid doing I/O beyond eof - it's not necessary
-        * since nothing can read beyond eof.  The space will
-        * be zeroed when the file is extended anyway.
-        */
-       if (startoff >= XFS_ISIZE(ip))
-               return 0;
-
-       if (endoff > XFS_ISIZE(ip))
-               endoff = XFS_ISIZE(ip);
-
-       bp = xfs_buf_get_uncached(XFS_IS_REALTIME_INODE(ip) ?
-                                       mp->m_rtdev_targp : mp->m_ddev_targp,
-                                 BTOBB(mp->m_sb.sb_blocksize), 0);
-       if (!bp)
-               return XFS_ERROR(ENOMEM);
-
-       xfs_buf_unlock(bp);
-
-       for (offset = startoff; offset <= endoff; offset = lastoffset + 1) {
-               offset_fsb = XFS_B_TO_FSBT(mp, offset);
-               nimap = 1;
-               error = xfs_bmapi_read(ip, offset_fsb, 1, &imap, &nimap, 0);
-               if (error || nimap < 1)
-                       break;
-               ASSERT(imap.br_blockcount >= 1);
-               ASSERT(imap.br_startoff == offset_fsb);
-               lastoffset = XFS_FSB_TO_B(mp, imap.br_startoff + 1) - 1;
-               if (lastoffset > endoff)
-                       lastoffset = endoff;
-               if (imap.br_startblock == HOLESTARTBLOCK)
-                       continue;
-               ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
-               if (imap.br_state == XFS_EXT_UNWRITTEN)
-                       continue;
-               XFS_BUF_UNDONE(bp);
-               XFS_BUF_UNWRITE(bp);
-               XFS_BUF_READ(bp);
-               XFS_BUF_SET_ADDR(bp, xfs_fsb_to_db(ip, imap.br_startblock));
-               xfsbdstrat(mp, bp);
-               error = xfs_buf_iowait(bp);
-               if (error) {
-                       xfs_buf_ioerror_alert(bp,
-                                       "xfs_zero_remaining_bytes(read)");
-                       break;
-               }
-               memset(bp->b_addr +
-                       (offset - XFS_FSB_TO_B(mp, imap.br_startoff)),
-                     0, lastoffset - offset + 1);
-               XFS_BUF_UNDONE(bp);
-               XFS_BUF_UNREAD(bp);
-               XFS_BUF_WRITE(bp);
-               xfsbdstrat(mp, bp);
-               error = xfs_buf_iowait(bp);
-               if (error) {
-                       xfs_buf_ioerror_alert(bp,
-                                       "xfs_zero_remaining_bytes(write)");
-                       break;
-               }
-       }
-       xfs_buf_free(bp);
-       return error;
-}
-
-/*
- * xfs_free_file_space()
- *      This routine frees disk space for the given file.
- *
- *     This routine is only called by xfs_change_file_space
- *     for an UNRESVSP type call.
- *
- * RETURNS:
- *       0 on success
- *      errno on error
- *
- */
-STATIC int
-xfs_free_file_space(
-       xfs_inode_t             *ip,
-       xfs_off_t               offset,
-       xfs_off_t               len,
-       int                     attr_flags)
-{
-       int                     committed;
-       int                     done;
-       xfs_fileoff_t           endoffset_fsb;
-       int                     error;
-       xfs_fsblock_t           firstfsb;
-       xfs_bmap_free_t         free_list;
-       xfs_bmbt_irec_t         imap;
-       xfs_off_t               ioffset;
-       xfs_extlen_t            mod=0;
-       xfs_mount_t             *mp;
-       int                     nimap;
-       uint                    resblks;
-       xfs_off_t               rounding;
-       int                     rt;
-       xfs_fileoff_t           startoffset_fsb;
-       xfs_trans_t             *tp;
-       int                     need_iolock = 1;
-
-       mp = ip->i_mount;
-
-       trace_xfs_free_file_space(ip);
-
-       error = xfs_qm_dqattach(ip, 0);
-       if (error)
-               return error;
-
-       error = 0;
-       if (len <= 0)   /* if nothing being freed */
-               return error;
-       rt = XFS_IS_REALTIME_INODE(ip);
-       startoffset_fsb = XFS_B_TO_FSB(mp, offset);
-       endoffset_fsb = XFS_B_TO_FSBT(mp, offset + len);
-
-       if (attr_flags & XFS_ATTR_NOLOCK)
-               need_iolock = 0;
-       if (need_iolock) {
-               xfs_ilock(ip, XFS_IOLOCK_EXCL);
-               /* wait for the completion of any pending DIOs */
-               inode_dio_wait(VFS_I(ip));
-       }
-
-       rounding = max_t(xfs_off_t, 1 << mp->m_sb.sb_blocklog, PAGE_CACHE_SIZE);
-       ioffset = offset & ~(rounding - 1);
-       error = -filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
-                                             ioffset, -1);
-       if (error)
-               goto out_unlock_iolock;
-       truncate_pagecache_range(VFS_I(ip), ioffset, -1);
-
-       /*
-        * Need to zero the stuff we're not freeing, on disk.
-        * If it's a realtime file & can't use unwritten extents then we
-        * actually need to zero the extent edges.  Otherwise xfs_bunmapi
-        * will take care of it for us.
-        */
-       if (rt && !xfs_sb_version_hasextflgbit(&mp->m_sb)) {
-               nimap = 1;
-               error = xfs_bmapi_read(ip, startoffset_fsb, 1,
-                                       &imap, &nimap, 0);
-               if (error)
-                       goto out_unlock_iolock;
-               ASSERT(nimap == 0 || nimap == 1);
-               if (nimap && imap.br_startblock != HOLESTARTBLOCK) {
-                       xfs_daddr_t     block;
-
-                       ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
-                       block = imap.br_startblock;
-                       mod = do_div(block, mp->m_sb.sb_rextsize);
-                       if (mod)
-                               startoffset_fsb += mp->m_sb.sb_rextsize - mod;
-               }
-               nimap = 1;
-               error = xfs_bmapi_read(ip, endoffset_fsb - 1, 1,
-                                       &imap, &nimap, 0);
-               if (error)
-                       goto out_unlock_iolock;
-               ASSERT(nimap == 0 || nimap == 1);
-               if (nimap && imap.br_startblock != HOLESTARTBLOCK) {
-                       ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
-                       mod++;
-                       if (mod && (mod != mp->m_sb.sb_rextsize))
-                               endoffset_fsb -= mod;
-               }
-       }
-       if ((done = (endoffset_fsb <= startoffset_fsb)))
-               /*
-                * One contiguous piece to clear
-                */
-               error = xfs_zero_remaining_bytes(ip, offset, offset + len - 1);
-       else {
-               /*
-                * Some full blocks, possibly two pieces to clear
-                */
-               if (offset < XFS_FSB_TO_B(mp, startoffset_fsb))
-                       error = xfs_zero_remaining_bytes(ip, offset,
-                               XFS_FSB_TO_B(mp, startoffset_fsb) - 1);
-               if (!error &&
-                   XFS_FSB_TO_B(mp, endoffset_fsb) < offset + len)
-                       error = xfs_zero_remaining_bytes(ip,
-                               XFS_FSB_TO_B(mp, endoffset_fsb),
-                               offset + len - 1);
-       }
-
-       /*
-        * free file space until done or until there is an error
-        */
-       resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
-       while (!error && !done) {
-
-               /*
-                * allocate and setup the transaction. Allow this
-                * transaction to dip into the reserve blocks to ensure
-                * the freeing of the space succeeds at ENOSPC.
-                */
-               tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
-               tp->t_flags |= XFS_TRANS_RESERVE;
-               error = xfs_trans_reserve(tp,
-                                         resblks,
-                                         XFS_WRITE_LOG_RES(mp),
-                                         0,
-                                         XFS_TRANS_PERM_LOG_RES,
-                                         XFS_WRITE_LOG_COUNT);
-
-               /*
-                * check for running out of space
-                */
-               if (error) {
-                       /*
-                        * Free the transaction structure.
-                        */
-                       ASSERT(error == ENOSPC || XFS_FORCED_SHUTDOWN(mp));
-                       xfs_trans_cancel(tp, 0);
-                       break;
-               }
-               xfs_ilock(ip, XFS_ILOCK_EXCL);
-               error = xfs_trans_reserve_quota(tp, mp,
-                               ip->i_udquot, ip->i_gdquot, ip->i_pdquot,
-                               resblks, 0, XFS_QMOPT_RES_REGBLKS);
-               if (error)
-                       goto error1;
-
-               xfs_trans_ijoin(tp, ip, 0);
-
-               /*
-                * issue the bunmapi() call to free the blocks
-                */
-               xfs_bmap_init(&free_list, &firstfsb);
-               error = xfs_bunmapi(tp, ip, startoffset_fsb,
-                                 endoffset_fsb - startoffset_fsb,
-                                 0, 2, &firstfsb, &free_list, &done);
-               if (error) {
-                       goto error0;
-               }
-
-               /*
-                * complete the transaction
-                */
-               error = xfs_bmap_finish(&tp, &free_list, &committed);
-               if (error) {
-                       goto error0;
-               }
-
-               error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
-               xfs_iunlock(ip, XFS_ILOCK_EXCL);
-       }
-
- out_unlock_iolock:
-       if (need_iolock)
-               xfs_iunlock(ip, XFS_IOLOCK_EXCL);
-       return error;
-
- error0:
-       xfs_bmap_cancel(&free_list);
- error1:
-       xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
-       xfs_iunlock(ip, need_iolock ? (XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL) :
-                   XFS_ILOCK_EXCL);
-       return error;
-}
-
-
-STATIC int
-xfs_zero_file_space(
-       struct xfs_inode        *ip,
-       xfs_off_t               offset,
-       xfs_off_t               len,
-       int                     attr_flags)
-{
-       struct xfs_mount        *mp = ip->i_mount;
-       uint                    granularity;
-       xfs_off_t               start_boundary;
-       xfs_off_t               end_boundary;
-       int                     error;
-
-       granularity = max_t(uint, 1 << mp->m_sb.sb_blocklog, PAGE_CACHE_SIZE);
-
-       /*
-        * Round the range of extents we are going to convert inwards.  If the
-        * offset is aligned, then it doesn't get changed so we zero from the
-        * start of the block offset points to.
-        */
-       start_boundary = round_up(offset, granularity);
-       end_boundary = round_down(offset + len, granularity);
-
-       ASSERT(start_boundary >= offset);
-       ASSERT(end_boundary <= offset + len);
-
-       if (!(attr_flags & XFS_ATTR_NOLOCK))
-               xfs_ilock(ip, XFS_IOLOCK_EXCL);
-
-       if (start_boundary < end_boundary - 1) {
-               /* punch out the page cache over the conversion range */
-               truncate_pagecache_range(VFS_I(ip), start_boundary,
-                                        end_boundary - 1);
-               /* convert the blocks */
-               error = xfs_alloc_file_space(ip, start_boundary,
-                                       end_boundary - start_boundary - 1,
-                                       XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT,
-                                       attr_flags);
-               if (error)
-                       goto out_unlock;
-
-               /* We've handled the interior of the range, now for the edges */
-               if (start_boundary != offset)
-                       error = xfs_iozero(ip, offset, start_boundary - offset);
-               if (error)
-                       goto out_unlock;
-
-               if (end_boundary != offset + len)
-                       error = xfs_iozero(ip, end_boundary,
-                                          offset + len - end_boundary);
-
-       } else {
-               /*
-                * It's either a sub-granularity range or the range spanned lies
-                * partially across two adjacent blocks.
-                */
-               error = xfs_iozero(ip, offset, len);
-       }
-
-out_unlock:
-       if (!(attr_flags & XFS_ATTR_NOLOCK))
-               xfs_iunlock(ip, XFS_IOLOCK_EXCL);
-       return error;
-
-}
-
-/*
- * xfs_change_file_space()
- *      This routine allocates or frees disk space for the given file.
- *      The user specified parameters are checked for alignment and size
- *      limitations.
- *
- * RETURNS:
- *       0 on success
- *      errno on error
- *
- */
-int
-xfs_change_file_space(
-       xfs_inode_t     *ip,
-       int             cmd,
-       xfs_flock64_t   *bf,
-       xfs_off_t       offset,
-       int             attr_flags)
-{
-       xfs_mount_t     *mp = ip->i_mount;
-       int             clrprealloc;
-       int             error;
-       xfs_fsize_t     fsize;
-       int             setprealloc;
-       xfs_off_t       startoffset;
-       xfs_trans_t     *tp;
-       struct iattr    iattr;
-
-       if (!S_ISREG(ip->i_d.di_mode))
-               return XFS_ERROR(EINVAL);
-
-       switch (bf->l_whence) {
-       case 0: /*SEEK_SET*/
-               break;
-       case 1: /*SEEK_CUR*/
-               bf->l_start += offset;
-               break;
-       case 2: /*SEEK_END*/
-               bf->l_start += XFS_ISIZE(ip);
-               break;
-       default:
-               return XFS_ERROR(EINVAL);
-       }
-
-       /*
-        * length of <= 0 for resv/unresv/zero is invalid.  length for
-        * alloc/free is ignored completely and we have no idea what userspace
-        * might have set it to, so set it to zero to allow range
-        * checks to pass.
-        */
-       switch (cmd) {
-       case XFS_IOC_ZERO_RANGE:
-       case XFS_IOC_RESVSP:
-       case XFS_IOC_RESVSP64:
-       case XFS_IOC_UNRESVSP:
-       case XFS_IOC_UNRESVSP64:
-               if (bf->l_len <= 0)
-                       return XFS_ERROR(EINVAL);
-               break;
-       default:
-               bf->l_len = 0;
-               break;
-       }
-
-       if (bf->l_start < 0 ||
-           bf->l_start > mp->m_super->s_maxbytes ||
-           bf->l_start + bf->l_len < 0 ||
-           bf->l_start + bf->l_len >= mp->m_super->s_maxbytes)
-               return XFS_ERROR(EINVAL);
-
-       bf->l_whence = 0;
-
-       startoffset = bf->l_start;
-       fsize = XFS_ISIZE(ip);
-
-       setprealloc = clrprealloc = 0;
-       switch (cmd) {
-       case XFS_IOC_ZERO_RANGE:
-               error = xfs_zero_file_space(ip, startoffset, bf->l_len,
-                                               attr_flags);
-               if (error)
-                       return error;
-               setprealloc = 1;
-               break;
-
-       case XFS_IOC_RESVSP:
-       case XFS_IOC_RESVSP64:
-               error = xfs_alloc_file_space(ip, startoffset, bf->l_len,
-                                               XFS_BMAPI_PREALLOC, attr_flags);
-               if (error)
-                       return error;
-               setprealloc = 1;
-               break;
-
-       case XFS_IOC_UNRESVSP:
-       case XFS_IOC_UNRESVSP64:
-               if ((error = xfs_free_file_space(ip, startoffset, bf->l_len,
-                                                               attr_flags)))
-                       return error;
-               break;
-
-       case XFS_IOC_ALLOCSP:
-       case XFS_IOC_ALLOCSP64:
-       case XFS_IOC_FREESP:
-       case XFS_IOC_FREESP64:
-               /*
-                * These operations actually do IO when extending the file, but
-                * the allocation is done seperately to the zeroing that is
-                * done. This set of operations need to be serialised against
-                * other IO operations, such as truncate and buffered IO. We
-                * need to take the IOLOCK here to serialise the allocation and
-                * zeroing IO to prevent other IOLOCK holders (e.g. getbmap,
-                * truncate, direct IO) from racing against the transient
-                * allocated but not written state we can have here.
-                */
-               xfs_ilock(ip, XFS_IOLOCK_EXCL);
-               if (startoffset > fsize) {
-                       error = xfs_alloc_file_space(ip, fsize,
-                                       startoffset - fsize, 0,
-                                       attr_flags | XFS_ATTR_NOLOCK);
-                       if (error) {
-                               xfs_iunlock(ip, XFS_IOLOCK_EXCL);
-                               break;
-                       }
-               }
-
-               iattr.ia_valid = ATTR_SIZE;
-               iattr.ia_size = startoffset;
-
-               error = xfs_setattr_size(ip, &iattr,
-                                        attr_flags | XFS_ATTR_NOLOCK);
-               xfs_iunlock(ip, XFS_IOLOCK_EXCL);
-
-               if (error)
-                       return error;
-
-               clrprealloc = 1;
-               break;
-
-       default:
-               ASSERT(0);
-               return XFS_ERROR(EINVAL);
-       }
-
-       /*
-        * update the inode timestamp, mode, and prealloc flag bits
-        */
-       tp = xfs_trans_alloc(mp, XFS_TRANS_WRITEID);
-
-       if ((error = xfs_trans_reserve(tp, 0, XFS_WRITEID_LOG_RES(mp),
-                                     0, 0, 0))) {
-               /* ASSERT(0); */
-               xfs_trans_cancel(tp, 0);
-               return error;
-       }
-
-       xfs_ilock(ip, XFS_ILOCK_EXCL);
-       xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
-
-       if ((attr_flags & XFS_ATTR_DMI) == 0) {
-               ip->i_d.di_mode &= ~S_ISUID;
-
-               /*
-                * Note that we don't have to worry about mandatory
-                * file locking being disabled here because we only
-                * clear the S_ISGID bit if the Group execute bit is
-                * on, but if it was on then mandatory locking wouldn't
-                * have been enabled.
-                */
-               if (ip->i_d.di_mode & S_IXGRP)
-                       ip->i_d.di_mode &= ~S_ISGID;
-
-               xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
-       }
-       if (setprealloc)
-               ip->i_d.di_flags |= XFS_DIFLAG_PREALLOC;
-       else if (clrprealloc)
-               ip->i_d.di_flags &= ~XFS_DIFLAG_PREALLOC;
-
-       xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
-       if (attr_flags & XFS_ATTR_SYNC)
-               xfs_trans_set_sync(tp);
-       return xfs_trans_commit(tp, 0);
-}
diff --git a/fs/xfs/xfs_vnodeops.h b/fs/xfs/xfs_vnodeops.h
deleted file mode 100644 (file)
index 38c67c3..0000000
+++ /dev/null
@@ -1,55 +0,0 @@
-#ifndef _XFS_VNODEOPS_H
-#define _XFS_VNODEOPS_H 1
-
-struct attrlist_cursor_kern;
-struct file;
-struct iattr;
-struct inode;
-struct iovec;
-struct kiocb;
-struct pipe_inode_info;
-struct uio;
-struct xfs_inode;
-
-
-int xfs_setattr_nonsize(struct xfs_inode *ip, struct iattr *vap, int flags);
-int xfs_setattr_size(struct xfs_inode *ip, struct iattr *vap, int flags);
-#define        XFS_ATTR_DMI            0x01    /* invocation from a DMI function */
-#define        XFS_ATTR_NONBLOCK       0x02    /* return EAGAIN if operation would block */
-#define XFS_ATTR_NOLOCK                0x04    /* Don't grab any conflicting locks */
-#define XFS_ATTR_NOACL         0x08    /* Don't call xfs_acl_chmod */
-#define XFS_ATTR_SYNC          0x10    /* synchronous operation required */
-
-int xfs_readlink(struct xfs_inode *ip, char *link);
-int xfs_release(struct xfs_inode *ip);
-int xfs_inactive(struct xfs_inode *ip);
-int xfs_lookup(struct xfs_inode *dp, struct xfs_name *name,
-               struct xfs_inode **ipp, struct xfs_name *ci_name);
-int xfs_create(struct xfs_inode *dp, struct xfs_name *name, umode_t mode,
-               xfs_dev_t rdev, struct xfs_inode **ipp);
-int xfs_remove(struct xfs_inode *dp, struct xfs_name *name,
-               struct xfs_inode *ip);
-int xfs_link(struct xfs_inode *tdp, struct xfs_inode *sip,
-               struct xfs_name *target_name);
-int xfs_readdir(struct xfs_inode *dp, struct dir_context *ctx, size_t bufsize);
-int xfs_symlink(struct xfs_inode *dp, struct xfs_name *link_name,
-               const char *target_path, umode_t mode, struct xfs_inode **ipp);
-int xfs_set_dmattrs(struct xfs_inode *ip, u_int evmask, u_int16_t state);
-int xfs_change_file_space(struct xfs_inode *ip, int cmd,
-               xfs_flock64_t *bf, xfs_off_t offset, int attr_flags);
-int xfs_rename(struct xfs_inode *src_dp, struct xfs_name *src_name,
-               struct xfs_inode *src_ip, struct xfs_inode *target_dp,
-               struct xfs_name *target_name, struct xfs_inode *target_ip);
-int xfs_attr_get(struct xfs_inode *ip, const unsigned char *name,
-               unsigned char *value, int *valuelenp, int flags);
-int xfs_attr_set(struct xfs_inode *dp, const unsigned char *name,
-               unsigned char *value, int valuelen, int flags);
-int xfs_attr_remove(struct xfs_inode *dp, const unsigned char *name, int flags);
-int xfs_attr_list(struct xfs_inode *dp, char *buffer, int bufsize,
-               int flags, struct attrlist_cursor_kern *cursor);
-
-int xfs_iozero(struct xfs_inode *, loff_t, size_t);
-int xfs_zero_eof(struct xfs_inode *, xfs_off_t, xfs_fsize_t);
-int xfs_free_eofblocks(struct xfs_mount *, struct xfs_inode *, bool);
-
-#endif /* _XFS_VNODEOPS_H */
index 87d3e03878c8da30b762d19263ec3e1cbe02aa24..e01f35ea76ba436310f11d82b9fdf78322d8f6fe 100644 (file)
  */
 
 #include "xfs.h"
+#include "xfs_log_format.h"
 #include "xfs_da_btree.h"
 #include "xfs_bmap_btree.h"
 #include "xfs_inode.h"
 #include "xfs_attr.h"
 #include "xfs_attr_leaf.h"
 #include "xfs_acl.h"
-#include "xfs_vnodeops.h"
 
 #include <linux/posix_acl_xattr.h>
 #include <linux/xattr.h>
index 94383a70c1a3d738413e06389cc6937909554823..22650cd9e72aba8c926704bad6af98d542adafaa 100644 (file)
@@ -56,6 +56,16 @@ acpi_evaluate_hotplug_ost(acpi_handle handle, u32 source_event,
 
 acpi_status
 acpi_get_physical_device_location(acpi_handle handle, struct acpi_pld_info **pld);
+
+bool acpi_has_method(acpi_handle handle, char *name);
+acpi_status acpi_execute_simple_method(acpi_handle handle, char *method,
+                                      u64 arg);
+acpi_status acpi_evaluate_ej0(acpi_handle handle);
+acpi_status acpi_evaluate_lck(acpi_handle handle, int lock);
+bool acpi_ata_match(acpi_handle handle);
+bool acpi_bay_match(acpi_handle handle);
+bool acpi_dock_match(acpi_handle handle);
+
 #ifdef CONFIG_ACPI
 
 #include <linux/proc_fs.h>
@@ -352,14 +362,11 @@ extern int acpi_notifier_call_chain(struct acpi_device *, u32, u32);
 extern int register_acpi_notifier(struct notifier_block *);
 extern int unregister_acpi_notifier(struct notifier_block *);
 
-extern int register_acpi_bus_notifier(struct notifier_block *nb);
-extern void unregister_acpi_bus_notifier(struct notifier_block *nb);
 /*
  * External Functions
  */
 
 int acpi_bus_get_device(acpi_handle handle, struct acpi_device **device);
-void acpi_bus_data_handler(acpi_handle handle, void *context);
 acpi_status acpi_bus_get_status_handle(acpi_handle handle,
                                       unsigned long long *sta);
 int acpi_bus_get_status(struct acpi_device *device);
@@ -379,15 +386,6 @@ bool acpi_bus_can_wakeup(acpi_handle handle);
 static inline bool acpi_bus_can_wakeup(acpi_handle handle) { return false; }
 #endif
 
-#ifdef CONFIG_ACPI_PROC_EVENT
-int acpi_bus_generate_proc_event(struct acpi_device *device, u8 type, int data);
-int acpi_bus_generate_proc_event4(const char *class, const char *bid, u8 type, int data);
-int acpi_bus_receive_event(struct acpi_bus_event *event);
-#else
-static inline int acpi_bus_generate_proc_event(struct acpi_device *device, u8 type, int data)
-       { return 0; }
-#endif
-
 void acpi_scan_lock_acquire(void);
 void acpi_scan_lock_release(void);
 int acpi_scan_add_handler(struct acpi_scan_handler *handler);
@@ -478,7 +476,8 @@ static inline int acpi_pm_device_sleep_state(struct device *d, int *p, int m)
        if (p)
                *p = ACPI_STATE_D0;
 
-       return (m >= ACPI_STATE_D0 && m <= ACPI_STATE_D3) ? m : ACPI_STATE_D0;
+       return (m >= ACPI_STATE_D0 && m <= ACPI_STATE_D3_COLD) ?
+               m : ACPI_STATE_D0;
 }
 static inline void acpi_dev_pm_add_dependent(acpi_handle handle,
                                             struct device *depdev) {}
index b420939f5eb5608f2335e0b4ae4087176911022b..1cedfcb1bd8878ff7466e4ab6d9285e3a811a90a 100644 (file)
@@ -113,14 +113,13 @@ void pci_acpi_crs_quirks(void);
                                   Dock Station
   -------------------------------------------------------------------------- */
 struct acpi_dock_ops {
+       acpi_notify_handler fixup;
        acpi_notify_handler handler;
        acpi_notify_handler uevent;
 };
 
-#if defined(CONFIG_ACPI_DOCK) || defined(CONFIG_ACPI_DOCK_MODULE)
+#ifdef CONFIG_ACPI_DOCK
 extern int is_dock_device(acpi_handle handle);
-extern int register_dock_notifier(struct notifier_block *nb);
-extern void unregister_dock_notifier(struct notifier_block *nb);
 extern int register_hotplug_dock_device(acpi_handle handle,
                                        const struct acpi_dock_ops *ops,
                                        void *context,
@@ -132,13 +131,6 @@ static inline int is_dock_device(acpi_handle handle)
 {
        return 0;
 }
-static inline int register_dock_notifier(struct notifier_block *nb)
-{
-       return -ENODEV;
-}
-static inline void unregister_dock_notifier(struct notifier_block *nb)
-{
-}
 static inline int register_hotplug_dock_device(acpi_handle handle,
                                               const struct acpi_dock_ops *ops,
                                               void *context,
@@ -150,6 +142,6 @@ static inline int register_hotplug_dock_device(acpi_handle handle,
 static inline void unregister_hotplug_dock_device(acpi_handle handle)
 {
 }
-#endif
+#endif /* CONFIG_ACPI_DOCK */
 
 #endif /*__ACPI_DRIVERS_H__*/
index 22d497ee6ef95032977a5b4d533f5343d29c4fa5..85bfdbe178052bd2231dce137b1463d174b2bffe 100644 (file)
@@ -46,7 +46,7 @@
 
 /* Current ACPICA subsystem version in YYYYMMDD format */
 
-#define ACPI_CA_VERSION                 0x20130517
+#define ACPI_CA_VERSION                 0x20130725
 
 #include <acpi/acconfig.h>
 #include <acpi/actypes.h>
@@ -147,6 +147,8 @@ acpi_status acpi_install_interface(acpi_string interface_name);
 
 acpi_status acpi_remove_interface(acpi_string interface_name);
 
+acpi_status acpi_update_interfaces(u8 action);
+
 u32
 acpi_check_address_range(acpi_adr_space_type space_id,
                         acpi_physical_address address,
@@ -210,8 +212,8 @@ acpi_status
 acpi_walk_namespace(acpi_object_type type,
                    acpi_handle start_object,
                    u32 max_depth,
-                   acpi_walk_callback pre_order_visit,
-                   acpi_walk_callback post_order_visit,
+                   acpi_walk_callback descending_callback,
+                   acpi_walk_callback ascending_callback,
                    void *context, void **return_value);
 
 acpi_status
index 22b03c9286e94d0d4819d5e6610f96a850b4f713..b748aefce929983cf94d3729db782e62c2843768 100644 (file)
@@ -668,13 +668,6 @@ typedef u32 acpi_event_status;
 #define ACPI_EVENT_FLAG_SET             (acpi_event_status) 0x04
 #define ACPI_EVENT_FLAG_HANDLE         (acpi_event_status) 0x08
 
-/*
- * General Purpose Events (GPE)
- */
-#define ACPI_GPE_INVALID                0xFF
-#define ACPI_GPE_MAX                    0xFF
-#define ACPI_NUM_GPE                    256
-
 /* Actions for acpi_set_gpe, acpi_gpe_wakeup, acpi_hw_low_set_gpe */
 
 #define ACPI_GPE_ENABLE                 0
@@ -1144,7 +1137,19 @@ struct acpi_memory_list {
 #endif
 };
 
-/* Definitions for _OSI support */
+/* Definitions of _OSI support */
+
+#define ACPI_VENDOR_STRINGS                 0x01
+#define ACPI_FEATURE_STRINGS                0x02
+#define ACPI_ENABLE_INTERFACES              0x00
+#define ACPI_DISABLE_INTERFACES             0x04
+
+#define ACPI_DISABLE_ALL_VENDOR_STRINGS     (ACPI_DISABLE_INTERFACES | ACPI_VENDOR_STRINGS)
+#define ACPI_DISABLE_ALL_FEATURE_STRINGS    (ACPI_DISABLE_INTERFACES | ACPI_FEATURE_STRINGS)
+#define ACPI_DISABLE_ALL_STRINGS            (ACPI_DISABLE_INTERFACES | ACPI_VENDOR_STRINGS | ACPI_FEATURE_STRINGS)
+#define ACPI_ENABLE_ALL_VENDOR_STRINGS      (ACPI_ENABLE_INTERFACES | ACPI_VENDOR_STRINGS)
+#define ACPI_ENABLE_ALL_FEATURE_STRINGS     (ACPI_ENABLE_INTERFACES | ACPI_FEATURE_STRINGS)
+#define ACPI_ENABLE_ALL_STRINGS             (ACPI_ENABLE_INTERFACES | ACPI_VENDOR_STRINGS | ACPI_FEATURE_STRINGS)
 
 #define ACPI_OSI_WIN_2000               0x01
 #define ACPI_OSI_WIN_XP                 0x02
index 3744d2a642dfbdf8a13141f0e2491b372a1d7a55..13621cc8cf4c454f546a6fbb33fb5e85e8b1b1aa 100644 (file)
@@ -113,4 +113,6 @@ void scatterwalk_done(struct scatter_walk *walk, int out, int more);
 void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg,
                              unsigned int start, unsigned int nbytes, int out);
 
+int scatterwalk_bytes_sglen(struct scatterlist *sg, int num_bytes);
+
 #endif  /* _CRYPTO_SCATTERWALK_H */
index 12083dc862a9a6e03f12184a1fd1ff11d4e8ed04..90833dccc91960918c9657fd4087b9594812b21f 100644 (file)
@@ -45,7 +45,6 @@
 #include <linux/kernel.h>
 #include <linux/miscdevice.h>
 #include <linux/fs.h>
-#include <linux/proc_fs.h>
 #include <linux/init.h>
 #include <linux/file.h>
 #include <linux/platform_device.h>
 #endif
 #include <asm/mman.h>
 #include <asm/uaccess.h>
-#if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE)
 #include <linux/types.h>
 #include <linux/agp_backend.h>
-#endif
 #include <linux/workqueue.h>
 #include <linux/poll.h>
 #include <asm/pgalloc.h>
 #include <drm/drm.h>
 #include <drm/drm_sarea.h>
+#include <drm/drm_vma_manager.h>
 
 #include <linux/idr.h>
 
 #define __OS_HAS_AGP (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE)))
-#define __OS_HAS_MTRR (defined(CONFIG_MTRR))
 
 struct module;
 
@@ -140,16 +137,11 @@ int drm_err(const char *func, const char *format, ...);
 /* driver capabilities and requirements mask */
 #define DRIVER_USE_AGP     0x1
 #define DRIVER_REQUIRE_AGP 0x2
-#define DRIVER_USE_MTRR    0x4
 #define DRIVER_PCI_DMA     0x8
 #define DRIVER_SG          0x10
 #define DRIVER_HAVE_DMA    0x20
 #define DRIVER_HAVE_IRQ    0x40
 #define DRIVER_IRQ_SHARED  0x80
-#define DRIVER_IRQ_VBL     0x100
-#define DRIVER_DMA_QUEUE   0x200
-#define DRIVER_FB_DMA      0x400
-#define DRIVER_IRQ_VBL2    0x800
 #define DRIVER_GEM         0x1000
 #define DRIVER_MODESET     0x2000
 #define DRIVER_PRIME       0x4000
@@ -168,13 +160,7 @@ int drm_err(const char *func, const char *format, ...);
 #define DRM_MAGIC_HASH_ORDER  4  /**< Size of key hash table. Must be power of 2. */
 #define DRM_KERNEL_CONTEXT    0         /**< Change drm_resctx if changed */
 #define DRM_RESERVED_CONTEXTS 1         /**< Change drm_resctx if changed */
-#define DRM_LOOPING_LIMIT     5000000
-#define DRM_TIME_SLICE       (HZ/20)  /**< Time slice for GLXContexts */
-#define DRM_LOCK_SLICE       1 /**< Time slice for lock, in jiffies */
-
-#define DRM_FLAG_DEBUG   0x01
 
-#define DRM_MAX_CTXBITMAP (PAGE_SIZE * 8)
 #define DRM_MAP_HASH_OFFSET 0x10000000
 
 /*@}*/
@@ -263,9 +249,6 @@ int drm_err(const char *func, const char *format, ...);
 
 #define DRM_ARRAY_SIZE(x) ARRAY_SIZE(x)
 
-#define DRM_LEFTCOUNT(x) (((x)->rp + (x)->count - (x)->wp) % ((x)->count + 1))
-#define DRM_BUFCOUNT(x) ((x)->count - DRM_LEFTCOUNT(x))
-
 #define DRM_IF_VERSION(maj, min) (maj << 16 | min)
 
 /**
@@ -587,7 +570,6 @@ struct drm_map_list {
        struct drm_local_map *map;      /**< mapping */
        uint64_t user_token;
        struct drm_master *master;
-       struct drm_mm_node *file_offset_node;   /**< fake offset */
 };
 
 /**
@@ -622,8 +604,7 @@ struct drm_ati_pcigart_info {
  * GEM specific mm private for tracking GEM objects
  */
 struct drm_gem_mm {
-       struct drm_mm offset_manager;   /**< Offset mgmt for buffer objects */
-       struct drm_open_hash offset_hash; /**< User token hash table for maps */
+       struct drm_vma_offset_manager vma_manager;
 };
 
 /**
@@ -634,8 +615,16 @@ struct drm_gem_object {
        /** Reference count of this object */
        struct kref refcount;
 
-       /** Handle count of this object. Each handle also holds a reference */
-       atomic_t handle_count; /* number of handles on this object */
+       /**
+        * handle_count - gem file_priv handle count of this object
+        *
+        * Each handle also holds a reference. Note that when the handle_count
+        * drops to 0 any global names (e.g. the id in the flink namespace) will
+        * be cleared.
+        *
+        * Protected by dev->object_name_lock.
+        * */
+       unsigned handle_count;
 
        /** Related drm device */
        struct drm_device *dev;
@@ -644,7 +633,7 @@ struct drm_gem_object {
        struct file *filp;
 
        /* Mapping info for this object */
-       struct drm_map_list map_list;
+       struct drm_vma_offset_node vma_node;
 
        /**
         * Size of the object, in bytes.  Immutable over the object's
@@ -678,10 +667,32 @@ struct drm_gem_object {
 
        void *driver_private;
 
-       /* dma buf exported from this GEM object */
-       struct dma_buf *export_dma_buf;
+       /**
+        * dma_buf - dma buf associated with this GEM object
+        *
+        * Pointer to the dma-buf associated with this gem object (either
+        * through importing or exporting). We break the resulting reference
+        * loop when the last gem handle for this object is released.
+        *
+        * Protected by obj->object_name_lock
+        */
+       struct dma_buf *dma_buf;
 
-       /* dma buf attachment backing this object */
+       /**
+        * import_attach - dma buf attachment backing this object
+        *
+        * Any foreign dma_buf imported as a gem object has this set to the
+        * attachment point for the device. This is invariant over the lifetime
+        * of a gem object.
+        *
+        * The driver's ->gem_free_object callback is responsible for cleaning
+        * up the dma_buf attachment and references acquired at import time.
+        *
+        * Note that the drm gem/prime core does not depend upon drivers setting
+        * this field any more. So for drivers where this doesn't make sense
+        * (e.g. virtual devices or a displaylink behind an usb bus) they can
+        * simply leave it as NULL.
+        */
        struct dma_buf_attachment *import_attach;
 };
 
@@ -737,6 +748,7 @@ struct drm_bus {
        int (*irq_by_busid)(struct drm_device *dev, struct drm_irq_busid *p);
        /* hooks that are for PCI */
        int (*agp_init)(struct drm_device *dev);
+       void (*agp_destroy)(struct drm_device *dev);
 
 };
 
@@ -885,8 +897,6 @@ struct drm_driver {
        void (*irq_preinstall) (struct drm_device *dev);
        int (*irq_postinstall) (struct drm_device *dev);
        void (*irq_uninstall) (struct drm_device *dev);
-       void (*set_version) (struct drm_device *dev,
-                            struct drm_set_version *sv);
 
        /* Master routines */
        int (*master_create)(struct drm_device *dev, struct drm_master *master);
@@ -966,7 +976,7 @@ struct drm_driver {
 
        u32 driver_features;
        int dev_priv_size;
-       struct drm_ioctl_desc *ioctls;
+       const struct drm_ioctl_desc *ioctls;
        int num_ioctls;
        const struct file_operations *fops;
        union {
@@ -1037,8 +1047,6 @@ struct drm_minor {
        struct device kdev;             /**< Linux device */
        struct drm_device *dev;
 
-       struct proc_dir_entry *proc_root;  /**< proc directory entry */
-       struct drm_info_node proc_nodes;
        struct dentry *debugfs_root;
 
        struct list_head debugfs_list;
@@ -1131,12 +1139,7 @@ struct drm_device {
        /*@{ */
        int irq_enabled;                /**< True if irq handler is enabled */
        __volatile__ long context_flag; /**< Context swapping flag */
-       __volatile__ long interrupt_flag; /**< Interruption handler flag */
-       __volatile__ long dma_flag;     /**< DMA dispatch flag */
-       wait_queue_head_t context_wait; /**< Processes waiting on ctx switch */
-       int last_checked;               /**< Last context checked for DMA */
        int last_context;               /**< Last current context */
-       unsigned long last_switch;      /**< jiffies at last context switch */
        /*@} */
 
        struct work_struct work;
@@ -1174,12 +1177,6 @@ struct drm_device {
        spinlock_t event_lock;
 
        /*@} */
-       cycles_t ctx_start;
-       cycles_t lck_start;
-
-       struct fasync_struct *buf_async;/**< Processes waiting for SIGIO */
-       wait_queue_head_t buf_readers;  /**< Processes waiting to read */
-       wait_queue_head_t buf_writers;  /**< Processes waiting to ctx switch */
 
        struct drm_agp_head *agp;       /**< AGP data */
 
@@ -1212,7 +1209,7 @@ struct drm_device {
 
        /** \name GEM information */
        /*@{ */
-       spinlock_t object_name_lock;
+       struct mutex object_name_lock;
        struct idr object_name_idr;
        /*@} */
        int switch_power_state;
@@ -1235,25 +1232,6 @@ static inline int drm_dev_to_irq(struct drm_device *dev)
        return dev->driver->bus->get_irq(dev);
 }
 
-
-#if __OS_HAS_AGP
-static inline int drm_core_has_AGP(struct drm_device *dev)
-{
-       return drm_core_check_feature(dev, DRIVER_USE_AGP);
-}
-#else
-#define drm_core_has_AGP(dev) (0)
-#endif
-
-#if __OS_HAS_MTRR
-static inline int drm_core_has_MTRR(struct drm_device *dev)
-{
-       return drm_core_check_feature(dev, DRIVER_USE_MTRR);
-}
-#else
-#define drm_core_has_MTRR(dev) (0)
-#endif
-
 static inline void drm_device_set_unplugged(struct drm_device *dev)
 {
        smp_wmb();
@@ -1287,7 +1265,6 @@ extern int drm_lastclose(struct drm_device *dev);
 extern struct mutex drm_global_mutex;
 extern int drm_open(struct inode *inode, struct file *filp);
 extern int drm_stub_open(struct inode *inode, struct file *filp);
-extern int drm_fasync(int fd, struct file *filp, int on);
 extern ssize_t drm_read(struct file *filp, char __user *buffer,
                        size_t count, loff_t *offset);
 extern int drm_release(struct inode *inode, struct file *filp);
@@ -1301,14 +1278,6 @@ extern unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait);
 
                                /* Memory management support (drm_memory.h) */
 #include <drm/drm_memory.h>
-extern void drm_free_agp(DRM_AGP_MEM * handle, int pages);
-extern int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start);
-extern DRM_AGP_MEM *drm_agp_bind_pages(struct drm_device *dev,
-                                      struct page **pages,
-                                      unsigned long num_pages,
-                                      uint32_t gtt_offset,
-                                      uint32_t type);
-extern int drm_unbind_agp(DRM_AGP_MEM * handle);
 
                                /* Misc. IOCTL support (drm_ioctl.h) */
 extern int drm_irq_by_busid(struct drm_device *dev, void *data,
@@ -1335,8 +1304,6 @@ extern int drm_resctx(struct drm_device *dev, void *data,
                      struct drm_file *file_priv);
 extern int drm_addctx(struct drm_device *dev, void *data,
                      struct drm_file *file_priv);
-extern int drm_modctx(struct drm_device *dev, void *data,
-                     struct drm_file *file_priv);
 extern int drm_getctx(struct drm_device *dev, void *data,
                      struct drm_file *file_priv);
 extern int drm_switchctx(struct drm_device *dev, void *data,
@@ -1346,9 +1313,10 @@ extern int drm_newctx(struct drm_device *dev, void *data,
 extern int drm_rmctx(struct drm_device *dev, void *data,
                     struct drm_file *file_priv);
 
-extern int drm_ctxbitmap_init(struct drm_device *dev);
-extern void drm_ctxbitmap_cleanup(struct drm_device *dev);
-extern void drm_ctxbitmap_free(struct drm_device *dev, int ctx_handle);
+extern void drm_legacy_ctxbitmap_init(struct drm_device *dev);
+extern void drm_legacy_ctxbitmap_cleanup(struct drm_device *dev);
+extern void drm_legacy_ctxbitmap_release(struct drm_device *dev,
+                                        struct drm_file *file_priv);
 
 extern int drm_setsareactx(struct drm_device *dev, void *data,
                           struct drm_file *file_priv);
@@ -1405,11 +1373,12 @@ extern int drm_freebufs(struct drm_device *dev, void *data,
                        struct drm_file *file_priv);
 extern int drm_mapbufs(struct drm_device *dev, void *data,
                       struct drm_file *file_priv);
-extern int drm_order(unsigned long size);
+extern int drm_dma_ioctl(struct drm_device *dev, void *data,
+                        struct drm_file *file_priv);
 
                                /* DMA support (drm_dma.h) */
-extern int drm_dma_setup(struct drm_device *dev);
-extern void drm_dma_takedown(struct drm_device *dev);
+extern int drm_legacy_dma_setup(struct drm_device *dev);
+extern void drm_legacy_dma_takedown(struct drm_device *dev);
 extern void drm_free_buffer(struct drm_device *dev, struct drm_buf * buf);
 extern void drm_core_reclaim_buffers(struct drm_device *dev,
                                     struct drm_file *filp);
@@ -1423,7 +1392,6 @@ extern int drm_irq_uninstall(struct drm_device *dev);
 extern int drm_vblank_init(struct drm_device *dev, int num_crtcs);
 extern int drm_wait_vblank(struct drm_device *dev, void *data,
                           struct drm_file *filp);
-extern int drm_vblank_wait(struct drm_device *dev, unsigned int *vbl_seq);
 extern u32 drm_vblank_count(struct drm_device *dev, int crtc);
 extern u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc,
                                     struct timeval *vblanktime);
@@ -1465,31 +1433,8 @@ extern int drm_modeset_ctl(struct drm_device *dev, void *data,
                           struct drm_file *file_priv);
 
                                /* AGP/GART support (drm_agpsupport.h) */
-extern struct drm_agp_head *drm_agp_init(struct drm_device *dev);
-extern int drm_agp_acquire(struct drm_device *dev);
-extern int drm_agp_acquire_ioctl(struct drm_device *dev, void *data,
-                                struct drm_file *file_priv);
-extern int drm_agp_release(struct drm_device *dev);
-extern int drm_agp_release_ioctl(struct drm_device *dev, void *data,
-                                struct drm_file *file_priv);
-extern int drm_agp_enable(struct drm_device *dev, struct drm_agp_mode mode);
-extern int drm_agp_enable_ioctl(struct drm_device *dev, void *data,
-                               struct drm_file *file_priv);
-extern int drm_agp_info(struct drm_device *dev, struct drm_agp_info *info);
-extern int drm_agp_info_ioctl(struct drm_device *dev, void *data,
-                       struct drm_file *file_priv);
-extern int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request);
-extern int drm_agp_alloc_ioctl(struct drm_device *dev, void *data,
-                        struct drm_file *file_priv);
-extern int drm_agp_free(struct drm_device *dev, struct drm_agp_buffer *request);
-extern int drm_agp_free_ioctl(struct drm_device *dev, void *data,
-                       struct drm_file *file_priv);
-extern int drm_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request);
-extern int drm_agp_unbind_ioctl(struct drm_device *dev, void *data,
-                         struct drm_file *file_priv);
-extern int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request);
-extern int drm_agp_bind_ioctl(struct drm_device *dev, void *data,
-                       struct drm_file *file_priv);
+
+#include <drm/drm_agpsupport.h>
 
                                /* Stub support (drm_stub.h) */
 extern int drm_setmaster_ioctl(struct drm_device *dev, void *data,
@@ -1510,17 +1455,12 @@ extern unsigned int drm_timestamp_precision;
 extern unsigned int drm_timestamp_monotonic;
 
 extern struct class *drm_class;
-extern struct proc_dir_entry *drm_proc_root;
 extern struct dentry *drm_debugfs_root;
 
 extern struct idr drm_minors_idr;
 
 extern struct drm_local_map *drm_getsarea(struct drm_device *dev);
 
-                               /* Proc support (drm_proc.h) */
-extern int drm_proc_init(struct drm_minor *minor, struct proc_dir_entry *root);
-extern int drm_proc_cleanup(struct drm_minor *minor, struct proc_dir_entry *root);
-
                                /* Debugfs support */
 #if defined(CONFIG_DEBUG_FS)
 extern int drm_debugfs_init(struct drm_minor *minor, int minor_id,
@@ -1550,6 +1490,7 @@ extern struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
                struct dma_buf *dma_buf);
 extern int drm_gem_prime_fd_to_handle(struct drm_device *dev,
                struct drm_file *file_priv, int prime_fd, uint32_t *handle);
+extern void drm_gem_dmabuf_release(struct dma_buf *dma_buf);
 
 extern int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
                                        struct drm_file *file_priv);
@@ -1561,25 +1502,22 @@ extern int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **
 extern struct sg_table *drm_prime_pages_to_sg(struct page **pages, int nr_pages);
 extern void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg);
 
+int drm_gem_dumb_destroy(struct drm_file *file,
+                        struct drm_device *dev,
+                        uint32_t handle);
 
 void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv);
 void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv);
-int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t *handle);
-void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf);
-
-int drm_prime_add_dma_buf(struct drm_device *dev, struct drm_gem_object *obj);
-int drm_prime_lookup_obj(struct drm_device *dev, struct dma_buf *buf,
-                        struct drm_gem_object **obj);
+void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf);
 
 #if DRM_DEBUG_CODE
 extern int drm_vma_info(struct seq_file *m, void *data);
 #endif
 
                                /* Scatter Gather Support (drm_scatter.h) */
-extern void drm_sg_cleanup(struct drm_sg_mem * entry);
-extern int drm_sg_alloc_ioctl(struct drm_device *dev, void *data,
+extern void drm_legacy_sg_cleanup(struct drm_device *dev);
+extern int drm_sg_alloc(struct drm_device *dev, void *data,
                        struct drm_file *file_priv);
-extern int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request);
 extern int drm_sg_free(struct drm_device *dev, void *data,
                       struct drm_file *file_priv);
 
@@ -1613,9 +1551,8 @@ struct drm_gem_object *drm_gem_object_alloc(struct drm_device *dev,
                                            size_t size);
 int drm_gem_object_init(struct drm_device *dev,
                        struct drm_gem_object *obj, size_t size);
-int drm_gem_private_object_init(struct drm_device *dev,
-                       struct drm_gem_object *obj, size_t size);
-void drm_gem_object_handle_free(struct drm_gem_object *obj);
+void drm_gem_private_object_init(struct drm_device *dev,
+                                struct drm_gem_object *obj, size_t size);
 void drm_gem_vm_open(struct vm_area_struct *vma);
 void drm_gem_vm_close(struct vm_area_struct *vma);
 int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
@@ -1640,66 +1577,32 @@ drm_gem_object_unreference(struct drm_gem_object *obj)
 static inline void
 drm_gem_object_unreference_unlocked(struct drm_gem_object *obj)
 {
-       if (obj != NULL) {
+       if (obj && !atomic_add_unless(&obj->refcount.refcount, -1, 1)) {
                struct drm_device *dev = obj->dev;
+
                mutex_lock(&dev->struct_mutex);
-               kref_put(&obj->refcount, drm_gem_object_free);
+               if (likely(atomic_dec_and_test(&obj->refcount.refcount)))
+                       drm_gem_object_free(&obj->refcount);
                mutex_unlock(&dev->struct_mutex);
        }
 }
 
+int drm_gem_handle_create_tail(struct drm_file *file_priv,
+                              struct drm_gem_object *obj,
+                              u32 *handlep);
 int drm_gem_handle_create(struct drm_file *file_priv,
                          struct drm_gem_object *obj,
                          u32 *handlep);
 int drm_gem_handle_delete(struct drm_file *filp, u32 handle);
 
-static inline void
-drm_gem_object_handle_reference(struct drm_gem_object *obj)
-{
-       drm_gem_object_reference(obj);
-       atomic_inc(&obj->handle_count);
-}
-
-static inline void
-drm_gem_object_handle_unreference(struct drm_gem_object *obj)
-{
-       if (obj == NULL)
-               return;
-
-       if (atomic_read(&obj->handle_count) == 0)
-               return;
-       /*
-        * Must bump handle count first as this may be the last
-        * ref, in which case the object would disappear before we
-        * checked for a name
-        */
-       if (atomic_dec_and_test(&obj->handle_count))
-               drm_gem_object_handle_free(obj);
-       drm_gem_object_unreference(obj);
-}
-
-static inline void
-drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
-{
-       if (obj == NULL)
-               return;
-
-       if (atomic_read(&obj->handle_count) == 0)
-               return;
-
-       /*
-       * Must bump handle count first as this may be the last
-       * ref, in which case the object would disappear before we
-       * checked for a name
-       */
-
-       if (atomic_dec_and_test(&obj->handle_count))
-               drm_gem_object_handle_free(obj);
-       drm_gem_object_unreference_unlocked(obj);
-}
 
 void drm_gem_free_mmap_offset(struct drm_gem_object *obj);
 int drm_gem_create_mmap_offset(struct drm_gem_object *obj);
+int drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size);
+
+struct page **drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
+void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
+               bool dirty, bool accessed);
 
 struct drm_gem_object *drm_gem_object_lookup(struct drm_device *dev,
                                             struct drm_file *filp,
@@ -1769,9 +1672,6 @@ extern int drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *speed_mask);
 extern int drm_platform_init(struct drm_driver *driver, struct platform_device *platform_device);
 extern void drm_platform_exit(struct drm_driver *driver, struct platform_device *platform_device);
 
-extern int drm_get_platform_dev(struct platform_device *pdev,
-                               struct drm_driver *driver);
-
 /* returns true if currently okay to sleep */
 static __inline__ bool drm_can_sleep(void)
 {
diff --git a/include/drm/drm_agpsupport.h b/include/drm/drm_agpsupport.h
new file mode 100644 (file)
index 0000000..a184eee
--- /dev/null
@@ -0,0 +1,194 @@
+#ifndef _DRM_AGPSUPPORT_H_
+#define _DRM_AGPSUPPORT_H_
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/agp_backend.h>
+#include <drm/drmP.h>
+
+#if __OS_HAS_AGP
+
+void drm_free_agp(DRM_AGP_MEM * handle, int pages);
+int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start);
+int drm_unbind_agp(DRM_AGP_MEM * handle);
+DRM_AGP_MEM *drm_agp_bind_pages(struct drm_device *dev,
+                               struct page **pages,
+                               unsigned long num_pages,
+                               uint32_t gtt_offset,
+                               uint32_t type);
+
+struct drm_agp_head *drm_agp_init(struct drm_device *dev);
+void drm_agp_destroy(struct drm_agp_head *agp);
+void drm_agp_clear(struct drm_device *dev);
+int drm_agp_acquire(struct drm_device *dev);
+int drm_agp_acquire_ioctl(struct drm_device *dev, void *data,
+                         struct drm_file *file_priv);
+int drm_agp_release(struct drm_device *dev);
+int drm_agp_release_ioctl(struct drm_device *dev, void *data,
+                         struct drm_file *file_priv);
+int drm_agp_enable(struct drm_device *dev, struct drm_agp_mode mode);
+int drm_agp_enable_ioctl(struct drm_device *dev, void *data,
+                        struct drm_file *file_priv);
+int drm_agp_info(struct drm_device *dev, struct drm_agp_info *info);
+int drm_agp_info_ioctl(struct drm_device *dev, void *data,
+                      struct drm_file *file_priv);
+int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request);
+int drm_agp_alloc_ioctl(struct drm_device *dev, void *data,
+                       struct drm_file *file_priv);
+int drm_agp_free(struct drm_device *dev, struct drm_agp_buffer *request);
+int drm_agp_free_ioctl(struct drm_device *dev, void *data,
+                      struct drm_file *file_priv);
+int drm_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request);
+int drm_agp_unbind_ioctl(struct drm_device *dev, void *data,
+                        struct drm_file *file_priv);
+int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request);
+int drm_agp_bind_ioctl(struct drm_device *dev, void *data,
+                      struct drm_file *file_priv);
+
+static inline int drm_core_has_AGP(struct drm_device *dev)
+{
+       return drm_core_check_feature(dev, DRIVER_USE_AGP);
+}
+
+#else /* __OS_HAS_AGP */
+
+static inline void drm_free_agp(DRM_AGP_MEM * handle, int pages)
+{
+}
+
+static inline int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start)
+{
+       return -ENODEV;
+}
+
+static inline int drm_unbind_agp(DRM_AGP_MEM * handle)
+{
+       return -ENODEV;
+}
+
+static inline DRM_AGP_MEM *drm_agp_bind_pages(struct drm_device *dev,
+                                             struct page **pages,
+                                             unsigned long num_pages,
+                                             uint32_t gtt_offset,
+                                             uint32_t type)
+{
+       return NULL;
+}
+
+static inline struct drm_agp_head *drm_agp_init(struct drm_device *dev)
+{
+       return NULL;
+}
+
+static inline void drm_agp_destroy(struct drm_agp_head *agp)
+{
+}
+
+static inline void drm_agp_clear(struct drm_device *dev)
+{
+}
+
+static inline int drm_agp_acquire(struct drm_device *dev)
+{
+       return -ENODEV;
+}
+
+static inline int drm_agp_acquire_ioctl(struct drm_device *dev, void *data,
+                                       struct drm_file *file_priv)
+{
+       return -ENODEV;
+}
+
+static inline int drm_agp_release(struct drm_device *dev)
+{
+       return -ENODEV;
+}
+
+static inline int drm_agp_release_ioctl(struct drm_device *dev, void *data,
+                                       struct drm_file *file_priv)
+{
+       return -ENODEV;
+}
+
+static inline int drm_agp_enable(struct drm_device *dev,
+                                struct drm_agp_mode mode)
+{
+       return -ENODEV;
+}
+
+static inline int drm_agp_enable_ioctl(struct drm_device *dev, void *data,
+                                      struct drm_file *file_priv)
+{
+       return -ENODEV;
+}
+
+static inline int drm_agp_info(struct drm_device *dev,
+                              struct drm_agp_info *info)
+{
+       return -ENODEV;
+}
+
+static inline int drm_agp_info_ioctl(struct drm_device *dev, void *data,
+                                    struct drm_file *file_priv)
+{
+       return -ENODEV;
+}
+
+static inline int drm_agp_alloc(struct drm_device *dev,
+                               struct drm_agp_buffer *request)
+{
+       return -ENODEV;
+}
+
+static inline int drm_agp_alloc_ioctl(struct drm_device *dev, void *data,
+                                     struct drm_file *file_priv)
+{
+       return -ENODEV;
+}
+
+static inline int drm_agp_free(struct drm_device *dev,
+                              struct drm_agp_buffer *request)
+{
+       return -ENODEV;
+}
+
+static inline int drm_agp_free_ioctl(struct drm_device *dev, void *data,
+                                    struct drm_file *file_priv)
+{
+       return -ENODEV;
+}
+
+static inline int drm_agp_unbind(struct drm_device *dev,
+                                struct drm_agp_binding *request)
+{
+       return -ENODEV;
+}
+
+static inline int drm_agp_unbind_ioctl(struct drm_device *dev, void *data,
+                                      struct drm_file *file_priv)
+{
+       return -ENODEV;
+}
+
+static inline int drm_agp_bind(struct drm_device *dev,
+                              struct drm_agp_binding *request)
+{
+       return -ENODEV;
+}
+
+static inline int drm_agp_bind_ioctl(struct drm_device *dev, void *data,
+                                    struct drm_file *file_priv)
+{
+       return -ENODEV;
+}
+
+static inline int drm_core_has_AGP(struct drm_device *dev)
+{
+       return 0;
+}
+
+#endif /* __OS_HAS_AGP */
+
+#endif /* _DRM_AGPSUPPORT_H_ */
index fa12a2fa4293a399c42bb1aadc9e359a8e4f71be..0a9f73e8be26827494fb1a663765a03e20fcb3da 100644 (file)
@@ -494,8 +494,6 @@ struct drm_encoder_funcs {
        void (*destroy)(struct drm_encoder *encoder);
 };
 
-#define DRM_CONNECTOR_MAX_UMODES 16
-#define DRM_CONNECTOR_LEN 32
 #define DRM_CONNECTOR_MAX_ENCODER 3
 
 /**
@@ -869,6 +867,8 @@ extern int drm_crtc_init(struct drm_device *dev,
                         const struct drm_crtc_funcs *funcs);
 extern void drm_crtc_cleanup(struct drm_crtc *crtc);
 
+extern void drm_connector_ida_init(void);
+extern void drm_connector_ida_destroy(void);
 extern int drm_connector_init(struct drm_device *dev,
                              struct drm_connector *connector,
                              const struct drm_connector_funcs *funcs,
@@ -908,7 +908,6 @@ extern struct edid *drm_get_edid(struct drm_connector *connector,
                                 struct i2c_adapter *adapter);
 extern int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid);
 extern void drm_mode_probed_add(struct drm_connector *connector, struct drm_display_mode *mode);
-extern void drm_mode_remove(struct drm_connector *connector, struct drm_display_mode *mode);
 extern void drm_mode_copy(struct drm_display_mode *dst, const struct drm_display_mode *src);
 extern struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev,
                                                   const struct drm_display_mode *mode);
@@ -925,14 +924,9 @@ extern int drm_mode_height(const struct drm_display_mode *mode);
 /* for us by fb module */
 extern struct drm_display_mode *drm_mode_create(struct drm_device *dev);
 extern void drm_mode_destroy(struct drm_device *dev, struct drm_display_mode *mode);
-extern void drm_mode_list_concat(struct list_head *head,
-                                struct list_head *new);
 extern void drm_mode_validate_size(struct drm_device *dev,
                                   struct list_head *mode_list,
                                   int maxX, int maxY, int maxPitch);
-extern void drm_mode_validate_clocks(struct drm_device *dev,
-                                    struct list_head *mode_list,
-                                    int *min, int *max, int n_ranges);
 extern void drm_mode_prune_invalid(struct drm_device *dev,
                                   struct list_head *mode_list, bool verbose);
 extern void drm_mode_sort(struct list_head *mode_list);
@@ -949,9 +943,6 @@ extern int drm_object_property_set_value(struct drm_mode_object *obj,
 extern int drm_object_property_get_value(struct drm_mode_object *obj,
                                         struct drm_property *property,
                                         uint64_t *value);
-extern struct drm_display_mode *drm_crtc_mode_create(struct drm_device *dev);
-extern void drm_framebuffer_set_object(struct drm_device *dev,
-                                      unsigned long handle);
 extern int drm_framebuffer_init(struct drm_device *dev,
                                struct drm_framebuffer *fb,
                                const struct drm_framebuffer_funcs *funcs);
@@ -962,10 +953,6 @@ extern void drm_framebuffer_reference(struct drm_framebuffer *fb);
 extern void drm_framebuffer_remove(struct drm_framebuffer *fb);
 extern void drm_framebuffer_cleanup(struct drm_framebuffer *fb);
 extern void drm_framebuffer_unregister_private(struct drm_framebuffer *fb);
-extern int drmfb_probe(struct drm_device *dev, struct drm_crtc *crtc);
-extern int drmfb_remove(struct drm_device *dev, struct drm_framebuffer *fb);
-extern void drm_crtc_probe_connector_modes(struct drm_device *dev, int maxX, int maxY);
-extern bool drm_crtc_in_use(struct drm_crtc *crtc);
 
 extern void drm_object_attach_property(struct drm_mode_object *obj,
                                       struct drm_property *property,
@@ -990,7 +977,6 @@ extern int drm_mode_create_dvi_i_properties(struct drm_device *dev);
 extern int drm_mode_create_tv_properties(struct drm_device *dev, int num_formats,
                                     char *formats[]);
 extern int drm_mode_create_scaling_mode_property(struct drm_device *dev);
-extern int drm_mode_create_dithering_property(struct drm_device *dev);
 extern int drm_mode_create_dirty_info_property(struct drm_device *dev);
 extern const char *drm_get_encoder_name(const struct drm_encoder *encoder);
 
@@ -1040,10 +1026,6 @@ extern int drm_mode_getblob_ioctl(struct drm_device *dev,
                                  void *data, struct drm_file *file_priv);
 extern int drm_mode_connector_property_set_ioctl(struct drm_device *dev,
                                              void *data, struct drm_file *file_priv);
-extern int drm_mode_hotplug_ioctl(struct drm_device *dev,
-                                 void *data, struct drm_file *file_priv);
-extern int drm_mode_replacefb(struct drm_device *dev,
-                             void *data, struct drm_file *file_priv);
 extern int drm_mode_getencoder(struct drm_device *dev,
                               void *data, struct drm_file *file_priv);
 extern int drm_mode_gamma_get_ioctl(struct drm_device *dev,
index e8e1417af3d9c933073d2d36270ee3a15ffb5705..ae8dbfb1207c71a6cbe44dc166180988083c00e6 100644 (file)
@@ -342,13 +342,42 @@ u8 drm_dp_get_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE],
 u8 drm_dp_get_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE],
                                          int lane);
 
-#define DP_RECEIVER_CAP_SIZE   0xf
+#define DP_RECEIVER_CAP_SIZE           0xf
+#define EDP_PSR_RECEIVER_CAP_SIZE      2
+
 void drm_dp_link_train_clock_recovery_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]);
 void drm_dp_link_train_channel_eq_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]);
 
 u8 drm_dp_link_rate_to_bw_code(int link_rate);
 int drm_dp_bw_code_to_link_rate(u8 link_bw);
 
+struct edp_sdp_header {
+       u8 HB0; /* Secondary Data Packet ID */
+       u8 HB1; /* Secondary Data Packet Type */
+       u8 HB2; /* 7:5 reserved, 4:0 revision number */
+       u8 HB3; /* 7:5 reserved, 4:0 number of valid data bytes */
+} __packed;
+
+#define EDP_SDP_HEADER_REVISION_MASK           0x1F
+#define EDP_SDP_HEADER_VALID_PAYLOAD_BYTES     0x1F
+
+struct edp_vsc_psr {
+       struct edp_sdp_header sdp_header;
+       u8 DB0; /* Stereo Interface */
+       u8 DB1; /* 0 - PSR State; 1 - Update RFB; 2 - CRC Valid */
+       u8 DB2; /* CRC value bits 7:0 of the R or Cr component */
+       u8 DB3; /* CRC value bits 15:8 of the R or Cr component */
+       u8 DB4; /* CRC value bits 7:0 of the G or Y component */
+       u8 DB5; /* CRC value bits 15:8 of the G or Y component */
+       u8 DB6; /* CRC value bits 7:0 of the B or Cb component */
+       u8 DB7; /* CRC value bits 15:8 of the B or Cb component */
+       u8 DB8_31[24]; /* Reserved */
+} __packed;
+
+#define EDP_VSC_PSR_STATE_ACTIVE       (1<<0)
+#define EDP_VSC_PSR_UPDATE_RFB         (1<<1)
+#define EDP_VSC_PSR_CRC_VALUES_VALID   (1<<2)
+
 static inline int
 drm_dp_max_link_rate(u8 dpcd[DP_RECEIVER_CAP_SIZE])
 {
index 4a3fc244301c8d68b6aec69acddaf5082fbb4ef7..c54cf3d4a03f0f744e83dbe93a7534620764a6f6 100644 (file)
@@ -24,7 +24,6 @@ struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb,
        unsigned int plane);
 
 #ifdef CONFIG_DEBUG_FS
-void drm_fb_cma_describe(struct drm_framebuffer *fb, struct seq_file *m);
 int drm_fb_cma_debugfs_show(struct seq_file *m, void *arg);
 #endif
 
diff --git a/include/drm/drm_flip_work.h b/include/drm/drm_flip_work.h
new file mode 100644 (file)
index 0000000..35c776a
--- /dev/null
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef DRM_FLIP_WORK_H
+#define DRM_FLIP_WORK_H
+
+#include <linux/kfifo.h>
+#include <linux/workqueue.h>
+
+/**
+ * DOC: flip utils
+ *
+ * Util to queue up work to run from work-queue context after flip/vblank.
+ * Typically this can be used to defer unref of framebuffer's, cursor
+ * bo's, etc until after vblank.  The APIs are all safe (and lockless)
+ * for up to one producer and once consumer at a time.  The single-consumer
+ * aspect is ensured by committing the queued work to a single work-queue.
+ */
+
+struct drm_flip_work;
+
+/*
+ * drm_flip_func_t - callback function
+ *
+ * @work: the flip work
+ * @val: value queued via drm_flip_work_queue()
+ *
+ * Callback function to be called for each of the  queue'd work items after
+ * drm_flip_work_commit() is called.
+ */
+typedef void (*drm_flip_func_t)(struct drm_flip_work *work, void *val);
+
+/**
+ * struct drm_flip_work - flip work queue
+ * @name: debug name
+ * @pending: number of queued but not committed items
+ * @count: number of committed items
+ * @func: callback fxn called for each committed item
+ * @worker: worker which calls @func
+ */
+struct drm_flip_work {
+       const char *name;
+       atomic_t pending, count;
+       drm_flip_func_t func;
+       struct work_struct worker;
+       DECLARE_KFIFO_PTR(fifo, void *);
+};
+
+void drm_flip_work_queue(struct drm_flip_work *work, void *val);
+void drm_flip_work_commit(struct drm_flip_work *work,
+               struct workqueue_struct *wq);
+int drm_flip_work_init(struct drm_flip_work *work, int size,
+               const char *name, drm_flip_func_t func);
+void drm_flip_work_cleanup(struct drm_flip_work *work);
+
+#endif  /* DRM_FLIP_WORK_H */
index c34f27f80bcc29f189de6e317c17538d5d567889..89b4d7db1ebd3bae06c366bfcdc923b25cee242f 100644 (file)
@@ -30,14 +30,6 @@ int drm_gem_cma_dumb_map_offset(struct drm_file *file_priv,
 /* set vm_flags and we can change the vm attribute to other one at here. */
 int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma);
 
-/*
- * destroy memory region allocated.
- *     - a gem handle and physical memory region pointed by a gem object
- *     would be released by drm_gem_handle_delete().
- */
-int drm_gem_cma_dumb_destroy(struct drm_file *file_priv,
-               struct drm_device *drm, unsigned int handle);
-
 /* allocate physical memory. */
 struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
                unsigned int size);
index 4d06edb56d5fbb74480d0ffc4b24297870f8149e..cba67865d18f51adabf46f9a18c1071c4e47c19b 100644 (file)
 /*
  * Generic range manager structs
  */
+#include <linux/bug.h>
+#include <linux/kernel.h>
 #include <linux/list.h>
+#include <linux/spinlock.h>
 #ifdef CONFIG_DEBUG_FS
 #include <linux/seq_file.h>
 #endif
 
+enum drm_mm_search_flags {
+       DRM_MM_SEARCH_DEFAULT =         0,
+       DRM_MM_SEARCH_BEST =            1 << 0,
+};
+
 struct drm_mm_node {
        struct list_head node_list;
        struct list_head hole_stack;
@@ -62,9 +70,6 @@ struct drm_mm {
        /* head_node.node_list is the list of all memory nodes, ordered
         * according to the (increasing) start address of the memory node. */
        struct drm_mm_node head_node;
-       struct list_head unused_nodes;
-       int num_unused;
-       spinlock_t unused_lock;
        unsigned int scan_check_range : 1;
        unsigned scan_alignment;
        unsigned long scan_color;
@@ -115,13 +120,6 @@ static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
 #define drm_mm_for_each_node(entry, mm) list_for_each_entry(entry, \
                                                &(mm)->head_node.node_list, \
                                                node_list)
-#define drm_mm_for_each_scanned_node_reverse(entry, n, mm) \
-       for (entry = (mm)->prev_scanned_node, \
-               next = entry ? list_entry(entry->node_list.next, \
-                       struct drm_mm_node, node_list) : NULL; \
-            entry != NULL; entry = next, \
-               next = entry ? list_entry(entry->node_list.next, \
-                       struct drm_mm_node, node_list) : NULL) \
 
 /* Note that we need to unroll list_for_each_entry in order to inline
  * setting hole_start and hole_end on each iteration and keep the
@@ -138,124 +136,50 @@ static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
 /*
  * Basic range manager support (drm_mm.c)
  */
-extern struct drm_mm_node *drm_mm_create_block(struct drm_mm *mm,
-                                              unsigned long start,
-                                              unsigned long size,
-                                              bool atomic);
-extern struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
-                                                   unsigned long size,
-                                                   unsigned alignment,
-                                                   unsigned long color,
-                                                   int atomic);
-extern struct drm_mm_node *drm_mm_get_block_range_generic(
-                                               struct drm_mm_node *node,
-                                               unsigned long size,
-                                               unsigned alignment,
-                                               unsigned long color,
-                                               unsigned long start,
-                                               unsigned long end,
-                                               int atomic);
-static inline struct drm_mm_node *drm_mm_get_block(struct drm_mm_node *parent,
-                                                  unsigned long size,
-                                                  unsigned alignment)
-{
-       return drm_mm_get_block_generic(parent, size, alignment, 0, 0);
-}
-static inline struct drm_mm_node *drm_mm_get_block_atomic(struct drm_mm_node *parent,
-                                                         unsigned long size,
-                                                         unsigned alignment)
-{
-       return drm_mm_get_block_generic(parent, size, alignment, 0, 1);
-}
-static inline struct drm_mm_node *drm_mm_get_block_range(
-                                               struct drm_mm_node *parent,
-                                               unsigned long size,
-                                               unsigned alignment,
-                                               unsigned long start,
-                                               unsigned long end)
-{
-       return drm_mm_get_block_range_generic(parent, size, alignment, 0,
-                                             start, end, 0);
-}
-static inline struct drm_mm_node *drm_mm_get_block_atomic_range(
-                                               struct drm_mm_node *parent,
-                                               unsigned long size,
-                                               unsigned alignment,
-                                               unsigned long start,
-                                               unsigned long end)
-{
-       return drm_mm_get_block_range_generic(parent, size, alignment, 0,
-                                               start, end, 1);
-}
+extern int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node);
 
-extern int drm_mm_insert_node(struct drm_mm *mm,
-                             struct drm_mm_node *node,
-                             unsigned long size,
-                             unsigned alignment);
-extern int drm_mm_insert_node_in_range(struct drm_mm *mm,
-                                      struct drm_mm_node *node,
-                                      unsigned long size,
-                                      unsigned alignment,
-                                      unsigned long start,
-                                      unsigned long end);
 extern int drm_mm_insert_node_generic(struct drm_mm *mm,
                                      struct drm_mm_node *node,
                                      unsigned long size,
                                      unsigned alignment,
-                                     unsigned long color);
+                                     unsigned long color,
+                                     enum drm_mm_search_flags flags);
+static inline int drm_mm_insert_node(struct drm_mm *mm,
+                                    struct drm_mm_node *node,
+                                    unsigned long size,
+                                    unsigned alignment,
+                                    enum drm_mm_search_flags flags)
+{
+       return drm_mm_insert_node_generic(mm, node, size, alignment, 0, flags);
+}
+
 extern int drm_mm_insert_node_in_range_generic(struct drm_mm *mm,
                                       struct drm_mm_node *node,
                                       unsigned long size,
                                       unsigned alignment,
                                       unsigned long color,
                                       unsigned long start,
-                                      unsigned long end);
-extern void drm_mm_put_block(struct drm_mm_node *cur);
-extern void drm_mm_remove_node(struct drm_mm_node *node);
-extern void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new);
-extern struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
-                                                     unsigned long size,
-                                                     unsigned alignment,
-                                                     unsigned long color,
-                                                     bool best_match);
-extern struct drm_mm_node *drm_mm_search_free_in_range_generic(
-                                               const struct drm_mm *mm,
-                                               unsigned long size,
-                                               unsigned alignment,
-                                               unsigned long color,
-                                               unsigned long start,
-                                               unsigned long end,
-                                               bool best_match);
-static inline struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
-                                                    unsigned long size,
-                                                    unsigned alignment,
-                                                    bool best_match)
+                                      unsigned long end,
+                                      enum drm_mm_search_flags flags);
+static inline int drm_mm_insert_node_in_range(struct drm_mm *mm,
+                                             struct drm_mm_node *node,
+                                             unsigned long size,
+                                             unsigned alignment,
+                                             unsigned long start,
+                                             unsigned long end,
+                                             enum drm_mm_search_flags flags)
 {
-       return drm_mm_search_free_generic(mm,size, alignment, 0, best_match);
-}
-static inline  struct drm_mm_node *drm_mm_search_free_in_range(
-                                               const struct drm_mm *mm,
-                                               unsigned long size,
-                                               unsigned alignment,
-                                               unsigned long start,
-                                               unsigned long end,
-                                               bool best_match)
-{
-       return drm_mm_search_free_in_range_generic(mm, size, alignment, 0,
-                                                  start, end, best_match);
+       return drm_mm_insert_node_in_range_generic(mm, node, size, alignment,
+                                                  0, start, end, flags);
 }
 
+extern void drm_mm_remove_node(struct drm_mm_node *node);
+extern void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new);
 extern void drm_mm_init(struct drm_mm *mm,
                        unsigned long start,
                        unsigned long size);
 extern void drm_mm_takedown(struct drm_mm *mm);
 extern int drm_mm_clean(struct drm_mm *mm);
-extern int drm_mm_pre_get(struct drm_mm *mm);
-
-static inline struct drm_mm *drm_get_mm(struct drm_mm_node *block)
-{
-       return block->mm;
-}
 
 void drm_mm_init_scan(struct drm_mm *mm,
                      unsigned long size,
index 34efaf64cc8712611ed9aac0c1a5d3596a6d923f..0a85e5c5d61b5e2aca938166b7c233ff53bdefca 100644 (file)
        {0x102b, 0x2527, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G550}, \
        {0, 0, 0}
 
-#define mach64_PCI_IDS \
-       {0x1002, 0x4749, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-       {0x1002, 0x4750, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-       {0x1002, 0x4751, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-       {0x1002, 0x4742, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-       {0x1002, 0x4744, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-       {0x1002, 0x4c49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-       {0x1002, 0x4c50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-       {0x1002, 0x4c51, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-       {0x1002, 0x4c42, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-       {0x1002, 0x4c44, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-       {0x1002, 0x474c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-       {0x1002, 0x474f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-       {0x1002, 0x4752, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-       {0x1002, 0x4753, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-       {0x1002, 0x474d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-       {0x1002, 0x474e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-       {0x1002, 0x4c52, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-       {0x1002, 0x4c53, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-       {0x1002, 0x4c4d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-       {0x1002, 0x4c4e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-       {0, 0, 0}
-
 #define sisdrv_PCI_IDS \
        {0x1039, 0x0300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
        {0x1039, 0x5300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
        {0x8086, 0x1132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
        {0, 0, 0}
 
-#define gamma_PCI_IDS \
-       {0x3d3d, 0x0008, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-       {0, 0, 0}
-
 #define savage_PCI_IDS \
        {0x5333, 0x8a20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE3D}, \
        {0x5333, 0x8a21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE3D}, \
        {0x5333, 0x8d03, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGEDDR}, \
        {0x5333, 0x8d04, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGEDDR}, \
        {0, 0, 0}
-
-#define ffb_PCI_IDS \
-       {0, 0, 0}
diff --git a/include/drm/drm_vma_manager.h b/include/drm/drm_vma_manager.h
new file mode 100644 (file)
index 0000000..22eedac
--- /dev/null
@@ -0,0 +1,224 @@
+#ifndef __DRM_VMA_MANAGER_H__
+#define __DRM_VMA_MANAGER_H__
+
+/*
+ * Copyright (c) 2013 David Herrmann <dh.herrmann@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <drm/drm_mm.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/rbtree.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+struct drm_vma_offset_node {
+       struct drm_mm_node vm_node;
+       struct rb_node vm_rb;
+};
+
+struct drm_vma_offset_manager {
+       rwlock_t vm_lock;
+       struct rb_root vm_addr_space_rb;
+       struct drm_mm vm_addr_space_mm;
+};
+
+void drm_vma_offset_manager_init(struct drm_vma_offset_manager *mgr,
+                                unsigned long page_offset, unsigned long size);
+void drm_vma_offset_manager_destroy(struct drm_vma_offset_manager *mgr);
+
+struct drm_vma_offset_node *drm_vma_offset_lookup(struct drm_vma_offset_manager *mgr,
+                                                 unsigned long start,
+                                                 unsigned long pages);
+struct drm_vma_offset_node *drm_vma_offset_lookup_locked(struct drm_vma_offset_manager *mgr,
+                                                          unsigned long start,
+                                                          unsigned long pages);
+int drm_vma_offset_add(struct drm_vma_offset_manager *mgr,
+                      struct drm_vma_offset_node *node, unsigned long pages);
+void drm_vma_offset_remove(struct drm_vma_offset_manager *mgr,
+                          struct drm_vma_offset_node *node);
+
+/**
+ * drm_vma_offset_exact_lookup() - Look up node by exact address
+ * @mgr: Manager object
+ * @start: Start address (page-based, not byte-based)
+ * @pages: Size of object (page-based)
+ *
+ * Same as drm_vma_offset_lookup() but does not allow any offset into the node.
+ * It only returns the exact object with the given start address.
+ *
+ * RETURNS:
+ * Node at exact start address @start.
+ */
+static inline struct drm_vma_offset_node *
+drm_vma_offset_exact_lookup(struct drm_vma_offset_manager *mgr,
+                           unsigned long start,
+                           unsigned long pages)
+{
+       struct drm_vma_offset_node *node;
+
+       node = drm_vma_offset_lookup(mgr, start, pages);
+       return (node && node->vm_node.start == start) ? node : NULL;
+}
+
+/**
+ * drm_vma_offset_lock_lookup() - Lock lookup for extended private use
+ * @mgr: Manager object
+ *
+ * Lock VMA manager for extended lookups. Only *_locked() VMA function calls
+ * are allowed while holding this lock. All other contexts are blocked from VMA
+ * until the lock is released via drm_vma_offset_unlock_lookup().
+ *
+ * Use this if you need to take a reference to the objects returned by
+ * drm_vma_offset_lookup_locked() before releasing this lock again.
+ *
+ * This lock must not be used for anything else than extended lookups. You must
+ * not call any other VMA helpers while holding this lock.
+ *
+ * Note: You're in atomic-context while holding this lock!
+ *
+ * Example:
+ *   drm_vma_offset_lock_lookup(mgr);
+ *   node = drm_vma_offset_lookup_locked(mgr);
+ *   if (node)
+ *       kref_get_unless_zero(container_of(node, sth, entr));
+ *   drm_vma_offset_unlock_lookup(mgr);
+ */
+static inline void drm_vma_offset_lock_lookup(struct drm_vma_offset_manager *mgr)
+{
+       read_lock(&mgr->vm_lock);
+}
+
+/**
+ * drm_vma_offset_unlock_lookup() - Unlock lookup for extended private use
+ * @mgr: Manager object
+ *
+ * Release lookup-lock. See drm_vma_offset_lock_lookup() for more information.
+ */
+static inline void drm_vma_offset_unlock_lookup(struct drm_vma_offset_manager *mgr)
+{
+       read_unlock(&mgr->vm_lock);
+}
+
+/**
+ * drm_vma_node_reset() - Initialize or reset node object
+ * @node: Node to initialize or reset
+ *
+ * Reset a node to its initial state. This must be called if @node isn't
+ * already cleared (eg., via kzalloc) before using it with any VMA offset
+ * manager.
+ *
+ * This must not be called on an already allocated node, or you will leak
+ * memory.
+ */
+static inline void drm_vma_node_reset(struct drm_vma_offset_node *node)
+{
+       memset(node, 0, sizeof(*node));
+}
+
+/**
+ * drm_vma_node_start() - Return start address for page-based addressing
+ * @node: Node to inspect
+ *
+ * Return the start address of the given node. This can be used as offset into
+ * the linear VM space that is provided by the VMA offset manager. Note that
+ * this can only be used for page-based addressing. If you need a proper offset
+ * for user-space mappings, you must apply "<< PAGE_SHIFT" or use the
+ * drm_vma_node_offset_addr() helper instead.
+ *
+ * RETURNS:
+ * Start address of @node for page-based addressing. 0 if the node does not
+ * have an offset allocated.
+ */
+static inline unsigned long drm_vma_node_start(struct drm_vma_offset_node *node)
+{
+       return node->vm_node.start;
+}
+
+/**
+ * drm_vma_node_size() - Return size (page-based)
+ * @node: Node to inspect
+ *
+ * Return the size as number of pages for the given node. This is the same size
+ * that was passed to drm_vma_offset_add(). If no offset is allocated for the
+ * node, this is 0.
+ *
+ * RETURNS:
+ * Size of @node as number of pages. 0 if the node does not have an offset
+ * allocated.
+ */
+static inline unsigned long drm_vma_node_size(struct drm_vma_offset_node *node)
+{
+       return node->vm_node.size;
+}
+
+/**
+ * drm_vma_node_has_offset() - Check whether node is added to offset manager
+ * @node: Node to be checked
+ *
+ * RETURNS:
+ * true iff the node was previously allocated an offset and added to
+ * an vma offset manager.
+ */
+static inline bool drm_vma_node_has_offset(struct drm_vma_offset_node *node)
+{
+       return drm_mm_node_allocated(&node->vm_node);
+}
+
+/**
+ * drm_vma_node_offset_addr() - Return sanitized offset for user-space mmaps
+ * @node: Linked offset node
+ *
+ * Same as drm_vma_node_start() but returns the address as a valid offset that
+ * can be used for user-space mappings during mmap().
+ * This must not be called on unlinked nodes.
+ *
+ * RETURNS:
+ * Offset of @node for byte-based addressing. 0 if the node does not have an
+ * object allocated.
+ */
+static inline __u64 drm_vma_node_offset_addr(struct drm_vma_offset_node *node)
+{
+       return ((__u64)node->vm_node.start) << PAGE_SHIFT;
+}
+
+/**
+ * drm_vma_node_unmap() - Unmap offset node
+ * @node: Offset node
+ * @file_mapping: Address space to unmap @node from
+ *
+ * Unmap all userspace mappings for a given offset node. The mappings must be
+ * associated with the @file_mapping address-space. If no offset exists or
+ * the address-space is invalid, nothing is done.
+ *
+ * This call is unlocked. The caller must guarantee that drm_vma_offset_remove()
+ * is not called on this node concurrently.
+ */
+static inline void drm_vma_node_unmap(struct drm_vma_offset_node *node,
+                                     struct address_space *file_mapping)
+{
+       if (file_mapping && drm_vma_node_has_offset(node))
+               unmap_mapping_range(file_mapping,
+                                   drm_vma_node_offset_addr(node),
+                                   drm_vma_node_size(node) << PAGE_SHIFT, 1);
+}
+
+#endif /* __DRM_VMA_MANAGER_H__ */
diff --git a/include/drm/i2c/tda998x.h b/include/drm/i2c/tda998x.h
new file mode 100644 (file)
index 0000000..3e419d9
--- /dev/null
@@ -0,0 +1,30 @@
+#ifndef __DRM_I2C_TDA998X_H__
+#define __DRM_I2C_TDA998X_H__
+
+struct tda998x_encoder_params {
+       u8 swap_b:3;
+       u8 mirr_b:1;
+       u8 swap_a:3;
+       u8 mirr_a:1;
+       u8 swap_d:3;
+       u8 mirr_d:1;
+       u8 swap_c:3;
+       u8 mirr_c:1;
+       u8 swap_f:3;
+       u8 mirr_f:1;
+       u8 swap_e:3;
+       u8 mirr_e:1;
+
+       u8 audio_cfg;
+       u8 audio_clk_cfg;
+       u8 audio_frame[6];
+
+       enum {
+               AFMT_SPDIF,
+               AFMT_I2S
+       } audio_format;
+
+       unsigned audio_sample_rate;
+};
+
+#endif
index 8a6aa56ece52a53cad3734c756a6dbc19a5d7ac1..751eaffbf0d5fe5ef758254ceba1fe706af4423d 100644 (file)
 #define _TTM_BO_API_H_
 
 #include <drm/drm_hashtab.h>
+#include <drm/drm_vma_manager.h>
 #include <linux/kref.h>
 #include <linux/list.h>
 #include <linux/wait.h>
 #include <linux/mutex.h>
 #include <linux/mm.h>
-#include <linux/rbtree.h>
 #include <linux/bitmap.h>
 #include <linux/reservation.h>
 
@@ -145,7 +145,6 @@ struct ttm_tt;
  * @type: The bo type.
  * @destroy: Destruction function. If NULL, kfree is used.
  * @num_pages: Actual number of pages.
- * @addr_space_offset: Address space offset.
  * @acc_size: Accounted size for this object.
  * @kref: Reference count of this buffer object. When this refcount reaches
  * zero, the object is put on the delayed delete list.
@@ -166,8 +165,7 @@ struct ttm_tt;
  * @swap: List head for swap LRU list.
  * @sync_obj: Pointer to a synchronization object.
  * @priv_flags: Flags describing buffer object internal state.
- * @vm_rb: Rb node for the vm rb tree.
- * @vm_node: Address space manager node.
+ * @vma_node: Address space manager node.
  * @offset: The current GPU offset, which can have different meanings
  * depending on the memory type. For SYSTEM type memory, it should be 0.
  * @cur_placement: Hint of current placement.
@@ -194,7 +192,6 @@ struct ttm_buffer_object {
        enum ttm_bo_type type;
        void (*destroy) (struct ttm_buffer_object *);
        unsigned long num_pages;
-       uint64_t addr_space_offset;
        size_t acc_size;
 
        /**
@@ -238,13 +235,7 @@ struct ttm_buffer_object {
        void *sync_obj;
        unsigned long priv_flags;
 
-       /**
-        * Members protected by the bdev::vm_lock
-        */
-
-       struct rb_node vm_rb;
-       struct drm_mm_node *vm_node;
-
+       struct drm_vma_offset_node vma_node;
 
        /**
         * Special members that are protected by the reserve lock
index 984fc2d571a145860d6cfd2962d7ca20da7d0ee5..8639c85d61c400f4694e99ca7e1282df23f5ef66 100644 (file)
@@ -36,6 +36,7 @@
 #include <ttm/ttm_placement.h>
 #include <drm/drm_mm.h>
 #include <drm/drm_global.h>
+#include <drm/drm_vma_manager.h>
 #include <linux/workqueue.h>
 #include <linux/fs.h>
 #include <linux/spinlock.h>
@@ -519,7 +520,7 @@ struct ttm_bo_global {
  * @man: An array of mem_type_managers.
  * @fence_lock: Protects the synchronizing members on *all* bos belonging
  * to this device.
- * @addr_space_mm: Range manager for the device address space.
+ * @vma_manager: Address space manager
  * lru_lock: Spinlock that protects the buffer+device lru lists and
  * ddestroy lists.
  * @val_seq: Current validation sequence.
@@ -537,14 +538,13 @@ struct ttm_bo_device {
        struct list_head device_list;
        struct ttm_bo_global *glob;
        struct ttm_bo_driver *driver;
-       rwlock_t vm_lock;
        struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES];
        spinlock_t fence_lock;
+
        /*
-        * Protected by the vm lock.
+        * Protected by internal locks.
         */
-       struct rb_root addr_space_rb;
-       struct drm_mm addr_space_mm;
+       struct drm_vma_offset_manager vma_manager;
 
        /*
         * Protected by the global:lru lock.
diff --git a/include/dt-bindings/input/input.h b/include/dt-bindings/input/input.h
new file mode 100644 (file)
index 0000000..042e7b3
--- /dev/null
@@ -0,0 +1,525 @@
+/*
+ * This header provides constants for most input bindings.
+ *
+ * Most input bindings include key code, matrix key code format.
+ * In most cases, key code and matrix key code format uses
+ * the standard values/macro defined in this header.
+ */
+
+#ifndef _DT_BINDINGS_INPUT_INPUT_H
+#define _DT_BINDINGS_INPUT_INPUT_H
+
+#define KEY_RESERVED           0
+#define KEY_ESC                        1
+#define KEY_1                  2
+#define KEY_2                  3
+#define KEY_3                  4
+#define KEY_4                  5
+#define KEY_5                  6
+#define KEY_6                  7
+#define KEY_7                  8
+#define KEY_8                  9
+#define KEY_9                  10
+#define KEY_0                  11
+#define KEY_MINUS              12
+#define KEY_EQUAL              13
+#define KEY_BACKSPACE          14
+#define KEY_TAB                        15
+#define KEY_Q                  16
+#define KEY_W                  17
+#define KEY_E                  18
+#define KEY_R                  19
+#define KEY_T                  20
+#define KEY_Y                  21
+#define KEY_U                  22
+#define KEY_I                  23
+#define KEY_O                  24
+#define KEY_P                  25
+#define KEY_LEFTBRACE          26
+#define KEY_RIGHTBRACE         27
+#define KEY_ENTER              28
+#define KEY_LEFTCTRL           29
+#define KEY_A                  30
+#define KEY_S                  31
+#define KEY_D                  32
+#define KEY_F                  33
+#define KEY_G                  34
+#define KEY_H                  35
+#define KEY_J                  36
+#define KEY_K                  37
+#define KEY_L                  38
+#define KEY_SEMICOLON          39
+#define KEY_APOSTROPHE         40
+#define KEY_GRAVE              41
+#define KEY_LEFTSHIFT          42
+#define KEY_BACKSLASH          43
+#define KEY_Z                  44
+#define KEY_X                  45
+#define KEY_C                  46
+#define KEY_V                  47
+#define KEY_B                  48
+#define KEY_N                  49
+#define KEY_M                  50
+#define KEY_COMMA              51
+#define KEY_DOT                        52
+#define KEY_SLASH              53
+#define KEY_RIGHTSHIFT         54
+#define KEY_KPASTERISK         55
+#define KEY_LEFTALT            56
+#define KEY_SPACE              57
+#define KEY_CAPSLOCK           58
+#define KEY_F1                 59
+#define KEY_F2                 60
+#define KEY_F3                 61
+#define KEY_F4                 62
+#define KEY_F5                 63
+#define KEY_F6                 64
+#define KEY_F7                 65
+#define KEY_F8                 66
+#define KEY_F9                 67
+#define KEY_F10                        68
+#define KEY_NUMLOCK            69
+#define KEY_SCROLLLOCK         70
+#define KEY_KP7                        71
+#define KEY_KP8                        72
+#define KEY_KP9                        73
+#define KEY_KPMINUS            74
+#define KEY_KP4                        75
+#define KEY_KP5                        76
+#define KEY_KP6                        77
+#define KEY_KPPLUS             78
+#define KEY_KP1                        79
+#define KEY_KP2                        80
+#define KEY_KP3                        81
+#define KEY_KP0                        82
+#define KEY_KPDOT              83
+
+#define KEY_ZENKAKUHANKAKU     85
+#define KEY_102ND              86
+#define KEY_F11                        87
+#define KEY_F12                        88
+#define KEY_RO                 89
+#define KEY_KATAKANA           90
+#define KEY_HIRAGANA           91
+#define KEY_HENKAN             92
+#define KEY_KATAKANAHIRAGANA   93
+#define KEY_MUHENKAN           94
+#define KEY_KPJPCOMMA          95
+#define KEY_KPENTER            96
+#define KEY_RIGHTCTRL          97
+#define KEY_KPSLASH            98
+#define KEY_SYSRQ              99
+#define KEY_RIGHTALT           100
+#define KEY_LINEFEED           101
+#define KEY_HOME               102
+#define KEY_UP                 103
+#define KEY_PAGEUP             104
+#define KEY_LEFT               105
+#define KEY_RIGHT              106
+#define KEY_END                        107
+#define KEY_DOWN               108
+#define KEY_PAGEDOWN           109
+#define KEY_INSERT             110
+#define KEY_DELETE             111
+#define KEY_MACRO              112
+#define KEY_MUTE               113
+#define KEY_VOLUMEDOWN         114
+#define KEY_VOLUMEUP           115
+#define KEY_POWER              116     /* SC System Power Down */
+#define KEY_KPEQUAL            117
+#define KEY_KPPLUSMINUS                118
+#define KEY_PAUSE              119
+#define KEY_SCALE              120     /* AL Compiz Scale (Expose) */
+
+#define KEY_KPCOMMA            121
+#define KEY_HANGEUL            122
+#define KEY_HANGUEL            KEY_HANGEUL
+#define KEY_HANJA              123
+#define KEY_YEN                        124
+#define KEY_LEFTMETA           125
+#define KEY_RIGHTMETA          126
+#define KEY_COMPOSE            127
+
+#define KEY_STOP               128     /* AC Stop */
+#define KEY_AGAIN              129
+#define KEY_PROPS              130     /* AC Properties */
+#define KEY_UNDO               131     /* AC Undo */
+#define KEY_FRONT              132
+#define KEY_COPY               133     /* AC Copy */
+#define KEY_OPEN               134     /* AC Open */
+#define KEY_PASTE              135     /* AC Paste */
+#define KEY_FIND               136     /* AC Search */
+#define KEY_CUT                        137     /* AC Cut */
+#define KEY_HELP               138     /* AL Integrated Help Center */
+#define KEY_MENU               139     /* Menu (show menu) */
+#define KEY_CALC               140     /* AL Calculator */
+#define KEY_SETUP              141
+#define KEY_SLEEP              142     /* SC System Sleep */
+#define KEY_WAKEUP             143     /* System Wake Up */
+#define KEY_FILE               144     /* AL Local Machine Browser */
+#define KEY_SENDFILE           145
+#define KEY_DELETEFILE         146
+#define KEY_XFER               147
+#define KEY_PROG1              148
+#define KEY_PROG2              149
+#define KEY_WWW                        150     /* AL Internet Browser */
+#define KEY_MSDOS              151
+#define KEY_COFFEE             152     /* AL Terminal Lock/Screensaver */
+#define KEY_SCREENLOCK         KEY_COFFEE
+#define KEY_DIRECTION          153
+#define KEY_CYCLEWINDOWS       154
+#define KEY_MAIL               155
+#define KEY_BOOKMARKS          156     /* AC Bookmarks */
+#define KEY_COMPUTER           157
+#define KEY_BACK               158     /* AC Back */
+#define KEY_FORWARD            159     /* AC Forward */
+#define KEY_CLOSECD            160
+#define KEY_EJECTCD            161
+#define KEY_EJECTCLOSECD       162
+#define KEY_NEXTSONG           163
+#define KEY_PLAYPAUSE          164
+#define KEY_PREVIOUSSONG       165
+#define KEY_STOPCD             166
+#define KEY_RECORD             167
+#define KEY_REWIND             168
+#define KEY_PHONE              169     /* Media Select Telephone */
+#define KEY_ISO                        170
+#define KEY_CONFIG             171     /* AL Consumer Control Configuration */
+#define KEY_HOMEPAGE           172     /* AC Home */
+#define KEY_REFRESH            173     /* AC Refresh */
+#define KEY_EXIT               174     /* AC Exit */
+#define KEY_MOVE               175
+#define KEY_EDIT               176
+#define KEY_SCROLLUP           177
+#define KEY_SCROLLDOWN         178
+#define KEY_KPLEFTPAREN                179
+#define KEY_KPRIGHTPAREN       180
+#define KEY_NEW                        181     /* AC New */
+#define KEY_REDO               182     /* AC Redo/Repeat */
+
+#define KEY_F13                        183
+#define KEY_F14                        184
+#define KEY_F15                        185
+#define KEY_F16                        186
+#define KEY_F17                        187
+#define KEY_F18                        188
+#define KEY_F19                        189
+#define KEY_F20                        190
+#define KEY_F21                        191
+#define KEY_F22                        192
+#define KEY_F23                        193
+#define KEY_F24                        194
+
+#define KEY_PLAYCD             200
+#define KEY_PAUSECD            201
+#define KEY_PROG3              202
+#define KEY_PROG4              203
+#define KEY_DASHBOARD          204     /* AL Dashboard */
+#define KEY_SUSPEND            205
+#define KEY_CLOSE              206     /* AC Close */
+#define KEY_PLAY               207
+#define KEY_FASTFORWARD                208
+#define KEY_BASSBOOST          209
+#define KEY_PRINT              210     /* AC Print */
+#define KEY_HP                 211
+#define KEY_CAMERA             212
+#define KEY_SOUND              213
+#define KEY_QUESTION           214
+#define KEY_EMAIL              215
+#define KEY_CHAT               216
+#define KEY_SEARCH             217
+#define KEY_CONNECT            218
+#define KEY_FINANCE            219     /* AL Checkbook/Finance */
+#define KEY_SPORT              220
+#define KEY_SHOP               221
+#define KEY_ALTERASE           222
+#define KEY_CANCEL             223     /* AC Cancel */
+#define KEY_BRIGHTNESSDOWN     224
+#define KEY_BRIGHTNESSUP       225
+#define KEY_MEDIA              226
+
+#define KEY_SWITCHVIDEOMODE    227     /* Cycle between available video
+                                          outputs (Monitor/LCD/TV-out/etc) */
+#define KEY_KBDILLUMTOGGLE     228
+#define KEY_KBDILLUMDOWN       229
+#define KEY_KBDILLUMUP         230
+
+#define KEY_SEND               231     /* AC Send */
+#define KEY_REPLY              232     /* AC Reply */
+#define KEY_FORWARDMAIL                233     /* AC Forward Msg */
+#define KEY_SAVE               234     /* AC Save */
+#define KEY_DOCUMENTS          235
+
+#define KEY_BATTERY            236
+
+#define KEY_BLUETOOTH          237
+#define KEY_WLAN               238
+#define KEY_UWB                        239
+
+#define KEY_UNKNOWN            240
+
+#define KEY_VIDEO_NEXT         241     /* drive next video source */
+#define KEY_VIDEO_PREV         242     /* drive previous video source */
+#define KEY_BRIGHTNESS_CYCLE   243     /* brightness up, after max is min */
+#define KEY_BRIGHTNESS_ZERO    244     /* brightness off, use ambient */
+#define KEY_DISPLAY_OFF                245     /* display device to off state */
+
+#define KEY_WIMAX              246
+#define KEY_RFKILL             247     /* Key that controls all radios */
+
+#define KEY_MICMUTE            248     /* Mute / unmute the microphone */
+
+/* Code 255 is reserved for special needs of AT keyboard driver */
+
+#define BTN_MISC               0x100
+#define BTN_0                  0x100
+#define BTN_1                  0x101
+#define BTN_2                  0x102
+#define BTN_3                  0x103
+#define BTN_4                  0x104
+#define BTN_5                  0x105
+#define BTN_6                  0x106
+#define BTN_7                  0x107
+#define BTN_8                  0x108
+#define BTN_9                  0x109
+
+#define BTN_MOUSE              0x110
+#define BTN_LEFT               0x110
+#define BTN_RIGHT              0x111
+#define BTN_MIDDLE             0x112
+#define BTN_SIDE               0x113
+#define BTN_EXTRA              0x114
+#define BTN_FORWARD            0x115
+#define BTN_BACK               0x116
+#define BTN_TASK               0x117
+
+#define BTN_JOYSTICK           0x120
+#define BTN_TRIGGER            0x120
+#define BTN_THUMB              0x121
+#define BTN_THUMB2             0x122
+#define BTN_TOP                        0x123
+#define BTN_TOP2               0x124
+#define BTN_PINKIE             0x125
+#define BTN_BASE               0x126
+#define BTN_BASE2              0x127
+#define BTN_BASE3              0x128
+#define BTN_BASE4              0x129
+#define BTN_BASE5              0x12a
+#define BTN_BASE6              0x12b
+#define BTN_DEAD               0x12f
+
+#define BTN_GAMEPAD            0x130
+#define BTN_SOUTH              0x130
+#define BTN_A                  BTN_SOUTH
+#define BTN_EAST               0x131
+#define BTN_B                  BTN_EAST
+#define BTN_C                  0x132
+#define BTN_NORTH              0x133
+#define BTN_X                  BTN_NORTH
+#define BTN_WEST               0x134
+#define BTN_Y                  BTN_WEST
+#define BTN_Z                  0x135
+#define BTN_TL                 0x136
+#define BTN_TR                 0x137
+#define BTN_TL2                        0x138
+#define BTN_TR2                        0x139
+#define BTN_SELECT             0x13a
+#define BTN_START              0x13b
+#define BTN_MODE               0x13c
+#define BTN_THUMBL             0x13d
+#define BTN_THUMBR             0x13e
+
+#define BTN_DIGI               0x140
+#define BTN_TOOL_PEN           0x140
+#define BTN_TOOL_RUBBER                0x141
+#define BTN_TOOL_BRUSH         0x142
+#define BTN_TOOL_PENCIL                0x143
+#define BTN_TOOL_AIRBRUSH      0x144
+#define BTN_TOOL_FINGER                0x145
+#define BTN_TOOL_MOUSE         0x146
+#define BTN_TOOL_LENS          0x147
+#define BTN_TOOL_QUINTTAP      0x148   /* Five fingers on trackpad */
+#define BTN_TOUCH              0x14a
+#define BTN_STYLUS             0x14b
+#define BTN_STYLUS2            0x14c
+#define BTN_TOOL_DOUBLETAP     0x14d
+#define BTN_TOOL_TRIPLETAP     0x14e
+#define BTN_TOOL_QUADTAP       0x14f   /* Four fingers on trackpad */
+
+#define BTN_WHEEL              0x150
+#define BTN_GEAR_DOWN          0x150
+#define BTN_GEAR_UP            0x151
+
+#define KEY_OK                 0x160
+#define KEY_SELECT             0x161
+#define KEY_GOTO               0x162
+#define KEY_CLEAR              0x163
+#define KEY_POWER2             0x164
+#define KEY_OPTION             0x165
+#define KEY_INFO               0x166   /* AL OEM Features/Tips/Tutorial */
+#define KEY_TIME               0x167
+#define KEY_VENDOR             0x168
+#define KEY_ARCHIVE            0x169
+#define KEY_PROGRAM            0x16a   /* Media Select Program Guide */
+#define KEY_CHANNEL            0x16b
+#define KEY_FAVORITES          0x16c
+#define KEY_EPG                        0x16d
+#define KEY_PVR                        0x16e   /* Media Select Home */
+#define KEY_MHP                        0x16f
+#define KEY_LANGUAGE           0x170
+#define KEY_TITLE              0x171
+#define KEY_SUBTITLE           0x172
+#define KEY_ANGLE              0x173
+#define KEY_ZOOM               0x174
+#define KEY_MODE               0x175
+#define KEY_KEYBOARD           0x176
+#define KEY_SCREEN             0x177
+#define KEY_PC                 0x178   /* Media Select Computer */
+#define KEY_TV                 0x179   /* Media Select TV */
+#define KEY_TV2                        0x17a   /* Media Select Cable */
+#define KEY_VCR                        0x17b   /* Media Select VCR */
+#define KEY_VCR2               0x17c   /* VCR Plus */
+#define KEY_SAT                        0x17d   /* Media Select Satellite */
+#define KEY_SAT2               0x17e
+#define KEY_CD                 0x17f   /* Media Select CD */
+#define KEY_TAPE               0x180   /* Media Select Tape */
+#define KEY_RADIO              0x181
+#define KEY_TUNER              0x182   /* Media Select Tuner */
+#define KEY_PLAYER             0x183
+#define KEY_TEXT               0x184
+#define KEY_DVD                        0x185   /* Media Select DVD */
+#define KEY_AUX                        0x186
+#define KEY_MP3                        0x187
+#define KEY_AUDIO              0x188   /* AL Audio Browser */
+#define KEY_VIDEO              0x189   /* AL Movie Browser */
+#define KEY_DIRECTORY          0x18a
+#define KEY_LIST               0x18b
+#define KEY_MEMO               0x18c   /* Media Select Messages */
+#define KEY_CALENDAR           0x18d
+#define KEY_RED                        0x18e
+#define KEY_GREEN              0x18f
+#define KEY_YELLOW             0x190
+#define KEY_BLUE               0x191
+#define KEY_CHANNELUP          0x192   /* Channel Increment */
+#define KEY_CHANNELDOWN                0x193   /* Channel Decrement */
+#define KEY_FIRST              0x194
+#define KEY_LAST               0x195   /* Recall Last */
+#define KEY_AB                 0x196
+#define KEY_NEXT               0x197
+#define KEY_RESTART            0x198
+#define KEY_SLOW               0x199
+#define KEY_SHUFFLE            0x19a
+#define KEY_BREAK              0x19b
+#define KEY_PREVIOUS           0x19c
+#define KEY_DIGITS             0x19d
+#define KEY_TEEN               0x19e
+#define KEY_TWEN               0x19f
+#define KEY_VIDEOPHONE         0x1a0   /* Media Select Video Phone */
+#define KEY_GAMES              0x1a1   /* Media Select Games */
+#define KEY_ZOOMIN             0x1a2   /* AC Zoom In */
+#define KEY_ZOOMOUT            0x1a3   /* AC Zoom Out */
+#define KEY_ZOOMRESET          0x1a4   /* AC Zoom */
+#define KEY_WORDPROCESSOR      0x1a5   /* AL Word Processor */
+#define KEY_EDITOR             0x1a6   /* AL Text Editor */
+#define KEY_SPREADSHEET                0x1a7   /* AL Spreadsheet */
+#define KEY_GRAPHICSEDITOR     0x1a8   /* AL Graphics Editor */
+#define KEY_PRESENTATION       0x1a9   /* AL Presentation App */
+#define KEY_DATABASE           0x1aa   /* AL Database App */
+#define KEY_NEWS               0x1ab   /* AL Newsreader */
+#define KEY_VOICEMAIL          0x1ac   /* AL Voicemail */
+#define KEY_ADDRESSBOOK                0x1ad   /* AL Contacts/Address Book */
+#define KEY_MESSENGER          0x1ae   /* AL Instant Messaging */
+#define KEY_DISPLAYTOGGLE      0x1af   /* Turn display (LCD) on and off */
+#define KEY_SPELLCHECK         0x1b0   /* AL Spell Check */
+#define KEY_LOGOFF             0x1b1   /* AL Logoff */
+
+#define KEY_DOLLAR             0x1b2
+#define KEY_EURO               0x1b3
+
+#define KEY_FRAMEBACK          0x1b4   /* Consumer - transport controls */
+#define KEY_FRAMEFORWARD       0x1b5
+#define KEY_CONTEXT_MENU       0x1b6   /* GenDesc - system context menu */
+#define KEY_MEDIA_REPEAT       0x1b7   /* Consumer - transport control */
+#define KEY_10CHANNELSUP       0x1b8   /* 10 channels up (10+) */
+#define KEY_10CHANNELSDOWN     0x1b9   /* 10 channels down (10-) */
+#define KEY_IMAGES             0x1ba   /* AL Image Browser */
+
+#define KEY_DEL_EOL            0x1c0
+#define KEY_DEL_EOS            0x1c1
+#define KEY_INS_LINE           0x1c2
+#define KEY_DEL_LINE           0x1c3
+
+#define KEY_FN                 0x1d0
+#define KEY_FN_ESC             0x1d1
+#define KEY_FN_F1              0x1d2
+#define KEY_FN_F2              0x1d3
+#define KEY_FN_F3              0x1d4
+#define KEY_FN_F4              0x1d5
+#define KEY_FN_F5              0x1d6
+#define KEY_FN_F6              0x1d7
+#define KEY_FN_F7              0x1d8
+#define KEY_FN_F8              0x1d9
+#define KEY_FN_F9              0x1da
+#define KEY_FN_F10             0x1db
+#define KEY_FN_F11             0x1dc
+#define KEY_FN_F12             0x1dd
+#define KEY_FN_1               0x1de
+#define KEY_FN_2               0x1df
+#define KEY_FN_D               0x1e0
+#define KEY_FN_E               0x1e1
+#define KEY_FN_F               0x1e2
+#define KEY_FN_S               0x1e3
+#define KEY_FN_B               0x1e4
+
+#define KEY_BRL_DOT1           0x1f1
+#define KEY_BRL_DOT2           0x1f2
+#define KEY_BRL_DOT3           0x1f3
+#define KEY_BRL_DOT4           0x1f4
+#define KEY_BRL_DOT5           0x1f5
+#define KEY_BRL_DOT6           0x1f6
+#define KEY_BRL_DOT7           0x1f7
+#define KEY_BRL_DOT8           0x1f8
+#define KEY_BRL_DOT9           0x1f9
+#define KEY_BRL_DOT10          0x1fa
+
+#define KEY_NUMERIC_0          0x200   /* used by phones, remote controls, */
+#define KEY_NUMERIC_1          0x201   /* and other keypads */
+#define KEY_NUMERIC_2          0x202
+#define KEY_NUMERIC_3          0x203
+#define KEY_NUMERIC_4          0x204
+#define KEY_NUMERIC_5          0x205
+#define KEY_NUMERIC_6          0x206
+#define KEY_NUMERIC_7          0x207
+#define KEY_NUMERIC_8          0x208
+#define KEY_NUMERIC_9          0x209
+#define KEY_NUMERIC_STAR       0x20a
+#define KEY_NUMERIC_POUND      0x20b
+
+#define KEY_CAMERA_FOCUS       0x210
+#define KEY_WPS_BUTTON         0x211   /* WiFi Protected Setup key */
+
+#define KEY_TOUCHPAD_TOGGLE    0x212   /* Request switch touchpad on or off */
+#define KEY_TOUCHPAD_ON                0x213
+#define KEY_TOUCHPAD_OFF       0x214
+
+#define KEY_CAMERA_ZOOMIN      0x215
+#define KEY_CAMERA_ZOOMOUT     0x216
+#define KEY_CAMERA_UP          0x217
+#define KEY_CAMERA_DOWN                0x218
+#define KEY_CAMERA_LEFT                0x219
+#define KEY_CAMERA_RIGHT       0x21a
+
+#define KEY_ATTENDANT_ON       0x21b
+#define KEY_ATTENDANT_OFF      0x21c
+#define KEY_ATTENDANT_TOGGLE   0x21d   /* Attendant call on or off */
+#define KEY_LIGHTS_TOGGLE      0x21e   /* Reading light on or off */
+
+#define BTN_DPAD_UP            0x220
+#define BTN_DPAD_DOWN          0x221
+#define BTN_DPAD_LEFT          0x222
+#define BTN_DPAD_RIGHT         0x223
+
+#define MATRIX_KEY(row, col, code)     \
+       ((((row) & 0xFF) << 24) | (((col) & 0xFF) << 16) | ((code) & 0xFFFF))
+
+#endif /* _DT_BINDINGS_INPUT_INPUT_H */
diff --git a/include/dt-bindings/sound/fsl-imx-audmux.h b/include/dt-bindings/sound/fsl-imx-audmux.h
new file mode 100644 (file)
index 0000000..50b09e9
--- /dev/null
@@ -0,0 +1,56 @@
+#ifndef __DT_FSL_IMX_AUDMUX_H
+#define __DT_FSL_IMX_AUDMUX_H
+
+#define MX27_AUDMUX_HPCR1_SSI0         0
+#define MX27_AUDMUX_HPCR2_SSI1         1
+#define MX27_AUDMUX_HPCR3_SSI_PINS_4   2
+#define MX27_AUDMUX_PPCR1_SSI_PINS_1   3
+#define MX27_AUDMUX_PPCR2_SSI_PINS_2   4
+#define MX27_AUDMUX_PPCR3_SSI_PINS_3   5
+
+#define MX31_AUDMUX_PORT1_SSI0         0
+#define MX31_AUDMUX_PORT2_SSI1         1
+#define MX31_AUDMUX_PORT3_SSI_PINS_3   2
+#define MX31_AUDMUX_PORT4_SSI_PINS_4   3
+#define MX31_AUDMUX_PORT5_SSI_PINS_5   4
+#define MX31_AUDMUX_PORT6_SSI_PINS_6   5
+#define MX31_AUDMUX_PORT7_SSI_PINS_7   6
+
+#define MX51_AUDMUX_PORT1_SSI0         0
+#define MX51_AUDMUX_PORT2_SSI1         1
+#define MX51_AUDMUX_PORT3              2
+#define MX51_AUDMUX_PORT4              3
+#define MX51_AUDMUX_PORT5              4
+#define MX51_AUDMUX_PORT6              5
+#define MX51_AUDMUX_PORT7              6
+
+/* Register definitions for the i.MX21/27 Digital Audio Multiplexer */
+#define IMX_AUDMUX_V1_PCR_INMMASK(x)   ((x) & 0xff)
+#define IMX_AUDMUX_V1_PCR_INMEN                (1 << 8)
+#define IMX_AUDMUX_V1_PCR_TXRXEN       (1 << 10)
+#define IMX_AUDMUX_V1_PCR_SYN          (1 << 12)
+#define IMX_AUDMUX_V1_PCR_RXDSEL(x)    (((x) & 0x7) << 13)
+#define IMX_AUDMUX_V1_PCR_RFCSEL(x)    (((x) & 0xf) << 20)
+#define IMX_AUDMUX_V1_PCR_RCLKDIR      (1 << 24)
+#define IMX_AUDMUX_V1_PCR_RFSDIR       (1 << 25)
+#define IMX_AUDMUX_V1_PCR_TFCSEL(x)    (((x) & 0xf) << 26)
+#define IMX_AUDMUX_V1_PCR_TCLKDIR      (1 << 30)
+#define IMX_AUDMUX_V1_PCR_TFSDIR       (1 << 31)
+
+/* Register definitions for the i.MX25/31/35/51 Digital Audio Multiplexer */
+#define IMX_AUDMUX_V2_PTCR_TFSDIR      (1 << 31)
+#define IMX_AUDMUX_V2_PTCR_TFSEL(x)    (((x) & 0xf) << 27)
+#define IMX_AUDMUX_V2_PTCR_TCLKDIR     (1 << 26)
+#define IMX_AUDMUX_V2_PTCR_TCSEL(x)    (((x) & 0xf) << 22)
+#define IMX_AUDMUX_V2_PTCR_RFSDIR      (1 << 21)
+#define IMX_AUDMUX_V2_PTCR_RFSEL(x)    (((x) & 0xf) << 17)
+#define IMX_AUDMUX_V2_PTCR_RCLKDIR     (1 << 16)
+#define IMX_AUDMUX_V2_PTCR_RCSEL(x)    (((x) & 0xf) << 12)
+#define IMX_AUDMUX_V2_PTCR_SYN         (1 << 11)
+
+#define IMX_AUDMUX_V2_PDCR_RXDSEL(x)   (((x) & 0x7) << 13)
+#define IMX_AUDMUX_V2_PDCR_TXRXEN      (1 << 12)
+#define IMX_AUDMUX_V2_PDCR_MODE(x)     (((x) & 0x3) << 8)
+#define IMX_AUDMUX_V2_PDCR_INMMASK(x)  ((x) & 0xff)
+
+#endif /* __DT_FSL_IMX_AUDMUX_H */
index 353ba256f3681e4f6ddd8b18b9feba61b1d98cec..a5db4aeefa3642107e28b431c21a89c195569993 100644 (file)
@@ -481,6 +481,13 @@ void acpi_os_set_prepare_sleep(int (*func)(u8 sleep_state,
 
 acpi_status acpi_os_prepare_sleep(u8 sleep_state,
                                  u32 pm1a_control, u32 pm1b_control);
+
+void acpi_os_set_prepare_extended_sleep(int (*func)(u8 sleep_state,
+                                       u32 val_a,  u32 val_b));
+
+acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state,
+                                          u32 val_a, u32 val_b);
+
 #ifdef CONFIG_X86
 void arch_reserve_mem_area(acpi_physical_address addr, size_t size);
 #else
index 1bdf965339f9bef4222a5bc739efc0b23807e35e..2bb87b52cb74a6a2f4ff5d7e2474ebf7a5342344 100644 (file)
@@ -33,13 +33,15 @@ struct kiocb {
        atomic_t                ki_users;
 
        struct file             *ki_filp;
-       struct kioctx           *ki_ctx;        /* NULL for sync ops */
+       struct kioctx           *ki_ctx;        /* NULL for sync ops,
+                                                  -1 for kernel caller */
        kiocb_cancel_fn         *ki_cancel;
        void                    (*ki_dtor)(struct kiocb *);
 
        union {
                void __user             *user;
                struct task_struct      *tsk;
+               void                    (*complete)(u64 user_data, long res);
        } ki_obj;
 
        __u64                   ki_user_data;   /* user's data for completion */
@@ -64,6 +66,7 @@ struct kiocb {
         * this is the underlying eventfd context to deliver events to.
         */
        struct eventfd_ctx      *ki_eventfd;
+       struct iov_iter         *ki_iter;
 };
 
 static inline bool is_sync_kiocb(struct kiocb *kiocb)
@@ -71,6 +74,11 @@ static inline bool is_sync_kiocb(struct kiocb *kiocb)
        return kiocb->ki_ctx == NULL;
 }
 
+static inline bool is_kernel_kiocb(struct kiocb *kiocb)
+{
+       return kiocb->ki_ctx == (void *)-1;
+}
+
 static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp)
 {
        *kiocb = (struct kiocb) {
@@ -91,6 +99,14 @@ extern void exit_aio(struct mm_struct *mm);
 extern long do_io_submit(aio_context_t ctx_id, long nr,
                         struct iocb __user *__user *iocbpp, bool compat);
 void kiocb_set_cancel_fn(struct kiocb *req, kiocb_cancel_fn *cancel);
+struct kiocb *aio_kernel_alloc(gfp_t gfp);
+void aio_kernel_free(struct kiocb *iocb);
+void aio_kernel_init_rw(struct kiocb *iocb, struct file *filp, size_t nr,
+                       loff_t off);
+void aio_kernel_init_callback(struct kiocb *iocb,
+                             void (*complete)(u64 user_data, long res),
+                             u64 user_data);
+int aio_kernel_submit(struct kiocb *iocb, unsigned short op, void *ptr);
 #else
 static inline ssize_t wait_on_sync_kiocb(struct kiocb *iocb) { return 0; }
 static inline void aio_put_req(struct kiocb *iocb) { }
index ee0bd95240553576bf51672015c756aa0dea3b94..f63fb1afc5ccc8a6146dedeae449e4075d4f7653 100644 (file)
@@ -446,22 +446,6 @@ enum {
        SERR_TRANS_ST_ERROR     = (1 << 24), /* Transport state trans. error */
        SERR_UNRECOG_FIS        = (1 << 25), /* Unrecognized FIS */
        SERR_DEV_XCHG           = (1 << 26), /* device exchanged */
-
-       /* struct ata_taskfile flags */
-       ATA_TFLAG_LBA48         = (1 << 0), /* enable 48-bit LBA and "HOB" */
-       ATA_TFLAG_ISADDR        = (1 << 1), /* enable r/w to nsect/lba regs */
-       ATA_TFLAG_DEVICE        = (1 << 2), /* enable r/w to device reg */
-       ATA_TFLAG_WRITE         = (1 << 3), /* data dir: host->dev==1 (write) */
-       ATA_TFLAG_LBA           = (1 << 4), /* enable LBA */
-       ATA_TFLAG_FUA           = (1 << 5), /* enable FUA */
-       ATA_TFLAG_POLLING       = (1 << 6), /* set nIEN to 1 and use polling */
-
-       /* protocol flags */
-       ATA_PROT_FLAG_PIO       = (1 << 0), /* is PIO */
-       ATA_PROT_FLAG_DMA       = (1 << 1), /* is DMA */
-       ATA_PROT_FLAG_DATA      = ATA_PROT_FLAG_PIO | ATA_PROT_FLAG_DMA,
-       ATA_PROT_FLAG_NCQ       = (1 << 2), /* is NCQ */
-       ATA_PROT_FLAG_ATAPI     = (1 << 3), /* is ATAPI */
 };
 
 enum ata_tf_protocols {
@@ -488,83 +472,6 @@ struct ata_bmdma_prd {
        __le32                  flags_len;
 };
 
-struct ata_taskfile {
-       unsigned long           flags;          /* ATA_TFLAG_xxx */
-       u8                      protocol;       /* ATA_PROT_xxx */
-
-       u8                      ctl;            /* control reg */
-
-       u8                      hob_feature;    /* additional data */
-       u8                      hob_nsect;      /* to support LBA48 */
-       u8                      hob_lbal;
-       u8                      hob_lbam;
-       u8                      hob_lbah;
-
-       u8                      feature;
-       u8                      nsect;
-       u8                      lbal;
-       u8                      lbam;
-       u8                      lbah;
-
-       u8                      device;
-
-       u8                      command;        /* IO operation */
-};
-
-/*
- * protocol tests
- */
-static inline unsigned int ata_prot_flags(u8 prot)
-{
-       switch (prot) {
-       case ATA_PROT_NODATA:
-               return 0;
-       case ATA_PROT_PIO:
-               return ATA_PROT_FLAG_PIO;
-       case ATA_PROT_DMA:
-               return ATA_PROT_FLAG_DMA;
-       case ATA_PROT_NCQ:
-               return ATA_PROT_FLAG_DMA | ATA_PROT_FLAG_NCQ;
-       case ATAPI_PROT_NODATA:
-               return ATA_PROT_FLAG_ATAPI;
-       case ATAPI_PROT_PIO:
-               return ATA_PROT_FLAG_ATAPI | ATA_PROT_FLAG_PIO;
-       case ATAPI_PROT_DMA:
-               return ATA_PROT_FLAG_ATAPI | ATA_PROT_FLAG_DMA;
-       }
-       return 0;
-}
-
-static inline int ata_is_atapi(u8 prot)
-{
-       return ata_prot_flags(prot) & ATA_PROT_FLAG_ATAPI;
-}
-
-static inline int ata_is_nodata(u8 prot)
-{
-       return !(ata_prot_flags(prot) & ATA_PROT_FLAG_DATA);
-}
-
-static inline int ata_is_pio(u8 prot)
-{
-       return ata_prot_flags(prot) & ATA_PROT_FLAG_PIO;
-}
-
-static inline int ata_is_dma(u8 prot)
-{
-       return ata_prot_flags(prot) & ATA_PROT_FLAG_DMA;
-}
-
-static inline int ata_is_ncq(u8 prot)
-{
-       return ata_prot_flags(prot) & ATA_PROT_FLAG_NCQ;
-}
-
-static inline int ata_is_data(u8 prot)
-{
-       return ata_prot_flags(prot) & ATA_PROT_FLAG_DATA;
-}
-
 /*
  * id tests
  */
@@ -1060,15 +967,6 @@ static inline unsigned ata_set_lba_range_entries(void *_buffer,
        return used_bytes;
 }
 
-static inline int is_multi_taskfile(struct ata_taskfile *tf)
-{
-       return (tf->command == ATA_CMD_READ_MULTI) ||
-              (tf->command == ATA_CMD_WRITE_MULTI) ||
-              (tf->command == ATA_CMD_READ_MULTI_EXT) ||
-              (tf->command == ATA_CMD_WRITE_MULTI_EXT) ||
-              (tf->command == ATA_CMD_WRITE_MULTI_FUA_EXT);
-}
-
 static inline bool ata_ok(u8 status)
 {
        return ((status & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ | ATA_ERR))
index deb0ae58b99bb3724bee4376b0a376b563c29cff..66a0e5384edd6090ae3793082c8a4f56fb6ea74d 100644 (file)
@@ -11,7 +11,7 @@ struct atmel_ssc_platform_data {
 
 struct ssc_device {
        struct list_head        list;
-       resource_size_t         phybase;
+       dma_addr_t              phybase;
        void __iomem            *regs;
        struct platform_device  *pdev;
        struct atmel_ssc_platform_data *pdata;
index 622fc505d3e1cdfcfe8b2abc8f22800570c6f304..4d043c30216fc978f1d78bd1425177efef623186 100644 (file)
@@ -72,7 +72,19 @@ struct bcma_host_ops {
 /* Core-ID values. */
 #define BCMA_CORE_OOB_ROUTER           0x367   /* Out of band */
 #define BCMA_CORE_4706_CHIPCOMMON      0x500
+#define BCMA_CORE_PCIEG2               0x501
+#define BCMA_CORE_DMA                  0x502
+#define BCMA_CORE_SDIO3                        0x503
+#define BCMA_CORE_USB20                        0x504
+#define BCMA_CORE_USB30                        0x505
+#define BCMA_CORE_A9JTAG               0x506
+#define BCMA_CORE_DDR23                        0x507
+#define BCMA_CORE_ROM                  0x508
+#define BCMA_CORE_NAND                 0x509
+#define BCMA_CORE_QSPI                 0x50A
+#define BCMA_CORE_CHIPCOMMON_B         0x50B
 #define BCMA_CORE_4706_SOC_RAM         0x50E
+#define BCMA_CORE_ARMCA9               0x510
 #define BCMA_CORE_4706_MAC_GBIT                0x52D
 #define BCMA_CORE_AMEMC                        0x52E   /* DDR1/2 memory controller core */
 #define BCMA_CORE_ALTA                 0x534   /* I2S core */
@@ -177,6 +189,11 @@ struct bcma_host_ops {
 #define  BCMA_PKG_ID_BCM5357   11
 #define BCMA_CHIP_ID_BCM53572  53572
 #define  BCMA_PKG_ID_BCM47188  9
+#define BCMA_CHIP_ID_BCM4707   53010
+#define  BCMA_PKG_ID_BCM4707   1
+#define  BCMA_PKG_ID_BCM4708   2
+#define  BCMA_PKG_ID_BCM4709   0
+#define BCMA_CHIP_ID_BCM53018  53018
 
 /* Board types (on PCI usually equals to the subsystem dev id) */
 /* BCM4313 */
index 70cf138690e9184678b1dea1aa9a23dd1a10a487..2c2b7c74f22f6ab6102d9b62063f713d140ff8dc 100644 (file)
@@ -61,6 +61,7 @@ struct coredump_params {
        struct file *file;
        unsigned long limit;
        unsigned long mm_flags;
+       unsigned long written;
 };
 
 /*
index ec48bac5b039d25cd74829b1ba4c6a1506412dec..4fd52534259697abc49023fe28a7b30cbaf1cc23 100644 (file)
@@ -307,6 +307,14 @@ extern struct bio_vec *bvec_alloc(gfp_t, int, unsigned long *, mempool_t *);
 extern void bvec_free(mempool_t *, struct bio_vec *, unsigned int);
 extern unsigned int bvec_nr_vecs(unsigned short idx);
 
+static inline ssize_t bvec_length(const struct bio_vec *bvec, unsigned long nr)
+{
+       ssize_t bytes = 0;
+       while (nr--)
+               bytes += (bvec++)->bv_len;
+       return bytes;
+}
+
 #ifdef CONFIG_BLK_CGROUP
 int bio_associate_current(struct bio *bio);
 void bio_disassociate_task(struct bio *bio);
index fa1abeb45b7602a4f0c1a4098f05f63d7a075281..1bea25f14796f8fabed2b310adde5316fe893af1 100644 (file)
@@ -176,7 +176,6 @@ enum rq_flag_bits {
        __REQ_FLUSH_SEQ,        /* request for flush sequence */
        __REQ_IO_STAT,          /* account I/O stat */
        __REQ_MIXED_MERGE,      /* merge of different types, fail separately */
-       __REQ_KERNEL,           /* direct IO to kernel pages */
        __REQ_PM,               /* runtime pm request */
        __REQ_NR_BITS,          /* stops here */
 };
@@ -227,7 +226,6 @@ enum rq_flag_bits {
 #define REQ_IO_STAT            (1 << __REQ_IO_STAT)
 #define REQ_MIXED_MERGE                (1 << __REQ_MIXED_MERGE)
 #define REQ_SECURE             (1 << __REQ_SECURE)
-#define REQ_KERNEL             (1 << __REQ_KERNEL)
 #define REQ_PM                 (1 << __REQ_PM)
 
 #endif /* __LINUX_BLK_TYPES_H */
index e9ac882868c0353f8f6bb7f2baba2a73c8602c41..b685955d4b29bfec2f6d33c0eba8e610cb6960e2 100644 (file)
@@ -66,22 +66,25 @@ enum cgroup_subsys_id {
 
 /* Per-subsystem/per-cgroup state maintained by the system. */
 struct cgroup_subsys_state {
-       /*
-        * The cgroup that this subsystem is attached to. Useful
-        * for subsystems that want to know about the cgroup
-        * hierarchy structure
-        */
+       /* the cgroup that this css is attached to */
        struct cgroup *cgroup;
 
+       /* the cgroup subsystem that this css is attached to */
+       struct cgroup_subsys *ss;
+
        /* reference count - access via css_[try]get() and css_put() */
        struct percpu_ref refcnt;
 
+       /* the parent css */
+       struct cgroup_subsys_state *parent;
+
        unsigned long flags;
        /* ID for this css, if possible */
        struct css_id __rcu *id;
 
-       /* Used to put @cgroup->dentry on the last css_put() */
-       struct work_struct dput_work;
+       /* percpu_ref killing and RCU release */
+       struct rcu_head rcu_head;
+       struct work_struct destroy_work;
 };
 
 /* bits in struct cgroup_subsys_state flags field */
@@ -161,7 +164,16 @@ struct cgroup_name {
 struct cgroup {
        unsigned long flags;            /* "unsigned long" so bitops work */
 
-       int id;                         /* ida allocated in-hierarchy ID */
+       /*
+        * idr allocated in-hierarchy ID.
+        *
+        * The ID of the root cgroup is always 0, and a new cgroup
+        * will be assigned with a smallest available ID.
+        */
+       int id;
+
+       /* the number of attached css's */
+       int nr_css;
 
        /*
         * We link our 'sibling' struct into our parent's 'children'.
@@ -196,7 +208,7 @@ struct cgroup {
        struct cgroup_name __rcu *name;
 
        /* Private pointers for each registered subsystem */
-       struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT];
+       struct cgroup_subsys_state __rcu *subsys[CGROUP_SUBSYS_COUNT];
 
        struct cgroupfs_root *root;
 
@@ -220,10 +232,12 @@ struct cgroup {
        struct list_head pidlists;
        struct mutex pidlist_mutex;
 
+       /* dummy css with NULL ->ss, points back to this cgroup */
+       struct cgroup_subsys_state dummy_css;
+
        /* For css percpu_ref killing and RCU-protected deletion */
        struct rcu_head rcu_head;
        struct work_struct destroy_work;
-       atomic_t css_kill_cnt;
 
        /* List of events which userspace want to receive */
        struct list_head event_list;
@@ -322,7 +336,7 @@ struct cgroupfs_root {
        unsigned long flags;
 
        /* IDs for cgroups in this hierarchy */
-       struct ida cgroup_ida;
+       struct idr cgroup_idr;
 
        /* The path to use for release notifications. */
        char release_agent_path[PATH_MAX];
@@ -394,8 +408,8 @@ struct cgroup_map_cb {
 
 /* cftype->flags */
 enum {
-       CFTYPE_ONLY_ON_ROOT     = (1 << 0),     /* only create on root cg */
-       CFTYPE_NOT_ON_ROOT      = (1 << 1),     /* don't create on root cg */
+       CFTYPE_ONLY_ON_ROOT     = (1 << 0),     /* only create on root cgrp */
+       CFTYPE_NOT_ON_ROOT      = (1 << 1),     /* don't create on root cgrp */
        CFTYPE_INSANE           = (1 << 2),     /* don't create if sane_behavior */
 };
 
@@ -424,35 +438,41 @@ struct cftype {
        /* CFTYPE_* flags */
        unsigned int flags;
 
+       /*
+        * The subsys this file belongs to.  Initialized automatically
+        * during registration.  NULL for cgroup core files.
+        */
+       struct cgroup_subsys *ss;
+
        int (*open)(struct inode *inode, struct file *file);
-       ssize_t (*read)(struct cgroup *cgrp, struct cftype *cft,
+       ssize_t (*read)(struct cgroup_subsys_state *css, struct cftype *cft,
                        struct file *file,
                        char __user *buf, size_t nbytes, loff_t *ppos);
        /*
         * read_u64() is a shortcut for the common case of returning a
         * single integer. Use it in place of read()
         */
-       u64 (*read_u64)(struct cgroup *cgrp, struct cftype *cft);
+       u64 (*read_u64)(struct cgroup_subsys_state *css, struct cftype *cft);
        /*
         * read_s64() is a signed version of read_u64()
         */
-       s64 (*read_s64)(struct cgroup *cgrp, struct cftype *cft);
+       s64 (*read_s64)(struct cgroup_subsys_state *css, struct cftype *cft);
        /*
         * read_map() is used for defining a map of key/value
         * pairs. It should call cb->fill(cb, key, value) for each
         * entry. The key/value pairs (and their ordering) should not
         * change between reboots.
         */
-       int (*read_map)(struct cgroup *cgrp, struct cftype *cft,
+       int (*read_map)(struct cgroup_subsys_state *css, struct cftype *cft,
                        struct cgroup_map_cb *cb);
        /*
         * read_seq_string() is used for outputting a simple sequence
         * using seqfile.
         */
-       int (*read_seq_string)(struct cgroup *cgrp, struct cftype *cft,
-                              struct seq_file *m);
+       int (*read_seq_string)(struct cgroup_subsys_state *css,
+                              struct cftype *cft, struct seq_file *m);
 
-       ssize_t (*write)(struct cgroup *cgrp, struct cftype *cft,
+       ssize_t (*write)(struct cgroup_subsys_state *css, struct cftype *cft,
                         struct file *file,
                         const char __user *buf, size_t nbytes, loff_t *ppos);
 
@@ -461,18 +481,20 @@ struct cftype {
         * a single integer (as parsed by simple_strtoull) from
         * userspace. Use in place of write(); return 0 or error.
         */
-       int (*write_u64)(struct cgroup *cgrp, struct cftype *cft, u64 val);
+       int (*write_u64)(struct cgroup_subsys_state *css, struct cftype *cft,
+                        u64 val);
        /*
         * write_s64() is a signed version of write_u64()
         */
-       int (*write_s64)(struct cgroup *cgrp, struct cftype *cft, s64 val);
+       int (*write_s64)(struct cgroup_subsys_state *css, struct cftype *cft,
+                        s64 val);
 
        /*
         * write_string() is passed a nul-terminated kernelspace
         * buffer of maximum length determined by max_write_len.
         * Returns 0 or -ve error code.
         */
-       int (*write_string)(struct cgroup *cgrp, struct cftype *cft,
+       int (*write_string)(struct cgroup_subsys_state *css, struct cftype *cft,
                            const char *buffer);
        /*
         * trigger() callback can be used to get some kick from the
@@ -480,7 +502,7 @@ struct cftype {
         * at all. The private field can be used to determine the
         * kick type for multiplexing.
         */
-       int (*trigger)(struct cgroup *cgrp, unsigned int event);
+       int (*trigger)(struct cgroup_subsys_state *css, unsigned int event);
 
        int (*release)(struct inode *inode, struct file *file);
 
@@ -490,16 +512,18 @@ struct cftype {
         * you want to provide this functionality. Use eventfd_signal()
         * on eventfd to send notification to userspace.
         */
-       int (*register_event)(struct cgroup *cgrp, struct cftype *cft,
-                       struct eventfd_ctx *eventfd, const char *args);
+       int (*register_event)(struct cgroup_subsys_state *css,
+                             struct cftype *cft, struct eventfd_ctx *eventfd,
+                             const char *args);
        /*
         * unregister_event() callback will be called when userspace
         * closes the eventfd or on cgroup removing.
         * This callback must be implemented, if you want provide
         * notification functionality.
         */
-       void (*unregister_event)(struct cgroup *cgrp, struct cftype *cft,
-                       struct eventfd_ctx *eventfd);
+       void (*unregister_event)(struct cgroup_subsys_state *css,
+                                struct cftype *cft,
+                                struct eventfd_ctx *eventfd);
 };
 
 /*
@@ -512,15 +536,6 @@ struct cftype_set {
        struct cftype                   *cfts;
 };
 
-struct cgroup_scanner {
-       struct cgroup *cg;
-       int (*test_task)(struct task_struct *p, struct cgroup_scanner *scan);
-       void (*process_task)(struct task_struct *p,
-                       struct cgroup_scanner *scan);
-       struct ptr_heap *heap;
-       void *data;
-};
-
 /*
  * See the comment above CGRP_ROOT_SANE_BEHAVIOR for details.  This
  * function can be called as long as @cgrp is accessible.
@@ -537,7 +552,7 @@ static inline const char *cgroup_name(const struct cgroup *cgrp)
 }
 
 int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
-int cgroup_rm_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
+int cgroup_rm_cftypes(struct cftype *cfts);
 
 bool cgroup_is_descendant(struct cgroup *cgrp, struct cgroup *ancestor);
 
@@ -553,20 +568,22 @@ int cgroup_task_count(const struct cgroup *cgrp);
 struct cgroup_taskset;
 struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset);
 struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset);
-struct cgroup *cgroup_taskset_cur_cgroup(struct cgroup_taskset *tset);
+struct cgroup_subsys_state *cgroup_taskset_cur_css(struct cgroup_taskset *tset,
+                                                  int subsys_id);
 int cgroup_taskset_size(struct cgroup_taskset *tset);
 
 /**
  * cgroup_taskset_for_each - iterate cgroup_taskset
  * @task: the loop cursor
- * @skip_cgrp: skip if task's cgroup matches this, %NULL to iterate through all
+ * @skip_css: skip if task's css matches this, %NULL to iterate through all
  * @tset: taskset to iterate
  */
-#define cgroup_taskset_for_each(task, skip_cgrp, tset)                 \
+#define cgroup_taskset_for_each(task, skip_css, tset)                  \
        for ((task) = cgroup_taskset_first((tset)); (task);             \
             (task) = cgroup_taskset_next((tset)))                      \
-               if (!(skip_cgrp) ||                                     \
-                   cgroup_taskset_cur_cgroup((tset)) != (skip_cgrp))
+               if (!(skip_css) ||                                      \
+                   cgroup_taskset_cur_css((tset),                      \
+                       (skip_css)->ss->subsys_id) != (skip_css))
 
 /*
  * Control Group subsystem type.
@@ -574,18 +591,22 @@ int cgroup_taskset_size(struct cgroup_taskset *tset);
  */
 
 struct cgroup_subsys {
-       struct cgroup_subsys_state *(*css_alloc)(struct cgroup *cgrp);
-       int (*css_online)(struct cgroup *cgrp);
-       void (*css_offline)(struct cgroup *cgrp);
-       void (*css_free)(struct cgroup *cgrp);
-
-       int (*can_attach)(struct cgroup *cgrp, struct cgroup_taskset *tset);
-       void (*cancel_attach)(struct cgroup *cgrp, struct cgroup_taskset *tset);
-       void (*attach)(struct cgroup *cgrp, struct cgroup_taskset *tset);
+       struct cgroup_subsys_state *(*css_alloc)(struct cgroup_subsys_state *parent_css);
+       int (*css_online)(struct cgroup_subsys_state *css);
+       void (*css_offline)(struct cgroup_subsys_state *css);
+       void (*css_free)(struct cgroup_subsys_state *css);
+
+       int (*can_attach)(struct cgroup_subsys_state *css,
+                         struct cgroup_taskset *tset);
+       void (*cancel_attach)(struct cgroup_subsys_state *css,
+                             struct cgroup_taskset *tset);
+       void (*attach)(struct cgroup_subsys_state *css,
+                      struct cgroup_taskset *tset);
        void (*fork)(struct task_struct *task);
-       void (*exit)(struct cgroup *cgrp, struct cgroup *old_cgrp,
+       void (*exit)(struct cgroup_subsys_state *css,
+                    struct cgroup_subsys_state *old_css,
                     struct task_struct *task);
-       void (*bind)(struct cgroup *root);
+       void (*bind)(struct cgroup_subsys_state *root_css);
 
        int subsys_id;
        int disabled;
@@ -641,10 +662,17 @@ struct cgroup_subsys {
 #undef IS_SUBSYS_ENABLED
 #undef SUBSYS
 
-static inline struct cgroup_subsys_state *cgroup_subsys_state(
-       struct cgroup *cgrp, int subsys_id)
+/**
+ * css_parent - find the parent css
+ * @css: the target cgroup_subsys_state
+ *
+ * Return the parent css of @css.  This function is guaranteed to return
+ * non-NULL parent as long as @css isn't the root.
+ */
+static inline
+struct cgroup_subsys_state *css_parent(struct cgroup_subsys_state *css)
 {
-       return cgrp->subsys[subsys_id];
+       return css->parent;
 }
 
 /**
@@ -672,7 +700,7 @@ extern struct mutex cgroup_mutex;
 #endif
 
 /**
- * task_subsys_state_check - obtain css for (task, subsys) w/ extra access conds
+ * task_css_check - obtain css for (task, subsys) w/ extra access conds
  * @task: the target task
  * @subsys_id: the target subsystem ID
  * @__c: extra condition expression to be passed to rcu_dereference_check()
@@ -680,7 +708,7 @@ extern struct mutex cgroup_mutex;
  * Return the cgroup_subsys_state for the (@task, @subsys_id) pair.  The
  * synchronization rules are the same as task_css_set_check().
  */
-#define task_subsys_state_check(task, subsys_id, __c)                  \
+#define task_css_check(task, subsys_id, __c)                           \
        task_css_set_check((task), (__c))->subsys[(subsys_id)]
 
 /**
@@ -695,87 +723,92 @@ static inline struct css_set *task_css_set(struct task_struct *task)
 }
 
 /**
- * task_subsys_state - obtain css for (task, subsys)
+ * task_css - obtain css for (task, subsys)
  * @task: the target task
  * @subsys_id: the target subsystem ID
  *
- * See task_subsys_state_check().
+ * See task_css_check().
  */
-static inline struct cgroup_subsys_state *
-task_subsys_state(struct task_struct *task, int subsys_id)
+static inline struct cgroup_subsys_state *task_css(struct task_struct *task,
+                                                  int subsys_id)
 {
-       return task_subsys_state_check(task, subsys_id, false);
+       return task_css_check(task, subsys_id, false);
 }
 
-static inline struct cgroup *task_cgroup(struct task_struct *task,
-                                              int subsys_id)
+static inline struct cgroup *task_cgroup(struct task_struct *task,
+                                        int subsys_id)
 {
-       return task_subsys_state(task, subsys_id)->cgroup;
+       return task_css(task, subsys_id)->cgroup;
 }
 
-struct cgroup *cgroup_next_sibling(struct cgroup *pos);
+struct cgroup_subsys_state *css_next_child(struct cgroup_subsys_state *pos,
+                                          struct cgroup_subsys_state *parent);
+
+struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss);
 
 /**
- * cgroup_for_each_child - iterate through children of a cgroup
- * @pos: the cgroup * to use as the loop cursor
- * @cgrp: cgroup whose children to walk
+ * css_for_each_child - iterate through children of a css
+ * @pos: the css * to use as the loop cursor
+ * @parent: css whose children to walk
  *
- * Walk @cgrp's children.  Must be called under rcu_read_lock().  A child
- * cgroup which hasn't finished ->css_online() or already has finished
+ * Walk @parent's children.  Must be called under rcu_read_lock().  A child
+ * css which hasn't finished ->css_online() or already has finished
  * ->css_offline() may show up during traversal and it's each subsystem's
  * responsibility to verify that each @pos is alive.
  *
  * If a subsystem synchronizes against the parent in its ->css_online() and
- * before starting iterating, a cgroup which finished ->css_online() is
+ * before starting iterating, a css which finished ->css_online() is
  * guaranteed to be visible in the future iterations.
  *
  * It is allowed to temporarily drop RCU read lock during iteration.  The
  * caller is responsible for ensuring that @pos remains accessible until
  * the start of the next iteration by, for example, bumping the css refcnt.
  */
-#define cgroup_for_each_child(pos, cgrp)                               \
-       for ((pos) = list_first_or_null_rcu(&(cgrp)->children,          \
-                                           struct cgroup, sibling);    \
-            (pos); (pos) = cgroup_next_sibling((pos)))
+#define css_for_each_child(pos, parent)                                        \
+       for ((pos) = css_next_child(NULL, (parent)); (pos);             \
+            (pos) = css_next_child((pos), (parent)))
+
+struct cgroup_subsys_state *
+css_next_descendant_pre(struct cgroup_subsys_state *pos,
+                       struct cgroup_subsys_state *css);
 
-struct cgroup *cgroup_next_descendant_pre(struct cgroup *pos,
-                                         struct cgroup *cgroup);
-struct cgroup *cgroup_rightmost_descendant(struct cgroup *pos);
+struct cgroup_subsys_state *
+css_rightmost_descendant(struct cgroup_subsys_state *pos);
 
 /**
- * cgroup_for_each_descendant_pre - pre-order walk of a cgroup's descendants
- * @pos: the cgroup * to use as the loop cursor
- * @cgroup: cgroup whose descendants to walk
+ * css_for_each_descendant_pre - pre-order walk of a css's descendants
+ * @pos: the css * to use as the loop cursor
+ * @root: css whose descendants to walk
  *
- * Walk @cgroup's descendants.  Must be called under rcu_read_lock().  A
- * descendant cgroup which hasn't finished ->css_online() or already has
+ * Walk @root's descendants.  @root is included in the iteration and the
+ * first node to be visited.  Must be called under rcu_read_lock().  A
+ * descendant css which hasn't finished ->css_online() or already has
  * finished ->css_offline() may show up during traversal and it's each
  * subsystem's responsibility to verify that each @pos is alive.
  *
  * If a subsystem synchronizes against the parent in its ->css_online() and
  * before starting iterating, and synchronizes against @pos on each
- * iteration, any descendant cgroup which finished ->css_online() is
+ * iteration, any descendant css which finished ->css_online() is
  * guaranteed to be visible in the future iterations.
  *
  * In other words, the following guarantees that a descendant can't escape
  * state updates of its ancestors.
  *
- * my_online(@cgrp)
+ * my_online(@css)
  * {
- *     Lock @cgrp->parent and @cgrp;
- *     Inherit state from @cgrp->parent;
+ *     Lock @css's parent and @css;
+ *     Inherit state from the parent;
  *     Unlock both.
  * }
  *
- * my_update_state(@cgrp)
+ * my_update_state(@css)
  * {
- *     Lock @cgrp;
- *     Update @cgrp's state;
- *     Unlock @cgrp;
- *
- *     cgroup_for_each_descendant_pre(@pos, @cgrp) {
+ *     css_for_each_descendant_pre(@pos, @css) {
  *             Lock @pos;
- *             Verify @pos is alive and inherit state from @pos->parent;
+ *             if (@pos == @css)
+ *                     Update @css's state;
+ *             else
+ *                     Verify @pos is alive and inherit state from its parent;
  *             Unlock @pos;
  *     }
  * }
@@ -786,8 +819,7 @@ struct cgroup *cgroup_rightmost_descendant(struct cgroup *pos);
  * visible by walking order and, as long as inheriting operations to the
  * same @pos are atomic to each other, multiple updates racing each other
  * still result in the correct state.  It's guaranateed that at least one
- * inheritance happens for any cgroup after the latest update to its
- * parent.
+ * inheritance happens for any css after the latest update to its parent.
  *
  * If checking parent's state requires locking the parent, each inheriting
  * iteration should lock and unlock both @pos->parent and @pos.
@@ -800,52 +832,45 @@ struct cgroup *cgroup_rightmost_descendant(struct cgroup *pos);
  * caller is responsible for ensuring that @pos remains accessible until
  * the start of the next iteration by, for example, bumping the css refcnt.
  */
-#define cgroup_for_each_descendant_pre(pos, cgroup)                    \
-       for (pos = cgroup_next_descendant_pre(NULL, (cgroup)); (pos);   \
-            pos = cgroup_next_descendant_pre((pos), (cgroup)))
+#define css_for_each_descendant_pre(pos, css)                          \
+       for ((pos) = css_next_descendant_pre(NULL, (css)); (pos);       \
+            (pos) = css_next_descendant_pre((pos), (css)))
 
-struct cgroup *cgroup_next_descendant_post(struct cgroup *pos,
-                                          struct cgroup *cgroup);
+struct cgroup_subsys_state *
+css_next_descendant_post(struct cgroup_subsys_state *pos,
+                        struct cgroup_subsys_state *css);
 
 /**
- * cgroup_for_each_descendant_post - post-order walk of a cgroup's descendants
- * @pos: the cgroup * to use as the loop cursor
- * @cgroup: cgroup whose descendants to walk
+ * css_for_each_descendant_post - post-order walk of a css's descendants
+ * @pos: the css * to use as the loop cursor
+ * @css: css whose descendants to walk
  *
- * Similar to cgroup_for_each_descendant_pre() but performs post-order
- * traversal instead.  Note that the walk visibility guarantee described in
- * pre-order walk doesn't apply the same to post-order walks.
+ * Similar to css_for_each_descendant_pre() but performs post-order
+ * traversal instead.  @root is included in the iteration and the last
+ * node to be visited.  Note that the walk visibility guarantee described
+ * in pre-order walk doesn't apply the same to post-order walks.
  */
-#define cgroup_for_each_descendant_post(pos, cgroup)                   \
-       for (pos = cgroup_next_descendant_post(NULL, (cgroup)); (pos);  \
-            pos = cgroup_next_descendant_post((pos), (cgroup)))
-
-/* A cgroup_iter should be treated as an opaque object */
-struct cgroup_iter {
-       struct list_head *cset_link;
-       struct list_head *task;
+#define css_for_each_descendant_post(pos, css)                         \
+       for ((pos) = css_next_descendant_post(NULL, (css)); (pos);      \
+            (pos) = css_next_descendant_post((pos), (css)))
+
+/* A css_task_iter should be treated as an opaque object */
+struct css_task_iter {
+       struct cgroup_subsys_state      *origin_css;
+       struct list_head                *cset_link;
+       struct list_head                *task;
 };
 
-/*
- * To iterate across the tasks in a cgroup:
- *
- * 1) call cgroup_iter_start to initialize an iterator
- *
- * 2) call cgroup_iter_next() to retrieve member tasks until it
- *    returns NULL or until you want to end the iteration
- *
- * 3) call cgroup_iter_end() to destroy the iterator.
- *
- * Or, call cgroup_scan_tasks() to iterate through every task in a
- * cgroup - cgroup_scan_tasks() holds the css_set_lock when calling
- * the test_task() callback, but not while calling the process_task()
- * callback.
- */
-void cgroup_iter_start(struct cgroup *cgrp, struct cgroup_iter *it);
-struct task_struct *cgroup_iter_next(struct cgroup *cgrp,
-                                       struct cgroup_iter *it);
-void cgroup_iter_end(struct cgroup *cgrp, struct cgroup_iter *it);
-int cgroup_scan_tasks(struct cgroup_scanner *scan);
+void css_task_iter_start(struct cgroup_subsys_state *css,
+                        struct css_task_iter *it);
+struct task_struct *css_task_iter_next(struct css_task_iter *it);
+void css_task_iter_end(struct css_task_iter *it);
+
+int css_scan_tasks(struct cgroup_subsys_state *css,
+                  bool (*test)(struct task_struct *, void *),
+                  void (*process)(struct task_struct *, void *),
+                  void *data, struct ptr_heap *heap);
+
 int cgroup_attach_task_all(struct task_struct *from, struct task_struct *);
 int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from);
 
index dbbf8aa7731bee4f8bb1f134b1e847b6af00b8cb..423cb82b1756b1931bebf0384ae1abd5a4408842 100644 (file)
@@ -327,6 +327,11 @@ static inline void __clocksource_updatefreq_khz(struct clocksource *cs, u32 khz)
 
 extern int timekeeping_notify(struct clocksource *clock);
 
+extern int timekeeping_chfreq_prep(struct clocksource *clock, cycle_t
+                                  *start_cycle);
+extern void timekeeping_chfreq(unsigned int freq, cycle_t end_cycle,
+                              u64 delta_ns);
+
 extern cycle_t clocksource_mmio_readl_up(struct clocksource *);
 extern cycle_t clocksource_mmio_readl_down(struct clocksource *);
 extern cycle_t clocksource_mmio_readw_up(struct clocksource *);
index cff544f81105ecf83dac7cc0e75ea8ee26954592..d30209b9cef81773b26e62f79b67de402e56ff68 100644 (file)
@@ -60,7 +60,6 @@ Mellon the rights to redistribute these changes without encumbrance.
 
 #if defined(__linux__)
 typedef unsigned long long u_quad_t;
-#else
 #endif
 #include <uapi/linux/coda.h>
 #endif 
index a98f1ca60407d574ee89980bde8029d8f1f73f78..c617580e24a4b073f7470b14b7fa0ad4e138e376 100644 (file)
@@ -10,8 +10,9 @@
  * These are the only things you should do on a core-file: use only these
  * functions to write out all the necessary info.
  */
-extern int dump_write(struct file *file, const void *addr, int nr);
-extern int dump_seek(struct file *file, loff_t off);
+extern bool dump_emit(struct coredump_params *cprm, const void *addr, size_t size);
+extern bool dump_skip(struct coredump_params *cprm, size_t size);
+extern bool dump_align(struct coredump_params *cprm, int align);
 #ifdef CONFIG_COREDUMP
 extern void do_coredump(siginfo_t *siginfo);
 #else
index ab0eade730392265b298833bd9db4579f387d1bb..956c0a16566f828a37773b8a913423921f3a3079 100644 (file)
@@ -172,6 +172,8 @@ extern struct bus_type cpu_subsys;
 #ifdef CONFIG_HOTPLUG_CPU
 /* Stop CPUs going up and down. */
 
+extern void cpu_hotplug_begin(void);
+extern void cpu_hotplug_done(void);
 extern void get_online_cpus(void);
 extern void put_online_cpus(void);
 extern void cpu_hotplug_disable(void);
@@ -197,6 +199,8 @@ static inline void cpu_hotplug_driver_unlock(void)
 
 #else          /* CONFIG_HOTPLUG_CPU */
 
+static inline void cpu_hotplug_begin(void) {}
+static inline void cpu_hotplug_done(void) {}
 #define get_online_cpus()      do { } while (0)
 #define put_online_cpus()      do { } while (0)
 #define cpu_hotplug_disable()  do { } while (0)
index 90d5a15120d592eedeff5de22cd33fe61fe98351..d568f3975eeb66125a28aa8a31ccbb712de56807 100644 (file)
 #ifndef _LINUX_CPUFREQ_H
 #define _LINUX_CPUFREQ_H
 
-#include <asm/cputime.h>
-#include <linux/mutex.h>
-#include <linux/notifier.h>
-#include <linux/threads.h>
+#include <linux/cpumask.h>
+#include <linux/completion.h>
 #include <linux/kobject.h>
+#include <linux/notifier.h>
 #include <linux/sysfs.h>
-#include <linux/completion.h>
-#include <linux/workqueue.h>
-#include <linux/cpumask.h>
-#include <asm/div64.h>
-
-#define CPUFREQ_NAME_LEN 16
-/* Print length for names. Extra 1 space for accomodating '\n' in prints */
-#define CPUFREQ_NAME_PLEN (CPUFREQ_NAME_LEN + 1)
 
 /*********************************************************************
- *                     CPUFREQ NOTIFIER INTERFACE                    *
+ *                        CPUFREQ INTERFACE                          *
  *********************************************************************/
-
-#define CPUFREQ_TRANSITION_NOTIFIER    (0)
-#define CPUFREQ_POLICY_NOTIFIER                (1)
-
-#ifdef CONFIG_CPU_FREQ
-int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list);
-int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list);
-extern void disable_cpufreq(void);
-#else          /* CONFIG_CPU_FREQ */
-static inline int cpufreq_register_notifier(struct notifier_block *nb,
-                                               unsigned int list)
-{
-       return 0;
-}
-static inline int cpufreq_unregister_notifier(struct notifier_block *nb,
-                                               unsigned int list)
-{
-       return 0;
-}
-static inline void disable_cpufreq(void) { }
-#endif         /* CONFIG_CPU_FREQ */
-
-/* if (cpufreq_driver->target) exists, the ->governor decides what frequency
- * within the limits is used. If (cpufreq_driver->setpolicy> exists, these
- * two generic policies are available:
- */
-
-#define CPUFREQ_POLICY_POWERSAVE       (1)
-#define CPUFREQ_POLICY_PERFORMANCE     (2)
-
-/* Frequency values here are CPU kHz so that hardware which doesn't run
- * with some frequencies can complain without having to guess what per
- * cent / per mille means.
+/*
+ * Frequency values here are CPU kHz
+ *
  * Maximum transition latency is in nanoseconds - if it's unknown,
  * CPUFREQ_ETERNAL shall be used.
  */
 
+#define CPUFREQ_ETERNAL                        (-1)
+#define CPUFREQ_NAME_LEN               16
+/* Print length for names. Extra 1 space for accomodating '\n' in prints */
+#define CPUFREQ_NAME_PLEN              (CPUFREQ_NAME_LEN + 1)
+
 struct cpufreq_governor;
 
-/* /sys/devices/system/cpu/cpufreq: entry point for global variables */
-extern struct kobject *cpufreq_global_kobject;
-int cpufreq_get_global_kobject(void);
-void cpufreq_put_global_kobject(void);
-int cpufreq_sysfs_create_file(const struct attribute *attr);
-void cpufreq_sysfs_remove_file(const struct attribute *attr);
+struct cpufreq_freqs {
+       unsigned int cpu;       /* cpu nr */
+       unsigned int old;
+       unsigned int new;
+       u8 flags;               /* flags of cpufreq_driver, see below. */
+};
 
-#define CPUFREQ_ETERNAL                        (-1)
 struct cpufreq_cpuinfo {
        unsigned int            max_freq;
        unsigned int            min_freq;
@@ -117,123 +82,103 @@ struct cpufreq_policy {
 
        struct cpufreq_real_policy      user_policy;
 
+       struct list_head        policy_list;
        struct kobject          kobj;
        struct completion       kobj_unregister;
        int                     transition_ongoing; /* Tracks transition status */
 };
 
-#define CPUFREQ_ADJUST                 (0)
-#define CPUFREQ_INCOMPATIBLE           (1)
-#define CPUFREQ_NOTIFY                 (2)
-#define CPUFREQ_START                  (3)
-#define CPUFREQ_UPDATE_POLICY_CPU      (4)
-
 /* Only for ACPI */
 #define CPUFREQ_SHARED_TYPE_NONE (0) /* None */
 #define CPUFREQ_SHARED_TYPE_HW  (1) /* HW does needed coordination */
 #define CPUFREQ_SHARED_TYPE_ALL         (2) /* All dependent CPUs should set freq */
 #define CPUFREQ_SHARED_TYPE_ANY         (3) /* Freq can be set from any dependent CPU*/
 
+struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu);
+void cpufreq_cpu_put(struct cpufreq_policy *policy);
+
 static inline bool policy_is_shared(struct cpufreq_policy *policy)
 {
        return cpumask_weight(policy->cpus) > 1;
 }
 
-/******************** cpufreq transition notifiers *******************/
-
-#define CPUFREQ_PRECHANGE      (0)
-#define CPUFREQ_POSTCHANGE     (1)
-#define CPUFREQ_RESUMECHANGE   (8)
-#define CPUFREQ_SUSPENDCHANGE  (9)
+/* /sys/devices/system/cpu/cpufreq: entry point for global variables */
+extern struct kobject *cpufreq_global_kobject;
+int cpufreq_get_global_kobject(void);
+void cpufreq_put_global_kobject(void);
+int cpufreq_sysfs_create_file(const struct attribute *attr);
+void cpufreq_sysfs_remove_file(const struct attribute *attr);
 
-struct cpufreq_freqs {
-       unsigned int cpu;       /* cpu nr */
-       unsigned int old;
-       unsigned int new;
-       u8 flags;               /* flags of cpufreq_driver, see below. */
-};
+#ifdef CONFIG_CPU_FREQ
+unsigned int cpufreq_get(unsigned int cpu);
+unsigned int cpufreq_quick_get(unsigned int cpu);
+unsigned int cpufreq_quick_get_max(unsigned int cpu);
+void disable_cpufreq(void);
 
-/**
- * cpufreq_scale - "old * mult / div" calculation for large values (32-bit-arch
- * safe)
- * @old:   old value
- * @div:   divisor
- * @mult:  multiplier
- *
- *
- * new = old * mult / div
- */
-static inline unsigned long cpufreq_scale(unsigned long old, u_int div,
-               u_int mult)
+u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy);
+int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu);
+int cpufreq_update_policy(unsigned int cpu);
+bool have_governor_per_policy(void);
+struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy);
+#else
+static inline unsigned int cpufreq_get(unsigned int cpu)
 {
-#if BITS_PER_LONG == 32
-
-       u64 result = ((u64) old) * ((u64) mult);
-       do_div(result, div);
-       return (unsigned long) result;
-
-#elif BITS_PER_LONG == 64
-
-       unsigned long result = old * ((u64) mult);
-       result /= div;
-       return result;
-
+       return 0;
+}
+static inline unsigned int cpufreq_quick_get(unsigned int cpu)
+{
+       return 0;
+}
+static inline unsigned int cpufreq_quick_get_max(unsigned int cpu)
+{
+       return 0;
+}
+static inline void disable_cpufreq(void) { }
 #endif
-};
 
 /*********************************************************************
- *                          CPUFREQ GOVERNORS                        *
+ *                      CPUFREQ DRIVER INTERFACE                     *
  *********************************************************************/
 
-#define CPUFREQ_GOV_START      1
-#define CPUFREQ_GOV_STOP       2
-#define CPUFREQ_GOV_LIMITS     3
-#define CPUFREQ_GOV_POLICY_INIT        4
-#define CPUFREQ_GOV_POLICY_EXIT        5
+#define CPUFREQ_RELATION_L 0  /* lowest frequency at or above target */
+#define CPUFREQ_RELATION_H 1  /* highest frequency below or at target */
 
-struct cpufreq_governor {
-       char    name[CPUFREQ_NAME_LEN];
-       int     initialized;
-       int     (*governor)     (struct cpufreq_policy *policy,
-                                unsigned int event);
-       ssize_t (*show_setspeed)        (struct cpufreq_policy *policy,
-                                        char *buf);
-       int     (*store_setspeed)       (struct cpufreq_policy *policy,
-                                        unsigned int freq);
-       unsigned int max_transition_latency; /* HW must be able to switch to
-                       next freq faster than this value in nano secs or we
-                       will fallback to performance governor */
-       struct list_head        governor_list;
-       struct module           *owner;
+struct freq_attr {
+       struct attribute attr;
+       ssize_t (*show)(struct cpufreq_policy *, char *);
+       ssize_t (*store)(struct cpufreq_policy *, const char *, size_t count);
 };
 
-/*
- * Pass a target to the cpufreq driver.
- */
-extern int cpufreq_driver_target(struct cpufreq_policy *policy,
-                                unsigned int target_freq,
-                                unsigned int relation);
-extern int __cpufreq_driver_target(struct cpufreq_policy *policy,
-                                  unsigned int target_freq,
-                                  unsigned int relation);
+#define cpufreq_freq_attr_ro(_name)            \
+static struct freq_attr _name =                        \
+__ATTR(_name, 0444, show_##_name, NULL)
 
-extern int __cpufreq_driver_getavg(struct cpufreq_policy *policy,
-                                  unsigned int cpu);
+#define cpufreq_freq_attr_ro_perm(_name, _perm)        \
+static struct freq_attr _name =                        \
+__ATTR(_name, _perm, show_##_name, NULL)
 
-int cpufreq_register_governor(struct cpufreq_governor *governor);
-void cpufreq_unregister_governor(struct cpufreq_governor *governor);
+#define cpufreq_freq_attr_rw(_name)            \
+static struct freq_attr _name =                        \
+__ATTR(_name, 0644, show_##_name, store_##_name)
 
-/*********************************************************************
- *                      CPUFREQ DRIVER INTERFACE                     *
- *********************************************************************/
+struct global_attr {
+       struct attribute attr;
+       ssize_t (*show)(struct kobject *kobj,
+                       struct attribute *attr, char *buf);
+       ssize_t (*store)(struct kobject *a, struct attribute *b,
+                        const char *c, size_t count);
+};
 
-#define CPUFREQ_RELATION_L 0  /* lowest frequency at or above target */
-#define CPUFREQ_RELATION_H 1  /* highest frequency below or at target */
+#define define_one_global_ro(_name)            \
+static struct global_attr _name =              \
+__ATTR(_name, 0444, show_##_name, NULL)
+
+#define define_one_global_rw(_name)            \
+static struct global_attr _name =              \
+__ATTR(_name, 0644, show_##_name, store_##_name)
 
-struct freq_attr;
 
 struct cpufreq_driver {
-       struct module           *owner;
        char                    name[CPUFREQ_NAME_LEN];
        u8                      flags;
        /*
@@ -258,8 +203,6 @@ struct cpufreq_driver {
        unsigned int    (*get)  (unsigned int cpu);
 
        /* optional */
-       unsigned int (*getavg)  (struct cpufreq_policy *policy,
-                                unsigned int cpu);
        int     (*bios_limit)   (int cpu, unsigned int *limit);
 
        int     (*exit)         (struct cpufreq_policy *policy);
@@ -269,7 +212,6 @@ struct cpufreq_driver {
 };
 
 /* flags */
-
 #define CPUFREQ_STICKY         0x01    /* the driver isn't removed even if
                                         * all ->init() calls failed */
 #define CPUFREQ_CONST_LOOPS    0x02    /* loops_per_jiffy or other kernel
@@ -281,8 +223,7 @@ struct cpufreq_driver {
 int cpufreq_register_driver(struct cpufreq_driver *driver_data);
 int cpufreq_unregister_driver(struct cpufreq_driver *driver_data);
 
-void cpufreq_notify_transition(struct cpufreq_policy *policy,
-               struct cpufreq_freqs *freqs, unsigned int state);
+const char *cpufreq_get_current_driver(void);
 
 static inline void cpufreq_verify_within_limits(struct cpufreq_policy *policy,
                unsigned int min, unsigned int max)
@@ -300,86 +241,117 @@ static inline void cpufreq_verify_within_limits(struct cpufreq_policy *policy,
        return;
 }
 
-struct freq_attr {
-       struct attribute attr;
-       ssize_t (*show)(struct cpufreq_policy *, char *);
-       ssize_t (*store)(struct cpufreq_policy *, const char *, size_t count);
-};
-
-#define cpufreq_freq_attr_ro(_name)            \
-static struct freq_attr _name =                        \
-__ATTR(_name, 0444, show_##_name, NULL)
-
-#define cpufreq_freq_attr_ro_perm(_name, _perm)        \
-static struct freq_attr _name =                        \
-__ATTR(_name, _perm, show_##_name, NULL)
-
-#define cpufreq_freq_attr_rw(_name)            \
-static struct freq_attr _name =                        \
-__ATTR(_name, 0644, show_##_name, store_##_name)
+/*********************************************************************
+ *                     CPUFREQ NOTIFIER INTERFACE                    *
+ *********************************************************************/
 
-struct global_attr {
-       struct attribute attr;
-       ssize_t (*show)(struct kobject *kobj,
-                       struct attribute *attr, char *buf);
-       ssize_t (*store)(struct kobject *a, struct attribute *b,
-                        const char *c, size_t count);
-};
+#define CPUFREQ_TRANSITION_NOTIFIER    (0)
+#define CPUFREQ_POLICY_NOTIFIER                (1)
 
-#define define_one_global_ro(_name)            \
-static struct global_attr _name =              \
-__ATTR(_name, 0444, show_##_name, NULL)
+/* Transition notifiers */
+#define CPUFREQ_PRECHANGE              (0)
+#define CPUFREQ_POSTCHANGE             (1)
+#define CPUFREQ_RESUMECHANGE           (8)
+#define CPUFREQ_SUSPENDCHANGE          (9)
 
-#define define_one_global_rw(_name)            \
-static struct global_attr _name =              \
-__ATTR(_name, 0644, show_##_name, store_##_name)
+/* Policy Notifiers  */
+#define CPUFREQ_ADJUST                 (0)
+#define CPUFREQ_INCOMPATIBLE           (1)
+#define CPUFREQ_NOTIFY                 (2)
+#define CPUFREQ_START                  (3)
+#define CPUFREQ_UPDATE_POLICY_CPU      (4)
 
-struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu);
-void cpufreq_cpu_put(struct cpufreq_policy *data);
-const char *cpufreq_get_current_driver(void);
+#ifdef CONFIG_CPU_FREQ
+int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list);
+int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list);
 
-/*********************************************************************
- *                        CPUFREQ 2.6. INTERFACE                     *
- *********************************************************************/
-u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy);
-int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu);
-int cpufreq_update_policy(unsigned int cpu);
-bool have_governor_per_policy(void);
-struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy);
+void cpufreq_notify_transition(struct cpufreq_policy *policy,
+               struct cpufreq_freqs *freqs, unsigned int state);
 
-#ifdef CONFIG_CPU_FREQ
-/*
- * query the current CPU frequency (in kHz). If zero, cpufreq couldn't detect it
- */
-unsigned int cpufreq_get(unsigned int cpu);
-#else
-static inline unsigned int cpufreq_get(unsigned int cpu)
+#else /* CONFIG_CPU_FREQ */
+static inline int cpufreq_register_notifier(struct notifier_block *nb,
+                                               unsigned int list)
 {
        return 0;
 }
-#endif
-
-/*
- * query the last known CPU freq (in kHz). If zero, cpufreq couldn't detect it
- */
-#ifdef CONFIG_CPU_FREQ
-unsigned int cpufreq_quick_get(unsigned int cpu);
-unsigned int cpufreq_quick_get_max(unsigned int cpu);
-#else
-static inline unsigned int cpufreq_quick_get(unsigned int cpu)
+static inline int cpufreq_unregister_notifier(struct notifier_block *nb,
+                                               unsigned int list)
 {
        return 0;
 }
-static inline unsigned int cpufreq_quick_get_max(unsigned int cpu)
+#endif /* !CONFIG_CPU_FREQ */
+
+/**
+ * cpufreq_scale - "old * mult / div" calculation for large values (32-bit-arch
+ * safe)
+ * @old:   old value
+ * @div:   divisor
+ * @mult:  multiplier
+ *
+ *
+ * new = old * mult / div
+ */
+static inline unsigned long cpufreq_scale(unsigned long old, u_int div,
+               u_int mult)
 {
-       return 0;
-}
+#if BITS_PER_LONG == 32
+       u64 result = ((u64) old) * ((u64) mult);
+       do_div(result, div);
+       return (unsigned long) result;
+
+#elif BITS_PER_LONG == 64
+       unsigned long result = old * ((u64) mult);
+       result /= div;
+       return result;
 #endif
+}
 
 /*********************************************************************
- *                       CPUFREQ DEFAULT GOVERNOR                    *
+ *                          CPUFREQ GOVERNORS                        *
  *********************************************************************/
 
+/*
+ * If (cpufreq_driver->target) exists, the ->governor decides what frequency
+ * within the limits is used. If (cpufreq_driver->setpolicy) exists, these
+ * two generic policies are available:
+ */
+#define CPUFREQ_POLICY_POWERSAVE       (1)
+#define CPUFREQ_POLICY_PERFORMANCE     (2)
+
+/* Governor Events */
+#define CPUFREQ_GOV_START      1
+#define CPUFREQ_GOV_STOP       2
+#define CPUFREQ_GOV_LIMITS     3
+#define CPUFREQ_GOV_POLICY_INIT        4
+#define CPUFREQ_GOV_POLICY_EXIT        5
+
+struct cpufreq_governor {
+       char    name[CPUFREQ_NAME_LEN];
+       int     initialized;
+       int     (*governor)     (struct cpufreq_policy *policy,
+                                unsigned int event);
+       ssize_t (*show_setspeed)        (struct cpufreq_policy *policy,
+                                        char *buf);
+       int     (*store_setspeed)       (struct cpufreq_policy *policy,
+                                        unsigned int freq);
+       unsigned int max_transition_latency; /* HW must be able to switch to
+                       next freq faster than this value in nano secs or we
+                       will fall back to performance governor */
+       struct list_head        governor_list;
+       struct module           *owner;
+};
+
+/* Pass a target to the cpufreq driver */
+int cpufreq_driver_target(struct cpufreq_policy *policy,
+                                unsigned int target_freq,
+                                unsigned int relation);
+int __cpufreq_driver_target(struct cpufreq_policy *policy,
+                                  unsigned int target_freq,
+                                  unsigned int relation);
+int cpufreq_register_governor(struct cpufreq_governor *governor);
+void cpufreq_unregister_governor(struct cpufreq_governor *governor);
+
+/* CPUFREQ DEFAULT GOVERNOR */
 /*
  * Performance governor is fallback governor if any other gov failed to auto
  * load due latency restrictions
@@ -428,18 +400,16 @@ int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
                                   unsigned int relation,
                                   unsigned int *index);
 
-/* the following 3 funtions are for cpufreq core use only */
+void cpufreq_frequency_table_update_policy_cpu(struct cpufreq_policy *policy);
+ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf);
+
+/* the following function is for cpufreq core use only */
 struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu);
 
 /* the following are really really optional */
 extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs;
-
 void cpufreq_frequency_table_get_attr(struct cpufreq_frequency_table *table,
                                      unsigned int cpu);
-void cpufreq_frequency_table_update_policy_cpu(struct cpufreq_policy *policy);
-
 void cpufreq_frequency_table_put_attr(unsigned int cpu);
 
-ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf);
-
 #endif /* _LINUX_CPUFREQ_H */
index 0bc4b74668e95f561e6cad07d038f6822f86faa0..781addc66f03135e1fe033bd6aeb0476c0321fd2 100644 (file)
@@ -13,8 +13,6 @@
 
 #include <linux/percpu.h>
 #include <linux/list.h>
-#include <linux/kobject.h>
-#include <linux/completion.h>
 #include <linux/hrtimer.h>
 
 #define CPUIDLE_STATE_MAX      10
@@ -61,6 +59,10 @@ struct cpuidle_state {
 
 #define CPUIDLE_DRIVER_FLAGS_MASK (0xFFFF0000)
 
+struct cpuidle_device_kobj;
+struct cpuidle_state_kobj;
+struct cpuidle_driver_kobj;
+
 struct cpuidle_device {
        unsigned int            registered:1;
        unsigned int            enabled:1;
@@ -71,9 +73,8 @@ struct cpuidle_device {
        struct cpuidle_state_usage      states_usage[CPUIDLE_STATE_MAX];
        struct cpuidle_state_kobj *kobjs[CPUIDLE_STATE_MAX];
        struct cpuidle_driver_kobj *kobj_driver;
+       struct cpuidle_device_kobj *kobj_dev;
        struct list_head        device_list;
-       struct kobject          kobj;
-       struct completion       kobj_unregister;
 
 #ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
        int                     safe_state_index;
index e151d4c9298d858eca5a3ccd5594da1aebe20596..653073de09e379ef1c8b04c1a96d0ef2c948f5a3 100644 (file)
@@ -10,6 +10,7 @@
 
 #include <linux/bio.h>
 #include <linux/blkdev.h>
+#include <linux/math64.h>
 #include <linux/ratelimit.h>
 
 struct dm_dev;
@@ -550,6 +551,14 @@ extern struct ratelimit_state dm_ratelimit_state;
 #define DM_MAPIO_REMAPPED      1
 #define DM_MAPIO_REQUEUE       DM_ENDIO_REQUEUE
 
+#define dm_sector_div64(x, y)( \
+{ \
+       u64 _res; \
+       (x) = div64_u64_rem(x, y, &_res); \
+       _res; \
+} \
+)
+
 /*
  * Ceiling(n / sz)
  */
index 96e87693d93349c92563585f1abf59efd8d4c029..841925fbfe8a737f4953deb85cc4f5841be2302b 100644 (file)
@@ -14,6 +14,8 @@
 #ifndef __DM9000_PLATFORM_DATA
 #define __DM9000_PLATFORM_DATA __FILE__
 
+#include <linux/if_ether.h>
+
 /* IO control flags */
 
 #define DM9000_PLATF_8BITONLY  (0x0001)
@@ -27,7 +29,7 @@
 
 struct dm9000_plat_data {
        unsigned int    flags;
-       unsigned char   dev_addr[6];
+       unsigned char   dev_addr[ETH_ALEN];
 
        /* allow replacement IO routines */
 
diff --git a/include/linux/dma/mmp-pdma.h b/include/linux/dma/mmp-pdma.h
new file mode 100644 (file)
index 0000000..2dc9b2b
--- /dev/null
@@ -0,0 +1,15 @@
+#ifndef _MMP_PDMA_H_
+#define _MMP_PDMA_H_
+
+struct dma_chan;
+
+#ifdef CONFIG_MMP_PDMA
+bool mmp_pdma_filter_fn(struct dma_chan *chan, void *param);
+#else
+static inline bool mmp_pdma_filter_fn(struct dma_chan *chan, void *param)
+{
+       return false;
+}
+#endif
+
+#endif /* _MMP_PDMA_H_ */
index cb286b1acdb64f06131a4de019d5d5603c864b50..13ac4f55322789c3aedb6f88d9be2ccace6f90a4 100644 (file)
@@ -370,6 +370,33 @@ struct dma_slave_config {
        unsigned int slave_id;
 };
 
+/* struct dma_slave_caps - expose capabilities of a slave channel only
+ *
+ * @src_addr_widths: bit mask of src addr widths the channel supports
+ * @dstn_addr_widths: bit mask of dstn addr widths the channel supports
+ * @directions: bit mask of slave direction the channel supported
+ *     since the enum dma_transfer_direction is not defined as bits for each
+ *     type of direction, the dma controller should fill (1 << <TYPE>) and same
+ *     should be checked by controller as well
+ * @cmd_pause: true, if pause and thereby resume is supported
+ * @cmd_terminate: true, if terminate cmd is supported
+ *
+ * @max_sg_nr: maximum number of SG segments supported
+ *     0 for no maximum
+ * @max_sg_len: maximum length of a SG segment supported
+ *     0 for no maximum
+ */
+struct dma_slave_caps {
+       u32 src_addr_widths;
+       u32 dstn_addr_widths;
+       u32 directions;
+       bool cmd_pause;
+       bool cmd_terminate;
+
+       u32 max_sg_nr;
+       u32 max_sg_len;
+};
+
 static inline const char *dma_chan_name(struct dma_chan *chan)
 {
        return dev_name(&chan->dev->device);
@@ -532,6 +559,7 @@ struct dma_tx_state {
  *     struct with auxiliary transfer status information, otherwise the call
  *     will just return a simple status code
  * @device_issue_pending: push pending transactions to hardware
+ * @device_slave_caps: return the slave channel capabilities
  */
 struct dma_device {
 
@@ -597,6 +625,7 @@ struct dma_device {
                                            dma_cookie_t cookie,
                                            struct dma_tx_state *txstate);
        void (*device_issue_pending)(struct dma_chan *chan);
+       int (*device_slave_caps)(struct dma_chan *chan, struct dma_slave_caps *caps);
 };
 
 static inline int dmaengine_device_control(struct dma_chan *chan,
@@ -670,6 +699,21 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_interleaved_dma(
        return chan->device->device_prep_interleaved_dma(chan, xt, flags);
 }
 
+static inline int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
+{
+       if (!chan || !caps)
+               return -EINVAL;
+
+       /* check if the channel supports slave transactions */
+       if (!test_bit(DMA_SLAVE, chan->device->cap_mask.bits))
+               return -ENXIO;
+
+       if (chan->device->device_slave_caps)
+               return chan->device->device_slave_caps(chan, caps);
+
+       return -ENXIO;
+}
+
 static inline int dmaengine_terminate_all(struct dma_chan *chan)
 {
        return dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0);
@@ -995,6 +1039,7 @@ int dma_async_device_register(struct dma_device *device);
 void dma_async_device_unregister(struct dma_device *device);
 void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
 struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type);
+struct dma_chan *dma_get_slave_channel(struct dma_chan *chan);
 struct dma_chan *net_dma_find_channel(void);
 #define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y)
 #define dma_request_slave_channel_compat(mask, x, y, dev, name) \
index 40a3c0e01b2be880a428d9662ee490d9a070e277..87fdf7a9377fd9d7f65992c3a533bc30231392c6 100644 (file)
@@ -39,13 +39,14 @@ extern Elf64_Dyn _DYNAMIC [];
 
 /* Optional callbacks to write extra ELF notes. */
 struct file;
+struct coredump_params;
 
 #ifndef ARCH_HAVE_EXTRA_ELF_NOTES
 static inline int elf_coredump_extra_notes_size(void) { return 0; }
-static inline int elf_coredump_extra_notes_write(struct file *file,
-                       loff_t *foffset) { return 0; }
+static inline int elf_coredump_extra_notes_write(struct coredump_params *cprm)
+                       { return 0; }
 #else
 extern int elf_coredump_extra_notes_size(void);
-extern int elf_coredump_extra_notes_write(struct file *file, loff_t *foffset);
+extern int elf_coredump_extra_notes_write(struct coredump_params *cprm);
 #endif
 #endif /* _LINUX_ELF_H */
index cdd3d13efce7c24da5e65fc3d651b1564c3a20e0..fea1a62557de52ea3217da9ba79487f79a1fa1bd 100644 (file)
@@ -63,10 +63,9 @@ static inline int elf_core_copy_task_xfpregs(struct task_struct *t, elf_fpxregse
  */
 extern Elf_Half elf_core_extra_phdrs(void);
 extern int
-elf_core_write_extra_phdrs(struct file *file, loff_t offset, size_t *size,
-                          unsigned long limit);
+elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset);
 extern int
-elf_core_write_extra_data(struct file *file, size_t *size, unsigned long limit);
+elf_core_write_extra_data(struct coredump_params *cprm);
 extern size_t elf_core_extra_data_size(void);
 
 #endif /* _LINUX_ELFCORE_H */
index 981874773e85adfe81c2d24d59daa79aea157114..688c68f25531154fc611ddce2c38ee899ddb6c27 100644 (file)
@@ -181,8 +181,6 @@ typedef void (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
 #define READ                   0
 #define WRITE                  RW_MASK
 #define READA                  RWA_MASK
-#define KERNEL_READ            (READ|REQ_KERNEL)
-#define KERNEL_WRITE           (WRITE|REQ_KERNEL)
 
 #define READ_SYNC              (READ | REQ_SYNC)
 #define WRITE_SYNC             (WRITE | REQ_SYNC | REQ_NOIDLE)
@@ -290,25 +288,108 @@ struct address_space;
 struct writeback_control;
 
 struct iov_iter {
-       const struct iovec *iov;
+       struct iov_iter_ops *ops;
+       unsigned long data;
        unsigned long nr_segs;
        size_t iov_offset;
        size_t count;
 };
 
-size_t iov_iter_copy_from_user_atomic(struct page *page,
-               struct iov_iter *i, unsigned long offset, size_t bytes);
-size_t iov_iter_copy_from_user(struct page *page,
-               struct iov_iter *i, unsigned long offset, size_t bytes);
-void iov_iter_advance(struct iov_iter *i, size_t bytes);
-int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes);
-size_t iov_iter_single_seg_count(const struct iov_iter *i);
+struct iov_iter_ops {
+       size_t (*ii_copy_to_user_atomic)(struct page *, struct iov_iter *,
+                                        unsigned long, size_t);
+       size_t (*ii_copy_to_user)(struct page *, struct iov_iter *,
+                                 unsigned long, size_t, int);
+       size_t (*ii_copy_from_user_atomic)(struct page *, struct iov_iter *,
+                                          unsigned long, size_t);
+       size_t (*ii_copy_from_user)(struct page *, struct iov_iter *,
+                                         unsigned long, size_t);
+       void (*ii_advance)(struct iov_iter *, size_t);
+       int (*ii_fault_in_readable)(struct iov_iter *, size_t);
+       size_t (*ii_single_seg_count)(const struct iov_iter *);
+       int (*ii_shorten)(struct iov_iter *, size_t);
+};
+
+static inline size_t iov_iter_copy_to_user_atomic(struct page *page,
+               struct iov_iter *i, unsigned long offset, size_t bytes)
+{
+       return i->ops->ii_copy_to_user_atomic(page, i, offset, bytes);
+}
+static inline size_t __iov_iter_copy_to_user(struct page *page,
+               struct iov_iter *i, unsigned long offset, size_t bytes)
+{
+       return i->ops->ii_copy_to_user(page, i, offset, bytes, 0);
+}
+static inline size_t iov_iter_copy_to_user(struct page *page,
+               struct iov_iter *i, unsigned long offset, size_t bytes)
+{
+       return i->ops->ii_copy_to_user(page, i, offset, bytes, 1);
+}
+static inline size_t iov_iter_copy_from_user_atomic(struct page *page,
+               struct iov_iter *i, unsigned long offset, size_t bytes)
+{
+       return i->ops->ii_copy_from_user_atomic(page, i, offset, bytes);
+}
+static inline size_t iov_iter_copy_from_user(struct page *page,
+               struct iov_iter *i, unsigned long offset, size_t bytes)
+{
+       return i->ops->ii_copy_from_user(page, i, offset, bytes);
+}
+static inline void iov_iter_advance(struct iov_iter *i, size_t bytes)
+{
+       return i->ops->ii_advance(i, bytes);
+}
+static inline int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
+{
+       return i->ops->ii_fault_in_readable(i, bytes);
+}
+static inline size_t iov_iter_single_seg_count(const struct iov_iter *i)
+{
+       return i->ops->ii_single_seg_count(i);
+}
+static inline int iov_iter_shorten(struct iov_iter *i, size_t count)
+{
+       return i->ops->ii_shorten(i, count);
+}
+
+#ifdef CONFIG_BLOCK
+extern struct iov_iter_ops ii_bvec_ops;
+
+struct bio_vec;
+static inline void iov_iter_init_bvec(struct iov_iter *i,
+                                     struct bio_vec *bvec,
+                                     unsigned long nr_segs,
+                                     size_t count, size_t written)
+{
+       i->ops = &ii_bvec_ops;
+       i->data = (unsigned long)bvec;
+       i->nr_segs = nr_segs;
+       i->iov_offset = 0;
+       i->count = count + written;
+
+       iov_iter_advance(i, written);
+}
+
+static inline int iov_iter_has_bvec(struct iov_iter *i)
+{
+       return i->ops == &ii_bvec_ops;
+}
+
+static inline struct bio_vec *iov_iter_bvec(struct iov_iter *i)
+{
+       BUG_ON(!iov_iter_has_bvec(i));
+       return (struct bio_vec *)i->data;
+}
+#endif
+
+extern struct iov_iter_ops ii_iovec_ops;
 
 static inline void iov_iter_init(struct iov_iter *i,
                        const struct iovec *iov, unsigned long nr_segs,
                        size_t count, size_t written)
 {
-       i->iov = iov;
+       i->ops = &ii_iovec_ops;
+       i->data = (unsigned long)iov;
        i->nr_segs = nr_segs;
        i->iov_offset = 0;
        i->count = count + written;
@@ -316,6 +397,17 @@ static inline void iov_iter_init(struct iov_iter *i,
        iov_iter_advance(i, written);
 }
 
+static inline int iov_iter_has_iovec(struct iov_iter *i)
+{
+       return i->ops == &ii_iovec_ops;
+}
+
+static inline struct iovec *iov_iter_iovec(struct iov_iter *i)
+{
+       BUG_ON(!iov_iter_has_iovec(i));
+       return (struct iovec *)i->data;
+}
+
 static inline size_t iov_iter_count(struct iov_iter *i)
 {
        return i->count;
@@ -368,8 +460,8 @@ struct address_space_operations {
        void (*invalidatepage) (struct page *, unsigned int, unsigned int);
        int (*releasepage) (struct page *, gfp_t);
        void (*freepage)(struct page *);
-       ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
-                       loff_t offset, unsigned long nr_segs);
+       ssize_t (*direct_IO)(int, struct kiocb *, struct iov_iter *iter,
+                       loff_t offset);
        int (*get_xip_mem)(struct address_space *, pgoff_t, int,
                                                void **, unsigned long *);
        /*
@@ -1531,7 +1623,9 @@ struct file_operations {
        ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
        ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
        ssize_t (*aio_read) (struct kiocb *, const struct iovec *, unsigned long, loff_t);
+       ssize_t (*read_iter) (struct kiocb *, struct iov_iter *, loff_t);
        ssize_t (*aio_write) (struct kiocb *, const struct iovec *, unsigned long, loff_t);
+       ssize_t (*write_iter) (struct kiocb *, struct iov_iter *, loff_t);
        int (*iterate) (struct file *, struct dir_context *);
        unsigned int (*poll) (struct file *, struct poll_table_struct *);
        long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
@@ -1556,6 +1650,18 @@ struct file_operations {
        int (*show_fdinfo)(struct seq_file *m, struct file *f);
 };
 
+static inline int file_readable(struct file *filp)
+{
+       return filp && (filp->f_op->read || filp->f_op->aio_read ||
+                       filp->f_op->read_iter);
+}
+
+static inline int file_writable(struct file *filp)
+{
+       return filp && (filp->f_op->write || filp->f_op->aio_write ||
+                       filp->f_op->write_iter);
+}
+
 struct inode_operations {
        struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int);
        void * (*follow_link) (struct dentry *, struct nameidata *);
@@ -2393,25 +2499,36 @@ extern int sb_min_blocksize(struct super_block *, int);
 extern int generic_file_mmap(struct file *, struct vm_area_struct *);
 extern int generic_file_readonly_mmap(struct file *, struct vm_area_struct *);
 extern int generic_file_remap_pages(struct vm_area_struct *, unsigned long addr,
-               unsigned long size, pgoff_t pgoff);
-extern int file_read_actor(read_descriptor_t * desc, struct page *page, unsigned long offset, unsigned long size);
+                               unsigned long size, pgoff_t pgoff);
+extern int file_read_iter_actor(read_descriptor_t *desc, struct page *page,
+                               unsigned long offset, unsigned long size);
 int generic_write_checks(struct file *file, loff_t *pos, size_t *count, int isblk);
 extern ssize_t generic_file_aio_read(struct kiocb *, const struct iovec *, unsigned long, loff_t);
+extern ssize_t generic_file_read_iter(struct kiocb *, struct iov_iter *,
+               loff_t);
 extern ssize_t __generic_file_aio_write(struct kiocb *, const struct iovec *, unsigned long,
                loff_t *);
+extern ssize_t __generic_file_write_iter(struct kiocb *, struct iov_iter *,
+               loff_t *);
 extern ssize_t generic_file_aio_write(struct kiocb *, const struct iovec *, unsigned long, loff_t);
+extern ssize_t generic_file_write_iter(struct kiocb *, struct iov_iter *,
+               loff_t);
 extern ssize_t generic_file_direct_write(struct kiocb *, const struct iovec *,
                unsigned long *, loff_t, loff_t *, size_t, size_t);
+extern ssize_t generic_file_direct_write_iter(struct kiocb *, struct iov_iter *,
+               loff_t, loff_t *, size_t);
 extern ssize_t generic_file_buffered_write(struct kiocb *, const struct iovec *,
                unsigned long, loff_t, loff_t *, size_t, ssize_t);
+extern ssize_t generic_file_buffered_write_iter(struct kiocb *,
+               struct iov_iter *, loff_t, loff_t *, size_t, ssize_t);
 extern ssize_t do_sync_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos);
 extern ssize_t do_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos);
 extern int generic_segment_checks(const struct iovec *iov,
                unsigned long *nr_segs, size_t *count, int access_flags);
 
 /* fs/block_dev.c */
-extern ssize_t blkdev_aio_write(struct kiocb *iocb, const struct iovec *iov,
-                               unsigned long nr_segs, loff_t pos);
+extern ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *iter,
+                                loff_t pos);
 extern int blkdev_fsync(struct file *filp, loff_t start, loff_t end,
                        int datasync);
 extern void block_sync_page(struct page *page);
@@ -2468,16 +2585,16 @@ enum {
 void dio_end_io(struct bio *bio, int error);
 
 ssize_t __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
-       struct block_device *bdev, const struct iovec *iov, loff_t offset,
-       unsigned long nr_segs, get_block_t get_block, dio_iodone_t end_io,
-       dio_submit_t submit_io, int flags);
+       struct block_device *bdev, struct iov_iter *iter, loff_t offset,
+       get_block_t get_block, dio_iodone_t end_io, dio_submit_t submit_io,
+       int flags);
 
 static inline ssize_t blockdev_direct_IO(int rw, struct kiocb *iocb,
-               struct inode *inode, const struct iovec *iov, loff_t offset,
-               unsigned long nr_segs, get_block_t get_block)
+               struct inode *inode, struct iov_iter *iter, loff_t offset,
+               get_block_t get_block)
 {
-       return __blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
-                                   offset, nr_segs, get_block, NULL, NULL,
+       return __blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iter,
+                                   offset, get_block, NULL, NULL,
                                    DIO_LOCKING | DIO_SKIP_HOLES);
 }
 #endif
@@ -2503,6 +2620,7 @@ extern void generic_fillattr(struct inode *, struct kstat *);
 extern int vfs_getattr(struct path *, struct kstat *);
 void __inode_add_bytes(struct inode *inode, loff_t bytes);
 void inode_add_bytes(struct inode *inode, loff_t bytes);
+void __inode_sub_bytes(struct inode *inode, loff_t bytes);
 void inode_sub_bytes(struct inode *inode, loff_t bytes);
 loff_t inode_get_bytes(struct inode *inode);
 void inode_set_bytes(struct inode *inode, loff_t bytes);
index 51b793466ff3adcadb4e6bcc0d372494dbf7440c..343d82a5446877c7f641ed27e01fd8927cc48036 100644 (file)
@@ -18,6 +18,7 @@
 
 #include <linux/string.h>
 #include <linux/of_mdio.h>
+#include <linux/if_ether.h>
 #include <asm/types.h>
 
 #define FS_ENET_NAME   "fs_enet"
@@ -135,7 +136,7 @@ struct fs_platform_info {
        const struct fs_mii_bus_info *bus_info;
 
        int rx_ring, tx_ring;   /* number of buffers on rx     */
-       __u8 macaddr[6];        /* mac address                 */
+       __u8 macaddr[ETH_ALEN]; /* mac address                 */
        int rx_copybreak;       /* limit we copy small frames  */
        int use_napi;           /* use NAPI                    */
        int napi_weight;        /* NAPI weight                 */
diff --git a/include/linux/fsl/mxs-dma.h b/include/linux/fsl/mxs-dma.h
deleted file mode 100644 (file)
index 55d8702..0000000
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Copyright 2011 Freescale Semiconductor, Inc. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef __MACH_MXS_DMA_H__
-#define __MACH_MXS_DMA_H__
-
-#include <linux/dmaengine.h>
-
-struct mxs_dma_data {
-       int chan_irq;
-};
-
-extern int mxs_dma_is_apbh(struct dma_chan *chan);
-extern int mxs_dma_is_apbx(struct dma_chan *chan);
-#endif /* __MACH_MXS_DMA_H__ */
index 3b589440ecfe8c6919a52acfed5b893544747ad2..bc6743e76e378796474cb3cb784e062d07b904b0 100644 (file)
@@ -23,6 +23,15 @@ enum hdmi_infoframe_type {
 #define HDMI_SPD_INFOFRAME_SIZE    25
 #define HDMI_AUDIO_INFOFRAME_SIZE  10
 
+#define HDMI_INFOFRAME_SIZE(type)      \
+       (HDMI_INFOFRAME_HEADER_SIZE + HDMI_ ## type ## _INFOFRAME_SIZE)
+
+struct hdmi_any_infoframe {
+       enum hdmi_infoframe_type type;
+       unsigned char version;
+       unsigned char length;
+};
+
 enum hdmi_colorspace {
        HDMI_COLORSPACE_RGB,
        HDMI_COLORSPACE_YUV422,
@@ -228,4 +237,15 @@ struct hdmi_vendor_infoframe {
 ssize_t hdmi_vendor_infoframe_pack(struct hdmi_vendor_infoframe *frame,
                                   void *buffer, size_t size);
 
+union hdmi_infoframe {
+       struct hdmi_any_infoframe any;
+       struct hdmi_avi_infoframe avi;
+       struct hdmi_spd_infoframe spd;
+       struct hdmi_vendor_infoframe vendor;
+       struct hdmi_audio_infoframe audio;
+};
+
+ssize_t
+hdmi_infoframe_pack(union hdmi_infoframe *frame, void *buffer, size_t size);
+
 #endif /* _DRM_HDMI_H */
index 0c48991b0402d0d93110b8a4fa9cd42d615279c2..5a4e789b9e978dcbb7065bdd167dc7ab64f2c38a 100644 (file)
@@ -456,6 +456,7 @@ struct hid_device {                                                 /* device report descriptor */
        enum hid_type type;                                             /* device type (mouse, kbd, ...) */
        unsigned country;                                               /* HID country */
        struct hid_report_enum report_enum[HID_REPORT_TYPES];
+       struct work_struct led_work;                                    /* delayed LED worker */
 
        struct semaphore driver_lock;                                   /* protects the current driver, except during input */
        struct semaphore driver_input_lock;                             /* protects the current driver */
@@ -744,6 +745,7 @@ struct hid_field *hidinput_get_led_field(struct hid_device *hid);
 unsigned int hidinput_count_leds(struct hid_device *hid);
 __s32 hidinput_calc_abs_res(const struct hid_field *field, __u16 code);
 void hid_output_report(struct hid_report *report, __u8 *data);
+u8 *hid_alloc_report_buf(struct hid_report *report, gfp_t flags);
 struct hid_device *hid_allocate_device(void);
 struct hid_report *hid_register_report(struct hid_device *device, unsigned type, unsigned id);
 int hid_parse_report(struct hid_device *hid, __u8 *start, unsigned size);
@@ -989,7 +991,6 @@ int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, int size,
 u32 usbhid_lookup_quirk(const u16 idVendor, const u16 idProduct);
 int usbhid_quirks_init(char **quirks_param);
 void usbhid_quirks_exit(void);
-void usbhid_set_leds(struct hid_device *hid);
 
 #ifdef CONFIG_HID_PID
 int hid_pidff_init(struct hid_device *hid);
index e988fa935b3c4ad22e5a6e47e8f4ccea0f0e1c3d..b3c4b8dac1ceb70d2a9d434314f9ad2a5d957183 100644 (file)
@@ -447,11 +447,13 @@ static inline void i2c_set_adapdata(struct i2c_adapter *dev, void *data)
 static inline struct i2c_adapter *
 i2c_parent_is_i2c_adapter(const struct i2c_adapter *adapter)
 {
+#if IS_ENABLED(I2C_MUX)
        struct device *parent = adapter->dev.parent;
 
        if (parent != NULL && parent->type == &i2c_adapter_type)
                return to_i2c_adapter(parent);
        else
+#endif
                return NULL;
 }
 
index 60e411d764d4d67bf69d52936baf8f844bc097e2..7aa901d920585949b2ecf6c546d65cb004daceea 100644 (file)
@@ -19,7 +19,8 @@
  * @hid_descriptor_address: i2c register where the HID descriptor is stored.
  *
  * Note that it is the responsibility of the platform driver (or the acpi 5.0
- * driver) to setup the irq related to the gpio in the struct i2c_board_info.
+ * driver, or the flattened device tree) to setup the irq related to the gpio in
+ * the struct i2c_board_info.
  * The platform driver should also setup the gpio according to the device:
  *
  * A typical example is the following:
index 1a9f65e6ec0f043a4e45ff58c0220db5194e737d..53aab243cbd83437e60caef39ef39502bba9f5ce 100644 (file)
@@ -67,6 +67,9 @@ struct i2c_pxa_platform_data {
        unsigned int            class;
        unsigned int            use_pio :1;
        unsigned int            fast_mode :1;
+       unsigned int            high_mode:1;
+       unsigned char           master_code;
+       unsigned long           rate;
 };
 
 extern void pxa_set_i2c_info(struct i2c_pxa_platform_data *info);
index b0dc87a2a376e2583d2d28363a0cdd13f859536c..a5b598a79becb99eb3554886cbec7708ee426309 100644 (file)
@@ -16,6 +16,7 @@
 #define LINUX_IEEE80211_H
 
 #include <linux/types.h>
+#include <linux/if_ether.h>
 #include <asm/byteorder.h>
 
 /*
@@ -209,28 +210,28 @@ static inline u16 ieee80211_sn_sub(u16 sn1, u16 sn2)
 struct ieee80211_hdr {
        __le16 frame_control;
        __le16 duration_id;
-       u8 addr1[6];
-       u8 addr2[6];
-       u8 addr3[6];
+       u8 addr1[ETH_ALEN];
+       u8 addr2[ETH_ALEN];
+       u8 addr3[ETH_ALEN];
        __le16 seq_ctrl;
-       u8 addr4[6];
+       u8 addr4[ETH_ALEN];
 } __packed __aligned(2);
 
 struct ieee80211_hdr_3addr {
        __le16 frame_control;
        __le16 duration_id;
-       u8 addr1[6];
-       u8 addr2[6];
-       u8 addr3[6];
+       u8 addr1[ETH_ALEN];
+       u8 addr2[ETH_ALEN];
+       u8 addr3[ETH_ALEN];
        __le16 seq_ctrl;
 } __packed __aligned(2);
 
 struct ieee80211_qos_hdr {
        __le16 frame_control;
        __le16 duration_id;
-       u8 addr1[6];
-       u8 addr2[6];
-       u8 addr3[6];
+       u8 addr1[ETH_ALEN];
+       u8 addr2[ETH_ALEN];
+       u8 addr3[ETH_ALEN];
        __le16 seq_ctrl;
        __le16 qos_ctrl;
 } __packed __aligned(2);
@@ -608,8 +609,8 @@ struct ieee80211s_hdr {
        u8 flags;
        u8 ttl;
        __le32 seqnum;
-       u8 eaddr1[6];
-       u8 eaddr2[6];
+       u8 eaddr1[ETH_ALEN];
+       u8 eaddr2[ETH_ALEN];
 } __packed __aligned(2);
 
 /* Mesh flags */
@@ -758,7 +759,7 @@ struct ieee80211_rann_ie {
        u8 rann_flags;
        u8 rann_hopcount;
        u8 rann_ttl;
-       u8 rann_addr[6];
+       u8 rann_addr[ETH_ALEN];
        __le32 rann_seq;
        __le32 rann_interval;
        __le32 rann_metric;
@@ -802,9 +803,9 @@ enum ieee80211_vht_opmode_bits {
 struct ieee80211_mgmt {
        __le16 frame_control;
        __le16 duration;
-       u8 da[6];
-       u8 sa[6];
-       u8 bssid[6];
+       u8 da[ETH_ALEN];
+       u8 sa[ETH_ALEN];
+       u8 bssid[ETH_ALEN];
        __le16 seq_ctrl;
        union {
                struct {
@@ -833,7 +834,7 @@ struct ieee80211_mgmt {
                struct {
                        __le16 capab_info;
                        __le16 listen_interval;
-                       u8 current_ap[6];
+                       u8 current_ap[ETH_ALEN];
                        /* followed by SSID and Supported rates */
                        u8 variable[0];
                } __packed reassoc_req;
@@ -966,21 +967,21 @@ struct ieee80211_vendor_ie {
 struct ieee80211_rts {
        __le16 frame_control;
        __le16 duration;
-       u8 ra[6];
-       u8 ta[6];
+       u8 ra[ETH_ALEN];
+       u8 ta[ETH_ALEN];
 } __packed __aligned(2);
 
 struct ieee80211_cts {
        __le16 frame_control;
        __le16 duration;
-       u8 ra[6];
+       u8 ra[ETH_ALEN];
 } __packed __aligned(2);
 
 struct ieee80211_pspoll {
        __le16 frame_control;
        __le16 aid;
-       u8 bssid[6];
-       u8 ta[6];
+       u8 bssid[ETH_ALEN];
+       u8 ta[ETH_ALEN];
 } __packed __aligned(2);
 
 /* TDLS */
@@ -989,14 +990,14 @@ struct ieee80211_pspoll {
 struct ieee80211_tdls_lnkie {
        u8 ie_type; /* Link Identifier IE */
        u8 ie_len;
-       u8 bssid[6];
-       u8 init_sta[6];
-       u8 resp_sta[6];
+       u8 bssid[ETH_ALEN];
+       u8 init_sta[ETH_ALEN];
+       u8 resp_sta[ETH_ALEN];
 } __packed;
 
 struct ieee80211_tdls_data {
-       u8 da[6];
-       u8 sa[6];
+       u8 da[ETH_ALEN];
+       u8 sa[ETH_ALEN];
        __be16 ether_type;
        u8 payload_type;
        u8 category;
@@ -1090,8 +1091,8 @@ struct ieee80211_p2p_noa_attr {
 struct ieee80211_bar {
        __le16 frame_control;
        __le16 duration;
-       __u8 ra[6];
-       __u8 ta[6];
+       __u8 ra[ETH_ALEN];
+       __u8 ta[ETH_ALEN];
        __le16 control;
        __le16 start_seq_num;
 } __packed;
@@ -1709,6 +1710,10 @@ enum ieee80211_eid {
        WLAN_EID_OPMODE_NOTIF = 199,
        WLAN_EID_WIDE_BW_CHANNEL_SWITCH = 194,
        WLAN_EID_CHANNEL_SWITCH_WRAPPER = 196,
+       WLAN_EID_EXTENDED_BSS_LOAD = 193,
+       WLAN_EID_VHT_TX_POWER_ENVELOPE = 195,
+       WLAN_EID_AID = 197,
+       WLAN_EID_QUIET_CHANNEL = 198,
 
        /* 802.11ad */
        WLAN_EID_NON_TX_BSSID_CAP =  83,
@@ -1860,6 +1865,11 @@ enum ieee80211_tdls_actioncode {
        WLAN_TDLS_DISCOVERY_REQUEST = 10,
 };
 
+/* Interworking capabilities are set in 7th bit of 4th byte of the
+ * @WLAN_EID_EXT_CAPABILITY information element
+ */
+#define WLAN_EXT_CAPA4_INTERWORKING_ENABLED    BIT(7)
+
 /*
  * TDLS capabililites to be enabled in the 5th byte of the
  * @WLAN_EID_EXT_CAPABILITY information element
@@ -2279,4 +2289,8 @@ static inline bool ieee80211_check_tim(const struct ieee80211_tim_ie *tim,
        return !!(tim->virtual_map[index] & mask);
 }
 
+/* convert time units */
+#define TU_TO_JIFFIES(x)       (usecs_to_jiffies((x) * 1024))
+#define TU_TO_EXP_TIME(x)      (jiffies + TU_TO_JIFFIES(x))
+
 #endif /* LINUX_IEEE80211_H */
index f6156f91eb1cb0fb06597e83b95a8bf74664cee6..a899dc24be152ab29525ee4cdcc4e8f2a7a6636f 100644 (file)
@@ -10,9 +10,9 @@
 #ifndef _LINUX_IF_TEAM_H_
 #define _LINUX_IF_TEAM_H_
 
-
 #include <linux/netpoll.h>
 #include <net/sch_generic.h>
+#include <linux/types.h>
 #include <uapi/linux/if_team.h>
 
 struct team_pcpu_stats {
@@ -194,6 +194,18 @@ struct team {
        bool user_carrier_enabled;
        bool queue_override_enabled;
        struct list_head *qom_lists; /* array of queue override mapping lists */
+       struct {
+               unsigned int count;
+               unsigned int interval; /* in ms */
+               atomic_t count_pending;
+               struct delayed_work dw;
+       } notify_peers;
+       struct {
+               unsigned int count;
+               unsigned int interval; /* in ms */
+               atomic_t count_pending;
+               struct delayed_work dw;
+       } mcast_rejoin;
        long mode_priv[TEAM_MODE_PRIV_LONGS];
 };
 
index e3362b5f13e8258857f50a64e73fe3e70f9039f3..f47550d75f85fddd53186c92f0e1383c8f693b90 100644 (file)
@@ -129,6 +129,5 @@ extern void ip_mc_unmap(struct in_device *);
 extern void ip_mc_remap(struct in_device *);
 extern void ip_mc_dec_group(struct in_device *in_dev, __be32 addr);
 extern void ip_mc_inc_group(struct in_device *in_dev, __be32 addr);
-extern void ip_mc_rejoin_groups(struct in_device *in_dev);
 
 #endif
index b99cd23f347489ea0a3c2a14d58193c6e1117331..c796ce26c7c06da9654382e01bff1d9228d09778 100644 (file)
@@ -27,9 +27,11 @@ enum
        IPV4_DEVCONF_TAG,
        IPV4_DEVCONF_ARPFILTER,
        IPV4_DEVCONF_MEDIUM_ID,
+       IPV4_DEVCONF_FORCE_IGMP_VERSION,
+       IPV4_DEVCONF_IGMPV2_UNSOLICITED_REPORT_INTERVAL,
+       IPV4_DEVCONF_IGMPV3_UNSOLICITED_REPORT_INTERVAL,
        IPV4_DEVCONF_NOXFRM,
        IPV4_DEVCONF_NOPOLICY,
-       IPV4_DEVCONF_FORCE_IGMP_VERSION,
        IPV4_DEVCONF_ARP_ANNOUNCE,
        IPV4_DEVCONF_ARP_IGNORE,
        IPV4_DEVCONF_PROMOTE_SECONDARIES,
index 3aeb7305e2f59d3eae986c1c36d0af22cfea057b..7ea319e95b4771ecb9dc4be7d04ad24a8e628d86 100644 (file)
@@ -58,10 +58,26 @@ struct iommu_domain {
 #define IOMMU_CAP_CACHE_COHERENCY      0x1
 #define IOMMU_CAP_INTR_REMAP           0x2     /* isolates device intrs */
 
+/*
+ * Following constraints are specifc to FSL_PAMUV1:
+ *  -aperture must be power of 2, and naturally aligned
+ *  -number of windows must be power of 2, and address space size
+ *   of each window is determined by aperture size / # of windows
+ *  -the actual size of the mapped region of a window must be power
+ *   of 2 starting with 4KB and physical address must be naturally
+ *   aligned.
+ * DOMAIN_ATTR_FSL_PAMUV1 corresponds to the above mentioned contraints.
+ * The caller can invoke iommu_domain_get_attr to check if the underlying
+ * iommu implementation supports these constraints.
+ */
+
 enum iommu_attr {
        DOMAIN_ATTR_GEOMETRY,
        DOMAIN_ATTR_PAGING,
        DOMAIN_ATTR_WINDOWS,
+       DOMAIN_ATTR_FSL_PAMU_STASH,
+       DOMAIN_ATTR_FSL_PAMU_ENABLE,
+       DOMAIN_ATTR_FSL_PAMUV1,
        DOMAIN_ATTR_MAX,
 };
 
index 850e95bc766c8504d4fbc2c592c1327ef5994431..9ac5047062c826d85102355d9a4bb98bcda795f1 100644 (file)
@@ -19,6 +19,8 @@ struct ipv6_devconf {
        __s32           rtr_solicit_interval;
        __s32           rtr_solicit_delay;
        __s32           force_mld_version;
+       __s32           mldv1_unsolicited_report_interval;
+       __s32           mldv2_unsolicited_report_interval;
 #ifdef CONFIG_IPV6_PRIVACY
        __s32           use_tempaddr;
        __s32           temp_valid_lft;
@@ -101,6 +103,7 @@ struct inet6_skb_parm {
 #define IP6SKB_FORWARDED       2
 #define IP6SKB_REROUTED                4
 #define IP6SKB_ROUTERALERT     8
+#define IP6SKB_FRAGMENTED      16
 };
 
 #define IP6CB(skb)     ((struct inet6_skb_parm*)((skb)->cb))
index 8685d1be12c71a2d28d8be80b3bbe9ae507e1ee4..31229e0be90bd812ce953d257a4b9482f18ff558 100644 (file)
 #define JBD_EXPENSIVE_CHECKING
 extern u8 journal_enable_debug;
 
-#define jbd_debug(n, f, a...)                                          \
-       do {                                                            \
-               if ((n) <= journal_enable_debug) {                      \
-                       printk (KERN_DEBUG "(%s, %d): %s: ",            \
-                               __FILE__, __LINE__, __func__);  \
-                       printk (f, ## a);                               \
-               }                                                       \
-       } while (0)
+void __jbd_debug(int level, const char *file, const char *func,
+                unsigned int line, const char *fmt, ...);
+
+#define jbd_debug(n, fmt, a...) \
+       __jbd_debug((n), __FILE__, __func__, __LINE__, (fmt), ##a)
 #else
-#define jbd_debug(f, a...)     /**/
+#define jbd_debug(n, fmt, a...)    /**/
 #endif
 
 static inline void *jbd_alloc(size_t size, gfp_t flags)
@@ -77,7 +74,7 @@ static inline void *jbd_alloc(size_t size, gfp_t flags)
 static inline void jbd_free(void *ptr, size_t size)
 {
        free_pages((unsigned long)ptr, get_order(size));
-};
+}
 
 #define JFS_MIN_JOURNAL_BLOCKS 1024
 
index debf208b7611ea004537ae284b8436eccd9e7aa2..31c0cd1c941a6c122506cc3158be203391617460 100644 (file)
@@ -69,7 +69,7 @@ typedef union ktime ktime_t;          /* Kill this */
  * @secs:      seconds to set
  * @nsecs:     nanoseconds to set
  *
- * Return the ktime_t representation of the value
+ * Return: The ktime_t representation of the value.
  */
 static inline ktime_t ktime_set(const long secs, const unsigned long nsecs)
 {
@@ -151,7 +151,7 @@ static inline ktime_t ktime_set(const long secs, const unsigned long nsecs)
  * @lhs:       minuend
  * @rhs:       subtrahend
  *
- * Returns the remainder of the subtraction
+ * Return: The remainder of the subtraction.
  */
 static inline ktime_t ktime_sub(const ktime_t lhs, const ktime_t rhs)
 {
@@ -169,7 +169,7 @@ static inline ktime_t ktime_sub(const ktime_t lhs, const ktime_t rhs)
  * @add1:      addend1
  * @add2:      addend2
  *
- * Returns the sum of @add1 and @add2.
+ * Return: The sum of @add1 and @add2.
  */
 static inline ktime_t ktime_add(const ktime_t add1, const ktime_t add2)
 {
@@ -195,7 +195,7 @@ static inline ktime_t ktime_add(const ktime_t add1, const ktime_t add2)
  * @kt:                addend
  * @nsec:      the scalar nsec value to add
  *
- * Returns the sum of @kt and @nsec in ktime_t format
+ * Return: The sum of @kt and @nsec in ktime_t format.
  */
 extern ktime_t ktime_add_ns(const ktime_t kt, u64 nsec);
 
@@ -204,7 +204,7 @@ extern ktime_t ktime_add_ns(const ktime_t kt, u64 nsec);
  * @kt:                minuend
  * @nsec:      the scalar nsec value to subtract
  *
- * Returns the subtraction of @nsec from @kt in ktime_t format
+ * Return: The subtraction of @nsec from @kt in ktime_t format.
  */
 extern ktime_t ktime_sub_ns(const ktime_t kt, u64 nsec);
 
@@ -212,7 +212,7 @@ extern ktime_t ktime_sub_ns(const ktime_t kt, u64 nsec);
  * timespec_to_ktime - convert a timespec to ktime_t format
  * @ts:                the timespec variable to convert
  *
- * Returns a ktime_t variable with the converted timespec value
+ * Return: A ktime_t variable with the converted timespec value.
  */
 static inline ktime_t timespec_to_ktime(const struct timespec ts)
 {
@@ -224,7 +224,7 @@ static inline ktime_t timespec_to_ktime(const struct timespec ts)
  * timeval_to_ktime - convert a timeval to ktime_t format
  * @tv:                the timeval variable to convert
  *
- * Returns a ktime_t variable with the converted timeval value
+ * Return: A ktime_t variable with the converted timeval value.
  */
 static inline ktime_t timeval_to_ktime(const struct timeval tv)
 {
@@ -237,7 +237,7 @@ static inline ktime_t timeval_to_ktime(const struct timeval tv)
  * ktime_to_timespec - convert a ktime_t variable to timespec format
  * @kt:                the ktime_t variable to convert
  *
- * Returns the timespec representation of the ktime value
+ * Return: The timespec representation of the ktime value.
  */
 static inline struct timespec ktime_to_timespec(const ktime_t kt)
 {
@@ -249,7 +249,7 @@ static inline struct timespec ktime_to_timespec(const ktime_t kt)
  * ktime_to_timeval - convert a ktime_t variable to timeval format
  * @kt:                the ktime_t variable to convert
  *
- * Returns the timeval representation of the ktime value
+ * Return: The timeval representation of the ktime value.
  */
 static inline struct timeval ktime_to_timeval(const ktime_t kt)
 {
@@ -262,7 +262,7 @@ static inline struct timeval ktime_to_timeval(const ktime_t kt)
  * ktime_to_ns - convert a ktime_t variable to scalar nanoseconds
  * @kt:                the ktime_t variable to convert
  *
- * Returns the scalar nanoseconds representation of @kt
+ * Return: The scalar nanoseconds representation of @kt.
  */
 static inline s64 ktime_to_ns(const ktime_t kt)
 {
@@ -276,7 +276,9 @@ static inline s64 ktime_to_ns(const ktime_t kt)
  * @cmp1:      comparable1
  * @cmp2:      comparable2
  *
- * Compare two ktime_t variables, returns 1 if equal
+ * Compare two ktime_t variables.
+ *
+ * Return: 1 if equal.
  */
 static inline int ktime_equal(const ktime_t cmp1, const ktime_t cmp2)
 {
@@ -288,7 +290,7 @@ static inline int ktime_equal(const ktime_t cmp1, const ktime_t cmp2)
  * @cmp1:      comparable1
  * @cmp2:      comparable2
  *
- * Returns ...
+ * Return: ...
  *   cmp1  < cmp2: return <0
  *   cmp1 == cmp2: return 0
  *   cmp1  > cmp2: return >0
@@ -342,7 +344,7 @@ extern ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs);
  * @kt:                the ktime_t variable to convert
  * @ts:                the timespec variable to store the result in
  *
- * Returns true if there was a successful conversion, false if kt was 0.
+ * Return: %true if there was a successful conversion, %false if kt was 0.
  */
 static inline __must_check bool ktime_to_timespec_cond(const ktime_t kt,
                                                       struct timespec *ts)
index 4ea55bb45debf3a1e8ad6d8120e81b763069adc1..283d66bc603c3374454896e9013f2984cd18b450 100644 (file)
@@ -138,6 +138,22 @@ enum {
        ATA_SHT_THIS_ID         = -1,
        ATA_SHT_USE_CLUSTERING  = 1,
 
+       /* struct ata_taskfile flags */
+       ATA_TFLAG_LBA48         = (1 << 0), /* enable 48-bit LBA and "HOB" */
+       ATA_TFLAG_ISADDR        = (1 << 1), /* enable r/w to nsect/lba regs */
+       ATA_TFLAG_DEVICE        = (1 << 2), /* enable r/w to device reg */
+       ATA_TFLAG_WRITE         = (1 << 3), /* data dir: host->dev==1 (write) */
+       ATA_TFLAG_LBA           = (1 << 4), /* enable LBA */
+       ATA_TFLAG_FUA           = (1 << 5), /* enable FUA */
+       ATA_TFLAG_POLLING       = (1 << 6), /* set nIEN to 1 and use polling */
+
+       /* protocol flags */
+       ATA_PROT_FLAG_PIO       = (1 << 0), /* is PIO */
+       ATA_PROT_FLAG_DMA       = (1 << 1), /* is DMA */
+       ATA_PROT_FLAG_DATA      = ATA_PROT_FLAG_PIO | ATA_PROT_FLAG_DMA,
+       ATA_PROT_FLAG_NCQ       = (1 << 2), /* is NCQ */
+       ATA_PROT_FLAG_ATAPI     = (1 << 3), /* is ATAPI */
+
        /* struct ata_device stuff */
        ATA_DFLAG_LBA           = (1 << 0), /* device supports LBA */
        ATA_DFLAG_LBA48         = (1 << 1), /* device supports LBA48 */
@@ -518,6 +534,29 @@ enum sw_activity {
        BLINK_OFF,
 };
 
+struct ata_taskfile {
+       unsigned long           flags;          /* ATA_TFLAG_xxx */
+       u8                      protocol;       /* ATA_PROT_xxx */
+
+       u8                      ctl;            /* control reg */
+
+       u8                      hob_feature;    /* additional data */
+       u8                      hob_nsect;      /* to support LBA48 */
+       u8                      hob_lbal;
+       u8                      hob_lbam;
+       u8                      hob_lbah;
+
+       u8                      feature;
+       u8                      nsect;
+       u8                      lbal;
+       u8                      lbam;
+       u8                      lbah;
+
+       u8                      device;
+
+       u8                      command;        /* IO operation */
+};
+
 #ifdef CONFIG_ATA_SFF
 struct ata_ioports {
        void __iomem            *cmd_addr;
@@ -959,6 +998,69 @@ extern const unsigned long sata_deb_timing_long[];
 extern struct ata_port_operations ata_dummy_port_ops;
 extern const struct ata_port_info ata_dummy_port_info;
 
+/*
+ * protocol tests
+ */
+static inline unsigned int ata_prot_flags(u8 prot)
+{
+       switch (prot) {
+       case ATA_PROT_NODATA:
+               return 0;
+       case ATA_PROT_PIO:
+               return ATA_PROT_FLAG_PIO;
+       case ATA_PROT_DMA:
+               return ATA_PROT_FLAG_DMA;
+       case ATA_PROT_NCQ:
+               return ATA_PROT_FLAG_DMA | ATA_PROT_FLAG_NCQ;
+       case ATAPI_PROT_NODATA:
+               return ATA_PROT_FLAG_ATAPI;
+       case ATAPI_PROT_PIO:
+               return ATA_PROT_FLAG_ATAPI | ATA_PROT_FLAG_PIO;
+       case ATAPI_PROT_DMA:
+               return ATA_PROT_FLAG_ATAPI | ATA_PROT_FLAG_DMA;
+       }
+       return 0;
+}
+
+static inline int ata_is_atapi(u8 prot)
+{
+       return ata_prot_flags(prot) & ATA_PROT_FLAG_ATAPI;
+}
+
+static inline int ata_is_nodata(u8 prot)
+{
+       return !(ata_prot_flags(prot) & ATA_PROT_FLAG_DATA);
+}
+
+static inline int ata_is_pio(u8 prot)
+{
+       return ata_prot_flags(prot) & ATA_PROT_FLAG_PIO;
+}
+
+static inline int ata_is_dma(u8 prot)
+{
+       return ata_prot_flags(prot) & ATA_PROT_FLAG_DMA;
+}
+
+static inline int ata_is_ncq(u8 prot)
+{
+       return ata_prot_flags(prot) & ATA_PROT_FLAG_NCQ;
+}
+
+static inline int ata_is_data(u8 prot)
+{
+       return ata_prot_flags(prot) & ATA_PROT_FLAG_DATA;
+}
+
+static inline int is_multi_taskfile(struct ata_taskfile *tf)
+{
+       return (tf->command == ATA_CMD_READ_MULTI) ||
+              (tf->command == ATA_CMD_WRITE_MULTI) ||
+              (tf->command == ATA_CMD_READ_MULTI_EXT) ||
+              (tf->command == ATA_CMD_WRITE_MULTI_EXT) ||
+              (tf->command == ATA_CMD_WRITE_MULTI_FUA_EXT);
+}
+
 static inline const unsigned long *
 sata_ehc_deb_timing(struct ata_eh_context *ehc)
 {
index 2913b86eb12a7a1068991b9342e7ed43c8eec1fe..69ed5f5e9f6e4a83f8c9226cb92d6ba7eafa17bd 100644 (file)
@@ -30,6 +30,15 @@ static inline s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
        return dividend / divisor;
 }
 
+/**
+ * div64_u64_rem - unsigned 64bit divide with 64bit divisor and remainder
+ */
+static inline u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
+{
+       *remainder = dividend % divisor;
+       return dividend / divisor;
+}
+
 /**
  * div64_u64 - unsigned 64bit divide with 64bit divisor
  */
@@ -63,6 +72,10 @@ static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
 extern s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder);
 #endif
 
+#ifndef div64_u64_rem
+extern u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder);
+#endif
+
 #ifndef div64_u64
 extern u64 div64_u64(u64 dividend, u64 divisor);
 #endif
index 7b4d9d79570b6a4d0914f08bab3b8b208d665bb4..6c416092e3244d1e82b1f928ee0d31a6f6e46162 100644 (file)
@@ -85,7 +85,7 @@ extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
 extern struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm);
 
 extern struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg);
-extern struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont);
+extern struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css);
 
 static inline
 bool mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *memcg)
diff --git a/include/linux/mfd/arizona/gpio.h b/include/linux/mfd/arizona/gpio.h
new file mode 100644 (file)
index 0000000..d2146bb
--- /dev/null
@@ -0,0 +1,96 @@
+/*
+ * GPIO configuration for Arizona devices
+ *
+ * Copyright 2013 Wolfson Microelectronics. PLC.
+ *
+ * Author: Charles Keepax <ckeepax@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ARIZONA_GPIO_H
+#define _ARIZONA_GPIO_H
+
+#define ARIZONA_GP_FN_TXLRCLK                    0x00
+#define ARIZONA_GP_FN_GPIO                       0x01
+#define ARIZONA_GP_FN_IRQ1                       0x02
+#define ARIZONA_GP_FN_IRQ2                       0x03
+#define ARIZONA_GP_FN_OPCLK                      0x04
+#define ARIZONA_GP_FN_FLL1_OUT                   0x05
+#define ARIZONA_GP_FN_FLL2_OUT                   0x06
+#define ARIZONA_GP_FN_PWM1                       0x08
+#define ARIZONA_GP_FN_PWM2                       0x09
+#define ARIZONA_GP_FN_SYSCLK_UNDERCLOCKED        0x0A
+#define ARIZONA_GP_FN_ASYNCCLK_UNDERCLOCKED      0x0B
+#define ARIZONA_GP_FN_FLL1_LOCK                  0x0C
+#define ARIZONA_GP_FN_FLL2_LOCK                  0x0D
+#define ARIZONA_GP_FN_FLL1_CLOCK_OK              0x0F
+#define ARIZONA_GP_FN_FLL2_CLOCK_OK              0x10
+#define ARIZONA_GP_FN_HEADPHONE_DET              0x12
+#define ARIZONA_GP_FN_MIC_DET                    0x13
+#define ARIZONA_GP_FN_WSEQ_STATUS                0x15
+#define ARIZONA_GP_FN_CIF_ADDRESS_ERROR          0x16
+#define ARIZONA_GP_FN_ASRC1_LOCK                 0x1A
+#define ARIZONA_GP_FN_ASRC2_LOCK                 0x1B
+#define ARIZONA_GP_FN_ASRC_CONFIG_ERROR          0x1C
+#define ARIZONA_GP_FN_DRC1_SIGNAL_DETECT         0x1D
+#define ARIZONA_GP_FN_DRC1_ANTICLIP              0x1E
+#define ARIZONA_GP_FN_DRC1_DECAY                 0x1F
+#define ARIZONA_GP_FN_DRC1_NOISE                 0x20
+#define ARIZONA_GP_FN_DRC1_QUICK_RELEASE         0x21
+#define ARIZONA_GP_FN_DRC2_SIGNAL_DETECT         0x22
+#define ARIZONA_GP_FN_DRC2_ANTICLIP              0x23
+#define ARIZONA_GP_FN_DRC2_DECAY                 0x24
+#define ARIZONA_GP_FN_DRC2_NOISE                 0x25
+#define ARIZONA_GP_FN_DRC2_QUICK_RELEASE         0x26
+#define ARIZONA_GP_FN_MIXER_DROPPED_SAMPLE       0x27
+#define ARIZONA_GP_FN_AIF1_CONFIG_ERROR          0x28
+#define ARIZONA_GP_FN_AIF2_CONFIG_ERROR          0x29
+#define ARIZONA_GP_FN_AIF3_CONFIG_ERROR          0x2A
+#define ARIZONA_GP_FN_SPK_TEMP_SHUTDOWN          0x2B
+#define ARIZONA_GP_FN_SPK_TEMP_WARNING           0x2C
+#define ARIZONA_GP_FN_UNDERCLOCKED               0x2D
+#define ARIZONA_GP_FN_OVERCLOCKED                0x2E
+#define ARIZONA_GP_FN_DSP_IRQ1                   0x35
+#define ARIZONA_GP_FN_DSP_IRQ2                   0x36
+#define ARIZONA_GP_FN_ASYNC_OPCLK                0x3D
+#define ARIZONA_GP_FN_BOOT_DONE                  0x44
+#define ARIZONA_GP_FN_DSP1_RAM_READY             0x45
+#define ARIZONA_GP_FN_SYSCLK_ENA_STATUS          0x4B
+#define ARIZONA_GP_FN_ASYNCCLK_ENA_STATUS        0x4C
+
+#define ARIZONA_GPN_DIR                          0x8000  /* GPN_DIR */
+#define ARIZONA_GPN_DIR_MASK                     0x8000  /* GPN_DIR */
+#define ARIZONA_GPN_DIR_SHIFT                        15  /* GPN_DIR */
+#define ARIZONA_GPN_DIR_WIDTH                         1  /* GPN_DIR */
+#define ARIZONA_GPN_PU                           0x4000  /* GPN_PU */
+#define ARIZONA_GPN_PU_MASK                      0x4000  /* GPN_PU */
+#define ARIZONA_GPN_PU_SHIFT                         14  /* GPN_PU */
+#define ARIZONA_GPN_PU_WIDTH                          1  /* GPN_PU */
+#define ARIZONA_GPN_PD                           0x2000  /* GPN_PD */
+#define ARIZONA_GPN_PD_MASK                      0x2000  /* GPN_PD */
+#define ARIZONA_GPN_PD_SHIFT                         13  /* GPN_PD */
+#define ARIZONA_GPN_PD_WIDTH                          1  /* GPN_PD */
+#define ARIZONA_GPN_LVL                          0x0800  /* GPN_LVL */
+#define ARIZONA_GPN_LVL_MASK                     0x0800  /* GPN_LVL */
+#define ARIZONA_GPN_LVL_SHIFT                        11  /* GPN_LVL */
+#define ARIZONA_GPN_LVL_WIDTH                         1  /* GPN_LVL */
+#define ARIZONA_GPN_POL                          0x0400  /* GPN_POL */
+#define ARIZONA_GPN_POL_MASK                     0x0400  /* GPN_POL */
+#define ARIZONA_GPN_POL_SHIFT                        10  /* GPN_POL */
+#define ARIZONA_GPN_POL_WIDTH                         1  /* GPN_POL */
+#define ARIZONA_GPN_OP_CFG                       0x0200  /* GPN_OP_CFG */
+#define ARIZONA_GPN_OP_CFG_MASK                  0x0200  /* GPN_OP_CFG */
+#define ARIZONA_GPN_OP_CFG_SHIFT                      9  /* GPN_OP_CFG */
+#define ARIZONA_GPN_OP_CFG_WIDTH                      1  /* GPN_OP_CFG */
+#define ARIZONA_GPN_DB                           0x0100  /* GPN_DB */
+#define ARIZONA_GPN_DB_MASK                      0x0100  /* GPN_DB */
+#define ARIZONA_GPN_DB_SHIFT                          8  /* GPN_DB */
+#define ARIZONA_GPN_DB_WIDTH                          1  /* GPN_DB */
+#define ARIZONA_GPN_FN_MASK                      0x007F  /* GPN_FN */
+#define ARIZONA_GPN_FN_SHIFT                          0  /* GPN_FN */
+#define ARIZONA_GPN_FN_WIDTH                          7  /* GPN_FN */
+
+#endif
diff --git a/include/linux/mfd/da9063/core.h b/include/linux/mfd/da9063/core.h
new file mode 100644 (file)
index 0000000..2d2a0af
--- /dev/null
@@ -0,0 +1,93 @@
+/*
+ * Definitions for DA9063 MFD driver
+ *
+ * Copyright 2012 Dialog Semiconductor Ltd.
+ *
+ * Author: Michal Hajduk <michal.hajduk@diasemi.com>
+ *        Krystian Garbaciak <krystian.garbaciak@diasemi.com>
+ *
+ *  This program is free software; you can redistribute  it and/or modify it
+ *  under  the terms of  the GNU General  Public License as published by the
+ *  Free Software Foundation;  either version 2 of the  License, or (at your
+ *  option) any later version.
+ *
+ */
+
+#ifndef __MFD_DA9063_CORE_H__
+#define __MFD_DA9063_CORE_H__
+
+#include <linux/interrupt.h>
+#include <linux/mfd/da9063/registers.h>
+
+/* DA9063 modules */
+#define DA9063_DRVNAME_CORE            "da9063-core"
+#define DA9063_DRVNAME_REGULATORS      "da9063-regulators"
+#define DA9063_DRVNAME_LEDS            "da9063-leds"
+#define DA9063_DRVNAME_WATCHDOG                "da9063-watchdog"
+#define DA9063_DRVNAME_HWMON           "da9063-hwmon"
+#define DA9063_DRVNAME_ONKEY           "da9063-onkey"
+#define DA9063_DRVNAME_RTC             "da9063-rtc"
+#define DA9063_DRVNAME_VIBRATION       "da9063-vibration"
+
+enum da9063_models {
+       PMIC_DA9063 = 0x61,
+};
+
+/* Interrupts */
+enum da9063_irqs {
+       DA9063_IRQ_ONKEY = 0,
+       DA9063_IRQ_ALARM,
+       DA9063_IRQ_TICK,
+       DA9063_IRQ_ADC_RDY,
+       DA9063_IRQ_SEQ_RDY,
+       DA9063_IRQ_WAKE,
+       DA9063_IRQ_TEMP,
+       DA9063_IRQ_COMP_1V2,
+       DA9063_IRQ_LDO_LIM,
+       DA9063_IRQ_REG_UVOV,
+       DA9063_IRQ_VDD_MON,
+       DA9063_IRQ_WARN,
+       DA9063_IRQ_GPI0,
+       DA9063_IRQ_GPI1,
+       DA9063_IRQ_GPI2,
+       DA9063_IRQ_GPI3,
+       DA9063_IRQ_GPI4,
+       DA9063_IRQ_GPI5,
+       DA9063_IRQ_GPI6,
+       DA9063_IRQ_GPI7,
+       DA9063_IRQ_GPI8,
+       DA9063_IRQ_GPI9,
+       DA9063_IRQ_GPI10,
+       DA9063_IRQ_GPI11,
+       DA9063_IRQ_GPI12,
+       DA9063_IRQ_GPI13,
+       DA9063_IRQ_GPI14,
+       DA9063_IRQ_GPI15,
+};
+
+#define DA9063_IRQ_BASE_OFFSET 0
+#define DA9063_NUM_IRQ         (DA9063_IRQ_GPI15 + 1 - DA9063_IRQ_BASE_OFFSET)
+
+struct da9063 {
+       /* Device */
+       struct device   *dev;
+       unsigned short  model;
+       unsigned short  revision;
+       unsigned int    flags;
+
+       /* Control interface */
+       struct regmap   *regmap;
+
+       /* Interrupts */
+       int             chip_irq;
+       unsigned int    irq_base;
+       struct regmap_irq_chip_data *regmap_irq;
+};
+
+int da9063_device_init(struct da9063 *da9063, unsigned int irq);
+int da9063_irq_init(struct da9063 *da9063);
+
+void da9063_device_exit(struct da9063 *da9063);
+void da9063_irq_exit(struct da9063 *da9063);
+
+#endif /* __MFD_DA9063_CORE_H__ */
diff --git a/include/linux/mfd/da9063/pdata.h b/include/linux/mfd/da9063/pdata.h
new file mode 100644 (file)
index 0000000..95c8742
--- /dev/null
@@ -0,0 +1,111 @@
+/*
+ * Platform configuration options for DA9063
+ *
+ * Copyright 2012 Dialog Semiconductor Ltd.
+ *
+ * Author: Michal Hajduk <michal.hajduk@diasemi.com>
+ * Author: Krystian Garbaciak <krystian.garbaciak@diasemi.com>
+ *
+ *  This program is free software; you can redistribute  it and/or modify it
+ *  under  the terms of  the GNU General  Public License as published by the
+ *  Free Software Foundation;  either version 2 of the  License, or (at your
+ *  option) any later version.
+ *
+ */
+
+#ifndef __MFD_DA9063_PDATA_H__
+#define __MFD_DA9063_PDATA_H__
+
+#include <linux/regulator/machine.h>
+
+/*
+ * Regulator configuration
+ */
+/* DA9063 regulator IDs */
+enum {
+       /* BUCKs */
+       DA9063_ID_BCORE1,
+       DA9063_ID_BCORE2,
+       DA9063_ID_BPRO,
+       DA9063_ID_BMEM,
+       DA9063_ID_BIO,
+       DA9063_ID_BPERI,
+
+       /* BCORE1 and BCORE2 in merged mode */
+       DA9063_ID_BCORES_MERGED,
+       /* BMEM and BIO in merged mode */
+       DA9063_ID_BMEM_BIO_MERGED,
+       /* When two BUCKs are merged, they cannot be reused separately */
+
+       /* LDOs */
+       DA9063_ID_LDO1,
+       DA9063_ID_LDO2,
+       DA9063_ID_LDO3,
+       DA9063_ID_LDO4,
+       DA9063_ID_LDO5,
+       DA9063_ID_LDO6,
+       DA9063_ID_LDO7,
+       DA9063_ID_LDO8,
+       DA9063_ID_LDO9,
+       DA9063_ID_LDO10,
+       DA9063_ID_LDO11,
+};
+
+/* Regulators platform data */
+struct da9063_regulator_data {
+       int                             id;
+       struct regulator_init_data      *initdata;
+};
+
+struct da9063_regulators_pdata {
+       unsigned                        n_regulators;
+       struct da9063_regulator_data    *regulator_data;
+};
+
+
+/*
+ * RGB LED configuration
+ */
+/* LED IDs for flags in struct led_info. */
+enum {
+       DA9063_GPIO11_LED,
+       DA9063_GPIO14_LED,
+       DA9063_GPIO15_LED,
+
+       DA9063_LED_NUM
+};
+#define DA9063_LED_ID_MASK             0x3
+
+/* LED polarity for flags in struct led_info. */
+#define DA9063_LED_HIGH_LEVEL_ACTIVE   0x0
+#define DA9063_LED_LOW_LEVEL_ACTIVE    0x4
+
+
+/*
+ * General PMIC configuration
+ */
+/* HWMON ADC channels configuration */
+#define DA9063_FLG_FORCE_IN0_MANUAL_MODE       0x0010
+#define DA9063_FLG_FORCE_IN0_AUTO_MODE         0x0020
+#define DA9063_FLG_FORCE_IN1_MANUAL_MODE       0x0040
+#define DA9063_FLG_FORCE_IN1_AUTO_MODE         0x0080
+#define DA9063_FLG_FORCE_IN2_MANUAL_MODE       0x0100
+#define DA9063_FLG_FORCE_IN2_AUTO_MODE         0x0200
+#define DA9063_FLG_FORCE_IN3_MANUAL_MODE       0x0400
+#define DA9063_FLG_FORCE_IN3_AUTO_MODE         0x0800
+
+/* Disable register caching. */
+#define DA9063_FLG_NO_CACHE                    0x0008
+
+struct da9063;
+
+/* DA9063 platform data */
+struct da9063_pdata {
+       int                             (*init)(struct da9063 *da9063);
+       int                             irq_base;
+       unsigned                        flags;
+       struct da9063_regulators_pdata  *regulators_pdata;
+       struct led_platform_data        *leds_pdata;
+};
+
+#endif /* __MFD_DA9063_PDATA_H__ */
diff --git a/include/linux/mfd/da9063/registers.h b/include/linux/mfd/da9063/registers.h
new file mode 100644 (file)
index 0000000..5834813
--- /dev/null
@@ -0,0 +1,1028 @@
+/*
+ * Registers definition for DA9063 modules
+ *
+ * Copyright 2012 Dialog Semiconductor Ltd.
+ *
+ * Author: Michal Hajduk <michal.hajduk@diasemi.com>
+ *        Krystian Garbaciak <krystian.garbaciak@diasemi.com>
+ *
+ *  This program is free software; you can redistribute  it and/or modify it
+ *  under  the terms of  the GNU General  Public License as published by the
+ *  Free Software Foundation;  either version 2 of the  License, or (at your
+ *  option) any later version.
+ *
+ */
+
+#ifndef _DA9063_REG_H
+#define        _DA9063_REG_H
+
+#define DA9063_I2C_PAGE_SEL_SHIFT      1
+
+#define        DA9063_EVENT_REG_NUM            4
+#define        DA9210_EVENT_REG_NUM            2
+#define        DA9063_EXT_EVENT_REG_NUM        (DA9063_EVENT_REG_NUM + \
+                                               DA9210_EVENT_REG_NUM)
+
+/* Page selection I2C or SPI always at the beginning of any page. */
+/* Page 0 : I2C access 0x000 - 0x0FF   SPI access 0x000 - 0x07F */
+/* Page 1 :                            SPI access 0x080 - 0x0FF */
+/* Page 2 : I2C access 0x100 - 0x1FF   SPI access 0x100 - 0x17F */
+/* Page 3 :                            SPI access 0x180 - 0x1FF */
+#define        DA9063_REG_PAGE_CON             0x00
+
+/* System Control and Event Registers */
+#define        DA9063_REG_STATUS_A             0x01
+#define        DA9063_REG_STATUS_B             0x02
+#define        DA9063_REG_STATUS_C             0x03
+#define        DA9063_REG_STATUS_D             0x04
+#define        DA9063_REG_FAULT_LOG            0x05
+#define        DA9063_REG_EVENT_A              0x06
+#define        DA9063_REG_EVENT_B              0x07
+#define        DA9063_REG_EVENT_C              0x08
+#define        DA9063_REG_EVENT_D              0x09
+#define        DA9063_REG_IRQ_MASK_A           0x0A
+#define        DA9063_REG_IRQ_MASK_B           0x0B
+#define        DA9063_REG_IRQ_MASK_C           0x0C
+#define        DA9063_REG_IRQ_MASK_D           0x0D
+#define        DA9063_REG_CONTROL_A            0x0E
+#define        DA9063_REG_CONTROL_B            0x0F
+#define        DA9063_REG_CONTROL_C            0x10
+#define        DA9063_REG_CONTROL_D            0x11
+#define        DA9063_REG_CONTROL_E            0x12
+#define        DA9063_REG_CONTROL_F            0x13
+#define        DA9063_REG_PD_DIS               0x14
+
+/* GPIO Control Registers */
+#define        DA9063_REG_GPIO_0_1             0x15
+#define        DA9063_REG_GPIO_2_3             0x16
+#define        DA9063_REG_GPIO_4_5             0x17
+#define        DA9063_REG_GPIO_6_7             0x18
+#define        DA9063_REG_GPIO_8_9             0x19
+#define        DA9063_REG_GPIO_10_11           0x1A
+#define        DA9063_REG_GPIO_12_13           0x1B
+#define        DA9063_REG_GPIO_14_15           0x1C
+#define        DA9063_REG_GPIO_MODE_0_7        0x1D
+#define        DA9063_REG_GPIO_MODE_8_15       0x1E
+#define        DA9063_REG_GPIO_SWITCH_CONT     0x1F
+
+/* Regulator Control Registers */
+#define        DA9063_REG_BCORE2_CONT          0x20
+#define        DA9063_REG_BCORE1_CONT          0x21
+#define        DA9063_REG_BPRO_CONT            0x22
+#define        DA9063_REG_BMEM_CONT            0x23
+#define        DA9063_REG_BIO_CONT             0x24
+#define        DA9063_REG_BPERI_CONT           0x25
+#define        DA9063_REG_LDO1_CONT            0x26
+#define        DA9063_REG_LDO2_CONT            0x27
+#define        DA9063_REG_LDO3_CONT            0x28
+#define        DA9063_REG_LDO4_CONT            0x29
+#define        DA9063_REG_LDO5_CONT            0x2A
+#define        DA9063_REG_LDO6_CONT            0x2B
+#define        DA9063_REG_LDO7_CONT            0x2C
+#define        DA9063_REG_LDO8_CONT            0x2D
+#define        DA9063_REG_LDO9_CONT            0x2E
+#define        DA9063_REG_LDO10_CONT           0x2F
+#define        DA9063_REG_LDO11_CONT           0x30
+#define        DA9063_REG_VIB                  0x31
+#define        DA9063_REG_DVC_1                0x32
+#define        DA9063_REG_DVC_2                0x33
+
+/* GP-ADC Control Registers */
+#define        DA9063_REG_ADC_MAN              0x34
+#define        DA9063_REG_ADC_CONT             0x35
+#define        DA9063_REG_VSYS_MON             0x36
+#define        DA9063_REG_ADC_RES_L            0x37
+#define        DA9063_REG_ADC_RES_H            0x38
+#define        DA9063_REG_VSYS_RES             0x39
+#define        DA9063_REG_ADCIN1_RES           0x3A
+#define        DA9063_REG_ADCIN2_RES           0x3B
+#define        DA9063_REG_ADCIN3_RES           0x3C
+#define        DA9063_REG_MON1_RES             0x3D
+#define        DA9063_REG_MON2_RES             0x3E
+#define        DA9063_REG_MON3_RES             0x3F
+
+/* RTC Calendar and Alarm Registers */
+#define        DA9063_REG_COUNT_S              0x40
+#define        DA9063_REG_COUNT_MI             0x41
+#define        DA9063_REG_COUNT_H              0x42
+#define        DA9063_REG_COUNT_D              0x43
+#define        DA9063_REG_COUNT_MO             0x44
+#define        DA9063_REG_COUNT_Y              0x45
+#define        DA9063_REG_ALARM_MI             0x46
+#define        DA9063_REG_ALARM_H              0x47
+#define        DA9063_REG_ALARM_D              0x48
+#define        DA9063_REG_ALARM_MO             0x49
+#define        DA9063_REG_ALARM_Y              0x4A
+#define        DA9063_REG_SECOND_A             0x4B
+#define        DA9063_REG_SECOND_B             0x4C
+#define        DA9063_REG_SECOND_C             0x4D
+#define        DA9063_REG_SECOND_D             0x4E
+
+/* Sequencer Control Registers */
+#define        DA9063_REG_SEQ                  0x81
+#define        DA9063_REG_SEQ_TIMER            0x82
+#define        DA9063_REG_ID_2_1               0x83
+#define        DA9063_REG_ID_4_3               0x84
+#define        DA9063_REG_ID_6_5               0x85
+#define        DA9063_REG_ID_8_7               0x86
+#define        DA9063_REG_ID_10_9              0x87
+#define        DA9063_REG_ID_12_11             0x88
+#define        DA9063_REG_ID_14_13             0x89
+#define        DA9063_REG_ID_16_15             0x8A
+#define        DA9063_REG_ID_18_17             0x8B
+#define        DA9063_REG_ID_20_19             0x8C
+#define        DA9063_REG_ID_22_21             0x8D
+#define        DA9063_REG_ID_24_23             0x8E
+#define        DA9063_REG_ID_26_25             0x8F
+#define        DA9063_REG_ID_28_27             0x90
+#define        DA9063_REG_ID_30_29             0x91
+#define        DA9063_REG_ID_32_31             0x92
+#define        DA9063_REG_SEQ_A                0x95
+#define        DA9063_REG_SEQ_B                0x96
+#define        DA9063_REG_WAIT                 0x97
+#define        DA9063_REG_EN_32K               0x98
+#define        DA9063_REG_RESET                0x99
+
+/* Regulator Setting Registers */
+#define        DA9063_REG_BUCK_ILIM_A          0x9A
+#define        DA9063_REG_BUCK_ILIM_B          0x9B
+#define        DA9063_REG_BUCK_ILIM_C          0x9C
+#define        DA9063_REG_BCORE2_CFG           0x9D
+#define        DA9063_REG_BCORE1_CFG           0x9E
+#define        DA9063_REG_BPRO_CFG             0x9F
+#define        DA9063_REG_BIO_CFG              0xA0
+#define        DA9063_REG_BMEM_CFG             0xA1
+#define        DA9063_REG_BPERI_CFG            0xA2
+#define        DA9063_REG_VBCORE2_A            0xA3
+#define        DA9063_REG_VBCORE1_A            0xA4
+#define        DA9063_REG_VBPRO_A              0xA5
+#define        DA9063_REG_VBMEM_A              0xA6
+#define        DA9063_REG_VBIO_A               0xA7
+#define        DA9063_REG_VBPERI_A             0xA8
+#define        DA9063_REG_VLDO1_A              0xA9
+#define        DA9063_REG_VLDO2_A              0xAA
+#define        DA9063_REG_VLDO3_A              0xAB
+#define        DA9063_REG_VLDO4_A              0xAC
+#define        DA9063_REG_VLDO5_A              0xAD
+#define        DA9063_REG_VLDO6_A              0xAE
+#define        DA9063_REG_VLDO7_A              0xAF
+#define        DA9063_REG_VLDO8_A              0xB0
+#define        DA9063_REG_VLDO9_A              0xB1
+#define        DA9063_REG_VLDO10_A             0xB2
+#define        DA9063_REG_VLDO11_A             0xB3
+#define        DA9063_REG_VBCORE2_B            0xB4
+#define        DA9063_REG_VBCORE1_B            0xB5
+#define        DA9063_REG_VBPRO_B              0xB6
+#define        DA9063_REG_VBMEM_B              0xB7
+#define        DA9063_REG_VBIO_B               0xB8
+#define        DA9063_REG_VBPERI_B             0xB9
+#define        DA9063_REG_VLDO1_B              0xBA
+#define        DA9063_REG_VLDO2_B              0xBB
+#define        DA9063_REG_VLDO3_B              0xBC
+#define        DA9063_REG_VLDO4_B              0xBD
+#define        DA9063_REG_VLDO5_B              0xBE
+#define        DA9063_REG_VLDO6_B              0xBF
+#define        DA9063_REG_VLDO7_B              0xC0
+#define        DA9063_REG_VLDO8_B              0xC1
+#define        DA9063_REG_VLDO9_B              0xC2
+#define        DA9063_REG_VLDO10_B             0xC3
+#define        DA9063_REG_VLDO11_B             0xC4
+
+/* Backup Battery Charger Control Register */
+#define        DA9063_REG_BBAT_CONT            0xC5
+
+/* GPIO PWM (LED) */
+#define        DA9063_REG_GPO11_LED            0xC6
+#define        DA9063_REG_GPO14_LED            0xC7
+#define        DA9063_REG_GPO15_LED            0xC8
+
+/* GP-ADC Threshold Registers */
+#define        DA9063_REG_ADC_CFG              0xC9
+#define        DA9063_REG_AUTO1_HIGH           0xCA
+#define        DA9063_REG_AUTO1_LOW            0xCB
+#define        DA9063_REG_AUTO2_HIGH           0xCC
+#define        DA9063_REG_AUTO2_LOW            0xCD
+#define        DA9063_REG_AUTO3_HIGH           0xCE
+#define        DA9063_REG_AUTO3_LOW            0xCF
+
+/* DA9063 Configuration registers */
+/* OTP */
+#define        DA9063_REG_OPT_COUNT            0x101
+#define        DA9063_REG_OPT_ADDR             0x102
+#define        DA9063_REG_OPT_DATA             0x103
+
+/* Customer Trim and Configuration */
+#define        DA9063_REG_T_OFFSET             0x104
+#define        DA9063_REG_INTERFACE            0x105
+#define        DA9063_REG_CONFIG_A             0x106
+#define        DA9063_REG_CONFIG_B             0x107
+#define        DA9063_REG_CONFIG_C             0x108
+#define        DA9063_REG_CONFIG_D             0x109
+#define        DA9063_REG_CONFIG_E             0x10A
+#define        DA9063_REG_CONFIG_F             0x10B
+#define        DA9063_REG_CONFIG_G             0x10C
+#define        DA9063_REG_CONFIG_H             0x10D
+#define        DA9063_REG_CONFIG_I             0x10E
+#define        DA9063_REG_CONFIG_J             0x10F
+#define        DA9063_REG_CONFIG_K             0x110
+#define        DA9063_REG_CONFIG_L             0x111
+#define        DA9063_REG_MON_REG_1            0x112
+#define        DA9063_REG_MON_REG_2            0x113
+#define        DA9063_REG_MON_REG_3            0x114
+#define        DA9063_REG_MON_REG_4            0x115
+#define        DA9063_REG_MON_REG_5            0x116
+#define        DA9063_REG_MON_REG_6            0x117
+#define        DA9063_REG_TRIM_CLDR            0x118
+
+/* General Purpose Registers */
+#define        DA9063_REG_GP_ID_0              0x119
+#define        DA9063_REG_GP_ID_1              0x11A
+#define        DA9063_REG_GP_ID_2              0x11B
+#define        DA9063_REG_GP_ID_3              0x11C
+#define        DA9063_REG_GP_ID_4              0x11D
+#define        DA9063_REG_GP_ID_5              0x11E
+#define        DA9063_REG_GP_ID_6              0x11F
+#define        DA9063_REG_GP_ID_7              0x120
+#define        DA9063_REG_GP_ID_8              0x121
+#define        DA9063_REG_GP_ID_9              0x122
+#define        DA9063_REG_GP_ID_10             0x123
+#define        DA9063_REG_GP_ID_11             0x124
+#define        DA9063_REG_GP_ID_12             0x125
+#define        DA9063_REG_GP_ID_13             0x126
+#define        DA9063_REG_GP_ID_14             0x127
+#define        DA9063_REG_GP_ID_15             0x128
+#define        DA9063_REG_GP_ID_16             0x129
+#define        DA9063_REG_GP_ID_17             0x12A
+#define        DA9063_REG_GP_ID_18             0x12B
+#define        DA9063_REG_GP_ID_19             0x12C
+
+/* Chip ID and variant */
+#define        DA9063_REG_CHIP_ID              0x181
+#define        DA9063_REG_CHIP_VARIANT         0x182
+
+/*
+ * PMIC registers bits
+ */
+/* DA9063_REG_PAGE_CON (addr=0x00) */
+#define        DA9063_PEG_PAGE_SHIFT                   0
+#define        DA9063_REG_PAGE_MASK                    0x07
+#define                DA9063_REG_PAGE0                0x00
+#define                DA9063_REG_PAGE2                0x02
+#define        DA9063_PAGE_WRITE_MODE                  0x00
+#define        DA9063_REPEAT_WRITE_MODE                0x40
+#define        DA9063_PAGE_REVERT                      0x80
+
+/* DA9063_REG_STATUS_A (addr=0x01) */
+#define        DA9063_NONKEY                           0x01
+#define        DA9063_WAKE                             0x02
+#define        DA9063_DVC_BUSY                         0x04
+#define        DA9063_COMP_1V2                         0x08
+
+/* DA9063_REG_STATUS_B (addr=0x02) */
+#define        DA9063_GPI0                             0x01
+#define        DA9063_GPI1                             0x02
+#define        DA9063_GPI2                             0x04
+#define        DA9063_GPI3                             0x08
+#define        DA9063_GPI4                             0x10
+#define        DA9063_GPI5                             0x20
+#define        DA9063_GPI6                             0x40
+#define        DA9063_GPI7                             0x80
+
+/* DA9063_REG_STATUS_C (addr=0x03) */
+#define        DA9063_GPI8                             0x01
+#define        DA9063_GPI9                             0x02
+#define        DA9063_GPI10                            0x04
+#define        DA9063_GPI11                            0x08
+#define        DA9063_GPI12                            0x10
+#define        DA9063_GPI13                            0x20
+#define        DA9063_GPI14                            0x40
+#define        DA9063_GPI15                            0x80
+
+/* DA9063_REG_STATUS_D (addr=0x04) */
+#define        DA9063_LDO3_LIM                         0x08
+#define        DA9063_LDO4_LIM                         0x10
+#define        DA9063_LDO7_LIM                         0x20
+#define        DA9063_LDO8_LIM                         0x40
+#define        DA9063_LDO11_LIM                        0x80
+
+/* DA9063_REG_FAULT_LOG (addr=0x05) */
+#define        DA9063_TWD_ERROR                        0x01
+#define        DA9063_POR                              0x02
+#define        DA9063_VDD_FAULT                        0x04
+#define        DA9063_VDD_START                        0x08
+#define        DA9063_TEMP_CRIT                        0x10
+#define        DA9063_KEY_RESET                        0x20
+#define        DA9063_NSHUTDOWN                        0x40
+#define        DA9063_WAIT_SHUT                        0x80
+
+/* DA9063_REG_EVENT_A (addr=0x06) */
+#define        DA9063_E_NONKEY                         0x01
+#define        DA9063_E_ALARM                          0x02
+#define        DA9063_E_TICK                           0x04
+#define        DA9063_E_ADC_RDY                        0x08
+#define        DA9063_E_SEQ_RDY                        0x10
+#define        DA9063_EVENTS_B                         0x20
+#define        DA9063_EVENTS_C                         0x40
+#define        DA9063_EVENTS_D                         0x80
+
+/* DA9063_REG_EVENT_B (addr=0x07) */
+#define        DA9063_E_WAKE                           0x01
+#define        DA9063_E_TEMP                           0x02
+#define        DA9063_E_COMP_1V2                       0x04
+#define        DA9063_E_LDO_LIM                        0x08
+#define        DA9063_E_REG_UVOV                       0x10
+#define        DA9063_E_DVC_RDY                        0x20
+#define        DA9063_E_VDD_MON                        0x40
+#define        DA9063_E_VDD_WARN                       0x80
+
+/* DA9063_REG_EVENT_C (addr=0x08) */
+#define        DA9063_E_GPI0                           0x01
+#define        DA9063_E_GPI1                           0x02
+#define        DA9063_E_GPI2                           0x04
+#define        DA9063_E_GPI3                           0x08
+#define        DA9063_E_GPI4                           0x10
+#define        DA9063_E_GPI5                           0x20
+#define        DA9063_E_GPI6                           0x40
+#define        DA9063_E_GPI7                           0x80
+
+/* DA9063_REG_EVENT_D (addr=0x09) */
+#define        DA9063_E_GPI8                           0x01
+#define        DA9063_E_GPI9                           0x02
+#define        DA9063_E_GPI10                          0x04
+#define        DA9063_E_GPI11                          0x08
+#define        DA9063_E_GPI12                          0x10
+#define        DA9063_E_GPI13                          0x20
+#define        DA9063_E_GPI14                          0x40
+#define        DA9063_E_GPI15                          0x80
+
+/* DA9063_REG_IRQ_MASK_A (addr=0x0A) */
+#define        DA9063_M_ONKEY                          0x01
+#define        DA9063_M_ALARM                          0x02
+#define        DA9063_M_TICK                           0x04
+#define        DA9063_M_ADC_RDY                        0x08
+#define        DA9063_M_SEQ_RDY                        0x10
+
+/* DA9063_REG_IRQ_MASK_B (addr=0x0B) */
+#define        DA9063_M_WAKE                           0x01
+#define        DA9063_M_TEMP                           0x02
+#define        DA9063_M_COMP_1V2                       0x04
+#define        DA9063_M_LDO_LIM                        0x08
+#define        DA9063_M_UVOV                           0x10
+#define        DA9063_M_DVC_RDY                        0x20
+#define        DA9063_M_VDD_MON                        0x40
+#define        DA9063_M_VDD_WARN                       0x80
+
+/* DA9063_REG_IRQ_MASK_C (addr=0x0C) */
+#define        DA9063_M_GPI0                           0x01
+#define        DA9063_M_GPI1                           0x02
+#define        DA9063_M_GPI2                           0x04
+#define        DA9063_M_GPI3                           0x08
+#define        DA9063_M_GPI4                           0x10
+#define        DA9063_M_GPI5                           0x20
+#define        DA9063_M_GPI6                           0x40
+#define        DA9063_M_GPI7                           0x80
+
+/* DA9063_REG_IRQ_MASK_D (addr=0x0D) */
+#define        DA9063_M_GPI8                           0x01
+#define        DA9063_M_GPI9                           0x02
+#define        DA9063_M_GPI10                          0x04
+#define        DA9063_M_GPI11                          0x08
+#define        DA9063_M_GPI12                          0x10
+#define        DA9063_M_GPI13                          0x20
+#define        DA9063_M_GPI14                          0x40
+#define        DA9063_M_GPI15                          0x80
+
+/* DA9063_REG_CONTROL_A (addr=0x0E) */
+#define        DA9063_SYSTEM_EN                        0x01
+#define        DA9063_POWER_EN                         0x02
+#define        DA9063_POWER1_EN                        0x04
+#define        DA9063_STANDBY                          0x08
+#define        DA9063_M_SYSTEM_EN                      0x10
+#define        DA9063_M_POWER_EN                       0x20
+#define        DA9063_M_POWER1_EN                      0x40
+#define        DA9063_CP_EN                            0x80
+
+/* DA9063_REG_CONTROL_B (addr=0x0F) */
+#define        DA9063_CHG_SEL                          0x01
+#define        DA9063_WATCHDOG_PD                      0x02
+#define        DA9063_NRES_MODE                        0x08
+#define        DA9063_NONKEY_LOCK                      0x10
+
+/* DA9063_REG_CONTROL_C (addr=0x10) */
+#define        DA9063_DEBOUNCING_MASK                  0x07
+#define                DA9063_DEBOUNCING_OFF           0x0
+#define                DA9063_DEBOUNCING_0MS1          0x1
+#define                DA9063_DEBOUNCING_1MS           0x2
+#define                DA9063_DEBOUNCING_10MS24        0x3
+#define                DA9063_DEBOUNCING_51MS2         0x4
+#define                DA9063_DEBOUNCING_256MS         0x5
+#define                DA9063_DEBOUNCING_512MS         0x6
+#define                DA9063_DEBOUNCING_1024MS        0x7
+
+#define        DA9063_AUTO_BOOT                        0x08
+#define        DA9063_OTPREAD_EN                       0x10
+#define        DA9063_SLEW_RATE_MASK                   0x60
+#define                DA9063_SLEW_RATE_4US            0x00
+#define                DA9063_SLEW_RATE_3US            0x20
+#define                DA9063_SLEW_RATE_1US            0x40
+#define                DA9063_SLEW_RATE_0US5           0x60
+#define        DA9063_DEF_SUPPLY                       0x80
+
+/* DA9063_REG_CONTROL_D (addr=0x11) */
+#define        DA9063_TWDSCALE_MASK                    0x07
+#define        DA9063_BLINK_FRQ_MASK                   0x38
+#define                DA9063_BLINK_FRQ_OFF            0x00
+#define                DA9063_BLINK_FRQ_1S0            0x08
+#define                DA9063_BLINK_FRQ_2S0            0x10
+#define                DA9063_BLINK_FRQ_4S0            0x18
+#define                DA9063_BLINK_FRQ_0S18           0x20
+#define                DA9063_BLINK_FRQ_2S0_VDD        0x28
+#define                DA9063_BLINK_FRQ_4S0_VDD        0x30
+#define                DA9063_BLINK_FRQ_0S18_VDD       0x38
+
+#define        DA9063_BLINK_DUR_MASK                   0xC0
+#define                DA9063_BLINK_DUR_10MS           0x00
+#define                DA9063_BLINK_DUR_20MS           0x40
+#define                DA9063_BLINK_DUR_40MS           0x80
+#define                DA9063_BLINK_DUR_20MSDBL        0xC0
+
+/* DA9063_REG_CONTROL_E (addr=0x12) */
+#define        DA9063_RTC_MODE_PD                      0x01
+#define        DA9063_RTC_MODE_SD                      0x02
+#define        DA9063_RTC_EN                           0x04
+#define        DA9063_ECO_MODE                         0x08
+#define        DA9063_PM_FB1_PIN                       0x10
+#define        DA9063_PM_FB2_PIN                       0x20
+#define        DA9063_PM_FB3_PIN                       0x40
+#define        DA9063_V_LOCK                           0x80
+
+/* DA9063_REG_CONTROL_F (addr=0x13) */
+#define        DA9063_WATCHDOG                         0x01
+#define        DA9063_SHUTDOWN                         0x02
+#define        DA9063_WAKE_UP                          0x04
+
+/* DA9063_REG_PD_DIS (addr=0x14) */
+#define        DA9063_GPI_DIS                          0x01
+#define        DA9063_GPADC_PAUSE                      0x02
+#define        DA9063_PMIF_DIS                         0x04
+#define        DA9063_HS2WIRE_DIS                      0x08
+#define        DA9063_BBAT_DIS                         0x20
+#define        DA9063_OUT_32K_PAUSE                    0x40
+#define        DA9063_PMCONT_DIS                       0x80
+
+/* DA9063_REG_GPIO_0_1 (addr=0x15) */
+#define        DA9063_GPIO0_PIN_MASK                   0x03
+#define                DA9063_GPIO0_PIN_ADCIN1         0x00
+#define                DA9063_GPIO0_PIN_GPI            0x01
+#define                DA9063_GPIO0_PIN_GPO_OD         0x02
+#define                DA9063_GPIO0_PIN_GPO            0x03
+#define        DA9063_GPIO0_TYPE                       0x04
+#define                DA9063_GPIO0_TYPE_GPI_ACT_LOW   0x00
+#define                DA9063_GPIO0_TYPE_GPO_VDD_IO1   0x00
+#define                DA9063_GPIO0_TYPE_GPI_ACT_HIGH  0x04
+#define                DA9063_GPIO0_TYPE_GPO_VDD_IO2   0x04
+#define        DA9063_GPIO0_NO_WAKEUP                  0x08
+#define        DA9063_GPIO1_PIN_MASK                   0x30
+#define                DA9063_GPIO1_PIN_ADCIN2_COMP    0x00
+#define                DA9063_GPIO1_PIN_GPI            0x10
+#define                DA9063_GPIO1_PIN_GPO_OD         0x20
+#define                DA9063_GPIO1_PIN_GPO            0x30
+#define        DA9063_GPIO1_TYPE                       0x40
+#define                DA9063_GPIO1_TYPE_GPI_ACT_LOW   0x00
+#define                DA9063_GPIO1_TYPE_GPO_VDD_IO1   0x00
+#define                DA9063_GPIO1_TYPE_GPI_ACT_HIGH  0x04
+#define                DA9063_GPIO1_TYPE_GPO_VDD_IO2   0x04
+#define        DA9063_GPIO1_NO_WAKEUP                  0x80
+
+/* DA9063_REG_GPIO_2_3 (addr=0x16) */
+#define        DA9063_GPIO2_PIN_MASK                   0x03
+#define                DA9063_GPIO2_PIN_ADCIN3         0x00
+#define                DA9063_GPIO2_PIN_GPI            0x01
+#define                DA9063_GPIO2_PIN_GPO_PSS        0x02
+#define                DA9063_GPIO2_PIN_GPO            0x03
+#define        DA9063_GPIO2_TYPE                       0x04
+#define                DA9063_GPIO2_TYPE_GPI_ACT_LOW   0x00
+#define                DA9063_GPIO2_TYPE_GPO_VDD_IO1   0x00
+#define                DA9063_GPIO2_TYPE_GPI_ACT_HIGH  0x04
+#define                DA9063_GPIO2_TYPE_GPO_VDD_IO2   0x04
+#define        DA9063_GPIO2_NO_WAKEUP                  0x08
+#define        DA9063_GPIO3_PIN_MASK                   0x30
+#define                DA9063_GPIO3_PIN_CORE_SW_G      0x00
+#define                DA9063_GPIO3_PIN_GPI            0x10
+#define                DA9063_GPIO3_PIN_GPO_OD         0x20
+#define                DA9063_GPIO3_PIN_GPO            0x30
+#define        DA9063_GPIO3_TYPE                       0x40
+#define                DA9063_GPIO3_TYPE_GPI_ACT_LOW   0x00
+#define                DA9063_GPIO3_TYPE_GPO_VDD_IO1   0x00
+#define                DA9063_GPIO3_TYPE_GPI_ACT_HIGH  0x04
+#define                DA9063_GPIO3_TYPE_GPO_VDD_IO2   0x04
+#define        DA9063_GPIO3_NO_WAKEUP                  0x80
+
+/* DA9063_REG_GPIO_4_5 (addr=0x17) */
+#define        DA9063_GPIO4_PIN_MASK                   0x03
+#define                DA9063_GPIO4_PIN_CORE_SW_S      0x00
+#define                DA9063_GPIO4_PIN_GPI            0x01
+#define                DA9063_GPIO4_PIN_GPO_OD         0x02
+#define                DA9063_GPIO4_PIN_GPO            0x03
+#define        DA9063_GPIO4_TYPE                       0x04
+#define                DA9063_GPIO4_TYPE_GPI_ACT_LOW   0x00
+#define                DA9063_GPIO4_TYPE_GPO_VDD_IO1   0x00
+#define                DA9063_GPIO4_TYPE_GPI_ACT_HIGH  0x04
+#define                DA9063_GPIO4_TYPE_GPO_VDD_IO2   0x04
+#define        DA9063_GPIO4_NO_WAKEUP                  0x08
+#define        DA9063_GPIO5_PIN_MASK                   0x30
+#define                DA9063_GPIO5_PIN_PERI_SW_G      0x00
+#define                DA9063_GPIO5_PIN_GPI            0x10
+#define                DA9063_GPIO5_PIN_GPO_OD         0x20
+#define                DA9063_GPIO5_PIN_GPO            0x30
+#define        DA9063_GPIO5_TYPE                       0x40
+#define                DA9063_GPIO5_TYPE_GPI_ACT_LOW   0x00
+#define                DA9063_GPIO5_TYPE_GPO_VDD_IO1   0x00
+#define                DA9063_GPIO5_TYPE_GPI_ACT_HIGH  0x04
+#define                DA9063_GPIO5_TYPE_GPO_VDD_IO2   0x04
+#define        DA9063_GPIO5_NO_WAKEUP                  0x80
+
+/* DA9063_REG_GPIO_6_7 (addr=0x18) */
+#define        DA9063_GPIO6_PIN_MASK                   0x03
+#define                DA9063_GPIO6_PIN_PERI_SW_S      0x00
+#define                DA9063_GPIO6_PIN_GPI            0x01
+#define                DA9063_GPIO6_PIN_GPO_OD         0x02
+#define                DA9063_GPIO6_PIN_GPO            0x03
+#define        DA9063_GPIO6_TYPE                       0x04
+#define                DA9063_GPIO6_TYPE_GPI_ACT_LOW   0x00
+#define                DA9063_GPIO6_TYPE_GPO_VDD_IO1   0x00
+#define                DA9063_GPIO6_TYPE_GPI_ACT_HIGH  0x04
+#define                DA9063_GPIO6_TYPE_GPO_VDD_IO2   0x04
+#define        DA9063_GPIO6_NO_WAKEUP                  0x08
+#define        DA9063_GPIO7_PIN_MASK                   0x30
+#define                DA9063_GPIO7_PIN_GPI            0x10
+#define                DA9063_GPIO7_PIN_GPO_PSS        0x20
+#define                DA9063_GPIO7_PIN_GPO            0x30
+#define        DA9063_GPIO7_TYPE                       0x40
+#define                DA9063_GPIO7_TYPE_GPI_ACT_LOW   0x00
+#define                DA9063_GPIO7_TYPE_GPO_VDD_IO1   0x00
+#define                DA9063_GPIO7_TYPE_GPI_ACT_HIGH  0x04
+#define                DA9063_GPIO7_TYPE_GPO_VDD_IO2   0x04
+#define        DA9063_GPIO7_NO_WAKEUP                  0x80
+
+/* DA9063_REG_GPIO_8_9 (addr=0x19) */
+#define        DA9063_GPIO8_PIN_MASK                   0x03
+#define                DA9063_GPIO8_PIN_GPI_SYS_EN     0x00
+#define                DA9063_GPIO8_PIN_GPI            0x01
+#define                DA9063_GPIO8_PIN_GPO_PSS        0x02
+#define                DA9063_GPIO8_PIN_GPO            0x03
+#define        DA9063_GPIO8_TYPE                       0x04
+#define                DA9063_GPIO8_TYPE_GPI_ACT_LOW   0x00
+#define                DA9063_GPIO8_TYPE_GPO_VDD_IO1   0x00
+#define                DA9063_GPIO8_TYPE_GPI_ACT_HIGH  0x04
+#define                DA9063_GPIO8_TYPE_GPO_VDD_IO2   0x04
+#define        DA9063_GPIO8_NO_WAKEUP                  0x08
+#define        DA9063_GPIO9_PIN_MASK                   0x30
+#define                DA9063_GPIO9_PIN_GPI_PWR_EN     0x00
+#define                DA9063_GPIO9_PIN_GPI            0x10
+#define                DA9063_GPIO9_PIN_GPO_PSS        0x20
+#define                DA9063_GPIO9_PIN_GPO            0x30
+#define        DA9063_GPIO9_TYPE                       0x40
+#define                DA9063_GPIO9_TYPE_GPI_ACT_LOW   0x00
+#define                DA9063_GPIO9_TYPE_GPO_VDD_IO1   0x00
+#define                DA9063_GPIO9_TYPE_GPI_ACT_HIGH  0x04
+#define                DA9063_GPIO9_TYPE_GPO_VDD_IO2   0x04
+#define        DA9063_GPIO9_NO_WAKEUP                  0x80
+
+/* DA9063_REG_GPIO_10_11 (addr=0x1A) */
+#define        DA9063_GPIO10_PIN_MASK                  0x03
+#define                DA9063_GPIO10_PIN_GPI_PWR1_EN   0x00
+#define                DA9063_GPIO10_PIN_GPI           0x01
+#define                DA9063_GPIO10_PIN_GPO_OD        0x02
+#define                DA9063_GPIO10_PIN_GPO           0x03
+#define        DA9063_GPIO10_TYPE                      0x04
+#define                DA9063_GPIO10_TYPE_GPI_ACT_LOW  0x00
+#define                DA9063_GPIO10_TYPE_GPO_VDD_IO1  0x00
+#define                DA9063_GPIO10_TYPE_GPI_ACT_HIGH 0x04
+#define                DA9063_GPIO10_TYPE_GPO_VDD_IO2  0x04
+#define        DA9063_GPIO10_NO_WAKEUP                 0x08
+#define        DA9063_GPIO11_PIN_MASK                  0x30
+#define                DA9063_GPIO11_PIN_GPO_OD        0x00
+#define                DA9063_GPIO11_PIN_GPI           0x10
+#define                DA9063_GPIO11_PIN_GPO_PSS       0x20
+#define                DA9063_GPIO11_PIN_GPO           0x30
+#define        DA9063_GPIO11_TYPE                      0x40
+#define                DA9063_GPIO11_TYPE_GPI_ACT_LOW  0x00
+#define                DA9063_GPIO11_TYPE_GPO_VDD_IO1  0x00
+#define                DA9063_GPIO11_TYPE_GPI_ACT_HIGH 0x04
+#define                DA9063_GPIO11_TYPE_GPO_VDD_IO2  0x04
+#define        DA9063_GPIO11_NO_WAKEUP                 0x80
+
+/* DA9063_REG_GPIO_12_13 (addr=0x1B) */
+#define        DA9063_GPIO12_PIN_MASK                  0x03
+#define                DA9063_GPIO12_PIN_NVDDFLT_OUT   0x00
+#define                DA9063_GPIO12_PIN_GPI           0x01
+#define                DA9063_GPIO12_PIN_VSYSMON_OUT   0x02
+#define                DA9063_GPIO12_PIN_GPO           0x03
+#define        DA9063_GPIO12_TYPE                      0x04
+#define                DA9063_GPIO12_TYPE_GPI_ACT_LOW  0x00
+#define                DA9063_GPIO12_TYPE_GPO_VDD_IO1  0x00
+#define                DA9063_GPIO12_TYPE_GPI_ACT_HIGH 0x04
+#define                DA9063_GPIO12_TYPE_GPO_VDD_IO2  0x04
+#define        DA9063_GPIO12_NO_WAKEUP                 0x08
+#define        DA9063_GPIO13_PIN_MASK                  0x30
+#define                DA9063_GPIO13_PIN_GPFB1_OUT     0x00
+#define                DA9063_GPIO13_PIN_GPI           0x10
+#define                DA9063_GPIO13_PIN_GPFB1_OUTOD   0x20
+#define                DA9063_GPIO13_PIN_GPO           0x30
+#define        DA9063_GPIO13_TYPE                      0x40
+#define                DA9063_GPIO13_TYPE_GPFB1_OUT    0x00
+#define                DA9063_GPIO13_TYPE_GPI          0x00
+#define                DA9063_GPIO13_TYPE_GPFB1_OUTOD  0x04
+#define                DA9063_GPIO13_TYPE_GPO          0x04
+#define        DA9063_GPIO13_NO_WAKEUP                 0x80
+
+/* DA9063_REG_GPIO_14_15 (addr=0x1C) */
+#define        DA9063_GPIO14_PIN_MASK                  0x03
+#define                DA9063_GPIO14_PIN_GPO_OD        0x00
+#define                DA9063_GPIO14_PIN_GPI           0x01
+#define                DA9063_GPIO14_PIN_HS2DATA       0x02
+#define                DA9063_GPIO14_PIN_GPO           0x03
+#define        DA9063_GPIO14_TYPE                      0x04
+#define                DA9063_GPIO14_TYPE_GPI_ACT_LOW  0x00
+#define                DA9063_GPIO14_TYPE_GPO_VDD_IO1  0x00
+#define                DA9063_GPIO14_TYPE_GPI_ACT_HIGH 0x04
+#define                DA9063_GPIO14_TYPE_GPO_VDD_IO2  0x04
+#define        DA9063_GPIO14_NO_WAKEUP                 0x08
+#define        DA9063_GPIO15_PIN_MASK                  0x30
+#define                DA9063_GPIO15_PIN_GPO_OD        0x00
+#define                DA9063_GPIO15_PIN_GPI           0x10
+#define                DA9063_GPIO15_PIN_GPO           0x30
+#define        DA9063_GPIO15_TYPE                      0x40
+#define                DA9063_GPIO15_TYPE_GPFB1_OUT    0x00
+#define                DA9063_GPIO15_TYPE_GPI          0x00
+#define                DA9063_GPIO15_TYPE_GPFB1_OUTOD  0x04
+#define                DA9063_GPIO15_TYPE_GPO          0x04
+#define        DA9063_GPIO15_NO_WAKEUP                 0x80
+
+/* DA9063_REG_GPIO_MODE_0_7 (addr=0x1D) */
+#define        DA9063_GPIO0_MODE                       0x01
+#define        DA9063_GPIO1_MODE                       0x02
+#define        DA9063_GPIO2_MODE                       0x04
+#define        DA9063_GPIO3_MODE                       0x08
+#define        DA9063_GPIO4_MODE                       0x10
+#define        DA9063_GPIO5_MODE                       0x20
+#define        DA9063_GPIO6_MODE                       0x40
+#define        DA9063_GPIO7_MODE                       0x80
+
+/* DA9063_REG_GPIO_MODE_8_15 (addr=0x1E) */
+#define        DA9063_GPIO8_MODE                       0x01
+#define        DA9063_GPIO9_MODE                       0x02
+#define        DA9063_GPIO10_MODE                      0x04
+#define        DA9063_GPIO11_MODE                      0x08
+#define                DA9063_GPIO11_MODE_LED_ACT_HIGH 0x00
+#define                DA9063_GPIO11_MODE_LED_ACT_LOW  0x08
+#define        DA9063_GPIO12_MODE                      0x10
+#define        DA9063_GPIO13_MODE                      0x20
+#define        DA9063_GPIO14_MODE                      0x40
+#define                DA9063_GPIO14_MODE_LED_ACT_HIGH 0x00
+#define                DA9063_GPIO14_MODE_LED_ACT_LOW  0x40
+#define        DA9063_GPIO15_MODE                      0x80
+#define                DA9063_GPIO15_MODE_LED_ACT_HIGH 0x00
+#define                DA9063_GPIO15_MODE_LED_ACT_LOW  0x80
+
+/* DA9063_REG_SWITCH_CONT (addr=0x1F) */
+#define        DA9063_CORE_SW_GPI_MASK                 0x03
+#define                DA9063_CORE_SW_GPI_OFF          0x00
+#define                DA9063_CORE_SW_GPI_GPIO1        0x01
+#define                DA9063_CORE_SW_GPI_GPIO2        0x02
+#define                DA9063_CORE_SW_GPI_GPIO13       0x03
+#define        DA9063_PERI_SW_GPI_MASK                 0x0C
+#define                DA9063_PERI_SW_GPI_OFF          0x00
+#define                DA9063_PERI_SW_GPI_GPIO1        0x04
+#define                DA9063_PERI_SW_GPI_GPIO2        0x08
+#define                DA9063_PERI_SW_GPI_GPIO13       0x0C
+#define        DA9063_SWITCH_SR_MASK                   0x30
+#define                DA9063_SWITCH_SR_1MV            0x00
+#define                DA9063_SWITCH_SR_5MV            0x10
+#define                DA9063_SWITCH_SR_10MV           0x20
+#define                DA9063_SWITCH_SR_50MV           0x30
+#define        DA9063_SWITCH_SR_DIS                    0x40
+#define        DA9063_CP_EN_MODE                       0x80
+
+/* DA9063_REGL_Bxxxx_CONT common bits (addr=0x20-0x25) */
+#define        DA9063_BUCK_EN                          0x01
+#define DA9063_BUCK_GPI_MASK                   0x06
+#define                DA9063_BUCK_GPI_OFF             0x00
+#define                DA9063_BUCK_GPI_GPIO1           0x02
+#define                DA9063_BUCK_GPI_GPIO2           0x04
+#define                DA9063_BUCK_GPI_GPIO13          0x06
+#define        DA9063_BUCK_CONF                        0x08
+#define        DA9063_VBUCK_GPI_MASK                   0x60
+#define                DA9063_VBUCK_GPI_OFF            0x00
+#define                DA9063_VBUCK_GPI_GPIO1          0x20
+#define                DA9063_VBUCK_GPI_GPIO2          0x40
+#define                DA9063_VBUCK_GPI_GPIO13         0x60
+
+/* DA9063_REG_BCORE1_CONT specific bits (addr=0x21) */
+#define        DA9063_CORE_SW_EN                       0x10
+#define        DA9063_CORE_SW_CONF                     0x80
+
+/* DA9063_REG_BPERI_CONT specific bits (addr=0x25) */
+#define        DA9063_PERI_SW_EN                       0x10
+#define        DA9063_PERI_SW_CONF                     0x80
+
+/* DA9063_REG_LDOx_CONT common bits (addr=0x26-0x30) */
+#define        DA9063_LDO_EN                           0x01
+#define DA9063_LDO_GPI_MASK                    0x06
+#define                DA9063_LDO_GPI_OFF              0x00
+#define                DA9063_LDO_GPI_GPIO1            0x02
+#define                DA9063_LDO_GPI_GPIO2            0x04
+#define                DA9063_LDO_GPI_GPIO13           0x06
+#define        DA9063_LDO_PD_DIS                       0x08
+#define        DA9063_VLDO_GPI_MASK                    0x60
+#define                DA9063_VLDO_GPI_OFF             0x00
+#define                DA9063_VLDO_GPI_GPIO1           0x20
+#define                DA9063_VLDO_GPI_GPIO2           0x40
+#define                DA9063_VLDO_GPI_GPIO13          0x60
+#define        DA9063_LDO_CONF                         0x80
+
+/* DA9063_REG_LDO5_CONT specific bits (addr=0x2A) */
+#define        DA9063_VLDO5_SEL                        0x10
+
+/* DA9063_REG_LDO6_CONT specific bits (addr=0x2B) */
+#define        DA9063_VLDO6_SEL                        0x10
+
+/* DA9063_REG_LDO7_CONT specific bits (addr=0x2C) */
+#define        DA9063_VLDO7_SEL                        0x10
+
+/* DA9063_REG_LDO8_CONT specific bits (addr=0x2D) */
+#define        DA9063_VLDO8_SEL                        0x10
+
+/* DA9063_REG_LDO9_CONT specific bits (addr=0x2E) */
+#define        DA9063_VLDO9_SEL                        0x10
+
+/* DA9063_REG_LDO10_CONT specific bits (addr=0x2F) */
+#define        DA9063_VLDO10_SEL                       0x10
+
+/* DA9063_REG_LDO11_CONT specific bits (addr=0x30) */
+#define        DA9063_VLDO11_SEL                       0x10
+
+/* DA9063_REG_VIB (addr=0x31) */
+#define DA9063_VIB_SET_MASK                    0x3F
+#define                DA9063_VIB_SET_OFF              0
+#define                DA9063_VIB_SET_MAX              0x3F
+
+/* DA9063_REG_DVC_1 (addr=0x32) */
+#define        DA9063_VBCORE1_SEL                      0x01
+#define        DA9063_VBCORE2_SEL                      0x02
+#define        DA9063_VBPRO_SEL                        0x04
+#define        DA9063_VBMEM_SEL                        0x08
+#define        DA9063_VBPERI_SEL                       0x10
+#define        DA9063_VLDO1_SEL                        0x20
+#define        DA9063_VLDO2_SEL                        0x40
+#define        DA9063_VLDO3_SEL                        0x80
+
+/* DA9063_REG_DVC_2 (addr=0x33) */
+#define        DA9063_VBIO_SEL                         0x01
+#define        DA9063_VLDO4_SEL                        0x80
+
+/* DA9063_REG_ADC_MAN (addr=0x34) */
+#define        DA9063_ADC_MUX_MASK                     0x0F
+#define                DA9063_ADC_MUX_VSYS             0x00
+#define                DA9063_ADC_MUX_ADCIN1           0x01
+#define                DA9063_ADC_MUX_ADCIN2           0x02
+#define                DA9063_ADC_MUX_ADCIN3           0x03
+#define                DA9063_ADC_MUX_T_SENSE          0x04
+#define                DA9063_ADC_MUX_VBBAT            0x05
+#define                DA9063_ADC_MUX_LDO_G1           0x08
+#define                DA9063_ADC_MUX_LDO_G2           0x09
+#define                DA9063_ADC_MUX_LDO_G3           0x0A
+#define        DA9063_ADC_MAN                          0x10
+#define        DA9063_ADC_MODE                         0x20
+
+/* DA9063_REG_ADC_CONT (addr=0x35) */
+#define        DA9063_ADC_AUTO_VSYS_EN                 0x01
+#define        DA9063_ADC_AUTO_AD1_EN                  0x02
+#define        DA9063_ADC_AUTO_AD2_EN                  0x04
+#define        DA9063_ADC_AUTO_AD3_EN                  0x08
+#define        DA9063_ADC_AD1_ISRC_EN                  0x10
+#define        DA9063_ADC_AD2_ISRC_EN                  0x20
+#define        DA9063_ADC_AD3_ISRC_EN                  0x40
+#define        DA9063_COMP1V2_EN                       0x80
+
+/* DA9063_REG_VSYS_MON (addr=0x36) */
+#define        DA9063_VSYS_VAL_MASK                    0xFF
+#define        DA9063_VSYS_VAL_BASE                    0x00
+
+/* DA9063_REG_ADC_RES_L (addr=0x37) */
+#define        DA9063_ADC_RES_L_BITS                   2
+#define        DA9063_ADC_RES_L_MASK                   0xC0
+
+/* DA9063_REG_ADC_RES_H (addr=0x38) */
+#define        DA9063_ADC_RES_M_BITS                   8
+#define        DA9063_ADC_RES_M_MASK                   0xFF
+
+/* DA9063_REG_(xxx_RES/ADC_RES_H) (addr=0x39-0x3F) */
+#define        DA9063_ADC_VAL_MASK                     0xFF
+
+/* DA9063_REG_COUNT_S (addr=0x40) */
+#define DA9063_RTC_READ                                0x80
+#define DA9063_COUNT_SEC_MASK                  0x3F
+
+/* DA9063_REG_COUNT_MI (addr=0x41) */
+#define DA9063_COUNT_MIN_MASK                  0x3F
+
+/* DA9063_REG_COUNT_H (addr=0x42) */
+#define DA9063_COUNT_HOUR_MASK                 0x1F
+
+/* DA9063_REG_COUNT_D (addr=0x43) */
+#define DA9063_COUNT_DAY_MASK                  0x1F
+
+/* DA9063_REG_COUNT_MO (addr=0x44) */
+#define DA9063_COUNT_MONTH_MASK                        0x0F
+
+/* DA9063_REG_COUNT_Y (addr=0x45) */
+#define DA9063_COUNT_YEAR_MASK                 0x3F
+#define DA9063_MONITOR                         0x40
+
+/* DA9063_REG_ALARM_MI (addr=0x46) */
+#define DA9063_ALARM_STATUS_ALARM              0x80
+#define DA9063_ALARM_STATUS_TICK               0x40
+#define DA9063_ALARM_MIN_MASK                  0x3F
+
+/* DA9063_REG_ALARM_H (addr=0x47) */
+#define DA9063_ALARM_HOUR_MASK                 0x1F
+
+/* DA9063_REG_ALARM_D (addr=0x48) */
+#define DA9063_ALARM_DAY_MASK                  0x1F
+
+/* DA9063_REG_ALARM_MO (addr=0x49) */
+#define DA9063_TICK_WAKE                       0x20
+#define DA9063_TICK_TYPE                       0x10
+#define                DA9063_TICK_TYPE_SEC            0x00
+#define                DA9063_TICK_TYPE_MIN            0x10
+#define DA9063_ALARM_MONTH_MASK                        0x0F
+
+/* DA9063_REG_ALARM_Y (addr=0x4A) */
+#define DA9063_TICK_ON                         0x80
+#define DA9063_ALARM_ON                                0x40
+#define DA9063_ALARM_YEAR_MASK                 0x3F
+
+/* DA9063_REG_WAIT (addr=0x97) */
+#define        DA9063_REG_WAIT_TIME_MASK               0xF
+#define        DA9063_WAIT_TIME_0_US                   0x0
+#define        DA9063_WAIT_TIME_512_US                 0x1
+#define        DA9063_WAIT_TIME_1_MS                   0x2
+#define        DA9063_WAIT_TIME_2_MS                   0x3
+#define        DA9063_WAIT_TIME_4_1_MS                 0x4
+#define        DA9063_WAIT_TIME_8_2_MS                 0x5
+#define        DA9063_WAIT_TIME_16_4_MS                0x6
+#define        DA9063_WAIT_TIME_32_8_MS                0x7
+#define        DA9063_WAIT_TIME_65_5_MS                0x8
+#define        DA9063_WAIT_TIME_128_MS                 0x9
+#define        DA9063_WAIT_TIME_256_MS                 0xA
+#define        DA9063_WAIT_TIME_512_MS                 0xB
+#define        DA9063_WAIT_TIME_1_S                    0xC
+#define        DA9063_WAIT_TIME_2_1_S                  0xD
+
+/* DA9063_REG_EN_32K (addr=0x98) */
+#define        DA9063_STABILIZ_TIME_MASK               0x7
+#define        DA9063_CRYSTAL                          0x08
+#define        DA9063_DELAY_MODE                       0x10
+#define        DA9063_OUT_CLOCK                        0x20
+#define        DA9063_RTC_CLOCK                        0x40
+#define        DA9063_OUT_32K_EN                       0x80
+
+/* DA9063_REG_CHIP_VARIANT */
+#define        DA9063_CHIP_VARIANT_SHIFT               4
+
+/* DA9063_REG_BUCK_ILIM_A (addr=0x9A) */
+#define DA9063_BIO_ILIM_MASK                   0x0F
+#define DA9063_BMEM_ILIM_MASK                  0xF0
+
+/* DA9063_REG_BUCK_ILIM_B (addr=0x9B) */
+#define DA9063_BPRO_ILIM_MASK                  0x0F
+#define DA9063_BPERI_ILIM_MASK                 0xF0
+
+/* DA9063_REG_BUCK_ILIM_C (addr=0x9C) */
+#define DA9063_BCORE1_ILIM_MASK                        0x0F
+#define DA9063_BCORE2_ILIM_MASK                        0xF0
+
+/* DA9063_REG_Bxxxx_CFG common bits (addr=0x9D-0xA2) */
+#define DA9063_BUCK_FB_MASK                    0x07
+#define DA9063_BUCK_PD_DIS_SHIFT               5
+#define DA9063_BUCK_MODE_MASK                  0xC0
+#define                DA9063_BUCK_MODE_MANUAL         0x00
+#define                DA9063_BUCK_MODE_SLEEP          0x40
+#define                DA9063_BUCK_MODE_SYNC           0x80
+#define                DA9063_BUCK_MODE_AUTO           0xC0
+
+/* DA9063_REG_BPRO_CFG (addr=0x9F) */
+#define        DA9063_BPRO_VTTR_EN                     0x08
+#define        DA9063_BPRO_VTT_EN                      0x10
+
+/* DA9063_REG_VBxxxx_A/B (addr=0xA3-0xA8, 0xB4-0xB9) */
+#define DA9063_VBUCK_MASK                      0x7F
+#define DA9063_VBUCK_BIAS                      0
+#define DA9063_BUCK_SL                         0x80
+
+/* DA9063_REG_VLDOx_A/B (addr=0xA9-0xB3, 0xBA-0xC4) */
+#define DA9063_LDO_SL                          0x80
+
+/* DA9063_REG_VLDO1_A/B (addr=0xA9, 0xBA) */
+#define DA9063_VLDO1_MASK                      0x3F
+#define DA9063_VLDO1_BIAS                      0
+
+/* DA9063_REG_VLDO2_A/B (addr=0xAA, 0xBB) */
+#define DA9063_VLDO2_MASK                      0x3F
+#define DA9063_VLDO2_BIAS                      0
+
+/* DA9063_REG_VLDO3_A/B (addr=0xAB, 0xBC) */
+#define DA9063_VLDO3_MASK                      0x7F
+#define DA9063_VLDO3_BIAS                      0
+
+/* DA9063_REG_VLDO4_A/B (addr=0xAC, 0xBD) */
+#define DA9063_VLDO4_MASK                      0x7F
+#define DA9063_VLDO4_BIAS                      0
+
+/* DA9063_REG_VLDO5_A/B (addr=0xAD, 0xBE) */
+#define DA9063_VLDO5_MASK                      0x3F
+#define DA9063_VLDO5_BIAS                      2
+
+/* DA9063_REG_VLDO6_A/B (addr=0xAE, 0xBF) */
+#define DA9063_VLDO6_MASK                      0x3F
+#define DA9063_VLDO6_BIAS                      2
+
+/* DA9063_REG_VLDO7_A/B (addr=0xAF, 0xC0) */
+#define DA9063_VLDO7_MASK                      0x3F
+#define DA9063_VLDO7_BIAS                      2
+
+/* DA9063_REG_VLDO8_A/B (addr=0xB0, 0xC1) */
+#define DA9063_VLDO8_MASK                      0x3F
+#define DA9063_VLDO8_BIAS                      2
+
+/* DA9063_REG_VLDO9_A/B (addr=0xB1, 0xC2) */
+#define DA9063_VLDO9_MASK                      0x3F
+#define DA9063_VLDO9_BIAS                      3
+
+/* DA9063_REG_VLDO10_A/B (addr=0xB2, 0xC3) */
+#define DA9063_VLDO10_MASK                     0x3F
+#define DA9063_VLDO10_BIAS                     2
+
+/* DA9063_REG_VLDO11_A/B (addr=0xB3, 0xC4) */
+#define DA9063_VLDO11_MASK                     0x3F
+#define DA9063_VLDO11_BIAS                     2
+
+/* DA9063_REG_GPO11_LED (addr=0xC6) */
+/* DA9063_REG_GPO14_LED (addr=0xC7) */
+/* DA9063_REG_GPO15_LED (addr=0xC8) */
+#define DA9063_GPIO_DIM                                0x80
+#define DA9063_GPIO_PWM_MASK                   0x7F
+
+/* DA9063_REG_CONFIG_H (addr=0x10D) */
+#define DA9063_PWM_CLK_MASK                    0x01
+#define                DA9063_PWM_CLK_PWM2MHZ          0x00
+#define                DA9063_PWM_CLK_PWM1MHZ          0x01
+#define DA9063_LDO8_MODE_MASK                  0x02
+#define                DA9063_LDO8_MODE_LDO            0
+#define                DA9063_LDO8_MODE_VIBR           0x02
+#define DA9063_MERGE_SENSE_MASK                        0x04
+#define                DA9063_MERGE_SENSE_GP_FB2       0x00
+#define                DA9063_MERGE_SENSE_GPIO4        0x04
+#define DA9063_BCORE_MERGE                     0x08
+#define DA9063_BPRO_OD                         0x10
+#define DA9063_BCORE2_OD                       0x20
+#define DA9063_BCORE1_OD                       0x40
+#define DA9063_BUCK_MERGE                      0x80
+
+/* DA9063_REG_CONFIG_I (addr=0x10E) */
+#define DA9063_NONKEY_PIN_MASK                 0x03
+#define                DA9063_NONKEY_PIN_PORT          0x00
+#define                DA9063_NONKEY_PIN_SWDOWN        0x01
+#define                DA9063_NONKEY_PIN_AUTODOWN      0x02
+#define                DA9063_NONKEY_PIN_AUTOFLPRT     0x03
+
+/* DA9063_REG_MON_REG_5 (addr=0x116) */
+#define DA9063_MON_A8_IDX_MASK                 0x07
+#define                DA9063_MON_A8_IDX_NONE          0x00
+#define                DA9063_MON_A8_IDX_BCORE1        0x01
+#define                DA9063_MON_A8_IDX_BCORE2        0x02
+#define                DA9063_MON_A8_IDX_BPRO          0x03
+#define                DA9063_MON_A8_IDX_LDO3          0x04
+#define                DA9063_MON_A8_IDX_LDO4          0x05
+#define                DA9063_MON_A8_IDX_LDO11         0x06
+#define DA9063_MON_A9_IDX_MASK                 0x70
+#define                DA9063_MON_A9_IDX_NONE          0x00
+#define                DA9063_MON_A9_IDX_BIO           0x01
+#define                DA9063_MON_A9_IDX_BMEM          0x02
+#define                DA9063_MON_A9_IDX_BPERI         0x03
+#define                DA9063_MON_A9_IDX_LDO1          0x04
+#define                DA9063_MON_A9_IDX_LDO2          0x05
+#define                DA9063_MON_A9_IDX_LDO5          0x06
+
+/* DA9063_REG_MON_REG_6 (addr=0x117) */
+#define DA9063_MON_A10_IDX_MASK                        0x07
+#define                DA9063_MON_A10_IDX_NONE         0x00
+#define                DA9063_MON_A10_IDX_LDO6         0x01
+#define                DA9063_MON_A10_IDX_LDO7         0x02
+#define                DA9063_MON_A10_IDX_LDO8         0x03
+#define                DA9063_MON_A10_IDX_LDO9         0x04
+#define                DA9063_MON_A10_IDX_LDO10        0x05
+
+#endif /* _DA9063_REG_H */
index a9e8bd157673a475ebfb5d78e81f0b8d6d621ed6..f682953043ba9c81686ef35293d0274d402b5326 100644 (file)
@@ -10,6 +10,8 @@
 #ifndef MCP_H
 #define MCP_H
 
+#include <linux/device.h>
+
 struct mcp_ops;
 
 struct mcp {
index 1a8dd7afe0844b81e3f760c9ca80a9ea8dff21e6..2c17fdfce65e882624ca9fd7a6aae9362b7a554c 100644 (file)
@@ -160,7 +160,8 @@ enum palmas_regulators {
        PALMAS_REG_SMPS7,
        PALMAS_REG_SMPS8,
        PALMAS_REG_SMPS9,
-       PALMAS_REG_SMPS10,
+       PALMAS_REG_SMPS10_OUT2,
+       PALMAS_REG_SMPS10_OUT1,
        /* LDO regulators */
        PALMAS_REG_LDO1,
        PALMAS_REG_LDO2,
@@ -183,6 +184,50 @@ enum palmas_regulators {
        PALMAS_NUM_REGS,
 };
 
+/* External controll signal name */
+enum {
+       PALMAS_EXT_CONTROL_ENABLE1      = 0x1,
+       PALMAS_EXT_CONTROL_ENABLE2      = 0x2,
+       PALMAS_EXT_CONTROL_NSLEEP       = 0x4,
+};
+
+/*
+ * Palmas device resources can be controlled externally for
+ * enabling/disabling it rather than register write through i2c.
+ * Add the external controlled requestor ID for different resources.
+ */
+enum palmas_external_requestor_id {
+       PALMAS_EXTERNAL_REQSTR_ID_REGEN1,
+       PALMAS_EXTERNAL_REQSTR_ID_REGEN2,
+       PALMAS_EXTERNAL_REQSTR_ID_SYSEN1,
+       PALMAS_EXTERNAL_REQSTR_ID_SYSEN2,
+       PALMAS_EXTERNAL_REQSTR_ID_CLK32KG,
+       PALMAS_EXTERNAL_REQSTR_ID_CLK32KGAUDIO,
+       PALMAS_EXTERNAL_REQSTR_ID_REGEN3,
+       PALMAS_EXTERNAL_REQSTR_ID_SMPS12,
+       PALMAS_EXTERNAL_REQSTR_ID_SMPS3,
+       PALMAS_EXTERNAL_REQSTR_ID_SMPS45,
+       PALMAS_EXTERNAL_REQSTR_ID_SMPS6,
+       PALMAS_EXTERNAL_REQSTR_ID_SMPS7,
+       PALMAS_EXTERNAL_REQSTR_ID_SMPS8,
+       PALMAS_EXTERNAL_REQSTR_ID_SMPS9,
+       PALMAS_EXTERNAL_REQSTR_ID_SMPS10,
+       PALMAS_EXTERNAL_REQSTR_ID_LDO1,
+       PALMAS_EXTERNAL_REQSTR_ID_LDO2,
+       PALMAS_EXTERNAL_REQSTR_ID_LDO3,
+       PALMAS_EXTERNAL_REQSTR_ID_LDO4,
+       PALMAS_EXTERNAL_REQSTR_ID_LDO5,
+       PALMAS_EXTERNAL_REQSTR_ID_LDO6,
+       PALMAS_EXTERNAL_REQSTR_ID_LDO7,
+       PALMAS_EXTERNAL_REQSTR_ID_LDO8,
+       PALMAS_EXTERNAL_REQSTR_ID_LDO9,
+       PALMAS_EXTERNAL_REQSTR_ID_LDOLN,
+       PALMAS_EXTERNAL_REQSTR_ID_LDOUSB,
+
+       /* Last entry */
+       PALMAS_EXTERNAL_REQSTR_ID_MAX,
+};
+
 struct palmas_pmic_platform_data {
        /* An array of pointers to regulator init data indexed by regulator
         * ID
@@ -258,6 +303,7 @@ struct palmas_platform_data {
         */
        int mux_from_pdata;
        u8 pad1, pad2;
+       bool pm_off;
 
        struct palmas_pmic_platform_data *pmic_pdata;
        struct palmas_gpadc_platform_data *gpadc_pdata;
@@ -355,9 +401,9 @@ struct palmas_pmic {
        int smps123;
        int smps457;
 
-       int range[PALMAS_REG_SMPS10];
-       unsigned int ramp_delay[PALMAS_REG_SMPS10];
-       unsigned int current_reg_mode[PALMAS_REG_SMPS10];
+       int range[PALMAS_REG_SMPS10_OUT1];
+       unsigned int ramp_delay[PALMAS_REG_SMPS10_OUT1];
+       unsigned int current_reg_mode[PALMAS_REG_SMPS10_OUT1];
 };
 
 struct palmas_resource {
@@ -2866,4 +2912,9 @@ static inline int palmas_irq_get_virq(struct palmas *palmas, int irq)
        return regmap_irq_get_virq(palmas->irq_data, irq);
 }
 
+
+int palmas_ext_control_req_config(struct palmas *palmas,
+       enum palmas_external_requestor_id ext_control_req_id,
+       int ext_ctrl, bool enable);
+
 #endif /*  __LINUX_MFD_PALMAS_H */
index 2b13970596f53732cda07a6fd3b270cc9e933e5b..443176ee1ab04e1f9d2788b51d700eb2a913610c 100644 (file)
@@ -1,6 +1,6 @@
 /* Driver for Realtek driver-based card reader
  *
- * Copyright(c) 2009 Realtek Semiconductor Corp. All rights reserved.
+ * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the
@@ -17,7 +17,6 @@
  *
  * Author:
  *   Wei WANG <wei_wang@realsil.com.cn>
- *   No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
  */
 
 #ifndef __RTSX_COMMON_H
index 7a9f7089435dcb21d50c7ff902c14f6c76bd3317..daefca1bafb3672a665d5d3a001f8c96e3b84822 100644 (file)
@@ -1,6 +1,6 @@
 /* Driver for Realtek PCI-Express card reader
  *
- * Copyright(c) 2009 Realtek Semiconductor Corp. All rights reserved.
+ * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the
@@ -17,7 +17,6 @@
  *
  * Author:
  *   Wei WANG <wei_wang@realsil.com.cn>
- *   No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
  */
 
 #ifndef __RTSX_PCI_H
@@ -25,8 +24,7 @@
 
 #include <linux/sched.h>
 #include <linux/pci.h>
-
-#include "rtsx_common.h"
+#include <linux/mfd/rtsx_common.h>
 
 #define MAX_RW_REG_CNT                 1024
 
 #define CARD_SHARE_BAROSSA_SD          0x01
 #define CARD_SHARE_BAROSSA_MS          0x02
 
+/* CARD_DRIVE_SEL */
+#define MS_DRIVE_8mA                   (0x01 << 6)
+#define MMC_DRIVE_8mA                  (0x01 << 4)
+#define XD_DRIVE_8mA                   (0x01 << 2)
+#define GPIO_DRIVE_8mA                 0x01
+#define RTS5209_CARD_DRIVE_DEFAULT     (MS_DRIVE_8mA | MMC_DRIVE_8mA |\
+                                               XD_DRIVE_8mA | GPIO_DRIVE_8mA)
+#define RTL8411_CARD_DRIVE_DEFAULT     (MS_DRIVE_8mA | MMC_DRIVE_8mA |\
+                                               XD_DRIVE_8mA)
+#define RTSX_CARD_DRIVE_DEFAULT                (MS_DRIVE_8mA | GPIO_DRIVE_8mA)
+
 /* SD30_DRIVE_SEL */
 #define DRIVER_TYPE_A                  0x05
 #define DRIVER_TYPE_B                  0x03
 #define DRIVER_TYPE_C                  0x02
 #define DRIVER_TYPE_D                  0x01
+#define CFG_DRIVER_TYPE_A              0x02
+#define CFG_DRIVER_TYPE_B              0x03
+#define CFG_DRIVER_TYPE_C              0x01
+#define CFG_DRIVER_TYPE_D              0x00
 
 /* FPDCTL */
 #define SSC_POWER_DOWN                 0x01
 #define SAMPLE_VAR_CLK0                        (0x01 << 4)
 #define SAMPLE_VAR_CLK1                        (0x02 << 4)
 
+/* HOST_SLEEP_STATE */
+#define HOST_ENTER_S1                  1
+#define HOST_ENTER_S3                  2
+
 #define MS_CFG                         0xFD40
 #define MS_TPC                         0xFD41
 #define MS_TRANS_CFG                   0xFD42
 #define PME_FORCE_CTL                  0xFE56
 #define ASPM_FORCE_CTL                 0xFE57
 #define PM_CLK_FORCE_CTL               0xFE58
+#define FUNC_FORCE_CTL                 0xFE59
 #define PERST_GLITCH_WIDTH             0xFE5C
 #define CHANGE_LINK_STATE              0xFE5B
 #define RESET_LOAD_REG                 0xFE5E
 
 #define DUMMY_REG_RESET_0              0xFE90
 
+#define AUTOLOAD_CFG_BASE              0xFF00
+
+#define PM_CTRL1                       0xFF44
+#define PM_CTRL2                       0xFF45
+#define PM_CTRL3                       0xFF46
+#define PM_CTRL4                       0xFF47
+
 /* Memory mapping */
 #define SRAM_BASE                      0xE600
 #define RBUF_BASE                      0xF400
 #define PHY_FLD4                       0x1E
 #define PHY_DUM_REG                    0x1F
 
+#define LCTLR                          0x80
+#define PCR_SETTING_REG1               0x724
+#define PCR_SETTING_REG2               0x814
+#define PCR_SETTING_REG3               0x747
+
 #define rtsx_pci_init_cmd(pcr)         ((pcr)->ci = 0)
 
 struct rtsx_pcr;
@@ -747,6 +777,8 @@ struct pcr_ops {
                                                u8 voltage);
        unsigned int    (*cd_deglitch)(struct rtsx_pcr *pcr);
        int             (*conv_clk_and_div_n)(int clk, int dir);
+       void            (*fetch_vendor_settings)(struct rtsx_pcr *pcr);
+       void            (*force_power_down)(struct rtsx_pcr *pcr, u8 pm_state);
 };
 
 enum PDEV_STAT  {PDEV_STAT_IDLE, PDEV_STAT_RUN};
@@ -788,7 +820,6 @@ struct rtsx_pcr {
        struct completion               *finish_me;
 
        unsigned int                    cur_clock;
-       bool                            ms_pmos;
        bool                            remove_pci;
        bool                            msi_en;
 
@@ -806,6 +837,16 @@ struct rtsx_pcr {
 #define IC_VER_D                       3
        u8                              ic_version;
 
+       u8                              sd30_drive_sel_1v8;
+       u8                              sd30_drive_sel_3v3;
+       u8                              card_drive_sel;
+#define ASPM_L1_EN                     0x02
+       u8                              aspm_en;
+
+#define PCR_MS_PMOS                    (1 << 0)
+#define PCR_REVERSE_SOCKET             (1 << 1)
+       u32                             flags;
+
        const u32                       *sd_pull_ctl_enable_tbl;
        const u32                       *sd_pull_ctl_disable_tbl;
        const u32                       *ms_pull_ctl_enable_tbl;
index 4e94dc65f987f2b9185ac18b4be67c3ffbb53f58..b3ddf98dec3734ea7c4fc483a2efacc8cd7dfcad 100644 (file)
@@ -167,11 +167,8 @@ enum s2mps11_regulators {
        S2MPS11_BUCK8,
        S2MPS11_BUCK9,
        S2MPS11_BUCK10,
-       S2MPS11_AP_EN32KHZ,
-       S2MPS11_CP_EN32KHZ,
-       S2MPS11_BT_EN32KHZ,
 
-       S2MPS11_REG_MAX,
+       S2MPS11_REGULATOR_MAX,
 };
 
 #define S2MPS11_BUCK_MIN1      600000
@@ -191,7 +188,17 @@ enum s2mps11_regulators {
 #define S2MPS11_BUCK_N_VOLTAGES (S2MPS11_BUCK_VSEL_MASK + 1)
 #define S2MPS11_RAMP_DELAY     25000           /* uV/us */
 
+
+#define S2MPS11_BUCK2_RAMP_SHIFT       6
+#define S2MPS11_BUCK34_RAMP_SHIFT      4
+#define S2MPS11_BUCK5_RAMP_SHIFT       6
+#define S2MPS11_BUCK16_RAMP_SHIFT      4
+#define S2MPS11_BUCK7810_RAMP_SHIFT    2
+#define S2MPS11_BUCK9_RAMP_SHIFT       0
+#define S2MPS11_BUCK2_RAMP_EN_SHIFT    3
+#define S2MPS11_BUCK3_RAMP_EN_SHIFT    2
+#define S2MPS11_BUCK4_RAMP_EN_SHIFT    1
+#define S2MPS11_BUCK6_RAMP_EN_SHIFT    0
 #define S2MPS11_PMIC_EN_SHIFT  6
-#define S2MPS11_REGULATOR_MAX (S2MPS11_REG_MAX - 3)
 
 #endif /*  __LINUX_MFD_S2MPS11_H */
index db1791bb997ae495d65637e506b6f13ab4945b6d..25f2c611ab013db8e3a4135a00c129b3d556e7d5 100644 (file)
 #define SEQ_STATUS BIT(5)
 
 #define ADC_CLK                        3000000
-#define        MAX_CLK_DIV             7
 #define TOTAL_STEPS            16
 #define TOTAL_CHANNELS         8
 
index 7e7fbce7a30874ddca4c06bc37359042902c983b..6dd8893b2a566f9020cd2afcbb7967a17f0bc0f3 100644 (file)
 
 #define TWL6040_GPO_MAX        3
 
+/* TODO: All platform data struct can be removed */
 struct twl6040_codec_data {
        u16 hs_left_step;
        u16 hs_right_step;
index 28af417563609b50bbff851bea7e0fb4692c966d..88f90cbf8e6a0a2588aea4e023a5594cb19528a4 100644 (file)
@@ -10,6 +10,7 @@
 #ifndef UCB1200_H
 #define UCB1200_H
 
+#include <linux/device.h>
 #include <linux/mfd/mcp.h>
 #include <linux/gpio.h>
 #include <linux/mutex.h>
index bb1c8096a7eb28a5b9c9c24f37dd4603dc727b98..cd1fdf75103b7bd26350490cffc7de8e054bc366 100644 (file)
@@ -69,6 +69,7 @@ enum {
        MLX4_CMD_SET_ICM_SIZE    = 0xffd,
        /*master notify fw on finish for slave's flr*/
        MLX4_CMD_INFORM_FLR_DONE = 0x5b,
+       MLX4_CMD_GET_OP_REQ      = 0x59,
 
        /* TPT commands */
        MLX4_CMD_SW2HW_MPT       = 0xd,
index 52c23a892bab3cec77af9e8f662fe2ea3d47adfb..09ef2f448a001f6771852f87215437b1e0561fa7 100644 (file)
@@ -33,6 +33,7 @@
 #ifndef MLX4_DEVICE_H
 #define MLX4_DEVICE_H
 
+#include <linux/if_ether.h>
 #include <linux/pci.h>
 #include <linux/completion.h>
 #include <linux/radix-tree.h>
@@ -207,6 +208,7 @@ enum mlx4_event {
        MLX4_EVENT_TYPE_CMD                = 0x0a,
        MLX4_EVENT_TYPE_VEP_UPDATE         = 0x19,
        MLX4_EVENT_TYPE_COMM_CHANNEL       = 0x18,
+       MLX4_EVENT_TYPE_OP_REQUIRED        = 0x1a,
        MLX4_EVENT_TYPE_FATAL_WARNING      = 0x1b,
        MLX4_EVENT_TYPE_FLR_EVENT          = 0x1c,
        MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT = 0x1d,
@@ -619,7 +621,7 @@ struct mlx4_eth_av {
        u8              dgid[16];
        u32             reserved4[2];
        __be16          vlan;
-       u8              mac[6];
+       u8              mac[ETH_ALEN];
 };
 
 union mlx4_ext_av {
@@ -913,10 +915,10 @@ enum mlx4_net_trans_promisc_mode {
 };
 
 struct mlx4_spec_eth {
-       u8      dst_mac[6];
-       u8      dst_mac_msk[6];
-       u8      src_mac[6];
-       u8      src_mac_msk[6];
+       u8      dst_mac[ETH_ALEN];
+       u8      dst_mac_msk[ETH_ALEN];
+       u8      src_mac[ETH_ALEN];
+       u8      src_mac_msk[ETH_ALEN];
        u8      ether_type_enable;
        __be16  ether_type;
        __be16  vlan_id_msk;
index 262deac02c9ea87cdf5e3dea5cc64fc15bdd6e08..6d351473c29200526a98bbbf275b479f79cfa146 100644 (file)
@@ -34,6 +34,7 @@
 #define MLX4_QP_H
 
 #include <linux/types.h>
+#include <linux/if_ether.h>
 
 #include <linux/mlx4/device.h>
 
@@ -143,7 +144,7 @@ struct mlx4_qp_path {
        u8                      feup;
        u8                      fvl_rx;
        u8                      reserved4[2];
-       u8                      dmac[6];
+       u8                      dmac[ETH_ALEN];
 };
 
 enum { /* fl */
@@ -318,7 +319,7 @@ struct mlx4_wqe_datagram_seg {
        __be32                  dqpn;
        __be32                  qkey;
        __be16                  vlan;
-       u8                      mac[6];
+       u8                      mac[ETH_ALEN];
 };
 
 struct mlx4_wqe_lso_seg {
index 46f1ea01e6f62161db550b13be8a2ed85246219a..504035f3ece151662eab6f5928140f3e159bb83a 100644 (file)
@@ -97,6 +97,11 @@ extern const struct gtype##_id __mod_##gtype##_table         \
 /* For userspace: you can also call me... */
 #define MODULE_ALIAS(_alias) MODULE_INFO(alias, _alias)
 
+/* Soft module dependencies. See man modprobe.d for details.
+ * Example: MODULE_SOFTDEP("pre: module-foo module-bar post: module-baz")
+ */
+#define MODULE_SOFTDEP(_softdep) MODULE_INFO(softdep, _softdep)
+
 /*
  * The following license idents are currently accepted as indicating free
  * software modules
index 27d9da3f86ffe91c9d06aa3dc779902adbc620ad..c3eb102a9cc81d8ed97f11deb9584aa71830b1c5 100644 (file)
@@ -36,7 +36,18 @@ static const char __UNIQUE_ID(name)[]                                          \
 
 struct kernel_param;
 
+/*
+ * Flags available for kernel_param_ops
+ *
+ * NOARG - the parameter allows for no argument (foo instead of foo=1)
+ */
+enum {
+       KERNEL_PARAM_FL_NOARG = (1 << 0)
+};
+
 struct kernel_param_ops {
+       /* How the ops should behave */
+       unsigned int flags;
        /* Returns 0, or -errno.  arg is in kp->arg. */
        int (*set)(const char *val, const struct kernel_param *kp);
        /* Returns length written or -errno.  Buffer is 4k (ie. be short!) */
@@ -187,7 +198,7 @@ struct kparam_array
 /* Obsolete - use module_param_cb() */
 #define module_param_call(name, set, get, arg, perm)                   \
        static struct kernel_param_ops __param_ops_##name =             \
-                { (void *)set, (void *)get };                          \
+               { 0, (void *)set, (void *)get };                        \
        __module_param_call(MODULE_PARAM_PREFIX,                        \
                            name, &__param_ops_##name, arg,             \
                            (perm) + sizeof(__check_old_set_param(set))*0, -1)
index 211ff67e8b0d0c3def3389ed474ab098112fa749..95fc482cef36cb12c78ba0eade0da4f8d717b3cb 100644 (file)
@@ -93,8 +93,6 @@ struct nand_bbt_descr {
 #define NAND_BBT_CREATE_EMPTY  0x00000400
 /* Search good / bad pattern through all pages of a block */
 #define NAND_BBT_SCANALLPAGES  0x00000800
-/* Scan block empty during good / bad block scan */
-#define NAND_BBT_SCANEMPTY     0x00001000
 /* Write bbt if neccecary */
 #define NAND_BBT_WRITE         0x00002000
 /* Read and write back block contents when writing bbt */
index d6ed61ef451df403377f77e010a52711dac8e5a9..c8be32e9fc49507702d187a33893f8f3870a38ef 100644 (file)
@@ -137,6 +137,7 @@ enum access_mode {
 
 /**
  * fsmc_nand_platform_data - platform specific NAND controller config
+ * @nand_timings: timing setup for the physical NAND interface
  * @partitions: partition table for the platform, use a default fallback
  * if this is NULL
  * @nr_partitions: the number of partitions in the previous entry
index a5cf4e8d68187e5bd5e7892effc0957dc5bfea0b..f9bfe526d3102175ab3d32149966f61aab35da62 100644 (file)
@@ -173,6 +173,9 @@ struct mtd_info {
        /* ECC layout structure pointer - read only! */
        struct nand_ecclayout *ecclayout;
 
+       /* the ecc step size. */
+       unsigned int ecc_step_size;
+
        /* max number of correctible bit errors per ecc step */
        unsigned int ecc_strength;
 
index ab6363443ce81f033b641b014102a2dfdf8921be..ac8e89d5a7929b6bdd40c424928a7d0d9872a73e 100644 (file)
@@ -56,7 +56,7 @@ extern int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
  * is supported now. If you add a chip with bigger oobsize/page
  * adjust this accordingly.
  */
-#define NAND_MAX_OOBSIZE       640
+#define NAND_MAX_OOBSIZE       744
 #define NAND_MAX_PAGESIZE      8192
 
 /*
@@ -202,6 +202,10 @@ typedef enum {
 /* Keep gcc happy */
 struct nand_chip;
 
+/* ONFI features */
+#define ONFI_FEATURE_16_BIT_BUS                (1 << 0)
+#define ONFI_FEATURE_EXT_PARAM_PAGE    (1 << 7)
+
 /* ONFI timing mode, used in both asynchronous and synchronous mode */
 #define ONFI_TIMING_MODE_0             (1 << 0)
 #define ONFI_TIMING_MODE_1             (1 << 1)
@@ -217,6 +221,9 @@ struct nand_chip;
 /* ONFI subfeature parameters length */
 #define ONFI_SUBFEATURE_PARAM_LEN      4
 
+/* ONFI optional commands SET/GET FEATURES supported? */
+#define ONFI_OPT_CMD_SET_GET_FEATURES  (1 << 2)
+
 struct nand_onfi_params {
        /* rev info and features block */
        /* 'O' 'N' 'F' 'I'  */
@@ -224,7 +231,10 @@ struct nand_onfi_params {
        __le16 revision;
        __le16 features;
        __le16 opt_cmd;
-       u8 reserved[22];
+       u8 reserved0[2];
+       __le16 ext_param_page_length; /* since ONFI 2.1 */
+       u8 num_of_param_pages;        /* since ONFI 2.1 */
+       u8 reserved1[17];
 
        /* manufacturer information block */
        char manufacturer[12];
@@ -281,6 +291,40 @@ struct nand_onfi_params {
 
 #define ONFI_CRC_BASE  0x4F4E
 
+/* Extended ECC information Block Definition (since ONFI 2.1) */
+struct onfi_ext_ecc_info {
+       u8 ecc_bits;
+       u8 codeword_size;
+       __le16 bb_per_lun;
+       __le16 block_endurance;
+       u8 reserved[2];
+} __packed;
+
+#define ONFI_SECTION_TYPE_0    0       /* Unused section. */
+#define ONFI_SECTION_TYPE_1    1       /* for additional sections. */
+#define ONFI_SECTION_TYPE_2    2       /* for ECC information. */
+struct onfi_ext_section {
+       u8 type;
+       u8 length;
+} __packed;
+
+#define ONFI_EXT_SECTION_MAX 8
+
+/* Extended Parameter Page Definition (since ONFI 2.1) */
+struct onfi_ext_param_page {
+       __le16 crc;
+       u8 sig[4];             /* 'E' 'P' 'P' 'S' */
+       u8 reserved0[10];
+       struct onfi_ext_section sections[ONFI_EXT_SECTION_MAX];
+
+       /*
+        * The actual size of the Extended Parameter Page is in
+        * @ext_param_page_length of nand_onfi_params{}.
+        * The following are the variable length sections.
+        * So we do not add any fields below. Please see the ONFI spec.
+        */
+} __packed;
+
 /**
  * struct nand_hw_control - Control structure for hardware controller (e.g ECC generator) shared among independent devices
  * @lock:               protection lock
@@ -390,8 +434,8 @@ struct nand_buffers {
  * @write_buf:         [REPLACEABLE] write data from the buffer to the chip
  * @read_buf:          [REPLACEABLE] read data from the chip into the buffer
  * @select_chip:       [REPLACEABLE] select chip nr
- * @block_bad:         [REPLACEABLE] check, if the block is bad
- * @block_markbad:     [REPLACEABLE] mark the block bad
+ * @block_bad:         [REPLACEABLE] check if a block is bad, using OOB markers
+ * @block_markbad:     [REPLACEABLE] mark a block bad
  * @cmd_ctrl:          [BOARDSPECIFIC] hardwarespecific function for controlling
  *                     ALE/CLE/nCE. Also used to write command and address
  * @init_size:         [BOARDSPECIFIC] hardwarespecific function for setting
@@ -434,6 +478,12 @@ struct nand_buffers {
  *                     bad block marker position; i.e., BBM == 11110111b is
  *                     not bad when badblockbits == 7
  * @cellinfo:          [INTERN] MLC/multichip data from chip ident
+ * @ecc_strength_ds:   [INTERN] ECC correctability from the datasheet.
+ *                     Minimum amount of bit errors per @ecc_step_ds guaranteed
+ *                     to be correctable. If unknown, set to zero.
+ * @ecc_step_ds:       [INTERN] ECC step required by the @ecc_strength_ds,
+ *                      also from the datasheet. It is the recommended ECC step
+ *                     size, if known; if unknown, set to zero.
  * @numchips:          [INTERN] number of physical chips
  * @chipsize:          [INTERN] the size of one chip for multichip arrays
  * @pagemask:          [INTERN] page number mask = number of (pages / chip) - 1
@@ -510,6 +560,8 @@ struct nand_chip {
        unsigned int pagebuf_bitflips;
        int subpagesize;
        uint8_t cellinfo;
+       uint16_t ecc_strength_ds;
+       uint16_t ecc_step_ds;
        int badblockpos;
        int badblockbits;
 
@@ -576,6 +628,11 @@ struct nand_chip {
        { .name = (nm), {{ .dev_id = (devid) }}, .chipsize = (chipsz), \
          .options = (opts) }
 
+#define NAND_ECC_INFO(_strength, _step)        \
+                       { .strength_ds = (_strength), .step_ds = (_step) }
+#define NAND_ECC_STRENGTH(type)                ((type)->ecc.strength_ds)
+#define NAND_ECC_STEP(type)            ((type)->ecc.step_ds)
+
 /**
  * struct nand_flash_dev - NAND Flash Device ID Structure
  * @name: a human-readable name of the NAND chip
@@ -593,6 +650,12 @@ struct nand_chip {
  * @options: stores various chip bit options
  * @id_len: The valid length of the @id.
  * @oobsize: OOB size
+ * @ecc.strength_ds: The ECC correctability from the datasheet, same as the
+ *                   @ecc_strength_ds in nand_chip{}.
+ * @ecc.step_ds: The ECC step required by the @ecc.strength_ds, same as the
+ *               @ecc_step_ds in nand_chip{}, also from the datasheet.
+ *               For example, the "4bit ECC for each 512Byte" can be set with
+ *               NAND_ECC_INFO(4, 512).
  */
 struct nand_flash_dev {
        char *name;
@@ -609,6 +672,10 @@ struct nand_flash_dev {
        unsigned int options;
        uint16_t id_len;
        uint16_t oobsize;
+       struct {
+               uint16_t strength_ds;
+               uint16_t step_ds;
+       } ecc;
 };
 
 /**
@@ -625,8 +692,8 @@ extern struct nand_flash_dev nand_flash_ids[];
 extern struct nand_manufacturers nand_manuf_ids[];
 
 extern int nand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd);
-extern int nand_update_bbt(struct mtd_info *mtd, loff_t offs);
 extern int nand_default_bbt(struct mtd_info *mtd);
+extern int nand_markbad_bbt(struct mtd_info *mtd, loff_t offs);
 extern int nand_isbad_bbt(struct mtd_info *mtd, loff_t offs, int allowbbt);
 extern int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
                           int allowbbt);
@@ -708,6 +775,12 @@ struct platform_nand_chip *get_platform_nandchip(struct mtd_info *mtd)
        return chip->priv;
 }
 
+/* return the supported features. */
+static inline int onfi_feature(struct nand_chip *chip)
+{
+       return chip->onfi_version ? le16_to_cpu(chip->onfi_params.features) : 0;
+}
+
 /* return the supported asynchronous timing mode. */
 static inline int onfi_get_async_timing_mode(struct nand_chip *chip)
 {
index 6e8215b159982378847cc7aacd75747da9c604d6..61a0da38d0cbc163e88b46e0b0fdf0fb50c62934 100644 (file)
@@ -6,6 +6,7 @@
 #define __LINUX_MV643XX_ETH_H
 
 #include <linux/mbus.h>
+#include <linux/if_ether.h>
 
 #define MV643XX_ETH_SHARED_NAME                "mv643xx_eth"
 #define MV643XX_ETH_NAME               "mv643xx_eth_port"
@@ -48,7 +49,7 @@ struct mv643xx_eth_platform_data {
         * Use this MAC address if it is valid, overriding the
         * address that is already in the hardware.
         */
-       u8                      mac_addr[6];
+       u8                      mac_addr[ETH_ALEN];
 
        /*
         * If speed is 0, autonegotiation is enabled.
index 9a4156845e9348cb4fe4b803a64041d1db5de377..077363dcd860702b67003c1721badf92d187e020 100644 (file)
@@ -728,6 +728,16 @@ struct netdev_fcoe_hbainfo {
 };
 #endif
 
+#define MAX_PHYS_PORT_ID_LEN 32
+
+/* This structure holds a unique identifier to identify the
+ * physical port used by a netdevice.
+ */
+struct netdev_phys_port_id {
+       unsigned char id[MAX_PHYS_PORT_ID_LEN];
+       unsigned char id_len;
+};
+
 /*
  * This structure defines the management hooks for network devices.
  * The following hooks can be defined; unless noted otherwise, they are
@@ -932,6 +942,12 @@ struct netdev_fcoe_hbainfo {
  *     that determine carrier state from physical hardware properties (eg
  *     network cables) or protocol-dependent mechanisms (eg
  *     USB_CDC_NOTIFY_NETWORK_CONNECTION) should NOT implement this function.
+ *
+ * int (*ndo_get_phys_port_id)(struct net_device *dev,
+ *                            struct netdev_phys_port_id *ppid);
+ *     Called to get ID of physical port of this device. If driver does
+ *     not implement this, it is assumed that the hw is not able to have
+ *     multiple net devices on single physical port.
  */
 struct net_device_ops {
        int                     (*ndo_init)(struct net_device *dev);
@@ -1060,6 +1076,8 @@ struct net_device_ops {
                                                      struct nlmsghdr *nlh);
        int                     (*ndo_change_carrier)(struct net_device *dev,
                                                      bool new_carrier);
+       int                     (*ndo_get_phys_port_id)(struct net_device *dev,
+                                                       struct netdev_phys_port_id *ppid);
 };
 
 /*
@@ -1633,6 +1651,7 @@ struct packet_offload {
 #define NETDEV_NOTIFY_PEERS    0x0013
 #define NETDEV_JOIN            0x0014
 #define NETDEV_CHANGEUPPER     0x0015
+#define NETDEV_RESEND_IGMP     0x0016
 
 extern int register_netdevice_notifier(struct notifier_block *nb);
 extern int unregister_netdevice_notifier(struct notifier_block *nb);
@@ -1665,9 +1684,6 @@ extern int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
 
 extern rwlock_t                                dev_base_lock;          /* Device list lock */
 
-extern seqcount_t      devnet_rename_seq;      /* Device rename seq */
-
-
 #define for_each_netdev(net, d)                \
                list_for_each_entry(d, &(net)->dev_base_head, dev_list)
 #define for_each_netdev_reverse(net, d)        \
@@ -2317,6 +2333,8 @@ extern int                dev_set_mac_address(struct net_device *,
                                            struct sockaddr *);
 extern int             dev_change_carrier(struct net_device *,
                                           bool new_carrier);
+extern int             dev_get_phys_port_id(struct net_device *dev,
+                                            struct netdev_phys_port_id *ppid);
 extern int             dev_hard_start_xmit(struct sk_buff *skb,
                                            struct net_device *dev,
                                            struct netdev_queue *txq);
index de70f7b45b682126284a4162535659bfde8e5be1..e2cf786be22fafc2766bdfc8fde1ab71571f86c6 100644 (file)
@@ -314,8 +314,8 @@ nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family)
 #endif /*CONFIG_NETFILTER*/
 
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
-extern void (*ip_ct_attach)(struct sk_buff *, struct sk_buff *) __rcu;
-extern void nf_ct_attach(struct sk_buff *, struct sk_buff *);
+extern void (*ip_ct_attach)(struct sk_buff *, const struct sk_buff *) __rcu;
+extern void nf_ct_attach(struct sk_buff *, const struct sk_buff *);
 extern void (*nf_ct_destroy)(struct nf_conntrack *) __rcu;
 
 struct nf_conn;
@@ -325,12 +325,14 @@ struct nfq_ct_hook {
        size_t (*build_size)(const struct nf_conn *ct);
        int (*build)(struct sk_buff *skb, struct nf_conn *ct);
        int (*parse)(const struct nlattr *attr, struct nf_conn *ct);
+       int (*attach_expect)(const struct nlattr *attr, struct nf_conn *ct,
+                            u32 portid, u32 report);
 };
 extern struct nfq_ct_hook __rcu *nfq_ct_hook;
 
 struct nfq_ct_nat_hook {
        void (*seq_adjust)(struct sk_buff *skb, struct nf_conn *ct,
-                          u32 ctinfo, int off);
+                          u32 ctinfo, s32 off);
 };
 extern struct nfq_ct_nat_hook __rcu *nfq_ct_nat_hook;
 #else
index 7125cef741642b77ca5c41758c26e500be3a2672..1f6a332c646b7ba3610bc6de9883f74e26257d07 100644 (file)
@@ -457,14 +457,11 @@ extern int nfs3_removexattr (struct dentry *, const char *name);
 /*
  * linux/fs/nfs/direct.c
  */
-extern ssize_t nfs_direct_IO(int, struct kiocb *, const struct iovec *, loff_t,
-                       unsigned long);
-extern ssize_t nfs_file_direct_read(struct kiocb *iocb,
-                       const struct iovec *iov, unsigned long nr_segs,
-                       loff_t pos, bool uio);
-extern ssize_t nfs_file_direct_write(struct kiocb *iocb,
-                       const struct iovec *iov, unsigned long nr_segs,
-                       loff_t pos, bool uio);
+extern ssize_t nfs_direct_IO(int, struct kiocb *, struct iov_iter *, loff_t);
+extern ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter,
+                       loff_t pos);
+extern ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter,
+                       loff_t pos);
 
 /*
  * linux/fs/nfs/dir.c
index 8651574a305bb44815e0c5e19826a3fbcbf7a37e..ddc3e32178c31c3ec09f56a3263e85d8de55fdd2 100644 (file)
@@ -1442,7 +1442,7 @@ struct nfs_rpc_ops {
        struct nfs_client *(*alloc_client) (const struct nfs_client_initdata *);
        struct nfs_client *
                (*init_client) (struct nfs_client *, const struct rpc_timeout *,
-                               const char *, rpc_authflavor_t);
+                               const char *);
        void    (*free_client) (struct nfs_client *);
        struct nfs_server *(*create_server)(struct nfs_mount_info *, struct nfs_subversion *);
        struct nfs_server *(*clone_server)(struct nfs_server *, struct nfs_fh *,
index 1fd08ca23106df836678989d96e2a16824b1a932..90a8811e9e480285fe417e6c4e4b6c9c4425166a 100644 (file)
@@ -323,12 +323,6 @@ extern int of_detach_node(struct device_node *);
  */
 const __be32 *of_prop_next_u32(struct property *prop, const __be32 *cur,
                               u32 *pu);
-#define of_property_for_each_u32(np, propname, prop, p, u)     \
-       for (prop = of_find_property(np, propname, NULL),       \
-               p = of_prop_next_u32(prop, NULL, &u);           \
-               p;                                              \
-               p = of_prop_next_u32(prop, p, &u))
-
 /*
  * struct property *prop;
  * const char *s;
@@ -337,11 +331,6 @@ const __be32 *of_prop_next_u32(struct property *prop, const __be32 *cur,
  *         printk("String value: %s\n", s);
  */
 const char *of_prop_next_string(struct property *prop, const char *cur);
-#define of_property_for_each_string(np, propname, prop, s)     \
-       for (prop = of_find_property(np, propname, NULL),       \
-               s = of_prop_next_string(prop, NULL);            \
-               s;                                              \
-               s = of_prop_next_string(prop, s))
 
 #else /* CONFIG_OF */
 
@@ -505,12 +494,20 @@ static inline int of_machine_is_compatible(const char *compat)
        return 0;
 }
 
+static inline const __be32 *of_prop_next_u32(struct property *prop,
+               const __be32 *cur, u32 *pu)
+{
+       return NULL;
+}
+
+static inline const char *of_prop_next_string(struct property *prop,
+               const char *cur)
+{
+       return NULL;
+}
+
 #define of_match_ptr(_ptr)     NULL
 #define of_match_node(_matches, _node) NULL
-#define of_property_for_each_u32(np, propname, prop, p, u) \
-       while (0)
-#define of_property_for_each_string(np, propname, prop, s) \
-       while (0)
 #endif /* CONFIG_OF */
 
 #ifndef of_node_to_nid
@@ -559,6 +556,18 @@ static inline int of_property_read_u32(const struct device_node *np,
        return of_property_read_u32_array(np, propname, out_value, 1);
 }
 
+#define of_property_for_each_u32(np, propname, prop, p, u)     \
+       for (prop = of_find_property(np, propname, NULL),       \
+               p = of_prop_next_u32(prop, NULL, &u);           \
+               p;                                              \
+               p = of_prop_next_u32(prop, p, &u))
+
+#define of_property_for_each_string(np, propname, prop, s)     \
+       for (prop = of_find_property(np, propname, NULL),       \
+               s = of_prop_next_string(prop, NULL);            \
+               s;                                              \
+               s = of_prop_next_string(prop, s))
+
 #if defined(CONFIG_PROC_FS) && defined(CONFIG_PROC_DEVICETREE)
 extern void proc_device_tree_add_node(struct device_node *, struct proc_dir_entry *);
 extern void proc_device_tree_add_prop(struct proc_dir_entry *pde, struct property *prop);
index ed136ad698ce622e4d150d7fda337a99c78d5a4e..4a17939b95cc100ff09e63135aa4543ad3f3e72a 100644 (file)
@@ -106,8 +106,7 @@ extern u64 dt_mem_next_cell(int s, __be32 **cellp);
  * physical addresses.
  */
 #ifdef CONFIG_BLK_DEV_INITRD
-extern void early_init_dt_setup_initrd_arch(unsigned long start,
-                                           unsigned long end);
+extern void early_init_dt_setup_initrd_arch(u64 start, u64 end);
 #endif
 
 /* Early flat tree scan hooks */
index 1704479772787b28950c9768a9e73c35c88dcd9a..d006f0ca60f46e92705fb8b3fd315ebacc5242b2 100644 (file)
@@ -47,24 +47,22 @@ void acpi_pci_remove_bus(struct pci_bus *bus);
 
 #ifdef CONFIG_ACPI_PCI_SLOT
 void acpi_pci_slot_init(void);
-void acpi_pci_slot_enumerate(struct pci_bus *bus, acpi_handle handle);
+void acpi_pci_slot_enumerate(struct pci_bus *bus);
 void acpi_pci_slot_remove(struct pci_bus *bus);
 #else
 static inline void acpi_pci_slot_init(void) { }
-static inline void acpi_pci_slot_enumerate(struct pci_bus *bus,
-                                          acpi_handle handle) { }
+static inline void acpi_pci_slot_enumerate(struct pci_bus *bus) { }
 static inline void acpi_pci_slot_remove(struct pci_bus *bus) { }
 #endif
 
 #ifdef CONFIG_HOTPLUG_PCI_ACPI
 void acpiphp_init(void);
-void acpiphp_enumerate_slots(struct pci_bus *bus, acpi_handle handle);
+void acpiphp_enumerate_slots(struct pci_bus *bus);
 void acpiphp_remove_slots(struct pci_bus *bus);
 void acpiphp_check_host_bridge(acpi_handle handle);
 #else
 static inline void acpiphp_init(void) { }
-static inline void acpiphp_enumerate_slots(struct pci_bus *bus,
-                                          acpi_handle handle) { }
+static inline void acpiphp_enumerate_slots(struct pci_bus *bus) { }
 static inline void acpiphp_remove_slots(struct pci_bus *bus) { }
 static inline void acpiphp_check_host_bridge(acpi_handle handle) { }
 #endif
index 0fd1f1582fa1cdf359b33f3cddeaa60026f492ac..7dbc9fabe5ff3e0066b17f5846e3008d2224ea55 100644 (file)
@@ -183,6 +183,19 @@ enum pci_bus_flags {
        PCI_BUS_FLAGS_NO_MMRBC = (__force pci_bus_flags_t) 2,
 };
 
+/* These values come from the PCI Express Spec */
+enum pcie_link_width {
+       PCIE_LNK_WIDTH_RESRV    = 0x00,
+       PCIE_LNK_X1             = 0x01,
+       PCIE_LNK_X2             = 0x02,
+       PCIE_LNK_X4             = 0x04,
+       PCIE_LNK_X8             = 0x08,
+       PCIE_LNK_X12            = 0x0C,
+       PCIE_LNK_X16            = 0x10,
+       PCIE_LNK_X32            = 0x20,
+       PCIE_LNK_WIDTH_UNKNOWN  = 0xFF,
+};
+
 /* Based on the PCI Hotplug Spec, but some values are made up by us */
 enum pci_bus_speed {
        PCI_SPEED_33MHz                 = 0x00,
@@ -914,6 +927,7 @@ bool pci_check_and_unmask_intx(struct pci_dev *dev);
 void pci_msi_off(struct pci_dev *dev);
 int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size);
 int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask);
+int pci_wait_for_pending_transaction(struct pci_dev *dev);
 int pcix_get_max_mmrbc(struct pci_dev *dev);
 int pcix_get_mmrbc(struct pci_dev *dev);
 int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc);
@@ -921,9 +935,16 @@ int pcie_get_readrq(struct pci_dev *dev);
 int pcie_set_readrq(struct pci_dev *dev, int rq);
 int pcie_get_mps(struct pci_dev *dev);
 int pcie_set_mps(struct pci_dev *dev, int mps);
+int pcie_get_minimum_link(struct pci_dev *dev, enum pci_bus_speed *speed,
+                         enum pcie_link_width *width);
 int __pci_reset_function(struct pci_dev *dev);
 int __pci_reset_function_locked(struct pci_dev *dev);
 int pci_reset_function(struct pci_dev *dev);
+int pci_probe_reset_slot(struct pci_slot *slot);
+int pci_reset_slot(struct pci_slot *slot);
+int pci_probe_reset_bus(struct pci_bus *bus);
+int pci_reset_bus(struct pci_bus *bus);
+void pci_reset_bridge_secondary_bus(struct pci_dev *dev);
 void pci_update_resource(struct pci_dev *dev, int resno);
 int __must_check pci_assign_resource(struct pci_dev *dev, int i);
 int __must_check pci_reassign_resource(struct pci_dev *dev, int i, resource_size_t add_size, resource_size_t align);
index 8db71dcd633740a8a7290aff4a99f6a23ee8e366..430dd963707b43bdc995d9c5f1da353af9031665 100644 (file)
 #ifndef _PCI_HOTPLUG_H
 #define _PCI_HOTPLUG_H
 
-/* These values come from the PCI Express Spec */
-enum pcie_link_width {
-       PCIE_LNK_WIDTH_RESRV    = 0x00,
-       PCIE_LNK_X1             = 0x01,
-       PCIE_LNK_X2             = 0x02,
-       PCIE_LNK_X4             = 0x04,
-       PCIE_LNK_X8             = 0x08,
-       PCIE_LNK_X12            = 0x0C,
-       PCIE_LNK_X16            = 0x10,
-       PCIE_LNK_X32            = 0x20,
-       PCIE_LNK_WIDTH_UNKNOWN  = 0xFF,
-};
-
 /**
  * struct hotplug_slot_ops -the callbacks that the hotplug pci core can use
  * @owner: The module owner of this structure
@@ -63,6 +50,9 @@ enum pcie_link_width {
  * @get_adapter_status: Called to get see if an adapter is present in the slot or not.
  *     If this field is NULL, the value passed in the struct hotplug_slot_info
  *     will be used when this value is requested by a user.
+ * @reset_slot: Optional interface to allow override of a bus reset for the
+ *     slot for cases where a secondary bus reset can result in spurious
+ *     hotplug events or where a slot can be reset independent of the bus.
  *
  * The table of function pointers that is passed to the hotplug pci core by a
  * hotplug pci driver.  These functions are called by the hotplug pci core when
@@ -80,6 +70,7 @@ struct hotplug_slot_ops {
        int (*get_attention_status)     (struct hotplug_slot *slot, u8 *value);
        int (*get_latch_status)         (struct hotplug_slot *slot, u8 *value);
        int (*get_adapter_status)       (struct hotplug_slot *slot, u8 *value);
+       int (*reset_slot)               (struct hotplug_slot *slot, int probe);
 };
 
 /**
index 88272591a8951d09fb18d8b7967ed3e885ca8855..9efc04dd255aa59799c69e4bba791551825023e8 100644 (file)
@@ -36,6 +36,7 @@ struct samsung_i2s {
  */
 #define QUIRK_NO_MUXPSR                (1 << 2)
 #define QUIRK_NEED_RSTCLR      (1 << 3)
+#define QUIRK_SUPPORTS_TDM     (1 << 4)
        /* Quirks of the I2S controller */
        u32 quirks;
        dma_addr_t idma_addr;
index 6a293b7fff3bf9d4cb260d3d6c6d639427f38275..cea9f70133c521f1d5fb2a0d459f3e30ca3f9576 100644 (file)
@@ -71,6 +71,10 @@ struct atmel_nand_data {
        u8              on_flash_bbt;           /* bbt on flash */
        struct mtd_partition *parts;
        unsigned int    num_parts;
+       bool            has_dma;                /* support dma transfer */
+
+       /* default is false, only for at32ap7000 chip is true */
+       bool            need_reset_workaround;
 };
 
  /* Serial */
index b7174998c24aa92a76b2cb693c3e8471b12ec236..e75dcbf2b2302b861fb4e3ab7bd95b2f522cc983 100644 (file)
@@ -94,6 +94,10 @@ void __init brcmfmac_init_pdata(void)
  * Set this to true if the SDIO host controller has higher align requirement
  * than 32 bytes for each scatterlist item.
  *
+ * sd_head_align: alignment requirement for start of data buffer
+ *
+ * sd_sgentry_align: length alignment requirement for each sg entry
+ *
  * power_on: This function is called by the brcmfmac when the module gets
  * loaded. This can be particularly useful for low power devices. The platform
  * spcific routine may for example decide to power up the complete device.
@@ -121,6 +125,8 @@ struct brcmfmac_sdio_platform_data {
        unsigned int oob_irq_nr;
        unsigned long oob_irq_flags;
        bool broken_sg_support;
+       unsigned short sd_head_align;
+       unsigned short sd_sgentry_align;
        void (*power_on)(void);
        void (*power_off)(void);
        void (*reset)(void);
diff --git a/include/linux/platform_data/exynos_thermal.h b/include/linux/platform_data/exynos_thermal.h
deleted file mode 100644 (file)
index da7e627..0000000
+++ /dev/null
@@ -1,119 +0,0 @@
-/*
- * exynos_thermal.h - Samsung EXYNOS TMU (Thermal Management Unit)
- *
- *  Copyright (C) 2011 Samsung Electronics
- *  Donggeun Kim <dg77.kim@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- */
-
-#ifndef _LINUX_EXYNOS_THERMAL_H
-#define _LINUX_EXYNOS_THERMAL_H
-#include <linux/cpu_cooling.h>
-
-enum calibration_type {
-       TYPE_ONE_POINT_TRIMMING,
-       TYPE_TWO_POINT_TRIMMING,
-       TYPE_NONE,
-};
-
-enum soc_type {
-       SOC_ARCH_EXYNOS4210 = 1,
-       SOC_ARCH_EXYNOS,
-};
-/**
- * struct freq_clip_table
- * @freq_clip_max: maximum frequency allowed for this cooling state.
- * @temp_level: Temperature level at which the temperature clipping will
- *     happen.
- * @mask_val: cpumask of the allowed cpu's where the clipping will take place.
- *
- * This structure is required to be filled and passed to the
- * cpufreq_cooling_unregister function.
- */
-struct freq_clip_table {
-       unsigned int freq_clip_max;
-       unsigned int temp_level;
-       const struct cpumask *mask_val;
-};
-
-/**
- * struct exynos_tmu_platform_data
- * @threshold: basic temperature for generating interrupt
- *            25 <= threshold <= 125 [unit: degree Celsius]
- * @threshold_falling: differntial value for setting threshold
- *                    of temperature falling interrupt.
- * @trigger_levels: array for each interrupt levels
- *     [unit: degree Celsius]
- *     0: temperature for trigger_level0 interrupt
- *        condition for trigger_level0 interrupt:
- *             current temperature > threshold + trigger_levels[0]
- *     1: temperature for trigger_level1 interrupt
- *        condition for trigger_level1 interrupt:
- *             current temperature > threshold + trigger_levels[1]
- *     2: temperature for trigger_level2 interrupt
- *        condition for trigger_level2 interrupt:
- *             current temperature > threshold + trigger_levels[2]
- *     3: temperature for trigger_level3 interrupt
- *        condition for trigger_level3 interrupt:
- *             current temperature > threshold + trigger_levels[3]
- * @trigger_level0_en:
- *     1 = enable trigger_level0 interrupt,
- *     0 = disable trigger_level0 interrupt
- * @trigger_level1_en:
- *     1 = enable trigger_level1 interrupt,
- *     0 = disable trigger_level1 interrupt
- * @trigger_level2_en:
- *     1 = enable trigger_level2 interrupt,
- *     0 = disable trigger_level2 interrupt
- * @trigger_level3_en:
- *     1 = enable trigger_level3 interrupt,
- *     0 = disable trigger_level3 interrupt
- * @gain: gain of amplifier in the positive-TC generator block
- *     0 <= gain <= 15
- * @reference_voltage: reference voltage of amplifier
- *     in the positive-TC generator block
- *     0 <= reference_voltage <= 31
- * @noise_cancel_mode: noise cancellation mode
- *     000, 100, 101, 110 and 111 can be different modes
- * @type: determines the type of SOC
- * @efuse_value: platform defined fuse value
- * @cal_type: calibration type for temperature
- * @freq_clip_table: Table representing frequency reduction percentage.
- * @freq_tab_count: Count of the above table as frequency reduction may
- *     applicable to only some of the trigger levels.
- *
- * This structure is required for configuration of exynos_tmu driver.
- */
-struct exynos_tmu_platform_data {
-       u8 threshold;
-       u8 threshold_falling;
-       u8 trigger_levels[4];
-       bool trigger_level0_en;
-       bool trigger_level1_en;
-       bool trigger_level2_en;
-       bool trigger_level3_en;
-
-       u8 gain;
-       u8 reference_voltage;
-       u8 noise_cancel_mode;
-       u32 efuse_value;
-
-       enum calibration_type cal_type;
-       enum soc_type type;
-       struct freq_clip_table freq_tab[4];
-       unsigned int freq_tab_count;
-};
-#endif /* _LINUX_EXYNOS_THERMAL_H */
index c42f39f20195374bf6f0f046b487bd8e8c4511a6..ffb801998e5dfa10c450f99b2dbb943e8f7a05d4 100644 (file)
@@ -16,19 +16,6 @@ struct pxa3xx_nand_timing {
        unsigned int    tAR;  /* ND_ALE low to ND_nRE low delay */
 };
 
-struct pxa3xx_nand_cmdset {
-       uint16_t        read1;
-       uint16_t        read2;
-       uint16_t        program;
-       uint16_t        read_status;
-       uint16_t        read_id;
-       uint16_t        erase;
-       uint16_t        reset;
-       uint16_t        lock;
-       uint16_t        unlock;
-       uint16_t        lock_status;
-};
-
 struct pxa3xx_nand_flash {
        char            *name;
        uint32_t        chip_id;
diff --git a/include/linux/platform_data/omap-abe-twl6040.h b/include/linux/platform_data/omap-abe-twl6040.h
deleted file mode 100644 (file)
index 5d298ac..0000000
+++ /dev/null
@@ -1,49 +0,0 @@
-/**
- * omap-abe-twl6040.h - ASoC machine driver OMAP4+ devices, header.
- *
- * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com
- * All rights reserved.
- *
- * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- */
-
-#ifndef _OMAP_ABE_TWL6040_H_
-#define _OMAP_ABE_TWL6040_H_
-
-/* To select if only one channel is connected in a stereo port */
-#define ABE_TWL6040_LEFT       (1 << 0)
-#define ABE_TWL6040_RIGHT      (1 << 1)
-
-struct omap_abe_twl6040_data {
-       char *card_name;
-       /* Feature flags for connected audio pins */
-       u8      has_hs;
-       u8      has_hf;
-       bool    has_ep;
-       u8      has_aux;
-       u8      has_vibra;
-       bool    has_dmic;
-       bool    has_hsmic;
-       bool    has_mainmic;
-       bool    has_submic;
-       u8      has_afm;
-       /* Other features */
-       bool    jack_detection; /* board can detect jack events */
-       int     mclk_freq;      /* MCLK frequency speed for twl6040 */
-};
-
-#endif /* _OMAP_ABE_TWL6040_H_ */
index 80587fdbba3ef5adcf116afe9676f9b34543b583..1a2e9901a22eb4d5d14b52b56057b84d4bd19820 100644 (file)
 
 #include <drm/drm_mode.h>
 
+enum rcar_du_output {
+       RCAR_DU_OUTPUT_DPAD0,
+       RCAR_DU_OUTPUT_DPAD1,
+       RCAR_DU_OUTPUT_LVDS0,
+       RCAR_DU_OUTPUT_LVDS1,
+       RCAR_DU_OUTPUT_TCON,
+       RCAR_DU_OUTPUT_MAX,
+};
+
 enum rcar_du_encoder_type {
        RCAR_DU_ENCODER_UNUSED = 0,
+       RCAR_DU_ENCODER_NONE,
        RCAR_DU_ENCODER_VGA,
        RCAR_DU_ENCODER_LVDS,
 };
@@ -28,22 +38,32 @@ struct rcar_du_panel_data {
        struct drm_mode_modeinfo mode;
 };
 
-struct rcar_du_encoder_lvds_data {
+struct rcar_du_connector_lvds_data {
        struct rcar_du_panel_data panel;
 };
 
-struct rcar_du_encoder_vga_data {
+struct rcar_du_connector_vga_data {
        /* TODO: Add DDC information for EDID retrieval */
 };
 
+/*
+ * struct rcar_du_encoder_data - Encoder platform data
+ * @type: the encoder type (RCAR_DU_ENCODER_*)
+ * @output: the DU output the connector is connected to (RCAR_DU_OUTPUT_*)
+ * @connector.lvds: platform data for LVDS connectors
+ * @connector.vga: platform data for VGA connectors
+ *
+ * Encoder platform data describes an on-board encoder, its associated DU SoC
+ * output, and the connector.
+ */
 struct rcar_du_encoder_data {
-       enum rcar_du_encoder_type encoder;
-       unsigned int output;
+       enum rcar_du_encoder_type type;
+       enum rcar_du_output output;
 
        union {
-               struct rcar_du_encoder_lvds_data lvds;
-               struct rcar_du_encoder_vga_data vga;
-       } u;
+               struct rcar_du_connector_lvds_data lvds;
+               struct rcar_du_connector_vga_data vga;
+       } connector;
 };
 
 struct rcar_du_platform_data {
index 4aa80ba830a267d12ffef09ef2034d7a82dcbe7e..abd437d0a8a7eeac72297ad5306ca071993d8f58 100644 (file)
@@ -55,14 +55,14 @@ struct pstore_info {
        int             (*close)(struct pstore_info *psi);
        ssize_t         (*read)(u64 *id, enum pstore_type_id *type,
                        int *count, struct timespec *time, char **buf,
-                       struct pstore_info *psi);
+                       bool *compressed, struct pstore_info *psi);
        int             (*write)(enum pstore_type_id type,
                        enum kmsg_dump_reason reason, u64 *id,
-                       unsigned int part, int count, size_t hsize,
+                       unsigned int part, int count, bool compressed,
                        size_t size, struct pstore_info *psi);
        int             (*write_buf)(enum pstore_type_id type,
                        enum kmsg_dump_reason reason, u64 *id,
-                       unsigned int part, const char *buf, size_t hsize,
+                       unsigned int part, const char *buf, bool compressed,
                        size_t size, struct pstore_info *psi);
        int             (*erase)(enum pstore_type_id type, u64 id,
                        int count, struct timespec time,
index 467cc6307b621adf71fd94e0dc6f1d3aae409741..49444203328ad8546b4299fb702d0155897bc0d0 100644 (file)
@@ -21,6 +21,8 @@
 
 #include <linux/list.h>
 #include <linux/io.h>
+#include <linux/of.h>
+
 
 /*
  * SSP Serial Port Registers
@@ -190,6 +192,8 @@ struct ssp_device {
        int             irq;
        int             drcmr_rx;
        int             drcmr_tx;
+
+       struct device_node      *of_node;
 };
 
 /**
@@ -218,11 +222,18 @@ static inline u32 pxa_ssp_read_reg(struct ssp_device *dev, u32 reg)
 #ifdef CONFIG_ARCH_PXA
 struct ssp_device *pxa_ssp_request(int port, const char *label);
 void pxa_ssp_free(struct ssp_device *);
+struct ssp_device *pxa_ssp_request_of(const struct device_node *of_node,
+                                     const char *label);
 #else
 static inline struct ssp_device *pxa_ssp_request(int port, const char *label)
 {
        return NULL;
 }
+static inline struct ssp_device *pxa_ssp_request_of(const struct device_node *n,
+                                                   const char *name)
+{
+       return NULL;
+}
 static inline void pxa_ssp_free(struct ssp_device *ssp) {}
 #endif
 
index d13371134c59fbec0be79992a692257d8021a959..cc7494a3542983bbd4e73b95920eb981bb196716 100644 (file)
@@ -328,6 +328,7 @@ struct quotactl_ops {
        int (*set_dqblk)(struct super_block *, struct kqid, struct fs_disk_quota *);
        int (*get_xstate)(struct super_block *, struct fs_quota_stat *);
        int (*set_xstate)(struct super_block *, unsigned int, int);
+       int (*get_xstatev)(struct super_block *, struct fs_quota_statv *);
 };
 
 struct quota_format_type {
index 1c50093ae656d97211374aeaf30b6998a9e0a764..6965fe394c3bb9d3b66681a8bfd1b9f84a970def 100644 (file)
@@ -41,6 +41,7 @@ void __quota_error(struct super_block *sb, const char *func,
 void inode_add_rsv_space(struct inode *inode, qsize_t number);
 void inode_claim_rsv_space(struct inode *inode, qsize_t number);
 void inode_sub_rsv_space(struct inode *inode, qsize_t number);
+void inode_reclaim_rsv_space(struct inode *inode, qsize_t number);
 
 void dquot_initialize(struct inode *inode);
 void dquot_drop(struct inode *inode);
@@ -59,6 +60,7 @@ int dquot_alloc_inode(const struct inode *inode);
 
 int dquot_claim_space_nodirty(struct inode *inode, qsize_t number);
 void dquot_free_inode(const struct inode *inode);
+void dquot_reclaim_space_nodirty(struct inode *inode, qsize_t number);
 
 int dquot_disable(struct super_block *sb, int type, unsigned int flags);
 /* Suspend quotas on remount RO */
@@ -238,6 +240,13 @@ static inline int dquot_claim_space_nodirty(struct inode *inode, qsize_t number)
        return 0;
 }
 
+static inline int dquot_reclaim_space_nodirty(struct inode *inode,
+                                             qsize_t number)
+{
+       inode_sub_bytes(inode, number);
+       return 0;
+}
+
 static inline int dquot_disable(struct super_block *sb, int type,
                unsigned int flags)
 {
@@ -336,6 +345,12 @@ static inline int dquot_claim_block(struct inode *inode, qsize_t nr)
        return ret;
 }
 
+static inline void dquot_reclaim_block(struct inode *inode, qsize_t nr)
+{
+       dquot_reclaim_space_nodirty(inode, nr << inode->i_blkbits);
+       mark_inode_dirty_sync(inode);
+}
+
 static inline void dquot_free_space_nodirty(struct inode *inode, qsize_t nr)
 {
        __dquot_free_space(inode, nr, 0);
index 8dfaa2ce2e956e0037c469daed8586cb9a94bc68..0f424698064f5ee2c0a0da036189843c2e8cead4 100644 (file)
@@ -114,6 +114,11 @@ extern const struct raid6_recov_calls raid6_recov_intx1;
 extern const struct raid6_recov_calls raid6_recov_ssse3;
 extern const struct raid6_recov_calls raid6_recov_avx2;
 
+extern const struct raid6_calls raid6_neonx1;
+extern const struct raid6_calls raid6_neonx2;
+extern const struct raid6_calls raid6_neonx4;
+extern const struct raid6_calls raid6_neonx8;
+
 /* Algorithm list */
 extern const struct raid6_calls * const raid6_algos[];
 extern const struct raid6_recov_calls *const raid6_recov_algos[];
index 580a5320cc96afd21807cddaaa40bdff47694d5d..bafda8759be65d570ce36259294137ab2d253417 100644 (file)
@@ -471,6 +471,9 @@ struct regmap_irq {
  * @ack_base:    Base ack address.  If zero then the chip is clear on read.
  * @wake_base:   Base address for wake enables.  If zero unsupported.
  * @irq_reg_stride:  Stride to use for chips where registers are not contiguous.
+ * @init_ack_masked: Ack all masked interrupts once during initalization.
+ * @mask_invert: Inverted mask register: cleared bits are masked out.
+ * @wake_invert: Inverted wake register: cleared bits are wake enabled.
  * @runtime_pm:  Hold a runtime PM lock on the device when accessing it.
  *
  * @num_regs:    Number of registers in each control bank.
@@ -486,9 +489,10 @@ struct regmap_irq_chip {
        unsigned int ack_base;
        unsigned int wake_base;
        unsigned int irq_reg_stride;
-       unsigned int mask_invert;
-       unsigned int wake_invert;
-       bool runtime_pm;
+       bool init_ack_masked:1;
+       bool mask_invert:1;
+       bool wake_invert:1;
+       bool runtime_pm:1;
 
        int num_regs;
 
index 3a76389c6aaa6989900247712c8ffe33658b33db..2582e413aaa3fc56a7e9b3f9dc372269f5db6f3b 100644 (file)
@@ -137,6 +137,10 @@ struct regulator *__must_check devm_regulator_get(struct device *dev,
                                             const char *id);
 struct regulator *__must_check regulator_get_exclusive(struct device *dev,
                                                       const char *id);
+struct regulator *__must_check regulator_get_optional(struct device *dev,
+                                                     const char *id);
+struct regulator *__must_check devm_regulator_get_optional(struct device *dev,
+                                                          const char *id);
 void regulator_put(struct regulator *regulator);
 void devm_regulator_put(struct regulator *regulator);
 
@@ -217,6 +221,25 @@ devm_regulator_get(struct device *dev, const char *id)
        return NULL;
 }
 
+static inline struct regulator *__must_check
+regulator_get_exclusive(struct device *dev, const char *id)
+{
+       return NULL;
+}
+
+static inline struct regulator *__must_check
+regulator_get_optional(struct device *dev, const char *id)
+{
+       return NULL;
+}
+
+
+static inline struct regulator *__must_check
+devm_regulator_get_optional(struct device *dev, const char *id)
+{
+       return NULL;
+}
+
 static inline void regulator_put(struct regulator *regulator)
 {
 }
@@ -369,8 +392,11 @@ static inline int regulator_count_voltages(struct regulator *regulator)
 static inline int regulator_set_voltage_tol(struct regulator *regulator,
                                            int new_uV, int tol_uV)
 {
-       return regulator_set_voltage(regulator,
-                                    new_uV - tol_uV, new_uV + tol_uV);
+       if (regulator_set_voltage(regulator, new_uV, new_uV + tol_uV) == 0)
+               return 0;
+       else
+               return regulator_set_voltage(regulator,
+                                            new_uV - tol_uV, new_uV + tol_uV);
 }
 
 static inline int regulator_is_supported_voltage_tol(struct regulator *regulator,
index 6700cc94bdd12275df1b8be822939c298b811aff..67e13aa5a4781d2c8fdfa8fc902d2bbc90f2a3a0 100644 (file)
@@ -39,6 +39,24 @@ enum regulator_status {
        REGULATOR_STATUS_UNDEFINED,
 };
 
+/**
+ * Specify a range of voltages for regulator_map_linar_range() and
+ * regulator_list_linear_range().
+ *
+ * @min_uV:  Lowest voltage in range
+ * @max_uV:  Highest voltage in range
+ * @min_sel: Lowest selector for range
+ * @max_sel: Highest selector for range
+ * @uV_step: Step size
+ */
+struct regulator_linear_range {
+       unsigned int min_uV;
+       unsigned int max_uV;
+       unsigned int min_sel;
+       unsigned int max_sel;
+       unsigned int uV_step;
+};
+
 /**
  * struct regulator_ops - regulator operations.
  *
@@ -223,6 +241,9 @@ struct regulator_desc {
        unsigned int linear_min_sel;
        unsigned int ramp_delay;
 
+       const struct regulator_linear_range *linear_ranges;
+       int n_linear_ranges;
+
        const unsigned int *volt_table;
 
        unsigned int vsel_reg;
@@ -326,10 +347,14 @@ int regulator_mode_to_status(unsigned int);
 
 int regulator_list_voltage_linear(struct regulator_dev *rdev,
                                  unsigned int selector);
+int regulator_list_voltage_linear_range(struct regulator_dev *rdev,
+                                       unsigned int selector);
 int regulator_list_voltage_table(struct regulator_dev *rdev,
                                  unsigned int selector);
 int regulator_map_voltage_linear(struct regulator_dev *rdev,
                                  int min_uV, int max_uV);
+int regulator_map_voltage_linear_range(struct regulator_dev *rdev,
+                                      int min_uV, int max_uV);
 int regulator_map_voltage_iterate(struct regulator_dev *rdev,
                                  int min_uV, int max_uV);
 int regulator_map_voltage_ascend(struct regulator_dev *rdev,
index 5c45c85d52ca1f1a8ad75da25976ab425d173e41..f13880e84d859817ec4a8c9fb2deccc21f347b4d 100644 (file)
@@ -11,6 +11,7 @@
  */
 
 #ifndef __FAN53555_H__
+#define __FAN53555_H__
 
 /* VSEL ID */
 enum {
index 36adbc82de6ae6c58c3a224f62e5d385ce7a35ba..999b20ce06cf349185f008388e12e64c70518196 100644 (file)
@@ -134,6 +134,7 @@ struct regulation_constraints {
        unsigned always_on:1;   /* regulator never off when system is on */
        unsigned boot_on:1;     /* bootloader/firmware enabled regulator */
        unsigned apply_uV:1;    /* apply uV constraint if min == max */
+       unsigned ramp_disable:1; /* disable ramp delay */
 };
 
 /**
index 9936763621c74f68b0224504dd34603b69d06f8a..f8a6a4844864303c21cdf692a3c8e953a80d554a 100644 (file)
@@ -39,7 +39,7 @@ enum {
  */
 struct max8660_subdev_data {
        int                             id;
-       char                            *name;
+       const char                      *name;
        struct regulator_init_data      *platform_data;
 };
 
diff --git a/include/linux/regulator/pfuze100.h b/include/linux/regulator/pfuze100.h
new file mode 100644 (file)
index 0000000..65d550b
--- /dev/null
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2011-2013 Freescale Semiconductor, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+#ifndef __LINUX_REG_PFUZE100_H
+#define __LINUX_REG_PFUZE100_H
+
+#define PFUZE100_SW1AB         0
+#define PFUZE100_SW1C          1
+#define PFUZE100_SW2           2
+#define PFUZE100_SW3A          3
+#define PFUZE100_SW3B          4
+#define PFUZE100_SW4           5
+#define PFUZE100_SWBST         6
+#define PFUZE100_VSNVS         7
+#define PFUZE100_VREFDDR       8
+#define PFUZE100_VGEN1         9
+#define PFUZE100_VGEN2         10
+#define PFUZE100_VGEN3         11
+#define PFUZE100_VGEN4         12
+#define PFUZE100_VGEN5         13
+#define PFUZE100_VGEN6         14
+#define PFUZE100_MAX_REGULATOR 15
+
+struct regulator_init_data;
+
+struct pfuze_regulator_platform_data {
+       struct regulator_init_data *init_data[PFUZE100_MAX_REGULATOR];
+};
+
+#endif /* __LINUX_REG_PFUZE100_H */
index 7ce53ae1266bf04c0305f8b695dc240818a7d4ca..5623a7f965b7bbb07ab94a72351aec027ef4adb7 100644 (file)
@@ -1052,17 +1052,25 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
  * @xfrm_policy_delete_security:
  *     @ctx contains the xfrm_sec_ctx.
  *     Authorize deletion of xp->security.
- * @xfrm_state_alloc_security:
+ * @xfrm_state_alloc:
  *     @x contains the xfrm_state being added to the Security Association
  *     Database by the XFRM system.
  *     @sec_ctx contains the security context information being provided by
  *     the user-level SA generation program (e.g., setkey or racoon).
- *     @secid contains the secid from which to take the mls portion of the context.
  *     Allocate a security structure to the x->security field; the security
  *     field is initialized to NULL when the xfrm_state is allocated. Set the
- *     context to correspond to either sec_ctx or polsec, with the mls portion
- *     taken from secid in the latter case.
- *     Return 0 if operation was successful (memory to allocate, legal context).
+ *     context to correspond to sec_ctx. Return 0 if operation was successful
+ *     (memory to allocate, legal context).
+ * @xfrm_state_alloc_acquire:
+ *     @x contains the xfrm_state being added to the Security Association
+ *     Database by the XFRM system.
+ *     @polsec contains the policy's security context.
+ *     @secid contains the secid from which to take the mls portion of the
+ *     context.
+ *     Allocate a security structure to the x->security field; the security
+ *     field is initialized to NULL when the xfrm_state is allocated. Set the
+ *     context to correspond to secid. Return 0 if operation was successful
+ *     (memory to allocate, legal context).
  * @xfrm_state_free_security:
  *     @x contains the xfrm_state.
  *     Deallocate x->security.
@@ -1492,7 +1500,7 @@ struct security_operations {
        int (*inode_alloc_security) (struct inode *inode);
        void (*inode_free_security) (struct inode *inode);
        int (*inode_init_security) (struct inode *inode, struct inode *dir,
-                                   const struct qstr *qstr, char **name,
+                                   const struct qstr *qstr, const char **name,
                                    void **value, size_t *len);
        int (*inode_create) (struct inode *dir,
                             struct dentry *dentry, umode_t mode);
@@ -1679,9 +1687,11 @@ struct security_operations {
        int (*xfrm_policy_clone_security) (struct xfrm_sec_ctx *old_ctx, struct xfrm_sec_ctx **new_ctx);
        void (*xfrm_policy_free_security) (struct xfrm_sec_ctx *ctx);
        int (*xfrm_policy_delete_security) (struct xfrm_sec_ctx *ctx);
-       int (*xfrm_state_alloc_security) (struct xfrm_state *x,
-               struct xfrm_user_sec_ctx *sec_ctx,
-               u32 secid);
+       int (*xfrm_state_alloc) (struct xfrm_state *x,
+                                struct xfrm_user_sec_ctx *sec_ctx);
+       int (*xfrm_state_alloc_acquire) (struct xfrm_state *x,
+                                        struct xfrm_sec_ctx *polsec,
+                                        u32 secid);
        void (*xfrm_state_free_security) (struct xfrm_state *x);
        int (*xfrm_state_delete_security) (struct xfrm_state *x);
        int (*xfrm_policy_lookup) (struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir);
@@ -1770,7 +1780,7 @@ int security_inode_init_security(struct inode *inode, struct inode *dir,
                                 const struct qstr *qstr,
                                 initxattrs initxattrs, void *fs_data);
 int security_old_inode_init_security(struct inode *inode, struct inode *dir,
-                                    const struct qstr *qstr, char **name,
+                                    const struct qstr *qstr, const char **name,
                                     void **value, size_t *len);
 int security_inode_create(struct inode *dir, struct dentry *dentry, umode_t mode);
 int security_inode_link(struct dentry *old_dentry, struct inode *dir,
@@ -2094,8 +2104,8 @@ static inline int security_inode_init_security(struct inode *inode,
 static inline int security_old_inode_init_security(struct inode *inode,
                                                   struct inode *dir,
                                                   const struct qstr *qstr,
-                                                  char **name, void **value,
-                                                  size_t *len)
+                                                  const char **name,
+                                                  void **value, size_t *len)
 {
        return -EOPNOTSUPP;
 }
index d34049712a4d7cee24958816840ca1c843e0541f..dca3c08d414112cfb56dbe011db8263c6b81343f 100644 (file)
 #define SCIx_NOT_SUPPORTED     (-1)
 
 enum {
+       SCBRR_ALGO_INVALID,
+
        SCBRR_ALGO_1,           /* ((clk + 16 * bps) / (16 * bps) - 1) */
        SCBRR_ALGO_2,           /* ((clk + 16 * bps) / (32 * bps) - 1) */
        SCBRR_ALGO_3,           /* (((clk * 2) + 16 * bps) / (16 * bps) - 1) */
        SCBRR_ALGO_4,           /* (((clk * 2) + 16 * bps) / (32 * bps) - 1) */
        SCBRR_ALGO_5,           /* (((clk * 1000 / 32) / bps) - 1) */
        SCBRR_ALGO_6,           /* HSCIF variable sample rate algorithm */
+
+       SCBRR_NR_ALGOS,
 };
 
 #define SCSCR_TIE      (1 << 7)
index fc305713fc6d4e09d05ef30dd1be1ce24407cfd4..90b5e30c2f222ba8e06deb9d347940bf7d5aa94b 100644 (file)
@@ -2,23 +2,17 @@
 #define __ASM_SH_ETH_H__
 
 #include <linux/phy.h>
+#include <linux/if_ether.h>
 
 enum {EDMAC_LITTLE_ENDIAN, EDMAC_BIG_ENDIAN};
-enum {
-       SH_ETH_REG_GIGABIT,
-       SH_ETH_REG_FAST_RCAR,
-       SH_ETH_REG_FAST_SH4,
-       SH_ETH_REG_FAST_SH3_SH2
-};
 
 struct sh_eth_plat_data {
        int phy;
        int edmac_endian;
-       int register_type;
        phy_interface_t phy_interface;
        void (*set_mdio_gate)(void *addr);
 
-       unsigned char mac_addr[6];
+       unsigned char mac_addr[ETH_ALEN];
        unsigned no_ether_link:1;
        unsigned ether_link_active_low:1;
        unsigned needs_init:1;
index 3b71a4e8364200d85afe94b120e09e74edf35d32..5ac96f31d546609403768ea462e88f3e2a02b8dd 100644 (file)
@@ -1805,10 +1805,13 @@ static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len)
  */
 static inline void skb_orphan(struct sk_buff *skb)
 {
-       if (skb->destructor)
+       if (skb->destructor) {
                skb->destructor(skb);
-       skb->destructor = NULL;
-       skb->sk         = NULL;
+               skb->destructor = NULL;
+               skb->sk         = NULL;
+       } else {
+               BUG_ON(skb->sk);
+       }
 }
 
 /**
@@ -2356,6 +2359,10 @@ extern int              skb_copy_datagram_from_iovec(struct sk_buff *skb,
                                                    const struct iovec *from,
                                                    int from_offset,
                                                    int len);
+extern int            zerocopy_sg_from_iovec(struct sk_buff *skb,
+                                             const struct iovec *frm,
+                                             int offset,
+                                             size_t count);
 extern int            skb_copy_datagram_const_iovec(const struct sk_buff *from,
                                                     int offset,
                                                     const struct iovec *to,
index 4dde70e74822be5d13198a15c0816978f3ff4b2a..eec3efd19beb0b8fb059acd97939f0efba09d7fb 100644 (file)
@@ -22,6 +22,7 @@
 #define __LINUX_SMSC911X_H__
 
 #include <linux/phy.h>
+#include <linux/if_ether.h>
 
 /* platform_device configuration data, should be assigned to
  * the platform_device's dev.platform_data */
@@ -31,7 +32,7 @@ struct smsc911x_platform_config {
        unsigned int flags;
        unsigned int shift;
        phy_interface_t phy_interface;
-       unsigned char mac[6];
+       unsigned char mac[ETH_ALEN];
 };
 
 /* Constants for platform_device irq polarity configuration */
index 230c04bda3e2da37e2c8ce387b3ef0e8c0a36118..445ef7519dc28625e7cb19dd698c1918f36b90bf 100644 (file)
@@ -313,6 +313,8 @@ extern int csum_partial_copy_fromiovecend(unsigned char *kdata,
                                          struct iovec *iov, 
                                          int offset, 
                                          unsigned int len, __wsum *csump);
+extern unsigned long iov_pages(const struct iovec *iov, int offset,
+                              unsigned long nr_segs);
 
 extern int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *address, int mode);
 extern int memcpy_toiovecend(const struct iovec *v, unsigned char *kdata,
index 6ce690de447fe80f5967fbc7b459cce646b4706a..437ddb6c4aefbcf4d1b5abbacd6cfcd1ff5a9b3b 100644 (file)
@@ -264,12 +264,30 @@ static inline int get_uint(char **bpp, unsigned int *anint)
        return 0;
 }
 
+static inline int get_time(char **bpp, time_t *time)
+{
+       char buf[50];
+       long long ll;
+       int len = qword_get(bpp, buf, sizeof(buf));
+
+       if (len < 0)
+               return -EINVAL;
+       if (len == 0)
+               return -ENOENT;
+
+       if (kstrtoll(buf, 0, &ll))
+               return -EINVAL;
+
+       *time = (time_t)ll;
+       return 0;
+}
+
 static inline time_t get_expiry(char **bpp)
 {
-       int rv;
+       time_t rv;
        struct timespec boot;
 
-       if (get_int(bpp, &rv))
+       if (get_time(bpp, &rv))
                return 0;
        if (rv < 0)
                return 0;
index 472120b4fac57584f30998d1be87c672607eb643..d68633452d9b070ce12056e5a17dc40f1f148c4d 100644 (file)
@@ -107,7 +107,6 @@ static inline void tcp_clear_options(struct tcp_options_received *rx_opt)
  * only four options will fit in a standard TCP header */
 #define TCP_NUM_SACKS 4
 
-struct tcp_cookie_values;
 struct tcp_request_sock_ops;
 
 struct tcp_request_sock {
@@ -238,6 +237,7 @@ struct tcp_sock {
 
        u32     rcv_wnd;        /* Current receiver window              */
        u32     write_seq;      /* Tail(+1) of data held in tcp send buffer */
+       u32     notsent_lowat;  /* TCP_NOTSENT_LOWAT */
        u32     pushed_seq;     /* Last pushed seq, required to talk to windows */
        u32     lost_out;       /* Lost packets                 */
        u32     sacked_out;     /* SACK'd packets                       */
index f18d64129f99982f966f3c2fe733cd85260dc625..8fbc008e183e33704ddbc0f8cc63c3cd69b5f753 100644 (file)
@@ -34,6 +34,7 @@ struct usbnet {
        struct mutex            phy_mutex;
        unsigned char           suspend_count;
        unsigned char           pkt_cnt, pkt_err;
+       unsigned short          rx_qlen, tx_qlen;
 
        /* i/o info: pipes etc */
        unsigned                in, out;
@@ -253,4 +254,6 @@ extern void usbnet_link_change(struct usbnet *, bool, bool);
 extern int usbnet_status_start(struct usbnet *dev, gfp_t mem_flags);
 extern void usbnet_status_stop(struct usbnet *dev);
 
+extern void usbnet_update_max_qlen(struct usbnet *dev);
+
 #endif /* __LINUX_USB_USBNET_H */
index b52e44f1bd33039b6317e6702212138275db2c45..0df24bfcdb3891408556253a5602cf0d9ddb0ffd 100644 (file)
@@ -32,6 +32,7 @@
 
 #include <linux/types.h>
 #include <linux/bitmap.h>
+#include <linux/if_ether.h>
 
 #define i1480_FW 0x00000303
 /* #define i1480_FW 0x00000302 */
@@ -130,7 +131,7 @@ enum { UWB_DRP_BACKOFF_WIN_MAX = 16 };
  * it is also used to define headers sent down and up the wire/radio).
  */
 struct uwb_mac_addr {
-       u8 data[6];
+       u8 data[ETH_ALEN];
 } __attribute__((packed));
 
 
@@ -568,7 +569,7 @@ struct uwb_rc_evt_confirm {
 /* Device Address Management event. [WHCI] section 3.1.3.2. */
 struct uwb_rc_evt_dev_addr_mgmt {
        struct uwb_rceb rceb;
-       u8 baAddr[6];
+       u8 baAddr[ETH_ALEN];
        u8 bResultCode;
 } __attribute__((packed));
 
index ac8d488e4372d32e2136a3c673dc0e105d5320b9..24579a0312a0c45dca0216d137721e0381d8c11d 100644 (file)
@@ -90,4 +90,11 @@ extern void vfio_unregister_iommu_driver(
        TYPE tmp;                                               \
        offsetof(TYPE, MEMBER) + sizeof(tmp.MEMBER); })         \
 
+/*
+ * External user API
+ */
+extern struct vfio_group *vfio_group_get_external_user(struct file *filep);
+extern void vfio_group_put_external_user(struct vfio_group *group);
+extern int vfio_external_user_iommu_id(struct vfio_group *group);
+
 #endif /* VFIO_H */
index 7dc17e2456de141ef3241a73af02fbec9b9c1ee5..3f3788d4936292ce150a6cb4b56036a58c541128 100644 (file)
@@ -34,10 +34,12 @@ extern void vmpressure_cleanup(struct vmpressure *vmpr);
 extern struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg);
 extern struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr);
 extern struct vmpressure *css_to_vmpressure(struct cgroup_subsys_state *css);
-extern int vmpressure_register_event(struct cgroup *cg, struct cftype *cft,
+extern int vmpressure_register_event(struct cgroup_subsys_state *css,
+                                    struct cftype *cft,
                                     struct eventfd_ctx *eventfd,
                                     const char *args);
-extern void vmpressure_unregister_event(struct cgroup *cg, struct cftype *cft,
+extern void vmpressure_unregister_event(struct cgroup_subsys_state *css,
+                                       struct cftype *cft,
                                        struct eventfd_ctx *eventfd);
 #else
 static inline void vmpressure(gfp_t gfp, struct mem_cgroup *memcg,
index fdbafc6841cfd609cfd0c810ddcf4c3df7a33ed2..91b0a68d38dc2a4941c1b36a8cac270af0136422 100644 (file)
@@ -31,7 +31,7 @@ struct xattr_handler {
 };
 
 struct xattr {
-       char *name;
+       const char *name;
        void *value;
        size_t value_len;
 };
index 944757be49bbf7ef4005202d2f20e71b1fc3fc44..e4142b1ef8cdae0aa99320405d2cd21196058869 100644 (file)
  * @pll_control: PLL and oversampling control. This control allows internal
  *              PLL 1 circuit to be powered down and the oversampling to be
  *              switched off.
- * @dac_1: power on/off DAC 1.
- * @dac_2: power on/off DAC 2.
- * @dac_3: power on/off DAC 3.
- * @dac_4: power on/off DAC 4.
- * @dac_5: power on/off DAC 5.
- * @dac_6: power on/off DAC 6.
+ * @dac: array to configure power on/off DAC's 1..6
  *
  * Power mode register (Register 0x0), for more info refer REGISTER MAP ACCESS
  * section of datasheet[1], table 17 page no 30.
 struct adv7343_power_mode {
        bool sleep_mode;
        bool pll_control;
-       bool dac_1;
-       bool dac_2;
-       bool dac_3;
-       bool dac_4;
-       bool dac_5;
-       bool dac_6;
+       u32 dac[6];
 };
 
 /**
  * struct adv7343_sd_config - SD Only Output Configuration.
- * @sd_dac_out1: Configure SD DAC Output 1.
- * @sd_dac_out2: Configure SD DAC Output 2.
+ * @sd_dac_out: array configuring SD DAC Outputs 1 and 2
  */
 struct adv7343_sd_config {
        /* SD only Output Configuration */
-       bool sd_dac_out1;
-       bool sd_dac_out2;
+       u32 sd_dac_out[2];
 };
 
 /**
index 3882e0675ccfd45e718dbd8dc38bedd51af02120..3cb1704a0650d2020f3bb67cc055bc4bd695c0d0 100644 (file)
@@ -59,6 +59,8 @@ struct vpif_display_config {
        int subdev_count;
        struct vpif_display_chan_config chan_config[VPIF_DISPLAY_MAX_CHANNELS];
        const char *card_name;
+       struct v4l2_async_subdev **asd; /* Flat array, arranged in groups */
+       int *asd_sizes;         /* 0-terminated array of asd group sizes */
 };
 
 struct vpif_input {
@@ -81,5 +83,7 @@ struct vpif_capture_config {
        struct vpif_subdev_info *subdev_info;
        int subdev_count;
        const char *card_name;
+       struct v4l2_async_subdev **asd; /* Flat array, arranged in groups */
+       int *asd_sizes;         /* 0-terminated array of asd group sizes */
 };
 #endif /* _VPIF_TYPES_H */
index 168dd0b1bae236ea3859e1fb384b6cc79766a1da..78f0637ca68d91d1fc45639141bb6f9d39acd82e 100644 (file)
@@ -139,6 +139,7 @@ struct lirc_driver {
        struct lirc_buffer *rbuf;
        int (*set_use_inc) (void *data);
        void (*set_use_dec) (void *data);
+       struct rc_dev *rdev;
        const struct file_operations *fops;
        struct device *dev;
        struct module *owner;
index 06a75deff553c4a2fac3396c48b3954dda422432..2f6f1f78d958c5238e3f0a145c0c33c0a38b45f0 100644 (file)
@@ -101,6 +101,7 @@ struct rc_dev {
        bool                            idle;
        u64                             allowed_protos;
        u64                             enabled_protocols;
+       u32                             users;
        u32                             scanmask;
        void                            *priv;
        spinlock_t                      keylock;
@@ -142,6 +143,9 @@ void rc_free_device(struct rc_dev *dev);
 int rc_register_device(struct rc_dev *dev);
 void rc_unregister_device(struct rc_dev *dev);
 
+int rc_open(struct rc_dev *rdev);
+void rc_close(struct rc_dev *rdev);
+
 void rc_repeat(struct rc_dev *dev);
 void rc_keydown(struct rc_dev *dev, int scancode, u8 toggle);
 void rc_keydown_notimeout(struct rc_dev *dev, int scancode, u8 toggle);
index 4a1191abd936df57986be2f99b44cfcd1f0203ee..f7119ee3977b22541cb5faa8d0282d62b435fe2c 100644 (file)
@@ -12,6 +12,8 @@ enum tveeprom_audio_processor {
        TVEEPROM_AUDPROC_OTHER,
 };
 
+#include <linux/if_ether.h>
+
 struct tveeprom {
        u32 has_radio;
        /* If has_ir == 0, then it is unknown what the IR capabilities are,
@@ -40,7 +42,7 @@ struct tveeprom {
        u32 revision;
        u32 serial_number;
        char rev_str[5];
-       u8 MAC_address[6];
+       u8 MAC_address[ETH_ALEN];
 };
 
 void tveeprom_hauppauge_analog(struct i2c_client *c, struct tveeprom *tvee,
index c3ec6ac75f7e9837f9d686ab87f3b69f455945f7..768356917bea005886a5c897f3af9c740d07ad6e 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/mutex.h>
 
 struct device;
+struct device_node;
 struct v4l2_device;
 struct v4l2_subdev;
 struct v4l2_async_notifier;
@@ -22,10 +23,11 @@ struct v4l2_async_notifier;
 /* A random max subdevice number, used to allocate an array on stack */
 #define V4L2_MAX_SUBDEVS 128U
 
-enum v4l2_async_bus_type {
-       V4L2_ASYNC_BUS_CUSTOM,
-       V4L2_ASYNC_BUS_PLATFORM,
-       V4L2_ASYNC_BUS_I2C,
+enum v4l2_async_match_type {
+       V4L2_ASYNC_MATCH_CUSTOM,
+       V4L2_ASYNC_MATCH_DEVNAME,
+       V4L2_ASYNC_MATCH_I2C,
+       V4L2_ASYNC_MATCH_OF,
 };
 
 /**
@@ -36,11 +38,14 @@ enum v4l2_async_bus_type {
  *             probed, to a notifier->waiting list
  */
 struct v4l2_async_subdev {
-       enum v4l2_async_bus_type bus_type;
+       enum v4l2_async_match_type match_type;
        union {
+               struct {
+                       const struct device_node *node;
+               } of;
                struct {
                        const char *name;
-               } platform;
+               } device_name;
                struct {
                        int adapter_id;
                        unsigned short address;
@@ -56,26 +61,13 @@ struct v4l2_async_subdev {
        struct list_head list;
 };
 
-/**
- * v4l2_async_subdev_list - provided by subdevices
- * @list:      links struct v4l2_async_subdev_list objects to a global list
- *             before probing, and onto notifier->done after probing
- * @asd:       pointer to respective struct v4l2_async_subdev
- * @notifier:  pointer to managing notifier
- */
-struct v4l2_async_subdev_list {
-       struct list_head list;
-       struct v4l2_async_subdev *asd;
-       struct v4l2_async_notifier *notifier;
-};
-
 /**
  * v4l2_async_notifier - v4l2_device notifier data
  * @num_subdevs:number of subdevices
- * @subdev   array of pointers to subdevice descriptors
+ * @subdevs:   array of pointers to subdevice descriptors
  * @v4l2_dev:  pointer to struct v4l2_device
  * @waiting:   list of struct v4l2_async_subdev, waiting for their drivers
- * @done:      list of struct v4l2_async_subdev_list, already probed
+ * @done:      list of struct v4l2_subdev, already probed
  * @list:      member in a global list of notifiers
  * @bound:     a subdevice driver has successfully probed one of subdevices
  * @complete:  all subdevices have been probed successfully
@@ -83,7 +75,7 @@ struct v4l2_async_subdev_list {
  */
 struct v4l2_async_notifier {
        unsigned int num_subdevs;
-       struct v4l2_async_subdev **subdev;
+       struct v4l2_async_subdev **subdevs;
        struct v4l2_device *v4l2_dev;
        struct list_head waiting;
        struct list_head done;
index 0f4555b2a31bdfc4a499df23f3b77de6a95769a2..44542a20ab8126cdcd851a3d369f2b2b956c96ac 100644 (file)
@@ -60,6 +60,7 @@ struct v4l2_m2m_queue_ctx {
        struct list_head        rdy_queue;
        spinlock_t              rdy_spinlock;
        u8                      num_rdy;
+       bool                    buffered;
 };
 
 struct v4l2_m2m_ctx {
@@ -134,6 +135,18 @@ struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev,
                void *drv_priv,
                int (*queue_init)(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq));
 
+static inline void v4l2_m2m_set_src_buffered(struct v4l2_m2m_ctx *m2m_ctx,
+                                            bool buffered)
+{
+       m2m_ctx->out_q_ctx.buffered = buffered;
+}
+
+static inline void v4l2_m2m_set_dst_buffered(struct v4l2_m2m_ctx *m2m_ctx,
+                                            bool buffered)
+{
+       m2m_ctx->cap_q_ctx.buffered = buffered;
+}
+
 void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx);
 
 void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx, struct vb2_buffer *vb);
index 3250cc5e79259febddfb7a9472114281a8f4b1f1..bfda0fe9aeb05fc494a167cc246d9b500a508be2 100644 (file)
@@ -586,15 +586,14 @@ struct v4l2_subdev {
        struct video_device *devnode;
        /* pointer to the physical device, if any */
        struct device *dev;
-       struct v4l2_async_subdev_list asdl;
+       /* Links this subdev to a global subdev_list or @notifier->done list. */
+       struct list_head async_list;
+       /* Pointer to respective struct v4l2_async_subdev. */
+       struct v4l2_async_subdev *asd;
+       /* Pointer to the managing notifier. */
+       struct v4l2_async_notifier *notifier;
 };
 
-static inline struct v4l2_subdev *v4l2_async_to_subdev(
-                       struct v4l2_async_subdev_list *asdl)
-{
-       return container_of(asdl, struct v4l2_subdev, asdl);
-}
-
 #define media_entity_to_v4l2_subdev(ent) \
        container_of(ent, struct v4l2_subdev, entity)
 #define vdev_to_v4l2_subdev(vdev) \
index d9fa68f26c41c34c33db5f743a4142faf7886792..9a36d929711482da1f4090fb9e51c4ab9a261ffd 100644 (file)
@@ -40,8 +40,6 @@
  * @close: member function to discard a connection on this transport
  * @request: member function to issue a request to the transport
  * @cancel: member function to cancel a request (if it hasn't been sent)
- * @cancelled: member function to notify that a cancelled request will not
- *             not receive a reply
  *
  * This is the basic API for a transport module which is registered by the
  * transport module with the 9P core network module and used by the client
@@ -60,7 +58,6 @@ struct p9_trans_module {
        void (*close) (struct p9_client *);
        int (*request) (struct p9_client *, struct p9_req_t *req);
        int (*cancel) (struct p9_client *, struct p9_req_t *req);
-       int (*cancelled)(struct p9_client *, struct p9_req_t *req);
        int (*zc_request)(struct p9_client *, struct p9_req_t *,
                          char *, char *, int , int, int, int);
 };
index b8ffac7b6bab755f5adb4d648b5d34bd57eb028f..9e90fdff470d5a8b358b35c8aa3bc660728ffc05 100644 (file)
@@ -82,36 +82,36 @@ struct tc_action_ops {
        int     (*walk)(struct sk_buff *, struct netlink_callback *, int, struct tc_action *);
 };
 
-extern struct tcf_common *tcf_hash_lookup(u32 index,
-                                         struct tcf_hashinfo *hinfo);
-extern void tcf_hash_destroy(struct tcf_common *p, struct tcf_hashinfo *hinfo);
-extern int tcf_hash_release(struct tcf_common *p, int bind,
-                           struct tcf_hashinfo *hinfo);
-extern int tcf_generic_walker(struct sk_buff *skb, struct netlink_callback *cb,
-                             int type, struct tc_action *a);
-extern u32 tcf_hash_new_index(u32 *idx_gen, struct tcf_hashinfo *hinfo);
-extern int tcf_hash_search(struct tc_action *a, u32 index);
-extern struct tcf_common *tcf_hash_check(u32 index, struct tc_action *a,
-                                        int bind, struct tcf_hashinfo *hinfo);
-extern struct tcf_common *tcf_hash_create(u32 index, struct nlattr *est,
-                                         struct tc_action *a, int size,
-                                         int bind, u32 *idx_gen,
-                                         struct tcf_hashinfo *hinfo);
-extern void tcf_hash_insert(struct tcf_common *p, struct tcf_hashinfo *hinfo);
+struct tcf_common *tcf_hash_lookup(u32 index, struct tcf_hashinfo *hinfo);
+void tcf_hash_destroy(struct tcf_common *p, struct tcf_hashinfo *hinfo);
+int tcf_hash_release(struct tcf_common *p, int bind,
+                    struct tcf_hashinfo *hinfo);
+int tcf_generic_walker(struct sk_buff *skb, struct netlink_callback *cb,
+                      int type, struct tc_action *a);
+u32 tcf_hash_new_index(u32 *idx_gen, struct tcf_hashinfo *hinfo);
+int tcf_hash_search(struct tc_action *a, u32 index);
+struct tcf_common *tcf_hash_check(u32 index, struct tc_action *a,
+                                 int bind, struct tcf_hashinfo *hinfo);
+struct tcf_common *tcf_hash_create(u32 index, struct nlattr *est,
+                                  struct tc_action *a, int size,
+                                  int bind, u32 *idx_gen,
+                                  struct tcf_hashinfo *hinfo);
+void tcf_hash_insert(struct tcf_common *p, struct tcf_hashinfo *hinfo);
 
-extern int tcf_register_action(struct tc_action_ops *a);
-extern int tcf_unregister_action(struct tc_action_ops *a);
-extern void tcf_action_destroy(struct tc_action *a, int bind);
-extern int tcf_action_exec(struct sk_buff *skb, const struct tc_action *a, struct tcf_result *res);
-extern struct tc_action *tcf_action_init(struct net *net, struct nlattr *nla,
-                                        struct nlattr *est, char *n, int ovr,
-                                        int bind);
-extern struct tc_action *tcf_action_init_1(struct net *net, struct nlattr *nla,
-                                          struct nlattr *est, char *n, int ovr,
-                                          int bind);
-extern int tcf_action_dump(struct sk_buff *skb, struct tc_action *a, int, int);
-extern int tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int, int);
-extern int tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int, int);
-extern int tcf_action_copy_stats (struct sk_buff *,struct tc_action *, int);
+int tcf_register_action(struct tc_action_ops *a);
+int tcf_unregister_action(struct tc_action_ops *a);
+void tcf_action_destroy(struct tc_action *a, int bind);
+int tcf_action_exec(struct sk_buff *skb, const struct tc_action *a,
+                   struct tcf_result *res);
+struct tc_action *tcf_action_init(struct net *net, struct nlattr *nla,
+                                 struct nlattr *est, char *n, int ovr,
+                                 int bind);
+struct tc_action *tcf_action_init_1(struct net *net, struct nlattr *nla,
+                                   struct nlattr *est, char *n, int ovr,
+                                   int bind);
+int tcf_action_dump(struct sk_buff *skb, struct tc_action *a, int, int);
+int tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int, int);
+int tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int, int);
+int tcf_action_copy_stats(struct sk_buff *, struct tc_action *, int);
 #endif /* CONFIG_NET_CLS_ACT */
 #endif
index c7b181cb47a6a9ae4b4e30ca2d94ce74f5f9ef40..43fa31a610b81e9d7ae8677d58d34b431cd47576 100644 (file)
@@ -53,51 +53,36 @@ struct prefix_info {
 #define IN6_ADDR_HSIZE_SHIFT   4
 #define IN6_ADDR_HSIZE         (1 << IN6_ADDR_HSIZE_SHIFT)
 
-extern int                     addrconf_init(void);
-extern void                    addrconf_cleanup(void);
+int addrconf_init(void);
+void addrconf_cleanup(void);
 
-extern int                     addrconf_add_ifaddr(struct net *net,
-                                                   void __user *arg);
-extern int                     addrconf_del_ifaddr(struct net *net,
-                                                   void __user *arg);
-extern int                     addrconf_set_dstaddr(struct net *net,
-                                                    void __user *arg);
+int addrconf_add_ifaddr(struct net *net, void __user *arg);
+int addrconf_del_ifaddr(struct net *net, void __user *arg);
+int addrconf_set_dstaddr(struct net *net, void __user *arg);
 
-extern int                     ipv6_chk_addr(struct net *net,
-                                             const struct in6_addr *addr,
-                                             const struct net_device *dev,
-                                             int strict);
+int ipv6_chk_addr(struct net *net, const struct in6_addr *addr,
+                 const struct net_device *dev, int strict);
 
 #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
-extern int                     ipv6_chk_home_addr(struct net *net,
-                                                  const struct in6_addr *addr);
+int ipv6_chk_home_addr(struct net *net, const struct in6_addr *addr);
 #endif
 
-extern int                     ipv6_chk_prefix(const struct in6_addr *addr,
-                                               struct net_device *dev);
-
-extern struct inet6_ifaddr      *ipv6_get_ifaddr(struct net *net,
-                                                const struct in6_addr *addr,
-                                                struct net_device *dev,
-                                                int strict);
-
-extern int                     ipv6_dev_get_saddr(struct net *net,
-                                              const struct net_device *dev,
-                                              const struct in6_addr *daddr,
-                                              unsigned int srcprefs,
-                                              struct in6_addr *saddr);
-extern int                     __ipv6_get_lladdr(struct inet6_dev *idev,
-                                                 struct in6_addr *addr,
-                                                 unsigned char banned_flags);
-extern int                     ipv6_get_lladdr(struct net_device *dev,
-                                               struct in6_addr *addr,
-                                               unsigned char banned_flags);
-extern int                     ipv6_rcv_saddr_equal(const struct sock *sk,
-                                                   const struct sock *sk2);
-extern void                    addrconf_join_solict(struct net_device *dev,
-                                       const struct in6_addr *addr);
-extern void                    addrconf_leave_solict(struct inet6_dev *idev,
-                                       const struct in6_addr *addr);
+int ipv6_chk_prefix(const struct in6_addr *addr, struct net_device *dev);
+
+struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net,
+                                    const struct in6_addr *addr,
+                                    struct net_device *dev, int strict);
+
+int ipv6_dev_get_saddr(struct net *net, const struct net_device *dev,
+                      const struct in6_addr *daddr, unsigned int srcprefs,
+                      struct in6_addr *saddr);
+int __ipv6_get_lladdr(struct inet6_dev *idev, struct in6_addr *addr,
+                     unsigned char banned_flags);
+int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr,
+                   unsigned char banned_flags);
+int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2);
+void addrconf_join_solict(struct net_device *dev, const struct in6_addr *addr);
+void addrconf_leave_solict(struct inet6_dev *idev, const struct in6_addr *addr);
 
 static inline unsigned long addrconf_timeout_fixup(u32 timeout,
                                                   unsigned int unit)
@@ -124,41 +109,38 @@ static inline int addrconf_finite_timeout(unsigned long timeout)
 /*
  *     IPv6 Address Label subsystem (addrlabel.c)
  */
-extern int                     ipv6_addr_label_init(void);
-extern void                    ipv6_addr_label_cleanup(void);
-extern void                    ipv6_addr_label_rtnl_register(void);
-extern u32                     ipv6_addr_label(struct net *net,
-                                               const struct in6_addr *addr,
-                                               int type, int ifindex);
+int ipv6_addr_label_init(void);
+void ipv6_addr_label_cleanup(void);
+void ipv6_addr_label_rtnl_register(void);
+u32 ipv6_addr_label(struct net *net, const struct in6_addr *addr,
+                   int type, int ifindex);
 
 /*
  *     multicast prototypes (mcast.c)
  */
-extern int ipv6_sock_mc_join(struct sock *sk, int ifindex,
-                            const struct in6_addr *addr);
-extern int ipv6_sock_mc_drop(struct sock *sk, int ifindex,
-                            const struct in6_addr *addr);
-extern void ipv6_sock_mc_close(struct sock *sk);
-extern bool inet6_mc_check(struct sock *sk,
-                          const struct in6_addr *mc_addr,
-                          const struct in6_addr *src_addr);
-
-extern int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr);
-extern int __ipv6_dev_mc_dec(struct inet6_dev *idev, const struct in6_addr *addr);
-extern int ipv6_dev_mc_dec(struct net_device *dev, const struct in6_addr *addr);
-extern void ipv6_mc_up(struct inet6_dev *idev);
-extern void ipv6_mc_down(struct inet6_dev *idev);
-extern void ipv6_mc_unmap(struct inet6_dev *idev);
-extern void ipv6_mc_remap(struct inet6_dev *idev);
-extern void ipv6_mc_init_dev(struct inet6_dev *idev);
-extern void ipv6_mc_destroy_dev(struct inet6_dev *idev);
-extern void addrconf_dad_failure(struct inet6_ifaddr *ifp);
-
-extern bool ipv6_chk_mcast_addr(struct net_device *dev,
-                               const struct in6_addr *group,
-                               const struct in6_addr *src_addr);
-
-extern void ipv6_mc_dad_complete(struct inet6_dev *idev);
+int ipv6_sock_mc_join(struct sock *sk, int ifindex,
+                     const struct in6_addr *addr);
+int ipv6_sock_mc_drop(struct sock *sk, int ifindex,
+                     const struct in6_addr *addr);
+void ipv6_sock_mc_close(struct sock *sk);
+bool inet6_mc_check(struct sock *sk, const struct in6_addr *mc_addr,
+                   const struct in6_addr *src_addr);
+
+int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr);
+int __ipv6_dev_mc_dec(struct inet6_dev *idev, const struct in6_addr *addr);
+int ipv6_dev_mc_dec(struct net_device *dev, const struct in6_addr *addr);
+void ipv6_mc_up(struct inet6_dev *idev);
+void ipv6_mc_down(struct inet6_dev *idev);
+void ipv6_mc_unmap(struct inet6_dev *idev);
+void ipv6_mc_remap(struct inet6_dev *idev);
+void ipv6_mc_init_dev(struct inet6_dev *idev);
+void ipv6_mc_destroy_dev(struct inet6_dev *idev);
+void addrconf_dad_failure(struct inet6_ifaddr *ifp);
+
+bool ipv6_chk_mcast_addr(struct net_device *dev, const struct in6_addr *group,
+                        const struct in6_addr *src_addr);
+
+void ipv6_mc_dad_complete(struct inet6_dev *idev);
 /*
  * identify MLD packets for MLD filter exceptions
  */
@@ -184,29 +166,31 @@ static inline bool ipv6_is_mld(struct sk_buff *skb, int nexthdr, int offset)
        return false;
 }
 
-extern void addrconf_prefix_rcv(struct net_device *dev,
-                               u8 *opt, int len, bool sllao);
+void addrconf_prefix_rcv(struct net_device *dev,
+                        u8 *opt, int len, bool sllao);
 
 /*
  *     anycast prototypes (anycast.c)
  */
-extern int ipv6_sock_ac_join(struct sock *sk,int ifindex, const struct in6_addr *addr);
-extern int ipv6_sock_ac_drop(struct sock *sk,int ifindex, const struct in6_addr *addr);
-extern void ipv6_sock_ac_close(struct sock *sk);
-
-extern int ipv6_dev_ac_inc(struct net_device *dev, const struct in6_addr *addr);
-extern int __ipv6_dev_ac_dec(struct inet6_dev *idev, const struct in6_addr *addr);
-extern bool ipv6_chk_acast_addr(struct net *net, struct net_device *dev,
+int ipv6_sock_ac_join(struct sock *sk, int ifindex,
+                     const struct in6_addr *addr);
+int ipv6_sock_ac_drop(struct sock *sk, int ifindex,
+                     const struct in6_addr *addr);
+void ipv6_sock_ac_close(struct sock *sk);
+
+int ipv6_dev_ac_inc(struct net_device *dev, const struct in6_addr *addr);
+int __ipv6_dev_ac_dec(struct inet6_dev *idev, const struct in6_addr *addr);
+bool ipv6_chk_acast_addr(struct net *net, struct net_device *dev,
                                const struct in6_addr *addr);
 
 
 /* Device notifier */
-extern int register_inet6addr_notifier(struct notifier_block *nb);
-extern int unregister_inet6addr_notifier(struct notifier_block *nb);
-extern int inet6addr_notifier_call_chain(unsigned long val, void *v);
+int register_inet6addr_notifier(struct notifier_block *nb);
+int unregister_inet6addr_notifier(struct notifier_block *nb);
+int inet6addr_notifier_call_chain(unsigned long val, void *v);
 
-extern void inet6_netconf_notify_devconf(struct net *net, int type, int ifindex,
-                                        struct ipv6_devconf *devconf);
+void inet6_netconf_notify_devconf(struct net *net, int type, int ifindex,
+                                 struct ipv6_devconf *devconf);
 
 /**
  * __in6_dev_get - get inet6_dev pointer from netdevice
@@ -240,7 +224,7 @@ static inline struct inet6_dev *in6_dev_get(const struct net_device *dev)
        return idev;
 }
 
-extern void in6_dev_finish_destroy(struct inet6_dev *idev);
+void in6_dev_finish_destroy(struct inet6_dev *idev);
 
 static inline void in6_dev_put(struct inet6_dev *idev)
 {
@@ -258,7 +242,7 @@ static inline void in6_dev_hold(struct inet6_dev *idev)
        atomic_inc(&idev->refcnt);
 }
 
-extern void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp);
+void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp);
 
 static inline void in6_ifa_put(struct inet6_ifaddr *ifp)
 {
@@ -340,8 +324,8 @@ static inline bool ipv6_addr_is_solict_mult(const struct in6_addr *addr)
 }
 
 #ifdef CONFIG_PROC_FS
-extern int if6_proc_init(void);
-extern void if6_proc_exit(void);
+int if6_proc_init(void);
+void if6_proc_exit(void);
 #endif
 
 #endif
index 03e6e94536231559b95b67bd1d9f99f313556459..e797d45a5ae62697361e1ca827a7638542cc4f41 100644 (file)
@@ -31,24 +31,21 @@ enum {
 
 typedef void (*rxrpc_interceptor_t)(struct sock *, unsigned long,
                                    struct sk_buff *);
-extern void rxrpc_kernel_intercept_rx_messages(struct socket *,
-                                              rxrpc_interceptor_t);
-extern struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *,
-                                                 struct sockaddr_rxrpc *,
-                                                 struct key *,
-                                                 unsigned long,
-                                                 gfp_t);
-extern int rxrpc_kernel_send_data(struct rxrpc_call *, struct msghdr *,
-                                 size_t);
-extern void rxrpc_kernel_abort_call(struct rxrpc_call *, u32);
-extern void rxrpc_kernel_end_call(struct rxrpc_call *);
-extern bool rxrpc_kernel_is_data_last(struct sk_buff *);
-extern u32 rxrpc_kernel_get_abort_code(struct sk_buff *);
-extern int rxrpc_kernel_get_error_number(struct sk_buff *);
-extern void rxrpc_kernel_data_delivered(struct sk_buff *);
-extern void rxrpc_kernel_free_skb(struct sk_buff *);
-extern struct rxrpc_call *rxrpc_kernel_accept_call(struct socket *,
-                                                  unsigned long);
-extern int rxrpc_kernel_reject_call(struct socket *);
+void rxrpc_kernel_intercept_rx_messages(struct socket *, rxrpc_interceptor_t);
+struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *,
+                                          struct sockaddr_rxrpc *,
+                                          struct key *,
+                                          unsigned long,
+                                          gfp_t);
+int rxrpc_kernel_send_data(struct rxrpc_call *, struct msghdr *, size_t);
+void rxrpc_kernel_abort_call(struct rxrpc_call *, u32);
+void rxrpc_kernel_end_call(struct rxrpc_call *);
+bool rxrpc_kernel_is_data_last(struct sk_buff *);
+u32 rxrpc_kernel_get_abort_code(struct sk_buff *);
+int rxrpc_kernel_get_error_number(struct sk_buff *);
+void rxrpc_kernel_data_delivered(struct sk_buff *);
+void rxrpc_kernel_free_skb(struct sk_buff *);
+struct rxrpc_call *rxrpc_kernel_accept_call(struct socket *, unsigned long);
+int rxrpc_kernel_reject_call(struct socket *);
 
 #endif /* _NET_RXRPC_H */
index dbdfd2b0f3b3dbe0a4470d83e19052db4ba3aa04..a175ba4a7adbc615c350d8b8daaf7b082049f1ec 100644 (file)
@@ -6,12 +6,12 @@
 #include <linux/mutex.h>
 #include <net/sock.h>
 
-extern void unix_inflight(struct file *fp);
-extern void unix_notinflight(struct file *fp);
-extern void unix_gc(void);
-extern void wait_for_unix_gc(void);
-extern struct sock *unix_get_socket(struct file *filp);
-extern struct sock *unix_peer_get(struct sock *);
+void unix_inflight(struct file *fp);
+void unix_notinflight(struct file *fp);
+void unix_gc(void);
+void wait_for_unix_gc(void);
+struct sock *unix_get_socket(struct file *filp);
+struct sock *unix_peer_get(struct sock *);
 
 #define UNIX_HASH_SIZE 256
 #define UNIX_HASH_BITS 8
@@ -35,6 +35,7 @@ struct unix_skb_parms {
 #ifdef CONFIG_SECURITY_NETWORK
        u32                     secid;          /* Security ID          */
 #endif
+       u32                     consumed;
 };
 
 #define UNIXCB(skb)    (*(struct unix_skb_parms *)&((skb)->cb))
@@ -71,8 +72,8 @@ long unix_inq_len(struct sock *sk);
 long unix_outq_len(struct sock *sk);
 
 #ifdef CONFIG_SYSCTL
-extern int unix_sysctl_register(struct net *net);
-extern void unix_sysctl_unregister(struct net *net);
+int unix_sysctl_register(struct net *net);
+void unix_sysctl_unregister(struct net *net);
 #else
 static inline int unix_sysctl_register(struct net *net) { return 0; }
 static inline void unix_sysctl_unregister(struct net *net) {}
index b630dae03411ae69694e842d75108a254c0584aa..7509d9da4e3631ee9f00fba4d27922ace90cb9ae 100644 (file)
@@ -46,22 +46,22 @@ static inline struct neighbour *__ipv4_neigh_lookup(struct net_device *dev, u32
        return n;
 }
 
-extern void    arp_init(void);
-extern int     arp_find(unsigned char *haddr, struct sk_buff *skb);
-extern int     arp_ioctl(struct net *net, unsigned int cmd, void __user *arg);
-extern void     arp_send(int type, int ptype, __be32 dest_ip,
-                        struct net_device *dev, __be32 src_ip,
-                        const unsigned char *dest_hw,
-                        const unsigned char *src_hw, const unsigned char *th);
-extern int     arp_mc_map(__be32 addr, u8 *haddr, struct net_device *dev, int dir);
-extern void    arp_ifdown(struct net_device *dev);
+void arp_init(void);
+int arp_find(unsigned char *haddr, struct sk_buff *skb);
+int arp_ioctl(struct net *net, unsigned int cmd, void __user *arg);
+void arp_send(int type, int ptype, __be32 dest_ip,
+             struct net_device *dev, __be32 src_ip,
+             const unsigned char *dest_hw,
+             const unsigned char *src_hw, const unsigned char *th);
+int arp_mc_map(__be32 addr, u8 *haddr, struct net_device *dev, int dir);
+void arp_ifdown(struct net_device *dev);
 
-extern struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip,
-                                 struct net_device *dev, __be32 src_ip,
-                                 const unsigned char *dest_hw,
-                                 const unsigned char *src_hw,
-                                 const unsigned char *target_hw);
-extern void arp_xmit(struct sk_buff *skb);
+struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip,
+                          struct net_device *dev, __be32 src_ip,
+                          const unsigned char *dest_hw,
+                          const unsigned char *src_hw,
+                          const unsigned char *target_hw);
+void arp_xmit(struct sk_buff *skb);
 int arp_invalidate(struct net_device *dev, __be32 ip);
 
 #endif /* _ARP_H */
index 89ed9ac5701fcc3c309890bb3403eef85c97a63e..bf0396e9a5d3f3c946b30a64c030c9f5c4f17ca0 100644 (file)
@@ -195,7 +195,7 @@ static inline void ax25_hold_route(ax25_route *ax25_rt)
        atomic_inc(&ax25_rt->refcount);
 }
 
-extern void __ax25_put_route(ax25_route *ax25_rt);
+void __ax25_put_route(ax25_route *ax25_rt);
 
 static inline void ax25_put_route(ax25_route *ax25_rt)
 {
@@ -272,30 +272,31 @@ static inline __be16 ax25_type_trans(struct sk_buff *skb, struct net_device *dev
 /* af_ax25.c */
 extern struct hlist_head ax25_list;
 extern spinlock_t ax25_list_lock;
-extern void ax25_cb_add(ax25_cb *);
+void ax25_cb_add(ax25_cb *);
 struct sock *ax25_find_listener(ax25_address *, int, struct net_device *, int);
 struct sock *ax25_get_socket(ax25_address *, ax25_address *, int);
-extern ax25_cb *ax25_find_cb(ax25_address *, ax25_address *, ax25_digi *, struct net_device *);
-extern void ax25_send_to_raw(ax25_address *, struct sk_buff *, int);
-extern void ax25_destroy_socket(ax25_cb *);
-extern ax25_cb * __must_check ax25_create_cb(void);
-extern void ax25_fillin_cb(ax25_cb *, ax25_dev *);
-extern struct sock *ax25_make_new(struct sock *, struct ax25_dev *);
+ax25_cb *ax25_find_cb(ax25_address *, ax25_address *, ax25_digi *,
+                     struct net_device *);
+void ax25_send_to_raw(ax25_address *, struct sk_buff *, int);
+void ax25_destroy_socket(ax25_cb *);
+ax25_cb * __must_check ax25_create_cb(void);
+void ax25_fillin_cb(ax25_cb *, ax25_dev *);
+struct sock *ax25_make_new(struct sock *, struct ax25_dev *);
 
 /* ax25_addr.c */
 extern const ax25_address ax25_bcast;
 extern const ax25_address ax25_defaddr;
 extern const ax25_address null_ax25_address;
-extern char *ax2asc(char *buf, const ax25_address *);
-extern void asc2ax(ax25_address *addr, const char *callsign);
-extern int ax25cmp(const ax25_address *, const ax25_address *);
-extern int ax25digicmp(const ax25_digi *, const ax25_digi *);
-extern const unsigned char *ax25_addr_parse(const unsigned char *, int,
+char *ax2asc(char *buf, const ax25_address *);
+void asc2ax(ax25_address *addr, const char *callsign);
+int ax25cmp(const ax25_address *, const ax25_address *);
+int ax25digicmp(const ax25_digi *, const ax25_digi *);
+const unsigned char *ax25_addr_parse(const unsigned char *, int,
        ax25_address *, ax25_address *, ax25_digi *, int *, int *);
-extern int  ax25_addr_build(unsigned char *, const ax25_address *,
-       const ax25_address *, const ax25_digi *, int, int);
-extern int  ax25_addr_size(const ax25_digi *);
-extern void ax25_digi_invert(const ax25_digi *, ax25_digi *);
+int ax25_addr_build(unsigned char *, const ax25_address *,
+                   const ax25_address *, const ax25_digi *, int, int);
+int ax25_addr_size(const ax25_digi *);
+void ax25_digi_invert(const ax25_digi *, ax25_digi *);
 
 /* ax25_dev.c */
 extern ax25_dev *ax25_dev_list;
@@ -306,33 +307,33 @@ static inline ax25_dev *ax25_dev_ax25dev(struct net_device *dev)
        return dev->ax25_ptr;
 }
 
-extern ax25_dev *ax25_addr_ax25dev(ax25_address *);
-extern void ax25_dev_device_up(struct net_device *);
-extern void ax25_dev_device_down(struct net_device *);
-extern int  ax25_fwd_ioctl(unsigned int, struct ax25_fwd_struct *);
-extern struct net_device *ax25_fwd_dev(struct net_device *);
-extern void ax25_dev_free(void);
+ax25_dev *ax25_addr_ax25dev(ax25_address *);
+void ax25_dev_device_up(struct net_device *);
+void ax25_dev_device_down(struct net_device *);
+int ax25_fwd_ioctl(unsigned int, struct ax25_fwd_struct *);
+struct net_device *ax25_fwd_dev(struct net_device *);
+void ax25_dev_free(void);
 
 /* ax25_ds_in.c */
-extern int  ax25_ds_frame_in(ax25_cb *, struct sk_buff *, int);
+int ax25_ds_frame_in(ax25_cb *, struct sk_buff *, int);
 
 /* ax25_ds_subr.c */
-extern void ax25_ds_nr_error_recovery(ax25_cb *);
-extern void ax25_ds_enquiry_response(ax25_cb *);
-extern void ax25_ds_establish_data_link(ax25_cb *);
-extern void ax25_dev_dama_off(ax25_dev *);
-extern void ax25_dama_on(ax25_cb *);
-extern void ax25_dama_off(ax25_cb *);
+void ax25_ds_nr_error_recovery(ax25_cb *);
+void ax25_ds_enquiry_response(ax25_cb *);
+void ax25_ds_establish_data_link(ax25_cb *);
+void ax25_dev_dama_off(ax25_dev *);
+void ax25_dama_on(ax25_cb *);
+void ax25_dama_off(ax25_cb *);
 
 /* ax25_ds_timer.c */
-extern void ax25_ds_setup_timer(ax25_dev *);
-extern void ax25_ds_set_timer(ax25_dev *);
-extern void ax25_ds_del_timer(ax25_dev *);
-extern void ax25_ds_timer(ax25_cb *);
-extern void ax25_ds_t1_timeout(ax25_cb *);
-extern void ax25_ds_heartbeat_expiry(ax25_cb *);
-extern void ax25_ds_t3timer_expiry(ax25_cb *);
-extern void ax25_ds_idletimer_expiry(ax25_cb *);
+void ax25_ds_setup_timer(ax25_dev *);
+void ax25_ds_set_timer(ax25_dev *);
+void ax25_ds_del_timer(ax25_dev *);
+void ax25_ds_timer(ax25_cb *);
+void ax25_ds_t1_timeout(ax25_cb *);
+void ax25_ds_heartbeat_expiry(ax25_cb *);
+void ax25_ds_t3timer_expiry(ax25_cb *);
+void ax25_ds_idletimer_expiry(ax25_cb *);
 
 /* ax25_iface.c */
 
@@ -342,107 +343,109 @@ struct ax25_protocol {
        int (*func)(struct sk_buff *, ax25_cb *);
 };
 
-extern void ax25_register_pid(struct ax25_protocol *ap);
-extern void ax25_protocol_release(unsigned int);
+void ax25_register_pid(struct ax25_protocol *ap);
+void ax25_protocol_release(unsigned int);
 
 struct ax25_linkfail {
        struct hlist_node lf_node;
        void (*func)(ax25_cb *, int);
 };
 
-extern void ax25_linkfail_register(struct ax25_linkfail *lf);
-extern void ax25_linkfail_release(struct ax25_linkfail *lf);
-extern int __must_check ax25_listen_register(ax25_address *,
-       struct net_device *);
-extern void ax25_listen_release(ax25_address *, struct net_device *);
-extern int  (*ax25_protocol_function(unsigned int))(struct sk_buff *, ax25_cb *);
-extern int  ax25_listen_mine(ax25_address *, struct net_device *);
-extern void ax25_link_failed(ax25_cb *, int);
-extern int  ax25_protocol_is_registered(unsigned int);
+void ax25_linkfail_register(struct ax25_linkfail *lf);
+void ax25_linkfail_release(struct ax25_linkfail *lf);
+int __must_check ax25_listen_register(ax25_address *, struct net_device *);
+void ax25_listen_release(ax25_address *, struct net_device *);
+int(*ax25_protocol_function(unsigned int))(struct sk_buff *, ax25_cb *);
+int ax25_listen_mine(ax25_address *, struct net_device *);
+void ax25_link_failed(ax25_cb *, int);
+int ax25_protocol_is_registered(unsigned int);
 
 /* ax25_in.c */
-extern int  ax25_rx_iframe(ax25_cb *, struct sk_buff *);
-extern int  ax25_kiss_rcv(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *);
+int ax25_rx_iframe(ax25_cb *, struct sk_buff *);
+int ax25_kiss_rcv(struct sk_buff *, struct net_device *, struct packet_type *,
+                 struct net_device *);
 
 /* ax25_ip.c */
-extern int ax25_hard_header(struct sk_buff *, struct net_device *,
-                           unsigned short, const void *,
-                           const void *, unsigned int);
-extern int  ax25_rebuild_header(struct sk_buff *);
+int ax25_hard_header(struct sk_buff *, struct net_device *, unsigned short,
+                    const void *, const void *, unsigned int);
+int ax25_rebuild_header(struct sk_buff *);
 extern const struct header_ops ax25_header_ops;
 
 /* ax25_out.c */
-extern ax25_cb *ax25_send_frame(struct sk_buff *, int, ax25_address *, ax25_address *, ax25_digi *, struct net_device *);
-extern void ax25_output(ax25_cb *, int, struct sk_buff *);
-extern void ax25_kick(ax25_cb *);
-extern void ax25_transmit_buffer(ax25_cb *, struct sk_buff *, int);
-extern void ax25_queue_xmit(struct sk_buff *skb, struct net_device *dev);
-extern int  ax25_check_iframes_acked(ax25_cb *, unsigned short);
+ax25_cb *ax25_send_frame(struct sk_buff *, int, ax25_address *, ax25_address *,
+                        ax25_digi *, struct net_device *);
+void ax25_output(ax25_cb *, int, struct sk_buff *);
+void ax25_kick(ax25_cb *);
+void ax25_transmit_buffer(ax25_cb *, struct sk_buff *, int);
+void ax25_queue_xmit(struct sk_buff *skb, struct net_device *dev);
+int ax25_check_iframes_acked(ax25_cb *, unsigned short);
 
 /* ax25_route.c */
-extern void ax25_rt_device_down(struct net_device *);
-extern int  ax25_rt_ioctl(unsigned int, void __user *);
+void ax25_rt_device_down(struct net_device *);
+int ax25_rt_ioctl(unsigned int, void __user *);
 extern const struct file_operations ax25_route_fops;
-extern ax25_route *ax25_get_route(ax25_address *addr, struct net_device *dev);
-extern int  ax25_rt_autobind(ax25_cb *, ax25_address *);
-extern struct sk_buff *ax25_rt_build_path(struct sk_buff *, ax25_address *, ax25_address *, ax25_digi *);
-extern void ax25_rt_free(void);
+ax25_route *ax25_get_route(ax25_address *addr, struct net_device *dev);
+int ax25_rt_autobind(ax25_cb *, ax25_address *);
+struct sk_buff *ax25_rt_build_path(struct sk_buff *, ax25_address *,
+                                  ax25_address *, ax25_digi *);
+void ax25_rt_free(void);
 
 /* ax25_std_in.c */
-extern int  ax25_std_frame_in(ax25_cb *, struct sk_buff *, int);
+int ax25_std_frame_in(ax25_cb *, struct sk_buff *, int);
 
 /* ax25_std_subr.c */
-extern void ax25_std_nr_error_recovery(ax25_cb *);
-extern void ax25_std_establish_data_link(ax25_cb *);
-extern void ax25_std_transmit_enquiry(ax25_cb *);
-extern void ax25_std_enquiry_response(ax25_cb *);
-extern void ax25_std_timeout_response(ax25_cb *);
+void ax25_std_nr_error_recovery(ax25_cb *);
+void ax25_std_establish_data_link(ax25_cb *);
+void ax25_std_transmit_enquiry(ax25_cb *);
+void ax25_std_enquiry_response(ax25_cb *);
+void ax25_std_timeout_response(ax25_cb *);
 
 /* ax25_std_timer.c */
-extern void ax25_std_heartbeat_expiry(ax25_cb *);
-extern void ax25_std_t1timer_expiry(ax25_cb *);
-extern void ax25_std_t2timer_expiry(ax25_cb *);
-extern void ax25_std_t3timer_expiry(ax25_cb *);
-extern void ax25_std_idletimer_expiry(ax25_cb *);
+void ax25_std_heartbeat_expiry(ax25_cb *);
+void ax25_std_t1timer_expiry(ax25_cb *);
+void ax25_std_t2timer_expiry(ax25_cb *);
+void ax25_std_t3timer_expiry(ax25_cb *);
+void ax25_std_idletimer_expiry(ax25_cb *);
 
 /* ax25_subr.c */
-extern void ax25_clear_queues(ax25_cb *);
-extern void ax25_frames_acked(ax25_cb *, unsigned short);
-extern void ax25_requeue_frames(ax25_cb *);
-extern int  ax25_validate_nr(ax25_cb *, unsigned short);
-extern int  ax25_decode(ax25_cb *, struct sk_buff *, int *, int *, int *);
-extern void ax25_send_control(ax25_cb *, int, int, int);
-extern void ax25_return_dm(struct net_device *, ax25_address *, ax25_address *, ax25_digi *);
-extern void ax25_calculate_t1(ax25_cb *);
-extern void ax25_calculate_rtt(ax25_cb *);
-extern void ax25_disconnect(ax25_cb *, int);
+void ax25_clear_queues(ax25_cb *);
+void ax25_frames_acked(ax25_cb *, unsigned short);
+void ax25_requeue_frames(ax25_cb *);
+int ax25_validate_nr(ax25_cb *, unsigned short);
+int ax25_decode(ax25_cb *, struct sk_buff *, int *, int *, int *);
+void ax25_send_control(ax25_cb *, int, int, int);
+void ax25_return_dm(struct net_device *, ax25_address *, ax25_address *,
+                   ax25_digi *);
+void ax25_calculate_t1(ax25_cb *);
+void ax25_calculate_rtt(ax25_cb *);
+void ax25_disconnect(ax25_cb *, int);
 
 /* ax25_timer.c */
-extern void ax25_setup_timers(ax25_cb *);
-extern void ax25_start_heartbeat(ax25_cb *);
-extern void ax25_start_t1timer(ax25_cb *);
-extern void ax25_start_t2timer(ax25_cb *);
-extern void ax25_start_t3timer(ax25_cb *);
-extern void ax25_start_idletimer(ax25_cb *);
-extern void ax25_stop_heartbeat(ax25_cb *);
-extern void ax25_stop_t1timer(ax25_cb *);
-extern void ax25_stop_t2timer(ax25_cb *);
-extern void ax25_stop_t3timer(ax25_cb *);
-extern void ax25_stop_idletimer(ax25_cb *);
-extern int  ax25_t1timer_running(ax25_cb *);
-extern unsigned long ax25_display_timer(struct timer_list *);
+void ax25_setup_timers(ax25_cb *);
+void ax25_start_heartbeat(ax25_cb *);
+void ax25_start_t1timer(ax25_cb *);
+void ax25_start_t2timer(ax25_cb *);
+void ax25_start_t3timer(ax25_cb *);
+void ax25_start_idletimer(ax25_cb *);
+void ax25_stop_heartbeat(ax25_cb *);
+void ax25_stop_t1timer(ax25_cb *);
+void ax25_stop_t2timer(ax25_cb *);
+void ax25_stop_t3timer(ax25_cb *);
+void ax25_stop_idletimer(ax25_cb *);
+int ax25_t1timer_running(ax25_cb *);
+unsigned long ax25_display_timer(struct timer_list *);
 
 /* ax25_uid.c */
 extern int  ax25_uid_policy;
-extern ax25_uid_assoc *ax25_findbyuid(kuid_t);
-extern int __must_check ax25_uid_ioctl(int, struct sockaddr_ax25 *);
+ax25_uid_assoc *ax25_findbyuid(kuid_t);
+int __must_check ax25_uid_ioctl(int, struct sockaddr_ax25 *);
 extern const struct file_operations ax25_uid_fops;
-extern void ax25_uid_free(void);
+void ax25_uid_free(void);
 
 /* sysctl_net_ax25.c */
 #ifdef CONFIG_SYSCTL
-extern int ax25_register_dev_sysctl(ax25_dev *ax25_dev);
-extern void ax25_unregister_dev_sysctl(ax25_dev *ax25_dev);
+int ax25_register_dev_sysctl(ax25_dev *ax25_dev);
+void ax25_unregister_dev_sysctl(ax25_dev *ax25_dev);
 #else
 static inline int ax25_register_dev_sysctl(ax25_dev *ax25_dev) { return 0; }
 static inline void ax25_unregister_dev_sysctl(ax25_dev *ax25_dev) {}
index 10eb9b389014259617de84924a297634fe9b61d4..10d43d8c7037ac58bea44788af5c7086dbf40f90 100644 (file)
@@ -107,6 +107,14 @@ struct bt_power {
  */
 #define BT_CHANNEL_POLICY_AMP_PREFERRED                2
 
+#define BT_VOICE               11
+struct bt_voice {
+       __u16 setting;
+};
+
+#define BT_VOICE_TRANSPARENT                   0x0003
+#define BT_VOICE_CVSD_16BIT                    0x0060
+
 __printf(1, 2)
 int bt_info(const char *fmt, ...);
 __printf(1, 2)
index 3c592cf473dae89f5fb529bcfdeb445612604622..aaeaf0938ec0af1181c4660c8fa0468937e1bc74 100644 (file)
@@ -238,6 +238,7 @@ enum {
 #define LMP_CVSD       0x01
 #define LMP_PSCHEME    0x02
 #define LMP_PCONTROL   0x04
+#define LMP_TRANSPARENT        0x08
 
 #define LMP_RSSI_INQ   0x40
 #define LMP_ESCO       0x80
@@ -296,6 +297,12 @@ enum {
 #define HCI_AT_GENERAL_BONDING         0x04
 #define HCI_AT_GENERAL_BONDING_MITM    0x05
 
+/* I/O capabilities */
+#define HCI_IO_DISPLAY_ONLY    0x00
+#define HCI_IO_DISPLAY_YESNO   0x01
+#define HCI_IO_KEYBOARD_ONLY   0x02
+#define HCI_IO_NO_INPUT_OUTPUT 0x03
+
 /* Link Key types */
 #define HCI_LK_COMBINATION             0x00
 #define HCI_LK_LOCAL_UNIT              0x01
index f77885ea78c284dd724a63b131e31346ad4308f7..3ede820d328f9798a12e43ef77db50d0a572d29f 100644 (file)
@@ -320,6 +320,7 @@ struct hci_conn {
        __u32           passkey_notify;
        __u8            passkey_entered;
        __u16           disc_timeout;
+       __u16           setting;
        unsigned long   flags;
 
        __u8            remote_cap;
@@ -569,7 +570,7 @@ static inline struct hci_conn *hci_conn_hash_lookup_state(struct hci_dev *hdev,
 }
 
 void hci_disconnect(struct hci_conn *conn, __u8 reason);
-void hci_setup_sync(struct hci_conn *conn, __u16 handle);
+bool hci_setup_sync(struct hci_conn *conn, __u16 handle);
 void hci_sco_setup(struct hci_conn *conn, __u8 status);
 
 struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst);
@@ -584,6 +585,8 @@ struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle);
 
 struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst,
                             __u8 dst_type, __u8 sec_level, __u8 auth_type);
+struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
+                                __u16 setting);
 int hci_conn_check_link_mode(struct hci_conn *conn);
 int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level);
 int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type);
@@ -797,6 +800,7 @@ void hci_conn_del_sysfs(struct hci_conn *conn);
 #define lmp_lsto_capable(dev)      ((dev)->features[0][7] & LMP_LSTO)
 #define lmp_inq_tx_pwr_capable(dev) ((dev)->features[0][7] & LMP_INQ_TX_PWR)
 #define lmp_ext_feat_capable(dev)  ((dev)->features[0][7] & LMP_EXTFEATURES)
+#define lmp_transp_capable(dev)    ((dev)->features[0][2] & LMP_TRANSPARENT)
 
 /* ----- Extended LMP capabilities ----- */
 #define lmp_host_ssp_capable(dev)  ((dev)->features[1][0] & LMP_HOST_SSP)
@@ -1213,4 +1217,8 @@ void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __u8 rand[8],
 
 u8 bdaddr_to_le(u8 bdaddr_type);
 
+#define SCO_AIRMODE_MASK       0x0003
+#define SCO_AIRMODE_CVSD       0x0000
+#define SCO_AIRMODE_TRANSP     0x0003
+
 #endif /* __HCI_CORE_H */
index 1e35c43657c85c71560f2010242c4105701b6287..e252a31ee6b6389f54d1a2483a5f143d6032d57f 100644 (file)
@@ -73,6 +73,7 @@ struct sco_conn {
 struct sco_pinfo {
        struct bt_sock  bt;
        __u32           flags;
+       __u16           setting;
        struct sco_conn *conn;
 };
 
index 7b0730aeb892621abc6419ec9c345b141e184b71..afef53470d2a1f6692d600e2d6a2bc5abf53e02e 100644 (file)
@@ -460,6 +460,33 @@ ieee80211_chandef_rate_flags(struct cfg80211_chan_def *chandef)
        return 0;
 }
 
+/**
+ * ieee80211_chandef_max_power - maximum transmission power for the chandef
+ *
+ * In some regulations, the transmit power may depend on the configured channel
+ * bandwidth which may be defined as dBm/MHz. This function returns the actual
+ * max_power for non-standard (20 MHz) channels.
+ *
+ * @chandef: channel definition for the channel
+ *
+ * Returns: maximum allowed transmission power in dBm for the chandef
+ */
+static inline int
+ieee80211_chandef_max_power(struct cfg80211_chan_def *chandef)
+{
+       switch (chandef->width) {
+       case NL80211_CHAN_WIDTH_5:
+               return min(chandef->chan->max_reg_power - 6,
+                          chandef->chan->max_power);
+       case NL80211_CHAN_WIDTH_10:
+               return min(chandef->chan->max_reg_power - 3,
+                          chandef->chan->max_power);
+       default:
+               break;
+       }
+       return chandef->chan->max_power;
+}
+
 /**
  * enum survey_info_flags - survey information flags
  *
@@ -490,7 +517,7 @@ enum survey_info_flags {
  * @channel: the channel this survey record reports, mandatory
  * @filled: bitflag of flags from &enum survey_info_flags
  * @noise: channel noise in dBm. This and all following fields are
- *     optional
+ *     optional
  * @channel_time: amount of time in ms the radio spent on the channel
  * @channel_time_busy: amount of time the primary channel was sensed busy
  * @channel_time_ext_busy: amount of time the extension channel was sensed busy
@@ -546,9 +573,9 @@ struct cfg80211_crypto_settings {
 /**
  * struct cfg80211_beacon_data - beacon data
  * @head: head portion of beacon (before TIM IE)
- *     or %NULL if not changed
+ *     or %NULL if not changed
  * @tail: tail portion of beacon (after TIM IE)
- *     or %NULL if not changed
+ *     or %NULL if not changed
  * @head_len: length of @head
  * @tail_len: length of @tail
  * @beacon_ies: extra information element(s) to add into Beacon frames or %NULL
@@ -638,6 +665,30 @@ struct cfg80211_ap_settings {
        bool radar_required;
 };
 
+/**
+ * struct cfg80211_csa_settings - channel switch settings
+ *
+ * Used for channel switch
+ *
+ * @chandef: defines the channel to use after the switch
+ * @beacon_csa: beacon data while performing the switch
+ * @counter_offset_beacon: offset for the counter within the beacon (tail)
+ * @counter_offset_presp: offset for the counter within the probe response
+ * @beacon_after: beacon data to be used on the new channel
+ * @radar_required: whether radar detection is required on the new channel
+ * @block_tx: whether transmissions should be blocked while changing
+ * @count: number of beacons until switch
+ */
+struct cfg80211_csa_settings {
+       struct cfg80211_chan_def chandef;
+       struct cfg80211_beacon_data beacon_csa;
+       u16 counter_offset_beacon, counter_offset_presp;
+       struct cfg80211_beacon_data beacon_after;
+       bool radar_required;
+       bool block_tx;
+       u8 count;
+};
+
 /**
  * enum station_parameters_apply_mask - station parameter values to apply
  * @STATION_PARAM_APPLY_UAPSD: apply new uAPSD parameters (uapsd_queues, max_sp)
@@ -764,7 +815,7 @@ int cfg80211_check_station_change(struct wiphy *wiphy,
  * @STATION_INFO_PLINK_STATE: @plink_state filled
  * @STATION_INFO_SIGNAL: @signal filled
  * @STATION_INFO_TX_BITRATE: @txrate fields are filled
- *  (tx_bitrate, tx_bitrate_flags and tx_bitrate_mcs)
+ *     (tx_bitrate, tx_bitrate_flags and tx_bitrate_mcs)
  * @STATION_INFO_RX_PACKETS: @rx_packets filled with 32-bit value
  * @STATION_INFO_TX_PACKETS: @tx_packets filled with 32-bit value
  * @STATION_INFO_TX_RETRIES: @tx_retries filled
@@ -1285,6 +1336,7 @@ struct cfg80211_ssid {
  * @n_ssids: number of SSIDs
  * @channels: channels to scan on.
  * @n_channels: total number of channels to scan
+ * @scan_width: channel width for scanning
  * @ie: optional information element(s) to add into Probe Request or %NULL
  * @ie_len: length of ie in octets
  * @flags: bit field of flags controlling operation
@@ -1300,6 +1352,7 @@ struct cfg80211_scan_request {
        struct cfg80211_ssid *ssids;
        int n_ssids;
        u32 n_channels;
+       enum nl80211_bss_scan_width scan_width;
        const u8 *ie;
        size_t ie_len;
        u32 flags;
@@ -1333,6 +1386,7 @@ struct cfg80211_match_set {
  * @ssids: SSIDs to scan for (passed in the probe_reqs in active scans)
  * @n_ssids: number of SSIDs
  * @n_channels: total number of channels to scan
+ * @scan_width: channel width for scanning
  * @interval: interval between each scheduled scan cycle
  * @ie: optional information element(s) to add into Probe Request or %NULL
  * @ie_len: length of ie in octets
@@ -1352,6 +1406,7 @@ struct cfg80211_sched_scan_request {
        struct cfg80211_ssid *ssids;
        int n_ssids;
        u32 n_channels;
+       enum nl80211_bss_scan_width scan_width;
        u32 interval;
        const u8 *ie;
        size_t ie_len;
@@ -1403,6 +1458,7 @@ struct cfg80211_bss_ies {
  * for use in scan results and similar.
  *
  * @channel: channel this BSS is on
+ * @scan_width: width of the control channel
  * @bssid: BSSID of the BSS
  * @beacon_interval: the beacon interval as from the frame
  * @capability: the capability field in host byte order
@@ -1424,6 +1480,7 @@ struct cfg80211_bss_ies {
  */
 struct cfg80211_bss {
        struct ieee80211_channel *channel;
+       enum nl80211_bss_scan_width scan_width;
 
        const struct cfg80211_bss_ies __rcu *ies;
        const struct cfg80211_bss_ies __rcu *beacon_ies;
@@ -1509,7 +1566,7 @@ enum cfg80211_assoc_req_flags {
  * @prev_bssid: previous BSSID, if not %NULL use reassociate frame
  * @flags:  See &enum cfg80211_assoc_req_flags
  * @ht_capa:  HT Capabilities over-rides.  Values set in ht_capa_mask
- *   will be used in ht_capa.  Un-supported values will be ignored.
+ *     will be used in ht_capa.  Un-supported values will be ignored.
  * @ht_capa_mask:  The bits of ht_capa which are to be used.
  * @vht_capa: VHT capability override
  * @vht_capa_mask: VHT capability mask indicating which fields to use
@@ -1592,6 +1649,9 @@ struct cfg80211_disassoc_request {
  *     user space. Otherwise, port is marked authorized by default.
  * @basic_rates: bitmap of basic rates to use when creating the IBSS
  * @mcast_rate: per-band multicast rate index + 1 (0: disabled)
+ * @ht_capa:  HT Capabilities over-rides.  Values set in ht_capa_mask
+ *     will be used in ht_capa.  Un-supported values will be ignored.
+ * @ht_capa_mask:  The bits of ht_capa which are to be used.
  */
 struct cfg80211_ibss_params {
        u8 *ssid;
@@ -1605,6 +1665,8 @@ struct cfg80211_ibss_params {
        bool privacy;
        bool control_port;
        int mcast_rate[IEEE80211_NUM_BANDS];
+       struct ieee80211_ht_cap ht_capa;
+       struct ieee80211_ht_cap ht_capa_mask;
 };
 
 /**
@@ -1630,9 +1692,9 @@ struct cfg80211_ibss_params {
  * @key: WEP key for shared key authentication
  * @flags:  See &enum cfg80211_assoc_req_flags
  * @bg_scan_period:  Background scan period in seconds
- *   or -1 to indicate that default value is to be used.
+ *     or -1 to indicate that default value is to be used.
  * @ht_capa:  HT Capabilities over-rides.  Values set in ht_capa_mask
- *   will be used in ht_capa.  Un-supported values will be ignored.
+ *     will be used in ht_capa.  Un-supported values will be ignored.
  * @ht_capa_mask:  The bits of ht_capa which are to be used.
  * @vht_capa:  VHT Capability overrides
  * @vht_capa_mask: The bits of vht_capa which are to be used.
@@ -1698,7 +1760,7 @@ struct cfg80211_pmksa {
 };
 
 /**
- * struct cfg80211_wowlan_trig_pkt_pattern - packet pattern
+ * struct cfg80211_pkt_pattern - packet pattern
  * @mask: bitmask where to match pattern and where to ignore bytes,
  *     one bit per byte, in same format as nl80211
  * @pattern: bytes to match where bitmask is 1
@@ -1708,7 +1770,7 @@ struct cfg80211_pmksa {
  * Internal note: @mask and @pattern are allocated in one chunk of
  * memory, free @mask only!
  */
-struct cfg80211_wowlan_trig_pkt_pattern {
+struct cfg80211_pkt_pattern {
        u8 *mask, *pattern;
        int pattern_len;
        int pkt_offset;
@@ -1770,11 +1832,40 @@ struct cfg80211_wowlan {
        bool any, disconnect, magic_pkt, gtk_rekey_failure,
             eap_identity_req, four_way_handshake,
             rfkill_release;
-       struct cfg80211_wowlan_trig_pkt_pattern *patterns;
+       struct cfg80211_pkt_pattern *patterns;
        struct cfg80211_wowlan_tcp *tcp;
        int n_patterns;
 };
 
+/**
+ * struct cfg80211_coalesce_rules - Coalesce rule parameters
+ *
+ * This structure defines coalesce rule for the device.
+ * @delay: maximum coalescing delay in msecs.
+ * @condition: condition for packet coalescence.
+ *     see &enum nl80211_coalesce_condition.
+ * @patterns: array of packet patterns
+ * @n_patterns: number of patterns
+ */
+struct cfg80211_coalesce_rules {
+       int delay;
+       enum nl80211_coalesce_condition condition;
+       struct cfg80211_pkt_pattern *patterns;
+       int n_patterns;
+};
+
+/**
+ * struct cfg80211_coalesce - Packet coalescing settings
+ *
+ * This structure defines coalescing settings.
+ * @rules: array of coalesce rules
+ * @n_rules: number of rules
+ */
+struct cfg80211_coalesce {
+       struct cfg80211_coalesce_rules *rules;
+       int n_rules;
+};
+
 /**
  * struct cfg80211_wowlan_wakeup - wakeup report
  * @disconnect: woke up by getting disconnected
@@ -1990,7 +2081,7 @@ struct cfg80211_update_ft_ies_params {
  * @mgmt_tx_cancel_wait: Cancel the wait time from transmitting a management
  *     frame on another channel
  *
- * @testmode_cmd: run a test mode command
+ * @testmode_cmd: run a test mode command; @wdev may be %NULL
  * @testmode_dump: Implement a test mode dump. The cb->args[2] and up may be
  *     used by the function, but 0 and 1 must not be touched. Additionally,
  *     return error codes other than -ENOBUFS and -ENOENT will terminate the
@@ -2071,6 +2162,9 @@ struct cfg80211_update_ft_ies_params {
  *     driver can take the most appropriate actions.
  * @crit_proto_stop: Indicates critical protocol no longer needs increased link
  *     reliability. This operation can not fail.
+ * @set_coalesce: Set coalesce parameters.
+ *
+ * @channel_switch: initiate channel-switch procedure (with CSA)
  */
 struct cfg80211_ops {
        int     (*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow);
@@ -2196,7 +2290,8 @@ struct cfg80211_ops {
        void    (*rfkill_poll)(struct wiphy *wiphy);
 
 #ifdef CONFIG_NL80211_TESTMODE
-       int     (*testmode_cmd)(struct wiphy *wiphy, void *data, int len);
+       int     (*testmode_cmd)(struct wiphy *wiphy, struct wireless_dev *wdev,
+                               void *data, int len);
        int     (*testmode_dump)(struct wiphy *wiphy, struct sk_buff *skb,
                                 struct netlink_callback *cb,
                                 void *data, int len);
@@ -2306,6 +2401,12 @@ struct cfg80211_ops {
                                    u16 duration);
        void    (*crit_proto_stop)(struct wiphy *wiphy,
                                   struct wireless_dev *wdev);
+       int     (*set_coalesce)(struct wiphy *wiphy,
+                               struct cfg80211_coalesce *coalesce);
+
+       int     (*channel_switch)(struct wiphy *wiphy,
+                                 struct net_device *dev,
+                                 struct cfg80211_csa_settings *params);
 };
 
 /*
@@ -2371,6 +2472,8 @@ struct cfg80211_ops {
  * @WIPHY_FLAG_OFFCHAN_TX: Device supports direct off-channel TX.
  * @WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL: Device supports remain-on-channel call.
  * @WIPHY_FLAG_SUPPORTS_5_10_MHZ: Device supports 5 MHz and 10 MHz channels.
+ * @WIPHY_FLAG_HAS_CHANNEL_SWITCH: Device supports channel switch in
+ *     beaconing mode (AP, IBSS, Mesh, ...).
  */
 enum wiphy_flags {
        WIPHY_FLAG_CUSTOM_REGULATORY            = BIT(0),
@@ -2395,6 +2498,7 @@ enum wiphy_flags {
        WIPHY_FLAG_OFFCHAN_TX                   = BIT(20),
        WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL        = BIT(21),
        WIPHY_FLAG_SUPPORTS_5_10_MHZ            = BIT(22),
+       WIPHY_FLAG_HAS_CHANNEL_SWITCH           = BIT(23),
 };
 
 /**
@@ -2531,6 +2635,25 @@ struct wiphy_wowlan_support {
        const struct wiphy_wowlan_tcp_support *tcp;
 };
 
+/**
+ * struct wiphy_coalesce_support - coalesce support data
+ * @n_rules: maximum number of coalesce rules
+ * @max_delay: maximum supported coalescing delay in msecs
+ * @n_patterns: number of supported patterns in a rule
+ *     (see nl80211.h for the pattern definition)
+ * @pattern_max_len: maximum length of each pattern
+ * @pattern_min_len: minimum length of each pattern
+ * @max_pkt_offset: maximum Rx packet offset
+ */
+struct wiphy_coalesce_support {
+       int n_rules;
+       int max_delay;
+       int n_patterns;
+       int pattern_max_len;
+       int pattern_min_len;
+       int max_pkt_offset;
+};
+
 /**
  * struct wiphy - wireless hardware description
  * @reg_notifier: the driver's regulatory notification callback,
@@ -2641,6 +2764,7 @@ struct wiphy_wowlan_support {
  *     802.11-2012 8.4.2.29 for the defined fields.
  * @extended_capabilities_mask: mask of the valid values
  * @extended_capabilities_len: length of the extended capabilities
+ * @coalesce: packet coalescing support information
  */
 struct wiphy {
        /* assign these fields before you register the wiphy */
@@ -2750,6 +2874,8 @@ struct wiphy {
        const struct iw_handler_def *wext;
 #endif
 
+       const struct wiphy_coalesce_support *coalesce;
+
        char priv[0] __aligned(NETDEV_ALIGN);
 };
 
@@ -2841,7 +2967,7 @@ struct wiphy *wiphy_new(const struct cfg80211_ops *ops, int sizeof_priv);
  *
  * Return: A non-negative wiphy index or a negative error code.
  */
-extern int wiphy_register(struct wiphy *wiphy);
+int wiphy_register(struct wiphy *wiphy);
 
 /**
  * wiphy_unregister - deregister a wiphy from cfg80211
@@ -2852,14 +2978,14 @@ extern int wiphy_register(struct wiphy *wiphy);
  * pointer, but the call may sleep to wait for an outstanding
  * request that is being handled.
  */
-extern void wiphy_unregister(struct wiphy *wiphy);
+void wiphy_unregister(struct wiphy *wiphy);
 
 /**
  * wiphy_free - free wiphy
  *
  * @wiphy: The wiphy to free
  */
-extern void wiphy_free(struct wiphy *wiphy);
+void wiphy_free(struct wiphy *wiphy);
 
 /* internal structs */
 struct cfg80211_conn;
@@ -3014,14 +3140,14 @@ static inline void *wdev_priv(struct wireless_dev *wdev)
  * @band: band, necessary due to channel number overlap
  * Return: The corresponding frequency (in MHz), or 0 if the conversion failed.
  */
-extern int ieee80211_channel_to_frequency(int chan, enum ieee80211_band band);
+int ieee80211_channel_to_frequency(int chan, enum ieee80211_band band);
 
 /**
  * ieee80211_frequency_to_channel - convert frequency to channel number
  * @freq: center frequency
  * Return: The corresponding channel, or 0 if the conversion failed.
  */
-extern int ieee80211_frequency_to_channel(int freq);
+int ieee80211_frequency_to_channel(int freq);
 
 /*
  * Name indirection necessary because the ieee80211 code also has
@@ -3030,8 +3156,8 @@ extern int ieee80211_frequency_to_channel(int freq);
  * to include both header files you'll (rightfully!) get a symbol
  * clash.
  */
-extern struct ieee80211_channel *__ieee80211_get_channel(struct wiphy *wiphy,
-                                                        int freq);
+struct ieee80211_channel *__ieee80211_get_channel(struct wiphy *wiphy,
+                                                 int freq);
 /**
  * ieee80211_get_channel - get channel struct from wiphy for specified frequency
  * @wiphy: the struct wiphy to get the channel for
@@ -3063,11 +3189,13 @@ ieee80211_get_response_rate(struct ieee80211_supported_band *sband,
 /**
  * ieee80211_mandatory_rates - get mandatory rates for a given band
  * @sband: the band to look for rates in
+ * @scan_width: width of the control channel
  *
  * This function returns a bitmap of the mandatory rates for the given
  * band, bits are set according to the rate position in the bitrates array.
  */
-u32 ieee80211_mandatory_rates(struct ieee80211_supported_band *sband);
+u32 ieee80211_mandatory_rates(struct ieee80211_supported_band *sband,
+                             enum nl80211_bss_scan_width scan_width);
 
 /*
  * Radiotap parsing functions -- for controlled injection support
@@ -3141,13 +3269,14 @@ struct ieee80211_radiotap_iterator {
        int _reset_on_ext;
 };
 
-extern int ieee80211_radiotap_iterator_init(
-       struct ieee80211_radiotap_iterator *iterator,
-       struct ieee80211_radiotap_header *radiotap_header,
-       int max_length, const struct ieee80211_radiotap_vendor_namespaces *vns);
+int
+ieee80211_radiotap_iterator_init(struct ieee80211_radiotap_iterator *iterator,
+                                struct ieee80211_radiotap_header *radiotap_header,
+                                int max_length,
+                                const struct ieee80211_radiotap_vendor_namespaces *vns);
 
-extern int ieee80211_radiotap_iterator_next(
-       struct ieee80211_radiotap_iterator *iterator);
+int
+ieee80211_radiotap_iterator_next(struct ieee80211_radiotap_iterator *iterator);
 
 
 extern const unsigned char rfc1042_header[6];
@@ -3307,7 +3436,7 @@ const u8 *cfg80211_find_vendor_ie(unsigned int oui, u8 oui_type,
  *
  * Return: 0 on success. -ENOMEM.
  */
-extern int regulatory_hint(struct wiphy *wiphy, const char *alpha2);
+int regulatory_hint(struct wiphy *wiphy, const char *alpha2);
 
 /**
  * wiphy_apply_custom_regulatory - apply a custom driver regulatory domain
@@ -3321,9 +3450,8 @@ extern int regulatory_hint(struct wiphy *wiphy, const char *alpha2);
  * default channel settings will be disregarded. If no rule is found for a
  * channel on the regulatory domain the channel will be disabled.
  */
-extern void wiphy_apply_custom_regulatory(
-       struct wiphy *wiphy,
-       const struct ieee80211_regdomain *regd);
+void wiphy_apply_custom_regulatory(struct wiphy *wiphy,
+                                  const struct ieee80211_regdomain *regd);
 
 /**
  * freq_reg_info - get regulatory information for the given frequency
@@ -3379,10 +3507,11 @@ void cfg80211_sched_scan_results(struct wiphy *wiphy);
 void cfg80211_sched_scan_stopped(struct wiphy *wiphy);
 
 /**
- * cfg80211_inform_bss_frame - inform cfg80211 of a received BSS frame
+ * cfg80211_inform_bss_width_frame - inform cfg80211 of a received BSS frame
  *
  * @wiphy: the wiphy reporting the BSS
  * @channel: The channel the frame was received on
+ * @scan_width: width of the control channel
  * @mgmt: the management frame (probe response or beacon)
  * @len: length of the management frame
  * @signal: the signal strength, type depends on the wiphy's signal_type
@@ -3395,16 +3524,29 @@ void cfg80211_sched_scan_stopped(struct wiphy *wiphy);
  * Or %NULL on error.
  */
 struct cfg80211_bss * __must_check
+cfg80211_inform_bss_width_frame(struct wiphy *wiphy,
+                               struct ieee80211_channel *channel,
+                               enum nl80211_bss_scan_width scan_width,
+                               struct ieee80211_mgmt *mgmt, size_t len,
+                               s32 signal, gfp_t gfp);
+
+static inline struct cfg80211_bss * __must_check
 cfg80211_inform_bss_frame(struct wiphy *wiphy,
                          struct ieee80211_channel *channel,
                          struct ieee80211_mgmt *mgmt, size_t len,
-                         s32 signal, gfp_t gfp);
+                         s32 signal, gfp_t gfp)
+{
+       return cfg80211_inform_bss_width_frame(wiphy, channel,
+                                              NL80211_BSS_CHAN_WIDTH_20,
+                                              mgmt, len, signal, gfp);
+}
 
 /**
  * cfg80211_inform_bss - inform cfg80211 of a new BSS
  *
  * @wiphy: the wiphy reporting the BSS
  * @channel: The channel the frame was received on
+ * @scan_width: width of the control channel
  * @bssid: the BSSID of the BSS
  * @tsf: the TSF sent by the peer in the beacon/probe response (or 0)
  * @capability: the capability field sent by the peer
@@ -3421,11 +3563,26 @@ cfg80211_inform_bss_frame(struct wiphy *wiphy,
  * Or %NULL on error.
  */
 struct cfg80211_bss * __must_check
+cfg80211_inform_bss_width(struct wiphy *wiphy,
+                         struct ieee80211_channel *channel,
+                         enum nl80211_bss_scan_width scan_width,
+                         const u8 *bssid, u64 tsf, u16 capability,
+                         u16 beacon_interval, const u8 *ie, size_t ielen,
+                         s32 signal, gfp_t gfp);
+
+static inline struct cfg80211_bss * __must_check
 cfg80211_inform_bss(struct wiphy *wiphy,
                    struct ieee80211_channel *channel,
                    const u8 *bssid, u64 tsf, u16 capability,
                    u16 beacon_interval, const u8 *ie, size_t ielen,
-                   s32 signal, gfp_t gfp);
+                   s32 signal, gfp_t gfp)
+{
+       return cfg80211_inform_bss_width(wiphy, channel,
+                                        NL80211_BSS_CHAN_WIDTH_20,
+                                        bssid, tsf, capability,
+                                        beacon_interval, ie, ielen, signal,
+                                        gfp);
+}
 
 struct cfg80211_bss *cfg80211_get_bss(struct wiphy *wiphy,
                                      struct ieee80211_channel *channel,
@@ -3471,6 +3628,19 @@ void cfg80211_put_bss(struct wiphy *wiphy, struct cfg80211_bss *bss);
  */
 void cfg80211_unlink_bss(struct wiphy *wiphy, struct cfg80211_bss *bss);
 
+static inline enum nl80211_bss_scan_width
+cfg80211_chandef_to_scan_width(const struct cfg80211_chan_def *chandef)
+{
+       switch (chandef->width) {
+       case NL80211_CHAN_WIDTH_5:
+               return NL80211_BSS_CHAN_WIDTH_5;
+       case NL80211_CHAN_WIDTH_10:
+               return NL80211_BSS_CHAN_WIDTH_10;
+       default:
+               return NL80211_BSS_CHAN_WIDTH_20;
+       }
+}
+
 /**
  * cfg80211_rx_mlme_mgmt - notification of processed MLME management frame
  * @dev: network device
index 600d1d705bb86f23b00a8d0feebabe6ea303934d..8f59ca50477c6424c6a21d8a5f5ca60af62c7d32 100644 (file)
@@ -107,11 +107,11 @@ static inline void csum_replace2(__sum16 *sum, __be16 from, __be16 to)
 }
 
 struct sk_buff;
-extern void inet_proto_csum_replace4(__sum16 *sum, struct sk_buff *skb,
-                                    __be32 from, __be32 to, int pseudohdr);
-extern void inet_proto_csum_replace16(__sum16 *sum, struct sk_buff *skb,
-                                     const __be32 *from, const __be32 *to,
-                                     int pseudohdr);
+void inet_proto_csum_replace4(__sum16 *sum, struct sk_buff *skb,
+                             __be32 from, __be32 to, int pseudohdr);
+void inet_proto_csum_replace16(__sum16 *sum, struct sk_buff *skb,
+                              const __be32 *from, const __be32 *to,
+                              int pseudohdr);
 
 static inline void inet_proto_csum_replace2(__sum16 *sum, struct sk_buff *skb,
                                            __be16 from, __be16 to,
index 0fee0617fb7d243728583ee351de284f36aca14a..33d03b648646ce04114c73c65853a0b19b548fa4 100644 (file)
@@ -24,7 +24,7 @@ struct cgroup_cls_state
        u32 classid;
 };
 
-extern void sock_update_classid(struct sock *sk);
+void sock_update_classid(struct sock *sk);
 
 #if IS_BUILTIN(CONFIG_NET_CLS_CGROUP)
 static inline u32 task_cls_classid(struct task_struct *p)
@@ -35,7 +35,7 @@ static inline u32 task_cls_classid(struct task_struct *p)
                return 0;
 
        rcu_read_lock();
-       classid = container_of(task_subsys_state(p, net_cls_subsys_id),
+       classid = container_of(task_css(p, net_cls_subsys_id),
                               struct cgroup_cls_state, css)->classid;
        rcu_read_unlock();
 
@@ -51,7 +51,7 @@ static inline u32 task_cls_classid(struct task_struct *p)
                return 0;
 
        rcu_read_lock();
-       css = task_subsys_state(p, net_cls_subsys_id);
+       css = task_css(p, net_cls_subsys_id);
        if (css)
                classid = container_of(css,
                                       struct cgroup_cls_state, css)->classid;
index e361f4882426d26fa8e21dd20e9a6a285c578546..4b2b557fb0e8c2d709e1733a13e4ca3dbc02ff0e 100644 (file)
 
 struct fib_rule {
        struct list_head        list;
-       atomic_t                refcnt;
        int                     iifindex;
        int                     oifindex;
        u32                     mark;
        u32                     mark_mask;
-       u32                     pref;
        u32                     flags;
        u32                     table;
        u8                      action;
+       /* 3 bytes hole, try to use */
        u32                     target;
        struct fib_rule __rcu   *ctarget;
+       struct net              *fr_net;
+
+       atomic_t                refcnt;
+       u32                     pref;
+       int                     suppress_ifgroup;
+       int                     suppress_prefixlen;
        char                    iifname[IFNAMSIZ];
        char                    oifname[IFNAMSIZ];
        struct rcu_head         rcu;
-       struct net *            fr_net;
 };
 
 struct fib_lookup_arg {
@@ -46,6 +50,8 @@ struct fib_rules_ops {
        int                     (*action)(struct fib_rule *,
                                          struct flowi *, int,
                                          struct fib_lookup_arg *);
+       bool                    (*suppress)(struct fib_rule *,
+                                           struct fib_lookup_arg *);
        int                     (*match)(struct fib_rule *,
                                         struct flowi *, int);
        int                     (*configure)(struct fib_rule *,
@@ -80,6 +86,8 @@ struct fib_rules_ops {
        [FRA_FWMARK]    = { .type = NLA_U32 }, \
        [FRA_FWMASK]    = { .type = NLA_U32 }, \
        [FRA_TABLE]     = { .type = NLA_U32 }, \
+       [FRA_SUPPRESS_PREFIXLEN] = { .type = NLA_U32 }, \
+       [FRA_SUPPRESS_IFGROUP] = { .type = NLA_U32 }, \
        [FRA_GOTO]      = { .type = NLA_U32 }
 
 static inline void fib_rule_get(struct fib_rule *rule)
index c6d07cb074bc3ed04ec8ba76e417248814196e8c..8b5b714332971c547faa032574f56010f7ae3e15 100644 (file)
@@ -230,6 +230,10 @@ enum ieee80211_radiotap_type {
 #define        IEEE80211_CHAN_PASSIVE  0x0200  /* Only passive scan allowed */
 #define        IEEE80211_CHAN_DYN      0x0400  /* Dynamic CCK-OFDM channel */
 #define        IEEE80211_CHAN_GFSK     0x0800  /* GFSK channel (FHSS PHY) */
+#define        IEEE80211_CHAN_GSM      0x1000  /* GSM (900 MHz) */
+#define        IEEE80211_CHAN_STURBO   0x2000  /* Static Turbo */
+#define        IEEE80211_CHAN_HALF     0x4000  /* Half channel (10 MHz wide) */
+#define        IEEE80211_CHAN_QUARTER  0x8000  /* Quarter channel (5 MHz wide) */
 
 /* For IEEE80211_RADIOTAP_FLAGS */
 #define        IEEE80211_RADIOTAP_F_CFP        0x01    /* sent/received
index 4da5de10d1d49f62bbbe33e770e4c95f2b5c9ddd..2265b0bf97e523817ea307d554451c4cebe4fc58 100644 (file)
@@ -36,6 +36,7 @@ struct __ip6_tnl_parm {
 struct ip6_tnl {
        struct ip6_tnl __rcu *next;     /* next tunnel in list */
        struct net_device *dev; /* virtual device associated with tunnel */
+       struct net *net;        /* netns for packet i/o */
        struct __ip6_tnl_parm parms;    /* tunnel configuration parameters */
        struct flowi fl;        /* flowi template for xmit */
        struct dst_entry *dst_cache;    /* cached dst */
index a354db5b7662f1bce43af7edc97471cdef3421bb..0ce316bb3c65c4f7725c24d8963dd306ba23ced6 100644 (file)
@@ -86,12 +86,12 @@ struct tnl_ptk_info {
 #define PACKET_RCVD    0
 #define PACKET_REJECT  1
 
-#define IP_TNL_HASH_BITS   10
+#define IP_TNL_HASH_BITS   7
 #define IP_TNL_HASH_SIZE   (1 << IP_TNL_HASH_BITS)
 
 struct ip_tunnel_net {
-       struct hlist_head *tunnels;
        struct net_device *fb_tunnel_dev;
+       struct hlist_head tunnels[IP_TNL_HASH_SIZE];
 };
 
 #ifdef CONFIG_INET
@@ -102,7 +102,7 @@ void  ip_tunnel_dellink(struct net_device *dev, struct list_head *head);
 int ip_tunnel_init_net(struct net *net, int ip_tnl_net_id,
                       struct rtnl_link_ops *ops, char *devname);
 
-void ip_tunnel_delete_net(struct ip_tunnel_net *itn);
+void ip_tunnel_delete_net(struct ip_tunnel_net *itn, struct rtnl_link_ops *ops);
 
 void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
                    const struct iphdr *tnl_params, const u8 protocol);
index 0af8b8dfbc227898badec117e2abe759889f8a20..550c2d6ec7ffefdaaf40c528042d19d8955bf090 100644 (file)
@@ -32,6 +32,7 @@
 #include <linux/types.h>
 #include <linux/skbuff.h>
 #include <linux/netdevice.h>
+#include <linux/if_ether.h>
 
 #include <net/irda/irttp.h>
 
@@ -161,7 +162,7 @@ struct irlan_provider_cb {
        int access_type;     /* Access type */
        __u16 send_arb_val;
 
-       __u8 mac_address[6]; /* Generated MAC address for peer device */
+       __u8 mac_address[ETH_ALEN]; /* Generated MAC address for peer device */
 };
 
 /*
index 5b7a3dadaddec1b7cabb919a1cf9919b90ff638d..e1b598d71176ec6aa116a0465e377a97e8e69e66 100644 (file)
@@ -152,11 +152,14 @@ struct ieee80211_low_level_stats {
  * @IEEE80211_CHANCTX_CHANGE_WIDTH: The channel width changed
  * @IEEE80211_CHANCTX_CHANGE_RX_CHAINS: The number of RX chains changed
  * @IEEE80211_CHANCTX_CHANGE_RADAR: radar detection flag changed
+ * @IEEE80211_CHANCTX_CHANGE_CHANNEL: switched to another operating channel,
+ *     this is used only with channel switching with CSA
  */
 enum ieee80211_chanctx_change {
        IEEE80211_CHANCTX_CHANGE_WIDTH          = BIT(0),
        IEEE80211_CHANCTX_CHANGE_RX_CHAINS      = BIT(1),
        IEEE80211_CHANCTX_CHANGE_RADAR          = BIT(2),
+       IEEE80211_CHANCTX_CHANGE_CHANNEL        = BIT(3),
 };
 
 /**
@@ -372,7 +375,7 @@ struct ieee80211_bss_conf {
 };
 
 /**
- * enum mac80211_tx_control_flags - flags to describe transmission information/status
+ * enum mac80211_tx_info_flags - flags to describe transmission information/status
  *
  * These flags are used with the @flags member of &ieee80211_tx_info.
  *
@@ -468,7 +471,7 @@ struct ieee80211_bss_conf {
  * Note: If you have to add new flags to the enumeration, then don't
  *      forget to update %IEEE80211_TX_TEMPORARY_FLAGS when necessary.
  */
-enum mac80211_tx_control_flags {
+enum mac80211_tx_info_flags {
        IEEE80211_TX_CTL_REQ_TX_STATUS          = BIT(0),
        IEEE80211_TX_CTL_ASSIGN_SEQ             = BIT(1),
        IEEE80211_TX_CTL_NO_ACK                 = BIT(2),
@@ -504,6 +507,18 @@ enum mac80211_tx_control_flags {
 
 #define IEEE80211_TX_CTL_STBC_SHIFT            23
 
+/**
+ * enum mac80211_tx_control_flags - flags to describe transmit control
+ *
+ * @IEEE80211_TX_CTRL_PORT_CTRL_PROTO: this frame is a port control
+ *     protocol frame (e.g. EAP)
+ *
+ * These flags are used in tx_info->control.flags.
+ */
+enum mac80211_tx_control_flags {
+       IEEE80211_TX_CTRL_PORT_CTRL_PROTO       = BIT(0),
+};
+
 /*
  * This definition is used as a mask to clear all temporary flags, which are
  * set by the tx handlers for each transmission attempt by the mac80211 stack.
@@ -677,7 +692,8 @@ struct ieee80211_tx_info {
                        /* NB: vif can be NULL for injected frames */
                        struct ieee80211_vif *vif;
                        struct ieee80211_key_conf *hw_key;
-                       /* 8 bytes free */
+                       u32 flags;
+                       /* 4 bytes free */
                } control;
                struct {
                        struct ieee80211_tx_rate rates[IEEE80211_TX_MAX_RATES];
@@ -811,6 +827,8 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
  * @RX_FLAG_AMPDU_DELIM_CRC_KNOWN: The delimiter CRC field is known (the CRC
  *     is stored in the @ampdu_delimiter_crc field)
  * @RX_FLAG_STBC_MASK: STBC 2 bit bitmask. 1 - Nss=1, 2 - Nss=2, 3 - Nss=3
+ * @RX_FLAG_10MHZ: 10 MHz (half channel) was used
+ * @RX_FLAG_5MHZ: 5 MHz (quarter channel) was used
  */
 enum mac80211_rx_flags {
        RX_FLAG_MMIC_ERROR              = BIT(0),
@@ -839,6 +857,8 @@ enum mac80211_rx_flags {
        RX_FLAG_80P80MHZ                = BIT(24),
        RX_FLAG_160MHZ                  = BIT(25),
        RX_FLAG_STBC_MASK               = BIT(26) | BIT(27),
+       RX_FLAG_10MHZ                   = BIT(28),
+       RX_FLAG_5MHZ                    = BIT(29),
 };
 
 #define RX_FLAG_STBC_SHIFT             26
@@ -1004,11 +1024,11 @@ enum ieee80211_smps_mode {
  * @radar_enabled: whether radar detection is enabled
  *
  * @long_frame_max_tx_count: Maximum number of transmissions for a "long" frame
- *    (a frame not RTS protected), called "dot11LongRetryLimit" in 802.11,
- *    but actually means the number of transmissions not the number of retries
+ *     (a frame not RTS protected), called "dot11LongRetryLimit" in 802.11,
+ *     but actually means the number of transmissions not the number of retries
  * @short_frame_max_tx_count: Maximum number of transmissions for a "short"
- *    frame, called "dot11ShortRetryLimit" in 802.11, but actually means the
- *    number of transmissions not the number of retries
+ *     frame, called "dot11ShortRetryLimit" in 802.11, but actually means the
+ *     number of transmissions not the number of retries
  *
  * @smps_mode: spatial multiplexing powersave mode; note that
  *     %IEEE80211_SMPS_STATIC is used when the device is not
@@ -1080,6 +1100,7 @@ enum ieee80211_vif_flags {
  * @addr: address of this interface
  * @p2p: indicates whether this AP or STA interface is a p2p
  *     interface, i.e. a GO or p2p-sta respectively
+ * @csa_active: marks whether a channel switch is going on
  * @driver_flags: flags/capabilities the driver has for this interface,
  *     these need to be set (or cleared) when the interface is added
  *     or, if supported by the driver, the interface type is changed
@@ -1092,7 +1113,7 @@ enum ieee80211_vif_flags {
  *     be off when it is %NULL there can still be races and packets could be
  *     processed after it switches back to %NULL.
  * @debugfs_dir: debugfs dentry, can be used by drivers to create own per
- *      interface debug files. Note that it will be NULL for the virtual
+ *     interface debug files. Note that it will be NULL for the virtual
  *     monitor interface (if that is requested.)
  * @drv_priv: data area for driver use, will always be aligned to
  *     sizeof(void *).
@@ -1102,6 +1123,7 @@ struct ieee80211_vif {
        struct ieee80211_bss_conf bss_conf;
        u8 addr[ETH_ALEN];
        bool p2p;
+       bool csa_active;
 
        u8 cab_queue;
        u8 hw_queue[IEEE80211_NUM_ACS];
@@ -1425,10 +1447,10 @@ struct ieee80211_tx_control {
  *     the stack.
  *
  * @IEEE80211_HW_CONNECTION_MONITOR:
- *      The hardware performs its own connection monitoring, including
- *      periodic keep-alives to the AP and probing the AP on beacon loss.
- *      When this flag is set, signaling beacon-loss will cause an immediate
- *      change to disassociated state.
+ *     The hardware performs its own connection monitoring, including
+ *     periodic keep-alives to the AP and probing the AP on beacon loss.
+ *     When this flag is set, signaling beacon-loss will cause an immediate
+ *     change to disassociated state.
  *
  * @IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC:
  *     This device needs to get data from beacon before association (i.e.
@@ -1526,10 +1548,10 @@ enum ieee80211_hw_flags {
  * @channel_change_time: time (in microseconds) it takes to change channels.
  *
  * @max_signal: Maximum value for signal (rssi) in RX information, used
- *     only when @IEEE80211_HW_SIGNAL_UNSPEC or @IEEE80211_HW_SIGNAL_DB
+ *     only when @IEEE80211_HW_SIGNAL_UNSPEC or @IEEE80211_HW_SIGNAL_DB
  *
  * @max_listen_interval: max listen interval in units of beacon interval
- *     that HW supports
+ *     that HW supports
  *
  * @queues: number of available hardware transmit queues for
  *     data packets. WMM/QoS requires at least four, these
@@ -2443,7 +2465,7 @@ enum ieee80211_roc_type {
  *     The callback can sleep.
  *
  * @set_tsf: Set the TSF timer to the specified value in the firmware/hardware.
- *      Currently, this is only used for IBSS mode debugging. Is not a
+ *     Currently, this is only used for IBSS mode debugging. Is not a
  *     required function.
  *     The callback can sleep.
  *
@@ -2494,8 +2516,8 @@ enum ieee80211_roc_type {
  *     in IEEE 802.11-2007 section 17.3.8.6 and modify ACK timeout
  *     accordingly. This callback is not required and may sleep.
  *
- * @testmode_cmd: Implement a cfg80211 test mode command.
- *     The callback can sleep.
+ * @testmode_cmd: Implement a cfg80211 test mode command. The passed @vif may
+ *     be %NULL. The callback can sleep.
  * @testmode_dump: Implement a cfg80211 test mode dump. The callback can sleep.
  *
  * @flush: Flush all pending frames from the hardware queue, making sure
@@ -2633,6 +2655,16 @@ enum ieee80211_roc_type {
  * @ipv6_addr_change: IPv6 address assignment on the given interface changed.
  *     Currently, this is only called for managed or P2P client interfaces.
  *     This callback is optional; it must not sleep.
+ *
+ * @channel_switch_beacon: Starts a channel switch to a new channel.
+ *     Beacons are modified to include CSA or ECSA IEs before calling this
+ *     function. The corresponding count fields in these IEs must be
+ *     decremented, and when they reach zero the driver must call
+ *     ieee80211_csa_finish(). Drivers which use ieee80211_beacon_get()
+ *     get the csa counter decremented by mac80211, but must check if it is
+ *     zero using ieee80211_csa_is_complete() after the beacon has been
+ *     transmitted and then call ieee80211_csa_finish().
+ *
  */
 struct ieee80211_ops {
        void (*tx)(struct ieee80211_hw *hw,
@@ -2746,7 +2778,8 @@ struct ieee80211_ops {
        void (*rfkill_poll)(struct ieee80211_hw *hw);
        void (*set_coverage_class)(struct ieee80211_hw *hw, u8 coverage_class);
 #ifdef CONFIG_NL80211_TESTMODE
-       int (*testmode_cmd)(struct ieee80211_hw *hw, void *data, int len);
+       int (*testmode_cmd)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+                           void *data, int len);
        int (*testmode_dump)(struct ieee80211_hw *hw, struct sk_buff *skb,
                             struct netlink_callback *cb,
                             void *data, int len);
@@ -2820,6 +2853,9 @@ struct ieee80211_ops {
                                 struct ieee80211_vif *vif,
                                 struct inet6_dev *idev);
 #endif
+       void (*channel_switch_beacon)(struct ieee80211_hw *hw,
+                                     struct ieee80211_vif *vif,
+                                     struct cfg80211_chan_def *chandef);
 };
 
 /**
@@ -2877,14 +2913,14 @@ enum ieee80211_tpt_led_trigger_flags {
 };
 
 #ifdef CONFIG_MAC80211_LEDS
-extern char *__ieee80211_get_tx_led_name(struct ieee80211_hw *hw);
-extern char *__ieee80211_get_rx_led_name(struct ieee80211_hw *hw);
-extern char *__ieee80211_get_assoc_led_name(struct ieee80211_hw *hw);
-extern char *__ieee80211_get_radio_led_name(struct ieee80211_hw *hw);
-extern char *__ieee80211_create_tpt_led_trigger(
-                               struct ieee80211_hw *hw, unsigned int flags,
-                               const struct ieee80211_tpt_blink *blink_table,
-                               unsigned int blink_table_len);
+char *__ieee80211_get_tx_led_name(struct ieee80211_hw *hw);
+char *__ieee80211_get_rx_led_name(struct ieee80211_hw *hw);
+char *__ieee80211_get_assoc_led_name(struct ieee80211_hw *hw);
+char *__ieee80211_get_radio_led_name(struct ieee80211_hw *hw);
+char *__ieee80211_create_tpt_led_trigger(struct ieee80211_hw *hw,
+                                        unsigned int flags,
+                                        const struct ieee80211_tpt_blink *blink_table,
+                                        unsigned int blink_table_len);
 #endif
 /**
  * ieee80211_get_tx_led_name - get name of TX LED
@@ -3314,6 +3350,25 @@ static inline struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw,
        return ieee80211_beacon_get_tim(hw, vif, NULL, NULL);
 }
 
+/**
+ * ieee80211_csa_finish - notify mac80211 about channel switch
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ *
+ * After a channel switch announcement was scheduled and the counter in this
+ * announcement hit zero, this function must be called by the driver to
+ * notify mac80211 that the channel can be changed.
+ */
+void ieee80211_csa_finish(struct ieee80211_vif *vif);
+
+/**
+ * ieee80211_csa_is_complete - find out if counters reached zero
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ *
+ * This function returns whether the channel switch counters reached zero.
+ */
+bool ieee80211_csa_is_complete(struct ieee80211_vif *vif);
+
+
 /**
  * ieee80211_proberesp_get - retrieve a Probe Response template
  * @hw: pointer obtained from ieee80211_alloc_hw().
@@ -3632,6 +3687,89 @@ void ieee80211_get_key_tx_seq(struct ieee80211_key_conf *keyconf,
 void ieee80211_get_key_rx_seq(struct ieee80211_key_conf *keyconf,
                              int tid, struct ieee80211_key_seq *seq);
 
+/**
+ * ieee80211_set_key_tx_seq - set key TX sequence counter
+ *
+ * @keyconf: the parameter passed with the set key
+ * @seq: new sequence data
+ *
+ * This function allows a driver to set the current TX IV/PNs for the
+ * given key. This is useful when resuming from WoWLAN sleep and the
+ * device may have transmitted frames using the PTK, e.g. replies to
+ * ARP requests.
+ *
+ * Note that this function may only be called when no TX processing
+ * can be done concurrently.
+ */
+void ieee80211_set_key_tx_seq(struct ieee80211_key_conf *keyconf,
+                             struct ieee80211_key_seq *seq);
+
+/**
+ * ieee80211_set_key_rx_seq - set key RX sequence counter
+ *
+ * @keyconf: the parameter passed with the set key
+ * @tid: The TID, or -1 for the management frame value (CCMP only);
+ *     the value on TID 0 is also used for non-QoS frames. For
+ *     CMAC, only TID 0 is valid.
+ * @seq: new sequence data
+ *
+ * This function allows a driver to set the current RX IV/PNs for the
+ * given key. This is useful when resuming from WoWLAN sleep and GTK
+ * rekey may have been done while suspended. It should not be called
+ * if IV checking is done by the device and not by mac80211.
+ *
+ * Note that this function may only be called when no RX processing
+ * can be done concurrently.
+ */
+void ieee80211_set_key_rx_seq(struct ieee80211_key_conf *keyconf,
+                             int tid, struct ieee80211_key_seq *seq);
+
+/**
+ * ieee80211_remove_key - remove the given key
+ * @keyconf: the parameter passed with the set key
+ *
+ * Remove the given key. If the key was uploaded to the hardware at the
+ * time this function is called, it is not deleted in the hardware but
+ * instead assumed to have been removed already.
+ *
+ * Note that due to locking considerations this function can (currently)
+ * only be called during key iteration (ieee80211_iter_keys().)
+ */
+void ieee80211_remove_key(struct ieee80211_key_conf *keyconf);
+
+/**
+ * ieee80211_gtk_rekey_add - add a GTK key from rekeying during WoWLAN
+ * @vif: the virtual interface to add the key on
+ * @keyconf: new key data
+ *
+ * When GTK rekeying was done while the system was suspended, (a) new
+ * key(s) will be available. These will be needed by mac80211 for proper
+ * RX processing, so this function allows setting them.
+ *
+ * The function returns the newly allocated key structure, which will
+ * have similar contents to the passed key configuration but point to
+ * mac80211-owned memory. In case of errors, the function returns an
+ * ERR_PTR(), use IS_ERR() etc.
+ *
+ * Note that this function assumes the key isn't added to hardware
+ * acceleration, so no TX will be done with the key. Since it's a GTK
+ * on managed (station) networks, this is true anyway. If the driver
+ * calls this function from the resume callback and subsequently uses
+ * the return code 1 to reconfigure the device, this key will be part
+ * of the reconfiguration.
+ *
+ * Note that the driver should also call ieee80211_set_key_rx_seq()
+ * for the new key for each TID to set up sequence counters properly.
+ *
+ * IMPORTANT: If this replaces a key that is present in the hardware,
+ * then it will attempt to remove it during this call. In many cases
+ * this isn't what you want, so call ieee80211_remove_key() first for
+ * the key that's being replaced.
+ */
+struct ieee80211_key_conf *
+ieee80211_gtk_rekey_add(struct ieee80211_vif *vif,
+                       struct ieee80211_key_conf *keyconf);
+
 /**
  * ieee80211_gtk_rekey_notify - notify userspace supplicant of rekeying
  * @vif: virtual interface the rekeying was done on
@@ -4204,8 +4342,10 @@ struct rate_control_ops {
 
        void *(*alloc_sta)(void *priv, struct ieee80211_sta *sta, gfp_t gfp);
        void (*rate_init)(void *priv, struct ieee80211_supported_band *sband,
+                         struct cfg80211_chan_def *chandef,
                          struct ieee80211_sta *sta, void *priv_sta);
        void (*rate_update)(void *priv, struct ieee80211_supported_band *sband,
+                           struct cfg80211_chan_def *chandef,
                            struct ieee80211_sta *sta, void *priv_sta,
                            u32 changed);
        void (*free_sta)(void *priv, struct ieee80211_sta *sta,
index 7e748ad8b50c71900bdcedaf09a8c69e41f567a3..536501a3e58d861211656b87ec2e0e9b0e4f9a47 100644 (file)
@@ -195,68 +195,67 @@ static inline void *neighbour_priv(const struct neighbour *n)
 #define NEIGH_UPDATE_F_ISROUTER                        0x40000000
 #define NEIGH_UPDATE_F_ADMIN                   0x80000000
 
-extern void                    neigh_table_init(struct neigh_table *tbl);
-extern int                     neigh_table_clear(struct neigh_table *tbl);
-extern struct neighbour *      neigh_lookup(struct neigh_table *tbl,
-                                            const void *pkey,
-                                            struct net_device *dev);
-extern struct neighbour *      neigh_lookup_nodev(struct neigh_table *tbl,
-                                                  struct net *net,
-                                                  const void *pkey);
-extern struct neighbour *      __neigh_create(struct neigh_table *tbl,
-                                              const void *pkey,
-                                              struct net_device *dev,
-                                              bool want_ref);
+void neigh_table_init(struct neigh_table *tbl);
+int neigh_table_clear(struct neigh_table *tbl);
+struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
+                              struct net_device *dev);
+struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
+                                    const void *pkey);
+struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
+                                struct net_device *dev, bool want_ref);
 static inline struct neighbour *neigh_create(struct neigh_table *tbl,
                                             const void *pkey,
                                             struct net_device *dev)
 {
        return __neigh_create(tbl, pkey, dev, true);
 }
-extern void                    neigh_destroy(struct neighbour *neigh);
-extern int                     __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb);
-extern int                     neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new, 
-                                            u32 flags);
-extern void                    neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev);
-extern int                     neigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
-extern int                     neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb);
-extern int                     neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb);
-extern int                     neigh_compat_output(struct neighbour *neigh, struct sk_buff *skb);
-extern int                     neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb);
-extern struct neighbour        *neigh_event_ns(struct neigh_table *tbl,
+void neigh_destroy(struct neighbour *neigh);
+int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb);
+int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new, u32 flags);
+void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev);
+int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
+int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb);
+int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb);
+int neigh_compat_output(struct neighbour *neigh, struct sk_buff *skb);
+int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb);
+struct neighbour *neigh_event_ns(struct neigh_table *tbl,
                                                u8 *lladdr, void *saddr,
                                                struct net_device *dev);
 
-extern struct neigh_parms      *neigh_parms_alloc(struct net_device *dev, struct neigh_table *tbl);
-extern void                    neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms);
+struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
+                                     struct neigh_table *tbl);
+void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms);
 
 static inline
-struct net                     *neigh_parms_net(const struct neigh_parms *parms)
+struct net *neigh_parms_net(const struct neigh_parms *parms)
 {
        return read_pnet(&parms->net);
 }
 
-extern unsigned long           neigh_rand_reach_time(unsigned long base);
+unsigned long neigh_rand_reach_time(unsigned long base);
 
-extern void                    pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
-                                              struct sk_buff *skb);
-extern struct pneigh_entry     *pneigh_lookup(struct neigh_table *tbl, struct net *net, const void *key, struct net_device *dev, int creat);
-extern struct pneigh_entry     *__pneigh_lookup(struct neigh_table *tbl,
-                                                struct net *net,
-                                                const void *key,
-                                                struct net_device *dev);
-extern int                     pneigh_delete(struct neigh_table *tbl, struct net *net, const void *key, struct net_device *dev);
+void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
+                   struct sk_buff *skb);
+struct pneigh_entry *pneigh_lookup(struct neigh_table *tbl, struct net *net,
+                                  const void *key, struct net_device *dev,
+                                  int creat);
+struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl, struct net *net,
+                                    const void *key, struct net_device *dev);
+int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *key,
+                 struct net_device *dev);
 
-static inline
-struct net                     *pneigh_net(const struct pneigh_entry *pneigh)
+static inline struct net *pneigh_net(const struct pneigh_entry *pneigh)
 {
        return read_pnet(&pneigh->net);
 }
 
-extern void neigh_app_ns(struct neighbour *n);
-extern void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie);
-extern void __neigh_for_each_release(struct neigh_table *tbl, int (*cb)(struct neighbour *));
-extern void pneigh_for_each(struct neigh_table *tbl, void (*cb)(struct pneigh_entry *));
+void neigh_app_ns(struct neighbour *n);
+void neigh_for_each(struct neigh_table *tbl,
+                   void (*cb)(struct neighbour *, void *), void *cookie);
+void __neigh_for_each_release(struct neigh_table *tbl,
+                             int (*cb)(struct neighbour *));
+void pneigh_for_each(struct neigh_table *tbl,
+                    void (*cb)(struct pneigh_entry *));
 
 struct neigh_seq_state {
        struct seq_net_private p;
@@ -270,15 +269,14 @@ struct neigh_seq_state {
 #define NEIGH_SEQ_IS_PNEIGH    0x00000002
 #define NEIGH_SEQ_SKIP_NOARP   0x00000004
 };
-extern void *neigh_seq_start(struct seq_file *, loff_t *, struct neigh_table *, unsigned int);
-extern void *neigh_seq_next(struct seq_file *, void *, loff_t *);
-extern void neigh_seq_stop(struct seq_file *, void *);
-
-extern int                     neigh_sysctl_register(struct net_device *dev, 
-                                                     struct neigh_parms *p,
-                                                     char *p_name,
-                                                     proc_handler *proc_handler);
-extern void                    neigh_sysctl_unregister(struct neigh_parms *p);
+void *neigh_seq_start(struct seq_file *, loff_t *, struct neigh_table *,
+                     unsigned int);
+void *neigh_seq_next(struct seq_file *, void *, loff_t *);
+void neigh_seq_stop(struct seq_file *, void *);
+
+int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
+                         char *p_name, proc_handler *proc_handler);
+void neigh_sysctl_unregister(struct neigh_parms *p);
 
 static inline void __neigh_parms_put(struct neigh_parms *parms)
 {
index 84e37b1ca9e17824ef33ac4fa7708d684f520edc..1313456a0994e03cab0859e7960773b0115f8652 100644 (file)
@@ -119,7 +119,6 @@ struct net {
        struct netns_ipvs       *ipvs;
 #endif
        struct sock             *diag_nlsk;
-       atomic_t                rt_genid;
        atomic_t                fnhe_genid;
 };
 
@@ -333,14 +332,42 @@ static inline void unregister_net_sysctl_table(struct ctl_table_header *header)
 }
 #endif
 
-static inline int rt_genid(struct net *net)
+static inline int rt_genid_ipv4(struct net *net)
 {
-       return atomic_read(&net->rt_genid);
+       return atomic_read(&net->ipv4.rt_genid);
 }
 
-static inline void rt_genid_bump(struct net *net)
+static inline void rt_genid_bump_ipv4(struct net *net)
 {
-       atomic_inc(&net->rt_genid);
+       atomic_inc(&net->ipv4.rt_genid);
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+static inline int rt_genid_ipv6(struct net *net)
+{
+       return atomic_read(&net->ipv6.rt_genid);
+}
+
+static inline void rt_genid_bump_ipv6(struct net *net)
+{
+       atomic_inc(&net->ipv6.rt_genid);
+}
+#else
+static inline int rt_genid_ipv6(struct net *net)
+{
+       return 0;
+}
+
+static inline void rt_genid_bump_ipv6(struct net *net)
+{
+}
+#endif
+
+/* For callers who don't really care about whether it's IPv4 or IPv6 */
+static inline void rt_genid_bump_all(struct net *net)
+{
+       rt_genid_bump_ipv4(net);
+       rt_genid_bump_ipv6(net);
 }
 
 static inline int fnhe_genid(struct net *net)
index 644d9c223d249617291edce40d11c75cec6fd0fd..0c1288a50e8bde05d896ebf243d0b7c3a01deb11 100644 (file)
@@ -181,8 +181,7 @@ __nf_conntrack_find(struct net *net, u16 zone,
                    const struct nf_conntrack_tuple *tuple);
 
 extern int nf_conntrack_hash_check_insert(struct nf_conn *ct);
-extern void nf_ct_delete_from_lists(struct nf_conn *ct);
-extern void nf_ct_dying_timeout(struct nf_conn *ct);
+bool nf_ct_delete(struct nf_conn *ct, u32 pid, int report);
 
 extern void nf_conntrack_flush_report(struct net *net, u32 portid, int report);
 
@@ -235,7 +234,7 @@ static inline bool nf_ct_kill(struct nf_conn *ct)
 }
 
 /* These are for NAT.  Icky. */
-extern s16 (*nf_ct_nat_offset)(const struct nf_conn *ct,
+extern s32 (*nf_ct_nat_offset)(const struct nf_conn *ct,
                               enum ip_conntrack_dir dir,
                               u32 seq);
 
@@ -249,7 +248,9 @@ extern void nf_ct_untracked_status_or(unsigned long bits);
 
 /* Iterate over all conntracks: if iter returns true, it's deleted. */
 extern void
-nf_ct_iterate_cleanup(struct net *net, int (*iter)(struct nf_conn *i, void *data), void *data);
+nf_ct_iterate_cleanup(struct net *net,
+                     int (*iter)(struct nf_conn *i, void *data),
+                     void *data, u32 portid, int report);
 extern void nf_conntrack_free(struct nf_conn *ct);
 extern struct nf_conn *
 nf_conntrack_alloc(struct net *net, u16 zone,
index 914d8d9007981bd9b4cf7db67dc79ec78c6896d0..b411d7b17dec40a64cc41df315f6188004ac65fa 100644 (file)
@@ -148,17 +148,10 @@ extern int nf_ct_port_nlattr_tuple_size(void);
 extern const struct nla_policy nf_ct_port_nla_policy[];
 
 #ifdef CONFIG_SYSCTL
-#ifdef DEBUG_INVALID_PACKETS
 #define LOG_INVALID(net, proto)                                \
        ((net)->ct.sysctl_log_invalid == (proto) ||     \
         (net)->ct.sysctl_log_invalid == IPPROTO_RAW)
 #else
-#define LOG_INVALID(net, proto)                                \
-       (((net)->ct.sysctl_log_invalid == (proto) ||    \
-         (net)->ct.sysctl_log_invalid == IPPROTO_RAW)  \
-        && net_ratelimit())
-#endif
-#else
 static inline int LOG_INVALID(struct net *net, int proto) { return 0; }
 #endif /* CONFIG_SYSCTL */
 
index ad14a799fd2e50b153e47e80a9cd6fbc4235be7f..e2441413675c1c9a5aedd501aa2bdd01ee80bb3d 100644 (file)
@@ -19,7 +19,7 @@ struct nf_nat_seq {
        u_int32_t correction_pos;
 
        /* sequence number offset before and after last modification */
-       int16_t offset_before, offset_after;
+       int32_t offset_before, offset_after;
 };
 
 #include <linux/list.h>
index b4d6bfc2af034a32c1a5f9b847c91b99039cd868..194c347949237f9b4cb270c3a52881dd17d304eb 100644 (file)
@@ -41,7 +41,7 @@ extern int nf_nat_mangle_udp_packet(struct sk_buff *skb,
 
 extern void nf_nat_set_seq_adjust(struct nf_conn *ct,
                                  enum ip_conntrack_info ctinfo,
-                                 __be32 seq, s16 off);
+                                 __be32 seq, s32 off);
 extern int nf_nat_seq_adjust(struct sk_buff *skb,
                             struct nf_conn *ct,
                             enum ip_conntrack_info ctinfo,
@@ -56,11 +56,11 @@ extern int (*nf_nat_seq_adjust_hook)(struct sk_buff *skb,
 extern void nf_nat_follow_master(struct nf_conn *ct,
                                 struct nf_conntrack_expect *this);
 
-extern s16 nf_nat_get_offset(const struct nf_conn *ct,
+extern s32 nf_nat_get_offset(const struct nf_conn *ct,
                             enum ip_conntrack_dir dir,
                             u32 seq);
 
 extern void nf_nat_tcp_seq_adjust(struct sk_buff *skb, struct nf_conn *ct,
-                                 u32 dir, int off);
+                                 u32 dir, s32 off);
 
 #endif
diff --git a/include/net/netfilter/nf_tproxy_core.h b/include/net/netfilter/nf_tproxy_core.h
deleted file mode 100644 (file)
index 36d9379..0000000
+++ /dev/null
@@ -1,210 +0,0 @@
-#ifndef _NF_TPROXY_CORE_H
-#define _NF_TPROXY_CORE_H
-
-#include <linux/types.h>
-#include <linux/in.h>
-#include <linux/skbuff.h>
-#include <net/sock.h>
-#include <net/inet_hashtables.h>
-#include <net/inet6_hashtables.h>
-#include <net/tcp.h>
-
-#define NFT_LOOKUP_ANY         0
-#define NFT_LOOKUP_LISTENER    1
-#define NFT_LOOKUP_ESTABLISHED 2
-
-/* look up and get a reference to a matching socket */
-
-
-/* This function is used by the 'TPROXY' target and the 'socket'
- * match. The following lookups are supported:
- *
- * Explicit TProxy target rule
- * ===========================
- *
- * This is used when the user wants to intercept a connection matching
- * an explicit iptables rule. In this case the sockets are assumed
- * matching in preference order:
- *
- *   - match: if there's a fully established connection matching the
- *     _packet_ tuple, it is returned, assuming the redirection
- *     already took place and we process a packet belonging to an
- *     established connection
- *
- *   - match: if there's a listening socket matching the redirection
- *     (e.g. on-port & on-ip of the connection), it is returned,
- *     regardless if it was bound to 0.0.0.0 or an explicit
- *     address. The reasoning is that if there's an explicit rule, it
- *     does not really matter if the listener is bound to an interface
- *     or to 0. The user already stated that he wants redirection
- *     (since he added the rule).
- *
- * "socket" match based redirection (no specific rule)
- * ===================================================
- *
- * There are connections with dynamic endpoints (e.g. FTP data
- * connection) that the user is unable to add explicit rules
- * for. These are taken care of by a generic "socket" rule. It is
- * assumed that the proxy application is trusted to open such
- * connections without explicit iptables rule (except of course the
- * generic 'socket' rule). In this case the following sockets are
- * matched in preference order:
- *
- *   - match: if there's a fully established connection matching the
- *     _packet_ tuple
- *
- *   - match: if there's a non-zero bound listener (possibly with a
- *     non-local address) We don't accept zero-bound listeners, since
- *     then local services could intercept traffic going through the
- *     box.
- *
- * Please note that there's an overlap between what a TPROXY target
- * and a socket match will match. Normally if you have both rules the
- * "socket" match will be the first one, effectively all packets
- * belonging to established connections going through that one.
- */
-static inline struct sock *
-nf_tproxy_get_sock_v4(struct net *net, const u8 protocol,
-                     const __be32 saddr, const __be32 daddr,
-                     const __be16 sport, const __be16 dport,
-                     const struct net_device *in, int lookup_type)
-{
-       struct sock *sk;
-
-       /* look up socket */
-       switch (protocol) {
-       case IPPROTO_TCP:
-               switch (lookup_type) {
-               case NFT_LOOKUP_ANY:
-                       sk = __inet_lookup(net, &tcp_hashinfo,
-                                          saddr, sport, daddr, dport,
-                                          in->ifindex);
-                       break;
-               case NFT_LOOKUP_LISTENER:
-                       sk = inet_lookup_listener(net, &tcp_hashinfo,
-                                                   saddr, sport,
-                                                   daddr, dport,
-                                                   in->ifindex);
-
-                       /* NOTE: we return listeners even if bound to
-                        * 0.0.0.0, those are filtered out in
-                        * xt_socket, since xt_TPROXY needs 0 bound
-                        * listeners too */
-
-                       break;
-               case NFT_LOOKUP_ESTABLISHED:
-                       sk = inet_lookup_established(net, &tcp_hashinfo,
-                                                   saddr, sport, daddr, dport,
-                                                   in->ifindex);
-                       break;
-               default:
-                       WARN_ON(1);
-                       sk = NULL;
-                       break;
-               }
-               break;
-       case IPPROTO_UDP:
-               sk = udp4_lib_lookup(net, saddr, sport, daddr, dport,
-                                    in->ifindex);
-               if (sk && lookup_type != NFT_LOOKUP_ANY) {
-                       int connected = (sk->sk_state == TCP_ESTABLISHED);
-                       int wildcard = (inet_sk(sk)->inet_rcv_saddr == 0);
-
-                       /* NOTE: we return listeners even if bound to
-                        * 0.0.0.0, those are filtered out in
-                        * xt_socket, since xt_TPROXY needs 0 bound
-                        * listeners too */
-                       if ((lookup_type == NFT_LOOKUP_ESTABLISHED && (!connected || wildcard)) ||
-                           (lookup_type == NFT_LOOKUP_LISTENER && connected)) {
-                               sock_put(sk);
-                               sk = NULL;
-                       }
-               }
-               break;
-       default:
-               WARN_ON(1);
-               sk = NULL;
-       }
-
-       pr_debug("tproxy socket lookup: proto %u %08x:%u -> %08x:%u, lookup type: %d, sock %p\n",
-                protocol, ntohl(saddr), ntohs(sport), ntohl(daddr), ntohs(dport), lookup_type, sk);
-
-       return sk;
-}
-
-#if IS_ENABLED(CONFIG_IPV6)
-static inline struct sock *
-nf_tproxy_get_sock_v6(struct net *net, const u8 protocol,
-                     const struct in6_addr *saddr, const struct in6_addr *daddr,
-                     const __be16 sport, const __be16 dport,
-                     const struct net_device *in, int lookup_type)
-{
-       struct sock *sk;
-
-       /* look up socket */
-       switch (protocol) {
-       case IPPROTO_TCP:
-               switch (lookup_type) {
-               case NFT_LOOKUP_ANY:
-                       sk = inet6_lookup(net, &tcp_hashinfo,
-                                         saddr, sport, daddr, dport,
-                                         in->ifindex);
-                       break;
-               case NFT_LOOKUP_LISTENER:
-                       sk = inet6_lookup_listener(net, &tcp_hashinfo,
-                                                  saddr, sport,
-                                                  daddr, ntohs(dport),
-                                                  in->ifindex);
-
-                       /* NOTE: we return listeners even if bound to
-                        * 0.0.0.0, those are filtered out in
-                        * xt_socket, since xt_TPROXY needs 0 bound
-                        * listeners too */
-
-                       break;
-               case NFT_LOOKUP_ESTABLISHED:
-                       sk = __inet6_lookup_established(net, &tcp_hashinfo,
-                                                       saddr, sport, daddr, ntohs(dport),
-                                                       in->ifindex);
-                       break;
-               default:
-                       WARN_ON(1);
-                       sk = NULL;
-                       break;
-               }
-               break;
-       case IPPROTO_UDP:
-               sk = udp6_lib_lookup(net, saddr, sport, daddr, dport,
-                                    in->ifindex);
-               if (sk && lookup_type != NFT_LOOKUP_ANY) {
-                       int connected = (sk->sk_state == TCP_ESTABLISHED);
-                       int wildcard = ipv6_addr_any(&inet6_sk(sk)->rcv_saddr);
-
-                       /* NOTE: we return listeners even if bound to
-                        * 0.0.0.0, those are filtered out in
-                        * xt_socket, since xt_TPROXY needs 0 bound
-                        * listeners too */
-                       if ((lookup_type == NFT_LOOKUP_ESTABLISHED && (!connected || wildcard)) ||
-                           (lookup_type == NFT_LOOKUP_LISTENER && connected)) {
-                               sock_put(sk);
-                               sk = NULL;
-                       }
-               }
-               break;
-       default:
-               WARN_ON(1);
-               sk = NULL;
-       }
-
-       pr_debug("tproxy socket lookup: proto %u %pI6:%u -> %pI6:%u, lookup type: %d, sock %p\n",
-                protocol, saddr, ntohs(sport), daddr, ntohs(dport), lookup_type, sk);
-
-       return sk;
-}
-#endif
-
-/* assign a socket to the skb -- consumes sk */
-void
-nf_tproxy_assign_sock(struct sk_buff *skb, struct sock *sk);
-
-#endif
index 86267a529514e2f1ca0e181df54fe62f5a1e0863..aff88ba9139121445bfca2169e3f39b71f354fa6 100644 (file)
@@ -15,6 +15,8 @@ int nfqnl_ct_put(struct sk_buff *skb, struct nf_conn *ct,
                 enum ip_conntrack_info ctinfo);
 void nfqnl_ct_seq_adjust(struct sk_buff *skb, struct nf_conn *ct,
                         enum ip_conntrack_info ctinfo, int diff);
+int nfqnl_attach_expect(struct nf_conn *ct, const struct nlattr *attr,
+                       u32 portid, u32 report);
 #else
 inline struct nf_conn *
 nfqnl_ct_get(struct sk_buff *entskb, size_t *size, enum ip_conntrack_info *ctinfo)
@@ -39,5 +41,11 @@ inline void nfqnl_ct_seq_adjust(struct sk_buff *skb, struct nf_conn *ct,
                                enum ip_conntrack_info ctinfo, int diff)
 {
 }
+
+inline int nfqnl_attach_expect(struct nf_conn *ct, const struct nlattr *attr,
+                              u32 portid, u32 report)
+{
+       return 0;
+}
 #endif /* NF_CONNTRACK */
 #endif
index 2ba9de89e8ec778990e8b2ff5183fd3e97eee1aa..bf2ec2202c5698b1bb73e22df60b4a3bc9d95c01 100644 (file)
@@ -77,5 +77,6 @@ struct netns_ipv4 {
        struct fib_rules_ops    *mr_rules_ops;
 #endif
 #endif
+       atomic_t        rt_genid;
 };
 #endif
index 005e2c2e39a9022bad13f4205343263f1821cb22..0fb2401197c51ecf9dd041066975cf5f0bb6a264 100644 (file)
@@ -72,6 +72,7 @@ struct netns_ipv6 {
 #endif
 #endif
        atomic_t                dev_addr_genid;
+       atomic_t                rt_genid;
 };
 
 #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
index 50ab8c26ab59b7ebe840e50d6f696d16f32969b8..099d02782e22274fb9f7b52abe296cb17d14e45d 100644 (file)
@@ -25,11 +25,7 @@ struct netprio_map {
        u32 priomap[];
 };
 
-struct cgroup_netprio_state {
-       struct cgroup_subsys_state css;
-};
-
-extern void sock_update_netprioidx(struct sock *sk);
+void sock_update_netprioidx(struct sock *sk);
 
 #if IS_BUILTIN(CONFIG_NETPRIO_CGROUP)
 
@@ -39,7 +35,7 @@ static inline u32 task_netprioidx(struct task_struct *p)
        u32 idx;
 
        rcu_read_lock();
-       css = task_subsys_state(p, net_prio_subsys_id);
+       css = task_css(p, net_prio_subsys_id);
        idx = css->cgroup->id;
        rcu_read_unlock();
        return idx;
@@ -53,7 +49,7 @@ static inline u32 task_netprioidx(struct task_struct *p)
        u32 idx = 0;
 
        rcu_read_lock();
-       css = task_subsys_state(p, net_prio_subsys_id);
+       css = task_css(p, net_prio_subsys_id);
        if (css)
                idx = css->cgroup->id;
        rcu_read_unlock();
index 5f286b726bb691b3f62e3b6800870ea940a3d65f..f68ee68e4e3e97cd055bc8cc760ab6bba9a11a4b 100644 (file)
@@ -224,6 +224,9 @@ int nfc_set_remote_general_bytes(struct nfc_dev *dev,
                                 u8 *gt, u8 gt_len);
 u8 *nfc_get_local_general_bytes(struct nfc_dev *dev, size_t *gb_len);
 
+int nfc_fw_download_done(struct nfc_dev *dev, const char *firmware_name,
+                        u32 result);
+
 int nfc_targets_found(struct nfc_dev *dev,
                      struct nfc_target *targets, int ntargets);
 int nfc_target_lost(struct nfc_dev *dev, u32 target_idx);
index 13174509cdfd38485a48f6fb1055f285a7306936..2ebef77a2f9a39e770237c19ebf2a7bc7bd324f3 100644 (file)
@@ -14,8 +14,8 @@ struct tcf_walker {
        int     (*fn)(struct tcf_proto *, unsigned long node, struct tcf_walker *);
 };
 
-extern int register_tcf_proto_ops(struct tcf_proto_ops *ops);
-extern int unregister_tcf_proto_ops(struct tcf_proto_ops *ops);
+int register_tcf_proto_ops(struct tcf_proto_ops *ops);
+int unregister_tcf_proto_ops(struct tcf_proto_ops *ops);
 
 static inline unsigned long
 __cls_set_class(unsigned long *clp, unsigned long cl)
@@ -126,17 +126,17 @@ tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts,
        return 0;
 }
 
-extern int tcf_exts_validate(struct net *net, struct tcf_proto *tp,
-                            struct nlattr **tb, struct nlattr *rate_tlv,
-                            struct tcf_exts *exts,
-                            const struct tcf_ext_map *map);
-extern void tcf_exts_destroy(struct tcf_proto *tp, struct tcf_exts *exts);
-extern void tcf_exts_change(struct tcf_proto *tp, struct tcf_exts *dst,
-                            struct tcf_exts *src);
-extern int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts,
-                        const struct tcf_ext_map *map);
-extern int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts,
-                              const struct tcf_ext_map *map);
+int tcf_exts_validate(struct net *net, struct tcf_proto *tp,
+                     struct nlattr **tb, struct nlattr *rate_tlv,
+                     struct tcf_exts *exts,
+                     const struct tcf_ext_map *map);
+void tcf_exts_destroy(struct tcf_proto *tp, struct tcf_exts *exts);
+void tcf_exts_change(struct tcf_proto *tp, struct tcf_exts *dst,
+                    struct tcf_exts *src);
+int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts,
+                 const struct tcf_ext_map *map);
+int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts,
+                       const struct tcf_ext_map *map);
 
 /**
  * struct tcf_pkt_info - packet information
@@ -239,14 +239,14 @@ struct tcf_ematch_ops {
        struct list_head        link;
 };
 
-extern int tcf_em_register(struct tcf_ematch_ops *);
-extern void tcf_em_unregister(struct tcf_ematch_ops *);
-extern int tcf_em_tree_validate(struct tcf_proto *, struct nlattr *,
-                               struct tcf_ematch_tree *);
-extern void tcf_em_tree_destroy(struct tcf_proto *, struct tcf_ematch_tree *);
-extern int tcf_em_tree_dump(struct sk_buff *, struct tcf_ematch_tree *, int);
-extern int __tcf_em_tree_match(struct sk_buff *, struct tcf_ematch_tree *,
-                              struct tcf_pkt_info *);
+int tcf_em_register(struct tcf_ematch_ops *);
+void tcf_em_unregister(struct tcf_ematch_ops *);
+int tcf_em_tree_validate(struct tcf_proto *, struct nlattr *,
+                        struct tcf_ematch_tree *);
+void tcf_em_tree_destroy(struct tcf_proto *, struct tcf_ematch_tree *);
+int tcf_em_tree_dump(struct sk_buff *, struct tcf_ematch_tree *, int);
+int __tcf_em_tree_match(struct sk_buff *, struct tcf_ematch_tree *,
+                       struct tcf_pkt_info *);
 
 /**
  * tcf_em_tree_change - replace ematch tree of a running classifier
index 388bf8b6d06068558726943fa58d44a91ab01374..f7c24f8fbdc5c61208a26a11123f33d4d6d385df 100644 (file)
@@ -64,8 +64,8 @@ struct qdisc_watchdog {
        struct Qdisc    *qdisc;
 };
 
-extern void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc);
-extern void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires);
+void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc);
+void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires);
 
 static inline void qdisc_watchdog_schedule(struct qdisc_watchdog *wd,
                                           psched_time_t expires)
@@ -73,31 +73,31 @@ static inline void qdisc_watchdog_schedule(struct qdisc_watchdog *wd,
        qdisc_watchdog_schedule_ns(wd, PSCHED_TICKS2NS(expires));
 }
 
-extern void qdisc_watchdog_cancel(struct qdisc_watchdog *wd);
+void qdisc_watchdog_cancel(struct qdisc_watchdog *wd);
 
 extern struct Qdisc_ops pfifo_qdisc_ops;
 extern struct Qdisc_ops bfifo_qdisc_ops;
 extern struct Qdisc_ops pfifo_head_drop_qdisc_ops;
 
-extern int fifo_set_limit(struct Qdisc *q, unsigned int limit);
-extern struct Qdisc *fifo_create_dflt(struct Qdisc *sch, struct Qdisc_ops *ops,
-                                     unsigned int limit);
-
-extern int register_qdisc(struct Qdisc_ops *qops);
-extern int unregister_qdisc(struct Qdisc_ops *qops);
-extern void qdisc_list_del(struct Qdisc *q);
-extern struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle);
-extern struct Qdisc *qdisc_lookup_class(struct net_device *dev, u32 handle);
-extern struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
-               struct nlattr *tab);
-extern void qdisc_put_rtab(struct qdisc_rate_table *tab);
-extern void qdisc_put_stab(struct qdisc_size_table *tab);
-extern void qdisc_warn_nonwc(char *txt, struct Qdisc *qdisc);
-extern int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
-                          struct net_device *dev, struct netdev_queue *txq,
-                          spinlock_t *root_lock);
-
-extern void __qdisc_run(struct Qdisc *q);
+int fifo_set_limit(struct Qdisc *q, unsigned int limit);
+struct Qdisc *fifo_create_dflt(struct Qdisc *sch, struct Qdisc_ops *ops,
+                              unsigned int limit);
+
+int register_qdisc(struct Qdisc_ops *qops);
+int unregister_qdisc(struct Qdisc_ops *qops);
+void qdisc_list_del(struct Qdisc *q);
+struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle);
+struct Qdisc *qdisc_lookup_class(struct net_device *dev, u32 handle);
+struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
+                                       struct nlattr *tab);
+void qdisc_put_rtab(struct qdisc_rate_table *tab);
+void qdisc_put_stab(struct qdisc_size_table *tab);
+void qdisc_warn_nonwc(char *txt, struct Qdisc *qdisc);
+int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
+                   struct net_device *dev, struct netdev_queue *txq,
+                   spinlock_t *root_lock);
+
+void __qdisc_run(struct Qdisc *q);
 
 static inline void qdisc_run(struct Qdisc *q)
 {
@@ -105,10 +105,10 @@ static inline void qdisc_run(struct Qdisc *q)
                __qdisc_run(q);
 }
 
-extern int tc_classify_compat(struct sk_buff *skb, const struct tcf_proto *tp,
-                             struct tcf_result *res);
-extern int tc_classify(struct sk_buff *skb, const struct tcf_proto *tp,
+int tc_classify_compat(struct sk_buff *skb, const struct tcf_proto *tp,
                       struct tcf_result *res);
+int tc_classify(struct sk_buff *skb, const struct tcf_proto *tp,
+               struct tcf_result *res);
 
 /* Calculate maximal size of packet seen by hard_start_xmit
    routine of this device.
index 2ea40c1b5e009746dacb8f146fdcc37309aa7ff3..afdeeb5bec251ac773f03d8c7f0b830c9fa137e2 100644 (file)
@@ -317,4 +317,12 @@ static inline int ip4_dst_hoplimit(const struct dst_entry *dst)
        return hoplimit;
 }
 
+static inline int ip_skb_dst_mtu(struct sk_buff *skb)
+{
+       struct inet_sock *inet = skb->sk ? inet_sk(skb->sk) : NULL;
+
+       return (inet && inet->pmtudisc == IP_PMTUDISC_PROBE) ?
+              skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb));
+}
+
 #endif /* _ROUTE_H */
index e5ae0c50fa9c5c481f991de92efd5e862d5bca67..76368c9d4503c124df44a5ed56fc942782209db8 100644 (file)
@@ -350,30 +350,32 @@ qdisc_class_find(const struct Qdisc_class_hash *hash, u32 id)
        return NULL;
 }
 
-extern int qdisc_class_hash_init(struct Qdisc_class_hash *);
-extern void qdisc_class_hash_insert(struct Qdisc_class_hash *, struct Qdisc_class_common *);
-extern void qdisc_class_hash_remove(struct Qdisc_class_hash *, struct Qdisc_class_common *);
-extern void qdisc_class_hash_grow(struct Qdisc *, struct Qdisc_class_hash *);
-extern void qdisc_class_hash_destroy(struct Qdisc_class_hash *);
-
-extern void dev_init_scheduler(struct net_device *dev);
-extern void dev_shutdown(struct net_device *dev);
-extern void dev_activate(struct net_device *dev);
-extern void dev_deactivate(struct net_device *dev);
-extern void dev_deactivate_many(struct list_head *head);
-extern struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
-                                    struct Qdisc *qdisc);
-extern void qdisc_reset(struct Qdisc *qdisc);
-extern void qdisc_destroy(struct Qdisc *qdisc);
-extern void qdisc_tree_decrease_qlen(struct Qdisc *qdisc, unsigned int n);
-extern struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
-                                struct Qdisc_ops *ops);
-extern struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
-                                      struct Qdisc_ops *ops, u32 parentid);
-extern void __qdisc_calculate_pkt_len(struct sk_buff *skb,
-                                     const struct qdisc_size_table *stab);
-extern void tcf_destroy(struct tcf_proto *tp);
-extern void tcf_destroy_chain(struct tcf_proto **fl);
+int qdisc_class_hash_init(struct Qdisc_class_hash *);
+void qdisc_class_hash_insert(struct Qdisc_class_hash *,
+                            struct Qdisc_class_common *);
+void qdisc_class_hash_remove(struct Qdisc_class_hash *,
+                            struct Qdisc_class_common *);
+void qdisc_class_hash_grow(struct Qdisc *, struct Qdisc_class_hash *);
+void qdisc_class_hash_destroy(struct Qdisc_class_hash *);
+
+void dev_init_scheduler(struct net_device *dev);
+void dev_shutdown(struct net_device *dev);
+void dev_activate(struct net_device *dev);
+void dev_deactivate(struct net_device *dev);
+void dev_deactivate_many(struct list_head *head);
+struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
+                             struct Qdisc *qdisc);
+void qdisc_reset(struct Qdisc *qdisc);
+void qdisc_destroy(struct Qdisc *qdisc);
+void qdisc_tree_decrease_qlen(struct Qdisc *qdisc, unsigned int n);
+struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
+                         struct Qdisc_ops *ops);
+struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
+                               struct Qdisc_ops *ops, u32 parentid);
+void __qdisc_calculate_pkt_len(struct sk_buff *skb,
+                              const struct qdisc_size_table *stab);
+void tcf_destroy(struct tcf_proto *tp);
+void tcf_destroy_chain(struct tcf_proto **fl);
 
 /* Reset all TX qdiscs greater then index of a device.  */
 static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i)
@@ -698,7 +700,8 @@ static inline u64 psched_l2t_ns(const struct psched_ratecfg *r,
        return ((u64)len * r->mult) >> r->shift;
 }
 
-extern void psched_ratecfg_precompute(struct psched_ratecfg *r, const struct tc_ratespec *conf);
+void psched_ratecfg_precompute(struct psched_ratecfg *r,
+                              const struct tc_ratespec *conf);
 
 static inline void psched_ratecfg_getrate(struct tc_ratespec *res,
                                          const struct psched_ratecfg *r)
index 49bc9577c61efbfcb8caf37e686def5f3de17c81..aa80bef3c9d5392b8cf472013c4f1da8f7de87f4 100644 (file)
  *
  * Please send any bug reports or fixes you make to the
  * email address(es):
- *    lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- *    http://www.sf.net/projects/lksctp
+ *    lksctp developers <linux-sctp@vger.kernel.org>
  *
  * Written or modified by:
  *   Vlad Yasevich     <vladislav.yasevich@hp.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
  */
 
 #ifndef __sctp_auth_h__
index 0cb08e6fb6df9eacfbb94ac53e9b81f51ead993e..259924d63ba6a6f0ab71f52ed0f9e7a62c6bfe1b 100644 (file)
  *
  * Please send any bug reports or fixes you make to the
  * email address(es):
- *    lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- *    http://www.sf.net/projects/lksctp
+ *    lksctp developers <linux-sctp@vger.kernel.org>
  *
  * Written or modified by:
  *    Dinakaran Joseph
@@ -37,9 +34,6 @@
  *
  * Rewritten to use libcrc32c by:
  *    Vlad Yasevich <vladislav.yasevich@hp.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
  */
 
 #ifndef __sctp_checksum_h__
@@ -85,4 +79,19 @@ static inline __le32 sctp_end_cksum(__u32 crc32)
        return cpu_to_le32(~crc32);
 }
 
+/* Calculate the CRC32C checksum of an SCTP packet.  */
+static inline __le32 sctp_compute_cksum(const struct sk_buff *skb,
+                                       unsigned int offset)
+{
+       const struct sk_buff *iter;
+
+       __u32 crc32 = sctp_start_cksum(skb->data + offset,
+                                      skb_headlen(skb) - offset);
+       skb_walk_frags(skb, iter)
+               crc32 = sctp_update_cksum((__u8 *) iter->data,
+                                         skb_headlen(iter), crc32);
+
+       return sctp_end_cksum(crc32);
+}
+
 #endif /* __sctp_checksum_h__ */
index 35247271e5571153110fa7759e6c24c13c1114aa..832f2191489c946bebd68ce8b6cdfa92c0086979 100644 (file)
  * the Free Software Foundation, 59 Temple Place - Suite 330,
  * Boston, MA 02111-1307, USA.
  *
- * Please send any bug reports or fixes you make to one of the
- * following email addresses:
+ * Please send any bug reports or fixes you make to the
+ * email address(es):
+ *    lksctp developers <linux-sctp@vger.kernel.org>
  *
- * La Monte H.P. Yarroll <piggy@acm.org>
- * Karl Knutson <karl@athena.chicago.il.us>
- * Ardelle Fan <ardelle.fan@intel.com>
- * Sridhar Samudrala <sri@us.ibm.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
+ * Written or modified by:
+ *   La Monte H.P. Yarroll <piggy@acm.org>
+ *   Karl Knutson <karl@athena.chicago.il.us>
+ *   Ardelle Fan <ardelle.fan@intel.com>
+ *   Sridhar Samudrala <sri@us.ibm.com>
  */
 
-
 #ifndef __net_sctp_command_h__
 #define __net_sctp_command_h__
 
index ca50e0751e47aabda8beac5a8af9eeff07dc88db..2f0a565a0fd57ee6914b60512d531fb057729d83 100644 (file)
  *
  * Please send any bug reports or fixes you make to the
  * email address(es):
- *    lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- *    http://www.sf.net/projects/lksctp
+ *    lksctp developers <linux-sctp@vger.kernel.org>
  *
  * Written or modified by:
  *   La Monte H.P. Yarroll <piggy@acm.org>
@@ -39,9 +36,6 @@
  *   Xingang Guo           <xingang.guo@intel.com>
  *   Sridhar Samudrala     <samudrala@us.ibm.com>
  *   Daisy Chang           <daisyc@us.ibm.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
  */
 
 #ifndef __sctp_constants_h__
index d8e37ecea691ae2cc9a578063e304167b36e5740..3794c5ad20fef72a960c34323dab275d6f26bac7 100644 (file)
  *
  * Please send any bug reports or fixes you make to the
  * email address(es):
- *    lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- *    http://www.sf.net/projects/lksctp
+ *    lksctp developers <linux-sctp@vger.kernel.org>
  *
  * Written or modified by:
  *    La Monte H.P. Yarroll <piggy@acm.org>
@@ -41,9 +38,6 @@
  *    Ardelle Fan           <ardelle.fan@intel.com>
  *    Ryan Layer            <rmlayer@us.ibm.com>
  *    Kevin Gao             <kevin.gao@intel.com> 
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
  */
 
 #ifndef __net_sctp_h__
@@ -613,7 +607,7 @@ static inline void sctp_v4_map_v6(union sctp_addr *addr)
  */
 static inline struct dst_entry *sctp_transport_dst_check(struct sctp_transport *t)
 {
-       if (t->dst && !dst_check(t->dst, 0)) {
+       if (t->dst && !dst_check(t->dst, t->dst_cookie)) {
                dst_release(t->dst);
                t->dst = NULL;
        }
index 2a82d1384706c61a6757db53edf8d81a53cbc7de..4ef75af340b633c7e8a5fabeb34dd88cdbd73ef2 100644 (file)
  *
  * Please send any bug reports or fixes you make to the
  * email addresses:
- *    lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- *    http://www.sf.net/projects/lksctp
+ *    lksctp developers <linux-sctp@vger.kernel.org>
  *
  * Written or modified by:
  *    La Monte H.P. Yarroll <piggy@acm.org>
@@ -42,9 +39,6 @@
  *    Daisy Chang <daisyc@us.ibm.com>
  *    Ardelle Fan <ardelle.fan@intel.com>
  *    Kevin Gao <kevin.gao@intel.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
  */
 
 #include <linux/types.h>
index e745c92a153241d6f261a646b40de71a82f2d98b..422db6cc3214b77264eccec4288ece4315f045e7 100644 (file)
  *
  * Please send any bug reports or fixes you make to the
  * email addresses:
- *    lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- *    http://www.sf.net/projects/lksctp
+ *    lksctp developers <linux-sctp@vger.kernel.org>
  *
  * Written or modified by:
  *    Randall Stewart      <randall@sctp.chicago.il.us>
@@ -46,9 +43,6 @@
  *    Ryan Layer           <rmlayer@us.ibm.com>
  *    Anup Pemmaiah        <pemmaiah@cc.usu.edu>
  *    Kevin Gao             <kevin.gao@intel.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
  */
 
 #ifndef __sctp_structs_h__
@@ -782,6 +776,7 @@ struct sctp_transport {
 
        /* Has this transport moved the ctsn since we last sacked */
        __u32 sack_generation;
+       u32 dst_cookie;
 
        struct flowi fl;
 
index 2c5d2b4d5d1eb51542df26094700250ab6191070..54bbbe547303cc8d37c88d736e9e95894e054a69 100644 (file)
  *
  * Please send any bug reports or fixes you make to the
  * email address(es):
- *    lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- *    http://www.sf.net/projects/lksctp
+ *    lksctp developers <linux-sctp@vger.kernel.org>
  *
  * Written or modified by:
  *   Jon Grimm             <jgrimm@us.ibm.com>
  *   La Monte H.P. Yarroll <piggy@acm.org>
  *   Karl Knutson          <karl@athena.chicago.il.us>
  *   Sridhar Samudrala     <sri@us.ibm.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
  */
 #include <net/sctp/constants.h>
 
index ca4693b4e09e4bb879c0ef8e483247c913bffb6b..27b9f5c90153610e75d9a7023b808e5b61174c33 100644 (file)
  *
  * Please send any bug reports or fixes you make to the
  * email address(es):
- *    lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- *    http://www.sf.net/projects/lksctp
+ *    lksctp developers <linux-sctp@vger.kernel.org>
  *
  * Written or modified by:
  *   Jon Grimm             <jgrimm@us.ibm.com>
  *   La Monte H.P. Yarroll <piggy@acm.org>
  *   Karl Knutson          <karl@athena.chicago.il.us>
  *   Sridhar Samudrala     <sri@us.ibm.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
  */
 
 #ifndef __sctp_ulpevent_h__
index 00e50ba3f24b71a2db54d810ee9889f674444a7b..b0cf5d54d717a553a9135044237c2db563253b23 100644 (file)
  *
  * Please send any bug reports or fixes you make to the
  * email addresses:
- *    lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- *    http://www.sf.net/projects/lksctp
+ *    lksctp developers <linux-sctp@vger.kernel.org>
  *
  * Written or modified by:
  *   Jon Grimm             <jgrimm@us.ibm.com>
  *   La Monte H.P. Yarroll <piggy@acm.org>
  *   Sridhar Samudrala     <sri@us.ibm.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
  */
 
 #ifndef __sctp_ulpqueue_h__
index 31d5cfbb51ec9f52b57c0c339b9b5e92804701cb..e4bbcbfd07ea4d15f0ef63f3d765d261d0c25a68 100644 (file)
@@ -746,11 +746,6 @@ static inline int sk_stream_wspace(const struct sock *sk)
 
 extern void sk_stream_write_space(struct sock *sk);
 
-static inline bool sk_stream_memory_free(const struct sock *sk)
-{
-       return sk->sk_wmem_queued < sk->sk_sndbuf;
-}
-
 /* OOB backlog add */
 static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb)
 {
@@ -950,6 +945,7 @@ struct proto {
        unsigned int            inuse_idx;
 #endif
 
+       bool                    (*stream_memory_free)(const struct sock *sk);
        /* Memory pressure */
        void                    (*enter_memory_pressure)(struct sock *sk);
        atomic_long_t           *memory_allocated;      /* Current allocated memory. */
@@ -1088,6 +1084,21 @@ static inline struct cg_proto *parent_cg_proto(struct proto *proto,
 }
 #endif
 
+static inline bool sk_stream_memory_free(const struct sock *sk)
+{
+       if (sk->sk_wmem_queued >= sk->sk_sndbuf)
+               return false;
+
+       return sk->sk_prot->stream_memory_free ?
+               sk->sk_prot->stream_memory_free(sk) : true;
+}
+
+static inline bool sk_stream_is_writeable(const struct sock *sk)
+{
+       return sk_stream_wspace(sk) >= sk_stream_min_wspace(sk) &&
+              sk_stream_memory_free(sk);
+}
+
 
 static inline bool sk_has_memory_pressure(const struct sock *sk)
 {
@@ -1509,6 +1520,7 @@ extern struct sk_buff             *sock_rmalloc(struct sock *sk,
                                              unsigned long size, int force,
                                              gfp_t priority);
 extern void                    sock_wfree(struct sk_buff *skb);
+extern void                    skb_orphan_partial(struct sk_buff *skb);
 extern void                    sock_rfree(struct sk_buff *skb);
 extern void                    sock_edemux(struct sk_buff *skb);
 
@@ -1527,7 +1539,8 @@ extern struct sk_buff             *sock_alloc_send_pskb(struct sock *sk,
                                                      unsigned long header_len,
                                                      unsigned long data_len,
                                                      int noblock,
-                                                     int *errcode);
+                                                     int *errcode,
+                                                     int max_page_order);
 extern void *sock_kmalloc(struct sock *sk, int size,
                          gfp_t priority);
 extern void sock_kfree_s(struct sock *sk, void *mem, int size);
@@ -2249,6 +2262,8 @@ static inline struct sock *skb_steal_sock(struct sk_buff *skb)
 extern void sock_enable_timestamp(struct sock *sk, int flag);
 extern int sock_get_timestamp(struct sock *, struct timeval __user *);
 extern int sock_get_timestampns(struct sock *, struct timespec __user *);
+extern int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
+                             int level, int type);
 
 /*
  *     Enable debug/info messages
index d1980054ec75b92c716097c377478b02fdee3762..09cb5c11ceea34b54f46664feef3961d53c57392 100644 (file)
@@ -192,10 +192,6 @@ extern void tcp_time_wait(struct sock *sk, int state, int timeo);
 #define TCPOLEN_TIMESTAMP      10
 #define TCPOLEN_MD5SIG         18
 #define TCPOLEN_EXP_FASTOPEN_BASE  4
-#define TCPOLEN_COOKIE_BASE    2       /* Cookie-less header extension */
-#define TCPOLEN_COOKIE_PAIR    3       /* Cookie pair header extension */
-#define TCPOLEN_COOKIE_MIN     (TCPOLEN_COOKIE_BASE+TCP_COOKIE_MIN)
-#define TCPOLEN_COOKIE_MAX     (TCPOLEN_COOKIE_BASE+TCP_COOKIE_MAX)
 
 /* But this is what stacks really send out. */
 #define TCPOLEN_TSTAMP_ALIGNED         12
@@ -284,6 +280,7 @@ extern int sysctl_tcp_thin_dupack;
 extern int sysctl_tcp_early_retrans;
 extern int sysctl_tcp_limit_output_bytes;
 extern int sysctl_tcp_challenge_ack_limit;
+extern unsigned int sysctl_tcp_notsent_lowat;
 
 extern atomic_long_t tcp_memory_allocated;
 extern struct percpu_counter tcp_sockets_allocated;
@@ -591,7 +588,6 @@ extern void tcp_initialize_rcv_mss(struct sock *sk);
 extern int tcp_mtu_to_mss(struct sock *sk, int pmtu);
 extern int tcp_mss_to_mtu(struct sock *sk, int mss);
 extern void tcp_mtup_init(struct sock *sk);
-extern void tcp_valid_rtt_meas(struct sock *sk, u32 seq_rtt);
 extern void tcp_init_buffer_space(struct sock *sk);
 
 static inline void tcp_bound_rto(const struct sock *sk)
@@ -1094,15 +1090,6 @@ static inline void tcp_openreq_init(struct request_sock *req,
        ireq->loc_port = tcp_hdr(skb)->dest;
 }
 
-/* Compute time elapsed between SYNACK and the ACK completing 3WHS */
-static inline void tcp_synack_rtt_meas(struct sock *sk,
-                                      struct request_sock *req)
-{
-       if (tcp_rsk(req)->snt_synack)
-               tcp_valid_rtt_meas(sk,
-                   tcp_time_stamp - tcp_rsk(req)->snt_synack);
-}
-
 extern void tcp_enter_memory_pressure(struct sock *sk);
 
 static inline int keepalive_intvl_when(const struct tcp_sock *tp)
@@ -1313,7 +1300,8 @@ void tcp_free_fastopen_req(struct tcp_sock *tp);
 
 extern struct tcp_fastopen_context __rcu *tcp_fastopen_ctx;
 int tcp_fastopen_reset_cipher(void *key, unsigned int len);
-void tcp_fastopen_cookie_gen(__be32 addr, struct tcp_fastopen_cookie *foc);
+extern void tcp_fastopen_cookie_gen(__be32 src, __be32 dst,
+                                   struct tcp_fastopen_cookie *foc);
 
 #define TCP_FASTOPEN_KEY_LENGTH 16
 
@@ -1549,6 +1537,19 @@ extern int tcp_gro_complete(struct sk_buff *skb);
 extern void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr,
                                __be32 daddr);
 
+static inline u32 tcp_notsent_lowat(const struct tcp_sock *tp)
+{
+       return tp->notsent_lowat ?: sysctl_tcp_notsent_lowat;
+}
+
+static inline bool tcp_stream_memory_free(const struct sock *sk)
+{
+       const struct tcp_sock *tp = tcp_sk(sk);
+       u32 notsent_bytes = tp->write_seq - tp->snd_nxt;
+
+       return notsent_bytes < tcp_notsent_lowat(tp);
+}
+
 #ifdef CONFIG_PROC_FS
 extern int tcp4_proc_init(void);
 extern void tcp4_proc_exit(void);
index 74c10ec5e74fa12c74801f48e2716ec2d9518588..ef2e0b7843a0036c1b31d8e67e78b624bf05f44f 100644 (file)
@@ -183,6 +183,7 @@ extern int udp_sendmsg(struct kiocb *iocb, struct sock *sk,
                            struct msghdr *msg, size_t len);
 extern int udp_push_pending_frames(struct sock *sk);
 extern void udp_flush_pending_frames(struct sock *sk);
+extern void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst);
 extern int udp_rcv(struct sk_buff *skb);
 extern int udp_ioctl(struct sock *sk, int cmd, unsigned long arg);
 extern int udp_disconnect(struct sock *sk, int flags);
diff --git a/include/net/vxlan.h b/include/net/vxlan.h
new file mode 100644 (file)
index 0000000..ad342e3
--- /dev/null
@@ -0,0 +1,39 @@
+#ifndef __NET_VXLAN_H
+#define __NET_VXLAN_H 1
+
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/udp.h>
+
+#define VNI_HASH_BITS  10
+#define VNI_HASH_SIZE  (1<<VNI_HASH_BITS)
+
+struct vxlan_sock;
+typedef void (vxlan_rcv_t)(struct vxlan_sock *vh, struct sk_buff *skb, __be32 key);
+
+/* per UDP socket information */
+struct vxlan_sock {
+       struct hlist_node hlist;
+       vxlan_rcv_t      *rcv;
+       void             *data;
+       struct work_struct del_work;
+       struct socket    *sock;
+       struct rcu_head   rcu;
+       struct hlist_head vni_list[VNI_HASH_SIZE];
+       atomic_t          refcnt;
+};
+
+struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
+                                 vxlan_rcv_t *rcv, void *data,
+                                 bool no_share);
+
+void vxlan_sock_release(struct vxlan_sock *vs);
+
+int vxlan_xmit_skb(struct net *net, struct vxlan_sock *vs,
+                  struct rtable *rt, struct sk_buff *skb,
+                  __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df,
+                  __be16 src_port, __be16 dst_port, __be32 vni);
+
+__be16 vxlan_src_port(__u16 port_min, __u16 port_max, struct sk_buff *skb);
+
+#endif
index 94ce082b29dcdba2d726cbc90a0c4510fd38fb2b..2af355b59e144ac0aa21bfd207ed4a73f007ac61 100644 (file)
@@ -20,6 +20,7 @@
 #include <net/route.h>
 #include <net/ipv6.h>
 #include <net/ip6_fib.h>
+#include <net/ip6_route.h>
 #include <net/flow.h>
 
 #include <linux/interrupt.h>
@@ -341,10 +342,13 @@ struct xfrm_state_afinfo {
                                                  struct sk_buff *skb);
        int                     (*transport_finish)(struct sk_buff *skb,
                                                    int async);
+       void                    (*local_error)(struct sk_buff *skb, u32 mtu);
 };
 
 extern int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo);
 extern int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
+extern struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family);
+extern void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo);
 
 extern void xfrm_state_delete_tunnel(struct xfrm_state *x);
 
@@ -1477,6 +1481,7 @@ extern int xfrm_input_resume(struct sk_buff *skb, int nexthdr);
 extern int xfrm_output_resume(struct sk_buff *skb, int err);
 extern int xfrm_output(struct sk_buff *skb);
 extern int xfrm_inner_extract_output(struct xfrm_state *x, struct sk_buff *skb);
+extern void xfrm_local_error(struct sk_buff *skb, int mtu);
 extern int xfrm4_extract_header(struct sk_buff *skb);
 extern int xfrm4_extract_input(struct xfrm_state *x, struct sk_buff *skb);
 extern int xfrm4_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi,
@@ -1497,6 +1502,7 @@ extern int xfrm4_tunnel_register(struct xfrm_tunnel *handler, unsigned short fam
 extern int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family);
 extern int xfrm4_mode_tunnel_input_register(struct xfrm_tunnel *handler);
 extern int xfrm4_mode_tunnel_input_deregister(struct xfrm_tunnel *handler);
+extern void xfrm4_local_error(struct sk_buff *skb, u32 mtu);
 extern int xfrm6_extract_header(struct sk_buff *skb);
 extern int xfrm6_extract_input(struct xfrm_state *x, struct sk_buff *skb);
 extern int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi);
@@ -1514,6 +1520,7 @@ extern int xfrm6_output(struct sk_buff *skb);
 extern int xfrm6_output_finish(struct sk_buff *skb);
 extern int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb,
                                 u8 **prevhdr);
+extern void xfrm6_local_error(struct sk_buff *skb, u32 mtu);
 
 #ifdef CONFIG_XFRM
 extern int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb);
@@ -1548,7 +1555,7 @@ struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8, int dir, u32
 int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info);
 u32 xfrm_get_acqseq(void);
 extern int xfrm_alloc_spi(struct xfrm_state *x, u32 minspi, u32 maxspi);
-struct xfrm_state *xfrm_find_acq(struct net *net, struct xfrm_mark *mark,
+struct xfrm_state *xfrm_find_acq(struct net *net, const struct xfrm_mark *mark,
                                 u8 mode, u32 reqid, u8 proto,
                                 const xfrm_address_t *daddr,
                                 const xfrm_address_t *saddr, int create,
@@ -1717,4 +1724,15 @@ static inline int xfrm_mark_put(struct sk_buff *skb, const struct xfrm_mark *m)
        return ret;
 }
 
+static inline int xfrm_skb_dst_mtu(struct sk_buff *skb)
+{
+       struct sock *sk = skb->sk;
+
+       if (sk && skb->protocol == htons(ETH_P_IPV6))
+               return ip6_skb_dst_mtu(skb);
+       else if (sk && skb->protocol == htons(ETH_P_IP))
+               return ip_skb_dst_mtu(skb);
+       return dst_mtu(skb_dst(skb));
+}
+
 #endif /* _NET_XFRM_H */
index 645c3cedce9ca7abb69c13769a05aff02d4b5dd4..a84d3dfc40765f7ac688bb3e1932e4aa89e544a7 100644 (file)
@@ -635,6 +635,12 @@ enum ib_qp_create_flags {
        IB_QP_CREATE_RESERVED_END               = 1 << 31,
 };
 
+
+/*
+ * Note: users may not call ib_close_qp or ib_destroy_qp from the event_handler
+ * callback to destroy the passed in QP.
+ */
+
 struct ib_qp_init_attr {
        void                  (*event_handler)(struct ib_event *, void *);
        void                   *qp_context;
index 1a046b1595ccd2ad4e24b78f8bb5647aadbee29b..1017e0bdf8baa75beb5ce0a13f852ddd7c683c4d 100644 (file)
@@ -49,8 +49,8 @@ enum iw_cm_event_type {
 struct iw_cm_event {
        enum iw_cm_event_type event;
        int                      status;
-       struct sockaddr_in local_addr;
-       struct sockaddr_in remote_addr;
+       struct sockaddr_storage local_addr;
+       struct sockaddr_storage remote_addr;
        void *private_data;
        void *provider_data;
        u8 private_data_len;
@@ -83,8 +83,8 @@ struct iw_cm_id {
        iw_cm_handler           cm_handler;      /* client callback function */
        void                    *context;        /* client cb context */
        struct ib_device        *device;
-       struct sockaddr_in      local_addr;
-       struct sockaddr_in      remote_addr;
+       struct sockaddr_storage local_addr;
+       struct sockaddr_storage remote_addr;
        void                    *provider_data;  /* provider private data */
        iw_event_handler        event_handler;   /* cb for provider
                                                    events */
index c586617cfa0dffaa2cdce5e548f78fed28e1f63e..2a14f1f02d4f6766e236b288f6173ee7391e78a2 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/rwsem.h>               /* struct rw_semaphore */
 #include <linux/pm.h>                  /* pm_message_t */
 #include <linux/stringify.h>
+#include <linux/printk.h>
 
 /* number of supported soundcards */
 #ifdef CONFIG_SND_DYNAMIC_MINORS
@@ -375,6 +376,11 @@ void __snd_printk(unsigned int level, const char *file, int line,
  */
 #define snd_BUG()              WARN(1, "BUG?\n")
 
+/**
+ * Suppress high rates of output when CONFIG_SND_DEBUG is enabled.
+ */
+#define snd_printd_ratelimit() printk_ratelimit()
+
 /**
  * snd_BUG_ON - debugging check macro
  * @cond: condition to evaluate
@@ -398,6 +404,8 @@ static inline void _snd_printd(int level, const char *format, ...) {}
        unlikely(__ret_warn_on); \
 })
 
+static inline bool snd_printd_ratelimit(void) { return false; }
+
 #endif /* CONFIG_SND_DEBUG */
 
 #ifdef CONFIG_SND_DEBUG_VERBOSE
index 2fd3d251d9a54e4896453508fcd3bcd86ebfbf26..56e818e4a1cbf90270d21c3a67e44ff3a0f32d73 100644 (file)
@@ -6,13 +6,6 @@
 
 /* PCM */
 
-struct pxa2xx_pcm_dma_params {
-       char *name;                     /* stream identifier */
-       u32 dcmd;                       /* DMA descriptor dcmd field */
-       volatile u32 *drcmr;            /* the DMA request channel to use */
-       u32 dev_addr;                   /* device physical address for DMA */
-};
-
 extern int __pxa2xx_pcm_hw_params(struct snd_pcm_substream *substream,
                                struct snd_pcm_hw_params *params);
 extern int __pxa2xx_pcm_hw_free(struct snd_pcm_substream *substream);
diff --git a/include/sound/rcar_snd.h b/include/sound/rcar_snd.h
new file mode 100644 (file)
index 0000000..d35412a
--- /dev/null
@@ -0,0 +1,84 @@
+/*
+ * Renesas R-Car SRU/SCU/SSIU/SSI support
+ *
+ * Copyright (C) 2013 Renesas Solutions Corp.
+ * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef RCAR_SND_H
+#define RCAR_SND_H
+
+#include <linux/sh_clk.h>
+
+#define RSND_GEN1_SRU  0
+#define RSND_GEN1_ADG  1
+#define RSND_GEN1_SSI  2
+
+#define RSND_GEN2_SRU  0
+#define RSND_GEN2_ADG  1
+#define RSND_GEN2_SSIU 2
+#define RSND_GEN2_SSI  3
+
+#define RSND_BASE_MAX  4
+
+/*
+ * flags
+ *
+ * 0xAB000000
+ *
+ * A : clock sharing settings
+ * B : SSI direction
+ */
+#define RSND_SSI_CLK_PIN_SHARE         (1 << 31)
+#define RSND_SSI_CLK_FROM_ADG          (1 << 30) /* clock parent is master */
+#define RSND_SSI_SYNC                  (1 << 29) /* SSI34_sync etc */
+#define RSND_SSI_DEPENDENT             (1 << 28) /* SSI needs SRU/SCU */
+
+#define RSND_SSI_PLAY                  (1 << 24)
+
+#define RSND_SSI_SET(_dai_id, _dma_id, _pio_irq, _flags)       \
+{ .dai_id = _dai_id, .dma_id = _dma_id, .pio_irq = _pio_irq, .flags = _flags }
+#define RSND_SSI_UNUSED \
+{ .dai_id = -1, .dma_id = -1, .pio_irq = -1, .flags = 0 }
+
+struct rsnd_ssi_platform_info {
+       int dai_id;
+       int dma_id;
+       int pio_irq;
+       u32 flags;
+};
+
+/*
+ * flags
+ */
+#define RSND_SCU_USB_HPBIF             (1 << 31) /* it needs RSND_SSI_DEPENDENT */
+
+struct rsnd_scu_platform_info {
+       u32 flags;
+};
+
+/*
+ * flags
+ *
+ * 0x0000000A
+ *
+ * A : generation
+ */
+#define RSND_GEN1      (1 << 0) /* fixme */
+#define RSND_GEN2      (2 << 0) /* fixme */
+
+struct rcar_snd_info {
+       u32 flags;
+       struct rsnd_ssi_platform_info *ssi_info;
+       int ssi_info_nr;
+       struct rsnd_scu_platform_info *scu_info;
+       int scu_info_nr;
+       int (*start)(int id);
+       int (*stop)(int id);
+};
+
+#endif
index 3e479f4e15f5e3469ef0c7f72f6e88174f36ceba..c728d28ae9a581143bbcabd2da3b179274034090 100644 (file)
@@ -70,121 +70,144 @@ struct device;
        .num_kcontrols = 0, .reg = SND_SOC_NOPM, .event = wevent, \
        .event_flags = SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD}
 
+#define SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert) \
+       .reg = wreg, .mask = 1, .shift = wshift, \
+       .on_val = winvert ? 0 : 1, .off_val = winvert ? 1 : 0
+
 /* path domain */
 #define SND_SOC_DAPM_PGA(wname, wreg, wshift, winvert,\
         wcontrols, wncontrols) \
-{      .id = snd_soc_dapm_pga, .name = wname, .reg = wreg, .shift = wshift, \
-       .invert = winvert, .kcontrol_news = wcontrols, .num_kcontrols = wncontrols}
+{      .id = snd_soc_dapm_pga, .name = wname, \
+       SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
+       .kcontrol_news = wcontrols, .num_kcontrols = wncontrols}
 #define SND_SOC_DAPM_OUT_DRV(wname, wreg, wshift, winvert,\
         wcontrols, wncontrols) \
-{      .id = snd_soc_dapm_out_drv, .name = wname, .reg = wreg, .shift = wshift, \
-       .invert = winvert, .kcontrol_news = wcontrols, .num_kcontrols = wncontrols}
+{      .id = snd_soc_dapm_out_drv, .name = wname, \
+       SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
+       .kcontrol_news = wcontrols, .num_kcontrols = wncontrols}
 #define SND_SOC_DAPM_MIXER(wname, wreg, wshift, winvert, \
         wcontrols, wncontrols)\
-{      .id = snd_soc_dapm_mixer, .name = wname, .reg = wreg, .shift = wshift, \
-       .invert = winvert, .kcontrol_news = wcontrols, .num_kcontrols = wncontrols}
+{      .id = snd_soc_dapm_mixer, .name = wname, \
+       SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
+       .kcontrol_news = wcontrols, .num_kcontrols = wncontrols}
 #define SND_SOC_DAPM_MIXER_NAMED_CTL(wname, wreg, wshift, winvert, \
         wcontrols, wncontrols)\
-{       .id = snd_soc_dapm_mixer_named_ctl, .name = wname, .reg = wreg, \
-       .shift = wshift, .invert = winvert, .kcontrol_news = wcontrols, \
-       .num_kcontrols = wncontrols}
+{       .id = snd_soc_dapm_mixer_named_ctl, .name = wname, \
+       SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
+       .kcontrol_news = wcontrols, .num_kcontrols = wncontrols}
 #define SND_SOC_DAPM_MICBIAS(wname, wreg, wshift, winvert) \
-{      .id = snd_soc_dapm_micbias, .name = wname, .reg = wreg, .shift = wshift, \
-       .invert = winvert, .kcontrol_news = NULL, .num_kcontrols = 0}
+{      .id = snd_soc_dapm_micbias, .name = wname, \
+       SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
+       .kcontrol_news = NULL, .num_kcontrols = 0}
 #define SND_SOC_DAPM_SWITCH(wname, wreg, wshift, winvert, wcontrols) \
-{      .id = snd_soc_dapm_switch, .name = wname, .reg = wreg, .shift = wshift, \
-       .invert = winvert, .kcontrol_news = wcontrols, .num_kcontrols = 1}
+{      .id = snd_soc_dapm_switch, .name = wname, \
+       SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
+       .kcontrol_news = wcontrols, .num_kcontrols = 1}
 #define SND_SOC_DAPM_MUX(wname, wreg, wshift, winvert, wcontrols) \
-{      .id = snd_soc_dapm_mux, .name = wname, .reg = wreg, .shift = wshift, \
-       .invert = winvert, .kcontrol_news = wcontrols, .num_kcontrols = 1}
+{      .id = snd_soc_dapm_mux, .name = wname, .reg = wreg, \
+       .kcontrol_news = wcontrols, .num_kcontrols = 1}
 #define SND_SOC_DAPM_VIRT_MUX(wname, wreg, wshift, winvert, wcontrols) \
-{      .id = snd_soc_dapm_virt_mux, .name = wname, .reg = wreg, .shift = wshift, \
-       .invert = winvert, .kcontrol_news = wcontrols, .num_kcontrols = 1}
+{      .id = snd_soc_dapm_virt_mux, .name = wname, \
+       SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
+       .kcontrol_news = wcontrols, .num_kcontrols = 1}
 #define SND_SOC_DAPM_VALUE_MUX(wname, wreg, wshift, winvert, wcontrols) \
-{      .id = snd_soc_dapm_value_mux, .name = wname, .reg = wreg, \
-       .shift = wshift, .invert = winvert, .kcontrol_news = wcontrols, \
-       .num_kcontrols = 1}
+{      .id = snd_soc_dapm_value_mux, .name = wname, \
+       SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
+       .kcontrol_news = wcontrols, .num_kcontrols = 1}
 
 /* Simplified versions of above macros, assuming wncontrols = ARRAY_SIZE(wcontrols) */
 #define SOC_PGA_ARRAY(wname, wreg, wshift, winvert,\
         wcontrols) \
-{      .id = snd_soc_dapm_pga, .name = wname, .reg = wreg, .shift = wshift, \
-       .invert = winvert, .kcontrol_news = wcontrols, .num_kcontrols = ARRAY_SIZE(wcontrols)}
+{      .id = snd_soc_dapm_pga, .name = wname, \
+       SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
+       .kcontrol_news = wcontrols, .num_kcontrols = ARRAY_SIZE(wcontrols)}
 #define SOC_MIXER_ARRAY(wname, wreg, wshift, winvert, \
         wcontrols)\
-{      .id = snd_soc_dapm_mixer, .name = wname, .reg = wreg, .shift = wshift, \
-       .invert = winvert, .kcontrol_news = wcontrols, .num_kcontrols = ARRAY_SIZE(wcontrols)}
+{      .id = snd_soc_dapm_mixer, .name = wname, \
+       SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
+       .kcontrol_news = wcontrols, .num_kcontrols = ARRAY_SIZE(wcontrols)}
 #define SOC_MIXER_NAMED_CTL_ARRAY(wname, wreg, wshift, winvert, \
         wcontrols)\
-{       .id = snd_soc_dapm_mixer_named_ctl, .name = wname, .reg = wreg, \
-       .shift = wshift, .invert = winvert, .kcontrol_news = wcontrols, \
-       .num_kcontrols = ARRAY_SIZE(wcontrols)}
+{       .id = snd_soc_dapm_mixer_named_ctl, .name = wname, \
+       SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
+       .kcontrol_news = wcontrols, .num_kcontrols = ARRAY_SIZE(wcontrols)}
 
 /* path domain with event - event handler must return 0 for success */
 #define SND_SOC_DAPM_PGA_E(wname, wreg, wshift, winvert, wcontrols, \
        wncontrols, wevent, wflags) \
-{      .id = snd_soc_dapm_pga, .name = wname, .reg = wreg, .shift = wshift, \
-       .invert = winvert, .kcontrol_news = wcontrols, .num_kcontrols = wncontrols, \
+{      .id = snd_soc_dapm_pga, .name = wname, \
+       SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
+       .kcontrol_news = wcontrols, .num_kcontrols = wncontrols, \
        .event = wevent, .event_flags = wflags}
 #define SND_SOC_DAPM_OUT_DRV_E(wname, wreg, wshift, winvert, wcontrols, \
        wncontrols, wevent, wflags) \
-{      .id = snd_soc_dapm_out_drv, .name = wname, .reg = wreg, .shift = wshift, \
-       .invert = winvert, .kcontrol_news = wcontrols, .num_kcontrols = wncontrols, \
+{      .id = snd_soc_dapm_out_drv, .name = wname, \
+       SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
+       .kcontrol_news = wcontrols, .num_kcontrols = wncontrols, \
        .event = wevent, .event_flags = wflags}
 #define SND_SOC_DAPM_MIXER_E(wname, wreg, wshift, winvert, wcontrols, \
        wncontrols, wevent, wflags) \
-{      .id = snd_soc_dapm_mixer, .name = wname, .reg = wreg, .shift = wshift, \
-       .invert = winvert, .kcontrol_news = wcontrols, .num_kcontrols = wncontrols, \
+{      .id = snd_soc_dapm_mixer, .name = wname, \
+       SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
+       .kcontrol_news = wcontrols, .num_kcontrols = wncontrols, \
        .event = wevent, .event_flags = wflags}
 #define SND_SOC_DAPM_MIXER_NAMED_CTL_E(wname, wreg, wshift, winvert, \
        wcontrols, wncontrols, wevent, wflags) \
-{       .id = snd_soc_dapm_mixer, .name = wname, .reg = wreg, .shift = wshift, \
-       .invert = winvert, .kcontrol_news = wcontrols, \
+{       .id = snd_soc_dapm_mixer, .name = wname, \
+       SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
+       .kcontrol_news = wcontrols, \
        .num_kcontrols = wncontrols, .event = wevent, .event_flags = wflags}
 #define SND_SOC_DAPM_SWITCH_E(wname, wreg, wshift, winvert, wcontrols, \
        wevent, wflags) \
-{      .id = snd_soc_dapm_switch, .name = wname, .reg = wreg, .shift = wshift, \
-       .invert = winvert, .kcontrol_news = wcontrols, .num_kcontrols = 1, \
+{      .id = snd_soc_dapm_switch, .name = wname, \
+       SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
+       .kcontrol_news = wcontrols, .num_kcontrols = 1, \
        .event = wevent, .event_flags = wflags}
 #define SND_SOC_DAPM_MUX_E(wname, wreg, wshift, winvert, wcontrols, \
        wevent, wflags) \
-{      .id = snd_soc_dapm_mux, .name = wname, .reg = wreg, .shift = wshift, \
-       .invert = winvert, .kcontrol_news = wcontrols, .num_kcontrols = 1, \
+{      .id = snd_soc_dapm_mux, .name = wname, \
+       SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
+       .kcontrol_news = wcontrols, .num_kcontrols = 1, \
        .event = wevent, .event_flags = wflags}
 #define SND_SOC_DAPM_VIRT_MUX_E(wname, wreg, wshift, winvert, wcontrols, \
        wevent, wflags) \
-{      .id = snd_soc_dapm_virt_mux, .name = wname, .reg = wreg, .shift = wshift, \
-       .invert = winvert, .kcontrol_news = wcontrols, .num_kcontrols = 1, \
+{      .id = snd_soc_dapm_virt_mux, .name = wname, \
+       SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
+       .kcontrol_news = wcontrols, .num_kcontrols = 1, \
        .event = wevent, .event_flags = wflags}
 
 /* additional sequencing control within an event type */
 #define SND_SOC_DAPM_PGA_S(wname, wsubseq, wreg, wshift, winvert, \
        wevent, wflags) \
-{      .id = snd_soc_dapm_pga, .name = wname, .reg = wreg, .shift = wshift, \
-       .invert = winvert, .event = wevent, .event_flags = wflags, \
+{      .id = snd_soc_dapm_pga, .name = wname, \
+       SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
+       .event = wevent, .event_flags = wflags, \
        .subseq = wsubseq}
 #define SND_SOC_DAPM_SUPPLY_S(wname, wsubseq, wreg, wshift, winvert, wevent, \
        wflags) \
-{      .id = snd_soc_dapm_supply, .name = wname, .reg = wreg,  \
-       .shift = wshift, .invert = winvert, .event = wevent, \
-       .event_flags = wflags, .subseq = wsubseq}
+{      .id = snd_soc_dapm_supply, .name = wname, \
+       SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
+       .event = wevent, .event_flags = wflags, .subseq = wsubseq}
 
 /* Simplified versions of above macros, assuming wncontrols = ARRAY_SIZE(wcontrols) */
 #define SOC_PGA_E_ARRAY(wname, wreg, wshift, winvert, wcontrols, \
        wevent, wflags) \
-{      .id = snd_soc_dapm_pga, .name = wname, .reg = wreg, .shift = wshift, \
-       .invert = winvert, .kcontrol_news = wcontrols, .num_kcontrols = ARRAY_SIZE(wcontrols), \
+{      .id = snd_soc_dapm_pga, .name = wname, \
+       SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
+       .kcontrol_news = wcontrols, .num_kcontrols = ARRAY_SIZE(wcontrols), \
        .event = wevent, .event_flags = wflags}
 #define SOC_MIXER_E_ARRAY(wname, wreg, wshift, winvert, wcontrols, \
        wevent, wflags) \
-{      .id = snd_soc_dapm_mixer, .name = wname, .reg = wreg, .shift = wshift, \
-       .invert = winvert, .kcontrol_news = wcontrols, .num_kcontrols = ARRAY_SIZE(wcontrols), \
+{      .id = snd_soc_dapm_mixer, .name = wname, \
+       SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
+       .kcontrol_news = wcontrols, .num_kcontrols = ARRAY_SIZE(wcontrols), \
        .event = wevent, .event_flags = wflags}
 #define SOC_MIXER_NAMED_CTL_E_ARRAY(wname, wreg, wshift, winvert, \
        wcontrols, wevent, wflags) \
-{       .id = snd_soc_dapm_mixer, .name = wname, .reg = wreg, .shift = wshift, \
-       .invert = winvert, .kcontrol_news = wcontrols, \
-       .num_kcontrols = ARRAY_SIZE(wcontrols), .event = wevent, .event_flags = wflags}
+{       .id = snd_soc_dapm_mixer, .name = wname, \
+       SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
+       .kcontrol_news = wcontrols, .num_kcontrols = ARRAY_SIZE(wcontrols), \
+       .event = wevent, .event_flags = wflags}
 
 /* events that are pre and post DAPM */
 #define SND_SOC_DAPM_PRE(wname, wevent) \
@@ -199,35 +222,36 @@ struct device;
 /* stream domain */
 #define SND_SOC_DAPM_AIF_IN(wname, stname, wslot, wreg, wshift, winvert) \
 {      .id = snd_soc_dapm_aif_in, .name = wname, .sname = stname, \
-       .reg = wreg, .shift = wshift, .invert = winvert }
+       SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), }
 #define SND_SOC_DAPM_AIF_IN_E(wname, stname, wslot, wreg, wshift, winvert, \
                              wevent, wflags)                           \
 {      .id = snd_soc_dapm_aif_in, .name = wname, .sname = stname, \
-       .reg = wreg, .shift = wshift, .invert = winvert, \
+       SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
        .event = wevent, .event_flags = wflags }
 #define SND_SOC_DAPM_AIF_OUT(wname, stname, wslot, wreg, wshift, winvert) \
 {      .id = snd_soc_dapm_aif_out, .name = wname, .sname = stname, \
-       .reg = wreg, .shift = wshift, .invert = winvert }
+       SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), }
 #define SND_SOC_DAPM_AIF_OUT_E(wname, stname, wslot, wreg, wshift, winvert, \
                             wevent, wflags)                            \
 {      .id = snd_soc_dapm_aif_out, .name = wname, .sname = stname, \
-       .reg = wreg, .shift = wshift, .invert = winvert, \
+       SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
        .event = wevent, .event_flags = wflags }
 #define SND_SOC_DAPM_DAC(wname, stname, wreg, wshift, winvert) \
-{      .id = snd_soc_dapm_dac, .name = wname, .sname = stname, .reg = wreg, \
-       .shift = wshift, .invert = winvert}
+{      .id = snd_soc_dapm_dac, .name = wname, .sname = stname, \
+       SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert) }
 #define SND_SOC_DAPM_DAC_E(wname, stname, wreg, wshift, winvert, \
                           wevent, wflags)                              \
-{      .id = snd_soc_dapm_dac, .name = wname, .sname = stname, .reg = wreg, \
-       .shift = wshift, .invert = winvert, \
+{      .id = snd_soc_dapm_dac, .name = wname, .sname = stname, \
+       SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
        .event = wevent, .event_flags = wflags}
+
 #define SND_SOC_DAPM_ADC(wname, stname, wreg, wshift, winvert) \
-{      .id = snd_soc_dapm_adc, .name = wname, .sname = stname, .reg = wreg, \
-       .shift = wshift, .invert = winvert}
+{      .id = snd_soc_dapm_adc, .name = wname, .sname = stname, \
+       SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), }
 #define SND_SOC_DAPM_ADC_E(wname, stname, wreg, wshift, winvert, \
                           wevent, wflags)                              \
-{      .id = snd_soc_dapm_adc, .name = wname, .sname = stname, .reg = wreg, \
-       .shift = wshift, .invert = winvert, \
+{      .id = snd_soc_dapm_adc, .name = wname, .sname = stname, \
+       SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
        .event = wevent, .event_flags = wflags}
 #define SND_SOC_DAPM_CLOCK_SUPPLY(wname) \
 {      .id = snd_soc_dapm_clock_supply, .name = wname, \
@@ -241,14 +265,14 @@ struct device;
        .on_val = won_val, .off_val = woff_val, .event = dapm_reg_event, \
        .event_flags = SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD}
 #define SND_SOC_DAPM_SUPPLY(wname, wreg, wshift, winvert, wevent, wflags) \
-{      .id = snd_soc_dapm_supply, .name = wname, .reg = wreg,  \
-       .shift = wshift, .invert = winvert, .event = wevent, \
-       .event_flags = wflags}
+{      .id = snd_soc_dapm_supply, .name = wname, \
+       SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
+       .event = wevent, .event_flags = wflags}
 #define SND_SOC_DAPM_REGULATOR_SUPPLY(wname, wdelay, wflags)       \
 {      .id = snd_soc_dapm_regulator_supply, .name = wname, \
        .reg = SND_SOC_NOPM, .shift = wdelay, .event = dapm_regulator_event, \
        .event_flags = SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD, \
-       .invert = wflags}
+       .on_val = wflags}
 
 
 /* dapm kcontrol types */
@@ -256,14 +280,26 @@ struct device;
 {      .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
        .info = snd_soc_info_volsw, \
        .get = snd_soc_dapm_get_volsw, .put = snd_soc_dapm_put_volsw, \
-       .private_value =  SOC_SINGLE_VALUE(reg, shift, max, invert) }
+       .private_value = SOC_SINGLE_VALUE(reg, shift, max, invert, 0) }
+#define SOC_DAPM_SINGLE_AUTODISABLE(xname, reg, shift, max, invert) \
+{      .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
+       .info = snd_soc_info_volsw, \
+       .get = snd_soc_dapm_get_volsw, .put = snd_soc_dapm_put_volsw, \
+       .private_value = SOC_SINGLE_VALUE(reg, shift, max, invert, 1) }
 #define SOC_DAPM_SINGLE_TLV(xname, reg, shift, max, invert, tlv_array) \
 {      .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
        .info = snd_soc_info_volsw, \
        .access = SNDRV_CTL_ELEM_ACCESS_TLV_READ | SNDRV_CTL_ELEM_ACCESS_READWRITE,\
        .tlv.p = (tlv_array), \
        .get = snd_soc_dapm_get_volsw, .put = snd_soc_dapm_put_volsw, \
-       .private_value =  SOC_SINGLE_VALUE(reg, shift, max, invert) }
+       .private_value = SOC_SINGLE_VALUE(reg, shift, max, invert, 0) }
+#define SOC_DAPM_SINGLE_TLV_AUTODISABLE(xname, reg, shift, max, invert, tlv_array) \
+{      .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
+       .info = snd_soc_info_volsw, \
+       .access = SNDRV_CTL_ELEM_ACCESS_TLV_READ | SNDRV_CTL_ELEM_ACCESS_READWRITE,\
+       .tlv.p = (tlv_array), \
+       .get = snd_soc_dapm_get_volsw, .put = snd_soc_dapm_put_volsw, \
+       .private_value = SOC_SINGLE_VALUE(reg, shift, max, invert, 0) }
 #define SOC_DAPM_ENUM(xname, xenum) \
 {      .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
        .info = snd_soc_info_enum_double, \
@@ -333,6 +369,7 @@ struct snd_soc_dapm_route;
 struct snd_soc_dapm_context;
 struct regulator;
 struct snd_soc_dapm_widget_list;
+struct snd_soc_dapm_update;
 
 int dapm_reg_event(struct snd_soc_dapm_widget *w,
                   struct snd_kcontrol *kcontrol, int event);
@@ -391,10 +428,12 @@ void snd_soc_dapm_stream_event(struct snd_soc_pcm_runtime *rtd, int stream,
 void snd_soc_dapm_shutdown(struct snd_soc_card *card);
 
 /* external DAPM widget events */
-int snd_soc_dapm_mixer_update_power(struct snd_soc_dapm_widget *widget,
-               struct snd_kcontrol *kcontrol, int connect);
-int snd_soc_dapm_mux_update_power(struct snd_soc_dapm_widget *widget,
-                                struct snd_kcontrol *kcontrol, int mux, struct soc_enum *e);
+int snd_soc_dapm_mixer_update_power(struct snd_soc_dapm_context *dapm,
+               struct snd_kcontrol *kcontrol, int connect,
+               struct snd_soc_dapm_update *update);
+int snd_soc_dapm_mux_update_power(struct snd_soc_dapm_context *dapm,
+               struct snd_kcontrol *kcontrol, int mux, struct soc_enum *e,
+               struct snd_soc_dapm_update *update);
 
 /* dapm sys fs - used by the core */
 int snd_soc_dapm_sys_add(struct device *dev);
@@ -424,6 +463,8 @@ void dapm_mark_io_dirty(struct snd_soc_dapm_context *dapm);
 int snd_soc_dapm_dai_get_connected_widgets(struct snd_soc_dai *dai, int stream,
        struct snd_soc_dapm_widget_list **list);
 
+struct snd_soc_codec *snd_soc_dapm_kcontrol_codec(struct snd_kcontrol *kcontrol);
+
 /* dapm widget types */
 enum snd_soc_dapm_type {
        snd_soc_dapm_input = 0,         /* input pin */
@@ -455,6 +496,7 @@ enum snd_soc_dapm_type {
        snd_soc_dapm_dai_in,            /* link to DAI structure */
        snd_soc_dapm_dai_out,
        snd_soc_dapm_dai_link,          /* link between two DAI structures */
+       snd_soc_dapm_kcontrol,          /* Auto-disabled kcontrol */
 };
 
 enum snd_soc_dapm_subclass {
@@ -485,7 +527,6 @@ struct snd_soc_dapm_path {
        /* source (input) and sink (output) widgets */
        struct snd_soc_dapm_widget *source;
        struct snd_soc_dapm_widget *sink;
-       struct snd_kcontrol *kcontrol;
 
        /* status */
        u32 connect:1;  /* source and sink widgets are connected */
@@ -498,6 +539,7 @@ struct snd_soc_dapm_path {
 
        struct list_head list_source;
        struct list_head list_sink;
+       struct list_head list_kcontrol;
        struct list_head list;
 };
 
@@ -518,12 +560,10 @@ struct snd_soc_dapm_widget {
        /* dapm control */
        int reg;                                /* negative reg = no direct dapm */
        unsigned char shift;                    /* bits to shift */
-       unsigned int value;                             /* widget current value */
        unsigned int mask;                      /* non-shifted mask */
        unsigned int on_val;                    /* on state value */
        unsigned int off_val;                   /* off state value */
        unsigned char power:1;                  /* block power status */
-       unsigned char invert:1;                 /* invert the power bit */
        unsigned char active:1;                 /* active stream on DAC, ADC's */
        unsigned char connected:1;              /* connected codec pin */
        unsigned char new:1;                    /* cnew complete */
@@ -559,7 +599,6 @@ struct snd_soc_dapm_widget {
 };
 
 struct snd_soc_dapm_update {
-       struct snd_soc_dapm_widget *widget;
        struct snd_kcontrol *kcontrol;
        int reg;
        int mask;
@@ -573,8 +612,6 @@ struct snd_soc_dapm_context {
        struct delayed_work delayed_work;
        unsigned int idle_bias_off:1; /* Use BIAS_OFF instead of STANDBY */
 
-       struct snd_soc_dapm_update *update;
-
        void (*seq_notifier)(struct snd_soc_dapm_context *,
                             enum snd_soc_dapm_type, int);
 
index 04598f1efd771f9f05e6f051edf975bd510700ce..047d657c331ce77b6c54f51ca38f18cfdc24a1d1 100644 (file)
@@ -133,6 +133,6 @@ void snd_soc_dpcm_be_set_state(struct snd_soc_pcm_runtime *be, int stream,
 /* internal use only */
 int soc_dpcm_be_digital_mute(struct snd_soc_pcm_runtime *fe, int mute);
 int soc_dpcm_debugfs_add(struct snd_soc_pcm_runtime *rtd);
-int soc_dpcm_runtime_update(struct snd_soc_dapm_widget *);
+int soc_dpcm_runtime_update(struct snd_soc_card *);
 
 #endif
index 6eabee7ec15a90197ff3024f827d47cfaad91095..8e2ad52078b613ec853c2a6637f8a118674e8814 100644 (file)
 /*
  * Convenience kcontrol builders
  */
-#define SOC_DOUBLE_VALUE(xreg, shift_left, shift_right, xmax, xinvert) \
+#define SOC_DOUBLE_VALUE(xreg, shift_left, shift_right, xmax, xinvert, xautodisable) \
        ((unsigned long)&(struct soc_mixer_control) \
        {.reg = xreg, .rreg = xreg, .shift = shift_left, \
        .rshift = shift_right, .max = xmax, .platform_max = xmax, \
-       .invert = xinvert})
-#define SOC_SINGLE_VALUE(xreg, xshift, xmax, xinvert) \
-       SOC_DOUBLE_VALUE(xreg, xshift, xshift, xmax, xinvert)
+       .invert = xinvert, .autodisable = xautodisable})
+#define SOC_SINGLE_VALUE(xreg, xshift, xmax, xinvert, xautodisable) \
+       SOC_DOUBLE_VALUE(xreg, xshift, xshift, xmax, xinvert, xautodisable)
 #define SOC_SINGLE_VALUE_EXT(xreg, xmax, xinvert) \
        ((unsigned long)&(struct soc_mixer_control) \
        {.reg = xreg, .max = xmax, .platform_max = xmax, .invert = xinvert})
@@ -52,7 +52,7 @@
 {      .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
        .info = snd_soc_info_volsw, .get = snd_soc_get_volsw,\
        .put = snd_soc_put_volsw, \
-       .private_value =  SOC_SINGLE_VALUE(reg, shift, max, invert) }
+       .private_value = SOC_SINGLE_VALUE(reg, shift, max, invert, 0) }
 #define SOC_SINGLE_RANGE(xname, xreg, xshift, xmin, xmax, xinvert) \
 {      .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname),\
        .info = snd_soc_info_volsw_range, .get = snd_soc_get_volsw_range, \
@@ -68,7 +68,7 @@
        .tlv.p = (tlv_array), \
        .info = snd_soc_info_volsw, .get = snd_soc_get_volsw,\
        .put = snd_soc_put_volsw, \
-       .private_value =  SOC_SINGLE_VALUE(reg, shift, max, invert) }
+       .private_value = SOC_SINGLE_VALUE(reg, shift, max, invert, 0) }
 #define SOC_SINGLE_SX_TLV(xname, xreg, xshift, xmin, xmax, tlv_array) \
 {       .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
        .access = SNDRV_CTL_ELEM_ACCESS_TLV_READ | \
@@ -97,7 +97,7 @@
        .info = snd_soc_info_volsw, .get = snd_soc_get_volsw, \
        .put = snd_soc_put_volsw, \
        .private_value = SOC_DOUBLE_VALUE(reg, shift_left, shift_right, \
-                                         max, invert) }
+                                         max, invert, 0) }
 #define SOC_DOUBLE_R(xname, reg_left, reg_right, xshift, xmax, xinvert) \
 {      .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname), \
        .info = snd_soc_info_volsw, \
        .info = snd_soc_info_volsw, .get = snd_soc_get_volsw, \
        .put = snd_soc_put_volsw, \
        .private_value = SOC_DOUBLE_VALUE(reg, shift_left, shift_right, \
-                                         max, invert) }
+                                         max, invert, 0) }
 #define SOC_DOUBLE_R_TLV(xname, reg_left, reg_right, xshift, xmax, xinvert, tlv_array) \
 {      .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname),\
        .access = SNDRV_CTL_ELEM_ACCESS_TLV_READ |\
 {      .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
        .info = snd_soc_info_volsw, \
        .get = xhandler_get, .put = xhandler_put, \
-       .private_value = SOC_SINGLE_VALUE(xreg, xshift, xmax, xinvert) }
+       .private_value = SOC_SINGLE_VALUE(xreg, xshift, xmax, xinvert, 0) }
 #define SOC_DOUBLE_EXT(xname, reg, shift_left, shift_right, max, invert,\
         xhandler_get, xhandler_put) \
 {      .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname),\
        .info = snd_soc_info_volsw, \
        .get = xhandler_get, .put = xhandler_put, \
        .private_value = \
-               SOC_DOUBLE_VALUE(reg, shift_left, shift_right, max, invert) }
+               SOC_DOUBLE_VALUE(reg, shift_left, shift_right, max, invert, 0) }
 #define SOC_SINGLE_EXT_TLV(xname, xreg, xshift, xmax, xinvert,\
         xhandler_get, xhandler_put, tlv_array) \
 {      .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
        .tlv.p = (tlv_array), \
        .info = snd_soc_info_volsw, \
        .get = xhandler_get, .put = xhandler_put, \
-       .private_value = SOC_SINGLE_VALUE(xreg, xshift, xmax, xinvert) }
+       .private_value = SOC_SINGLE_VALUE(xreg, xshift, xmax, xinvert, 0) }
 #define SOC_DOUBLE_EXT_TLV(xname, xreg, shift_left, shift_right, xmax, xinvert,\
         xhandler_get, xhandler_put, tlv_array) \
 {      .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname), \
        .info = snd_soc_info_volsw, \
        .get = xhandler_get, .put = xhandler_put, \
        .private_value = SOC_DOUBLE_VALUE(xreg, shift_left, shift_right, \
-                                         xmax, xinvert) }
+                                         xmax, xinvert, 0) }
 #define SOC_DOUBLE_R_EXT_TLV(xname, reg_left, reg_right, xshift, xmax, xinvert,\
         xhandler_get, xhandler_put, tlv_array) \
 {      .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname), \
        .private_value = xdata }
 #define SOC_ENUM_EXT(xname, xenum, xhandler_get, xhandler_put) \
 {      .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
-       .info = snd_soc_info_enum_ext, \
+       .info = snd_soc_info_enum_double, \
        .get = xhandler_get, .put = xhandler_put, \
        .private_value = (unsigned long)&xenum }
 
@@ -468,6 +468,8 @@ int snd_soc_new_ac97_codec(struct snd_soc_codec *codec,
 void snd_soc_free_ac97_codec(struct snd_soc_codec *codec);
 
 int snd_soc_set_ac97_ops(struct snd_ac97_bus_ops *ops);
+int snd_soc_set_ac97_ops_of_reset(struct snd_ac97_bus_ops *ops,
+               struct platform_device *pdev);
 
 /*
  *Controls
@@ -475,6 +477,8 @@ int snd_soc_set_ac97_ops(struct snd_ac97_bus_ops *ops);
 struct snd_kcontrol *snd_soc_cnew(const struct snd_kcontrol_new *_template,
                                  void *data, const char *long_name,
                                  const char *prefix);
+struct snd_kcontrol *snd_soc_card_get_kcontrol(struct snd_soc_card *soc_card,
+                                              const char *name);
 int snd_soc_add_codec_controls(struct snd_soc_codec *codec,
        const struct snd_kcontrol_new *controls, int num_controls);
 int snd_soc_add_platform_controls(struct snd_soc_platform *platform,
@@ -485,8 +489,6 @@ int snd_soc_add_dai_controls(struct snd_soc_dai *dai,
        const struct snd_kcontrol_new *controls, int num_controls);
 int snd_soc_info_enum_double(struct snd_kcontrol *kcontrol,
        struct snd_ctl_elem_info *uinfo);
-int snd_soc_info_enum_ext(struct snd_kcontrol *kcontrol,
-       struct snd_ctl_elem_info *uinfo);
 int snd_soc_get_enum_double(struct snd_kcontrol *kcontrol,
        struct snd_ctl_elem_value *ucontrol);
 int snd_soc_put_enum_double(struct snd_kcontrol *kcontrol,
@@ -497,8 +499,6 @@ int snd_soc_put_value_enum_double(struct snd_kcontrol *kcontrol,
        struct snd_ctl_elem_value *ucontrol);
 int snd_soc_info_volsw(struct snd_kcontrol *kcontrol,
        struct snd_ctl_elem_info *uinfo);
-int snd_soc_info_volsw_ext(struct snd_kcontrol *kcontrol,
-       struct snd_ctl_elem_info *uinfo);
 #define snd_soc_info_bool_ext          snd_ctl_boolean_mono_info
 int snd_soc_get_volsw(struct snd_kcontrol *kcontrol,
        struct snd_ctl_elem_value *ucontrol);
@@ -1042,6 +1042,7 @@ struct snd_soc_card {
        /* Generic DAPM context for the card */
        struct snd_soc_dapm_context dapm;
        struct snd_soc_dapm_stats dapm_stats;
+       struct snd_soc_dapm_update *update;
 
 #ifdef CONFIG_DEBUG_FS
        struct dentry *debugfs_card_root;
@@ -1087,7 +1088,9 @@ struct snd_soc_pcm_runtime {
 /* mixer control */
 struct soc_mixer_control {
        int min, max, platform_max;
-       unsigned int reg, rreg, shift, rshift, invert;
+       unsigned int reg, rreg, shift, rshift;
+       unsigned int invert:1;
+       unsigned int autodisable:1;
 };
 
 struct soc_bytes {
index 098c4de4494573a3ecbb5f697d9fc9a34f8416a7..2d4fa59db9021059b387b7df5145b7a615c81007 100644 (file)
@@ -71,6 +71,7 @@ struct snd_tea575x {
        int (*ext_init)(struct snd_tea575x *tea);
 };
 
+int snd_tea575x_hw_init(struct snd_tea575x *tea);
 int snd_tea575x_init(struct snd_tea575x *tea, struct module *owner);
 void snd_tea575x_exit(struct snd_tea575x *tea);
 void snd_tea575x_set_freq(struct snd_tea575x *tea);
index 2068db241f2204a44018f8af22225a952dd58c29..d892b55d91ab84fd00539f54677dae9ce7a064cc 100644 (file)
@@ -64,10 +64,10 @@ struct extent_status;
        { EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER, "LAST_CLUSTER" })
 
 #define show_extent_status(status) __print_flags(status, "",   \
-       { (1 << 3),     "W" },                                  \
-       { (1 << 2),     "U" },                                  \
-       { (1 << 1),     "D" },                                  \
-       { (1 << 0),     "H" })
+       { EXTENT_STATUS_WRITTEN,        "W" },                  \
+       { EXTENT_STATUS_UNWRITTEN,      "U" },                  \
+       { EXTENT_STATUS_DELAYED,        "D" },                  \
+       { EXTENT_STATUS_HOLE,           "H" })
 
 
 TRACE_EVENT(ext4_free_inode,
@@ -2192,7 +2192,7 @@ TRACE_EVENT(ext4_ext_remove_space_done,
                  (unsigned short) __entry->eh_entries)
 );
 
-TRACE_EVENT(ext4_es_insert_extent,
+DECLARE_EVENT_CLASS(ext4__es_extent,
        TP_PROTO(struct inode *inode, struct extent_status *es),
 
        TP_ARGS(inode, es),
@@ -2212,7 +2212,7 @@ TRACE_EVENT(ext4_es_insert_extent,
                __entry->lblk   = es->es_lblk;
                __entry->len    = es->es_len;
                __entry->pblk   = ext4_es_pblock(es);
-               __entry->status = ext4_es_status(es) >> 60;
+               __entry->status = ext4_es_status(es);
        ),
 
        TP_printk("dev %d,%d ino %lu es [%u/%u) mapped %llu status %s",
@@ -2222,6 +2222,18 @@ TRACE_EVENT(ext4_es_insert_extent,
                  __entry->pblk, show_extent_status(__entry->status))
 );
 
+DEFINE_EVENT(ext4__es_extent, ext4_es_insert_extent,
+       TP_PROTO(struct inode *inode, struct extent_status *es),
+
+       TP_ARGS(inode, es)
+);
+
+DEFINE_EVENT(ext4__es_extent, ext4_es_cache_extent,
+       TP_PROTO(struct inode *inode, struct extent_status *es),
+
+       TP_ARGS(inode, es)
+);
+
 TRACE_EVENT(ext4_es_remove_extent,
        TP_PROTO(struct inode *inode, ext4_lblk_t lblk, ext4_lblk_t len),
 
@@ -2289,7 +2301,7 @@ TRACE_EVENT(ext4_es_find_delayed_extent_range_exit,
                __entry->lblk   = es->es_lblk;
                __entry->len    = es->es_len;
                __entry->pblk   = ext4_es_pblock(es);
-               __entry->status = ext4_es_status(es) >> 60;
+               __entry->status = ext4_es_status(es);
        ),
 
        TP_printk("dev %d,%d ino %lu es [%u/%u) mapped %llu status %s",
@@ -2343,7 +2355,7 @@ TRACE_EVENT(ext4_es_lookup_extent_exit,
                __entry->lblk   = es->es_lblk;
                __entry->len    = es->es_len;
                __entry->pblk   = ext4_es_pblock(es);
-               __entry->status = ext4_es_status(es) >> 60;
+               __entry->status = ext4_es_status(es);
                __entry->found  = found;
        ),
 
index 8e42410bd1591c5168d616d3ebcac66a1d22aed5..cda100d6762d250616648c6323c3ba2d7aaf6316 100644 (file)
@@ -66,6 +66,43 @@ TRACE_EVENT(machine_suspend,
        TP_printk("state=%lu", (unsigned long)__entry->state)
 );
 
+TRACE_EVENT(device_pm_report_time,
+
+       TP_PROTO(struct device *dev, const char *pm_ops, s64 ops_time,
+                char *pm_event_str, int error),
+
+       TP_ARGS(dev, pm_ops, ops_time, pm_event_str, error),
+
+       TP_STRUCT__entry(
+               __string(device, dev_name(dev))
+               __string(driver, dev_driver_string(dev))
+               __string(parent, dev->parent ? dev_name(dev->parent) : "none")
+               __string(pm_ops, pm_ops ? pm_ops : "none ")
+               __string(pm_event_str, pm_event_str)
+               __field(s64, ops_time)
+               __field(int, error)
+       ),
+
+       TP_fast_assign(
+               const char *tmp = dev->parent ? dev_name(dev->parent) : "none";
+               const char *tmp_i = pm_ops ? pm_ops : "none ";
+
+               __assign_str(device, dev_name(dev));
+               __assign_str(driver, dev_driver_string(dev));
+               __assign_str(parent, tmp);
+               __assign_str(pm_ops, tmp_i);
+               __assign_str(pm_event_str, pm_event_str);
+               __entry->ops_time = ops_time;
+               __entry->error = error;
+       ),
+
+       /* ops_str has an extra space at the end */
+       TP_printk("%s %s parent=%s state=%s ops=%snsecs=%lld err=%d",
+               __get_str(driver), __get_str(device), __get_str(parent),
+               __get_str(pm_event_str), __get_str(pm_ops),
+               __entry->ops_time, __entry->error)
+);
+
 DECLARE_EVENT_CLASS(wakeup_source,
 
        TP_PROTO(const char *name, unsigned int state),
index 238a166b9fe6091935d8357f5898510fe220ad4d..272580ca320f1c6755306d36e503de32d44572b0 100644 (file)
@@ -181,7 +181,7 @@ enum drm_map_type {
        _DRM_AGP = 3,             /**< AGP/GART */
        _DRM_SCATTER_GATHER = 4,  /**< Scatter/gather memory for PCI DMA */
        _DRM_CONSISTENT = 5,      /**< Consistent memory for PCI DMA */
-       _DRM_GEM = 6,             /**< GEM object */
+       _DRM_GEM = 6,             /**< GEM object (obsolete) */
 };
 
 /**
index 923ed7fe5775b61743ba9df9037d27c2c3fe7b6b..55bb5729bd78a534da094f07fe74714e162a2c9c 100644 (file)
  * subject to backwards-compatibility constraints.
  */
 
+/**
+ * DOC: uevents generated by i915 on it's device node
+ *
+ * I915_L3_PARITY_UEVENT - Generated when the driver receives a parity mismatch
+ *     event from the gpu l3 cache. Additional information supplied is ROW,
+ *     BANK, SUBBANK of the affected cacheline. Userspace should keep track of
+ *     these events and if a specific cache-line seems to have a persistent
+ *     error remap it with the l3 remapping tool supplied in intel-gpu-tools.
+ *     The value supplied with the event is always 1.
+ *
+ * I915_ERROR_UEVENT - Generated upon error detection, currently only via
+ *     hangcheck. The error detection event is a good indicator of when things
+ *     began to go badly. The value supplied with the event is a 1 upon error
+ *     detection, and a 0 upon reset completion, signifying no more error
+ *     exists. NOTE: Disabling hangcheck or reset via module parameter will
+ *     cause the related events to not be seen.
+ *
+ * I915_RESET_UEVENT - Event is generated just before an attempt to reset the
+ *     the GPU. The value supplied with the event is always 1. NOTE: Disable
+ *     reset via module parameter will cause this event to not be seen.
+ */
+#define I915_L3_PARITY_UEVENT          "L3_PARITY_ERROR"
+#define I915_ERROR_UEVENT              "ERROR"
+#define I915_RESET_UEVENT              "RESET"
 
 /* Each region is a minimum of 16k, and there are at most 255 of them.
  */
@@ -310,6 +334,7 @@ typedef struct drm_i915_irq_wait {
 #define I915_PARAM_HAS_PINNED_BATCHES   24
 #define I915_PARAM_HAS_EXEC_NO_RELOC    25
 #define I915_PARAM_HAS_EXEC_HANDLE_LUT   26
+#define I915_PARAM_HAS_WT               27
 
 typedef struct drm_i915_getparam {
        int param;
@@ -744,8 +769,32 @@ struct drm_i915_gem_busy {
        __u32 busy;
 };
 
+/**
+ * I915_CACHING_NONE
+ *
+ * GPU access is not coherent with cpu caches. Default for machines without an
+ * LLC.
+ */
 #define I915_CACHING_NONE              0
+/**
+ * I915_CACHING_CACHED
+ *
+ * GPU access is coherent with cpu caches and furthermore the data is cached in
+ * last-level caches shared between cpu cores and the gpu GT. Default on
+ * machines with HAS_LLC.
+ */
 #define I915_CACHING_CACHED            1
+/**
+ * I915_CACHING_DISPLAY
+ *
+ * Special GPU caching mode which is coherent with the scanout engines.
+ * Transparently falls back to I915_CACHING_NONE on platforms where no special
+ * cache mode (like write-through or gfdt flushing) is available. The kernel
+ * automatically sets this mode when using a buffer as a scanout target.
+ * Userspace can manually set this mode to avoid a costly stall and clflush in
+ * the hotpath of drawing the first frame.
+ */
+#define I915_CACHING_DISPLAY           2
 
 struct drm_i915_gem_caching {
        /**
index bb2554f7fbd12a677015d48703ccf681357e84c6..22ce4bd2501322a6beb468ed84168ebcda774a8b 100644 (file)
@@ -44,6 +44,8 @@ enum {
        IOCB_CMD_NOOP = 6,
        IOCB_CMD_PREADV = 7,
        IOCB_CMD_PWRITEV = 8,
+       IOCB_CMD_READ_ITER = 9,
+       IOCB_CMD_WRITE_ITER = 10,
 };
 
 /*
diff --git a/include/uapi/linux/cifs/cifs_mount.h b/include/uapi/linux/cifs/cifs_mount.h
new file mode 100644 (file)
index 0000000..d7e4c6c
--- /dev/null
@@ -0,0 +1,27 @@
+/*
+ *   include/uapi/linux/cifs/cifs_mount.h
+ *
+ *   Author(s): Scott Lovenberg (scott.lovenberg@gmail.com)
+ *
+ *   This library is free software; you can redistribute it and/or modify
+ *   it under the terms of the GNU Lesser General Public License as published
+ *   by the Free Software Foundation; either version 2.1 of the License, or
+ *   (at your option) any later version.
+ *
+ *   This library is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
+ *   the GNU Lesser General Public License for more details.
+ */
+#ifndef _CIFS_MOUNT_H
+#define _CIFS_MOUNT_H
+
+/* Max string lengths for cifs mounting options. */
+#define CIFS_MAX_DOMAINNAME_LEN 256 /* max fully qualified domain name */
+#define CIFS_MAX_USERNAME_LEN   256 /* reasonable max for current servers */
+#define CIFS_MAX_PASSWORD_LEN   512 /* Windows max seems to be 256 wide chars */
+#define CIFS_MAX_SHARE_LEN      256 /* reasonable max share name length */
+#define CIFS_NI_MAXHOST        1024 /* max host name length (256 * 4 bytes) */
+
+
+#endif /* _CIFS_MOUNT_H */
index afd0cbd52edb62b501bbbe4225338af6b9dc95f0..f1e12bd40b3b42bc5f1ccc4695e6622a9522862e 100644 (file)
@@ -267,9 +267,9 @@ enum {
 #define DM_DEV_SET_GEOMETRY    _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl)
 
 #define DM_VERSION_MAJOR       4
-#define DM_VERSION_MINOR       25
+#define DM_VERSION_MINOR       26
 #define DM_VERSION_PATCHLEVEL  0
-#define DM_VERSION_EXTRA       "-ioctl (2013-06-26)"
+#define DM_VERSION_EXTRA       "-ioctl (2013-08-15)"
 
 /* Status bits */
 #define DM_READONLY_FLAG       (1 << 0) /* In/Out */
index 9c50445462d99dc755af6e442c217784ba940f83..5fbdd3d49ebabd77526ef5f29b73d66112380f70 100644 (file)
@@ -2,6 +2,7 @@
 #define _LINUX_DN_H
 
 #include <linux/types.h>
+#include <linux/if_ether.h>
 
 /*
 
@@ -120,7 +121,7 @@ struct linkinfo_dn {
  * Ethernet address format (for DECnet)
  */
 union etheraddress {
-        __u8 dne_addr[6];             /* Full ethernet address */
+        __u8 dne_addr[ETH_ALEN];      /* Full ethernet address */
   struct {
                 __u8 dne_hiord[4];    /* DECnet HIORD prefix   */
                 __u8 dne_nodeaddr[2]; /* DECnet node address   */
index 86552807aed949529fc34a3007658ede564f4e52..dcd75cc261962f65c909a6efa0defe3f1dcdc281 100644 (file)
@@ -38,6 +38,7 @@
 #define Q_XGETQSTAT    XQM_CMD(5)      /* get quota subsystem status */
 #define Q_XQUOTARM     XQM_CMD(6)      /* free disk space used by dquots */
 #define Q_XQUOTASYNC   XQM_CMD(7)      /* delalloc flush, updates dquots */
+#define Q_XGETQSTATV   XQM_CMD(8)      /* newer version of get quota */
 
 /*
  * fs_disk_quota structure:
@@ -163,4 +164,50 @@ typedef struct fs_quota_stat {
        __u16           qs_iwarnlimit;  /* limit for num warnings */
 } fs_quota_stat_t;
 
+/*
+ * fs_quota_statv is used by Q_XGETQSTATV for a given file system. It provides
+ * a centralized way to get meta information about the quota subsystem. eg.
+ * space taken up for user, group, and project quotas, number of dquots
+ * currently incore.
+ *
+ * This version has proper versioning support with appropriate padding for
+ * future expansions, and ability to expand for future without creating any
+ * backward compatibility issues.
+ *
+ * Q_XGETQSTATV uses the passed in value of the requested version via
+ * fs_quota_statv.qs_version to determine the return data layout of
+ * fs_quota_statv.  The kernel will fill the data fields relevant to that
+ * version.
+ *
+ * If kernel does not support user space caller specified version, EINVAL will
+ * be returned. User space caller can then reduce the version number and retry
+ * the same command.
+ */
+#define FS_QSTATV_VERSION1     1       /* fs_quota_statv.qs_version */
+/*
+ * Some basic information about 'quota files' for Q_XGETQSTATV command
+ */
+struct fs_qfilestatv {
+       __u64           qfs_ino;        /* inode number */
+       __u64           qfs_nblks;      /* number of BBs 512-byte-blks */
+       __u32           qfs_nextents;   /* number of extents */
+       __u32           qfs_pad;        /* pad for 8-byte alignment */
+};
+
+struct fs_quota_statv {
+       __s8                    qs_version;     /* version for future changes */
+       __u8                    qs_pad1;        /* pad for 16bit alignment */
+       __u16                   qs_flags;       /* FS_QUOTA_.* flags */
+       __u32                   qs_incoredqs;   /* number of dquots incore */
+       struct fs_qfilestatv    qs_uquota;      /* user quota information */
+       struct fs_qfilestatv    qs_gquota;      /* group quota information */
+       struct fs_qfilestatv    qs_pquota;      /* project quota information */
+       __s32                   qs_btimelimit;  /* limit for blks timer */
+       __s32                   qs_itimelimit;  /* limit for inodes timer */
+       __s32                   qs_rtbtimelimit;/* limit for rt blks timer */
+       __u16                   qs_bwarnlimit;  /* limit for num warnings */
+       __u16                   qs_iwarnlimit;  /* limit for num warnings */
+       __u64                   qs_pad2[8];     /* for future proofing */
+};
+
 #endif /* _LINUX_DQBLK_XFS_H */
index 51da65b68b8501cb2d25fe0064c2c6ff65e3d4e6..2b82d7e30974f93b9a93afc2fdf5c9137492043d 100644 (file)
@@ -44,8 +44,8 @@ enum {
        FRA_FWMARK,     /* mark */
        FRA_FLOW,       /* flow/class id */
        FRA_UNUSED6,
-       FRA_UNUSED7,
-       FRA_UNUSED8,
+       FRA_SUPPRESS_IFGROUP,
+       FRA_SUPPRESS_PREFIXLEN,
        FRA_TABLE,      /* Extended table id */
        FRA_FWMASK,     /* mask for netfilter mark */
        FRA_OIFNAME,
index d830747f5c0b077e7ac894a687258ec82979af69..0c51d617dae9cf131a8c5a009d0ecf3cac5da735 100644 (file)
@@ -40,6 +40,7 @@ struct fiemap {
 
 #define FIEMAP_FLAG_SYNC       0x00000001 /* sync file data before map */
 #define FIEMAP_FLAG_XATTR      0x00000002 /* map extended attribute tree */
+#define FIEMAP_FLAG_CACHE      0x00000004 /* request caching of the extents */
 
 #define FIEMAP_FLAGS_COMPAT    (FIEMAP_FLAG_SYNC | FIEMAP_FLAG_XATTR)
 
index 2d70d79ce2fd8e816e0271f4b771572c05253cef..39f621a9fe826cb9a0eab487cd36a51aed5440ab 100644 (file)
@@ -14,6 +14,7 @@
 #define _UAPI_LINUX_IF_BRIDGE_H
 
 #include <linux/types.h>
+#include <linux/if_ether.h>
 
 #define SYSFS_BRIDGE_ATTR      "bridge"
 #define SYSFS_BRIDGE_FDB       "brforward"
@@ -88,7 +89,7 @@ struct __port_info {
 };
 
 struct __fdb_entry {
-       __u8 mac_addr[6];
+       __u8 mac_addr[ETH_ALEN];
        __u8 port_no;
        __u8 is_local;
        __u32 ageing_timer_value;
index 03f6170ab3372f443c0989c7c5a503b00ef0c308..04c0e7a5d48421645ee23016064d143918a08ca3 100644 (file)
@@ -143,6 +143,7 @@ enum {
        IFLA_NUM_TX_QUEUES,
        IFLA_NUM_RX_QUEUES,
        IFLA_CARRIER,
+       IFLA_PHYS_PORT_ID,
        __IFLA_MAX
 };
 
index e36a4aecd3117ebdaf46c17ac115e5d0a29bcede..e128769331b5a7c6be4a10cf44c262be68a65772 100644 (file)
@@ -46,7 +46,7 @@ struct pppoe_addr {
  * PPTP addressing definition
  */
 struct pptp_addr {
-       __be16          call_id;
+       __u16           call_id;
        struct in_addr  sin_addr;
 };
 
index 82334f88967e9ba269d3e3fa121f563c33e64eed..e9502dd1ee2cc178e712e6ab05c59efecdaf980f 100644 (file)
@@ -56,6 +56,8 @@
 #define TUNGETVNETHDRSZ _IOR('T', 215, int)
 #define TUNSETVNETHDRSZ _IOW('T', 216, int)
 #define TUNSETQUEUE  _IOW('T', 217, int)
+#define TUNSETIFINDEX  _IOW('T', 218, unsigned int)
+#define TUNGETFILTER _IOR('T', 219, struct sock_fprog)
 
 /* TUNSETIFF ifr flags */
 #define IFF_TUN                0x0001
 #define IFF_DETACH_QUEUE 0x0400
 /* read-only flag */
 #define IFF_PERSIST    0x0800
+#define IFF_NOFILTER   0x1000
+
+/* Socket options */
+#define TUN_TX_TIMESTAMP 1
 
 /* Features for GSO (TUNSETOFFLOAD). */
 #define TUN_F_CSUM     0x01    /* You can hand me unchecksummed packets. */
index 4bda4cf5b0f56d84651497df86bd9fa09909fa85..d07ac6903e593cdc04279a70bde8a63a47da8bb8 100644 (file)
@@ -160,6 +160,8 @@ enum {
        DEVCONF_ACCEPT_DAD,
        DEVCONF_FORCE_TLLAO,
        DEVCONF_NDISC_NOTIFY,
+       DEVCONF_MLDV1_UNSOLICITED_REPORT_INTERVAL,
+       DEVCONF_MLDV2_UNSOLICITED_REPORT_INTERVAL,
        DEVCONF_MAX
 };
 
index acccd08be6c7563f6c2c316f6c2530563a7d3cb5..b622337ead26b4d42ba22c57d92baffae7b1fe1d 100644 (file)
@@ -171,6 +171,7 @@ struct kvm_pit_config {
 #define KVM_EXIT_WATCHDOG         21
 #define KVM_EXIT_S390_TSCH        22
 #define KVM_EXIT_EPR              23
+#define KVM_EXIT_AGAIN            24
 
 /* For KVM_EXIT_INTERNAL_ERROR */
 /* Emulate instruction failed. */
index e0cecd2eabdc5f2289a1940bf3d00ad8ebc70ee8..6edc6b68badd5385d2fdea6e5f524714c4f3cb90 100644 (file)
@@ -21,6 +21,7 @@ enum {
        LO_FLAGS_READ_ONLY      = 1,
        LO_FLAGS_AUTOCLEAR      = 4,
        LO_FLAGS_PARTSCAN       = 8,
+       LO_FLAGS_USE_AIO        = 16,
 };
 
 #include <asm/posix_types.h>   /* for __kernel_old_dev_t */
index 41115776d76f74996a0815e044533fc5e5ee8f78..174915420d3fe8231dc151519ceac551aceb148e 100644 (file)
@@ -22,6 +22,7 @@ header-y += xt_CONNMARK.h
 header-y += xt_CONNSECMARK.h
 header-y += xt_CT.h
 header-y += xt_DSCP.h
+header-y += xt_HMARK.h
 header-y += xt_IDLETIMER.h
 header-y += xt_LED.h
 header-y += xt_LOG.h
@@ -68,6 +69,7 @@ header-y += xt_quota.h
 header-y += xt_rateest.h
 header-y += xt_realm.h
 header-y += xt_recent.h
+header-y += xt_rpfilter.h
 header-y += xt_sctp.h
 header-y += xt_set.h
 header-y += xt_socket.h
index 3a9b92147339d14d871ba836e5b8840f6d5cce8c..0132bad79de7f6860120d13c6df2d508ad2d5d14 100644 (file)
@@ -46,6 +46,7 @@ enum nfqnl_attr_type {
        NFQA_CT_INFO,                   /* enum ip_conntrack_info */
        NFQA_CAP_LEN,                   /* __u32 length of captured packet */
        NFQA_SKB_INFO,                  /* __u32 skb meta information */
+       NFQA_EXP,                       /* nf_conntrack_netlink.h */
 
        __NFQA_MAX
 };
index 5bf84912a082a53e5cc12b9b03eaae46bcd91116..f37522aade24e6b96e4c0c30670dcf833bb896ed 100644 (file)
@@ -2,6 +2,7 @@
 #define _UAPI__LINUX_BRIDGE_EBT_802_3_H
 
 #include <linux/types.h>
+#include <linux/if_ether.h>
 
 #define EBT_802_3_SAP 0x01
 #define EBT_802_3_TYPE 0x02
@@ -42,8 +43,8 @@ struct hdr_ni {
 };
 
 struct ebt_802_3_hdr {
-       __u8  daddr[6];
-       __u8  saddr[6];
+       __u8  daddr[ETH_ALEN];
+       __u8  saddr[ETH_ALEN];
        __be16 len;
        union {
                struct hdr_ui ui;
index c6a204c97047a671bed64f03230064ef9e174175..eac0f6548f47f17cd918e50b835cc14078f81df0 100644 (file)
@@ -2,6 +2,7 @@
 #define _IPT_CLUSTERIP_H_target
 
 #include <linux/types.h>
+#include <linux/if_ether.h>
 
 enum clusterip_hashmode {
     CLUSTERIP_HASHMODE_SIP = 0,
@@ -22,7 +23,7 @@ struct ipt_clusterip_tgt_info {
        __u32 flags;
 
        /* only relevant for new ones */
-       __u8 clustermac[6];
+       __u8 clustermac[ETH_ALEN];
        __u16 num_total_nodes;
        __u16 num_local_nodes;
        __u16 local_nodes[CLUSTERIP_MAX_NODES];
index 8137dd8d2adffd1c91d4caf76dfa08b47501408a..29bed72a4ac43e4bd141b87a8b5e9d410ab21cff 100644 (file)
  * @NFC_CMD_DISABLE_SE: Disable the physical link to a specific secure element.
  * @NFC_CMD_FW_DOWNLOAD: Request to Load/flash firmware, or event to inform
  *     that some firmware was loaded
+ * @NFC_EVENT_SE_ADDED: Event emitted when a new secure element is discovered.
+ *     This typically will be sent whenever a new NFC controller with either
+ *     an embedded SE or an UICC one connected to it through SWP.
+ * @NFC_EVENT_SE_REMOVED: Event emitted when a secure element is removed from
+ *     the system, as a consequence of e.g. an NFC controller being unplugged.
+ * @NFC_EVENT_SE_CONNECTIVITY: This event is emitted whenever a secure element
+ *     is requesting connectivity access. For example a UICC SE may need to
+ *     talk with a sleeping modem and will notify this need by sending this
+ *     event. It is then up to userspace to decide if it will wake the modem
+ *     up or not.
+ * @NFC_EVENT_SE_TRANSACTION: This event is sent when an application running on
+ *     a specific SE notifies us about the end of a transaction. The parameter
+ *     for this event is the application ID (AID).
+ * @NFC_CMD_GET_SE: Dump all discovered secure elements from an NFC controller.
  */
 enum nfc_commands {
        NFC_CMD_UNSPEC,
@@ -97,6 +111,9 @@ enum nfc_commands {
        NFC_CMD_FW_DOWNLOAD,
        NFC_EVENT_SE_ADDED,
        NFC_EVENT_SE_REMOVED,
+       NFC_EVENT_SE_CONNECTIVITY,
+       NFC_EVENT_SE_TRANSACTION,
+       NFC_CMD_GET_SE,
 /* private: internal use only */
        __NFC_CMD_AFTER_LAST
 };
@@ -129,6 +146,7 @@ enum nfc_commands {
  * @NFC_ATTR_FIRMWARE_NAME: Free format firmware version
  * @NFC_ATTR_SE_INDEX: Secure element index
  * @NFC_ATTR_SE_TYPE: Secure element type (UICC or EMBEDDED)
+ * @NFC_ATTR_FIRMWARE_DOWNLOAD_STATUS: Firmware download operation status
  */
 enum nfc_attrs {
        NFC_ATTR_UNSPEC,
@@ -154,6 +172,8 @@ enum nfc_attrs {
        NFC_ATTR_FIRMWARE_NAME,
        NFC_ATTR_SE_INDEX,
        NFC_ATTR_SE_TYPE,
+       NFC_ATTR_SE_AID,
+       NFC_ATTR_FIRMWARE_DOWNLOAD_STATUS,
 /* private: internal use only */
        __NFC_ATTR_AFTER_LAST
 };
index 861e5eba3953b613956a6ca24f632c13d2a1e937..1f42bc3dcb9c544407eeb32968b41089c4a5917e 100644 (file)
  * interfaces that a given device supports.
  */
 
+/**
+ * DOC: packet coalesce support
+ *
+ * In most cases, host that receives IPv4 and IPv6 multicast/broadcast
+ * packets does not do anything with these packets. Therefore the
+ * reception of these unwanted packets causes unnecessary processing
+ * and power consumption.
+ *
+ * Packet coalesce feature helps to reduce number of received interrupts
+ * to host by buffering these packets in firmware/hardware for some
+ * predefined time. Received interrupt will be generated when one of the
+ * following events occur.
+ * a) Expiration of hardware timer whose expiration time is set to maximum
+ * coalescing delay of matching coalesce rule.
+ * b) Coalescing buffer in hardware reaches it's limit.
+ * c) Packet doesn't match any of the configured coalesce rules.
+ *
+ * User needs to configure following parameters for creating a coalesce
+ * rule.
+ * a) Maximum coalescing delay
+ * b) List of packet patterns which needs to be matched
+ * c) Condition for coalescence. pattern 'match' or 'no match'
+ * Multiple such rules can be created.
+ */
+
 /**
  * enum nl80211_commands - supported nl80211 commands
  *
  * @NL80211_CMD_CRIT_PROTOCOL_STOP: Indicates the connection reliability can
  *     return back to normal.
  *
+ * @NL80211_CMD_GET_COALESCE: Get currently supported coalesce rules.
+ * @NL80211_CMD_SET_COALESCE: Configure coalesce rules or clear existing rules.
+ *
+ * @NL80211_CMD_CHANNEL_SWITCH: Perform a channel switch by announcing the
+ *     the new channel information (Channel Switch Announcement - CSA)
+ *     in the beacon for some time (as defined in the
+ *     %NL80211_ATTR_CH_SWITCH_COUNT parameter) and then change to the
+ *     new channel. Userspace provides the new channel information (using
+ *     %NL80211_ATTR_WIPHY_FREQ and the attributes determining channel
+ *     width). %NL80211_ATTR_CH_SWITCH_BLOCK_TX may be supplied to inform
+ *     other station that transmission must be blocked until the channel
+ *     switch is complete.
+ *
  * @NL80211_CMD_MAX: highest used command number
  * @__NL80211_CMD_AFTER_LAST: internal use
  */
@@ -810,6 +848,11 @@ enum nl80211_commands {
        NL80211_CMD_CRIT_PROTOCOL_START,
        NL80211_CMD_CRIT_PROTOCOL_STOP,
 
+       NL80211_CMD_GET_COALESCE,
+       NL80211_CMD_SET_COALESCE,
+
+       NL80211_CMD_CHANNEL_SWITCH,
+
        /* add new commands above here */
 
        /* used to define NL80211_CMD_MAX below */
@@ -1436,6 +1479,20 @@ enum nl80211_commands {
  *     allowed to be used with the first @NL80211_CMD_SET_STATION command to
  *     update a TDLS peer STA entry.
  *
+ * @NL80211_ATTR_COALESCE_RULE: Coalesce rule information.
+ *
+ * @NL80211_ATTR_CH_SWITCH_COUNT: u32 attribute specifying the number of TBTT's
+ *     until the channel switch event.
+ * @NL80211_ATTR_CH_SWITCH_BLOCK_TX: flag attribute specifying that transmission
+ *     must be blocked on the current channel (before the channel switch
+ *     operation).
+ * @NL80211_ATTR_CSA_IES: Nested set of attributes containing the IE information
+ *     for the time while performing a channel switch.
+ * @NL80211_ATTR_CSA_C_OFF_BEACON: Offset of the channel switch counter
+ *     field in the beacons tail (%NL80211_ATTR_BEACON_TAIL).
+ * @NL80211_ATTR_CSA_C_OFF_PRESP: Offset of the channel switch counter
+ *     field in the probe response (%NL80211_ATTR_PROBE_RESP).
+ *
  * @NL80211_ATTR_MAX: highest attribute number currently defined
  * @__NL80211_ATTR_AFTER_LAST: internal use
  */
@@ -1736,6 +1793,14 @@ enum nl80211_attrs {
 
        NL80211_ATTR_PEER_AID,
 
+       NL80211_ATTR_COALESCE_RULE,
+
+       NL80211_ATTR_CH_SWITCH_COUNT,
+       NL80211_ATTR_CH_SWITCH_BLOCK_TX,
+       NL80211_ATTR_CSA_IES,
+       NL80211_ATTR_CSA_C_OFF_BEACON,
+       NL80211_ATTR_CSA_C_OFF_PRESP,
+
        /* add attributes here, update the policy in nl80211.c */
 
        __NL80211_ATTR_AFTER_LAST,
@@ -2772,6 +2837,21 @@ enum nl80211_chan_width {
        NL80211_CHAN_WIDTH_10,
 };
 
+/**
+ * enum nl80211_bss_scan_width - control channel width for a BSS
+ *
+ * These values are used with the %NL80211_BSS_CHAN_WIDTH attribute.
+ *
+ * @NL80211_BSS_CHAN_WIDTH_20: control channel is 20 MHz wide or compatible
+ * @NL80211_BSS_CHAN_WIDTH_10: control channel is 10 MHz wide
+ * @NL80211_BSS_CHAN_WIDTH_5: control channel is 5 MHz wide
+ */
+enum nl80211_bss_scan_width {
+       NL80211_BSS_CHAN_WIDTH_20,
+       NL80211_BSS_CHAN_WIDTH_10,
+       NL80211_BSS_CHAN_WIDTH_5,
+};
+
 /**
  * enum nl80211_bss - netlink attributes for a BSS
  *
@@ -2796,6 +2876,8 @@ enum nl80211_chan_width {
  * @NL80211_BSS_BEACON_IES: binary attribute containing the raw information
  *     elements from a Beacon frame (bin); not present if no Beacon frame has
  *     yet been received
+ * @NL80211_BSS_CHAN_WIDTH: channel width of the control channel
+ *     (u32, enum nl80211_bss_scan_width)
  * @__NL80211_BSS_AFTER_LAST: internal
  * @NL80211_BSS_MAX: highest BSS attribute
  */
@@ -2812,6 +2894,7 @@ enum nl80211_bss {
        NL80211_BSS_STATUS,
        NL80211_BSS_SEEN_MS_AGO,
        NL80211_BSS_BEACON_IES,
+       NL80211_BSS_CHAN_WIDTH,
 
        /* keep last */
        __NL80211_BSS_AFTER_LAST,
@@ -3060,11 +3143,11 @@ enum nl80211_tx_power_setting {
 };
 
 /**
- * enum nl80211_wowlan_packet_pattern_attr - WoWLAN packet pattern attribute
- * @__NL80211_WOWLAN_PKTPAT_INVALID: invalid number for nested attribute
- * @NL80211_WOWLAN_PKTPAT_PATTERN: the pattern, values where the mask has
+ * enum nl80211_packet_pattern_attr - packet pattern attribute
+ * @__NL80211_PKTPAT_INVALID: invalid number for nested attribute
+ * @NL80211_PKTPAT_PATTERN: the pattern, values where the mask has
  *     a zero bit are ignored
- * @NL80211_WOWLAN_PKTPAT_MASK: pattern mask, must be long enough to have
+ * @NL80211_PKTPAT_MASK: pattern mask, must be long enough to have
  *     a bit for each byte in the pattern. The lowest-order bit corresponds
  *     to the first byte of the pattern, but the bytes of the pattern are
  *     in a little-endian-like format, i.e. the 9th byte of the pattern
@@ -3075,39 +3158,50 @@ enum nl80211_tx_power_setting {
  *     Note that the pattern matching is done as though frames were not
  *     802.11 frames but 802.3 frames, i.e. the frame is fully unpacked
  *     first (including SNAP header unpacking) and then matched.
- * @NL80211_WOWLAN_PKTPAT_OFFSET: packet offset, pattern is matched after
+ * @NL80211_PKTPAT_OFFSET: packet offset, pattern is matched after
  *     these fixed number of bytes of received packet
- * @NUM_NL80211_WOWLAN_PKTPAT: number of attributes
- * @MAX_NL80211_WOWLAN_PKTPAT: max attribute number
+ * @NUM_NL80211_PKTPAT: number of attributes
+ * @MAX_NL80211_PKTPAT: max attribute number
  */
-enum nl80211_wowlan_packet_pattern_attr {
-       __NL80211_WOWLAN_PKTPAT_INVALID,
-       NL80211_WOWLAN_PKTPAT_MASK,
-       NL80211_WOWLAN_PKTPAT_PATTERN,
-       NL80211_WOWLAN_PKTPAT_OFFSET,
+enum nl80211_packet_pattern_attr {
+       __NL80211_PKTPAT_INVALID,
+       NL80211_PKTPAT_MASK,
+       NL80211_PKTPAT_PATTERN,
+       NL80211_PKTPAT_OFFSET,
 
-       NUM_NL80211_WOWLAN_PKTPAT,
-       MAX_NL80211_WOWLAN_PKTPAT = NUM_NL80211_WOWLAN_PKTPAT - 1,
+       NUM_NL80211_PKTPAT,
+       MAX_NL80211_PKTPAT = NUM_NL80211_PKTPAT - 1,
 };
 
 /**
- * struct nl80211_wowlan_pattern_support - pattern support information
+ * struct nl80211_pattern_support - packet pattern support information
  * @max_patterns: maximum number of patterns supported
  * @min_pattern_len: minimum length of each pattern
  * @max_pattern_len: maximum length of each pattern
  * @max_pkt_offset: maximum Rx packet offset
  *
  * This struct is carried in %NL80211_WOWLAN_TRIG_PKT_PATTERN when
- * that is part of %NL80211_ATTR_WOWLAN_TRIGGERS_SUPPORTED in the
- * capability information given by the kernel to userspace.
+ * that is part of %NL80211_ATTR_WOWLAN_TRIGGERS_SUPPORTED or in
+ * %NL80211_ATTR_COALESCE_RULE_PKT_PATTERN when that is part of
+ * %NL80211_ATTR_COALESCE_RULE in the capability information given
+ * by the kernel to userspace.
  */
-struct nl80211_wowlan_pattern_support {
+struct nl80211_pattern_support {
        __u32 max_patterns;
        __u32 min_pattern_len;
        __u32 max_pattern_len;
        __u32 max_pkt_offset;
 } __attribute__((packed));
 
+/* only for backward compatibility */
+#define __NL80211_WOWLAN_PKTPAT_INVALID __NL80211_PKTPAT_INVALID
+#define NL80211_WOWLAN_PKTPAT_MASK NL80211_PKTPAT_MASK
+#define NL80211_WOWLAN_PKTPAT_PATTERN NL80211_PKTPAT_PATTERN
+#define NL80211_WOWLAN_PKTPAT_OFFSET NL80211_PKTPAT_OFFSET
+#define NUM_NL80211_WOWLAN_PKTPAT NUM_NL80211_PKTPAT
+#define MAX_NL80211_WOWLAN_PKTPAT MAX_NL80211_PKTPAT
+#define nl80211_wowlan_pattern_support nl80211_pattern_support
+
 /**
  * enum nl80211_wowlan_triggers - WoWLAN trigger definitions
  * @__NL80211_WOWLAN_TRIG_INVALID: invalid number for nested attributes
@@ -3127,7 +3221,7 @@ struct nl80211_wowlan_pattern_support {
  *     pattern matching is done after the packet is converted to the MSDU.
  *
  *     In %NL80211_ATTR_WOWLAN_TRIGGERS_SUPPORTED, it is a binary attribute
- *     carrying a &struct nl80211_wowlan_pattern_support.
+ *     carrying a &struct nl80211_pattern_support.
  *
  *     When reporting wakeup. it is a u32 attribute containing the 0-based
  *     index of the pattern that caused the wakeup, in the patterns passed
@@ -3284,7 +3378,7 @@ struct nl80211_wowlan_tcp_data_token_feature {
  * @NL80211_WOWLAN_TCP_WAKE_PAYLOAD: wake packet payload, for advertising a
  *     u32 attribute holding the maximum length
  * @NL80211_WOWLAN_TCP_WAKE_MASK: Wake packet payload mask, not used for
- *     feature advertising. The mask works like @NL80211_WOWLAN_PKTPAT_MASK
+ *     feature advertising. The mask works like @NL80211_PKTPAT_MASK
  *     but on the TCP payload only.
  * @NUM_NL80211_WOWLAN_TCP: number of TCP attributes
  * @MAX_NL80211_WOWLAN_TCP: highest attribute number
@@ -3308,6 +3402,55 @@ enum nl80211_wowlan_tcp_attrs {
        MAX_NL80211_WOWLAN_TCP = NUM_NL80211_WOWLAN_TCP - 1
 };
 
+/**
+ * struct nl80211_coalesce_rule_support - coalesce rule support information
+ * @max_rules: maximum number of rules supported
+ * @pat: packet pattern support information
+ * @max_delay: maximum supported coalescing delay in msecs
+ *
+ * This struct is carried in %NL80211_ATTR_COALESCE_RULE in the
+ * capability information given by the kernel to userspace.
+ */
+struct nl80211_coalesce_rule_support {
+       __u32 max_rules;
+       struct nl80211_pattern_support pat;
+       __u32 max_delay;
+} __attribute__((packed));
+
+/**
+ * enum nl80211_attr_coalesce_rule - coalesce rule attribute
+ * @__NL80211_COALESCE_RULE_INVALID: invalid number for nested attribute
+ * @NL80211_ATTR_COALESCE_RULE_DELAY: delay in msecs used for packet coalescing
+ * @NL80211_ATTR_COALESCE_RULE_CONDITION: condition for packet coalescence,
+ *     see &enum nl80211_coalesce_condition.
+ * @NL80211_ATTR_COALESCE_RULE_PKT_PATTERN: packet offset, pattern is matched
+ *     after these fixed number of bytes of received packet
+ * @NUM_NL80211_ATTR_COALESCE_RULE: number of attributes
+ * @NL80211_ATTR_COALESCE_RULE_MAX: max attribute number
+ */
+enum nl80211_attr_coalesce_rule {
+       __NL80211_COALESCE_RULE_INVALID,
+       NL80211_ATTR_COALESCE_RULE_DELAY,
+       NL80211_ATTR_COALESCE_RULE_CONDITION,
+       NL80211_ATTR_COALESCE_RULE_PKT_PATTERN,
+
+       /* keep last */
+       NUM_NL80211_ATTR_COALESCE_RULE,
+       NL80211_ATTR_COALESCE_RULE_MAX = NUM_NL80211_ATTR_COALESCE_RULE - 1
+};
+
+/**
+ * enum nl80211_coalesce_condition - coalesce rule conditions
+ * @NL80211_COALESCE_CONDITION_MATCH: coalaesce Rx packets when patterns
+ *     in a rule are matched.
+ * @NL80211_COALESCE_CONDITION_NO_MATCH: coalesce Rx packets when patterns
+ *     in a rule are not matched.
+ */
+enum nl80211_coalesce_condition {
+       NL80211_COALESCE_CONDITION_MATCH,
+       NL80211_COALESCE_CONDITION_NO_MATCH
+};
+
 /**
  * enum nl80211_iface_limit_attrs - limit attributes
  * @NL80211_IFACE_LIMIT_UNSPEC: (reserved)
index c55efaaa9bb4d889a6fdbee064a00b318f0043cc..52490b0e62b53d6ea1477ae415aca42f54e7015b 100644 (file)
@@ -165,6 +165,7 @@ enum ovs_vport_type {
        OVS_VPORT_TYPE_NETDEV,   /* network device */
        OVS_VPORT_TYPE_INTERNAL, /* network device implemented by datapath */
        OVS_VPORT_TYPE_GRE,      /* GRE tunnel. */
+       OVS_VPORT_TYPE_VXLAN,    /* VXLAN tunnel. */
        __OVS_VPORT_TYPE_MAX
 };
 
@@ -211,6 +212,16 @@ enum ovs_vport_attr {
 
 #define OVS_VPORT_ATTR_MAX (__OVS_VPORT_ATTR_MAX - 1)
 
+/* OVS_VPORT_ATTR_OPTIONS attributes for tunnels.
+ */
+enum {
+       OVS_TUNNEL_ATTR_UNSPEC,
+       OVS_TUNNEL_ATTR_DST_PORT, /* 16-bit UDP port, used by L4 tunnels. */
+       __OVS_TUNNEL_ATTR_MAX
+};
+
+#define OVS_TUNNEL_ATTR_MAX (__OVS_TUNNEL_ATTR_MAX - 1)
+
 /* Flows. */
 
 #define OVS_FLOW_FAMILY  "ovs_flow"
index d8ce17c2459a1f8ce79d67267e19d9a581776439..38fdd648be214c1e94851686ae308a5a42543cbf 100644 (file)
@@ -16,7 +16,7 @@ struct reiserfs_xattr_header {
 };
 
 struct reiserfs_security_handle {
-       char *name;
+       const char *name;
        void *value;
        size_t length;
 };
index 66b466e4ca08df99479751c55ce03f3daa39ebcb..ca451e99b28b92f5a5e1d8fde7d99446e2df06cf 100644 (file)
@@ -28,7 +28,7 @@
  *
  * Please send any bug reports or fixes you make to the
  * email address(es):
- *    lksctp developers <lksctp-developers@lists.sourceforge.net>
+ *    lksctp developers <linux-sctp@vger.kernel.org>
  *
  * Or submit a bug report through the following website:
  *    http://www.sf.net/projects/lksctp
index 9119cc0977bf42a36f2ae8aa51123c99b0b5f9d7..b466487bb5da4917800647af6a040376f1bd2070 100644 (file)
 /* SH-SCI */
 #define PORT_HSCIF     104
 
+/* Tilera TILE-Gx UART */
+#define PORT_TILEGX    105
+
 #endif /* _UAPILINUX_SERIAL_CORE_H */
index a1356d3b54df4bd7f68fa26e3436fd7ff2dbcb71..1bdb4a39d1e1512da2091f1c4e897c5e03527554 100644 (file)
@@ -51,6 +51,10 @@ enum
        IPSTATS_MIB_INBCASTOCTETS,              /* InBcastOctets */
        IPSTATS_MIB_OUTBCASTOCTETS,             /* OutBcastOctets */
        IPSTATS_MIB_CSUMERRORS,                 /* InCsumErrors */
+       IPSTATS_MIB_NOECTPKTS,                  /* InNoECTPkts */
+       IPSTATS_MIB_ECT1PKTS,                   /* InECT1Pkts */
+       IPSTATS_MIB_ECT0PKTS,                   /* InECT0Pkts */
+       IPSTATS_MIB_CEPKTS,                     /* InCEPkts */
        __IPSTATS_MIB_MAX
 };
 
index 8d776ebc4829df669bbb70c4d6331ba21cc88884..377f1e59411d1572eb645b7b80be42347a7513f5 100644 (file)
@@ -111,6 +111,7 @@ enum {
 #define TCP_REPAIR_OPTIONS     22
 #define TCP_FASTOPEN           23      /* Enable FastOpen on listeners */
 #define TCP_TIMESTAMP          24
+#define TCP_NOTSENT_LOWAT      25      /* limit number of unsent bytes in write queue */
 
 struct tcp_repair_opt {
        __u32   opt_code;
index e9ed951e2b09c11bd010520e5ff0119b201ae1c9..414b74be4da1695bf677e599b8aaf5ec8500679c 100644 (file)
@@ -30,7 +30,7 @@ enum uhid_event_type {
        UHID_OPEN,
        UHID_CLOSE,
        UHID_OUTPUT,
-       UHID_OUTPUT_EV,
+       UHID_OUTPUT_EV,                 /* obsolete! */
        UHID_INPUT,
        UHID_FEATURE,
        UHID_FEATURE_ANSWER,
@@ -69,6 +69,8 @@ struct uhid_output_req {
        __u8 rtype;
 } __attribute__((__packed__));
 
+/* Obsolete! Newer kernels will no longer send these events but instead convert
+ * it into raw output reports via UHID_OUTPUT. */
 struct uhid_output_ev_req {
        __u16 type;
        __u16 code;
index c520203fac2f4b466f062e49f743e45391ac2f93..172a7f00780cc41398e54bd1e5c4314c6b1c9627 100644 (file)
@@ -60,7 +60,7 @@
 
 struct virtio_net_config {
        /* The config defining mac address (if VIRTIO_NET_F_MAC) */
-       __u8 mac[6];
+       __u8 mac[ETH_ALEN];
        /* See VIRTIO_NET_F_STATUS and VIRTIO_NET_S_* above */
        __u16 status;
        /* Maximum number of each of transmit and receive queues;
@@ -70,7 +70,9 @@ struct virtio_net_config {
        __u16 max_virtqueue_pairs;
 } __attribute__((packed));
 
-/* This is the first element of the scatter-gather list.  If you don't
+/* This header comes first in the scatter-gather list.
+ * If VIRTIO_F_ANY_LAYOUT is not negotiated, it must
+ * be the first element of the scatter-gather list.  If you don't
  * specify GSO or CSUM features, you can simply ignore the header. */
 struct virtio_net_hdr {
 #define VIRTIO_NET_HDR_F_NEEDS_CSUM    1       // Use csum_start, csum_offset
index 62d356153565a51982cd97be580d8d43647bc003..fd198bc24a3c9d81ed0074fbacce6594cf90ceee 100644 (file)
 #define __LINUX__WIMAX__I2400M_H__
 
 #include <linux/types.h>
-
+#include <linux/if_ether.h>
 
 /*
  * Host Device Interface (HDI) common to all busses
@@ -487,7 +487,7 @@ struct i2400m_tlv_l4_message_versions {
 struct i2400m_tlv_detailed_device_info {
        struct i2400m_tlv_hdr hdr;
        __u8 reserved1[400];
-       __u8 mac_address[6];
+       __u8 mac_address[ETH_ALEN];
        __u8 reserved2[2];
 } __attribute__((packed));
 
index 1f59ea2a4a76b30214a907b2b7ac3199edb029ab..d956c3593f652e01a7df0ffa72ce7ba0642444ed 100644 (file)
@@ -111,7 +111,7 @@ struct hdspm_ltc {
        enum hdspm_ltc_input_format input_format;
 };
 
-#define SNDRV_HDSPM_IOCTL_GET_LTC _IOR('H', 0x46, struct hdspm_mixer_ioctl)
+#define SNDRV_HDSPM_IOCTL_GET_LTC _IOR('H', 0x46, struct hdspm_ltc)
 
 /**
  * The status data reflects the device's current state
index 0f5a2fc69af9971606a1b07185e6f366bf21fbec..c79f3813192675c96ecea7ecd57c41d2fd522422 100644 (file)
 #define ATMEL_LCDC_WIRING_BGR  0
 #define ATMEL_LCDC_WIRING_RGB  1
 
-struct atmel_lcdfb_config;
 
  /* LCD Controller info data structure, stored in device platform_data */
-struct atmel_lcdfb_info {
-       spinlock_t              lock;
-       struct fb_info          *info;
-       void __iomem            *mmio;
-       int                     irq_base;
-       struct work_struct      task;
-
+struct atmel_lcdfb_pdata {
        unsigned int            guard_time;
-       unsigned int            smem_len;
-       struct platform_device  *pdev;
-       struct clk              *bus_clk;
-       struct clk              *lcdc_clk;
-
-#ifdef CONFIG_BACKLIGHT_ATMEL_LCDC
-       struct backlight_device *backlight;
-       u8                      bl_power;
-#endif
        bool                    lcdcon_is_backlight;
        bool                    lcdcon_pol_negative;
-       u8                      saved_lcdcon;
-
        u8                      default_bpp;
        u8                      lcd_wiring_mode;
        unsigned int            default_lcdcon2;
        unsigned int            default_dmacon;
-       void (*atmel_lcdfb_power_control)(int on);
+       void (*atmel_lcdfb_power_control)(struct atmel_lcdfb_pdata *pdata, int on);
        struct fb_monspecs      *default_monspecs;
-       u32                     pseudo_palette[16];
 
-       struct atmel_lcdfb_config *config;
+       struct list_head        pwr_gpios;
 };
 
 #define ATMEL_LCDC_DMABADDR1   0x00
index 46aa3d1c1654776b6b223d6089a7eb1597662afc..4ddd7dc4a61e2c9b2470bf58349559dc4196f938 100644 (file)
@@ -75,8 +75,10 @@ static inline int xen_acpi_get_pxm(acpi_handle h)
        return -ENXIO;
 }
 
-int xen_acpi_notify_hypervisor_state(u8 sleep_state,
+int xen_acpi_notify_hypervisor_sleep(u8 sleep_state,
                                     u32 pm1a_cnt, u32 pm1b_cnd);
+int xen_acpi_notify_hypervisor_extended_sleep(u8 sleep_state,
+                                    u32 val_a, u32 val_b);
 
 static inline int xen_acpi_suspend_lowlevel(void)
 {
@@ -93,7 +95,9 @@ static inline void xen_acpi_sleep_register(void)
 {
        if (xen_initial_domain()) {
                acpi_os_set_prepare_sleep(
-                       &xen_acpi_notify_hypervisor_state);
+                       &xen_acpi_notify_hypervisor_sleep);
+               acpi_os_set_prepare_extended_sleep(
+                       &xen_acpi_notify_hypervisor_extended_sleep);
 
                acpi_suspend_lowlevel = xen_acpi_suspend_lowlevel;
        }
index c57d5f67f702e3160aba466702e3a720412f09ed..f1331e3e7271f052cd96e1954651b6a786194bca 100644 (file)
@@ -152,10 +152,11 @@ DEFINE_GUEST_HANDLE_STRUCT(xenpf_firmware_info_t);
 #define XENPF_enter_acpi_sleep    51
 struct xenpf_enter_acpi_sleep {
        /* IN variables */
-       uint16_t pm1a_cnt_val;      /* PM1a control value. */
-       uint16_t pm1b_cnt_val;      /* PM1b control value. */
+       uint16_t val_a;             /* PM1a control / sleep type A. */
+       uint16_t val_b;             /* PM1b control / sleep type B. */
        uint32_t sleep_state;       /* Which state to enter (Sn). */
-       uint32_t flags;             /* Must be zero. */
+#define XENPF_ACPI_SLEEP_EXTENDED 0x00000001
+       uint32_t flags;             /* XENPF_ACPI_SLEEP_*. */
 };
 DEFINE_GUEST_HANDLE_STRUCT(xenpf_enter_acpi_sleep_t);
 
index 247084be059030162838199c953ebac6409f7e01..28a6b572aecd18e3bf96e8e6414cb96254f6690c 100644 (file)
@@ -1106,7 +1106,6 @@ config IPC_NS
 
 config USER_NS
        bool "User namespace"
-       depends on UIDGID_CONVERTED
        select UIDGID_STRICT_TYPE_CHECKS
 
        default n
@@ -1140,20 +1139,8 @@ config NET_NS
 
 endif # NAMESPACES
 
-config UIDGID_CONVERTED
-       # True if all of the selected software conmponents are known
-       # to have uid_t and gid_t converted to kuid_t and kgid_t
-       # where appropriate and are otherwise safe to use with
-       # the user namespace.
-       bool
-       default y
-
-       # Filesystems
-       depends on XFS_FS = n
-
 config UIDGID_STRICT_TYPE_CHECKS
        bool "Require conversions between uid/gids and their internal representation"
-       depends on UIDGID_CONVERTED
        default n
        help
         While the nececessary conversions are being added to all subsystems this option allows
@@ -1598,7 +1585,7 @@ endchoice
 
 config SLUB_CPU_PARTIAL
        default y
-       depends on SLUB
+       depends on SLUB && SMP
        bool "SLUB per cpu partial cache"
        help
          Per cpu partial caches accellerate objects allocation and freeing
@@ -1666,6 +1653,7 @@ config BASE_SMALL
 
 menuconfig MODULES
        bool "Enable loadable module support"
+       option modules
        help
          Kernel modules are small pieces of compiled code which can
          be inserted in the running kernel, rather than being
index f6c2ce5701e1c3c723d03e3d917c62f584c3a074..a4b67446dc8726dfa06cec8d1460468c3bb6a367 100644 (file)
@@ -464,3 +464,4 @@ bool inode_capable(const struct inode *inode, int cap)
 
        return ns_capable(ns, cap) && kuid_has_mapping(ns, inode->i_uid);
 }
+EXPORT_SYMBOL(inode_capable);
index 781845a013ab23c8f99f2c2153c70c9fd679c45d..7b35ff99395e6c90d5078f032008aac89fb48b2b 100644 (file)
@@ -81,7 +81,7 @@
  */
 #ifdef CONFIG_PROVE_RCU
 DEFINE_MUTEX(cgroup_mutex);
-EXPORT_SYMBOL_GPL(cgroup_mutex);       /* only for task_subsys_state_check() */
+EXPORT_SYMBOL_GPL(cgroup_mutex);       /* only for lockdep */
 #else
 static DEFINE_MUTEX(cgroup_mutex);
 #endif
@@ -117,6 +117,7 @@ struct cfent {
        struct list_head                node;
        struct dentry                   *dentry;
        struct cftype                   *type;
+       struct cgroup_subsys_state      *css;
 
        /* file xattrs */
        struct simple_xattrs            xattrs;
@@ -159,9 +160,9 @@ struct css_id {
  */
 struct cgroup_event {
        /*
-        * Cgroup which the event belongs to.
+        * css which the event belongs to.
         */
-       struct cgroup *cgrp;
+       struct cgroup_subsys_state *css;
        /*
         * Control file which the event associated.
         */
@@ -215,10 +216,30 @@ static u64 cgroup_serial_nr_next = 1;
  */
 static int need_forkexit_callback __read_mostly;
 
-static void cgroup_offline_fn(struct work_struct *work);
+static struct cftype cgroup_base_files[];
+
+static void cgroup_destroy_css_killed(struct cgroup *cgrp);
 static int cgroup_destroy_locked(struct cgroup *cgrp);
-static int cgroup_addrm_files(struct cgroup *cgrp, struct cgroup_subsys *subsys,
-                             struct cftype cfts[], bool is_add);
+static int cgroup_addrm_files(struct cgroup *cgrp, struct cftype cfts[],
+                             bool is_add);
+
+/**
+ * cgroup_css - obtain a cgroup's css for the specified subsystem
+ * @cgrp: the cgroup of interest
+ * @subsys_id: the subsystem of interest
+ *
+ * Return @cgrp's css (cgroup_subsys_state) associated with @subsys_id.
+ * This function must be called either under cgroup_mutex or
+ * rcu_read_lock() and the caller is responsible for pinning the returned
+ * css if it wants to keep accessing it outside the said locks.  This
+ * function may return %NULL if @cgrp doesn't have @subsys_id enabled.
+ */
+static struct cgroup_subsys_state *cgroup_css(struct cgroup *cgrp,
+                                             int subsys_id)
+{
+       return rcu_dereference_check(cgrp->subsys[subsys_id],
+                                    lockdep_is_held(&cgroup_mutex));
+}
 
 /* convenient tests for these bits */
 static inline bool cgroup_is_dead(const struct cgroup *cgrp)
@@ -365,9 +386,11 @@ static struct cgrp_cset_link init_cgrp_cset_link;
 static int cgroup_init_idr(struct cgroup_subsys *ss,
                           struct cgroup_subsys_state *css);
 
-/* css_set_lock protects the list of css_set objects, and the
- * chain of tasks off each css_set.  Nests outside task->alloc_lock
- * due to cgroup_iter_start() */
+/*
+ * css_set_lock protects the list of css_set objects, and the chain of
+ * tasks off each css_set.  Nests outside task->alloc_lock due to
+ * css_task_iter_start().
+ */
 static DEFINE_RWLOCK(css_set_lock);
 static int css_set_count;
 
@@ -392,10 +415,12 @@ static unsigned long css_set_hash(struct cgroup_subsys_state *css[])
        return key;
 }
 
-/* We don't maintain the lists running through each css_set to its
- * task until after the first call to cgroup_iter_start(). This
- * reduces the fork()/exit() overhead for people who have cgroups
- * compiled into their kernel but not actually in use */
+/*
+ * We don't maintain the lists running through each css_set to its task
+ * until after the first call to css_task_iter_start().  This reduces the
+ * fork()/exit() overhead for people who have cgroups compiled into their
+ * kernel but not actually in use.
+ */
 static int use_task_css_set_links __read_mostly;
 
 static void __put_css_set(struct css_set *cset, int taskexit)
@@ -464,7 +489,7 @@ static inline void put_css_set_taskexit(struct css_set *cset)
  * @new_cgrp: cgroup that's being entered by the task
  * @template: desired set of css pointers in css_set (pre-calculated)
  *
- * Returns true if "cg" matches "old_cg" except for the hierarchy
+ * Returns true if "cset" matches "old_cset" except for the hierarchy
  * which "new_cgrp" belongs to, for which it should match "new_cgrp".
  */
 static bool compare_css_sets(struct css_set *cset,
@@ -555,7 +580,7 @@ static struct css_set *find_existing_css_set(struct css_set *old_cset,
                        /* Subsystem is in this hierarchy. So we want
                         * the subsystem state from the new
                         * cgroup */
-                       template[i] = cgrp->subsys[i];
+                       template[i] = cgroup_css(cgrp, i);
                } else {
                        /* Subsystem is not in this hierarchy, so we
                         * don't want to change the subsystem state */
@@ -803,8 +828,7 @@ static struct cgroup *task_cgroup_from_root(struct task_struct *task,
 
 static int cgroup_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode);
 static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry);
-static int cgroup_populate_dir(struct cgroup *cgrp, bool base_files,
-                              unsigned long subsys_mask);
+static int cgroup_populate_dir(struct cgroup *cgrp, unsigned long subsys_mask);
 static const struct inode_operations cgroup_dir_inode_operations;
 static const struct file_operations proc_cgroupstats_operations;
 
@@ -813,8 +837,7 @@ static struct backing_dev_info cgroup_backing_dev_info = {
        .capabilities   = BDI_CAP_NO_ACCT_AND_WRITEBACK,
 };
 
-static int alloc_css_id(struct cgroup_subsys *ss,
-                       struct cgroup *parent, struct cgroup *child);
+static int alloc_css_id(struct cgroup_subsys_state *child_css);
 
 static struct inode *cgroup_new_inode(umode_t mode, struct super_block *sb)
 {
@@ -845,15 +868,8 @@ static struct cgroup_name *cgroup_alloc_name(struct dentry *dentry)
 static void cgroup_free_fn(struct work_struct *work)
 {
        struct cgroup *cgrp = container_of(work, struct cgroup, destroy_work);
-       struct cgroup_subsys *ss;
 
        mutex_lock(&cgroup_mutex);
-       /*
-        * Release the subsystem state objects.
-        */
-       for_each_root_subsys(cgrp->root, ss)
-               ss->css_free(cgrp);
-
        cgrp->root->number_of_cgroups--;
        mutex_unlock(&cgroup_mutex);
 
@@ -864,8 +880,6 @@ static void cgroup_free_fn(struct work_struct *work)
         */
        dput(cgrp->parent->dentry);
 
-       ida_simple_remove(&cgrp->root->cgroup_ida, cgrp->id);
-
        /*
         * Drop the active superblock reference that we took when we
         * created the cgroup. This will free cgrp->root, if we are
@@ -956,27 +970,22 @@ static void cgroup_rm_file(struct cgroup *cgrp, const struct cftype *cft)
 }
 
 /**
- * cgroup_clear_directory - selective removal of base and subsystem files
- * @dir: directory containing the files
- * @base_files: true if the base files should be removed
+ * cgroup_clear_dir - remove subsys files in a cgroup directory
+ * @cgrp: target cgroup
  * @subsys_mask: mask of the subsystem ids whose files should be removed
  */
-static void cgroup_clear_directory(struct dentry *dir, bool base_files,
-                                  unsigned long subsys_mask)
+static void cgroup_clear_dir(struct cgroup *cgrp, unsigned long subsys_mask)
 {
-       struct cgroup *cgrp = __d_cgrp(dir);
        struct cgroup_subsys *ss;
+       int i;
 
-       for_each_root_subsys(cgrp->root, ss) {
+       for_each_subsys(ss, i) {
                struct cftype_set *set;
-               if (!test_bit(ss->subsys_id, &subsys_mask))
+
+               if (!test_bit(i, &subsys_mask))
                        continue;
                list_for_each_entry(set, &ss->cftsets, node)
-                       cgroup_addrm_files(cgrp, NULL, set->cfts, false);
-       }
-       if (base_files) {
-               while (!list_empty(&cgrp->files))
-                       cgroup_rm_file(cgrp, NULL);
+                       cgroup_addrm_files(cgrp, set->cfts, false);
        }
 }
 
@@ -986,9 +995,6 @@ static void cgroup_clear_directory(struct dentry *dir, bool base_files,
 static void cgroup_d_remove_dir(struct dentry *dentry)
 {
        struct dentry *parent;
-       struct cgroupfs_root *root = dentry->d_sb->s_fs_info;
-
-       cgroup_clear_directory(dentry, true, root->subsys_mask);
 
        parent = dentry->d_parent;
        spin_lock(&parent->d_lock);
@@ -1009,79 +1015,84 @@ static int rebind_subsystems(struct cgroupfs_root *root,
 {
        struct cgroup *cgrp = &root->top_cgroup;
        struct cgroup_subsys *ss;
-       int i;
+       unsigned long pinned = 0;
+       int i, ret;
 
        BUG_ON(!mutex_is_locked(&cgroup_mutex));
        BUG_ON(!mutex_is_locked(&cgroup_root_mutex));
 
        /* Check that any added subsystems are currently free */
        for_each_subsys(ss, i) {
-               unsigned long bit = 1UL << i;
-
-               if (!(bit & added_mask))
+               if (!(added_mask & (1 << i)))
                        continue;
 
+               /* is the subsystem mounted elsewhere? */
                if (ss->root != &cgroup_dummy_root) {
-                       /* Subsystem isn't free */
-                       return -EBUSY;
+                       ret = -EBUSY;
+                       goto out_put;
                }
+
+               /* pin the module */
+               if (!try_module_get(ss->module)) {
+                       ret = -ENOENT;
+                       goto out_put;
+               }
+               pinned |= 1 << i;
        }
 
-       /* Currently we don't handle adding/removing subsystems when
-        * any child cgroups exist. This is theoretically supportable
-        * but involves complex error handling, so it's being left until
-        * later */
-       if (root->number_of_cgroups > 1)
-               return -EBUSY;
+       /* subsys could be missing if unloaded between parsing and here */
+       if (added_mask != pinned) {
+               ret = -ENOENT;
+               goto out_put;
+       }
+
+       ret = cgroup_populate_dir(cgrp, added_mask);
+       if (ret)
+               goto out_put;
+
+       /*
+        * Nothing can fail from this point on.  Remove files for the
+        * removed subsystems and rebind each subsystem.
+        */
+       cgroup_clear_dir(cgrp, removed_mask);
 
-       /* Process each subsystem */
        for_each_subsys(ss, i) {
                unsigned long bit = 1UL << i;
 
                if (bit & added_mask) {
                        /* We're binding this subsystem to this hierarchy */
-                       BUG_ON(cgrp->subsys[i]);
-                       BUG_ON(!cgroup_dummy_top->subsys[i]);
-                       BUG_ON(cgroup_dummy_top->subsys[i]->cgroup != cgroup_dummy_top);
+                       BUG_ON(cgroup_css(cgrp, i));
+                       BUG_ON(!cgroup_css(cgroup_dummy_top, i));
+                       BUG_ON(cgroup_css(cgroup_dummy_top, i)->cgroup != cgroup_dummy_top);
+
+                       rcu_assign_pointer(cgrp->subsys[i],
+                                          cgroup_css(cgroup_dummy_top, i));
+                       cgroup_css(cgrp, i)->cgroup = cgrp;
 
-                       cgrp->subsys[i] = cgroup_dummy_top->subsys[i];
-                       cgrp->subsys[i]->cgroup = cgrp;
                        list_move(&ss->sibling, &root->subsys_list);
                        ss->root = root;
                        if (ss->bind)
-                               ss->bind(cgrp);
+                               ss->bind(cgroup_css(cgrp, i));
 
                        /* refcount was already taken, and we're keeping it */
                        root->subsys_mask |= bit;
                } else if (bit & removed_mask) {
                        /* We're removing this subsystem */
-                       BUG_ON(cgrp->subsys[i] != cgroup_dummy_top->subsys[i]);
-                       BUG_ON(cgrp->subsys[i]->cgroup != cgrp);
+                       BUG_ON(cgroup_css(cgrp, i) != cgroup_css(cgroup_dummy_top, i));
+                       BUG_ON(cgroup_css(cgrp, i)->cgroup != cgrp);
 
                        if (ss->bind)
-                               ss->bind(cgroup_dummy_top);
-                       cgroup_dummy_top->subsys[i]->cgroup = cgroup_dummy_top;
-                       cgrp->subsys[i] = NULL;
+                               ss->bind(cgroup_css(cgroup_dummy_top, i));
+
+                       cgroup_css(cgroup_dummy_top, i)->cgroup = cgroup_dummy_top;
+                       RCU_INIT_POINTER(cgrp->subsys[i], NULL);
+
                        cgroup_subsys[i]->root = &cgroup_dummy_root;
                        list_move(&ss->sibling, &cgroup_dummy_root.subsys_list);
 
                        /* subsystem is now free - drop reference on module */
                        module_put(ss->module);
                        root->subsys_mask &= ~bit;
-               } else if (bit & root->subsys_mask) {
-                       /* Subsystem state should already exist */
-                       BUG_ON(!cgrp->subsys[i]);
-                       /*
-                        * a refcount was taken, but we already had one, so
-                        * drop the extra reference.
-                        */
-                       module_put(ss->module);
-#ifdef CONFIG_MODULE_UNLOAD
-                       BUG_ON(ss->module && !module_refcount(ss->module));
-#endif
-               } else {
-                       /* Subsystem state shouldn't exist */
-                       BUG_ON(cgrp->subsys[i]);
                }
        }
 
@@ -1092,6 +1103,12 @@ static int rebind_subsystems(struct cgroupfs_root *root,
        root->flags |= CGRP_ROOT_SUBSYS_BOUND;
 
        return 0;
+
+out_put:
+       for_each_subsys(ss, i)
+               if (pinned & (1 << i))
+                       module_put(ss->module);
+       return ret;
 }
 
 static int cgroup_show_options(struct seq_file *seq, struct dentry *dentry)
@@ -1142,7 +1159,6 @@ static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts)
        char *token, *o = data;
        bool all_ss = false, one_ss = false;
        unsigned long mask = (unsigned long)-1;
-       bool module_pin_failed = false;
        struct cgroup_subsys *ss;
        int i;
 
@@ -1285,52 +1301,9 @@ static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts)
        if (!opts->subsys_mask && !opts->name)
                return -EINVAL;
 
-       /*
-        * Grab references on all the modules we'll need, so the subsystems
-        * don't dance around before rebind_subsystems attaches them. This may
-        * take duplicate reference counts on a subsystem that's already used,
-        * but rebind_subsystems handles this case.
-        */
-       for_each_subsys(ss, i) {
-               if (!(opts->subsys_mask & (1UL << i)))
-                       continue;
-               if (!try_module_get(cgroup_subsys[i]->module)) {
-                       module_pin_failed = true;
-                       break;
-               }
-       }
-       if (module_pin_failed) {
-               /*
-                * oops, one of the modules was going away. this means that we
-                * raced with a module_delete call, and to the user this is
-                * essentially a "subsystem doesn't exist" case.
-                */
-               for (i--; i >= 0; i--) {
-                       /* drop refcounts only on the ones we took */
-                       unsigned long bit = 1UL << i;
-
-                       if (!(bit & opts->subsys_mask))
-                               continue;
-                       module_put(cgroup_subsys[i]->module);
-               }
-               return -ENOENT;
-       }
-
        return 0;
 }
 
-static void drop_parsed_module_refcounts(unsigned long subsys_mask)
-{
-       struct cgroup_subsys *ss;
-       int i;
-
-       mutex_lock(&cgroup_mutex);
-       for_each_subsys(ss, i)
-               if (subsys_mask & (1UL << i))
-                       module_put(cgroup_subsys[i]->module);
-       mutex_unlock(&cgroup_mutex);
-}
-
 static int cgroup_remount(struct super_block *sb, int *flags, char *data)
 {
        int ret = 0;
@@ -1370,22 +1343,15 @@ static int cgroup_remount(struct super_block *sb, int *flags, char *data)
                goto out_unlock;
        }
 
-       /*
-        * Clear out the files of subsystems that should be removed, do
-        * this before rebind_subsystems, since rebind_subsystems may
-        * change this hierarchy's subsys_list.
-        */
-       cgroup_clear_directory(cgrp->dentry, false, removed_mask);
-
-       ret = rebind_subsystems(root, added_mask, removed_mask);
-       if (ret) {
-               /* rebind_subsystems failed, re-populate the removed files */
-               cgroup_populate_dir(cgrp, false, removed_mask);
+       /* remounting is not allowed for populated hierarchies */
+       if (root->number_of_cgroups > 1) {
+               ret = -EBUSY;
                goto out_unlock;
        }
 
-       /* re-populate subsystem files */
-       cgroup_populate_dir(cgrp, false, added_mask);
+       ret = rebind_subsystems(root, added_mask, removed_mask);
+       if (ret)
+               goto out_unlock;
 
        if (opts.release_agent)
                strcpy(root->release_agent_path, opts.release_agent);
@@ -1395,8 +1361,6 @@ static int cgroup_remount(struct super_block *sb, int *flags, char *data)
        mutex_unlock(&cgroup_root_mutex);
        mutex_unlock(&cgroup_mutex);
        mutex_unlock(&cgrp->dentry->d_inode->i_mutex);
-       if (ret)
-               drop_parsed_module_refcounts(opts.subsys_mask);
        return ret;
 }
 
@@ -1416,6 +1380,7 @@ static void init_cgroup_housekeeping(struct cgroup *cgrp)
        INIT_LIST_HEAD(&cgrp->release_list);
        INIT_LIST_HEAD(&cgrp->pidlists);
        mutex_init(&cgrp->pidlist_mutex);
+       cgrp->dummy_css.cgroup = cgrp;
        INIT_LIST_HEAD(&cgrp->event_list);
        spin_lock_init(&cgrp->event_list_lock);
        simple_xattrs_init(&cgrp->xattrs);
@@ -1431,6 +1396,7 @@ static void init_cgroup_root(struct cgroupfs_root *root)
        cgrp->root = root;
        RCU_INIT_POINTER(cgrp->name, &root_cgroup_name);
        init_cgroup_housekeeping(cgrp);
+       idr_init(&root->cgroup_idr);
 }
 
 static int cgroup_init_root_id(struct cgroupfs_root *root, int start, int end)
@@ -1503,7 +1469,6 @@ static struct cgroupfs_root *cgroup_root_from_opts(struct cgroup_sb_opts *opts)
         */
        root->subsys_mask = opts->subsys_mask;
        root->flags = opts->flags;
-       ida_init(&root->cgroup_ida);
        if (opts->release_agent)
                strcpy(root->release_agent_path, opts->release_agent);
        if (opts->name)
@@ -1519,7 +1484,7 @@ static void cgroup_free_root(struct cgroupfs_root *root)
                /* hierarhcy ID shoulid already have been released */
                WARN_ON_ONCE(root->hierarchy_id);
 
-               ida_destroy(&root->cgroup_ida);
+               idr_destroy(&root->cgroup_idr);
                kfree(root);
        }
 }
@@ -1584,7 +1549,9 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type,
        int ret = 0;
        struct super_block *sb;
        struct cgroupfs_root *new_root;
+       struct list_head tmp_links;
        struct inode *inode;
+       const struct cred *cred;
 
        /* First find the desired set of subsystems */
        mutex_lock(&cgroup_mutex);
@@ -1600,7 +1567,7 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type,
        new_root = cgroup_root_from_opts(&opts);
        if (IS_ERR(new_root)) {
                ret = PTR_ERR(new_root);
-               goto drop_modules;
+               goto out_err;
        }
        opts.new_root = new_root;
 
@@ -1609,17 +1576,15 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type,
        if (IS_ERR(sb)) {
                ret = PTR_ERR(sb);
                cgroup_free_root(opts.new_root);
-               goto drop_modules;
+               goto out_err;
        }
 
        root = sb->s_fs_info;
        BUG_ON(!root);
        if (root == opts.new_root) {
                /* We used the new root structure, so this is a new hierarchy */
-               struct list_head tmp_links;
                struct cgroup *root_cgrp = &root->top_cgroup;
                struct cgroupfs_root *existing_root;
-               const struct cred *cred;
                int i;
                struct css_set *cset;
 
@@ -1634,6 +1599,11 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type,
                mutex_lock(&cgroup_mutex);
                mutex_lock(&cgroup_root_mutex);
 
+               root_cgrp->id = idr_alloc(&root->cgroup_idr, root_cgrp,
+                                          0, 1, GFP_KERNEL);
+               if (root_cgrp->id < 0)
+                       goto unlock_drop;
+
                /* Check for name clashes with existing mounts */
                ret = -EBUSY;
                if (strlen(root->name))
@@ -1657,26 +1627,37 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type,
                if (ret)
                        goto unlock_drop;
 
+               sb->s_root->d_fsdata = root_cgrp;
+               root_cgrp->dentry = sb->s_root;
+
+               /*
+                * We're inside get_sb() and will call lookup_one_len() to
+                * create the root files, which doesn't work if SELinux is
+                * in use.  The following cred dancing somehow works around
+                * it.  See 2ce9738ba ("cgroupfs: use init_cred when
+                * populating new cgroupfs mount") for more details.
+                */
+               cred = override_creds(&init_cred);
+
+               ret = cgroup_addrm_files(root_cgrp, cgroup_base_files, true);
+               if (ret)
+                       goto rm_base_files;
+
                ret = rebind_subsystems(root, root->subsys_mask, 0);
-               if (ret == -EBUSY) {
-                       free_cgrp_cset_links(&tmp_links);
-                       goto unlock_drop;
-               }
+               if (ret)
+                       goto rm_base_files;
+
+               revert_creds(cred);
+
                /*
                 * There must be no failure case after here, since rebinding
                 * takes care of subsystems' refcounts, which are explicitly
                 * dropped in the failure exit path.
                 */
 
-               /* EBUSY should be the only error here */
-               BUG_ON(ret);
-
                list_add(&root->root_list, &cgroup_roots);
                cgroup_root_count++;
 
-               sb->s_root->d_fsdata = root_cgrp;
-               root->top_cgroup.dentry = sb->s_root;
-
                /* Link the top cgroup in this hierarchy into all
                 * the css_set objects */
                write_lock(&css_set_lock);
@@ -1689,9 +1670,6 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type,
                BUG_ON(!list_empty(&root_cgrp->children));
                BUG_ON(root->number_of_cgroups != 1);
 
-               cred = override_creds(&init_cred);
-               cgroup_populate_dir(root_cgrp, true, root->subsys_mask);
-               revert_creds(cred);
                mutex_unlock(&cgroup_root_mutex);
                mutex_unlock(&cgroup_mutex);
                mutex_unlock(&inode->i_mutex);
@@ -1711,15 +1689,16 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type,
                                pr_warning("cgroup: new mount options do not match the existing superblock, will be ignored\n");
                        }
                }
-
-               /* no subsys rebinding, so refcounts don't change */
-               drop_parsed_module_refcounts(opts.subsys_mask);
        }
 
        kfree(opts.release_agent);
        kfree(opts.name);
        return dget(sb->s_root);
 
+ rm_base_files:
+       free_cgrp_cset_links(&tmp_links);
+       cgroup_addrm_files(&root->top_cgroup, cgroup_base_files, false);
+       revert_creds(cred);
  unlock_drop:
        cgroup_exit_root_id(root);
        mutex_unlock(&cgroup_root_mutex);
@@ -1727,8 +1706,6 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type,
        mutex_unlock(&inode->i_mutex);
  drop_new_super:
        deactivate_locked_super(sb);
- drop_modules:
-       drop_parsed_module_refcounts(opts.subsys_mask);
  out_err:
        kfree(opts.release_agent);
        kfree(opts.name);
@@ -1746,6 +1723,7 @@ static void cgroup_kill_sb(struct super_block *sb) {
        BUG_ON(root->number_of_cgroups != 1);
        BUG_ON(!list_empty(&cgrp->children));
 
+       mutex_lock(&cgrp->dentry->d_inode->i_mutex);
        mutex_lock(&cgroup_mutex);
        mutex_lock(&cgroup_root_mutex);
 
@@ -1778,6 +1756,7 @@ static void cgroup_kill_sb(struct super_block *sb) {
 
        mutex_unlock(&cgroup_root_mutex);
        mutex_unlock(&cgroup_mutex);
+       mutex_unlock(&cgrp->dentry->d_inode->i_mutex);
 
        simple_xattrs_free(&cgrp->xattrs);
 
@@ -1889,7 +1868,7 @@ EXPORT_SYMBOL_GPL(task_cgroup_path);
 struct task_and_cgroup {
        struct task_struct      *task;
        struct cgroup           *cgrp;
-       struct css_set          *cg;
+       struct css_set          *cset;
 };
 
 struct cgroup_taskset {
@@ -1939,18 +1918,20 @@ struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset)
 EXPORT_SYMBOL_GPL(cgroup_taskset_next);
 
 /**
- * cgroup_taskset_cur_cgroup - return the matching cgroup for the current task
+ * cgroup_taskset_cur_css - return the matching css for the current task
  * @tset: taskset of interest
+ * @subsys_id: the ID of the target subsystem
  *
- * Return the cgroup for the current (last returned) task of @tset.  This
- * function must be preceded by either cgroup_taskset_first() or
- * cgroup_taskset_next().
+ * Return the css for the current (last returned) task of @tset for
+ * subsystem specified by @subsys_id.  This function must be preceded by
+ * either cgroup_taskset_first() or cgroup_taskset_next().
  */
-struct cgroup *cgroup_taskset_cur_cgroup(struct cgroup_taskset *tset)
+struct cgroup_subsys_state *cgroup_taskset_cur_css(struct cgroup_taskset *tset,
+                                                  int subsys_id)
 {
-       return tset->cur_cgrp;
+       return cgroup_css(tset->cur_cgrp, subsys_id);
 }
-EXPORT_SYMBOL_GPL(cgroup_taskset_cur_cgroup);
+EXPORT_SYMBOL_GPL(cgroup_taskset_cur_css);
 
 /**
  * cgroup_taskset_size - return the number of tasks in taskset
@@ -2089,8 +2070,10 @@ static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk,
         * step 1: check that we can legitimately attach to the cgroup.
         */
        for_each_root_subsys(root, ss) {
+               struct cgroup_subsys_state *css = cgroup_css(cgrp, ss->subsys_id);
+
                if (ss->can_attach) {
-                       retval = ss->can_attach(cgrp, &tset);
+                       retval = ss->can_attach(css, &tset);
                        if (retval) {
                                failed_ss = ss;
                                goto out_cancel_attach;
@@ -2107,8 +2090,8 @@ static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk,
 
                tc = flex_array_get(group, i);
                old_cset = task_css_set(tc->task);
-               tc->cg = find_css_set(old_cset, cgrp);
-               if (!tc->cg) {
+               tc->cset = find_css_set(old_cset, cgrp);
+               if (!tc->cset) {
                        retval = -ENOMEM;
                        goto out_put_css_set_refs;
                }
@@ -2121,7 +2104,7 @@ static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk,
         */
        for (i = 0; i < group_size; i++) {
                tc = flex_array_get(group, i);
-               cgroup_task_migrate(tc->cgrp, tc->task, tc->cg);
+               cgroup_task_migrate(tc->cgrp, tc->task, tc->cset);
        }
        /* nothing is sensitive to fork() after this point. */
 
@@ -2129,8 +2112,10 @@ static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk,
         * step 4: do subsystem attach callbacks.
         */
        for_each_root_subsys(root, ss) {
+               struct cgroup_subsys_state *css = cgroup_css(cgrp, ss->subsys_id);
+
                if (ss->attach)
-                       ss->attach(cgrp, &tset);
+                       ss->attach(css, &tset);
        }
 
        /*
@@ -2141,18 +2126,20 @@ out_put_css_set_refs:
        if (retval) {
                for (i = 0; i < group_size; i++) {
                        tc = flex_array_get(group, i);
-                       if (!tc->cg)
+                       if (!tc->cset)
                                break;
-                       put_css_set(tc->cg);
+                       put_css_set(tc->cset);
                }
        }
 out_cancel_attach:
        if (retval) {
                for_each_root_subsys(root, ss) {
+                       struct cgroup_subsys_state *css = cgroup_css(cgrp, ss->subsys_id);
+
                        if (ss == failed_ss)
                                break;
                        if (ss->cancel_attach)
-                               ss->cancel_attach(cgrp, &tset);
+                               ss->cancel_attach(css, &tset);
                }
        }
 out_free_group_list:
@@ -2253,9 +2240,9 @@ int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
 
        mutex_lock(&cgroup_mutex);
        for_each_active_root(root) {
-               struct cgroup *from_cg = task_cgroup_from_root(from, root);
+               struct cgroup *from_cgrp = task_cgroup_from_root(from, root);
 
-               retval = cgroup_attach_task(from_cg, tsk, false);
+               retval = cgroup_attach_task(from_cgrp, tsk, false);
                if (retval)
                        break;
        }
@@ -2265,34 +2252,38 @@ int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
 }
 EXPORT_SYMBOL_GPL(cgroup_attach_task_all);
 
-static int cgroup_tasks_write(struct cgroup *cgrp, struct cftype *cft, u64 pid)
+static int cgroup_tasks_write(struct cgroup_subsys_state *css,
+                             struct cftype *cft, u64 pid)
 {
-       return attach_task_by_pid(cgrp, pid, false);
+       return attach_task_by_pid(css->cgroup, pid, false);
 }
 
-static int cgroup_procs_write(struct cgroup *cgrp, struct cftype *cft, u64 tgid)
+static int cgroup_procs_write(struct cgroup_subsys_state *css,
+                             struct cftype *cft, u64 tgid)
 {
-       return attach_task_by_pid(cgrp, tgid, true);
+       return attach_task_by_pid(css->cgroup, tgid, true);
 }
 
-static int cgroup_release_agent_write(struct cgroup *cgrp, struct cftype *cft,
-                                     const char *buffer)
+static int cgroup_release_agent_write(struct cgroup_subsys_state *css,
+                                     struct cftype *cft, const char *buffer)
 {
-       BUILD_BUG_ON(sizeof(cgrp->root->release_agent_path) < PATH_MAX);
+       BUILD_BUG_ON(sizeof(css->cgroup->root->release_agent_path) < PATH_MAX);
        if (strlen(buffer) >= PATH_MAX)
                return -EINVAL;
-       if (!cgroup_lock_live_group(cgrp))
+       if (!cgroup_lock_live_group(css->cgroup))
                return -ENODEV;
        mutex_lock(&cgroup_root_mutex);
-       strcpy(cgrp->root->release_agent_path, buffer);
+       strcpy(css->cgroup->root->release_agent_path, buffer);
        mutex_unlock(&cgroup_root_mutex);
        mutex_unlock(&cgroup_mutex);
        return 0;
 }
 
-static int cgroup_release_agent_show(struct cgroup *cgrp, struct cftype *cft,
-                                    struct seq_file *seq)
+static int cgroup_release_agent_show(struct cgroup_subsys_state *css,
+                                    struct cftype *cft, struct seq_file *seq)
 {
+       struct cgroup *cgrp = css->cgroup;
+
        if (!cgroup_lock_live_group(cgrp))
                return -ENODEV;
        seq_puts(seq, cgrp->root->release_agent_path);
@@ -2301,20 +2292,20 @@ static int cgroup_release_agent_show(struct cgroup *cgrp, struct cftype *cft,
        return 0;
 }
 
-static int cgroup_sane_behavior_show(struct cgroup *cgrp, struct cftype *cft,
-                                    struct seq_file *seq)
+static int cgroup_sane_behavior_show(struct cgroup_subsys_state *css,
+                                    struct cftype *cft, struct seq_file *seq)
 {
-       seq_printf(seq, "%d\n", cgroup_sane_behavior(cgrp));
+       seq_printf(seq, "%d\n", cgroup_sane_behavior(css->cgroup));
        return 0;
 }
 
 /* A buffer size big enough for numbers or short strings */
 #define CGROUP_LOCAL_BUFFER_SIZE 64
 
-static ssize_t cgroup_write_X64(struct cgroup *cgrp, struct cftype *cft,
-                               struct file *file,
-                               const char __user *userbuf,
-                               size_t nbytes, loff_t *unused_ppos)
+static ssize_t cgroup_write_X64(struct cgroup_subsys_state *css,
+                               struct cftype *cft, struct file *file,
+                               const char __user *userbuf, size_t nbytes,
+                               loff_t *unused_ppos)
 {
        char buffer[CGROUP_LOCAL_BUFFER_SIZE];
        int retval = 0;
@@ -2332,22 +2323,22 @@ static ssize_t cgroup_write_X64(struct cgroup *cgrp, struct cftype *cft,
                u64 val = simple_strtoull(strstrip(buffer), &end, 0);
                if (*end)
                        return -EINVAL;
-               retval = cft->write_u64(cgrp, cft, val);
+               retval = cft->write_u64(css, cft, val);
        } else {
                s64 val = simple_strtoll(strstrip(buffer), &end, 0);
                if (*end)
                        return -EINVAL;
-               retval = cft->write_s64(cgrp, cft, val);
+               retval = cft->write_s64(css, cft, val);
        }
        if (!retval)
                retval = nbytes;
        return retval;
 }
 
-static ssize_t cgroup_write_string(struct cgroup *cgrp, struct cftype *cft,
-                                  struct file *file,
-                                  const char __user *userbuf,
-                                  size_t nbytes, loff_t *unused_ppos)
+static ssize_t cgroup_write_string(struct cgroup_subsys_state *css,
+                                  struct cftype *cft, struct file *file,
+                                  const char __user *userbuf, size_t nbytes,
+                                  loff_t *unused_ppos)
 {
        char local_buffer[CGROUP_LOCAL_BUFFER_SIZE];
        int retval = 0;
@@ -2370,7 +2361,7 @@ static ssize_t cgroup_write_string(struct cgroup *cgrp, struct cftype *cft,
        }
 
        buffer[nbytes] = 0;     /* nul-terminate */
-       retval = cft->write_string(cgrp, cft, strstrip(buffer));
+       retval = cft->write_string(css, cft, strstrip(buffer));
        if (!retval)
                retval = nbytes;
 out:
@@ -2380,65 +2371,60 @@ out:
 }
 
 static ssize_t cgroup_file_write(struct file *file, const char __user *buf,
-                                               size_t nbytes, loff_t *ppos)
+                                size_t nbytes, loff_t *ppos)
 {
+       struct cfent *cfe = __d_cfe(file->f_dentry);
        struct cftype *cft = __d_cft(file->f_dentry);
-       struct cgroup *cgrp = __d_cgrp(file->f_dentry->d_parent);
+       struct cgroup_subsys_state *css = cfe->css;
 
-       if (cgroup_is_dead(cgrp))
-               return -ENODEV;
        if (cft->write)
-               return cft->write(cgrp, cft, file, buf, nbytes, ppos);
+               return cft->write(css, cft, file, buf, nbytes, ppos);
        if (cft->write_u64 || cft->write_s64)
-               return cgroup_write_X64(cgrp, cft, file, buf, nbytes, ppos);
+               return cgroup_write_X64(css, cft, file, buf, nbytes, ppos);
        if (cft->write_string)
-               return cgroup_write_string(cgrp, cft, file, buf, nbytes, ppos);
+               return cgroup_write_string(css, cft, file, buf, nbytes, ppos);
        if (cft->trigger) {
-               int ret = cft->trigger(cgrp, (unsigned int)cft->private);
+               int ret = cft->trigger(css, (unsigned int)cft->private);
                return ret ? ret : nbytes;
        }
        return -EINVAL;
 }
 
-static ssize_t cgroup_read_u64(struct cgroup *cgrp, struct cftype *cft,
-                              struct file *file,
-                              char __user *buf, size_t nbytes,
-                              loff_t *ppos)
+static ssize_t cgroup_read_u64(struct cgroup_subsys_state *css,
+                              struct cftype *cft, struct file *file,
+                              char __user *buf, size_t nbytes, loff_t *ppos)
 {
        char tmp[CGROUP_LOCAL_BUFFER_SIZE];
-       u64 val = cft->read_u64(cgrp, cft);
+       u64 val = cft->read_u64(css, cft);
        int len = sprintf(tmp, "%llu\n", (unsigned long long) val);
 
        return simple_read_from_buffer(buf, nbytes, ppos, tmp, len);
 }
 
-static ssize_t cgroup_read_s64(struct cgroup *cgrp, struct cftype *cft,
-                              struct file *file,
-                              char __user *buf, size_t nbytes,
-                              loff_t *ppos)
+static ssize_t cgroup_read_s64(struct cgroup_subsys_state *css,
+                              struct cftype *cft, struct file *file,
+                              char __user *buf, size_t nbytes, loff_t *ppos)
 {
        char tmp[CGROUP_LOCAL_BUFFER_SIZE];
-       s64 val = cft->read_s64(cgrp, cft);
+       s64 val = cft->read_s64(css, cft);
        int len = sprintf(tmp, "%lld\n", (long long) val);
 
        return simple_read_from_buffer(buf, nbytes, ppos, tmp, len);
 }
 
 static ssize_t cgroup_file_read(struct file *file, char __user *buf,
-                                  size_t nbytes, loff_t *ppos)
+                               size_t nbytes, loff_t *ppos)
 {
+       struct cfent *cfe = __d_cfe(file->f_dentry);
        struct cftype *cft = __d_cft(file->f_dentry);
-       struct cgroup *cgrp = __d_cgrp(file->f_dentry->d_parent);
-
-       if (cgroup_is_dead(cgrp))
-               return -ENODEV;
+       struct cgroup_subsys_state *css = cfe->css;
 
        if (cft->read)
-               return cft->read(cgrp, cft, file, buf, nbytes, ppos);
+               return cft->read(css, cft, file, buf, nbytes, ppos);
        if (cft->read_u64)
-               return cgroup_read_u64(cgrp, cft, file, buf, nbytes, ppos);
+               return cgroup_read_u64(css, cft, file, buf, nbytes, ppos);
        if (cft->read_s64)
-               return cgroup_read_s64(cgrp, cft, file, buf, nbytes, ppos);
+               return cgroup_read_s64(css, cft, file, buf, nbytes, ppos);
        return -EINVAL;
 }
 
@@ -2447,11 +2433,6 @@ static ssize_t cgroup_file_read(struct file *file, char __user *buf,
  * supports string->u64 maps, but can be extended in future.
  */
 
-struct cgroup_seqfile_state {
-       struct cftype *cft;
-       struct cgroup *cgroup;
-};
-
 static int cgroup_map_add(struct cgroup_map_cb *cb, const char *key, u64 value)
 {
        struct seq_file *sf = cb->state;
@@ -2460,69 +2441,90 @@ static int cgroup_map_add(struct cgroup_map_cb *cb, const char *key, u64 value)
 
 static int cgroup_seqfile_show(struct seq_file *m, void *arg)
 {
-       struct cgroup_seqfile_state *state = m->private;
-       struct cftype *cft = state->cft;
+       struct cfent *cfe = m->private;
+       struct cftype *cft = cfe->type;
+       struct cgroup_subsys_state *css = cfe->css;
+
        if (cft->read_map) {
                struct cgroup_map_cb cb = {
                        .fill = cgroup_map_add,
                        .state = m,
                };
-               return cft->read_map(state->cgroup, cft, &cb);
+               return cft->read_map(css, cft, &cb);
        }
-       return cft->read_seq_string(state->cgroup, cft, m);
-}
-
-static int cgroup_seqfile_release(struct inode *inode, struct file *file)
-{
-       struct seq_file *seq = file->private_data;
-       kfree(seq->private);
-       return single_release(inode, file);
+       return cft->read_seq_string(css, cft, m);
 }
 
 static const struct file_operations cgroup_seqfile_operations = {
        .read = seq_read,
        .write = cgroup_file_write,
        .llseek = seq_lseek,
-       .release = cgroup_seqfile_release,
+       .release = single_release,
 };
 
 static int cgroup_file_open(struct inode *inode, struct file *file)
 {
+       struct cfent *cfe = __d_cfe(file->f_dentry);
+       struct cftype *cft = __d_cft(file->f_dentry);
+       struct cgroup *cgrp = __d_cgrp(cfe->dentry->d_parent);
+       struct cgroup_subsys_state *css;
        int err;
-       struct cftype *cft;
 
        err = generic_file_open(inode, file);
        if (err)
                return err;
-       cft = __d_cft(file->f_dentry);
 
-       if (cft->read_map || cft->read_seq_string) {
-               struct cgroup_seqfile_state *state;
+       /*
+        * If the file belongs to a subsystem, pin the css.  Will be
+        * unpinned either on open failure or release.  This ensures that
+        * @css stays alive for all file operations.
+        */
+       rcu_read_lock();
+       if (cft->ss) {
+               css = cgroup_css(cgrp, cft->ss->subsys_id);
+               if (!css_tryget(css))
+                       css = NULL;
+       } else {
+               css = &cgrp->dummy_css;
+       }
+       rcu_read_unlock();
 
-               state = kzalloc(sizeof(*state), GFP_USER);
-               if (!state)
-                       return -ENOMEM;
+       if (!css)
+               return -ENODEV;
 
-               state->cft = cft;
-               state->cgroup = __d_cgrp(file->f_dentry->d_parent);
+       /*
+        * @cfe->css is used by read/write/close to determine the
+        * associated css.  @file->private_data would be a better place but
+        * that's already used by seqfile.  Multiple accessors may use it
+        * simultaneously which is okay as the association never changes.
+        */
+       WARN_ON_ONCE(cfe->css && cfe->css != css);
+       cfe->css = css;
+
+       if (cft->read_map || cft->read_seq_string) {
                file->f_op = &cgroup_seqfile_operations;
-               err = single_open(file, cgroup_seqfile_show, state);
-               if (err < 0)
-                       kfree(state);
-       } else if (cft->open)
+               err = single_open(file, cgroup_seqfile_show, cfe);
+       } else if (cft->open) {
                err = cft->open(inode, file);
-       else
-               err = 0;
+       }
 
+       if (css->ss && err)
+               css_put(css);
        return err;
 }
 
 static int cgroup_file_release(struct inode *inode, struct file *file)
 {
+       struct cfent *cfe = __d_cfe(file->f_dentry);
        struct cftype *cft = __d_cft(file->f_dentry);
+       struct cgroup_subsys_state *css = cfe->css;
+       int ret = 0;
+
        if (cft->release)
-               return cft->release(inode, file);
-       return 0;
+               ret = cft->release(inode, file);
+       if (css->ss)
+               css_put(css);
+       return ret;
 }
 
 /*
@@ -2736,8 +2738,7 @@ static umode_t cgroup_file_mode(const struct cftype *cft)
        return mode;
 }
 
-static int cgroup_add_file(struct cgroup *cgrp, struct cgroup_subsys *subsys,
-                          struct cftype *cft)
+static int cgroup_add_file(struct cgroup *cgrp, struct cftype *cft)
 {
        struct dentry *dir = cgrp->dentry;
        struct cgroup *parent = __d_cgrp(dir);
@@ -2747,8 +2748,8 @@ static int cgroup_add_file(struct cgroup *cgrp, struct cgroup_subsys *subsys,
        umode_t mode;
        char name[MAX_CGROUP_TYPE_NAMELEN + MAX_CFTYPE_NAME + 2] = { 0 };
 
-       if (subsys && !(cgrp->root->flags & CGRP_ROOT_NOPREFIX)) {
-               strcpy(name, subsys->name);
+       if (cft->ss && !(cgrp->root->flags & CGRP_ROOT_NOPREFIX)) {
+               strcpy(name, cft->ss->name);
                strcat(name, ".");
        }
        strcat(name, cft->name);
@@ -2782,11 +2783,25 @@ out:
        return error;
 }
 
-static int cgroup_addrm_files(struct cgroup *cgrp, struct cgroup_subsys *subsys,
-                             struct cftype cfts[], bool is_add)
+/**
+ * cgroup_addrm_files - add or remove files to a cgroup directory
+ * @cgrp: the target cgroup
+ * @cfts: array of cftypes to be added
+ * @is_add: whether to add or remove
+ *
+ * Depending on @is_add, add or remove files defined by @cfts on @cgrp.
+ * For removals, this function never fails.  If addition fails, this
+ * function doesn't remove files already added.  The caller is responsible
+ * for cleaning up.
+ */
+static int cgroup_addrm_files(struct cgroup *cgrp, struct cftype cfts[],
+                             bool is_add)
 {
        struct cftype *cft;
-       int err, ret = 0;
+       int ret;
+
+       lockdep_assert_held(&cgrp->dentry->d_inode->i_mutex);
+       lockdep_assert_held(&cgroup_mutex);
 
        for (cft = cfts; cft->name[0] != '\0'; cft++) {
                /* does cft->flags tell us to skip this file on @cgrp? */
@@ -2798,16 +2813,17 @@ static int cgroup_addrm_files(struct cgroup *cgrp, struct cgroup_subsys *subsys,
                        continue;
 
                if (is_add) {
-                       err = cgroup_add_file(cgrp, subsys, cft);
-                       if (err)
+                       ret = cgroup_add_file(cgrp, cft);
+                       if (ret) {
                                pr_warn("cgroup_addrm_files: failed to add %s, err=%d\n",
-                                       cft->name, err);
-                       ret = err;
+                                       cft->name, ret);
+                               return ret;
+                       }
                } else {
                        cgroup_rm_file(cgrp, cft);
                }
        }
-       return ret;
+       return 0;
 }
 
 static void cgroup_cfts_prepare(void)
@@ -2816,28 +2832,30 @@ static void cgroup_cfts_prepare(void)
        /*
         * Thanks to the entanglement with vfs inode locking, we can't walk
         * the existing cgroups under cgroup_mutex and create files.
-        * Instead, we use cgroup_for_each_descendant_pre() and drop RCU
-        * read lock before calling cgroup_addrm_files().
+        * Instead, we use css_for_each_descendant_pre() and drop RCU read
+        * lock before calling cgroup_addrm_files().
         */
        mutex_lock(&cgroup_mutex);
 }
 
-static void cgroup_cfts_commit(struct cgroup_subsys *ss,
-                              struct cftype *cfts, bool is_add)
+static int cgroup_cfts_commit(struct cftype *cfts, bool is_add)
        __releases(&cgroup_mutex)
 {
        LIST_HEAD(pending);
-       struct cgroup *cgrp, *root = &ss->root->top_cgroup;
+       struct cgroup_subsys *ss = cfts[0].ss;
+       struct cgroup *root = &ss->root->top_cgroup;
        struct super_block *sb = ss->root->sb;
        struct dentry *prev = NULL;
        struct inode *inode;
+       struct cgroup_subsys_state *css;
        u64 update_before;
+       int ret = 0;
 
        /* %NULL @cfts indicates abort and don't bother if @ss isn't attached */
        if (!cfts || ss->root == &cgroup_dummy_root ||
            !atomic_inc_not_zero(&sb->s_active)) {
                mutex_unlock(&cgroup_mutex);
-               return;
+               return 0;
        }
 
        /*
@@ -2849,17 +2867,11 @@ static void cgroup_cfts_commit(struct cgroup_subsys *ss,
 
        mutex_unlock(&cgroup_mutex);
 
-       /* @root always needs to be updated */
-       inode = root->dentry->d_inode;
-       mutex_lock(&inode->i_mutex);
-       mutex_lock(&cgroup_mutex);
-       cgroup_addrm_files(root, ss, cfts, is_add);
-       mutex_unlock(&cgroup_mutex);
-       mutex_unlock(&inode->i_mutex);
-
        /* add/rm files for all cgroups created before */
        rcu_read_lock();
-       cgroup_for_each_descendant_pre(cgrp, root) {
+       css_for_each_descendant_pre(css, cgroup_css(root, ss->subsys_id)) {
+               struct cgroup *cgrp = css->cgroup;
+
                if (cgroup_is_dead(cgrp))
                        continue;
 
@@ -2873,15 +2885,18 @@ static void cgroup_cfts_commit(struct cgroup_subsys *ss,
                mutex_lock(&inode->i_mutex);
                mutex_lock(&cgroup_mutex);
                if (cgrp->serial_nr < update_before && !cgroup_is_dead(cgrp))
-                       cgroup_addrm_files(cgrp, ss, cfts, is_add);
+                       ret = cgroup_addrm_files(cgrp, cfts, is_add);
                mutex_unlock(&cgroup_mutex);
                mutex_unlock(&inode->i_mutex);
 
                rcu_read_lock();
+               if (ret)
+                       break;
        }
        rcu_read_unlock();
        dput(prev);
        deactivate_super(sb);
+       return ret;
 }
 
 /**
@@ -2901,49 +2916,56 @@ static void cgroup_cfts_commit(struct cgroup_subsys *ss,
 int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
 {
        struct cftype_set *set;
+       struct cftype *cft;
+       int ret;
 
        set = kzalloc(sizeof(*set), GFP_KERNEL);
        if (!set)
                return -ENOMEM;
 
+       for (cft = cfts; cft->name[0] != '\0'; cft++)
+               cft->ss = ss;
+
        cgroup_cfts_prepare();
        set->cfts = cfts;
        list_add_tail(&set->node, &ss->cftsets);
-       cgroup_cfts_commit(ss, cfts, true);
-
-       return 0;
+       ret = cgroup_cfts_commit(cfts, true);
+       if (ret)
+               cgroup_rm_cftypes(cfts);
+       return ret;
 }
 EXPORT_SYMBOL_GPL(cgroup_add_cftypes);
 
 /**
  * cgroup_rm_cftypes - remove an array of cftypes from a subsystem
- * @ss: target cgroup subsystem
  * @cfts: zero-length name terminated array of cftypes
  *
- * Unregister @cfts from @ss.  Files described by @cfts are removed from
- * all existing cgroups to which @ss is attached and all future cgroups
- * won't have them either.  This function can be called anytime whether @ss
- * is attached or not.
+ * Unregister @cfts.  Files described by @cfts are removed from all
+ * existing cgroups and all future cgroups won't have them either.  This
+ * function can be called anytime whether @cfts' subsys is attached or not.
  *
  * Returns 0 on successful unregistration, -ENOENT if @cfts is not
- * registered with @ss.
+ * registered.
  */
-int cgroup_rm_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
+int cgroup_rm_cftypes(struct cftype *cfts)
 {
        struct cftype_set *set;
 
+       if (!cfts || !cfts[0].ss)
+               return -ENOENT;
+
        cgroup_cfts_prepare();
 
-       list_for_each_entry(set, &ss->cftsets, node) {
+       list_for_each_entry(set, &cfts[0].ss->cftsets, node) {
                if (set->cfts == cfts) {
                        list_del(&set->node);
                        kfree(set);
-                       cgroup_cfts_commit(ss, cfts, false);
+                       cgroup_cfts_commit(cfts, false);
                        return 0;
                }
        }
 
-       cgroup_cfts_commit(ss, NULL, false);
+       cgroup_cfts_commit(NULL, false);
        return -ENOENT;
 }
 
@@ -2966,34 +2988,10 @@ int cgroup_task_count(const struct cgroup *cgrp)
 }
 
 /*
- * Advance a list_head iterator.  The iterator should be positioned at
- * the start of a css_set
- */
-static void cgroup_advance_iter(struct cgroup *cgrp, struct cgroup_iter *it)
-{
-       struct list_head *l = it->cset_link;
-       struct cgrp_cset_link *link;
-       struct css_set *cset;
-
-       /* Advance to the next non-empty css_set */
-       do {
-               l = l->next;
-               if (l == &cgrp->cset_links) {
-                       it->cset_link = NULL;
-                       return;
-               }
-               link = list_entry(l, struct cgrp_cset_link, cset_link);
-               cset = link->cset;
-       } while (list_empty(&cset->tasks));
-       it->cset_link = l;
-       it->task = cset->tasks.next;
-}
-
-/*
- * To reduce the fork() overhead for systems that are not actually
- * using their cgroups capability, we don't maintain the lists running
- * through each css_set to its tasks until we see the list actually
- * used - in other words after the first call to cgroup_iter_start().
+ * To reduce the fork() overhead for systems that are not actually using
+ * their cgroups capability, we don't maintain the lists running through
+ * each css_set to its tasks until we see the list actually used - in other
+ * words after the first call to css_task_iter_start().
  */
 static void cgroup_enable_task_cg_lists(void)
 {
@@ -3024,16 +3022,21 @@ static void cgroup_enable_task_cg_lists(void)
 }
 
 /**
- * cgroup_next_sibling - find the next sibling of a given cgroup
- * @pos: the current cgroup
+ * css_next_child - find the next child of a given css
+ * @pos_css: the current position (%NULL to initiate traversal)
+ * @parent_css: css whose children to walk
  *
- * This function returns the next sibling of @pos and should be called
- * under RCU read lock.  The only requirement is that @pos is accessible.
- * The next sibling is guaranteed to be returned regardless of @pos's
- * state.
+ * This function returns the next child of @parent_css and should be called
+ * under RCU read lock.  The only requirement is that @parent_css and
+ * @pos_css are accessible.  The next sibling is guaranteed to be returned
+ * regardless of their states.
  */
-struct cgroup *cgroup_next_sibling(struct cgroup *pos)
+struct cgroup_subsys_state *
+css_next_child(struct cgroup_subsys_state *pos_css,
+              struct cgroup_subsys_state *parent_css)
 {
+       struct cgroup *pos = pos_css ? pos_css->cgroup : NULL;
+       struct cgroup *cgrp = parent_css->cgroup;
        struct cgroup *next;
 
        WARN_ON_ONCE(!rcu_read_lock_held());
@@ -3048,78 +3051,84 @@ struct cgroup *cgroup_next_sibling(struct cgroup *pos)
         * safe to dereference from this RCU critical section.  If
         * ->sibling.next is inaccessible, cgroup_is_dead() is guaranteed
         * to be visible as %true here.
+        *
+        * If @pos is dead, its next pointer can't be dereferenced;
+        * however, as each cgroup is given a monotonically increasing
+        * unique serial number and always appended to the sibling list,
+        * the next one can be found by walking the parent's children until
+        * we see a cgroup with higher serial number than @pos's.  While
+        * this path can be slower, it's taken only when either the current
+        * cgroup is removed or iteration and removal race.
         */
-       if (likely(!cgroup_is_dead(pos))) {
+       if (!pos) {
+               next = list_entry_rcu(cgrp->children.next, struct cgroup, sibling);
+       } else if (likely(!cgroup_is_dead(pos))) {
                next = list_entry_rcu(pos->sibling.next, struct cgroup, sibling);
-               if (&next->sibling != &pos->parent->children)
-                       return next;
-               return NULL;
+       } else {
+               list_for_each_entry_rcu(next, &cgrp->children, sibling)
+                       if (next->serial_nr > pos->serial_nr)
+                               break;
        }
 
-       /*
-        * Can't dereference the next pointer.  Each cgroup is given a
-        * monotonically increasing unique serial number and always
-        * appended to the sibling list, so the next one can be found by
-        * walking the parent's children until we see a cgroup with higher
-        * serial number than @pos's.
-        *
-        * While this path can be slow, it's taken only when either the
-        * current cgroup is removed or iteration and removal race.
-        */
-       list_for_each_entry_rcu(next, &pos->parent->children, sibling)
-               if (next->serial_nr > pos->serial_nr)
-                       return next;
-       return NULL;
+       if (&next->sibling == &cgrp->children)
+               return NULL;
+
+       if (parent_css->ss)
+               return cgroup_css(next, parent_css->ss->subsys_id);
+       else
+               return &next->dummy_css;
 }
-EXPORT_SYMBOL_GPL(cgroup_next_sibling);
+EXPORT_SYMBOL_GPL(css_next_child);
 
 /**
- * cgroup_next_descendant_pre - find the next descendant for pre-order walk
+ * css_next_descendant_pre - find the next descendant for pre-order walk
  * @pos: the current position (%NULL to initiate traversal)
- * @cgroup: cgroup whose descendants to walk
+ * @root: css whose descendants to walk
  *
- * To be used by cgroup_for_each_descendant_pre().  Find the next
- * descendant to visit for pre-order traversal of @cgroup's descendants.
+ * To be used by css_for_each_descendant_pre().  Find the next descendant
+ * to visit for pre-order traversal of @root's descendants.  @root is
+ * included in the iteration and the first node to be visited.
  *
  * While this function requires RCU read locking, it doesn't require the
  * whole traversal to be contained in a single RCU critical section.  This
  * function will return the correct next descendant as long as both @pos
- * and @cgroup are accessible and @pos is a descendant of @cgroup.
+ * and @root are accessible and @pos is a descendant of @root.
  */
-struct cgroup *cgroup_next_descendant_pre(struct cgroup *pos,
-                                         struct cgroup *cgroup)
+struct cgroup_subsys_state *
+css_next_descendant_pre(struct cgroup_subsys_state *pos,
+                       struct cgroup_subsys_state *root)
 {
-       struct cgroup *next;
+       struct cgroup_subsys_state *next;
 
        WARN_ON_ONCE(!rcu_read_lock_held());
 
-       /* if first iteration, pretend we just visited @cgroup */
+       /* if first iteration, visit @root */
        if (!pos)
-               pos = cgroup;
+               return root;
 
        /* visit the first child if exists */
-       next = list_first_or_null_rcu(&pos->children, struct cgroup, sibling);
+       next = css_next_child(NULL, pos);
        if (next)
                return next;
 
        /* no child, visit my or the closest ancestor's next sibling */
-       while (pos != cgroup) {
-               next = cgroup_next_sibling(pos);
+       while (pos != root) {
+               next = css_next_child(pos, css_parent(pos));
                if (next)
                        return next;
-               pos = pos->parent;
+               pos = css_parent(pos);
        }
 
        return NULL;
 }
-EXPORT_SYMBOL_GPL(cgroup_next_descendant_pre);
+EXPORT_SYMBOL_GPL(css_next_descendant_pre);
 
 /**
- * cgroup_rightmost_descendant - return the rightmost descendant of a cgroup
- * @pos: cgroup of interest
+ * css_rightmost_descendant - return the rightmost descendant of a css
+ * @pos: css of interest
  *
- * Return the rightmost descendant of @pos.  If there's no descendant,
- * @pos is returned.  This can be used during pre-order traversal to skip
+ * Return the rightmost descendant of @pos.  If there's no descendant, @pos
+ * is returned.  This can be used during pre-order traversal to skip
  * subtree of @pos.
  *
  * While this function requires RCU read locking, it doesn't require the
@@ -3127,9 +3136,10 @@ EXPORT_SYMBOL_GPL(cgroup_next_descendant_pre);
  * function will return the correct rightmost descendant as long as @pos is
  * accessible.
  */
-struct cgroup *cgroup_rightmost_descendant(struct cgroup *pos)
+struct cgroup_subsys_state *
+css_rightmost_descendant(struct cgroup_subsys_state *pos)
 {
-       struct cgroup *last, *tmp;
+       struct cgroup_subsys_state *last, *tmp;
 
        WARN_ON_ONCE(!rcu_read_lock_held());
 
@@ -3137,82 +3147,138 @@ struct cgroup *cgroup_rightmost_descendant(struct cgroup *pos)
                last = pos;
                /* ->prev isn't RCU safe, walk ->next till the end */
                pos = NULL;
-               list_for_each_entry_rcu(tmp, &last->children, sibling)
+               css_for_each_child(tmp, last)
                        pos = tmp;
        } while (pos);
 
        return last;
 }
-EXPORT_SYMBOL_GPL(cgroup_rightmost_descendant);
+EXPORT_SYMBOL_GPL(css_rightmost_descendant);
 
-static struct cgroup *cgroup_leftmost_descendant(struct cgroup *pos)
+static struct cgroup_subsys_state *
+css_leftmost_descendant(struct cgroup_subsys_state *pos)
 {
-       struct cgroup *last;
+       struct cgroup_subsys_state *last;
 
        do {
                last = pos;
-               pos = list_first_or_null_rcu(&pos->children, struct cgroup,
-                                            sibling);
+               pos = css_next_child(NULL, pos);
        } while (pos);
 
        return last;
 }
 
 /**
- * cgroup_next_descendant_post - find the next descendant for post-order walk
+ * css_next_descendant_post - find the next descendant for post-order walk
  * @pos: the current position (%NULL to initiate traversal)
- * @cgroup: cgroup whose descendants to walk
+ * @root: css whose descendants to walk
  *
- * To be used by cgroup_for_each_descendant_post().  Find the next
- * descendant to visit for post-order traversal of @cgroup's descendants.
+ * To be used by css_for_each_descendant_post().  Find the next descendant
+ * to visit for post-order traversal of @root's descendants.  @root is
+ * included in the iteration and the last node to be visited.
  *
  * While this function requires RCU read locking, it doesn't require the
  * whole traversal to be contained in a single RCU critical section.  This
  * function will return the correct next descendant as long as both @pos
  * and @cgroup are accessible and @pos is a descendant of @cgroup.
  */
-struct cgroup *cgroup_next_descendant_post(struct cgroup *pos,
-                                          struct cgroup *cgroup)
+struct cgroup_subsys_state *
+css_next_descendant_post(struct cgroup_subsys_state *pos,
+                        struct cgroup_subsys_state *root)
 {
-       struct cgroup *next;
+       struct cgroup_subsys_state *next;
 
        WARN_ON_ONCE(!rcu_read_lock_held());
 
        /* if first iteration, visit the leftmost descendant */
        if (!pos) {
-               next = cgroup_leftmost_descendant(cgroup);
-               return next != cgroup ? next : NULL;
+               next = css_leftmost_descendant(root);
+               return next != root ? next : NULL;
        }
 
+       /* if we visited @root, we're done */
+       if (pos == root)
+               return NULL;
+
        /* if there's an unvisited sibling, visit its leftmost descendant */
-       next = cgroup_next_sibling(pos);
+       next = css_next_child(pos, css_parent(pos));
        if (next)
-               return cgroup_leftmost_descendant(next);
+               return css_leftmost_descendant(next);
 
        /* no sibling left, visit parent */
-       next = pos->parent;
-       return next != cgroup ? next : NULL;
+       return css_parent(pos);
+}
+EXPORT_SYMBOL_GPL(css_next_descendant_post);
+
+/**
+ * css_advance_task_iter - advance a task iterator to the next css_set
+ * @it: the iterator to advance
+ *
+ * Advance @it to the next css_set to walk.
+ */
+static void css_advance_task_iter(struct css_task_iter *it)
+{
+       struct list_head *l = it->cset_link;
+       struct cgrp_cset_link *link;
+       struct css_set *cset;
+
+       /* Advance to the next non-empty css_set */
+       do {
+               l = l->next;
+               if (l == &it->origin_css->cgroup->cset_links) {
+                       it->cset_link = NULL;
+                       return;
+               }
+               link = list_entry(l, struct cgrp_cset_link, cset_link);
+               cset = link->cset;
+       } while (list_empty(&cset->tasks));
+       it->cset_link = l;
+       it->task = cset->tasks.next;
 }
-EXPORT_SYMBOL_GPL(cgroup_next_descendant_post);
 
-void cgroup_iter_start(struct cgroup *cgrp, struct cgroup_iter *it)
+/**
+ * css_task_iter_start - initiate task iteration
+ * @css: the css to walk tasks of
+ * @it: the task iterator to use
+ *
+ * Initiate iteration through the tasks of @css.  The caller can call
+ * css_task_iter_next() to walk through the tasks until the function
+ * returns NULL.  On completion of iteration, css_task_iter_end() must be
+ * called.
+ *
+ * Note that this function acquires a lock which is released when the
+ * iteration finishes.  The caller can't sleep while iteration is in
+ * progress.
+ */
+void css_task_iter_start(struct cgroup_subsys_state *css,
+                        struct css_task_iter *it)
        __acquires(css_set_lock)
 {
        /*
-        * The first time anyone tries to iterate across a cgroup,
-        * we need to enable the list linking each css_set to its
-        * tasks, and fix up all existing tasks.
+        * The first time anyone tries to iterate across a css, we need to
+        * enable the list linking each css_set to its tasks, and fix up
+        * all existing tasks.
         */
        if (!use_task_css_set_links)
                cgroup_enable_task_cg_lists();
 
        read_lock(&css_set_lock);
-       it->cset_link = &cgrp->cset_links;
-       cgroup_advance_iter(cgrp, it);
+
+       it->origin_css = css;
+       it->cset_link = &css->cgroup->cset_links;
+
+       css_advance_task_iter(it);
 }
 
-struct task_struct *cgroup_iter_next(struct cgroup *cgrp,
-                                       struct cgroup_iter *it)
+/**
+ * css_task_iter_next - return the next task for the iterator
+ * @it: the task iterator being iterated
+ *
+ * The "next" function for task iteration.  @it should have been
+ * initialized via css_task_iter_start().  Returns NULL when the iteration
+ * reaches the end.
+ */
+struct task_struct *css_task_iter_next(struct css_task_iter *it)
 {
        struct task_struct *res;
        struct list_head *l = it->task;
@@ -3226,16 +3292,24 @@ struct task_struct *cgroup_iter_next(struct cgroup *cgrp,
        l = l->next;
        link = list_entry(it->cset_link, struct cgrp_cset_link, cset_link);
        if (l == &link->cset->tasks) {
-               /* We reached the end of this task list - move on to
-                * the next cg_cgroup_link */
-               cgroup_advance_iter(cgrp, it);
+               /*
+                * We reached the end of this task list - move on to the
+                * next cgrp_cset_link.
+                */
+               css_advance_task_iter(it);
        } else {
                it->task = l;
        }
        return res;
 }
 
-void cgroup_iter_end(struct cgroup *cgrp, struct cgroup_iter *it)
+/**
+ * css_task_iter_end - finish task iteration
+ * @it: the task iterator to finish
+ *
+ * Finish task iteration started by css_task_iter_start().
+ */
+void css_task_iter_end(struct css_task_iter *it)
        __releases(css_set_lock)
 {
        read_unlock(&css_set_lock);
@@ -3276,46 +3350,49 @@ static inline int started_after(void *p1, void *p2)
 }
 
 /**
- * cgroup_scan_tasks - iterate though all the tasks in a cgroup
- * @scan: struct cgroup_scanner containing arguments for the scan
+ * css_scan_tasks - iterate through all the tasks in a css
+ * @css: the css to iterate tasks of
+ * @test: optional test callback
+ * @process: process callback
+ * @data: data passed to @test and @process
+ * @heap: optional pre-allocated heap used for task iteration
+ *
+ * Iterate through all the tasks in @css, calling @test for each, and if it
+ * returns %true, call @process for it also.
+ *
+ * @test may be NULL, meaning always true (select all tasks), which
+ * effectively duplicates css_task_iter_{start,next,end}() but does not
+ * lock css_set_lock for the call to @process.
  *
- * Arguments include pointers to callback functions test_task() and
- * process_task().
- * Iterate through all the tasks in a cgroup, calling test_task() for each,
- * and if it returns true, call process_task() for it also.
- * The test_task pointer may be NULL, meaning always true (select all tasks).
- * Effectively duplicates cgroup_iter_{start,next,end}()
- * but does not lock css_set_lock for the call to process_task().
- * The struct cgroup_scanner may be embedded in any structure of the caller's
- * creation.
- * It is guaranteed that process_task() will act on every task that
- * is a member of the cgroup for the duration of this call. This
- * function may or may not call process_task() for tasks that exit
- * or move to a different cgroup during the call, or are forked or
- * move into the cgroup during the call.
+ * It is guaranteed that @process will act on every task that is a member
+ * of @css for the duration of this call.  This function may or may not
+ * call @process for tasks that exit or move to a different css during the
+ * call, or are forked or move into the css during the call.
  *
- * Note that test_task() may be called with locks held, and may in some
- * situations be called multiple times for the same task, so it should
- * be cheap.
- * If the heap pointer in the struct cgroup_scanner is non-NULL, a heap has been
- * pre-allocated and will be used for heap operations (and its "gt" member will
- * be overwritten), else a temporary heap will be used (allocation of which
- * may cause this function to fail).
+ * Note that @test may be called with locks held, and may in some
+ * situations be called multiple times for the same task, so it should be
+ * cheap.
+ *
+ * If @heap is non-NULL, a heap has been pre-allocated and will be used for
+ * heap operations (and its "gt" member will be overwritten), else a
+ * temporary heap will be used (allocation of which may cause this function
+ * to fail).
  */
-int cgroup_scan_tasks(struct cgroup_scanner *scan)
+int css_scan_tasks(struct cgroup_subsys_state *css,
+                  bool (*test)(struct task_struct *, void *),
+                  void (*process)(struct task_struct *, void *),
+                  void *data, struct ptr_heap *heap)
 {
        int retval, i;
-       struct cgroup_iter it;
+       struct css_task_iter it;
        struct task_struct *p, *dropped;
        /* Never dereference latest_task, since it's not refcounted */
        struct task_struct *latest_task = NULL;
        struct ptr_heap tmp_heap;
-       struct ptr_heap *heap;
        struct timespec latest_time = { 0, 0 };
 
-       if (scan->heap) {
+       if (heap) {
                /* The caller supplied our heap and pre-allocated its memory */
-               heap = scan->heap;
                heap->gt = &started_after;
        } else {
                /* We need to allocate our own heap memory */
@@ -3328,25 +3405,24 @@ int cgroup_scan_tasks(struct cgroup_scanner *scan)
 
  again:
        /*
-        * Scan tasks in the cgroup, using the scanner's "test_task" callback
-        * to determine which are of interest, and using the scanner's
-        * "process_task" callback to process any of them that need an update.
-        * Since we don't want to hold any locks during the task updates,
-        * gather tasks to be processed in a heap structure.
-        * The heap is sorted by descending task start time.
-        * If the statically-sized heap fills up, we overflow tasks that
-        * started later, and in future iterations only consider tasks that
-        * started after the latest task in the previous pass. This
+        * Scan tasks in the css, using the @test callback to determine
+        * which are of interest, and invoking @process callback on the
+        * ones which need an update.  Since we don't want to hold any
+        * locks during the task updates, gather tasks to be processed in a
+        * heap structure.  The heap is sorted by descending task start
+        * time.  If the statically-sized heap fills up, we overflow tasks
+        * that started later, and in future iterations only consider tasks
+        * that started after the latest task in the previous pass. This
         * guarantees forward progress and that we don't miss any tasks.
         */
        heap->size = 0;
-       cgroup_iter_start(scan->cg, &it);
-       while ((p = cgroup_iter_next(scan->cg, &it))) {
+       css_task_iter_start(css, &it);
+       while ((p = css_task_iter_next(&it))) {
                /*
                 * Only affect tasks that qualify per the caller's callback,
                 * if he provided one
                 */
-               if (scan->test_task && !scan->test_task(p, scan))
+               if (test && !test(p, data))
                        continue;
                /*
                 * Only process tasks that started after the last task
@@ -3374,7 +3450,7 @@ int cgroup_scan_tasks(struct cgroup_scanner *scan)
                 * the heap and wasn't inserted
                 */
        }
-       cgroup_iter_end(scan->cg, &it);
+       css_task_iter_end(&it);
 
        if (heap->size) {
                for (i = 0; i < heap->size; i++) {
@@ -3384,7 +3460,7 @@ int cgroup_scan_tasks(struct cgroup_scanner *scan)
                                latest_task = q;
                        }
                        /* Process the task per the caller's callback */
-                       scan->process_task(q, scan);
+                       process(q, data);
                        put_task_struct(q);
                }
                /*
@@ -3401,10 +3477,9 @@ int cgroup_scan_tasks(struct cgroup_scanner *scan)
        return 0;
 }
 
-static void cgroup_transfer_one_task(struct task_struct *task,
-                                    struct cgroup_scanner *scan)
+static void cgroup_transfer_one_task(struct task_struct *task, void *data)
 {
-       struct cgroup *new_cgroup = scan->data;
+       struct cgroup *new_cgroup = data;
 
        mutex_lock(&cgroup_mutex);
        cgroup_attach_task(new_cgroup, task, false);
@@ -3418,15 +3493,8 @@ static void cgroup_transfer_one_task(struct task_struct *task,
  */
 int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from)
 {
-       struct cgroup_scanner scan;
-
-       scan.cg = from;
-       scan.test_task = NULL; /* select all tasks in cgroup */
-       scan.process_task = cgroup_transfer_one_task;
-       scan.heap = NULL;
-       scan.data = to;
-
-       return cgroup_scan_tasks(&scan);
+       return css_scan_tasks(&from->dummy_css, NULL, cgroup_transfer_one_task,
+                             to, NULL);
 }
 
 /*
@@ -3468,7 +3536,7 @@ struct cgroup_pidlist {
        /* pointer to the cgroup we belong to, for list removal purposes */
        struct cgroup *owner;
        /* protects the other fields */
-       struct rw_semaphore mutex;
+       struct rw_semaphore rwsem;
 };
 
 /*
@@ -3541,7 +3609,7 @@ static struct cgroup_pidlist *cgroup_pidlist_find(struct cgroup *cgrp,
        struct pid_namespace *ns = task_active_pid_ns(current);
 
        /*
-        * We can't drop the pidlist_mutex before taking the l->mutex in case
+        * We can't drop the pidlist_mutex before taking the l->rwsem in case
         * the last ref-holder is trying to remove l from the list at the same
         * time. Holding the pidlist_mutex precludes somebody taking whichever
         * list we find out from under us - compare release_pid_array().
@@ -3550,7 +3618,7 @@ static struct cgroup_pidlist *cgroup_pidlist_find(struct cgroup *cgrp,
        list_for_each_entry(l, &cgrp->pidlists, links) {
                if (l->key.type == type && l->key.ns == ns) {
                        /* make sure l doesn't vanish out from under us */
-                       down_write(&l->mutex);
+                       down_write(&l->rwsem);
                        mutex_unlock(&cgrp->pidlist_mutex);
                        return l;
                }
@@ -3561,8 +3629,8 @@ static struct cgroup_pidlist *cgroup_pidlist_find(struct cgroup *cgrp,
                mutex_unlock(&cgrp->pidlist_mutex);
                return l;
        }
-       init_rwsem(&l->mutex);
-       down_write(&l->mutex);
+       init_rwsem(&l->rwsem);
+       down_write(&l->rwsem);
        l->key.type = type;
        l->key.ns = get_pid_ns(ns);
        l->owner = cgrp;
@@ -3580,7 +3648,7 @@ static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type,
        pid_t *array;
        int length;
        int pid, n = 0; /* used for populating the array */
-       struct cgroup_iter it;
+       struct css_task_iter it;
        struct task_struct *tsk;
        struct cgroup_pidlist *l;
 
@@ -3595,8 +3663,8 @@ static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type,
        if (!array)
                return -ENOMEM;
        /* now, populate the array */
-       cgroup_iter_start(cgrp, &it);
-       while ((tsk = cgroup_iter_next(cgrp, &it))) {
+       css_task_iter_start(&cgrp->dummy_css, &it);
+       while ((tsk = css_task_iter_next(&it))) {
                if (unlikely(n == length))
                        break;
                /* get tgid or pid for procs or tasks file respectively */
@@ -3607,7 +3675,7 @@ static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type,
                if (pid > 0) /* make sure to only use valid results */
                        array[n++] = pid;
        }
-       cgroup_iter_end(cgrp, &it);
+       css_task_iter_end(&it);
        length = n;
        /* now sort & (if procs) strip out duplicates */
        sort(array, length, sizeof(pid_t), cmppid, NULL);
@@ -3623,7 +3691,7 @@ static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type,
        l->list = array;
        l->length = length;
        l->use_count++;
-       up_write(&l->mutex);
+       up_write(&l->rwsem);
        *lp = l;
        return 0;
 }
@@ -3641,7 +3709,7 @@ int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry)
 {
        int ret = -EINVAL;
        struct cgroup *cgrp;
-       struct cgroup_iter it;
+       struct css_task_iter it;
        struct task_struct *tsk;
 
        /*
@@ -3655,8 +3723,8 @@ int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry)
        ret = 0;
        cgrp = dentry->d_fsdata;
 
-       cgroup_iter_start(cgrp, &it);
-       while ((tsk = cgroup_iter_next(cgrp, &it))) {
+       css_task_iter_start(&cgrp->dummy_css, &it);
+       while ((tsk = css_task_iter_next(&it))) {
                switch (tsk->state) {
                case TASK_RUNNING:
                        stats->nr_running++;
@@ -3676,7 +3744,7 @@ int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry)
                        break;
                }
        }
-       cgroup_iter_end(cgrp, &it);
+       css_task_iter_end(&it);
 
 err:
        return ret;
@@ -3701,7 +3769,7 @@ static void *cgroup_pidlist_start(struct seq_file *s, loff_t *pos)
        int index = 0, pid = *pos;
        int *iter;
 
-       down_read(&l->mutex);
+       down_read(&l->rwsem);
        if (pid) {
                int end = l->length;
 
@@ -3728,7 +3796,7 @@ static void *cgroup_pidlist_start(struct seq_file *s, loff_t *pos)
 static void cgroup_pidlist_stop(struct seq_file *s, void *v)
 {
        struct cgroup_pidlist *l = s->private;
-       up_read(&l->mutex);
+       up_read(&l->rwsem);
 }
 
 static void *cgroup_pidlist_next(struct seq_file *s, void *v, loff_t *pos)
@@ -3774,7 +3842,7 @@ static void cgroup_release_pid_array(struct cgroup_pidlist *l)
         * pidlist_mutex, we have to take pidlist_mutex first.
         */
        mutex_lock(&l->owner->pidlist_mutex);
-       down_write(&l->mutex);
+       down_write(&l->rwsem);
        BUG_ON(!l->use_count);
        if (!--l->use_count) {
                /* we're the last user if refcount is 0; remove and free */
@@ -3782,12 +3850,12 @@ static void cgroup_release_pid_array(struct cgroup_pidlist *l)
                mutex_unlock(&l->owner->pidlist_mutex);
                pidlist_free(l->list);
                put_pid_ns(l->key.ns);
-               up_write(&l->mutex);
+               up_write(&l->rwsem);
                kfree(l);
                return;
        }
        mutex_unlock(&l->owner->pidlist_mutex);
-       up_write(&l->mutex);
+       up_write(&l->rwsem);
 }
 
 static int cgroup_pidlist_release(struct inode *inode, struct file *file)
@@ -3851,21 +3919,20 @@ static int cgroup_procs_open(struct inode *unused, struct file *file)
        return cgroup_pidlist_open(file, CGROUP_FILE_PROCS);
 }
 
-static u64 cgroup_read_notify_on_release(struct cgroup *cgrp,
-                                           struct cftype *cft)
+static u64 cgroup_read_notify_on_release(struct cgroup_subsys_state *css,
+                                        struct cftype *cft)
 {
-       return notify_on_release(cgrp);
+       return notify_on_release(css->cgroup);
 }
 
-static int cgroup_write_notify_on_release(struct cgroup *cgrp,
-                                         struct cftype *cft,
-                                         u64 val)
+static int cgroup_write_notify_on_release(struct cgroup_subsys_state *css,
+                                         struct cftype *cft, u64 val)
 {
-       clear_bit(CGRP_RELEASABLE, &cgrp->flags);
+       clear_bit(CGRP_RELEASABLE, &css->cgroup->flags);
        if (val)
-               set_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
+               set_bit(CGRP_NOTIFY_ON_RELEASE, &css->cgroup->flags);
        else
-               clear_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
+               clear_bit(CGRP_NOTIFY_ON_RELEASE, &css->cgroup->flags);
        return 0;
 }
 
@@ -3895,11 +3962,12 @@ static void cgroup_event_remove(struct work_struct *work)
 {
        struct cgroup_event *event = container_of(work, struct cgroup_event,
                        remove);
-       struct cgroup *cgrp = event->cgrp;
+       struct cgroup_subsys_state *css = event->css;
+       struct cgroup *cgrp = css->cgroup;
 
        remove_wait_queue(event->wqh, &event->wait);
 
-       event->cft->unregister_event(cgrp, event->cft, event->eventfd);
+       event->cft->unregister_event(css, event->cft, event->eventfd);
 
        /* Notify userspace the event is going away. */
        eventfd_signal(event->eventfd, 1);
@@ -3919,7 +3987,7 @@ static int cgroup_event_wake(wait_queue_t *wait, unsigned mode,
 {
        struct cgroup_event *event = container_of(wait,
                        struct cgroup_event, wait);
-       struct cgroup *cgrp = event->cgrp;
+       struct cgroup *cgrp = event->css->cgroup;
        unsigned long flags = (unsigned long)key;
 
        if (flags & POLLHUP) {
@@ -3963,14 +4031,15 @@ static void cgroup_event_ptable_queue_proc(struct file *file,
  * Input must be in format '<event_fd> <control_fd> <args>'.
  * Interpretation of args is defined by control file implementation.
  */
-static int cgroup_write_event_control(struct cgroup *cgrp, struct cftype *cft,
-                                     const char *buffer)
+static int cgroup_write_event_control(struct cgroup_subsys_state *dummy_css,
+                                     struct cftype *cft, const char *buffer)
 {
-       struct cgroup_event *event = NULL;
+       struct cgroup *cgrp = dummy_css->cgroup;
+       struct cgroup_event *event;
        struct cgroup *cgrp_cfile;
        unsigned int efd, cfd;
-       struct file *efile = NULL;
-       struct file *cfile = NULL;
+       struct file *efile;
+       struct file *cfile;
        char *endp;
        int ret;
 
@@ -3987,7 +4056,7 @@ static int cgroup_write_event_control(struct cgroup *cgrp, struct cftype *cft,
        event = kzalloc(sizeof(*event), GFP_KERNEL);
        if (!event)
                return -ENOMEM;
-       event->cgrp = cgrp;
+
        INIT_LIST_HEAD(&event->list);
        init_poll_funcptr(&event->pt, cgroup_event_ptable_queue_proc);
        init_waitqueue_func_entry(&event->wait, cgroup_event_wake);
@@ -3996,33 +4065,50 @@ static int cgroup_write_event_control(struct cgroup *cgrp, struct cftype *cft,
        efile = eventfd_fget(efd);
        if (IS_ERR(efile)) {
                ret = PTR_ERR(efile);
-               goto fail;
+               goto out_kfree;
        }
 
        event->eventfd = eventfd_ctx_fileget(efile);
        if (IS_ERR(event->eventfd)) {
                ret = PTR_ERR(event->eventfd);
-               goto fail;
+               goto out_put_efile;
        }
 
        cfile = fget(cfd);
        if (!cfile) {
                ret = -EBADF;
-               goto fail;
+               goto out_put_eventfd;
        }
 
        /* the process need read permission on control file */
        /* AV: shouldn't we check that it's been opened for read instead? */
        ret = inode_permission(file_inode(cfile), MAY_READ);
        if (ret < 0)
-               goto fail;
+               goto out_put_cfile;
 
        event->cft = __file_cft(cfile);
        if (IS_ERR(event->cft)) {
                ret = PTR_ERR(event->cft);
-               goto fail;
+               goto out_put_cfile;
+       }
+
+       if (!event->cft->ss) {
+               ret = -EBADF;
+               goto out_put_cfile;
        }
 
+       /* determine the css of @cfile and associate @event with it */
+       rcu_read_lock();
+
+       ret = -EINVAL;
+       event->css = cgroup_css(cgrp, event->cft->ss->subsys_id);
+       if (event->css)
+               ret = 0;
+
+       rcu_read_unlock();
+       if (ret)
+               goto out_put_cfile;
+
        /*
         * The file to be monitored must be in the same cgroup as
         * cgroup.event_control is.
@@ -4030,18 +4116,18 @@ static int cgroup_write_event_control(struct cgroup *cgrp, struct cftype *cft,
        cgrp_cfile = __d_cgrp(cfile->f_dentry->d_parent);
        if (cgrp_cfile != cgrp) {
                ret = -EINVAL;
-               goto fail;
+               goto out_put_cfile;
        }
 
        if (!event->cft->register_event || !event->cft->unregister_event) {
                ret = -EINVAL;
-               goto fail;
+               goto out_put_cfile;
        }
 
-       ret = event->cft->register_event(cgrp, event->cft,
+       ret = event->cft->register_event(event->css, event->cft,
                        event->eventfd, buffer);
        if (ret)
-               goto fail;
+               goto out_put_cfile;
 
        efile->f_op->poll(efile, &event->pt);
 
@@ -4061,35 +4147,31 @@ static int cgroup_write_event_control(struct cgroup *cgrp, struct cftype *cft,
 
        return 0;
 
-fail:
-       if (cfile)
-               fput(cfile);
-
-       if (event && event->eventfd && !IS_ERR(event->eventfd))
-               eventfd_ctx_put(event->eventfd);
-
-       if (!IS_ERR_OR_NULL(efile))
-               fput(efile);
-
+out_put_cfile:
+       fput(cfile);
+out_put_eventfd:
+       eventfd_ctx_put(event->eventfd);
+out_put_efile:
+       fput(efile);
+out_kfree:
        kfree(event);
 
        return ret;
 }
 
-static u64 cgroup_clone_children_read(struct cgroup *cgrp,
-                                   struct cftype *cft)
+static u64 cgroup_clone_children_read(struct cgroup_subsys_state *css,
+                                     struct cftype *cft)
 {
-       return test_bit(CGRP_CPUSET_CLONE_CHILDREN, &cgrp->flags);
+       return test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
 }
 
-static int cgroup_clone_children_write(struct cgroup *cgrp,
-                                    struct cftype *cft,
-                                    u64 val)
+static int cgroup_clone_children_write(struct cgroup_subsys_state *css,
+                                      struct cftype *cft, u64 val)
 {
        if (val)
-               set_bit(CGRP_CPUSET_CLONE_CHILDREN, &cgrp->flags);
+               set_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
        else
-               clear_bit(CGRP_CPUSET_CLONE_CHILDREN, &cgrp->flags);
+               clear_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
        return 0;
 }
 
@@ -4148,36 +4230,34 @@ static struct cftype cgroup_base_files[] = {
 };
 
 /**
- * cgroup_populate_dir - selectively creation of files in a directory
+ * cgroup_populate_dir - create subsys files in a cgroup directory
  * @cgrp: target cgroup
- * @base_files: true if the base files should be added
  * @subsys_mask: mask of the subsystem ids whose files should be added
+ *
+ * On failure, no file is added.
  */
-static int cgroup_populate_dir(struct cgroup *cgrp, bool base_files,
-                              unsigned long subsys_mask)
+static int cgroup_populate_dir(struct cgroup *cgrp, unsigned long subsys_mask)
 {
-       int err;
        struct cgroup_subsys *ss;
-
-       if (base_files) {
-               err = cgroup_addrm_files(cgrp, NULL, cgroup_base_files, true);
-               if (err < 0)
-                       return err;
-       }
+       int i, ret = 0;
 
        /* process cftsets of each subsystem */
-       for_each_root_subsys(cgrp->root, ss) {
+       for_each_subsys(ss, i) {
                struct cftype_set *set;
-               if (!test_bit(ss->subsys_id, &subsys_mask))
+
+               if (!test_bit(i, &subsys_mask))
                        continue;
 
-               list_for_each_entry(set, &ss->cftsets, node)
-                       cgroup_addrm_files(cgrp, ss, set->cfts, true);
+               list_for_each_entry(set, &ss->cftsets, node) {
+                       ret = cgroup_addrm_files(cgrp, set->cfts, true);
+                       if (ret < 0)
+                               goto err;
+               }
        }
 
        /* This cgroup is ready now */
        for_each_root_subsys(cgrp->root, ss) {
-               struct cgroup_subsys_state *css = cgrp->subsys[ss->subsys_id];
+               struct cgroup_subsys_state *css = cgroup_css(cgrp, ss->subsys_id);
                struct css_id *id = rcu_dereference_protected(css->id, true);
 
                /*
@@ -4190,14 +4270,57 @@ static int cgroup_populate_dir(struct cgroup *cgrp, bool base_files,
        }
 
        return 0;
+err:
+       cgroup_clear_dir(cgrp, subsys_mask);
+       return ret;
+}
+
+/*
+ * css destruction is a four-stage process.
+ *
+ * 1. Destruction starts.  Killing of the percpu_ref is initiated.
+ *    Implemented in kill_css().
+ *
+ * 2. When the percpu_ref is confirmed to be visible as killed on all CPUs
+ *    and thus css_tryget() is guaranteed to fail, the css can be offlined
+ *    by invoking offline_css().  After offlining, the base ref is put.
+ *    Implemented in css_killed_work_fn().
+ *
+ * 3. When the percpu_ref reaches zero, the only possible remaining
+ *    accessors are inside RCU read sections.  css_release() schedules the
+ *    RCU callback.
+ *
+ * 4. After the grace period, the css can be freed.  Implemented in
+ *    css_free_work_fn().
+ *
+ * It is actually hairier because steps 2 and 4 both require process context
+ * and thus involve punting to css->destroy_work adding two additional
+ * steps to the already complex sequence.
+ */
+static void css_free_work_fn(struct work_struct *work)
+{
+       struct cgroup_subsys_state *css =
+               container_of(work, struct cgroup_subsys_state, destroy_work);
+       struct cgroup *cgrp = css->cgroup;
+
+       if (css->parent)
+               css_put(css->parent);
+
+       css->ss->css_free(css);
+       cgroup_dput(cgrp);
 }
 
-static void css_dput_fn(struct work_struct *work)
+static void css_free_rcu_fn(struct rcu_head *rcu_head)
 {
        struct cgroup_subsys_state *css =
-               container_of(work, struct cgroup_subsys_state, dput_work);
+               container_of(rcu_head, struct cgroup_subsys_state, rcu_head);
 
-       cgroup_dput(css->cgroup);
+       /*
+        * css holds an extra ref to @cgrp->dentry which is put on the last
+        * css_put().  dput() requires process context which we don't have.
+        */
+       INIT_WORK(&css->destroy_work, css_free_work_fn);
+       schedule_work(&css->destroy_work);
 }
 
 static void css_release(struct percpu_ref *ref)
@@ -4205,49 +4328,47 @@ static void css_release(struct percpu_ref *ref)
        struct cgroup_subsys_state *css =
                container_of(ref, struct cgroup_subsys_state, refcnt);
 
-       schedule_work(&css->dput_work);
+       call_rcu(&css->rcu_head, css_free_rcu_fn);
 }
 
-static void init_cgroup_css(struct cgroup_subsys_state *css,
-                              struct cgroup_subsys *ss,
-                              struct cgroup *cgrp)
+static void init_css(struct cgroup_subsys_state *css, struct cgroup_subsys *ss,
+                    struct cgroup *cgrp)
 {
        css->cgroup = cgrp;
+       css->ss = ss;
        css->flags = 0;
        css->id = NULL;
-       if (cgrp == cgroup_dummy_top)
+
+       if (cgrp->parent)
+               css->parent = cgroup_css(cgrp->parent, ss->subsys_id);
+       else
                css->flags |= CSS_ROOT;
-       BUG_ON(cgrp->subsys[ss->subsys_id]);
-       cgrp->subsys[ss->subsys_id] = css;
 
-       /*
-        * css holds an extra ref to @cgrp->dentry which is put on the last
-        * css_put().  dput() requires process context, which css_put() may
-        * be called without.  @css->dput_work will be used to invoke
-        * dput() asynchronously from css_put().
-        */
-       INIT_WORK(&css->dput_work, css_dput_fn);
+       BUG_ON(cgroup_css(cgrp, ss->subsys_id));
 }
 
-/* invoke ->post_create() on a new CSS and mark it online if successful */
-static int online_css(struct cgroup_subsys *ss, struct cgroup *cgrp)
+/* invoke ->css_online() on a new CSS and mark it online if successful */
+static int online_css(struct cgroup_subsys_state *css)
 {
+       struct cgroup_subsys *ss = css->ss;
        int ret = 0;
 
        lockdep_assert_held(&cgroup_mutex);
 
        if (ss->css_online)
-               ret = ss->css_online(cgrp);
-       if (!ret)
-               cgrp->subsys[ss->subsys_id]->flags |= CSS_ONLINE;
+               ret = ss->css_online(css);
+       if (!ret) {
+               css->flags |= CSS_ONLINE;
+               css->cgroup->nr_css++;
+               rcu_assign_pointer(css->cgroup->subsys[ss->subsys_id], css);
+       }
        return ret;
 }
 
-/* if the CSS is online, invoke ->pre_destory() on it and mark it offline */
-static void offline_css(struct cgroup_subsys *ss, struct cgroup *cgrp)
-       __releases(&cgroup_mutex) __acquires(&cgroup_mutex)
+/* if the CSS is online, invoke ->css_offline() on it and mark it offline */
+static void offline_css(struct cgroup_subsys_state *css)
 {
-       struct cgroup_subsys_state *css = cgrp->subsys[ss->subsys_id];
+       struct cgroup_subsys *ss = css->ss;
 
        lockdep_assert_held(&cgroup_mutex);
 
@@ -4255,9 +4376,11 @@ static void offline_css(struct cgroup_subsys *ss, struct cgroup *cgrp)
                return;
 
        if (ss->css_offline)
-               ss->css_offline(cgrp);
+               ss->css_offline(css);
 
-       cgrp->subsys[ss->subsys_id]->flags &= ~CSS_ONLINE;
+       css->flags &= ~CSS_ONLINE;
+       css->cgroup->nr_css--;
+       RCU_INIT_POINTER(css->cgroup->subsys[ss->subsys_id], css);
 }
 
 /*
@@ -4271,6 +4394,7 @@ static void offline_css(struct cgroup_subsys *ss, struct cgroup *cgrp)
 static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
                             umode_t mode)
 {
+       struct cgroup_subsys_state *css_ar[CGROUP_SUBSYS_COUNT] = { };
        struct cgroup *cgrp;
        struct cgroup_name *name;
        struct cgroupfs_root *root = parent->root;
@@ -4288,7 +4412,11 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
                goto err_free_cgrp;
        rcu_assign_pointer(cgrp->name, name);
 
-       cgrp->id = ida_simple_get(&root->cgroup_ida, 1, 0, GFP_KERNEL);
+       /*
+        * Temporarily set the pointer to NULL, so idr_find() won't return
+        * a half-baked cgroup.
+        */
+       cgrp->id = idr_alloc(&root->cgroup_idr, NULL, 1, 0, GFP_KERNEL);
        if (cgrp->id < 0)
                goto err_free_name;
 
@@ -4317,6 +4445,7 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
        cgrp->dentry = dentry;
 
        cgrp->parent = parent;
+       cgrp->dummy_css.parent = &parent->dummy_css;
        cgrp->root = parent->root;
 
        if (notify_on_release(parent))
@@ -4328,22 +4457,21 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
        for_each_root_subsys(root, ss) {
                struct cgroup_subsys_state *css;
 
-               css = ss->css_alloc(cgrp);
+               css = ss->css_alloc(cgroup_css(parent, ss->subsys_id));
                if (IS_ERR(css)) {
                        err = PTR_ERR(css);
                        goto err_free_all;
                }
+               css_ar[ss->subsys_id] = css;
 
                err = percpu_ref_init(&css->refcnt, css_release);
-               if (err) {
-                       ss->css_free(cgrp);
+               if (err)
                        goto err_free_all;
-               }
 
-               init_cgroup_css(css, ss, cgrp);
+               init_css(css, ss, cgrp);
 
                if (ss->use_id) {
-                       err = alloc_css_id(ss, parent, cgrp);
+                       err = alloc_css_id(css);
                        if (err)
                                goto err_free_all;
                }
@@ -4365,16 +4493,22 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
        list_add_tail_rcu(&cgrp->sibling, &cgrp->parent->children);
        root->number_of_cgroups++;
 
-       /* each css holds a ref to the cgroup's dentry */
-       for_each_root_subsys(root, ss)
+       /* each css holds a ref to the cgroup's dentry and the parent css */
+       for_each_root_subsys(root, ss) {
+               struct cgroup_subsys_state *css = css_ar[ss->subsys_id];
+
                dget(dentry);
+               css_get(css->parent);
+       }
 
        /* hold a ref to the parent's dentry */
        dget(parent->dentry);
 
        /* creation succeeded, notify subsystems */
        for_each_root_subsys(root, ss) {
-               err = online_css(ss, cgrp);
+               struct cgroup_subsys_state *css = css_ar[ss->subsys_id];
+
+               err = online_css(css);
                if (err)
                        goto err_destroy;
 
@@ -4388,7 +4522,13 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
                }
        }
 
-       err = cgroup_populate_dir(cgrp, true, root->subsys_mask);
+       idr_replace(&root->cgroup_idr, cgrp, cgrp->id);
+
+       err = cgroup_addrm_files(cgrp, cgroup_base_files, true);
+       if (err)
+               goto err_destroy;
+
+       err = cgroup_populate_dir(cgrp, root->subsys_mask);
        if (err)
                goto err_destroy;
 
@@ -4399,18 +4539,18 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
 
 err_free_all:
        for_each_root_subsys(root, ss) {
-               struct cgroup_subsys_state *css = cgrp->subsys[ss->subsys_id];
+               struct cgroup_subsys_state *css = css_ar[ss->subsys_id];
 
                if (css) {
                        percpu_ref_cancel_init(&css->refcnt);
-                       ss->css_free(cgrp);
+                       ss->css_free(css);
                }
        }
        mutex_unlock(&cgroup_mutex);
        /* Release the reference count that we took on the superblock */
        deactivate_super(sb);
 err_free_id:
-       ida_simple_remove(&root->cgroup_ida, cgrp->id);
+       idr_remove(&root->cgroup_idr, cgrp->id);
 err_free_name:
        kfree(rcu_dereference_raw(cgrp->name));
 err_free_cgrp:
@@ -4432,22 +4572,84 @@ static int cgroup_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
        return cgroup_create(c_parent, dentry, mode | S_IFDIR);
 }
 
-static void cgroup_css_killed(struct cgroup *cgrp)
+/*
+ * This is called when the refcnt of a css is confirmed to be killed.
+ * css_tryget() is now guaranteed to fail.
+ */
+static void css_killed_work_fn(struct work_struct *work)
 {
-       if (!atomic_dec_and_test(&cgrp->css_kill_cnt))
-               return;
+       struct cgroup_subsys_state *css =
+               container_of(work, struct cgroup_subsys_state, destroy_work);
+       struct cgroup *cgrp = css->cgroup;
 
-       /* percpu ref's of all css's are killed, kick off the next step */
-       INIT_WORK(&cgrp->destroy_work, cgroup_offline_fn);
-       schedule_work(&cgrp->destroy_work);
+       mutex_lock(&cgroup_mutex);
+
+       /*
+        * css_tryget() is guaranteed to fail now.  Tell subsystems to
+        * initiate destruction.
+        */
+       offline_css(css);
+
+       /*
+        * If @cgrp is marked dead, it's waiting for refs of all css's to
+        * be disabled before proceeding to the second phase of cgroup
+        * destruction.  If we are the last one, kick it off.
+        */
+       if (!cgrp->nr_css && cgroup_is_dead(cgrp))
+               cgroup_destroy_css_killed(cgrp);
+
+       mutex_unlock(&cgroup_mutex);
+
+       /*
+        * Put the css refs from kill_css().  Each css holds an extra
+        * reference to the cgroup's dentry and cgroup removal proceeds
+        * regardless of css refs.  On the last put of each css, whenever
+        * that may be, the extra dentry ref is put so that dentry
+        * destruction happens only after all css's are released.
+        */
+       css_put(css);
 }
 
-static void css_ref_killed_fn(struct percpu_ref *ref)
+/* css kill confirmation processing requires process context, bounce */
+static void css_killed_ref_fn(struct percpu_ref *ref)
 {
        struct cgroup_subsys_state *css =
                container_of(ref, struct cgroup_subsys_state, refcnt);
 
-       cgroup_css_killed(css->cgroup);
+       INIT_WORK(&css->destroy_work, css_killed_work_fn);
+       schedule_work(&css->destroy_work);
+}
+
+/**
+ * kill_css - destroy a css
+ * @css: css to destroy
+ *
+ * This function initiates destruction of @css by removing cgroup interface
+ * files and putting its base reference.  ->css_offline() will be invoked
+ * asynchronously once css_tryget() is guaranteed to fail and when the
+ * reference count reaches zero, @css will be released.
+ */
+static void kill_css(struct cgroup_subsys_state *css)
+{
+       cgroup_clear_dir(css->cgroup, 1 << css->ss->subsys_id);
+
+       /*
+        * Killing would put the base ref, but we need to keep it alive
+        * until after ->css_offline().
+        */
+       css_get(css);
+
+       /*
+        * cgroup core guarantees that, by the time ->css_offline() is
+        * invoked, no new css reference will be given out via
+        * css_tryget().  We can't simply call percpu_ref_kill() and
+        * proceed to offlining css's because percpu_ref_kill() doesn't
+        * guarantee that the ref is seen as killed on all CPUs on return.
+        *
+        * Use percpu_ref_kill_and_confirm() to get notifications as each
+        * css is confirmed to be seen as killed on all CPUs.
+        */
+       percpu_ref_kill_and_confirm(&css->refcnt, css_killed_ref_fn);
 }
 
 /**
@@ -4496,41 +4698,19 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
                return -EBUSY;
 
        /*
-        * Block new css_tryget() by killing css refcnts.  cgroup core
-        * guarantees that, by the time ->css_offline() is invoked, no new
-        * css reference will be given out via css_tryget().  We can't
-        * simply call percpu_ref_kill() and proceed to offlining css's
-        * because percpu_ref_kill() doesn't guarantee that the ref is seen
-        * as killed on all CPUs on return.
-        *
-        * Use percpu_ref_kill_and_confirm() to get notifications as each
-        * css is confirmed to be seen as killed on all CPUs.  The
-        * notification callback keeps track of the number of css's to be
-        * killed and schedules cgroup_offline_fn() to perform the rest of
-        * destruction once the percpu refs of all css's are confirmed to
-        * be killed.
+        * Initiate massacre of all css's.  cgroup_destroy_css_killed()
+        * will be invoked to perform the rest of destruction once the
+        * percpu refs of all css's are confirmed to be killed.
         */
-       atomic_set(&cgrp->css_kill_cnt, 1);
-       for_each_root_subsys(cgrp->root, ss) {
-               struct cgroup_subsys_state *css = cgrp->subsys[ss->subsys_id];
-
-               /*
-                * Killing would put the base ref, but we need to keep it
-                * alive until after ->css_offline.
-                */
-               percpu_ref_get(&css->refcnt);
-
-               atomic_inc(&cgrp->css_kill_cnt);
-               percpu_ref_kill_and_confirm(&css->refcnt, css_ref_killed_fn);
-       }
-       cgroup_css_killed(cgrp);
+       for_each_root_subsys(cgrp->root, ss)
+               kill_css(cgroup_css(cgrp, ss->subsys_id));
 
        /*
         * Mark @cgrp dead.  This prevents further task migration and child
         * creation by disabling cgroup_lock_live_group().  Note that
-        * CGRP_DEAD assertion is depended upon by cgroup_next_sibling() to
+        * CGRP_DEAD assertion is depended upon by css_next_child() to
         * resume iteration after dropping RCU read lock.  See
-        * cgroup_next_sibling() for details.
+        * css_next_child() for details.
         */
        set_bit(CGRP_DEAD, &cgrp->flags);
 
@@ -4541,9 +4721,20 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
        raw_spin_unlock(&release_list_lock);
 
        /*
-        * Remove @cgrp directory.  The removal puts the base ref but we
-        * aren't quite done with @cgrp yet, so hold onto it.
+        * If @cgrp has css's attached, the second stage of cgroup
+        * destruction is kicked off from css_killed_work_fn() after the
+        * refs of all attached css's are killed.  If @cgrp doesn't have
+        * any css, we kick it off here.
         */
+       if (!cgrp->nr_css)
+               cgroup_destroy_css_killed(cgrp);
+
+       /*
+        * Clear the base files and remove @cgrp directory.  The removal
+        * puts the base ref but we aren't quite done with @cgrp yet, so
+        * hold onto it.
+        */
+       cgroup_addrm_files(cgrp, cgroup_base_files, false);
        dget(d);
        cgroup_d_remove_dir(d);
 
@@ -4563,50 +4754,36 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
 };
 
 /**
- * cgroup_offline_fn - the second step of cgroup destruction
+ * cgroup_destroy_css_killed - the second step of cgroup destruction
  * @work: cgroup->destroy_free_work
  *
  * This function is invoked from a work item for a cgroup which is being
- * destroyed after the percpu refcnts of all css's are guaranteed to be
- * seen as killed on all CPUs, and performs the rest of destruction.  This
- * is the second step of destruction described in the comment above
- * cgroup_destroy_locked().
+ * destroyed after all css's are offlined and performs the rest of
+ * destruction.  This is the second step of destruction described in the
+ * comment above cgroup_destroy_locked().
  */
-static void cgroup_offline_fn(struct work_struct *work)
+static void cgroup_destroy_css_killed(struct cgroup *cgrp)
 {
-       struct cgroup *cgrp = container_of(work, struct cgroup, destroy_work);
        struct cgroup *parent = cgrp->parent;
        struct dentry *d = cgrp->dentry;
-       struct cgroup_subsys *ss;
 
-       mutex_lock(&cgroup_mutex);
+       lockdep_assert_held(&cgroup_mutex);
 
-       /*
-        * css_tryget() is guaranteed to fail now.  Tell subsystems to
-        * initate destruction.
-        */
-       for_each_root_subsys(cgrp->root, ss)
-               offline_css(ss, cgrp);
+       /* delete this cgroup from parent->children */
+       list_del_rcu(&cgrp->sibling);
 
        /*
-        * Put the css refs from cgroup_destroy_locked().  Each css holds
-        * an extra reference to the cgroup's dentry and cgroup removal
-        * proceeds regardless of css refs.  On the last put of each css,
-        * whenever that may be, the extra dentry ref is put so that dentry
-        * destruction happens only after all css's are released.
+        * We should remove the cgroup object from idr before its grace
+        * period starts, so we won't be looking up a cgroup while the
+        * cgroup is being freed.
         */
-       for_each_root_subsys(cgrp->root, ss)
-               css_put(cgrp->subsys[ss->subsys_id]);
-
-       /* delete this cgroup from parent->children */
-       list_del_rcu(&cgrp->sibling);
+       idr_remove(&cgrp->root->cgroup_idr, cgrp->id);
+       cgrp->id = -1;
 
        dput(d);
 
        set_bit(CGRP_RELEASABLE, &parent->flags);
        check_for_release(parent);
-
-       mutex_unlock(&cgroup_mutex);
 }
 
 static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry)
@@ -4629,6 +4806,11 @@ static void __init_or_module cgroup_init_cftsets(struct cgroup_subsys *ss)
         * deregistration.
         */
        if (ss->base_cftypes) {
+               struct cftype *cft;
+
+               for (cft = ss->base_cftypes; cft->name[0] != '\0'; cft++)
+                       cft->ss = ss;
+
                ss->base_cftset.cfts = ss->base_cftypes;
                list_add_tail(&ss->base_cftset.node, &ss->cftsets);
        }
@@ -4648,10 +4830,10 @@ static void __init cgroup_init_subsys(struct cgroup_subsys *ss)
        /* Create the top cgroup state for this subsystem */
        list_add(&ss->sibling, &cgroup_dummy_root.subsys_list);
        ss->root = &cgroup_dummy_root;
-       css = ss->css_alloc(cgroup_dummy_top);
+       css = ss->css_alloc(cgroup_css(cgroup_dummy_top, ss->subsys_id));
        /* We don't handle early failures gracefully */
        BUG_ON(IS_ERR(css));
-       init_cgroup_css(css, ss, cgroup_dummy_top);
+       init_css(css, ss, cgroup_dummy_top);
 
        /* Update the init_css_set to contain a subsys
         * pointer to this state - since the subsystem is
@@ -4666,7 +4848,7 @@ static void __init cgroup_init_subsys(struct cgroup_subsys *ss)
         * need to invoke fork callbacks here. */
        BUG_ON(!list_empty(&init_task.tasks));
 
-       BUG_ON(online_css(ss, cgroup_dummy_top));
+       BUG_ON(online_css(css));
 
        mutex_unlock(&cgroup_mutex);
 
@@ -4727,7 +4909,7 @@ int __init_or_module cgroup_load_subsys(struct cgroup_subsys *ss)
         * struct, so this can happen first (i.e. before the dummy root
         * attachment).
         */
-       css = ss->css_alloc(cgroup_dummy_top);
+       css = ss->css_alloc(cgroup_css(cgroup_dummy_top, ss->subsys_id));
        if (IS_ERR(css)) {
                /* failure case - need to deassign the cgroup_subsys[] slot. */
                cgroup_subsys[ss->subsys_id] = NULL;
@@ -4739,8 +4921,8 @@ int __init_or_module cgroup_load_subsys(struct cgroup_subsys *ss)
        ss->root = &cgroup_dummy_root;
 
        /* our new subsystem will be attached to the dummy hierarchy. */
-       init_cgroup_css(css, ss, cgroup_dummy_top);
-       /* init_idr must be after init_cgroup_css because it sets css->id. */
+       init_css(css, ss, cgroup_dummy_top);
+       /* init_idr must be after init_css() because it sets css->id. */
        if (ss->use_id) {
                ret = cgroup_init_idr(ss, css);
                if (ret)
@@ -4770,7 +4952,7 @@ int __init_or_module cgroup_load_subsys(struct cgroup_subsys *ss)
        }
        write_unlock(&css_set_lock);
 
-       ret = online_css(ss, cgroup_dummy_top);
+       ret = online_css(css);
        if (ret)
                goto err_unload;
 
@@ -4802,14 +4984,14 @@ void cgroup_unload_subsys(struct cgroup_subsys *ss)
 
        /*
         * we shouldn't be called if the subsystem is in use, and the use of
-        * try_module_get in parse_cgroupfs_options should ensure that it
+        * try_module_get() in rebind_subsystems() should ensure that it
         * doesn't start being used while we're killing it off.
         */
        BUG_ON(ss->root != &cgroup_dummy_root);
 
        mutex_lock(&cgroup_mutex);
 
-       offline_css(ss, cgroup_dummy_top);
+       offline_css(cgroup_css(cgroup_dummy_top, ss->subsys_id));
 
        if (ss->use_id)
                idr_destroy(&ss->idr);
@@ -4843,8 +5025,8 @@ void cgroup_unload_subsys(struct cgroup_subsys *ss)
         * the cgrp->subsys pointer to find their state. note that this
         * also takes care of freeing the css_id.
         */
-       ss->css_free(cgroup_dummy_top);
-       cgroup_dummy_top->subsys[ss->subsys_id] = NULL;
+       ss->css_free(cgroup_css(cgroup_dummy_top, ss->subsys_id));
+       RCU_INIT_POINTER(cgroup_dummy_top->subsys[ss->subsys_id], NULL);
 
        mutex_unlock(&cgroup_mutex);
 }
@@ -4926,6 +5108,10 @@ int __init cgroup_init(void)
 
        BUG_ON(cgroup_init_root_id(&cgroup_dummy_root, 0, 1));
 
+       err = idr_alloc(&cgroup_dummy_root.cgroup_idr, cgroup_dummy_top,
+                       0, 1, GFP_KERNEL);
+       BUG_ON(err < 0);
+
        mutex_unlock(&cgroup_root_mutex);
        mutex_unlock(&cgroup_mutex);
 
@@ -5082,7 +5268,7 @@ void cgroup_fork(struct task_struct *child)
  * Adds the task to the list running through its css_set if necessary and
  * call the subsystem fork() callbacks.  Has to be after the task is
  * visible on the task list in case we race with the first call to
- * cgroup_iter_start() - to guarantee that the new task ends up on its
+ * cgroup_task_iter_start() - to guarantee that the new task ends up on its
  * list.
  */
 void cgroup_post_fork(struct task_struct *child)
@@ -5195,10 +5381,10 @@ void cgroup_exit(struct task_struct *tsk, int run_callbacks)
                 */
                for_each_builtin_subsys(ss, i) {
                        if (ss->exit) {
-                               struct cgroup *old_cgrp = cset->subsys[i]->cgroup;
-                               struct cgroup *cgrp = task_cgroup(tsk, i);
+                               struct cgroup_subsys_state *old_css = cset->subsys[i];
+                               struct cgroup_subsys_state *css = task_css(tsk, i);
 
-                               ss->exit(cgrp, old_cgrp, tsk);
+                               ss->exit(css, old_css, tsk);
                        }
                }
        }
@@ -5457,20 +5643,16 @@ static int __init_or_module cgroup_init_idr(struct cgroup_subsys *ss,
        return 0;
 }
 
-static int alloc_css_id(struct cgroup_subsys *ss, struct cgroup *parent,
-                       struct cgroup *child)
+static int alloc_css_id(struct cgroup_subsys_state *child_css)
 {
-       int subsys_id, i, depth = 0;
-       struct cgroup_subsys_state *parent_css, *child_css;
+       struct cgroup_subsys_state *parent_css = css_parent(child_css);
        struct css_id *child_id, *parent_id;
+       int i, depth;
 
-       subsys_id = ss->subsys_id;
-       parent_css = parent->subsys[subsys_id];
-       child_css = child->subsys[subsys_id];
        parent_id = rcu_dereference_protected(parent_css->id, true);
        depth = parent_id->depth + 1;
 
-       child_id = get_new_cssid(ss, depth);
+       child_id = get_new_cssid(child_css->ss, depth);
        if (IS_ERR(child_id))
                return PTR_ERR(child_id);
 
@@ -5508,8 +5690,14 @@ struct cgroup_subsys_state *css_lookup(struct cgroup_subsys *ss, int id)
 }
 EXPORT_SYMBOL_GPL(css_lookup);
 
-/*
- * get corresponding css from file open on cgroupfs directory
+/**
+ * cgroup_css_from_dir - get corresponding css from file open on cgroup dir
+ * @f: directory file of interest
+ * @id: subsystem id of interest
+ *
+ * Must be called under RCU read lock.  The caller is responsible for
+ * pinning the returned css if it needs to be accessed outside the RCU
+ * critical section.
  */
 struct cgroup_subsys_state *cgroup_css_from_dir(struct file *f, int id)
 {
@@ -5517,6 +5705,8 @@ struct cgroup_subsys_state *cgroup_css_from_dir(struct file *f, int id)
        struct inode *inode;
        struct cgroup_subsys_state *css;
 
+       WARN_ON_ONCE(!rcu_read_lock_held());
+
        inode = file_inode(f);
        /* check in cgroup filesystem dir */
        if (inode->i_op != &cgroup_dir_inode_operations)
@@ -5527,12 +5717,35 @@ struct cgroup_subsys_state *cgroup_css_from_dir(struct file *f, int id)
 
        /* get cgroup */
        cgrp = __d_cgrp(f->f_dentry);
-       css = cgrp->subsys[id];
+       css = cgroup_css(cgrp, id);
        return css ? css : ERR_PTR(-ENOENT);
 }
 
+/**
+ * css_from_id - lookup css by id
+ * @id: the cgroup id
+ * @ss: cgroup subsys to be looked into
+ *
+ * Returns the css if there's valid one with @id, otherwise returns NULL.
+ * Should be called under rcu_read_lock().
+ */
+struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss)
+{
+       struct cgroup *cgrp;
+
+       rcu_lockdep_assert(rcu_read_lock_held() ||
+                          lockdep_is_held(&cgroup_mutex),
+                          "css_from_id() needs proper protection");
+
+       cgrp = idr_find(&ss->root->cgroup_idr, id);
+       if (cgrp)
+               return cgroup_css(cgrp, ss->subsys_id);
+       return NULL;
+}
+
 #ifdef CONFIG_CGROUP_DEBUG
-static struct cgroup_subsys_state *debug_css_alloc(struct cgroup *cgrp)
+static struct cgroup_subsys_state *
+debug_css_alloc(struct cgroup_subsys_state *parent_css)
 {
        struct cgroup_subsys_state *css = kzalloc(sizeof(*css), GFP_KERNEL);
 
@@ -5542,22 +5755,24 @@ static struct cgroup_subsys_state *debug_css_alloc(struct cgroup *cgrp)
        return css;
 }
 
-static void debug_css_free(struct cgroup *cgrp)
+static void debug_css_free(struct cgroup_subsys_state *css)
 {
-       kfree(cgrp->subsys[debug_subsys_id]);
+       kfree(css);
 }
 
-static u64 debug_taskcount_read(struct cgroup *cgrp, struct cftype *cft)
+static u64 debug_taskcount_read(struct cgroup_subsys_state *css,
+                               struct cftype *cft)
 {
-       return cgroup_task_count(cgrp);
+       return cgroup_task_count(css->cgroup);
 }
 
-static u64 current_css_set_read(struct cgroup *cgrp, struct cftype *cft)
+static u64 current_css_set_read(struct cgroup_subsys_state *css,
+                               struct cftype *cft)
 {
        return (u64)(unsigned long)current->cgroups;
 }
 
-static u64 current_css_set_refcount_read(struct cgroup *cgrp,
+static u64 current_css_set_refcount_read(struct cgroup_subsys_state *css,
                                         struct cftype *cft)
 {
        u64 count;
@@ -5568,7 +5783,7 @@ static u64 current_css_set_refcount_read(struct cgroup *cgrp,
        return count;
 }
 
-static int current_css_set_cg_links_read(struct cgroup *cgrp,
+static int current_css_set_cg_links_read(struct cgroup_subsys_state *css,
                                         struct cftype *cft,
                                         struct seq_file *seq)
 {
@@ -5595,14 +5810,13 @@ static int current_css_set_cg_links_read(struct cgroup *cgrp,
 }
 
 #define MAX_TASKS_SHOWN_PER_CSS 25
-static int cgroup_css_links_read(struct cgroup *cgrp,
-                                struct cftype *cft,
-                                struct seq_file *seq)
+static int cgroup_css_links_read(struct cgroup_subsys_state *css,
+                                struct cftype *cft, struct seq_file *seq)
 {
        struct cgrp_cset_link *link;
 
        read_lock(&css_set_lock);
-       list_for_each_entry(link, &cgrp->cset_links, cset_link) {
+       list_for_each_entry(link, &css->cgroup->cset_links, cset_link) {
                struct css_set *cset = link->cset;
                struct task_struct *task;
                int count = 0;
@@ -5621,9 +5835,9 @@ static int cgroup_css_links_read(struct cgroup *cgrp,
        return 0;
 }
 
-static u64 releasable_read(struct cgroup *cgrp, struct cftype *cft)
+static u64 releasable_read(struct cgroup_subsys_state *css, struct cftype *cft)
 {
-       return test_bit(CGRP_RELEASABLE, &cgrp->flags);
+       return test_bit(CGRP_RELEASABLE, &css->cgroup->flags);
 }
 
 static struct cftype debug_files[] =  {
index 75dda1ea5026fdf7f8d179f31b5217348b501e0f..f0ff64d0ebaaf5c8f5d108f3f3ea4ed6cdc114fd 100644 (file)
@@ -45,25 +45,19 @@ struct freezer {
        spinlock_t                      lock;
 };
 
-static inline struct freezer *cgroup_freezer(struct cgroup *cgroup)
+static inline struct freezer *css_freezer(struct cgroup_subsys_state *css)
 {
-       return container_of(cgroup_subsys_state(cgroup, freezer_subsys_id),
-                           struct freezer, css);
+       return css ? container_of(css, struct freezer, css) : NULL;
 }
 
 static inline struct freezer *task_freezer(struct task_struct *task)
 {
-       return container_of(task_subsys_state(task, freezer_subsys_id),
-                           struct freezer, css);
+       return css_freezer(task_css(task, freezer_subsys_id));
 }
 
 static struct freezer *parent_freezer(struct freezer *freezer)
 {
-       struct cgroup *pcg = freezer->css.cgroup->parent;
-
-       if (pcg)
-               return cgroup_freezer(pcg);
-       return NULL;
+       return css_freezer(css_parent(&freezer->css));
 }
 
 bool cgroup_freezing(struct task_struct *task)
@@ -92,7 +86,8 @@ static const char *freezer_state_strs(unsigned int state)
 
 struct cgroup_subsys freezer_subsys;
 
-static struct cgroup_subsys_state *freezer_css_alloc(struct cgroup *cgroup)
+static struct cgroup_subsys_state *
+freezer_css_alloc(struct cgroup_subsys_state *parent_css)
 {
        struct freezer *freezer;
 
@@ -105,22 +100,22 @@ static struct cgroup_subsys_state *freezer_css_alloc(struct cgroup *cgroup)
 }
 
 /**
- * freezer_css_online - commit creation of a freezer cgroup
- * @cgroup: cgroup being created
+ * freezer_css_online - commit creation of a freezer css
+ * @css: css being created
  *
- * We're committing to creation of @cgroup.  Mark it online and inherit
+ * We're committing to creation of @css.  Mark it online and inherit
  * parent's freezing state while holding both parent's and our
  * freezer->lock.
  */
-static int freezer_css_online(struct cgroup *cgroup)
+static int freezer_css_online(struct cgroup_subsys_state *css)
 {
-       struct freezer *freezer = cgroup_freezer(cgroup);
+       struct freezer *freezer = css_freezer(css);
        struct freezer *parent = parent_freezer(freezer);
 
        /*
         * The following double locking and freezing state inheritance
         * guarantee that @cgroup can never escape ancestors' freezing
-        * states.  See cgroup_for_each_descendant_pre() for details.
+        * states.  See css_for_each_descendant_pre() for details.
         */
        if (parent)
                spin_lock_irq(&parent->lock);
@@ -141,15 +136,15 @@ static int freezer_css_online(struct cgroup *cgroup)
 }
 
 /**
- * freezer_css_offline - initiate destruction of @cgroup
- * @cgroup: cgroup being destroyed
+ * freezer_css_offline - initiate destruction of a freezer css
+ * @css: css being destroyed
  *
- * @cgroup is going away.  Mark it dead and decrement system_freezing_count
- * if it was holding one.
+ * @css is going away.  Mark it dead and decrement system_freezing_count if
+ * it was holding one.
  */
-static void freezer_css_offline(struct cgroup *cgroup)
+static void freezer_css_offline(struct cgroup_subsys_state *css)
 {
-       struct freezer *freezer = cgroup_freezer(cgroup);
+       struct freezer *freezer = css_freezer(css);
 
        spin_lock_irq(&freezer->lock);
 
@@ -161,9 +156,9 @@ static void freezer_css_offline(struct cgroup *cgroup)
        spin_unlock_irq(&freezer->lock);
 }
 
-static void freezer_css_free(struct cgroup *cgroup)
+static void freezer_css_free(struct cgroup_subsys_state *css)
 {
-       kfree(cgroup_freezer(cgroup));
+       kfree(css_freezer(css));
 }
 
 /*
@@ -175,25 +170,26 @@ static void freezer_css_free(struct cgroup *cgroup)
  * @freezer->lock.  freezer_attach() makes the new tasks conform to the
  * current state and all following state changes can see the new tasks.
  */
-static void freezer_attach(struct cgroup *new_cgrp, struct cgroup_taskset *tset)
+static void freezer_attach(struct cgroup_subsys_state *new_css,
+                          struct cgroup_taskset *tset)
 {
-       struct freezer *freezer = cgroup_freezer(new_cgrp);
+       struct freezer *freezer = css_freezer(new_css);
        struct task_struct *task;
        bool clear_frozen = false;
 
        spin_lock_irq(&freezer->lock);
 
        /*
-        * Make the new tasks conform to the current state of @new_cgrp.
+        * Make the new tasks conform to the current state of @new_css.
         * For simplicity, when migrating any task to a FROZEN cgroup, we
         * revert it to FREEZING and let update_if_frozen() determine the
         * correct state later.
         *
-        * Tasks in @tset are on @new_cgrp but may not conform to its
+        * Tasks in @tset are on @new_css but may not conform to its
         * current state before executing the following - !frozen tasks may
         * be visible in a FROZEN cgroup and frozen tasks in a THAWED one.
         */
-       cgroup_taskset_for_each(task, new_cgrp, tset) {
+       cgroup_taskset_for_each(task, new_css, tset) {
                if (!(freezer->state & CGROUP_FREEZING)) {
                        __thaw_task(task);
                } else {
@@ -231,7 +227,7 @@ static void freezer_fork(struct task_struct *task)
         * The root cgroup is non-freezable, so we can skip the
         * following check.
         */
-       if (!freezer->css.cgroup->parent)
+       if (!parent_freezer(freezer))
                goto out;
 
        spin_lock_irq(&freezer->lock);
@@ -244,7 +240,7 @@ out:
 
 /**
  * update_if_frozen - update whether a cgroup finished freezing
- * @cgroup: cgroup of interest
+ * @css: css of interest
  *
  * Once FREEZING is initiated, transition to FROZEN is lazily updated by
  * calling this function.  If the current state is FREEZING but not FROZEN,
@@ -255,14 +251,14 @@ out:
  * update_if_frozen() on all descendants prior to invoking this function.
  *
  * Task states and freezer state might disagree while tasks are being
- * migrated into or out of @cgroup, so we can't verify task states against
+ * migrated into or out of @css, so we can't verify task states against
  * @freezer state here.  See freezer_attach() for details.
  */
-static void update_if_frozen(struct cgroup *cgroup)
+static void update_if_frozen(struct cgroup_subsys_state *css)
 {
-       struct freezer *freezer = cgroup_freezer(cgroup);
-       struct cgroup *pos;
-       struct cgroup_iter it;
+       struct freezer *freezer = css_freezer(css);
+       struct cgroup_subsys_state *pos;
+       struct css_task_iter it;
        struct task_struct *task;
 
        WARN_ON_ONCE(!rcu_read_lock_held());
@@ -274,8 +270,8 @@ static void update_if_frozen(struct cgroup *cgroup)
                goto out_unlock;
 
        /* are all (live) children frozen? */
-       cgroup_for_each_child(pos, cgroup) {
-               struct freezer *child = cgroup_freezer(pos);
+       css_for_each_child(pos, css) {
+               struct freezer *child = css_freezer(pos);
 
                if ((child->state & CGROUP_FREEZER_ONLINE) &&
                    !(child->state & CGROUP_FROZEN))
@@ -283,9 +279,9 @@ static void update_if_frozen(struct cgroup *cgroup)
        }
 
        /* are all tasks frozen? */
-       cgroup_iter_start(cgroup, &it);
+       css_task_iter_start(css, &it);
 
-       while ((task = cgroup_iter_next(cgroup, &it))) {
+       while ((task = css_task_iter_next(&it))) {
                if (freezing(task)) {
                        /*
                         * freezer_should_skip() indicates that the task
@@ -300,52 +296,49 @@ static void update_if_frozen(struct cgroup *cgroup)
 
        freezer->state |= CGROUP_FROZEN;
 out_iter_end:
-       cgroup_iter_end(cgroup, &it);
+       css_task_iter_end(&it);
 out_unlock:
        spin_unlock_irq(&freezer->lock);
 }
 
-static int freezer_read(struct cgroup *cgroup, struct cftype *cft,
+static int freezer_read(struct cgroup_subsys_state *css, struct cftype *cft,
                        struct seq_file *m)
 {
-       struct cgroup *pos;
+       struct cgroup_subsys_state *pos;
 
        rcu_read_lock();
 
        /* update states bottom-up */
-       cgroup_for_each_descendant_post(pos, cgroup)
+       css_for_each_descendant_post(pos, css)
                update_if_frozen(pos);
-       update_if_frozen(cgroup);
 
        rcu_read_unlock();
 
-       seq_puts(m, freezer_state_strs(cgroup_freezer(cgroup)->state));
+       seq_puts(m, freezer_state_strs(css_freezer(css)->state));
        seq_putc(m, '\n');
        return 0;
 }
 
 static void freeze_cgroup(struct freezer *freezer)
 {
-       struct cgroup *cgroup = freezer->css.cgroup;
-       struct cgroup_iter it;
+       struct css_task_iter it;
        struct task_struct *task;
 
-       cgroup_iter_start(cgroup, &it);
-       while ((task = cgroup_iter_next(cgroup, &it)))
+       css_task_iter_start(&freezer->css, &it);
+       while ((task = css_task_iter_next(&it)))
                freeze_task(task);
-       cgroup_iter_end(cgroup, &it);
+       css_task_iter_end(&it);
 }
 
 static void unfreeze_cgroup(struct freezer *freezer)
 {
-       struct cgroup *cgroup = freezer->css.cgroup;
-       struct cgroup_iter it;
+       struct css_task_iter it;
        struct task_struct *task;
 
-       cgroup_iter_start(cgroup, &it);
-       while ((task = cgroup_iter_next(cgroup, &it)))
+       css_task_iter_start(&freezer->css, &it);
+       while ((task = css_task_iter_next(&it)))
                __thaw_task(task);
-       cgroup_iter_end(cgroup, &it);
+       css_task_iter_end(&it);
 }
 
 /**
@@ -395,12 +388,7 @@ static void freezer_apply_state(struct freezer *freezer, bool freeze,
  */
 static void freezer_change_state(struct freezer *freezer, bool freeze)
 {
-       struct cgroup *pos;
-
-       /* update @freezer */
-       spin_lock_irq(&freezer->lock);
-       freezer_apply_state(freezer, freeze, CGROUP_FREEZING_SELF);
-       spin_unlock_irq(&freezer->lock);
+       struct cgroup_subsys_state *pos;
 
        /*
         * Update all its descendants in pre-order traversal.  Each
@@ -408,24 +396,33 @@ static void freezer_change_state(struct freezer *freezer, bool freeze)
         * CGROUP_FREEZING_PARENT.
         */
        rcu_read_lock();
-       cgroup_for_each_descendant_pre(pos, freezer->css.cgroup) {
-               struct freezer *pos_f = cgroup_freezer(pos);
+       css_for_each_descendant_pre(pos, &freezer->css) {
+               struct freezer *pos_f = css_freezer(pos);
                struct freezer *parent = parent_freezer(pos_f);
 
-               /*
-                * Our update to @parent->state is already visible which is
-                * all we need.  No need to lock @parent.  For more info on
-                * synchronization, see freezer_post_create().
-                */
                spin_lock_irq(&pos_f->lock);
-               freezer_apply_state(pos_f, parent->state & CGROUP_FREEZING,
-                                   CGROUP_FREEZING_PARENT);
+
+               if (pos_f == freezer) {
+                       freezer_apply_state(pos_f, freeze,
+                                           CGROUP_FREEZING_SELF);
+               } else {
+                       /*
+                        * Our update to @parent->state is already visible
+                        * which is all we need.  No need to lock @parent.
+                        * For more info on synchronization, see
+                        * freezer_post_create().
+                        */
+                       freezer_apply_state(pos_f,
+                                           parent->state & CGROUP_FREEZING,
+                                           CGROUP_FREEZING_PARENT);
+               }
+
                spin_unlock_irq(&pos_f->lock);
        }
        rcu_read_unlock();
 }
 
-static int freezer_write(struct cgroup *cgroup, struct cftype *cft,
+static int freezer_write(struct cgroup_subsys_state *css, struct cftype *cft,
                         const char *buffer)
 {
        bool freeze;
@@ -437,20 +434,22 @@ static int freezer_write(struct cgroup *cgroup, struct cftype *cft,
        else
                return -EINVAL;
 
-       freezer_change_state(cgroup_freezer(cgroup), freeze);
+       freezer_change_state(css_freezer(css), freeze);
        return 0;
 }
 
-static u64 freezer_self_freezing_read(struct cgroup *cgroup, struct cftype *cft)
+static u64 freezer_self_freezing_read(struct cgroup_subsys_state *css,
+                                     struct cftype *cft)
 {
-       struct freezer *freezer = cgroup_freezer(cgroup);
+       struct freezer *freezer = css_freezer(css);
 
        return (bool)(freezer->state & CGROUP_FREEZING_SELF);
 }
 
-static u64 freezer_parent_freezing_read(struct cgroup *cgroup, struct cftype *cft)
+static u64 freezer_parent_freezing_read(struct cgroup_subsys_state *css,
+                                       struct cftype *cft)
 {
-       struct freezer *freezer = cgroup_freezer(cgroup);
+       struct freezer *freezer = css_freezer(css);
 
        return (bool)(freezer->state & CGROUP_FREEZING_PARENT);
 }
index b2b227b821232d811d371fc24c672ad0fb715cf2..d7f07a2da5a6b6bcc682d39918b3bd97471d9543 100644 (file)
@@ -113,7 +113,7 @@ EXPORT_SYMBOL_GPL(put_online_cpus);
  * get_online_cpus() not an api which is called all that often.
  *
  */
-static void cpu_hotplug_begin(void)
+void cpu_hotplug_begin(void)
 {
        cpu_hotplug.active_writer = current;
 
@@ -127,7 +127,7 @@ static void cpu_hotplug_begin(void)
        }
 }
 
-static void cpu_hotplug_done(void)
+void cpu_hotplug_done(void)
 {
        cpu_hotplug.active_writer = NULL;
        mutex_unlock(&cpu_hotplug.lock);
@@ -154,10 +154,7 @@ void cpu_hotplug_enable(void)
        cpu_maps_update_done();
 }
 
-#else /* #if CONFIG_HOTPLUG_CPU */
-static void cpu_hotplug_begin(void) {}
-static void cpu_hotplug_done(void) {}
-#endif /* #else #if CONFIG_HOTPLUG_CPU */
+#endif /* CONFIG_HOTPLUG_CPU */
 
 /* Need to know about CPUs going up/down? */
 int __ref register_cpu_notifier(struct notifier_block *nb)
index 010a0083c0ae4cfa222e2d51f2e951893bac28c4..6bf981e13c437ff81979f4c9f08f69998c8ca45a 100644 (file)
  */
 int number_of_cpusets __read_mostly;
 
-/* Forward declare cgroup structures */
-struct cgroup_subsys cpuset_subsys;
-struct cpuset;
-
 /* See "Frequency meter" comments, below. */
 
 struct fmeter {
@@ -115,27 +111,20 @@ struct cpuset {
        int relax_domain_level;
 };
 
-/* Retrieve the cpuset for a cgroup */
-static inline struct cpuset *cgroup_cs(struct cgroup *cgrp)
+static inline struct cpuset *css_cs(struct cgroup_subsys_state *css)
 {
-       return container_of(cgroup_subsys_state(cgrp, cpuset_subsys_id),
-                           struct cpuset, css);
+       return css ? container_of(css, struct cpuset, css) : NULL;
 }
 
 /* Retrieve the cpuset for a task */
 static inline struct cpuset *task_cs(struct task_struct *task)
 {
-       return container_of(task_subsys_state(task, cpuset_subsys_id),
-                           struct cpuset, css);
+       return css_cs(task_css(task, cpuset_subsys_id));
 }
 
-static inline struct cpuset *parent_cs(const struct cpuset *cs)
+static inline struct cpuset *parent_cs(struct cpuset *cs)
 {
-       struct cgroup *pcgrp = cs->css.cgroup->parent;
-
-       if (pcgrp)
-               return cgroup_cs(pcgrp);
-       return NULL;
+       return css_cs(css_parent(&cs->css));
 }
 
 #ifdef CONFIG_NUMA
@@ -212,29 +201,30 @@ static struct cpuset top_cpuset = {
 /**
  * cpuset_for_each_child - traverse online children of a cpuset
  * @child_cs: loop cursor pointing to the current child
- * @pos_cgrp: used for iteration
+ * @pos_css: used for iteration
  * @parent_cs: target cpuset to walk children of
  *
  * Walk @child_cs through the online children of @parent_cs.  Must be used
  * with RCU read locked.
  */
-#define cpuset_for_each_child(child_cs, pos_cgrp, parent_cs)           \
-       cgroup_for_each_child((pos_cgrp), (parent_cs)->css.cgroup)      \
-               if (is_cpuset_online(((child_cs) = cgroup_cs((pos_cgrp)))))
+#define cpuset_for_each_child(child_cs, pos_css, parent_cs)            \
+       css_for_each_child((pos_css), &(parent_cs)->css)                \
+               if (is_cpuset_online(((child_cs) = css_cs((pos_css)))))
 
 /**
  * cpuset_for_each_descendant_pre - pre-order walk of a cpuset's descendants
  * @des_cs: loop cursor pointing to the current descendant
- * @pos_cgrp: used for iteration
+ * @pos_css: used for iteration
  * @root_cs: target cpuset to walk ancestor of
  *
  * Walk @des_cs through the online descendants of @root_cs.  Must be used
- * with RCU read locked.  The caller may modify @pos_cgrp by calling
- * cgroup_rightmost_descendant() to skip subtree.
+ * with RCU read locked.  The caller may modify @pos_css by calling
+ * css_rightmost_descendant() to skip subtree.  @root_cs is included in the
+ * iteration and the first node to be visited.
  */
-#define cpuset_for_each_descendant_pre(des_cs, pos_cgrp, root_cs)      \
-       cgroup_for_each_descendant_pre((pos_cgrp), (root_cs)->css.cgroup) \
-               if (is_cpuset_online(((des_cs) = cgroup_cs((pos_cgrp)))))
+#define cpuset_for_each_descendant_pre(des_cs, pos_css, root_cs)       \
+       css_for_each_descendant_pre((pos_css), &(root_cs)->css)         \
+               if (is_cpuset_online(((des_cs) = css_cs((pos_css)))))
 
 /*
  * There are two global mutexes guarding cpuset structures - cpuset_mutex
@@ -320,8 +310,7 @@ static struct file_system_type cpuset_fs_type = {
  *
  * Call with callback_mutex held.
  */
-static void guarantee_online_cpus(const struct cpuset *cs,
-                                 struct cpumask *pmask)
+static void guarantee_online_cpus(struct cpuset *cs, struct cpumask *pmask)
 {
        while (!cpumask_intersects(cs->cpus_allowed, cpu_online_mask))
                cs = parent_cs(cs);
@@ -339,7 +328,7 @@ static void guarantee_online_cpus(const struct cpuset *cs,
  *
  * Call with callback_mutex held.
  */
-static void guarantee_online_mems(const struct cpuset *cs, nodemask_t *pmask)
+static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask)
 {
        while (!nodes_intersects(cs->mems_allowed, node_states[N_MEMORY]))
                cs = parent_cs(cs);
@@ -384,7 +373,7 @@ static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
  * alloc_trial_cpuset - allocate a trial cpuset
  * @cs: the cpuset that the trial cpuset duplicates
  */
-static struct cpuset *alloc_trial_cpuset(const struct cpuset *cs)
+static struct cpuset *alloc_trial_cpuset(struct cpuset *cs)
 {
        struct cpuset *trial;
 
@@ -431,9 +420,9 @@ static void free_trial_cpuset(struct cpuset *trial)
  * Return 0 if valid, -errno if not.
  */
 
-static int validate_change(const struct cpuset *cur, const struct cpuset *trial)
+static int validate_change(struct cpuset *cur, struct cpuset *trial)
 {
-       struct cgroup *cgrp;
+       struct cgroup_subsys_state *css;
        struct cpuset *c, *par;
        int ret;
 
@@ -441,7 +430,7 @@ static int validate_change(const struct cpuset *cur, const struct cpuset *trial)
 
        /* Each of our child cpusets must be a subset of us */
        ret = -EBUSY;
-       cpuset_for_each_child(c, cgrp, cur)
+       cpuset_for_each_child(c, css, cur)
                if (!is_cpuset_subset(c, trial))
                        goto out;
 
@@ -462,7 +451,7 @@ static int validate_change(const struct cpuset *cur, const struct cpuset *trial)
         * overlap
         */
        ret = -EINVAL;
-       cpuset_for_each_child(c, cgrp, par) {
+       cpuset_for_each_child(c, css, par) {
                if ((is_cpu_exclusive(trial) || is_cpu_exclusive(c)) &&
                    c != cur &&
                    cpumask_intersects(trial->cpus_allowed, c->cpus_allowed))
@@ -475,13 +464,17 @@ static int validate_change(const struct cpuset *cur, const struct cpuset *trial)
 
        /*
         * Cpusets with tasks - existing or newly being attached - can't
-        * have empty cpus_allowed or mems_allowed.
+        * be changed to have empty cpus_allowed or mems_allowed.
         */
        ret = -ENOSPC;
-       if ((cgroup_task_count(cur->css.cgroup) || cur->attach_in_progress) &&
-           (cpumask_empty(trial->cpus_allowed) &&
-            nodes_empty(trial->mems_allowed)))
-               goto out;
+       if ((cgroup_task_count(cur->css.cgroup) || cur->attach_in_progress)) {
+               if (!cpumask_empty(cur->cpus_allowed) &&
+                   cpumask_empty(trial->cpus_allowed))
+                       goto out;
+               if (!nodes_empty(cur->mems_allowed) &&
+                   nodes_empty(trial->mems_allowed))
+                       goto out;
+       }
 
        ret = 0;
 out:
@@ -511,13 +504,16 @@ static void update_domain_attr_tree(struct sched_domain_attr *dattr,
                                    struct cpuset *root_cs)
 {
        struct cpuset *cp;
-       struct cgroup *pos_cgrp;
+       struct cgroup_subsys_state *pos_css;
 
        rcu_read_lock();
-       cpuset_for_each_descendant_pre(cp, pos_cgrp, root_cs) {
+       cpuset_for_each_descendant_pre(cp, pos_css, root_cs) {
+               if (cp == root_cs)
+                       continue;
+
                /* skip the whole subtree if @cp doesn't have any CPU */
                if (cpumask_empty(cp->cpus_allowed)) {
-                       pos_cgrp = cgroup_rightmost_descendant(pos_cgrp);
+                       pos_css = css_rightmost_descendant(pos_css);
                        continue;
                }
 
@@ -592,7 +588,7 @@ static int generate_sched_domains(cpumask_var_t **domains,
        struct sched_domain_attr *dattr;  /* attributes for custom domains */
        int ndoms = 0;          /* number of sched domains in result */
        int nslot;              /* next empty doms[] struct cpumask slot */
-       struct cgroup *pos_cgrp;
+       struct cgroup_subsys_state *pos_css;
 
        doms = NULL;
        dattr = NULL;
@@ -621,7 +617,9 @@ static int generate_sched_domains(cpumask_var_t **domains,
        csn = 0;
 
        rcu_read_lock();
-       cpuset_for_each_descendant_pre(cp, pos_cgrp, &top_cpuset) {
+       cpuset_for_each_descendant_pre(cp, pos_css, &top_cpuset) {
+               if (cp == &top_cpuset)
+                       continue;
                /*
                 * Continue traversing beyond @cp iff @cp has some CPUs and
                 * isn't load balancing.  The former is obvious.  The
@@ -638,7 +636,7 @@ static int generate_sched_domains(cpumask_var_t **domains,
                        csa[csn++] = cp;
 
                /* skip @cp's subtree */
-               pos_cgrp = cgroup_rightmost_descendant(pos_cgrp);
+               pos_css = css_rightmost_descendant(pos_css);
        }
        rcu_read_unlock();
 
@@ -833,52 +831,45 @@ static struct cpuset *effective_nodemask_cpuset(struct cpuset *cs)
 /**
  * cpuset_change_cpumask - make a task's cpus_allowed the same as its cpuset's
  * @tsk: task to test
- * @scan: struct cgroup_scanner containing the cgroup of the task
+ * @data: cpuset @tsk belongs to
  *
- * Called by cgroup_scan_tasks() for each task in a cgroup whose
- * cpus_allowed mask needs to be changed.
+ * Called by css_scan_tasks() for each task in a cgroup whose cpus_allowed
+ * mask needs to be changed.
  *
  * We don't need to re-check for the cgroup/cpuset membership, since we're
  * holding cpuset_mutex at this point.
  */
-static void cpuset_change_cpumask(struct task_struct *tsk,
-                                 struct cgroup_scanner *scan)
+static void cpuset_change_cpumask(struct task_struct *tsk, void *data)
 {
-       struct cpuset *cpus_cs;
+       struct cpuset *cs = data;
+       struct cpuset *cpus_cs = effective_cpumask_cpuset(cs);
 
-       cpus_cs = effective_cpumask_cpuset(cgroup_cs(scan->cg));
        set_cpus_allowed_ptr(tsk, cpus_cs->cpus_allowed);
 }
 
 /**
  * update_tasks_cpumask - Update the cpumasks of tasks in the cpuset.
  * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed
- * @heap: if NULL, defer allocating heap memory to cgroup_scan_tasks()
+ * @heap: if NULL, defer allocating heap memory to css_scan_tasks()
  *
  * Called with cpuset_mutex held
  *
- * The cgroup_scan_tasks() function will scan all the tasks in a cgroup,
+ * The css_scan_tasks() function will scan all the tasks in a cgroup,
  * calling callback functions for each.
  *
- * No return value. It's guaranteed that cgroup_scan_tasks() always returns 0
+ * No return value. It's guaranteed that css_scan_tasks() always returns 0
  * if @heap != NULL.
  */
 static void update_tasks_cpumask(struct cpuset *cs, struct ptr_heap *heap)
 {
-       struct cgroup_scanner scan;
-
-       scan.cg = cs->css.cgroup;
-       scan.test_task = NULL;
-       scan.process_task = cpuset_change_cpumask;
-       scan.heap = heap;
-       cgroup_scan_tasks(&scan);
+       css_scan_tasks(&cs->css, NULL, cpuset_change_cpumask, cs, heap);
 }
 
 /*
  * update_tasks_cpumask_hier - Update the cpumasks of tasks in the hierarchy.
  * @root_cs: the root cpuset of the hierarchy
  * @update_root: update root cpuset or not?
- * @heap: the heap used by cgroup_scan_tasks()
+ * @heap: the heap used by css_scan_tasks()
  *
  * This will update cpumasks of tasks in @root_cs and all other empty cpusets
  * which take on cpumask of @root_cs.
@@ -889,17 +880,19 @@ static void update_tasks_cpumask_hier(struct cpuset *root_cs,
                                      bool update_root, struct ptr_heap *heap)
 {
        struct cpuset *cp;
-       struct cgroup *pos_cgrp;
-
-       if (update_root)
-               update_tasks_cpumask(root_cs, heap);
+       struct cgroup_subsys_state *pos_css;
 
        rcu_read_lock();
-       cpuset_for_each_descendant_pre(cp, pos_cgrp, root_cs) {
-               /* skip the whole subtree if @cp have some CPU */
-               if (!cpumask_empty(cp->cpus_allowed)) {
-                       pos_cgrp = cgroup_rightmost_descendant(pos_cgrp);
-                       continue;
+       cpuset_for_each_descendant_pre(cp, pos_css, root_cs) {
+               if (cp == root_cs) {
+                       if (!update_root)
+                               continue;
+               } else {
+                       /* skip the whole subtree if @cp has some CPUs */
+                       if (!cpumask_empty(cp->cpus_allowed)) {
+                               pos_css = css_rightmost_descendant(pos_css);
+                               continue;
+                       }
                }
                if (!css_tryget(&cp->css))
                        continue;
@@ -1055,20 +1048,24 @@ static void cpuset_change_task_nodemask(struct task_struct *tsk,
        task_unlock(tsk);
 }
 
+struct cpuset_change_nodemask_arg {
+       struct cpuset           *cs;
+       nodemask_t              *newmems;
+};
+
 /*
  * Update task's mems_allowed and rebind its mempolicy and vmas' mempolicy
  * of it to cpuset's new mems_allowed, and migrate pages to new nodes if
  * memory_migrate flag is set. Called with cpuset_mutex held.
  */
-static void cpuset_change_nodemask(struct task_struct *p,
-                                  struct cgroup_scanner *scan)
+static void cpuset_change_nodemask(struct task_struct *p, void *data)
 {
-       struct cpuset *cs = cgroup_cs(scan->cg);
+       struct cpuset_change_nodemask_arg *arg = data;
+       struct cpuset *cs = arg->cs;
        struct mm_struct *mm;
        int migrate;
-       nodemask_t *newmems = scan->data;
 
-       cpuset_change_task_nodemask(p, newmems);
+       cpuset_change_task_nodemask(p, arg->newmems);
 
        mm = get_task_mm(p);
        if (!mm)
@@ -1078,7 +1075,7 @@ static void cpuset_change_nodemask(struct task_struct *p,
 
        mpol_rebind_mm(mm, &cs->mems_allowed);
        if (migrate)
-               cpuset_migrate_mm(mm, &cs->old_mems_allowed, newmems);
+               cpuset_migrate_mm(mm, &cs->old_mems_allowed, arg->newmems);
        mmput(mm);
 }
 
@@ -1087,28 +1084,22 @@ static void *cpuset_being_rebound;
 /**
  * update_tasks_nodemask - Update the nodemasks of tasks in the cpuset.
  * @cs: the cpuset in which each task's mems_allowed mask needs to be changed
- * @heap: if NULL, defer allocating heap memory to cgroup_scan_tasks()
+ * @heap: if NULL, defer allocating heap memory to css_scan_tasks()
  *
- * Called with cpuset_mutex held
- * No return value. It's guaranteed that cgroup_scan_tasks() always returns 0
- * if @heap != NULL.
+ * Called with cpuset_mutex held.  No return value. It's guaranteed that
+ * css_scan_tasks() always returns 0 if @heap != NULL.
  */
 static void update_tasks_nodemask(struct cpuset *cs, struct ptr_heap *heap)
 {
        static nodemask_t newmems;      /* protected by cpuset_mutex */
-       struct cgroup_scanner scan;
        struct cpuset *mems_cs = effective_nodemask_cpuset(cs);
+       struct cpuset_change_nodemask_arg arg = { .cs = cs,
+                                                 .newmems = &newmems };
 
        cpuset_being_rebound = cs;              /* causes mpol_dup() rebind */
 
        guarantee_online_mems(mems_cs, &newmems);
 
-       scan.cg = cs->css.cgroup;
-       scan.test_task = NULL;
-       scan.process_task = cpuset_change_nodemask;
-       scan.heap = heap;
-       scan.data = &newmems;
-
        /*
         * The mpol_rebind_mm() call takes mmap_sem, which we couldn't
         * take while holding tasklist_lock.  Forks can happen - the
@@ -1119,7 +1110,7 @@ static void update_tasks_nodemask(struct cpuset *cs, struct ptr_heap *heap)
         * It's ok if we rebind the same mm twice; mpol_rebind_mm()
         * is idempotent.  Also migrate pages in each mm to new nodes.
         */
-       cgroup_scan_tasks(&scan);
+       css_scan_tasks(&cs->css, NULL, cpuset_change_nodemask, &arg, heap);
 
        /*
         * All the tasks' nodemasks have been updated, update
@@ -1135,7 +1126,7 @@ static void update_tasks_nodemask(struct cpuset *cs, struct ptr_heap *heap)
  * update_tasks_nodemask_hier - Update the nodemasks of tasks in the hierarchy.
  * @cs: the root cpuset of the hierarchy
  * @update_root: update the root cpuset or not?
- * @heap: the heap used by cgroup_scan_tasks()
+ * @heap: the heap used by css_scan_tasks()
  *
  * This will update nodemasks of tasks in @root_cs and all other empty cpusets
  * which take on nodemask of @root_cs.
@@ -1146,17 +1137,19 @@ static void update_tasks_nodemask_hier(struct cpuset *root_cs,
                                       bool update_root, struct ptr_heap *heap)
 {
        struct cpuset *cp;
-       struct cgroup *pos_cgrp;
-
-       if (update_root)
-               update_tasks_nodemask(root_cs, heap);
+       struct cgroup_subsys_state *pos_css;
 
        rcu_read_lock();
-       cpuset_for_each_descendant_pre(cp, pos_cgrp, root_cs) {
-               /* skip the whole subtree if @cp have some CPU */
-               if (!nodes_empty(cp->mems_allowed)) {
-                       pos_cgrp = cgroup_rightmost_descendant(pos_cgrp);
-                       continue;
+       cpuset_for_each_descendant_pre(cp, pos_css, root_cs) {
+               if (cp == root_cs) {
+                       if (!update_root)
+                               continue;
+               } else {
+                       /* skip the whole subtree if @cp has some memory nodes */
+                       if (!nodes_empty(cp->mems_allowed)) {
+                               pos_css = css_rightmost_descendant(pos_css);
+                               continue;
+                       }
                }
                if (!css_tryget(&cp->css))
                        continue;
@@ -1263,44 +1256,39 @@ static int update_relax_domain_level(struct cpuset *cs, s64 val)
        return 0;
 }
 
-/*
+/**
  * cpuset_change_flag - make a task's spread flags the same as its cpuset's
  * @tsk: task to be updated
- * @scan: struct cgroup_scanner containing the cgroup of the task
+ * @data: cpuset @tsk belongs to
  *
- * Called by cgroup_scan_tasks() for each task in a cgroup.
+ * Called by css_scan_tasks() for each task in a cgroup.
  *
  * We don't need to re-check for the cgroup/cpuset membership, since we're
  * holding cpuset_mutex at this point.
  */
-static void cpuset_change_flag(struct task_struct *tsk,
-                               struct cgroup_scanner *scan)
+static void cpuset_change_flag(struct task_struct *tsk, void *data)
 {
-       cpuset_update_task_spread_flag(cgroup_cs(scan->cg), tsk);
+       struct cpuset *cs = data;
+
+       cpuset_update_task_spread_flag(cs, tsk);
 }
 
-/*
+/**
  * update_tasks_flags - update the spread flags of tasks in the cpuset.
  * @cs: the cpuset in which each task's spread flags needs to be changed
- * @heap: if NULL, defer allocating heap memory to cgroup_scan_tasks()
+ * @heap: if NULL, defer allocating heap memory to css_scan_tasks()
  *
  * Called with cpuset_mutex held
  *
- * The cgroup_scan_tasks() function will scan all the tasks in a cgroup,
+ * The css_scan_tasks() function will scan all the tasks in a cgroup,
  * calling callback functions for each.
  *
- * No return value. It's guaranteed that cgroup_scan_tasks() always returns 0
+ * No return value. It's guaranteed that css_scan_tasks() always returns 0
  * if @heap != NULL.
  */
 static void update_tasks_flags(struct cpuset *cs, struct ptr_heap *heap)
 {
-       struct cgroup_scanner scan;
-
-       scan.cg = cs->css.cgroup;
-       scan.test_task = NULL;
-       scan.process_task = cpuset_change_flag;
-       scan.heap = heap;
-       cgroup_scan_tasks(&scan);
+       css_scan_tasks(&cs->css, NULL, cpuset_change_flag, cs, heap);
 }
 
 /*
@@ -1458,9 +1446,10 @@ static int fmeter_getrate(struct fmeter *fmp)
 }
 
 /* Called by cgroups to determine if a cpuset is usable; cpuset_mutex held */
-static int cpuset_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
+static int cpuset_can_attach(struct cgroup_subsys_state *css,
+                            struct cgroup_taskset *tset)
 {
-       struct cpuset *cs = cgroup_cs(cgrp);
+       struct cpuset *cs = css_cs(css);
        struct task_struct *task;
        int ret;
 
@@ -1471,11 +1460,11 @@ static int cpuset_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
         * flag is set.
         */
        ret = -ENOSPC;
-       if (!cgroup_sane_behavior(cgrp) &&
+       if (!cgroup_sane_behavior(css->cgroup) &&
            (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed)))
                goto out_unlock;
 
-       cgroup_taskset_for_each(task, cgrp, tset) {
+       cgroup_taskset_for_each(task, css, tset) {
                /*
                 * Kthreads which disallow setaffinity shouldn't be moved
                 * to a new cpuset; we don't want to change their cpu
@@ -1504,11 +1493,11 @@ out_unlock:
        return ret;
 }
 
-static void cpuset_cancel_attach(struct cgroup *cgrp,
+static void cpuset_cancel_attach(struct cgroup_subsys_state *css,
                                 struct cgroup_taskset *tset)
 {
        mutex_lock(&cpuset_mutex);
-       cgroup_cs(cgrp)->attach_in_progress--;
+       css_cs(css)->attach_in_progress--;
        mutex_unlock(&cpuset_mutex);
 }
 
@@ -1519,16 +1508,18 @@ static void cpuset_cancel_attach(struct cgroup *cgrp,
  */
 static cpumask_var_t cpus_attach;
 
-static void cpuset_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
+static void cpuset_attach(struct cgroup_subsys_state *css,
+                         struct cgroup_taskset *tset)
 {
        /* static buf protected by cpuset_mutex */
        static nodemask_t cpuset_attach_nodemask_to;
        struct mm_struct *mm;
        struct task_struct *task;
        struct task_struct *leader = cgroup_taskset_first(tset);
-       struct cgroup *oldcgrp = cgroup_taskset_cur_cgroup(tset);
-       struct cpuset *cs = cgroup_cs(cgrp);
-       struct cpuset *oldcs = cgroup_cs(oldcgrp);
+       struct cgroup_subsys_state *oldcss = cgroup_taskset_cur_css(tset,
+                                                       cpuset_subsys_id);
+       struct cpuset *cs = css_cs(css);
+       struct cpuset *oldcs = css_cs(oldcss);
        struct cpuset *cpus_cs = effective_cpumask_cpuset(cs);
        struct cpuset *mems_cs = effective_nodemask_cpuset(cs);
 
@@ -1542,7 +1533,7 @@ static void cpuset_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
 
        guarantee_online_mems(mems_cs, &cpuset_attach_nodemask_to);
 
-       cgroup_taskset_for_each(task, cgrp, tset) {
+       cgroup_taskset_for_each(task, css, tset) {
                /*
                 * can_attach beforehand should guarantee that this doesn't
                 * fail.  TODO: have a better way to handle failure here
@@ -1604,9 +1595,10 @@ typedef enum {
        FILE_SPREAD_SLAB,
 } cpuset_filetype_t;
 
-static int cpuset_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val)
+static int cpuset_write_u64(struct cgroup_subsys_state *css, struct cftype *cft,
+                           u64 val)
 {
-       struct cpuset *cs = cgroup_cs(cgrp);
+       struct cpuset *cs = css_cs(css);
        cpuset_filetype_t type = cft->private;
        int retval = 0;
 
@@ -1653,9 +1645,10 @@ out_unlock:
        return retval;
 }
 
-static int cpuset_write_s64(struct cgroup *cgrp, struct cftype *cft, s64 val)
+static int cpuset_write_s64(struct cgroup_subsys_state *css, struct cftype *cft,
+                           s64 val)
 {
-       struct cpuset *cs = cgroup_cs(cgrp);
+       struct cpuset *cs = css_cs(css);
        cpuset_filetype_t type = cft->private;
        int retval = -ENODEV;
 
@@ -1679,10 +1672,10 @@ out_unlock:
 /*
  * Common handling for a write to a "cpus" or "mems" file.
  */
-static int cpuset_write_resmask(struct cgroup *cgrp, struct cftype *cft,
-                               const char *buf)
+static int cpuset_write_resmask(struct cgroup_subsys_state *css,
+                               struct cftype *cft, const char *buf)
 {
-       struct cpuset *cs = cgroup_cs(cgrp);
+       struct cpuset *cs = css_cs(css);
        struct cpuset *trialcs;
        int retval = -ENODEV;
 
@@ -1761,13 +1754,12 @@ static size_t cpuset_sprintf_memlist(char *page, struct cpuset *cs)
        return count;
 }
 
-static ssize_t cpuset_common_file_read(struct cgroup *cgrp,
-                                      struct cftype *cft,
-                                      struct file *file,
-                                      char __user *buf,
-                                      size_t nbytes, loff_t *ppos)
+static ssize_t cpuset_common_file_read(struct cgroup_subsys_state *css,
+                                      struct cftype *cft, struct file *file,
+                                      char __user *buf, size_t nbytes,
+                                      loff_t *ppos)
 {
-       struct cpuset *cs = cgroup_cs(cgrp);
+       struct cpuset *cs = css_cs(css);
        cpuset_filetype_t type = cft->private;
        char *page;
        ssize_t retval = 0;
@@ -1797,9 +1789,9 @@ out:
        return retval;
 }
 
-static u64 cpuset_read_u64(struct cgroup *cgrp, struct cftype *cft)
+static u64 cpuset_read_u64(struct cgroup_subsys_state *css, struct cftype *cft)
 {
-       struct cpuset *cs = cgroup_cs(cgrp);
+       struct cpuset *cs = css_cs(css);
        cpuset_filetype_t type = cft->private;
        switch (type) {
        case FILE_CPU_EXCLUSIVE:
@@ -1828,9 +1820,9 @@ static u64 cpuset_read_u64(struct cgroup *cgrp, struct cftype *cft)
        return 0;
 }
 
-static s64 cpuset_read_s64(struct cgroup *cgrp, struct cftype *cft)
+static s64 cpuset_read_s64(struct cgroup_subsys_state *css, struct cftype *cft)
 {
-       struct cpuset *cs = cgroup_cs(cgrp);
+       struct cpuset *cs = css_cs(css);
        cpuset_filetype_t type = cft->private;
        switch (type) {
        case FILE_SCHED_RELAX_DOMAIN_LEVEL:
@@ -1945,11 +1937,12 @@ static struct cftype files[] = {
  *     cgrp:   control group that the new cpuset will be part of
  */
 
-static struct cgroup_subsys_state *cpuset_css_alloc(struct cgroup *cgrp)
+static struct cgroup_subsys_state *
+cpuset_css_alloc(struct cgroup_subsys_state *parent_css)
 {
        struct cpuset *cs;
 
-       if (!cgrp->parent)
+       if (!parent_css)
                return &top_cpuset.css;
 
        cs = kzalloc(sizeof(*cs), GFP_KERNEL);
@@ -1969,12 +1962,12 @@ static struct cgroup_subsys_state *cpuset_css_alloc(struct cgroup *cgrp)
        return &cs->css;
 }
 
-static int cpuset_css_online(struct cgroup *cgrp)
+static int cpuset_css_online(struct cgroup_subsys_state *css)
 {
-       struct cpuset *cs = cgroup_cs(cgrp);
+       struct cpuset *cs = css_cs(css);
        struct cpuset *parent = parent_cs(cs);
        struct cpuset *tmp_cs;
-       struct cgroup *pos_cg;
+       struct cgroup_subsys_state *pos_css;
 
        if (!parent)
                return 0;
@@ -1989,7 +1982,7 @@ static int cpuset_css_online(struct cgroup *cgrp)
 
        number_of_cpusets++;
 
-       if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &cgrp->flags))
+       if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags))
                goto out_unlock;
 
        /*
@@ -2006,7 +1999,7 @@ static int cpuset_css_online(struct cgroup *cgrp)
         * (and likewise for mems) to the new cgroup.
         */
        rcu_read_lock();
-       cpuset_for_each_child(tmp_cs, pos_cg, parent) {
+       cpuset_for_each_child(tmp_cs, pos_css, parent) {
                if (is_mem_exclusive(tmp_cs) || is_cpu_exclusive(tmp_cs)) {
                        rcu_read_unlock();
                        goto out_unlock;
@@ -2023,9 +2016,15 @@ out_unlock:
        return 0;
 }
 
-static void cpuset_css_offline(struct cgroup *cgrp)
+/*
+ * If the cpuset being removed has its flag 'sched_load_balance'
+ * enabled, then simulate turning sched_load_balance off, which
+ * will call rebuild_sched_domains_locked().
+ */
+
+static void cpuset_css_offline(struct cgroup_subsys_state *css)
 {
-       struct cpuset *cs = cgroup_cs(cgrp);
+       struct cpuset *cs = css_cs(css);
 
        mutex_lock(&cpuset_mutex);
 
@@ -2038,15 +2037,9 @@ static void cpuset_css_offline(struct cgroup *cgrp)
        mutex_unlock(&cpuset_mutex);
 }
 
-/*
- * If the cpuset being removed has its flag 'sched_load_balance'
- * enabled, then simulate turning sched_load_balance off, which
- * will call rebuild_sched_domains_locked().
- */
-
-static void cpuset_css_free(struct cgroup *cgrp)
+static void cpuset_css_free(struct cgroup_subsys_state *css)
 {
-       struct cpuset *cs = cgroup_cs(cgrp);
+       struct cpuset *cs = css_cs(css);
 
        free_cpumask_var(cs->cpus_allowed);
        kfree(cs);
@@ -2253,11 +2246,11 @@ static void cpuset_hotplug_workfn(struct work_struct *work)
        /* if cpus or mems changed, we need to propagate to descendants */
        if (cpus_updated || mems_updated) {
                struct cpuset *cs;
-               struct cgroup *pos_cgrp;
+               struct cgroup_subsys_state *pos_css;
 
                rcu_read_lock();
-               cpuset_for_each_descendant_pre(cs, pos_cgrp, &top_cpuset) {
-                       if (!css_tryget(&cs->css))
+               cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) {
+                       if (cs == &top_cpuset || !css_tryget(&cs->css))
                                continue;
                        rcu_read_unlock();
 
@@ -2346,7 +2339,7 @@ void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
 
 void cpuset_cpus_allowed_fallback(struct task_struct *tsk)
 {
-       const struct cpuset *cpus_cs;
+       struct cpuset *cpus_cs;
 
        rcu_read_lock();
        cpus_cs = effective_cpumask_cpuset(task_cs(tsk));
@@ -2419,7 +2412,7 @@ int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
  * callback_mutex.  If no ancestor is mem_exclusive or mem_hardwall
  * (an unusual configuration), then returns the root cpuset.
  */
-static const struct cpuset *nearest_hardwall_ancestor(const struct cpuset *cs)
+static struct cpuset *nearest_hardwall_ancestor(struct cpuset *cs)
 {
        while (!(is_mem_exclusive(cs) || is_mem_hardwall(cs)) && parent_cs(cs))
                cs = parent_cs(cs);
@@ -2489,7 +2482,7 @@ static const struct cpuset *nearest_hardwall_ancestor(const struct cpuset *cs)
  */
 int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
 {
-       const struct cpuset *cs;        /* current cpuset ancestors */
+       struct cpuset *cs;              /* current cpuset ancestors */
        int allowed;                    /* is allocation in zone z allowed? */
 
        if (in_interrupt() || (gfp_mask & __GFP_THISNODE))
@@ -2727,7 +2720,7 @@ int proc_cpuset_show(struct seq_file *m, void *unused_v)
                goto out_free;
 
        rcu_read_lock();
-       css = task_subsys_state(tsk, cpuset_subsys_id);
+       css = task_css(tsk, cpuset_subsys_id);
        retval = cgroup_path(css->cgroup, buf, PAGE_SIZE);
        rcu_read_unlock();
        if (retval < 0)
index ff915efef66db9d8635d13a2bf1a783c4059f00e..30c0d91ca7f2995e0a5dfe59c3c8efc6a81c26af 100644 (file)
@@ -10,14 +10,12 @@ Elf_Half __weak elf_core_extra_phdrs(void)
        return 0;
 }
 
-int __weak elf_core_write_extra_phdrs(struct file *file, loff_t offset, size_t *size,
-                                     unsigned long limit)
+int __weak elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset)
 {
        return 1;
 }
 
-int __weak elf_core_write_extra_data(struct file *file, size_t *size,
-                                    unsigned long limit)
+int __weak elf_core_write_extra_data(struct coredump_params *cprm)
 {
        return 1;
 }
index f86599e8c12371c7d78248e0c6b2a6869b9122ee..aeece58f2a0fb356c85188842bb82ac442a4b287 100644 (file)
@@ -340,8 +340,8 @@ struct perf_cgroup {
 static inline struct perf_cgroup *
 perf_cgroup_from_task(struct task_struct *task)
 {
-       return container_of(task_subsys_state(task, perf_subsys_id),
-                       struct perf_cgroup, css);
+       return container_of(task_css(task, perf_subsys_id),
+                           struct perf_cgroup, css);
 }
 
 static inline bool
@@ -591,6 +591,8 @@ static inline int perf_cgroup_connect(int fd, struct perf_event *event,
        if (!f.file)
                return -EBADF;
 
+       rcu_read_lock();
+
        css = cgroup_css_from_dir(f.file, perf_subsys_id);
        if (IS_ERR(css)) {
                ret = PTR_ERR(css);
@@ -617,6 +619,7 @@ static inline int perf_cgroup_connect(int fd, struct perf_event *event,
                ret = -EINVAL;
        }
 out:
+       rcu_read_unlock();
        fdput(f);
        return ret;
 }
@@ -7798,7 +7801,8 @@ unlock:
 device_initcall(perf_event_sysfs_init);
 
 #ifdef CONFIG_CGROUP_PERF
-static struct cgroup_subsys_state *perf_cgroup_css_alloc(struct cgroup *cont)
+static struct cgroup_subsys_state *
+perf_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
 {
        struct perf_cgroup *jc;
 
@@ -7815,11 +7819,10 @@ static struct cgroup_subsys_state *perf_cgroup_css_alloc(struct cgroup *cont)
        return &jc->css;
 }
 
-static void perf_cgroup_css_free(struct cgroup *cont)
+static void perf_cgroup_css_free(struct cgroup_subsys_state *css)
 {
-       struct perf_cgroup *jc;
-       jc = container_of(cgroup_subsys_state(cont, perf_subsys_id),
-                         struct perf_cgroup, css);
+       struct perf_cgroup *jc = container_of(css, struct perf_cgroup, css);
+
        free_percpu(jc->info);
        kfree(jc);
 }
@@ -7831,15 +7834,17 @@ static int __perf_cgroup_move(void *info)
        return 0;
 }
 
-static void perf_cgroup_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
+static void perf_cgroup_attach(struct cgroup_subsys_state *css,
+                              struct cgroup_taskset *tset)
 {
        struct task_struct *task;
 
-       cgroup_taskset_for_each(task, cgrp, tset)
+       cgroup_taskset_for_each(task, css, tset)
                task_function_call(task, __perf_cgroup_move, task);
 }
 
-static void perf_cgroup_exit(struct cgroup *cgrp, struct cgroup *old_cgrp,
+static void perf_cgroup_exit(struct cgroup_subsys_state *css,
+                            struct cgroup_subsys_state *old_css,
                             struct task_struct *task)
 {
        /*
index 206915830d2993bc54124b0fecf09abadac08a0f..40ee1dc3c3bfb028df31e106e50a29858607df29 100644 (file)
@@ -136,6 +136,7 @@ static int param_set_bool_enable_only(const char *val,
 }
 
 static const struct kernel_param_ops param_ops_bool_enable_only = {
+       .flags = KERNEL_PARAM_FL_NOARG,
        .set = param_set_bool_enable_only,
        .get = param_get_bool,
 };
@@ -603,7 +604,7 @@ static void setup_modinfo_##field(struct module *mod, const char *s)  \
 static ssize_t show_modinfo_##field(struct module_attribute *mattr,   \
                        struct module_kobject *mk, char *buffer)      \
 {                                                                     \
-       return sprintf(buffer, "%s\n", mk->mod->field);               \
+       return scnprintf(buffer, PAGE_SIZE, "%s\n", mk->mod->field);  \
 }                                                                     \
 static int modinfo_##field##_exists(struct module *mod)               \
 {                                                                     \
index 440e65d1a544c2d7bea49efbb45c1fbd2fde3a73..e5f8f17e57cf9e5d9fd25a372b825df4ca263e66 100644 (file)
@@ -103,8 +103,8 @@ static int parse_one(char *param,
                            || params[i].level > max_level)
                                return 0;
                        /* No one handled NULL, so do it here. */
-                       if (!val && params[i].ops->set != param_set_bool
-                           && params[i].ops->set != param_set_bint)
+                       if (!val &&
+                           !(params[i].ops->flags & KERNEL_PARAM_FL_NOARG))
                                return -EINVAL;
                        pr_debug("handling %s with %p\n", param,
                                params[i].ops->set);
@@ -241,7 +241,8 @@ int parse_args(const char *doing,
        }                                                               \
        int param_get_##name(char *buffer, const struct kernel_param *kp) \
        {                                                               \
-               return sprintf(buffer, format, *((type *)kp->arg));     \
+               return scnprintf(buffer, PAGE_SIZE, format,             \
+                               *((type *)kp->arg));                    \
        }                                                               \
        struct kernel_param_ops param_ops_##name = {                    \
                .set = param_set_##name,                                \
@@ -252,7 +253,7 @@ int parse_args(const char *doing,
        EXPORT_SYMBOL(param_ops_##name)
 
 
-STANDARD_PARAM_DEF(byte, unsigned char, "%c", unsigned long, strict_strtoul);
+STANDARD_PARAM_DEF(byte, unsigned char, "%hhu", unsigned long, strict_strtoul);
 STANDARD_PARAM_DEF(short, short, "%hi", long, strict_strtol);
 STANDARD_PARAM_DEF(ushort, unsigned short, "%hu", unsigned long, strict_strtoul);
 STANDARD_PARAM_DEF(int, int, "%i", long, strict_strtol);
@@ -285,7 +286,7 @@ EXPORT_SYMBOL(param_set_charp);
 
 int param_get_charp(char *buffer, const struct kernel_param *kp)
 {
-       return sprintf(buffer, "%s", *((char **)kp->arg));
+       return scnprintf(buffer, PAGE_SIZE, "%s", *((char **)kp->arg));
 }
 EXPORT_SYMBOL(param_get_charp);
 
@@ -320,6 +321,7 @@ int param_get_bool(char *buffer, const struct kernel_param *kp)
 EXPORT_SYMBOL(param_get_bool);
 
 struct kernel_param_ops param_ops_bool = {
+       .flags = KERNEL_PARAM_FL_NOARG,
        .set = param_set_bool,
        .get = param_get_bool,
 };
@@ -370,6 +372,7 @@ int param_set_bint(const char *val, const struct kernel_param *kp)
 EXPORT_SYMBOL(param_set_bint);
 
 struct kernel_param_ops param_ops_bint = {
+       .flags = KERNEL_PARAM_FL_NOARG,
        .set = param_set_bint,
        .get = param_get_int,
 };
@@ -827,7 +830,7 @@ ssize_t __modver_version_show(struct module_attribute *mattr,
        struct module_version_attribute *vattr =
                container_of(mattr, struct module_version_attribute, mattr);
 
-       return sprintf(buf, "%s\n", vattr->version);
+       return scnprintf(buf, PAGE_SIZE, "%s\n", vattr->version);
 }
 
 extern const struct module_version_attribute *__start___modver[];
index ece04223bb1ebf99d08a1aa0aab99e14d0fbf994..62ee437b5c7ea362de8573dd18fcee1ef6286221 100644 (file)
@@ -210,6 +210,7 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
                goto Platform_wake;
        }
 
+       ftrace_stop();
        error = disable_nonboot_cpus();
        if (error || suspend_test(TEST_CPUS))
                goto Enable_cpus;
@@ -232,6 +233,7 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
 
  Enable_cpus:
        enable_nonboot_cpus();
+       ftrace_start();
 
  Platform_wake:
        if (need_suspend_ops(state) && suspend_ops->wake)
@@ -265,7 +267,6 @@ int suspend_devices_and_enter(suspend_state_t state)
                        goto Close;
        }
        suspend_console();
-       ftrace_stop();
        suspend_test_start();
        error = dpm_suspend_start(PMSG_SUSPEND);
        if (error) {
@@ -285,7 +286,6 @@ int suspend_devices_and_enter(suspend_state_t state)
        suspend_test_start();
        dpm_resume_end(PMSG_RESUME);
        suspend_test_finish("resume devices");
-       ftrace_start();
        resume_console();
  Close:
        if (need_suspend_ops(state) && suspend_ops->end)
index 05c39f030314698730349de8eb9346c1437d9aba..e53bda3ff2f1d7b0f297cc6707107d23b94de3eb 100644 (file)
@@ -6815,7 +6815,7 @@ void sched_move_task(struct task_struct *tsk)
        if (unlikely(running))
                tsk->sched_class->put_prev_task(rq, tsk);
 
-       tg = container_of(task_subsys_state_check(tsk, cpu_cgroup_subsys_id,
+       tg = container_of(task_css_check(tsk, cpu_cgroup_subsys_id,
                                lockdep_is_held(&tsk->sighand->siglock)),
                          struct task_group, css);
        tg = autogroup_task_group(tsk, tg);
@@ -7137,23 +7137,22 @@ int sched_rt_handler(struct ctl_table *table, int write,
 
 #ifdef CONFIG_CGROUP_SCHED
 
-/* return corresponding task_group object of a cgroup */
-static inline struct task_group *cgroup_tg(struct cgroup *cgrp)
+static inline struct task_group *css_tg(struct cgroup_subsys_state *css)
 {
-       return container_of(cgroup_subsys_state(cgrp, cpu_cgroup_subsys_id),
-                           struct task_group, css);
+       return css ? container_of(css, struct task_group, css) : NULL;
 }
 
-static struct cgroup_subsys_state *cpu_cgroup_css_alloc(struct cgroup *cgrp)
+static struct cgroup_subsys_state *
+cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
 {
-       struct task_group *tg, *parent;
+       struct task_group *parent = css_tg(parent_css);
+       struct task_group *tg;
 
-       if (!cgrp->parent) {
+       if (!parent) {
                /* This is early initialization for the top cgroup */
                return &root_task_group.css;
        }
 
-       parent = cgroup_tg(cgrp->parent);
        tg = sched_create_group(parent);
        if (IS_ERR(tg))
                return ERR_PTR(-ENOMEM);
@@ -7161,41 +7160,38 @@ static struct cgroup_subsys_state *cpu_cgroup_css_alloc(struct cgroup *cgrp)
        return &tg->css;
 }
 
-static int cpu_cgroup_css_online(struct cgroup *cgrp)
+static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
 {
-       struct task_group *tg = cgroup_tg(cgrp);
-       struct task_group *parent;
-
-       if (!cgrp->parent)
-               return 0;
+       struct task_group *tg = css_tg(css);
+       struct task_group *parent = css_tg(css_parent(css));
 
-       parent = cgroup_tg(cgrp->parent);
-       sched_online_group(tg, parent);
+       if (parent)
+               sched_online_group(tg, parent);
        return 0;
 }
 
-static void cpu_cgroup_css_free(struct cgroup *cgrp)
+static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
 {
-       struct task_group *tg = cgroup_tg(cgrp);
+       struct task_group *tg = css_tg(css);
 
        sched_destroy_group(tg);
 }
 
-static void cpu_cgroup_css_offline(struct cgroup *cgrp)
+static void cpu_cgroup_css_offline(struct cgroup_subsys_state *css)
 {
-       struct task_group *tg = cgroup_tg(cgrp);
+       struct task_group *tg = css_tg(css);
 
        sched_offline_group(tg);
 }
 
-static int cpu_cgroup_can_attach(struct cgroup *cgrp,
+static int cpu_cgroup_can_attach(struct cgroup_subsys_state *css,
                                 struct cgroup_taskset *tset)
 {
        struct task_struct *task;
 
-       cgroup_taskset_for_each(task, cgrp, tset) {
+       cgroup_taskset_for_each(task, css, tset) {
 #ifdef CONFIG_RT_GROUP_SCHED
-               if (!sched_rt_can_attach(cgroup_tg(cgrp), task))
+               if (!sched_rt_can_attach(css_tg(css), task))
                        return -EINVAL;
 #else
                /* We don't support RT-tasks being in separate groups */
@@ -7206,18 +7202,18 @@ static int cpu_cgroup_can_attach(struct cgroup *cgrp,
        return 0;
 }
 
-static void cpu_cgroup_attach(struct cgroup *cgrp,
+static void cpu_cgroup_attach(struct cgroup_subsys_state *css,
                              struct cgroup_taskset *tset)
 {
        struct task_struct *task;
 
-       cgroup_taskset_for_each(task, cgrp, tset)
+       cgroup_taskset_for_each(task, css, tset)
                sched_move_task(task);
 }
 
-static void
-cpu_cgroup_exit(struct cgroup *cgrp, struct cgroup *old_cgrp,
-               struct task_struct *task)
+static void cpu_cgroup_exit(struct cgroup_subsys_state *css,
+                           struct cgroup_subsys_state *old_css,
+                           struct task_struct *task)
 {
        /*
         * cgroup_exit() is called in the copy_process() failure path.
@@ -7231,15 +7227,16 @@ cpu_cgroup_exit(struct cgroup *cgrp, struct cgroup *old_cgrp,
 }
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
-static int cpu_shares_write_u64(struct cgroup *cgrp, struct cftype *cftype,
-                               u64 shareval)
+static int cpu_shares_write_u64(struct cgroup_subsys_state *css,
+                               struct cftype *cftype, u64 shareval)
 {
-       return sched_group_set_shares(cgroup_tg(cgrp), scale_load(shareval));
+       return sched_group_set_shares(css_tg(css), scale_load(shareval));
 }
 
-static u64 cpu_shares_read_u64(struct cgroup *cgrp, struct cftype *cft)
+static u64 cpu_shares_read_u64(struct cgroup_subsys_state *css,
+                              struct cftype *cft)
 {
-       struct task_group *tg = cgroup_tg(cgrp);
+       struct task_group *tg = css_tg(css);
 
        return (u64) scale_load_down(tg->shares);
 }
@@ -7361,26 +7358,28 @@ long tg_get_cfs_period(struct task_group *tg)
        return cfs_period_us;
 }
 
-static s64 cpu_cfs_quota_read_s64(struct cgroup *cgrp, struct cftype *cft)
+static s64 cpu_cfs_quota_read_s64(struct cgroup_subsys_state *css,
+                                 struct cftype *cft)
 {
-       return tg_get_cfs_quota(cgroup_tg(cgrp));
+       return tg_get_cfs_quota(css_tg(css));
 }
 
-static int cpu_cfs_quota_write_s64(struct cgroup *cgrp, struct cftype *cftype,
-                               s64 cfs_quota_us)
+static int cpu_cfs_quota_write_s64(struct cgroup_subsys_state *css,
+                                  struct cftype *cftype, s64 cfs_quota_us)
 {
-       return tg_set_cfs_quota(cgroup_tg(cgrp), cfs_quota_us);
+       return tg_set_cfs_quota(css_tg(css), cfs_quota_us);
 }
 
-static u64 cpu_cfs_period_read_u64(struct cgroup *cgrp, struct cftype *cft)
+static u64 cpu_cfs_period_read_u64(struct cgroup_subsys_state *css,
+                                  struct cftype *cft)
 {
-       return tg_get_cfs_period(cgroup_tg(cgrp));
+       return tg_get_cfs_period(css_tg(css));
 }
 
-static int cpu_cfs_period_write_u64(struct cgroup *cgrp, struct cftype *cftype,
-                               u64 cfs_period_us)
+static int cpu_cfs_period_write_u64(struct cgroup_subsys_state *css,
+                                   struct cftype *cftype, u64 cfs_period_us)
 {
-       return tg_set_cfs_period(cgroup_tg(cgrp), cfs_period_us);
+       return tg_set_cfs_period(css_tg(css), cfs_period_us);
 }
 
 struct cfs_schedulable_data {
@@ -7461,10 +7460,10 @@ static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota)
        return ret;
 }
 
-static int cpu_stats_show(struct cgroup *cgrp, struct cftype *cft,
+static int cpu_stats_show(struct cgroup_subsys_state *css, struct cftype *cft,
                struct cgroup_map_cb *cb)
 {
-       struct task_group *tg = cgroup_tg(cgrp);
+       struct task_group *tg = css_tg(css);
        struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
 
        cb->fill(cb, "nr_periods", cfs_b->nr_periods);
@@ -7477,26 +7476,28 @@ static int cpu_stats_show(struct cgroup *cgrp, struct cftype *cft,
 #endif /* CONFIG_FAIR_GROUP_SCHED */
 
 #ifdef CONFIG_RT_GROUP_SCHED
-static int cpu_rt_runtime_write(struct cgroup *cgrp, struct cftype *cft,
-                               s64 val)
+static int cpu_rt_runtime_write(struct cgroup_subsys_state *css,
+                               struct cftype *cft, s64 val)
 {
-       return sched_group_set_rt_runtime(cgroup_tg(cgrp), val);
+       return sched_group_set_rt_runtime(css_tg(css), val);
 }
 
-static s64 cpu_rt_runtime_read(struct cgroup *cgrp, struct cftype *cft)
+static s64 cpu_rt_runtime_read(struct cgroup_subsys_state *css,
+                              struct cftype *cft)
 {
-       return sched_group_rt_runtime(cgroup_tg(cgrp));
+       return sched_group_rt_runtime(css_tg(css));
 }
 
-static int cpu_rt_period_write_uint(struct cgroup *cgrp, struct cftype *cftype,
-               u64 rt_period_us)
+static int cpu_rt_period_write_uint(struct cgroup_subsys_state *css,
+                                   struct cftype *cftype, u64 rt_period_us)
 {
-       return sched_group_set_rt_period(cgroup_tg(cgrp), rt_period_us);
+       return sched_group_set_rt_period(css_tg(css), rt_period_us);
 }
 
-static u64 cpu_rt_period_read_uint(struct cgroup *cgrp, struct cftype *cft)
+static u64 cpu_rt_period_read_uint(struct cgroup_subsys_state *css,
+                                  struct cftype *cft)
 {
-       return sched_group_rt_period(cgroup_tg(cgrp));
+       return sched_group_rt_period(css_tg(css));
 }
 #endif /* CONFIG_RT_GROUP_SCHED */
 
index dbb7e2cd95eba592f6b7878238ff12979e4c1aa0..f64722ff029907b1cbcc6ed57cbc8c1f2b013534 100644 (file)
@@ -33,30 +33,20 @@ struct cpuacct {
        struct kernel_cpustat __percpu *cpustat;
 };
 
-/* return cpu accounting group corresponding to this container */
-static inline struct cpuacct *cgroup_ca(struct cgroup *cgrp)
+static inline struct cpuacct *css_ca(struct cgroup_subsys_state *css)
 {
-       return container_of(cgroup_subsys_state(cgrp, cpuacct_subsys_id),
-                           struct cpuacct, css);
+       return css ? container_of(css, struct cpuacct, css) : NULL;
 }
 
 /* return cpu accounting group to which this task belongs */
 static inline struct cpuacct *task_ca(struct task_struct *tsk)
 {
-       return container_of(task_subsys_state(tsk, cpuacct_subsys_id),
-                           struct cpuacct, css);
-}
-
-static inline struct cpuacct *__parent_ca(struct cpuacct *ca)
-{
-       return cgroup_ca(ca->css.cgroup->parent);
+       return css_ca(task_css(tsk, cpuacct_subsys_id));
 }
 
 static inline struct cpuacct *parent_ca(struct cpuacct *ca)
 {
-       if (!ca->css.cgroup->parent)
-               return NULL;
-       return cgroup_ca(ca->css.cgroup->parent);
+       return css_ca(css_parent(&ca->css));
 }
 
 static DEFINE_PER_CPU(u64, root_cpuacct_cpuusage);
@@ -66,11 +56,12 @@ static struct cpuacct root_cpuacct = {
 };
 
 /* create a new cpu accounting group */
-static struct cgroup_subsys_state *cpuacct_css_alloc(struct cgroup *cgrp)
+static struct cgroup_subsys_state *
+cpuacct_css_alloc(struct cgroup_subsys_state *parent_css)
 {
        struct cpuacct *ca;
 
-       if (!cgrp->parent)
+       if (!parent_css)
                return &root_cpuacct.css;
 
        ca = kzalloc(sizeof(*ca), GFP_KERNEL);
@@ -96,9 +87,9 @@ out:
 }
 
 /* destroy an existing cpu accounting group */
-static void cpuacct_css_free(struct cgroup *cgrp)
+static void cpuacct_css_free(struct cgroup_subsys_state *css)
 {
-       struct cpuacct *ca = cgroup_ca(cgrp);
+       struct cpuacct *ca = css_ca(css);
 
        free_percpu(ca->cpustat);
        free_percpu(ca->cpuusage);
@@ -141,9 +132,9 @@ static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val)
 }
 
 /* return total cpu usage (in nanoseconds) of a group */
-static u64 cpuusage_read(struct cgroup *cgrp, struct cftype *cft)
+static u64 cpuusage_read(struct cgroup_subsys_state *css, struct cftype *cft)
 {
-       struct cpuacct *ca = cgroup_ca(cgrp);
+       struct cpuacct *ca = css_ca(css);
        u64 totalcpuusage = 0;
        int i;
 
@@ -153,10 +144,10 @@ static u64 cpuusage_read(struct cgroup *cgrp, struct cftype *cft)
        return totalcpuusage;
 }
 
-static int cpuusage_write(struct cgroup *cgrp, struct cftype *cftype,
-                                                               u64 reset)
+static int cpuusage_write(struct cgroup_subsys_state *css, struct cftype *cft,
+                         u64 reset)
 {
-       struct cpuacct *ca = cgroup_ca(cgrp);
+       struct cpuacct *ca = css_ca(css);
        int err = 0;
        int i;
 
@@ -172,10 +163,10 @@ out:
        return err;
 }
 
-static int cpuacct_percpu_seq_read(struct cgroup *cgroup, struct cftype *cft,
-                                  struct seq_file *m)
+static int cpuacct_percpu_seq_read(struct cgroup_subsys_state *css,
+                                  struct cftype *cft, struct seq_file *m)
 {
-       struct cpuacct *ca = cgroup_ca(cgroup);
+       struct cpuacct *ca = css_ca(css);
        u64 percpu;
        int i;
 
@@ -192,10 +183,10 @@ static const char * const cpuacct_stat_desc[] = {
        [CPUACCT_STAT_SYSTEM] = "system",
 };
 
-static int cpuacct_stats_show(struct cgroup *cgrp, struct cftype *cft,
-                             struct cgroup_map_cb *cb)
+static int cpuacct_stats_show(struct cgroup_subsys_state *css,
+                             struct cftype *cft, struct cgroup_map_cb *cb)
 {
-       struct cpuacct *ca = cgroup_ca(cgrp);
+       struct cpuacct *ca = css_ca(css);
        int cpu;
        s64 val = 0;
 
@@ -281,7 +272,7 @@ void cpuacct_account_field(struct task_struct *p, int index, u64 val)
        while (ca != &root_cpuacct) {
                kcpustat = this_cpu_ptr(ca->cpustat);
                kcpustat->cpustat[index] += val;
-               ca = __parent_ca(ca);
+               ca = parent_ca(ca);
        }
        rcu_read_unlock();
 }
index ef0a7b2439dde25bdd3d4ee54c4801364e09607d..471a56db05eaf4064a9c541e5bd295d2030ebd95 100644 (file)
@@ -665,9 +665,9 @@ extern int group_balance_cpu(struct sched_group *sg);
 /*
  * Return the group to which this task belongs.
  *
- * We cannot use task_subsys_state() and friends because the cgroup
- * subsystem changes that value before the cgroup_subsys::attach() method
- * is called, therefore we cannot pin it and might observe the wrong value.
+ * We cannot use task_css() and friends because the cgroup subsystem
+ * changes that value before the cgroup_subsys::attach() method is called,
+ * therefore we cannot pin it and might observe the wrong value.
  *
  * The same is true for autogroup's p->signal->autogroup->tg, the autogroup
  * core changes this before calling sched_move_task().
index a326f27d7f09e0942ae2df9eb010d8caaabdb725..0b479a6a22bb8fe30e2b9d6c76c2ddb1d5646ae1 100644 (file)
@@ -121,7 +121,7 @@ void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate)
        BUG_ON(bits > 32);
        WARN_ON(!irqs_disabled());
        read_sched_clock = read;
-       sched_clock_mask = (1 << bits) - 1;
+       sched_clock_mask = (1ULL << bits) - 1;
        cd.rate = rate;
 
        /* calculate the mult/shift to convert counter ticks to ns. */
index e77edc97e036b4e8216ae8b54fe0e6cafaa2fe71..e8a1516cc0a36d3c247d2bd4a18ef6d69f17b42c 100644 (file)
@@ -182,7 +182,8 @@ static bool can_stop_full_tick(void)
                 * Don't allow the user to think they can get
                 * full NO_HZ with this machine.
                 */
-               WARN_ONCE(1, "NO_HZ FULL will not work with unstable sched clock");
+               WARN_ONCE(have_nohz_full_mask,
+                         "NO_HZ FULL will not work with unstable sched clock");
                return false;
        }
 #endif
@@ -343,8 +344,6 @@ static int tick_nohz_init_all(void)
 
 void __init tick_nohz_init(void)
 {
-       int cpu;
-
        if (!have_nohz_full_mask) {
                if (tick_nohz_init_all() < 0)
                        return;
index 48b9fffabdc294a4bea16b9b78083a161e8eec42..03a14bf4c435ffa74c8c1d3444f9613c21d34f91 100644 (file)
@@ -1737,3 +1737,84 @@ void xtime_update(unsigned long ticks)
        do_timer(ticks);
        write_sequnlock(&jiffies_lock);
 }
+
+/**
+ * timekeeping_chfreq_prep() - prepare to change the frequency of the
+ *    clocksource being used for timekeeping
+ * @clock:             Pointer to the clock source whose frequency will be
+ *                     changed.  If this is not the clocksource being used
+ *                     or timekeeping, the routine does nothing and
+ *                     returns nonzero; otherwise, it prepares for the
+ *                     frequency change and returns zero.
+ * @start_cycle:       Pointer to a value which will be set to the current
+ *                     cycle count for @clock, in the old clock domain.
+ *
+ * This routine is used when changing processor speed on a system whose
+ * clocksource is dependent upon that speed.  The normal calling sequence
+ * is:
+ *
+ * - Call timekeeping_chfreq_prep(), to get ready for the change and to
+ *   ensure that the current clocksource is what you think it is.
+ *
+ * - Change the actual processor speed.
+ *
+ * - Call timekeeping_chfreq() to change the clocksource frequency and
+ *   adjust the timekeeping parameters to account for the time spent
+ *   doing the frequency change.
+ *
+ * Any timekeeping operations performed while this is happening are likely
+ * to cause problems.  The best way to prevent this from happening is to
+ * perform all of those steps in a routine run via stop_machine().
+ */
+int timekeeping_chfreq_prep(struct clocksource *clock, cycle_t *start_cycle)
+{
+       if (timekeeper.clock != clock)
+               return 1;
+
+       timekeeping_forward_now(&timekeeper);
+       *start_cycle = timekeeper.clock->cycle_last;
+
+       return 0;
+}
+
+/**
+ * timekeeping_chfreq() - change the frequency of the clocksource being
+ *   used for timekeeping, and then recompute the internal timekeeping
+ *   parameters which depend upon that
+ * @freq:              New frequency for the clocksource, in hertz.
+ * @end_cycle:         Cycle count, in the new clock domain.
+ * @delta_ns:          Time delta in ns between start_cycle (as returned
+ *                     from timekeeping_chfreq_prep()) and end_cycle.
+ *
+ * See the timekeeping_chfreq_prep() description for how this routine is
+ * used.
+ */
+void timekeeping_chfreq(unsigned int freq, cycle_t end_cycle, u64 delta_ns)
+{
+       struct clocksource *clock = timekeeper.clock;
+       cycle_t delta_cycles;
+
+       write_seqlock(&jiffies_lock);
+       __clocksource_updatefreq_hz(clock, freq);
+       tk_setup_internals(&timekeeper, clock);
+
+       /*
+        * The timekeeping_forward_now() done in timekeeping_chfreq_prep()
+        * made xtime consistent with the timesource as of a cycle count
+        * which was provided to the caller as *start_cycle.  Then, we
+        * spent a bunch of time actually changing the processor frequency.
+        * Finally, tk_setup_internals() updated cycle_last in the
+        * clocksource to match the current cycle count, but didn't update
+        * xtime.  Thus, the current time is now wrong by the time we spent
+        * doing the frequency change.  To fix this, we need to backdate
+        * the clocksource's cycle_last so that it is again consistent with
+        * xtime.
+        */
+       delta_cycles = delta_ns * freq;
+       do_div(delta_cycles, 1000000000);
+       clock->cycle_last = end_cycle - delta_cycles;
+
+       write_sequnlock(&jiffies_lock);
+
+       tick_clock_notify();
+}
index dec68bd4e9d8e996404faa065b67354f1defa8eb..d550920e040c4515c7c865bb6ba2ab884f7ef7bd 100644 (file)
@@ -363,8 +363,7 @@ EXPORT_SYMBOL(out_of_line_wait_on_atomic_t);
 
 /**
  * wake_up_atomic_t - Wake up a waiter on an atomic_t
- * @word: The word being waited on, a kernel virtual address
- * @bit: The bit of the word being waited on
+ * @p: The atomic_t being waited on, a kernel virtual address
  *
  * Wake up anyone waiting for the atomic_t to go to zero.
  *
index 7f5d4be220345b0899d38ced692f2458b9278ce7..c60de8e0cffcc5a529a4dbdad05b6de0d1a3a180 100644 (file)
@@ -540,6 +540,8 @@ static int worker_pool_assign_id(struct worker_pool *pool)
  * This must be called either with pwq_lock held or sched RCU read locked.
  * If the pwq needs to be used beyond the locking in effect, the caller is
  * responsible for guaranteeing that the pwq stays online.
+ *
+ * Return: The unbound pool_workqueue for @node.
  */
 static struct pool_workqueue *unbound_pwq_by_node(struct workqueue_struct *wq,
                                                  int node)
@@ -638,8 +640,6 @@ static struct pool_workqueue *get_work_pwq(struct work_struct *work)
  * get_work_pool - return the worker_pool a given work was associated with
  * @work: the work item of interest
  *
- * Return the worker_pool @work was last associated with.  %NULL if none.
- *
  * Pools are created and destroyed under wq_pool_mutex, and allows read
  * access under sched-RCU read lock.  As such, this function should be
  * called under wq_pool_mutex or with preemption disabled.
@@ -648,6 +648,8 @@ static struct pool_workqueue *get_work_pwq(struct work_struct *work)
  * mentioned locking is in effect.  If the returned pool needs to be used
  * beyond the critical section, the caller is responsible for ensuring the
  * returned pool is and stays online.
+ *
+ * Return: The worker_pool @work was last associated with.  %NULL if none.
  */
 static struct worker_pool *get_work_pool(struct work_struct *work)
 {
@@ -671,7 +673,7 @@ static struct worker_pool *get_work_pool(struct work_struct *work)
  * get_work_pool_id - return the worker pool ID a given work is associated with
  * @work: the work item of interest
  *
- * Return the worker_pool ID @work was last associated with.
+ * Return: The worker_pool ID @work was last associated with.
  * %WORK_OFFQ_POOL_NONE if none.
  */
 static int get_work_pool_id(struct work_struct *work)
@@ -830,7 +832,7 @@ void wq_worker_waking_up(struct task_struct *task, int cpu)
  * CONTEXT:
  * spin_lock_irq(rq->lock)
  *
- * RETURNS:
+ * Return:
  * Worker task on @cpu to wake up, %NULL if none.
  */
 struct task_struct *wq_worker_sleeping(struct task_struct *task, int cpu)
@@ -965,8 +967,8 @@ static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
  * CONTEXT:
  * spin_lock_irq(pool->lock).
  *
- * RETURNS:
- * Pointer to worker which is executing @work if found, NULL
+ * Return:
+ * Pointer to worker which is executing @work if found, %NULL
  * otherwise.
  */
 static struct worker *find_worker_executing_work(struct worker_pool *pool,
@@ -1154,14 +1156,16 @@ out_put:
  * @flags: place to store irq state
  *
  * Try to grab PENDING bit of @work.  This function can handle @work in any
- * stable state - idle, on timer or on worklist.  Return values are
+ * stable state - idle, on timer or on worklist.
  *
+ * Return:
  *  1          if @work was pending and we successfully stole PENDING
  *  0          if @work was idle and we claimed PENDING
  *  -EAGAIN    if PENDING couldn't be grabbed at the moment, safe to busy-retry
  *  -ENOENT    if someone else is canceling @work, this state may persist
  *             for arbitrarily long
  *
+ * Note:
  * On >= 0 return, the caller owns @work's PENDING bit.  To avoid getting
  * interrupted while holding PENDING and @work off queue, irq must be
  * disabled on entry.  This, combined with delayed_work->timer being
@@ -1403,10 +1407,10 @@ retry:
  * @wq: workqueue to use
  * @work: work to queue
  *
- * Returns %false if @work was already on a queue, %true otherwise.
- *
  * We queue the work to a specific CPU, the caller must ensure it
  * can't go away.
+ *
+ * Return: %false if @work was already on a queue, %true otherwise.
  */
 bool queue_work_on(int cpu, struct workqueue_struct *wq,
                   struct work_struct *work)
@@ -1476,7 +1480,7 @@ static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
  * @dwork: work to queue
  * @delay: number of jiffies to wait before queueing
  *
- * Returns %false if @work was already on a queue, %true otherwise.  If
+ * Return: %false if @work was already on a queue, %true otherwise.  If
  * @delay is zero and @dwork is idle, it will be scheduled for immediate
  * execution.
  */
@@ -1512,7 +1516,7 @@ EXPORT_SYMBOL(queue_delayed_work_on);
  * zero, @work is guaranteed to be scheduled immediately regardless of its
  * current state.
  *
- * Returns %false if @dwork was idle and queued, %true if @dwork was
+ * Return: %false if @dwork was idle and queued, %true if @dwork was
  * pending and its timer was modified.
  *
  * This function is safe to call from any context including IRQ handler.
@@ -1627,7 +1631,7 @@ static void worker_leave_idle(struct worker *worker)
  * Might sleep.  Called without any lock but returns with pool->lock
  * held.
  *
- * RETURNS:
+ * Return:
  * %true if the associated pool is online (@worker is successfully
  * bound), %false if offline.
  */
@@ -1688,7 +1692,7 @@ static struct worker *alloc_worker(void)
  * CONTEXT:
  * Might sleep.  Does GFP_KERNEL allocations.
  *
- * RETURNS:
+ * Return:
  * Pointer to the newly created worker.
  */
 static struct worker *create_worker(struct worker_pool *pool)
@@ -1788,6 +1792,8 @@ static void start_worker(struct worker *worker)
  * @pool: the target pool
  *
  * Grab the managership of @pool and create and start a new worker for it.
+ *
+ * Return: 0 on success. A negative error code otherwise.
  */
 static int create_and_start_worker(struct worker_pool *pool)
 {
@@ -1932,7 +1938,7 @@ static void pool_mayday_timeout(unsigned long __pool)
  * multiple times.  Does GFP_KERNEL allocations.  Called only from
  * manager.
  *
- * RETURNS:
+ * Return:
  * %false if no action was taken and pool->lock stayed locked, %true
  * otherwise.
  */
@@ -1989,7 +1995,7 @@ restart:
  * spin_lock_irq(pool->lock) which may be released and regrabbed
  * multiple times.  Called only from manager.
  *
- * RETURNS:
+ * Return:
  * %false if no action was taken and pool->lock stayed locked, %true
  * otherwise.
  */
@@ -2032,7 +2038,7 @@ static bool maybe_destroy_workers(struct worker_pool *pool)
  * spin_lock_irq(pool->lock) which may be released and regrabbed
  * multiple times.  Does GFP_KERNEL allocations.
  *
- * RETURNS:
+ * Return:
  * spin_lock_irq(pool->lock) which may be released and regrabbed
  * multiple times.  Does GFP_KERNEL allocations.
  */
@@ -2246,6 +2252,8 @@ static void process_scheduled_works(struct worker *worker)
  * work items regardless of their specific target workqueue.  The only
  * exception is work items which belong to workqueues with a rescuer which
  * will be explained in rescuer_thread().
+ *
+ * Return: 0
  */
 static int worker_thread(void *__worker)
 {
@@ -2344,6 +2352,8 @@ sleep:
  * those works so that forward progress can be guaranteed.
  *
  * This should happen rarely.
+ *
+ * Return: 0
  */
 static int rescuer_thread(void *__rescuer)
 {
@@ -2516,7 +2526,7 @@ static void insert_wq_barrier(struct pool_workqueue *pwq,
  * CONTEXT:
  * mutex_lock(wq->mutex).
  *
- * RETURNS:
+ * Return:
  * %true if @flush_color >= 0 and there's something to flush.  %false
  * otherwise.
  */
@@ -2837,7 +2847,7 @@ static bool __flush_work(struct work_struct *work)
  * Wait until @work has finished execution.  @work is guaranteed to be idle
  * on return if it hasn't been requeued since flush started.
  *
- * RETURNS:
+ * Return:
  * %true if flush_work() waited for the work to finish execution,
  * %false if it was already idle.
  */
@@ -2889,7 +2899,7 @@ static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
  * The caller must ensure that the workqueue on which @work was last
  * queued can't be destroyed before this function returns.
  *
- * RETURNS:
+ * Return:
  * %true if @work was pending, %false otherwise.
  */
 bool cancel_work_sync(struct work_struct *work)
@@ -2906,7 +2916,7 @@ EXPORT_SYMBOL_GPL(cancel_work_sync);
  * immediate execution.  Like flush_work(), this function only
  * considers the last queueing instance of @dwork.
  *
- * RETURNS:
+ * Return:
  * %true if flush_work() waited for the work to finish execution,
  * %false if it was already idle.
  */
@@ -2924,11 +2934,15 @@ EXPORT_SYMBOL(flush_delayed_work);
  * cancel_delayed_work - cancel a delayed work
  * @dwork: delayed_work to cancel
  *
- * Kill off a pending delayed_work.  Returns %true if @dwork was pending
- * and canceled; %false if wasn't pending.  Note that the work callback
- * function may still be running on return, unless it returns %true and the
- * work doesn't re-arm itself.  Explicitly flush or use
- * cancel_delayed_work_sync() to wait on it.
+ * Kill off a pending delayed_work.
+ *
+ * Return: %true if @dwork was pending and canceled; %false if it wasn't
+ * pending.
+ *
+ * Note:
+ * The work callback function may still be running on return, unless
+ * it returns %true and the work doesn't re-arm itself.  Explicitly flush or
+ * use cancel_delayed_work_sync() to wait on it.
  *
  * This function is safe to call from any context including IRQ handler.
  */
@@ -2957,7 +2971,7 @@ EXPORT_SYMBOL(cancel_delayed_work);
  *
  * This is cancel_work_sync() for delayed works.
  *
- * RETURNS:
+ * Return:
  * %true if @dwork was pending, %false otherwise.
  */
 bool cancel_delayed_work_sync(struct delayed_work *dwork)
@@ -2974,7 +2988,7 @@ EXPORT_SYMBOL(cancel_delayed_work_sync);
  * system workqueue and blocks until all CPUs have completed.
  * schedule_on_each_cpu() is very slow.
  *
- * RETURNS:
+ * Return:
  * 0 on success, -errno on failure.
  */
 int schedule_on_each_cpu(work_func_t func)
@@ -3042,7 +3056,7 @@ EXPORT_SYMBOL(flush_scheduled_work);
  * Executes the function immediately if process context is available,
  * otherwise schedules the function for delayed execution.
  *
- * Returns:    0 - function was executed
+ * Return:    0 - function was executed
  *             1 - function was scheduled for execution
  */
 int execute_in_process_context(work_func_t fn, struct execute_work *ew)
@@ -3299,7 +3313,7 @@ static void wq_device_release(struct device *dev)
  * apply_workqueue_attrs() may race against userland updating the
  * attributes.
  *
- * Returns 0 on success, -errno on failure.
+ * Return: 0 on success, -errno on failure.
  */
 int workqueue_sysfs_register(struct workqueue_struct *wq)
 {
@@ -3392,7 +3406,9 @@ void free_workqueue_attrs(struct workqueue_attrs *attrs)
  * @gfp_mask: allocation mask to use
  *
  * Allocate a new workqueue_attrs, initialize with default settings and
- * return it.  Returns NULL on failure.
+ * return it.
+ *
+ * Return: The allocated new workqueue_attr on success. %NULL on failure.
  */
 struct workqueue_attrs *alloc_workqueue_attrs(gfp_t gfp_mask)
 {
@@ -3451,7 +3467,8 @@ static bool wqattrs_equal(const struct workqueue_attrs *a,
  * @pool: worker_pool to initialize
  *
  * Initialize a newly zalloc'd @pool.  It also allocates @pool->attrs.
- * Returns 0 on success, -errno on failure.  Even on failure, all fields
+ *
+ * Return: 0 on success, -errno on failure.  Even on failure, all fields
  * inside @pool proper are initialized and put_unbound_pool() can be called
  * on @pool safely to release it.
  */
@@ -3558,9 +3575,12 @@ static void put_unbound_pool(struct worker_pool *pool)
  * Obtain a worker_pool which has the same attributes as @attrs, bump the
  * reference count and return it.  If there already is a matching
  * worker_pool, it will be used; otherwise, this function attempts to
- * create a new one.  On failure, returns NULL.
+ * create a new one.
  *
  * Should be called with wq_pool_mutex held.
+ *
+ * Return: On success, a worker_pool with the same attributes as @attrs.
+ * On failure, %NULL.
  */
 static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs)
 {
@@ -3796,9 +3816,7 @@ static void free_unbound_pwq(struct pool_workqueue *pwq)
  *
  * Calculate the cpumask a workqueue with @attrs should use on @node.  If
  * @cpu_going_down is >= 0, that cpu is considered offline during
- * calculation.  The result is stored in @cpumask.  This function returns
- * %true if the resulting @cpumask is different from @attrs->cpumask,
- * %false if equal.
+ * calculation.  The result is stored in @cpumask.
  *
  * If NUMA affinity is not enabled, @attrs->cpumask is always used.  If
  * enabled and @node has online CPUs requested by @attrs, the returned
@@ -3807,6 +3825,9 @@ static void free_unbound_pwq(struct pool_workqueue *pwq)
  *
  * The caller is responsible for ensuring that the cpumask of @node stays
  * stable.
+ *
+ * Return: %true if the resulting @cpumask is different from @attrs->cpumask,
+ * %false if equal.
  */
 static bool wq_calc_node_cpumask(const struct workqueue_attrs *attrs, int node,
                                 int cpu_going_down, cpumask_t *cpumask)
@@ -3860,8 +3881,9 @@ static struct pool_workqueue *numa_pwq_tbl_install(struct workqueue_struct *wq,
  * items finish.  Note that a work item which repeatedly requeues itself
  * back-to-back will stay on its current pwq.
  *
- * Performs GFP_KERNEL allocations.  Returns 0 on success and -errno on
- * failure.
+ * Performs GFP_KERNEL allocations.
+ *
+ * Return: 0 on success and -errno on failure.
  */
 int apply_workqueue_attrs(struct workqueue_struct *wq,
                          const struct workqueue_attrs *attrs)
@@ -4329,6 +4351,8 @@ EXPORT_SYMBOL_GPL(workqueue_set_max_active);
  *
  * Determine whether %current is a workqueue rescuer.  Can be used from
  * work functions to determine whether it's being run off the rescuer task.
+ *
+ * Return: %true if %current is a workqueue rescuer. %false otherwise.
  */
 bool current_is_workqueue_rescuer(void)
 {
@@ -4352,7 +4376,7 @@ bool current_is_workqueue_rescuer(void)
  * workqueue being congested on one CPU doesn't mean the workqueue is also
  * contested on other CPUs / NUMA nodes.
  *
- * RETURNS:
+ * Return:
  * %true if congested, %false otherwise.
  */
 bool workqueue_congested(int cpu, struct workqueue_struct *wq)
@@ -4385,7 +4409,7 @@ EXPORT_SYMBOL_GPL(workqueue_congested);
  * synchronization around this function and the test result is
  * unreliable and only useful as advisory hints or for debugging.
  *
- * RETURNS:
+ * Return:
  * OR'd bitmask of WORK_BUSY_* bits.
  */
 unsigned int work_busy(struct work_struct *work)
@@ -4763,9 +4787,10 @@ static void work_for_cpu_fn(struct work_struct *work)
  * @fn: the function to run
  * @arg: the function arg
  *
- * This will return the value @fn returns.
  * It is up to the caller to ensure that the cpu doesn't go offline.
  * The caller must not hold any locks which would prevent @fn from completing.
+ *
+ * Return: The value @fn returns.
  */
 long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
 {
@@ -4837,7 +4862,7 @@ void freeze_workqueues_begin(void)
  * CONTEXT:
  * Grabs and releases wq_pool_mutex.
  *
- * RETURNS:
+ * Return:
  * %true if some freezable workqueues are still busy.  %false if freezing
  * is complete.
  */
index a163b6caef73fdc26e19f8d18ef66604a03b1cf8..4382ad77777ebcb17bc4b25fe606f4371b563a5d 100644 (file)
@@ -78,6 +78,46 @@ s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
 EXPORT_SYMBOL(div_s64_rem);
 #endif
 
+/**
+ * div64_u64_rem - unsigned 64bit divide with 64bit divisor and remainder
+ * @dividend:  64bit dividend
+ * @divisor:   64bit divisor
+ * @remainder:  64bit remainder
+ *
+ * This implementation is comparable to the algorithm used by div64_u64.
+ * But this operation, which includes math for calculating the remainder,
+ * is kept distinct to avoid slowing down the div64_u64 operation on 32bit
+ * systems.
+ */
+#ifndef div64_u64_rem
+u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
+{
+       u32 high = divisor >> 32;
+       u64 quot;
+
+       if (high == 0) {
+               u32 rem32;
+               quot = div_u64_rem(dividend, divisor, &rem32);
+               *remainder = rem32;
+       } else {
+               int n = 1 + fls(high);
+               quot = div_u64(dividend >> n, divisor >> n);
+
+               if (quot != 0)
+                       quot--;
+
+               *remainder = dividend - quot * divisor;
+               if (*remainder >= divisor) {
+                       quot++;
+                       *remainder -= divisor;
+               }
+       }
+
+       return quot;
+}
+EXPORT_SYMBOL(div64_u64_rem);
+#endif
+
 /**
  * div64_u64 - unsigned 64bit divide with 64bit divisor
  * @dividend:  64bit dividend
index 7aa7ce250c94d236143ea4ecedd532632a665700..3eb3e4722b8ee3599e63c92dee1d14c6aece4a37 100644 (file)
@@ -49,22 +49,23 @@ enum cpio_fields {
 
 /**
  * cpio_data find_cpio_data - Search for files in an uncompressed cpio
- * @path:   The directory to search for, including a slash at the end
- * @data:   Pointer to the the cpio archive or a header inside
- * @len:    Remaining length of the cpio based on data pointer
- * @offset: When a matching file is found, this is the offset to the
- *          beginning of the cpio. It can be used to iterate through
- *          the cpio to find all files inside of a directory path
+ * @path:       The directory to search for, including a slash at the end
+ * @data:       Pointer to the cpio archive or a header inside
+ * @len:        Remaining length of the cpio based on data pointer
+ * @nextoff:    When a matching file is found, this is the offset from the
+ *              beginning of the cpio to the beginning of the next file, not the
+ *              matching file itself. It can be used to iterate through the cpio
+ *              to find all files inside of a directory path.
  *
- * @return: struct cpio_data containing the address, length and
- *          filename (with the directory path cut off) of the found file.
- *          If you search for a filename and not for files in a directory,
- *          pass the absolute path of the filename in the cpio and make sure
- *          the match returned an empty filename string.
+ * @return:     struct cpio_data containing the address, length and
+ *              filename (with the directory path cut off) of the found file.
+ *              If you search for a filename and not for files in a directory,
+ *              pass the absolute path of the filename in the cpio and make sure
+ *              the match returned an empty filename string.
  */
 
 struct cpio_data find_cpio_data(const char *path, void *data,
-                                         size_t len,  long *offset)
+                               size_t len,  long *nextoff)
 {
        const size_t cpio_header_len = 8*C_NFIELDS - 2;
        struct cpio_data cd = { NULL, 0, "" };
@@ -124,7 +125,7 @@ struct cpio_data find_cpio_data(const char *path, void *data,
                if ((ch[C_MODE] & 0170000) == 0100000 &&
                    ch[C_NAMESIZE] >= mypathsize &&
                    !memcmp(p, path, mypathsize)) {
-                       *offset = (long)nptr - (long)data;
+                       *nextoff = (long)nptr - (long)data;
                        if (ch[C_NAMESIZE] - mypathsize >= MAX_CPIO_FILE_NAME) {
                                pr_warn(
                                "File %s exceeding MAX_CPIO_FILE_NAME [%d]\n",
index 162becacf97c3650eb39e54b21a0139a9679a764..0a7e494b2bcdab875d7aebdef5ca7346972053b9 100644 (file)
@@ -2,3 +2,4 @@ mktables
 altivec*.c
 int*.c
 tables.c
+neon?.c
index 9f7c184725d7329d25fe35f1a0c3ae701243b753..b4625787c7eeb462d2ba220a5bf497fb522cf32c 100644 (file)
@@ -5,6 +5,7 @@ raid6_pq-y      += algos.o recov.o tables.o int1.o int2.o int4.o \
 
 raid6_pq-$(CONFIG_X86) += recov_ssse3.o recov_avx2.o mmx.o sse1.o sse2.o avx2.o
 raid6_pq-$(CONFIG_ALTIVEC) += altivec1.o altivec2.o altivec4.o altivec8.o
+raid6_pq-$(CONFIG_KERNEL_MODE_NEON) += neon.o neon1.o neon2.o neon4.o neon8.o
 
 hostprogs-y    += mktables
 
@@ -16,6 +17,21 @@ ifeq ($(CONFIG_ALTIVEC),y)
 altivec_flags := -maltivec -mabi=altivec
 endif
 
+# The GCC option -ffreestanding is required in order to compile code containing
+# ARM/NEON intrinsics in a non C99-compliant environment (such as the kernel)
+ifeq ($(CONFIG_KERNEL_MODE_NEON),y)
+NEON_FLAGS := -ffreestanding
+ifeq ($(ARCH),arm)
+NEON_FLAGS += -mfloat-abi=softfp -mfpu=neon
+endif
+ifeq ($(ARCH),arm64)
+CFLAGS_REMOVE_neon1.o += -mgeneral-regs-only
+CFLAGS_REMOVE_neon2.o += -mgeneral-regs-only
+CFLAGS_REMOVE_neon4.o += -mgeneral-regs-only
+CFLAGS_REMOVE_neon8.o += -mgeneral-regs-only
+endif
+endif
+
 targets += int1.c
 $(obj)/int1.c:   UNROLL := 1
 $(obj)/int1.c:   $(src)/int.uc $(src)/unroll.awk FORCE
@@ -70,6 +86,30 @@ $(obj)/altivec8.c:   UNROLL := 8
 $(obj)/altivec8.c:   $(src)/altivec.uc $(src)/unroll.awk FORCE
        $(call if_changed,unroll)
 
+CFLAGS_neon1.o += $(NEON_FLAGS)
+targets += neon1.c
+$(obj)/neon1.c:   UNROLL := 1
+$(obj)/neon1.c:   $(src)/neon.uc $(src)/unroll.awk FORCE
+       $(call if_changed,unroll)
+
+CFLAGS_neon2.o += $(NEON_FLAGS)
+targets += neon2.c
+$(obj)/neon2.c:   UNROLL := 2
+$(obj)/neon2.c:   $(src)/neon.uc $(src)/unroll.awk FORCE
+       $(call if_changed,unroll)
+
+CFLAGS_neon4.o += $(NEON_FLAGS)
+targets += neon4.c
+$(obj)/neon4.c:   UNROLL := 4
+$(obj)/neon4.c:   $(src)/neon.uc $(src)/unroll.awk FORCE
+       $(call if_changed,unroll)
+
+CFLAGS_neon8.o += $(NEON_FLAGS)
+targets += neon8.c
+$(obj)/neon8.c:   UNROLL := 8
+$(obj)/neon8.c:   $(src)/neon.uc $(src)/unroll.awk FORCE
+       $(call if_changed,unroll)
+
 quiet_cmd_mktable = TABLE   $@
       cmd_mktable = $(obj)/mktables > $@ || ( rm -f $@ && exit 1 )
 
index 6d7316fe9f30207d83bfe6f086ca98984040207d..74e6f5629dbc793464f402087818df6fea11ee1a 100644 (file)
@@ -70,6 +70,12 @@ const struct raid6_calls * const raid6_algos[] = {
        &raid6_intx2,
        &raid6_intx4,
        &raid6_intx8,
+#ifdef CONFIG_KERNEL_MODE_NEON
+       &raid6_neonx1,
+       &raid6_neonx2,
+       &raid6_neonx4,
+       &raid6_neonx8,
+#endif
        NULL
 };
 
diff --git a/lib/raid6/neon.c b/lib/raid6/neon.c
new file mode 100644 (file)
index 0000000..36ad470
--- /dev/null
@@ -0,0 +1,58 @@
+/*
+ * linux/lib/raid6/neon.c - RAID6 syndrome calculation using ARM NEON intrinsics
+ *
+ * Copyright (C) 2013 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/raid/pq.h>
+
+#ifdef __KERNEL__
+#include <asm/neon.h>
+#else
+#define kernel_neon_begin()
+#define kernel_neon_end()
+#define cpu_has_neon()         (1)
+#endif
+
+/*
+ * There are 2 reasons these wrappers are kept in a separate compilation unit
+ * from the actual implementations in neonN.c (generated from neon.uc by
+ * unroll.awk):
+ * - the actual implementations use NEON intrinsics, and the GCC support header
+ *   (arm_neon.h) is not fully compatible (type wise) with the kernel;
+ * - the neonN.c files are compiled with -mfpu=neon and optimization enabled,
+ *   and we have to make sure that we never use *any* NEON/VFP instructions
+ *   outside a kernel_neon_begin()/kernel_neon_end() pair.
+ */
+
+#define RAID6_NEON_WRAPPER(_n)                                         \
+       static void raid6_neon ## _n ## _gen_syndrome(int disks,        \
+                                       size_t bytes, void **ptrs)      \
+       {                                                               \
+               void raid6_neon ## _n  ## _gen_syndrome_real(int,       \
+                                               unsigned long, void**); \
+               kernel_neon_begin();                                    \
+               raid6_neon ## _n ## _gen_syndrome_real(disks,           \
+                                       (unsigned long)bytes, ptrs);    \
+               kernel_neon_end();                                      \
+       }                                                               \
+       struct raid6_calls const raid6_neonx ## _n = {                  \
+               raid6_neon ## _n ## _gen_syndrome,                      \
+               raid6_have_neon,                                        \
+               "neonx" #_n,                                            \
+               0                                                       \
+       }
+
+static int raid6_have_neon(void)
+{
+       return cpu_has_neon();
+}
+
+RAID6_NEON_WRAPPER(1);
+RAID6_NEON_WRAPPER(2);
+RAID6_NEON_WRAPPER(4);
+RAID6_NEON_WRAPPER(8);
diff --git a/lib/raid6/neon.uc b/lib/raid6/neon.uc
new file mode 100644 (file)
index 0000000..1b9ed79
--- /dev/null
@@ -0,0 +1,80 @@
+/* -----------------------------------------------------------------------
+ *
+ *   neon.uc - RAID-6 syndrome calculation using ARM NEON instructions
+ *
+ *   Copyright (C) 2012 Rob Herring
+ *
+ *   Based on altivec.uc:
+ *     Copyright 2002-2004 H. Peter Anvin - All Rights Reserved
+ *
+ *   This program is free software; you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation, Inc., 59 Temple Place Ste 330,
+ *   Boston MA 02111-1307, USA; either version 2 of the License, or
+ *   (at your option) any later version; incorporated herein by reference.
+ *
+ * ----------------------------------------------------------------------- */
+
+/*
+ * neon$#.c
+ *
+ * $#-way unrolled NEON intrinsics math RAID-6 instruction set
+ *
+ * This file is postprocessed using unroll.awk
+ */
+
+#include <arm_neon.h>
+
+typedef uint8x16_t unative_t;
+
+#define NBYTES(x) ((unative_t){x,x,x,x, x,x,x,x, x,x,x,x, x,x,x,x})
+#define NSIZE  sizeof(unative_t)
+
+/*
+ * The SHLBYTE() operation shifts each byte left by 1, *not*
+ * rolling over into the next byte
+ */
+static inline unative_t SHLBYTE(unative_t v)
+{
+       return vshlq_n_u8(v, 1);
+}
+
+/*
+ * The MASK() operation returns 0xFF in any byte for which the high
+ * bit is 1, 0x00 for any byte for which the high bit is 0.
+ */
+static inline unative_t MASK(unative_t v)
+{
+       const uint8x16_t temp = NBYTES(0);
+       return (unative_t)vcltq_s8((int8x16_t)v, (int8x16_t)temp);
+}
+
+void raid6_neon$#_gen_syndrome_real(int disks, unsigned long bytes, void **ptrs)
+{
+       uint8_t **dptr = (uint8_t **)ptrs;
+       uint8_t *p, *q;
+       int d, z, z0;
+
+       register unative_t wd$$, wq$$, wp$$, w1$$, w2$$;
+       const unative_t x1d = NBYTES(0x1d);
+
+       z0 = disks - 3;         /* Highest data disk */
+       p = dptr[z0+1];         /* XOR parity */
+       q = dptr[z0+2];         /* RS syndrome */
+
+       for ( d = 0 ; d < bytes ; d += NSIZE*$# ) {
+               wq$$ = wp$$ = vld1q_u8(&dptr[z0][d+$$*NSIZE]);
+               for ( z = z0-1 ; z >= 0 ; z-- ) {
+                       wd$$ = vld1q_u8(&dptr[z][d+$$*NSIZE]);
+                       wp$$ = veorq_u8(wp$$, wd$$);
+                       w2$$ = MASK(wq$$);
+                       w1$$ = SHLBYTE(wq$$);
+
+                       w2$$ = vandq_u8(w2$$, x1d);
+                       w1$$ = veorq_u8(w1$$, w2$$);
+                       wq$$ = veorq_u8(w1$$, wd$$);
+               }
+               vst1q_u8(&p[d+NSIZE*$$], wp$$);
+               vst1q_u8(&q[d+NSIZE*$$], wq$$);
+       }
+}
index 087332dbf8aa164630148626e33c523dfd52dbf7..28afa1a06e033f264d2a9228d654e7fc50b6fea4 100644 (file)
@@ -22,11 +22,23 @@ ifeq ($(ARCH),x86_64)
         IS_X86 = yes
 endif
 
+ifeq ($(ARCH),arm)
+        CFLAGS += -I../../../arch/arm/include -mfpu=neon
+        HAS_NEON = yes
+endif
+ifeq ($(ARCH),arm64)
+        CFLAGS += -I../../../arch/arm64/include
+        HAS_NEON = yes
+endif
+
 ifeq ($(IS_X86),yes)
         OBJS   += mmx.o sse1.o sse2.o avx2.o recov_ssse3.o recov_avx2.o
         CFLAGS += $(shell echo "vpbroadcastb %xmm0, %ymm1" |   \
                     gcc -c -x assembler - >&/dev/null &&       \
                     rm ./-.o && echo -DCONFIG_AS_AVX2=1)
+else ifeq ($(HAS_NEON),yes)
+        OBJS   += neon.o neon1.o neon2.o neon4.o neon8.o
+        CFLAGS += -DCONFIG_KERNEL_MODE_NEON=1
 else
         HAS_ALTIVEC := $(shell echo -e '\#include <altivec.h>\nvector int a;' |\
                          gcc -c -x c - >&/dev/null && \
@@ -55,6 +67,18 @@ raid6.a: $(OBJS)
 raid6test: test.c raid6.a
        $(CC) $(CFLAGS) -o raid6test $^
 
+neon1.c: neon.uc ../unroll.awk
+       $(AWK) ../unroll.awk -vN=1 < neon.uc > $@
+
+neon2.c: neon.uc ../unroll.awk
+       $(AWK) ../unroll.awk -vN=2 < neon.uc > $@
+
+neon4.c: neon.uc ../unroll.awk
+       $(AWK) ../unroll.awk -vN=4 < neon.uc > $@
+
+neon8.c: neon.uc ../unroll.awk
+       $(AWK) ../unroll.awk -vN=8 < neon.uc > $@
+
 altivec1.c: altivec.uc ../unroll.awk
        $(AWK) ../unroll.awk -vN=1 < altivec.uc > $@
 
@@ -89,7 +113,7 @@ tables.c: mktables
        ./mktables > tables.c
 
 clean:
-       rm -f *.o *.a mktables mktables.c *.uc int*.c altivec*.c tables.c raid6test
+       rm -f *.o *.a mktables mktables.c *.uc int*.c altivec*.c neon*.c tables.c raid6test
 
 spotless: clean
        rm -f *~
index 4b51ac1acae7c6ae5233afeb8018dd31cadf909b..45cfcfc944a35f702b2d8e468929b24bbc6269ff 100644 (file)
@@ -1313,44 +1313,6 @@ out:
        file_accessed(filp);
 }
 
-int file_read_actor(read_descriptor_t *desc, struct page *page,
-                       unsigned long offset, unsigned long size)
-{
-       char *kaddr;
-       unsigned long left, count = desc->count;
-
-       if (size > count)
-               size = count;
-
-       /*
-        * Faults on the destination of a read are common, so do it before
-        * taking the kmap.
-        */
-       if (!fault_in_pages_writeable(desc->arg.buf, size)) {
-               kaddr = kmap_atomic(page);
-               left = __copy_to_user_inatomic(desc->arg.buf,
-                                               kaddr + offset, size);
-               kunmap_atomic(kaddr);
-               if (left == 0)
-                       goto success;
-       }
-
-       /* Do it the slow way */
-       kaddr = kmap(page);
-       left = __copy_to_user(desc->arg.buf, kaddr + offset, size);
-       kunmap(page);
-
-       if (left) {
-               size -= left;
-               desc->error = -EFAULT;
-       }
-success:
-       desc->count = count - size;
-       desc->written += size;
-       desc->arg.buf += size;
-       return size;
-}
-
 /*
  * Performs necessary checks before doing a write
  * @iov:       io vector request
@@ -1390,31 +1352,41 @@ int generic_segment_checks(const struct iovec *iov,
 }
 EXPORT_SYMBOL(generic_segment_checks);
 
+int file_read_iter_actor(read_descriptor_t *desc, struct page *page,
+                        unsigned long offset, unsigned long size)
+{
+       struct iov_iter *iter = desc->arg.data;
+       unsigned long copied = 0;
+
+       if (size > desc->count)
+               size = desc->count;
+
+       copied = __iov_iter_copy_to_user(page, iter, offset, size);
+       if (copied < size)
+               desc->error = -EFAULT;
+
+       iov_iter_advance(iter, copied);
+       desc->count -= copied;
+       desc->written += copied;
+
+       return copied;
+}
+
 /**
- * generic_file_aio_read - generic filesystem read routine
+ * generic_file_read_iter - generic filesystem read routine
  * @iocb:      kernel I/O control block
- * @iov:       io vector request
- * @nr_segs:   number of segments in the iovec
+ * @iter:      memory vector
  * @pos:       current file position
- *
- * This is the "read()" routine for all filesystems
- * that can use the page cache directly.
  */
 ssize_t
-generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
-               unsigned long nr_segs, loff_t pos)
+generic_file_read_iter(struct kiocb *iocb, struct iov_iter *iter, loff_t pos)
 {
        struct file *filp = iocb->ki_filp;
-       ssize_t retval;
-       unsigned long seg = 0;
-       size_t count;
+       read_descriptor_t desc;
+       ssize_t retval = 0;
+       size_t count = iov_iter_count(iter);
        loff_t *ppos = &iocb->ki_pos;
 
-       count = 0;
-       retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
-       if (retval)
-               return retval;
-
        /* coalesce the iovecs and go direct-to-BIO for O_DIRECT */
        if (filp->f_flags & O_DIRECT) {
                loff_t size;
@@ -1428,11 +1400,10 @@ generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
                size = i_size_read(inode);
                if (pos < size) {
                        retval = filemap_write_and_wait_range(mapping, pos,
-                                       pos + iov_length(iov, nr_segs) - 1);
-                       if (!retval) {
+                                       pos + count - 1);
+                       if (!retval)
                                retval = mapping->a_ops->direct_IO(READ, iocb,
-                                                       iov, pos, nr_segs);
-                       }
+                                                                  iter, pos);
                        if (retval > 0) {
                                *ppos = pos + retval;
                                count -= retval;
@@ -1453,42 +1424,47 @@ generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
                }
        }
 
-       count = retval;
-       for (seg = 0; seg < nr_segs; seg++) {
-               read_descriptor_t desc;
-               loff_t offset = 0;
-
-               /*
-                * If we did a short DIO read we need to skip the section of the
-                * iov that we've already read data into.
-                */
-               if (count) {
-                       if (count > iov[seg].iov_len) {
-                               count -= iov[seg].iov_len;
-                               continue;
-                       }
-                       offset = count;
-                       count = 0;
-               }
-
-               desc.written = 0;
-               desc.arg.buf = iov[seg].iov_base + offset;
-               desc.count = iov[seg].iov_len - offset;
-               if (desc.count == 0)
-                       continue;
-               desc.error = 0;
-               do_generic_file_read(filp, ppos, &desc, file_read_actor);
-               retval += desc.written;
-               if (desc.error) {
-                       retval = retval ?: desc.error;
-                       break;
-               }
-               if (desc.count > 0)
-                       break;
-       }
+       desc.written = 0;
+       desc.arg.data = iter;
+       desc.count = count;
+       desc.error = 0;
+       do_generic_file_read(filp, ppos, &desc, file_read_iter_actor);
+       if (desc.written)
+               retval = desc.written;
+       else
+               retval = desc.error;
 out:
        return retval;
 }
+EXPORT_SYMBOL(generic_file_read_iter);
+
+/**
+ * generic_file_aio_read - generic filesystem read routine
+ * @iocb:      kernel I/O control block
+ * @iov:       io vector request
+ * @nr_segs:   number of segments in the iovec
+ * @pos:       current file position
+ *
+ * This is the "read()" routine for all filesystems
+ * that can use the page cache directly.
+ */
+ssize_t
+generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
+               unsigned long nr_segs, loff_t pos)
+{
+       struct iov_iter iter;
+       int ret;
+       size_t count;
+
+       count = 0;
+       ret = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
+       if (ret)
+               return ret;
+
+       iov_iter_init(&iter, iov, nr_segs, count, 0);
+
+       return generic_file_read_iter(iocb, &iter, pos);
+}
 EXPORT_SYMBOL(generic_file_aio_read);
 
 #ifdef CONFIG_MMU
@@ -1941,150 +1917,6 @@ struct page *read_cache_page(struct address_space *mapping,
 }
 EXPORT_SYMBOL(read_cache_page);
 
-static size_t __iovec_copy_from_user_inatomic(char *vaddr,
-                       const struct iovec *iov, size_t base, size_t bytes)
-{
-       size_t copied = 0, left = 0;
-
-       while (bytes) {
-               char __user *buf = iov->iov_base + base;
-               int copy = min(bytes, iov->iov_len - base);
-
-               base = 0;
-               left = __copy_from_user_inatomic(vaddr, buf, copy);
-               copied += copy;
-               bytes -= copy;
-               vaddr += copy;
-               iov++;
-
-               if (unlikely(left))
-                       break;
-       }
-       return copied - left;
-}
-
-/*
- * Copy as much as we can into the page and return the number of bytes which
- * were successfully copied.  If a fault is encountered then return the number of
- * bytes which were copied.
- */
-size_t iov_iter_copy_from_user_atomic(struct page *page,
-               struct iov_iter *i, unsigned long offset, size_t bytes)
-{
-       char *kaddr;
-       size_t copied;
-
-       BUG_ON(!in_atomic());
-       kaddr = kmap_atomic(page);
-       if (likely(i->nr_segs == 1)) {
-               int left;
-               char __user *buf = i->iov->iov_base + i->iov_offset;
-               left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
-               copied = bytes - left;
-       } else {
-               copied = __iovec_copy_from_user_inatomic(kaddr + offset,
-                                               i->iov, i->iov_offset, bytes);
-       }
-       kunmap_atomic(kaddr);
-
-       return copied;
-}
-EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
-
-/*
- * This has the same sideeffects and return value as
- * iov_iter_copy_from_user_atomic().
- * The difference is that it attempts to resolve faults.
- * Page must not be locked.
- */
-size_t iov_iter_copy_from_user(struct page *page,
-               struct iov_iter *i, unsigned long offset, size_t bytes)
-{
-       char *kaddr;
-       size_t copied;
-
-       kaddr = kmap(page);
-       if (likely(i->nr_segs == 1)) {
-               int left;
-               char __user *buf = i->iov->iov_base + i->iov_offset;
-               left = __copy_from_user(kaddr + offset, buf, bytes);
-               copied = bytes - left;
-       } else {
-               copied = __iovec_copy_from_user_inatomic(kaddr + offset,
-                                               i->iov, i->iov_offset, bytes);
-       }
-       kunmap(page);
-       return copied;
-}
-EXPORT_SYMBOL(iov_iter_copy_from_user);
-
-void iov_iter_advance(struct iov_iter *i, size_t bytes)
-{
-       BUG_ON(i->count < bytes);
-
-       if (likely(i->nr_segs == 1)) {
-               i->iov_offset += bytes;
-               i->count -= bytes;
-       } else {
-               const struct iovec *iov = i->iov;
-               size_t base = i->iov_offset;
-               unsigned long nr_segs = i->nr_segs;
-
-               /*
-                * The !iov->iov_len check ensures we skip over unlikely
-                * zero-length segments (without overruning the iovec).
-                */
-               while (bytes || unlikely(i->count && !iov->iov_len)) {
-                       int copy;
-
-                       copy = min(bytes, iov->iov_len - base);
-                       BUG_ON(!i->count || i->count < copy);
-                       i->count -= copy;
-                       bytes -= copy;
-                       base += copy;
-                       if (iov->iov_len == base) {
-                               iov++;
-                               nr_segs--;
-                               base = 0;
-                       }
-               }
-               i->iov = iov;
-               i->iov_offset = base;
-               i->nr_segs = nr_segs;
-       }
-}
-EXPORT_SYMBOL(iov_iter_advance);
-
-/*
- * Fault in the first iovec of the given iov_iter, to a maximum length
- * of bytes. Returns 0 on success, or non-zero if the memory could not be
- * accessed (ie. because it is an invalid address).
- *
- * writev-intensive code may want this to prefault several iovecs -- that
- * would be possible (callers must not rely on the fact that _only_ the
- * first iovec will be faulted with the current implementation).
- */
-int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
-{
-       char __user *buf = i->iov->iov_base + i->iov_offset;
-       bytes = min(bytes, i->iov->iov_len - i->iov_offset);
-       return fault_in_pages_readable(buf, bytes);
-}
-EXPORT_SYMBOL(iov_iter_fault_in_readable);
-
-/*
- * Return the count of just the current iov_iter segment.
- */
-size_t iov_iter_single_seg_count(const struct iov_iter *i)
-{
-       const struct iovec *iov = i->iov;
-       if (i->nr_segs == 1)
-               return i->count;
-       else
-               return min(i->count, iov->iov_len - i->iov_offset);
-}
-EXPORT_SYMBOL(iov_iter_single_seg_count);
-
 /*
  * Performs necessary checks before doing a write
  *
@@ -2190,9 +2022,8 @@ int pagecache_write_end(struct file *file, struct address_space *mapping,
 EXPORT_SYMBOL(pagecache_write_end);
 
 ssize_t
-generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
-               unsigned long *nr_segs, loff_t pos, loff_t *ppos,
-               size_t count, size_t ocount)
+generic_file_direct_write_iter(struct kiocb *iocb, struct iov_iter *iter,
+               loff_t pos, loff_t *ppos, size_t count)
 {
        struct file     *file = iocb->ki_filp;
        struct address_space *mapping = file->f_mapping;
@@ -2201,10 +2032,13 @@ generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
        size_t          write_len;
        pgoff_t         end;
 
-       if (count != ocount)
-               *nr_segs = iov_shorten((struct iovec *)iov, *nr_segs, count);
+       if (count != iov_iter_count(iter)) {
+               written = iov_iter_shorten(iter, count);
+               if (written)
+                       goto out;
+       }
 
-       write_len = iov_length(iov, *nr_segs);
+       write_len = count;
        end = (pos + write_len - 1) >> PAGE_CACHE_SHIFT;
 
        written = filemap_write_and_wait_range(mapping, pos, pos + write_len - 1);
@@ -2231,7 +2065,7 @@ generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
                }
        }
 
-       written = mapping->a_ops->direct_IO(WRITE, iocb, iov, pos, *nr_segs);
+       written = mapping->a_ops->direct_IO(WRITE, iocb, iter, pos);
 
        /*
         * Finally, try again to invalidate clean pages which might have been
@@ -2257,6 +2091,23 @@ generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
 out:
        return written;
 }
+EXPORT_SYMBOL(generic_file_direct_write_iter);
+
+ssize_t
+generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
+               unsigned long *nr_segs, loff_t pos, loff_t *ppos,
+               size_t count, size_t ocount)
+{
+       struct iov_iter iter;
+       ssize_t ret;
+
+       iov_iter_init(&iter, iov, *nr_segs, ocount, 0);
+       ret = generic_file_direct_write_iter(iocb, &iter, pos, ppos, count);
+       /* generic_file_direct_write_iter() might have shortened the vec */
+       if (*nr_segs != iter.nr_segs)
+               *nr_segs = iter.nr_segs;
+       return ret;
+}
 EXPORT_SYMBOL(generic_file_direct_write);
 
 /*
@@ -2390,16 +2241,19 @@ again:
 }
 
 ssize_t
-generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
-               unsigned long nr_segs, loff_t pos, loff_t *ppos,
-               size_t count, ssize_t written)
+generic_file_buffered_write_iter(struct kiocb *iocb, struct iov_iter *iter,
+               loff_t pos, loff_t *ppos, size_t count, ssize_t written)
 {
        struct file *file = iocb->ki_filp;
        ssize_t status;
-       struct iov_iter i;
 
-       iov_iter_init(&i, iov, nr_segs, count, written);
-       status = generic_perform_write(file, &i, pos);
+       if ((count + written) != iov_iter_count(iter)) {
+               int rc = iov_iter_shorten(iter, count + written);
+               if (rc)
+                       return rc;
+       }
+
+       status = generic_perform_write(file, iter, pos);
 
        if (likely(status >= 0)) {
                written += status;
@@ -2408,13 +2262,24 @@ generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
        
        return written ? written : status;
 }
+EXPORT_SYMBOL(generic_file_buffered_write_iter);
+
+ssize_t
+generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
+               unsigned long nr_segs, loff_t pos, loff_t *ppos,
+               size_t count, ssize_t written)
+{
+       struct iov_iter iter;
+       iov_iter_init(&iter, iov, nr_segs, count, written);
+       return generic_file_buffered_write_iter(iocb, &iter, pos, ppos,
+                                               count, written);
+}
 EXPORT_SYMBOL(generic_file_buffered_write);
 
 /**
  * __generic_file_aio_write - write data to a file
  * @iocb:      IO state structure (file, offset, etc.)
- * @iov:       vector with data to write
- * @nr_segs:   number of segments in the vector
+ * @iter:      iov_iter specifying memory to write
  * @ppos:      position where to write
  *
  * This function does all the work needed for actually writing data to a
@@ -2429,24 +2294,18 @@ EXPORT_SYMBOL(generic_file_buffered_write);
  * A caller has to handle it. This is mainly due to the fact that we want to
  * avoid syncing under i_mutex.
  */
-ssize_t __generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
-                                unsigned long nr_segs, loff_t *ppos)
+ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *iter,
+                                 loff_t *ppos)
 {
        struct file *file = iocb->ki_filp;
        struct address_space * mapping = file->f_mapping;
-       size_t ocount;          /* original count */
        size_t count;           /* after file limit checks */
        struct inode    *inode = mapping->host;
        loff_t          pos;
        ssize_t         written;
        ssize_t         err;
 
-       ocount = 0;
-       err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ);
-       if (err)
-               return err;
-
-       count = ocount;
+       count = iov_iter_count(iter);
        pos = *ppos;
 
        /* We can write back this queue in page reclaim */
@@ -2473,8 +2332,8 @@ ssize_t __generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
                loff_t endbyte;
                ssize_t written_buffered;
 
-               written = generic_file_direct_write(iocb, iov, &nr_segs, pos,
-                                                       ppos, count, ocount);
+               written = generic_file_direct_write_iter(iocb, iter, pos,
+                                                        ppos, count);
                if (written < 0 || written == count)
                        goto out;
                /*
@@ -2483,9 +2342,9 @@ ssize_t __generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
                 */
                pos += written;
                count -= written;
-               written_buffered = generic_file_buffered_write(iocb, iov,
-                                               nr_segs, pos, ppos, count,
-                                               written);
+               iov_iter_advance(iter, written);
+               written_buffered = generic_file_buffered_write_iter(iocb, iter,
+                                               pos, ppos, count, written);
                /*
                 * If generic_file_buffered_write() retuned a synchronous error
                 * then we want to return the number of bytes which were
@@ -2517,13 +2376,57 @@ ssize_t __generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
                         */
                }
        } else {
-               written = generic_file_buffered_write(iocb, iov, nr_segs,
+               iter->count = count;
+               written = generic_file_buffered_write_iter(iocb, iter,
                                pos, ppos, count, written);
        }
 out:
        current->backing_dev_info = NULL;
        return written ? written : err;
 }
+EXPORT_SYMBOL(__generic_file_write_iter);
+
+ssize_t generic_file_write_iter(struct kiocb *iocb, struct iov_iter *iter,
+                               loff_t pos)
+{
+       struct file *file = iocb->ki_filp;
+       struct inode *inode = file->f_mapping->host;
+       ssize_t ret;
+
+       mutex_lock(&inode->i_mutex);
+       ret = __generic_file_write_iter(iocb, iter, &iocb->ki_pos);
+       mutex_unlock(&inode->i_mutex);
+
+       if (ret > 0 || ret == -EIOCBQUEUED) {
+               ssize_t err;
+
+               err = generic_write_sync(file, pos, ret);
+               if (err < 0 && ret > 0)
+                       ret = err;
+       }
+       return ret;
+}
+EXPORT_SYMBOL(generic_file_write_iter);
+
+ssize_t
+__generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
+                        unsigned long nr_segs, loff_t *ppos)
+{
+       struct iov_iter iter;
+       size_t count;
+       int ret;
+
+       count = 0;
+       ret = generic_segment_checks(iov, &nr_segs, &count, VERIFY_READ);
+       if (ret)
+               goto out;
+
+       iov_iter_init(&iter, iov, nr_segs, count, 0);
+
+       ret = __generic_file_write_iter(iocb, &iter, ppos);
+out:
+       return ret;
+}
 EXPORT_SYMBOL(__generic_file_aio_write);
 
 /**
index 9cea7de22ffbf351b8cc77eb40563543e38d59b7..bda8e44f6fdee72c31d805aa5ae4164a0e01a952 100644 (file)
@@ -36,21 +36,13 @@ static struct hugetlb_cgroup *root_h_cgroup __read_mostly;
 static inline
 struct hugetlb_cgroup *hugetlb_cgroup_from_css(struct cgroup_subsys_state *s)
 {
-       return container_of(s, struct hugetlb_cgroup, css);
-}
-
-static inline
-struct hugetlb_cgroup *hugetlb_cgroup_from_cgroup(struct cgroup *cgroup)
-{
-       return hugetlb_cgroup_from_css(cgroup_subsys_state(cgroup,
-                                                          hugetlb_subsys_id));
+       return s ? container_of(s, struct hugetlb_cgroup, css) : NULL;
 }
 
 static inline
 struct hugetlb_cgroup *hugetlb_cgroup_from_task(struct task_struct *task)
 {
-       return hugetlb_cgroup_from_css(task_subsys_state(task,
-                                                        hugetlb_subsys_id));
+       return hugetlb_cgroup_from_css(task_css(task, hugetlb_subsys_id));
 }
 
 static inline bool hugetlb_cgroup_is_root(struct hugetlb_cgroup *h_cg)
@@ -58,17 +50,15 @@ static inline bool hugetlb_cgroup_is_root(struct hugetlb_cgroup *h_cg)
        return (h_cg == root_h_cgroup);
 }
 
-static inline struct hugetlb_cgroup *parent_hugetlb_cgroup(struct cgroup *cg)
+static inline struct hugetlb_cgroup *
+parent_hugetlb_cgroup(struct hugetlb_cgroup *h_cg)
 {
-       if (!cg->parent)
-               return NULL;
-       return hugetlb_cgroup_from_cgroup(cg->parent);
+       return hugetlb_cgroup_from_css(css_parent(&h_cg->css));
 }
 
-static inline bool hugetlb_cgroup_have_usage(struct cgroup *cg)
+static inline bool hugetlb_cgroup_have_usage(struct hugetlb_cgroup *h_cg)
 {
        int idx;
-       struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_cgroup(cg);
 
        for (idx = 0; idx < hugetlb_max_hstate; idx++) {
                if ((res_counter_read_u64(&h_cg->hugepage[idx], RES_USAGE)) > 0)
@@ -77,19 +67,18 @@ static inline bool hugetlb_cgroup_have_usage(struct cgroup *cg)
        return false;
 }
 
-static struct cgroup_subsys_state *hugetlb_cgroup_css_alloc(struct cgroup *cgroup)
+static struct cgroup_subsys_state *
+hugetlb_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
 {
+       struct hugetlb_cgroup *parent_h_cgroup = hugetlb_cgroup_from_css(parent_css);
+       struct hugetlb_cgroup *h_cgroup;
        int idx;
-       struct cgroup *parent_cgroup;
-       struct hugetlb_cgroup *h_cgroup, *parent_h_cgroup;
 
        h_cgroup = kzalloc(sizeof(*h_cgroup), GFP_KERNEL);
        if (!h_cgroup)
                return ERR_PTR(-ENOMEM);
 
-       parent_cgroup = cgroup->parent;
-       if (parent_cgroup) {
-               parent_h_cgroup = hugetlb_cgroup_from_cgroup(parent_cgroup);
+       if (parent_h_cgroup) {
                for (idx = 0; idx < HUGE_MAX_HSTATE; idx++)
                        res_counter_init(&h_cgroup->hugepage[idx],
                                         &parent_h_cgroup->hugepage[idx]);
@@ -101,11 +90,11 @@ static struct cgroup_subsys_state *hugetlb_cgroup_css_alloc(struct cgroup *cgrou
        return &h_cgroup->css;
 }
 
-static void hugetlb_cgroup_css_free(struct cgroup *cgroup)
+static void hugetlb_cgroup_css_free(struct cgroup_subsys_state *css)
 {
        struct hugetlb_cgroup *h_cgroup;
 
-       h_cgroup = hugetlb_cgroup_from_cgroup(cgroup);
+       h_cgroup = hugetlb_cgroup_from_css(css);
        kfree(h_cgroup);
 }
 
@@ -117,15 +106,14 @@ static void hugetlb_cgroup_css_free(struct cgroup *cgroup)
  * page reference and test for page active here. This function
  * cannot fail.
  */
-static void hugetlb_cgroup_move_parent(int idx, struct cgroup *cgroup,
+static void hugetlb_cgroup_move_parent(int idx, struct hugetlb_cgroup *h_cg,
                                       struct page *page)
 {
        int csize;
        struct res_counter *counter;
        struct res_counter *fail_res;
        struct hugetlb_cgroup *page_hcg;
-       struct hugetlb_cgroup *h_cg   = hugetlb_cgroup_from_cgroup(cgroup);
-       struct hugetlb_cgroup *parent = parent_hugetlb_cgroup(cgroup);
+       struct hugetlb_cgroup *parent = parent_hugetlb_cgroup(h_cg);
 
        page_hcg = hugetlb_cgroup_from_page(page);
        /*
@@ -155,8 +143,9 @@ out:
  * Force the hugetlb cgroup to empty the hugetlb resources by moving them to
  * the parent cgroup.
  */
-static void hugetlb_cgroup_css_offline(struct cgroup *cgroup)
+static void hugetlb_cgroup_css_offline(struct cgroup_subsys_state *css)
 {
+       struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(css);
        struct hstate *h;
        struct page *page;
        int idx = 0;
@@ -165,13 +154,13 @@ static void hugetlb_cgroup_css_offline(struct cgroup *cgroup)
                for_each_hstate(h) {
                        spin_lock(&hugetlb_lock);
                        list_for_each_entry(page, &h->hugepage_activelist, lru)
-                               hugetlb_cgroup_move_parent(idx, cgroup, page);
+                               hugetlb_cgroup_move_parent(idx, h_cg, page);
 
                        spin_unlock(&hugetlb_lock);
                        idx++;
                }
                cond_resched();
-       } while (hugetlb_cgroup_have_usage(cgroup));
+       } while (hugetlb_cgroup_have_usage(h_cg));
 }
 
 int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
@@ -253,14 +242,15 @@ void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
        return;
 }
 
-static ssize_t hugetlb_cgroup_read(struct cgroup *cgroup, struct cftype *cft,
-                                  struct file *file, char __user *buf,
-                                  size_t nbytes, loff_t *ppos)
+static ssize_t hugetlb_cgroup_read(struct cgroup_subsys_state *css,
+                                  struct cftype *cft, struct file *file,
+                                  char __user *buf, size_t nbytes,
+                                  loff_t *ppos)
 {
        u64 val;
        char str[64];
        int idx, name, len;
-       struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_cgroup(cgroup);
+       struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(css);
 
        idx = MEMFILE_IDX(cft->private);
        name = MEMFILE_ATTR(cft->private);
@@ -270,12 +260,12 @@ static ssize_t hugetlb_cgroup_read(struct cgroup *cgroup, struct cftype *cft,
        return simple_read_from_buffer(buf, nbytes, ppos, str, len);
 }
 
-static int hugetlb_cgroup_write(struct cgroup *cgroup, struct cftype *cft,
-                               const char *buffer)
+static int hugetlb_cgroup_write(struct cgroup_subsys_state *css,
+                               struct cftype *cft, const char *buffer)
 {
        int idx, name, ret;
        unsigned long long val;
-       struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_cgroup(cgroup);
+       struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(css);
 
        idx = MEMFILE_IDX(cft->private);
        name = MEMFILE_ATTR(cft->private);
@@ -300,10 +290,11 @@ static int hugetlb_cgroup_write(struct cgroup *cgroup, struct cftype *cft,
        return ret;
 }
 
-static int hugetlb_cgroup_reset(struct cgroup *cgroup, unsigned int event)
+static int hugetlb_cgroup_reset(struct cgroup_subsys_state *css,
+                               unsigned int event)
 {
        int idx, name, ret = 0;
-       struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_cgroup(cgroup);
+       struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(css);
 
        idx = MEMFILE_IDX(event);
        name = MEMFILE_ATTR(event);
index c5792a5d87cede8cf2c5474156c1596f51f5ba61..e1e16ac9db378c33ff875e6bc6ca7d52426d3c9c 100644 (file)
@@ -483,10 +483,9 @@ enum res_type {
  */
 static DEFINE_MUTEX(memcg_create_mutex);
 
-static inline
 struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *s)
 {
-       return container_of(s, struct mem_cgroup, css);
+       return s ? container_of(s, struct mem_cgroup, css) : NULL;
 }
 
 /* Some nice accessors for the vmpressure. */
@@ -1035,12 +1034,6 @@ static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
                preempt_enable();
 }
 
-struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
-{
-       return mem_cgroup_from_css(
-               cgroup_subsys_state(cont, mem_cgroup_subsys_id));
-}
-
 struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
 {
        /*
@@ -1051,7 +1044,7 @@ struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
        if (unlikely(!p))
                return NULL;
 
-       return mem_cgroup_from_css(task_subsys_state(p, mem_cgroup_subsys_id));
+       return mem_cgroup_from_css(task_css(p, mem_cgroup_subsys_id));
 }
 
 struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
@@ -1084,20 +1077,11 @@ struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
 static struct mem_cgroup *__mem_cgroup_iter_next(struct mem_cgroup *root,
                struct mem_cgroup *last_visited)
 {
-       struct cgroup *prev_cgroup, *next_cgroup;
+       struct cgroup_subsys_state *prev_css, *next_css;
 
-       /*
-        * Root is not visited by cgroup iterators so it needs an
-        * explicit visit.
-        */
-       if (!last_visited)
-               return root;
-
-       prev_cgroup = (last_visited == root) ? NULL
-               : last_visited->css.cgroup;
+       prev_css = last_visited ? &last_visited->css : NULL;
 skip_node:
-       next_cgroup = cgroup_next_descendant_pre(
-                       prev_cgroup, root->css.cgroup);
+       next_css = css_next_descendant_pre(prev_css, &root->css);
 
        /*
         * Even if we found a group we have to make sure it is
@@ -1106,13 +1090,13 @@ skip_node:
         * last_visited css is safe to use because it is
         * protected by css_get and the tree walk is rcu safe.
         */
-       if (next_cgroup) {
-               struct mem_cgroup *mem = mem_cgroup_from_cont(
-                               next_cgroup);
+       if (next_css) {
+               struct mem_cgroup *mem = mem_cgroup_from_css(next_css);
+
                if (css_tryget(&mem->css))
                        return mem;
                else {
-                       prev_cgroup = next_cgroup;
+                       prev_css = next_css;
                        goto skip_node;
                }
        }
@@ -1525,10 +1509,8 @@ static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
 
 int mem_cgroup_swappiness(struct mem_cgroup *memcg)
 {
-       struct cgroup *cgrp = memcg->css.cgroup;
-
        /* root ? */
-       if (cgrp->parent == NULL)
+       if (!css_parent(&memcg->css))
                return vm_swappiness;
 
        return memcg->swappiness;
@@ -1805,12 +1787,11 @@ static void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
        check_panic_on_oom(CONSTRAINT_MEMCG, gfp_mask, order, NULL);
        totalpages = mem_cgroup_get_limit(memcg) >> PAGE_SHIFT ? : 1;
        for_each_mem_cgroup_tree(iter, memcg) {
-               struct cgroup *cgroup = iter->css.cgroup;
-               struct cgroup_iter it;
+               struct css_task_iter it;
                struct task_struct *task;
 
-               cgroup_iter_start(cgroup, &it);
-               while ((task = cgroup_iter_next(cgroup, &it))) {
+               css_task_iter_start(&iter->css, &it);
+               while ((task = css_task_iter_next(&it))) {
                        switch (oom_scan_process_thread(task, totalpages, NULL,
                                                        false)) {
                        case OOM_SCAN_SELECT:
@@ -1823,7 +1804,7 @@ static void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
                        case OOM_SCAN_CONTINUE:
                                continue;
                        case OOM_SCAN_ABORT:
-                               cgroup_iter_end(cgroup, &it);
+                               css_task_iter_end(&it);
                                mem_cgroup_iter_break(memcg, iter);
                                if (chosen)
                                        put_task_struct(chosen);
@@ -1840,7 +1821,7 @@ static void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
                                get_task_struct(chosen);
                        }
                }
-               cgroup_iter_end(cgroup, &it);
+               css_task_iter_end(&it);
        }
 
        if (!chosen)
@@ -2954,10 +2935,10 @@ static struct kmem_cache *memcg_params_to_cache(struct memcg_cache_params *p)
 }
 
 #ifdef CONFIG_SLABINFO
-static int mem_cgroup_slabinfo_read(struct cgroup *cont, struct cftype *cft,
-                                       struct seq_file *m)
+static int mem_cgroup_slabinfo_read(struct cgroup_subsys_state *css,
+                                   struct cftype *cft, struct seq_file *m)
 {
-       struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
+       struct mem_cgroup *memcg = mem_cgroup_from_css(css);
        struct memcg_cache_params *params;
 
        if (!memcg_can_account_kmem(memcg))
@@ -4943,10 +4924,10 @@ static void mem_cgroup_reparent_charges(struct mem_cgroup *memcg)
  */
 static inline bool __memcg_has_children(struct mem_cgroup *memcg)
 {
-       struct cgroup *pos;
+       struct cgroup_subsys_state *pos;
 
        /* bounce at first found */
-       cgroup_for_each_child(pos, memcg->css.cgroup)
+       css_for_each_child(pos, &memcg->css)
                return true;
        return false;
 }
@@ -5002,9 +4983,10 @@ static int mem_cgroup_force_empty(struct mem_cgroup *memcg)
        return 0;
 }
 
-static int mem_cgroup_force_empty_write(struct cgroup *cont, unsigned int event)
+static int mem_cgroup_force_empty_write(struct cgroup_subsys_state *css,
+                                       unsigned int event)
 {
-       struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
+       struct mem_cgroup *memcg = mem_cgroup_from_css(css);
        int ret;
 
        if (mem_cgroup_is_root(memcg))
@@ -5017,21 +4999,18 @@ static int mem_cgroup_force_empty_write(struct cgroup *cont, unsigned int event)
 }
 
 
-static u64 mem_cgroup_hierarchy_read(struct cgroup *cont, struct cftype *cft)
+static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css,
+                                    struct cftype *cft)
 {
-       return mem_cgroup_from_cont(cont)->use_hierarchy;
+       return mem_cgroup_from_css(css)->use_hierarchy;
 }
 
-static int mem_cgroup_hierarchy_write(struct cgroup *cont, struct cftype *cft,
-                                       u64 val)
+static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css,
+                                     struct cftype *cft, u64 val)
 {
        int retval = 0;
-       struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
-       struct cgroup *parent = cont->parent;
-       struct mem_cgroup *parent_memcg = NULL;
-
-       if (parent)
-               parent_memcg = mem_cgroup_from_cont(parent);
+       struct mem_cgroup *memcg = mem_cgroup_from_css(css);
+       struct mem_cgroup *parent_memcg = mem_cgroup_from_css(css_parent(&memcg->css));
 
        mutex_lock(&memcg_create_mutex);
 
@@ -5101,11 +5080,11 @@ static inline u64 mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
        return val << PAGE_SHIFT;
 }
 
-static ssize_t mem_cgroup_read(struct cgroup *cont, struct cftype *cft,
-                              struct file *file, char __user *buf,
-                              size_t nbytes, loff_t *ppos)
+static ssize_t mem_cgroup_read(struct cgroup_subsys_state *css,
+                              struct cftype *cft, struct file *file,
+                              char __user *buf, size_t nbytes, loff_t *ppos)
 {
-       struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
+       struct mem_cgroup *memcg = mem_cgroup_from_css(css);
        char str[64];
        u64 val;
        int name, len;
@@ -5138,11 +5117,11 @@ static ssize_t mem_cgroup_read(struct cgroup *cont, struct cftype *cft,
        return simple_read_from_buffer(buf, nbytes, ppos, str, len);
 }
 
-static int memcg_update_kmem_limit(struct cgroup *cont, u64 val)
+static int memcg_update_kmem_limit(struct cgroup_subsys_state *css, u64 val)
 {
        int ret = -EINVAL;
 #ifdef CONFIG_MEMCG_KMEM
-       struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
+       struct mem_cgroup *memcg = mem_cgroup_from_css(css);
        /*
         * For simplicity, we won't allow this to be disabled.  It also can't
         * be changed if the cgroup has children already, or if tasks had
@@ -5158,7 +5137,7 @@ static int memcg_update_kmem_limit(struct cgroup *cont, u64 val)
        mutex_lock(&memcg_create_mutex);
        mutex_lock(&set_limit_mutex);
        if (!memcg->kmem_account_flags && val != RESOURCE_MAX) {
-               if (cgroup_task_count(cont) || memcg_has_children(memcg)) {
+               if (cgroup_task_count(css->cgroup) || memcg_has_children(memcg)) {
                        ret = -EBUSY;
                        goto out;
                }
@@ -5228,10 +5207,10 @@ out:
  * The user of this function is...
  * RES_LIMIT.
  */
-static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
+static int mem_cgroup_write(struct cgroup_subsys_state *css, struct cftype *cft,
                            const char *buffer)
 {
-       struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
+       struct mem_cgroup *memcg = mem_cgroup_from_css(css);
        enum res_type type;
        int name;
        unsigned long long val;
@@ -5255,7 +5234,7 @@ static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
                else if (type == _MEMSWAP)
                        ret = mem_cgroup_resize_memsw_limit(memcg, val);
                else if (type == _KMEM)
-                       ret = memcg_update_kmem_limit(cont, val);
+                       ret = memcg_update_kmem_limit(css, val);
                else
                        return -EINVAL;
                break;
@@ -5283,18 +5262,15 @@ static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
 static void memcg_get_hierarchical_limit(struct mem_cgroup *memcg,
                unsigned long long *mem_limit, unsigned long long *memsw_limit)
 {
-       struct cgroup *cgroup;
        unsigned long long min_limit, min_memsw_limit, tmp;
 
        min_limit = res_counter_read_u64(&memcg->res, RES_LIMIT);
        min_memsw_limit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
-       cgroup = memcg->css.cgroup;
        if (!memcg->use_hierarchy)
                goto out;
 
-       while (cgroup->parent) {
-               cgroup = cgroup->parent;
-               memcg = mem_cgroup_from_cont(cgroup);
+       while (css_parent(&memcg->css)) {
+               memcg = mem_cgroup_from_css(css_parent(&memcg->css));
                if (!memcg->use_hierarchy)
                        break;
                tmp = res_counter_read_u64(&memcg->res, RES_LIMIT);
@@ -5307,9 +5283,9 @@ out:
        *memsw_limit = min_memsw_limit;
 }
 
-static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
+static int mem_cgroup_reset(struct cgroup_subsys_state *css, unsigned int event)
 {
-       struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
+       struct mem_cgroup *memcg = mem_cgroup_from_css(css);
        int name;
        enum res_type type;
 
@@ -5342,17 +5318,17 @@ static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
        return 0;
 }
 
-static u64 mem_cgroup_move_charge_read(struct cgroup *cgrp,
+static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css,
                                        struct cftype *cft)
 {
-       return mem_cgroup_from_cont(cgrp)->move_charge_at_immigrate;
+       return mem_cgroup_from_css(css)->move_charge_at_immigrate;
 }
 
 #ifdef CONFIG_MMU
-static int mem_cgroup_move_charge_write(struct cgroup *cgrp,
+static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
                                        struct cftype *cft, u64 val)
 {
-       struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
+       struct mem_cgroup *memcg = mem_cgroup_from_css(css);
 
        if (val >= (1 << NR_MOVE_TYPE))
                return -EINVAL;
@@ -5367,7 +5343,7 @@ static int mem_cgroup_move_charge_write(struct cgroup *cgrp,
        return 0;
 }
 #else
-static int mem_cgroup_move_charge_write(struct cgroup *cgrp,
+static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
                                        struct cftype *cft, u64 val)
 {
        return -ENOSYS;
@@ -5375,13 +5351,13 @@ static int mem_cgroup_move_charge_write(struct cgroup *cgrp,
 #endif
 
 #ifdef CONFIG_NUMA
-static int memcg_numa_stat_show(struct cgroup *cont, struct cftype *cft,
-                                     struct seq_file *m)
+static int memcg_numa_stat_show(struct cgroup_subsys_state *css,
+                               struct cftype *cft, struct seq_file *m)
 {
        int nid;
        unsigned long total_nr, file_nr, anon_nr, unevictable_nr;
        unsigned long node_nr;
-       struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
+       struct mem_cgroup *memcg = mem_cgroup_from_css(css);
 
        total_nr = mem_cgroup_nr_lru_pages(memcg, LRU_ALL);
        seq_printf(m, "total=%lu", total_nr);
@@ -5426,10 +5402,10 @@ static inline void mem_cgroup_lru_names_not_uptodate(void)
        BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_lru_names) != NR_LRU_LISTS);
 }
 
-static int memcg_stat_show(struct cgroup *cont, struct cftype *cft,
+static int memcg_stat_show(struct cgroup_subsys_state *css, struct cftype *cft,
                                 struct seq_file *m)
 {
-       struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
+       struct mem_cgroup *memcg = mem_cgroup_from_css(css);
        struct mem_cgroup *mi;
        unsigned int i;
 
@@ -5513,27 +5489,23 @@ static int memcg_stat_show(struct cgroup *cont, struct cftype *cft,
        return 0;
 }
 
-static u64 mem_cgroup_swappiness_read(struct cgroup *cgrp, struct cftype *cft)
+static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css,
+                                     struct cftype *cft)
 {
-       struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
+       struct mem_cgroup *memcg = mem_cgroup_from_css(css);
 
        return mem_cgroup_swappiness(memcg);
 }
 
-static int mem_cgroup_swappiness_write(struct cgroup *cgrp, struct cftype *cft,
-                                      u64 val)
+static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css,
+                                      struct cftype *cft, u64 val)
 {
-       struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
-       struct mem_cgroup *parent;
-
-       if (val > 100)
-               return -EINVAL;
+       struct mem_cgroup *memcg = mem_cgroup_from_css(css);
+       struct mem_cgroup *parent = mem_cgroup_from_css(css_parent(&memcg->css));
 
-       if (cgrp->parent == NULL)
+       if (val > 100 || !parent)
                return -EINVAL;
 
-       parent = mem_cgroup_from_cont(cgrp->parent);
-
        mutex_lock(&memcg_create_mutex);
 
        /* If under hierarchy, only empty-root can set this value */
@@ -5636,10 +5608,10 @@ static void mem_cgroup_oom_notify(struct mem_cgroup *memcg)
                mem_cgroup_oom_notify_cb(iter);
 }
 
-static int mem_cgroup_usage_register_event(struct cgroup *cgrp,
+static int mem_cgroup_usage_register_event(struct cgroup_subsys_state *css,
        struct cftype *cft, struct eventfd_ctx *eventfd, const char *args)
 {
-       struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
+       struct mem_cgroup *memcg = mem_cgroup_from_css(css);
        struct mem_cgroup_thresholds *thresholds;
        struct mem_cgroup_threshold_ary *new;
        enum res_type type = MEMFILE_TYPE(cft->private);
@@ -5719,10 +5691,10 @@ unlock:
        return ret;
 }
 
-static void mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
+static void mem_cgroup_usage_unregister_event(struct cgroup_subsys_state *css,
        struct cftype *cft, struct eventfd_ctx *eventfd)
 {
-       struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
+       struct mem_cgroup *memcg = mem_cgroup_from_css(css);
        struct mem_cgroup_thresholds *thresholds;
        struct mem_cgroup_threshold_ary *new;
        enum res_type type = MEMFILE_TYPE(cft->private);
@@ -5798,10 +5770,10 @@ unlock:
        mutex_unlock(&memcg->thresholds_lock);
 }
 
-static int mem_cgroup_oom_register_event(struct cgroup *cgrp,
+static int mem_cgroup_oom_register_event(struct cgroup_subsys_state *css,
        struct cftype *cft, struct eventfd_ctx *eventfd, const char *args)
 {
-       struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
+       struct mem_cgroup *memcg = mem_cgroup_from_css(css);
        struct mem_cgroup_eventfd_list *event;
        enum res_type type = MEMFILE_TYPE(cft->private);
 
@@ -5823,10 +5795,10 @@ static int mem_cgroup_oom_register_event(struct cgroup *cgrp,
        return 0;
 }
 
-static void mem_cgroup_oom_unregister_event(struct cgroup *cgrp,
+static void mem_cgroup_oom_unregister_event(struct cgroup_subsys_state *css,
        struct cftype *cft, struct eventfd_ctx *eventfd)
 {
-       struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
+       struct mem_cgroup *memcg = mem_cgroup_from_css(css);
        struct mem_cgroup_eventfd_list *ev, *tmp;
        enum res_type type = MEMFILE_TYPE(cft->private);
 
@@ -5844,10 +5816,10 @@ static void mem_cgroup_oom_unregister_event(struct cgroup *cgrp,
        spin_unlock(&memcg_oom_lock);
 }
 
-static int mem_cgroup_oom_control_read(struct cgroup *cgrp,
+static int mem_cgroup_oom_control_read(struct cgroup_subsys_state *css,
        struct cftype *cft,  struct cgroup_map_cb *cb)
 {
-       struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
+       struct mem_cgroup *memcg = mem_cgroup_from_css(css);
 
        cb->fill(cb, "oom_kill_disable", memcg->oom_kill_disable);
 
@@ -5858,18 +5830,16 @@ static int mem_cgroup_oom_control_read(struct cgroup *cgrp,
        return 0;
 }
 
-static int mem_cgroup_oom_control_write(struct cgroup *cgrp,
+static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css,
        struct cftype *cft, u64 val)
 {
-       struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
-       struct mem_cgroup *parent;
+       struct mem_cgroup *memcg = mem_cgroup_from_css(css);
+       struct mem_cgroup *parent = mem_cgroup_from_css(css_parent(&memcg->css));
 
        /* cannot set to root cgroup and only 0 and 1 are allowed */
-       if (!cgrp->parent || !((val == 0) || (val == 1)))
+       if (!parent || !((val == 0) || (val == 1)))
                return -EINVAL;
 
-       parent = mem_cgroup_from_cont(cgrp->parent);
-
        mutex_lock(&memcg_create_mutex);
        /* oom-kill-disable is a flag for subhierarchy. */
        if ((parent->use_hierarchy) || memcg_has_children(memcg)) {
@@ -6228,7 +6198,7 @@ static void __init mem_cgroup_soft_limit_tree_init(void)
 }
 
 static struct cgroup_subsys_state * __ref
-mem_cgroup_css_alloc(struct cgroup *cont)
+mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
 {
        struct mem_cgroup *memcg;
        long error = -ENOMEM;
@@ -6243,7 +6213,7 @@ mem_cgroup_css_alloc(struct cgroup *cont)
                        goto free_out;
 
        /* root ? */
-       if (cont->parent == NULL) {
+       if (parent_css == NULL) {
                root_mem_cgroup = memcg;
                res_counter_init(&memcg->res, NULL);
                res_counter_init(&memcg->memsw, NULL);
@@ -6265,17 +6235,16 @@ free_out:
 }
 
 static int
-mem_cgroup_css_online(struct cgroup *cont)
+mem_cgroup_css_online(struct cgroup_subsys_state *css)
 {
-       struct mem_cgroup *memcg, *parent;
+       struct mem_cgroup *memcg = mem_cgroup_from_css(css);
+       struct mem_cgroup *parent = mem_cgroup_from_css(css_parent(css));
        int error = 0;
 
-       if (!cont->parent)
+       if (!parent)
                return 0;
 
        mutex_lock(&memcg_create_mutex);
-       memcg = mem_cgroup_from_cont(cont);
-       parent = mem_cgroup_from_cont(cont->parent);
 
        memcg->use_hierarchy = parent->use_hierarchy;
        memcg->oom_kill_disable = parent->oom_kill_disable;
@@ -6326,9 +6295,9 @@ static void mem_cgroup_invalidate_reclaim_iterators(struct mem_cgroup *memcg)
                mem_cgroup_iter_invalidate(root_mem_cgroup);
 }
 
-static void mem_cgroup_css_offline(struct cgroup *cont)
+static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
 {
-       struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
+       struct mem_cgroup *memcg = mem_cgroup_from_css(css);
 
        kmem_cgroup_css_offline(memcg);
 
@@ -6338,9 +6307,9 @@ static void mem_cgroup_css_offline(struct cgroup *cont)
        vmpressure_cleanup(&memcg->vmpressure);
 }
 
-static void mem_cgroup_css_free(struct cgroup *cont)
+static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
 {
-       struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
+       struct mem_cgroup *memcg = mem_cgroup_from_css(css);
 
        memcg_destroy_kmem(memcg);
        __mem_cgroup_free(memcg);
@@ -6710,12 +6679,12 @@ static void mem_cgroup_clear_mc(void)
        mem_cgroup_end_move(from);
 }
 
-static int mem_cgroup_can_attach(struct cgroup *cgroup,
+static int mem_cgroup_can_attach(struct cgroup_subsys_state *css,
                                 struct cgroup_taskset *tset)
 {
        struct task_struct *p = cgroup_taskset_first(tset);
        int ret = 0;
-       struct mem_cgroup *memcg = mem_cgroup_from_cont(cgroup);
+       struct mem_cgroup *memcg = mem_cgroup_from_css(css);
        unsigned long move_charge_at_immigrate;
 
        /*
@@ -6757,7 +6726,7 @@ static int mem_cgroup_can_attach(struct cgroup *cgroup,
        return ret;
 }
 
-static void mem_cgroup_cancel_attach(struct cgroup *cgroup,
+static void mem_cgroup_cancel_attach(struct cgroup_subsys_state *css,
                                     struct cgroup_taskset *tset)
 {
        mem_cgroup_clear_mc();
@@ -6905,7 +6874,7 @@ retry:
        up_read(&mm->mmap_sem);
 }
 
-static void mem_cgroup_move_task(struct cgroup *cont,
+static void mem_cgroup_move_task(struct cgroup_subsys_state *css,
                                 struct cgroup_taskset *tset)
 {
        struct task_struct *p = cgroup_taskset_first(tset);
@@ -6920,16 +6889,16 @@ static void mem_cgroup_move_task(struct cgroup *cont,
                mem_cgroup_clear_mc();
 }
 #else  /* !CONFIG_MMU */
-static int mem_cgroup_can_attach(struct cgroup *cgroup,
+static int mem_cgroup_can_attach(struct cgroup_subsys_state *css,
                                 struct cgroup_taskset *tset)
 {
        return 0;
 }
-static void mem_cgroup_cancel_attach(struct cgroup *cgroup,
+static void mem_cgroup_cancel_attach(struct cgroup_subsys_state *css,
                                     struct cgroup_taskset *tset)
 {
 }
-static void mem_cgroup_move_task(struct cgroup *cont,
+static void mem_cgroup_move_task(struct cgroup_subsys_state *css,
                                 struct cgroup_taskset *tset)
 {
 }
@@ -6939,15 +6908,15 @@ static void mem_cgroup_move_task(struct cgroup *cont,
  * Cgroup retains root cgroups across [un]mount cycles making it necessary
  * to verify sane_behavior flag on each mount attempt.
  */
-static void mem_cgroup_bind(struct cgroup *root)
+static void mem_cgroup_bind(struct cgroup_subsys_state *root_css)
 {
        /*
         * use_hierarchy is forced with sane_behavior.  cgroup core
         * guarantees that @root doesn't have any children, so turning it
         * on for the root memcg is enough.
         */
-       if (cgroup_sane_behavior(root))
-               mem_cgroup_from_cont(root)->use_hierarchy = true;
+       if (cgroup_sane_behavior(root_css->cgroup))
+               mem_cgroup_from_css(root_css)->use_hierarchy = true;
 }
 
 struct cgroup_subsys mem_cgroup_subsys = {
index ba05b64e5d8ddfc19f31c046f7c0b735d099364b..21023dff6062a7ec4171b8932013871dc148eeac 100644 (file)
@@ -258,11 +258,14 @@ int __swap_writepage(struct page *page, struct writeback_control *wbc,
        if (sis->flags & SWP_FILE) {
                struct kiocb kiocb;
                struct file *swap_file = sis->swap_file;
-               struct address_space *mapping = swap_file->f_mapping;
-               struct iovec iov = {
-                       .iov_base = kmap(page),
-                       .iov_len  = PAGE_SIZE,
+               struct bio_vec bvec = {
+                       .bv_page = kmap(page),
+                       .bv_len = PAGE_SIZE,
+                       .bv_offset = 0,
                };
+               struct iov_iter iter;
+
+               iov_iter_init_bvec(&iter, &bvec, 1, PAGE_SIZE, 0);
 
                init_sync_kiocb(&kiocb, swap_file);
                kiocb.ki_pos = page_file_offset(page);
@@ -271,9 +274,7 @@ int __swap_writepage(struct page *page, struct writeback_control *wbc,
 
                set_page_writeback(page);
                unlock_page(page);
-               ret = mapping->a_ops->direct_IO(KERNEL_WRITE,
-                                               &kiocb, &iov,
-                                               kiocb.ki_pos, 1);
+               ret = swap_file->f_op->write_iter(&kiocb, &iter, kiocb.ki_pos);
                kunmap(page);
                if (ret == PAGE_SIZE) {
                        count_vm_event(PSWPOUT);
index 383bdbb98b04ca22726fe3d243db2ce789642975..0cee10ffb98d4cf8e6ad930faa1f4de925bdf3a5 100644 (file)
@@ -226,9 +226,9 @@ int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
        int ret;
 
        /*
-        * Note: pageblock_nr_page != MAX_ORDER. Then, chunks of free page
-        * is not aligned to pageblock_nr_pages.
-        * Then we just check pagetype fist.
+        * Note: pageblock_nr_pages != MAX_ORDER. Then, chunks of free pages
+        * are not aligned to pageblock_nr_pages.
+        * Then we just check migratetype first.
         */
        for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
                page = __first_valid_page(pfn, pageblock_nr_pages);
@@ -238,7 +238,7 @@ int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
        page = __first_valid_page(start_pfn, end_pfn - start_pfn);
        if ((pfn < end_pfn) || !page)
                return -EBUSY;
-       /* Check all pages are free or Marked as ISOLATED */
+       /* Check all pages are free or marked as ISOLATED */
        zone = page_zone(page);
        spin_lock_irqsave(&zone->lock, flags);
        ret = __test_page_isolated_in_pageblock(start_pfn, end_pfn,
index 8335dbd3fc358ed1fe9d6521785ecee1dc031313..786d39005ae68d148855980a211f5910fe5b6845 100644 (file)
@@ -1464,14 +1464,23 @@ shmem_write_end(struct file *file, struct address_space *mapping,
        return copied;
 }
 
-static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_t *desc, read_actor_t actor)
+static ssize_t shmem_file_read_iter(struct kiocb *iocb,
+                                   struct iov_iter *iter, loff_t pos)
 {
+       read_descriptor_t desc;
+       loff_t *ppos = &iocb->ki_pos;
+       struct file *filp = iocb->ki_filp;
        struct inode *inode = file_inode(filp);
        struct address_space *mapping = inode->i_mapping;
        pgoff_t index;
        unsigned long offset;
        enum sgp_type sgp = SGP_READ;
 
+       desc.written = 0;
+       desc.count = iov_iter_count(iter);
+       desc.arg.data = iter;
+       desc.error = 0;
+
        /*
         * Might this read be for a stacking filesystem?  Then when reading
         * holes of a sparse file, we actually need to allocate those pages,
@@ -1498,10 +1507,10 @@ static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_
                                break;
                }
 
-               desc->error = shmem_getpage(inode, index, &page, sgp, NULL);
-               if (desc->error) {
-                       if (desc->error == -EINVAL)
-                               desc->error = 0;
+               desc.error = shmem_getpage(inode, index, &page, sgp, NULL);
+               if (desc.error) {
+                       if (desc.error == -EINVAL)
+                               desc.error = 0;
                        break;
                }
                if (page)
@@ -1552,13 +1561,13 @@ static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_
                 * "pos" here (the actor routine has to update the user buffer
                 * pointers and the remaining count).
                 */
-               ret = actor(desc, page, offset, nr);
+               ret = file_read_iter_actor(&desc, page, offset, nr);
                offset += ret;
                index += offset >> PAGE_CACHE_SHIFT;
                offset &= ~PAGE_CACHE_MASK;
 
                page_cache_release(page);
-               if (ret != nr || !desc->count)
+               if (ret != nr || !desc.count)
                        break;
 
                cond_resched();
@@ -1566,40 +1575,8 @@ static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_
 
        *ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
        file_accessed(filp);
-}
-
-static ssize_t shmem_file_aio_read(struct kiocb *iocb,
-               const struct iovec *iov, unsigned long nr_segs, loff_t pos)
-{
-       struct file *filp = iocb->ki_filp;
-       ssize_t retval;
-       unsigned long seg;
-       size_t count;
-       loff_t *ppos = &iocb->ki_pos;
 
-       retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
-       if (retval)
-               return retval;
-
-       for (seg = 0; seg < nr_segs; seg++) {
-               read_descriptor_t desc;
-
-               desc.written = 0;
-               desc.arg.buf = iov[seg].iov_base;
-               desc.count = iov[seg].iov_len;
-               if (desc.count == 0)
-                       continue;
-               desc.error = 0;
-               do_shmem_file_read(filp, ppos, &desc, file_read_actor);
-               retval += desc.written;
-               if (desc.error) {
-                       retval = retval ?: desc.error;
-                       break;
-               }
-               if (desc.count > 0)
-                       break;
-       }
-       return retval;
+       return desc.written ? desc.written : desc.error;
 }
 
 static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
@@ -2722,8 +2699,8 @@ static const struct file_operations shmem_file_operations = {
        .llseek         = shmem_file_llseek,
        .read           = do_sync_read,
        .write          = do_sync_write,
-       .aio_read       = shmem_file_aio_read,
-       .aio_write      = generic_file_aio_write,
+       .read_iter      = shmem_file_read_iter,
+       .write_iter     = generic_file_write_iter,
        .fsync          = noop_fsync,
        .splice_read    = shmem_file_splice_read,
        .splice_write   = generic_file_splice_write,
index 538bade6df7dc2a3f27c9ad5ac5f0efc8a6d08bc..f0410eb617410015209d536104acd925f49096c4 100644 (file)
@@ -373,7 +373,7 @@ struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
 {
        int index;
 
-       if (size > KMALLOC_MAX_SIZE) {
+       if (unlikely(size > KMALLOC_MAX_SIZE)) {
                WARN_ON_ONCE(!(flags & __GFP_NOWARN));
                return NULL;
        }
index e3ba1f2cf60cc0caa5dd7ed059dbe583b477e073..88d65fd4ab37b65491fd75f88208b6712dd458db 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -373,7 +373,8 @@ static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page
 #endif
        {
                slab_lock(page);
-               if (page->freelist == freelist_old && page->counters == counters_old) {
+               if (page->freelist == freelist_old &&
+                                       page->counters == counters_old) {
                        page->freelist = freelist_new;
                        page->counters = counters_new;
                        slab_unlock(page);
@@ -411,7 +412,8 @@ static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
 
                local_irq_save(flags);
                slab_lock(page);
-               if (page->freelist == freelist_old && page->counters == counters_old) {
+               if (page->freelist == freelist_old &&
+                                       page->counters == counters_old) {
                        page->freelist = freelist_new;
                        page->counters = counters_new;
                        slab_unlock(page);
@@ -553,8 +555,9 @@ static void print_tracking(struct kmem_cache *s, void *object)
 
 static void print_page_info(struct page *page)
 {
-       printk(KERN_ERR "INFO: Slab 0x%p objects=%u used=%u fp=0x%p flags=0x%04lx\n",
-               page, page->objects, page->inuse, page->freelist, page->flags);
+       printk(KERN_ERR
+              "INFO: Slab 0x%p objects=%u used=%u fp=0x%p flags=0x%04lx\n",
+              page, page->objects, page->inuse, page->freelist, page->flags);
 
 }
 
@@ -629,7 +632,8 @@ static void object_err(struct kmem_cache *s, struct page *page,
        print_trailer(s, page, object);
 }
 
-static void slab_err(struct kmem_cache *s, struct page *page, const char *fmt, ...)
+static void slab_err(struct kmem_cache *s, struct page *page,
+                       const char *fmt, ...)
 {
        va_list args;
        char buf[100];
@@ -788,7 +792,8 @@ static int check_object(struct kmem_cache *s, struct page *page,
        } else {
                if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) {
                        check_bytes_and_report(s, page, p, "Alignment padding",
-                               endobject, POISON_INUSE, s->inuse - s->object_size);
+                               endobject, POISON_INUSE,
+                               s->inuse - s->object_size);
                }
        }
 
@@ -873,7 +878,6 @@ static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
                                object_err(s, page, object,
                                        "Freechain corrupt");
                                set_freepointer(s, object, NULL);
-                               break;
                        } else {
                                slab_err(s, page, "Freepointer corrupt");
                                page->freelist = NULL;
@@ -918,7 +922,8 @@ static void trace(struct kmem_cache *s, struct page *page, void *object,
                        page->freelist);
 
                if (!alloc)
-                       print_section("Object ", (void *)object, s->object_size);
+                       print_section("Object ", (void *)object,
+                                       s->object_size);
 
                dump_stack();
        }
@@ -937,7 +942,8 @@ static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
        return should_failslab(s->object_size, flags, s->flags);
 }
 
-static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags, void *object)
+static inline void slab_post_alloc_hook(struct kmem_cache *s,
+                                       gfp_t flags, void *object)
 {
        flags &= gfp_allowed_mask;
        kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
@@ -1039,7 +1045,8 @@ static void setup_object_debug(struct kmem_cache *s, struct page *page,
        init_tracking(s, object);
 }
 
-static noinline int alloc_debug_processing(struct kmem_cache *s, struct page *page,
+static noinline int alloc_debug_processing(struct kmem_cache *s,
+                                       struct page *page,
                                        void *object, unsigned long addr)
 {
        if (!check_slab(s, page))
@@ -1743,7 +1750,8 @@ static void init_kmem_cache_cpus(struct kmem_cache *s)
 /*
  * Remove the cpu slab
  */
-static void deactivate_slab(struct kmem_cache *s, struct page *page, void *freelist)
+static void deactivate_slab(struct kmem_cache *s, struct page *page,
+                               void *freelist)
 {
        enum slab_modes { M_NONE, M_PARTIAL, M_FULL, M_FREE };
        struct kmem_cache_node *n = get_node(s, page_to_nid(page));
@@ -1999,7 +2007,8 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
                page->pobjects = pobjects;
                page->next = oldpage;
 
-       } while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page) != oldpage);
+       } while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page)
+                                                               != oldpage);
 #endif
 }
 
@@ -2169,8 +2178,8 @@ static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags)
 }
 
 /*
- * Check the page->freelist of a page and either transfer the freelist to the per cpu freelist
- * or deactivate the page.
+ * Check the page->freelist of a page and either transfer the freelist to the
+ * per cpu freelist or deactivate the page.
  *
  * The page is still frozen if the return value is not NULL.
  *
@@ -2314,7 +2323,8 @@ new_slab:
                goto load_freelist;
 
        /* Only entered in the debug case */
-       if (kmem_cache_debug(s) && !alloc_debug_processing(s, page, freelist, addr))
+       if (kmem_cache_debug(s) &&
+                       !alloc_debug_processing(s, page, freelist, addr))
                goto new_slab;  /* Slab failed checks. Next slab needed */
 
        deactivate_slab(s, page, get_freepointer(s, freelist));
@@ -2372,7 +2382,7 @@ redo:
 
        object = c->freelist;
        page = c->page;
-       if (unlikely(!object || !page || !node_match(page, node)))
+       if (unlikely(!object || !node_match(page, node)))
                object = __slab_alloc(s, gfpflags, node, addr, c);
 
        else {
@@ -2382,13 +2392,15 @@ redo:
                 * The cmpxchg will only match if there was no additional
                 * operation and if we are on the right processor.
                 *
-                * The cmpxchg does the following atomically (without lock semantics!)
+                * The cmpxchg does the following atomically (without lock
+                * semantics!)
                 * 1. Relocate first pointer to the current per cpu area.
                 * 2. Verify that tid and freelist have not been changed
                 * 3. If they were not changed replace tid and freelist
                 *
-                * Since this is without lock semantics the protection is only against
-                * code executing on this cpu *not* from access by other cpus.
+                * Since this is without lock semantics the protection is only
+                * against code executing on this cpu *not* from access by
+                * other cpus.
                 */
                if (unlikely(!this_cpu_cmpxchg_double(
                                s->cpu_slab->freelist, s->cpu_slab->tid,
@@ -2420,7 +2432,8 @@ void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
 {
        void *ret = slab_alloc(s, gfpflags, _RET_IP_);
 
-       trace_kmem_cache_alloc(_RET_IP_, ret, s->object_size, s->size, gfpflags);
+       trace_kmem_cache_alloc(_RET_IP_, ret, s->object_size,
+                               s->size, gfpflags);
 
        return ret;
 }
@@ -2512,8 +2525,10 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
                        if (kmem_cache_has_cpu_partial(s) && !prior)
 
                                /*
-                                * Slab was on no list before and will be partially empty
-                                * We can defer the list move and instead freeze it.
+                                * Slab was on no list before and will be
+                                * partially empty
+                                * We can defer the list move and instead
+                                * freeze it.
                                 */
                                new.frozen = 1;
 
@@ -3071,8 +3086,8 @@ static int kmem_cache_open(struct kmem_cache *s, unsigned long flags)
         * A) The number of objects from per cpu partial slabs dumped to the
         *    per node list when we reach the limit.
         * B) The number of objects in cpu partial slabs to extract from the
-        *    per node list when we run out of per cpu objects. We only fetch 50%
-        *    to keep some capacity around for frees.
+        *    per node list when we run out of per cpu objects. We only fetch
+        *    50% to keep some capacity around for frees.
         */
        if (!kmem_cache_has_cpu_partial(s))
                s->cpu_partial = 0;
@@ -3099,8 +3114,8 @@ error:
        if (flags & SLAB_PANIC)
                panic("Cannot create slab %s size=%lu realsize=%u "
                        "order=%u offset=%u flags=%lx\n",
-                       s->name, (unsigned long)s->size, s->size, oo_order(s->oo),
-                       s->offset, flags);
+                       s->name, (unsigned long)s->size, s->size,
+                       oo_order(s->oo), s->offset, flags);
        return -EINVAL;
 }
 
@@ -3338,7 +3353,8 @@ bool verify_mem_not_deleted(const void *x)
 
        slab_lock(page);
        if (on_freelist(page->slab_cache, page, object)) {
-               object_err(page->slab_cache, page, object, "Object is on free-list");
+               object_err(page->slab_cache, page, object,
+                               "Object is on free-list");
                rv = false;
        } else {
                rv = true;
@@ -4162,15 +4178,17 @@ static int list_locations(struct kmem_cache *s, char *buf,
                                !cpumask_empty(to_cpumask(l->cpus)) &&
                                len < PAGE_SIZE - 60) {
                        len += sprintf(buf + len, " cpus=");
-                       len += cpulist_scnprintf(buf + len, PAGE_SIZE - len - 50,
+                       len += cpulist_scnprintf(buf + len,
+                                                PAGE_SIZE - len - 50,
                                                 to_cpumask(l->cpus));
                }
 
                if (nr_online_nodes > 1 && !nodes_empty(l->nodes) &&
                                len < PAGE_SIZE - 60) {
                        len += sprintf(buf + len, " nodes=");
-                       len += nodelist_scnprintf(buf + len, PAGE_SIZE - len - 50,
-                                       l->nodes);
+                       len += nodelist_scnprintf(buf + len,
+                                                 PAGE_SIZE - len - 50,
+                                                 l->nodes);
                }
 
                len += sprintf(buf + len, "\n");
@@ -4268,18 +4286,17 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
        int node;
        int x;
        unsigned long *nodes;
-       unsigned long *per_cpu;
 
-       nodes = kzalloc(2 * sizeof(unsigned long) * nr_node_ids, GFP_KERNEL);
+       nodes = kzalloc(sizeof(unsigned long) * nr_node_ids, GFP_KERNEL);
        if (!nodes)
                return -ENOMEM;
-       per_cpu = nodes + nr_node_ids;
 
        if (flags & SO_CPU) {
                int cpu;
 
                for_each_possible_cpu(cpu) {
-                       struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
+                       struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab,
+                                                              cpu);
                        int node;
                        struct page *page;
 
@@ -4304,8 +4321,6 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
                                total += x;
                                nodes[node] += x;
                        }
-
-                       per_cpu[node]++;
                }
        }
 
@@ -4315,12 +4330,11 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
                for_each_node_state(node, N_NORMAL_MEMORY) {
                        struct kmem_cache_node *n = get_node(s, node);
 
-               if (flags & SO_TOTAL)
-                       x = atomic_long_read(&n->total_objects);
-               else if (flags & SO_OBJECTS)
-                       x = atomic_long_read(&n->total_objects) -
-                               count_partial(n, count_free);
-
+                       if (flags & SO_TOTAL)
+                               x = atomic_long_read(&n->total_objects);
+                       else if (flags & SO_OBJECTS)
+                               x = atomic_long_read(&n->total_objects) -
+                                       count_partial(n, count_free);
                        else
                                x = atomic_long_read(&n->nr_slabs);
                        total += x;
@@ -5136,7 +5150,8 @@ static char *create_unique_id(struct kmem_cache *s)
 
 #ifdef CONFIG_MEMCG_KMEM
        if (!is_root_cache(s))
-               p += sprintf(p, "-%08d", memcg_cache_id(s->memcg_params->memcg));
+               p += sprintf(p, "-%08d",
+                               memcg_cache_id(s->memcg_params->memcg));
 #endif
 
        BUG_ON(p > name + ID_STR_LENGTH - 1);
index 0c1e37d829fa026a9fc61ef93b569087939867c5..e0f62837c3f4873ea2d54c14845c30cb888a386b 100644 (file)
@@ -74,15 +74,10 @@ static struct vmpressure *work_to_vmpressure(struct work_struct *work)
        return container_of(work, struct vmpressure, work);
 }
 
-static struct vmpressure *cg_to_vmpressure(struct cgroup *cg)
-{
-       return css_to_vmpressure(cgroup_subsys_state(cg, mem_cgroup_subsys_id));
-}
-
 static struct vmpressure *vmpressure_parent(struct vmpressure *vmpr)
 {
-       struct cgroup *cg = vmpressure_to_css(vmpr)->cgroup;
-       struct mem_cgroup *memcg = mem_cgroup_from_cont(cg);
+       struct cgroup_subsys_state *css = vmpressure_to_css(vmpr);
+       struct mem_cgroup *memcg = mem_cgroup_from_css(css);
 
        memcg = parent_mem_cgroup(memcg);
        if (!memcg)
@@ -283,7 +278,7 @@ void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, int prio)
 
 /**
  * vmpressure_register_event() - Bind vmpressure notifications to an eventfd
- * @cg:                cgroup that is interested in vmpressure notifications
+ * @css:       css that is interested in vmpressure notifications
  * @cft:       cgroup control files handle
  * @eventfd:   eventfd context to link notifications with
  * @args:      event arguments (used to set up a pressure level threshold)
@@ -298,10 +293,11 @@ void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, int prio)
  * cftype).register_event, and then cgroup core will handle everything by
  * itself.
  */
-int vmpressure_register_event(struct cgroup *cg, struct cftype *cft,
-                             struct eventfd_ctx *eventfd, const char *args)
+int vmpressure_register_event(struct cgroup_subsys_state *css,
+                             struct cftype *cft, struct eventfd_ctx *eventfd,
+                             const char *args)
 {
-       struct vmpressure *vmpr = cg_to_vmpressure(cg);
+       struct vmpressure *vmpr = css_to_vmpressure(css);
        struct vmpressure_event *ev;
        int level;
 
@@ -329,7 +325,7 @@ int vmpressure_register_event(struct cgroup *cg, struct cftype *cft,
 
 /**
  * vmpressure_unregister_event() - Unbind eventfd from vmpressure
- * @cg:                cgroup handle
+ * @css:       css handle
  * @cft:       cgroup control files handle
  * @eventfd:   eventfd context that was used to link vmpressure with the @cg
  *
@@ -341,10 +337,11 @@ int vmpressure_register_event(struct cgroup *cg, struct cftype *cft,
  * cftype).unregister_event, and then cgroup core will handle everything
  * by itself.
  */
-void vmpressure_unregister_event(struct cgroup *cg, struct cftype *cft,
+void vmpressure_unregister_event(struct cgroup_subsys_state *css,
+                                struct cftype *cft,
                                 struct eventfd_ctx *eventfd)
 {
-       struct vmpressure *vmpr = cg_to_vmpressure(cg);
+       struct vmpressure *vmpr = css_to_vmpressure(css);
        struct vmpressure_event *ev;
 
        mutex_lock(&vmpr->events_lock);
index 2fb2d88e8c2e329ec11653c20478bede1c4e5ad8..61fc573f1142f707fee7d886e67153a62c91c14a 100644 (file)
@@ -210,6 +210,7 @@ out_vid_del:
 static int register_vlan_device(struct net_device *real_dev, u16 vlan_id)
 {
        struct net_device *new_dev;
+       struct vlan_dev_priv *vlan;
        struct net *net = dev_net(real_dev);
        struct vlan_net *vn = net_generic(net, vlan_net_id);
        char name[IFNAMSIZ];
@@ -260,11 +261,12 @@ static int register_vlan_device(struct net_device *real_dev, u16 vlan_id)
        new_dev->mtu = real_dev->mtu;
        new_dev->priv_flags |= (real_dev->priv_flags & IFF_UNICAST_FLT);
 
-       vlan_dev_priv(new_dev)->vlan_proto = htons(ETH_P_8021Q);
-       vlan_dev_priv(new_dev)->vlan_id = vlan_id;
-       vlan_dev_priv(new_dev)->real_dev = real_dev;
-       vlan_dev_priv(new_dev)->dent = NULL;
-       vlan_dev_priv(new_dev)->flags = VLAN_FLAG_REORDER_HDR;
+       vlan = vlan_dev_priv(new_dev);
+       vlan->vlan_proto = htons(ETH_P_8021Q);
+       vlan->vlan_id = vlan_id;
+       vlan->real_dev = real_dev;
+       vlan->dent = NULL;
+       vlan->flags = VLAN_FLAG_REORDER_HDR;
 
        new_dev->rtnl_link_ops = &vlan_link_ops;
        err = register_vlan_dev(new_dev);
@@ -459,6 +461,7 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
 
        case NETDEV_NOTIFY_PEERS:
        case NETDEV_BONDING_FAILOVER:
+       case NETDEV_RESEND_IGMP:
                /* Propagate to vlan devices */
                vlan_group_for_each_dev(grp, i, vlandev)
                        call_netdevice_notifiers(event, vlandev);
index 1cd3d2a406f5a268113d0d802f03578f2a3cbe20..9ab8a7ed99c0e983c1d6a0c97b378f3b70c4397f 100644 (file)
@@ -107,10 +107,10 @@ static int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev,
        u16 vlan_tci = 0;
        int rc;
 
-       if (!(vlan_dev_priv(dev)->flags & VLAN_FLAG_REORDER_HDR)) {
+       if (!(vlan->flags & VLAN_FLAG_REORDER_HDR)) {
                vhdr = (struct vlan_hdr *) skb_push(skb, VLAN_HLEN);
 
-               vlan_tci = vlan_dev_priv(dev)->vlan_id;
+               vlan_tci = vlan->vlan_id;
                vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb);
                vhdr->h_vlan_TCI = htons(vlan_tci);
 
@@ -133,7 +133,7 @@ static int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev,
                saddr = dev->dev_addr;
 
        /* Now make the underlying real hard header */
-       dev = vlan_dev_priv(dev)->real_dev;
+       dev = vlan->real_dev;
        rc = dev_hard_header(skb, dev, type, daddr, saddr, len + vhdrlen);
        if (rc > 0)
                rc += vhdrlen;
index 8b93cae2d11d7d013d50c276bd08fc11a1187d08..ba93bdab2701939e2488b30c5c55be315acb0670 100644 (file)
@@ -658,17 +658,12 @@ static int p9_client_flush(struct p9_client *c, struct p9_req_t *oldreq)
 
        /*
         * if we haven't received a response for oldreq,
-        * remove it from the list, and notify the transport
-        * layer that the reply will never arrive.
+        * remove it from the list
         */
-       spin_lock(&c->lock);
        if (oldreq->status == REQ_STATUS_FLSH) {
+               spin_lock(&c->lock);
                list_del(&oldreq->req_list);
                spin_unlock(&c->lock);
-               if (c->trans_mod->cancelled)
-                       c->trans_mod->cancelled(c, req);
-       } else {
-               spin_unlock(&c->lock);
        }
 
        p9_free_req(c, req);
index 928f2bb9bf8d5d4d43d6324c3ea989a9daf12728..8f68df5d29731cf23b6de1bbee5284c904006c93 100644 (file)
@@ -588,17 +588,6 @@ static int rdma_cancel(struct p9_client *client, struct p9_req_t *req)
        return 1;
 }
 
-/* A request has been fully flushed without a reply.
- * That means we have posted one buffer in excess.
- */
-static int rdma_cancelled(struct p9_client *client, struct p9_req_t *req)
-{
-       struct p9_trans_rdma *rdma = client->trans;
-
-       atomic_inc(&rdma->excess_rc);
-       return 0;
-}
-
 /**
  * trans_create_rdma - Transport method for creating atransport instance
  * @client: client instance
index 2b406608a1a4a42606e6ca3eb583f5f1b136176a..ee0213667272cc8eb5425af16921c69bee385311 100644 (file)
@@ -281,7 +281,7 @@ menu "Network testing"
 
 config NET_PKTGEN
        tristate "Packet Generator (USE WITH CAUTION)"
-       depends on PROC_FS
+       depends on INET && PROC_FS
        ---help---
          This module will inject preconfigured packets, at a configurable
          rate, out of a given interface.  It is used for network interface
index c30f3a0717fb8a609239cd8578416eac63c61dd5..af46bc49e1e9946ce1ff4ff0fc772ce14a231670 100644 (file)
@@ -178,7 +178,7 @@ static int atalk_seq_socket_show(struct seq_file *seq, void *v)
        at = at_sk(s);
 
        seq_printf(seq, "%02X   %04X:%02X:%02X  %04X:%02X:%02X  %08X:%08X "
-                       "%02X %d\n",
+                       "%02X %u\n",
                   s->sk_type, ntohs(at->src_net), at->src_node, at->src_port,
                   ntohs(at->dest_net), at->dest_node, at->dest_port,
                   sk_wmem_alloc_get(s),
index 688a0419756bfc6ce2a9f632f6e84a6ed42debcf..857e1b8349ee417e96fd4e6afb840c541245d04c 100644 (file)
@@ -432,12 +432,16 @@ find_router:
 
        switch (packet_type) {
        case BATADV_UNICAST:
-               batadv_unicast_prepare_skb(skb, orig_node);
+               if (!batadv_unicast_prepare_skb(skb, orig_node))
+                       goto out;
+
                header_len = sizeof(struct batadv_unicast_packet);
                break;
        case BATADV_UNICAST_4ADDR:
-               batadv_unicast_4addr_prepare_skb(bat_priv, skb, orig_node,
-                                                packet_subtype);
+               if (!batadv_unicast_4addr_prepare_skb(bat_priv, skb, orig_node,
+                                                     packet_subtype))
+                       goto out;
+
                header_len = sizeof(struct batadv_unicast_4addr_packet);
                break;
        default:
index 6c7f3637972254c9bbbe0b5a9c6f007ba1ec41e2..f0817121ec5e6b0c5f50e82493f9033376908f61 100644 (file)
 #include <net/bluetooth/a2mp.h>
 #include <net/bluetooth/smp.h>
 
+struct sco_param {
+       u16 pkt_type;
+       u16 max_latency;
+};
+
+static const struct sco_param sco_param_cvsd[] = {
+       { EDR_ESCO_MASK & ~ESCO_2EV3, 0x000a }, /* S3 */
+       { EDR_ESCO_MASK & ~ESCO_2EV3, 0x0007 }, /* S2 */
+       { EDR_ESCO_MASK | ESCO_EV3,   0x0007 }, /* S1 */
+       { EDR_ESCO_MASK | ESCO_HV3,   0xffff }, /* D1 */
+       { EDR_ESCO_MASK | ESCO_HV1,   0xffff }, /* D0 */
+};
+
+static const struct sco_param sco_param_wideband[] = {
+       { EDR_ESCO_MASK & ~ESCO_2EV3, 0x000d }, /* T2 */
+       { EDR_ESCO_MASK | ESCO_EV3,   0x0008 }, /* T1 */
+};
+
 static void hci_le_create_connection(struct hci_conn *conn)
 {
        struct hci_dev *hdev = conn->hdev;
@@ -172,10 +190,11 @@ static void hci_add_sco(struct hci_conn *conn, __u16 handle)
        hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp);
 }
 
-void hci_setup_sync(struct hci_conn *conn, __u16 handle)
+bool hci_setup_sync(struct hci_conn *conn, __u16 handle)
 {
        struct hci_dev *hdev = conn->hdev;
        struct hci_cp_setup_sync_conn cp;
+       const struct sco_param *param;
 
        BT_DBG("hcon %p", conn);
 
@@ -185,15 +204,35 @@ void hci_setup_sync(struct hci_conn *conn, __u16 handle)
        conn->attempt++;
 
        cp.handle   = cpu_to_le16(handle);
-       cp.pkt_type = cpu_to_le16(conn->pkt_type);
 
        cp.tx_bandwidth   = __constant_cpu_to_le32(0x00001f40);
        cp.rx_bandwidth   = __constant_cpu_to_le32(0x00001f40);
-       cp.max_latency    = __constant_cpu_to_le16(0xffff);
-       cp.voice_setting  = cpu_to_le16(hdev->voice_setting);
-       cp.retrans_effort = 0xff;
+       cp.voice_setting  = cpu_to_le16(conn->setting);
+
+       switch (conn->setting & SCO_AIRMODE_MASK) {
+       case SCO_AIRMODE_TRANSP:
+               if (conn->attempt > ARRAY_SIZE(sco_param_wideband))
+                       return false;
+               cp.retrans_effort = 0x02;
+               param = &sco_param_wideband[conn->attempt - 1];
+               break;
+       case SCO_AIRMODE_CVSD:
+               if (conn->attempt > ARRAY_SIZE(sco_param_cvsd))
+                       return false;
+               cp.retrans_effort = 0x01;
+               param = &sco_param_cvsd[conn->attempt - 1];
+               break;
+       default:
+               return false;
+       }
 
-       hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp);
+       cp.pkt_type = __cpu_to_le16(param->pkt_type);
+       cp.max_latency = __cpu_to_le16(param->max_latency);
+
+       if (hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp) < 0)
+               return false;
+
+       return true;
 }
 
 void hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max,
@@ -560,13 +599,13 @@ static struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
        return acl;
 }
 
-static struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type,
-                               bdaddr_t *dst, u8 sec_level, u8 auth_type)
+struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
+                                __u16 setting)
 {
        struct hci_conn *acl;
        struct hci_conn *sco;
 
-       acl = hci_connect_acl(hdev, dst, sec_level, auth_type);
+       acl = hci_connect_acl(hdev, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING);
        if (IS_ERR(acl))
                return acl;
 
@@ -584,6 +623,8 @@ static struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type,
 
        hci_conn_hold(sco);
 
+       sco->setting = setting;
+
        if (acl->state == BT_CONNECTED &&
            (sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
                set_bit(HCI_CONN_POWER_SAVE, &acl->flags);
@@ -612,9 +653,6 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst,
                return hci_connect_le(hdev, dst, dst_type, sec_level, auth_type);
        case ACL_LINK:
                return hci_connect_acl(hdev, dst, sec_level, auth_type);
-       case SCO_LINK:
-       case ESCO_LINK:
-               return hci_connect_sco(hdev, type, dst, sec_level, auth_type);
        }
 
        return ERR_PTR(-EINVAL);
index cc27297da5a9f9094e3079ef43e207dbd9848b68..634debab4d54582f04c69a4664b00c7541696a93 100644 (file)
@@ -454,6 +454,18 @@ static void hci_setup_event_mask(struct hci_request *req)
                events[4] |= 0x04; /* Read Remote Extended Features Complete */
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
+       } else {
+               /* Use a different default for LE-only devices */
+               memset(events, 0, sizeof(events));
+               events[0] |= 0x10; /* Disconnection Complete */
+               events[0] |= 0x80; /* Encryption Change */
+               events[1] |= 0x08; /* Read Remote Version Information Complete */
+               events[1] |= 0x20; /* Command Complete */
+               events[1] |= 0x40; /* Command Status */
+               events[1] |= 0x80; /* Hardware Error */
+               events[2] |= 0x04; /* Number of Completed Packets */
+               events[3] |= 0x02; /* Data Buffer Overflow */
+               events[5] |= 0x80; /* Encryption Key Refresh Complete */
        }
 
        if (lmp_inq_rssi_capable(hdev))
@@ -608,7 +620,7 @@ static void hci_init3_req(struct hci_request *req, unsigned long opt)
         * as supported send it. If not supported assume that the controller
         * does not have actual support for stored link keys which makes this
         * command redundant anyway.
-         */
+        */
        if (hdev->commands[6] & 0x80) {
                struct hci_cp_delete_stored_link_key cp;
 
index 0437200d92f45ca90bb09f3cad13d15c40a851de..94aab73f89d4c9e447d2b65f12f8ec71ee1a66e6 100644 (file)
@@ -2904,15 +2904,16 @@ static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
                hci_conn_add_sysfs(conn);
                break;
 
+       case 0x0d:      /* Connection Rejected due to Limited Resources */
        case 0x11:      /* Unsupported Feature or Parameter Value */
        case 0x1c:      /* SCO interval rejected */
        case 0x1a:      /* Unsupported Remote Feature */
        case 0x1f:      /* Unspecified error */
-               if (conn->out && conn->attempt < 2) {
+               if (conn->out) {
                        conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
                                        (hdev->esco_type & EDR_ESCO_MASK);
-                       hci_setup_sync(conn, conn->link->handle);
-                       goto unlock;
+                       if (hci_setup_sync(conn, conn->link->handle))
+                               goto unlock;
                }
                /* fall through */
 
@@ -3024,17 +3025,20 @@ unlock:
 static u8 hci_get_auth_req(struct hci_conn *conn)
 {
        /* If remote requests dedicated bonding follow that lead */
-       if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03) {
+       if (conn->remote_auth == HCI_AT_DEDICATED_BONDING ||
+           conn->remote_auth == HCI_AT_DEDICATED_BONDING_MITM) {
                /* If both remote and local IO capabilities allow MITM
                 * protection then require it, otherwise don't */
-               if (conn->remote_cap == 0x03 || conn->io_capability == 0x03)
-                       return 0x02;
+               if (conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT ||
+                   conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)
+                       return HCI_AT_DEDICATED_BONDING;
                else
-                       return 0x03;
+                       return HCI_AT_DEDICATED_BONDING_MITM;
        }
 
        /* If remote requests no-bonding follow that lead */
-       if (conn->remote_auth == 0x00 || conn->remote_auth == 0x01)
+       if (conn->remote_auth == HCI_AT_NO_BONDING ||
+           conn->remote_auth == HCI_AT_NO_BONDING_MITM)
                return conn->remote_auth | (conn->auth_type & 0x01);
 
        return conn->auth_type;
@@ -3066,7 +3070,7 @@ static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
                /* Change the IO capability from KeyboardDisplay
                 * to DisplayYesNo as it is not supported by BT spec. */
                cp.capability = (conn->io_capability == 0x04) ?
-                                               0x01 : conn->io_capability;
+                               HCI_IO_DISPLAY_YESNO : conn->io_capability;
                conn->auth_type = hci_get_auth_req(conn);
                cp.authentication = conn->auth_type;
 
@@ -3140,7 +3144,8 @@ static void hci_user_confirm_request_evt(struct hci_dev *hdev,
         * request. The only exception is when we're dedicated bonding
         * initiators (connect_cfm_cb set) since then we always have the MITM
         * bit set. */
-       if (!conn->connect_cfm_cb && loc_mitm && conn->remote_cap == 0x03) {
+       if (!conn->connect_cfm_cb && loc_mitm &&
+           conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
                BT_DBG("Rejecting request: remote device can't provide MITM");
                hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
                             sizeof(ev->bdaddr), &ev->bdaddr);
@@ -3148,8 +3153,8 @@ static void hci_user_confirm_request_evt(struct hci_dev *hdev,
        }
 
        /* If no side requires MITM protection; auto-accept */
-       if ((!loc_mitm || conn->remote_cap == 0x03) &&
-           (!rem_mitm || conn->io_capability == 0x03)) {
+       if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
+           (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {
 
                /* If we're not the initiators request authorization to
                 * proceed from user space (mgmt_user_confirm with
index 0c699cdc3696edbdf7e7c3d3e213a88979da662e..bdc35a7a7feeaf4ac7a918547e82d5d19a7ef537 100644 (file)
@@ -225,17 +225,47 @@ static void hidp_input_report(struct hidp_session *session, struct sk_buff *skb)
 
 static int hidp_send_report(struct hidp_session *session, struct hid_report *report)
 {
-       unsigned char buf[32], hdr;
-       int rsize;
+       unsigned char hdr;
+       u8 *buf;
+       int rsize, ret;
 
-       rsize = ((report->size - 1) >> 3) + 1 + (report->id > 0);
-       if (rsize > sizeof(buf))
+       buf = hid_alloc_report_buf(report, GFP_ATOMIC);
+       if (!buf)
                return -EIO;
 
        hid_output_report(report, buf);
        hdr = HIDP_TRANS_DATA | HIDP_DATA_RTYPE_OUPUT;
 
-       return hidp_send_intr_message(session, hdr, buf, rsize);
+       rsize = ((report->size - 1) >> 3) + 1 + (report->id > 0);
+       ret = hidp_send_intr_message(session, hdr, buf, rsize);
+
+       kfree(buf);
+       return ret;
+}
+
+static int hidp_hidinput_event(struct input_dev *dev, unsigned int type,
+                              unsigned int code, int value)
+{
+       struct hid_device *hid = input_get_drvdata(dev);
+       struct hidp_session *session = hid->driver_data;
+       struct hid_field *field;
+       int offset;
+
+       BT_DBG("session %p type %d code %d value %d",
+              session, type, code, value);
+
+       if (type != EV_LED)
+               return -1;
+
+       offset = hidinput_find_field(hid, type, code, &field);
+       if (offset == -1) {
+               hid_warn(dev, "event field not found\n");
+               return -1;
+       }
+
+       hid_set_field(field, offset, value);
+
+       return hidp_send_report(session, field->report);
 }
 
 static int hidp_get_raw_report(struct hid_device *hid,
@@ -678,20 +708,6 @@ static int hidp_parse(struct hid_device *hid)
 
 static int hidp_start(struct hid_device *hid)
 {
-       struct hidp_session *session = hid->driver_data;
-       struct hid_report *report;
-
-       if (hid->quirks & HID_QUIRK_NO_INIT_REPORTS)
-               return 0;
-
-       list_for_each_entry(report, &hid->report_enum[HID_INPUT_REPORT].
-                       report_list, list)
-               hidp_send_report(session, report);
-
-       list_for_each_entry(report, &hid->report_enum[HID_FEATURE_REPORT].
-                       report_list, list)
-               hidp_send_report(session, report);
-
        return 0;
 }
 
@@ -711,6 +727,7 @@ static struct hid_ll_driver hidp_hid_driver = {
        .stop = hidp_stop,
        .open  = hidp_open,
        .close = hidp_close,
+       .hidinput_input_event = hidp_hidinput_event,
 };
 
 /* This function sets up the hid device. It does not add it
index 8c3499bec89319289073b2f1e7f3f202cd179ca6..b3bb7bca8e606439edbd9f9838bb8021200bb28c 100644 (file)
@@ -1415,8 +1415,9 @@ static void l2cap_conn_ready(struct l2cap_conn *conn)
                        sk->sk_state_change(sk);
                        release_sock(sk);
 
-               } else if (chan->state == BT_CONNECT)
+               } else if (chan->state == BT_CONNECT) {
                        l2cap_do_start(chan);
+               }
 
                l2cap_chan_unlock(chan);
        }
index b6e44ad6cca6e691556f630bcb056373684203ad..6d126faf145fe5107fbd0dfd9b810671c34e1e66 100644 (file)
@@ -58,7 +58,6 @@ struct rfcomm_dev {
        uint                    modem_status;
 
        struct rfcomm_dlc       *dlc;
-       wait_queue_head_t       wait;
 
        struct device           *tty_dev;
 
@@ -76,13 +75,6 @@ static void rfcomm_dev_modem_status(struct rfcomm_dlc *dlc, u8 v24_sig);
 
 /* ---- Device functions ---- */
 
-/*
- * The reason this isn't actually a race, as you no doubt have a little voice
- * screaming at you in your head, is that the refcount should never actually
- * reach zero unless the device has already been taken off the list, in
- * rfcomm_dev_del(). And if that's not true, we'll hit the BUG() in
- * rfcomm_dev_destruct() anyway.
- */
 static void rfcomm_dev_destruct(struct tty_port *port)
 {
        struct rfcomm_dev *dev = container_of(port, struct rfcomm_dev, port);
@@ -90,10 +82,9 @@ static void rfcomm_dev_destruct(struct tty_port *port)
 
        BT_DBG("dev %p dlc %p", dev, dlc);
 
-       /* Refcount should only hit zero when called from rfcomm_dev_del()
-          which will have taken us off the list. Everything else are
-          refcounting bugs. */
-       BUG_ON(!list_empty(&dev->list));
+       spin_lock(&rfcomm_dev_lock);
+       list_del(&dev->list);
+       spin_unlock(&rfcomm_dev_lock);
 
        rfcomm_dlc_lock(dlc);
        /* Detach DLC if it's owned by this dev */
@@ -112,8 +103,39 @@ static void rfcomm_dev_destruct(struct tty_port *port)
        module_put(THIS_MODULE);
 }
 
+/* device-specific initialization: open the dlc */
+static int rfcomm_dev_activate(struct tty_port *port, struct tty_struct *tty)
+{
+       struct rfcomm_dev *dev = container_of(port, struct rfcomm_dev, port);
+
+       return rfcomm_dlc_open(dev->dlc, &dev->src, &dev->dst, dev->channel);
+}
+
+/* we block the open until the dlc->state becomes BT_CONNECTED */
+static int rfcomm_dev_carrier_raised(struct tty_port *port)
+{
+       struct rfcomm_dev *dev = container_of(port, struct rfcomm_dev, port);
+
+       return (dev->dlc->state == BT_CONNECTED);
+}
+
+/* device-specific cleanup: close the dlc */
+static void rfcomm_dev_shutdown(struct tty_port *port)
+{
+       struct rfcomm_dev *dev = container_of(port, struct rfcomm_dev, port);
+
+       if (dev->tty_dev->parent)
+               device_move(dev->tty_dev, NULL, DPM_ORDER_DEV_LAST);
+
+       /* close the dlc */
+       rfcomm_dlc_close(dev->dlc, 0);
+}
+
 static const struct tty_port_operations rfcomm_port_ops = {
        .destruct = rfcomm_dev_destruct,
+       .activate = rfcomm_dev_activate,
+       .shutdown = rfcomm_dev_shutdown,
+       .carrier_raised = rfcomm_dev_carrier_raised,
 };
 
 static struct rfcomm_dev *__rfcomm_dev_get(int id)
@@ -236,7 +258,6 @@ static int rfcomm_dev_add(struct rfcomm_dev_req *req, struct rfcomm_dlc *dlc)
 
        tty_port_init(&dev->port);
        dev->port.ops = &rfcomm_port_ops;
-       init_waitqueue_head(&dev->wait);
 
        skb_queue_head_init(&dev->pending);
 
@@ -282,7 +303,9 @@ out:
                        dev->id, NULL);
        if (IS_ERR(dev->tty_dev)) {
                err = PTR_ERR(dev->tty_dev);
+               spin_lock(&rfcomm_dev_lock);
                list_del(&dev->list);
+               spin_unlock(&rfcomm_dev_lock);
                goto free;
        }
 
@@ -301,27 +324,6 @@ free:
        return err;
 }
 
-static void rfcomm_dev_del(struct rfcomm_dev *dev)
-{
-       unsigned long flags;
-       BT_DBG("dev %p", dev);
-
-       BUG_ON(test_and_set_bit(RFCOMM_TTY_RELEASED, &dev->flags));
-
-       spin_lock_irqsave(&dev->port.lock, flags);
-       if (dev->port.count > 0) {
-               spin_unlock_irqrestore(&dev->port.lock, flags);
-               return;
-       }
-       spin_unlock_irqrestore(&dev->port.lock, flags);
-
-       spin_lock(&rfcomm_dev_lock);
-       list_del_init(&dev->list);
-       spin_unlock(&rfcomm_dev_lock);
-
-       tty_port_put(&dev->port);
-}
-
 /* ---- Send buffer ---- */
 static inline unsigned int rfcomm_room(struct rfcomm_dlc *dlc)
 {
@@ -333,10 +335,9 @@ static inline unsigned int rfcomm_room(struct rfcomm_dlc *dlc)
 static void rfcomm_wfree(struct sk_buff *skb)
 {
        struct rfcomm_dev *dev = (void *) skb->sk;
-       struct tty_struct *tty = dev->port.tty;
        atomic_sub(skb->truesize, &dev->wmem_alloc);
-       if (test_bit(RFCOMM_TTY_ATTACHED, &dev->flags) && tty)
-               tty_wakeup(tty);
+       if (test_bit(RFCOMM_TTY_ATTACHED, &dev->flags))
+               tty_port_tty_wakeup(&dev->port);
        tty_port_put(&dev->port);
 }
 
@@ -410,6 +411,7 @@ static int rfcomm_release_dev(void __user *arg)
 {
        struct rfcomm_dev_req req;
        struct rfcomm_dev *dev;
+       struct tty_struct *tty;
 
        if (copy_from_user(&req, arg, sizeof(req)))
                return -EFAULT;
@@ -429,11 +431,15 @@ static int rfcomm_release_dev(void __user *arg)
                rfcomm_dlc_close(dev->dlc, 0);
 
        /* Shut down TTY synchronously before freeing rfcomm_dev */
-       if (dev->port.tty)
-               tty_vhangup(dev->port.tty);
+       tty = tty_port_tty_get(&dev->port);
+       if (tty) {
+               tty_vhangup(tty);
+               tty_kref_put(tty);
+       }
+
+       if (!test_and_set_bit(RFCOMM_TTY_RELEASED, &dev->flags))
+               tty_port_put(&dev->port);
 
-       if (!test_bit(RFCOMM_RELEASE_ONHUP, &dev->flags))
-               rfcomm_dev_del(dev);
        tty_port_put(&dev->port);
        return 0;
 }
@@ -563,16 +569,21 @@ static void rfcomm_dev_data_ready(struct rfcomm_dlc *dlc, struct sk_buff *skb)
 static void rfcomm_dev_state_change(struct rfcomm_dlc *dlc, int err)
 {
        struct rfcomm_dev *dev = dlc->owner;
+       struct tty_struct *tty;
        if (!dev)
                return;
 
        BT_DBG("dlc %p dev %p err %d", dlc, dev, err);
 
        dev->err = err;
-       wake_up_interruptible(&dev->wait);
+       if (dlc->state == BT_CONNECTED) {
+               device_move(dev->tty_dev, rfcomm_get_device(dev),
+                           DPM_ORDER_DEV_AFTER_PARENT);
 
-       if (dlc->state == BT_CLOSED) {
-               if (!dev->port.tty) {
+               wake_up_interruptible(&dev->port.open_wait);
+       } else if (dlc->state == BT_CLOSED) {
+               tty = tty_port_tty_get(&dev->port);
+               if (!tty) {
                        if (test_bit(RFCOMM_RELEASE_ONHUP, &dev->flags)) {
                                /* Drop DLC lock here to avoid deadlock
                                 * 1. rfcomm_dev_get will take rfcomm_dev_lock
@@ -580,6 +591,9 @@ static void rfcomm_dev_state_change(struct rfcomm_dlc *dlc, int err)
                                 *    rfcomm_dev_lock -> dlc lock
                                 * 2. tty_port_put will deadlock if it's
                                 *    the last reference
+                                *
+                                * FIXME: when we release the lock anything
+                                * could happen to dev, even its destruction
                                 */
                                rfcomm_dlc_unlock(dlc);
                                if (rfcomm_dev_get(dev->id) == NULL) {
@@ -587,12 +601,17 @@ static void rfcomm_dev_state_change(struct rfcomm_dlc *dlc, int err)
                                        return;
                                }
 
-                               rfcomm_dev_del(dev);
+                               if (!test_and_set_bit(RFCOMM_TTY_RELEASED,
+                                                     &dev->flags))
+                                       tty_port_put(&dev->port);
+
                                tty_port_put(&dev->port);
                                rfcomm_dlc_lock(dlc);
                        }
-               } else
-                       tty_hangup(dev->port.tty);
+               } else {
+                       tty_hangup(tty);
+                       tty_kref_put(tty);
+               }
        }
 }
 
@@ -604,10 +623,8 @@ static void rfcomm_dev_modem_status(struct rfcomm_dlc *dlc, u8 v24_sig)
 
        BT_DBG("dlc %p dev %p v24_sig 0x%02x", dlc, dev, v24_sig);
 
-       if ((dev->modem_status & TIOCM_CD) && !(v24_sig & RFCOMM_V24_DV)) {
-               if (dev->port.tty && !C_CLOCAL(dev->port.tty))
-                       tty_hangup(dev->port.tty);
-       }
+       if ((dev->modem_status & TIOCM_CD) && !(v24_sig & RFCOMM_V24_DV))
+               tty_port_tty_hangup(&dev->port, true);
 
        dev->modem_status =
                ((v24_sig & RFCOMM_V24_RTC) ? (TIOCM_DSR | TIOCM_DTR) : 0) |
@@ -638,124 +655,92 @@ static void rfcomm_tty_copy_pending(struct rfcomm_dev *dev)
                tty_flip_buffer_push(&dev->port);
 }
 
-static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp)
+/* do the reverse of install, clearing the tty fields and releasing the
+ * reference to tty_port
+ */
+static void rfcomm_tty_cleanup(struct tty_struct *tty)
 {
-       DECLARE_WAITQUEUE(wait, current);
-       struct rfcomm_dev *dev;
-       struct rfcomm_dlc *dlc;
-       unsigned long flags;
-       int err, id;
+       struct rfcomm_dev *dev = tty->driver_data;
 
-       id = tty->index;
+       clear_bit(RFCOMM_TTY_ATTACHED, &dev->flags);
 
-       BT_DBG("tty %p id %d", tty, id);
+       rfcomm_dlc_lock(dev->dlc);
+       tty->driver_data = NULL;
+       rfcomm_dlc_unlock(dev->dlc);
 
-       /* We don't leak this refcount. For reasons which are not entirely
-          clear, the TTY layer will call our ->close() method even if the
-          open fails. We decrease the refcount there, and decreasing it
-          here too would cause breakage. */
-       dev = rfcomm_dev_get(id);
-       if (!dev)
-               return -ENODEV;
+       /*
+        * purge the dlc->tx_queue to avoid circular dependencies
+        * between dev and dlc
+        */
+       skb_queue_purge(&dev->dlc->tx_queue);
 
-       BT_DBG("dev %p dst %pMR channel %d opened %d", dev, &dev->dst,
-              dev->channel, dev->port.count);
+       tty_port_put(&dev->port);
+}
 
-       spin_lock_irqsave(&dev->port.lock, flags);
-       if (++dev->port.count > 1) {
-               spin_unlock_irqrestore(&dev->port.lock, flags);
-               return 0;
-       }
-       spin_unlock_irqrestore(&dev->port.lock, flags);
+/* we acquire the tty_port reference since it's here the tty is first used
+ * by setting the termios. We also populate the driver_data field and install
+ * the tty port
+ */
+static int rfcomm_tty_install(struct tty_driver *driver, struct tty_struct *tty)
+{
+       struct rfcomm_dev *dev;
+       struct rfcomm_dlc *dlc;
+       int err;
+
+       dev = rfcomm_dev_get(tty->index);
+       if (!dev)
+               return -ENODEV;
 
        dlc = dev->dlc;
 
        /* Attach TTY and open DLC */
-
        rfcomm_dlc_lock(dlc);
        tty->driver_data = dev;
-       dev->port.tty = tty;
        rfcomm_dlc_unlock(dlc);
        set_bit(RFCOMM_TTY_ATTACHED, &dev->flags);
 
-       err = rfcomm_dlc_open(dlc, &dev->src, &dev->dst, dev->channel);
-       if (err < 0)
-               return err;
-
-       /* Wait for DLC to connect */
-       add_wait_queue(&dev->wait, &wait);
-       while (1) {
-               set_current_state(TASK_INTERRUPTIBLE);
+       /* install the tty_port */
+       err = tty_port_install(&dev->port, driver, tty);
+       if (err)
+               rfcomm_tty_cleanup(tty);
 
-               if (dlc->state == BT_CLOSED) {
-                       err = -dev->err;
-                       break;
-               }
+       return err;
+}
 
-               if (dlc->state == BT_CONNECTED)
-                       break;
+static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp)
+{
+       struct rfcomm_dev *dev = tty->driver_data;
+       int err;
 
-               if (signal_pending(current)) {
-                       err = -EINTR;
-                       break;
-               }
+       BT_DBG("tty %p id %d", tty, tty->index);
 
-               tty_unlock(tty);
-               schedule();
-               tty_lock(tty);
-       }
-       set_current_state(TASK_RUNNING);
-       remove_wait_queue(&dev->wait, &wait);
+       BT_DBG("dev %p dst %pMR channel %d opened %d", dev, &dev->dst,
+              dev->channel, dev->port.count);
 
-       if (err == 0)
-               device_move(dev->tty_dev, rfcomm_get_device(dev),
-                           DPM_ORDER_DEV_AFTER_PARENT);
+       err = tty_port_open(&dev->port, tty, filp);
+       if (err)
+               return err;
 
+       /*
+        * FIXME: rfcomm should use proper flow control for
+        * received data. This hack will be unnecessary and can
+        * be removed when that's implemented
+        */
        rfcomm_tty_copy_pending(dev);
 
        rfcomm_dlc_unthrottle(dev->dlc);
 
-       return err;
+       return 0;
 }
 
 static void rfcomm_tty_close(struct tty_struct *tty, struct file *filp)
 {
        struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data;
-       unsigned long flags;
-
-       if (!dev)
-               return;
 
        BT_DBG("tty %p dev %p dlc %p opened %d", tty, dev, dev->dlc,
                                                dev->port.count);
 
-       spin_lock_irqsave(&dev->port.lock, flags);
-       if (!--dev->port.count) {
-               spin_unlock_irqrestore(&dev->port.lock, flags);
-               if (dev->tty_dev->parent)
-                       device_move(dev->tty_dev, NULL, DPM_ORDER_DEV_LAST);
-
-               /* Close DLC and dettach TTY */
-               rfcomm_dlc_close(dev->dlc, 0);
-
-               clear_bit(RFCOMM_TTY_ATTACHED, &dev->flags);
-
-               rfcomm_dlc_lock(dev->dlc);
-               tty->driver_data = NULL;
-               dev->port.tty = NULL;
-               rfcomm_dlc_unlock(dev->dlc);
-
-               if (test_bit(RFCOMM_TTY_RELEASED, &dev->flags)) {
-                       spin_lock(&rfcomm_dev_lock);
-                       list_del_init(&dev->list);
-                       spin_unlock(&rfcomm_dev_lock);
-
-                       tty_port_put(&dev->port);
-               }
-       } else
-               spin_unlock_irqrestore(&dev->port.lock, flags);
-
-       tty_port_put(&dev->port);
+       tty_port_close(&dev->port, tty, filp);
 }
 
 static int rfcomm_tty_write(struct tty_struct *tty, const unsigned char *buf, int count)
@@ -1055,17 +1040,11 @@ static void rfcomm_tty_hangup(struct tty_struct *tty)
 
        BT_DBG("tty %p dev %p", tty, dev);
 
-       if (!dev)
-               return;
-
-       rfcomm_tty_flush_buffer(tty);
+       tty_port_hangup(&dev->port);
 
-       if (test_bit(RFCOMM_RELEASE_ONHUP, &dev->flags)) {
-               if (rfcomm_dev_get(dev->id) == NULL)
-                       return;
-               rfcomm_dev_del(dev);
+       if (test_bit(RFCOMM_RELEASE_ONHUP, &dev->flags) &&
+           !test_and_set_bit(RFCOMM_TTY_RELEASED, &dev->flags))
                tty_port_put(&dev->port);
-       }
 }
 
 static int rfcomm_tty_tiocmget(struct tty_struct *tty)
@@ -1128,6 +1107,8 @@ static const struct tty_operations rfcomm_ops = {
        .wait_until_sent        = rfcomm_tty_wait_until_sent,
        .tiocmget               = rfcomm_tty_tiocmget,
        .tiocmset               = rfcomm_tty_tiocmset,
+       .install                = rfcomm_tty_install,
+       .cleanup                = rfcomm_tty_cleanup,
 };
 
 int __init rfcomm_init_ttys(void)
@@ -1146,7 +1127,7 @@ int __init rfcomm_init_ttys(void)
        rfcomm_tty_driver->subtype      = SERIAL_TYPE_NORMAL;
        rfcomm_tty_driver->flags        = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
        rfcomm_tty_driver->init_termios = tty_std_termios;
-       rfcomm_tty_driver->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL | CLOCAL;
+       rfcomm_tty_driver->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL;
        rfcomm_tty_driver->init_termios.c_lflag &= ~ICANON;
        tty_set_operations(rfcomm_tty_driver, &rfcomm_ops);
 
index e7bd4eea575cff782401430147c9cdcc1a1ad6b0..96bd388d93a4aae145bfb207d26fbc3a90b9e0a8 100644 (file)
@@ -176,8 +176,13 @@ static int sco_connect(struct sock *sk)
        else
                type = SCO_LINK;
 
-       hcon = hci_connect(hdev, type, dst, BDADDR_BREDR, BT_SECURITY_LOW,
-                          HCI_AT_NO_BONDING);
+       if (sco_pi(sk)->setting == BT_VOICE_TRANSPARENT &&
+           (!lmp_transp_capable(hdev) || !lmp_esco_capable(hdev))) {
+               err = -EOPNOTSUPP;
+               goto done;
+       }
+
+       hcon = hci_connect_sco(hdev, type, dst, sco_pi(sk)->setting);
        if (IS_ERR(hcon)) {
                err = PTR_ERR(hcon);
                goto done;
@@ -417,6 +422,8 @@ static struct sock *sco_sock_alloc(struct net *net, struct socket *sock, int pro
        sk->sk_protocol = proto;
        sk->sk_state    = BT_OPEN;
 
+       sco_pi(sk)->setting = BT_VOICE_CVSD_16BIT;
+
        setup_timer(&sk->sk_timer, sco_sock_timeout, (unsigned long)sk);
 
        bt_sock_link(&sco_sk_list, sk);
@@ -652,7 +659,7 @@ static int sco_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
        return err;
 }
 
-static void sco_conn_defer_accept(struct hci_conn *conn, int mask)
+static void sco_conn_defer_accept(struct hci_conn *conn, u16 setting)
 {
        struct hci_dev *hdev = conn->hdev;
 
@@ -664,11 +671,7 @@ static void sco_conn_defer_accept(struct hci_conn *conn, int mask)
                struct hci_cp_accept_conn_req cp;
 
                bacpy(&cp.bdaddr, &conn->dst);
-
-               if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
-                       cp.role = 0x00; /* Become master */
-               else
-                       cp.role = 0x01; /* Remain slave */
+               cp.role = 0x00; /* Ignored */
 
                hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
        } else {
@@ -679,9 +682,21 @@ static void sco_conn_defer_accept(struct hci_conn *conn, int mask)
 
                cp.tx_bandwidth   = __constant_cpu_to_le32(0x00001f40);
                cp.rx_bandwidth   = __constant_cpu_to_le32(0x00001f40);
-               cp.max_latency    = __constant_cpu_to_le16(0xffff);
-               cp.content_format = cpu_to_le16(hdev->voice_setting);
-               cp.retrans_effort = 0xff;
+               cp.content_format = cpu_to_le16(setting);
+
+               switch (setting & SCO_AIRMODE_MASK) {
+               case SCO_AIRMODE_TRANSP:
+                       if (conn->pkt_type & ESCO_2EV3)
+                               cp.max_latency = __constant_cpu_to_le16(0x0008);
+                       else
+                               cp.max_latency = __constant_cpu_to_le16(0x000D);
+                       cp.retrans_effort = 0x02;
+                       break;
+               case SCO_AIRMODE_CVSD:
+                       cp.max_latency = __constant_cpu_to_le16(0xffff);
+                       cp.retrans_effort = 0xff;
+                       break;
+               }
 
                hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
                             sizeof(cp), &cp);
@@ -698,7 +713,7 @@ static int sco_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
 
        if (sk->sk_state == BT_CONNECT2 &&
            test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
-               sco_conn_defer_accept(pi->conn->hcon, 0);
+               sco_conn_defer_accept(pi->conn->hcon, pi->setting);
                sk->sk_state = BT_CONFIG;
                msg->msg_namelen = 0;
 
@@ -714,7 +729,8 @@ static int sco_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
 static int sco_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
 {
        struct sock *sk = sock->sk;
-       int err = 0;
+       int len, err = 0;
+       struct bt_voice voice;
        u32 opt;
 
        BT_DBG("sk %p", sk);
@@ -740,6 +756,31 @@ static int sco_sock_setsockopt(struct socket *sock, int level, int optname, char
                        clear_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);
                break;
 
+       case BT_VOICE:
+               if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND &&
+                   sk->sk_state != BT_CONNECT2) {
+                       err = -EINVAL;
+                       break;
+               }
+
+               voice.setting = sco_pi(sk)->setting;
+
+               len = min_t(unsigned int, sizeof(voice), optlen);
+               if (copy_from_user((char *) &voice, optval, len)) {
+                       err = -EFAULT;
+                       break;
+               }
+
+               /* Explicitly check for these values */
+               if (voice.setting != BT_VOICE_TRANSPARENT &&
+                   voice.setting != BT_VOICE_CVSD_16BIT) {
+                       err = -EINVAL;
+                       break;
+               }
+
+               sco_pi(sk)->setting = voice.setting;
+               break;
+
        default:
                err = -ENOPROTOOPT;
                break;
@@ -765,7 +806,9 @@ static int sco_sock_getsockopt_old(struct socket *sock, int optname, char __user
 
        switch (optname) {
        case SCO_OPTIONS:
-               if (sk->sk_state != BT_CONNECTED) {
+               if (sk->sk_state != BT_CONNECTED &&
+                   !(sk->sk_state == BT_CONNECT2 &&
+                     test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))) {
                        err = -ENOTCONN;
                        break;
                }
@@ -781,7 +824,9 @@ static int sco_sock_getsockopt_old(struct socket *sock, int optname, char __user
                break;
 
        case SCO_CONNINFO:
-               if (sk->sk_state != BT_CONNECTED) {
+               if (sk->sk_state != BT_CONNECTED &&
+                   !(sk->sk_state == BT_CONNECT2 &&
+                     test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))) {
                        err = -ENOTCONN;
                        break;
                }
@@ -809,6 +854,7 @@ static int sco_sock_getsockopt(struct socket *sock, int level, int optname, char
 {
        struct sock *sk = sock->sk;
        int len, err = 0;
+       struct bt_voice voice;
 
        BT_DBG("sk %p", sk);
 
@@ -834,6 +880,15 @@ static int sco_sock_getsockopt(struct socket *sock, int level, int optname, char
 
                break;
 
+       case BT_VOICE:
+               voice.setting = sco_pi(sk)->setting;
+
+               len = min_t(unsigned int, len, sizeof(voice));
+               if (copy_to_user(optval, (char *)&voice, len))
+                       err = -EFAULT;
+
+               break;
+
        default:
                err = -ENOPROTOOPT;
                break;
index 69363bd37f64ae016441df793e0f66ea3487bbb8..0feaaa0d37d11979999a898fe297f0e31397c07b 100644 (file)
@@ -245,22 +245,22 @@ fail:
 int br_netpoll_enable(struct net_bridge_port *p, gfp_t gfp)
 {
        struct netpoll *np;
-       int err = 0;
+       int err;
+
+       if (!p->br->dev->npinfo)
+               return 0;
 
        np = kzalloc(sizeof(*p->np), gfp);
-       err = -ENOMEM;
        if (!np)
-               goto out;
+               return -ENOMEM;
 
        err = __netpoll_setup(np, p->dev, gfp);
        if (err) {
                kfree(np);
-               goto out;
+               return err;
        }
 
        p->np = np;
-
-out:
        return err;
 }
 
index 60aca9109a508d5a75c5b34befbdca8c7364fe52..ffd5874f25920a94c74f5d97ebf4a0e2aa77f48d 100644 (file)
@@ -161,7 +161,7 @@ void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr)
        if (!pv)
                return;
 
-       for_each_set_bit_from(vid, pv->vlan_bitmap, BR_VLAN_BITMAP_LEN) {
+       for_each_set_bit_from(vid, pv->vlan_bitmap, VLAN_N_VID) {
                f = __br_fdb_get(br, br->dev->dev_addr, vid);
                if (f && f->is_local && !f->dst)
                        fdb_delete(br, f);
@@ -730,7 +730,7 @@ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
                /* VID was specified, so use it. */
                err = __br_fdb_add(ndm, p, addr, nlh_flags, vid);
        } else {
-               if (!pv || bitmap_empty(pv->vlan_bitmap, BR_VLAN_BITMAP_LEN)) {
+               if (!pv || bitmap_empty(pv->vlan_bitmap, VLAN_N_VID)) {
                        err = __br_fdb_add(ndm, p, addr, nlh_flags, 0);
                        goto out;
                }
@@ -739,7 +739,7 @@ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
                 * specify a VLAN.  To be nice, add/update entry for every
                 * vlan on this port.
                 */
-               for_each_set_bit(vid, pv->vlan_bitmap, BR_VLAN_BITMAP_LEN) {
+               for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) {
                        err = __br_fdb_add(ndm, p, addr, nlh_flags, vid);
                        if (err)
                                goto out;
@@ -817,7 +817,7 @@ int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
 
                err = __br_fdb_delete(p, addr, vid);
        } else {
-               if (!pv || bitmap_empty(pv->vlan_bitmap, BR_VLAN_BITMAP_LEN)) {
+               if (!pv || bitmap_empty(pv->vlan_bitmap, VLAN_N_VID)) {
                        err = __br_fdb_delete(p, addr, 0);
                        goto out;
                }
@@ -827,7 +827,7 @@ int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
                 * vlan on this port.
                 */
                err = -ENOENT;
-               for_each_set_bit(vid, pv->vlan_bitmap, BR_VLAN_BITMAP_LEN) {
+               for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) {
                        err &= __br_fdb_delete(p, addr, vid);
                }
        }
index 5623be6b9ecda3f77d62cb2db7fc5e9aef2bd65c..aa6c9a8ba32a8df827e0749c6c4f870f6c3f2362 100644 (file)
@@ -363,7 +363,8 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
        if (err)
                goto err2;
 
-       if (br_netpoll_info(br) && ((err = br_netpoll_enable(p, GFP_KERNEL))))
+       err = br_netpoll_enable(p, GFP_KERNEL);
+       if (err)
                goto err3;
 
        err = netdev_master_upper_dev_link(dev, br->dev);
index 0daae3ec2355543bce3600001db5fd32cf6d6ff7..e4d5cd43b7fbd2596ab9c785e91273ba64626f97 100644 (file)
@@ -61,7 +61,8 @@ static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
 
        for (i = 0; i < mdb->max; i++) {
                struct net_bridge_mdb_entry *mp;
-               struct net_bridge_port_group *p, **pp;
+               struct net_bridge_port_group *p;
+               struct net_bridge_port_group __rcu **pp;
                struct net_bridge_port *port;
 
                hlist_for_each_entry_rcu(mp, &mdb->mhash[i], hlist[mdb->ver]) {
index 1fc30abd3a523912376ce01fcae932f3b6b8c746..b9259efa636ef8fe2b79ec25de49a16efa9034db 100644 (file)
@@ -132,7 +132,7 @@ static int br_fill_ifinfo(struct sk_buff *skb,
                else
                        pv = br_get_vlan_info(br);
 
-               if (!pv || bitmap_empty(pv->vlan_bitmap, BR_VLAN_BITMAP_LEN))
+               if (!pv || bitmap_empty(pv->vlan_bitmap, VLAN_N_VID))
                        goto done;
 
                af = nla_nest_start(skb, IFLA_AF_SPEC);
@@ -140,7 +140,7 @@ static int br_fill_ifinfo(struct sk_buff *skb,
                        goto nla_put_failure;
 
                pvid = br_get_pvid(pv);
-               for_each_set_bit(vid, pv->vlan_bitmap, BR_VLAN_BITMAP_LEN) {
+               for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) {
                        vinfo.vid = vid;
                        vinfo.flags = 0;
                        if (vid == pvid)
index 3a3f371b28415c110e4db6a21069f9153dafe904..2998dd1769a055b244c21a4fb9d9579b04099222 100644 (file)
@@ -102,6 +102,11 @@ static int br_device_event(struct notifier_block *unused, unsigned long event, v
        case NETDEV_PRE_TYPE_CHANGE:
                /* Forbid underlaying device to change its type. */
                return NOTIFY_BAD;
+
+       case NETDEV_RESEND_IGMP:
+               /* Propagate to master device */
+               call_netdevice_notifiers(event, br->dev);
+               break;
        }
 
        /* Events that may cause spanning tree to refresh */
index 2f7da41851bffc64d20c0de3db6977a800718747..d41283c57952104c57f0eb674e7c6294599b32dc 100644 (file)
@@ -334,11 +334,6 @@ extern void br_dev_delete(struct net_device *dev, struct list_head *list);
 extern netdev_tx_t br_dev_xmit(struct sk_buff *skb,
                               struct net_device *dev);
 #ifdef CONFIG_NET_POLL_CONTROLLER
-static inline struct netpoll_info *br_netpoll_info(struct net_bridge *br)
-{
-       return br->dev->npinfo;
-}
-
 static inline void br_netpoll_send_skb(const struct net_bridge_port *p,
                                       struct sk_buff *skb)
 {
@@ -351,11 +346,6 @@ static inline void br_netpoll_send_skb(const struct net_bridge_port *p,
 extern int br_netpoll_enable(struct net_bridge_port *p, gfp_t gfp);
 extern void br_netpoll_disable(struct net_bridge_port *p);
 #else
-static inline struct netpoll_info *br_netpoll_info(struct net_bridge *br)
-{
-       return NULL;
-}
-
 static inline void br_netpoll_send_skb(const struct net_bridge_port *p,
                                       struct sk_buff *skb)
 {
@@ -476,7 +466,7 @@ extern void br_multicast_free_pg(struct rcu_head *head);
 extern struct net_bridge_port_group *br_multicast_new_port_group(
                                struct net_bridge_port *port,
                                struct br_ip *group,
-                               struct net_bridge_port_group *next,
+                               struct net_bridge_port_group __rcu *next,
                                unsigned char state);
 extern void br_mdb_init(void);
 extern void br_mdb_uninit(void);
index bd58b45f5f901fd4c6a3ad00abe068baeba8d898..9a9ffe7e4019741d75456e3b9afdba21c44785b3 100644 (file)
@@ -108,7 +108,7 @@ static int __vlan_del(struct net_port_vlans *v, u16 vid)
 
        clear_bit(vid, v->vlan_bitmap);
        v->num_vlans--;
-       if (bitmap_empty(v->vlan_bitmap, BR_VLAN_BITMAP_LEN)) {
+       if (bitmap_empty(v->vlan_bitmap, VLAN_N_VID)) {
                if (v->port_idx)
                        rcu_assign_pointer(v->parent.port->vlan_info, NULL);
                else
@@ -122,7 +122,7 @@ static void __vlan_flush(struct net_port_vlans *v)
 {
        smp_wmb();
        v->pvid = 0;
-       bitmap_zero(v->vlan_bitmap, BR_VLAN_BITMAP_LEN);
+       bitmap_zero(v->vlan_bitmap, VLAN_N_VID);
        if (v->port_idx)
                rcu_assign_pointer(v->parent.port->vlan_info, NULL);
        else
index eb0a46a49bd42351d23878f118384ab09a8d2ee8..4a5df7b1cc9ff5652185e0248cf5a4df4006effd 100644 (file)
@@ -290,7 +290,7 @@ int ceph_msgr_init(void)
        if (ceph_msgr_slab_init())
                return -ENOMEM;
 
-       ceph_msgr_wq = alloc_workqueue("ceph-msgr", WQ_NON_REENTRANT, 0);
+       ceph_msgr_wq = alloc_workqueue("ceph-msgr", 0, 0);
        if (ceph_msgr_wq)
                return 0;
 
@@ -409,7 +409,7 @@ static void ceph_sock_write_space(struct sock *sk)
         * and net/core/stream.c:sk_stream_write_space().
         */
        if (con_flag_test(con, CON_FLAG_WRITE_PENDING)) {
-               if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
+               if (sk_stream_is_writeable(sk)) {
                        dout("%s %p queueing write work\n", __func__, con);
                        clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
                        queue_con(con);
index dd47889adc4aec94941d6f17105878ebe235db8f..dbc0a7392d67b67b276e985453aa5e6d5ae8ee9f 100644 (file)
@@ -2129,6 +2129,8 @@ int ceph_osdc_start_request(struct ceph_osd_client *osdc,
                        dout("osdc_start_request failed map, "
                                " will retry %lld\n", req->r_tid);
                        rc = 0;
+               } else {
+                       __unregister_request(osdc, req);
                }
                goto out_unlock;
        }
index 8ab48cd8955971ec7db7e60464a946380a7be49a..af814e764206f6d4221b91f03dc2e3bfa1da05f2 100644 (file)
@@ -48,6 +48,7 @@
 #include <linux/highmem.h>
 #include <linux/spinlock.h>
 #include <linux/slab.h>
+#include <linux/pagemap.h>
 
 #include <net/protocol.h>
 #include <linux/skbuff.h>
@@ -573,6 +574,77 @@ fault:
 }
 EXPORT_SYMBOL(skb_copy_datagram_from_iovec);
 
+/**
+ *     zerocopy_sg_from_iovec - Build a zerocopy datagram from an iovec
+ *     @skb: buffer to copy
+ *     @from: io vector to copy to
+ *     @offset: offset in the io vector to start copying from
+ *     @count: amount of vectors to copy to buffer from
+ *
+ *     The function will first copy up to headlen, and then pin the userspace
+ *     pages and build frags through them.
+ *
+ *     Returns 0, -EFAULT or -EMSGSIZE.
+ *     Note: the iovec is not modified during the copy
+ */
+int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from,
+                                 int offset, size_t count)
+{
+       int len = iov_length(from, count) - offset;
+       int copy = min_t(int, skb_headlen(skb), len);
+       int size;
+       int i = 0;
+
+       /* copy up to skb headlen */
+       if (skb_copy_datagram_from_iovec(skb, 0, from, offset, copy))
+               return -EFAULT;
+
+       if (len == copy)
+               return 0;
+
+       offset += copy;
+       while (count--) {
+               struct page *page[MAX_SKB_FRAGS];
+               int num_pages;
+               unsigned long base;
+               unsigned long truesize;
+
+               /* Skip over from offset and copied */
+               if (offset >= from->iov_len) {
+                       offset -= from->iov_len;
+                       ++from;
+                       continue;
+               }
+               len = from->iov_len - offset;
+               base = (unsigned long)from->iov_base + offset;
+               size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
+               if (i + size > MAX_SKB_FRAGS)
+                       return -EMSGSIZE;
+               num_pages = get_user_pages_fast(base, size, 0, &page[i]);
+               if (num_pages != size) {
+                       release_pages(&page[i], num_pages, 0);
+                       return -EFAULT;
+               }
+               truesize = size * PAGE_SIZE;
+               skb->data_len += len;
+               skb->len += len;
+               skb->truesize += truesize;
+               atomic_add(truesize, &skb->sk->sk_wmem_alloc);
+               while (len) {
+                       int off = base & ~PAGE_MASK;
+                       int size = min_t(int, len, PAGE_SIZE - off);
+                       skb_fill_page_desc(skb, i, page[i], off, size);
+                       base += size;
+                       len -= size;
+                       i++;
+               }
+               offset = 0;
+               ++from;
+       }
+       return 0;
+}
+EXPORT_SYMBOL(zerocopy_sg_from_iovec);
+
 static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
                                      u8 __user *to, int len,
                                      __wsum *csump)
index 26755dd40daa82ff5db7dcb994cabb3d6bf6252f..1ed2b66a10a6009c219292b17b4046e3e970f15b 100644 (file)
@@ -174,7 +174,7 @@ static DEFINE_SPINLOCK(napi_hash_lock);
 static unsigned int napi_gen_id;
 static DEFINE_HASHTABLE(napi_hash, 8);
 
-seqcount_t devnet_rename_seq;
+static seqcount_t devnet_rename_seq;
 
 static inline void dev_base_seq_inc(struct net *net)
 {
@@ -1691,13 +1691,13 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
                kfree_skb(skb);
                return NET_RX_DROP;
        }
-       skb_scrub_packet(skb);
        skb->protocol = eth_type_trans(skb, dev);
 
        /* eth_type_trans() can set pkt_type.
-        * clear pkt_type _after_ calling eth_type_trans()
+        * call skb_scrub_packet() after it to clear pkt_type _after_ calling
+        * eth_type_trans().
         */
-       skb->pkt_type = PACKET_HOST;
+       skb_scrub_packet(skb);
 
        return netif_rx(skb);
 }
@@ -4988,6 +4988,24 @@ int dev_change_carrier(struct net_device *dev, bool new_carrier)
 }
 EXPORT_SYMBOL(dev_change_carrier);
 
+/**
+ *     dev_get_phys_port_id - Get device physical port ID
+ *     @dev: device
+ *     @ppid: port ID
+ *
+ *     Get device physical port ID
+ */
+int dev_get_phys_port_id(struct net_device *dev,
+                        struct netdev_phys_port_id *ppid)
+{
+       const struct net_device_ops *ops = dev->netdev_ops;
+
+       if (!ops->ndo_get_phys_port_id)
+               return -EOPNOTSUPP;
+       return ops->ndo_get_phys_port_id(dev, ppid);
+}
+EXPORT_SYMBOL(dev_get_phys_port_id);
+
 /**
  *     dev_new_index   -       allocate an ifindex
  *     @net: the applicable net namespace
index 21735440c44a85b611308e6b3c97bef77452c9a1..2e654138433c0521cee0b07992e6aa7535e970b0 100644 (file)
@@ -33,6 +33,9 @@ int fib_default_rule_add(struct fib_rules_ops *ops,
        r->flags = flags;
        r->fr_net = hold_net(ops->fro_net);
 
+       r->suppress_prefixlen = -1;
+       r->suppress_ifgroup = -1;
+
        /* The lock is not required here, the list in unreacheable
         * at the moment this function is called */
        list_add_tail(&r->list, &ops->rules_list);
@@ -226,6 +229,9 @@ jumped:
                else
                        err = ops->action(rule, fl, flags, arg);
 
+               if (!err && ops->suppress && ops->suppress(rule, arg))
+                       continue;
+
                if (err != -EAGAIN) {
                        if ((arg->flags & FIB_LOOKUP_NOREF) ||
                            likely(atomic_inc_not_zero(&rule->refcnt))) {
@@ -337,6 +343,15 @@ static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh)
        rule->action = frh->action;
        rule->flags = frh->flags;
        rule->table = frh_get_table(frh, tb);
+       if (tb[FRA_SUPPRESS_PREFIXLEN])
+               rule->suppress_prefixlen = nla_get_u32(tb[FRA_SUPPRESS_PREFIXLEN]);
+       else
+               rule->suppress_prefixlen = -1;
+
+       if (tb[FRA_SUPPRESS_IFGROUP])
+               rule->suppress_ifgroup = nla_get_u32(tb[FRA_SUPPRESS_IFGROUP]);
+       else
+               rule->suppress_ifgroup = -1;
 
        if (!tb[FRA_PRIORITY] && ops->default_pref)
                rule->pref = ops->default_pref(ops);
@@ -523,6 +538,8 @@ static inline size_t fib_rule_nlmsg_size(struct fib_rules_ops *ops,
                         + nla_total_size(IFNAMSIZ) /* FRA_OIFNAME */
                         + nla_total_size(4) /* FRA_PRIORITY */
                         + nla_total_size(4) /* FRA_TABLE */
+                        + nla_total_size(4) /* FRA_SUPPRESS_PREFIXLEN */
+                        + nla_total_size(4) /* FRA_SUPPRESS_IFGROUP */
                         + nla_total_size(4) /* FRA_FWMARK */
                         + nla_total_size(4); /* FRA_FWMASK */
 
@@ -548,6 +565,8 @@ static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
        frh->table = rule->table;
        if (nla_put_u32(skb, FRA_TABLE, rule->table))
                goto nla_put_failure;
+       if (nla_put_u32(skb, FRA_SUPPRESS_PREFIXLEN, rule->suppress_prefixlen))
+               goto nla_put_failure;
        frh->res1 = 0;
        frh->res2 = 0;
        frh->action = rule->action;
@@ -580,6 +599,12 @@ static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
            (rule->target &&
             nla_put_u32(skb, FRA_GOTO, rule->target)))
                goto nla_put_failure;
+
+       if (rule->suppress_ifgroup != -1) {
+               if (nla_put_u32(skb, FRA_SUPPRESS_IFGROUP, rule->suppress_ifgroup))
+                       goto nla_put_failure;
+       }
+
        if (ops->fill(rule, skb, frh) < 0)
                goto nla_put_failure;
 
index b84a1b155bc133e39d0f118b61be85fb1d2ed02f..159737cac76c3d180be308b64e5e37bf34cc51f0 100644 (file)
@@ -140,7 +140,11 @@ ipv6:
                break;
        }
        case IPPROTO_IPIP:
-               goto again;
+               proto = htons(ETH_P_IP);
+               goto ip;
+       case IPPROTO_IPV6:
+               proto = htons(ETH_P_IPV6);
+               goto ipv6;
        default:
                break;
        }
index de178e462682af6c97dbfc4326df85942ec25174..b77eeecc00115eefd2395648e71cb337ee53757a 100644 (file)
@@ -212,3 +212,27 @@ out_fault:
        goto out;
 }
 EXPORT_SYMBOL(csum_partial_copy_fromiovecend);
+
+unsigned long iov_pages(const struct iovec *iov, int offset,
+                       unsigned long nr_segs)
+{
+       unsigned long seg, base;
+       int pages = 0, len, size;
+
+       while (nr_segs && (offset >= iov->iov_len)) {
+               offset -= iov->iov_len;
+               ++iov;
+               --nr_segs;
+       }
+
+       for (seg = 0; seg < nr_segs; seg++) {
+               base = (unsigned long)iov[seg].iov_base + offset;
+               len = iov[seg].iov_len - offset;
+               size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
+               pages += size;
+               offset = 0;
+       }
+
+       return pages;
+}
+EXPORT_SYMBOL(iov_pages);
index 981fed397d1d8af3b0d607a1ac260ad0cc847ade..8826b0d1e0cc4f6d9db1ebf7d7c54811432c008d 100644 (file)
@@ -334,6 +334,27 @@ static ssize_t store_group(struct device *dev, struct device_attribute *attr,
        return netdev_store(dev, attr, buf, len, change_group);
 }
 
+static ssize_t show_phys_port_id(struct device *dev,
+                                struct device_attribute *attr, char *buf)
+{
+       struct net_device *netdev = to_net_dev(dev);
+       ssize_t ret = -EINVAL;
+
+       if (!rtnl_trylock())
+               return restart_syscall();
+
+       if (dev_isalive(netdev)) {
+               struct netdev_phys_port_id ppid;
+
+               ret = dev_get_phys_port_id(netdev, &ppid);
+               if (!ret)
+                       ret = sprintf(buf, "%*phN\n", ppid.id_len, ppid.id);
+       }
+       rtnl_unlock();
+
+       return ret;
+}
+
 static struct device_attribute net_class_attributes[] = {
        __ATTR(addr_assign_type, S_IRUGO, show_addr_assign_type, NULL),
        __ATTR(addr_len, S_IRUGO, show_addr_len, NULL),
@@ -355,6 +376,7 @@ static struct device_attribute net_class_attributes[] = {
        __ATTR(tx_queue_len, S_IRUGO | S_IWUSR, show_tx_queue_len,
               store_tx_queue_len),
        __ATTR(netdev_group, S_IRUGO | S_IWUSR, show_group, store_group),
+       __ATTR(phys_port_id, S_IRUGO, show_phys_port_id, NULL),
        {}
 };
 
index e533259dce3ccad8a04cc095fa0056b8693fde70..d9cd627e6a16a55bb9b6e8ec1619a0291e1621ab 100644 (file)
 
 #define PRIOMAP_MIN_SZ         128
 
-static inline struct cgroup_netprio_state *cgrp_netprio_state(struct cgroup *cgrp)
-{
-       return container_of(cgroup_subsys_state(cgrp, net_prio_subsys_id),
-                           struct cgroup_netprio_state, css);
-}
-
 /*
  * Extend @dev->priomap so that it's large enough to accomodate
  * @target_idx.  @dev->priomap.priomap_len > @target_idx after successful
@@ -87,67 +81,70 @@ static int extend_netdev_table(struct net_device *dev, u32 target_idx)
 
 /**
  * netprio_prio - return the effective netprio of a cgroup-net_device pair
- * @cgrp: cgroup part of the target pair
+ * @css: css part of the target pair
  * @dev: net_device part of the target pair
  *
  * Should be called under RCU read or rtnl lock.
  */
-static u32 netprio_prio(struct cgroup *cgrp, struct net_device *dev)
+static u32 netprio_prio(struct cgroup_subsys_state *css, struct net_device *dev)
 {
        struct netprio_map *map = rcu_dereference_rtnl(dev->priomap);
+       int id = css->cgroup->id;
 
-       if (map && cgrp->id < map->priomap_len)
-               return map->priomap[cgrp->id];
+       if (map && id < map->priomap_len)
+               return map->priomap[id];
        return 0;
 }
 
 /**
  * netprio_set_prio - set netprio on a cgroup-net_device pair
- * @cgrp: cgroup part of the target pair
+ * @css: css part of the target pair
  * @dev: net_device part of the target pair
  * @prio: prio to set
  *
- * Set netprio to @prio on @cgrp-@dev pair.  Should be called under rtnl
+ * Set netprio to @prio on @css-@dev pair.  Should be called under rtnl
  * lock and may fail under memory pressure for non-zero @prio.
  */
-static int netprio_set_prio(struct cgroup *cgrp, struct net_device *dev,
-                           u32 prio)
+static int netprio_set_prio(struct cgroup_subsys_state *css,
+                           struct net_device *dev, u32 prio)
 {
        struct netprio_map *map;
+       int id = css->cgroup->id;
        int ret;
 
        /* avoid extending priomap for zero writes */
        map = rtnl_dereference(dev->priomap);
-       if (!prio && (!map || map->priomap_len <= cgrp->id))
+       if (!prio && (!map || map->priomap_len <= id))
                return 0;
 
-       ret = extend_netdev_table(dev, cgrp->id);
+       ret = extend_netdev_table(dev, id);
        if (ret)
                return ret;
 
        map = rtnl_dereference(dev->priomap);
-       map->priomap[cgrp->id] = prio;
+       map->priomap[id] = prio;
        return 0;
 }
 
-static struct cgroup_subsys_state *cgrp_css_alloc(struct cgroup *cgrp)
+static struct cgroup_subsys_state *
+cgrp_css_alloc(struct cgroup_subsys_state *parent_css)
 {
-       struct cgroup_netprio_state *cs;
+       struct cgroup_subsys_state *css;
 
-       cs = kzalloc(sizeof(*cs), GFP_KERNEL);
-       if (!cs)
+       css = kzalloc(sizeof(*css), GFP_KERNEL);
+       if (!css)
                return ERR_PTR(-ENOMEM);
 
-       return &cs->css;
+       return css;
 }
 
-static int cgrp_css_online(struct cgroup *cgrp)
+static int cgrp_css_online(struct cgroup_subsys_state *css)
 {
-       struct cgroup *parent = cgrp->parent;
+       struct cgroup_subsys_state *parent_css = css_parent(css);
        struct net_device *dev;
        int ret = 0;
 
-       if (!parent)
+       if (!parent_css)
                return 0;
 
        rtnl_lock();
@@ -156,9 +153,9 @@ static int cgrp_css_online(struct cgroup *cgrp)
         * onlining, there is no need to clear them on offline.
         */
        for_each_netdev(&init_net, dev) {
-               u32 prio = netprio_prio(parent, dev);
+               u32 prio = netprio_prio(parent_css, dev);
 
-               ret = netprio_set_prio(cgrp, dev, prio);
+               ret = netprio_set_prio(css, dev, prio);
                if (ret)
                        break;
        }
@@ -166,29 +163,29 @@ static int cgrp_css_online(struct cgroup *cgrp)
        return ret;
 }
 
-static void cgrp_css_free(struct cgroup *cgrp)
+static void cgrp_css_free(struct cgroup_subsys_state *css)
 {
-       kfree(cgrp_netprio_state(cgrp));
+       kfree(css);
 }
 
-static u64 read_prioidx(struct cgroup *cgrp, struct cftype *cft)
+static u64 read_prioidx(struct cgroup_subsys_state *css, struct cftype *cft)
 {
-       return cgrp->id;
+       return css->cgroup->id;
 }
 
-static int read_priomap(struct cgroup *cont, struct cftype *cft,
+static int read_priomap(struct cgroup_subsys_state *css, struct cftype *cft,
                        struct cgroup_map_cb *cb)
 {
        struct net_device *dev;
 
        rcu_read_lock();
        for_each_netdev_rcu(&init_net, dev)
-               cb->fill(cb, dev->name, netprio_prio(cont, dev));
+               cb->fill(cb, dev->name, netprio_prio(css, dev));
        rcu_read_unlock();
        return 0;
 }
 
-static int write_priomap(struct cgroup *cgrp, struct cftype *cft,
+static int write_priomap(struct cgroup_subsys_state *css, struct cftype *cft,
                         const char *buffer)
 {
        char devname[IFNAMSIZ + 1];
@@ -205,7 +202,7 @@ static int write_priomap(struct cgroup *cgrp, struct cftype *cft,
 
        rtnl_lock();
 
-       ret = netprio_set_prio(cgrp, dev, prio);
+       ret = netprio_set_prio(css, dev, prio);
 
        rtnl_unlock();
        dev_put(dev);
@@ -221,12 +218,13 @@ static int update_netprio(const void *v, struct file *file, unsigned n)
        return 0;
 }
 
-static void net_prio_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
+static void net_prio_attach(struct cgroup_subsys_state *css,
+                           struct cgroup_taskset *tset)
 {
        struct task_struct *p;
        void *v;
 
-       cgroup_taskset_for_each(p, cgrp, tset) {
+       cgroup_taskset_for_each(p, css, tset) {
                task_lock(p);
                v = (void *)(unsigned long)task_netprioidx(p);
                iterate_fd(p->files, 0, update_netprio, v);
index 9640972ec50e5658eac7493fb0da180a0de353c9..261357a663001ccf98a92ba5f6d0f6ad141c7ce3 100644 (file)
 #include <net/net_namespace.h>
 #include <net/checksum.h>
 #include <net/ipv6.h>
+#include <net/udp.h>
+#include <net/ip6_checksum.h>
 #include <net/addrconf.h>
 #ifdef CONFIG_XFRM
 #include <net/xfrm.h>
 #define F_QUEUE_MAP_RND (1<<13)        /* queue map Random */
 #define F_QUEUE_MAP_CPU (1<<14)        /* queue map mirrors smp_processor_id() */
 #define F_NODE          (1<<15)        /* Node memory alloc*/
+#define F_UDPCSUM       (1<<16)        /* Include UDP checksum */
 
 /* Thread control flag bits */
 #define T_STOP        (1<<0)   /* Stop run */
@@ -631,6 +634,9 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
        if (pkt_dev->flags & F_UDPDST_RND)
                seq_printf(seq, "UDPDST_RND  ");
 
+       if (pkt_dev->flags & F_UDPCSUM)
+               seq_printf(seq, "UDPCSUM  ");
+
        if (pkt_dev->flags & F_MPLS_RND)
                seq_printf(seq,  "MPLS_RND  ");
 
@@ -1228,6 +1234,12 @@ static ssize_t pktgen_if_write(struct file *file,
                else if (strcmp(f, "!NODE_ALLOC") == 0)
                        pkt_dev->flags &= ~F_NODE;
 
+               else if (strcmp(f, "UDPCSUM") == 0)
+                       pkt_dev->flags |= F_UDPCSUM;
+
+               else if (strcmp(f, "!UDPCSUM") == 0)
+                       pkt_dev->flags &= ~F_UDPCSUM;
+
                else {
                        sprintf(pg_result,
                                "Flag -:%s:- unknown\nAvailable flags, (prepend ! to un-set flag):\n%s",
@@ -2733,7 +2745,7 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
        udph->source = htons(pkt_dev->cur_udp_src);
        udph->dest = htons(pkt_dev->cur_udp_dst);
        udph->len = htons(datalen + 8); /* DATA + udphdr */
-       udph->check = 0;        /* No checksum */
+       udph->check = 0;
 
        iph->ihl = 5;
        iph->version = 4;
@@ -2747,11 +2759,28 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
        iph->frag_off = 0;
        iplen = 20 + 8 + datalen;
        iph->tot_len = htons(iplen);
-       iph->check = 0;
-       iph->check = ip_fast_csum((void *)iph, iph->ihl);
+       ip_send_check(iph);
        skb->protocol = protocol;
        skb->dev = odev;
        skb->pkt_type = PACKET_HOST;
+
+       if (!(pkt_dev->flags & F_UDPCSUM)) {
+               skb->ip_summed = CHECKSUM_NONE;
+       } else if (odev->features & NETIF_F_V4_CSUM) {
+               skb->ip_summed = CHECKSUM_PARTIAL;
+               skb->csum = 0;
+               udp4_hwcsum(skb, udph->source, udph->dest);
+       } else {
+               __wsum csum = udp_csum(skb);
+
+               /* add protocol-dependent pseudo-header */
+               udph->check = csum_tcpudp_magic(udph->source, udph->dest,
+                                               datalen + 8, IPPROTO_UDP, csum);
+
+               if (udph->check == 0)
+                       udph->check = CSUM_MANGLED_0;
+       }
+
        pktgen_finalize_skb(pkt_dev, skb, datalen);
 
 #ifdef CONFIG_XFRM
@@ -2768,7 +2797,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
        struct sk_buff *skb = NULL;
        __u8 *eth;
        struct udphdr *udph;
-       int datalen;
+       int datalen, udplen;
        struct ipv6hdr *iph;
        __be16 protocol = htons(ETH_P_IPV6);
        __be32 *mpls;
@@ -2844,10 +2873,11 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
                net_info_ratelimited("increased datalen to %d\n", datalen);
        }
 
+       udplen = datalen + sizeof(struct udphdr);
        udph->source = htons(pkt_dev->cur_udp_src);
        udph->dest = htons(pkt_dev->cur_udp_dst);
-       udph->len = htons(datalen + sizeof(struct udphdr));
-       udph->check = 0;        /* No checksum */
+       udph->len = htons(udplen);
+       udph->check = 0;
 
        *(__be32 *) iph = htonl(0x60000000);    /* Version + flow */
 
@@ -2858,7 +2888,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
 
        iph->hop_limit = 32;
 
-       iph->payload_len = htons(sizeof(struct udphdr) + datalen);
+       iph->payload_len = htons(udplen);
        iph->nexthdr = IPPROTO_UDP;
 
        iph->daddr = pkt_dev->cur_in6_daddr;
@@ -2868,6 +2898,23 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
        skb->dev = odev;
        skb->pkt_type = PACKET_HOST;
 
+       if (!(pkt_dev->flags & F_UDPCSUM)) {
+               skb->ip_summed = CHECKSUM_NONE;
+       } else if (odev->features & NETIF_F_V6_CSUM) {
+               skb->ip_summed = CHECKSUM_PARTIAL;
+               skb->csum_start = skb_transport_header(skb) - skb->head;
+               skb->csum_offset = offsetof(struct udphdr, check);
+               udph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, udplen, IPPROTO_UDP, 0);
+       } else {
+               __wsum csum = udp_csum(skb);
+
+               /* add protocol-dependent pseudo-header */
+               udph->check = csum_ipv6_magic(&iph->saddr, &iph->daddr, udplen, IPPROTO_UDP, csum);
+
+               if (udph->check == 0)
+                       udph->check = CSUM_MANGLED_0;
+       }
+
        pktgen_finalize_skb(pkt_dev, skb, datalen);
 
        return skb;
index ca198c1d1d3047baededa2c1f2c6284b07010bfb..2a0e21de3060cddbc9cd657acb24ee84b748c4d1 100644 (file)
@@ -767,7 +767,8 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev,
               + rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */
               + rtnl_port_size(dev) /* IFLA_VF_PORTS + IFLA_PORT_SELF */
               + rtnl_link_get_size(dev) /* IFLA_LINKINFO */
-              + rtnl_link_get_af_size(dev); /* IFLA_AF_SPEC */
+              + rtnl_link_get_af_size(dev) /* IFLA_AF_SPEC */
+              + nla_total_size(MAX_PHYS_PORT_ID_LEN); /* IFLA_PHYS_PORT_ID */
 }
 
 static int rtnl_vf_ports_fill(struct sk_buff *skb, struct net_device *dev)
@@ -846,6 +847,24 @@ static int rtnl_port_fill(struct sk_buff *skb, struct net_device *dev)
        return 0;
 }
 
+static int rtnl_phys_port_id_fill(struct sk_buff *skb, struct net_device *dev)
+{
+       int err;
+       struct netdev_phys_port_id ppid;
+
+       err = dev_get_phys_port_id(dev, &ppid);
+       if (err) {
+               if (err == -EOPNOTSUPP)
+                       return 0;
+               return err;
+       }
+
+       if (nla_put(skb, IFLA_PHYS_PORT_ID, ppid.id_len, ppid.id))
+               return -EMSGSIZE;
+
+       return 0;
+}
+
 static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
                            int type, u32 pid, u32 seq, u32 change,
                            unsigned int flags, u32 ext_filter_mask)
@@ -913,6 +932,9 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
                        goto nla_put_failure;
        }
 
+       if (rtnl_phys_port_id_fill(skb, dev))
+               goto nla_put_failure;
+
        attr = nla_reserve(skb, IFLA_STATS,
                        sizeof(struct rtnl_link_stats));
        if (attr == NULL)
@@ -1113,6 +1135,7 @@ const struct nla_policy ifla_policy[IFLA_MAX+1] = {
        [IFLA_PROMISCUITY]      = { .type = NLA_U32 },
        [IFLA_NUM_TX_QUEUES]    = { .type = NLA_U32 },
        [IFLA_NUM_RX_QUEUES]    = { .type = NLA_U32 },
+       [IFLA_PHYS_PORT_ID]     = { .type = NLA_BINARY, .len = MAX_PHYS_PORT_ID_LEN },
 };
 EXPORT_SYMBOL(ifla_policy);
 
@@ -1844,10 +1867,10 @@ replay:
                else
                        err = register_netdevice(dev);
 
-               if (err < 0 && !IS_ERR(dev))
+               if (err < 0) {
                        free_netdev(dev);
-               if (err < 0)
                        goto out;
+               }
 
                err = rtnl_configure_link(dev, ifm);
                if (err < 0)
index 2c097c5a35dd8550d8a3041ae41e6afec35bf48b..5b6beba494a350cb28adfc7724487f1a13e6c011 100644 (file)
@@ -93,6 +93,7 @@
 
 #include <linux/capability.h>
 #include <linux/errno.h>
+#include <linux/errqueue.h>
 #include <linux/types.h>
 #include <linux/socket.h>
 #include <linux/in.h>
@@ -1575,6 +1576,25 @@ void sock_wfree(struct sk_buff *skb)
 }
 EXPORT_SYMBOL(sock_wfree);
 
+void skb_orphan_partial(struct sk_buff *skb)
+{
+       /* TCP stack sets skb->ooo_okay based on sk_wmem_alloc,
+        * so we do not completely orphan skb, but transfert all
+        * accounted bytes but one, to avoid unexpected reorders.
+        */
+       if (skb->destructor == sock_wfree
+#ifdef CONFIG_INET
+           || skb->destructor == tcp_wfree
+#endif
+               ) {
+               atomic_sub(skb->truesize - 1, &skb->sk->sk_wmem_alloc);
+               skb->truesize = 1;
+       } else {
+               skb_orphan(skb);
+       }
+}
+EXPORT_SYMBOL(skb_orphan_partial);
+
 /*
  * Read buffer destructor automatically called from kfree_skb.
  */
@@ -1721,24 +1741,23 @@ static long sock_wait_for_wmem(struct sock *sk, long timeo)
 
 struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
                                     unsigned long data_len, int noblock,
-                                    int *errcode)
+                                    int *errcode, int max_page_order)
 {
-       struct sk_buff *skb;
+       struct sk_buff *skb = NULL;
+       unsigned long chunk;
        gfp_t gfp_mask;
        long timeo;
        int err;
        int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+       struct page *page;
+       int i;
 
        err = -EMSGSIZE;
        if (npages > MAX_SKB_FRAGS)
                goto failure;
 
-       gfp_mask = sk->sk_allocation;
-       if (gfp_mask & __GFP_WAIT)
-               gfp_mask |= __GFP_REPEAT;
-
        timeo = sock_sndtimeo(sk, noblock);
-       while (1) {
+       while (!skb) {
                err = sock_error(sk);
                if (err != 0)
                        goto failure;
@@ -1747,50 +1766,52 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
                if (sk->sk_shutdown & SEND_SHUTDOWN)
                        goto failure;
 
-               if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
-                       skb = alloc_skb(header_len, gfp_mask);
-                       if (skb) {
-                               int i;
-
-                               /* No pages, we're done... */
-                               if (!data_len)
-                                       break;
-
-                               skb->truesize += data_len;
-                               skb_shinfo(skb)->nr_frags = npages;
-                               for (i = 0; i < npages; i++) {
-                                       struct page *page;
-
-                                       page = alloc_pages(sk->sk_allocation, 0);
-                                       if (!page) {
-                                               err = -ENOBUFS;
-                                               skb_shinfo(skb)->nr_frags = i;
-                                               kfree_skb(skb);
-                                               goto failure;
-                                       }
-
-                                       __skb_fill_page_desc(skb, i,
-                                                       page, 0,
-                                                       (data_len >= PAGE_SIZE ?
-                                                        PAGE_SIZE :
-                                                        data_len));
-                                       data_len -= PAGE_SIZE;
-                               }
+               if (atomic_read(&sk->sk_wmem_alloc) >= sk->sk_sndbuf) {
+                       set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
+                       set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
+                       err = -EAGAIN;
+                       if (!timeo)
+                               goto failure;
+                       if (signal_pending(current))
+                               goto interrupted;
+                       timeo = sock_wait_for_wmem(sk, timeo);
+                       continue;
+               }
 
-                               /* Full success... */
-                               break;
-                       }
-                       err = -ENOBUFS;
+               err = -ENOBUFS;
+               gfp_mask = sk->sk_allocation;
+               if (gfp_mask & __GFP_WAIT)
+                       gfp_mask |= __GFP_REPEAT;
+
+               skb = alloc_skb(header_len, gfp_mask);
+               if (!skb)
                        goto failure;
+
+               skb->truesize += data_len;
+
+               for (i = 0; npages > 0; i++) {
+                       int order = max_page_order;
+
+                       while (order) {
+                               if (npages >= 1 << order) {
+                                       page = alloc_pages(sk->sk_allocation |
+                                                          __GFP_COMP | __GFP_NOWARN,
+                                                          order);
+                                       if (page)
+                                               goto fill_page;
+                               }
+                               order--;
+                       }
+                       page = alloc_page(sk->sk_allocation);
+                       if (!page)
+                               goto failure;
+fill_page:
+                       chunk = min_t(unsigned long, data_len,
+                                     PAGE_SIZE << order);
+                       skb_fill_page_desc(skb, i, page, 0, chunk);
+                       data_len -= chunk;
+                       npages -= 1 << order;
                }
-               set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
-               set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
-               err = -EAGAIN;
-               if (!timeo)
-                       goto failure;
-               if (signal_pending(current))
-                       goto interrupted;
-               timeo = sock_wait_for_wmem(sk, timeo);
        }
 
        skb_set_owner_w(skb, sk);
@@ -1799,6 +1820,7 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
 interrupted:
        err = sock_intr_errno(timeo);
 failure:
+       kfree_skb(skb);
        *errcode = err;
        return NULL;
 }
@@ -1807,7 +1829,7 @@ EXPORT_SYMBOL(sock_alloc_send_pskb);
 struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
                                    int noblock, int *errcode)
 {
-       return sock_alloc_send_pskb(sk, size, 0, noblock, errcode);
+       return sock_alloc_send_pskb(sk, size, 0, noblock, errcode, 0);
 }
 EXPORT_SYMBOL(sock_alloc_send_skb);
 
@@ -2425,6 +2447,52 @@ void sock_enable_timestamp(struct sock *sk, int flag)
        }
 }
 
+int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
+                      int level, int type)
+{
+       struct sock_exterr_skb *serr;
+       struct sk_buff *skb, *skb2;
+       int copied, err;
+
+       err = -EAGAIN;
+       skb = skb_dequeue(&sk->sk_error_queue);
+       if (skb == NULL)
+               goto out;
+
+       copied = skb->len;
+       if (copied > len) {
+               msg->msg_flags |= MSG_TRUNC;
+               copied = len;
+       }
+       err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
+       if (err)
+               goto out_free_skb;
+
+       sock_recv_timestamp(msg, sk, skb);
+
+       serr = SKB_EXT_ERR(skb);
+       put_cmsg(msg, level, type, sizeof(serr->ee), &serr->ee);
+
+       msg->msg_flags |= MSG_ERRQUEUE;
+       err = copied;
+
+       /* Reset and regenerate socket error */
+       spin_lock_bh(&sk->sk_error_queue.lock);
+       sk->sk_err = 0;
+       if ((skb2 = skb_peek(&sk->sk_error_queue)) != NULL) {
+               sk->sk_err = SKB_EXT_ERR(skb2)->ee.ee_errno;
+               spin_unlock_bh(&sk->sk_error_queue.lock);
+               sk->sk_error_report(sk);
+       } else
+               spin_unlock_bh(&sk->sk_error_queue.lock);
+
+out_free_skb:
+       kfree_skb(skb);
+out:
+       return err;
+}
+EXPORT_SYMBOL(sock_recv_errqueue);
+
 /*
  *     Get a socket option on an socket.
  *
index f5df85dcd20bc7f790aec8f58967f55e02586444..512f0a24269b59cc45baa14c13e586b34fce9b60 100644 (file)
@@ -30,7 +30,7 @@ void sk_stream_write_space(struct sock *sk)
        struct socket *sock = sk->sk_socket;
        struct socket_wq *wq;
 
-       if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk) && sock) {
+       if (sk_stream_is_writeable(sk) && sock) {
                clear_bit(SOCK_NOSPACE, &sock->flags);
 
                rcu_read_lock();
index 6c7c78b839403f7582c8fd9daef4cfe0e1e571a0..ba64750f038726a990caa71d90e45f77bad93230 100644 (file)
@@ -336,7 +336,7 @@ unsigned int dccp_poll(struct file *file, struct socket *sock,
                        mask |= POLLIN | POLLRDNORM;
 
                if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
-                       if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
+                       if (sk_stream_is_writeable(sk)) {
                                mask |= POLLOUT | POLLWRNORM;
                        } else {  /* send SIGIO later */
                                set_bit(SOCK_ASYNC_NOSPACE,
@@ -347,7 +347,7 @@ unsigned int dccp_poll(struct file *file, struct socket *sock,
                                 * wspace test but before the flags are set,
                                 * IO signal will be lost.
                                 */
-                               if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
+                               if (sk_stream_is_writeable(sk))
                                        mask |= POLLOUT | POLLWRNORM;
                        }
                }
index 3b9d5f20bd1c695de768db960b190beb4370fa35..c85e71e0c7ffc640bd9592ce52b03dbde6cad926 100644 (file)
@@ -67,39 +67,6 @@ static const u8 lowpan_ttl_values[] = {0, 1, 64, 255};
 
 static LIST_HEAD(lowpan_devices);
 
-/*
- * Uncompression of linklocal:
- *   0 -> 16 bytes from packet
- *   1 -> 2  bytes from prefix - bunch of zeroes and 8 from packet
- *   2 -> 2  bytes from prefix - zeroes + 2 from packet
- *   3 -> 2  bytes from prefix - infer 8 bytes from lladdr
- *
- *  NOTE: => the uncompress function does change 0xf to 0x10
- *  NOTE: 0x00 => no-autoconfig => unspecified
- */
-static const u8 lowpan_unc_llconf[] = {0x0f, 0x28, 0x22, 0x20};
-
-/*
- * Uncompression of ctx-based:
- *   0 -> 0 bits  from packet [unspecified / reserved]
- *   1 -> 8 bytes from prefix - bunch of zeroes and 8 from packet
- *   2 -> 8 bytes from prefix - zeroes + 2 from packet
- *   3 -> 8 bytes from prefix - infer 8 bytes from lladdr
- */
-static const u8 lowpan_unc_ctxconf[] = {0x00, 0x88, 0x82, 0x80};
-
-/*
- * Uncompression of ctx-base
- *   0 -> 0 bits from packet
- *   1 -> 2 bytes from prefix - bunch of zeroes 5 from packet
- *   2 -> 2 bytes from prefix - zeroes + 3 from packet
- *   3 -> 2 bytes from prefix - infer 1 bytes from lladdr
- */
-static const u8 lowpan_unc_mxconf[] = {0x0f, 0x25, 0x23, 0x21};
-
-/* Link local prefix */
-static const u8 lowpan_llprefix[] = {0xfe, 0x80};
-
 /* private device info */
 struct lowpan_dev_info {
        struct net_device       *real_dev; /* real WPAN device ptr */
@@ -191,55 +158,177 @@ lowpan_compress_addr_64(u8 **hc06_ptr, u8 shift, const struct in6_addr *ipaddr,
        return rol8(val, shift);
 }
 
-static void
-lowpan_uip_ds6_set_addr_iid(struct in6_addr *ipaddr, unsigned char *lladdr)
+/*
+ * Uncompress address function for source and
+ * destination address(non-multicast).
+ *
+ * address_mode is sam value or dam value.
+ */
+static int
+lowpan_uncompress_addr(struct sk_buff *skb,
+               struct in6_addr *ipaddr,
+               const u8 address_mode,
+               const struct ieee802154_addr *lladdr)
 {
-       memcpy(&ipaddr->s6_addr[8], lladdr, IEEE802154_ADDR_LEN);
-       /* second bit-flip (Universe/Local) is done according RFC2464 */
-       ipaddr->s6_addr[8] ^= 0x02;
+       bool fail;
+
+       switch (address_mode) {
+       case LOWPAN_IPHC_ADDR_00:
+               /* for global link addresses */
+               fail = lowpan_fetch_skb(skb, ipaddr->s6_addr, 16);
+               break;
+       case LOWPAN_IPHC_ADDR_01:
+               /* fe:80::XXXX:XXXX:XXXX:XXXX */
+               ipaddr->s6_addr[0] = 0xFE;
+               ipaddr->s6_addr[1] = 0x80;
+               fail = lowpan_fetch_skb(skb, &ipaddr->s6_addr[8], 8);
+               break;
+       case LOWPAN_IPHC_ADDR_02:
+               /* fe:80::ff:fe00:XXXX */
+               ipaddr->s6_addr[0] = 0xFE;
+               ipaddr->s6_addr[1] = 0x80;
+               ipaddr->s6_addr[11] = 0xFF;
+               ipaddr->s6_addr[12] = 0xFE;
+               fail = lowpan_fetch_skb(skb, &ipaddr->s6_addr[14], 2);
+               break;
+       case LOWPAN_IPHC_ADDR_03:
+               fail = false;
+               switch (lladdr->addr_type) {
+               case IEEE802154_ADDR_LONG:
+                       /* fe:80::XXXX:XXXX:XXXX:XXXX
+                        *        \_________________/
+                        *              hwaddr
+                        */
+                       ipaddr->s6_addr[0] = 0xFE;
+                       ipaddr->s6_addr[1] = 0x80;
+                       memcpy(&ipaddr->s6_addr[8], lladdr->hwaddr,
+                                       IEEE802154_ADDR_LEN);
+                       /* second bit-flip (Universe/Local)
+                        * is done according RFC2464
+                        */
+                       ipaddr->s6_addr[8] ^= 0x02;
+                       break;
+               case IEEE802154_ADDR_SHORT:
+                       /* fe:80::ff:fe00:XXXX
+                        *                \__/
+                        *             short_addr
+                        *
+                        * Universe/Local bit is zero.
+                        */
+                       ipaddr->s6_addr[0] = 0xFE;
+                       ipaddr->s6_addr[1] = 0x80;
+                       ipaddr->s6_addr[11] = 0xFF;
+                       ipaddr->s6_addr[12] = 0xFE;
+                       ipaddr->s6_addr16[7] = htons(lladdr->short_addr);
+                       break;
+               default:
+                       pr_debug("Invalid addr_type set\n");
+                       return -EINVAL;
+               }
+               break;
+       default:
+               pr_debug("Invalid address mode value: 0x%x\n", address_mode);
+               return -EINVAL;
+       }
+
+       if (fail) {
+               pr_debug("Failed to fetch skb data\n");
+               return -EIO;
+       }
+
+       lowpan_raw_dump_inline(NULL, "Reconstructed ipv6 addr is:\n",
+                       ipaddr->s6_addr, 16);
+
+       return 0;
 }
 
-/*
- * Uncompress addresses based on a prefix and a postfix with zeroes in
- * between. If the postfix is zero in length it will use the link address
- * to configure the IP address (autoconf style).
- * pref_post_count takes a byte where the first nibble specify prefix count
- * and the second postfix count (NOTE: 15/0xf => 16 bytes copy).
+/* Uncompress address function for source context
+ * based address(non-multicast).
  */
 static int
-lowpan_uncompress_addr(struct sk_buff *skb, struct in6_addr *ipaddr,
-       u8 const *prefix, u8 pref_post_count, unsigned char *lladdr)
+lowpan_uncompress_context_based_src_addr(struct sk_buff *skb,
+               struct in6_addr *ipaddr,
+               const u8 sam)
 {
-       u8 prefcount = pref_post_count >> 4;
-       u8 postcount = pref_post_count & 0x0f;
-
-       /* full nibble 15 => 16 */
-       prefcount = (prefcount == 15 ? 16 : prefcount);
-       postcount = (postcount == 15 ? 16 : postcount);
-
-       if (lladdr)
-               lowpan_raw_dump_inline(__func__, "linklocal address",
-                                               lladdr, IEEE802154_ADDR_LEN);
-       if (prefcount > 0)
-               memcpy(ipaddr, prefix, prefcount);
-
-       if (prefcount + postcount < 16)
-               memset(&ipaddr->s6_addr[prefcount], 0,
-                                       16 - (prefcount + postcount));
-
-       if (postcount > 0) {
-               memcpy(&ipaddr->s6_addr[16 - postcount], skb->data, postcount);
-               skb_pull(skb, postcount);
-       } else if (prefcount > 0) {
-               if (lladdr == NULL)
-                       return -EINVAL;
+       switch (sam) {
+       case LOWPAN_IPHC_ADDR_00:
+               /* unspec address ::
+                * Do nothing, address is already ::
+                */
+               break;
+       case LOWPAN_IPHC_ADDR_01:
+               /* TODO */
+       case LOWPAN_IPHC_ADDR_02:
+               /* TODO */
+       case LOWPAN_IPHC_ADDR_03:
+               /* TODO */
+               netdev_warn(skb->dev, "SAM value 0x%x not supported\n", sam);
+               return -EINVAL;
+       default:
+               pr_debug("Invalid sam value: 0x%x\n", sam);
+               return -EINVAL;
+       }
+
+       lowpan_raw_dump_inline(NULL,
+                       "Reconstructed context based ipv6 src addr is:\n",
+                       ipaddr->s6_addr, 16);
+
+       return 0;
+}
 
-               /* no IID based configuration if no prefix and no data */
-               lowpan_uip_ds6_set_addr_iid(ipaddr, lladdr);
+/* Uncompress function for multicast destination address,
+ * when M bit is set.
+ */
+static int
+lowpan_uncompress_multicast_daddr(struct sk_buff *skb,
+               struct in6_addr *ipaddr,
+               const u8 dam)
+{
+       bool fail;
+
+       switch (dam) {
+       case LOWPAN_IPHC_DAM_00:
+               /* 00:  128 bits.  The full address
+                * is carried in-line.
+                */
+               fail = lowpan_fetch_skb(skb, ipaddr->s6_addr, 16);
+               break;
+       case LOWPAN_IPHC_DAM_01:
+               /* 01:  48 bits.  The address takes
+                * the form ffXX::00XX:XXXX:XXXX.
+                */
+               ipaddr->s6_addr[0] = 0xFF;
+               fail = lowpan_fetch_skb(skb, &ipaddr->s6_addr[1], 1);
+               fail |= lowpan_fetch_skb(skb, &ipaddr->s6_addr[11], 5);
+               break;
+       case LOWPAN_IPHC_DAM_10:
+               /* 10:  32 bits.  The address takes
+                * the form ffXX::00XX:XXXX.
+                */
+               ipaddr->s6_addr[0] = 0xFF;
+               fail = lowpan_fetch_skb(skb, &ipaddr->s6_addr[1], 1);
+               fail |= lowpan_fetch_skb(skb, &ipaddr->s6_addr[13], 3);
+               break;
+       case LOWPAN_IPHC_DAM_11:
+               /* 11:  8 bits.  The address takes
+                * the form ff02::00XX.
+                */
+               ipaddr->s6_addr[0] = 0xFF;
+               ipaddr->s6_addr[1] = 0x02;
+               fail = lowpan_fetch_skb(skb, &ipaddr->s6_addr[15], 1);
+               break;
+       default:
+               pr_debug("DAM value has a wrong value: 0x%x\n", dam);
+               return -EINVAL;
+       }
+
+       if (fail) {
+               pr_debug("Failed to fetch skb data\n");
+               return -EIO;
        }
 
-       pr_debug("uncompressing %d + %d => ", prefcount, postcount);
-       lowpan_raw_dump_inline(NULL, NULL, ipaddr->s6_addr, 16);
+       lowpan_raw_dump_inline(NULL, "Reconstructed ipv6 multicast addr is:\n",
+                       ipaddr->s6_addr, 16);
 
        return 0;
 }
@@ -702,6 +791,12 @@ lowpan_alloc_new_frame(struct sk_buff *skb, u16 len, u16 tag)
        skb_reserve(frame->skb, sizeof(struct ipv6hdr));
        skb_put(frame->skb, frame->length);
 
+       /* copy the first control block to keep a
+        * trace of the link-layer addresses in case
+        * of a link-local compressed address
+        */
+       memcpy(frame->skb->cb, skb->cb, sizeof(skb->cb));
+
        init_timer(&frame->timer);
        /* time out is the same as for ipv6 - 60 sec */
        frame->timer.expires = jiffies + LOWPAN_FRAG_TIMEOUT;
@@ -723,9 +818,9 @@ frame_err:
 static int
 lowpan_process_data(struct sk_buff *skb)
 {
-       struct ipv6hdr hdr;
+       struct ipv6hdr hdr = {};
        u8 tmp, iphc0, iphc1, num_context = 0;
-       u8 *_saddr, *_daddr;
+       const struct ieee802154_addr *_saddr, *_daddr;
        int err;
 
        lowpan_raw_dump_table(__func__, "raw skb data dump", skb->data,
@@ -828,8 +923,8 @@ lowpan_process_data(struct sk_buff *skb)
        if (lowpan_fetch_skb_u8(skb, &iphc1))
                goto drop;
 
-       _saddr = mac_cb(skb)->sa.hwaddr;
-       _daddr = mac_cb(skb)->da.hwaddr;
+       _saddr = &mac_cb(skb)->sa;
+       _daddr = &mac_cb(skb)->da;
 
        pr_debug("iphc0 = %02x, iphc1 = %02x\n", iphc0, iphc1);
 
@@ -868,8 +963,6 @@ lowpan_process_data(struct sk_buff *skb)
 
                hdr.priority = ((tmp >> 2) & 0x0f);
                hdr.flow_lbl[0] = ((tmp << 6) & 0xC0) | ((tmp >> 2) & 0x30);
-               hdr.flow_lbl[1] = 0;
-               hdr.flow_lbl[2] = 0;
                break;
        /*
         * Flow Label carried in-line
@@ -885,10 +978,6 @@ lowpan_process_data(struct sk_buff *skb)
                break;
        /* Traffic Class and Flow Label are elided */
        case 3: /* 11b */
-               hdr.priority = 0;
-               hdr.flow_lbl[0] = 0;
-               hdr.flow_lbl[1] = 0;
-               hdr.flow_lbl[2] = 0;
                break;
        default:
                break;
@@ -915,10 +1004,18 @@ lowpan_process_data(struct sk_buff *skb)
        /* Extract SAM to the tmp variable */
        tmp = ((iphc1 & LOWPAN_IPHC_SAM) >> LOWPAN_IPHC_SAM_BIT) & 0x03;
 
-       /* Source address uncompression */
-       pr_debug("source address stateless compression\n");
-       err = lowpan_uncompress_addr(skb, &hdr.saddr, lowpan_llprefix,
-                               lowpan_unc_llconf[tmp], skb->data);
+       if (iphc1 & LOWPAN_IPHC_SAC) {
+               /* Source address context based uncompression */
+               pr_debug("SAC bit is set. Handle context based source address.\n");
+               err = lowpan_uncompress_context_based_src_addr(
+                               skb, &hdr.saddr, tmp);
+       } else {
+               /* Source address uncompression */
+               pr_debug("source address stateless compression\n");
+               err = lowpan_uncompress_addr(skb, &hdr.saddr, tmp, _saddr);
+       }
+
+       /* Check on error of previous branch */
        if (err)
                goto drop;
 
@@ -931,23 +1028,14 @@ lowpan_process_data(struct sk_buff *skb)
                        pr_debug("dest: context-based mcast compression\n");
                        /* TODO: implement this */
                } else {
-                       u8 prefix[] = {0xff, 0x02};
-
-                       pr_debug("dest: non context-based mcast compression\n");
-                       if (0 < tmp && tmp < 3) {
-                               if (lowpan_fetch_skb_u8(skb, &prefix[1]))
-                                       goto drop;
-                       }
-
-                       err = lowpan_uncompress_addr(skb, &hdr.daddr, prefix,
-                                       lowpan_unc_mxconf[tmp], NULL);
+                       err = lowpan_uncompress_multicast_daddr(
+                                       skb, &hdr.daddr, tmp);
                        if (err)
                                goto drop;
                }
        } else {
                pr_debug("dest: stateless compression\n");
-               err = lowpan_uncompress_addr(skb, &hdr.daddr, lowpan_llprefix,
-                               lowpan_unc_llconf[tmp], skb->data);
+               err = lowpan_uncompress_addr(skb, &hdr.daddr, tmp, _daddr);
                if (err)
                        goto drop;
        }
index 4b8f917658b52fa028200d9ec1ab788130ecd143..2869c0526dad698b574ddd05f3f2175d5fb69455 100644 (file)
 /* Values of fields within the IPHC encoding second byte */
 #define LOWPAN_IPHC_CID                0x80
 
+#define LOWPAN_IPHC_ADDR_00    0x00
+#define LOWPAN_IPHC_ADDR_01    0x01
+#define LOWPAN_IPHC_ADDR_02    0x02
+#define LOWPAN_IPHC_ADDR_03    0x03
+
 #define LOWPAN_IPHC_SAC                0x40
-#define LOWPAN_IPHC_SAM_00     0x00
-#define LOWPAN_IPHC_SAM_01     0x10
-#define LOWPAN_IPHC_SAM_10     0x20
 #define LOWPAN_IPHC_SAM                0x30
 
 #define LOWPAN_IPHC_SAM_BIT    4
                                        dest = 16 bit inline */
 #define LOWPAN_NHC_UDP_CS_P_11 0xF3 /* source & dest = 0xF0B + 4bit inline */
 
+static inline bool lowpan_fetch_skb(struct sk_buff *skb,
+               void *data, const unsigned int len)
+{
+       if (unlikely(!pskb_may_pull(skb, len)))
+               return true;
+
+       skb_copy_from_linear_data(skb, data, len);
+       skb_pull(skb, len);
+
+       return false;
+}
+
 #endif /* __6LOWPAN_H__ */
index 34ca6d5a3a4b48f5dc8282a514115a278642f344..a1b5bcbd04ae01e358d1542c354fac083c9f8582 100644 (file)
@@ -73,6 +73,8 @@ static struct ipv4_devconf ipv4_devconf = {
                [IPV4_DEVCONF_SEND_REDIRECTS - 1] = 1,
                [IPV4_DEVCONF_SECURE_REDIRECTS - 1] = 1,
                [IPV4_DEVCONF_SHARED_MEDIA - 1] = 1,
+               [IPV4_DEVCONF_IGMPV2_UNSOLICITED_REPORT_INTERVAL - 1] = 10000 /*ms*/,
+               [IPV4_DEVCONF_IGMPV3_UNSOLICITED_REPORT_INTERVAL - 1] =  1000 /*ms*/,
        },
 };
 
@@ -83,6 +85,8 @@ static struct ipv4_devconf ipv4_devconf_dflt = {
                [IPV4_DEVCONF_SECURE_REDIRECTS - 1] = 1,
                [IPV4_DEVCONF_SHARED_MEDIA - 1] = 1,
                [IPV4_DEVCONF_ACCEPT_SOURCE_ROUTE - 1] = 1,
+               [IPV4_DEVCONF_IGMPV2_UNSOLICITED_REPORT_INTERVAL - 1] = 10000 /*ms*/,
+               [IPV4_DEVCONF_IGMPV3_UNSOLICITED_REPORT_INTERVAL - 1] =  1000 /*ms*/,
        },
 };
 
@@ -1126,10 +1130,7 @@ static int inet_gifconf(struct net_device *dev, char __user *buf, int len)
                if (len < (int) sizeof(ifr))
                        break;
                memset(&ifr, 0, sizeof(struct ifreq));
-               if (ifa->ifa_label)
-                       strcpy(ifr.ifr_name, ifa->ifa_label);
-               else
-                       strcpy(ifr.ifr_name, dev->name);
+               strcpy(ifr.ifr_name, ifa->ifa_label);
 
                (*(struct sockaddr_in *)&ifr.ifr_addr).sin_family = AF_INET;
                (*(struct sockaddr_in *)&ifr.ifr_addr).sin_addr.s_addr =
@@ -2097,11 +2098,15 @@ static struct devinet_sysctl_table {
                DEVINET_SYSCTL_RW_ENTRY(ARP_ACCEPT, "arp_accept"),
                DEVINET_SYSCTL_RW_ENTRY(ARP_NOTIFY, "arp_notify"),
                DEVINET_SYSCTL_RW_ENTRY(PROXY_ARP_PVLAN, "proxy_arp_pvlan"),
+               DEVINET_SYSCTL_RW_ENTRY(FORCE_IGMP_VERSION,
+                                       "force_igmp_version"),
+               DEVINET_SYSCTL_RW_ENTRY(IGMPV2_UNSOLICITED_REPORT_INTERVAL,
+                                       "igmpv2_unsolicited_report_interval"),
+               DEVINET_SYSCTL_RW_ENTRY(IGMPV3_UNSOLICITED_REPORT_INTERVAL,
+                                       "igmpv3_unsolicited_report_interval"),
 
                DEVINET_SYSCTL_FLUSHING_ENTRY(NOXFRM, "disable_xfrm"),
                DEVINET_SYSCTL_FLUSHING_ENTRY(NOPOLICY, "disable_policy"),
-               DEVINET_SYSCTL_FLUSHING_ENTRY(FORCE_IGMP_VERSION,
-                                             "force_igmp_version"),
                DEVINET_SYSCTL_FLUSHING_ENTRY(PROMOTE_SECONDARIES,
                                              "promote_secondaries"),
                DEVINET_SYSCTL_FLUSHING_ENTRY(ROUTE_LOCALNET,
index 26aa65d1fce49dfeaa45cbc02046218b5bc70453..523be38e37de82736a28bc9aa229f1911d2eaa31 100644 (file)
@@ -101,6 +101,30 @@ errout:
        return err;
 }
 
+static bool fib4_rule_suppress(struct fib_rule *rule, struct fib_lookup_arg *arg)
+{
+       struct fib_result *result = (struct fib_result *) arg->result;
+       struct net_device *dev = result->fi->fib_dev;
+
+       /* do not accept result if the route does
+        * not meet the required prefix length
+        */
+       if (result->prefixlen <= rule->suppress_prefixlen)
+               goto suppress_route;
+
+       /* do not accept result if the route uses a device
+        * belonging to a forbidden interface group
+        */
+       if (rule->suppress_ifgroup != -1 && dev && dev->group == rule->suppress_ifgroup)
+               goto suppress_route;
+
+       return false;
+
+suppress_route:
+       if (!(arg->flags & FIB_LOOKUP_NOREF))
+               fib_info_put(result->fi);
+       return true;
+}
 
 static int fib4_rule_match(struct fib_rule *rule, struct flowi *fl, int flags)
 {
@@ -267,6 +291,7 @@ static const struct fib_rules_ops __net_initconst fib4_rules_ops_template = {
        .rule_size      = sizeof(struct fib4_rule),
        .addr_size      = sizeof(u32),
        .action         = fib4_rule_action,
+       .suppress       = fib4_rule_suppress,
        .match          = fib4_rule_match,
        .configure      = fib4_rule_configure,
        .delete         = fib4_rule_delete,
index cd71190d29625c11fe25aa603026cc98e0e2c34b..d6c0e64ec97f2147124e84d4b634f263bac9eb95 100644 (file)
@@ -88,6 +88,7 @@
 #include <linux/if_arp.h>
 #include <linux/rtnetlink.h>
 #include <linux/times.h>
+#include <linux/pkt_sched.h>
 
 #include <net/net_namespace.h>
 #include <net/arp.h>
 
 #define IGMP_V1_Router_Present_Timeout         (400*HZ)
 #define IGMP_V2_Router_Present_Timeout         (400*HZ)
-#define IGMP_Unsolicited_Report_Interval       (10*HZ)
+#define IGMP_V2_Unsolicited_Report_Interval    (10*HZ)
+#define IGMP_V3_Unsolicited_Report_Interval    (1*HZ)
 #define IGMP_Query_Response_Interval           (10*HZ)
 #define IGMP_Unsolicited_Report_Count          2
 
         ((in_dev)->mr_v2_seen && \
          time_before(jiffies, (in_dev)->mr_v2_seen)))
 
+static int unsolicited_report_interval(struct in_device *in_dev)
+{
+       int interval_ms, interval_jiffies;
+
+       if (IGMP_V1_SEEN(in_dev) || IGMP_V2_SEEN(in_dev))
+               interval_ms = IN_DEV_CONF_GET(
+                       in_dev,
+                       IGMPV2_UNSOLICITED_REPORT_INTERVAL);
+       else /* v3 */
+               interval_ms = IN_DEV_CONF_GET(
+                       in_dev,
+                       IGMPV3_UNSOLICITED_REPORT_INTERVAL);
+
+       interval_jiffies = msecs_to_jiffies(interval_ms);
+
+       /* _timer functions can't handle a delay of 0 jiffies so ensure
+        *  we always return a positive value.
+        */
+       if (interval_jiffies <= 0)
+               interval_jiffies = 1;
+       return interval_jiffies;
+}
+
 static void igmpv3_add_delrec(struct in_device *in_dev, struct ip_mc_list *im);
 static void igmpv3_del_delrec(struct in_device *in_dev, __be32 multiaddr);
 static void igmpv3_clear_delrec(struct in_device *in_dev);
@@ -315,6 +340,7 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size)
                if (size < 256)
                        return NULL;
        }
+       skb->priority = TC_PRIO_CONTROL;
        igmp_skb_size(skb) = size;
 
        rt = ip_route_output_ports(net, &fl4, NULL, IGMPV3_ALL_MCR, 0,
@@ -670,6 +696,7 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,
                ip_rt_put(rt);
                return -1;
        }
+       skb->priority = TC_PRIO_CONTROL;
 
        skb_dst_set(skb, &rt->dst);
 
@@ -719,7 +746,8 @@ static void igmp_ifc_timer_expire(unsigned long data)
        igmpv3_send_cr(in_dev);
        if (in_dev->mr_ifc_count) {
                in_dev->mr_ifc_count--;
-               igmp_ifc_start_timer(in_dev, IGMP_Unsolicited_Report_Interval);
+               igmp_ifc_start_timer(in_dev,
+                                    unsolicited_report_interval(in_dev));
        }
        __in_dev_put(in_dev);
 }
@@ -744,7 +772,7 @@ static void igmp_timer_expire(unsigned long data)
 
        if (im->unsolicit_count) {
                im->unsolicit_count--;
-               igmp_start_timer(im, IGMP_Unsolicited_Report_Interval);
+               igmp_start_timer(im, unsolicited_report_interval(in_dev));
        }
        im->reporter = 1;
        spin_unlock(&im->lock);
@@ -1323,16 +1351,17 @@ out:
 EXPORT_SYMBOL(ip_mc_inc_group);
 
 /*
- *     Resend IGMP JOIN report; used for bonding.
- *     Called with rcu_read_lock()
+ *     Resend IGMP JOIN report; used by netdev notifier.
  */
-void ip_mc_rejoin_groups(struct in_device *in_dev)
+static void ip_mc_rejoin_groups(struct in_device *in_dev)
 {
 #ifdef CONFIG_IP_MULTICAST
        struct ip_mc_list *im;
        int type;
 
-       for_each_pmc_rcu(in_dev, im) {
+       ASSERT_RTNL();
+
+       for_each_pmc_rtnl(in_dev, im) {
                if (im->multiaddr == IGMP_ALL_HOSTS)
                        continue;
 
@@ -1349,7 +1378,6 @@ void ip_mc_rejoin_groups(struct in_device *in_dev)
        }
 #endif
 }
-EXPORT_SYMBOL(ip_mc_rejoin_groups);
 
 /*
  *     A socket has left a multicast group on device dev
@@ -2735,8 +2763,42 @@ static struct pernet_operations igmp_net_ops = {
        .exit = igmp_net_exit,
 };
 
+static int igmp_netdev_event(struct notifier_block *this,
+                            unsigned long event, void *ptr)
+{
+       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+       struct in_device *in_dev;
+
+       switch (event) {
+       case NETDEV_RESEND_IGMP:
+               in_dev = __in_dev_get_rtnl(dev);
+               if (in_dev)
+                       ip_mc_rejoin_groups(in_dev);
+               break;
+       default:
+               break;
+       }
+       return NOTIFY_DONE;
+}
+
+static struct notifier_block igmp_notifier = {
+       .notifier_call = igmp_netdev_event,
+};
+
 int __init igmp_mc_proc_init(void)
 {
-       return register_pernet_subsys(&igmp_net_ops);
+       int err;
+
+       err = register_pernet_subsys(&igmp_net_ops);
+       if (err)
+               return err;
+       err = register_netdevice_notifier(&igmp_notifier);
+       if (err)
+               goto reg_notif_fail;
+       return 0;
+
+reg_notif_fail:
+       unregister_pernet_subsys(&igmp_net_ops);
+       return err;
 }
 #endif
index 8d6939eeb49247b1c2ee8659cc190814d64eb4e5..d7aea4c5b9400efec37d15db3c5a896769d0da68 100644 (file)
@@ -534,7 +534,7 @@ static int __net_init ipgre_init_net(struct net *net)
 static void __net_exit ipgre_exit_net(struct net *net)
 {
        struct ip_tunnel_net *itn = net_generic(net, ipgre_net_id);
-       ip_tunnel_delete_net(itn);
+       ip_tunnel_delete_net(itn, &ipgre_link_ops);
 }
 
 static struct pernet_operations ipgre_net_ops = {
@@ -767,7 +767,7 @@ static int __net_init ipgre_tap_init_net(struct net *net)
 static void __net_exit ipgre_tap_exit_net(struct net *net)
 {
        struct ip_tunnel_net *itn = net_generic(net, gre_tap_net_id);
-       ip_tunnel_delete_net(itn);
+       ip_tunnel_delete_net(itn, &ipgre_tap_ops);
 }
 
 static struct pernet_operations ipgre_tap_net_ops = {
index 15e3e683adece394b02347eeed1d05afe5a62d01..054a3e97d822b61646cfff92c2ff2ed0f1e0740a 100644 (file)
 #include <net/icmp.h>
 #include <net/raw.h>
 #include <net/checksum.h>
+#include <net/inet_ecn.h>
 #include <linux/netfilter_ipv4.h>
 #include <net/xfrm.h>
 #include <linux/mroute.h>
@@ -410,6 +411,13 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
        if (iph->ihl < 5 || iph->version != 4)
                goto inhdr_error;
 
+       BUILD_BUG_ON(IPSTATS_MIB_ECT1PKTS != IPSTATS_MIB_NOECTPKTS + INET_ECN_ECT_1);
+       BUILD_BUG_ON(IPSTATS_MIB_ECT0PKTS != IPSTATS_MIB_NOECTPKTS + INET_ECN_ECT_0);
+       BUILD_BUG_ON(IPSTATS_MIB_CEPKTS != IPSTATS_MIB_NOECTPKTS + INET_ECN_CE);
+       IP_ADD_STATS_BH(dev_net(dev),
+                       IPSTATS_MIB_NOECTPKTS + (iph->tos & INET_ECN_MASK),
+                       max_t(unsigned short, 1, skb_shinfo(skb)->gso_segs));
+
        if (!pskb_may_pull(skb, iph->ihl*4))
                goto inhdr_error;
 
index 4bcabf3ab4cad3bdc43f5b9ed33eba9c1357557d..9ee17e3d11c30e4054558729df205b41a762e806 100644 (file)
@@ -211,14 +211,6 @@ static inline int ip_finish_output2(struct sk_buff *skb)
        return -EINVAL;
 }
 
-static inline int ip_skb_dst_mtu(struct sk_buff *skb)
-{
-       struct inet_sock *inet = skb->sk ? inet_sk(skb->sk) : NULL;
-
-       return (inet && inet->pmtudisc == IP_PMTUDISC_PROBE) ?
-              skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb));
-}
-
 static int ip_finish_output(struct sk_buff *skb)
 {
 #if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
index ca1cb2d5f6e2bcb81eea9d35e3178518492a7a63..24549b4aeae219f19e433f31e032f795bd249166 100644 (file)
@@ -350,7 +350,7 @@ static int ip_tunnel_bind_dev(struct net_device *dev)
                struct flowi4 fl4;
                struct rtable *rt;
 
-               rt = ip_route_output_tunnel(dev_net(dev), &fl4,
+               rt = ip_route_output_tunnel(tunnel->net, &fl4,
                                            tunnel->parms.iph.protocol,
                                            iph->daddr, iph->saddr,
                                            tunnel->parms.o_key,
@@ -365,7 +365,7 @@ static int ip_tunnel_bind_dev(struct net_device *dev)
        }
 
        if (!tdev && tunnel->parms.link)
-               tdev = __dev_get_by_index(dev_net(dev), tunnel->parms.link);
+               tdev = __dev_get_by_index(tunnel->net, tunnel->parms.link);
 
        if (tdev) {
                hlen = tdev->hard_header_len + tdev->needed_headroom;
@@ -454,15 +454,16 @@ int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
        tstats->rx_bytes += skb->len;
        u64_stats_update_end(&tstats->syncp);
 
-       if (tunnel->net != dev_net(tunnel->dev))
-               skb_scrub_packet(skb);
-
        if (tunnel->dev->type == ARPHRD_ETHER) {
                skb->protocol = eth_type_trans(skb, tunnel->dev);
                skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
        } else {
                skb->dev = tunnel->dev;
        }
+
+       if (!net_eq(tunnel->net, dev_net(tunnel->dev)))
+               skb_scrub_packet(skb);
+
        gro_cells_receive(&tunnel->gro_cells, skb);
        return 0;
 
@@ -613,7 +614,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
                goto tx_error;
        }
 
-       if (tunnel->net != dev_net(dev))
+       if (!net_eq(tunnel->net, dev_net(dev)))
                skb_scrub_packet(skb);
 
        if (tunnel->err_count > 0) {
@@ -653,7 +654,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
                }
        }
 
-       err = iptunnel_xmit(dev_net(dev), rt, skb,
+       err = iptunnel_xmit(tunnel->net, rt, skb,
                            fl4.saddr, fl4.daddr, protocol,
                            ip_tunnel_ecn_encap(tos, inner_iph, skb), ttl, df);
        iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
@@ -820,11 +821,10 @@ static void ip_tunnel_dev_free(struct net_device *dev)
 
 void ip_tunnel_dellink(struct net_device *dev, struct list_head *head)
 {
-       struct net *net = dev_net(dev);
        struct ip_tunnel *tunnel = netdev_priv(dev);
        struct ip_tunnel_net *itn;
 
-       itn = net_generic(net, tunnel->ip_tnl_net_id);
+       itn = net_generic(tunnel->net, tunnel->ip_tnl_net_id);
 
        if (itn->fb_tunnel_dev != dev) {
                ip_tunnel_del(netdev_priv(dev));
@@ -838,15 +838,16 @@ int ip_tunnel_init_net(struct net *net, int ip_tnl_net_id,
 {
        struct ip_tunnel_net *itn = net_generic(net, ip_tnl_net_id);
        struct ip_tunnel_parm parms;
+       unsigned int i;
 
-       itn->tunnels = kzalloc(IP_TNL_HASH_SIZE * sizeof(struct hlist_head), GFP_KERNEL);
-       if (!itn->tunnels)
-               return -ENOMEM;
+       for (i = 0; i < IP_TNL_HASH_SIZE; i++)
+               INIT_HLIST_HEAD(&itn->tunnels[i]);
 
        if (!ops) {
                itn->fb_tunnel_dev = NULL;
                return 0;
        }
+
        memset(&parms, 0, sizeof(parms));
        if (devname)
                strlcpy(parms.name, devname, IFNAMSIZ);
@@ -854,40 +855,53 @@ int ip_tunnel_init_net(struct net *net, int ip_tnl_net_id,
        rtnl_lock();
        itn->fb_tunnel_dev = __ip_tunnel_create(net, ops, &parms);
        rtnl_unlock();
-       if (IS_ERR(itn->fb_tunnel_dev)) {
-               kfree(itn->tunnels);
+
+       if (IS_ERR(itn->fb_tunnel_dev))
                return PTR_ERR(itn->fb_tunnel_dev);
-       }
+       /* FB netdevice is special: we have one, and only one per netns.
+        * Allowing to move it to another netns is clearly unsafe.
+        */
+       itn->fb_tunnel_dev->features |= NETIF_F_NETNS_LOCAL;
 
        return 0;
 }
 EXPORT_SYMBOL_GPL(ip_tunnel_init_net);
 
-static void ip_tunnel_destroy(struct ip_tunnel_net *itn, struct list_head *head)
+static void ip_tunnel_destroy(struct ip_tunnel_net *itn, struct list_head *head,
+                             struct rtnl_link_ops *ops)
 {
+       struct net *net = dev_net(itn->fb_tunnel_dev);
+       struct net_device *dev, *aux;
        int h;
 
+       for_each_netdev_safe(net, dev, aux)
+               if (dev->rtnl_link_ops == ops)
+                       unregister_netdevice_queue(dev, head);
+
        for (h = 0; h < IP_TNL_HASH_SIZE; h++) {
                struct ip_tunnel *t;
                struct hlist_node *n;
                struct hlist_head *thead = &itn->tunnels[h];
 
                hlist_for_each_entry_safe(t, n, thead, hash_node)
-                       unregister_netdevice_queue(t->dev, head);
+                       /* If dev is in the same netns, it has already
+                        * been added to the list by the previous loop.
+                        */
+                       if (!net_eq(dev_net(t->dev), net))
+                               unregister_netdevice_queue(t->dev, head);
        }
        if (itn->fb_tunnel_dev)
                unregister_netdevice_queue(itn->fb_tunnel_dev, head);
 }
 
-void ip_tunnel_delete_net(struct ip_tunnel_net *itn)
+void ip_tunnel_delete_net(struct ip_tunnel_net *itn, struct rtnl_link_ops *ops)
 {
        LIST_HEAD(list);
 
        rtnl_lock();
-       ip_tunnel_destroy(itn, &list);
+       ip_tunnel_destroy(itn, &list, ops);
        unregister_netdevice_many(&list);
        rtnl_unlock();
-       kfree(itn->tunnels);
 }
 EXPORT_SYMBOL_GPL(ip_tunnel_delete_net);
 
@@ -929,23 +943,21 @@ EXPORT_SYMBOL_GPL(ip_tunnel_newlink);
 int ip_tunnel_changelink(struct net_device *dev, struct nlattr *tb[],
                         struct ip_tunnel_parm *p)
 {
-       struct ip_tunnel *t, *nt;
-       struct net *net = dev_net(dev);
+       struct ip_tunnel *t;
        struct ip_tunnel *tunnel = netdev_priv(dev);
+       struct net *net = tunnel->net;
        struct ip_tunnel_net *itn = net_generic(net, tunnel->ip_tnl_net_id);
 
        if (dev == itn->fb_tunnel_dev)
                return -EINVAL;
 
-       nt = netdev_priv(dev);
-
        t = ip_tunnel_find(itn, p, dev->type);
 
        if (t) {
                if (t->dev != dev)
                        return -EEXIST;
        } else {
-               t = nt;
+               t = tunnel;
 
                if (dev->type != ARPHRD_ETHER) {
                        unsigned int nflags = 0;
@@ -984,6 +996,7 @@ int ip_tunnel_init(struct net_device *dev)
        }
 
        tunnel->dev = dev;
+       tunnel->net = dev_net(dev);
        strcpy(tunnel->parms.name, dev->name);
        iph->version            = 4;
        iph->ihl                = 5;
@@ -994,8 +1007,8 @@ EXPORT_SYMBOL_GPL(ip_tunnel_init);
 
 void ip_tunnel_uninit(struct net_device *dev)
 {
-       struct net *net = dev_net(dev);
        struct ip_tunnel *tunnel = netdev_priv(dev);
+       struct net *net = tunnel->net;
        struct ip_tunnel_net *itn;
 
        itn = net_generic(net, tunnel->ip_tnl_net_id);
index 17cc0ffa8c0d0ea2a3732b76b63be1f85c896778..e805e7b3030e3dad2f8fd83d140f0bb7f100d69c 100644 (file)
 #include <net/net_namespace.h>
 #include <net/netns/generic.h>
 
-#define HASH_SIZE  16
-#define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&(HASH_SIZE-1))
-
 static struct rtnl_link_ops vti_link_ops __read_mostly;
 
 static int vti_net_id __read_mostly;
-struct vti_net {
-       struct ip_tunnel __rcu *tunnels_r_l[HASH_SIZE];
-       struct ip_tunnel __rcu *tunnels_r[HASH_SIZE];
-       struct ip_tunnel __rcu *tunnels_l[HASH_SIZE];
-       struct ip_tunnel __rcu *tunnels_wc[1];
-       struct ip_tunnel __rcu **tunnels[4];
-
-       struct net_device *fb_tunnel_dev;
-};
-
-static int vti_fb_tunnel_init(struct net_device *dev);
 static int vti_tunnel_init(struct net_device *dev);
-static void vti_tunnel_setup(struct net_device *dev);
-static void vti_dev_free(struct net_device *dev);
-static int vti_tunnel_bind_dev(struct net_device *dev);
-
-#define VTI_XMIT(stats1, stats2) do {                          \
-       int err;                                                \
-       int pkt_len = skb->len;                                 \
-       err = dst_output(skb);                                  \
-       if (net_xmit_eval(err) == 0) {                          \
-               u64_stats_update_begin(&(stats1)->syncp);       \
-               (stats1)->tx_bytes += pkt_len;                  \
-               (stats1)->tx_packets++;                         \
-               u64_stats_update_end(&(stats1)->syncp);         \
-       } else {                                                \
-               (stats2)->tx_errors++;                          \
-               (stats2)->tx_aborted_errors++;                  \
-       }                                                       \
-} while (0)
-
-
-static struct ip_tunnel *vti_tunnel_lookup(struct net *net,
-                                          __be32 remote, __be32 local)
-{
-       unsigned h0 = HASH(remote);
-       unsigned h1 = HASH(local);
-       struct ip_tunnel *t;
-       struct vti_net *ipn = net_generic(net, vti_net_id);
-
-       for_each_ip_tunnel_rcu(t, ipn->tunnels_r_l[h0 ^ h1])
-               if (local == t->parms.iph.saddr &&
-                   remote == t->parms.iph.daddr && (t->dev->flags&IFF_UP))
-                       return t;
-       for_each_ip_tunnel_rcu(t, ipn->tunnels_r[h0])
-               if (remote == t->parms.iph.daddr && (t->dev->flags&IFF_UP))
-                       return t;
-
-       for_each_ip_tunnel_rcu(t, ipn->tunnels_l[h1])
-               if (local == t->parms.iph.saddr && (t->dev->flags&IFF_UP))
-                       return t;
-
-       for_each_ip_tunnel_rcu(t, ipn->tunnels_wc[0])
-               if (t && (t->dev->flags&IFF_UP))
-                       return t;
-       return NULL;
-}
-
-static struct ip_tunnel __rcu **__vti_bucket(struct vti_net *ipn,
-                                            struct ip_tunnel_parm *parms)
-{
-       __be32 remote = parms->iph.daddr;
-       __be32 local = parms->iph.saddr;
-       unsigned h = 0;
-       int prio = 0;
-
-       if (remote) {
-               prio |= 2;
-               h ^= HASH(remote);
-       }
-       if (local) {
-               prio |= 1;
-               h ^= HASH(local);
-       }
-       return &ipn->tunnels[prio][h];
-}
-
-static inline struct ip_tunnel __rcu **vti_bucket(struct vti_net *ipn,
-                                                 struct ip_tunnel *t)
-{
-       return __vti_bucket(ipn, &t->parms);
-}
-
-static void vti_tunnel_unlink(struct vti_net *ipn, struct ip_tunnel *t)
-{
-       struct ip_tunnel __rcu **tp;
-       struct ip_tunnel *iter;
-
-       for (tp = vti_bucket(ipn, t);
-            (iter = rtnl_dereference(*tp)) != NULL;
-            tp = &iter->next) {
-               if (t == iter) {
-                       rcu_assign_pointer(*tp, t->next);
-                       break;
-               }
-       }
-}
-
-static void vti_tunnel_link(struct vti_net *ipn, struct ip_tunnel *t)
-{
-       struct ip_tunnel __rcu **tp = vti_bucket(ipn, t);
-
-       rcu_assign_pointer(t->next, rtnl_dereference(*tp));
-       rcu_assign_pointer(*tp, t);
-}
-
-static struct ip_tunnel *vti_tunnel_locate(struct net *net,
-                                          struct ip_tunnel_parm *parms,
-                                          int create)
-{
-       __be32 remote = parms->iph.daddr;
-       __be32 local = parms->iph.saddr;
-       struct ip_tunnel *t, *nt;
-       struct ip_tunnel __rcu **tp;
-       struct net_device *dev;
-       char name[IFNAMSIZ];
-       struct vti_net *ipn = net_generic(net, vti_net_id);
-
-       for (tp = __vti_bucket(ipn, parms);
-            (t = rtnl_dereference(*tp)) != NULL;
-            tp = &t->next) {
-               if (local == t->parms.iph.saddr && remote == t->parms.iph.daddr)
-                       return t;
-       }
-       if (!create)
-               return NULL;
-
-       if (parms->name[0])
-               strlcpy(name, parms->name, IFNAMSIZ);
-       else
-               strcpy(name, "vti%d");
-
-       dev = alloc_netdev(sizeof(*t), name, vti_tunnel_setup);
-       if (dev == NULL)
-               return NULL;
-
-       dev_net_set(dev, net);
-
-       nt = netdev_priv(dev);
-       nt->parms = *parms;
-       dev->rtnl_link_ops = &vti_link_ops;
-
-       vti_tunnel_bind_dev(dev);
-
-       if (register_netdevice(dev) < 0)
-               goto failed_free;
-
-       dev_hold(dev);
-       vti_tunnel_link(ipn, nt);
-       return nt;
-
-failed_free:
-       free_netdev(dev);
-       return NULL;
-}
-
-static void vti_tunnel_uninit(struct net_device *dev)
-{
-       struct net *net = dev_net(dev);
-       struct vti_net *ipn = net_generic(net, vti_net_id);
-
-       vti_tunnel_unlink(ipn, netdev_priv(dev));
-       dev_put(dev);
-}
 
 static int vti_err(struct sk_buff *skb, u32 info)
 {
@@ -222,6 +56,8 @@ static int vti_err(struct sk_buff *skb, u32 info)
         * 8 bytes of packet payload. It means, that precise relaying of
         * ICMP in the real Internet is absolutely infeasible.
         */
+       struct net *net = dev_net(skb->dev);
+       struct ip_tunnel_net *itn = net_generic(net, vti_net_id);
        struct iphdr *iph = (struct iphdr *)skb->data;
        const int type = icmp_hdr(skb)->type;
        const int code = icmp_hdr(skb)->code;
@@ -252,7 +88,8 @@ static int vti_err(struct sk_buff *skb, u32 info)
 
        err = -ENOENT;
 
-       t = vti_tunnel_lookup(dev_net(skb->dev), iph->daddr, iph->saddr);
+       t = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
+                            iph->daddr, iph->saddr, 0);
        if (t == NULL)
                goto out;
 
@@ -281,8 +118,11 @@ static int vti_rcv(struct sk_buff *skb)
 {
        struct ip_tunnel *tunnel;
        const struct iphdr *iph = ip_hdr(skb);
+       struct net *net = dev_net(skb->dev);
+       struct ip_tunnel_net *itn = net_generic(net, vti_net_id);
 
-       tunnel = vti_tunnel_lookup(dev_net(skb->dev), iph->saddr, iph->daddr);
+       tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
+                                 iph->saddr, iph->daddr, 0);
        if (tunnel != NULL) {
                struct pcpu_tstats *tstats;
 
@@ -311,7 +151,6 @@ static int vti_rcv(struct sk_buff *skb)
 static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct ip_tunnel *tunnel = netdev_priv(dev);
-       struct pcpu_tstats *tstats;
        struct iphdr  *tiph = &tunnel->parms.iph;
        u8     tos;
        struct rtable *rt;              /* Route to the other host */
@@ -319,6 +158,7 @@ static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
        struct iphdr  *old_iph = ip_hdr(skb);
        __be32 dst = tiph->daddr;
        struct flowi4 fl4;
+       int err;
 
        if (skb->protocol != htons(ETH_P_IP))
                goto tx_error;
@@ -367,8 +207,10 @@ static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
        nf_reset(skb);
        skb->dev = skb_dst(skb)->dev;
 
-       tstats = this_cpu_ptr(dev->tstats);
-       VTI_XMIT(tstats, &dev->stats);
+       err = dst_output(skb);
+       if (net_xmit_eval(err) == 0)
+               err = skb->len;
+       iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
        return NETDEV_TX_OK;
 
 tx_error_icmp:
@@ -379,198 +221,57 @@ tx_error:
        return NETDEV_TX_OK;
 }
 
-static int vti_tunnel_bind_dev(struct net_device *dev)
-{
-       struct net_device *tdev = NULL;
-       struct ip_tunnel *tunnel;
-       struct iphdr *iph;
-
-       tunnel = netdev_priv(dev);
-       iph = &tunnel->parms.iph;
-
-       if (iph->daddr) {
-               struct rtable *rt;
-               struct flowi4 fl4;
-               memset(&fl4, 0, sizeof(fl4));
-               flowi4_init_output(&fl4, tunnel->parms.link,
-                                  be32_to_cpu(tunnel->parms.i_key),
-                                  RT_TOS(iph->tos), RT_SCOPE_UNIVERSE,
-                                  IPPROTO_IPIP, 0,
-                                  iph->daddr, iph->saddr, 0, 0);
-               rt = ip_route_output_key(dev_net(dev), &fl4);
-               if (!IS_ERR(rt)) {
-                       tdev = rt->dst.dev;
-                       ip_rt_put(rt);
-               }
-               dev->flags |= IFF_POINTOPOINT;
-       }
-
-       if (!tdev && tunnel->parms.link)
-               tdev = __dev_get_by_index(dev_net(dev), tunnel->parms.link);
-
-       if (tdev) {
-               dev->hard_header_len = tdev->hard_header_len +
-                                      sizeof(struct iphdr);
-               dev->mtu = tdev->mtu;
-       }
-       dev->iflink = tunnel->parms.link;
-       return dev->mtu;
-}
-
 static int
 vti_tunnel_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 {
        int err = 0;
        struct ip_tunnel_parm p;
-       struct ip_tunnel *t;
-       struct net *net = dev_net(dev);
-       struct vti_net *ipn = net_generic(net, vti_net_id);
-
-       switch (cmd) {
-       case SIOCGETTUNNEL:
-               t = NULL;
-               if (dev == ipn->fb_tunnel_dev) {
-                       if (copy_from_user(&p, ifr->ifr_ifru.ifru_data,
-                                          sizeof(p))) {
-                               err = -EFAULT;
-                               break;
-                       }
-                       t = vti_tunnel_locate(net, &p, 0);
-               }
-               if (t == NULL)
-                       t = netdev_priv(dev);
-               memcpy(&p, &t->parms, sizeof(p));
-               p.i_flags |= GRE_KEY | VTI_ISVTI;
-               p.o_flags |= GRE_KEY;
-               if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
-                       err = -EFAULT;
-               break;
-
-       case SIOCADDTUNNEL:
-       case SIOCCHGTUNNEL:
-               err = -EPERM;
-               if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
-                       goto done;
 
-               err = -EFAULT;
-               if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
-                       goto done;
+       if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
+               return -EFAULT;
 
-               err = -EINVAL;
+       if (cmd == SIOCADDTUNNEL || cmd == SIOCCHGTUNNEL) {
                if (p.iph.version != 4 || p.iph.protocol != IPPROTO_IPIP ||
                    p.iph.ihl != 5)
-                       goto done;
-
-               t = vti_tunnel_locate(net, &p, cmd == SIOCADDTUNNEL);
-
-               if (dev != ipn->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
-                       if (t != NULL) {
-                               if (t->dev != dev) {
-                                       err = -EEXIST;
-                                       break;
-                               }
-                       } else {
-                               if (((dev->flags&IFF_POINTOPOINT) &&
-                                   !p.iph.daddr) ||
-                                   (!(dev->flags&IFF_POINTOPOINT) &&
-                                   p.iph.daddr)) {
-                                       err = -EINVAL;
-                                       break;
-                               }
-                               t = netdev_priv(dev);
-                               vti_tunnel_unlink(ipn, t);
-                               synchronize_net();
-                               t->parms.iph.saddr = p.iph.saddr;
-                               t->parms.iph.daddr = p.iph.daddr;
-                               t->parms.i_key = p.i_key;
-                               t->parms.o_key = p.o_key;
-                               t->parms.iph.protocol = IPPROTO_IPIP;
-                               memcpy(dev->dev_addr, &p.iph.saddr, 4);
-                               memcpy(dev->broadcast, &p.iph.daddr, 4);
-                               vti_tunnel_link(ipn, t);
-                               netdev_state_change(dev);
-                       }
-               }
-
-               if (t) {
-                       err = 0;
-                       if (cmd == SIOCCHGTUNNEL) {
-                               t->parms.i_key = p.i_key;
-                               t->parms.o_key = p.o_key;
-                               if (t->parms.link != p.link) {
-                                       t->parms.link = p.link;
-                                       vti_tunnel_bind_dev(dev);
-                                       netdev_state_change(dev);
-                               }
-                       }
-                       p.i_flags |= GRE_KEY | VTI_ISVTI;
-                       p.o_flags |= GRE_KEY;
-                       if (copy_to_user(ifr->ifr_ifru.ifru_data, &t->parms,
-                                        sizeof(p)))
-                               err = -EFAULT;
-               } else
-                       err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT);
-               break;
+                       return -EINVAL;
+       }
 
-       case SIOCDELTUNNEL:
-               err = -EPERM;
-               if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
-                       goto done;
-
-               if (dev == ipn->fb_tunnel_dev) {
-                       err = -EFAULT;
-                       if (copy_from_user(&p, ifr->ifr_ifru.ifru_data,
-                                          sizeof(p)))
-                               goto done;
-                       err = -ENOENT;
-
-                       t = vti_tunnel_locate(net, &p, 0);
-                       if (t == NULL)
-                               goto done;
-                       err = -EPERM;
-                       if (t->dev == ipn->fb_tunnel_dev)
-                               goto done;
-                       dev = t->dev;
-               }
-               unregister_netdevice(dev);
-               err = 0;
-               break;
+       err = ip_tunnel_ioctl(dev, &p, cmd);
+       if (err)
+               return err;
 
-       default:
-               err = -EINVAL;
+       if (cmd != SIOCDELTUNNEL) {
+               p.i_flags |= GRE_KEY | VTI_ISVTI;
+               p.o_flags |= GRE_KEY;
        }
 
-done:
-       return err;
-}
-
-static int vti_tunnel_change_mtu(struct net_device *dev, int new_mtu)
-{
-       if (new_mtu < 68 || new_mtu > 0xFFF8)
-               return -EINVAL;
-       dev->mtu = new_mtu;
+       if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
+               return -EFAULT;
        return 0;
 }
 
 static const struct net_device_ops vti_netdev_ops = {
        .ndo_init       = vti_tunnel_init,
-       .ndo_uninit     = vti_tunnel_uninit,
+       .ndo_uninit     = ip_tunnel_uninit,
        .ndo_start_xmit = vti_tunnel_xmit,
        .ndo_do_ioctl   = vti_tunnel_ioctl,
-       .ndo_change_mtu = vti_tunnel_change_mtu,
+       .ndo_change_mtu = ip_tunnel_change_mtu,
        .ndo_get_stats64 = ip_tunnel_get_stats64,
 };
 
-static void vti_dev_free(struct net_device *dev)
+static void vti_tunnel_setup(struct net_device *dev)
 {
-       free_percpu(dev->tstats);
-       free_netdev(dev);
+       dev->netdev_ops         = &vti_netdev_ops;
+       ip_tunnel_setup(dev, vti_net_id);
 }
 
-static void vti_tunnel_setup(struct net_device *dev)
+static int vti_tunnel_init(struct net_device *dev)
 {
-       dev->netdev_ops         = &vti_netdev_ops;
-       dev->destructor         = vti_dev_free;
+       struct ip_tunnel *tunnel = netdev_priv(dev);
+       struct iphdr *iph = &tunnel->parms.iph;
+
+       memcpy(dev->dev_addr, &iph->saddr, 4);
+       memcpy(dev->broadcast, &iph->daddr, 4);
 
        dev->type               = ARPHRD_TUNNEL;
        dev->hard_header_len    = LL_MAX_HEADER + sizeof(struct iphdr);
@@ -581,38 +282,18 @@ static void vti_tunnel_setup(struct net_device *dev)
        dev->features           |= NETIF_F_NETNS_LOCAL;
        dev->features           |= NETIF_F_LLTX;
        dev->priv_flags         &= ~IFF_XMIT_DST_RELEASE;
-}
 
-static int vti_tunnel_init(struct net_device *dev)
-{
-       struct ip_tunnel *tunnel = netdev_priv(dev);
-
-       tunnel->dev = dev;
-       strcpy(tunnel->parms.name, dev->name);
-
-       memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4);
-       memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);
-
-       dev->tstats = alloc_percpu(struct pcpu_tstats);
-       if (!dev->tstats)
-               return -ENOMEM;
-
-       return 0;
+       return ip_tunnel_init(dev);
 }
 
-static int __net_init vti_fb_tunnel_init(struct net_device *dev)
+static void __net_init vti_fb_tunnel_init(struct net_device *dev)
 {
        struct ip_tunnel *tunnel = netdev_priv(dev);
        struct iphdr *iph = &tunnel->parms.iph;
-       struct vti_net *ipn = net_generic(dev_net(dev), vti_net_id);
 
        iph->version            = 4;
        iph->protocol           = IPPROTO_IPIP;
        iph->ihl                = 5;
-
-       dev_hold(dev);
-       rcu_assign_pointer(ipn->tunnels_wc[0], tunnel);
-       return 0;
 }
 
 static struct xfrm_tunnel vti_handler __read_mostly = {
@@ -621,76 +302,30 @@ static struct xfrm_tunnel vti_handler __read_mostly = {
        .priority       =       1,
 };
 
-static void vti_destroy_tunnels(struct vti_net *ipn, struct list_head *head)
-{
-       int prio;
-
-       for (prio = 1; prio < 4; prio++) {
-               int h;
-               for (h = 0; h < HASH_SIZE; h++) {
-                       struct ip_tunnel *t;
-
-                       t = rtnl_dereference(ipn->tunnels[prio][h]);
-                       while (t != NULL) {
-                               unregister_netdevice_queue(t->dev, head);
-                               t = rtnl_dereference(t->next);
-                       }
-               }
-       }
-}
-
 static int __net_init vti_init_net(struct net *net)
 {
        int err;
-       struct vti_net *ipn = net_generic(net, vti_net_id);
-
-       ipn->tunnels[0] = ipn->tunnels_wc;
-       ipn->tunnels[1] = ipn->tunnels_l;
-       ipn->tunnels[2] = ipn->tunnels_r;
-       ipn->tunnels[3] = ipn->tunnels_r_l;
-
-       ipn->fb_tunnel_dev = alloc_netdev(sizeof(struct ip_tunnel),
-                                         "ip_vti0",
-                                         vti_tunnel_setup);
-       if (!ipn->fb_tunnel_dev) {
-               err = -ENOMEM;
-               goto err_alloc_dev;
-       }
-       dev_net_set(ipn->fb_tunnel_dev, net);
-
-       err = vti_fb_tunnel_init(ipn->fb_tunnel_dev);
-       if (err)
-               goto err_reg_dev;
-       ipn->fb_tunnel_dev->rtnl_link_ops = &vti_link_ops;
+       struct ip_tunnel_net *itn;
 
-       err = register_netdev(ipn->fb_tunnel_dev);
+       err = ip_tunnel_init_net(net, vti_net_id, &vti_link_ops, "ip_vti0");
        if (err)
-               goto err_reg_dev;
+               return err;
+       itn = net_generic(net, vti_net_id);
+       vti_fb_tunnel_init(itn->fb_tunnel_dev);
        return 0;
-
-err_reg_dev:
-       vti_dev_free(ipn->fb_tunnel_dev);
-err_alloc_dev:
-       /* nothing */
-       return err;
 }
 
 static void __net_exit vti_exit_net(struct net *net)
 {
-       struct vti_net *ipn = net_generic(net, vti_net_id);
-       LIST_HEAD(list);
-
-       rtnl_lock();
-       vti_destroy_tunnels(ipn, &list);
-       unregister_netdevice_many(&list);
-       rtnl_unlock();
+       struct ip_tunnel_net *itn = net_generic(net, vti_net_id);
+       ip_tunnel_delete_net(itn, &vti_link_ops);
 }
 
 static struct pernet_operations vti_net_ops = {
        .init = vti_init_net,
        .exit = vti_exit_net,
        .id   = &vti_net_id,
-       .size = sizeof(struct vti_net),
+       .size = sizeof(struct ip_tunnel_net),
 };
 
 static int vti_tunnel_validate(struct nlattr *tb[], struct nlattr *data[])
@@ -728,78 +363,19 @@ static void vti_netlink_parms(struct nlattr *data[],
 static int vti_newlink(struct net *src_net, struct net_device *dev,
                       struct nlattr *tb[], struct nlattr *data[])
 {
-       struct ip_tunnel *nt;
-       struct net *net = dev_net(dev);
-       struct vti_net *ipn = net_generic(net, vti_net_id);
-       int mtu;
-       int err;
-
-       nt = netdev_priv(dev);
-       vti_netlink_parms(data, &nt->parms);
-
-       if (vti_tunnel_locate(net, &nt->parms, 0))
-               return -EEXIST;
+       struct ip_tunnel_parm parms;
 
-       mtu = vti_tunnel_bind_dev(dev);
-       if (!tb[IFLA_MTU])
-               dev->mtu = mtu;
-
-       err = register_netdevice(dev);
-       if (err)
-               goto out;
-
-       dev_hold(dev);
-       vti_tunnel_link(ipn, nt);
-
-out:
-       return err;
+       vti_netlink_parms(data, &parms);
+       return ip_tunnel_newlink(dev, tb, &parms);
 }
 
 static int vti_changelink(struct net_device *dev, struct nlattr *tb[],
                          struct nlattr *data[])
 {
-       struct ip_tunnel *t, *nt;
-       struct net *net = dev_net(dev);
-       struct vti_net *ipn = net_generic(net, vti_net_id);
        struct ip_tunnel_parm p;
-       int mtu;
-
-       if (dev == ipn->fb_tunnel_dev)
-               return -EINVAL;
 
-       nt = netdev_priv(dev);
        vti_netlink_parms(data, &p);
-
-       t = vti_tunnel_locate(net, &p, 0);
-
-       if (t) {
-               if (t->dev != dev)
-                       return -EEXIST;
-       } else {
-               t = nt;
-
-               vti_tunnel_unlink(ipn, t);
-               t->parms.iph.saddr = p.iph.saddr;
-               t->parms.iph.daddr = p.iph.daddr;
-               t->parms.i_key = p.i_key;
-               t->parms.o_key = p.o_key;
-               if (dev->type != ARPHRD_ETHER) {
-                       memcpy(dev->dev_addr, &p.iph.saddr, 4);
-                       memcpy(dev->broadcast, &p.iph.daddr, 4);
-               }
-               vti_tunnel_link(ipn, t);
-               netdev_state_change(dev);
-       }
-
-       if (t->parms.link != p.link) {
-               t->parms.link = p.link;
-               mtu = vti_tunnel_bind_dev(dev);
-               if (!tb[IFLA_MTU])
-                       dev->mtu = mtu;
-               netdev_state_change(dev);
-       }
-
-       return 0;
+       return ip_tunnel_changelink(dev, tb, &p);
 }
 
 static size_t vti_get_size(const struct net_device *dev)
@@ -865,7 +441,7 @@ static int __init vti_init(void)
        err = xfrm4_mode_tunnel_input_register(&vti_handler);
        if (err < 0) {
                unregister_pernet_device(&vti_net_ops);
-               pr_info(KERN_INFO "vti init: can't register tunnel\n");
+               pr_info("vti init: can't register tunnel\n");
        }
 
        err = rtnl_link_register(&vti_link_ops);
index 51fc2a1dcdd3aa564147933d7485ec6356e9ad12..87bd2952c7334e54ed49b1fa6bf5cb8237269ba6 100644 (file)
@@ -286,7 +286,6 @@ static void ipip_tunnel_setup(struct net_device *dev)
        dev->flags              = IFF_NOARP;
        dev->iflink             = 0;
        dev->addr_len           = 4;
-       dev->features           |= NETIF_F_NETNS_LOCAL;
        dev->features           |= NETIF_F_LLTX;
        dev->priv_flags         &= ~IFF_XMIT_DST_RELEASE;
 
@@ -437,7 +436,7 @@ static int __net_init ipip_init_net(struct net *net)
 static void __net_exit ipip_exit_net(struct net *net)
 {
        struct ip_tunnel_net *itn = net_generic(net, ipip_net_id);
-       ip_tunnel_delete_net(itn);
+       ip_tunnel_delete_net(itn, &ipip_link_ops);
 }
 
 static struct pernet_operations ipip_net_ops = {
index 132a09664704ed73ed850e8961f11fa3e3641473..bacc0bcf48ce5b1cec365e5886d820492d41566d 100644 (file)
@@ -127,9 +127,9 @@ static struct kmem_cache *mrt_cachep __read_mostly;
 static struct mr_table *ipmr_new_table(struct net *net, u32 id);
 static void ipmr_free_table(struct mr_table *mrt);
 
-static int ip_mr_forward(struct net *net, struct mr_table *mrt,
-                        struct sk_buff *skb, struct mfc_cache *cache,
-                        int local);
+static void ip_mr_forward(struct net *net, struct mr_table *mrt,
+                         struct sk_buff *skb, struct mfc_cache *cache,
+                         int local);
 static int ipmr_cache_report(struct mr_table *mrt,
                             struct sk_buff *pkt, vifi_t vifi, int assert);
 static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
@@ -1795,9 +1795,9 @@ static int ipmr_find_vif(struct mr_table *mrt, struct net_device *dev)
 
 /* "local" means that we should preserve one skb (for local delivery) */
 
-static int ip_mr_forward(struct net *net, struct mr_table *mrt,
-                        struct sk_buff *skb, struct mfc_cache *cache,
-                        int local)
+static void ip_mr_forward(struct net *net, struct mr_table *mrt,
+                         struct sk_buff *skb, struct mfc_cache *cache,
+                         int local)
 {
        int psend = -1;
        int vif, ct;
@@ -1903,14 +1903,13 @@ last_forward:
                                ipmr_queue_xmit(net, mrt, skb2, cache, psend);
                } else {
                        ipmr_queue_xmit(net, mrt, skb, cache, psend);
-                       return 0;
+                       return;
                }
        }
 
 dont_forward:
        if (!local)
                kfree_skb(skb);
-       return 0;
 }
 
 static struct mr_table *ipmr_rt_fib_lookup(struct net *net, struct sk_buff *skb)
index 30e4de94056722535ee5304e93b49bfb6c5cf889..00352ce0f0dec43f0f0d08e0e77115c97625ab89 100644 (file)
@@ -118,7 +118,7 @@ static int masq_device_event(struct notifier_block *this,
                NF_CT_ASSERT(dev->ifindex != 0);
 
                nf_ct_iterate_cleanup(net, device_cmp,
-                                     (void *)(long)dev->ifindex);
+                                     (void *)(long)dev->ifindex, 0, 0);
        }
 
        return NOTIFY_DONE;
index 746427c9e7199513c12f4254ce048234f108a35d..d7d9882d4caea169964a58e294ffe6c73a99d36c 100644 (file)
@@ -1082,7 +1082,7 @@ static void ping_v4_format_sock(struct sock *sp, struct seq_file *f,
        __u16 srcp = ntohs(inet->inet_sport);
 
        seq_printf(f, "%5d: %08X:%04X %08X:%04X"
-               " %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %d%n",
+               " %02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %d%n",
                bucket, src, srcp, dest, destp, sp->sk_state,
                sk_wmem_alloc_get(sp),
                sk_rmem_alloc_get(sp),
index 463bd1273346d72cad00a42a5852f95cef490c19..4a0335854b89c305442e257057ca560811159df8 100644 (file)
@@ -111,7 +111,7 @@ static const struct snmp_mib snmp4_ipstats_list[] = {
        SNMP_MIB_SENTINEL
 };
 
-/* Following RFC4293 items are displayed in /proc/net/netstat */
+/* Following items are displayed in /proc/net/netstat */
 static const struct snmp_mib snmp4_ipextstats_list[] = {
        SNMP_MIB_ITEM("InNoRoutes", IPSTATS_MIB_INNOROUTES),
        SNMP_MIB_ITEM("InTruncatedPkts", IPSTATS_MIB_INTRUNCATEDPKTS),
@@ -125,7 +125,12 @@ static const struct snmp_mib snmp4_ipextstats_list[] = {
        SNMP_MIB_ITEM("OutMcastOctets", IPSTATS_MIB_OUTMCASTOCTETS),
        SNMP_MIB_ITEM("InBcastOctets", IPSTATS_MIB_INBCASTOCTETS),
        SNMP_MIB_ITEM("OutBcastOctets", IPSTATS_MIB_OUTBCASTOCTETS),
+       /* Non RFC4293 fields */
        SNMP_MIB_ITEM("InCsumErrors", IPSTATS_MIB_CSUMERRORS),
+       SNMP_MIB_ITEM("InNoECTPkts", IPSTATS_MIB_NOECTPKTS),
+       SNMP_MIB_ITEM("InECT1Pkts", IPSTATS_MIB_ECT1PKTS),
+       SNMP_MIB_ITEM("InECT0Pkts", IPSTATS_MIB_ECT0PKTS),
+       SNMP_MIB_ITEM("InCEPkts", IPSTATS_MIB_CEPKTS),
        SNMP_MIB_SENTINEL
 };
 
index dd44e0ab600cafe76ac50f74aaff5c6d429ea534..41d84505a9225af90386b135c4b053019070f7b4 100644 (file)
@@ -987,7 +987,7 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
              srcp  = inet->inet_num;
 
        seq_printf(seq, "%4d: %08X:%04X %08X:%04X"
-               " %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %d\n",
+               " %02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %d\n",
                i, src, srcp, dest, destp, sp->sk_state,
                sk_wmem_alloc_get(sp),
                sk_rmem_alloc_get(sp),
index a9a54a2368323243be30f536d45646d48ec6c42d..727f4365bcdff3acdb415fe75fd878a3bc5050af 100644 (file)
 #define RT_FL_TOS(oldflp4) \
        ((oldflp4)->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK))
 
-#define IP_MAX_MTU     0xFFF0
+/* IPv4 datagram length is stored into 16bit field (tot_len) */
+#define IP_MAX_MTU     0xFFFF
 
 #define RT_GC_TIMEOUT (300*HZ)
 
@@ -435,12 +436,12 @@ static inline int ip_rt_proc_init(void)
 
 static inline bool rt_is_expired(const struct rtable *rth)
 {
-       return rth->rt_genid != rt_genid(dev_net(rth->dst.dev));
+       return rth->rt_genid != rt_genid_ipv4(dev_net(rth->dst.dev));
 }
 
 void rt_cache_flush(struct net *net)
 {
-       rt_genid_bump(net);
+       rt_genid_bump_ipv4(net);
 }
 
 static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
@@ -1227,10 +1228,7 @@ static unsigned int ipv4_mtu(const struct dst_entry *dst)
                        mtu = 576;
        }
 
-       if (mtu > IP_MAX_MTU)
-               mtu = IP_MAX_MTU;
-
-       return mtu;
+       return min_t(unsigned int, mtu, IP_MAX_MTU);
 }
 
 static struct fib_nh_exception *find_exception(struct fib_nh *nh, __be32 daddr)
@@ -1458,7 +1456,7 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
 #endif
        rth->dst.output = ip_rt_bug;
 
-       rth->rt_genid   = rt_genid(dev_net(dev));
+       rth->rt_genid   = rt_genid_ipv4(dev_net(dev));
        rth->rt_flags   = RTCF_MULTICAST;
        rth->rt_type    = RTN_MULTICAST;
        rth->rt_is_input= 1;
@@ -1589,7 +1587,7 @@ static int __mkroute_input(struct sk_buff *skb,
                goto cleanup;
        }
 
-       rth->rt_genid = rt_genid(dev_net(rth->dst.dev));
+       rth->rt_genid = rt_genid_ipv4(dev_net(rth->dst.dev));
        rth->rt_flags = flags;
        rth->rt_type = res->type;
        rth->rt_is_input = 1;
@@ -1760,7 +1758,7 @@ local_input:
        rth->dst.tclassid = itag;
 #endif
 
-       rth->rt_genid = rt_genid(net);
+       rth->rt_genid = rt_genid_ipv4(net);
        rth->rt_flags   = flags|RTCF_LOCAL;
        rth->rt_type    = res.type;
        rth->rt_is_input = 1;
@@ -1945,7 +1943,7 @@ add:
 
        rth->dst.output = ip_output;
 
-       rth->rt_genid = rt_genid(dev_net(dev_out));
+       rth->rt_genid = rt_genid_ipv4(dev_net(dev_out));
        rth->rt_flags   = flags;
        rth->rt_type    = type;
        rth->rt_is_input = 0;
@@ -2227,7 +2225,7 @@ struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_or
                rt->rt_iif = ort->rt_iif;
                rt->rt_pmtu = ort->rt_pmtu;
 
-               rt->rt_genid = rt_genid(net);
+               rt->rt_genid = rt_genid_ipv4(net);
                rt->rt_flags = ort->rt_flags;
                rt->rt_type = ort->rt_type;
                rt->rt_gateway = ort->rt_gateway;
@@ -2665,7 +2663,7 @@ static __net_initdata struct pernet_operations sysctl_route_ops = {
 
 static __net_init int rt_genid_init(struct net *net)
 {
-       atomic_set(&net->rt_genid, 0);
+       atomic_set(&net->ipv4.rt_genid, 0);
        atomic_set(&net->fnhe_genid, 0);
        get_random_bytes(&net->ipv4.dev_addr_genid,
                         sizeof(net->ipv4.dev_addr_genid));
index 610e324348d1cc5acdd861d8af916331d49244d7..8ed7c32ae28e47bbb73052f4686643d0f5cf7eb6 100644 (file)
@@ -558,6 +558,13 @@ static struct ctl_table ipv4_table[] = {
                .proc_handler   = proc_dointvec_minmax,
                .extra1         = &one,
        },
+       {
+               .procname       = "tcp_notsent_lowat",
+               .data           = &sysctl_tcp_notsent_lowat,
+               .maxlen         = sizeof(sysctl_tcp_notsent_lowat),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec,
+       },
        {
                .procname       = "tcp_rmem",
                .data           = &sysctl_tcp_rmem,
index 5423223e93c25074f87c92dc60b338fddcb66f3f..4e42c03859f46e9b8067494710b3db71bf803593 100644 (file)
@@ -410,10 +410,6 @@ void tcp_init_sock(struct sock *sk)
 
        icsk->icsk_sync_mss = tcp_sync_mss;
 
-       /* Presumed zeroed, in order of appearance:
-        *      cookie_in_always, cookie_out_never,
-        *      s_data_constant, s_data_in, s_data_out
-        */
        sk->sk_sndbuf = sysctl_tcp_wmem[1];
        sk->sk_rcvbuf = sysctl_tcp_rmem[1];
 
@@ -499,7 +495,7 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
                        mask |= POLLIN | POLLRDNORM;
 
                if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
-                       if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
+                       if (sk_stream_is_writeable(sk)) {
                                mask |= POLLOUT | POLLWRNORM;
                        } else {  /* send SIGIO later */
                                set_bit(SOCK_ASYNC_NOSPACE,
@@ -510,7 +506,7 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
                                 * wspace test but before the flags are set,
                                 * IO signal will be lost.
                                 */
-                               if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
+                               if (sk_stream_is_writeable(sk))
                                        mask |= POLLOUT | POLLWRNORM;
                        }
                } else
@@ -1120,6 +1116,13 @@ new_segment:
                                if (!skb)
                                        goto wait_for_memory;
 
+                               /*
+                                * All packets are restored as if they have
+                                * already been sent.
+                                */
+                               if (tp->repair)
+                                       TCP_SKB_CB(skb)->when = tcp_time_stamp;
+
                                /*
                                 * Check whether we can use HW checksum.
                                 */
@@ -2631,6 +2634,10 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
                else
                        tp->tsoffset = val - tcp_time_stamp;
                break;
+       case TCP_NOTSENT_LOWAT:
+               tp->notsent_lowat = val;
+               sk->sk_write_space(sk);
+               break;
        default:
                err = -ENOPROTOOPT;
                break;
@@ -2847,6 +2854,9 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
        case TCP_TIMESTAMP:
                val = tcp_time_stamp + tp->tsoffset;
                break;
+       case TCP_NOTSENT_LOWAT:
+               val = tp->notsent_lowat;
+               break;
        default:
                return -ENOPROTOOPT;
        }
index 8f7ef0ad80e5b6b062b7b40f3049634e63b93b40..ab7bd35bb312c6e9e07aa2950d75ec4bbc982eac 100644 (file)
@@ -58,23 +58,22 @@ error:              kfree(ctx);
        return err;
 }
 
-/* Computes the fastopen cookie for the peer.
- * The peer address is a 128 bits long (pad with zeros for IPv4).
+/* Computes the fastopen cookie for the IP path.
+ * The path is a 128 bits long (pad with zeros for IPv4).
  *
  * The caller must check foc->len to determine if a valid cookie
  * has been generated successfully.
 */
-void tcp_fastopen_cookie_gen(__be32 addr, struct tcp_fastopen_cookie *foc)
+void tcp_fastopen_cookie_gen(__be32 src, __be32 dst,
+                            struct tcp_fastopen_cookie *foc)
 {
-       __be32 peer_addr[4] = { addr, 0, 0, 0 };
+       __be32 path[4] = { src, dst, 0, 0 };
        struct tcp_fastopen_context *ctx;
 
        rcu_read_lock();
        ctx = rcu_dereference(tcp_fastopen_ctx);
        if (ctx) {
-               crypto_cipher_encrypt_one(ctx->tfm,
-                                         foc->val,
-                                         (__u8 *)peer_addr);
+               crypto_cipher_encrypt_one(ctx->tfm, foc->val, (__u8 *)path);
                foc->len = TCP_FASTOPEN_COOKIE_SIZE;
        }
        rcu_read_unlock();
index 28af45abe0622fabac4d53ab651099a580808766..e965cc7b87ffdd02c6260ab86c3febea6971a60b 100644 (file)
@@ -1048,6 +1048,7 @@ struct tcp_sacktag_state {
        int reord;
        int fack_count;
        int flag;
+       s32 rtt; /* RTT measured by SACKing never-retransmitted data */
 };
 
 /* Check if skb is fully within the SACK block. In presence of GSO skbs,
@@ -1108,7 +1109,7 @@ static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb,
 static u8 tcp_sacktag_one(struct sock *sk,
                          struct tcp_sacktag_state *state, u8 sacked,
                          u32 start_seq, u32 end_seq,
-                         bool dup_sack, int pcount)
+                         int dup_sack, int pcount, u32 xmit_time)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        int fack_count = state->fack_count;
@@ -1148,6 +1149,9 @@ static u8 tcp_sacktag_one(struct sock *sk,
                                                           state->reord);
                                if (!after(end_seq, tp->high_seq))
                                        state->flag |= FLAG_ORIG_SACK_ACKED;
+                               /* Pick the earliest sequence sacked for RTT */
+                               if (state->rtt < 0)
+                                       state->rtt = tcp_time_stamp - xmit_time;
                        }
 
                        if (sacked & TCPCB_LOST) {
@@ -1205,7 +1209,8 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
         * tcp_highest_sack_seq() when skb is highest_sack.
         */
        tcp_sacktag_one(sk, state, TCP_SKB_CB(skb)->sacked,
-                       start_seq, end_seq, dup_sack, pcount);
+                       start_seq, end_seq, dup_sack, pcount,
+                       TCP_SKB_CB(skb)->when);
 
        if (skb == tp->lost_skb_hint)
                tp->lost_cnt_hint += pcount;
@@ -1479,7 +1484,8 @@ static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
                                                TCP_SKB_CB(skb)->seq,
                                                TCP_SKB_CB(skb)->end_seq,
                                                dup_sack,
-                                               tcp_skb_pcount(skb));
+                                               tcp_skb_pcount(skb),
+                                               TCP_SKB_CB(skb)->when);
 
                        if (!before(TCP_SKB_CB(skb)->seq,
                                    tcp_highest_sack_seq(tp)))
@@ -1536,7 +1542,7 @@ static int tcp_sack_cache_ok(const struct tcp_sock *tp, const struct tcp_sack_bl
 
 static int
 tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
-                       u32 prior_snd_una)
+                       u32 prior_snd_una, s32 *sack_rtt)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        const unsigned char *ptr = (skb_transport_header(ack_skb) +
@@ -1554,6 +1560,7 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
 
        state.flag = 0;
        state.reord = tp->packets_out;
+       state.rtt = -1;
 
        if (!tp->sacked_out) {
                if (WARN_ON(tp->fackets_out))
@@ -1737,6 +1744,7 @@ out:
        WARN_ON((int)tp->retrans_out < 0);
        WARN_ON((int)tcp_packets_in_flight(tp) < 0);
 #endif
+       *sack_rtt = state.rtt;
        return state.flag;
 }
 
@@ -1869,8 +1877,13 @@ void tcp_enter_loss(struct sock *sk, int how)
        }
        tcp_verify_left_out(tp);
 
-       tp->reordering = min_t(unsigned int, tp->reordering,
-                              sysctl_tcp_reordering);
+       /* Timeout in disordered state after receiving substantial DUPACKs
+        * suggests that the degree of reordering is over-estimated.
+        */
+       if (icsk->icsk_ca_state <= TCP_CA_Disorder &&
+           tp->sacked_out >= sysctl_tcp_reordering)
+               tp->reordering = min_t(unsigned int, tp->reordering,
+                                      sysctl_tcp_reordering);
        tcp_set_ca_state(sk, TCP_CA_Loss);
        tp->high_seq = tp->snd_nxt;
        TCP_ECN_queue_cwr(tp);
@@ -2792,65 +2805,51 @@ static void tcp_fastretrans_alert(struct sock *sk, const int acked,
        tcp_xmit_retransmit_queue(sk);
 }
 
-void tcp_valid_rtt_meas(struct sock *sk, u32 seq_rtt)
+static inline bool tcp_ack_update_rtt(struct sock *sk, const int flag,
+                                     s32 seq_rtt, s32 sack_rtt)
 {
-       tcp_rtt_estimator(sk, seq_rtt);
-       tcp_set_rto(sk);
-       inet_csk(sk)->icsk_backoff = 0;
-}
-EXPORT_SYMBOL(tcp_valid_rtt_meas);
+       const struct tcp_sock *tp = tcp_sk(sk);
+
+       /* Prefer RTT measured from ACK's timing to TS-ECR. This is because
+        * broken middle-boxes or peers may corrupt TS-ECR fields. But
+        * Karn's algorithm forbids taking RTT if some retransmitted data
+        * is acked (RFC6298).
+        */
+       if (flag & FLAG_RETRANS_DATA_ACKED)
+               seq_rtt = -1;
+
+       if (seq_rtt < 0)
+               seq_rtt = sack_rtt;
 
-/* Read draft-ietf-tcplw-high-performance before mucking
- * with this code. (Supersedes RFC1323)
- */
-static void tcp_ack_saw_tstamp(struct sock *sk, int flag)
-{
        /* RTTM Rule: A TSecr value received in a segment is used to
         * update the averaged RTT measurement only if the segment
         * acknowledges some new data, i.e., only if it advances the
         * left edge of the send window.
-        *
         * See draft-ietf-tcplw-high-performance-00, section 3.3.
-        * 1998/04/10 Andrey V. Savochkin <saw@msu.ru>
-        *
-        * Changed: reset backoff as soon as we see the first valid sample.
-        * If we do not, we get strongly overestimated rto. With timestamps
-        * samples are accepted even from very old segments: f.e., when rtt=1
-        * increases to 8, we retransmit 5 times and after 8 seconds delayed
-        * answer arrives rto becomes 120 seconds! If at least one of segments
-        * in window is lost... Voila.                          --ANK (010210)
         */
-       struct tcp_sock *tp = tcp_sk(sk);
-
-       tcp_valid_rtt_meas(sk, tcp_time_stamp - tp->rx_opt.rcv_tsecr);
-}
+       if (seq_rtt < 0 && tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr)
+               seq_rtt = tcp_time_stamp - tp->rx_opt.rcv_tsecr;
 
-static void tcp_ack_no_tstamp(struct sock *sk, u32 seq_rtt, int flag)
-{
-       /* We don't have a timestamp. Can only use
-        * packets that are not retransmitted to determine
-        * rtt estimates. Also, we must not reset the
-        * backoff for rto until we get a non-retransmitted
-        * packet. This allows us to deal with a situation
-        * where the network delay has increased suddenly.
-        * I.e. Karn's algorithm. (SIGCOMM '87, p5.)
-        */
+       if (seq_rtt < 0)
+               return false;
 
-       if (flag & FLAG_RETRANS_DATA_ACKED)
-               return;
+       tcp_rtt_estimator(sk, seq_rtt);
+       tcp_set_rto(sk);
 
-       tcp_valid_rtt_meas(sk, seq_rtt);
+       /* RFC6298: only reset backoff on valid RTT measurement. */
+       inet_csk(sk)->icsk_backoff = 0;
+       return true;
 }
 
-static inline void tcp_ack_update_rtt(struct sock *sk, const int flag,
-                                     const s32 seq_rtt)
+/* Compute time elapsed between (last) SYNACK and the ACK completing 3WHS. */
+static void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req)
 {
-       const struct tcp_sock *tp = tcp_sk(sk);
-       /* Note that peer MAY send zero echo. In this case it is ignored. (rfc1323) */
-       if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr)
-               tcp_ack_saw_tstamp(sk, flag);
-       else if (seq_rtt >= 0)
-               tcp_ack_no_tstamp(sk, seq_rtt, flag);
+       struct tcp_sock *tp = tcp_sk(sk);
+       s32 seq_rtt = -1;
+
+       if (tp->lsndtime && !tp->total_retrans)
+               seq_rtt = tcp_time_stamp - tp->lsndtime;
+       tcp_ack_update_rtt(sk, FLAG_SYN_ACKED, seq_rtt, -1);
 }
 
 static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
@@ -2939,7 +2938,7 @@ static u32 tcp_tso_acked(struct sock *sk, struct sk_buff *skb)
  * arrived at the other end.
  */
 static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
-                              u32 prior_snd_una)
+                              u32 prior_snd_una, s32 sack_rtt)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        const struct inet_connection_sock *icsk = inet_csk(sk);
@@ -2978,8 +2977,6 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
                        if (sacked & TCPCB_SACKED_RETRANS)
                                tp->retrans_out -= acked_pcount;
                        flag |= FLAG_RETRANS_DATA_ACKED;
-                       ca_seq_rtt = -1;
-                       seq_rtt = -1;
                } else {
                        ca_seq_rtt = now - scb->when;
                        last_ackt = skb->tstamp;
@@ -3031,6 +3028,10 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
        if (skb && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED))
                flag |= FLAG_SACK_RENEGING;
 
+       if (tcp_ack_update_rtt(sk, flag, seq_rtt, sack_rtt) ||
+           (flag & FLAG_ACKED))
+               tcp_rearm_rto(sk);
+
        if (flag & FLAG_ACKED) {
                const struct tcp_congestion_ops *ca_ops
                        = inet_csk(sk)->icsk_ca_ops;
@@ -3040,9 +3041,6 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
                        tcp_mtup_probe_success(sk);
                }
 
-               tcp_ack_update_rtt(sk, flag, seq_rtt);
-               tcp_rearm_rto(sk);
-
                if (tcp_is_reno(tp)) {
                        tcp_remove_reno_sacks(sk, pkts_acked);
                } else {
@@ -3274,6 +3272,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
        int prior_packets = tp->packets_out;
        const int prior_unsacked = tp->packets_out - tp->sacked_out;
        int acked = 0; /* Number of packets newly acked */
+       s32 sack_rtt = -1;
 
        /* If the ack is older than previous acks
         * then we can probably ignore it.
@@ -3330,7 +3329,8 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
                flag |= tcp_ack_update_window(sk, skb, ack, ack_seq);
 
                if (TCP_SKB_CB(skb)->sacked)
-                       flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una);
+                       flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una,
+                                                       &sack_rtt);
 
                if (TCP_ECN_rcv_ecn_echo(tp, tcp_hdr(skb)))
                        flag |= FLAG_ECE;
@@ -3349,7 +3349,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 
        /* See if we can take anything off of the retransmit queue. */
        acked = tp->packets_out;
-       flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una);
+       flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una, sack_rtt);
        acked -= tp->packets_out;
 
        if (tcp_ack_is_dubious(sk, flag)) {
@@ -3402,7 +3402,8 @@ old_ack:
         * If data was DSACKed, see if we can undo a cwnd reduction.
         */
        if (TCP_SKB_CB(skb)->sacked) {
-               flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una);
+               flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una,
+                                               &sack_rtt);
                tcp_fastretrans_alert(sk, acked, prior_unsacked,
                                      is_dupack, flag);
        }
@@ -5624,9 +5625,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
                 * so release it.
                 */
                if (req) {
-                       tcp_synack_rtt_meas(sk, req);
                        tp->total_retrans = req->num_retrans;
-
                        reqsk_fastopen_remove(sk, req, false);
                } else {
                        /* Make sure socket is routed, for correct metrics. */
@@ -5651,6 +5650,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
                tp->snd_una = TCP_SKB_CB(skb)->ack_seq;
                tp->snd_wnd = ntohs(th->window) << tp->rx_opt.snd_wscale;
                tcp_init_wl(tp, TCP_SKB_CB(skb)->seq);
+               tcp_synack_rtt_meas(sk, req);
 
                if (tp->rx_opt.tstamp_ok)
                        tp->advmss -= TCPOLEN_TSTAMP_ALIGNED;
index b299da5ff4996fb5785d2725c66c43d91db918d4..09d45d718973398918879d1321000ca82cdf1e3d 100644 (file)
@@ -821,8 +821,7 @@ static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
  */
 static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
                              struct request_sock *req,
-                             u16 queue_mapping,
-                             bool nocache)
+                             u16 queue_mapping)
 {
        const struct inet_request_sock *ireq = inet_rsk(req);
        struct flowi4 fl4;
@@ -852,7 +851,7 @@ static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
 
 static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req)
 {
-       int res = tcp_v4_send_synack(sk, NULL, req, 0, false);
+       int res = tcp_v4_send_synack(sk, NULL, req, 0);
 
        if (!res)
                TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
@@ -890,7 +889,7 @@ bool tcp_syn_flood_action(struct sock *sk,
                NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP);
 
        lopt = inet_csk(sk)->icsk_accept_queue.listen_opt;
-       if (!lopt->synflood_warned) {
+       if (!lopt->synflood_warned && sysctl_tcp_syncookies != 2) {
                lopt->synflood_warned = 1;
                pr_info("%s: Possible SYN flooding on port %d. %s.  Check SNMP counters.\n",
                        proto, ntohs(tcp_hdr(skb)->dest), msg);
@@ -1316,9 +1315,11 @@ static bool tcp_fastopen_check(struct sock *sk, struct sk_buff *skb,
                tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
                return true;
        }
+
        if (foc->len == TCP_FASTOPEN_COOKIE_SIZE) {
                if ((sysctl_tcp_fastopen & TFO_SERVER_COOKIE_NOT_CHKED) == 0) {
-                       tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr, valid_foc);
+                       tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr,
+                                               ip_hdr(skb)->daddr, valid_foc);
                        if ((valid_foc->len != TCP_FASTOPEN_COOKIE_SIZE) ||
                            memcmp(&foc->val[0], &valid_foc->val[0],
                            TCP_FASTOPEN_COOKIE_SIZE) != 0)
@@ -1329,14 +1330,16 @@ static bool tcp_fastopen_check(struct sock *sk, struct sk_buff *skb,
                tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
                return true;
        } else if (foc->len == 0) { /* Client requesting a cookie */
-               tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr, valid_foc);
+               tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr,
+                                       ip_hdr(skb)->daddr, valid_foc);
                NET_INC_STATS_BH(sock_net(sk),
                    LINUX_MIB_TCPFASTOPENCOOKIEREQD);
        } else {
                /* Client sent a cookie with wrong size. Treat it
                 * the same as invalid and return a valid one.
                 */
-               tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr, valid_foc);
+               tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr,
+                                       ip_hdr(skb)->daddr, valid_foc);
        }
        return false;
 }
@@ -1462,7 +1465,8 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
         * limitations, they conserve resources and peer is
         * evidently real one.
         */
-       if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
+       if ((sysctl_tcp_syncookies == 2 ||
+            inet_csk_reqsk_queue_is_full(sk)) && !isn) {
                want_cookie = tcp_syn_flood_action(sk, skb, "TCP");
                if (!want_cookie)
                        goto drop;
@@ -1671,8 +1675,6 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
                newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
 
        tcp_initialize_rcv_mss(newsk);
-       tcp_synack_rtt_meas(newsk, req);
-       newtp->total_retrans = req->num_retrans;
 
 #ifdef CONFIG_TCP_MD5SIG
        /* Copy over the MD5 key from the original socket */
@@ -2605,7 +2607,7 @@ static void get_openreq4(const struct sock *sk, const struct request_sock *req,
        long delta = req->expires - jiffies;
 
        seq_printf(f, "%4d: %08X:%04X %08X:%04X"
-               " %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %pK%n",
+               " %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK%n",
                i,
                ireq->loc_addr,
                ntohs(inet_sk(sk)->inet_sport),
@@ -2663,7 +2665,7 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
                rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
 
        seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
-                       "%08X %5d %8d %lu %d %pK %lu %lu %u %u %d%n",
+                       "%08X %5u %8d %lu %d %pK %lu %lu %u %u %d%n",
                i, src, srcp, dest, destp, sk->sk_state,
                tp->write_seq - tp->snd_una,
                rx_queue,
@@ -2802,6 +2804,7 @@ struct proto tcp_prot = {
        .unhash                 = inet_unhash,
        .get_port               = inet_csk_get_port,
        .enter_memory_pressure  = tcp_enter_memory_pressure,
+       .stream_memory_free     = tcp_stream_memory_free,
        .sockets_allocated      = &tcp_sockets_allocated,
        .orphan_count           = &tcp_orphan_count,
        .memory_allocated       = &tcp_memory_allocated,
index da14436c1735677f7207030cfb8d76ae15a1157f..8a57d79b0b16c9bffa9615295095525bc9b90f98 100644 (file)
@@ -132,10 +132,10 @@ static int tcp_update_limit(struct mem_cgroup *memcg, u64 val)
        return 0;
 }
 
-static int tcp_cgroup_write(struct cgroup *cont, struct cftype *cft,
+static int tcp_cgroup_write(struct cgroup_subsys_state *css, struct cftype *cft,
                            const char *buffer)
 {
-       struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
+       struct mem_cgroup *memcg = mem_cgroup_from_css(css);
        unsigned long long val;
        int ret = 0;
 
@@ -180,9 +180,9 @@ static u64 tcp_read_usage(struct mem_cgroup *memcg)
        return res_counter_read_u64(&tcp->tcp_memory_allocated, RES_USAGE);
 }
 
-static u64 tcp_cgroup_read(struct cgroup *cont, struct cftype *cft)
+static u64 tcp_cgroup_read(struct cgroup_subsys_state *css, struct cftype *cft)
 {
-       struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
+       struct mem_cgroup *memcg = mem_cgroup_from_css(css);
        u64 val;
 
        switch (cft->private) {
@@ -202,13 +202,13 @@ static u64 tcp_cgroup_read(struct cgroup *cont, struct cftype *cft)
        return val;
 }
 
-static int tcp_cgroup_reset(struct cgroup *cont, unsigned int event)
+static int tcp_cgroup_reset(struct cgroup_subsys_state *css, unsigned int event)
 {
        struct mem_cgroup *memcg;
        struct tcp_memcontrol *tcp;
        struct cg_proto *cg_proto;
 
-       memcg = mem_cgroup_from_cont(cont);
+       memcg = mem_cgroup_from_css(css);
        cg_proto = tcp_prot.proto_cgroup(memcg);
        if (!cg_proto)
                return 0;
index ab1c08658528e7737fd1683841a4d11701cbc0bb..58a3e69aef6440d36061ea9e6291152442954239 100644 (file)
@@ -411,6 +411,8 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
                newtp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
                tcp_enable_early_retrans(newtp);
                newtp->tlp_high_seq = 0;
+               newtp->lsndtime = treq->snt_synack;
+               newtp->total_retrans = req->num_retrans;
 
                /* So many TCP implementations out there (incorrectly) count the
                 * initial SYN frame in their delayed-ACK and congestion control
@@ -666,12 +668,6 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
        if (!(flg & TCP_FLAG_ACK))
                return NULL;
 
-       /* Got ACK for our SYNACK, so update baseline for SYNACK RTT sample. */
-       if (tmp_opt.saw_tstamp && tmp_opt.rcv_tsecr)
-               tcp_rsk(req)->snt_synack = tmp_opt.rcv_tsecr;
-       else if (req->num_retrans) /* don't take RTT sample if retrans && ~TS */
-               tcp_rsk(req)->snt_synack = 0;
-
        /* For Fast Open no more processing is needed (sk is the
         * child socket).
         */
index 92fde8d1aa821c38b59ba467fe386eb3306c2dfe..884efff5b531f9c6177a789ea5013c0492939afa 100644 (file)
@@ -65,6 +65,9 @@ int sysctl_tcp_base_mss __read_mostly = TCP_BASE_MSS;
 /* By default, RFC2861 behavior.  */
 int sysctl_tcp_slow_start_after_idle __read_mostly = 1;
 
+unsigned int sysctl_tcp_notsent_lowat __read_mostly = UINT_MAX;
+EXPORT_SYMBOL(sysctl_tcp_notsent_lowat);
+
 static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
                           int push_one, gfp_t gfp);
 
index 766e6bab911362cb0caa44511b10bb82418e7980..0b24508bcdc47fceb9d6a3e2755d0653e1f0e731 100644 (file)
@@ -704,7 +704,7 @@ EXPORT_SYMBOL(udp_flush_pending_frames);
  *     @src:   source IP address
  *     @dst:   destination IP address
  */
-static void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst)
+void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst)
 {
        struct udphdr *uh = udp_hdr(skb);
        struct sk_buff *frags = skb_shinfo(skb)->frag_list;
@@ -740,6 +740,7 @@ static void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst)
                        uh->check = CSUM_MANGLED_0;
        }
 }
+EXPORT_SYMBOL_GPL(udp4_hwcsum);
 
 static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4)
 {
@@ -2158,7 +2159,7 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
        __u16 srcp        = ntohs(inet->inet_sport);
 
        seq_printf(f, "%5d: %08X:%04X %08X:%04X"
-               " %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %d%n",
+               " %02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %d%n",
                bucket, src, srcp, dest, destp, sp->sk_state,
                sk_wmem_alloc_get(sp),
                sk_rmem_alloc_get(sp),
index 327a617d594cd04929ffa668082ed575b0f54d7f..80baf4a3b1b5cae54aa4827fe33ab733c12ed171 100644 (file)
@@ -21,7 +21,6 @@
 static int xfrm4_tunnel_check_size(struct sk_buff *skb)
 {
        int mtu, ret = 0;
-       struct dst_entry *dst;
 
        if (IPCB(skb)->flags & IPSKB_XFRM_TUNNEL_SIZE)
                goto out;
@@ -29,12 +28,10 @@ static int xfrm4_tunnel_check_size(struct sk_buff *skb)
        if (!(ip_hdr(skb)->frag_off & htons(IP_DF)) || skb->local_df)
                goto out;
 
-       dst = skb_dst(skb);
-       mtu = dst_mtu(dst);
+       mtu = xfrm_skb_dst_mtu(skb);
        if (skb->len > mtu) {
                if (skb->sk)
-                       ip_local_error(skb->sk, EMSGSIZE, ip_hdr(skb)->daddr,
-                                      inet_sk(skb->sk)->inet_dport, mtu);
+                       xfrm_local_error(skb, mtu);
                else
                        icmp_send(skb, ICMP_DEST_UNREACH,
                                  ICMP_FRAG_NEEDED, htonl(mtu));
@@ -99,3 +96,12 @@ int xfrm4_output(struct sk_buff *skb)
                            x->outer_mode->afinfo->output_finish,
                            !(IPCB(skb)->flags & IPSKB_REROUTED));
 }
+
+void xfrm4_local_error(struct sk_buff *skb, u32 mtu)
+{
+       struct iphdr *hdr;
+
+       hdr = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
+       ip_local_error(skb->sk, EMSGSIZE, hdr->daddr,
+                      inet_sk(skb->sk)->inet_dport, mtu);
+}
index 9258e751babaa1b9bfd954e6da771a0a80071a1b..0b2a0641526a74118cf46336a185e62b9621fc95 100644 (file)
@@ -83,6 +83,7 @@ static struct xfrm_state_afinfo xfrm4_state_afinfo = {
        .extract_input          = xfrm4_extract_input,
        .extract_output         = xfrm4_extract_output,
        .transport_finish       = xfrm4_transport_finish,
+       .local_error            = xfrm4_local_error,
 };
 
 void __init xfrm4_state_init(void)
index da4241c8c7dafe0004a53ed85dfc270cd3be16ba..2d6d1793bbfed73fc001ccfbff9485601ea527d7 100644 (file)
@@ -99,9 +99,9 @@
 #define ACONF_DEBUG 2
 
 #if ACONF_DEBUG >= 3
-#define ADBG(x) printk x
+#define ADBG(fmt, ...) printk(fmt, ##__VA_ARGS__)
 #else
-#define ADBG(x)
+#define ADBG(fmt, ...) do { if (0) printk(fmt, ##__VA_ARGS__); } while (0)
 #endif
 
 #define        INFINITY_LIFE_TIME      0xFFFFFFFF
@@ -177,6 +177,8 @@ static struct ipv6_devconf ipv6_devconf __read_mostly = {
        .accept_redirects       = 1,
        .autoconf               = 1,
        .force_mld_version      = 0,
+       .mldv1_unsolicited_report_interval = 10 * HZ,
+       .mldv2_unsolicited_report_interval = HZ,
        .dad_transmits          = 1,
        .rtr_solicits           = MAX_RTR_SOLICITATIONS,
        .rtr_solicit_interval   = RTR_SOLICITATION_INTERVAL,
@@ -211,6 +213,9 @@ static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
        .accept_ra              = 1,
        .accept_redirects       = 1,
        .autoconf               = 1,
+       .force_mld_version      = 0,
+       .mldv1_unsolicited_report_interval = 10 * HZ,
+       .mldv2_unsolicited_report_interval = HZ,
        .dad_transmits          = 1,
        .rtr_solicits           = MAX_RTR_SOLICITATIONS,
        .rtr_solicit_interval   = RTR_SOLICITATION_INTERVAL,
@@ -369,9 +374,9 @@ static struct inet6_dev *ipv6_add_dev(struct net_device *dev)
        dev_hold(dev);
 
        if (snmp6_alloc_dev(ndev) < 0) {
-               ADBG((KERN_WARNING
+               ADBG(KERN_WARNING
                        "%s: cannot allocate memory for statistics; dev=%s.\n",
-                       __func__, dev->name));
+                       __func__, dev->name);
                neigh_parms_release(&nd_tbl, ndev->nd_parms);
                dev_put(dev);
                kfree(ndev);
@@ -379,9 +384,9 @@ static struct inet6_dev *ipv6_add_dev(struct net_device *dev)
        }
 
        if (snmp6_register_dev(ndev) < 0) {
-               ADBG((KERN_WARNING
+               ADBG(KERN_WARNING
                        "%s: cannot create /proc/net/dev_snmp6/%s\n",
-                       __func__, dev->name));
+                       __func__, dev->name);
                neigh_parms_release(&nd_tbl, ndev->nd_parms);
                ndev->dead = 1;
                in6_dev_finish_destroy(ndev);
@@ -844,7 +849,7 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr,
 
        /* Ignore adding duplicate addresses on an interface */
        if (ipv6_chk_same_addr(dev_net(idev->dev), addr, idev->dev)) {
-               ADBG(("ipv6_add_addr: already assigned\n"));
+               ADBG("ipv6_add_addr: already assigned\n");
                err = -EEXIST;
                goto out;
        }
@@ -852,7 +857,7 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr,
        ifa = kzalloc(sizeof(struct inet6_ifaddr), GFP_ATOMIC);
 
        if (ifa == NULL) {
-               ADBG(("ipv6_add_addr: malloc failed\n"));
+               ADBG("ipv6_add_addr: malloc failed\n");
                err = -ENOBUFS;
                goto out;
        }
@@ -1126,12 +1131,10 @@ retry:
        if (ifp->flags & IFA_F_OPTIMISTIC)
                addr_flags |= IFA_F_OPTIMISTIC;
 
-       ift = !max_addresses ||
-             ipv6_count_addresses(idev) < max_addresses ?
-               ipv6_add_addr(idev, &addr, NULL, tmp_plen,
-                             ipv6_addr_scope(&addr), addr_flags,
-                             tmp_valid_lft, tmp_prefered_lft) : NULL;
-       if (IS_ERR_OR_NULL(ift)) {
+       ift = ipv6_add_addr(idev, &addr, NULL, tmp_plen,
+                           ipv6_addr_scope(&addr), addr_flags,
+                           tmp_valid_lft, tmp_prefered_lft);
+       if (IS_ERR(ift)) {
                in6_ifa_put(ifp);
                in6_dev_put(idev);
                pr_info("%s: retry temporary address regeneration\n", __func__);
@@ -1809,6 +1812,16 @@ static int addrconf_ifid_gre(u8 *eui, struct net_device *dev)
        return __ipv6_isatap_ifid(eui, *(__be32 *)dev->dev_addr);
 }
 
+static int addrconf_ifid_ip6tnl(u8 *eui, struct net_device *dev)
+{
+       memcpy(eui, dev->perm_addr, 3);
+       memcpy(eui + 5, dev->perm_addr + 3, 3);
+       eui[3] = 0xFF;
+       eui[4] = 0xFE;
+       eui[0] ^= 2;
+       return 0;
+}
+
 static int ipv6_generate_eui64(u8 *eui, struct net_device *dev)
 {
        switch (dev->type) {
@@ -1827,6 +1840,8 @@ static int ipv6_generate_eui64(u8 *eui, struct net_device *dev)
                return addrconf_ifid_eui64(eui, dev);
        case ARPHRD_IEEE1394:
                return addrconf_ifid_ieee1394(eui, dev);
+       case ARPHRD_TUNNEL6:
+               return addrconf_ifid_ip6tnl(eui, dev);
        }
        return -1;
 }
@@ -2052,7 +2067,7 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len, bool sllao)
        pinfo = (struct prefix_info *) opt;
 
        if (len < sizeof(struct prefix_info)) {
-               ADBG(("addrconf: prefix option too short\n"));
+               ADBG("addrconf: prefix option too short\n");
                return;
        }
 
@@ -2704,7 +2719,8 @@ static void addrconf_dev_config(struct net_device *dev)
            (dev->type != ARPHRD_ARCNET) &&
            (dev->type != ARPHRD_INFINIBAND) &&
            (dev->type != ARPHRD_IEEE802154) &&
-           (dev->type != ARPHRD_IEEE1394)) {
+           (dev->type != ARPHRD_IEEE1394) &&
+           (dev->type != ARPHRD_TUNNEL6)) {
                /* Alas, we support only Ethernet autoconfiguration. */
                return;
        }
@@ -2790,44 +2806,6 @@ ipv6_inherit_linklocal(struct inet6_dev *idev, struct net_device *link_dev)
        return -1;
 }
 
-static void ip6_tnl_add_linklocal(struct inet6_dev *idev)
-{
-       struct net_device *link_dev;
-       struct net *net = dev_net(idev->dev);
-
-       /* first try to inherit the link-local address from the link device */
-       if (idev->dev->iflink &&
-           (link_dev = __dev_get_by_index(net, idev->dev->iflink))) {
-               if (!ipv6_inherit_linklocal(idev, link_dev))
-                       return;
-       }
-       /* then try to inherit it from any device */
-       for_each_netdev(net, link_dev) {
-               if (!ipv6_inherit_linklocal(idev, link_dev))
-                       return;
-       }
-       pr_debug("init ip6-ip6: add_linklocal failed\n");
-}
-
-/*
- * Autoconfigure tunnel with a link-local address so routing protocols,
- * DHCPv6, MLD etc. can be run over the virtual link
- */
-
-static void addrconf_ip6_tnl_config(struct net_device *dev)
-{
-       struct inet6_dev *idev;
-
-       ASSERT_RTNL();
-
-       idev = addrconf_add_dev(dev);
-       if (IS_ERR(idev)) {
-               pr_debug("init ip6-ip6: add_dev failed\n");
-               return;
-       }
-       ip6_tnl_add_linklocal(idev);
-}
-
 static int addrconf_notify(struct notifier_block *this, unsigned long event,
                           void *ptr)
 {
@@ -2895,9 +2873,6 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
                        addrconf_gre_config(dev);
                        break;
 #endif
-               case ARPHRD_TUNNEL6:
-                       addrconf_ip6_tnl_config(dev);
-                       break;
                case ARPHRD_LOOPBACK:
                        init_loopback(dev);
                        break;
@@ -3632,8 +3607,8 @@ restart:
        if (time_before(next_sched, jiffies + ADDRCONF_TIMER_FUZZ_MAX))
                next_sched = jiffies + ADDRCONF_TIMER_FUZZ_MAX;
 
-       ADBG((KERN_DEBUG "now = %lu, schedule = %lu, rounded schedule = %lu => %lu\n",
-             now, next, next_sec, next_sched));
+       ADBG(KERN_DEBUG "now = %lu, schedule = %lu, rounded schedule = %lu => %lu\n",
+             now, next, next_sec, next_sched);
 
        addr_chk_timer.expires = next_sched;
        add_timer(&addr_chk_timer);
@@ -4179,6 +4154,10 @@ static inline void ipv6_store_devconf(struct ipv6_devconf *cnf,
        array[DEVCONF_RTR_SOLICIT_DELAY] =
                jiffies_to_msecs(cnf->rtr_solicit_delay);
        array[DEVCONF_FORCE_MLD_VERSION] = cnf->force_mld_version;
+       array[DEVCONF_MLDV1_UNSOLICITED_REPORT_INTERVAL] =
+               jiffies_to_msecs(cnf->mldv1_unsolicited_report_interval);
+       array[DEVCONF_MLDV2_UNSOLICITED_REPORT_INTERVAL] =
+               jiffies_to_msecs(cnf->mldv2_unsolicited_report_interval);
 #ifdef CONFIG_IPV6_PRIVACY
        array[DEVCONF_USE_TEMPADDR] = cnf->use_tempaddr;
        array[DEVCONF_TEMP_VALID_LFT] = cnf->temp_valid_lft;
@@ -4654,6 +4633,7 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
                break;
        }
        atomic_inc(&net->ipv6.dev_addr_genid);
+       rt_genid_bump_ipv6(net);
 }
 
 static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
@@ -4861,6 +4841,22 @@ static struct addrconf_sysctl_table
                        .mode           = 0644,
                        .proc_handler   = proc_dointvec,
                },
+               {
+                       .procname       = "mldv1_unsolicited_report_interval",
+                       .data           =
+                               &ipv6_devconf.mldv1_unsolicited_report_interval,
+                       .maxlen         = sizeof(int),
+                       .mode           = 0644,
+                       .proc_handler   = proc_dointvec_ms_jiffies,
+               },
+               {
+                       .procname       = "mldv2_unsolicited_report_interval",
+                       .data           =
+                               &ipv6_devconf.mldv2_unsolicited_report_interval,
+                       .maxlen         = sizeof(int),
+                       .mode           = 0644,
+                       .proc_handler   = proc_dointvec_ms_jiffies,
+               },
 #ifdef CONFIG_IPV6_PRIVACY
                {
                        .procname       = "use_tempaddr",
index a5ac969aeefe5337d7fa4c5601912fb96b1a5496..0d1a9b153fbb8c82d680015d382429472d42cfcb 100644 (file)
@@ -766,6 +766,7 @@ static int __net_init inet6_net_init(struct net *net)
 
        net->ipv6.sysctl.bindv6only = 0;
        net->ipv6.sysctl.icmpv6_time = 1*HZ;
+       atomic_set(&net->ipv6.rt_genid, 0);
 
        err = ipv6_init_mibs(net);
        if (err)
index 197e6f4a2b7499c67440b66e04d646d631caefa5..48b6bd2a9a1451b7adf9a678ccce859003d57a79 100644 (file)
@@ -890,7 +890,7 @@ void ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
        src   = &np->rcv_saddr;
        seq_printf(seq,
                   "%5d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
-                  "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %d\n",
+                  "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %d\n",
                   bucket,
                   src->s6_addr32[0], src->s6_addr32[1],
                   src->s6_addr32[2], src->s6_addr32[3], srcp,
index 2e1a432867c0897f4e0d4439f9c6fd5d3f4c66cb..a6c58ce43d34aaa878c6ea239e0ff7c3616bd302 100644 (file)
@@ -55,26 +55,33 @@ static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp,
        struct fib6_table *table;
        struct net *net = rule->fr_net;
        pol_lookup_t lookup = arg->lookup_ptr;
+       int err = 0;
 
        switch (rule->action) {
        case FR_ACT_TO_TBL:
                break;
        case FR_ACT_UNREACHABLE:
+               err = -ENETUNREACH;
                rt = net->ipv6.ip6_null_entry;
                goto discard_pkt;
        default:
        case FR_ACT_BLACKHOLE:
+               err = -EINVAL;
                rt = net->ipv6.ip6_blk_hole_entry;
                goto discard_pkt;
        case FR_ACT_PROHIBIT:
+               err = -EACCES;
                rt = net->ipv6.ip6_prohibit_entry;
                goto discard_pkt;
        }
 
        table = fib6_get_table(net, rule->table);
-       if (table)
-               rt = lookup(net, table, flp6, flags);
+       if (!table) {
+               err = -EAGAIN;
+               goto out;
+       }
 
+       rt = lookup(net, table, flp6, flags);
        if (rt != net->ipv6.ip6_null_entry) {
                struct fib6_rule *r = (struct fib6_rule *)rule;
 
@@ -101,6 +108,7 @@ static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp,
        }
 again:
        ip6_rt_put(rt);
+       err = -EAGAIN;
        rt = NULL;
        goto out;
 
@@ -108,9 +116,31 @@ discard_pkt:
        dst_hold(&rt->dst);
 out:
        arg->result = rt;
-       return rt == NULL ? -EAGAIN : 0;
+       return err;
 }
 
+static bool fib6_rule_suppress(struct fib_rule *rule, struct fib_lookup_arg *arg)
+{
+       struct rt6_info *rt = (struct rt6_info *) arg->result;
+       struct net_device *dev = rt->rt6i_idev->dev;
+       /* do not accept result if the route does
+        * not meet the required prefix length
+        */
+       if (rt->rt6i_dst.plen <= rule->suppress_prefixlen)
+               goto suppress_route;
+
+       /* do not accept result if the route uses a device
+        * belonging to a forbidden interface group
+        */
+       if (rule->suppress_ifgroup != -1 && dev && dev->group == rule->suppress_ifgroup)
+               goto suppress_route;
+
+       return false;
+
+suppress_route:
+               ip6_rt_put(rt);
+               return true;
+}
 
 static int fib6_rule_match(struct fib_rule *rule, struct flowi *fl, int flags)
 {
@@ -244,6 +274,7 @@ static const struct fib_rules_ops __net_initconst fib6_rules_ops_template = {
        .addr_size              = sizeof(struct in6_addr),
        .action                 = fib6_rule_action,
        .match                  = fib6_rule_match,
+       .suppress               = fib6_rule_suppress,
        .configure              = fib6_rule_configure,
        .compare                = fib6_rule_compare,
        .fill                   = fib6_rule_fill,
index c4ff5bbb45c44a1bda8eee4f5d7728338de88a47..73db48eba1c48faa046c584912f09cda8f6ae2fa 100644 (file)
@@ -425,8 +425,8 @@ out:
  *     node.
  */
 
-static struct fib6_node * fib6_add_1(struct fib6_node *root, void *addr,
-                                    int addrlen, int plen,
+static struct fib6_node *fib6_add_1(struct fib6_node *root,
+                                    struct in6_addr *addr, int plen,
                                     int offset, int allow_create,
                                     int replace_required)
 {
@@ -543,7 +543,7 @@ insert_above:
           but if it is >= plen, the value is ignored in any case.
         */
 
-       bit = __ipv6_addr_diff(addr, &key->addr, addrlen);
+       bit = __ipv6_addr_diff(addr, &key->addr, sizeof(*addr));
 
        /*
         *              (intermediate)[in]
@@ -822,9 +822,9 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info)
        if (!allow_create && !replace_required)
                pr_warn("RTM_NEWROUTE with no NLM_F_CREATE or NLM_F_REPLACE\n");
 
-       fn = fib6_add_1(root, &rt->rt6i_dst.addr, sizeof(struct in6_addr),
-                       rt->rt6i_dst.plen, offsetof(struct rt6_info, rt6i_dst),
-                       allow_create, replace_required);
+       fn = fib6_add_1(root, &rt->rt6i_dst.addr, rt->rt6i_dst.plen,
+                       offsetof(struct rt6_info, rt6i_dst), allow_create,
+                       replace_required);
 
        if (IS_ERR(fn)) {
                err = PTR_ERR(fn);
@@ -863,7 +863,7 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info)
                        /* Now add the first leaf node to new subtree */
 
                        sn = fib6_add_1(sfn, &rt->rt6i_src.addr,
-                                       sizeof(struct in6_addr), rt->rt6i_src.plen,
+                                       rt->rt6i_src.plen,
                                        offsetof(struct rt6_info, rt6i_src),
                                        allow_create, replace_required);
 
@@ -882,7 +882,7 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info)
                        fn->subtree = sfn;
                } else {
                        sn = fib6_add_1(fn->subtree, &rt->rt6i_src.addr,
-                                       sizeof(struct in6_addr), rt->rt6i_src.plen,
+                                       rt->rt6i_src.plen,
                                        offsetof(struct rt6_info, rt6i_src),
                                        allow_create, replace_required);
 
index ecd60733e5e24afdb28a52c95686fec28e2e4d73..37be4ec78d3ccf804291d97db150e639d6e610d7 100644 (file)
@@ -335,6 +335,7 @@ static struct ip6_tnl *ip6gre_tunnel_locate(struct net *net,
        dev->rtnl_link_ops = &ip6gre_link_ops;
 
        nt->dev = dev;
+       nt->net = dev_net(dev);
        ip6gre_tnl_link_config(nt, 1);
 
        if (register_netdevice(dev) < 0)
@@ -724,6 +725,11 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb,
                ipv6_push_nfrag_opts(skb, &opt.ops, &proto, NULL);
        }
 
+       if (likely(!skb->encapsulation)) {
+               skb_reset_inner_headers(skb);
+               skb->encapsulation = 1;
+       }
+
        skb_push(skb, gre_hlen);
        skb_reset_network_header(skb);
        skb_set_transport_header(skb, sizeof(*ipv6h));
@@ -1255,6 +1261,7 @@ static int ip6gre_tunnel_init(struct net_device *dev)
        tunnel = netdev_priv(dev);
 
        tunnel->dev = dev;
+       tunnel->net = dev_net(dev);
        strcpy(tunnel->parms.name, dev->name);
 
        memcpy(dev->dev_addr, &tunnel->parms.laddr, sizeof(struct in6_addr));
@@ -1275,6 +1282,7 @@ static void ip6gre_fb_tunnel_init(struct net_device *dev)
        struct ip6_tnl *tunnel = netdev_priv(dev);
 
        tunnel->dev = dev;
+       tunnel->net = dev_net(dev);
        strcpy(tunnel->parms.name, dev->name);
 
        tunnel->hlen            = sizeof(struct ipv6hdr) + 4;
@@ -1450,6 +1458,7 @@ static int ip6gre_tap_init(struct net_device *dev)
        tunnel = netdev_priv(dev);
 
        tunnel->dev = dev;
+       tunnel->net = dev_net(dev);
        strcpy(tunnel->parms.name, dev->name);
 
        ip6gre_tnl_link_config(tunnel, 1);
@@ -1501,6 +1510,7 @@ static int ip6gre_newlink(struct net *src_net, struct net_device *dev,
                eth_hw_addr_random(dev);
 
        nt->dev = dev;
+       nt->net = dev_net(dev);
        ip6gre_tnl_link_config(nt, !tb[IFLA_MTU]);
 
        /* Can use a lockless transmit, unless we generate output sequences */
index 2bab2aa597450813ae4bd60d362998830a7e7e3b..302d6fb1ff2b43fb027a633a0ba3f87eea58d4e9 100644 (file)
@@ -44,7 +44,7 @@
 #include <net/ip6_route.h>
 #include <net/addrconf.h>
 #include <net/xfrm.h>
-
+#include <net/inet_ecn.h>
 
 
 int ip6_rcv_finish(struct sk_buff *skb)
@@ -109,6 +109,10 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt
        if (hdr->version != 6)
                goto err;
 
+       IP6_ADD_STATS_BH(dev_net(dev), idev,
+                        IPSTATS_MIB_NOECTPKTS +
+                               (ipv6_get_dsfield(hdr) & INET_ECN_MASK),
+                        max_t(unsigned short, 1, skb_shinfo(skb)->gso_segs));
        /*
         * RFC4291 2.5.3
         * A packet received on an interface with a destination address
index 1e55866cead7425eefdff36c1ddca1aab9504286..9cb5c61bf23bc4ae8ace1bd1d7a9ddf9ab5c4b75 100644 (file)
@@ -41,6 +41,7 @@
 #include <linux/netfilter_ipv6.h>
 #include <linux/slab.h>
 #include <linux/hash.h>
+#include <linux/etherdevice.h>
 
 #include <asm/uaccess.h>
 #include <linux/atomic.h>
@@ -315,6 +316,7 @@ static struct ip6_tnl *ip6_tnl_create(struct net *net, struct __ip6_tnl_parm *p)
 
        t = netdev_priv(dev);
        t->parms = *p;
+       t->net = dev_net(dev);
        err = ip6_tnl_create2(dev);
        if (err < 0)
                goto failed_free;
@@ -374,7 +376,7 @@ static void
 ip6_tnl_dev_uninit(struct net_device *dev)
 {
        struct ip6_tnl *t = netdev_priv(dev);
-       struct net *net = dev_net(dev);
+       struct net *net = t->net;
        struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
 
        if (dev == ip6n->fb_tnl_dev)
@@ -741,7 +743,7 @@ int ip6_tnl_rcv_ctl(struct ip6_tnl *t,
 {
        struct __ip6_tnl_parm *p = &t->parms;
        int ret = 0;
-       struct net *net = dev_net(t->dev);
+       struct net *net = t->net;
 
        if ((p->flags & IP6_TNL_F_CAP_RCV) ||
            ((p->flags & IP6_TNL_F_CAP_PER_PACKET) &&
@@ -827,6 +829,9 @@ static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol,
                tstats->rx_packets++;
                tstats->rx_bytes += skb->len;
 
+               if (!net_eq(t->net, dev_net(t->dev)))
+                       skb_scrub_packet(skb);
+
                netif_rx(skb);
 
                rcu_read_unlock();
@@ -895,7 +900,7 @@ int ip6_tnl_xmit_ctl(struct ip6_tnl *t)
 {
        struct __ip6_tnl_parm *p = &t->parms;
        int ret = 0;
-       struct net *net = dev_net(t->dev);
+       struct net *net = t->net;
 
        if (p->flags & IP6_TNL_F_CAP_XMIT) {
                struct net_device *ldev = NULL;
@@ -945,8 +950,8 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
                         int encap_limit,
                         __u32 *pmtu)
 {
-       struct net *net = dev_net(dev);
        struct ip6_tnl *t = netdev_priv(dev);
+       struct net *net = t->net;
        struct net_device_stats *stats = &t->dev->stats;
        struct ipv6hdr *ipv6h = ipv6_hdr(skb);
        struct ipv6_tel_txoption opt;
@@ -996,6 +1001,9 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
                goto tx_err_dst_release;
        }
 
+       if (!net_eq(t->net, dev_net(dev)))
+               skb_scrub_packet(skb);
+
        /*
         * Okay, now see if we can stuff it in the buffer as-is.
         */
@@ -1027,6 +1035,12 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
                init_tel_txopt(&opt, encap_limit);
                ipv6_push_nfrag_opts(skb, &opt.ops, &proto, NULL);
        }
+
+       if (likely(!skb->encapsulation)) {
+               skb_reset_inner_headers(skb);
+               skb->encapsulation = 1;
+       }
+
        skb_push(skb, sizeof(struct ipv6hdr));
        skb_reset_network_header(skb);
        ipv6h = ipv6_hdr(skb);
@@ -1202,7 +1216,7 @@ static void ip6_tnl_link_config(struct ip6_tnl *t)
                int strict = (ipv6_addr_type(&p->raddr) &
                              (IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL));
 
-               struct rt6_info *rt = rt6_lookup(dev_net(dev),
+               struct rt6_info *rt = rt6_lookup(t->net,
                                                 &p->raddr, &p->laddr,
                                                 p->link, strict);
 
@@ -1251,7 +1265,7 @@ ip6_tnl_change(struct ip6_tnl *t, const struct __ip6_tnl_parm *p)
 
 static int ip6_tnl_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p)
 {
-       struct net *net = dev_net(t->dev);
+       struct net *net = t->net;
        struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
        int err;
 
@@ -1463,8 +1477,10 @@ static void ip6_tnl_dev_setup(struct net_device *dev)
                dev->mtu-=8;
        dev->flags |= IFF_NOARP;
        dev->addr_len = sizeof(struct in6_addr);
-       dev->features |= NETIF_F_NETNS_LOCAL;
        dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
+       /* This perm addr will be used as interface identifier by IPv6 */
+       dev->addr_assign_type = NET_ADDR_RANDOM;
+       eth_random_addr(dev->perm_addr);
 }
 
 
@@ -1479,6 +1495,7 @@ ip6_tnl_dev_init_gen(struct net_device *dev)
        struct ip6_tnl *t = netdev_priv(dev);
 
        t->dev = dev;
+       t->net = dev_net(dev);
        dev->tstats = alloc_percpu(struct pcpu_tstats);
        if (!dev->tstats)
                return -ENOMEM;
@@ -1596,9 +1613,9 @@ static int ip6_tnl_newlink(struct net *src_net, struct net_device *dev,
 static int ip6_tnl_changelink(struct net_device *dev, struct nlattr *tb[],
                              struct nlattr *data[])
 {
-       struct ip6_tnl *t;
+       struct ip6_tnl *t = netdev_priv(dev);
        struct __ip6_tnl_parm p;
-       struct net *net = dev_net(dev);
+       struct net *net = t->net;
        struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
 
        if (dev == ip6n->fb_tnl_dev)
@@ -1699,14 +1716,24 @@ static struct xfrm6_tunnel ip6ip6_handler __read_mostly = {
 
 static void __net_exit ip6_tnl_destroy_tunnels(struct ip6_tnl_net *ip6n)
 {
+       struct net *net = dev_net(ip6n->fb_tnl_dev);
+       struct net_device *dev, *aux;
        int h;
        struct ip6_tnl *t;
        LIST_HEAD(list);
 
+       for_each_netdev_safe(net, dev, aux)
+               if (dev->rtnl_link_ops == &ip6_link_ops)
+                       unregister_netdevice_queue(dev, &list);
+
        for (h = 0; h < HASH_SIZE; h++) {
                t = rtnl_dereference(ip6n->tnls_r_l[h]);
                while (t != NULL) {
-                       unregister_netdevice_queue(t->dev, &list);
+                       /* If dev is in the same netns, it has already
+                        * been added to the list by the previous loop.
+                        */
+                       if (!net_eq(dev_net(t->dev), net))
+                               unregister_netdevice_queue(t->dev, &list);
                        t = rtnl_dereference(t->next);
                }
        }
@@ -1732,6 +1759,10 @@ static int __net_init ip6_tnl_init_net(struct net *net)
        if (!ip6n->fb_tnl_dev)
                goto err_alloc_dev;
        dev_net_set(ip6n->fb_tnl_dev, net);
+       /* FB netdevice is special: we have one, and only one per netns.
+        * Allowing to move it to another netns is clearly unsafe.
+        */
+       ip6n->fb_tnl_dev->features |= NETIF_F_NETNS_LOCAL;
 
        err = ip6_fb_tnl_dev_init(ip6n->fb_tnl_dev);
        if (err < 0)
index 03986d31fa417a357b72e529e28fea0de0294a9e..a60a84ef04f7f880b23479e9f8fa047b3905af75 100644 (file)
@@ -110,8 +110,8 @@ static struct kmem_cache *mrt_cachep __read_mostly;
 static struct mr6_table *ip6mr_new_table(struct net *net, u32 id);
 static void ip6mr_free_table(struct mr6_table *mrt);
 
-static int ip6_mr_forward(struct net *net, struct mr6_table *mrt,
-                         struct sk_buff *skb, struct mfc6_cache *cache);
+static void ip6_mr_forward(struct net *net, struct mr6_table *mrt,
+                          struct sk_buff *skb, struct mfc6_cache *cache);
 static int ip6mr_cache_report(struct mr6_table *mrt, struct sk_buff *pkt,
                              mifi_t mifi, int assert);
 static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
@@ -2074,8 +2074,8 @@ static int ip6mr_find_vif(struct mr6_table *mrt, struct net_device *dev)
        return ct;
 }
 
-static int ip6_mr_forward(struct net *net, struct mr6_table *mrt,
-                         struct sk_buff *skb, struct mfc6_cache *cache)
+static void ip6_mr_forward(struct net *net, struct mr6_table *mrt,
+                          struct sk_buff *skb, struct mfc6_cache *cache)
 {
        int psend = -1;
        int vif, ct;
@@ -2156,12 +2156,11 @@ forward:
 last_forward:
        if (psend != -1) {
                ip6mr_forward2(net, mrt, skb, cache, psend);
-               return 0;
+               return;
        }
 
 dont_forward:
        kfree_skb(skb);
-       return 0;
 }
 
 
index 99cd65c715cdd4a41a31f6ddc1fa28d327651634..98ead2b1a669272be8c60676505e831dcda87afb 100644 (file)
@@ -44,6 +44,7 @@
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 #include <linux/slab.h>
+#include <linux/pkt_sched.h>
 #include <net/mld.h>
 
 #include <linux/netfilter.h>
@@ -106,10 +107,12 @@ static int ip6_mc_add_src(struct inet6_dev *idev, const struct in6_addr *pmca,
 static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml,
                            struct inet6_dev *idev);
 
-
-#define IGMP6_UNSOLICITED_IVAL (10*HZ)
 #define MLD_QRV_DEFAULT                2
 
+/* RFC3810, 8.1 Query Version Distinctions */
+#define MLD_V1_QUERY_LEN       24
+#define MLD_V2_QUERY_LEN_MIN   28
+
 #define MLD_V1_SEEN(idev) (dev_net((idev)->dev)->ipv6.devconf_all->force_mld_version == 1 || \
                (idev)->cnf.force_mld_version == 1 || \
                ((idev)->mc_v1_seen && \
@@ -128,6 +131,18 @@ int sysctl_mld_max_msf __read_mostly = IPV6_MLD_MAX_MSF;
             pmc != NULL;                                       \
             pmc = rcu_dereference(pmc->next))
 
+static int unsolicited_report_interval(struct inet6_dev *idev)
+{
+       int iv;
+
+       if (MLD_V1_SEEN(idev))
+               iv = idev->cnf.mldv1_unsolicited_report_interval;
+       else
+               iv = idev->cnf.mldv2_unsolicited_report_interval;
+
+       return iv > 0 ? iv : 1;
+}
+
 int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
 {
        struct net_device *dev = NULL;
@@ -984,24 +999,24 @@ bool ipv6_chk_mcast_addr(struct net_device *dev, const struct in6_addr *group,
 
 static void mld_gq_start_timer(struct inet6_dev *idev)
 {
-       int tv = net_random() % idev->mc_maxdelay;
+       unsigned long tv = net_random() % idev->mc_maxdelay;
 
        idev->mc_gq_running = 1;
        if (!mod_timer(&idev->mc_gq_timer, jiffies+tv+2))
                in6_dev_hold(idev);
 }
 
-static void mld_ifc_start_timer(struct inet6_dev *idev, int delay)
+static void mld_ifc_start_timer(struct inet6_dev *idev, unsigned long delay)
 {
-       int tv = net_random() % delay;
+       unsigned long tv = net_random() % delay;
 
        if (!mod_timer(&idev->mc_ifc_timer, jiffies+tv+2))
                in6_dev_hold(idev);
 }
 
-static void mld_dad_start_timer(struct inet6_dev *idev, int delay)
+static void mld_dad_start_timer(struct inet6_dev *idev, unsigned long delay)
 {
-       int tv = net_random() % delay;
+       unsigned long tv = net_random() % delay;
 
        if (!mod_timer(&idev->mc_dad_timer, jiffies+tv+2))
                in6_dev_hold(idev);
@@ -1134,13 +1149,11 @@ int igmp6_event_query(struct sk_buff *skb)
            !(group_type&IPV6_ADDR_MULTICAST))
                return -EINVAL;
 
-       if (len == 24) {
+       if (len == MLD_V1_QUERY_LEN) {
                int switchback;
                /* MLDv1 router present */
 
-               /* Translate milliseconds to jiffies */
-               max_delay = (ntohs(mld->mld_maxdelay)*HZ)/1000;
-
+               max_delay = msecs_to_jiffies(ntohs(mld->mld_maxdelay));
                switchback = (idev->mc_qrv + 1) * max_delay;
                idev->mc_v1_seen = jiffies + switchback;
 
@@ -1150,17 +1163,18 @@ int igmp6_event_query(struct sk_buff *skb)
                        __in6_dev_put(idev);
                /* clear deleted report items */
                mld_clear_delrec(idev);
-       } else if (len >= 28) {
+       } else if (len >= MLD_V2_QUERY_LEN_MIN) {
                int srcs_offset = sizeof(struct mld2_query) -
                                  sizeof(struct icmp6hdr);
                if (!pskb_may_pull(skb, srcs_offset))
                        return -EINVAL;
 
                mlh2 = (struct mld2_query *)skb_transport_header(skb);
-               max_delay = (MLDV2_MRC(ntohs(mlh2->mld2q_mrc))*HZ)/1000;
-               if (!max_delay)
-                       max_delay = 1;
+
+               max_delay = max(msecs_to_jiffies(MLDV2_MRC(ntohs(mlh2->mld2q_mrc))), 1UL);
+
                idev->mc_maxdelay = max_delay;
+
                if (mlh2->mld2q_qrv)
                        idev->mc_qrv = mlh2->mld2q_qrv;
                if (group_type == IPV6_ADDR_ANY) { /* general query */
@@ -1376,6 +1390,7 @@ static struct sk_buff *mld_newpack(struct inet6_dev *idev, int size)
        if (!skb)
                return NULL;
 
+       skb->priority = TC_PRIO_CONTROL;
        skb_reserve(skb, hlen);
 
        if (__ipv6_get_lladdr(idev, &addr_buf, IFA_F_TENTATIVE)) {
@@ -1769,7 +1784,7 @@ static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type)
                rcu_read_unlock();
                return;
        }
-
+       skb->priority = TC_PRIO_CONTROL;
        skb_reserve(skb, hlen);
 
        if (ipv6_get_lladdr(dev, &addr_buf, IFA_F_TENTATIVE)) {
@@ -2156,7 +2171,7 @@ static void igmp6_join_group(struct ifmcaddr6 *ma)
 
        igmp6_send(&ma->mca_addr, ma->idev->dev, ICMPV6_MGM_REPORT);
 
-       delay = net_random() % IGMP6_UNSOLICITED_IVAL;
+       delay = net_random() % unsolicited_report_interval(ma->idev);
 
        spin_lock_bh(&ma->mca_lock);
        if (del_timer(&ma->mca_timer)) {
@@ -2323,7 +2338,7 @@ void ipv6_mc_init_dev(struct inet6_dev *idev)
        setup_timer(&idev->mc_dad_timer, mld_dad_timer_expire,
                    (unsigned long)idev);
        idev->mc_qrv = MLD_QRV_DEFAULT;
-       idev->mc_maxdelay = IGMP6_UNSOLICITED_IVAL;
+       idev->mc_maxdelay = unsolicited_report_interval(idev);
        idev->mc_v1_seen = 0;
        write_unlock_bh(&idev->lock);
 }
index 47bff610751922ebd80b21eb59387a334af9b3fb..3e4e92d5e157358520fc5dc1cb04c3c18175df9f 100644 (file)
@@ -76,7 +76,7 @@ static int masq_device_event(struct notifier_block *this,
 
        if (event == NETDEV_DOWN)
                nf_ct_iterate_cleanup(net, device_cmp,
-                                     (void *)(long)dev->ifindex);
+                                     (void *)(long)dev->ifindex, 0, 0);
 
        return NOTIFY_DONE;
 }
index 51c3285b5d9b582cb1d6101d3fafc70c781f5a74..091d066a57b3711c5bd0eb5a352573d0aa2f3776 100644 (file)
@@ -91,6 +91,10 @@ static const struct snmp_mib snmp6_ipstats_list[] = {
        SNMP_MIB_ITEM("Ip6InBcastOctets", IPSTATS_MIB_INBCASTOCTETS),
        SNMP_MIB_ITEM("Ip6OutBcastOctets", IPSTATS_MIB_OUTBCASTOCTETS),
        /* IPSTATS_MIB_CSUMERRORS is not relevant in IPv6 (no checksum) */
+       SNMP_MIB_ITEM("Ip6InNoECTPkts", IPSTATS_MIB_NOECTPKTS),
+       SNMP_MIB_ITEM("Ip6InECT1Pkts", IPSTATS_MIB_ECT1PKTS),
+       SNMP_MIB_ITEM("Ip6InECT0Pkts", IPSTATS_MIB_ECT0PKTS),
+       SNMP_MIB_ITEM("Ip6InCEPkts", IPSTATS_MIB_CEPKTS),
        SNMP_MIB_SENTINEL
 };
 
index c45f7a5c36e96f98487ca194c5f08e5d0f931852..c1e53349820348b859f2f973454b40786635838f 100644 (file)
@@ -63,6 +63,8 @@
 #include <linux/seq_file.h>
 #include <linux/export.h>
 
+#define        ICMPV6_HDRLEN   4       /* ICMPv6 header, RFC 4443 Section 2.1 */
+
 static struct raw_hashinfo raw_v6_hashinfo = {
        .lock = __RW_LOCK_UNLOCKED(raw_v6_hashinfo.lock),
 };
@@ -108,11 +110,14 @@ found:
  */
 static int icmpv6_filter(const struct sock *sk, const struct sk_buff *skb)
 {
-       struct icmp6hdr *_hdr;
+       struct icmp6hdr _hdr;
        const struct icmp6hdr *hdr;
 
+       /* We require only the four bytes of the ICMPv6 header, not any
+        * additional bytes of message body in "struct icmp6hdr".
+        */
        hdr = skb_header_pointer(skb, skb_transport_offset(skb),
-                                sizeof(_hdr), &_hdr);
+                                ICMPV6_HDRLEN, &_hdr);
        if (hdr) {
                const __u32 *data = &raw6_sk(sk)->filter.data[0];
                unsigned int type = hdr->icmp6_type;
index 790d9f4b8b0b21c1d4dd4577ee6a472bd96fd729..1aeb473b2cc695d8d2b0a3696972ec9228455d14 100644 (file)
@@ -490,6 +490,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
        ipv6_hdr(head)->payload_len = htons(payload_len);
        ipv6_change_dsfield(ipv6_hdr(head), 0xff, ecn);
        IP6CB(head)->nhoff = nhoff;
+       IP6CB(head)->flags |= IP6SKB_FRAGMENTED;
 
        /* Yes, and fold redundant checksum back. 8) */
        if (head->ip_summed == CHECKSUM_COMPLETE)
@@ -524,6 +525,9 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
        struct net *net = dev_net(skb_dst(skb)->dev);
        int evicted;
 
+       if (IP6CB(skb)->flags & IP6SKB_FRAGMENTED)
+               goto fail_hdr;
+
        IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMREQDS);
 
        /* Jumbo payload inhibits frag. header */
@@ -544,6 +548,7 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
                                 ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMOKS);
 
                IP6CB(skb)->nhoff = (u8 *)fhdr - skb_network_header(skb);
+               IP6CB(skb)->flags |= IP6SKB_FRAGMENTED;
                return 1;
        }
 
index b70f8979003b5e23c4b1e78e44713ba3d3520de9..e22c4db8d07aded12104ed64bafd52e95be2b963 100644 (file)
@@ -283,9 +283,8 @@ static inline struct rt6_info *ip6_dst_alloc(struct net *net,
 
                memset(dst + 1, 0, sizeof(*rt) - sizeof(*dst));
                rt6_init_peer(rt, table ? &table->tb6_peers : net->ipv6.peers);
-               rt->rt6i_genid = rt_genid(net);
+               rt->rt6i_genid = rt_genid_ipv6(net);
                INIT_LIST_HEAD(&rt->rt6i_siblings);
-               rt->rt6i_nsiblings = 0;
        }
        return rt;
 }
@@ -1062,7 +1061,7 @@ static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
         * DST_OBSOLETE_FORCE_CHK which forces validation calls down
         * into this function always.
         */
-       if (rt->rt6i_genid != rt_genid(dev_net(rt->dst.dev)))
+       if (rt->rt6i_genid != rt_genid_ipv6(dev_net(rt->dst.dev)))
                return NULL;
 
        if (rt->rt6i_node && (rt->rt6i_node->fn_sernum == cookie))
index a3437a4cd07ecfdd28cbe47e0acd00e379ba9b19..c76a1185b563d2f25d363f25e8d87ca077dd63c7 100644 (file)
@@ -621,7 +621,7 @@ static int ipip6_rcv(struct sk_buff *skb)
                tstats->rx_packets++;
                tstats->rx_bytes += skb->len;
 
-               if (tunnel->net != dev_net(tunnel->dev))
+               if (!net_eq(tunnel->net, dev_net(tunnel->dev)))
                        skb_scrub_packet(skb);
                netif_rx(skb);
 
@@ -860,7 +860,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
                        tunnel->err_count = 0;
        }
 
-       if (tunnel->net != dev_net(dev))
+       if (!net_eq(tunnel->net, dev_net(dev)))
                skb_scrub_packet(skb);
 
        /*
@@ -888,6 +888,11 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
                ttl = iph6->hop_limit;
        tos = INET_ECN_encapsulate(tos, ipv6_get_dsfield(iph6));
 
+       if (likely(!skb->encapsulation)) {
+               skb_reset_inner_headers(skb);
+               skb->encapsulation = 1;
+       }
+
        err = iptunnel_xmit(dev_net(dev), rt, skb, fl4.saddr, fl4.daddr,
                            IPPROTO_IPV6, tos, ttl, df);
        iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
@@ -1589,7 +1594,7 @@ static void __net_exit sit_destroy_tunnels(struct sit_net *sitn, struct list_hea
                                /* If dev is in the same netns, it has already
                                 * been added to the list by the previous loop.
                                 */
-                               if (dev_net(t->dev) != net)
+                               if (!net_eq(dev_net(t->dev), net))
                                        unregister_netdevice_queue(t->dev,
                                                                   head);
                                t = rtnl_dereference(t->next);
index 6e1649d58533fd9a1b0a3f9f4373348a625bfb60..5bcfadf09e9533787d82957830a26de46cf66097 100644 (file)
@@ -963,7 +963,8 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
        if (!ipv6_unicast_destination(skb))
                goto drop;
 
-       if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
+       if ((sysctl_tcp_syncookies == 2 ||
+            inet_csk_reqsk_queue_is_full(sk)) && !isn) {
                want_cookie = tcp_syn_flood_action(sk, skb, "TCPv6");
                if (!want_cookie)
                        goto drop;
@@ -1237,8 +1238,6 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
                newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
 
        tcp_initialize_rcv_mss(newsk);
-       tcp_synack_rtt_meas(newsk, req);
-       newtp->total_retrans = req->num_retrans;
 
        newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
        newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
@@ -1732,7 +1731,7 @@ static void get_openreq6(struct seq_file *seq,
 
        seq_printf(seq,
                   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
-                  "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
+                  "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
                   i,
                   src->s6_addr32[0], src->s6_addr32[1],
                   src->s6_addr32[2], src->s6_addr32[3],
@@ -1783,7 +1782,7 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
 
        seq_printf(seq,
                   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
-                  "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %lu %lu %u %u %d\n",
+                  "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
                   i,
                   src->s6_addr32[0], src->s6_addr32[1],
                   src->s6_addr32[2], src->s6_addr32[3], srcp,
@@ -1926,6 +1925,7 @@ struct proto tcpv6_prot = {
        .unhash                 = inet_unhash,
        .get_port               = inet_csk_get_port,
        .enter_memory_pressure  = tcp_enter_memory_pressure,
+       .stream_memory_free     = tcp_stream_memory_free,
        .sockets_allocated      = &tcp_sockets_allocated,
        .memory_allocated       = &tcp_memory_allocated,
        .memory_pressure        = &tcp_memory_pressure,
index 8755a3079d0f3279255e54520fbfcdeb19a2006b..e092e306882dd88116a8734ab4e8d5236529aaa2 100644 (file)
@@ -34,8 +34,10 @@ static int xfrm6_local_dontfrag(struct sk_buff *skb)
        struct sock *sk = skb->sk;
 
        if (sk) {
-               proto = sk->sk_protocol;
+               if (sk->sk_family != AF_INET6)
+                       return 0;
 
+               proto = sk->sk_protocol;
                if (proto == IPPROTO_UDP || proto == IPPROTO_RAW)
                        return inet6_sk(sk)->dontfrag;
        }
@@ -54,13 +56,15 @@ static void xfrm6_local_rxpmtu(struct sk_buff *skb, u32 mtu)
        ipv6_local_rxpmtu(sk, &fl6, mtu);
 }
 
-static void xfrm6_local_error(struct sk_buff *skb, u32 mtu)
+void xfrm6_local_error(struct sk_buff *skb, u32 mtu)
 {
        struct flowi6 fl6;
+       const struct ipv6hdr *hdr;
        struct sock *sk = skb->sk;
 
+       hdr = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);
        fl6.fl6_dport = inet_sk(sk)->inet_dport;
-       fl6.daddr = ipv6_hdr(skb)->daddr;
+       fl6.daddr = hdr->daddr;
 
        ipv6_local_error(sk, EMSGSIZE, &fl6, mtu);
 }
@@ -80,7 +84,7 @@ static int xfrm6_tunnel_check_size(struct sk_buff *skb)
                if (xfrm6_local_dontfrag(skb))
                        xfrm6_local_rxpmtu(skb, mtu);
                else if (skb->sk)
-                       xfrm6_local_error(skb, mtu);
+                       xfrm_local_error(skb, mtu);
                else
                        icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
                ret = -EMSGSIZE;
@@ -136,13 +140,16 @@ static int __xfrm6_output(struct sk_buff *skb)
 {
        struct dst_entry *dst = skb_dst(skb);
        struct xfrm_state *x = dst->xfrm;
-       int mtu = ip6_skb_dst_mtu(skb);
+       int mtu = xfrm_skb_dst_mtu(skb);
+
+       if (mtu < IPV6_MIN_MTU)
+               mtu = IPV6_MIN_MTU;
 
        if (skb->len > mtu && xfrm6_local_dontfrag(skb)) {
                xfrm6_local_rxpmtu(skb, mtu);
                return -EMSGSIZE;
        } else if (!skb->local_df && skb->len > mtu && skb->sk) {
-               xfrm6_local_error(skb, mtu);
+               xfrm_local_error(skb, mtu);
                return -EMSGSIZE;
        }
 
index d8c70b8efc24231358ab50a32f354b69064f7860..3fc970135fc66583e5842943246993da7e2b69b4 100644 (file)
@@ -183,6 +183,7 @@ static struct xfrm_state_afinfo xfrm6_state_afinfo = {
        .extract_input          = xfrm6_extract_input,
        .extract_output         = xfrm6_extract_output,
        .transport_finish       = xfrm6_transport_finish,
+       .local_error            = xfrm6_local_error,
 };
 
 int __init xfrm6_state_init(void)
index 65e8833a251097885ca2c84fbb7bd761b1510a69..e15c16a517e72c162d3442bfa11fccfe3ef371b5 100644 (file)
@@ -213,7 +213,7 @@ static int ipx_seq_socket_show(struct seq_file *seq, void *v)
                           ntohs(ipxs->dest_addr.sock));
        }
 
-       seq_printf(seq, "%08X  %08X  %02X     %03d\n",
+       seq_printf(seq, "%08X  %08X  %02X     %03u\n",
                   sk_wmem_alloc_get(s),
                   sk_rmem_alloc_get(s),
                   s->sk_state,
index ae43c62f9045ba94ced0f025a9ecacb3bf145af1..85372cfa7b9f82b690579de4533ac27cf4ad09f6 100644 (file)
@@ -75,7 +75,7 @@ static pi_minor_info_t pi_minor_call_table[] = {
        { NULL, 0 },                                             /* 0x00 */
        { irttp_param_max_sdu_size, PV_INTEGER | PV_BIG_ENDIAN } /* 0x01 */
 };
-static pi_major_info_t pi_major_call_table[] = {{ pi_minor_call_table, 2 }};
+static pi_major_info_t pi_major_call_table[] = { { pi_minor_call_table, 2 } };
 static pi_param_info_t param_info = { pi_major_call_table, 1, 0x0f, 4 };
 
 /************************ GLOBAL PROCEDURES ************************/
@@ -205,7 +205,7 @@ static void irttp_todo_expired(unsigned long data)
  */
 static void irttp_flush_queues(struct tsap_cb *self)
 {
-       struct sk_buffskb;
+       struct sk_buff *skb;
 
        IRDA_DEBUG(4, "%s()\n", __func__);
 
@@ -400,7 +400,7 @@ struct tsap_cb *irttp_open_tsap(__u8 stsap_sel, int credit, notify_t *notify)
        /* The IrLMP spec (IrLMP 1.1 p10) says that we have the right to
         * use only 0x01-0x6F. Of course, we can use LSAP_ANY as well.
         * JeanII */
-       if((stsap_sel != LSAP_ANY) &&
+       if ((stsap_sel != LSAP_ANY) &&
           ((stsap_sel < 0x01) || (stsap_sel >= 0x70))) {
                IRDA_DEBUG(0, "%s(), invalid tsap!\n", __func__);
                return NULL;
@@ -427,7 +427,7 @@ struct tsap_cb *irttp_open_tsap(__u8 stsap_sel, int credit, notify_t *notify)
        ttp_notify.data_indication = irttp_data_indication;
        ttp_notify.udata_indication = irttp_udata_indication;
        ttp_notify.flow_indication = irttp_flow_indication;
-       if(notify->status_indication != NULL)
+       if (notify->status_indication != NULL)
                ttp_notify.status_indication = irttp_status_indication;
        ttp_notify.instance = self;
        strncpy(ttp_notify.name, notify->name, NOTIFY_MAX_NAME);
@@ -639,8 +639,7 @@ int irttp_data_request(struct tsap_cb *self, struct sk_buff *skb)
         */
        if ((self->tx_max_sdu_size != 0) &&
            (self->tx_max_sdu_size != TTP_SAR_UNBOUND) &&
-           (skb->len > self->tx_max_sdu_size))
-       {
+           (skb->len > self->tx_max_sdu_size)) {
                IRDA_ERROR("%s: SAR enabled, but data is larger than TxMaxSduSize!\n",
                           __func__);
                ret = -EMSGSIZE;
@@ -733,8 +732,7 @@ static void irttp_run_tx_queue(struct tsap_cb *self)
         * poll us through irttp_flow_indication() - Jean II */
        while ((self->send_credit > 0) &&
               (!irlmp_lap_tx_queue_full(self->lsap)) &&
-              (skb = skb_dequeue(&self->tx_queue)))
-       {
+              (skb = skb_dequeue(&self->tx_queue))) {
                /*
                 *  Since we can transmit and receive frames concurrently,
                 *  the code below is a critical region and we must assure that
@@ -798,8 +796,7 @@ static void irttp_run_tx_queue(struct tsap_cb *self)
         * where we can spend a bit of time doing stuff. - Jean II */
        if ((self->tx_sdu_busy) &&
            (skb_queue_len(&self->tx_queue) < TTP_TX_LOW_THRESHOLD) &&
-           (!self->close_pend))
-       {
+           (!self->close_pend)) {
                if (self->notify.flow_indication)
                        self->notify.flow_indication(self->notify.instance,
                                                     self, FLOW_START);
@@ -892,7 +889,7 @@ static int irttp_udata_indication(void *instance, void *sap,
        /* Just pass data to layer above */
        if (self->notify.udata_indication) {
                err = self->notify.udata_indication(self->notify.instance,
-                                                   self,skb);
+                                                   self, skb);
                /* Same comment as in irttp_do_data_indication() */
                if (!err)
                        return 0;
@@ -1057,7 +1054,7 @@ static void irttp_flow_indication(void *instance, void *sap, LOCAL_FLOW flow)
         * to do that. Jean II */
 
        /* If we need to send disconnect. try to do it now */
-       if(self->disconnect_pend)
+       if (self->disconnect_pend)
                irttp_start_todo_timer(self, 0);
 }
 
@@ -1116,7 +1113,7 @@ int irttp_connect_request(struct tsap_cb *self, __u8 dtsap_sel,
        IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -EBADR;);
 
        if (self->connected) {
-               if(userdata)
+               if (userdata)
                        dev_kfree_skb(userdata);
                return -EISCONN;
        }
@@ -1137,7 +1134,7 @@ int irttp_connect_request(struct tsap_cb *self, __u8 dtsap_sel,
                 *  headers
                 */
                IRDA_ASSERT(skb_headroom(userdata) >= TTP_MAX_HEADER,
-                       { dev_kfree_skb(userdata); return -1; } );
+                       { dev_kfree_skb(userdata); return -1; });
        }
 
        /* Initialize connection parameters */
@@ -1157,7 +1154,7 @@ int irttp_connect_request(struct tsap_cb *self, __u8 dtsap_sel,
         *  Give away max 127 credits for now
         */
        if (n > 127) {
-               self->avail_credit=n-127;
+               self->avail_credit = n - 127;
                n = 127;
        }
 
@@ -1166,10 +1163,10 @@ int irttp_connect_request(struct tsap_cb *self, __u8 dtsap_sel,
        /* SAR enabled? */
        if (max_sdu_size > 0) {
                IRDA_ASSERT(skb_headroom(tx_skb) >= (TTP_MAX_HEADER + TTP_SAR_HEADER),
-                       { dev_kfree_skb(tx_skb); return -1; } );
+                       { dev_kfree_skb(tx_skb); return -1; });
 
                /* Insert SAR parameters */
-               frame = skb_push(tx_skb, TTP_HEADER+TTP_SAR_HEADER);
+               frame = skb_push(tx_skb, TTP_HEADER + TTP_SAR_HEADER);
 
                frame[0] = TTP_PARAMETERS | n;
                frame[1] = 0x04; /* Length */
@@ -1386,7 +1383,7 @@ int irttp_connect_response(struct tsap_cb *self, __u32 max_sdu_size,
                 *  headers
                 */
                IRDA_ASSERT(skb_headroom(userdata) >= TTP_MAX_HEADER,
-                       { dev_kfree_skb(userdata); return -1; } );
+                       { dev_kfree_skb(userdata); return -1; });
        }
 
        self->avail_credit = 0;
@@ -1409,10 +1406,10 @@ int irttp_connect_response(struct tsap_cb *self, __u32 max_sdu_size,
        /* SAR enabled? */
        if (max_sdu_size > 0) {
                IRDA_ASSERT(skb_headroom(tx_skb) >= (TTP_MAX_HEADER + TTP_SAR_HEADER),
-                       { dev_kfree_skb(tx_skb); return -1; } );
+                       { dev_kfree_skb(tx_skb); return -1; });
 
                /* Insert TTP header with SAR parameters */
-               frame = skb_push(tx_skb, TTP_HEADER+TTP_SAR_HEADER);
+               frame = skb_push(tx_skb, TTP_HEADER + TTP_SAR_HEADER);
 
                frame[0] = TTP_PARAMETERS | n;
                frame[1] = 0x04; /* Length */
@@ -1522,7 +1519,7 @@ int irttp_disconnect_request(struct tsap_cb *self, struct sk_buff *userdata,
         * function may be called from various context, like user, timer
         * for following a disconnect_indication() (i.e. net_bh).
         * Jean II */
-       if(test_and_set_bit(0, &self->disconnect_pend)) {
+       if (test_and_set_bit(0, &self->disconnect_pend)) {
                IRDA_DEBUG(0, "%s(), disconnect already pending\n",
                           __func__);
                if (userdata)
@@ -1627,7 +1624,7 @@ static void irttp_disconnect_indication(void *instance, void *sap,
         * Jean II */
 
        /* No need to notify the client if has already tried to disconnect */
-       if(self->notify.disconnect_indication)
+       if (self->notify.disconnect_indication)
                self->notify.disconnect_indication(self->notify.instance, self,
                                                   reason, skb);
        else
@@ -1738,8 +1735,7 @@ static void irttp_run_rx_queue(struct tsap_cb *self)
                 *  This is the last fragment, so time to reassemble!
                 */
                if ((self->rx_sdu_size <= self->rx_max_sdu_size) ||
-                   (self->rx_max_sdu_size == TTP_SAR_UNBOUND))
-               {
+                   (self->rx_max_sdu_size == TTP_SAR_UNBOUND)) {
                        /*
                         * A little optimizing. Only queue the fragment if
                         * there are other fragments. Since if this is the
@@ -1860,7 +1856,7 @@ static int irttp_seq_show(struct seq_file *seq, void *v)
        seq_printf(seq, "dtsap_sel: %02x\n",
                   self->dtsap_sel);
        seq_printf(seq, "  connected: %s, ",
-                  self->connected? "TRUE":"FALSE");
+                  self->connected ? "TRUE" : "FALSE");
        seq_printf(seq, "avail credit: %d, ",
                   self->avail_credit);
        seq_printf(seq, "remote credit: %d, ",
@@ -1876,9 +1872,9 @@ static int irttp_seq_show(struct seq_file *seq, void *v)
        seq_printf(seq, "rx_queue len: %u\n",
                   skb_queue_len(&self->rx_queue));
        seq_printf(seq, "  tx_sdu_busy: %s, ",
-                  self->tx_sdu_busy? "TRUE":"FALSE");
+                  self->tx_sdu_busy ? "TRUE" : "FALSE");
        seq_printf(seq, "rx_sdu_busy: %s\n",
-                  self->rx_sdu_busy? "TRUE":"FALSE");
+                  self->rx_sdu_busy ? "TRUE" : "FALSE");
        seq_printf(seq, "  max_seg_size: %u, ",
                   self->max_seg_size);
        seq_printf(seq, "tx_max_sdu_size: %u, ",
index ab8bd2cabfa090a4fa50524e7a76064891e535d6..9d585370c5b4d6a1e60728df4dad6c79ca196ee3 100644 (file)
@@ -45,7 +45,7 @@ struct netns_pfkey {
 static DEFINE_MUTEX(pfkey_mutex);
 
 #define DUMMY_MARK 0
-static struct xfrm_mark dummy_mark = {0, 0};
+static const struct xfrm_mark dummy_mark = {0, 0};
 struct pfkey_sock {
        /* struct sock must be the first member of struct pfkey_sock */
        struct sock     sk;
@@ -338,7 +338,7 @@ static int pfkey_error(const struct sadb_msg *orig, int err, struct sock *sk)
        return 0;
 }
 
-static u8 sadb_ext_min_len[] = {
+static const u8 sadb_ext_min_len[] = {
        [SADB_EXT_RESERVED]             = (u8) 0,
        [SADB_EXT_SA]                   = (u8) sizeof(struct sadb_sa),
        [SADB_EXT_LIFETIME_CURRENT]     = (u8) sizeof(struct sadb_lifetime),
@@ -1196,10 +1196,6 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
 
        x->props.family = pfkey_sadb_addr2xfrm_addr((struct sadb_address *) ext_hdrs[SADB_EXT_ADDRESS_SRC-1],
                                                    &x->props.saddr);
-       if (!x->props.family) {
-               err = -EAFNOSUPPORT;
-               goto out;
-       }
        pfkey_sadb_addr2xfrm_addr((struct sadb_address *) ext_hdrs[SADB_EXT_ADDRESS_DST-1],
                                  &x->id.daddr);
 
@@ -2205,10 +2201,6 @@ static int pfkey_spdadd(struct sock *sk, struct sk_buff *skb, const struct sadb_
 
        sa = ext_hdrs[SADB_EXT_ADDRESS_SRC-1];
        xp->family = pfkey_sadb_addr2xfrm_addr(sa, &xp->selector.saddr);
-       if (!xp->family) {
-               err = -EINVAL;
-               goto out;
-       }
        xp->selector.family = xp->family;
        xp->selector.prefixlen_s = sa->sadb_address_prefixlen;
        xp->selector.proto = pfkey_proto_to_xfrm(sa->sadb_address_proto);
@@ -2737,7 +2729,7 @@ static int pfkey_spdflush(struct sock *sk, struct sk_buff *skb, const struct sad
 
 typedef int (*pfkey_handler)(struct sock *sk, struct sk_buff *skb,
                             const struct sadb_msg *hdr, void * const *ext_hdrs);
-static pfkey_handler pfkey_funcs[SADB_MAX + 1] = {
+static const pfkey_handler pfkey_funcs[SADB_MAX + 1] = {
        [SADB_RESERVED]         = pfkey_reserved,
        [SADB_GETSPI]           = pfkey_getspi,
        [SADB_UPDATE]           = pfkey_add,
index 7b4799cfbf8dbf456fc8f356b4fe01d02e1f396f..1a3c7e0f5d0de3c1d35759e1b2ae1bfa7849af84 100644 (file)
@@ -147,7 +147,7 @@ static int llc_seq_socket_show(struct seq_file *seq, void *v)
        }
        seq_printf(seq, "@%02X ", llc->sap->laddr.lsap);
        llc_ui_format_mac(seq, llc->daddr.mac);
-       seq_printf(seq, "@%02X %8d %8d %2d %3d %4d\n", llc->daddr.lsap,
+       seq_printf(seq, "@%02X %8d %8d %2d %3u %4d\n", llc->daddr.lsap,
                   sk_wmem_alloc_get(sk),
                   sk_rmem_alloc_get(sk) - llc->copied_seq,
                   sk->sk_state,
index 43dd7525bfcba54c02dbd9dde2bd89cfc2d29c97..2e7855a1b10d17198adcfec98d550838daef80bd 100644 (file)
@@ -395,9 +395,13 @@ void sta_set_rate_info_tx(struct sta_info *sta,
                rinfo->nss = ieee80211_rate_get_vht_nss(rate);
        } else {
                struct ieee80211_supported_band *sband;
+               int shift = ieee80211_vif_get_shift(&sta->sdata->vif);
+               u16 brate;
+
                sband = sta->local->hw.wiphy->bands[
                                ieee80211_get_sdata_band(sta->sdata)];
-               rinfo->legacy = sband->bitrates[rate->idx].bitrate;
+               brate = sband->bitrates[rate->idx].bitrate;
+               rinfo->legacy = DIV_ROUND_UP(brate, 1 << shift);
        }
        if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
                rinfo->flags |= RATE_INFO_FLAGS_40_MHZ_WIDTH;
@@ -422,11 +426,13 @@ void sta_set_rate_info_rx(struct sta_info *sta, struct rate_info *rinfo)
                rinfo->mcs = sta->last_rx_rate_idx;
        } else {
                struct ieee80211_supported_band *sband;
+               int shift = ieee80211_vif_get_shift(&sta->sdata->vif);
+               u16 brate;
 
                sband = sta->local->hw.wiphy->bands[
                                ieee80211_get_sdata_band(sta->sdata)];
-               rinfo->legacy =
-                       sband->bitrates[sta->last_rx_rate_idx].bitrate;
+               brate = sband->bitrates[sta->last_rx_rate_idx].bitrate;
+               rinfo->legacy = DIV_ROUND_UP(brate, 1 << shift);
        }
 
        if (sta->last_rx_rate_flag & RX_FLAG_40MHZ)
@@ -856,8 +862,8 @@ static int ieee80211_set_probe_resp(struct ieee80211_sub_if_data *sdata,
        return 0;
 }
 
-static int ieee80211_assign_beacon(struct ieee80211_sub_if_data *sdata,
-                                  struct cfg80211_beacon_data *params)
+int ieee80211_assign_beacon(struct ieee80211_sub_if_data *sdata,
+                           struct cfg80211_beacon_data *params)
 {
        struct beacon_data *new, *old;
        int new_head_len, new_tail_len;
@@ -1020,6 +1026,12 @@ static int ieee80211_change_beacon(struct wiphy *wiphy, struct net_device *dev,
 
        sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 
+       /* don't allow changing the beacon while CSA is in place - offset
+        * of channel switch counter may change
+        */
+       if (sdata->vif.csa_active)
+               return -EBUSY;
+
        old = rtnl_dereference(sdata->u.ap.beacon);
        if (!old)
                return -ENOENT;
@@ -1044,6 +1056,10 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev)
                return -ENOENT;
        old_probe_resp = rtnl_dereference(sdata->u.ap.probe_resp);
 
+       /* abort any running channel switch */
+       sdata->vif.csa_active = false;
+       cancel_work_sync(&sdata->csa_finalize_work);
+
        /* turn off carrier for this interface and dependent VLANs */
        list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
                netif_carrier_off(vlan->dev);
@@ -1192,8 +1208,6 @@ static int sta_apply_parameters(struct ieee80211_local *local,
                                struct station_parameters *params)
 {
        int ret = 0;
-       u32 rates;
-       int i, j;
        struct ieee80211_supported_band *sband;
        struct ieee80211_sub_if_data *sdata = sta->sdata;
        enum ieee80211_band band = ieee80211_get_sdata_band(sdata);
@@ -1286,16 +1300,10 @@ static int sta_apply_parameters(struct ieee80211_local *local,
                sta->listen_interval = params->listen_interval;
 
        if (params->supported_rates) {
-               rates = 0;
-
-               for (i = 0; i < params->supported_rates_len; i++) {
-                       int rate = (params->supported_rates[i] & 0x7f) * 5;
-                       for (j = 0; j < sband->n_bitrates; j++) {
-                               if (sband->bitrates[j].bitrate == rate)
-                                       rates |= BIT(j);
-                       }
-               }
-               sta->sta.supp_rates[band] = rates;
+               ieee80211_parse_bitrates(&sdata->vif.bss_conf.chandef,
+                                        sband, params->supported_rates,
+                                        params->supported_rates_len,
+                                        &sta->sta.supp_rates[band]);
        }
 
        if (params->ht_capa)
@@ -1958,18 +1966,11 @@ static int ieee80211_change_bss(struct wiphy *wiphy,
        }
 
        if (params->basic_rates) {
-               int i, j;
-               u32 rates = 0;
-               struct ieee80211_supported_band *sband = wiphy->bands[band];
-
-               for (i = 0; i < params->basic_rates_len; i++) {
-                       int rate = (params->basic_rates[i] & 0x7f) * 5;
-                       for (j = 0; j < sband->n_bitrates; j++) {
-                               if (sband->bitrates[j].bitrate == rate)
-                                       rates |= BIT(j);
-                       }
-               }
-               sdata->vif.bss_conf.basic_rates = rates;
+               ieee80211_parse_bitrates(&sdata->vif.bss_conf.chandef,
+                                        wiphy->bands[band],
+                                        params->basic_rates,
+                                        params->basic_rates_len,
+                                        &sdata->vif.bss_conf.basic_rates);
                changed |= BSS_CHANGED_BASIC_RATES;
        }
 
@@ -2301,14 +2302,25 @@ static void ieee80211_rfkill_poll(struct wiphy *wiphy)
 }
 
 #ifdef CONFIG_NL80211_TESTMODE
-static int ieee80211_testmode_cmd(struct wiphy *wiphy, void *data, int len)
+static int ieee80211_testmode_cmd(struct wiphy *wiphy,
+                                 struct wireless_dev *wdev,
+                                 void *data, int len)
 {
        struct ieee80211_local *local = wiphy_priv(wiphy);
+       struct ieee80211_vif *vif = NULL;
 
        if (!local->ops->testmode_cmd)
                return -EOPNOTSUPP;
 
-       return local->ops->testmode_cmd(&local->hw, data, len);
+       if (wdev) {
+               struct ieee80211_sub_if_data *sdata;
+
+               sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
+               if (sdata->flags & IEEE80211_SDATA_IN_DRIVER)
+                       vif = &sdata->vif;
+       }
+
+       return local->ops->testmode_cmd(&local->hw, vif, data, len);
 }
 
 static int ieee80211_testmode_dump(struct wiphy *wiphy,
@@ -2786,6 +2798,178 @@ static int ieee80211_start_radar_detection(struct wiphy *wiphy,
        return 0;
 }
 
+static struct cfg80211_beacon_data *
+cfg80211_beacon_dup(struct cfg80211_beacon_data *beacon)
+{
+       struct cfg80211_beacon_data *new_beacon;
+       u8 *pos;
+       int len;
+
+       len = beacon->head_len + beacon->tail_len + beacon->beacon_ies_len +
+             beacon->proberesp_ies_len + beacon->assocresp_ies_len +
+             beacon->probe_resp_len;
+
+       new_beacon = kzalloc(sizeof(*new_beacon) + len, GFP_KERNEL);
+       if (!new_beacon)
+               return NULL;
+
+       pos = (u8 *)(new_beacon + 1);
+       if (beacon->head_len) {
+               new_beacon->head_len = beacon->head_len;
+               new_beacon->head = pos;
+               memcpy(pos, beacon->head, beacon->head_len);
+               pos += beacon->head_len;
+       }
+       if (beacon->tail_len) {
+               new_beacon->tail_len = beacon->tail_len;
+               new_beacon->tail = pos;
+               memcpy(pos, beacon->tail, beacon->tail_len);
+               pos += beacon->tail_len;
+       }
+       if (beacon->beacon_ies_len) {
+               new_beacon->beacon_ies_len = beacon->beacon_ies_len;
+               new_beacon->beacon_ies = pos;
+               memcpy(pos, beacon->beacon_ies, beacon->beacon_ies_len);
+               pos += beacon->beacon_ies_len;
+       }
+       if (beacon->proberesp_ies_len) {
+               new_beacon->proberesp_ies_len = beacon->proberesp_ies_len;
+               new_beacon->proberesp_ies = pos;
+               memcpy(pos, beacon->proberesp_ies, beacon->proberesp_ies_len);
+               pos += beacon->proberesp_ies_len;
+       }
+       if (beacon->assocresp_ies_len) {
+               new_beacon->assocresp_ies_len = beacon->assocresp_ies_len;
+               new_beacon->assocresp_ies = pos;
+               memcpy(pos, beacon->assocresp_ies, beacon->assocresp_ies_len);
+               pos += beacon->assocresp_ies_len;
+       }
+       if (beacon->probe_resp_len) {
+               new_beacon->probe_resp_len = beacon->probe_resp_len;
+               beacon->probe_resp = pos;
+               memcpy(pos, beacon->probe_resp, beacon->probe_resp_len);
+               pos += beacon->probe_resp_len;
+       }
+
+       return new_beacon;
+}
+
+void ieee80211_csa_finalize_work(struct work_struct *work)
+{
+       struct ieee80211_sub_if_data *sdata =
+               container_of(work, struct ieee80211_sub_if_data,
+                            csa_finalize_work);
+       struct ieee80211_local *local = sdata->local;
+       int err, changed;
+
+       if (!ieee80211_sdata_running(sdata))
+               return;
+
+       if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_AP))
+               return;
+
+       sdata->radar_required = sdata->csa_radar_required;
+       err = ieee80211_vif_change_channel(sdata, &local->csa_chandef,
+                                          &changed);
+       if (WARN_ON(err < 0))
+               return;
+
+       err = ieee80211_assign_beacon(sdata, sdata->u.ap.next_beacon);
+       if (err < 0)
+               return;
+
+       changed |= err;
+       kfree(sdata->u.ap.next_beacon);
+       sdata->u.ap.next_beacon = NULL;
+       sdata->vif.csa_active = false;
+
+       ieee80211_wake_queues_by_reason(&sdata->local->hw,
+                                       IEEE80211_MAX_QUEUE_MAP,
+                                       IEEE80211_QUEUE_STOP_REASON_CSA);
+
+       ieee80211_bss_info_change_notify(sdata, changed);
+
+       cfg80211_ch_switch_notify(sdata->dev, &local->csa_chandef);
+}
+
+static int ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
+                                   struct cfg80211_csa_settings *params)
+{
+       struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+       struct ieee80211_local *local = sdata->local;
+       struct ieee80211_chanctx_conf *chanctx_conf;
+       struct ieee80211_chanctx *chanctx;
+       int err, num_chanctx;
+
+       if (!list_empty(&local->roc_list) || local->scanning)
+               return -EBUSY;
+
+       if (sdata->wdev.cac_started)
+               return -EBUSY;
+
+       if (cfg80211_chandef_identical(&params->chandef,
+                                      &sdata->vif.bss_conf.chandef))
+               return -EINVAL;
+
+       rcu_read_lock();
+       chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
+       if (!chanctx_conf) {
+               rcu_read_unlock();
+               return -EBUSY;
+       }
+
+       /* don't handle for multi-VIF cases */
+       chanctx = container_of(chanctx_conf, struct ieee80211_chanctx, conf);
+       if (chanctx->refcount > 1) {
+               rcu_read_unlock();
+               return -EBUSY;
+       }
+       num_chanctx = 0;
+       list_for_each_entry_rcu(chanctx, &local->chanctx_list, list)
+               num_chanctx++;
+       rcu_read_unlock();
+
+       if (num_chanctx > 1)
+               return -EBUSY;
+
+       /* don't allow another channel switch if one is already active. */
+       if (sdata->vif.csa_active)
+               return -EBUSY;
+
+       /* only handle AP for now. */
+       switch (sdata->vif.type) {
+       case NL80211_IFTYPE_AP:
+               break;
+       default:
+               return -EOPNOTSUPP;
+       }
+
+       sdata->u.ap.next_beacon = cfg80211_beacon_dup(&params->beacon_after);
+       if (!sdata->u.ap.next_beacon)
+               return -ENOMEM;
+
+       sdata->csa_counter_offset_beacon = params->counter_offset_beacon;
+       sdata->csa_counter_offset_presp = params->counter_offset_presp;
+       sdata->csa_radar_required = params->radar_required;
+
+       if (params->block_tx)
+               ieee80211_stop_queues_by_reason(&local->hw,
+                               IEEE80211_MAX_QUEUE_MAP,
+                               IEEE80211_QUEUE_STOP_REASON_CSA);
+
+       err = ieee80211_assign_beacon(sdata, &params->beacon_csa);
+       if (err < 0)
+               return err;
+
+       local->csa_chandef = params->chandef;
+       sdata->vif.csa_active = true;
+
+       ieee80211_bss_info_change_notify(sdata, err);
+       drv_channel_switch_beacon(sdata, &params->chandef);
+
+       return 0;
+}
+
 static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
                             struct ieee80211_channel *chan, bool offchan,
                             unsigned int wait, const u8 *buf, size_t len,
@@ -3503,4 +3687,5 @@ struct cfg80211_ops mac80211_config_ops = {
        .get_et_strings = ieee80211_get_et_strings,
        .get_channel = ieee80211_cfg_get_channel,
        .start_radar_detection = ieee80211_start_radar_detection,
+       .channel_switch = ieee80211_channel_switch,
 };
index 03e8d2e3270e23f0e97a58595513b33fcb048cb3..3a4764b2869efffdbcc3f90a363cf3f8b095496c 100644 (file)
@@ -410,6 +410,64 @@ int ieee80211_vif_use_channel(struct ieee80211_sub_if_data *sdata,
        return ret;
 }
 
+int ieee80211_vif_change_channel(struct ieee80211_sub_if_data *sdata,
+                                const struct cfg80211_chan_def *chandef,
+                                u32 *changed)
+{
+       struct ieee80211_local *local = sdata->local;
+       struct ieee80211_chanctx_conf *conf;
+       struct ieee80211_chanctx *ctx;
+       int ret;
+       u32 chanctx_changed = 0;
+
+       /* should never be called if not performing a channel switch. */
+       if (WARN_ON(!sdata->vif.csa_active))
+               return -EINVAL;
+
+       if (!cfg80211_chandef_usable(sdata->local->hw.wiphy, chandef,
+                                    IEEE80211_CHAN_DISABLED))
+               return -EINVAL;
+
+       mutex_lock(&local->chanctx_mtx);
+       conf = rcu_dereference_protected(sdata->vif.chanctx_conf,
+                                        lockdep_is_held(&local->chanctx_mtx));
+       if (!conf) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       ctx = container_of(conf, struct ieee80211_chanctx, conf);
+       if (ctx->refcount != 1) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       if (sdata->vif.bss_conf.chandef.width != chandef->width) {
+               chanctx_changed = IEEE80211_CHANCTX_CHANGE_WIDTH;
+               *changed |= BSS_CHANGED_BANDWIDTH;
+       }
+
+       sdata->vif.bss_conf.chandef = *chandef;
+       ctx->conf.def = *chandef;
+
+       chanctx_changed |= IEEE80211_CHANCTX_CHANGE_CHANNEL;
+       drv_change_chanctx(local, ctx, chanctx_changed);
+
+       if (!local->use_chanctx) {
+               local->_oper_chandef = *chandef;
+               ieee80211_hw_config(local, 0);
+       }
+
+       ieee80211_recalc_chanctx_chantype(local, ctx);
+       ieee80211_recalc_smps_chanctx(local, ctx);
+       ieee80211_recalc_radar_chanctx(local, ctx);
+
+       ret = 0;
+ out:
+       mutex_unlock(&local->chanctx_mtx);
+       return ret;
+}
+
 int ieee80211_vif_change_bandwidth(struct ieee80211_sub_if_data *sdata,
                                   const struct cfg80211_chan_def *chandef,
                                   u32 *changed)
index 44e201d60a13991f0144954ac1dd1ebd00cb4672..19c54a44ed4793823713b717a86111d4dda0d57d 100644 (file)
@@ -455,6 +455,15 @@ void ieee80211_sta_debugfs_add(struct sta_info *sta)
        DEBUGFS_ADD_COUNTER(tx_retry_count, tx_retry_count);
        DEBUGFS_ADD_COUNTER(wep_weak_iv_count, wep_weak_iv_count);
 
+       if (sizeof(sta->driver_buffered_tids) == sizeof(u32))
+               debugfs_create_x32("driver_buffered_tids", 0400,
+                                  sta->debugfs.dir,
+                                  (u32 *)&sta->driver_buffered_tids);
+       else
+               debugfs_create_x64("driver_buffered_tids", 0400,
+                                  sta->debugfs.dir,
+                                  (u64 *)&sta->driver_buffered_tids);
+
        drv_sta_add_debugfs(local, sdata, &sta->sta, sta->debugfs.dir);
 }
 
index b931c96a596fb3e92e9effa845ab10f74144dfd1..b3ea11f3d526962ddd8190587f82d8d84134067c 100644 (file)
@@ -1072,4 +1072,17 @@ static inline void drv_ipv6_addr_change(struct ieee80211_local *local,
 }
 #endif
 
+static inline void
+drv_channel_switch_beacon(struct ieee80211_sub_if_data *sdata,
+                         struct cfg80211_chan_def *chandef)
+{
+       struct ieee80211_local *local = sdata->local;
+
+       if (local->ops->channel_switch_beacon) {
+               trace_drv_channel_switch_beacon(local, sdata, chandef);
+               local->ops->channel_switch_beacon(&local->hw, &sdata->vif,
+                                                 chandef);
+       }
+}
+
 #endif /* __MAC80211_DRIVER_OPS */
index f83534f6a2eec14abcb237196e4d6a233e2f8136..529bf58bc14511beae95c4ff250ec54eff5c9710 100644 (file)
 #include "ieee80211_i.h"
 #include "rate.h"
 
-static void __check_htcap_disable(struct ieee80211_sub_if_data *sdata,
+static void __check_htcap_disable(struct ieee80211_ht_cap *ht_capa,
+                                 struct ieee80211_ht_cap *ht_capa_mask,
                                  struct ieee80211_sta_ht_cap *ht_cap,
                                  u16 flag)
 {
        __le16 le_flag = cpu_to_le16(flag);
-       if (sdata->u.mgd.ht_capa_mask.cap_info & le_flag) {
-               if (!(sdata->u.mgd.ht_capa.cap_info & le_flag))
+       if (ht_capa_mask->cap_info & le_flag) {
+               if (!(ht_capa->cap_info & le_flag))
                        ht_cap->cap &= ~flag;
        }
 }
@@ -33,13 +34,30 @@ static void __check_htcap_disable(struct ieee80211_sub_if_data *sdata,
 void ieee80211_apply_htcap_overrides(struct ieee80211_sub_if_data *sdata,
                                     struct ieee80211_sta_ht_cap *ht_cap)
 {
-       u8 *scaps = (u8 *)(&sdata->u.mgd.ht_capa.mcs.rx_mask);
-       u8 *smask = (u8 *)(&sdata->u.mgd.ht_capa_mask.mcs.rx_mask);
+       struct ieee80211_ht_cap *ht_capa, *ht_capa_mask;
+       u8 *scaps, *smask;
        int i;
 
        if (!ht_cap->ht_supported)
                return;
 
+       switch (sdata->vif.type) {
+       case NL80211_IFTYPE_STATION:
+               ht_capa = &sdata->u.mgd.ht_capa;
+               ht_capa_mask = &sdata->u.mgd.ht_capa_mask;
+               break;
+       case NL80211_IFTYPE_ADHOC:
+               ht_capa = &sdata->u.ibss.ht_capa;
+               ht_capa_mask = &sdata->u.ibss.ht_capa_mask;
+               break;
+       default:
+               WARN_ON_ONCE(1);
+               return;
+       }
+
+       scaps = (u8 *)(&ht_capa->mcs.rx_mask);
+       smask = (u8 *)(&ht_capa_mask->mcs.rx_mask);
+
        /* NOTE:  If you add more over-rides here, update register_hw
         * ht_capa_mod_msk logic in main.c as well.
         * And, if this method can ever change ht_cap.ht_supported, fix
@@ -55,28 +73,32 @@ void ieee80211_apply_htcap_overrides(struct ieee80211_sub_if_data *sdata,
        }
 
        /* Force removal of HT-40 capabilities? */
-       __check_htcap_disable(sdata, ht_cap, IEEE80211_HT_CAP_SUP_WIDTH_20_40);
-       __check_htcap_disable(sdata, ht_cap, IEEE80211_HT_CAP_SGI_40);
+       __check_htcap_disable(ht_capa, ht_capa_mask, ht_cap,
+                             IEEE80211_HT_CAP_SUP_WIDTH_20_40);
+       __check_htcap_disable(ht_capa, ht_capa_mask, ht_cap,
+                             IEEE80211_HT_CAP_SGI_40);
 
        /* Allow user to disable SGI-20 (SGI-40 is handled above) */
-       __check_htcap_disable(sdata, ht_cap, IEEE80211_HT_CAP_SGI_20);
+       __check_htcap_disable(ht_capa, ht_capa_mask, ht_cap,
+                             IEEE80211_HT_CAP_SGI_20);
 
        /* Allow user to disable the max-AMSDU bit. */
-       __check_htcap_disable(sdata, ht_cap, IEEE80211_HT_CAP_MAX_AMSDU);
+       __check_htcap_disable(ht_capa, ht_capa_mask, ht_cap,
+                             IEEE80211_HT_CAP_MAX_AMSDU);
 
        /* Allow user to decrease AMPDU factor */
-       if (sdata->u.mgd.ht_capa_mask.ampdu_params_info &
+       if (ht_capa_mask->ampdu_params_info &
            IEEE80211_HT_AMPDU_PARM_FACTOR) {
-               u8 n = sdata->u.mgd.ht_capa.ampdu_params_info
-                       & IEEE80211_HT_AMPDU_PARM_FACTOR;
+               u8 n = ht_capa->ampdu_params_info &
+                      IEEE80211_HT_AMPDU_PARM_FACTOR;
                if (n < ht_cap->ampdu_factor)
                        ht_cap->ampdu_factor = n;
        }
 
        /* Allow the user to increase AMPDU density. */
-       if (sdata->u.mgd.ht_capa_mask.ampdu_params_info &
+       if (ht_capa_mask->ampdu_params_info &
            IEEE80211_HT_AMPDU_PARM_DENSITY) {
-               u8 n = (sdata->u.mgd.ht_capa.ampdu_params_info &
+               u8 n = (ht_capa->ampdu_params_info &
                        IEEE80211_HT_AMPDU_PARM_DENSITY)
                        >> IEEE80211_HT_AMPDU_PARM_DENSITY_SHIFT;
                if (n > ht_cap->ampdu_density)
@@ -112,7 +134,8 @@ bool ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_sub_if_data *sdata,
         * we advertised a restricted capability set to. Override
         * our own capabilities and then use those below.
         */
-       if (sdata->vif.type == NL80211_IFTYPE_STATION &&
+       if ((sdata->vif.type == NL80211_IFTYPE_STATION ||
+            sdata->vif.type == NL80211_IFTYPE_ADHOC) &&
            !test_sta_flag(sta, WLAN_STA_TDLS_PEER))
                ieee80211_apply_htcap_overrides(sdata, &own_cap);
 
index ea7b9c2c7e66db19a811244d82e907bdb79620f1..74de0f10558a81b901f35122acedbc1fd1be0d98 100644 (file)
 
 #define IEEE80211_IBSS_MERGE_INTERVAL (30 * HZ)
 #define IEEE80211_IBSS_INACTIVITY_LIMIT (60 * HZ)
+#define IEEE80211_IBSS_RSN_INACTIVITY_LIMIT (10 * HZ)
 
 #define IEEE80211_IBSS_MAX_STA_ENTRIES 128
 
-
-static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
-                                     const u8 *bssid, const int beacon_int,
-                                     struct ieee80211_channel *chan,
-                                     const u32 basic_rates,
-                                     const u16 capability, u64 tsf,
-                                     bool creator)
+static struct beacon_data *
+ieee80211_ibss_build_presp(struct ieee80211_sub_if_data *sdata,
+                          const int beacon_int, const u32 basic_rates,
+                          const u16 capability, u64 tsf,
+                          struct cfg80211_chan_def *chandef,
+                          bool *have_higher_than_11mbit)
 {
        struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
        struct ieee80211_local *local = sdata->local;
-       int ratesi;
+       int rates_n = 0, i, ri;
        struct ieee80211_mgmt *mgmt;
        u8 *pos;
        struct ieee80211_supported_band *sband;
-       struct cfg80211_bss *bss;
-       u32 bss_change;
-       u8 supp_rates[IEEE80211_MAX_SUPP_RATES];
-       struct cfg80211_chan_def chandef;
+       u32 rate_flags, rates = 0, rates_added = 0;
        struct beacon_data *presp;
        int frame_len;
-
-       sdata_assert_lock(sdata);
-
-       /* Reset own TSF to allow time synchronization work. */
-       drv_reset_tsf(local, sdata);
-
-       if (!ether_addr_equal(ifibss->bssid, bssid))
-               sta_info_flush(sdata);
-
-       /* if merging, indicate to driver that we leave the old IBSS */
-       if (sdata->vif.bss_conf.ibss_joined) {
-               sdata->vif.bss_conf.ibss_joined = false;
-               sdata->vif.bss_conf.ibss_creator = false;
-               sdata->vif.bss_conf.enable_beacon = false;
-               netif_carrier_off(sdata->dev);
-               ieee80211_bss_info_change_notify(sdata,
-                                                BSS_CHANGED_IBSS |
-                                                BSS_CHANGED_BEACON_ENABLED);
-       }
-
-       presp = rcu_dereference_protected(ifibss->presp,
-                                         lockdep_is_held(&sdata->wdev.mtx));
-       rcu_assign_pointer(ifibss->presp, NULL);
-       if (presp)
-               kfree_rcu(presp, rcu_head);
-
-       sdata->drop_unencrypted = capability & WLAN_CAPABILITY_PRIVACY ? 1 : 0;
-
-       chandef = ifibss->chandef;
-       if (!cfg80211_reg_can_beacon(local->hw.wiphy, &chandef)) {
-               chandef.width = NL80211_CHAN_WIDTH_20;
-               chandef.center_freq1 = chan->center_freq;
-       }
-
-       ieee80211_vif_release_channel(sdata);
-       if (ieee80211_vif_use_channel(sdata, &chandef,
-                                     ifibss->fixed_channel ?
-                                       IEEE80211_CHANCTX_SHARED :
-                                       IEEE80211_CHANCTX_EXCLUSIVE)) {
-               sdata_info(sdata, "Failed to join IBSS, no channel context\n");
-               return;
-       }
-
-       memcpy(ifibss->bssid, bssid, ETH_ALEN);
-
-       sband = local->hw.wiphy->bands[chan->band];
+       int shift;
 
        /* Build IBSS probe response */
        frame_len = sizeof(struct ieee80211_hdr_3addr) +
@@ -113,7 +65,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
                    ifibss->ie_len;
        presp = kzalloc(sizeof(*presp) + frame_len, GFP_KERNEL);
        if (!presp)
-               return;
+               return NULL;
 
        presp->head = (void *)(presp + 1);
 
@@ -134,21 +86,47 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
        memcpy(pos, ifibss->ssid, ifibss->ssid_len);
        pos += ifibss->ssid_len;
 
-       rates = min_t(int, 8, sband->n_bitrates);
+       sband = local->hw.wiphy->bands[chandef->chan->band];
+       rate_flags = ieee80211_chandef_rate_flags(chandef);
+       shift = ieee80211_chandef_get_shift(chandef);
+       rates_n = 0;
+       if (have_higher_than_11mbit)
+               *have_higher_than_11mbit = false;
+
+       for (i = 0; i < sband->n_bitrates; i++) {
+               if ((rate_flags & sband->bitrates[i].flags) != rate_flags)
+                       continue;
+               if (sband->bitrates[i].bitrate > 110 &&
+                   have_higher_than_11mbit)
+                       *have_higher_than_11mbit = true;
+
+               rates |= BIT(i);
+               rates_n++;
+       }
+
        *pos++ = WLAN_EID_SUPP_RATES;
-       *pos++ = rates;
-       for (i = 0; i < rates; i++) {
-               int rate = sband->bitrates[i].bitrate;
+       *pos++ = min_t(int, 8, rates_n);
+       for (ri = 0; ri < sband->n_bitrates; ri++) {
+               int rate = DIV_ROUND_UP(sband->bitrates[ri].bitrate,
+                                       5 * (1 << shift));
                u8 basic = 0;
-               if (basic_rates & BIT(i))
+               if (!(rates & BIT(ri)))
+                       continue;
+
+               if (basic_rates & BIT(ri))
                        basic = 0x80;
-               *pos++ = basic | (u8) (rate / 5);
+               *pos++ = basic | (u8) rate;
+               if (++rates_added == 8) {
+                       ri++; /* continue at next rate for EXT_SUPP_RATES */
+                       break;
+               }
        }
 
        if (sband->band == IEEE80211_BAND_2GHZ) {
                *pos++ = WLAN_EID_DS_PARAMS;
                *pos++ = 1;
-               *pos++ = ieee80211_frequency_to_channel(chan->center_freq);
+               *pos++ = ieee80211_frequency_to_channel(
+                               chandef->chan->center_freq);
        }
 
        *pos++ = WLAN_EID_IBSS_PARAMS;
@@ -157,15 +135,20 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
        *pos++ = 0;
        *pos++ = 0;
 
-       if (sband->n_bitrates > 8) {
+       /* put the remaining rates in WLAN_EID_EXT_SUPP_RATES */
+       if (rates_n > 8) {
                *pos++ = WLAN_EID_EXT_SUPP_RATES;
-               *pos++ = sband->n_bitrates - 8;
-               for (i = 8; i < sband->n_bitrates; i++) {
-                       int rate = sband->bitrates[i].bitrate;
+               *pos++ = rates_n - 8;
+               for (; ri < sband->n_bitrates; ri++) {
+                       int rate = DIV_ROUND_UP(sband->bitrates[ri].bitrate,
+                                               5 * (1 << shift));
                        u8 basic = 0;
-                       if (basic_rates & BIT(i))
+                       if (!(rates & BIT(ri)))
+                               continue;
+
+                       if (basic_rates & BIT(ri))
                                basic = 0x80;
-                       *pos++ = basic | (u8) (rate / 5);
+                       *pos++ = basic | (u8) rate;
                }
        }
 
@@ -175,19 +158,23 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
        }
 
        /* add HT capability and information IEs */
-       if (chandef.width != NL80211_CHAN_WIDTH_20_NOHT &&
-           chandef.width != NL80211_CHAN_WIDTH_5 &&
-           chandef.width != NL80211_CHAN_WIDTH_10 &&
+       if (chandef->width != NL80211_CHAN_WIDTH_20_NOHT &&
+           chandef->width != NL80211_CHAN_WIDTH_5 &&
+           chandef->width != NL80211_CHAN_WIDTH_10 &&
            sband->ht_cap.ht_supported) {
-               pos = ieee80211_ie_build_ht_cap(pos, &sband->ht_cap,
-                                               sband->ht_cap.cap);
+               struct ieee80211_sta_ht_cap ht_cap;
+
+               memcpy(&ht_cap, &sband->ht_cap, sizeof(ht_cap));
+               ieee80211_apply_htcap_overrides(sdata, &ht_cap);
+
+               pos = ieee80211_ie_build_ht_cap(pos, &ht_cap, ht_cap.cap);
                /*
                 * Note: According to 802.11n-2009 9.13.3.1, HT Protection
                 * field and RIFS Mode are reserved in IBSS mode, therefore
                 * keep them at 0
                 */
                pos = ieee80211_ie_build_ht_oper(pos, &sband->ht_cap,
-                                                &chandef, 0);
+                                                chandef, 0);
        }
 
        if (local->hw.queues >= IEEE80211_NUM_ACS) {
@@ -204,9 +191,94 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
 
        presp->head_len = pos - presp->head;
        if (WARN_ON(presp->head_len > frame_len))
+               goto error;
+
+       return presp;
+error:
+       kfree(presp);
+       return NULL;
+}
+
+static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
+                                     const u8 *bssid, const int beacon_int,
+                                     struct ieee80211_channel *chan,
+                                     const u32 basic_rates,
+                                     const u16 capability, u64 tsf,
+                                     bool creator)
+{
+       struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
+       struct ieee80211_local *local = sdata->local;
+       struct ieee80211_supported_band *sband;
+       struct ieee80211_mgmt *mgmt;
+       struct cfg80211_bss *bss;
+       u32 bss_change;
+       struct cfg80211_chan_def chandef;
+       struct beacon_data *presp;
+       enum nl80211_bss_scan_width scan_width;
+       bool have_higher_than_11mbit;
+
+       sdata_assert_lock(sdata);
+
+       /* Reset own TSF to allow time synchronization work. */
+       drv_reset_tsf(local, sdata);
+
+       if (!ether_addr_equal(ifibss->bssid, bssid))
+               sta_info_flush(sdata);
+
+       /* if merging, indicate to driver that we leave the old IBSS */
+       if (sdata->vif.bss_conf.ibss_joined) {
+               sdata->vif.bss_conf.ibss_joined = false;
+               sdata->vif.bss_conf.ibss_creator = false;
+               sdata->vif.bss_conf.enable_beacon = false;
+               netif_carrier_off(sdata->dev);
+               ieee80211_bss_info_change_notify(sdata,
+                                                BSS_CHANGED_IBSS |
+                                                BSS_CHANGED_BEACON_ENABLED);
+       }
+
+       presp = rcu_dereference_protected(ifibss->presp,
+                                         lockdep_is_held(&sdata->wdev.mtx));
+       rcu_assign_pointer(ifibss->presp, NULL);
+       if (presp)
+               kfree_rcu(presp, rcu_head);
+
+       sdata->drop_unencrypted = capability & WLAN_CAPABILITY_PRIVACY ? 1 : 0;
+
+       chandef = ifibss->chandef;
+       if (!cfg80211_reg_can_beacon(local->hw.wiphy, &chandef)) {
+               if (chandef.width == NL80211_CHAN_WIDTH_5 ||
+                   chandef.width == NL80211_CHAN_WIDTH_10 ||
+                   chandef.width == NL80211_CHAN_WIDTH_20_NOHT ||
+                   chandef.width == NL80211_CHAN_WIDTH_20) {
+                       sdata_info(sdata,
+                                  "Failed to join IBSS, beacons forbidden\n");
+                       return;
+               }
+               chandef.width = NL80211_CHAN_WIDTH_20;
+               chandef.center_freq1 = chan->center_freq;
+       }
+
+       ieee80211_vif_release_channel(sdata);
+       if (ieee80211_vif_use_channel(sdata, &chandef,
+                                     ifibss->fixed_channel ?
+                                       IEEE80211_CHANCTX_SHARED :
+                                       IEEE80211_CHANCTX_EXCLUSIVE)) {
+               sdata_info(sdata, "Failed to join IBSS, no channel context\n");
+               return;
+       }
+
+       memcpy(ifibss->bssid, bssid, ETH_ALEN);
+
+       sband = local->hw.wiphy->bands[chan->band];
+
+       presp = ieee80211_ibss_build_presp(sdata, beacon_int, basic_rates,
+                                          capability, tsf, &chandef,
+                                          &have_higher_than_11mbit);
+       if (!presp)
                return;
 
        rcu_assign_pointer(ifibss->presp, presp);
+       mgmt = (void *)presp->head;
 
        sdata->vif.bss_conf.enable_beacon = true;
        sdata->vif.bss_conf.beacon_int = beacon_int;
@@ -236,18 +308,26 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
        sdata->vif.bss_conf.use_short_slot = chan->band == IEEE80211_BAND_5GHZ;
        bss_change |= BSS_CHANGED_ERP_SLOT;
 
+       /* cf. IEEE 802.11 9.2.12 */
+       if (chan->band == IEEE80211_BAND_2GHZ && have_higher_than_11mbit)
+               sdata->flags |= IEEE80211_SDATA_OPERATING_GMODE;
+       else
+               sdata->flags &= ~IEEE80211_SDATA_OPERATING_GMODE;
+
        sdata->vif.bss_conf.ibss_joined = true;
        sdata->vif.bss_conf.ibss_creator = creator;
        ieee80211_bss_info_change_notify(sdata, bss_change);
 
-       ieee80211_sta_def_wmm_params(sdata, sband->n_bitrates, supp_rates);
+       ieee80211_set_wmm_default(sdata, true);
 
        ifibss->state = IEEE80211_IBSS_MLME_JOINED;
        mod_timer(&ifibss->timer,
                  round_jiffies(jiffies + IEEE80211_IBSS_MERGE_INTERVAL));
 
-       bss = cfg80211_inform_bss_frame(local->hw.wiphy, chan,
-                                       mgmt, presp->head_len, 0, GFP_KERNEL);
+       scan_width = cfg80211_chandef_to_scan_width(&chandef);
+       bss = cfg80211_inform_bss_width_frame(local->hw.wiphy, chan,
+                                             scan_width, mgmt,
+                                             presp->head_len, 0, GFP_KERNEL);
        cfg80211_put_bss(local->hw.wiphy, bss);
        netif_carrier_on(sdata->dev);
        cfg80211_ibss_joined(sdata->dev, ifibss->bssid, GFP_KERNEL);
@@ -264,6 +344,8 @@ static void ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
        u16 beacon_int = cbss->beacon_interval;
        const struct cfg80211_bss_ies *ies;
        u64 tsf;
+       u32 rate_flags;
+       int shift;
 
        sdata_assert_lock(sdata);
 
@@ -271,15 +353,24 @@ static void ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
                beacon_int = 10;
 
        sband = sdata->local->hw.wiphy->bands[cbss->channel->band];
+       rate_flags = ieee80211_chandef_rate_flags(&sdata->u.ibss.chandef);
+       shift = ieee80211_vif_get_shift(&sdata->vif);
 
        basic_rates = 0;
 
        for (i = 0; i < bss->supp_rates_len; i++) {
-               int rate = (bss->supp_rates[i] & 0x7f) * 5;
+               int rate = bss->supp_rates[i] & 0x7f;
                bool is_basic = !!(bss->supp_rates[i] & 0x80);
 
                for (j = 0; j < sband->n_bitrates; j++) {
-                       if (sband->bitrates[j].bitrate == rate) {
+                       int brate;
+                       if ((rate_flags & sband->bitrates[j].flags)
+                           != rate_flags)
+                               continue;
+
+                       brate = DIV_ROUND_UP(sband->bitrates[j].bitrate,
+                                            5 * (1 << shift));
+                       if (brate == rate) {
                                if (is_basic)
                                        basic_rates |= BIT(j);
                                break;
@@ -335,6 +426,7 @@ ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata, const u8 *bssid,
        struct sta_info *sta;
        struct ieee80211_chanctx_conf *chanctx_conf;
        struct ieee80211_supported_band *sband;
+       enum nl80211_bss_scan_width scan_width;
        int band;
 
        /*
@@ -363,6 +455,7 @@ ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata, const u8 *bssid,
        if (WARN_ON_ONCE(!chanctx_conf))
                return NULL;
        band = chanctx_conf->def.chan->band;
+       scan_width = cfg80211_chandef_to_scan_width(&chanctx_conf->def);
        rcu_read_unlock();
 
        sta = sta_info_alloc(sdata, addr, GFP_KERNEL);
@@ -376,7 +469,7 @@ ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata, const u8 *bssid,
        /* make sure mandatory rates are always added */
        sband = local->hw.wiphy->bands[band];
        sta->sta.supp_rates[band] = supp_rates |
-                       ieee80211_mandatory_rates(sband);
+                       ieee80211_mandatory_rates(sband, scan_width);
 
        return ieee80211_ibss_finish_sta(sta);
 }
@@ -440,6 +533,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
        u64 beacon_timestamp, rx_timestamp;
        u32 supp_rates = 0;
        enum ieee80211_band band = rx_status->band;
+       enum nl80211_bss_scan_width scan_width;
        struct ieee80211_supported_band *sband = local->hw.wiphy->bands[band];
        bool rates_updated = false;
 
@@ -461,16 +555,22 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
                sta = sta_info_get(sdata, mgmt->sa);
 
                if (elems->supp_rates) {
-                       supp_rates = ieee80211_sta_get_rates(local, elems,
+                       supp_rates = ieee80211_sta_get_rates(sdata, elems,
                                                             band, NULL);
                        if (sta) {
                                u32 prev_rates;
 
                                prev_rates = sta->sta.supp_rates[band];
                                /* make sure mandatory rates are always added */
-                               sta->sta.supp_rates[band] = supp_rates |
-                                       ieee80211_mandatory_rates(sband);
+                               scan_width = NL80211_BSS_CHAN_WIDTH_20;
+                               if (rx_status->flag & RX_FLAG_5MHZ)
+                                       scan_width = NL80211_BSS_CHAN_WIDTH_5;
+                               if (rx_status->flag & RX_FLAG_10MHZ)
+                                       scan_width = NL80211_BSS_CHAN_WIDTH_10;
 
+                               sta->sta.supp_rates[band] = supp_rates |
+                                       ieee80211_mandatory_rates(sband,
+                                                                 scan_width);
                                if (sta->sta.supp_rates[band] != prev_rates) {
                                        ibss_dbg(sdata,
                                                 "updated supp_rates set for %pM based on beacon/probe_resp (0x%x -> 0x%x)\n",
@@ -585,7 +685,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
                         "beacon TSF higher than local TSF - IBSS merge with BSSID %pM\n",
                         mgmt->bssid);
                ieee80211_sta_join_ibss(sdata, bss);
-               supp_rates = ieee80211_sta_get_rates(local, elems, band, NULL);
+               supp_rates = ieee80211_sta_get_rates(sdata, elems, band, NULL);
                ieee80211_ibss_add_sta(sdata, mgmt->bssid, mgmt->sa,
                                       supp_rates);
                rcu_read_unlock();
@@ -604,6 +704,7 @@ void ieee80211_ibss_rx_no_sta(struct ieee80211_sub_if_data *sdata,
        struct sta_info *sta;
        struct ieee80211_chanctx_conf *chanctx_conf;
        struct ieee80211_supported_band *sband;
+       enum nl80211_bss_scan_width scan_width;
        int band;
 
        /*
@@ -629,6 +730,7 @@ void ieee80211_ibss_rx_no_sta(struct ieee80211_sub_if_data *sdata,
                return;
        }
        band = chanctx_conf->def.chan->band;
+       scan_width = cfg80211_chandef_to_scan_width(&chanctx_conf->def);
        rcu_read_unlock();
 
        sta = sta_info_alloc(sdata, addr, GFP_ATOMIC);
@@ -640,7 +742,7 @@ void ieee80211_ibss_rx_no_sta(struct ieee80211_sub_if_data *sdata,
        /* make sure mandatory rates are always added */
        sband = local->hw.wiphy->bands[band];
        sta->sta.supp_rates[band] = supp_rates |
-                       ieee80211_mandatory_rates(sband);
+                       ieee80211_mandatory_rates(sband, scan_width);
 
        spin_lock(&ifibss->incomplete_lock);
        list_add(&sta->list, &ifibss->incomplete_stations);
@@ -672,6 +774,33 @@ static int ieee80211_sta_active_ibss(struct ieee80211_sub_if_data *sdata)
        return active;
 }
 
+static void ieee80211_ibss_sta_expire(struct ieee80211_sub_if_data *sdata)
+{
+       struct ieee80211_local *local = sdata->local;
+       struct sta_info *sta, *tmp;
+       unsigned long exp_time = IEEE80211_IBSS_INACTIVITY_LIMIT;
+       unsigned long exp_rsn_time = IEEE80211_IBSS_RSN_INACTIVITY_LIMIT;
+
+       mutex_lock(&local->sta_mtx);
+
+       list_for_each_entry_safe(sta, tmp, &local->sta_list, list) {
+               if (sdata != sta->sdata)
+                       continue;
+
+               if (time_after(jiffies, sta->last_rx + exp_time) ||
+                   (time_after(jiffies, sta->last_rx + exp_rsn_time) &&
+                    sta->sta_state != IEEE80211_STA_AUTHORIZED)) {
+                       sta_dbg(sta->sdata, "expiring inactive %sSTA %pM\n",
+                               sta->sta_state != IEEE80211_STA_AUTHORIZED ?
+                               "not authorized " : "", sta->sta.addr);
+
+                       WARN_ON(__sta_info_destroy(sta));
+               }
+       }
+
+       mutex_unlock(&local->sta_mtx);
+}
+
 /*
  * This function is called with state == IEEE80211_IBSS_MLME_JOINED
  */
@@ -679,13 +808,14 @@ static int ieee80211_sta_active_ibss(struct ieee80211_sub_if_data *sdata)
 static void ieee80211_sta_merge_ibss(struct ieee80211_sub_if_data *sdata)
 {
        struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
+       enum nl80211_bss_scan_width scan_width;
 
        sdata_assert_lock(sdata);
 
        mod_timer(&ifibss->timer,
                  round_jiffies(jiffies + IEEE80211_IBSS_MERGE_INTERVAL));
 
-       ieee80211_sta_expire(sdata, IEEE80211_IBSS_INACTIVITY_LIMIT);
+       ieee80211_ibss_sta_expire(sdata);
 
        if (time_before(jiffies, ifibss->last_scan_completed +
                       IEEE80211_IBSS_MERGE_INTERVAL))
@@ -700,8 +830,9 @@ static void ieee80211_sta_merge_ibss(struct ieee80211_sub_if_data *sdata)
        sdata_info(sdata,
                   "No active IBSS STAs - trying to scan for other IBSS networks with same SSID (merge)\n");
 
+       scan_width = cfg80211_chandef_to_scan_width(&ifibss->chandef);
        ieee80211_request_ibss_scan(sdata, ifibss->ssid, ifibss->ssid_len,
-                                   NULL);
+                                   NULL, scan_width);
 }
 
 static void ieee80211_sta_create_ibss(struct ieee80211_sub_if_data *sdata)
@@ -751,6 +882,7 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
        struct cfg80211_bss *cbss;
        struct ieee80211_channel *chan = NULL;
        const u8 *bssid = NULL;
+       enum nl80211_bss_scan_width scan_width;
        int active_ibss;
        u16 capability;
 
@@ -792,6 +924,17 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
                return;
        }
 
+       /* if a fixed bssid and a fixed freq have been provided create the IBSS
+        * directly and do not waste time scanning
+        */
+       if (ifibss->fixed_bssid && ifibss->fixed_channel) {
+               sdata_info(sdata, "Created IBSS using preconfigured BSSID %pM\n",
+                          bssid);
+               ieee80211_sta_create_ibss(sdata);
+               return;
+       }
+
+
        ibss_dbg(sdata, "sta_find_ibss: did not try to join ibss\n");
 
        /* Selected IBSS not found in current scan results - try to scan */
@@ -799,8 +942,10 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
                                        IEEE80211_SCAN_INTERVAL)) {
                sdata_info(sdata, "Trigger new scan to find an IBSS to join\n");
 
+               scan_width = cfg80211_chandef_to_scan_width(&ifibss->chandef);
                ieee80211_request_ibss_scan(sdata, ifibss->ssid,
-                                           ifibss->ssid_len, chan);
+                                           ifibss->ssid_len, chan,
+                                           scan_width);
        } else {
                int interval = IEEE80211_SCAN_INTERVAL;
 
@@ -1020,6 +1165,9 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata,
                        struct cfg80211_ibss_params *params)
 {
        u32 changed = 0;
+       u32 rate_flags;
+       struct ieee80211_supported_band *sband;
+       int i;
 
        if (params->bssid) {
                memcpy(sdata->u.ibss.bssid, params->bssid, ETH_ALEN);
@@ -1030,6 +1178,14 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata,
        sdata->u.ibss.privacy = params->privacy;
        sdata->u.ibss.control_port = params->control_port;
        sdata->u.ibss.basic_rates = params->basic_rates;
+
+       /* fix basic_rates if channel does not support these rates */
+       rate_flags = ieee80211_chandef_rate_flags(&params->chandef);
+       sband = sdata->local->hw.wiphy->bands[params->chandef.chan->band];
+       for (i = 0; i < sband->n_bitrates; i++) {
+               if ((rate_flags & sband->bitrates[i].flags) != rate_flags)
+                       sdata->u.ibss.basic_rates &= ~BIT(i);
+       }
        memcpy(sdata->vif.bss_conf.mcast_rate, params->mcast_rate,
               sizeof(params->mcast_rate));
 
@@ -1051,6 +1207,11 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata,
        memcpy(sdata->u.ibss.ssid, params->ssid, params->ssid_len);
        sdata->u.ibss.ssid_len = params->ssid_len;
 
+       memcpy(&sdata->u.ibss.ht_capa, &params->ht_capa,
+              sizeof(sdata->u.ibss.ht_capa));
+       memcpy(&sdata->u.ibss.ht_capa_mask, &params->ht_capa_mask,
+              sizeof(sdata->u.ibss.ht_capa_mask));
+
        /*
         * 802.11n-2009 9.13.3.1: In an IBSS, the HT Protection field is
         * reserved, but an HT STA shall protect HT transmissions as though
@@ -1131,6 +1292,11 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata)
        presp = rcu_dereference_protected(ifibss->presp,
                                          lockdep_is_held(&sdata->wdev.mtx));
        RCU_INIT_POINTER(sdata->u.ibss.presp, NULL);
+
+       /* on the next join, re-program HT parameters */
+       memset(&ifibss->ht_capa, 0, sizeof(ifibss->ht_capa));
+       memset(&ifibss->ht_capa_mask, 0, sizeof(ifibss->ht_capa_mask));
+
        sdata->vif.bss_conf.ibss_joined = false;
        sdata->vif.bss_conf.ibss_creator = false;
        sdata->vif.bss_conf.enable_beacon = false;
index 8412a303993a647eed61e6b0794e91e49b27dd29..b6186517ec567e85eb986d77dd6ff72c36a4379d 100644 (file)
@@ -53,9 +53,6 @@ struct ieee80211_local;
  * increased memory use (about 2 kB of RAM per entry). */
 #define IEEE80211_FRAGMENT_MAX 4
 
-#define TU_TO_JIFFIES(x)       (usecs_to_jiffies((x) * 1024))
-#define TU_TO_EXP_TIME(x)      (jiffies + TU_TO_JIFFIES(x))
-
 /* power level hasn't been configured (or set to automatic) */
 #define IEEE80211_UNSET_POWER_LEVEL    INT_MIN
 
@@ -259,6 +256,8 @@ struct ieee80211_if_ap {
        struct beacon_data __rcu *beacon;
        struct probe_resp __rcu *probe_resp;
 
+       /* to be used after channel switch. */
+       struct cfg80211_beacon_data *next_beacon;
        struct list_head vlans;
 
        struct ps_data ps;
@@ -509,6 +508,9 @@ struct ieee80211_if_ibss {
        /* probe response/beacon for IBSS */
        struct beacon_data __rcu *presp;
 
+       struct ieee80211_ht_cap ht_capa; /* configured ht-cap over-rides */
+       struct ieee80211_ht_cap ht_capa_mask; /* Valid parts of ht_capa */
+
        spinlock_t incomplete_lock;
        struct list_head incomplete_stations;
 
@@ -713,6 +715,11 @@ struct ieee80211_sub_if_data {
 
        struct ieee80211_tx_queue_params tx_conf[IEEE80211_NUM_ACS];
 
+       struct work_struct csa_finalize_work;
+       int csa_counter_offset_beacon;
+       int csa_counter_offset_presp;
+       bool csa_radar_required;
+
        /* used to reconfigure hardware SM PS */
        struct work_struct recalc_smps;
 
@@ -809,6 +816,34 @@ ieee80211_get_sdata_band(struct ieee80211_sub_if_data *sdata)
        return band;
 }
 
+static inline int
+ieee80211_chandef_get_shift(struct cfg80211_chan_def *chandef)
+{
+       switch (chandef->width) {
+       case NL80211_CHAN_WIDTH_5:
+               return 2;
+       case NL80211_CHAN_WIDTH_10:
+               return 1;
+       default:
+               return 0;
+       }
+}
+
+static inline int
+ieee80211_vif_get_shift(struct ieee80211_vif *vif)
+{
+       struct ieee80211_chanctx_conf *chanctx_conf;
+       int shift = 0;
+
+       rcu_read_lock();
+       chanctx_conf = rcu_dereference(vif->chanctx_conf);
+       if (chanctx_conf)
+               shift = ieee80211_chandef_get_shift(&chanctx_conf->def);
+       rcu_read_unlock();
+
+       return shift;
+}
+
 enum sdata_queue_type {
        IEEE80211_SDATA_QUEUE_TYPE_FRAME        = 0,
        IEEE80211_SDATA_QUEUE_AGG_START         = 1,
@@ -1026,7 +1061,7 @@ struct ieee80211_local {
        struct cfg80211_ssid scan_ssid;
        struct cfg80211_scan_request *int_scan_req;
        struct cfg80211_scan_request *scan_req, *hw_scan_req;
-       struct ieee80211_channel *scan_channel;
+       struct cfg80211_chan_def scan_chandef;
        enum ieee80211_band hw_scan_band;
        int scan_channel_idx;
        int scan_ies_len;
@@ -1063,7 +1098,6 @@ struct ieee80211_local {
        u32 dot11TransmittedFrameCount;
 
 #ifdef CONFIG_MAC80211_LEDS
-       int tx_led_counter, rx_led_counter;
        struct led_trigger *tx_led, *rx_led, *assoc_led, *radio_led;
        struct tpt_led_trigger *tpt_led_trigger;
        char tx_led_name[32], rx_led_name[32],
@@ -1306,7 +1340,8 @@ void ieee80211_mesh_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
 void ieee80211_scan_work(struct work_struct *work);
 int ieee80211_request_ibss_scan(struct ieee80211_sub_if_data *sdata,
                                const u8 *ssid, u8 ssid_len,
-                               struct ieee80211_channel *chan);
+                               struct ieee80211_channel *chan,
+                               enum nl80211_bss_scan_width scan_width);
 int ieee80211_request_scan(struct ieee80211_sub_if_data *sdata,
                           struct cfg80211_scan_request *req);
 void ieee80211_scan_cancel(struct ieee80211_local *local);
@@ -1341,6 +1376,9 @@ void ieee80211_roc_notify_destroy(struct ieee80211_roc_work *roc, bool free);
 void ieee80211_sw_roc_work(struct work_struct *work);
 void ieee80211_handle_roc_started(struct ieee80211_roc_work *roc);
 
+/* channel switch handling */
+void ieee80211_csa_finalize_work(struct work_struct *work);
+
 /* interface handling */
 int ieee80211_iface_init(void);
 void ieee80211_iface_exit(void);
@@ -1362,6 +1400,8 @@ void ieee80211_del_virtual_monitor(struct ieee80211_local *local);
 
 bool __ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata);
 void ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata);
+int ieee80211_assign_beacon(struct ieee80211_sub_if_data *sdata,
+                           struct cfg80211_beacon_data *params);
 
 static inline bool ieee80211_sdata_running(struct ieee80211_sub_if_data *sdata)
 {
@@ -1465,7 +1505,8 @@ extern void *mac80211_wiphy_privid; /* for wiphy privid */
 u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len,
                        enum nl80211_iftype type);
 int ieee80211_frame_duration(enum ieee80211_band band, size_t len,
-                            int rate, int erp, int short_preamble);
+                            int rate, int erp, int short_preamble,
+                            int shift);
 void mac80211_ev_michael_mic_failure(struct ieee80211_sub_if_data *sdata, int keyidx,
                                     struct ieee80211_hdr *hdr, const u8 *tsc,
                                     gfp_t gfp);
@@ -1569,7 +1610,7 @@ void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata,
 int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
                             size_t buffer_len, const u8 *ie, size_t ie_len,
                             enum ieee80211_band band, u32 rate_mask,
-                            u8 channel);
+                            struct cfg80211_chan_def *chandef);
 struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
                                          u8 *dst, u32 ratemask,
                                          struct ieee80211_channel *chan,
@@ -1582,10 +1623,7 @@ void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst,
                              u32 ratemask, bool directed, u32 tx_flags,
                              struct ieee80211_channel *channel, bool scan);
 
-void ieee80211_sta_def_wmm_params(struct ieee80211_sub_if_data *sdata,
-                                 const size_t supp_rates_len,
-                                 const u8 *supp_rates);
-u32 ieee80211_sta_get_rates(struct ieee80211_local *local,
+u32 ieee80211_sta_get_rates(struct ieee80211_sub_if_data *sdata,
                            struct ieee802_11_elems *elems,
                            enum ieee80211_band band, u32 *basic_rates);
 int __ieee80211_request_smps(struct ieee80211_sub_if_data *sdata,
@@ -1602,6 +1640,9 @@ u8 *ieee80211_ie_build_ht_oper(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap,
                               u16 prot_mode);
 u8 *ieee80211_ie_build_vht_cap(u8 *pos, struct ieee80211_sta_vht_cap *vht_cap,
                               u32 cap);
+int ieee80211_parse_bitrates(struct cfg80211_chan_def *chandef,
+                            const struct ieee80211_supported_band *sband,
+                            const u8 *srates, int srates_len, u32 *rates);
 int ieee80211_add_srates_ie(struct ieee80211_sub_if_data *sdata,
                            struct sk_buff *skb, bool need_basic,
                            enum ieee80211_band band);
@@ -1622,6 +1663,11 @@ int __must_check
 ieee80211_vif_change_bandwidth(struct ieee80211_sub_if_data *sdata,
                               const struct cfg80211_chan_def *chandef,
                               u32 *changed);
+/* NOTE: only use ieee80211_vif_change_channel() for channel switch */
+int __must_check
+ieee80211_vif_change_channel(struct ieee80211_sub_if_data *sdata,
+                            const struct cfg80211_chan_def *chandef,
+                            u32 *changed);
 void ieee80211_vif_release_channel(struct ieee80211_sub_if_data *sdata);
 void ieee80211_vif_vlan_copy_chanctx(struct ieee80211_sub_if_data *sdata);
 void ieee80211_vif_copy_chanctx_to_vlans(struct ieee80211_sub_if_data *sdata,
index cc117591f678463c2cd9a3eb4060af5e01bf51ca..7ca534bf4ceaef6d3125a6204d66bf29ced020ab 100644 (file)
@@ -54,7 +54,7 @@ bool __ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata)
                return false;
        }
 
-       power = chanctx_conf->def.chan->max_power;
+       power = ieee80211_chandef_max_power(&chanctx_conf->def);
        rcu_read_unlock();
 
        if (sdata->user_power_level != IEEE80211_UNSET_POWER_LEVEL)
@@ -274,6 +274,12 @@ static int ieee80211_check_concurrent_iface(struct ieee80211_sub_if_data *sdata,
                        if (iftype == NL80211_IFTYPE_ADHOC &&
                            nsdata->vif.type == NL80211_IFTYPE_ADHOC)
                                return -EBUSY;
+                       /*
+                        * will not add another interface while any channel
+                        * switch is active.
+                        */
+                       if (nsdata->vif.csa_active)
+                               return -EBUSY;
 
                        /*
                         * The remaining checks are only performed for interfaces
@@ -804,6 +810,8 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
        cancel_work_sync(&local->dynamic_ps_enable_work);
 
        cancel_work_sync(&sdata->recalc_smps);
+       sdata->vif.csa_active = false;
+       cancel_work_sync(&sdata->csa_finalize_work);
 
        cancel_delayed_work_sync(&sdata->dfs_cac_timer_work);
 
@@ -1267,6 +1275,7 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata,
        skb_queue_head_init(&sdata->skb_queue);
        INIT_WORK(&sdata->work, ieee80211_iface_work);
        INIT_WORK(&sdata->recalc_smps, ieee80211_recalc_smps_work);
+       INIT_WORK(&sdata->csa_finalize_work, ieee80211_csa_finalize_work);
 
        switch (type) {
        case NL80211_IFTYPE_P2P_GO:
index e39cc91d0cf125c584464ef73be6df5e97729e3d..620677e897bd0fc82fd5f4e737156096c05a9eef 100644 (file)
@@ -93,6 +93,9 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
 
        might_sleep();
 
+       if (key->flags & KEY_FLAG_TAINTED)
+               return -EINVAL;
+
        if (!key->local->ops->set_key)
                goto out_unsupported;
 
@@ -455,6 +458,7 @@ int ieee80211_key_link(struct ieee80211_key *key,
                       struct ieee80211_sub_if_data *sdata,
                       struct sta_info *sta)
 {
+       struct ieee80211_local *local = sdata->local;
        struct ieee80211_key *old_key;
        int idx, ret;
        bool pairwise;
@@ -484,10 +488,13 @@ int ieee80211_key_link(struct ieee80211_key *key,
 
        ieee80211_debugfs_key_add(key);
 
-       ret = ieee80211_key_enable_hw_accel(key);
-
-       if (ret)
-               ieee80211_key_free(key, true);
+       if (!local->wowlan) {
+               ret = ieee80211_key_enable_hw_accel(key);
+               if (ret)
+                       ieee80211_key_free(key, true);
+       } else {
+               ret = 0;
+       }
 
        mutex_unlock(&sdata->local->key_mtx);
 
@@ -540,7 +547,7 @@ void ieee80211_iter_keys(struct ieee80211_hw *hw,
                         void *iter_data)
 {
        struct ieee80211_local *local = hw_to_local(hw);
-       struct ieee80211_key *key;
+       struct ieee80211_key *key, *tmp;
        struct ieee80211_sub_if_data *sdata;
 
        ASSERT_RTNL();
@@ -548,13 +555,14 @@ void ieee80211_iter_keys(struct ieee80211_hw *hw,
        mutex_lock(&local->key_mtx);
        if (vif) {
                sdata = vif_to_sdata(vif);
-               list_for_each_entry(key, &sdata->key_list, list)
+               list_for_each_entry_safe(key, tmp, &sdata->key_list, list)
                        iter(hw, &sdata->vif,
                             key->sta ? &key->sta->sta : NULL,
                             &key->conf, iter_data);
        } else {
                list_for_each_entry(sdata, &local->interfaces, list)
-                       list_for_each_entry(key, &sdata->key_list, list)
+                       list_for_each_entry_safe(key, tmp,
+                                                &sdata->key_list, list)
                                iter(hw, &sdata->vif,
                                     key->sta ? &key->sta->sta : NULL,
                                     &key->conf, iter_data);
@@ -751,3 +759,135 @@ void ieee80211_get_key_rx_seq(struct ieee80211_key_conf *keyconf,
        }
 }
 EXPORT_SYMBOL(ieee80211_get_key_rx_seq);
+
+void ieee80211_set_key_tx_seq(struct ieee80211_key_conf *keyconf,
+                             struct ieee80211_key_seq *seq)
+{
+       struct ieee80211_key *key;
+       u64 pn64;
+
+       key = container_of(keyconf, struct ieee80211_key, conf);
+
+       switch (key->conf.cipher) {
+       case WLAN_CIPHER_SUITE_TKIP:
+               key->u.tkip.tx.iv32 = seq->tkip.iv32;
+               key->u.tkip.tx.iv16 = seq->tkip.iv16;
+               break;
+       case WLAN_CIPHER_SUITE_CCMP:
+               pn64 = (u64)seq->ccmp.pn[5] |
+                      ((u64)seq->ccmp.pn[4] << 8) |
+                      ((u64)seq->ccmp.pn[3] << 16) |
+                      ((u64)seq->ccmp.pn[2] << 24) |
+                      ((u64)seq->ccmp.pn[1] << 32) |
+                      ((u64)seq->ccmp.pn[0] << 40);
+               atomic64_set(&key->u.ccmp.tx_pn, pn64);
+               break;
+       case WLAN_CIPHER_SUITE_AES_CMAC:
+               pn64 = (u64)seq->aes_cmac.pn[5] |
+                      ((u64)seq->aes_cmac.pn[4] << 8) |
+                      ((u64)seq->aes_cmac.pn[3] << 16) |
+                      ((u64)seq->aes_cmac.pn[2] << 24) |
+                      ((u64)seq->aes_cmac.pn[1] << 32) |
+                      ((u64)seq->aes_cmac.pn[0] << 40);
+               atomic64_set(&key->u.aes_cmac.tx_pn, pn64);
+               break;
+       default:
+               WARN_ON(1);
+               break;
+       }
+}
+EXPORT_SYMBOL_GPL(ieee80211_set_key_tx_seq);
+
+void ieee80211_set_key_rx_seq(struct ieee80211_key_conf *keyconf,
+                             int tid, struct ieee80211_key_seq *seq)
+{
+       struct ieee80211_key *key;
+       u8 *pn;
+
+       key = container_of(keyconf, struct ieee80211_key, conf);
+
+       switch (key->conf.cipher) {
+       case WLAN_CIPHER_SUITE_TKIP:
+               if (WARN_ON(tid < 0 || tid >= IEEE80211_NUM_TIDS))
+                       return;
+               key->u.tkip.rx[tid].iv32 = seq->tkip.iv32;
+               key->u.tkip.rx[tid].iv16 = seq->tkip.iv16;
+               break;
+       case WLAN_CIPHER_SUITE_CCMP:
+               if (WARN_ON(tid < -1 || tid >= IEEE80211_NUM_TIDS))
+                       return;
+               if (tid < 0)
+                       pn = key->u.ccmp.rx_pn[IEEE80211_NUM_TIDS];
+               else
+                       pn = key->u.ccmp.rx_pn[tid];
+               memcpy(pn, seq->ccmp.pn, IEEE80211_CCMP_PN_LEN);
+               break;
+       case WLAN_CIPHER_SUITE_AES_CMAC:
+               if (WARN_ON(tid != 0))
+                       return;
+               pn = key->u.aes_cmac.rx_pn;
+               memcpy(pn, seq->aes_cmac.pn, IEEE80211_CMAC_PN_LEN);
+               break;
+       default:
+               WARN_ON(1);
+               break;
+       }
+}
+EXPORT_SYMBOL_GPL(ieee80211_set_key_rx_seq);
+
+void ieee80211_remove_key(struct ieee80211_key_conf *keyconf)
+{
+       struct ieee80211_key *key;
+
+       key = container_of(keyconf, struct ieee80211_key, conf);
+
+       assert_key_lock(key->local);
+
+       /*
+        * if key was uploaded, we assume the driver will/has remove(d)
+        * it, so adjust bookkeeping accordingly
+        */
+       if (key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) {
+               key->flags &= ~KEY_FLAG_UPLOADED_TO_HARDWARE;
+
+               if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) ||
+                     (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV) ||
+                     (key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE)))
+                       increment_tailroom_need_count(key->sdata);
+       }
+
+       ieee80211_key_free(key, false);
+}
+EXPORT_SYMBOL_GPL(ieee80211_remove_key);
+
+struct ieee80211_key_conf *
+ieee80211_gtk_rekey_add(struct ieee80211_vif *vif,
+                       struct ieee80211_key_conf *keyconf)
+{
+       struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+       struct ieee80211_local *local = sdata->local;
+       struct ieee80211_key *key;
+       int err;
+
+       if (WARN_ON(!local->wowlan))
+               return ERR_PTR(-EINVAL);
+
+       if (WARN_ON(vif->type != NL80211_IFTYPE_STATION))
+               return ERR_PTR(-EINVAL);
+
+       key = ieee80211_key_alloc(keyconf->cipher, keyconf->keyidx,
+                                 keyconf->keylen, keyconf->key,
+                                 0, NULL);
+       if (IS_ERR(key))
+               return ERR_PTR(PTR_ERR(key));
+
+       if (sdata->u.mgd.mfp != IEEE80211_MFP_DISABLED)
+               key->conf.flags |= IEEE80211_KEY_FLAG_RX_MGMT;
+
+       err = ieee80211_key_link(key, sdata, NULL);
+       if (err)
+               return ERR_PTR(err);
+
+       return &key->conf;
+}
+EXPORT_SYMBOL_GPL(ieee80211_gtk_rekey_add);
index bcffa69031298f97ee99f50ade02a6e862c5a8a8..e2b836446af386dba692304bda48fd1dcb038c5b 100644 (file)
 #include <linux/export.h>
 #include "led.h"
 
+#define MAC80211_BLINK_DELAY 50 /* ms */
+
 void ieee80211_led_rx(struct ieee80211_local *local)
 {
+       unsigned long led_delay = MAC80211_BLINK_DELAY;
        if (unlikely(!local->rx_led))
                return;
-       if (local->rx_led_counter++ % 2 == 0)
-               led_trigger_event(local->rx_led, LED_OFF);
-       else
-               led_trigger_event(local->rx_led, LED_FULL);
+       led_trigger_blink_oneshot(local->rx_led, &led_delay, &led_delay, 0);
 }
 
-/* q is 1 if a packet was enqueued, 0 if it has been transmitted */
-void ieee80211_led_tx(struct ieee80211_local *local, int q)
+void ieee80211_led_tx(struct ieee80211_local *local)
 {
+       unsigned long led_delay = MAC80211_BLINK_DELAY;
        if (unlikely(!local->tx_led))
                return;
-       /* not sure how this is supposed to work ... */
-       local->tx_led_counter += 2*q-1;
-       if (local->tx_led_counter % 2 == 0)
-               led_trigger_event(local->tx_led, LED_OFF);
-       else
-               led_trigger_event(local->tx_led, LED_FULL);
+       led_trigger_blink_oneshot(local->tx_led, &led_delay, &led_delay, 0);
 }
 
 void ieee80211_led_assoc(struct ieee80211_local *local, bool associated)
index e0275d9befa8da1702da7246a18ec8c301333269..89f4344f13b973509344d2431960c4d51193d56e 100644 (file)
@@ -13,7 +13,7 @@
 
 #ifdef CONFIG_MAC80211_LEDS
 void ieee80211_led_rx(struct ieee80211_local *local);
-void ieee80211_led_tx(struct ieee80211_local *local, int q);
+void ieee80211_led_tx(struct ieee80211_local *local);
 void ieee80211_led_assoc(struct ieee80211_local *local,
                         bool associated);
 void ieee80211_led_radio(struct ieee80211_local *local,
@@ -27,7 +27,7 @@ void ieee80211_mod_tpt_led_trig(struct ieee80211_local *local,
 static inline void ieee80211_led_rx(struct ieee80211_local *local)
 {
 }
-static inline void ieee80211_led_tx(struct ieee80211_local *local, int q)
+static inline void ieee80211_led_tx(struct ieee80211_local *local)
 {
 }
 static inline void ieee80211_led_assoc(struct ieee80211_local *local,
index 091088ac7890a9c7f873f623f7789cffb9e12def..25eb35b01938e9e91d5dfd75adcee8acb030036c 100644 (file)
@@ -102,17 +102,8 @@ static u32 ieee80211_hw_conf_chan(struct ieee80211_local *local)
 
        offchannel_flag = local->hw.conf.flags & IEEE80211_CONF_OFFCHANNEL;
 
-       if (local->scan_channel) {
-               chandef.chan = local->scan_channel;
-               /* If scanning on oper channel, use whatever channel-type
-                * is currently in use.
-                */
-               if (chandef.chan == local->_oper_chandef.chan) {
-                       chandef = local->_oper_chandef;
-               } else {
-                       chandef.width = NL80211_CHAN_WIDTH_20_NOHT;
-                       chandef.center_freq1 = chandef.chan->center_freq;
-               }
+       if (local->scan_chandef.chan) {
+               chandef = local->scan_chandef;
        } else if (local->tmp_channel) {
                chandef.chan = local->tmp_channel;
                chandef.width = NL80211_CHAN_WIDTH_20_NOHT;
@@ -151,7 +142,7 @@ static u32 ieee80211_hw_conf_chan(struct ieee80211_local *local)
                changed |= IEEE80211_CONF_CHANGE_SMPS;
        }
 
-       power = chandef.chan->max_power;
+       power = ieee80211_chandef_max_power(&chandef);
 
        rcu_read_lock();
        list_for_each_entry_rcu(sdata, &local->interfaces, list) {
index 447f41bbe744d79b570eb4c85dd4cdb8ffb72e70..885a5f6e2c219b5c4e17b6574bd32e4231ed39b1 100644 (file)
@@ -62,7 +62,6 @@ bool mesh_matches_local(struct ieee80211_sub_if_data *sdata,
                        struct ieee802_11_elems *ie)
 {
        struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
-       struct ieee80211_local *local = sdata->local;
        u32 basic_rates = 0;
        struct cfg80211_chan_def sta_chan_def;
 
@@ -85,7 +84,7 @@ bool mesh_matches_local(struct ieee80211_sub_if_data *sdata,
             (ifmsh->mesh_auth_id == ie->mesh_config->meshconf_auth)))
                return false;
 
-       ieee80211_sta_get_rates(local, ie, ieee80211_get_sdata_band(sdata),
+       ieee80211_sta_get_rates(sdata, ie, ieee80211_get_sdata_band(sdata),
                                &basic_rates);
 
        if (sdata->vif.bss_conf.basic_rates != basic_rates)
@@ -274,7 +273,9 @@ int mesh_add_meshconf_ie(struct ieee80211_sub_if_data *sdata,
        neighbors = min_t(int, neighbors, IEEE80211_MAX_MESH_PEERINGS);
        *pos++ = neighbors << 1;
        /* Mesh capability */
-       *pos = IEEE80211_MESHCONF_CAPAB_FORWARDING;
+       *pos = 0x00;
+       *pos |= ifmsh->mshcfg.dot11MeshForwarding ?
+                       IEEE80211_MESHCONF_CAPAB_FORWARDING : 0x00;
        *pos |= ifmsh->accepting_plinks ?
                        IEEE80211_MESHCONF_CAPAB_ACCEPT_PLINKS : 0x00;
        /* Mesh PS mode. See IEEE802.11-2012 8.4.2.100.8 */
index 02c05fa15c203b9a51dc576579f7455979fe0376..6b65d5055f5bf7572d29712c6345c88dd11b33a5 100644 (file)
@@ -379,7 +379,7 @@ static void mesh_sta_info_init(struct ieee80211_sub_if_data *sdata,
        u32 rates, basic_rates = 0, changed = 0;
 
        sband = local->hw.wiphy->bands[band];
-       rates = ieee80211_sta_get_rates(local, elems, band, &basic_rates);
+       rates = ieee80211_sta_get_rates(sdata, elems, band, &basic_rates);
 
        spin_lock_bh(&sta->lock);
        sta->last_rx = jiffies;
index cc9e02d79b550106ee07b6aca48ffa9e18081a45..2aab1308690fad0416457a6ccbd6638636d69310 100644 (file)
@@ -489,27 +489,6 @@ static int ieee80211_config_bw(struct ieee80211_sub_if_data *sdata,
 
 /* frame sending functions */
 
-static int ieee80211_compatible_rates(const u8 *supp_rates, int supp_rates_len,
-                                     struct ieee80211_supported_band *sband,
-                                     u32 *rates)
-{
-       int i, j, count;
-       *rates = 0;
-       count = 0;
-       for (i = 0; i < supp_rates_len; i++) {
-               int rate = (supp_rates[i] & 0x7F) * 5;
-
-               for (j = 0; j < sband->n_bitrates; j++)
-                       if (sband->bitrates[j].bitrate == rate) {
-                               *rates |= BIT(j);
-                               count++;
-                               break;
-                       }
-       }
-
-       return count;
-}
-
 static void ieee80211_add_ht_ie(struct ieee80211_sub_if_data *sdata,
                                struct sk_buff *skb, u8 ap_ht_param,
                                struct ieee80211_supported_band *sband,
@@ -628,12 +607,12 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
        struct ieee80211_mgmt *mgmt;
        u8 *pos, qos_info;
        size_t offset = 0, noffset;
-       int i, count, rates_len, supp_rates_len;
+       int i, count, rates_len, supp_rates_len, shift;
        u16 capab;
        struct ieee80211_supported_band *sband;
        struct ieee80211_chanctx_conf *chanctx_conf;
        struct ieee80211_channel *chan;
-       u32 rates = 0;
+       u32 rate_flags, rates = 0;
 
        sdata_assert_lock(sdata);
 
@@ -644,8 +623,10 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
                return;
        }
        chan = chanctx_conf->def.chan;
+       rate_flags = ieee80211_chandef_rate_flags(&chanctx_conf->def);
        rcu_read_unlock();
        sband = local->hw.wiphy->bands[chan->band];
+       shift = ieee80211_vif_get_shift(&sdata->vif);
 
        if (assoc_data->supp_rates_len) {
                /*
@@ -654,17 +635,24 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
                 * in the association request (e.g. D-Link DAP 1353 in
                 * b-only mode)...
                 */
-               rates_len = ieee80211_compatible_rates(assoc_data->supp_rates,
-                                                      assoc_data->supp_rates_len,
-                                                      sband, &rates);
+               rates_len = ieee80211_parse_bitrates(&chanctx_conf->def, sband,
+                                                    assoc_data->supp_rates,
+                                                    assoc_data->supp_rates_len,
+                                                    &rates);
        } else {
                /*
                 * In case AP not provide any supported rates information
                 * before association, we send information element(s) with
                 * all rates that we support.
                 */
-               rates = ~0;
-               rates_len = sband->n_bitrates;
+               rates_len = 0;
+               for (i = 0; i < sband->n_bitrates; i++) {
+                       if ((rate_flags & sband->bitrates[i].flags)
+                           != rate_flags)
+                               continue;
+                       rates |= BIT(i);
+                       rates_len++;
+               }
        }
 
        skb = alloc_skb(local->hw.extra_tx_headroom +
@@ -741,8 +729,9 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
        count = 0;
        for (i = 0; i < sband->n_bitrates; i++) {
                if (BIT(i) & rates) {
-                       int rate = sband->bitrates[i].bitrate;
-                       *pos++ = (u8) (rate / 5);
+                       int rate = DIV_ROUND_UP(sband->bitrates[i].bitrate,
+                                               5 * (1 << shift));
+                       *pos++ = (u8) rate;
                        if (++count == 8)
                                break;
                }
@@ -755,8 +744,10 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
 
                for (i++; i < sband->n_bitrates; i++) {
                        if (BIT(i) & rates) {
-                               int rate = sband->bitrates[i].bitrate;
-                               *pos++ = (u8) (rate / 5);
+                               int rate;
+                               rate = DIV_ROUND_UP(sband->bitrates[i].bitrate,
+                                                   5 * (1 << shift));
+                               *pos++ = (u8) rate;
                        }
                }
        }
@@ -767,7 +758,8 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
                *pos++ = WLAN_EID_PWR_CAPABILITY;
                *pos++ = 2;
                *pos++ = 0; /* min tx power */
-               *pos++ = chan->max_power; /* max tx power */
+                /* max tx power */
+               *pos++ = ieee80211_chandef_max_power(&chanctx_conf->def);
 
                /* 2. supported channels */
                /* TODO: get this in reg domain format */
@@ -1121,6 +1113,15 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
        case -1:
                cfg80211_chandef_create(&new_chandef, new_chan,
                                        NL80211_CHAN_NO_HT);
+               /* keep width for 5/10 MHz channels */
+               switch (sdata->vif.bss_conf.chandef.width) {
+               case NL80211_CHAN_WIDTH_5:
+               case NL80211_CHAN_WIDTH_10:
+                       new_chandef.width = sdata->vif.bss_conf.chandef.width;
+                       break;
+               default:
+                       break;
+               }
                break;
        }
 
@@ -2443,15 +2444,16 @@ static void ieee80211_get_rates(struct ieee80211_supported_band *sband,
                                u8 *supp_rates, unsigned int supp_rates_len,
                                u32 *rates, u32 *basic_rates,
                                bool *have_higher_than_11mbit,
-                               int *min_rate, int *min_rate_index)
+                               int *min_rate, int *min_rate_index,
+                               int shift, u32 rate_flags)
 {
        int i, j;
 
        for (i = 0; i < supp_rates_len; i++) {
-               int rate = (supp_rates[i] & 0x7f) * 5;
+               int rate = supp_rates[i] & 0x7f;
                bool is_basic = !!(supp_rates[i] & 0x80);
 
-               if (rate > 110)
+               if ((rate * 5 * (1 << shift)) > 110)
                        *have_higher_than_11mbit = true;
 
                /*
@@ -2467,12 +2469,20 @@ static void ieee80211_get_rates(struct ieee80211_supported_band *sband,
                        continue;
 
                for (j = 0; j < sband->n_bitrates; j++) {
-                       if (sband->bitrates[j].bitrate == rate) {
+                       struct ieee80211_rate *br;
+                       int brate;
+
+                       br = &sband->bitrates[j];
+                       if ((rate_flags & br->flags) != rate_flags)
+                               continue;
+
+                       brate = DIV_ROUND_UP(br->bitrate, (1 << shift) * 5);
+                       if (brate == rate) {
                                *rates |= BIT(j);
                                if (is_basic)
                                        *basic_rates |= BIT(j);
-                               if (rate < *min_rate) {
-                                       *min_rate = rate;
+                               if ((rate * 5) < *min_rate) {
+                                       *min_rate = rate * 5;
                                        *min_rate_index = j;
                                }
                                break;
@@ -3902,27 +3912,40 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
                if (!new_sta)
                        return -ENOMEM;
        }
-
        if (new_sta) {
                u32 rates = 0, basic_rates = 0;
                bool have_higher_than_11mbit;
                int min_rate = INT_MAX, min_rate_index = -1;
+               struct ieee80211_chanctx_conf *chanctx_conf;
                struct ieee80211_supported_band *sband;
                const struct cfg80211_bss_ies *ies;
+               int shift;
+               u32 rate_flags;
 
                sband = local->hw.wiphy->bands[cbss->channel->band];
 
                err = ieee80211_prep_channel(sdata, cbss);
                if (err) {
                        sta_info_free(local, new_sta);
-                       return err;
+                       return -EINVAL;
                }
+               shift = ieee80211_vif_get_shift(&sdata->vif);
+
+               rcu_read_lock();
+               chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
+               if (WARN_ON(!chanctx_conf)) {
+                       rcu_read_unlock();
+                       return -EINVAL;
+               }
+               rate_flags = ieee80211_chandef_rate_flags(&chanctx_conf->def);
+               rcu_read_unlock();
 
                ieee80211_get_rates(sband, bss->supp_rates,
                                    bss->supp_rates_len,
                                    &rates, &basic_rates,
                                    &have_higher_than_11mbit,
-                                   &min_rate, &min_rate_index);
+                                   &min_rate, &min_rate_index,
+                                   shift, rate_flags);
 
                /*
                 * This used to be a workaround for basic rates missing
index 30d58d2d13e26a667024a3a2fca655b6fce1bd34..e126605cec66baf82aadb835856110d74105795f 100644 (file)
@@ -210,7 +210,7 @@ static bool rc_no_data_or_no_ack_use_min(struct ieee80211_tx_rate_control *txrc)
                !ieee80211_is_data(fc);
 }
 
-static void rc_send_low_broadcast(s8 *idx, u32 basic_rates,
+static void rc_send_low_basicrate(s8 *idx, u32 basic_rates,
                                  struct ieee80211_supported_band *sband)
 {
        u8 i;
@@ -232,37 +232,28 @@ static void rc_send_low_broadcast(s8 *idx, u32 basic_rates,
        /* could not find a basic rate; use original selection */
 }
 
-static inline s8
-rate_lowest_non_cck_index(struct ieee80211_supported_band *sband,
-                         struct ieee80211_sta *sta)
+static void __rate_control_send_low(struct ieee80211_hw *hw,
+                                   struct ieee80211_supported_band *sband,
+                                   struct ieee80211_sta *sta,
+                                   struct ieee80211_tx_info *info)
 {
        int i;
+       u32 rate_flags =
+               ieee80211_chandef_rate_flags(&hw->conf.chandef);
+
+       if ((sband->band == IEEE80211_BAND_2GHZ) &&
+           (info->flags & IEEE80211_TX_CTL_NO_CCK_RATE))
+               rate_flags |= IEEE80211_RATE_ERP_G;
 
+       info->control.rates[0].idx = 0;
        for (i = 0; i < sband->n_bitrates; i++) {
-               struct ieee80211_rate *srate = &sband->bitrates[i];
-               if ((srate->bitrate == 10) || (srate->bitrate == 20) ||
-                   (srate->bitrate == 55) || (srate->bitrate == 110))
+               if (!rate_supported(sta, sband->band, i))
                        continue;
 
-               if (rate_supported(sta, sband->band, i))
-                       return i;
+               info->control.rates[0].idx = i;
+               break;
        }
-
-       /* No matching rate found */
-       return 0;
-}
-
-static void __rate_control_send_low(struct ieee80211_hw *hw,
-                                   struct ieee80211_supported_band *sband,
-                                   struct ieee80211_sta *sta,
-                                   struct ieee80211_tx_info *info)
-{
-       if ((sband->band != IEEE80211_BAND_2GHZ) ||
-           !(info->flags & IEEE80211_TX_CTL_NO_CCK_RATE))
-               info->control.rates[0].idx = rate_lowest_index(sband, sta);
-       else
-               info->control.rates[0].idx =
-                       rate_lowest_non_cck_index(sband, sta);
+       WARN_ON_ONCE(i == sband->n_bitrates);
 
        info->control.rates[0].count =
                (info->flags & IEEE80211_TX_CTL_NO_ACK) ?
@@ -272,28 +263,37 @@ static void __rate_control_send_low(struct ieee80211_hw *hw,
 }
 
 
-bool rate_control_send_low(struct ieee80211_sta *sta,
+bool rate_control_send_low(struct ieee80211_sta *pubsta,
                           void *priv_sta,
                           struct ieee80211_tx_rate_control *txrc)
 {
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(txrc->skb);
        struct ieee80211_supported_band *sband = txrc->sband;
+       struct sta_info *sta;
        int mcast_rate;
+       bool use_basicrate = false;
 
-       if (!sta || !priv_sta || rc_no_data_or_no_ack_use_min(txrc)) {
-               __rate_control_send_low(txrc->hw, sband, sta, info);
+       if (!pubsta || !priv_sta || rc_no_data_or_no_ack_use_min(txrc)) {
+               __rate_control_send_low(txrc->hw, sband, pubsta, info);
 
-               if (!sta && txrc->bss) {
+               if (!pubsta && txrc->bss) {
                        mcast_rate = txrc->bss_conf->mcast_rate[sband->band];
                        if (mcast_rate > 0) {
                                info->control.rates[0].idx = mcast_rate - 1;
                                return true;
                        }
+                       use_basicrate = true;
+               } else if (pubsta) {
+                       sta = container_of(pubsta, struct sta_info, sta);
+                       if (ieee80211_vif_is_mesh(&sta->sdata->vif))
+                               use_basicrate = true;
+               }
 
-                       rc_send_low_broadcast(&info->control.rates[0].idx,
+               if (use_basicrate)
+                       rc_send_low_basicrate(&info->control.rates[0].idx,
                                              txrc->bss_conf->basic_rates,
                                              sband);
-               }
+
                return true;
        }
        return false;
@@ -585,6 +585,7 @@ static void rate_control_apply_mask(struct ieee80211_sub_if_data *sdata,
        u8 mcs_mask[IEEE80211_HT_MCS_MASK_LEN];
        bool has_mcs_mask;
        u32 mask;
+       u32 rate_flags;
        int i;
 
        /*
@@ -594,6 +595,12 @@ static void rate_control_apply_mask(struct ieee80211_sub_if_data *sdata,
         */
        mask = sdata->rc_rateidx_mask[info->band];
        has_mcs_mask = sdata->rc_has_mcs_mask[info->band];
+       rate_flags =
+               ieee80211_chandef_rate_flags(&sdata->vif.bss_conf.chandef);
+       for (i = 0; i < sband->n_bitrates; i++)
+               if ((rate_flags & sband->bitrates[i].flags) != rate_flags)
+                       mask &= ~BIT(i);
+
        if (mask == (1 << sband->n_bitrates) - 1 && !has_mcs_mask)
                return;
 
index d35a5dd3fb13d3f6d742cdc2ccbbeb823365ed63..5dedc56c94dbe91a1b9bd6c959094ab3b494be86 100644 (file)
@@ -66,11 +66,12 @@ static inline void rate_control_rate_init(struct sta_info *sta)
        }
 
        sband = local->hw.wiphy->bands[chanctx_conf->def.chan->band];
-       rcu_read_unlock();
 
        ieee80211_sta_set_rx_nss(sta);
 
-       ref->ops->rate_init(ref->priv, sband, ista, priv_sta);
+       ref->ops->rate_init(ref->priv, sband, &chanctx_conf->def, ista,
+                           priv_sta);
+       rcu_read_unlock();
        set_sta_flag(sta, WLAN_STA_RATE_CONTROL);
 }
 
@@ -81,10 +82,21 @@ static inline void rate_control_rate_update(struct ieee80211_local *local,
        struct rate_control_ref *ref = local->rate_ctrl;
        struct ieee80211_sta *ista = &sta->sta;
        void *priv_sta = sta->rate_ctrl_priv;
+       struct ieee80211_chanctx_conf *chanctx_conf;
+
+       if (ref && ref->ops->rate_update) {
+               rcu_read_lock();
 
-       if (ref && ref->ops->rate_update)
-               ref->ops->rate_update(ref->priv, sband, ista,
-                                     priv_sta, changed);
+               chanctx_conf = rcu_dereference(sta->sdata->vif.chanctx_conf);
+               if (WARN_ON(!chanctx_conf)) {
+                       rcu_read_unlock();
+                       return;
+               }
+
+               ref->ops->rate_update(ref->priv, sband, &chanctx_conf->def,
+                                     ista, priv_sta, changed);
+               rcu_read_unlock();
+       }
        drv_sta_rc_update(local, sta->sdata, &sta->sta, changed);
 }
 
index e6512e2ffd200223cd5cb75090802a98f544bb7d..8b5f7ef7c0c9f14db5dba3baf8ab82247500c1c3 100644 (file)
@@ -383,14 +383,18 @@ minstrel_get_rate(void *priv, struct ieee80211_sta *sta,
 static void
 calc_rate_durations(enum ieee80211_band band,
                    struct minstrel_rate *d,
-                   struct ieee80211_rate *rate)
+                   struct ieee80211_rate *rate,
+                   struct cfg80211_chan_def *chandef)
 {
        int erp = !!(rate->flags & IEEE80211_RATE_ERP_G);
+       int shift = ieee80211_chandef_get_shift(chandef);
 
        d->perfect_tx_time = ieee80211_frame_duration(band, 1200,
-                       rate->bitrate, erp, 1);
+                       DIV_ROUND_UP(rate->bitrate, 1 << shift), erp, 1,
+                       shift);
        d->ack_time = ieee80211_frame_duration(band, 10,
-                       rate->bitrate, erp, 1);
+                       DIV_ROUND_UP(rate->bitrate, 1 << shift), erp, 1,
+                       shift);
 }
 
 static void
@@ -418,21 +422,25 @@ init_sample_table(struct minstrel_sta_info *mi)
 
 static void
 minstrel_rate_init(void *priv, struct ieee80211_supported_band *sband,
-               struct ieee80211_sta *sta, void *priv_sta)
+                  struct cfg80211_chan_def *chandef,
+                  struct ieee80211_sta *sta, void *priv_sta)
 {
        struct minstrel_sta_info *mi = priv_sta;
        struct minstrel_priv *mp = priv;
        struct ieee80211_rate *ctl_rate;
        unsigned int i, n = 0;
        unsigned int t_slot = 9; /* FIXME: get real slot time */
+       u32 rate_flags;
 
        mi->sta = sta;
        mi->lowest_rix = rate_lowest_index(sband, sta);
        ctl_rate = &sband->bitrates[mi->lowest_rix];
        mi->sp_ack_dur = ieee80211_frame_duration(sband->band, 10,
                                ctl_rate->bitrate,
-                               !!(ctl_rate->flags & IEEE80211_RATE_ERP_G), 1);
+                               !!(ctl_rate->flags & IEEE80211_RATE_ERP_G), 1,
+                               ieee80211_chandef_get_shift(chandef));
 
+       rate_flags = ieee80211_chandef_rate_flags(&mp->hw->conf.chandef);
        memset(mi->max_tp_rate, 0, sizeof(mi->max_tp_rate));
        mi->max_prob_rate = 0;
 
@@ -441,15 +449,22 @@ minstrel_rate_init(void *priv, struct ieee80211_supported_band *sband,
                unsigned int tx_time = 0, tx_time_cts = 0, tx_time_rtscts = 0;
                unsigned int tx_time_single;
                unsigned int cw = mp->cw_min;
+               int shift;
 
                if (!rate_supported(sta, sband->band, i))
                        continue;
+               if ((rate_flags & sband->bitrates[i].flags) != rate_flags)
+                       continue;
+
                n++;
                memset(mr, 0, sizeof(*mr));
 
                mr->rix = i;
-               mr->bitrate = sband->bitrates[i].bitrate / 5;
-               calc_rate_durations(sband->band, mr, &sband->bitrates[i]);
+               shift = ieee80211_chandef_get_shift(chandef);
+               mr->bitrate = DIV_ROUND_UP(sband->bitrates[i].bitrate,
+                                          (1 << shift) * 5);
+               calc_rate_durations(sband->band, mr, &sband->bitrates[i],
+                                   chandef);
 
                /* calculate maximum number of retransmissions before
                 * fallback (based on maximum segment size) */
@@ -547,6 +562,7 @@ minstrel_init_cck_rates(struct minstrel_priv *mp)
 {
        static const int bitrates[4] = { 10, 20, 55, 110 };
        struct ieee80211_supported_band *sband;
+       u32 rate_flags = ieee80211_chandef_rate_flags(&mp->hw->conf.chandef);
        int i, j;
 
        sband = mp->hw->wiphy->bands[IEEE80211_BAND_2GHZ];
@@ -559,6 +575,9 @@ minstrel_init_cck_rates(struct minstrel_priv *mp)
                if (rate->flags & IEEE80211_RATE_ERP_G)
                        continue;
 
+               if ((rate_flags & sband->bitrates[i].flags) != rate_flags)
+                       continue;
+
                for (j = 0; j < ARRAY_SIZE(bitrates); j++) {
                        if (rate->bitrate != bitrates[j])
                                continue;
index f5aed963b22e62cd997872d1068bfb6c45eeda7a..a9909651dc0bc2be78e895a7aa0e3b073a1c40d2 100644 (file)
@@ -439,12 +439,13 @@ minstrel_aggr_check(struct ieee80211_sta *pubsta, struct sk_buff *skb)
 {
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
        struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        u16 tid;
 
        if (unlikely(!ieee80211_is_data_qos(hdr->frame_control)))
                return;
 
-       if (unlikely(skb->protocol == cpu_to_be16(ETH_P_PAE)))
+       if (unlikely(info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO))
                return;
 
        tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
@@ -776,7 +777,7 @@ minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
 
        /* Don't use EAPOL frames for sampling on non-mrr hw */
        if (mp->hw->max_rates == 1 &&
-           txrc->skb->protocol == cpu_to_be16(ETH_P_PAE))
+           (info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO))
                sample_idx = -1;
        else
                sample_idx = minstrel_get_sample_rate(mp, mi);
@@ -844,6 +845,7 @@ minstrel_ht_update_cck(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
 
 static void
 minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband,
+                       struct cfg80211_chan_def *chandef,
                         struct ieee80211_sta *sta, void *priv_sta)
 {
        struct minstrel_priv *mp = priv;
@@ -869,8 +871,9 @@ minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband,
        mi->sta = sta;
        mi->stats_update = jiffies;
 
-       ack_dur = ieee80211_frame_duration(sband->band, 10, 60, 1, 1);
-       mi->overhead = ieee80211_frame_duration(sband->band, 0, 60, 1, 1) + ack_dur;
+       ack_dur = ieee80211_frame_duration(sband->band, 10, 60, 1, 1, 0);
+       mi->overhead = ieee80211_frame_duration(sband->band, 0, 60, 1, 1, 0);
+       mi->overhead += ack_dur;
        mi->overhead_rtscts = mi->overhead + 2 * ack_dur;
 
        mi->avg_ampdu_len = MINSTREL_FRAC(1, 1);
@@ -939,22 +942,25 @@ use_legacy:
        memset(&msp->legacy, 0, sizeof(msp->legacy));
        msp->legacy.r = msp->ratelist;
        msp->legacy.sample_table = msp->sample_table;
-       return mac80211_minstrel.rate_init(priv, sband, sta, &msp->legacy);
+       return mac80211_minstrel.rate_init(priv, sband, chandef, sta,
+                                          &msp->legacy);
 }
 
 static void
 minstrel_ht_rate_init(void *priv, struct ieee80211_supported_band *sband,
+                     struct cfg80211_chan_def *chandef,
                       struct ieee80211_sta *sta, void *priv_sta)
 {
-       minstrel_ht_update_caps(priv, sband, sta, priv_sta);
+       minstrel_ht_update_caps(priv, sband, chandef, sta, priv_sta);
 }
 
 static void
 minstrel_ht_rate_update(void *priv, struct ieee80211_supported_band *sband,
+                       struct cfg80211_chan_def *chandef,
                         struct ieee80211_sta *sta, void *priv_sta,
                         u32 changed)
 {
-       minstrel_ht_update_caps(priv, sband, sta, priv_sta);
+       minstrel_ht_update_caps(priv, sband, chandef, sta, priv_sta);
 }
 
 static void *
index 502d3ecc4a797b4004128604e5324d0cfc198bfe..958fad07b54cf64856e3600bd6299f4ca9abd72a 100644 (file)
@@ -293,6 +293,7 @@ rate_control_pid_get_rate(void *priv, struct ieee80211_sta *sta,
 
 static void
 rate_control_pid_rate_init(void *priv, struct ieee80211_supported_band *sband,
+                          struct cfg80211_chan_def *chandef,
                           struct ieee80211_sta *sta, void *priv_sta)
 {
        struct rc_pid_sta_info *spinfo = priv_sta;
index 2c5a79bd3777f5c313d3205a1154ef6b714a8d4d..a84f319c11adcc3122f12d9a679e294aa29277d2 100644 (file)
@@ -87,11 +87,13 @@ ieee80211_rx_radiotap_space(struct ieee80211_local *local,
        int len;
 
        /* always present fields */
-       len = sizeof(struct ieee80211_radiotap_header) + 9;
+       len = sizeof(struct ieee80211_radiotap_header) + 8;
 
-       /* allocate extra bitmap */
+       /* allocate extra bitmaps */
        if (status->vendor_radiotap_len)
                len += 4;
+       if (status->chains)
+               len += 4 * hweight8(status->chains);
 
        if (ieee80211_have_rx_timestamp(status)) {
                len = ALIGN(len, 8);
@@ -100,6 +102,10 @@ ieee80211_rx_radiotap_space(struct ieee80211_local *local,
        if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
                len += 1;
 
+       /* antenna field, if we don't have per-chain info */
+       if (!status->chains)
+               len += 1;
+
        /* padding for RX_FLAGS if necessary */
        len = ALIGN(len, 2);
 
@@ -116,6 +122,11 @@ ieee80211_rx_radiotap_space(struct ieee80211_local *local,
                len += 12;
        }
 
+       if (status->chains) {
+               /* antenna and antenna signal fields */
+               len += 2 * hweight8(status->chains);
+       }
+
        if (status->vendor_radiotap_len) {
                if (WARN_ON_ONCE(status->vendor_radiotap_align == 0))
                        status->vendor_radiotap_align = 1;
@@ -145,8 +156,12 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
        struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
        struct ieee80211_radiotap_header *rthdr;
        unsigned char *pos;
+       __le32 *it_present;
+       u32 it_present_val;
        u16 rx_flags = 0;
-       int mpdulen;
+       u16 channel_flags = 0;
+       int mpdulen, chain;
+       unsigned long chains = status->chains;
 
        mpdulen = skb->len;
        if (!(has_fcs && (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)))
@@ -154,25 +169,39 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
 
        rthdr = (struct ieee80211_radiotap_header *)skb_push(skb, rtap_len);
        memset(rthdr, 0, rtap_len);
+       it_present = &rthdr->it_present;
 
        /* radiotap header, set always present flags */
-       rthdr->it_present =
-               cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) |
-                           (1 << IEEE80211_RADIOTAP_CHANNEL) |
-                           (1 << IEEE80211_RADIOTAP_ANTENNA) |
-                           (1 << IEEE80211_RADIOTAP_RX_FLAGS));
        rthdr->it_len = cpu_to_le16(rtap_len + status->vendor_radiotap_len);
+       it_present_val = BIT(IEEE80211_RADIOTAP_FLAGS) |
+                        BIT(IEEE80211_RADIOTAP_CHANNEL) |
+                        BIT(IEEE80211_RADIOTAP_RX_FLAGS);
+
+       if (!status->chains)
+               it_present_val |= BIT(IEEE80211_RADIOTAP_ANTENNA);
 
-       pos = (unsigned char *)(rthdr + 1);
+       for_each_set_bit(chain, &chains, IEEE80211_MAX_CHAINS) {
+               it_present_val |=
+                       BIT(IEEE80211_RADIOTAP_EXT) |
+                       BIT(IEEE80211_RADIOTAP_RADIOTAP_NAMESPACE);
+               put_unaligned_le32(it_present_val, it_present);
+               it_present++;
+               it_present_val = BIT(IEEE80211_RADIOTAP_ANTENNA) |
+                                BIT(IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
+       }
 
        if (status->vendor_radiotap_len) {
-               rthdr->it_present |=
-                       cpu_to_le32(BIT(IEEE80211_RADIOTAP_VENDOR_NAMESPACE)) |
-                       cpu_to_le32(BIT(IEEE80211_RADIOTAP_EXT));
-               put_unaligned_le32(status->vendor_radiotap_bitmap, pos);
-               pos += 4;
+               it_present_val |= BIT(IEEE80211_RADIOTAP_VENDOR_NAMESPACE) |
+                                 BIT(IEEE80211_RADIOTAP_EXT);
+               put_unaligned_le32(it_present_val, it_present);
+               it_present++;
+               it_present_val = status->vendor_radiotap_bitmap;
        }
 
+       put_unaligned_le32(it_present_val, it_present);
+
+       pos = (void *)(it_present + 1);
+
        /* the order of the following fields is important */
 
        /* IEEE80211_RADIOTAP_TSFT */
@@ -207,28 +236,35 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
                 */
                *pos = 0;
        } else {
+               int shift = 0;
                rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE);
-               *pos = rate->bitrate / 5;
+               if (status->flag & RX_FLAG_10MHZ)
+                       shift = 1;
+               else if (status->flag & RX_FLAG_5MHZ)
+                       shift = 2;
+               *pos = DIV_ROUND_UP(rate->bitrate, 5 * (1 << shift));
        }
        pos++;
 
        /* IEEE80211_RADIOTAP_CHANNEL */
        put_unaligned_le16(status->freq, pos);
        pos += 2;
+       if (status->flag & RX_FLAG_10MHZ)
+               channel_flags |= IEEE80211_CHAN_HALF;
+       else if (status->flag & RX_FLAG_5MHZ)
+               channel_flags |= IEEE80211_CHAN_QUARTER;
+
        if (status->band == IEEE80211_BAND_5GHZ)
-               put_unaligned_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ,
-                                  pos);
+               channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ;
        else if (status->flag & (RX_FLAG_HT | RX_FLAG_VHT))
-               put_unaligned_le16(IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ,
-                                  pos);
+               channel_flags |= IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ;
        else if (rate && rate->flags & IEEE80211_RATE_ERP_G)
-               put_unaligned_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ,
-                                  pos);
+               channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ;
        else if (rate)
-               put_unaligned_le16(IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ,
-                                  pos);
+               channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ;
        else
-               put_unaligned_le16(IEEE80211_CHAN_2GHZ, pos);
+               channel_flags |= IEEE80211_CHAN_2GHZ;
+       put_unaligned_le16(channel_flags, pos);
        pos += 2;
 
        /* IEEE80211_RADIOTAP_DBM_ANTSIGNAL */
@@ -242,9 +278,11 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
 
        /* IEEE80211_RADIOTAP_LOCK_QUALITY is missing */
 
-       /* IEEE80211_RADIOTAP_ANTENNA */
-       *pos = status->antenna;
-       pos++;
+       if (!status->chains) {
+               /* IEEE80211_RADIOTAP_ANTENNA */
+               *pos = status->antenna;
+               pos++;
+       }
 
        /* IEEE80211_RADIOTAP_DB_ANTNOISE is not used */
 
@@ -341,6 +379,11 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
                pos += 2;
        }
 
+       for_each_set_bit(chain, &chains, IEEE80211_MAX_CHAINS) {
+               *pos++ = status->chain_signal[chain];
+               *pos++ = chain;
+       }
+
        if (status->vendor_radiotap_len) {
                /* ensure 2 byte alignment for the vendor field as required */
                if ((pos - (u8 *)rthdr) & 1)
@@ -1011,207 +1054,6 @@ ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
 }
 
 
-static ieee80211_rx_result debug_noinline
-ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
-{
-       struct sk_buff *skb = rx->skb;
-       struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
-       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
-       int keyidx;
-       int hdrlen;
-       ieee80211_rx_result result = RX_DROP_UNUSABLE;
-       struct ieee80211_key *sta_ptk = NULL;
-       int mmie_keyidx = -1;
-       __le16 fc;
-
-       /*
-        * Key selection 101
-        *
-        * There are four types of keys:
-        *  - GTK (group keys)
-        *  - IGTK (group keys for management frames)
-        *  - PTK (pairwise keys)
-        *  - STK (station-to-station pairwise keys)
-        *
-        * When selecting a key, we have to distinguish between multicast
-        * (including broadcast) and unicast frames, the latter can only
-        * use PTKs and STKs while the former always use GTKs and IGTKs.
-        * Unless, of course, actual WEP keys ("pre-RSNA") are used, then
-        * unicast frames can also use key indices like GTKs. Hence, if we
-        * don't have a PTK/STK we check the key index for a WEP key.
-        *
-        * Note that in a regular BSS, multicast frames are sent by the
-        * AP only, associated stations unicast the frame to the AP first
-        * which then multicasts it on their behalf.
-        *
-        * There is also a slight problem in IBSS mode: GTKs are negotiated
-        * with each station, that is something we don't currently handle.
-        * The spec seems to expect that one negotiates the same key with
-        * every station but there's no such requirement; VLANs could be
-        * possible.
-        */
-
-       /*
-        * No point in finding a key and decrypting if the frame is neither
-        * addressed to us nor a multicast frame.
-        */
-       if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
-               return RX_CONTINUE;
-
-       /* start without a key */
-       rx->key = NULL;
-
-       if (rx->sta)
-               sta_ptk = rcu_dereference(rx->sta->ptk);
-
-       fc = hdr->frame_control;
-
-       if (!ieee80211_has_protected(fc))
-               mmie_keyidx = ieee80211_get_mmie_keyidx(rx->skb);
-
-       if (!is_multicast_ether_addr(hdr->addr1) && sta_ptk) {
-               rx->key = sta_ptk;
-               if ((status->flag & RX_FLAG_DECRYPTED) &&
-                   (status->flag & RX_FLAG_IV_STRIPPED))
-                       return RX_CONTINUE;
-               /* Skip decryption if the frame is not protected. */
-               if (!ieee80211_has_protected(fc))
-                       return RX_CONTINUE;
-       } else if (mmie_keyidx >= 0) {
-               /* Broadcast/multicast robust management frame / BIP */
-               if ((status->flag & RX_FLAG_DECRYPTED) &&
-                   (status->flag & RX_FLAG_IV_STRIPPED))
-                       return RX_CONTINUE;
-
-               if (mmie_keyidx < NUM_DEFAULT_KEYS ||
-                   mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS)
-                       return RX_DROP_MONITOR; /* unexpected BIP keyidx */
-               if (rx->sta)
-                       rx->key = rcu_dereference(rx->sta->gtk[mmie_keyidx]);
-               if (!rx->key)
-                       rx->key = rcu_dereference(rx->sdata->keys[mmie_keyidx]);
-       } else if (!ieee80211_has_protected(fc)) {
-               /*
-                * The frame was not protected, so skip decryption. However, we
-                * need to set rx->key if there is a key that could have been
-                * used so that the frame may be dropped if encryption would
-                * have been expected.
-                */
-               struct ieee80211_key *key = NULL;
-               struct ieee80211_sub_if_data *sdata = rx->sdata;
-               int i;
-
-               if (ieee80211_is_mgmt(fc) &&
-                   is_multicast_ether_addr(hdr->addr1) &&
-                   (key = rcu_dereference(rx->sdata->default_mgmt_key)))
-                       rx->key = key;
-               else {
-                       if (rx->sta) {
-                               for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
-                                       key = rcu_dereference(rx->sta->gtk[i]);
-                                       if (key)
-                                               break;
-                               }
-                       }
-                       if (!key) {
-                               for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
-                                       key = rcu_dereference(sdata->keys[i]);
-                                       if (key)
-                                               break;
-                               }
-                       }
-                       if (key)
-                               rx->key = key;
-               }
-               return RX_CONTINUE;
-       } else {
-               u8 keyid;
-               /*
-                * The device doesn't give us the IV so we won't be
-                * able to look up the key. That's ok though, we
-                * don't need to decrypt the frame, we just won't
-                * be able to keep statistics accurate.
-                * Except for key threshold notifications, should
-                * we somehow allow the driver to tell us which key
-                * the hardware used if this flag is set?
-                */
-               if ((status->flag & RX_FLAG_DECRYPTED) &&
-                   (status->flag & RX_FLAG_IV_STRIPPED))
-                       return RX_CONTINUE;
-
-               hdrlen = ieee80211_hdrlen(fc);
-
-               if (rx->skb->len < 8 + hdrlen)
-                       return RX_DROP_UNUSABLE; /* TODO: count this? */
-
-               /*
-                * no need to call ieee80211_wep_get_keyidx,
-                * it verifies a bunch of things we've done already
-                */
-               skb_copy_bits(rx->skb, hdrlen + 3, &keyid, 1);
-               keyidx = keyid >> 6;
-
-               /* check per-station GTK first, if multicast packet */
-               if (is_multicast_ether_addr(hdr->addr1) && rx->sta)
-                       rx->key = rcu_dereference(rx->sta->gtk[keyidx]);
-
-               /* if not found, try default key */
-               if (!rx->key) {
-                       rx->key = rcu_dereference(rx->sdata->keys[keyidx]);
-
-                       /*
-                        * RSNA-protected unicast frames should always be
-                        * sent with pairwise or station-to-station keys,
-                        * but for WEP we allow using a key index as well.
-                        */
-                       if (rx->key &&
-                           rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP40 &&
-                           rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP104 &&
-                           !is_multicast_ether_addr(hdr->addr1))
-                               rx->key = NULL;
-               }
-       }
-
-       if (rx->key) {
-               if (unlikely(rx->key->flags & KEY_FLAG_TAINTED))
-                       return RX_DROP_MONITOR;
-
-               rx->key->tx_rx_count++;
-               /* TODO: add threshold stuff again */
-       } else {
-               return RX_DROP_MONITOR;
-       }
-
-       switch (rx->key->conf.cipher) {
-       case WLAN_CIPHER_SUITE_WEP40:
-       case WLAN_CIPHER_SUITE_WEP104:
-               result = ieee80211_crypto_wep_decrypt(rx);
-               break;
-       case WLAN_CIPHER_SUITE_TKIP:
-               result = ieee80211_crypto_tkip_decrypt(rx);
-               break;
-       case WLAN_CIPHER_SUITE_CCMP:
-               result = ieee80211_crypto_ccmp_decrypt(rx);
-               break;
-       case WLAN_CIPHER_SUITE_AES_CMAC:
-               result = ieee80211_crypto_aes_cmac_decrypt(rx);
-               break;
-       default:
-               /*
-                * We can reach here only with HW-only algorithms
-                * but why didn't it decrypt the frame?!
-                */
-               return RX_DROP_UNUSABLE;
-       }
-
-       /* the hdr variable is invalid after the decrypt handlers */
-
-       /* either the frame has been decrypted or will be dropped */
-       status->flag |= RX_FLAG_DECRYPTED;
-
-       return result;
-}
-
 static ieee80211_rx_result debug_noinline
 ieee80211_rx_h_check_more_data(struct ieee80211_rx_data *rx)
 {
@@ -1513,6 +1355,207 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
        return RX_CONTINUE;
 } /* ieee80211_rx_h_sta_process */
 
+static ieee80211_rx_result debug_noinline
+ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
+{
+       struct sk_buff *skb = rx->skb;
+       struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
+       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+       int keyidx;
+       int hdrlen;
+       ieee80211_rx_result result = RX_DROP_UNUSABLE;
+       struct ieee80211_key *sta_ptk = NULL;
+       int mmie_keyidx = -1;
+       __le16 fc;
+
+       /*
+        * Key selection 101
+        *
+        * There are four types of keys:
+        *  - GTK (group keys)
+        *  - IGTK (group keys for management frames)
+        *  - PTK (pairwise keys)
+        *  - STK (station-to-station pairwise keys)
+        *
+        * When selecting a key, we have to distinguish between multicast
+        * (including broadcast) and unicast frames, the latter can only
+        * use PTKs and STKs while the former always use GTKs and IGTKs.
+        * Unless, of course, actual WEP keys ("pre-RSNA") are used, then
+        * unicast frames can also use key indices like GTKs. Hence, if we
+        * don't have a PTK/STK we check the key index for a WEP key.
+        *
+        * Note that in a regular BSS, multicast frames are sent by the
+        * AP only, associated stations unicast the frame to the AP first
+        * which then multicasts it on their behalf.
+        *
+        * There is also a slight problem in IBSS mode: GTKs are negotiated
+        * with each station, that is something we don't currently handle.
+        * The spec seems to expect that one negotiates the same key with
+        * every station but there's no such requirement; VLANs could be
+        * possible.
+        */
+
+       /*
+        * No point in finding a key and decrypting if the frame is neither
+        * addressed to us nor a multicast frame.
+        */
+       if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
+               return RX_CONTINUE;
+
+       /* start without a key */
+       rx->key = NULL;
+
+       if (rx->sta)
+               sta_ptk = rcu_dereference(rx->sta->ptk);
+
+       fc = hdr->frame_control;
+
+       if (!ieee80211_has_protected(fc))
+               mmie_keyidx = ieee80211_get_mmie_keyidx(rx->skb);
+
+       if (!is_multicast_ether_addr(hdr->addr1) && sta_ptk) {
+               rx->key = sta_ptk;
+               if ((status->flag & RX_FLAG_DECRYPTED) &&
+                   (status->flag & RX_FLAG_IV_STRIPPED))
+                       return RX_CONTINUE;
+               /* Skip decryption if the frame is not protected. */
+               if (!ieee80211_has_protected(fc))
+                       return RX_CONTINUE;
+       } else if (mmie_keyidx >= 0) {
+               /* Broadcast/multicast robust management frame / BIP */
+               if ((status->flag & RX_FLAG_DECRYPTED) &&
+                   (status->flag & RX_FLAG_IV_STRIPPED))
+                       return RX_CONTINUE;
+
+               if (mmie_keyidx < NUM_DEFAULT_KEYS ||
+                   mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS)
+                       return RX_DROP_MONITOR; /* unexpected BIP keyidx */
+               if (rx->sta)
+                       rx->key = rcu_dereference(rx->sta->gtk[mmie_keyidx]);
+               if (!rx->key)
+                       rx->key = rcu_dereference(rx->sdata->keys[mmie_keyidx]);
+       } else if (!ieee80211_has_protected(fc)) {
+               /*
+                * The frame was not protected, so skip decryption. However, we
+                * need to set rx->key if there is a key that could have been
+                * used so that the frame may be dropped if encryption would
+                * have been expected.
+                */
+               struct ieee80211_key *key = NULL;
+               struct ieee80211_sub_if_data *sdata = rx->sdata;
+               int i;
+
+               if (ieee80211_is_mgmt(fc) &&
+                   is_multicast_ether_addr(hdr->addr1) &&
+                   (key = rcu_dereference(rx->sdata->default_mgmt_key)))
+                       rx->key = key;
+               else {
+                       if (rx->sta) {
+                               for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
+                                       key = rcu_dereference(rx->sta->gtk[i]);
+                                       if (key)
+                                               break;
+                               }
+                       }
+                       if (!key) {
+                               for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
+                                       key = rcu_dereference(sdata->keys[i]);
+                                       if (key)
+                                               break;
+                               }
+                       }
+                       if (key)
+                               rx->key = key;
+               }
+               return RX_CONTINUE;
+       } else {
+               u8 keyid;
+               /*
+                * The device doesn't give us the IV so we won't be
+                * able to look up the key. That's ok though, we
+                * don't need to decrypt the frame, we just won't
+                * be able to keep statistics accurate.
+                * Except for key threshold notifications, should
+                * we somehow allow the driver to tell us which key
+                * the hardware used if this flag is set?
+                */
+               if ((status->flag & RX_FLAG_DECRYPTED) &&
+                   (status->flag & RX_FLAG_IV_STRIPPED))
+                       return RX_CONTINUE;
+
+               hdrlen = ieee80211_hdrlen(fc);
+
+               if (rx->skb->len < 8 + hdrlen)
+                       return RX_DROP_UNUSABLE; /* TODO: count this? */
+
+               /*
+                * no need to call ieee80211_wep_get_keyidx,
+                * it verifies a bunch of things we've done already
+                */
+               skb_copy_bits(rx->skb, hdrlen + 3, &keyid, 1);
+               keyidx = keyid >> 6;
+
+               /* check per-station GTK first, if multicast packet */
+               if (is_multicast_ether_addr(hdr->addr1) && rx->sta)
+                       rx->key = rcu_dereference(rx->sta->gtk[keyidx]);
+
+               /* if not found, try default key */
+               if (!rx->key) {
+                       rx->key = rcu_dereference(rx->sdata->keys[keyidx]);
+
+                       /*
+                        * RSNA-protected unicast frames should always be
+                        * sent with pairwise or station-to-station keys,
+                        * but for WEP we allow using a key index as well.
+                        */
+                       if (rx->key &&
+                           rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP40 &&
+                           rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP104 &&
+                           !is_multicast_ether_addr(hdr->addr1))
+                               rx->key = NULL;
+               }
+       }
+
+       if (rx->key) {
+               if (unlikely(rx->key->flags & KEY_FLAG_TAINTED))
+                       return RX_DROP_MONITOR;
+
+               rx->key->tx_rx_count++;
+               /* TODO: add threshold stuff again */
+       } else {
+               return RX_DROP_MONITOR;
+       }
+
+       switch (rx->key->conf.cipher) {
+       case WLAN_CIPHER_SUITE_WEP40:
+       case WLAN_CIPHER_SUITE_WEP104:
+               result = ieee80211_crypto_wep_decrypt(rx);
+               break;
+       case WLAN_CIPHER_SUITE_TKIP:
+               result = ieee80211_crypto_tkip_decrypt(rx);
+               break;
+       case WLAN_CIPHER_SUITE_CCMP:
+               result = ieee80211_crypto_ccmp_decrypt(rx);
+               break;
+       case WLAN_CIPHER_SUITE_AES_CMAC:
+               result = ieee80211_crypto_aes_cmac_decrypt(rx);
+               break;
+       default:
+               /*
+                * We can reach here only with HW-only algorithms
+                * but why didn't it decrypt the frame?!
+                */
+               return RX_DROP_UNUSABLE;
+       }
+
+       /* the hdr variable is invalid after the decrypt handlers */
+
+       /* either the frame has been decrypted or will be dropped */
+       status->flag |= RX_FLAG_DECRYPTED;
+
+       return result;
+}
+
 static inline struct ieee80211_fragment_entry *
 ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata,
                         unsigned int frag, unsigned int seq, int rx_queue,
@@ -2896,10 +2939,10 @@ static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx,
                 */
                rx->skb = skb;
 
-               CALL_RXH(ieee80211_rx_h_decrypt)
                CALL_RXH(ieee80211_rx_h_check_more_data)
                CALL_RXH(ieee80211_rx_h_uapsd_and_pspoll)
                CALL_RXH(ieee80211_rx_h_sta_process)
+               CALL_RXH(ieee80211_rx_h_decrypt)
                CALL_RXH(ieee80211_rx_h_defragment)
                CALL_RXH(ieee80211_rx_h_michael_mic_verify)
                /* must be after MMIC verify so header is counted in MPDU mic */
index 1b122a79b0d8369a4118a6f238dc6d69db9e072c..08afe74b98f4b6cbdda21ed3d8ff68b9a95fa64b 100644 (file)
@@ -66,6 +66,7 @@ ieee80211_bss_info_update(struct ieee80211_local *local,
        struct cfg80211_bss *cbss;
        struct ieee80211_bss *bss;
        int clen, srlen;
+       enum nl80211_bss_scan_width scan_width;
        s32 signal = 0;
 
        if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
@@ -73,8 +74,15 @@ ieee80211_bss_info_update(struct ieee80211_local *local,
        else if (local->hw.flags & IEEE80211_HW_SIGNAL_UNSPEC)
                signal = (rx_status->signal * 100) / local->hw.max_signal;
 
-       cbss = cfg80211_inform_bss_frame(local->hw.wiphy, channel,
-                                        mgmt, len, signal, GFP_ATOMIC);
+       scan_width = NL80211_BSS_CHAN_WIDTH_20;
+       if (rx_status->flag & RX_FLAG_5MHZ)
+               scan_width = NL80211_BSS_CHAN_WIDTH_5;
+       if (rx_status->flag & RX_FLAG_10MHZ)
+               scan_width = NL80211_BSS_CHAN_WIDTH_10;
+
+       cbss = cfg80211_inform_bss_width_frame(local->hw.wiphy, channel,
+                                              scan_width, mgmt, len, signal,
+                                              GFP_ATOMIC);
        if (!cbss)
                return NULL;
 
@@ -204,10 +212,29 @@ void ieee80211_scan_rx(struct ieee80211_local *local, struct sk_buff *skb)
                ieee80211_rx_bss_put(local, bss);
 }
 
+static void
+ieee80211_prepare_scan_chandef(struct cfg80211_chan_def *chandef,
+                              enum nl80211_bss_scan_width scan_width)
+{
+       memset(chandef, 0, sizeof(*chandef));
+       switch (scan_width) {
+       case NL80211_BSS_CHAN_WIDTH_5:
+               chandef->width = NL80211_CHAN_WIDTH_5;
+               break;
+       case NL80211_BSS_CHAN_WIDTH_10:
+               chandef->width = NL80211_CHAN_WIDTH_10;
+               break;
+       default:
+               chandef->width = NL80211_CHAN_WIDTH_20_NOHT;
+               break;
+       }
+}
+
 /* return false if no more work */
 static bool ieee80211_prep_hw_scan(struct ieee80211_local *local)
 {
        struct cfg80211_scan_request *req = local->scan_req;
+       struct cfg80211_chan_def chandef;
        enum ieee80211_band band;
        int i, ielen, n_chans;
 
@@ -229,11 +256,12 @@ static bool ieee80211_prep_hw_scan(struct ieee80211_local *local)
        } while (!n_chans);
 
        local->hw_scan_req->n_channels = n_chans;
+       ieee80211_prepare_scan_chandef(&chandef, req->scan_width);
 
        ielen = ieee80211_build_preq_ies(local, (u8 *)local->hw_scan_req->ie,
                                         local->hw_scan_ies_bufsize,
                                         req->ie, req->ie_len, band,
-                                        req->rates[band], 0);
+                                        req->rates[band], &chandef);
        local->hw_scan_req->ie_len = ielen;
        local->hw_scan_req->no_cck = req->no_cck;
 
@@ -280,7 +308,7 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted,
        rcu_assign_pointer(local->scan_sdata, NULL);
 
        local->scanning = 0;
-       local->scan_channel = NULL;
+       local->scan_chandef.chan = NULL;
 
        /* Set power back to normal operating levels. */
        ieee80211_hw_config(local, 0);
@@ -615,11 +643,34 @@ static void ieee80211_scan_state_set_channel(struct ieee80211_local *local,
 {
        int skip;
        struct ieee80211_channel *chan;
+       enum nl80211_bss_scan_width oper_scan_width;
 
        skip = 0;
        chan = local->scan_req->channels[local->scan_channel_idx];
 
-       local->scan_channel = chan;
+       local->scan_chandef.chan = chan;
+       local->scan_chandef.center_freq1 = chan->center_freq;
+       local->scan_chandef.center_freq2 = 0;
+       switch (local->scan_req->scan_width) {
+       case NL80211_BSS_CHAN_WIDTH_5:
+               local->scan_chandef.width = NL80211_CHAN_WIDTH_5;
+               break;
+       case NL80211_BSS_CHAN_WIDTH_10:
+               local->scan_chandef.width = NL80211_CHAN_WIDTH_10;
+               break;
+       case NL80211_BSS_CHAN_WIDTH_20:
+               /* If scanning on oper channel, use whatever channel-type
+                * is currently in use.
+                */
+               oper_scan_width = cfg80211_chandef_to_scan_width(
+                                       &local->_oper_chandef);
+               if (chan == local->_oper_chandef.chan &&
+                   oper_scan_width == local->scan_req->scan_width)
+                       local->scan_chandef = local->_oper_chandef;
+               else
+                       local->scan_chandef.width = NL80211_CHAN_WIDTH_20_NOHT;
+               break;
+       }
 
        if (ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL))
                skip = 1;
@@ -659,7 +710,7 @@ static void ieee80211_scan_state_suspend(struct ieee80211_local *local,
                                         unsigned long *next_delay)
 {
        /* switch back to the operating channel */
-       local->scan_channel = NULL;
+       local->scan_chandef.chan = NULL;
        ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
 
        /* disable PS */
@@ -801,7 +852,8 @@ int ieee80211_request_scan(struct ieee80211_sub_if_data *sdata,
 
 int ieee80211_request_ibss_scan(struct ieee80211_sub_if_data *sdata,
                                const u8 *ssid, u8 ssid_len,
-                               struct ieee80211_channel *chan)
+                               struct ieee80211_channel *chan,
+                               enum nl80211_bss_scan_width scan_width)
 {
        struct ieee80211_local *local = sdata->local;
        int ret = -EBUSY;
@@ -851,6 +903,7 @@ int ieee80211_request_ibss_scan(struct ieee80211_sub_if_data *sdata,
 
        local->int_scan_req->ssids = &local->scan_ssid;
        local->int_scan_req->n_ssids = 1;
+       local->int_scan_req->scan_width = scan_width;
        memcpy(local->int_scan_req->ssids[0].ssid, ssid, IEEE80211_MAX_SSID_LEN);
        local->int_scan_req->ssids[0].ssid_len = ssid_len;
 
@@ -912,6 +965,7 @@ int ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata,
 {
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_sched_scan_ies sched_scan_ies = {};
+       struct cfg80211_chan_def chandef;
        int ret, i, iebufsz;
 
        iebufsz = 2 + IEEE80211_MAX_SSID_LEN +
@@ -939,10 +993,12 @@ int ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata,
                        goto out_free;
                }
 
+               ieee80211_prepare_scan_chandef(&chandef, req->scan_width);
+
                sched_scan_ies.len[i] =
                        ieee80211_build_preq_ies(local, sched_scan_ies.ie[i],
                                                 iebufsz, req->ie, req->ie_len,
-                                                i, (u32) -1, 0);
+                                                i, (u32) -1, &chandef);
        }
 
        ret = drv_sched_scan_start(local, sdata, req, &sched_scan_ies);
index 43439203f4e4cf2262092a9e391e7d048f10ef9a..368837fe3b800e87f408039ef4d36e84bfaa7069 100644 (file)
@@ -235,7 +235,8 @@ static int ieee80211_tx_radiotap_len(struct ieee80211_tx_info *info)
 
        /* IEEE80211_RADIOTAP_RATE rate */
        if (info->status.rates[0].idx >= 0 &&
-           !(info->status.rates[0].flags & IEEE80211_TX_RC_MCS))
+           !(info->status.rates[0].flags & (IEEE80211_TX_RC_MCS |
+                                            IEEE80211_TX_RC_VHT_MCS)))
                len += 2;
 
        /* IEEE80211_RADIOTAP_TX_FLAGS */
@@ -244,17 +245,23 @@ static int ieee80211_tx_radiotap_len(struct ieee80211_tx_info *info)
        /* IEEE80211_RADIOTAP_DATA_RETRIES */
        len += 1;
 
-       /* IEEE80211_TX_RC_MCS */
-       if (info->status.rates[0].idx >= 0 &&
-           info->status.rates[0].flags & IEEE80211_TX_RC_MCS)
-               len += 3;
+       /* IEEE80211_RADIOTAP_MCS
+        * IEEE80211_RADIOTAP_VHT */
+       if (info->status.rates[0].idx >= 0) {
+               if (info->status.rates[0].flags & IEEE80211_TX_RC_MCS)
+                       len += 3;
+               else if (info->status.rates[0].flags & IEEE80211_TX_RC_VHT_MCS)
+                       len = ALIGN(len, 2) + 12;
+       }
 
        return len;
 }
 
-static void ieee80211_add_tx_radiotap_header(struct ieee80211_supported_band
-                                            *sband, struct sk_buff *skb,
-                                            int retry_count, int rtap_len)
+static void
+ieee80211_add_tx_radiotap_header(struct ieee80211_local *local,
+                                struct ieee80211_supported_band *sband,
+                                struct sk_buff *skb, int retry_count,
+                                int rtap_len, int shift)
 {
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
@@ -279,9 +286,13 @@ static void ieee80211_add_tx_radiotap_header(struct ieee80211_supported_band
 
        /* IEEE80211_RADIOTAP_RATE */
        if (info->status.rates[0].idx >= 0 &&
-           !(info->status.rates[0].flags & IEEE80211_TX_RC_MCS)) {
+           !(info->status.rates[0].flags & (IEEE80211_TX_RC_MCS |
+                                            IEEE80211_TX_RC_VHT_MCS))) {
+               u16 rate;
+
                rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE);
-               *pos = sband->bitrates[info->status.rates[0].idx].bitrate / 5;
+               rate = sband->bitrates[info->status.rates[0].idx].bitrate;
+               *pos = DIV_ROUND_UP(rate, 5 * (1 << shift));
                /* padding for tx flags */
                pos += 2;
        }
@@ -306,9 +317,12 @@ static void ieee80211_add_tx_radiotap_header(struct ieee80211_supported_band
        *pos = retry_count;
        pos++;
 
-       /* IEEE80211_TX_RC_MCS */
-       if (info->status.rates[0].idx >= 0 &&
-           info->status.rates[0].flags & IEEE80211_TX_RC_MCS) {
+       if (info->status.rates[0].idx < 0)
+               return;
+
+       /* IEEE80211_RADIOTAP_MCS
+        * IEEE80211_RADIOTAP_VHT */
+       if (info->status.rates[0].flags & IEEE80211_TX_RC_MCS) {
                rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_MCS);
                pos[0] = IEEE80211_RADIOTAP_MCS_HAVE_MCS |
                         IEEE80211_RADIOTAP_MCS_HAVE_GI |
@@ -321,8 +335,48 @@ static void ieee80211_add_tx_radiotap_header(struct ieee80211_supported_band
                        pos[1] |= IEEE80211_RADIOTAP_MCS_FMT_GF;
                pos[2] = info->status.rates[0].idx;
                pos += 3;
-       }
+       } else if (info->status.rates[0].flags & IEEE80211_TX_RC_VHT_MCS) {
+               u16 known = local->hw.radiotap_vht_details &
+                       (IEEE80211_RADIOTAP_VHT_KNOWN_GI |
+                        IEEE80211_RADIOTAP_VHT_KNOWN_BANDWIDTH);
+
+               rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_VHT);
+
+               /* required alignment from rthdr */
+               pos = (u8 *)rthdr + ALIGN(pos - (u8 *)rthdr, 2);
 
+               /* u16 known - IEEE80211_RADIOTAP_VHT_KNOWN_* */
+               put_unaligned_le16(known, pos);
+               pos += 2;
+
+               /* u8 flags - IEEE80211_RADIOTAP_VHT_FLAG_* */
+               if (info->status.rates[0].flags & IEEE80211_TX_RC_SHORT_GI)
+                       *pos |= IEEE80211_RADIOTAP_VHT_FLAG_SGI;
+               pos++;
+
+               /* u8 bandwidth */
+               if (info->status.rates[0].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+                       *pos = 1;
+               else if (info->status.rates[0].flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
+                       *pos = 4;
+               else if (info->status.rates[0].flags & IEEE80211_TX_RC_160_MHZ_WIDTH)
+                       *pos = 11;
+               else /* IEEE80211_TX_RC_{20_MHZ_WIDTH,FIXME:DUP_DATA} */
+                       *pos = 0;
+               pos++;
+
+               /* u8 mcs_nss[4] */
+               *pos = (ieee80211_rate_get_vht_mcs(&info->status.rates[0]) << 4) |
+                       ieee80211_rate_get_vht_nss(&info->status.rates[0]);
+               pos += 4;
+
+               /* u8 coding */
+               pos++;
+               /* u8 group_id */
+               pos++;
+               /* u16 partial_aid */
+               pos += 2;
+       }
 }
 
 static void ieee80211_report_used_skb(struct ieee80211_local *local,
@@ -424,6 +478,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
        bool acked;
        struct ieee80211_bar *bar;
        int rtap_len;
+       int shift = 0;
 
        for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
                if ((info->flags & IEEE80211_TX_CTL_AMPDU) &&
@@ -458,6 +513,8 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
                if (!ether_addr_equal(hdr->addr2, sta->sdata->vif.addr))
                        continue;
 
+               shift = ieee80211_vif_get_shift(&sta->sdata->vif);
+
                if (info->flags & IEEE80211_TX_STATUS_EOSP)
                        clear_sta_flag(sta, WLAN_STA_SP);
 
@@ -557,7 +614,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
 
        rcu_read_unlock();
 
-       ieee80211_led_tx(local, 0);
+       ieee80211_led_tx(local);
 
        /* SNMP counters
         * Fragments are passed to low-level drivers as separate skbs, so these
@@ -624,7 +681,8 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
                dev_kfree_skb(skb);
                return;
        }
-       ieee80211_add_tx_radiotap_header(sband, skb, retry_count, rtap_len);
+       ieee80211_add_tx_radiotap_header(local, sband, skb, retry_count,
+                                        rtap_len, shift);
 
        /* XXX: is this sufficient for BPF? */
        skb_set_mac_header(skb, 0);
index c215fafd7a2fc1ac3e206b4551c60de8d656ab24..1aba645882bd92abbf9e5a4cdd90f7544a173c4a 100644 (file)
@@ -1906,6 +1906,32 @@ TRACE_EVENT(api_radar_detected,
        )
 );
 
+TRACE_EVENT(drv_channel_switch_beacon,
+       TP_PROTO(struct ieee80211_local *local,
+                struct ieee80211_sub_if_data *sdata,
+                struct cfg80211_chan_def *chandef),
+
+       TP_ARGS(local, sdata, chandef),
+
+       TP_STRUCT__entry(
+               LOCAL_ENTRY
+               VIF_ENTRY
+               CHANDEF_ENTRY
+       ),
+
+       TP_fast_assign(
+               LOCAL_ASSIGN;
+               VIF_ASSIGN;
+               CHANDEF_ASSIGN(chandef);
+       ),
+
+       TP_printk(
+               LOCAL_PR_FMT VIF_PR_FMT " channel switch to " CHANDEF_PR_FMT,
+               LOCAL_PR_ARG, VIF_PR_ARG, CHANDEF_PR_ARG
+       )
+);
+
+
 #ifdef CONFIG_MAC80211_MESSAGE_TRACING
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM mac80211_msg
index 4105d0ca963e25dfdd558e30c48274277e5ad2df..098ae854ad3c31cb4984236d801bc468ab3763fa 100644 (file)
@@ -40,12 +40,22 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx,
                                 struct sk_buff *skb, int group_addr,
                                 int next_frag_len)
 {
-       int rate, mrate, erp, dur, i;
+       int rate, mrate, erp, dur, i, shift = 0;
        struct ieee80211_rate *txrate;
        struct ieee80211_local *local = tx->local;
        struct ieee80211_supported_band *sband;
        struct ieee80211_hdr *hdr;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+       struct ieee80211_chanctx_conf *chanctx_conf;
+       u32 rate_flags = 0;
+
+       rcu_read_lock();
+       chanctx_conf = rcu_dereference(tx->sdata->vif.chanctx_conf);
+       if (chanctx_conf) {
+               shift = ieee80211_chandef_get_shift(&chanctx_conf->def);
+               rate_flags = ieee80211_chandef_rate_flags(&chanctx_conf->def);
+       }
+       rcu_read_unlock();
 
        /* assume HW handles this */
        if (tx->rate.flags & IEEE80211_TX_RC_MCS)
@@ -122,8 +132,11 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx,
                if (r->bitrate > txrate->bitrate)
                        break;
 
+               if ((rate_flags & r->flags) != rate_flags)
+                       continue;
+
                if (tx->sdata->vif.bss_conf.basic_rates & BIT(i))
-                       rate = r->bitrate;
+                       rate = DIV_ROUND_UP(r->bitrate, 1 << shift);
 
                switch (sband->band) {
                case IEEE80211_BAND_2GHZ: {
@@ -150,7 +163,7 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx,
        if (rate == -1) {
                /* No matching basic rate found; use highest suitable mandatory
                 * PHY rate */
-               rate = mrate;
+               rate = DIV_ROUND_UP(mrate, 1 << shift);
        }
 
        /* Don't calculate ACKs for QoS Frames with NoAck Policy set */
@@ -162,7 +175,8 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx,
                 * (10 bytes + 4-byte FCS = 112 bits) plus SIFS; rounded up
                 * to closest integer */
                dur = ieee80211_frame_duration(sband->band, 10, rate, erp,
-                               tx->sdata->vif.bss_conf.use_short_preamble);
+                               tx->sdata->vif.bss_conf.use_short_preamble,
+                               shift);
 
        if (next_frag_len) {
                /* Frame is fragmented: duration increases with time needed to
@@ -171,7 +185,8 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx,
                /* next fragment */
                dur += ieee80211_frame_duration(sband->band, next_frag_len,
                                txrate->bitrate, erp,
-                               tx->sdata->vif.bss_conf.use_short_preamble);
+                               tx->sdata->vif.bss_conf.use_short_preamble,
+                               shift);
        }
 
        return cpu_to_le16(dur);
@@ -524,9 +539,11 @@ ieee80211_tx_h_check_control_port_protocol(struct ieee80211_tx_data *tx)
 {
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
 
-       if (unlikely(tx->sdata->control_port_protocol == tx->skb->protocol &&
-                    tx->sdata->control_port_no_encrypt))
-               info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
+       if (unlikely(tx->sdata->control_port_protocol == tx->skb->protocol)) {
+               if (tx->sdata->control_port_no_encrypt)
+                       info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
+               info->control.flags |= IEEE80211_TX_CTRL_PORT_CTRL_PROTO;
+       }
 
        return TX_CONTINUE;
 }
@@ -1257,6 +1274,10 @@ static bool __ieee80211_tx(struct ieee80211_local *local,
 
        switch (sdata->vif.type) {
        case NL80211_IFTYPE_MONITOR:
+               if (sdata->u.mntr_flags & MONITOR_FLAG_ACTIVE) {
+                       vif = &sdata->vif;
+                       break;
+               }
                sdata = rcu_dereference(local->monitor_sdata);
                if (sdata) {
                        vif = &sdata->vif;
@@ -1281,7 +1302,6 @@ static bool __ieee80211_tx(struct ieee80211_local *local,
                                    txpending);
 
        ieee80211_tpt_led_trig_tx(local, fc, led_len);
-       ieee80211_led_tx(local, 1);
 
        WARN_ON_ONCE(!skb_queue_empty(skbs));
 
@@ -2320,6 +2340,81 @@ static int ieee80211_beacon_add_tim(struct ieee80211_sub_if_data *sdata,
        return 0;
 }
 
+void ieee80211_csa_finish(struct ieee80211_vif *vif)
+{
+       struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+
+       ieee80211_queue_work(&sdata->local->hw,
+                            &sdata->csa_finalize_work);
+}
+EXPORT_SYMBOL(ieee80211_csa_finish);
+
+static void ieee80211_update_csa(struct ieee80211_sub_if_data *sdata,
+                                struct beacon_data *beacon)
+{
+       struct probe_resp *resp;
+       int counter_offset_beacon = sdata->csa_counter_offset_beacon;
+       int counter_offset_presp = sdata->csa_counter_offset_presp;
+
+       /* warn if the driver did not check for/react to csa completeness */
+       if (WARN_ON(((u8 *)beacon->tail)[counter_offset_beacon] == 0))
+               return;
+
+       ((u8 *)beacon->tail)[counter_offset_beacon]--;
+
+       if (sdata->vif.type == NL80211_IFTYPE_AP &&
+           counter_offset_presp) {
+               rcu_read_lock();
+               resp = rcu_dereference(sdata->u.ap.probe_resp);
+
+               /* if nl80211 accepted the offset, this should not happen. */
+               if (WARN_ON(!resp)) {
+                       rcu_read_unlock();
+                       return;
+               }
+               resp->data[counter_offset_presp]--;
+               rcu_read_unlock();
+       }
+}
+
+bool ieee80211_csa_is_complete(struct ieee80211_vif *vif)
+{
+       struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+       struct beacon_data *beacon = NULL;
+       u8 *beacon_data;
+       size_t beacon_data_len;
+       int counter_beacon = sdata->csa_counter_offset_beacon;
+       int ret = false;
+
+       if (!ieee80211_sdata_running(sdata))
+               return false;
+
+       rcu_read_lock();
+       if (vif->type == NL80211_IFTYPE_AP) {
+               struct ieee80211_if_ap *ap = &sdata->u.ap;
+
+               beacon = rcu_dereference(ap->beacon);
+               if (WARN_ON(!beacon || !beacon->tail))
+                       goto out;
+               beacon_data = beacon->tail;
+               beacon_data_len = beacon->tail_len;
+       } else {
+               WARN_ON(1);
+               goto out;
+       }
+
+       if (WARN_ON(counter_beacon > beacon_data_len))
+               goto out;
+
+       if (beacon_data[counter_beacon] == 0)
+               ret = true;
+ out:
+       rcu_read_unlock();
+
+       return ret;
+}
+EXPORT_SYMBOL(ieee80211_csa_is_complete);
+
 struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
                                         struct ieee80211_vif *vif,
                                         u16 *tim_offset, u16 *tim_length)
@@ -2350,6 +2445,9 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
                struct beacon_data *beacon = rcu_dereference(ap->beacon);
 
                if (beacon) {
+                       if (sdata->vif.csa_active)
+                               ieee80211_update_csa(sdata, beacon);
+
                        /*
                         * headroom, head length,
                         * tail length and maximum TIM length
index 22654452a56157b44835f97efacdf2e8960ee5bc..e1b34a18b24344cb95a642c436fc5322054207ce 100644 (file)
@@ -107,7 +107,8 @@ void ieee80211_tx_set_protected(struct ieee80211_tx_data *tx)
 }
 
 int ieee80211_frame_duration(enum ieee80211_band band, size_t len,
-                            int rate, int erp, int short_preamble)
+                            int rate, int erp, int short_preamble,
+                            int shift)
 {
        int dur;
 
@@ -118,6 +119,9 @@ int ieee80211_frame_duration(enum ieee80211_band band, size_t len,
         *
         * rate is in 100 kbps, so divident is multiplied by 10 in the
         * DIV_ROUND_UP() operations.
+        *
+        * shift may be 2 for 5 MHz channels or 1 for 10 MHz channels, and
+        * is assumed to be 0 otherwise.
         */
 
        if (band == IEEE80211_BAND_5GHZ || erp) {
@@ -130,13 +134,23 @@ int ieee80211_frame_duration(enum ieee80211_band band, size_t len,
                 * TXTIME = T_PREAMBLE + T_SIGNAL + T_SYM x N_SYM + Signal Ext
                 *
                 * T_SYM = 4 usec
-                * 802.11a - 17.5.2: aSIFSTime = 16 usec
+                * 802.11a - 18.5.2: aSIFSTime = 16 usec
                 * 802.11g - 19.8.4: aSIFSTime = 10 usec +
                 *      signal ext = 6 usec
                 */
                dur = 16; /* SIFS + signal ext */
-               dur += 16; /* 17.3.2.3: T_PREAMBLE = 16 usec */
-               dur += 4; /* 17.3.2.3: T_SIGNAL = 4 usec */
+               dur += 16; /* IEEE 802.11-2012 18.3.2.4: T_PREAMBLE = 16 usec */
+               dur += 4; /* IEEE 802.11-2012 18.3.2.4: T_SIGNAL = 4 usec */
+
+               /* IEEE 802.11-2012 18.3.2.4: all values above are:
+                *  * times 4 for 5 MHz
+                *  * times 2 for 10 MHz
+                */
+               dur *= 1 << shift;
+
+               /* rates should already consider the channel bandwidth,
+                * don't apply divisor again.
+                */
                dur += 4 * DIV_ROUND_UP((16 + 8 * (len + 4) + 6) * 10,
                                        4 * rate); /* T_SYM x N_SYM */
        } else {
@@ -168,7 +182,7 @@ __le16 ieee80211_generic_frame_duration(struct ieee80211_hw *hw,
 {
        struct ieee80211_sub_if_data *sdata;
        u16 dur;
-       int erp;
+       int erp, shift = 0;
        bool short_preamble = false;
 
        erp = 0;
@@ -177,10 +191,11 @@ __le16 ieee80211_generic_frame_duration(struct ieee80211_hw *hw,
                short_preamble = sdata->vif.bss_conf.use_short_preamble;
                if (sdata->flags & IEEE80211_SDATA_OPERATING_GMODE)
                        erp = rate->flags & IEEE80211_RATE_ERP_G;
+               shift = ieee80211_vif_get_shift(vif);
        }
 
        dur = ieee80211_frame_duration(band, frame_len, rate->bitrate, erp,
-                                      short_preamble);
+                                      short_preamble, shift);
 
        return cpu_to_le16(dur);
 }
@@ -194,7 +209,7 @@ __le16 ieee80211_rts_duration(struct ieee80211_hw *hw,
        struct ieee80211_rate *rate;
        struct ieee80211_sub_if_data *sdata;
        bool short_preamble;
-       int erp;
+       int erp, shift = 0, bitrate;
        u16 dur;
        struct ieee80211_supported_band *sband;
 
@@ -210,17 +225,20 @@ __le16 ieee80211_rts_duration(struct ieee80211_hw *hw,
                short_preamble = sdata->vif.bss_conf.use_short_preamble;
                if (sdata->flags & IEEE80211_SDATA_OPERATING_GMODE)
                        erp = rate->flags & IEEE80211_RATE_ERP_G;
+               shift = ieee80211_vif_get_shift(vif);
        }
 
+       bitrate = DIV_ROUND_UP(rate->bitrate, 1 << shift);
+
        /* CTS duration */
-       dur = ieee80211_frame_duration(sband->band, 10, rate->bitrate,
-                                      erp, short_preamble);
+       dur = ieee80211_frame_duration(sband->band, 10, bitrate,
+                                      erp, short_preamble, shift);
        /* Data frame duration */
-       dur += ieee80211_frame_duration(sband->band, frame_len, rate->bitrate,
-                                       erp, short_preamble);
+       dur += ieee80211_frame_duration(sband->band, frame_len, bitrate,
+                                       erp, short_preamble, shift);
        /* ACK duration */
-       dur += ieee80211_frame_duration(sband->band, 10, rate->bitrate,
-                                       erp, short_preamble);
+       dur += ieee80211_frame_duration(sband->band, 10, bitrate,
+                                       erp, short_preamble, shift);
 
        return cpu_to_le16(dur);
 }
@@ -235,7 +253,7 @@ __le16 ieee80211_ctstoself_duration(struct ieee80211_hw *hw,
        struct ieee80211_rate *rate;
        struct ieee80211_sub_if_data *sdata;
        bool short_preamble;
-       int erp;
+       int erp, shift = 0, bitrate;
        u16 dur;
        struct ieee80211_supported_band *sband;
 
@@ -250,15 +268,18 @@ __le16 ieee80211_ctstoself_duration(struct ieee80211_hw *hw,
                short_preamble = sdata->vif.bss_conf.use_short_preamble;
                if (sdata->flags & IEEE80211_SDATA_OPERATING_GMODE)
                        erp = rate->flags & IEEE80211_RATE_ERP_G;
+               shift = ieee80211_vif_get_shift(vif);
        }
 
+       bitrate = DIV_ROUND_UP(rate->bitrate, 1 << shift);
+
        /* Data frame duration */
-       dur = ieee80211_frame_duration(sband->band, frame_len, rate->bitrate,
-                                      erp, short_preamble);
+       dur = ieee80211_frame_duration(sband->band, frame_len, bitrate,
+                                      erp, short_preamble, shift);
        if (!(frame_txctl->flags & IEEE80211_TX_CTL_NO_ACK)) {
                /* ACK duration */
-               dur += ieee80211_frame_duration(sband->band, 10, rate->bitrate,
-                                               erp, short_preamble);
+               dur += ieee80211_frame_duration(sband->band, 10, bitrate,
+                                               erp, short_preamble, shift);
        }
 
        return cpu_to_le16(dur);
@@ -1052,32 +1073,6 @@ void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata,
        }
 }
 
-void ieee80211_sta_def_wmm_params(struct ieee80211_sub_if_data *sdata,
-                                 const size_t supp_rates_len,
-                                 const u8 *supp_rates)
-{
-       struct ieee80211_chanctx_conf *chanctx_conf;
-       int i, have_higher_than_11mbit = 0;
-
-       /* cf. IEEE 802.11 9.2.12 */
-       for (i = 0; i < supp_rates_len; i++)
-               if ((supp_rates[i] & 0x7f) * 5 > 110)
-                       have_higher_than_11mbit = 1;
-
-       rcu_read_lock();
-       chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
-
-       if (chanctx_conf &&
-           chanctx_conf->def.chan->band == IEEE80211_BAND_2GHZ &&
-           have_higher_than_11mbit)
-               sdata->flags |= IEEE80211_SDATA_OPERATING_GMODE;
-       else
-               sdata->flags &= ~IEEE80211_SDATA_OPERATING_GMODE;
-       rcu_read_unlock();
-
-       ieee80211_set_wmm_default(sdata, true);
-}
-
 void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
                         u16 transaction, u16 auth_alg, u16 status,
                         const u8 *extra, size_t extra_len, const u8 *da,
@@ -1162,7 +1157,7 @@ void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata,
 int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
                             size_t buffer_len, const u8 *ie, size_t ie_len,
                             enum ieee80211_band band, u32 rate_mask,
-                            u8 channel)
+                            struct cfg80211_chan_def *chandef)
 {
        struct ieee80211_supported_band *sband;
        u8 *pos = buffer, *end = buffer + buffer_len;
@@ -1171,16 +1166,26 @@ int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
        u8 rates[32];
        int num_rates;
        int ext_rates_len;
+       int shift;
+       u32 rate_flags;
 
        sband = local->hw.wiphy->bands[band];
        if (WARN_ON_ONCE(!sband))
                return 0;
 
+       rate_flags = ieee80211_chandef_rate_flags(chandef);
+       shift = ieee80211_chandef_get_shift(chandef);
+
        num_rates = 0;
        for (i = 0; i < sband->n_bitrates; i++) {
                if ((BIT(i) & rate_mask) == 0)
                        continue; /* skip rate */
-               rates[num_rates++] = (u8) (sband->bitrates[i].bitrate / 5);
+               if ((rate_flags & sband->bitrates[i].flags) != rate_flags)
+                       continue;
+
+               rates[num_rates++] =
+                       (u8) DIV_ROUND_UP(sband->bitrates[i].bitrate,
+                                         (1 << shift) * 5);
        }
 
        supp_rates_len = min_t(int, num_rates, 8);
@@ -1220,12 +1225,13 @@ int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
                pos += ext_rates_len;
        }
 
-       if (channel && sband->band == IEEE80211_BAND_2GHZ) {
+       if (chandef->chan && sband->band == IEEE80211_BAND_2GHZ) {
                if (end - pos < 3)
                        goto out_err;
                *pos++ = WLAN_EID_DS_PARAMS;
                *pos++ = 1;
-               *pos++ = channel;
+               *pos++ = ieee80211_frequency_to_channel(
+                               chandef->chan->center_freq);
        }
 
        /* insert custom IEs that go before HT */
@@ -1290,9 +1296,9 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
                                          bool directed)
 {
        struct ieee80211_local *local = sdata->local;
+       struct cfg80211_chan_def chandef;
        struct sk_buff *skb;
        struct ieee80211_mgmt *mgmt;
-       u8 chan_no;
        int ies_len;
 
        /*
@@ -1300,10 +1306,11 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
         * in order to maximize the chance that we get a response.  Some
         * badly-behaved APs don't respond when this parameter is included.
         */
+       chandef.width = sdata->vif.bss_conf.chandef.width;
        if (directed)
-               chan_no = 0;
+               chandef.chan = NULL;
        else
-               chan_no = ieee80211_frequency_to_channel(chan->center_freq);
+               chandef.chan = chan;
 
        skb = ieee80211_probereq_get(&local->hw, &sdata->vif,
                                     ssid, ssid_len, 100 + ie_len);
@@ -1313,7 +1320,7 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
        ies_len = ieee80211_build_preq_ies(local, skb_tail_pointer(skb),
                                           skb_tailroom(skb),
                                           ie, ie_len, chan->band,
-                                          ratemask, chan_no);
+                                          ratemask, &chandef);
        skb_put(skb, ies_len);
 
        if (dst) {
@@ -1347,16 +1354,19 @@ void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst,
        }
 }
 
-u32 ieee80211_sta_get_rates(struct ieee80211_local *local,
+u32 ieee80211_sta_get_rates(struct ieee80211_sub_if_data *sdata,
                            struct ieee802_11_elems *elems,
                            enum ieee80211_band band, u32 *basic_rates)
 {
        struct ieee80211_supported_band *sband;
        struct ieee80211_rate *bitrates;
        size_t num_rates;
-       u32 supp_rates;
-       int i, j;
-       sband = local->hw.wiphy->bands[band];
+       u32 supp_rates, rate_flags;
+       int i, j, shift;
+       sband = sdata->local->hw.wiphy->bands[band];
+
+       rate_flags = ieee80211_chandef_rate_flags(&sdata->vif.bss_conf.chandef);
+       shift = ieee80211_vif_get_shift(&sdata->vif);
 
        if (WARN_ON(!sband))
                return 1;
@@ -1381,7 +1391,15 @@ u32 ieee80211_sta_get_rates(struct ieee80211_local *local,
                        continue;
 
                for (j = 0; j < num_rates; j++) {
-                       if (bitrates[j].bitrate == own_rate) {
+                       int brate;
+                       if ((rate_flags & sband->bitrates[j].flags)
+                           != rate_flags)
+                               continue;
+
+                       brate = DIV_ROUND_UP(sband->bitrates[j].bitrate,
+                                            1 << shift);
+
+                       if (brate == own_rate) {
                                supp_rates |= BIT(j);
                                if (basic_rates && is_basic)
                                        *basic_rates |= BIT(j);
@@ -1435,8 +1453,8 @@ int ieee80211_reconfig(struct ieee80211_local *local)
                local->resuming = true;
 
        if (local->wowlan) {
-               local->wowlan = false;
                res = drv_resume(local);
+               local->wowlan = false;
                if (res < 0) {
                        local->resuming = false;
                        return res;
@@ -2004,18 +2022,56 @@ void ieee80211_ht_oper_to_chandef(struct ieee80211_channel *control_chan,
        cfg80211_chandef_create(chandef, control_chan, channel_type);
 }
 
+int ieee80211_parse_bitrates(struct cfg80211_chan_def *chandef,
+                            const struct ieee80211_supported_band *sband,
+                            const u8 *srates, int srates_len, u32 *rates)
+{
+       u32 rate_flags = ieee80211_chandef_rate_flags(chandef);
+       int shift = ieee80211_chandef_get_shift(chandef);
+       struct ieee80211_rate *br;
+       int brate, rate, i, j, count = 0;
+
+       *rates = 0;
+
+       for (i = 0; i < srates_len; i++) {
+               rate = srates[i] & 0x7f;
+
+               for (j = 0; j < sband->n_bitrates; j++) {
+                       br = &sband->bitrates[j];
+                       if ((rate_flags & br->flags) != rate_flags)
+                               continue;
+
+                       brate = DIV_ROUND_UP(br->bitrate, (1 << shift) * 5);
+                       if (brate == rate) {
+                               *rates |= BIT(j);
+                               count++;
+                               break;
+                       }
+               }
+       }
+       return count;
+}
+
 int ieee80211_add_srates_ie(struct ieee80211_sub_if_data *sdata,
                            struct sk_buff *skb, bool need_basic,
                            enum ieee80211_band band)
 {
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_supported_band *sband;
-       int rate;
+       int rate, shift;
        u8 i, rates, *pos;
        u32 basic_rates = sdata->vif.bss_conf.basic_rates;
+       u32 rate_flags;
 
+       shift = ieee80211_vif_get_shift(&sdata->vif);
+       rate_flags = ieee80211_chandef_rate_flags(&sdata->vif.bss_conf.chandef);
        sband = local->hw.wiphy->bands[band];
-       rates = sband->n_bitrates;
+       rates = 0;
+       for (i = 0; i < sband->n_bitrates; i++) {
+               if ((rate_flags & sband->bitrates[i].flags) != rate_flags)
+                       continue;
+               rates++;
+       }
        if (rates > 8)
                rates = 8;
 
@@ -2027,10 +2083,15 @@ int ieee80211_add_srates_ie(struct ieee80211_sub_if_data *sdata,
        *pos++ = rates;
        for (i = 0; i < rates; i++) {
                u8 basic = 0;
+               if ((rate_flags & sband->bitrates[i].flags) != rate_flags)
+                       continue;
+
                if (need_basic && basic_rates & BIT(i))
                        basic = 0x80;
                rate = sband->bitrates[i].bitrate;
-               *pos++ = basic | (u8) (rate / 5);
+               rate = DIV_ROUND_UP(sband->bitrates[i].bitrate,
+                                   5 * (1 << shift));
+               *pos++ = basic | (u8) rate;
        }
 
        return 0;
@@ -2042,12 +2103,22 @@ int ieee80211_add_ext_srates_ie(struct ieee80211_sub_if_data *sdata,
 {
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_supported_band *sband;
-       int rate;
+       int rate, skip, shift;
        u8 i, exrates, *pos;
        u32 basic_rates = sdata->vif.bss_conf.basic_rates;
+       u32 rate_flags;
+
+       rate_flags = ieee80211_chandef_rate_flags(&sdata->vif.bss_conf.chandef);
+       shift = ieee80211_vif_get_shift(&sdata->vif);
 
        sband = local->hw.wiphy->bands[band];
-       exrates = sband->n_bitrates;
+       exrates = 0;
+       for (i = 0; i < sband->n_bitrates; i++) {
+               if ((rate_flags & sband->bitrates[i].flags) != rate_flags)
+                       continue;
+               exrates++;
+       }
+
        if (exrates > 8)
                exrates -= 8;
        else
@@ -2060,12 +2131,19 @@ int ieee80211_add_ext_srates_ie(struct ieee80211_sub_if_data *sdata,
                pos = skb_put(skb, exrates + 2);
                *pos++ = WLAN_EID_EXT_SUPP_RATES;
                *pos++ = exrates;
+               skip = 0;
                for (i = 8; i < sband->n_bitrates; i++) {
                        u8 basic = 0;
+                       if ((rate_flags & sband->bitrates[i].flags)
+                           != rate_flags)
+                               continue;
+                       if (skip++ < 8)
+                               continue;
                        if (need_basic && basic_rates & BIT(i))
                                basic = 0x80;
-                       rate = sband->bitrates[i].bitrate;
-                       *pos++ = basic | (u8) (rate / 5);
+                       rate = DIV_ROUND_UP(sband->bitrates[i].bitrate,
+                                           5 * (1 << shift));
+                       *pos++ = basic | (u8) rate;
                }
        }
        return 0;
@@ -2149,9 +2227,17 @@ u64 ieee80211_calculate_rx_timestamp(struct ieee80211_local *local,
                        ri.flags |= RATE_INFO_FLAGS_SHORT_GI;
        } else {
                struct ieee80211_supported_band *sband;
+               int shift = 0;
+               int bitrate;
+
+               if (status->flag & RX_FLAG_10MHZ)
+                       shift = 1;
+               if (status->flag & RX_FLAG_5MHZ)
+                       shift = 2;
 
                sband = local->hw.wiphy->bands[status->band];
-               ri.legacy = sband->bitrates[status->rate_idx].bitrate;
+               bitrate = sband->bitrates[status->rate_idx].bitrate;
+               ri.legacy = DIV_ROUND_UP(bitrate, (1 << shift));
        }
 
        rate = cfg80211_calculate_bitrate(&ri);
index 56d22cae590683c9a3b530ad0152b105d5c19c2d..c45fc1a60e0dde53416508ff29e22ee567f8883c 100644 (file)
@@ -410,20 +410,6 @@ config NF_NAT_TFTP
 
 endif # NF_CONNTRACK
 
-# transparent proxy support
-config NETFILTER_TPROXY
-       tristate "Transparent proxying support"
-       depends on IP_NF_MANGLE
-       depends on NETFILTER_ADVANCED
-       help
-         This option enables transparent proxying support, that is,
-         support for handling non-locally bound IPv4 TCP and UDP sockets.
-         For it to work you will have to configure certain iptables rules
-         and use policy routing. For more information on how to set it up
-         see Documentation/networking/tproxy.txt.
-
-         To compile it as a module, choose M here.  If unsure, say N.
-
 config NETFILTER_XTABLES
        tristate "Netfilter Xtables support (required for ip_tables)"
        default m if NETFILTER_ADVANCED=n
@@ -720,10 +706,10 @@ config NETFILTER_XT_TARGET_TEE
        this clone be rerouted to another nexthop.
 
 config NETFILTER_XT_TARGET_TPROXY
-       tristate '"TPROXY" target support'
-       depends on NETFILTER_TPROXY
+       tristate '"TPROXY" target transparent proxying support'
        depends on NETFILTER_XTABLES
        depends on NETFILTER_ADVANCED
+       depends on IP_NF_MANGLE
        select NF_DEFRAG_IPV4
        select NF_DEFRAG_IPV6 if IP6_NF_IPTABLES
        help
@@ -731,6 +717,9 @@ config NETFILTER_XT_TARGET_TPROXY
          REDIRECT.  It can only be used in the mangle table and is useful
          to redirect traffic to a transparent proxy.  It does _not_ depend
          on Netfilter connection tracking and NAT, unlike REDIRECT.
+         For it to work you will have to configure certain iptables rules
+         and use policy routing. For more information on how to set it up
+         see Documentation/networking/tproxy.txt.
 
          To compile it as a module, choose M here.  If unsure, say N.
 
@@ -1180,7 +1169,6 @@ config NETFILTER_XT_MATCH_SCTP
 
 config NETFILTER_XT_MATCH_SOCKET
        tristate '"socket" match support'
-       depends on NETFILTER_TPROXY
        depends on NETFILTER_XTABLES
        depends on NETFILTER_ADVANCED
        depends on !NF_CONNTRACK || NF_CONNTRACK
index a1abf87d43bfbd902f82cc9f8aae156d936cd89a..ebfa7dc747cd3fdc3b322ad2a44862c7fe94a3a2 100644 (file)
@@ -61,9 +61,6 @@ obj-$(CONFIG_NF_NAT_IRC) += nf_nat_irc.o
 obj-$(CONFIG_NF_NAT_SIP) += nf_nat_sip.o
 obj-$(CONFIG_NF_NAT_TFTP) += nf_nat_tftp.o
 
-# transparent proxy support
-obj-$(CONFIG_NETFILTER_TPROXY) += nf_tproxy_core.o
-
 # generic X tables 
 obj-$(CONFIG_NETFILTER_XTABLES) += x_tables.o xt_tcpudp.o
 
index 2217363ab4229212b0f309d1f724e1f23f5ceb17..593b16ea45e0d817787c4d06d9d3d703df2fc65b 100644 (file)
@@ -234,12 +234,13 @@ EXPORT_SYMBOL(skb_make_writable);
 /* This does not belong here, but locally generated errors need it if connection
    tracking in use: without this, connection may not be in hash table, and hence
    manufactured ICMP or RST packets will not be associated with it. */
-void (*ip_ct_attach)(struct sk_buff *, struct sk_buff *) __rcu __read_mostly;
+void (*ip_ct_attach)(struct sk_buff *, const struct sk_buff *)
+               __rcu __read_mostly;
 EXPORT_SYMBOL(ip_ct_attach);
 
-void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb)
+void nf_ct_attach(struct sk_buff *new, const struct sk_buff *skb)
 {
-       void (*attach)(struct sk_buff *, struct sk_buff *);
+       void (*attach)(struct sk_buff *, const struct sk_buff *);
 
        if (skb->nfct) {
                rcu_read_lock();
index 3cd85b2fc67c83e7f4a12b6fa35765d8ebc986ec..5199448697f64fcf0da0e35dbc38903477a92861 100644 (file)
@@ -414,7 +414,7 @@ static void ip_vs_lblcr_flush(struct ip_vs_service *svc)
 
        spin_lock_bh(&svc->sched_lock);
        tbl->dead = 1;
-       for (i=0; i<IP_VS_LBLCR_TAB_SIZE; i++) {
+       for (i = 0; i < IP_VS_LBLCR_TAB_SIZE; i++) {
                hlist_for_each_entry_safe(en, next, &tbl->bucket[i], list) {
                        ip_vs_lblcr_free(en);
                }
@@ -440,7 +440,7 @@ static inline void ip_vs_lblcr_full_check(struct ip_vs_service *svc)
        struct ip_vs_lblcr_entry *en;
        struct hlist_node *next;
 
-       for (i=0, j=tbl->rover; i<IP_VS_LBLCR_TAB_SIZE; i++) {
+       for (i = 0, j = tbl->rover; i < IP_VS_LBLCR_TAB_SIZE; i++) {
                j = (j + 1) & IP_VS_LBLCR_TAB_MASK;
 
                spin_lock(&svc->sched_lock);
@@ -495,7 +495,7 @@ static void ip_vs_lblcr_check_expire(unsigned long data)
        if (goal > tbl->max_size/2)
                goal = tbl->max_size/2;
 
-       for (i=0, j=tbl->rover; i<IP_VS_LBLCR_TAB_SIZE; i++) {
+       for (i = 0, j = tbl->rover; i < IP_VS_LBLCR_TAB_SIZE; i++) {
                j = (j + 1) & IP_VS_LBLCR_TAB_MASK;
 
                spin_lock(&svc->sched_lock);
@@ -536,7 +536,7 @@ static int ip_vs_lblcr_init_svc(struct ip_vs_service *svc)
        /*
         *    Initialize the hash buckets
         */
-       for (i=0; i<IP_VS_LBLCR_TAB_SIZE; i++) {
+       for (i = 0; i < IP_VS_LBLCR_TAB_SIZE; i++) {
                INIT_HLIST_HEAD(&tbl->bucket[i]);
        }
        tbl->max_size = IP_VS_LBLCR_TAB_SIZE*16;
index 3c0da8728036346a6a9ba235821779b49931b311..23e596e438b3fb4e51a97782c4cf00ad7d33af68 100644 (file)
@@ -66,15 +66,7 @@ sctp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
 static void sctp_nat_csum(struct sk_buff *skb, sctp_sctphdr_t *sctph,
                          unsigned int sctphoff)
 {
-       __u32 crc32;
-       struct sk_buff *iter;
-
-       crc32 = sctp_start_cksum((__u8 *)sctph, skb_headlen(skb) - sctphoff);
-       skb_walk_frags(skb, iter)
-               crc32 = sctp_update_cksum((u8 *) iter->data,
-                                         skb_headlen(iter), crc32);
-       sctph->checksum = sctp_end_cksum(crc32);
-
+       sctph->checksum = sctp_compute_cksum(skb, sctphoff);
        skb->ip_summed = CHECKSUM_UNNECESSARY;
 }
 
@@ -151,10 +143,7 @@ sctp_csum_check(int af, struct sk_buff *skb, struct ip_vs_protocol *pp)
 {
        unsigned int sctphoff;
        struct sctphdr *sh, _sctph;
-       struct sk_buff *iter;
-       __le32 cmp;
-       __le32 val;
-       __u32 tmp;
+       __le32 cmp, val;
 
 #ifdef CONFIG_IP_VS_IPV6
        if (af == AF_INET6)
@@ -168,13 +157,7 @@ sctp_csum_check(int af, struct sk_buff *skb, struct ip_vs_protocol *pp)
                return 0;
 
        cmp = sh->checksum;
-
-       tmp = sctp_start_cksum((__u8 *) sh, skb_headlen(skb));
-       skb_walk_frags(skb, iter)
-               tmp = sctp_update_cksum((__u8 *) iter->data,
-                                       skb_headlen(iter), tmp);
-
-       val = sctp_end_cksum(tmp);
+       val = sctp_compute_cksum(skb, sctphoff);
 
        if (val != cmp) {
                /* CRC failure, dump it. */
index f16c027df15bb38241d6750428fdad1733de88ac..3588faebe5298149b918614656ca8b6ee7d12e4e 100644 (file)
@@ -269,14 +269,20 @@ ip_vs_sh_get_port(const struct sk_buff *skb, struct ip_vs_iphdr *iph)
        switch (iph->protocol) {
        case IPPROTO_TCP:
                th = skb_header_pointer(skb, iph->len, sizeof(_tcph), &_tcph);
+               if (unlikely(th == NULL))
+                       return 0;
                port = th->source;
                break;
        case IPPROTO_UDP:
                uh = skb_header_pointer(skb, iph->len, sizeof(_udph), &_udph);
+               if (unlikely(uh == NULL))
+                       return 0;
                port = uh->source;
                break;
        case IPPROTO_SCTP:
                sh = skb_header_pointer(skb, iph->len, sizeof(_sctph), &_sctph);
+               if (unlikely(sh == NULL))
+                       return 0;
                port = sh->source;
                break;
        default:
index 0283baedcdfb5b48627002607731dabb992e15ae..da6f1787a102b34b1978ab06d3f95593eb9d9729 100644 (file)
@@ -238,7 +238,7 @@ destroy_conntrack(struct nf_conntrack *nfct)
        nf_conntrack_free(ct);
 }
 
-void nf_ct_delete_from_lists(struct nf_conn *ct)
+static void nf_ct_delete_from_lists(struct nf_conn *ct)
 {
        struct net *net = nf_ct_net(ct);
 
@@ -253,7 +253,6 @@ void nf_ct_delete_from_lists(struct nf_conn *ct)
                             &net->ct.dying);
        spin_unlock_bh(&nf_conntrack_lock);
 }
-EXPORT_SYMBOL_GPL(nf_ct_delete_from_lists);
 
 static void death_by_event(unsigned long ul_conntrack)
 {
@@ -275,7 +274,7 @@ static void death_by_event(unsigned long ul_conntrack)
        nf_ct_put(ct);
 }
 
-void nf_ct_dying_timeout(struct nf_conn *ct)
+static void nf_ct_dying_timeout(struct nf_conn *ct)
 {
        struct net *net = nf_ct_net(ct);
        struct nf_conntrack_ecache *ecache = nf_ct_ecache_find(ct);
@@ -288,27 +287,33 @@ void nf_ct_dying_timeout(struct nf_conn *ct)
                (prandom_u32() % net->ct.sysctl_events_retry_timeout);
        add_timer(&ecache->timeout);
 }
-EXPORT_SYMBOL_GPL(nf_ct_dying_timeout);
 
-static void death_by_timeout(unsigned long ul_conntrack)
+bool nf_ct_delete(struct nf_conn *ct, u32 portid, int report)
 {
-       struct nf_conn *ct = (void *)ul_conntrack;
        struct nf_conn_tstamp *tstamp;
 
        tstamp = nf_conn_tstamp_find(ct);
        if (tstamp && tstamp->stop == 0)
                tstamp->stop = ktime_to_ns(ktime_get_real());
 
-       if (!test_bit(IPS_DYING_BIT, &ct->status) &&
-           unlikely(nf_conntrack_event(IPCT_DESTROY, ct) < 0)) {
+       if (!nf_ct_is_dying(ct) &&
+           unlikely(nf_conntrack_event_report(IPCT_DESTROY, ct,
+           portid, report) < 0)) {
                /* destroy event was not delivered */
                nf_ct_delete_from_lists(ct);
                nf_ct_dying_timeout(ct);
-               return;
+               return false;
        }
        set_bit(IPS_DYING_BIT, &ct->status);
        nf_ct_delete_from_lists(ct);
        nf_ct_put(ct);
+       return true;
+}
+EXPORT_SYMBOL_GPL(nf_ct_delete);
+
+static void death_by_timeout(unsigned long ul_conntrack)
+{
+       nf_ct_delete((struct nf_conn *)ul_conntrack, 0, 0);
 }
 
 /*
@@ -643,10 +648,7 @@ static noinline int early_drop(struct net *net, unsigned int hash)
                return dropped;
 
        if (del_timer(&ct->timeout)) {
-               death_by_timeout((unsigned long)ct);
-               /* Check if we indeed killed this entry. Reliable event
-                  delivery may have inserted it into the dying list. */
-               if (test_bit(IPS_DYING_BIT, &ct->status)) {
+               if (nf_ct_delete(ct, 0, 0)) {
                        dropped = 1;
                        NF_CT_STAT_INC_ATOMIC(net, early_drop);
                }
@@ -1192,7 +1194,7 @@ EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_tuple_size);
 #endif
 
 /* Used by ipt_REJECT and ip6t_REJECT. */
-static void nf_conntrack_attach(struct sk_buff *nskb, struct sk_buff *skb)
+static void nf_conntrack_attach(struct sk_buff *nskb, const struct sk_buff *skb)
 {
        struct nf_conn *ct;
        enum ip_conntrack_info ctinfo;
@@ -1244,7 +1246,7 @@ found:
 
 void nf_ct_iterate_cleanup(struct net *net,
                           int (*iter)(struct nf_conn *i, void *data),
-                          void *data)
+                          void *data, u32 portid, int report)
 {
        struct nf_conn *ct;
        unsigned int bucket = 0;
@@ -1252,7 +1254,8 @@ void nf_ct_iterate_cleanup(struct net *net,
        while ((ct = get_next_corpse(net, iter, data, &bucket)) != NULL) {
                /* Time to push up daises... */
                if (del_timer(&ct->timeout))
-                       death_by_timeout((unsigned long)ct);
+                       nf_ct_delete(ct, portid, report);
+
                /* ... else the timer will get him soon. */
 
                nf_ct_put(ct);
@@ -1260,30 +1263,6 @@ void nf_ct_iterate_cleanup(struct net *net,
 }
 EXPORT_SYMBOL_GPL(nf_ct_iterate_cleanup);
 
-struct __nf_ct_flush_report {
-       u32 portid;
-       int report;
-};
-
-static int kill_report(struct nf_conn *i, void *data)
-{
-       struct __nf_ct_flush_report *fr = (struct __nf_ct_flush_report *)data;
-       struct nf_conn_tstamp *tstamp;
-
-       tstamp = nf_conn_tstamp_find(i);
-       if (tstamp && tstamp->stop == 0)
-               tstamp->stop = ktime_to_ns(ktime_get_real());
-
-       /* If we fail to deliver the event, death_by_timeout() will retry */
-       if (nf_conntrack_event_report(IPCT_DESTROY, i,
-                                     fr->portid, fr->report) < 0)
-               return 1;
-
-       /* Avoid the delivery of the destroy event in death_by_timeout(). */
-       set_bit(IPS_DYING_BIT, &i->status);
-       return 1;
-}
-
 static int kill_all(struct nf_conn *i, void *data)
 {
        return 1;
@@ -1301,11 +1280,7 @@ EXPORT_SYMBOL_GPL(nf_ct_free_hashtable);
 
 void nf_conntrack_flush_report(struct net *net, u32 portid, int report)
 {
-       struct __nf_ct_flush_report fr = {
-               .portid = portid,
-               .report = report,
-       };
-       nf_ct_iterate_cleanup(net, kill_report, &fr);
+       nf_ct_iterate_cleanup(net, kill_all, NULL, portid, report);
 }
 EXPORT_SYMBOL_GPL(nf_conntrack_flush_report);
 
@@ -1386,7 +1361,7 @@ void nf_conntrack_cleanup_net_list(struct list_head *net_exit_list)
 i_see_dead_people:
        busy = 0;
        list_for_each_entry(net, net_exit_list, exit_list) {
-               nf_ct_iterate_cleanup(net, kill_all, NULL);
+               nf_ct_iterate_cleanup(net, kill_all, NULL, 0, 0);
                nf_ct_release_dying_list(net);
                if (atomic_read(&net->ct.count) != 0)
                        busy = 1;
@@ -1692,7 +1667,7 @@ err_stat:
        return ret;
 }
 
-s16 (*nf_ct_nat_offset)(const struct nf_conn *ct,
+s32 (*nf_ct_nat_offset)(const struct nf_conn *ct,
                        enum ip_conntrack_dir dir,
                        u32 seq);
 EXPORT_SYMBOL_GPL(nf_ct_nat_offset);
index 355d2ef0809477a36176ff5287bd8bfc25faee8c..bb53f120e79cb3547b1f23eba8e3d26c91aa43a4 100644 (file)
@@ -8,12 +8,8 @@
  * published by the Free Software Foundation.
  */
 
-#include <linux/ctype.h>
 #include <linux/export.h>
-#include <linux/jhash.h>
-#include <linux/spinlock.h>
 #include <linux/types.h>
-#include <linux/slab.h>
 
 #include <net/netfilter/nf_conntrack_ecache.h>
 #include <net/netfilter/nf_conntrack_labels.h>
index edc410e778f770b7d1bef94cf2de3d0caec0b184..fa61fea63234d171d6ba8e32d61cc3e491be0634 100644 (file)
@@ -1038,21 +1038,9 @@ ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb,
                }
        }
 
-       if (del_timer(&ct->timeout)) {
-               if (nf_conntrack_event_report(IPCT_DESTROY, ct,
-                                             NETLINK_CB(skb).portid,
-                                             nlmsg_report(nlh)) < 0) {
-                       nf_ct_delete_from_lists(ct);
-                       /* we failed to report the event, try later */
-                       nf_ct_dying_timeout(ct);
-                       nf_ct_put(ct);
-                       return 0;
-               }
-               /* death_by_timeout would report the event again */
-               set_bit(IPS_DYING_BIT, &ct->status);
-               nf_ct_delete_from_lists(ct);
-               nf_ct_put(ct);
-       }
+       if (del_timer(&ct->timeout))
+               nf_ct_delete(ct, NETLINK_CB(skb).portid, nlmsg_report(nlh));
+
        nf_ct_put(ct);
 
        return 0;
@@ -1999,6 +1987,27 @@ out:
        return err == -EAGAIN ? -ENOBUFS : err;
 }
 
+static const struct nla_policy exp_nla_policy[CTA_EXPECT_MAX+1] = {
+       [CTA_EXPECT_MASTER]     = { .type = NLA_NESTED },
+       [CTA_EXPECT_TUPLE]      = { .type = NLA_NESTED },
+       [CTA_EXPECT_MASK]       = { .type = NLA_NESTED },
+       [CTA_EXPECT_TIMEOUT]    = { .type = NLA_U32 },
+       [CTA_EXPECT_ID]         = { .type = NLA_U32 },
+       [CTA_EXPECT_HELP_NAME]  = { .type = NLA_NUL_STRING,
+                                   .len = NF_CT_HELPER_NAME_LEN - 1 },
+       [CTA_EXPECT_ZONE]       = { .type = NLA_U16 },
+       [CTA_EXPECT_FLAGS]      = { .type = NLA_U32 },
+       [CTA_EXPECT_CLASS]      = { .type = NLA_U32 },
+       [CTA_EXPECT_NAT]        = { .type = NLA_NESTED },
+       [CTA_EXPECT_FN]         = { .type = NLA_NUL_STRING },
+};
+
+static struct nf_conntrack_expect *
+ctnetlink_alloc_expect(const struct nlattr *const cda[], struct nf_conn *ct,
+                      struct nf_conntrack_helper *helper,
+                      struct nf_conntrack_tuple *tuple,
+                      struct nf_conntrack_tuple *mask);
+
 #ifdef CONFIG_NETFILTER_NETLINK_QUEUE_CT
 static size_t
 ctnetlink_nfqueue_build_size(const struct nf_conn *ct)
@@ -2139,10 +2148,69 @@ ctnetlink_nfqueue_parse(const struct nlattr *attr, struct nf_conn *ct)
        return ret;
 }
 
+static int ctnetlink_nfqueue_exp_parse(const struct nlattr * const *cda,
+                                      const struct nf_conn *ct,
+                                      struct nf_conntrack_tuple *tuple,
+                                      struct nf_conntrack_tuple *mask)
+{
+       int err;
+
+       err = ctnetlink_parse_tuple(cda, tuple, CTA_EXPECT_TUPLE,
+                                   nf_ct_l3num(ct));
+       if (err < 0)
+               return err;
+
+       return ctnetlink_parse_tuple(cda, mask, CTA_EXPECT_MASK,
+                                    nf_ct_l3num(ct));
+}
+
+static int
+ctnetlink_nfqueue_attach_expect(const struct nlattr *attr, struct nf_conn *ct,
+                               u32 portid, u32 report)
+{
+       struct nlattr *cda[CTA_EXPECT_MAX+1];
+       struct nf_conntrack_tuple tuple, mask;
+       struct nf_conntrack_helper *helper;
+       struct nf_conntrack_expect *exp;
+       int err;
+
+       err = nla_parse_nested(cda, CTA_EXPECT_MAX, attr, exp_nla_policy);
+       if (err < 0)
+               return err;
+
+       err = ctnetlink_nfqueue_exp_parse((const struct nlattr * const *)cda,
+                                         ct, &tuple, &mask);
+       if (err < 0)
+               return err;
+
+       if (cda[CTA_EXPECT_HELP_NAME]) {
+               const char *helpname = nla_data(cda[CTA_EXPECT_HELP_NAME]);
+
+               helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct),
+                                                   nf_ct_protonum(ct));
+               if (helper == NULL)
+                       return -EOPNOTSUPP;
+       }
+
+       exp = ctnetlink_alloc_expect((const struct nlattr * const *)cda, ct,
+                                    helper, &tuple, &mask);
+       if (IS_ERR(exp))
+               return PTR_ERR(exp);
+
+       err = nf_ct_expect_related_report(exp, portid, report);
+       if (err < 0) {
+               nf_ct_expect_put(exp);
+               return err;
+       }
+
+       return 0;
+}
+
 static struct nfq_ct_hook ctnetlink_nfqueue_hook = {
        .build_size     = ctnetlink_nfqueue_build_size,
        .build          = ctnetlink_nfqueue_build,
        .parse          = ctnetlink_nfqueue_parse,
+       .attach_expect  = ctnetlink_nfqueue_attach_expect,
 };
 #endif /* CONFIG_NETFILTER_NETLINK_QUEUE_CT */
 
@@ -2510,21 +2578,6 @@ static int ctnetlink_dump_exp_ct(struct sock *ctnl, struct sk_buff *skb,
        return err;
 }
 
-static const struct nla_policy exp_nla_policy[CTA_EXPECT_MAX+1] = {
-       [CTA_EXPECT_MASTER]     = { .type = NLA_NESTED },
-       [CTA_EXPECT_TUPLE]      = { .type = NLA_NESTED },
-       [CTA_EXPECT_MASK]       = { .type = NLA_NESTED },
-       [CTA_EXPECT_TIMEOUT]    = { .type = NLA_U32 },
-       [CTA_EXPECT_ID]         = { .type = NLA_U32 },
-       [CTA_EXPECT_HELP_NAME]  = { .type = NLA_NUL_STRING,
-                                   .len = NF_CT_HELPER_NAME_LEN - 1 },
-       [CTA_EXPECT_ZONE]       = { .type = NLA_U16 },
-       [CTA_EXPECT_FLAGS]      = { .type = NLA_U32 },
-       [CTA_EXPECT_CLASS]      = { .type = NLA_U32 },
-       [CTA_EXPECT_NAT]        = { .type = NLA_NESTED },
-       [CTA_EXPECT_FN]         = { .type = NLA_NUL_STRING },
-};
-
 static int
 ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb,
                     const struct nlmsghdr *nlh,
@@ -2747,76 +2800,26 @@ ctnetlink_parse_expect_nat(const struct nlattr *attr,
 #endif
 }
 
-static int
-ctnetlink_create_expect(struct net *net, u16 zone,
-                       const struct nlattr * const cda[],
-                       u_int8_t u3,
-                       u32 portid, int report)
+static struct nf_conntrack_expect *
+ctnetlink_alloc_expect(const struct nlattr * const cda[], struct nf_conn *ct,
+                      struct nf_conntrack_helper *helper,
+                      struct nf_conntrack_tuple *tuple,
+                      struct nf_conntrack_tuple *mask)
 {
-       struct nf_conntrack_tuple tuple, mask, master_tuple;
-       struct nf_conntrack_tuple_hash *h = NULL;
+       u_int32_t class = 0;
        struct nf_conntrack_expect *exp;
-       struct nf_conn *ct;
        struct nf_conn_help *help;
-       struct nf_conntrack_helper *helper = NULL;
-       u_int32_t class = 0;
-       int err = 0;
-
-       /* caller guarantees that those three CTA_EXPECT_* exist */
-       err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3);
-       if (err < 0)
-               return err;
-       err = ctnetlink_parse_tuple(cda, &mask, CTA_EXPECT_MASK, u3);
-       if (err < 0)
-               return err;
-       err = ctnetlink_parse_tuple(cda, &master_tuple, CTA_EXPECT_MASTER, u3);
-       if (err < 0)
-               return err;
-
-       /* Look for master conntrack of this expectation */
-       h = nf_conntrack_find_get(net, zone, &master_tuple);
-       if (!h)
-               return -ENOENT;
-       ct = nf_ct_tuplehash_to_ctrack(h);
-
-       /* Look for helper of this expectation */
-       if (cda[CTA_EXPECT_HELP_NAME]) {
-               const char *helpname = nla_data(cda[CTA_EXPECT_HELP_NAME]);
-
-               helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct),
-                                                   nf_ct_protonum(ct));
-               if (helper == NULL) {
-#ifdef CONFIG_MODULES
-                       if (request_module("nfct-helper-%s", helpname) < 0) {
-                               err = -EOPNOTSUPP;
-                               goto out;
-                       }
-
-                       helper = __nf_conntrack_helper_find(helpname,
-                                                           nf_ct_l3num(ct),
-                                                           nf_ct_protonum(ct));
-                       if (helper) {
-                               err = -EAGAIN;
-                               goto out;
-                       }
-#endif
-                       err = -EOPNOTSUPP;
-                       goto out;
-               }
-       }
+       int err;
 
        if (cda[CTA_EXPECT_CLASS] && helper) {
                class = ntohl(nla_get_be32(cda[CTA_EXPECT_CLASS]));
-               if (class > helper->expect_class_max) {
-                       err = -EINVAL;
-                       goto out;
-               }
+               if (class > helper->expect_class_max)
+                       return ERR_PTR(-EINVAL);
        }
        exp = nf_ct_expect_alloc(ct);
-       if (!exp) {
-               err = -ENOMEM;
-               goto out;
-       }
+       if (!exp)
+               return ERR_PTR(-ENOMEM);
+
        help = nfct_help(ct);
        if (!help) {
                if (!cda[CTA_EXPECT_TIMEOUT]) {
@@ -2854,21 +2857,89 @@ ctnetlink_create_expect(struct net *net, u16 zone,
        exp->class = class;
        exp->master = ct;
        exp->helper = helper;
-       memcpy(&exp->tuple, &tuple, sizeof(struct nf_conntrack_tuple));
-       memcpy(&exp->mask.src.u3, &mask.src.u3, sizeof(exp->mask.src.u3));
-       exp->mask.src.u.all = mask.src.u.all;
+       exp->tuple = *tuple;
+       exp->mask.src.u3 = mask->src.u3;
+       exp->mask.src.u.all = mask->src.u.all;
 
        if (cda[CTA_EXPECT_NAT]) {
                err = ctnetlink_parse_expect_nat(cda[CTA_EXPECT_NAT],
-                                                exp, u3);
+                                                exp, nf_ct_l3num(ct));
                if (err < 0)
                        goto err_out;
        }
-       err = nf_ct_expect_related_report(exp, portid, report);
+       return exp;
 err_out:
        nf_ct_expect_put(exp);
-out:
-       nf_ct_put(nf_ct_tuplehash_to_ctrack(h));
+       return ERR_PTR(err);
+}
+
+static int
+ctnetlink_create_expect(struct net *net, u16 zone,
+                       const struct nlattr * const cda[],
+                       u_int8_t u3, u32 portid, int report)
+{
+       struct nf_conntrack_tuple tuple, mask, master_tuple;
+       struct nf_conntrack_tuple_hash *h = NULL;
+       struct nf_conntrack_helper *helper = NULL;
+       struct nf_conntrack_expect *exp;
+       struct nf_conn *ct;
+       int err;
+
+       /* caller guarantees that those three CTA_EXPECT_* exist */
+       err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3);
+       if (err < 0)
+               return err;
+       err = ctnetlink_parse_tuple(cda, &mask, CTA_EXPECT_MASK, u3);
+       if (err < 0)
+               return err;
+       err = ctnetlink_parse_tuple(cda, &master_tuple, CTA_EXPECT_MASTER, u3);
+       if (err < 0)
+               return err;
+
+       /* Look for master conntrack of this expectation */
+       h = nf_conntrack_find_get(net, zone, &master_tuple);
+       if (!h)
+               return -ENOENT;
+       ct = nf_ct_tuplehash_to_ctrack(h);
+
+       if (cda[CTA_EXPECT_HELP_NAME]) {
+               const char *helpname = nla_data(cda[CTA_EXPECT_HELP_NAME]);
+
+               helper = __nf_conntrack_helper_find(helpname, u3,
+                                                   nf_ct_protonum(ct));
+               if (helper == NULL) {
+#ifdef CONFIG_MODULES
+                       if (request_module("nfct-helper-%s", helpname) < 0) {
+                               err = -EOPNOTSUPP;
+                               goto err_ct;
+                       }
+                       helper = __nf_conntrack_helper_find(helpname, u3,
+                                                           nf_ct_protonum(ct));
+                       if (helper) {
+                               err = -EAGAIN;
+                               goto err_ct;
+                       }
+#endif
+                       err = -EOPNOTSUPP;
+                       goto err_ct;
+               }
+       }
+
+       exp = ctnetlink_alloc_expect(cda, ct, helper, &tuple, &mask);
+       if (IS_ERR(exp)) {
+               err = PTR_ERR(exp);
+               goto err_ct;
+       }
+
+       err = nf_ct_expect_related_report(exp, portid, report);
+       if (err < 0)
+               goto err_exp;
+
+       return 0;
+err_exp:
+       nf_ct_expect_put(exp);
+err_ct:
+       nf_ct_put(ct);
        return err;
 }
 
index 0ab9636ac57e03308ee1f316b0f11ca49d4e826d..ce3004156eeb923171c2ef08eae6e3447ce7906e 100644 (file)
@@ -281,7 +281,7 @@ void nf_ct_l3proto_pernet_unregister(struct net *net,
        nf_ct_l3proto_unregister_sysctl(net, proto);
 
        /* Remove all contrack entries for this protocol */
-       nf_ct_iterate_cleanup(net, kill_l3proto, proto);
+       nf_ct_iterate_cleanup(net, kill_l3proto, proto, 0, 0);
 }
 EXPORT_SYMBOL_GPL(nf_ct_l3proto_pernet_unregister);
 
@@ -476,7 +476,7 @@ void nf_ct_l4proto_pernet_unregister(struct net *net,
        nf_ct_l4proto_unregister_sysctl(net, pn, l4proto);
 
        /* Remove all contrack entries for this protocol */
-       nf_ct_iterate_cleanup(net, kill_l4proto, l4proto);
+       nf_ct_iterate_cleanup(net, kill_l4proto, l4proto, 0, 0);
 }
 EXPORT_SYMBOL_GPL(nf_ct_l4proto_pernet_unregister);
 
index 2f8010707d015dc62348ca758390c194388d7a8b..d224e001f14fbbb3df3e4e2a36beeaa35d69728d 100644 (file)
@@ -496,7 +496,7 @@ static void tcp_sack(const struct sk_buff *skb, unsigned int dataoff,
 }
 
 #ifdef CONFIG_NF_NAT_NEEDED
-static inline s16 nat_offset(const struct nf_conn *ct,
+static inline s32 nat_offset(const struct nf_conn *ct,
                             enum ip_conntrack_dir dir,
                             u32 seq)
 {
@@ -525,7 +525,7 @@ static bool tcp_in_window(const struct nf_conn *ct,
        struct ip_ct_tcp_state *receiver = &state->seen[!dir];
        const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple;
        __u32 seq, ack, sack, end, win, swin;
-       s16 receiver_offset;
+       s32 receiver_offset;
        bool res, in_recv_win;
 
        /*
index 038eee5c8f8548787bff468c40256d52bb6655fd..6ff808375b5eb41dadc2ef7de7a24dffd64a613c 100644 (file)
@@ -497,7 +497,7 @@ static void nf_nat_l4proto_clean(u8 l3proto, u8 l4proto)
 
        rtnl_lock();
        for_each_net(net)
-               nf_ct_iterate_cleanup(net, nf_nat_proto_remove, &clean);
+               nf_ct_iterate_cleanup(net, nf_nat_proto_remove, &clean, 0, 0);
        rtnl_unlock();
 }
 
@@ -511,7 +511,7 @@ static void nf_nat_l3proto_clean(u8 l3proto)
        rtnl_lock();
 
        for_each_net(net)
-               nf_ct_iterate_cleanup(net, nf_nat_proto_remove, &clean);
+               nf_ct_iterate_cleanup(net, nf_nat_proto_remove, &clean, 0, 0);
        rtnl_unlock();
 }
 
@@ -749,7 +749,7 @@ static void __net_exit nf_nat_net_exit(struct net *net)
 {
        struct nf_nat_proto_clean clean = {};
 
-       nf_ct_iterate_cleanup(net, &nf_nat_proto_remove, &clean);
+       nf_ct_iterate_cleanup(net, &nf_nat_proto_remove, &clean, 0, 0);
        synchronize_rcu();
        nf_ct_free_hashtable(net->ct.nat_bysource, net->ct.nat_htable_size);
 }
index 85e20a9190816f348a845937b8a4fe470a917744..46b9baa845a66e5de4da09a0a36f37428e1c1786 100644 (file)
@@ -30,8 +30,6 @@
        pr_debug("offset_before=%d, offset_after=%d, correction_pos=%u\n", \
                 x->offset_before, x->offset_after, x->correction_pos);
 
-static DEFINE_SPINLOCK(nf_nat_seqofs_lock);
-
 /* Setup TCP sequence correction given this change at this sequence */
 static inline void
 adjust_tcp_sequence(u32 seq,
@@ -49,7 +47,7 @@ adjust_tcp_sequence(u32 seq,
        pr_debug("adjust_tcp_sequence: Seq_offset before: ");
        DUMP_OFFSET(this_way);
 
-       spin_lock_bh(&nf_nat_seqofs_lock);
+       spin_lock_bh(&ct->lock);
 
        /* SYN adjust. If it's uninitialized, or this is after last
         * correction, record it: we don't handle more than one
@@ -61,31 +59,26 @@ adjust_tcp_sequence(u32 seq,
                this_way->offset_before = this_way->offset_after;
                this_way->offset_after += sizediff;
        }
-       spin_unlock_bh(&nf_nat_seqofs_lock);
+       spin_unlock_bh(&ct->lock);
 
        pr_debug("adjust_tcp_sequence: Seq_offset after: ");
        DUMP_OFFSET(this_way);
 }
 
-/* Get the offset value, for conntrack */
-s16 nf_nat_get_offset(const struct nf_conn *ct,
+/* Get the offset value, for conntrack. Caller must have the conntrack locked */
+s32 nf_nat_get_offset(const struct nf_conn *ct,
                      enum ip_conntrack_dir dir,
                      u32 seq)
 {
        struct nf_conn_nat *nat = nfct_nat(ct);
        struct nf_nat_seq *this_way;
-       s16 offset;
 
        if (!nat)
                return 0;
 
        this_way = &nat->seq[dir];
-       spin_lock_bh(&nf_nat_seqofs_lock);
-       offset = after(seq, this_way->correction_pos)
+       return after(seq, this_way->correction_pos)
                 ? this_way->offset_after : this_way->offset_before;
-       spin_unlock_bh(&nf_nat_seqofs_lock);
-
-       return offset;
 }
 
 /* Frobs data inside this packet, which is linear. */
@@ -143,7 +136,7 @@ static int enlarge_skb(struct sk_buff *skb, unsigned int extra)
 }
 
 void nf_nat_set_seq_adjust(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
-                          __be32 seq, s16 off)
+                          __be32 seq, s32 off)
 {
        if (!off)
                return;
@@ -370,9 +363,10 @@ nf_nat_seq_adjust(struct sk_buff *skb,
        struct tcphdr *tcph;
        int dir;
        __be32 newseq, newack;
-       s16 seqoff, ackoff;
+       s32 seqoff, ackoff;
        struct nf_conn_nat *nat = nfct_nat(ct);
        struct nf_nat_seq *this_way, *other_way;
+       int res;
 
        dir = CTINFO2DIR(ctinfo);
 
@@ -383,6 +377,7 @@ nf_nat_seq_adjust(struct sk_buff *skb,
                return 0;
 
        tcph = (void *)skb->data + protoff;
+       spin_lock_bh(&ct->lock);
        if (after(ntohl(tcph->seq), this_way->correction_pos))
                seqoff = this_way->offset_after;
        else
@@ -407,7 +402,10 @@ nf_nat_seq_adjust(struct sk_buff *skb,
        tcph->seq = newseq;
        tcph->ack_seq = newack;
 
-       return nf_nat_sack_adjust(skb, protoff, tcph, ct, ctinfo);
+       res = nf_nat_sack_adjust(skb, protoff, tcph, ct, ctinfo);
+       spin_unlock_bh(&ct->lock);
+
+       return res;
 }
 
 /* Setup NAT on this expected conntrack so it follows master. */
index 396e55d46f90c77ea25a88cc1e94decc76f81050..754536f2c67488acd4345cdcec3740916ab24fea 100644 (file)
@@ -34,9 +34,7 @@ sctp_manip_pkt(struct sk_buff *skb,
               const struct nf_conntrack_tuple *tuple,
               enum nf_nat_manip_type maniptype)
 {
-       struct sk_buff *frag;
        sctp_sctphdr_t *hdr;
-       __u32 crc32;
 
        if (!skb_make_writable(skb, hdroff + sizeof(*hdr)))
                return false;
@@ -51,11 +49,7 @@ sctp_manip_pkt(struct sk_buff *skb,
                hdr->dest = tuple->dst.u.sctp.port;
        }
 
-       crc32 = sctp_start_cksum((u8 *)hdr, skb_headlen(skb) - hdroff);
-       skb_walk_frags(skb, frag)
-               crc32 = sctp_update_cksum((u8 *)frag->data, skb_headlen(frag),
-                                         crc32);
-       hdr->checksum = sctp_end_cksum(crc32);
+       hdr->checksum = sctp_compute_cksum(skb, hdroff);
 
        return true;
 }
diff --git a/net/netfilter/nf_tproxy_core.c b/net/netfilter/nf_tproxy_core.c
deleted file mode 100644 (file)
index 474d621..0000000
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Transparent proxy support for Linux/iptables
- *
- * Copyright (c) 2006-2007 BalaBit IT Ltd.
- * Author: Balazs Scheidler, Krisztian Kovacs
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-#include <linux/module.h>
-
-#include <linux/net.h>
-#include <linux/if.h>
-#include <linux/netdevice.h>
-#include <net/udp.h>
-#include <net/netfilter/nf_tproxy_core.h>
-
-
-static void
-nf_tproxy_destructor(struct sk_buff *skb)
-{
-       struct sock *sk = skb->sk;
-
-       skb->sk = NULL;
-       skb->destructor = NULL;
-
-       if (sk)
-               sock_put(sk);
-}
-
-/* consumes sk */
-void
-nf_tproxy_assign_sock(struct sk_buff *skb, struct sock *sk)
-{
-       /* assigning tw sockets complicates things; most
-        * skb->sk->X checks would have to test sk->sk_state first */
-       if (sk->sk_state == TCP_TIME_WAIT) {
-               inet_twsk_put(inet_twsk(sk));
-               return;
-       }
-
-       skb_orphan(skb);
-       skb->sk = sk;
-       skb->destructor = nf_tproxy_destructor;
-}
-EXPORT_SYMBOL_GPL(nf_tproxy_assign_sock);
-
-static int __init nf_tproxy_init(void)
-{
-       pr_info("NF_TPROXY: Transparent proxy support initialized, version 4.1.0\n");
-       pr_info("NF_TPROXY: Copyright (c) 2006-2007 BalaBit IT Ltd.\n");
-       return 0;
-}
-
-module_init(nf_tproxy_init);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Krisztian Kovacs");
-MODULE_DESCRIPTION("Transparent proxy support core routines");
index 8a703c3dd318b660c5e02b326495dbbc7b9f5b3e..95a98c8c1da65be10ea5499aba6291cdf8319fed 100644 (file)
@@ -862,6 +862,7 @@ static const struct nla_policy nfqa_verdict_policy[NFQA_MAX+1] = {
        [NFQA_MARK]             = { .type = NLA_U32 },
        [NFQA_PAYLOAD]          = { .type = NLA_UNSPEC },
        [NFQA_CT]               = { .type = NLA_UNSPEC },
+       [NFQA_EXP]              = { .type = NLA_UNSPEC },
 };
 
 static const struct nla_policy nfqa_verdict_batch_policy[NFQA_MAX+1] = {
@@ -990,9 +991,14 @@ nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb,
        if (entry == NULL)
                return -ENOENT;
 
-       rcu_read_lock();
-       if (nfqa[NFQA_CT] && (queue->flags & NFQA_CFG_F_CONNTRACK))
+       if (nfqa[NFQA_CT]) {
                ct = nfqnl_ct_parse(entry->skb, nfqa[NFQA_CT], &ctinfo);
+               if (ct && nfqa[NFQA_EXP]) {
+                       nfqnl_attach_expect(ct, nfqa[NFQA_EXP],
+                                           NETLINK_CB(skb).portid,
+                                           nlmsg_report(nlh));
+               }
+       }
 
        if (nfqa[NFQA_PAYLOAD]) {
                u16 payload_len = nla_len(nfqa[NFQA_PAYLOAD]);
@@ -1005,7 +1011,6 @@ nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb,
                if (ct)
                        nfqnl_ct_seq_adjust(skb, ct, ctinfo, diff);
        }
-       rcu_read_unlock();
 
        if (nfqa[NFQA_MARK])
                entry->skb->mark = ntohl(nla_get_be32(nfqa[NFQA_MARK]));
index ab61d66bc0b9e53fb5c0b657d0d954f8ea16cf3b..be893039966d5b4be828b8b0418e86cb0282c383 100644 (file)
@@ -96,3 +96,18 @@ void nfqnl_ct_seq_adjust(struct sk_buff *skb, struct nf_conn *ct,
        if ((ct->status & IPS_NAT_MASK) && diff)
                nfq_nat_ct->seq_adjust(skb, ct, ctinfo, diff);
 }
+
+int nfqnl_attach_expect(struct nf_conn *ct, const struct nlattr *attr,
+                       u32 portid, u32 report)
+{
+       struct nfq_ct_hook *nfq_ct;
+
+       if (nf_ct_is_untracked(ct))
+               return 0;
+
+       nfq_ct = rcu_dereference(nfq_ct_hook);
+       if (nfq_ct == NULL)
+               return -EOPNOTSUPP;
+
+       return nfq_ct->attach_expect(attr, ct, portid, report);
+}
index d7f195388f66c5a76fcd62bd66ee6ac9567dd334..5d8a3a3cd5a7cd04b714d92a1366c04ad464e716 100644 (file)
@@ -15,7 +15,9 @@
 #include <linux/ip.h>
 #include <net/checksum.h>
 #include <net/udp.h>
+#include <net/tcp.h>
 #include <net/inet_sock.h>
+#include <net/inet_hashtables.h>
 #include <linux/inetdevice.h>
 #include <linux/netfilter/x_tables.h>
 #include <linux/netfilter_ipv4/ip_tables.h>
 #define XT_TPROXY_HAVE_IPV6 1
 #include <net/if_inet6.h>
 #include <net/addrconf.h>
+#include <net/inet6_hashtables.h>
 #include <linux/netfilter_ipv6/ip6_tables.h>
 #include <net/netfilter/ipv6/nf_defrag_ipv6.h>
 #endif
 
-#include <net/netfilter/nf_tproxy_core.h>
 #include <linux/netfilter/xt_TPROXY.h>
 
+enum nf_tproxy_lookup_t {
+        NFT_LOOKUP_LISTENER,
+        NFT_LOOKUP_ESTABLISHED,
+};
+
 static bool tproxy_sk_is_transparent(struct sock *sk)
 {
        if (sk->sk_state != TCP_TIME_WAIT) {
@@ -68,6 +75,157 @@ tproxy_laddr4(struct sk_buff *skb, __be32 user_laddr, __be32 daddr)
        return laddr ? laddr : daddr;
 }
 
+/*
+ * This is used when the user wants to intercept a connection matching
+ * an explicit iptables rule. In this case the sockets are assumed
+ * matching in preference order:
+ *
+ *   - match: if there's a fully established connection matching the
+ *     _packet_ tuple, it is returned, assuming the redirection
+ *     already took place and we process a packet belonging to an
+ *     established connection
+ *
+ *   - match: if there's a listening socket matching the redirection
+ *     (e.g. on-port & on-ip of the connection), it is returned,
+ *     regardless if it was bound to 0.0.0.0 or an explicit
+ *     address. The reasoning is that if there's an explicit rule, it
+ *     does not really matter if the listener is bound to an interface
+ *     or to 0. The user already stated that he wants redirection
+ *     (since he added the rule).
+ *
+ * Please note that there's an overlap between what a TPROXY target
+ * and a socket match will match. Normally if you have both rules the
+ * "socket" match will be the first one, effectively all packets
+ * belonging to established connections going through that one.
+ */
+static inline struct sock *
+nf_tproxy_get_sock_v4(struct net *net, const u8 protocol,
+                     const __be32 saddr, const __be32 daddr,
+                     const __be16 sport, const __be16 dport,
+                     const struct net_device *in,
+                     const enum nf_tproxy_lookup_t lookup_type)
+{
+       struct sock *sk;
+
+       switch (protocol) {
+       case IPPROTO_TCP:
+               switch (lookup_type) {
+               case NFT_LOOKUP_LISTENER:
+                       sk = inet_lookup_listener(net, &tcp_hashinfo,
+                                                   saddr, sport,
+                                                   daddr, dport,
+                                                   in->ifindex);
+
+                       /* NOTE: we return listeners even if bound to
+                        * 0.0.0.0, those are filtered out in
+                        * xt_socket, since xt_TPROXY needs 0 bound
+                        * listeners too
+                        */
+                       break;
+               case NFT_LOOKUP_ESTABLISHED:
+                       sk = inet_lookup_established(net, &tcp_hashinfo,
+                                                   saddr, sport, daddr, dport,
+                                                   in->ifindex);
+                       break;
+               default:
+                       BUG();
+               }
+               break;
+       case IPPROTO_UDP:
+               sk = udp4_lib_lookup(net, saddr, sport, daddr, dport,
+                                    in->ifindex);
+               if (sk) {
+                       int connected = (sk->sk_state == TCP_ESTABLISHED);
+                       int wildcard = (inet_sk(sk)->inet_rcv_saddr == 0);
+
+                       /* NOTE: we return listeners even if bound to
+                        * 0.0.0.0, those are filtered out in
+                        * xt_socket, since xt_TPROXY needs 0 bound
+                        * listeners too
+                        */
+                       if ((lookup_type == NFT_LOOKUP_ESTABLISHED && (!connected || wildcard)) ||
+                           (lookup_type == NFT_LOOKUP_LISTENER && connected)) {
+                               sock_put(sk);
+                               sk = NULL;
+                       }
+               }
+               break;
+       default:
+               WARN_ON(1);
+               sk = NULL;
+       }
+
+       pr_debug("tproxy socket lookup: proto %u %08x:%u -> %08x:%u, lookup type: %d, sock %p\n",
+                protocol, ntohl(saddr), ntohs(sport), ntohl(daddr), ntohs(dport), lookup_type, sk);
+
+       return sk;
+}
+
+#ifdef XT_TPROXY_HAVE_IPV6
+static inline struct sock *
+nf_tproxy_get_sock_v6(struct net *net, const u8 protocol,
+                     const struct in6_addr *saddr, const struct in6_addr *daddr,
+                     const __be16 sport, const __be16 dport,
+                     const struct net_device *in,
+                     const enum nf_tproxy_lookup_t lookup_type)
+{
+       struct sock *sk;
+
+       switch (protocol) {
+       case IPPROTO_TCP:
+               switch (lookup_type) {
+               case NFT_LOOKUP_LISTENER:
+                       sk = inet6_lookup_listener(net, &tcp_hashinfo,
+                                                  saddr, sport,
+                                                  daddr, ntohs(dport),
+                                                  in->ifindex);
+
+                       /* NOTE: we return listeners even if bound to
+                        * 0.0.0.0, those are filtered out in
+                        * xt_socket, since xt_TPROXY needs 0 bound
+                        * listeners too
+                        */
+                       break;
+               case NFT_LOOKUP_ESTABLISHED:
+                       sk = __inet6_lookup_established(net, &tcp_hashinfo,
+                                                       saddr, sport, daddr, ntohs(dport),
+                                                       in->ifindex);
+                       break;
+               default:
+                       BUG();
+               }
+               break;
+       case IPPROTO_UDP:
+               sk = udp6_lib_lookup(net, saddr, sport, daddr, dport,
+                                    in->ifindex);
+               if (sk) {
+                       int connected = (sk->sk_state == TCP_ESTABLISHED);
+                       int wildcard = ipv6_addr_any(&inet6_sk(sk)->rcv_saddr);
+
+                       /* NOTE: we return listeners even if bound to
+                        * 0.0.0.0, those are filtered out in
+                        * xt_socket, since xt_TPROXY needs 0 bound
+                        * listeners too
+                        */
+                       if ((lookup_type == NFT_LOOKUP_ESTABLISHED && (!connected || wildcard)) ||
+                           (lookup_type == NFT_LOOKUP_LISTENER && connected)) {
+                               sock_put(sk);
+                               sk = NULL;
+                       }
+               }
+               break;
+       default:
+               WARN_ON(1);
+               sk = NULL;
+       }
+
+       pr_debug("tproxy socket lookup: proto %u %pI6:%u -> %pI6:%u, lookup type: %d, sock %p\n",
+                protocol, saddr, ntohs(sport), daddr, ntohs(dport), lookup_type, sk);
+
+       return sk;
+}
+#endif
+
 /**
  * tproxy_handle_time_wait4 - handle IPv4 TCP TIME_WAIT reopen redirections
  * @skb:       The skb being processed.
@@ -117,6 +275,15 @@ tproxy_handle_time_wait4(struct sk_buff *skb, __be32 laddr, __be16 lport,
        return sk;
 }
 
+/* assign a socket to the skb -- consumes sk */
+static void
+nf_tproxy_assign_sock(struct sk_buff *skb, struct sock *sk)
+{
+       skb_orphan(skb);
+       skb->sk = sk;
+       skb->destructor = sock_edemux;
+}
+
 static unsigned int
 tproxy_tg4(struct sk_buff *skb, __be32 laddr, __be16 lport,
           u_int32_t mark_mask, u_int32_t mark_value)
index 68ff29f608679f598f718e3018bdd8e516963c2a..fab6eea1bf382704b07449d88deaece0aa9d7d7e 100644 (file)
@@ -202,7 +202,7 @@ static int addrtype_mt_checkentry_v1(const struct xt_mtchk_param *par)
                        return -EINVAL;
                }
                if ((info->source | info->dest) >= XT_ADDRTYPE_PROHIBIT) {
-                       pr_err("ipv6 PROHIBT (THROW, NAT ..) matching not supported\n");
+                       pr_err("ipv6 PROHIBIT (THROW, NAT ..) matching not supported\n");
                        return -EINVAL;
                }
                if ((info->source | info->dest) & XT_ADDRTYPE_BROADCAST) {
index 20b15916f40363ff789d2d7312dd5fe4dedf69ab..06df2b9110f5f2b9376c19e34241c257b31e5f63 100644 (file)
 #include <net/icmp.h>
 #include <net/sock.h>
 #include <net/inet_sock.h>
-#include <net/netfilter/nf_tproxy_core.h>
 #include <net/netfilter/ipv4/nf_defrag_ipv4.h>
 
 #if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
 #define XT_SOCKET_HAVE_IPV6 1
 #include <linux/netfilter_ipv6/ip6_tables.h>
+#include <net/inet6_hashtables.h>
 #include <net/netfilter/ipv6/nf_defrag_ipv6.h>
 #endif
 
@@ -101,6 +101,43 @@ extract_icmp4_fields(const struct sk_buff *skb,
        return 0;
 }
 
+/* "socket" match based redirection (no specific rule)
+ * ===================================================
+ *
+ * There are connections with dynamic endpoints (e.g. FTP data
+ * connection) that the user is unable to add explicit rules
+ * for. These are taken care of by a generic "socket" rule. It is
+ * assumed that the proxy application is trusted to open such
+ * connections without explicit iptables rule (except of course the
+ * generic 'socket' rule). In this case the following sockets are
+ * matched in preference order:
+ *
+ *   - match: if there's a fully established connection matching the
+ *     _packet_ tuple
+ *
+ *   - match: if there's a non-zero bound listener (possibly with a
+ *     non-local address) We don't accept zero-bound listeners, since
+ *     then local services could intercept traffic going through the
+ *     box.
+ */
+static struct sock *
+xt_socket_get_sock_v4(struct net *net, const u8 protocol,
+                     const __be32 saddr, const __be32 daddr,
+                     const __be16 sport, const __be16 dport,
+                     const struct net_device *in)
+{
+       switch (protocol) {
+       case IPPROTO_TCP:
+               return __inet_lookup(net, &tcp_hashinfo,
+                                    saddr, sport, daddr, dport,
+                                    in->ifindex);
+       case IPPROTO_UDP:
+               return udp4_lib_lookup(net, saddr, sport, daddr, dport,
+                                      in->ifindex);
+       }
+       return NULL;
+}
+
 static bool
 socket_match(const struct sk_buff *skb, struct xt_action_param *par,
             const struct xt_socket_mtinfo1 *info)
@@ -156,9 +193,9 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par,
 #endif
 
        if (!sk)
-               sk = nf_tproxy_get_sock_v4(dev_net(skb->dev), protocol,
+               sk = xt_socket_get_sock_v4(dev_net(skb->dev), protocol,
                                           saddr, daddr, sport, dport,
-                                          par->in, NFT_LOOKUP_ANY);
+                                          par->in);
        if (sk) {
                bool wildcard;
                bool transparent = true;
@@ -265,6 +302,25 @@ extract_icmp6_fields(const struct sk_buff *skb,
        return 0;
 }
 
+static struct sock *
+xt_socket_get_sock_v6(struct net *net, const u8 protocol,
+                     const struct in6_addr *saddr, const struct in6_addr *daddr,
+                     const __be16 sport, const __be16 dport,
+                     const struct net_device *in)
+{
+       switch (protocol) {
+       case IPPROTO_TCP:
+               return inet6_lookup(net, &tcp_hashinfo,
+                                   saddr, sport, daddr, dport,
+                                   in->ifindex);
+       case IPPROTO_UDP:
+               return udp6_lib_lookup(net, saddr, sport, daddr, dport,
+                                      in->ifindex);
+       }
+
+       return NULL;
+}
+
 static bool
 socket_mt6_v1_v2(const struct sk_buff *skb, struct xt_action_param *par)
 {
@@ -302,9 +358,9 @@ socket_mt6_v1_v2(const struct sk_buff *skb, struct xt_action_param *par)
        }
 
        if (!sk)
-               sk = nf_tproxy_get_sock_v6(dev_net(skb->dev), tproto,
+               sk = xt_socket_get_sock_v6(dev_net(skb->dev), tproto,
                                           saddr, daddr, sport, dport,
-                                          par->in, NFT_LOOKUP_ANY);
+                                          par->in);
        if (sk) {
                bool wildcard;
                bool transparent = true;
index 0c61b59175dca5d9276c0fa65489d22cec39b435..a17dda1bbee0704935c92c7d265268fb73c724a1 100644 (file)
@@ -294,14 +294,14 @@ static void **alloc_pg_vec(struct netlink_sock *nlk,
 {
        unsigned int block_nr = req->nm_block_nr;
        unsigned int i;
-       void **pg_vec, *ptr;
+       void **pg_vec;
 
        pg_vec = kcalloc(block_nr, sizeof(void *), GFP_KERNEL);
        if (pg_vec == NULL)
                return NULL;
 
        for (i = 0; i < block_nr; i++) {
-               pg_vec[i] = ptr = alloc_one_pg_vec_page(order);
+               pg_vec[i] = alloc_one_pg_vec_page(order);
                if (pg_vec[i] == NULL)
                        goto err1;
        }
@@ -595,7 +595,7 @@ static unsigned int netlink_poll(struct file *file, struct socket *sock,
                 * for dumps is performed here. A dump is allowed to continue
                 * if at least half the ring is unused.
                 */
-               while (nlk->cb != NULL && netlink_dump_space(nlk)) {
+               while (nlk->cb_running && netlink_dump_space(nlk)) {
                        err = netlink_dump(sk);
                        if (err < 0) {
                                sk->sk_err = err;
@@ -802,18 +802,6 @@ static void netlink_ring_set_copied(struct sock *sk, struct sk_buff *skb)
 #define netlink_mmap_sendmsg(sk, msg, dst_portid, dst_group, siocb)    0
 #endif /* CONFIG_NETLINK_MMAP */
 
-static void netlink_destroy_callback(struct netlink_callback *cb)
-{
-       kfree_skb(cb->skb);
-       kfree(cb);
-}
-
-static void netlink_consume_callback(struct netlink_callback *cb)
-{
-       consume_skb(cb->skb);
-       kfree(cb);
-}
-
 static void netlink_skb_destructor(struct sk_buff *skb)
 {
 #ifdef CONFIG_NETLINK_MMAP
@@ -872,12 +860,12 @@ static void netlink_sock_destruct(struct sock *sk)
 {
        struct netlink_sock *nlk = nlk_sk(sk);
 
-       if (nlk->cb) {
-               if (nlk->cb->done)
-                       nlk->cb->done(nlk->cb);
+       if (nlk->cb_running) {
+               if (nlk->cb.done)
+                       nlk->cb.done(&nlk->cb);
 
-               module_put(nlk->cb->module);
-               netlink_destroy_callback(nlk->cb);
+               module_put(nlk->cb.module);
+               kfree_skb(nlk->cb.skb);
        }
 
        skb_queue_purge(&sk->sk_receive_queue);
@@ -2350,7 +2338,8 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
 
        skb_free_datagram(sk, skb);
 
-       if (nlk->cb && atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) {
+       if (nlk->cb_running &&
+           atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) {
                ret = netlink_dump(sk);
                if (ret) {
                        sk->sk_err = ret;
@@ -2566,13 +2555,12 @@ static int netlink_dump(struct sock *sk)
        int alloc_size;
 
        mutex_lock(nlk->cb_mutex);
-
-       cb = nlk->cb;
-       if (cb == NULL) {
+       if (!nlk->cb_running) {
                err = -EINVAL;
                goto errout_skb;
        }
 
+       cb = &nlk->cb;
        alloc_size = max_t(int, cb->min_dump_alloc, NLMSG_GOODSIZE);
 
        if (!netlink_rx_is_mmaped(sk) &&
@@ -2610,11 +2598,11 @@ static int netlink_dump(struct sock *sk)
 
        if (cb->done)
                cb->done(cb);
-       nlk->cb = NULL;
-       mutex_unlock(nlk->cb_mutex);
 
+       nlk->cb_running = false;
+       mutex_unlock(nlk->cb_mutex);
        module_put(cb->module);
-       netlink_consume_callback(cb);
+       consume_skb(cb->skb);
        return 0;
 
 errout_skb:
@@ -2632,59 +2620,51 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
        struct netlink_sock *nlk;
        int ret;
 
-       cb = kzalloc(sizeof(*cb), GFP_KERNEL);
-       if (cb == NULL)
-               return -ENOBUFS;
-
        /* Memory mapped dump requests need to be copied to avoid looping
         * on the pending state in netlink_mmap_sendmsg() while the CB hold
         * a reference to the skb.
         */
        if (netlink_skb_is_mmaped(skb)) {
                skb = skb_copy(skb, GFP_KERNEL);
-               if (skb == NULL) {
-                       kfree(cb);
+               if (skb == NULL)
                        return -ENOBUFS;
-               }
        } else
                atomic_inc(&skb->users);
 
-       cb->dump = control->dump;
-       cb->done = control->done;
-       cb->nlh = nlh;
-       cb->data = control->data;
-       cb->module = control->module;
-       cb->min_dump_alloc = control->min_dump_alloc;
-       cb->skb = skb;
-
        sk = netlink_lookup(sock_net(ssk), ssk->sk_protocol, NETLINK_CB(skb).portid);
        if (sk == NULL) {
-               netlink_destroy_callback(cb);
-               return -ECONNREFUSED;
+               ret = -ECONNREFUSED;
+               goto error_free;
        }
-       nlk = nlk_sk(sk);
 
+       nlk = nlk_sk(sk);
        mutex_lock(nlk->cb_mutex);
        /* A dump is in progress... */
-       if (nlk->cb) {
-               mutex_unlock(nlk->cb_mutex);
-               netlink_destroy_callback(cb);
+       if (nlk->cb_running) {
                ret = -EBUSY;
-               goto out;
+               goto error_unlock;
        }
        /* add reference of module which cb->dump belongs to */
-       if (!try_module_get(cb->module)) {
-               mutex_unlock(nlk->cb_mutex);
-               netlink_destroy_callback(cb);
+       if (!try_module_get(control->module)) {
                ret = -EPROTONOSUPPORT;
-               goto out;
+               goto error_unlock;
        }
 
-       nlk->cb = cb;
+       cb = &nlk->cb;
+       memset(cb, 0, sizeof(*cb));
+       cb->dump = control->dump;
+       cb->done = control->done;
+       cb->nlh = nlh;
+       cb->data = control->data;
+       cb->module = control->module;
+       cb->min_dump_alloc = control->min_dump_alloc;
+       cb->skb = skb;
+
+       nlk->cb_running = true;
+
        mutex_unlock(nlk->cb_mutex);
 
        ret = netlink_dump(sk);
-out:
        sock_put(sk);
 
        if (ret)
@@ -2694,6 +2674,13 @@ out:
         * signal not to send ACK even if it was requested.
         */
        return -EINTR;
+
+error_unlock:
+       sock_put(sk);
+       mutex_unlock(nlk->cb_mutex);
+error_free:
+       kfree_skb(skb);
+       return ret;
 }
 EXPORT_SYMBOL(__netlink_dump_start);
 
@@ -2916,14 +2903,14 @@ static int netlink_seq_show(struct seq_file *seq, void *v)
                struct sock *s = v;
                struct netlink_sock *nlk = nlk_sk(s);
 
-               seq_printf(seq, "%pK %-3d %-6u %08x %-8d %-8d %pK %-8d %-8d %-8lu\n",
+               seq_printf(seq, "%pK %-3d %-6u %08x %-8d %-8d %d %-8d %-8d %-8lu\n",
                           s,
                           s->sk_protocol,
                           nlk->portid,
                           nlk->groups ? (u32)nlk->groups[0] : 0,
                           sk_rmem_alloc_get(s),
                           sk_wmem_alloc_get(s),
-                          nlk->cb,
+                          nlk->cb_running,
                           atomic_read(&s->sk_refcnt),
                           atomic_read(&s->sk_drops),
                           sock_i_ino(s)
index eaa88d187cdcebc65152296213c3a82aaa6ab5ad..acbd774eeb7c5afd568d8eb9aa375ecc81949793 100644 (file)
@@ -32,7 +32,8 @@ struct netlink_sock {
        unsigned long           *groups;
        unsigned long           state;
        wait_queue_head_t       wait;
-       struct netlink_callback *cb;
+       bool                    cb_running;
+       struct netlink_callback cb;
        struct mutex            *cb_mutex;
        struct mutex            cb_def_mutex;
        void                    (*netlink_rcv)(struct sk_buff *skb);
index 1d074dd1650ff2fd326dd141eedb828d812a12e6..e92923cf3e0374f950417e5039ba2e61369c8e27 100644 (file)
@@ -77,11 +77,19 @@ error:
        return rc;
 }
 
-int nfc_fw_download_done(struct nfc_dev *dev, const char *firmware_name)
+/**
+ * nfc_fw_download_done - inform that a firmware download was completed
+ *
+ * @dev: The nfc device to which firmware was downloaded
+ * @firmware_name: The firmware filename
+ * @result: The positive value of a standard errno value
+ */
+int nfc_fw_download_done(struct nfc_dev *dev, const char *firmware_name,
+                        u32 result)
 {
        dev->fw_download_in_progress = false;
 
-       return nfc_genl_fw_download_done(dev, firmware_name);
+       return nfc_genl_fw_download_done(dev, firmware_name, result);
 }
 EXPORT_SYMBOL(nfc_fw_download_done);
 
@@ -129,7 +137,7 @@ int nfc_dev_up(struct nfc_dev *dev)
        /* We have to enable the device before discovering SEs */
        if (dev->ops->discover_se) {
                rc = dev->ops->discover_se(dev);
-               if (!rc)
+               if (rc)
                        pr_warn("SE discovery failed\n");
        }
 
@@ -575,12 +583,14 @@ int nfc_enable_se(struct nfc_dev *dev, u32 se_idx)
                goto error;
        }
 
-       if (se->type == NFC_SE_ENABLED) {
+       if (se->state == NFC_SE_ENABLED) {
                rc = -EALREADY;
                goto error;
        }
 
        rc = dev->ops->enable_se(dev, se_idx);
+       if (rc >= 0)
+               se->state = NFC_SE_ENABLED;
 
 error:
        device_unlock(&dev->dev);
@@ -618,12 +628,14 @@ int nfc_disable_se(struct nfc_dev *dev, u32 se_idx)
                goto error;
        }
 
-       if (se->type == NFC_SE_DISABLED) {
+       if (se->state == NFC_SE_DISABLED) {
                rc = -EALREADY;
                goto error;
        }
 
        rc = dev->ops->disable_se(dev, se_idx);
+       if (rc >= 0)
+               se->state = NFC_SE_DISABLED;
 
 error:
        device_unlock(&dev->dev);
index fe66908401f55ed5f608a75b559e5fad1ff15548..d07ca4c5cf8c988c6240d0f6d9c666fe2ef76c4a 100644 (file)
@@ -717,7 +717,7 @@ static int hci_disable_se(struct nfc_dev *nfc_dev, u32 se_idx)
        struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev);
 
        if (hdev->ops->disable_se)
-               return hdev->ops->enable_se(hdev, se_idx);
+               return hdev->ops->disable_se(hdev, se_idx);
 
        return 0;
 }
index f16fd59d41607d80aeeae916ae3d43153a0a5b4e..68063b2025da2750519dac5a652cdbbea36dac74 100644 (file)
@@ -1114,7 +1114,8 @@ static int nfc_genl_fw_download(struct sk_buff *skb, struct genl_info *info)
        return rc;
 }
 
-int nfc_genl_fw_download_done(struct nfc_dev *dev, const char *firmware_name)
+int nfc_genl_fw_download_done(struct nfc_dev *dev, const char *firmware_name,
+                             u32 result)
 {
        struct sk_buff *msg;
        void *hdr;
@@ -1129,6 +1130,7 @@ int nfc_genl_fw_download_done(struct nfc_dev *dev, const char *firmware_name)
                goto free_msg;
 
        if (nla_put_string(msg, NFC_ATTR_FIRMWARE_NAME, firmware_name) ||
+           nla_put_u32(msg, NFC_ATTR_FIRMWARE_DOWNLOAD_STATUS, result) ||
            nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx))
                goto nla_put_failure;
 
@@ -1191,6 +1193,91 @@ static int nfc_genl_disable_se(struct sk_buff *skb, struct genl_info *info)
        return rc;
 }
 
+static int nfc_genl_send_se(struct sk_buff *msg, struct nfc_dev *dev,
+                               u32 portid, u32 seq,
+                               struct netlink_callback *cb,
+                               int flags)
+{
+       void *hdr;
+       struct nfc_se *se, *n;
+
+       list_for_each_entry_safe(se, n, &dev->secure_elements, list) {
+               hdr = genlmsg_put(msg, portid, seq, &nfc_genl_family, flags,
+                                 NFC_CMD_GET_SE);
+               if (!hdr)
+                       goto nla_put_failure;
+
+               if (cb)
+                       genl_dump_check_consistent(cb, hdr, &nfc_genl_family);
+
+               if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx) ||
+                   nla_put_u32(msg, NFC_ATTR_SE_INDEX, se->idx) ||
+                   nla_put_u8(msg, NFC_ATTR_SE_TYPE, se->type))
+                       goto nla_put_failure;
+
+               if (genlmsg_end(msg, hdr) < 0)
+                       goto nla_put_failure;
+       }
+
+       return 0;
+
+nla_put_failure:
+       genlmsg_cancel(msg, hdr);
+       return -EMSGSIZE;
+}
+
+static int nfc_genl_dump_ses(struct sk_buff *skb,
+                                struct netlink_callback *cb)
+{
+       struct class_dev_iter *iter = (struct class_dev_iter *) cb->args[0];
+       struct nfc_dev *dev = (struct nfc_dev *) cb->args[1];
+       bool first_call = false;
+
+       if (!iter) {
+               first_call = true;
+               iter = kmalloc(sizeof(struct class_dev_iter), GFP_KERNEL);
+               if (!iter)
+                       return -ENOMEM;
+               cb->args[0] = (long) iter;
+       }
+
+       mutex_lock(&nfc_devlist_mutex);
+
+       cb->seq = nfc_devlist_generation;
+
+       if (first_call) {
+               nfc_device_iter_init(iter);
+               dev = nfc_device_iter_next(iter);
+       }
+
+       while (dev) {
+               int rc;
+
+               rc = nfc_genl_send_se(skb, dev, NETLINK_CB(cb->skb).portid,
+                                         cb->nlh->nlmsg_seq, cb, NLM_F_MULTI);
+               if (rc < 0)
+                       break;
+
+               dev = nfc_device_iter_next(iter);
+       }
+
+       mutex_unlock(&nfc_devlist_mutex);
+
+       cb->args[1] = (long) dev;
+
+       return skb->len;
+}
+
+static int nfc_genl_dump_ses_done(struct netlink_callback *cb)
+{
+       struct class_dev_iter *iter = (struct class_dev_iter *) cb->args[0];
+
+       nfc_device_iter_exit(iter);
+       kfree(iter);
+
+       return 0;
+}
+
 static struct genl_ops nfc_genl_ops[] = {
        {
                .cmd = NFC_CMD_GET_DEVICE,
@@ -1265,6 +1352,12 @@ static struct genl_ops nfc_genl_ops[] = {
                .doit = nfc_genl_disable_se,
                .policy = nfc_genl_policy,
        },
+       {
+               .cmd = NFC_CMD_GET_SE,
+               .dumpit = nfc_genl_dump_ses,
+               .done = nfc_genl_dump_ses_done,
+               .policy = nfc_genl_policy,
+       },
 };
 
 
index 820a7850c36ac7e49012869a88472a96358e2220..aaf606fc1faa5d2d9a46a691fed2fa3840ea094a 100644 (file)
@@ -124,9 +124,8 @@ static inline void nfc_device_iter_exit(struct class_dev_iter *iter)
 }
 
 int nfc_fw_download(struct nfc_dev *dev, const char *firmware_name);
-int nfc_genl_fw_download_done(struct nfc_dev *dev, const char *firmware_name);
-
-int nfc_fw_download_done(struct nfc_dev *dev, const char *firmware_name);
+int nfc_genl_fw_download_done(struct nfc_dev *dev, const char *firmware_name,
+                             u32 result);
 
 int nfc_dev_up(struct nfc_dev *dev);
 
index 27ee56b688a39eea9650f1dd0b434a3c42aa515b..bed30e69baa76590cc6afbd9bf9fdb8cabe4f2e1 100644 (file)
@@ -40,3 +40,16 @@ config OPENVSWITCH_GRE
          Say N to exclude this support and reduce the binary size.
 
          If unsure, say Y.
+
+config OPENVSWITCH_VXLAN
+       bool "Open vSwitch VXLAN tunneling support"
+       depends on INET
+       depends on OPENVSWITCH
+       depends on VXLAN && !(OPENVSWITCH=y && VXLAN=m)
+       default y
+       ---help---
+         If you say Y here, then the Open vSwitch will be able to create vxlan vports.
+
+         Say N to exclude this support and reduce the binary size.
+
+         If unsure, say Y.
index 01bddb2991e3578e74ee7894bc6eccb175905002..82e4ee54a44b4ad5039018a2862b039afaa7f4b1 100644 (file)
@@ -13,3 +13,7 @@ openvswitch-y := \
        vport-gre.o \
        vport-internal_dev.o \
        vport-netdev.o
+
+ifneq ($(CONFIG_OPENVSWITCH_VXLAN),)
+openvswitch-y += vport-vxlan.o
+endif
diff --git a/net/openvswitch/vport-vxlan.c b/net/openvswitch/vport-vxlan.c
new file mode 100644 (file)
index 0000000..36848bd
--- /dev/null
@@ -0,0 +1,204 @@
+/*
+ * Copyright (c) 2013 Nicira, Inc.
+ * Copyright (c) 2013 Cisco Systems, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/net.h>
+#include <linux/rculist.h>
+#include <linux/udp.h>
+
+#include <net/icmp.h>
+#include <net/ip.h>
+#include <net/udp.h>
+#include <net/ip_tunnels.h>
+#include <net/udp.h>
+#include <net/rtnetlink.h>
+#include <net/route.h>
+#include <net/dsfield.h>
+#include <net/inet_ecn.h>
+#include <net/net_namespace.h>
+#include <net/netns/generic.h>
+#include <net/vxlan.h>
+
+#include "datapath.h"
+#include "vport.h"
+
+/**
+ * struct vxlan_port - Keeps track of open UDP ports
+ * @vs: vxlan_sock created for the port.
+ * @name: vport name.
+ */
+struct vxlan_port {
+       struct vxlan_sock *vs;
+       char name[IFNAMSIZ];
+};
+
+static inline struct vxlan_port *vxlan_vport(const struct vport *vport)
+{
+       return vport_priv(vport);
+}
+
+/* Called with rcu_read_lock and BH disabled. */
+static void vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb, __be32 vx_vni)
+{
+       struct ovs_key_ipv4_tunnel tun_key;
+       struct vport *vport = vs->data;
+       struct iphdr *iph;
+       __be64 key;
+
+       /* Save outer tunnel values */
+       iph = ip_hdr(skb);
+       key = cpu_to_be64(ntohl(vx_vni) >> 8);
+       ovs_flow_tun_key_init(&tun_key, iph, key, TUNNEL_KEY);
+
+       ovs_vport_receive(vport, skb, &tun_key);
+}
+
+static int vxlan_get_options(const struct vport *vport, struct sk_buff *skb)
+{
+       struct vxlan_port *vxlan_port = vxlan_vport(vport);
+       __be16 dst_port = inet_sk(vxlan_port->vs->sock->sk)->inet_sport;
+
+       if (nla_put_u16(skb, OVS_TUNNEL_ATTR_DST_PORT, ntohs(dst_port)))
+               return -EMSGSIZE;
+       return 0;
+}
+
+static void vxlan_tnl_destroy(struct vport *vport)
+{
+       struct vxlan_port *vxlan_port = vxlan_vport(vport);
+
+       vxlan_sock_release(vxlan_port->vs);
+
+       ovs_vport_deferred_free(vport);
+}
+
+static struct vport *vxlan_tnl_create(const struct vport_parms *parms)
+{
+       struct net *net = ovs_dp_get_net(parms->dp);
+       struct nlattr *options = parms->options;
+       struct vxlan_port *vxlan_port;
+       struct vxlan_sock *vs;
+       struct vport *vport;
+       struct nlattr *a;
+       u16 dst_port;
+       int err;
+
+       if (!options) {
+               err = -EINVAL;
+               goto error;
+       }
+       a = nla_find_nested(options, OVS_TUNNEL_ATTR_DST_PORT);
+       if (a && nla_len(a) == sizeof(u16)) {
+               dst_port = nla_get_u16(a);
+       } else {
+               /* Require destination port from userspace. */
+               err = -EINVAL;
+               goto error;
+       }
+
+       vport = ovs_vport_alloc(sizeof(struct vxlan_port),
+                               &ovs_vxlan_vport_ops, parms);
+       if (IS_ERR(vport))
+               return vport;
+
+       vxlan_port = vxlan_vport(vport);
+       strncpy(vxlan_port->name, parms->name, IFNAMSIZ);
+
+       vs = vxlan_sock_add(net, htons(dst_port), vxlan_rcv, vport, true);
+       if (IS_ERR(vs)) {
+               ovs_vport_free(vport);
+               return (void *)vs;
+       }
+       vxlan_port->vs = vs;
+
+       return vport;
+
+error:
+       return ERR_PTR(err);
+}
+
+static int vxlan_tnl_send(struct vport *vport, struct sk_buff *skb)
+{
+       struct net *net = ovs_dp_get_net(vport->dp);
+       struct vxlan_port *vxlan_port = vxlan_vport(vport);
+       __be16 dst_port = inet_sk(vxlan_port->vs->sock->sk)->inet_sport;
+       struct rtable *rt;
+       struct flowi4 fl;
+       __be16 src_port;
+       int port_min;
+       int port_max;
+       __be16 df;
+       int err;
+
+       if (unlikely(!OVS_CB(skb)->tun_key)) {
+               err = -EINVAL;
+               goto error;
+       }
+
+       /* Route lookup */
+       memset(&fl, 0, sizeof(fl));
+       fl.daddr = OVS_CB(skb)->tun_key->ipv4_dst;
+       fl.saddr = OVS_CB(skb)->tun_key->ipv4_src;
+       fl.flowi4_tos = RT_TOS(OVS_CB(skb)->tun_key->ipv4_tos);
+       fl.flowi4_mark = skb->mark;
+       fl.flowi4_proto = IPPROTO_UDP;
+
+       rt = ip_route_output_key(net, &fl);
+       if (IS_ERR(rt)) {
+               err = PTR_ERR(rt);
+               goto error;
+       }
+
+       df = OVS_CB(skb)->tun_key->tun_flags & TUNNEL_DONT_FRAGMENT ?
+               htons(IP_DF) : 0;
+
+       skb->local_df = 1;
+
+       inet_get_local_port_range(&port_min, &port_max);
+       src_port = vxlan_src_port(port_min, port_max, skb);
+
+       err = vxlan_xmit_skb(net, vxlan_port->vs, rt, skb,
+                            fl.saddr, OVS_CB(skb)->tun_key->ipv4_dst,
+                            OVS_CB(skb)->tun_key->ipv4_tos,
+                            OVS_CB(skb)->tun_key->ipv4_ttl, df,
+                            src_port, dst_port,
+                            htonl(be64_to_cpu(OVS_CB(skb)->tun_key->tun_id) << 8));
+       if (err < 0)
+               ip_rt_put(rt);
+error:
+       return err;
+}
+
+static const char *vxlan_get_name(const struct vport *vport)
+{
+       struct vxlan_port *vxlan_port = vxlan_vport(vport);
+       return vxlan_port->name;
+}
+
+const struct vport_ops ovs_vxlan_vport_ops = {
+       .type           = OVS_VPORT_TYPE_VXLAN,
+       .create         = vxlan_tnl_create,
+       .destroy        = vxlan_tnl_destroy,
+       .get_name       = vxlan_get_name,
+       .get_options    = vxlan_get_options,
+       .send           = vxlan_tnl_send,
+};
index d4c7fa04ce08c2a148c01488d518776c4fcbb066..d69e0c06dfde61d75c1700b53f3ad06293aec667 100644 (file)
@@ -42,6 +42,9 @@ static const struct vport_ops *vport_ops_list[] = {
 #ifdef CONFIG_OPENVSWITCH_GRE
        &ovs_gre_vport_ops,
 #endif
+#ifdef CONFIG_OPENVSWITCH_VXLAN
+       &ovs_vxlan_vport_ops,
+#endif
 };
 
 /* Protected by RCU read lock for reading, ovs_mutex for writing. */
index 376045c42f8b43ba156acb2fd723398b108208b6..1a9fbcec6e1bf9c1d9bc95a3ec9d711c02865a82 100644 (file)
@@ -199,6 +199,7 @@ void ovs_vport_record_error(struct vport *, enum vport_err_type err_type);
 extern const struct vport_ops ovs_netdev_vport_ops;
 extern const struct vport_ops ovs_internal_vport_ops;
 extern const struct vport_ops ovs_gre_vport_ops;
+extern const struct vport_ops ovs_vxlan_vport_ops;
 
 static inline void ovs_skb_postpush_rcsum(struct sk_buff *skb,
                                      const void *start, unsigned int len)
index 4b66c752eae5d99b2bfe5fa7832a1301d20519ef..1fdf9ab91c3fad327c4cd14bd67ed328fde93be7 100644 (file)
@@ -2181,7 +2181,7 @@ static struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad,
                linear = len;
 
        skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
-                                  err);
+                                  err, 0);
        if (!skb)
                return NULL;
 
@@ -2638,51 +2638,6 @@ out:
        return err;
 }
 
-static int packet_recv_error(struct sock *sk, struct msghdr *msg, int len)
-{
-       struct sock_exterr_skb *serr;
-       struct sk_buff *skb, *skb2;
-       int copied, err;
-
-       err = -EAGAIN;
-       skb = skb_dequeue(&sk->sk_error_queue);
-       if (skb == NULL)
-               goto out;
-
-       copied = skb->len;
-       if (copied > len) {
-               msg->msg_flags |= MSG_TRUNC;
-               copied = len;
-       }
-       err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
-       if (err)
-               goto out_free_skb;
-
-       sock_recv_timestamp(msg, sk, skb);
-
-       serr = SKB_EXT_ERR(skb);
-       put_cmsg(msg, SOL_PACKET, PACKET_TX_TIMESTAMP,
-                sizeof(serr->ee), &serr->ee);
-
-       msg->msg_flags |= MSG_ERRQUEUE;
-       err = copied;
-
-       /* Reset and regenerate socket error */
-       spin_lock_bh(&sk->sk_error_queue.lock);
-       sk->sk_err = 0;
-       if ((skb2 = skb_peek(&sk->sk_error_queue)) != NULL) {
-               sk->sk_err = SKB_EXT_ERR(skb2)->ee.ee_errno;
-               spin_unlock_bh(&sk->sk_error_queue.lock);
-               sk->sk_error_report(sk);
-       } else
-               spin_unlock_bh(&sk->sk_error_queue.lock);
-
-out_free_skb:
-       kfree_skb(skb);
-out:
-       return err;
-}
-
 /*
  *     Pull a packet from our receive queue and hand it to the user.
  *     If necessary we block.
@@ -2708,7 +2663,8 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
 #endif
 
        if (flags & MSG_ERRQUEUE) {
-               err = packet_recv_error(sk, msg, len);
+               err = sock_recv_errqueue(sk, msg, len,
+                                        SOL_PACKET, PACKET_TX_TIMESTAMP);
                goto out;
        }
 
@@ -3259,9 +3215,11 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
 
                if (po->tp_version == TPACKET_V3) {
                        lv = sizeof(struct tpacket_stats_v3);
+                       st.stats3.tp_packets += st.stats3.tp_drops;
                        data = &st.stats3;
                } else {
                        lv = sizeof(struct tpacket_stats);
+                       st.stats1.tp_packets += st.stats1.tp_drops;
                        data = &st.stats1;
                }
 
index 1afd1381cdc75a576bffaac360685e3ab7bfa6d4..77e38f733496c407f2b8a351fac733d3195a655a 100644 (file)
@@ -793,7 +793,7 @@ static int pn_res_seq_show(struct seq_file *seq, void *v)
                struct sock **psk = v;
                struct sock *sk = *psk;
 
-               seq_printf(seq, "%02X %5d %lu%n",
+               seq_printf(seq, "%02X %5u %lu%n",
                           (int) (psk - pnres.sk),
                           from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)),
                           sock_i_ino(sk), &len);
index d11ac79246e46d8388d6bee79de663928aa0f86e..cf5b145902e5e0a4616122bbdcfecfb1662c7ec2 100644 (file)
@@ -30,6 +30,7 @@ struct rfkill_regulator_data {
 static int rfkill_regulator_set_block(void *data, bool blocked)
 {
        struct rfkill_regulator_data *rfkill_data = data;
+       int ret = 0;
 
        pr_debug("%s: blocked: %d\n", __func__, blocked);
 
@@ -40,15 +41,16 @@ static int rfkill_regulator_set_block(void *data, bool blocked)
                }
        } else {
                if (!rfkill_data->reg_enabled) {
-                       regulator_enable(rfkill_data->vcc);
-                       rfkill_data->reg_enabled = true;
+                       ret = regulator_enable(rfkill_data->vcc);
+                       if (!ret)
+                               rfkill_data->reg_enabled = true;
                }
        }
 
        pr_debug("%s: regulator_is_enabled after set_block: %d\n", __func__,
                regulator_is_enabled(rfkill_data->vcc));
 
-       return 0;
+       return ret;
 }
 
 static struct rfkill_ops rfkill_regulator_ops = {
index 3a294eb98d6178733edc49c98da4069ccf643a39..867b4a3e39800fb44819e3cfd68ad0fb30d6ea05 100644 (file)
 #include <net/sock.h>
 #include <net/cls_cgroup.h>
 
-static inline struct cgroup_cls_state *cgrp_cls_state(struct cgroup *cgrp)
+static inline struct cgroup_cls_state *css_cls_state(struct cgroup_subsys_state *css)
 {
-       return container_of(cgroup_subsys_state(cgrp, net_cls_subsys_id),
-                           struct cgroup_cls_state, css);
+       return css ? container_of(css, struct cgroup_cls_state, css) : NULL;
 }
 
 static inline struct cgroup_cls_state *task_cls_state(struct task_struct *p)
 {
-       return container_of(task_subsys_state(p, net_cls_subsys_id),
-                           struct cgroup_cls_state, css);
+       return css_cls_state(task_css(p, net_cls_subsys_id));
 }
 
-static struct cgroup_subsys_state *cgrp_css_alloc(struct cgroup *cgrp)
+static struct cgroup_subsys_state *
+cgrp_css_alloc(struct cgroup_subsys_state *parent_css)
 {
        struct cgroup_cls_state *cs;
 
@@ -45,17 +44,19 @@ static struct cgroup_subsys_state *cgrp_css_alloc(struct cgroup *cgrp)
        return &cs->css;
 }
 
-static int cgrp_css_online(struct cgroup *cgrp)
+static int cgrp_css_online(struct cgroup_subsys_state *css)
 {
-       if (cgrp->parent)
-               cgrp_cls_state(cgrp)->classid =
-                       cgrp_cls_state(cgrp->parent)->classid;
+       struct cgroup_cls_state *cs = css_cls_state(css);
+       struct cgroup_cls_state *parent = css_cls_state(css_parent(css));
+
+       if (parent)
+               cs->classid = parent->classid;
        return 0;
 }
 
-static void cgrp_css_free(struct cgroup *cgrp)
+static void cgrp_css_free(struct cgroup_subsys_state *css)
 {
-       kfree(cgrp_cls_state(cgrp));
+       kfree(css_cls_state(css));
 }
 
 static int update_classid(const void *v, struct file *file, unsigned n)
@@ -67,12 +68,13 @@ static int update_classid(const void *v, struct file *file, unsigned n)
        return 0;
 }
 
-static void cgrp_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
+static void cgrp_attach(struct cgroup_subsys_state *css,
+                       struct cgroup_taskset *tset)
 {
        struct task_struct *p;
        void *v;
 
-       cgroup_taskset_for_each(p, cgrp, tset) {
+       cgroup_taskset_for_each(p, css, tset) {
                task_lock(p);
                v = (void *)(unsigned long)task_cls_classid(p);
                iterate_fd(p->files, 0, update_classid, v);
@@ -80,14 +82,15 @@ static void cgrp_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
        }
 }
 
-static u64 read_classid(struct cgroup *cgrp, struct cftype *cft)
+static u64 read_classid(struct cgroup_subsys_state *css, struct cftype *cft)
 {
-       return cgrp_cls_state(cgrp)->classid;
+       return css_cls_state(css)->classid;
 }
 
-static int write_classid(struct cgroup *cgrp, struct cftype *cft, u64 value)
+static int write_classid(struct cgroup_subsys_state *css, struct cftype *cft,
+                        u64 value)
 {
-       cgrp_cls_state(cgrp)->classid = (u32) value;
+       css_cls_state(css)->classid = (u32) value;
        return 0;
 }
 
index ef53ab8d0aaec69628fe6bf497ced92dd5ebc44c..ddd73cb2d7ba4080f0e8f75ddc715cf9cb2a1587 100644 (file)
@@ -438,7 +438,8 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt)
        if (mask != q->tab_mask) {
                struct sk_buff **ntab;
 
-               ntab = kcalloc(mask + 1, sizeof(struct sk_buff *), GFP_KERNEL);
+               ntab = kcalloc(mask + 1, sizeof(struct sk_buff *),
+                              GFP_KERNEL | __GFP_NOWARN);
                if (!ntab)
                        ntab = vzalloc((mask + 1) * sizeof(struct sk_buff *));
                if (!ntab)
index 82f6016d89abac391a8f0c03fdda7be6db2372b7..a6d788d45216a6f286e5aaea0cdb0587cd0f7848 100644 (file)
@@ -412,12 +412,9 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
        /* If a delay is expected, orphan the skb. (orphaning usually takes
         * place at TX completion time, so _before_ the link transit delay)
-        * Ideally, this orphaning should be done after the rate limiting
-        * module, because this breaks TCP Small Queue, and other mechanisms
-        * based on socket sk_wmem_alloc.
         */
        if (q->latency || q->jitter)
-               skb_orphan(skb);
+               skb_orphan_partial(skb);
 
        /*
         * If we need to duplicate packet, then re-insert at top of the
index ab67efc64b2490efba497e93db105bf9ed25b117..cef509985192a041f8d437adc35cd540e32f58bd 100644 (file)
  *
  * Please send any bug reports or fixes you make to the
  * email address(es):
- *    lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- *    http://www.sf.net/projects/lksctp
+ *    lksctp developers <linux-sctp@vger.kernel.org>
  *
  * Written or modified by:
  *    La Monte H.P. Yarroll <piggy@acm.org>
@@ -43,9 +40,6 @@
  *    Daisy Chang          <daisyc@us.ibm.com>
  *    Ryan Layer           <rmlayer@us.ibm.com>
  *    Kevin Gao             <kevin.gao@intel.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
  */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
index ba1dfc3f8def25701bca3546c883677b03088f5f..8c4fa5dec8245e984ddf637134ca82265c5ddfe5 100644 (file)
  *
  * Please send any bug reports or fixes you make to the
  * email address(es):
- *    lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- *    http://www.sf.net/projects/lksctp
+ *    lksctp developers <linux-sctp@vger.kernel.org>
  *
  * Written or modified by:
  *   Vlad Yasevich     <vladislav.yasevich@hp.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
  */
 
 #include <linux/slab.h>
index 64977ea0f9c55e02988377ef6e2de73693607dea..077bb070052bac32e551cef707b0a6d6e3f65511 100644 (file)
  *
  * Please send any bug reports or fixes you make to the
  * email address(es):
- *    lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- *    http://www.sf.net/projects/lksctp
+ *    lksctp developers <linux-sctp@vger.kernel.org>
  *
  * Written or modified by:
  *    La Monte H.P. Yarroll <piggy@acm.org>
  *    Karl Knutson          <karl@athena.chicago.il.us>
  *    Jon Grimm             <jgrimm@us.ibm.com>
  *    Daisy Chang           <daisyc@us.ibm.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
  */
 
 #include <linux/types.h>
index 5780565f5b7d0c885faacf86d96a742e1ebbdae5..bd0bdd0ba8f1ca956f8ec4f61c409f089ac6cbd9 100644 (file)
  *
  * Please send any bug reports or fixes you make to the
  * email address(es):
- *    lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- *    http://www.sf.net/projects/lksctp
+ *    lksctp developers <linux-sctp@vger.kernel.org>
  *
  * Written or modified by:
  *    Jon Grimm             <jgrimm@us.ibm.com>
  *    Sridhar Samudrala     <sri@us.ibm.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
  */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
index c0044019db9eca2b84eaa9b11f28500bf0fb4086..3d9a9ff69c036e6f83d3b85470458647f413bf5c 100644 (file)
  *
  * Please send any bug reports or fixes you make to the
  * email address(es):
- *    lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- *    http://www.sf.net/projects/lksctp
+ *    lksctp developers <linux-sctp@vger.kernel.org>
  *
  * Written or modified by:
  *    La Monte H.P. Yarroll <piggy@acm.org>
  *    Karl Knutson <karl@athena.chicago.il.us>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
  */
 
 #include <linux/types.h>
index f4998780d6df1171f4f1e616fc328dd92431c030..e89015d8935a160f3e0f3e571a976d1cd4823993 100644 (file)
  *
  * Please send any bug reports or fixes you make to the
  * email address(es):
- *    lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- *    http://www.sf.net/projects/lksctp
+ *    lksctp developers <linux-sctp@vger.kernel.org>
  *
  * Written or modified by:
  *    La Monte H.P. Yarroll <piggy@acm.org>
@@ -40,9 +37,6 @@
  *    Jon Grimm             <jgrimm@us.ibm.com>
  *    Daisy Chang          <daisyc@us.ibm.com>
  *    Sridhar Samudrala            <sri@us.ibm.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
  */
 
 #include <net/sctp/sctp.h>
index 9e3d257de0e0814274cf852ae883835bb4c72eac..09b8daac87c8039f2c5b33e04b109a44e6006038 100644 (file)
  *
  * Please send any bug reports or fixes you make to the
  * email address(es):
- *    lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- *    http://www.sf.net/projects/lksctp
+ *    lksctp developers <linux-sctp@vger.kernel.org>
  *
  * Written or modified by:
  *    La Monte H.P. Yarroll <piggy@acm.org>
@@ -40,9 +37,6 @@
  *    Jon Grimm <jgrimm@austin.ibm.com>
  *    Daisy Chang <daisyc@us.ibm.com>
  *    Dajiang Zhang <dajiang.zhang@nokia.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
  */
 
 #include <linux/types.h>
index 3fa4d858c35a508b56cf56bce9a3c0078c6379c0..5f2068679f8339b8a85b85cf2dc7b185a663dd9c 100644 (file)
  *
  * Please send any bug reports or fixes you make to the
  * email address(es):
- *    lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- *    http://www.sf.net/projects/lksctp
+ *    lksctp developers <linux-sctp@vger.kernel.org>
  *
  * Written or modified by:
  *    La Monte H.P. Yarroll <piggy@acm.org>
@@ -43,9 +40,6 @@
  *    Daisy Chang <daisyc@us.ibm.com>
  *    Sridhar Samudrala <sri@us.ibm.com>
  *    Ardelle Fan <ardelle.fan@intel.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
  */
 
 #include <linux/types.h>
@@ -87,15 +81,7 @@ static inline int sctp_rcv_checksum(struct net *net, struct sk_buff *skb)
 {
        struct sctphdr *sh = sctp_hdr(skb);
        __le32 cmp = sh->checksum;
-       struct sk_buff *list;
-       __le32 val;
-       __u32 tmp = sctp_start_cksum((__u8 *)sh, skb_headlen(skb));
-
-       skb_walk_frags(skb, list)
-               tmp = sctp_update_cksum((__u8 *)list->data, skb_headlen(list),
-                                       tmp);
-
-       val = sctp_end_cksum(tmp);
+       __le32 val = sctp_compute_cksum(skb, 0);
 
        if (val != cmp) {
                /* CRC failure, dump it. */
index cb25f040fed03d85d7bf7b9b7248678655e1a716..5856932fdc38906ae78b3da2e170f0699634b24e 100644 (file)
  *
  * Please send any bug reports or fixes you make to the
  * email address(es):
- *    lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- *    http://www.sf.net/projects/lksctp
+ *    lksctp developers <linux-sctp@vger.kernel.org>
  *
  * Written or modified by:
  *    La Monte H.P. Yarroll <piggy@acm.org>
  *    Karl Knutson <karl@athena.chicago.il.us>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
  */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
index 09ffcc912d236426b5c5f7724d7145db31b061bf..da613ceae28cc95b38dc1a3d7366c2f38b348d9b 100644 (file)
  *
  * Please send any bug reports or fixes you make to the
  * email address(es):
- *    lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- *    http://www.sf.net/projects/lksctp
+ *    lksctp developers <linux-sctp@vger.kernel.org>
  *
  * Written or modified by:
  *    Le Yanqun                    <yanqun.le@nokia.com>
@@ -42,9 +39,6 @@
  *
  * Based on:
  *     linux/net/ipv6/tcp_ipv6.c
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
  */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -351,7 +345,7 @@ out:
 
                rt = (struct rt6_info *)dst;
                t->dst = dst;
-
+               t->dst_cookie = rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0;
                pr_debug("rt6_dst:%pI6 rt6_src:%pI6\n", &rt->rt6i_dst.addr,
                         &fl6->saddr);
        } else {
index fe012c44f8dff15e4882165e2718b11f919cf260..5ea573b37648b2591341b980c3ff1ab1632d202d 100644 (file)
  *
  * Please send any bug reports or fixes you make to the
  * email address(es):
- *    lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- *    http://www.sf.net/projects/lksctp
+ *    lksctp developers <linux-sctp@vger.kernel.org>
  *
  * Written or modified by:
  *    Jon Grimm             <jgrimm@us.ibm.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
  */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
index a46d1eb41762801e2d95011bf3af8327c57ebac2..0ac3a65daccb71cd78ae5e2c52696c33df7e16e4 100644 (file)
  *
  * Please send any bug reports or fixes you make to the
  * email address(es):
- *    lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- *    http://www.sf.net/projects/lksctp
+ *    lksctp developers <linux-sctp@vger.kernel.org>
  *
  * Written or modified by:
  *    La Monte H.P. Yarroll <piggy@acm.org>
  *    Karl Knutson          <karl@athena.chicago.il.us>
  *    Jon Grimm             <jgrimm@austin.ibm.com>
  *    Sridhar Samudrala     <sri@us.ibm.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
  */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
index ef9e2bbc0f2f8d1dc497166b16bb6ea1d96c1124..94df7587786992fa0a6341caaed284beb1df42e0 100644 (file)
  *
  * Please send any bug reports or fixes you make to the
  * email address(es):
- *    lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- *    http://www.sf.net/projects/lksctp
+ *    lksctp developers <linux-sctp@vger.kernel.org>
  *
  * Written or modified by:
  *    La Monte H.P. Yarroll <piggy@acm.org>
@@ -41,9 +38,6 @@
  *    Hui Huang            <hui.huang@nokia.com>
  *    Sridhar Samudrala     <sri@us.ibm.com>
  *    Jon Grimm             <jgrimm@us.ibm.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
  */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
index 794bb14decdea60ec58e68a59bc5a6b2feda4a01..ce1ffd811775b414b4d79f3a7b55a5145d599bc9 100644 (file)
  *
  * Please send any bug reports or fixes you make to the
  * email address(es):
- *    lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- *    http://www.sf.net/projects/lksctp
+ *    lksctp developers <linux-sctp@vger.kernel.org>
  *
  * Written or modified by:
  *    La Monte H.P. Yarroll <piggy@acm.org>
@@ -40,9 +37,6 @@
  *    Karl Knutson          <karl@athena.chicago.il.us>
  *    Ardelle Fan          <ardelle.fan@intel.com>
  *    Kevin Gao             <kevin.gao@intel.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
  */
 
 #include <linux/types.h>
index 62526c4770505e0f741123bd009e91a90781f587..0c06421568427e4995ea0798c9ca8535ca5ff9b5 100644 (file)
  *
  * Please send any bug reports or fixes you make to the
  * email address(es):
- *    lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- *    http://www.sf.net/projects/lksctp
+ *    lksctp developers <linux-sctp@vger.kernel.org>
  *
  * Written or modified by:
  *    Sridhar Samudrala <sri@us.ibm.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
  */
 
 #include <linux/types.h>
@@ -232,7 +226,7 @@ static int sctp_eps_seq_show(struct seq_file *seq, void *v)
                sk = epb->sk;
                if (!net_eq(sock_net(sk), seq_file_net(seq)))
                        continue;
-               seq_printf(seq, "%8pK %8pK %-3d %-3d %-4d %-5d %5d %5lu ", ep, sk,
+               seq_printf(seq, "%8pK %8pK %-3d %-3d %-4d %-5d %5u %5lu ", ep, sk,
                           sctp_sk(sk)->type, sk->sk_state, hash,
                           epb->bind_addr.port,
                           from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)),
@@ -342,7 +336,7 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
                        continue;
                seq_printf(seq,
                           "%8pK %8pK %-3d %-3d %-2d %-4d "
-                          "%4d %8d %8d %7d %5lu %-5d %5d ",
+                          "%4d %8d %8d %7u %5lu %-5d %5d ",
                           assoc, sk, sctp_sk(sk)->type, sk->sk_state,
                           assoc->state, hash,
                           assoc->assoc_id,
index 4a17494d736c4b6a50f0e838971204e1e901de43..5e17092f4adacbf0335dc33bcbde5a1f830b43df 100644 (file)
  *
  * Please send any bug reports or fixes you make to the
  * email address(es):
- *    lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- *    http://www.sf.net/projects/lksctp
+ *    lksctp developers <linux-sctp@vger.kernel.org>
  *
  * Written or modified by:
  *    La Monte H.P. Yarroll <piggy@acm.org>
@@ -41,9 +38,6 @@
  *    Sridhar Samudrala <sri@us.ibm.com>
  *    Daisy Chang <daisyc@us.ibm.com>
  *    Ardelle Fan <ardelle.fan@intel.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
  */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -1547,7 +1541,7 @@ module_exit(sctp_exit);
  */
 MODULE_ALIAS("net-pf-" __stringify(PF_INET) "-proto-132");
 MODULE_ALIAS("net-pf-" __stringify(PF_INET6) "-proto-132");
-MODULE_AUTHOR("Linux Kernel SCTP developers <lksctp-developers@lists.sourceforge.net>");
+MODULE_AUTHOR("Linux Kernel SCTP developers <linux-sctp@vger.kernel.org>");
 MODULE_DESCRIPTION("Support for the SCTP protocol (RFC2960)");
 module_param_named(no_checksums, sctp_checksum_disable, bool, 0644);
 MODULE_PARM_DESC(no_checksums, "Disable checksums computing and verification");
index 362ae6e2fd93a6e145fefb15e5f6e5cf4579204b..01e97836ca6c95ddd378dbd4c9fec09041c4fc26 100644 (file)
  *
  * Please send any bug reports or fixes you make to the
  * email address(es):
- *    lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- *    http://www.sf.net/projects/lksctp
+ *    lksctp developers <linux-sctp@vger.kernel.org>
  *
  * Written or modified by:
  *    La Monte H.P. Yarroll <piggy@acm.org>
@@ -45,9 +42,6 @@
  *    Daisy Chang          <daisyc@us.ibm.com>
  *    Ardelle Fan          <ardelle.fan@intel.com>
  *    Kevin Gao             <kevin.gao@intel.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
  */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 #include <net/sctp/sctp.h>
 #include <net/sctp/sm.h>
 
-static struct sctp_chunk *sctp_make_chunk(const struct sctp_association *asoc,
-                                         __u8 type, __u8 flags, int paylen);
+static struct sctp_chunk *sctp_make_control(const struct sctp_association *asoc,
+                                           __u8 type, __u8 flags, int paylen);
+static struct sctp_chunk *sctp_make_data(const struct sctp_association *asoc,
+                                        __u8 flags, int paylen);
+static struct sctp_chunk *_sctp_make_chunk(const struct sctp_association *asoc,
+                                          __u8 type, __u8 flags, int paylen);
 static sctp_cookie_param_t *sctp_pack_cookie(const struct sctp_endpoint *ep,
                                        const struct sctp_association *asoc,
                                        const struct sctp_chunk *init_chunk,
@@ -82,6 +80,28 @@ static int sctp_process_param(struct sctp_association *asoc,
 static void *sctp_addto_param(struct sctp_chunk *chunk, int len,
                              const void *data);
 
+/* Control chunk destructor */
+static void sctp_control_release_owner(struct sk_buff *skb)
+{
+       /* TODO: do memory release */
+}
+
+static void sctp_control_set_owner_w(struct sctp_chunk *chunk)
+{
+       struct sctp_association *asoc = chunk->asoc;
+       struct sk_buff *skb = chunk->skb;
+
+       /* TODO: properly account for control chunks.
+        * To do it right we'll need:
+        *  1) endpoint if association isn't known.
+        *  2) proper memory accounting.
+        *
+        *  For now, don't do anything.
+        */
+       skb->sk = asoc ? asoc->base.sk : NULL;
+       skb->destructor = sctp_control_release_owner;
+}
+
 /* What was the inbound interface for this chunk? */
 int sctp_chunk_iif(const struct sctp_chunk *chunk)
 {
@@ -296,7 +316,7 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
         * PLEASE DO NOT FIXME [This version does not support Host Name.]
         */
 
-       retval = sctp_make_chunk(asoc, SCTP_CID_INIT, 0, chunksize);
+       retval = sctp_make_control(asoc, SCTP_CID_INIT, 0, chunksize);
        if (!retval)
                goto nodata;
 
@@ -443,7 +463,7 @@ struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc,
                                        num_ext);
 
        /* Now allocate and fill out the chunk.  */
-       retval = sctp_make_chunk(asoc, SCTP_CID_INIT_ACK, 0, chunksize);
+       retval = sctp_make_control(asoc, SCTP_CID_INIT_ACK, 0, chunksize);
        if (!retval)
                goto nomem_chunk;
 
@@ -548,7 +568,7 @@ struct sctp_chunk *sctp_make_cookie_echo(const struct sctp_association *asoc,
        cookie_len = asoc->peer.cookie_len;
 
        /* Build a cookie echo chunk.  */
-       retval = sctp_make_chunk(asoc, SCTP_CID_COOKIE_ECHO, 0, cookie_len);
+       retval = sctp_make_control(asoc, SCTP_CID_COOKIE_ECHO, 0, cookie_len);
        if (!retval)
                goto nodata;
        retval->subh.cookie_hdr =
@@ -593,7 +613,7 @@ struct sctp_chunk *sctp_make_cookie_ack(const struct sctp_association *asoc,
 {
        struct sctp_chunk *retval;
 
-       retval = sctp_make_chunk(asoc, SCTP_CID_COOKIE_ACK, 0, 0);
+       retval = sctp_make_control(asoc, SCTP_CID_COOKIE_ACK, 0, 0);
 
        /* RFC 2960 6.4 Multi-homed SCTP Endpoints
         *
@@ -641,8 +661,8 @@ struct sctp_chunk *sctp_make_cwr(const struct sctp_association *asoc,
        sctp_cwrhdr_t cwr;
 
        cwr.lowest_tsn = htonl(lowest_tsn);
-       retval = sctp_make_chunk(asoc, SCTP_CID_ECN_CWR, 0,
-                                sizeof(sctp_cwrhdr_t));
+       retval = sctp_make_control(asoc, SCTP_CID_ECN_CWR, 0,
+                                  sizeof(sctp_cwrhdr_t));
 
        if (!retval)
                goto nodata;
@@ -675,8 +695,8 @@ struct sctp_chunk *sctp_make_ecne(const struct sctp_association *asoc,
        sctp_ecnehdr_t ecne;
 
        ecne.lowest_tsn = htonl(lowest_tsn);
-       retval = sctp_make_chunk(asoc, SCTP_CID_ECN_ECNE, 0,
-                                sizeof(sctp_ecnehdr_t));
+       retval = sctp_make_control(asoc, SCTP_CID_ECN_ECNE, 0,
+                                  sizeof(sctp_ecnehdr_t));
        if (!retval)
                goto nodata;
        retval->subh.ecne_hdr =
@@ -712,7 +732,7 @@ struct sctp_chunk *sctp_make_datafrag_empty(struct sctp_association *asoc,
                dp.ssn = htons(ssn);
 
        chunk_len = sizeof(dp) + data_len;
-       retval = sctp_make_chunk(asoc, SCTP_CID_DATA, flags, chunk_len);
+       retval = sctp_make_data(asoc, flags, chunk_len);
        if (!retval)
                goto nodata;
 
@@ -759,7 +779,7 @@ struct sctp_chunk *sctp_make_sack(const struct sctp_association *asoc)
                + sizeof(__u32) * num_dup_tsns;
 
        /* Create the chunk.  */
-       retval = sctp_make_chunk(asoc, SCTP_CID_SACK, 0, len);
+       retval = sctp_make_control(asoc, SCTP_CID_SACK, 0, len);
        if (!retval)
                goto nodata;
 
@@ -838,8 +858,8 @@ struct sctp_chunk *sctp_make_shutdown(const struct sctp_association *asoc,
        ctsn = sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map);
        shut.cum_tsn_ack = htonl(ctsn);
 
-       retval = sctp_make_chunk(asoc, SCTP_CID_SHUTDOWN, 0,
-                                sizeof(sctp_shutdownhdr_t));
+       retval = sctp_make_control(asoc, SCTP_CID_SHUTDOWN, 0,
+                                  sizeof(sctp_shutdownhdr_t));
        if (!retval)
                goto nodata;
 
@@ -857,7 +877,7 @@ struct sctp_chunk *sctp_make_shutdown_ack(const struct sctp_association *asoc,
 {
        struct sctp_chunk *retval;
 
-       retval = sctp_make_chunk(asoc, SCTP_CID_SHUTDOWN_ACK, 0, 0);
+       retval = sctp_make_control(asoc, SCTP_CID_SHUTDOWN_ACK, 0, 0);
 
        /* RFC 2960 6.4 Multi-homed SCTP Endpoints
         *
@@ -886,7 +906,7 @@ struct sctp_chunk *sctp_make_shutdown_complete(
         */
        flags |= asoc ? 0 : SCTP_CHUNK_FLAG_T;
 
-       retval = sctp_make_chunk(asoc, SCTP_CID_SHUTDOWN_COMPLETE, flags, 0);
+       retval = sctp_make_control(asoc, SCTP_CID_SHUTDOWN_COMPLETE, flags, 0);
 
        /* RFC 2960 6.4 Multi-homed SCTP Endpoints
         *
@@ -925,7 +945,7 @@ struct sctp_chunk *sctp_make_abort(const struct sctp_association *asoc,
                        flags = SCTP_CHUNK_FLAG_T;
        }
 
-       retval = sctp_make_chunk(asoc, SCTP_CID_ABORT, flags, hint);
+       retval = sctp_make_control(asoc, SCTP_CID_ABORT, flags, hint);
 
        /* RFC 2960 6.4 Multi-homed SCTP Endpoints
         *
@@ -1117,7 +1137,7 @@ struct sctp_chunk *sctp_make_heartbeat(const struct sctp_association *asoc,
        struct sctp_chunk *retval;
        sctp_sender_hb_info_t hbinfo;
 
-       retval = sctp_make_chunk(asoc, SCTP_CID_HEARTBEAT, 0, sizeof(hbinfo));
+       retval = sctp_make_control(asoc, SCTP_CID_HEARTBEAT, 0, sizeof(hbinfo));
 
        if (!retval)
                goto nodata;
@@ -1145,7 +1165,7 @@ struct sctp_chunk *sctp_make_heartbeat_ack(const struct sctp_association *asoc,
 {
        struct sctp_chunk *retval;
 
-       retval  = sctp_make_chunk(asoc, SCTP_CID_HEARTBEAT_ACK, 0, paylen);
+       retval  = sctp_make_control(asoc, SCTP_CID_HEARTBEAT_ACK, 0, paylen);
        if (!retval)
                goto nodata;
 
@@ -1177,8 +1197,8 @@ static struct sctp_chunk *sctp_make_op_error_space(
 {
        struct sctp_chunk *retval;
 
-       retval = sctp_make_chunk(asoc, SCTP_CID_ERROR, 0,
-                                sizeof(sctp_errhdr_t) + size);
+       retval = sctp_make_control(asoc, SCTP_CID_ERROR, 0,
+                                  sizeof(sctp_errhdr_t) + size);
        if (!retval)
                goto nodata;
 
@@ -1248,7 +1268,7 @@ struct sctp_chunk *sctp_make_auth(const struct sctp_association *asoc)
        if (unlikely(!hmac_desc))
                return NULL;
 
-       retval = sctp_make_chunk(asoc, SCTP_CID_AUTH, 0,
+       retval = sctp_make_control(asoc, SCTP_CID_AUTH, 0,
                        hmac_desc->hmac_len + sizeof(sctp_authhdr_t));
        if (!retval)
                return NULL;
@@ -1351,8 +1371,8 @@ const union sctp_addr *sctp_source(const struct sctp_chunk *chunk)
 /* Create a new chunk, setting the type and flags headers from the
  * arguments, reserving enough space for a 'paylen' byte payload.
  */
-static struct sctp_chunk *sctp_make_chunk(const struct sctp_association *asoc,
-                                         __u8 type, __u8 flags, int paylen)
+static struct sctp_chunk *_sctp_make_chunk(const struct sctp_association *asoc,
+                                           __u8 type, __u8 flags, int paylen)
 {
        struct sctp_chunk *retval;
        sctp_chunkhdr_t *chunk_hdr;
@@ -1385,14 +1405,27 @@ static struct sctp_chunk *sctp_make_chunk(const struct sctp_association *asoc,
        if (sctp_auth_send_cid(type, asoc))
                retval->auth = 1;
 
-       /* Set the skb to the belonging sock for accounting.  */
-       skb->sk = sk;
-
        return retval;
 nodata:
        return NULL;
 }
 
+static struct sctp_chunk *sctp_make_data(const struct sctp_association *asoc,
+                                        __u8 flags, int paylen)
+{
+       return _sctp_make_chunk(asoc, SCTP_CID_DATA, flags, paylen);
+}
+
+static struct sctp_chunk *sctp_make_control(const struct sctp_association *asoc,
+                                           __u8 type, __u8 flags, int paylen)
+{
+       struct sctp_chunk *chunk = _sctp_make_chunk(asoc, type, flags, paylen);
+
+       if (chunk)
+               sctp_control_set_owner_w(chunk);
+
+       return chunk;
+}
 
 /* Release the memory occupied by a chunk.  */
 static void sctp_chunk_destroy(struct sctp_chunk *chunk)
@@ -2733,7 +2766,7 @@ static struct sctp_chunk *sctp_make_asconf(struct sctp_association *asoc,
        length += addrlen;
 
        /* Create the chunk.  */
-       retval = sctp_make_chunk(asoc, SCTP_CID_ASCONF, 0, length);
+       retval = sctp_make_control(asoc, SCTP_CID_ASCONF, 0, length);
        if (!retval)
                return NULL;
 
@@ -2917,7 +2950,7 @@ static struct sctp_chunk *sctp_make_asconf_ack(const struct sctp_association *as
        int                     length = sizeof(asconf) + vparam_len;
 
        /* Create the chunk.  */
-       retval = sctp_make_chunk(asoc, SCTP_CID_ASCONF_ACK, 0, length);
+       retval = sctp_make_control(asoc, SCTP_CID_ASCONF_ACK, 0, length);
        if (!retval)
                return NULL;
 
@@ -3448,7 +3481,7 @@ struct sctp_chunk *sctp_make_fwdtsn(const struct sctp_association *asoc,
 
        hint = (nstreams + 1) * sizeof(__u32);
 
-       retval = sctp_make_chunk(asoc, SCTP_CID_FWD_TSN, 0, hint);
+       retval = sctp_make_control(asoc, SCTP_CID_FWD_TSN, 0, hint);
 
        if (!retval)
                return NULL;
index 9da68852ee94fbf0a7dcb02b61ba90700d18f77c..666c668427996903b9baf185a831c1c10c75d8cb 100644 (file)
  *
  * Please send any bug reports or fixes you make to the
  * email address(es):
- *    lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- *    http://www.sf.net/projects/lksctp
+ *    lksctp developers <linux-sctp@vger.kernel.org>
  *
  * Written or modified by:
  *    La Monte H.P. Yarroll <piggy@acm.org>
@@ -42,9 +39,6 @@
  *    Daisy Chang          <daisyc@us.ibm.com>
  *    Sridhar Samudrala            <sri@us.ibm.com>
  *    Ardelle Fan          <ardelle.fan@intel.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
  */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
index f6b7109195a629563b7a49a06e32847fd3b23984..dfe3f36ff2aa27165b35a39d382583759cd3ebae 100644 (file)
  *
  * Please send any bug reports or fixes you make to the
  * email address(es):
- *    lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- *    http://www.sf.net/projects/lksctp
+ *    lksctp developers <linux-sctp@vger.kernel.org>
  *
  * Written or modified by:
  *    La Monte H.P. Yarroll <piggy@acm.org>
@@ -45,9 +42,6 @@
  *    Ardelle Fan          <ardelle.fan@intel.com>
  *    Ryan Layer           <rmlayer@us.ibm.com>
  *    Kevin Gao                    <kevin.gao@intel.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
  */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
index 84d98d8a5a7417bd92ea919c56e0f8033073a6c4..c5999b2dde7dfa5fcd53e68cf3d7791e838e602b 100644 (file)
  *
  * Please send any bug reports or fixes you make to the
  * email address(es):
- *    lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- *    http://www.sf.net/projects/lksctp
+ *    lksctp developers <linux-sctp@vger.kernel.org>
  *
  * Written or modified by:
  *    La Monte H.P. Yarroll <piggy@acm.org>
@@ -41,9 +38,6 @@
  *    Daisy Chang          <daisyc@us.ibm.com>
  *    Ardelle Fan          <ardelle.fan@intel.com>
  *    Sridhar Samudrala            <sri@us.ibm.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
  */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
index c6670d2e3f8d1bfa7c7fd0cef617de4944ca95e0..d5d5882a2891ac4c1047faa002375ba829598bb5 100644 (file)
  *
  * Please send any bug reports or fixes you make to the
  * email address(es):
- *    lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- *    http://www.sf.net/projects/lksctp
+ *    lksctp developers <linux-sctp@vger.kernel.org>
  *
  * Written or modified by:
  *    La Monte H.P. Yarroll <piggy@acm.org>
@@ -52,9 +49,6 @@
  *    Ryan Layer           <rmlayer@us.ibm.com>
  *    Anup Pemmaiah         <pemmaiah@cc.usu.edu>
  *    Kevin Gao             <kevin.gao@intel.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
  */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
index da860352380872ad1964af228906178bf8b56531..6007124aefa018fd958b9481e012f3db2df5a5da 100644 (file)
  *
  * Please send any bug reports or fixes you make to the
  * email address(es):
- *    lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- *    http://www.sf.net/projects/lksctp
+ *    lksctp developers <linux-sctp@vger.kernel.org>
  *
  * Written or modified by:
  *    Jon Grimm             <jgrimm@us.ibm.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
  */
 
 #include <linux/types.h>
index 9a5c4c9eddafa0e4f91cabb56847c8c80bf8b59b..6b36561a1b3b7cceab5f5a4a9dce7aa2bc362266 100644 (file)
  *
  * Please send any bug reports or fixes you make to the
  * email address(es):
- *    lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- *    http://www.sf.net/projects/lksctp
+ *    lksctp developers <linux-sctp@vger.kernel.org>
  *
  * Written or modified by:
  *    Mingqin Liu           <liuming@us.ibm.com>
@@ -36,9 +33,6 @@
  *    Ardelle Fan           <ardelle.fan@intel.com>
  *    Ryan Layer            <rmlayer@us.ibm.com>
  *    Sridhar Samudrala     <sri@us.ibm.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
  */
 
 #include <net/sctp/structs.h>
index 8fdd16046d668b0c6cf927e5e87754f2ebfaf583..e332efb124cc0c34f93786232353c37dd2e10ea1 100644 (file)
  *
  * Please send any bug reports or fixes you make to the
  * email address(es):
- *    lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- *    http://www.sf.net/projects/lksctp
+ *    lksctp developers <linux-sctp@vger.kernel.org>
  *
  * Written or modified by:
  *    La Monte H.P. Yarroll <piggy@acm.org>
@@ -43,9 +40,6 @@
  *    Hui Huang             <hui.huang@nokia.com>
  *    Sridhar Samudrala            <sri@us.ibm.com>
  *    Ardelle Fan          <ardelle.fan@intel.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
  */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
index b46019568a86d15c71dc805c8d1b3e560866d726..fbda20028285a8bb67c1052601a9986b90f50258 100644 (file)
  *
  * Please send any bug reports or fixes you make to the
  * email address(es):
- *    lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- *    http://www.sf.net/projects/lksctp
+ *    lksctp developers <linux-sctp@vger.kernel.org>
  *
  * Written or modified by:
  *    La Monte H.P. Yarroll <piggy@acm.org>
  *    Jon Grimm             <jgrimm@us.ibm.com>
  *    Karl Knutson          <karl@athena.chicago.il.us>
  *    Sridhar Samudrala     <sri@us.ibm.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
  */
 
 #include <linux/slab.h>
index 44a45dbee4df37e835b258cb551e333aeb7582b9..81089ed654564db6367c27f174c9b8381557c512 100644 (file)
  *
  * Please send any bug reports or fixes you make to the
  * email address(es):
- *    lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- *    http://www.sf.net/projects/lksctp
+ *    lksctp developers <linux-sctp@vger.kernel.org>
  *
  * Written or modified by:
  *    Jon Grimm             <jgrimm@us.ibm.com>
  *    La Monte H.P. Yarroll <piggy@acm.org>
  *    Ardelle Fan          <ardelle.fan@intel.com>
  *    Sridhar Samudrala     <sri@us.ibm.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
  */
 
 #include <linux/slab.h>
index 04e3d470f877e51cc3238a3939595c1d90b83b37..1c1484ed605d490686c8daa87e0a7e595b9fba3c 100644 (file)
  *
  * Please send any bug reports or fixes you make to the
  * email address(es):
- *    lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- *    http://www.sf.net/projects/lksctp
+ *    lksctp developers <linux-sctp@vger.kernel.org>
  *
  * Written or modified by:
  *    Jon Grimm             <jgrimm@us.ibm.com>
  *    La Monte H.P. Yarroll <piggy@acm.org>
  *    Sridhar Samudrala     <sri@us.ibm.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
  */
 
 #include <linux/slab.h>
index 406859cc68aa90073ef237e91e0aa67116adcf33..017aedc8a7a1e9fe6d5e012342754e25c8a74260 100644 (file)
@@ -480,23 +480,6 @@ static const struct dentry_operations rpc_dentry_operations = {
        .d_delete = rpc_delete_dentry,
 };
 
-/*
- * Lookup the data. This is trivial - if the dentry didn't already
- * exist, we know it is negative.
- */
-static struct dentry *
-rpc_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
-{
-       if (dentry->d_name.len > NAME_MAX)
-               return ERR_PTR(-ENAMETOOLONG);
-       d_add(dentry, NULL);
-       return NULL;
-}
-
-static const struct inode_operations rpc_dir_inode_operations = {
-       .lookup         = rpc_lookup,
-};
-
 static struct inode *
 rpc_get_inode(struct super_block *sb, umode_t mode)
 {
@@ -509,7 +492,7 @@ rpc_get_inode(struct super_block *sb, umode_t mode)
        switch (mode & S_IFMT) {
        case S_IFDIR:
                inode->i_fop = &simple_dir_operations;
-               inode->i_op = &rpc_dir_inode_operations;
+               inode->i_op = &simple_dir_inode_operations;
                inc_nlink(inode);
        default:
                break;
index 7762b9f8a8b72581f00003ad266ab060754f43b9..9c9caaa5e0d3129f8872350ded706321fb2509c2 100644 (file)
@@ -442,7 +442,7 @@ static void svc_tcp_write_space(struct sock *sk)
 {
        struct socket *sock = sk->sk_socket;
 
-       if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk) && sock)
+       if (sk_stream_is_writeable(sk) && sock)
                clear_bit(SOCK_NOSPACE, &sock->flags);
        svc_write_space(sk);
 }
index ddf0602603bdef4ac0a6ad29a9815bae56f04626..d6656d7768f4e689e94aca67189a0634bd950fad 100644 (file)
@@ -1602,7 +1602,7 @@ static void xs_tcp_write_space(struct sock *sk)
        read_lock_bh(&sk->sk_callback_lock);
 
        /* from net/core/stream.c:sk_stream_write_space */
-       if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
+       if (sk_stream_is_writeable(sk))
                xs_write_space(sk);
 
        read_unlock_bh(&sk->sk_callback_lock);
index c4ce243824bb19c6e600dd98da2d8a6d16747dc1..86de99ad297605d04356f9efd7be174d2f7c7993 100644 (file)
@@ -1479,7 +1479,8 @@ static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
                                 MAX_SKB_FRAGS * PAGE_SIZE);
 
        skb = sock_alloc_send_pskb(sk, len - data_len, data_len,
-                                  msg->msg_flags & MSG_DONTWAIT, &err);
+                                  msg->msg_flags & MSG_DONTWAIT, &err,
+                                  PAGE_ALLOC_COSTLY_ORDER);
        if (skb == NULL)
                goto out;
 
@@ -1596,6 +1597,10 @@ out:
        return err;
 }
 
+/* We use paged skbs for stream sockets, and limit occupancy to 32768
+ * bytes, and a minimun of a full page.
+ */
+#define UNIX_SKB_FRAGS_SZ (PAGE_SIZE << get_order(32768))
 
 static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
                               struct msghdr *msg, size_t len)
@@ -1609,6 +1614,7 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
        struct scm_cookie tmp_scm;
        bool fds_sent = false;
        int max_level;
+       int data_len;
 
        if (NULL == siocb->scm)
                siocb->scm = &tmp_scm;
@@ -1635,40 +1641,22 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
                goto pipe_err;
 
        while (sent < len) {
-               /*
-                *      Optimisation for the fact that under 0.01% of X
-                *      messages typically need breaking up.
-                */
-
-               size = len-sent;
+               size = len - sent;
 
                /* Keep two messages in the pipe so it schedules better */
-               if (size > ((sk->sk_sndbuf >> 1) - 64))
-                       size = (sk->sk_sndbuf >> 1) - 64;
+               size = min_t(int, size, (sk->sk_sndbuf >> 1) - 64);
 
-               if (size > SKB_MAX_ALLOC)
-                       size = SKB_MAX_ALLOC;
-
-               /*
-                *      Grab a buffer
-                */
+               /* allow fallback to order-0 allocations */
+               size = min_t(int, size, SKB_MAX_HEAD(0) + UNIX_SKB_FRAGS_SZ);
 
-               skb = sock_alloc_send_skb(sk, size, msg->msg_flags&MSG_DONTWAIT,
-                                         &err);
+               data_len = max_t(int, 0, size - SKB_MAX_HEAD(0));
 
-               if (skb == NULL)
+               skb = sock_alloc_send_pskb(sk, size - data_len, data_len,
+                                          msg->msg_flags & MSG_DONTWAIT, &err,
+                                          get_order(UNIX_SKB_FRAGS_SZ));
+               if (!skb)
                        goto out_err;
 
-               /*
-                *      If you pass two values to the sock_alloc_send_skb
-                *      it tries to grab the large buffer with GFP_NOFS
-                *      (which can fail easily), and if it fails grab the
-                *      fallback size buffer which is under a page and will
-                *      succeed. [Alan]
-                */
-               size = min_t(int, size, skb_tailroom(skb));
-
-
                /* Only send the fds in the first buffer */
                err = unix_scm_to_skb(siocb->scm, skb, !fds_sent);
                if (err < 0) {
@@ -1678,7 +1666,11 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
                max_level = err + 1;
                fds_sent = true;
 
-               err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size);
+               skb_put(skb, size - data_len);
+               skb->data_len = data_len;
+               skb->len = size;
+               err = skb_copy_datagram_from_iovec(skb, 0, msg->msg_iov,
+                                                  sent, size);
                if (err) {
                        kfree_skb(skb);
                        goto out_err;
@@ -1890,6 +1882,11 @@ static long unix_stream_data_wait(struct sock *sk, long timeo,
        return timeo;
 }
 
+static unsigned int unix_skb_len(const struct sk_buff *skb)
+{
+       return skb->len - UNIXCB(skb).consumed;
+}
+
 static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
                               struct msghdr *msg, size_t size,
                               int flags)
@@ -1977,8 +1974,8 @@ again:
                }
 
                skip = sk_peek_offset(sk, flags);
-               while (skip >= skb->len) {
-                       skip -= skb->len;
+               while (skip >= unix_skb_len(skb)) {
+                       skip -= unix_skb_len(skb);
                        last = skb;
                        skb = skb_peek_next(skb, &sk->sk_receive_queue);
                        if (!skb)
@@ -2005,8 +2002,9 @@ again:
                        sunaddr = NULL;
                }
 
-               chunk = min_t(unsigned int, skb->len - skip, size);
-               if (memcpy_toiovec(msg->msg_iov, skb->data + skip, chunk)) {
+               chunk = min_t(unsigned int, unix_skb_len(skb) - skip, size);
+               if (skb_copy_datagram_iovec(skb, UNIXCB(skb).consumed + skip,
+                                           msg->msg_iov, chunk)) {
                        if (copied == 0)
                                copied = -EFAULT;
                        break;
@@ -2016,14 +2014,14 @@ again:
 
                /* Mark read part of skb as used */
                if (!(flags & MSG_PEEK)) {
-                       skb_pull(skb, chunk);
+                       UNIXCB(skb).consumed += chunk;
 
                        sk_peek_offset_bwd(sk, chunk);
 
                        if (UNIXCB(skb).fp)
                                unix_detach_fds(siocb->scm, skb);
 
-                       if (skb->len)
+                       if (unix_skb_len(skb))
                                break;
 
                        skb_unlink(skb, &sk->sk_receive_queue);
@@ -2107,7 +2105,7 @@ long unix_inq_len(struct sock *sk)
        if (sk->sk_type == SOCK_STREAM ||
            sk->sk_type == SOCK_SEQPACKET) {
                skb_queue_walk(&sk->sk_receive_queue, skb)
-                       amount += skb->len;
+                       amount += unix_skb_len(skb);
        } else {
                skb = skb_peek(&sk->sk_receive_queue);
                if (skb)
index 4d9334683f8409c3e064c44a25235ea48e65b38b..545c08b8a1d482ac0c155ab755f6623c62ffcbe5 100644 (file)
@@ -96,8 +96,7 @@
 #include <linux/wait.h>
 #include <linux/workqueue.h>
 #include <net/sock.h>
-
-#include "af_vsock.h"
+#include <net/af_vsock.h>
 
 static int __vsock_bind(struct sock *sk, struct sockaddr_vm *addr);
 static void vsock_sk_destruct(struct sock *sk);
index ffc11df02af22cf90c03df730110ea2a96f033cb..9d6986634e0bfaf1a4431cc8f6fc015f212e7042 100644 (file)
@@ -34,8 +34,8 @@
 #include <linux/wait.h>
 #include <linux/workqueue.h>
 #include <net/sock.h>
+#include <net/af_vsock.h>
 
-#include "af_vsock.h"
 #include "vmci_transport_notify.h"
 
 static int vmci_transport_recv_dgram_cb(void *data, struct vmci_datagram *dg);
index fd88ea8924e4dd89d8f0dc7939e5ccc75e39a747..ce6c9623d5f069029ce58294bcc7de9bc3728fcd 100644 (file)
@@ -19,8 +19,8 @@
 #include <linux/vmw_vmci_defs.h>
 #include <linux/vmw_vmci_api.h>
 
-#include "vsock_addr.h"
-#include "af_vsock.h"
+#include <net/vsock_addr.h>
+#include <net/af_vsock.h>
 
 /* If the packet format changes in a release then this should change too. */
 #define VMCI_TRANSPORT_PACKET_VERSION 1
index ec2611b4ea0ee4fc073926c9a33347ace8de9e4f..82486ee55eaca4cc4f4b1cb414cc848c1239ec46 100644 (file)
@@ -17,8 +17,7 @@
 #include <linux/socket.h>
 #include <linux/stddef.h>
 #include <net/sock.h>
-
-#include "vsock_addr.h"
+#include <net/vsock_addr.h>
 
 void vsock_addr_init(struct sockaddr_vm *addr, u32 cid, u32 port)
 {
index a8c29fa4f1b3b90539f5370b9084056c5774812a..67153964aad2059652ffd34d79c956585cc9c1e3 100644 (file)
@@ -462,6 +462,14 @@ int wiphy_register(struct wiphy *wiphy)
                return -EINVAL;
 #endif
 
+       if (WARN_ON(wiphy->coalesce &&
+                   (!wiphy->coalesce->n_rules ||
+                    !wiphy->coalesce->n_patterns) &&
+                   (!wiphy->coalesce->pattern_min_len ||
+                    wiphy->coalesce->pattern_min_len >
+                       wiphy->coalesce->pattern_max_len)))
+               return -EINVAL;
+
        if (WARN_ON(wiphy->ap_sme_capa &&
                    !(wiphy->flags & WIPHY_FLAG_HAVE_AP_SME)))
                return -EINVAL;
@@ -668,6 +676,7 @@ void wiphy_unregister(struct wiphy *wiphy)
                rdev_set_wakeup(rdev, false);
 #endif
        cfg80211_rdev_free_wowlan(rdev);
+       cfg80211_rdev_free_coalesce(rdev);
 }
 EXPORT_SYMBOL(wiphy_unregister);
 
index a6b45bf00f3357292a1ad6e3cc5a08fd6faf9a7a..9ad43c619c54830f915193daa60eadef92b5bda5 100644 (file)
@@ -79,6 +79,8 @@ struct cfg80211_registered_device {
        /* netlink port which started critical protocol (0 means not started) */
        u32 crit_proto_nlportid;
 
+       struct cfg80211_coalesce *coalesce;
+
        /* must be last because of the way we do wiphy_priv(),
         * and it should at least be aligned to NETDEV_ALIGN */
        struct wiphy wiphy __aligned(NETDEV_ALIGN);
index 30c49202ee4d12804b78bf4cc26e2766c4cc029e..0553fd4d85aeb4b9338d2661ba020448105a68e9 100644 (file)
@@ -167,9 +167,12 @@ int __cfg80211_join_mesh(struct cfg80211_registered_device *rdev,
         * basic rates
         */
        if (!setup->basic_rates) {
+               enum nl80211_bss_scan_width scan_width;
                struct ieee80211_supported_band *sband =
                                rdev->wiphy.bands[setup->chandef.chan->band];
-               setup->basic_rates = ieee80211_mandatory_rates(sband);
+               scan_width = cfg80211_chandef_to_scan_width(&setup->chandef);
+               setup->basic_rates = ieee80211_mandatory_rates(sband,
+                                                              scan_width);
        }
 
        if (!cfg80211_reg_can_beacon(&rdev->wiphy, &setup->chandef))
index 3fcba69817e579244e836b2e2d39a1aab14cc210..741368c31270d0066c5adbd8f744a3d9b5b61ff8 100644 (file)
@@ -349,6 +349,11 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = {
        [NL80211_ATTR_IE_RIC] = { .type = NLA_BINARY,
                                  .len = IEEE80211_MAX_DATA_LEN },
        [NL80211_ATTR_PEER_AID] = { .type = NLA_U16 },
+       [NL80211_ATTR_CH_SWITCH_COUNT] = { .type = NLA_U32 },
+       [NL80211_ATTR_CH_SWITCH_BLOCK_TX] = { .type = NLA_FLAG },
+       [NL80211_ATTR_CSA_IES] = { .type = NLA_NESTED },
+       [NL80211_ATTR_CSA_C_OFF_BEACON] = { .type = NLA_U16 },
+       [NL80211_ATTR_CSA_C_OFF_PRESP] = { .type = NLA_U16 },
 };
 
 /* policy for the key attributes */
@@ -403,6 +408,14 @@ nl80211_wowlan_tcp_policy[NUM_NL80211_WOWLAN_TCP] = {
        [NL80211_WOWLAN_TCP_WAKE_MASK] = { .len = 1 },
 };
 
+/* policy for coalesce rule attributes */
+static const struct nla_policy
+nl80211_coalesce_policy[NUM_NL80211_ATTR_COALESCE_RULE] = {
+       [NL80211_ATTR_COALESCE_RULE_DELAY] = { .type = NLA_U32 },
+       [NL80211_ATTR_COALESCE_RULE_CONDITION] = { .type = NLA_U32 },
+       [NL80211_ATTR_COALESCE_RULE_PKT_PATTERN] = { .type = NLA_NESTED },
+};
+
 /* policy for GTK rekey offload attributes */
 static const struct nla_policy
 nl80211_rekey_policy[NUM_NL80211_REKEY_DATA] = {
@@ -976,7 +989,7 @@ static int nl80211_send_wowlan(struct sk_buff *msg,
                return -ENOBUFS;
 
        if (dev->wiphy.wowlan->n_patterns) {
-               struct nl80211_wowlan_pattern_support pat = {
+               struct nl80211_pattern_support pat = {
                        .max_patterns = dev->wiphy.wowlan->n_patterns,
                        .min_pattern_len = dev->wiphy.wowlan->pattern_min_len,
                        .max_pattern_len = dev->wiphy.wowlan->pattern_max_len,
@@ -997,6 +1010,27 @@ static int nl80211_send_wowlan(struct sk_buff *msg,
 }
 #endif
 
+static int nl80211_send_coalesce(struct sk_buff *msg,
+                                struct cfg80211_registered_device *dev)
+{
+       struct nl80211_coalesce_rule_support rule;
+
+       if (!dev->wiphy.coalesce)
+               return 0;
+
+       rule.max_rules = dev->wiphy.coalesce->n_rules;
+       rule.max_delay = dev->wiphy.coalesce->max_delay;
+       rule.pat.max_patterns = dev->wiphy.coalesce->n_patterns;
+       rule.pat.min_pattern_len = dev->wiphy.coalesce->pattern_min_len;
+       rule.pat.max_pattern_len = dev->wiphy.coalesce->pattern_max_len;
+       rule.pat.max_pkt_offset = dev->wiphy.coalesce->max_pkt_offset;
+
+       if (nla_put(msg, NL80211_ATTR_COALESCE_RULE, sizeof(rule), &rule))
+               return -ENOBUFS;
+
+       return 0;
+}
+
 static int nl80211_send_band_rateinfo(struct sk_buff *msg,
                                      struct ieee80211_supported_band *sband)
 {
@@ -1395,6 +1429,8 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *dev,
                if (state->split) {
                        CMD(crit_proto_start, CRIT_PROTOCOL_START);
                        CMD(crit_proto_stop, CRIT_PROTOCOL_STOP);
+                       if (dev->wiphy.flags & WIPHY_FLAG_HAS_CHANNEL_SWITCH)
+                               CMD(channel_switch, CHANNEL_SWITCH);
                }
 
 #ifdef CONFIG_NL80211_TESTMODE
@@ -1515,6 +1551,12 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *dev,
                            dev->wiphy.vht_capa_mod_mask))
                        goto nla_put_failure;
 
+               state->split_start++;
+               break;
+       case 10:
+               if (nl80211_send_coalesce(msg, dev))
+                       goto nla_put_failure;
+
                /* done */
                state->split_start = 0;
                break;
@@ -2622,8 +2664,8 @@ static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info)
 
        hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0,
                             NL80211_CMD_NEW_KEY);
-       if (IS_ERR(hdr))
-               return PTR_ERR(hdr);
+       if (!hdr)
+               return -ENOBUFS;
 
        cookie.msg = msg;
        cookie.idx = key_idx;
@@ -5580,6 +5622,111 @@ static int nl80211_start_radar_detection(struct sk_buff *skb,
        return err;
 }
 
+static int nl80211_channel_switch(struct sk_buff *skb, struct genl_info *info)
+{
+       struct cfg80211_registered_device *rdev = info->user_ptr[0];
+       struct net_device *dev = info->user_ptr[1];
+       struct wireless_dev *wdev = dev->ieee80211_ptr;
+       struct cfg80211_csa_settings params;
+       /* csa_attrs is defined static to avoid waste of stack size - this
+        * function is called under RTNL lock, so this should not be a problem.
+        */
+       static struct nlattr *csa_attrs[NL80211_ATTR_MAX+1];
+       u8 radar_detect_width = 0;
+       int err;
+
+       if (!rdev->ops->channel_switch ||
+           !(rdev->wiphy.flags & WIPHY_FLAG_HAS_CHANNEL_SWITCH))
+               return -EOPNOTSUPP;
+
+       /* may add IBSS support later */
+       if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
+           dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO)
+               return -EOPNOTSUPP;
+
+       memset(&params, 0, sizeof(params));
+
+       if (!info->attrs[NL80211_ATTR_WIPHY_FREQ] ||
+           !info->attrs[NL80211_ATTR_CH_SWITCH_COUNT])
+               return -EINVAL;
+
+       /* only important for AP, IBSS and mesh create IEs internally */
+       if (!info->attrs[NL80211_ATTR_CSA_IES])
+               return -EINVAL;
+
+       /* useless if AP is not running */
+       if (!wdev->beacon_interval)
+               return -EINVAL;
+
+       params.count = nla_get_u32(info->attrs[NL80211_ATTR_CH_SWITCH_COUNT]);
+
+       err = nl80211_parse_beacon(info->attrs, &params.beacon_after);
+       if (err)
+               return err;
+
+       err = nla_parse_nested(csa_attrs, NL80211_ATTR_MAX,
+                              info->attrs[NL80211_ATTR_CSA_IES],
+                              nl80211_policy);
+       if (err)
+               return err;
+
+       err = nl80211_parse_beacon(csa_attrs, &params.beacon_csa);
+       if (err)
+               return err;
+
+       if (!csa_attrs[NL80211_ATTR_CSA_C_OFF_BEACON])
+               return -EINVAL;
+
+       params.counter_offset_beacon =
+               nla_get_u16(csa_attrs[NL80211_ATTR_CSA_C_OFF_BEACON]);
+       if (params.counter_offset_beacon >= params.beacon_csa.tail_len)
+               return -EINVAL;
+
+       /* sanity check - counters should be the same */
+       if (params.beacon_csa.tail[params.counter_offset_beacon] !=
+           params.count)
+               return -EINVAL;
+
+       if (csa_attrs[NL80211_ATTR_CSA_C_OFF_PRESP]) {
+               params.counter_offset_presp =
+                       nla_get_u16(csa_attrs[NL80211_ATTR_CSA_C_OFF_PRESP]);
+               if (params.counter_offset_presp >=
+                   params.beacon_csa.probe_resp_len)
+                       return -EINVAL;
+
+               if (params.beacon_csa.probe_resp[params.counter_offset_presp] !=
+                   params.count)
+                       return -EINVAL;
+       }
+
+       err = nl80211_parse_chandef(rdev, info, &params.chandef);
+       if (err)
+               return err;
+
+       if (!cfg80211_reg_can_beacon(&rdev->wiphy, &params.chandef))
+               return -EINVAL;
+
+       err = cfg80211_chandef_dfs_required(wdev->wiphy, &params.chandef);
+       if (err < 0) {
+               return err;
+       } else if (err) {
+               radar_detect_width = BIT(params.chandef.width);
+               params.radar_required = true;
+       }
+
+       err = cfg80211_can_use_iftype_chan(rdev, wdev, wdev->iftype,
+                                          params.chandef.chan,
+                                          CHAN_MODE_SHARED,
+                                          radar_detect_width);
+       if (err)
+               return err;
+
+       if (info->attrs[NL80211_ATTR_CH_SWITCH_BLOCK_TX])
+               params.block_tx = true;
+
+       return rdev_channel_switch(rdev, dev, &params);
+}
+
 static int nl80211_send_bss(struct sk_buff *msg, struct netlink_callback *cb,
                            u32 seq, int flags,
                            struct cfg80211_registered_device *rdev,
@@ -5641,6 +5788,7 @@ static int nl80211_send_bss(struct sk_buff *msg, struct netlink_callback *cb,
                goto nla_put_failure;
        if (nla_put_u16(msg, NL80211_BSS_CAPABILITY, res->capability) ||
            nla_put_u32(msg, NL80211_BSS_FREQUENCY, res->channel->center_freq) ||
+           nla_put_u32(msg, NL80211_BSS_CHAN_WIDTH, res->scan_width) ||
            nla_put_u32(msg, NL80211_BSS_SEEN_MS_AGO,
                        jiffies_to_msecs(jiffies - intbss->ts)))
                goto nla_put_failure;
@@ -6321,6 +6469,8 @@ static int nl80211_join_ibss(struct sk_buff *skb, struct genl_info *info)
                return -EINVAL;
 
        switch (ibss.chandef.width) {
+       case NL80211_CHAN_WIDTH_5:
+       case NL80211_CHAN_WIDTH_10:
        case NL80211_CHAN_WIDTH_20_NOHT:
                break;
        case NL80211_CHAN_WIDTH_20:
@@ -6348,6 +6498,19 @@ static int nl80211_join_ibss(struct sk_buff *skb, struct genl_info *info)
                        return err;
        }
 
+       if (info->attrs[NL80211_ATTR_HT_CAPABILITY_MASK])
+               memcpy(&ibss.ht_capa_mask,
+                      nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY_MASK]),
+                      sizeof(ibss.ht_capa_mask));
+
+       if (info->attrs[NL80211_ATTR_HT_CAPABILITY]) {
+               if (!info->attrs[NL80211_ATTR_HT_CAPABILITY_MASK])
+                       return -EINVAL;
+               memcpy(&ibss.ht_capa,
+                      nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY]),
+                      sizeof(ibss.ht_capa));
+       }
+
        if (info->attrs[NL80211_ATTR_MCAST_RATE] &&
            !nl80211_parse_mcast_rate(rdev, ibss.mcast_rate,
                        nla_get_u32(info->attrs[NL80211_ATTR_MCAST_RATE])))
@@ -6430,19 +6593,30 @@ static struct genl_multicast_group nl80211_testmode_mcgrp = {
 static int nl80211_testmode_do(struct sk_buff *skb, struct genl_info *info)
 {
        struct cfg80211_registered_device *rdev = info->user_ptr[0];
+       struct wireless_dev *wdev =
+               __cfg80211_wdev_from_attrs(genl_info_net(info), info->attrs);
        int err;
 
+       if (!rdev->ops->testmode_cmd)
+               return -EOPNOTSUPP;
+
+       if (IS_ERR(wdev)) {
+               err = PTR_ERR(wdev);
+               if (err != -EINVAL)
+                       return err;
+               wdev = NULL;
+       } else if (wdev->wiphy != &rdev->wiphy) {
+               return -EINVAL;
+       }
+
        if (!info->attrs[NL80211_ATTR_TESTDATA])
                return -EINVAL;
 
-       err = -EOPNOTSUPP;
-       if (rdev->ops->testmode_cmd) {
-               rdev->testmode_info = info;
-               err = rdev_testmode_cmd(rdev,
+       rdev->testmode_info = info;
+       err = rdev_testmode_cmd(rdev, wdev,
                                nla_data(info->attrs[NL80211_ATTR_TESTDATA]),
                                nla_len(info->attrs[NL80211_ATTR_TESTDATA]));
-               rdev->testmode_info = NULL;
-       }
+       rdev->testmode_info = NULL;
 
        return err;
 }
@@ -6507,6 +6681,9 @@ static int nl80211_testmode_dump(struct sk_buff *skb,
                                           NL80211_CMD_TESTMODE);
                struct nlattr *tmdata;
 
+               if (!hdr)
+                       break;
+
                if (nla_put_u32(skb, NL80211_ATTR_WIPHY, phy_idx)) {
                        genlmsg_cancel(skb, hdr);
                        break;
@@ -6951,9 +7128,8 @@ static int nl80211_remain_on_channel(struct sk_buff *skb,
 
        hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0,
                             NL80211_CMD_REMAIN_ON_CHANNEL);
-
-       if (IS_ERR(hdr)) {
-               err = PTR_ERR(hdr);
+       if (!hdr) {
+               err = -ENOBUFS;
                goto free_msg;
        }
 
@@ -7251,9 +7427,8 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info)
 
                hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0,
                                     NL80211_CMD_FRAME);
-
-               if (IS_ERR(hdr)) {
-                       err = PTR_ERR(hdr);
+               if (!hdr) {
+                       err = -ENOBUFS;
                        goto free_msg;
                }
        }
@@ -7403,14 +7578,12 @@ static int nl80211_set_cqm_txe(struct genl_info *info,
                               u32 rate, u32 pkts, u32 intvl)
 {
        struct cfg80211_registered_device *rdev = info->user_ptr[0];
-       struct wireless_dev *wdev;
        struct net_device *dev = info->user_ptr[1];
+       struct wireless_dev *wdev = dev->ieee80211_ptr;
 
        if (rate > 100 || intvl > NL80211_CQM_TXE_MAX_INTVL)
                return -EINVAL;
 
-       wdev = dev->ieee80211_ptr;
-
        if (!rdev->ops->set_cqm_txe_config)
                return -EOPNOTSUPP;
 
@@ -7425,13 +7598,15 @@ static int nl80211_set_cqm_rssi(struct genl_info *info,
                                s32 threshold, u32 hysteresis)
 {
        struct cfg80211_registered_device *rdev = info->user_ptr[0];
-       struct wireless_dev *wdev;
        struct net_device *dev = info->user_ptr[1];
+       struct wireless_dev *wdev = dev->ieee80211_ptr;
 
        if (threshold > 0)
                return -EINVAL;
 
-       wdev = dev->ieee80211_ptr;
+       /* disabling - hysteresis should also be zero then */
+       if (threshold == 0)
+               hysteresis = 0;
 
        if (!rdev->ops->set_cqm_rssi_config)
                return -EOPNOTSUPP;
@@ -7450,36 +7625,33 @@ static int nl80211_set_cqm(struct sk_buff *skb, struct genl_info *info)
        int err;
 
        cqm = info->attrs[NL80211_ATTR_CQM];
-       if (!cqm) {
-               err = -EINVAL;
-               goto out;
-       }
+       if (!cqm)
+               return -EINVAL;
 
        err = nla_parse_nested(attrs, NL80211_ATTR_CQM_MAX, cqm,
                               nl80211_attr_cqm_policy);
        if (err)
-               goto out;
+               return err;
 
        if (attrs[NL80211_ATTR_CQM_RSSI_THOLD] &&
            attrs[NL80211_ATTR_CQM_RSSI_HYST]) {
-               s32 threshold;
-               u32 hysteresis;
-               threshold = nla_get_u32(attrs[NL80211_ATTR_CQM_RSSI_THOLD]);
-               hysteresis = nla_get_u32(attrs[NL80211_ATTR_CQM_RSSI_HYST]);
-               err = nl80211_set_cqm_rssi(info, threshold, hysteresis);
-       } else if (attrs[NL80211_ATTR_CQM_TXE_RATE] &&
-                  attrs[NL80211_ATTR_CQM_TXE_PKTS] &&
-                  attrs[NL80211_ATTR_CQM_TXE_INTVL]) {
-               u32 rate, pkts, intvl;
-               rate = nla_get_u32(attrs[NL80211_ATTR_CQM_TXE_RATE]);
-               pkts = nla_get_u32(attrs[NL80211_ATTR_CQM_TXE_PKTS]);
-               intvl = nla_get_u32(attrs[NL80211_ATTR_CQM_TXE_INTVL]);
-               err = nl80211_set_cqm_txe(info, rate, pkts, intvl);
-       } else
-               err = -EINVAL;
+               s32 threshold = nla_get_s32(attrs[NL80211_ATTR_CQM_RSSI_THOLD]);
+               u32 hysteresis = nla_get_u32(attrs[NL80211_ATTR_CQM_RSSI_HYST]);
 
-out:
-       return err;
+               return nl80211_set_cqm_rssi(info, threshold, hysteresis);
+       }
+
+       if (attrs[NL80211_ATTR_CQM_TXE_RATE] &&
+           attrs[NL80211_ATTR_CQM_TXE_PKTS] &&
+           attrs[NL80211_ATTR_CQM_TXE_INTVL]) {
+               u32 rate = nla_get_u32(attrs[NL80211_ATTR_CQM_TXE_RATE]);
+               u32 pkts = nla_get_u32(attrs[NL80211_ATTR_CQM_TXE_PKTS]);
+               u32 intvl = nla_get_u32(attrs[NL80211_ATTR_CQM_TXE_INTVL]);
+
+               return nl80211_set_cqm_txe(info, rate, pkts, intvl);
+       }
+
+       return -EINVAL;
 }
 
 static int nl80211_join_mesh(struct sk_buff *skb, struct genl_info *info)
@@ -7595,12 +7767,11 @@ static int nl80211_send_wowlan_patterns(struct sk_buff *msg,
                if (!nl_pat)
                        return -ENOBUFS;
                pat_len = wowlan->patterns[i].pattern_len;
-               if (nla_put(msg, NL80211_WOWLAN_PKTPAT_MASK,
-                           DIV_ROUND_UP(pat_len, 8),
+               if (nla_put(msg, NL80211_PKTPAT_MASK, DIV_ROUND_UP(pat_len, 8),
                            wowlan->patterns[i].mask) ||
-                   nla_put(msg, NL80211_WOWLAN_PKTPAT_PATTERN,
-                           pat_len, wowlan->patterns[i].pattern) ||
-                   nla_put_u32(msg, NL80211_WOWLAN_PKTPAT_OFFSET,
+                   nla_put(msg, NL80211_PKTPAT_PATTERN, pat_len,
+                           wowlan->patterns[i].pattern) ||
+                   nla_put_u32(msg, NL80211_PKTPAT_OFFSET,
                                wowlan->patterns[i].pkt_offset))
                        return -ENOBUFS;
                nla_nest_end(msg, nl_pat);
@@ -7941,7 +8112,7 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info)
                struct nlattr *pat;
                int n_patterns = 0;
                int rem, pat_len, mask_len, pkt_offset;
-               struct nlattr *pat_tb[NUM_NL80211_WOWLAN_PKTPAT];
+               struct nlattr *pat_tb[NUM_NL80211_PKTPAT];
 
                nla_for_each_nested(pat, tb[NL80211_WOWLAN_TRIG_PKT_PATTERN],
                                    rem)
@@ -7960,26 +8131,25 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info)
 
                nla_for_each_nested(pat, tb[NL80211_WOWLAN_TRIG_PKT_PATTERN],
                                    rem) {
-                       nla_parse(pat_tb, MAX_NL80211_WOWLAN_PKTPAT,
-                                 nla_data(pat), nla_len(pat), NULL);
+                       nla_parse(pat_tb, MAX_NL80211_PKTPAT, nla_data(pat),
+                                 nla_len(pat), NULL);
                        err = -EINVAL;
-                       if (!pat_tb[NL80211_WOWLAN_PKTPAT_MASK] ||
-                           !pat_tb[NL80211_WOWLAN_PKTPAT_PATTERN])
+                       if (!pat_tb[NL80211_PKTPAT_MASK] ||
+                           !pat_tb[NL80211_PKTPAT_PATTERN])
                                goto error;
-                       pat_len = nla_len(pat_tb[NL80211_WOWLAN_PKTPAT_PATTERN]);
+                       pat_len = nla_len(pat_tb[NL80211_PKTPAT_PATTERN]);
                        mask_len = DIV_ROUND_UP(pat_len, 8);
-                       if (nla_len(pat_tb[NL80211_WOWLAN_PKTPAT_MASK]) !=
-                           mask_len)
+                       if (nla_len(pat_tb[NL80211_PKTPAT_MASK]) != mask_len)
                                goto error;
                        if (pat_len > wowlan->pattern_max_len ||
                            pat_len < wowlan->pattern_min_len)
                                goto error;
 
-                       if (!pat_tb[NL80211_WOWLAN_PKTPAT_OFFSET])
+                       if (!pat_tb[NL80211_PKTPAT_OFFSET])
                                pkt_offset = 0;
                        else
                                pkt_offset = nla_get_u32(
-                                       pat_tb[NL80211_WOWLAN_PKTPAT_OFFSET]);
+                                       pat_tb[NL80211_PKTPAT_OFFSET]);
                        if (pkt_offset > wowlan->max_pkt_offset)
                                goto error;
                        new_triggers.patterns[i].pkt_offset = pkt_offset;
@@ -7993,11 +8163,11 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info)
                        new_triggers.patterns[i].pattern =
                                new_triggers.patterns[i].mask + mask_len;
                        memcpy(new_triggers.patterns[i].mask,
-                              nla_data(pat_tb[NL80211_WOWLAN_PKTPAT_MASK]),
+                              nla_data(pat_tb[NL80211_PKTPAT_MASK]),
                               mask_len);
                        new_triggers.patterns[i].pattern_len = pat_len;
                        memcpy(new_triggers.patterns[i].pattern,
-                              nla_data(pat_tb[NL80211_WOWLAN_PKTPAT_PATTERN]),
+                              nla_data(pat_tb[NL80211_PKTPAT_PATTERN]),
                               pat_len);
                        i++;
                }
@@ -8036,6 +8206,264 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info)
 }
 #endif
 
+static int nl80211_send_coalesce_rules(struct sk_buff *msg,
+                                      struct cfg80211_registered_device *rdev)
+{
+       struct nlattr *nl_pats, *nl_pat, *nl_rule, *nl_rules;
+       int i, j, pat_len;
+       struct cfg80211_coalesce_rules *rule;
+
+       if (!rdev->coalesce->n_rules)
+               return 0;
+
+       nl_rules = nla_nest_start(msg, NL80211_ATTR_COALESCE_RULE);
+       if (!nl_rules)
+               return -ENOBUFS;
+
+       for (i = 0; i < rdev->coalesce->n_rules; i++) {
+               nl_rule = nla_nest_start(msg, i + 1);
+               if (!nl_rule)
+                       return -ENOBUFS;
+
+               rule = &rdev->coalesce->rules[i];
+               if (nla_put_u32(msg, NL80211_ATTR_COALESCE_RULE_DELAY,
+                               rule->delay))
+                       return -ENOBUFS;
+
+               if (nla_put_u32(msg, NL80211_ATTR_COALESCE_RULE_CONDITION,
+                               rule->condition))
+                       return -ENOBUFS;
+
+               nl_pats = nla_nest_start(msg,
+                               NL80211_ATTR_COALESCE_RULE_PKT_PATTERN);
+               if (!nl_pats)
+                       return -ENOBUFS;
+
+               for (j = 0; j < rule->n_patterns; j++) {
+                       nl_pat = nla_nest_start(msg, j + 1);
+                       if (!nl_pat)
+                               return -ENOBUFS;
+                       pat_len = rule->patterns[j].pattern_len;
+                       if (nla_put(msg, NL80211_PKTPAT_MASK,
+                                   DIV_ROUND_UP(pat_len, 8),
+                                   rule->patterns[j].mask) ||
+                           nla_put(msg, NL80211_PKTPAT_PATTERN, pat_len,
+                                   rule->patterns[j].pattern) ||
+                           nla_put_u32(msg, NL80211_PKTPAT_OFFSET,
+                                       rule->patterns[j].pkt_offset))
+                               return -ENOBUFS;
+                       nla_nest_end(msg, nl_pat);
+               }
+               nla_nest_end(msg, nl_pats);
+               nla_nest_end(msg, nl_rule);
+       }
+       nla_nest_end(msg, nl_rules);
+
+       return 0;
+}
+
+static int nl80211_get_coalesce(struct sk_buff *skb, struct genl_info *info)
+{
+       struct cfg80211_registered_device *rdev = info->user_ptr[0];
+       struct sk_buff *msg;
+       void *hdr;
+
+       if (!rdev->wiphy.coalesce)
+               return -EOPNOTSUPP;
+
+       msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+       if (!msg)
+               return -ENOMEM;
+
+       hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0,
+                            NL80211_CMD_GET_COALESCE);
+       if (!hdr)
+               goto nla_put_failure;
+
+       if (rdev->coalesce && nl80211_send_coalesce_rules(msg, rdev))
+               goto nla_put_failure;
+
+       genlmsg_end(msg, hdr);
+       return genlmsg_reply(msg, info);
+
+nla_put_failure:
+       nlmsg_free(msg);
+       return -ENOBUFS;
+}
+
+void cfg80211_rdev_free_coalesce(struct cfg80211_registered_device *rdev)
+{
+       struct cfg80211_coalesce *coalesce = rdev->coalesce;
+       int i, j;
+       struct cfg80211_coalesce_rules *rule;
+
+       if (!coalesce)
+               return;
+
+       for (i = 0; i < coalesce->n_rules; i++) {
+               rule = &coalesce->rules[i];
+               for (j = 0; j < rule->n_patterns; j++)
+                       kfree(rule->patterns[j].mask);
+               kfree(rule->patterns);
+       }
+       kfree(coalesce->rules);
+       kfree(coalesce);
+       rdev->coalesce = NULL;
+}
+
+static int nl80211_parse_coalesce_rule(struct cfg80211_registered_device *rdev,
+                                      struct nlattr *rule,
+                                      struct cfg80211_coalesce_rules *new_rule)
+{
+       int err, i;
+       const struct wiphy_coalesce_support *coalesce = rdev->wiphy.coalesce;
+       struct nlattr *tb[NUM_NL80211_ATTR_COALESCE_RULE], *pat;
+       int rem, pat_len, mask_len, pkt_offset, n_patterns = 0;
+       struct nlattr *pat_tb[NUM_NL80211_PKTPAT];
+
+       err = nla_parse(tb, NL80211_ATTR_COALESCE_RULE_MAX, nla_data(rule),
+                       nla_len(rule), nl80211_coalesce_policy);
+       if (err)
+               return err;
+
+       if (tb[NL80211_ATTR_COALESCE_RULE_DELAY])
+               new_rule->delay =
+                       nla_get_u32(tb[NL80211_ATTR_COALESCE_RULE_DELAY]);
+       if (new_rule->delay > coalesce->max_delay)
+               return -EINVAL;
+
+       if (tb[NL80211_ATTR_COALESCE_RULE_CONDITION])
+               new_rule->condition =
+                       nla_get_u32(tb[NL80211_ATTR_COALESCE_RULE_CONDITION]);
+       if (new_rule->condition != NL80211_COALESCE_CONDITION_MATCH &&
+           new_rule->condition != NL80211_COALESCE_CONDITION_NO_MATCH)
+               return -EINVAL;
+
+       if (!tb[NL80211_ATTR_COALESCE_RULE_PKT_PATTERN])
+               return -EINVAL;
+
+       nla_for_each_nested(pat, tb[NL80211_ATTR_COALESCE_RULE_PKT_PATTERN],
+                           rem)
+               n_patterns++;
+       if (n_patterns > coalesce->n_patterns)
+               return -EINVAL;
+
+       new_rule->patterns = kcalloc(n_patterns, sizeof(new_rule->patterns[0]),
+                                    GFP_KERNEL);
+       if (!new_rule->patterns)
+               return -ENOMEM;
+
+       new_rule->n_patterns = n_patterns;
+       i = 0;
+
+       nla_for_each_nested(pat, tb[NL80211_ATTR_COALESCE_RULE_PKT_PATTERN],
+                           rem) {
+               nla_parse(pat_tb, MAX_NL80211_PKTPAT, nla_data(pat),
+                         nla_len(pat), NULL);
+               if (!pat_tb[NL80211_PKTPAT_MASK] ||
+                   !pat_tb[NL80211_PKTPAT_PATTERN])
+                       return -EINVAL;
+               pat_len = nla_len(pat_tb[NL80211_PKTPAT_PATTERN]);
+               mask_len = DIV_ROUND_UP(pat_len, 8);
+               if (nla_len(pat_tb[NL80211_PKTPAT_MASK]) != mask_len)
+                       return -EINVAL;
+               if (pat_len > coalesce->pattern_max_len ||
+                   pat_len < coalesce->pattern_min_len)
+                       return -EINVAL;
+
+               if (!pat_tb[NL80211_PKTPAT_OFFSET])
+                       pkt_offset = 0;
+               else
+                       pkt_offset = nla_get_u32(pat_tb[NL80211_PKTPAT_OFFSET]);
+               if (pkt_offset > coalesce->max_pkt_offset)
+                       return -EINVAL;
+               new_rule->patterns[i].pkt_offset = pkt_offset;
+
+               new_rule->patterns[i].mask =
+                       kmalloc(mask_len + pat_len, GFP_KERNEL);
+               if (!new_rule->patterns[i].mask)
+                       return -ENOMEM;
+               new_rule->patterns[i].pattern =
+                       new_rule->patterns[i].mask + mask_len;
+               memcpy(new_rule->patterns[i].mask,
+                      nla_data(pat_tb[NL80211_PKTPAT_MASK]), mask_len);
+               new_rule->patterns[i].pattern_len = pat_len;
+               memcpy(new_rule->patterns[i].pattern,
+                      nla_data(pat_tb[NL80211_PKTPAT_PATTERN]), pat_len);
+               i++;
+       }
+
+       return 0;
+}
+
+static int nl80211_set_coalesce(struct sk_buff *skb, struct genl_info *info)
+{
+       struct cfg80211_registered_device *rdev = info->user_ptr[0];
+       const struct wiphy_coalesce_support *coalesce = rdev->wiphy.coalesce;
+       struct cfg80211_coalesce new_coalesce = {};
+       struct cfg80211_coalesce *n_coalesce;
+       int err, rem_rule, n_rules = 0, i, j;
+       struct nlattr *rule;
+       struct cfg80211_coalesce_rules *tmp_rule;
+
+       if (!rdev->wiphy.coalesce || !rdev->ops->set_coalesce)
+               return -EOPNOTSUPP;
+
+       if (!info->attrs[NL80211_ATTR_COALESCE_RULE]) {
+               cfg80211_rdev_free_coalesce(rdev);
+               rdev->ops->set_coalesce(&rdev->wiphy, NULL);
+               return 0;
+       }
+
+       nla_for_each_nested(rule, info->attrs[NL80211_ATTR_COALESCE_RULE],
+                           rem_rule)
+               n_rules++;
+       if (n_rules > coalesce->n_rules)
+               return -EINVAL;
+
+       new_coalesce.rules = kcalloc(n_rules, sizeof(new_coalesce.rules[0]),
+                                    GFP_KERNEL);
+       if (!new_coalesce.rules)
+               return -ENOMEM;
+
+       new_coalesce.n_rules = n_rules;
+       i = 0;
+
+       nla_for_each_nested(rule, info->attrs[NL80211_ATTR_COALESCE_RULE],
+                           rem_rule) {
+               err = nl80211_parse_coalesce_rule(rdev, rule,
+                                                 &new_coalesce.rules[i]);
+               if (err)
+                       goto error;
+
+               i++;
+       }
+
+       err = rdev->ops->set_coalesce(&rdev->wiphy, &new_coalesce);
+       if (err)
+               goto error;
+
+       n_coalesce = kmemdup(&new_coalesce, sizeof(new_coalesce), GFP_KERNEL);
+       if (!n_coalesce) {
+               err = -ENOMEM;
+               goto error;
+       }
+       cfg80211_rdev_free_coalesce(rdev);
+       rdev->coalesce = n_coalesce;
+
+       return 0;
+error:
+       for (i = 0; i < new_coalesce.n_rules; i++) {
+               tmp_rule = &new_coalesce.rules[i];
+               for (j = 0; j < tmp_rule->n_patterns; j++)
+                       kfree(tmp_rule->patterns[j].mask);
+               kfree(tmp_rule->patterns);
+       }
+       kfree(new_coalesce.rules);
+
+       return err;
+}
+
 static int nl80211_set_rekey_data(struct sk_buff *skb, struct genl_info *info)
 {
        struct cfg80211_registered_device *rdev = info->user_ptr[0];
@@ -8132,9 +8560,8 @@ static int nl80211_probe_client(struct sk_buff *skb,
 
        hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0,
                             NL80211_CMD_PROBE_CLIENT);
-
-       if (IS_ERR(hdr)) {
-               err = PTR_ERR(hdr);
+       if (!hdr) {
+               err = -ENOBUFS;
                goto free_msg;
        }
 
@@ -9043,7 +9470,30 @@ static struct genl_ops nl80211_ops[] = {
                .flags = GENL_ADMIN_PERM,
                .internal_flags = NL80211_FLAG_NEED_WDEV_UP |
                                  NL80211_FLAG_NEED_RTNL,
-       }
+       },
+       {
+               .cmd = NL80211_CMD_GET_COALESCE,
+               .doit = nl80211_get_coalesce,
+               .policy = nl80211_policy,
+               .internal_flags = NL80211_FLAG_NEED_WIPHY |
+                                 NL80211_FLAG_NEED_RTNL,
+       },
+       {
+               .cmd = NL80211_CMD_SET_COALESCE,
+               .doit = nl80211_set_coalesce,
+               .policy = nl80211_policy,
+               .flags = GENL_ADMIN_PERM,
+               .internal_flags = NL80211_FLAG_NEED_WIPHY |
+                                 NL80211_FLAG_NEED_RTNL,
+       },
+       {
+               .cmd = NL80211_CMD_CHANNEL_SWITCH,
+               .doit = nl80211_channel_switch,
+               .policy = nl80211_policy,
+               .flags = GENL_ADMIN_PERM,
+               .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
+                                 NL80211_FLAG_NEED_RTNL,
+       },
 };
 
 static struct genl_multicast_group nl80211_mlme_mcgrp = {
index a4073e808c13c24bca525617d4de3cba8b18874e..44341bf53cfc98767f41899dd73479557fdc26aa 100644 (file)
@@ -74,4 +74,6 @@ nl80211_radar_notify(struct cfg80211_registered_device *rdev,
                     enum nl80211_radar_event event,
                     struct net_device *netdev, gfp_t gfp);
 
+void cfg80211_rdev_free_coalesce(struct cfg80211_registered_device *rdev);
+
 #endif /* __NET_WIRELESS_NL80211_H */
index 9f15f0ac824dbb8c73ac88e2ce83f8505ea9a142..37ce9fdfe934345fe6603b399b8c4d3fd4f44ec1 100644 (file)
@@ -516,11 +516,12 @@ static inline void rdev_rfkill_poll(struct cfg80211_registered_device *rdev)
 
 #ifdef CONFIG_NL80211_TESTMODE
 static inline int rdev_testmode_cmd(struct cfg80211_registered_device *rdev,
+                                   struct wireless_dev *wdev,
                                    void *data, int len)
 {
        int ret;
-       trace_rdev_testmode_cmd(&rdev->wiphy);
-       ret = rdev->ops->testmode_cmd(&rdev->wiphy, data, len);
+       trace_rdev_testmode_cmd(&rdev->wiphy, wdev);
+       ret = rdev->ops->testmode_cmd(&rdev->wiphy, wdev, data, len);
        trace_rdev_return_int(&rdev->wiphy, ret);
        return ret;
 }
@@ -923,4 +924,16 @@ static inline void rdev_crit_proto_stop(struct cfg80211_registered_device *rdev,
        trace_rdev_return_void(&rdev->wiphy);
 }
 
+static inline int rdev_channel_switch(struct cfg80211_registered_device *rdev,
+                                     struct net_device *dev,
+                                     struct cfg80211_csa_settings *params)
+{
+       int ret;
+
+       trace_rdev_channel_switch(&rdev->wiphy, dev, params);
+       ret = rdev->ops->channel_switch(&rdev->wiphy, dev, params);
+       trace_rdev_return_int(&rdev->wiphy, ret);
+       return ret;
+}
+
 #endif /* __CFG80211_RDEV_OPS */
index ae8c186b50d68510f88e758b959ad663e911a5de..ad1e4068ce06644218e6e005acdc129a1610ae56 100644 (file)
@@ -651,6 +651,8 @@ static bool cfg80211_combine_bsses(struct cfg80211_registered_device *dev,
                        continue;
                if (bss->pub.channel != new->pub.channel)
                        continue;
+               if (bss->pub.scan_width != new->pub.scan_width)
+                       continue;
                if (rcu_access_pointer(bss->pub.beacon_ies))
                        continue;
                ies = rcu_access_pointer(bss->pub.ies);
@@ -870,11 +872,12 @@ cfg80211_get_bss_channel(struct wiphy *wiphy, const u8 *ie, size_t ielen,
 
 /* Returned bss is reference counted and must be cleaned up appropriately. */
 struct cfg80211_bss*
-cfg80211_inform_bss(struct wiphy *wiphy,
-                   struct ieee80211_channel *channel,
-                   const u8 *bssid, u64 tsf, u16 capability,
-                   u16 beacon_interval, const u8 *ie, size_t ielen,
-                   s32 signal, gfp_t gfp)
+cfg80211_inform_bss_width(struct wiphy *wiphy,
+                         struct ieee80211_channel *channel,
+                         enum nl80211_bss_scan_width scan_width,
+                         const u8 *bssid, u64 tsf, u16 capability,
+                         u16 beacon_interval, const u8 *ie, size_t ielen,
+                         s32 signal, gfp_t gfp)
 {
        struct cfg80211_bss_ies *ies;
        struct cfg80211_internal_bss tmp = {}, *res;
@@ -892,6 +895,7 @@ cfg80211_inform_bss(struct wiphy *wiphy,
 
        memcpy(tmp.pub.bssid, bssid, ETH_ALEN);
        tmp.pub.channel = channel;
+       tmp.pub.scan_width = scan_width;
        tmp.pub.signal = signal;
        tmp.pub.beacon_interval = beacon_interval;
        tmp.pub.capability = capability;
@@ -924,14 +928,15 @@ cfg80211_inform_bss(struct wiphy *wiphy,
        /* cfg80211_bss_update gives us a referenced result */
        return &res->pub;
 }
-EXPORT_SYMBOL(cfg80211_inform_bss);
+EXPORT_SYMBOL(cfg80211_inform_bss_width);
 
 /* Returned bss is reference counted and must be cleaned up appropriately. */
 struct cfg80211_bss *
-cfg80211_inform_bss_frame(struct wiphy *wiphy,
-                         struct ieee80211_channel *channel,
-                         struct ieee80211_mgmt *mgmt, size_t len,
-                         s32 signal, gfp_t gfp)
+cfg80211_inform_bss_width_frame(struct wiphy *wiphy,
+                               struct ieee80211_channel *channel,
+                               enum nl80211_bss_scan_width scan_width,
+                               struct ieee80211_mgmt *mgmt, size_t len,
+                               s32 signal, gfp_t gfp)
 {
        struct cfg80211_internal_bss tmp = {}, *res;
        struct cfg80211_bss_ies *ies;
@@ -941,7 +946,8 @@ cfg80211_inform_bss_frame(struct wiphy *wiphy,
        BUILD_BUG_ON(offsetof(struct ieee80211_mgmt, u.probe_resp.variable) !=
                        offsetof(struct ieee80211_mgmt, u.beacon.variable));
 
-       trace_cfg80211_inform_bss_frame(wiphy, channel, mgmt, len, signal);
+       trace_cfg80211_inform_bss_width_frame(wiphy, channel, scan_width, mgmt,
+                                             len, signal);
 
        if (WARN_ON(!mgmt))
                return NULL;
@@ -976,6 +982,7 @@ cfg80211_inform_bss_frame(struct wiphy *wiphy,
        
        memcpy(tmp.pub.bssid, mgmt->bssid, ETH_ALEN);
        tmp.pub.channel = channel;
+       tmp.pub.scan_width = scan_width;
        tmp.pub.signal = signal;
        tmp.pub.beacon_interval = le16_to_cpu(mgmt->u.probe_resp.beacon_int);
        tmp.pub.capability = le16_to_cpu(mgmt->u.probe_resp.capab_info);
@@ -991,7 +998,7 @@ cfg80211_inform_bss_frame(struct wiphy *wiphy,
        /* cfg80211_bss_update gives us a referenced result */
        return &res->pub;
 }
-EXPORT_SYMBOL(cfg80211_inform_bss_frame);
+EXPORT_SYMBOL(cfg80211_inform_bss_width_frame);
 
 void cfg80211_ref_bss(struct wiphy *wiphy, struct cfg80211_bss *pub)
 {
index 81c8a10d743c04fb76981498b42588f9b821f33f..20e86a95dc4e0ed358485f04208c670297ee6517 100644 (file)
@@ -976,21 +976,19 @@ int cfg80211_disconnect(struct cfg80211_registered_device *rdev,
                        struct net_device *dev, u16 reason, bool wextev)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
-       int err;
+       int err = 0;
 
        ASSERT_WDEV_LOCK(wdev);
 
        kfree(wdev->connect_keys);
        wdev->connect_keys = NULL;
 
-       if (wdev->conn) {
+       if (wdev->conn)
                err = cfg80211_sme_disconnect(wdev, reason);
-       } else if (!rdev->ops->disconnect) {
+       else if (!rdev->ops->disconnect)
                cfg80211_mlme_down(rdev, dev);
-               err = 0;
-       } else {
+       else if (wdev->current_bss)
                err = rdev_disconnect(rdev, dev, reason);
-       }
 
        return err;
 }
index e1534baf2ebbe5d0fdc41d4e2d6ceb42fd9630f3..ba5f0d6614d5d4cee4d08c609bb192dd501f0b34 100644 (file)
@@ -1293,15 +1293,17 @@ TRACE_EVENT(rdev_return_int_int,
 
 #ifdef CONFIG_NL80211_TESTMODE
 TRACE_EVENT(rdev_testmode_cmd,
-       TP_PROTO(struct wiphy *wiphy),
-       TP_ARGS(wiphy),
+       TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev),
+       TP_ARGS(wiphy, wdev),
        TP_STRUCT__entry(
                WIPHY_ENTRY
+               WDEV_ENTRY
        ),
        TP_fast_assign(
                WIPHY_ASSIGN;
+               WDEV_ASSIGN;
        ),
-       TP_printk(WIPHY_PR_FMT, WIPHY_PR_ARG)
+       TP_printk(WIPHY_PR_FMT WDEV_PR_FMT, WIPHY_PR_ARG, WDEV_PR_ARG)
 );
 
 TRACE_EVENT(rdev_testmode_dump,
@@ -1841,6 +1843,39 @@ TRACE_EVENT(rdev_crit_proto_stop,
                  WIPHY_PR_ARG, WDEV_PR_ARG)
 );
 
+TRACE_EVENT(rdev_channel_switch,
+       TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
+                struct cfg80211_csa_settings *params),
+       TP_ARGS(wiphy, netdev, params),
+       TP_STRUCT__entry(
+               WIPHY_ENTRY
+               NETDEV_ENTRY
+               CHAN_DEF_ENTRY
+               __field(u16, counter_offset_beacon)
+               __field(u16, counter_offset_presp)
+               __field(bool, radar_required)
+               __field(bool, block_tx)
+               __field(u8, count)
+       ),
+       TP_fast_assign(
+               WIPHY_ASSIGN;
+               NETDEV_ASSIGN;
+               CHAN_DEF_ASSIGN(&params->chandef);
+               __entry->counter_offset_beacon = params->counter_offset_beacon;
+               __entry->counter_offset_presp = params->counter_offset_presp;
+               __entry->radar_required = params->radar_required;
+               __entry->block_tx = params->block_tx;
+               __entry->count = params->count;
+       ),
+       TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", " CHAN_DEF_PR_FMT
+                 ", block_tx: %d, count: %u, radar_required: %d"
+                 ", counter offsets (beacon/presp): %u/%u",
+                 WIPHY_PR_ARG, NETDEV_PR_ARG, CHAN_DEF_PR_ARG,
+                 __entry->block_tx, __entry->count, __entry->radar_required,
+                 __entry->counter_offset_beacon,
+                 __entry->counter_offset_presp)
+);
+
 /*************************************************************
  *          cfg80211 exported functions traces              *
  *************************************************************/
@@ -2391,26 +2426,30 @@ TRACE_EVENT(cfg80211_get_bss,
                  __entry->capa_mask, __entry->capa_val)
 );
 
-TRACE_EVENT(cfg80211_inform_bss_frame,
+TRACE_EVENT(cfg80211_inform_bss_width_frame,
        TP_PROTO(struct wiphy *wiphy, struct ieee80211_channel *channel,
+                enum nl80211_bss_scan_width scan_width,
                 struct ieee80211_mgmt *mgmt, size_t len,
                 s32 signal),
-       TP_ARGS(wiphy, channel, mgmt, len, signal),
+       TP_ARGS(wiphy, channel, scan_width, mgmt, len, signal),
        TP_STRUCT__entry(
                WIPHY_ENTRY
                CHAN_ENTRY
+               __field(enum nl80211_bss_scan_width, scan_width)
                __dynamic_array(u8, mgmt, len)
                __field(s32, signal)
        ),
        TP_fast_assign(
                WIPHY_ASSIGN;
                CHAN_ASSIGN(channel);
+               __entry->scan_width = scan_width;
                if (mgmt)
                        memcpy(__get_dynamic_array(mgmt), mgmt, len);
                __entry->signal = signal;
        ),
-       TP_printk(WIPHY_PR_FMT ", " CHAN_PR_FMT "signal: %d",
-                 WIPHY_PR_ARG, CHAN_PR_ARG, __entry->signal)
+       TP_printk(WIPHY_PR_FMT ", " CHAN_PR_FMT "(scan_width: %d) signal: %d",
+                 WIPHY_PR_ARG, CHAN_PR_ARG, __entry->scan_width,
+                 __entry->signal)
 );
 
 DECLARE_EVENT_CLASS(cfg80211_bss_evt,
index 74458b7f61eb8bcd00eadd0cd4c24a380c589fb1..ce090c1c5e4fdb36459c4f6fde6b5af241a75f0e 100644 (file)
@@ -33,7 +33,8 @@ ieee80211_get_response_rate(struct ieee80211_supported_band *sband,
 }
 EXPORT_SYMBOL(ieee80211_get_response_rate);
 
-u32 ieee80211_mandatory_rates(struct ieee80211_supported_band *sband)
+u32 ieee80211_mandatory_rates(struct ieee80211_supported_band *sband,
+                             enum nl80211_bss_scan_width scan_width)
 {
        struct ieee80211_rate *bitrates;
        u32 mandatory_rates = 0;
@@ -43,10 +44,15 @@ u32 ieee80211_mandatory_rates(struct ieee80211_supported_band *sband)
        if (WARN_ON(!sband))
                return 1;
 
-       if (sband->band == IEEE80211_BAND_2GHZ)
-               mandatory_flag = IEEE80211_RATE_MANDATORY_B;
-       else
+       if (sband->band == IEEE80211_BAND_2GHZ) {
+               if (scan_width == NL80211_BSS_CHAN_WIDTH_5 ||
+                   scan_width == NL80211_BSS_CHAN_WIDTH_10)
+                       mandatory_flag = IEEE80211_RATE_MANDATORY_G;
+               else
+                       mandatory_flag = IEEE80211_RATE_MANDATORY_B;
+       } else {
                mandatory_flag = IEEE80211_RATE_MANDATORY_A;
+       }
 
        bitrates = sband->bitrates;
        for (i = 0; i < sband->n_bitrates; i++)
index eb4a8428864879a1346fbd7895b4e5afaa4d91e3..3bb2cdc13b46e1468743e1130d6c4c25e08921e0 100644 (file)
@@ -214,5 +214,26 @@ int xfrm_inner_extract_output(struct xfrm_state *x, struct sk_buff *skb)
        return inner_mode->afinfo->extract_output(x, skb);
 }
 
+void xfrm_local_error(struct sk_buff *skb, int mtu)
+{
+       unsigned int proto;
+       struct xfrm_state_afinfo *afinfo;
+
+       if (skb->protocol == htons(ETH_P_IP))
+               proto = AF_INET;
+       else if (skb->protocol == htons(ETH_P_IPV6))
+               proto = AF_INET6;
+       else
+               return;
+
+       afinfo = xfrm_state_get_afinfo(proto);
+       if (!afinfo)
+               return;
+
+       afinfo->local_error(skb, mtu);
+       xfrm_state_put_afinfo(afinfo);
+}
+
 EXPORT_SYMBOL_GPL(xfrm_output);
 EXPORT_SYMBOL_GPL(xfrm_inner_extract_output);
+EXPORT_SYMBOL_GPL(xfrm_local_error);
index e52cab3591dd78c373274bb64420f87383775e8c..ad8cc7bcf0651eb1f9f00085f779a80060372c15 100644 (file)
@@ -308,7 +308,7 @@ void xfrm_policy_destroy(struct xfrm_policy *policy)
 {
        BUG_ON(!policy->walk.dead);
 
-       if (del_timer(&policy->timer))
+       if (del_timer(&policy->timer) || del_timer(&policy->polq.hold_timer))
                BUG();
 
        security_xfrm_policy_free(policy->security);
@@ -660,7 +660,13 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
        xfrm_pol_hold(policy);
        net->xfrm.policy_count[dir]++;
        atomic_inc(&flow_cache_genid);
-       rt_genid_bump(net);
+
+       /* After previous checking, family can either be AF_INET or AF_INET6 */
+       if (policy->family == AF_INET)
+               rt_genid_bump_ipv4(net);
+       else
+               rt_genid_bump_ipv6(net);
+
        if (delpol) {
                xfrm_policy_requeue(delpol, policy);
                __xfrm_policy_unlink(delpol, dir);
@@ -2126,8 +2132,6 @@ restart:
                 * have the xfrm_state's. We need to wait for KM to
                 * negotiate new SA's or bail out with error.*/
                if (net->xfrm.sysctl_larval_drop) {
-                       /* EREMOTE tells the caller to generate
-                        * a one-shot blackhole route. */
                        dst_release(dst);
                        xfrm_pols_put(pols, drop_pols);
                        XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
index 78f66fa92449c92865dfa8314020854ed704744f..b9c3f9e943a9159d1617feec49c751055ea4dd55 100644 (file)
@@ -39,9 +39,6 @@ static DEFINE_SPINLOCK(xfrm_state_lock);
 
 static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024;
 
-static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family);
-static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo);
-
 static inline unsigned int xfrm_dst_hash(struct net *net,
                                         const xfrm_address_t *daddr,
                                         const xfrm_address_t *saddr,
@@ -499,7 +496,8 @@ struct xfrm_state *xfrm_state_alloc(struct net *net)
                INIT_HLIST_NODE(&x->bydst);
                INIT_HLIST_NODE(&x->bysrc);
                INIT_HLIST_NODE(&x->byspi);
-               tasklet_hrtimer_init(&x->mtimer, xfrm_timer_handler, CLOCK_REALTIME, HRTIMER_MODE_ABS);
+               tasklet_hrtimer_init(&x->mtimer, xfrm_timer_handler,
+                                       CLOCK_BOOTTIME, HRTIMER_MODE_ABS);
                setup_timer(&x->rtimer, xfrm_replay_timer_handler,
                                (unsigned long)x);
                x->curlft.add_time = get_seconds();
@@ -990,11 +988,13 @@ void xfrm_state_insert(struct xfrm_state *x)
 EXPORT_SYMBOL(xfrm_state_insert);
 
 /* xfrm_state_lock is held */
-static struct xfrm_state *__find_acq_core(struct net *net, struct xfrm_mark *m,
+static struct xfrm_state *__find_acq_core(struct net *net,
+                                         const struct xfrm_mark *m,
                                          unsigned short family, u8 mode,
                                          u32 reqid, u8 proto,
                                          const xfrm_address_t *daddr,
-                                         const xfrm_address_t *saddr, int create)
+                                         const xfrm_address_t *saddr,
+                                         int create)
 {
        unsigned int h = xfrm_dst_hash(net, daddr, saddr, reqid, family);
        struct xfrm_state *x;
@@ -1399,9 +1399,9 @@ xfrm_state_lookup_byaddr(struct net *net, u32 mark,
 EXPORT_SYMBOL(xfrm_state_lookup_byaddr);
 
 struct xfrm_state *
-xfrm_find_acq(struct net *net, struct xfrm_mark *mark, u8 mode, u32 reqid, u8 proto,
-             const xfrm_address_t *daddr, const xfrm_address_t *saddr,
-             int create, unsigned short family)
+xfrm_find_acq(struct net *net, const struct xfrm_mark *mark, u8 mode, u32 reqid,
+             u8 proto, const xfrm_address_t *daddr,
+             const xfrm_address_t *saddr, int create, unsigned short family)
 {
        struct xfrm_state *x;
 
@@ -1860,7 +1860,7 @@ int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo)
 }
 EXPORT_SYMBOL(xfrm_state_unregister_afinfo);
 
-static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family)
+struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family)
 {
        struct xfrm_state_afinfo *afinfo;
        if (unlikely(family >= NPROTO))
@@ -1872,7 +1872,7 @@ static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family)
        return afinfo;
 }
 
-static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo)
+void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo)
 {
        rcu_read_unlock();
 }
diff --git a/samples/hidraw/.gitignore b/samples/hidraw/.gitignore
new file mode 100644 (file)
index 0000000..05e51a6
--- /dev/null
@@ -0,0 +1 @@
+hid-example
index ebf5e0c368ea0eb4bf48fbda2f76b5b347c3e247..366db1a9fb65b5662ffdb14564750c60b6f012c7 100644 (file)
@@ -37,6 +37,11 @@ static int handler_pre(struct kprobe *p, struct pt_regs *regs)
                        " status = 0x%lx\n",
                p->addr, regs->cp0_epc, regs->cp0_status);
 #endif
+#ifdef CONFIG_TILEGX
+       printk(KERN_INFO "pre_handler: p->addr = 0x%p, pc = 0x%lx,"
+                       " ex1 = 0x%lx\n",
+               p->addr, regs->pc, regs->ex1);
+#endif
 
        /* A dump_stack() here will give a stack backtrace */
        return 0;
@@ -58,6 +63,10 @@ static void handler_post(struct kprobe *p, struct pt_regs *regs,
        printk(KERN_INFO "post_handler: p->addr = 0x%p, status = 0x%lx\n",
                p->addr, regs->cp0_status);
 #endif
+#ifdef CONFIG_TILEGX
+       printk(KERN_INFO "post_handler: p->addr = 0x%p, ex1 = 0x%lx\n",
+               p->addr, regs->ex1);
+#endif
 }
 
 /*
diff --git a/scripts/coccinelle/misc/boolreturn.cocci b/scripts/coccinelle/misc/boolreturn.cocci
new file mode 100644 (file)
index 0000000..a43c7b0
--- /dev/null
@@ -0,0 +1,58 @@
+/// Return statements in functions returning bool should use
+/// true/false instead of 1/0.
+//
+// Confidence: High
+// Options: --no-includes --include-headers
+
+virtual patch
+virtual report
+virtual context
+
+@r1 depends on patch@
+identifier fn;
+typedef bool;
+symbol false;
+symbol true;
+@@
+
+bool fn ( ... )
+{
+<...
+return
+(
+- 0
++ false
+|
+- 1
++ true
+)
+  ;
+...>
+}
+
+@r2 depends on report || context@
+identifier fn;
+position p;
+@@
+
+bool fn ( ... )
+{
+<...
+return
+(
+* 0@p
+|
+* 1@p
+)
+  ;
+...>
+}
+
+
+@script:python depends on report@
+p << r2.p;
+fn << r2.fn;
+@@
+
+msg = "WARNING: return of 0/1 in function '%s' with return type bool" % fn
+coccilib.report.print_report(p[0], msg)
index 567120a87c39c0152643fba45f5487979b305afc..2283be2bb62c067d3d318431d1071328a7d649fb 100755 (executable)
@@ -62,15 +62,52 @@ checkarg() {
        fi
 }
 
+txt_append() {
+       local anchor="$1"
+       local insert="$2"
+       local infile="$3"
+       local tmpfile="$infile.swp"
+
+       # sed append cmd: 'a\' + newline + text + newline
+       cmd="$(printf "a\\%b$insert" "\n")"
+
+       sed -e "/$anchor/$cmd" "$infile" >"$tmpfile"
+       # replace original file with the edited one
+       mv "$tmpfile" "$infile"
+}
+
+txt_subst() {
+       local before="$1"
+       local after="$2"
+       local infile="$3"
+       local tmpfile="$infile.swp"
+
+       sed -e "s/$before/$after/" "$infile" >"$tmpfile"
+       # replace original file with the edited one
+       mv "$tmpfile" "$infile"
+}
+
+txt_delete() {
+       local text="$1"
+       local infile="$2"
+       local tmpfile="$infile.swp"
+
+       sed -e "/$text/d" "$infile" >"$tmpfile"
+       # replace original file with the edited one
+       mv "$tmpfile" "$infile"
+}
+
 set_var() {
        local name=$1 new=$2 before=$3
 
        name_re="^($name=|# $name is not set)"
        before_re="^($before=|# $before is not set)"
        if test -n "$before" && grep -Eq "$before_re" "$FN"; then
-               sed -ri "/$before_re/a $new" "$FN"
+               txt_append "^$before=" "$new" "$FN"
+               txt_append "^# $before is not set" "$new" "$FN"
        elif grep -Eq "$name_re" "$FN"; then
-               sed -ri "s:$name_re.*:$new:" "$FN"
+               txt_subst "^$name=.*" "$new" "$FN"
+               txt_subst "^# $name is not set" "$new" "$FN"
        else
                echo "$new" >>"$FN"
        fi
@@ -79,7 +116,8 @@ set_var() {
 undef_var() {
        local name=$1
 
-       sed -ri "/^($name=|# $name is not set)/d" "$FN"
+       txt_delete "^$name=" "$FN"
+       txt_delete "^# $name is not set" "$FN"
 }
 
 if [ "$1" = "--file" ]; then
index b91f3e34d44d5d39907b8bbef3de4fb6f37fbb69..0ee65839f7aa00ee931dc2c0ebf2edb665654ca4 100755 (executable)
@@ -94,8 +94,13 @@ def main():
         configa_filename = sys.argv[1]
         configb_filename = sys.argv[2]
 
-    a = readconfig(file(configa_filename))
-    b = readconfig(file(configb_filename))
+    try:
+        a = readconfig(file(configa_filename))
+        b = readconfig(file(configb_filename))
+    except (IOError):
+        e = sys.exc_info()[1]
+        print("I/O error[%s]: %s\n" % (e.args[0],e.args[1]))
+        usage()
 
     # print items in a but not b (accumulate, sort and print)
     old = []
index c55c227af463008396bc32548337769f71cc0d15..87f723804079ed3b6c1fbb0d1470f717582c4a28 100644 (file)
@@ -140,7 +140,9 @@ static int conf_set_sym_val(struct symbol *sym, int def, int def_flags, char *p)
                        sym->flags |= def_flags;
                        break;
                }
-               conf_warning("symbol value '%s' invalid for %s", p, sym->name);
+               if (def != S_DEF_AUTO)
+                       conf_warning("symbol value '%s' invalid for %s",
+                                    p, sym->name);
                return 1;
        case S_OTHER:
                if (*p != '"') {
@@ -161,7 +163,8 @@ static int conf_set_sym_val(struct symbol *sym, int def, int def_flags, char *p)
                        memmove(p2, p2 + 1, strlen(p2));
                }
                if (!p2) {
-                       conf_warning("invalid string found");
+                       if (def != S_DEF_AUTO)
+                               conf_warning("invalid string found");
                        return 1;
                }
                /* fall through */
@@ -172,7 +175,9 @@ static int conf_set_sym_val(struct symbol *sym, int def, int def_flags, char *p)
                        sym->def[def].val = strdup(p);
                        sym->flags |= def_flags;
                } else {
-                       conf_warning("symbol value '%s' invalid for %s", p, sym->name);
+                       if (def != S_DEF_AUTO)
+                               conf_warning("symbol value '%s' invalid for %s",
+                                            p, sym->name);
                        return 1;
                }
                break;
index 6c9c45f9fbbac432a877112f4243f483ed01e9e3..2c3963165a0d83f45b2d180361e12e4542588302 100644 (file)
@@ -401,8 +401,8 @@ static void search_conf(void)
        struct subtitle_part stpart;
 
        title = str_new();
-       str_printf( &title, _("Enter %s (sub)string or regexp to search for "
-                             "(with or without \"%s\")"), CONFIG_, CONFIG_);
+       str_printf( &title, _("Enter (sub)string or regexp to search for "
+                             "(with or without \"%s\")"), CONFIG_);
 
 again:
        dialog_clear();
index 7975d8d258c3f6cb8848a137a3f263a4f16f1d20..4fbecd2473bc51904629056eaa8bacb7b42fb660 100644 (file)
@@ -695,8 +695,8 @@ static void search_conf(void)
        int dres;
 
        title = str_new();
-       str_printf( &title, _("Enter %s (sub)string or regexp to search for "
-                             "(with or without \"%s\")"), CONFIG_, CONFIG_);
+       str_printf( &title, _("Enter (sub)string or regexp to search for "
+                             "(with or without \"%s\")"), CONFIG_);
 
 again:
        dres = dialog_inputbox(main_window,
index d550300ec00c34e5b1748478c6381341e9412e0b..c9a6775565bfa13252138f81ef8a0312b11cfea8 100644 (file)
@@ -136,7 +136,7 @@ static struct property *sym_get_range_prop(struct symbol *sym)
        return NULL;
 }
 
-static long sym_get_range_val(struct symbol *sym, int base)
+static long long sym_get_range_val(struct symbol *sym, int base)
 {
        sym_calc_value(sym);
        switch (sym->type) {
@@ -149,13 +149,14 @@ static long sym_get_range_val(struct symbol *sym, int base)
        default:
                break;
        }
-       return strtol(sym->curr.val, NULL, base);
+       return strtoll(sym->curr.val, NULL, base);
 }
 
 static void sym_validate_range(struct symbol *sym)
 {
        struct property *prop;
-       long base, val, val2;
+       int base;
+       long long val, val2;
        char str[64];
 
        switch (sym->type) {
@@ -171,7 +172,7 @@ static void sym_validate_range(struct symbol *sym)
        prop = sym_get_range_prop(sym);
        if (!prop)
                return;
-       val = strtol(sym->curr.val, NULL, base);
+       val = strtoll(sym->curr.val, NULL, base);
        val2 = sym_get_range_val(prop->expr->left.sym, base);
        if (val >= val2) {
                val2 = sym_get_range_val(prop->expr->right.sym, base);
@@ -179,9 +180,9 @@ static void sym_validate_range(struct symbol *sym)
                        return;
        }
        if (sym->type == S_INT)
-               sprintf(str, "%ld", val2);
+               sprintf(str, "%lld", val2);
        else
-               sprintf(str, "0x%lx", val2);
+               sprintf(str, "0x%llx", val2);
        sym->curr.val = strdup(str);
 }
 
@@ -594,7 +595,7 @@ bool sym_string_valid(struct symbol *sym, const char *str)
 bool sym_string_within_range(struct symbol *sym, const char *str)
 {
        struct property *prop;
-       long val;
+       long long val;
 
        switch (sym->type) {
        case S_STRING:
@@ -605,7 +606,7 @@ bool sym_string_within_range(struct symbol *sym, const char *str)
                prop = sym_get_range_prop(sym);
                if (!prop)
                        return true;
-               val = strtol(str, NULL, 10);
+               val = strtoll(str, NULL, 10);
                return val >= sym_get_range_val(prop->expr->left.sym, 10) &&
                       val <= sym_get_range_val(prop->expr->right.sym, 10);
        case S_HEX:
@@ -614,7 +615,7 @@ bool sym_string_within_range(struct symbol *sym, const char *str)
                prop = sym_get_range_prop(sym);
                if (!prop)
                        return true;
-               val = strtol(str, NULL, 16);
+               val = strtoll(str, NULL, 16);
                return val >= sym_get_range_val(prop->expr->left.sym, 16) &&
                       val <= sym_get_range_val(prop->expr->right.sym, 16);
        case S_BOOLEAN:
@@ -963,11 +964,11 @@ struct sym_match {
  * - first, symbols that match exactly
  * - then, alphabetical sort
  */
-static int sym_rel_comp( const void *sym1, const void *sym2 )
+static int sym_rel_comp(const void *sym1, const void *sym2)
 {
-       struct sym_match *s1 = *(struct sym_match **)sym1;
-       struct sym_match *s2 = *(struct sym_match **)sym2;
-       int l1, l2;
+       const struct sym_match *s1 = sym1;
+       const struct sym_match *s2 = sym2;
+       int exact1, exact2;
 
        /* Exact match:
         * - if matched length on symbol s1 is the length of that symbol,
@@ -978,11 +979,11 @@ static int sym_rel_comp( const void *sym1, const void *sym2 )
         * exactly; if this is the case, we can't decide which comes first,
         * and we fallback to sorting alphabetically.
         */
-       l1 = s1->eo - s1->so;
-       l2 = s2->eo - s2->so;
-       if (l1 == strlen(s1->sym->name) && l2 != strlen(s2->sym->name))
+       exact1 = (s1->eo - s1->so) == strlen(s1->sym->name);
+       exact2 = (s2->eo - s2->so) == strlen(s2->sym->name);
+       if (exact1 && !exact2)
                return -1;
-       if (l1 != strlen(s1->sym->name) && l2 == strlen(s2->sym->name))
+       if (!exact1 && exact2)
                return 1;
 
        /* As a fallback, sort symbols alphabetically */
@@ -992,7 +993,7 @@ static int sym_rel_comp( const void *sym1, const void *sym2 )
 struct symbol **sym_re_search(const char *pattern)
 {
        struct symbol *sym, **sym_arr = NULL;
-       struct sym_match **sym_match_arr = NULL;
+       struct sym_match *sym_match_arr = NULL;
        int i, cnt, size;
        regex_t re;
        regmatch_t match[1];
@@ -1005,47 +1006,38 @@ struct symbol **sym_re_search(const char *pattern)
                return NULL;
 
        for_all_symbols(i, sym) {
-               struct sym_match *tmp_sym_match;
                if (sym->flags & SYMBOL_CONST || !sym->name)
                        continue;
                if (regexec(&re, sym->name, 1, match, 0))
                        continue;
-               if (cnt + 1 >= size) {
+               if (cnt >= size) {
                        void *tmp;
                        size += 16;
-                       tmp = realloc(sym_match_arr, size * sizeof(struct sym_match *));
-                       if (!tmp) {
+                       tmp = realloc(sym_match_arr, size * sizeof(struct sym_match));
+                       if (!tmp)
                                goto sym_re_search_free;
-                       }
                        sym_match_arr = tmp;
                }
                sym_calc_value(sym);
-               tmp_sym_match = (struct sym_match*)malloc(sizeof(struct sym_match));
-               if (!tmp_sym_match)
-                       goto sym_re_search_free;
-               tmp_sym_match->sym = sym;
-               /* As regexec return 0, we know we have a match, so
+               /* As regexec returned 0, we know we have a match, so
                 * we can use match[0].rm_[se]o without further checks
                 */
-               tmp_sym_match->so = match[0].rm_so;
-               tmp_sym_match->eo = match[0].rm_eo;
-               sym_match_arr[cnt++] = tmp_sym_match;
+               sym_match_arr[cnt].so = match[0].rm_so;
+               sym_match_arr[cnt].eo = match[0].rm_eo;
+               sym_match_arr[cnt++].sym = sym;
        }
        if (sym_match_arr) {
-               qsort(sym_match_arr, cnt, sizeof(struct sym_match*), sym_rel_comp);
+               qsort(sym_match_arr, cnt, sizeof(struct sym_match), sym_rel_comp);
                sym_arr = malloc((cnt+1) * sizeof(struct symbol));
                if (!sym_arr)
                        goto sym_re_search_free;
                for (i = 0; i < cnt; i++)
-                       sym_arr[i] = sym_match_arr[i]->sym;
+                       sym_arr[i] = sym_match_arr[i].sym;
                sym_arr[cnt] = NULL;
        }
 sym_re_search_free:
-       if (sym_match_arr) {
-               for (i = 0; i < cnt; i++)
-                       free(sym_match_arr[i]);
-               free(sym_match_arr);
-       }
+       /* sym_match_arr can be NULL if no match, but free(NULL) is OK */
+       free(sym_match_arr);
        regfree(&re);
 
        return sym_arr;
index 62164348ecf7104e0a91671430702efbfb5fd610..8247979e8f64dd2eef5671544d3b39e8ed3487fc 100644 (file)
@@ -821,6 +821,7 @@ static const char *section_white_list[] =
 {
        ".comment*",
        ".debug*",
+       ".cranges",             /* sh64 */
        ".zdebug*",             /* Compressed debug sections. */
        ".GCC-command-line",    /* mn10300 */
        ".GCC.command.line",    /* record-gcc-switches, non mn10300 */
index acb86507828aaba1304594588959fbfc2804c528..90e521fde35fa827cf5008838024ef1bf1d03a88 100644 (file)
@@ -41,9 +41,9 @@ create_package() {
        parisc*)
                debarch=hppa ;;
        mips*)
-               debarch=mips$(grep -q CPU_LITTLE_ENDIAN=y .config && echo el) ;;
+               debarch=mips$(grep -q CPU_LITTLE_ENDIAN=y $KCONFIG_CONFIG && echo el) ;;
        arm*)
-               debarch=arm$(grep -q CONFIG_AEABI=y .config && echo el) ;;
+               debarch=arm$(grep -q CONFIG_AEABI=y $KCONFIG_CONFIG && echo el) ;;
        *)
                echo "" >&2
                echo "** ** **  WARNING  ** ** **" >&2
@@ -78,17 +78,35 @@ tmpdir="$objtree/debian/tmp"
 fwdir="$objtree/debian/fwtmp"
 kernel_headers_dir="$objtree/debian/hdrtmp"
 libc_headers_dir="$objtree/debian/headertmp"
+dbg_dir="$objtree/debian/dbgtmp"
 packagename=linux-image-$version
-fwpackagename=linux-firmware-image
+fwpackagename=linux-firmware-image-$version
 kernel_headers_packagename=linux-headers-$version
 libc_headers_packagename=linux-libc-dev
+dbg_packagename=$packagename-dbg
 
 if [ "$ARCH" = "um" ] ; then
        packagename=user-mode-linux-$version
 fi
 
+# Not all arches have the same installed path in debian
+# XXX: have each arch Makefile export a variable of the canonical image install
+# path instead
+case $ARCH in
+um)
+       installed_image_path="usr/bin/linux-$version"
+       ;;
+parisc|mips|powerpc)
+       installed_image_path="boot/vmlinux-$version"
+       ;;
+*)
+       installed_image_path="boot/vmlinuz-$version"
+esac
+
+BUILD_DEBUG="$(grep -s '^CONFIG_DEBUG_INFO=y' $KCONFIG_CONFIG || true)"
+
 # Setup the directory structure
-rm -rf "$tmpdir" "$fwdir" "$kernel_headers_dir" "$libc_headers_dir"
+rm -rf "$tmpdir" "$fwdir" "$kernel_headers_dir" "$libc_headers_dir" "$dbg_dir"
 mkdir -m 755 -p "$tmpdir/DEBIAN"
 mkdir -p  "$tmpdir/lib" "$tmpdir/boot" "$tmpdir/usr/share/doc/$packagename"
 mkdir -m 755 -p "$fwdir/DEBIAN"
@@ -101,26 +119,29 @@ mkdir -p "$kernel_headers_dir/lib/modules/$version/"
 if [ "$ARCH" = "um" ] ; then
        mkdir -p "$tmpdir/usr/lib/uml/modules/$version" "$tmpdir/usr/bin"
 fi
+if [ -n "$BUILD_DEBUG" ] ; then
+       mkdir -p "$dbg_dir/usr/share/doc/$dbg_packagename"
+       mkdir -m 755 -p "$dbg_dir/DEBIAN"
+fi
 
 # Build and install the kernel
 if [ "$ARCH" = "um" ] ; then
        $MAKE linux
        cp System.map "$tmpdir/usr/lib/uml/modules/$version/System.map"
-       cp .config "$tmpdir/usr/share/doc/$packagename/config"
+       cp $KCONFIG_CONFIG "$tmpdir/usr/share/doc/$packagename/config"
        gzip "$tmpdir/usr/share/doc/$packagename/config"
-       cp $KBUILD_IMAGE "$tmpdir/usr/bin/linux-$version"
 else 
        cp System.map "$tmpdir/boot/System.map-$version"
-       cp .config "$tmpdir/boot/config-$version"
-       # Not all arches include the boot path in KBUILD_IMAGE
-       if [ -e $KBUILD_IMAGE ]; then
-               cp $KBUILD_IMAGE "$tmpdir/boot/vmlinuz-$version"
-       else
-               cp arch/$ARCH/boot/$KBUILD_IMAGE "$tmpdir/boot/vmlinuz-$version"
-       fi
+       cp $KCONFIG_CONFIG "$tmpdir/boot/config-$version"
+fi
+# Not all arches include the boot path in KBUILD_IMAGE
+if [ -e $KBUILD_IMAGE ]; then
+       cp $KBUILD_IMAGE "$tmpdir/$installed_image_path"
+else
+       cp arch/$ARCH/boot/$KBUILD_IMAGE "$tmpdir/$installed_image_path"
 fi
 
-if grep -q '^CONFIG_MODULES=y' .config ; then
+if grep -q '^CONFIG_MODULES=y' $KCONFIG_CONFIG ; then
        INSTALL_MOD_PATH="$tmpdir" $MAKE KBUILD_SRC= modules_install
        rm -f "$tmpdir/lib/modules/$version/build"
        rm -f "$tmpdir/lib/modules/$version/source"
@@ -128,6 +149,20 @@ if grep -q '^CONFIG_MODULES=y' .config ; then
                mv "$tmpdir/lib/modules/$version"/* "$tmpdir/usr/lib/uml/modules/$version/"
                rmdir "$tmpdir/lib/modules/$version"
        fi
+       if [ -n "$BUILD_DEBUG" ] ; then
+               (
+                       cd $tmpdir
+                       for module in $(find lib/modules/ -name *.ko); do
+                               mkdir -p $(dirname $dbg_dir/usr/lib/debug/$module)
+                               # only keep debug symbols in the debug file
+                               objcopy --only-keep-debug $module $dbg_dir/usr/lib/debug/$module
+                               # strip original module from debug symbols
+                               objcopy --strip-debug $module
+                               # then add a link to those
+                               objcopy --add-gnu-debuglink=$dbg_dir/usr/lib/debug/$module $module
+                       done
+               )
+       fi
 fi
 
 if [ "$ARCH" != "um" ]; then
@@ -149,7 +184,7 @@ set -e
 # Pass maintainer script parameters to hook scripts
 export DEB_MAINT_PARAMS="\$*"
 
-test -d $debhookdir/$script.d && run-parts --arg="$version" $debhookdir/$script.d
+test -d $debhookdir/$script.d && run-parts --arg="$version" --arg="/$installed_image_path" $debhookdir/$script.d
 exit 0
 EOF
        chmod 755 "$tmpdir/DEBIAN/$script"
@@ -245,11 +280,12 @@ fi
 # Build header package
 (cd $srctree; find . -name Makefile\* -o -name Kconfig\* -o -name \*.pl > "$objtree/debian/hdrsrcfiles")
 (cd $srctree; find arch/$SRCARCH/include include scripts -type f >> "$objtree/debian/hdrsrcfiles")
-(cd $objtree; find arch/$SRCARCH/include .config Module.symvers include scripts -type f >> "$objtree/debian/hdrobjfiles")
+(cd $objtree; find arch/$SRCARCH/include Module.symvers include scripts -type f >> "$objtree/debian/hdrobjfiles")
 destdir=$kernel_headers_dir/usr/src/linux-headers-$version
 mkdir -p "$destdir"
 (cd $srctree; tar -c -f - -T "$objtree/debian/hdrsrcfiles") | (cd $destdir; tar -xf -)
 (cd $objtree; tar -c -f - -T "$objtree/debian/hdrobjfiles") | (cd $destdir; tar -xf -)
+(cd $objtree; cp $KCONFIG_CONFIG $destdir/.config) # copy .config manually to be where it's expected to be
 ln -sf "/usr/src/linux-headers-$version" "$kernel_headers_dir/lib/modules/$version/build"
 rm -f "$objtree/debian/hdrsrcfiles" "$objtree/debian/hdrobjfiles"
 arch=$(dpkg --print-architecture)
@@ -299,4 +335,30 @@ fi
 
 create_package "$packagename" "$tmpdir"
 
+if [ -n "$BUILD_DEBUG" ] ; then
+       # Build debug package
+       # Different tools want the image in different locations
+       # perf
+       mkdir -p $dbg_dir/usr/lib/debug/lib/modules/$version/
+       cp vmlinux $dbg_dir/usr/lib/debug/lib/modules/$version/
+       # systemtap
+       mkdir -p $dbg_dir/usr/lib/debug/boot/
+       ln -s ../lib/modules/$version/vmlinux $dbg_dir/usr/lib/debug/boot/vmlinux-$version
+       # kdump-tools
+       ln -s lib/modules/$version/vmlinux $dbg_dir/usr/lib/debug/vmlinux-$version
+
+       cat <<EOF >> debian/control
+
+Package: $dbg_packagename
+Section: debug
+Provides: linux-debug, linux-debug-$version
+Architecture: any
+Description: Linux kernel debugging symbols for $version
+ This package will come in handy if you need to debug the kernel. It provides
+ all the necessary debug symbols for the kernel and its modules.
+EOF
+
+       create_package "$dbg_packagename" "$dbg_dir"
+fi
+
 exit 0
index cdd9bb909bcda05efd16ba21f6700316194f6116..aa22f9447ddc2492bfee4241a1380bc4fb91a735 100644 (file)
@@ -87,6 +87,27 @@ case "${ARCH}" in
                [ -f "${objtree}/vmlinux.SYS" ] && cp -v -- "${objtree}/vmlinux.SYS" "${tmpdir}/boot/vmlinux-${KERNELRELEASE}.SYS"
                [ -f "${objtree}/vmlinux.dsk" ] && cp -v -- "${objtree}/vmlinux.dsk" "${tmpdir}/boot/vmlinux-${KERNELRELEASE}.dsk"
                ;;
+       mips)
+               if [ -f "${objtree}/arch/mips/boot/compressed/vmlinux.bin" ]; then
+                       cp -v -- "${objtree}/arch/mips/boot/compressed/vmlinux.bin" "${tmpdir}/boot/vmlinuz-${KERNELRELEASE}"
+               elif [ -f "${objtree}/arch/mips/boot/compressed/vmlinux.ecoff" ]; then
+                       cp -v -- "${objtree}/arch/mips/boot/compressed/vmlinux.ecoff" "${tmpdir}/boot/vmlinuz-${KERNELRELEASE}"
+               elif [ -f "${objtree}/arch/mips/boot/compressed/vmlinux.srec" ]; then
+                       cp -v -- "${objtree}/arch/mips/boot/compressed/vmlinux.srec" "${tmpdir}/boot/vmlinuz-${KERNELRELEASE}"
+               elif [ -f "${objtree}/vmlinux.32" ]; then
+                       cp -v -- "${objtree}/vmlinux.32" "${tmpdir}/boot/vmlinux-${KERNELRELEASE}"
+               elif [ -f "${objtree}/vmlinux.64" ]; then
+                       cp -v -- "${objtree}/vmlinux.64" "${tmpdir}/boot/vmlinux-${KERNELRELEASE}"
+               elif [ -f "${objtree}/arch/mips/boot/vmlinux.bin" ]; then
+                       cp -v -- "${objtree}/arch/mips/boot/vmlinux.bin" "${tmpdir}/boot/vmlinux-${KERNELRELEASE}"
+               elif [ -f "${objtree}/arch/mips/boot/vmlinux.ecoff" ]; then
+                       cp -v -- "${objtree}/arch/mips/boot/vmlinux.ecoff" "${tmpdir}/boot/vmlinux-${KERNELRELEASE}"
+               elif [ -f "${objtree}/arch/mips/boot/vmlinux.srec" ]; then
+                       cp -v -- "${objtree}/arch/mips/boot/vmlinux.srec" "${tmpdir}/boot/vmlinux-${KERNELRELEASE}"
+               elif [ -f "${objtree}/vmlinux" ]; then
+                       cp -v -- "${objtree}/vmlinux" "${tmpdir}/boot/vmlinux-${KERNELRELEASE}"
+               fi
+               ;;
        *)
                [ -f "${KBUILD_IMAGE}" ] && cp -v -- "${KBUILD_IMAGE}" "${tmpdir}/boot/vmlinux-kbuild-${KERNELRELEASE}"
                echo "" >&2
index fdd3fbf4d4a41a0d8bab7b93d5fb6f4c7896f94d..13957602f7ca5eb190170450b79ff877cd0cb3af 100755 (executable)
@@ -1,7 +1,7 @@
 #!/bin/sh
 #
-#      Output a simple RPM spec file that uses no fancy features requiring
-#      RPM v4. This is intended to work with any RPM distro.
+#      Output a simple RPM spec file.
+#      This version assumes a minimum of RPM 4.0.3.
 #
 #      The only gothic bit here is redefining install_post to avoid
 #      stripping the symbols from files in the kernel which we want
@@ -59,6 +59,14 @@ echo "header files define structures and constants that are needed for"
 echo "building most standard programs and are also needed for rebuilding the"
 echo "glibc package."
 echo ""
+echo "%package devel"
+echo "Summary: Development package for building kernel modules to match the $__KERNELRELEASE kernel"
+echo "Group: System Environment/Kernel"
+echo "AutoReqProv: no"
+echo "%description -n kernel-devel"
+echo "This package provides kernel headers and makefiles sufficient to build modules"
+echo "against the $__KERNELRELEASE kernel package."
+echo ""
 
 if ! $PREBUILT; then
 echo "%prep"
@@ -77,13 +85,14 @@ echo "%install"
 echo 'KBUILD_IMAGE=$(make image_name)'
 echo "%ifarch ia64"
 echo 'mkdir -p $RPM_BUILD_ROOT/boot/efi $RPM_BUILD_ROOT/lib/modules'
-echo 'mkdir -p $RPM_BUILD_ROOT/lib/firmware'
 echo "%else"
 echo 'mkdir -p $RPM_BUILD_ROOT/boot $RPM_BUILD_ROOT/lib/modules'
-echo 'mkdir -p $RPM_BUILD_ROOT/lib/firmware'
 echo "%endif"
+echo 'mkdir -p $RPM_BUILD_ROOT'"/lib/firmware/$KERNELRELEASE"
 
-echo 'INSTALL_MOD_PATH=$RPM_BUILD_ROOT make %{?_smp_mflags} KBUILD_SRC= modules_install'
+echo 'INSTALL_MOD_PATH=$RPM_BUILD_ROOT make %{?_smp_mflags} KBUILD_SRC= mod-fw= modules_install'
+echo 'INSTALL_FW_PATH=$RPM_BUILD_ROOT'"/lib/firmware/$KERNELRELEASE"
+echo 'make INSTALL_FW_PATH=$INSTALL_FW_PATH' firmware_install
 echo "%ifarch ia64"
 echo 'cp $KBUILD_IMAGE $RPM_BUILD_ROOT'"/boot/efi/vmlinuz-$KERNELRELEASE"
 echo 'ln -s '"efi/vmlinuz-$KERNELRELEASE" '$RPM_BUILD_ROOT'"/boot/"
@@ -108,18 +117,43 @@ echo 'mv vmlinux.bz2 $RPM_BUILD_ROOT'"/boot/vmlinux-$KERNELRELEASE.bz2"
 echo 'mv vmlinux.orig vmlinux'
 echo "%endif"
 
+echo 'rm -f $RPM_BUILD_ROOT'"/lib/modules/$KERNELRELEASE/{build,source}"
+echo "mkdir -p "'$RPM_BUILD_ROOT'"/usr/src/kernels/$KERNELRELEASE"
+echo "EXCLUDES=\"$RCS_TAR_IGNORE --exclude .tmp_versions --exclude=*vmlinux* --exclude=*.o --exclude=*.ko --exclude=*.cmd --exclude=Documentation --exclude=firmware --exclude .config.old --exclude .missing-syscalls.d\""
+echo "tar "'$EXCLUDES'" -cf- . | (cd "'$RPM_BUILD_ROOT'"/usr/src/kernels/$KERNELRELEASE;tar xvf -)"
+echo 'cd $RPM_BUILD_ROOT'"/lib/modules/$KERNELRELEASE"
+echo "ln -sf /usr/src/kernels/$KERNELRELEASE build"
+echo "ln -sf /usr/src/kernels/$KERNELRELEASE source"
+
 echo ""
 echo "%clean"
 echo 'rm -rf $RPM_BUILD_ROOT'
 echo ""
+echo "%post"
+echo "if [ -x /sbin/installkernel -a -r /boot/vmlinuz-$KERNELRELEASE -a -r /boot/System.map-$KERNELRELEASE ]; then"
+echo "cp /boot/vmlinuz-$KERNELRELEASE /boot/vmlinuz-$KERNELRELEASE-rpm"
+echo "cp /boot/System.map-$KERNELRELEASE /boot/System.map-$KERNELRELEASE-rpm"
+echo "rm -f /boot/vmlinuz-$KERNELRELEASE /boot/System.map-$KERNELRELEASE"
+echo "/sbin/installkernel $KERNELRELEASE /boot/vmlinuz-$KERNELRELEASE-rpm /boot/System.map-$KERNELRELEASE-rpm"
+echo "rm -f /boot/vmlinuz-$KERNELRELEASE-rpm /boot/System.map-$KERNELRELEASE-rpm"
+echo "fi"
+echo ""
 echo "%files"
 echo '%defattr (-, root, root)'
 echo "%dir /lib/modules"
 echo "/lib/modules/$KERNELRELEASE"
-echo "/lib/firmware"
+echo "%exclude /lib/modules/$KERNELRELEASE/build"
+echo "%exclude /lib/modules/$KERNELRELEASE/source"
+echo "/lib/firmware/$KERNELRELEASE"
 echo "/boot/*"
 echo ""
 echo "%files headers"
 echo '%defattr (-, root, root)'
 echo "/usr/include"
 echo ""
+echo "%files devel"
+echo '%defattr (-, root, root)'
+echo "/usr/src/kernels/$KERNELRELEASE"
+echo "/lib/modules/$KERNELRELEASE/build"
+echo "/lib/modules/$KERNELRELEASE/source"
+echo ""
index 858966ab019cc1951724b3a2f47b4bfd5b892bf6..a674fd5507c19db162af66787dcea6c65c19082d 100755 (executable)
@@ -364,6 +364,10 @@ if ($arch eq "x86_64") {
 } elsif ($arch eq "blackfin") {
     $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\s__mcount\$";
     $mcount_adjust = -4;
+} elsif ($arch eq "tilegx") {
+    $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\s__mcount\$";
+    $type = ".quad";
+    $alignment = 8;
 } else {
     die "Arch $arch is not supported with CONFIG_FTRACE_MCOUNT_RECORD";
 }
index f9ce1160419be2a81b7dabf097fc453fdb1ce9c3..7c2310c5b996dee9d43df56574b30236342c3c81 100644 (file)
@@ -64,14 +64,6 @@ fail_file(void)
        longjmp(jmpenv, SJ_FAIL);
 }
 
-static void __attribute__((noreturn))
-succeed_file(void)
-{
-       cleanup();
-       longjmp(jmpenv, SJ_SUCCEED);
-}
-
-
 /*
  * Get the whole file as a programming convenience in order to avoid
  * malloc+lseek+read+free of many pieces.  If successful, then mmap
index 9b9013b2e3211a9c6de17a9001e7939a73bf48e9..d49c53960b60c5010d314022d3e58d255ffac204 100644 (file)
@@ -29,3 +29,15 @@ config SECURITY_APPARMOR_BOOTPARAM_VALUE
          boot.
 
          If you are unsure how to answer this question, answer 1.
+
+config SECURITY_APPARMOR_HASH
+       bool "SHA1 hash of loaded profiles"
+       depends on SECURITY_APPARMOR
+       depends on CRYPTO
+       select CRYPTO_SHA1
+       default y
+
+       help
+         This option selects whether sha1 hashing is done against loaded
+          profiles and exported for inspection to user space via the apparmor
+          filesystem.
index 5706b74c857f550a2515dc445164063154a12cac..d693df87481837fa8d2fb95137d54f3b155cc280 100644 (file)
@@ -5,6 +5,7 @@ obj-$(CONFIG_SECURITY_APPARMOR) += apparmor.o
 apparmor-y := apparmorfs.o audit.o capability.o context.o ipc.o lib.o match.o \
               path.o domain.o policy.o policy_unpack.o procattr.o lsm.o \
               resource.o sid.o file.o
+apparmor-$(CONFIG_SECURITY_APPARMOR_HASH) += crypto.o
 
 clean-files := capability_names.h rlim_names.h
 
@@ -18,7 +19,11 @@ quiet_cmd_make-caps = GEN     $@
 cmd_make-caps = echo "static const char *const capability_names[] = {" > $@ ;\
        sed $< >>$@ -r -n -e '/CAP_FS_MASK/d' \
        -e 's/^\#define[ \t]+CAP_([A-Z0-9_]+)[ \t]+([0-9]+)/[\2] = "\L\1",/p';\
-       echo "};" >> $@
+       echo "};" >> $@ ;\
+       echo -n '\#define AA_FS_CAPS_MASK "' >> $@ ;\
+       sed $< -r -n -e '/CAP_FS_MASK/d' \
+           -e 's/^\#define[ \t]+CAP_([A-Z0-9_]+)[ \t]+([0-9]+)/\L\1/p' | \
+            tr '\n' ' ' | sed -e 's/ $$/"\n/' >> $@
 
 
 # Build a lower case string table of rlimit names.
index 16c15ec6f67078238d25e10f3ea6fc3b7e243b88..95c2b2689a03aa521bc923323f521a8c191d107d 100644 (file)
@@ -12,6 +12,7 @@
  * License.
  */
 
+#include <linux/ctype.h>
 #include <linux/security.h>
 #include <linux/vmalloc.h>
 #include <linux/module.h>
 #include <linux/uaccess.h>
 #include <linux/namei.h>
 #include <linux/capability.h>
+#include <linux/rcupdate.h>
 
 #include "include/apparmor.h"
 #include "include/apparmorfs.h"
 #include "include/audit.h"
 #include "include/context.h"
+#include "include/crypto.h"
 #include "include/policy.h"
 #include "include/resource.h"
 
+/**
+ * aa_mangle_name - mangle a profile name to std profile layout form
+ * @name: profile name to mangle  (NOT NULL)
+ * @target: buffer to store mangled name, same length as @name (MAYBE NULL)
+ *
+ * Returns: length of mangled name
+ */
+static int mangle_name(char *name, char *target)
+{
+       char *t = target;
+
+       while (*name == '/' || *name == '.')
+               name++;
+
+       if (target) {
+               for (; *name; name++) {
+                       if (*name == '/')
+                               *(t)++ = '.';
+                       else if (isspace(*name))
+                               *(t)++ = '_';
+                       else if (isalnum(*name) || strchr("._-", *name))
+                               *(t)++ = *name;
+               }
+
+               *t = 0;
+       } else {
+               int len = 0;
+               for (; *name; name++) {
+                       if (isalnum(*name) || isspace(*name) ||
+                           strchr("/._-", *name))
+                               len++;
+               }
+
+               return len;
+       }
+
+       return t - target;
+}
+
 /**
  * aa_simple_write_to_buffer - common routine for getting policy from user
  * @op: operation doing the user buffer copy
@@ -182,8 +224,567 @@ const struct file_operations aa_fs_seq_file_ops = {
        .release        = single_release,
 };
 
-/** Base file system setup **/
+static int aa_fs_seq_profile_open(struct inode *inode, struct file *file,
+                                 int (*show)(struct seq_file *, void *))
+{
+       struct aa_replacedby *r = aa_get_replacedby(inode->i_private);
+       int error = single_open(file, show, r);
+
+       if (error) {
+               file->private_data = NULL;
+               aa_put_replacedby(r);
+       }
+
+       return error;
+}
+
+static int aa_fs_seq_profile_release(struct inode *inode, struct file *file)
+{
+       struct seq_file *seq = (struct seq_file *) file->private_data;
+       if (seq)
+               aa_put_replacedby(seq->private);
+       return single_release(inode, file);
+}
+
+static int aa_fs_seq_profname_show(struct seq_file *seq, void *v)
+{
+       struct aa_replacedby *r = seq->private;
+       struct aa_profile *profile = aa_get_profile_rcu(&r->profile);
+       seq_printf(seq, "%s\n", profile->base.name);
+       aa_put_profile(profile);
+
+       return 0;
+}
+
+static int aa_fs_seq_profname_open(struct inode *inode, struct file *file)
+{
+       return aa_fs_seq_profile_open(inode, file, aa_fs_seq_profname_show);
+}
+
+static const struct file_operations aa_fs_profname_fops = {
+       .owner          = THIS_MODULE,
+       .open           = aa_fs_seq_profname_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = aa_fs_seq_profile_release,
+};
+
+static int aa_fs_seq_profmode_show(struct seq_file *seq, void *v)
+{
+       struct aa_replacedby *r = seq->private;
+       struct aa_profile *profile = aa_get_profile_rcu(&r->profile);
+       seq_printf(seq, "%s\n", aa_profile_mode_names[profile->mode]);
+       aa_put_profile(profile);
+
+       return 0;
+}
+
+static int aa_fs_seq_profmode_open(struct inode *inode, struct file *file)
+{
+       return aa_fs_seq_profile_open(inode, file, aa_fs_seq_profmode_show);
+}
+
+static const struct file_operations aa_fs_profmode_fops = {
+       .owner          = THIS_MODULE,
+       .open           = aa_fs_seq_profmode_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = aa_fs_seq_profile_release,
+};
+
+static int aa_fs_seq_profattach_show(struct seq_file *seq, void *v)
+{
+       struct aa_replacedby *r = seq->private;
+       struct aa_profile *profile = aa_get_profile_rcu(&r->profile);
+       if (profile->attach)
+               seq_printf(seq, "%s\n", profile->attach);
+       else if (profile->xmatch)
+               seq_puts(seq, "<unknown>\n");
+       else
+               seq_printf(seq, "%s\n", profile->base.name);
+       aa_put_profile(profile);
+
+       return 0;
+}
+
+static int aa_fs_seq_profattach_open(struct inode *inode, struct file *file)
+{
+       return aa_fs_seq_profile_open(inode, file, aa_fs_seq_profattach_show);
+}
+
+static const struct file_operations aa_fs_profattach_fops = {
+       .owner          = THIS_MODULE,
+       .open           = aa_fs_seq_profattach_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = aa_fs_seq_profile_release,
+};
+
+static int aa_fs_seq_hash_show(struct seq_file *seq, void *v)
+{
+       struct aa_replacedby *r = seq->private;
+       struct aa_profile *profile = aa_get_profile_rcu(&r->profile);
+       unsigned int i, size = aa_hash_size();
+
+       if (profile->hash) {
+               for (i = 0; i < size; i++)
+                       seq_printf(seq, "%.2x", profile->hash[i]);
+               seq_puts(seq, "\n");
+       }
+
+       return 0;
+}
+
+static int aa_fs_seq_hash_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, aa_fs_seq_hash_show, inode->i_private);
+}
+
+static const struct file_operations aa_fs_seq_hash_fops = {
+       .owner          = THIS_MODULE,
+       .open           = aa_fs_seq_hash_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+};
+
+/** fns to setup dynamic per profile/namespace files **/
+void __aa_fs_profile_rmdir(struct aa_profile *profile)
+{
+       struct aa_profile *child;
+       int i;
+
+       if (!profile)
+               return;
+
+       list_for_each_entry(child, &profile->base.profiles, base.list)
+               __aa_fs_profile_rmdir(child);
+
+       for (i = AAFS_PROF_SIZEOF - 1; i >= 0; --i) {
+               struct aa_replacedby *r;
+               if (!profile->dents[i])
+                       continue;
+
+               r = profile->dents[i]->d_inode->i_private;
+               securityfs_remove(profile->dents[i]);
+               aa_put_replacedby(r);
+               profile->dents[i] = NULL;
+       }
+}
+
+void __aa_fs_profile_migrate_dents(struct aa_profile *old,
+                                  struct aa_profile *new)
+{
+       int i;
+
+       for (i = 0; i < AAFS_PROF_SIZEOF; i++) {
+               new->dents[i] = old->dents[i];
+               old->dents[i] = NULL;
+       }
+}
+
+static struct dentry *create_profile_file(struct dentry *dir, const char *name,
+                                         struct aa_profile *profile,
+                                         const struct file_operations *fops)
+{
+       struct aa_replacedby *r = aa_get_replacedby(profile->replacedby);
+       struct dentry *dent;
+
+       dent = securityfs_create_file(name, S_IFREG | 0444, dir, r, fops);
+       if (IS_ERR(dent))
+               aa_put_replacedby(r);
+
+       return dent;
+}
+
+/* requires lock be held */
+int __aa_fs_profile_mkdir(struct aa_profile *profile, struct dentry *parent)
+{
+       struct aa_profile *child;
+       struct dentry *dent = NULL, *dir;
+       int error;
+
+       if (!parent) {
+               struct aa_profile *p;
+               p = aa_deref_parent(profile);
+               dent = prof_dir(p);
+               /* adding to parent that previously didn't have children */
+               dent = securityfs_create_dir("profiles", dent);
+               if (IS_ERR(dent))
+                       goto fail;
+               prof_child_dir(p) = parent = dent;
+       }
+
+       if (!profile->dirname) {
+               int len, id_len;
+               len = mangle_name(profile->base.name, NULL);
+               id_len = snprintf(NULL, 0, ".%ld", profile->ns->uniq_id);
+
+               profile->dirname = kmalloc(len + id_len + 1, GFP_KERNEL);
+               if (!profile->dirname)
+                       goto fail;
+
+               mangle_name(profile->base.name, profile->dirname);
+               sprintf(profile->dirname + len, ".%ld", profile->ns->uniq_id++);
+       }
+
+       dent = securityfs_create_dir(profile->dirname, parent);
+       if (IS_ERR(dent))
+               goto fail;
+       prof_dir(profile) = dir = dent;
+
+       dent = create_profile_file(dir, "name", profile, &aa_fs_profname_fops);
+       if (IS_ERR(dent))
+               goto fail;
+       profile->dents[AAFS_PROF_NAME] = dent;
+
+       dent = create_profile_file(dir, "mode", profile, &aa_fs_profmode_fops);
+       if (IS_ERR(dent))
+               goto fail;
+       profile->dents[AAFS_PROF_MODE] = dent;
+
+       dent = create_profile_file(dir, "attach", profile,
+                                  &aa_fs_profattach_fops);
+       if (IS_ERR(dent))
+               goto fail;
+       profile->dents[AAFS_PROF_ATTACH] = dent;
+
+       if (profile->hash) {
+               dent = create_profile_file(dir, "sha1", profile,
+                                          &aa_fs_seq_hash_fops);
+               if (IS_ERR(dent))
+                       goto fail;
+               profile->dents[AAFS_PROF_HASH] = dent;
+       }
+
+       list_for_each_entry(child, &profile->base.profiles, base.list) {
+               error = __aa_fs_profile_mkdir(child, prof_child_dir(profile));
+               if (error)
+                       goto fail2;
+       }
+
+       return 0;
+
+fail:
+       error = PTR_ERR(dent);
+
+fail2:
+       __aa_fs_profile_rmdir(profile);
+
+       return error;
+}
+
+void __aa_fs_namespace_rmdir(struct aa_namespace *ns)
+{
+       struct aa_namespace *sub;
+       struct aa_profile *child;
+       int i;
+
+       if (!ns)
+               return;
+
+       list_for_each_entry(child, &ns->base.profiles, base.list)
+               __aa_fs_profile_rmdir(child);
+
+       list_for_each_entry(sub, &ns->sub_ns, base.list) {
+               mutex_lock(&sub->lock);
+               __aa_fs_namespace_rmdir(sub);
+               mutex_unlock(&sub->lock);
+       }
 
+       for (i = AAFS_NS_SIZEOF - 1; i >= 0; --i) {
+               securityfs_remove(ns->dents[i]);
+               ns->dents[i] = NULL;
+       }
+}
+
+int __aa_fs_namespace_mkdir(struct aa_namespace *ns, struct dentry *parent,
+                           const char *name)
+{
+       struct aa_namespace *sub;
+       struct aa_profile *child;
+       struct dentry *dent, *dir;
+       int error;
+
+       if (!name)
+               name = ns->base.name;
+
+       dent = securityfs_create_dir(name, parent);
+       if (IS_ERR(dent))
+               goto fail;
+       ns_dir(ns) = dir = dent;
+
+       dent = securityfs_create_dir("profiles", dir);
+       if (IS_ERR(dent))
+               goto fail;
+       ns_subprofs_dir(ns) = dent;
+
+       dent = securityfs_create_dir("namespaces", dir);
+       if (IS_ERR(dent))
+               goto fail;
+       ns_subns_dir(ns) = dent;
+
+       list_for_each_entry(child, &ns->base.profiles, base.list) {
+               error = __aa_fs_profile_mkdir(child, ns_subprofs_dir(ns));
+               if (error)
+                       goto fail2;
+       }
+
+       list_for_each_entry(sub, &ns->sub_ns, base.list) {
+               mutex_lock(&sub->lock);
+               error = __aa_fs_namespace_mkdir(sub, ns_subns_dir(ns), NULL);
+               mutex_unlock(&sub->lock);
+               if (error)
+                       goto fail2;
+       }
+
+       return 0;
+
+fail:
+       error = PTR_ERR(dent);
+
+fail2:
+       __aa_fs_namespace_rmdir(ns);
+
+       return error;
+}
+
+
+#define list_entry_next(pos, member) \
+       list_entry(pos->member.next, typeof(*pos), member)
+#define list_entry_is_head(pos, head, member) (&pos->member == (head))
+
+/**
+ * __next_namespace - find the next namespace to list
+ * @root: root namespace to stop search at (NOT NULL)
+ * @ns: current ns position (NOT NULL)
+ *
+ * Find the next namespace from @ns under @root and handle all locking needed
+ * while switching current namespace.
+ *
+ * Returns: next namespace or NULL if at last namespace under @root
+ * Requires: ns->parent->lock to be held
+ * NOTE: will not unlock root->lock
+ */
+static struct aa_namespace *__next_namespace(struct aa_namespace *root,
+                                            struct aa_namespace *ns)
+{
+       struct aa_namespace *parent, *next;
+
+       /* is next namespace a child */
+       if (!list_empty(&ns->sub_ns)) {
+               next = list_first_entry(&ns->sub_ns, typeof(*ns), base.list);
+               mutex_lock(&next->lock);
+               return next;
+       }
+
+       /* check if the next ns is a sibling, parent, gp, .. */
+       parent = ns->parent;
+       while (parent) {
+               mutex_unlock(&ns->lock);
+               next = list_entry_next(ns, base.list);
+               if (!list_entry_is_head(next, &parent->sub_ns, base.list)) {
+                       mutex_lock(&next->lock);
+                       return next;
+               }
+               if (parent == root)
+                       return NULL;
+               ns = parent;
+               parent = parent->parent;
+       }
+
+       return NULL;
+}
+
+/**
+ * __first_profile - find the first profile in a namespace
+ * @root: namespace that is root of profiles being displayed (NOT NULL)
+ * @ns: namespace to start in   (NOT NULL)
+ *
+ * Returns: unrefcounted profile or NULL if no profile
+ * Requires: profile->ns.lock to be held
+ */
+static struct aa_profile *__first_profile(struct aa_namespace *root,
+                                         struct aa_namespace *ns)
+{
+       for (; ns; ns = __next_namespace(root, ns)) {
+               if (!list_empty(&ns->base.profiles))
+                       return list_first_entry(&ns->base.profiles,
+                                               struct aa_profile, base.list);
+       }
+       return NULL;
+}
+
+/**
+ * __next_profile - step to the next profile in a profile tree
+ * @profile: current profile in tree (NOT NULL)
+ *
+ * Perform a depth first traversal on the profile tree in a namespace
+ *
+ * Returns: next profile or NULL if done
+ * Requires: profile->ns.lock to be held
+ */
+static struct aa_profile *__next_profile(struct aa_profile *p)
+{
+       struct aa_profile *parent;
+       struct aa_namespace *ns = p->ns;
+
+       /* is next profile a child */
+       if (!list_empty(&p->base.profiles))
+               return list_first_entry(&p->base.profiles, typeof(*p),
+                                       base.list);
+
+       /* is next profile a sibling, parent sibling, gp, sibling, .. */
+       parent = rcu_dereference_protected(p->parent,
+                                          mutex_is_locked(&p->ns->lock));
+       while (parent) {
+               p = list_entry_next(p, base.list);
+               if (!list_entry_is_head(p, &parent->base.profiles, base.list))
+                       return p;
+               p = parent;
+               parent = rcu_dereference_protected(parent->parent,
+                                           mutex_is_locked(&parent->ns->lock));
+       }
+
+       /* is next another profile in the namespace */
+       p = list_entry_next(p, base.list);
+       if (!list_entry_is_head(p, &ns->base.profiles, base.list))
+               return p;
+
+       return NULL;
+}
+
+/**
+ * next_profile - step to the next profile in where ever it may be
+ * @root: root namespace  (NOT NULL)
+ * @profile: current profile  (NOT NULL)
+ *
+ * Returns: next profile or NULL if there isn't one
+ */
+static struct aa_profile *next_profile(struct aa_namespace *root,
+                                      struct aa_profile *profile)
+{
+       struct aa_profile *next = __next_profile(profile);
+       if (next)
+               return next;
+
+       /* finished all profiles in namespace move to next namespace */
+       return __first_profile(root, __next_namespace(root, profile->ns));
+}
+
+/**
+ * p_start - start a depth first traversal of profile tree
+ * @f: seq_file to fill
+ * @pos: current position
+ *
+ * Returns: first profile under current namespace or NULL if none found
+ *
+ * acquires first ns->lock
+ */
+static void *p_start(struct seq_file *f, loff_t *pos)
+{
+       struct aa_profile *profile = NULL;
+       struct aa_namespace *root = aa_current_profile()->ns;
+       loff_t l = *pos;
+       f->private = aa_get_namespace(root);
+
+
+       /* find the first profile */
+       mutex_lock(&root->lock);
+       profile = __first_profile(root, root);
+
+       /* skip to position */
+       for (; profile && l > 0; l--)
+               profile = next_profile(root, profile);
+
+       return profile;
+}
+
+/**
+ * p_next - read the next profile entry
+ * @f: seq_file to fill
+ * @p: profile previously returned
+ * @pos: current position
+ *
+ * Returns: next profile after @p or NULL if none
+ *
+ * may acquire/release locks in namespace tree as necessary
+ */
+static void *p_next(struct seq_file *f, void *p, loff_t *pos)
+{
+       struct aa_profile *profile = p;
+       struct aa_namespace *ns = f->private;
+       (*pos)++;
+
+       return next_profile(ns, profile);
+}
+
+/**
+ * p_stop - stop depth first traversal
+ * @f: seq_file we are filling
+ * @p: the last profile writen
+ *
+ * Release all locking done by p_start/p_next on namespace tree
+ */
+static void p_stop(struct seq_file *f, void *p)
+{
+       struct aa_profile *profile = p;
+       struct aa_namespace *root = f->private, *ns;
+
+       if (profile) {
+               for (ns = profile->ns; ns && ns != root; ns = ns->parent)
+                       mutex_unlock(&ns->lock);
+       }
+       mutex_unlock(&root->lock);
+       aa_put_namespace(root);
+}
+
+/**
+ * seq_show_profile - show a profile entry
+ * @f: seq_file to file
+ * @p: current position (profile)    (NOT NULL)
+ *
+ * Returns: error on failure
+ */
+static int seq_show_profile(struct seq_file *f, void *p)
+{
+       struct aa_profile *profile = (struct aa_profile *)p;
+       struct aa_namespace *root = f->private;
+
+       if (profile->ns != root)
+               seq_printf(f, ":%s://", aa_ns_name(root, profile->ns));
+       seq_printf(f, "%s (%s)\n", profile->base.hname,
+                  aa_profile_mode_names[profile->mode]);
+
+       return 0;
+}
+
+static const struct seq_operations aa_fs_profiles_op = {
+       .start = p_start,
+       .next = p_next,
+       .stop = p_stop,
+       .show = seq_show_profile,
+};
+
+static int profiles_open(struct inode *inode, struct file *file)
+{
+       return seq_open(file, &aa_fs_profiles_op);
+}
+
+static int profiles_release(struct inode *inode, struct file *file)
+{
+       return seq_release(inode, file);
+}
+
+static const struct file_operations aa_fs_profiles_fops = {
+       .open = profiles_open,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = profiles_release,
+};
+
+
+/** Base file system setup **/
 static struct aa_fs_entry aa_fs_entry_file[] = {
        AA_FS_FILE_STRING("mask", "create read write exec append mmap_exec " \
                                  "link lock"),
@@ -198,11 +799,18 @@ static struct aa_fs_entry aa_fs_entry_domain[] = {
        { }
 };
 
+static struct aa_fs_entry aa_fs_entry_policy[] = {
+       AA_FS_FILE_BOOLEAN("set_load",          1),
+       {}
+};
+
 static struct aa_fs_entry aa_fs_entry_features[] = {
+       AA_FS_DIR("policy",                     aa_fs_entry_policy),
        AA_FS_DIR("domain",                     aa_fs_entry_domain),
        AA_FS_DIR("file",                       aa_fs_entry_file),
        AA_FS_FILE_U64("capability",            VFS_CAP_FLAGS_MASK),
        AA_FS_DIR("rlimit",                     aa_fs_entry_rlimit),
+       AA_FS_DIR("caps",                       aa_fs_entry_caps),
        { }
 };
 
@@ -210,6 +818,7 @@ static struct aa_fs_entry aa_fs_entry_apparmor[] = {
        AA_FS_FILE_FOPS(".load", 0640, &aa_fs_profile_load),
        AA_FS_FILE_FOPS(".replace", 0640, &aa_fs_profile_replace),
        AA_FS_FILE_FOPS(".remove", 0640, &aa_fs_profile_remove),
+       AA_FS_FILE_FOPS("profiles", 0640, &aa_fs_profiles_fops),
        AA_FS_DIR("features", aa_fs_entry_features),
        { }
 };
@@ -240,6 +849,7 @@ static int __init aafs_create_file(struct aa_fs_entry *fs_file,
        return error;
 }
 
+static void __init aafs_remove_dir(struct aa_fs_entry *fs_dir);
 /**
  * aafs_create_dir - recursively create a directory entry in the securityfs
  * @fs_dir: aa_fs_entry (and all child entries) to build (NOT NULL)
@@ -250,17 +860,16 @@ static int __init aafs_create_file(struct aa_fs_entry *fs_file,
 static int __init aafs_create_dir(struct aa_fs_entry *fs_dir,
                                  struct dentry *parent)
 {
-       int error;
        struct aa_fs_entry *fs_file;
+       struct dentry *dir;
+       int error;
 
-       fs_dir->dentry = securityfs_create_dir(fs_dir->name, parent);
-       if (IS_ERR(fs_dir->dentry)) {
-               error = PTR_ERR(fs_dir->dentry);
-               fs_dir->dentry = NULL;
-               goto failed;
-       }
+       dir = securityfs_create_dir(fs_dir->name, parent);
+       if (IS_ERR(dir))
+               return PTR_ERR(dir);
+       fs_dir->dentry = dir;
 
-       for (fs_file = fs_dir->v.files; fs_file->name; ++fs_file) {
+       for (fs_file = fs_dir->v.files; fs_file && fs_file->name; ++fs_file) {
                if (fs_file->v_type == AA_FS_TYPE_DIR)
                        error = aafs_create_dir(fs_file, fs_dir->dentry);
                else
@@ -272,6 +881,8 @@ static int __init aafs_create_dir(struct aa_fs_entry *fs_dir,
        return 0;
 
 failed:
+       aafs_remove_dir(fs_dir);
+
        return error;
 }
 
@@ -296,7 +907,7 @@ static void __init aafs_remove_dir(struct aa_fs_entry *fs_dir)
 {
        struct aa_fs_entry *fs_file;
 
-       for (fs_file = fs_dir->v.files; fs_file->name; ++fs_file) {
+       for (fs_file = fs_dir->v.files; fs_file && fs_file->name; ++fs_file) {
                if (fs_file->v_type == AA_FS_TYPE_DIR)
                        aafs_remove_dir(fs_file);
                else
@@ -340,6 +951,11 @@ static int __init aa_create_aafs(void)
        if (error)
                goto error;
 
+       error = __aa_fs_namespace_mkdir(root_ns, aa_fs_entry.dentry,
+                                       "policy");
+       if (error)
+               goto error;
+
        /* TODO: add support for apparmorfs_null and apparmorfs_mnt */
 
        /* Report that AppArmor fs is enabled */
index 887a5e9489453c9a304b25759b4bed973f34fb0a..84d1f5f538778b58f0b60c48d4a55ede44ff4c4f 100644 (file)
  */
 #include "capability_names.h"
 
+struct aa_fs_entry aa_fs_entry_caps[] = {
+       AA_FS_FILE_STRING("mask", AA_FS_CAPS_MASK),
+       { }
+};
+
 struct audit_cache {
        struct aa_profile *profile;
        kernel_cap_t caps;
index d5af1d15f26d6feeec7acc5e421cbb25ca9de9ce..3064c6ced87cae69b8c2eb224e4d2ac5af9d9c79 100644 (file)
@@ -112,9 +112,9 @@ int aa_replace_current_profile(struct aa_profile *profile)
                aa_clear_task_cxt_trans(cxt);
 
        /* be careful switching cxt->profile, when racing replacement it
-        * is possible that cxt->profile->replacedby is the reference keeping
-        * @profile valid, so make sure to get its reference before dropping
-        * the reference on cxt->profile */
+        * is possible that cxt->profile->replacedby->profile is the reference
+        * keeping @profile valid, so make sure to get its reference before
+        * dropping the reference on cxt->profile */
        aa_get_profile(profile);
        aa_put_profile(cxt->profile);
        cxt->profile = profile;
@@ -175,7 +175,7 @@ int aa_set_current_hat(struct aa_profile *profile, u64 token)
                abort_creds(new);
                return -EACCES;
        }
-       cxt->profile = aa_get_profile(aa_newest_version(profile));
+       cxt->profile = aa_get_newest_profile(profile);
        /* clear exec on switching context */
        aa_put_profile(cxt->onexec);
        cxt->onexec = NULL;
@@ -212,14 +212,8 @@ int aa_restore_previous_profile(u64 token)
        }
 
        aa_put_profile(cxt->profile);
-       cxt->profile = aa_newest_version(cxt->previous);
+       cxt->profile = aa_get_newest_profile(cxt->previous);
        BUG_ON(!cxt->profile);
-       if (unlikely(cxt->profile != cxt->previous)) {
-               aa_get_profile(cxt->profile);
-               aa_put_profile(cxt->previous);
-       }
-       /* ref has been transfered so avoid putting ref in clear_task_cxt */
-       cxt->previous = NULL;
        /* clear exec && prev information when restoring to previous context */
        aa_clear_task_cxt_trans(cxt);
 
diff --git a/security/apparmor/crypto.c b/security/apparmor/crypto.c
new file mode 100644 (file)
index 0000000..d6222ba
--- /dev/null
@@ -0,0 +1,97 @@
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor policy loading interface function definitions.
+ *
+ * Copyright 2013 Canonical Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ *
+ * Fns to provide a checksum of policy that has been loaded this can be
+ * compared to userspace policy compiles to check loaded policy is what
+ * it should be.
+ */
+
+#include <linux/crypto.h>
+
+#include "include/apparmor.h"
+#include "include/crypto.h"
+
+static unsigned int apparmor_hash_size;
+
+static struct crypto_hash *apparmor_tfm;
+
+unsigned int aa_hash_size(void)
+{
+       return apparmor_hash_size;
+}
+
+int aa_calc_profile_hash(struct aa_profile *profile, u32 version, void *start,
+                        size_t len)
+{
+       struct scatterlist sg[2];
+       struct hash_desc desc = {
+               .tfm = apparmor_tfm,
+               .flags = 0
+       };
+       int error = -ENOMEM;
+       u32 le32_version = cpu_to_le32(version);
+
+       if (!apparmor_tfm)
+               return 0;
+
+       sg_init_table(sg, 2);
+       sg_set_buf(&sg[0], &le32_version, 4);
+       sg_set_buf(&sg[1], (u8 *) start, len);
+
+       profile->hash = kzalloc(apparmor_hash_size, GFP_KERNEL);
+       if (!profile->hash)
+               goto fail;
+
+       error = crypto_hash_init(&desc);
+       if (error)
+               goto fail;
+       error = crypto_hash_update(&desc, &sg[0], 4);
+       if (error)
+               goto fail;
+       error = crypto_hash_update(&desc, &sg[1], len);
+       if (error)
+               goto fail;
+       error = crypto_hash_final(&desc, profile->hash);
+       if (error)
+               goto fail;
+
+       return 0;
+
+fail:
+       kfree(profile->hash);
+       profile->hash = NULL;
+
+       return error;
+}
+
+static int __init init_profile_hash(void)
+{
+       struct crypto_hash *tfm;
+
+       if (!apparmor_initialized)
+               return 0;
+
+       tfm = crypto_alloc_hash("sha1", 0, CRYPTO_ALG_ASYNC);
+       if (IS_ERR(tfm)) {
+               int error = PTR_ERR(tfm);
+               AA_ERROR("failed to setup profile sha1 hashing: %d\n", error);
+               return error;
+       }
+       apparmor_tfm = tfm;
+       apparmor_hash_size = crypto_hash_digestsize(apparmor_tfm);
+
+       aa_info_message("AppArmor sha1 policy hashing enabled");
+
+       return 0;
+}
+
+late_initcall(init_profile_hash);
index 01b7bd669a88d8d130f32097d0d3200a6068cbe5..26c607c971f5656da192698602dbdb85afcfa8f1 100644 (file)
@@ -144,7 +144,7 @@ static struct aa_profile *__attach_match(const char *name,
        int len = 0;
        struct aa_profile *profile, *candidate = NULL;
 
-       list_for_each_entry(profile, head, base.list) {
+       list_for_each_entry_rcu(profile, head, base.list) {
                if (profile->flags & PFLAG_NULL)
                        continue;
                if (profile->xmatch && profile->xmatch_len > len) {
@@ -177,9 +177,9 @@ static struct aa_profile *find_attach(struct aa_namespace *ns,
 {
        struct aa_profile *profile;
 
-       read_lock(&ns->lock);
+       rcu_read_lock();
        profile = aa_get_profile(__attach_match(name, list));
-       read_unlock(&ns->lock);
+       rcu_read_unlock();
 
        return profile;
 }
@@ -359,7 +359,7 @@ int apparmor_bprm_set_creds(struct linux_binprm *bprm)
        cxt = cred_cxt(bprm->cred);
        BUG_ON(!cxt);
 
-       profile = aa_get_profile(aa_newest_version(cxt->profile));
+       profile = aa_get_newest_profile(cxt->profile);
        /*
         * get the namespace from the replacement profile as replacement
         * can change the namespace
@@ -371,8 +371,8 @@ int apparmor_bprm_set_creds(struct linux_binprm *bprm)
        error = aa_path_name(&bprm->file->f_path, profile->path_flags, &buffer,
                             &name, &info);
        if (error) {
-               if (profile->flags &
-                   (PFLAG_IX_ON_NAME_ERROR | PFLAG_UNCONFINED))
+               if (unconfined(profile) ||
+                   (profile->flags & PFLAG_IX_ON_NAME_ERROR))
                        error = 0;
                name = bprm->filename;
                goto audit;
@@ -417,7 +417,7 @@ int apparmor_bprm_set_creds(struct linux_binprm *bprm)
 
                if (!(cp.allow & AA_MAY_ONEXEC))
                        goto audit;
-               new_profile = aa_get_profile(aa_newest_version(cxt->onexec));
+               new_profile = aa_get_newest_profile(cxt->onexec);
                goto apply;
        }
 
@@ -434,7 +434,7 @@ int apparmor_bprm_set_creds(struct linux_binprm *bprm)
                                new_profile = aa_get_profile(profile);
                                goto x_clear;
                        } else if (perms.xindex & AA_X_UNCONFINED) {
-                               new_profile = aa_get_profile(ns->unconfined);
+                               new_profile = aa_get_newest_profile(ns->unconfined);
                                info = "ux fallback";
                        } else {
                                error = -ENOENT;
@@ -641,7 +641,10 @@ int aa_change_hat(const char *hats[], int count, u64 token, bool permtest)
        if (count) {
                /* attempting to change into a new hat or switch to a sibling */
                struct aa_profile *root;
-               root = PROFILE_IS_HAT(profile) ? profile->parent : profile;
+               if (PROFILE_IS_HAT(profile))
+                       root = aa_get_profile_rcu(&profile->parent);
+               else
+                       root = aa_get_profile(profile);
 
                /* find first matching hat */
                for (i = 0; i < count && !hat; i++)
@@ -653,6 +656,7 @@ int aa_change_hat(const char *hats[], int count, u64 token, bool permtest)
                                        error = -ECHILD;
                                else
                                        error = -ENOENT;
+                               aa_put_profile(root);
                                goto out;
                        }
 
@@ -667,6 +671,7 @@ int aa_change_hat(const char *hats[], int count, u64 token, bool permtest)
 
                        /* freed below */
                        name = new_compound_name(root->base.hname, hats[0]);
+                       aa_put_profile(root);
                        target = name;
                        /* released below */
                        hat = aa_new_null_profile(profile, 1);
@@ -676,6 +681,7 @@ int aa_change_hat(const char *hats[], int count, u64 token, bool permtest)
                                goto audit;
                        }
                } else {
+                       aa_put_profile(root);
                        target = hat->base.hname;
                        if (!PROFILE_IS_HAT(hat)) {
                                info = "target not hat";
index 1ba2ca56a6efe0245327b16ee4891981746c5b7a..8fb1488a3cd4499ecf8e374dcd47f956945a0ef7 100644 (file)
@@ -78,6 +78,12 @@ static inline void *kvzalloc(size_t size)
        return __aa_kvmalloc(size, __GFP_ZERO);
 }
 
+/* returns 0 if kref not incremented */
+static inline int kref_get_not0(struct kref *kref)
+{
+       return atomic_inc_not_zero(&kref->refcount);
+}
+
 /**
  * aa_strneq - compare null terminated @str to a non null terminated substring
  * @str: a null terminated string
index 7ea4769fab3f7785c056dc0cd60e6a8f1b606c98..414e56878dd0c0462c0f85f0b0296448ad8ba27e 100644 (file)
@@ -61,4 +61,44 @@ extern const struct file_operations aa_fs_seq_file_ops;
 
 extern void __init aa_destroy_aafs(void);
 
+struct aa_profile;
+struct aa_namespace;
+
+enum aafs_ns_type {
+       AAFS_NS_DIR,
+       AAFS_NS_PROFS,
+       AAFS_NS_NS,
+       AAFS_NS_COUNT,
+       AAFS_NS_MAX_COUNT,
+       AAFS_NS_SIZE,
+       AAFS_NS_MAX_SIZE,
+       AAFS_NS_OWNER,
+       AAFS_NS_SIZEOF,
+};
+
+enum aafs_prof_type {
+       AAFS_PROF_DIR,
+       AAFS_PROF_PROFS,
+       AAFS_PROF_NAME,
+       AAFS_PROF_MODE,
+       AAFS_PROF_ATTACH,
+       AAFS_PROF_HASH,
+       AAFS_PROF_SIZEOF,
+};
+
+#define ns_dir(X) ((X)->dents[AAFS_NS_DIR])
+#define ns_subns_dir(X) ((X)->dents[AAFS_NS_NS])
+#define ns_subprofs_dir(X) ((X)->dents[AAFS_NS_PROFS])
+
+#define prof_dir(X) ((X)->dents[AAFS_PROF_DIR])
+#define prof_child_dir(X) ((X)->dents[AAFS_PROF_PROFS])
+
+void __aa_fs_profile_rmdir(struct aa_profile *profile);
+void __aa_fs_profile_migrate_dents(struct aa_profile *old,
+                                  struct aa_profile *new);
+int __aa_fs_profile_mkdir(struct aa_profile *profile, struct dentry *parent);
+void __aa_fs_namespace_rmdir(struct aa_namespace *ns);
+int __aa_fs_namespace_mkdir(struct aa_namespace *ns, struct dentry *parent,
+                           const char *name);
+
 #endif /* __AA_APPARMORFS_H */
index 69d8cae634e7bef79ea136520b404a730914d9d4..30e8d7687259aaef15defab3883e8e1e52d91c1f 100644 (file)
@@ -27,7 +27,6 @@ struct aa_profile;
 
 extern const char *const audit_mode_names[];
 #define AUDIT_MAX_INDEX 5
-
 enum audit_mode {
        AUDIT_NORMAL,           /* follow normal auditing of accesses */
        AUDIT_QUIET_DENIED,     /* quiet all denied access messages */
index c24d2959ea0201eff78244225216df0011b03fc4..2e7c9d6a2f3bb3f7b7ab6f4aab3b63a46a56a10a 100644 (file)
@@ -17,6 +17,8 @@
 
 #include <linux/sched.h>
 
+#include "apparmorfs.h"
+
 struct aa_profile;
 
 /* aa_caps - confinement data for capabilities
@@ -34,6 +36,8 @@ struct aa_caps {
        kernel_cap_t extended;
 };
 
+extern struct aa_fs_entry aa_fs_entry_caps[];
+
 int aa_capable(struct task_struct *task, struct aa_profile *profile, int cap,
               int audit);
 
index d44ba5802e3dc03f56c6d183a9d97a3d608ee8e9..6bf65798e5d145e985a65f674273db20472ece67 100644 (file)
@@ -98,7 +98,7 @@ static inline struct aa_profile *aa_cred_profile(const struct cred *cred)
 {
        struct aa_task_cxt *cxt = cred_cxt(cred);
        BUG_ON(!cxt || !cxt->profile);
-       return aa_newest_version(cxt->profile);
+       return cxt->profile;
 }
 
 /**
@@ -152,15 +152,14 @@ static inline struct aa_profile *aa_current_profile(void)
        struct aa_profile *profile;
        BUG_ON(!cxt || !cxt->profile);
 
-       profile = aa_newest_version(cxt->profile);
-       /*
-        * Whether or not replacement succeeds, use newest profile so
-        * there is no need to update it after replacement.
-        */
-       if (unlikely((cxt->profile != profile)))
+       if (PROFILE_INVALID(cxt->profile)) {
+               profile = aa_get_newest_profile(cxt->profile);
                aa_replace_current_profile(profile);
+               aa_put_profile(profile);
+               cxt = current_cxt();
+       }
 
-       return profile;
+       return cxt->profile;
 }
 
 /**
diff --git a/security/apparmor/include/crypto.h b/security/apparmor/include/crypto.h
new file mode 100644 (file)
index 0000000..dc418e5
--- /dev/null
@@ -0,0 +1,36 @@
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor policy loading interface function definitions.
+ *
+ * Copyright 2013 Canonical Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ */
+
+#ifndef __APPARMOR_CRYPTO_H
+#define __APPARMOR_CRYPTO_H
+
+#include "policy.h"
+
+#ifdef CONFIG_SECURITY_APPARMOR_HASH
+/* digest size in bytes of the policy hash, 0 if hashing is unavailable */
+unsigned int aa_hash_size(void);
+/* compute a sha1 digest of @version + the data at @start and store it on
+ * @profile->hash; returns 0 on success, negative error on failure */
+int aa_calc_profile_hash(struct aa_profile *profile, u32 version, void *start,
+                        size_t len);
+#else
+/* no-op stubs so callers need no #ifdef when hashing is compiled out */
+static inline int aa_calc_profile_hash(struct aa_profile *profile, u32 version,
+                                      void *start, size_t len)
+{
+       return 0;
+}
+
+static inline unsigned int aa_hash_size(void)
+{
+       return 0;
+}
+#endif
+
+#endif /* __APPARMOR_CRYPTO_H */
index b25491a3046a2a1dd14f16b31828c52d19d5df7e..f2d4b6348cbc0a5f1ec0abdc0d2d975ad34ce493 100644 (file)
@@ -29,8 +29,8 @@
 #include "file.h"
 #include "resource.h"
 
-extern const char *const profile_mode_names[];
-#define APPARMOR_NAMES_MAX_INDEX 3
+extern const char *const aa_profile_mode_names[];
+#define APPARMOR_MODE_NAMES_MAX_INDEX 4
 
 #define PROFILE_MODE(_profile, _mode)          \
        ((aa_g_profile_mode == (_mode)) ||      \
@@ -42,6 +42,10 @@ extern const char *const profile_mode_names[];
 
 #define PROFILE_IS_HAT(_profile) ((_profile)->flags & PFLAG_HAT)
 
+#define PROFILE_INVALID(_profile) ((_profile)->flags & PFLAG_INVALID)
+
+#define on_list_rcu(X) (!list_empty(X) && (X)->prev != LIST_POISON2)
+
 /*
  * FIXME: currently need a clean way to replace and remove profiles as a
  * set.  It should be done at the namespace level.
@@ -52,17 +56,19 @@ enum profile_mode {
        APPARMOR_ENFORCE,       /* enforce access rules */
        APPARMOR_COMPLAIN,      /* allow and log access violations */
        APPARMOR_KILL,          /* kill task on access violation */
+       APPARMOR_UNCONFINED,    /* profile set to unconfined */
 };
 
 enum profile_flags {
        PFLAG_HAT = 1,                  /* profile is a hat */
-       PFLAG_UNCONFINED = 2,           /* profile is an unconfined profile */
        PFLAG_NULL = 4,                 /* profile is null learning profile */
        PFLAG_IX_ON_NAME_ERROR = 8,     /* fallback to ix on name lookup fail */
        PFLAG_IMMUTABLE = 0x10,         /* don't allow changes/replacement */
        PFLAG_USER_DEFINED = 0x20,      /* user based profile - lower privs */
        PFLAG_NO_LIST_REF = 0x40,       /* list doesn't keep profile ref */
        PFLAG_OLD_NULL_TRANS = 0x100,   /* use // as the null transition */
+       PFLAG_INVALID = 0x200,          /* profile replaced/removed */
+       PFLAG_NS_COUNT = 0x400,         /* carries NS ref count */
 
        /* These flags must correspond with PATH_flags */
        PFLAG_MEDIATE_DELETED = 0x10000, /* mediate instead delegate deleted */
@@ -73,14 +79,12 @@ struct aa_profile;
 /* struct aa_policy - common part of both namespaces and profiles
  * @name: name of the object
  * @hname - The hierarchical name
- * @count: reference count of the obj
  * @list: list policy object is on
  * @profiles: head of the profiles list contained in the object
  */
 struct aa_policy {
        char *name;
        char *hname;
-       struct kref count;
        struct list_head list;
        struct list_head profiles;
 };
@@ -106,6 +110,8 @@ struct aa_ns_acct {
  * @unconfined: special unconfined profile for the namespace
  * @sub_ns: list of namespaces under the current namespace.
  * @uniq_null: uniq value used for null learning profiles
+ * @uniq_id: a unique id count for the profiles in the namespace
+ * @dents: dentries for the namespaces file entries in apparmorfs
  *
  * An aa_namespace defines the set profiles that are searched to determine
  * which profile to attach to a task.  Profiles can not be shared between
@@ -124,11 +130,14 @@ struct aa_ns_acct {
 struct aa_namespace {
        struct aa_policy base;
        struct aa_namespace *parent;
-       rwlock_t lock;
+       struct mutex lock;
        struct aa_ns_acct acct;
        struct aa_profile *unconfined;
        struct list_head sub_ns;
        atomic_t uniq_null;
+       long uniq_id;
+
+       struct dentry *dents[AAFS_NS_SIZEOF];
 };
 
 /* struct aa_policydb - match engine for a policy
@@ -142,12 +151,21 @@ struct aa_policydb {
 
 };
 
+struct aa_replacedby {
+       struct kref count;
+       struct aa_profile __rcu *profile;
+};
+
+
 /* struct aa_profile - basic confinement data
  * @base - base components of the profile (name, refcount, lists, lock ...)
+ * @count: reference count of the obj
+ * @rcu: rcu head used when removing from @list
  * @parent: parent of profile
  * @ns: namespace the profile is in
  * @replacedby: is set to the profile that replaced this profile
  * @rename: optional profile name that this profile renamed
+ * @attach: human readable attachment string
  * @xmatch: optional extended matching for unconfined executables names
  * @xmatch_len: xmatch prefix len, used to determine xmatch priority
  * @audit: the auditing mode of the profile
@@ -160,13 +178,15 @@ struct aa_policydb {
  * @caps: capabilities for the profile
  * @rlimits: rlimits for the profile
  *
+ * @dents: dentries for the profiles file entries in apparmorfs
+ * @dirname: name of the profile dir in apparmorfs
+ *
  * The AppArmor profile contains the basic confinement data.  Each profile
  * has a name, and exists in a namespace.  The @name and @exec_match are
  * used to determine profile attachment against unconfined tasks.  All other
  * attachments are determined by profile X transition rules.
  *
- * The @replacedby field is write protected by the profile lock.  Reads
- * are assumed to be atomic, and are done without locking.
+ * The @replacedby struct is write protected by the profile lock.
  *
  * Profiles have a hierarchy where hats and children profiles keep
  * a reference to their parent.
@@ -177,17 +197,20 @@ struct aa_policydb {
  */
 struct aa_profile {
        struct aa_policy base;
-       struct aa_profile *parent;
+       struct kref count;
+       struct rcu_head rcu;
+       struct aa_profile __rcu *parent;
 
        struct aa_namespace *ns;
-       struct aa_profile *replacedby;
+       struct aa_replacedby *replacedby;
        const char *rename;
 
+       const char *attach;
        struct aa_dfa *xmatch;
        int xmatch_len;
        enum audit_mode audit;
-       enum profile_mode mode;
-       u32 flags;
+       long mode;
+       long flags;
        u32 path_flags;
        int size;
 
@@ -195,6 +218,10 @@ struct aa_profile {
        struct aa_file_rules file;
        struct aa_caps caps;
        struct aa_rlimit rlimits;
+
+       unsigned char *hash;
+       char *dirname;
+       struct dentry *dents[AAFS_PROF_SIZEOF];
 };
 
 extern struct aa_namespace *root_ns;
@@ -211,43 +238,11 @@ void aa_free_namespace_kref(struct kref *kref);
 struct aa_namespace *aa_find_namespace(struct aa_namespace *root,
                                       const char *name);
 
-static inline struct aa_policy *aa_get_common(struct aa_policy *c)
-{
-       if (c)
-               kref_get(&c->count);
-
-       return c;
-}
-
-/**
- * aa_get_namespace - increment references count on @ns
- * @ns: namespace to increment reference count of (MAYBE NULL)
- *
- * Returns: pointer to @ns, if @ns is NULL returns NULL
- * Requires: @ns must be held with valid refcount when called
- */
-static inline struct aa_namespace *aa_get_namespace(struct aa_namespace *ns)
-{
-       if (ns)
-               kref_get(&(ns->base.count));
-
-       return ns;
-}
-
-/**
- * aa_put_namespace - decrement refcount on @ns
- * @ns: namespace to put reference of
- *
- * Decrement reference count of @ns and if no longer in use free it
- */
-static inline void aa_put_namespace(struct aa_namespace *ns)
-{
-       if (ns)
-               kref_put(&ns->base.count, aa_free_namespace_kref);
-}
 
+void aa_free_replacedby_kref(struct kref *kref);
 struct aa_profile *aa_alloc_profile(const char *name);
 struct aa_profile *aa_new_null_profile(struct aa_profile *parent, int hat);
+void aa_free_profile(struct aa_profile *profile);
 void aa_free_profile_kref(struct kref *kref);
 struct aa_profile *aa_find_child(struct aa_profile *parent, const char *name);
 struct aa_profile *aa_lookup_profile(struct aa_namespace *ns, const char *name);
@@ -259,25 +254,13 @@ ssize_t aa_remove_profiles(char *name, size_t size);
 #define PROF_ADD 1
 #define PROF_REPLACE 0
 
-#define unconfined(X) ((X)->flags & PFLAG_UNCONFINED)
+#define unconfined(X) ((X)->mode == APPARMOR_UNCONFINED)
 
-/**
- * aa_newest_version - find the newest version of @profile
- * @profile: the profile to check for newer versions of (NOT NULL)
- *
- * Returns: newest version of @profile, if @profile is the newest version
- *          return @profile.
- *
- * NOTE: the profile returned is not refcounted, The refcount on @profile
- * must be held until the caller decides what to do with the returned newest
- * version.
- */
-static inline struct aa_profile *aa_newest_version(struct aa_profile *profile)
-{
-       while (profile->replacedby)
-               profile = profile->replacedby;
 
-       return profile;
+static inline struct aa_profile *aa_deref_parent(struct aa_profile *p)
+{
+       return rcu_dereference_protected(p->parent,
+                                        mutex_is_locked(&p->ns->lock));
 }
 
 /**
@@ -290,11 +273,65 @@ static inline struct aa_profile *aa_newest_version(struct aa_profile *profile)
 static inline struct aa_profile *aa_get_profile(struct aa_profile *p)
 {
        if (p)
-               kref_get(&(p->base.count));
+               kref_get(&(p->count));
 
        return p;
 }
 
+/**
+ * aa_get_profile_not0 - increment refcount on profile @p found via lookup
+ * @p: profile  (MAYBE NULL)
+ *
+ * Returns: pointer to @p if @p is NULL will return NULL
+ * Requires: @p must be held with valid refcount when called
+ */
+static inline struct aa_profile *aa_get_profile_not0(struct aa_profile *p)
+{
+       if (p && kref_get_not0(&p->count))
+               return p;
+
+       return NULL;
+}
+
+/**
+ * aa_get_profile_rcu - increment a refcount profile that can be replaced
+ * @p: pointer to profile that can be replaced (NOT NULL)
+ *
+ * Returns: pointer to a refcounted profile.
+ *     else NULL if no profile
+ */
+static inline struct aa_profile *aa_get_profile_rcu(struct aa_profile __rcu **p)
+{
+       struct aa_profile *c;
+
+       rcu_read_lock();
+       do {
+               c = rcu_dereference(*p);
+       } while (c && !kref_get_not0(&c->count));
+       rcu_read_unlock();
+
+       return c;
+}
+
+/**
+ * aa_get_newest_profile - find the newest version of @profile
+ * @profile: the profile to check for newer versions of
+ *
+ * Returns: refcounted newest version of @profile taking into account
+ *          replacement, renames and removals
+ *          return @profile.
+ */
+static inline struct aa_profile *aa_get_newest_profile(struct aa_profile *p)
+{
+       if (!p)
+               return NULL;
+
+       if (PROFILE_INVALID(p))
+               return aa_get_profile_rcu(&p->replacedby->profile);
+
+       return aa_get_profile(p);
+}
+
 /**
  * aa_put_profile - decrement refcount on profile @p
  * @p: profile  (MAYBE NULL)
@@ -302,7 +339,58 @@ static inline struct aa_profile *aa_get_profile(struct aa_profile *p)
 static inline void aa_put_profile(struct aa_profile *p)
 {
        if (p)
-               kref_put(&p->base.count, aa_free_profile_kref);
+               kref_put(&p->count, aa_free_profile_kref);
+}
+
+static inline struct aa_replacedby *aa_get_replacedby(struct aa_replacedby *p)
+{
+       if (p)
+               kref_get(&(p->count));
+
+       return p;
+}
+
+static inline void aa_put_replacedby(struct aa_replacedby *p)
+{
+       if (p)
+               kref_put(&p->count, aa_free_replacedby_kref);
+}
+
+/* requires profile list write lock held */
+static inline void __aa_update_replacedby(struct aa_profile *orig,
+                                         struct aa_profile *new)
+{
+       struct aa_profile *tmp = rcu_dereference(orig->replacedby->profile);
+       rcu_assign_pointer(orig->replacedby->profile, aa_get_profile(new));
+       orig->flags |= PFLAG_INVALID;
+       aa_put_profile(tmp);
+}
+
+/**
+ * aa_get_namespace - increment references count on @ns
+ * @ns: namespace to increment reference count of (MAYBE NULL)
+ *
+ * Returns: pointer to @ns, if @ns is NULL returns NULL
+ * Requires: @ns must be held with valid refcount when called
+ */
+static inline struct aa_namespace *aa_get_namespace(struct aa_namespace *ns)
+{
+       if (ns)
+               aa_get_profile(ns->unconfined);
+
+       return ns;
+}
+
+/**
+ * aa_put_namespace - decrement refcount on @ns
+ * @ns: namespace to put reference of
+ *
+ * Decrement reference count of @ns and if no longer in use free it
+ */
+static inline void aa_put_namespace(struct aa_namespace *ns)
+{
+       if (ns)
+               aa_put_profile(ns->unconfined);
 }
 
 static inline int AUDIT_MODE(struct aa_profile *profile)
index a2dcccac45aaae960d6a52c65477450e9d70e346..c214fb88b1bc8ada00577d675de1a149f6f30cf8 100644 (file)
 #ifndef __POLICY_INTERFACE_H
 #define __POLICY_INTERFACE_H
 
-struct aa_profile *aa_unpack(void *udata, size_t size, const char **ns);
+#include <linux/list.h>
+
+struct aa_load_ent {
+       struct list_head list;
+       struct aa_profile *new;
+       struct aa_profile *old;
+       struct aa_profile *rename;
+};
+
+void aa_load_ent_free(struct aa_load_ent *ent);
+struct aa_load_ent *aa_load_ent_alloc(void);
+
+#define PACKED_FLAG_HAT                1
+
+#define PACKED_MODE_ENFORCE    0
+#define PACKED_MODE_COMPLAIN   1
+#define PACKED_MODE_KILL       2
+#define PACKED_MODE_UNCONFINED 3
+
+int aa_unpack(void *udata, size_t size, struct list_head *lh, const char **ns);
 
 #endif /* __POLICY_INTERFACE_H */
index fcfe0233574cb2f32197dcbe84d5170da663d779..69689922c491b8a4eeda5d96115df4a49e6a844f 100644 (file)
@@ -97,11 +97,6 @@ void *__aa_kvmalloc(size_t size, gfp_t flags)
        if (size <= (16*PAGE_SIZE))
                buffer = kmalloc(size, flags | GFP_NOIO | __GFP_NOWARN);
        if (!buffer) {
-               /* see kvfree for why size must be at least work_struct size
-                * when allocated via vmalloc
-                */
-               if (size < sizeof(struct work_struct))
-                       size = sizeof(struct work_struct);
                if (flags & __GFP_ZERO)
                        buffer = vzalloc(size);
                else
index 2e2a0dd4a73f1234425ded75027baea3b1e0b03e..fb99e18123b41b4f049fd98078e88aafccb7729b 100644 (file)
@@ -508,19 +508,21 @@ static int apparmor_getprocattr(struct task_struct *task, char *name,
        /* released below */
        const struct cred *cred = get_task_cred(task);
        struct aa_task_cxt *cxt = cred_cxt(cred);
+       struct aa_profile *profile = NULL;
 
        if (strcmp(name, "current") == 0)
-               error = aa_getprocattr(aa_newest_version(cxt->profile),
-                                      value);
+               profile = aa_get_newest_profile(cxt->profile);
        else if (strcmp(name, "prev") == 0  && cxt->previous)
-               error = aa_getprocattr(aa_newest_version(cxt->previous),
-                                      value);
+               profile = aa_get_newest_profile(cxt->previous);
        else if (strcmp(name, "exec") == 0 && cxt->onexec)
-               error = aa_getprocattr(aa_newest_version(cxt->onexec),
-                                      value);
+               profile = aa_get_newest_profile(cxt->onexec);
        else
                error = -EINVAL;
 
+       if (profile)
+               error = aa_getprocattr(profile, value);
+
+       aa_put_profile(profile);
        put_cred(cred);
 
        return error;
@@ -666,6 +668,7 @@ static int param_set_aabool(const char *val, const struct kernel_param *kp);
 static int param_get_aabool(char *buffer, const struct kernel_param *kp);
 #define param_check_aabool param_check_bool
 static struct kernel_param_ops param_ops_aabool = {
+       .flags = KERNEL_PARAM_FL_NOARG,
        .set = param_set_aabool,
        .get = param_get_aabool
 };
@@ -682,6 +685,7 @@ static int param_set_aalockpolicy(const char *val, const struct kernel_param *kp
 static int param_get_aalockpolicy(char *buffer, const struct kernel_param *kp);
 #define param_check_aalockpolicy param_check_bool
 static struct kernel_param_ops param_ops_aalockpolicy = {
+       .flags = KERNEL_PARAM_FL_NOARG,
        .set = param_set_aalockpolicy,
        .get = param_get_aalockpolicy
 };
@@ -742,7 +746,7 @@ module_param_named(paranoid_load, aa_g_paranoid_load, aabool,
 
 /* Boot time disable flag */
 static bool apparmor_enabled = CONFIG_SECURITY_APPARMOR_BOOTPARAM_VALUE;
-module_param_named(enabled, apparmor_enabled, aabool, S_IRUSR);
+module_param_named(enabled, apparmor_enabled, bool, S_IRUGO);
 
 static int __init apparmor_enabled_setup(char *str)
 {
@@ -841,7 +845,7 @@ static int param_get_mode(char *buffer, struct kernel_param *kp)
        if (!apparmor_enabled)
                return -EINVAL;
 
-       return sprintf(buffer, "%s", profile_mode_names[aa_g_profile_mode]);
+       return sprintf(buffer, "%s", aa_profile_mode_names[aa_g_profile_mode]);
 }
 
 static int param_set_mode(const char *val, struct kernel_param *kp)
@@ -856,8 +860,8 @@ static int param_set_mode(const char *val, struct kernel_param *kp)
        if (!val)
                return -EINVAL;
 
-       for (i = 0; i < APPARMOR_NAMES_MAX_INDEX; i++) {
-               if (strcmp(val, profile_mode_names[i]) == 0) {
+       for (i = 0; i < APPARMOR_MODE_NAMES_MAX_INDEX; i++) {
+               if (strcmp(val, aa_profile_mode_names[i]) == 0) {
                        aa_g_profile_mode = i;
                        return 0;
                }
index 0f345c4dee5f46d7a6bb015d19105a0654a2f7d1..6172509fa2b7441fbda60d1cd6e5b35fce432dc8 100644 (file)
 /* root profile namespace */
 struct aa_namespace *root_ns;
 
-const char *const profile_mode_names[] = {
+const char *const aa_profile_mode_names[] = {
        "enforce",
        "complain",
        "kill",
+       "unconfined",
 };
 
 /**
@@ -141,7 +142,6 @@ static bool policy_init(struct aa_policy *policy, const char *prefix,
        policy->name = (char *)hname_tail(policy->hname);
        INIT_LIST_HEAD(&policy->list);
        INIT_LIST_HEAD(&policy->profiles);
-       kref_init(&policy->count);
 
        return 1;
 }
@@ -153,13 +153,13 @@ static bool policy_init(struct aa_policy *policy, const char *prefix,
 static void policy_destroy(struct aa_policy *policy)
 {
        /* still contains profiles -- invalid */
-       if (!list_empty(&policy->profiles)) {
+       if (on_list_rcu(&policy->profiles)) {
                AA_ERROR("%s: internal error, "
                         "policy '%s' still contains profiles\n",
                         __func__, policy->name);
                BUG();
        }
-       if (!list_empty(&policy->list)) {
+       if (on_list_rcu(&policy->list)) {
                AA_ERROR("%s: internal error, policy '%s' still on list\n",
                         __func__, policy->name);
                BUG();
@@ -174,7 +174,7 @@ static void policy_destroy(struct aa_policy *policy)
  * @head: list to search  (NOT NULL)
  * @name: name to search for  (NOT NULL)
  *
- * Requires: correct locks for the @head list be held
+ * Requires: rcu_read_lock be held
  *
  * Returns: unrefcounted policy that match @name or NULL if not found
  */
@@ -182,7 +182,7 @@ static struct aa_policy *__policy_find(struct list_head *head, const char *name)
 {
        struct aa_policy *policy;
 
-       list_for_each_entry(policy, head, list) {
+       list_for_each_entry_rcu(policy, head, list) {
                if (!strcmp(policy->name, name))
                        return policy;
        }
@@ -195,7 +195,7 @@ static struct aa_policy *__policy_find(struct list_head *head, const char *name)
  * @str: string to search for  (NOT NULL)
  * @len: length of match required
  *
- * Requires: correct locks for the @head list be held
+ * Requires: rcu_read_lock be held
  *
  * Returns: unrefcounted policy that match @str or NULL if not found
  *
@@ -207,7 +207,7 @@ static struct aa_policy *__policy_strn_find(struct list_head *head,
 {
        struct aa_policy *policy;
 
-       list_for_each_entry(policy, head, list) {
+       list_for_each_entry_rcu(policy, head, list) {
                if (aa_strneq(policy->name, str, len))
                        return policy;
        }
@@ -284,22 +284,19 @@ static struct aa_namespace *alloc_namespace(const char *prefix,
                goto fail_ns;
 
        INIT_LIST_HEAD(&ns->sub_ns);
-       rwlock_init(&ns->lock);
+       mutex_init(&ns->lock);
 
        /* released by free_namespace */
        ns->unconfined = aa_alloc_profile("unconfined");
        if (!ns->unconfined)
                goto fail_unconfined;
 
-       ns->unconfined->flags = PFLAG_UNCONFINED | PFLAG_IX_ON_NAME_ERROR |
-           PFLAG_IMMUTABLE;
+       ns->unconfined->flags = PFLAG_IX_ON_NAME_ERROR |
+               PFLAG_IMMUTABLE | PFLAG_NS_COUNT;
+       ns->unconfined->mode = APPARMOR_UNCONFINED;
 
-       /*
-        * released by free_namespace, however __remove_namespace breaks
-        * the cyclic references (ns->unconfined, and unconfined->ns) and
-        * replaces with refs to parent namespace unconfined
-        */
-       ns->unconfined->ns = aa_get_namespace(ns);
+       /* ns and ns->unconfined share ns->unconfined refcount */
+       ns->unconfined->ns = ns;
 
        atomic_set(&ns->uniq_null, 0);
 
@@ -327,22 +324,11 @@ static void free_namespace(struct aa_namespace *ns)
        policy_destroy(&ns->base);
        aa_put_namespace(ns->parent);
 
-       if (ns->unconfined && ns->unconfined->ns == ns)
-               ns->unconfined->ns = NULL;
-
-       aa_put_profile(ns->unconfined);
+       ns->unconfined->ns = NULL;
+       aa_free_profile(ns->unconfined);
        kzfree(ns);
 }
 
-/**
- * aa_free_namespace_kref - free aa_namespace by kref (see aa_put_namespace)
- * @kr: kref callback for freeing of a namespace  (NOT NULL)
- */
-void aa_free_namespace_kref(struct kref *kref)
-{
-       free_namespace(container_of(kref, struct aa_namespace, base.count));
-}
-
 /**
  * __aa_find_namespace - find a namespace on a list by @name
  * @head: list to search for namespace on  (NOT NULL)
@@ -350,7 +336,7 @@ void aa_free_namespace_kref(struct kref *kref)
  *
  * Returns: unrefcounted namespace
  *
- * Requires: ns lock be held
+ * Requires: rcu_read_lock be held
  */
 static struct aa_namespace *__aa_find_namespace(struct list_head *head,
                                                const char *name)
@@ -373,9 +359,9 @@ struct aa_namespace *aa_find_namespace(struct aa_namespace *root,
 {
        struct aa_namespace *ns = NULL;
 
-       read_lock(&root->lock);
+       rcu_read_lock();
        ns = aa_get_namespace(__aa_find_namespace(&root->sub_ns, name));
-       read_unlock(&root->lock);
+       rcu_read_unlock();
 
        return ns;
 }
@@ -392,7 +378,7 @@ static struct aa_namespace *aa_prepare_namespace(const char *name)
 
        root = aa_current_profile()->ns;
 
-       write_lock(&root->lock);
+       mutex_lock(&root->lock);
 
        /* if name isn't specified the profile is loaded to the current ns */
        if (!name) {
@@ -405,31 +391,23 @@ static struct aa_namespace *aa_prepare_namespace(const char *name)
        /* released by caller */
        ns = aa_get_namespace(__aa_find_namespace(&root->sub_ns, name));
        if (!ns) {
-               /* namespace not found */
-               struct aa_namespace *new_ns;
-               write_unlock(&root->lock);
-               new_ns = alloc_namespace(root->base.hname, name);
-               if (!new_ns)
-                       return NULL;
-               write_lock(&root->lock);
-               /* test for race when new_ns was allocated */
-               ns = __aa_find_namespace(&root->sub_ns, name);
-               if (!ns) {
-                       /* add parent ref */
-                       new_ns->parent = aa_get_namespace(root);
-
-                       list_add(&new_ns->base.list, &root->sub_ns);
-                       /* add list ref */
-                       ns = aa_get_namespace(new_ns);
-               } else {
-                       /* raced so free the new one */
-                       free_namespace(new_ns);
-                       /* get reference on namespace */
-                       aa_get_namespace(ns);
+               ns = alloc_namespace(root->base.hname, name);
+               if (!ns)
+                       goto out;
+               if (__aa_fs_namespace_mkdir(ns, ns_subns_dir(root), name)) {
+                       AA_ERROR("Failed to create interface for ns %s\n",
+                                ns->base.name);
+                       free_namespace(ns);
+                       ns = NULL;
+                       goto out;
                }
+               ns->parent = aa_get_namespace(root);
+               list_add_rcu(&ns->base.list, &root->sub_ns);
+               /* add list ref */
+               aa_get_namespace(ns);
        }
 out:
-       write_unlock(&root->lock);
+       mutex_unlock(&root->lock);
 
        /* return ref */
        return ns;
@@ -447,7 +425,7 @@ out:
 static void __list_add_profile(struct list_head *list,
                               struct aa_profile *profile)
 {
-       list_add(&profile->base.list, list);
+       list_add_rcu(&profile->base.list, list);
        /* get list reference */
        aa_get_profile(profile);
 }
@@ -466,49 +444,8 @@ static void __list_add_profile(struct list_head *list,
  */
 static void __list_remove_profile(struct aa_profile *profile)
 {
-       list_del_init(&profile->base.list);
-       if (!(profile->flags & PFLAG_NO_LIST_REF))
-               /* release list reference */
-               aa_put_profile(profile);
-}
-
-/**
- * __replace_profile - replace @old with @new on a list
- * @old: profile to be replaced  (NOT NULL)
- * @new: profile to replace @old with  (NOT NULL)
- *
- * Will duplicate and refcount elements that @new inherits from @old
- * and will inherit @old children.
- *
- * refcount @new for list, put @old list refcount
- *
- * Requires: namespace list lock be held, or list not be shared
- */
-static void __replace_profile(struct aa_profile *old, struct aa_profile *new)
-{
-       struct aa_policy *policy;
-       struct aa_profile *child, *tmp;
-
-       if (old->parent)
-               policy = &old->parent->base;
-       else
-               policy = &old->ns->base;
-
-       /* released when @new is freed */
-       new->parent = aa_get_profile(old->parent);
-       new->ns = aa_get_namespace(old->ns);
-       __list_add_profile(&policy->profiles, new);
-       /* inherit children */
-       list_for_each_entry_safe(child, tmp, &old->base.profiles, base.list) {
-               aa_put_profile(child->parent);
-               child->parent = aa_get_profile(new);
-               /* list refcount transferred to @new*/
-               list_move(&child->base.list, &new->base.profiles);
-       }
-
-       /* released by free_profile */
-       old->replacedby = aa_get_profile(new);
-       __list_remove_profile(old);
+       list_del_rcu(&profile->base.list);
+       aa_put_profile(profile);
 }
 
 static void __profile_list_release(struct list_head *head);
@@ -524,7 +461,8 @@ static void __remove_profile(struct aa_profile *profile)
        /* release any children lists first */
        __profile_list_release(&profile->base.profiles);
        /* released by free_profile */
-       profile->replacedby = aa_get_profile(profile->ns->unconfined);
+       __aa_update_replacedby(profile, profile->ns->unconfined);
+       __aa_fs_profile_rmdir(profile);
        __list_remove_profile(profile);
 }
 
@@ -552,14 +490,17 @@ static void destroy_namespace(struct aa_namespace *ns)
        if (!ns)
                return;
 
-       write_lock(&ns->lock);
+       mutex_lock(&ns->lock);
        /* release all profiles in this namespace */
        __profile_list_release(&ns->base.profiles);
 
        /* release all sub namespaces */
        __ns_list_release(&ns->sub_ns);
 
-       write_unlock(&ns->lock);
+       if (ns->parent)
+               __aa_update_replacedby(ns->unconfined, ns->parent->unconfined);
+       __aa_fs_namespace_rmdir(ns);
+       mutex_unlock(&ns->lock);
 }
 
 /**
@@ -570,25 +511,9 @@ static void destroy_namespace(struct aa_namespace *ns)
  */
 static void __remove_namespace(struct aa_namespace *ns)
 {
-       struct aa_profile *unconfined = ns->unconfined;
-
        /* remove ns from namespace list */
-       list_del_init(&ns->base.list);
-
-       /*
-        * break the ns, unconfined profile cyclic reference and forward
-        * all new unconfined profiles requests to the parent namespace
-        * This will result in all confined tasks that have a profile
-        * being removed, inheriting the parent->unconfined profile.
-        */
-       if (ns->parent)
-               ns->unconfined = aa_get_profile(ns->parent->unconfined);
-
+       list_del_rcu(&ns->base.list);
        destroy_namespace(ns);
-
-       /* release original ns->unconfined ref */
-       aa_put_profile(unconfined);
-       /* release ns->base.list ref, from removal above */
        aa_put_namespace(ns);
 }
 
@@ -634,8 +559,25 @@ void __init aa_free_root_ns(void)
         aa_put_namespace(ns);
 }
 
+
+static void free_replacedby(struct aa_replacedby *r)
+{
+       if (r) {
+               aa_put_profile(rcu_dereference(r->profile));
+               kzfree(r);
+       }
+}
+
+
+void aa_free_replacedby_kref(struct kref *kref)
+{
+       struct aa_replacedby *r = container_of(kref, struct aa_replacedby,
+                                              count);
+       free_replacedby(r);
+}
+
 /**
- * free_profile - free a profile
+ * aa_free_profile - free a profile
  * @profile: the profile to free  (MAYBE NULL)
  *
  * Free a profile, its hats and null_profile. All references to the profile,
@@ -644,25 +586,16 @@ void __init aa_free_root_ns(void)
  * If the profile was referenced from a task context, free_profile() will
  * be called from an rcu callback routine, so we must not sleep here.
  */
-static void free_profile(struct aa_profile *profile)
+void aa_free_profile(struct aa_profile *profile)
 {
-       struct aa_profile *p;
-
        AA_DEBUG("%s(%p)\n", __func__, profile);
 
        if (!profile)
                return;
 
-       if (!list_empty(&profile->base.list)) {
-               AA_ERROR("%s: internal error, "
-                        "profile '%s' still on ns list\n",
-                        __func__, profile->base.name);
-               BUG();
-       }
-
        /* free children profiles */
        policy_destroy(&profile->base);
-       aa_put_profile(profile->parent);
+       aa_put_profile(rcu_access_pointer(profile->parent));
 
        aa_put_namespace(profile->ns);
        kzfree(profile->rename);
@@ -671,44 +604,35 @@ static void free_profile(struct aa_profile *profile)
        aa_free_cap_rules(&profile->caps);
        aa_free_rlimit_rules(&profile->rlimits);
 
+       kzfree(profile->dirname);
        aa_put_dfa(profile->xmatch);
        aa_put_dfa(profile->policy.dfa);
-
-       /* put the profile reference for replacedby, but not via
-        * put_profile(kref_put).
-        * replacedby can form a long chain that can result in cascading
-        * frees that blows the stack because kref_put makes a nested fn
-        * call (it looks like recursion, with free_profile calling
-        * free_profile) for each profile in the chain lp#1056078.
-        */
-       for (p = profile->replacedby; p; ) {
-               if (atomic_dec_and_test(&p->base.count.refcount)) {
-                       /* no more refs on p, grab its replacedby */
-                       struct aa_profile *next = p->replacedby;
-                       /* break the chain */
-                       p->replacedby = NULL;
-                       /* now free p, chain is broken */
-                       free_profile(p);
-
-                       /* follow up with next profile in the chain */
-                       p = next;
-               } else
-                       break;
-       }
+       aa_put_replacedby(profile->replacedby);
 
        kzfree(profile);
 }
 
+/**
+ * aa_free_profile_rcu - free aa_profile by rcu (called by aa_free_profile_kref)
+ * @head: rcu_head callback for freeing of a profile  (NOT NULL)
+ */
+static void aa_free_profile_rcu(struct rcu_head *head)
+{
+       struct aa_profile *p = container_of(head, struct aa_profile, rcu);
+       if (p->flags & PFLAG_NS_COUNT)
+               free_namespace(p->ns);
+       else
+               aa_free_profile(p);
+}
+
 /**
  * aa_free_profile_kref - free aa_profile by kref (called by aa_put_profile)
  * @kr: kref callback for freeing of a profile  (NOT NULL)
  */
 void aa_free_profile_kref(struct kref *kref)
 {
-       struct aa_profile *p = container_of(kref, struct aa_profile,
-                                           base.count);
-
-       free_profile(p);
+       struct aa_profile *p = container_of(kref, struct aa_profile, count);
+       call_rcu(&p->rcu, aa_free_profile_rcu);
 }
 
 /**
@@ -726,13 +650,23 @@ struct aa_profile *aa_alloc_profile(const char *hname)
        if (!profile)
                return NULL;
 
-       if (!policy_init(&profile->base, NULL, hname)) {
-               kzfree(profile);
-               return NULL;
-       }
+       profile->replacedby = kzalloc(sizeof(struct aa_replacedby), GFP_KERNEL);
+       if (!profile->replacedby)
+               goto fail;
+       kref_init(&profile->replacedby->count);
+
+       if (!policy_init(&profile->base, NULL, hname))
+               goto fail;
+       kref_init(&profile->count);
 
        /* refcount released by caller */
        return profile;
+
+fail:
+       kzfree(profile->replacedby);
+       kzfree(profile);
+
+       return NULL;
 }
 
 /**
@@ -772,12 +706,12 @@ struct aa_profile *aa_new_null_profile(struct aa_profile *parent, int hat)
                profile->flags |= PFLAG_HAT;
 
        /* released on free_profile */
-       profile->parent = aa_get_profile(parent);
+       rcu_assign_pointer(profile->parent, aa_get_profile(parent));
        profile->ns = aa_get_namespace(parent->ns);
 
-       write_lock(&profile->ns->lock);
+       mutex_lock(&profile->ns->lock);
        __list_add_profile(&parent->base.profiles, profile);
-       write_unlock(&profile->ns->lock);
+       mutex_unlock(&profile->ns->lock);
 
        /* refcount released by caller */
        return profile;
@@ -793,7 +727,7 @@ fail:
  * @head: list to search  (NOT NULL)
  * @name: name of profile (NOT NULL)
  *
- * Requires: ns lock protecting list be held
+ * Requires: rcu_read_lock be held
  *
  * Returns: unrefcounted profile ptr, or NULL if not found
  */
@@ -808,7 +742,7 @@ static struct aa_profile *__find_child(struct list_head *head, const char *name)
  * @name: name of profile (NOT NULL)
  * @len: length of @name substring to match
  *
- * Requires: ns lock protecting list be held
+ * Requires: rcu_read_lock be held
  *
  * Returns: unrefcounted profile ptr, or NULL if not found
  */
@@ -829,9 +763,9 @@ struct aa_profile *aa_find_child(struct aa_profile *parent, const char *name)
 {
        struct aa_profile *profile;
 
-       read_lock(&parent->ns->lock);
+       rcu_read_lock();
        profile = aa_get_profile(__find_child(&parent->base.profiles, name));
-       read_unlock(&parent->ns->lock);
+       rcu_read_unlock();
 
        /* refcount released by caller */
        return profile;
@@ -846,7 +780,7 @@ struct aa_profile *aa_find_child(struct aa_profile *parent, const char *name)
  * that matches hname does not need to exist, in general this
  * is used to load a new profile.
  *
- * Requires: ns->lock be held
+ * Requires: rcu_read_lock be held
  *
  * Returns: unrefcounted policy or NULL if not found
  */
@@ -878,7 +812,7 @@ static struct aa_policy *__lookup_parent(struct aa_namespace *ns,
  * @base: base list to start looking up profile name from  (NOT NULL)
  * @hname: hierarchical profile name  (NOT NULL)
  *
- * Requires: ns->lock be held
+ * Requires: rcu_read_lock be held
  *
  * Returns: unrefcounted profile pointer or NULL if not found
  *
@@ -917,13 +851,15 @@ struct aa_profile *aa_lookup_profile(struct aa_namespace *ns, const char *hname)
 {
        struct aa_profile *profile;
 
-       read_lock(&ns->lock);
-       profile = aa_get_profile(__lookup_profile(&ns->base, hname));
-       read_unlock(&ns->lock);
+       rcu_read_lock();
+       do {
+               profile = __lookup_profile(&ns->base, hname);
+       } while (profile && !aa_get_profile_not0(profile));
+       rcu_read_unlock();
 
        /* the unconfined profile is not in the regular profile list */
        if (!profile && strcmp(hname, "unconfined") == 0)
-               profile = aa_get_profile(ns->unconfined);
+               profile = aa_get_newest_profile(ns->unconfined);
 
        /* refcount released by caller */
        return profile;
@@ -952,25 +888,6 @@ static int replacement_allowed(struct aa_profile *profile, int noreplace,
        return 0;
 }
 
-/**
- * __add_new_profile - simple wrapper around __list_add_profile
- * @ns: namespace that profile is being added to  (NOT NULL)
- * @policy: the policy container to add the profile to  (NOT NULL)
- * @profile: profile to add  (NOT NULL)
- *
- * add a profile to a list and do other required basic allocations
- */
-static void __add_new_profile(struct aa_namespace *ns, struct aa_policy *policy,
-                             struct aa_profile *profile)
-{
-       if (policy != &ns->base)
-               /* released on profile replacement or free_profile */
-               profile->parent = aa_get_profile((struct aa_profile *) policy);
-       __list_add_profile(&policy->profiles, profile);
-       /* released on free_profile */
-       profile->ns = aa_get_namespace(ns);
-}
-
 /**
  * aa_audit_policy - Do auditing of policy changes
  * @op: policy operation being performed
@@ -1019,6 +936,121 @@ bool aa_may_manage_policy(int op)
        return 1;
 }
 
+static struct aa_profile *__list_lookup_parent(struct list_head *lh,
+                                              struct aa_profile *profile)
+{
+       const char *base = hname_tail(profile->base.hname);
+       long len = base - profile->base.hname;
+       struct aa_load_ent *ent;
+
+       /* parent won't have trailing // so remove from len */
+       if (len <= 2)
+               return NULL;
+       len -= 2;
+
+       list_for_each_entry(ent, lh, list) {
+               if (ent->new == profile)
+                       continue;
+               if (strncmp(ent->new->base.hname, profile->base.hname, len) ==
+                   0 && ent->new->base.hname[len] == 0)
+                       return ent->new;
+       }
+
+       return NULL;
+}
+
+/**
+ * __replace_profile - replace @old with @new on a list
+ * @old: profile to be replaced  (NOT NULL)
+ * @new: profile to replace @old with  (NOT NULL)
+ * @share_replacedby: transfer @old->replacedby to @new
+ *
+ * Will duplicate and refcount elements that @new inherits from @old
+ * and will inherit @old children.
+ *
+ * refcount @new for list, put @old list refcount
+ *
+ * Requires: namespace list lock be held, or list not be shared
+ */
+static void __replace_profile(struct aa_profile *old, struct aa_profile *new,
+                             bool share_replacedby)
+{
+       struct aa_profile *child, *tmp;
+
+       if (!list_empty(&old->base.profiles)) {
+               LIST_HEAD(lh);
+               list_splice_init_rcu(&old->base.profiles, &lh, synchronize_rcu);
+
+               list_for_each_entry_safe(child, tmp, &lh, base.list) {
+                       struct aa_profile *p;
+
+                       list_del_init(&child->base.list);
+                       p = __find_child(&new->base.profiles, child->base.name);
+                       if (p) {
+                               /* @p replaces @child  */
+                               __replace_profile(child, p, share_replacedby);
+                               continue;
+                       }
+
+                       /* inherit @child and its children */
+                       /* TODO: update hname of inherited children */
+                       /* list refcount transferred to @new */
+                       p = aa_deref_parent(child);
+                       rcu_assign_pointer(child->parent, aa_get_profile(new));
+                       list_add_rcu(&child->base.list, &new->base.profiles);
+                       aa_put_profile(p);
+               }
+       }
+
+       if (!rcu_access_pointer(new->parent)) {
+               struct aa_profile *parent = aa_deref_parent(old);
+               rcu_assign_pointer(new->parent, aa_get_profile(parent));
+       }
+       __aa_update_replacedby(old, new);
+       if (share_replacedby) {
+               aa_put_replacedby(new->replacedby);
+               new->replacedby = aa_get_replacedby(old->replacedby);
+       } else if (!rcu_access_pointer(new->replacedby->profile))
+               /* aafs interface uses replacedby */
+               rcu_assign_pointer(new->replacedby->profile,
+                                  aa_get_profile(new));
+       __aa_fs_profile_migrate_dents(old, new);
+
+       if (list_empty(&new->base.list)) {
+               /* new is not on a list already */
+               list_replace_rcu(&old->base.list, &new->base.list);
+               aa_get_profile(new);
+               aa_put_profile(old);
+       } else
+               __list_remove_profile(old);
+}
+
+/**
+ * __lookup_replace - lookup replacement information for a profile
+ * @ns - namespace the lookup occurs in
+ * @hname - name of profile to lookup
+ * @noreplace - true if not replacing an existing profile
+ * @p - Returns: profile to be replaced
+ * @info - Returns: info string on why lookup failed
+ *
+ * Returns: profile to replace (no ref) on success else ptr error
+ */
+static int __lookup_replace(struct aa_namespace *ns, const char *hname,
+                           bool noreplace, struct aa_profile **p,
+                           const char **info)
+{
+       *p = aa_get_profile(__lookup_profile(&ns->base, hname));
+       if (*p) {
+               int error = replacement_allowed(*p, noreplace, info);
+               if (error) {
+                       *info = "profile can not be replaced";
+                       return error;
+               }
+       }
+
+       return 0;
+}
+
 /**
  * aa_replace_profiles - replace profile(s) on the profile list
  * @udata: serialized data stream  (NOT NULL)
@@ -1033,21 +1065,17 @@ bool aa_may_manage_policy(int op)
  */
 ssize_t aa_replace_profiles(void *udata, size_t size, bool noreplace)
 {
-       struct aa_policy *policy;
-       struct aa_profile *old_profile = NULL, *new_profile = NULL;
-       struct aa_profile *rename_profile = NULL;
-       struct aa_namespace *ns = NULL;
        const char *ns_name, *name = NULL, *info = NULL;
+       struct aa_namespace *ns = NULL;
+       struct aa_load_ent *ent, *tmp;
        int op = OP_PROF_REPL;
        ssize_t error;
+       LIST_HEAD(lh);
 
        /* released below */
-       new_profile = aa_unpack(udata, size, &ns_name);
-       if (IS_ERR(new_profile)) {
-               error = PTR_ERR(new_profile);
-               new_profile = NULL;
-               goto fail;
-       }
+       error = aa_unpack(udata, size, &lh, &ns_name);
+       if (error)
+               goto out;
 
        /* released below */
        ns = aa_prepare_namespace(ns_name);
@@ -1058,71 +1086,140 @@ ssize_t aa_replace_profiles(void *udata, size_t size, bool noreplace)
                goto fail;
        }
 
-       name = new_profile->base.hname;
-
-       write_lock(&ns->lock);
-       /* no ref on policy only use inside lock */
-       policy = __lookup_parent(ns, new_profile->base.hname);
+       mutex_lock(&ns->lock);
+       /* setup parent and ns info */
+       list_for_each_entry(ent, &lh, list) {
+               struct aa_policy *policy;
+
+               name = ent->new->base.hname;
+               error = __lookup_replace(ns, ent->new->base.hname, noreplace,
+                                        &ent->old, &info);
+               if (error)
+                       goto fail_lock;
+
+               if (ent->new->rename) {
+                       error = __lookup_replace(ns, ent->new->rename,
+                                                noreplace, &ent->rename,
+                                                &info);
+                       if (error)
+                               goto fail_lock;
+               }
 
-       if (!policy) {
-               info = "parent does not exist";
-               error = -ENOENT;
-               goto audit;
+               /* released when @new is freed */
+               ent->new->ns = aa_get_namespace(ns);
+
+               if (ent->old || ent->rename)
+                       continue;
+
+               /* no ref on policy only use inside lock */
+               policy = __lookup_parent(ns, ent->new->base.hname);
+               if (!policy) {
+                       struct aa_profile *p;
+                       p = __list_lookup_parent(&lh, ent->new);
+                       if (!p) {
+                               error = -ENOENT;
+                               info = "parent does not exist";
+                               name = ent->new->base.hname;
+                               goto fail_lock;
+                       }
+                       rcu_assign_pointer(ent->new->parent, aa_get_profile(p));
+               } else if (policy != &ns->base) {
+                       /* released on profile replacement or free_profile */
+                       struct aa_profile *p = (struct aa_profile *) policy;
+                       rcu_assign_pointer(ent->new->parent, aa_get_profile(p));
+               }
        }
 
-       old_profile = __find_child(&policy->profiles, new_profile->base.name);
-       /* released below */
-       aa_get_profile(old_profile);
+       /* create new fs entries for introspection if needed */
+       list_for_each_entry(ent, &lh, list) {
+               if (ent->old) {
+                       /* inherit old interface files */
 
-       if (new_profile->rename) {
-               rename_profile = __lookup_profile(&ns->base,
-                                                 new_profile->rename);
-               /* released below */
-               aa_get_profile(rename_profile);
+                       /* if (ent->rename)
+                               TODO: support rename */
+               /* } else if (ent->rename) {
+                       TODO: support rename */
+               } else {
+                       struct dentry *parent;
+                       if (rcu_access_pointer(ent->new->parent)) {
+                               struct aa_profile *p;
+                               p = aa_deref_parent(ent->new);
+                               parent = prof_child_dir(p);
+                       } else
+                               parent = ns_subprofs_dir(ent->new->ns);
+                       error = __aa_fs_profile_mkdir(ent->new, parent);
+               }
 
-               if (!rename_profile) {
-                       info = "profile to rename does not exist";
-                       name = new_profile->rename;
-                       error = -ENOENT;
-                       goto audit;
+               if (error) {
+                       info = "failed to create ";
+                       goto fail_lock;
                }
        }
 
-       error = replacement_allowed(old_profile, noreplace, &info);
-       if (error)
-               goto audit;
-
-       error = replacement_allowed(rename_profile, noreplace, &info);
-       if (error)
-               goto audit;
-
-audit:
-       if (!old_profile && !rename_profile)
-               op = OP_PROF_LOAD;
-
-       error = audit_policy(op, GFP_ATOMIC, name, info, error);
-
-       if (!error) {
-               if (rename_profile)
-                       __replace_profile(rename_profile, new_profile);
-               if (old_profile)
-                       __replace_profile(old_profile, new_profile);
-               if (!(old_profile || rename_profile))
-                       __add_new_profile(ns, policy, new_profile);
+       /* Done with checks that may fail - do actual replacement */
+       list_for_each_entry_safe(ent, tmp, &lh, list) {
+               list_del_init(&ent->list);
+               op = (!ent->old && !ent->rename) ? OP_PROF_LOAD : OP_PROF_REPL;
+
+               audit_policy(op, GFP_ATOMIC, ent->new->base.name, NULL, error);
+
+               if (ent->old) {
+                       __replace_profile(ent->old, ent->new, 1);
+                       if (ent->rename) {
+                               /* aafs interface uses replacedby */
+                               struct aa_replacedby *r = ent->new->replacedby;
+                               rcu_assign_pointer(r->profile,
+                                                  aa_get_profile(ent->new));
+                               __replace_profile(ent->rename, ent->new, 0);
+                       }
+               } else if (ent->rename) {
+                       /* aafs interface uses replacedby */
+                       rcu_assign_pointer(ent->new->replacedby->profile,
+                                          aa_get_profile(ent->new));
+                       __replace_profile(ent->rename, ent->new, 0);
+               } else if (ent->new->parent) {
+                       struct aa_profile *parent, *newest;
+                       parent = aa_deref_parent(ent->new);
+                       newest = aa_get_newest_profile(parent);
+
+                       /* parent replaced in this atomic set? */
+                       if (newest != parent) {
+                               aa_get_profile(newest);
+                               aa_put_profile(parent);
+                               rcu_assign_pointer(ent->new->parent, newest);
+                       } else
+                               aa_put_profile(newest);
+                       /* aafs interface uses replacedby */
+                       rcu_assign_pointer(ent->new->replacedby->profile,
+                                          aa_get_profile(ent->new));
+                       __list_add_profile(&parent->base.profiles, ent->new);
+               } else {
+                       /* aafs interface uses replacedby */
+                       rcu_assign_pointer(ent->new->replacedby->profile,
+                                          aa_get_profile(ent->new));
+                       __list_add_profile(&ns->base.profiles, ent->new);
+               }
+               aa_load_ent_free(ent);
        }
-       write_unlock(&ns->lock);
+       mutex_unlock(&ns->lock);
 
 out:
        aa_put_namespace(ns);
-       aa_put_profile(rename_profile);
-       aa_put_profile(old_profile);
-       aa_put_profile(new_profile);
+
        if (error)
                return error;
        return size;
 
+fail_lock:
+       mutex_unlock(&ns->lock);
 fail:
        error = audit_policy(op, GFP_KERNEL, name, info, error);
+
+       list_for_each_entry_safe(ent, tmp, &lh, list) {
+               list_del_init(&ent->list);
+               aa_load_ent_free(ent);
+       }
+
        goto out;
 }
 
@@ -1169,12 +1266,12 @@ ssize_t aa_remove_profiles(char *fqname, size_t size)
 
        if (!name) {
                /* remove namespace - can only happen if fqname[0] == ':' */
-               write_lock(&ns->parent->lock);
+               mutex_lock(&ns->parent->lock);
                __remove_namespace(ns);
-               write_unlock(&ns->parent->lock);
+               mutex_unlock(&ns->parent->lock);
        } else {
                /* remove profile */
-               write_lock(&ns->lock);
+               mutex_lock(&ns->lock);
                profile = aa_get_profile(__lookup_profile(&ns->base, name));
                if (!profile) {
                        error = -ENOENT;
@@ -1183,7 +1280,7 @@ ssize_t aa_remove_profiles(char *fqname, size_t size)
                }
                name = profile->base.hname;
                __remove_profile(profile);
-               write_unlock(&ns->lock);
+               mutex_unlock(&ns->lock);
        }
 
        /* don't fail removal if audit fails */
@@ -1193,7 +1290,7 @@ ssize_t aa_remove_profiles(char *fqname, size_t size)
        return size;
 
 fail_ns_lock:
-       write_unlock(&ns->lock);
+       mutex_unlock(&ns->lock);
        aa_put_namespace(ns);
 
 fail:
index 6dac7d77cb4d53c1241402d7a267466c31dbb613..a689f10930b5e825c4da751508e274d8715ef2b5 100644 (file)
@@ -24,6 +24,7 @@
 #include "include/apparmor.h"
 #include "include/audit.h"
 #include "include/context.h"
+#include "include/crypto.h"
 #include "include/match.h"
 #include "include/policy.h"
 #include "include/policy_unpack.h"
@@ -333,8 +334,10 @@ static struct aa_dfa *unpack_dfa(struct aa_ext *e)
                /*
                 * The dfa is aligned with in the blob to 8 bytes
                 * from the beginning of the stream.
+                * alignment adjust needed by dfa unpack
                 */
-               size_t sz = blob - (char *)e->start;
+               size_t sz = blob - (char *) e->start -
+                       ((e->pos - e->start) & 7);
                size_t pad = ALIGN(sz, 8) - sz;
                int flags = TO_ACCEPT1_FLAG(YYTD_DATA32) |
                        TO_ACCEPT2_FLAG(YYTD_DATA32);
@@ -490,6 +493,9 @@ static struct aa_profile *unpack_profile(struct aa_ext *e)
        /* profile renaming is optional */
        (void) unpack_str(e, &profile->rename, "rename");
 
+       /* attachment string is optional */
+       (void) unpack_str(e, &profile->attach, "attach");
+
        /* xmatch is optional and may be NULL */
        profile->xmatch = unpack_dfa(e);
        if (IS_ERR(profile->xmatch)) {
@@ -509,12 +515,16 @@ static struct aa_profile *unpack_profile(struct aa_ext *e)
                goto fail;
        if (!unpack_u32(e, &tmp, NULL))
                goto fail;
-       if (tmp)
+       if (tmp & PACKED_FLAG_HAT)
                profile->flags |= PFLAG_HAT;
        if (!unpack_u32(e, &tmp, NULL))
                goto fail;
-       if (tmp)
+       if (tmp == PACKED_MODE_COMPLAIN)
                profile->mode = APPARMOR_COMPLAIN;
+       else if (tmp == PACKED_MODE_KILL)
+               profile->mode = APPARMOR_KILL;
+       else if (tmp == PACKED_MODE_UNCONFINED)
+               profile->mode = APPARMOR_UNCONFINED;
        if (!unpack_u32(e, &tmp, NULL))
                goto fail;
        if (tmp)
@@ -614,7 +624,7 @@ fail:
        else if (!name)
                name = "unknown";
        audit_iface(profile, name, "failed to unpack profile", e, error);
-       aa_put_profile(profile);
+       aa_free_profile(profile);
 
        return ERR_PTR(error);
 }
@@ -622,29 +632,41 @@ fail:
 /**
  * verify_head - unpack serialized stream header
  * @e: serialized data read head (NOT NULL)
+ * @required: whether the header is required or optional
  * @ns: Returns - namespace if one is specified else NULL (NOT NULL)
  *
  * Returns: error or 0 if header is good
  */
-static int verify_header(struct aa_ext *e, const char **ns)
+static int verify_header(struct aa_ext *e, int required, const char **ns)
 {
        int error = -EPROTONOSUPPORT;
+       const char *name = NULL;
+       *ns = NULL;
+
        /* get the interface version */
        if (!unpack_u32(e, &e->version, "version")) {
-               audit_iface(NULL, NULL, "invalid profile format", e, error);
-               return error;
-       }
+               if (required) {
+                       audit_iface(NULL, NULL, "invalid profile format", e,
+                                   error);
+                       return error;
+               }
 
-       /* check that the interface version is currently supported */
-       if (e->version != 5) {
-               audit_iface(NULL, NULL, "unsupported interface version", e,
-                           error);
-               return error;
+               /* check that the interface version is currently supported */
+               if (e->version != 5) {
+                       audit_iface(NULL, NULL, "unsupported interface version",
+                                   e, error);
+                       return error;
+               }
        }
 
+
        /* read the namespace if present */
-       if (!unpack_str(e, ns, "namespace"))
-               *ns = NULL;
+       if (unpack_str(e, &name, "namespace")) {
+               if (*ns && strcmp(*ns, name))
+                       audit_iface(NULL, NULL, "invalid ns change", e, error);
+               else if (!*ns)
+                       *ns = name;
+       }
 
        return 0;
 }
@@ -693,18 +715,40 @@ static int verify_profile(struct aa_profile *profile)
        return 0;
 }
 
+void aa_load_ent_free(struct aa_load_ent *ent)
+{
+       if (ent) {
+               aa_put_profile(ent->rename);
+               aa_put_profile(ent->old);
+               aa_put_profile(ent->new);
+               kzfree(ent);
+       }
+}
+
+struct aa_load_ent *aa_load_ent_alloc(void)
+{
+       struct aa_load_ent *ent = kzalloc(sizeof(*ent), GFP_KERNEL);
+       if (ent)
+               INIT_LIST_HEAD(&ent->list);
+       return ent;
+}
+
 /**
- * aa_unpack - unpack packed binary profile data loaded from user space
+ * aa_unpack - unpack packed binary profile(s) data loaded from user space
  * @udata: user data copied to kmem  (NOT NULL)
  * @size: the size of the user data
+ * @lh: list to place unpacked profiles in a aa_repl_ws
  * @ns: Returns namespace profile is in if specified else NULL (NOT NULL)
  *
- * Unpack user data and return refcounted allocated profile or ERR_PTR
+ * Unpack user data and return refcounted allocated profile(s) stored in
+ * @lh in order of discovery, with the list chain stored in base.list
+ * or error
  *
- * Returns: profile else error pointer if fails to unpack
+ * Returns: profile(s) on @lh else error pointer if fails to unpack
  */
-struct aa_profile *aa_unpack(void *udata, size_t size, const char **ns)
+int aa_unpack(void *udata, size_t size, struct list_head *lh, const char **ns)
 {
+       struct aa_load_ent *tmp, *ent;
        struct aa_profile *profile = NULL;
        int error;
        struct aa_ext e = {
@@ -713,20 +757,49 @@ struct aa_profile *aa_unpack(void *udata, size_t size, const char **ns)
                .pos = udata,
        };
 
-       error = verify_header(&e, ns);
-       if (error)
-               return ERR_PTR(error);
+       *ns = NULL;
+       while (e.pos < e.end) {
+               void *start;
+               error = verify_header(&e, e.pos == e.start, ns);
+               if (error)
+                       goto fail;
+
+               start = e.pos;
+               profile = unpack_profile(&e);
+               if (IS_ERR(profile)) {
+                       error = PTR_ERR(profile);
+                       goto fail;
+               }
+
+               error = verify_profile(profile);
+               if (error)
+                       goto fail_profile;
+
+               error = aa_calc_profile_hash(profile, e.version, start,
+                                            e.pos - start);
+               if (error)
+                       goto fail_profile;
+
+               ent = aa_load_ent_alloc();
+               if (!ent) {
+                       error = -ENOMEM;
+                       goto fail_profile;
+               }
+
+               ent->new = profile;
+               list_add_tail(&ent->list, lh);
+       }
+
+       return 0;
 
-       profile = unpack_profile(&e);
-       if (IS_ERR(profile))
-               return profile;
+fail_profile:
+       aa_put_profile(profile);
 
-       error = verify_profile(profile);
-       if (error) {
-               aa_put_profile(profile);
-               profile = ERR_PTR(error);
+fail:
+       list_for_each_entry_safe(ent, tmp, lh, list) {
+               list_del_init(&ent->list);
+               aa_load_ent_free(ent);
        }
 
-       /* return refcount */
-       return profile;
+       return error;
 }
index 6c9390179b8909882e8a86e91e4f61349d79dd09..b125acc9aa26cc327572955fb7aa83dbf2c5d09d 100644 (file)
@@ -37,7 +37,7 @@ int aa_getprocattr(struct aa_profile *profile, char **string)
 {
        char *str;
        int len = 0, mode_len = 0, ns_len = 0, name_len;
-       const char *mode_str = profile_mode_names[profile->mode];
+       const char *mode_str = aa_profile_mode_names[profile->mode];
        const char *ns_name = NULL;
        struct aa_namespace *ns = profile->ns;
        struct aa_namespace *current_ns = __aa_current_profile()->ns;
index 32b515766df17c984c6a1c56c86fa67dba4ee25d..8b4f24ae43381de05af67271edd9a8ddd57c651f 100644 (file)
@@ -129,7 +129,7 @@ static void cap_inode_free_security(struct inode *inode)
 }
 
 static int cap_inode_init_security(struct inode *inode, struct inode *dir,
-                                  const struct qstr *qstr, char **name,
+                                  const struct qstr *qstr, const char **name,
                                   void **value, size_t *len)
 {
        return -EOPNOTSUPP;
@@ -777,9 +777,15 @@ static int cap_xfrm_policy_delete_security(struct xfrm_sec_ctx *ctx)
        return 0;
 }
 
-static int cap_xfrm_state_alloc_security(struct xfrm_state *x,
-                                        struct xfrm_user_sec_ctx *sec_ctx,
-                                        u32 secid)
+static int cap_xfrm_state_alloc(struct xfrm_state *x,
+                               struct xfrm_user_sec_ctx *sec_ctx)
+{
+       return 0;
+}
+
+static int cap_xfrm_state_alloc_acquire(struct xfrm_state *x,
+                                       struct xfrm_sec_ctx *polsec,
+                                       u32 secid)
 {
        return 0;
 }
@@ -1101,7 +1107,8 @@ void __init security_fixup_ops(struct security_operations *ops)
        set_to_cap_if_null(ops, xfrm_policy_clone_security);
        set_to_cap_if_null(ops, xfrm_policy_free_security);
        set_to_cap_if_null(ops, xfrm_policy_delete_security);
-       set_to_cap_if_null(ops, xfrm_state_alloc_security);
+       set_to_cap_if_null(ops, xfrm_state_alloc);
+       set_to_cap_if_null(ops, xfrm_state_alloc_acquire);
        set_to_cap_if_null(ops, xfrm_state_free_security);
        set_to_cap_if_null(ops, xfrm_state_delete_security);
        set_to_cap_if_null(ops, xfrm_policy_lookup);
index e8aad69f0d696c70b21dc8af16b0d28d990b3eda..c123628d3f848ee25497858a96c861a835f723d2 100644 (file)
@@ -53,22 +53,17 @@ struct dev_cgroup {
 
 static inline struct dev_cgroup *css_to_devcgroup(struct cgroup_subsys_state *s)
 {
-       return container_of(s, struct dev_cgroup, css);
-}
-
-static inline struct dev_cgroup *cgroup_to_devcgroup(struct cgroup *cgroup)
-{
-       return css_to_devcgroup(cgroup_subsys_state(cgroup, devices_subsys_id));
+       return s ? container_of(s, struct dev_cgroup, css) : NULL;
 }
 
 static inline struct dev_cgroup *task_devcgroup(struct task_struct *task)
 {
-       return css_to_devcgroup(task_subsys_state(task, devices_subsys_id));
+       return css_to_devcgroup(task_css(task, devices_subsys_id));
 }
 
 struct cgroup_subsys devices_subsys;
 
-static int devcgroup_can_attach(struct cgroup *new_cgrp,
+static int devcgroup_can_attach(struct cgroup_subsys_state *new_css,
                                struct cgroup_taskset *set)
 {
        struct task_struct *task = cgroup_taskset_first(set);
@@ -193,18 +188,16 @@ static inline bool is_devcg_online(const struct dev_cgroup *devcg)
 /**
  * devcgroup_online - initializes devcgroup's behavior and exceptions based on
  *                   parent's
- * @cgroup: cgroup getting online
+ * @css: css getting online
  * returns 0 in case of success, error code otherwise
  */
-static int devcgroup_online(struct cgroup *cgroup)
+static int devcgroup_online(struct cgroup_subsys_state *css)
 {
-       struct dev_cgroup *dev_cgroup, *parent_dev_cgroup = NULL;
+       struct dev_cgroup *dev_cgroup = css_to_devcgroup(css);
+       struct dev_cgroup *parent_dev_cgroup = css_to_devcgroup(css_parent(css));
        int ret = 0;
 
        mutex_lock(&devcgroup_mutex);
-       dev_cgroup = cgroup_to_devcgroup(cgroup);
-       if (cgroup->parent)
-               parent_dev_cgroup = cgroup_to_devcgroup(cgroup->parent);
 
        if (parent_dev_cgroup == NULL)
                dev_cgroup->behavior = DEVCG_DEFAULT_ALLOW;
@@ -219,9 +212,9 @@ static int devcgroup_online(struct cgroup *cgroup)
        return ret;
 }
 
-static void devcgroup_offline(struct cgroup *cgroup)
+static void devcgroup_offline(struct cgroup_subsys_state *css)
 {
-       struct dev_cgroup *dev_cgroup = cgroup_to_devcgroup(cgroup);
+       struct dev_cgroup *dev_cgroup = css_to_devcgroup(css);
 
        mutex_lock(&devcgroup_mutex);
        dev_cgroup->behavior = DEVCG_DEFAULT_NONE;
@@ -231,7 +224,8 @@ static void devcgroup_offline(struct cgroup *cgroup)
 /*
  * called from kernel/cgroup.c with cgroup_lock() held.
  */
-static struct cgroup_subsys_state *devcgroup_css_alloc(struct cgroup *cgroup)
+static struct cgroup_subsys_state *
+devcgroup_css_alloc(struct cgroup_subsys_state *parent_css)
 {
        struct dev_cgroup *dev_cgroup;
 
@@ -244,11 +238,10 @@ static struct cgroup_subsys_state *devcgroup_css_alloc(struct cgroup *cgroup)
        return &dev_cgroup->css;
 }
 
-static void devcgroup_css_free(struct cgroup *cgroup)
+static void devcgroup_css_free(struct cgroup_subsys_state *css)
 {
-       struct dev_cgroup *dev_cgroup;
+       struct dev_cgroup *dev_cgroup = css_to_devcgroup(css);
 
-       dev_cgroup = cgroup_to_devcgroup(cgroup);
        __dev_exception_clean(dev_cgroup);
        kfree(dev_cgroup);
 }
@@ -291,10 +284,10 @@ static void set_majmin(char *str, unsigned m)
                sprintf(str, "%u", m);
 }
 
-static int devcgroup_seq_read(struct cgroup *cgroup, struct cftype *cft,
-                               struct seq_file *m)
+static int devcgroup_seq_read(struct cgroup_subsys_state *css,
+                             struct cftype *cft, struct seq_file *m)
 {
-       struct dev_cgroup *devcgroup = cgroup_to_devcgroup(cgroup);
+       struct dev_cgroup *devcgroup = css_to_devcgroup(css);
        struct dev_exception_item *ex;
        char maj[MAJMINLEN], min[MAJMINLEN], acc[ACCLEN];
 
@@ -394,12 +387,10 @@ static bool may_access(struct dev_cgroup *dev_cgroup,
 static int parent_has_perm(struct dev_cgroup *childcg,
                                  struct dev_exception_item *ex)
 {
-       struct cgroup *pcg = childcg->css.cgroup->parent;
-       struct dev_cgroup *parent;
+       struct dev_cgroup *parent = css_to_devcgroup(css_parent(&childcg->css));
 
-       if (!pcg)
+       if (!parent)
                return 1;
-       parent = cgroup_to_devcgroup(pcg);
        return may_access(parent, ex, childcg->behavior);
 }
 
@@ -451,13 +442,13 @@ static void revalidate_active_exceptions(struct dev_cgroup *devcg)
 static int propagate_exception(struct dev_cgroup *devcg_root,
                               struct dev_exception_item *ex)
 {
-       struct cgroup *root = devcg_root->css.cgroup, *pos;
+       struct cgroup_subsys_state *pos;
        int rc = 0;
 
        rcu_read_lock();
 
-       cgroup_for_each_descendant_pre(pos, root) {
-               struct dev_cgroup *devcg = cgroup_to_devcgroup(pos);
+       css_for_each_descendant_pre(pos, &devcg_root->css) {
+               struct dev_cgroup *devcg = css_to_devcgroup(pos);
 
                /*
                 * Because devcgroup_mutex is held, no devcg will become
@@ -465,7 +456,7 @@ static int propagate_exception(struct dev_cgroup *devcg_root,
                 * methods), and online ones are safe to access outside RCU
                 * read lock without bumping refcnt.
                 */
-               if (!is_devcg_online(devcg))
+               if (pos == &devcg_root->css || !is_devcg_online(devcg))
                        continue;
 
                rcu_read_unlock();
@@ -524,15 +515,11 @@ static int devcgroup_update_access(struct dev_cgroup *devcgroup,
        char temp[12];          /* 11 + 1 characters needed for a u32 */
        int count, rc = 0;
        struct dev_exception_item ex;
-       struct cgroup *p = devcgroup->css.cgroup;
-       struct dev_cgroup *parent = NULL;
+       struct dev_cgroup *parent = css_to_devcgroup(css_parent(&devcgroup->css));
 
        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
 
-       if (p->parent)
-               parent = cgroup_to_devcgroup(p->parent);
-
        memset(&ex, 0, sizeof(ex));
        b = buffer;
 
@@ -677,13 +664,13 @@ static int devcgroup_update_access(struct dev_cgroup *devcgroup,
        return rc;
 }
 
-static int devcgroup_access_write(struct cgroup *cgrp, struct cftype *cft,
-                                 const char *buffer)
+static int devcgroup_access_write(struct cgroup_subsys_state *css,
+                                 struct cftype *cft, const char *buffer)
 {
        int retval;
 
        mutex_lock(&devcgroup_mutex);
-       retval = devcgroup_update_access(cgroup_to_devcgroup(cgrp),
+       retval = devcgroup_update_access(css_to_devcgroup(css),
                                         cft->private, buffer);
        mutex_unlock(&devcgroup_mutex);
        return retval;
index df0fa451a8718312ba319d4d27f2edfdf7a3b8e4..af9b6852f4e1bf571b55a2010fd6ab0488119cda 100644 (file)
@@ -418,7 +418,7 @@ int evm_inode_init_security(struct inode *inode,
 
        evm_xattr->value = xattr_data;
        evm_xattr->value_len = sizeof(*xattr_data);
-       evm_xattr->name = kstrdup(XATTR_EVM_SUFFIX, GFP_NOFS);
+       evm_xattr->name = XATTR_EVM_SUFFIX;
        return 0;
 out:
        kfree(xattr_data);
index 94b35aef6871a9978cf21799cfec3385502bc555..15b6928592ef68aac565e3fc94daf4737b6adc54 100644 (file)
@@ -348,10 +348,10 @@ int security_inode_init_security(struct inode *inode, struct inode *dir,
        if (unlikely(IS_PRIVATE(inode)))
                return 0;
 
-       memset(new_xattrs, 0, sizeof new_xattrs);
        if (!initxattrs)
                return security_ops->inode_init_security(inode, dir, qstr,
                                                         NULL, NULL, NULL);
+       memset(new_xattrs, 0, sizeof(new_xattrs));
        lsm_xattr = new_xattrs;
        ret = security_ops->inode_init_security(inode, dir, qstr,
                                                &lsm_xattr->name,
@@ -366,16 +366,14 @@ int security_inode_init_security(struct inode *inode, struct inode *dir,
                goto out;
        ret = initxattrs(inode, new_xattrs, fs_data);
 out:
-       for (xattr = new_xattrs; xattr->name != NULL; xattr++) {
-               kfree(xattr->name);
+       for (xattr = new_xattrs; xattr->value != NULL; xattr++)
                kfree(xattr->value);
-       }
        return (ret == -EOPNOTSUPP) ? 0 : ret;
 }
 EXPORT_SYMBOL(security_inode_init_security);
 
 int security_old_inode_init_security(struct inode *inode, struct inode *dir,
-                                    const struct qstr *qstr, char **name,
+                                    const struct qstr *qstr, const char **name,
                                     void **value, size_t *len)
 {
        if (unlikely(IS_PRIVATE(inode)))
@@ -1342,22 +1340,17 @@ int security_xfrm_policy_delete(struct xfrm_sec_ctx *ctx)
        return security_ops->xfrm_policy_delete_security(ctx);
 }
 
-int security_xfrm_state_alloc(struct xfrm_state *x, struct xfrm_user_sec_ctx *sec_ctx)
+int security_xfrm_state_alloc(struct xfrm_state *x,
+                             struct xfrm_user_sec_ctx *sec_ctx)
 {
-       return security_ops->xfrm_state_alloc_security(x, sec_ctx, 0);
+       return security_ops->xfrm_state_alloc(x, sec_ctx);
 }
 EXPORT_SYMBOL(security_xfrm_state_alloc);
 
 int security_xfrm_state_alloc_acquire(struct xfrm_state *x,
                                      struct xfrm_sec_ctx *polsec, u32 secid)
 {
-       if (!polsec)
-               return 0;
-       /*
-        * We want the context to be taken from secid which is usually
-        * from the sock.
-        */
-       return security_ops->xfrm_state_alloc_security(x, NULL, secid);
+       return security_ops->xfrm_state_alloc_acquire(x, polsec, secid);
 }
 
 int security_xfrm_state_delete(struct xfrm_state *x)
index c956390a9136b75a7fb8ed17ded49c69310b31cf..cf0f8df7f8ed60f34bfba5a0af8ac6b352f886e5 100644 (file)
@@ -95,8 +95,6 @@
 #include "audit.h"
 #include "avc_ss.h"
 
-#define NUM_SEL_MNT_OPTS 5
-
 extern struct security_operations *security_ops;
 
 /* SECMARK reference count */
@@ -139,12 +137,28 @@ static struct kmem_cache *sel_inode_cache;
  * This function checks the SECMARK reference counter to see if any SECMARK
  * targets are currently configured, if the reference counter is greater than
  * zero SECMARK is considered to be enabled.  Returns true (1) if SECMARK is
- * enabled, false (0) if SECMARK is disabled.
+ * enabled, false (0) if SECMARK is disabled.  If the always_check_network
+ * policy capability is enabled, SECMARK is always considered enabled.
  *
  */
 static int selinux_secmark_enabled(void)
 {
-       return (atomic_read(&selinux_secmark_refcount) > 0);
+       return (selinux_policycap_alwaysnetwork || atomic_read(&selinux_secmark_refcount));
+}
+
+/**
+ * selinux_peerlbl_enabled - Check to see if peer labeling is currently enabled
+ *
+ * Description:
+ * This function checks if NetLabel or labeled IPSEC is enabled.  Returns true
+ * (1) if any are enabled or false (0) if neither are enabled.  If the
+ * always_check_network policy capability is enabled, peer labeling
+ * is always considered enabled.
+ *
+ */
+static int selinux_peerlbl_enabled(void)
+{
+       return (selinux_policycap_alwaysnetwork || netlbl_enabled() || selinux_xfrm_enabled());
 }
 
 /*
@@ -309,8 +323,11 @@ enum {
        Opt_defcontext = 3,
        Opt_rootcontext = 4,
        Opt_labelsupport = 5,
+       Opt_nextmntopt = 6,
 };
 
+#define NUM_SEL_MNT_OPTS       (Opt_nextmntopt - 1)
+
 static const match_table_t tokens = {
        {Opt_context, CONTEXT_STR "%s"},
        {Opt_fscontext, FSCONTEXT_STR "%s"},
@@ -355,6 +372,29 @@ static int may_context_mount_inode_relabel(u32 sid,
        return rc;
 }
 
+static int selinux_is_sblabel_mnt(struct super_block *sb)
+{
+       struct superblock_security_struct *sbsec = sb->s_security;
+
+       if (sbsec->behavior == SECURITY_FS_USE_XATTR ||
+           sbsec->behavior == SECURITY_FS_USE_TRANS ||
+           sbsec->behavior == SECURITY_FS_USE_TASK)
+               return 1;
+
+       /* Special handling for sysfs. Is genfs but also has setxattr handler*/
+       if (strncmp(sb->s_type->name, "sysfs", sizeof("sysfs")) == 0)
+               return 1;
+
+       /*
+        * Special handling for rootfs. Is genfs but supports
+        * setting SELinux context on in-core inodes.
+        */
+       if (strncmp(sb->s_type->name, "rootfs", sizeof("rootfs")) == 0)
+               return 1;
+
+       return 0;
+}
+
 static int sb_finish_set_opts(struct super_block *sb)
 {
        struct superblock_security_struct *sbsec = sb->s_security;
@@ -388,8 +428,6 @@ static int sb_finish_set_opts(struct super_block *sb)
                }
        }
 
-       sbsec->flags |= (SE_SBINITIALIZED | SE_SBLABELSUPP);
-
        if (sbsec->behavior > ARRAY_SIZE(labeling_behaviors))
                printk(KERN_ERR "SELinux: initialized (dev %s, type %s), unknown behavior\n",
                       sb->s_id, sb->s_type->name);
@@ -398,15 +436,9 @@ static int sb_finish_set_opts(struct super_block *sb)
                       sb->s_id, sb->s_type->name,
                       labeling_behaviors[sbsec->behavior-1]);
 
-       if (sbsec->behavior == SECURITY_FS_USE_GENFS ||
-           sbsec->behavior == SECURITY_FS_USE_MNTPOINT ||
-           sbsec->behavior == SECURITY_FS_USE_NONE ||
-           sbsec->behavior > ARRAY_SIZE(labeling_behaviors))
-               sbsec->flags &= ~SE_SBLABELSUPP;
-
-       /* Special handling for sysfs. Is genfs but also has setxattr handler*/
-       if (strncmp(sb->s_type->name, "sysfs", sizeof("sysfs")) == 0)
-               sbsec->flags |= SE_SBLABELSUPP;
+       sbsec->flags |= SE_SBINITIALIZED;
+       if (selinux_is_sblabel_mnt(sb))
+               sbsec->flags |= SBLABEL_MNT;
 
        /* Initialize the root inode. */
        rc = inode_doinit_with_dentry(root_inode, root);
@@ -460,16 +492,16 @@ static int selinux_get_mnt_opts(const struct super_block *sb,
        if (!ss_initialized)
                return -EINVAL;
 
+       /* make sure we always check enough bits to cover the mask */
+       BUILD_BUG_ON(SE_MNTMASK >= (1 << NUM_SEL_MNT_OPTS));
+
        tmp = sbsec->flags & SE_MNTMASK;
        /* count the number of mount options for this sb */
-       for (i = 0; i < 8; i++) {
+       for (i = 0; i < NUM_SEL_MNT_OPTS; i++) {
                if (tmp & 0x01)
                        opts->num_mnt_opts++;
                tmp >>= 1;
        }
-       /* Check if the Label support flag is set */
-       if (sbsec->flags & SE_SBLABELSUPP)
-               opts->num_mnt_opts++;
 
        opts->mnt_opts = kcalloc(opts->num_mnt_opts, sizeof(char *), GFP_ATOMIC);
        if (!opts->mnt_opts) {
@@ -515,9 +547,9 @@ static int selinux_get_mnt_opts(const struct super_block *sb,
                opts->mnt_opts[i] = context;
                opts->mnt_opts_flags[i++] = ROOTCONTEXT_MNT;
        }
-       if (sbsec->flags & SE_SBLABELSUPP) {
+       if (sbsec->flags & SBLABEL_MNT) {
                opts->mnt_opts[i] = NULL;
-               opts->mnt_opts_flags[i++] = SE_SBLABELSUPP;
+               opts->mnt_opts_flags[i++] = SBLABEL_MNT;
        }
 
        BUG_ON(i != opts->num_mnt_opts);
@@ -614,7 +646,7 @@ static int selinux_set_mnt_opts(struct super_block *sb,
        for (i = 0; i < num_opts; i++) {
                u32 sid;
 
-               if (flags[i] == SE_SBLABELSUPP)
+               if (flags[i] == SBLABEL_MNT)
                        continue;
                rc = security_context_to_sid(mount_options[i],
                                             strlen(mount_options[i]), &sid);
@@ -685,9 +717,7 @@ static int selinux_set_mnt_opts(struct super_block *sb,
                 * Determine the labeling behavior to use for this
                 * filesystem type.
                 */
-               rc = security_fs_use((sbsec->flags & SE_SBPROC) ?
-                                       "proc" : sb->s_type->name,
-                                       &sbsec->behavior, &sbsec->sid);
+               rc = security_fs_use(sb);
                if (rc) {
                        printk(KERN_WARNING
                                "%s: security_fs_use(%s) returned %d\n",
@@ -1037,7 +1067,7 @@ static void selinux_write_opts(struct seq_file *m,
                case DEFCONTEXT_MNT:
                        prefix = DEFCONTEXT_STR;
                        break;
-               case SE_SBLABELSUPP:
+               case SBLABEL_MNT:
                        seq_putc(m, ',');
                        seq_puts(m, LABELSUPP_STR);
                        continue;
@@ -1650,7 +1680,7 @@ static int may_create(struct inode *dir,
        if (rc)
                return rc;
 
-       if (!newsid || !(sbsec->flags & SE_SBLABELSUPP)) {
+       if (!newsid || !(sbsec->flags & SBLABEL_MNT)) {
                rc = security_transition_sid(sid, dsec->sid, tclass,
                                             &dentry->d_name, &newsid);
                if (rc)
@@ -2438,7 +2468,7 @@ static int selinux_sb_remount(struct super_block *sb, void *data)
                u32 sid;
                size_t len;
 
-               if (flags[i] == SE_SBLABELSUPP)
+               if (flags[i] == SBLABEL_MNT)
                        continue;
                len = strlen(mount_options[i]);
                rc = security_context_to_sid(mount_options[i], len, &sid);
@@ -2587,7 +2617,8 @@ static int selinux_dentry_init_security(struct dentry *dentry, int mode,
 }
 
 static int selinux_inode_init_security(struct inode *inode, struct inode *dir,
-                                      const struct qstr *qstr, char **name,
+                                      const struct qstr *qstr,
+                                      const char **name,
                                       void **value, size_t *len)
 {
        const struct task_security_struct *tsec = current_security();
@@ -2595,7 +2626,7 @@ static int selinux_inode_init_security(struct inode *inode, struct inode *dir,
        struct superblock_security_struct *sbsec;
        u32 sid, newsid, clen;
        int rc;
-       char *namep = NULL, *context;
+       char *context;
 
        dsec = dir->i_security;
        sbsec = dir->i_sb->s_security;
@@ -2606,7 +2637,7 @@ static int selinux_inode_init_security(struct inode *inode, struct inode *dir,
        if ((sbsec->flags & SE_SBINITIALIZED) &&
            (sbsec->behavior == SECURITY_FS_USE_MNTPOINT))
                newsid = sbsec->mntpoint_sid;
-       else if (!newsid || !(sbsec->flags & SE_SBLABELSUPP)) {
+       else if (!newsid || !(sbsec->flags & SBLABEL_MNT)) {
                rc = security_transition_sid(sid, dsec->sid,
                                             inode_mode_to_security_class(inode->i_mode),
                                             qstr, &newsid);
@@ -2628,22 +2659,16 @@ static int selinux_inode_init_security(struct inode *inode, struct inode *dir,
                isec->initialized = 1;
        }
 
-       if (!ss_initialized || !(sbsec->flags & SE_SBLABELSUPP))
+       if (!ss_initialized || !(sbsec->flags & SBLABEL_MNT))
                return -EOPNOTSUPP;
 
-       if (name) {
-               namep = kstrdup(XATTR_SELINUX_SUFFIX, GFP_NOFS);
-               if (!namep)
-                       return -ENOMEM;
-               *name = namep;
-       }
+       if (name)
+               *name = XATTR_SELINUX_SUFFIX;
 
        if (value && len) {
                rc = security_sid_to_context_force(newsid, &context, &clen);
-               if (rc) {
-                       kfree(namep);
+               if (rc)
                        return rc;
-               }
                *value = context;
                *len = clen;
        }
@@ -2836,7 +2861,7 @@ static int selinux_inode_setxattr(struct dentry *dentry, const char *name,
                return selinux_inode_setotherxattr(dentry, name);
 
        sbsec = inode->i_sb->s_security;
-       if (!(sbsec->flags & SE_SBLABELSUPP))
+       if (!(sbsec->flags & SBLABEL_MNT))
                return -EOPNOTSUPP;
 
        if (!inode_owner_or_capable(inode))
@@ -3797,8 +3822,12 @@ static int selinux_skb_peerlbl_sid(struct sk_buff *skb, u16 family, u32 *sid)
        u32 nlbl_sid;
        u32 nlbl_type;
 
-       selinux_skb_xfrm_sid(skb, &xfrm_sid);
-       selinux_netlbl_skbuff_getsid(skb, family, &nlbl_type, &nlbl_sid);
+       err = selinux_skb_xfrm_sid(skb, &xfrm_sid);
+       if (unlikely(err))
+               return -EACCES;
+       err = selinux_netlbl_skbuff_getsid(skb, family, &nlbl_type, &nlbl_sid);
+       if (unlikely(err))
+               return -EACCES;
 
        err = security_net_peersid_resolve(nlbl_sid, nlbl_type, xfrm_sid, sid);
        if (unlikely(err)) {
@@ -4252,7 +4281,7 @@ static int selinux_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
                return selinux_sock_rcv_skb_compat(sk, skb, family);
 
        secmark_active = selinux_secmark_enabled();
-       peerlbl_active = netlbl_enabled() || selinux_xfrm_enabled();
+       peerlbl_active = selinux_peerlbl_enabled();
        if (!secmark_active && !peerlbl_active)
                return 0;
 
@@ -4634,7 +4663,7 @@ static unsigned int selinux_ip_forward(struct sk_buff *skb, int ifindex,
 
        secmark_active = selinux_secmark_enabled();
        netlbl_active = netlbl_enabled();
-       peerlbl_active = netlbl_active || selinux_xfrm_enabled();
+       peerlbl_active = selinux_peerlbl_enabled();
        if (!secmark_active && !peerlbl_active)
                return NF_ACCEPT;
 
@@ -4786,7 +4815,7 @@ static unsigned int selinux_ip_postroute(struct sk_buff *skb, int ifindex,
                return NF_ACCEPT;
 #endif
        secmark_active = selinux_secmark_enabled();
-       peerlbl_active = netlbl_enabled() || selinux_xfrm_enabled();
+       peerlbl_active = selinux_peerlbl_enabled();
        if (!secmark_active && !peerlbl_active)
                return NF_ACCEPT;
 
@@ -5790,7 +5819,8 @@ static struct security_operations selinux_ops = {
        .xfrm_policy_clone_security =   selinux_xfrm_policy_clone,
        .xfrm_policy_free_security =    selinux_xfrm_policy_free,
        .xfrm_policy_delete_security =  selinux_xfrm_policy_delete,
-       .xfrm_state_alloc_security =    selinux_xfrm_state_alloc,
+       .xfrm_state_alloc =             selinux_xfrm_state_alloc,
+       .xfrm_state_alloc_acquire =     selinux_xfrm_state_alloc_acquire,
        .xfrm_state_free_security =     selinux_xfrm_state_free,
        .xfrm_state_delete_security =   selinux_xfrm_state_delete,
        .xfrm_policy_lookup =           selinux_xfrm_policy_lookup,
index aa47bcabb5f65e728aadbaa39cdecfa55d20aa16..b1dfe104945078ead53647c247c46aa6134fac2e 100644 (file)
@@ -58,8 +58,8 @@ struct superblock_security_struct {
        u32 sid;                        /* SID of file system superblock */
        u32 def_sid;                    /* default SID for labeling */
        u32 mntpoint_sid;               /* SECURITY_FS_USE_MNTPOINT context for files */
-       unsigned int behavior;          /* labeling behavior */
-       unsigned char flags;            /* which mount options were specified */
+       unsigned short behavior;        /* labeling behavior */
+       unsigned short flags;           /* which mount options were specified */
        struct mutex lock;
        struct list_head isec_head;
        spinlock_t isec_lock;
index 8fd8e18ea34019c863d91ba88268b8c4018f3410..216e53fd61369842769da1850f9db3dc5cdb3375 100644 (file)
 #endif
 
 /* Mask for just the mount related flags */
-#define SE_MNTMASK     0x0f
+#define SE_MNTMASK     0x1f
 /* Super block security struct flags for mount options */
+/* BE CAREFUL, these need to be the low order bits for selinux_get_mnt_opts */
 #define CONTEXT_MNT    0x01
 #define FSCONTEXT_MNT  0x02
 #define ROOTCONTEXT_MNT        0x04
 #define DEFCONTEXT_MNT 0x08
+#define SBLABEL_MNT    0x10
 /* Non-mount related flags */
-#define SE_SBINITIALIZED       0x10
-#define SE_SBPROC              0x20
-#define SE_SBLABELSUPP 0x40
+#define SE_SBINITIALIZED       0x0100
+#define SE_SBPROC              0x0200
 
 #define CONTEXT_STR    "context="
 #define FSCONTEXT_STR  "fscontext="
@@ -68,12 +69,15 @@ extern int selinux_enabled;
 enum {
        POLICYDB_CAPABILITY_NETPEER,
        POLICYDB_CAPABILITY_OPENPERM,
+       POLICYDB_CAPABILITY_REDHAT1,
+       POLICYDB_CAPABILITY_ALWAYSNETWORK,
        __POLICYDB_CAPABILITY_MAX
 };
 #define POLICYDB_CAPABILITY_MAX (__POLICYDB_CAPABILITY_MAX - 1)
 
 extern int selinux_policycap_netpeer;
 extern int selinux_policycap_openperm;
+extern int selinux_policycap_alwaysnetwork;
 
 /*
  * type_datum properties
@@ -172,8 +176,7 @@ int security_get_allow_unknown(void);
 #define SECURITY_FS_USE_NATIVE         7 /* use native label support */
 #define SECURITY_FS_USE_MAX            7 /* Highest SECURITY_FS_USE_XXX */
 
-int security_fs_use(const char *fstype, unsigned int *behavior,
-       u32 *sid);
+int security_fs_use(struct super_block *sb);
 
 int security_genfs_sid(const char *fstype, char *name, u16 sclass,
        u32 *sid);
index 65f67cb0aefb22f323d8048c140417555c273c9d..0dec76c64cf53853d0eea6aac983db307c8636b8 100644 (file)
 #include <net/flow.h>
 
 int selinux_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp,
-                             struct xfrm_user_sec_ctx *sec_ctx);
+                             struct xfrm_user_sec_ctx *uctx);
 int selinux_xfrm_policy_clone(struct xfrm_sec_ctx *old_ctx,
                              struct xfrm_sec_ctx **new_ctxp);
 void selinux_xfrm_policy_free(struct xfrm_sec_ctx *ctx);
 int selinux_xfrm_policy_delete(struct xfrm_sec_ctx *ctx);
 int selinux_xfrm_state_alloc(struct xfrm_state *x,
-       struct xfrm_user_sec_ctx *sec_ctx, u32 secid);
+                            struct xfrm_user_sec_ctx *uctx);
+int selinux_xfrm_state_alloc_acquire(struct xfrm_state *x,
+                                    struct xfrm_sec_ctx *polsec, u32 secid);
 void selinux_xfrm_state_free(struct xfrm_state *x);
 int selinux_xfrm_state_delete(struct xfrm_state *x);
 int selinux_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir);
 int selinux_xfrm_state_pol_flow_match(struct xfrm_state *x,
-                       struct xfrm_policy *xp, const struct flowi *fl);
-
-/*
- * Extract the security blob from the sock (it's actually on the socket)
- */
-static inline struct inode_security_struct *get_sock_isec(struct sock *sk)
-{
-       if (!sk->sk_socket)
-               return NULL;
-
-       return SOCK_INODE(sk->sk_socket)->i_security;
-}
+                                     struct xfrm_policy *xp,
+                                     const struct flowi *fl);
 
 #ifdef CONFIG_SECURITY_NETWORK_XFRM
 extern atomic_t selinux_xfrm_refcount;
@@ -42,16 +34,21 @@ static inline int selinux_xfrm_enabled(void)
        return (atomic_read(&selinux_xfrm_refcount) > 0);
 }
 
-int selinux_xfrm_sock_rcv_skb(u32 sid, struct sk_buff *skb,
-                       struct common_audit_data *ad);
-int selinux_xfrm_postroute_last(u32 isec_sid, struct sk_buff *skb,
-                       struct common_audit_data *ad, u8 proto);
+int selinux_xfrm_sock_rcv_skb(u32 sk_sid, struct sk_buff *skb,
+                             struct common_audit_data *ad);
+int selinux_xfrm_postroute_last(u32 sk_sid, struct sk_buff *skb,
+                               struct common_audit_data *ad, u8 proto);
 int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall);
 
 static inline void selinux_xfrm_notify_policyload(void)
 {
+       struct net *net;
+
        atomic_inc(&flow_cache_genid);
-       rt_genid_bump(&init_net);
+       rtnl_lock();
+       for_each_net(net)
+               rt_genid_bump_all(net);
+       rtnl_unlock();
 }
 #else
 static inline int selinux_xfrm_enabled(void)
@@ -59,19 +56,21 @@ static inline int selinux_xfrm_enabled(void)
        return 0;
 }
 
-static inline int selinux_xfrm_sock_rcv_skb(u32 isec_sid, struct sk_buff *skb,
-                       struct common_audit_data *ad)
+static inline int selinux_xfrm_sock_rcv_skb(u32 sk_sid, struct sk_buff *skb,
+                                           struct common_audit_data *ad)
 {
        return 0;
 }
 
-static inline int selinux_xfrm_postroute_last(u32 isec_sid, struct sk_buff *skb,
-                       struct common_audit_data *ad, u8 proto)
+static inline int selinux_xfrm_postroute_last(u32 sk_sid, struct sk_buff *skb,
+                                             struct common_audit_data *ad,
+                                             u8 proto)
 {
        return 0;
 }
 
-static inline int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall)
+static inline int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid,
+                                             int ckall)
 {
        *sid = SECSID_NULL;
        return 0;
@@ -82,10 +81,9 @@ static inline void selinux_xfrm_notify_policyload(void)
 }
 #endif
 
-static inline void selinux_skb_xfrm_sid(struct sk_buff *skb, u32 *sid)
+static inline int selinux_skb_xfrm_sid(struct sk_buff *skb, u32 *sid)
 {
-       int err = selinux_xfrm_decode_session(skb, sid, 0);
-       BUG_ON(err);
+       return selinux_xfrm_decode_session(skb, sid, 0);
 }
 
 #endif /* _SELINUX_XFRM_H_ */
index c5454c0477c346e4d814f5ff209feba86e5b86ad..03a72c32afd738ccad5c188bbe853202c32f53f6 100644 (file)
@@ -166,6 +166,7 @@ static void sel_netnode_insert(struct sel_netnode *node)
                break;
        default:
                BUG();
+               return;
        }
 
        /* we need to impose a limit on the growth of the hash table so check
@@ -225,6 +226,7 @@ static int sel_netnode_sid_slow(void *addr, u16 family, u32 *sid)
                break;
        default:
                BUG();
+               ret = -EINVAL;
        }
        if (ret != 0)
                goto out;
index ff427733c2903cab275a05da0887478850e1e374..5122affe06a8840e193150d62bd9b2f996fe67fe 100644 (file)
@@ -44,7 +44,9 @@
 /* Policy capability filenames */
 static char *policycap_names[] = {
        "network_peer_controls",
-       "open_perms"
+       "open_perms",
+       "redhat1",
+       "always_check_network"
 };
 
 unsigned int selinux_checkreqprot = CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE;
index 30f119b1d1ec36a95dc456c52b5b0ac1a6868514..820313a04d49bf4c4a8bc0f04ea01514ff184a64 100644 (file)
@@ -213,7 +213,12 @@ netlbl_import_failure:
 }
 #endif /* CONFIG_NETLABEL */
 
-int ebitmap_contains(struct ebitmap *e1, struct ebitmap *e2)
+/*
+ * Check to see if all the bits set in e2 are also set in e1. Optionally,
+ * if last_e2bit is non-zero, the highest set bit in e2 cannot exceed
+ * last_e2bit.
+ */
+int ebitmap_contains(struct ebitmap *e1, struct ebitmap *e2, u32 last_e2bit)
 {
        struct ebitmap_node *n1, *n2;
        int i;
@@ -223,14 +228,25 @@ int ebitmap_contains(struct ebitmap *e1, struct ebitmap *e2)
 
        n1 = e1->node;
        n2 = e2->node;
+
        while (n1 && n2 && (n1->startbit <= n2->startbit)) {
                if (n1->startbit < n2->startbit) {
                        n1 = n1->next;
                        continue;
                }
-               for (i = 0; i < EBITMAP_UNIT_NUMS; i++) {
+               for (i = EBITMAP_UNIT_NUMS - 1; (i >= 0) && !n2->maps[i]; )
+                       i--;    /* Skip trailing NULL map entries */
+               if (last_e2bit && (i >= 0)) {
+                       u32 lastsetbit = n2->startbit + i * EBITMAP_UNIT_SIZE +
+                                        __fls(n2->maps[i]);
+                       if (lastsetbit > last_e2bit)
+                               return 0;
+               }
+
+               while (i >= 0) {
                        if ((n1->maps[i] & n2->maps[i]) != n2->maps[i])
                                return 0;
+                       i--;
                }
 
                n1 = n1->next;
index 922f8afa89dd5837e2617daaf793db0e40ad009e..712c8a7b8e8b879d3835b5ee3650b66baa46e106 100644 (file)
 
 #include <net/netlabel.h>
 
-#define EBITMAP_UNIT_NUMS      ((32 - sizeof(void *) - sizeof(u32))    \
+#ifdef CONFIG_64BIT
+#define        EBITMAP_NODE_SIZE       64
+#else
+#define        EBITMAP_NODE_SIZE       32
+#endif
+
+#define EBITMAP_UNIT_NUMS      ((EBITMAP_NODE_SIZE-sizeof(void *)-sizeof(u32))\
                                        / sizeof(unsigned long))
 #define EBITMAP_UNIT_SIZE      BITS_PER_LONG
 #define EBITMAP_SIZE           (EBITMAP_UNIT_NUMS * EBITMAP_UNIT_SIZE)
@@ -117,7 +123,7 @@ static inline void ebitmap_node_clr_bit(struct ebitmap_node *n,
 
 int ebitmap_cmp(struct ebitmap *e1, struct ebitmap *e2);
 int ebitmap_cpy(struct ebitmap *dst, struct ebitmap *src);
-int ebitmap_contains(struct ebitmap *e1, struct ebitmap *e2);
+int ebitmap_contains(struct ebitmap *e1, struct ebitmap *e2, u32 last_e2bit);
 int ebitmap_get_bit(struct ebitmap *e, unsigned long bit);
 int ebitmap_set_bit(struct ebitmap *e, unsigned long bit, int value);
 void ebitmap_destroy(struct ebitmap *e);
index 40de8d3f208ecf95db162f4ae355d0d53ba99265..c85bc1ec040c0c58f93772004361cbcd04861575 100644 (file)
@@ -160,8 +160,6 @@ void mls_sid_to_context(struct context *context,
 int mls_level_isvalid(struct policydb *p, struct mls_level *l)
 {
        struct level_datum *levdatum;
-       struct ebitmap_node *node;
-       int i;
 
        if (!l->sens || l->sens > p->p_levels.nprim)
                return 0;
@@ -170,19 +168,13 @@ int mls_level_isvalid(struct policydb *p, struct mls_level *l)
        if (!levdatum)
                return 0;
 
-       ebitmap_for_each_positive_bit(&l->cat, node, i) {
-               if (i > p->p_cats.nprim)
-                       return 0;
-               if (!ebitmap_get_bit(&levdatum->level->cat, i)) {
-                       /*
-                        * Category may not be associated with
-                        * sensitivity.
-                        */
-                       return 0;
-               }
-       }
-
-       return 1;
+       /*
+        * Return 1 iff all the bits set in l->cat are also be set in
+        * levdatum->level->cat and no bit in l->cat is larger than
+        * p->p_cats.nprim.
+        */
+       return ebitmap_contains(&levdatum->level->cat, &l->cat,
+                               p->p_cats.nprim);
 }
 
 int mls_range_isvalid(struct policydb *p, struct mls_range *r)
index 03bed52a80526abfbda766a33859595cc1d8bfa5..e93648774137c601f5ec90ce14a03983655ce36d 100644 (file)
@@ -35,7 +35,7 @@ static inline int mls_level_eq(struct mls_level *l1, struct mls_level *l2)
 static inline int mls_level_dom(struct mls_level *l1, struct mls_level *l2)
 {
        return ((l1->sens >= l2->sens) &&
-               ebitmap_contains(&l1->cat, &l2->cat));
+               ebitmap_contains(&l1->cat, &l2->cat, 0));
 }
 
 #define mls_level_incomp(l1, l2) \
index c8adde3aff8fdbe93fb2f867e55f71b9879685a5..f6195ebde3c94eef0cdf1cf92933246069b25059 100644 (file)
@@ -3203,9 +3203,8 @@ static int range_write_helper(void *key, void *data, void *ptr)
 
 static int range_write(struct policydb *p, void *fp)
 {
-       size_t nel;
        __le32 buf[1];
-       int rc;
+       int rc, nel;
        struct policy_data pd;
 
        pd.p = p;
index b4feecc3fe0110d10bbdc183c369a03ab8495a6c..d106733ad9878d6ee7543ff31c05a51f2c74b523 100644 (file)
@@ -72,6 +72,7 @@
 
 int selinux_policycap_netpeer;
 int selinux_policycap_openperm;
+int selinux_policycap_alwaysnetwork;
 
 static DEFINE_RWLOCK(policy_rwlock);
 
@@ -1812,6 +1813,8 @@ static void security_load_policycaps(void)
                                                  POLICYDB_CAPABILITY_NETPEER);
        selinux_policycap_openperm = ebitmap_get_bit(&policydb.policycaps,
                                                  POLICYDB_CAPABILITY_OPENPERM);
+       selinux_policycap_alwaysnetwork = ebitmap_get_bit(&policydb.policycaps,
+                                                 POLICYDB_CAPABILITY_ALWAYSNETWORK);
 }
 
 static int security_preserve_bools(struct policydb *p);
@@ -2323,17 +2326,14 @@ out:
 
 /**
  * security_fs_use - Determine how to handle labeling for a filesystem.
- * @fstype: filesystem type
- * @behavior: labeling behavior
- * @sid: SID for filesystem (superblock)
+ * @sb: superblock in question
  */
-int security_fs_use(
-       const char *fstype,
-       unsigned int *behavior,
-       u32 *sid)
+int security_fs_use(struct super_block *sb)
 {
        int rc = 0;
        struct ocontext *c;
+       struct superblock_security_struct *sbsec = sb->s_security;
+       const char *fstype = sb->s_type->name;
 
        read_lock(&policy_rwlock);
 
@@ -2345,21 +2345,21 @@ int security_fs_use(
        }
 
        if (c) {
-               *behavior = c->v.behavior;
+               sbsec->behavior = c->v.behavior;
                if (!c->sid[0]) {
                        rc = sidtab_context_to_sid(&sidtab, &c->context[0],
                                                   &c->sid[0]);
                        if (rc)
                                goto out;
                }
-               *sid = c->sid[0];
+               sbsec->sid = c->sid[0];
        } else {
-               rc = security_genfs_sid(fstype, "/", SECCLASS_DIR, sid);
+               rc = security_genfs_sid(fstype, "/", SECCLASS_DIR, &sbsec->sid);
                if (rc) {
-                       *behavior = SECURITY_FS_USE_NONE;
+                       sbsec->behavior = SECURITY_FS_USE_NONE;
                        rc = 0;
                } else {
-                       *behavior = SECURITY_FS_USE_GENFS;
+                       sbsec->behavior = SECURITY_FS_USE_GENFS;
                }
        }
 
index d030818862146732ebe30c8cc3f266d485ef0677..425b9f91d755f6aa229f9bc71c58a747d0f9fa98 100644 (file)
@@ -56,7 +56,7 @@
 atomic_t selinux_xfrm_refcount = ATOMIC_INIT(0);
 
 /*
- * Returns true if an LSM/SELinux context
+ * Returns true if the context is an LSM/SELinux context.
  */
 static inline int selinux_authorizable_ctx(struct xfrm_sec_ctx *ctx)
 {
@@ -66,7 +66,7 @@ static inline int selinux_authorizable_ctx(struct xfrm_sec_ctx *ctx)
 }
 
 /*
- * Returns true if the xfrm contains a security blob for SELinux
+ * Returns true if the xfrm contains a security blob for SELinux.
  */
 static inline int selinux_authorizable_xfrm(struct xfrm_state *x)
 {
@@ -74,48 +74,111 @@ static inline int selinux_authorizable_xfrm(struct xfrm_state *x)
 }
 
 /*
- * LSM hook implementation that authorizes that a flow can use
- * a xfrm policy rule.
+ * Allocates a xfrm_sec_state and populates it using the supplied security
+ * xfrm_user_sec_ctx context.
  */
-int selinux_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir)
+static int selinux_xfrm_alloc_user(struct xfrm_sec_ctx **ctxp,
+                                  struct xfrm_user_sec_ctx *uctx)
 {
        int rc;
-       u32 sel_sid;
+       const struct task_security_struct *tsec = current_security();
+       struct xfrm_sec_ctx *ctx = NULL;
+       u32 str_len;
 
-       /* Context sid is either set to label or ANY_ASSOC */
-       if (ctx) {
-               if (!selinux_authorizable_ctx(ctx))
-                       return -EINVAL;
-
-               sel_sid = ctx->ctx_sid;
-       } else
-               /*
-                * All flows should be treated as polmatch'ing an
-                * otherwise applicable "non-labeled" policy. This
-                * would prevent inadvertent "leaks".
-                */
-               return 0;
+       if (ctxp == NULL || uctx == NULL ||
+           uctx->ctx_doi != XFRM_SC_DOI_LSM ||
+           uctx->ctx_alg != XFRM_SC_ALG_SELINUX)
+               return -EINVAL;
 
-       rc = avc_has_perm(fl_secid, sel_sid, SECCLASS_ASSOCIATION,
-                         ASSOCIATION__POLMATCH,
-                         NULL);
+       str_len = uctx->ctx_len;
+       if (str_len >= PAGE_SIZE)
+               return -ENOMEM;
 
-       if (rc == -EACCES)
-               return -ESRCH;
+       ctx = kmalloc(sizeof(*ctx) + str_len + 1, GFP_KERNEL);
+       if (!ctx)
+               return -ENOMEM;
 
+       ctx->ctx_doi = XFRM_SC_DOI_LSM;
+       ctx->ctx_alg = XFRM_SC_ALG_SELINUX;
+       ctx->ctx_len = str_len;
+       memcpy(ctx->ctx_str, &uctx[1], str_len);
+       ctx->ctx_str[str_len] = '\0';
+       rc = security_context_to_sid(ctx->ctx_str, str_len, &ctx->ctx_sid);
+       if (rc)
+               goto err;
+
+       rc = avc_has_perm(tsec->sid, ctx->ctx_sid,
+                         SECCLASS_ASSOCIATION, ASSOCIATION__SETCONTEXT, NULL);
+       if (rc)
+               goto err;
+
+       *ctxp = ctx;
+       atomic_inc(&selinux_xfrm_refcount);
+       return 0;
+
+err:
+       kfree(ctx);
        return rc;
 }
 
+/*
+ * Free the xfrm_sec_ctx structure.
+ */
+static void selinux_xfrm_free(struct xfrm_sec_ctx *ctx)
+{
+       if (!ctx)
+               return;
+
+       atomic_dec(&selinux_xfrm_refcount);
+       kfree(ctx);
+}
+
+/*
+ * Authorize the deletion of a labeled SA or policy rule.
+ */
+static int selinux_xfrm_delete(struct xfrm_sec_ctx *ctx)
+{
+       const struct task_security_struct *tsec = current_security();
+
+       if (!ctx)
+               return 0;
+
+       return avc_has_perm(tsec->sid, ctx->ctx_sid,
+                           SECCLASS_ASSOCIATION, ASSOCIATION__SETCONTEXT,
+                           NULL);
+}
+
+/*
+ * LSM hook implementation that authorizes that a flow can use a xfrm policy
+ * rule.
+ */
+int selinux_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir)
+{
+       int rc;
+
+       /* All flows should be treated as polmatch'ing an otherwise applicable
+        * "non-labeled" policy. This would prevent inadvertent "leaks". */
+       if (!ctx)
+               return 0;
+
+       /* Context sid is either set to label or ANY_ASSOC */
+       if (!selinux_authorizable_ctx(ctx))
+               return -EINVAL;
+
+       rc = avc_has_perm(fl_secid, ctx->ctx_sid,
+                         SECCLASS_ASSOCIATION, ASSOCIATION__POLMATCH, NULL);
+       return (rc == -EACCES ? -ESRCH : rc);
+}
+
 /*
  * LSM hook implementation that authorizes that a state matches
  * the given policy, flow combo.
  */
-
-int selinux_xfrm_state_pol_flow_match(struct xfrm_state *x, struct xfrm_policy *xp,
-                       const struct flowi *fl)
+int selinux_xfrm_state_pol_flow_match(struct xfrm_state *x,
+                                     struct xfrm_policy *xp,
+                                     const struct flowi *fl)
 {
        u32 state_sid;
-       int rc;
 
        if (!xp->security)
                if (x->security)
@@ -138,187 +201,80 @@ int selinux_xfrm_state_pol_flow_match(struct xfrm_state *x, struct xfrm_policy *
        if (fl->flowi_secid != state_sid)
                return 0;
 
-       rc = avc_has_perm(fl->flowi_secid, state_sid, SECCLASS_ASSOCIATION,
-                         ASSOCIATION__SENDTO,
-                         NULL)? 0:1;
-
-       /*
-        * We don't need a separate SA Vs. policy polmatch check
-        * since the SA is now of the same label as the flow and
-        * a flow Vs. policy polmatch check had already happened
-        * in selinux_xfrm_policy_lookup() above.
-        */
-
-       return rc;
+       /* We don't need a separate SA Vs. policy polmatch check since the SA
+        * is now of the same label as the flow and a flow Vs. policy polmatch
+        * check had already happened in selinux_xfrm_policy_lookup() above. */
+       return (avc_has_perm(fl->flowi_secid, state_sid,
+                           SECCLASS_ASSOCIATION, ASSOCIATION__SENDTO,
+                           NULL) ? 0 : 1);
 }
 
 /*
  * LSM hook implementation that checks and/or returns the xfrm sid for the
  * incoming packet.
  */
-
 int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall)
 {
+       u32 sid_session = SECSID_NULL;
        struct sec_path *sp;
 
-       *sid = SECSID_NULL;
-
        if (skb == NULL)
-               return 0;
+               goto out;
 
        sp = skb->sp;
        if (sp) {
-               int i, sid_set = 0;
+               int i;
 
-               for (i = sp->len-1; i >= 0; i--) {
+               for (i = sp->len - 1; i >= 0; i--) {
                        struct xfrm_state *x = sp->xvec[i];
                        if (selinux_authorizable_xfrm(x)) {
                                struct xfrm_sec_ctx *ctx = x->security;
 
-                               if (!sid_set) {
-                                       *sid = ctx->ctx_sid;
-                                       sid_set = 1;
-
+                               if (sid_session == SECSID_NULL) {
+                                       sid_session = ctx->ctx_sid;
                                        if (!ckall)
-                                               break;
-                               } else if (*sid != ctx->ctx_sid)
+                                               goto out;
+                               } else if (sid_session != ctx->ctx_sid) {
+                                       *sid = SECSID_NULL;
                                        return -EINVAL;
+                               }
                        }
                }
        }
 
-       return 0;
-}
-
-/*
- * Security blob allocation for xfrm_policy and xfrm_state
- * CTX does not have a meaningful value on input
- */
-static int selinux_xfrm_sec_ctx_alloc(struct xfrm_sec_ctx **ctxp,
-       struct xfrm_user_sec_ctx *uctx, u32 sid)
-{
-       int rc = 0;
-       const struct task_security_struct *tsec = current_security();
-       struct xfrm_sec_ctx *ctx = NULL;
-       char *ctx_str = NULL;
-       u32 str_len;
-
-       BUG_ON(uctx && sid);
-
-       if (!uctx)
-               goto not_from_user;
-
-       if (uctx->ctx_alg != XFRM_SC_ALG_SELINUX)
-               return -EINVAL;
-
-       str_len = uctx->ctx_len;
-       if (str_len >= PAGE_SIZE)
-               return -ENOMEM;
-
-       *ctxp = ctx = kmalloc(sizeof(*ctx) +
-                             str_len + 1,
-                             GFP_KERNEL);
-
-       if (!ctx)
-               return -ENOMEM;
-
-       ctx->ctx_doi = uctx->ctx_doi;
-       ctx->ctx_len = str_len;
-       ctx->ctx_alg = uctx->ctx_alg;
-
-       memcpy(ctx->ctx_str,
-              uctx+1,
-              str_len);
-       ctx->ctx_str[str_len] = 0;
-       rc = security_context_to_sid(ctx->ctx_str,
-                                    str_len,
-                                    &ctx->ctx_sid);
-
-       if (rc)
-               goto out;
-
-       /*
-        * Does the subject have permission to set security context?
-        */
-       rc = avc_has_perm(tsec->sid, ctx->ctx_sid,
-                         SECCLASS_ASSOCIATION,
-                         ASSOCIATION__SETCONTEXT, NULL);
-       if (rc)
-               goto out;
-
-       return rc;
-
-not_from_user:
-       rc = security_sid_to_context(sid, &ctx_str, &str_len);
-       if (rc)
-               goto out;
-
-       *ctxp = ctx = kmalloc(sizeof(*ctx) +
-                             str_len,
-                             GFP_ATOMIC);
-
-       if (!ctx) {
-               rc = -ENOMEM;
-               goto out;
-       }
-
-       ctx->ctx_doi = XFRM_SC_DOI_LSM;
-       ctx->ctx_alg = XFRM_SC_ALG_SELINUX;
-       ctx->ctx_sid = sid;
-       ctx->ctx_len = str_len;
-       memcpy(ctx->ctx_str,
-              ctx_str,
-              str_len);
-
-       goto out2;
-
 out:
-       *ctxp = NULL;
-       kfree(ctx);
-out2:
-       kfree(ctx_str);
-       return rc;
+       *sid = sid_session;
+       return 0;
 }
 
 /*
- * LSM hook implementation that allocs and transfers uctx spec to
- * xfrm_policy.
+ * LSM hook implementation that allocs and transfers uctx spec to xfrm_policy.
  */
 int selinux_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp,
                              struct xfrm_user_sec_ctx *uctx)
 {
-       int err;
-
-       BUG_ON(!uctx);
-
-       err = selinux_xfrm_sec_ctx_alloc(ctxp, uctx, 0);
-       if (err == 0)
-               atomic_inc(&selinux_xfrm_refcount);
-
-       return err;
+       return selinux_xfrm_alloc_user(ctxp, uctx);
 }
 
-
 /*
- * LSM hook implementation that copies security data structure from old to
- * new for policy cloning.
+ * LSM hook implementation that copies security data structure from old to new
+ * for policy cloning.
  */
 int selinux_xfrm_policy_clone(struct xfrm_sec_ctx *old_ctx,
                              struct xfrm_sec_ctx **new_ctxp)
 {
        struct xfrm_sec_ctx *new_ctx;
 
-       if (old_ctx) {
-               new_ctx = kmalloc(sizeof(*old_ctx) + old_ctx->ctx_len,
-                                 GFP_ATOMIC);
-               if (!new_ctx)
-                       return -ENOMEM;
+       if (!old_ctx)
+               return 0;
+
+       new_ctx = kmalloc(sizeof(*old_ctx) + old_ctx->ctx_len, GFP_ATOMIC);
+       if (!new_ctx)
+               return -ENOMEM;
+       memcpy(new_ctx, old_ctx, sizeof(*old_ctx) + old_ctx->ctx_len);
+       atomic_inc(&selinux_xfrm_refcount);
+       *new_ctxp = new_ctx;
 
-               memcpy(new_ctx, old_ctx, sizeof(*new_ctx));
-               memcpy(new_ctx->ctx_str, old_ctx->ctx_str, new_ctx->ctx_len);
-               atomic_inc(&selinux_xfrm_refcount);
-               *new_ctxp = new_ctx;
-       }
        return 0;
 }
 
@@ -327,8 +283,7 @@ int selinux_xfrm_policy_clone(struct xfrm_sec_ctx *old_ctx,
  */
 void selinux_xfrm_policy_free(struct xfrm_sec_ctx *ctx)
 {
-       atomic_dec(&selinux_xfrm_refcount);
-       kfree(ctx);
+       selinux_xfrm_free(ctx);
 }
 
 /*
@@ -336,31 +291,55 @@ void selinux_xfrm_policy_free(struct xfrm_sec_ctx *ctx)
  */
 int selinux_xfrm_policy_delete(struct xfrm_sec_ctx *ctx)
 {
-       const struct task_security_struct *tsec = current_security();
-
-       if (!ctx)
-               return 0;
+       return selinux_xfrm_delete(ctx);
+}
 
-       return avc_has_perm(tsec->sid, ctx->ctx_sid,
-                           SECCLASS_ASSOCIATION, ASSOCIATION__SETCONTEXT,
-                           NULL);
+/*
+ * LSM hook implementation that allocates a xfrm_sec_state, populates it using
+ * the supplied security context, and assigns it to the xfrm_state.
+ */
+int selinux_xfrm_state_alloc(struct xfrm_state *x,
+                            struct xfrm_user_sec_ctx *uctx)
+{
+       return selinux_xfrm_alloc_user(&x->security, uctx);
 }
 
 /*
- * LSM hook implementation that allocs and transfers sec_ctx spec to
- * xfrm_state.
+ * LSM hook implementation that allocates a xfrm_sec_state and populates based
+ * on a secid.
  */
-int selinux_xfrm_state_alloc(struct xfrm_state *x, struct xfrm_user_sec_ctx *uctx,
-               u32 secid)
+int selinux_xfrm_state_alloc_acquire(struct xfrm_state *x,
+                                    struct xfrm_sec_ctx *polsec, u32 secid)
 {
-       int err;
+       int rc;
+       struct xfrm_sec_ctx *ctx;
+       char *ctx_str = NULL;
+       int str_len;
+
+       if (!polsec)
+               return 0;
 
-       BUG_ON(!x);
+       if (secid == 0)
+               return -EINVAL;
 
-       err = selinux_xfrm_sec_ctx_alloc(&x->security, uctx, secid);
-       if (err == 0)
-               atomic_inc(&selinux_xfrm_refcount);
-       return err;
+       rc = security_sid_to_context(secid, &ctx_str, &str_len);
+       if (rc)
+               return rc;
+
+       ctx = kmalloc(sizeof(*ctx) + str_len, GFP_ATOMIC);
+       if (!ctx)
+               return -ENOMEM;
+
+       ctx->ctx_doi = XFRM_SC_DOI_LSM;
+       ctx->ctx_alg = XFRM_SC_ALG_SELINUX;
+       ctx->ctx_sid = secid;
+       ctx->ctx_len = str_len;
+       memcpy(ctx->ctx_str, ctx_str, str_len);
+       kfree(ctx_str);
+
+       x->security = ctx;
+       atomic_inc(&selinux_xfrm_refcount);
+       return 0;
 }
 
 /*
@@ -368,24 +347,15 @@ int selinux_xfrm_state_alloc(struct xfrm_state *x, struct xfrm_user_sec_ctx *uct
  */
 void selinux_xfrm_state_free(struct xfrm_state *x)
 {
-       atomic_dec(&selinux_xfrm_refcount);
-       kfree(x->security);
+       selinux_xfrm_free(x->security);
 }
 
- /*
 * LSM hook implementation that authorizes deletion of labeled SAs.
 */
+/*
+ * LSM hook implementation that authorizes deletion of labeled SAs.
+ */
 int selinux_xfrm_state_delete(struct xfrm_state *x)
 {
-       const struct task_security_struct *tsec = current_security();
-       struct xfrm_sec_ctx *ctx = x->security;
-
-       if (!ctx)
-               return 0;
-
-       return avc_has_perm(tsec->sid, ctx->ctx_sid,
-                           SECCLASS_ASSOCIATION, ASSOCIATION__SETCONTEXT,
-                           NULL);
+       return selinux_xfrm_delete(x->security);
 }
 
 /*
@@ -395,14 +365,12 @@ int selinux_xfrm_state_delete(struct xfrm_state *x)
  * we need to check for unlabelled access since this may not have
  * gone thru the IPSec process.
  */
-int selinux_xfrm_sock_rcv_skb(u32 isec_sid, struct sk_buff *skb,
-                               struct common_audit_data *ad)
+int selinux_xfrm_sock_rcv_skb(u32 sk_sid, struct sk_buff *skb,
+                             struct common_audit_data *ad)
 {
-       int i, rc = 0;
-       struct sec_path *sp;
-       u32 sel_sid = SECINITSID_UNLABELED;
-
-       sp = skb->sp;
+       int i;
+       struct sec_path *sp = skb->sp;
+       u32 peer_sid = SECINITSID_UNLABELED;
 
        if (sp) {
                for (i = 0; i < sp->len; i++) {
@@ -410,23 +378,17 @@ int selinux_xfrm_sock_rcv_skb(u32 isec_sid, struct sk_buff *skb,
 
                        if (x && selinux_authorizable_xfrm(x)) {
                                struct xfrm_sec_ctx *ctx = x->security;
-                               sel_sid = ctx->ctx_sid;
+                               peer_sid = ctx->ctx_sid;
                                break;
                        }
                }
        }
 
-       /*
-        * This check even when there's no association involved is
-        * intended, according to Trent Jaeger, to make sure a
-        * process can't engage in non-ipsec communication unless
-        * explicitly allowed by policy.
-        */
-
-       rc = avc_has_perm(isec_sid, sel_sid, SECCLASS_ASSOCIATION,
-                         ASSOCIATION__RECVFROM, ad);
-
-       return rc;
+       /* This check even when there's no association involved is intended,
+        * according to Trent Jaeger, to make sure a process can't engage in
+        * non-IPsec communication unless explicitly allowed by policy. */
+       return avc_has_perm(sk_sid, peer_sid,
+                           SECCLASS_ASSOCIATION, ASSOCIATION__RECVFROM, ad);
 }
 
 /*
@@ -436,49 +398,38 @@ int selinux_xfrm_sock_rcv_skb(u32 isec_sid, struct sk_buff *skb,
  * If we do have a authorizable security association, then it has already been
  * checked in the selinux_xfrm_state_pol_flow_match hook above.
  */
-int selinux_xfrm_postroute_last(u32 isec_sid, struct sk_buff *skb,
-                                       struct common_audit_data *ad, u8 proto)
+int selinux_xfrm_postroute_last(u32 sk_sid, struct sk_buff *skb,
+                               struct common_audit_data *ad, u8 proto)
 {
        struct dst_entry *dst;
-       int rc = 0;
-
-       dst = skb_dst(skb);
-
-       if (dst) {
-               struct dst_entry *dst_test;
-
-               for (dst_test = dst; dst_test != NULL;
-                    dst_test = dst_test->child) {
-                       struct xfrm_state *x = dst_test->xfrm;
-
-                       if (x && selinux_authorizable_xfrm(x))
-                               goto out;
-               }
-       }
 
        switch (proto) {
        case IPPROTO_AH:
        case IPPROTO_ESP:
        case IPPROTO_COMP:
-               /*
-                * We should have already seen this packet once before
-                * it underwent xfrm(s). No need to subject it to the
-                * unlabeled check.
-                */
-               goto out;
+               /* We should have already seen this packet once before it
+                * underwent xfrm(s). No need to subject it to the unlabeled
+                * check. */
+               return 0;
        default:
                break;
        }
 
-       /*
-        * This check even when there's no association involved is
-        * intended, according to Trent Jaeger, to make sure a
-        * process can't engage in non-ipsec communication unless
-        * explicitly allowed by policy.
-        */
+       dst = skb_dst(skb);
+       if (dst) {
+               struct dst_entry *iter;
 
-       rc = avc_has_perm(isec_sid, SECINITSID_UNLABELED, SECCLASS_ASSOCIATION,
-                         ASSOCIATION__SENDTO, ad);
-out:
-       return rc;
+               for (iter = dst; iter != NULL; iter = iter->child) {
+                       struct xfrm_state *x = iter->xfrm;
+
+                       if (x && selinux_authorizable_xfrm(x))
+                               return 0;
+               }
+       }
+
+       /* This check even when there's no association involved is intended,
+        * according to Trent Jaeger, to make sure a process can't engage in
+        * non-IPsec communication unless explicitly allowed by policy. */
+       return avc_has_perm(sk_sid, SECINITSID_UNLABELED,
+                           SECCLASS_ASSOCIATION, ASSOCIATION__SENDTO, ad);
 }
index eefbd10e408f18b87d35c35d729fabf088734d65..4392c5d238f032a27c86c38a022909a6654a49bb 100644 (file)
@@ -582,7 +582,7 @@ static void smack_inode_free_security(struct inode *inode)
  * Returns 0 if it all works out, -ENOMEM if there's no memory
  */
 static int smack_inode_init_security(struct inode *inode, struct inode *dir,
-                                    const struct qstr *qstr, char **name,
+                                    const struct qstr *qstr, const char **name,
                                     void **value, size_t *len)
 {
        struct inode_smack *issp = inode->i_security;
@@ -591,11 +591,8 @@ static int smack_inode_init_security(struct inode *inode, struct inode *dir,
        char *dsp = smk_of_inode(dir);
        int may;
 
-       if (name) {
-               *name = kstrdup(XATTR_SMACK_SUFFIX, GFP_NOFS);
-               if (*name == NULL)
-                       return -ENOMEM;
-       }
+       if (name)
+               *name = XATTR_SMACK_SUFFIX;
 
        if (value) {
                rcu_read_lock();
index ce431e6e07cfc7df27e8320d6da1b13516310df9..5066a3768b2841a4f46d8cbb8116b4d8276334da 100644 (file)
 #include <linux/io.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
+#include <linux/dmaengine.h>
 
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/ac97_codec.h>
 #include <sound/initval.h>
 #include <sound/pxa2xx-lib.h>
+#include <sound/dmaengine_pcm.h>
 
 #include <mach/regs-ac97.h>
 #include <mach/audio.h>
@@ -41,20 +43,20 @@ static struct snd_ac97_bus_ops pxa2xx_ac97_ops = {
        .reset  = pxa2xx_ac97_reset,
 };
 
-static struct pxa2xx_pcm_dma_params pxa2xx_ac97_pcm_out = {
-       .name                   = "AC97 PCM out",
-       .dev_addr               = __PREG(PCDR),
-       .drcmr                  = &DRCMR(12),
-       .dcmd                   = DCMD_INCSRCADDR | DCMD_FLOWTRG |
-                                 DCMD_BURST32 | DCMD_WIDTH4,
+static unsigned long pxa2xx_ac97_pcm_out_req = 12;
+static struct snd_dmaengine_dai_dma_data pxa2xx_ac97_pcm_out = {
+       .addr           = __PREG(PCDR),
+       .addr_width     = DMA_SLAVE_BUSWIDTH_4_BYTES,
+       .maxburst       = 32,
+       .filter_data    = &pxa2xx_ac97_pcm_out_req,
 };
 
-static struct pxa2xx_pcm_dma_params pxa2xx_ac97_pcm_in = {
-       .name                   = "AC97 PCM in",
-       .dev_addr               = __PREG(PCDR),
-       .drcmr                  = &DRCMR(11),
-       .dcmd                   = DCMD_INCTRGADDR | DCMD_FLOWSRC |
-                                 DCMD_BURST32 | DCMD_WIDTH4,
+static unsigned long pxa2xx_ac97_pcm_in_req = 11;
+static struct snd_dmaengine_dai_dma_data pxa2xx_ac97_pcm_in = {
+       .addr           = __PREG(PCDR),
+       .addr_width     = DMA_SLAVE_BUSWIDTH_4_BYTES,
+       .maxburst       = 32,
+       .filter_data    = &pxa2xx_ac97_pcm_in_req,
 };
 
 static struct snd_pcm *pxa2xx_ac97_pcm;
index 823359ed95e16a0969ee6c28843a4777387d3561..a61d7a9a995e86daaeea8b8243c93f9fbfe911d2 100644 (file)
@@ -7,11 +7,13 @@
 #include <linux/slab.h>
 #include <linux/module.h>
 #include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
 
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/pxa2xx-lib.h>
+#include <sound/dmaengine_pcm.h>
 
 #include <mach/dma.h>
 
@@ -43,6 +45,35 @@ int __pxa2xx_pcm_hw_params(struct snd_pcm_substream *substream,
        size_t period = params_period_bytes(params);
        pxa_dma_desc *dma_desc;
        dma_addr_t dma_buff_phys, next_desc_phys;
+       u32 dcmd = DCMD_INCSRCADDR | DCMD_FLOWTRG;
+
+       /* temporary transition hack */
+       switch (rtd->params->addr_width) {
+       case DMA_SLAVE_BUSWIDTH_1_BYTE:
+               dcmd |= DCMD_WIDTH1;
+               break;
+       case DMA_SLAVE_BUSWIDTH_2_BYTES:
+               dcmd |= DCMD_WIDTH2;
+               break;
+       case DMA_SLAVE_BUSWIDTH_4_BYTES:
+               dcmd |= DCMD_WIDTH4;
+               break;
+       default:
+               /* can't happen */
+               break;
+       }
+
+       switch (rtd->params->maxburst) {
+       case 8:
+               dcmd |= DCMD_BURST8;
+               break;
+       case 16:
+               dcmd |= DCMD_BURST16;
+               break;
+       case 32:
+               dcmd |= DCMD_BURST32;
+               break;
+       }
 
        snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);
        runtime->dma_bytes = totsize;
@@ -55,14 +86,14 @@ int __pxa2xx_pcm_hw_params(struct snd_pcm_substream *substream,
                dma_desc->ddadr = next_desc_phys;
                if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
                        dma_desc->dsadr = dma_buff_phys;
-                       dma_desc->dtadr = rtd->params->dev_addr;
+                       dma_desc->dtadr = rtd->params->addr;
                } else {
-                       dma_desc->dsadr = rtd->params->dev_addr;
+                       dma_desc->dsadr = rtd->params->addr;
                        dma_desc->dtadr = dma_buff_phys;
                }
                if (period > totsize)
                        period = totsize;
-               dma_desc->dcmd = rtd->params->dcmd | period | DCMD_ENDIRQEN;
+               dma_desc->dcmd = dcmd | period | DCMD_ENDIRQEN;
                dma_desc++;
                dma_buff_phys += period;
        } while (totsize -= period);
@@ -76,8 +107,10 @@ int __pxa2xx_pcm_hw_free(struct snd_pcm_substream *substream)
 {
        struct pxa2xx_runtime_data *rtd = substream->runtime->private_data;
 
-       if (rtd && rtd->params && rtd->params->drcmr)
-               *rtd->params->drcmr = 0;
+       if (rtd && rtd->params && rtd->params->filter_data) {
+               unsigned long req = *(unsigned long *) rtd->params->filter_data;
+               DRCMR(req) = 0;
+       }
 
        snd_pcm_set_runtime_buffer(substream, NULL);
        return 0;
@@ -136,6 +169,7 @@ EXPORT_SYMBOL(pxa2xx_pcm_pointer);
 int __pxa2xx_pcm_prepare(struct snd_pcm_substream *substream)
 {
        struct pxa2xx_runtime_data *prtd = substream->runtime->private_data;
+       unsigned long req;
 
        if (!prtd || !prtd->params)
                return 0;
@@ -146,7 +180,8 @@ int __pxa2xx_pcm_prepare(struct snd_pcm_substream *substream)
        DCSR(prtd->dma_ch) &= ~DCSR_RUN;
        DCSR(prtd->dma_ch) = 0;
        DCMD(prtd->dma_ch) = 0;
-       *prtd->params->drcmr = prtd->dma_ch | DRCMR_MAPVLD;
+       req = *(unsigned long *) prtd->params->filter_data;
+       DRCMR(req) = prtd->dma_ch | DRCMR_MAPVLD;
 
        return 0;
 }
@@ -155,7 +190,6 @@ EXPORT_SYMBOL(__pxa2xx_pcm_prepare);
 void pxa2xx_pcm_dma_irq(int dma_ch, void *dev_id)
 {
        struct snd_pcm_substream *substream = dev_id;
-       struct pxa2xx_runtime_data *rtd = substream->runtime->private_data;
        int dcsr;
 
        dcsr = DCSR(dma_ch);
@@ -164,8 +198,8 @@ void pxa2xx_pcm_dma_irq(int dma_ch, void *dev_id)
        if (dcsr & DCSR_ENDINTR) {
                snd_pcm_period_elapsed(substream);
        } else {
-               printk(KERN_ERR "%s: DMA error on channel %d (DCSR=%#x)\n",
-                       rtd->params->name, dma_ch, dcsr);
+               printk(KERN_ERR "DMA error on channel %d (DCSR=%#x)\n",
+                       dma_ch, dcsr);
                snd_pcm_stream_lock(substream);
                snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
                snd_pcm_stream_unlock(substream);
index 26422a3584ea6684e934f66b33a7f9aad8b5b762..69a2455b447210d42c2bf2c37b23fbe9035eada9 100644 (file)
  */
 
 #include <linux/module.h>
+#include <linux/dmaengine.h>
+
 #include <sound/core.h>
 #include <sound/pxa2xx-lib.h>
+#include <sound/dmaengine_pcm.h>
 
 #include "pxa2xx-pcm.h"
 
@@ -40,7 +43,7 @@ static int pxa2xx_pcm_open(struct snd_pcm_substream *substream)
 
        rtd->params = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ?
                      client->playback_params : client->capture_params;
-       ret = pxa_request_dma(rtd->params->name, DMA_PRIO_LOW,
+       ret = pxa_request_dma("dma", DMA_PRIO_LOW,
                              pxa2xx_pcm_dma_irq, substream);
        if (ret < 0)
                goto err2;
index 65f86b56ba422a2c28c6d0c70c731c6250df7c63..2a8fc08d52a173686fa9e946f2f3932179a64812 100644 (file)
 
 struct pxa2xx_runtime_data {
        int dma_ch;
-       struct pxa2xx_pcm_dma_params *params;
+       struct snd_dmaengine_dai_dma_data *params;
        pxa_dma_desc *dma_desc_array;
        dma_addr_t dma_desc_array_phys;
 };
 
 struct pxa2xx_pcm_client {
-       struct pxa2xx_pcm_dma_params *playback_params;
-       struct pxa2xx_pcm_dma_params *capture_params;
+       struct snd_dmaengine_dai_dma_data *playback_params;
+       struct snd_dmaengine_dai_dma_data *capture_params;
        int (*startup)(struct snd_pcm_substream *);
        void (*shutdown)(struct snd_pcm_substream *);
        int (*prepare)(struct snd_pcm_substream *);
index c0c2f57a0d6f332b6143c3eaa6850921c1dde485..313f22e9d929d8f5dbd039d224a529788b22c635 100644 (file)
@@ -6,6 +6,9 @@ config SND_PCM
        tristate
        select SND_TIMER
 
+config SND_DMAENGINE_PCM
+       tristate
+
 config SND_HWDEP
        tristate
 
index 43d4117428ac9d551391f0f89d1edfd3b2665c6b..5e890cfed42363e1aa1b1ebfbfba76d0d16f0326 100644 (file)
@@ -13,6 +13,8 @@ snd-$(CONFIG_SND_JACK)          += jack.o
 snd-pcm-objs := pcm.o pcm_native.o pcm_lib.o pcm_timer.o pcm_misc.o \
                pcm_memory.o
 
+snd-pcm-dmaengine-objs := pcm_dmaengine.o
+
 snd-page-alloc-y := memalloc.o
 snd-page-alloc-$(CONFIG_SND_DMA_SGBUF) += sgbuf.o
 
@@ -30,6 +32,7 @@ obj-$(CONFIG_SND_TIMER)               += snd-timer.o
 obj-$(CONFIG_SND_HRTIMER)      += snd-hrtimer.o
 obj-$(CONFIG_SND_RTCTIMER)     += snd-rtctimer.o
 obj-$(CONFIG_SND_PCM)          += snd-pcm.o snd-page-alloc.o
+obj-$(CONFIG_SND_DMAENGINE_PCM)        += snd-pcm-dmaengine.o
 obj-$(CONFIG_SND_RAWMIDI)      += snd-rawmidi.o
 
 obj-$(CONFIG_SND_OSSEMUL)      += oss/
index 82bb029d4414155e60ae2a01ddd4707d926b83ae..6e03b465e44e3a05f82e93a2d25a61f030cc3cf6 100644 (file)
@@ -184,7 +184,7 @@ static void xrun(struct snd_pcm_substream *substream)
        do {                                                            \
                if (xrun_debug(substream, XRUN_DEBUG_BASIC)) {          \
                        xrun_log_show(substream);                       \
-                       if (printk_ratelimit()) {                       \
+                       if (snd_printd_ratelimit()) {                   \
                                snd_printd("PCM: " fmt, ##args);        \
                        }                                               \
                        dump_stack_on_xrun(substream);                  \
@@ -342,7 +342,7 @@ static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream,
                return -EPIPE;
        }
        if (pos >= runtime->buffer_size) {
-               if (printk_ratelimit()) {
+               if (snd_printd_ratelimit()) {
                        char name[16];
                        snd_pcm_debug_name(substream, name, sizeof(name));
                        xrun_log_show(substream);
index 11048cc744d0ef01034ce0a9aceb517ff202b5a5..915b4d7fbb23fe8c1309e05c17ff525a6d7e7d78 100644 (file)
@@ -1022,7 +1022,7 @@ static void dummy_proc_write(struct snd_info_entry *entry,
                if (i >= ARRAY_SIZE(fields))
                        continue;
                snd_info_get_str(item, ptr, sizeof(item));
-               if (strict_strtoull(item, 0, &val))
+               if (kstrtoull(item, 0, &val))
                        continue;
                if (fields[i].size == sizeof(int))
                        *get_dummy_int_ptr(dummy, fields[i].offset) = val;
index 2c6386503940bf6db7f996a2dcc6a9a843182013..fe9e6e2f2c5b2a825ae24a432541d45de8f029fb 100644 (file)
@@ -49,7 +49,6 @@ struct fwspk {
        struct snd_card *card;
        struct fw_unit *unit;
        const struct device_info *device_info;
-       struct snd_pcm_substream *pcm;
        struct mutex mutex;
        struct cmp_connection connection;
        struct amdtp_out_stream stream;
@@ -363,8 +362,7 @@ static int fwspk_create_pcm(struct fwspk *fwspk)
                return err;
        pcm->private_data = fwspk;
        strcpy(pcm->name, fwspk->device_info->short_name);
-       fwspk->pcm = pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream;
-       fwspk->pcm->ops = &ops;
+       snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &ops);
        return 0;
 }
 
index 8a36a1d9803fbf305bdbcc2bca35c961c4643f5f..46ec4dff094ba566ee28018167e244065734b074 100644 (file)
@@ -486,13 +486,9 @@ static const struct v4l2_ctrl_ops tea575x_ctrl_ops = {
        .s_ctrl = tea575x_s_ctrl,
 };
 
-/*
- * initialize all the tea575x chips
- */
-int snd_tea575x_init(struct snd_tea575x *tea, struct module *owner)
-{
-       int retval;
 
+int snd_tea575x_hw_init(struct snd_tea575x *tea)
+{
        tea->mute = true;
 
        /* Not all devices can or know how to read the data back.
@@ -507,6 +503,17 @@ int snd_tea575x_init(struct snd_tea575x *tea, struct module *owner)
        tea->freq = 90500 * 16;         /* 90.5Mhz default */
        snd_tea575x_set_freq(tea);
 
+       return 0;
+}
+EXPORT_SYMBOL(snd_tea575x_hw_init);
+
+int snd_tea575x_init(struct snd_tea575x *tea, struct module *owner)
+{
+       int retval = snd_tea575x_hw_init(tea);
+
+       if (retval)
+               return retval;
+
        tea->vd = tea575x_radio;
        video_set_drvdata(&tea->vd, tea);
        mutex_init(&tea->mutex);
index 9942691cc0ca9c101bfd830b134bc4e97bddb727..afef0d738078f27935d844f5d002db1b7bbe2609 100644 (file)
@@ -443,8 +443,7 @@ static void snd_interwave_detect_memory(struct snd_gus_card *gus)
                for (i = 0; i < 8; ++i)
                        iwave[i] = snd_gf1_peek(gus, bank_pos + i);
 #ifdef CONFIG_SND_DEBUG_ROM
-               printk(KERN_DEBUG "ROM at 0x%06x = %*phC\n", bank_pos,
-                                 8, iwave);
+               printk(KERN_DEBUG "ROM at 0x%06x = %8phC\n", bank_pos, iwave);
 #endif
                if (strncmp(iwave, "INTRWAVE", 8))
                        continue;       /* first check */
index a59c88818f48a2dbf1f79e9ac4a627ee81f6cb26..461d94cfecbee6bc0ac02d324db0f3f74c9d4eae 100644 (file)
@@ -557,7 +557,6 @@ int DMAbuf_getrdbuffer(int dev, char **buf, int *len, int dontblock)
        unsigned long flags;
        int err = 0, n = 0;
        struct dma_buffparms *dmap = adev->dmap_in;
-       int go;
 
        if (!(adev->open_mode & OPEN_READ))
                return -EIO;
@@ -584,7 +583,7 @@ int DMAbuf_getrdbuffer(int dev, char **buf, int *len, int dontblock)
                        spin_unlock_irqrestore(&dmap->lock,flags);
                        return -EAGAIN;
                }
-               if ((go = adev->go))
+               if (adev->go)
                        timeout = dmabuf_timeout(dmap);
 
                spin_unlock_irqrestore(&dmap->lock,flags);
index 8a005f0e5ca484c842e2fe5f684275abafbde248..fdbb09a9b9e53cc095ac41c0ac6b44d6bfcb871b 100644 (file)
@@ -1216,11 +1216,13 @@ static void hda_jackpoll_work(struct work_struct *work)
 {
        struct hda_codec *codec =
                container_of(work, struct hda_codec, jackpoll_work.work);
-       if (!codec->jackpoll_interval)
-               return;
 
        snd_hda_jack_set_dirty_all(codec);
        snd_hda_jack_poll_all(codec);
+
+       if (!codec->jackpoll_interval)
+               return;
+
        queue_delayed_work(codec->bus->workq, &codec->jackpoll_work,
                           codec->jackpoll_interval);
 }
index e3c7ba8d7582643f9d1f52681251e16e3835d042..ac41e9cdc976a1c190c51684519abb9c475c2c59 100644 (file)
@@ -142,6 +142,9 @@ static void parse_user_hints(struct hda_codec *codec)
        val = snd_hda_get_bool_hint(codec, "primary_hp");
        if (val >= 0)
                spec->no_primary_hp = !val;
+       val = snd_hda_get_bool_hint(codec, "multi_io");
+       if (val >= 0)
+               spec->no_multi_io = !val;
        val = snd_hda_get_bool_hint(codec, "multi_cap_vol");
        if (val >= 0)
                spec->multi_cap_vol = !!val;
@@ -813,6 +816,8 @@ static void resume_path_from_idx(struct hda_codec *codec, int path_idx)
 
 static int hda_gen_mixer_mute_put(struct snd_kcontrol *kcontrol,
                                  struct snd_ctl_elem_value *ucontrol);
+static int hda_gen_bind_mute_put(struct snd_kcontrol *kcontrol,
+                                struct snd_ctl_elem_value *ucontrol);
 
 enum {
        HDA_CTL_WIDGET_VOL,
@@ -830,7 +835,13 @@ static const struct snd_kcontrol_new control_templates[] = {
                .put = hda_gen_mixer_mute_put, /* replaced */
                .private_value = HDA_COMPOSE_AMP_VAL(0, 3, 0, 0),
        },
-       HDA_BIND_MUTE(NULL, 0, 0, 0),
+       {
+               .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+               .info = snd_hda_mixer_amp_switch_info,
+               .get = snd_hda_mixer_bind_switch_get,
+               .put = hda_gen_bind_mute_put, /* replaced */
+               .private_value = HDA_COMPOSE_AMP_VAL(0, 3, 0, 0),
+       },
 };
 
 /* add dynamic controls from template */
@@ -937,8 +948,8 @@ static int add_stereo_sw(struct hda_codec *codec, const char *pfx,
 }
 
 /* playback mute control with the software mute bit check */
-static int hda_gen_mixer_mute_put(struct snd_kcontrol *kcontrol,
-                                 struct snd_ctl_elem_value *ucontrol)
+static void sync_auto_mute_bits(struct snd_kcontrol *kcontrol,
+                               struct snd_ctl_elem_value *ucontrol)
 {
        struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
        struct hda_gen_spec *spec = codec->spec;
@@ -949,10 +960,22 @@ static int hda_gen_mixer_mute_put(struct snd_kcontrol *kcontrol,
                ucontrol->value.integer.value[0] &= enabled;
                ucontrol->value.integer.value[1] &= enabled;
        }
+}
 
+static int hda_gen_mixer_mute_put(struct snd_kcontrol *kcontrol,
+                                 struct snd_ctl_elem_value *ucontrol)
+{
+       sync_auto_mute_bits(kcontrol, ucontrol);
        return snd_hda_mixer_amp_switch_put(kcontrol, ucontrol);
 }
 
+static int hda_gen_bind_mute_put(struct snd_kcontrol *kcontrol,
+                                struct snd_ctl_elem_value *ucontrol)
+{
+       sync_auto_mute_bits(kcontrol, ucontrol);
+       return snd_hda_mixer_bind_switch_put(kcontrol, ucontrol);
+}
+
 /* any ctl assigned to the path with the given index? */
 static bool path_has_mixer(struct hda_codec *codec, int path_idx, int ctl_type)
 {
@@ -1541,7 +1564,8 @@ static int fill_and_eval_dacs(struct hda_codec *codec,
                                              cfg->speaker_pins,
                                              spec->multiout.extra_out_nid,
                                              spec->speaker_paths);
-                       if (fill_mio_first && cfg->line_outs == 1 &&
+                       if (!spec->no_multi_io &&
+                           fill_mio_first && cfg->line_outs == 1 &&
                            cfg->line_out_type != AUTO_PIN_SPEAKER_OUT) {
                                err = fill_multi_ios(codec, cfg->line_out_pins[0], true);
                                if (!err)
@@ -1554,7 +1578,7 @@ static int fill_and_eval_dacs(struct hda_codec *codec,
                                   spec->private_dac_nids, spec->out_paths,
                                   spec->main_out_badness);
 
-       if (fill_mio_first &&
+       if (!spec->no_multi_io && fill_mio_first &&
            cfg->line_outs == 1 && cfg->line_out_type != AUTO_PIN_SPEAKER_OUT) {
                /* try to fill multi-io first */
                err = fill_multi_ios(codec, cfg->line_out_pins[0], false);
@@ -1582,7 +1606,8 @@ static int fill_and_eval_dacs(struct hda_codec *codec,
                        return err;
                badness += err;
        }
-       if (cfg->line_outs == 1 && cfg->line_out_type != AUTO_PIN_SPEAKER_OUT) {
+       if (!spec->no_multi_io &&
+           cfg->line_outs == 1 && cfg->line_out_type != AUTO_PIN_SPEAKER_OUT) {
                err = fill_multi_ios(codec, cfg->line_out_pins[0], false);
                if (err < 0)
                        return err;
@@ -1600,7 +1625,8 @@ static int fill_and_eval_dacs(struct hda_codec *codec,
                                check_aamix_out_path(codec, spec->speaker_paths[0]);
        }
 
-       if (cfg->hp_outs && cfg->line_out_type == AUTO_PIN_SPEAKER_OUT)
+       if (!spec->no_multi_io &&
+           cfg->hp_outs && cfg->line_out_type == AUTO_PIN_SPEAKER_OUT)
                if (count_multiio_pins(codec, cfg->hp_pins[0]) >= 2)
                        spec->multi_ios = 1; /* give badness */
 
@@ -3724,7 +3750,8 @@ static int mux_select(struct hda_codec *codec, unsigned int adc_idx,
 /* check each pin in the given array; returns true if any of them is plugged */
 static bool detect_jacks(struct hda_codec *codec, int num_pins, hda_nid_t *pins)
 {
-       int i, present = 0;
+       int i;
+       bool present = false;
 
        for (i = 0; i < num_pins; i++) {
                hda_nid_t nid = pins[i];
@@ -3733,14 +3760,15 @@ static bool detect_jacks(struct hda_codec *codec, int num_pins, hda_nid_t *pins)
                /* don't detect pins retasked as inputs */
                if (snd_hda_codec_get_pin_target(codec, nid) & AC_PINCTL_IN_EN)
                        continue;
-               present |= snd_hda_jack_detect(codec, nid);
+               if (snd_hda_jack_detect_state(codec, nid) == HDA_JACK_PRESENT)
+                       present = true;
        }
        return present;
 }
 
 /* standard HP/line-out auto-mute helper */
 static void do_automute(struct hda_codec *codec, int num_pins, hda_nid_t *pins,
-                       bool mute)
+                       int *paths, bool mute)
 {
        struct hda_gen_spec *spec = codec->spec;
        int i;
@@ -3752,10 +3780,19 @@ static void do_automute(struct hda_codec *codec, int num_pins, hda_nid_t *pins,
                        break;
 
                if (spec->auto_mute_via_amp) {
+                       struct nid_path *path;
+                       hda_nid_t mute_nid;
+
+                       path = snd_hda_get_path_from_idx(codec, paths[i]);
+                       if (!path)
+                               continue;
+                       mute_nid = get_amp_nid_(path->ctls[NID_PATH_MUTE_CTL]);
+                       if (!mute_nid)
+                               continue;
                        if (mute)
-                               spec->mute_bits |= (1ULL << nid);
+                               spec->mute_bits |= (1ULL << mute_nid);
                        else
-                               spec->mute_bits &= ~(1ULL << nid);
+                               spec->mute_bits &= ~(1ULL << mute_nid);
                        set_pin_eapd(codec, nid, !mute);
                        continue;
                }
@@ -3786,14 +3823,19 @@ static void do_automute(struct hda_codec *codec, int num_pins, hda_nid_t *pins,
 void snd_hda_gen_update_outputs(struct hda_codec *codec)
 {
        struct hda_gen_spec *spec = codec->spec;
+       int *paths;
        int on;
 
        /* Control HP pins/amps depending on master_mute state;
         * in general, HP pins/amps control should be enabled in all cases,
         * but currently set only for master_mute, just to be safe
         */
+       if (spec->autocfg.line_out_type == AUTO_PIN_HP_OUT)
+               paths = spec->out_paths;
+       else
+               paths = spec->hp_paths;
        do_automute(codec, ARRAY_SIZE(spec->autocfg.hp_pins),
-                   spec->autocfg.hp_pins, spec->master_mute);
+                   spec->autocfg.hp_pins, paths, spec->master_mute);
 
        if (!spec->automute_speaker)
                on = 0;
@@ -3801,8 +3843,12 @@ void snd_hda_gen_update_outputs(struct hda_codec *codec)
                on = spec->hp_jack_present | spec->line_jack_present;
        on |= spec->master_mute;
        spec->speaker_muted = on;
+       if (spec->autocfg.line_out_type == AUTO_PIN_SPEAKER_OUT)
+               paths = spec->out_paths;
+       else
+               paths = spec->speaker_paths;
        do_automute(codec, ARRAY_SIZE(spec->autocfg.speaker_pins),
-                   spec->autocfg.speaker_pins, on);
+                   spec->autocfg.speaker_pins, paths, on);
 
        /* toggle line-out mutes if needed, too */
        /* if LO is a copy of either HP or Speaker, don't need to handle it */
@@ -3815,8 +3861,9 @@ void snd_hda_gen_update_outputs(struct hda_codec *codec)
                on = spec->hp_jack_present;
        on |= spec->master_mute;
        spec->line_out_muted = on;
+       paths = spec->out_paths;
        do_automute(codec, ARRAY_SIZE(spec->autocfg.line_out_pins),
-                   spec->autocfg.line_out_pins, on);
+                   spec->autocfg.line_out_pins, paths, on);
 }
 EXPORT_SYMBOL_HDA(snd_hda_gen_update_outputs);
 
@@ -3887,7 +3934,7 @@ void snd_hda_gen_mic_autoswitch(struct hda_codec *codec, struct hda_jack_tbl *ja
                /* don't detect pins retasked as outputs */
                if (snd_hda_codec_get_pin_target(codec, pin) & AC_PINCTL_OUT_EN)
                        continue;
-               if (snd_hda_jack_detect(codec, pin)) {
+               if (snd_hda_jack_detect_state(codec, pin) == HDA_JACK_PRESENT) {
                        mux_select(codec, 0, spec->am_entry[i].idx);
                        return;
                }
index e199a852388b4506148b0cfcbcca7ee380406a52..48d44026705b0f05ac01c51fbdb199215195979f 100644 (file)
@@ -220,6 +220,7 @@ struct hda_gen_spec {
        unsigned int hp_mic:1; /* Allow HP as a mic-in */
        unsigned int suppress_hp_mic_detect:1; /* Don't detect HP/mic */
        unsigned int no_primary_hp:1; /* Don't prefer HP pins to speaker pins */
+       unsigned int no_multi_io:1; /* Don't try multi I/O config */
        unsigned int multi_cap_vol:1; /* allow multiple capture xxx volumes */
        unsigned int inv_dmic_split:1; /* inverted dmic w/a for conexant */
        unsigned int own_eapd_ctl:1; /* set EAPD by own function */
index ce67608734b58da8a762346eb678e082b2d6b747..fe0bda19de153f78da5da0c4add17e5fb3d95d65 100644 (file)
@@ -295,7 +295,7 @@ static ssize_t type##_store(struct device *dev,                     \
        struct snd_hwdep *hwdep = dev_get_drvdata(dev);         \
        struct hda_codec *codec = hwdep->private_data;          \
        unsigned long val;                                      \
-       int err = strict_strtoul(buf, 0, &val);                 \
+       int err = kstrtoul(buf, 0, &val);                       \
        if (err < 0)                                            \
                return err;                                     \
        codec->type = val;                                      \
@@ -654,7 +654,7 @@ int snd_hda_get_int_hint(struct hda_codec *codec, const char *key, int *valp)
        p = snd_hda_get_hint(codec, key);
        if (!p)
                ret = -ENOENT;
-       else if (strict_strtoul(p, 0, &val))
+       else if (kstrtoul(p, 0, &val))
                ret = -EINVAL;
        else {
                *valp = val;
@@ -751,7 +751,7 @@ static void parse_##name##_mode(char *buf, struct hda_bus *bus, \
                                 struct hda_codec **codecp) \
 { \
        unsigned long val; \
-       if (!strict_strtoul(buf, 0, &val)) \
+       if (!kstrtoul(buf, 0, &val)) \
                (*codecp)->name = val; \
 }
 
index 8860dd529520d0335264af050fdb918ae47c1c60..c6c98298ac397c024d35cf55e5d85f3fe095decc 100644 (file)
@@ -1160,7 +1160,7 @@ static int azx_reset(struct azx *chip, int full_reset)
                goto __skip;
 
        /* clear STATESTS */
-       azx_writeb(chip, STATESTS, STATESTS_INT_MASK);
+       azx_writew(chip, STATESTS, STATESTS_INT_MASK);
 
        /* reset controller */
        azx_enter_link_reset(chip);
@@ -1242,7 +1242,7 @@ static void azx_int_clear(struct azx *chip)
        }
 
        /* clear STATESTS */
-       azx_writeb(chip, STATESTS, STATESTS_INT_MASK);
+       azx_writew(chip, STATESTS, STATESTS_INT_MASK);
 
        /* clear rirb status */
        azx_writeb(chip, RIRBSTS, RIRB_INT_MASK);
@@ -1451,8 +1451,8 @@ static irqreturn_t azx_interrupt(int irq, void *dev_id)
 
 #if 0
        /* clear state status int */
-       if (azx_readb(chip, STATESTS) & 0x04)
-               azx_writeb(chip, STATESTS, 0x04);
+       if (azx_readw(chip, STATESTS) & 0x04)
+               azx_writew(chip, STATESTS, 0x04);
 #endif
        spin_unlock(&chip->reg_lock);
        
@@ -2971,6 +2971,10 @@ static int azx_runtime_suspend(struct device *dev)
        struct snd_card *card = dev_get_drvdata(dev);
        struct azx *chip = card->private_data;
 
+       /* enable controller wake up event */
+       azx_writew(chip, WAKEEN, azx_readw(chip, WAKEEN) |
+                 STATESTS_INT_MASK);
+
        azx_stop_chip(chip);
        azx_enter_link_reset(chip);
        azx_clear_irq_pending(chip);
@@ -2983,11 +2987,31 @@ static int azx_runtime_resume(struct device *dev)
 {
        struct snd_card *card = dev_get_drvdata(dev);
        struct azx *chip = card->private_data;
+       struct hda_bus *bus;
+       struct hda_codec *codec;
+       int status;
 
        if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL)
                hda_display_power(true);
+
+       /* Read STATESTS before controller reset */
+       status = azx_readw(chip, STATESTS);
+
        azx_init_pci(chip);
        azx_init_chip(chip, 1);
+
+       bus = chip->bus;
+       if (status && bus) {
+               list_for_each_entry(codec, &bus->codec_list, list)
+                       if (status & (1 << codec->addr))
+                               queue_delayed_work(codec->bus->workq,
+                                                  &codec->jackpoll_work, codec->jackpoll_interval);
+       }
+
+       /* disable controller Wake Up event*/
+       azx_writew(chip, WAKEEN, azx_readw(chip, WAKEEN) &
+                       ~STATESTS_INT_MASK);
+
        return 0;
 }
 
@@ -3831,11 +3855,13 @@ static int azx_probe_continue(struct azx *chip)
 
        /* Request power well for Haswell HDA controller and codec */
        if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) {
+#ifdef CONFIG_SND_HDA_I915
                err = hda_i915_init();
                if (err < 0) {
                        snd_printk(KERN_ERR SFX "Error request power-well from i915\n");
                        goto out_free;
                }
+#endif
                hda_display_power(true);
        }
 
index 3fd2973183e2dda266a78c00a96bc5517b5bb29f..05b3e3e9108fda50f16f2472b2dad5bfbc870067 100644 (file)
@@ -194,18 +194,24 @@ u32 snd_hda_pin_sense(struct hda_codec *codec, hda_nid_t nid)
 EXPORT_SYMBOL_HDA(snd_hda_pin_sense);
 
 /**
- * snd_hda_jack_detect - query pin Presence Detect status
+ * snd_hda_jack_detect_state - query pin Presence Detect status
  * @codec: the CODEC to sense
  * @nid: the pin NID to sense
  *
- * Query and return the pin's Presence Detect status.
+ * Query and return the pin's Presence Detect status, as either
+ * HDA_JACK_NOT_PRESENT, HDA_JACK_PRESENT or HDA_JACK_PHANTOM.
  */
-int snd_hda_jack_detect(struct hda_codec *codec, hda_nid_t nid)
+int snd_hda_jack_detect_state(struct hda_codec *codec, hda_nid_t nid)
 {
-       u32 sense = snd_hda_pin_sense(codec, nid);
-       return get_jack_plug_state(sense);
+       struct hda_jack_tbl *jack = snd_hda_jack_tbl_get(codec, nid);
+       if (jack && jack->phantom_jack)
+               return HDA_JACK_PHANTOM;
+       else if (snd_hda_pin_sense(codec, nid) & AC_PINSENSE_PRESENCE)
+               return HDA_JACK_PRESENT;
+       else
+               return HDA_JACK_NOT_PRESENT;
 }
-EXPORT_SYMBOL_HDA(snd_hda_jack_detect);
+EXPORT_SYMBOL_HDA(snd_hda_jack_detect_state);
 
 /**
  * snd_hda_jack_detect_enable - enable the jack-detection
@@ -247,8 +253,8 @@ EXPORT_SYMBOL_HDA(snd_hda_jack_detect_enable);
 int snd_hda_jack_set_gating_jack(struct hda_codec *codec, hda_nid_t gated_nid,
                                 hda_nid_t gating_nid)
 {
-       struct hda_jack_tbl *gated = snd_hda_jack_tbl_get(codec, gated_nid);
-       struct hda_jack_tbl *gating = snd_hda_jack_tbl_get(codec, gating_nid);
+       struct hda_jack_tbl *gated = snd_hda_jack_tbl_new(codec, gated_nid);
+       struct hda_jack_tbl *gating = snd_hda_jack_tbl_new(codec, gating_nid);
 
        if (!gated || !gating)
                return -EINVAL;
index ec12abd452631d12b9915e0c75a4c45fdbc07c95..379420c44eefac051e512b83c67790f9f3ce99a2 100644 (file)
@@ -75,7 +75,18 @@ int snd_hda_jack_set_gating_jack(struct hda_codec *codec, hda_nid_t gated_nid,
                                 hda_nid_t gating_nid);
 
 u32 snd_hda_pin_sense(struct hda_codec *codec, hda_nid_t nid);
-int snd_hda_jack_detect(struct hda_codec *codec, hda_nid_t nid);
+
+/* the jack state returned from snd_hda_jack_detect_state() */
+enum {
+       HDA_JACK_NOT_PRESENT, HDA_JACK_PRESENT, HDA_JACK_PHANTOM,
+};
+
+int snd_hda_jack_detect_state(struct hda_codec *codec, hda_nid_t nid);
+
+static inline bool snd_hda_jack_detect(struct hda_codec *codec, hda_nid_t nid)
+{
+       return snd_hda_jack_detect_state(codec, nid) != HDA_JACK_NOT_PRESENT;
+}
 
 bool is_jack_detectable(struct hda_codec *codec, hda_nid_t nid);
 
index d97f0d61a15b5779bd10a17ef3d90a184673c008..0cbdd87dde6d90ac67767ddd30331c62b8d02862 100644 (file)
@@ -32,7 +32,6 @@
 #include "hda_jack.h"
 #include "hda_generic.h"
 
-#define ENABLE_AD_STATIC_QUIRKS
 
 struct ad198x_spec {
        struct hda_gen_spec gen;
@@ -43,114 +42,8 @@ struct ad198x_spec {
        hda_nid_t eapd_nid;
 
        unsigned int beep_amp;  /* beep amp value, set via set_beep_amp() */
-
-#ifdef ENABLE_AD_STATIC_QUIRKS
-       const struct snd_kcontrol_new *mixers[6];
-       int num_mixers;
-       const struct hda_verb *init_verbs[6];   /* initialization verbs
-                                                * don't forget NULL termination!
-                                                */
-       unsigned int num_init_verbs;
-
-       /* playback */
-       struct hda_multi_out multiout;  /* playback set-up
-                                        * max_channels, dacs must be set
-                                        * dig_out_nid and hp_nid are optional
-                                        */
-       unsigned int cur_eapd;
-       unsigned int need_dac_fix;
-
-       /* capture */
-       unsigned int num_adc_nids;
-       const hda_nid_t *adc_nids;
-       hda_nid_t dig_in_nid;           /* digital-in NID; optional */
-
-       /* capture source */
-       const struct hda_input_mux *input_mux;
-       const hda_nid_t *capsrc_nids;
-       unsigned int cur_mux[3];
-
-       /* channel model */
-       const struct hda_channel_mode *channel_mode;
-       int num_channel_mode;
-
-       /* PCM information */
-       struct hda_pcm pcm_rec[3];      /* used in alc_build_pcms() */
-
-       unsigned int spdif_route;
-
-       unsigned int jack_present: 1;
-       unsigned int inv_jack_detect: 1;/* inverted jack-detection */
-       unsigned int analog_beep: 1;    /* analog beep input present */
-       unsigned int avoid_init_slave_vol:1;
-
-#ifdef CONFIG_PM
-       struct hda_loopback_check loopback;
-#endif
-       /* for virtual master */
-       hda_nid_t vmaster_nid;
-       const char * const *slave_vols;
-       const char * const *slave_sws;
-#endif /* ENABLE_AD_STATIC_QUIRKS */
-};
-
-#ifdef ENABLE_AD_STATIC_QUIRKS
-/*
- * input MUX handling (common part)
- */
-static int ad198x_mux_enum_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
-{
-       struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
-       struct ad198x_spec *spec = codec->spec;
-
-       return snd_hda_input_mux_info(spec->input_mux, uinfo);
-}
-
-static int ad198x_mux_enum_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
-{
-       struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
-       struct ad198x_spec *spec = codec->spec;
-       unsigned int adc_idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id);
-
-       ucontrol->value.enumerated.item[0] = spec->cur_mux[adc_idx];
-       return 0;
-}
-
-static int ad198x_mux_enum_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
-{
-       struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
-       struct ad198x_spec *spec = codec->spec;
-       unsigned int adc_idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id);
-
-       return snd_hda_input_mux_put(codec, spec->input_mux, ucontrol,
-                                    spec->capsrc_nids[adc_idx],
-                                    &spec->cur_mux[adc_idx]);
-}
-
-/*
- * initialization (common callbacks)
- */
-static int ad198x_init(struct hda_codec *codec)
-{
-       struct ad198x_spec *spec = codec->spec;
-       int i;
-
-       for (i = 0; i < spec->num_init_verbs; i++)
-               snd_hda_sequence_write(codec, spec->init_verbs[i]);
-       return 0;
-}
-
-static const char * const ad_slave_pfxs[] = {
-       "Front", "Surround", "Center", "LFE", "Side",
-       "Headphone", "Mono", "Speaker", "IEC958",
-       NULL
 };
 
-static const char * const ad1988_6stack_fp_slave_pfxs[] = {
-       "Front", "Surround", "Center", "LFE", "Side", "IEC958",
-       NULL
-};
-#endif /* ENABLE_AD_STATIC_QUIRKS */
 
 #ifdef CONFIG_SND_HDA_INPUT_BEEP
 /* additional beep mixers; the actual parameters are overwritten at build */
@@ -160,12 +53,6 @@ static const struct snd_kcontrol_new ad_beep_mixer[] = {
        { } /* end */
 };
 
-static const struct snd_kcontrol_new ad_beep2_mixer[] = {
-       HDA_CODEC_VOLUME("Digital Beep Playback Volume", 0, 0, HDA_OUTPUT),
-       HDA_CODEC_MUTE_BEEP("Digital Beep Playback Switch", 0, 0, HDA_OUTPUT),
-       { } /* end */
-};
-
 #define set_beep_amp(spec, nid, idx, dir) \
        ((spec)->beep_amp = HDA_COMPOSE_AMP_VAL(nid, 1, idx, dir)) /* mono */
 #else
@@ -181,8 +68,7 @@ static int create_beep_ctls(struct hda_codec *codec)
        if (!spec->beep_amp)
                return 0;
 
-       knew = spec->analog_beep ? ad_beep2_mixer : ad_beep_mixer;
-       for ( ; knew->name; knew++) {
+       for (knew = ad_beep_mixer ; knew->name; knew++) {
                int err;
                struct snd_kcontrol *kctl;
                kctl = snd_ctl_new1(knew, codec);
@@ -199,268 +85,6 @@ static int create_beep_ctls(struct hda_codec *codec)
 #define create_beep_ctls(codec)                0
 #endif
 
-#ifdef ENABLE_AD_STATIC_QUIRKS
-static int ad198x_build_controls(struct hda_codec *codec)
-{
-       struct ad198x_spec *spec = codec->spec;
-       struct snd_kcontrol *kctl;
-       unsigned int i;
-       int err;
-
-       for (i = 0; i < spec->num_mixers; i++) {
-               err = snd_hda_add_new_ctls(codec, spec->mixers[i]);
-               if (err < 0)
-                       return err;
-       }
-       if (spec->multiout.dig_out_nid) {
-               err = snd_hda_create_spdif_out_ctls(codec,
-                                                   spec->multiout.dig_out_nid,
-                                                   spec->multiout.dig_out_nid);
-               if (err < 0)
-                       return err;
-               err = snd_hda_create_spdif_share_sw(codec,
-                                                   &spec->multiout);
-               if (err < 0)
-                       return err;
-               spec->multiout.share_spdif = 1;
-       } 
-       if (spec->dig_in_nid) {
-               err = snd_hda_create_spdif_in_ctls(codec, spec->dig_in_nid);
-               if (err < 0)
-                       return err;
-       }
-
-       /* create beep controls if needed */
-       err = create_beep_ctls(codec);
-       if (err < 0)
-               return err;
-
-       /* if we have no master control, let's create it */
-       if (!snd_hda_find_mixer_ctl(codec, "Master Playback Volume")) {
-               unsigned int vmaster_tlv[4];
-               snd_hda_set_vmaster_tlv(codec, spec->vmaster_nid,
-                                       HDA_OUTPUT, vmaster_tlv);
-               err = __snd_hda_add_vmaster(codec, "Master Playback Volume",
-                                         vmaster_tlv,
-                                         (spec->slave_vols ?
-                                          spec->slave_vols : ad_slave_pfxs),
-                                         "Playback Volume",
-                                         !spec->avoid_init_slave_vol, NULL);
-               if (err < 0)
-                       return err;
-       }
-       if (!snd_hda_find_mixer_ctl(codec, "Master Playback Switch")) {
-               err = snd_hda_add_vmaster(codec, "Master Playback Switch",
-                                         NULL,
-                                         (spec->slave_sws ?
-                                          spec->slave_sws : ad_slave_pfxs),
-                                         "Playback Switch");
-               if (err < 0)
-                       return err;
-       }
-
-       /* assign Capture Source enums to NID */
-       kctl = snd_hda_find_mixer_ctl(codec, "Capture Source");
-       if (!kctl)
-               kctl = snd_hda_find_mixer_ctl(codec, "Input Source");
-       for (i = 0; kctl && i < kctl->count; i++) {
-               err = snd_hda_add_nid(codec, kctl, i, spec->capsrc_nids[i]);
-               if (err < 0)
-                       return err;
-       }
-
-       /* assign IEC958 enums to NID */
-       kctl = snd_hda_find_mixer_ctl(codec,
-                       SNDRV_CTL_NAME_IEC958("",PLAYBACK,NONE) "Source");
-       if (kctl) {
-               err = snd_hda_add_nid(codec, kctl, 0,
-                                     spec->multiout.dig_out_nid);
-               if (err < 0)
-                       return err;
-       }
-
-       return 0;
-}
-
-#ifdef CONFIG_PM
-static int ad198x_check_power_status(struct hda_codec *codec, hda_nid_t nid)
-{
-       struct ad198x_spec *spec = codec->spec;
-       return snd_hda_check_amp_list_power(codec, &spec->loopback, nid);
-}
-#endif
-
-/*
- * Analog playback callbacks
- */
-static int ad198x_playback_pcm_open(struct hda_pcm_stream *hinfo,
-                                   struct hda_codec *codec,
-                                   struct snd_pcm_substream *substream)
-{
-       struct ad198x_spec *spec = codec->spec;
-       return snd_hda_multi_out_analog_open(codec, &spec->multiout, substream,
-                                            hinfo);
-}
-
-static int ad198x_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
-                                      struct hda_codec *codec,
-                                      unsigned int stream_tag,
-                                      unsigned int format,
-                                      struct snd_pcm_substream *substream)
-{
-       struct ad198x_spec *spec = codec->spec;
-       return snd_hda_multi_out_analog_prepare(codec, &spec->multiout, stream_tag,
-                                               format, substream);
-}
-
-static int ad198x_playback_pcm_cleanup(struct hda_pcm_stream *hinfo,
-                                      struct hda_codec *codec,
-                                      struct snd_pcm_substream *substream)
-{
-       struct ad198x_spec *spec = codec->spec;
-       return snd_hda_multi_out_analog_cleanup(codec, &spec->multiout);
-}
-
-/*
- * Digital out
- */
-static int ad198x_dig_playback_pcm_open(struct hda_pcm_stream *hinfo,
-                                       struct hda_codec *codec,
-                                       struct snd_pcm_substream *substream)
-{
-       struct ad198x_spec *spec = codec->spec;
-       return snd_hda_multi_out_dig_open(codec, &spec->multiout);
-}
-
-static int ad198x_dig_playback_pcm_close(struct hda_pcm_stream *hinfo,
-                                        struct hda_codec *codec,
-                                        struct snd_pcm_substream *substream)
-{
-       struct ad198x_spec *spec = codec->spec;
-       return snd_hda_multi_out_dig_close(codec, &spec->multiout);
-}
-
-static int ad198x_dig_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
-                                          struct hda_codec *codec,
-                                          unsigned int stream_tag,
-                                          unsigned int format,
-                                          struct snd_pcm_substream *substream)
-{
-       struct ad198x_spec *spec = codec->spec;
-       return snd_hda_multi_out_dig_prepare(codec, &spec->multiout, stream_tag,
-                                            format, substream);
-}
-
-static int ad198x_dig_playback_pcm_cleanup(struct hda_pcm_stream *hinfo,
-                                          struct hda_codec *codec,
-                                          struct snd_pcm_substream *substream)
-{
-       struct ad198x_spec *spec = codec->spec;
-       return snd_hda_multi_out_dig_cleanup(codec, &spec->multiout);
-}
-
-/*
- * Analog capture
- */
-static int ad198x_capture_pcm_prepare(struct hda_pcm_stream *hinfo,
-                                     struct hda_codec *codec,
-                                     unsigned int stream_tag,
-                                     unsigned int format,
-                                     struct snd_pcm_substream *substream)
-{
-       struct ad198x_spec *spec = codec->spec;
-       snd_hda_codec_setup_stream(codec, spec->adc_nids[substream->number],
-                                  stream_tag, 0, format);
-       return 0;
-}
-
-static int ad198x_capture_pcm_cleanup(struct hda_pcm_stream *hinfo,
-                                     struct hda_codec *codec,
-                                     struct snd_pcm_substream *substream)
-{
-       struct ad198x_spec *spec = codec->spec;
-       snd_hda_codec_cleanup_stream(codec, spec->adc_nids[substream->number]);
-       return 0;
-}
-
-/*
- */
-static const struct hda_pcm_stream ad198x_pcm_analog_playback = {
-       .substreams = 1,
-       .channels_min = 2,
-       .channels_max = 6, /* changed later */
-       .nid = 0, /* fill later */
-       .ops = {
-               .open = ad198x_playback_pcm_open,
-               .prepare = ad198x_playback_pcm_prepare,
-               .cleanup = ad198x_playback_pcm_cleanup,
-       },
-};
-
-static const struct hda_pcm_stream ad198x_pcm_analog_capture = {
-       .substreams = 1,
-       .channels_min = 2,
-       .channels_max = 2,
-       .nid = 0, /* fill later */
-       .ops = {
-               .prepare = ad198x_capture_pcm_prepare,
-               .cleanup = ad198x_capture_pcm_cleanup
-       },
-};
-
-static const struct hda_pcm_stream ad198x_pcm_digital_playback = {
-       .substreams = 1,
-       .channels_min = 2,
-       .channels_max = 2,
-       .nid = 0, /* fill later */
-       .ops = {
-               .open = ad198x_dig_playback_pcm_open,
-               .close = ad198x_dig_playback_pcm_close,
-               .prepare = ad198x_dig_playback_pcm_prepare,
-               .cleanup = ad198x_dig_playback_pcm_cleanup
-       },
-};
-
-static const struct hda_pcm_stream ad198x_pcm_digital_capture = {
-       .substreams = 1,
-       .channels_min = 2,
-       .channels_max = 2,
-       /* NID is set in alc_build_pcms */
-};
-
-static int ad198x_build_pcms(struct hda_codec *codec)
-{
-       struct ad198x_spec *spec = codec->spec;
-       struct hda_pcm *info = spec->pcm_rec;
-
-       codec->num_pcms = 1;
-       codec->pcm_info = info;
-
-       info->name = "AD198x Analog";
-       info->stream[SNDRV_PCM_STREAM_PLAYBACK] = ad198x_pcm_analog_playback;
-       info->stream[SNDRV_PCM_STREAM_PLAYBACK].channels_max = spec->multiout.max_channels;
-       info->stream[SNDRV_PCM_STREAM_PLAYBACK].nid = spec->multiout.dac_nids[0];
-       info->stream[SNDRV_PCM_STREAM_CAPTURE] = ad198x_pcm_analog_capture;
-       info->stream[SNDRV_PCM_STREAM_CAPTURE].substreams = spec->num_adc_nids;
-       info->stream[SNDRV_PCM_STREAM_CAPTURE].nid = spec->adc_nids[0];
-
-       if (spec->multiout.dig_out_nid) {
-               info++;
-               codec->num_pcms++;
-               codec->spdif_status_reset = 1;
-               info->name = "AD198x Digital";
-               info->pcm_type = HDA_PCM_TYPE_SPDIF;
-               info->stream[SNDRV_PCM_STREAM_PLAYBACK] = ad198x_pcm_digital_playback;
-               info->stream[SNDRV_PCM_STREAM_PLAYBACK].nid = spec->multiout.dig_out_nid;
-               if (spec->dig_in_nid) {
-                       info->stream[SNDRV_PCM_STREAM_CAPTURE] = ad198x_pcm_digital_capture;
-                       info->stream[SNDRV_PCM_STREAM_CAPTURE].nid = spec->dig_in_nid;
-               }
-       }
-
-       return 0;
-}
-#endif /* ENABLE_AD_STATIC_QUIRKS */
 
 static void ad198x_power_eapd_write(struct hda_codec *codec, hda_nid_t front,
                                hda_nid_t hp)
@@ -507,18 +131,6 @@ static void ad198x_shutup(struct hda_codec *codec)
        ad198x_power_eapd(codec);
 }
 
-static void ad198x_free(struct hda_codec *codec)
-{
-       struct ad198x_spec *spec = codec->spec;
-
-       if (!spec)
-               return;
-
-       snd_hda_gen_spec_free(&spec->gen);
-       kfree(spec);
-       snd_hda_detach_beep_device(codec);
-}
-
 #ifdef CONFIG_PM
 static int ad198x_suspend(struct hda_codec *codec)
 {
@@ -527,65 +139,6 @@ static int ad198x_suspend(struct hda_codec *codec)
 }
 #endif
 
-#ifdef ENABLE_AD_STATIC_QUIRKS
-static const struct hda_codec_ops ad198x_patch_ops = {
-       .build_controls = ad198x_build_controls,
-       .build_pcms = ad198x_build_pcms,
-       .init = ad198x_init,
-       .free = ad198x_free,
-#ifdef CONFIG_PM
-       .check_power_status = ad198x_check_power_status,
-       .suspend = ad198x_suspend,
-#endif
-       .reboot_notify = ad198x_shutup,
-};
-
-
-/*
- * EAPD control
- * the private value = nid
- */
-#define ad198x_eapd_info       snd_ctl_boolean_mono_info
-
-static int ad198x_eapd_get(struct snd_kcontrol *kcontrol,
-                          struct snd_ctl_elem_value *ucontrol)
-{
-       struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
-       struct ad198x_spec *spec = codec->spec;
-       if (codec->inv_eapd)
-               ucontrol->value.integer.value[0] = ! spec->cur_eapd;
-       else
-               ucontrol->value.integer.value[0] = spec->cur_eapd;
-       return 0;
-}
-
-static int ad198x_eapd_put(struct snd_kcontrol *kcontrol,
-                          struct snd_ctl_elem_value *ucontrol)
-{
-       struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
-       struct ad198x_spec *spec = codec->spec;
-       hda_nid_t nid = kcontrol->private_value & 0xff;
-       unsigned int eapd;
-       eapd = !!ucontrol->value.integer.value[0];
-       if (codec->inv_eapd)
-               eapd = !eapd;
-       if (eapd == spec->cur_eapd)
-               return 0;
-       spec->cur_eapd = eapd;
-       snd_hda_codec_write_cache(codec, nid,
-                                 0, AC_VERB_SET_EAPD_BTLENABLE,
-                                 eapd ? 0x02 : 0x00);
-       return 1;
-}
-
-static int ad198x_ch_mode_info(struct snd_kcontrol *kcontrol,
-                              struct snd_ctl_elem_info *uinfo);
-static int ad198x_ch_mode_get(struct snd_kcontrol *kcontrol,
-                             struct snd_ctl_elem_value *ucontrol);
-static int ad198x_ch_mode_put(struct snd_kcontrol *kcontrol,
-                             struct snd_ctl_elem_value *ucontrol);
-#endif /* ENABLE_AD_STATIC_QUIRKS */
-
 
 /*
  * Automatic parse of I/O pins from the BIOS configuration
@@ -646,580 +199,124 @@ static int ad198x_parse_auto_config(struct hda_codec *codec)
  * AD1986A specific
  */
 
-#ifdef ENABLE_AD_STATIC_QUIRKS
-#define AD1986A_SPDIF_OUT      0x02
-#define AD1986A_FRONT_DAC      0x03
-#define AD1986A_SURR_DAC       0x04
-#define AD1986A_CLFE_DAC       0x05
-#define AD1986A_ADC            0x06
-
-static const hda_nid_t ad1986a_dac_nids[3] = {
-       AD1986A_FRONT_DAC, AD1986A_SURR_DAC, AD1986A_CLFE_DAC
-};
-static const hda_nid_t ad1986a_adc_nids[1] = { AD1986A_ADC };
-static const hda_nid_t ad1986a_capsrc_nids[1] = { 0x12 };
-
-static const struct hda_input_mux ad1986a_capture_source = {
-       .num_items = 7,
-       .items = {
-               { "Mic", 0x0 },
-               { "CD", 0x1 },
-               { "Aux", 0x3 },
-               { "Line", 0x4 },
-               { "Mix", 0x5 },
-               { "Mono", 0x6 },
-               { "Phone", 0x7 },
-       },
-};
-
-
-static const struct hda_bind_ctls ad1986a_bind_pcm_vol = {
-       .ops = &snd_hda_bind_vol,
-       .values = {
-               HDA_COMPOSE_AMP_VAL(AD1986A_FRONT_DAC, 3, 0, HDA_OUTPUT),
-               HDA_COMPOSE_AMP_VAL(AD1986A_SURR_DAC, 3, 0, HDA_OUTPUT),
-               HDA_COMPOSE_AMP_VAL(AD1986A_CLFE_DAC, 3, 0, HDA_OUTPUT),
-               0
-       },
-};
+static int alloc_ad_spec(struct hda_codec *codec)
+{
+       struct ad198x_spec *spec;
 
-static const struct hda_bind_ctls ad1986a_bind_pcm_sw = {
-       .ops = &snd_hda_bind_sw,
-       .values = {
-               HDA_COMPOSE_AMP_VAL(AD1986A_FRONT_DAC, 3, 0, HDA_OUTPUT),
-               HDA_COMPOSE_AMP_VAL(AD1986A_SURR_DAC, 3, 0, HDA_OUTPUT),
-               HDA_COMPOSE_AMP_VAL(AD1986A_CLFE_DAC, 3, 0, HDA_OUTPUT),
-               0
-       },
-};
+       spec = kzalloc(sizeof(*spec), GFP_KERNEL);
+       if (!spec)
+               return -ENOMEM;
+       codec->spec = spec;
+       snd_hda_gen_spec_init(&spec->gen);
+       return 0;
+}
 
 /*
- * mixers
+ * AD1986A fixup codes
  */
-static const struct snd_kcontrol_new ad1986a_mixers[] = {
-       /*
-        * bind volumes/mutes of 3 DACs as a single PCM control for simplicity
-        */
-       HDA_BIND_VOL("PCM Playback Volume", &ad1986a_bind_pcm_vol),
-       HDA_BIND_SW("PCM Playback Switch", &ad1986a_bind_pcm_sw),
-       HDA_CODEC_VOLUME("Front Playback Volume", 0x1b, 0x0, HDA_OUTPUT),
-       HDA_CODEC_MUTE("Front Playback Switch", 0x1b, 0x0, HDA_OUTPUT),
-       HDA_CODEC_VOLUME("Surround Playback Volume", 0x1c, 0x0, HDA_OUTPUT),
-       HDA_CODEC_MUTE("Surround Playback Switch", 0x1c, 0x0, HDA_OUTPUT),
-       HDA_CODEC_VOLUME_MONO("Center Playback Volume", 0x1d, 1, 0x0, HDA_OUTPUT),
-       HDA_CODEC_VOLUME_MONO("LFE Playback Volume", 0x1d, 2, 0x0, HDA_OUTPUT),
-       HDA_CODEC_MUTE_MONO("Center Playback Switch", 0x1d, 1, 0x0, HDA_OUTPUT),
-       HDA_CODEC_MUTE_MONO("LFE Playback Switch", 0x1d, 2, 0x0, HDA_OUTPUT),
-       HDA_CODEC_VOLUME("Headphone Playback Volume", 0x1a, 0x0, HDA_OUTPUT),
-       HDA_CODEC_MUTE("Headphone Playback Switch", 0x1a, 0x0, HDA_OUTPUT),
-       HDA_CODEC_VOLUME("CD Playback Volume", 0x15, 0x0, HDA_OUTPUT),
-       HDA_CODEC_MUTE("CD Playback Switch", 0x15, 0x0, HDA_OUTPUT),
-       HDA_CODEC_VOLUME("Line Playback Volume", 0x17, 0x0, HDA_OUTPUT),
-       HDA_CODEC_MUTE("Line Playback Switch", 0x17, 0x0, HDA_OUTPUT),
-       HDA_CODEC_VOLUME("Aux Playback Volume", 0x16, 0x0, HDA_OUTPUT),
-       HDA_CODEC_MUTE("Aux Playback Switch", 0x16, 0x0, HDA_OUTPUT),
-       HDA_CODEC_VOLUME("Mic Playback Volume", 0x13, 0x0, HDA_OUTPUT),
-       HDA_CODEC_MUTE("Mic Playback Switch", 0x13, 0x0, HDA_OUTPUT),
-       HDA_CODEC_VOLUME("Mic Boost Volume", 0x0f, 0x0, HDA_OUTPUT),
-       HDA_CODEC_VOLUME("Mono Playback Volume", 0x1e, 0x0, HDA_OUTPUT),
-       HDA_CODEC_MUTE("Mono Playback Switch", 0x1e, 0x0, HDA_OUTPUT),
-       HDA_CODEC_VOLUME("Capture Volume", 0x12, 0x0, HDA_OUTPUT),
-       HDA_CODEC_MUTE("Capture Switch", 0x12, 0x0, HDA_OUTPUT),
-       {
-               .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
-               .name = "Capture Source",
-               .info = ad198x_mux_enum_info,
-               .get = ad198x_mux_enum_get,
-               .put = ad198x_mux_enum_put,
-       },
-       HDA_CODEC_MUTE("Stereo Downmix Switch", 0x09, 0x0, HDA_OUTPUT),
-       { } /* end */
-};
-
-/* additional mixers for 3stack mode */
-static const struct snd_kcontrol_new ad1986a_3st_mixers[] = {
-       {
-               .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
-               .name = "Channel Mode",
-               .info = ad198x_ch_mode_info,
-               .get = ad198x_ch_mode_get,
-               .put = ad198x_ch_mode_put,
-       },
-       { } /* end */
-};
 
-/* laptop model - 2ch only */
-static const hda_nid_t ad1986a_laptop_dac_nids[1] = { AD1986A_FRONT_DAC };
+/* Lenovo N100 seems to report the reversed bit for HP jack-sensing */
+static void ad_fixup_inv_jack_detect(struct hda_codec *codec,
+                                    const struct hda_fixup *fix, int action)
+{
+       if (action == HDA_FIXUP_ACT_PRE_PROBE)
+               codec->inv_jack_detect = 1;
+}
 
-/* master controls both pins 0x1a and 0x1b */
-static const struct hda_bind_ctls ad1986a_laptop_master_vol = {
-       .ops = &snd_hda_bind_vol,
-       .values = {
-               HDA_COMPOSE_AMP_VAL(0x1a, 3, 0, HDA_OUTPUT),
-               HDA_COMPOSE_AMP_VAL(0x1b, 3, 0, HDA_OUTPUT),
-               0,
-       },
+enum {
+       AD1986A_FIXUP_INV_JACK_DETECT,
+       AD1986A_FIXUP_ULTRA,
+       AD1986A_FIXUP_SAMSUNG,
+       AD1986A_FIXUP_3STACK,
+       AD1986A_FIXUP_LAPTOP,
+       AD1986A_FIXUP_LAPTOP_IMIC,
 };
 
-static const struct hda_bind_ctls ad1986a_laptop_master_sw = {
-       .ops = &snd_hda_bind_sw,
-       .values = {
-               HDA_COMPOSE_AMP_VAL(0x1a, 3, 0, HDA_OUTPUT),
-               HDA_COMPOSE_AMP_VAL(0x1b, 3, 0, HDA_OUTPUT),
-               0,
+static const struct hda_fixup ad1986a_fixups[] = {
+       [AD1986A_FIXUP_INV_JACK_DETECT] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = ad_fixup_inv_jack_detect,
        },
-};
-
-static const struct snd_kcontrol_new ad1986a_laptop_mixers[] = {
-       HDA_CODEC_VOLUME("PCM Playback Volume", 0x03, 0x0, HDA_OUTPUT),
-       HDA_CODEC_MUTE("PCM Playback Switch", 0x03, 0x0, HDA_OUTPUT),
-       HDA_BIND_VOL("Master Playback Volume", &ad1986a_laptop_master_vol),
-       HDA_BIND_SW("Master Playback Switch", &ad1986a_laptop_master_sw),
-       HDA_CODEC_VOLUME("CD Playback Volume", 0x15, 0x0, HDA_OUTPUT),
-       HDA_CODEC_MUTE("CD Playback Switch", 0x15, 0x0, HDA_OUTPUT),
-       HDA_CODEC_VOLUME("Line Playback Volume", 0x17, 0x0, HDA_OUTPUT),
-       HDA_CODEC_MUTE("Line Playback Switch", 0x17, 0x0, HDA_OUTPUT),
-       HDA_CODEC_VOLUME("Aux Playback Volume", 0x16, 0x0, HDA_OUTPUT),
-       HDA_CODEC_MUTE("Aux Playback Switch", 0x16, 0x0, HDA_OUTPUT),
-       HDA_CODEC_VOLUME("Mic Playback Volume", 0x13, 0x0, HDA_OUTPUT),
-       HDA_CODEC_MUTE("Mic Playback Switch", 0x13, 0x0, HDA_OUTPUT),
-       HDA_CODEC_VOLUME("Mic Boost Volume", 0x0f, 0x0, HDA_OUTPUT),
-       /* 
-          HDA_CODEC_VOLUME("Mono Playback Volume", 0x1e, 0x0, HDA_OUTPUT),
-          HDA_CODEC_MUTE("Mono Playback Switch", 0x1e, 0x0, HDA_OUTPUT), */
-       HDA_CODEC_VOLUME("Capture Volume", 0x12, 0x0, HDA_OUTPUT),
-       HDA_CODEC_MUTE("Capture Switch", 0x12, 0x0, HDA_OUTPUT),
-       {
-               .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
-               .name = "Capture Source",
-               .info = ad198x_mux_enum_info,
-               .get = ad198x_mux_enum_get,
-               .put = ad198x_mux_enum_put,
+       [AD1986A_FIXUP_ULTRA] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = (const struct hda_pintbl[]) {
+                       { 0x1b, 0x90170110 }, /* speaker */
+                       { 0x1d, 0x90a7013e }, /* int mic */
+                       {}
+               },
        },
-       { } /* end */
-};
-
-/* laptop-eapd model - 2ch only */
-
-static const struct hda_input_mux ad1986a_laptop_eapd_capture_source = {
-       .num_items = 3,
-       .items = {
-               { "Mic", 0x0 },
-               { "Internal Mic", 0x4 },
-               { "Mix", 0x5 },
+       [AD1986A_FIXUP_SAMSUNG] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = (const struct hda_pintbl[]) {
+                       { 0x1b, 0x90170110 }, /* speaker */
+                       { 0x1d, 0x90a7013e }, /* int mic */
+                       { 0x20, 0x411111f0 }, /* N/A */
+                       { 0x24, 0x411111f0 }, /* N/A */
+                       {}
+               },
        },
-};
-
-static const struct hda_input_mux ad1986a_automic_capture_source = {
-       .num_items = 2,
-       .items = {
-               { "Mic", 0x0 },
-               { "Mix", 0x5 },
+       [AD1986A_FIXUP_3STACK] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = (const struct hda_pintbl[]) {
+                       { 0x1a, 0x02214021 }, /* headphone */
+                       { 0x1b, 0x01014011 }, /* front */
+                       { 0x1c, 0x01013012 }, /* surround */
+                       { 0x1d, 0x01019015 }, /* clfe */
+                       { 0x1e, 0x411111f0 }, /* N/A */
+                       { 0x1f, 0x02a190f0 }, /* mic */
+                       { 0x20, 0x018130f0 }, /* line-in */
+                       {}
+               },
        },
-};
-
-static const struct snd_kcontrol_new ad1986a_laptop_master_mixers[] = {
-       HDA_BIND_VOL("Master Playback Volume", &ad1986a_laptop_master_vol),
-       HDA_BIND_SW("Master Playback Switch", &ad1986a_laptop_master_sw),
-       { } /* end */
-};
-
-static const struct snd_kcontrol_new ad1986a_laptop_eapd_mixers[] = {
-       HDA_CODEC_VOLUME("PCM Playback Volume", 0x03, 0x0, HDA_OUTPUT),
-       HDA_CODEC_MUTE("PCM Playback Switch", 0x03, 0x0, HDA_OUTPUT),
-       HDA_CODEC_VOLUME("Mic Playback Volume", 0x13, 0x0, HDA_OUTPUT),
-       HDA_CODEC_MUTE("Mic Playback Switch", 0x13, 0x0, HDA_OUTPUT),
-       HDA_CODEC_VOLUME("Mic Boost Volume", 0x0f, 0x0, HDA_OUTPUT),
-       HDA_CODEC_VOLUME("Capture Volume", 0x12, 0x0, HDA_OUTPUT),
-       HDA_CODEC_MUTE("Capture Switch", 0x12, 0x0, HDA_OUTPUT),
-       {
-               .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
-               .name = "Capture Source",
-               .info = ad198x_mux_enum_info,
-               .get = ad198x_mux_enum_get,
-               .put = ad198x_mux_enum_put,
+       [AD1986A_FIXUP_LAPTOP] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = (const struct hda_pintbl[]) {
+                       { 0x1a, 0x02214021 }, /* headphone */
+                       { 0x1b, 0x90170110 }, /* speaker */
+                       { 0x1c, 0x411111f0 }, /* N/A */
+                       { 0x1d, 0x411111f0 }, /* N/A */
+                       { 0x1e, 0x411111f0 }, /* N/A */
+                       { 0x1f, 0x02a191f0 }, /* mic */
+                       { 0x20, 0x411111f0 }, /* N/A */
+                       {}
+               },
        },
-       {
-               .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
-               .name = "External Amplifier",
-               .subdevice = HDA_SUBDEV_NID_FLAG | 0x1b,
-               .info = ad198x_eapd_info,
-               .get = ad198x_eapd_get,
-               .put = ad198x_eapd_put,
-               .private_value = 0x1b, /* port-D */
+       [AD1986A_FIXUP_LAPTOP_IMIC] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = (const struct hda_pintbl[]) {
+                       { 0x1d, 0x90a7013e }, /* int mic */
+                       {}
+               },
+               .chained_before = 1,
+               .chain_id = AD1986A_FIXUP_LAPTOP,
        },
-       { } /* end */
 };
 
-static const struct snd_kcontrol_new ad1986a_laptop_intmic_mixers[] = {
-       HDA_CODEC_VOLUME("Internal Mic Playback Volume", 0x17, 0, HDA_OUTPUT),
-       HDA_CODEC_MUTE("Internal Mic Playback Switch", 0x17, 0, HDA_OUTPUT),
-       { } /* end */
+static const struct snd_pci_quirk ad1986a_fixup_tbl[] = {
+       SND_PCI_QUIRK(0x103c, 0x30af, "HP B2800", AD1986A_FIXUP_LAPTOP_IMIC),
+       SND_PCI_QUIRK_MASK(0x1043, 0xff00, 0x8100, "ASUS P5", AD1986A_FIXUP_3STACK),
+       SND_PCI_QUIRK_MASK(0x1043, 0xff00, 0x8200, "ASUS M2", AD1986A_FIXUP_3STACK),
+       SND_PCI_QUIRK(0x10de, 0xcb84, "ASUS A8N-VM", AD1986A_FIXUP_3STACK),
+       SND_PCI_QUIRK(0x144d, 0xc01e, "FSC V2060", AD1986A_FIXUP_LAPTOP),
+       SND_PCI_QUIRK_MASK(0x144d, 0xff00, 0xc000, "Samsung", AD1986A_FIXUP_SAMSUNG),
+       SND_PCI_QUIRK(0x144d, 0xc027, "Samsung Q1", AD1986A_FIXUP_ULTRA),
+       SND_PCI_QUIRK(0x17aa, 0x2066, "Lenovo N100", AD1986A_FIXUP_INV_JACK_DETECT),
+       SND_PCI_QUIRK(0x17aa, 0x1011, "Lenovo M55", AD1986A_FIXUP_3STACK),
+       SND_PCI_QUIRK(0x17aa, 0x1017, "Lenovo A60", AD1986A_FIXUP_3STACK),
+       {}
 };
 
-/* re-connect the mic boost input according to the jack sensing */
-static void ad1986a_automic(struct hda_codec *codec)
-{
-       unsigned int present;
-       present = snd_hda_jack_detect(codec, 0x1f);
-       /* 0 = 0x1f, 2 = 0x1d, 4 = mixed */
-       snd_hda_codec_write(codec, 0x0f, 0, AC_VERB_SET_CONNECT_SEL,
-                           present ? 0 : 2);
-}
-
-#define AD1986A_MIC_EVENT              0x36
-
-static void ad1986a_automic_unsol_event(struct hda_codec *codec,
-                                           unsigned int res)
-{
-       if ((res >> 26) != AD1986A_MIC_EVENT)
-               return;
-       ad1986a_automic(codec);
-}
-
-static int ad1986a_automic_init(struct hda_codec *codec)
-{
-       ad198x_init(codec);
-       ad1986a_automic(codec);
-       return 0;
-}
-
-/* laptop-automute - 2ch only */
-
-static void ad1986a_update_hp(struct hda_codec *codec)
-{
-       struct ad198x_spec *spec = codec->spec;
-       unsigned int mute;
-
-       if (spec->jack_present)
-               mute = HDA_AMP_MUTE; /* mute internal speaker */
-       else
-               /* unmute internal speaker if necessary */
-               mute = snd_hda_codec_amp_read(codec, 0x1a, 0, HDA_OUTPUT, 0);
-       snd_hda_codec_amp_stereo(codec, 0x1b, HDA_OUTPUT, 0,
-                                HDA_AMP_MUTE, mute);
-}
-
-static void ad1986a_hp_automute(struct hda_codec *codec)
-{
-       struct ad198x_spec *spec = codec->spec;
-
-       spec->jack_present = snd_hda_jack_detect(codec, 0x1a);
-       if (spec->inv_jack_detect)
-               spec->jack_present = !spec->jack_present;
-       ad1986a_update_hp(codec);
-}
-
-#define AD1986A_HP_EVENT               0x37
-
-static void ad1986a_hp_unsol_event(struct hda_codec *codec, unsigned int res)
-{
-       if ((res >> 26) != AD1986A_HP_EVENT)
-               return;
-       ad1986a_hp_automute(codec);
-}
-
-static int ad1986a_hp_init(struct hda_codec *codec)
-{
-       ad198x_init(codec);
-       ad1986a_hp_automute(codec);
-       return 0;
-}
-
-/* bind hp and internal speaker mute (with plug check) */
-static int ad1986a_hp_master_sw_put(struct snd_kcontrol *kcontrol,
-                                   struct snd_ctl_elem_value *ucontrol)
-{
-       struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
-       int change = snd_hda_mixer_amp_switch_put(kcontrol, ucontrol);
-       if (change)
-               ad1986a_update_hp(codec);
-       return change;
-}
-
-static const struct snd_kcontrol_new ad1986a_automute_master_mixers[] = {
-       HDA_BIND_VOL("Master Playback Volume", &ad1986a_laptop_master_vol),
-       {
-               .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
-               .name = "Master Playback Switch",
-               .subdevice = HDA_SUBDEV_AMP_FLAG,
-               .info = snd_hda_mixer_amp_switch_info,
-               .get = snd_hda_mixer_amp_switch_get,
-               .put = ad1986a_hp_master_sw_put,
-               .private_value = HDA_COMPOSE_AMP_VAL(0x1a, 3, 0, HDA_OUTPUT),
-       },
-       { } /* end */
-};
-
-
-/*
- * initialization verbs
- */
-static const struct hda_verb ad1986a_init_verbs[] = {
-       /* Front, Surround, CLFE DAC; mute as default */
-       {0x03, AC_VERB_SET_AMP_GAIN_MUTE, 0xb080},
-       {0x04, AC_VERB_SET_AMP_GAIN_MUTE, 0xb080},
-       {0x05, AC_VERB_SET_AMP_GAIN_MUTE, 0xb080},
-       /* Downmix - off */
-       {0x09, AC_VERB_SET_AMP_GAIN_MUTE, 0xb080},
-       /* HP, Line-Out, Surround, CLFE selectors */
-       {0x0a, AC_VERB_SET_CONNECT_SEL, 0x0},
-       {0x0b, AC_VERB_SET_CONNECT_SEL, 0x0},
-       {0x0c, AC_VERB_SET_CONNECT_SEL, 0x0},
-       {0x0d, AC_VERB_SET_CONNECT_SEL, 0x0},
-       /* Mono selector */
-       {0x0e, AC_VERB_SET_CONNECT_SEL, 0x0},
-       /* Mic selector: Mic 1/2 pin */
-       {0x0f, AC_VERB_SET_CONNECT_SEL, 0x0},
-       /* Line-in selector: Line-in */
-       {0x10, AC_VERB_SET_CONNECT_SEL, 0x0},
-       /* Mic 1/2 swap */
-       {0x11, AC_VERB_SET_CONNECT_SEL, 0x0},
-       /* Record selector: mic */
-       {0x12, AC_VERB_SET_CONNECT_SEL, 0x0},
-       /* Mic, Phone, CD, Aux, Line-In amp; mute as default */
-       {0x13, AC_VERB_SET_AMP_GAIN_MUTE, 0xb080},
-       {0x14, AC_VERB_SET_AMP_GAIN_MUTE, 0xb080},
-       {0x15, AC_VERB_SET_AMP_GAIN_MUTE, 0xb080},
-       {0x16, AC_VERB_SET_AMP_GAIN_MUTE, 0xb080},
-       {0x17, AC_VERB_SET_AMP_GAIN_MUTE, 0xb080},
-       /* PC beep */
-       {0x18, AC_VERB_SET_CONNECT_SEL, 0x0},
-       /* HP, Line-Out, Surround, CLFE, Mono pins; mute as default */
-       {0x1a, AC_VERB_SET_AMP_GAIN_MUTE, 0xb080},
-       {0x1b, AC_VERB_SET_AMP_GAIN_MUTE, 0xb080},
-       {0x1c, AC_VERB_SET_AMP_GAIN_MUTE, 0xb080},
-       {0x1d, AC_VERB_SET_AMP_GAIN_MUTE, 0xb080},
-       {0x1e, AC_VERB_SET_AMP_GAIN_MUTE, 0xb080},
-       /* HP Pin */
-       {0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, 0xc0 },
-       /* Front, Surround, CLFE Pins */
-       {0x1b, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x40 },
-       {0x1c, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x40 },
-       {0x1d, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x40 },
-       /* Mono Pin */
-       {0x1e, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x40 },
-       /* Mic Pin */
-       {0x1f, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x24 },
-       /* Line, Aux, CD, Beep-In Pin */
-       {0x20, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x20 },
-       {0x21, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x20 },
-       {0x22, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x20 },
-       {0x23, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x20 },
-       {0x24, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x20 },
-       { } /* end */
-};
-
-static const struct hda_verb ad1986a_ch2_init[] = {
-       /* Surround out -> Line In */
-       { 0x1c, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN },
-       /* Line-in selectors */
-       { 0x10, AC_VERB_SET_CONNECT_SEL, 0x1 },
-       /* CLFE -> Mic in */
-       { 0x1d, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80 },
-       /* Mic selector, mix C/LFE (backmic) and Mic (frontmic) */
-       { 0x0f, AC_VERB_SET_CONNECT_SEL, 0x4 },
-       { } /* end */
-};
-
-static const struct hda_verb ad1986a_ch4_init[] = {
-       /* Surround out -> Surround */
-       { 0x1c, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
-       { 0x10, AC_VERB_SET_CONNECT_SEL, 0x0 },
-       /* CLFE -> Mic in */
-       { 0x1d, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80 },
-       { 0x0f, AC_VERB_SET_CONNECT_SEL, 0x4 },
-       { } /* end */
-};
-
-static const struct hda_verb ad1986a_ch6_init[] = {
-       /* Surround out -> Surround out */
-       { 0x1c, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
-       { 0x10, AC_VERB_SET_CONNECT_SEL, 0x0 },
-       /* CLFE -> CLFE */
-       { 0x1d, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
-       { 0x0f, AC_VERB_SET_CONNECT_SEL, 0x0 },
-       { } /* end */
-};
-
-static const struct hda_channel_mode ad1986a_modes[3] = {
-       { 2, ad1986a_ch2_init },
-       { 4, ad1986a_ch4_init },
-       { 6, ad1986a_ch6_init },
-};
-
-/* eapd initialization */
-static const struct hda_verb ad1986a_eapd_init_verbs[] = {
-       {0x1b, AC_VERB_SET_EAPD_BTLENABLE, 0x00 },
-       {}
-};
-
-static const struct hda_verb ad1986a_automic_verbs[] = {
-       {0x1d, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80},
-       {0x1f, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80},
-       /*{0x20, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80},*/
-       {0x0f, AC_VERB_SET_CONNECT_SEL, 0x0},
-       {0x1f, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | AD1986A_MIC_EVENT},
-       {}
-};
-
-/* Ultra initialization */
-static const struct hda_verb ad1986a_ultra_init[] = {
-       /* eapd initialization */
-       { 0x1b, AC_VERB_SET_EAPD_BTLENABLE, 0x00 },
-       /* CLFE -> Mic in */
-       { 0x0f, AC_VERB_SET_CONNECT_SEL, 0x2 },
-       { 0x1d, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x24 },
-       { 0x1d, AC_VERB_SET_AMP_GAIN_MUTE, 0xb080 },
-       { } /* end */
-};
-
-/* pin sensing on HP jack */
-static const struct hda_verb ad1986a_hp_init_verbs[] = {
-       {0x1a, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | AD1986A_HP_EVENT},
-       {}
-};
-
-static void ad1986a_samsung_p50_unsol_event(struct hda_codec *codec,
-                                           unsigned int res)
-{
-       switch (res >> 26) {
-       case AD1986A_HP_EVENT:
-               ad1986a_hp_automute(codec);
-               break;
-       case AD1986A_MIC_EVENT:
-               ad1986a_automic(codec);
-               break;
-       }
-}
-
-static int ad1986a_samsung_p50_init(struct hda_codec *codec)
-{
-       ad198x_init(codec);
-       ad1986a_hp_automute(codec);
-       ad1986a_automic(codec);
-       return 0;
-}
-
-
-/* models */
-enum {
-       AD1986A_AUTO,
-       AD1986A_6STACK,
-       AD1986A_3STACK,
-       AD1986A_LAPTOP,
-       AD1986A_LAPTOP_EAPD,
-       AD1986A_LAPTOP_AUTOMUTE,
-       AD1986A_ULTRA,
-       AD1986A_SAMSUNG,
-       AD1986A_SAMSUNG_P50,
-       AD1986A_MODELS
-};
-
-static const char * const ad1986a_models[AD1986A_MODELS] = {
-       [AD1986A_AUTO]          = "auto",
-       [AD1986A_6STACK]        = "6stack",
-       [AD1986A_3STACK]        = "3stack",
-       [AD1986A_LAPTOP]        = "laptop",
-       [AD1986A_LAPTOP_EAPD]   = "laptop-eapd",
-       [AD1986A_LAPTOP_AUTOMUTE] = "laptop-automute",
-       [AD1986A_ULTRA]         = "ultra",
-       [AD1986A_SAMSUNG]       = "samsung",
-       [AD1986A_SAMSUNG_P50]   = "samsung-p50",
-};
-
-static const struct snd_pci_quirk ad1986a_cfg_tbl[] = {
-       SND_PCI_QUIRK(0x103c, 0x30af, "HP B2800", AD1986A_LAPTOP_EAPD),
-       SND_PCI_QUIRK(0x1043, 0x1153, "ASUS M9", AD1986A_LAPTOP_EAPD),
-       SND_PCI_QUIRK(0x1043, 0x11f7, "ASUS U5A", AD1986A_LAPTOP_EAPD),
-       SND_PCI_QUIRK(0x1043, 0x1213, "ASUS A6J", AD1986A_LAPTOP_EAPD),
-       SND_PCI_QUIRK(0x1043, 0x1263, "ASUS U5F", AD1986A_LAPTOP_EAPD),
-       SND_PCI_QUIRK(0x1043, 0x1297, "ASUS Z62F", AD1986A_LAPTOP_EAPD),
-       SND_PCI_QUIRK(0x1043, 0x12b3, "ASUS V1j", AD1986A_LAPTOP_EAPD),
-       SND_PCI_QUIRK(0x1043, 0x1302, "ASUS W3j", AD1986A_LAPTOP_EAPD),
-       SND_PCI_QUIRK(0x1043, 0x1443, "ASUS VX1", AD1986A_LAPTOP),
-       SND_PCI_QUIRK(0x1043, 0x1447, "ASUS A8J", AD1986A_3STACK),
-       SND_PCI_QUIRK(0x1043, 0x817f, "ASUS P5", AD1986A_3STACK),
-       SND_PCI_QUIRK(0x1043, 0x818f, "ASUS P5", AD1986A_LAPTOP),
-       SND_PCI_QUIRK(0x1043, 0x81b3, "ASUS P5", AD1986A_3STACK),
-       SND_PCI_QUIRK(0x1043, 0x81cb, "ASUS M2N", AD1986A_3STACK),
-       SND_PCI_QUIRK(0x1043, 0x8234, "ASUS M2N", AD1986A_3STACK),
-       SND_PCI_QUIRK(0x10de, 0xcb84, "ASUS A8N-VM", AD1986A_3STACK),
-       SND_PCI_QUIRK(0x1179, 0xff40, "Toshiba Satellite L40-10Q", AD1986A_3STACK),
-       SND_PCI_QUIRK(0x144d, 0xb03c, "Samsung R55", AD1986A_3STACK),
-       SND_PCI_QUIRK(0x144d, 0xc01e, "FSC V2060", AD1986A_LAPTOP),
-       SND_PCI_QUIRK(0x144d, 0xc024, "Samsung P50", AD1986A_SAMSUNG_P50),
-       SND_PCI_QUIRK(0x144d, 0xc027, "Samsung Q1", AD1986A_ULTRA),
-       SND_PCI_QUIRK_MASK(0x144d, 0xff00, 0xc000, "Samsung", AD1986A_SAMSUNG),
-       SND_PCI_QUIRK(0x144d, 0xc504, "Samsung Q35", AD1986A_3STACK),
-       SND_PCI_QUIRK(0x17aa, 0x1011, "Lenovo M55", AD1986A_LAPTOP),
-       SND_PCI_QUIRK(0x17aa, 0x1017, "Lenovo A60", AD1986A_3STACK),
-       SND_PCI_QUIRK(0x17aa, 0x2066, "Lenovo N100", AD1986A_LAPTOP_AUTOMUTE),
-       SND_PCI_QUIRK(0x17c0, 0x2017, "Samsung M50", AD1986A_LAPTOP),
-       {}
-};
-
-#ifdef CONFIG_PM
-static const struct hda_amp_list ad1986a_loopbacks[] = {
-       { 0x13, HDA_OUTPUT, 0 }, /* Mic */
-       { 0x14, HDA_OUTPUT, 0 }, /* Phone */
-       { 0x15, HDA_OUTPUT, 0 }, /* CD */
-       { 0x16, HDA_OUTPUT, 0 }, /* Aux */
-       { 0x17, HDA_OUTPUT, 0 }, /* Line */
-       { } /* end */
-};
-#endif
-
-static int is_jack_available(struct hda_codec *codec, hda_nid_t nid)
-{
-       unsigned int conf = snd_hda_codec_get_pincfg(codec, nid);
-       return get_defcfg_connect(conf) != AC_JACK_PORT_NONE;
-}
-#endif /* ENABLE_AD_STATIC_QUIRKS */
-
-static int alloc_ad_spec(struct hda_codec *codec)
-{
-       struct ad198x_spec *spec;
-
-       spec = kzalloc(sizeof(*spec), GFP_KERNEL);
-       if (!spec)
-               return -ENOMEM;
-       codec->spec = spec;
-       snd_hda_gen_spec_init(&spec->gen);
-       return 0;
-}
-
-/*
- * AD1986A fixup codes
- */
-
-/* Lenovo N100 seems to report the reversed bit for HP jack-sensing */
-static void ad_fixup_inv_jack_detect(struct hda_codec *codec,
-                                    const struct hda_fixup *fix, int action)
-{
-       if (action == HDA_FIXUP_ACT_PRE_PROBE)
-               codec->inv_jack_detect = 1;
-}
-
-enum {
-       AD1986A_FIXUP_INV_JACK_DETECT,
-};
-
-static const struct hda_fixup ad1986a_fixups[] = {
-       [AD1986A_FIXUP_INV_JACK_DETECT] = {
-               .type = HDA_FIXUP_FUNC,
-               .v.func = ad_fixup_inv_jack_detect,
-       },
-};
-
-static const struct snd_pci_quirk ad1986a_fixup_tbl[] = {
-       SND_PCI_QUIRK(0x17aa, 0x2066, "Lenovo N100", AD1986A_FIXUP_INV_JACK_DETECT),
+static const struct hda_model_fixup ad1986a_fixup_models[] = {
+       { .id = AD1986A_FIXUP_3STACK, .name = "3stack" },
+       { .id = AD1986A_FIXUP_LAPTOP, .name = "laptop" },
+       { .id = AD1986A_FIXUP_LAPTOP_IMIC, .name = "laptop-imic" },
+       { .id = AD1986A_FIXUP_LAPTOP_IMIC, .name = "laptop-eapd" }, /* alias */
        {}
 };
 
 /*
  */
-static int ad1986a_parse_auto_config(struct hda_codec *codec)
+static int patch_ad1986a(struct hda_codec *codec)
 {
        int err;
        struct ad198x_spec *spec;
@@ -1244,7 +341,8 @@ static int ad1986a_parse_auto_config(struct hda_codec *codec)
         */
        spec->gen.multiout.no_share_stream = 1;
 
-       snd_hda_pick_fixup(codec, NULL, ad1986a_fixup_tbl, ad1986a_fixups);
+       snd_hda_pick_fixup(codec, ad1986a_fixup_models, ad1986a_fixup_tbl,
+                          ad1986a_fixups);
        snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_PRE_PROBE);
 
        err = ad198x_parse_auto_config(codec);
@@ -1258,330 +356,11 @@ static int ad1986a_parse_auto_config(struct hda_codec *codec)
        return 0;
 }
 
-#ifdef ENABLE_AD_STATIC_QUIRKS
-static int patch_ad1986a(struct hda_codec *codec)
-{
-       struct ad198x_spec *spec;
-       int err, board_config;
-
-       board_config = snd_hda_check_board_config(codec, AD1986A_MODELS,
-                                                 ad1986a_models,
-                                                 ad1986a_cfg_tbl);
-       if (board_config < 0) {
-               printk(KERN_INFO "hda_codec: %s: BIOS auto-probing.\n",
-                      codec->chip_name);
-               board_config = AD1986A_AUTO;
-       }
-
-       if (board_config == AD1986A_AUTO)
-               return ad1986a_parse_auto_config(codec);
-
-       err = alloc_ad_spec(codec);
-       if (err < 0)
-               return err;
-       spec = codec->spec;
-
-       err = snd_hda_attach_beep_device(codec, 0x19);
-       if (err < 0) {
-               ad198x_free(codec);
-               return err;
-       }
-       set_beep_amp(spec, 0x18, 0, HDA_OUTPUT);
-
-       spec->multiout.max_channels = 6;
-       spec->multiout.num_dacs = ARRAY_SIZE(ad1986a_dac_nids);
-       spec->multiout.dac_nids = ad1986a_dac_nids;
-       spec->multiout.dig_out_nid = AD1986A_SPDIF_OUT;
-       spec->num_adc_nids = 1;
-       spec->adc_nids = ad1986a_adc_nids;
-       spec->capsrc_nids = ad1986a_capsrc_nids;
-       spec->input_mux = &ad1986a_capture_source;
-       spec->num_mixers = 1;
-       spec->mixers[0] = ad1986a_mixers;
-       spec->num_init_verbs = 1;
-       spec->init_verbs[0] = ad1986a_init_verbs;
-#ifdef CONFIG_PM
-       spec->loopback.amplist = ad1986a_loopbacks;
-#endif
-       spec->vmaster_nid = 0x1b;
-       codec->inv_eapd = 1; /* AD1986A has the inverted EAPD implementation */
-
-       codec->patch_ops = ad198x_patch_ops;
-
-       /* override some parameters */
-       switch (board_config) {
-       case AD1986A_3STACK:
-               spec->num_mixers = 2;
-               spec->mixers[1] = ad1986a_3st_mixers;
-               spec->num_init_verbs = 2;
-               spec->init_verbs[1] = ad1986a_ch2_init;
-               spec->channel_mode = ad1986a_modes;
-               spec->num_channel_mode = ARRAY_SIZE(ad1986a_modes);
-               spec->need_dac_fix = 1;
-               spec->multiout.max_channels = 2;
-               spec->multiout.num_dacs = 1;
-               break;
-       case AD1986A_LAPTOP:
-               spec->mixers[0] = ad1986a_laptop_mixers;
-               spec->multiout.max_channels = 2;
-               spec->multiout.num_dacs = 1;
-               spec->multiout.dac_nids = ad1986a_laptop_dac_nids;
-               break;
-       case AD1986A_LAPTOP_EAPD:
-               spec->num_mixers = 3;
-               spec->mixers[0] = ad1986a_laptop_master_mixers;
-               spec->mixers[1] = ad1986a_laptop_eapd_mixers;
-               spec->mixers[2] = ad1986a_laptop_intmic_mixers;
-               spec->num_init_verbs = 2;
-               spec->init_verbs[1] = ad1986a_eapd_init_verbs;
-               spec->multiout.max_channels = 2;
-               spec->multiout.num_dacs = 1;
-               spec->multiout.dac_nids = ad1986a_laptop_dac_nids;
-               if (!is_jack_available(codec, 0x25))
-                       spec->multiout.dig_out_nid = 0;
-               spec->input_mux = &ad1986a_laptop_eapd_capture_source;
-               break;
-       case AD1986A_SAMSUNG:
-               spec->num_mixers = 2;
-               spec->mixers[0] = ad1986a_laptop_master_mixers;
-               spec->mixers[1] = ad1986a_laptop_eapd_mixers;
-               spec->num_init_verbs = 3;
-               spec->init_verbs[1] = ad1986a_eapd_init_verbs;
-               spec->init_verbs[2] = ad1986a_automic_verbs;
-               spec->multiout.max_channels = 2;
-               spec->multiout.num_dacs = 1;
-               spec->multiout.dac_nids = ad1986a_laptop_dac_nids;
-               if (!is_jack_available(codec, 0x25))
-                       spec->multiout.dig_out_nid = 0;
-               spec->input_mux = &ad1986a_automic_capture_source;
-               codec->patch_ops.unsol_event = ad1986a_automic_unsol_event;
-               codec->patch_ops.init = ad1986a_automic_init;
-               break;
-       case AD1986A_SAMSUNG_P50:
-               spec->num_mixers = 2;
-               spec->mixers[0] = ad1986a_automute_master_mixers;
-               spec->mixers[1] = ad1986a_laptop_eapd_mixers;
-               spec->num_init_verbs = 4;
-               spec->init_verbs[1] = ad1986a_eapd_init_verbs;
-               spec->init_verbs[2] = ad1986a_automic_verbs;
-               spec->init_verbs[3] = ad1986a_hp_init_verbs;
-               spec->multiout.max_channels = 2;
-               spec->multiout.num_dacs = 1;
-               spec->multiout.dac_nids = ad1986a_laptop_dac_nids;
-               if (!is_jack_available(codec, 0x25))
-                       spec->multiout.dig_out_nid = 0;
-               spec->input_mux = &ad1986a_automic_capture_source;
-               codec->patch_ops.unsol_event = ad1986a_samsung_p50_unsol_event;
-               codec->patch_ops.init = ad1986a_samsung_p50_init;
-               break;
-       case AD1986A_LAPTOP_AUTOMUTE:
-               spec->num_mixers = 3;
-               spec->mixers[0] = ad1986a_automute_master_mixers;
-               spec->mixers[1] = ad1986a_laptop_eapd_mixers;
-               spec->mixers[2] = ad1986a_laptop_intmic_mixers;
-               spec->num_init_verbs = 3;
-               spec->init_verbs[1] = ad1986a_eapd_init_verbs;
-               spec->init_verbs[2] = ad1986a_hp_init_verbs;
-               spec->multiout.max_channels = 2;
-               spec->multiout.num_dacs = 1;
-               spec->multiout.dac_nids = ad1986a_laptop_dac_nids;
-               if (!is_jack_available(codec, 0x25))
-                       spec->multiout.dig_out_nid = 0;
-               spec->input_mux = &ad1986a_laptop_eapd_capture_source;
-               codec->patch_ops.unsol_event = ad1986a_hp_unsol_event;
-               codec->patch_ops.init = ad1986a_hp_init;
-               /* Lenovo N100 seems to report the reversed bit
-                * for HP jack-sensing
-                */
-               spec->inv_jack_detect = 1;
-               break;
-       case AD1986A_ULTRA:
-               spec->mixers[0] = ad1986a_laptop_eapd_mixers;
-               spec->num_init_verbs = 2;
-               spec->init_verbs[1] = ad1986a_ultra_init;
-               spec->multiout.max_channels = 2;
-               spec->multiout.num_dacs = 1;
-               spec->multiout.dac_nids = ad1986a_laptop_dac_nids;
-               spec->multiout.dig_out_nid = 0;
-               break;
-       }
-
-       /* AD1986A has a hardware problem that it can't share a stream
-        * with multiple output pins.  The copy of front to surrounds
-        * causes noisy or silent outputs at a certain timing, e.g.
-        * changing the volume.
-        * So, let's disable the shared stream.
-        */
-       spec->multiout.no_share_stream = 1;
-
-       codec->no_trigger_sense = 1;
-       codec->no_sticky_stream = 1;
-
-       return 0;
-}
-#else /* ENABLE_AD_STATIC_QUIRKS */
-#define patch_ad1986a  ad1986a_parse_auto_config
-#endif /* ENABLE_AD_STATIC_QUIRKS */
 
 /*
  * AD1983 specific
  */
 
-#ifdef ENABLE_AD_STATIC_QUIRKS
-#define AD1983_SPDIF_OUT       0x02
-#define AD1983_DAC             0x03
-#define AD1983_ADC             0x04
-
-static const hda_nid_t ad1983_dac_nids[1] = { AD1983_DAC };
-static const hda_nid_t ad1983_adc_nids[1] = { AD1983_ADC };
-static const hda_nid_t ad1983_capsrc_nids[1] = { 0x15 };
-
-static const struct hda_input_mux ad1983_capture_source = {
-       .num_items = 4,
-       .items = {
-               { "Mic", 0x0 },
-               { "Line", 0x1 },
-               { "Mix", 0x2 },
-               { "Mix Mono", 0x3 },
-       },
-};
-
-/*
- * SPDIF playback route
- */
-static int ad1983_spdif_route_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
-{
-       static const char * const texts[] = { "PCM", "ADC" };
-
-       uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
-       uinfo->count = 1;
-       uinfo->value.enumerated.items = 2;
-       if (uinfo->value.enumerated.item > 1)
-               uinfo->value.enumerated.item = 1;
-       strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]);
-       return 0;
-}
-
-static int ad1983_spdif_route_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
-{
-       struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
-       struct ad198x_spec *spec = codec->spec;
-
-       ucontrol->value.enumerated.item[0] = spec->spdif_route;
-       return 0;
-}
-
-static int ad1983_spdif_route_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
-{
-       struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
-       struct ad198x_spec *spec = codec->spec;
-
-       if (ucontrol->value.enumerated.item[0] > 1)
-               return -EINVAL;
-       if (spec->spdif_route != ucontrol->value.enumerated.item[0]) {
-               spec->spdif_route = ucontrol->value.enumerated.item[0];
-               snd_hda_codec_write_cache(codec, spec->multiout.dig_out_nid, 0,
-                                         AC_VERB_SET_CONNECT_SEL,
-                                         spec->spdif_route);
-               return 1;
-       }
-       return 0;
-}
-
-static const struct snd_kcontrol_new ad1983_mixers[] = {
-       HDA_CODEC_VOLUME("Front Playback Volume", 0x05, 0x0, HDA_OUTPUT),
-       HDA_CODEC_MUTE("Front Playback Switch", 0x05, 0x0, HDA_OUTPUT),
-       HDA_CODEC_VOLUME("Headphone Playback Volume", 0x06, 0x0, HDA_OUTPUT),
-       HDA_CODEC_MUTE("Headphone Playback Switch", 0x06, 0x0, HDA_OUTPUT),
-       HDA_CODEC_VOLUME_MONO("Mono Playback Volume", 0x07, 1, 0x0, HDA_OUTPUT),
-       HDA_CODEC_MUTE_MONO("Mono Playback Switch", 0x07, 1, 0x0, HDA_OUTPUT),
-       HDA_CODEC_VOLUME("PCM Playback Volume", 0x11, 0x0, HDA_OUTPUT),
-       HDA_CODEC_MUTE("PCM Playback Switch", 0x11, 0x0, HDA_OUTPUT),
-       HDA_CODEC_VOLUME("Mic Playback Volume", 0x12, 0x0, HDA_OUTPUT),
-       HDA_CODEC_MUTE("Mic Playback Switch", 0x12, 0x0, HDA_OUTPUT),
-       HDA_CODEC_VOLUME("Line Playback Volume", 0x13, 0x0, HDA_OUTPUT),
-       HDA_CODEC_MUTE("Line Playback Switch", 0x13, 0x0, HDA_OUTPUT),
-       HDA_CODEC_VOLUME("Mic Boost Volume", 0x0c, 0x0, HDA_OUTPUT),
-       HDA_CODEC_VOLUME("Capture Volume", 0x15, 0x0, HDA_OUTPUT),
-       HDA_CODEC_MUTE("Capture Switch", 0x15, 0x0, HDA_OUTPUT),
-       {
-               .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
-               .name = "Capture Source",
-               .info = ad198x_mux_enum_info,
-               .get = ad198x_mux_enum_get,
-               .put = ad198x_mux_enum_put,
-       },
-       {
-               .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
-               .name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,NONE) "Source",
-               .info = ad1983_spdif_route_info,
-               .get = ad1983_spdif_route_get,
-               .put = ad1983_spdif_route_put,
-       },
-       { } /* end */
-};
-
-static const struct hda_verb ad1983_init_verbs[] = {
-       /* Front, HP, Mono; mute as default */
-       {0x05, AC_VERB_SET_AMP_GAIN_MUTE, 0xb080},
-       {0x06, AC_VERB_SET_AMP_GAIN_MUTE, 0xb080},
-       {0x07, AC_VERB_SET_AMP_GAIN_MUTE, 0xb080},
-       /* Beep, PCM, Mic, Line-In: mute */
-       {0x10, AC_VERB_SET_AMP_GAIN_MUTE, 0xb080},
-       {0x11, AC_VERB_SET_AMP_GAIN_MUTE, 0xb080},
-       {0x12, AC_VERB_SET_AMP_GAIN_MUTE, 0xb080},
-       {0x13, AC_VERB_SET_AMP_GAIN_MUTE, 0xb080},
-       /* Front, HP selectors; from Mix */
-       {0x05, AC_VERB_SET_CONNECT_SEL, 0x01},
-       {0x06, AC_VERB_SET_CONNECT_SEL, 0x01},
-       /* Mono selector; from Mix */
-       {0x0b, AC_VERB_SET_CONNECT_SEL, 0x03},
-       /* Mic selector; Mic */
-       {0x0c, AC_VERB_SET_CONNECT_SEL, 0x0},
-       /* Line-in selector: Line-in */
-       {0x0d, AC_VERB_SET_CONNECT_SEL, 0x0},
-       /* Mic boost: 0dB */
-       {0x0c, AC_VERB_SET_AMP_GAIN_MUTE, 0xb000},
-       /* Record selector: mic */
-       {0x15, AC_VERB_SET_CONNECT_SEL, 0x0},
-       {0x15, AC_VERB_SET_AMP_GAIN_MUTE, 0xb080},
-       /* SPDIF route: PCM */
-       {0x02, AC_VERB_SET_CONNECT_SEL, 0x0},
-       /* Front Pin */
-       {0x05, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x40 },
-       /* HP Pin */
-       {0x06, AC_VERB_SET_PIN_WIDGET_CONTROL, 0xc0 },
-       /* Mono Pin */
-       {0x07, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x40 },
-       /* Mic Pin */
-       {0x08, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x24 },
-       /* Line Pin */
-       {0x09, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x20 },
-       { } /* end */
-};
-
-#ifdef CONFIG_PM
-static const struct hda_amp_list ad1983_loopbacks[] = {
-       { 0x12, HDA_OUTPUT, 0 }, /* Mic */
-       { 0x13, HDA_OUTPUT, 0 }, /* Line */
-       { } /* end */
-};
-#endif
-
-/* models */
-enum {
-       AD1983_AUTO,
-       AD1983_BASIC,
-       AD1983_MODELS
-};
-
-static const char * const ad1983_models[AD1983_MODELS] = {
-       [AD1983_AUTO]           = "auto",
-       [AD1983_BASIC]          = "basic",
-};
-#endif /* ENABLE_AD_STATIC_QUIRKS */
-
-
 /*
  * SPDIF mux control for AD1983 auto-parser
  */
@@ -1656,7 +435,7 @@ static int ad1983_add_spdif_mux_ctl(struct hda_codec *codec)
        return 0;
 }
 
-static int ad1983_parse_auto_config(struct hda_codec *codec)
+static int patch_ad1983(struct hda_codec *codec)
 {
        struct ad198x_spec *spec;
        int err;
@@ -1681,432 +460,11 @@ static int ad1983_parse_auto_config(struct hda_codec *codec)
        return err;
 }
 
-#ifdef ENABLE_AD_STATIC_QUIRKS
-static int patch_ad1983(struct hda_codec *codec)
-{
-       struct ad198x_spec *spec;
-       int board_config;
-       int err;
-
-       board_config = snd_hda_check_board_config(codec, AD1983_MODELS,
-                                                 ad1983_models, NULL);
-       if (board_config < 0) {
-               printk(KERN_INFO "hda_codec: %s: BIOS auto-probing.\n",
-                      codec->chip_name);
-               board_config = AD1983_AUTO;
-       }
-
-       if (board_config == AD1983_AUTO)
-               return ad1983_parse_auto_config(codec);
-
-       err = alloc_ad_spec(codec);
-       if (err < 0)
-               return err;
-       spec = codec->spec;
-
-       err = snd_hda_attach_beep_device(codec, 0x10);
-       if (err < 0) {
-               ad198x_free(codec);
-               return err;
-       }
-       set_beep_amp(spec, 0x10, 0, HDA_OUTPUT);
-
-       spec->multiout.max_channels = 2;
-       spec->multiout.num_dacs = ARRAY_SIZE(ad1983_dac_nids);
-       spec->multiout.dac_nids = ad1983_dac_nids;
-       spec->multiout.dig_out_nid = AD1983_SPDIF_OUT;
-       spec->num_adc_nids = 1;
-       spec->adc_nids = ad1983_adc_nids;
-       spec->capsrc_nids = ad1983_capsrc_nids;
-       spec->input_mux = &ad1983_capture_source;
-       spec->num_mixers = 1;
-       spec->mixers[0] = ad1983_mixers;
-       spec->num_init_verbs = 1;
-       spec->init_verbs[0] = ad1983_init_verbs;
-       spec->spdif_route = 0;
-#ifdef CONFIG_PM
-       spec->loopback.amplist = ad1983_loopbacks;
-#endif
-       spec->vmaster_nid = 0x05;
-
-       codec->patch_ops = ad198x_patch_ops;
-
-       codec->no_trigger_sense = 1;
-       codec->no_sticky_stream = 1;
-
-       return 0;
-}
-#else /* ENABLE_AD_STATIC_QUIRKS */
-#define patch_ad1983   ad1983_parse_auto_config
-#endif /* ENABLE_AD_STATIC_QUIRKS */
-
 
 /*
  * AD1981 HD specific
  */
 
-#ifdef ENABLE_AD_STATIC_QUIRKS
-#define AD1981_SPDIF_OUT       0x02
-#define AD1981_DAC             0x03
-#define AD1981_ADC             0x04
-
-static const hda_nid_t ad1981_dac_nids[1] = { AD1981_DAC };
-static const hda_nid_t ad1981_adc_nids[1] = { AD1981_ADC };
-static const hda_nid_t ad1981_capsrc_nids[1] = { 0x15 };
-
-/* 0x0c, 0x09, 0x0e, 0x0f, 0x19, 0x05, 0x18, 0x17 */
-static const struct hda_input_mux ad1981_capture_source = {
-       .num_items = 7,
-       .items = {
-               { "Front Mic", 0x0 },
-               { "Line", 0x1 },
-               { "Mix", 0x2 },
-               { "Mix Mono", 0x3 },
-               { "CD", 0x4 },
-               { "Mic", 0x6 },
-               { "Aux", 0x7 },
-       },
-};
-
-static const struct snd_kcontrol_new ad1981_mixers[] = {
-       HDA_CODEC_VOLUME("Front Playback Volume", 0x05, 0x0, HDA_OUTPUT),
-       HDA_CODEC_MUTE("Front Playback Switch", 0x05, 0x0, HDA_OUTPUT),
-       HDA_CODEC_VOLUME("Headphone Playback Volume", 0x06, 0x0, HDA_OUTPUT),
-       HDA_CODEC_MUTE("Headphone Playback Switch", 0x06, 0x0, HDA_OUTPUT),
-       HDA_CODEC_VOLUME_MONO("Mono Playback Volume", 0x07, 1, 0x0, HDA_OUTPUT),
-       HDA_CODEC_MUTE_MONO("Mono Playback Switch", 0x07, 1, 0x0, HDA_OUTPUT),
-       HDA_CODEC_VOLUME("PCM Playback Volume", 0x11, 0x0, HDA_OUTPUT),
-       HDA_CODEC_MUTE("PCM Playback Switch", 0x11, 0x0, HDA_OUTPUT),
-       HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x12, 0x0, HDA_OUTPUT),
-       HDA_CODEC_MUTE("Front Mic Playback Switch", 0x12, 0x0, HDA_OUTPUT),
-       HDA_CODEC_VOLUME("Line Playback Volume", 0x13, 0x0, HDA_OUTPUT),
-       HDA_CODEC_MUTE("Line Playback Switch", 0x13, 0x0, HDA_OUTPUT),
-       HDA_CODEC_VOLUME("Aux Playback Volume", 0x1b, 0x0, HDA_OUTPUT),
-       HDA_CODEC_MUTE("Aux Playback Switch", 0x1b, 0x0, HDA_OUTPUT),
-       HDA_CODEC_VOLUME("Mic Playback Volume", 0x1c, 0x0, HDA_OUTPUT),
-       HDA_CODEC_MUTE("Mic Playback Switch", 0x1c, 0x0, HDA_OUTPUT),
-       HDA_CODEC_VOLUME("CD Playback Volume", 0x1d, 0x0, HDA_OUTPUT),
-       HDA_CODEC_MUTE("CD Playback Switch", 0x1d, 0x0, HDA_OUTPUT),
-       HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x08, 0x0, HDA_INPUT),
-       HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0x0, HDA_INPUT),
-       HDA_CODEC_VOLUME("Capture Volume", 0x15, 0x0, HDA_OUTPUT),
-       HDA_CODEC_MUTE("Capture Switch", 0x15, 0x0, HDA_OUTPUT),
-       {
-               .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
-               .name = "Capture Source",
-               .info = ad198x_mux_enum_info,
-               .get = ad198x_mux_enum_get,
-               .put = ad198x_mux_enum_put,
-       },
-       /* identical with AD1983 */
-       {
-               .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
-               .name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,NONE) "Source",
-               .info = ad1983_spdif_route_info,
-               .get = ad1983_spdif_route_get,
-               .put = ad1983_spdif_route_put,
-       },
-       { } /* end */
-};
-
-static const struct hda_verb ad1981_init_verbs[] = {
-       /* Front, HP, Mono; mute as default */
-       {0x05, AC_VERB_SET_AMP_GAIN_MUTE, 0xb080},
-       {0x06, AC_VERB_SET_AMP_GAIN_MUTE, 0xb080},
-       {0x07, AC_VERB_SET_AMP_GAIN_MUTE, 0xb080},
-       /* Beep, PCM, Front Mic, Line, Rear Mic, Aux, CD-In: mute */
-       {0x0d, AC_VERB_SET_AMP_GAIN_MUTE, 0xb080},
-       {0x11, AC_VERB_SET_AMP_GAIN_MUTE, 0xb080},
-       {0x12, AC_VERB_SET_AMP_GAIN_MUTE, 0xb080},
-       {0x13, AC_VERB_SET_AMP_GAIN_MUTE, 0xb080},
-       {0x1b, AC_VERB_SET_AMP_GAIN_MUTE, 0xb080},
-       {0x1c, AC_VERB_SET_AMP_GAIN_MUTE, 0xb080},
-       {0x1d, AC_VERB_SET_AMP_GAIN_MUTE, 0xb080},
-       /* Front, HP selectors; from Mix */
-       {0x05, AC_VERB_SET_CONNECT_SEL, 0x01},
-       {0x06, AC_VERB_SET_CONNECT_SEL, 0x01},
-       /* Mono selector; from Mix */
-       {0x0b, AC_VERB_SET_CONNECT_SEL, 0x03},
-       /* Mic Mixer; select Front Mic */
-       {0x1e, AC_VERB_SET_AMP_GAIN_MUTE, 0xb000},
-       {0x1f, AC_VERB_SET_AMP_GAIN_MUTE, 0xb080},
-       /* Mic boost: 0dB */
-       {0x08, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
-       {0x18, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
-       /* Record selector: Front mic */
-       {0x15, AC_VERB_SET_CONNECT_SEL, 0x0},
-       {0x15, AC_VERB_SET_AMP_GAIN_MUTE, 0xb080},
-       /* SPDIF route: PCM */
-       {0x02, AC_VERB_SET_CONNECT_SEL, 0x0},
-       /* Front Pin */
-       {0x05, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x40 },
-       /* HP Pin */
-       {0x06, AC_VERB_SET_PIN_WIDGET_CONTROL, 0xc0 },
-       /* Mono Pin */
-       {0x07, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x40 },
-       /* Front & Rear Mic Pins */
-       {0x08, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x24 },
-       {0x18, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x24 },
-       /* Line Pin */
-       {0x09, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x20 },
-       /* Digital Beep */
-       {0x0d, AC_VERB_SET_CONNECT_SEL, 0x00},
-       /* Line-Out as Input: disabled */
-       {0x1a, AC_VERB_SET_AMP_GAIN_MUTE, 0xb080},
-       { } /* end */
-};
-
-#ifdef CONFIG_PM
-static const struct hda_amp_list ad1981_loopbacks[] = {
-       { 0x12, HDA_OUTPUT, 0 }, /* Front Mic */
-       { 0x13, HDA_OUTPUT, 0 }, /* Line */
-       { 0x1b, HDA_OUTPUT, 0 }, /* Aux */
-       { 0x1c, HDA_OUTPUT, 0 }, /* Mic */
-       { 0x1d, HDA_OUTPUT, 0 }, /* CD */
-       { } /* end */
-};
-#endif
-
-/*
- * Patch for HP nx6320
- *
- * nx6320 uses EAPD in the reverse way - EAPD-on means the internal
- * speaker output enabled _and_ mute-LED off.
- */
-
-#define AD1981_HP_EVENT                0x37
-#define AD1981_MIC_EVENT       0x38
-
-static const struct hda_verb ad1981_hp_init_verbs[] = {
-       {0x05, AC_VERB_SET_EAPD_BTLENABLE, 0x00 }, /* default off */
-       /* pin sensing on HP and Mic jacks */
-       {0x06, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | AD1981_HP_EVENT},
-       {0x08, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | AD1981_MIC_EVENT},
-       {}
-};
-
-/* turn on/off EAPD (+ mute HP) as a master switch */
-static int ad1981_hp_master_sw_put(struct snd_kcontrol *kcontrol,
-                                  struct snd_ctl_elem_value *ucontrol)
-{
-       struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
-       struct ad198x_spec *spec = codec->spec;
-
-       if (! ad198x_eapd_put(kcontrol, ucontrol))
-               return 0;
-       /* change speaker pin appropriately */
-       snd_hda_set_pin_ctl(codec, 0x05, spec->cur_eapd ? PIN_OUT : 0);
-       /* toggle HP mute appropriately */
-       snd_hda_codec_amp_stereo(codec, 0x06, HDA_OUTPUT, 0,
-                                HDA_AMP_MUTE,
-                                spec->cur_eapd ? 0 : HDA_AMP_MUTE);
-       return 1;
-}
-
-/* bind volumes of both NID 0x05 and 0x06 */
-static const struct hda_bind_ctls ad1981_hp_bind_master_vol = {
-       .ops = &snd_hda_bind_vol,
-       .values = {
-               HDA_COMPOSE_AMP_VAL(0x05, 3, 0, HDA_OUTPUT),
-               HDA_COMPOSE_AMP_VAL(0x06, 3, 0, HDA_OUTPUT),
-               0
-       },
-};
-
-/* mute internal speaker if HP is plugged */
-static void ad1981_hp_automute(struct hda_codec *codec)
-{
-       unsigned int present;
-
-       present = snd_hda_jack_detect(codec, 0x06);
-       snd_hda_codec_amp_stereo(codec, 0x05, HDA_OUTPUT, 0,
-                                HDA_AMP_MUTE, present ? HDA_AMP_MUTE : 0);
-}
-
-/* toggle input of built-in and mic jack appropriately */
-static void ad1981_hp_automic(struct hda_codec *codec)
-{
-       static const struct hda_verb mic_jack_on[] = {
-               {0x1f, AC_VERB_SET_AMP_GAIN_MUTE, 0xb080},
-               {0x1e, AC_VERB_SET_AMP_GAIN_MUTE, 0xb000},
-               {}
-       };
-       static const struct hda_verb mic_jack_off[] = {
-               {0x1e, AC_VERB_SET_AMP_GAIN_MUTE, 0xb080},
-               {0x1f, AC_VERB_SET_AMP_GAIN_MUTE, 0xb000},
-               {}
-       };
-       unsigned int present;
-
-       present = snd_hda_jack_detect(codec, 0x08);
-       if (present)
-               snd_hda_sequence_write(codec, mic_jack_on);
-       else
-               snd_hda_sequence_write(codec, mic_jack_off);
-}
-
-/* unsolicited event for HP jack sensing */
-static void ad1981_hp_unsol_event(struct hda_codec *codec,
-                                 unsigned int res)
-{
-       res >>= 26;
-       switch (res) {
-       case AD1981_HP_EVENT:
-               ad1981_hp_automute(codec);
-               break;
-       case AD1981_MIC_EVENT:
-               ad1981_hp_automic(codec);
-               break;
-       }
-}
-
-static const struct hda_input_mux ad1981_hp_capture_source = {
-       .num_items = 3,
-       .items = {
-               { "Mic", 0x0 },
-               { "Dock Mic", 0x1 },
-               { "Mix", 0x2 },
-       },
-};
-
-static const struct snd_kcontrol_new ad1981_hp_mixers[] = {
-       HDA_BIND_VOL("Master Playback Volume", &ad1981_hp_bind_master_vol),
-       {
-               .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
-               .subdevice = HDA_SUBDEV_NID_FLAG | 0x05,
-               .name = "Master Playback Switch",
-               .info = ad198x_eapd_info,
-               .get = ad198x_eapd_get,
-               .put = ad1981_hp_master_sw_put,
-               .private_value = 0x05,
-       },
-       HDA_CODEC_VOLUME("PCM Playback Volume", 0x11, 0x0, HDA_OUTPUT),
-       HDA_CODEC_MUTE("PCM Playback Switch", 0x11, 0x0, HDA_OUTPUT),
-#if 0
-       /* FIXME: analog mic/line loopback doesn't work with my tests...
-        *        (although recording is OK)
-        */
-       HDA_CODEC_VOLUME("Mic Playback Volume", 0x12, 0x0, HDA_OUTPUT),
-       HDA_CODEC_MUTE("Mic Playback Switch", 0x12, 0x0, HDA_OUTPUT),
-       HDA_CODEC_VOLUME("Dock Mic Playback Volume", 0x13, 0x0, HDA_OUTPUT),
-       HDA_CODEC_MUTE("Dock Mic Playback Switch", 0x13, 0x0, HDA_OUTPUT),
-       HDA_CODEC_VOLUME("Internal Mic Playback Volume", 0x1c, 0x0, HDA_OUTPUT),
-       HDA_CODEC_MUTE("Internal Mic Playback Switch", 0x1c, 0x0, HDA_OUTPUT),
-       /* FIXME: does this laptop have analog CD connection? */
-       HDA_CODEC_VOLUME("CD Playback Volume", 0x1d, 0x0, HDA_OUTPUT),
-       HDA_CODEC_MUTE("CD Playback Switch", 0x1d, 0x0, HDA_OUTPUT),
-#endif
-       HDA_CODEC_VOLUME("Mic Boost Volume", 0x08, 0x0, HDA_INPUT),
-       HDA_CODEC_VOLUME("Internal Mic Boost Volume", 0x18, 0x0, HDA_INPUT),
-       HDA_CODEC_VOLUME("Capture Volume", 0x15, 0x0, HDA_OUTPUT),
-       HDA_CODEC_MUTE("Capture Switch", 0x15, 0x0, HDA_OUTPUT),
-       {
-               .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
-               .name = "Capture Source",
-               .info = ad198x_mux_enum_info,
-               .get = ad198x_mux_enum_get,
-               .put = ad198x_mux_enum_put,
-       },
-       { } /* end */
-};
-
-/* initialize jack-sensing, too */
-static int ad1981_hp_init(struct hda_codec *codec)
-{
-       ad198x_init(codec);
-       ad1981_hp_automute(codec);
-       ad1981_hp_automic(codec);
-       return 0;
-}
-
-/* configuration for Toshiba Laptops */
-static const struct hda_verb ad1981_toshiba_init_verbs[] = {
-       {0x05, AC_VERB_SET_EAPD_BTLENABLE, 0x01 }, /* default on */
-       /* pin sensing on HP and Mic jacks */
-       {0x06, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | AD1981_HP_EVENT},
-       {0x08, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | AD1981_MIC_EVENT},
-       {}
-};
-
-static const struct snd_kcontrol_new ad1981_toshiba_mixers[] = {
-       HDA_CODEC_VOLUME("Amp Volume", 0x1a, 0x0, HDA_OUTPUT),
-       HDA_CODEC_MUTE("Amp Switch", 0x1a, 0x0, HDA_OUTPUT),
-       { }
-};
-
-/* configuration for Lenovo Thinkpad T60 */
-static const struct snd_kcontrol_new ad1981_thinkpad_mixers[] = {
-       HDA_CODEC_VOLUME("Master Playback Volume", 0x05, 0x0, HDA_OUTPUT),
-       HDA_CODEC_MUTE("Master Playback Switch", 0x05, 0x0, HDA_OUTPUT),
-       HDA_CODEC_VOLUME("PCM Playback Volume", 0x11, 0x0, HDA_OUTPUT),
-       HDA_CODEC_MUTE("PCM Playback Switch", 0x11, 0x0, HDA_OUTPUT),
-       HDA_CODEC_VOLUME("Mic Playback Volume", 0x12, 0x0, HDA_OUTPUT),
-       HDA_CODEC_MUTE("Mic Playback Switch", 0x12, 0x0, HDA_OUTPUT),
-       HDA_CODEC_VOLUME("CD Playback Volume", 0x1d, 0x0, HDA_OUTPUT),
-       HDA_CODEC_MUTE("CD Playback Switch", 0x1d, 0x0, HDA_OUTPUT),
-       HDA_CODEC_VOLUME("Mic Boost Volume", 0x08, 0x0, HDA_INPUT),
-       HDA_CODEC_VOLUME("Capture Volume", 0x15, 0x0, HDA_OUTPUT),
-       HDA_CODEC_MUTE("Capture Switch", 0x15, 0x0, HDA_OUTPUT),
-       {
-               .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
-               .name = "Capture Source",
-               .info = ad198x_mux_enum_info,
-               .get = ad198x_mux_enum_get,
-               .put = ad198x_mux_enum_put,
-       },
-       /* identical with AD1983 */
-       {
-               .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
-               .name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,NONE) "Source",
-               .info = ad1983_spdif_route_info,
-               .get = ad1983_spdif_route_get,
-               .put = ad1983_spdif_route_put,
-       },
-       { } /* end */
-};
-
-static const struct hda_input_mux ad1981_thinkpad_capture_source = {
-       .num_items = 3,
-       .items = {
-               { "Mic", 0x0 },
-               { "Mix", 0x2 },
-               { "CD", 0x4 },
-       },
-};
-
-/* models */
-enum {
-       AD1981_AUTO,
-       AD1981_BASIC,
-       AD1981_HP,
-       AD1981_THINKPAD,
-       AD1981_TOSHIBA,
-       AD1981_MODELS
-};
-
-static const char * const ad1981_models[AD1981_MODELS] = {
-       [AD1981_AUTO]           = "auto",
-       [AD1981_HP]             = "hp",
-       [AD1981_THINKPAD]       = "thinkpad",
-       [AD1981_BASIC]          = "basic",
-       [AD1981_TOSHIBA]        = "toshiba"
-};
-
-static const struct snd_pci_quirk ad1981_cfg_tbl[] = {
-       SND_PCI_QUIRK(0x1014, 0x0597, "Lenovo Z60", AD1981_THINKPAD),
-       SND_PCI_QUIRK(0x1014, 0x05b7, "Lenovo Z60m", AD1981_THINKPAD),
-       /* All HP models */
-       SND_PCI_QUIRK_VENDOR(0x103c, "HP nx", AD1981_HP),
-       SND_PCI_QUIRK(0x1179, 0x0001, "Toshiba U205", AD1981_TOSHIBA),
-       /* Lenovo Thinkpad T60/X60/Z6xx */
-       SND_PCI_QUIRK_VENDOR(0x17aa, "Lenovo Thinkpad", AD1981_THINKPAD),
-       /* HP nx6320 (reversed SSID, H/W bug) */
-       SND_PCI_QUIRK(0x30b0, 0x103c, "HP nx6320", AD1981_HP),
-       {}
-};
-#endif /* ENABLE_AD_STATIC_QUIRKS */
-
-
 /* follow EAPD via vmaster hook */
 static void ad_vmaster_eapd_hook(void *private_data, int enabled)
 {
@@ -2172,7 +530,7 @@ static const struct snd_pci_quirk ad1981_fixup_tbl[] = {
        {}
 };
 
-static int ad1981_parse_auto_config(struct hda_codec *codec)
+static int patch_ad1981(struct hda_codec *codec)
 {
        struct ad198x_spec *spec;
        int err;
@@ -2205,110 +563,6 @@ static int ad1981_parse_auto_config(struct hda_codec *codec)
        return err;
 }
 
-#ifdef ENABLE_AD_STATIC_QUIRKS
-static int patch_ad1981(struct hda_codec *codec)
-{
-       struct ad198x_spec *spec;
-       int err, board_config;
-
-       board_config = snd_hda_check_board_config(codec, AD1981_MODELS,
-                                                 ad1981_models,
-                                                 ad1981_cfg_tbl);
-       if (board_config < 0) {
-               printk(KERN_INFO "hda_codec: %s: BIOS auto-probing.\n",
-                      codec->chip_name);
-               board_config = AD1981_AUTO;
-       }
-
-       if (board_config == AD1981_AUTO)
-               return ad1981_parse_auto_config(codec);
-
-       err = alloc_ad_spec(codec);
-       if (err < 0)
-               return -ENOMEM;
-       spec = codec->spec;
-
-       err = snd_hda_attach_beep_device(codec, 0x10);
-       if (err < 0) {
-               ad198x_free(codec);
-               return err;
-       }
-       set_beep_amp(spec, 0x0d, 0, HDA_OUTPUT);
-
-       spec->multiout.max_channels = 2;
-       spec->multiout.num_dacs = ARRAY_SIZE(ad1981_dac_nids);
-       spec->multiout.dac_nids = ad1981_dac_nids;
-       spec->multiout.dig_out_nid = AD1981_SPDIF_OUT;
-       spec->num_adc_nids = 1;
-       spec->adc_nids = ad1981_adc_nids;
-       spec->capsrc_nids = ad1981_capsrc_nids;
-       spec->input_mux = &ad1981_capture_source;
-       spec->num_mixers = 1;
-       spec->mixers[0] = ad1981_mixers;
-       spec->num_init_verbs = 1;
-       spec->init_verbs[0] = ad1981_init_verbs;
-       spec->spdif_route = 0;
-#ifdef CONFIG_PM
-       spec->loopback.amplist = ad1981_loopbacks;
-#endif
-       spec->vmaster_nid = 0x05;
-
-       codec->patch_ops = ad198x_patch_ops;
-
-       /* override some parameters */
-       switch (board_config) {
-       case AD1981_HP:
-               spec->mixers[0] = ad1981_hp_mixers;
-               spec->num_init_verbs = 2;
-               spec->init_verbs[1] = ad1981_hp_init_verbs;
-               if (!is_jack_available(codec, 0x0a))
-                       spec->multiout.dig_out_nid = 0;
-               spec->input_mux = &ad1981_hp_capture_source;
-
-               codec->patch_ops.init = ad1981_hp_init;
-               codec->patch_ops.unsol_event = ad1981_hp_unsol_event;
-               /* set the upper-limit for mixer amp to 0dB for avoiding the
-                * possible damage by overloading
-                */
-               snd_hda_override_amp_caps(codec, 0x11, HDA_INPUT,
-                                         (0x17 << AC_AMPCAP_OFFSET_SHIFT) |
-                                         (0x17 << AC_AMPCAP_NUM_STEPS_SHIFT) |
-                                         (0x05 << AC_AMPCAP_STEP_SIZE_SHIFT) |
-                                         (1 << AC_AMPCAP_MUTE_SHIFT));
-               break;
-       case AD1981_THINKPAD:
-               spec->mixers[0] = ad1981_thinkpad_mixers;
-               spec->input_mux = &ad1981_thinkpad_capture_source;
-               /* set the upper-limit for mixer amp to 0dB for avoiding the
-                * possible damage by overloading
-                */
-               snd_hda_override_amp_caps(codec, 0x11, HDA_INPUT,
-                                         (0x17 << AC_AMPCAP_OFFSET_SHIFT) |
-                                         (0x17 << AC_AMPCAP_NUM_STEPS_SHIFT) |
-                                         (0x05 << AC_AMPCAP_STEP_SIZE_SHIFT) |
-                                         (1 << AC_AMPCAP_MUTE_SHIFT));
-               break;
-       case AD1981_TOSHIBA:
-               spec->mixers[0] = ad1981_hp_mixers;
-               spec->mixers[1] = ad1981_toshiba_mixers;
-               spec->num_init_verbs = 2;
-               spec->init_verbs[1] = ad1981_toshiba_init_verbs;
-               spec->multiout.dig_out_nid = 0;
-               spec->input_mux = &ad1981_hp_capture_source;
-               codec->patch_ops.init = ad1981_hp_init;
-               codec->patch_ops.unsol_event = ad1981_hp_unsol_event;
-               break;
-       }
-
-       codec->no_trigger_sense = 1;
-       codec->no_sticky_stream = 1;
-
-       return 0;
-}
-#else /* ENABLE_AD_STATIC_QUIRKS */
-#define patch_ad1981   ad1981_parse_auto_config
-#endif /* ENABLE_AD_STATIC_QUIRKS */
-
 
 /*
  * AD1988
@@ -2395,90 +649,7 @@ static int patch_ad1981(struct hda_codec *codec)
  *      E/F quad mic array
  */
 
-
 #ifdef ENABLE_AD_STATIC_QUIRKS
-/* models */
-enum {
-       AD1988_AUTO,
-       AD1988_6STACK,
-       AD1988_6STACK_DIG,
-       AD1988_3STACK,
-       AD1988_3STACK_DIG,
-       AD1988_LAPTOP,
-       AD1988_LAPTOP_DIG,
-       AD1988_MODEL_LAST,
-};
-
-/* reivision id to check workarounds */
-#define AD1988A_REV2           0x100200
-
-#define is_rev2(codec) \
-       ((codec)->vendor_id == 0x11d41988 && \
-        (codec)->revision_id == AD1988A_REV2)
-
-/*
- * mixers
- */
-
-static const hda_nid_t ad1988_6stack_dac_nids[4] = {
-       0x04, 0x06, 0x05, 0x0a
-};
-
-static const hda_nid_t ad1988_3stack_dac_nids[3] = {
-       0x04, 0x05, 0x0a
-};
-
-/* for AD1988A revision-2, DAC2-4 are swapped */
-static const hda_nid_t ad1988_6stack_dac_nids_rev2[4] = {
-       0x04, 0x05, 0x0a, 0x06
-};
-
-static const hda_nid_t ad1988_alt_dac_nid[1] = {
-       0x03
-};
-
-static const hda_nid_t ad1988_3stack_dac_nids_rev2[3] = {
-       0x04, 0x0a, 0x06
-};
-
-static const hda_nid_t ad1988_adc_nids[3] = {
-       0x08, 0x09, 0x0f
-};
-
-static const hda_nid_t ad1988_capsrc_nids[3] = {
-       0x0c, 0x0d, 0x0e
-};
-
-#define AD1988_SPDIF_OUT               0x02
-#define AD1988_SPDIF_OUT_HDMI  0x0b
-#define AD1988_SPDIF_IN                0x07
-
-static const hda_nid_t ad1989b_slave_dig_outs[] = {
-       AD1988_SPDIF_OUT, AD1988_SPDIF_OUT_HDMI, 0
-};
-
-static const struct hda_input_mux ad1988_6stack_capture_source = {
-       .num_items = 5,
-       .items = {
-               { "Front Mic", 0x1 },   /* port-B */
-               { "Line", 0x2 },        /* port-C */
-               { "Mic", 0x4 },         /* port-E */
-               { "CD", 0x5 },
-               { "Mix", 0x9 },
-       },
-};
-
-static const struct hda_input_mux ad1988_laptop_capture_source = {
-       .num_items = 3,
-       .items = {
-               { "Mic/Line", 0x1 },    /* port-B */
-               { "CD", 0x5 },
-               { "Mix", 0x9 },
-       },
-};
-
-/*
- */
 static int ad198x_ch_mode_info(struct snd_kcontrol *kcontrol,
                               struct snd_ctl_elem_info *uinfo)
 {
@@ -2509,636 +680,73 @@ static int ad198x_ch_mode_put(struct snd_kcontrol *kcontrol,
                spec->multiout.num_dacs = spec->multiout.max_channels / 2;
        return err;
 }
+#endif /* ENABLE_AD_STATIC_QUIRKS */
 
-/* 6-stack mode */
-static const struct snd_kcontrol_new ad1988_6stack_mixers1[] = {
-       HDA_CODEC_VOLUME("Front Playback Volume", 0x04, 0x0, HDA_OUTPUT),
-       HDA_CODEC_VOLUME("Surround Playback Volume", 0x06, 0x0, HDA_OUTPUT),
-       HDA_CODEC_VOLUME_MONO("Center Playback Volume", 0x05, 1, 0x0, HDA_OUTPUT),
-       HDA_CODEC_VOLUME_MONO("LFE Playback Volume", 0x05, 2, 0x0, HDA_OUTPUT),
-       HDA_CODEC_VOLUME("Side Playback Volume", 0x0a, 0x0, HDA_OUTPUT),
-       { } /* end */
-};
-
-static const struct snd_kcontrol_new ad1988_6stack_mixers1_rev2[] = {
-       HDA_CODEC_VOLUME("Front Playback Volume", 0x04, 0x0, HDA_OUTPUT),
-       HDA_CODEC_VOLUME("Surround Playback Volume", 0x05, 0x0, HDA_OUTPUT),
-       HDA_CODEC_VOLUME_MONO("Center Playback Volume", 0x0a, 1, 0x0, HDA_OUTPUT),
-       HDA_CODEC_VOLUME_MONO("LFE Playback Volume", 0x0a, 2, 0x0, HDA_OUTPUT),
-       HDA_CODEC_VOLUME("Side Playback Volume", 0x06, 0x0, HDA_OUTPUT),
-       { } /* end */
-};
-
-static const struct snd_kcontrol_new ad1988_6stack_mixers2[] = {
-       HDA_CODEC_VOLUME("Headphone Playback Volume", 0x03, 0x0, HDA_OUTPUT),
-       HDA_BIND_MUTE("Front Playback Switch", 0x29, 2, HDA_INPUT),
-       HDA_BIND_MUTE("Surround Playback Switch", 0x2a, 2, HDA_INPUT),
-       HDA_BIND_MUTE_MONO("Center Playback Switch", 0x27, 1, 2, HDA_INPUT),
-       HDA_BIND_MUTE_MONO("LFE Playback Switch", 0x27, 2, 2, HDA_INPUT),
-       HDA_BIND_MUTE("Side Playback Switch", 0x28, 2, HDA_INPUT),
-       HDA_BIND_MUTE("Headphone Playback Switch", 0x22, 2, HDA_INPUT),
-       HDA_BIND_MUTE("Mono Playback Switch", 0x1e, 2, HDA_INPUT),
-
-       HDA_CODEC_VOLUME("CD Playback Volume", 0x20, 0x6, HDA_INPUT),
-       HDA_CODEC_MUTE("CD Playback Switch", 0x20, 0x6, HDA_INPUT),
-       HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x20, 0x0, HDA_INPUT),
-       HDA_CODEC_MUTE("Front Mic Playback Switch", 0x20, 0x0, HDA_INPUT),
-       HDA_CODEC_VOLUME("Line Playback Volume", 0x20, 0x1, HDA_INPUT),
-       HDA_CODEC_MUTE("Line Playback Switch", 0x20, 0x1, HDA_INPUT),
-       HDA_CODEC_VOLUME("Mic Playback Volume", 0x20, 0x4, HDA_INPUT),
-       HDA_CODEC_MUTE("Mic Playback Switch", 0x20, 0x4, HDA_INPUT),
-
-       HDA_CODEC_VOLUME("Analog Mix Playback Volume", 0x21, 0x0, HDA_OUTPUT),
-       HDA_CODEC_MUTE("Analog Mix Playback Switch", 0x21, 0x0, HDA_OUTPUT),
-
-       HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x39, 0x0, HDA_OUTPUT),
-       HDA_CODEC_VOLUME("Mic Boost Volume", 0x3c, 0x0, HDA_OUTPUT),
-       { } /* end */
-};
-
-/* 3-stack mode */
-static const struct snd_kcontrol_new ad1988_3stack_mixers1[] = {
-       HDA_CODEC_VOLUME("Front Playback Volume", 0x04, 0x0, HDA_OUTPUT),
-       HDA_CODEC_VOLUME("Surround Playback Volume", 0x0a, 0x0, HDA_OUTPUT),
-       HDA_CODEC_VOLUME_MONO("Center Playback Volume", 0x05, 1, 0x0, HDA_OUTPUT),
-       HDA_CODEC_VOLUME_MONO("LFE Playback Volume", 0x05, 2, 0x0, HDA_OUTPUT),
-       { } /* end */
-};
-
-static const struct snd_kcontrol_new ad1988_3stack_mixers1_rev2[] = {
-       HDA_CODEC_VOLUME("Front Playback Volume", 0x04, 0x0, HDA_OUTPUT),
-       HDA_CODEC_VOLUME("Surround Playback Volume", 0x0a, 0x0, HDA_OUTPUT),
-       HDA_CODEC_VOLUME_MONO("Center Playback Volume", 0x06, 1, 0x0, HDA_OUTPUT),
-       HDA_CODEC_VOLUME_MONO("LFE Playback Volume", 0x06, 2, 0x0, HDA_OUTPUT),
-       { } /* end */
-};
-
-static const struct snd_kcontrol_new ad1988_3stack_mixers2[] = {
-       HDA_CODEC_VOLUME("Headphone Playback Volume", 0x03, 0x0, HDA_OUTPUT),
-       HDA_BIND_MUTE("Front Playback Switch", 0x29, 2, HDA_INPUT),
-       HDA_BIND_MUTE("Surround Playback Switch", 0x2c, 2, HDA_INPUT),
-       HDA_BIND_MUTE_MONO("Center Playback Switch", 0x26, 1, 2, HDA_INPUT),
-       HDA_BIND_MUTE_MONO("LFE Playback Switch", 0x26, 2, 2, HDA_INPUT),
-       HDA_BIND_MUTE("Headphone Playback Switch", 0x22, 2, HDA_INPUT),
-       HDA_BIND_MUTE("Mono Playback Switch", 0x1e, 2, HDA_INPUT),
-
-       HDA_CODEC_VOLUME("CD Playback Volume", 0x20, 0x6, HDA_INPUT),
-       HDA_CODEC_MUTE("CD Playback Switch", 0x20, 0x6, HDA_INPUT),
-       HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x20, 0x0, HDA_INPUT),
-       HDA_CODEC_MUTE("Front Mic Playback Switch", 0x20, 0x0, HDA_INPUT),
-       HDA_CODEC_VOLUME("Line Playback Volume", 0x20, 0x1, HDA_INPUT),
-       HDA_CODEC_MUTE("Line Playback Switch", 0x20, 0x1, HDA_INPUT),
-       HDA_CODEC_VOLUME("Mic Playback Volume", 0x20, 0x4, HDA_INPUT),
-       HDA_CODEC_MUTE("Mic Playback Switch", 0x20, 0x4, HDA_INPUT),
-
-       HDA_CODEC_VOLUME("Analog Mix Playback Volume", 0x21, 0x0, HDA_OUTPUT),
-       HDA_CODEC_MUTE("Analog Mix Playback Switch", 0x21, 0x0, HDA_OUTPUT),
-
-       HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x39, 0x0, HDA_OUTPUT),
-       HDA_CODEC_VOLUME("Mic Boost Volume", 0x3c, 0x0, HDA_OUTPUT),
-       {
-               .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
-               .name = "Channel Mode",
-               .info = ad198x_ch_mode_info,
-               .get = ad198x_ch_mode_get,
-               .put = ad198x_ch_mode_put,
-       },
-
-       { } /* end */
-};
-
-/* laptop mode */
-static const struct snd_kcontrol_new ad1988_laptop_mixers[] = {
-       HDA_CODEC_VOLUME("Headphone Playback Volume", 0x03, 0x0, HDA_OUTPUT),
-       HDA_CODEC_VOLUME("PCM Playback Volume", 0x04, 0x0, HDA_OUTPUT),
-       HDA_CODEC_MUTE("PCM Playback Switch", 0x29, 0x0, HDA_INPUT),
-       HDA_BIND_MUTE("Mono Playback Switch", 0x1e, 2, HDA_INPUT),
-
-       HDA_CODEC_VOLUME("CD Playback Volume", 0x20, 0x6, HDA_INPUT),
-       HDA_CODEC_MUTE("CD Playback Switch", 0x20, 0x6, HDA_INPUT),
-       HDA_CODEC_VOLUME("Mic Playback Volume", 0x20, 0x0, HDA_INPUT),
-       HDA_CODEC_MUTE("Mic Playback Switch", 0x20, 0x0, HDA_INPUT),
-       HDA_CODEC_VOLUME("Line Playback Volume", 0x20, 0x1, HDA_INPUT),
-       HDA_CODEC_MUTE("Line Playback Switch", 0x20, 0x1, HDA_INPUT),
-
-       HDA_CODEC_VOLUME("Analog Mix Playback Volume", 0x21, 0x0, HDA_OUTPUT),
-       HDA_CODEC_MUTE("Analog Mix Playback Switch", 0x21, 0x0, HDA_OUTPUT),
-
-       HDA_CODEC_VOLUME("Mic Boost Volume", 0x39, 0x0, HDA_OUTPUT),
-
-       {
-               .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
-               .name = "External Amplifier",
-               .subdevice = HDA_SUBDEV_NID_FLAG | 0x12,
-               .info = ad198x_eapd_info,
-               .get = ad198x_eapd_get,
-               .put = ad198x_eapd_put,
-               .private_value = 0x12, /* port-D */
-       },
-
-       { } /* end */
-};
-
-/* capture */
-static const struct snd_kcontrol_new ad1988_capture_mixers[] = {
-       HDA_CODEC_VOLUME("Capture Volume", 0x0c, 0x0, HDA_OUTPUT),
-       HDA_CODEC_MUTE("Capture Switch", 0x0c, 0x0, HDA_OUTPUT),
-       HDA_CODEC_VOLUME_IDX("Capture Volume", 1, 0x0d, 0x0, HDA_OUTPUT),
-       HDA_CODEC_MUTE_IDX("Capture Switch", 1, 0x0d, 0x0, HDA_OUTPUT),
-       HDA_CODEC_VOLUME_IDX("Capture Volume", 2, 0x0e, 0x0, HDA_OUTPUT),
-       HDA_CODEC_MUTE_IDX("Capture Switch", 2, 0x0e, 0x0, HDA_OUTPUT),
-       {
-               .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
-               /* The multiple "Capture Source" controls confuse alsamixer
-                * So call somewhat different..
-                */
-               /* .name = "Capture Source", */
-               .name = "Input Source",
-               .count = 3,
-               .info = ad198x_mux_enum_info,
-               .get = ad198x_mux_enum_get,
-               .put = ad198x_mux_enum_put,
-       },
-       { } /* end */
-};
-
-static int ad1988_spdif_playback_source_info(struct snd_kcontrol *kcontrol,
-                                            struct snd_ctl_elem_info *uinfo)
+static int ad1988_auto_smux_enum_info(struct snd_kcontrol *kcontrol,
+                                     struct snd_ctl_elem_info *uinfo)
 {
+       struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
        static const char * const texts[] = {
-               "PCM", "ADC1", "ADC2", "ADC3"
+               "PCM", "ADC1", "ADC2", "ADC3",
        };
-       uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
-       uinfo->count = 1;
-       uinfo->value.enumerated.items = 4;
-       if (uinfo->value.enumerated.item >= 4)
-               uinfo->value.enumerated.item = 3;
-       strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]);
-       return 0;
+       int num_conns = snd_hda_get_num_conns(codec, 0x0b) + 1;
+       if (num_conns > 4)
+               num_conns = 4;
+       return snd_hda_enum_helper_info(kcontrol, uinfo, num_conns, texts);
 }
 
-static int ad1988_spdif_playback_source_get(struct snd_kcontrol *kcontrol,
-                                           struct snd_ctl_elem_value *ucontrol)
+static int ad1988_auto_smux_enum_get(struct snd_kcontrol *kcontrol,
+                                    struct snd_ctl_elem_value *ucontrol)
 {
        struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
-       unsigned int sel;
-
-       sel = snd_hda_codec_read(codec, 0x1d, 0, AC_VERB_GET_AMP_GAIN_MUTE,
-                                AC_AMP_GET_INPUT);
-       if (!(sel & 0x80))
-               ucontrol->value.enumerated.item[0] = 0;
-       else {
-               sel = snd_hda_codec_read(codec, 0x0b, 0,
-                                        AC_VERB_GET_CONNECT_SEL, 0);
-               if (sel < 3)
-                       sel++;
-               else
-                       sel = 0;
-               ucontrol->value.enumerated.item[0] = sel;
-       }
+       struct ad198x_spec *spec = codec->spec;
+
+       ucontrol->value.enumerated.item[0] = spec->cur_smux;
        return 0;
 }
 
-static int ad1988_spdif_playback_source_put(struct snd_kcontrol *kcontrol,
-                                           struct snd_ctl_elem_value *ucontrol)
+static int ad1988_auto_smux_enum_put(struct snd_kcontrol *kcontrol,
+                                    struct snd_ctl_elem_value *ucontrol)
 {
        struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
-       unsigned int val, sel;
-       int change;
+       struct ad198x_spec *spec = codec->spec;
+       unsigned int val = ucontrol->value.enumerated.item[0];
+       struct nid_path *path;
+       int num_conns = snd_hda_get_num_conns(codec, 0x0b) + 1;
 
-       val = ucontrol->value.enumerated.item[0];
-       if (val > 3)
+       if (val >= num_conns)
                return -EINVAL;
-       if (!val) {
-               sel = snd_hda_codec_read(codec, 0x1d, 0,
-                                        AC_VERB_GET_AMP_GAIN_MUTE,
-                                        AC_AMP_GET_INPUT);
-               change = sel & 0x80;
-               if (change) {
-                       snd_hda_codec_write_cache(codec, 0x1d, 0,
-                                                 AC_VERB_SET_AMP_GAIN_MUTE,
-                                                 AMP_IN_UNMUTE(0));
-                       snd_hda_codec_write_cache(codec, 0x1d, 0,
-                                                 AC_VERB_SET_AMP_GAIN_MUTE,
-                                                 AMP_IN_MUTE(1));
-               }
-       } else {
-               sel = snd_hda_codec_read(codec, 0x1d, 0,
-                                        AC_VERB_GET_AMP_GAIN_MUTE,
-                                        AC_AMP_GET_INPUT | 0x01);
-               change = sel & 0x80;
-               if (change) {
-                       snd_hda_codec_write_cache(codec, 0x1d, 0,
-                                                 AC_VERB_SET_AMP_GAIN_MUTE,
-                                                 AMP_IN_MUTE(0));
-                       snd_hda_codec_write_cache(codec, 0x1d, 0,
-                                                 AC_VERB_SET_AMP_GAIN_MUTE,
-                                                 AMP_IN_UNMUTE(1));
-               }
-               sel = snd_hda_codec_read(codec, 0x0b, 0,
-                                        AC_VERB_GET_CONNECT_SEL, 0) + 1;
-               change |= sel != val;
-               if (change)
-                       snd_hda_codec_write_cache(codec, 0x0b, 0,
-                                                 AC_VERB_SET_CONNECT_SEL,
-                                                 val - 1);
-       }
-       return change;
-}
+       if (spec->cur_smux == val)
+               return 0;
 
-static const struct snd_kcontrol_new ad1988_spdif_out_mixers[] = {
-       HDA_CODEC_VOLUME("IEC958 Playback Volume", 0x1b, 0x0, HDA_OUTPUT),
-       {
-               .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
-               .name = "IEC958 Playback Source",
-               .subdevice = HDA_SUBDEV_NID_FLAG | 0x1b,
-               .info = ad1988_spdif_playback_source_info,
-               .get = ad1988_spdif_playback_source_get,
-               .put = ad1988_spdif_playback_source_put,
-       },
-       { } /* end */
-};
+       mutex_lock(&codec->control_mutex);
+       codec->cached_write = 1;
+       path = snd_hda_get_path_from_idx(codec,
+                                        spec->smux_paths[spec->cur_smux]);
+       if (path)
+               snd_hda_activate_path(codec, path, false, true);
+       path = snd_hda_get_path_from_idx(codec, spec->smux_paths[val]);
+       if (path)
+               snd_hda_activate_path(codec, path, true, true);
+       spec->cur_smux = val;
+       codec->cached_write = 0;
+       mutex_unlock(&codec->control_mutex);
+       snd_hda_codec_flush_cache(codec); /* flush the updates */
+       return 1;
+}
 
-static const struct snd_kcontrol_new ad1988_spdif_in_mixers[] = {
-       HDA_CODEC_VOLUME("IEC958 Capture Volume", 0x1c, 0x0, HDA_INPUT),
-       { } /* end */
+static struct snd_kcontrol_new ad1988_auto_smux_mixer = {
+       .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+       .name = "IEC958 Playback Source",
+       .info = ad1988_auto_smux_enum_info,
+       .get = ad1988_auto_smux_enum_get,
+       .put = ad1988_auto_smux_enum_put,
 };
 
-static const struct snd_kcontrol_new ad1989_spdif_out_mixers[] = {
-       HDA_CODEC_VOLUME("IEC958 Playback Volume", 0x1b, 0x0, HDA_OUTPUT),
-       HDA_CODEC_VOLUME("HDMI Playback Volume", 0x1d, 0x0, HDA_OUTPUT),
-       { } /* end */
-};
-
-/*
- * initialization verbs
- */
-
-/*
- * for 6-stack (+dig)
- */
-static const struct hda_verb ad1988_6stack_init_verbs[] = {
-       /* Front, Surround, CLFE, side DAC; unmute as default */
-       {0x04, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
-       {0x06, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
-       {0x05, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
-       {0x0a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
-       /* Port-A front headphon path */
-       {0x37, AC_VERB_SET_CONNECT_SEL, 0x00}, /* DAC0:03h */
-       {0x22, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
-       {0x22, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
-       {0x11, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
-       {0x11, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP},
-       /* Port-D line-out path */
-       {0x29, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
-       {0x29, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
-       {0x12, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
-       {0x12, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
-       /* Port-F surround path */
-       {0x2a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
-       {0x2a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
-       {0x16, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
-       {0x16, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
-       /* Port-G CLFE path */
-       {0x27, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
-       {0x27, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
-       {0x24, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
-       {0x24, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
-       /* Port-H side path */
-       {0x28, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
-       {0x28, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
-       {0x25, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
-       {0x25, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
-       /* Mono out path */
-       {0x36, AC_VERB_SET_CONNECT_SEL, 0x1}, /* DAC1:04h */
-       {0x1e, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
-       {0x1e, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
-       {0x13, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
-       {0x13, AC_VERB_SET_AMP_GAIN_MUTE, 0xb01f}, /* unmute, 0dB */
-       /* Port-B front mic-in path */
-       {0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
-       {0x14, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80},
-       {0x39, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO},
-       /* Port-C line-in path */
-       {0x15, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
-       {0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN},
-       {0x3a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO},
-       {0x33, AC_VERB_SET_CONNECT_SEL, 0x0},
-       /* Port-E mic-in path */
-       {0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
-       {0x17, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80},
-       {0x3c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO},
-       {0x34, AC_VERB_SET_CONNECT_SEL, 0x0},
-       /* Analog CD Input */
-       {0x18, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN},
-       /* Analog Mix output amp */
-       {0x21, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE | 0x1f}, /* 0dB */
-
-       { }
-};
-
-static const struct hda_verb ad1988_6stack_fp_init_verbs[] = {
-       /* Headphone; unmute as default */
-       {0x03, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
-       /* Port-A front headphon path */
-       {0x37, AC_VERB_SET_CONNECT_SEL, 0x00}, /* DAC0:03h */
-       {0x22, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
-       {0x22, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
-       {0x11, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
-       {0x11, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP},
-
-       { }
-};
-
-static const struct hda_verb ad1988_capture_init_verbs[] = {
-       /* mute analog mix */
-       {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
-       {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
-       {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(2)},
-       {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)},
-       {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(4)},
-       {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(5)},
-       {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(6)},
-       {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(7)},
-       /* select ADCs - front-mic */
-       {0x0c, AC_VERB_SET_CONNECT_SEL, 0x1},
-       {0x0d, AC_VERB_SET_CONNECT_SEL, 0x1},
-       {0x0e, AC_VERB_SET_CONNECT_SEL, 0x1},
-
-       { }
-};
-
-static const struct hda_verb ad1988_spdif_init_verbs[] = {
-       /* SPDIF out sel */
-       {0x02, AC_VERB_SET_CONNECT_SEL, 0x0}, /* PCM */
-       {0x0b, AC_VERB_SET_CONNECT_SEL, 0x0}, /* ADC1 */
-       {0x1d, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
-       {0x1d, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
-       /* SPDIF out pin */
-       {0x1b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE | 0x27}, /* 0dB */
-
-       { }
-};
-
-static const struct hda_verb ad1988_spdif_in_init_verbs[] = {
-       /* unmute SPDIF input pin */
-       {0x1c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
-       { }
-};
-
-/* AD1989 has no ADC -> SPDIF route */
-static const struct hda_verb ad1989_spdif_init_verbs[] = {
-       /* SPDIF-1 out pin */
-       {0x1b, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
-       {0x1b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE | 0x27}, /* 0dB */
-       /* SPDIF-2/HDMI out pin */
-       {0x1d, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
-       {0x1d, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE | 0x27}, /* 0dB */
-       { }
-};
-
-/*
- * verbs for 3stack (+dig)
- */
-static const struct hda_verb ad1988_3stack_ch2_init[] = {
-       /* set port-C to line-in */
-       { 0x15, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE },
-       { 0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN },
-       /* set port-E to mic-in */
-       { 0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE },
-       { 0x17, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80 },
-       { } /* end */
-};
-
-static const struct hda_verb ad1988_3stack_ch6_init[] = {
-       /* set port-C to surround out */
-       { 0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
-       { 0x15, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
-       /* set port-E to CLFE out */
-       { 0x17, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
-       { 0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
-       { } /* end */
-};
-
-static const struct hda_channel_mode ad1988_3stack_modes[2] = {
-       { 2, ad1988_3stack_ch2_init },
-       { 6, ad1988_3stack_ch6_init },
-};
-
-static const struct hda_verb ad1988_3stack_init_verbs[] = {
-       /* Front, Surround, CLFE, side DAC; unmute as default */
-       {0x04, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
-       {0x06, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
-       {0x05, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
-       {0x0a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
-       /* Port-A front headphon path */
-       {0x37, AC_VERB_SET_CONNECT_SEL, 0x00}, /* DAC0:03h */
-       {0x22, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
-       {0x22, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
-       {0x11, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
-       {0x11, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP},
-       /* Port-D line-out path */
-       {0x29, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
-       {0x29, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
-       {0x12, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
-       {0x12, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
-       /* Mono out path */
-       {0x36, AC_VERB_SET_CONNECT_SEL, 0x1}, /* DAC1:04h */
-       {0x1e, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
-       {0x1e, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
-       {0x13, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
-       {0x13, AC_VERB_SET_AMP_GAIN_MUTE, 0xb01f}, /* unmute, 0dB */
-       /* Port-B front mic-in path */
-       {0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
-       {0x14, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80},
-       {0x39, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO},
-       /* Port-C line-in/surround path - 6ch mode as default */
-       {0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
-       {0x15, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
-       {0x3a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO},
-       {0x31, AC_VERB_SET_CONNECT_SEL, 0x0}, /* output sel: DAC 0x05 */
-       {0x33, AC_VERB_SET_CONNECT_SEL, 0x0},
-       /* Port-E mic-in/CLFE path - 6ch mode as default */
-       {0x17, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
-       {0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
-       {0x3c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO},
-       {0x32, AC_VERB_SET_CONNECT_SEL, 0x1}, /* output sel: DAC 0x0a */
-       {0x34, AC_VERB_SET_CONNECT_SEL, 0x0},
-       /* mute analog mix */
-       {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
-       {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
-       {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(2)},
-       {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)},
-       {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(4)},
-       {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(5)},
-       {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(6)},
-       {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(7)},
-       /* select ADCs - front-mic */
-       {0x0c, AC_VERB_SET_CONNECT_SEL, 0x1},
-       {0x0d, AC_VERB_SET_CONNECT_SEL, 0x1},
-       {0x0e, AC_VERB_SET_CONNECT_SEL, 0x1},
-       /* Analog Mix output amp */
-       {0x21, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE | 0x1f}, /* 0dB */
-       { }
-};
-
-/*
- * verbs for laptop mode (+dig)
- */
-static const struct hda_verb ad1988_laptop_hp_on[] = {
-       /* unmute port-A and mute port-D */
-       { 0x11, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
-       { 0x12, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE },
-       { } /* end */
-};
-static const struct hda_verb ad1988_laptop_hp_off[] = {
-       /* mute port-A and unmute port-D */
-       { 0x11, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE },
-       { 0x12, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
-       { } /* end */
-};
-
-#define AD1988_HP_EVENT        0x01
-
-static const struct hda_verb ad1988_laptop_init_verbs[] = {
-       /* Front, Surround, CLFE, side DAC; unmute as default */
-       {0x04, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
-       {0x06, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
-       {0x05, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
-       {0x0a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
-       /* Port-A front headphon path */
-       {0x37, AC_VERB_SET_CONNECT_SEL, 0x00}, /* DAC0:03h */
-       {0x22, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
-       {0x22, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
-       {0x11, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
-       {0x11, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP},
-       /* unsolicited event for pin-sense */
-       {0x11, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | AD1988_HP_EVENT },
-       /* Port-D line-out path + EAPD */
-       {0x29, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
-       {0x29, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
-       {0x12, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
-       {0x12, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
-       {0x12, AC_VERB_SET_EAPD_BTLENABLE, 0x00}, /* EAPD-off */
-       /* Mono out path */
-       {0x36, AC_VERB_SET_CONNECT_SEL, 0x1}, /* DAC1:04h */
-       {0x1e, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
-       {0x1e, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
-       {0x13, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
-       {0x13, AC_VERB_SET_AMP_GAIN_MUTE, 0xb01f}, /* unmute, 0dB */
-       /* Port-B mic-in path */
-       {0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
-       {0x14, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80},
-       {0x39, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO},
-       /* Port-C docking station - try to output */
-       {0x15, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
-       {0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
-       {0x3a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO},
-       {0x33, AC_VERB_SET_CONNECT_SEL, 0x0},
-       /* mute analog mix */
-       {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
-       {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
-       {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(2)},
-       {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)},
-       {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(4)},
-       {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(5)},
-       {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(6)},
-       {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(7)},
-       /* select ADCs - mic */
-       {0x0c, AC_VERB_SET_CONNECT_SEL, 0x1},
-       {0x0d, AC_VERB_SET_CONNECT_SEL, 0x1},
-       {0x0e, AC_VERB_SET_CONNECT_SEL, 0x1},
-       /* Analog Mix output amp */
-       {0x21, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE | 0x1f}, /* 0dB */
-       { }
-};
-
-static void ad1988_laptop_unsol_event(struct hda_codec *codec, unsigned int res)
-{
-       if ((res >> 26) != AD1988_HP_EVENT)
-               return;
-       if (snd_hda_jack_detect(codec, 0x11))
-               snd_hda_sequence_write(codec, ad1988_laptop_hp_on);
-       else
-               snd_hda_sequence_write(codec, ad1988_laptop_hp_off);
-} 
-
-#ifdef CONFIG_PM
-static const struct hda_amp_list ad1988_loopbacks[] = {
-       { 0x20, HDA_INPUT, 0 }, /* Front Mic */
-       { 0x20, HDA_INPUT, 1 }, /* Line */
-       { 0x20, HDA_INPUT, 4 }, /* Mic */
-       { 0x20, HDA_INPUT, 6 }, /* CD */
-       { } /* end */
-};
-#endif
-#endif /* ENABLE_AD_STATIC_QUIRKS */
-
-static int ad1988_auto_smux_enum_info(struct snd_kcontrol *kcontrol,
-                                     struct snd_ctl_elem_info *uinfo)
-{
-       struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
-       static const char * const texts[] = {
-               "PCM", "ADC1", "ADC2", "ADC3",
-       };
-       int num_conns = snd_hda_get_num_conns(codec, 0x0b) + 1;
-       if (num_conns > 4)
-               num_conns = 4;
-       return snd_hda_enum_helper_info(kcontrol, uinfo, num_conns, texts);
-}
-
-static int ad1988_auto_smux_enum_get(struct snd_kcontrol *kcontrol,
-                                    struct snd_ctl_elem_value *ucontrol)
-{
-       struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
-       struct ad198x_spec *spec = codec->spec;
-
-       ucontrol->value.enumerated.item[0] = spec->cur_smux;
-       return 0;
-}
-
-static int ad1988_auto_smux_enum_put(struct snd_kcontrol *kcontrol,
-                                    struct snd_ctl_elem_value *ucontrol)
-{
-       struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
-       struct ad198x_spec *spec = codec->spec;
-       unsigned int val = ucontrol->value.enumerated.item[0];
-       struct nid_path *path;
-       int num_conns = snd_hda_get_num_conns(codec, 0x0b) + 1;
-
-       if (val >= num_conns)
-               return -EINVAL;
-       if (spec->cur_smux == val)
-               return 0;
-
-       mutex_lock(&codec->control_mutex);
-       codec->cached_write = 1;
-       path = snd_hda_get_path_from_idx(codec,
-                                        spec->smux_paths[spec->cur_smux]);
-       if (path)
-               snd_hda_activate_path(codec, path, false, true);
-       path = snd_hda_get_path_from_idx(codec, spec->smux_paths[val]);
-       if (path)
-               snd_hda_activate_path(codec, path, true, true);
-       spec->cur_smux = val;
-       codec->cached_write = 0;
-       mutex_unlock(&codec->control_mutex);
-       snd_hda_codec_flush_cache(codec); /* flush the updates */
-       return 1;
-}
-
-static struct snd_kcontrol_new ad1988_auto_smux_mixer = {
-       .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
-       .name = "IEC958 Playback Source",
-       .info = ad1988_auto_smux_enum_info,
-       .get = ad1988_auto_smux_enum_get,
-       .put = ad1988_auto_smux_enum_put,
-};
-
-static int ad1988_auto_init(struct hda_codec *codec)
-{
-       struct ad198x_spec *spec = codec->spec;
-       int i, err;
+static int ad1988_auto_init(struct hda_codec *codec)
+{
+       struct ad198x_spec *spec = codec->spec;
+       int i, err;
 
        err = snd_hda_gen_init(codec);
        if (err < 0)
@@ -3220,7 +828,34 @@ static int ad1988_add_spdif_mux_ctl(struct hda_codec *codec)
 /*
  */
 
-static int ad1988_parse_auto_config(struct hda_codec *codec)
+enum {
+       AD1988_FIXUP_6STACK_DIG,
+};
+
+static const struct hda_fixup ad1988_fixups[] = {
+       [AD1988_FIXUP_6STACK_DIG] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = (const struct hda_pintbl[]) {
+                       { 0x11, 0x02214130 }, /* front-hp */
+                       { 0x12, 0x01014010 }, /* line-out */
+                       { 0x14, 0x02a19122 }, /* front-mic */
+                       { 0x15, 0x01813021 }, /* line-in */
+                       { 0x16, 0x01011012 }, /* line-out */
+                       { 0x17, 0x01a19020 }, /* mic */
+                       { 0x1b, 0x0145f1f0 }, /* SPDIF */
+                       { 0x24, 0x01016011 }, /* line-out */
+                       { 0x25, 0x01012013 }, /* line-out */
+                       { }
+               }
+       },
+};
+
+static const struct hda_model_fixup ad1988_fixup_models[] = {
+       { .id = AD1988_FIXUP_6STACK_DIG, .name = "6stack-dig" },
+       {}
+};
+
+static int patch_ad1988(struct hda_codec *codec)
 {
        struct ad198x_spec *spec;
        int err;
@@ -3234,12 +869,19 @@ static int ad1988_parse_auto_config(struct hda_codec *codec)
        spec->gen.mixer_merge_nid = 0x21;
        spec->gen.beep_nid = 0x10;
        set_beep_amp(spec, 0x10, 0, HDA_OUTPUT);
+
+       snd_hda_pick_fixup(codec, ad1988_fixup_models, NULL, ad1988_fixups);
+       snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_PRE_PROBE);
+
        err = ad198x_parse_auto_config(codec);
        if (err < 0)
                goto error;
        err = ad1988_add_spdif_mux_ctl(codec);
        if (err < 0)
                goto error;
+
+       snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_PROBE);
+
        return 0;
 
  error:
@@ -3247,169 +889,6 @@ static int ad1988_parse_auto_config(struct hda_codec *codec)
        return err;
 }
 
-/*
- */
-
-#ifdef ENABLE_AD_STATIC_QUIRKS
-static const char * const ad1988_models[AD1988_MODEL_LAST] = {
-       [AD1988_6STACK]         = "6stack",
-       [AD1988_6STACK_DIG]     = "6stack-dig",
-       [AD1988_3STACK]         = "3stack",
-       [AD1988_3STACK_DIG]     = "3stack-dig",
-       [AD1988_LAPTOP]         = "laptop",
-       [AD1988_LAPTOP_DIG]     = "laptop-dig",
-       [AD1988_AUTO]           = "auto",
-};
-
-static const struct snd_pci_quirk ad1988_cfg_tbl[] = {
-       SND_PCI_QUIRK(0x1043, 0x81ec, "Asus P5B-DLX", AD1988_6STACK_DIG),
-       SND_PCI_QUIRK(0x1043, 0x81f6, "Asus M2N-SLI", AD1988_6STACK_DIG),
-       SND_PCI_QUIRK(0x1043, 0x8277, "Asus P5K-E/WIFI-AP", AD1988_6STACK_DIG),
-       SND_PCI_QUIRK(0x1043, 0x82c0, "Asus M3N-HT Deluxe", AD1988_6STACK_DIG),
-       SND_PCI_QUIRK(0x1043, 0x8311, "Asus P5Q-Premium/Pro", AD1988_6STACK_DIG),
-       {}
-};
-
-static int patch_ad1988(struct hda_codec *codec)
-{
-       struct ad198x_spec *spec;
-       int err, board_config;
-
-       board_config = snd_hda_check_board_config(codec, AD1988_MODEL_LAST,
-                                                 ad1988_models, ad1988_cfg_tbl);
-       if (board_config < 0) {
-               printk(KERN_INFO "hda_codec: %s: BIOS auto-probing.\n",
-                      codec->chip_name);
-               board_config = AD1988_AUTO;
-       }
-
-       if (board_config == AD1988_AUTO)
-               return ad1988_parse_auto_config(codec);
-
-       err = alloc_ad_spec(codec);
-       if (err < 0)
-               return err;
-       spec = codec->spec;
-
-       if (is_rev2(codec))
-               snd_printk(KERN_INFO "patch_analog: AD1988A rev.2 is detected, enable workarounds\n");
-
-       err = snd_hda_attach_beep_device(codec, 0x10);
-       if (err < 0) {
-               ad198x_free(codec);
-               return err;
-       }
-       set_beep_amp(spec, 0x10, 0, HDA_OUTPUT);
-
-       if (!spec->multiout.hp_nid)
-               spec->multiout.hp_nid = ad1988_alt_dac_nid[0];
-       switch (board_config) {
-       case AD1988_6STACK:
-       case AD1988_6STACK_DIG:
-               spec->multiout.max_channels = 8;
-               spec->multiout.num_dacs = 4;
-               if (is_rev2(codec))
-                       spec->multiout.dac_nids = ad1988_6stack_dac_nids_rev2;
-               else
-                       spec->multiout.dac_nids = ad1988_6stack_dac_nids;
-               spec->input_mux = &ad1988_6stack_capture_source;
-               spec->num_mixers = 2;
-               if (is_rev2(codec))
-                       spec->mixers[0] = ad1988_6stack_mixers1_rev2;
-               else
-                       spec->mixers[0] = ad1988_6stack_mixers1;
-               spec->mixers[1] = ad1988_6stack_mixers2;
-               spec->num_init_verbs = 1;
-               spec->init_verbs[0] = ad1988_6stack_init_verbs;
-               if (board_config == AD1988_6STACK_DIG) {
-                       spec->multiout.dig_out_nid = AD1988_SPDIF_OUT;
-                       spec->dig_in_nid = AD1988_SPDIF_IN;
-               }
-               break;
-       case AD1988_3STACK:
-       case AD1988_3STACK_DIG:
-               spec->multiout.max_channels = 6;
-               spec->multiout.num_dacs = 3;
-               if (is_rev2(codec))
-                       spec->multiout.dac_nids = ad1988_3stack_dac_nids_rev2;
-               else
-                       spec->multiout.dac_nids = ad1988_3stack_dac_nids;
-               spec->input_mux = &ad1988_6stack_capture_source;
-               spec->channel_mode = ad1988_3stack_modes;
-               spec->num_channel_mode = ARRAY_SIZE(ad1988_3stack_modes);
-               spec->num_mixers = 2;
-               if (is_rev2(codec))
-                       spec->mixers[0] = ad1988_3stack_mixers1_rev2;
-               else
-                       spec->mixers[0] = ad1988_3stack_mixers1;
-               spec->mixers[1] = ad1988_3stack_mixers2;
-               spec->num_init_verbs = 1;
-               spec->init_verbs[0] = ad1988_3stack_init_verbs;
-               if (board_config == AD1988_3STACK_DIG)
-                       spec->multiout.dig_out_nid = AD1988_SPDIF_OUT;
-               break;
-       case AD1988_LAPTOP:
-       case AD1988_LAPTOP_DIG:
-               spec->multiout.max_channels = 2;
-               spec->multiout.num_dacs = 1;
-               spec->multiout.dac_nids = ad1988_3stack_dac_nids;
-               spec->input_mux = &ad1988_laptop_capture_source;
-               spec->num_mixers = 1;
-               spec->mixers[0] = ad1988_laptop_mixers;
-               codec->inv_eapd = 1; /* inverted EAPD */
-               spec->num_init_verbs = 1;
-               spec->init_verbs[0] = ad1988_laptop_init_verbs;
-               if (board_config == AD1988_LAPTOP_DIG)
-                       spec->multiout.dig_out_nid = AD1988_SPDIF_OUT;
-               break;
-       }
-
-       spec->num_adc_nids = ARRAY_SIZE(ad1988_adc_nids);
-       spec->adc_nids = ad1988_adc_nids;
-       spec->capsrc_nids = ad1988_capsrc_nids;
-       spec->mixers[spec->num_mixers++] = ad1988_capture_mixers;
-       spec->init_verbs[spec->num_init_verbs++] = ad1988_capture_init_verbs;
-       if (spec->multiout.dig_out_nid) {
-               if (codec->vendor_id >= 0x11d4989a) {
-                       spec->mixers[spec->num_mixers++] =
-                               ad1989_spdif_out_mixers;
-                       spec->init_verbs[spec->num_init_verbs++] =
-                               ad1989_spdif_init_verbs;
-                       codec->slave_dig_outs = ad1989b_slave_dig_outs;
-               } else {
-                       spec->mixers[spec->num_mixers++] =
-                               ad1988_spdif_out_mixers;
-                       spec->init_verbs[spec->num_init_verbs++] =
-                               ad1988_spdif_init_verbs;
-               }
-       }
-       if (spec->dig_in_nid && codec->vendor_id < 0x11d4989a) {
-               spec->mixers[spec->num_mixers++] = ad1988_spdif_in_mixers;
-               spec->init_verbs[spec->num_init_verbs++] =
-                       ad1988_spdif_in_init_verbs;
-       }
-
-       codec->patch_ops = ad198x_patch_ops;
-       switch (board_config) {
-       case AD1988_LAPTOP:
-       case AD1988_LAPTOP_DIG:
-               codec->patch_ops.unsol_event = ad1988_laptop_unsol_event;
-               break;
-       }
-#ifdef CONFIG_PM
-       spec->loopback.amplist = ad1988_loopbacks;
-#endif
-       spec->vmaster_nid = 0x04;
-
-       codec->no_trigger_sense = 1;
-       codec->no_sticky_stream = 1;
-
-       return 0;
-}
-#else /* ENABLE_AD_STATIC_QUIRKS */
-#define patch_ad1988   ad1988_parse_auto_config
-#endif /* ENABLE_AD_STATIC_QUIRKS */
-
 
 /*
  * AD1884 / AD1984
@@ -3423,167 +902,19 @@ static int patch_ad1988(struct hda_codec *codec)
  *
  * AD1984 = AD1884 + two digital mic-ins
  *
- * FIXME:
- * For simplicity, we share the single DAC for both HP and line-outs
- * right now.  The inidividual playbacks could be easily implemented,
- * but no build-up framework is given, so far.
- */
-
-#ifdef ENABLE_AD_STATIC_QUIRKS
-static const hda_nid_t ad1884_dac_nids[1] = {
-       0x04,
-};
-
-static const hda_nid_t ad1884_adc_nids[2] = {
-       0x08, 0x09,
-};
-
-static const hda_nid_t ad1884_capsrc_nids[2] = {
-       0x0c, 0x0d,
-};
-
-#define AD1884_SPDIF_OUT       0x02
-
-static const struct hda_input_mux ad1884_capture_source = {
-       .num_items = 4,
-       .items = {
-               { "Front Mic", 0x0 },
-               { "Mic", 0x1 },
-               { "CD", 0x2 },
-               { "Mix", 0x3 },
-       },
-};
-
-static const struct snd_kcontrol_new ad1884_base_mixers[] = {
-       HDA_CODEC_VOLUME("PCM Playback Volume", 0x04, 0x0, HDA_OUTPUT),
-       /* HDA_CODEC_VOLUME_IDX("PCM Playback Volume", 1, 0x03, 0x0, HDA_OUTPUT), */
-       HDA_CODEC_MUTE("Headphone Playback Switch", 0x11, 0x0, HDA_OUTPUT),
-       HDA_CODEC_MUTE("Front Playback Switch", 0x12, 0x0, HDA_OUTPUT),
-       HDA_CODEC_VOLUME_MONO("Mono Playback Volume", 0x13, 1, 0x0, HDA_OUTPUT),
-       HDA_CODEC_MUTE_MONO("Mono Playback Switch", 0x13, 1, 0x0, HDA_OUTPUT),
-       HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x20, 0x00, HDA_INPUT),
-       HDA_CODEC_MUTE("Front Mic Playback Switch", 0x20, 0x00, HDA_INPUT),
-       HDA_CODEC_VOLUME("Mic Playback Volume", 0x20, 0x01, HDA_INPUT),
-       HDA_CODEC_MUTE("Mic Playback Switch", 0x20, 0x01, HDA_INPUT),
-       HDA_CODEC_VOLUME("CD Playback Volume", 0x20, 0x02, HDA_INPUT),
-       HDA_CODEC_MUTE("CD Playback Switch", 0x20, 0x02, HDA_INPUT),
-       HDA_CODEC_VOLUME("Mic Boost Volume", 0x15, 0x0, HDA_INPUT),
-       HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x14, 0x0, HDA_INPUT),
-       HDA_CODEC_VOLUME("Capture Volume", 0x0c, 0x0, HDA_OUTPUT),
-       HDA_CODEC_MUTE("Capture Switch", 0x0c, 0x0, HDA_OUTPUT),
-       HDA_CODEC_VOLUME_IDX("Capture Volume", 1, 0x0d, 0x0, HDA_OUTPUT),
-       HDA_CODEC_MUTE_IDX("Capture Switch", 1, 0x0d, 0x0, HDA_OUTPUT),
-       {
-               .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
-               /* The multiple "Capture Source" controls confuse alsamixer
-                * So call somewhat different..
-                */
-               /* .name = "Capture Source", */
-               .name = "Input Source",
-               .count = 2,
-               .info = ad198x_mux_enum_info,
-               .get = ad198x_mux_enum_get,
-               .put = ad198x_mux_enum_put,
-       },
-       /* SPDIF controls */
-       HDA_CODEC_VOLUME("IEC958 Playback Volume", 0x1b, 0x0, HDA_OUTPUT),
-       {
-               .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
-               .name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,NONE) "Source",
-               /* identical with ad1983 */
-               .info = ad1983_spdif_route_info,
-               .get = ad1983_spdif_route_get,
-               .put = ad1983_spdif_route_put,
-       },
-       { } /* end */
-};
-
-static const struct snd_kcontrol_new ad1984_dmic_mixers[] = {
-       HDA_CODEC_VOLUME("Digital Mic Capture Volume", 0x05, 0x0, HDA_INPUT),
-       HDA_CODEC_MUTE("Digital Mic Capture Switch", 0x05, 0x0, HDA_INPUT),
-       HDA_CODEC_VOLUME_IDX("Digital Mic Capture Volume", 1, 0x06, 0x0,
-                            HDA_INPUT),
-       HDA_CODEC_MUTE_IDX("Digital Mic Capture Switch", 1, 0x06, 0x0,
-                          HDA_INPUT),
-       { } /* end */
-};
-
-/*
- * initialization verbs
+ * AD1883 / AD1884A / AD1984A / AD1984B
+ *
+ * port-B (0x14) - front mic-in
+ * port-E (0x1c) - rear mic-in
+ * port-F (0x16) - CD / ext out
+ * port-C (0x15) - rear line-in
+ * port-D (0x12) - rear line-out
+ * port-A (0x11) - front hp-out
+ *
+ * AD1984A = AD1884A + digital-mic
+ * AD1883 = equivalent with AD1984A
+ * AD1984B = AD1984A + extra SPDIF-out
  */
-static const struct hda_verb ad1884_init_verbs[] = {
-       /* DACs; mute as default */
-       {0x03, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO},
-       {0x04, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO},
-       /* Port-A (HP) mixer */
-       {0x07, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
-       {0x07, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1)},
-       /* Port-A pin */
-       {0x11, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP},
-       {0x11, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
-       /* HP selector - select DAC2 */
-       {0x22, AC_VERB_SET_CONNECT_SEL, 0x1},
-       /* Port-D (Line-out) mixer */
-       {0x0a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
-       {0x0a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1)},
-       /* Port-D pin */
-       {0x12, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP},
-       {0x12, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
-       /* Mono-out mixer */
-       {0x1e, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
-       {0x1e, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1)},
-       /* Mono-out pin */
-       {0x13, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP},
-       {0x13, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
-       /* Mono selector */
-       {0x0e, AC_VERB_SET_CONNECT_SEL, 0x1},
-       /* Port-B (front mic) pin */
-       {0x14, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80},
-       {0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
-       /* Port-C (rear mic) pin */
-       {0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80},
-       {0x15, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
-       /* Analog mixer; mute as default */
-       {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
-       {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
-       {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(2)},
-       {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)},
-       /* Analog Mix output amp */
-       {0x21, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE | 0x1f}, /* 0dB */
-       /* SPDIF output selector */
-       {0x02, AC_VERB_SET_CONNECT_SEL, 0x0}, /* PCM */
-       {0x1b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE | 0x27}, /* 0dB */
-       { } /* end */
-};
-
-#ifdef CONFIG_PM
-static const struct hda_amp_list ad1884_loopbacks[] = {
-       { 0x20, HDA_INPUT, 0 }, /* Front Mic */
-       { 0x20, HDA_INPUT, 1 }, /* Mic */
-       { 0x20, HDA_INPUT, 2 }, /* CD */
-       { 0x20, HDA_INPUT, 4 }, /* Docking */
-       { } /* end */
-};
-#endif
-
-static const char * const ad1884_slave_vols[] = {
-       "PCM", "Mic", "Mono", "Front Mic", "Mic", "CD",
-       "Internal Mic", "Dock Mic", /* "Beep", */ "IEC958",
-       NULL
-};
-
-enum {
-       AD1884_AUTO,
-       AD1884_BASIC,
-       AD1884_MODELS
-};
-
-static const char * const ad1884_models[AD1884_MODELS] = {
-       [AD1884_AUTO]           = "auto",
-       [AD1884_BASIC]          = "basic",
-};
-#endif /* ENABLE_AD_STATIC_QUIRKS */
-
 
 /* set the upper-limit for mixer amp to 0dB for avoiding the possible
  * damage by overloading
@@ -3599,14 +930,34 @@ static void ad1884_fixup_amp_override(struct hda_codec *codec,
                                          (1 << AC_AMPCAP_MUTE_SHIFT));
 }
 
+/* toggle GPIO1 according to the mute state */
+static void ad1884_vmaster_hp_gpio_hook(void *private_data, int enabled)
+{
+       struct hda_codec *codec = private_data;
+       struct ad198x_spec *spec = codec->spec;
+
+       if (spec->eapd_nid)
+               ad_vmaster_eapd_hook(private_data, enabled);
+       snd_hda_codec_update_cache(codec, 0x01, 0,
+                                  AC_VERB_SET_GPIO_DATA,
+                                  enabled ? 0x00 : 0x02);
+}
+
 static void ad1884_fixup_hp_eapd(struct hda_codec *codec,
                                 const struct hda_fixup *fix, int action)
 {
        struct ad198x_spec *spec = codec->spec;
+       static const struct hda_verb gpio_init_verbs[] = {
+               {0x01, AC_VERB_SET_GPIO_MASK, 0x02},
+               {0x01, AC_VERB_SET_GPIO_DIRECTION, 0x02},
+               {0x01, AC_VERB_SET_GPIO_DATA, 0x02},
+               {},
+       };
 
        switch (action) {
        case HDA_FIXUP_ACT_PRE_PROBE:
-               spec->gen.vmaster_mute.hook = ad_vmaster_eapd_hook;
+               spec->gen.vmaster_mute.hook = ad1884_vmaster_hp_gpio_hook;
+               snd_hda_sequence_write_cache(codec, gpio_init_verbs);
                break;
        case HDA_FIXUP_ACT_PROBE:
                if (spec->gen.autocfg.line_out_type == AUTO_PIN_SPEAKER_OUT)
@@ -3617,9 +968,18 @@ static void ad1884_fixup_hp_eapd(struct hda_codec *codec,
        }
 }
 
+/* set magic COEFs for dmic */
+static const struct hda_verb ad1884_dmic_init_verbs[] = {
+       {0x01, AC_VERB_SET_COEF_INDEX, 0x13f7},
+       {0x01, AC_VERB_SET_PROC_COEF, 0x08},
+       {}
+};
+
 enum {
        AD1884_FIXUP_AMP_OVERRIDE,
        AD1884_FIXUP_HP_EAPD,
+       AD1884_FIXUP_DMIC_COEF,
+       AD1884_FIXUP_HP_TOUCHSMART,
 };
 
 static const struct hda_fixup ad1884_fixups[] = {
@@ -3633,15 +993,27 @@ static const struct hda_fixup ad1884_fixups[] = {
                .chained = true,
                .chain_id = AD1884_FIXUP_AMP_OVERRIDE,
        },
+       [AD1884_FIXUP_DMIC_COEF] = {
+               .type = HDA_FIXUP_VERBS,
+               .v.verbs = ad1884_dmic_init_verbs,
+       },
+       [AD1884_FIXUP_HP_TOUCHSMART] = {
+               .type = HDA_FIXUP_VERBS,
+               .v.verbs = ad1884_dmic_init_verbs,
+               .chained = true,
+               .chain_id = AD1884_FIXUP_HP_EAPD,
+       },
 };
 
 static const struct snd_pci_quirk ad1884_fixup_tbl[] = {
+       SND_PCI_QUIRK(0x103c, 0x2a82, "HP Touchsmart", AD1884_FIXUP_HP_TOUCHSMART),
        SND_PCI_QUIRK_VENDOR(0x103c, "HP", AD1884_FIXUP_HP_EAPD),
+       SND_PCI_QUIRK_VENDOR(0x17aa, "Lenovo Thinkpad", AD1884_FIXUP_DMIC_COEF),
        {}
 };
 
 
-static int ad1884_parse_auto_config(struct hda_codec *codec)
+static int patch_ad1884(struct hda_codec *codec)
 {
        struct ad198x_spec *spec;
        int err;
@@ -3674,1170 +1046,6 @@ static int ad1884_parse_auto_config(struct hda_codec *codec)
        return err;
 }
 
-#ifdef ENABLE_AD_STATIC_QUIRKS
-static int patch_ad1884_basic(struct hda_codec *codec)
-{
-       struct ad198x_spec *spec;
-       int err;
-
-       err = alloc_ad_spec(codec);
-       if (err < 0)
-               return err;
-       spec = codec->spec;
-
-       err = snd_hda_attach_beep_device(codec, 0x10);
-       if (err < 0) {
-               ad198x_free(codec);
-               return err;
-       }
-       set_beep_amp(spec, 0x10, 0, HDA_OUTPUT);
-
-       spec->multiout.max_channels = 2;
-       spec->multiout.num_dacs = ARRAY_SIZE(ad1884_dac_nids);
-       spec->multiout.dac_nids = ad1884_dac_nids;
-       spec->multiout.dig_out_nid = AD1884_SPDIF_OUT;
-       spec->num_adc_nids = ARRAY_SIZE(ad1884_adc_nids);
-       spec->adc_nids = ad1884_adc_nids;
-       spec->capsrc_nids = ad1884_capsrc_nids;
-       spec->input_mux = &ad1884_capture_source;
-       spec->num_mixers = 1;
-       spec->mixers[0] = ad1884_base_mixers;
-       spec->num_init_verbs = 1;
-       spec->init_verbs[0] = ad1884_init_verbs;
-       spec->spdif_route = 0;
-#ifdef CONFIG_PM
-       spec->loopback.amplist = ad1884_loopbacks;
-#endif
-       spec->vmaster_nid = 0x04;
-       /* we need to cover all playback volumes */
-       spec->slave_vols = ad1884_slave_vols;
-       /* slaves may contain input volumes, so we can't raise to 0dB blindly */
-       spec->avoid_init_slave_vol = 1;
-
-       codec->patch_ops = ad198x_patch_ops;
-
-       codec->no_trigger_sense = 1;
-       codec->no_sticky_stream = 1;
-
-       return 0;
-}
-
-static int patch_ad1884(struct hda_codec *codec)
-{
-       int board_config;
-
-       board_config = snd_hda_check_board_config(codec, AD1884_MODELS,
-                                                 ad1884_models, NULL);
-       if (board_config < 0) {
-               printk(KERN_INFO "hda_codec: %s: BIOS auto-probing.\n",
-                      codec->chip_name);
-               board_config = AD1884_AUTO;
-       }
-
-       if (board_config == AD1884_AUTO)
-               return ad1884_parse_auto_config(codec);
-       else
-               return patch_ad1884_basic(codec);
-}
-#else /* ENABLE_AD_STATIC_QUIRKS */
-#define patch_ad1884   ad1884_parse_auto_config
-#endif /* ENABLE_AD_STATIC_QUIRKS */
-
-
-#ifdef ENABLE_AD_STATIC_QUIRKS
-/*
- * Lenovo Thinkpad T61/X61
- */
-static const struct hda_input_mux ad1984_thinkpad_capture_source = {
-       .num_items = 4,
-       .items = {
-               { "Mic", 0x0 },
-               { "Internal Mic", 0x1 },
-               { "Mix", 0x3 },
-               { "Dock Mic", 0x4 },
-       },
-};
-
-
-/*
- * Dell Precision T3400
- */
-static const struct hda_input_mux ad1984_dell_desktop_capture_source = {
-       .num_items = 3,
-       .items = {
-               { "Front Mic", 0x0 },
-               { "Line-In", 0x1 },
-               { "Mix", 0x3 },
-       },
-};
-
-
-static const struct snd_kcontrol_new ad1984_thinkpad_mixers[] = {
-       HDA_CODEC_VOLUME("PCM Playback Volume", 0x04, 0x0, HDA_OUTPUT),
-       /* HDA_CODEC_VOLUME_IDX("PCM Playback Volume", 1, 0x03, 0x0, HDA_OUTPUT), */
-       HDA_CODEC_MUTE("Headphone Playback Switch", 0x11, 0x0, HDA_OUTPUT),
-       HDA_CODEC_MUTE("Speaker Playback Switch", 0x12, 0x0, HDA_OUTPUT),
-       HDA_CODEC_VOLUME("Mic Playback Volume", 0x20, 0x00, HDA_INPUT),
-       HDA_CODEC_MUTE("Mic Playback Switch", 0x20, 0x00, HDA_INPUT),
-       HDA_CODEC_VOLUME("Internal Mic Playback Volume", 0x20, 0x01, HDA_INPUT),
-       HDA_CODEC_MUTE("Internal Mic Playback Switch", 0x20, 0x01, HDA_INPUT),
-       HDA_CODEC_VOLUME("Beep Playback Volume", 0x20, 0x03, HDA_INPUT),
-       HDA_CODEC_MUTE("Beep Playback Switch", 0x20, 0x03, HDA_INPUT),
-       HDA_CODEC_VOLUME("Dock Mic Playback Volume", 0x20, 0x04, HDA_INPUT),
-       HDA_CODEC_MUTE("Dock Mic Playback Switch", 0x20, 0x04, HDA_INPUT),
-       HDA_CODEC_VOLUME("Mic Boost Volume", 0x14, 0x0, HDA_INPUT),
-       HDA_CODEC_VOLUME("Internal Mic Boost Volume", 0x15, 0x0, HDA_INPUT),
-       HDA_CODEC_VOLUME("Dock Mic Boost Volume", 0x25, 0x0, HDA_OUTPUT),
-       HDA_CODEC_VOLUME("Capture Volume", 0x0c, 0x0, HDA_OUTPUT),
-       HDA_CODEC_MUTE("Capture Switch", 0x0c, 0x0, HDA_OUTPUT),
-       HDA_CODEC_VOLUME_IDX("Capture Volume", 1, 0x0d, 0x0, HDA_OUTPUT),
-       HDA_CODEC_MUTE_IDX("Capture Switch", 1, 0x0d, 0x0, HDA_OUTPUT),
-       {
-               .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
-               /* The multiple "Capture Source" controls confuse alsamixer
-                * So call somewhat different..
-                */
-               /* .name = "Capture Source", */
-               .name = "Input Source",
-               .count = 2,
-               .info = ad198x_mux_enum_info,
-               .get = ad198x_mux_enum_get,
-               .put = ad198x_mux_enum_put,
-       },
-       /* SPDIF controls */
-       HDA_CODEC_VOLUME("IEC958 Playback Volume", 0x1b, 0x0, HDA_OUTPUT),
-       {
-               .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
-               .name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,NONE) "Source",
-               /* identical with ad1983 */
-               .info = ad1983_spdif_route_info,
-               .get = ad1983_spdif_route_get,
-               .put = ad1983_spdif_route_put,
-       },
-       { } /* end */
-};
-
-/* additional verbs */
-static const struct hda_verb ad1984_thinkpad_init_verbs[] = {
-       /* Port-E (docking station mic) pin */
-       {0x1c, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80},
-       {0x1c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
-       /* docking mic boost */
-       {0x25, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO},
-       /* Analog PC Beeper - allow firmware/ACPI beeps */
-       {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(3) | 0x1a},
-       /* Analog mixer - docking mic; mute as default */
-       {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(4)},
-       /* enable EAPD bit */
-       {0x12, AC_VERB_SET_EAPD_BTLENABLE, 0x02},
-       { } /* end */
-};
-
-/*
- * Dell Precision T3400
- */
-static const struct snd_kcontrol_new ad1984_dell_desktop_mixers[] = {
-       HDA_CODEC_VOLUME("PCM Playback Volume", 0x04, 0x0, HDA_OUTPUT),
-       HDA_CODEC_MUTE("Headphone Playback Switch", 0x11, 0x0, HDA_OUTPUT),
-       HDA_CODEC_MUTE("Speaker Playback Switch", 0x12, 0x0, HDA_OUTPUT),
-       HDA_CODEC_VOLUME_MONO("Mono Playback Volume", 0x13, 1, 0x0, HDA_OUTPUT),
-       HDA_CODEC_MUTE_MONO("Mono Playback Switch", 0x13, 1, 0x0, HDA_OUTPUT),
-       HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x20, 0x00, HDA_INPUT),
-       HDA_CODEC_MUTE("Front Mic Playback Switch", 0x20, 0x00, HDA_INPUT),
-       HDA_CODEC_VOLUME("Line-In Playback Volume", 0x20, 0x01, HDA_INPUT),
-       HDA_CODEC_MUTE("Line-In Playback Switch", 0x20, 0x01, HDA_INPUT),
-       HDA_CODEC_VOLUME("Line-In Boost Volume", 0x15, 0x0, HDA_INPUT),
-       HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x14, 0x0, HDA_INPUT),
-       HDA_CODEC_VOLUME("Capture Volume", 0x0c, 0x0, HDA_OUTPUT),
-       HDA_CODEC_MUTE("Capture Switch", 0x0c, 0x0, HDA_OUTPUT),
-       HDA_CODEC_VOLUME_IDX("Capture Volume", 1, 0x0d, 0x0, HDA_OUTPUT),
-       HDA_CODEC_MUTE_IDX("Capture Switch", 1, 0x0d, 0x0, HDA_OUTPUT),
-       {
-               .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
-               /* The multiple "Capture Source" controls confuse alsamixer
-                * So call somewhat different..
-                */
-               /* .name = "Capture Source", */
-               .name = "Input Source",
-               .count = 2,
-               .info = ad198x_mux_enum_info,
-               .get = ad198x_mux_enum_get,
-               .put = ad198x_mux_enum_put,
-       },
-       { } /* end */
-};
-
-/* Digial MIC ADC NID 0x05 + 0x06 */
-static int ad1984_pcm_dmic_prepare(struct hda_pcm_stream *hinfo,
-                                  struct hda_codec *codec,
-                                  unsigned int stream_tag,
-                                  unsigned int format,
-                                  struct snd_pcm_substream *substream)
-{
-       snd_hda_codec_setup_stream(codec, 0x05 + substream->number,
-                                  stream_tag, 0, format);
-       return 0;
-}
-
-static int ad1984_pcm_dmic_cleanup(struct hda_pcm_stream *hinfo,
-                                  struct hda_codec *codec,
-                                  struct snd_pcm_substream *substream)
-{
-       snd_hda_codec_cleanup_stream(codec, 0x05 + substream->number);
-       return 0;
-}
-
-static const struct hda_pcm_stream ad1984_pcm_dmic_capture = {
-       .substreams = 2,
-       .channels_min = 2,
-       .channels_max = 2,
-       .nid = 0x05,
-       .ops = {
-               .prepare = ad1984_pcm_dmic_prepare,
-               .cleanup = ad1984_pcm_dmic_cleanup
-       },
-};
-
-static int ad1984_build_pcms(struct hda_codec *codec)
-{
-       struct ad198x_spec *spec = codec->spec;
-       struct hda_pcm *info;
-       int err;
-
-       err = ad198x_build_pcms(codec);
-       if (err < 0)
-               return err;
-
-       info = spec->pcm_rec + codec->num_pcms;
-       codec->num_pcms++;
-       info->name = "AD1984 Digital Mic";
-       info->stream[SNDRV_PCM_STREAM_CAPTURE] = ad1984_pcm_dmic_capture;
-       return 0;
-}
-
-/* models */
-enum {
-       AD1984_AUTO,
-       AD1984_BASIC,
-       AD1984_THINKPAD,
-       AD1984_DELL_DESKTOP,
-       AD1984_MODELS
-};
-
-static const char * const ad1984_models[AD1984_MODELS] = {
-       [AD1984_AUTO]           = "auto",
-       [AD1984_BASIC]          = "basic",
-       [AD1984_THINKPAD]       = "thinkpad",
-       [AD1984_DELL_DESKTOP]   = "dell_desktop",
-};
-
-static const struct snd_pci_quirk ad1984_cfg_tbl[] = {
-       /* Lenovo Thinkpad T61/X61 */
-       SND_PCI_QUIRK_VENDOR(0x17aa, "Lenovo Thinkpad", AD1984_THINKPAD),
-       SND_PCI_QUIRK(0x1028, 0x0214, "Dell T3400", AD1984_DELL_DESKTOP),
-       SND_PCI_QUIRK(0x1028, 0x0233, "Dell Latitude E6400", AD1984_DELL_DESKTOP),
-       {}
-};
-
-static int patch_ad1984(struct hda_codec *codec)
-{
-       struct ad198x_spec *spec;
-       int board_config, err;
-
-       board_config = snd_hda_check_board_config(codec, AD1984_MODELS,
-                                                 ad1984_models, ad1984_cfg_tbl);
-       if (board_config < 0) {
-               printk(KERN_INFO "hda_codec: %s: BIOS auto-probing.\n",
-                      codec->chip_name);
-               board_config = AD1984_AUTO;
-       }
-
-       if (board_config == AD1984_AUTO)
-               return ad1884_parse_auto_config(codec);
-
-       err = patch_ad1884_basic(codec);
-       if (err < 0)
-               return err;
-       spec = codec->spec;
-
-       switch (board_config) {
-       case AD1984_BASIC:
-               /* additional digital mics */
-               spec->mixers[spec->num_mixers++] = ad1984_dmic_mixers;
-               codec->patch_ops.build_pcms = ad1984_build_pcms;
-               break;
-       case AD1984_THINKPAD:
-               if (codec->subsystem_id == 0x17aa20fb) {
-                       /* Thinpad X300 does not have the ability to do SPDIF,
-                          or attach to docking station to use SPDIF */
-                       spec->multiout.dig_out_nid = 0;
-               } else
-                       spec->multiout.dig_out_nid = AD1884_SPDIF_OUT;
-               spec->input_mux = &ad1984_thinkpad_capture_source;
-               spec->mixers[0] = ad1984_thinkpad_mixers;
-               spec->init_verbs[spec->num_init_verbs++] = ad1984_thinkpad_init_verbs;
-               spec->analog_beep = 1;
-               break;
-       case AD1984_DELL_DESKTOP:
-               spec->multiout.dig_out_nid = 0;
-               spec->input_mux = &ad1984_dell_desktop_capture_source;
-               spec->mixers[0] = ad1984_dell_desktop_mixers;
-               break;
-       }
-       return 0;
-}
-#else /* ENABLE_AD_STATIC_QUIRKS */
-#define patch_ad1984   ad1884_parse_auto_config
-#endif /* ENABLE_AD_STATIC_QUIRKS */
-
-
-/*
- * AD1883 / AD1884A / AD1984A / AD1984B
- *
- * port-B (0x14) - front mic-in
- * port-E (0x1c) - rear mic-in
- * port-F (0x16) - CD / ext out
- * port-C (0x15) - rear line-in
- * port-D (0x12) - rear line-out
- * port-A (0x11) - front hp-out
- *
- * AD1984A = AD1884A + digital-mic
- * AD1883 = equivalent with AD1984A
- * AD1984B = AD1984A + extra SPDIF-out
- *
- * FIXME:
- * We share the single DAC for both HP and line-outs (see AD1884/1984).
- */
-
-#ifdef ENABLE_AD_STATIC_QUIRKS
-static const hda_nid_t ad1884a_dac_nids[1] = {
-       0x03,
-};
-
-#define ad1884a_adc_nids       ad1884_adc_nids
-#define ad1884a_capsrc_nids    ad1884_capsrc_nids
-
-#define AD1884A_SPDIF_OUT      0x02
-
-static const struct hda_input_mux ad1884a_capture_source = {
-       .num_items = 5,
-       .items = {
-               { "Front Mic", 0x0 },
-               { "Mic", 0x4 },
-               { "Line", 0x1 },
-               { "CD", 0x2 },
-               { "Mix", 0x3 },
-       },
-};
-
-static const struct snd_kcontrol_new ad1884a_base_mixers[] = {
-       HDA_CODEC_VOLUME("Master Playback Volume", 0x21, 0x0, HDA_OUTPUT),
-       HDA_CODEC_MUTE("Master Playback Switch", 0x21, 0x0, HDA_OUTPUT),
-       HDA_CODEC_MUTE("Headphone Playback Switch", 0x11, 0x0, HDA_OUTPUT),
-       HDA_CODEC_MUTE("Front Playback Switch", 0x12, 0x0, HDA_OUTPUT),
-       HDA_CODEC_VOLUME_MONO("Mono Playback Volume", 0x13, 1, 0x0, HDA_OUTPUT),
-       HDA_CODEC_MUTE_MONO("Mono Playback Switch", 0x13, 1, 0x0, HDA_OUTPUT),
-       HDA_CODEC_VOLUME("PCM Playback Volume", 0x20, 0x5, HDA_INPUT),
-       HDA_CODEC_MUTE("PCM Playback Switch", 0x20, 0x5, HDA_INPUT),
-       HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x20, 0x00, HDA_INPUT),
-       HDA_CODEC_MUTE("Front Mic Playback Switch", 0x20, 0x00, HDA_INPUT),
-       HDA_CODEC_VOLUME("Line Playback Volume", 0x20, 0x01, HDA_INPUT),
-       HDA_CODEC_MUTE("Line Playback Switch", 0x20, 0x01, HDA_INPUT),
-       HDA_CODEC_VOLUME("Mic Playback Volume", 0x20, 0x04, HDA_INPUT),
-       HDA_CODEC_MUTE("Mic Playback Switch", 0x20, 0x04, HDA_INPUT),
-       HDA_CODEC_VOLUME("CD Playback Volume", 0x20, 0x02, HDA_INPUT),
-       HDA_CODEC_MUTE("CD Playback Switch", 0x20, 0x02, HDA_INPUT),
-       HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x14, 0x0, HDA_INPUT),
-       HDA_CODEC_VOLUME("Line Boost Volume", 0x15, 0x0, HDA_INPUT),
-       HDA_CODEC_VOLUME("Mic Boost Volume", 0x25, 0x0, HDA_OUTPUT),
-       HDA_CODEC_VOLUME("Capture Volume", 0x0c, 0x0, HDA_OUTPUT),
-       HDA_CODEC_MUTE("Capture Switch", 0x0c, 0x0, HDA_OUTPUT),
-       HDA_CODEC_VOLUME_IDX("Capture Volume", 1, 0x0d, 0x0, HDA_OUTPUT),
-       HDA_CODEC_MUTE_IDX("Capture Switch", 1, 0x0d, 0x0, HDA_OUTPUT),
-       {
-               .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
-               /* The multiple "Capture Source" controls confuse alsamixer
-                * So call somewhat different..
-                */
-               /* .name = "Capture Source", */
-               .name = "Input Source",
-               .count = 2,
-               .info = ad198x_mux_enum_info,
-               .get = ad198x_mux_enum_get,
-               .put = ad198x_mux_enum_put,
-       },
-       /* SPDIF controls */
-       HDA_CODEC_VOLUME("IEC958 Playback Volume", 0x1b, 0x0, HDA_OUTPUT),
-       {
-               .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
-               .name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,NONE) "Source",
-               /* identical with ad1983 */
-               .info = ad1983_spdif_route_info,
-               .get = ad1983_spdif_route_get,
-               .put = ad1983_spdif_route_put,
-       },
-       { } /* end */
-};
-
-/*
- * initialization verbs
- */
-static const struct hda_verb ad1884a_init_verbs[] = {
-       /* DACs; unmute as default */
-       {0x03, AC_VERB_SET_AMP_GAIN_MUTE, 0x27}, /* 0dB */
-       {0x04, AC_VERB_SET_AMP_GAIN_MUTE, 0x27}, /* 0dB */
-       /* Port-A (HP) mixer - route only from analog mixer */
-       {0x07, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
-       {0x07, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1)},
-       /* Port-A pin */
-       {0x11, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP},
-       {0x11, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
-       /* Port-D (Line-out) mixer - route only from analog mixer */
-       {0x0a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
-       {0x0a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1)},
-       /* Port-D pin */
-       {0x12, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP},
-       {0x12, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
-       /* Mono-out mixer - route only from analog mixer */
-       {0x1e, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
-       {0x1e, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1)},
-       /* Mono-out pin */
-       {0x13, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP},
-       {0x13, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
-       /* Port-B (front mic) pin */
-       {0x14, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80},
-       {0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
-       /* Port-C (rear line-in) pin */
-       {0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN},
-       {0x15, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
-       /* Port-E (rear mic) pin */
-       {0x1c, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80},
-       {0x1c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
-       {0x25, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO}, /* no boost */
-       /* Port-F (CD) pin */
-       {0x16, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN},
-       {0x16, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
-       /* Analog mixer; mute as default */
-       {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
-       {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
-       {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(2)},
-       {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)},
-       {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(4)}, /* aux */
-       {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(5)},
-       /* Analog Mix output amp */
-       {0x21, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
-       /* capture sources */
-       {0x0c, AC_VERB_SET_CONNECT_SEL, 0x0},
-       {0x0c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
-       {0x0d, AC_VERB_SET_CONNECT_SEL, 0x0},
-       {0x0d, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
-       /* SPDIF output amp */
-       {0x1b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE | 0x27}, /* 0dB */
-       { } /* end */
-};
-
-#ifdef CONFIG_PM
-static const struct hda_amp_list ad1884a_loopbacks[] = {
-       { 0x20, HDA_INPUT, 0 }, /* Front Mic */
-       { 0x20, HDA_INPUT, 1 }, /* Mic */
-       { 0x20, HDA_INPUT, 2 }, /* CD */
-       { 0x20, HDA_INPUT, 4 }, /* Docking */
-       { } /* end */
-};
-#endif
-
-/*
- * Laptop model
- *
- * Port A: Headphone jack
- * Port B: MIC jack
- * Port C: Internal MIC
- * Port D: Dock Line Out (if enabled)
- * Port E: Dock Line In (if enabled)
- * Port F: Internal speakers
- */
-
-static int ad1884a_mobile_master_sw_put(struct snd_kcontrol *kcontrol,
-                                       struct snd_ctl_elem_value *ucontrol)
-{
-       struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
-       int ret = snd_hda_mixer_amp_switch_put(kcontrol, ucontrol);
-       int mute = (!ucontrol->value.integer.value[0] &&
-                   !ucontrol->value.integer.value[1]);
-       /* toggle GPIO1 according to the mute state */
-       snd_hda_codec_write_cache(codec, 0x01, 0, AC_VERB_SET_GPIO_DATA,
-                           mute ? 0x02 : 0x0);
-       return ret;
-}
-
-static const struct snd_kcontrol_new ad1884a_laptop_mixers[] = {
-       HDA_CODEC_VOLUME("Master Playback Volume", 0x21, 0x0, HDA_OUTPUT),
-       {
-               .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
-               .name = "Master Playback Switch",
-               .subdevice = HDA_SUBDEV_AMP_FLAG,
-               .info = snd_hda_mixer_amp_switch_info,
-               .get = snd_hda_mixer_amp_switch_get,
-               .put = ad1884a_mobile_master_sw_put,
-               .private_value = HDA_COMPOSE_AMP_VAL(0x21, 3, 0, HDA_OUTPUT),
-       },
-       HDA_CODEC_MUTE("Dock Playback Switch", 0x12, 0x0, HDA_OUTPUT),
-       HDA_CODEC_VOLUME("PCM Playback Volume", 0x20, 0x5, HDA_INPUT),
-       HDA_CODEC_MUTE("PCM Playback Switch", 0x20, 0x5, HDA_INPUT),
-       HDA_CODEC_VOLUME("Mic Playback Volume", 0x20, 0x00, HDA_INPUT),
-       HDA_CODEC_MUTE("Mic Playback Switch", 0x20, 0x00, HDA_INPUT),
-       HDA_CODEC_VOLUME("Internal Mic Playback Volume", 0x20, 0x01, HDA_INPUT),
-       HDA_CODEC_MUTE("Internal Mic Playback Switch", 0x20, 0x01, HDA_INPUT),
-       HDA_CODEC_VOLUME("Dock Mic Playback Volume", 0x20, 0x04, HDA_INPUT),
-       HDA_CODEC_MUTE("Dock Mic Playback Switch", 0x20, 0x04, HDA_INPUT),
-       HDA_CODEC_VOLUME("Mic Boost Volume", 0x14, 0x0, HDA_INPUT),
-       HDA_CODEC_VOLUME("Internal Mic Boost Volume", 0x15, 0x0, HDA_INPUT),
-       HDA_CODEC_VOLUME("Dock Mic Boost Volume", 0x25, 0x0, HDA_OUTPUT),
-       HDA_CODEC_VOLUME("Capture Volume", 0x0c, 0x0, HDA_OUTPUT),
-       HDA_CODEC_MUTE("Capture Switch", 0x0c, 0x0, HDA_OUTPUT),
-       { } /* end */
-};
-
-static const struct snd_kcontrol_new ad1884a_mobile_mixers[] = {
-       HDA_CODEC_VOLUME("Master Playback Volume", 0x21, 0x0, HDA_OUTPUT),
-       /*HDA_CODEC_MUTE("Master Playback Switch", 0x21, 0x0, HDA_OUTPUT),*/
-       {
-               .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
-               .name = "Master Playback Switch",
-               .subdevice = HDA_SUBDEV_AMP_FLAG,
-               .info = snd_hda_mixer_amp_switch_info,
-               .get = snd_hda_mixer_amp_switch_get,
-               .put = ad1884a_mobile_master_sw_put,
-               .private_value = HDA_COMPOSE_AMP_VAL(0x21, 3, 0, HDA_OUTPUT),
-       },
-       HDA_CODEC_VOLUME("PCM Playback Volume", 0x20, 0x5, HDA_INPUT),
-       HDA_CODEC_MUTE("PCM Playback Switch", 0x20, 0x5, HDA_INPUT),
-       HDA_CODEC_VOLUME("Mic Capture Volume", 0x14, 0x0, HDA_INPUT),
-       HDA_CODEC_VOLUME("Internal Mic Capture Volume", 0x15, 0x0, HDA_INPUT),
-       HDA_CODEC_VOLUME("Capture Volume", 0x0c, 0x0, HDA_OUTPUT),
-       HDA_CODEC_MUTE("Capture Switch", 0x0c, 0x0, HDA_OUTPUT),
-       { } /* end */
-};
-
-/* mute internal speaker if HP is plugged */
-static void ad1884a_hp_automute(struct hda_codec *codec)
-{
-       unsigned int present;
-
-       present = snd_hda_jack_detect(codec, 0x11);
-       snd_hda_codec_amp_stereo(codec, 0x16, HDA_OUTPUT, 0,
-                                HDA_AMP_MUTE, present ? HDA_AMP_MUTE : 0);
-       snd_hda_codec_write(codec, 0x16, 0, AC_VERB_SET_EAPD_BTLENABLE,
-                           present ? 0x00 : 0x02);
-}
-
-/* switch to external mic if plugged */
-static void ad1884a_hp_automic(struct hda_codec *codec)
-{
-       unsigned int present;
-
-       present = snd_hda_jack_detect(codec, 0x14);
-       snd_hda_codec_write(codec, 0x0c, 0, AC_VERB_SET_CONNECT_SEL,
-                           present ? 0 : 1);
-}
-
-#define AD1884A_HP_EVENT               0x37
-#define AD1884A_MIC_EVENT              0x36
-
-/* unsolicited event for HP jack sensing */
-static void ad1884a_hp_unsol_event(struct hda_codec *codec, unsigned int res)
-{
-       switch (res >> 26) {
-       case AD1884A_HP_EVENT:
-               ad1884a_hp_automute(codec);
-               break;
-       case AD1884A_MIC_EVENT:
-               ad1884a_hp_automic(codec);
-               break;
-       }
-}
-
-/* initialize jack-sensing, too */
-static int ad1884a_hp_init(struct hda_codec *codec)
-{
-       ad198x_init(codec);
-       ad1884a_hp_automute(codec);
-       ad1884a_hp_automic(codec);
-       return 0;
-}
-
-/* mute internal speaker if HP or docking HP is plugged */
-static void ad1884a_laptop_automute(struct hda_codec *codec)
-{
-       unsigned int present;
-
-       present = snd_hda_jack_detect(codec, 0x11);
-       if (!present)
-               present = snd_hda_jack_detect(codec, 0x12);
-       snd_hda_codec_amp_stereo(codec, 0x16, HDA_OUTPUT, 0,
-                                HDA_AMP_MUTE, present ? HDA_AMP_MUTE : 0);
-       snd_hda_codec_write(codec, 0x16, 0, AC_VERB_SET_EAPD_BTLENABLE,
-                           present ? 0x00 : 0x02);
-}
-
-/* switch to external mic if plugged */
-static void ad1884a_laptop_automic(struct hda_codec *codec)
-{
-       unsigned int idx;
-
-       if (snd_hda_jack_detect(codec, 0x14))
-               idx = 0;
-       else if (snd_hda_jack_detect(codec, 0x1c))
-               idx = 4;
-       else
-               idx = 1;
-       snd_hda_codec_write(codec, 0x0c, 0, AC_VERB_SET_CONNECT_SEL, idx);
-}
-
-/* unsolicited event for HP jack sensing */
-static void ad1884a_laptop_unsol_event(struct hda_codec *codec,
-                                      unsigned int res)
-{
-       switch (res >> 26) {
-       case AD1884A_HP_EVENT:
-               ad1884a_laptop_automute(codec);
-               break;
-       case AD1884A_MIC_EVENT:
-               ad1884a_laptop_automic(codec);
-               break;
-       }
-}
-
-/* initialize jack-sensing, too */
-static int ad1884a_laptop_init(struct hda_codec *codec)
-{
-       ad198x_init(codec);
-       ad1884a_laptop_automute(codec);
-       ad1884a_laptop_automic(codec);
-       return 0;
-}
-
-/* additional verbs for laptop model */
-static const struct hda_verb ad1884a_laptop_verbs[] = {
-       /* Port-A (HP) pin - always unmuted */
-       {0x11, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
-       /* Port-F (int speaker) mixer - route only from analog mixer */
-       {0x0b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
-       {0x0b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1)},
-       /* Port-F (int speaker) pin */
-       {0x16, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
-       {0x16, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
-       /* required for compaq 6530s/6531s speaker output */
-       {0x1c, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
-       /* Port-C pin - internal mic-in */
-       {0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80},
-       {0x14, AC_VERB_SET_AMP_GAIN_MUTE, 0x7002}, /* raise mic as default */
-       {0x15, AC_VERB_SET_AMP_GAIN_MUTE, 0x7002}, /* raise mic as default */
-       /* Port-D (docking line-out) pin - default unmuted */
-       {0x12, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
-       /* analog mix */
-       {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(4)},
-       /* unsolicited event for pin-sense */
-       {0x11, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | AD1884A_HP_EVENT},
-       {0x12, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | AD1884A_HP_EVENT},
-       {0x14, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | AD1884A_MIC_EVENT},
-       {0x1c, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | AD1884A_MIC_EVENT},
-       /* allow to touch GPIO1 (for mute control) */
-       {0x01, AC_VERB_SET_GPIO_MASK, 0x02},
-       {0x01, AC_VERB_SET_GPIO_DIRECTION, 0x02},
-       {0x01, AC_VERB_SET_GPIO_DATA, 0x02}, /* first muted */
-       { } /* end */
-};
-
-static const struct hda_verb ad1884a_mobile_verbs[] = {
-       /* DACs; unmute as default */
-       {0x03, AC_VERB_SET_AMP_GAIN_MUTE, 0x27}, /* 0dB */
-       {0x04, AC_VERB_SET_AMP_GAIN_MUTE, 0x27}, /* 0dB */
-       /* Port-A (HP) mixer - route only from analog mixer */
-       {0x07, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
-       {0x07, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1)},
-       /* Port-A pin */
-       {0x11, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP},
-       /* Port-A (HP) pin - always unmuted */
-       {0x11, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
-       /* Port-B (mic jack) pin */
-       {0x14, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80},
-       {0x14, AC_VERB_SET_AMP_GAIN_MUTE, 0x7002}, /* raise mic as default */
-       /* Port-C (int mic) pin */
-       {0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80},
-       {0x15, AC_VERB_SET_AMP_GAIN_MUTE, 0x7002}, /* raise mic as default */
-       /* Port-F (int speaker) mixer - route only from analog mixer */
-       {0x0b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
-       {0x0b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1)},
-       /* Port-F pin */
-       {0x16, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP},
-       {0x16, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
-       /* Analog mixer; mute as default */
-       {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
-       {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
-       {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(2)},
-       {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)},
-       {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(4)},
-       {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(5)},
-       /* Analog Mix output amp */
-       {0x21, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
-       /* capture sources */
-       /* {0x0c, AC_VERB_SET_CONNECT_SEL, 0x0}, */ /* set via unsol */
-       {0x0c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
-       {0x0d, AC_VERB_SET_CONNECT_SEL, 0x0},
-       {0x0d, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
-       /* unsolicited event for pin-sense */
-       {0x11, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | AD1884A_HP_EVENT},
-       {0x14, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | AD1884A_MIC_EVENT},
-       /* allow to touch GPIO1 (for mute control) */
-       {0x01, AC_VERB_SET_GPIO_MASK, 0x02},
-       {0x01, AC_VERB_SET_GPIO_DIRECTION, 0x02},
-       {0x01, AC_VERB_SET_GPIO_DATA, 0x02}, /* first muted */
-       { } /* end */
-};
-
-/*
- * Thinkpad X300
- * 0x11 - HP
- * 0x12 - speaker
- * 0x14 - mic-in
- * 0x17 - built-in mic
- */
-
-static const struct hda_verb ad1984a_thinkpad_verbs[] = {
-       /* HP unmute */
-       {0x11, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
-       /* analog mix */
-       {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(4)},
-       /* turn on EAPD */
-       {0x12, AC_VERB_SET_EAPD_BTLENABLE, 0x02},
-       /* unsolicited event for pin-sense */
-       {0x11, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | AD1884A_HP_EVENT},
-       /* internal mic - dmic */
-       {0x17, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN},
-       /* set magic COEFs for dmic */
-       {0x01, AC_VERB_SET_COEF_INDEX, 0x13f7},
-       {0x01, AC_VERB_SET_PROC_COEF, 0x08},
-       { } /* end */
-};
-
-static const struct snd_kcontrol_new ad1984a_thinkpad_mixers[] = {
-       HDA_CODEC_VOLUME("Master Playback Volume", 0x21, 0x0, HDA_OUTPUT),
-       HDA_CODEC_MUTE("Master Playback Switch", 0x21, 0x0, HDA_OUTPUT),
-       HDA_CODEC_VOLUME("PCM Playback Volume", 0x20, 0x5, HDA_INPUT),
-       HDA_CODEC_MUTE("PCM Playback Switch", 0x20, 0x5, HDA_INPUT),
-       HDA_CODEC_VOLUME("Mic Playback Volume", 0x20, 0x00, HDA_INPUT),
-       HDA_CODEC_MUTE("Mic Playback Switch", 0x20, 0x00, HDA_INPUT),
-       HDA_CODEC_VOLUME("Mic Boost Volume", 0x14, 0x0, HDA_INPUT),
-       HDA_CODEC_VOLUME("Internal Mic Boost Volume", 0x17, 0x0, HDA_INPUT),
-       HDA_CODEC_VOLUME("Capture Volume", 0x0c, 0x0, HDA_OUTPUT),
-       HDA_CODEC_MUTE("Capture Switch", 0x0c, 0x0, HDA_OUTPUT),
-       {
-               .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
-               .name = "Capture Source",
-               .info = ad198x_mux_enum_info,
-               .get = ad198x_mux_enum_get,
-               .put = ad198x_mux_enum_put,
-       },
-       { } /* end */
-};
-
-static const struct hda_input_mux ad1984a_thinkpad_capture_source = {
-       .num_items = 3,
-       .items = {
-               { "Mic", 0x0 },
-               { "Internal Mic", 0x5 },
-               { "Mix", 0x3 },
-       },
-};
-
-/* mute internal speaker if HP is plugged */
-static void ad1984a_thinkpad_automute(struct hda_codec *codec)
-{
-       unsigned int present;
-
-       present = snd_hda_jack_detect(codec, 0x11);
-       snd_hda_codec_amp_stereo(codec, 0x12, HDA_OUTPUT, 0,
-                                HDA_AMP_MUTE, present ? HDA_AMP_MUTE : 0);
-}
-
-/* unsolicited event for HP jack sensing */
-static void ad1984a_thinkpad_unsol_event(struct hda_codec *codec,
-                                        unsigned int res)
-{
-       if ((res >> 26) != AD1884A_HP_EVENT)
-               return;
-       ad1984a_thinkpad_automute(codec);
-}
-
-/* initialize jack-sensing, too */
-static int ad1984a_thinkpad_init(struct hda_codec *codec)
-{
-       ad198x_init(codec);
-       ad1984a_thinkpad_automute(codec);
-       return 0;
-}
-
-/*
- * Precision R5500
- * 0x12 - HP/line-out
- * 0x13 - speaker (mono)
- * 0x15 - mic-in
- */
-
-static const struct hda_verb ad1984a_precision_verbs[] = {
-       /* Unmute main output path */
-       {0x03, AC_VERB_SET_AMP_GAIN_MUTE, 0x27}, /* 0dB */
-       {0x21, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE + 0x1f}, /* 0dB */
-       {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(5) + 0x17}, /* 0dB */
-       /* Analog mixer; mute as default */
-       {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
-       {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
-       {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(2)},
-       {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)},
-       {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(4)},
-       /* Select mic as input */
-       {0x0c, AC_VERB_SET_CONNECT_SEL, 0x1},
-       {0x0c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE + 0x27}, /* 0dB */
-       /* Configure as mic */
-       {0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80},
-       {0x15, AC_VERB_SET_AMP_GAIN_MUTE, 0x7002}, /* raise mic as default */
-       /* HP unmute */
-       {0x12, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
-       /* turn on EAPD */
-       {0x13, AC_VERB_SET_EAPD_BTLENABLE, 0x02},
-       /* unsolicited event for pin-sense */
-       {0x12, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | AD1884A_HP_EVENT},
-       { } /* end */
-};
-
-static const struct snd_kcontrol_new ad1984a_precision_mixers[] = {
-       HDA_CODEC_VOLUME("Master Playback Volume", 0x21, 0x0, HDA_OUTPUT),
-       HDA_CODEC_MUTE("Master Playback Switch", 0x21, 0x0, HDA_OUTPUT),
-       HDA_CODEC_VOLUME("PCM Playback Volume", 0x20, 0x5, HDA_INPUT),
-       HDA_CODEC_MUTE("PCM Playback Switch", 0x20, 0x5, HDA_INPUT),
-       HDA_CODEC_VOLUME("Mic Playback Volume", 0x20, 0x01, HDA_INPUT),
-       HDA_CODEC_MUTE("Mic Playback Switch", 0x20, 0x01, HDA_INPUT),
-       HDA_CODEC_VOLUME("Mic Boost Volume", 0x15, 0x0, HDA_INPUT),
-       HDA_CODEC_MUTE("Front Playback Switch", 0x12, 0x0, HDA_OUTPUT),
-       HDA_CODEC_VOLUME("Speaker Playback Volume", 0x13, 0x0, HDA_OUTPUT),
-       HDA_CODEC_VOLUME("Capture Volume", 0x0c, 0x0, HDA_OUTPUT),
-       HDA_CODEC_MUTE("Capture Switch", 0x0c, 0x0, HDA_OUTPUT),
-       { } /* end */
-};
-
-
-/* mute internal speaker if HP is plugged */
-static void ad1984a_precision_automute(struct hda_codec *codec)
-{
-       unsigned int present;
-
-       present = snd_hda_jack_detect(codec, 0x12);
-       snd_hda_codec_amp_stereo(codec, 0x13, HDA_OUTPUT, 0,
-                                HDA_AMP_MUTE, present ? HDA_AMP_MUTE : 0);
-}
-
-
-/* unsolicited event for HP jack sensing */
-static void ad1984a_precision_unsol_event(struct hda_codec *codec,
-                                        unsigned int res)
-{
-       if ((res >> 26) != AD1884A_HP_EVENT)
-               return;
-       ad1984a_precision_automute(codec);
-}
-
-/* initialize jack-sensing, too */
-static int ad1984a_precision_init(struct hda_codec *codec)
-{
-       ad198x_init(codec);
-       ad1984a_precision_automute(codec);
-       return 0;
-}
-
-
-/*
- * HP Touchsmart
- * port-A (0x11)      - front hp-out
- * port-B (0x14)      - unused
- * port-C (0x15)      - unused
- * port-D (0x12)      - rear line out
- * port-E (0x1c)      - front mic-in
- * port-F (0x16)      - Internal speakers
- * digital-mic (0x17) - Internal mic
- */
-
-static const struct hda_verb ad1984a_touchsmart_verbs[] = {
-       /* DACs; unmute as default */
-       {0x03, AC_VERB_SET_AMP_GAIN_MUTE, 0x27}, /* 0dB */
-       {0x04, AC_VERB_SET_AMP_GAIN_MUTE, 0x27}, /* 0dB */
-       /* Port-A (HP) mixer - route only from analog mixer */
-       {0x07, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
-       {0x07, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1)},
-       /* Port-A pin */
-       {0x11, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP},
-       /* Port-A (HP) pin - always unmuted */
-       {0x11, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
-       /* Port-E (int speaker) mixer - route only from analog mixer */
-       {0x25, AC_VERB_SET_AMP_GAIN_MUTE, 0x03},
-       /* Port-E pin */
-       {0x1c, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN},
-       {0x1c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
-       {0x1c, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80},
-       /* Port-F (int speaker) mixer - route only from analog mixer */
-       {0x0b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
-       {0x0b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1)},
-       /* Port-F pin */
-       {0x16, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP},
-       {0x16, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
-       /* Analog mixer; mute as default */
-       {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
-       {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
-       {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(2)},
-       {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)},
-       {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(4)},
-       {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(5)},
-       /* Analog Mix output amp */
-       {0x21, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
-       /* capture sources */
-       /* {0x0c, AC_VERB_SET_CONNECT_SEL, 0x0}, */ /* set via unsol */
-       {0x0c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
-       {0x0d, AC_VERB_SET_CONNECT_SEL, 0x0},
-       {0x0d, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
-       /* unsolicited event for pin-sense */
-       {0x11, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | AD1884A_HP_EVENT},
-       {0x1c, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | AD1884A_MIC_EVENT},
-       /* allow to touch GPIO1 (for mute control) */
-       {0x01, AC_VERB_SET_GPIO_MASK, 0x02},
-       {0x01, AC_VERB_SET_GPIO_DIRECTION, 0x02},
-       {0x01, AC_VERB_SET_GPIO_DATA, 0x02}, /* first muted */
-       /* internal mic - dmic */
-       {0x17, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN},
-       /* set magic COEFs for dmic */
-       {0x01, AC_VERB_SET_COEF_INDEX, 0x13f7},
-       {0x01, AC_VERB_SET_PROC_COEF, 0x08},
-       { } /* end */
-};
-
-static const struct snd_kcontrol_new ad1984a_touchsmart_mixers[] = {
-       HDA_CODEC_VOLUME("Master Playback Volume", 0x21, 0x0, HDA_OUTPUT),
-/*     HDA_CODEC_MUTE("Master Playback Switch", 0x21, 0x0, HDA_OUTPUT),*/
-       {
-               .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
-               .subdevice = HDA_SUBDEV_AMP_FLAG,
-               .name = "Master Playback Switch",
-               .info = snd_hda_mixer_amp_switch_info,
-               .get = snd_hda_mixer_amp_switch_get,
-               .put = ad1884a_mobile_master_sw_put,
-               .private_value = HDA_COMPOSE_AMP_VAL(0x21, 3, 0, HDA_OUTPUT),
-       },
-       HDA_CODEC_VOLUME("PCM Playback Volume", 0x20, 0x5, HDA_INPUT),
-       HDA_CODEC_MUTE("PCM Playback Switch", 0x20, 0x5, HDA_INPUT),
-       HDA_CODEC_VOLUME("Capture Volume", 0x0c, 0x0, HDA_OUTPUT),
-       HDA_CODEC_MUTE("Capture Switch", 0x0c, 0x0, HDA_OUTPUT),
-       HDA_CODEC_VOLUME("Mic Boost Volume", 0x25, 0x0, HDA_OUTPUT),
-       HDA_CODEC_VOLUME("Internal Mic Boost Volume", 0x17, 0x0, HDA_INPUT),
-       { } /* end */
-};
-
-/* switch to external mic if plugged */
-static void ad1984a_touchsmart_automic(struct hda_codec *codec)
-{
-       if (snd_hda_jack_detect(codec, 0x1c))
-               snd_hda_codec_write(codec, 0x0c, 0,
-                                    AC_VERB_SET_CONNECT_SEL, 0x4);
-       else
-               snd_hda_codec_write(codec, 0x0c, 0,
-                                    AC_VERB_SET_CONNECT_SEL, 0x5);
-}
-
-
-/* unsolicited event for HP jack sensing */
-static void ad1984a_touchsmart_unsol_event(struct hda_codec *codec,
-       unsigned int res)
-{
-       switch (res >> 26) {
-       case AD1884A_HP_EVENT:
-               ad1884a_hp_automute(codec);
-               break;
-       case AD1884A_MIC_EVENT:
-               ad1984a_touchsmart_automic(codec);
-               break;
-       }
-}
-
-/* initialize jack-sensing, too */
-static int ad1984a_touchsmart_init(struct hda_codec *codec)
-{
-       ad198x_init(codec);
-       ad1884a_hp_automute(codec);
-       ad1984a_touchsmart_automic(codec);
-       return 0;
-}
-
-
-/*
- */
-
-enum {
-       AD1884A_AUTO,
-       AD1884A_DESKTOP,
-       AD1884A_LAPTOP,
-       AD1884A_MOBILE,
-       AD1884A_THINKPAD,
-       AD1984A_TOUCHSMART,
-       AD1984A_PRECISION,
-       AD1884A_MODELS
-};
-
-static const char * const ad1884a_models[AD1884A_MODELS] = {
-       [AD1884A_AUTO]          = "auto",
-       [AD1884A_DESKTOP]       = "desktop",
-       [AD1884A_LAPTOP]        = "laptop",
-       [AD1884A_MOBILE]        = "mobile",
-       [AD1884A_THINKPAD]      = "thinkpad",
-       [AD1984A_TOUCHSMART]    = "touchsmart",
-       [AD1984A_PRECISION]     = "precision",
-};
-
-static const struct snd_pci_quirk ad1884a_cfg_tbl[] = {
-       SND_PCI_QUIRK(0x1028, 0x04ac, "Precision R5500", AD1984A_PRECISION),
-       SND_PCI_QUIRK(0x103c, 0x3030, "HP", AD1884A_MOBILE),
-       SND_PCI_QUIRK(0x103c, 0x3037, "HP 2230s", AD1884A_LAPTOP),
-       SND_PCI_QUIRK(0x103c, 0x3056, "HP", AD1884A_MOBILE),
-       SND_PCI_QUIRK_MASK(0x103c, 0xfff0, 0x3070, "HP", AD1884A_MOBILE),
-       SND_PCI_QUIRK_MASK(0x103c, 0xfff0, 0x30d0, "HP laptop", AD1884A_LAPTOP),
-       SND_PCI_QUIRK_MASK(0x103c, 0xfff0, 0x30e0, "HP laptop", AD1884A_LAPTOP),
-       SND_PCI_QUIRK_MASK(0x103c, 0xff00, 0x3600, "HP laptop", AD1884A_LAPTOP),
-       SND_PCI_QUIRK_MASK(0x103c, 0xfff0, 0x7010, "HP laptop", AD1884A_MOBILE),
-       SND_PCI_QUIRK(0x17aa, 0x20ac, "Thinkpad X300", AD1884A_THINKPAD),
-       SND_PCI_QUIRK(0x103c, 0x2a82, "Touchsmart", AD1984A_TOUCHSMART),
-       {}
-};
-
-static int patch_ad1884a(struct hda_codec *codec)
-{
-       struct ad198x_spec *spec;
-       int err, board_config;
-
-       board_config = snd_hda_check_board_config(codec, AD1884A_MODELS,
-                                                 ad1884a_models,
-                                                 ad1884a_cfg_tbl);
-       if (board_config < 0) {
-               printk(KERN_INFO "hda_codec: %s: BIOS auto-probing.\n",
-                      codec->chip_name);
-               board_config = AD1884A_AUTO;
-       }
-
-       if (board_config == AD1884A_AUTO)
-               return ad1884_parse_auto_config(codec);
-
-       err = alloc_ad_spec(codec);
-       if (err < 0)
-               return err;
-       spec = codec->spec;
-
-       err = snd_hda_attach_beep_device(codec, 0x10);
-       if (err < 0) {
-               ad198x_free(codec);
-               return err;
-       }
-       set_beep_amp(spec, 0x10, 0, HDA_OUTPUT);
-
-       spec->multiout.max_channels = 2;
-       spec->multiout.num_dacs = ARRAY_SIZE(ad1884a_dac_nids);
-       spec->multiout.dac_nids = ad1884a_dac_nids;
-       spec->multiout.dig_out_nid = AD1884A_SPDIF_OUT;
-       spec->num_adc_nids = ARRAY_SIZE(ad1884a_adc_nids);
-       spec->adc_nids = ad1884a_adc_nids;
-       spec->capsrc_nids = ad1884a_capsrc_nids;
-       spec->input_mux = &ad1884a_capture_source;
-       spec->num_mixers = 1;
-       spec->mixers[0] = ad1884a_base_mixers;
-       spec->num_init_verbs = 1;
-       spec->init_verbs[0] = ad1884a_init_verbs;
-       spec->spdif_route = 0;
-#ifdef CONFIG_PM
-       spec->loopback.amplist = ad1884a_loopbacks;
-#endif
-       codec->patch_ops = ad198x_patch_ops;
-
-       /* override some parameters */
-       switch (board_config) {
-       case AD1884A_LAPTOP:
-               spec->mixers[0] = ad1884a_laptop_mixers;
-               spec->init_verbs[spec->num_init_verbs++] = ad1884a_laptop_verbs;
-               spec->multiout.dig_out_nid = 0;
-               codec->patch_ops.unsol_event = ad1884a_laptop_unsol_event;
-               codec->patch_ops.init = ad1884a_laptop_init;
-               /* set the upper-limit for mixer amp to 0dB for avoiding the
-                * possible damage by overloading
-                */
-               snd_hda_override_amp_caps(codec, 0x20, HDA_INPUT,
-                                         (0x17 << AC_AMPCAP_OFFSET_SHIFT) |
-                                         (0x17 << AC_AMPCAP_NUM_STEPS_SHIFT) |
-                                         (0x05 << AC_AMPCAP_STEP_SIZE_SHIFT) |
-                                         (1 << AC_AMPCAP_MUTE_SHIFT));
-               break;
-       case AD1884A_MOBILE:
-               spec->mixers[0] = ad1884a_mobile_mixers;
-               spec->init_verbs[0] = ad1884a_mobile_verbs;
-               spec->multiout.dig_out_nid = 0;
-               codec->patch_ops.unsol_event = ad1884a_hp_unsol_event;
-               codec->patch_ops.init = ad1884a_hp_init;
-               /* set the upper-limit for mixer amp to 0dB for avoiding the
-                * possible damage by overloading
-                */
-               snd_hda_override_amp_caps(codec, 0x20, HDA_INPUT,
-                                         (0x17 << AC_AMPCAP_OFFSET_SHIFT) |
-                                         (0x17 << AC_AMPCAP_NUM_STEPS_SHIFT) |
-                                         (0x05 << AC_AMPCAP_STEP_SIZE_SHIFT) |
-                                         (1 << AC_AMPCAP_MUTE_SHIFT));
-               break;
-       case AD1884A_THINKPAD:
-               spec->mixers[0] = ad1984a_thinkpad_mixers;
-               spec->init_verbs[spec->num_init_verbs++] =
-                       ad1984a_thinkpad_verbs;
-               spec->multiout.dig_out_nid = 0;
-               spec->input_mux = &ad1984a_thinkpad_capture_source;
-               codec->patch_ops.unsol_event = ad1984a_thinkpad_unsol_event;
-               codec->patch_ops.init = ad1984a_thinkpad_init;
-               break;
-       case AD1984A_PRECISION:
-               spec->mixers[0] = ad1984a_precision_mixers;
-               spec->init_verbs[spec->num_init_verbs++] =
-                       ad1984a_precision_verbs;
-               spec->multiout.dig_out_nid = 0;
-               codec->patch_ops.unsol_event = ad1984a_precision_unsol_event;
-               codec->patch_ops.init = ad1984a_precision_init;
-               break;
-       case AD1984A_TOUCHSMART:
-               spec->mixers[0] = ad1984a_touchsmart_mixers;
-               spec->init_verbs[0] = ad1984a_touchsmart_verbs;
-               spec->multiout.dig_out_nid = 0;
-               codec->patch_ops.unsol_event = ad1984a_touchsmart_unsol_event;
-               codec->patch_ops.init = ad1984a_touchsmart_init;
-               /* set the upper-limit for mixer amp to 0dB for avoiding the
-                * possible damage by overloading
-                */
-               snd_hda_override_amp_caps(codec, 0x20, HDA_INPUT,
-                                         (0x17 << AC_AMPCAP_OFFSET_SHIFT) |
-                                         (0x17 << AC_AMPCAP_NUM_STEPS_SHIFT) |
-                                         (0x05 << AC_AMPCAP_STEP_SIZE_SHIFT) |
-                                         (1 << AC_AMPCAP_MUTE_SHIFT));
-               break;
-       }
-
-       codec->no_trigger_sense = 1;
-       codec->no_sticky_stream = 1;
-
-       return 0;
-}
-#else /* ENABLE_AD_STATIC_QUIRKS */
-#define patch_ad1884a  ad1884_parse_auto_config
-#endif /* ENABLE_AD_STATIC_QUIRKS */
-
-
 /*
  * AD1882 / AD1882A
  *
@@ -4850,299 +1058,7 @@ static int patch_ad1884a(struct hda_codec *codec)
  * port-G - rear clfe-out (6stack)
  */
 
-#ifdef ENABLE_AD_STATIC_QUIRKS
-static const hda_nid_t ad1882_dac_nids[3] = {
-       0x04, 0x03, 0x05
-};
-
-static const hda_nid_t ad1882_adc_nids[2] = {
-       0x08, 0x09,
-};
-
-static const hda_nid_t ad1882_capsrc_nids[2] = {
-       0x0c, 0x0d,
-};
-
-#define AD1882_SPDIF_OUT       0x02
-
-/* list: 0x11, 0x39, 0x3a, 0x18, 0x3c, 0x3b, 0x12, 0x20 */
-static const struct hda_input_mux ad1882_capture_source = {
-       .num_items = 5,
-       .items = {
-               { "Front Mic", 0x1 },
-               { "Mic", 0x4 },
-               { "Line", 0x2 },
-               { "CD", 0x3 },
-               { "Mix", 0x7 },
-       },
-};
-
-/* list: 0x11, 0x39, 0x3a, 0x3c, 0x18, 0x1f, 0x12, 0x20 */
-static const struct hda_input_mux ad1882a_capture_source = {
-       .num_items = 5,
-       .items = {
-               { "Front Mic", 0x1 },
-               { "Mic", 0x4},
-               { "Line", 0x2 },
-               { "Digital Mic", 0x06 },
-               { "Mix", 0x7 },
-       },
-};
-
-static const struct snd_kcontrol_new ad1882_base_mixers[] = {
-       HDA_CODEC_VOLUME("Front Playback Volume", 0x04, 0x0, HDA_OUTPUT),
-       HDA_CODEC_VOLUME("Surround Playback Volume", 0x03, 0x0, HDA_OUTPUT),
-       HDA_CODEC_VOLUME_MONO("Center Playback Volume", 0x05, 1, 0x0, HDA_OUTPUT),
-       HDA_CODEC_VOLUME_MONO("LFE Playback Volume", 0x05, 2, 0x0, HDA_OUTPUT),
-       HDA_CODEC_MUTE("Headphone Playback Switch", 0x11, 0x0, HDA_OUTPUT),
-       HDA_CODEC_MUTE("Front Playback Switch", 0x12, 0x0, HDA_OUTPUT),
-       HDA_CODEC_VOLUME_MONO("Mono Playback Volume", 0x13, 1, 0x0, HDA_OUTPUT),
-       HDA_CODEC_MUTE_MONO("Mono Playback Switch", 0x13, 1, 0x0, HDA_OUTPUT),
-
-       HDA_CODEC_VOLUME("Mic Boost Volume", 0x3c, 0x0, HDA_OUTPUT),
-       HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x39, 0x0, HDA_OUTPUT),
-       HDA_CODEC_VOLUME("Line-In Boost Volume", 0x3a, 0x0, HDA_OUTPUT),
-       HDA_CODEC_VOLUME("Capture Volume", 0x0c, 0x0, HDA_OUTPUT),
-       HDA_CODEC_MUTE("Capture Switch", 0x0c, 0x0, HDA_OUTPUT),
-       HDA_CODEC_VOLUME_IDX("Capture Volume", 1, 0x0d, 0x0, HDA_OUTPUT),
-       HDA_CODEC_MUTE_IDX("Capture Switch", 1, 0x0d, 0x0, HDA_OUTPUT),
-       {
-               .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
-               /* The multiple "Capture Source" controls confuse alsamixer
-                * So call somewhat different..
-                */
-               /* .name = "Capture Source", */
-               .name = "Input Source",
-               .count = 2,
-               .info = ad198x_mux_enum_info,
-               .get = ad198x_mux_enum_get,
-               .put = ad198x_mux_enum_put,
-       },
-       /* SPDIF controls */
-       HDA_CODEC_VOLUME("IEC958 Playback Volume", 0x1b, 0x0, HDA_OUTPUT),
-       {
-               .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
-               .name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,NONE) "Source",
-               /* identical with ad1983 */
-               .info = ad1983_spdif_route_info,
-               .get = ad1983_spdif_route_get,
-               .put = ad1983_spdif_route_put,
-       },
-       { } /* end */
-};
-
-static const struct snd_kcontrol_new ad1882_loopback_mixers[] = {
-       HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x20, 0x00, HDA_INPUT),
-       HDA_CODEC_MUTE("Front Mic Playback Switch", 0x20, 0x00, HDA_INPUT),
-       HDA_CODEC_VOLUME("Mic Playback Volume", 0x20, 0x01, HDA_INPUT),
-       HDA_CODEC_MUTE("Mic Playback Switch", 0x20, 0x01, HDA_INPUT),
-       HDA_CODEC_VOLUME("Line Playback Volume", 0x20, 0x04, HDA_INPUT),
-       HDA_CODEC_MUTE("Line Playback Switch", 0x20, 0x04, HDA_INPUT),
-       HDA_CODEC_VOLUME("CD Playback Volume", 0x20, 0x06, HDA_INPUT),
-       HDA_CODEC_MUTE("CD Playback Switch", 0x20, 0x06, HDA_INPUT),
-       { } /* end */
-};
-
-static const struct snd_kcontrol_new ad1882a_loopback_mixers[] = {
-       HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x20, 0x00, HDA_INPUT),
-       HDA_CODEC_MUTE("Front Mic Playback Switch", 0x20, 0x00, HDA_INPUT),
-       HDA_CODEC_VOLUME("Mic Playback Volume", 0x20, 0x04, HDA_INPUT),
-       HDA_CODEC_MUTE("Mic Playback Switch", 0x20, 0x04, HDA_INPUT),
-       HDA_CODEC_VOLUME("Line Playback Volume", 0x20, 0x01, HDA_INPUT),
-       HDA_CODEC_MUTE("Line Playback Switch", 0x20, 0x01, HDA_INPUT),
-       HDA_CODEC_VOLUME("CD Playback Volume", 0x20, 0x06, HDA_INPUT),
-       HDA_CODEC_MUTE("CD Playback Switch", 0x20, 0x06, HDA_INPUT),
-       HDA_CODEC_VOLUME("Digital Mic Boost Volume", 0x1f, 0x0, HDA_INPUT),
-       { } /* end */
-};
-
-static const struct snd_kcontrol_new ad1882_3stack_mixers[] = {
-       HDA_CODEC_MUTE("Surround Playback Switch", 0x15, 0x0, HDA_OUTPUT),
-       HDA_CODEC_MUTE_MONO("Center Playback Switch", 0x17, 1, 0x0, HDA_OUTPUT),
-       HDA_CODEC_MUTE_MONO("LFE Playback Switch", 0x17, 2, 0x0, HDA_OUTPUT),
-       {
-               .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
-               .name = "Channel Mode",
-               .info = ad198x_ch_mode_info,
-               .get = ad198x_ch_mode_get,
-               .put = ad198x_ch_mode_put,
-       },
-       { } /* end */
-};
-
-/* simple auto-mute control for AD1882 3-stack board */
-#define AD1882_HP_EVENT        0x01
-
-static void ad1882_3stack_automute(struct hda_codec *codec)
-{
-       bool mute = snd_hda_jack_detect(codec, 0x11);
-       snd_hda_codec_write(codec, 0x12, 0, AC_VERB_SET_PIN_WIDGET_CONTROL,
-                           mute ? 0 : PIN_OUT);
-}
-
-static int ad1882_3stack_automute_init(struct hda_codec *codec)
-{
-       ad198x_init(codec);
-       ad1882_3stack_automute(codec);
-       return 0;
-}
-
-static void ad1882_3stack_unsol_event(struct hda_codec *codec, unsigned int res)
-{
-       switch (res >> 26) {
-       case AD1882_HP_EVENT:
-               ad1882_3stack_automute(codec);
-               break;
-       }
-}
-
-static const struct snd_kcontrol_new ad1882_6stack_mixers[] = {
-       HDA_CODEC_MUTE("Surround Playback Switch", 0x16, 0x0, HDA_OUTPUT),
-       HDA_CODEC_MUTE_MONO("Center Playback Switch", 0x24, 1, 0x0, HDA_OUTPUT),
-       HDA_CODEC_MUTE_MONO("LFE Playback Switch", 0x24, 2, 0x0, HDA_OUTPUT),
-       { } /* end */
-};
-
-static const struct hda_verb ad1882_ch2_init[] = {
-       {0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN},
-       {0x2c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
-       {0x2c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
-       {0x17, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80},
-       {0x26, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
-       {0x26, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
-       { } /* end */
-};
-
-static const struct hda_verb ad1882_ch4_init[] = {
-       {0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
-       {0x2c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
-       {0x2c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1)},
-       {0x17, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80},
-       {0x26, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
-       {0x26, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
-       { } /* end */
-};
-
-static const struct hda_verb ad1882_ch6_init[] = {
-       {0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
-       {0x2c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
-       {0x2c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1)},
-       {0x17, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
-       {0x26, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
-       {0x26, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1)},
-       { } /* end */
-};
-
-static const struct hda_channel_mode ad1882_modes[3] = {
-       { 2, ad1882_ch2_init },
-       { 4, ad1882_ch4_init },
-       { 6, ad1882_ch6_init },
-};
-
-/*
- * initialization verbs
- */
-static const struct hda_verb ad1882_init_verbs[] = {
-       /* DACs; mute as default */
-       {0x03, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO},
-       {0x04, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO},
-       {0x05, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO},
-       /* Port-A (HP) mixer */
-       {0x22, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
-       {0x22, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1)},
-       /* Port-A pin */
-       {0x11, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP},
-       {0x11, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
-       /* HP selector - select DAC2 */
-       {0x37, AC_VERB_SET_CONNECT_SEL, 0x1},
-       /* Port-D (Line-out) mixer */
-       {0x29, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
-       {0x29, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1)},
-       /* Port-D pin */
-       {0x12, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP},
-       {0x12, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
-       /* Mono-out mixer */
-       {0x1e, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
-       {0x1e, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1)},
-       /* Mono-out pin */
-       {0x13, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP},
-       {0x13, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
-       /* Port-B (front mic) pin */
-       {0x14, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80},
-       {0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
-       {0x39, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO}, /* boost */
-       /* Port-C (line-in) pin */
-       {0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN},
-       {0x15, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
-       {0x3a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO}, /* boost */
-       /* Port-C mixer - mute as input */
-       {0x2c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
-       {0x2c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
-       /* Port-E (mic-in) pin */
-       {0x17, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80},
-       {0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
-       {0x3c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO}, /* boost */
-       /* Port-E mixer - mute as input */
-       {0x26, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
-       {0x26, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
-       /* Port-F (surround) */
-       {0x16, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
-       {0x16, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
-       /* Port-G (CLFE) */
-       {0x24, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
-       {0x24, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
-       /* Analog mixer; mute as default */
-       /* list: 0x39, 0x3a, 0x11, 0x12, 0x3c, 0x3b, 0x18, 0x1a */
-       {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
-       {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
-       {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(2)},
-       {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)},
-       {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(4)},
-       {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(5)},
-       {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(6)},
-       {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(7)},
-       /* Analog Mix output amp */
-       {0x21, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE | 0x1f}, /* 0dB */
-       /* SPDIF output selector */
-       {0x02, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE | 0x27}, /* 0dB */
-       {0x02, AC_VERB_SET_CONNECT_SEL, 0x0}, /* PCM */
-       {0x1b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE | 0x27}, /* 0dB */
-       { } /* end */
-};
-
-static const struct hda_verb ad1882_3stack_automute_verbs[] = {
-       {0x11, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | AD1882_HP_EVENT},
-       { } /* end */
-};
-
-#ifdef CONFIG_PM
-static const struct hda_amp_list ad1882_loopbacks[] = {
-       { 0x20, HDA_INPUT, 0 }, /* Front Mic */
-       { 0x20, HDA_INPUT, 1 }, /* Mic */
-       { 0x20, HDA_INPUT, 4 }, /* Line */
-       { 0x20, HDA_INPUT, 6 }, /* CD */
-       { } /* end */
-};
-#endif
-
-/* models */
-enum {
-       AD1882_AUTO,
-       AD1882_3STACK,
-       AD1882_6STACK,
-       AD1882_3STACK_AUTOMUTE,
-       AD1882_MODELS
-};
-
-static const char * const ad1882_models[AD1986A_MODELS] = {
-       [AD1882_AUTO]           = "auto",
-       [AD1882_3STACK]         = "3stack",
-       [AD1882_6STACK]         = "6stack",
-       [AD1882_3STACK_AUTOMUTE] = "3stack-automute",
-};
-#endif /* ENABLE_AD_STATIC_QUIRKS */
-
-static int ad1882_parse_auto_config(struct hda_codec *codec)
+static int patch_ad1882(struct hda_codec *codec)
 {
        struct ad198x_spec *spec;
        int err;
@@ -5169,110 +1085,20 @@ static int ad1882_parse_auto_config(struct hda_codec *codec)
        return err;
 }
 
-#ifdef ENABLE_AD_STATIC_QUIRKS
-static int patch_ad1882(struct hda_codec *codec)
-{
-       struct ad198x_spec *spec;
-       int err, board_config;
-
-       board_config = snd_hda_check_board_config(codec, AD1882_MODELS,
-                                                 ad1882_models, NULL);
-       if (board_config < 0) {
-               printk(KERN_INFO "hda_codec: %s: BIOS auto-probing.\n",
-                      codec->chip_name);
-               board_config = AD1882_AUTO;
-       }
-
-       if (board_config == AD1882_AUTO)
-               return ad1882_parse_auto_config(codec);
-
-       err = alloc_ad_spec(codec);
-       if (err < 0)
-               return err;
-       spec = codec->spec;
-
-       err = snd_hda_attach_beep_device(codec, 0x10);
-       if (err < 0) {
-               ad198x_free(codec);
-               return err;
-       }
-       set_beep_amp(spec, 0x10, 0, HDA_OUTPUT);
-
-       spec->multiout.max_channels = 6;
-       spec->multiout.num_dacs = 3;
-       spec->multiout.dac_nids = ad1882_dac_nids;
-       spec->multiout.dig_out_nid = AD1882_SPDIF_OUT;
-       spec->num_adc_nids = ARRAY_SIZE(ad1882_adc_nids);
-       spec->adc_nids = ad1882_adc_nids;
-       spec->capsrc_nids = ad1882_capsrc_nids;
-       if (codec->vendor_id == 0x11d41882)
-               spec->input_mux = &ad1882_capture_source;
-       else
-               spec->input_mux = &ad1882a_capture_source;
-       spec->num_mixers = 2;
-       spec->mixers[0] = ad1882_base_mixers;
-       if (codec->vendor_id == 0x11d41882)
-               spec->mixers[1] = ad1882_loopback_mixers;
-       else
-               spec->mixers[1] = ad1882a_loopback_mixers;
-       spec->num_init_verbs = 1;
-       spec->init_verbs[0] = ad1882_init_verbs;
-       spec->spdif_route = 0;
-#ifdef CONFIG_PM
-       spec->loopback.amplist = ad1882_loopbacks;
-#endif
-       spec->vmaster_nid = 0x04;
-
-       codec->patch_ops = ad198x_patch_ops;
-
-       /* override some parameters */
-       switch (board_config) {
-       default:
-       case AD1882_3STACK:
-       case AD1882_3STACK_AUTOMUTE:
-               spec->num_mixers = 3;
-               spec->mixers[2] = ad1882_3stack_mixers;
-               spec->channel_mode = ad1882_modes;
-               spec->num_channel_mode = ARRAY_SIZE(ad1882_modes);
-               spec->need_dac_fix = 1;
-               spec->multiout.max_channels = 2;
-               spec->multiout.num_dacs = 1;
-               if (board_config != AD1882_3STACK) {
-                       spec->init_verbs[spec->num_init_verbs++] =
-                               ad1882_3stack_automute_verbs;
-                       codec->patch_ops.unsol_event = ad1882_3stack_unsol_event;
-                       codec->patch_ops.init = ad1882_3stack_automute_init;
-               }
-               break;
-       case AD1882_6STACK:
-               spec->num_mixers = 3;
-               spec->mixers[2] = ad1882_6stack_mixers;
-               break;
-       }
-
-       codec->no_trigger_sense = 1;
-       codec->no_sticky_stream = 1;
-
-       return 0;
-}
-#else /* ENABLE_AD_STATIC_QUIRKS */
-#define patch_ad1882   ad1882_parse_auto_config
-#endif /* ENABLE_AD_STATIC_QUIRKS */
-
 
 /*
  * patch entries
  */
 static const struct hda_codec_preset snd_hda_preset_analog[] = {
-       { .id = 0x11d4184a, .name = "AD1884A", .patch = patch_ad1884a },
+       { .id = 0x11d4184a, .name = "AD1884A", .patch = patch_ad1884 },
        { .id = 0x11d41882, .name = "AD1882", .patch = patch_ad1882 },
-       { .id = 0x11d41883, .name = "AD1883", .patch = patch_ad1884a },
+       { .id = 0x11d41883, .name = "AD1883", .patch = patch_ad1884 },
        { .id = 0x11d41884, .name = "AD1884", .patch = patch_ad1884 },
-       { .id = 0x11d4194a, .name = "AD1984A", .patch = patch_ad1884a },
-       { .id = 0x11d4194b, .name = "AD1984B", .patch = patch_ad1884a },
+       { .id = 0x11d4194a, .name = "AD1984A", .patch = patch_ad1884 },
+       { .id = 0x11d4194b, .name = "AD1984B", .patch = patch_ad1884 },
        { .id = 0x11d41981, .name = "AD1981", .patch = patch_ad1981 },
        { .id = 0x11d41983, .name = "AD1983", .patch = patch_ad1983 },
-       { .id = 0x11d41984, .name = "AD1984", .patch = patch_ad1984 },
+       { .id = 0x11d41984, .name = "AD1984", .patch = patch_ad1884 },
        { .id = 0x11d41986, .name = "AD1986A", .patch = patch_ad1986a },
        { .id = 0x11d41988, .name = "AD1988", .patch = patch_ad1988 },
        { .id = 0x11d4198b, .name = "AD1988B", .patch = patch_ad1988 },
index de00ce166470d5c73ac646c452f8a3ccd86e717a..4edd2d0f9a3ce66e625f7576b3b7053548397a51 100644 (file)
@@ -66,6 +66,8 @@ struct conexant_spec {
        hda_nid_t eapds[4];
        bool dynamic_eapd;
 
+       unsigned int parse_flags; /* flag for snd_hda_parse_pin_defcfg() */
+
 #ifdef ENABLE_CXT_STATIC_QUIRKS
        const struct snd_kcontrol_new *mixers[5];
        int num_mixers;
@@ -3200,6 +3202,9 @@ static int cx_auto_init(struct hda_codec *codec)
        snd_hda_gen_init(codec);
        if (!spec->dynamic_eapd)
                cx_auto_turn_eapd(codec, spec->num_eapds, spec->eapds, true);
+
+       snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_INIT);
+
        return 0;
 }
 
@@ -3224,6 +3229,8 @@ enum {
        CXT_PINCFG_LEMOTE_A1205,
        CXT_FIXUP_STEREO_DMIC,
        CXT_FIXUP_INC_MIC_BOOST,
+       CXT_FIXUP_HEADPHONE_MIC_PIN,
+       CXT_FIXUP_HEADPHONE_MIC,
 };
 
 static void cxt_fixup_stereo_dmic(struct hda_codec *codec,
@@ -3246,6 +3253,59 @@ static void cxt5066_increase_mic_boost(struct hda_codec *codec,
                                  (0 << AC_AMPCAP_MUTE_SHIFT));
 }
 
+static void cxt_update_headset_mode(struct hda_codec *codec)
+{
+       /* The verbs used in this function were tested on a Conexant CX20751/2 codec. */
+       int i;
+       bool mic_mode = false;
+       struct conexant_spec *spec = codec->spec;
+       struct auto_pin_cfg *cfg = &spec->gen.autocfg;
+
+       hda_nid_t mux_pin = spec->gen.imux_pins[spec->gen.cur_mux[0]];
+
+       for (i = 0; i < cfg->num_inputs; i++)
+               if (cfg->inputs[i].pin == mux_pin) {
+                       mic_mode = !!cfg->inputs[i].is_headphone_mic;
+                       break;
+               }
+
+       if (mic_mode) {
+               snd_hda_codec_write_cache(codec, 0x1c, 0, 0x410, 0x7c); /* enable merged mode for analog int-mic */
+               spec->gen.hp_jack_present = false;
+       } else {
+               snd_hda_codec_write_cache(codec, 0x1c, 0, 0x410, 0x54); /* disable merged mode for analog int-mic */
+               spec->gen.hp_jack_present = snd_hda_jack_detect(codec, spec->gen.autocfg.hp_pins[0]);
+       }
+
+       snd_hda_gen_update_outputs(codec);
+}
+
+static void cxt_update_headset_mode_hook(struct hda_codec *codec,
+                            struct snd_ctl_elem_value *ucontrol)
+{
+       cxt_update_headset_mode(codec);
+}
+
+static void cxt_fixup_headphone_mic(struct hda_codec *codec,
+                                   const struct hda_fixup *fix, int action)
+{
+       struct conexant_spec *spec = codec->spec;
+
+       switch (action) {
+       case HDA_FIXUP_ACT_PRE_PROBE:
+               spec->parse_flags |= HDA_PINCFG_HEADPHONE_MIC;
+               break;
+       case HDA_FIXUP_ACT_PROBE:
+               spec->gen.cap_sync_hook = cxt_update_headset_mode_hook;
+               spec->gen.automute_hook = cxt_update_headset_mode;
+               break;
+       case HDA_FIXUP_ACT_INIT:
+               cxt_update_headset_mode(codec);
+               break;
+       }
+}
+
+
 /* ThinkPad X200 & co with cxt5051 */
 static const struct hda_pintbl cxt_pincfg_lenovo_x200[] = {
        { 0x16, 0x042140ff }, /* HP (seq# overridden) */
@@ -3302,6 +3362,19 @@ static const struct hda_fixup cxt_fixups[] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = cxt5066_increase_mic_boost,
        },
+       [CXT_FIXUP_HEADPHONE_MIC_PIN] = {
+               .type = HDA_FIXUP_PINS,
+               .chained = true,
+               .chain_id = CXT_FIXUP_HEADPHONE_MIC,
+               .v.pins = (const struct hda_pintbl[]) {
+                       { 0x18, 0x03a1913d }, /* use as headphone mic, without its own jack detect */
+                       { }
+               }
+       },
+       [CXT_FIXUP_HEADPHONE_MIC] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = cxt_fixup_headphone_mic,
+       },
 };
 
 static const struct snd_pci_quirk cxt5051_fixups[] = {
@@ -3311,6 +3384,7 @@ static const struct snd_pci_quirk cxt5051_fixups[] = {
 
 static const struct snd_pci_quirk cxt5066_fixups[] = {
        SND_PCI_QUIRK(0x1025, 0x0543, "Acer Aspire One 522", CXT_FIXUP_STEREO_DMIC),
+       SND_PCI_QUIRK(0x1043, 0x138d, "Asus", CXT_FIXUP_HEADPHONE_MIC_PIN),
        SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400", CXT_PINCFG_LENOVO_TP410),
        SND_PCI_QUIRK(0x17aa, 0x215e, "Lenovo T410", CXT_PINCFG_LENOVO_TP410),
        SND_PCI_QUIRK(0x17aa, 0x215f, "Lenovo T510", CXT_PINCFG_LENOVO_TP410),
@@ -3395,7 +3469,8 @@ static int patch_conexant_auto(struct hda_codec *codec)
 
        snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_PRE_PROBE);
 
-       err = snd_hda_parse_pin_defcfg(codec, &spec->gen.autocfg, NULL, 0);
+       err = snd_hda_parse_pin_defcfg(codec, &spec->gen.autocfg, NULL,
+                                      spec->parse_flags);
        if (err < 0)
                goto error;
 
@@ -3416,6 +3491,8 @@ static int patch_conexant_auto(struct hda_codec *codec)
                codec->bus->allow_bus_reset = 1;
        }
 
+       snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_PROBE);
+
        return 0;
 
  error:
index f303cd898515d5f20000ec5138400bfd93e63d85..394766a1b80c50241491506b2c109dbc40698b41 100644 (file)
@@ -282,6 +282,7 @@ static void alc_eapd_shutup(struct hda_codec *codec)
 {
        alc_auto_setup_eapd(codec, false);
        msleep(200);
+       snd_hda_shutup_pins(codec);
 }
 
 /* generic EAPD initialization */
@@ -826,7 +827,8 @@ static inline void alc_shutup(struct hda_codec *codec)
 
        if (spec && spec->shutup)
                spec->shutup(codec);
-       snd_hda_shutup_pins(codec);
+       else
+               snd_hda_shutup_pins(codec);
 }
 
 #define alc_free       snd_hda_gen_free
@@ -1853,8 +1855,10 @@ static void alc882_fixup_no_primary_hp(struct hda_codec *codec,
                                       const struct hda_fixup *fix, int action)
 {
        struct alc_spec *spec = codec->spec;
-       if (action == HDA_FIXUP_ACT_PRE_PROBE)
+       if (action == HDA_FIXUP_ACT_PRE_PROBE) {
                spec->gen.no_primary_hp = 1;
+               spec->gen.no_multi_io = 1;
+       }
 }
 
 static const struct hda_fixup alc882_fixups[] = {
@@ -2583,15 +2587,13 @@ static void alc269_shutup(struct hda_codec *codec)
 {
        struct alc_spec *spec = codec->spec;
 
-       if (spec->codec_variant != ALC269_TYPE_ALC269VB)
-               return;
-
        if (spec->codec_variant == ALC269_TYPE_ALC269VB)
                alc269vb_toggle_power_output(codec, 0);
        if (spec->codec_variant == ALC269_TYPE_ALC269VB &&
                        (alc_get_coef0(codec) & 0x00ff) == 0x018) {
                msleep(150);
        }
+       snd_hda_shutup_pins(codec);
 }
 
 static void alc5505_coef_set(struct hda_codec *codec, unsigned int index_reg,
@@ -2722,6 +2724,13 @@ static int alc269_resume(struct hda_codec *codec)
        hda_call_check_power_status(codec, 0x01);
        if (spec->has_alc5505_dsp)
                alc5505_dsp_resume(codec);
+
+       /* clear the power-save mode for ALC283 */
+       if (codec->vendor_id == 0x10ec0283) {
+               alc_write_coef_idx(codec, 0x4, 0xaf01);
+               alc_write_coef_idx(codec, 0x6, 0x2104);
+       }
+
        return 0;
 }
 #endif /* CONFIG_PM */
@@ -3261,6 +3270,28 @@ static void alc_fixup_headset_mode_alc668(struct hda_codec *codec,
        alc_fixup_headset_mode(codec, fix, action);
 }
 
+/* Returns the nid of the external mic input pin, or 0 if it cannot be found. */
+static int find_ext_mic_pin(struct hda_codec *codec)
+{
+       struct alc_spec *spec = codec->spec;
+       struct auto_pin_cfg *cfg = &spec->gen.autocfg;
+       hda_nid_t nid;
+       unsigned int defcfg;
+       int i;
+
+       for (i = 0; i < cfg->num_inputs; i++) {
+               if (cfg->inputs[i].type != AUTO_PIN_MIC)
+                       continue;
+               nid = cfg->inputs[i].pin;
+               defcfg = snd_hda_codec_get_pincfg(codec, nid);
+               if (snd_hda_get_input_pin_attr(defcfg) == INPUT_PIN_ATTR_INT)
+                       continue;
+               return nid;
+       }
+
+       return 0;
+}
+
 static void alc271_hp_gate_mic_jack(struct hda_codec *codec,
                                    const struct hda_fixup *fix,
                                    int action)
@@ -3268,11 +3299,12 @@ static void alc271_hp_gate_mic_jack(struct hda_codec *codec,
        struct alc_spec *spec = codec->spec;
 
        if (action == HDA_FIXUP_ACT_PROBE) {
-               if (snd_BUG_ON(!spec->gen.am_entry[1].pin ||
-                              !spec->gen.autocfg.hp_pins[0]))
+               int mic_pin = find_ext_mic_pin(codec);
+               int hp_pin = spec->gen.autocfg.hp_pins[0];
+
+               if (snd_BUG_ON(!mic_pin || !hp_pin))
                        return;
-               snd_hda_jack_set_gating_jack(codec, spec->gen.am_entry[1].pin,
-                                            spec->gen.autocfg.hp_pins[0]);
+               snd_hda_jack_set_gating_jack(codec, mic_pin, hp_pin);
        }
 }
 
@@ -3600,6 +3632,11 @@ static const struct hda_fixup alc269_fixups[] = {
 static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1025, 0x029b, "Acer 1810TZ", ALC269_FIXUP_INV_DMIC),
        SND_PCI_QUIRK(0x1025, 0x0349, "Acer AOD260", ALC269_FIXUP_INV_DMIC),
+       SND_PCI_QUIRK(0x1025, 0x047c, "Acer AC700", ALC269_FIXUP_ACER_AC700),
+       SND_PCI_QUIRK(0x1025, 0x0740, "Acer AO725", ALC271_FIXUP_HP_GATE_MIC_JACK),
+       SND_PCI_QUIRK(0x1025, 0x0742, "Acer AO756", ALC271_FIXUP_HP_GATE_MIC_JACK),
+       SND_PCI_QUIRK_VENDOR(0x1025, "Acer Aspire", ALC271_FIXUP_DMIC),
+       SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z),
        SND_PCI_QUIRK(0x1028, 0x05bd, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x05be, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x05c4, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
@@ -3655,11 +3692,6 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x104d, 0x907b, "Sony VAIO", ALC275_FIXUP_SONY_HWEQ),
        SND_PCI_QUIRK(0x104d, 0x9084, "Sony VAIO", ALC275_FIXUP_SONY_HWEQ),
        SND_PCI_QUIRK_VENDOR(0x104d, "Sony VAIO", ALC269_FIXUP_SONY_VAIO),
-       SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z),
-       SND_PCI_QUIRK(0x1025, 0x047c, "Acer AC700", ALC269_FIXUP_ACER_AC700),
-       SND_PCI_QUIRK(0x1025, 0x0740, "Acer AO725", ALC271_FIXUP_HP_GATE_MIC_JACK),
-       SND_PCI_QUIRK(0x1025, 0x0742, "Acer AO756", ALC271_FIXUP_HP_GATE_MIC_JACK),
-       SND_PCI_QUIRK_VENDOR(0x1025, "Acer Aspire", ALC271_FIXUP_DMIC),
        SND_PCI_QUIRK(0x10cf, 0x1475, "Lifebook", ALC269_FIXUP_LIFEBOOK),
        SND_PCI_QUIRK(0x17aa, 0x20f2, "Thinkpad SL410/510", ALC269_FIXUP_SKU_IGNORE),
        SND_PCI_QUIRK(0x17aa, 0x215e, "Thinkpad L512", ALC269_FIXUP_SKU_IGNORE),
@@ -3670,8 +3702,16 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x17aa, 0x21fa, "Thinkpad X230", ALC269_FIXUP_LENOVO_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x21f3, "Thinkpad T430", ALC269_FIXUP_LENOVO_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x21fb, "Thinkpad T430s", ALC269_FIXUP_LENOVO_DOCK),
-       SND_PCI_QUIRK(0x17aa, 0x2208, "Thinkpad T431s", ALC269_FIXUP_LENOVO_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x2203, "Thinkpad X230 Tablet", ALC269_FIXUP_LENOVO_DOCK),
+       SND_PCI_QUIRK(0x17aa, 0x2208, "Thinkpad T431s", ALC269_FIXUP_LENOVO_DOCK),
+       SND_PCI_QUIRK(0x17aa, 0x220c, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+       SND_PCI_QUIRK(0x17aa, 0x2212, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+       SND_PCI_QUIRK(0x17aa, 0x2214, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+       SND_PCI_QUIRK(0x17aa, 0x2215, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+       SND_PCI_QUIRK(0x17aa, 0x5013, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+       SND_PCI_QUIRK(0x17aa, 0x501a, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+       SND_PCI_QUIRK(0x17aa, 0x5026, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+       SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
        SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
        SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
        SND_PCI_QUIRK(0x1b7d, 0xa831, "Ordissimo EVE2 ", ALC269VB_FIXUP_ORDISSIMO_EVE2), /* Also known as Malata PC-B1303 */
@@ -3785,6 +3825,30 @@ static void alc269_fill_coef(struct hda_codec *codec)
        alc_write_coef_idx(codec, 0x4, val | (1<<11));
 }
 
+/* don't clear mic pin; otherwise it results in noise in D3 */
+static void alc283_headset_shutup(struct hda_codec *codec)
+{
+       int i;
+
+       if (codec->bus->shutdown)
+               return;
+
+       for (i = 0; i < codec->init_pins.used; i++) {
+               struct hda_pincfg *pin = snd_array_elem(&codec->init_pins, i);
+               /* use read here for syncing after issuing each verb */
+               if (pin->nid != 0x19)
+                       snd_hda_codec_read(codec, pin->nid, 0,
+                                          AC_VERB_SET_PIN_WIDGET_CONTROL, 0);
+       }
+
+       alc_write_coef_idx(codec, 0x4, 0x0f01); /* power save */
+       alc_write_coef_idx(codec, 0x6, 0x2100); /* power save */
+       snd_hda_codec_write(codec, 0x19, 0,
+                       AC_VERB_SET_PIN_WIDGET_CONTROL,
+                       PIN_VREFHIZ);
+       codec->pins_shutup = 1;
+}
+
 /*
  */
 static int patch_alc269(struct hda_codec *codec)
@@ -3799,6 +3863,9 @@ static int patch_alc269(struct hda_codec *codec)
        spec = codec->spec;
        spec->gen.shared_mic_vref_pin = 0x18;
 
+       if (codec->vendor_id == 0x10ec0283)
+               spec->shutup = alc283_headset_shutup;
+
        snd_hda_pick_fixup(codec, alc269_fixup_models,
                       alc269_fixup_tbl, alc269_fixups);
        snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_PRE_PROBE);
@@ -3872,7 +3939,8 @@ static int patch_alc269(struct hda_codec *codec)
        codec->patch_ops.suspend = alc269_suspend;
        codec->patch_ops.resume = alc269_resume;
 #endif
-       spec->shutup = alc269_shutup;
+       if (!spec->shutup)
+               spec->shutup = alc269_shutup;
 
        snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_PROBE);
 
@@ -4336,6 +4404,7 @@ static const struct hda_fixup alc662_fixups[] = {
 
 static const struct snd_pci_quirk alc662_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1019, 0x9087, "ECS", ALC662_FIXUP_ASUS_MODE2),
+       SND_PCI_QUIRK(0x1025, 0x022f, "Acer Aspire One", ALC662_FIXUP_INV_DMIC),
        SND_PCI_QUIRK(0x1025, 0x0308, "Acer Aspire 8942G", ALC662_FIXUP_ASPIRE),
        SND_PCI_QUIRK(0x1025, 0x031c, "Gateway NV79", ALC662_FIXUP_SKU_IGNORE),
        SND_PCI_QUIRK(0x1025, 0x0349, "eMachines eM250", ALC662_FIXUP_INV_DMIC),
index 6d1924c19abfdb41d1dddc27e119630c24995abc..fba0cef1c47fff7a49632bf663402785e5cf24f8 100644 (file)
@@ -158,6 +158,7 @@ enum {
        STAC_D965_VERBS,
        STAC_DELL_3ST,
        STAC_DELL_BIOS,
+       STAC_DELL_BIOS_AMIC,
        STAC_DELL_BIOS_SPDIF,
        STAC_927X_DELL_DMIC,
        STAC_927X_VOLKNOB,
@@ -3231,8 +3232,6 @@ static const struct hda_fixup stac927x_fixups[] = {
        [STAC_DELL_BIOS] = {
                .type = HDA_FIXUP_PINS,
                .v.pins = (const struct hda_pintbl[]) {
-                       /* configure the analog microphone on some laptops */
-                       { 0x0c, 0x90a79130 },
                        /* correct the front output jack as a hp out */
                        { 0x0f, 0x0221101f },
                        /* correct the front input jack as a mic */
@@ -3242,6 +3241,16 @@ static const struct hda_fixup stac927x_fixups[] = {
                .chained = true,
                .chain_id = STAC_927X_DELL_DMIC,
        },
+       [STAC_DELL_BIOS_AMIC] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = (const struct hda_pintbl[]) {
+                       /* configure the analog microphone on some laptops */
+                       { 0x0c, 0x90a79130 },
+                       {}
+               },
+               .chained = true,
+               .chain_id = STAC_DELL_BIOS,
+       },
        [STAC_DELL_BIOS_SPDIF] = {
                .type = HDA_FIXUP_PINS,
                .v.pins = (const struct hda_pintbl[]) {
@@ -3270,6 +3279,7 @@ static const struct hda_model_fixup stac927x_models[] = {
        { .id = STAC_D965_5ST_NO_FP, .name = "5stack-no-fp" },
        { .id = STAC_DELL_3ST, .name = "dell-3stack" },
        { .id = STAC_DELL_BIOS, .name = "dell-bios" },
+       { .id = STAC_DELL_BIOS_AMIC, .name = "dell-bios-amic" },
        { .id = STAC_927X_VOLKNOB, .name = "volknob" },
        {}
 };
index e2481baddc70ff6ff265ee0ed118377869847db6..0bc20ef5687ac3558996ca3fd56a741c75bc042b 100644 (file)
@@ -207,9 +207,9 @@ static void vt1708_stop_hp_work(struct hda_codec *codec)
                return;
        if (spec->hp_work_active) {
                snd_hda_codec_write(codec, 0x1, 0, 0xf81, 1);
+               codec->jackpoll_interval = 0;
                cancel_delayed_work_sync(&codec->jackpoll_work);
                spec->hp_work_active = false;
-               codec->jackpoll_interval = 0;
        }
 }
 
index 2a8ad9d1a2aea290cf433c0fccb4218ab78cf5ff..4e9a5563eeca8c8f634994d2338b02cfd8db5bb4 100644 (file)
@@ -198,6 +198,31 @@ MODULE_PARM_DESC(enable, "Enable RME Digi96 soundcard.");
 #define RME96_AD1852_VOL_BITS 14
 #define RME96_AD1855_VOL_BITS 10
 
+/* Defines for snd_rme96_trigger */
+#define RME96_TB_START_PLAYBACK 1
+#define RME96_TB_START_CAPTURE 2
+#define RME96_TB_STOP_PLAYBACK 4
+#define RME96_TB_STOP_CAPTURE 8
+#define RME96_TB_RESET_PLAYPOS 16
+#define RME96_TB_RESET_CAPTUREPOS 32
+#define RME96_TB_CLEAR_PLAYBACK_IRQ 64
+#define RME96_TB_CLEAR_CAPTURE_IRQ 128
+#define RME96_RESUME_PLAYBACK  (RME96_TB_START_PLAYBACK)
+#define RME96_RESUME_CAPTURE   (RME96_TB_START_CAPTURE)
+#define RME96_RESUME_BOTH      (RME96_RESUME_PLAYBACK \
+                               | RME96_RESUME_CAPTURE)
+#define RME96_START_PLAYBACK   (RME96_TB_START_PLAYBACK \
+                               | RME96_TB_RESET_PLAYPOS)
+#define RME96_START_CAPTURE    (RME96_TB_START_CAPTURE \
+                               | RME96_TB_RESET_CAPTUREPOS)
+#define RME96_START_BOTH       (RME96_START_PLAYBACK \
+                               | RME96_START_CAPTURE)
+#define RME96_STOP_PLAYBACK    (RME96_TB_STOP_PLAYBACK \
+                               | RME96_TB_CLEAR_PLAYBACK_IRQ)
+#define RME96_STOP_CAPTURE     (RME96_TB_STOP_CAPTURE \
+                               | RME96_TB_CLEAR_CAPTURE_IRQ)
+#define RME96_STOP_BOTH                (RME96_STOP_PLAYBACK \
+                               | RME96_STOP_CAPTURE)
 
 struct rme96 {
        spinlock_t    lock;
@@ -344,6 +369,7 @@ static struct snd_pcm_hardware snd_rme96_playback_spdif_info =
 {
        .info =              (SNDRV_PCM_INFO_MMAP_IOMEM |
                              SNDRV_PCM_INFO_MMAP_VALID |
+                             SNDRV_PCM_INFO_SYNC_START |
                              SNDRV_PCM_INFO_INTERLEAVED |
                              SNDRV_PCM_INFO_PAUSE),
        .formats =           (SNDRV_PCM_FMTBIT_S16_LE |
@@ -373,6 +399,7 @@ static struct snd_pcm_hardware snd_rme96_capture_spdif_info =
 {
        .info =              (SNDRV_PCM_INFO_MMAP_IOMEM |
                              SNDRV_PCM_INFO_MMAP_VALID |
+                             SNDRV_PCM_INFO_SYNC_START |
                              SNDRV_PCM_INFO_INTERLEAVED |
                              SNDRV_PCM_INFO_PAUSE),
        .formats =           (SNDRV_PCM_FMTBIT_S16_LE |
@@ -402,6 +429,7 @@ static struct snd_pcm_hardware snd_rme96_playback_adat_info =
 {
        .info =              (SNDRV_PCM_INFO_MMAP_IOMEM |
                              SNDRV_PCM_INFO_MMAP_VALID |
+                             SNDRV_PCM_INFO_SYNC_START |
                              SNDRV_PCM_INFO_INTERLEAVED |
                              SNDRV_PCM_INFO_PAUSE),
        .formats =           (SNDRV_PCM_FMTBIT_S16_LE |
@@ -427,6 +455,7 @@ static struct snd_pcm_hardware snd_rme96_capture_adat_info =
 {
        .info =              (SNDRV_PCM_INFO_MMAP_IOMEM |
                              SNDRV_PCM_INFO_MMAP_VALID |
+                             SNDRV_PCM_INFO_SYNC_START |
                              SNDRV_PCM_INFO_INTERLEAVED |
                              SNDRV_PCM_INFO_PAUSE),
        .formats =           (SNDRV_PCM_FMTBIT_S16_LE |
@@ -1045,54 +1074,35 @@ snd_rme96_capture_hw_params(struct snd_pcm_substream *substream,
 }
 
 static void
-snd_rme96_playback_start(struct rme96 *rme96,
-                        int from_pause)
+snd_rme96_trigger(struct rme96 *rme96,
+                 int op)
 {
-       if (!from_pause) {
+       if (op & RME96_TB_RESET_PLAYPOS)
                writel(0, rme96->iobase + RME96_IO_RESET_PLAY_POS);
-       }
-
-       rme96->wcreg |= RME96_WCR_START;
-       writel(rme96->wcreg, rme96->iobase + RME96_IO_CONTROL_REGISTER);
-}
-
-static void
-snd_rme96_capture_start(struct rme96 *rme96,
-                       int from_pause)
-{
-       if (!from_pause) {
+       if (op & RME96_TB_RESET_CAPTUREPOS)
                writel(0, rme96->iobase + RME96_IO_RESET_REC_POS);
-       }
-
-       rme96->wcreg |= RME96_WCR_START_2;
+       if (op & RME96_TB_CLEAR_PLAYBACK_IRQ) {
+               rme96->rcreg = readl(rme96->iobase + RME96_IO_CONTROL_REGISTER);
+               if (rme96->rcreg & RME96_RCR_IRQ)
+                       writel(0, rme96->iobase + RME96_IO_CONFIRM_PLAY_IRQ);
+       }
+       if (op & RME96_TB_CLEAR_CAPTURE_IRQ) {
+               rme96->rcreg = readl(rme96->iobase + RME96_IO_CONTROL_REGISTER);
+               if (rme96->rcreg & RME96_RCR_IRQ_2)
+                       writel(0, rme96->iobase + RME96_IO_CONFIRM_REC_IRQ);
+       }
+       if (op & RME96_TB_START_PLAYBACK)
+               rme96->wcreg |= RME96_WCR_START;
+       if (op & RME96_TB_STOP_PLAYBACK)
+               rme96->wcreg &= ~RME96_WCR_START;
+       if (op & RME96_TB_START_CAPTURE)
+               rme96->wcreg |= RME96_WCR_START_2;
+       if (op & RME96_TB_STOP_CAPTURE)
+               rme96->wcreg &= ~RME96_WCR_START_2;
        writel(rme96->wcreg, rme96->iobase + RME96_IO_CONTROL_REGISTER);
 }
 
-static void
-snd_rme96_playback_stop(struct rme96 *rme96)
-{
-       /*
-        * Check if there is an unconfirmed IRQ, if so confirm it, or else
-        * the hardware will not stop generating interrupts
-        */
-       rme96->rcreg = readl(rme96->iobase + RME96_IO_CONTROL_REGISTER);
-       if (rme96->rcreg & RME96_RCR_IRQ) {
-               writel(0, rme96->iobase + RME96_IO_CONFIRM_PLAY_IRQ);
-       }       
-       rme96->wcreg &= ~RME96_WCR_START;
-       writel(rme96->wcreg, rme96->iobase + RME96_IO_CONTROL_REGISTER);
-}
 
-static void
-snd_rme96_capture_stop(struct rme96 *rme96)
-{
-       rme96->rcreg = readl(rme96->iobase + RME96_IO_CONTROL_REGISTER);
-       if (rme96->rcreg & RME96_RCR_IRQ_2) {
-               writel(0, rme96->iobase + RME96_IO_CONFIRM_REC_IRQ);
-       }       
-       rme96->wcreg &= ~RME96_WCR_START_2;
-       writel(rme96->wcreg, rme96->iobase + RME96_IO_CONTROL_REGISTER);
-}
 
 static irqreturn_t
 snd_rme96_interrupt(int irq,
@@ -1155,6 +1165,7 @@ snd_rme96_playback_spdif_open(struct snd_pcm_substream *substream)
        struct rme96 *rme96 = snd_pcm_substream_chip(substream);
        struct snd_pcm_runtime *runtime = substream->runtime;
 
+       snd_pcm_set_sync(substream);
        spin_lock_irq(&rme96->lock);    
         if (rme96->playback_substream != NULL) {
                spin_unlock_irq(&rme96->lock);
@@ -1191,6 +1202,7 @@ snd_rme96_capture_spdif_open(struct snd_pcm_substream *substream)
        struct rme96 *rme96 = snd_pcm_substream_chip(substream);
        struct snd_pcm_runtime *runtime = substream->runtime;
 
+       snd_pcm_set_sync(substream);
        runtime->hw = snd_rme96_capture_spdif_info;
         if (snd_rme96_getinputtype(rme96) != RME96_INPUT_ANALOG &&
             (rate = snd_rme96_capture_getrate(rme96, &isadat)) > 0)
@@ -1222,6 +1234,7 @@ snd_rme96_playback_adat_open(struct snd_pcm_substream *substream)
        struct rme96 *rme96 = snd_pcm_substream_chip(substream);
        struct snd_pcm_runtime *runtime = substream->runtime;        
        
+       snd_pcm_set_sync(substream);
        spin_lock_irq(&rme96->lock);    
         if (rme96->playback_substream != NULL) {
                spin_unlock_irq(&rme96->lock);
@@ -1253,6 +1266,7 @@ snd_rme96_capture_adat_open(struct snd_pcm_substream *substream)
        struct rme96 *rme96 = snd_pcm_substream_chip(substream);
        struct snd_pcm_runtime *runtime = substream->runtime;
 
+       snd_pcm_set_sync(substream);
        runtime->hw = snd_rme96_capture_adat_info;
         if (snd_rme96_getinputtype(rme96) == RME96_INPUT_ANALOG) {
                 /* makes no sense to use analog input. Note that analog
@@ -1288,7 +1302,7 @@ snd_rme96_playback_close(struct snd_pcm_substream *substream)
 
        spin_lock_irq(&rme96->lock);    
        if (RME96_ISPLAYING(rme96)) {
-               snd_rme96_playback_stop(rme96);
+               snd_rme96_trigger(rme96, RME96_STOP_PLAYBACK);
        }
        rme96->playback_substream = NULL;
        rme96->playback_periodsize = 0;
@@ -1309,7 +1323,7 @@ snd_rme96_capture_close(struct snd_pcm_substream *substream)
        
        spin_lock_irq(&rme96->lock);    
        if (RME96_ISRECORDING(rme96)) {
-               snd_rme96_capture_stop(rme96);
+               snd_rme96_trigger(rme96, RME96_STOP_CAPTURE);
        }
        rme96->capture_substream = NULL;
        rme96->capture_periodsize = 0;
@@ -1324,7 +1338,7 @@ snd_rme96_playback_prepare(struct snd_pcm_substream *substream)
        
        spin_lock_irq(&rme96->lock);    
        if (RME96_ISPLAYING(rme96)) {
-               snd_rme96_playback_stop(rme96);
+               snd_rme96_trigger(rme96, RME96_STOP_PLAYBACK);
        }
        writel(0, rme96->iobase + RME96_IO_RESET_PLAY_POS);
        spin_unlock_irq(&rme96->lock);
@@ -1338,7 +1352,7 @@ snd_rme96_capture_prepare(struct snd_pcm_substream *substream)
        
        spin_lock_irq(&rme96->lock);    
        if (RME96_ISRECORDING(rme96)) {
-               snd_rme96_capture_stop(rme96);
+               snd_rme96_trigger(rme96, RME96_STOP_CAPTURE);
        }
        writel(0, rme96->iobase + RME96_IO_RESET_REC_POS);
        spin_unlock_irq(&rme96->lock);
@@ -1350,41 +1364,53 @@ snd_rme96_playback_trigger(struct snd_pcm_substream *substream,
                           int cmd)
 {
        struct rme96 *rme96 = snd_pcm_substream_chip(substream);
+       struct snd_pcm_substream *s;
+       bool sync;
+
+       snd_pcm_group_for_each_entry(s, substream) {
+               if (snd_pcm_substream_chip(s) == rme96)
+                       snd_pcm_trigger_done(s, substream);
+       }
+
+       sync = (rme96->playback_substream && rme96->capture_substream) &&
+              (rme96->playback_substream->group ==
+               rme96->capture_substream->group);
 
        switch (cmd) {
        case SNDRV_PCM_TRIGGER_START:
                if (!RME96_ISPLAYING(rme96)) {
-                       if (substream != rme96->playback_substream) {
+                       if (substream != rme96->playback_substream)
                                return -EBUSY;
-                       }
-                       snd_rme96_playback_start(rme96, 0);
+                       snd_rme96_trigger(rme96, sync ? RME96_START_BOTH
+                                                : RME96_START_PLAYBACK);
                }
                break;
 
        case SNDRV_PCM_TRIGGER_STOP:
                if (RME96_ISPLAYING(rme96)) {
-                       if (substream != rme96->playback_substream) {
+                       if (substream != rme96->playback_substream)
                                return -EBUSY;
-                       }
-                       snd_rme96_playback_stop(rme96);
+                       snd_rme96_trigger(rme96, sync ? RME96_STOP_BOTH
+                                                :  RME96_STOP_PLAYBACK);
                }
                break;
 
        case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
-               if (RME96_ISPLAYING(rme96)) {
-                       snd_rme96_playback_stop(rme96);
-               }
+               if (RME96_ISPLAYING(rme96))
+                       snd_rme96_trigger(rme96, sync ? RME96_STOP_BOTH
+                                                : RME96_STOP_PLAYBACK);
                break;
 
        case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
-               if (!RME96_ISPLAYING(rme96)) {
-                       snd_rme96_playback_start(rme96, 1);
-               }
+               if (!RME96_ISPLAYING(rme96))
+                       snd_rme96_trigger(rme96, sync ? RME96_RESUME_BOTH
+                                                : RME96_RESUME_PLAYBACK);
                break;
-               
+
        default:
                return -EINVAL;
        }
+
        return 0;
 }
 
@@ -1393,38 +1419,49 @@ snd_rme96_capture_trigger(struct snd_pcm_substream *substream,
                          int cmd)
 {
        struct rme96 *rme96 = snd_pcm_substream_chip(substream);
+       struct snd_pcm_substream *s;
+       bool sync;
+
+       snd_pcm_group_for_each_entry(s, substream) {
+               if (snd_pcm_substream_chip(s) == rme96)
+                       snd_pcm_trigger_done(s, substream);
+       }
+
+       sync = (rme96->playback_substream && rme96->capture_substream) &&
+              (rme96->playback_substream->group ==
+               rme96->capture_substream->group);
 
        switch (cmd) {
        case SNDRV_PCM_TRIGGER_START:
                if (!RME96_ISRECORDING(rme96)) {
-                       if (substream != rme96->capture_substream) {
+                       if (substream != rme96->capture_substream)
                                return -EBUSY;
-                       }
-                       snd_rme96_capture_start(rme96, 0);
+                       snd_rme96_trigger(rme96, sync ? RME96_START_BOTH
+                                                : RME96_START_CAPTURE);
                }
                break;
 
        case SNDRV_PCM_TRIGGER_STOP:
                if (RME96_ISRECORDING(rme96)) {
-                       if (substream != rme96->capture_substream) {
+                       if (substream != rme96->capture_substream)
                                return -EBUSY;
-                       }
-                       snd_rme96_capture_stop(rme96);
+                       snd_rme96_trigger(rme96, sync ? RME96_STOP_BOTH
+                                                : RME96_STOP_CAPTURE);
                }
                break;
 
        case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
-               if (RME96_ISRECORDING(rme96)) {
-                       snd_rme96_capture_stop(rme96);
-               }
+               if (RME96_ISRECORDING(rme96))
+                       snd_rme96_trigger(rme96, sync ? RME96_STOP_BOTH
+                                                : RME96_STOP_CAPTURE);
                break;
 
        case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
-               if (!RME96_ISRECORDING(rme96)) {
-                       snd_rme96_capture_start(rme96, 1);
-               }
+               if (!RME96_ISRECORDING(rme96))
+                       snd_rme96_trigger(rme96, sync ? RME96_RESUME_BOTH
+                                                : RME96_RESUME_CAPTURE);
                break;
-               
+
        default:
                return -EINVAL;
        }
@@ -1505,8 +1542,7 @@ snd_rme96_free(void *private_data)
                return;
        }
        if (rme96->irq >= 0) {
-               snd_rme96_playback_stop(rme96);
-               snd_rme96_capture_stop(rme96);
+               snd_rme96_trigger(rme96, RME96_STOP_BOTH);
                rme96->areg &= ~RME96_AR_DAC_EN;
                writel(rme96->areg, rme96->iobase + RME96_IO_ADDITIONAL_REG);
                free_irq(rme96->irq, (void *)rme96);
@@ -1606,8 +1642,7 @@ snd_rme96_create(struct rme96 *rme96)
        rme96->capture_periodsize = 0;
        
        /* make sure playback/capture is stopped, if by some reason active */
-       snd_rme96_playback_stop(rme96);
-       snd_rme96_capture_stop(rme96);
+       snd_rme96_trigger(rme96, RME96_STOP_BOTH);
        
        /* set default values in registers */
        rme96->wcreg =
index bd501931ee2341f1a6f913dc1b21a967268de704..3cde55b753e26086c13569dbee92847db02c8a9c 100644 (file)
  *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
  *
  */
+
+/* *************    Register Documentation   *******************************************************
+ *
+ * Work in progress! Documentation is based on the code in this file.
+ *
+ * --------- HDSPM_controlRegister ---------
+ * :7654.3210:7654.3210:7654.3210:7654.3210: bit number per byte
+ * :||||.||||:||||.||||:||||.||||:||||.||||:
+ * :3322.2222:2222.1111:1111.1100:0000.0000: bit number
+ * :1098.7654:3210.9876:5432.1098:7654.3210: 0..31
+ * :||||.||||:||||.||||:||||.||||:||||.||||:
+ * :8421.8421:8421.8421:8421.8421:8421.8421: hex digit
+ * :    .    :    .    :    .    :  x .    :  HDSPM_AudioInterruptEnable \_ setting both bits
+ * :    .    :    .    :    .    :    .   x:  HDSPM_Start                /  enables audio IO
+ * :    .    :    .    :    .    :   x.    :  HDSPM_ClockModeMaster - 1: Master, 0: Slave
+ * :    .    :    .    :    .    :    .210 :  HDSPM_LatencyMask - 3 Bit value for latency
+ * :    .    :    .    :    .    :    .    :      0:64, 1:128, 2:256, 3:512,
+ * :    .    :    .    :    .    :    .    :      4:1024, 5:2048, 6:4096, 7:8192
+ * :x   .    :    .    :    .   x:xx  .    :  HDSPM_FrequencyMask
+ * :    .    :    .    :    .    :10  .    :  HDSPM_Frequency1|HDSPM_Frequency0: 1=32K,2=44.1K,3=48K,0=??
+ * :    .    :    .    :    .   x:    .    :  <MADI> HDSPM_DoubleSpeed
+ * :x   .    :    .    :    .    :    .    :  <MADI> HDSPM_QuadSpeed
+ * :    .  3 :    .  10:  2 .    :    .    :  HDSPM_SyncRefMask :
+ * :    .    :    .   x:    .    :    .    :  HDSPM_SyncRef0
+ * :    .    :    .  x :    .    :    .    :  HDSPM_SyncRef1
+ * :    .    :    .    :  x .    :    .    :  <AES32> HDSPM_SyncRef2
+ * :    .  x :    .    :    .    :    .    :  <AES32> HDSPM_SyncRef3
+ * :    .    :    .  10:    .    :    .    :  <MADI> sync ref: 0:WC, 1:Madi, 2:TCO, 3:SyncIn
+ * :    .  3 :    .  10:  2 .    :    .    :  <AES32>  0:WC, 1:AES1 ... 8:AES8, 9: TCO, 10:SyncIn?
+ * :    .  x :    .    :    .    :    .    :  <MADIe> HDSPe_FLOAT_FORMAT
+ * :    .    :    .    : x  .    :    .    :  <MADI> HDSPM_InputSelect0 : 0=optical,1=coax
+ * :    .    :    .    :x   .    :    .    :  <MADI> HDSPM_InputSelect1
+ * :    .    :    .x   :    .    :    .    :  <MADI> HDSPM_clr_tms
+ * :    .    :    .    :    . x  :    .    :  <MADI> HDSPM_TX_64ch
+ * :    .    :    .    :    . x  :    .    :  <AES32> HDSPM_Emphasis
+ * :    .    :    .    :    .x   :    .    :  <MADI> HDSPM_AutoInp
+ * :    .    :    . x  :    .    :    .    :  <MADI> HDSPM_SMUX
+ * :    .    :    .x   :    .    :    .    :  <MADI> HDSPM_clr_tms
+ * :    .    :   x.    :    .    :    .    :  <MADI> HDSPM_taxi_reset
+ * :    .   x:    .    :    .    :    .    :  <MADI> HDSPM_LineOut
+ * :    .   x:    .    :    .    :    .    :  <AES32> ??????????????????
+ * :    .    :   x.    :    .    :    .    :  <AES32> HDSPM_WCK48
+ * :    .    :    .    :    .x   :    .    :  <AES32> HDSPM_Dolby
+ * :    .    : x  .    :    .    :    .    :  HDSPM_Midi0InterruptEnable
+ * :    .    :x   .    :    .    :    .    :  HDSPM_Midi1InterruptEnable
+ * :    .    :  x .    :    .    :    .    :  HDSPM_Midi2InterruptEnable
+ * :    . x  :    .    :    .    :    .    :  <MADI> HDSPM_Midi3InterruptEnable
+ * :    . x  :    .    :    .    :    .    :  <AES32> HDSPM_DS_DoubleWire
+ * :    .x   :    .    :    .    :    .    :  <AES32> HDSPM_QS_DoubleWire
+ * :   x.    :    .    :    .    :    .    :  <AES32> HDSPM_QS_QuadWire
+ * :    .    :    .    :    .  x :    .    :  <AES32> HDSPM_Professional
+ * : x  .    :    .    :    .    :    .    :  HDSPM_wclk_sel
+ * :    .    :    .    :    .    :    .    :
+ * :7654.3210:7654.3210:7654.3210:7654.3210: bit number per byte
+ * :||||.||||:||||.||||:||||.||||:||||.||||:
+ * :3322.2222:2222.1111:1111.1100:0000.0000: bit number
+ * :1098.7654:3210.9876:5432.1098:7654.3210: 0..31
+ * :||||.||||:||||.||||:||||.||||:||||.||||:
+ * :8421.8421:8421.8421:8421.8421:8421.8421:hex digit
+ *
+ *
+ *
+ * AIO / RayDAT only
+ *
+ * ------------ HDSPM_WR_SETTINGS ----------
+ * :3322.2222:2222.1111:1111.1100:0000.0000: bit number per byte
+ * :1098.7654:3210.9876:5432.1098:7654.3210:
+ * :||||.||||:||||.||||:||||.||||:||||.||||: bit number
+ * :7654.3210:7654.3210:7654.3210:7654.3210: 0..31
+ * :||||.||||:||||.||||:||||.||||:||||.||||:
+ * :8421.8421:8421.8421:8421.8421:8421.8421: hex digit
+ * :    .    :    .    :    .    :    .   x: HDSPM_c0Master 1: Master, 0: Slave
+ * :    .    :    .    :    .    :    .  x : HDSPM_c0_SyncRef0
+ * :    .    :    .    :    .    :    . x  : HDSPM_c0_SyncRef1
+ * :    .    :    .    :    .    :    .x   : HDSPM_c0_SyncRef2
+ * :    .    :    .    :    .    :   x.    : HDSPM_c0_SyncRef3
+ * :    .    :    .    :    .    :   3.210 : HDSPM_c0_SyncRefMask:
+ * :    .    :    .    :    .    :    .    :  RayDat: 0:WC, 1:AES, 2:SPDIF, 3..6: ADAT1..4,
+ * :    .    :    .    :    .    :    .    :          9:TCO, 10:SyncIn
+ * :    .    :    .    :    .    :    .    :  AIO: 0:WC, 1:AES, 2: SPDIF, 3: ATAT,
+ * :    .    :    .    :    .    :    .    :          9:TCO, 10:SyncIn
+ * :    .    :    .    :    .    :    .    :
+ * :    .    :    .    :    .    :    .    :
+ * :3322.2222:2222.1111:1111.1100:0000.0000: bit number per byte
+ * :1098.7654:3210.9876:5432.1098:7654.3210:
+ * :||||.||||:||||.||||:||||.||||:||||.||||: bit number
+ * :7654.3210:7654.3210:7654.3210:7654.3210: 0..31
+ * :||||.||||:||||.||||:||||.||||:||||.||||:
+ * :8421.8421:8421.8421:8421.8421:8421.8421: hex digit
+ *
+ */
 #include <linux/init.h>
 #include <linux/delay.h>
 #include <linux/interrupt.h>
@@ -95,7 +186,7 @@ MODULE_SUPPORTED_DEVICE("{{RME HDSPM-MADI}}");
 #define HDSPM_controlRegister       64
 #define HDSPM_interruptConfirmation  96
 #define HDSPM_control2Reg           256  /* not in specs ???????? */
-#define HDSPM_freqReg                256  /* for AES32 */
+#define HDSPM_freqReg                256  /* for setting arbitrary clock values (DDS feature) */
 #define HDSPM_midiDataOut0          352  /* just believe in old code */
 #define HDSPM_midiDataOut1          356
 #define HDSPM_eeprom_wr                     384  /* for AES32 */
@@ -258,6 +349,25 @@ MODULE_SUPPORTED_DEVICE("{{RME HDSPM-MADI}}");
 
 #define HDSPM_wclk_sel (1<<30)
 
+/* additional control register bits for AIO*/
+#define HDSPM_c0_Wck48                         0x20 /* also RayDAT */
+#define HDSPM_c0_Input0                                0x1000
+#define HDSPM_c0_Input1                                0x2000
+#define HDSPM_c0_Spdif_Opt                     0x4000
+#define HDSPM_c0_Pro                           0x8000
+#define HDSPM_c0_clr_tms                       0x10000
+#define HDSPM_c0_AEB1                          0x20000
+#define HDSPM_c0_AEB2                          0x40000
+#define HDSPM_c0_LineOut                       0x80000
+#define HDSPM_c0_AD_GAIN0                      0x100000
+#define HDSPM_c0_AD_GAIN1                      0x200000
+#define HDSPM_c0_DA_GAIN0                      0x400000
+#define HDSPM_c0_DA_GAIN1                      0x800000
+#define HDSPM_c0_PH_GAIN0                      0x1000000
+#define HDSPM_c0_PH_GAIN1                      0x2000000
+#define HDSPM_c0_Sym6db                                0x4000000
+
+
 /* --- bit helper defines */
 #define HDSPM_LatencyMask    (HDSPM_Latency0|HDSPM_Latency1|HDSPM_Latency2)
 #define HDSPM_FrequencyMask  (HDSPM_Frequency0|HDSPM_Frequency1|\
@@ -341,11 +451,11 @@ MODULE_SUPPORTED_DEVICE("{{RME HDSPM-MADI}}");
 #define HDSPM_madiLock           (1<<3)        /* MADI Locked =1, no=0 */
 #define HDSPM_madiSync          (1<<18) /* MADI is in sync */
 
-#define HDSPM_tcoLock    0x00000020 /* Optional TCO locked status FOR HDSPe MADI! */
-#define HDSPM_tcoSync    0x10000000 /* Optional TCO sync status */
+#define HDSPM_tcoLockMadi    0x00000020 /* Optional TCO locked status for HDSPe MADI*/
+#define HDSPM_tcoSync    0x10000000 /* Optional TCO sync status for HDSPe MADI and AES32!*/
 
-#define HDSPM_syncInLock 0x00010000 /* Sync In lock status FOR HDSPe MADI! */
-#define HDSPM_syncInSync 0x00020000 /* Sync In sync status FOR HDSPe MADI! */
+#define HDSPM_syncInLock 0x00010000 /* Sync In lock status for HDSPe MADI! */
+#define HDSPM_syncInSync 0x00020000 /* Sync In sync status for HDSPe MADI! */
 
 #define HDSPM_BufferPositionMask 0x000FFC0 /* Bit 6..15 : h/w buffer pointer */
                        /* since 64byte accurate, last 6 bits are not used */
@@ -363,7 +473,7 @@ MODULE_SUPPORTED_DEVICE("{{RME HDSPM-MADI}}");
                                         * Interrupt
                                         */
 #define HDSPM_tco_detect         0x08000000
-#define HDSPM_tco_lock          0x20000000
+#define HDSPM_tcoLockAes         0x20000000 /* Optional TCO locked status for HDSPe AES */
 
 #define HDSPM_s2_tco_detect      0x00000040
 #define HDSPM_s2_AEBO_D          0x00000080
@@ -461,7 +571,9 @@ MODULE_SUPPORTED_DEVICE("{{RME HDSPM-MADI}}");
 #define HDSPM_AES32_AUTOSYNC_FROM_AES6 6
 #define HDSPM_AES32_AUTOSYNC_FROM_AES7 7
 #define HDSPM_AES32_AUTOSYNC_FROM_AES8 8
-#define HDSPM_AES32_AUTOSYNC_FROM_NONE 9
+#define HDSPM_AES32_AUTOSYNC_FROM_TCO 9
+#define HDSPM_AES32_AUTOSYNC_FROM_SYNC_IN 10
+#define HDSPM_AES32_AUTOSYNC_FROM_NONE 11
 
 /*  status2 */
 /* HDSPM_LockAES_bit is given by HDSPM_LockAES >> (AES# - 1) */
@@ -537,36 +649,39 @@ MODULE_SUPPORTED_DEVICE("{{RME HDSPM-MADI}}");
 /* names for speed modes */
 static char *hdspm_speed_names[] = { "single", "double", "quad" };
 
-static char *texts_autosync_aes_tco[] = { "Word Clock",
+static const char *const texts_autosync_aes_tco[] = { "Word Clock",
                                          "AES1", "AES2", "AES3", "AES4",
                                          "AES5", "AES6", "AES7", "AES8",
-                                         "TCO" };
-static char *texts_autosync_aes[] = { "Word Clock",
+                                         "TCO", "Sync In"
+};
+static const char *const texts_autosync_aes[] = { "Word Clock",
                                      "AES1", "AES2", "AES3", "AES4",
-                                     "AES5", "AES6", "AES7", "AES8" };
-static char *texts_autosync_madi_tco[] = { "Word Clock",
+                                     "AES5", "AES6", "AES7", "AES8",
+                                     "Sync In"
+};
+static const char *const texts_autosync_madi_tco[] = { "Word Clock",
                                           "MADI", "TCO", "Sync In" };
-static char *texts_autosync_madi[] = { "Word Clock",
+static const char *const texts_autosync_madi[] = { "Word Clock",
                                       "MADI", "Sync In" };
 
-static char *texts_autosync_raydat_tco[] = {
+static const char *const texts_autosync_raydat_tco[] = {
        "Word Clock",
        "ADAT 1", "ADAT 2", "ADAT 3", "ADAT 4",
        "AES", "SPDIF", "TCO", "Sync In"
 };
-static char *texts_autosync_raydat[] = {
+static const char *const texts_autosync_raydat[] = {
        "Word Clock",
        "ADAT 1", "ADAT 2", "ADAT 3", "ADAT 4",
        "AES", "SPDIF", "Sync In"
 };
-static char *texts_autosync_aio_tco[] = {
+static const char *const texts_autosync_aio_tco[] = {
        "Word Clock",
        "ADAT", "AES", "SPDIF", "TCO", "Sync In"
 };
-static char *texts_autosync_aio[] = { "Word Clock",
+static const char *const texts_autosync_aio[] = { "Word Clock",
                                      "ADAT", "AES", "SPDIF", "Sync In" };
 
-static char *texts_freq[] = {
+static const char *const texts_freq[] = {
        "No Lock",
        "32 kHz",
        "44.1 kHz",
@@ -629,7 +744,8 @@ static char *texts_ports_aio_in_ss[] = {
        "AES.L", "AES.R",
        "SPDIF.L", "SPDIF.R",
        "ADAT.1", "ADAT.2", "ADAT.3", "ADAT.4", "ADAT.5", "ADAT.6",
-       "ADAT.7", "ADAT.8"
+       "ADAT.7", "ADAT.8",
+       "AEB.1", "AEB.2", "AEB.3", "AEB.4"
 };
 
 static char *texts_ports_aio_out_ss[] = {
@@ -638,14 +754,16 @@ static char *texts_ports_aio_out_ss[] = {
        "SPDIF.L", "SPDIF.R",
        "ADAT.1", "ADAT.2", "ADAT.3", "ADAT.4", "ADAT.5", "ADAT.6",
        "ADAT.7", "ADAT.8",
-       "Phone.L", "Phone.R"
+       "Phone.L", "Phone.R",
+       "AEB.1", "AEB.2", "AEB.3", "AEB.4"
 };
 
 static char *texts_ports_aio_in_ds[] = {
        "Analogue.L", "Analogue.R",
        "AES.L", "AES.R",
        "SPDIF.L", "SPDIF.R",
-       "ADAT.1", "ADAT.2", "ADAT.3", "ADAT.4"
+       "ADAT.1", "ADAT.2", "ADAT.3", "ADAT.4",
+       "AEB.1", "AEB.2", "AEB.3", "AEB.4"
 };
 
 static char *texts_ports_aio_out_ds[] = {
@@ -653,14 +771,16 @@ static char *texts_ports_aio_out_ds[] = {
        "AES.L", "AES.R",
        "SPDIF.L", "SPDIF.R",
        "ADAT.1", "ADAT.2", "ADAT.3", "ADAT.4",
-       "Phone.L", "Phone.R"
+       "Phone.L", "Phone.R",
+       "AEB.1", "AEB.2", "AEB.3", "AEB.4"
 };
 
 static char *texts_ports_aio_in_qs[] = {
        "Analogue.L", "Analogue.R",
        "AES.L", "AES.R",
        "SPDIF.L", "SPDIF.R",
-       "ADAT.1", "ADAT.2", "ADAT.3", "ADAT.4"
+       "ADAT.1", "ADAT.2", "ADAT.3", "ADAT.4",
+       "AEB.1", "AEB.2", "AEB.3", "AEB.4"
 };
 
 static char *texts_ports_aio_out_qs[] = {
@@ -668,7 +788,8 @@ static char *texts_ports_aio_out_qs[] = {
        "AES.L", "AES.R",
        "SPDIF.L", "SPDIF.R",
        "ADAT.1", "ADAT.2", "ADAT.3", "ADAT.4",
-       "Phone.L", "Phone.R"
+       "Phone.L", "Phone.R",
+       "AEB.1", "AEB.2", "AEB.3", "AEB.4"
 };
 
 static char *texts_ports_aes32[] = {
@@ -745,8 +866,8 @@ static char channel_map_aio_in_ss[HDSPM_MAX_CHANNELS] = {
        8, 9,                   /* aes in, */
        10, 11,                 /* spdif in */
        12, 13, 14, 15, 16, 17, 18, 19, /* ADAT in */
-       -1, -1,
-       -1, -1, -1, -1, -1, -1, -1, -1,
+       2, 3, 4, 5,             /* AEB */
+       -1, -1, -1, -1, -1, -1,
        -1, -1, -1, -1, -1, -1, -1, -1,
        -1, -1, -1, -1, -1, -1, -1, -1,
        -1, -1, -1, -1, -1, -1, -1, -1,
@@ -760,7 +881,8 @@ static char channel_map_aio_out_ss[HDSPM_MAX_CHANNELS] = {
        10, 11,                 /* spdif out */
        12, 13, 14, 15, 16, 17, 18, 19, /* ADAT out */
        6, 7,                   /* phone out */
-       -1, -1, -1, -1, -1, -1, -1, -1,
+       2, 3, 4, 5,             /* AEB */
+       -1, -1, -1, -1,
        -1, -1, -1, -1, -1, -1, -1, -1,
        -1, -1, -1, -1, -1, -1, -1, -1,
        -1, -1, -1, -1, -1, -1, -1, -1,
@@ -773,7 +895,8 @@ static char channel_map_aio_in_ds[HDSPM_MAX_CHANNELS] = {
        8, 9,                   /* aes in */
        10, 11,                 /* spdif in */
        12, 14, 16, 18,         /* adat in */
-       -1, -1, -1, -1, -1, -1,
+       2, 3, 4, 5,             /* AEB */
+       -1, -1,
        -1, -1, -1, -1, -1, -1, -1, -1,
        -1, -1, -1, -1, -1, -1, -1, -1,
        -1, -1, -1, -1, -1, -1, -1, -1,
@@ -788,7 +911,7 @@ static char channel_map_aio_out_ds[HDSPM_MAX_CHANNELS] = {
        10, 11,                 /* spdif out */
        12, 14, 16, 18,         /* adat out */
        6, 7,                   /* phone out */
-       -1, -1, -1, -1,
+       2, 3, 4, 5,             /* AEB */
        -1, -1, -1, -1, -1, -1, -1, -1,
        -1, -1, -1, -1, -1, -1, -1, -1,
        -1, -1, -1, -1, -1, -1, -1, -1,
@@ -802,7 +925,8 @@ static char channel_map_aio_in_qs[HDSPM_MAX_CHANNELS] = {
        8, 9,                   /* aes in */
        10, 11,                 /* spdif in */
        12, 16,                 /* adat in */
-       -1, -1, -1, -1, -1, -1, -1, -1,
+       2, 3, 4, 5,             /* AEB */
+       -1, -1, -1, -1,
        -1, -1, -1, -1, -1, -1, -1, -1,
        -1, -1, -1, -1, -1, -1, -1, -1,
        -1, -1, -1, -1, -1, -1, -1, -1,
@@ -817,7 +941,8 @@ static char channel_map_aio_out_qs[HDSPM_MAX_CHANNELS] = {
        10, 11,                 /* spdif out */
        12, 16,                 /* adat out */
        6, 7,                   /* phone out */
-       -1, -1, -1, -1, -1, -1,
+       2, 3, 4, 5,             /* AEB */
+       -1, -1,
        -1, -1, -1, -1, -1, -1, -1, -1,
        -1, -1, -1, -1, -1, -1, -1, -1,
        -1, -1, -1, -1, -1, -1, -1, -1,
@@ -856,11 +981,11 @@ struct hdspm_midi {
 };
 
 struct hdspm_tco {
-       int input;
-       int framerate;
-       int wordclock;
-       int samplerate;
-       int pull;
+       int input; /* 0: LTC, 1:Video, 2: WC*/
+       int framerate; /* 0=24, 1=25, 2=29.97, 3=29.97d, 4=30, 5=30d */
+       int wordclock; /* 0=1:1, 1=44.1->48, 2=48->44.1 */
+       int samplerate; /* 0=44.1, 1=48, 2= freq from app */
+       int pull; /*   0=0, 1=+0.1%, 2=-0.1%, 3=+4%, 4=-4%*/
        int term; /* 0 = off, 1 = on */
 };
 
@@ -879,7 +1004,7 @@ struct hdspm {
 
        u32 control_register;   /* cached value */
        u32 control2_register;  /* cached value */
-       u32 settings_register;
+       u32 settings_register;  /* cached value for AIO / RayDat (sync reference, master/slave) */
 
        struct hdspm_midi midi[4];
        struct tasklet_struct midi_tasklet;
@@ -941,7 +1066,7 @@ struct hdspm {
 
        struct hdspm_tco *tco;  /* NULL if no TCO detected */
 
-       char **texts_autosync;
+       const char *const *texts_autosync;
        int texts_autosync_items;
 
        cycles_t last_interrupt;
@@ -976,12 +1101,24 @@ static inline void snd_hdspm_initialize_midi_flush(struct hdspm *hdspm);
 static inline int hdspm_get_pll_freq(struct hdspm *hdspm);
 static int hdspm_update_simple_mixer_controls(struct hdspm *hdspm);
 static int hdspm_autosync_ref(struct hdspm *hdspm);
+static int hdspm_set_toggle_setting(struct hdspm *hdspm, u32 regmask, int out);
 static int snd_hdspm_set_defaults(struct hdspm *hdspm);
 static int hdspm_system_clock_mode(struct hdspm *hdspm);
 static void hdspm_set_sgbuf(struct hdspm *hdspm,
                            struct snd_pcm_substream *substream,
                             unsigned int reg, int channels);
 
+static int hdspm_aes_sync_check(struct hdspm *hdspm, int idx);
+static int hdspm_wc_sync_check(struct hdspm *hdspm);
+static int hdspm_tco_sync_check(struct hdspm *hdspm);
+static int hdspm_sync_in_sync_check(struct hdspm *hdspm);
+
+static int hdspm_get_aes_sample_rate(struct hdspm *hdspm, int index);
+static int hdspm_get_tco_sample_rate(struct hdspm *hdspm);
+static int hdspm_get_wc_sample_rate(struct hdspm *hdspm);
+
+
+
 static inline int HDSPM_bit2freq(int n)
 {
        static const int bit2freq_tab[] = {
@@ -992,6 +1129,12 @@ static inline int HDSPM_bit2freq(int n)
        return bit2freq_tab[n];
 }
 
+static bool hdspm_is_raydat_or_aio(struct hdspm *hdspm)
+{
+       return ((AIO == hdspm->io_type) || (RayDAT == hdspm->io_type));
+}
+
+
 /* Write/read to/from HDSPM with Adresses in Bytes
    not words but only 32Bit writes are allowed */
 
@@ -1107,14 +1250,11 @@ static int hdspm_rate_multiplier(struct hdspm *hdspm, int rate)
                else if (hdspm->control_register &
                                HDSPM_DoubleSpeed)
                        return rate * 2;
-       };
+       }
        return rate;
 }
 
-static int hdspm_tco_sync_check(struct hdspm *hdspm);
-static int hdspm_sync_in_sync_check(struct hdspm *hdspm);
-
-/* check for external sample rate */
+/* check for external sample rate, returns the sample rate in Hz*/
 static int hdspm_external_sample_rate(struct hdspm *hdspm)
 {
        unsigned int status, status2, timecode;
@@ -1127,17 +1267,36 @@ static int hdspm_external_sample_rate(struct hdspm *hdspm)
                timecode = hdspm_read(hdspm, HDSPM_timecodeRegister);
 
                syncref = hdspm_autosync_ref(hdspm);
+               switch (syncref) {
+               case HDSPM_AES32_AUTOSYNC_FROM_WORD:
+               /* Check WC sync and get sample rate */
+                       if (hdspm_wc_sync_check(hdspm))
+                               return HDSPM_bit2freq(hdspm_get_wc_sample_rate(hdspm));
+                       break;
 
-               if (syncref == HDSPM_AES32_AUTOSYNC_FROM_WORD &&
-                               status & HDSPM_AES32_wcLock)
-                       return HDSPM_bit2freq((status >> HDSPM_AES32_wcFreq_bit) & 0xF);
+               case HDSPM_AES32_AUTOSYNC_FROM_AES1:
+               case HDSPM_AES32_AUTOSYNC_FROM_AES2:
+               case HDSPM_AES32_AUTOSYNC_FROM_AES3:
+               case HDSPM_AES32_AUTOSYNC_FROM_AES4:
+               case HDSPM_AES32_AUTOSYNC_FROM_AES5:
+               case HDSPM_AES32_AUTOSYNC_FROM_AES6:
+               case HDSPM_AES32_AUTOSYNC_FROM_AES7:
+               case HDSPM_AES32_AUTOSYNC_FROM_AES8:
+               /* Check AES sync and get sample rate */
+                       if (hdspm_aes_sync_check(hdspm, syncref - HDSPM_AES32_AUTOSYNC_FROM_AES1))
+                               return HDSPM_bit2freq(hdspm_get_aes_sample_rate(hdspm,
+                                                       syncref - HDSPM_AES32_AUTOSYNC_FROM_AES1));
+                       break;
 
-               if (syncref >= HDSPM_AES32_AUTOSYNC_FROM_AES1 &&
-                               syncref <= HDSPM_AES32_AUTOSYNC_FROM_AES8 &&
-                               status2 & (HDSPM_LockAES >>
-                               (syncref - HDSPM_AES32_AUTOSYNC_FROM_AES1)))
-                       return HDSPM_bit2freq((timecode >> (4*(syncref-HDSPM_AES32_AUTOSYNC_FROM_AES1))) & 0xF);
-               return 0;
+
+               case HDSPM_AES32_AUTOSYNC_FROM_TCO:
+               /* Check TCO sync and get sample rate */
+                       if (hdspm_tco_sync_check(hdspm))
+                               return HDSPM_bit2freq(hdspm_get_tco_sample_rate(hdspm));
+                       break;
+               default:
+                       return 0;
+               } /* end switch(syncref) */
                break;
 
        case MADIface:
@@ -2129,6 +2288,9 @@ static int hdspm_get_wc_sample_rate(struct hdspm *hdspm)
                status = hdspm_read(hdspm, HDSPM_RD_STATUS_1);
                return (status >> 16) & 0xF;
                break;
+       case AES32:
+               status = hdspm_read(hdspm, HDSPM_statusRegister);
+               return (status >> HDSPM_AES32_wcFreq_bit) & 0xF;
        default:
                break;
        }
@@ -2152,6 +2314,9 @@ static int hdspm_get_tco_sample_rate(struct hdspm *hdspm)
                        status = hdspm_read(hdspm, HDSPM_RD_STATUS_1);
                        return (status >> 20) & 0xF;
                        break;
+               case AES32:
+                       status = hdspm_read(hdspm, HDSPM_statusRegister);
+                       return (status >> 1) & 0xF;
                default:
                        break;
                }
@@ -2183,6 +2348,23 @@ static int hdspm_get_sync_in_sample_rate(struct hdspm *hdspm)
        return 0;
 }
 
+/**
+ * Returns the AES sample rate class for the given card.
+ **/
+static int hdspm_get_aes_sample_rate(struct hdspm *hdspm, int index)
+{
+       int timecode;
+
+       switch (hdspm->io_type) {
+       case AES32:
+               timecode = hdspm_read(hdspm, HDSPM_timecodeRegister);
+               return (timecode >> (4*index)) & 0xF;
+               break;
+       default:
+               break;
+       }
+       return 0;
+}
 
 /**
  * Returns the sample rate class for input source <idx> for
@@ -2196,15 +2378,23 @@ static int hdspm_get_s1_sample_rate(struct hdspm *hdspm, unsigned int idx)
 }
 
 #define ENUMERATED_CTL_INFO(info, texts) \
-{ \
-       uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED; \
-       uinfo->count = 1; \
-       uinfo->value.enumerated.items = ARRAY_SIZE(texts); \
-       if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items) \
-               uinfo->value.enumerated.item =  uinfo->value.enumerated.items - 1; \
-       strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]); \
-}
+       snd_ctl_enum_info(info, 1, ARRAY_SIZE(texts), texts)
+
 
+/* Helper function to query the external sample rate and return the
+ * corresponding enum to be returned to userspace.
+ */
+static int hdspm_external_rate_to_enum(struct hdspm *hdspm)
+{
+       int rate = hdspm_external_sample_rate(hdspm);
+       int i, selected_rate = 0;
+       for (i = 1; i < 10; i++)
+               if (HDSPM_bit2freq(i) == rate) {
+                       selected_rate = i;
+                       break;
+               }
+       return selected_rate;
+}
 
 
 #define HDSPM_AUTOSYNC_SAMPLE_RATE(xname, xindex) \
@@ -2270,7 +2460,7 @@ static int snd_hdspm_get_autosync_sample_rate(struct snd_kcontrol *kcontrol,
                default:
                        ucontrol->value.enumerated.item[0] =
                                hdspm_get_s1_sample_rate(hdspm,
-                                               ucontrol->id.index-1);
+                                               kcontrol->private_value-1);
                }
                break;
 
@@ -2289,28 +2479,24 @@ static int snd_hdspm_get_autosync_sample_rate(struct snd_kcontrol *kcontrol,
                        ucontrol->value.enumerated.item[0] =
                                hdspm_get_sync_in_sample_rate(hdspm);
                        break;
+               case 11: /* External Rate */
+                       ucontrol->value.enumerated.item[0] =
+                               hdspm_external_rate_to_enum(hdspm);
+                       break;
                default: /* AES1 to AES8 */
                        ucontrol->value.enumerated.item[0] =
-                               hdspm_get_s1_sample_rate(hdspm,
-                                               kcontrol->private_value-1);
+                               hdspm_get_aes_sample_rate(hdspm,
+                                               kcontrol->private_value -
+                                               HDSPM_AES32_AUTOSYNC_FROM_AES1);
                        break;
                }
                break;
 
        case MADI:
        case MADIface:
-               {
-                       int rate = hdspm_external_sample_rate(hdspm);
-                       int i, selected_rate = 0;
-                       for (i = 1; i < 10; i++)
-                               if (HDSPM_bit2freq(i) == rate) {
-                                       selected_rate = i;
-                                       break;
-                               }
-                       ucontrol->value.enumerated.item[0] = selected_rate;
-               }
+               ucontrol->value.enumerated.item[0] =
+                       hdspm_external_rate_to_enum(hdspm);
                break;
-
        default:
                break;
        }
@@ -2359,33 +2545,17 @@ static int hdspm_system_clock_mode(struct hdspm *hdspm)
  **/
 static void hdspm_set_system_clock_mode(struct hdspm *hdspm, int mode)
 {
-       switch (hdspm->io_type) {
-       case AIO:
-       case RayDAT:
-               if (0 == mode)
-                       hdspm->settings_register |= HDSPM_c0Master;
-               else
-                       hdspm->settings_register &= ~HDSPM_c0Master;
-
-               hdspm_write(hdspm, HDSPM_WR_SETTINGS, hdspm->settings_register);
-               break;
-
-       default:
-               if (0 == mode)
-                       hdspm->control_register |= HDSPM_ClockModeMaster;
-               else
-                       hdspm->control_register &= ~HDSPM_ClockModeMaster;
-
-               hdspm_write(hdspm, HDSPM_controlRegister,
-                               hdspm->control_register);
-       }
+       hdspm_set_toggle_setting(hdspm,
+                       (hdspm_is_raydat_or_aio(hdspm)) ?
+                       HDSPM_c0Master : HDSPM_ClockModeMaster,
+                       (0 == mode));
 }
 
 
 static int snd_hdspm_info_system_clock_mode(struct snd_kcontrol *kcontrol,
                                            struct snd_ctl_elem_info *uinfo)
 {
-       static char *texts[] = { "Master", "AutoSync" };
+       static const char *const texts[] = { "Master", "AutoSync" };
        ENUMERATED_CTL_INFO(uinfo, texts);
        return 0;
 }
@@ -2809,16 +2979,7 @@ static int snd_hdspm_info_pref_sync_ref(struct snd_kcontrol *kcontrol,
 {
        struct hdspm *hdspm = snd_kcontrol_chip(kcontrol);
 
-       uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
-       uinfo->count = 1;
-       uinfo->value.enumerated.items = hdspm->texts_autosync_items;
-
-       if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items)
-               uinfo->value.enumerated.item =
-                       uinfo->value.enumerated.items - 1;
-
-       strcpy(uinfo->value.enumerated.name,
-                       hdspm->texts_autosync[uinfo->value.enumerated.item]);
+       snd_ctl_enum_info(uinfo, 1, hdspm->texts_autosync_items, hdspm->texts_autosync);
 
        return 0;
 }
@@ -2873,19 +3034,20 @@ static int snd_hdspm_put_pref_sync_ref(struct snd_kcontrol *kcontrol,
 
 static int hdspm_autosync_ref(struct hdspm *hdspm)
 {
+       /* This looks at the autosync selected sync reference */
        if (AES32 == hdspm->io_type) {
+
                unsigned int status = hdspm_read(hdspm, HDSPM_statusRegister);
-               unsigned int syncref =
-                       (status >> HDSPM_AES32_syncref_bit) & 0xF;
-               if (syncref == 0)
-                       return HDSPM_AES32_AUTOSYNC_FROM_WORD;
-               if (syncref <= 8)
+               unsigned int syncref = (status >> HDSPM_AES32_syncref_bit) & 0xF;
+               if ((syncref >= HDSPM_AES32_AUTOSYNC_FROM_WORD) &&
+                               (syncref <= HDSPM_AES32_AUTOSYNC_FROM_SYNC_IN)) {
                        return syncref;
+               }
                return HDSPM_AES32_AUTOSYNC_FROM_NONE;
+
        } else if (MADI == hdspm->io_type) {
-               /* This looks at the autosync selected sync reference */
-               unsigned int status2 = hdspm_read(hdspm, HDSPM_statusRegister2);
 
+               unsigned int status2 = hdspm_read(hdspm, HDSPM_statusRegister2);
                switch (status2 & HDSPM_SelSyncRefMask) {
                case HDSPM_SelSyncRef_WORD:
                        return HDSPM_AUTOSYNC_FROM_WORD;
@@ -2898,7 +3060,7 @@ static int hdspm_autosync_ref(struct hdspm *hdspm)
                case HDSPM_SelSyncRef_NVALID:
                        return HDSPM_AUTOSYNC_FROM_NONE;
                default:
-                       return 0;
+                       return HDSPM_AUTOSYNC_FROM_NONE;
                }
 
        }
@@ -2912,31 +3074,15 @@ static int snd_hdspm_info_autosync_ref(struct snd_kcontrol *kcontrol,
        struct hdspm *hdspm = snd_kcontrol_chip(kcontrol);
 
        if (AES32 == hdspm->io_type) {
-               static char *texts[] = { "WordClock", "AES1", "AES2", "AES3",
-                       "AES4", "AES5", "AES6", "AES7", "AES8", "None"};
-
-               uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
-               uinfo->count = 1;
-               uinfo->value.enumerated.items = 10;
-               if (uinfo->value.enumerated.item >=
-                   uinfo->value.enumerated.items)
-                       uinfo->value.enumerated.item =
-                               uinfo->value.enumerated.items - 1;
-               strcpy(uinfo->value.enumerated.name,
-                               texts[uinfo->value.enumerated.item]);
+               static const char *const texts[] = { "WordClock", "AES1", "AES2", "AES3",
+                       "AES4", "AES5", "AES6", "AES7", "AES8", "TCO", "Sync In", "None"};
+
+               ENUMERATED_CTL_INFO(uinfo, texts);
        } else if (MADI == hdspm->io_type) {
-               static char *texts[] = {"Word Clock", "MADI", "TCO",
+               static const char *const texts[] = {"Word Clock", "MADI", "TCO",
                        "Sync In", "None" };
 
-               uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
-               uinfo->count = 1;
-               uinfo->value.enumerated.items = 5;
-               if (uinfo->value.enumerated.item >=
-                               uinfo->value.enumerated.items)
-                       uinfo->value.enumerated.item =
-                               uinfo->value.enumerated.items - 1;
-               strcpy(uinfo->value.enumerated.name,
-                               texts[uinfo->value.enumerated.item]);
+               ENUMERATED_CTL_INFO(uinfo, texts);
        }
        return 0;
 }
@@ -2964,7 +3110,7 @@ static int snd_hdspm_get_autosync_ref(struct snd_kcontrol *kcontrol,
 static int snd_hdspm_info_tco_video_input_format(struct snd_kcontrol *kcontrol,
                                       struct snd_ctl_elem_info *uinfo)
 {
-       static char *texts[] = {"No video", "NTSC", "PAL"};
+       static const char *const texts[] = {"No video", "NTSC", "PAL"};
        ENUMERATED_CTL_INFO(uinfo, texts);
        return 0;
 }
@@ -3010,7 +3156,7 @@ static int snd_hdspm_get_tco_video_input_format(struct snd_kcontrol *kcontrol,
 static int snd_hdspm_info_tco_ltc_frames(struct snd_kcontrol *kcontrol,
                                       struct snd_ctl_elem_info *uinfo)
 {
-       static char *texts[] = {"No lock", "24 fps", "25 fps", "29.97 fps",
+       static const char *const texts[] = {"No lock", "24 fps", "25 fps", "29.97 fps",
                                "30 fps"};
        ENUMERATED_CTL_INFO(uinfo, texts);
        return 0;
@@ -3027,19 +3173,19 @@ static int hdspm_tco_ltc_frames(struct hdspm *hdspm)
                                        HDSPM_TCO1_LTC_Format_MSB)) {
                case 0:
                        /* 24 fps */
-                       ret = 1;
+                       ret = fps_24;
                        break;
                case HDSPM_TCO1_LTC_Format_LSB:
                        /* 25 fps */
-                       ret = 2;
+                       ret = fps_25;
                        break;
                case HDSPM_TCO1_LTC_Format_MSB:
-                       /* 25 fps */
-                       ret = 3;
+                       /* 29.97 fps */
+                       ret = fps_2997;
                        break;
                default:
                        /* 30 fps */
-                       ret = 4;
+                       ret = fps_30;
                        break;
                }
        }
@@ -3067,16 +3213,35 @@ static int snd_hdspm_get_tco_ltc_frames(struct snd_kcontrol *kcontrol,
 
 static int hdspm_toggle_setting(struct hdspm *hdspm, u32 regmask)
 {
-       return (hdspm->control_register & regmask) ? 1 : 0;
+       u32 reg;
+
+       if (hdspm_is_raydat_or_aio(hdspm))
+               reg = hdspm->settings_register;
+       else
+               reg = hdspm->control_register;
+
+       return (reg & regmask) ? 1 : 0;
 }
 
 static int hdspm_set_toggle_setting(struct hdspm *hdspm, u32 regmask, int out)
 {
+       u32 *reg;
+       u32 target_reg;
+
+       if (hdspm_is_raydat_or_aio(hdspm)) {
+               reg = &(hdspm->settings_register);
+               target_reg = HDSPM_WR_SETTINGS;
+       } else {
+               reg = &(hdspm->control_register);
+               target_reg = HDSPM_controlRegister;
+       }
+
        if (out)
-               hdspm->control_register |= regmask;
+               *reg |= regmask;
        else
-               hdspm->control_register &= ~regmask;
-       hdspm_write(hdspm, HDSPM_controlRegister, hdspm->control_register);
+               *reg &= ~regmask;
+
+       hdspm_write(hdspm, target_reg, *reg);
 
        return 0;
 }
@@ -3141,7 +3306,7 @@ static int hdspm_set_input_select(struct hdspm * hdspm, int out)
 static int snd_hdspm_info_input_select(struct snd_kcontrol *kcontrol,
                                       struct snd_ctl_elem_info *uinfo)
 {
-       static char *texts[] = { "optical", "coaxial" };
+       static const char *const texts[] = { "optical", "coaxial" };
        ENUMERATED_CTL_INFO(uinfo, texts);
        return 0;
 }
@@ -3203,7 +3368,7 @@ static int hdspm_set_ds_wire(struct hdspm * hdspm, int ds)
 static int snd_hdspm_info_ds_wire(struct snd_kcontrol *kcontrol,
                                  struct snd_ctl_elem_info *uinfo)
 {
-       static char *texts[] = { "Single", "Double" };
+       static const char *const texts[] = { "Single", "Double" };
        ENUMERATED_CTL_INFO(uinfo, texts);
        return 0;
 }
@@ -3276,7 +3441,7 @@ static int hdspm_set_qs_wire(struct hdspm * hdspm, int mode)
 static int snd_hdspm_info_qs_wire(struct snd_kcontrol *kcontrol,
                                       struct snd_ctl_elem_info *uinfo)
 {
-       static char *texts[] = { "Single", "Double", "Quad" };
+       static const char *const texts[] = { "Single", "Double", "Quad" };
        ENUMERATED_CTL_INFO(uinfo, texts);
        return 0;
 }
@@ -3313,6 +3478,84 @@ static int snd_hdspm_put_qs_wire(struct snd_kcontrol *kcontrol,
        return change;
 }
 
+#define HDSPM_CONTROL_TRISTATE(xname, xindex) \
+{      .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
+       .name = xname, \
+       .private_value = xindex, \
+       .info = snd_hdspm_info_tristate, \
+       .get = snd_hdspm_get_tristate, \
+       .put = snd_hdspm_put_tristate \
+}
+
+static int hdspm_tristate(struct hdspm *hdspm, u32 regmask)
+{
+       u32 reg = hdspm->settings_register & (regmask * 3);
+       return reg / regmask;
+}
+
+static int hdspm_set_tristate(struct hdspm *hdspm, int mode, u32 regmask)
+{
+       hdspm->settings_register &= ~(regmask * 3);
+       hdspm->settings_register |= (regmask * mode);
+       hdspm_write(hdspm, HDSPM_WR_SETTINGS, hdspm->settings_register);
+
+       return 0;
+}
+
+static int snd_hdspm_info_tristate(struct snd_kcontrol *kcontrol,
+                                      struct snd_ctl_elem_info *uinfo)
+{
+       u32 regmask = kcontrol->private_value;
+
+       static const char *const texts_spdif[] = { "Optical", "Coaxial", "Internal" };
+       static const char *const texts_levels[] = { "Hi Gain", "+4 dBu", "-10 dBV" };
+
+       switch (regmask) {
+       case HDSPM_c0_Input0:
+               ENUMERATED_CTL_INFO(uinfo, texts_spdif);
+               break;
+       default:
+               ENUMERATED_CTL_INFO(uinfo, texts_levels);
+               break;
+       }
+       return 0;
+}
+
+static int snd_hdspm_get_tristate(struct snd_kcontrol *kcontrol,
+                                     struct snd_ctl_elem_value *ucontrol)
+{
+       struct hdspm *hdspm = snd_kcontrol_chip(kcontrol);
+       u32 regmask = kcontrol->private_value;
+
+       spin_lock_irq(&hdspm->lock);
+       ucontrol->value.enumerated.item[0] = hdspm_tristate(hdspm, regmask);
+       spin_unlock_irq(&hdspm->lock);
+       return 0;
+}
+
+static int snd_hdspm_put_tristate(struct snd_kcontrol *kcontrol,
+                                     struct snd_ctl_elem_value *ucontrol)
+{
+       struct hdspm *hdspm = snd_kcontrol_chip(kcontrol);
+       u32 regmask = kcontrol->private_value;
+       int change;
+       int val;
+
+       if (!snd_hdspm_use_is_exclusive(hdspm))
+               return -EBUSY;
+       val = ucontrol->value.integer.value[0];
+       if (val < 0)
+               val = 0;
+       if (val > 2)
+               val = 2;
+
+       spin_lock_irq(&hdspm->lock);
+       change = val != hdspm_tristate(hdspm, regmask);
+       hdspm_set_tristate(hdspm, val, regmask);
+       spin_unlock_irq(&hdspm->lock);
+       return change;
+}
+
 #define HDSPM_MADI_SPEEDMODE(xname, xindex) \
 {      .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
        .name = xname, \
@@ -3352,7 +3595,7 @@ static int hdspm_set_madi_speedmode(struct hdspm *hdspm, int mode)
 static int snd_hdspm_info_madi_speedmode(struct snd_kcontrol *kcontrol,
                                       struct snd_ctl_elem_info *uinfo)
 {
-       static char *texts[] = { "Single", "Double", "Quad" };
+       static const char *const texts[] = { "Single", "Double", "Quad" };
        ENUMERATED_CTL_INFO(uinfo, texts);
        return 0;
 }
@@ -3587,7 +3830,7 @@ static int snd_hdspm_put_playback_mixer(struct snd_kcontrol *kcontrol,
 static int snd_hdspm_info_sync_check(struct snd_kcontrol *kcontrol,
                                     struct snd_ctl_elem_info *uinfo)
 {
-       static char *texts[] = { "No Lock", "Lock", "Sync", "N/A" };
+       static const char *const texts[] = { "No Lock", "Lock", "Sync", "N/A" };
        ENUMERATED_CTL_INFO(uinfo, texts);
        return 0;
 }
@@ -3595,7 +3838,7 @@ static int snd_hdspm_info_sync_check(struct snd_kcontrol *kcontrol,
 static int snd_hdspm_tco_info_lock_check(struct snd_kcontrol *kcontrol,
                                     struct snd_ctl_elem_info *uinfo)
 {
-       static char *texts[] = { "No Lock", "Lock" };
+       static const char *const texts[] = { "No Lock", "Lock" };
        ENUMERATED_CTL_INFO(uinfo, texts);
        return 0;
 }
@@ -3745,9 +3988,18 @@ static int hdspm_tco_sync_check(struct hdspm *hdspm)
        if (hdspm->tco) {
                switch (hdspm->io_type) {
                case MADI:
+                       status = hdspm_read(hdspm, HDSPM_statusRegister);
+                       if (status & HDSPM_tcoLockMadi) {
+                               if (status & HDSPM_tcoSync)
+                                       return 2;
+                               else
+                                       return 1;
+                       }
+                       return 0;
+                       break;
                case AES32:
                        status = hdspm_read(hdspm, HDSPM_statusRegister);
-                       if (status & HDSPM_tcoLock) {
+                       if (status & HDSPM_tcoLockAes) {
                                if (status & HDSPM_tcoSync)
                                        return 2;
                                else
@@ -3807,7 +4059,8 @@ static int snd_hdspm_get_sync_check(struct snd_kcontrol *kcontrol,
                case 5: /* SYNC IN */
                        val = hdspm_sync_in_sync_check(hdspm); break;
                default:
-                       val = hdspm_s1_sync_check(hdspm, ucontrol->id.index-1);
+                       val = hdspm_s1_sync_check(hdspm,
+                                       kcontrol->private_value-1);
                }
                break;
 
@@ -3975,7 +4228,8 @@ static void hdspm_tco_write(struct hdspm *hdspm)
 static int snd_hdspm_info_tco_sample_rate(struct snd_kcontrol *kcontrol,
                                          struct snd_ctl_elem_info *uinfo)
 {
-       static char *texts[] = { "44.1 kHz", "48 kHz" };
+       /* TODO freq from app could be supported here, see tco->samplerate */
+       static const char *const texts[] = { "44.1 kHz", "48 kHz" };
        ENUMERATED_CTL_INFO(uinfo, texts);
        return 0;
 }
@@ -4021,7 +4275,8 @@ static int snd_hdspm_put_tco_sample_rate(struct snd_kcontrol *kcontrol,
 static int snd_hdspm_info_tco_pull(struct snd_kcontrol *kcontrol,
                                   struct snd_ctl_elem_info *uinfo)
 {
-       static char *texts[] = { "0", "+ 0.1 %", "- 0.1 %", "+ 4 %", "- 4 %" };
+       static const char *const texts[] = { "0", "+ 0.1 %", "- 0.1 %",
+               "+ 4 %", "- 4 %" };
        ENUMERATED_CTL_INFO(uinfo, texts);
        return 0;
 }
@@ -4066,7 +4321,7 @@ static int snd_hdspm_put_tco_pull(struct snd_kcontrol *kcontrol,
 static int snd_hdspm_info_tco_wck_conversion(struct snd_kcontrol *kcontrol,
                                             struct snd_ctl_elem_info *uinfo)
 {
-       static char *texts[] = { "1:1", "44.1 -> 48", "48 -> 44.1" };
+       static const char *const texts[] = { "1:1", "44.1 -> 48", "48 -> 44.1" };
        ENUMERATED_CTL_INFO(uinfo, texts);
        return 0;
 }
@@ -4112,7 +4367,7 @@ static int snd_hdspm_put_tco_wck_conversion(struct snd_kcontrol *kcontrol,
 static int snd_hdspm_info_tco_frame_rate(struct snd_kcontrol *kcontrol,
                                          struct snd_ctl_elem_info *uinfo)
 {
-       static char *texts[] = { "24 fps", "25 fps", "29.97fps",
+       static const char *const texts[] = { "24 fps", "25 fps", "29.97fps",
                "29.97 dfps", "30 fps", "30 dfps" };
        ENUMERATED_CTL_INFO(uinfo, texts);
        return 0;
@@ -4159,7 +4414,7 @@ static int snd_hdspm_put_tco_frame_rate(struct snd_kcontrol *kcontrol,
 static int snd_hdspm_info_tco_sync_source(struct snd_kcontrol *kcontrol,
                                          struct snd_ctl_elem_info *uinfo)
 {
-       static char *texts[] = { "LTC", "Video", "WCK" };
+       static const char *const texts[] = { "LTC", "Video", "WCK" };
        ENUMERATED_CTL_INFO(uinfo, texts);
        return 0;
 }
@@ -4284,7 +4539,6 @@ static struct snd_kcontrol_new snd_hdspm_controls_aio[] = {
        HDSPM_INTERNAL_CLOCK("Internal Clock", 0),
        HDSPM_SYSTEM_CLOCK_MODE("System Clock Mode", 0),
        HDSPM_PREF_SYNC_REF("Preferred Sync Reference", 0),
-       HDSPM_AUTOSYNC_REF("AutoSync Reference", 0),
        HDSPM_SYSTEM_SAMPLE_RATE("System Sample Rate", 0),
        HDSPM_AUTOSYNC_SAMPLE_RATE("External Rate", 0),
        HDSPM_SYNC_CHECK("WC SyncCheck", 0),
@@ -4298,7 +4552,16 @@ static struct snd_kcontrol_new snd_hdspm_controls_aio[] = {
        HDSPM_AUTOSYNC_SAMPLE_RATE("SPDIF Frequency", 2),
        HDSPM_AUTOSYNC_SAMPLE_RATE("ADAT Frequency", 3),
        HDSPM_AUTOSYNC_SAMPLE_RATE("TCO Frequency", 4),
-       HDSPM_AUTOSYNC_SAMPLE_RATE("SYNC IN Frequency", 5)
+       HDSPM_AUTOSYNC_SAMPLE_RATE("SYNC IN Frequency", 5),
+       HDSPM_CONTROL_TRISTATE("S/PDIF Input", HDSPM_c0_Input0),
+       HDSPM_TOGGLE_SETTING("S/PDIF Out Optical", HDSPM_c0_Spdif_Opt),
+       HDSPM_TOGGLE_SETTING("S/PDIF Out Professional", HDSPM_c0_Pro),
+       HDSPM_TOGGLE_SETTING("ADAT internal (AEB/TEB)", HDSPM_c0_AEB1),
+       HDSPM_TOGGLE_SETTING("XLR Breakout Cable", HDSPM_c0_Sym6db),
+       HDSPM_TOGGLE_SETTING("Single Speed WordClock Out", HDSPM_c0_Wck48),
+       HDSPM_CONTROL_TRISTATE("Input Level", HDSPM_c0_AD_GAIN0),
+       HDSPM_CONTROL_TRISTATE("Output Level", HDSPM_c0_DA_GAIN0),
+       HDSPM_CONTROL_TRISTATE("Phones Level", HDSPM_c0_PH_GAIN0)
 
                /*
                   HDSPM_INPUT_SELECT("Input Select", 0),
@@ -4335,7 +4598,9 @@ static struct snd_kcontrol_new snd_hdspm_controls_raydat[] = {
        HDSPM_AUTOSYNC_SAMPLE_RATE("ADAT3 Frequency", 5),
        HDSPM_AUTOSYNC_SAMPLE_RATE("ADAT4 Frequency", 6),
        HDSPM_AUTOSYNC_SAMPLE_RATE("TCO Frequency", 7),
-       HDSPM_AUTOSYNC_SAMPLE_RATE("SYNC IN Frequency", 8)
+       HDSPM_AUTOSYNC_SAMPLE_RATE("SYNC IN Frequency", 8),
+       HDSPM_TOGGLE_SETTING("S/PDIF Out Professional", HDSPM_c0_Pro),
+       HDSPM_TOGGLE_SETTING("Single Speed WordClock Out", HDSPM_c0_Wck48)
 };
 
 static struct snd_kcontrol_new snd_hdspm_controls_aes32[] = {
@@ -4345,7 +4610,7 @@ static struct snd_kcontrol_new snd_hdspm_controls_aes32[] = {
        HDSPM_PREF_SYNC_REF("Preferred Sync Reference", 0),
        HDSPM_AUTOSYNC_REF("AutoSync Reference", 0),
        HDSPM_SYSTEM_SAMPLE_RATE("System Sample Rate", 0),
-       HDSPM_AUTOSYNC_SAMPLE_RATE("External Rate", 0),
+       HDSPM_AUTOSYNC_SAMPLE_RATE("External Rate", 11),
        HDSPM_SYNC_CHECK("WC Sync Check", 0),
        HDSPM_SYNC_CHECK("AES1 Sync Check", 1),
        HDSPM_SYNC_CHECK("AES2 Sync Check", 2),
@@ -4501,77 +4766,22 @@ static int snd_hdspm_create_controls(struct snd_card *card,
  ------------------------------------------------------------*/
 
 static void
-snd_hdspm_proc_read_madi(struct snd_info_entry * entry,
-                        struct snd_info_buffer *buffer)
+snd_hdspm_proc_read_tco(struct snd_info_entry *entry,
+                                       struct snd_info_buffer *buffer)
 {
        struct hdspm *hdspm = entry->private_data;
-       unsigned int status, status2, control, freq;
-
-       char *pref_sync_ref;
-       char *autosync_ref;
-       char *system_clock_mode;
-       char *insel;
-       int x, x2;
-
-       /* TCO stuff */
+       unsigned int status, control;
        int a, ltc, frames, seconds, minutes, hours;
        unsigned int period;
        u64 freq_const = 0;
        u32 rate;
 
+       snd_iprintf(buffer, "--- TCO ---\n");
+
        status = hdspm_read(hdspm, HDSPM_statusRegister);
-       status2 = hdspm_read(hdspm, HDSPM_statusRegister2);
        control = hdspm->control_register;
-       freq = hdspm_read(hdspm, HDSPM_timecodeRegister);
 
-       snd_iprintf(buffer, "%s (Card #%d) Rev.%x Status2first3bits: %x\n",
-                       hdspm->card_name, hdspm->card->number + 1,
-                       hdspm->firmware_rev,
-                       (status2 & HDSPM_version0) |
-                       (status2 & HDSPM_version1) | (status2 &
-                               HDSPM_version2));
 
-       snd_iprintf(buffer, "HW Serial: 0x%06x%06x\n",
-                       (hdspm_read(hdspm, HDSPM_midiStatusIn1)>>8) & 0xFFFFFF,
-                       hdspm->serial);
-
-       snd_iprintf(buffer, "IRQ: %d Registers bus: 0x%lx VM: 0x%lx\n",
-                       hdspm->irq, hdspm->port, (unsigned long)hdspm->iobase);
-
-       snd_iprintf(buffer, "--- System ---\n");
-
-       snd_iprintf(buffer,
-               "IRQ Pending: Audio=%d, MIDI0=%d, MIDI1=%d, IRQcount=%d\n",
-               status & HDSPM_audioIRQPending,
-               (status & HDSPM_midi0IRQPending) ? 1 : 0,
-               (status & HDSPM_midi1IRQPending) ? 1 : 0,
-               hdspm->irq_count);
-       snd_iprintf(buffer,
-               "HW pointer: id = %d, rawptr = %d (%d->%d) "
-               "estimated= %ld (bytes)\n",
-               ((status & HDSPM_BufferID) ? 1 : 0),
-               (status & HDSPM_BufferPositionMask),
-               (status & HDSPM_BufferPositionMask) %
-               (2 * (int)hdspm->period_bytes),
-               ((status & HDSPM_BufferPositionMask) - 64) %
-               (2 * (int)hdspm->period_bytes),
-               (long) hdspm_hw_pointer(hdspm) * 4);
-
-       snd_iprintf(buffer,
-               "MIDI FIFO: Out1=0x%x, Out2=0x%x, In1=0x%x, In2=0x%x \n",
-               hdspm_read(hdspm, HDSPM_midiStatusOut0) & 0xFF,
-               hdspm_read(hdspm, HDSPM_midiStatusOut1) & 0xFF,
-               hdspm_read(hdspm, HDSPM_midiStatusIn0) & 0xFF,
-               hdspm_read(hdspm, HDSPM_midiStatusIn1) & 0xFF);
-       snd_iprintf(buffer,
-               "MIDIoverMADI FIFO: In=0x%x, Out=0x%x \n",
-               hdspm_read(hdspm, HDSPM_midiStatusIn2) & 0xFF,
-               hdspm_read(hdspm, HDSPM_midiStatusOut2) & 0xFF);
-       snd_iprintf(buffer,
-               "Register: ctrl1=0x%x, ctrl2=0x%x, status1=0x%x, "
-               "status2=0x%x\n",
-               hdspm->control_register, hdspm->control2_register,
-               status, status2);
        if (status & HDSPM_tco_detect) {
                snd_iprintf(buffer, "TCO module detected.\n");
                a = hdspm_read(hdspm, HDSPM_RD_TCO+4);
@@ -4665,6 +4875,75 @@ snd_hdspm_proc_read_madi(struct snd_info_entry * entry,
        } else {
                snd_iprintf(buffer, "No TCO module detected.\n");
        }
+}
+
+static void
+snd_hdspm_proc_read_madi(struct snd_info_entry *entry,
+                        struct snd_info_buffer *buffer)
+{
+       struct hdspm *hdspm = entry->private_data;
+       unsigned int status, status2, control, freq;
+
+       char *pref_sync_ref;
+       char *autosync_ref;
+       char *system_clock_mode;
+       char *insel;
+       int x, x2;
+
+       status = hdspm_read(hdspm, HDSPM_statusRegister);
+       status2 = hdspm_read(hdspm, HDSPM_statusRegister2);
+       control = hdspm->control_register;
+       freq = hdspm_read(hdspm, HDSPM_timecodeRegister);
+
+       snd_iprintf(buffer, "%s (Card #%d) Rev.%x Status2first3bits: %x\n",
+                       hdspm->card_name, hdspm->card->number + 1,
+                       hdspm->firmware_rev,
+                       (status2 & HDSPM_version0) |
+                       (status2 & HDSPM_version1) | (status2 &
+                               HDSPM_version2));
+
+       snd_iprintf(buffer, "HW Serial: 0x%06x%06x\n",
+                       (hdspm_read(hdspm, HDSPM_midiStatusIn1)>>8) & 0xFFFFFF,
+                       hdspm->serial);
+
+       snd_iprintf(buffer, "IRQ: %d Registers bus: 0x%lx VM: 0x%lx\n",
+                       hdspm->irq, hdspm->port, (unsigned long)hdspm->iobase);
+
+       snd_iprintf(buffer, "--- System ---\n");
+
+       snd_iprintf(buffer,
+               "IRQ Pending: Audio=%d, MIDI0=%d, MIDI1=%d, IRQcount=%d\n",
+               status & HDSPM_audioIRQPending,
+               (status & HDSPM_midi0IRQPending) ? 1 : 0,
+               (status & HDSPM_midi1IRQPending) ? 1 : 0,
+               hdspm->irq_count);
+       snd_iprintf(buffer,
+               "HW pointer: id = %d, rawptr = %d (%d->%d) "
+               "estimated= %ld (bytes)\n",
+               ((status & HDSPM_BufferID) ? 1 : 0),
+               (status & HDSPM_BufferPositionMask),
+               (status & HDSPM_BufferPositionMask) %
+               (2 * (int)hdspm->period_bytes),
+               ((status & HDSPM_BufferPositionMask) - 64) %
+               (2 * (int)hdspm->period_bytes),
+               (long) hdspm_hw_pointer(hdspm) * 4);
+
+       snd_iprintf(buffer,
+               "MIDI FIFO: Out1=0x%x, Out2=0x%x, In1=0x%x, In2=0x%x \n",
+               hdspm_read(hdspm, HDSPM_midiStatusOut0) & 0xFF,
+               hdspm_read(hdspm, HDSPM_midiStatusOut1) & 0xFF,
+               hdspm_read(hdspm, HDSPM_midiStatusIn0) & 0xFF,
+               hdspm_read(hdspm, HDSPM_midiStatusIn1) & 0xFF);
+       snd_iprintf(buffer,
+               "MIDIoverMADI FIFO: In=0x%x, Out=0x%x \n",
+               hdspm_read(hdspm, HDSPM_midiStatusIn2) & 0xFF,
+               hdspm_read(hdspm, HDSPM_midiStatusOut2) & 0xFF);
+       snd_iprintf(buffer,
+               "Register: ctrl1=0x%x, ctrl2=0x%x, status1=0x%x, "
+               "status2=0x%x\n",
+               hdspm->control_register, hdspm->control2_register,
+               status, status2);
+
 
        snd_iprintf(buffer, "--- Settings ---\n");
 
@@ -4768,6 +5047,9 @@ snd_hdspm_proc_read_madi(struct snd_info_entry * entry,
                (status & HDSPM_RX_64ch) ? "64 channels" :
                "56 channels");
 
+       /* call readout function for TCO specific status */
+       snd_hdspm_proc_read_tco(entry, buffer);
+
        snd_iprintf(buffer, "\n");
 }
 
@@ -4909,11 +5191,18 @@ snd_hdspm_proc_read_aes32(struct snd_info_entry * entry,
                autosync_ref = "AES7"; break;
        case HDSPM_AES32_AUTOSYNC_FROM_AES8:
                autosync_ref = "AES8"; break;
+       case HDSPM_AES32_AUTOSYNC_FROM_TCO:
+               autosync_ref = "TCO"; break;
+       case HDSPM_AES32_AUTOSYNC_FROM_SYNC_IN:
+               autosync_ref = "Sync In"; break;
        default:
                autosync_ref = "---"; break;
        }
        snd_iprintf(buffer, "AutoSync ref = %s\n", autosync_ref);
 
+       /* call readout function for TCO specific status */
+       snd_hdspm_proc_read_tco(entry, buffer);
+
        snd_iprintf(buffer, "\n");
 }
 
@@ -5097,7 +5386,7 @@ static int snd_hdspm_set_defaults(struct hdspm * hdspm)
 
        case AES32:
                hdspm->control_register =
-                       HDSPM_ClockModeMaster | /* Master Cloack Mode on */
+                       HDSPM_ClockModeMaster | /* Master Clock Mode on */
                        hdspm_encode_latency(7) | /* latency max=8192samples */
                        HDSPM_SyncRef0 |        /* AES1 is syncclock */
                        HDSPM_LineOut | /* Analog output in */
@@ -5123,9 +5412,8 @@ static int snd_hdspm_set_defaults(struct hdspm * hdspm)
 
        all_in_all_mixer(hdspm, 0 * UNITY_GAIN);
 
-       if (hdspm->io_type == AIO || hdspm->io_type == RayDAT) {
+       if (hdspm_is_raydat_or_aio(hdspm))
                hdspm_write(hdspm, HDSPM_WR_SETTINGS, hdspm->settings_register);
-       }
 
        /* set a default rate so that the channel map is set up. */
        hdspm_set_rate(hdspm, 48000, 1);
@@ -5371,6 +5659,16 @@ static int snd_hdspm_hw_params(struct snd_pcm_substream *substream,
           */
 
 
+       /*  For AES cards, the float format bit is the same as the
+        *  preferred sync reference. Since we don't want to break
+        *  sync settings, we have to skip the remaining part of this
+        *  function.
+        */
+       if (hdspm->io_type == AES32) {
+               return 0;
+       }
+
+
        /* Switch to native float format if requested */
        if (SNDRV_PCM_FORMAT_FLOAT_LE == params_format(params)) {
                if (!(hdspm->control_register & HDSPe_FLOAT_FORMAT))
@@ -6013,7 +6311,7 @@ static int snd_hdspm_hwdep_ioctl(struct snd_hwdep *hw, struct file *file,
                                ltc.format = fps_2997;
                                break;
                        default:
-                               ltc.format = 30;
+                               ltc.format = fps_30;
                                break;
                        }
                        if (i & HDSPM_TCO1_set_drop_frame_flag) {
@@ -6479,10 +6777,6 @@ static int snd_hdspm_create(struct snd_card *card,
                break;
 
        case AIO:
-               if (0 == (hdspm_read(hdspm, HDSPM_statusRegister2) & HDSPM_s2_AEBI_D)) {
-                       snd_printk(KERN_INFO "HDSPM: AEB input board found, but not supported\n");
-               }
-
                hdspm->ss_in_channels = AIO_IN_SS_CHANNELS;
                hdspm->ds_in_channels = AIO_IN_DS_CHANNELS;
                hdspm->qs_in_channels = AIO_IN_QS_CHANNELS;
@@ -6490,6 +6784,20 @@ static int snd_hdspm_create(struct snd_card *card,
                hdspm->ds_out_channels = AIO_OUT_DS_CHANNELS;
                hdspm->qs_out_channels = AIO_OUT_QS_CHANNELS;
 
+               if (0 == (hdspm_read(hdspm, HDSPM_statusRegister2) & HDSPM_s2_AEBI_D)) {
+                       snd_printk(KERN_INFO "HDSPM: AEB input board found\n");
+                       hdspm->ss_in_channels += 4;
+                       hdspm->ds_in_channels += 4;
+                       hdspm->qs_in_channels += 4;
+               }
+
+               if (0 == (hdspm_read(hdspm, HDSPM_statusRegister2) & HDSPM_s2_AEBO_D)) {
+                       snd_printk(KERN_INFO "HDSPM: AEB output board found\n");
+                       hdspm->ss_out_channels += 4;
+                       hdspm->ds_out_channels += 4;
+                       hdspm->qs_out_channels += 4;
+               }
+
                hdspm->channel_map_out_ss = channel_map_aio_out_ss;
                hdspm->channel_map_out_ds = channel_map_aio_out_ds;
                hdspm->channel_map_out_qs = channel_map_aio_out_qs;
@@ -6558,6 +6866,7 @@ static int snd_hdspm_create(struct snd_card *card,
                break;
 
        case MADI:
+       case AES32:
                if (hdspm_read(hdspm, HDSPM_statusRegister) & HDSPM_tco_detect) {
                        hdspm->midiPorts++;
                        hdspm->tco = kzalloc(sizeof(struct hdspm_tco),
@@ -6565,7 +6874,7 @@ static int snd_hdspm_create(struct snd_card *card,
                        if (NULL != hdspm->tco) {
                                hdspm_tco_write(hdspm);
                        }
-                       snd_printk(KERN_INFO "HDSPM: MADI TCO module found\n");
+                       snd_printk(KERN_INFO "HDSPM: MADI/AES TCO module found\n");
                } else {
                        hdspm->tco = NULL;
                }
@@ -6580,10 +6889,12 @@ static int snd_hdspm_create(struct snd_card *card,
        case AES32:
                if (hdspm->tco) {
                        hdspm->texts_autosync = texts_autosync_aes_tco;
-                       hdspm->texts_autosync_items = 10;
+                       hdspm->texts_autosync_items =
+                               ARRAY_SIZE(texts_autosync_aes_tco);
                } else {
                        hdspm->texts_autosync = texts_autosync_aes;
-                       hdspm->texts_autosync_items = 9;
+                       hdspm->texts_autosync_items =
+                               ARRAY_SIZE(texts_autosync_aes);
                }
                break;
 
index 45eeaa9f7fec49f035cf8faa54fdfdb5a8ea9df5..5138b8493051fd54c674fe26ea87d9d5fd1411c6 100644 (file)
@@ -26,12 +26,9 @@ if SND_SOC
 config SND_SOC_AC97_BUS
        bool
 
-config SND_SOC_DMAENGINE_PCM
-       bool
-
 config SND_SOC_GENERIC_DMAENGINE_PCM
        bool
-       select SND_SOC_DMAENGINE_PCM
+       select SND_DMAENGINE_PCM
 
 # All the supported SoCs
 source "sound/soc/atmel/Kconfig"
index bc0261476d7afc78767856683398ec1bd7bc463e..61a64d281905e4b8997bc2ae3eb42ce387de9e7c 100644 (file)
@@ -1,10 +1,6 @@
 snd-soc-core-objs := soc-core.o soc-dapm.o soc-jack.o soc-cache.o soc-utils.o
 snd-soc-core-objs += soc-pcm.o soc-compress.o soc-io.o
 
-ifneq ($(CONFIG_SND_SOC_DMAENGINE_PCM),)
-snd-soc-core-objs += soc-dmaengine-pcm.o
-endif
-
 ifneq ($(CONFIG_SND_SOC_GENERIC_DMAENGINE_PCM),)
 snd-soc-core-objs += soc-generic-dmaengine-pcm.o
 endif
index 3fdd87fa18a970db151fdadcc578ca61c87d3241..e48d38a1b95c6d6b13d3eef9bb09efed5d970964 100644 (file)
@@ -13,6 +13,7 @@ config SND_ATMEL_SOC_PDC
 config SND_ATMEL_SOC_DMA
        tristate
        depends on SND_ATMEL_SOC
+       select SND_SOC_GENERIC_DMAENGINE_PCM
 
 config SND_ATMEL_SOC_SSC
        tristate
@@ -32,6 +33,26 @@ config SND_AT91_SOC_SAM9G20_WM8731
          Say Y if you want to add support for SoC audio on WM8731-based
          AT91sam9g20 evaluation board.
 
+config SND_ATMEL_SOC_WM8904
+       tristate "Atmel ASoC driver for boards using WM8904 codec"
+       depends on ARCH_AT91 && ATMEL_SSC && SND_ATMEL_SOC
+       select SND_ATMEL_SOC_SSC
+       select SND_ATMEL_SOC_DMA
+       select SND_SOC_WM8904
+       help
+         Say Y if you want to add support for Atmel ASoC driver for boards using
+         WM8904 codec.
+
+config SND_AT91_SOC_SAM9X5_WM8731
+       tristate "SoC Audio support for WM8731-based at91sam9x5 board"
+       depends on ATMEL_SSC && SND_ATMEL_SOC && SOC_AT91SAM9X5
+       select SND_ATMEL_SOC_SSC
+       select SND_ATMEL_SOC_DMA
+       select SND_SOC_WM8731
+       help
+         Say Y if you want to add support for audio SoC on an
+         at91sam9x5 based board that is using WM8731 codec.
+
 config SND_AT91_SOC_AFEB9260
        tristate "SoC Audio support for AFEB9260 board"
        depends on ARCH_AT91 && ATMEL_SSC && ARCH_AT91 && MACH_AFEB9260 && SND_ATMEL_SOC
index 41967ccb6f41e3968753d4922ad03f2439ad81b9..5baabc8bde3abfa4dfcb833c1c3fbcb858bf7ced 100644 (file)
@@ -11,6 +11,10 @@ obj-$(CONFIG_SND_ATMEL_SOC_SSC) += snd-soc-atmel_ssc_dai.o
 
 # AT91 Machine Support
 snd-soc-sam9g20-wm8731-objs := sam9g20_wm8731.o
+snd-atmel-soc-wm8904-objs := atmel_wm8904.o
+snd-soc-sam9x5-wm8731-objs := sam9x5_wm8731.o
 
 obj-$(CONFIG_SND_AT91_SOC_SAM9G20_WM8731) += snd-soc-sam9g20-wm8731.o
+obj-$(CONFIG_SND_ATMEL_SOC_WM8904) += snd-atmel-soc-wm8904.o
+obj-$(CONFIG_SND_AT91_SOC_SAM9X5_WM8731) += snd-soc-sam9x5-wm8731.o
 obj-$(CONFIG_SND_AT91_SOC_AFEB9260) += snd-soc-afeb9260.o
index d12826526798fc47620e4cb613df9e95da8a8f7c..06082e5e5dcb7d726e2bdba6e5f2d7f816dcf262 100644 (file)
@@ -91,138 +91,52 @@ static void atmel_pcm_dma_irq(u32 ssc_sr,
        }
 }
 
-/*--------------------------------------------------------------------------*\
- * DMAENGINE operations
-\*--------------------------------------------------------------------------*/
-static bool filter(struct dma_chan *chan, void *slave)
-{
-       struct at_dma_slave *sl = slave;
-
-       if (sl->dma_dev == chan->device->dev) {
-               chan->private = sl;
-               return true;
-       } else {
-               return false;
-       }
-}
-
 static int atmel_pcm_configure_dma(struct snd_pcm_substream *substream,
-       struct snd_pcm_hw_params *params, struct atmel_pcm_dma_params *prtd)
+       struct snd_pcm_hw_params *params, struct dma_slave_config *slave_config)
 {
+       struct snd_soc_pcm_runtime *rtd = substream->private_data;
+       struct atmel_pcm_dma_params *prtd;
        struct ssc_device *ssc;
-       struct dma_chan *dma_chan;
-       struct dma_slave_config slave_config;
        int ret;
 
+       prtd = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
        ssc = prtd->ssc;
 
-       ret = snd_hwparams_to_dma_slave_config(substream, params,
-                       &slave_config);
+       ret = snd_hwparams_to_dma_slave_config(substream, params, slave_config);
        if (ret) {
                pr_err("atmel-pcm: hwparams to dma slave configure failed\n");
                return ret;
        }
 
        if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
-               slave_config.dst_addr = (dma_addr_t)ssc->phybase + SSC_THR;
-               slave_config.dst_maxburst = 1;
+               slave_config->dst_addr = ssc->phybase + SSC_THR;
+               slave_config->dst_maxburst = 1;
        } else {
-               slave_config.src_addr = (dma_addr_t)ssc->phybase + SSC_RHR;
-               slave_config.src_maxburst = 1;
-       }
-
-       dma_chan = snd_dmaengine_pcm_get_chan(substream);
-       if (dmaengine_slave_config(dma_chan, &slave_config)) {
-               pr_err("atmel-pcm: failed to configure dma channel\n");
-               ret = -EBUSY;
-               return ret;
-       }
-
-       return 0;
-}
-
-static int atmel_pcm_hw_params(struct snd_pcm_substream *substream,
-       struct snd_pcm_hw_params *params)
-{
-       struct snd_soc_pcm_runtime *rtd = substream->private_data;
-       struct atmel_pcm_dma_params *prtd;
-       struct ssc_device *ssc;
-       struct at_dma_slave *sdata = NULL;
-       int ret;
-
-       snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);
-
-       prtd = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
-       ssc = prtd->ssc;
-       if (ssc->pdev)
-               sdata = ssc->pdev->dev.platform_data;
-
-       ret = snd_dmaengine_pcm_open_request_chan(substream, filter, sdata);
-       if (ret) {
-               pr_err("atmel-pcm: dmaengine pcm open failed\n");
-               return -EINVAL;
-       }
-
-       ret = atmel_pcm_configure_dma(substream, params, prtd);
-       if (ret) {
-               pr_err("atmel-pcm: failed to configure dmai\n");
-               goto err;
+               slave_config->src_addr = ssc->phybase + SSC_RHR;
+               slave_config->src_maxburst = 1;
        }
 
        prtd->dma_intr_handler = atmel_pcm_dma_irq;
 
        return 0;
-err:
-       snd_dmaengine_pcm_close_release_chan(substream);
-       return ret;
 }
 
-static int atmel_pcm_dma_prepare(struct snd_pcm_substream *substream)
-{
-       struct snd_soc_pcm_runtime *rtd = substream->private_data;
-       struct atmel_pcm_dma_params *prtd;
-
-       prtd = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
-
-       ssc_writex(prtd->ssc->regs, SSC_IER, prtd->mask->ssc_error);
-       ssc_writex(prtd->ssc->regs, SSC_CR, prtd->mask->ssc_enable);
-
-       return 0;
-}
-
-static int atmel_pcm_open(struct snd_pcm_substream *substream)
-{
-       snd_soc_set_runtime_hwparams(substream, &atmel_pcm_dma_hardware);
-
-       return 0;
-}
-
-static struct snd_pcm_ops atmel_pcm_ops = {
-       .open           = atmel_pcm_open,
-       .close          = snd_dmaengine_pcm_close_release_chan,
-       .ioctl          = snd_pcm_lib_ioctl,
-       .hw_params      = atmel_pcm_hw_params,
-       .prepare        = atmel_pcm_dma_prepare,
-       .trigger        = snd_dmaengine_pcm_trigger,
-       .pointer        = snd_dmaengine_pcm_pointer_no_residue,
-       .mmap           = atmel_pcm_mmap,
-};
-
-static struct snd_soc_platform_driver atmel_soc_platform = {
-       .ops            = &atmel_pcm_ops,
-       .pcm_new        = atmel_pcm_new,
-       .pcm_free       = atmel_pcm_free,
+static const struct snd_dmaengine_pcm_config atmel_dmaengine_pcm_config = {
+       .prepare_slave_config = atmel_pcm_configure_dma,
+       .pcm_hardware = &atmel_pcm_dma_hardware,
+       .prealloc_buffer_size = ATMEL_SSC_DMABUF_SIZE,
 };
 
 int atmel_pcm_dma_platform_register(struct device *dev)
 {
-       return snd_soc_register_platform(dev, &atmel_soc_platform);
+       return snd_dmaengine_pcm_register(dev, &atmel_dmaengine_pcm_config,
+                       SND_DMAENGINE_PCM_FLAG_NO_RESIDUE);
 }
 EXPORT_SYMBOL(atmel_pcm_dma_platform_register);
 
 void atmel_pcm_dma_platform_unregister(struct device *dev)
 {
-       snd_soc_unregister_platform(dev);
+       snd_dmaengine_pcm_unregister(dev);
 }
 EXPORT_SYMBOL(atmel_pcm_dma_platform_unregister);
 
index f3fdfa07fcb9fafa76d913b64be070b54d31486e..0ecf356027f6c1fcc91693e4c7063152fe3ac706 100644 (file)
@@ -73,6 +73,7 @@ static struct atmel_ssc_mask ssc_tx_mask = {
        .ssc_disable    = SSC_BIT(CR_TXDIS),
        .ssc_endx       = SSC_BIT(SR_ENDTX),
        .ssc_endbuf     = SSC_BIT(SR_TXBUFE),
+       .ssc_error      = SSC_BIT(SR_OVRUN),
        .pdc_enable     = ATMEL_PDC_TXTEN,
        .pdc_disable    = ATMEL_PDC_TXTDIS,
 };
@@ -82,6 +83,7 @@ static struct atmel_ssc_mask ssc_rx_mask = {
        .ssc_disable    = SSC_BIT(CR_RXDIS),
        .ssc_endx       = SSC_BIT(SR_ENDRX),
        .ssc_endbuf     = SSC_BIT(SR_RXBUFF),
+       .ssc_error      = SSC_BIT(SR_OVRUN),
        .pdc_enable     = ATMEL_PDC_RXTEN,
        .pdc_disable    = ATMEL_PDC_RXTDIS,
 };
@@ -196,15 +198,27 @@ static int atmel_ssc_startup(struct snd_pcm_substream *substream,
                             struct snd_soc_dai *dai)
 {
        struct atmel_ssc_info *ssc_p = &ssc_info[dai->id];
-       int dir_mask;
+       struct atmel_pcm_dma_params *dma_params;
+       int dir, dir_mask;
 
        pr_debug("atmel_ssc_startup: SSC_SR=0x%u\n",
                ssc_readl(ssc_p->ssc->regs, SR));
 
-       if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+       if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+               dir = 0;
                dir_mask = SSC_DIR_MASK_PLAYBACK;
-       else
+       } else {
+               dir = 1;
                dir_mask = SSC_DIR_MASK_CAPTURE;
+       }
+
+       dma_params = &ssc_dma_params[dai->id][dir];
+       dma_params->ssc = ssc_p->ssc;
+       dma_params->substream = substream;
+
+       ssc_p->dma_params[dir] = dma_params;
+
+       snd_soc_dai_set_dma_data(dai, substream, dma_params);
 
        spin_lock_irq(&ssc_p->lock);
        if (ssc_p->dir_mask & dir_mask) {
@@ -325,7 +339,6 @@ static int atmel_ssc_hw_params(struct snd_pcm_substream *substream,
        struct snd_pcm_hw_params *params,
        struct snd_soc_dai *dai)
 {
-       struct snd_soc_pcm_runtime *rtd = snd_pcm_substream_chip(substream);
        int id = dai->id;
        struct atmel_ssc_info *ssc_p = &ssc_info[id];
        struct atmel_pcm_dma_params *dma_params;
@@ -344,19 +357,7 @@ static int atmel_ssc_hw_params(struct snd_pcm_substream *substream,
        else
                dir = 1;
 
-       dma_params = &ssc_dma_params[id][dir];
-       dma_params->ssc = ssc_p->ssc;
-       dma_params->substream = substream;
-
-       ssc_p->dma_params[dir] = dma_params;
-
-       /*
-        * The snd_soc_pcm_stream->dma_data field is only used to communicate
-        * the appropriate DMA parameters to the pcm driver hw_params()
-        * function.  It should not be used for other purposes
-        * as it is common to all substreams.
-        */
-       snd_soc_dai_set_dma_data(rtd->cpu_dai, substream, dma_params);
+       dma_params = ssc_p->dma_params[dir];
 
        channels = params_channels(params);
 
@@ -648,6 +649,7 @@ static int atmel_ssc_prepare(struct snd_pcm_substream *substream,
        dma_params = ssc_p->dma_params[dir];
 
        ssc_writel(ssc_p->ssc->regs, CR, dma_params->mask->ssc_enable);
+       ssc_writel(ssc_p->ssc->regs, IER, dma_params->mask->ssc_error);
 
        pr_debug("%s enabled SSC_SR=0x%08x\n",
                        dir ? "receive" : "transmit",
diff --git a/sound/soc/atmel/atmel_wm8904.c b/sound/soc/atmel/atmel_wm8904.c
new file mode 100644 (file)
index 0000000..7222380
--- /dev/null
@@ -0,0 +1,254 @@
+/*
+ * atmel_wm8904 - Atmel ASoC driver for boards with WM8904 codec.
+ *
+ * Copyright (C) 2012 Atmel
+ *
+ * Author: Bo Shen <voice.shen@atmel.com>
+ *
+ * GPLv2 or later
+ */
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/consumer.h>
+
+#include <sound/soc.h>
+
+#include "../codecs/wm8904.h"
+#include "atmel_ssc_dai.h"
+
+#define MCLK_RATE 32768
+
+static struct clk *mclk;
+
+static const struct snd_soc_dapm_widget atmel_asoc_wm8904_dapm_widgets[] = {
+       SND_SOC_DAPM_HP("Headphone Jack", NULL),
+       SND_SOC_DAPM_MIC("Mic", NULL),
+       SND_SOC_DAPM_LINE("Line In Jack", NULL),
+};
+
+static int atmel_asoc_wm8904_hw_params(struct snd_pcm_substream *substream,
+               struct snd_pcm_hw_params *params)
+{
+       struct snd_soc_pcm_runtime *rtd = substream->private_data;
+       struct snd_soc_dai *codec_dai = rtd->codec_dai;
+       int ret;
+
+       ret = snd_soc_dai_set_pll(codec_dai, WM8904_FLL_MCLK, WM8904_FLL_MCLK,
+               32768, params_rate(params) * 256);
+       if (ret < 0) {
+               pr_err("%s - failed to set wm8904 codec PLL.", __func__);
+               return ret;
+       }
+
+       /*
+        * As here wm8904 use FLL output as its system clock
+        * so calling set_sysclk won't care freq parameter
+        * then we pass 0
+        */
+       ret = snd_soc_dai_set_sysclk(codec_dai, WM8904_CLK_FLL,
+                       0, SND_SOC_CLOCK_IN);
+       if (ret < 0) {
+               pr_err("%s -failed to set wm8904 SYSCLK\n", __func__);
+               return ret;
+       }
+
+       return 0;
+}
+
+static struct snd_soc_ops atmel_asoc_wm8904_ops = {
+       .hw_params = atmel_asoc_wm8904_hw_params,
+};
+
+static int atmel_set_bias_level(struct snd_soc_card *card,
+               struct snd_soc_dapm_context *dapm,
+               enum snd_soc_bias_level level)
+{
+       if (dapm->bias_level == SND_SOC_BIAS_STANDBY) {
+               switch (level) {
+               case SND_SOC_BIAS_PREPARE:
+                       clk_prepare_enable(mclk);
+                       break;
+               case SND_SOC_BIAS_OFF:
+                       clk_disable_unprepare(mclk);
+                       break;
+               default:
+                       break;
+               }
+       }
+
+       return 0;
+};
+
+static struct snd_soc_dai_link atmel_asoc_wm8904_dailink = {
+       .name = "WM8904",
+       .stream_name = "WM8904 PCM",
+       .codec_dai_name = "wm8904-hifi",
+       .dai_fmt = SND_SOC_DAIFMT_I2S
+               | SND_SOC_DAIFMT_NB_NF
+               | SND_SOC_DAIFMT_CBM_CFM,
+       .ops = &atmel_asoc_wm8904_ops,
+};
+
+static struct snd_soc_card atmel_asoc_wm8904_card = {
+       .name = "atmel_asoc_wm8904",
+       .owner = THIS_MODULE,
+       .set_bias_level = atmel_set_bias_level,
+       .dai_link = &atmel_asoc_wm8904_dailink,
+       .num_links = 1,
+       .dapm_widgets = atmel_asoc_wm8904_dapm_widgets,
+       .num_dapm_widgets = ARRAY_SIZE(atmel_asoc_wm8904_dapm_widgets),
+       .fully_routed = true,
+};
+
+static int atmel_asoc_wm8904_dt_init(struct platform_device *pdev)
+{
+       struct device_node *np = pdev->dev.of_node;
+       struct device_node *codec_np, *cpu_np;
+       struct snd_soc_card *card = &atmel_asoc_wm8904_card;
+       struct snd_soc_dai_link *dailink = &atmel_asoc_wm8904_dailink;
+       int ret;
+
+       if (!np) {
+               dev_err(&pdev->dev, "only device tree supported\n");
+               return -EINVAL;
+       }
+
+       ret = snd_soc_of_parse_card_name(card, "atmel,model");
+       if (ret) {
+               dev_err(&pdev->dev, "failed to parse card name\n");
+               return ret;
+       }
+
+       ret = snd_soc_of_parse_audio_routing(card, "atmel,audio-routing");
+       if (ret) {
+               dev_err(&pdev->dev, "failed to parse audio routing\n");
+               return ret;
+       }
+
+       cpu_np = of_parse_phandle(np, "atmel,ssc-controller", 0);
+       if (!cpu_np) {
+               dev_err(&pdev->dev, "failed to get dai and pcm info\n");
+               ret = -EINVAL;
+               return ret;
+       }
+       dailink->cpu_of_node = cpu_np;
+       dailink->platform_of_node = cpu_np;
+       of_node_put(cpu_np);
+
+       codec_np = of_parse_phandle(np, "atmel,audio-codec", 0);
+       if (!codec_np) {
+               dev_err(&pdev->dev, "failed to get codec info\n");
+               ret = -EINVAL;
+               return ret;
+       }
+       dailink->codec_of_node = codec_np;
+       of_node_put(codec_np);
+
+       return 0;
+}
+
+static int atmel_asoc_wm8904_probe(struct platform_device *pdev)
+{
+       struct snd_soc_card *card = &atmel_asoc_wm8904_card;
+       struct snd_soc_dai_link *dailink = &atmel_asoc_wm8904_dailink;
+       struct clk *clk_src;
+       struct pinctrl *pinctrl;
+       int id, ret;
+
+       pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
+       if (IS_ERR(pinctrl)) {
+               dev_err(&pdev->dev, "failed to request pinctrl\n");
+               return PTR_ERR(pinctrl);
+       }
+
+       card->dev = &pdev->dev;
+       ret = atmel_asoc_wm8904_dt_init(pdev);
+       if (ret) {
+               dev_err(&pdev->dev, "failed to init dt info\n");
+               return ret;
+       }
+
+       id = of_alias_get_id((struct device_node *)dailink->cpu_of_node, "ssc");
+       ret = atmel_ssc_set_audio(id);
+       if (ret != 0) {
+               dev_err(&pdev->dev, "failed to set SSC %d for audio\n", id);
+               return ret;
+       }
+
+       mclk = clk_get(NULL, "pck0");
+       if (IS_ERR(mclk)) {
+               dev_err(&pdev->dev, "failed to get pck0\n");
+               ret = PTR_ERR(mclk);
+               goto err_set_audio;
+       }
+
+       clk_src = clk_get(NULL, "clk32k");
+       if (IS_ERR(clk_src)) {
+               dev_err(&pdev->dev, "failed to get clk32k\n");
+               ret = PTR_ERR(clk_src);
+               goto err_set_audio;
+       }
+
+       ret = clk_set_parent(mclk, clk_src);
+       clk_put(clk_src);
+       if (ret != 0) {
+               dev_err(&pdev->dev, "failed to set MCLK parent\n");
+               goto err_set_audio;
+       }
+
+       dev_info(&pdev->dev, "setting pck0 to %dHz\n", MCLK_RATE);
+       clk_set_rate(mclk, MCLK_RATE);
+
+       ret = snd_soc_register_card(card);
+       if (ret) {
+               dev_err(&pdev->dev, "snd_soc_register_card failed\n");
+               goto err_set_audio;
+       }
+
+       return 0;
+
+err_set_audio:
+       atmel_ssc_put_audio(id);
+       return ret;
+}
+
+static int atmel_asoc_wm8904_remove(struct platform_device *pdev)
+{
+       struct snd_soc_card *card = platform_get_drvdata(pdev);
+       struct snd_soc_dai_link *dailink = &atmel_asoc_wm8904_dailink;
+       int id;
+
+       id = of_alias_get_id((struct device_node *)dailink->cpu_of_node, "ssc");
+
+       snd_soc_unregister_card(card);
+       atmel_ssc_put_audio(id);
+
+       return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id atmel_asoc_wm8904_dt_ids[] = {
+       { .compatible = "atmel,asoc-wm8904", },
+       { }
+};
+#endif
+
+static struct platform_driver atmel_asoc_wm8904_driver = {
+       .driver = {
+               .name = "atmel-wm8904-audio",
+               .owner = THIS_MODULE,
+               .of_match_table = of_match_ptr(atmel_asoc_wm8904_dt_ids),
+       },
+       .probe = atmel_asoc_wm8904_probe,
+       .remove = atmel_asoc_wm8904_remove,
+};
+
+module_platform_driver(atmel_asoc_wm8904_driver);
+
+/* Module information */
+MODULE_AUTHOR("Bo Shen <voice.shen@atmel.com>");
+MODULE_DESCRIPTION("ALSA SoC machine driver for Atmel EK with WM8904 codec");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/atmel/sam9x5_wm8731.c b/sound/soc/atmel/sam9x5_wm8731.c
new file mode 100644 (file)
index 0000000..992ae38
--- /dev/null
@@ -0,0 +1,208 @@
+/*
+ * sam9x5_wm8731   --  SoC audio for AT91SAM9X5-based boards
+ *                     that are using WM8731 as codec.
+ *
+ *  Copyright (C) 2011 Atmel,
+ *               Nicolas Ferre <nicolas.ferre@atmel.com>
+ *
+ *  Copyright (C) 2013 Paratronic,
+ *               Richard Genoud <richard.genoud@gmail.com>
+ *
+ * Based on sam9g20_wm8731.c by:
+ * Sedji Gaouaou <sedji.gaouaou@atmel.com>
+ *
+ *  This program is free software; you can redistribute  it and/or modify it
+ *  under  the terms of  the GNU General  Public License as published by the
+ *  Free Software Foundation;  either version 2 of the  License, or (at your
+ *  option) any later version.
+ *
+ */
+#include <linux/of.h>
+#include <linux/export.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <linux/device.h>
+
+#include <sound/soc.h>
+#include <sound/soc-dai.h>
+#include <sound/soc-dapm.h>
+
+#include "../codecs/wm8731.h"
+#include "atmel_ssc_dai.h"
+
+
+#define MCLK_RATE 12288000
+
+#define DRV_NAME "sam9x5-snd-wm8731"
+
+struct sam9x5_drvdata {
+       int ssc_id;
+};
+
+/*
+ * Logic for a wm8731 as connected on a at91sam9x5ek based board.
+ */
+/*
+ * Machine-level DAI-link init callback: configure the WM8731 to take
+ * its system clock (MCLK_RATE) from the XTAL input, used by both DAC
+ * and ADC.  Returns 0 on success or a negative error code.
+ */
+static int sam9x5_wm8731_init(struct snd_soc_pcm_runtime *rtd)
+{
+       struct snd_soc_dai *codec_dai = rtd->codec_dai;
+       struct device *dev = rtd->dev;
+       int ret;
+
+       dev_dbg(dev, "ASoC: %s called\n", __func__);
+
+       /* set the codec system clock for DAC and ADC */
+       ret = snd_soc_dai_set_sysclk(codec_dai, WM8731_SYSCLK_XTAL,
+                                    MCLK_RATE, SND_SOC_CLOCK_IN);
+       if (ret < 0) {
+               dev_err(dev, "ASoC: Failed to set WM8731 SYSCLK: %d\n", ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+/*
+ * Audio paths on at91sam9x5ek board:
+ *
+ *  |A| ------------> |      | ---R----> Headphone Jack
+ *  |T| <----\        |  WM  | ---L--/
+ *  |9| ---> CLK <--> | 8731 | <--R----- Line In Jack
+ *  |1| <------------ |      | <--L--/
+ */
+static const struct snd_soc_dapm_widget sam9x5_dapm_widgets[] = {
+       SND_SOC_DAPM_HP("Headphone Jack", NULL),
+       SND_SOC_DAPM_LINE("Line In Jack", NULL),
+};
+
+/*
+ * Parse the device tree bindings, build the card/DAI-link structures,
+ * reserve the SSC for audio use and register the ASoC card.
+ *
+ * Returns 0 on success or a negative error code; all OF node
+ * references and the SSC reservation are released on failure.
+ */
+static int sam9x5_wm8731_driver_probe(struct platform_device *pdev)
+{
+       struct device_node *np = pdev->dev.of_node;
+       struct device_node *codec_np, *cpu_np;
+       struct snd_soc_card *card;
+       struct snd_soc_dai_link *dai;
+       struct sam9x5_drvdata *priv;
+       int ret;
+
+       if (!np) {
+               dev_err(&pdev->dev, "No device node supplied\n");
+               return -EINVAL;
+       }
+
+       card = devm_kzalloc(&pdev->dev, sizeof(*card), GFP_KERNEL);
+       priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+       dai = devm_kzalloc(&pdev->dev, sizeof(*dai), GFP_KERNEL);
+       if (!dai || !card || !priv) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       /* Stash priv on the card so remove() can read the SSC id back
+        * from card->drvdata; without this, unload dereferences NULL. */
+       snd_soc_card_set_drvdata(card, priv);
+
+       card->dev = &pdev->dev;
+       card->owner = THIS_MODULE;
+       card->dai_link = dai;
+       card->num_links = 1;
+       card->dapm_widgets = sam9x5_dapm_widgets;
+       card->num_dapm_widgets = ARRAY_SIZE(sam9x5_dapm_widgets);
+       dai->name = "WM8731";
+       dai->stream_name = "WM8731 PCM";
+       dai->codec_dai_name = "wm8731-hifi";
+       dai->init = sam9x5_wm8731_init;
+       /* Codec is clock master: I2S, normal bit/frame clock polarity. */
+       dai->dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF
+               | SND_SOC_DAIFMT_CBM_CFM;
+
+       ret = snd_soc_of_parse_card_name(card, "atmel,model");
+       if (ret) {
+               dev_err(&pdev->dev, "atmel,model node missing\n");
+               goto out;
+       }
+
+       ret = snd_soc_of_parse_audio_routing(card, "atmel,audio-routing");
+       if (ret) {
+               dev_err(&pdev->dev, "atmel,audio-routing node missing\n");
+               goto out;
+       }
+
+       codec_np = of_parse_phandle(np, "atmel,audio-codec", 0);
+       if (!codec_np) {
+               dev_err(&pdev->dev, "atmel,audio-codec node missing\n");
+               ret = -EINVAL;
+               goto out;
+       }
+       dai->codec_of_node = codec_np;
+
+       cpu_np = of_parse_phandle(np, "atmel,ssc-controller", 0);
+       if (!cpu_np) {
+               dev_err(&pdev->dev, "atmel,ssc-controller node missing\n");
+               ret = -EINVAL;
+               goto out_put_codec_np;
+       }
+       dai->cpu_of_node = cpu_np;
+       dai->platform_of_node = cpu_np;
+
+       priv->ssc_id = of_alias_get_id(cpu_np, "ssc");
+
+       ret = atmel_ssc_set_audio(priv->ssc_id);
+       if (ret != 0) {
+               /* Report the SSC id first, then the error code, in the
+                * order the format string expects. */
+               dev_err(&pdev->dev,
+                       "ASoC: Failed to set SSC %d for audio: %d\n",
+                       priv->ssc_id, ret);
+               goto out_put_cpu_np;
+       }
+
+       of_node_put(codec_np);
+       of_node_put(cpu_np);
+
+       platform_set_drvdata(pdev, card);
+
+       ret = snd_soc_register_card(card);
+       if (ret) {
+               dev_err(&pdev->dev,
+                       "ASoC: Platform device allocation failed\n");
+               goto out_put_audio;
+       }
+
+       dev_dbg(&pdev->dev, "ASoC: %s ok\n", __func__);
+
+       return ret;
+
+out_put_audio:
+       /* OF nodes were already released on the success-so-far path. */
+       atmel_ssc_put_audio(priv->ssc_id);
+       goto out;
+out_put_cpu_np:
+       of_node_put(cpu_np);
+out_put_codec_np:
+       of_node_put(codec_np);
+out:
+       return ret;
+}
+
+/*
+ * Unregister the card and release the SSC reserved at probe time.
+ * NOTE(review): this reads priv from card->drvdata -- verify that
+ * probe actually stores priv there before it can be dereferenced.
+ */
+static int sam9x5_wm8731_driver_remove(struct platform_device *pdev)
+{
+       struct snd_soc_card *card = platform_get_drvdata(pdev);
+       struct sam9x5_drvdata *priv = card->drvdata;
+
+       snd_soc_unregister_card(card);
+       atmel_ssc_put_audio(priv->ssc_id);
+
+       return 0;
+}
+
+static const struct of_device_id sam9x5_wm8731_of_match[] = {
+       { .compatible = "atmel,sam9x5-wm8731-audio", },
+       {},
+};
+MODULE_DEVICE_TABLE(of, sam9x5_wm8731_of_match);
+
+static struct platform_driver sam9x5_wm8731_driver = {
+       .driver = {
+               .name = DRV_NAME,
+               .owner = THIS_MODULE,
+               .of_match_table = of_match_ptr(sam9x5_wm8731_of_match),
+       },
+       .probe = sam9x5_wm8731_driver_probe,
+       .remove = sam9x5_wm8731_driver_remove,
+};
+module_platform_driver(sam9x5_wm8731_driver);
+
+/* Module information */
+MODULE_AUTHOR("Nicolas Ferre <nicolas.ferre@atmel.com>");
+MODULE_AUTHOR("Richard Genoud <richard.genoud@gmail.com>");
+MODULE_DESCRIPTION("ALSA SoC machine driver for AT91SAM9x5 - WM8731");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRV_NAME);
index a497a0cfeba153aa70630b0dfadc7e69bf2d0e7c..decba87a074c6267eb1bd93265fc8fc96787db7a 100644 (file)
@@ -73,12 +73,14 @@ static struct snd_soc_dai_link db1300_ac97_dai = {
 
 static struct snd_soc_card db1300_ac97_machine = {
        .name           = "DB1300_AC97",
+       .owner          = THIS_MODULE,
        .dai_link       = &db1300_ac97_dai,
        .num_links      = 1,
 };
 
 static struct snd_soc_card db1550_ac97_machine = {
        .name           = "DB1550_AC97",
+       .owner          = THIS_MODULE,
        .dai_link       = &db1200_ac97_dai,
        .num_links      = 1,
 };
@@ -145,6 +147,7 @@ static struct snd_soc_dai_link db1300_i2s_dai = {
 
 static struct snd_soc_card db1300_i2s_machine = {
        .name           = "DB1300_I2S",
+       .owner          = THIS_MODULE,
        .dai_link       = &db1300_i2s_dai,
        .num_links      = 1,
 };
@@ -161,6 +164,7 @@ static struct snd_soc_dai_link db1550_i2s_dai = {
 
 static struct snd_soc_card db1550_i2s_machine = {
        .name           = "DB1550_I2S",
+       .owner          = THIS_MODULE,
        .dai_link       = &db1550_i2s_dai,
        .num_links      = 1,
 };
index a822ab822bb7ba40f5f1ead77a04a7caa7069233..986dcec79fa02e1552fb631456878cb9dbe5f940 100644 (file)
@@ -379,9 +379,6 @@ static int au1xpsc_ac97_drvprobe(struct platform_device *pdev)
        mutex_init(&wd->lock);
 
        iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!iores)
-               return -ENODEV;
-
        wd->mmio = devm_ioremap_resource(&pdev->dev, iores);
        if (IS_ERR(wd->mmio))
                return PTR_ERR(wd->mmio);
index 0c3e22d90a8d903fbf1495936c5a97a79ab8a77e..a680fdc9bb42012f72366a4a09f7ece684a06ce1 100644 (file)
@@ -9,7 +9,6 @@
 #ifndef _BF5XX_AC97_H
 #define _BF5XX_AC97_H
 
-extern struct snd_ac97 *ac97;
 /* Frame format in memory, only support stereo currently */
 struct ac97_frame {
        u16 ac97_tag;           /* slot 0 */
index 04491f0e8d1bde2636f8772e3b7d3bb65284d44a..efa75b5086a4b00e83802d476c87aae0a6914d0e 100644 (file)
@@ -363,9 +363,6 @@ static int ep93xx_ac97_probe(struct platform_device *pdev)
                return -ENOMEM;
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res)
-               return -ENODEV;
-
        info->regs = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(info->regs))
                return PTR_ERR(info->regs);
index 17ad70bca9fe5828d4a8b3b5327dfc6b30bdb260..f23f331e9a974409adf876356a2f766cb81ac812 100644 (file)
@@ -376,9 +376,6 @@ static int ep93xx_i2s_probe(struct platform_device *pdev)
                return -ENOMEM;
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res)
-               return -ENODEV;
-
        info->regs = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(info->regs))
                return PTR_ERR(info->regs);
index badb6fbacaa66222bea9cef708c1379783746c7f..15106c045478e644719a73e76edb6ea795909ff1 100644 (file)
@@ -10,6 +10,7 @@ config SND_SOC_I2C_AND_SPI
 
 config SND_SOC_ALL_CODECS
        tristate "Build all ASoC CODEC drivers"
+       depends on COMPILE_TEST
        select SND_SOC_88PM860X if MFD_88PM860X
        select SND_SOC_L3
        select SND_SOC_AB8500_CODEC if ABX500_CORE
@@ -20,6 +21,7 @@ config SND_SOC_ALL_CODECS
        select SND_SOC_AD73311
        select SND_SOC_ADAU1373 if I2C
        select SND_SOC_ADAV80X if SND_SOC_I2C_AND_SPI
+       select SND_SOC_ADAU1701 if I2C
        select SND_SOC_ADS117X
        select SND_SOC_AK4104 if SPI_MASTER
        select SND_SOC_AK4535 if I2C
@@ -54,6 +56,8 @@ config SND_SOC_ALL_CODECS
        select SND_SOC_MC13783 if MFD_MC13XXX
        select SND_SOC_ML26124 if I2C
        select SND_SOC_HDMI_CODEC
+       select SND_SOC_PCM1681 if I2C
+       select SND_SOC_PCM1792A if SPI_MASTER
        select SND_SOC_PCM3008
        select SND_SOC_RT5631 if I2C
        select SND_SOC_RT5640 if I2C
@@ -122,6 +126,7 @@ config SND_SOC_ALL_CODECS
        select SND_SOC_WM8994 if MFD_WM8994
        select SND_SOC_WM8995 if SND_SOC_I2C_AND_SPI
        select SND_SOC_WM8996 if I2C
+       select SND_SOC_WM8997 if MFD_WM8997
        select SND_SOC_WM9081 if I2C
        select SND_SOC_WM9090 if I2C
        select SND_SOC_WM9705 if SND_SOC_AC97_BUS
@@ -145,8 +150,10 @@ config SND_SOC_ARIZONA
        tristate
        default y if SND_SOC_WM5102=y
        default y if SND_SOC_WM5110=y
+       default y if SND_SOC_WM8997=y
        default m if SND_SOC_WM5102=m
        default m if SND_SOC_WM5110=m
+       default m if SND_SOC_WM8997=m
 
 config SND_SOC_WM_HUBS
        tristate
@@ -198,6 +205,9 @@ config SND_SOC_AK4104
 config SND_SOC_AK4535
        tristate
 
+config SND_SOC_AK4554
+       tristate
+
 config SND_SOC_AK4641
        tristate
 
@@ -292,6 +302,12 @@ config SND_SOC_MAX9850
 config SND_SOC_HDMI_CODEC
        tristate
 
+config SND_SOC_PCM1681
+       tristate
+
+config SND_SOC_PCM1792A
+       tristate
+
 config SND_SOC_PCM3008
        tristate
 
@@ -500,6 +516,9 @@ config SND_SOC_WM8995
 config SND_SOC_WM8996
        tristate
 
+config SND_SOC_WM8997
+       tristate
+
 config SND_SOC_WM9081
        tristate
 
index 70fd8066f546e9cfc39dcb024f1efa6e5ea6bf1f..bc126764a44d02cb38b50f3ae74900a9ce490da2 100644 (file)
@@ -11,6 +11,7 @@ snd-soc-adav80x-objs := adav80x.o
 snd-soc-ads117x-objs := ads117x.o
 snd-soc-ak4104-objs := ak4104.o
 snd-soc-ak4535-objs := ak4535.o
+snd-soc-ak4554-objs := ak4554.o
 snd-soc-ak4641-objs := ak4641.o
 snd-soc-ak4642-objs := ak4642.o
 snd-soc-ak4671-objs := ak4671.o
@@ -42,6 +43,8 @@ snd-soc-max9850-objs := max9850.o
 snd-soc-mc13783-objs := mc13783.o
 snd-soc-ml26124-objs := ml26124.o
 snd-soc-hdmi-codec-objs := hdmi.o
+snd-soc-pcm1681-objs := pcm1681.o
+snd-soc-pcm1792a-codec-objs := pcm1792a.o
 snd-soc-pcm3008-objs := pcm3008.o
 snd-soc-rt5631-objs := rt5631.o
 snd-soc-rt5640-objs := rt5640.o
@@ -114,6 +117,7 @@ snd-soc-wm8991-objs := wm8991.o
 snd-soc-wm8993-objs := wm8993.o
 snd-soc-wm8994-objs := wm8994.o wm8958-dsp2.o
 snd-soc-wm8995-objs := wm8995.o
+snd-soc-wm8997-objs := wm8997.o
 snd-soc-wm9081-objs := wm9081.o
 snd-soc-wm9090-objs := wm9090.o
 snd-soc-wm9705-objs := wm9705.o
@@ -138,6 +142,7 @@ obj-$(CONFIG_SND_SOC_ADAV80X)  += snd-soc-adav80x.o
 obj-$(CONFIG_SND_SOC_ADS117X)  += snd-soc-ads117x.o
 obj-$(CONFIG_SND_SOC_AK4104)   += snd-soc-ak4104.o
 obj-$(CONFIG_SND_SOC_AK4535)   += snd-soc-ak4535.o
+obj-$(CONFIG_SND_SOC_AK4554)   += snd-soc-ak4554.o
 obj-$(CONFIG_SND_SOC_AK4641)   += snd-soc-ak4641.o
 obj-$(CONFIG_SND_SOC_AK4642)   += snd-soc-ak4642.o
 obj-$(CONFIG_SND_SOC_AK4671)   += snd-soc-ak4671.o
@@ -171,6 +176,8 @@ obj-$(CONFIG_SND_SOC_MAX9850)       += snd-soc-max9850.o
 obj-$(CONFIG_SND_SOC_MC13783)  += snd-soc-mc13783.o
 obj-$(CONFIG_SND_SOC_ML26124)  += snd-soc-ml26124.o
 obj-$(CONFIG_SND_SOC_HDMI_CODEC) += snd-soc-hdmi-codec.o
+obj-$(CONFIG_SND_SOC_PCM1681)  += snd-soc-pcm1681.o
+obj-$(CONFIG_SND_SOC_PCM1792A) += snd-soc-pcm1792a-codec.o
 obj-$(CONFIG_SND_SOC_PCM3008)  += snd-soc-pcm3008.o
 obj-$(CONFIG_SND_SOC_RT5631)   += snd-soc-rt5631.o
 obj-$(CONFIG_SND_SOC_RT5640)   += snd-soc-rt5640.o
@@ -239,6 +246,7 @@ obj-$(CONFIG_SND_SOC_WM8991)        += snd-soc-wm8991.o
 obj-$(CONFIG_SND_SOC_WM8993)   += snd-soc-wm8993.o
 obj-$(CONFIG_SND_SOC_WM8994)   += snd-soc-wm8994.o
 obj-$(CONFIG_SND_SOC_WM8995)   += snd-soc-wm8995.o
+obj-$(CONFIG_SND_SOC_WM8997)   += snd-soc-wm8997.o
 obj-$(CONFIG_SND_SOC_WM9081)   += snd-soc-wm9081.o
 obj-$(CONFIG_SND_SOC_WM9090)   += snd-soc-wm9090.o
 obj-$(CONFIG_SND_SOC_WM9705)   += snd-soc-wm9705.o
index ec7351803c245b7ba209a582e0d82fc958effb24..8d9ba4ba4bfe0b2fece2dc5c30f955fd5d6dcc00 100644 (file)
 #include <sound/initval.h>
 #include <sound/soc.h>
 
+static const struct snd_soc_dapm_widget ac97_widgets[] = {
+       SND_SOC_DAPM_INPUT("RX"),
+       SND_SOC_DAPM_OUTPUT("TX"),
+};
+
+static const struct snd_soc_dapm_route ac97_routes[] = {
+       { "AC97 Capture", NULL, "RX" },
+       { "TX", NULL, "AC97 Playback" },
+};
+
 static int ac97_prepare(struct snd_pcm_substream *substream,
                        struct snd_soc_dai *dai)
 {
@@ -117,6 +127,11 @@ static struct snd_soc_codec_driver soc_codec_dev_ac97 = {
        .probe =        ac97_soc_probe,
        .suspend =      ac97_soc_suspend,
        .resume =       ac97_soc_resume,
+
+       .dapm_widgets = ac97_widgets,
+       .num_dapm_widgets = ARRAY_SIZE(ac97_widgets),
+       .dapm_routes = ac97_routes,
+       .num_dapm_routes = ARRAY_SIZE(ac97_routes),
 };
 
 static int ac97_probe(struct platform_device *pdev)
index 89fcf7d6e7b8fdfb94bac498c935ad9bf3646fcc..7257a8885f426d7cf05b017fbda1aa3f01abe236 100644 (file)
@@ -96,6 +96,44 @@ SOC_ENUM("Capture Source", ad1980_cap_src),
 SOC_SINGLE("Mic Boost Switch", AC97_MIC, 6, 1, 0),
 };
 
+static const struct snd_soc_dapm_widget ad1980_dapm_widgets[] = {
+SND_SOC_DAPM_INPUT("MIC1"),
+SND_SOC_DAPM_INPUT("MIC2"),
+SND_SOC_DAPM_INPUT("CD_L"),
+SND_SOC_DAPM_INPUT("CD_R"),
+SND_SOC_DAPM_INPUT("AUX_L"),
+SND_SOC_DAPM_INPUT("AUX_R"),
+SND_SOC_DAPM_INPUT("LINE_IN_L"),
+SND_SOC_DAPM_INPUT("LINE_IN_R"),
+
+SND_SOC_DAPM_OUTPUT("LFE_OUT"),
+SND_SOC_DAPM_OUTPUT("CENTER_OUT"),
+SND_SOC_DAPM_OUTPUT("LINE_OUT_L"),
+SND_SOC_DAPM_OUTPUT("LINE_OUT_R"),
+SND_SOC_DAPM_OUTPUT("MONO_OUT"),
+SND_SOC_DAPM_OUTPUT("HP_OUT_L"),
+SND_SOC_DAPM_OUTPUT("HP_OUT_R"),
+};
+
+static const struct snd_soc_dapm_route ad1980_dapm_routes[] = {
+       { "Capture", NULL, "MIC1" },
+       { "Capture", NULL, "MIC2" },
+       { "Capture", NULL, "CD_L" },
+       { "Capture", NULL, "CD_R" },
+       { "Capture", NULL, "AUX_L" },
+       { "Capture", NULL, "AUX_R" },
+       { "Capture", NULL, "LINE_IN_L" },
+       { "Capture", NULL, "LINE_IN_R" },
+
+       { "LFE_OUT", NULL, "Playback" },
+       { "CENTER_OUT", NULL, "Playback" },
+       { "LINE_OUT_L", NULL, "Playback" },
+       { "LINE_OUT_R", NULL, "Playback" },
+       { "MONO_OUT", NULL, "Playback" },
+       { "HP_OUT_L", NULL, "Playback" },
+       { "HP_OUT_R", NULL, "Playback" },
+};
+
 static unsigned int ac97_read(struct snd_soc_codec *codec,
        unsigned int reg)
 {
@@ -253,6 +291,11 @@ static struct snd_soc_codec_driver soc_codec_dev_ad1980 = {
        .reg_cache_step = 2,
        .write = ac97_write,
        .read = ac97_read,
+
+       .dapm_widgets = ad1980_dapm_widgets,
+       .num_dapm_widgets = ARRAY_SIZE(ad1980_dapm_widgets),
+       .dapm_routes = ad1980_dapm_routes,
+       .num_dapm_routes = ARRAY_SIZE(ad1980_dapm_routes),
 };
 
 static int ad1980_probe(struct platform_device *pdev)
index b1f2baf42b48233678ae051bea1bd2b9fb747327..5fac8adbc1367a8b01d1243e9768a25b2b015643 100644 (file)
 
 #include "ad73311.h"
 
+static const struct snd_soc_dapm_widget ad73311_dapm_widgets[] = {
+SND_SOC_DAPM_INPUT("VINP"),
+SND_SOC_DAPM_INPUT("VINN"),
+SND_SOC_DAPM_OUTPUT("VOUTN"),
+SND_SOC_DAPM_OUTPUT("VOUTP"),
+};
+
+static const struct snd_soc_dapm_route ad73311_dapm_routes[] = {
+       { "Capture", NULL, "VINP" },
+       { "Capture", NULL, "VINN" },
+
+       { "VOUTN", NULL, "Playback" },
+       { "VOUTP", NULL, "Playback" },
+};
+
 static struct snd_soc_dai_driver ad73311_dai = {
        .name = "ad73311-hifi",
        .playback = {
@@ -39,7 +54,12 @@ static struct snd_soc_dai_driver ad73311_dai = {
                .formats = SNDRV_PCM_FMTBIT_S16_LE, },
 };
 
-static struct snd_soc_codec_driver soc_codec_dev_ad73311;
+static struct snd_soc_codec_driver soc_codec_dev_ad73311 = {
+       .dapm_widgets = ad73311_dapm_widgets,
+       .num_dapm_widgets = ARRAY_SIZE(ad73311_dapm_widgets),
+       .dapm_routes = ad73311_dapm_routes,
+       .num_dapm_routes = ARRAY_SIZE(ad73311_dapm_routes),
+};
 
 static int ad73311_probe(struct platform_device *pdev)
 {
index d1124a5b34713c688d92846e25cca563ed9c20ff..ebff1128be595ca7ff63f5ca049d2f30342dee45 100644 (file)
@@ -91,7 +91,7 @@
 #define ADAU1701_OSCIPOW_OPD           0x04
 #define ADAU1701_DACSET_DACINIT                1
 
-#define ADAU1707_CLKDIV_UNSET          (-1UL)
+#define ADAU1707_CLKDIV_UNSET          (-1U)
 
 #define ADAU1701_FIRMWARE "adau1701.bin"
 
@@ -247,21 +247,21 @@ static int adau1701_reset(struct snd_soc_codec *codec, unsigned int clkdiv)
            gpio_is_valid(adau1701->gpio_pll_mode[1])) {
                switch (clkdiv) {
                case 64:
-                       gpio_set_value(adau1701->gpio_pll_mode[0], 0);
-                       gpio_set_value(adau1701->gpio_pll_mode[1], 0);
+                       gpio_set_value_cansleep(adau1701->gpio_pll_mode[0], 0);
+                       gpio_set_value_cansleep(adau1701->gpio_pll_mode[1], 0);
                        break;
                case 256:
-                       gpio_set_value(adau1701->gpio_pll_mode[0], 0);
-                       gpio_set_value(adau1701->gpio_pll_mode[1], 1);
+                       gpio_set_value_cansleep(adau1701->gpio_pll_mode[0], 0);
+                       gpio_set_value_cansleep(adau1701->gpio_pll_mode[1], 1);
                        break;
                case 384:
-                       gpio_set_value(adau1701->gpio_pll_mode[0], 1);
-                       gpio_set_value(adau1701->gpio_pll_mode[1], 0);
+                       gpio_set_value_cansleep(adau1701->gpio_pll_mode[0], 1);
+                       gpio_set_value_cansleep(adau1701->gpio_pll_mode[1], 0);
                        break;
                case 0: /* fallback */
                case 512:
-                       gpio_set_value(adau1701->gpio_pll_mode[0], 1);
-                       gpio_set_value(adau1701->gpio_pll_mode[1], 1);
+                       gpio_set_value_cansleep(adau1701->gpio_pll_mode[0], 1);
+                       gpio_set_value_cansleep(adau1701->gpio_pll_mode[1], 1);
                        break;
                }
        }
@@ -269,10 +269,10 @@ static int adau1701_reset(struct snd_soc_codec *codec, unsigned int clkdiv)
        adau1701->pll_clkdiv = clkdiv;
 
        if (gpio_is_valid(adau1701->gpio_nreset)) {
-               gpio_set_value(adau1701->gpio_nreset, 0);
+               gpio_set_value_cansleep(adau1701->gpio_nreset, 0);
                /* minimum reset time is 20ns */
                udelay(1);
-               gpio_set_value(adau1701->gpio_nreset, 1);
+               gpio_set_value_cansleep(adau1701->gpio_nreset, 1);
                /* power-up time may be as long as 85ms */
                mdelay(85);
        }
@@ -734,7 +734,10 @@ static int adau1701_i2c_remove(struct i2c_client *client)
 }
 
 static const struct i2c_device_id adau1701_i2c_id[] = {
+       { "adau1401", 0 },
+       { "adau1401a", 0 },
        { "adau1701", 0 },
+       { "adau1702", 0 },
        { }
 };
 MODULE_DEVICE_TABLE(i2c, adau1701_i2c_id);
index 3c839cc4e00ecb48a5e3948567c14dbede7706a3..15b012d0f226c48a4178b1c9f4d10468708dcd1d 100644 (file)
@@ -868,6 +868,12 @@ static int adav80x_bus_remove(struct device *dev)
 }
 
 #if defined(CONFIG_SPI_MASTER)
+static const struct spi_device_id adav80x_spi_id[] = {
+       { "adav801", 0 },
+       { }
+};
+MODULE_DEVICE_TABLE(spi, adav80x_spi_id);
+
 static int adav80x_spi_probe(struct spi_device *spi)
 {
        return adav80x_bus_probe(&spi->dev, SND_SOC_SPI);
@@ -885,15 +891,16 @@ static struct spi_driver adav80x_spi_driver = {
        },
        .probe          = adav80x_spi_probe,
        .remove         = adav80x_spi_remove,
+       .id_table       = adav80x_spi_id,
 };
 #endif
 
 #if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
-static const struct i2c_device_id adav80x_id[] = {
+static const struct i2c_device_id adav80x_i2c_id[] = {
        { "adav803", 0 },
        { }
 };
-MODULE_DEVICE_TABLE(i2c, adav80x_id);
+MODULE_DEVICE_TABLE(i2c, adav80x_i2c_id);
 
 static int adav80x_i2c_probe(struct i2c_client *client,
                             const struct i2c_device_id *id)
@@ -913,7 +920,7 @@ static struct i2c_driver adav80x_i2c_driver = {
        },
        .probe = adav80x_i2c_probe,
        .remove = adav80x_i2c_remove,
-       .id_table = adav80x_id,
+       .id_table = adav80x_i2c_id,
 };
 #endif
 
index 506d474c4d2227372b6b8e4a6743ca8f67ef7da3..8f388edff5864632bf1eb9f8382bdeedf4267486 100644 (file)
 #define ADS117X_RATES (SNDRV_PCM_RATE_8000_48000)
 #define ADS117X_FORMATS (SNDRV_PCM_FMTBIT_S16_LE)
 
+static const struct snd_soc_dapm_widget ads117x_dapm_widgets[] = {
+SND_SOC_DAPM_INPUT("Input1"),
+SND_SOC_DAPM_INPUT("Input2"),
+SND_SOC_DAPM_INPUT("Input3"),
+SND_SOC_DAPM_INPUT("Input4"),
+SND_SOC_DAPM_INPUT("Input5"),
+SND_SOC_DAPM_INPUT("Input6"),
+SND_SOC_DAPM_INPUT("Input7"),
+SND_SOC_DAPM_INPUT("Input8"),
+};
+
+static const struct snd_soc_dapm_route ads117x_dapm_routes[] = {
+       { "Capture", NULL, "Input1" },
+       { "Capture", NULL, "Input2" },
+       { "Capture", NULL, "Input3" },
+       { "Capture", NULL, "Input4" },
+       { "Capture", NULL, "Input5" },
+       { "Capture", NULL, "Input6" },
+       { "Capture", NULL, "Input7" },
+       { "Capture", NULL, "Input8" },
+};
+
 static struct snd_soc_dai_driver ads117x_dai = {
 /* ADC */
        .name = "ads117x-hifi",
@@ -34,7 +56,12 @@ static struct snd_soc_dai_driver ads117x_dai = {
                .formats = ADS117X_FORMATS,},
 };
 
-static struct snd_soc_codec_driver soc_codec_dev_ads117x;
+static struct snd_soc_codec_driver soc_codec_dev_ads117x = {
+       .dapm_widgets = ads117x_dapm_widgets,
+       .num_dapm_widgets = ARRAY_SIZE(ads117x_dapm_widgets),
+       .dapm_routes = ads117x_dapm_routes,
+       .num_dapm_routes = ARRAY_SIZE(ads117x_dapm_routes),
+};
 
 static int ads117x_probe(struct platform_device *pdev)
 {
index c7cfdf957e4dac91c04ffad2f9e2d2dc1e2434aa..71059c07ae7be5abc3a9c70fd5443bce27493ced 100644 (file)
@@ -51,6 +51,17 @@ struct ak4104_private {
        struct regmap *regmap;
 };
 
+static const struct snd_soc_dapm_widget ak4104_dapm_widgets[] = {
+SND_SOC_DAPM_PGA("TXE", AK4104_REG_TX, AK4104_TX_TXE, 0, NULL, 0),
+
+SND_SOC_DAPM_OUTPUT("TX"),
+};
+
+static const struct snd_soc_dapm_route ak4104_dapm_routes[] = {
+       { "TXE", NULL, "Playback" },
+       { "TX", NULL, "TXE" },
+};
+
 static int ak4104_set_dai_fmt(struct snd_soc_dai *codec_dai,
                              unsigned int format)
 {
@@ -138,29 +149,11 @@ static int ak4104_hw_params(struct snd_pcm_substream *substream,
        if (ret < 0)
                return ret;
 
-       /* enable transmitter */
-       ret = regmap_update_bits(ak4104->regmap, AK4104_REG_TX,
-                                AK4104_TX_TXE, AK4104_TX_TXE);
-       if (ret < 0)
-               return ret;
-
        return 0;
 }
 
-static int ak4104_hw_free(struct snd_pcm_substream *substream,
-                         struct snd_soc_dai *dai)
-{
-       struct snd_soc_codec *codec = dai->codec;
-       struct ak4104_private *ak4104 = snd_soc_codec_get_drvdata(codec);
-
-       /* disable transmitter */
-       return regmap_update_bits(ak4104->regmap, AK4104_REG_TX,
-                                 AK4104_TX_TXE, 0);
-}
-
 static const struct snd_soc_dai_ops ak4101_dai_ops = {
        .hw_params = ak4104_hw_params,
-       .hw_free = ak4104_hw_free,
        .set_fmt = ak4104_set_dai_fmt,
 };
 
@@ -214,6 +207,11 @@ static int ak4104_remove(struct snd_soc_codec *codec)
 static struct snd_soc_codec_driver soc_codec_device_ak4104 = {
        .probe =        ak4104_probe,
        .remove =       ak4104_remove,
+
+       .dapm_widgets = ak4104_dapm_widgets,
+       .num_dapm_widgets = ARRAY_SIZE(ak4104_dapm_widgets),
+       .dapm_routes = ak4104_dapm_routes,
+       .num_dapm_routes = ARRAY_SIZE(ak4104_dapm_routes),
 };
 
 static const struct regmap_config ak4104_regmap = {
diff --git a/sound/soc/codecs/ak4554.c b/sound/soc/codecs/ak4554.c
new file mode 100644 (file)
index 0000000..79e9555
--- /dev/null
@@ -0,0 +1,106 @@
+/*
+ * ak4554.c
+ *
+ * Copyright (C) 2013 Renesas Solutions Corp.
+ * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <sound/soc.h>
+
+/*
+ * ak4554 is very simple DA/AD converter which has no setting register.
+ *
+ * CAUTION
+ *
+ * ak4554 playback format is SND_SOC_DAIFMT_RIGHT_J,
+ * and,   capture  format is SND_SOC_DAIFMT_LEFT_J
+ * on same bit clock, LR clock.
+ * But, this driver doesn't have snd_soc_dai_ops :: set_fmt
+ *
+ * CPU/Codec DAI image
+ *
+ * CPU-DAI1 (plaback only fmt = RIGHT_J) --+-- ak4554
+ *                                        |
+ * CPU-DAI2 (capture only fmt = LEFT_J) ---+
+ */
+
+static const struct snd_soc_dapm_widget ak4554_dapm_widgets[] = {
+SND_SOC_DAPM_INPUT("AINL"),
+SND_SOC_DAPM_INPUT("AINR"),
+
+SND_SOC_DAPM_OUTPUT("AOUTL"),
+SND_SOC_DAPM_OUTPUT("AOUTR"),
+};
+
+static const struct snd_soc_dapm_route ak4554_dapm_routes[] = {
+       { "Capture", NULL, "AINL" },
+       { "Capture", NULL, "AINR" },
+
+       { "AOUTL", NULL, "Playback" },
+       { "AOUTR", NULL, "Playback" },
+};
+
+static struct snd_soc_dai_driver ak4554_dai = {
+       .name = "ak4554-hifi",
+       .playback = {
+               .stream_name = "Playback",
+               .channels_min = 2,
+               .channels_max = 2,
+               .rates = SNDRV_PCM_RATE_8000_48000,
+               .formats = SNDRV_PCM_FMTBIT_S16_LE,
+       },
+       .capture = {
+               .stream_name = "Capture",
+               .channels_min = 2,
+               .channels_max = 2,
+               .rates = SNDRV_PCM_RATE_8000_48000,
+               .formats = SNDRV_PCM_FMTBIT_S16_LE,
+       },
+       .symmetric_rates = 1,
+};
+
+static struct snd_soc_codec_driver soc_codec_dev_ak4554 = {
+       .dapm_widgets = ak4554_dapm_widgets,
+       .num_dapm_widgets = ARRAY_SIZE(ak4554_dapm_widgets),
+       .dapm_routes = ak4554_dapm_routes,
+       .num_dapm_routes = ARRAY_SIZE(ak4554_dapm_routes),
+};
+
+/* Register the AK4554 codec and its single DAI; the chip has no
+ * control interface, so there is nothing else to set up. */
+static int ak4554_soc_probe(struct platform_device *pdev)
+{
+       return snd_soc_register_codec(&pdev->dev,
+                                     &soc_codec_dev_ak4554,
+                                     &ak4554_dai, 1);
+}
+
+/* Unregister the codec registered in ak4554_soc_probe(). */
+static int ak4554_soc_remove(struct platform_device *pdev)
+{
+       snd_soc_unregister_codec(&pdev->dev);
+       return 0;
+}
+
+/* Device tree match table; const, as .of_match_table takes a pointer
+ * to const of_device_id. */
+static const struct of_device_id ak4554_of_match[] = {
+       { .compatible = "asahi-kasei,ak4554" },
+       {},
+};
+MODULE_DEVICE_TABLE(of, ak4554_of_match);
+
+static struct platform_driver ak4554_driver = {
+       .driver = {
+               .name = "ak4554-adc-dac",
+               .owner = THIS_MODULE,
+               .of_match_table = ak4554_of_match,
+       },
+       .probe  = ak4554_soc_probe,
+       .remove = ak4554_soc_remove,
+};
+module_platform_driver(ak4554_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("SoC AK4554 driver");
+MODULE_AUTHOR("Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>");
index 1f303983ae02ddaf0a433ff32c188046d3da68a7..72e953b2cb41d8e4e7611b4f3141c59caf68041a 100644 (file)
@@ -22,7 +22,22 @@ struct ak5386_priv {
        int reset_gpio;
 };
 
-static struct snd_soc_codec_driver soc_codec_ak5386;
+static const struct snd_soc_dapm_widget ak5386_dapm_widgets[] = {
+SND_SOC_DAPM_INPUT("AINL"),
+SND_SOC_DAPM_INPUT("AINR"),
+};
+
+static const struct snd_soc_dapm_route ak5386_dapm_routes[] = {
+       { "Capture", NULL, "AINL" },
+       { "Capture", NULL, "AINR" },
+};
+
+static struct snd_soc_codec_driver soc_codec_ak5386 = {
+       .dapm_widgets = ak5386_dapm_widgets,
+       .num_dapm_widgets = ARRAY_SIZE(ak5386_dapm_widgets),
+       .dapm_routes = ak5386_dapm_routes,
+       .num_dapm_routes = ARRAY_SIZE(ak5386_dapm_routes),
+};
 
 static int ak5386_set_dai_fmt(struct snd_soc_dai *codec_dai,
                              unsigned int format)
index de625813c0e65c7aff7807d6904c53d2c581249d..657808ba1418d5588e961081c5e1578cbb671400 100644 (file)
@@ -19,6 +19,7 @@
 #include <sound/tlv.h>
 
 #include <linux/mfd/arizona/core.h>
+#include <linux/mfd/arizona/gpio.h>
 #include <linux/mfd/arizona/registers.h>
 
 #include "arizona.h"
@@ -199,9 +200,16 @@ int arizona_init_spk(struct snd_soc_codec *codec)
        if (ret != 0)
                return ret;
 
-       ret = snd_soc_dapm_new_controls(&codec->dapm, &arizona_spkr, 1);
-       if (ret != 0)
-               return ret;
+       switch (arizona->type) {
+       case WM8997:
+               break;
+       default:
+               ret = snd_soc_dapm_new_controls(&codec->dapm,
+                                               &arizona_spkr, 1);
+               if (ret != 0)
+                       return ret;
+               break;
+       }
 
        ret = arizona_request_irq(arizona, ARIZONA_IRQ_SPK_SHUTDOWN_WARN,
                                  "Thermal warning", arizona_thermal_warn,
@@ -223,6 +231,41 @@ int arizona_init_spk(struct snd_soc_codec *codec)
 }
 EXPORT_SYMBOL_GPL(arizona_init_spk);
 
+int arizona_init_gpio(struct snd_soc_codec *codec)
+{
+       struct arizona_priv *priv = snd_soc_codec_get_drvdata(codec);
+       struct arizona *arizona = priv->arizona;
+       int i;
+
+       switch (arizona->type) {
+       case WM5110:
+               snd_soc_dapm_disable_pin(&codec->dapm, "DRC2 Signal Activity");
+               break;
+       default:
+               break;
+       }
+
+       snd_soc_dapm_disable_pin(&codec->dapm, "DRC1 Signal Activity");
+
+       for (i = 0; i < ARRAY_SIZE(arizona->pdata.gpio_defaults); i++) {
+               switch (arizona->pdata.gpio_defaults[i] & ARIZONA_GPN_FN_MASK) {
+               case ARIZONA_GP_FN_DRC1_SIGNAL_DETECT:
+                       snd_soc_dapm_enable_pin(&codec->dapm,
+                                               "DRC1 Signal Activity");
+                       break;
+               case ARIZONA_GP_FN_DRC2_SIGNAL_DETECT:
+                       snd_soc_dapm_enable_pin(&codec->dapm,
+                                               "DRC2 Signal Activity");
+                       break;
+               default:
+                       break;
+               }
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(arizona_init_gpio);
+
 const char *arizona_mixer_texts[ARIZONA_NUM_MIXER_INPUTS] = {
        "None",
        "Tone Generator 1",
@@ -517,6 +560,26 @@ const struct soc_enum arizona_ng_hold =
                        4, arizona_ng_hold_text);
 EXPORT_SYMBOL_GPL(arizona_ng_hold);
 
+static const char * const arizona_in_dmic_osr_text[] = {
+       "1.536MHz", "3.072MHz", "6.144MHz",
+};
+
+const struct soc_enum arizona_in_dmic_osr[] = {
+       SOC_ENUM_SINGLE(ARIZONA_IN1L_CONTROL, ARIZONA_IN1_OSR_SHIFT,
+                       ARRAY_SIZE(arizona_in_dmic_osr_text),
+                       arizona_in_dmic_osr_text),
+       SOC_ENUM_SINGLE(ARIZONA_IN2L_CONTROL, ARIZONA_IN2_OSR_SHIFT,
+                       ARRAY_SIZE(arizona_in_dmic_osr_text),
+                       arizona_in_dmic_osr_text),
+       SOC_ENUM_SINGLE(ARIZONA_IN3L_CONTROL, ARIZONA_IN3_OSR_SHIFT,
+                       ARRAY_SIZE(arizona_in_dmic_osr_text),
+                       arizona_in_dmic_osr_text),
+       SOC_ENUM_SINGLE(ARIZONA_IN4L_CONTROL, ARIZONA_IN4_OSR_SHIFT,
+                       ARRAY_SIZE(arizona_in_dmic_osr_text),
+                       arizona_in_dmic_osr_text),
+};
+EXPORT_SYMBOL_GPL(arizona_in_dmic_osr);
+
 static void arizona_in_set_vu(struct snd_soc_codec *codec, int ena)
 {
        struct arizona_priv *priv = snd_soc_codec_get_drvdata(codec);
index b60b08ccc1d06e54bf09579ec1ffff3640cb2d15..9e81b6392692cbd78e7e9ad7b263fcd00c214a1d 100644 (file)
@@ -150,7 +150,8 @@ extern int arizona_mixer_values[ARIZONA_NUM_MIXER_INPUTS];
        ARIZONA_MUX(name_str " Aux 5", &name##_aux5_mux), \
        ARIZONA_MUX(name_str " Aux 6", &name##_aux6_mux)
 
-#define ARIZONA_MUX_ROUTES(name) \
+#define ARIZONA_MUX_ROUTES(widget, name) \
+       { widget, NULL, name " Input" }, \
        ARIZONA_MIXER_INPUT_ROUTES(name " Input")
 
 #define ARIZONA_MIXER_ROUTES(widget, name) \
@@ -198,6 +199,7 @@ extern const struct soc_enum arizona_lhpf3_mode;
 extern const struct soc_enum arizona_lhpf4_mode;
 
 extern const struct soc_enum arizona_ng_hold;
+extern const struct soc_enum arizona_in_dmic_osr[];
 
 extern int arizona_in_ev(struct snd_soc_dapm_widget *w,
                         struct snd_kcontrol *kcontrol,
@@ -242,6 +244,7 @@ extern int arizona_set_fll(struct arizona_fll *fll, int source,
                           unsigned int Fref, unsigned int Fout);
 
 extern int arizona_init_spk(struct snd_soc_codec *codec);
+extern int arizona_init_gpio(struct snd_soc_codec *codec);
 
 extern int arizona_init_dai(struct arizona_priv *priv, int dai);
 
index a081d9fcb1668ed8393429d60d3cecac63ea7ae5..c4cf0699e77fe4cc98182f3fa877eb02960a9235 100644 (file)
 
 #include <sound/soc.h>
 
+static const struct snd_soc_dapm_widget bt_sco_widgets[] = {
+       SND_SOC_DAPM_INPUT("RX"),
+       SND_SOC_DAPM_OUTPUT("TX"),
+};
+
+static const struct snd_soc_dapm_route bt_sco_routes[] = {
+       { "Capture", NULL, "RX" },
+       { "TX", NULL, "Playback" },
+};
+
 static struct snd_soc_dai_driver bt_sco_dai = {
        .name = "bt-sco-pcm",
        .playback = {
+               .stream_name = "Playback",
                .channels_min = 1,
                .channels_max = 1,
                .rates = SNDRV_PCM_RATE_8000,
                .formats = SNDRV_PCM_FMTBIT_S16_LE,
        },
        .capture = {
+                .stream_name = "Capture",
                .channels_min = 1,
                .channels_max = 1,
                .rates = SNDRV_PCM_RATE_8000,
@@ -31,7 +43,12 @@ static struct snd_soc_dai_driver bt_sco_dai = {
        },
 };
 
-static struct snd_soc_codec_driver soc_codec_dev_bt_sco;
+static struct snd_soc_codec_driver soc_codec_dev_bt_sco = {
+       .dapm_widgets = bt_sco_widgets,
+       .num_dapm_widgets = ARRAY_SIZE(bt_sco_widgets),
+       .dapm_routes = bt_sco_routes,
+       .num_dapm_routes = ARRAY_SIZE(bt_sco_routes),
+};
 
 static int bt_sco_probe(struct platform_device *pdev)
 {
@@ -50,6 +67,9 @@ static struct platform_device_id bt_sco_driver_ids[] = {
        {
                .name           = "dfbmcs320",
        },
+       {
+               .name           = "bt-sco",
+       },
        {},
 };
 MODULE_DEVICE_TABLE(platform, bt_sco_driver_ids);
index 8e4779812b96c03d11f07ca6a4c47ca628b3295b..83c835d9fd884b18a87cdbb5420838b8fd9229f0 100644 (file)
@@ -139,6 +139,22 @@ struct cs4270_private {
        struct regulator_bulk_data supplies[ARRAY_SIZE(supply_names)];
 };
 
+static const struct snd_soc_dapm_widget cs4270_dapm_widgets[] = {
+SND_SOC_DAPM_INPUT("AINL"),
+SND_SOC_DAPM_INPUT("AINR"),
+
+SND_SOC_DAPM_OUTPUT("AOUTL"),
+SND_SOC_DAPM_OUTPUT("AOUTR"),
+};
+
+static const struct snd_soc_dapm_route cs4270_dapm_routes[] = {
+       { "Capture", NULL, "AINA" },
+       { "Capture", NULL, "AINB" },
+
+       { "AOUTA", NULL, "Playback" },
+       { "AOUTB", NULL, "Playback" },
+};
+
 /**
  * struct cs4270_mode_ratios - clock ratio tables
  * @ratio: the ratio of MCLK to the sample rate
@@ -612,6 +628,10 @@ static const struct snd_soc_codec_driver soc_codec_device_cs4270 = {
 
        .controls =             cs4270_snd_controls,
        .num_controls =         ARRAY_SIZE(cs4270_snd_controls),
+       .dapm_widgets =         cs4270_dapm_widgets,
+       .num_dapm_widgets =     ARRAY_SIZE(cs4270_dapm_widgets),
+       .dapm_routes =          cs4270_dapm_routes,
+       .num_dapm_routes =      ARRAY_SIZE(cs4270_dapm_routes),
 };
 
 /*
index 03036b326732ddfec0fad5b390806d9000a58589..a20f1bb8f0715011dad1a710cf038a80121e30b9 100644 (file)
@@ -173,6 +173,26 @@ struct cs4271_private {
        bool                            enable_soft_reset;
 };
 
+static const struct snd_soc_dapm_widget cs4271_dapm_widgets[] = {
+SND_SOC_DAPM_INPUT("AINA"),
+SND_SOC_DAPM_INPUT("AINB"),
+
+SND_SOC_DAPM_OUTPUT("AOUTA+"),
+SND_SOC_DAPM_OUTPUT("AOUTA-"),
+SND_SOC_DAPM_OUTPUT("AOUTB+"),
+SND_SOC_DAPM_OUTPUT("AOUTB-"),
+};
+
+static const struct snd_soc_dapm_route cs4271_dapm_routes[] = {
+       { "Capture", NULL, "AINA" },
+       { "Capture", NULL, "AINB" },
+
+       { "AOUTA+", NULL, "Playback" },
+       { "AOUTA-", NULL, "Playback" },
+       { "AOUTB+", NULL, "Playback" },
+       { "AOUTB-", NULL, "Playback" },
+};
+
 /*
  * @freq is the desired MCLK rate
  * MCLK rate should (c) be the sample rate, multiplied by one of the
@@ -576,8 +596,7 @@ static int cs4271_probe(struct snd_soc_codec *codec)
                                   CS4271_MODE2_MUTECAEQUB,
                                   CS4271_MODE2_MUTECAEQUB);
 
-       return snd_soc_add_codec_controls(codec, cs4271_snd_controls,
-               ARRAY_SIZE(cs4271_snd_controls));
+       return 0;
 }
 
 static int cs4271_remove(struct snd_soc_codec *codec)
@@ -596,6 +615,13 @@ static struct snd_soc_codec_driver soc_codec_dev_cs4271 = {
        .remove                 = cs4271_remove,
        .suspend                = cs4271_soc_suspend,
        .resume                 = cs4271_soc_resume,
+
+       .controls               = cs4271_snd_controls,
+       .num_controls           = ARRAY_SIZE(cs4271_snd_controls),
+       .dapm_widgets           = cs4271_dapm_widgets,
+       .num_dapm_widgets       = ARRAY_SIZE(cs4271_dapm_widgets),
+       .dapm_routes            = cs4271_dapm_routes,
+       .num_dapm_routes        = ARRAY_SIZE(cs4271_dapm_routes),
 };
 
 #if defined(CONFIG_SPI_MASTER)
index 2bcae2b40c92cd48a35aaf59ad333bb689851ff2..68342b121c966aa7c90ace1384bbfbefe4737ab7 100644 (file)
 
 #define DRV_NAME "hdmi-audio-codec"
 
-static struct snd_soc_codec_driver hdmi_codec;
+static const struct snd_soc_dapm_widget hdmi_widgets[] = {
+       SND_SOC_DAPM_INPUT("RX"),
+       SND_SOC_DAPM_OUTPUT("TX"),
+};
+
+static const struct snd_soc_dapm_route hdmi_routes[] = {
+       { "Capture", NULL, "RX" },
+       { "TX", NULL, "Playback" },
+};
 
 static struct snd_soc_dai_driver hdmi_codec_dai = {
        .name = "hdmi-hifi",
        .playback = {
+               .stream_name = "Playback",
                .channels_min = 2,
                .channels_max = 8,
                .rates = SNDRV_PCM_RATE_32000 |
@@ -37,6 +46,25 @@ static struct snd_soc_dai_driver hdmi_codec_dai = {
                .formats = SNDRV_PCM_FMTBIT_S16_LE |
                        SNDRV_PCM_FMTBIT_S24_LE,
        },
+       .capture = {
+               .stream_name = "Capture",
+               .channels_min = 2,
+               .channels_max = 2,
+               .rates = SNDRV_PCM_RATE_32000 |
+                       SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000 |
+                       SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000 |
+                       SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_192000,
+               .formats = SNDRV_PCM_FMTBIT_S16_LE |
+                       SNDRV_PCM_FMTBIT_S24_LE,
+       },
+
+};
+
+static struct snd_soc_codec_driver hdmi_codec = {
+       .dapm_widgets = hdmi_widgets,
+       .num_dapm_widgets = ARRAY_SIZE(hdmi_widgets),
+       .dapm_routes = hdmi_routes,
+       .num_dapm_routes = ARRAY_SIZE(hdmi_routes),
 };
 
 static int hdmi_codec_probe(struct platform_device *pdev)
index 9f9f59573f721344995233da13ddf875f43436fc..0e5743ea79dfdf6c4a4191694955883fbede59ca 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/i2c.h>
+#include <linux/regmap.h>
 #include <linux/slab.h>
 
 #include <sound/core.h>
 #include <sound/tlv.h>
 
 struct lm4857 {
-       struct i2c_client *i2c;
+       struct regmap *regmap;
        uint8_t mode;
 };
 
-static const uint8_t lm4857_default_regs[] = {
-       0x00, 0x00, 0x00, 0x00,
+static const struct reg_default lm4857_default_regs[] = {
+       { 0x0, 0x00 },
+       { 0x1, 0x00 },
+       { 0x2, 0x00 },
+       { 0x3, 0x00 },
 };
 
 /* The register offsets in the cache array */
@@ -42,39 +46,6 @@ static const uint8_t lm4857_default_regs[] = {
 #define LM4857_WAKEUP 5
 #define LM4857_EPGAIN 4
 
-static int lm4857_write(struct snd_soc_codec *codec, unsigned int reg,
-               unsigned int value)
-{
-       uint8_t data;
-       int ret;
-
-       ret = snd_soc_cache_write(codec, reg, value);
-       if (ret < 0)
-               return ret;
-
-       data = (reg << 6) | value;
-       ret = i2c_master_send(codec->control_data, &data, 1);
-       if (ret != 1) {
-               dev_err(codec->dev, "Failed to write register: %d\n", ret);
-               return ret;
-       }
-
-       return 0;
-}
-
-static unsigned int lm4857_read(struct snd_soc_codec *codec,
-               unsigned int reg)
-{
-       unsigned int val;
-       int ret;
-
-       ret = snd_soc_cache_read(codec, reg, &val);
-       if (ret)
-               return -1;
-
-       return val;
-}
-
 static int lm4857_get_mode(struct snd_kcontrol *kcontrol,
        struct snd_ctl_elem_value *ucontrol)
 {
@@ -96,7 +67,7 @@ static int lm4857_set_mode(struct snd_kcontrol *kcontrol,
        lm4857->mode = value;
 
        if (codec->dapm.bias_level == SND_SOC_BIAS_ON)
-               snd_soc_update_bits(codec, LM4857_CTRL, 0x0F, value + 6);
+               regmap_update_bits(lm4857->regmap, LM4857_CTRL, 0x0F, value + 6);
 
        return 1;
 }
@@ -108,10 +79,11 @@ static int lm4857_set_bias_level(struct snd_soc_codec *codec,
 
        switch (level) {
        case SND_SOC_BIAS_ON:
-               snd_soc_update_bits(codec, LM4857_CTRL, 0x0F, lm4857->mode + 6);
+               regmap_update_bits(lm4857->regmap, LM4857_CTRL, 0x0F,
+                       lm4857->mode + 6);
                break;
        case SND_SOC_BIAS_STANDBY:
-               snd_soc_update_bits(codec, LM4857_CTRL, 0x0F, 0);
+               regmap_update_bits(lm4857->regmap, LM4857_CTRL, 0x0F, 0);
                break;
        default:
                break;
@@ -171,49 +143,32 @@ static const struct snd_soc_dapm_route lm4857_routes[] = {
        {"EP", NULL, "IN"},
 };
 
-static int lm4857_probe(struct snd_soc_codec *codec)
-{
-       struct lm4857 *lm4857 = snd_soc_codec_get_drvdata(codec);
-       struct snd_soc_dapm_context *dapm = &codec->dapm;
-       int ret;
-
-       codec->control_data = lm4857->i2c;
-
-       ret = snd_soc_add_codec_controls(codec, lm4857_controls,
-                       ARRAY_SIZE(lm4857_controls));
-       if (ret)
-               return ret;
-
-       ret = snd_soc_dapm_new_controls(dapm, lm4857_dapm_widgets,
-                       ARRAY_SIZE(lm4857_dapm_widgets));
-       if (ret)
-               return ret;
+static struct snd_soc_codec_driver soc_codec_dev_lm4857 = {
+       .set_bias_level = lm4857_set_bias_level,
 
-       ret = snd_soc_dapm_add_routes(dapm, lm4857_routes,
-                       ARRAY_SIZE(lm4857_routes));
-       if (ret)
-               return ret;
+       .controls = lm4857_controls,
+       .num_controls = ARRAY_SIZE(lm4857_controls),
+       .dapm_widgets = lm4857_dapm_widgets,
+       .num_dapm_widgets = ARRAY_SIZE(lm4857_dapm_widgets),
+       .dapm_routes = lm4857_routes,
+       .num_dapm_routes = ARRAY_SIZE(lm4857_routes),
+};
 
-       snd_soc_dapm_new_widgets(dapm);
+static const struct regmap_config lm4857_regmap_config = {
+       .val_bits = 6,
+       .reg_bits = 2,
 
-       return 0;
-}
+       .max_register = LM4857_CTRL,
 
-static struct snd_soc_codec_driver soc_codec_dev_lm4857 = {
-       .write = lm4857_write,
-       .read = lm4857_read,
-       .probe = lm4857_probe,
-       .reg_cache_size = ARRAY_SIZE(lm4857_default_regs),
-       .reg_word_size = sizeof(uint8_t),
-       .reg_cache_default = lm4857_default_regs,
-       .set_bias_level = lm4857_set_bias_level,
+       .cache_type = REGCACHE_FLAT,
+       .reg_defaults = lm4857_default_regs,
+       .num_reg_defaults = ARRAY_SIZE(lm4857_default_regs),
 };
 
 static int lm4857_i2c_probe(struct i2c_client *i2c,
                            const struct i2c_device_id *id)
 {
        struct lm4857 *lm4857;
-       int ret;
 
        lm4857 = devm_kzalloc(&i2c->dev, sizeof(*lm4857), GFP_KERNEL);
        if (!lm4857)
@@ -221,11 +176,11 @@ static int lm4857_i2c_probe(struct i2c_client *i2c,
 
        i2c_set_clientdata(i2c, lm4857);
 
-       lm4857->i2c = i2c;
-
-       ret = snd_soc_register_codec(&i2c->dev, &soc_codec_dev_lm4857, NULL, 0);
+       lm4857->regmap = devm_regmap_init_i2c(i2c, &lm4857_regmap_config);
+       if (IS_ERR(lm4857->regmap))
+               return PTR_ERR(lm4857->regmap);
 
-       return ret;
+       return snd_soc_register_codec(&i2c->dev, &soc_codec_dev_lm4857, NULL, 0);
 }
 
 static int lm4857_i2c_remove(struct i2c_client *i2c)
index a6ac2313047def253ddc0ff4ebddde4947803c1b..31f91560e9f6df0fb115ea862b712e9863cad9dd 100644 (file)
@@ -118,6 +118,18 @@ static const struct snd_kcontrol_new max9768_mute[] = {
        SOC_SINGLE_BOOL_EXT("Playback Switch", 0, max9768_get_gpio, max9768_set_gpio),
 };
 
+static const struct snd_soc_dapm_widget max9768_dapm_widgets[] = {
+SND_SOC_DAPM_INPUT("IN"),
+
+SND_SOC_DAPM_OUTPUT("OUT+"),
+SND_SOC_DAPM_OUTPUT("OUT-"),
+};
+
+static const struct snd_soc_dapm_route max9768_dapm_routes[] = {
+       { "OUT+", NULL, "IN" },
+       { "OUT-", NULL, "IN" },
+};
+
 static int max9768_probe(struct snd_soc_codec *codec)
 {
        struct max9768 *max9768 = snd_soc_codec_get_drvdata(codec);
@@ -148,6 +160,10 @@ static struct snd_soc_codec_driver max9768_codec_driver = {
        .probe = max9768_probe,
        .controls = max9768_volume,
        .num_controls = ARRAY_SIZE(max9768_volume),
+       .dapm_widgets = max9768_dapm_widgets,
+       .num_dapm_widgets = ARRAY_SIZE(max9768_dapm_widgets),
+       .dapm_routes = max9768_dapm_routes,
+       .num_dapm_routes = ARRAY_SIZE(max9768_dapm_routes),
 };
 
 static const struct regmap_config max9768_i2c_regmap_config = {
index ad5313f98f286b8f95223b56ef88ae4718f7a73d..0569a4c3ae00e7ce8b8fb27952387d87913f4098 100644 (file)
@@ -2084,8 +2084,9 @@ static irqreturn_t max98090_interrupt(int irq, void *data)
 
                pm_wakeup_event(codec->dev, 100);
 
-               schedule_delayed_work(&max98090->jack_work,
-                       msecs_to_jiffies(100));
+               queue_delayed_work(system_power_efficient_wq,
+                                  &max98090->jack_work,
+                                  msecs_to_jiffies(100));
        }
 
        if (active & M98090_DRCACT_MASK)
@@ -2132,8 +2133,9 @@ int max98090_mic_detect(struct snd_soc_codec *codec,
        snd_soc_jack_report(max98090->jack, 0,
                            SND_JACK_HEADSET | SND_JACK_BTN_0);
 
-       schedule_delayed_work(&max98090->jack_work,
-               msecs_to_jiffies(100));
+       queue_delayed_work(system_power_efficient_wq,
+                          &max98090->jack_work,
+                          msecs_to_jiffies(100));
 
        return 0;
 }
index 6b6c74cd83e2bcc2d9e5292cca7c2b70618eaa18..29549cdbf4c1c6138e825e3372644aa9e3008f90 100644 (file)
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/i2c.h>
+#include <linux/regmap.h>
 #include <sound/soc.h>
 #include <sound/tlv.h>
 
 #include "max9877.h"
 
-static struct i2c_client *i2c;
+static struct regmap *regmap;
 
-static u8 max9877_regs[5] = { 0x40, 0x00, 0x00, 0x00, 0x49 };
-
-static void max9877_write_regs(void)
-{
-       unsigned int i;
-       u8 data[6];
-
-       data[0] = MAX9877_INPUT_MODE;
-       for (i = 0; i < ARRAY_SIZE(max9877_regs); i++)
-               data[i + 1] = max9877_regs[i];
-
-       if (i2c_master_send(i2c, data, 6) != 6)
-               dev_err(&i2c->dev, "i2c write failed\n");
-}
-
-static int max9877_get_reg(struct snd_kcontrol *kcontrol,
-               struct snd_ctl_elem_value *ucontrol)
-{
-       struct soc_mixer_control *mc =
-               (struct soc_mixer_control *)kcontrol->private_value;
-       unsigned int reg = mc->reg;
-       unsigned int shift = mc->shift;
-       unsigned int mask = mc->max;
-       unsigned int invert = mc->invert;
-
-       ucontrol->value.integer.value[0] = (max9877_regs[reg] >> shift) & mask;
-
-       if (invert)
-               ucontrol->value.integer.value[0] =
-                       mask - ucontrol->value.integer.value[0];
-
-       return 0;
-}
-
-static int max9877_set_reg(struct snd_kcontrol *kcontrol,
-               struct snd_ctl_elem_value *ucontrol)
-{
-       struct soc_mixer_control *mc =
-               (struct soc_mixer_control *)kcontrol->private_value;
-       unsigned int reg = mc->reg;
-       unsigned int shift = mc->shift;
-       unsigned int mask = mc->max;
-       unsigned int invert = mc->invert;
-       unsigned int val = (ucontrol->value.integer.value[0] & mask);
-
-       if (invert)
-               val = mask - val;
-
-       if (((max9877_regs[reg] >> shift) & mask) == val)
-               return 0;
-
-       max9877_regs[reg] &= ~(mask << shift);
-       max9877_regs[reg] |= val << shift;
-       max9877_write_regs();
-
-       return 1;
-}
-
-static int max9877_get_2reg(struct snd_kcontrol *kcontrol,
-               struct snd_ctl_elem_value *ucontrol)
-{
-       struct soc_mixer_control *mc =
-               (struct soc_mixer_control *)kcontrol->private_value;
-       unsigned int reg = mc->reg;
-       unsigned int reg2 = mc->rreg;
-       unsigned int shift = mc->shift;
-       unsigned int mask = mc->max;
-
-       ucontrol->value.integer.value[0] = (max9877_regs[reg] >> shift) & mask;
-       ucontrol->value.integer.value[1] = (max9877_regs[reg2] >> shift) & mask;
-
-       return 0;
-}
-
-static int max9877_set_2reg(struct snd_kcontrol *kcontrol,
-               struct snd_ctl_elem_value *ucontrol)
-{
-       struct soc_mixer_control *mc =
-               (struct soc_mixer_control *)kcontrol->private_value;
-       unsigned int reg = mc->reg;
-       unsigned int reg2 = mc->rreg;
-       unsigned int shift = mc->shift;
-       unsigned int mask = mc->max;
-       unsigned int val = (ucontrol->value.integer.value[0] & mask);
-       unsigned int val2 = (ucontrol->value.integer.value[1] & mask);
-       unsigned int change = 0;
-
-       if (((max9877_regs[reg] >> shift) & mask) != val)
-               change = 1;
-
-       if (((max9877_regs[reg2] >> shift) & mask) != val2)
-               change = 1;
-
-       if (change) {
-               max9877_regs[reg] &= ~(mask << shift);
-               max9877_regs[reg] |= val << shift;
-               max9877_regs[reg2] &= ~(mask << shift);
-               max9877_regs[reg2] |= val2 << shift;
-               max9877_write_regs();
-       }
-
-       return change;
-}
-
-static int max9877_get_out_mode(struct snd_kcontrol *kcontrol,
-               struct snd_ctl_elem_value *ucontrol)
-{
-       u8 value = max9877_regs[MAX9877_OUTPUT_MODE] & MAX9877_OUTMODE_MASK;
-
-       if (value)
-               value -= 1;
-
-       ucontrol->value.integer.value[0] = value;
-       return 0;
-}
-
-static int max9877_set_out_mode(struct snd_kcontrol *kcontrol,
-               struct snd_ctl_elem_value *ucontrol)
-{
-       u8 value = ucontrol->value.integer.value[0];
-
-       value += 1;
-
-       if ((max9877_regs[MAX9877_OUTPUT_MODE] & MAX9877_OUTMODE_MASK) == value)
-               return 0;
-
-       max9877_regs[MAX9877_OUTPUT_MODE] &= ~MAX9877_OUTMODE_MASK;
-       max9877_regs[MAX9877_OUTPUT_MODE] |= value;
-       max9877_write_regs();
-       return 1;
-}
-
-static int max9877_get_osc_mode(struct snd_kcontrol *kcontrol,
-               struct snd_ctl_elem_value *ucontrol)
-{
-       u8 value = (max9877_regs[MAX9877_OUTPUT_MODE] & MAX9877_OSC_MASK);
-
-       value = value >> MAX9877_OSC_OFFSET;
-
-       ucontrol->value.integer.value[0] = value;
-       return 0;
-}
-
-static int max9877_set_osc_mode(struct snd_kcontrol *kcontrol,
-               struct snd_ctl_elem_value *ucontrol)
-{
-       u8 value = ucontrol->value.integer.value[0];
-
-       value = value << MAX9877_OSC_OFFSET;
-       if ((max9877_regs[MAX9877_OUTPUT_MODE] & MAX9877_OSC_MASK) == value)
-               return 0;
-
-       max9877_regs[MAX9877_OUTPUT_MODE] &= ~MAX9877_OSC_MASK;
-       max9877_regs[MAX9877_OUTPUT_MODE] |= value;
-       max9877_write_regs();
-       return 1;
-}
+static struct reg_default max9877_regs[] = {
+       { 0, 0x40 },
+       { 1, 0x00 },
+       { 2, 0x00 },
+       { 3, 0x00 },
+       { 4, 0x49 },
+};
 
 static const unsigned int max9877_pgain_tlv[] = {
        TLV_DB_RANGE_HEAD(2),
@@ -212,65 +63,104 @@ static const char *max9877_osc_mode[] = {
 };
 
 static const struct soc_enum max9877_enum[] = {
-       SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(max9877_out_mode), max9877_out_mode),
-       SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(max9877_osc_mode), max9877_osc_mode),
+       SOC_ENUM_SINGLE(MAX9877_OUTPUT_MODE, 0, ARRAY_SIZE(max9877_out_mode),
+                       max9877_out_mode),
+       SOC_ENUM_SINGLE(MAX9877_OUTPUT_MODE, MAX9877_OSC_OFFSET,
+                       ARRAY_SIZE(max9877_osc_mode), max9877_osc_mode),
 };
 
 static const struct snd_kcontrol_new max9877_controls[] = {
-       SOC_SINGLE_EXT_TLV("MAX9877 PGAINA Playback Volume",
-                       MAX9877_INPUT_MODE, 0, 2, 0,
-                       max9877_get_reg, max9877_set_reg, max9877_pgain_tlv),
-       SOC_SINGLE_EXT_TLV("MAX9877 PGAINB Playback Volume",
-                       MAX9877_INPUT_MODE, 2, 2, 0,
-                       max9877_get_reg, max9877_set_reg, max9877_pgain_tlv),
-       SOC_SINGLE_EXT_TLV("MAX9877 Amp Speaker Playback Volume",
-                       MAX9877_SPK_VOLUME, 0, 31, 0,
-                       max9877_get_reg, max9877_set_reg, max9877_output_tlv),
-       SOC_DOUBLE_R_EXT_TLV("MAX9877 Amp HP Playback Volume",
-                       MAX9877_HPL_VOLUME, MAX9877_HPR_VOLUME, 0, 31, 0,
-                       max9877_get_2reg, max9877_set_2reg, max9877_output_tlv),
-       SOC_SINGLE_EXT("MAX9877 INB Stereo Switch",
-                       MAX9877_INPUT_MODE, 4, 1, 1,
-                       max9877_get_reg, max9877_set_reg),
-       SOC_SINGLE_EXT("MAX9877 INA Stereo Switch",
-                       MAX9877_INPUT_MODE, 5, 1, 1,
-                       max9877_get_reg, max9877_set_reg),
-       SOC_SINGLE_EXT("MAX9877 Zero-crossing detection Switch",
-                       MAX9877_INPUT_MODE, 6, 1, 0,
-                       max9877_get_reg, max9877_set_reg),
-       SOC_SINGLE_EXT("MAX9877 Bypass Mode Switch",
-                       MAX9877_OUTPUT_MODE, 6, 1, 0,
-                       max9877_get_reg, max9877_set_reg),
-       SOC_SINGLE_EXT("MAX9877 Shutdown Mode Switch",
-                       MAX9877_OUTPUT_MODE, 7, 1, 1,
-                       max9877_get_reg, max9877_set_reg),
-       SOC_ENUM_EXT("MAX9877 Output Mode", max9877_enum[0],
-                       max9877_get_out_mode, max9877_set_out_mode),
-       SOC_ENUM_EXT("MAX9877 Oscillator Mode", max9877_enum[1],
-                       max9877_get_osc_mode, max9877_set_osc_mode),
+       SOC_SINGLE_TLV("MAX9877 PGAINA Playback Volume",
+                      MAX9877_INPUT_MODE, 0, 2, 0, max9877_pgain_tlv),
+       SOC_SINGLE_TLV("MAX9877 PGAINB Playback Volume",
+                      MAX9877_INPUT_MODE, 2, 2, 0, max9877_pgain_tlv),
+       SOC_SINGLE_TLV("MAX9877 Amp Speaker Playback Volume",
+                      MAX9877_SPK_VOLUME, 0, 31, 0, max9877_output_tlv),
+       SOC_DOUBLE_R_TLV("MAX9877 Amp HP Playback Volume",
+                        MAX9877_HPL_VOLUME, MAX9877_HPR_VOLUME, 0, 31, 0,
+                        max9877_output_tlv),
+       SOC_SINGLE("MAX9877 INB Stereo Switch",
+                  MAX9877_INPUT_MODE, 4, 1, 1),
+       SOC_SINGLE("MAX9877 INA Stereo Switch",
+                  MAX9877_INPUT_MODE, 5, 1, 1),
+       SOC_SINGLE("MAX9877 Zero-crossing detection Switch",
+                  MAX9877_INPUT_MODE, 6, 1, 0),
+       SOC_SINGLE("MAX9877 Bypass Mode Switch",
+                  MAX9877_OUTPUT_MODE, 6, 1, 0),
+       SOC_ENUM("MAX9877 Output Mode", max9877_enum[0]),
+       SOC_ENUM("MAX9877 Oscillator Mode", max9877_enum[1]),
 };
 
-/* This function is called from ASoC machine driver */
-int max9877_add_controls(struct snd_soc_codec *codec)
-{
-       return snd_soc_add_codec_controls(codec, max9877_controls,
-                       ARRAY_SIZE(max9877_controls));
-}
-EXPORT_SYMBOL_GPL(max9877_add_controls);
+static const struct snd_soc_dapm_widget max9877_dapm_widgets[] = {
+SND_SOC_DAPM_INPUT("INA1"),
+SND_SOC_DAPM_INPUT("INA2"),
+SND_SOC_DAPM_INPUT("INB1"),
+SND_SOC_DAPM_INPUT("INB2"),
+SND_SOC_DAPM_INPUT("RXIN+"),
+SND_SOC_DAPM_INPUT("RXIN-"),
+
+SND_SOC_DAPM_PGA("SHDN", MAX9877_OUTPUT_MODE, 7, 1, NULL, 0),
+
+SND_SOC_DAPM_OUTPUT("OUT+"),
+SND_SOC_DAPM_OUTPUT("OUT-"),
+SND_SOC_DAPM_OUTPUT("HPL"),
+SND_SOC_DAPM_OUTPUT("HPR"),
+};
+
+static const struct snd_soc_dapm_route max9877_dapm_routes[] = {
+       { "SHDN", NULL, "INA1" },
+       { "SHDN", NULL, "INA2" },
+       { "SHDN", NULL, "INB1" },
+       { "SHDN", NULL, "INB2" },
+
+       { "OUT+", NULL, "RXIN+" },
+       { "OUT+", NULL, "SHDN" },
+
+       { "OUT-", NULL, "SHDN" },
+       { "OUT-", NULL, "RXIN-" },
+
+       { "HPL", NULL, "SHDN" },
+       { "HPR", NULL, "SHDN" },
+};
+
+static const struct snd_soc_codec_driver max9877_codec = {
+       .controls = max9877_controls,
+       .num_controls = ARRAY_SIZE(max9877_controls),
+
+       .dapm_widgets = max9877_dapm_widgets,
+       .num_dapm_widgets = ARRAY_SIZE(max9877_dapm_widgets),
+       .dapm_routes = max9877_dapm_routes,
+       .num_dapm_routes = ARRAY_SIZE(max9877_dapm_routes),
+};
+
+static const struct regmap_config max9877_regmap = {
+       .reg_bits = 8,
+       .val_bits = 8,
+
+       .reg_defaults = max9877_regs,
+       .num_reg_defaults = ARRAY_SIZE(max9877_regs),
+       .cache_type = REGCACHE_RBTREE,
+};
 
 static int max9877_i2c_probe(struct i2c_client *client,
                             const struct i2c_device_id *id)
 {
-       i2c = client;
+       int i;
 
-       max9877_write_regs();
+       regmap = devm_regmap_init_i2c(client, &max9877_regmap);
+       if (IS_ERR(regmap))
+               return PTR_ERR(regmap);
 
-       return 0;
+       /* Ensure the device is in reset state */
+       for (i = 0; i < ARRAY_SIZE(max9877_regs); i++)
+               regmap_write(regmap, max9877_regs[i].reg, max9877_regs[i].def);
+
+       return snd_soc_register_codec(&client->dev, &max9877_codec, NULL, 0);
 }
 
 static int max9877_i2c_remove(struct i2c_client *client)
 {
-       i2c = NULL;
+       snd_soc_unregister_codec(&client->dev);
 
        return 0;
 }
index 5402dfbbb7162f668d8841a83ad3c04db5d2516f..4d3c8fd8c5db5aeec4632e958d3737a0d59adc0f 100644 (file)
@@ -94,7 +94,6 @@
 #define AUDIO_DAC_CFS_DLY_B            (1 << 10)
 
 struct mc13783_priv {
-       struct snd_soc_codec codec;
        struct mc13xxx *mc13xxx;
 
        enum mc13783_ssi_port adc_ssi_port;
diff --git a/sound/soc/codecs/pcm1681.c b/sound/soc/codecs/pcm1681.c
new file mode 100644 (file)
index 0000000..651ce09
--- /dev/null
@@ -0,0 +1,339 @@
+/*
+ * PCM1681 ASoC codec driver
+ *
+ * Copyright (c) StreamUnlimited GmbH 2013
+ *     Marek Belisko <marek.belisko@streamunlimited.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+#include <linux/regmap.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/tlv.h>
+
/* Sample formats and rates supported by the PCM1681 DAI. */
#define PCM1681_PCM_FORMATS (SNDRV_PCM_FMTBIT_S16_LE  |		\
			     SNDRV_PCM_FMTBIT_S24_LE)

#define PCM1681_PCM_RATES   (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 | \
			     SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100  | \
			     SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_88200  | \
			     SNDRV_PCM_RATE_96000 | SNDRV_PCM_RATE_192000)

#define PCM1681_SOFT_MUTE_ALL		0xff	/* mute bits for all 8 channels */
#define PCM1681_DEEMPH_RATE_MASK	0x18	/* de-emphasis rate select field */
#define PCM1681_DEEMPH_MASK		0x01	/* de-emphasis enable bit */

/*
 * Attenuation registers sit at 0x01..0x06 for channels 1-6 and continue at
 * 0x10..0x11 for channels 7-8, hence the "+ 9" jump.  The argument is
 * parenthesized so the macro remains correct for expression arguments
 * (e.g. PCM1681_ATT_CONTROL(base + n)).
 */
#define PCM1681_ATT_CONTROL(X)	((X) <= 6 ? (X) : (X) + 9)
#define PCM1681_SOFT_MUTE	0x07	/* Soft mute control register */
#define PCM1681_DAC_CONTROL	0x08	/* DAC operation control */
#define PCM1681_FMT_CONTROL	0x09	/* Audio interface data format */
#define PCM1681_DEEMPH_CONTROL	0x0a	/* De-emphasis control */
#define PCM1681_ZERO_DETECT_STATUS	0x0e	/* Zero detect status reg */
+
+/*
+ * Power-on register defaults.  Addresses 0x0e and 0x0f are intentionally
+ * absent: 0x0e is the read-only zero-detect status register and 0x0f is
+ * not implemented (see pcm1681_accessible_reg()).
+ */
+static const struct reg_default pcm1681_reg_defaults[] = {
+       { 0x01, 0xff },
+       { 0x02, 0xff },
+       { 0x03, 0xff },
+       { 0x04, 0xff },
+       { 0x05, 0xff },
+       { 0x06, 0xff },
+       { 0x07, 0x00 },
+       { 0x08, 0x00 },
+       { 0x09, 0x06 },
+       { 0x0A, 0x00 },
+       { 0x0B, 0xff },
+       { 0x0C, 0x0f },
+       { 0x0D, 0x00 },
+       { 0x10, 0xff },
+       { 0x11, 0xff },
+       { 0x12, 0x00 },
+       { 0x13, 0x00 },
+};
+
/*
 * Registers 0x00 and 0x0f are not implemented on the PCM1681; every other
 * address in the map is accessible.
 */
static bool pcm1681_accessible_reg(struct device *dev, unsigned int reg)
{
	if (reg == 0x00 || reg == 0x0f)
		return false;

	return true;
}
+
+static bool pcm1681_writeable_reg(struct device *dev, unsigned register reg)
+{
+       return pcm1681_accessible_reg(dev, reg) &&
+               (reg != PCM1681_ZERO_DETECT_STATUS);
+}
+
+/* Per-device driver state, attached to the codec as drvdata. */
+struct pcm1681_private {
+       struct regmap *regmap;
+       /* DAI format cached by set_fmt, applied to hardware in hw_params */
+       unsigned int format;
+       /* Current deemphasis status */
+       unsigned int deemph;
+       /* Current rate for deemphasis control */
+       unsigned int rate;
+};
+
+/* Rates supported by the de-emphasis filter; the array index is the
+ * encoding written into the DEEMPH rate-select field. */
+static const int pcm1681_deemph[] = { 44100, 48000, 32000 };
+
+static int pcm1681_set_deemph(struct snd_soc_codec *codec)
+{
+       struct pcm1681_private *priv = snd_soc_codec_get_drvdata(codec);
+       int i = 0, val = -1, enable = 0;
+
+       if (priv->deemph)
+               for (i = 0; i < ARRAY_SIZE(pcm1681_deemph); i++)
+                       if (pcm1681_deemph[i] == priv->rate)
+                               val = i;
+
+       if (val != -1) {
+               regmap_update_bits(priv->regmap, PCM1681_DEEMPH_CONTROL,
+                                       PCM1681_DEEMPH_RATE_MASK, val);
+               enable = 1;
+       } else
+               enable = 0;
+
+       /* enable/disable deemphasis functionality */
+       return regmap_update_bits(priv->regmap, PCM1681_DEEMPH_CONTROL,
+                                       PCM1681_DEEMPH_MASK, enable);
+}
+
+static int pcm1681_get_deemph(struct snd_kcontrol *kcontrol,
+                             struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+       struct pcm1681_private *priv = snd_soc_codec_get_drvdata(codec);
+
+       ucontrol->value.enumerated.item[0] = priv->deemph;
+
+       return 0;
+}
+
+static int pcm1681_put_deemph(struct snd_kcontrol *kcontrol,
+                             struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+       struct pcm1681_private *priv = snd_soc_codec_get_drvdata(codec);
+
+       priv->deemph = ucontrol->value.enumerated.item[0];
+
+       return pcm1681_set_deemph(codec);
+}
+
+static int pcm1681_set_dai_fmt(struct snd_soc_dai *codec_dai,
+                             unsigned int format)
+{
+       struct snd_soc_codec *codec = codec_dai->codec;
+       struct pcm1681_private *priv = snd_soc_codec_get_drvdata(codec);
+
+       /* The PCM1681 can only be slave to all clocks */
+       if ((format & SND_SOC_DAIFMT_MASTER_MASK) != SND_SOC_DAIFMT_CBS_CFS) {
+               dev_err(codec->dev, "Invalid clocking mode\n");
+               return -EINVAL;
+       }
+
+       priv->format = format;
+
+       return 0;
+}
+
+static int pcm1681_digital_mute(struct snd_soc_dai *dai, int mute)
+{
+       struct snd_soc_codec *codec = dai->codec;
+       struct pcm1681_private *priv = snd_soc_codec_get_drvdata(codec);
+       int val;
+
+       if (mute)
+               val = PCM1681_SOFT_MUTE_ALL;
+       else
+               val = 0;
+
+       return regmap_write(priv->regmap, PCM1681_SOFT_MUTE, val);
+}
+
+/*
+ * hw_params: cache the sample rate for de-emphasis, map the cached DAI
+ * format plus sample width onto the FMT register's format field (low
+ * nibble), then refresh the de-emphasis configuration.
+ * NOTE(review): the 0x00/0x03/0x04/0x05 encodings are assumed to follow
+ * the PCM1681 datasheet format table -- confirm against the datasheet.
+ */
+static int pcm1681_hw_params(struct snd_pcm_substream *substream,
+                            struct snd_pcm_hw_params *params,
+                            struct snd_soc_dai *dai)
+{
+       struct snd_soc_codec *codec = dai->codec;
+       struct pcm1681_private *priv = snd_soc_codec_get_drvdata(codec);
+       int val = 0, ret;
+       int pcm_format = params_format(params);
+
+       /* Remembered for the de-emphasis rate selection in set_deemph(). */
+       priv->rate = params_rate(params);
+
+       switch (priv->format & SND_SOC_DAIFMT_FORMAT_MASK) {
+       case SND_SOC_DAIFMT_RIGHT_J:
+               /* Right-justified encoding depends on the sample width. */
+               if (pcm_format == SNDRV_PCM_FORMAT_S24_LE)
+                       val = 0x00;
+               else if (pcm_format == SNDRV_PCM_FORMAT_S16_LE)
+                       val = 0x03;
+               break;
+       case SND_SOC_DAIFMT_I2S:
+               val = 0x04;
+               break;
+       case SND_SOC_DAIFMT_LEFT_J:
+               val = 0x05;
+               break;
+       default:
+               dev_err(codec->dev, "Invalid DAI format\n");
+               return -EINVAL;
+       }
+
+       /* Only the low nibble of FMT_CONTROL selects the data format. */
+       ret = regmap_update_bits(priv->regmap, PCM1681_FMT_CONTROL, 0x0f, val);
+       if (ret < 0)
+               return ret;
+
+       return pcm1681_set_deemph(codec);
+}
+
+/* DAI callbacks wired into pcm1681_dai below. */
+static const struct snd_soc_dai_ops pcm1681_dai_ops = {
+       .set_fmt        = pcm1681_set_dai_fmt,
+       .hw_params      = pcm1681_hw_params,
+       .digital_mute   = pcm1681_digital_mute,
+};
+
+/* One analog output pin per DAC channel. */
+static const struct snd_soc_dapm_widget pcm1681_dapm_widgets[] = {
+SND_SOC_DAPM_OUTPUT("VOUT1"),
+SND_SOC_DAPM_OUTPUT("VOUT2"),
+SND_SOC_DAPM_OUTPUT("VOUT3"),
+SND_SOC_DAPM_OUTPUT("VOUT4"),
+SND_SOC_DAPM_OUTPUT("VOUT5"),
+SND_SOC_DAPM_OUTPUT("VOUT6"),
+SND_SOC_DAPM_OUTPUT("VOUT7"),
+SND_SOC_DAPM_OUTPUT("VOUT8"),
+};
+
+/* Every output is fed directly from the playback stream. */
+static const struct snd_soc_dapm_route pcm1681_dapm_routes[] = {
+       { "VOUT1", NULL, "Playback" },
+       { "VOUT2", NULL, "Playback" },
+       { "VOUT3", NULL, "Playback" },
+       { "VOUT4", NULL, "Playback" },
+       { "VOUT5", NULL, "Playback" },
+       { "VOUT6", NULL, "Playback" },
+       { "VOUT7", NULL, "Playback" },
+       { "VOUT8", NULL, "Playback" },
+};
+
+/* -63.50dB .. 0dB in 0.5dB steps; lowest value mutes (final TLV argument). */
+static const DECLARE_TLV_DB_SCALE(pcm1681_dac_tlv, -6350, 50, 1);
+
+/* Per-channel-pair volume controls plus the de-emphasis switch. */
+static const struct snd_kcontrol_new pcm1681_controls[] = {
+       SOC_DOUBLE_R_TLV("Channel 1/2 Playback Volume",
+                       PCM1681_ATT_CONTROL(1), PCM1681_ATT_CONTROL(2), 0,
+                       0x7f, 0, pcm1681_dac_tlv),
+       SOC_DOUBLE_R_TLV("Channel 3/4 Playback Volume",
+                       PCM1681_ATT_CONTROL(3), PCM1681_ATT_CONTROL(4), 0,
+                       0x7f, 0, pcm1681_dac_tlv),
+       SOC_DOUBLE_R_TLV("Channel 5/6 Playback Volume",
+                       PCM1681_ATT_CONTROL(5), PCM1681_ATT_CONTROL(6), 0,
+                       0x7f, 0, pcm1681_dac_tlv),
+       SOC_DOUBLE_R_TLV("Channel 7/8 Playback Volume",
+                       PCM1681_ATT_CONTROL(7), PCM1681_ATT_CONTROL(8), 0,
+                       0x7f, 0, pcm1681_dac_tlv),
+       SOC_SINGLE_BOOL_EXT("De-emphasis Switch", 0,
+                           pcm1681_get_deemph, pcm1681_put_deemph),
+};
+
+/* 8-channel playback-only DAI. */
+static struct snd_soc_dai_driver pcm1681_dai = {
+       .name = "pcm1681-hifi",
+       .playback = {
+               .stream_name = "Playback",
+               .channels_min = 2,
+               .channels_max = 8,
+               .rates = PCM1681_PCM_RATES,
+               .formats = PCM1681_PCM_FORMATS,
+       },
+       .ops = &pcm1681_dai_ops,
+};
+
+#ifdef CONFIG_OF
+static const struct of_device_id pcm1681_dt_ids[] = {
+       { .compatible = "ti,pcm1681", },
+       { }
+};
+MODULE_DEVICE_TABLE(of, pcm1681_dt_ids);
+#endif
+
+static const struct regmap_config pcm1681_regmap = {
+       .reg_bits               = 8,
+       .val_bits               = 8,
+       .max_register           = ARRAY_SIZE(pcm1681_reg_defaults) + 1,
+       .reg_defaults           = pcm1681_reg_defaults,
+       .num_reg_defaults       = ARRAY_SIZE(pcm1681_reg_defaults),
+       .writeable_reg          = pcm1681_writeable_reg,
+       .readable_reg           = pcm1681_accessible_reg,
+};
+
+/* Codec registration data: controls, widgets and routes defined above. */
+static struct snd_soc_codec_driver soc_codec_dev_pcm1681 = {
+       .controls               = pcm1681_controls,
+       .num_controls           = ARRAY_SIZE(pcm1681_controls),
+       .dapm_widgets           = pcm1681_dapm_widgets,
+       .num_dapm_widgets       = ARRAY_SIZE(pcm1681_dapm_widgets),
+       .dapm_routes            = pcm1681_dapm_routes,
+       .num_dapm_routes        = ARRAY_SIZE(pcm1681_dapm_routes),
+};
+
+static const struct i2c_device_id pcm1681_i2c_id[] = {
+       {"pcm1681", 0},
+       {}
+};
+MODULE_DEVICE_TABLE(i2c, pcm1681_i2c_id);
+
+static int pcm1681_i2c_probe(struct i2c_client *client,
+                             const struct i2c_device_id *id)
+{
+       int ret;
+       struct pcm1681_private *priv;
+
+       priv = devm_kzalloc(&client->dev, sizeof(*priv), GFP_KERNEL);
+       if (!priv)
+               return -ENOMEM;
+
+       priv->regmap = devm_regmap_init_i2c(client, &pcm1681_regmap);
+       if (IS_ERR(priv->regmap)) {
+               ret = PTR_ERR(priv->regmap);
+               dev_err(&client->dev, "Failed to create regmap: %d\n", ret);
+               return ret;
+       }
+
+       i2c_set_clientdata(client, priv);
+
+       return snd_soc_register_codec(&client->dev, &soc_codec_dev_pcm1681,
+               &pcm1681_dai, 1);
+}
+
+static int pcm1681_i2c_remove(struct i2c_client *client)
+{
+       snd_soc_unregister_codec(&client->dev);
+       return 0;
+}
+
+/* I2C driver glue; DT matching uses pcm1681_dt_ids when CONFIG_OF is set. */
+static struct i2c_driver pcm1681_i2c_driver = {
+       .driver = {
+               .name   = "pcm1681",
+               .owner  = THIS_MODULE,
+               .of_match_table = of_match_ptr(pcm1681_dt_ids),
+       },
+       .id_table       = pcm1681_i2c_id,
+       .probe          = pcm1681_i2c_probe,
+       .remove         = pcm1681_i2c_remove,
+};
+
+module_i2c_driver(pcm1681_i2c_driver);
+
+MODULE_DESCRIPTION("Texas Instruments PCM1681 ALSA SoC Codec Driver");
+MODULE_AUTHOR("Marek Belisko <marek.belisko@streamunlimited.com>");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/pcm1792a.c b/sound/soc/codecs/pcm1792a.c
new file mode 100644 (file)
index 0000000..2a8eccf
--- /dev/null
@@ -0,0 +1,257 @@
+/*
+ * PCM1792A ASoC codec driver
+ *
+ * Copyright (c) Amarula Solutions B.V. 2013
+ *
+ *     Michael Trimarchi <michael@amarulasolutions.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/spi/spi.h>
+
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/initval.h>
+#include <sound/soc.h>
+#include <sound/tlv.h>
+#include <linux/of_device.h>
+
+#include "pcm1792a.h"
+
+/* Register addresses; the implemented register file spans 0x10..0x17. */
+#define PCM1792A_DAC_VOL_LEFT  0x10
+#define PCM1792A_DAC_VOL_RIGHT 0x11
+#define PCM1792A_FMT_CONTROL   0x12
+/* The soft-mute bit shares the format control register. */
+#define PCM1792A_SOFT_MUTE     PCM1792A_FMT_CONTROL
+
+/* Fields within FMT_CONTROL. */
+#define PCM1792A_FMT_MASK      0x70
+#define PCM1792A_FMT_SHIFT     4
+#define PCM1792A_MUTE_MASK     0x01
+#define PCM1792A_MUTE_SHIFT    0
+/* NOTE(review): ATLD presumably latches attenuation-register writes, per
+ * datasheet naming -- confirm. */
+#define PCM1792A_ATLD_ENABLE   (1 << 7)
+
+/* Power-on defaults for the implemented registers. */
+static const struct reg_default pcm1792a_reg_defaults[] = {
+       { 0x10, 0xff },
+       { 0x11, 0xff },
+       { 0x12, 0x50 },
+       { 0x13, 0x00 },
+       { 0x14, 0x00 },
+       { 0x15, 0x01 },
+       { 0x16, 0x00 },
+       { 0x17, 0x00 },
+};
+
/* Only the register file at 0x10..0x17 is implemented on the PCM1792A. */
static bool pcm1792a_accessible_reg(struct device *dev, unsigned int reg)
{
	if (reg < 0x10)
		return false;

	return reg <= 0x17;
}
+
/*
 * Registers 0x16 and 0x17 are read-only; everything else that is
 * accessible may be written.  The parameter type is "unsigned int" to
 * match the regmap .writeable_reg callback signature ("unsigned register"
 * was a typo: "register" is a storage-class specifier, not a type).
 */
static bool pcm1792a_writeable_reg(struct device *dev, unsigned int reg)
{
	bool accessible;

	accessible = pcm1792a_accessible_reg(dev, reg);

	return accessible && reg != 0x16 && reg != 0x17;
}
+
+/* Per-device driver state, attached to the codec as drvdata. */
+struct pcm1792a_private {
+       struct regmap *regmap;
+       /* DAI format cached by set_fmt, applied to hardware in hw_params */
+       unsigned int format;
+       /* Current sample rate, cached by hw_params */
+       unsigned int rate;
+};
+
+static int pcm1792a_set_dai_fmt(struct snd_soc_dai *codec_dai,
+                             unsigned int format)
+{
+       struct snd_soc_codec *codec = codec_dai->codec;
+       struct pcm1792a_private *priv = snd_soc_codec_get_drvdata(codec);
+
+       priv->format = format;
+
+       return 0;
+}
+
+static int pcm1792a_digital_mute(struct snd_soc_dai *dai, int mute)
+{
+       struct snd_soc_codec *codec = dai->codec;
+       struct pcm1792a_private *priv = snd_soc_codec_get_drvdata(codec);
+       int ret;
+
+       ret = regmap_update_bits(priv->regmap, PCM1792A_SOFT_MUTE,
+                                PCM1792A_MUTE_MASK, !!mute);
+       if (ret < 0)
+               return ret;
+
+       return 0;
+}
+
+/*
+ * hw_params: cache the sample rate, map the cached DAI format plus sample
+ * width onto the FMT register's format field, and keep ATLD set so volume
+ * writes take effect.
+ * NOTE(review): the 0x00/0x02/0x04/0x05 encodings are assumed to follow
+ * the PCM1792A datasheet format table -- confirm against the datasheet.
+ */
+static int pcm1792a_hw_params(struct snd_pcm_substream *substream,
+                            struct snd_pcm_hw_params *params,
+                            struct snd_soc_dai *dai)
+{
+       struct snd_soc_codec *codec = dai->codec;
+       struct pcm1792a_private *priv = snd_soc_codec_get_drvdata(codec);
+       int val = 0, ret;
+       int pcm_format = params_format(params);
+
+       priv->rate = params_rate(params);
+
+       /* Select the format encoding from DAI mode and sample width. */
+       switch (priv->format & SND_SOC_DAIFMT_FORMAT_MASK) {
+       case SND_SOC_DAIFMT_RIGHT_J:
+               if (pcm_format == SNDRV_PCM_FORMAT_S24_LE ||
+                   pcm_format == SNDRV_PCM_FORMAT_S32_LE)
+                       val = 0x02;
+               else if (pcm_format == SNDRV_PCM_FORMAT_S16_LE)
+                       val = 0x00;
+               break;
+       case SND_SOC_DAIFMT_I2S:
+               if (pcm_format == SNDRV_PCM_FORMAT_S24_LE ||
+                   pcm_format == SNDRV_PCM_FORMAT_S32_LE)
+                       val = 0x05;
+               else if (pcm_format == SNDRV_PCM_FORMAT_S16_LE)
+                       val = 0x04;
+               break;
+       default:
+               dev_err(codec->dev, "Invalid DAI format\n");
+               return -EINVAL;
+       }
+
+       /* Shift into the FMT field and keep ATLD asserted. */
+       val = val << PCM1792A_FMT_SHIFT | PCM1792A_ATLD_ENABLE;
+
+       ret = regmap_update_bits(priv->regmap, PCM1792A_FMT_CONTROL,
+                                PCM1792A_FMT_MASK | PCM1792A_ATLD_ENABLE, val);
+       if (ret < 0)
+               return ret;
+
+       return 0;
+}
+
+/* DAI callbacks wired into pcm1792a_dai below. */
+static const struct snd_soc_dai_ops pcm1792a_dai_ops = {
+       .set_fmt        = pcm1792a_set_dai_fmt,
+       .hw_params      = pcm1792a_hw_params,
+       .digital_mute   = pcm1792a_digital_mute,
+};
+
+/* -120dB .. 0dB in 0.5dB steps; lowest value mutes (final TLV argument). */
+static const DECLARE_TLV_DB_SCALE(pcm1792a_dac_tlv, -12000, 50, 1);
+
+static const struct snd_kcontrol_new pcm1792a_controls[] = {
+       SOC_DOUBLE_R_RANGE_TLV("DAC Playback Volume", PCM1792A_DAC_VOL_LEFT,
+                        PCM1792A_DAC_VOL_RIGHT, 0, 0xf, 0xff, 0,
+                        pcm1792a_dac_tlv),
+};
+
+/* Differential current outputs for the left and right channels. */
+static const struct snd_soc_dapm_widget pcm1792a_dapm_widgets[] = {
+SND_SOC_DAPM_OUTPUT("IOUTL+"),
+SND_SOC_DAPM_OUTPUT("IOUTL-"),
+SND_SOC_DAPM_OUTPUT("IOUTR+"),
+SND_SOC_DAPM_OUTPUT("IOUTR-"),
+};
+
+/* Every output is fed directly from the playback stream. */
+static const struct snd_soc_dapm_route pcm1792a_dapm_routes[] = {
+       { "IOUTL+", NULL, "Playback" },
+       { "IOUTL-", NULL, "Playback" },
+       { "IOUTR+", NULL, "Playback" },
+       { "IOUTR-", NULL, "Playback" },
+};
+
+/* Stereo playback-only DAI; rate/format masks come from pcm1792a.h. */
+static struct snd_soc_dai_driver pcm1792a_dai = {
+       .name = "pcm1792a-hifi",
+       .playback = {
+               .stream_name = "Playback",
+               .channels_min = 2,
+               .channels_max = 2,
+               .rates = PCM1792A_RATES,
+               .formats = PCM1792A_FORMATS, },
+       .ops = &pcm1792a_dai_ops,
+};
+
+static const struct of_device_id pcm1792a_of_match[] = {
+       { .compatible = "ti,pcm1792a", },
+       { }
+};
+MODULE_DEVICE_TABLE(of, pcm1792a_of_match);
+
+/*
+ * 8-bit SPI register map.  NOTE(review): max_register is 24 (0x18) while
+ * the highest implemented register is 0x17 -- harmless since 0x18 is
+ * filtered by the accessibility callbacks, but worth confirming.
+ */
+static const struct regmap_config pcm1792a_regmap = {
+       .reg_bits               = 8,
+       .val_bits               = 8,
+       .max_register           = 24,
+       .reg_defaults           = pcm1792a_reg_defaults,
+       .num_reg_defaults       = ARRAY_SIZE(pcm1792a_reg_defaults),
+       .writeable_reg          = pcm1792a_writeable_reg,
+       .readable_reg           = pcm1792a_accessible_reg,
+};
+
+/* Codec registration data: controls, widgets and routes defined above. */
+static struct snd_soc_codec_driver soc_codec_dev_pcm1792a = {
+       .controls               = pcm1792a_controls,
+       .num_controls           = ARRAY_SIZE(pcm1792a_controls),
+       .dapm_widgets           = pcm1792a_dapm_widgets,
+       .num_dapm_widgets       = ARRAY_SIZE(pcm1792a_dapm_widgets),
+       .dapm_routes            = pcm1792a_dapm_routes,
+       .num_dapm_routes        = ARRAY_SIZE(pcm1792a_dapm_routes),
+};
+
+static int pcm1792a_spi_probe(struct spi_device *spi)
+{
+       struct pcm1792a_private *pcm1792a;
+       int ret;
+
+       pcm1792a = devm_kzalloc(&spi->dev, sizeof(struct pcm1792a_private),
+                               GFP_KERNEL);
+       if (!pcm1792a)
+               return -ENOMEM;
+
+       spi_set_drvdata(spi, pcm1792a);
+
+       pcm1792a->regmap = devm_regmap_init_spi(spi, &pcm1792a_regmap);
+       if (IS_ERR(pcm1792a->regmap)) {
+               ret = PTR_ERR(pcm1792a->regmap);
+               dev_err(&spi->dev, "Failed to register regmap: %d\n", ret);
+               return ret;
+       }
+
+       return snd_soc_register_codec(&spi->dev,
+                       &soc_codec_dev_pcm1792a, &pcm1792a_dai, 1);
+}
+
+static int pcm1792a_spi_remove(struct spi_device *spi)
+{
+       snd_soc_unregister_codec(&spi->dev);
+       return 0;
+}
+
+static const struct spi_device_id pcm1792a_spi_ids[] = {
+       { "pcm1792a", 0 },
+       { },
+};
+MODULE_DEVICE_TABLE(spi, pcm1792a_spi_ids);
+
+/* SPI driver glue; DT matching uses pcm1792a_of_match when CONFIG_OF is set. */
+static struct spi_driver pcm1792a_codec_driver = {
+       .driver = {
+               .name = "pcm1792a",
+               .owner = THIS_MODULE,
+               .of_match_table = of_match_ptr(pcm1792a_of_match),
+       },
+       .id_table = pcm1792a_spi_ids,
+       .probe = pcm1792a_spi_probe,
+       .remove = pcm1792a_spi_remove,
+};
+
+module_spi_driver(pcm1792a_codec_driver);
+
+MODULE_DESCRIPTION("ASoC PCM1792A driver");
+MODULE_AUTHOR("Michael Trimarchi <michael@amarulasolutions.com>");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/pcm1792a.h b/sound/soc/codecs/pcm1792a.h
new file mode 100644 (file)
index 0000000..7a83d1f
--- /dev/null
@@ -0,0 +1,26 @@
+/*
+ * definitions for PCM1792A
+ *
+ * Copyright 2013 Amarula Solutions
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __PCM1792A_H__
+#define __PCM1792A_H__
+
+/* Sample rates and formats advertised by the pcm1792a playback DAI. */
+#define PCM1792A_RATES (SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_8000_48000 | \
+                       SNDRV_PCM_RATE_96000 | SNDRV_PCM_RATE_192000)
+
+#define PCM1792A_FORMATS (SNDRV_PCM_FMTBIT_S32_LE | SNDRV_PCM_FMTBIT_S24_LE | \
+                         SNDRV_PCM_FMTBIT_S16_LE)
+
+#endif
index f2a6282b41f4e07a1627463222b4ba9d091a7cf1..b6618c4a7597fee9d0bf11528f5997f7548a747f 100644 (file)
 
 #include "pcm3008.h"
 
-#define PCM3008_VERSION "0.2"
+/*
+ * DAPM event: drive the DAC power control GPIO (PDDA) high while the DAC
+ * widget is powered and low when it is powered down.
+ */
+static int pcm3008_dac_ev(struct snd_soc_dapm_widget *w,
+                         struct snd_kcontrol *kcontrol,
+                         int event)
+{
+       struct snd_soc_codec *codec = w->codec;
+       struct pcm3008_setup_data *setup = codec->dev->platform_data;
+
+       gpio_set_value_cansleep(setup->pdda_pin,
+                               SND_SOC_DAPM_EVENT_ON(event));
+
+       return 0;
+}
+
+/* As above, for the ADC power control GPIO (PDAD). */
+static int pcm3008_adc_ev(struct snd_soc_dapm_widget *w,
+                         struct snd_kcontrol *kcontrol,
+                         int event)
+{
+       struct snd_soc_codec *codec = w->codec;
+       struct pcm3008_setup_data *setup = codec->dev->platform_data;
+
+       gpio_set_value_cansleep(setup->pdad_pin,
+                               SND_SOC_DAPM_EVENT_ON(event));
+
+       return 0;
+}
+
+/* Analog inputs feed the ADC; the DAC feeds the analog outputs.  The DAC
+ * and ADC widgets carry the GPIO power events defined above. */
+static const struct snd_soc_dapm_widget pcm3008_dapm_widgets[] = {
+SND_SOC_DAPM_INPUT("VINL"),
+SND_SOC_DAPM_INPUT("VINR"),
+
+SND_SOC_DAPM_DAC_E("DAC", NULL, SND_SOC_NOPM, 0, 0, pcm3008_dac_ev,
+                  SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+SND_SOC_DAPM_ADC_E("ADC", NULL, SND_SOC_NOPM, 0, 0, pcm3008_adc_ev,
+                  SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+
+SND_SOC_DAPM_OUTPUT("VOUTL"),
+SND_SOC_DAPM_OUTPUT("VOUTR"),
+};
+
+static const struct snd_soc_dapm_route pcm3008_dapm_routes[] = {
+       { "PCM3008 Capture", NULL, "ADC" },
+       { "ADC", NULL, "VINL" },
+       { "ADC", NULL, "VINR" },
+
+       { "DAC", NULL, "PCM3008 Playback" },
+       { "VOUTL", NULL, "DAC" },
+       { "VOUTR", NULL, "DAC" },
+};
 
 #define PCM3008_RATES (SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 |   \
                       SNDRV_PCM_RATE_48000)
@@ -51,20 +98,20 @@ static struct snd_soc_dai_driver pcm3008_dai = {
        },
 };
 
-static void pcm3008_gpio_free(struct pcm3008_setup_data *setup)
-{
-       gpio_free(setup->dem0_pin);
-       gpio_free(setup->dem1_pin);
-       gpio_free(setup->pdad_pin);
-       gpio_free(setup->pdda_pin);
-}
+static struct snd_soc_codec_driver soc_codec_dev_pcm3008 = {
+       .dapm_widgets = pcm3008_dapm_widgets,
+       .num_dapm_widgets = ARRAY_SIZE(pcm3008_dapm_widgets),
+       .dapm_routes = pcm3008_dapm_routes,
+       .num_dapm_routes = ARRAY_SIZE(pcm3008_dapm_routes),
+};
 
-static int pcm3008_soc_probe(struct snd_soc_codec *codec)
+static int pcm3008_codec_probe(struct platform_device *pdev)
 {
-       struct pcm3008_setup_data *setup = codec->dev->platform_data;
-       int ret = 0;
+       struct pcm3008_setup_data *setup = pdev->dev.platform_data;
+       int ret;
 
-       printk(KERN_INFO "PCM3008 SoC Audio Codec %s\n", PCM3008_VERSION);
+       if (!setup)
+               return -EINVAL;
 
        /* DEM1  DEM0  DE-EMPHASIS_MODE
         * Low   Low   De-emphasis 44.1 kHz ON
@@ -74,83 +121,29 @@ static int pcm3008_soc_probe(struct snd_soc_codec *codec)
         */
 
        /* Configure DEM0 GPIO (turning OFF DAC De-emphasis). */
-       ret = gpio_request(setup->dem0_pin, "codec_dem0");
-       if (ret == 0)
-               ret = gpio_direction_output(setup->dem0_pin, 1);
+       ret = devm_gpio_request_one(&pdev->dev, setup->dem0_pin,
+                                   GPIOF_OUT_INIT_HIGH, "codec_dem0");
        if (ret != 0)
-               goto gpio_err;
+               return ret;
 
        /* Configure DEM1 GPIO (turning OFF DAC De-emphasis). */
-       ret = gpio_request(setup->dem1_pin, "codec_dem1");
-       if (ret == 0)
-               ret = gpio_direction_output(setup->dem1_pin, 0);
+       ret = devm_gpio_request_one(&pdev->dev, setup->dem1_pin,
+                                   GPIOF_OUT_INIT_LOW, "codec_dem1");
        if (ret != 0)
-               goto gpio_err;
+               return ret;
 
        /* Configure PDAD GPIO. */
-       ret = gpio_request(setup->pdad_pin, "codec_pdad");
-       if (ret == 0)
-               ret = gpio_direction_output(setup->pdad_pin, 1);
+       ret = devm_gpio_request_one(&pdev->dev, setup->pdad_pin,
+                                   GPIOF_OUT_INIT_LOW, "codec_pdad");
        if (ret != 0)
-               goto gpio_err;
+               return ret;
 
        /* Configure PDDA GPIO. */
-       ret = gpio_request(setup->pdda_pin, "codec_pdda");
-       if (ret == 0)
-               ret = gpio_direction_output(setup->pdda_pin, 1);
+       ret = devm_gpio_request_one(&pdev->dev, setup->pdda_pin,
+                                   GPIOF_OUT_INIT_LOW, "codec_pdda");
        if (ret != 0)
-               goto gpio_err;
-
-       return ret;
-
-gpio_err:
-       pcm3008_gpio_free(setup);
+               return ret;
 
-       return ret;
-}
-
-static int pcm3008_soc_remove(struct snd_soc_codec *codec)
-{
-       struct pcm3008_setup_data *setup = codec->dev->platform_data;
-
-       pcm3008_gpio_free(setup);
-       return 0;
-}
-
-#ifdef CONFIG_PM
-static int pcm3008_soc_suspend(struct snd_soc_codec *codec)
-{
-       struct pcm3008_setup_data *setup = codec->dev->platform_data;
-
-       gpio_set_value(setup->pdad_pin, 0);
-       gpio_set_value(setup->pdda_pin, 0);
-
-       return 0;
-}
-
-static int pcm3008_soc_resume(struct snd_soc_codec *codec)
-{
-       struct pcm3008_setup_data *setup = codec->dev->platform_data;
-
-       gpio_set_value(setup->pdad_pin, 1);
-       gpio_set_value(setup->pdda_pin, 1);
-
-       return 0;
-}
-#else
-#define pcm3008_soc_suspend NULL
-#define pcm3008_soc_resume NULL
-#endif
-
-static struct snd_soc_codec_driver soc_codec_dev_pcm3008 = {
-       .probe =        pcm3008_soc_probe,
-       .remove =       pcm3008_soc_remove,
-       .suspend =      pcm3008_soc_suspend,
-       .resume =       pcm3008_soc_resume,
-};
-
-static int pcm3008_codec_probe(struct platform_device *pdev)
-{
        return snd_soc_register_codec(&pdev->dev,
                        &soc_codec_dev_pcm3008, &pcm3008_dai, 1);
 }
@@ -158,6 +151,7 @@ static int pcm3008_codec_probe(struct platform_device *pdev)
 static int pcm3008_codec_remove(struct platform_device *pdev)
 {
        snd_soc_unregister_codec(&pdev->dev);
+
        return 0;
 }
 
index ce585e37e38a8f8edda36a0146ed477ec61224bc..4db7314baabcab95feb9f1b0a1e5e2ab83d7a54a 100644 (file)
@@ -737,29 +737,6 @@ static const struct snd_kcontrol_new rt5640_mono_mix[] = {
                        RT5640_M_BST1_MM_SFT, 1, 1),
 };
 
-/* INL/R source */
-static const char * const rt5640_inl_src[] = {
-       "IN2P", "MONOP"
-};
-
-static const SOC_ENUM_SINGLE_DECL(
-       rt5640_inl_enum, RT5640_INL_INR_VOL,
-       RT5640_INL_SEL_SFT, rt5640_inl_src);
-
-static const struct snd_kcontrol_new rt5640_inl_mux =
-       SOC_DAPM_ENUM("INL source", rt5640_inl_enum);
-
-static const char * const rt5640_inr_src[] = {
-       "IN2N", "MONON"
-};
-
-static const SOC_ENUM_SINGLE_DECL(
-       rt5640_inr_enum, RT5640_INL_INR_VOL,
-       RT5640_INR_SEL_SFT, rt5640_inr_src);
-
-static const struct snd_kcontrol_new rt5640_inr_mux =
-       SOC_DAPM_ENUM("INR source", rt5640_inr_enum);
-
 /* Stereo ADC source */
 static const char * const rt5640_stereo_adc1_src[] = {
        "DIG MIX", "ADC"
@@ -1005,9 +982,6 @@ static const struct snd_soc_dapm_widget rt5640_dapm_widgets[] = {
                RT5640_PWR_IN_L_BIT, 0, NULL, 0),
        SND_SOC_DAPM_PGA("INR VOL", RT5640_PWR_VOL,
                RT5640_PWR_IN_R_BIT, 0, NULL, 0),
-       /* IN Mux */
-       SND_SOC_DAPM_MUX("INL Mux", SND_SOC_NOPM, 0, 0, &rt5640_inl_mux),
-       SND_SOC_DAPM_MUX("INR Mux", SND_SOC_NOPM, 0, 0, &rt5640_inr_mux),
        /* REC Mixer */
        SND_SOC_DAPM_MIXER("RECMIXL", RT5640_PWR_MIXER, RT5640_PWR_RM_L_BIT, 0,
                        rt5640_rec_l_mix, ARRAY_SIZE(rt5640_rec_l_mix)),
index 760e8bfeacaadef370eb0ce30e1ce5ad124da295..1f4093f3f3a19219318eb0f3c407d8f302ce34d3 100644 (file)
@@ -654,16 +654,19 @@ static int sgtl5000_set_clock(struct snd_soc_codec *codec, int frame_rate)
                snd_soc_update_bits(codec, SGTL5000_CHIP_ANA_POWER,
                        SGTL5000_PLL_POWERUP | SGTL5000_VCOAMP_POWERUP,
                        SGTL5000_PLL_POWERUP | SGTL5000_VCOAMP_POWERUP);
+
+               /* if using pll, clk_ctrl must be set after pll power up */
+               snd_soc_write(codec, SGTL5000_CHIP_CLK_CTRL, clk_ctl);
        } else {
+               /* otherwise, clk_ctrl must be set before pll power down */
+               snd_soc_write(codec, SGTL5000_CHIP_CLK_CTRL, clk_ctl);
+
                /* power down pll */
                snd_soc_update_bits(codec, SGTL5000_CHIP_ANA_POWER,
                        SGTL5000_PLL_POWERUP | SGTL5000_VCOAMP_POWERUP,
                        0);
        }
 
-       /* if using pll, clk_ctrl must be set after pll power up */
-       snd_soc_write(codec, SGTL5000_CHIP_CLK_CTRL, clk_ctl);
-
        return 0;
 }
 
@@ -1480,6 +1483,7 @@ static struct snd_soc_codec_driver sgtl5000_driver = {
 static const struct regmap_config sgtl5000_regmap = {
        .reg_bits = 16,
        .val_bits = 16,
+       .reg_stride = 2,
 
        .max_register = SGTL5000_MAX_REG_OFFSET,
        .volatile_reg = sgtl5000_volatile,
index 73e205c892a0d087b4cfc8a6cd0a7de5c26873f3..38f3b105c17dc2da0809c903190dd37644f2508d 100644 (file)
@@ -102,6 +102,16 @@ static int si476x_codec_write(struct snd_soc_codec *codec,
        return err;
 }
 
+/* Tuner audio output pins exposed as DAPM endpoints. */
+static const struct snd_soc_dapm_widget si476x_dapm_widgets[] = {
+SND_SOC_DAPM_OUTPUT("LOUT"),
+SND_SOC_DAPM_OUTPUT("ROUT"),
+};
+
+/* NOTE(review): the tuner's outputs feed the SoC capture stream, so the
+ * routes run LOUT/ROUT -> "Capture"; confirm this matches the board. */
+static const struct snd_soc_dapm_route si476x_dapm_routes[] = {
+       { "Capture", NULL, "LOUT" },
+       { "Capture", NULL, "ROUT" },
+};
+
 static int si476x_codec_set_dai_fmt(struct snd_soc_dai *codec_dai,
                                    unsigned int fmt)
 {
@@ -260,6 +270,10 @@ static struct snd_soc_codec_driver soc_codec_dev_si476x = {
        .probe  = si476x_codec_probe,
        .read   = si476x_codec_read,
        .write  = si476x_codec_write,
+       .dapm_widgets = si476x_dapm_widgets,
+       .num_dapm_widgets = ARRAY_SIZE(si476x_dapm_widgets),
+       .dapm_routes = si476x_dapm_routes,
+       .num_dapm_routes = ARRAY_SIZE(si476x_dapm_routes),
 };
 
 static int si476x_platform_probe(struct platform_device *pdev)
index e9d7881ed2c8272c2c64c7255e60728356aab009..e3501f40c7b3d866f965131e64ce2ae1227e1b77 100644 (file)
 #include <sound/initval.h>
 #include <linux/of.h>
 
+static const struct snd_soc_dapm_widget dir_widgets[] = {
+       SND_SOC_DAPM_INPUT("spdif-in"),
+};
+
+static const struct snd_soc_dapm_route dir_routes[] = {
+       { "Capture", NULL, "spdif-in" },
+};
+
 #define STUB_RATES     SNDRV_PCM_RATE_8000_192000
 #define STUB_FORMATS   (SNDRV_PCM_FMTBIT_S16_LE | \
+                       SNDRV_PCM_FMTBIT_S20_3LE | \
+                       SNDRV_PCM_FMTBIT_S24_LE | \
                        SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_LE)
 
-static struct snd_soc_codec_driver soc_codec_spdif_dir;
+static struct snd_soc_codec_driver soc_codec_spdif_dir = {
+       .dapm_widgets = dir_widgets,
+       .num_dapm_widgets = ARRAY_SIZE(dir_widgets),
+       .dapm_routes = dir_routes,
+       .num_dapm_routes = ARRAY_SIZE(dir_routes),
+};
 
 static struct snd_soc_dai_driver dir_stub_dai = {
        .name           = "dir-hifi",
index 18280499fd554ee13ca7f9c17d86c8594a40d8bb..a078aa31052a589e2ee4115f6657806b62ba0418 100644 (file)
 #define DRV_NAME "spdif-dit"
 
 #define STUB_RATES     SNDRV_PCM_RATE_8000_96000
-#define STUB_FORMATS   SNDRV_PCM_FMTBIT_S16_LE
+#define STUB_FORMATS   (SNDRV_PCM_FMTBIT_S16_LE | \
+                       SNDRV_PCM_FMTBIT_S20_3LE | \
+                       SNDRV_PCM_FMTBIT_S24_LE)
 
+static const struct snd_soc_dapm_widget dit_widgets[] = {
+       SND_SOC_DAPM_OUTPUT("spdif-out"),
+};
+
+static const struct snd_soc_dapm_route dit_routes[] = {
+       { "spdif-out", NULL, "Playback" },
+};
 
-static struct snd_soc_codec_driver soc_codec_spdif_dit;
+static struct snd_soc_codec_driver soc_codec_spdif_dit = {
+       .dapm_widgets = dit_widgets,
+       .num_dapm_widgets = ARRAY_SIZE(dit_widgets),
+       .dapm_routes = dit_routes,
+       .num_dapm_routes = ARRAY_SIZE(dit_routes),
+};
 
 static struct snd_soc_dai_driver dit_stub_dai = {
        .name           = "dit-hifi",
index cfb55fe35e98691cbaf7b18404b93e02c1cce73c..06edb396e733f1ad2ef26698a41b4eb1fdc18ea6 100644 (file)
@@ -363,16 +363,18 @@ static void sta32x_watchdog(struct work_struct *work)
        }
 
        if (!sta32x->shutdown)
-               schedule_delayed_work(&sta32x->watchdog_work,
-                                     round_jiffies_relative(HZ));
+               queue_delayed_work(system_power_efficient_wq,
+                                  &sta32x->watchdog_work,
+                                  round_jiffies_relative(HZ));
 }
 
 static void sta32x_watchdog_start(struct sta32x_priv *sta32x)
 {
        if (sta32x->pdata->needs_esd_watchdog) {
                sta32x->shutdown = 0;
-               schedule_delayed_work(&sta32x->watchdog_work,
-                                     round_jiffies_relative(HZ));
+               queue_delayed_work(system_power_efficient_wq,
+                                  &sta32x->watchdog_work,
+                                  round_jiffies_relative(HZ));
        }
 }
 
index b1f6982c7c9c2615cc187649501f231098d1696c..7b8f3d965f43c814b9b5f5f918ddc6c8778129d4 100644 (file)
@@ -29,7 +29,7 @@ MODULE_LICENSE("GPL");
 /* AIC26 driver private data */
 struct aic26 {
        struct spi_device *spi;
-       struct snd_soc_codec codec;
+       struct snd_soc_codec *codec;
        int master;
        int datfm;
        int mclk;
@@ -119,6 +119,22 @@ static int aic26_reg_write(struct snd_soc_codec *codec, unsigned int reg,
        return 0;
 }
 
+static const struct snd_soc_dapm_widget tlv320aic26_dapm_widgets[] = {
+SND_SOC_DAPM_INPUT("MICIN"),
+SND_SOC_DAPM_INPUT("AUX"),
+
+SND_SOC_DAPM_OUTPUT("HPL"),
+SND_SOC_DAPM_OUTPUT("HPR"),
+};
+
+static const struct snd_soc_dapm_route tlv320aic26_dapm_routes[] = {
+       { "Capture", NULL, "MICIN" },
+       { "Capture", NULL, "AUX" },
+
+       { "HPL", NULL, "Playback" },
+       { "HPR", NULL, "Playback" },
+};
+
 /* ---------------------------------------------------------------------
  * Digital Audio Interface Operations
  */
@@ -174,9 +190,9 @@ static int aic26_hw_params(struct snd_pcm_substream *substream,
        dev_dbg(&aic26->spi->dev, "Setting PLLM to %d.%04d\n", jval, dval);
        qval = 0;
        reg = 0x8000 | qval << 11 | pval << 8 | jval << 2;
-       aic26_reg_write(codec, AIC26_REG_PLL_PROG1, reg);
+       snd_soc_write(codec, AIC26_REG_PLL_PROG1, reg);
        reg = dval << 2;
-       aic26_reg_write(codec, AIC26_REG_PLL_PROG2, reg);
+       snd_soc_write(codec, AIC26_REG_PLL_PROG2, reg);
 
        /* Audio Control 3 (master mode, fsref rate) */
        reg = aic26_reg_read_cache(codec, AIC26_REG_AUDIO_CTRL3);
@@ -185,13 +201,13 @@ static int aic26_hw_params(struct snd_pcm_substream *substream,
                reg |= 0x0800;
        if (fsref == 48000)
                reg |= 0x2000;
-       aic26_reg_write(codec, AIC26_REG_AUDIO_CTRL3, reg);
+       snd_soc_write(codec, AIC26_REG_AUDIO_CTRL3, reg);
 
        /* Audio Control 1 (FSref divisor) */
        reg = aic26_reg_read_cache(codec, AIC26_REG_AUDIO_CTRL1);
        reg &= ~0x0fff;
        reg |= wlen | aic26->datfm | (divisor << 3) | divisor;
-       aic26_reg_write(codec, AIC26_REG_AUDIO_CTRL1, reg);
+       snd_soc_write(codec, AIC26_REG_AUDIO_CTRL1, reg);
 
        return 0;
 }
@@ -212,7 +228,7 @@ static int aic26_mute(struct snd_soc_dai *dai, int mute)
                reg |= 0x8080;
        else
                reg &= ~0x8080;
-       aic26_reg_write(codec, AIC26_REG_DAC_GAIN, reg);
+       snd_soc_write(codec, AIC26_REG_DAC_GAIN, reg);
 
        return 0;
 }
@@ -330,7 +346,7 @@ static ssize_t aic26_keyclick_show(struct device *dev,
        struct aic26 *aic26 = dev_get_drvdata(dev);
        int val, amp, freq, len;
 
-       val = aic26_reg_read_cache(&aic26->codec, AIC26_REG_AUDIO_CTRL2);
+       val = aic26_reg_read_cache(aic26->codec, AIC26_REG_AUDIO_CTRL2);
        amp = (val >> 12) & 0x7;
        freq = (125 << ((val >> 8) & 0x7)) >> 1;
        len = 2 * (1 + ((val >> 4) & 0xf));
@@ -346,9 +362,9 @@ static ssize_t aic26_keyclick_set(struct device *dev,
        struct aic26 *aic26 = dev_get_drvdata(dev);
        int val;
 
-       val = aic26_reg_read_cache(&aic26->codec, AIC26_REG_AUDIO_CTRL2);
+       val = aic26_reg_read_cache(aic26->codec, AIC26_REG_AUDIO_CTRL2);
        val |= 0x8000;
-       aic26_reg_write(&aic26->codec, AIC26_REG_AUDIO_CTRL2, val);
+       snd_soc_write(aic26->codec, AIC26_REG_AUDIO_CTRL2, val);
 
        return count;
 }
@@ -360,25 +376,26 @@ static DEVICE_ATTR(keyclick, 0644, aic26_keyclick_show, aic26_keyclick_set);
  */
 static int aic26_probe(struct snd_soc_codec *codec)
 {
+       struct aic26 *aic26 = dev_get_drvdata(codec->dev);
        int ret, err, i, reg;
 
-       dev_info(codec->dev, "Probing AIC26 SoC CODEC driver\n");
+       aic26->codec = codec;
 
        /* Reset the codec to power on defaults */
-       aic26_reg_write(codec, AIC26_REG_RESET, 0xBB00);
+       snd_soc_write(codec, AIC26_REG_RESET, 0xBB00);
 
        /* Power up CODEC */
-       aic26_reg_write(codec, AIC26_REG_POWER_CTRL, 0);
+       snd_soc_write(codec, AIC26_REG_POWER_CTRL, 0);
 
        /* Audio Control 3 (master mode, fsref rate) */
-       reg = aic26_reg_read(codec, AIC26_REG_AUDIO_CTRL3);
+       reg = snd_soc_read(codec, AIC26_REG_AUDIO_CTRL3);
        reg &= ~0xf800;
        reg |= 0x0800; /* set master mode */
-       aic26_reg_write(codec, AIC26_REG_AUDIO_CTRL3, reg);
+       snd_soc_write(codec, AIC26_REG_AUDIO_CTRL3, reg);
 
        /* Fill register cache */
        for (i = 0; i < codec->driver->reg_cache_size; i++)
-               aic26_reg_read(codec, i);
+               snd_soc_read(codec, i);
 
        /* Register the sysfs files for debugging */
        /* Create SysFS files */
@@ -401,6 +418,10 @@ static struct snd_soc_codec_driver aic26_soc_codec_dev = {
        .write = aic26_reg_write,
        .reg_cache_size = AIC26_NUM_REGS,
        .reg_word_size = sizeof(u16),
+       .dapm_widgets = tlv320aic26_dapm_widgets,
+       .num_dapm_widgets = ARRAY_SIZE(tlv320aic26_dapm_widgets),
+       .dapm_routes = tlv320aic26_dapm_routes,
+       .num_dapm_routes = ARRAY_SIZE(tlv320aic26_dapm_routes),
 };
 
 /* ---------------------------------------------------------------------
index e5b926883131180c71cfe6239d1bca3be8fcf674..6e3f269243e050fe5149e60807c0e41cbf6e4147 100644 (file)
@@ -138,8 +138,7 @@ static const u8 aic3x_reg[AIC3X_CACHEREGNUM] = {
 static int snd_soc_dapm_put_volsw_aic3x(struct snd_kcontrol *kcontrol,
                                        struct snd_ctl_elem_value *ucontrol)
 {
-       struct snd_soc_dapm_widget_list *wlist = snd_kcontrol_chip(kcontrol);
-       struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+       struct snd_soc_codec *codec = snd_soc_dapm_kcontrol_codec(kcontrol);
        struct soc_mixer_control *mc =
                (struct soc_mixer_control *)kcontrol->private_value;
        unsigned int reg = mc->reg;
@@ -147,10 +146,9 @@ static int snd_soc_dapm_put_volsw_aic3x(struct snd_kcontrol *kcontrol,
        int max = mc->max;
        unsigned int mask = (1 << fls(max)) - 1;
        unsigned int invert = mc->invert;
-       unsigned short val, val_mask;
-       int ret;
-       struct snd_soc_dapm_path *path;
-       int found = 0;
+       unsigned short val;
+       struct snd_soc_dapm_update update;
+       int connect, change;
 
        val = (ucontrol->value.integer.value[0] & mask);
 
@@ -158,42 +156,26 @@ static int snd_soc_dapm_put_volsw_aic3x(struct snd_kcontrol *kcontrol,
        if (val)
                val = mask;
 
+       connect = !!val;
+
        if (invert)
                val = mask - val;
-       val_mask = mask << shift;
-       val = val << shift;
-
-       mutex_lock(&widget->codec->mutex);
 
-       if (snd_soc_test_bits(widget->codec, reg, val_mask, val)) {
-               /* find dapm widget path assoc with kcontrol */
-               list_for_each_entry(path, &widget->dapm->card->paths, list) {
-                       if (path->kcontrol != kcontrol)
-                               continue;
+       mask <<= shift;
+       val <<= shift;
 
-                       /* found, now check type */
-                       found = 1;
-                       if (val)
-                               /* new connection */
-                               path->connect = invert ? 0 : 1;
-                       else
-                               /* old connection must be powered down */
-                               path->connect = invert ? 1 : 0;
+       change = snd_soc_test_bits(codec, val, mask, reg);
+       if (change) {
+               update.kcontrol = kcontrol;
+               update.reg = reg;
+               update.mask = mask;
+               update.val = val;
 
-                       dapm_mark_dirty(path->source, "tlv320aic3x source");
-                       dapm_mark_dirty(path->sink, "tlv320aic3x sink");
-
-                       break;
-               }
+               snd_soc_dapm_mixer_update_power(&codec->dapm, kcontrol, connect,
+                       &update);
        }
 
-       mutex_unlock(&widget->codec->mutex);
-
-       if (found)
-               snd_soc_dapm_sync(widget->dapm);
-
-       ret = snd_soc_update_bits_locked(widget->codec, reg, val_mask, val);
-       return ret;
+       return change;
 }
 
 /*
@@ -1492,6 +1474,7 @@ static const struct i2c_device_id aic3x_i2c_id[] = {
        { "tlv320aic3x", AIC3X_MODEL_3X },
        { "tlv320aic33", AIC3X_MODEL_33 },
        { "tlv320aic3007", AIC3X_MODEL_3007 },
+       { "tlv320aic3106", AIC3X_MODEL_3X },
        { }
 };
 MODULE_DEVICE_TABLE(i2c, aic3x_i2c_id);
@@ -1582,6 +1565,9 @@ static int aic3x_i2c_remove(struct i2c_client *client)
 #if defined(CONFIG_OF)
 static const struct of_device_id tlv320aic3x_of_match[] = {
        { .compatible = "ti,tlv320aic3x", },
+       { .compatible = "ti,tlv320aic33" },
+       { .compatible = "ti,tlv320aic3007" },
+       { .compatible = "ti,tlv320aic3106" },
        {},
 };
 MODULE_DEVICE_TABLE(of, tlv320aic3x_of_match);
index 8e6e5b0160219ef140748a3dbf31bc7561b9a8d1..1e3884d6b3fbc7202cedb6fe478e5d00e71da49b 100644 (file)
@@ -137,8 +137,6 @@ static const u8 twl4030_reg[TWL4030_CACHEREGNUM] = {
 
 /* codec private data */
 struct twl4030_priv {
-       struct snd_soc_codec codec;
-
        unsigned int codec_powered;
 
        /* reference counts of AIF/APLL users */
index 44621ddc332d881b5e4b05a58507060259446e55..3c79dbb6c32323b36bc974616c5ff1157498f281 100644 (file)
@@ -429,7 +429,8 @@ static irqreturn_t twl6040_audio_handler(int irq, void *data)
        struct snd_soc_codec *codec = data;
        struct twl6040_data *priv = snd_soc_codec_get_drvdata(codec);
 
-       schedule_delayed_work(&priv->hs_jack.work, msecs_to_jiffies(200));
+       queue_delayed_work(system_power_efficient_wq,
+                          &priv->hs_jack.work, msecs_to_jiffies(200));
 
        return IRQ_HANDLED;
 }
@@ -437,9 +438,7 @@ static irqreturn_t twl6040_audio_handler(int irq, void *data)
 static int twl6040_soc_dapm_put_vibra_enum(struct snd_kcontrol *kcontrol,
        struct snd_ctl_elem_value *ucontrol)
 {
-       struct snd_soc_dapm_widget_list *wlist = snd_kcontrol_chip(kcontrol);
-       struct snd_soc_dapm_widget *widget = wlist->widgets[0];
-       struct snd_soc_codec *codec = widget->codec;
+       struct snd_soc_codec *codec = snd_soc_dapm_kcontrol_codec(kcontrol);
        struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
        unsigned int val;
 
index 6d0aa44c375755fb8715c2089d27d4a3686598f1..c94d4c1e3dacc1d7a51f2c6708848d56af6ecf9e 100644 (file)
@@ -325,7 +325,6 @@ static int uda134x_set_dai_fmt(struct snd_soc_dai *codec_dai,
 static int uda134x_set_bias_level(struct snd_soc_codec *codec,
                                  enum snd_soc_bias_level level)
 {
-       u8 reg;
        struct uda134x_platform_data *pd = codec->control_data;
        int i;
        u8 *cache = codec->reg_cache;
@@ -334,23 +333,6 @@ static int uda134x_set_bias_level(struct snd_soc_codec *codec,
 
        switch (level) {
        case SND_SOC_BIAS_ON:
-               /* ADC, DAC on */
-               switch (pd->model) {
-               case UDA134X_UDA1340:
-               case UDA134X_UDA1344:
-               case UDA134X_UDA1345:
-                       reg = uda134x_read_reg_cache(codec, UDA134X_DATA011);
-                       uda134x_write(codec, UDA134X_DATA011, reg | 0x03);
-                       break;
-               case UDA134X_UDA1341:
-                       reg = uda134x_read_reg_cache(codec, UDA134X_STATUS1);
-                       uda134x_write(codec, UDA134X_STATUS1, reg | 0x03);
-                       break;
-               default:
-                       printk(KERN_ERR "UDA134X SoC codec: "
-                              "unsupported model %d\n", pd->model);
-                       return -EINVAL;
-               }
                break;
        case SND_SOC_BIAS_PREPARE:
                /* power on */
@@ -362,23 +344,6 @@ static int uda134x_set_bias_level(struct snd_soc_codec *codec,
                }
                break;
        case SND_SOC_BIAS_STANDBY:
-               /* ADC, DAC power off */
-               switch (pd->model) {
-               case UDA134X_UDA1340:
-               case UDA134X_UDA1344:
-               case UDA134X_UDA1345:
-                       reg = uda134x_read_reg_cache(codec, UDA134X_DATA011);
-                       uda134x_write(codec, UDA134X_DATA011, reg & ~(0x03));
-                       break;
-               case UDA134X_UDA1341:
-                       reg = uda134x_read_reg_cache(codec, UDA134X_STATUS1);
-                       uda134x_write(codec, UDA134X_STATUS1, reg & ~(0x03));
-                       break;
-               default:
-                       printk(KERN_ERR "UDA134X SoC codec: "
-                              "unsupported model %d\n", pd->model);
-                       return -EINVAL;
-               }
                break;
        case SND_SOC_BIAS_OFF:
                /* power off */
@@ -450,6 +415,37 @@ SOC_ENUM("PCM Playback De-emphasis", uda134x_mixer_enum[1]),
 SOC_SINGLE("DC Filter Enable Switch", UDA134X_STATUS0, 0, 1, 0),
 };
 
+/* UDA1341 has the DAC/ADC power down in STATUS1 */
+static const struct snd_soc_dapm_widget uda1341_dapm_widgets[] = {
+       SND_SOC_DAPM_DAC("DAC", "Playback", UDA134X_STATUS1, 0, 0),
+       SND_SOC_DAPM_ADC("ADC", "Capture", UDA134X_STATUS1, 1, 0),
+};
+
+/* UDA1340/4/5 has the DAC/ADC pwoer down in DATA0 11 */
+static const struct snd_soc_dapm_widget uda1340_dapm_widgets[] = {
+       SND_SOC_DAPM_DAC("DAC", "Playback", UDA134X_DATA011, 0, 0),
+       SND_SOC_DAPM_ADC("ADC", "Capture", UDA134X_DATA011, 1, 0),
+};
+
+/* Common DAPM widgets */
+static const struct snd_soc_dapm_widget uda134x_dapm_widgets[] = {
+       SND_SOC_DAPM_INPUT("VINL1"),
+       SND_SOC_DAPM_INPUT("VINR1"),
+       SND_SOC_DAPM_INPUT("VINL2"),
+       SND_SOC_DAPM_INPUT("VINR2"),
+       SND_SOC_DAPM_OUTPUT("VOUTL"),
+       SND_SOC_DAPM_OUTPUT("VOUTR"),
+};
+
+static const struct snd_soc_dapm_route uda134x_dapm_routes[] = {
+       { "ADC", NULL, "VINL1" },
+       { "ADC", NULL, "VINR1" },
+       { "ADC", NULL, "VINL2" },
+       { "ADC", NULL, "VINR2" },
+       { "VOUTL", NULL, "DAC" },
+       { "VOUTR", NULL, "DAC" },
+};
+
 static const struct snd_soc_dai_ops uda134x_dai_ops = {
        .startup        = uda134x_startup,
        .shutdown       = uda134x_shutdown,
@@ -485,6 +481,8 @@ static int uda134x_soc_probe(struct snd_soc_codec *codec)
 {
        struct uda134x_priv *uda134x;
        struct uda134x_platform_data *pd = codec->card->dev->platform_data;
+       const struct snd_soc_dapm_widget *widgets;
+       unsigned num_widgets;
 
        int ret;
 
@@ -526,6 +524,22 @@ static int uda134x_soc_probe(struct snd_soc_codec *codec)
        else
                uda134x_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
 
+       if (pd->model == UDA134X_UDA1341) {
+               widgets = uda1341_dapm_widgets;
+               num_widgets = ARRAY_SIZE(uda1341_dapm_widgets);
+       } else {
+               widgets = uda1340_dapm_widgets;
+               num_widgets = ARRAY_SIZE(uda1340_dapm_widgets);
+       }
+
+       ret = snd_soc_dapm_new_controls(&codec->dapm, widgets, num_widgets);
+       if (ret) {
+               printk(KERN_ERR "%s failed to register dapm controls: %d",
+                       __func__, ret);
+               kfree(uda134x);
+               return ret;
+       }
+
        switch (pd->model) {
        case UDA134X_UDA1340:
        case UDA134X_UDA1344:
@@ -599,6 +613,10 @@ static struct snd_soc_codec_driver soc_codec_dev_uda134x = {
        .read = uda134x_read_reg_cache,
        .write = uda134x_write,
        .set_bias_level = uda134x_set_bias_level,
+       .dapm_widgets = uda134x_dapm_widgets,
+       .num_dapm_widgets = ARRAY_SIZE(uda134x_dapm_widgets),
+       .dapm_routes = uda134x_dapm_routes,
+       .num_dapm_routes = ARRAY_SIZE(uda134x_dapm_routes),
 };
 
 static int uda134x_codec_probe(struct platform_device *pdev)
index 54cd3da09abd036471291901461331231ce78ab7..b7ab2ef567c889ad7969c9bae6e2a4ca640b7de4 100644 (file)
@@ -290,6 +290,18 @@ static const struct snd_kcontrol_new wl1273_controls[] = {
                       snd_wl1273_fm_volume_get, snd_wl1273_fm_volume_put),
 };
 
+static const struct snd_soc_dapm_widget wl1273_dapm_widgets[] = {
+       SND_SOC_DAPM_INPUT("RX"),
+
+       SND_SOC_DAPM_OUTPUT("TX"),
+};
+
+static const struct snd_soc_dapm_route wl1273_dapm_routes[] = {
+       { "Capture", NULL, "RX" },
+
+       { "TX", NULL, "Playback" },
+};
+
 static int wl1273_startup(struct snd_pcm_substream *substream,
                          struct snd_soc_dai *dai)
 {
@@ -483,6 +495,11 @@ static int wl1273_remove(struct snd_soc_codec *codec)
 static struct snd_soc_codec_driver soc_codec_dev_wl1273 = {
        .probe = wl1273_probe,
        .remove = wl1273_remove,
+
+       .dapm_widgets = wl1273_dapm_widgets,
+       .num_dapm_widgets = ARRAY_SIZE(wl1273_dapm_widgets),
+       .dapm_routes = wl1273_dapm_routes,
+       .num_dapm_routes = ARRAY_SIZE(wl1273_dapm_routes),
 };
 
 static int wl1273_platform_probe(struct platform_device *pdev)
index 10adc4145d4639e5e44d3c587a595f476650b6fa..d5ebcb00019b7b807d9ad7e3940333c5ad2a4c4f 100644 (file)
@@ -420,7 +420,7 @@ static int wm0010_firmware_load(const char *name, struct snd_soc_codec *codec)
                xfer->codec = codec;
                list_add_tail(&xfer->list, &xfer_list);
 
-               out = kzalloc(len, GFP_KERNEL);
+               out = kzalloc(len, GFP_KERNEL | GFP_DMA);
                if (!out) {
                        dev_err(codec->dev,
                                "Failed to allocate RX buffer\n");
@@ -429,7 +429,7 @@ static int wm0010_firmware_load(const char *name, struct snd_soc_codec *codec)
                }
                xfer->t.rx_buf = out;
 
-               img = kzalloc(len, GFP_KERNEL);
+               img = kzalloc(len, GFP_KERNEL | GFP_DMA);
                if (!img) {
                        dev_err(codec->dev,
                                "Failed to allocate image buffer\n");
@@ -523,14 +523,14 @@ static int wm0010_stage2_load(struct snd_soc_codec *codec)
        dev_dbg(codec->dev, "Downloading %zu byte stage 2 loader\n", fw->size);
 
        /* Copy to local buffer first as vmalloc causes problems for dma */
-       img = kzalloc(fw->size, GFP_KERNEL);
+       img = kzalloc(fw->size, GFP_KERNEL | GFP_DMA);
        if (!img) {
                dev_err(codec->dev, "Failed to allocate image buffer\n");
                ret = -ENOMEM;
                goto abort2;
        }
 
-       out = kzalloc(fw->size, GFP_KERNEL);
+       out = kzalloc(fw->size, GFP_KERNEL | GFP_DMA);
        if (!out) {
                dev_err(codec->dev, "Failed to allocate output buffer\n");
                ret = -ENOMEM;
@@ -670,14 +670,14 @@ static int wm0010_boot(struct snd_soc_codec *codec)
 
                ret = -ENOMEM;
                len = pll_rec.length + 8;
-               out = kzalloc(len, GFP_KERNEL);
+               out = kzalloc(len, GFP_KERNEL | GFP_DMA);
                if (!out) {
                        dev_err(codec->dev,
                                "Failed to allocate RX buffer\n");
                        goto abort;
                }
 
-               img_swap = kzalloc(len, GFP_KERNEL);
+               img_swap = kzalloc(len, GFP_KERNEL | GFP_DMA);
                if (!img_swap) {
                        dev_err(codec->dev,
                                "Failed to allocate image buffer\n");
index 282fd232cdf7cf62ce6d836578e3c518cd839d59..8bbddc151aa842eed14cae97cdabdcbbd9d8e469 100644 (file)
@@ -998,6 +998,8 @@ SND_SOC_DAPM_INPUT("IN2R"),
 SND_SOC_DAPM_INPUT("IN3L"),
 SND_SOC_DAPM_INPUT("IN3R"),
 
+SND_SOC_DAPM_OUTPUT("DRC1 Signal Activity"),
+
 SND_SOC_DAPM_PGA_E("IN1L PGA", ARIZONA_INPUT_ENABLES, ARIZONA_IN1L_ENA_SHIFT,
                   0, NULL, 0, arizona_in_ev,
                   SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD |
@@ -1421,9 +1423,6 @@ static const struct snd_soc_dapm_route wm5102_dapm_routes[] = {
        { "Tone Generator 1", NULL, "TONE" },
        { "Tone Generator 2", NULL, "TONE" },
 
-       { "Mic Mute Mixer", NULL, "Noise Mixer" },
-       { "Mic Mute Mixer", NULL, "Mic Mixer" },
-
        { "AIF1 Capture", NULL, "AIF1TX1" },
        { "AIF1 Capture", NULL, "AIF1TX2" },
        { "AIF1 Capture", NULL, "AIF1TX3" },
@@ -1499,23 +1498,6 @@ static const struct snd_soc_dapm_route wm5102_dapm_routes[] = {
        { "IN3L PGA", NULL, "IN3L" },
        { "IN3R PGA", NULL, "IN3R" },
 
-       { "ASRC1L", NULL, "ASRC1L Input" },
-       { "ASRC1R", NULL, "ASRC1R Input" },
-       { "ASRC2L", NULL, "ASRC2L Input" },
-       { "ASRC2R", NULL, "ASRC2R Input" },
-
-       { "ISRC1DEC1", NULL, "ISRC1DEC1 Input" },
-       { "ISRC1DEC2", NULL, "ISRC1DEC2 Input" },
-
-       { "ISRC1INT1", NULL, "ISRC1INT1 Input" },
-       { "ISRC1INT2", NULL, "ISRC1INT2 Input" },
-
-       { "ISRC2DEC1", NULL, "ISRC2DEC1 Input" },
-       { "ISRC2DEC2", NULL, "ISRC2DEC2 Input" },
-
-       { "ISRC2INT1", NULL, "ISRC2INT1 Input" },
-       { "ISRC2INT2", NULL, "ISRC2INT2 Input" },
-
        ARIZONA_MIXER_ROUTES("OUT1L", "HPOUT1L"),
        ARIZONA_MIXER_ROUTES("OUT1R", "HPOUT1R"),
        ARIZONA_MIXER_ROUTES("OUT2L", "HPOUT2L"),
@@ -1567,22 +1549,25 @@ static const struct snd_soc_dapm_route wm5102_dapm_routes[] = {
        ARIZONA_MIXER_ROUTES("LHPF3", "LHPF3"),
        ARIZONA_MIXER_ROUTES("LHPF4", "LHPF4"),
 
-       ARIZONA_MUX_ROUTES("ASRC1L"),
-       ARIZONA_MUX_ROUTES("ASRC1R"),
-       ARIZONA_MUX_ROUTES("ASRC2L"),
-       ARIZONA_MUX_ROUTES("ASRC2R"),
+       ARIZONA_MIXER_ROUTES("Mic Mute Mixer", "Noise"),
+       ARIZONA_MIXER_ROUTES("Mic Mute Mixer", "Mic"),
 
-       ARIZONA_MUX_ROUTES("ISRC1INT1"),
-       ARIZONA_MUX_ROUTES("ISRC1INT2"),
+       ARIZONA_MUX_ROUTES("ASRC1L", "ASRC1L"),
+       ARIZONA_MUX_ROUTES("ASRC1R", "ASRC1R"),
+       ARIZONA_MUX_ROUTES("ASRC2L", "ASRC2L"),
+       ARIZONA_MUX_ROUTES("ASRC2R", "ASRC2R"),
 
-       ARIZONA_MUX_ROUTES("ISRC1DEC1"),
-       ARIZONA_MUX_ROUTES("ISRC1DEC2"),
+       ARIZONA_MUX_ROUTES("ISRC1INT1", "ISRC1INT1"),
+       ARIZONA_MUX_ROUTES("ISRC1INT2", "ISRC1INT2"),
 
-       ARIZONA_MUX_ROUTES("ISRC2INT1"),
-       ARIZONA_MUX_ROUTES("ISRC2INT2"),
+       ARIZONA_MUX_ROUTES("ISRC1DEC1", "ISRC1DEC1"),
+       ARIZONA_MUX_ROUTES("ISRC1DEC2", "ISRC1DEC2"),
 
-       ARIZONA_MUX_ROUTES("ISRC2DEC1"),
-       ARIZONA_MUX_ROUTES("ISRC2DEC2"),
+       ARIZONA_MUX_ROUTES("ISRC2INT1", "ISRC2INT1"),
+       ARIZONA_MUX_ROUTES("ISRC2INT2", "ISRC2INT2"),
+
+       ARIZONA_MUX_ROUTES("ISRC2DEC1", "ISRC2DEC1"),
+       ARIZONA_MUX_ROUTES("ISRC2DEC2", "ISRC2DEC2"),
 
        ARIZONA_DSP_ROUTES("DSP1"),
 
@@ -1614,6 +1599,9 @@ static const struct snd_soc_dapm_route wm5102_dapm_routes[] = {
        { "SPKDAT1R", NULL, "OUT5R" },
 
        { "MICSUPP", NULL, "SYSCLK" },
+
+       { "DRC1 Signal Activity", NULL, "DRC1L" },
+       { "DRC1 Signal Activity", NULL, "DRC1R" },
 };
 
 static int wm5102_set_fll(struct snd_soc_codec *codec, int fll_id, int source,
@@ -1781,6 +1769,7 @@ static int wm5102_codec_probe(struct snd_soc_codec *codec)
                return ret;
 
        arizona_init_spk(codec);
+       arizona_init_gpio(codec);
 
        snd_soc_dapm_disable_pin(&codec->dapm, "HAPTICS");
 
index 2e7cb4ba161a5fa2a6980ec59eae33eb9c49ad18..bbd64384ca1ce9b76f8efc63f8ead2e3bc8a1c0c 100644 (file)
@@ -58,14 +58,10 @@ static DECLARE_TLV_DB_SCALE(ng_tlv, -10200, 600, 0);
        SOC_SINGLE(name " NG SPKDAT2R Switch", base, 11, 1, 0)
 
 static const struct snd_kcontrol_new wm5110_snd_controls[] = {
-SOC_SINGLE("IN1 High Performance Switch", ARIZONA_IN1L_CONTROL,
-          ARIZONA_IN1_OSR_SHIFT, 1, 0),
-SOC_SINGLE("IN2 High Performance Switch", ARIZONA_IN2L_CONTROL,
-          ARIZONA_IN2_OSR_SHIFT, 1, 0),
-SOC_SINGLE("IN3 High Performance Switch", ARIZONA_IN3L_CONTROL,
-          ARIZONA_IN3_OSR_SHIFT, 1, 0),
-SOC_SINGLE("IN4 High Performance Switch", ARIZONA_IN4L_CONTROL,
-          ARIZONA_IN4_OSR_SHIFT, 1, 0),
+SOC_ENUM("IN1 OSR", arizona_in_dmic_osr[0]),
+SOC_ENUM("IN2 OSR", arizona_in_dmic_osr[1]),
+SOC_ENUM("IN3 OSR", arizona_in_dmic_osr[2]),
+SOC_ENUM("IN4 OSR", arizona_in_dmic_osr[3]),
 
 SOC_SINGLE_RANGE_TLV("IN1L Volume", ARIZONA_IN1L_CONTROL,
                     ARIZONA_IN1L_PGA_VOL_SHIFT, 0x40, 0x5f, 0, ana_tlv),
@@ -432,6 +428,9 @@ SND_SOC_DAPM_INPUT("IN3R"),
 SND_SOC_DAPM_INPUT("IN4L"),
 SND_SOC_DAPM_INPUT("IN4R"),
 
+SND_SOC_DAPM_OUTPUT("DRC1 Signal Activity"),
+SND_SOC_DAPM_OUTPUT("DRC2 Signal Activity"),
+
 SND_SOC_DAPM_PGA_E("IN1L PGA", ARIZONA_INPUT_ENABLES, ARIZONA_IN1L_ENA_SHIFT,
                   0, NULL, 0, arizona_in_ev,
                   SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD |
@@ -842,9 +841,6 @@ static const struct snd_soc_dapm_route wm5110_dapm_routes[] = {
        { "Tone Generator 1", NULL, "TONE" },
        { "Tone Generator 2", NULL, "TONE" },
 
-       { "Mic Mute Mixer", NULL, "Noise Mixer" },
-       { "Mic Mute Mixer", NULL, "Mic Mixer" },
-
        { "AIF1 Capture", NULL, "AIF1TX1" },
        { "AIF1 Capture", NULL, "AIF1TX2" },
        { "AIF1 Capture", NULL, "AIF1TX3" },
@@ -979,10 +975,13 @@ static const struct snd_soc_dapm_route wm5110_dapm_routes[] = {
        ARIZONA_MIXER_ROUTES("LHPF3", "LHPF3"),
        ARIZONA_MIXER_ROUTES("LHPF4", "LHPF4"),
 
-       ARIZONA_MUX_ROUTES("ASRC1L"),
-       ARIZONA_MUX_ROUTES("ASRC1R"),
-       ARIZONA_MUX_ROUTES("ASRC2L"),
-       ARIZONA_MUX_ROUTES("ASRC2R"),
+       ARIZONA_MIXER_ROUTES("Mic Mute Mixer", "Noise"),
+       ARIZONA_MIXER_ROUTES("Mic Mute Mixer", "Mic"),
+
+       ARIZONA_MUX_ROUTES("ASRC1L", "ASRC1L"),
+       ARIZONA_MUX_ROUTES("ASRC1R", "ASRC1R"),
+       ARIZONA_MUX_ROUTES("ASRC2L", "ASRC2L"),
+       ARIZONA_MUX_ROUTES("ASRC2R", "ASRC2R"),
 
        { "HPOUT1L", NULL, "OUT1L" },
        { "HPOUT1R", NULL, "OUT1R" },
@@ -1006,6 +1005,11 @@ static const struct snd_soc_dapm_route wm5110_dapm_routes[] = {
        { "SPKDAT2R", NULL, "OUT6R" },
 
        { "MICSUPP", NULL, "SYSCLK" },
+
+       { "DRC1 Signal Activity", NULL, "DRC1L" },
+       { "DRC1 Signal Activity", NULL, "DRC1R" },
+       { "DRC2 Signal Activity", NULL, "DRC2L" },
+       { "DRC2 Signal Activity", NULL, "DRC2R" },
 };
 
 static int wm5110_set_fll(struct snd_soc_codec *codec, int fll_id, int source,
@@ -1170,6 +1174,7 @@ static int wm5110_codec_probe(struct snd_soc_codec *codec)
                return ret;
 
        arizona_init_spk(codec);
+       arizona_init_gpio(codec);
 
        snd_soc_dapm_disable_pin(&codec->dapm, "HAPTICS");
 
index 0e8b3aaf6c8d698e9dad1f25fdccbabc9ee9f1ef..af1318ddb0620e8425b5606a3d794c2b57918078 100644 (file)
@@ -1301,7 +1301,8 @@ static irqreturn_t wm8350_hpl_jack_handler(int irq, void *data)
        if (device_may_wakeup(wm8350->dev))
                pm_wakeup_event(wm8350->dev, 250);
 
-       schedule_delayed_work(&priv->hpl.work, msecs_to_jiffies(200));
+       queue_delayed_work(system_power_efficient_wq,
+                          &priv->hpl.work, msecs_to_jiffies(200));
 
        return IRQ_HANDLED;
 }
@@ -1318,7 +1319,8 @@ static irqreturn_t wm8350_hpr_jack_handler(int irq, void *data)
        if (device_may_wakeup(wm8350->dev))
                pm_wakeup_event(wm8350->dev, 250);
 
-       schedule_delayed_work(&priv->hpr.work, msecs_to_jiffies(200));
+       queue_delayed_work(system_power_efficient_wq,
+                          &priv->hpr.work, msecs_to_jiffies(200));
 
        return IRQ_HANDLED;
 }
index 462f5e4d5c05ffcc3c4876d35bca320ff1743afb..7b1a6d5c11c6d63eef94cda1e545f28625f7f786 100644 (file)
 #include <sound/initval.h>
 #include <sound/soc.h>
 
+static const struct snd_soc_dapm_widget wm8727_dapm_widgets[] = {
+SND_SOC_DAPM_OUTPUT("VOUTL"),
+SND_SOC_DAPM_OUTPUT("VOUTR"),
+};
+
+static const struct snd_soc_dapm_route wm8727_dapm_routes[] = {
+       { "VOUTL", NULL, "Playback" },
+       { "VOUTR", NULL, "Playback" },
+};
+
 /*
  * Note this is a simple chip with no configuration interface, sample rate is
  * determined automatically by examining the Master clock and Bit clock ratios
@@ -43,7 +53,12 @@ static struct snd_soc_dai_driver wm8727_dai = {
                },
 };
 
-static struct snd_soc_codec_driver soc_codec_dev_wm8727;
+static struct snd_soc_codec_driver soc_codec_dev_wm8727 = {
+       .dapm_widgets = wm8727_dapm_widgets,
+       .num_dapm_widgets = ARRAY_SIZE(wm8727_dapm_widgets),
+       .dapm_routes = wm8727_dapm_routes,
+       .num_dapm_routes = ARRAY_SIZE(wm8727_dapm_routes),
+};
 
 static int wm8727_probe(struct platform_device *pdev)
 {
index 5276062d6c79fa59d0c2c0992a5f9b52993fa156..456bb8c6d759176a3bf31c5f7003a6e8cfc2d799 100644 (file)
@@ -45,6 +45,7 @@ static const char *wm8731_supply_names[WM8731_NUM_SUPPLIES] = {
 struct wm8731_priv {
        struct regmap *regmap;
        struct regulator_bulk_data supplies[WM8731_NUM_SUPPLIES];
+       const struct snd_pcm_hw_constraint_list *constraints;
        unsigned int sysclk;
        int sysclk_type;
        int playback_fs;
@@ -290,6 +291,36 @@ static const struct _coeff_div coeff_div[] = {
        {12000000, 88200, 136, 0xf, 0x1, 0x1},
 };
 
+/* rates constraints */
+static const unsigned int wm8731_rates_12000000[] = {
+       8000, 32000, 44100, 48000, 96000, 88200,
+};
+
+static const unsigned int wm8731_rates_12288000_18432000[] = {
+       8000, 32000, 48000, 96000,
+};
+
+static const unsigned int wm8731_rates_11289600_16934400[] = {
+       8000, 44100, 88200,
+};
+
+static const struct snd_pcm_hw_constraint_list wm8731_constraints_12000000 = {
+       .list = wm8731_rates_12000000,
+       .count = ARRAY_SIZE(wm8731_rates_12000000),
+};
+
+static const
+struct snd_pcm_hw_constraint_list wm8731_constraints_12288000_18432000 = {
+       .list = wm8731_rates_12288000_18432000,
+       .count = ARRAY_SIZE(wm8731_rates_12288000_18432000),
+};
+
+static const
+struct snd_pcm_hw_constraint_list wm8731_constraints_11289600_16934400 = {
+       .list = wm8731_rates_11289600_16934400,
+       .count = ARRAY_SIZE(wm8731_rates_11289600_16934400),
+};
+
 static inline int get_coeff(int mclk, int rate)
 {
        int i;
@@ -362,17 +393,26 @@ static int wm8731_set_dai_sysclk(struct snd_soc_dai *codec_dai,
        }
 
        switch (freq) {
-       case 11289600:
+       case 0:
+               wm8731->constraints = NULL;
+               break;
        case 12000000:
+               wm8731->constraints = &wm8731_constraints_12000000;
+               break;
        case 12288000:
-       case 16934400:
        case 18432000:
-               wm8731->sysclk = freq;
+               wm8731->constraints = &wm8731_constraints_12288000_18432000;
+               break;
+       case 16934400:
+       case 11289600:
+               wm8731->constraints = &wm8731_constraints_11289600_16934400;
                break;
        default:
                return -EINVAL;
        }
 
+       wm8731->sysclk = freq;
+
        snd_soc_dapm_sync(&codec->dapm);
 
        return 0;
@@ -475,12 +515,26 @@ static int wm8731_set_bias_level(struct snd_soc_codec *codec,
        return 0;
 }
 
+static int wm8731_startup(struct snd_pcm_substream *substream,
+       struct snd_soc_dai *dai)
+{
+       struct wm8731_priv *wm8731 = snd_soc_codec_get_drvdata(dai->codec);
+
+       if (wm8731->constraints)
+               snd_pcm_hw_constraint_list(substream->runtime, 0,
+                                          SNDRV_PCM_HW_PARAM_RATE,
+                                          wm8731->constraints);
+
+       return 0;
+}
+
 #define WM8731_RATES SNDRV_PCM_RATE_8000_96000
 
 #define WM8731_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\
        SNDRV_PCM_FMTBIT_S24_LE)
 
 static const struct snd_soc_dai_ops wm8731_dai_ops = {
+       .startup        = wm8731_startup,
        .hw_params      = wm8731_hw_params,
        .digital_mute   = wm8731_mute,
        .set_sysclk     = wm8731_set_dai_sysclk,
index 0a4ab4c423d123f0bf1ce0e16ed6d261c4b5a52d..d96ebf52d953e850016184f4615f2faf979a738d 100644 (file)
@@ -1456,8 +1456,9 @@ static int wm8753_resume(struct snd_soc_codec *codec)
        if (codec->dapm.suspend_bias_level == SND_SOC_BIAS_ON) {
                wm8753_set_bias_level(codec, SND_SOC_BIAS_PREPARE);
                codec->dapm.bias_level = SND_SOC_BIAS_ON;
-               schedule_delayed_work(&codec->dapm.delayed_work,
-                       msecs_to_jiffies(caps_charge));
+               queue_delayed_work(system_power_efficient_wq,
+                                  &codec->dapm.delayed_work,
+                                  msecs_to_jiffies(caps_charge));
        }
 
        return 0;
index f1fdbf63abb4f41e4e8c1a69b92e1357f3d6df00..8092495605ce3ff147c762d452963f2d8f41c3b8 100644 (file)
 #include <sound/initval.h>
 #include <sound/soc.h>
 
+static const struct snd_soc_dapm_widget wm8782_dapm_widgets[] = {
+SND_SOC_DAPM_INPUT("AINL"),
+SND_SOC_DAPM_INPUT("AINR"),
+};
+
+static const struct snd_soc_dapm_route wm8782_dapm_routes[] = {
+       { "Capture", NULL, "AINL" },
+       { "Capture", NULL, "AINR" },
+};
+
 static struct snd_soc_dai_driver wm8782_dai = {
        .name = "wm8782",
        .capture = {
@@ -40,7 +50,12 @@ static struct snd_soc_dai_driver wm8782_dai = {
        },
 };
 
-static struct snd_soc_codec_driver soc_codec_dev_wm8782;
+static struct snd_soc_codec_driver soc_codec_dev_wm8782 = {
+       .dapm_widgets = wm8782_dapm_widgets,
+       .num_dapm_widgets = ARRAY_SIZE(wm8782_dapm_widgets),
+       .dapm_routes = wm8782_dapm_routes,
+       .num_dapm_routes = ARRAY_SIZE(wm8782_dapm_routes),
+};
 
 static int wm8782_probe(struct platform_device *pdev)
 {
index fa24cedee68769f2b723e2fb8b25c03869ee286a..eebcb1da3b7b10769f0e265f95c90a6638266f71 100644 (file)
@@ -364,9 +364,7 @@ static void wm8903_seq_notifier(struct snd_soc_dapm_context *dapm,
 static int wm8903_class_w_put(struct snd_kcontrol *kcontrol,
                              struct snd_ctl_elem_value *ucontrol)
 {
-       struct snd_soc_dapm_widget_list *wlist = snd_kcontrol_chip(kcontrol);
-       struct snd_soc_dapm_widget *widget = wlist->widgets[0];
-       struct snd_soc_codec *codec = widget->codec;
+       struct snd_soc_codec *codec = snd_soc_dapm_kcontrol_codec(kcontrol);
        struct wm8903_priv *wm8903 = snd_soc_codec_get_drvdata(codec);
        u16 reg;
        int ret;
index 4c9fb142cb2d21d18ac90dd1b6e0272bf48c7e24..91dfbfeda6f81763c8afff38cc567b93b33bb218 100644 (file)
@@ -1012,7 +1012,7 @@ static const struct soc_enum liner_enum =
        SOC_ENUM_SINGLE(WM8904_ANALOGUE_OUT12_ZC, 0, 2, out_mux_text);
 
 static const struct snd_kcontrol_new liner_mux =
-       SOC_DAPM_ENUM("LINEL Mux", liner_enum);
+       SOC_DAPM_ENUM("LINER Mux", liner_enum);
 
 static const char *sidetone_text[] = {
        "None", "Left", "Right"
index 0a4ffdd1d2a70eb19c22445365d0acc5cc854dee..f156010e52bc1ae5f206e1ddcf1b3a62aa1c1f2d 100644 (file)
@@ -263,8 +263,8 @@ SOC_SINGLE("ALC Attack", WM8960_ALC3, 0, 15, 0),
 SOC_SINGLE("Noise Gate Threshold", WM8960_NOISEG, 3, 31, 0),
 SOC_SINGLE("Noise Gate Switch", WM8960_NOISEG, 0, 1, 0),
 
-SOC_DOUBLE_R("ADC PCM Capture Volume", WM8960_LINPATH, WM8960_RINPATH,
-       0, 127, 0),
+SOC_DOUBLE_R_TLV("ADC PCM Capture Volume", WM8960_LADC, WM8960_RADC,
+       0, 255, 0, adc_tlv),
 
 SOC_SINGLE_TLV("Left Output Mixer Boost Bypass Volume",
               WM8960_BYPASS1, 4, 7, 1, bypass_tlv),
@@ -857,9 +857,9 @@ static int wm8960_set_dai_pll(struct snd_soc_dai *codec_dai, int pll_id,
        if (pll_div.k) {
                reg |= 0x20;
 
-               snd_soc_write(codec, WM8960_PLL2, (pll_div.k >> 18) & 0x3f);
-               snd_soc_write(codec, WM8960_PLL3, (pll_div.k >> 9) & 0x1ff);
-               snd_soc_write(codec, WM8960_PLL4, pll_div.k & 0x1ff);
+               snd_soc_write(codec, WM8960_PLL2, (pll_div.k >> 16) & 0xff);
+               snd_soc_write(codec, WM8960_PLL3, (pll_div.k >> 8) & 0xff);
+               snd_soc_write(codec, WM8960_PLL4, pll_div.k & 0xff);
        }
        snd_soc_write(codec, WM8960_PLL1, reg);
 
index e2de9ecfd6417146dc9976f85017bd34be9aef4b..11d80f3b61372d357870b44432d5e1f1c9f6e25c 100644 (file)
@@ -2621,8 +2621,6 @@ static int wm8962_set_dai_sysclk(struct snd_soc_dai *dai, int clk_id,
 
        wm8962->sysclk_rate = freq;
 
-       wm8962_configure_bclk(codec);
-
        return 0;
 }
 
@@ -3046,8 +3044,9 @@ static irqreturn_t wm8962_irq(int irq, void *data)
 
                pm_wakeup_event(dev, 300);
 
-               schedule_delayed_work(&wm8962->mic_work,
-                                     msecs_to_jiffies(250));
+               queue_delayed_work(system_power_efficient_wq,
+                                  &wm8962->mic_work,
+                                  msecs_to_jiffies(250));
        }
 
        return IRQ_HANDLED;
@@ -3175,7 +3174,7 @@ static ssize_t wm8962_beep_set(struct device *dev,
        long int time;
        int ret;
 
-       ret = strict_strtol(buf, 10, &time);
+       ret = kstrtol(buf, 10, &time);
        if (ret != 0)
                return ret;
 
index ba832b77c543af1cbdd13f2618836186c399f789..86426a117b07923dd4f89aa4811cb5d5dc21b907 100644 (file)
@@ -819,8 +819,9 @@ static int clk_sys_event(struct snd_soc_dapm_widget *w,
                 * don't want false reports.
                 */
                if (wm8994->jackdet && !wm8994->clk_has_run) {
-                       schedule_delayed_work(&wm8994->jackdet_bootstrap,
-                                             msecs_to_jiffies(1000));
+                       queue_delayed_work(system_power_efficient_wq,
+                                          &wm8994->jackdet_bootstrap,
+                                          msecs_to_jiffies(1000));
                        wm8994->clk_has_run = true;
                }
                break;
@@ -1432,14 +1433,12 @@ SOC_DAPM_SINGLE("AIF1.1 Switch", WM8994_DAC2_RIGHT_MIXER_ROUTING,
 
 #define WM8994_CLASS_W_SWITCH(xname, reg, shift, max, invert) \
        SOC_SINGLE_EXT(xname, reg, shift, max, invert, \
-               snd_soc_get_volsw, wm8994_put_class_w)
+               snd_soc_dapm_get_volsw, wm8994_put_class_w)
 
 static int wm8994_put_class_w(struct snd_kcontrol *kcontrol,
                              struct snd_ctl_elem_value *ucontrol)
 {
-       struct snd_soc_dapm_widget_list *wlist = snd_kcontrol_chip(kcontrol);
-       struct snd_soc_dapm_widget *w = wlist->widgets[0];
-       struct snd_soc_codec *codec = w->codec;
+       struct snd_soc_codec *codec = snd_soc_dapm_kcontrol_codec(kcontrol);
        int ret;
 
        ret = snd_soc_dapm_put_volsw(kcontrol, ucontrol);
@@ -3487,7 +3486,8 @@ static irqreturn_t wm8994_mic_irq(int irq, void *data)
 
        pm_wakeup_event(codec->dev, 300);
 
-       schedule_delayed_work(&priv->mic_work, msecs_to_jiffies(250));
+       queue_delayed_work(system_power_efficient_wq,
+                          &priv->mic_work, msecs_to_jiffies(250));
 
        return IRQ_HANDLED;
 }
@@ -3575,8 +3575,9 @@ static void wm8958_mic_id(void *data, u16 status)
                /* If nothing present then clear our statuses */
                dev_dbg(codec->dev, "Detected open circuit\n");
 
-               schedule_delayed_work(&wm8994->open_circuit_work,
-                                     msecs_to_jiffies(2500));
+               queue_delayed_work(system_power_efficient_wq,
+                                  &wm8994->open_circuit_work,
+                                  msecs_to_jiffies(2500));
                return;
        }
 
@@ -3690,8 +3691,9 @@ static irqreturn_t wm1811_jackdet_irq(int irq, void *data)
                                    WM1811_JACKDET_DB, 0);
 
                delay = control->pdata.micdet_delay;
-               schedule_delayed_work(&wm8994->mic_work,
-                                     msecs_to_jiffies(delay));
+               queue_delayed_work(system_power_efficient_wq,
+                                  &wm8994->mic_work,
+                                  msecs_to_jiffies(delay));
        } else {
                dev_dbg(codec->dev, "Jack not detected\n");
 
@@ -3936,8 +3938,9 @@ static irqreturn_t wm8958_mic_irq(int irq, void *data)
        id_delay = wm8994->wm8994->pdata.mic_id_delay;
 
        if (wm8994->mic_detecting)
-               schedule_delayed_work(&wm8994->mic_complete_work,
-                                     msecs_to_jiffies(id_delay));
+               queue_delayed_work(system_power_efficient_wq,
+                                  &wm8994->mic_complete_work,
+                                  msecs_to_jiffies(id_delay));
        else
                wm8958_button_det(codec, reg);
 
@@ -4010,9 +4013,6 @@ static int wm8994_codec_probe(struct snd_soc_codec *codec)
 
        wm8994->micdet_irq = control->pdata.micdet_irq;
 
-       pm_runtime_enable(codec->dev);
-       pm_runtime_idle(codec->dev);
-
        /* By default use idle_bias_off, will override for WM8994 */
        codec->dapm.idle_bias_off = 1;
 
@@ -4385,8 +4385,6 @@ static int wm8994_codec_remove(struct snd_soc_codec *codec)
 
        wm8994_set_bias_level(codec, SND_SOC_BIAS_OFF);
 
-       pm_runtime_disable(codec->dev);
-
        for (i = 0; i < ARRAY_SIZE(wm8994->fll_locked); i++)
                wm8994_free_irq(wm8994->wm8994, WM8994_IRQ_FLL1_LOCK + i,
                                &wm8994->fll_locked[i]);
@@ -4445,6 +4443,9 @@ static int wm8994_probe(struct platform_device *pdev)
 
        wm8994->wm8994 = dev_get_drvdata(pdev->dev.parent);
 
+       pm_runtime_enable(&pdev->dev);
+       pm_runtime_idle(&pdev->dev);
+
        return snd_soc_register_codec(&pdev->dev, &soc_codec_dev_wm8994,
                        wm8994_dai, ARRAY_SIZE(wm8994_dai));
 }
@@ -4452,6 +4453,8 @@ static int wm8994_probe(struct platform_device *pdev)
 static int wm8994_remove(struct platform_device *pdev)
 {
        snd_soc_unregister_codec(&pdev->dev);
+       pm_runtime_disable(&pdev->dev);
+
        return 0;
 }
 
index 90a65c427541fb9290616379f7b53bc17c01d0ea..da2899e6c4018fae6cd0a2411b58027f4048b6e4 100644 (file)
@@ -549,12 +549,9 @@ static int check_clk_sys(struct snd_soc_dapm_widget *source,
 static int wm8995_put_class_w(struct snd_kcontrol *kcontrol,
                              struct snd_ctl_elem_value *ucontrol)
 {
-       struct snd_soc_dapm_widget_list *wlist = snd_kcontrol_chip(kcontrol);
-       struct snd_soc_dapm_widget *w = wlist->widgets[0];
-       struct snd_soc_codec *codec;
+       struct snd_soc_codec *codec = snd_soc_dapm_kcontrol_codec(kcontrol);
        int ret;
 
-       codec = w->codec;
        ret = snd_soc_dapm_put_volsw(kcontrol, ucontrol);
        wm8995_update_class_w(codec);
        return ret;
diff --git a/sound/soc/codecs/wm8997.c b/sound/soc/codecs/wm8997.c
new file mode 100644 (file)
index 0000000..6ec3de3
--- /dev/null
@@ -0,0 +1,1175 @@
+/*
+ * wm8997.c  --  WM8997 ALSA SoC Audio driver
+ *
+ * Copyright 2012 Wolfson Microelectronics plc
+ *
+ * Author: Charles Keepax <ckeepax@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/jack.h>
+#include <sound/initval.h>
+#include <sound/tlv.h>
+
+#include <linux/mfd/arizona/core.h>
+#include <linux/mfd/arizona/registers.h>
+
+#include "arizona.h"
+#include "wm8997.h"
+
+struct wm8997_priv {
+       struct arizona_priv core;
+       struct arizona_fll fll[2];
+};
+
+static DECLARE_TLV_DB_SCALE(ana_tlv, 0, 100, 0);
+static DECLARE_TLV_DB_SCALE(eq_tlv, -1200, 100, 0);
+static DECLARE_TLV_DB_SCALE(digital_tlv, -6400, 50, 0);
+static DECLARE_TLV_DB_SCALE(noise_tlv, 0, 600, 0);
+static DECLARE_TLV_DB_SCALE(ng_tlv, -10200, 600, 0);
+
+static const struct reg_default wm8997_sysclk_reva_patch[] = {
+       { 0x301D, 0x7B15 },
+       { 0x301B, 0x0050 },
+       { 0x305D, 0x7B17 },
+       { 0x305B, 0x0050 },
+       { 0x3001, 0x08FE },
+       { 0x3003, 0x00F4 },
+       { 0x3041, 0x08FF },
+       { 0x3043, 0x0005 },
+       { 0x3020, 0x0225 },
+       { 0x3021, 0x0A00 },
+       { 0x3022, 0xE24D },
+       { 0x3023, 0x0800 },
+       { 0x3024, 0xE24D },
+       { 0x3025, 0xF000 },
+       { 0x3060, 0x0226 },
+       { 0x3061, 0x0A00 },
+       { 0x3062, 0xE252 },
+       { 0x3063, 0x0800 },
+       { 0x3064, 0xE252 },
+       { 0x3065, 0xF000 },
+       { 0x3116, 0x022B },
+       { 0x3117, 0xFA00 },
+       { 0x3110, 0x246C },
+       { 0x3111, 0x0A03 },
+       { 0x3112, 0x246E },
+       { 0x3113, 0x0A03 },
+       { 0x3114, 0x2470 },
+       { 0x3115, 0x0A03 },
+       { 0x3126, 0x246C },
+       { 0x3127, 0x0A02 },
+       { 0x3128, 0x246E },
+       { 0x3129, 0x0A02 },
+       { 0x312A, 0x2470 },
+       { 0x312B, 0xFA02 },
+       { 0x3125, 0x0800 },
+};
+
+static int wm8997_sysclk_ev(struct snd_soc_dapm_widget *w,
+                           struct snd_kcontrol *kcontrol, int event)
+{
+       struct snd_soc_codec *codec = w->codec;
+       struct arizona *arizona = dev_get_drvdata(codec->dev->parent);
+       struct regmap *regmap = codec->control_data;
+       const struct reg_default *patch = NULL;
+       int i, patch_size;
+
+       switch (arizona->rev) {
+       case 0:
+               patch = wm8997_sysclk_reva_patch;
+               patch_size = ARRAY_SIZE(wm8997_sysclk_reva_patch);
+               break;
+       default:
+               break;
+       }
+
+       switch (event) {
+       case SND_SOC_DAPM_POST_PMU:
+               if (patch)
+                       for (i = 0; i < patch_size; i++)
+                               regmap_write(regmap, patch[i].reg,
+                                            patch[i].def);
+               break;
+       default:
+               break;
+       }
+
+       return 0;
+}
+
+static const char *wm8997_osr_text[] = {
+       "Low power", "Normal", "High performance",
+};
+
+static const unsigned int wm8997_osr_val[] = {
+       0x0, 0x3, 0x5,
+};
+
+static const struct soc_enum wm8997_hpout_osr[] = {
+       SOC_VALUE_ENUM_SINGLE(ARIZONA_OUTPUT_PATH_CONFIG_1L,
+                             ARIZONA_OUT1_OSR_SHIFT, 0x7, 3,
+                             wm8997_osr_text, wm8997_osr_val),
+       SOC_VALUE_ENUM_SINGLE(ARIZONA_OUTPUT_PATH_CONFIG_3L,
+                             ARIZONA_OUT3_OSR_SHIFT, 0x7, 3,
+                             wm8997_osr_text, wm8997_osr_val),
+};
+
+#define WM8997_NG_SRC(name, base) \
+       SOC_SINGLE(name " NG HPOUT1L Switch",  base, 0, 1, 0), \
+       SOC_SINGLE(name " NG HPOUT1R Switch",  base, 1, 1, 0), \
+       SOC_SINGLE(name " NG EPOUT Switch",    base, 4, 1, 0), \
+       SOC_SINGLE(name " NG SPKOUT Switch",   base, 6, 1, 0), \
+       SOC_SINGLE(name " NG SPKDAT1L Switch", base, 8, 1, 0), \
+       SOC_SINGLE(name " NG SPKDAT1R Switch", base, 9, 1, 0)
+
+static const struct snd_kcontrol_new wm8997_snd_controls[] = {
+SOC_SINGLE("IN1 High Performance Switch", ARIZONA_IN1L_CONTROL,
+          ARIZONA_IN1_OSR_SHIFT, 1, 0),
+SOC_SINGLE("IN2 High Performance Switch", ARIZONA_IN2L_CONTROL,
+          ARIZONA_IN2_OSR_SHIFT, 1, 0),
+
+SOC_SINGLE_RANGE_TLV("IN1L Volume", ARIZONA_IN1L_CONTROL,
+                    ARIZONA_IN1L_PGA_VOL_SHIFT, 0x40, 0x5f, 0, ana_tlv),
+SOC_SINGLE_RANGE_TLV("IN1R Volume", ARIZONA_IN1R_CONTROL,
+                    ARIZONA_IN1R_PGA_VOL_SHIFT, 0x40, 0x5f, 0, ana_tlv),
+SOC_SINGLE_RANGE_TLV("IN2L Volume", ARIZONA_IN2L_CONTROL,
+                    ARIZONA_IN2L_PGA_VOL_SHIFT, 0x40, 0x5f, 0, ana_tlv),
+SOC_SINGLE_RANGE_TLV("IN2R Volume", ARIZONA_IN2R_CONTROL,
+                    ARIZONA_IN2R_PGA_VOL_SHIFT, 0x40, 0x5f, 0, ana_tlv),
+
+SOC_SINGLE_TLV("IN1L Digital Volume", ARIZONA_ADC_DIGITAL_VOLUME_1L,
+              ARIZONA_IN1L_DIG_VOL_SHIFT, 0xbf, 0, digital_tlv),
+SOC_SINGLE_TLV("IN1R Digital Volume", ARIZONA_ADC_DIGITAL_VOLUME_1R,
+              ARIZONA_IN1R_DIG_VOL_SHIFT, 0xbf, 0, digital_tlv),
+SOC_SINGLE_TLV("IN2L Digital Volume", ARIZONA_ADC_DIGITAL_VOLUME_2L,
+              ARIZONA_IN2L_DIG_VOL_SHIFT, 0xbf, 0, digital_tlv),
+SOC_SINGLE_TLV("IN2R Digital Volume", ARIZONA_ADC_DIGITAL_VOLUME_2R,
+              ARIZONA_IN2R_DIG_VOL_SHIFT, 0xbf, 0, digital_tlv),
+
+SOC_ENUM("Input Ramp Up", arizona_in_vi_ramp),
+SOC_ENUM("Input Ramp Down", arizona_in_vd_ramp),
+
+ARIZONA_MIXER_CONTROLS("EQ1", ARIZONA_EQ1MIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("EQ2", ARIZONA_EQ2MIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("EQ3", ARIZONA_EQ3MIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("EQ4", ARIZONA_EQ4MIX_INPUT_1_SOURCE),
+
+SND_SOC_BYTES_MASK("EQ1 Coefficeints", ARIZONA_EQ1_1, 21,
+                  ARIZONA_EQ1_ENA_MASK),
+SND_SOC_BYTES_MASK("EQ2 Coefficeints", ARIZONA_EQ2_1, 21,
+                  ARIZONA_EQ2_ENA_MASK),
+SND_SOC_BYTES_MASK("EQ3 Coefficeints", ARIZONA_EQ3_1, 21,
+                  ARIZONA_EQ3_ENA_MASK),
+SND_SOC_BYTES_MASK("EQ4 Coefficeints", ARIZONA_EQ4_1, 21,
+                  ARIZONA_EQ4_ENA_MASK),
+
+SOC_SINGLE_TLV("EQ1 B1 Volume", ARIZONA_EQ1_1, ARIZONA_EQ1_B1_GAIN_SHIFT,
+              24, 0, eq_tlv),
+SOC_SINGLE_TLV("EQ1 B2 Volume", ARIZONA_EQ1_1, ARIZONA_EQ1_B2_GAIN_SHIFT,
+              24, 0, eq_tlv),
+SOC_SINGLE_TLV("EQ1 B3 Volume", ARIZONA_EQ1_1, ARIZONA_EQ1_B3_GAIN_SHIFT,
+              24, 0, eq_tlv),
+SOC_SINGLE_TLV("EQ1 B4 Volume", ARIZONA_EQ1_2, ARIZONA_EQ1_B4_GAIN_SHIFT,
+              24, 0, eq_tlv),
+SOC_SINGLE_TLV("EQ1 B5 Volume", ARIZONA_EQ1_2, ARIZONA_EQ1_B5_GAIN_SHIFT,
+              24, 0, eq_tlv),
+
+SOC_SINGLE_TLV("EQ2 B1 Volume", ARIZONA_EQ2_1, ARIZONA_EQ2_B1_GAIN_SHIFT,
+              24, 0, eq_tlv),
+SOC_SINGLE_TLV("EQ2 B2 Volume", ARIZONA_EQ2_1, ARIZONA_EQ2_B2_GAIN_SHIFT,
+              24, 0, eq_tlv),
+SOC_SINGLE_TLV("EQ2 B3 Volume", ARIZONA_EQ2_1, ARIZONA_EQ2_B3_GAIN_SHIFT,
+              24, 0, eq_tlv),
+SOC_SINGLE_TLV("EQ2 B4 Volume", ARIZONA_EQ2_2, ARIZONA_EQ2_B4_GAIN_SHIFT,
+              24, 0, eq_tlv),
+SOC_SINGLE_TLV("EQ2 B5 Volume", ARIZONA_EQ2_2, ARIZONA_EQ2_B5_GAIN_SHIFT,
+              24, 0, eq_tlv),
+
+SOC_SINGLE_TLV("EQ3 B1 Volume", ARIZONA_EQ3_1, ARIZONA_EQ3_B1_GAIN_SHIFT,
+              24, 0, eq_tlv),
+SOC_SINGLE_TLV("EQ3 B2 Volume", ARIZONA_EQ3_1, ARIZONA_EQ3_B2_GAIN_SHIFT,
+              24, 0, eq_tlv),
+SOC_SINGLE_TLV("EQ3 B3 Volume", ARIZONA_EQ3_1, ARIZONA_EQ3_B3_GAIN_SHIFT,
+              24, 0, eq_tlv),
+SOC_SINGLE_TLV("EQ3 B4 Volume", ARIZONA_EQ3_2, ARIZONA_EQ3_B4_GAIN_SHIFT,
+              24, 0, eq_tlv),
+SOC_SINGLE_TLV("EQ3 B5 Volume", ARIZONA_EQ3_2, ARIZONA_EQ3_B5_GAIN_SHIFT,
+              24, 0, eq_tlv),
+
+SOC_SINGLE_TLV("EQ4 B1 Volume", ARIZONA_EQ4_1, ARIZONA_EQ4_B1_GAIN_SHIFT,
+              24, 0, eq_tlv),
+SOC_SINGLE_TLV("EQ4 B2 Volume", ARIZONA_EQ4_1, ARIZONA_EQ4_B2_GAIN_SHIFT,
+              24, 0, eq_tlv),
+SOC_SINGLE_TLV("EQ4 B3 Volume", ARIZONA_EQ4_1, ARIZONA_EQ4_B3_GAIN_SHIFT,
+              24, 0, eq_tlv),
+SOC_SINGLE_TLV("EQ4 B4 Volume", ARIZONA_EQ4_2, ARIZONA_EQ4_B4_GAIN_SHIFT,
+              24, 0, eq_tlv),
+SOC_SINGLE_TLV("EQ4 B5 Volume", ARIZONA_EQ4_2, ARIZONA_EQ4_B5_GAIN_SHIFT,
+              24, 0, eq_tlv),
+
+ARIZONA_MIXER_CONTROLS("DRC1L", ARIZONA_DRC1LMIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("DRC1R", ARIZONA_DRC1RMIX_INPUT_1_SOURCE),
+
+SND_SOC_BYTES_MASK("DRC1", ARIZONA_DRC1_CTRL1, 5,
+                  ARIZONA_DRC1R_ENA | ARIZONA_DRC1L_ENA),
+
+ARIZONA_MIXER_CONTROLS("LHPF1", ARIZONA_HPLP1MIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("LHPF2", ARIZONA_HPLP2MIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("LHPF3", ARIZONA_HPLP3MIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("LHPF4", ARIZONA_HPLP4MIX_INPUT_1_SOURCE),
+
+SOC_ENUM("LHPF1 Mode", arizona_lhpf1_mode),
+SOC_ENUM("LHPF2 Mode", arizona_lhpf2_mode),
+SOC_ENUM("LHPF3 Mode", arizona_lhpf3_mode),
+SOC_ENUM("LHPF4 Mode", arizona_lhpf4_mode),
+
+SND_SOC_BYTES("LHPF1 Coefficients", ARIZONA_HPLPF1_2, 1),
+SND_SOC_BYTES("LHPF2 Coefficients", ARIZONA_HPLPF2_2, 1),
+SND_SOC_BYTES("LHPF3 Coefficients", ARIZONA_HPLPF3_2, 1),
+SND_SOC_BYTES("LHPF4 Coefficients", ARIZONA_HPLPF4_2, 1),
+
+SOC_VALUE_ENUM("ISRC1 FSL", arizona_isrc_fsl[0]),
+SOC_VALUE_ENUM("ISRC2 FSL", arizona_isrc_fsl[1]),
+
+ARIZONA_MIXER_CONTROLS("Mic", ARIZONA_MICMIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("Noise", ARIZONA_NOISEMIX_INPUT_1_SOURCE),
+
+SOC_SINGLE_TLV("Noise Generator Volume", ARIZONA_COMFORT_NOISE_GENERATOR,
+              ARIZONA_NOISE_GEN_GAIN_SHIFT, 0x16, 0, noise_tlv),
+
+ARIZONA_MIXER_CONTROLS("HPOUT1L", ARIZONA_OUT1LMIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("HPOUT1R", ARIZONA_OUT1RMIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("EPOUT", ARIZONA_OUT3LMIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("SPKOUT", ARIZONA_OUT4LMIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("SPKDAT1L", ARIZONA_OUT5LMIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("SPKDAT1R", ARIZONA_OUT5RMIX_INPUT_1_SOURCE),
+
+SOC_SINGLE("Speaker High Performance Switch", ARIZONA_OUTPUT_PATH_CONFIG_4L,
+          ARIZONA_OUT4_OSR_SHIFT, 1, 0),
+SOC_SINGLE("SPKDAT1 High Performance Switch", ARIZONA_OUTPUT_PATH_CONFIG_5L,
+          ARIZONA_OUT5_OSR_SHIFT, 1, 0),
+
+SOC_DOUBLE_R("HPOUT1 Digital Switch", ARIZONA_DAC_DIGITAL_VOLUME_1L,
+            ARIZONA_DAC_DIGITAL_VOLUME_1R, ARIZONA_OUT1L_MUTE_SHIFT, 1, 1),
+SOC_SINGLE("EPOUT Digital Switch", ARIZONA_DAC_DIGITAL_VOLUME_3L,
+          ARIZONA_OUT3L_MUTE_SHIFT, 1, 1),
+SOC_SINGLE("Speaker Digital Switch", ARIZONA_DAC_DIGITAL_VOLUME_4L,
+          ARIZONA_OUT4L_MUTE_SHIFT, 1, 1),
+SOC_DOUBLE_R("SPKDAT1 Digital Switch", ARIZONA_DAC_DIGITAL_VOLUME_5L,
+            ARIZONA_DAC_DIGITAL_VOLUME_5R, ARIZONA_OUT5L_MUTE_SHIFT, 1, 1),
+
+SOC_DOUBLE_R_TLV("HPOUT1 Digital Volume", ARIZONA_DAC_DIGITAL_VOLUME_1L,
+                ARIZONA_DAC_DIGITAL_VOLUME_1R, ARIZONA_OUT1L_VOL_SHIFT,
+                0xbf, 0, digital_tlv),
+SOC_SINGLE_TLV("EPOUT Digital Volume", ARIZONA_DAC_DIGITAL_VOLUME_3L,
+              ARIZONA_OUT3L_VOL_SHIFT, 0xbf, 0, digital_tlv),
+SOC_SINGLE_TLV("Speaker Digital Volume", ARIZONA_DAC_DIGITAL_VOLUME_4L,
+              ARIZONA_OUT4L_VOL_SHIFT, 0xbf, 0, digital_tlv),
+SOC_DOUBLE_R_TLV("SPKDAT1 Digital Volume", ARIZONA_DAC_DIGITAL_VOLUME_5L,
+                ARIZONA_DAC_DIGITAL_VOLUME_5R, ARIZONA_OUT5L_VOL_SHIFT,
+                0xbf, 0, digital_tlv),
+
+SOC_VALUE_ENUM("HPOUT1 OSR", wm8997_hpout_osr[0]),
+SOC_VALUE_ENUM("EPOUT OSR", wm8997_hpout_osr[1]),
+
+SOC_ENUM("Output Ramp Up", arizona_out_vi_ramp),
+SOC_ENUM("Output Ramp Down", arizona_out_vd_ramp),
+
+SOC_DOUBLE("SPKDAT1 Switch", ARIZONA_PDM_SPK1_CTRL_1, ARIZONA_SPK1L_MUTE_SHIFT,
+          ARIZONA_SPK1R_MUTE_SHIFT, 1, 1),
+
+SOC_SINGLE("Noise Gate Switch", ARIZONA_NOISE_GATE_CONTROL,
+          ARIZONA_NGATE_ENA_SHIFT, 1, 0),
+SOC_SINGLE_TLV("Noise Gate Threshold Volume", ARIZONA_NOISE_GATE_CONTROL,
+              ARIZONA_NGATE_THR_SHIFT, 7, 1, ng_tlv),
+SOC_ENUM("Noise Gate Hold", arizona_ng_hold),
+
+WM8997_NG_SRC("HPOUT1L", ARIZONA_NOISE_GATE_SELECT_1L),
+WM8997_NG_SRC("HPOUT1R", ARIZONA_NOISE_GATE_SELECT_1R),
+WM8997_NG_SRC("EPOUT", ARIZONA_NOISE_GATE_SELECT_3L),
+WM8997_NG_SRC("SPKOUT", ARIZONA_NOISE_GATE_SELECT_4L),
+WM8997_NG_SRC("SPKDAT1L", ARIZONA_NOISE_GATE_SELECT_5L),
+WM8997_NG_SRC("SPKDAT1R", ARIZONA_NOISE_GATE_SELECT_5R),
+
+ARIZONA_MIXER_CONTROLS("AIF1TX1", ARIZONA_AIF1TX1MIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("AIF1TX2", ARIZONA_AIF1TX2MIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("AIF1TX3", ARIZONA_AIF1TX3MIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("AIF1TX4", ARIZONA_AIF1TX4MIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("AIF1TX5", ARIZONA_AIF1TX5MIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("AIF1TX6", ARIZONA_AIF1TX6MIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("AIF1TX7", ARIZONA_AIF1TX7MIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("AIF1TX8", ARIZONA_AIF1TX8MIX_INPUT_1_SOURCE),
+
+ARIZONA_MIXER_CONTROLS("AIF2TX1", ARIZONA_AIF2TX1MIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("AIF2TX2", ARIZONA_AIF2TX2MIX_INPUT_1_SOURCE),
+
+ARIZONA_MIXER_CONTROLS("SLIMTX1", ARIZONA_SLIMTX1MIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("SLIMTX2", ARIZONA_SLIMTX2MIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("SLIMTX3", ARIZONA_SLIMTX3MIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("SLIMTX4", ARIZONA_SLIMTX4MIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("SLIMTX5", ARIZONA_SLIMTX5MIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("SLIMTX6", ARIZONA_SLIMTX6MIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("SLIMTX7", ARIZONA_SLIMTX7MIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("SLIMTX8", ARIZONA_SLIMTX8MIX_INPUT_1_SOURCE),
+};
+
+ARIZONA_MIXER_ENUMS(EQ1, ARIZONA_EQ1MIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(EQ2, ARIZONA_EQ2MIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(EQ3, ARIZONA_EQ3MIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(EQ4, ARIZONA_EQ4MIX_INPUT_1_SOURCE);
+
+ARIZONA_MIXER_ENUMS(DRC1L, ARIZONA_DRC1LMIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(DRC1R, ARIZONA_DRC1RMIX_INPUT_1_SOURCE);
+
+ARIZONA_MIXER_ENUMS(LHPF1, ARIZONA_HPLP1MIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(LHPF2, ARIZONA_HPLP2MIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(LHPF3, ARIZONA_HPLP3MIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(LHPF4, ARIZONA_HPLP4MIX_INPUT_1_SOURCE);
+
+ARIZONA_MIXER_ENUMS(Mic, ARIZONA_MICMIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(Noise, ARIZONA_NOISEMIX_INPUT_1_SOURCE);
+
+ARIZONA_MIXER_ENUMS(PWM1, ARIZONA_PWM1MIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(PWM2, ARIZONA_PWM2MIX_INPUT_1_SOURCE);
+
+ARIZONA_MIXER_ENUMS(OUT1L, ARIZONA_OUT1LMIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(OUT1R, ARIZONA_OUT1RMIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(OUT3, ARIZONA_OUT3LMIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(SPKOUT, ARIZONA_OUT4LMIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(SPKDAT1L, ARIZONA_OUT5LMIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(SPKDAT1R, ARIZONA_OUT5RMIX_INPUT_1_SOURCE);
+
+ARIZONA_MIXER_ENUMS(AIF1TX1, ARIZONA_AIF1TX1MIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(AIF1TX2, ARIZONA_AIF1TX2MIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(AIF1TX3, ARIZONA_AIF1TX3MIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(AIF1TX4, ARIZONA_AIF1TX4MIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(AIF1TX5, ARIZONA_AIF1TX5MIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(AIF1TX6, ARIZONA_AIF1TX6MIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(AIF1TX7, ARIZONA_AIF1TX7MIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(AIF1TX8, ARIZONA_AIF1TX8MIX_INPUT_1_SOURCE);
+
+ARIZONA_MIXER_ENUMS(AIF2TX1, ARIZONA_AIF2TX1MIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(AIF2TX2, ARIZONA_AIF2TX2MIX_INPUT_1_SOURCE);
+
+ARIZONA_MIXER_ENUMS(SLIMTX1, ARIZONA_SLIMTX1MIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(SLIMTX2, ARIZONA_SLIMTX2MIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(SLIMTX3, ARIZONA_SLIMTX3MIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(SLIMTX4, ARIZONA_SLIMTX4MIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(SLIMTX5, ARIZONA_SLIMTX5MIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(SLIMTX6, ARIZONA_SLIMTX6MIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(SLIMTX7, ARIZONA_SLIMTX7MIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(SLIMTX8, ARIZONA_SLIMTX8MIX_INPUT_1_SOURCE);
+
+ARIZONA_MUX_ENUMS(ISRC1INT1, ARIZONA_ISRC1INT1MIX_INPUT_1_SOURCE);
+ARIZONA_MUX_ENUMS(ISRC1INT2, ARIZONA_ISRC1INT2MIX_INPUT_1_SOURCE);
+
+ARIZONA_MUX_ENUMS(ISRC1DEC1, ARIZONA_ISRC1DEC1MIX_INPUT_1_SOURCE);
+ARIZONA_MUX_ENUMS(ISRC1DEC2, ARIZONA_ISRC1DEC2MIX_INPUT_1_SOURCE);
+
+ARIZONA_MUX_ENUMS(ISRC2INT1, ARIZONA_ISRC2INT1MIX_INPUT_1_SOURCE);
+ARIZONA_MUX_ENUMS(ISRC2INT2, ARIZONA_ISRC2INT2MIX_INPUT_1_SOURCE);
+
+ARIZONA_MUX_ENUMS(ISRC2DEC1, ARIZONA_ISRC2DEC1MIX_INPUT_1_SOURCE);
+ARIZONA_MUX_ENUMS(ISRC2DEC2, ARIZONA_ISRC2DEC2MIX_INPUT_1_SOURCE);
+
+/* Sources selectable as the AEC loopback input; must stay index-aligned
+ * with wm8997_aec_loopback_values[] below.
+ */
+static const char *wm8997_aec_loopback_texts[] = {
+       "HPOUT1L", "HPOUT1R", "EPOUT", "SPKOUT", "SPKDAT1L", "SPKDAT1R",
+};
+
+/* Register values for the AEC loopback source field matching the texts
+ * above; the gaps (2-3, 5, 7) are presumably outputs not fitted on this
+ * part -- confirm against the WM8997 datasheet.
+ */
+static const unsigned int wm8997_aec_loopback_values[] = {
+       0, 1, 4, 6, 8, 9,
+};
+
+/* Value enum over the 4-bit (mask 0xf) AEC loopback source field */
+static const struct soc_enum wm8997_aec_loopback =
+       SOC_VALUE_ENUM_SINGLE(ARIZONA_DAC_AEC_CONTROL_1,
+                             ARIZONA_AEC_LOOPBACK_SRC_SHIFT, 0xf,
+                             ARRAY_SIZE(wm8997_aec_loopback_texts),
+                             wm8997_aec_loopback_texts,
+                             wm8997_aec_loopback_values);
+
+/* DAPM mux control wrapping the enum above */
+static const struct snd_kcontrol_new wm8997_aec_loopback_mux =
+       SOC_DAPM_VALUE_ENUM("AEC Loopback", wm8997_aec_loopback);
+
+/*
+ * DAPM widget table.  Mixer/mux widgets for the paths declared with
+ * ARIZONA_MIXER_ENUMS()/ARIZONA_MUX_ENUMS() above are instantiated by the
+ * ARIZONA_MIXER_WIDGETS()/ARIZONA_MUX_WIDGETS() macros near the end.
+ */
+static const struct snd_soc_dapm_widget wm8997_dapm_widgets[] = {
+/* Clock supplies; SYSCLK runs wm8997_sysclk_ev after power-up */
+SND_SOC_DAPM_SUPPLY("SYSCLK", ARIZONA_SYSTEM_CLOCK_1, ARIZONA_SYSCLK_ENA_SHIFT,
+                   0, wm8997_sysclk_ev, SND_SOC_DAPM_POST_PMU),
+SND_SOC_DAPM_SUPPLY("ASYNCCLK", ARIZONA_ASYNC_CLOCK_1,
+                   ARIZONA_ASYNC_CLK_ENA_SHIFT, 0, NULL, 0),
+SND_SOC_DAPM_SUPPLY("OPCLK", ARIZONA_OUTPUT_SYSTEM_CLOCK,
+                   ARIZONA_OPCLK_ENA_SHIFT, 0, NULL, 0),
+SND_SOC_DAPM_SUPPLY("ASYNCOPCLK", ARIZONA_OUTPUT_ASYNC_CLOCK,
+                   ARIZONA_OPCLK_ASYNC_ENA_SHIFT, 0, NULL, 0),
+
+/* Regulator supplies; the 20 for CPVDD is the enable delay argument of
+ * SND_SOC_DAPM_REGULATOR_SUPPLY (presumably ms) -- confirm against soc-dapm.h
+ */
+SND_SOC_DAPM_REGULATOR_SUPPLY("DBVDD2", 0, 0),
+SND_SOC_DAPM_REGULATOR_SUPPLY("CPVDD", 20, 0),
+SND_SOC_DAPM_REGULATOR_SUPPLY("MICVDD", 0, SND_SOC_DAPM_REGULATOR_BYPASS),
+SND_SOC_DAPM_REGULATOR_SUPPLY("SPKVDD", 0, 0),
+
+/* Internal signal generators */
+SND_SOC_DAPM_SIGGEN("TONE"),
+SND_SOC_DAPM_SIGGEN("NOISE"),
+SND_SOC_DAPM_SIGGEN("HAPTICS"),
+
+/* Physical analogue inputs */
+SND_SOC_DAPM_INPUT("IN1L"),
+SND_SOC_DAPM_INPUT("IN1R"),
+SND_SOC_DAPM_INPUT("IN2L"),
+SND_SOC_DAPM_INPUT("IN2R"),
+
+/* Input PGAs; arizona_in_ev is invoked on every power transition */
+SND_SOC_DAPM_PGA_E("IN1L PGA", ARIZONA_INPUT_ENABLES, ARIZONA_IN1L_ENA_SHIFT,
+                  0, NULL, 0, arizona_in_ev,
+                  SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD |
+                  SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU),
+SND_SOC_DAPM_PGA_E("IN1R PGA", ARIZONA_INPUT_ENABLES, ARIZONA_IN1R_ENA_SHIFT,
+                  0, NULL, 0, arizona_in_ev,
+                  SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD |
+                  SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU),
+SND_SOC_DAPM_PGA_E("IN2L PGA", ARIZONA_INPUT_ENABLES, ARIZONA_IN2L_ENA_SHIFT,
+                  0, NULL, 0, arizona_in_ev,
+                  SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD |
+                  SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU),
+SND_SOC_DAPM_PGA_E("IN2R PGA", ARIZONA_INPUT_ENABLES, ARIZONA_IN2R_ENA_SHIFT,
+                  0, NULL, 0, arizona_in_ev,
+                  SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD |
+                  SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU),
+
+SND_SOC_DAPM_SUPPLY("MICBIAS1", ARIZONA_MIC_BIAS_CTRL_1,
+                   ARIZONA_MICB1_ENA_SHIFT, 0, NULL, 0),
+SND_SOC_DAPM_SUPPLY("MICBIAS2", ARIZONA_MIC_BIAS_CTRL_2,
+                   ARIZONA_MICB2_ENA_SHIFT, 0, NULL, 0),
+SND_SOC_DAPM_SUPPLY("MICBIAS3", ARIZONA_MIC_BIAS_CTRL_3,
+                   ARIZONA_MICB3_ENA_SHIFT, 0, NULL, 0),
+
+SND_SOC_DAPM_PGA("Noise Generator", ARIZONA_COMFORT_NOISE_GENERATOR,
+                ARIZONA_NOISE_GEN_ENA_SHIFT, 0, NULL, 0),
+
+SND_SOC_DAPM_PGA("Tone Generator 1", ARIZONA_TONE_GENERATOR_1,
+                ARIZONA_TONE1_ENA_SHIFT, 0, NULL, 0),
+SND_SOC_DAPM_PGA("Tone Generator 2", ARIZONA_TONE_GENERATOR_1,
+                ARIZONA_TONE2_ENA_SHIFT, 0, NULL, 0),
+
+SND_SOC_DAPM_PGA("Mic Mute Mixer", ARIZONA_MIC_NOISE_MIX_CONTROL_1,
+                ARIZONA_MICMUTE_MIX_ENA_SHIFT, 0, NULL, 0),
+
+/* Signal-processing blocks: EQ, DRC and low/high-pass filters */
+SND_SOC_DAPM_PGA("EQ1", ARIZONA_EQ1_1, ARIZONA_EQ1_ENA_SHIFT, 0, NULL, 0),
+SND_SOC_DAPM_PGA("EQ2", ARIZONA_EQ2_1, ARIZONA_EQ2_ENA_SHIFT, 0, NULL, 0),
+SND_SOC_DAPM_PGA("EQ3", ARIZONA_EQ3_1, ARIZONA_EQ3_ENA_SHIFT, 0, NULL, 0),
+SND_SOC_DAPM_PGA("EQ4", ARIZONA_EQ4_1, ARIZONA_EQ4_ENA_SHIFT, 0, NULL, 0),
+
+SND_SOC_DAPM_PGA("DRC1L", ARIZONA_DRC1_CTRL1, ARIZONA_DRC1L_ENA_SHIFT, 0,
+                NULL, 0),
+SND_SOC_DAPM_PGA("DRC1R", ARIZONA_DRC1_CTRL1, ARIZONA_DRC1R_ENA_SHIFT, 0,
+                NULL, 0),
+
+SND_SOC_DAPM_PGA("LHPF1", ARIZONA_HPLPF1_1, ARIZONA_LHPF1_ENA_SHIFT, 0,
+                NULL, 0),
+SND_SOC_DAPM_PGA("LHPF2", ARIZONA_HPLPF2_1, ARIZONA_LHPF2_ENA_SHIFT, 0,
+                NULL, 0),
+SND_SOC_DAPM_PGA("LHPF3", ARIZONA_HPLPF3_1, ARIZONA_LHPF3_ENA_SHIFT, 0,
+                NULL, 0),
+SND_SOC_DAPM_PGA("LHPF4", ARIZONA_HPLPF4_1, ARIZONA_LHPF4_ENA_SHIFT, 0,
+                NULL, 0),
+
+SND_SOC_DAPM_PGA("PWM1 Driver", ARIZONA_PWM_DRIVE_1, ARIZONA_PWM1_ENA_SHIFT,
+                0, NULL, 0),
+SND_SOC_DAPM_PGA("PWM2 Driver", ARIZONA_PWM_DRIVE_1, ARIZONA_PWM2_ENA_SHIFT,
+                0, NULL, 0),
+
+/* ISRC power widgets; note the hardware numbers channels from 0 (INT0/DEC0)
+ * while the widget names count from 1.
+ */
+SND_SOC_DAPM_PGA("ISRC1INT1", ARIZONA_ISRC_1_CTRL_3,
+                ARIZONA_ISRC1_INT0_ENA_SHIFT, 0, NULL, 0),
+SND_SOC_DAPM_PGA("ISRC1INT2", ARIZONA_ISRC_1_CTRL_3,
+                ARIZONA_ISRC1_INT1_ENA_SHIFT, 0, NULL, 0),
+
+SND_SOC_DAPM_PGA("ISRC1DEC1", ARIZONA_ISRC_1_CTRL_3,
+                ARIZONA_ISRC1_DEC0_ENA_SHIFT, 0, NULL, 0),
+SND_SOC_DAPM_PGA("ISRC1DEC2", ARIZONA_ISRC_1_CTRL_3,
+                ARIZONA_ISRC1_DEC1_ENA_SHIFT, 0, NULL, 0),
+
+SND_SOC_DAPM_PGA("ISRC2INT1", ARIZONA_ISRC_2_CTRL_3,
+                ARIZONA_ISRC2_INT0_ENA_SHIFT, 0, NULL, 0),
+SND_SOC_DAPM_PGA("ISRC2INT2", ARIZONA_ISRC_2_CTRL_3,
+                ARIZONA_ISRC2_INT1_ENA_SHIFT, 0, NULL, 0),
+
+SND_SOC_DAPM_PGA("ISRC2DEC1", ARIZONA_ISRC_2_CTRL_3,
+                ARIZONA_ISRC2_DEC0_ENA_SHIFT, 0, NULL, 0),
+SND_SOC_DAPM_PGA("ISRC2DEC2", ARIZONA_ISRC_2_CTRL_3,
+                ARIZONA_ISRC2_DEC1_ENA_SHIFT, 0, NULL, 0),
+
+/* AIF1: 8 TX / 8 RX channels */
+SND_SOC_DAPM_AIF_OUT("AIF1TX1", NULL, 0,
+                    ARIZONA_AIF1_TX_ENABLES, ARIZONA_AIF1TX1_ENA_SHIFT, 0),
+SND_SOC_DAPM_AIF_OUT("AIF1TX2", NULL, 0,
+                    ARIZONA_AIF1_TX_ENABLES, ARIZONA_AIF1TX2_ENA_SHIFT, 0),
+SND_SOC_DAPM_AIF_OUT("AIF1TX3", NULL, 0,
+                    ARIZONA_AIF1_TX_ENABLES, ARIZONA_AIF1TX3_ENA_SHIFT, 0),
+SND_SOC_DAPM_AIF_OUT("AIF1TX4", NULL, 0,
+                    ARIZONA_AIF1_TX_ENABLES, ARIZONA_AIF1TX4_ENA_SHIFT, 0),
+SND_SOC_DAPM_AIF_OUT("AIF1TX5", NULL, 0,
+                    ARIZONA_AIF1_TX_ENABLES, ARIZONA_AIF1TX5_ENA_SHIFT, 0),
+SND_SOC_DAPM_AIF_OUT("AIF1TX6", NULL, 0,
+                    ARIZONA_AIF1_TX_ENABLES, ARIZONA_AIF1TX6_ENA_SHIFT, 0),
+SND_SOC_DAPM_AIF_OUT("AIF1TX7", NULL, 0,
+                    ARIZONA_AIF1_TX_ENABLES, ARIZONA_AIF1TX7_ENA_SHIFT, 0),
+SND_SOC_DAPM_AIF_OUT("AIF1TX8", NULL, 0,
+                    ARIZONA_AIF1_TX_ENABLES, ARIZONA_AIF1TX8_ENA_SHIFT, 0),
+
+SND_SOC_DAPM_AIF_IN("AIF1RX1", NULL, 0,
+                   ARIZONA_AIF1_RX_ENABLES, ARIZONA_AIF1RX1_ENA_SHIFT, 0),
+SND_SOC_DAPM_AIF_IN("AIF1RX2", NULL, 0,
+                   ARIZONA_AIF1_RX_ENABLES, ARIZONA_AIF1RX2_ENA_SHIFT, 0),
+SND_SOC_DAPM_AIF_IN("AIF1RX3", NULL, 0,
+                   ARIZONA_AIF1_RX_ENABLES, ARIZONA_AIF1RX3_ENA_SHIFT, 0),
+SND_SOC_DAPM_AIF_IN("AIF1RX4", NULL, 0,
+                   ARIZONA_AIF1_RX_ENABLES, ARIZONA_AIF1RX4_ENA_SHIFT, 0),
+SND_SOC_DAPM_AIF_IN("AIF1RX5", NULL, 0,
+                   ARIZONA_AIF1_RX_ENABLES, ARIZONA_AIF1RX5_ENA_SHIFT, 0),
+SND_SOC_DAPM_AIF_IN("AIF1RX6", NULL, 0,
+                   ARIZONA_AIF1_RX_ENABLES, ARIZONA_AIF1RX6_ENA_SHIFT, 0),
+SND_SOC_DAPM_AIF_IN("AIF1RX7", NULL, 0,
+                   ARIZONA_AIF1_RX_ENABLES, ARIZONA_AIF1RX7_ENA_SHIFT, 0),
+SND_SOC_DAPM_AIF_IN("AIF1RX8", NULL, 0,
+                   ARIZONA_AIF1_RX_ENABLES, ARIZONA_AIF1RX8_ENA_SHIFT, 0),
+
+/* AIF2: 2 TX / 2 RX channels */
+SND_SOC_DAPM_AIF_OUT("AIF2TX1", NULL, 0,
+                    ARIZONA_AIF2_TX_ENABLES, ARIZONA_AIF2TX1_ENA_SHIFT, 0),
+SND_SOC_DAPM_AIF_OUT("AIF2TX2", NULL, 0,
+                    ARIZONA_AIF2_TX_ENABLES, ARIZONA_AIF2TX2_ENA_SHIFT, 0),
+
+SND_SOC_DAPM_AIF_IN("AIF2RX1", NULL, 0,
+                   ARIZONA_AIF2_RX_ENABLES, ARIZONA_AIF2RX1_ENA_SHIFT, 0),
+SND_SOC_DAPM_AIF_IN("AIF2RX2", NULL, 0,
+                   ARIZONA_AIF2_RX_ENABLES, ARIZONA_AIF2RX2_ENA_SHIFT, 0),
+
+/* SLIMbus: 8 TX / 8 RX channels */
+SND_SOC_DAPM_AIF_OUT("SLIMTX1", NULL, 0,
+                    ARIZONA_SLIMBUS_TX_CHANNEL_ENABLE,
+                    ARIZONA_SLIMTX1_ENA_SHIFT, 0),
+SND_SOC_DAPM_AIF_OUT("SLIMTX2", NULL, 0,
+                    ARIZONA_SLIMBUS_TX_CHANNEL_ENABLE,
+                    ARIZONA_SLIMTX2_ENA_SHIFT, 0),
+SND_SOC_DAPM_AIF_OUT("SLIMTX3", NULL, 0,
+                    ARIZONA_SLIMBUS_TX_CHANNEL_ENABLE,
+                    ARIZONA_SLIMTX3_ENA_SHIFT, 0),
+SND_SOC_DAPM_AIF_OUT("SLIMTX4", NULL, 0,
+                    ARIZONA_SLIMBUS_TX_CHANNEL_ENABLE,
+                    ARIZONA_SLIMTX4_ENA_SHIFT, 0),
+SND_SOC_DAPM_AIF_OUT("SLIMTX5", NULL, 0,
+                    ARIZONA_SLIMBUS_TX_CHANNEL_ENABLE,
+                    ARIZONA_SLIMTX5_ENA_SHIFT, 0),
+SND_SOC_DAPM_AIF_OUT("SLIMTX6", NULL, 0,
+                    ARIZONA_SLIMBUS_TX_CHANNEL_ENABLE,
+                    ARIZONA_SLIMTX6_ENA_SHIFT, 0),
+SND_SOC_DAPM_AIF_OUT("SLIMTX7", NULL, 0,
+                    ARIZONA_SLIMBUS_TX_CHANNEL_ENABLE,
+                    ARIZONA_SLIMTX7_ENA_SHIFT, 0),
+SND_SOC_DAPM_AIF_OUT("SLIMTX8", NULL, 0,
+                    ARIZONA_SLIMBUS_TX_CHANNEL_ENABLE,
+                    ARIZONA_SLIMTX8_ENA_SHIFT, 0),
+
+SND_SOC_DAPM_AIF_IN("SLIMRX1", NULL, 0,
+                   ARIZONA_SLIMBUS_RX_CHANNEL_ENABLE,
+                   ARIZONA_SLIMRX1_ENA_SHIFT, 0),
+SND_SOC_DAPM_AIF_IN("SLIMRX2", NULL, 0,
+                   ARIZONA_SLIMBUS_RX_CHANNEL_ENABLE,
+                   ARIZONA_SLIMRX2_ENA_SHIFT, 0),
+SND_SOC_DAPM_AIF_IN("SLIMRX3", NULL, 0,
+                   ARIZONA_SLIMBUS_RX_CHANNEL_ENABLE,
+                   ARIZONA_SLIMRX3_ENA_SHIFT, 0),
+SND_SOC_DAPM_AIF_IN("SLIMRX4", NULL, 0,
+                   ARIZONA_SLIMBUS_RX_CHANNEL_ENABLE,
+                   ARIZONA_SLIMRX4_ENA_SHIFT, 0),
+SND_SOC_DAPM_AIF_IN("SLIMRX5", NULL, 0,
+                   ARIZONA_SLIMBUS_RX_CHANNEL_ENABLE,
+                   ARIZONA_SLIMRX5_ENA_SHIFT, 0),
+SND_SOC_DAPM_AIF_IN("SLIMRX6", NULL, 0,
+                   ARIZONA_SLIMBUS_RX_CHANNEL_ENABLE,
+                   ARIZONA_SLIMRX6_ENA_SHIFT, 0),
+SND_SOC_DAPM_AIF_IN("SLIMRX7", NULL, 0,
+                   ARIZONA_SLIMBUS_RX_CHANNEL_ENABLE,
+                   ARIZONA_SLIMRX7_ENA_SHIFT, 0),
+SND_SOC_DAPM_AIF_IN("SLIMRX8", NULL, 0,
+                   ARIZONA_SLIMBUS_RX_CHANNEL_ENABLE,
+                   ARIZONA_SLIMRX8_ENA_SHIFT, 0),
+
+SND_SOC_DAPM_VALUE_MUX("AEC Loopback", ARIZONA_DAC_AEC_CONTROL_1,
+                      ARIZONA_AEC_LOOPBACK_ENA_SHIFT, 0,
+                      &wm8997_aec_loopback_mux),
+
+/* Output PGAs; the headphone pair is powered by arizona_hp_ev (hence
+ * SND_SOC_NOPM here), the others by arizona_out_ev.
+ */
+SND_SOC_DAPM_PGA_E("OUT1L", SND_SOC_NOPM,
+                  ARIZONA_OUT1L_ENA_SHIFT, 0, NULL, 0, arizona_hp_ev,
+                  SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),
+SND_SOC_DAPM_PGA_E("OUT1R", SND_SOC_NOPM,
+                  ARIZONA_OUT1R_ENA_SHIFT, 0, NULL, 0, arizona_hp_ev,
+                  SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),
+SND_SOC_DAPM_PGA_E("OUT3L", ARIZONA_OUTPUT_ENABLES_1,
+                  ARIZONA_OUT3L_ENA_SHIFT, 0, NULL, 0, arizona_out_ev,
+                  SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),
+SND_SOC_DAPM_PGA_E("OUT5L", ARIZONA_OUTPUT_ENABLES_1,
+                  ARIZONA_OUT5L_ENA_SHIFT, 0, NULL, 0, arizona_out_ev,
+                  SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),
+SND_SOC_DAPM_PGA_E("OUT5R", ARIZONA_OUTPUT_ENABLES_1,
+                  ARIZONA_OUT5R_ENA_SHIFT, 0, NULL, 0, arizona_out_ev,
+                  SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),
+
+/* Mixer/mux widget expansion for the enums declared at the top of file */
+ARIZONA_MIXER_WIDGETS(EQ1, "EQ1"),
+ARIZONA_MIXER_WIDGETS(EQ2, "EQ2"),
+ARIZONA_MIXER_WIDGETS(EQ3, "EQ3"),
+ARIZONA_MIXER_WIDGETS(EQ4, "EQ4"),
+
+ARIZONA_MIXER_WIDGETS(DRC1L, "DRC1L"),
+ARIZONA_MIXER_WIDGETS(DRC1R, "DRC1R"),
+
+ARIZONA_MIXER_WIDGETS(LHPF1, "LHPF1"),
+ARIZONA_MIXER_WIDGETS(LHPF2, "LHPF2"),
+ARIZONA_MIXER_WIDGETS(LHPF3, "LHPF3"),
+ARIZONA_MIXER_WIDGETS(LHPF4, "LHPF4"),
+
+ARIZONA_MIXER_WIDGETS(Mic, "Mic"),
+ARIZONA_MIXER_WIDGETS(Noise, "Noise"),
+
+ARIZONA_MIXER_WIDGETS(PWM1, "PWM1"),
+ARIZONA_MIXER_WIDGETS(PWM2, "PWM2"),
+
+ARIZONA_MIXER_WIDGETS(OUT1L, "HPOUT1L"),
+ARIZONA_MIXER_WIDGETS(OUT1R, "HPOUT1R"),
+ARIZONA_MIXER_WIDGETS(OUT3, "EPOUT"),
+ARIZONA_MIXER_WIDGETS(SPKOUT, "SPKOUT"),
+ARIZONA_MIXER_WIDGETS(SPKDAT1L, "SPKDAT1L"),
+ARIZONA_MIXER_WIDGETS(SPKDAT1R, "SPKDAT1R"),
+
+ARIZONA_MIXER_WIDGETS(AIF1TX1, "AIF1TX1"),
+ARIZONA_MIXER_WIDGETS(AIF1TX2, "AIF1TX2"),
+ARIZONA_MIXER_WIDGETS(AIF1TX3, "AIF1TX3"),
+ARIZONA_MIXER_WIDGETS(AIF1TX4, "AIF1TX4"),
+ARIZONA_MIXER_WIDGETS(AIF1TX5, "AIF1TX5"),
+ARIZONA_MIXER_WIDGETS(AIF1TX6, "AIF1TX6"),
+ARIZONA_MIXER_WIDGETS(AIF1TX7, "AIF1TX7"),
+ARIZONA_MIXER_WIDGETS(AIF1TX8, "AIF1TX8"),
+
+ARIZONA_MIXER_WIDGETS(AIF2TX1, "AIF2TX1"),
+ARIZONA_MIXER_WIDGETS(AIF2TX2, "AIF2TX2"),
+
+ARIZONA_MIXER_WIDGETS(SLIMTX1, "SLIMTX1"),
+ARIZONA_MIXER_WIDGETS(SLIMTX2, "SLIMTX2"),
+ARIZONA_MIXER_WIDGETS(SLIMTX3, "SLIMTX3"),
+ARIZONA_MIXER_WIDGETS(SLIMTX4, "SLIMTX4"),
+ARIZONA_MIXER_WIDGETS(SLIMTX5, "SLIMTX5"),
+ARIZONA_MIXER_WIDGETS(SLIMTX6, "SLIMTX6"),
+ARIZONA_MIXER_WIDGETS(SLIMTX7, "SLIMTX7"),
+ARIZONA_MIXER_WIDGETS(SLIMTX8, "SLIMTX8"),
+
+ARIZONA_MUX_WIDGETS(ISRC1DEC1, "ISRC1DEC1"),
+ARIZONA_MUX_WIDGETS(ISRC1DEC2, "ISRC1DEC2"),
+
+ARIZONA_MUX_WIDGETS(ISRC1INT1, "ISRC1INT1"),
+ARIZONA_MUX_WIDGETS(ISRC1INT2, "ISRC1INT2"),
+
+ARIZONA_MUX_WIDGETS(ISRC2DEC1, "ISRC2DEC1"),
+ARIZONA_MUX_WIDGETS(ISRC2DEC2, "ISRC2DEC2"),
+
+ARIZONA_MUX_WIDGETS(ISRC2INT1, "ISRC2INT1"),
+ARIZONA_MUX_WIDGETS(ISRC2INT2, "ISRC2INT2"),
+
+/* Physical output pins */
+SND_SOC_DAPM_OUTPUT("HPOUT1L"),
+SND_SOC_DAPM_OUTPUT("HPOUT1R"),
+SND_SOC_DAPM_OUTPUT("EPOUTN"),
+SND_SOC_DAPM_OUTPUT("EPOUTP"),
+SND_SOC_DAPM_OUTPUT("SPKOUTN"),
+SND_SOC_DAPM_OUTPUT("SPKOUTP"),
+SND_SOC_DAPM_OUTPUT("SPKDAT1L"),
+SND_SOC_DAPM_OUTPUT("SPKDAT1R"),
+
+SND_SOC_DAPM_OUTPUT("MICSUPP"),
+};
+
+/*
+ * Every source selectable by a mixer input mux on this device; "name" is
+ * the sink mixer's control-name prefix.  Expanded per mixer input by the
+ * ARIZONA_MIXER_ROUTES()/ARIZONA_MUX_ROUTES() entries in
+ * wm8997_dapm_routes[] below.
+ */
+#define ARIZONA_MIXER_INPUT_ROUTES(name)       \
+       { name, "Noise Generator", "Noise Generator" }, \
+       { name, "Tone Generator 1", "Tone Generator 1" }, \
+       { name, "Tone Generator 2", "Tone Generator 2" }, \
+       { name, "Haptics", "HAPTICS" }, \
+       { name, "AEC", "AEC Loopback" }, \
+       { name, "IN1L", "IN1L PGA" }, \
+       { name, "IN1R", "IN1R PGA" }, \
+       { name, "IN2L", "IN2L PGA" }, \
+       { name, "IN2R", "IN2R PGA" }, \
+       { name, "Mic Mute Mixer", "Mic Mute Mixer" }, \
+       { name, "AIF1RX1", "AIF1RX1" }, \
+       { name, "AIF1RX2", "AIF1RX2" }, \
+       { name, "AIF1RX3", "AIF1RX3" }, \
+       { name, "AIF1RX4", "AIF1RX4" }, \
+       { name, "AIF1RX5", "AIF1RX5" }, \
+       { name, "AIF1RX6", "AIF1RX6" }, \
+       { name, "AIF1RX7", "AIF1RX7" }, \
+       { name, "AIF1RX8", "AIF1RX8" }, \
+       { name, "AIF2RX1", "AIF2RX1" }, \
+       { name, "AIF2RX2", "AIF2RX2" }, \
+       { name, "SLIMRX1", "SLIMRX1" }, \
+       { name, "SLIMRX2", "SLIMRX2" }, \
+       { name, "SLIMRX3", "SLIMRX3" }, \
+       { name, "SLIMRX4", "SLIMRX4" }, \
+       { name, "SLIMRX5", "SLIMRX5" }, \
+       { name, "SLIMRX6", "SLIMRX6" }, \
+       { name, "SLIMRX7", "SLIMRX7" }, \
+       { name, "SLIMRX8", "SLIMRX8" }, \
+       { name, "EQ1", "EQ1" }, \
+       { name, "EQ2", "EQ2" }, \
+       { name, "EQ3", "EQ3" }, \
+       { name, "EQ4", "EQ4" }, \
+       { name, "DRC1L", "DRC1L" }, \
+       { name, "DRC1R", "DRC1R" }, \
+       { name, "LHPF1", "LHPF1" }, \
+       { name, "LHPF2", "LHPF2" }, \
+       { name, "LHPF3", "LHPF3" }, \
+       { name, "LHPF4", "LHPF4" }, \
+       { name, "ISRC1DEC1", "ISRC1DEC1" }, \
+       { name, "ISRC1DEC2", "ISRC1DEC2" }, \
+       { name, "ISRC1INT1", "ISRC1INT1" }, \
+       { name, "ISRC1INT2", "ISRC1INT2" }, \
+       { name, "ISRC2DEC1", "ISRC2DEC1" }, \
+       { name, "ISRC2DEC2", "ISRC2DEC2" }, \
+       { name, "ISRC2INT1", "ISRC2INT1" }, \
+       { name, "ISRC2INT2", "ISRC2INT2" }
+
+/*
+ * DAPM routing table: regulator/clock supplies, DAI stream links, input
+ * PGAs, mixer/mux route expansion and the physical output pins.  Widgets
+ * referenced here but not in wm8997_dapm_widgets[] (e.g. "OUT4L") are
+ * presumably registered by the shared Arizona core (arizona_init_spk())
+ * -- confirm against arizona.c.
+ */
+static const struct snd_soc_dapm_route wm8997_dapm_routes[] = {
+       { "AIF2 Capture", NULL, "DBVDD2" },
+       { "AIF2 Playback", NULL, "DBVDD2" },
+
+       { "OUT1L", NULL, "CPVDD" },
+       { "OUT1R", NULL, "CPVDD" },
+       { "OUT3L", NULL, "CPVDD" },
+
+       { "OUT4L", NULL, "SPKVDD" },
+
+       { "OUT1L", NULL, "SYSCLK" },
+       { "OUT1R", NULL, "SYSCLK" },
+       { "OUT3L", NULL, "SYSCLK" },
+       { "OUT4L", NULL, "SYSCLK" },
+
+       { "IN1L", NULL, "SYSCLK" },
+       { "IN1R", NULL, "SYSCLK" },
+       { "IN2L", NULL, "SYSCLK" },
+       { "IN2R", NULL, "SYSCLK" },
+
+       { "MICBIAS1", NULL, "MICVDD" },
+       { "MICBIAS2", NULL, "MICVDD" },
+       { "MICBIAS3", NULL, "MICVDD" },
+
+       { "Noise Generator", NULL, "SYSCLK" },
+       { "Tone Generator 1", NULL, "SYSCLK" },
+       { "Tone Generator 2", NULL, "SYSCLK" },
+
+       { "Noise Generator", NULL, "NOISE" },
+       { "Tone Generator 1", NULL, "TONE" },
+       { "Tone Generator 2", NULL, "TONE" },
+
+       /* Link AIF/SLIMbus channel widgets to their DAI streams */
+       { "AIF1 Capture", NULL, "AIF1TX1" },
+       { "AIF1 Capture", NULL, "AIF1TX2" },
+       { "AIF1 Capture", NULL, "AIF1TX3" },
+       { "AIF1 Capture", NULL, "AIF1TX4" },
+       { "AIF1 Capture", NULL, "AIF1TX5" },
+       { "AIF1 Capture", NULL, "AIF1TX6" },
+       { "AIF1 Capture", NULL, "AIF1TX7" },
+       { "AIF1 Capture", NULL, "AIF1TX8" },
+
+       { "AIF1RX1", NULL, "AIF1 Playback" },
+       { "AIF1RX2", NULL, "AIF1 Playback" },
+       { "AIF1RX3", NULL, "AIF1 Playback" },
+       { "AIF1RX4", NULL, "AIF1 Playback" },
+       { "AIF1RX5", NULL, "AIF1 Playback" },
+       { "AIF1RX6", NULL, "AIF1 Playback" },
+       { "AIF1RX7", NULL, "AIF1 Playback" },
+       { "AIF1RX8", NULL, "AIF1 Playback" },
+
+       { "AIF2 Capture", NULL, "AIF2TX1" },
+       { "AIF2 Capture", NULL, "AIF2TX2" },
+
+       { "AIF2RX1", NULL, "AIF2 Playback" },
+       { "AIF2RX2", NULL, "AIF2 Playback" },
+
+       { "Slim1 Capture", NULL, "SLIMTX1" },
+       { "Slim1 Capture", NULL, "SLIMTX2" },
+       { "Slim1 Capture", NULL, "SLIMTX3" },
+       { "Slim1 Capture", NULL, "SLIMTX4" },
+
+       { "SLIMRX1", NULL, "Slim1 Playback" },
+       { "SLIMRX2", NULL, "Slim1 Playback" },
+       { "SLIMRX3", NULL, "Slim1 Playback" },
+       { "SLIMRX4", NULL, "Slim1 Playback" },
+
+       { "Slim2 Capture", NULL, "SLIMTX5" },
+       { "Slim2 Capture", NULL, "SLIMTX6" },
+
+       { "SLIMRX5", NULL, "Slim2 Playback" },
+       { "SLIMRX6", NULL, "Slim2 Playback" },
+
+       { "Slim3 Capture", NULL, "SLIMTX7" },
+       { "Slim3 Capture", NULL, "SLIMTX8" },
+
+       { "SLIMRX7", NULL, "Slim3 Playback" },
+       { "SLIMRX8", NULL, "Slim3 Playback" },
+
+       { "AIF1 Playback", NULL, "SYSCLK" },
+       { "AIF2 Playback", NULL, "SYSCLK" },
+       { "Slim1 Playback", NULL, "SYSCLK" },
+       { "Slim2 Playback", NULL, "SYSCLK" },
+       { "Slim3 Playback", NULL, "SYSCLK" },
+
+       { "AIF1 Capture", NULL, "SYSCLK" },
+       { "AIF2 Capture", NULL, "SYSCLK" },
+       { "Slim1 Capture", NULL, "SYSCLK" },
+       { "Slim2 Capture", NULL, "SYSCLK" },
+       { "Slim3 Capture", NULL, "SYSCLK" },
+
+       { "IN1L PGA", NULL, "IN1L" },
+       { "IN1R PGA", NULL, "IN1R" },
+
+       { "IN2L PGA", NULL, "IN2L" },
+       { "IN2R PGA", NULL, "IN2R" },
+
+       ARIZONA_MIXER_ROUTES("OUT1L", "HPOUT1L"),
+       ARIZONA_MIXER_ROUTES("OUT1R", "HPOUT1R"),
+       ARIZONA_MIXER_ROUTES("OUT3L", "EPOUT"),
+
+       ARIZONA_MIXER_ROUTES("OUT4L", "SPKOUT"),
+       ARIZONA_MIXER_ROUTES("OUT5L", "SPKDAT1L"),
+       ARIZONA_MIXER_ROUTES("OUT5R", "SPKDAT1R"),
+
+       ARIZONA_MIXER_ROUTES("PWM1 Driver", "PWM1"),
+       ARIZONA_MIXER_ROUTES("PWM2 Driver", "PWM2"),
+
+       ARIZONA_MIXER_ROUTES("AIF1TX1", "AIF1TX1"),
+       ARIZONA_MIXER_ROUTES("AIF1TX2", "AIF1TX2"),
+       ARIZONA_MIXER_ROUTES("AIF1TX3", "AIF1TX3"),
+       ARIZONA_MIXER_ROUTES("AIF1TX4", "AIF1TX4"),
+       ARIZONA_MIXER_ROUTES("AIF1TX5", "AIF1TX5"),
+       ARIZONA_MIXER_ROUTES("AIF1TX6", "AIF1TX6"),
+       ARIZONA_MIXER_ROUTES("AIF1TX7", "AIF1TX7"),
+       ARIZONA_MIXER_ROUTES("AIF1TX8", "AIF1TX8"),
+
+       ARIZONA_MIXER_ROUTES("AIF2TX1", "AIF2TX1"),
+       ARIZONA_MIXER_ROUTES("AIF2TX2", "AIF2TX2"),
+
+       ARIZONA_MIXER_ROUTES("SLIMTX1", "SLIMTX1"),
+       ARIZONA_MIXER_ROUTES("SLIMTX2", "SLIMTX2"),
+       ARIZONA_MIXER_ROUTES("SLIMTX3", "SLIMTX3"),
+       ARIZONA_MIXER_ROUTES("SLIMTX4", "SLIMTX4"),
+       ARIZONA_MIXER_ROUTES("SLIMTX5", "SLIMTX5"),
+       ARIZONA_MIXER_ROUTES("SLIMTX6", "SLIMTX6"),
+       ARIZONA_MIXER_ROUTES("SLIMTX7", "SLIMTX7"),
+       ARIZONA_MIXER_ROUTES("SLIMTX8", "SLIMTX8"),
+
+       ARIZONA_MIXER_ROUTES("EQ1", "EQ1"),
+       ARIZONA_MIXER_ROUTES("EQ2", "EQ2"),
+       ARIZONA_MIXER_ROUTES("EQ3", "EQ3"),
+       ARIZONA_MIXER_ROUTES("EQ4", "EQ4"),
+
+       ARIZONA_MIXER_ROUTES("DRC1L", "DRC1L"),
+       ARIZONA_MIXER_ROUTES("DRC1R", "DRC1R"),
+
+       ARIZONA_MIXER_ROUTES("LHPF1", "LHPF1"),
+       ARIZONA_MIXER_ROUTES("LHPF2", "LHPF2"),
+       ARIZONA_MIXER_ROUTES("LHPF3", "LHPF3"),
+       ARIZONA_MIXER_ROUTES("LHPF4", "LHPF4"),
+
+       ARIZONA_MIXER_ROUTES("Mic Mute Mixer", "Noise"),
+       ARIZONA_MIXER_ROUTES("Mic Mute Mixer", "Mic"),
+
+       ARIZONA_MUX_ROUTES("ISRC1INT1", "ISRC1INT1"),
+       /* Fix: the control prefix must match the ISRC1INT2 widget; it was
+        * mistakenly "ISRC2INT2", duplicating the ISRC2INT2 entry below and
+        * leaving ISRC1INT2's mux unrouted.
+        */
+       ARIZONA_MUX_ROUTES("ISRC1INT2", "ISRC1INT2"),
+
+       ARIZONA_MUX_ROUTES("ISRC1DEC1", "ISRC1DEC1"),
+       ARIZONA_MUX_ROUTES("ISRC1DEC2", "ISRC1DEC2"),
+
+       ARIZONA_MUX_ROUTES("ISRC2INT1", "ISRC2INT1"),
+       ARIZONA_MUX_ROUTES("ISRC2INT2", "ISRC2INT2"),
+
+       ARIZONA_MUX_ROUTES("ISRC2DEC1", "ISRC2DEC1"),
+       ARIZONA_MUX_ROUTES("ISRC2DEC2", "ISRC2DEC2"),
+
+       /* Output stages; each also feeds the AEC loopback source mux */
+       { "AEC Loopback", "HPOUT1L", "OUT1L" },
+       { "AEC Loopback", "HPOUT1R", "OUT1R" },
+       { "HPOUT1L", NULL, "OUT1L" },
+       { "HPOUT1R", NULL, "OUT1R" },
+
+       { "AEC Loopback", "EPOUT", "OUT3L" },
+       { "EPOUTN", NULL, "OUT3L" },
+       { "EPOUTP", NULL, "OUT3L" },
+
+       { "AEC Loopback", "SPKOUT", "OUT4L" },
+       { "SPKOUTN", NULL, "OUT4L" },
+       { "SPKOUTP", NULL, "OUT4L" },
+
+       { "AEC Loopback", "SPKDAT1L", "OUT5L" },
+       { "AEC Loopback", "SPKDAT1R", "OUT5R" },
+       { "SPKDAT1L", NULL, "OUT5L" },
+       { "SPKDAT1R", NULL, "OUT5R" },
+
+       { "MICSUPP", NULL, "SYSCLK" },
+};
+
+/* snd_soc set_pll() callback: dispatch a WM8997_FLL* clock id to the
+ * matching arizona_set_fll()/arizona_set_fll_refclk() helper.
+ * Returns -EINVAL for an unknown fll_id.
+ */
+static int wm8997_set_fll(struct snd_soc_codec *codec, int fll_id, int source,
+                         unsigned int Fref, unsigned int Fout)
+{
+       struct wm8997_priv *wm8997 = snd_soc_codec_get_drvdata(codec);
+       struct arizona_fll *fll;
+
+       /* Resolve the FLL instance first, then pick the entry point */
+       switch (fll_id) {
+       case WM8997_FLL1:
+       case WM8997_FLL1_REFCLK:
+               fll = &wm8997->fll[0];
+               break;
+       case WM8997_FLL2:
+       case WM8997_FLL2_REFCLK:
+               fll = &wm8997->fll[1];
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       if (fll_id == WM8997_FLL1_REFCLK || fll_id == WM8997_FLL2_REFCLK)
+               return arizona_set_fll_refclk(fll, source, Fref, Fout);
+
+       return arizona_set_fll(fll, source, Fref, Fout);
+}
+
+/* All DAIs support the full 8kHz-192kHz range */
+#define WM8997_RATES SNDRV_PCM_RATE_8000_192000
+
+#define WM8997_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\
+                       SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE)
+
+/*
+ * DAI table: two audio interfaces (AIF1 8ch, AIF2 2ch) plus three SLIMbus
+ * logical DAIs (4+2+2 channels).  .id values match the interface numbers
+ * used elsewhere; the SLIMbus DAIs have no register base and use the
+ * simple (non-clock-configuring) ops.
+ */
+static struct snd_soc_dai_driver wm8997_dai[] = {
+       {
+               .name = "wm8997-aif1",
+               .id = 1,
+               .base = ARIZONA_AIF1_BCLK_CTRL,
+               .playback = {
+                       .stream_name = "AIF1 Playback",
+                       .channels_min = 1,
+                       .channels_max = 8,
+                       .rates = WM8997_RATES,
+                       .formats = WM8997_FORMATS,
+               },
+               .capture = {
+                        .stream_name = "AIF1 Capture",
+                        .channels_min = 1,
+                        .channels_max = 8,
+                        .rates = WM8997_RATES,
+                        .formats = WM8997_FORMATS,
+                },
+               .ops = &arizona_dai_ops,
+               .symmetric_rates = 1,
+       },
+       {
+               .name = "wm8997-aif2",
+               .id = 2,
+               .base = ARIZONA_AIF2_BCLK_CTRL,
+               .playback = {
+                       .stream_name = "AIF2 Playback",
+                       .channels_min = 1,
+                       .channels_max = 2,
+                       .rates = WM8997_RATES,
+                       .formats = WM8997_FORMATS,
+               },
+               .capture = {
+                        .stream_name = "AIF2 Capture",
+                        .channels_min = 1,
+                        .channels_max = 2,
+                        .rates = WM8997_RATES,
+                        .formats = WM8997_FORMATS,
+                },
+               .ops = &arizona_dai_ops,
+               .symmetric_rates = 1,
+       },
+       {
+               .name = "wm8997-slim1",
+               .id = 3,
+               .playback = {
+                       .stream_name = "Slim1 Playback",
+                       .channels_min = 1,
+                       .channels_max = 4,
+                       .rates = WM8997_RATES,
+                       .formats = WM8997_FORMATS,
+               },
+               .capture = {
+                       .stream_name = "Slim1 Capture",
+                       .channels_min = 1,
+                       .channels_max = 4,
+                       .rates = WM8997_RATES,
+                       .formats = WM8997_FORMATS,
+               },
+               .ops = &arizona_simple_dai_ops,
+       },
+       {
+               .name = "wm8997-slim2",
+               .id = 4,
+               .playback = {
+                       .stream_name = "Slim2 Playback",
+                       .channels_min = 1,
+                       .channels_max = 2,
+                       .rates = WM8997_RATES,
+                       .formats = WM8997_FORMATS,
+               },
+               .capture = {
+                       .stream_name = "Slim2 Capture",
+                       .channels_min = 1,
+                       .channels_max = 2,
+                       .rates = WM8997_RATES,
+                       .formats = WM8997_FORMATS,
+               },
+               .ops = &arizona_simple_dai_ops,
+       },
+       {
+               .name = "wm8997-slim3",
+               .id = 5,
+               .playback = {
+                       .stream_name = "Slim3 Playback",
+                       .channels_min = 1,
+                       .channels_max = 2,
+                       .rates = WM8997_RATES,
+                       .formats = WM8997_FORMATS,
+               },
+               .capture = {
+                       .stream_name = "Slim3 Capture",
+                       .channels_min = 1,
+                       .channels_max = 2,
+                       .rates = WM8997_RATES,
+                       .formats = WM8997_FORMATS,
+               },
+               .ops = &arizona_simple_dai_ops,
+       },
+};
+
+/* CODEC-level init: route register I/O through the MFD core's regmap,
+ * run the shared Arizona speaker setup and publish our DAPM context so
+ * core code can reference it.
+ */
+static int wm8997_codec_probe(struct snd_soc_codec *codec)
+{
+       struct wm8997_priv *priv = snd_soc_codec_get_drvdata(codec);
+       int ret;
+
+       codec->control_data = priv->core.arizona->regmap;
+
+       /* 32-bit register addresses, 16-bit values, backed by regmap */
+       ret = snd_soc_codec_set_cache_io(codec, 32, 16, SND_SOC_REGMAP);
+       if (ret != 0)
+               return ret;
+
+       arizona_init_spk(codec);
+
+       /* Haptics source starts disconnected -- presumably until a machine
+        * driver enables it; confirm against users of the HAPTICS siggen.
+        */
+       snd_soc_dapm_disable_pin(&codec->dapm, "HAPTICS");
+
+       priv->core.arizona->dapm = &codec->dapm;
+
+       return 0;
+}
+
+/* CODEC-level teardown: detach our DAPM context from the shared core
+ * state (set in wm8997_codec_probe()).
+ */
+static int wm8997_codec_remove(struct snd_soc_codec *codec)
+{
+       struct wm8997_priv *wm8997 = snd_soc_codec_get_drvdata(codec);
+       struct arizona *arizona = wm8997->core.arizona;
+
+       arizona->dapm = NULL;
+
+       return 0;
+}
+
+/* Volume-update (VU) latch bit in the DAC digital volume registers */
+#define WM8997_DIG_VU 0x0200
+
+/* Registers in which wm8997_probe() latches the VU bit once at init */
+static unsigned int wm8997_digital_vu[] = {
+       ARIZONA_DAC_DIGITAL_VOLUME_1L,
+       ARIZONA_DAC_DIGITAL_VOLUME_1R,
+       ARIZONA_DAC_DIGITAL_VOLUME_3L,
+       ARIZONA_DAC_DIGITAL_VOLUME_4L,
+       ARIZONA_DAC_DIGITAL_VOLUME_5L,
+       ARIZONA_DAC_DIGITAL_VOLUME_5R,
+};
+
+/* ASoC CODEC driver descriptor; clocking callbacks are shared Arizona
+ * helpers or thin wrappers over them (wm8997_set_fll).
+ */
+static struct snd_soc_codec_driver soc_codec_dev_wm8997 = {
+       .probe = wm8997_codec_probe,
+       .remove = wm8997_codec_remove,
+
+       /* Keep bias off when idle rather than at standby */
+       .idle_bias_off = true,
+
+       .set_sysclk = arizona_set_sysclk,
+       .set_pll = wm8997_set_fll,
+
+       .controls = wm8997_snd_controls,
+       .num_controls = ARRAY_SIZE(wm8997_snd_controls),
+       .dapm_widgets = wm8997_dapm_widgets,
+       .num_dapm_widgets = ARRAY_SIZE(wm8997_dapm_widgets),
+       .dapm_routes = wm8997_dapm_routes,
+       .num_dapm_routes = ARRAY_SIZE(wm8997_dapm_routes),
+};
+
+/* Platform driver bind: allocate per-device state, configure FLLs, fixed
+ * sample rates and VU latches, then register the CODEC with ASoC.
+ * Fix: disable runtime PM again if CODEC registration fails, so a failed
+ * probe does not leave the device with runtime PM enabled (previously the
+ * only pm_runtime_disable() was in wm8997_remove(), which never runs when
+ * probe fails).
+ */
+static int wm8997_probe(struct platform_device *pdev)
+{
+       struct arizona *arizona = dev_get_drvdata(pdev->dev.parent);
+       struct wm8997_priv *wm8997;
+       int ret, i;
+
+       wm8997 = devm_kzalloc(&pdev->dev, sizeof(struct wm8997_priv),
+                             GFP_KERNEL);
+       if (wm8997 == NULL)
+               return -ENOMEM;
+       platform_set_drvdata(pdev, wm8997);
+
+       wm8997->core.arizona = arizona;
+       wm8997->core.num_inputs = 4;
+
+       for (i = 0; i < ARRAY_SIZE(wm8997->fll); i++)
+               wm8997->fll[i].vco_mult = 1;
+
+       arizona_init_fll(arizona, 1, ARIZONA_FLL1_CONTROL_1 - 1,
+                        ARIZONA_IRQ_FLL1_LOCK, ARIZONA_IRQ_FLL1_CLOCK_OK,
+                        &wm8997->fll[0]);
+       arizona_init_fll(arizona, 2, ARIZONA_FLL2_CONTROL_1 - 1,
+                        ARIZONA_IRQ_FLL2_LOCK, ARIZONA_IRQ_FLL2_CLOCK_OK,
+                        &wm8997->fll[1]);
+
+       /* SR2 fixed at 8kHz, SR3 fixed at 16kHz */
+       regmap_update_bits(arizona->regmap, ARIZONA_SAMPLE_RATE_2,
+                          ARIZONA_SAMPLE_RATE_2_MASK, 0x11);
+       regmap_update_bits(arizona->regmap, ARIZONA_SAMPLE_RATE_3,
+                          ARIZONA_SAMPLE_RATE_3_MASK, 0x12);
+
+       for (i = 0; i < ARRAY_SIZE(wm8997_dai); i++)
+               arizona_init_dai(&wm8997->core, i);
+
+       /* Latch volume update bits */
+       for (i = 0; i < ARRAY_SIZE(wm8997_digital_vu); i++)
+               regmap_update_bits(arizona->regmap, wm8997_digital_vu[i],
+                                  WM8997_DIG_VU, WM8997_DIG_VU);
+
+       pm_runtime_enable(&pdev->dev);
+       pm_runtime_idle(&pdev->dev);
+
+       ret = snd_soc_register_codec(&pdev->dev, &soc_codec_dev_wm8997,
+                                    wm8997_dai, ARRAY_SIZE(wm8997_dai));
+       if (ret < 0)
+               pm_runtime_disable(&pdev->dev);
+
+       return ret;
+}
+
+/* Platform driver unbind: unregister the CODEC and drop runtime PM. */
+static int wm8997_remove(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+
+       snd_soc_unregister_codec(dev);
+       pm_runtime_disable(dev);
+
+       return 0;
+}
+
+/* Bound against the "wm8997-codec" child device created by the Arizona
+ * MFD core (see MODULE_ALIAS below).
+ */
+static struct platform_driver wm8997_codec_driver = {
+       .driver = {
+               .name = "wm8997-codec",
+               .owner = THIS_MODULE,
+       },
+       .probe = wm8997_probe,
+       .remove = wm8997_remove,
+};
+
+module_platform_driver(wm8997_codec_driver);
+
+MODULE_DESCRIPTION("ASoC WM8997 driver");
+MODULE_AUTHOR("Charles Keepax <ckeepax@opensource.wolfsonmicro.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:wm8997-codec");
diff --git a/sound/soc/codecs/wm8997.h b/sound/soc/codecs/wm8997.h
new file mode 100644 (file)
index 0000000..5e91c6a
--- /dev/null
@@ -0,0 +1,23 @@
+/*
+ * wm8997.h  --  WM8997 ALSA SoC Audio driver
+ *
+ * Copyright 2012 Wolfson Microelectronics plc
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _WM8997_H
+#define _WM8997_H
+
+#include "arizona.h"
+
+/* Clock ids accepted by wm8997_set_fll() via the snd_soc set_pll API:
+ * plain ids retune the FLL, *_REFCLK ids configure its reference clock.
+ */
+#define WM8997_FLL1        1
+#define WM8997_FLL2        2
+#define WM8997_FLL1_REFCLK 3
+#define WM8997_FLL2_REFCLK 4
+
+#endif
index 05252ac936a369cb210a9043105d9d95cb614679..b38f3506418ff0d43dc927fe45424e3d9d998d75 100644 (file)
@@ -225,15 +225,8 @@ struct wm_coeff_ctl_ops {
                     struct snd_ctl_elem_info *uinfo);
 };
 
-struct wm_coeff {
-       struct device *dev;
-       struct list_head ctl_list;
-       struct regmap *regmap;
-};
-
 struct wm_coeff_ctl {
        const char *name;
-       struct snd_card *card;
        struct wm_adsp_alg_region region;
        struct wm_coeff_ctl_ops ops;
        struct wm_adsp *adsp;
@@ -378,7 +371,6 @@ static int wm_coeff_info(struct snd_kcontrol *kcontrol,
 static int wm_coeff_write_control(struct snd_kcontrol *kcontrol,
                                  const void *buf, size_t len)
 {
-       struct wm_coeff *wm_coeff= snd_kcontrol_chip(kcontrol);
        struct wm_coeff_ctl *ctl = (struct wm_coeff_ctl *)kcontrol->private_value;
        struct wm_adsp_alg_region *region = &ctl->region;
        const struct wm_adsp_region *mem;
@@ -401,7 +393,7 @@ static int wm_coeff_write_control(struct snd_kcontrol *kcontrol,
        if (!scratch)
                return -ENOMEM;
 
-       ret = regmap_raw_write(wm_coeff->regmap, reg, scratch,
+       ret = regmap_raw_write(adsp->regmap, reg, scratch,
                               ctl->len);
        if (ret) {
                adsp_err(adsp, "Failed to write %zu bytes to %x\n",
@@ -434,7 +426,6 @@ static int wm_coeff_put(struct snd_kcontrol *kcontrol,
 static int wm_coeff_read_control(struct snd_kcontrol *kcontrol,
                                 void *buf, size_t len)
 {
-       struct wm_coeff *wm_coeff= snd_kcontrol_chip(kcontrol);
        struct wm_coeff_ctl *ctl = (struct wm_coeff_ctl *)kcontrol->private_value;
        struct wm_adsp_alg_region *region = &ctl->region;
        const struct wm_adsp_region *mem;
@@ -457,7 +448,7 @@ static int wm_coeff_read_control(struct snd_kcontrol *kcontrol,
        if (!scratch)
                return -ENOMEM;
 
-       ret = regmap_raw_read(wm_coeff->regmap, reg, scratch, ctl->len);
+       ret = regmap_raw_read(adsp->regmap, reg, scratch, ctl->len);
        if (ret) {
                adsp_err(adsp, "Failed to read %zu bytes from %x\n",
                         ctl->len, reg);
@@ -481,37 +472,18 @@ static int wm_coeff_get(struct snd_kcontrol *kcontrol,
        return 0;
 }
 
-static int wm_coeff_add_kcontrol(struct wm_coeff *wm_coeff,
-                                struct wm_coeff_ctl *ctl,
-                                const struct snd_kcontrol_new *kctl)
-{
-       int ret;
-       struct snd_kcontrol *kcontrol;
-
-       kcontrol = snd_ctl_new1(kctl, wm_coeff);
-       ret = snd_ctl_add(ctl->card, kcontrol);
-       if (ret < 0) {
-               dev_err(wm_coeff->dev, "Failed to add %s: %d\n",
-                       kctl->name, ret);
-               return ret;
-       }
-       ctl->kcontrol = kcontrol;
-       return 0;
-}
-
 struct wmfw_ctl_work {
-       struct wm_coeff *wm_coeff;
+       struct wm_adsp *adsp;
        struct wm_coeff_ctl *ctl;
        struct work_struct work;
 };
 
-static int wmfw_add_ctl(struct wm_coeff *wm_coeff,
-                       struct wm_coeff_ctl *ctl)
+static int wmfw_add_ctl(struct wm_adsp *adsp, struct wm_coeff_ctl *ctl)
 {
        struct snd_kcontrol_new *kcontrol;
        int ret;
 
-       if (!wm_coeff || !ctl || !ctl->name || !ctl->card)
+       if (!ctl || !ctl->name)
                return -EINVAL;
 
        kcontrol = kzalloc(sizeof(*kcontrol), GFP_KERNEL);
@@ -525,14 +497,17 @@ static int wmfw_add_ctl(struct wm_coeff *wm_coeff,
        kcontrol->put = wm_coeff_put;
        kcontrol->private_value = (unsigned long)ctl;
 
-       ret = wm_coeff_add_kcontrol(wm_coeff,
-                                   ctl, kcontrol);
+       ret = snd_soc_add_card_controls(adsp->card,
+                                       kcontrol, 1);
        if (ret < 0)
                goto err_kcontrol;
 
        kfree(kcontrol);
 
-       list_add(&ctl->list, &wm_coeff->ctl_list);
+       ctl->kcontrol = snd_soc_card_get_kcontrol(adsp->card,
+                                                 ctl->name);
+
+       list_add(&ctl->list, &adsp->ctl_list);
        return 0;
 
 err_kcontrol:
@@ -753,13 +728,12 @@ out:
        return ret;
 }
 
-static int wm_coeff_init_control_caches(struct wm_coeff *wm_coeff)
+static int wm_coeff_init_control_caches(struct wm_adsp *adsp)
 {
        struct wm_coeff_ctl *ctl;
        int ret;
 
-       list_for_each_entry(ctl, &wm_coeff->ctl_list,
-                           list) {
+       list_for_each_entry(ctl, &adsp->ctl_list, list) {
                if (!ctl->enabled || ctl->set)
                        continue;
                ret = wm_coeff_read_control(ctl->kcontrol,
@@ -772,13 +746,12 @@ static int wm_coeff_init_control_caches(struct wm_coeff *wm_coeff)
        return 0;
 }
 
-static int wm_coeff_sync_controls(struct wm_coeff *wm_coeff)
+static int wm_coeff_sync_controls(struct wm_adsp *adsp)
 {
        struct wm_coeff_ctl *ctl;
        int ret;
 
-       list_for_each_entry(ctl, &wm_coeff->ctl_list,
-                           list) {
+       list_for_each_entry(ctl, &adsp->ctl_list, list) {
                if (!ctl->enabled)
                        continue;
                if (ctl->set) {
@@ -799,15 +772,14 @@ static void wm_adsp_ctl_work(struct work_struct *work)
                                                      struct wmfw_ctl_work,
                                                      work);
 
-       wmfw_add_ctl(ctl_work->wm_coeff, ctl_work->ctl);
+       wmfw_add_ctl(ctl_work->adsp, ctl_work->ctl);
        kfree(ctl_work);
 }
 
-static int wm_adsp_create_control(struct snd_soc_codec *codec,
+static int wm_adsp_create_control(struct wm_adsp *dsp,
                                  const struct wm_adsp_alg_region *region)
 
 {
-       struct wm_adsp *dsp = snd_soc_codec_get_drvdata(codec);
        struct wm_coeff_ctl *ctl;
        struct wmfw_ctl_work *ctl_work;
        char *name;
@@ -842,7 +814,7 @@ static int wm_adsp_create_control(struct snd_soc_codec *codec,
        snprintf(name, PAGE_SIZE, "DSP%d %s %x",
                 dsp->num, region_name, region->alg);
 
-       list_for_each_entry(ctl, &dsp->wm_coeff->ctl_list,
+       list_for_each_entry(ctl, &dsp->ctl_list,
                            list) {
                if (!strcmp(ctl->name, name)) {
                        if (!ctl->enabled)
@@ -866,7 +838,6 @@ static int wm_adsp_create_control(struct snd_soc_codec *codec,
        ctl->set = 0;
        ctl->ops.xget = wm_coeff_get;
        ctl->ops.xput = wm_coeff_put;
-       ctl->card = codec->card->snd_card;
        ctl->adsp = dsp;
 
        ctl->len = region->len;
@@ -882,7 +853,7 @@ static int wm_adsp_create_control(struct snd_soc_codec *codec,
                goto err_ctl_cache;
        }
 
-       ctl_work->wm_coeff = dsp->wm_coeff;
+       ctl_work->adsp = dsp;
        ctl_work->ctl = ctl;
        INIT_WORK(&ctl_work->work, wm_adsp_ctl_work);
        schedule_work(&ctl_work->work);
@@ -903,7 +874,7 @@ err_name:
        return ret;
 }
 
-static int wm_adsp_setup_algs(struct wm_adsp *dsp, struct snd_soc_codec *codec)
+static int wm_adsp_setup_algs(struct wm_adsp *dsp)
 {
        struct regmap *regmap = dsp->regmap;
        struct wmfw_adsp1_id_hdr adsp1_id;
@@ -1091,7 +1062,7 @@ static int wm_adsp_setup_algs(struct wm_adsp *dsp, struct snd_soc_codec *codec)
                        if (i + 1 < algs) {
                                region->len = be32_to_cpu(adsp1_alg[i + 1].dm);
                                region->len -= be32_to_cpu(adsp1_alg[i].dm);
-                               wm_adsp_create_control(codec, region);
+                               wm_adsp_create_control(dsp, region);
                        } else {
                                adsp_warn(dsp, "Missing length info for region DM with ID %x\n",
                                          be32_to_cpu(adsp1_alg[i].alg.id));
@@ -1108,7 +1079,7 @@ static int wm_adsp_setup_algs(struct wm_adsp *dsp, struct snd_soc_codec *codec)
                        if (i + 1 < algs) {
                                region->len = be32_to_cpu(adsp1_alg[i + 1].zm);
                                region->len -= be32_to_cpu(adsp1_alg[i].zm);
-                               wm_adsp_create_control(codec, region);
+                               wm_adsp_create_control(dsp, region);
                        } else {
                                adsp_warn(dsp, "Missing length info for region ZM with ID %x\n",
                                          be32_to_cpu(adsp1_alg[i].alg.id));
@@ -1137,7 +1108,7 @@ static int wm_adsp_setup_algs(struct wm_adsp *dsp, struct snd_soc_codec *codec)
                        if (i + 1 < algs) {
                                region->len = be32_to_cpu(adsp2_alg[i + 1].xm);
                                region->len -= be32_to_cpu(adsp2_alg[i].xm);
-                               wm_adsp_create_control(codec, region);
+                               wm_adsp_create_control(dsp, region);
                        } else {
                                adsp_warn(dsp, "Missing length info for region XM with ID %x\n",
                                          be32_to_cpu(adsp2_alg[i].alg.id));
@@ -1154,7 +1125,7 @@ static int wm_adsp_setup_algs(struct wm_adsp *dsp, struct snd_soc_codec *codec)
                        if (i + 1 < algs) {
                                region->len = be32_to_cpu(adsp2_alg[i + 1].ym);
                                region->len -= be32_to_cpu(adsp2_alg[i].ym);
-                               wm_adsp_create_control(codec, region);
+                               wm_adsp_create_control(dsp, region);
                        } else {
                                adsp_warn(dsp, "Missing length info for region YM with ID %x\n",
                                          be32_to_cpu(adsp2_alg[i].alg.id));
@@ -1171,7 +1142,7 @@ static int wm_adsp_setup_algs(struct wm_adsp *dsp, struct snd_soc_codec *codec)
                        if (i + 1 < algs) {
                                region->len = be32_to_cpu(adsp2_alg[i + 1].zm);
                                region->len -= be32_to_cpu(adsp2_alg[i].zm);
-                               wm_adsp_create_control(codec, region);
+                               wm_adsp_create_control(dsp, region);
                        } else {
                                adsp_warn(dsp, "Missing length info for region ZM with ID %x\n",
                                          be32_to_cpu(adsp2_alg[i].alg.id));
@@ -1391,6 +1362,8 @@ int wm_adsp1_event(struct snd_soc_dapm_widget *w,
        int ret;
        int val;
 
+       dsp->card = codec->card;
+
        switch (event) {
        case SND_SOC_DAPM_POST_PMU:
                regmap_update_bits(dsp->regmap, dsp->base + ADSP1_CONTROL_30,
@@ -1425,7 +1398,7 @@ int wm_adsp1_event(struct snd_soc_dapm_widget *w,
                if (ret != 0)
                        goto err;
 
-               ret = wm_adsp_setup_algs(dsp, codec);
+               ret = wm_adsp_setup_algs(dsp);
                if (ret != 0)
                        goto err;
 
@@ -1434,12 +1407,12 @@ int wm_adsp1_event(struct snd_soc_dapm_widget *w,
                        goto err;
 
                /* Initialize caches for enabled and unset controls */
-               ret = wm_coeff_init_control_caches(dsp->wm_coeff);
+               ret = wm_coeff_init_control_caches(dsp);
                if (ret != 0)
                        goto err;
 
                /* Sync set controls */
-               ret = wm_coeff_sync_controls(dsp->wm_coeff);
+               ret = wm_coeff_sync_controls(dsp);
                if (ret != 0)
                        goto err;
 
@@ -1460,10 +1433,8 @@ int wm_adsp1_event(struct snd_soc_dapm_widget *w,
                regmap_update_bits(dsp->regmap, dsp->base + ADSP1_CONTROL_30,
                                   ADSP1_SYS_ENA, 0);
 
-               list_for_each_entry(ctl, &dsp->wm_coeff->ctl_list,
-                                   list) {
+               list_for_each_entry(ctl, &dsp->ctl_list, list)
                        ctl->enabled = 0;
-               }
                break;
 
        default:
@@ -1520,6 +1491,8 @@ int wm_adsp2_event(struct snd_soc_dapm_widget *w,
        unsigned int val;
        int ret;
 
+       dsp->card = codec->card;
+
        switch (event) {
        case SND_SOC_DAPM_POST_PMU:
                /*
@@ -1582,7 +1555,7 @@ int wm_adsp2_event(struct snd_soc_dapm_widget *w,
                if (ret != 0)
                        goto err;
 
-               ret = wm_adsp_setup_algs(dsp, codec);
+               ret = wm_adsp_setup_algs(dsp);
                if (ret != 0)
                        goto err;
 
@@ -1591,12 +1564,12 @@ int wm_adsp2_event(struct snd_soc_dapm_widget *w,
                        goto err;
 
                /* Initialize caches for enabled and unset controls */
-               ret = wm_coeff_init_control_caches(dsp->wm_coeff);
+               ret = wm_coeff_init_control_caches(dsp);
                if (ret != 0)
                        goto err;
 
                /* Sync set controls */
-               ret = wm_coeff_sync_controls(dsp->wm_coeff);
+               ret = wm_coeff_sync_controls(dsp);
                if (ret != 0)
                        goto err;
 
@@ -1637,10 +1610,8 @@ int wm_adsp2_event(struct snd_soc_dapm_widget *w,
                                        ret);
                }
 
-               list_for_each_entry(ctl, &dsp->wm_coeff->ctl_list,
-                                   list) {
+               list_for_each_entry(ctl, &dsp->ctl_list, list)
                        ctl->enabled = 0;
-               }
 
                while (!list_empty(&dsp->alg_regions)) {
                        alg_region = list_first_entry(&dsp->alg_regions,
@@ -1679,49 +1650,38 @@ int wm_adsp2_init(struct wm_adsp *adsp, bool dvfs)
        }
 
        INIT_LIST_HEAD(&adsp->alg_regions);
-
-       adsp->wm_coeff = kzalloc(sizeof(*adsp->wm_coeff),
-                                GFP_KERNEL);
-       if (!adsp->wm_coeff)
-               return -ENOMEM;
-       adsp->wm_coeff->regmap = adsp->regmap;
-       adsp->wm_coeff->dev = adsp->dev;
-       INIT_LIST_HEAD(&adsp->wm_coeff->ctl_list);
+       INIT_LIST_HEAD(&adsp->ctl_list);
 
        if (dvfs) {
                adsp->dvfs = devm_regulator_get(adsp->dev, "DCVDD");
                if (IS_ERR(adsp->dvfs)) {
                        ret = PTR_ERR(adsp->dvfs);
                        dev_err(adsp->dev, "Failed to get DCVDD: %d\n", ret);
-                       goto out_coeff;
+                       return ret;
                }
 
                ret = regulator_enable(adsp->dvfs);
                if (ret != 0) {
                        dev_err(adsp->dev, "Failed to enable DCVDD: %d\n",
                                ret);
-                       goto out_coeff;
+                       return ret;
                }
 
                ret = regulator_set_voltage(adsp->dvfs, 1200000, 1800000);
                if (ret != 0) {
                        dev_err(adsp->dev, "Failed to initialise DVFS: %d\n",
                                ret);
-                       goto out_coeff;
+                       return ret;
                }
 
                ret = regulator_disable(adsp->dvfs);
                if (ret != 0) {
                        dev_err(adsp->dev, "Failed to disable DCVDD: %d\n",
                                ret);
-                       goto out_coeff;
+                       return ret;
                }
        }
 
        return 0;
-
-out_coeff:
-       kfree(adsp->wm_coeff);
-       return ret;
 }
 EXPORT_SYMBOL_GPL(wm_adsp2_init);
index 9f922c82536c52f8939db5834bbcfda80e1c5b59..d018dea6254de44c5997813fcb9b958eeca09706 100644 (file)
@@ -39,6 +39,7 @@ struct wm_adsp {
        int type;
        struct device *dev;
        struct regmap *regmap;
+       struct snd_soc_card *card;
 
        int base;
        int sysclk_reg;
@@ -57,7 +58,7 @@ struct wm_adsp {
 
        struct regulator *dvfs;
 
-       struct wm_coeff *wm_coeff;
+       struct list_head ctl_list;
 };
 
 #define WM_ADSP1(wname, num) \
index 2d9e099415a58cbe353c99f32f6f089a200db2de..8b50e5958de5a43030ac854d18c046cf903e5841 100644 (file)
@@ -699,9 +699,7 @@ EXPORT_SYMBOL_GPL(wm_hubs_update_class_w);
 static int class_w_put_volsw(struct snd_kcontrol *kcontrol,
                              struct snd_ctl_elem_value *ucontrol)
 {
-       struct snd_soc_dapm_widget_list *wlist = snd_kcontrol_chip(kcontrol);
-       struct snd_soc_dapm_widget *widget = wlist->widgets[0];
-       struct snd_soc_codec *codec = widget->codec;
+       struct snd_soc_codec *codec = snd_soc_dapm_kcontrol_codec(kcontrol);
        int ret;
 
        ret = snd_soc_dapm_put_volsw(kcontrol, ucontrol);
@@ -721,9 +719,7 @@ static int class_w_put_volsw(struct snd_kcontrol *kcontrol,
 static int class_w_put_double(struct snd_kcontrol *kcontrol,
                              struct snd_ctl_elem_value *ucontrol)
 {
-       struct snd_soc_dapm_widget_list *wlist = snd_kcontrol_chip(kcontrol);
-       struct snd_soc_dapm_widget *widget = wlist->widgets[0];
-       struct snd_soc_codec *codec = widget->codec;
+       struct snd_soc_codec *codec = snd_soc_dapm_kcontrol_codec(kcontrol);
        int ret;
 
        ret = snd_soc_dapm_put_enum_double(kcontrol, ucontrol);
index aa438546c91271f69c26beb1646b85f4b34357cf..3a4808d376d023834317c7c39daa9e7f3ecf25b8 100644 (file)
@@ -98,7 +98,7 @@ endif # SND_POWERPC_SOC
 
 menuconfig SND_IMX_SOC
        tristate "SoC Audio for Freescale i.MX CPUs"
-       depends on ARCH_MXC
+       depends on ARCH_MXC || COMPILE_TEST
        help
          Say Y or M if you want to add support for codecs attached to
          the i.MX CPUs.
@@ -109,11 +109,11 @@ config SND_SOC_IMX_SSI
        tristate
 
 config SND_SOC_IMX_PCM_FIQ
-       bool
+       tristate
        select FIQ
 
 config SND_SOC_IMX_PCM_DMA
-       bool
+       tristate
        select SND_SOC_GENERIC_DMAENGINE_PCM
 
 config SND_SOC_IMX_AUDMUX
@@ -175,7 +175,6 @@ config SND_SOC_IMX_WM8962
        select SND_SOC_IMX_PCM_DMA
        select SND_SOC_IMX_AUDMUX
        select SND_SOC_FSL_SSI
-       select SND_SOC_FSL_UTILS
        help
          Say Y if you want to add support for SoC audio on an i.MX board with
          a wm8962 codec.
@@ -187,14 +186,13 @@ config SND_SOC_IMX_SGTL5000
        select SND_SOC_IMX_PCM_DMA
        select SND_SOC_IMX_AUDMUX
        select SND_SOC_FSL_SSI
-       select SND_SOC_FSL_UTILS
        help
          Say Y if you want to add support for SoC audio on an i.MX board with
          a sgtl5000 codec.
 
 config SND_SOC_IMX_MC13783
        tristate "SoC Audio support for I.MX boards with mc13783"
-       depends on MFD_MC13783
+       depends on MFD_MC13783 && ARM
        select SND_SOC_IMX_SSI
        select SND_SOC_IMX_AUDMUX
        select SND_SOC_MC13783
index 2f2d837df07f078e5b60da38308cbdb8d53b9969..0c072ff108759f53313d64fe4cc53253e8fc24ca 100644 (file)
@@ -8,6 +8,26 @@
  * This file is licensed under the terms of the GNU General Public License
  * version 2.  This program is licensed "as is" without any warranty of any
  * kind, whether express or implied.
+ *
+ *
+ * Some notes why imx-pcm-fiq is used instead of DMA on some boards:
+ *
+ * The i.MX SSI core has some nasty limitations in AC97 mode. While most
+ * sane processor vendors have a FIFO per AC97 slot, the i.MX has only
+ * one FIFO which combines all valid receive slots. We cannot even select
+ * which slots we want to receive. The WM9712 with which this driver
+ * was developed with always sends GPIO status data in slot 12 which
+ * we receive in our (PCM-) data stream. The only chance we have is to
+ * manually skip this data in the FIQ handler. With sampling rates different
+ * from 48000Hz not every frame has valid receive data, so the ratio
+ * between pcm data and GPIO status data changes. Our FIQ handler is not
+ * able to handle this, hence this driver only works with 48000Hz sampling
+ * rate.
+ * Reading and writing AC97 registers is another challenge. The core
+ * provides us status bits when the read register is updated with *another*
+ * value. When we read the same register two times (and the register still
+ * contains the same value) these status bits are not set. We work
+ * around this by not polling these bits but only wait a fixed delay.
  */
 
 #include <linux/init.h>
@@ -36,7 +56,7 @@
 #define read_ssi(addr)                  in_be32(addr)
 #define write_ssi(val, addr)            out_be32(addr, val)
 #define write_ssi_mask(addr, clear, set) clrsetbits_be32(addr, clear, set)
-#elif defined ARM
+#else
 #define read_ssi(addr)                  readl(addr)
 #define write_ssi(val, addr)            writel(val, addr)
 /*
@@ -121,11 +141,13 @@ struct fsl_ssi_private {
 
        bool new_binding;
        bool ssi_on_imx;
+       bool use_dma;
        struct clk *clk;
        struct snd_dmaengine_dai_dma_data dma_params_tx;
        struct snd_dmaengine_dai_dma_data dma_params_rx;
        struct imx_dma_data filter_data_tx;
        struct imx_dma_data filter_data_rx;
+       struct imx_pcm_fiq_params fiq_params;
 
        struct {
                unsigned int rfrc;
@@ -355,7 +377,12 @@ static int fsl_ssi_startup(struct snd_pcm_substream *substream,
                 */
 
                /* Enable the interrupts and DMA requests */
-               write_ssi(SIER_FLAGS, &ssi->sier);
+               if (ssi_private->use_dma)
+                       write_ssi(SIER_FLAGS, &ssi->sier);
+               else
+                       write_ssi(CCSR_SSI_SIER_TIE | CCSR_SSI_SIER_TFE0_EN |
+                                       CCSR_SSI_SIER_RIE |
+                                       CCSR_SSI_SIER_RFF0_EN, &ssi->sier);
 
                /*
                 * Set the watermark for transmit FIFI 0 and receive FIFO 0. We
@@ -510,6 +537,9 @@ static int fsl_ssi_trigger(struct snd_pcm_substream *substream, int cmd,
                        write_ssi_mask(&ssi->scr, CCSR_SSI_SCR_TE, 0);
                else
                        write_ssi_mask(&ssi->scr, CCSR_SSI_SCR_RE, 0);
+
+               if ((read_ssi(&ssi->scr) & (CCSR_SSI_SCR_TE | CCSR_SSI_SCR_RE)) == 0)
+                       write_ssi_mask(&ssi->scr, CCSR_SSI_SCR_SSIEN, 0);
                break;
 
        default:
@@ -534,22 +564,13 @@ static void fsl_ssi_shutdown(struct snd_pcm_substream *substream,
                ssi_private->first_stream = ssi_private->second_stream;
 
        ssi_private->second_stream = NULL;
-
-       /*
-        * If this is the last active substream, disable the SSI.
-        */
-       if (!ssi_private->first_stream) {
-               struct ccsr_ssi __iomem *ssi = ssi_private->ssi;
-
-               write_ssi_mask(&ssi->scr, CCSR_SSI_SCR_SSIEN, 0);
-       }
 }
 
 static int fsl_ssi_dai_probe(struct snd_soc_dai *dai)
 {
        struct fsl_ssi_private *ssi_private = snd_soc_dai_get_drvdata(dai);
 
-       if (ssi_private->ssi_on_imx) {
+       if (ssi_private->ssi_on_imx && ssi_private->use_dma) {
                dai->playback_dma_data = &ssi_private->dma_params_tx;
                dai->capture_dma_data = &ssi_private->dma_params_rx;
        }
@@ -680,7 +701,7 @@ static int fsl_ssi_probe(struct platform_device *pdev)
 
        /* The DAI name is the last part of the full name of the node. */
        p = strrchr(np->full_name, '/') + 1;
-       ssi_private = kzalloc(sizeof(struct fsl_ssi_private) + strlen(p),
+       ssi_private = devm_kzalloc(&pdev->dev, sizeof(*ssi_private) + strlen(p),
                              GFP_KERNEL);
        if (!ssi_private) {
                dev_err(&pdev->dev, "could not allocate DAI object\n");
@@ -689,6 +710,9 @@ static int fsl_ssi_probe(struct platform_device *pdev)
 
        strcpy(ssi_private->name, p);
 
+       ssi_private->use_dma = !of_property_read_bool(np,
+                       "fsl,fiq-stream-filter");
+
        /* Initialize this copy of the CPU DAI driver structure */
        memcpy(&ssi_private->cpu_dai_drv, &fsl_ssi_dai_template,
               sizeof(fsl_ssi_dai_template));
@@ -698,29 +722,31 @@ static int fsl_ssi_probe(struct platform_device *pdev)
        ret = of_address_to_resource(np, 0, &res);
        if (ret) {
                dev_err(&pdev->dev, "could not determine device resources\n");
-               goto error_kmalloc;
+               return ret;
        }
        ssi_private->ssi = of_iomap(np, 0);
        if (!ssi_private->ssi) {
                dev_err(&pdev->dev, "could not map device resources\n");
-               ret = -ENOMEM;
-               goto error_kmalloc;
+               return -ENOMEM;
        }
        ssi_private->ssi_phys = res.start;
 
        ssi_private->irq = irq_of_parse_and_map(np, 0);
        if (ssi_private->irq == NO_IRQ) {
                dev_err(&pdev->dev, "no irq for node %s\n", np->full_name);
-               ret = -ENXIO;
-               goto error_iomap;
-       }
-
-       /* The 'name' should not have any slashes in it. */
-       ret = request_irq(ssi_private->irq, fsl_ssi_isr, 0, ssi_private->name,
-                         ssi_private);
-       if (ret < 0) {
-               dev_err(&pdev->dev, "could not claim irq %u\n", ssi_private->irq);
-               goto error_irqmap;
+               return -ENXIO;
+       }
+
+       if (ssi_private->use_dma) {
+               /* The 'name' should not have any slashes in it. */
+               ret = devm_request_irq(&pdev->dev, ssi_private->irq,
+                                       fsl_ssi_isr, 0, ssi_private->name,
+                                       ssi_private);
+               if (ret < 0) {
+                       dev_err(&pdev->dev, "could not claim irq %u\n",
+                                       ssi_private->irq);
+                       goto error_irqmap;
+               }
        }
 
        /* Are the RX and the TX clocks locked? */
@@ -739,13 +765,18 @@ static int fsl_ssi_probe(struct platform_device *pdev)
                u32 dma_events[2];
                ssi_private->ssi_on_imx = true;
 
-               ssi_private->clk = clk_get(&pdev->dev, NULL);
+               ssi_private->clk = devm_clk_get(&pdev->dev, NULL);
                if (IS_ERR(ssi_private->clk)) {
                        ret = PTR_ERR(ssi_private->clk);
                        dev_err(&pdev->dev, "could not get clock: %d\n", ret);
-                       goto error_irq;
+                       goto error_irqmap;
+               }
+               ret = clk_prepare_enable(ssi_private->clk);
+               if (ret) {
+                       dev_err(&pdev->dev, "clk_prepare_enable failed: %d\n",
+                               ret);
+                       goto error_irqmap;
                }
-               clk_prepare_enable(ssi_private->clk);
 
                /*
                 * We have burstsize be "fifo_depth - 2" to match the SSI
@@ -763,24 +794,28 @@ static int fsl_ssi_probe(struct platform_device *pdev)
                        &ssi_private->filter_data_tx;
                ssi_private->dma_params_rx.filter_data =
                        &ssi_private->filter_data_rx;
-               /*
-                * TODO: This is a temporary solution and should be changed
-                * to use generic DMA binding later when the helplers get in.
-                */
-               ret = of_property_read_u32_array(pdev->dev.of_node,
+               if (!of_property_read_bool(pdev->dev.of_node, "dmas") &&
+                               ssi_private->use_dma) {
+                       /*
+                        * FIXME: This is a temporary solution until all
+                        * necessary dma drivers support the generic dma
+                        * bindings.
+                        */
+                       ret = of_property_read_u32_array(pdev->dev.of_node,
                                        "fsl,ssi-dma-events", dma_events, 2);
-               if (ret) {
-                       dev_err(&pdev->dev, "could not get dma events\n");
-                       goto error_clk;
+                       if (ret && ssi_private->use_dma) {
+                               dev_err(&pdev->dev, "could not get dma events but fsl-ssi is configured to use DMA\n");
+                               goto error_clk;
+                       }
                }
 
                shared = of_device_is_compatible(of_get_parent(np),
                            "fsl,spba-bus");
 
                imx_pcm_dma_params_init_data(&ssi_private->filter_data_tx,
-                       dma_events[0], shared);
+                       dma_events[0], shared ? IMX_DMATYPE_SSI_SP : IMX_DMATYPE_SSI);
                imx_pcm_dma_params_init_data(&ssi_private->filter_data_rx,
-                       dma_events[1], shared);
+                       dma_events[1], shared ? IMX_DMATYPE_SSI_SP : IMX_DMATYPE_SSI);
        }
 
        /* Initialize the the device_attribute structure */
@@ -794,7 +829,7 @@ static int fsl_ssi_probe(struct platform_device *pdev)
        if (ret) {
                dev_err(&pdev->dev, "could not create sysfs %s file\n",
                        ssi_private->dev_attr.attr.name);
-               goto error_irq;
+               goto error_clk;
        }
 
        /* Register with ASoC */
@@ -808,9 +843,30 @@ static int fsl_ssi_probe(struct platform_device *pdev)
        }
 
        if (ssi_private->ssi_on_imx) {
-               ret = imx_pcm_dma_init(pdev);
-               if (ret)
-                       goto error_dev;
+               if (!ssi_private->use_dma) {
+
+                       /*
+                        * Some boards use an incompatible codec. To get it
+                        * working, we are using imx-fiq-pcm-audio, that
+                        * can handle those codecs. DMA is not possible in this
+                        * situation.
+                        */
+
+                       ssi_private->fiq_params.irq = ssi_private->irq;
+                       ssi_private->fiq_params.base = ssi_private->ssi;
+                       ssi_private->fiq_params.dma_params_rx =
+                               &ssi_private->dma_params_rx;
+                       ssi_private->fiq_params.dma_params_tx =
+                               &ssi_private->dma_params_tx;
+
+                       ret = imx_pcm_fiq_init(pdev, &ssi_private->fiq_params);
+                       if (ret)
+                               goto error_dev;
+               } else {
+                       ret = imx_pcm_dma_init(pdev);
+                       if (ret)
+                               goto error_dev;
+               }
        }
 
        /*
@@ -857,23 +913,12 @@ error_dev:
        device_remove_file(&pdev->dev, dev_attr);
 
 error_clk:
-       if (ssi_private->ssi_on_imx) {
+       if (ssi_private->ssi_on_imx)
                clk_disable_unprepare(ssi_private->clk);
-               clk_put(ssi_private->clk);
-       }
-
-error_irq:
-       free_irq(ssi_private->irq, ssi_private);
 
 error_irqmap:
        irq_dispose_mapping(ssi_private->irq);
 
-error_iomap:
-       iounmap(ssi_private->ssi);
-
-error_kmalloc:
-       kfree(ssi_private);
-
        return ret;
 }
 
@@ -886,15 +931,10 @@ static int fsl_ssi_remove(struct platform_device *pdev)
        if (ssi_private->ssi_on_imx) {
                imx_pcm_dma_exit(pdev);
                clk_disable_unprepare(ssi_private->clk);
-               clk_put(ssi_private->clk);
        }
        snd_soc_unregister_component(&pdev->dev);
        device_remove_file(&pdev->dev, &ssi_private->dev_attr);
-
-       free_irq(ssi_private->irq, ssi_private);
        irq_dispose_mapping(ssi_private->irq);
-
-       kfree(ssi_private);
        dev_set_drvdata(&pdev->dev, NULL);
 
        return 0;
@@ -919,6 +959,7 @@ static struct platform_driver fsl_ssi_driver = {
 
 module_platform_driver(fsl_ssi_driver);
 
+MODULE_ALIAS("platform:fsl-ssi-dai");
 MODULE_AUTHOR("Timur Tabi <timur@freescale.com>");
 MODULE_DESCRIPTION("Freescale Synchronous Serial Interface (SSI) ASoC Driver");
 MODULE_LICENSE("GPL v2");
index e260f1f899dbb132135cb6745376f5dad8891fb9..ab17381cc9812a7e0538f30ea44496bfdd944c6d 100644 (file)
@@ -73,8 +73,11 @@ static ssize_t audmux_read_file(struct file *file, char __user *user_buf,
        if (!buf)
                return -ENOMEM;
 
-       if (audmux_clk)
-               clk_prepare_enable(audmux_clk);
+       if (audmux_clk) {
+               ret = clk_prepare_enable(audmux_clk);
+               if (ret)
+                       return ret;
+       }
 
        ptcr = readl(audmux_base + IMX_AUDMUX_V2_PTCR(port));
        pdcr = readl(audmux_base + IMX_AUDMUX_V2_PDCR(port));
@@ -224,14 +227,19 @@ EXPORT_SYMBOL_GPL(imx_audmux_v1_configure_port);
 int imx_audmux_v2_configure_port(unsigned int port, unsigned int ptcr,
                unsigned int pdcr)
 {
+       int ret;
+
        if (audmux_type != IMX31_AUDMUX)
                return -EINVAL;
 
        if (!audmux_base)
                return -ENOSYS;
 
-       if (audmux_clk)
-               clk_prepare_enable(audmux_clk);
+       if (audmux_clk) {
+               ret = clk_prepare_enable(audmux_clk);
+               if (ret)
+                       return ret;
+       }
 
        writel(ptcr, audmux_base + IMX_AUDMUX_V2_PTCR(port));
        writel(pdcr, audmux_base + IMX_AUDMUX_V2_PDCR(port));
@@ -243,6 +251,66 @@ int imx_audmux_v2_configure_port(unsigned int port, unsigned int ptcr,
 }
 EXPORT_SYMBOL_GPL(imx_audmux_v2_configure_port);
 
+static int imx_audmux_parse_dt_defaults(struct platform_device *pdev,
+               struct device_node *of_node)
+{
+       struct device_node *child;
+
+       for_each_available_child_of_node(of_node, child) {
+               unsigned int port;
+               unsigned int ptcr = 0;
+               unsigned int pdcr = 0;
+               unsigned int pcr = 0;
+               unsigned int val;
+               int ret;
+               int i = 0;
+
+               ret = of_property_read_u32(child, "fsl,audmux-port", &port);
+               if (ret) {
+                       dev_warn(&pdev->dev, "Failed to get fsl,audmux-port of child node \"%s\"\n",
+                                       child->full_name);
+                       continue;
+               }
+               if (!of_property_read_bool(child, "fsl,port-config")) {
+                       dev_warn(&pdev->dev, "child node \"%s\" does not have property fsl,port-config\n",
+                                       child->full_name);
+                       continue;
+               }
+
+               for (i = 0; (ret = of_property_read_u32_index(child,
+                                       "fsl,port-config", i, &val)) == 0;
+                               ++i) {
+                       if (audmux_type == IMX31_AUDMUX) {
+                               if (i % 2)
+                                       pdcr |= val;
+                               else
+                                       ptcr |= val;
+                       } else {
+                               pcr |= val;
+                       }
+               }
+
+               if (ret != -EOVERFLOW) {
+                       dev_err(&pdev->dev, "Failed to read u32 at index %d of child %s\n",
+                                       i, child->full_name);
+                       continue;
+               }
+
+               if (audmux_type == IMX31_AUDMUX) {
+                       if (i % 2) {
+                               dev_err(&pdev->dev, "One pdcr value is missing in child node %s\n",
+                                               child->full_name);
+                               continue;
+                       }
+                       imx_audmux_v2_configure_port(port, ptcr, pdcr);
+               } else {
+                       imx_audmux_v1_configure_port(port, pcr);
+               }
+       }
+
+       return 0;
+}
+
 static int imx_audmux_probe(struct platform_device *pdev)
 {
        struct resource *res;
@@ -267,6 +335,8 @@ static int imx_audmux_probe(struct platform_device *pdev)
        if (audmux_type == IMX31_AUDMUX)
                audmux_debugfs_init();
 
+       imx_audmux_parse_dt_defaults(pdev, pdev->dev.of_node);
+
        return 0;
 }
 
index b8ff44b9dafaaaeecd1a5c9fbc0ab4efd0e69b6b..38a4209af7c6f6e43e5ebe4cd74aae9895e09896 100644 (file)
@@ -1,57 +1,7 @@
 #ifndef __IMX_AUDMUX_H
 #define __IMX_AUDMUX_H
 
-#define MX27_AUDMUX_HPCR1_SSI0         0
-#define MX27_AUDMUX_HPCR2_SSI1         1
-#define MX27_AUDMUX_HPCR3_SSI_PINS_4   2
-#define MX27_AUDMUX_PPCR1_SSI_PINS_1   3
-#define MX27_AUDMUX_PPCR2_SSI_PINS_2   4
-#define MX27_AUDMUX_PPCR3_SSI_PINS_3   5
-
-#define MX31_AUDMUX_PORT1_SSI0         0
-#define MX31_AUDMUX_PORT2_SSI1         1
-#define MX31_AUDMUX_PORT3_SSI_PINS_3   2
-#define MX31_AUDMUX_PORT4_SSI_PINS_4   3
-#define MX31_AUDMUX_PORT5_SSI_PINS_5   4
-#define MX31_AUDMUX_PORT6_SSI_PINS_6   5
-#define MX31_AUDMUX_PORT7_SSI_PINS_7   6
-
-#define MX51_AUDMUX_PORT1_SSI0         0
-#define MX51_AUDMUX_PORT2_SSI1         1
-#define MX51_AUDMUX_PORT3              2
-#define MX51_AUDMUX_PORT4              3
-#define MX51_AUDMUX_PORT5              4
-#define MX51_AUDMUX_PORT6              5
-#define MX51_AUDMUX_PORT7              6
-
-/* Register definitions for the i.MX21/27 Digital Audio Multiplexer */
-#define IMX_AUDMUX_V1_PCR_INMMASK(x)   ((x) & 0xff)
-#define IMX_AUDMUX_V1_PCR_INMEN                (1 << 8)
-#define IMX_AUDMUX_V1_PCR_TXRXEN       (1 << 10)
-#define IMX_AUDMUX_V1_PCR_SYN          (1 << 12)
-#define IMX_AUDMUX_V1_PCR_RXDSEL(x)    (((x) & 0x7) << 13)
-#define IMX_AUDMUX_V1_PCR_RFCSEL(x)    (((x) & 0xf) << 20)
-#define IMX_AUDMUX_V1_PCR_RCLKDIR      (1 << 24)
-#define IMX_AUDMUX_V1_PCR_RFSDIR       (1 << 25)
-#define IMX_AUDMUX_V1_PCR_TFCSEL(x)    (((x) & 0xf) << 26)
-#define IMX_AUDMUX_V1_PCR_TCLKDIR      (1 << 30)
-#define IMX_AUDMUX_V1_PCR_TFSDIR       (1 << 31)
-
-/* Register definitions for the i.MX25/31/35/51 Digital Audio Multiplexer */
-#define IMX_AUDMUX_V2_PTCR_TFSDIR      (1 << 31)
-#define IMX_AUDMUX_V2_PTCR_TFSEL(x)    (((x) & 0xf) << 27)
-#define IMX_AUDMUX_V2_PTCR_TCLKDIR     (1 << 26)
-#define IMX_AUDMUX_V2_PTCR_TCSEL(x)    (((x) & 0xf) << 22)
-#define IMX_AUDMUX_V2_PTCR_RFSDIR      (1 << 21)
-#define IMX_AUDMUX_V2_PTCR_RFSEL(x)    (((x) & 0xf) << 17)
-#define IMX_AUDMUX_V2_PTCR_RCLKDIR     (1 << 16)
-#define IMX_AUDMUX_V2_PTCR_RCSEL(x)    (((x) & 0xf) << 12)
-#define IMX_AUDMUX_V2_PTCR_SYN         (1 << 11)
-
-#define IMX_AUDMUX_V2_PDCR_RXDSEL(x)   (((x) & 0x7) << 13)
-#define IMX_AUDMUX_V2_PDCR_TXRXEN      (1 << 12)
-#define IMX_AUDMUX_V2_PDCR_MODE(x)     (((x) & 0x3) << 8)
-#define IMX_AUDMUX_V2_PDCR_INMMASK(x)  ((x) & 0xff)
+#include <dt-bindings/sound/fsl-imx-audmux.h>
 
 int imx_audmux_v1_configure_port(unsigned int port, unsigned int pcr);
 
index 9df173c091a66b4fbb676036b9a66cacfe2eff17..a3d60d4bea4ce8ace84d2860921b5c46d3f00895 100644 (file)
@@ -90,6 +90,7 @@ static const struct snd_soc_dapm_route imx_mc13783_routes[] = {
 
 static struct snd_soc_card imx_mc13783 = {
        .name           = "imx_mc13783",
+       .owner          = THIS_MODULE,
        .dai_link       = imx_mc13783_dai_mc13783,
        .num_links      = ARRAY_SIZE(imx_mc13783_dai_mc13783),
        .dapm_widgets   = imx_mc13783_widget,
index fde4d2ea68c88afb32275963ae0b51e81be9112a..4dc1296688e9210531f904366dcc19603e0022ce 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/platform_device.h>
 #include <linux/dmaengine.h>
 #include <linux/types.h>
+#include <linux/module.h>
 
 #include <sound/core.h>
 #include <sound/pcm.h>
@@ -64,7 +65,6 @@ int imx_pcm_dma_init(struct platform_device *pdev)
 {
        return snd_dmaengine_pcm_register(&pdev->dev, &imx_dmaengine_pcm_config,
                SND_DMAENGINE_PCM_FLAG_NO_RESIDUE |
-               SND_DMAENGINE_PCM_FLAG_NO_DT |
                SND_DMAENGINE_PCM_FLAG_COMPAT);
 }
 EXPORT_SYMBOL_GPL(imx_pcm_dma_init);
@@ -74,3 +74,5 @@ void imx_pcm_dma_exit(struct platform_device *pdev)
        snd_dmaengine_pcm_unregister(&pdev->dev);
 }
 EXPORT_SYMBOL_GPL(imx_pcm_dma_exit);
+
+MODULE_LICENSE("GPL");
index 310d90290320c751b572f2a409f8c4800f6c60b2..34043c55f2a62f048232b09ff2f6165fd3726116 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/slab.h>
 
 #include <sound/core.h>
+#include <sound/dmaengine_pcm.h>
 #include <sound/initval.h>
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
@@ -32,6 +33,7 @@
 #include <linux/platform_data/asoc-imx-ssi.h>
 
 #include "imx-ssi.h"
+#include "imx-pcm.h"
 
 struct imx_pcm_runtime_data {
        unsigned int period;
@@ -366,9 +368,9 @@ static struct snd_soc_platform_driver imx_soc_platform_fiq = {
        .pcm_free       = imx_pcm_fiq_free,
 };
 
-int imx_pcm_fiq_init(struct platform_device *pdev)
+int imx_pcm_fiq_init(struct platform_device *pdev,
+               struct imx_pcm_fiq_params *params)
 {
-       struct imx_ssi *ssi = platform_get_drvdata(pdev);
        int ret;
 
        ret = claim_fiq(&fh);
@@ -377,15 +379,15 @@ int imx_pcm_fiq_init(struct platform_device *pdev)
                return ret;
        }
 
-       mxc_set_irq_fiq(ssi->irq, 1);
-       ssi_irq = ssi->irq;
+       mxc_set_irq_fiq(params->irq, 1);
+       ssi_irq = params->irq;
 
-       imx_pcm_fiq = ssi->irq;
+       imx_pcm_fiq = params->irq;
 
-       imx_ssi_fiq_base = (unsigned long)ssi->base;
+       imx_ssi_fiq_base = (unsigned long)params->base;
 
-       ssi->dma_params_tx.maxburst = 4;
-       ssi->dma_params_rx.maxburst = 6;
+       params->dma_params_tx->maxburst = 4;
+       params->dma_params_rx->maxburst = 6;
 
        ret = snd_soc_register_platform(&pdev->dev, &imx_soc_platform_fiq);
        if (ret)
@@ -406,3 +408,5 @@ void imx_pcm_fiq_exit(struct platform_device *pdev)
        snd_soc_unregister_platform(&pdev->dev);
 }
 EXPORT_SYMBOL_GPL(imx_pcm_fiq_exit);
+
+MODULE_LICENSE("GPL");
index 67f656c7c320744334d4ca5d8233d36eacbe6d4d..5d5b73303e1169b7f6c2254e06d66a87409050cc 100644 (file)
 
 static inline void
 imx_pcm_dma_params_init_data(struct imx_dma_data *dma_data,
-       int dma, bool shared)
+       int dma, enum sdma_peripheral_type peripheral_type)
 {
        dma_data->dma_request = dma;
        dma_data->priority = DMA_PRIO_HIGH;
-       if (shared)
-               dma_data->peripheral_type = IMX_DMATYPE_SSI_SP;
-       else
-               dma_data->peripheral_type = IMX_DMATYPE_SSI;
+       dma_data->peripheral_type = peripheral_type;
 }
 
-#ifdef CONFIG_SND_SOC_IMX_PCM_DMA
+struct imx_pcm_fiq_params {
+       int irq;
+       void __iomem *base;
+
+       /* Pointer to original ssi driver to setup tx rx sizes */
+       struct snd_dmaengine_dai_dma_data *dma_params_rx;
+       struct snd_dmaengine_dai_dma_data *dma_params_tx;
+};
+
+#if IS_ENABLED(CONFIG_SND_SOC_IMX_PCM_DMA)
 int imx_pcm_dma_init(struct platform_device *pdev);
 void imx_pcm_dma_exit(struct platform_device *pdev);
 #else
@@ -46,11 +52,13 @@ static inline void imx_pcm_dma_exit(struct platform_device *pdev)
 }
 #endif
 
-#ifdef CONFIG_SND_SOC_IMX_PCM_FIQ
-int imx_pcm_fiq_init(struct platform_device *pdev);
+#if IS_ENABLED(CONFIG_SND_SOC_IMX_PCM_FIQ)
+int imx_pcm_fiq_init(struct platform_device *pdev,
+               struct imx_pcm_fiq_params *params);
 void imx_pcm_fiq_exit(struct platform_device *pdev);
 #else
-static inline int imx_pcm_fiq_init(struct platform_device *pdev)
+static inline int imx_pcm_fiq_init(struct platform_device *pdev,
+               struct imx_pcm_fiq_params *params)
 {
        return -ENODEV;
 }
index 3f726e4f88db8c2900840819e5d86ede5e9415d1..389cbfa6dca79d6100b34e64f57d6bc2514d20f8 100644 (file)
@@ -129,8 +129,10 @@ static int imx_sgtl5000_probe(struct platform_device *pdev)
        }
 
        data->codec_clk = devm_clk_get(&codec_dev->dev, NULL);
-       if (IS_ERR(data->codec_clk))
+       if (IS_ERR(data->codec_clk)) {
+               ret = PTR_ERR(data->codec_clk);
                goto fail;
+       }
 
        data->clk_frequency = clk_get_rate(data->codec_clk);
 
index 51be3772cba901bc17d245ab0077ec744fcf64b2..f58bcd85c07fbd8b302c0c3bcd97fd3cf6ed4d0c 100644 (file)
@@ -571,13 +571,13 @@ static int imx_ssi_probe(struct platform_device *pdev)
        res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "tx0");
        if (res) {
                imx_pcm_dma_params_init_data(&ssi->filter_data_tx, res->start,
-                       false);
+                       IMX_DMATYPE_SSI);
        }
 
        res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "rx0");
        if (res) {
                imx_pcm_dma_params_init_data(&ssi->filter_data_rx, res->start,
-                       false);
+                       IMX_DMATYPE_SSI);
        }
 
        platform_set_drvdata(pdev, ssi);
@@ -595,7 +595,12 @@ static int imx_ssi_probe(struct platform_device *pdev)
                goto failed_register;
        }
 
-       ret = imx_pcm_fiq_init(pdev);
+       ssi->fiq_params.irq = ssi->irq;
+       ssi->fiq_params.base = ssi->base;
+       ssi->fiq_params.dma_params_rx = &ssi->dma_params_rx;
+       ssi->fiq_params.dma_params_tx = &ssi->dma_params_tx;
+
+       ret = imx_pcm_fiq_init(pdev, &ssi->fiq_params);
        if (ret)
                goto failed_pcm_fiq;
 
index d5003cefca8decee028d937a9a9a975bb703102d..fb1616ba8c5967e1892b4ff6c7e180b2b9447047 100644 (file)
@@ -209,6 +209,7 @@ struct imx_ssi {
        struct snd_dmaengine_dai_dma_data dma_params_tx;
        struct imx_dma_data filter_data_tx;
        struct imx_dma_data filter_data_rx;
+       struct imx_pcm_fiq_params fiq_params;
 
        int enabled;
 };
index 52a36a90f4f4eb19c4739fda1714f48e715d1c68..1d70e278e9154e7f21d1ba230b4f573ad5b31f95 100644 (file)
@@ -217,7 +217,8 @@ static int imx_wm8962_probe(struct platform_device *pdev)
        codec_dev = of_find_i2c_device_by_node(codec_np);
        if (!codec_dev || !codec_dev->driver) {
                dev_err(&pdev->dev, "failed to find codec platform device\n");
-               return -EINVAL;
+               ret = -EINVAL;
+               goto fail;
        }
 
        data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
index c62d715235e29ac5fa20639271d79958a57fd853..9e1970c44e86148b0ae0b4396977d0691b84adc6 100644 (file)
@@ -1,19 +1,15 @@
 config SND_KIRKWOOD_SOC
        tristate "SoC Audio for the Marvell Kirkwood chip"
-       depends on ARCH_KIRKWOOD
+       depends on ARCH_KIRKWOOD || COMPILE_TEST
        help
          Say Y or M if you want to add support for codecs attached to
          the Kirkwood I2S interface. You will also need to select the
          audio interfaces to support below.
 
-config SND_KIRKWOOD_SOC_I2S
-       tristate
-
 config SND_KIRKWOOD_SOC_OPENRD
        tristate "SoC Audio support for Kirkwood Openrd Client"
-       depends on SND_KIRKWOOD_SOC && (MACH_OPENRD_CLIENT || MACH_OPENRD_ULTIMATE)
+       depends on SND_KIRKWOOD_SOC && (MACH_OPENRD_CLIENT || MACH_OPENRD_ULTIMATE || COMPILE_TEST)
        depends on I2C
-       select SND_KIRKWOOD_SOC_I2S
        select SND_SOC_CS42L51
        help
          Say Y if you want to add support for SoC audio on
@@ -21,8 +17,7 @@ config SND_KIRKWOOD_SOC_OPENRD
 
 config SND_KIRKWOOD_SOC_T5325
        tristate "SoC Audio support for HP t5325"
-       depends on SND_KIRKWOOD_SOC && MACH_T5325 && I2C
-       select SND_KIRKWOOD_SOC_I2S
+       depends on SND_KIRKWOOD_SOC && (MACH_T5325 || COMPILE_TEST) && I2C
        select SND_SOC_ALC5623
        help
          Say Y if you want to add support for SoC audio on
index 3e62ae9e7bbeadc0924b05447cc41a01d894dd5b..9e781385cb88be80d8b4eb3da2b2090ca92f3a15 100644 (file)
@@ -1,8 +1,6 @@
-snd-soc-kirkwood-objs := kirkwood-dma.o
-snd-soc-kirkwood-i2s-objs := kirkwood-i2s.o
+snd-soc-kirkwood-objs := kirkwood-dma.o kirkwood-i2s.o
 
 obj-$(CONFIG_SND_KIRKWOOD_SOC) += snd-soc-kirkwood.o
-obj-$(CONFIG_SND_KIRKWOOD_SOC_I2S) += snd-soc-kirkwood-i2s.o
 
 snd-soc-openrd-objs := kirkwood-openrd.o
 snd-soc-t5325-objs := kirkwood-t5325.o
index a9f14530c3db6a5eb9d3425f7bd6ef5ba4106add..b238434f92b099db8bf3ba613fc596b7006ec96d 100644 (file)
         SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_LE | \
         SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_BE)
 
-struct kirkwood_dma_priv {
-       struct snd_pcm_substream *play_stream;
-       struct snd_pcm_substream *rec_stream;
-       struct kirkwood_dma_data *data;
-};
+static struct kirkwood_dma_data *kirkwood_priv(struct snd_pcm_substream *subs)
+{
+       struct snd_soc_pcm_runtime *soc_runtime = subs->private_data;
+       return snd_soc_dai_get_drvdata(soc_runtime->cpu_dai);
+}
 
 static struct snd_pcm_hardware kirkwood_dma_snd_hw = {
        .info = (SNDRV_PCM_INFO_INTERLEAVED |
@@ -51,7 +51,7 @@ static struct snd_pcm_hardware kirkwood_dma_snd_hw = {
        .rate_max               = 384000,
        .channels_min           = 1,
        .channels_max           = 8,
-       .buffer_bytes_max       = KIRKWOOD_SND_MAX_PERIOD_BYTES * KIRKWOOD_SND_MAX_PERIODS,
+       .buffer_bytes_max       = KIRKWOOD_SND_MAX_BUFFER_BYTES,
        .period_bytes_min       = KIRKWOOD_SND_MIN_PERIOD_BYTES,
        .period_bytes_max       = KIRKWOOD_SND_MAX_PERIOD_BYTES,
        .periods_min            = KIRKWOOD_SND_MIN_PERIODS,
@@ -63,8 +63,7 @@ static u64 kirkwood_dma_dmamask = DMA_BIT_MASK(32);
 
 static irqreturn_t kirkwood_dma_irq(int irq, void *dev_id)
 {
-       struct kirkwood_dma_priv *prdata = dev_id;
-       struct kirkwood_dma_data *priv = prdata->data;
+       struct kirkwood_dma_data *priv = dev_id;
        unsigned long mask, status, cause;
 
        mask = readl(priv->io + KIRKWOOD_INT_MASK);
@@ -89,10 +88,10 @@ static irqreturn_t kirkwood_dma_irq(int irq, void *dev_id)
        writel(status, priv->io + KIRKWOOD_INT_CAUSE);
 
        if (status & KIRKWOOD_INT_CAUSE_PLAY_BYTES)
-               snd_pcm_period_elapsed(prdata->play_stream);
+               snd_pcm_period_elapsed(priv->substream_play);
 
        if (status & KIRKWOOD_INT_CAUSE_REC_BYTES)
-               snd_pcm_period_elapsed(prdata->rec_stream);
+               snd_pcm_period_elapsed(priv->substream_rec);
 
        return IRQ_HANDLED;
 }
@@ -126,15 +125,10 @@ static int kirkwood_dma_open(struct snd_pcm_substream *substream)
 {
        int err;
        struct snd_pcm_runtime *runtime = substream->runtime;
-       struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
-       struct snd_soc_platform *platform = soc_runtime->platform;
-       struct snd_soc_dai *cpu_dai = soc_runtime->cpu_dai;
-       struct kirkwood_dma_data *priv;
-       struct kirkwood_dma_priv *prdata = snd_soc_platform_get_drvdata(platform);
+       struct kirkwood_dma_data *priv = kirkwood_priv(substream);
        const struct mbus_dram_target_info *dram;
        unsigned long addr;
 
-       priv = snd_soc_dai_get_dma_data(cpu_dai, substream);
        snd_soc_set_runtime_hwparams(substream, &kirkwood_dma_snd_hw);
 
        /* Ensure that all constraints linked to dma burst are fulfilled */
@@ -157,21 +151,11 @@ static int kirkwood_dma_open(struct snd_pcm_substream *substream)
        if (err < 0)
                return err;
 
-       if (prdata == NULL) {
-               prdata = kzalloc(sizeof(struct kirkwood_dma_priv), GFP_KERNEL);
-               if (prdata == NULL)
-                       return -ENOMEM;
-
-               prdata->data = priv;
-
+       if (!priv->substream_play && !priv->substream_rec) {
                err = request_irq(priv->irq, kirkwood_dma_irq, IRQF_SHARED,
-                                 "kirkwood-i2s", prdata);
-               if (err) {
-                       kfree(prdata);
+                                 "kirkwood-i2s", priv);
+               if (err)
                        return -EBUSY;
-               }
-
-               snd_soc_platform_set_drvdata(platform, prdata);
 
                /*
                 * Enable Error interrupts. We're only ack'ing them but
@@ -183,11 +167,11 @@ static int kirkwood_dma_open(struct snd_pcm_substream *substream)
        dram = mv_mbus_dram_info();
        addr = substream->dma_buffer.addr;
        if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
-               prdata->play_stream = substream;
+               priv->substream_play = substream;
                kirkwood_dma_conf_mbus_windows(priv->io,
                        KIRKWOOD_PLAYBACK_WIN, addr, dram);
        } else {
-               prdata->rec_stream = substream;
+               priv->substream_rec = substream;
                kirkwood_dma_conf_mbus_windows(priv->io,
                        KIRKWOOD_RECORD_WIN, addr, dram);
        }
@@ -197,27 +181,19 @@ static int kirkwood_dma_open(struct snd_pcm_substream *substream)
 
 static int kirkwood_dma_close(struct snd_pcm_substream *substream)
 {
-       struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
-       struct snd_soc_dai *cpu_dai = soc_runtime->cpu_dai;
-       struct snd_soc_platform *platform = soc_runtime->platform;
-       struct kirkwood_dma_priv *prdata = snd_soc_platform_get_drvdata(platform);
-       struct kirkwood_dma_data *priv;
-
-       priv = snd_soc_dai_get_dma_data(cpu_dai, substream);
+       struct kirkwood_dma_data *priv = kirkwood_priv(substream);
 
-       if (!prdata || !priv)
+       if (!priv)
                return 0;
 
        if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
-               prdata->play_stream = NULL;
+               priv->substream_play = NULL;
        else
-               prdata->rec_stream = NULL;
+               priv->substream_rec = NULL;
 
-       if (!prdata->play_stream && !prdata->rec_stream) {
+       if (!priv->substream_play && !priv->substream_rec) {
                writel(0, priv->io + KIRKWOOD_ERR_MASK);
-               free_irq(priv->irq, prdata);
-               kfree(prdata);
-               snd_soc_platform_set_drvdata(platform, NULL);
+               free_irq(priv->irq, priv);
        }
 
        return 0;
@@ -243,13 +219,9 @@ static int kirkwood_dma_hw_free(struct snd_pcm_substream *substream)
 static int kirkwood_dma_prepare(struct snd_pcm_substream *substream)
 {
        struct snd_pcm_runtime *runtime = substream->runtime;
-       struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
-       struct snd_soc_dai *cpu_dai = soc_runtime->cpu_dai;
-       struct kirkwood_dma_data *priv;
+       struct kirkwood_dma_data *priv = kirkwood_priv(substream);
        unsigned long size, count;
 
-       priv = snd_soc_dai_get_dma_data(cpu_dai, substream);
-
        /* compute buffer size in term of "words" as requested in specs */
        size = frames_to_bytes(runtime, runtime->buffer_size);
        size = (size>>2)-1;
@@ -272,13 +244,9 @@ static int kirkwood_dma_prepare(struct snd_pcm_substream *substream)
 static snd_pcm_uframes_t kirkwood_dma_pointer(struct snd_pcm_substream
                                                *substream)
 {
-       struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
-       struct snd_soc_dai *cpu_dai = soc_runtime->cpu_dai;
-       struct kirkwood_dma_data *priv;
+       struct kirkwood_dma_data *priv = kirkwood_priv(substream);
        snd_pcm_uframes_t count;
 
-       priv = snd_soc_dai_get_dma_data(cpu_dai, substream);
-
        if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
                count = bytes_to_frames(substream->runtime,
                        readl(priv->io + KIRKWOOD_PLAY_BYTE_COUNT));
@@ -366,36 +334,8 @@ static void kirkwood_dma_free_dma_buffers(struct snd_pcm *pcm)
        }
 }
 
-static struct snd_soc_platform_driver kirkwood_soc_platform = {
+struct snd_soc_platform_driver kirkwood_soc_platform = {
        .ops            = &kirkwood_dma_ops,
        .pcm_new        = kirkwood_dma_new,
        .pcm_free       = kirkwood_dma_free_dma_buffers,
 };
-
-static int kirkwood_soc_platform_probe(struct platform_device *pdev)
-{
-       return snd_soc_register_platform(&pdev->dev, &kirkwood_soc_platform);
-}
-
-static int kirkwood_soc_platform_remove(struct platform_device *pdev)
-{
-       snd_soc_unregister_platform(&pdev->dev);
-       return 0;
-}
-
-static struct platform_driver kirkwood_pcm_driver = {
-       .driver = {
-                       .name = "kirkwood-pcm-audio",
-                       .owner = THIS_MODULE,
-       },
-
-       .probe = kirkwood_soc_platform_probe,
-       .remove = kirkwood_soc_platform_remove,
-};
-
-module_platform_driver(kirkwood_pcm_driver);
-
-MODULE_AUTHOR("Arnaud Patard <arnaud.patard@rtp-net.org>");
-MODULE_DESCRIPTION("Marvell Kirkwood Audio DMA module");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:kirkwood-pcm-audio");
index 4c9dad3263c5f7b754d108b3bf66a3ebdb6d67f3..e5f3f7a9ea269cc0d4ef28868ac8148fe8d90013 100644 (file)
 #include <linux/platform_data/asoc-kirkwood.h>
 #include "kirkwood.h"
 
-#define DRV_NAME       "kirkwood-i2s"
+#define DRV_NAME       "mvebu-audio"
 
-#define KIRKWOOD_I2S_RATES \
-       (SNDRV_PCM_RATE_44100 | \
-        SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_96000)
 #define KIRKWOOD_I2S_FORMATS \
        (SNDRV_PCM_FMTBIT_S16_LE | \
         SNDRV_PCM_FMTBIT_S24_LE | \
@@ -105,14 +102,16 @@ static void kirkwood_set_rate(struct snd_soc_dai *dai,
        uint32_t clks_ctrl;
 
        if (rate == 44100 || rate == 48000 || rate == 96000) {
-               /* use internal dco for supported rates */
+               /* use internal dco for the supported rates
+                * defined in kirkwood_i2s_dai */
                dev_dbg(dai->dev, "%s: dco set rate = %lu\n",
                        __func__, rate);
                kirkwood_set_dco(priv->io, rate);
 
                clks_ctrl = KIRKWOOD_MCLK_SOURCE_DCO;
-       } else if (!IS_ERR(priv->extclk)) {
-               /* use optional external clk for other rates */
+       } else {
+               /* use the external clock for the other rates
+                * defined in kirkwood_i2s_dai_extclk */
                dev_dbg(dai->dev, "%s: extclk set rate = %lu -> %lu\n",
                        __func__, rate, 256 * rate);
                clk_set_rate(priv->extclk, 256 * rate);
@@ -199,8 +198,7 @@ static int kirkwood_i2s_hw_params(struct snd_pcm_substream *substream,
                        ctl_play |= KIRKWOOD_PLAYCTL_MONO_OFF;
 
                priv->ctl_play &= ~(KIRKWOOD_PLAYCTL_MONO_MASK |
-                                   KIRKWOOD_PLAYCTL_I2S_EN |
-                                   KIRKWOOD_PLAYCTL_SPDIF_EN |
+                                   KIRKWOOD_PLAYCTL_ENABLE_MASK |
                                    KIRKWOOD_PLAYCTL_SIZE_MASK);
                priv->ctl_play |= ctl_play;
        } else {
@@ -244,8 +242,7 @@ static int kirkwood_i2s_play_trigger(struct snd_pcm_substream *substream,
        case SNDRV_PCM_TRIGGER_START:
                /* configure */
                ctl = priv->ctl_play;
-               value = ctl & ~(KIRKWOOD_PLAYCTL_I2S_EN |
-                               KIRKWOOD_PLAYCTL_SPDIF_EN);
+               value = ctl & ~KIRKWOOD_PLAYCTL_ENABLE_MASK;
                writel(value, priv->io + KIRKWOOD_PLAYCTL);
 
                /* enable interrupts */
@@ -267,7 +264,7 @@ static int kirkwood_i2s_play_trigger(struct snd_pcm_substream *substream,
                writel(value, priv->io + KIRKWOOD_INT_MASK);
 
                /* disable all playbacks */
-               ctl &= ~(KIRKWOOD_PLAYCTL_I2S_EN | KIRKWOOD_PLAYCTL_SPDIF_EN);
+               ctl &= ~KIRKWOOD_PLAYCTL_ENABLE_MASK;
                writel(ctl, priv->io + KIRKWOOD_PLAYCTL);
                break;
 
@@ -387,7 +384,7 @@ static int kirkwood_i2s_probe(struct snd_soc_dai *dai)
 
        /* disable playback/record */
        value = readl(priv->io + KIRKWOOD_PLAYCTL);
-       value &= ~(KIRKWOOD_PLAYCTL_I2S_EN|KIRKWOOD_PLAYCTL_SPDIF_EN);
+       value &= ~KIRKWOOD_PLAYCTL_ENABLE_MASK;
        writel(value, priv->io + KIRKWOOD_PLAYCTL);
 
        value = readl(priv->io + KIRKWOOD_RECCTL);
@@ -398,11 +395,6 @@ static int kirkwood_i2s_probe(struct snd_soc_dai *dai)
 
 }
 
-static int kirkwood_i2s_remove(struct snd_soc_dai *dai)
-{
-       return 0;
-}
-
 static const struct snd_soc_dai_ops kirkwood_i2s_dai_ops = {
        .startup        = kirkwood_i2s_startup,
        .trigger        = kirkwood_i2s_trigger,
@@ -413,17 +405,18 @@ static const struct snd_soc_dai_ops kirkwood_i2s_dai_ops = {
 
 static struct snd_soc_dai_driver kirkwood_i2s_dai = {
        .probe = kirkwood_i2s_probe,
-       .remove = kirkwood_i2s_remove,
        .playback = {
                .channels_min = 1,
                .channels_max = 2,
-               .rates = KIRKWOOD_I2S_RATES,
+               .rates = SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000 |
+                               SNDRV_PCM_RATE_96000,
                .formats = KIRKWOOD_I2S_FORMATS,
        },
        .capture = {
                .channels_min = 1,
                .channels_max = 2,
-               .rates = KIRKWOOD_I2S_RATES,
+               .rates = SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000 |
+                               SNDRV_PCM_RATE_96000,
                .formats = KIRKWOOD_I2S_FORMATS,
        },
        .ops = &kirkwood_i2s_dai_ops,
@@ -431,7 +424,6 @@ static struct snd_soc_dai_driver kirkwood_i2s_dai = {
 
 static struct snd_soc_dai_driver kirkwood_i2s_dai_extclk = {
        .probe = kirkwood_i2s_probe,
-       .remove = kirkwood_i2s_remove,
        .playback = {
                .channels_min = 1,
                .channels_max = 2,
@@ -498,10 +490,10 @@ static int kirkwood_i2s_dev_probe(struct platform_device *pdev)
        if (err < 0)
                return err;
 
-       priv->extclk = clk_get(&pdev->dev, "extclk");
+       priv->extclk = devm_clk_get(&pdev->dev, "extclk");
        if (!IS_ERR(priv->extclk)) {
                if (priv->extclk == priv->clk) {
-                       clk_put(priv->extclk);
+                       devm_clk_put(&pdev->dev, priv->extclk);
                        priv->extclk = ERR_PTR(-EINVAL);
                } else {
                        dev_info(&pdev->dev, "found external clock\n");
@@ -525,14 +517,22 @@ static int kirkwood_i2s_dev_probe(struct platform_device *pdev)
 
        err = snd_soc_register_component(&pdev->dev, &kirkwood_i2s_component,
                                         soc_dai, 1);
-       if (!err)
-               return 0;
-       dev_err(&pdev->dev, "snd_soc_register_component failed\n");
+       if (err) {
+               dev_err(&pdev->dev, "snd_soc_register_component failed\n");
+               goto err_component;
+       }
 
-       if (!IS_ERR(priv->extclk)) {
-               clk_disable_unprepare(priv->extclk);
-               clk_put(priv->extclk);
+       err = snd_soc_register_platform(&pdev->dev, &kirkwood_soc_platform);
+       if (err) {
+               dev_err(&pdev->dev, "snd_soc_register_platform failed\n");
+               goto err_platform;
        }
+       return 0;
+ err_platform:
+       snd_soc_unregister_component(&pdev->dev);
+ err_component:
+       if (!IS_ERR(priv->extclk))
+               clk_disable_unprepare(priv->extclk);
        clk_disable_unprepare(priv->clk);
 
        return err;
@@ -542,12 +542,11 @@ static int kirkwood_i2s_dev_remove(struct platform_device *pdev)
 {
        struct kirkwood_dma_data *priv = dev_get_drvdata(&pdev->dev);
 
+       snd_soc_unregister_platform(&pdev->dev);
        snd_soc_unregister_component(&pdev->dev);
 
-       if (!IS_ERR(priv->extclk)) {
+       if (!IS_ERR(priv->extclk))
                clk_disable_unprepare(priv->extclk);
-               clk_put(priv->extclk);
-       }
        clk_disable_unprepare(priv->clk);
 
        return 0;
@@ -568,4 +567,4 @@ module_platform_driver(kirkwood_i2s_driver);
 MODULE_AUTHOR("Arnaud Patard, <arnaud.patard@rtp-net.org>");
 MODULE_DESCRIPTION("Kirkwood I2S SoC Interface");
 MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:kirkwood-i2s");
+MODULE_ALIAS("platform:mvebu-audio");
index b979c7154715a0ee9407c908f76f9f1ca9f91748..025be0e97164283cbd4ed7516a477bb15bee404b 100644 (file)
@@ -16,9 +16,7 @@
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 #include <sound/soc.h>
-#include <mach/kirkwood.h>
 #include <linux/platform_data/asoc-kirkwood.h>
-#include <asm/mach-types.h>
 #include "../codecs/cs42l51.h"
 
 static int openrd_client_hw_params(struct snd_pcm_substream *substream,
@@ -54,8 +52,8 @@ static struct snd_soc_dai_link openrd_client_dai[] = {
 {
        .name = "CS42L51",
        .stream_name = "CS42L51 HiFi",
-       .cpu_dai_name = "kirkwood-i2s",
-       .platform_name = "kirkwood-pcm-audio",
+       .cpu_dai_name = "mvebu-audio",
+       .platform_name = "mvebu-audio",
        .codec_dai_name = "cs42l51-hifi",
        .codec_name = "cs42l51-codec.0-004a",
        .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_CBS_CFS,
index 1d0ed6f8add7ba5a7236f428476ab6f981522ab6..27545b0c48561470e787c3c167c60514172f10aa 100644 (file)
@@ -15,9 +15,7 @@
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 #include <sound/soc.h>
-#include <mach/kirkwood.h>
 #include <linux/platform_data/asoc-kirkwood.h>
-#include <asm/mach-types.h>
 #include "../codecs/alc5623.h"
 
 static int t5325_hw_params(struct snd_pcm_substream *substream,
@@ -70,8 +68,8 @@ static struct snd_soc_dai_link t5325_dai[] = {
 {
        .name = "ALC5621",
        .stream_name = "ALC5621 HiFi",
-       .cpu_dai_name = "kirkwood-i2s",
-       .platform_name = "kirkwood-pcm-audio",
+       .cpu_dai_name = "mvebu-audio",
+       .platform_name = "mvebu-audio",
        .codec_dai_name = "alc5621-hifi",
        .codec_name = "alc562x-codec.0-001a",
        .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_CBS_CFS,
index 4d92637ddb3fd878f86e4d4525fbd024d94b2618..f8e1ccc1c58cc76f8ce2a314a1c8a0fcc5e998a5 100644 (file)
@@ -54,7 +54,7 @@
 #define KIRKWOOD_PLAYCTL_MONO_OFF              (0<<5)
 #define KIRKWOOD_PLAYCTL_I2S_MUTE              (1<<7)
 #define KIRKWOOD_PLAYCTL_SPDIF_EN              (1<<4)
-#define KIRKWOOD_PLAYCTL_I2S_EN                (1<<3)
+#define KIRKWOOD_PLAYCTL_I2S_EN                        (1<<3)
 #define KIRKWOOD_PLAYCTL_SIZE_MASK             (7<<0)
 #define KIRKWOOD_PLAYCTL_SIZE_16               (7<<0)
 #define KIRKWOOD_PLAYCTL_SIZE_16_C             (3<<0)
@@ -62,6 +62,9 @@
 #define KIRKWOOD_PLAYCTL_SIZE_24               (1<<0)
 #define KIRKWOOD_PLAYCTL_SIZE_32               (0<<0)
 
+#define KIRKWOOD_PLAYCTL_ENABLE_MASK           (KIRKWOOD_PLAYCTL_SPDIF_EN | \
+                                                KIRKWOOD_PLAYCTL_I2S_EN)
+
 #define KIRKWOOD_PLAY_BUF_ADDR                 0x1104
 #define KIRKWOOD_PLAY_BUF_SIZE                 0x1108
 #define KIRKWOOD_PLAY_BYTE_COUNT               0x110C
 #define KIRKWOOD_SND_MAX_PERIODS               16
 #define KIRKWOOD_SND_MIN_PERIOD_BYTES          0x4000
 #define KIRKWOOD_SND_MAX_PERIOD_BYTES          0x4000
+#define KIRKWOOD_SND_MAX_BUFFER_BYTES          (KIRKWOOD_SND_MAX_PERIOD_BYTES \
+                                                * KIRKWOOD_SND_MAX_PERIODS)
 
 struct kirkwood_dma_data {
        void __iomem *io;
@@ -129,8 +134,12 @@ struct kirkwood_dma_data {
        struct clk *extclk;
        uint32_t ctl_play;
        uint32_t ctl_rec;
+       struct snd_pcm_substream *substream_play;
+       struct snd_pcm_substream *substream_rec;
        int irq;
        int burst;
 };
 
+extern struct snd_soc_platform_driver kirkwood_soc_platform;
+
 #endif
index 78d321cbe8b44ab0b06c9142da04d47d725dedf4..219235c022125d84a7565a4c9c962dd5098990d0 100644 (file)
@@ -1,6 +1,7 @@
 menuconfig SND_MXS_SOC
        tristate "SoC Audio for Freescale MXS CPUs"
-       depends on ARCH_MXS
+       depends on ARCH_MXS || COMPILE_TEST
+       depends on COMMON_CLK
        select SND_SOC_GENERIC_DMAENGINE_PCM
        help
          Say Y or M if you want to add support for codecs attached to
index 54511c5e6a7ce8a75a2bc5897dd6a7d6085e007d..b56b8a0e8deb43b6fdca0668fb43677bb1ce47ba 100644 (file)
@@ -31,7 +31,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <asm/mach-types.h>
 
 #include "mxs-saif.h"
 
index 1b134d72f120a60b5956032986b964c6f659ab0e..ce084eb10c49f2f826fac137a14e0ba48e98db6b 100644 (file)
@@ -25,7 +25,6 @@
 #include <sound/soc.h>
 #include <sound/jack.h>
 #include <sound/soc-dapm.h>
-#include <asm/mach-types.h>
 
 #include "../codecs/sgtl5000.h"
 #include "mxs-saif.h"
@@ -51,18 +50,27 @@ static int mxs_sgtl5000_hw_params(struct snd_pcm_substream *substream,
        }
 
        /* Sgtl5000 sysclk should be >= 8MHz and <= 27M */
-       if (mclk < 8000000 || mclk > 27000000)
+       if (mclk < 8000000 || mclk > 27000000) {
+               dev_err(codec_dai->dev, "Invalid mclk frequency: %u.%03uMHz\n",
+                       mclk / 1000000, mclk / 1000 % 1000);
                return -EINVAL;
+       }
 
        /* Set SGTL5000's SYSCLK (provided by SAIF MCLK) */
        ret = snd_soc_dai_set_sysclk(codec_dai, SGTL5000_SYSCLK, mclk, 0);
-       if (ret)
+       if (ret) {
+               dev_err(codec_dai->dev, "Failed to set sysclk to %u.%03uMHz\n",
+                       mclk / 1000000, mclk / 1000 % 1000);
                return ret;
+       }
 
        /* The SAIF MCLK should be the same as SGTL5000_SYSCLK */
        ret = snd_soc_dai_set_sysclk(cpu_dai, MXS_SAIF_MCLK, mclk, 0);
-       if (ret)
+       if (ret) {
+               dev_err(cpu_dai->dev, "Failed to set sysclk to %u.%03uMHz\n",
+                       mclk / 1000000, mclk / 1000 % 1000);
                return ret;
+       }
 
        /* set codec to slave mode */
        dai_format = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
@@ -70,13 +78,19 @@ static int mxs_sgtl5000_hw_params(struct snd_pcm_substream *substream,
 
        /* set codec DAI configuration */
        ret = snd_soc_dai_set_fmt(codec_dai, dai_format);
-       if (ret)
+       if (ret) {
+               dev_err(codec_dai->dev, "Failed to set dai format to %08x\n",
+                       dai_format);
                return ret;
+       }
 
        /* set cpu DAI configuration */
        ret = snd_soc_dai_set_fmt(cpu_dai, dai_format);
-       if (ret)
+       if (ret) {
+               dev_err(cpu_dai->dev, "Failed to set dai format to %08x\n",
+                       dai_format);
                return ret;
+       }
 
        return 0;
 }
@@ -154,8 +168,10 @@ static int mxs_sgtl5000_probe(struct platform_device *pdev)
         * should be >= 8MHz and <= 27M.
         */
        ret = mxs_saif_get_mclk(0, 44100 * 256, 44100);
-       if (ret)
+       if (ret) {
+               dev_err(&pdev->dev, "failed to get mclk\n");
                return ret;
+       }
 
        card->dev = &pdev->dev;
        platform_set_drvdata(pdev, card);
index f4c2417a8730bcf0ae99c37fe1bfcc76a685cb51..8987bf987e584f6ba7a53b93516056dfbf5bc9b5 100644 (file)
@@ -333,9 +333,6 @@ static int nuc900_ac97_drvprobe(struct platform_device *pdev)
        spin_lock_init(&nuc900_audio->lock);
 
        nuc900_audio->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!nuc900_audio->res)
-               return ret;
-
        nuc900_audio->mmio = devm_ioremap_resource(&pdev->dev,
                                                   nuc900_audio->res);
        if (IS_ERR(nuc900_audio->mmio))
index 9f5d55e6b17a53d7e50f2a3c392f565b0fb2e014..daa78a0095facf5ff35d1eb0b6ca61836b7c9210 100644 (file)
@@ -1,7 +1,7 @@
 config SND_OMAP_SOC
        tristate "SoC Audio for the Texas Instruments OMAP chips"
-       depends on ARCH_OMAP && DMA_OMAP
-       select SND_SOC_DMAENGINE_PCM
+       depends on (ARCH_OMAP && DMA_OMAP) || (ARCH_ARM && COMPILE_TEST)
+       select SND_DMAENGINE_PCM
 
 config SND_OMAP_SOC_DMIC
        tristate
@@ -26,7 +26,7 @@ config SND_OMAP_SOC_N810
 
 config SND_OMAP_SOC_RX51
        tristate "SoC Audio support for Nokia RX-51"
-       depends on SND_OMAP_SOC && MACH_NOKIA_RX51
+       depends on SND_OMAP_SOC && ARCH_ARM && (MACH_NOKIA_RX51 || COMPILE_TEST)
        select SND_OMAP_SOC_MCBSP
        select SND_SOC_TLV320AIC3X
        select SND_SOC_TPA6130A2
@@ -87,7 +87,7 @@ config SND_OMAP_SOC_OMAP_TWL4030
 
 config SND_OMAP_SOC_OMAP_ABE_TWL6040
        tristate "SoC Audio support for OMAP boards using ABE and twl6040 codec"
-       depends on TWL6040_CORE && SND_OMAP_SOC && ARCH_OMAP4
+       depends on TWL6040_CORE && SND_OMAP_SOC && (ARCH_OMAP4 || COMPILE_TEST)
        select SND_OMAP_SOC_DMIC
        select SND_OMAP_SOC_MCPDM
        select SND_SOC_TWL6040
index 361e4c03646e32a9b73d76122a13e3e071c7a9b0..83433fdea32ad790f839edd357c14824def13c07 100644 (file)
@@ -781,7 +781,7 @@ static ssize_t prop##_store(struct device *dev,                             \
        unsigned long val;                                              \
        int status;                                                     \
                                                                        \
-       status = strict_strtoul(buf, 0, &val);                          \
+       status = kstrtoul(buf, 0, &val);                                \
        if (status)                                                     \
                return status;                                          \
                                                                        \
index 70cd5c7b2e145622b031ffd320786599738091de..ebb13906b3a0b41b4d3c20509c613378da5ae248 100644 (file)
@@ -23,7 +23,6 @@
 #include <linux/clk.h>
 #include <linux/platform_device.h>
 #include <linux/mfd/twl6040.h>
-#include <linux/platform_data/omap-abe-twl6040.h>
 #include <linux/module.h>
 #include <linux/of.h>
 
@@ -166,19 +165,10 @@ static const struct snd_soc_dapm_route audio_map[] = {
        {"AFMR", NULL, "Line In"},
 };
 
-static inline void twl6040_disconnect_pin(struct snd_soc_dapm_context *dapm,
-                                         int connected, char *pin)
-{
-       if (!connected)
-               snd_soc_dapm_disable_pin(dapm, pin);
-}
-
 static int omap_abe_twl6040_init(struct snd_soc_pcm_runtime *rtd)
 {
        struct snd_soc_codec *codec = rtd->codec;
        struct snd_soc_card *card = codec->card;
-       struct snd_soc_dapm_context *dapm = &codec->dapm;
-       struct omap_abe_twl6040_data *pdata = dev_get_platdata(card->dev);
        struct abe_twl6040 *priv = snd_soc_card_get_drvdata(card);
        int hs_trim;
        int ret = 0;
@@ -203,24 +193,6 @@ static int omap_abe_twl6040_init(struct snd_soc_pcm_runtime *rtd)
                twl6040_hs_jack_detect(codec, &hs_jack, SND_JACK_HEADSET);
        }
 
-       /*
-        * NULL pdata means we booted with DT. In this case the routing is
-        * provided and the card is fully routed, no need to mark pins.
-        */
-       if (!pdata)
-               return ret;
-
-       /* Disable not connected paths if not used */
-       twl6040_disconnect_pin(dapm, pdata->has_hs, "Headset Stereophone");
-       twl6040_disconnect_pin(dapm, pdata->has_hf, "Ext Spk");
-       twl6040_disconnect_pin(dapm, pdata->has_ep, "Earphone Spk");
-       twl6040_disconnect_pin(dapm, pdata->has_aux, "Line Out");
-       twl6040_disconnect_pin(dapm, pdata->has_vibra, "Vibrator");
-       twl6040_disconnect_pin(dapm, pdata->has_hsmic, "Headset Mic");
-       twl6040_disconnect_pin(dapm, pdata->has_mainmic, "Main Handset Mic");
-       twl6040_disconnect_pin(dapm, pdata->has_submic, "Sub Handset Mic");
-       twl6040_disconnect_pin(dapm, pdata->has_afm, "Line In");
-
        return ret;
 }
 
@@ -274,13 +246,18 @@ static struct snd_soc_card omap_abe_card = {
 
 static int omap_abe_probe(struct platform_device *pdev)
 {
-       struct omap_abe_twl6040_data *pdata = dev_get_platdata(&pdev->dev);
        struct device_node *node = pdev->dev.of_node;
        struct snd_soc_card *card = &omap_abe_card;
+       struct device_node *dai_node;
        struct abe_twl6040 *priv;
        int num_links = 0;
        int ret = 0;
 
+       if (!node) {
+               dev_err(&pdev->dev, "of node is missing.\n");
+               return -ENODEV;
+       }
+
        card->dev = &pdev->dev;
 
        priv = devm_kzalloc(&pdev->dev, sizeof(struct abe_twl6040), GFP_KERNEL);
@@ -289,78 +266,50 @@ static int omap_abe_probe(struct platform_device *pdev)
 
        priv->dmic_codec_dev = ERR_PTR(-EINVAL);
 
-       if (node) {
-               struct device_node *dai_node;
-
-               if (snd_soc_of_parse_card_name(card, "ti,model")) {
-                       dev_err(&pdev->dev, "Card name is not provided\n");
-                       return -ENODEV;
-               }
+       if (snd_soc_of_parse_card_name(card, "ti,model")) {
+               dev_err(&pdev->dev, "Card name is not provided\n");
+               return -ENODEV;
+       }
 
-               ret = snd_soc_of_parse_audio_routing(card,
-                                               "ti,audio-routing");
-               if (ret) {
-                       dev_err(&pdev->dev,
-                               "Error while parsing DAPM routing\n");
-                       return ret;
-               }
+       ret = snd_soc_of_parse_audio_routing(card, "ti,audio-routing");
+       if (ret) {
+               dev_err(&pdev->dev, "Error while parsing DAPM routing\n");
+               return ret;
+       }
 
-               dai_node = of_parse_phandle(node, "ti,mcpdm", 0);
-               if (!dai_node) {
-                       dev_err(&pdev->dev, "McPDM node is not provided\n");
-                       return -EINVAL;
-               }
-               abe_twl6040_dai_links[0].cpu_dai_name  = NULL;
-               abe_twl6040_dai_links[0].cpu_of_node = dai_node;
+       dai_node = of_parse_phandle(node, "ti,mcpdm", 0);
+       if (!dai_node) {
+               dev_err(&pdev->dev, "McPDM node is not provided\n");
+               return -EINVAL;
+       }
+       abe_twl6040_dai_links[0].cpu_dai_name  = NULL;
+       abe_twl6040_dai_links[0].cpu_of_node = dai_node;
 
-               dai_node = of_parse_phandle(node, "ti,dmic", 0);
-               if (dai_node) {
-                       num_links = 2;
-                       abe_twl6040_dai_links[1].cpu_dai_name  = NULL;
-                       abe_twl6040_dai_links[1].cpu_of_node = dai_node;
+       dai_node = of_parse_phandle(node, "ti,dmic", 0);
+       if (dai_node) {
+               num_links = 2;
+               abe_twl6040_dai_links[1].cpu_dai_name  = NULL;
+               abe_twl6040_dai_links[1].cpu_of_node = dai_node;
 
-                       priv->dmic_codec_dev = platform_device_register_simple(
+               priv->dmic_codec_dev = platform_device_register_simple(
                                                "dmic-codec", -1, NULL, 0);
-                       if (IS_ERR(priv->dmic_codec_dev)) {
-                               dev_err(&pdev->dev,
-                                       "Can't instantiate dmic-codec\n");
-                               return PTR_ERR(priv->dmic_codec_dev);
-                       }
-               } else {
-                       num_links = 1;
-               }
-
-               priv->jack_detection = of_property_read_bool(node,
-                                                          "ti,jack-detection");
-               of_property_read_u32(node, "ti,mclk-freq",
-                                    &priv->mclk_freq);
-               if (!priv->mclk_freq) {
-                       dev_err(&pdev->dev, "MCLK frequency not provided\n");
-                       ret = -EINVAL;
-                       goto err_unregister;
+               if (IS_ERR(priv->dmic_codec_dev)) {
+                       dev_err(&pdev->dev, "Can't instantiate dmic-codec\n");
+                       return PTR_ERR(priv->dmic_codec_dev);
                }
-
-               omap_abe_card.fully_routed = 1;
-       } else if (pdata) {
-               if (pdata->card_name) {
-                       card->name = pdata->card_name;
-               } else {
-                       dev_err(&pdev->dev, "Card name is not provided\n");
-                       return -ENODEV;
-               }
-
-               if (pdata->has_dmic)
-                       num_links = 2;
-               else
-                       num_links = 1;
-
-               priv->jack_detection = pdata->jack_detection;
-               priv->mclk_freq = pdata->mclk_freq;
        } else {
-               dev_err(&pdev->dev, "Missing pdata\n");
-               return -ENODEV;
+               num_links = 1;
+       }
+
+       priv->jack_detection = of_property_read_bool(node, "ti,jack-detection");
+       of_property_read_u32(node, "ti,mclk-freq", &priv->mclk_freq);
+       if (!priv->mclk_freq) {
+               dev_err(&pdev->dev, "MCLK frequency not provided\n");
+               ret = -EINVAL;
+               goto err_unregister;
        }
 
+       card->fully_routed = 1;
 
        if (!priv->mclk_freq) {
                dev_err(&pdev->dev, "MCLK frequency missing\n");
index 7483efb6dc674e9bdb90e9ea3bd39a7505a9c34b..6c19bba2357004903477a7300582d193b26f2962 100644 (file)
@@ -433,6 +433,11 @@ static int omap_mcbsp_dai_set_dai_fmt(struct snd_soc_dai *cpu_dai,
                /* Sample rate generator drives the FS */
                regs->srgr2     |= FSGM;
                break;
+       case SND_SOC_DAIFMT_CBM_CFS:
+               /* McBSP slave. FS clock as output */
+               regs->srgr2     |= FSGM;
+               regs->pcr0      |= FSXM;
+               break;
        case SND_SOC_DAIFMT_CBM_CFM:
                /* McBSP slave */
                break;
index b3580946754702b87a883220d547aeee26b04388..4db74a083db1ef792f90dde317ee9e7556eb4e1d 100644 (file)
@@ -11,7 +11,7 @@ config SND_PXA2XX_SOC
 config SND_MMP_SOC
        bool "Soc Audio for Marvell MMP chips"
        depends on ARCH_MMP
-       select SND_SOC_DMAENGINE_PCM
+       select SND_DMAENGINE_PCM
        select SND_ARM
        help
          Say Y if you want to add support for codecs attached to
index 4ad76099dd43fdec0d9e7262db77f1e07cd20713..5b7d969f89a96eebd418687ef5b7abc79d74ee97 100644 (file)
@@ -129,6 +129,7 @@ static struct snd_soc_dai_link brownstone_wm8994_dai[] = {
 /* audio machine driver */
 static struct snd_soc_card brownstone = {
        .name         = "brownstone",
+       .owner        = THIS_MODULE,
        .dai_link     = brownstone_wm8994_dai,
        .num_links    = ARRAY_SIZE(brownstone_wm8994_dai),
 
index 97b711e12821e02c2b62c6ceb377fb4a942ee71f..bbea7780eac640630803a024d3d0670b01c3c1eb 100644 (file)
@@ -56,8 +56,6 @@
 #include "pxa2xx-ac97.h"
 #include "../codecs/wm9713.h"
 
-#define ARRAY_AND_SIZE(x)      (x), ARRAY_SIZE(x)
-
 #define AC97_GPIO_PULL         0x58
 
 /* Use GPIO8 for rear speaker amplifier */
@@ -133,10 +131,11 @@ static int mioa701_wm9713_init(struct snd_soc_pcm_runtime *rtd)
        unsigned short reg;
 
        /* Add mioa701 specific widgets */
-       snd_soc_dapm_new_controls(dapm, ARRAY_AND_SIZE(mioa701_dapm_widgets));
+       snd_soc_dapm_new_controls(dapm, mioa701_dapm_widgets,
+                                 ARRAY_SIZE(mioa701_dapm_widgets));
 
        /* Set up mioa701 specific audio path audio_mapnects */
-       snd_soc_dapm_add_routes(dapm, ARRAY_AND_SIZE(audio_map));
+       snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
        /* Prepare GPIO8 for rear speaker amplifier */
        reg = codec->driver->read(codec, AC97_GPIO_CFG);
index 5d57e071cdf522e8f8b11670c32a47595b5d6e92..9a97843ab09f1270e97e7751a2ce86b637d85757 100644 (file)
@@ -17,6 +17,8 @@
 #include <linux/dmaengine.h>
 #include <linux/platform_data/dma-mmp_tdma.h>
 #include <linux/platform_data/mmp_audio.h>
+#include <linux/dmaengine.h>
+
 #include <sound/pxa2xx-lib.h>
 #include <sound/core.h>
 #include <sound/pcm.h>
@@ -67,7 +69,7 @@ static int mmp_pcm_hw_params(struct snd_pcm_substream *substream,
 {
        struct dma_chan *chan = snd_dmaengine_pcm_get_chan(substream);
        struct snd_soc_pcm_runtime *rtd = substream->private_data;
-       struct pxa2xx_pcm_dma_params *dma_params;
+       struct snd_dmaengine_dai_dma_data *dma_params;
        struct dma_slave_config slave_config;
        int ret;
 
@@ -80,10 +82,10 @@ static int mmp_pcm_hw_params(struct snd_pcm_substream *substream,
                return ret;
 
        if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
-               slave_config.dst_addr     = dma_params->dev_addr;
+               slave_config.dst_addr     = dma_params->addr;
                slave_config.dst_maxburst = 4;
        } else {
-               slave_config.src_addr     = dma_params->dev_addr;
+               slave_config.src_addr     = dma_params->addr;
                slave_config.src_maxburst = 4;
        }
 
index 62142ce367c7e68fcffbe7b34b92b7364d7a0af3..41752a5fe3b07148c5402b2ea66d6f292fdc3133 100644 (file)
 #include <linux/slab.h>
 #include <linux/pxa2xx_ssp.h>
 #include <linux/io.h>
+#include <linux/dmaengine.h>
+
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/initval.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
 #include <sound/pxa2xx-lib.h>
+#include <sound/dmaengine_pcm.h>
 #include "mmp-sspa.h"
 
 /*
@@ -40,7 +43,7 @@
  */
 struct sspa_priv {
        struct ssp_device *sspa;
-       struct pxa2xx_pcm_dma_params *dma_params;
+       struct snd_dmaengine_dai_dma_data *dma_params;
        struct clk *audio_clk;
        struct clk *sysclk;
        int dai_fmt;
@@ -266,7 +269,7 @@ static int mmp_sspa_hw_params(struct snd_pcm_substream *substream,
        struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
        struct sspa_priv *sspa_priv = snd_soc_dai_get_drvdata(dai);
        struct ssp_device *sspa = sspa_priv->sspa;
-       struct pxa2xx_pcm_dma_params *dma_params;
+       struct snd_dmaengine_dai_dma_data *dma_params;
        u32 sspa_ctrl;
 
        if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
@@ -309,7 +312,7 @@ static int mmp_sspa_hw_params(struct snd_pcm_substream *substream,
        }
 
        dma_params = &sspa_priv->dma_params[substream->stream];
-       dma_params->dev_addr = substream->stream == SNDRV_PCM_STREAM_PLAYBACK ?
+       dma_params->addr = substream->stream == SNDRV_PCM_STREAM_PLAYBACK ?
                                (sspa->phys_base + SSPA_TXD) :
                                (sspa->phys_base + SSPA_RXD);
        snd_soc_dai_set_dma_data(cpu_dai, substream, dma_params);
@@ -425,14 +428,12 @@ static int asoc_mmp_sspa_probe(struct platform_device *pdev)
                return -ENOMEM;
 
        priv->dma_params = devm_kzalloc(&pdev->dev,
-                       2 * sizeof(struct pxa2xx_pcm_dma_params), GFP_KERNEL);
+                       2 * sizeof(struct snd_dmaengine_dai_dma_data),
+                       GFP_KERNEL);
        if (priv->dma_params == NULL)
                return -ENOMEM;
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (res == NULL)
-               return -ENOMEM;
-
        priv->sspa->mmio_base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(priv->sspa->mmio_base))
                return PTR_ERR(priv->sspa->mmio_base);
index 6f4dd7543e829db8e73577f1184c667a596c89d0..a3119a00d8fac8ef8295ef3593f16a60c2603aab 100644 (file)
@@ -21,6 +21,8 @@
 #include <linux/clk.h>
 #include <linux/io.h>
 #include <linux/pxa2xx_ssp.h>
+#include <linux/of.h>
+#include <linux/dmaengine.h>
 
 #include <asm/irq.h>
 
@@ -30,9 +32,9 @@
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
 #include <sound/pxa2xx-lib.h>
+#include <sound/dmaengine_pcm.h>
 
 #include <mach/hardware.h>
-#include <mach/dma.h>
 
 #include "../../arm/pxa2xx-pcm.h"
 #include "pxa-ssp.h"
@@ -79,27 +81,13 @@ static void pxa_ssp_disable(struct ssp_device *ssp)
        __raw_writel(sscr0, ssp->mmio_base + SSCR0);
 }
 
-struct pxa2xx_pcm_dma_data {
-       struct pxa2xx_pcm_dma_params params;
-       char name[20];
-};
-
 static void pxa_ssp_set_dma_params(struct ssp_device *ssp, int width4,
-                       int out, struct pxa2xx_pcm_dma_params *dma_data)
+                       int out, struct snd_dmaengine_dai_dma_data *dma)
 {
-       struct pxa2xx_pcm_dma_data *dma;
-
-       dma = container_of(dma_data, struct pxa2xx_pcm_dma_data, params);
-
-       snprintf(dma->name, 20, "SSP%d PCM %s %s", ssp->port_id,
-                       width4 ? "32-bit" : "16-bit", out ? "out" : "in");
-
-       dma->params.name = dma->name;
-       dma->params.drcmr = &DRCMR(out ? ssp->drcmr_tx : ssp->drcmr_rx);
-       dma->params.dcmd = (out ? (DCMD_INCSRCADDR | DCMD_FLOWTRG) :
-                                 (DCMD_INCTRGADDR | DCMD_FLOWSRC)) |
-                       (width4 ? DCMD_WIDTH4 : DCMD_WIDTH2) | DCMD_BURST16;
-       dma->params.dev_addr = ssp->phys_base + SSDR;
+       dma->addr_width = width4 ? DMA_SLAVE_BUSWIDTH_4_BYTES :
+                                  DMA_SLAVE_BUSWIDTH_2_BYTES;
+       dma->maxburst = 16;
+       dma->addr = ssp->phys_base + SSDR;
 }
 
 static int pxa_ssp_startup(struct snd_pcm_substream *substream,
@@ -107,7 +95,7 @@ static int pxa_ssp_startup(struct snd_pcm_substream *substream,
 {
        struct ssp_priv *priv = snd_soc_dai_get_drvdata(cpu_dai);
        struct ssp_device *ssp = priv->ssp;
-       struct pxa2xx_pcm_dma_data *dma;
+       struct snd_dmaengine_dai_dma_data *dma;
        int ret = 0;
 
        if (!cpu_dai->active) {
@@ -115,10 +103,14 @@ static int pxa_ssp_startup(struct snd_pcm_substream *substream,
                pxa_ssp_disable(ssp);
        }
 
-       dma = kzalloc(sizeof(struct pxa2xx_pcm_dma_data), GFP_KERNEL);
+       dma = kzalloc(sizeof(struct snd_dmaengine_dai_dma_data), GFP_KERNEL);
        if (!dma)
                return -ENOMEM;
-       snd_soc_dai_set_dma_data(cpu_dai, substream, &dma->params);
+
+       dma->filter_data = substream->stream == SNDRV_PCM_STREAM_PLAYBACK ?
+                               &ssp->drcmr_tx : &ssp->drcmr_rx;
+
+       snd_soc_dai_set_dma_data(cpu_dai, substream, dma);
 
        return ret;
 }
@@ -559,7 +551,7 @@ static int pxa_ssp_hw_params(struct snd_pcm_substream *substream,
        u32 sspsp;
        int width = snd_pcm_format_physical_width(params_format(params));
        int ttsa = pxa_ssp_read_reg(ssp, SSTSA) & 0xf;
-       struct pxa2xx_pcm_dma_params *dma_data;
+       struct snd_dmaengine_dai_dma_data *dma_data;
 
        dma_data = snd_soc_dai_get_dma_data(cpu_dai, substream);
 
@@ -719,6 +711,7 @@ static int pxa_ssp_trigger(struct snd_pcm_substream *substream, int cmd,
 
 static int pxa_ssp_probe(struct snd_soc_dai *dai)
 {
+       struct device *dev = dai->dev;
        struct ssp_priv *priv;
        int ret;
 
@@ -726,10 +719,26 @@ static int pxa_ssp_probe(struct snd_soc_dai *dai)
        if (!priv)
                return -ENOMEM;
 
-       priv->ssp = pxa_ssp_request(dai->id + 1, "SoC audio");
-       if (priv->ssp == NULL) {
-               ret = -ENODEV;
-               goto err_priv;
+       if (dev->of_node) {
+               struct device_node *ssp_handle;
+
+               ssp_handle = of_parse_phandle(dev->of_node, "port", 0);
+               if (!ssp_handle) {
+                       dev_err(dev, "unable to get 'port' phandle\n");
+                       return -ENODEV;
+               }
+
+               priv->ssp = pxa_ssp_request_of(ssp_handle, "SoC audio");
+               if (priv->ssp == NULL) {
+                       ret = -ENODEV;
+                       goto err_priv;
+               }
+       } else {
+               priv->ssp = pxa_ssp_request(dai->id + 1, "SoC audio");
+               if (priv->ssp == NULL) {
+                       ret = -ENODEV;
+                       goto err_priv;
+               }
        }
 
        priv->dai_fmt = (unsigned int) -1;
@@ -798,6 +807,12 @@ static const struct snd_soc_component_driver pxa_ssp_component = {
        .name           = "pxa-ssp",
 };
 
+#ifdef CONFIG_OF
+static const struct of_device_id pxa_ssp_of_ids[] = {
+       { .compatible = "mrvl,pxa-ssp-dai" },
+};
+#endif
+
 static int asoc_ssp_probe(struct platform_device *pdev)
 {
        return snd_soc_register_component(&pdev->dev, &pxa_ssp_component,
@@ -812,8 +827,9 @@ static int asoc_ssp_remove(struct platform_device *pdev)
 
 static struct platform_driver asoc_ssp_driver = {
        .driver = {
-                       .name = "pxa-ssp-dai",
-                       .owner = THIS_MODULE,
+               .name = "pxa-ssp-dai",
+               .owner = THIS_MODULE,
+               .of_match_table = of_match_ptr(pxa_ssp_of_ids),
        },
 
        .probe = asoc_ssp_probe,
index 1475515712e65616b345f3d7e56ddedf954f6b4a..f1059d999de6d4128791861d2325cd8bf68371dd 100644 (file)
 #include <linux/io.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
+#include <linux/dmaengine.h>
 
 #include <sound/core.h>
 #include <sound/ac97_codec.h>
 #include <sound/soc.h>
 #include <sound/pxa2xx-lib.h>
+#include <sound/dmaengine_pcm.h>
 
 #include <mach/hardware.h>
 #include <mach/regs-ac97.h>
-#include <mach/dma.h>
 #include <mach/audio.h>
 
 #include "pxa2xx-ac97.h"
@@ -48,44 +49,44 @@ static struct snd_ac97_bus_ops pxa2xx_ac97_ops = {
        .reset  = pxa2xx_ac97_cold_reset,
 };
 
-static struct pxa2xx_pcm_dma_params pxa2xx_ac97_pcm_stereo_out = {
-       .name                   = "AC97 PCM Stereo out",
-       .dev_addr               = __PREG(PCDR),
-       .drcmr                  = &DRCMR(12),
-       .dcmd                   = DCMD_INCSRCADDR | DCMD_FLOWTRG |
-                                 DCMD_BURST32 | DCMD_WIDTH4,
+static unsigned long pxa2xx_ac97_pcm_stereo_in_req = 12;
+static struct snd_dmaengine_dai_dma_data pxa2xx_ac97_pcm_stereo_in = {
+       .addr           = __PREG(PCDR),
+       .addr_width     = DMA_SLAVE_BUSWIDTH_4_BYTES,
+       .maxburst       = 32,
+       .filter_data    = &pxa2xx_ac97_pcm_stereo_in_req,
 };
 
-static struct pxa2xx_pcm_dma_params pxa2xx_ac97_pcm_stereo_in = {
-       .name                   = "AC97 PCM Stereo in",
-       .dev_addr               = __PREG(PCDR),
-       .drcmr                  = &DRCMR(11),
-       .dcmd                   = DCMD_INCTRGADDR | DCMD_FLOWSRC |
-                                 DCMD_BURST32 | DCMD_WIDTH4,
+static unsigned long pxa2xx_ac97_pcm_stereo_out_req = 11;
+static struct snd_dmaengine_dai_dma_data pxa2xx_ac97_pcm_stereo_out = {
+       .addr           = __PREG(PCDR),
+       .addr_width     = DMA_SLAVE_BUSWIDTH_4_BYTES,
+       .maxburst       = 32,
+       .filter_data    = &pxa2xx_ac97_pcm_stereo_out_req,
 };
 
-static struct pxa2xx_pcm_dma_params pxa2xx_ac97_pcm_aux_mono_out = {
-       .name                   = "AC97 Aux PCM (Slot 5) Mono out",
-       .dev_addr               = __PREG(MODR),
-       .drcmr                  = &DRCMR(10),
-       .dcmd                   = DCMD_INCSRCADDR | DCMD_FLOWTRG |
-                                 DCMD_BURST16 | DCMD_WIDTH2,
+static unsigned long pxa2xx_ac97_pcm_aux_mono_out_req = 10;
+static struct snd_dmaengine_dai_dma_data pxa2xx_ac97_pcm_aux_mono_out = {
+       .addr           = __PREG(MODR),
+       .addr_width     = DMA_SLAVE_BUSWIDTH_2_BYTES,
+       .maxburst       = 16,
+       .filter_data    = &pxa2xx_ac97_pcm_aux_mono_out_req,
 };
 
-static struct pxa2xx_pcm_dma_params pxa2xx_ac97_pcm_aux_mono_in = {
-       .name                   = "AC97 Aux PCM (Slot 5) Mono in",
-       .dev_addr               = __PREG(MODR),
-       .drcmr                  = &DRCMR(9),
-       .dcmd                   = DCMD_INCTRGADDR | DCMD_FLOWSRC |
-                                 DCMD_BURST16 | DCMD_WIDTH2,
+static unsigned long pxa2xx_ac97_pcm_aux_mono_in_req = 9;
+static struct snd_dmaengine_dai_dma_data pxa2xx_ac97_pcm_aux_mono_in = {
+       .addr           = __PREG(MODR),
+       .addr_width     = DMA_SLAVE_BUSWIDTH_2_BYTES,
+       .maxburst       = 16,
+       .filter_data    = &pxa2xx_ac97_pcm_aux_mono_in_req,
 };
 
-static struct pxa2xx_pcm_dma_params pxa2xx_ac97_pcm_mic_mono_in = {
-       .name                   = "AC97 Mic PCM (Slot 6) Mono in",
-       .dev_addr               = __PREG(MCDR),
-       .drcmr                  = &DRCMR(8),
-       .dcmd                   = DCMD_INCTRGADDR | DCMD_FLOWSRC |
-                                 DCMD_BURST16 | DCMD_WIDTH2,
+static unsigned long pxa2xx_ac97_pcm_aux_mic_mono_req = 8;
+static struct snd_dmaengine_dai_dma_data pxa2xx_ac97_pcm_mic_mono_in = {
+       .addr           = __PREG(MCDR),
+       .addr_width     = DMA_SLAVE_BUSWIDTH_2_BYTES,
+       .maxburst       = 16,
+       .filter_data    = &pxa2xx_ac97_pcm_aux_mic_mono_req,
 };
 
 #ifdef CONFIG_PM
@@ -119,7 +120,7 @@ static int pxa2xx_ac97_hw_params(struct snd_pcm_substream *substream,
                                 struct snd_pcm_hw_params *params,
                                 struct snd_soc_dai *cpu_dai)
 {
-       struct pxa2xx_pcm_dma_params *dma_data;
+       struct snd_dmaengine_dai_dma_data *dma_data;
 
        if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
                dma_data = &pxa2xx_ac97_pcm_stereo_out;
@@ -135,7 +136,7 @@ static int pxa2xx_ac97_hw_aux_params(struct snd_pcm_substream *substream,
                                     struct snd_pcm_hw_params *params,
                                     struct snd_soc_dai *cpu_dai)
 {
-       struct pxa2xx_pcm_dma_params *dma_data;
+       struct snd_dmaengine_dai_dma_data *dma_data;
 
        if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
                dma_data = &pxa2xx_ac97_pcm_aux_mono_out;
index f7ca716641126bdb65ff5815823a500719bbeba8..d5340a088858eff3f8a8b578a38a2ea3e7476bf1 100644 (file)
@@ -23,9 +23,9 @@
 #include <sound/initval.h>
 #include <sound/soc.h>
 #include <sound/pxa2xx-lib.h>
+#include <sound/dmaengine_pcm.h>
 
 #include <mach/hardware.h>
-#include <mach/dma.h>
 #include <mach/audio.h>
 
 #include "pxa2xx-i2s.h"
@@ -82,20 +82,20 @@ static struct pxa_i2s_port pxa_i2s;
 static struct clk *clk_i2s;
 static int clk_ena = 0;
 
-static struct pxa2xx_pcm_dma_params pxa2xx_i2s_pcm_stereo_out = {
-       .name                   = "I2S PCM Stereo out",
-       .dev_addr               = __PREG(SADR),
-       .drcmr                  = &DRCMR(3),
-       .dcmd                   = DCMD_INCSRCADDR | DCMD_FLOWTRG |
-                                 DCMD_BURST32 | DCMD_WIDTH4,
+static unsigned long pxa2xx_i2s_pcm_stereo_out_req = 3;
+static struct snd_dmaengine_dai_dma_data pxa2xx_i2s_pcm_stereo_out = {
+       .addr           = __PREG(SADR),
+       .addr_width     = DMA_SLAVE_BUSWIDTH_4_BYTES,
+       .maxburst       = 32,
+       .filter_data    = &pxa2xx_i2s_pcm_stereo_out_req,
 };
 
-static struct pxa2xx_pcm_dma_params pxa2xx_i2s_pcm_stereo_in = {
-       .name                   = "I2S PCM Stereo in",
-       .dev_addr               = __PREG(SADR),
-       .drcmr                  = &DRCMR(2),
-       .dcmd                   = DCMD_INCTRGADDR | DCMD_FLOWSRC |
-                                 DCMD_BURST32 | DCMD_WIDTH4,
+static unsigned long pxa2xx_i2s_pcm_stereo_in_req = 2;
+static struct snd_dmaengine_dai_dma_data pxa2xx_i2s_pcm_stereo_in = {
+       .addr           = __PREG(SADR),
+       .addr_width     = DMA_SLAVE_BUSWIDTH_4_BYTES,
+       .maxburst       = 32,
+       .filter_data    = &pxa2xx_i2s_pcm_stereo_in_req,
 };
 
 static int pxa2xx_i2s_startup(struct snd_pcm_substream *substream,
@@ -163,7 +163,7 @@ static int pxa2xx_i2s_hw_params(struct snd_pcm_substream *substream,
                                struct snd_pcm_hw_params *params,
                                struct snd_soc_dai *dai)
 {
-       struct pxa2xx_pcm_dma_params *dma_data;
+       struct snd_dmaengine_dai_dma_data *dma_data;
 
        BUG_ON(IS_ERR(clk_i2s));
        clk_prepare_enable(clk_i2s);
index ecff116cb7b034484cd3c420ae483d576ef0db95..806da27b8b671ed8596484856ec59f8092c75e8d 100644 (file)
 
 #include <linux/dma-mapping.h>
 #include <linux/module.h>
+#include <linux/dmaengine.h>
+#include <linux/of.h>
 
 #include <sound/core.h>
 #include <sound/soc.h>
 #include <sound/pxa2xx-lib.h>
+#include <sound/dmaengine_pcm.h>
 
 #include "../../arm/pxa2xx-pcm.h"
 
@@ -25,7 +28,7 @@ static int pxa2xx_pcm_hw_params(struct snd_pcm_substream *substream,
        struct snd_pcm_runtime *runtime = substream->runtime;
        struct pxa2xx_runtime_data *prtd = runtime->private_data;
        struct snd_soc_pcm_runtime *rtd = substream->private_data;
-       struct pxa2xx_pcm_dma_params *dma;
+       struct snd_dmaengine_dai_dma_data *dma;
        int ret;
 
        dma = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
@@ -39,7 +42,7 @@ static int pxa2xx_pcm_hw_params(struct snd_pcm_substream *substream,
         * with different params */
        if (prtd->params == NULL) {
                prtd->params = dma;
-               ret = pxa_request_dma(prtd->params->name, DMA_PRIO_LOW,
+               ret = pxa_request_dma("name", DMA_PRIO_LOW,
                              pxa2xx_pcm_dma_irq, substream);
                if (ret < 0)
                        return ret;
@@ -47,7 +50,7 @@ static int pxa2xx_pcm_hw_params(struct snd_pcm_substream *substream,
        } else if (prtd->params != dma) {
                pxa_free_dma(prtd->dma_ch);
                prtd->params = dma;
-               ret = pxa_request_dma(prtd->params->name, DMA_PRIO_LOW,
+               ret = pxa_request_dma("name", DMA_PRIO_LOW,
                              pxa2xx_pcm_dma_irq, substream);
                if (ret < 0)
                        return ret;
@@ -131,10 +134,18 @@ static int pxa2xx_soc_platform_remove(struct platform_device *pdev)
        return 0;
 }
 
+#ifdef CONFIG_OF
+static const struct of_device_id snd_soc_pxa_audio_match[] = {
+       { .compatible   = "mrvl,pxa-pcm-audio" },
+       { }
+};
+#endif
+
 static struct platform_driver pxa_pcm_driver = {
        .driver = {
-                       .name = "pxa-pcm-audio",
-                       .owner = THIS_MODULE,
+               .name = "pxa-pcm-audio",
+               .owner = THIS_MODULE,
+               .of_match_table = of_match_ptr(snd_soc_pxa_audio_match),
        },
 
        .probe = pxa2xx_soc_platform_probe,
index f4ea4f6663a2c0d396997d7aef5cc1871f6ea6e5..13c9ee0cb83b988b06cb9b84963128b1cd703032 100644 (file)
@@ -122,6 +122,7 @@ static struct snd_soc_dai_link ttc_pm860x_hifi_dai[] = {
 /* ttc/td audio machine driver */
 static struct snd_soc_card ttc_dkb_card = {
        .name = "ttc-dkb-hifi",
+       .owner = THIS_MODULE,
        .dai_link = ttc_pm860x_hifi_dai,
        .num_links = ARRAY_SIZE(ttc_pm860x_hifi_dai),
 
index 58cfb1eb7dd3aece2e20b7c45421b0aa5cab5533..945e8abdc10fbf9579a1f89bd9a6a0610980256f 100644 (file)
@@ -192,7 +192,7 @@ static struct snd_soc_card snd_soc_card_s6105 = {
        .num_links = 1,
 };
 
-static struct s6000_snd_platform_data __initdata s6105_snd_data = {
+static struct s6000_snd_platform_data s6105_snd_data __initdata = {
        .wide           = 0,
        .channel_in     = 0,
        .channel_out    = 1,
index 2dd623fa3882b8fe17ceab2c6a86f45f54059f9d..2acf987844e8089704a4bf6d0ccde5ece2cc39e3 100644 (file)
@@ -404,18 +404,13 @@ static int s3c_ac97_probe(struct platform_device *pdev)
                return -ENXIO;
        }
 
-       mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!mem_res) {
-               dev_err(&pdev->dev, "Unable to get register resource\n");
-               return -ENXIO;
-       }
-
        irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
        if (!irq_res) {
                dev_err(&pdev->dev, "AC97 IRQ not provided!\n");
                return -ENXIO;
        }
 
+       mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        s3c_ac97.regs = devm_ioremap_resource(&pdev->dev, mem_res);
        if (IS_ERR(s3c_ac97.regs))
                return PTR_ERR(s3c_ac97.regs);
@@ -462,7 +457,7 @@ static int s3c_ac97_probe(struct platform_device *pdev)
        if (ret)
                goto err5;
 
-       ret = asoc_dma_platform_register(&pdev->dev);
+       ret = samsung_asoc_dma_platform_register(&pdev->dev);
        if (ret) {
                dev_err(&pdev->dev, "failed to get register DMA: %d\n", ret);
                goto err6;
@@ -485,7 +480,7 @@ static int s3c_ac97_remove(struct platform_device *pdev)
 {
        struct resource *irq_res;
 
-       asoc_dma_platform_unregister(&pdev->dev);
+       samsung_asoc_dma_platform_unregister(&pdev->dev);
        snd_soc_unregister_component(&pdev->dev);
 
        irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
index 21b79262010ecb3774d8a5ad5ad7f8f87707549c..a0c67f60f594cdcc1af41958d3c8efa199b9c98d 100644 (file)
@@ -176,6 +176,10 @@ static int dma_hw_params(struct snd_pcm_substream *substream,
                prtd->params->ch = prtd->params->ops->request(
                                prtd->params->channel, &req, rtd->cpu_dai->dev,
                                prtd->params->ch_name);
+               if (!prtd->params->ch) {
+                       pr_err("Failed to allocate DMA channel\n");
+                       return -ENXIO;
+               }
                prtd->params->ops->config(prtd->params->ch, &config);
        }
 
@@ -433,17 +437,17 @@ static struct snd_soc_platform_driver samsung_asoc_platform = {
        .pcm_free       = dma_free_dma_buffers,
 };
 
-int asoc_dma_platform_register(struct device *dev)
+int samsung_asoc_dma_platform_register(struct device *dev)
 {
        return snd_soc_register_platform(dev, &samsung_asoc_platform);
 }
-EXPORT_SYMBOL_GPL(asoc_dma_platform_register);
+EXPORT_SYMBOL_GPL(samsung_asoc_dma_platform_register);
 
-void asoc_dma_platform_unregister(struct device *dev)
+void samsung_asoc_dma_platform_unregister(struct device *dev)
 {
        snd_soc_unregister_platform(dev);
 }
-EXPORT_SYMBOL_GPL(asoc_dma_platform_unregister);
+EXPORT_SYMBOL_GPL(samsung_asoc_dma_platform_unregister);
 
 MODULE_AUTHOR("Ben Dooks, <ben@simtec.co.uk>");
 MODULE_DESCRIPTION("Samsung ASoC DMA Driver");
index 189a7a6d5020c637d1523ba8261ac9a421882c14..0e86315a3eaf11859d32ab78b1327ce1a60e39c4 100644 (file)
@@ -22,7 +22,7 @@ struct s3c_dma_params {
        char *ch_name;
 };
 
-int asoc_dma_platform_register(struct device *dev);
-void asoc_dma_platform_unregister(struct device *dev);
+int samsung_asoc_dma_platform_register(struct device *dev);
+void samsung_asoc_dma_platform_unregister(struct device *dev);
 
 #endif
index c0e6d9a19efc72562c31157acf776f1e24e57085..821a502310024ba221a2dee4fa33d7937ebe4828 100644 (file)
 #define I2SLVL1ADDR    0x34
 #define I2SLVL2ADDR    0x38
 #define I2SLVL3ADDR    0x3c
+#define I2SSTR1                0x40
+#define I2SVER         0x44
+#define I2SFIC2                0x48
+#define I2STDM         0x4c
 
 #define CON_RSTCLR             (1 << 31)
 #define CON_FRXOFSTATUS                (1 << 26)
 #define MOD_RXONLY             (1 << 8)
 #define MOD_TXRX               (2 << 8)
 #define MOD_MASK               (3 << 8)
-#define MOD_LR_LLOW            (0 << 7)
-#define MOD_LR_RLOW            (1 << 7)
-#define MOD_SDF_IIS            (0 << 5)
-#define MOD_SDF_MSB            (1 << 5)
-#define MOD_SDF_LSB            (2 << 5)
-#define MOD_SDF_MASK           (3 << 5)
-#define MOD_RCLK_256FS         (0 << 3)
-#define MOD_RCLK_512FS         (1 << 3)
-#define MOD_RCLK_384FS         (2 << 3)
-#define MOD_RCLK_768FS         (3 << 3)
-#define MOD_RCLK_MASK          (3 << 3)
-#define MOD_BCLK_32FS          (0 << 1)
-#define MOD_BCLK_48FS          (1 << 1)
-#define MOD_BCLK_16FS          (2 << 1)
-#define MOD_BCLK_24FS          (3 << 1)
-#define MOD_BCLK_MASK          (3 << 1)
+#define MOD_LRP_SHIFT          7
+#define MOD_LR_LLOW            0
+#define MOD_LR_RLOW            1
+#define MOD_SDF_SHIFT          5
+#define MOD_SDF_IIS            0
+#define MOD_SDF_MSB            1
+#define MOD_SDF_LSB            2
+#define MOD_SDF_MASK           3
+#define MOD_RCLK_SHIFT         3
+#define MOD_RCLK_256FS         0
+#define MOD_RCLK_512FS         1
+#define MOD_RCLK_384FS         2
+#define MOD_RCLK_768FS         3
+#define MOD_RCLK_MASK          3
+#define MOD_BCLK_SHIFT         1
+#define MOD_BCLK_32FS          0
+#define MOD_BCLK_48FS          1
+#define MOD_BCLK_16FS          2
+#define MOD_BCLK_24FS          3
+#define MOD_BCLK_MASK          3
 #define MOD_8BIT               (1 << 0)
 
+#define EXYNOS5420_MOD_LRP_SHIFT       15
+#define EXYNOS5420_MOD_SDF_SHIFT       6
+#define EXYNOS5420_MOD_RCLK_SHIFT      4
+#define EXYNOS5420_MOD_BCLK_SHIFT      0
+#define EXYNOS5420_MOD_BCLK_64FS       4
+#define EXYNOS5420_MOD_BCLK_96FS       5
+#define EXYNOS5420_MOD_BCLK_128FS      6
+#define EXYNOS5420_MOD_BCLK_192FS      7
+#define EXYNOS5420_MOD_BCLK_256FS      8
+#define EXYNOS5420_MOD_BCLK_MASK       0xf
+
 #define MOD_CDCLKCON           (1 << 12)
 
 #define PSR_PSREN              (1 << 15)
index 959c702235c8c6028d0946f29b622266f043ab2f..b302f3b7a587f3e480586419affc8e300c80ad8d 100644 (file)
@@ -40,6 +40,7 @@ enum samsung_dai_type {
 
 struct samsung_i2s_dai_data {
        int dai_type;
+       u32 quirks;
 };
 
 struct i2s_dai {
@@ -198,7 +199,13 @@ static inline bool is_manager(struct i2s_dai *i2s)
 /* Read RCLK of I2S (in multiples of LRCLK) */
 static inline unsigned get_rfs(struct i2s_dai *i2s)
 {
-       u32 rfs = (readl(i2s->addr + I2SMOD) >> 3) & 0x3;
+       u32 rfs;
+
+       if (i2s->quirks & QUIRK_SUPPORTS_TDM)
+               rfs = readl(i2s->addr + I2SMOD) >> EXYNOS5420_MOD_RCLK_SHIFT;
+       else
+               rfs = (readl(i2s->addr + I2SMOD) >> MOD_RCLK_SHIFT);
+       rfs &= MOD_RCLK_MASK;
 
        switch (rfs) {
        case 3: return 768;
@@ -212,21 +219,26 @@ static inline unsigned get_rfs(struct i2s_dai *i2s)
 static inline void set_rfs(struct i2s_dai *i2s, unsigned rfs)
 {
        u32 mod = readl(i2s->addr + I2SMOD);
+       int rfs_shift;
 
-       mod &= ~MOD_RCLK_MASK;
+       if (i2s->quirks & QUIRK_SUPPORTS_TDM)
+               rfs_shift = EXYNOS5420_MOD_RCLK_SHIFT;
+       else
+               rfs_shift = MOD_RCLK_SHIFT;
+       mod &= ~(MOD_RCLK_MASK << rfs_shift);
 
        switch (rfs) {
        case 768:
-               mod |= MOD_RCLK_768FS;
+               mod |= (MOD_RCLK_768FS << rfs_shift);
                break;
        case 512:
-               mod |= MOD_RCLK_512FS;
+               mod |= (MOD_RCLK_512FS << rfs_shift);
                break;
        case 384:
-               mod |= MOD_RCLK_384FS;
+               mod |= (MOD_RCLK_384FS << rfs_shift);
                break;
        default:
-               mod |= MOD_RCLK_256FS;
+               mod |= (MOD_RCLK_256FS << rfs_shift);
                break;
        }
 
@@ -236,9 +248,22 @@ static inline void set_rfs(struct i2s_dai *i2s, unsigned rfs)
 /* Read Bit-Clock of I2S (in multiples of LRCLK) */
 static inline unsigned get_bfs(struct i2s_dai *i2s)
 {
-       u32 bfs = (readl(i2s->addr + I2SMOD) >> 1) & 0x3;
+       u32 bfs;
+
+       if (i2s->quirks & QUIRK_SUPPORTS_TDM) {
+               bfs = readl(i2s->addr + I2SMOD) >> EXYNOS5420_MOD_BCLK_SHIFT;
+               bfs &= EXYNOS5420_MOD_BCLK_MASK;
+       } else {
+               bfs =  readl(i2s->addr + I2SMOD) >> MOD_BCLK_SHIFT;
+               bfs &= MOD_BCLK_MASK;
+       }
 
        switch (bfs) {
+       case 8: return 256;
+       case 7: return 192;
+       case 6: return 128;
+       case 5: return 96;
+       case 4: return 64;
        case 3: return 24;
        case 2: return 16;
        case 1: return 48;
@@ -250,21 +275,50 @@ static inline unsigned get_bfs(struct i2s_dai *i2s)
 static inline void set_bfs(struct i2s_dai *i2s, unsigned bfs)
 {
        u32 mod = readl(i2s->addr + I2SMOD);
+       int bfs_shift;
+       int tdm = i2s->quirks & QUIRK_SUPPORTS_TDM;
 
-       mod &= ~MOD_BCLK_MASK;
+       if (i2s->quirks & QUIRK_SUPPORTS_TDM) {
+               bfs_shift = EXYNOS5420_MOD_BCLK_SHIFT;
+               mod &= ~(EXYNOS5420_MOD_BCLK_MASK << bfs_shift);
+       } else {
+               bfs_shift = MOD_BCLK_SHIFT;
+               mod &= ~(MOD_BCLK_MASK << bfs_shift);
+       }
+
+       /* Non-TDM I2S controllers do not support BCLK > 48 * FS */
+       if (!tdm && bfs > 48) {
+               dev_err(&i2s->pdev->dev, "Unsupported BCLK divider\n");
+               return;
+       }
 
        switch (bfs) {
        case 48:
-               mod |= MOD_BCLK_48FS;
+               mod |= (MOD_BCLK_48FS << bfs_shift);
                break;
        case 32:
-               mod |= MOD_BCLK_32FS;
+               mod |= (MOD_BCLK_32FS << bfs_shift);
                break;
        case 24:
-               mod |= MOD_BCLK_24FS;
+               mod |= (MOD_BCLK_24FS << bfs_shift);
                break;
        case 16:
-               mod |= MOD_BCLK_16FS;
+               mod |= (MOD_BCLK_16FS << bfs_shift);
+               break;
+       case 64:
+               mod |= (EXYNOS5420_MOD_BCLK_64FS << bfs_shift);
+               break;
+       case 96:
+               mod |= (EXYNOS5420_MOD_BCLK_96FS << bfs_shift);
+               break;
+       case 128:
+               mod |= (EXYNOS5420_MOD_BCLK_128FS << bfs_shift);
+               break;
+       case 192:
+               mod |= (EXYNOS5420_MOD_BCLK_192FS << bfs_shift);
+               break;
+       case 256:
+               mod |= (EXYNOS5420_MOD_BCLK_256FS << bfs_shift);
                break;
        default:
                dev_err(&i2s->pdev->dev, "Wrong BCLK Divider!\n");
@@ -491,20 +545,32 @@ static int i2s_set_fmt(struct snd_soc_dai *dai,
 {
        struct i2s_dai *i2s = to_info(dai);
        u32 mod = readl(i2s->addr + I2SMOD);
+       int lrp_shift, sdf_shift, sdf_mask, lrp_rlow;
        u32 tmp = 0;
 
+       if (i2s->quirks & QUIRK_SUPPORTS_TDM) {
+               lrp_shift = EXYNOS5420_MOD_LRP_SHIFT;
+               sdf_shift = EXYNOS5420_MOD_SDF_SHIFT;
+       } else {
+               lrp_shift = MOD_LRP_SHIFT;
+               sdf_shift = MOD_SDF_SHIFT;
+       }
+
+       sdf_mask = MOD_SDF_MASK << sdf_shift;
+       lrp_rlow = MOD_LR_RLOW << lrp_shift;
+
        /* Format is priority */
        switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
        case SND_SOC_DAIFMT_RIGHT_J:
-               tmp |= MOD_LR_RLOW;
-               tmp |= MOD_SDF_MSB;
+               tmp |= lrp_rlow;
+               tmp |= (MOD_SDF_MSB << sdf_shift);
                break;
        case SND_SOC_DAIFMT_LEFT_J:
-               tmp |= MOD_LR_RLOW;
-               tmp |= MOD_SDF_LSB;
+               tmp |= lrp_rlow;
+               tmp |= (MOD_SDF_LSB << sdf_shift);
                break;
        case SND_SOC_DAIFMT_I2S:
-               tmp |= MOD_SDF_IIS;
+               tmp |= (MOD_SDF_IIS << sdf_shift);
                break;
        default:
                dev_err(&i2s->pdev->dev, "Format not supported\n");
@@ -519,10 +585,10 @@ static int i2s_set_fmt(struct snd_soc_dai *dai,
        case SND_SOC_DAIFMT_NB_NF:
                break;
        case SND_SOC_DAIFMT_NB_IF:
-               if (tmp & MOD_LR_RLOW)
-                       tmp &= ~MOD_LR_RLOW;
+               if (tmp & lrp_rlow)
+                       tmp &= ~lrp_rlow;
                else
-                       tmp |= MOD_LR_RLOW;
+                       tmp |= lrp_rlow;
                break;
        default:
                dev_err(&i2s->pdev->dev, "Polarity not supported\n");
@@ -544,15 +610,18 @@ static int i2s_set_fmt(struct snd_soc_dai *dai,
                return -EINVAL;
        }
 
+       /*
+        * Don't change the I2S mode if any controller is active on this
+        * channel.
+        */
        if (any_active(i2s) &&
-                       ((mod & (MOD_SDF_MASK | MOD_LR_RLOW
-                               | MOD_SLAVE)) != tmp)) {
+               ((mod & (sdf_mask | lrp_rlow | MOD_SLAVE)) != tmp)) {
                dev_err(&i2s->pdev->dev,
                                "%s:%d Other DAI busy\n", __func__, __LINE__);
                return -EAGAIN;
        }
 
-       mod &= ~(MOD_SDF_MASK | MOD_LR_RLOW | MOD_SLAVE);
+       mod &= ~(sdf_mask | lrp_rlow | MOD_SLAVE);
        mod |= tmp;
        writel(mod, i2s->addr + I2SMOD);
 
@@ -1007,6 +1076,8 @@ static struct i2s_dai *i2s_alloc_dai(struct platform_device *pdev, bool sec)
                if (IS_ERR(i2s->pdev))
                        return NULL;
 
+               i2s->pdev->dev.parent = &pdev->dev;
+
                platform_set_drvdata(i2s->pdev, i2s);
                ret = platform_device_add(i2s->pdev);
                if (ret < 0)
@@ -1018,18 +1089,18 @@ static struct i2s_dai *i2s_alloc_dai(struct platform_device *pdev, bool sec)
 
 static const struct of_device_id exynos_i2s_match[];
 
-static inline int samsung_i2s_get_driver_data(struct platform_device *pdev)
+static inline const struct samsung_i2s_dai_data *samsung_i2s_get_driver_data(
+                                               struct platform_device *pdev)
 {
 #ifdef CONFIG_OF
-       struct samsung_i2s_dai_data *data;
        if (pdev->dev.of_node) {
                const struct of_device_id *match;
                match = of_match_node(exynos_i2s_match, pdev->dev.of_node);
-               data = (struct samsung_i2s_dai_data *) match->data;
-               return data->dai_type;
+               return match->data;
        } else
 #endif
-               return platform_get_device_id(pdev)->driver_data;
+               return (struct samsung_i2s_dai_data *)
+                               platform_get_device_id(pdev)->driver_data;
 }
 
 #ifdef CONFIG_PM_RUNTIME
@@ -1060,13 +1131,13 @@ static int samsung_i2s_probe(struct platform_device *pdev)
        struct resource *res;
        u32 regs_base, quirks = 0, idma_addr = 0;
        struct device_node *np = pdev->dev.of_node;
-       enum samsung_dai_type samsung_dai_type;
+       const struct samsung_i2s_dai_data *i2s_dai_data;
        int ret = 0;
 
        /* Call during Seconday interface registration */
-       samsung_dai_type = samsung_i2s_get_driver_data(pdev);
+       i2s_dai_data = samsung_i2s_get_driver_data(pdev);
 
-       if (samsung_dai_type == TYPE_SEC) {
+       if (i2s_dai_data->dai_type == TYPE_SEC) {
                sec_dai = dev_get_drvdata(&pdev->dev);
                if (!sec_dai) {
                        dev_err(&pdev->dev, "Unable to get drvdata\n");
@@ -1075,7 +1146,7 @@ static int samsung_i2s_probe(struct platform_device *pdev)
                snd_soc_register_component(&sec_dai->pdev->dev,
                                           &samsung_i2s_component,
                                           &sec_dai->i2s_dai_drv, 1);
-               asoc_dma_platform_register(&pdev->dev);
+               samsung_asoc_dma_platform_register(&pdev->dev);
                return 0;
        }
 
@@ -1115,15 +1186,7 @@ static int samsung_i2s_probe(struct platform_device *pdev)
                        idma_addr = i2s_cfg->idma_addr;
                }
        } else {
-               if (of_find_property(np, "samsung,supports-6ch", NULL))
-                       quirks |= QUIRK_PRI_6CHAN;
-
-               if (of_find_property(np, "samsung,supports-secdai", NULL))
-                       quirks |= QUIRK_SEC_DAI;
-
-               if (of_find_property(np, "samsung,supports-rstclr", NULL))
-                       quirks |= QUIRK_NEED_RSTCLR;
-
+               quirks = i2s_dai_data->quirks;
                if (of_property_read_u32(np, "samsung,idma-addr",
                                         &idma_addr)) {
                        if (quirks & QUIRK_SEC_DAI) {
@@ -1200,7 +1263,7 @@ static int samsung_i2s_probe(struct platform_device *pdev)
 
        pm_runtime_enable(&pdev->dev);
 
-       asoc_dma_platform_register(&pdev->dev);
+       samsung_asoc_dma_platform_register(&pdev->dev);
 
        return 0;
 err:
@@ -1230,33 +1293,59 @@ static int samsung_i2s_remove(struct platform_device *pdev)
        i2s->pri_dai = NULL;
        i2s->sec_dai = NULL;
 
-       asoc_dma_platform_unregister(&pdev->dev);
+       samsung_asoc_dma_platform_unregister(&pdev->dev);
        snd_soc_unregister_component(&pdev->dev);
 
        return 0;
 }
 
+static const struct samsung_i2s_dai_data i2sv3_dai_type = {
+       .dai_type = TYPE_PRI,
+       .quirks = QUIRK_NO_MUXPSR,
+};
+
+static const struct samsung_i2s_dai_data i2sv5_dai_type = {
+       .dai_type = TYPE_PRI,
+       .quirks = QUIRK_PRI_6CHAN | QUIRK_SEC_DAI | QUIRK_NEED_RSTCLR,
+};
+
+static const struct samsung_i2s_dai_data i2sv6_dai_type = {
+       .dai_type = TYPE_PRI,
+       .quirks = QUIRK_PRI_6CHAN | QUIRK_SEC_DAI | QUIRK_NEED_RSTCLR |
+                       QUIRK_SUPPORTS_TDM,
+};
+
+static const struct samsung_i2s_dai_data samsung_dai_type_pri = {
+       .dai_type = TYPE_PRI,
+};
+
+static const struct samsung_i2s_dai_data samsung_dai_type_sec = {
+       .dai_type = TYPE_SEC,
+};
+
 static struct platform_device_id samsung_i2s_driver_ids[] = {
        {
                .name           = "samsung-i2s",
-               .driver_data    = TYPE_PRI,
+               .driver_data    = (kernel_ulong_t)&samsung_dai_type_pri,
        }, {
                .name           = "samsung-i2s-sec",
-               .driver_data    = TYPE_SEC,
+               .driver_data    = (kernel_ulong_t)&samsung_dai_type_sec,
        },
        {},
 };
 MODULE_DEVICE_TABLE(platform, samsung_i2s_driver_ids);
 
 #ifdef CONFIG_OF
-static struct samsung_i2s_dai_data samsung_i2s_dai_data_array[] = {
-       [TYPE_PRI] = { TYPE_PRI },
-       [TYPE_SEC] = { TYPE_SEC },
-};
-
 static const struct of_device_id exynos_i2s_match[] = {
-       { .compatible = "samsung,i2s-v5",
-         .data = &samsung_i2s_dai_data_array[TYPE_PRI],
+       {
+               .compatible = "samsung,s3c6410-i2s",
+               .data = &i2sv3_dai_type,
+       }, {
+               .compatible = "samsung,s5pv210-i2s",
+               .data = &i2sv5_dai_type,
+       }, {
+               .compatible = "samsung,exynos5420-i2s",
+               .data = &i2sv6_dai_type,
        },
        {},
 };
index 581ea4a06fc684d1e3f677477326426d7334b427..5fd7a05a9b9e2fe9730a962bcdca73482a9e5d71 100644 (file)
@@ -11,6 +11,7 @@
 #include <sound/pcm_params.h>
 #include <linux/module.h>
 #include <linux/of.h>
+#include <linux/of_device.h>
 
  /*
   * Default CFG switch settings to use this driver:
 /* SMDK has a 16.934MHZ crystal attached to WM8994 */
 #define SMDK_WM8994_FREQ 16934000
 
+struct smdk_wm8994_data {
+       int mclk1_rate;
+};
+
+/* Default SMDKs */
+static struct smdk_wm8994_data smdk_board_data = {
+       .mclk1_rate = SMDK_WM8994_FREQ,
+};
+
 static int smdk_hw_params(struct snd_pcm_substream *substream,
        struct snd_pcm_hw_params *params)
 {
        struct snd_soc_pcm_runtime *rtd = substream->private_data;
-       struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
        struct snd_soc_dai *codec_dai = rtd->codec_dai;
        unsigned int pll_out;
        int ret;
@@ -54,18 +63,6 @@ static int smdk_hw_params(struct snd_pcm_substream *substream,
        else
                pll_out = params_rate(params) * 256;
 
-       ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_I2S
-                                        | SND_SOC_DAIFMT_NB_NF
-                                        | SND_SOC_DAIFMT_CBM_CFM);
-       if (ret < 0)
-               return ret;
-
-       ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_I2S
-                                        | SND_SOC_DAIFMT_NB_NF
-                                        | SND_SOC_DAIFMT_CBM_CFM);
-       if (ret < 0)
-               return ret;
-
        ret = snd_soc_dai_set_pll(codec_dai, WM8994_FLL1, WM8994_FLL_SRC_MCLK1,
                                        SMDK_WM8994_FREQ, pll_out);
        if (ret < 0)
@@ -131,6 +128,8 @@ static struct snd_soc_dai_link smdk_dai[] = {
                .platform_name = "samsung-i2s.0",
                .codec_name = "wm8994-codec",
                .init = smdk_wm8994_init_paiftx,
+               .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
+                       SND_SOC_DAIFMT_CBM_CFM,
                .ops = &smdk_ops,
        }, { /* Sec_Fifo Playback i/f */
                .name = "Sec_FIFO TX",
@@ -139,6 +138,8 @@ static struct snd_soc_dai_link smdk_dai[] = {
                .codec_dai_name = "wm8994-aif1",
                .platform_name = "samsung-i2s-sec",
                .codec_name = "wm8994-codec",
+               .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
+                       SND_SOC_DAIFMT_CBM_CFM,
                .ops = &smdk_ops,
        },
 };
@@ -150,15 +151,28 @@ static struct snd_soc_card smdk = {
        .num_links = ARRAY_SIZE(smdk_dai),
 };
 
+#ifdef CONFIG_OF
+static const struct of_device_id samsung_wm8994_of_match[] = {
+       { .compatible = "samsung,smdk-wm8994", .data = &smdk_board_data },
+       {},
+};
+MODULE_DEVICE_TABLE(of, samsung_wm8994_of_match);
+#endif /* CONFIG_OF */
 
 static int smdk_audio_probe(struct platform_device *pdev)
 {
        int ret;
        struct device_node *np = pdev->dev.of_node;
        struct snd_soc_card *card = &smdk;
+       struct smdk_wm8994_data *board;
+       const struct of_device_id *id;
 
        card->dev = &pdev->dev;
 
+       board = devm_kzalloc(&pdev->dev, sizeof(*board), GFP_KERNEL);
+       if (!board)
+               return -ENOMEM;
+
        if (np) {
                smdk_dai[0].cpu_dai_name = NULL;
                smdk_dai[0].cpu_of_node = of_parse_phandle(np,
@@ -173,6 +187,12 @@ static int smdk_audio_probe(struct platform_device *pdev)
                smdk_dai[0].platform_of_node = smdk_dai[0].cpu_of_node;
        }
 
+       id = of_match_device(samsung_wm8994_of_match, &pdev->dev);
+       if (id)
+               *board = *((struct smdk_wm8994_data *)id->data);
+
+       platform_set_drvdata(pdev, board);
+
        ret = snd_soc_register_card(card);
 
        if (ret)
@@ -190,17 +210,9 @@ static int smdk_audio_remove(struct platform_device *pdev)
        return 0;
 }
 
-#ifdef CONFIG_OF
-static const struct of_device_id samsung_wm8994_of_match[] = {
-       { .compatible = "samsung,smdk-wm8994", },
-       {},
-};
-MODULE_DEVICE_TABLE(of, samsung_wm8994_of_match);
-#endif /* CONFIG_OF */
-
 static struct platform_driver smdk_audio_driver = {
        .driver         = {
-               .name   = "smdk-audio",
+               .name   = "smdk-audio-wm8894",
                .owner  = THIS_MODULE,
                .of_match_table = of_match_ptr(samsung_wm8994_of_match),
        },
@@ -212,4 +224,4 @@ module_platform_driver(smdk_audio_driver);
 
 MODULE_DESCRIPTION("ALSA SoC SMDK WM8994");
 MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:smdk-audio");
+MODULE_ALIAS("platform:smdk-audio-wm8994");
index 2e5ebb2f1982c2e849e8a8adc7924034a6931675..5ea70ab0ecb5897aab234b5b06de7f465d9a0f39 100644 (file)
@@ -395,7 +395,7 @@ static int spdif_probe(struct platform_device *pdev)
 
        spin_lock_init(&spdif->lock);
 
-       spdif->pclk = clk_get(&pdev->dev, "spdif");
+       spdif->pclk = devm_clk_get(&pdev->dev, "spdif");
        if (IS_ERR(spdif->pclk)) {
                dev_err(&pdev->dev, "failed to get peri-clock\n");
                ret = -ENOENT;
@@ -403,7 +403,7 @@ static int spdif_probe(struct platform_device *pdev)
        }
        clk_prepare_enable(spdif->pclk);
 
-       spdif->sclk = clk_get(&pdev->dev, "sclk_spdif");
+       spdif->sclk = devm_clk_get(&pdev->dev, "sclk_spdif");
        if (IS_ERR(spdif->sclk)) {
                dev_err(&pdev->dev, "failed to get internal source clock\n");
                ret = -ENOENT;
@@ -457,10 +457,8 @@ err3:
        release_mem_region(mem_res->start, resource_size(mem_res));
 err2:
        clk_disable_unprepare(spdif->sclk);
-       clk_put(spdif->sclk);
 err1:
        clk_disable_unprepare(spdif->pclk);
-       clk_put(spdif->pclk);
 err0:
        return ret;
 }
@@ -480,9 +478,7 @@ static int spdif_remove(struct platform_device *pdev)
                release_mem_region(mem_res->start, resource_size(mem_res));
 
        clk_disable_unprepare(spdif->sclk);
-       clk_put(spdif->sclk);
        clk_disable_unprepare(spdif->pclk);
-       clk_put(spdif->pclk);
 
        return 0;
 }
index 6bcb1164d599d61bec029fdb94d3f00495b8f66c..56d8ff6a402d2eaffdb4aeb5bc65faa5b815678a 100644 (file)
@@ -34,6 +34,13 @@ config SND_SOC_SH4_SIU
        select SH_DMAE
        select FW_LOADER
 
+config SND_SOC_RCAR
+       tristate "R-Car series SRU/SCU/SSIU/SSI support"
+       select SND_SIMPLE_CARD
+       select RCAR_CLK_ADG
+       help
+         This option enables R-Car SUR/SCU/SSIU/SSI sound support
+
 ##
 ## Boards
 ##
index 849b387d17d99a7d91b21bd1f64adc228341cfb9..aaf3dcd1ee2a197fae36c1f577443a386e8371a1 100644 (file)
@@ -12,6 +12,9 @@ obj-$(CONFIG_SND_SOC_SH4_SSI) += snd-soc-ssi.o
 obj-$(CONFIG_SND_SOC_SH4_FSI)  += snd-soc-fsi.o
 obj-$(CONFIG_SND_SOC_SH4_SIU)  += snd-soc-siu.o
 
+## audio units for R-Car
+obj-$(CONFIG_SND_SOC_RCAR)     += rcar/
+
 ## boards
 snd-soc-sh7760-ac97-objs       := sh7760-ac97.o
 snd-soc-migor-objs             := migor.o
diff --git a/sound/soc/sh/rcar/Makefile b/sound/soc/sh/rcar/Makefile
new file mode 100644 (file)
index 0000000..0ff492d
--- /dev/null
@@ -0,0 +1,2 @@
+snd-soc-rcar-objs      := core.o gen.o scu.o adg.o ssi.o
+obj-$(CONFIG_SND_SOC_RCAR)     += snd-soc-rcar.o
\ No newline at end of file
diff --git a/sound/soc/sh/rcar/adg.c b/sound/soc/sh/rcar/adg.c
new file mode 100644 (file)
index 0000000..d80deb7
--- /dev/null
@@ -0,0 +1,234 @@
+/*
+ * Helper routines for R-Car sound ADG.
+ *
+ *  Copyright (C) 2013  Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/sh_clk.h>
+#include <mach/clock.h>
+#include "rsnd.h"
+
+#define CLKA   0
+#define CLKB   1
+#define CLKC   2
+#define CLKI   3
+#define CLKMAX 4
+
+struct rsnd_adg {
+       struct clk *clk[CLKMAX];
+
+       int rate_of_441khz_div_6;
+       int rate_of_48khz_div_6;
+};
+
+/*
+ * Iterate over all CLKMAX clock slots of an rsnd_adg.
+ *
+ * The comma operator loads adg->clk[i] only while i < CLKMAX; the
+ * original form re-read adg->clk[i] in the increment expression after
+ * the final i++, performing an out-of-bounds read at adg->clk[CLKMAX].
+ */
+#define for_each_rsnd_clk(pos, adg, i)                         \
+       for (i = 0;                                             \
+            (i < CLKMAX) && ((pos) = (adg)->clk[i], 1);        \
+            i++)
+#define rsnd_priv_to_adg(priv) ((struct rsnd_adg *)(priv)->adg)
+
+/*
+ * Map an SSI id to the AUDIO_CLK_SELx register that controls it.
+ * Returns RSND_REG_MAX for SSI8, which has no ADG connection.
+ */
+static enum rsnd_reg rsnd_adg_ssi_reg_get(int id)
+{
+       enum rsnd_reg reg;
+
+       /*
+        * SSI 8 is not connected to ADG.
+        * it works with SSI 7
+        */
+       if (id == 8)
+               return RSND_REG_MAX;
+
+       /* four SSIs share each select register: 0-3, 4-7, the rest */
+       if (0 <= id && id <= 3)
+               reg = RSND_REG_AUDIO_CLK_SEL0;
+       else if (4 <= id && id <= 7)
+               reg = RSND_REG_AUDIO_CLK_SEL1;
+       else
+               reg = RSND_REG_AUDIO_CLK_SEL2;
+
+       return reg;
+}
+
+/*
+ * Stop the ADG-sourced clock for the SSI represented by "mod" by
+ * clearing its AUDIO_CLK_SELx field.  Always returns 0.
+ */
+int rsnd_adg_ssi_clk_stop(struct rsnd_mod *mod)
+{
+       struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
+       enum rsnd_reg reg;
+       int id;
+
+       /*
+        * "mod" = "ssi" here.
+        * we can get "ssi id" from mod
+        */
+       id  = rsnd_mod_id(mod);
+       reg = rsnd_adg_ssi_reg_get(id);
+
+       /*
+        * NOTE(review): for SSI8 reg is RSND_REG_MAX and rsnd_write()
+        * BUG_ON()s when the register lookup yields no base address --
+        * confirm rsnd_gen_reg_get() tolerates RSND_REG_MAX.
+        */
+       rsnd_write(priv, mod, reg, 0);
+
+       return 0;
+}
+
+/*
+ * Try to route a clock matching "rate" to the SSI represented by
+ * "mod".  Candidates are AUDIO_CLKA/B/C/I directly, or the BRGA/BRGB
+ * 1/6 outputs prepared by rsnd_adg_ssi_clk_init().  Returns 0 on
+ * success, -EIO when no source matches the requested rate.
+ *
+ * Fixes vs. original: sel_table is now static const (it was rebuilt
+ * on the stack every call), the trailing dev_dbg() gained its missing
+ * newline, and the unsigned rate is printed with %u.
+ */
+int rsnd_adg_ssi_clk_try_start(struct rsnd_mod *mod, unsigned int rate)
+{
+       struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
+       struct rsnd_adg *adg = rsnd_priv_to_adg(priv);
+       struct device *dev = rsnd_priv_to_dev(priv);
+       struct clk *clk;
+       enum rsnd_reg reg;
+       int id, shift, i;
+       u32 data;
+       /* AUDIO_CLK_SELx source-select encoding, indexed by CLKx */
+       static const int sel_table[] = {
+               [CLKA] = 0x1,
+               [CLKB] = 0x2,
+               [CLKC] = 0x3,
+               [CLKI] = 0x0,
+       };
+
+       dev_dbg(dev, "request clock = %u\n", rate);
+
+       /*
+        * find suitable clock from
+        * AUDIO_CLKA/AUDIO_CLKB/AUDIO_CLKC/AUDIO_CLKI.
+        */
+       data = 0;
+       for_each_rsnd_clk(clk, adg, i) {
+               if (rate == clk_get_rate(clk)) {
+                       data = sel_table[i];
+                       goto found_clock;
+               }
+       }
+
+       /*
+        * find 1/6 clock from BRGA/BRGB
+        */
+       if (rate == adg->rate_of_441khz_div_6) {
+               data = 0x10;
+               goto found_clock;
+       }
+
+       if (rate == adg->rate_of_48khz_div_6) {
+               data = 0x20;
+               goto found_clock;
+       }
+
+       return -EIO;
+
+found_clock:
+
+       /*
+        * This "mod" = "ssi" here.
+        * we can get "ssi id" from mod
+        */
+       id  = rsnd_mod_id(mod);
+       reg = rsnd_adg_ssi_reg_get(id);
+
+       /* i == CLKMAX when a BRG output matched rather than CLKA-I */
+       dev_dbg(dev, "ADG: ssi%d selects clk%d = %u\n", id, i, rate);
+
+       /*
+        * Enable SSIx clock
+        */
+       shift = (id % 4) * 8;
+
+       rsnd_bset(priv, mod, reg,
+                  0xFF << shift,
+                  data << shift);
+
+       return 0;
+}
+
+/*
+ * Pick a 44.1kHz-family source for BRGA and a 48kHz-family source for
+ * BRGB, program both dividers to 1/6, and record the resulting rates
+ * so rsnd_adg_ssi_clk_try_start() can match requests against them.
+ */
+static void rsnd_adg_ssi_clk_init(struct rsnd_priv *priv, struct rsnd_adg *adg)
+{
+       struct clk *clk;
+       unsigned long rate;
+       u32 ckr;
+       int i;
+       /* SSICKR BRG source-select encoding, indexed by CLKx */
+       int brg_table[] = {
+               [CLKA] = 0x0,
+               [CLKB] = 0x1,
+               [CLKC] = 0x4,
+               [CLKI] = 0x2,
+       };
+
+       /*
+        * This driver is assuming that AUDIO_CLKA/AUDIO_CLKB/AUDIO_CLKC
+        * have 44.1kHz or 48kHz base clocks for now.
+        *
+        * SSI itself can divide parent clock by 1/1 - 1/16
+        * So,  BRGA outputs 44.1kHz base parent clock 1/32,
+        * and, BRGB outputs 48.0kHz base parent clock 1/32 here.
+        * see
+        *      rsnd_adg_ssi_clk_try_start()
+        */
+       ckr = 0;
+       adg->rate_of_441khz_div_6 = 0;
+       adg->rate_of_48khz_div_6  = 0;
+       for_each_rsnd_clk(clk, adg, i) {
+               rate = clk_get_rate(clk);
+
+               if (0 == rate) /* not used */
+                       continue;
+
+               /* RBGA */
+               if (!adg->rate_of_441khz_div_6 && (0 == rate % 44100)) {
+                       adg->rate_of_441khz_div_6 = rate / 6;
+                       ckr |= brg_table[i] << 20;      /* BRGA source bits */
+               }
+
+               /* RBGB */
+               if (!adg->rate_of_48khz_div_6 && (0 == rate % 48000)) {
+                       adg->rate_of_48khz_div_6 = rate / 6;
+                       ckr |= brg_table[i] << 16;      /* BRGB source bits */
+               }
+       }
+
+       rsnd_priv_bset(priv, SSICKR, 0x00FF0000, ckr);
+       rsnd_priv_write(priv, BRRA,  0x00000002); /* 1/6 */
+       rsnd_priv_write(priv, BRRB,  0x00000002); /* 1/6 */
+}
+
+/*
+ * Probe the ADG: allocate state, look up the four audio source
+ * clocks, and initialize the BRGA/BRGB dividers.
+ *
+ * Fixes a clock leak in the original: when any clk_get() failed, the
+ * clocks that had already been obtained were never clk_put().
+ */
+int rsnd_adg_probe(struct platform_device *pdev,
+                  struct rcar_snd_info *info,
+                  struct rsnd_priv *priv)
+{
+       struct rsnd_adg *adg;
+       struct device *dev = rsnd_priv_to_dev(priv);
+       struct clk *clk;
+       int i;
+
+       adg = devm_kzalloc(dev, sizeof(*adg), GFP_KERNEL);
+       if (!adg) {
+               dev_err(dev, "ADG allocate failed\n");
+               return -ENOMEM;
+       }
+
+       adg->clk[CLKA] = clk_get(NULL, "audio_clk_a");
+       adg->clk[CLKB] = clk_get(NULL, "audio_clk_b");
+       adg->clk[CLKC] = clk_get(NULL, "audio_clk_c");
+       adg->clk[CLKI] = clk_get(NULL, "audio_clk_internal");
+       for_each_rsnd_clk(clk, adg, i) {
+               if (IS_ERR(clk)) {
+                       dev_err(dev, "Audio clock failed\n");
+                       goto err_clk_put;
+               }
+       }
+
+       rsnd_adg_ssi_clk_init(priv, adg);
+
+       priv->adg = adg;
+
+       dev_dbg(dev, "adg probed\n");
+
+       return 0;
+
+err_clk_put:
+       /* release only the clocks that were successfully obtained */
+       for_each_rsnd_clk(clk, adg, i)
+               if (!IS_ERR(clk))
+                       clk_put(clk);
+
+       return -EIO;
+}
+
+/*
+ * Release the audio clocks obtained in rsnd_adg_probe().
+ * (adg itself is devm-allocated and is freed automatically.)
+ */
+void rsnd_adg_remove(struct platform_device *pdev,
+                    struct rsnd_priv *priv)
+{
+       struct rsnd_adg *adg = priv->adg;
+       struct clk *clk;
+       int i;
+
+       for_each_rsnd_clk(clk, adg, i)
+               clk_put(clk);
+}
diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c
new file mode 100644 (file)
index 0000000..a357060
--- /dev/null
@@ -0,0 +1,861 @@
+/*
+ * Renesas R-Car SRU/SCU/SSIU/SSI support
+ *
+ * Copyright (C) 2013 Renesas Solutions Corp.
+ * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+ *
+ * Based on fsi.c
+ * Kuninori Morimoto <morimoto.kuninori@renesas.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * Renesas R-Car sound device structure
+ *
+ * Gen1
+ *
+ * SRU         : Sound Routing Unit
+ *  - SRC      : Sampling Rate Converter
+ *  - CMD
+ *    - CTU    : Channel Count Conversion Unit
+ *    - MIX    : Mixer
+ *    - DVC    : Digital Volume and Mute Function
+ *  - SSI      : Serial Sound Interface
+ *
+ * Gen2
+ *
+ * SCU         : Sampling Rate Converter Unit
+ *  - SRC      : Sampling Rate Converter
+ *  - CMD
+ *   - CTU     : Channel Count Conversion Unit
+ *   - MIX     : Mixer
+ *   - DVC     : Digital Volume and Mute Function
+ * SSIU                : Serial Sound Interface Unit
+ *  - SSI      : Serial Sound Interface
+ */
+
+/*
+ *     driver data Image
+ *
+ * rsnd_priv
+ *   |
+ *   | ** this depends on Gen1/Gen2
+ *   |
+ *   +- gen
+ *   |
+ *   | ** these depend on data path
+ *   | ** gen and platform data control it
+ *   |
+ *   +- rdai[0]
+ *   |   |              sru     ssiu      ssi
+ *   |   +- playback -> [mod] -> [mod] -> [mod] -> ...
+ *   |   |
+ *   |   |              sru     ssiu      ssi
+ *   |   +- capture  -> [mod] -> [mod] -> [mod] -> ...
+ *   |
+ *   +- rdai[1]
+ *   |   |              sru     ssiu      ssi
+ *   |   +- playback -> [mod] -> [mod] -> [mod] -> ...
+ *   |   |
+ *   |   |              sru     ssiu      ssi
+ *   |   +- capture  -> [mod] -> [mod] -> [mod] -> ...
+ *   ...
+ *   |
+ *   | ** these control ssi
+ *   |
+ *   +- ssi
+ *   |  |
+ *   |  +- ssi[0]
+ *   |  +- ssi[1]
+ *   |  +- ssi[2]
+ *   |  ...
+ *   |
+ *   | ** these control scu
+ *   |
+ *   +- scu
+ *      |
+ *      +- scu[0]
+ *      +- scu[1]
+ *      +- scu[2]
+ *      ...
+ *
+ *
+ * for_each_rsnd_dai(xx, priv, xx)
+ *  rdai[0] => rdai[1] => rdai[2] => ...
+ *
+ * for_each_rsnd_mod(xx, rdai, xx)
+ *  [mod] => [mod] => [mod] => ...
+ *
+ * rsnd_dai_call(xxx, fn )
+ *  [mod]->fn() -> [mod]->fn() -> [mod]->fn()...
+ *
+ */
+#include <linux/pm_runtime.h>
+#include "rsnd.h"
+
+#define RSND_RATES SNDRV_PCM_RATE_8000_96000
+#define RSND_FMTS (SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S16_LE)
+
+/*
+ *     rsnd_platform functions
+ */
+#define rsnd_platform_call(priv, dai, func, param...)  \
+       (!(priv->info->func) ? -ENODEV :                \
+        priv->info->func(param))
+
+
+/*
+ *     basic function
+ */
+u32 rsnd_read(struct rsnd_priv *priv,
+             struct rsnd_mod *mod, enum rsnd_reg reg)
+{
+       void __iomem *base = rsnd_gen_reg_get(priv, mod, reg);
+
+       BUG_ON(!base);
+
+       return ioread32(base);
+}
+
+void rsnd_write(struct rsnd_priv *priv,
+               struct rsnd_mod *mod,
+               enum rsnd_reg reg, u32 data)
+{
+       void __iomem *base = rsnd_gen_reg_get(priv, mod, reg);
+       struct device *dev = rsnd_priv_to_dev(priv);
+
+       BUG_ON(!base);
+
+       dev_dbg(dev, "w %p : %08x\n", base, data);
+
+       iowrite32(data, base);
+}
+
+void rsnd_bset(struct rsnd_priv *priv, struct rsnd_mod *mod,
+              enum rsnd_reg reg, u32 mask, u32 data)
+{
+       void __iomem *base = rsnd_gen_reg_get(priv, mod, reg);
+       struct device *dev = rsnd_priv_to_dev(priv);
+       u32 val;
+
+       BUG_ON(!base);
+
+       val = ioread32(base);
+       val &= ~mask;
+       val |= data & mask;
+       iowrite32(val, base);
+
+       dev_dbg(dev, "s %p : %08x\n", base, val);
+}
+
+/*
+ *     rsnd_mod functions
+ */
+char *rsnd_mod_name(struct rsnd_mod *mod)
+{
+       if (!mod || !mod->ops)
+               return "unknown";
+
+       return mod->ops->name;
+}
+
+void rsnd_mod_init(struct rsnd_priv *priv,
+                  struct rsnd_mod *mod,
+                  struct rsnd_mod_ops *ops,
+                  int id)
+{
+       mod->priv       = priv;
+       mod->id         = id;
+       mod->ops        = ops;
+       INIT_LIST_HEAD(&mod->list);
+}
+
+/*
+ *     rsnd_dma functions
+ */
+static void rsnd_dma_continue(struct rsnd_dma *dma)
+{
+       /* push next A or B plane */
+       dma->submit_loop = 1;
+       schedule_work(&dma->work);
+}
+
+void rsnd_dma_start(struct rsnd_dma *dma)
+{
+       /* push both A and B plane*/
+       dma->submit_loop = 2;
+       schedule_work(&dma->work);
+}
+
+void rsnd_dma_stop(struct rsnd_dma *dma)
+{
+       dma->submit_loop = 0;
+       cancel_work_sync(&dma->work);
+       dmaengine_terminate_all(dma->chan);
+}
+
+static void rsnd_dma_complete(void *data)
+{
+       struct rsnd_dma *dma = (struct rsnd_dma *)data;
+       struct rsnd_priv *priv = dma->priv;
+       unsigned long flags;
+
+       rsnd_lock(priv, flags);
+
+       dma->complete(dma);
+
+       if (dma->submit_loop)
+               rsnd_dma_continue(dma);
+
+       rsnd_unlock(priv, flags);
+}
+
+/*
+ * Workqueue handler: prepare and submit DMA descriptors.
+ * submit_loop is 2 on start (queue both A and B planes) and 1 on
+ * continue (queue the next plane); see rsnd_dma_start()/continue().
+ */
+static void rsnd_dma_do_work(struct work_struct *work)
+{
+       struct rsnd_dma *dma = container_of(work, struct rsnd_dma, work);
+       struct rsnd_priv *priv = dma->priv;
+       struct device *dev = rsnd_priv_to_dev(priv);
+       struct dma_async_tx_descriptor *desc;
+       dma_addr_t buf;
+       size_t len;
+       int i;
+
+       for (i = 0; i < dma->submit_loop; i++) {
+
+               /*
+                * NOTE(review): "len" is size_t here, but rsnd_dma_init()
+                * declares the inquiry callback with an "int *len"
+                * parameter -- confirm the prototype in rsnd.h matches.
+                */
+               if (dma->inquiry(dma, &buf, &len) < 0)
+                       return;
+
+               desc = dmaengine_prep_slave_single(
+                       dma->chan, buf, len, dma->dir,
+                       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+               if (!desc) {
+                       dev_err(dev, "dmaengine_prep_slave_sg() fail\n");
+                       return;
+               }
+
+               desc->callback          = rsnd_dma_complete;
+               desc->callback_param    = dma;
+
+               if (dmaengine_submit(desc) < 0) {
+                       dev_err(dev, "dmaengine_submit() fail\n");
+                       return;
+               }
+
+       }
+
+       dma_async_issue_pending(dma->chan);
+}
+
+int rsnd_dma_available(struct rsnd_dma *dma)
+{
+       return !!dma->chan;
+}
+
+static bool rsnd_dma_filter(struct dma_chan *chan, void *param)
+{
+       chan->private = param;
+
+       return true;
+}
+
+int rsnd_dma_init(struct rsnd_priv *priv, struct rsnd_dma *dma,
+                 int is_play, int id,
+                 int (*inquiry)(struct rsnd_dma *dma,
+                                 dma_addr_t *buf, int *len),
+                 int (*complete)(struct rsnd_dma *dma))
+{
+       struct device *dev = rsnd_priv_to_dev(priv);
+       dma_cap_mask_t mask;
+
+       if (dma->chan) {
+               dev_err(dev, "it already has dma channel\n");
+               return -EIO;
+       }
+
+       dma_cap_zero(mask);
+       dma_cap_set(DMA_SLAVE, mask);
+
+       dma->slave.shdma_slave.slave_id = id;
+
+       dma->chan = dma_request_channel(mask, rsnd_dma_filter,
+                                       &dma->slave.shdma_slave);
+       if (!dma->chan) {
+               dev_err(dev, "can't get dma channel\n");
+               return -EIO;
+       }
+
+       dma->dir = is_play ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+       dma->priv = priv;
+       dma->inquiry = inquiry;
+       dma->complete = complete;
+       INIT_WORK(&dma->work, rsnd_dma_do_work);
+
+       return 0;
+}
+
+void  rsnd_dma_quit(struct rsnd_priv *priv,
+                   struct rsnd_dma *dma)
+{
+       if (dma->chan)
+               dma_release_channel(dma->chan);
+
+       dma->chan = NULL;
+}
+
+/*
+ *     rsnd_dai functions
+ */
+#define rsnd_dai_call(rdai, io, fn)                    \
+({                                                     \
+       struct rsnd_mod *mod, *n;                       \
+       int ret = 0;                                    \
+       for_each_rsnd_mod(mod, n, io) {                 \
+               ret = rsnd_mod_call(mod, fn, rdai, io); \
+               if (ret < 0)                            \
+                       break;                          \
+       }                                               \
+       ret;                                            \
+})
+
+/*
+ * Attach a module to a DAI stream's processing path.  A module may be
+ * on at most one path at a time (checked via its list membership).
+ * Returns 0 on success, -EIO on NULL or already-connected module.
+ *
+ * Fixes a NULL dereference in the original: "mod" was dereferenced
+ * (via rsnd_mod_to_priv()) before the NULL check.
+ */
+int rsnd_dai_connect(struct rsnd_dai *rdai,
+                    struct rsnd_mod *mod,
+                    struct rsnd_dai_stream *io)
+{
+       struct rsnd_priv *priv;
+       struct device *dev;
+
+       if (!mod) {
+               /* no mod means no priv/dev to log against */
+               pr_err("rsnd: NULL mod\n");
+               return -EIO;
+       }
+
+       priv = rsnd_mod_to_priv(mod);
+       dev = rsnd_priv_to_dev(priv);
+
+       if (!list_empty(&mod->list)) {
+               dev_err(dev, "%s%d is not empty\n",
+                       rsnd_mod_name(mod),
+                       rsnd_mod_id(mod));
+               return -EIO;
+       }
+
+       list_add_tail(&mod->list, &io->head);
+
+       return 0;
+}
+
+int rsnd_dai_disconnect(struct rsnd_mod *mod)
+{
+       list_del_init(&mod->list);
+
+       return 0;
+}
+
+int rsnd_dai_id(struct rsnd_priv *priv, struct rsnd_dai *rdai)
+{
+       int id = rdai - priv->rdai;
+
+       if ((id < 0) || (id >= rsnd_dai_nr(priv)))
+               return -EINVAL;
+
+       return id;
+}
+
+struct rsnd_dai *rsnd_dai_get(struct rsnd_priv *priv, int id)
+{
+       return priv->rdai + id;
+}
+
+static struct rsnd_dai *rsnd_dai_to_rdai(struct snd_soc_dai *dai)
+{
+       struct rsnd_priv *priv = snd_soc_dai_get_drvdata(dai);
+
+       return rsnd_dai_get(priv, dai->id);
+}
+
+int rsnd_dai_is_play(struct rsnd_dai *rdai, struct rsnd_dai_stream *io)
+{
+       return &rdai->playback == io;
+}
+
+/*
+ *     rsnd_soc_dai functions
+ */
+int rsnd_dai_pointer_offset(struct rsnd_dai_stream *io, int additional)
+{
+       struct snd_pcm_substream *substream = io->substream;
+       struct snd_pcm_runtime *runtime = substream->runtime;
+       int pos = io->byte_pos + additional;
+
+       pos %= (runtime->periods * io->byte_per_period);
+
+       return pos;
+}
+
+void rsnd_dai_pointer_update(struct rsnd_dai_stream *io, int byte)
+{
+       io->byte_pos += byte;
+
+       if (io->byte_pos >= io->next_period_byte) {
+               struct snd_pcm_substream *substream = io->substream;
+               struct snd_pcm_runtime *runtime = substream->runtime;
+
+               io->period_pos++;
+               io->next_period_byte += io->byte_per_period;
+
+               if (io->period_pos >= runtime->periods) {
+                       io->byte_pos = 0;
+                       io->period_pos = 0;
+                       io->next_period_byte = io->byte_per_period;
+               }
+
+               snd_pcm_period_elapsed(substream);
+       }
+}
+
+static int rsnd_dai_stream_init(struct rsnd_dai_stream *io,
+                               struct snd_pcm_substream *substream)
+{
+       struct snd_pcm_runtime *runtime = substream->runtime;
+
+       if (!list_empty(&io->head))
+               return -EIO;
+
+       INIT_LIST_HEAD(&io->head);
+       io->substream           = substream;
+       io->byte_pos            = 0;
+       io->period_pos          = 0;
+       io->byte_per_period     = runtime->period_size *
+                                 runtime->channels *
+                                 samples_to_bytes(runtime, 1);
+       io->next_period_byte    = io->byte_per_period;
+
+       return 0;
+}
+
+static
+struct snd_soc_dai *rsnd_substream_to_dai(struct snd_pcm_substream *substream)
+{
+       struct snd_soc_pcm_runtime *rtd = substream->private_data;
+
+       return  rtd->cpu_dai;
+}
+
+static
+struct rsnd_dai_stream *rsnd_rdai_to_io(struct rsnd_dai *rdai,
+                                       struct snd_pcm_substream *substream)
+{
+       if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+               return &rdai->playback;
+       else
+               return &rdai->capture;
+}
+
+/*
+ * ALSA DAI trigger callback, run under the driver's private lock.
+ * START: init stream bookkeeping, run the platform start hook, build
+ * the module path (rsnd_gen_path_init), then call each module's
+ * init/start.  STOP: stop/quit each module, tear down the path, run
+ * the platform stop hook -- the reverse order of START.
+ */
+static int rsnd_soc_dai_trigger(struct snd_pcm_substream *substream, int cmd,
+                           struct snd_soc_dai *dai)
+{
+       struct rsnd_priv *priv = snd_soc_dai_get_drvdata(dai);
+       struct rsnd_dai *rdai = rsnd_dai_to_rdai(dai);
+       struct rsnd_dai_stream *io = rsnd_rdai_to_io(rdai, substream);
+       struct rsnd_mod *mod = rsnd_ssi_mod_get_frm_dai(priv,
+                                               rsnd_dai_id(priv, rdai),
+                                               rsnd_dai_is_play(rdai, io));
+       int ssi_id = rsnd_mod_id(mod);
+       int ret;
+       unsigned long flags;
+
+       rsnd_lock(priv, flags);
+
+       switch (cmd) {
+       case SNDRV_PCM_TRIGGER_START:
+               ret = rsnd_dai_stream_init(io, substream);
+               if (ret < 0)
+                       goto dai_trigger_end;
+
+               ret = rsnd_platform_call(priv, dai, start, ssi_id);
+               if (ret < 0)
+                       goto dai_trigger_end;
+
+               ret = rsnd_gen_path_init(priv, rdai, io);
+               if (ret < 0)
+                       goto dai_trigger_end;
+
+               ret = rsnd_dai_call(rdai, io, init);
+               if (ret < 0)
+                       goto dai_trigger_end;
+
+               ret = rsnd_dai_call(rdai, io, start);
+               if (ret < 0)
+                       goto dai_trigger_end;
+               break;
+       case SNDRV_PCM_TRIGGER_STOP:
+               ret = rsnd_dai_call(rdai, io, stop);
+               if (ret < 0)
+                       goto dai_trigger_end;
+
+               ret = rsnd_dai_call(rdai, io, quit);
+               if (ret < 0)
+                       goto dai_trigger_end;
+
+               ret = rsnd_gen_path_exit(priv, rdai, io);
+               if (ret < 0)
+                       goto dai_trigger_end;
+
+               ret = rsnd_platform_call(priv, dai, stop, ssi_id);
+               if (ret < 0)
+                       goto dai_trigger_end;
+               break;
+       default:
+               ret = -EINVAL;
+       }
+
+dai_trigger_end:
+       rsnd_unlock(priv, flags);
+
+       return ret;
+}
+
+static int rsnd_soc_dai_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+{
+       struct rsnd_dai *rdai = rsnd_dai_to_rdai(dai);
+
+       /* set master/slave audio interface */
+       switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+       case SND_SOC_DAIFMT_CBM_CFM:
+               rdai->clk_master = 1;
+               break;
+       case SND_SOC_DAIFMT_CBS_CFS:
+               rdai->clk_master = 0;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       /* set clock inversion */
+       switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+       case SND_SOC_DAIFMT_NB_IF:
+               rdai->bit_clk_inv = 0;
+               rdai->frm_clk_inv = 1;
+               break;
+       case SND_SOC_DAIFMT_IB_NF:
+               rdai->bit_clk_inv = 1;
+               rdai->frm_clk_inv = 0;
+               break;
+       case SND_SOC_DAIFMT_IB_IF:
+               rdai->bit_clk_inv = 1;
+               rdai->frm_clk_inv = 1;
+               break;
+       case SND_SOC_DAIFMT_NB_NF:
+       default:
+               rdai->bit_clk_inv = 0;
+               rdai->frm_clk_inv = 0;
+               break;
+       }
+
+       /* set format */
+       switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+       case SND_SOC_DAIFMT_I2S:
+               rdai->sys_delay = 0;
+               rdai->data_alignment = 0;
+               break;
+       case SND_SOC_DAIFMT_LEFT_J:
+               rdai->sys_delay = 1;
+               rdai->data_alignment = 0;
+               break;
+       case SND_SOC_DAIFMT_RIGHT_J:
+               rdai->sys_delay = 1;
+               rdai->data_alignment = 1;
+               break;
+       }
+
+       return 0;
+}
+
+static const struct snd_soc_dai_ops rsnd_soc_dai_ops = {
+       .trigger        = rsnd_soc_dai_trigger,
+       .set_fmt        = rsnd_soc_dai_set_fmt,
+};
+
+static int rsnd_dai_probe(struct platform_device *pdev,
+                         struct rcar_snd_info *info,
+                         struct rsnd_priv *priv)
+{
+       struct snd_soc_dai_driver *drv;
+       struct rsnd_dai *rdai;
+       struct rsnd_mod *pmod, *cmod;
+       struct device *dev = rsnd_priv_to_dev(priv);
+       int dai_nr;
+       int i;
+
+       /* get max dai nr */
+       for (dai_nr = 0; dai_nr < 32; dai_nr++) {
+               pmod = rsnd_ssi_mod_get_frm_dai(priv, dai_nr, 1);
+               cmod = rsnd_ssi_mod_get_frm_dai(priv, dai_nr, 0);
+
+               if (!pmod && !cmod)
+                       break;
+       }
+
+       if (!dai_nr) {
+               dev_err(dev, "no dai\n");
+               return -EIO;
+       }
+
+       drv  = devm_kzalloc(dev, sizeof(*drv)  * dai_nr, GFP_KERNEL);
+       rdai = devm_kzalloc(dev, sizeof(*rdai) * dai_nr, GFP_KERNEL);
+       if (!drv || !rdai) {
+               dev_err(dev, "dai allocate failed\n");
+               return -ENOMEM;
+       }
+
+       for (i = 0; i < dai_nr; i++) {
+
+               pmod = rsnd_ssi_mod_get_frm_dai(priv, i, 1);
+               cmod = rsnd_ssi_mod_get_frm_dai(priv, i, 0);
+
+               /*
+                *      init rsnd_dai
+                */
+               INIT_LIST_HEAD(&rdai[i].playback.head);
+               INIT_LIST_HEAD(&rdai[i].capture.head);
+
+               snprintf(rdai[i].name, RSND_DAI_NAME_SIZE, "rsnd-dai.%d", i);
+
+               /*
+                *      init snd_soc_dai_driver
+                */
+               drv[i].name     = rdai[i].name;
+               drv[i].ops      = &rsnd_soc_dai_ops;
+               if (pmod) {
+                       drv[i].playback.rates           = RSND_RATES;
+                       drv[i].playback.formats         = RSND_FMTS;
+                       drv[i].playback.channels_min    = 2;
+                       drv[i].playback.channels_max    = 2;
+               }
+               if (cmod) {
+                       drv[i].capture.rates            = RSND_RATES;
+                       drv[i].capture.formats          = RSND_FMTS;
+                       drv[i].capture.channels_min     = 2;
+                       drv[i].capture.channels_max     = 2;
+               }
+
+               dev_dbg(dev, "%s (%s/%s)\n", rdai[i].name,
+                       pmod ? "play"    : " -- ",
+                       cmod ? "capture" : "  --   ");
+       }
+
+       priv->dai_nr    = dai_nr;
+       priv->daidrv    = drv;
+       priv->rdai      = rdai;
+
+       return 0;
+}
+
+static void rsnd_dai_remove(struct platform_device *pdev,
+                         struct rsnd_priv *priv)
+{
+}
+
+/*
+ *             pcm ops
+ */
+static struct snd_pcm_hardware rsnd_pcm_hardware = {
+       .info =         SNDRV_PCM_INFO_INTERLEAVED      |
+                       SNDRV_PCM_INFO_MMAP             |
+                       SNDRV_PCM_INFO_MMAP_VALID       |
+                       SNDRV_PCM_INFO_PAUSE,
+       .formats                = RSND_FMTS,
+       .rates                  = RSND_RATES,
+       .rate_min               = 8000,
+       .rate_max               = 192000,
+       .channels_min           = 2,
+       .channels_max           = 2,
+       .buffer_bytes_max       = 64 * 1024,
+       .period_bytes_min       = 32,
+       .period_bytes_max       = 8192,
+       .periods_min            = 1,
+       .periods_max            = 32,
+       .fifo_size              = 256,
+};
+
+static int rsnd_pcm_open(struct snd_pcm_substream *substream)
+{
+       struct snd_pcm_runtime *runtime = substream->runtime;
+       int ret = 0;
+
+       snd_soc_set_runtime_hwparams(substream, &rsnd_pcm_hardware);
+
+       ret = snd_pcm_hw_constraint_integer(runtime,
+                                           SNDRV_PCM_HW_PARAM_PERIODS);
+
+       return ret;
+}
+
+static int rsnd_hw_params(struct snd_pcm_substream *substream,
+                        struct snd_pcm_hw_params *hw_params)
+{
+       return snd_pcm_lib_malloc_pages(substream,
+                                       params_buffer_bytes(hw_params));
+}
+
+static snd_pcm_uframes_t rsnd_pointer(struct snd_pcm_substream *substream)
+{
+       struct snd_pcm_runtime *runtime = substream->runtime;
+       struct snd_soc_dai *dai = rsnd_substream_to_dai(substream);
+       struct rsnd_dai *rdai = rsnd_dai_to_rdai(dai);
+       struct rsnd_dai_stream *io = rsnd_rdai_to_io(rdai, substream);
+
+       return bytes_to_frames(runtime, io->byte_pos);
+}
+
+static struct snd_pcm_ops rsnd_pcm_ops = {
+       .open           = rsnd_pcm_open,
+       .ioctl          = snd_pcm_lib_ioctl,
+       .hw_params      = rsnd_hw_params,
+       .hw_free        = snd_pcm_lib_free_pages,
+       .pointer        = rsnd_pointer,
+};
+
+/*
+ *             snd_soc_platform
+ */
+
+#define PREALLOC_BUFFER                (32 * 1024)
+#define PREALLOC_BUFFER_MAX    (32 * 1024)
+
+static int rsnd_pcm_new(struct snd_soc_pcm_runtime *rtd)
+{
+       return snd_pcm_lib_preallocate_pages_for_all(
+               rtd->pcm,
+               SNDRV_DMA_TYPE_DEV,
+               rtd->card->snd_card->dev,
+               PREALLOC_BUFFER, PREALLOC_BUFFER_MAX);
+}
+
+static void rsnd_pcm_free(struct snd_pcm *pcm)
+{
+       snd_pcm_lib_preallocate_free_for_all(pcm);
+}
+
+static struct snd_soc_platform_driver rsnd_soc_platform = {
+       .ops            = &rsnd_pcm_ops,
+       .pcm_new        = rsnd_pcm_new,
+       .pcm_free       = rsnd_pcm_free,
+};
+
+static const struct snd_soc_component_driver rsnd_soc_component = {
+       .name           = "rsnd",
+};
+
+/*
+ *     rsnd probe
+ */
+/*
+ * Platform-driver probe: allocate the private structure, probe each
+ * sub-module (gen, scu, adg, ssi, dai), then register the ASoC
+ * platform and component.
+ *
+ * NOTE(review): failures from rsnd_scu/adg/ssi/dai_probe() return
+ * without unwinding the earlier sub-probes; in particular the clocks
+ * taken by rsnd_adg_probe() are only released via rsnd_adg_remove().
+ * Consider goto-based cleanup calling the *_remove() helpers.
+ */
+static int rsnd_probe(struct platform_device *pdev)
+{
+       struct rcar_snd_info *info;
+       struct rsnd_priv *priv;
+       struct device *dev = &pdev->dev;
+       int ret;
+
+       info = pdev->dev.platform_data;
+       if (!info) {
+               dev_err(dev, "driver needs R-Car sound information\n");
+               return -ENODEV;
+       }
+
+       /*
+        *      init priv data
+        */
+       priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+       if (!priv) {
+               dev_err(dev, "priv allocate failed\n");
+               return -ENODEV;
+       }
+
+       priv->dev       = dev;
+       priv->info      = info;
+       spin_lock_init(&priv->lock);
+
+       /*
+        *      init each module
+        */
+       ret = rsnd_gen_probe(pdev, info, priv);
+       if (ret < 0)
+               return ret;
+
+       ret = rsnd_scu_probe(pdev, info, priv);
+       if (ret < 0)
+               return ret;
+
+       ret = rsnd_adg_probe(pdev, info, priv);
+       if (ret < 0)
+               return ret;
+
+       ret = rsnd_ssi_probe(pdev, info, priv);
+       if (ret < 0)
+               return ret;
+
+       ret = rsnd_dai_probe(pdev, info, priv);
+       if (ret < 0)
+               return ret;
+
+       /*
+        *      asoc register
+        */
+       ret = snd_soc_register_platform(dev, &rsnd_soc_platform);
+       if (ret < 0) {
+               dev_err(dev, "cannot snd soc register\n");
+               return ret;
+       }
+
+       ret = snd_soc_register_component(dev, &rsnd_soc_component,
+                                        priv->daidrv, rsnd_dai_nr(priv));
+       if (ret < 0) {
+               dev_err(dev, "cannot snd dai register\n");
+               goto exit_snd_soc;
+       }
+
+       dev_set_drvdata(dev, priv);
+
+       pm_runtime_enable(dev);
+
+       dev_info(dev, "probed\n");
+       return ret;
+
+exit_snd_soc:
+       snd_soc_unregister_platform(dev);
+
+       return ret;
+}
+
+static int rsnd_remove(struct platform_device *pdev)
+{
+       struct rsnd_priv *priv = dev_get_drvdata(&pdev->dev);
+
+       pm_runtime_disable(&pdev->dev);
+
+       /*
+        *      remove each module
+        */
+       rsnd_ssi_remove(pdev, priv);
+       rsnd_adg_remove(pdev, priv);
+       rsnd_scu_remove(pdev, priv);
+       rsnd_dai_remove(pdev, priv);
+       rsnd_gen_remove(pdev, priv);
+
+       return 0;
+}
+
+static struct platform_driver rsnd_driver = {
+       .driver = {
+               .name   = "rcar_sound",
+       },
+       .probe          = rsnd_probe,
+       .remove         = rsnd_remove,
+};
+module_platform_driver(rsnd_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Renesas R-Car audio driver");
+MODULE_AUTHOR("Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>");
+MODULE_ALIAS("platform:rcar-pcm-audio");
diff --git a/sound/soc/sh/rcar/gen.c b/sound/soc/sh/rcar/gen.c
new file mode 100644 (file)
index 0000000..babb203
--- /dev/null
@@ -0,0 +1,280 @@
+/*
+ * Renesas R-Car Gen1 SRU/SSI support
+ *
+ * Copyright (C) 2013 Renesas Solutions Corp.
+ * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include "rsnd.h"
+
+/* per-generation path setup/teardown hooks (only Gen1 is wired up below) */
+struct rsnd_gen_ops {
+       int (*path_init)(struct rsnd_priv *priv,
+                        struct rsnd_dai *rdai,
+                        struct rsnd_dai_stream *io);
+       int (*path_exit)(struct rsnd_priv *priv,
+                        struct rsnd_dai *rdai,
+                        struct rsnd_dai_stream *io);
+};
+
+/*
+ * One translation entry per pseudo register (enum rsnd_reg); the real
+ * address is computed in rsnd_gen_reg_get() as
+ *     base[index] + offset_id * mod_id + offset_adr
+ */
+struct rsnd_gen_reg_map {
+       int index;      /* -1 : not supported */
+       u32 offset_id;  /* offset of ssi0, ssi1, ssi2... */
+       u32 offset_adr; /* offset of SSICR, SSISR, ... */
+};
+
+/* generation-private state hung off rsnd_priv::gen */
+struct rsnd_gen {
+       void __iomem *base[RSND_BASE_MAX];
+
+       struct rsnd_gen_reg_map reg_map[RSND_REG_MAX];
+       struct rsnd_gen_ops *ops;
+};
+
+#define rsnd_priv_to_gen(p)    ((struct rsnd_gen *)(p)->gen)
+
+/*
+ *             Gen2
+ *             will be filled in the future
+ */
+
+/*
+ *             Gen1
+ */
+/*
+ * Build the Gen1 path for one DAI stream: connect the SSI mod, then the
+ * SCU mod with the same id (Gen1 uses a fixed SSI id == SCU id pairing).
+ * Returns 0 on success or a negative errno from rsnd_dai_connect().
+ */
+static int rsnd_gen1_path_init(struct rsnd_priv *priv,
+                              struct rsnd_dai *rdai,
+                              struct rsnd_dai_stream *io)
+{
+       struct rsnd_mod *mod;
+       int ret;
+       int id;
+
+       /*
+        * Gen1 is created by SRU/SSI, and this SRU is base module of
+        * Gen2's SCU/SSIU/SSI. (Gen2 SCU/SSIU came from SRU)
+        *
+        * Easy image is..
+        *      Gen1 SRU = Gen2 SCU + SSIU + etc
+        *
+        * Gen2 SCU path is very flexible, but, Gen1 SRU (SCU parts) is
+        * using fixed path.
+        *
+        * Then, SSI id = SCU id here
+        */
+
+       /* get SSI's ID */
+       mod = rsnd_ssi_mod_get_frm_dai(priv,
+                                      rsnd_dai_id(priv, rdai),
+                                      rsnd_dai_is_play(rdai, io));
+       id = rsnd_mod_id(mod);
+
+       /* SSI */
+       mod = rsnd_ssi_mod_get(priv, id);
+       ret = rsnd_dai_connect(rdai, mod, io);
+       if (ret < 0)
+               return ret;
+
+       /* SCU */
+       mod = rsnd_scu_mod_get(priv, id);
+       ret = rsnd_dai_connect(rdai, mod, io);
+
+       return ret;
+}
+
+/*
+ * Tear down a Gen1 path: disconnect every mod attached to the stream.
+ * Error codes are OR-ed together, so a single non-zero disconnect makes
+ * the whole call report failure while still visiting all mods.
+ */
+static int rsnd_gen1_path_exit(struct rsnd_priv *priv,
+                              struct rsnd_dai *rdai,
+                              struct rsnd_dai_stream *io)
+{
+       struct rsnd_mod *mod, *n;
+       int ret = 0;
+
+       /*
+        * remove all mod from rdai
+        */
+       for_each_rsnd_mod(mod, n, io)
+               ret |= rsnd_dai_disconnect(mod);
+
+       return ret;
+}
+
+static struct rsnd_gen_ops rsnd_gen1_ops = {
+       .path_init      = rsnd_gen1_path_init,
+       .path_exit      = rsnd_gen1_path_exit,
+};
+
+/*
+ * Fill one pseudo-register map entry:
+ *   g = struct rsnd_gen, s = base-index suffix (SRU/ADG/SSI),
+ *   i = RSND_REG_* suffix, oi = per-module stride, oa = register offset
+ */
+#define RSND_GEN1_REG_MAP(g, s, i, oi, oa)                             \
+       do {                                                            \
+               (g)->reg_map[RSND_REG_##i].index  = RSND_GEN1_##s;      \
+               (g)->reg_map[RSND_REG_##i].offset_id = oi;              \
+               (g)->reg_map[RSND_REG_##i].offset_adr = oa;             \
+       } while (0)
+
+/*
+ * Populate the Gen1 pseudo-register translation table.  Registers not
+ * listed here (e.g. AUDIO_CLK_SEL2) keep index == -1 from rsnd_gen_probe()
+ * and are rejected by rsnd_gen_reg_get().
+ */
+static void rsnd_gen1_reg_map_init(struct rsnd_gen *gen)
+{
+       RSND_GEN1_REG_MAP(gen, SRU,     SRC_ROUTE_SEL,  0x0,    0x00);
+       RSND_GEN1_REG_MAP(gen, SRU,     SRC_TMG_SEL0,   0x0,    0x08);
+       RSND_GEN1_REG_MAP(gen, SRU,     SRC_TMG_SEL1,   0x0,    0x0c);
+       RSND_GEN1_REG_MAP(gen, SRU,     SRC_TMG_SEL2,   0x0,    0x10);
+       RSND_GEN1_REG_MAP(gen, SRU,     SRC_CTRL,       0x0,    0xc0);
+       RSND_GEN1_REG_MAP(gen, SRU,     SSI_MODE0,      0x0,    0xD0);
+       RSND_GEN1_REG_MAP(gen, SRU,     SSI_MODE1,      0x0,    0xD4);
+       RSND_GEN1_REG_MAP(gen, SRU,     BUSIF_MODE,     0x4,    0x20);
+       RSND_GEN1_REG_MAP(gen, SRU,     BUSIF_ADINR,    0x40,   0x214);
+
+       RSND_GEN1_REG_MAP(gen, ADG,     BRRA,           0x0,    0x00);
+       RSND_GEN1_REG_MAP(gen, ADG,     BRRB,           0x0,    0x04);
+       RSND_GEN1_REG_MAP(gen, ADG,     SSICKR,         0x0,    0x08);
+       RSND_GEN1_REG_MAP(gen, ADG,     AUDIO_CLK_SEL0, 0x0,    0x0c);
+       RSND_GEN1_REG_MAP(gen, ADG,     AUDIO_CLK_SEL1, 0x0,    0x10);
+       RSND_GEN1_REG_MAP(gen, ADG,     AUDIO_CLK_SEL3, 0x0,    0x18);
+       RSND_GEN1_REG_MAP(gen, ADG,     AUDIO_CLK_SEL4, 0x0,    0x1c);
+       RSND_GEN1_REG_MAP(gen, ADG,     AUDIO_CLK_SEL5, 0x0,    0x20);
+
+       /* SSI registers repeat every 0x40 bytes per SSI instance */
+       RSND_GEN1_REG_MAP(gen, SSI,     SSICR,          0x40,   0x00);
+       RSND_GEN1_REG_MAP(gen, SSI,     SSISR,          0x40,   0x04);
+       RSND_GEN1_REG_MAP(gen, SSI,     SSITDR,         0x40,   0x08);
+       RSND_GEN1_REG_MAP(gen, SSI,     SSIRDR,         0x40,   0x0c);
+       RSND_GEN1_REG_MAP(gen, SSI,     SSIWSR,         0x40,   0x20);
+}
+
+/*
+ * Gen1 probe: ioremap the SRU/ADG/SSI register banks (devm-managed, so
+ * no explicit unmap is needed) and install the Gen1 ops + register map.
+ *
+ * Returns 0 on success or the negative errno from devm_ioremap_resource()
+ * (previously collapsed to a blanket -ENODEV, hiding e.g. -EBUSY/-EINVAL).
+ */
+static int rsnd_gen1_probe(struct platform_device *pdev,
+                          struct rcar_snd_info *info,
+                          struct rsnd_priv *priv)
+{
+       struct device *dev = rsnd_priv_to_dev(priv);
+       struct rsnd_gen *gen = rsnd_priv_to_gen(priv);
+       struct resource *sru_res;
+       struct resource *adg_res;
+       struct resource *ssi_res;
+
+       /*
+        * map address
+        */
+       sru_res = platform_get_resource(pdev, IORESOURCE_MEM, RSND_GEN1_SRU);
+       adg_res = platform_get_resource(pdev, IORESOURCE_MEM, RSND_GEN1_ADG);
+       ssi_res = platform_get_resource(pdev, IORESOURCE_MEM, RSND_GEN1_SSI);
+
+       gen->base[RSND_GEN1_SRU] = devm_ioremap_resource(dev, sru_res);
+       gen->base[RSND_GEN1_ADG] = devm_ioremap_resource(dev, adg_res);
+       gen->base[RSND_GEN1_SSI] = devm_ioremap_resource(dev, ssi_res);
+
+       /* propagate the precise mapping error instead of -ENODEV */
+       if (IS_ERR(gen->base[RSND_GEN1_SRU]))
+               return PTR_ERR(gen->base[RSND_GEN1_SRU]);
+       if (IS_ERR(gen->base[RSND_GEN1_ADG]))
+               return PTR_ERR(gen->base[RSND_GEN1_ADG]);
+       if (IS_ERR(gen->base[RSND_GEN1_SSI]))
+               return PTR_ERR(gen->base[RSND_GEN1_SSI]);
+
+       gen->ops = &rsnd_gen1_ops;
+       rsnd_gen1_reg_map_init(gen);
+
+       dev_dbg(dev, "Gen1 device probed\n");
+       /* cast: resource_size_t may be 64 bit, %08x would be a mismatch */
+       dev_dbg(dev, "SRU : %08llx => %p\n",
+               (unsigned long long)sru_res->start, gen->base[RSND_GEN1_SRU]);
+       dev_dbg(dev, "ADG : %08llx => %p\n",
+               (unsigned long long)adg_res->start, gen->base[RSND_GEN1_ADG]);
+       dev_dbg(dev, "SSI : %08llx => %p\n",
+               (unsigned long long)ssi_res->start, gen->base[RSND_GEN1_SSI]);
+
+       return 0;
+}
+
+/* nothing to undo: all Gen1 resources are devm-managed */
+static void rsnd_gen1_remove(struct platform_device *pdev,
+                            struct rsnd_priv *priv)
+{
+}
+
+/*
+ *             Gen
+ */
+/* generation-agnostic wrapper: dispatch path setup to the installed ops */
+int rsnd_gen_path_init(struct rsnd_priv *priv,
+                      struct rsnd_dai *rdai,
+                      struct rsnd_dai_stream *io)
+{
+       struct rsnd_gen *gen = rsnd_priv_to_gen(priv);
+
+       return gen->ops->path_init(priv, rdai, io);
+}
+
+/* generation-agnostic wrapper: dispatch path teardown to the installed ops */
+int rsnd_gen_path_exit(struct rsnd_priv *priv,
+                      struct rsnd_dai *rdai,
+                      struct rsnd_dai_stream *io)
+{
+       struct rsnd_gen *gen = rsnd_priv_to_gen(priv);
+
+       return gen->ops->path_exit(priv, rdai, io);
+}
+
+/*
+ * Translate a pseudo register into its ioremapped address:
+ *     base[index] + offset_id * rsnd_mod_id(mod) + offset_adr
+ * Returns NULL (after dev_err) for an out-of-range reg, or for one the
+ * current generation left unmapped (index still -1 from rsnd_gen_probe()).
+ * Callers must therefore tolerate a NULL return.
+ */
+void __iomem *rsnd_gen_reg_get(struct rsnd_priv *priv,
+                              struct rsnd_mod *mod,
+                              enum rsnd_reg reg)
+{
+       struct rsnd_gen *gen = rsnd_priv_to_gen(priv);
+       struct device *dev = rsnd_priv_to_dev(priv);
+       int index;
+       u32 offset_id, offset_adr;
+
+       if (reg >= RSND_REG_MAX) {
+               dev_err(dev, "rsnd_reg reg error\n");
+               return NULL;
+       }
+
+       index           = gen->reg_map[reg].index;
+       offset_id       = gen->reg_map[reg].offset_id;
+       offset_adr      = gen->reg_map[reg].offset_adr;
+
+       if (index < 0) {
+               dev_err(dev, "unsupported reg access %d\n", reg);
+               return NULL;
+       }
+
+       /* scale the per-module stride; mod == NULL means a shared register */
+       if (offset_id && mod)
+               offset_id *= rsnd_mod_id(mod);
+
+       /*
+        * index/offset were set on gen1/gen2
+        */
+
+       return gen->base[index] + offset_id + offset_adr;
+}
+
+/*
+ * Allocate the generation state, mark every pseudo register unsupported
+ * (index = -1, so rsnd_gen_reg_get() rejects unmapped ones), then hand
+ * off to the generation-specific probe.  Only Gen1 exists so far; any
+ * other flags value is -ENODEV.
+ */
+int rsnd_gen_probe(struct platform_device *pdev,
+                  struct rcar_snd_info *info,
+                  struct rsnd_priv *priv)
+{
+       struct device *dev = rsnd_priv_to_dev(priv);
+       struct rsnd_gen *gen;
+       int i;
+
+       gen = devm_kzalloc(dev, sizeof(*gen), GFP_KERNEL);
+       if (!gen) {
+               dev_err(dev, "GEN allocate failed\n");
+               return -ENOMEM;
+       }
+
+       priv->gen = gen;
+
+       /*
+        * see
+        *      rsnd_reg_get()
+        *      rsnd_gen_probe()
+        */
+       for (i = 0; i < RSND_REG_MAX; i++)
+               gen->reg_map[i].index = -1;
+
+       /*
+        *      init each module
+        */
+       if (rsnd_is_gen1(priv))
+               return rsnd_gen1_probe(pdev, info, priv);
+
+       dev_err(dev, "unknown generation R-Car sound device\n");
+
+       return -ENODEV;
+}
+
+/* dispatch generation-specific teardown (Gen1 only; currently a no-op) */
+void rsnd_gen_remove(struct platform_device *pdev,
+                    struct rsnd_priv *priv)
+{
+       if (rsnd_is_gen1(priv))
+               rsnd_gen1_remove(pdev, priv);
+}
diff --git a/sound/soc/sh/rcar/rsnd.h b/sound/soc/sh/rcar/rsnd.h
new file mode 100644 (file)
index 0000000..9cc6986
--- /dev/null
@@ -0,0 +1,302 @@
+/*
+ * Renesas R-Car
+ *
+ * Copyright (C) 2013 Renesas Solutions Corp.
+ * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef RSND_H
+#define RSND_H
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/sh_dma.h>
+#include <linux/workqueue.h>
+#include <sound/rcar_snd.h>
+#include <sound/soc.h>
+#include <sound/pcm_params.h>
+
+/*
+ *     pseudo register
+ *
+ * The register address offsets SRU/SCU/SSIU on Gen1/Gen2 are very different.
+ * This driver uses pseudo register in order to hide it.
+ * see gen1/gen2 for detail
+ */
+enum rsnd_reg {
+       /* SRU/SCU */
+       RSND_REG_SRC_ROUTE_SEL,
+       RSND_REG_SRC_TMG_SEL0,
+       RSND_REG_SRC_TMG_SEL1,
+       RSND_REG_SRC_TMG_SEL2,
+       RSND_REG_SRC_CTRL,
+       RSND_REG_SSI_MODE0,
+       RSND_REG_SSI_MODE1,
+       RSND_REG_BUSIF_MODE,
+       RSND_REG_BUSIF_ADINR,
+
+       /* ADG */
+       RSND_REG_BRRA,
+       RSND_REG_BRRB,
+       RSND_REG_SSICKR,
+       RSND_REG_AUDIO_CLK_SEL0,
+       RSND_REG_AUDIO_CLK_SEL1,
+       RSND_REG_AUDIO_CLK_SEL2, /* no Gen1 mapping; access returns NULL */
+       RSND_REG_AUDIO_CLK_SEL3,
+       RSND_REG_AUDIO_CLK_SEL4,
+       RSND_REG_AUDIO_CLK_SEL5,
+
+       /* SSI */
+       RSND_REG_SSICR,
+       RSND_REG_SSISR,
+       RSND_REG_SSITDR,
+       RSND_REG_SSIRDR,
+       RSND_REG_SSIWSR,
+
+       RSND_REG_MAX,
+};
+
+struct rsnd_priv;
+struct rsnd_mod;
+struct rsnd_dai;
+struct rsnd_dai_stream;
+
+/*
+ *     R-Car basic functions
+ */
+#define rsnd_mod_read(m, r) \
+       rsnd_read(rsnd_mod_to_priv(m), m, RSND_REG_##r)
+#define rsnd_mod_write(m, r, d) \
+       rsnd_write(rsnd_mod_to_priv(m), m, RSND_REG_##r, d)
+#define rsnd_mod_bset(m, r, s, d) \
+       rsnd_bset(rsnd_mod_to_priv(m), m, RSND_REG_##r, s, d)
+
+#define rsnd_priv_read(p, r)           rsnd_read(p, NULL, RSND_REG_##r)
+#define rsnd_priv_write(p, r, d)       rsnd_write(p, NULL, RSND_REG_##r, d)
+#define rsnd_priv_bset(p, r, s, d)     rsnd_bset(p, NULL, RSND_REG_##r, s, d)
+
+u32 rsnd_read(struct rsnd_priv *priv, struct rsnd_mod *mod, enum rsnd_reg reg);
+void rsnd_write(struct rsnd_priv *priv, struct rsnd_mod *mod,
+               enum rsnd_reg reg, u32 data);
+void rsnd_bset(struct rsnd_priv *priv, struct rsnd_mod *mod, enum rsnd_reg reg,
+                   u32 mask, u32 data);
+
+/*
+ *     R-Car DMA
+ */
+struct rsnd_dma {
+       struct rsnd_priv        *priv;
+       struct sh_dmae_slave    slave;
+       struct work_struct      work;
+       struct dma_chan         *chan;
+       enum dma_data_direction dir;
+       int (*inquiry)(struct rsnd_dma *dma, dma_addr_t *buf, int *len);
+       int (*complete)(struct rsnd_dma *dma);
+
+       int submit_loop;
+};
+
+void rsnd_dma_start(struct rsnd_dma *dma);
+void rsnd_dma_stop(struct rsnd_dma *dma);
+int rsnd_dma_available(struct rsnd_dma *dma);
+int rsnd_dma_init(struct rsnd_priv *priv, struct rsnd_dma *dma,
+       int is_play, int id,
+       int (*inquiry)(struct rsnd_dma *dma, dma_addr_t *buf, int *len),
+       int (*complete)(struct rsnd_dma *dma));
+void  rsnd_dma_quit(struct rsnd_priv *priv,
+                   struct rsnd_dma *dma);
+
+
+/*
+ *     R-Car sound mod
+ */
+
+struct rsnd_mod_ops {
+       char *name;
+       int (*init)(struct rsnd_mod *mod,
+                   struct rsnd_dai *rdai,
+                   struct rsnd_dai_stream *io);
+       int (*quit)(struct rsnd_mod *mod,
+                   struct rsnd_dai *rdai,
+                   struct rsnd_dai_stream *io);
+       int (*start)(struct rsnd_mod *mod,
+                    struct rsnd_dai *rdai,
+                    struct rsnd_dai_stream *io);
+       int (*stop)(struct rsnd_mod *mod,
+                   struct rsnd_dai *rdai,
+                   struct rsnd_dai_stream *io);
+};
+
+struct rsnd_mod {
+       int id;
+       struct rsnd_priv *priv;
+       struct rsnd_mod_ops *ops;
+       struct list_head list; /* connect to rsnd_dai playback/capture */
+       struct rsnd_dma dma;
+};
+
+#define rsnd_mod_to_priv(mod) ((mod)->priv)
+#define rsnd_mod_to_dma(mod) (&(mod)->dma)
+#define rsnd_dma_to_mod(_dma) container_of((_dma), struct rsnd_mod, dma)
+#define rsnd_mod_id(mod) ((mod)->id)
+#define for_each_rsnd_mod(pos, n, io)  \
+       list_for_each_entry_safe(pos, n, &(io)->head, list)
+#define rsnd_mod_call(mod, func, rdai, io)     \
+       (!(mod) ? -ENODEV :                     \
+        !((mod)->ops->func) ? 0 :              \
+        (mod)->ops->func(mod, rdai, io))
+
+void rsnd_mod_init(struct rsnd_priv *priv,
+                  struct rsnd_mod *mod,
+                  struct rsnd_mod_ops *ops,
+                  int id);
+char *rsnd_mod_name(struct rsnd_mod *mod);
+
+/*
+ *     R-Car sound DAI
+ */
+#define RSND_DAI_NAME_SIZE     16
+struct rsnd_dai_stream {
+       struct list_head head; /* head of rsnd_mod list */
+       struct snd_pcm_substream *substream;
+       int byte_pos;
+       int period_pos;
+       int byte_per_period;
+       int next_period_byte;
+};
+
+struct rsnd_dai {
+       char name[RSND_DAI_NAME_SIZE];
+       struct rsnd_dai_platform_info *info; /* rcar_snd.h */
+       struct rsnd_dai_stream playback;
+       struct rsnd_dai_stream capture;
+
+       /*
+        * 1-bit flags: unsigned, because a signed 1-bit bitfield can only
+        * represent 0 and -1 (gcc) — assigning 1 and later comparing
+        * "== 1" would silently fail
+        */
+       unsigned int clk_master:1;
+       unsigned int bit_clk_inv:1;
+       unsigned int frm_clk_inv:1;
+       unsigned int sys_delay:1;
+       unsigned int data_alignment:1;
+};
+
+#define rsnd_dai_nr(priv) ((priv)->dai_nr)
+#define for_each_rsnd_dai(rdai, priv, i)               \
+       for (i = 0, (rdai) = rsnd_dai_get(priv, i);     \
+            i < rsnd_dai_nr(priv);                     \
+            i++, (rdai) = rsnd_dai_get(priv, i))
+
+struct rsnd_dai *rsnd_dai_get(struct rsnd_priv *priv, int id);
+int rsnd_dai_disconnect(struct rsnd_mod *mod);
+int rsnd_dai_connect(struct rsnd_dai *rdai, struct rsnd_mod *mod,
+                    struct rsnd_dai_stream *io);
+int rsnd_dai_is_play(struct rsnd_dai *rdai, struct rsnd_dai_stream *io);
+int rsnd_dai_id(struct rsnd_priv *priv, struct rsnd_dai *rdai);
+#define rsnd_dai_get_platform_info(rdai) ((rdai)->info)
+#define rsnd_io_to_runtime(io) ((io)->substream->runtime)
+
+void rsnd_dai_pointer_update(struct rsnd_dai_stream *io, int cnt);
+int rsnd_dai_pointer_offset(struct rsnd_dai_stream *io, int additional);
+
+/*
+ *     R-Car Gen1/Gen2
+ */
+int rsnd_gen_probe(struct platform_device *pdev,
+                  struct rcar_snd_info *info,
+                  struct rsnd_priv *priv);
+void rsnd_gen_remove(struct platform_device *pdev,
+                    struct rsnd_priv *priv);
+int rsnd_gen_path_init(struct rsnd_priv *priv,
+                      struct rsnd_dai *rdai,
+                      struct rsnd_dai_stream *io);
+int rsnd_gen_path_exit(struct rsnd_priv *priv,
+                      struct rsnd_dai *rdai,
+                      struct rsnd_dai_stream *io);
+void __iomem *rsnd_gen_reg_get(struct rsnd_priv *priv,
+                              struct rsnd_mod *mod,
+                              enum rsnd_reg reg);
+#define rsnd_is_gen1(s)                ((s)->info->flags & RSND_GEN1)
+#define rsnd_is_gen2(s)                ((s)->info->flags & RSND_GEN2)
+
+/*
+ *     R-Car ADG
+ */
+int rsnd_adg_ssi_clk_stop(struct rsnd_mod *mod);
+int rsnd_adg_ssi_clk_try_start(struct rsnd_mod *mod, unsigned int rate);
+int rsnd_adg_probe(struct platform_device *pdev,
+                  struct rcar_snd_info *info,
+                  struct rsnd_priv *priv);
+void rsnd_adg_remove(struct platform_device *pdev,
+                  struct rsnd_priv *priv);
+
+/*
+ *     R-Car sound priv
+ */
+struct rsnd_priv {
+
+       struct device *dev;
+       struct rcar_snd_info *info;
+       spinlock_t lock;
+
+       /*
+        * below value will be filled on rsnd_gen_probe()
+        */
+       void *gen;
+
+       /*
+        * below value will be filled on rsnd_scu_probe()
+        */
+       void *scu;
+       int scu_nr;
+
+       /*
+        * below value will be filled on rsnd_adg_probe()
+        */
+       void *adg;
+
+       /*
+        * below value will be filled on rsnd_ssi_probe()
+        */
+       void *ssiu;
+
+       /*
+        * below value will be filled on rsnd_dai_probe()
+        */
+       struct snd_soc_dai_driver *daidrv;
+       struct rsnd_dai *rdai;
+       int dai_nr;
+};
+
+#define rsnd_priv_to_dev(priv) ((priv)->dev)
+#define rsnd_lock(priv, flags) spin_lock_irqsave(&priv->lock, flags)
+#define rsnd_unlock(priv, flags) spin_unlock_irqrestore(&priv->lock, flags)
+
+/*
+ *     R-Car SCU
+ */
+int rsnd_scu_probe(struct platform_device *pdev,
+                  struct rcar_snd_info *info,
+                  struct rsnd_priv *priv);
+void rsnd_scu_remove(struct platform_device *pdev,
+                    struct rsnd_priv *priv);
+struct rsnd_mod *rsnd_scu_mod_get(struct rsnd_priv *priv, int id);
+#define rsnd_scu_nr(priv) ((priv)->scu_nr)
+
+/*
+ *     R-Car SSI
+ */
+int rsnd_ssi_probe(struct platform_device *pdev,
+                  struct rcar_snd_info *info,
+                  struct rsnd_priv *priv);
+void rsnd_ssi_remove(struct platform_device *pdev,
+                  struct rsnd_priv *priv);
+struct rsnd_mod *rsnd_ssi_mod_get(struct rsnd_priv *priv, int id);
+struct rsnd_mod *rsnd_ssi_mod_get_frm_dai(struct rsnd_priv *priv,
+                                         int dai_id, int is_play);
+
+#endif
diff --git a/sound/soc/sh/rcar/scu.c b/sound/soc/sh/rcar/scu.c
new file mode 100644 (file)
index 0000000..184d900
--- /dev/null
@@ -0,0 +1,236 @@
+/*
+ * Renesas R-Car SCU support
+ *
+ * Copyright (C) 2013 Renesas Solutions Corp.
+ * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include "rsnd.h"
+
+struct rsnd_scu {
+       struct rsnd_scu_platform_info *info; /* rcar_snd.h */
+       struct rsnd_mod mod;
+};
+
+#define rsnd_scu_mode_flags(p) ((p)->info->flags)
+
+/*
+ * ADINR
+ */
+#define OTBL_24                (0 << 16)
+#define OTBL_22                (2 << 16)
+#define OTBL_20                (4 << 16)
+#define OTBL_18                (6 << 16)
+#define OTBL_16                (8 << 16)
+
+
+#define rsnd_mod_to_scu(_mod)  \
+       container_of((_mod), struct rsnd_scu, mod)
+
+#define for_each_rsnd_scu(pos, priv, i)                                        \
+       for ((i) = 0;                                                   \
+            ((i) < rsnd_scu_nr(priv)) &&                               \
+                    ((pos) = (struct rsnd_scu *)(priv)->scu + i);      \
+            i++)
+
+/*
+ * Gen1 only: select the SRC route and timing for this SCU id.
+ * Returns 0 on success (or on non-Gen1, where it is a no-op), -EIO when
+ * the mod id is outside the route table.
+ */
+static int rsnd_scu_set_route(struct rsnd_priv *priv,
+                             struct rsnd_mod *mod,
+                             struct rsnd_dai *rdai,
+                             struct rsnd_dai_stream *io)
+{
+       struct scu_route_config {
+               u32 mask;
+               int shift;
+       } routes[] = {
+               { 0xF,  0, }, /* 0 */
+               { 0xF,  4, }, /* 1 */
+               { 0xF,  8, }, /* 2 */
+               { 0x7, 12, }, /* 3 */
+               { 0x7, 16, }, /* 4 */
+               { 0x7, 20, }, /* 5 */
+               { 0x7, 24, }, /* 6 */
+               { 0x3, 28, }, /* 7 */
+               { 0x3, 30, }, /* 8 */
+       };
+
+       u32 mask;
+       u32 val;
+       int shift;
+       int id;
+
+       /*
+        * Gen1 only
+        */
+       if (!rsnd_is_gen1(priv))
+               return 0;
+
+       id = rsnd_mod_id(mod);
+       /* ">=": valid ids are 0 .. ARRAY_SIZE(routes) - 1; the previous
+        * ">" check let id == 9 index one past the end of routes[] */
+       if (id < 0 || id >= ARRAY_SIZE(routes))
+               return -EIO;
+
+       /*
+        * SRC_ROUTE_SELECT
+        */
+       val = rsnd_dai_is_play(rdai, io) ? 0x1 : 0x2;
+       val = val               << routes[id].shift;
+       mask = routes[id].mask  << routes[id].shift;
+
+       rsnd_mod_bset(mod, SRC_ROUTE_SEL, mask, val);
+
+       /*
+        * SRC_TIMING_SELECT
+        */
+       shift   = (id % 4) * 8;
+       mask    = 0x1F << shift;
+       if (8 == id) /* SRU8 is very special */
+               val = id << shift;
+       else
+               val = (id + 1) << shift;
+
+       switch (id / 4) {
+       case 0:
+               rsnd_mod_bset(mod, SRC_TMG_SEL0, mask, val);
+               break;
+       case 1:
+               rsnd_mod_bset(mod, SRC_TMG_SEL1, mask, val);
+               break;
+       case 2:
+               rsnd_mod_bset(mod, SRC_TMG_SEL2, mask, val);
+               break;
+       }
+
+       return 0;
+}
+
+/* Gen1 only: enable this SCU id's bit in SRC_CTRL; no-op otherwise */
+static int rsnd_scu_set_mode(struct rsnd_priv *priv,
+                            struct rsnd_mod *mod,
+                            struct rsnd_dai *rdai,
+                            struct rsnd_dai_stream *io)
+{
+       int id = rsnd_mod_id(mod);
+       u32 val;
+
+       if (rsnd_is_gen1(priv)) {
+               val = (1 << id);
+               rsnd_mod_bset(mod, SRC_CTRL, val, val);
+       }
+
+       return 0;
+}
+
+/*
+ * Program BUSIF for DMA transfer: mode 1 plus ADINR = channel count
+ * OR-ed with the output bit length.  Only 16- and 32-bit sample formats
+ * are accepted; anything else returns -EIO.
+ *
+ * NOTE(review): 32-bit samples map to OTBL_24, not OTBL_16/32 — looks
+ * deliberate (24 valid bits within a 32-bit slot) but confirm against
+ * the SRU datasheet.
+ */
+static int rsnd_scu_set_hpbif(struct rsnd_priv *priv,
+                             struct rsnd_mod *mod,
+                             struct rsnd_dai *rdai,
+                             struct rsnd_dai_stream *io)
+{
+       struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
+       u32 adinr = runtime->channels;
+
+       switch (runtime->sample_bits) {
+       case 16:
+               adinr |= OTBL_16;
+               break;
+       case 32:
+               adinr |= OTBL_24;
+               break;
+       default:
+               return -EIO;
+       }
+
+       rsnd_mod_write(mod, BUSIF_MODE, 1);
+       rsnd_mod_write(mod, BUSIF_ADINR, adinr);
+
+       return 0;
+}
+
+/*
+ * .start hook of the SCU mod: configure route, mode and BUSIF when the
+ * platform data asked for HPBIF (DMA) use; otherwise the SCU stays idle
+ * and the stream falls back to PIO.
+ */
+static int rsnd_scu_start(struct rsnd_mod *mod,
+                         struct rsnd_dai *rdai,
+                         struct rsnd_dai_stream *io)
+{
+       struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
+       struct rsnd_scu *scu = rsnd_mod_to_scu(mod);
+       struct device *dev = rsnd_priv_to_dev(priv);
+       u32 flags = rsnd_scu_mode_flags(scu);
+       int ret;
+
+       /*
+        * SCU will be used if it has RSND_SCU_USB_HPBIF flags
+        */
+       if (!(flags & RSND_SCU_USB_HPBIF)) {
+               /* it uses PIO transfer */
+               dev_dbg(dev, "%s%d is not used\n",
+                       rsnd_mod_name(mod), rsnd_mod_id(mod));
+
+               return 0;
+       }
+
+       /* it uses DMA transfer */
+       ret = rsnd_scu_set_route(priv, mod, rdai, io);
+       if (ret < 0)
+               return ret;
+
+       ret = rsnd_scu_set_mode(priv, mod, rdai, io);
+       if (ret < 0)
+               return ret;
+
+       ret = rsnd_scu_set_hpbif(priv, mod, rdai, io);
+       if (ret < 0)
+               return ret;
+
+       dev_dbg(dev, "%s%d start\n", rsnd_mod_name(mod), rsnd_mod_id(mod));
+
+       return 0;
+}
+
+static struct rsnd_mod_ops rsnd_scu_ops = {
+       .name   = "scu",
+       .start  = rsnd_scu_start,
+};
+
+/* return the embedded mod of SCU #id; id must be 0 .. scu_nr-1 (BUG_ON) */
+struct rsnd_mod *rsnd_scu_mod_get(struct rsnd_priv *priv, int id)
+{
+       BUG_ON(id < 0 || id >= rsnd_scu_nr(priv));
+
+       return &((struct rsnd_scu *)(priv->scu) + id)->mod;
+}
+
+/*
+ * Allocate one struct rsnd_scu per entry in the platform data's
+ * scu_info[] array (devm-managed) and initialize each mod with the
+ * shared rsnd_scu_ops.  Fills priv->scu / priv->scu_nr used by
+ * rsnd_scu_mod_get() and for_each_rsnd_scu().
+ */
+int rsnd_scu_probe(struct platform_device *pdev,
+                  struct rcar_snd_info *info,
+                  struct rsnd_priv *priv)
+{
+       struct device *dev = rsnd_priv_to_dev(priv);
+       struct rsnd_scu *scu;
+       int i, nr;
+
+       /*
+        * init SCU
+        */
+       nr      = info->scu_info_nr;
+       scu     = devm_kzalloc(dev, sizeof(*scu) * nr, GFP_KERNEL);
+       if (!scu) {
+               dev_err(dev, "SCU allocate failed\n");
+               return -ENOMEM;
+       }
+
+       priv->scu_nr    = nr;
+       priv->scu       = scu;
+
+       for_each_rsnd_scu(scu, priv, i) {
+               rsnd_mod_init(priv, &scu->mod,
+                             &rsnd_scu_ops, i);
+               scu->info = &info->scu_info[i];
+
+               dev_dbg(dev, "SCU%d probed\n", i);
+       }
+       dev_dbg(dev, "scu probed\n");
+
+       return 0;
+}
+
+/* nothing to do: SCU memory is devm-managed and freed with the device */
+void rsnd_scu_remove(struct platform_device *pdev,
+                    struct rsnd_priv *priv)
+{
+}
diff --git a/sound/soc/sh/rcar/ssi.c b/sound/soc/sh/rcar/ssi.c
new file mode 100644 (file)
index 0000000..fae26d3
--- /dev/null
@@ -0,0 +1,728 @@
+/*
+ * Renesas R-Car SSIU/SSI support
+ *
+ * Copyright (C) 2013 Renesas Solutions Corp.
+ * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+ *
+ * Based on fsi.c
+ * Kuninori Morimoto <morimoto.kuninori@renesas.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/delay.h>
+#include "rsnd.h"
+#define RSND_SSI_NAME_SIZE 16
+
+/*
+ * SSICR
+ */
+#define        FORCE           (1 << 31)       /* Fixed */
+#define        DMEN            (1 << 28)       /* DMA Enable */
+#define        UIEN            (1 << 27)       /* Underflow Interrupt Enable */
+#define        OIEN            (1 << 26)       /* Overflow Interrupt Enable */
+#define        IIEN            (1 << 25)       /* Idle Mode Interrupt Enable */
+#define        DIEN            (1 << 24)       /* Data Interrupt Enable */
+
+#define        DWL_8           (0 << 19)       /* Data Word Length */
+#define        DWL_16          (1 << 19)       /* Data Word Length */
+#define        DWL_18          (2 << 19)       /* Data Word Length */
+#define        DWL_20          (3 << 19)       /* Data Word Length */
+#define        DWL_22          (4 << 19)       /* Data Word Length */
+#define        DWL_24          (5 << 19)       /* Data Word Length */
+#define        DWL_32          (6 << 19)       /* Data Word Length */
+
+#define        SWL_32          (3 << 16)       /* R/W System Word Length */
+#define        SCKD            (1 << 15)       /* Serial Bit Clock Direction */
+#define        SWSD            (1 << 14)       /* Serial WS Direction */
+#define        SCKP            (1 << 13)       /* Serial Bit Clock Polarity */
+#define        SWSP            (1 << 12)       /* Serial WS Polarity */
+#define        SDTA            (1 << 10)       /* Serial Data Alignment */
+#define        DEL             (1 <<  8)       /* Serial Data Delay */
+#define        CKDV(v)         (v <<  4)       /* Serial Clock Division Ratio */
+#define        TRMD            (1 <<  1)       /* Transmit/Receive Mode Select */
+#define        EN              (1 <<  0)       /* SSI Module Enable */
+
+/*
+ * SSISR
+ */
+#define        UIRQ            (1 << 27)       /* Underflow Error Interrupt Status */
+#define        OIRQ            (1 << 26)       /* Overflow Error Interrupt Status */
+#define        IIRQ            (1 << 25)       /* Idle Mode Interrupt Status */
+#define        DIRQ            (1 << 24)       /* Data Interrupt Status Flag */
+
+/*
+ * SSIWSR
+ */
+#define CONT           (1 << 8)        /* WS Continue Function */
+
+struct rsnd_ssi {
+       struct clk *clk;
+       struct rsnd_ssi_platform_info *info; /* rcar_snd.h */
+       struct rsnd_ssi *parent;
+       struct rsnd_mod mod;
+
+       struct rsnd_dai *rdai;
+       struct rsnd_dai_stream *io;
+       u32 cr_own;
+       u32 cr_clk;
+       u32 cr_etc;
+       int err;
+       int dma_offset;
+       unsigned int usrcnt;
+       unsigned int rate;
+};
+
+struct rsnd_ssiu {
+       u32 ssi_mode0;
+       u32 ssi_mode1;
+
+       int ssi_nr;
+       struct rsnd_ssi *ssi;
+};
+
+#define for_each_rsnd_ssi(pos, priv, i)                                        \
+       for (i = 0;                                                     \
+            (i < rsnd_ssi_nr(priv)) &&                                 \
+               ((pos) = ((struct rsnd_ssiu *)((priv)->ssiu))->ssi + i); \
+            i++)
+
+#define rsnd_ssi_nr(priv) (((struct rsnd_ssiu *)((priv)->ssiu))->ssi_nr)
+#define rsnd_mod_to_ssi(_mod) container_of((_mod), struct rsnd_ssi, mod)
+#define rsnd_dma_to_ssi(dma)  rsnd_mod_to_ssi(rsnd_dma_to_mod(dma))
+#define rsnd_ssi_pio_available(ssi) ((ssi)->info->pio_irq > 0)
+#define rsnd_ssi_dma_available(ssi) \
+       rsnd_dma_available(rsnd_mod_to_dma(&(ssi)->mod))
+#define rsnd_ssi_clk_from_parent(ssi) ((ssi)->parent)
+#define rsnd_rdai_is_clk_master(rdai) ((rdai)->clk_master)
+#define rsnd_ssi_mode_flags(p) ((p)->info->flags)
+#define rsnd_ssi_dai_id(ssi) ((ssi)->info->dai_id)
+#define rsnd_ssi_to_ssiu(ssi)\
+       (((struct rsnd_ssiu *)((ssi) - rsnd_mod_id(&(ssi)->mod))) - 1)
+
+static void rsnd_ssi_mode_init(struct rsnd_priv *priv,
+                              struct rsnd_ssiu *ssiu)
+{
+       struct device *dev = rsnd_priv_to_dev(priv);
+       struct rsnd_ssi *ssi;
+       u32 flags;
+       u32 val;
+       int i;
+
+       /*
+        * SSI_MODE0
+        */
+       ssiu->ssi_mode0 = 0;
+       for_each_rsnd_ssi(ssi, priv, i) {
+               flags = rsnd_ssi_mode_flags(ssi);
+
+               /* see also BUSIF_MODE */
+               if (!(flags & RSND_SSI_DEPENDENT)) {
+                       ssiu->ssi_mode0 |= (1 << i);
+                       dev_dbg(dev, "SSI%d uses INDEPENDENT mode\n", i);
+               } else {
+                       dev_dbg(dev, "SSI%d uses DEPENDENT mode\n", i);
+               }
+       }
+
+       /*
+        * SSI_MODE1
+        */
+#define ssi_parent_set(p, sync, adg, ext)              \
+       do {                                            \
+               ssi->parent = ssiu->ssi + p;            \
+               if (flags & RSND_SSI_CLK_FROM_ADG)      \
+                       val = adg;                      \
+               else                                    \
+                       val = ext;                      \
+               if (flags & RSND_SSI_SYNC)              \
+                       val |= sync;                    \
+       } while (0)
+
+       ssiu->ssi_mode1 = 0;
+       for_each_rsnd_ssi(ssi, priv, i) {
+               flags = rsnd_ssi_mode_flags(ssi);
+
+               if (!(flags & RSND_SSI_CLK_PIN_SHARE))
+                       continue;
+
+               val = 0;
+               switch (i) {
+               case 1:
+                       ssi_parent_set(0, (1 << 4), (0x2 << 0), (0x1 << 0));
+                       break;
+               case 2:
+                       ssi_parent_set(0, (1 << 4), (0x2 << 2), (0x1 << 2));
+                       break;
+               case 4:
+                       ssi_parent_set(3, (1 << 20), (0x2 << 16), (0x1 << 16));
+                       break;
+               case 8:
+                       ssi_parent_set(7, 0, 0, 0);
+                       break;
+               }
+
+               ssiu->ssi_mode1 |= val;
+       }
+}
+
+static void rsnd_ssi_mode_set(struct rsnd_ssi *ssi)
+{
+       struct rsnd_ssiu *ssiu = rsnd_ssi_to_ssiu(ssi);
+
+       rsnd_mod_write(&ssi->mod, SSI_MODE0, ssiu->ssi_mode0);
+       rsnd_mod_write(&ssi->mod, SSI_MODE1, ssiu->ssi_mode1);
+}
+
+static void rsnd_ssi_status_check(struct rsnd_mod *mod,
+                                 u32 bit)
+{
+       struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
+       struct device *dev = rsnd_priv_to_dev(priv);
+       u32 status;
+       int i;
+
+       for (i = 0; i < 1024; i++) {
+               status = rsnd_mod_read(mod, SSISR);
+               if (status & bit)
+                       return;
+
+               udelay(50);
+       }
+
+       dev_warn(dev, "status check failed\n");
+}
+
+static int rsnd_ssi_master_clk_start(struct rsnd_ssi *ssi,
+                                    unsigned int rate)
+{
+       struct rsnd_priv *priv = rsnd_mod_to_priv(&ssi->mod);
+       struct device *dev = rsnd_priv_to_dev(priv);
+       int i, j, ret;
+       int adg_clk_div_table[] = {
+               1, 6, /* see adg.c */
+       };
+       int ssi_clk_mul_table[] = {
+               1, 2, 4, 8, 16, 6, 12,
+       };
+       unsigned int main_rate;
+
+       /*
+        * Find best clock, and try to start ADG
+        */
+       for (i = 0; i < ARRAY_SIZE(adg_clk_div_table); i++) {
+               for (j = 0; j < ARRAY_SIZE(ssi_clk_mul_table); j++) {
+
+                       /*
+                        * this driver is assuming that
+                        * system word is 64fs (= 2 x 32bit)
+                        * see rsnd_ssi_start()
+                        */
+                       main_rate = rate / adg_clk_div_table[i]
+                               * 32 * 2 * ssi_clk_mul_table[j];
+
+                       ret = rsnd_adg_ssi_clk_try_start(&ssi->mod, main_rate);
+                       if (0 == ret) {
+                               ssi->rate       = rate;
+                               ssi->cr_clk     = FORCE | SWL_32 |
+                                                 SCKD | SWSD | CKDV(j);
+
+                               dev_dbg(dev, "ssi%d outputs %u Hz\n",
+                                       rsnd_mod_id(&ssi->mod), rate);
+
+                               return 0;
+                       }
+               }
+       }
+
+       dev_err(dev, "unsupported clock rate\n");
+       return -EIO;
+}
+
+static void rsnd_ssi_master_clk_stop(struct rsnd_ssi *ssi)
+{
+       ssi->rate = 0;
+       ssi->cr_clk = 0;
+       rsnd_adg_ssi_clk_stop(&ssi->mod);
+}
+
+static void rsnd_ssi_hw_start(struct rsnd_ssi *ssi,
+                             struct rsnd_dai *rdai,
+                             struct rsnd_dai_stream *io)
+{
+       struct rsnd_priv *priv = rsnd_mod_to_priv(&ssi->mod);
+       struct device *dev = rsnd_priv_to_dev(priv);
+       u32 cr;
+
+       if (0 == ssi->usrcnt) {
+               clk_enable(ssi->clk);
+
+               if (rsnd_rdai_is_clk_master(rdai)) {
+                       struct snd_pcm_runtime *runtime;
+
+                       runtime = rsnd_io_to_runtime(io);
+
+                       if (rsnd_ssi_clk_from_parent(ssi))
+                               rsnd_ssi_hw_start(ssi->parent, rdai, io);
+                       else
+                               rsnd_ssi_master_clk_start(ssi, runtime->rate);
+               }
+       }
+
+       cr  =   ssi->cr_own     |
+               ssi->cr_clk     |
+               ssi->cr_etc     |
+               EN;
+
+       rsnd_mod_write(&ssi->mod, SSICR, cr);
+
+       ssi->usrcnt++;
+
+       dev_dbg(dev, "ssi%d hw started\n", rsnd_mod_id(&ssi->mod));
+}
+
+static void rsnd_ssi_hw_stop(struct rsnd_ssi *ssi,
+                            struct rsnd_dai *rdai)
+{
+       struct rsnd_priv *priv = rsnd_mod_to_priv(&ssi->mod);
+       struct device *dev = rsnd_priv_to_dev(priv);
+       u32 cr;
+
+       if (0 == ssi->usrcnt) /* stop might be called without start */
+               return;
+
+       ssi->usrcnt--;
+
+       if (0 == ssi->usrcnt) {
+               /*
+                * disable all IRQ,
+                * and, wait all data was sent
+                */
+               cr  =   ssi->cr_own     |
+                       ssi->cr_clk;
+
+               rsnd_mod_write(&ssi->mod, SSICR, cr | EN);
+               rsnd_ssi_status_check(&ssi->mod, DIRQ);
+
+               /*
+                * disable SSI,
+                * and, wait idle state
+                */
+               rsnd_mod_write(&ssi->mod, SSICR, cr);   /* disabled all */
+               rsnd_ssi_status_check(&ssi->mod, IIRQ);
+
+               if (rsnd_rdai_is_clk_master(rdai)) {
+                       if (rsnd_ssi_clk_from_parent(ssi))
+                               rsnd_ssi_hw_stop(ssi->parent, rdai);
+                       else
+                               rsnd_ssi_master_clk_stop(ssi);
+               }
+
+               clk_disable(ssi->clk);
+       }
+
+       dev_dbg(dev, "ssi%d hw stopped\n", rsnd_mod_id(&ssi->mod));
+}
+
+/*
+ *     SSI mod common functions
+ */
+static int rsnd_ssi_init(struct rsnd_mod *mod,
+                        struct rsnd_dai *rdai,
+                        struct rsnd_dai_stream *io)
+{
+       struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
+       struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
+       struct device *dev = rsnd_priv_to_dev(priv);
+       struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
+       u32 cr;
+
+       cr = FORCE;
+
+       /*
+        * always use 32bit system word for easy clock calculation.
+        * see also rsnd_ssi_master_clk_start()
+        */
+       cr |= SWL_32;
+
+       /*
+        * init clock settings for SSICR
+        */
+       switch (runtime->sample_bits) {
+       case 16:
+               cr |= DWL_16;
+               break;
+       case 32:
+               cr |= DWL_24;
+               break;
+       default:
+               return -EIO;
+       }
+
+       if (rdai->bit_clk_inv)
+               cr |= SCKP;
+       if (rdai->frm_clk_inv)
+               cr |= SWSP;
+       if (rdai->data_alignment)
+               cr |= SDTA;
+       if (rdai->sys_delay)
+               cr |= DEL;
+       if (rsnd_dai_is_play(rdai, io))
+               cr |= TRMD;
+
+       /*
+        * set ssi parameter
+        */
+       ssi->rdai       = rdai;
+       ssi->io         = io;
+       ssi->cr_own     = cr;
+       ssi->err        = -1; /* ignore 1st error */
+
+       rsnd_ssi_mode_set(ssi);
+
+       dev_dbg(dev, "%s.%d init\n", rsnd_mod_name(mod), rsnd_mod_id(mod));
+
+       return 0;
+}
+
+static int rsnd_ssi_quit(struct rsnd_mod *mod,
+                        struct rsnd_dai *rdai,
+                        struct rsnd_dai_stream *io)
+{
+       struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
+       struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
+       struct device *dev = rsnd_priv_to_dev(priv);
+
+       dev_dbg(dev, "%s.%d quit\n", rsnd_mod_name(mod), rsnd_mod_id(mod));
+
+       if (ssi->err > 0)
+               dev_warn(dev, "ssi under/over flow err = %d\n", ssi->err);
+
+       ssi->rdai       = NULL;
+       ssi->io         = NULL;
+       ssi->cr_own     = 0;
+       ssi->err        = 0;
+
+       return 0;
+}
+
+static void rsnd_ssi_record_error(struct rsnd_ssi *ssi, u32 status)
+{
+       /* under/over flow error */
+       if (status & (UIRQ | OIRQ)) {
+               ssi->err++;
+
+               /* clear error status */
+               rsnd_mod_write(&ssi->mod, SSISR, 0);
+       }
+}
+
+/*
+ *             SSI PIO
+ */
+static irqreturn_t rsnd_ssi_pio_interrupt(int irq, void *data)
+{
+       struct rsnd_ssi *ssi = data;
+       struct rsnd_dai_stream *io = ssi->io;
+       u32 status = rsnd_mod_read(&ssi->mod, SSISR);
+       irqreturn_t ret = IRQ_NONE;
+
+       if (io && (status & DIRQ)) {
+               struct rsnd_dai *rdai = ssi->rdai;
+               struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
+               u32 *buf = (u32 *)(runtime->dma_area +
+                                  rsnd_dai_pointer_offset(io, 0));
+
+               rsnd_ssi_record_error(ssi, status);
+
+               /*
+                * 8/16/32 data can be accessed via TDR/RDR register
+                * directly as 32bit data
+                * see rsnd_ssi_init()
+                */
+               if (rsnd_dai_is_play(rdai, io))
+                       rsnd_mod_write(&ssi->mod, SSITDR, *buf);
+               else
+                       *buf = rsnd_mod_read(&ssi->mod, SSIRDR);
+
+               rsnd_dai_pointer_update(io, sizeof(*buf));
+
+               ret = IRQ_HANDLED;
+       }
+
+       return ret;
+}
+
+static int rsnd_ssi_pio_start(struct rsnd_mod *mod,
+                             struct rsnd_dai *rdai,
+                             struct rsnd_dai_stream *io)
+{
+       struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
+       struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
+       struct device *dev = rsnd_priv_to_dev(priv);
+
+       /* enable PIO IRQ */
+       ssi->cr_etc = UIEN | OIEN | DIEN;
+
+       rsnd_ssi_hw_start(ssi, rdai, io);
+
+       dev_dbg(dev, "%s.%d start\n", rsnd_mod_name(mod), rsnd_mod_id(mod));
+
+       return 0;
+}
+
+static int rsnd_ssi_pio_stop(struct rsnd_mod *mod,
+                            struct rsnd_dai *rdai,
+                            struct rsnd_dai_stream *io)
+{
+       struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
+       struct device *dev = rsnd_priv_to_dev(priv);
+       struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
+
+       dev_dbg(dev, "%s.%d stop\n", rsnd_mod_name(mod), rsnd_mod_id(mod));
+
+       ssi->cr_etc = 0;
+
+       rsnd_ssi_hw_stop(ssi, rdai);
+
+       return 0;
+}
+
+static struct rsnd_mod_ops rsnd_ssi_pio_ops = {
+       .name   = "ssi (pio)",
+       .init   = rsnd_ssi_init,
+       .quit   = rsnd_ssi_quit,
+       .start  = rsnd_ssi_pio_start,
+       .stop   = rsnd_ssi_pio_stop,
+};
+
+static int rsnd_ssi_dma_inquiry(struct rsnd_dma *dma, dma_addr_t *buf, int *len)
+{
+       struct rsnd_ssi *ssi = rsnd_dma_to_ssi(dma);
+       struct rsnd_dai_stream *io = ssi->io;
+       struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
+
+       *len = io->byte_per_period;
+       *buf = runtime->dma_addr +
+               rsnd_dai_pointer_offset(io, ssi->dma_offset + *len);
+       ssi->dma_offset = *len; /* it cares A/B plane */
+
+       return 0;
+}
+
+static int rsnd_ssi_dma_complete(struct rsnd_dma *dma)
+{
+       struct rsnd_ssi *ssi = rsnd_dma_to_ssi(dma);
+       struct rsnd_dai_stream *io = ssi->io;
+       u32 status = rsnd_mod_read(&ssi->mod, SSISR);
+
+       rsnd_ssi_record_error(ssi, status);
+
+       rsnd_dai_pointer_update(ssi->io, io->byte_per_period);
+
+       return 0;
+}
+
+static int rsnd_ssi_dma_start(struct rsnd_mod *mod,
+                             struct rsnd_dai *rdai,
+                             struct rsnd_dai_stream *io)
+{
+       struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
+       struct rsnd_dma *dma = rsnd_mod_to_dma(&ssi->mod);
+
+       /* enable DMA transfer */
+       ssi->cr_etc = DMEN;
+       ssi->dma_offset = 0;
+
+       rsnd_dma_start(dma);
+
+       rsnd_ssi_hw_start(ssi, ssi->rdai, io);
+
+       /* enable WS continue */
+       if (rsnd_rdai_is_clk_master(rdai))
+               rsnd_mod_write(&ssi->mod, SSIWSR, CONT);
+
+       return 0;
+}
+
+static int rsnd_ssi_dma_stop(struct rsnd_mod *mod,
+                            struct rsnd_dai *rdai,
+                            struct rsnd_dai_stream *io)
+{
+       struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
+       struct rsnd_dma *dma = rsnd_mod_to_dma(&ssi->mod);
+
+       ssi->cr_etc = 0;
+
+       rsnd_ssi_hw_stop(ssi, rdai);
+
+       rsnd_dma_stop(dma);
+
+       return 0;
+}
+
+static struct rsnd_mod_ops rsnd_ssi_dma_ops = {
+       .name   = "ssi (dma)",
+       .init   = rsnd_ssi_init,
+       .quit   = rsnd_ssi_quit,
+       .start  = rsnd_ssi_dma_start,
+       .stop   = rsnd_ssi_dma_stop,
+};
+
+/*
+ *             Non SSI
+ */
+static int rsnd_ssi_non(struct rsnd_mod *mod,
+                       struct rsnd_dai *rdai,
+                       struct rsnd_dai_stream *io)
+{
+       struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
+       struct device *dev = rsnd_priv_to_dev(priv);
+
+       dev_dbg(dev, "%s\n", __func__);
+
+       return 0;
+}
+
+static struct rsnd_mod_ops rsnd_ssi_non_ops = {
+       .name   = "ssi (non)",
+       .init   = rsnd_ssi_non,
+       .quit   = rsnd_ssi_non,
+       .start  = rsnd_ssi_non,
+       .stop   = rsnd_ssi_non,
+};
+
+/*
+ *             ssi mod function
+ */
+struct rsnd_mod *rsnd_ssi_mod_get_frm_dai(struct rsnd_priv *priv,
+                                         int dai_id, int is_play)
+{
+       struct rsnd_ssi *ssi;
+       int i, has_play;
+
+       is_play = !!is_play;
+
+       for_each_rsnd_ssi(ssi, priv, i) {
+               if (rsnd_ssi_dai_id(ssi) != dai_id)
+                       continue;
+
+               has_play = !!(rsnd_ssi_mode_flags(ssi) & RSND_SSI_PLAY);
+
+               if (is_play == has_play)
+                       return &ssi->mod;
+       }
+
+       return NULL;
+}
+
+struct rsnd_mod *rsnd_ssi_mod_get(struct rsnd_priv *priv, int id)
+{
+       BUG_ON(id < 0 || id >= rsnd_ssi_nr(priv));
+
+       return &(((struct rsnd_ssiu *)(priv->ssiu))->ssi + id)->mod;
+}
+
+int rsnd_ssi_probe(struct platform_device *pdev,
+                  struct rcar_snd_info *info,
+                  struct rsnd_priv *priv)
+{
+       struct rsnd_ssi_platform_info *pinfo;
+       struct device *dev = rsnd_priv_to_dev(priv);
+       struct rsnd_mod_ops *ops;
+       struct clk *clk;
+       struct rsnd_ssiu *ssiu;
+       struct rsnd_ssi *ssi;
+       char name[RSND_SSI_NAME_SIZE];
+       int i, nr, ret;
+
+       /*
+        *      init SSI
+        */
+       nr      = info->ssi_info_nr;
+       ssiu    = devm_kzalloc(dev, sizeof(*ssiu) + (sizeof(*ssi) * nr),
+                              GFP_KERNEL);
+       if (!ssiu) {
+               dev_err(dev, "SSI allocate failed\n");
+               return -ENOMEM;
+       }
+
+       priv->ssiu      = ssiu;
+       ssiu->ssi       = (struct rsnd_ssi *)(ssiu + 1);
+       ssiu->ssi_nr    = nr;
+
+       for_each_rsnd_ssi(ssi, priv, i) {
+               pinfo = &info->ssi_info[i];
+
+               snprintf(name, RSND_SSI_NAME_SIZE, "ssi.%d", i);
+
+               clk = clk_get(dev, name);
+               if (IS_ERR(clk))
+                       return PTR_ERR(clk);
+
+               ssi->info       = pinfo;
+               ssi->clk        = clk;
+
+               ops = &rsnd_ssi_non_ops;
+
+               /*
+                * SSI DMA case
+                */
+               if (pinfo->dma_id > 0) {
+                       ret = rsnd_dma_init(
+                               priv, rsnd_mod_to_dma(&ssi->mod),
+                               (rsnd_ssi_mode_flags(ssi) & RSND_SSI_PLAY),
+                               pinfo->dma_id,
+                               rsnd_ssi_dma_inquiry,
+                               rsnd_ssi_dma_complete);
+                       if (ret < 0)
+                               dev_info(dev, "SSI DMA failed. try PIO transter\n");
+                       else
+                               ops     = &rsnd_ssi_dma_ops;
+
+                       dev_dbg(dev, "SSI%d use DMA transfer\n", i);
+               }
+
+               /*
+                * SSI PIO case
+                */
+               if (!rsnd_ssi_dma_available(ssi) &&
+                    rsnd_ssi_pio_available(ssi)) {
+                       ret = devm_request_irq(dev, pinfo->pio_irq,
+                                              &rsnd_ssi_pio_interrupt,
+                                              IRQF_SHARED,
+                                              dev_name(dev), ssi);
+                       if (ret) {
+                               dev_err(dev, "SSI request interrupt failed\n");
+                               return ret;
+                       }
+
+                       ops     = &rsnd_ssi_pio_ops;
+
+                       dev_dbg(dev, "SSI%d use PIO transfer\n", i);
+               }
+
+               rsnd_mod_init(priv, &ssi->mod, ops, i);
+       }
+
+       rsnd_ssi_mode_init(priv, ssiu);
+
+       dev_dbg(dev, "ssi probed\n");
+
+       return 0;
+}
+
+void rsnd_ssi_remove(struct platform_device *pdev,
+                  struct rsnd_priv *priv)
+{
+       struct rsnd_ssi *ssi;
+       int i;
+
+       for_each_rsnd_ssi(ssi, priv, i) {
+               clk_put(ssi->clk);
+               if (rsnd_ssi_dma_available(ssi))
+                       rsnd_dma_quit(priv, rsnd_mod_to_dma(&ssi->mod));
+       }
+
+}
index 06a8000aa07bedd1c47beb401d26e9052512fc54..53c9ecdd119f14fa7a33832d11a88746e257307a 100644 (file)
@@ -149,8 +149,9 @@ static int soc_compr_free(struct snd_compr_stream *cstream)
                                        SND_SOC_DAPM_STREAM_STOP);
                } else {
                        rtd->pop_wait = 1;
-                       schedule_delayed_work(&rtd->delayed_work,
-                               msecs_to_jiffies(rtd->pmdown_time));
+                       queue_delayed_work(system_power_efficient_wq,
+                                          &rtd->delayed_work,
+                                          msecs_to_jiffies(rtd->pmdown_time));
                }
        } else {
                /* capture streams can be powered down now */
@@ -334,7 +335,7 @@ static int soc_compr_copy(struct snd_compr_stream *cstream,
        return ret;
 }
 
-static int sst_compr_set_metadata(struct snd_compr_stream *cstream,
+static int soc_compr_set_metadata(struct snd_compr_stream *cstream,
                                struct snd_compr_metadata *metadata)
 {
        struct snd_soc_pcm_runtime *rtd = cstream->private_data;
@@ -347,7 +348,7 @@ static int sst_compr_set_metadata(struct snd_compr_stream *cstream,
        return ret;
 }
 
-static int sst_compr_get_metadata(struct snd_compr_stream *cstream,
+static int soc_compr_get_metadata(struct snd_compr_stream *cstream,
                                struct snd_compr_metadata *metadata)
 {
        struct snd_soc_pcm_runtime *rtd = cstream->private_data;
@@ -364,8 +365,8 @@ static struct snd_compr_ops soc_compr_ops = {
        .open           = soc_compr_open,
        .free           = soc_compr_free,
        .set_params     = soc_compr_set_params,
-       .set_metadata   = sst_compr_set_metadata,
-       .get_metadata   = sst_compr_get_metadata,
+       .set_metadata   = soc_compr_set_metadata,
+       .get_metadata   = soc_compr_get_metadata,
        .get_params     = soc_compr_get_params,
        .trigger        = soc_compr_trigger,
        .pointer        = soc_compr_pointer,
index d82ee386eab564d352eccd4209dff767b222e6f3..5471940dc0f7546d2ff0f263742d404f7f7ec780 100644 (file)
 #include <linux/bitops.h>
 #include <linux/debugfs.h>
 #include <linux/platform_device.h>
+#include <linux/pinctrl/consumer.h>
 #include <linux/ctype.h>
 #include <linux/slab.h>
 #include <linux/of.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
 #include <sound/ac97_codec.h>
 #include <sound/core.h>
 #include <sound/jack.h>
@@ -47,8 +50,6 @@
 
 #define NAME_SIZE      32
 
-static DECLARE_WAIT_QUEUE_HEAD(soc_pm_waitq);
-
 #ifdef CONFIG_DEBUG_FS
 struct dentry *snd_soc_debugfs_root;
 EXPORT_SYMBOL_GPL(snd_soc_debugfs_root);
@@ -69,6 +70,16 @@ static int pmdown_time = 5000;
 module_param(pmdown_time, int, 0);
 MODULE_PARM_DESC(pmdown_time, "DAPM stream powerdown time (msecs)");
 
+struct snd_ac97_reset_cfg {
+       struct pinctrl *pctl;
+       struct pinctrl_state *pstate_reset;
+       struct pinctrl_state *pstate_warm_reset;
+       struct pinctrl_state *pstate_run;
+       int gpio_sdata;
+       int gpio_sync;
+       int gpio_reset;
+};
+
 /* returns the minimum number of bytes needed to represent
  * a particular given value */
 static int min_bytes_needed(unsigned long val)
@@ -192,7 +203,7 @@ static ssize_t pmdown_time_set(struct device *dev,
        struct snd_soc_pcm_runtime *rtd = dev_get_drvdata(dev);
        int ret;
 
-       ret = strict_strtol(buf, 10, &rtd->pmdown_time);
+       ret = kstrtol(buf, 10, &rtd->pmdown_time);
        if (ret)
                return ret;
 
@@ -237,6 +248,7 @@ static ssize_t codec_reg_write_file(struct file *file,
        char *start = buf;
        unsigned long reg, value;
        struct snd_soc_codec *codec = file->private_data;
+       int ret;
 
        buf_size = min(count, (sizeof(buf)-1));
        if (copy_from_user(buf, user_buf, buf_size))
@@ -248,8 +260,9 @@ static ssize_t codec_reg_write_file(struct file *file,
        reg = simple_strtoul(start, &start, 16);
        while (*start == ' ')
                start++;
-       if (strict_strtoul(start, 16, &value))
-               return -EINVAL;
+       ret = kstrtoul(start, 16, &value);
+       if (ret)
+               return ret;
 
        /* Userspace has been fiddling around behind the kernel's back */
        add_taint(TAINT_USER, LOCKDEP_NOW_UNRELIABLE);
@@ -530,6 +543,15 @@ static int soc_ac97_dev_register(struct snd_soc_codec *codec)
 }
 #endif
 
+static void codec2codec_close_delayed_work(struct work_struct *work)
+{
+       /* Currently nothing to do for c2c links
+        * Since c2c links are internal nodes in the DAPM graph and
+        * don't interface with the outside world or application layer
+        * we don't have to do any special handling on close.
+        */
+}
+
 #ifdef CONFIG_PM_SLEEP
 /* powers down audio subsystem for suspend */
 int snd_soc_suspend(struct device *dev)
@@ -1428,6 +1450,9 @@ static int soc_probe_link_dais(struct snd_soc_card *card, int num, int order)
                                return ret;
                        }
                } else {
+                       INIT_DELAYED_WORK(&rtd->delayed_work,
+                                               codec2codec_close_delayed_work);
+
                        /* link the DAI widgets */
                        play_w = codec_dai->playback_widget;
                        capture_w = cpu_dai->capture_widget;
@@ -2080,6 +2105,117 @@ int snd_soc_new_ac97_codec(struct snd_soc_codec *codec,
 }
 EXPORT_SYMBOL_GPL(snd_soc_new_ac97_codec);
 
+static struct snd_ac97_reset_cfg snd_ac97_rst_cfg;
+
+static void snd_soc_ac97_warm_reset(struct snd_ac97 *ac97)
+{
+       struct pinctrl *pctl = snd_ac97_rst_cfg.pctl;
+
+       pinctrl_select_state(pctl, snd_ac97_rst_cfg.pstate_warm_reset);
+
+       gpio_direction_output(snd_ac97_rst_cfg.gpio_sync, 1);
+
+       udelay(10);
+
+       gpio_direction_output(snd_ac97_rst_cfg.gpio_sync, 0);
+
+       pinctrl_select_state(pctl, snd_ac97_rst_cfg.pstate_run);
+       msleep(2);
+}
+
+static void snd_soc_ac97_reset(struct snd_ac97 *ac97)
+{
+       struct pinctrl *pctl = snd_ac97_rst_cfg.pctl;
+
+       pinctrl_select_state(pctl, snd_ac97_rst_cfg.pstate_reset);
+
+       gpio_direction_output(snd_ac97_rst_cfg.gpio_sync, 0);
+       gpio_direction_output(snd_ac97_rst_cfg.gpio_sdata, 0);
+       gpio_direction_output(snd_ac97_rst_cfg.gpio_reset, 0);
+
+       udelay(10);
+
+       gpio_direction_output(snd_ac97_rst_cfg.gpio_reset, 1);
+
+       pinctrl_select_state(pctl, snd_ac97_rst_cfg.pstate_run);
+       msleep(2);
+}
+
+static int snd_soc_ac97_parse_pinctl(struct device *dev,
+               struct snd_ac97_reset_cfg *cfg)
+{
+       struct pinctrl *p;
+       struct pinctrl_state *state;
+       int gpio;
+       int ret;
+
+       p = devm_pinctrl_get(dev);
+       if (IS_ERR(p)) {
+               dev_err(dev, "Failed to get pinctrl\n");
+               return PTR_RET(p);
+       }
+       cfg->pctl = p;
+
+       state = pinctrl_lookup_state(p, "ac97-reset");
+       if (IS_ERR(state)) {
+               dev_err(dev, "Can't find pinctrl state ac97-reset\n");
+               return PTR_RET(state);
+       }
+       cfg->pstate_reset = state;
+
+       state = pinctrl_lookup_state(p, "ac97-warm-reset");
+       if (IS_ERR(state)) {
+               dev_err(dev, "Can't find pinctrl state ac97-warm-reset\n");
+               return PTR_RET(state);
+       }
+       cfg->pstate_warm_reset = state;
+
+       state = pinctrl_lookup_state(p, "ac97-running");
+       if (IS_ERR(state)) {
+               dev_err(dev, "Can't find pinctrl state ac97-running\n");
+               return PTR_RET(state);
+       }
+       cfg->pstate_run = state;
+
+       gpio = of_get_named_gpio(dev->of_node, "ac97-gpios", 0);
+       if (gpio < 0) {
+               dev_err(dev, "Can't find ac97-sync gpio\n");
+               return gpio;
+       }
+       ret = devm_gpio_request(dev, gpio, "AC97 link sync");
+       if (ret) {
+               dev_err(dev, "Failed requesting ac97-sync gpio\n");
+               return ret;
+       }
+       cfg->gpio_sync = gpio;
+
+       gpio = of_get_named_gpio(dev->of_node, "ac97-gpios", 1);
+       if (gpio < 0) {
+               dev_err(dev, "Can't find ac97-sdata gpio %d\n", gpio);
+               return gpio;
+       }
+       ret = devm_gpio_request(dev, gpio, "AC97 link sdata");
+       if (ret) {
+               dev_err(dev, "Failed requesting ac97-sdata gpio\n");
+               return ret;
+       }
+       cfg->gpio_sdata = gpio;
+
+       gpio = of_get_named_gpio(dev->of_node, "ac97-gpios", 2);
+       if (gpio < 0) {
+               dev_err(dev, "Can't find ac97-reset gpio\n");
+               return gpio;
+       }
+       ret = devm_gpio_request(dev, gpio, "AC97 link reset");
+       if (ret) {
+               dev_err(dev, "Failed requesting ac97-reset gpio\n");
+               return ret;
+       }
+       cfg->gpio_reset = gpio;
+
+       return 0;
+}
+
 struct snd_ac97_bus_ops *soc_ac97_ops;
 EXPORT_SYMBOL_GPL(soc_ac97_ops);
 
@@ -2097,6 +2233,35 @@ int snd_soc_set_ac97_ops(struct snd_ac97_bus_ops *ops)
 }
 EXPORT_SYMBOL_GPL(snd_soc_set_ac97_ops);
 
+/**
+ * snd_soc_set_ac97_ops_of_reset - Set ac97 ops with generic ac97 reset functions
+ *
+ * This function sets the reset and warm_reset properties of ops and parses
+ * the device node of pdev to get pinctrl states and gpio numbers to use.
+ */
+int snd_soc_set_ac97_ops_of_reset(struct snd_ac97_bus_ops *ops,
+               struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct snd_ac97_reset_cfg cfg;
+       int ret;
+
+       ret = snd_soc_ac97_parse_pinctl(dev, &cfg);
+       if (ret)
+               return ret;
+
+       ret = snd_soc_set_ac97_ops(ops);
+       if (ret)
+               return ret;
+
+       ops->warm_reset = snd_soc_ac97_warm_reset;
+       ops->reset = snd_soc_ac97_reset;
+
+       snd_ac97_rst_cfg = cfg;
+       return 0;
+}
+EXPORT_SYMBOL_GPL(snd_soc_set_ac97_ops_of_reset);
+
 /**
  * snd_soc_free_ac97_codec - free AC97 codec device
  * @codec: audio codec
@@ -2299,6 +2464,22 @@ static int snd_soc_add_controls(struct snd_card *card, struct device *dev,
        return 0;
 }
 
+struct snd_kcontrol *snd_soc_card_get_kcontrol(struct snd_soc_card *soc_card,
+                                              const char *name)
+{
+       struct snd_card *card = soc_card->snd_card;
+       struct snd_kcontrol *kctl;
+
+       if (unlikely(!name))
+               return NULL;
+
+       list_for_each_entry(kctl, &card->controls, list)
+               if (!strncmp(kctl->id.name, name, sizeof(kctl->id.name)))
+                       return kctl;
+       return NULL;
+}
+EXPORT_SYMBOL_GPL(snd_soc_card_get_kcontrol);
+
 /**
  * snd_soc_add_codec_controls - add an array of controls to a codec.
  * Convenience function to add a list of controls. Many codecs were
@@ -2540,59 +2721,6 @@ int snd_soc_put_value_enum_double(struct snd_kcontrol *kcontrol,
 }
 EXPORT_SYMBOL_GPL(snd_soc_put_value_enum_double);
 
-/**
- * snd_soc_info_enum_ext - external enumerated single mixer info callback
- * @kcontrol: mixer control
- * @uinfo: control element information
- *
- * Callback to provide information about an external enumerated
- * single mixer.
- *
- * Returns 0 for success.
- */
-int snd_soc_info_enum_ext(struct snd_kcontrol *kcontrol,
-       struct snd_ctl_elem_info *uinfo)
-{
-       struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
-
-       uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
-       uinfo->count = 1;
-       uinfo->value.enumerated.items = e->max;
-
-       if (uinfo->value.enumerated.item > e->max - 1)
-               uinfo->value.enumerated.item = e->max - 1;
-       strcpy(uinfo->value.enumerated.name,
-               e->texts[uinfo->value.enumerated.item]);
-       return 0;
-}
-EXPORT_SYMBOL_GPL(snd_soc_info_enum_ext);
-
-/**
- * snd_soc_info_volsw_ext - external single mixer info callback
- * @kcontrol: mixer control
- * @uinfo: control element information
- *
- * Callback to provide information about a single external mixer control.
- *
- * Returns 0 for success.
- */
-int snd_soc_info_volsw_ext(struct snd_kcontrol *kcontrol,
-       struct snd_ctl_elem_info *uinfo)
-{
-       int max = kcontrol->private_value;
-
-       if (max == 1 && !strstr(kcontrol->id.name, " Volume"))
-               uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN;
-       else
-               uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
-
-       uinfo->count = 1;
-       uinfo->value.integer.min = 0;
-       uinfo->value.integer.max = max;
-       return 0;
-}
-EXPORT_SYMBOL_GPL(snd_soc_info_volsw_ext);
-
 /**
  * snd_soc_info_volsw - single mixer info callback
  * @kcontrol: mixer control
index 4375c9f2b791006717f807cdddcb343bb0f8074c..d84bd0f167b684e69bd93ef9c3205e93eca39d99 100644 (file)
 
 #define DAPM_UPDATE_STAT(widget, val) widget->dapm->card->dapm_stats.val++;
 
+static int snd_soc_dapm_add_path(struct snd_soc_dapm_context *dapm,
+       struct snd_soc_dapm_widget *wsource, struct snd_soc_dapm_widget *wsink,
+       const char *control,
+       int (*connected)(struct snd_soc_dapm_widget *source,
+                        struct snd_soc_dapm_widget *sink));
+static struct snd_soc_dapm_widget *
+snd_soc_dapm_new_control(struct snd_soc_dapm_context *dapm,
+                        const struct snd_soc_dapm_widget *widget);
+
 /* dapm power sequences - make this per codec in the future */
 static int dapm_up_seq[] = {
        [snd_soc_dapm_pre] = 0,
@@ -73,16 +82,18 @@ static int dapm_up_seq[] = {
        [snd_soc_dapm_hp] = 10,
        [snd_soc_dapm_spk] = 10,
        [snd_soc_dapm_line] = 10,
-       [snd_soc_dapm_post] = 11,
+       [snd_soc_dapm_kcontrol] = 11,
+       [snd_soc_dapm_post] = 12,
 };
 
 static int dapm_down_seq[] = {
        [snd_soc_dapm_pre] = 0,
-       [snd_soc_dapm_adc] = 1,
-       [snd_soc_dapm_hp] = 2,
-       [snd_soc_dapm_spk] = 2,
-       [snd_soc_dapm_line] = 2,
-       [snd_soc_dapm_out_drv] = 2,
+       [snd_soc_dapm_kcontrol] = 1,
+       [snd_soc_dapm_adc] = 2,
+       [snd_soc_dapm_hp] = 3,
+       [snd_soc_dapm_spk] = 3,
+       [snd_soc_dapm_line] = 3,
+       [snd_soc_dapm_out_drv] = 3,
        [snd_soc_dapm_pga] = 4,
        [snd_soc_dapm_switch] = 5,
        [snd_soc_dapm_mixer_named_ctl] = 5,
@@ -174,36 +185,176 @@ static inline struct snd_soc_dapm_widget *dapm_cnew_widget(
        return kmemdup(_widget, sizeof(*_widget), GFP_KERNEL);
 }
 
-/* get snd_card from DAPM context */
-static inline struct snd_card *dapm_get_snd_card(
-       struct snd_soc_dapm_context *dapm)
+struct dapm_kcontrol_data {
+       unsigned int value;
+       struct snd_soc_dapm_widget *widget;
+       struct list_head paths;
+       struct snd_soc_dapm_widget_list *wlist;
+};
+
+static int dapm_kcontrol_data_alloc(struct snd_soc_dapm_widget *widget,
+       struct snd_kcontrol *kcontrol)
 {
-       if (dapm->codec)
-               return dapm->codec->card->snd_card;
-       else if (dapm->platform)
-               return dapm->platform->card->snd_card;
-       else
-               BUG();
+       struct dapm_kcontrol_data *data;
+       struct soc_mixer_control *mc;
 
-       /* unreachable */
-       return NULL;
+       data = kzalloc(sizeof(*data), GFP_KERNEL);
+       if (!data) {
+               dev_err(widget->dapm->dev,
+                               "ASoC: can't allocate kcontrol data for %s\n",
+                               widget->name);
+               return -ENOMEM;
+       }
+
+       INIT_LIST_HEAD(&data->paths);
+
+       switch (widget->id) {
+       case snd_soc_dapm_switch:
+       case snd_soc_dapm_mixer:
+       case snd_soc_dapm_mixer_named_ctl:
+               mc = (struct soc_mixer_control *)kcontrol->private_value;
+
+               if (mc->autodisable) {
+                       struct snd_soc_dapm_widget template;
+
+                       memset(&template, 0, sizeof(template));
+                       template.reg = mc->reg;
+                       template.mask = (1 << fls(mc->max)) - 1;
+                       template.shift = mc->shift;
+                       if (mc->invert)
+                               template.off_val = mc->max;
+                       else
+                               template.off_val = 0;
+                       template.on_val = template.off_val;
+                       template.id = snd_soc_dapm_kcontrol;
+                       template.name = kcontrol->id.name;
+
+                       data->widget = snd_soc_dapm_new_control(widget->dapm,
+                               &template);
+                       if (!data->widget) {
+                               kfree(data);
+                               return -ENOMEM;
+                       }
+               }
+               break;
+       default:
+               break;
+       }
+
+       kcontrol->private_data = data;
+
+       return 0;
 }
 
-/* get soc_card from DAPM context */
-static inline struct snd_soc_card *dapm_get_soc_card(
-               struct snd_soc_dapm_context *dapm)
+static void dapm_kcontrol_free(struct snd_kcontrol *kctl)
 {
-       if (dapm->codec)
-               return dapm->codec->card;
-       else if (dapm->platform)
-               return dapm->platform->card;
+       struct dapm_kcontrol_data *data = snd_kcontrol_chip(kctl);
+       kfree(data->widget);
+       kfree(data->wlist);
+       kfree(data);
+}
+
+static struct snd_soc_dapm_widget_list *dapm_kcontrol_get_wlist(
+       const struct snd_kcontrol *kcontrol)
+{
+       struct dapm_kcontrol_data *data = snd_kcontrol_chip(kcontrol);
+
+       return data->wlist;
+}
+
+static int dapm_kcontrol_add_widget(struct snd_kcontrol *kcontrol,
+       struct snd_soc_dapm_widget *widget)
+{
+       struct dapm_kcontrol_data *data = snd_kcontrol_chip(kcontrol);
+       struct snd_soc_dapm_widget_list *new_wlist;
+       unsigned int n;
+
+       if (data->wlist)
+               n = data->wlist->num_widgets + 1;
        else
-               BUG();
+               n = 1;
 
-       /* unreachable */
-       return NULL;
+       new_wlist = krealloc(data->wlist,
+                       sizeof(*new_wlist) + sizeof(widget) * n, GFP_KERNEL);
+       if (!new_wlist)
+               return -ENOMEM;
+
+       new_wlist->widgets[n - 1] = widget;
+       new_wlist->num_widgets = n;
+
+       data->wlist = new_wlist;
+
+       return 0;
+}
+
+static void dapm_kcontrol_add_path(const struct snd_kcontrol *kcontrol,
+       struct snd_soc_dapm_path *path)
+{
+       struct dapm_kcontrol_data *data = snd_kcontrol_chip(kcontrol);
+
+       list_add_tail(&path->list_kcontrol, &data->paths);
+
+       if (data->widget) {
+               snd_soc_dapm_add_path(data->widget->dapm, data->widget,
+                   path->source, NULL, NULL);
+       }
+}
+
+static bool dapm_kcontrol_is_powered(const struct snd_kcontrol *kcontrol)
+{
+       struct dapm_kcontrol_data *data = snd_kcontrol_chip(kcontrol);
+
+       if (!data->widget)
+               return true;
+
+       return data->widget->power;
+}
+
+static struct list_head *dapm_kcontrol_get_path_list(
+       const struct snd_kcontrol *kcontrol)
+{
+       struct dapm_kcontrol_data *data = snd_kcontrol_chip(kcontrol);
+
+       return &data->paths;
+}
+
+#define dapm_kcontrol_for_each_path(path, kcontrol) \
+       list_for_each_entry(path, dapm_kcontrol_get_path_list(kcontrol), \
+               list_kcontrol)
+
+static unsigned int dapm_kcontrol_get_value(const struct snd_kcontrol *kcontrol)
+{
+       struct dapm_kcontrol_data *data = snd_kcontrol_chip(kcontrol);
+
+       return data->value;
+}
+
+static bool dapm_kcontrol_set_value(const struct snd_kcontrol *kcontrol,
+       unsigned int value)
+{
+       struct dapm_kcontrol_data *data = snd_kcontrol_chip(kcontrol);
+
+       if (data->value == value)
+               return false;
+
+       if (data->widget)
+               data->widget->on_val = value;
+
+       data->value = value;
+
+       return true;
 }
 
+/**
+ * snd_soc_dapm_kcontrol_codec() - Returns the codec associated to a kcontrol
+ * @kcontrol: The kcontrol
+ */
+struct snd_soc_codec *snd_soc_dapm_kcontrol_codec(struct snd_kcontrol *kcontrol)
+{
+       return dapm_kcontrol_get_wlist(kcontrol)->widgets[0]->codec;
+}
+EXPORT_SYMBOL_GPL(snd_soc_dapm_kcontrol_codec);
+
 static void dapm_reset(struct snd_soc_card *card)
 {
        struct snd_soc_dapm_widget *w;
@@ -211,6 +362,7 @@ static void dapm_reset(struct snd_soc_card *card)
        memset(&card->dapm_stats, 0, sizeof(card->dapm_stats));
 
        list_for_each_entry(w, &card->widgets, list) {
+               w->new_power = w->power;
                w->power_checked = false;
                w->inputs = -1;
                w->outputs = -1;
@@ -428,6 +580,7 @@ static void dapm_set_path_status(struct snd_soc_dapm_widget *w,
        case snd_soc_dapm_spk:
        case snd_soc_dapm_line:
        case snd_soc_dapm_dai_link:
+       case snd_soc_dapm_kcontrol:
                p->connect = 1;
        break;
        /* does affect routing - dynamically connected */
@@ -507,17 +660,12 @@ static int dapm_is_shared_kcontrol(struct snd_soc_dapm_context *dapm,
        return 0;
 }
 
-static void dapm_kcontrol_free(struct snd_kcontrol *kctl)
-{
-       kfree(kctl->private_data);
-}
-
 /*
  * Determine if a kcontrol is shared. If it is, look it up. If it isn't,
  * create it. Either way, add the widget into the control's widget list
  */
 static int dapm_create_or_share_mixmux_kcontrol(struct snd_soc_dapm_widget *w,
-       int kci, struct snd_soc_dapm_path *path)
+       int kci)
 {
        struct snd_soc_dapm_context *dapm = w->dapm;
        struct snd_card *card = dapm->card->snd_card;
@@ -525,9 +673,6 @@ static int dapm_create_or_share_mixmux_kcontrol(struct snd_soc_dapm_widget *w,
        size_t prefix_len;
        int shared;
        struct snd_kcontrol *kcontrol;
-       struct snd_soc_dapm_widget_list *wlist;
-       int wlistentries;
-       size_t wlistsize;
        bool wname_in_long_name, kcname_in_long_name;
        char *long_name;
        const char *name;
@@ -546,25 +691,6 @@ static int dapm_create_or_share_mixmux_kcontrol(struct snd_soc_dapm_widget *w,
        shared = dapm_is_shared_kcontrol(dapm, w, &w->kcontrol_news[kci],
                                         &kcontrol);
 
-       if (kcontrol) {
-               wlist = kcontrol->private_data;
-               wlistentries = wlist->num_widgets + 1;
-       } else {
-               wlist = NULL;
-               wlistentries = 1;
-       }
-
-       wlistsize = sizeof(struct snd_soc_dapm_widget_list) +
-                       wlistentries * sizeof(struct snd_soc_dapm_widget *);
-       wlist = krealloc(wlist, wlistsize, GFP_KERNEL);
-       if (wlist == NULL) {
-               dev_err(dapm->dev, "ASoC: can't allocate widget list for %s\n",
-                       w->name);
-               return -ENOMEM;
-       }
-       wlist->num_widgets = wlistentries;
-       wlist->widgets[wlistentries - 1] = w;
-
        if (!kcontrol) {
                if (shared) {
                        wname_in_long_name = false;
@@ -587,7 +713,6 @@ static int dapm_create_or_share_mixmux_kcontrol(struct snd_soc_dapm_widget *w,
                                kcname_in_long_name = false;
                                break;
                        default:
-                               kfree(wlist);
                                return -EINVAL;
                        }
                }
@@ -602,10 +727,8 @@ static int dapm_create_or_share_mixmux_kcontrol(struct snd_soc_dapm_widget *w,
                        long_name = kasprintf(GFP_KERNEL, "%s %s",
                                 w->name + prefix_len,
                                 w->kcontrol_news[kci].name);
-                       if (long_name == NULL) {
-                               kfree(wlist);
+                       if (long_name == NULL)
                                return -ENOMEM;
-                       }
 
                        name = long_name;
                } else if (wname_in_long_name) {
@@ -616,23 +739,33 @@ static int dapm_create_or_share_mixmux_kcontrol(struct snd_soc_dapm_widget *w,
                        name = w->kcontrol_news[kci].name;
                }
 
-               kcontrol = snd_soc_cnew(&w->kcontrol_news[kci], wlist, name,
+               kcontrol = snd_soc_cnew(&w->kcontrol_news[kci], NULL, name,
                                        prefix);
-               kcontrol->private_free = dapm_kcontrol_free;
                kfree(long_name);
+               if (!kcontrol)
+                       return -ENOMEM;
+               kcontrol->private_free = dapm_kcontrol_free;
+
+               ret = dapm_kcontrol_data_alloc(w, kcontrol);
+               if (ret) {
+                       snd_ctl_free_one(kcontrol);
+                       return ret;
+               }
+
                ret = snd_ctl_add(card, kcontrol);
                if (ret < 0) {
                        dev_err(dapm->dev,
                                "ASoC: failed to add widget %s dapm kcontrol %s: %d\n",
                                w->name, name, ret);
-                       kfree(wlist);
                        return ret;
                }
        }
 
-       kcontrol->private_data = wlist;
+       ret = dapm_kcontrol_add_widget(kcontrol, w);
+       if (ret)
+               return ret;
+
        w->kcontrols[kci] = kcontrol;
-       path->kcontrol = kcontrol;
 
        return 0;
 }
@@ -652,13 +785,15 @@ static int dapm_new_mixer(struct snd_soc_dapm_widget *w)
                                continue;
 
                        if (w->kcontrols[i]) {
-                               path->kcontrol = w->kcontrols[i];
+                               dapm_kcontrol_add_path(w->kcontrols[i], path);
                                continue;
                        }
 
-                       ret = dapm_create_or_share_mixmux_kcontrol(w, i, path);
+                       ret = dapm_create_or_share_mixmux_kcontrol(w, i);
                        if (ret < 0)
                                return ret;
+
+                       dapm_kcontrol_add_path(w->kcontrols[i], path);
                }
        }
 
@@ -684,15 +819,12 @@ static int dapm_new_mux(struct snd_soc_dapm_widget *w)
                return -EINVAL;
        }
 
-       path = list_first_entry(&w->sources, struct snd_soc_dapm_path,
-                               list_sink);
-
-       ret = dapm_create_or_share_mixmux_kcontrol(w, 0, path);
+       ret = dapm_create_or_share_mixmux_kcontrol(w, 0);
        if (ret < 0)
                return ret;
 
        list_for_each_entry(path, &w->sources, list_sink)
-               path->kcontrol = w->kcontrols[0];
+               dapm_kcontrol_add_path(w->kcontrols[0], path);
 
        return 0;
 }
@@ -813,6 +945,7 @@ static int is_connected_output_ep(struct snd_soc_dapm_widget *widget,
        case snd_soc_dapm_supply:
        case snd_soc_dapm_regulator_supply:
        case snd_soc_dapm_clock_supply:
+       case snd_soc_dapm_kcontrol:
                return 0;
        default:
                break;
@@ -908,6 +1041,7 @@ static int is_connected_input_ep(struct snd_soc_dapm_widget *widget,
        case snd_soc_dapm_supply:
        case snd_soc_dapm_regulator_supply:
        case snd_soc_dapm_clock_supply:
+       case snd_soc_dapm_kcontrol:
                return 0;
        default:
                break;
@@ -1062,7 +1196,7 @@ int dapm_regulator_event(struct snd_soc_dapm_widget *w,
        int ret;
 
        if (SND_SOC_DAPM_EVENT_ON(event)) {
-               if (w->invert & SND_SOC_DAPM_REGULATOR_BYPASS) {
+               if (w->on_val & SND_SOC_DAPM_REGULATOR_BYPASS) {
                        ret = regulator_allow_bypass(w->regulator, false);
                        if (ret != 0)
                                dev_warn(w->dapm->dev,
@@ -1072,7 +1206,7 @@ int dapm_regulator_event(struct snd_soc_dapm_widget *w,
 
                return regulator_enable(w->regulator);
        } else {
-               if (w->invert & SND_SOC_DAPM_REGULATOR_BYPASS) {
+               if (w->on_val & SND_SOC_DAPM_REGULATOR_BYPASS) {
                        ret = regulator_allow_bypass(w->regulator, true);
                        if (ret != 0)
                                dev_warn(w->dapm->dev,
@@ -1244,10 +1378,9 @@ static void dapm_seq_insert(struct snd_soc_dapm_widget *new_widget,
        list_add_tail(&new_widget->power_list, list);
 }
 
-static void dapm_seq_check_event(struct snd_soc_dapm_context *dapm,
+static void dapm_seq_check_event(struct snd_soc_card *card,
                                 struct snd_soc_dapm_widget *w, int event)
 {
-       struct snd_soc_card *card = dapm->card;
        const char *ev_name;
        int power, ret;
 
@@ -1281,55 +1414,50 @@ static void dapm_seq_check_event(struct snd_soc_dapm_context *dapm,
                return;
        }
 
-       if (w->power != power)
+       if (w->new_power != power)
                return;
 
        if (w->event && (w->event_flags & event)) {
-               pop_dbg(dapm->dev, card->pop_time, "pop test : %s %s\n",
+               pop_dbg(w->dapm->dev, card->pop_time, "pop test : %s %s\n",
                        w->name, ev_name);
                trace_snd_soc_dapm_widget_event_start(w, event);
                ret = w->event(w, NULL, event);
                trace_snd_soc_dapm_widget_event_done(w, event);
                if (ret < 0)
-                       dev_err(dapm->dev, "ASoC: %s: %s event failed: %d\n",
+                       dev_err(w->dapm->dev, "ASoC: %s: %s event failed: %d\n",
                               ev_name, w->name, ret);
        }
 }
 
 /* Apply the coalesced changes from a DAPM sequence */
-static void dapm_seq_run_coalesced(struct snd_soc_dapm_context *dapm,
+static void dapm_seq_run_coalesced(struct snd_soc_card *card,
                                   struct list_head *pending)
 {
-       struct snd_soc_card *card = dapm->card;
        struct snd_soc_dapm_widget *w;
-       int reg, power;
+       int reg;
        unsigned int value = 0;
        unsigned int mask = 0;
-       unsigned int cur_mask;
 
        reg = list_first_entry(pending, struct snd_soc_dapm_widget,
                               power_list)->reg;
 
        list_for_each_entry(w, pending, power_list) {
-               cur_mask = 1 << w->shift;
                BUG_ON(reg != w->reg);
+               w->power = w->new_power;
 
-               if (w->invert)
-                       power = !w->power;
+               mask |= w->mask << w->shift;
+               if (w->power)
+                       value |= w->on_val << w->shift;
                else
-                       power = w->power;
+                       value |= w->off_val << w->shift;
 
-               mask |= cur_mask;
-               if (power)
-                       value |= cur_mask;
-
-               pop_dbg(dapm->dev, card->pop_time,
+               pop_dbg(w->dapm->dev, card->pop_time,
                        "pop test : Queue %s: reg=0x%x, 0x%x/0x%x\n",
                        w->name, reg, value, mask);
 
                /* Check for events */
-               dapm_seq_check_event(dapm, w, SND_SOC_DAPM_PRE_PMU);
-               dapm_seq_check_event(dapm, w, SND_SOC_DAPM_PRE_PMD);
+               dapm_seq_check_event(card, w, SND_SOC_DAPM_PRE_PMU);
+               dapm_seq_check_event(card, w, SND_SOC_DAPM_PRE_PMD);
        }
 
        if (reg >= 0) {
@@ -1339,7 +1467,7 @@ static void dapm_seq_run_coalesced(struct snd_soc_dapm_context *dapm,
                w = list_first_entry(pending, struct snd_soc_dapm_widget,
                                     power_list);
 
-               pop_dbg(dapm->dev, card->pop_time,
+               pop_dbg(w->dapm->dev, card->pop_time,
                        "pop test : Applying 0x%x/0x%x to %x in %dms\n",
                        value, mask, reg, card->pop_time);
                pop_wait(card->pop_time);
@@ -1347,8 +1475,8 @@ static void dapm_seq_run_coalesced(struct snd_soc_dapm_context *dapm,
        }
 
        list_for_each_entry(w, pending, power_list) {
-               dapm_seq_check_event(dapm, w, SND_SOC_DAPM_POST_PMU);
-               dapm_seq_check_event(dapm, w, SND_SOC_DAPM_POST_PMD);
+               dapm_seq_check_event(card, w, SND_SOC_DAPM_POST_PMU);
+               dapm_seq_check_event(card, w, SND_SOC_DAPM_POST_PMD);
        }
 }
 
@@ -1360,8 +1488,8 @@ static void dapm_seq_run_coalesced(struct snd_soc_dapm_context *dapm,
  * Currently anything that requires more than a single write is not
  * handled.
  */
-static void dapm_seq_run(struct snd_soc_dapm_context *dapm,
-                        struct list_head *list, int event, bool power_up)
+static void dapm_seq_run(struct snd_soc_card *card,
+       struct list_head *list, int event, bool power_up)
 {
        struct snd_soc_dapm_widget *w, *n;
        LIST_HEAD(pending);
@@ -1384,7 +1512,7 @@ static void dapm_seq_run(struct snd_soc_dapm_context *dapm,
                if (sort[w->id] != cur_sort || w->reg != cur_reg ||
                    w->dapm != cur_dapm || w->subseq != cur_subseq) {
                        if (!list_empty(&pending))
-                               dapm_seq_run_coalesced(cur_dapm, &pending);
+                               dapm_seq_run_coalesced(card, &pending);
 
                        if (cur_dapm && cur_dapm->seq_notifier) {
                                for (i = 0; i < ARRAY_SIZE(dapm_up_seq); i++)
@@ -1444,7 +1572,7 @@ static void dapm_seq_run(struct snd_soc_dapm_context *dapm,
        }
 
        if (!list_empty(&pending))
-               dapm_seq_run_coalesced(cur_dapm, &pending);
+               dapm_seq_run_coalesced(card, &pending);
 
        if (cur_dapm && cur_dapm->seq_notifier) {
                for (i = 0; i < ARRAY_SIZE(dapm_up_seq); i++)
@@ -1454,37 +1582,48 @@ static void dapm_seq_run(struct snd_soc_dapm_context *dapm,
        }
 }
 
-static void dapm_widget_update(struct snd_soc_dapm_context *dapm)
+static void dapm_widget_update(struct snd_soc_card *card)
 {
-       struct snd_soc_dapm_update *update = dapm->update;
-       struct snd_soc_dapm_widget *w;
+       struct snd_soc_dapm_update *update = card->update;
+       struct snd_soc_dapm_widget_list *wlist;
+       struct snd_soc_dapm_widget *w = NULL;
+       unsigned int wi;
        int ret;
 
-       if (!update)
+       if (!update || !dapm_kcontrol_is_powered(update->kcontrol))
                return;
 
-       w = update->widget;
+       wlist = dapm_kcontrol_get_wlist(update->kcontrol);
 
-       if (w->event &&
-           (w->event_flags & SND_SOC_DAPM_PRE_REG)) {
-               ret = w->event(w, update->kcontrol, SND_SOC_DAPM_PRE_REG);
-               if (ret != 0)
-                       dev_err(dapm->dev, "ASoC: %s DAPM pre-event failed: %d\n",
-                              w->name, ret);
+       for (wi = 0; wi < wlist->num_widgets; wi++) {
+               w = wlist->widgets[wi];
+
+               if (w->event && (w->event_flags & SND_SOC_DAPM_PRE_REG)) {
+                       ret = w->event(w, update->kcontrol, SND_SOC_DAPM_PRE_REG);
+                       if (ret != 0)
+                               dev_err(w->dapm->dev, "ASoC: %s DAPM pre-event failed: %d\n",
+                                          w->name, ret);
+               }
        }
 
+       if (!w)
+               return;
+
        ret = soc_widget_update_bits_locked(w, update->reg, update->mask,
                                  update->val);
        if (ret < 0)
-               dev_err(dapm->dev, "ASoC: %s DAPM update failed: %d\n",
+               dev_err(w->dapm->dev, "ASoC: %s DAPM update failed: %d\n",
                        w->name, ret);
 
-       if (w->event &&
-           (w->event_flags & SND_SOC_DAPM_POST_REG)) {
-               ret = w->event(w, update->kcontrol, SND_SOC_DAPM_POST_REG);
-               if (ret != 0)
-                       dev_err(dapm->dev, "ASoC: %s DAPM post-event failed: %d\n",
-                              w->name, ret);
+       for (wi = 0; wi < wlist->num_widgets; wi++) {
+               w = wlist->widgets[wi];
+
+               if (w->event && (w->event_flags & SND_SOC_DAPM_POST_REG)) {
+                       ret = w->event(w, update->kcontrol, SND_SOC_DAPM_POST_REG);
+                       if (ret != 0)
+                               dev_err(w->dapm->dev, "ASoC: %s DAPM post-event failed: %d\n",
+                                          w->name, ret);
+               }
        }
 }
 
@@ -1596,6 +1735,7 @@ static void dapm_widget_set_power(struct snd_soc_dapm_widget *w, bool power,
        case snd_soc_dapm_supply:
        case snd_soc_dapm_regulator_supply:
        case snd_soc_dapm_clock_supply:
+       case snd_soc_dapm_kcontrol:
                /* Supplies can't affect their outputs, only their inputs */
                break;
        default:
@@ -1612,8 +1752,6 @@ static void dapm_widget_set_power(struct snd_soc_dapm_widget *w, bool power,
                dapm_seq_insert(w, up_list, true);
        else
                dapm_seq_insert(w, down_list, false);
-
-       w->power = power;
 }
 
 static void dapm_power_one_widget(struct snd_soc_dapm_widget *w,
@@ -1647,9 +1785,8 @@ static void dapm_power_one_widget(struct snd_soc_dapm_widget *w,
  *  o Input pin to Output pin (bypass, sidetone)
  *  o DAC to ADC (loopback).
  */
-static int dapm_power_widgets(struct snd_soc_dapm_context *dapm, int event)
+static int dapm_power_widgets(struct snd_soc_card *card, int event)
 {
-       struct snd_soc_card *card = dapm->card;
        struct snd_soc_dapm_widget *w;
        struct snd_soc_dapm_context *d;
        LIST_HEAD(up_list);
@@ -1689,7 +1826,7 @@ static int dapm_power_widgets(struct snd_soc_dapm_context *dapm, int event)
                        break;
                }
 
-               if (w->power) {
+               if (w->new_power) {
                        d = w->dapm;
 
                        /* Supplies and micbiases only bring the
@@ -1731,29 +1868,29 @@ static int dapm_power_widgets(struct snd_soc_dapm_context *dapm, int event)
        trace_snd_soc_dapm_walk_done(card);
 
        /* Run all the bias changes in parallel */
-       list_for_each_entry(d, &dapm->card->dapm_list, list)
+       list_for_each_entry(d, &card->dapm_list, list)
                async_schedule_domain(dapm_pre_sequence_async, d,
                                        &async_domain);
        async_synchronize_full_domain(&async_domain);
 
        list_for_each_entry(w, &down_list, power_list) {
-               dapm_seq_check_event(dapm, w, SND_SOC_DAPM_WILL_PMD);
+               dapm_seq_check_event(card, w, SND_SOC_DAPM_WILL_PMD);
        }
 
        list_for_each_entry(w, &up_list, power_list) {
-               dapm_seq_check_event(dapm, w, SND_SOC_DAPM_WILL_PMU);
+               dapm_seq_check_event(card, w, SND_SOC_DAPM_WILL_PMU);
        }
 
        /* Power down widgets first; try to avoid amplifying pops. */
-       dapm_seq_run(dapm, &down_list, event, false);
+       dapm_seq_run(card, &down_list, event, false);
 
-       dapm_widget_update(dapm);
+       dapm_widget_update(card);
 
        /* Now power up. */
-       dapm_seq_run(dapm, &up_list, event, true);
+       dapm_seq_run(card, &up_list, event, true);
 
        /* Run all the bias changes in parallel */
-       list_for_each_entry(d, &dapm->card->dapm_list, list)
+       list_for_each_entry(d, &card->dapm_list, list)
                async_schedule_domain(dapm_post_sequence_async, d,
                                        &async_domain);
        async_synchronize_full_domain(&async_domain);
@@ -1764,7 +1901,7 @@ static int dapm_power_widgets(struct snd_soc_dapm_context *dapm, int event)
                        d->stream_event(d, event);
        }
 
-       pop_dbg(dapm->dev, card->pop_time,
+       pop_dbg(card->dev, card->pop_time,
                "DAPM sequencing finished, waiting %dms\n", card->pop_time);
        pop_wait(card->pop_time);
 
@@ -1799,8 +1936,8 @@ static ssize_t dapm_widget_power_read_file(struct file *file,
 
        if (w->reg >= 0)
                ret += snprintf(buf + ret, PAGE_SIZE - ret,
-                               " - R%d(0x%x) bit %d",
-                               w->reg, w->reg, w->shift);
+                               " - R%d(0x%x) mask 0x%x",
+                               w->reg, w->reg, w->mask << w->shift);
 
        ret += snprintf(buf + ret, PAGE_SIZE - ret, "\n");
 
@@ -1937,22 +2074,14 @@ static inline void dapm_debugfs_cleanup(struct snd_soc_dapm_context *dapm)
 #endif
 
 /* test and update the power status of a mux widget */
-static int soc_dapm_mux_update_power(struct snd_soc_dapm_widget *widget,
+static int soc_dapm_mux_update_power(struct snd_soc_card *card,
                                 struct snd_kcontrol *kcontrol, int mux, struct soc_enum *e)
 {
        struct snd_soc_dapm_path *path;
        int found = 0;
 
-       if (widget->id != snd_soc_dapm_mux &&
-           widget->id != snd_soc_dapm_virt_mux &&
-           widget->id != snd_soc_dapm_value_mux)
-               return -ENODEV;
-
        /* find dapm widget path assoc with kcontrol */
-       list_for_each_entry(path, &widget->dapm->card->paths, list) {
-               if (path->kcontrol != kcontrol)
-                       continue;
-
+       dapm_kcontrol_for_each_path(path, kcontrol) {
                if (!path->name || !e->texts[mux])
                        continue;
 
@@ -1967,73 +2096,68 @@ static int soc_dapm_mux_update_power(struct snd_soc_dapm_widget *widget,
                                                "mux disconnection");
                        path->connect = 0; /* old connection must be powered down */
                }
+               dapm_mark_dirty(path->sink, "mux change");
        }
 
-       if (found) {
-               dapm_mark_dirty(widget, "mux change");
-               dapm_power_widgets(widget->dapm, SND_SOC_DAPM_STREAM_NOP);
-       }
+       if (found)
+               dapm_power_widgets(card, SND_SOC_DAPM_STREAM_NOP);
 
        return found;
 }
 
-int snd_soc_dapm_mux_update_power(struct snd_soc_dapm_widget *widget,
-               struct snd_kcontrol *kcontrol, int mux, struct soc_enum *e)
+int snd_soc_dapm_mux_update_power(struct snd_soc_dapm_context *dapm,
+       struct snd_kcontrol *kcontrol, int mux, struct soc_enum *e,
+       struct snd_soc_dapm_update *update)
 {
-       struct snd_soc_card *card = widget->dapm->card;
+       struct snd_soc_card *card = dapm->card;
        int ret;
 
        mutex_lock_nested(&card->dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME);
-       ret = soc_dapm_mux_update_power(widget, kcontrol, mux, e);
+       card->update = update;
+       ret = soc_dapm_mux_update_power(card, kcontrol, mux, e);
+       card->update = NULL;
        mutex_unlock(&card->dapm_mutex);
        if (ret > 0)
-               soc_dpcm_runtime_update(widget);
+               soc_dpcm_runtime_update(card);
        return ret;
 }
 EXPORT_SYMBOL_GPL(snd_soc_dapm_mux_update_power);
 
 /* test and update the power status of a mixer or switch widget */
-static int soc_dapm_mixer_update_power(struct snd_soc_dapm_widget *widget,
+static int soc_dapm_mixer_update_power(struct snd_soc_card *card,
                                   struct snd_kcontrol *kcontrol, int connect)
 {
        struct snd_soc_dapm_path *path;
        int found = 0;
 
-       if (widget->id != snd_soc_dapm_mixer &&
-           widget->id != snd_soc_dapm_mixer_named_ctl &&
-           widget->id != snd_soc_dapm_switch)
-               return -ENODEV;
-
        /* find dapm widget path assoc with kcontrol */
-       list_for_each_entry(path, &widget->dapm->card->paths, list) {
-               if (path->kcontrol != kcontrol)
-                       continue;
-
-               /* found, now check type */
+       dapm_kcontrol_for_each_path(path, kcontrol) {
                found = 1;
                path->connect = connect;
                dapm_mark_dirty(path->source, "mixer connection");
+               dapm_mark_dirty(path->sink, "mixer update");
        }
 
-       if (found) {
-               dapm_mark_dirty(widget, "mixer update");
-               dapm_power_widgets(widget->dapm, SND_SOC_DAPM_STREAM_NOP);
-       }
+       if (found)
+               dapm_power_widgets(card, SND_SOC_DAPM_STREAM_NOP);
 
        return found;
 }
 
-int snd_soc_dapm_mixer_update_power(struct snd_soc_dapm_widget *widget,
-                               struct snd_kcontrol *kcontrol, int connect)
+int snd_soc_dapm_mixer_update_power(struct snd_soc_dapm_context *dapm,
+       struct snd_kcontrol *kcontrol, int connect,
+       struct snd_soc_dapm_update *update)
 {
-       struct snd_soc_card *card = widget->dapm->card;
+       struct snd_soc_card *card = dapm->card;
        int ret;
 
        mutex_lock_nested(&card->dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME);
-       ret = soc_dapm_mixer_update_power(widget, kcontrol, connect);
+       card->update = update;
+       ret = soc_dapm_mixer_update_power(card, kcontrol, connect);
+       card->update = NULL;
        mutex_unlock(&card->dapm_mutex);
        if (ret > 0)
-               soc_dpcm_runtime_update(widget);
+               soc_dpcm_runtime_update(card);
        return ret;
 }
 EXPORT_SYMBOL_GPL(snd_soc_dapm_mixer_update_power);
@@ -2112,6 +2236,7 @@ static void dapm_free_path(struct snd_soc_dapm_path *path)
 {
        list_del(&path->list_sink);
        list_del(&path->list_source);
+       list_del(&path->list_kcontrol);
        list_del(&path->list);
        kfree(path);
 }
@@ -2206,70 +2331,20 @@ int snd_soc_dapm_sync(struct snd_soc_dapm_context *dapm)
                return 0;
 
        mutex_lock_nested(&dapm->card->dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME);
-       ret = dapm_power_widgets(dapm, SND_SOC_DAPM_STREAM_NOP);
+       ret = dapm_power_widgets(dapm->card, SND_SOC_DAPM_STREAM_NOP);
        mutex_unlock(&dapm->card->dapm_mutex);
        return ret;
 }
 EXPORT_SYMBOL_GPL(snd_soc_dapm_sync);
 
-static int snd_soc_dapm_add_route(struct snd_soc_dapm_context *dapm,
-                                 const struct snd_soc_dapm_route *route)
+static int snd_soc_dapm_add_path(struct snd_soc_dapm_context *dapm,
+       struct snd_soc_dapm_widget *wsource, struct snd_soc_dapm_widget *wsink,
+       const char *control,
+       int (*connected)(struct snd_soc_dapm_widget *source,
+                        struct snd_soc_dapm_widget *sink))
 {
        struct snd_soc_dapm_path *path;
-       struct snd_soc_dapm_widget *wsource = NULL, *wsink = NULL, *w;
-       struct snd_soc_dapm_widget *wtsource = NULL, *wtsink = NULL;
-       const char *sink;
-       const char *control = route->control;
-       const char *source;
-       char prefixed_sink[80];
-       char prefixed_source[80];
-       int ret = 0;
-
-       if (dapm->codec && dapm->codec->name_prefix) {
-               snprintf(prefixed_sink, sizeof(prefixed_sink), "%s %s",
-                        dapm->codec->name_prefix, route->sink);
-               sink = prefixed_sink;
-               snprintf(prefixed_source, sizeof(prefixed_source), "%s %s",
-                        dapm->codec->name_prefix, route->source);
-               source = prefixed_source;
-       } else {
-               sink = route->sink;
-               source = route->source;
-       }
-
-       /*
-        * find src and dest widgets over all widgets but favor a widget from
-        * current DAPM context
-        */
-       list_for_each_entry(w, &dapm->card->widgets, list) {
-               if (!wsink && !(strcmp(w->name, sink))) {
-                       wtsink = w;
-                       if (w->dapm == dapm)
-                               wsink = w;
-                       continue;
-               }
-               if (!wsource && !(strcmp(w->name, source))) {
-                       wtsource = w;
-                       if (w->dapm == dapm)
-                               wsource = w;
-               }
-       }
-       /* use widget from another DAPM context if not found from this */
-       if (!wsink)
-               wsink = wtsink;
-       if (!wsource)
-               wsource = wtsource;
-
-       if (wsource == NULL) {
-               dev_err(dapm->dev, "ASoC: no source widget found for %s\n",
-                       route->source);
-               return -ENODEV;
-       }
-       if (wsink == NULL) {
-               dev_err(dapm->dev, "ASoC: no sink widget found for %s\n",
-                       route->sink);
-               return -ENODEV;
-       }
+       int ret;
 
        path = kzalloc(sizeof(struct snd_soc_dapm_path), GFP_KERNEL);
        if (!path)
@@ -2277,8 +2352,9 @@ static int snd_soc_dapm_add_route(struct snd_soc_dapm_context *dapm,
 
        path->source = wsource;
        path->sink = wsink;
-       path->connected = route->connected;
+       path->connected = connected;
        INIT_LIST_HEAD(&path->list);
+       INIT_LIST_HEAD(&path->list_kcontrol);
        INIT_LIST_HEAD(&path->list_source);
        INIT_LIST_HEAD(&path->list_sink);
 
@@ -2328,6 +2404,7 @@ static int snd_soc_dapm_add_route(struct snd_soc_dapm_context *dapm,
        case snd_soc_dapm_dai_in:
        case snd_soc_dapm_dai_out:
        case snd_soc_dapm_dai_link:
+       case snd_soc_dapm_kcontrol:
                list_add(&path->list, &dapm->card->paths);
                list_add(&path->list_sink, &wsink->sources);
                list_add(&path->list_source, &wsource->sinks);
@@ -2363,11 +2440,77 @@ static int snd_soc_dapm_add_route(struct snd_soc_dapm_context *dapm,
        dapm_mark_dirty(wsink, "Route added");
 
        return 0;
+err:
+       kfree(path);
+       return ret;
+}
+
+static int snd_soc_dapm_add_route(struct snd_soc_dapm_context *dapm,
+                                 const struct snd_soc_dapm_route *route)
+{
+       struct snd_soc_dapm_widget *wsource = NULL, *wsink = NULL, *w;
+       struct snd_soc_dapm_widget *wtsource = NULL, *wtsink = NULL;
+       const char *sink;
+       const char *source;
+       char prefixed_sink[80];
+       char prefixed_source[80];
+       int ret;
+
+       if (dapm->codec && dapm->codec->name_prefix) {
+               snprintf(prefixed_sink, sizeof(prefixed_sink), "%s %s",
+                        dapm->codec->name_prefix, route->sink);
+               sink = prefixed_sink;
+               snprintf(prefixed_source, sizeof(prefixed_source), "%s %s",
+                        dapm->codec->name_prefix, route->source);
+               source = prefixed_source;
+       } else {
+               sink = route->sink;
+               source = route->source;
+       }
+
+       /*
+        * find src and dest widgets over all widgets but favor a widget from
+        * current DAPM context
+        */
+       list_for_each_entry(w, &dapm->card->widgets, list) {
+               if (!wsink && !(strcmp(w->name, sink))) {
+                       wtsink = w;
+                       if (w->dapm == dapm)
+                               wsink = w;
+                       continue;
+               }
+               if (!wsource && !(strcmp(w->name, source))) {
+                       wtsource = w;
+                       if (w->dapm == dapm)
+                               wsource = w;
+               }
+       }
+       /* use widget from another DAPM context if not found from this */
+       if (!wsink)
+               wsink = wtsink;
+       if (!wsource)
+               wsource = wtsource;
+
+       if (wsource == NULL) {
+               dev_err(dapm->dev, "ASoC: no source widget found for %s\n",
+                       route->source);
+               return -ENODEV;
+       }
+       if (wsink == NULL) {
+               dev_err(dapm->dev, "ASoC: no sink widget found for %s\n",
+                       route->sink);
+               return -ENODEV;
+       }
 
+       ret = snd_soc_dapm_add_path(dapm, wsource, wsink, route->control,
+               route->connected);
+       if (ret)
+               goto err;
+
+       return 0;
 err:
        dev_warn(dapm->dev, "ASoC: no dapm match for %s --> %s --> %s\n",
-                source, control, sink);
-       kfree(path);
+                source, route->control, sink);
        return ret;
 }
 
@@ -2571,12 +2714,13 @@ EXPORT_SYMBOL_GPL(snd_soc_dapm_weak_routes);
  */
 int snd_soc_dapm_new_widgets(struct snd_soc_dapm_context *dapm)
 {
+       struct snd_soc_card *card = dapm->card;
        struct snd_soc_dapm_widget *w;
        unsigned int val;
 
-       mutex_lock_nested(&dapm->card->dapm_mutex, SND_SOC_DAPM_CLASS_INIT);
+       mutex_lock_nested(&card->dapm_mutex, SND_SOC_DAPM_CLASS_INIT);
 
-       list_for_each_entry(w, &dapm->card->widgets, list)
+       list_for_each_entry(w, &card->widgets, list)
        {
                if (w->new)
                        continue;
@@ -2586,7 +2730,7 @@ int snd_soc_dapm_new_widgets(struct snd_soc_dapm_context *dapm)
                                                sizeof(struct snd_kcontrol *),
                                                GFP_KERNEL);
                        if (!w->kcontrols) {
-                               mutex_unlock(&dapm->card->dapm_mutex);
+                               mutex_unlock(&card->dapm_mutex);
                                return -ENOMEM;
                        }
                }
@@ -2612,12 +2756,9 @@ int snd_soc_dapm_new_widgets(struct snd_soc_dapm_context *dapm)
 
                /* Read the initial power state from the device */
                if (w->reg >= 0) {
-                       val = soc_widget_read(w, w->reg);
-                       val &= 1 << w->shift;
-                       if (w->invert)
-                               val = !val;
-
-                       if (val)
+                       val = soc_widget_read(w, w->reg) >> w->shift;
+                       val &= w->mask;
+                       if (val == w->on_val)
                                w->power = 1;
                }
 
@@ -2627,8 +2768,8 @@ int snd_soc_dapm_new_widgets(struct snd_soc_dapm_context *dapm)
                dapm_debugfs_add_widget(w);
        }
 
-       dapm_power_widgets(dapm, SND_SOC_DAPM_STREAM_NOP);
-       mutex_unlock(&dapm->card->dapm_mutex);
+       dapm_power_widgets(card, SND_SOC_DAPM_STREAM_NOP);
+       mutex_unlock(&card->dapm_mutex);
        return 0;
 }
 EXPORT_SYMBOL_GPL(snd_soc_dapm_new_widgets);
@@ -2645,8 +2786,8 @@ EXPORT_SYMBOL_GPL(snd_soc_dapm_new_widgets);
 int snd_soc_dapm_get_volsw(struct snd_kcontrol *kcontrol,
        struct snd_ctl_elem_value *ucontrol)
 {
-       struct snd_soc_dapm_widget_list *wlist = snd_kcontrol_chip(kcontrol);
-       struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+       struct snd_soc_codec *codec = snd_soc_dapm_kcontrol_codec(kcontrol);
+       struct snd_soc_card *card = codec->card;
        struct soc_mixer_control *mc =
                (struct soc_mixer_control *)kcontrol->private_value;
        unsigned int reg = mc->reg;
@@ -2654,17 +2795,24 @@ int snd_soc_dapm_get_volsw(struct snd_kcontrol *kcontrol,
        int max = mc->max;
        unsigned int mask = (1 << fls(max)) - 1;
        unsigned int invert = mc->invert;
+       unsigned int val;
 
        if (snd_soc_volsw_is_stereo(mc))
-               dev_warn(widget->dapm->dev,
+               dev_warn(codec->dapm.dev,
                         "ASoC: Control '%s' is stereo, which is not supported\n",
                         kcontrol->id.name);
 
-       ucontrol->value.integer.value[0] =
-               (snd_soc_read(widget->codec, reg) >> shift) & mask;
+       mutex_lock_nested(&card->dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME);
+       if (dapm_kcontrol_is_powered(kcontrol))
+               val = (snd_soc_read(codec, reg) >> shift) & mask;
+       else
+               val = dapm_kcontrol_get_value(kcontrol);
+       mutex_unlock(&card->dapm_mutex);
+
        if (invert)
-               ucontrol->value.integer.value[0] =
-                       max - ucontrol->value.integer.value[0];
+               ucontrol->value.integer.value[0] = max - val;
+       else
+               ucontrol->value.integer.value[0] = val;
 
        return 0;
 }
@@ -2682,9 +2830,7 @@ EXPORT_SYMBOL_GPL(snd_soc_dapm_get_volsw);
 int snd_soc_dapm_put_volsw(struct snd_kcontrol *kcontrol,
        struct snd_ctl_elem_value *ucontrol)
 {
-       struct snd_soc_dapm_widget_list *wlist = snd_kcontrol_chip(kcontrol);
-       struct snd_soc_dapm_widget *widget = wlist->widgets[0];
-       struct snd_soc_codec *codec = widget->codec;
+       struct snd_soc_codec *codec = snd_soc_dapm_kcontrol_codec(kcontrol);
        struct snd_soc_card *card = codec->card;
        struct soc_mixer_control *mc =
                (struct soc_mixer_control *)kcontrol->private_value;
@@ -2696,10 +2842,9 @@ int snd_soc_dapm_put_volsw(struct snd_kcontrol *kcontrol,
        unsigned int val;
        int connect, change;
        struct snd_soc_dapm_update update;
-       int wi;
 
        if (snd_soc_volsw_is_stereo(mc))
-               dev_warn(widget->dapm->dev,
+               dev_warn(codec->dapm.dev,
                         "ASoC: Control '%s' is stereo, which is not supported\n",
                         kcontrol->id.name);
 
@@ -2708,29 +2853,26 @@ int snd_soc_dapm_put_volsw(struct snd_kcontrol *kcontrol,
 
        if (invert)
                val = max - val;
-       mask = mask << shift;
-       val = val << shift;
 
        mutex_lock_nested(&card->dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME);
 
-       change = snd_soc_test_bits(widget->codec, reg, mask, val);
-       if (change) {
-               for (wi = 0; wi < wlist->num_widgets; wi++) {
-                       widget = wlist->widgets[wi];
+       dapm_kcontrol_set_value(kcontrol, val);
 
-                       widget->value = val;
+       mask = mask << shift;
+       val = val << shift;
 
-                       update.kcontrol = kcontrol;
-                       update.widget = widget;
-                       update.reg = reg;
-                       update.mask = mask;
-                       update.val = val;
-                       widget->dapm->update = &update;
+       change = snd_soc_test_bits(codec, reg, mask, val);
+       if (change) {
+               update.kcontrol = kcontrol;
+               update.reg = reg;
+               update.mask = mask;
+               update.val = val;
 
-                       soc_dapm_mixer_update_power(widget, kcontrol, connect);
+               card->update = &update;
 
-                       widget->dapm->update = NULL;
-               }
+               soc_dapm_mixer_update_power(card, kcontrol, connect);
+
+               card->update = NULL;
        }
 
        mutex_unlock(&card->dapm_mutex);
@@ -2750,12 +2892,11 @@ EXPORT_SYMBOL_GPL(snd_soc_dapm_put_volsw);
 int snd_soc_dapm_get_enum_double(struct snd_kcontrol *kcontrol,
        struct snd_ctl_elem_value *ucontrol)
 {
-       struct snd_soc_dapm_widget_list *wlist = snd_kcontrol_chip(kcontrol);
-       struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+       struct snd_soc_codec *codec = snd_soc_dapm_kcontrol_codec(kcontrol);
        struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
        unsigned int val;
 
-       val = snd_soc_read(widget->codec, e->reg);
+       val = snd_soc_read(codec, e->reg);
        ucontrol->value.enumerated.item[0] = (val >> e->shift_l) & e->mask;
        if (e->shift_l != e->shift_r)
                ucontrol->value.enumerated.item[1] =
@@ -2777,15 +2918,12 @@ EXPORT_SYMBOL_GPL(snd_soc_dapm_get_enum_double);
 int snd_soc_dapm_put_enum_double(struct snd_kcontrol *kcontrol,
        struct snd_ctl_elem_value *ucontrol)
 {
-       struct snd_soc_dapm_widget_list *wlist = snd_kcontrol_chip(kcontrol);
-       struct snd_soc_dapm_widget *widget = wlist->widgets[0];
-       struct snd_soc_codec *codec = widget->codec;
+       struct snd_soc_codec *codec = snd_soc_dapm_kcontrol_codec(kcontrol);
        struct snd_soc_card *card = codec->card;
        struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
        unsigned int val, mux, change;
        unsigned int mask;
        struct snd_soc_dapm_update update;
-       int wi;
 
        if (ucontrol->value.enumerated.item[0] > e->max - 1)
                return -EINVAL;
@@ -2801,24 +2939,17 @@ int snd_soc_dapm_put_enum_double(struct snd_kcontrol *kcontrol,
 
        mutex_lock_nested(&card->dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME);
 
-       change = snd_soc_test_bits(widget->codec, e->reg, mask, val);
+       change = snd_soc_test_bits(codec, e->reg, mask, val);
        if (change) {
-               for (wi = 0; wi < wlist->num_widgets; wi++) {
-                       widget = wlist->widgets[wi];
+               update.kcontrol = kcontrol;
+               update.reg = e->reg;
+               update.mask = mask;
+               update.val = val;
+               card->update = &update;
 
-                       widget->value = val;
+               soc_dapm_mux_update_power(card, kcontrol, mux, e);
 
-                       update.kcontrol = kcontrol;
-                       update.widget = widget;
-                       update.reg = e->reg;
-                       update.mask = mask;
-                       update.val = val;
-                       widget->dapm->update = &update;
-
-                       soc_dapm_mux_update_power(widget, kcontrol, mux, e);
-
-                       widget->dapm->update = NULL;
-               }
+               card->update = NULL;
        }
 
        mutex_unlock(&card->dapm_mutex);
@@ -2836,11 +2967,7 @@ EXPORT_SYMBOL_GPL(snd_soc_dapm_put_enum_double);
 int snd_soc_dapm_get_enum_virt(struct snd_kcontrol *kcontrol,
                               struct snd_ctl_elem_value *ucontrol)
 {
-       struct snd_soc_dapm_widget_list *wlist = snd_kcontrol_chip(kcontrol);
-       struct snd_soc_dapm_widget *widget = wlist->widgets[0];
-
-       ucontrol->value.enumerated.item[0] = widget->value;
-
+       ucontrol->value.enumerated.item[0] = dapm_kcontrol_get_value(kcontrol);
        return 0;
 }
 EXPORT_SYMBOL_GPL(snd_soc_dapm_get_enum_virt);
@@ -2855,30 +2982,22 @@ EXPORT_SYMBOL_GPL(snd_soc_dapm_get_enum_virt);
 int snd_soc_dapm_put_enum_virt(struct snd_kcontrol *kcontrol,
                               struct snd_ctl_elem_value *ucontrol)
 {
-       struct snd_soc_dapm_widget_list *wlist = snd_kcontrol_chip(kcontrol);
-       struct snd_soc_dapm_widget *widget = wlist->widgets[0];
-       struct snd_soc_codec *codec = widget->codec;
+       struct snd_soc_codec *codec = snd_soc_dapm_kcontrol_codec(kcontrol);
        struct snd_soc_card *card = codec->card;
+       unsigned int value;
        struct soc_enum *e =
                (struct soc_enum *)kcontrol->private_value;
        int change;
-       int wi;
 
        if (ucontrol->value.enumerated.item[0] >= e->max)
                return -EINVAL;
 
        mutex_lock_nested(&card->dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME);
 
-       change = widget->value != ucontrol->value.enumerated.item[0];
-       if (change) {
-               for (wi = 0; wi < wlist->num_widgets; wi++) {
-                       widget = wlist->widgets[wi];
-
-                       widget->value = ucontrol->value.enumerated.item[0];
-
-                       soc_dapm_mux_update_power(widget, kcontrol, widget->value, e);
-               }
-       }
+       value = ucontrol->value.enumerated.item[0];
+       change = dapm_kcontrol_set_value(kcontrol, value);
+       if (change)
+               soc_dapm_mux_update_power(card, kcontrol, value, e);
 
        mutex_unlock(&card->dapm_mutex);
        return change;
@@ -2901,12 +3020,11 @@ EXPORT_SYMBOL_GPL(snd_soc_dapm_put_enum_virt);
 int snd_soc_dapm_get_value_enum_double(struct snd_kcontrol *kcontrol,
        struct snd_ctl_elem_value *ucontrol)
 {
-       struct snd_soc_dapm_widget_list *wlist = snd_kcontrol_chip(kcontrol);
-       struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+       struct snd_soc_codec *codec = snd_soc_dapm_kcontrol_codec(kcontrol);
        struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
        unsigned int reg_val, val, mux;
 
-       reg_val = snd_soc_read(widget->codec, e->reg);
+       reg_val = snd_soc_read(codec, e->reg);
        val = (reg_val >> e->shift_l) & e->mask;
        for (mux = 0; mux < e->max; mux++) {
                if (val == e->values[mux])
@@ -2942,15 +3060,12 @@ EXPORT_SYMBOL_GPL(snd_soc_dapm_get_value_enum_double);
 int snd_soc_dapm_put_value_enum_double(struct snd_kcontrol *kcontrol,
        struct snd_ctl_elem_value *ucontrol)
 {
-       struct snd_soc_dapm_widget_list *wlist = snd_kcontrol_chip(kcontrol);
-       struct snd_soc_dapm_widget *widget = wlist->widgets[0];
-       struct snd_soc_codec *codec = widget->codec;
+       struct snd_soc_codec *codec = snd_soc_dapm_kcontrol_codec(kcontrol);
        struct snd_soc_card *card = codec->card;
        struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
        unsigned int val, mux, change;
        unsigned int mask;
        struct snd_soc_dapm_update update;
-       int wi;
 
        if (ucontrol->value.enumerated.item[0] > e->max - 1)
                return -EINVAL;
@@ -2966,24 +3081,17 @@ int snd_soc_dapm_put_value_enum_double(struct snd_kcontrol *kcontrol,
 
        mutex_lock_nested(&card->dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME);
 
-       change = snd_soc_test_bits(widget->codec, e->reg, mask, val);
+       change = snd_soc_test_bits(codec, e->reg, mask, val);
        if (change) {
-               for (wi = 0; wi < wlist->num_widgets; wi++) {
-                       widget = wlist->widgets[wi];
-
-                       widget->value = val;
+               update.kcontrol = kcontrol;
+               update.reg = e->reg;
+               update.mask = mask;
+               update.val = val;
+               card->update = &update;
 
-                       update.kcontrol = kcontrol;
-                       update.widget = widget;
-                       update.reg = e->reg;
-                       update.mask = mask;
-                       update.val = val;
-                       widget->dapm->update = &update;
+               soc_dapm_mux_update_power(card, kcontrol, mux, e);
 
-                       soc_dapm_mux_update_power(widget, kcontrol, mux, e);
-
-                       widget->dapm->update = NULL;
-               }
+               card->update = NULL;
        }
 
        mutex_unlock(&card->dapm_mutex);
@@ -3080,7 +3188,7 @@ snd_soc_dapm_new_control(struct snd_soc_dapm_context *dapm,
                        return NULL;
                }
 
-               if (w->invert & SND_SOC_DAPM_REGULATOR_BYPASS) {
+               if (w->on_val & SND_SOC_DAPM_REGULATOR_BYPASS) {
                        ret = regulator_allow_bypass(w->regulator, true);
                        if (ret != 0)
                                dev_warn(w->dapm->dev,
@@ -3127,16 +3235,16 @@ snd_soc_dapm_new_control(struct snd_soc_dapm_context *dapm,
        case snd_soc_dapm_value_mux:
                w->power_check = dapm_generic_check_power;
                break;
-       case snd_soc_dapm_adc:
-       case snd_soc_dapm_aif_out:
        case snd_soc_dapm_dai_out:
                w->power_check = dapm_adc_check_power;
                break;
-       case snd_soc_dapm_dac:
-       case snd_soc_dapm_aif_in:
        case snd_soc_dapm_dai_in:
                w->power_check = dapm_dac_check_power;
                break;
+       case snd_soc_dapm_adc:
+       case snd_soc_dapm_aif_out:
+       case snd_soc_dapm_dac:
+       case snd_soc_dapm_aif_in:
        case snd_soc_dapm_pga:
        case snd_soc_dapm_out_drv:
        case snd_soc_dapm_input:
@@ -3152,6 +3260,7 @@ snd_soc_dapm_new_control(struct snd_soc_dapm_context *dapm,
        case snd_soc_dapm_supply:
        case snd_soc_dapm_regulator_supply:
        case snd_soc_dapm_clock_supply:
+       case snd_soc_dapm_kcontrol:
                w->power_check = dapm_supply_check_power;
                break;
        default:
@@ -3416,9 +3525,6 @@ int snd_soc_dapm_link_dai_widgets(struct snd_soc_card *card)
 {
        struct snd_soc_dapm_widget *dai_w, *w;
        struct snd_soc_dai *dai;
-       struct snd_soc_dapm_route r;
-
-       memset(&r, 0, sizeof(r));
 
        /* For each DAI widget... */
        list_for_each_entry(dai_w, &card->widgets, list) {
@@ -3445,29 +3551,27 @@ int snd_soc_dapm_link_dai_widgets(struct snd_soc_card *card)
                                break;
                        }
 
-                       if (!w->sname)
+                       if (!w->sname || !strstr(w->sname, dai_w->name))
                                continue;
 
                        if (dai->driver->playback.stream_name &&
                            strstr(w->sname,
                                   dai->driver->playback.stream_name)) {
-                               r.source = dai->playback_widget->name;
-                               r.sink = w->name;
                                dev_dbg(dai->dev, "%s -> %s\n",
-                                        r.source, r.sink);
+                                        dai->playback_widget->name, w->name);
 
-                               snd_soc_dapm_add_route(w->dapm, &r);
+                               snd_soc_dapm_add_path(w->dapm,
+                                       dai->playback_widget, w, NULL, NULL);
                        }
 
                        if (dai->driver->capture.stream_name &&
                            strstr(w->sname,
                                   dai->driver->capture.stream_name)) {
-                               r.source = w->name;
-                               r.sink = dai->capture_widget->name;
                                dev_dbg(dai->dev, "%s -> %s\n",
-                                       r.source, r.sink);
+                                       w->name, dai->capture_widget->name);
 
-                               snd_soc_dapm_add_route(w->dapm, &r);
+                               snd_soc_dapm_add_path(w->dapm, w,
+                                       dai->capture_widget, NULL, NULL);
                        }
                }
        }
@@ -3529,7 +3633,7 @@ static void soc_dapm_stream_event(struct snd_soc_pcm_runtime *rtd, int stream,
                }
        }
 
-       dapm_power_widgets(&rtd->card->dapm, event);
+       dapm_power_widgets(rtd->card, event);
 }
 
 /**
@@ -3798,7 +3902,7 @@ static void soc_dapm_shutdown_codec(struct snd_soc_dapm_context *dapm)
                if (dapm->bias_level == SND_SOC_BIAS_ON)
                        snd_soc_dapm_set_bias_level(dapm,
                                                    SND_SOC_BIAS_PREPARE);
-               dapm_seq_run(dapm, &down_list, 0, false);
+               dapm_seq_run(card, &down_list, 0, false);
                if (dapm->bias_level == SND_SOC_BIAS_PREPARE)
                        snd_soc_dapm_set_bias_level(dapm,
                                                    SND_SOC_BIAS_STANDBY);
index 0bb5cccd77663a4d819a67731245e51564af3224..7aa26b5178aa60ac68d7f2c281b515ecf35ef979 100644 (file)
@@ -263,7 +263,7 @@ static irqreturn_t gpio_handler(int irq, void *data)
        if (device_may_wakeup(dev))
                pm_wakeup_event(dev, gpio->debounce_time + 50);
 
-       schedule_delayed_work(&gpio->work,
+       queue_delayed_work(system_power_efficient_wq, &gpio->work,
                              msecs_to_jiffies(gpio->debounce_time));
 
        return IRQ_HANDLED;
index b6c640332a1722c95c27fa8aa99b4d8b135d456f..fb70fbe26862f63ef879b5fde3c58e49e2fd49a6 100644 (file)
@@ -411,8 +411,9 @@ static int soc_pcm_close(struct snd_pcm_substream *substream)
                } else {
                        /* start delayed pop wq here for playback streams */
                        rtd->pop_wait = 1;
-                       schedule_delayed_work(&rtd->delayed_work,
-                               msecs_to_jiffies(rtd->pmdown_time));
+                       queue_delayed_work(system_power_efficient_wq,
+                                          &rtd->delayed_work,
+                                          msecs_to_jiffies(rtd->pmdown_time));
                }
        } else {
                /* capture streams can be powered down now */
@@ -1832,18 +1833,10 @@ static int dpcm_run_old_update(struct snd_soc_pcm_runtime *fe, int stream)
 /* Called by DAPM mixer/mux changes to update audio routing between PCMs and
  * any DAI links.
  */
-int soc_dpcm_runtime_update(struct snd_soc_dapm_widget *widget)
+int soc_dpcm_runtime_update(struct snd_soc_card *card)
 {
-       struct snd_soc_card *card;
        int i, old, new, paths;
 
-       if (widget->codec)
-               card = widget->codec->card;
-       else if (widget->platform)
-               card = widget->platform->card;
-       else
-               return -EINVAL;
-
        mutex_lock_nested(&card->mutex, SND_SOC_CARD_CLASS_RUNTIME);
        for (i = 0; i < card->num_rtd; i++) {
                struct snd_soc_dapm_widget_list *list;
index 3567d73b218ed9273a7b617ae3f91c5411f83cf3..0a53053495f3d39cf6363b2255bfa4ad629c6be0 100644 (file)
@@ -1,6 +1,6 @@
 config SND_SPEAR_SOC
        tristate
-       select SND_SOC_DMAENGINE_PCM
+       select SND_DMAENGINE_PCM
 
 config SND_SPEAR_SPDIF_OUT
        tristate
index 995b120c2cd0a49a90a191e1e06649cb52396839..8fc653ca3ab40b3ef04d1a58723a2466086ab8ff 100644 (file)
@@ -1,8 +1,8 @@
 config SND_SOC_TEGRA
        tristate "SoC Audio for the Tegra System-on-Chip"
-       depends on ARCH_TEGRA && TEGRA20_APB_DMA
+       depends on (ARCH_TEGRA && TEGRA20_APB_DMA) || COMPILE_TEST
        select REGMAP_MMIO
-       select SND_SOC_GENERIC_DMAENGINE_PCM if TEGRA20_APB_DMA
+       select SND_SOC_GENERIC_DMAENGINE_PCM
        help
          Say Y or M here if you want support for SoC audio on Tegra.
 
@@ -61,7 +61,7 @@ config SND_SOC_TEGRA30_I2S
 
 config SND_SOC_TEGRA_RT5640
        tristate "SoC Audio support for Tegra boards using an RT5640 codec"
-       depends on SND_SOC_TEGRA && I2C
+       depends on SND_SOC_TEGRA && I2C && GPIOLIB
        select SND_SOC_TEGRA20_I2S if ARCH_TEGRA_2x_SOC
        select SND_SOC_TEGRA30_I2S if ARCH_TEGRA_3x_SOC
        select SND_SOC_RT5640
@@ -71,7 +71,7 @@ config SND_SOC_TEGRA_RT5640
 
 config SND_SOC_TEGRA_WM8753
        tristate "SoC Audio support for Tegra boards using a WM8753 codec"
-       depends on SND_SOC_TEGRA && I2C
+       depends on SND_SOC_TEGRA && I2C && GPIOLIB
        select SND_SOC_TEGRA20_I2S if ARCH_TEGRA_2x_SOC
        select SND_SOC_TEGRA30_I2S if ARCH_TEGRA_3x_SOC
        select SND_SOC_WM8753
@@ -81,7 +81,7 @@ config SND_SOC_TEGRA_WM8753
 
 config SND_SOC_TEGRA_WM8903
        tristate "SoC Audio support for Tegra boards using a WM8903 codec"
-       depends on SND_SOC_TEGRA && I2C
+       depends on SND_SOC_TEGRA && I2C && GPIOLIB
        select SND_SOC_TEGRA20_I2S if ARCH_TEGRA_2x_SOC
        select SND_SOC_TEGRA30_I2S if ARCH_TEGRA_3x_SOC
        select SND_SOC_WM8903
@@ -92,7 +92,7 @@ config SND_SOC_TEGRA_WM8903
 
 config SND_SOC_TEGRA_WM9712
        tristate "SoC Audio support for Tegra boards using a WM9712 codec"
-       depends on SND_SOC_TEGRA && ARCH_TEGRA_2x_SOC
+       depends on SND_SOC_TEGRA && ARCH_TEGRA_2x_SOC && GPIOLIB
        select SND_SOC_TEGRA20_AC97
        select SND_SOC_WM9712
        help
@@ -110,7 +110,7 @@ config SND_SOC_TEGRA_TRIMSLICE
 
 config SND_SOC_TEGRA_ALC5632
        tristate "SoC Audio support for Tegra boards using an ALC5632 codec"
-       depends on SND_SOC_TEGRA && I2C
+       depends on SND_SOC_TEGRA && I2C && GPIOLIB
        select SND_SOC_TEGRA20_I2S if ARCH_TEGRA_2x_SOC
        select SND_SOC_ALC5632
        help
index 6c486625321bebb78b6f66370fda54a49a300fcb..ae27bcd586d25428458c305c16448395b9f74d88 100644 (file)
@@ -334,12 +334,6 @@ static int tegra20_ac97_platform_probe(struct platform_device *pdev)
        }
 
        mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!mem) {
-               dev_err(&pdev->dev, "No memory resource\n");
-               ret = -ENODEV;
-               goto err_clk_put;
-       }
-
        regs = devm_ioremap_resource(&pdev->dev, mem);
        if (IS_ERR(regs)) {
                ret = PTR_ERR(regs);
@@ -432,8 +426,6 @@ static int tegra20_ac97_platform_probe(struct platform_device *pdev)
 
        return 0;
 
-err_unregister_pcm:
-       tegra_pcm_platform_unregister(&pdev->dev);
 err_unregister_component:
        snd_soc_unregister_component(&pdev->dev);
 err_asoc_utils_fini:
index 48d05d9e1002b3f13529f386d543c0b59803874e..c61ea3a1030f7a683f55bd087049392fe4867bfb 100644 (file)
@@ -13,8 +13,6 @@
  * published by the Free Software Foundation.
  */
 
-#include <asm/mach-types.h>
-
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
index 08794f915a9433a1fdaa91842c74c16279e7f9f2..4511c5a875ecd8567e58c3f62496ab8e1076473f 100644 (file)
@@ -99,6 +99,7 @@ static struct snd_soc_jack_gpio tegra_rt5640_hp_jack_gpio = {
 static const struct snd_soc_dapm_widget tegra_rt5640_dapm_widgets[] = {
        SND_SOC_DAPM_HP("Headphones", NULL),
        SND_SOC_DAPM_SPK("Speakers", NULL),
+       SND_SOC_DAPM_MIC("Mic Jack", NULL),
 };
 
 static const struct snd_kcontrol_new tegra_rt5640_controls[] = {
index f87fc53e9b8cddab8d8f529c0ab730a8481ede8d..8e774d1a243c624dcca5b09c25065316629d886e 100644 (file)
@@ -28,8 +28,6 @@
  *
  */
 
-#include <asm/mach-types.h>
-
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
index 05c68aab5cf0a9a8e06a7fb93f27cd2b0d3cf3e0..734bfcd211481ccccf26c86a3db23399ecf9ffb3 100644 (file)
@@ -24,8 +24,6 @@
  *
  */
 
-#include <asm/mach-types.h>
-
 #include <linux/module.h>
 #include <linux/of.h>
 #include <linux/platform_device.h>
index 4bcce8a3cdedd618e18844a55373c273cbd6427c..e0305a1485680b18cbc85ba33a506bda799f47c1 100644 (file)
@@ -184,9 +184,6 @@ static int txx9aclc_ac97_dev_probe(struct platform_device *pdev)
        if (irq < 0)
                return irq;
        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!r)
-               return -EBUSY;
-
        drvdata->base = devm_ioremap_resource(&pdev->dev, r);
        if (IS_ERR(drvdata->base))
                return PTR_ERR(drvdata->base);
index 8f5cd00a6e468622af895ed4faebeaf78d7b8f9e..178d1bad62591565589803f52c859a2062044df2 100644 (file)
@@ -52,6 +52,7 @@ static struct snd_soc_dai_link mop500_dai_links[] = {
 
 static struct snd_soc_card mop500_card = {
        .name = "MOP500-card",
+       .owner = THIS_MODULE,
        .probe = NULL,
        .dai_link = mop500_dai_links,
        .num_links = ARRAY_SIZE(mop500_dai_links),
index b9defcdeb7ef805af05a6453ce309a2eb64bdb18..780bf3f62d2800f3a8f9da7ffc8d011d0a6c3bd2 100644 (file)
@@ -346,10 +346,10 @@ static int usb6fire_fw_check(u8 *version)
                if (!memcmp(version, known_fw_versions + i, 2))
                        return 0;
 
-       snd_printk(KERN_ERR PREFIX "invalid fimware version in device: %*ph. "
+       snd_printk(KERN_ERR PREFIX "invalid fimware version in device: %4ph. "
                        "please reconnect to power. if this failure "
                        "still happens, check your firmware installation.",
-                       4, version);
+                       version);
        return -EINVAL;
 }
 
index 659950e5b94f6b411577a84821f1a1c4d93b4593..93e970f2b3c0ad4d58957faed6b76ec2a1fb2991 100644 (file)
@@ -418,6 +418,9 @@ struct snd_usb_endpoint *snd_usb_add_endpoint(struct snd_usb_audio *chip,
        struct snd_usb_endpoint *ep;
        int is_playback = direction == SNDRV_PCM_STREAM_PLAYBACK;
 
+       if (WARN_ON(!alts))
+               return NULL;
+
        mutex_lock(&chip->mutex);
 
        list_for_each_entry(ep, &chip->ep_list, list) {
index 15b151ed4899c6032704d361b9c641d2b53b849f..b375d58871e7ce9f7eb0e63a3f73644984c6c5e1 100644 (file)
@@ -327,6 +327,137 @@ static int search_roland_implicit_fb(struct usb_device *dev, int ifnum,
        return 0;
 }
 
+static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs,
+                                        struct usb_device *dev,
+                                        struct usb_interface_descriptor *altsd,
+                                        unsigned int attr)
+{
+       struct usb_host_interface *alts;
+       struct usb_interface *iface;
+       unsigned int ep;
+
+       /* Implicit feedback sync EPs consumers are always playback EPs */
+       if (subs->direction != SNDRV_PCM_STREAM_PLAYBACK)
+               return 0;
+
+       switch (subs->stream->chip->usb_id) {
+       case USB_ID(0x0763, 0x2030): /* M-Audio Fast Track C400 */
+       case USB_ID(0x0763, 0x2031): /* M-Audio Fast Track C600 */
+               ep = 0x81;
+               iface = usb_ifnum_to_if(dev, 3);
+
+               if (!iface || iface->num_altsetting == 0)
+                       return -EINVAL;
+
+               alts = &iface->altsetting[1];
+               goto add_sync_ep;
+               break;
+       case USB_ID(0x0763, 0x2080): /* M-Audio FastTrack Ultra */
+       case USB_ID(0x0763, 0x2081):
+               ep = 0x81;
+               iface = usb_ifnum_to_if(dev, 2);
+
+               if (!iface || iface->num_altsetting == 0)
+                       return -EINVAL;
+
+               alts = &iface->altsetting[1];
+               goto add_sync_ep;
+       }
+       if (attr == USB_ENDPOINT_SYNC_ASYNC &&
+           altsd->bInterfaceClass == USB_CLASS_VENDOR_SPEC &&
+           altsd->bInterfaceProtocol == 2 &&
+           altsd->bNumEndpoints == 1 &&
+           USB_ID_VENDOR(subs->stream->chip->usb_id) == 0x0582 /* Roland */ &&
+           search_roland_implicit_fb(dev, altsd->bInterfaceNumber + 1,
+                                     altsd->bAlternateSetting,
+                                     &alts, &ep) >= 0) {
+               goto add_sync_ep;
+       }
+
+       /* No quirk */
+       return 0;
+
+add_sync_ep:
+       subs->sync_endpoint = snd_usb_add_endpoint(subs->stream->chip,
+                                                  alts, ep, !subs->direction,
+                                                  SND_USB_ENDPOINT_TYPE_DATA);
+       if (!subs->sync_endpoint)
+               return -EINVAL;
+
+       subs->data_endpoint->sync_master = subs->sync_endpoint;
+
+       return 0;
+}
+
/*
 * Locate, validate and register the sync endpoint for this substream.
 *
 * First lets device-specific implicit-feedback quirks claim a sync EP;
 * otherwise, for async-OUT playback or adaptive-IN capture streams with
 * at least two endpoints, validates endpoint 1 of the altsetting as the
 * sync pipe and registers it.
 *
 * Returns 0 on success (including "no sync endpoint needed"), a negative
 * error code when the descriptors are inconsistent.
 */
static int set_sync_endpoint(struct snd_usb_substream *subs,
			     struct audioformat *fmt,
			     struct usb_device *dev,
			     struct usb_host_interface *alts,
			     struct usb_interface_descriptor *altsd)
{
	int is_playback = subs->direction == SNDRV_PCM_STREAM_PLAYBACK;
	unsigned int ep, attr;
	bool implicit_fb;
	int err;

	/* we need a sync pipe in async OUT or adaptive IN mode */
	/* check the number of EP, since some devices have broken
	 * descriptors which fool us.  if it has only one EP,
	 * assume it as adaptive-out or sync-in.
	 */
	attr = fmt->ep_attr & USB_ENDPOINT_SYNCTYPE;

	/* quirky devices get their sync EP wired up here instead */
	err = set_sync_ep_implicit_fb_quirk(subs, dev, altsd, attr);
	if (err < 0)
		return err;

	if (altsd->bNumEndpoints < 2)
		return 0;

	/* only async-OUT playback / adaptive-IN capture need a sync pipe */
	if ((is_playback && attr != USB_ENDPOINT_SYNC_ASYNC) ||
	    (!is_playback && attr != USB_ENDPOINT_SYNC_ADAPTIVE))
		return 0;

	/* check sync-pipe endpoint */
	/* ... and check descriptor size before accessing bSynchAddress
	   because there is a version of the SB Audigy 2 NX firmware lacking
	   the audio fields in the endpoint descriptors */
	if ((get_endpoint(alts, 1)->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) != USB_ENDPOINT_XFER_ISOC ||
	    (get_endpoint(alts, 1)->bLength >= USB_DT_ENDPOINT_AUDIO_SIZE &&
	     get_endpoint(alts, 1)->bSynchAddress != 0)) {
		snd_printk(KERN_ERR "%d:%d:%d : invalid sync pipe. bmAttributes %02x, bLength %d, bSynchAddress %02x\n",
			   dev->devnum, fmt->iface, fmt->altsetting,
			   get_endpoint(alts, 1)->bmAttributes,
			   get_endpoint(alts, 1)->bLength,
			   get_endpoint(alts, 1)->bSynchAddress);
		return -EINVAL;
	}
	ep = get_endpoint(alts, 1)->bEndpointAddress;
	/* cross-check the data EP's bSynchAddress against the sync EP's
	 * address: direction bit IN for playback feedback, OUT for capture */
	if (get_endpoint(alts, 0)->bLength >= USB_DT_ENDPOINT_AUDIO_SIZE &&
	    ((is_playback && ep != (unsigned int)(get_endpoint(alts, 0)->bSynchAddress | USB_DIR_IN)) ||
	     (!is_playback && ep != (unsigned int)(get_endpoint(alts, 0)->bSynchAddress & ~USB_DIR_IN)))) {
		snd_printk(KERN_ERR "%d:%d:%d : invalid sync pipe. is_playback %d, ep %02x, bSynchAddress %02x\n",
			   dev->devnum, fmt->iface, fmt->altsetting,
			   is_playback, ep, get_endpoint(alts, 0)->bSynchAddress);
		return -EINVAL;
	}

	/* an implicit-feedback sync EP carries real audio data, so register
	 * it as a DATA endpoint rather than a pure SYNC one */
	implicit_fb = (get_endpoint(alts, 1)->bmAttributes & USB_ENDPOINT_USAGE_MASK)
			== USB_ENDPOINT_USAGE_IMPLICIT_FB;

	subs->sync_endpoint = snd_usb_add_endpoint(subs->stream->chip,
						   alts, ep, !subs->direction,
						   implicit_fb ?
							SND_USB_ENDPOINT_TYPE_DATA :
							SND_USB_ENDPOINT_TYPE_SYNC);
	if (!subs->sync_endpoint)
		return -EINVAL;

	subs->data_endpoint->sync_master = subs->sync_endpoint;

	return 0;
}
+
 /*
  * find a matching format and set up the interface
  */
@@ -336,9 +467,7 @@ static int set_format(struct snd_usb_substream *subs, struct audioformat *fmt)
        struct usb_host_interface *alts;
        struct usb_interface_descriptor *altsd;
        struct usb_interface *iface;
-       unsigned int ep, attr;
-       int is_playback = subs->direction == SNDRV_PCM_STREAM_PLAYBACK;
-       int err, implicit_fb = 0;
+       int err;
 
        iface = usb_ifnum_to_if(dev, fmt->iface);
        if (WARN_ON(!iface))
@@ -383,118 +512,22 @@ static int set_format(struct snd_usb_substream *subs, struct audioformat *fmt)
        subs->data_endpoint = snd_usb_add_endpoint(subs->stream->chip,
                                                   alts, fmt->endpoint, subs->direction,
                                                   SND_USB_ENDPOINT_TYPE_DATA);
+
        if (!subs->data_endpoint)
                return -EINVAL;
 
-       /* we need a sync pipe in async OUT or adaptive IN mode */
-       /* check the number of EP, since some devices have broken
-        * descriptors which fool us.  if it has only one EP,
-        * assume it as adaptive-out or sync-in.
-        */
-       attr = fmt->ep_attr & USB_ENDPOINT_SYNCTYPE;
-
-       switch (subs->stream->chip->usb_id) {
-       case USB_ID(0x0763, 0x2030): /* M-Audio Fast Track C400 */
-       case USB_ID(0x0763, 0x2031): /* M-Audio Fast Track C600 */
-               if (is_playback) {
-                       implicit_fb = 1;
-                       ep = 0x81;
-                       iface = usb_ifnum_to_if(dev, 3);
-
-                       if (!iface || iface->num_altsetting == 0)
-                               return -EINVAL;
-
-                       alts = &iface->altsetting[1];
-                       goto add_sync_ep;
-               }
-               break;
-       case USB_ID(0x0763, 0x2080): /* M-Audio FastTrack Ultra */
-       case USB_ID(0x0763, 0x2081):
-               if (is_playback) {
-                       implicit_fb = 1;
-                       ep = 0x81;
-                       iface = usb_ifnum_to_if(dev, 2);
-
-                       if (!iface || iface->num_altsetting == 0)
-                               return -EINVAL;
-
-                       alts = &iface->altsetting[1];
-                       goto add_sync_ep;
-               }
-       }
-       if (is_playback &&
-           attr == USB_ENDPOINT_SYNC_ASYNC &&
-           altsd->bInterfaceClass == USB_CLASS_VENDOR_SPEC &&
-           altsd->bInterfaceProtocol == 2 &&
-           altsd->bNumEndpoints == 1 &&
-           USB_ID_VENDOR(subs->stream->chip->usb_id) == 0x0582 /* Roland */ &&
-           search_roland_implicit_fb(dev, altsd->bInterfaceNumber + 1,
-                                     altsd->bAlternateSetting,
-                                     &alts, &ep) >= 0) {
-               implicit_fb = 1;
-               goto add_sync_ep;
-       }
-
-       if (((is_playback && attr == USB_ENDPOINT_SYNC_ASYNC) ||
-            (!is_playback && attr == USB_ENDPOINT_SYNC_ADAPTIVE)) &&
-           altsd->bNumEndpoints >= 2) {
-               /* check sync-pipe endpoint */
-               /* ... and check descriptor size before accessing bSynchAddress
-                  because there is a version of the SB Audigy 2 NX firmware lacking
-                  the audio fields in the endpoint descriptors */
-               if ((get_endpoint(alts, 1)->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) != USB_ENDPOINT_XFER_ISOC ||
-                   (get_endpoint(alts, 1)->bLength >= USB_DT_ENDPOINT_AUDIO_SIZE &&
-                    get_endpoint(alts, 1)->bSynchAddress != 0 &&
-                    !implicit_fb)) {
-                       snd_printk(KERN_ERR "%d:%d:%d : invalid sync pipe. bmAttributes %02x, bLength %d, bSynchAddress %02x\n",
-                                  dev->devnum, fmt->iface, fmt->altsetting,
-                                  get_endpoint(alts, 1)->bmAttributes,
-                                  get_endpoint(alts, 1)->bLength,
-                                  get_endpoint(alts, 1)->bSynchAddress);
-                       return -EINVAL;
-               }
-               ep = get_endpoint(alts, 1)->bEndpointAddress;
-               if (!implicit_fb &&
-                   get_endpoint(alts, 0)->bLength >= USB_DT_ENDPOINT_AUDIO_SIZE &&
-                   (( is_playback && ep != (unsigned int)(get_endpoint(alts, 0)->bSynchAddress | USB_DIR_IN)) ||
-                    (!is_playback && ep != (unsigned int)(get_endpoint(alts, 0)->bSynchAddress & ~USB_DIR_IN)))) {
-                       snd_printk(KERN_ERR "%d:%d:%d : invalid sync pipe. is_playback %d, ep %02x, bSynchAddress %02x\n",
-                                  dev->devnum, fmt->iface, fmt->altsetting,
-                                  is_playback, ep, get_endpoint(alts, 0)->bSynchAddress);
-                       return -EINVAL;
-               }
-
-               implicit_fb = (get_endpoint(alts, 1)->bmAttributes & USB_ENDPOINT_USAGE_MASK)
-                               == USB_ENDPOINT_USAGE_IMPLICIT_FB;
-
-add_sync_ep:
-               subs->sync_endpoint = snd_usb_add_endpoint(subs->stream->chip,
-                                                          alts, ep, !subs->direction,
-                                                          implicit_fb ?
-                                                               SND_USB_ENDPOINT_TYPE_DATA :
-                                                               SND_USB_ENDPOINT_TYPE_SYNC);
-               if (!subs->sync_endpoint)
-                       return -EINVAL;
-
-               subs->data_endpoint->sync_master = subs->sync_endpoint;
-       }
+       err = set_sync_endpoint(subs, fmt, dev, alts, altsd);
+       if (err < 0)
+               return err;
 
-       if ((err = snd_usb_init_pitch(subs->stream->chip, fmt->iface, alts, fmt)) < 0)
+       err = snd_usb_init_pitch(subs->stream->chip, fmt->iface, alts, fmt);
+       if (err < 0)
                return err;
 
        subs->cur_audiofmt = fmt;
 
        snd_usb_set_format_quirk(subs, fmt);
 
-#if 0
-       printk(KERN_DEBUG
-              "setting done: format = %d, rate = %d..%d, channels = %d\n",
-              fmt->format, fmt->rate_min, fmt->rate_max, fmt->channels);
-       printk(KERN_DEBUG
-              "  datapipe = 0x%0x, syncpipe = 0x%0x\n",
-              subs->datapipe, subs->syncpipe);
-#endif
-
        return 0;
 }
 
index 1f9bbd55553fc79bd539b8ad17f369be5beed267..5a51b18c50fe9230faabb923219c281b45a47970 100644 (file)
@@ -305,11 +305,9 @@ static void usX2Y_unlinkSeq(struct snd_usX2Y_AsyncSeq *S)
 {
        int     i;
        for (i = 0; i < URBS_AsyncSeq; ++i) {
-               if (S[i].urb) {
-                       usb_kill_urb(S->urb[i]);
-                       usb_free_urb(S->urb[i]);
-                       S->urb[i] = NULL;
-               }
+               usb_kill_urb(S->urb[i]);
+               usb_free_urb(S->urb[i]);
+               S->urb[i] = NULL;
        }
        kfree(S->buffer);
 }
index 68f67cf3d3182f5dcc696a50f3ef889c05ea10d2..32cf2ce15d69bcfca9c24da9ad318fc1a2e84eb2 100644 (file)
 #include <pwd.h>
 #include <grp.h>
 
+#ifndef VIRTIO_F_ANY_LAYOUT
+#define VIRTIO_F_ANY_LAYOUT            27
+#endif
+
 /*L:110
  * We can ignore the 43 include files we need for this program, but I do want
  * to draw attention to the use of kernel-style types.
@@ -1544,6 +1548,8 @@ static void setup_tun_net(char *arg)
        add_feature(dev, VIRTIO_NET_F_HOST_ECN);
        /* We handle indirect ring entries */
        add_feature(dev, VIRTIO_RING_F_INDIRECT_DESC);
+       /* We're compliant with the damn spec. */
+       add_feature(dev, VIRTIO_F_ANY_LAYOUT);
        set_config(dev, sizeof(conf), &conf);
 
        /* We don't need the socket any more; setup is done. */
diff --git a/tools/power/fspin/Makefile b/tools/power/fspin/Makefile
new file mode 100644 (file)
index 0000000..5274007
--- /dev/null
@@ -0,0 +1,21 @@
+CC             = $(CROSS_COMPILE)gcc
+BUILD_OUTPUT   := $(PWD)
+PREFIX         := /usr
+DESTDIR                :=
+
+fspin : fspin.c
+CFLAGS +=      -Wall
+
+%: %.c
+       @mkdir -p $(BUILD_OUTPUT)
+       $(CC) $(CFLAGS) $< -o $(BUILD_OUTPUT)/$@ -lpthread
+
+.PHONY : clean
+clean :
+       @rm -f $(BUILD_OUTPUT)/fspin
+
+install : fspin
+       install -d  $(DESTDIR)$(PREFIX)/bin
+       install $(BUILD_OUTPUT)/fspin $(DESTDIR)$(PREFIX)/bin/fspin
+       install -d  $(DESTDIR)$(PREFIX)/share/man/man1
+       install fspin.1 $(DESTDIR)$(PREFIX)/share/man/man1
diff --git a/tools/power/fspin/fspin.1 b/tools/power/fspin/fspin.1
new file mode 100644 (file)
index 0000000..b57308e
--- /dev/null
@@ -0,0 +1,68 @@
+.\"  This page Copyright (C) 2013 Len Brown <len.brown@intel.com>
+.\"  Distributed under the GPL, Copyleft 1994.
+.TH FSPIN 1
+.SH NAME
+fspin \- simple workload for power experiments
+.SH SYNOPSIS
+.ft B
+.B fspin
+.RB [ "\-v" ]
+.RB [ "\-i iterations" ]
+.RB [ "\-s sec_per_iteration" ]
+.RB [ "\-t threads" ]
+.RB [ "\-b bind_to_cpus" ]
+.RB [ "\-m memory (b|k|m)" ]
+.br
+.SH DESCRIPTION
+\fBfspin\fP
+heats up the hardware by running a
+floating-point spin loop per processor.
+Every
+.I sec_per_iteration
+fspin presents the sum of the work completed
+by all threads.
+.SS Options
+.PP
+\fB-v\fP increases verbosity.
+By default, fspin prints only the quantity work completed.
+.PP
+\fB-s sec_per_iteration\fP
+Print the indicator of work completed every
+sec_per_iteration seconds.  By default, 5 sec.
+.PP
+\fB-t threads\fP
+Create
+.I threads
+software threads.  Default is number of
+logical processors available, or if '-b' option is used,
+one thread per bound processor.
+.PP
+\fB-b bind_to_cpus\fP
+Bind the threads to the indicated list of comma-separated CPU numbers.
+A range of CPUs can be specified by using '-'.
+.PP
+\fB-i iterations\fP
+Exit after
+.I iterations
+and print total of work completed.
+Default is to continue running forever, printing work per iteration/sec.
+.PP
+\fB-m memory\fP
+Allocate arrays of 
+.I memory_size,
+which is followed by a modifier b|k|m, for bytes, kilobytes, or megabytes,
+respectively.  Default is 512 bytes, which will spin in-cache.
+Increase this number to exercise larger caches and memory.
+
+.SH WHAT FSPIN IS NOT
+Fspin is just a simple tool,
+and has not been characterized as a
+.I performance benchmark.
+Fspin is not a
+.I power virus for cooling HW design,
+as there are better tools, specialized for that purpose.
+
+.PP
+.SH AUTHORS
+.nf
+Written by Len Brown <len.brown@intel.com>
diff --git a/tools/power/fspin/fspin.c b/tools/power/fspin/fspin.c
new file mode 100644 (file)
index 0000000..38288c1
--- /dev/null
@@ -0,0 +1,443 @@
+/*
+ * fspin.c - user utility to burn CPU cycles, thrash the cache and memory
+ *
+ * Copyright (c) 2013, Intel Corporation.
+ * Len Brown <len.brown@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+/*
+ * Creates one thread per logical processor (override with -t).
+ * Threads run on any processor (override with -b).
+ * Each thread allocates and initializes its own data.
+ * Then it processes the data using an infinite DAXPY loop:
+ * Double precision Y[i] = A*X[i] + Y[i]
+ *
+ * The parent thread wakes up every reporting interval,
+ * (override 5 sec default with -s),
+ * sums up and prints aggregate performance.
+ *
+ * The actual computation is somewhat arbitrary, if not random.
+ * The performance number is intended only to be compared to itself
+ * on the same machine, to illustrate how various power limiting
+ * techniques impact performance.
+ */
+#define _GNU_SOURCE
+#include <pthread.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <errno.h>
+#include <ctype.h>
+#include <time.h>
+#include <sched.h>
+#include <errno.h>
+#include <sys/time.h>
+
+#define BANNER "fspin v1.1, April 7, 2013 - Len Brown <len.brown@intel.com>"
+
+#define handle_error_en(en, msg) \
+       do { errno = en; perror(msg); exit(EXIT_FAILURE); } while (0)
+
+#define handle_error(msg) \
+       do { perror(msg); exit(EXIT_FAILURE); } while (0)
+
struct thread_info {		/* Used as argument to spin_loop() */
	pthread_t thread_id;	/* ID returned by pthread_create() */
	int thread_num;		/* Application-defined thread # */
};

/* Per-thread work counter, padded so each thread's counter lives on its
 * own cache line and the increments don't false-share. */
struct padded {
	double counter;	/* 8 bytes */
	double pad[(32 - 1)];	/* round up to 256 byte line */
} *thread_data;		/* one entry per worker thread */

int num_threads;		/* actual worker count, set in create_threads() */
int thread_num_override;	/* -t: force this many threads (0 = auto) */
int data_bytes = 512;		/* -m: size of each per-thread x[]/y[] array */
int nrcpus = 64;		/* CPU-count guess, grown by get_num_cpus() */
int sec_per_interval = 5;	/* seconds */
int iterations;			/* -i: report intervals to run (0 = forever) */
int verbose;			/* -v: chatty option parsing */
int do_binding;			/* -b: number of CPUs bound (0 = unbound) */
+
+void
+allocate_cpusets()
+{
+       /*
+        * Allocate and initialize cpu_affinity_set
+        */
+       cpu_affinity_set = CPU_ALLOC(nrcpus);
+       if (cpu_affinity_set == NULL) {
+               perror("CPU_ALLOC");
+               exit(3);
+       }
+       cpu_affinity_setsize = CPU_ALLOC_SIZE(nrcpus);
+       CPU_ZERO_S(cpu_affinity_setsize, cpu_affinity_set);
+}
+
+void
+bind_to_cpus()
+{
+       if (!do_binding)
+               return;
+
+       if (sched_setaffinity(0, cpu_affinity_setsize, cpu_affinity_set) == -1) {
+               fprintf(stderr, "bind_to_cpus() failed\n");
+               perror("sched_setaffinity");
+               exit(-1);
+       }
+}
+
+int get_num_cpus()
+{
+       cpu_set_t *mask;
+       size_t size;
+       int num_cpus;
+
+realloc:
+       mask = CPU_ALLOC(nrcpus);
+       size = CPU_ALLOC_SIZE(nrcpus);
+       CPU_ZERO_S(size, mask);
+       if (sched_getaffinity(0, size, mask) == -1) {
+               CPU_FREE(mask);
+               if (errno == EINVAL &&
+                       nrcpus < (1024 << 8)) {
+                       nrcpus = nrcpus << 2;
+                       goto realloc;
+               }
+               perror("sched_getaffinity");
+               return -1;
+       }
+
+       num_cpus = CPU_COUNT_S(size, mask);
+
+       CPU_FREE(mask);
+
+       return num_cpus;
+}
+
+static void *spin_loop(void *arg)
+{
+       struct thread_info *tinfo = (struct thread_info *)arg;
+       double *x, *y;
+       int i = 0;
+       int data_entries = data_bytes / sizeof(double);
+       unsigned long long bitmask = random();
+
+               
+       x = malloc(data_bytes);
+       y = malloc(data_bytes);
+
+       if (x == NULL || y == NULL) {
+               perror("malloc");
+               exit(-1);
+       }
+
+       /*
+        * seed data array with random bits
+        */
+       for (i = 0; i < data_entries; ++i) {
+               x[i] = 1.0 + i * bitmask;
+               y[i] = 1.0 + i * bitmask;
+       }
+
+       for (i = 0; ; i++) {
+
+               double a = 3.1415926535 * i;
+
+               y[i] = a * x[i] + y[i];         /* DAXPY */
+
+               thread_data[tinfo->thread_num].counter++;
+
+               if (i >= data_entries)
+                       i = 0;
+       }
+       /* not reached */
+}
+
/* Print the command-line synopsis to stderr and terminate. */
void usage()
{
	static const char *msg =
		"Usage: fspin [-v][-s sec_per_iteration][-i iterations][-t num_threads][-b cpu_list][-m memory(b|k|m)]\n"
		"\twhere 'cpu_list' is comma and dash separated numbers with no spaces\n";

	fputs(msg, stderr);
	exit(EXIT_FAILURE);
}
+
/* Report the character at which cpu-list parsing failed, then print
 * usage and exit (usage() does not return). */
void parse_error(char *string, char c)
{
	fprintf(stderr, "parse error on '%s' at '%c'\n", string, c);
	usage();
}
+
+int add_cpu_to_bind_mask(int cpu) {
+       static int num_added;
+
+       /* check if cpu is valid */
+       if (cpu < 0 || cpu > nrcpus) {
+               fprintf(stderr, "invalid cpu %d\n", cpu);
+               exit(1);
+       }
+
+       if (CPU_ISSET_S(cpu, cpu_affinity_setsize, cpu_affinity_set)) {
+               fprintf(stderr, "can't bind to cpu %d more than once\n", cpu);
+               exit(1);
+       }
+
+       /* add cpu to set */
+       CPU_SET_S(cpu, cpu_affinity_setsize, cpu_affinity_set);
+
+       if (verbose)
+               printf("%d, ", cpu);
+
+       num_added += 1;
+
+       return num_added;
+}
+
+
/*
 * Parse the -b argument: comma-separated CPU numbers with optional
 * low-to-high dash ranges, e.g. "0,2,4-7".  Allocates the global
 * affinity set and marks each listed CPU in it.
 *
 * Returns the total number of CPUs added; exits via parse_error()/
 * usage() on malformed input.
 */
int
parse_bind_cpu_list(char *cpu_list)
{
	char *p;
	int range_next = -1;	/* >=0: low end of a pending "a-b" range */
	int total_cpus_added = 0;

	allocate_cpusets();

	for(p = cpu_list; *p != '\0'; ) {
		int num, retval;

		/* remaining list must start w/ valid cpu number */

		if (!isdigit(*p))
			parse_error(p, *p);

		retval = sscanf(p, "%u", &num);
		if (retval == EOF)
			usage();
		else if (retval == 0)
			parse_error(p, *p);

		/* close out a pending range: add range_next .. num-1 here,
		 * num itself is added below like any other token */
		if (range_next >= 0) {
			if (num <= range_next)	/* range must be low to high */
				parse_error(p, *p);

			for ( ; range_next < num; range_next++)
				total_cpus_added = add_cpu_to_bind_mask(range_next);

			range_next = -1;
		}

		total_cpus_added = add_cpu_to_bind_mask(num);

		/* skip past the digits just consumed by sscanf */
		while (isdigit(*p))
			p++;

		switch (*p) {
		case ',':
			p++;
			continue;
		case '-':
			/* low end was just added, so the range resumes at num+1 */
			range_next = num + 1;
			p++;
			continue;
		}

	}
	return total_cpus_added;
}
+
/*
 * Parse the -m argument: a decimal count followed by a units letter
 * (b/B bytes, k/K KiB, m/M MiB, g/G GiB).  Returns the size in bytes.
 *
 * Exits on malformed input.  The original printed the bad-units error
 * but then fell through and returned the UNSCALED value, silently
 * running with the wrong array size; now it fails fast instead.
 */
int parse_memory_param(char *p)
{
	int bytes;
	char units;

	if (2 != sscanf(p, "%d%c", &bytes, &units)) {
		fprintf(stderr, "failed to parse -m\n");
		usage();
	}
	switch (units) {
	case 'b':
	case 'B':
		break;
	case 'k':
	case 'K':
		bytes *= 1024;
		break;
	case 'm':
	case 'M':
		bytes *= 1024*1024;
		break;
	case 'g':
	case 'G':
		/* NOTE(review): counts > 1 overflow int here — likely fine
		 * for this tool's intended sizes, but worth confirming */
		bytes *= 1024*1024*1024;
		break;
	default:
		fprintf(stderr, "-m: bad memory units, use b, k, m, g\n");
		exit(EXIT_FAILURE);
	}
	return bytes;
}
+
/*
 * Parse command-line options into the file-scope configuration globals.
 * Also initializes nrcpus from the current affinity mask before any -b
 * parsing, since the bind list is validated against it.
 *
 * Note: -v only affects the echoing of options that appear AFTER it on
 * the command line, because verbose is checked as each option is parsed.
 */
void parse_args(int argc, char *argv[])
{
	int opt;

	nrcpus = get_num_cpus();

	while ((opt = getopt(argc, argv, "s:i:t:b:m:v")) != -1) {
		switch (opt) {
		case 's':	/* seconds per reporting interval */
			sec_per_interval = atoi(optarg);
			if (verbose)
				printf("sec_per_interval %d\n", sec_per_interval);
			break;
		case 'i':	/* number of intervals before exiting */
			iterations = atoi(optarg);
			if (verbose)
				printf("iterations %d\n", iterations);
			break;
		case 't':	/* explicit thread count */
			thread_num_override = atoi(optarg);
			if (verbose)
				printf("Thread Count Override: %d\n", thread_num_override);
			break;
		case 'b':	/* CPU bind list; returns #CPUs bound */
			do_binding = parse_bind_cpu_list(optarg);
			if (verbose)
				printf("Binding to %d CPUs.\n", do_binding);
			break;
		case 'm':	/* per-thread array size in bytes */
			data_bytes = parse_memory_param(optarg);
			if (verbose)
				printf("Memory Override: %d\n", data_bytes);
			break;
		case 'v':
			verbose++;
			break;
		default:	/* '?' */
			usage();	/* does not return */
		}
	}
}
+
+unsigned long long lsum_old;
+
+
+struct thread_info *tinfo;
+pthread_attr_t attr;
+
/*
 * Spawn the worker threads.  Thread count precedence: -t override,
 * else the number of CPUs bound with -b, else one per logical CPU.
 * Allocates the per-thread cache-line-padded counters (thread_data)
 * and the pthread_create() argument array (tinfo).
 * Exits via handle_error*() on any allocation or pthread failure.
 */
void create_threads()
{
	int s, tnum;

	if (thread_num_override)
		num_threads = thread_num_override;
	else if (do_binding)
		num_threads = do_binding;
	else
		num_threads = nrcpus;

	/* calloc zeroes the counters, so reporting starts from 0 */
	thread_data = calloc(num_threads, sizeof(struct padded));
	if (thread_data == NULL)
		handle_error("calloc");

	/* Initialize thread creation attributes */

	s = pthread_attr_init(&attr);
	if (s != 0)
		handle_error_en(s, "pthread_attr_init");

	/* Allocate memory for pthread_create() arguments */

	tinfo = calloc(num_threads, sizeof(struct thread_info));
	if (tinfo == NULL)
		handle_error("calloc");

	for (tnum = 0; tnum < num_threads; tnum++) {
		tinfo[tnum].thread_num = tnum;

		/* The pthread_create() call stores the thread ID into
		 * corresponding element of tinfo[]
		 */

		s = pthread_create(&tinfo[tnum].thread_id, &attr,
				   &spin_loop, &tinfo[tnum]);
		if (s != 0)
			handle_error_en(s, "pthread_create");
	}
	printf("%d threads created\n", num_threads);
	return;
}
+
+
/*
 * Parent-thread reporting loop: every sec_per_interval seconds, sum the
 * per-thread counters and print the work rate (millions of DAXPY
 * operations per second for that interval).  Runs forever unless -i
 * gave an iteration count, in which case a per-thread summary and grand
 * total are printed before returning.
 *
 * The worker counters are read without locking; an occasional torn or
 * stale read only perturbs one interval's printed rate.
 */
void monitor_threads()
{
	struct timespec ts;
	struct timeval tv_old, tv_new, tv_delta;
	int i, j;
	double interval_float;
	unsigned long long lsum;

	ts.tv_sec = sec_per_interval;
	ts.tv_nsec = 0;
	gettimeofday(&tv_old, (struct timezone *)NULL);

	/* iterations == 0 means loop forever */
	for (i = 0; iterations ? i < iterations : 1 ; i++) {

		if (nanosleep(&ts, NULL) != 0) {
			perror("nanosleep");
			exit(-1);
		}

		for (j = 0, lsum = 0; j < num_threads; ++j)
			lsum += thread_data[j].counter;

		gettimeofday(&tv_new, NULL);
		timersub(&tv_new, &tv_old, &tv_delta);

		/* rate = (ops this interval) / seconds / 1e6 */
		interval_float = tv_delta.tv_sec + tv_delta.tv_usec/1000000.0;
		printf("%.2f\n", (lsum - lsum_old)/interval_float/1000000);

		tv_old = tv_new;
		lsum_old = lsum;
	}
	/* summary */
	for (j = 0, lsum = 0; j < num_threads; ++j) {
		printf("%d %.2f\n", j, thread_data[j].counter/1000000.0);
		lsum += thread_data[j].counter;
	}
	printf("Total %.2f\n", lsum/1000000.0);

}
+
+
+void print_banner()
+{
+       puts(BANNER);
+}
+
/*
 * Entry point: parse options, announce, apply CPU bindings, spawn the
 * spin-loop workers, then sit in the reporting loop.
 */
int main(int argc, char *argv[])
{
	parse_args(argc, argv);
	print_banner();
	bind_to_cpus();
	create_threads();

	/* loops forever unless -i limited the iteration count */
	monitor_threads();

	return 0;
}
index 4cb14cae37911ddbf41ee81fb46301b53ea144fb..9f3eae2909009517cb96de2f882121883f43444e 100644 (file)
@@ -8,6 +8,7 @@ TARGETS += net
 TARGETS += ptrace
 TARGETS += timers
 TARGETS += vm
+TARGETS += powerpc
 
 all:
        for TARGET in $(TARGETS); do \
diff --git a/tools/testing/selftests/powerpc/Makefile b/tools/testing/selftests/powerpc/Makefile
new file mode 100644 (file)
index 0000000..bd24ae5
--- /dev/null
@@ -0,0 +1,39 @@
+# Makefile for powerpc selftests
+
+# ARCH can be overridden by the user for cross compiling
+ARCH ?= $(shell uname -m)
+ARCH := $(shell echo $(ARCH) | sed -e s/ppc.*/powerpc/)
+
+ifeq ($(ARCH),powerpc)
+
+GIT_VERSION = $(shell git describe --always --long --dirty || echo "unknown")
+
+CC := $(CROSS_COMPILE)$(CC)
+CFLAGS := -Wall -O2 -flto -Werror -DGIT_VERSION='"$(GIT_VERSION)"' -I$(CURDIR) $(CFLAGS)
+
+export CC CFLAGS
+
+TARGETS = pmu
+
+endif
+
+all:
+	@for TARGET in $(TARGETS); do \
+		$(MAKE) -C $$TARGET all; \
+	done;
+
+run_tests: all
+	@for TARGET in $(TARGETS); do \
+		$(MAKE) -C $$TARGET run_tests; \
+	done;
+
+clean:
+	@for TARGET in $(TARGETS); do \
+		$(MAKE) -C $$TARGET clean; \
+	done;
+	rm -f tags
+
+tags:
+	find . -name '*.c' -o -name '*.h' | xargs ctags
+
+.PHONY: all run_tests clean tags
diff --git a/tools/testing/selftests/powerpc/harness.c b/tools/testing/selftests/powerpc/harness.c
new file mode 100644 (file)
index 0000000..e80c42a
--- /dev/null
@@ -0,0 +1,105 @@
+/*
+ * Copyright 2013, Michael Ellerman, IBM Corp.
+ * Licensed under GPLv2.
+ */
+
+#include <errno.h>
+#include <signal.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <unistd.h>
+
+#include "subunit.h"
+#include "utils.h"
+
+#define TIMEOUT                120
+#define KILL_TIMEOUT   5
+
+
+int run_test(int (test_function)(void), char *name)
+{
+       bool terminated;
+       int rc, status;
+       pid_t pid;
+
+       /* Make sure output is flushed before forking */
+       fflush(stdout);
+
+       pid = fork();
+       if (pid == 0) {
+               exit(test_function());
+       } else if (pid == -1) {
+               perror("fork");
+               return 1;
+       }
+
+       /* Wake us up in timeout seconds */
+       alarm(TIMEOUT);
+       terminated = false;
+
+wait:
+       rc = waitpid(pid, &status, 0);
+       if (rc == -1) {
+               if (errno != EINTR) {
+                       printf("unknown error from waitpid\n");
+                       return 1;
+               }
+
+               if (terminated) {
+                       printf("!! force killing %s\n", name);
+                       kill(pid, SIGKILL);
+                       return 1;
+               } else {
+                       printf("!! killing %s\n", name);
+                       kill(pid, SIGTERM);
+                       terminated = true;
+                       alarm(KILL_TIMEOUT);
+                       goto wait;
+               }
+       }
+
+       if (WIFEXITED(status))
+               status = WEXITSTATUS(status);
+       else {
+               if (WIFSIGNALED(status))
+                       printf("!! child died by signal %d\n", WTERMSIG(status));
+               else
+                       printf("!! child died by unknown cause\n");
+
+               status = 1; /* Signal or other */
+       }
+
+       return status;
+}
+
+static void alarm_handler(int signum)
+{
+       /* Just wake us up from waitpid */
+}
+
+static struct sigaction alarm_action = {
+       .sa_handler = alarm_handler,
+};
+
+int test_harness(int (test_function)(void), char *name)
+{
+       int rc;
+
+       test_start(name);
+       test_set_git_version(GIT_VERSION);
+
+       if (sigaction(SIGALRM, &alarm_action, NULL)) {
+               perror("sigaction");
+               test_error(name);
+               return 1;
+       }
+
+       rc = run_test(test_function, name);
+
+       test_finish(name, rc);
+
+       return rc;
+}
diff --git a/tools/testing/selftests/powerpc/pmu/Makefile b/tools/testing/selftests/powerpc/pmu/Makefile
new file mode 100644 (file)
index 0000000..7216f00
--- /dev/null
@@ -0,0 +1,23 @@
+noarg:
+       $(MAKE) -C ../
+
+PROGS := count_instructions
+EXTRA_SOURCES := ../harness.c event.c
+
+all: $(PROGS)
+
+$(PROGS): $(EXTRA_SOURCES)
+
+# loop.S can only be built 64-bit
+count_instructions: loop.S count_instructions.c $(EXTRA_SOURCES)
+       $(CC) $(CFLAGS) -m64 -o $@ $^
+
+run_tests: all
+       @-for PROG in $(PROGS); do \
+               ./$$PROG; \
+       done;
+
+clean:
+       rm -f $(PROGS) loop.o
+
+.PHONY: all run_tests clean
diff --git a/tools/testing/selftests/powerpc/pmu/count_instructions.c b/tools/testing/selftests/powerpc/pmu/count_instructions.c
new file mode 100644 (file)
index 0000000..312b4f0
--- /dev/null
@@ -0,0 +1,135 @@
+/*
+ * Copyright 2013, Michael Ellerman, IBM Corp.
+ * Licensed under GPLv2.
+ */
+
+#define _GNU_SOURCE
+
+#include <stdio.h>
+#include <stdbool.h>
+#include <string.h>
+#include <sys/prctl.h>
+
+#include "event.h"
+#include "utils.h"
+
+extern void thirty_two_instruction_loop(u64 loops);
+
+static void setup_event(struct event *e, u64 config, char *name)
+{
+       event_init_opts(e, config, PERF_TYPE_HARDWARE, name);
+
+       e->attr.disabled = 1;
+       e->attr.exclude_kernel = 1;
+       e->attr.exclude_hv = 1;
+       e->attr.exclude_idle = 1;
+}
+
+static int do_count_loop(struct event *events, u64 instructions,
+                        u64 overhead, bool report)
+{
+       s64 difference, expected;
+       double percentage;
+
+       prctl(PR_TASK_PERF_EVENTS_ENABLE);
+
+       /* Run the requested number of instructions */
+       thirty_two_instruction_loop(instructions >> 5);
+
+       prctl(PR_TASK_PERF_EVENTS_DISABLE);
+
+       event_read(&events[0]);
+       event_read(&events[1]);
+
+       expected = instructions + overhead;
+       difference = events[0].result.value - expected;
+       percentage = (double)difference / events[0].result.value * 100;
+
+       if (report) {
+               event_report(&events[0]);
+               event_report(&events[1]);
+
+               printf("Looped for %llu instructions, overhead %llu\n", instructions, overhead);
+               printf("Expected %lld\n", expected);
+               printf("Actual   %llu\n", events[0].result.value);
+               printf("Delta    %lld, %f%%\n", difference, percentage);
+       }
+
+       event_reset(&events[0]);
+       event_reset(&events[1]);
+
+       if (difference < 0)
+               difference = -difference;
+
+       /* Tolerate a difference below 0.0001 % */
+       difference *= 10000 * 100;
+       if (difference / events[0].result.value)
+               return -1;
+
+       return 0;
+}
+
+/* Count how many instructions it takes to do a null loop */
+static u64 determine_overhead(struct event *events)
+{
+       u64 current, overhead;
+       int i;
+
+       do_count_loop(events, 0, 0, false);
+       overhead = events[0].result.value;
+
+       for (i = 0; i < 100; i++) {
+               do_count_loop(events, 0, 0, false);
+               current = events[0].result.value;
+               if (current < overhead) {
+                       printf("Replacing overhead %llu with %llu\n", overhead, current);
+                       overhead = current;
+               }
+       }
+
+       return overhead;
+}
+
+static int count_instructions(void)
+{
+       struct event events[2];
+       u64 overhead;
+
+       setup_event(&events[0], PERF_COUNT_HW_INSTRUCTIONS, "instructions");
+       setup_event(&events[1], PERF_COUNT_HW_CPU_CYCLES, "cycles");
+
+       if (event_open(&events[0])) {
+               perror("perf_event_open");
+               return -1;
+       }
+
+       if (event_open_with_group(&events[1], events[0].fd)) {
+               perror("perf_event_open");
+               return -1;
+       }
+
+       overhead = determine_overhead(events);
+       printf("Overhead of null loop: %llu instructions\n", overhead);
+
+       /* Run for 1M instructions */
+       FAIL_IF(do_count_loop(events, 0x100000, overhead, true));
+
+       /* Run for 10M instructions */
+       FAIL_IF(do_count_loop(events, 0xa00000, overhead, true));
+
+       /* Run for 100M instructions */
+       FAIL_IF(do_count_loop(events, 0x6400000, overhead, true));
+
+       /* Run for 1G instructions */
+       FAIL_IF(do_count_loop(events, 0x40000000, overhead, true));
+
+       event_close(&events[0]);
+       event_close(&events[1]);
+
+       return 0;
+}
+
+int main(void)
+{
+       return test_harness(count_instructions, "count_instructions");
+}
diff --git a/tools/testing/selftests/powerpc/pmu/event.c b/tools/testing/selftests/powerpc/pmu/event.c
new file mode 100644 (file)
index 0000000..2b2d11d
--- /dev/null
@@ -0,0 +1,105 @@
+/*
+ * Copyright 2013, Michael Ellerman, IBM Corp.
+ * Licensed under GPLv2.
+ */
+
+#define _GNU_SOURCE
+#include <unistd.h>
+#include <sys/syscall.h>
+#include <string.h>
+#include <stdio.h>
+#include <sys/ioctl.h>
+
+#include "event.h"
+
+
+int perf_event_open(struct perf_event_attr *attr, pid_t pid, int cpu,
+               int group_fd, unsigned long flags)
+{
+       return syscall(__NR_perf_event_open, attr, pid, cpu,
+                          group_fd, flags);
+}
+
+void event_init_opts(struct event *e, u64 config, int type, char *name)
+{
+       memset(e, 0, sizeof(*e));
+
+       e->name = name;
+
+       e->attr.type = type;
+       e->attr.config = config;
+       e->attr.size = sizeof(e->attr);
+       /* This has to match the structure layout in the header */
+       e->attr.read_format = PERF_FORMAT_TOTAL_TIME_ENABLED | \
+                                 PERF_FORMAT_TOTAL_TIME_RUNNING;
+}
+
+void event_init_named(struct event *e, u64 config, char *name)
+{
+       event_init_opts(e, config, PERF_TYPE_RAW, name);
+}
+
+#define PERF_CURRENT_PID       0
+#define PERF_NO_CPU            -1
+#define PERF_NO_GROUP          -1
+
+int event_open_with_options(struct event *e, pid_t pid, int cpu, int group_fd)
+{
+       e->fd = perf_event_open(&e->attr, pid, cpu, group_fd, 0);
+       if (e->fd == -1) {
+               perror("perf_event_open");
+               return -1;
+       }
+
+       return 0;
+}
+
+int event_open_with_group(struct event *e, int group_fd)
+{
+       return event_open_with_options(e, PERF_CURRENT_PID, PERF_NO_CPU, group_fd);
+}
+
+int event_open(struct event *e)
+{
+       return event_open_with_options(e, PERF_CURRENT_PID, PERF_NO_CPU, PERF_NO_GROUP);
+}
+
+void event_close(struct event *e)
+{
+       close(e->fd);
+}
+
+int event_reset(struct event *e)
+{
+       return ioctl(e->fd, PERF_EVENT_IOC_RESET);
+}
+
+int event_read(struct event *e)
+{
+       int rc;
+
+       rc = read(e->fd, &e->result, sizeof(e->result));
+       if (rc != sizeof(e->result)) {
+               fprintf(stderr, "read error on event %p!\n", e);
+               return -1;
+       }
+
+       return 0;
+}
+
+void event_report_justified(struct event *e, int name_width, int result_width)
+{
+       printf("%*s: result %*llu ", name_width, e->name, result_width,
+              e->result.value);
+
+       if (e->result.running == e->result.enabled)
+               printf("running/enabled %llu\n", e->result.running);
+       else
+               printf("running %llu enabled %llu\n", e->result.running,
+                       e->result.enabled);
+}
+
+void event_report(struct event *e)
+{
+       event_report_justified(e, 0, 0);
+}
diff --git a/tools/testing/selftests/powerpc/pmu/event.h b/tools/testing/selftests/powerpc/pmu/event.h
new file mode 100644 (file)
index 0000000..e699319
--- /dev/null
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2013, Michael Ellerman, IBM Corp.
+ * Licensed under GPLv2.
+ */
+
+#ifndef _SELFTESTS_POWERPC_PMU_EVENT_H
+#define _SELFTESTS_POWERPC_PMU_EVENT_H
+
+#include <unistd.h>
+#include <linux/perf_event.h>
+
+#include "utils.h"
+
+
+struct event {
+       struct perf_event_attr attr;
+       char *name;
+       int fd;
+       /* This must match the read_format we use */
+       struct {
+               u64 value;
+               u64 running;
+               u64 enabled;
+       } result;
+};
+
+void event_init(struct event *e, u64 config);
+void event_init_named(struct event *e, u64 config, char *name);
+void event_init_opts(struct event *e, u64 config, int type, char *name);
+int event_open_with_options(struct event *e, pid_t pid, int cpu, int group_fd);
+int event_open_with_group(struct event *e, int group_fd);
+int event_open(struct event *e);
+void event_close(struct event *e);
+int event_reset(struct event *e);
+int event_read(struct event *e);
+void event_report_justified(struct event *e, int name_width, int result_width);
+void event_report(struct event *e);
+
+#endif /* _SELFTESTS_POWERPC_PMU_EVENT_H */
diff --git a/tools/testing/selftests/powerpc/pmu/loop.S b/tools/testing/selftests/powerpc/pmu/loop.S
new file mode 100644 (file)
index 0000000..8820e3d
--- /dev/null
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2013, Michael Ellerman, IBM Corp.
+ * Licensed under GPLv2.
+ */
+
+       .text
+
+       .global thirty_two_instruction_loop
+       .type .thirty_two_instruction_loop,@function
+       .section ".opd","aw",@progbits
+thirty_two_instruction_loop:
+       .quad .thirty_two_instruction_loop, .TOC.@tocbase, 0
+       .previous
+.thirty_two_instruction_loop:
+       cmpwi   %r3,0
+       beqlr
+       addi    %r4,%r3,1
+       addi    %r4,%r4,1
+       addi    %r4,%r4,1
+       addi    %r4,%r4,1
+       addi    %r4,%r4,1
+       addi    %r4,%r4,1
+       addi    %r4,%r4,1
+       addi    %r4,%r4,1
+       addi    %r4,%r4,1
+       addi    %r4,%r4,1
+       addi    %r4,%r4,1
+       addi    %r4,%r4,1
+       addi    %r4,%r4,1
+       addi    %r4,%r4,1
+       addi    %r4,%r4,1
+       addi    %r4,%r4,1
+       addi    %r4,%r4,1
+       addi    %r4,%r4,1
+       addi    %r4,%r4,1
+       addi    %r4,%r4,1
+       addi    %r4,%r4,1
+       addi    %r4,%r4,1
+       addi    %r4,%r4,1
+       addi    %r4,%r4,1
+       addi    %r4,%r4,1
+       addi    %r4,%r4,1
+       addi    %r4,%r4,1
+       addi    %r4,%r4,1       # 28 addi's
+       subi    %r3,%r3,1
+       b       .thirty_two_instruction_loop
diff --git a/tools/testing/selftests/powerpc/subunit.h b/tools/testing/selftests/powerpc/subunit.h
new file mode 100644 (file)
index 0000000..98a2292
--- /dev/null
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2013, Michael Ellerman, IBM Corp.
+ * Licensed under GPLv2.
+ */
+
+#ifndef _SELFTESTS_POWERPC_SUBUNIT_H
+#define _SELFTESTS_POWERPC_SUBUNIT_H
+
+static inline void test_start(char *name)
+{
+       printf("test: %s\n", name);
+}
+
+static inline void test_failure_detail(char *name, char *detail)
+{
+       printf("failure: %s [%s]\n", name, detail);
+}
+
+static inline void test_failure(char *name)
+{
+       printf("failure: %s\n", name);
+}
+
+static inline void test_error(char *name)
+{
+       printf("error: %s\n", name);
+}
+
+static inline void test_success(char *name)
+{
+       printf("success: %s\n", name);
+}
+
+static inline void test_finish(char *name, int status)
+{
+       if (status)
+               test_failure(name);
+       else
+               test_success(name);
+}
+
+static inline void test_set_git_version(char *value)
+{
+       printf("tags: git_version:%s\n", value);
+}
+
+#endif /* _SELFTESTS_POWERPC_SUBUNIT_H */
diff --git a/tools/testing/selftests/powerpc/utils.h b/tools/testing/selftests/powerpc/utils.h
new file mode 100644 (file)
index 0000000..5851c4b
--- /dev/null
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2013, Michael Ellerman, IBM Corp.
+ * Licensed under GPLv2.
+ */
+
+#ifndef _SELFTESTS_POWERPC_UTILS_H
+#define _SELFTESTS_POWERPC_UTILS_H
+
+#include <stdint.h>
+#include <stdbool.h>
+
+/* Avoid headaches with PRI?64 - just use %ll? always */
+typedef unsigned long long u64;
+typedef   signed long long s64;
+
+/* Just for familiarity */
+typedef uint32_t u32;
+typedef uint8_t u8;
+
+
+int test_harness(int (test_function)(void), char *name);
+
+
+/* Yes, this is evil */
+#define FAIL_IF(x)                                             \
+do {                                                           \
+       if ((x)) {                                              \
+               fprintf(stderr,                                 \
+               "[FAIL] Test FAILED on line %d\n", __LINE__);   \
+               return 1;                                       \
+       }                                                       \
+} while (0)
+
+#endif /* _SELFTESTS_POWERPC_UTILS_H */
diff --git a/tools/virtio/.gitignore b/tools/virtio/.gitignore
new file mode 100644 (file)
index 0000000..1cfbb01
--- /dev/null
@@ -0,0 +1,3 @@
+*.d
+virtio_test
+vringh_test
index 1580dd4ace4eac20b37043c2f5c882349204ed4a..1b8a1f13fcea9899f191dafccc38c12dae3eb796 100644 (file)
@@ -1691,7 +1691,7 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu)
        finish_wait(&vcpu->wq, &wait);
 }
 
-#ifndef CONFIG_S390
+#if !defined(CONFIG_S390) && !defined(CONFIG_TILE)
 /*
  * Kick a sleeping VCPU, or a guest VCPU in guest mode, into host kernel mode.
  */
@@ -1714,7 +1714,7 @@ void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
        put_cpu();
 }
 EXPORT_SYMBOL_GPL(kvm_vcpu_kick);
-#endif /* !CONFIG_S390 */
+#endif
 
 void kvm_resched(struct kvm_vcpu *vcpu)
 {
@@ -1978,7 +1978,8 @@ static long kvm_vcpu_ioctl(struct file *filp,
        if (vcpu->kvm->mm != current->mm)
                return -EIO;
 
-#if defined(CONFIG_S390) || defined(CONFIG_PPC) || defined(CONFIG_MIPS)
+#if defined(CONFIG_S390) || defined(CONFIG_PPC) || defined(CONFIG_MIPS) || \
+       defined(CONFIG_TILEGX)
        /*
         * Special cases: vcpu ioctls that are asynchronous to vcpu execution,
         * so vcpu_load() would break it.